release-request-46bf7ca6-57e7-44b8-8edc-ea8830c1cb3b-for-git_oc-mr1-release-4090244 snap-temp-L07700000073092334

Change-Id: I584166e536b063d6b3f05eea3157dd0cc656bf51
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..0d20b64
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/AUTHORS b/AUTHORS
index 476d0c3..756d1dc 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -48,6 +48,7 @@
 Burcu Dogan <burcujdogan@gmail.com>
 Caitlin Potter <caitpotter88@gmail.com>
 Craig Schlenter <craig.schlenter@gmail.com>
+Choongwoo Han <cwhan.tunz@gmail.com>
 Chris Nardi <hichris123@gmail.com>
 Christopher A. Taylor <chris@gameclosure.com>
 Daniel Andersson <kodandersson@gmail.com>
@@ -64,7 +65,6 @@
 Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
 Geoffrey Garside <ggarside@gmail.com>
 Gwang Yoon Hwang <ryumiel@company100.net>
-Han Choongwoo <cwhan.tunz@gmail.com>
 Henrique Ferreiro <henrique.ferreiro@gmail.com>
 Hirofumi Mako <mkhrfm@gmail.com>
 Honggyu Kim <honggyu.kp@gmail.com>
@@ -81,6 +81,7 @@
 JunHo Seo <sejunho@gmail.com>
 Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
 Karl Skomski <karl@skomski.com>
+Kevin Gibbons <bakkot@gmail.com>
 Luis Reis <luis.m.reis@gmail.com>
 Luke Zarko <lukezarko@gmail.com>
 Maciej Małecki <me@mmalecki.com>
@@ -104,6 +105,7 @@
 Peter Rybin <peter.rybin@gmail.com>
 Peter Varga <pvarga@inf.u-szeged.hu>
 Paul Lind <plind44@gmail.com>
+Qiuyi Zhang <qiuyi.zqy@alibaba-inc.com>
 Rafal Krypa <rafal@krypa.net>
 Refael Ackermann <refack@gmail.com>
 Rene Rebe <rene@exactcode.de>
diff --git a/Android.v8.mk b/Android.v8.mk
index fce8dfe..107ed38 100644
--- a/Android.v8.mk
+++ b/Android.v8.mk
@@ -23,7 +23,7 @@
 	src/assembler.cc \
 	src/assert-scope.cc \
 	src/ast/ast-expression-rewriter.cc \
-	src/ast/ast-literal-reindexer.cc \
+	src/ast/ast-function-literal-id-reindexer.cc \
 	src/ast/ast-numbering.cc \
 	src/ast/ast-types.cc \
 	src/ast/ast-value-factory.cc \
@@ -32,7 +32,6 @@
 	src/ast/context-slot-cache.cc \
 	src/ast/modules.cc \
 	src/ast/prettyprinter.cc \
-	src/ast/scopeinfo.cc \
 	src/ast/scopes.cc \
 	src/ast/variables.cc \
 	src/background-parsing-task.cc \
@@ -43,11 +42,16 @@
 	src/bit-vector.cc \
 	src/bootstrapper.cc \
 	src/builtins/builtins-api.cc \
+	src/builtins/builtins-arguments.cc \
 	src/builtins/builtins-array.cc \
 	src/builtins/builtins-arraybuffer.cc \
+	src/builtins/builtins-async-function.cc \
+	src/builtins/builtins-async-iterator.cc \
+	src/builtins/builtins-async.cc \
 	src/builtins/builtins-boolean.cc \
 	src/builtins/builtins-call.cc \
 	src/builtins/builtins-callsite.cc \
+	src/builtins/builtins-constructor.cc \
 	src/builtins/builtins-conversion.cc \
 	src/builtins/builtins-dataview.cc \
 	src/builtins/builtins-date.cc \
@@ -57,9 +61,9 @@
 	src/builtins/builtins-generator.cc \
 	src/builtins/builtins-global.cc \
 	src/builtins/builtins-handler.cc \
+	src/builtins/builtins-ic.cc \
 	src/builtins/builtins-internal.cc \
 	src/builtins/builtins-interpreter.cc \
-	src/builtins/builtins-iterator.cc \
 	src/builtins/builtins-json.cc \
 	src/builtins/builtins-math.cc \
 	src/builtins/builtins-number.cc \
@@ -72,6 +76,7 @@
 	src/builtins/builtins-string.cc \
 	src/builtins/builtins-symbol.cc \
 	src/builtins/builtins-typedarray.cc \
+	src/builtins/builtins-wasm.cc \
 	src/builtins/builtins.cc \
 	src/cached-powers.cc \
 	src/cancelable-task.cc \
@@ -87,6 +92,7 @@
 	src/compilation-statistics.cc \
 	src/compiler-dispatcher/compiler-dispatcher-job.cc \
 	src/compiler-dispatcher/compiler-dispatcher-tracer.cc \
+	src/compiler-dispatcher/compiler-dispatcher.cc \
 	src/compiler-dispatcher/optimizing-compile-dispatcher.cc \
 	src/compiler.cc \
 	src/compiler/access-builder.cc \
@@ -96,9 +102,9 @@
 	src/compiler/ast-loop-assignment-analyzer.cc \
 	src/compiler/basic-block-instrumentor.cc \
 	src/compiler/branch-elimination.cc \
-	src/compiler/bytecode-branch-analysis.cc \
+	src/compiler/bytecode-analysis.cc \
 	src/compiler/bytecode-graph-builder.cc \
-	src/compiler/bytecode-loop-analysis.cc \
+	src/compiler/bytecode-liveness-map.cc \
 	src/compiler/c-linkage.cc \
 	src/compiler/checkpoint-elimination.cc \
 	src/compiler/code-assembler.cc \
@@ -118,6 +124,7 @@
 	src/compiler/frame-states.cc \
 	src/compiler/frame.cc \
 	src/compiler/gap-resolver.cc \
+	src/compiler/graph-assembler.cc \
 	src/compiler/graph-reducer.cc \
 	src/compiler/graph-replay.cc \
 	src/compiler/graph-trimmer.cc \
@@ -133,13 +140,13 @@
 	src/compiler/js-create-lowering.cc \
 	src/compiler/js-frame-specialization.cc \
 	src/compiler/js-generic-lowering.cc \
-	src/compiler/js-global-object-specialization.cc \
 	src/compiler/js-graph.cc \
 	src/compiler/js-inlining-heuristic.cc \
 	src/compiler/js-inlining.cc \
 	src/compiler/js-intrinsic-lowering.cc \
 	src/compiler/js-native-context-specialization.cc \
 	src/compiler/js-operator.cc \
+	src/compiler/js-type-hint-lowering.cc \
 	src/compiler/js-typed-lowering.cc \
 	src/compiler/jump-threading.cc \
 	src/compiler/linkage.cc \
@@ -182,7 +189,6 @@
 	src/compiler/store-store-elimination.cc \
 	src/compiler/tail-call-optimization.cc \
 	src/compiler/type-cache.cc \
-	src/compiler/type-hint-analyzer.cc \
 	src/compiler/typed-optimization.cc \
 	src/compiler/typer.cc \
 	src/compiler/types.cc \
@@ -225,6 +231,7 @@
 	src/crankshaft/typing.cc \
 	src/date.cc \
 	src/dateparser.cc \
+	src/debug/debug-coverage.cc \
 	src/debug/debug-evaluate.cc \
 	src/debug/debug-frames.cc \
 	src/debug/debug-scopes.cc \
@@ -249,6 +256,8 @@
 	src/factory.cc \
 	src/fast-accessor-assembler.cc \
 	src/fast-dtoa.cc \
+	src/feedback-vector.cc \
+	src/ffi/ffi-compiler.cc \
 	src/field-type.cc \
 	src/fixed-dtoa.cc \
 	src/flags.cc \
@@ -260,6 +269,7 @@
 	src/handles.cc \
 	src/heap/array-buffer-tracker.cc \
 	src/heap/code-stats.cc \
+	src/heap/embedder-tracing.cc \
 	src/heap/gc-idle-time-handler.cc \
 	src/heap/gc-tracer.cc \
 	src/heap/heap.cc \
@@ -275,18 +285,21 @@
 	src/heap/store-buffer.cc \
 	src/i18n.cc \
 	src/ic/access-compiler.cc \
+	src/ic/accessor-assembler.cc \
 	src/ic/call-optimization.cc \
 	src/ic/handler-compiler.cc \
-	src/ic/ic-compiler.cc \
 	src/ic/ic-state.cc \
+	src/ic/ic-stats.cc \
 	src/ic/ic.cc \
 	src/ic/keyed-store-generic.cc \
 	src/ic/stub-cache.cc \
 	src/icu_util.cc \
 	src/identity-map.cc \
 	src/interface-descriptors.cc \
+	src/interpreter/bytecode-array-accessor.cc \
 	src/interpreter/bytecode-array-builder.cc \
 	src/interpreter/bytecode-array-iterator.cc \
+	src/interpreter/bytecode-array-random-iterator.cc \
 	src/interpreter/bytecode-array-writer.cc \
 	src/interpreter/bytecode-dead-code-optimizer.cc \
 	src/interpreter/bytecode-decoder.cc \
@@ -315,18 +328,23 @@
 	src/lookup-cache.cc \
 	src/lookup.cc \
 	src/machine-type.cc \
+	src/map-updater.cc \
 	src/messages.cc \
 	src/objects-debug.cc \
 	src/objects-printer.cc \
 	src/objects.cc \
+	src/objects/literal-objects.cc \
+	src/objects/scope-info.cc \
 	src/ostreams.cc \
 	src/parsing/duplicate-finder.cc \
 	src/parsing/func-name-inferrer.cc \
 	src/parsing/parameter-initializer-rewriter.cc \
 	src/parsing/parse-info.cc \
 	src/parsing/parser.cc \
+	src/parsing/parsing.cc \
 	src/parsing/pattern-rewriter.cc \
 	src/parsing/preparse-data.cc \
+	src/parsing/preparsed-scope-data.cc \
 	src/parsing/preparser.cc \
 	src/parsing/rewriter.cc \
 	src/parsing/scanner-character-streams.cc \
@@ -344,7 +362,6 @@
 	src/profiler/strings-storage.cc \
 	src/profiler/tick-sample.cc \
 	src/profiler/tracing-cpu-profiler.cc \
-	src/promise-utils.cc \
 	src/property-descriptor.cc \
 	src/property.cc \
 	src/regexp/interpreter-irregexp.cc \
@@ -384,7 +401,6 @@
 	src/runtime/runtime-proxy.cc \
 	src/runtime/runtime-regexp.cc \
 	src/runtime/runtime-scopes.cc \
-	src/runtime/runtime-simd.cc \
 	src/runtime/runtime-strings.cc \
 	src/runtime/runtime-symbol.cc \
 	src/runtime/runtime-test.cc \
@@ -405,13 +421,13 @@
 	src/source-position.cc \
 	src/startup-data-util.cc \
 	src/string-builder.cc \
+	src/string-case.cc \
 	src/string-stream.cc \
 	src/strtod.cc \
 	src/tracing/trace-event.cc \
 	src/tracing/traced-value.cc \
 	src/tracing/tracing-category-observer.cc \
 	src/transitions.cc \
-	src/type-feedback-vector.cc \
 	src/type-hints.cc \
 	src/type-info.cc \
 	src/unicode-decoder.cc \
@@ -422,9 +438,10 @@
 	src/v8threads.cc \
 	src/value-serializer.cc \
 	src/version.cc \
-	src/wasm/ast-decoder.cc \
+	src/wasm/function-body-decoder.cc \
 	src/wasm/module-decoder.cc \
 	src/wasm/signature-map.cc \
+	src/wasm/wasm-code-specialization.cc \
 	src/wasm/wasm-debug.cc \
 	src/wasm/wasm-external-refs.cc \
 	src/wasm/wasm-interpreter.cc \
@@ -434,6 +451,7 @@
 	src/wasm/wasm-objects.cc \
 	src/wasm/wasm-opcodes.cc \
 	src/wasm/wasm-result.cc \
+	src/wasm/wasm-text.cc \
 	src/zone/accounting-allocator.cc \
 	src/zone/zone-segment.cc \
 	src/zone/zone.cc
@@ -463,8 +481,6 @@
 	src/ic/arm/access-compiler-arm.cc \
 	src/ic/arm/handler-compiler-arm.cc \
 	src/ic/arm/ic-arm.cc \
-	src/ic/arm/ic-compiler-arm.cc \
-	src/ic/arm/stub-cache-arm.cc \
 	src/regexp/arm/regexp-macro-assembler-arm.cc
 LOCAL_SRC_FILES_arm64 += \
 	src/arm64/assembler-arm64.cc \
@@ -496,8 +512,6 @@
 	src/ic/arm64/access-compiler-arm64.cc \
 	src/ic/arm64/handler-compiler-arm64.cc \
 	src/ic/arm64/ic-arm64.cc \
-	src/ic/arm64/ic-compiler-arm64.cc \
-	src/ic/arm64/stub-cache-arm64.cc \
 	src/regexp/arm64/regexp-macro-assembler-arm64.cc
 LOCAL_SRC_FILES_mips += \
 	src/builtins/mips/builtins-mips.cc \
@@ -511,9 +525,7 @@
 	src/full-codegen/mips/full-codegen-mips.cc \
 	src/ic/mips/access-compiler-mips.cc \
 	src/ic/mips/handler-compiler-mips.cc \
-	src/ic/mips/ic-compiler-mips.cc \
 	src/ic/mips/ic-mips.cc \
-	src/ic/mips/stub-cache-mips.cc \
 	src/mips/assembler-mips.cc \
 	src/mips/code-stubs-mips.cc \
 	src/mips/codegen-mips.cc \
@@ -538,9 +550,7 @@
 	src/full-codegen/mips64/full-codegen-mips64.cc \
 	src/ic/mips64/access-compiler-mips64.cc \
 	src/ic/mips64/handler-compiler-mips64.cc \
-	src/ic/mips64/ic-compiler-mips64.cc \
 	src/ic/mips64/ic-mips64.cc \
-	src/ic/mips64/stub-cache-mips64.cc \
 	src/mips64/assembler-mips64.cc \
 	src/mips64/code-stubs-mips64.cc \
 	src/mips64/codegen-mips64.cc \
@@ -575,9 +585,7 @@
 	src/ia32/simulator-ia32.cc \
 	src/ic/ia32/access-compiler-ia32.cc \
 	src/ic/ia32/handler-compiler-ia32.cc \
-	src/ic/ia32/ic-compiler-ia32.cc \
 	src/ic/ia32/ic-ia32.cc \
-	src/ic/ia32/stub-cache-ia32.cc \
 	src/regexp/ia32/regexp-macro-assembler-ia32.cc
 LOCAL_SRC_FILES_x86_64 += \
 	src/builtins/x64/builtins-x64.cc \
@@ -592,9 +600,7 @@
 	src/full-codegen/x64/full-codegen-x64.cc \
 	src/ic/x64/access-compiler-x64.cc \
 	src/ic/x64/handler-compiler-x64.cc \
-	src/ic/x64/ic-compiler-x64.cc \
 	src/ic/x64/ic-x64.cc \
-	src/ic/x64/stub-cache-x64.cc \
 	src/regexp/x64/regexp-macro-assembler-x64.cc \
 	src/x64/assembler-x64.cc \
 	src/x64/code-stubs-x64.cc \
diff --git a/Android.v8gen.mk b/Android.v8gen.mk
index d3f670e..48aa42b 100644
--- a/Android.v8gen.mk
+++ b/Android.v8gen.mk
@@ -11,7 +11,6 @@
 	src/js/prologue.js \
 	src/js/runtime.js \
 	src/js/v8natives.js \
-	src/js/symbol.js \
 	src/js/array.js \
 	src/js/string.js \
 	src/js/arraybuffer.js \
@@ -24,16 +23,14 @@
 	src/js/templates.js \
 	src/js/spread.js \
 	src/js/proxy.js \
-	src/js/async-await.js \
+	src/js/harmony-string-padding.js \
 	src/debug/mirrors.js \
 	src/debug/debug.js \
 	src/debug/liveedit.js
 V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES := \
 	src/js/macros.py \
 	src/messages.h \
-	src/js/harmony-atomics.js \
-	src/js/harmony-simd.js \
-	src/js/harmony-string-padding.js
+	src/js/harmony-atomics.js
 LOCAL_SRC_FILES += src/snapshot/snapshot-empty.cc
 LOCAL_JS_LIBRARY_FILES := $(addprefix $(LOCAL_PATH)/, $(V8_LOCAL_JS_LIBRARY_FILES))
 LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES := $(addprefix $(LOCAL_PATH)/, $(V8_LOCAL_JS_EXPERIMENTAL_LIBRARY_FILES))
diff --git a/BUILD.gn b/BUILD.gn
index 8587356..8895103 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -14,8 +14,6 @@
 
 import("gni/v8.gni")
 import("gni/isolate.gni")
-import("//build_overrides/v8.gni")
-
 import("snapshot_toolchain.gni")
 
 declare_args() {
@@ -23,13 +21,16 @@
   v8_android_log_stdout = false
 
   # Sets -DVERIFY_HEAP.
-  v8_enable_verify_heap = false
+  v8_enable_verify_heap = ""
+
+  # Sets -DVERIFY_PREDICTABLE.
+  v8_enable_verify_predictable = false
 
   # Enable compiler warnings when using V8_DEPRECATED apis.
   v8_deprecation_warnings = false
 
   # Enable compiler warnings when using V8_DEPRECATE_SOON apis.
-  v8_imminent_deprecation_warnings = ""
+  v8_imminent_deprecation_warnings = false
 
   # Embeds the given script into the snapshot.
   v8_embed_script = ""
@@ -40,18 +41,30 @@
   # Sets -dENABLE_GDB_JIT_INTERFACE.
   v8_enable_gdbjit = ""
 
+  # Sets -dENABLE_VTUNE_JIT_INTERFACE.
+  v8_enable_vtunejit = false
+
   # Sets -dENABLE_HANDLE_ZAPPING.
   v8_enable_handle_zapping = is_debug
 
   # Enable slow dchecks.
   v8_enable_slow_dchecks = false
 
+  # Enable code-generation-time checking of types in the CodeStubAssembler.
+  v8_enable_verify_csa = false
+
   # An interpreted regexp engine exists as a platform-independent
   # alternative in which the regular expression is compiled to bytecode.
   v8_interpreted_regexp = false
 
   # Sets -dOBJECT_PRINT.
-  v8_object_print = ""
+  v8_enable_object_print = ""
+
+  # Sets -dTRACE_MAPS.
+  v8_enable_trace_maps = ""
+
+  # Sets -dV8_ENABLE_CHECKS.
+  v8_enable_v8_checks = ""
 
   # With post mortem support enabled, metadata is embedded into libv8 that
   # describes various parameters of the VM for use by debuggers. See
@@ -69,31 +82,39 @@
 
   # Similar to the ARM hard float ABI but on MIPS.
   v8_use_mips_abi_hardfloat = true
-}
 
-# Set project-specific defaults for some args if not provided in args.gn. The
-# defaults can be set in the respective build_overrides files.
-if (v8_imminent_deprecation_warnings == "") {
-  if (defined(v8_imminent_deprecation_warnings_default)) {
-    v8_imminent_deprecation_warnings = v8_imminent_deprecation_warnings_default
-  } else {
-    v8_imminent_deprecation_warnings = false
-  }
-}
-if (v8_enable_gdbjit == "") {
-  if (defined(v8_enable_gdbjit_default)) {
-    v8_enable_gdbjit = v8_enable_gdbjit_default
-  } else {
-    v8_enable_gdbjit = false
-  }
+  # List of extra files to snapshot. They will be snapshotted in order, so
+  # if files export symbols used by later files, they should go first.
+  #
+  # This default is used by cctests. Projects using V8 will want to override.
+  v8_extra_library_files = [ "//test/cctest/test-extra.js" ]
+
+  # Like v8_extra_library_files but for experimental features.
+  #
+  # This default is used by cctests. Projects using V8 will want to override.
+  v8_experimental_extra_library_files =
+      [ "//test/cctest/test-experimental-extra.js" ]
+
+  v8_enable_gdbjit = ((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
+                       v8_current_cpu == "x87") && (is_linux || is_mac)) ||
+                     (v8_current_cpu == "ppc64" && is_linux)
 }
 
 # Derived defaults.
-if (v8_object_print == "") {
-  v8_object_print = is_debug && !v8_optimized_debug
+if (v8_enable_verify_heap == "") {
+  v8_enable_verify_heap = is_debug
+}
+if (v8_enable_object_print == "") {
+  v8_enable_object_print = is_debug
 }
 if (v8_enable_disassembler == "") {
-  v8_enable_disassembler = is_debug && !v8_optimized_debug
+  v8_enable_disassembler = is_debug
+}
+if (v8_enable_trace_maps == "") {
+  v8_enable_trace_maps = is_debug
+}
+if (v8_enable_v8_checks == "") {
+  v8_enable_v8_checks = is_debug
 }
 
 # Specifies if the target build is a simulator build. Comparing target cpu
@@ -155,7 +176,7 @@
     defines = [ "USING_V8_SHARED" ]
   }
   include_dirs = [ "include" ]
-  if (v8_enable_inspector_override) {
+  if (v8_enable_inspector) {
     include_dirs += [ "$target_gen_dir/include" ]
   }
 }
@@ -179,12 +200,24 @@
   if (v8_enable_gdbjit) {
     defines += [ "ENABLE_GDB_JIT_INTERFACE" ]
   }
-  if (v8_object_print) {
+  if (v8_enable_vtunejit) {
+    defines += [ "ENABLE_VTUNE_JIT_INTERFACE" ]
+  }
+  if (v8_enable_object_print) {
     defines += [ "OBJECT_PRINT" ]
   }
   if (v8_enable_verify_heap) {
     defines += [ "VERIFY_HEAP" ]
   }
+  if (v8_enable_verify_predictable) {
+    defines += [ "VERIFY_PREDICTABLE" ]
+  }
+  if (v8_enable_trace_maps) {
+    defines += [ "TRACE_MAPS" ]
+  }
+  if (v8_enable_v8_checks) {
+    defines += [ "V8_ENABLE_CHECKS" ]
+  }
   if (v8_interpreted_regexp) {
     defines += [ "V8_INTERPRETED_REGEXP" ]
   }
@@ -348,15 +381,7 @@
       ldflags += [ "-rdynamic" ]
     }
 
-    # TODO(jochen): Add support for different debug optimization levels.
-    defines += [
-      "ENABLE_DISASSEMBLER",
-      "V8_ENABLE_CHECKS",
-      "OBJECT_PRINT",
-      "VERIFY_HEAP",
-      "DEBUG",
-      "TRACE_MAPS",
-    ]
+    defines += [ "DEBUG" ]
     if (v8_enable_slow_dchecks) {
       defines += [ "ENABLE_SLOW_DCHECKS" ]
     }
@@ -364,6 +389,10 @@
     defines += [ "DEBUG" ]
   }
 
+  if (v8_enable_verify_csa) {
+    defines += [ "ENABLE_VERIFY_CSA" ]
+  }
+
   if (v8_no_inline) {
     cflags += [
       "-fno-inline-functions",
@@ -378,11 +407,10 @@
       # TODO(hans): Remove once http://crbug.com/428099 is resolved.
       "-Winconsistent-missing-override",
     ]
-
-    if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
-        v8_current_cpu == "mips64el") {
-      cflags += [ "-Wshorten-64-to-32" ]
-    }
+    #if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
+    #    v8_current_cpu == "mips64el") {
+    #  cflags += [ "-Wshorten-64-to-32" ]
+    #}
   }
 }
 
@@ -408,7 +436,6 @@
     "src/js/prologue.js",
     "src/js/runtime.js",
     "src/js/v8natives.js",
-    "src/js/symbol.js",
     "src/js/array.js",
     "src/js/string.js",
     "src/js/arraybuffer.js",
@@ -421,7 +448,7 @@
     "src/js/templates.js",
     "src/js/spread.js",
     "src/js/proxy.js",
-    "src/js/async-await.js",
+    "src/js/harmony-string-padding.js",
     "src/debug/mirrors.js",
     "src/debug/debug.js",
     "src/debug/liveedit.js",
@@ -465,21 +492,12 @@
     "src/js/macros.py",
     "src/messages.h",
     "src/js/harmony-atomics.js",
-    "src/js/harmony-simd.js",
-    "src/js/harmony-string-padding.js",
   ]
 
   outputs = [
     "$target_gen_dir/experimental-libraries.cc",
   ]
 
-  if (v8_enable_i18n_support) {
-    sources += [
-      "src/js/datetime-format-to-parts.js",
-      "src/js/icu-case-mapping.js",
-    ]
-  }
-
   args = [
            rebase_path("$target_gen_dir/experimental-libraries.cc",
                        root_build_dir),
@@ -733,6 +751,7 @@
   ]
   args = [
     rebase_path("$root_out_dir/v8_build_config.json", root_build_dir),
+    "current_cpu=\"$current_cpu\"",
     "dcheck_always_on=$dcheck_always_on",
     "is_asan=$is_asan",
     "is_cfi=$is_cfi",
@@ -741,8 +760,9 @@
     "is_msan=$is_msan",
     "is_tsan=$is_tsan",
     "target_cpu=\"$target_cpu\"",
+    "v8_current_cpu=\"$v8_current_cpu\"",
     "v8_enable_i18n_support=$v8_enable_i18n_support",
-    "v8_enable_inspector=$v8_enable_inspector_override",
+    "v8_enable_inspector=$v8_enable_inspector",
     "v8_target_cpu=\"$v8_target_cpu\"",
     "v8_use_snapshot=$v8_use_snapshot",
   ]
@@ -848,6 +868,17 @@
   }
 }
 
+# This is split out to be a non-code-containing target that the Chromium browser
+# DLL can depend upon to get only a version string.
+v8_header_set("v8_version") {
+  configs = [ ":internal_config" ]
+
+  sources = [
+    "include/v8-version-string.h",
+    "include/v8-version.h",
+  ]
+}
+
 v8_source_set("v8_base") {
   visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
@@ -861,7 +892,6 @@
     "include/v8-profiler.h",
     "include/v8-testing.h",
     "include/v8-util.h",
-    "include/v8-version.h",
     "include/v8.h",
     "include/v8config.h",
     "src/accessors.cc",
@@ -893,14 +923,15 @@
     "src/asmjs/asm-wasm-builder.h",
     "src/asmjs/switch-logic.cc",
     "src/asmjs/switch-logic.h",
+    "src/assembler-inl.h",
     "src/assembler.cc",
     "src/assembler.h",
     "src/assert-scope.cc",
     "src/assert-scope.h",
     "src/ast/ast-expression-rewriter.cc",
     "src/ast/ast-expression-rewriter.h",
-    "src/ast/ast-literal-reindexer.cc",
-    "src/ast/ast-literal-reindexer.h",
+    "src/ast/ast-function-literal-id-reindexer.cc",
+    "src/ast/ast-function-literal-id-reindexer.h",
     "src/ast/ast-numbering.cc",
     "src/ast/ast-numbering.h",
     "src/ast/ast-traversal-visitor.h",
@@ -919,7 +950,6 @@
     "src/ast/modules.h",
     "src/ast/prettyprinter.cc",
     "src/ast/prettyprinter.h",
-    "src/ast/scopeinfo.cc",
     "src/ast/scopes.cc",
     "src/ast/scopes.h",
     "src/ast/variables.cc",
@@ -939,11 +969,19 @@
     "src/bootstrapper.cc",
     "src/bootstrapper.h",
     "src/builtins/builtins-api.cc",
+    "src/builtins/builtins-arguments.cc",
+    "src/builtins/builtins-arguments.h",
     "src/builtins/builtins-array.cc",
     "src/builtins/builtins-arraybuffer.cc",
+    "src/builtins/builtins-async-function.cc",
+    "src/builtins/builtins-async-iterator.cc",
+    "src/builtins/builtins-async.cc",
+    "src/builtins/builtins-async.h",
     "src/builtins/builtins-boolean.cc",
     "src/builtins/builtins-call.cc",
     "src/builtins/builtins-callsite.cc",
+    "src/builtins/builtins-constructor.cc",
+    "src/builtins/builtins-constructor.h",
     "src/builtins/builtins-conversion.cc",
     "src/builtins/builtins-dataview.cc",
     "src/builtins/builtins-date.cc",
@@ -953,22 +991,26 @@
     "src/builtins/builtins-generator.cc",
     "src/builtins/builtins-global.cc",
     "src/builtins/builtins-handler.cc",
+    "src/builtins/builtins-ic.cc",
     "src/builtins/builtins-internal.cc",
     "src/builtins/builtins-interpreter.cc",
-    "src/builtins/builtins-iterator.cc",
     "src/builtins/builtins-json.cc",
     "src/builtins/builtins-math.cc",
     "src/builtins/builtins-number.cc",
     "src/builtins/builtins-object.cc",
+    "src/builtins/builtins-object.h",
     "src/builtins/builtins-promise.cc",
+    "src/builtins/builtins-promise.h",
     "src/builtins/builtins-proxy.cc",
     "src/builtins/builtins-reflect.cc",
     "src/builtins/builtins-regexp.cc",
+    "src/builtins/builtins-regexp.h",
     "src/builtins/builtins-sharedarraybuffer.cc",
     "src/builtins/builtins-string.cc",
     "src/builtins/builtins-symbol.cc",
     "src/builtins/builtins-typedarray.cc",
     "src/builtins/builtins-utils.h",
+    "src/builtins/builtins-wasm.cc",
     "src/builtins/builtins.cc",
     "src/builtins/builtins.h",
     "src/cached-powers.cc",
@@ -1002,6 +1044,8 @@
     "src/compiler-dispatcher/compiler-dispatcher-job.h",
     "src/compiler-dispatcher/compiler-dispatcher-tracer.cc",
     "src/compiler-dispatcher/compiler-dispatcher-tracer.h",
+    "src/compiler-dispatcher/compiler-dispatcher.cc",
+    "src/compiler-dispatcher/compiler-dispatcher.h",
     "src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
     "src/compiler-dispatcher/optimizing-compile-dispatcher.h",
     "src/compiler.cc",
@@ -1020,12 +1064,12 @@
     "src/compiler/basic-block-instrumentor.h",
     "src/compiler/branch-elimination.cc",
     "src/compiler/branch-elimination.h",
-    "src/compiler/bytecode-branch-analysis.cc",
-    "src/compiler/bytecode-branch-analysis.h",
+    "src/compiler/bytecode-analysis.cc",
+    "src/compiler/bytecode-analysis.h",
     "src/compiler/bytecode-graph-builder.cc",
     "src/compiler/bytecode-graph-builder.h",
-    "src/compiler/bytecode-loop-analysis.cc",
-    "src/compiler/bytecode-loop-analysis.h",
+    "src/compiler/bytecode-liveness-map.cc",
+    "src/compiler/bytecode-liveness-map.h",
     "src/compiler/c-linkage.cc",
     "src/compiler/checkpoint-elimination.cc",
     "src/compiler/checkpoint-elimination.h",
@@ -1065,6 +1109,8 @@
     "src/compiler/frame.h",
     "src/compiler/gap-resolver.cc",
     "src/compiler/gap-resolver.h",
+    "src/compiler/graph-assembler.cc",
+    "src/compiler/graph-assembler.h",
     "src/compiler/graph-reducer.cc",
     "src/compiler/graph-reducer.h",
     "src/compiler/graph-replay.cc",
@@ -1097,8 +1143,6 @@
     "src/compiler/js-frame-specialization.h",
     "src/compiler/js-generic-lowering.cc",
     "src/compiler/js-generic-lowering.h",
-    "src/compiler/js-global-object-specialization.cc",
-    "src/compiler/js-global-object-specialization.h",
     "src/compiler/js-graph.cc",
     "src/compiler/js-graph.h",
     "src/compiler/js-inlining-heuristic.cc",
@@ -1111,6 +1155,8 @@
     "src/compiler/js-native-context-specialization.h",
     "src/compiler/js-operator.cc",
     "src/compiler/js-operator.h",
+    "src/compiler/js-type-hint-lowering.cc",
+    "src/compiler/js-type-hint-lowering.h",
     "src/compiler/js-typed-lowering.cc",
     "src/compiler/js-typed-lowering.h",
     "src/compiler/jump-threading.cc",
@@ -1196,8 +1242,6 @@
     "src/compiler/tail-call-optimization.h",
     "src/compiler/type-cache.cc",
     "src/compiler/type-cache.h",
-    "src/compiler/type-hint-analyzer.cc",
-    "src/compiler/type-hint-analyzer.h",
     "src/compiler/typed-optimization.cc",
     "src/compiler/typed-optimization.h",
     "src/compiler/typer.cc",
@@ -1291,6 +1335,8 @@
     "src/dateparser-inl.h",
     "src/dateparser.cc",
     "src/dateparser.h",
+    "src/debug/debug-coverage.cc",
+    "src/debug/debug-coverage.h",
     "src/debug/debug-evaluate.cc",
     "src/debug/debug-evaluate.h",
     "src/debug/debug-frames.cc",
@@ -1300,6 +1346,7 @@
     "src/debug/debug-scopes.h",
     "src/debug/debug.cc",
     "src/debug/debug.h",
+    "src/debug/interface-types.h",
     "src/debug/liveedit.cc",
     "src/debug/liveedit.h",
     "src/deoptimize-reason.cc",
@@ -1343,10 +1390,16 @@
     "src/fast-accessor-assembler.h",
     "src/fast-dtoa.cc",
     "src/fast-dtoa.h",
+    "src/feedback-vector-inl.h",
+    "src/feedback-vector.cc",
+    "src/feedback-vector.h",
+    "src/ffi/ffi-compiler.cc",
+    "src/ffi/ffi-compiler.h",
     "src/field-index-inl.h",
     "src/field-index.h",
     "src/field-type.cc",
     "src/field-type.h",
+    "src/find-and-replace-pattern.h",
     "src/fixed-dtoa.cc",
     "src/fixed-dtoa.h",
     "src/flag-definitions.h",
@@ -1373,6 +1426,8 @@
     "src/heap/array-buffer-tracker.h",
     "src/heap/code-stats.cc",
     "src/heap/code-stats.h",
+    "src/heap/embedder-tracing.cc",
+    "src/heap/embedder-tracing.h",
     "src/heap/gc-idle-time-handler.cc",
     "src/heap/gc-idle-time-handler.h",
     "src/heap/gc-tracer.cc",
@@ -1414,17 +1469,19 @@
     "src/ic/access-compiler-data.h",
     "src/ic/access-compiler.cc",
     "src/ic/access-compiler.h",
+    "src/ic/accessor-assembler.cc",
+    "src/ic/accessor-assembler.h",
     "src/ic/call-optimization.cc",
     "src/ic/call-optimization.h",
     "src/ic/handler-compiler.cc",
     "src/ic/handler-compiler.h",
     "src/ic/handler-configuration-inl.h",
     "src/ic/handler-configuration.h",
-    "src/ic/ic-compiler.cc",
-    "src/ic/ic-compiler.h",
     "src/ic/ic-inl.h",
     "src/ic/ic-state.cc",
     "src/ic/ic-state.h",
+    "src/ic/ic-stats.cc",
+    "src/ic/ic-stats.h",
     "src/ic/ic.cc",
     "src/ic/ic.h",
     "src/ic/keyed-store-generic.cc",
@@ -1437,10 +1494,14 @@
     "src/identity-map.h",
     "src/interface-descriptors.cc",
     "src/interface-descriptors.h",
+    "src/interpreter/bytecode-array-accessor.cc",
+    "src/interpreter/bytecode-array-accessor.h",
     "src/interpreter/bytecode-array-builder.cc",
     "src/interpreter/bytecode-array-builder.h",
     "src/interpreter/bytecode-array-iterator.cc",
     "src/interpreter/bytecode-array-iterator.h",
+    "src/interpreter/bytecode-array-random-iterator.cc",
+    "src/interpreter/bytecode-array-random-iterator.h",
     "src/interpreter/bytecode-array-writer.cc",
     "src/interpreter/bytecode-array-writer.h",
     "src/interpreter/bytecode-dead-code-optimizer.cc",
@@ -1489,6 +1550,7 @@
     "src/json-stringifier.h",
     "src/keys.cc",
     "src/keys.h",
+    "src/label.h",
     "src/layout-descriptor-inl.h",
     "src/layout-descriptor.cc",
     "src/layout-descriptor.h",
@@ -1509,6 +1571,9 @@
     "src/machine-type.cc",
     "src/machine-type.h",
     "src/macro-assembler.h",
+    "src/managed.h",
+    "src/map-updater.cc",
+    "src/map-updater.h",
     "src/messages.cc",
     "src/messages.h",
     "src/msan.h",
@@ -1519,6 +1584,14 @@
     "src/objects-printer.cc",
     "src/objects.cc",
     "src/objects.h",
+    "src/objects/literal-objects.cc",
+    "src/objects/literal-objects.h",
+    "src/objects/module-info.h",
+    "src/objects/object-macros-undef.h",
+    "src/objects/object-macros.h",
+    "src/objects/regexp-match-info.h",
+    "src/objects/scope-info.cc",
+    "src/objects/scope-info.h",
     "src/ostreams.cc",
     "src/ostreams.h",
     "src/parsing/duplicate-finder.cc",
@@ -1533,10 +1606,14 @@
     "src/parsing/parser-base.h",
     "src/parsing/parser.cc",
     "src/parsing/parser.h",
+    "src/parsing/parsing.cc",
+    "src/parsing/parsing.h",
     "src/parsing/pattern-rewriter.cc",
     "src/parsing/preparse-data-format.h",
     "src/parsing/preparse-data.cc",
     "src/parsing/preparse-data.h",
+    "src/parsing/preparsed-scope-data.cc",
+    "src/parsing/preparsed-scope-data.h",
     "src/parsing/preparser.cc",
     "src/parsing/preparser.h",
     "src/parsing/rewriter.cc",
@@ -1578,8 +1655,6 @@
     "src/profiler/tracing-cpu-profiler.h",
     "src/profiler/unbound-queue-inl.h",
     "src/profiler/unbound-queue.h",
-    "src/promise-utils.cc",
-    "src/promise-utils.h",
     "src/property-descriptor.cc",
     "src/property-descriptor.h",
     "src/property-details.h",
@@ -1637,7 +1712,6 @@
     "src/runtime/runtime-proxy.cc",
     "src/runtime/runtime-regexp.cc",
     "src/runtime/runtime-scopes.cc",
-    "src/runtime/runtime-simd.cc",
     "src/runtime/runtime-strings.cc",
     "src/runtime/runtime-symbol.cc",
     "src/runtime/runtime-test.cc",
@@ -1679,6 +1753,8 @@
     "src/startup-data-util.h",
     "src/string-builder.cc",
     "src/string-builder.h",
+    "src/string-case.cc",
+    "src/string-case.h",
     "src/string-search.h",
     "src/string-stream.cc",
     "src/string-stream.h",
@@ -1693,9 +1769,7 @@
     "src/transitions-inl.h",
     "src/transitions.cc",
     "src/transitions.h",
-    "src/type-feedback-vector-inl.h",
-    "src/type-feedback-vector.cc",
-    "src/type-feedback-vector.h",
+    "src/trap-handler/trap-handler.h",
     "src/type-hints.cc",
     "src/type-hints.h",
     "src/type-info.cc",
@@ -1724,15 +1798,17 @@
     "src/version.h",
     "src/vm-state-inl.h",
     "src/vm-state.h",
-    "src/wasm/ast-decoder.cc",
-    "src/wasm/ast-decoder.h",
     "src/wasm/decoder.h",
+    "src/wasm/function-body-decoder-impl.h",
+    "src/wasm/function-body-decoder.cc",
+    "src/wasm/function-body-decoder.h",
     "src/wasm/leb-helper.h",
-    "src/wasm/managed.h",
     "src/wasm/module-decoder.cc",
     "src/wasm/module-decoder.h",
     "src/wasm/signature-map.cc",
     "src/wasm/signature-map.h",
+    "src/wasm/wasm-code-specialization.cc",
+    "src/wasm/wasm-code-specialization.h",
     "src/wasm/wasm-debug.cc",
     "src/wasm/wasm-external-refs.cc",
     "src/wasm/wasm-external-refs.h",
@@ -1740,6 +1816,7 @@
     "src/wasm/wasm-interpreter.h",
     "src/wasm/wasm-js.cc",
     "src/wasm/wasm-js.h",
+    "src/wasm/wasm-limits.h",
     "src/wasm/wasm-macro-gen.h",
     "src/wasm/wasm-module-builder.cc",
     "src/wasm/wasm-module-builder.h",
@@ -1751,12 +1828,15 @@
     "src/wasm/wasm-opcodes.h",
     "src/wasm/wasm-result.cc",
     "src/wasm/wasm-result.h",
+    "src/wasm/wasm-text.cc",
+    "src/wasm/wasm-text.h",
     "src/zone/accounting-allocator.cc",
     "src/zone/accounting-allocator.h",
     "src/zone/zone-allocator.h",
     "src/zone/zone-allocator.h",
     "src/zone/zone-chunk-list.h",
     "src/zone/zone-containers.h",
+    "src/zone/zone-handle-set.h",
     "src/zone/zone-segment.cc",
     "src/zone/zone-segment.h",
     "src/zone/zone.cc",
@@ -1797,9 +1877,7 @@
       "src/ia32/simulator-ia32.h",
       "src/ic/ia32/access-compiler-ia32.cc",
       "src/ic/ia32/handler-compiler-ia32.cc",
-      "src/ic/ia32/ic-compiler-ia32.cc",
       "src/ic/ia32/ic-ia32.cc",
-      "src/ic/ia32/stub-cache-ia32.cc",
       "src/regexp/ia32/regexp-macro-assembler-ia32.cc",
       "src/regexp/ia32/regexp-macro-assembler-ia32.h",
     ]
@@ -1822,9 +1900,7 @@
       "src/full-codegen/x64/full-codegen-x64.cc",
       "src/ic/x64/access-compiler-x64.cc",
       "src/ic/x64/handler-compiler-x64.cc",
-      "src/ic/x64/ic-compiler-x64.cc",
       "src/ic/x64/ic-x64.cc",
-      "src/ic/x64/stub-cache-x64.cc",
       "src/regexp/x64/regexp-macro-assembler-x64.cc",
       "src/regexp/x64/regexp-macro-assembler-x64.h",
       "src/third_party/valgrind/valgrind.h",
@@ -1889,8 +1965,6 @@
       "src/ic/arm/access-compiler-arm.cc",
       "src/ic/arm/handler-compiler-arm.cc",
       "src/ic/arm/ic-arm.cc",
-      "src/ic/arm/ic-compiler-arm.cc",
-      "src/ic/arm/stub-cache-arm.cc",
       "src/regexp/arm/regexp-macro-assembler-arm.cc",
       "src/regexp/arm/regexp-macro-assembler-arm.h",
     ]
@@ -1948,8 +2022,6 @@
       "src/ic/arm64/access-compiler-arm64.cc",
       "src/ic/arm64/handler-compiler-arm64.cc",
       "src/ic/arm64/ic-arm64.cc",
-      "src/ic/arm64/ic-compiler-arm64.cc",
-      "src/ic/arm64/stub-cache-arm64.cc",
       "src/regexp/arm64/regexp-macro-assembler-arm64.cc",
       "src/regexp/arm64/regexp-macro-assembler-arm64.h",
     ]
@@ -1970,9 +2042,7 @@
       "src/full-codegen/mips/full-codegen-mips.cc",
       "src/ic/mips/access-compiler-mips.cc",
       "src/ic/mips/handler-compiler-mips.cc",
-      "src/ic/mips/ic-compiler-mips.cc",
       "src/ic/mips/ic-mips.cc",
-      "src/ic/mips/stub-cache-mips.cc",
       "src/mips/assembler-mips-inl.h",
       "src/mips/assembler-mips.cc",
       "src/mips/assembler-mips.h",
@@ -2012,9 +2082,7 @@
       "src/full-codegen/mips64/full-codegen-mips64.cc",
       "src/ic/mips64/access-compiler-mips64.cc",
       "src/ic/mips64/handler-compiler-mips64.cc",
-      "src/ic/mips64/ic-compiler-mips64.cc",
       "src/ic/mips64/ic-mips64.cc",
-      "src/ic/mips64/stub-cache-mips64.cc",
       "src/mips64/assembler-mips64-inl.h",
       "src/mips64/assembler-mips64.cc",
       "src/mips64/assembler-mips64.h",
@@ -2054,9 +2122,7 @@
       "src/full-codegen/ppc/full-codegen-ppc.cc",
       "src/ic/ppc/access-compiler-ppc.cc",
       "src/ic/ppc/handler-compiler-ppc.cc",
-      "src/ic/ppc/ic-compiler-ppc.cc",
       "src/ic/ppc/ic-ppc.cc",
-      "src/ic/ppc/stub-cache-ppc.cc",
       "src/ppc/assembler-ppc-inl.h",
       "src/ppc/assembler-ppc.cc",
       "src/ppc/assembler-ppc.h",
@@ -2096,9 +2162,7 @@
       "src/full-codegen/s390/full-codegen-s390.cc",
       "src/ic/s390/access-compiler-s390.cc",
       "src/ic/s390/handler-compiler-s390.cc",
-      "src/ic/s390/ic-compiler-s390.cc",
       "src/ic/s390/ic-s390.cc",
-      "src/ic/s390/stub-cache-s390.cc",
       "src/regexp/s390/regexp-macro-assembler-s390.cc",
       "src/regexp/s390/regexp-macro-assembler-s390.h",
       "src/s390/assembler-s390-inl.h",
@@ -2138,9 +2202,7 @@
       "src/full-codegen/x87/full-codegen-x87.cc",
       "src/ic/x87/access-compiler-x87.cc",
       "src/ic/x87/handler-compiler-x87.cc",
-      "src/ic/x87/ic-compiler-x87.cc",
       "src/ic/x87/ic-x87.cc",
-      "src/ic/x87/stub-cache-x87.cc",
       "src/regexp/x87/regexp-macro-assembler-x87.cc",
       "src/regexp/x87/regexp-macro-assembler-x87.h",
       "src/x87/assembler-x87-inl.h",
@@ -2169,6 +2231,7 @@
   deps = [
     ":v8_libbase",
     ":v8_libsampler",
+    ":v8_version",
   ]
 
   sources += [ v8_generated_peephole_source ]
@@ -2196,7 +2259,7 @@
     deps += [ ":postmortem-metadata" ]
   }
 
-  if (v8_enable_inspector_override) {
+  if (v8_enable_inspector) {
     deps += [ "src/inspector:inspector" ]
   }
 }
@@ -2399,14 +2462,10 @@
     ":v8_libbase",
     ":v8_libplatform",
   ]
-}
 
-v8_source_set("simple_fuzzer") {
-  sources = [
-    "test/fuzzer/fuzzer.cc",
-  ]
-
-  configs = [ ":internal_config_base" ]
+  if (v8_enable_i18n_support) {
+    deps += [ "//third_party/icu" ]
+  }
 }
 
 ###############################################################################
@@ -2477,14 +2536,10 @@
 
   deps = [
     ":d8",
+    ":v8_fuzzers",
     ":v8_hello_world",
     ":v8_parser_shell",
     ":v8_sample_process",
-    ":v8_simple_json_fuzzer",
-    ":v8_simple_parser_fuzzer",
-    ":v8_simple_regexp_fuzzer",
-    ":v8_simple_wasm_asmjs_fuzzer",
-    ":v8_simple_wasm_fuzzer",
     "test:gn_all",
     "tools:gn_all",
   ]
@@ -2498,6 +2553,41 @@
   }
 }
 
+group("v8_clusterfuzz") {
+  deps = [
+    ":d8",
+  ]
+
+  if (v8_multi_arch_build) {
+    deps += [
+      ":d8(//build/toolchain/linux:clang_x64)",
+      ":d8(//build/toolchain/linux:clang_x64_v8_arm64)",
+      ":d8(//build/toolchain/linux:clang_x86)",
+      ":d8(//build/toolchain/linux:clang_x86_v8_arm)",
+    ]
+  }
+}
+
+group("v8_fuzzers") {
+  testonly = true
+  deps = [
+    ":v8_simple_json_fuzzer",
+    ":v8_simple_parser_fuzzer",
+    ":v8_simple_regexp_fuzzer",
+    ":v8_simple_wasm_asmjs_fuzzer",
+    ":v8_simple_wasm_call_fuzzer",
+    ":v8_simple_wasm_code_fuzzer",
+    ":v8_simple_wasm_data_section_fuzzer",
+    ":v8_simple_wasm_function_sigs_section_fuzzer",
+    ":v8_simple_wasm_fuzzer",
+    ":v8_simple_wasm_globals_section_fuzzer",
+    ":v8_simple_wasm_imports_section_fuzzer",
+    ":v8_simple_wasm_memory_section_fuzzer",
+    ":v8_simple_wasm_names_section_fuzzer",
+    ":v8_simple_wasm_types_section_fuzzer",
+  ]
+}
+
 if (is_component_build) {
   v8_component("v8") {
     sources = [
@@ -2527,6 +2617,7 @@
       ":v8_base",
       ":v8_maybe_snapshot",
     ]
+
     public_configs = [ ":external_config" ]
   }
 }
@@ -2554,8 +2645,6 @@
     "//build/win:default_exe_manifest",
   ]
 
-  # TODO(jochen): Add support for vtunejit.
-
   if (is_posix) {
     sources += [ "src/d8-posix.cc" ]
   } else if (is_win) {
@@ -2566,10 +2655,18 @@
     deps += [ "//third_party/icu" ]
   }
 
+  if (v8_correctness_fuzzer) {
+    deps += [ "tools/foozzie:v8_correctness_fuzzer_resources" ]
+  }
+
   defines = []
-  if (v8_enable_inspector_override) {
+  if (v8_enable_inspector) {
     defines += [ "V8_INSPECTOR_ENABLED" ]
   }
+
+  if (v8_enable_vtunejit) {
+    deps += [ "//src/third_party/vtune:v8_vtune" ]
+  }
 }
 
 v8_isolate_run("d8") {
@@ -2687,10 +2784,14 @@
   v8_executable("v8_simple_" + name) {
     deps = [
       ":" + name,
-      ":simple_fuzzer",
+      "//build/config/sanitizers:deps",
       "//build/win:default_exe_manifest",
     ]
 
+    sources = [
+      "test/fuzzer/fuzzer.cc",
+    ]
+
     configs = [ ":external_config" ]
   }
 }
@@ -3001,3 +3102,23 @@
 
 v8_fuzzer("wasm_data_section_fuzzer") {
 }
+
+v8_source_set("wasm_compile_fuzzer") {
+  sources = [
+    "test/fuzzer/wasm-compile.cc",
+  ]
+
+  deps = [
+    ":fuzzer_support",
+    ":wasm_module_runner",
+    ":wasm_test_signatures",
+  ]
+
+  configs = [
+    ":external_config",
+    ":internal_config_base",
+  ]
+}
+
+v8_fuzzer("wasm_compile_fuzzer") {
+}
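
Note on the BUILD.gn flag changes above: several debug toggles are renamed or
added in this diff — v8_object_print becomes v8_enable_object_print, and
v8_enable_verify_predictable, v8_enable_vtunejit, v8_enable_verify_csa,
v8_enable_trace_maps and v8_enable_v8_checks are new — with most of them now
deriving their defaults from is_debug. A minimal, hypothetical args.gn sketch
for a local debug build follows; the flag names come from the declare_args()
block patched above, and the values are illustrative only:

  # Hypothetical args.gn; flag names are from the diff, values are examples.
  is_debug = true
  v8_enable_object_print = true    # replaces the old v8_object_print
  v8_enable_verify_heap = true     # defaults to is_debug when left ""
  v8_enable_trace_maps = true      # adds -DTRACE_MAPS
  v8_enable_v8_checks = true       # adds -DV8_ENABLE_CHECKS
  v8_enable_verify_csa = false     # CodeStubAssembler type verification
  v8_enable_vtunejit = false       # would pull in //src/third_party/vtune

Left at their "" defaults, the boolean-string flags above are resolved from
is_debug in the "Derived defaults" section of the diff.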
diff --git a/ChangeLog b/ChangeLog
index 2dc7756..f3cd421 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,4024 @@
+2017-02-25: Version 5.8.283
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-24: Version 5.8.282
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-24: Version 5.8.281
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-24: Version 5.8.280
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-24: Version 5.8.279
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-24: Version 5.8.278
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-24: Version 5.8.277
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-24: Version 5.8.276
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-24: Version 5.8.275
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-24: Version 5.8.274
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-24: Version 5.8.273
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-22: Version 5.8.272
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-22: Version 5.8.271
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-22: Version 5.8.270
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.269
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.268
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.267
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.266
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.265
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.264
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.263
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.262
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.261
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.260
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.259
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.258
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-21: Version 5.8.257
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-20: Version 5.8.256
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-20: Version 5.8.255
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-20: Version 5.8.254
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-20: Version 5.8.253
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-20: Version 5.8.252
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-20: Version 5.8.251
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-20: Version 5.8.250
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-20: Version 5.8.249
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-20: Version 5.8.248
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-20: Version 5.8.247
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-20: Version 5.8.246
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-20: Version 5.8.245
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-17: Version 5.8.244
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-17: Version 5.8.243
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-17: Version 5.8.242
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-17: Version 5.8.241
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-17: Version 5.8.240
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-17: Version 5.8.239
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-17: Version 5.8.238
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-17: Version 5.8.237
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-17: Version 5.8.236
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-17: Version 5.8.235
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-17: Version 5.8.234
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-17: Version 5.8.233
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.232
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.231
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.230
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.229
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.228
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.227
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.226
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.225
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.224
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.223
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.222
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.221
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.220
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.219
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.218
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.217
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-16: Version 5.8.216
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-15: Version 5.8.215
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-15: Version 5.8.214
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-15: Version 5.8.213
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-15: Version 5.8.212
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-15: Version 5.8.211
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-15: Version 5.8.210
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-15: Version 5.8.209
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-14: Version 5.8.208
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-14: Version 5.8.207
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-14: Version 5.8.206
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-14: Version 5.8.205
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-14: Version 5.8.204
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-14: Version 5.8.203
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-14: Version 5.8.202
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-14: Version 5.8.201
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-14: Version 5.8.200
+
+        Remove SIMD.js from V8 (issues 4124, 5948).
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-13: Version 5.8.199
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-13: Version 5.8.198
+
+        Remove SIMD.js from V8 (issues 4124, 5948).
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-13: Version 5.8.197
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-13: Version 5.8.196
+
+        Remove SIMD.js from V8 (issue 4124, Chromium issue 5948).
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-13: Version 5.8.195
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-12: Version 5.8.194
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-12: Version 5.8.193
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-11: Version 5.8.192
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-11: Version 5.8.191
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-11: Version 5.8.190
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-10: Version 5.8.189
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-10: Version 5.8.188
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-10: Version 5.8.187
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-10: Version 5.8.186
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-10: Version 5.8.185
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-10: Version 5.8.184
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-10: Version 5.8.183
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-10: Version 5.8.182
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-10: Version 5.8.181
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-10: Version 5.8.180
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-10: Version 5.8.179
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-10: Version 5.8.178
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.177
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.176
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.175
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.174
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.173
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.172
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.171
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.170
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.169
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.168
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.167
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.166
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.165
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.164
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.163
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.162
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.161
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-09: Version 5.8.160
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-08: Version 5.8.159
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-08: Version 5.8.158
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-08: Version 5.8.157
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-08: Version 5.8.156
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-08: Version 5.8.155
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-08: Version 5.8.154
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-08: Version 5.8.153
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-08: Version 5.8.152
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-08: Version 5.8.151
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-08: Version 5.8.150
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-08: Version 5.8.149
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-08: Version 5.8.148
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.147
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.146
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.145
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.144
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.143
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.142
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.141
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.140
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.139
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.138
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.137
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.136
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-07: Version 5.8.135
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-06: Version 5.8.134
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-06: Version 5.8.133
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-06: Version 5.8.132
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-06: Version 5.8.131
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-06: Version 5.8.130
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-06: Version 5.8.129
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-06: Version 5.8.128
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-06: Version 5.8.127
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-06: Version 5.8.126
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-06: Version 5.8.125
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-06: Version 5.8.124
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-06: Version 5.8.123
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-05: Version 5.8.122
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-03: Version 5.8.121
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-03: Version 5.8.120
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-03: Version 5.8.119
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-03: Version 5.8.118
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-03: Version 5.8.117
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-03: Version 5.8.116
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-03: Version 5.8.115
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-03: Version 5.8.114
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-03: Version 5.8.113
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-03: Version 5.8.112
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-03: Version 5.8.111
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-02: Version 5.8.110
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-02: Version 5.8.109
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-02: Version 5.8.108
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-02: Version 5.8.107
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.106
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.105
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.104
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.103
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.102
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.101
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.100
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.99
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.98
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.97
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.96
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.95
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.94
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.93
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.92
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.91
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.90
+
+        Performance and stability improvements on all platforms.
+
+
+2017-02-01: Version 5.8.89
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-31: Version 5.8.88
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-31: Version 5.8.87
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-31: Version 5.8.86
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-31: Version 5.8.85
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-31: Version 5.8.84
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-31: Version 5.8.83
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-31: Version 5.8.82
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.81
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.80
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.79
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.78
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.77
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.76
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.75
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.74
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.73
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.72
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.71
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.70
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.69
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.68
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.67
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.66
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.65
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.64
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-30: Version 5.8.63
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-29: Version 5.8.62
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-29: Version 5.8.61
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-27: Version 5.8.60
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-27: Version 5.8.59
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-27: Version 5.8.58
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-27: Version 5.8.57
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-26: Version 5.8.56
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-26: Version 5.8.55
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-26: Version 5.8.54
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-26: Version 5.8.53
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-26: Version 5.8.52
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-26: Version 5.8.51
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-26: Version 5.8.50
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-26: Version 5.8.49
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-26: Version 5.8.48
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-26: Version 5.8.47
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-26: Version 5.8.46
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.45
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.44
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.43
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.42
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.41
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.40
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.39
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.38
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.37
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.36
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.35
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.34
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.33
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.32
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.31
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.30
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.29
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.28
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-25: Version 5.8.27
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-24: Version 5.8.26
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-24: Version 5.8.25
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-24: Version 5.8.24
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-24: Version 5.8.23
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-24: Version 5.8.22
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-24: Version 5.8.21
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-24: Version 5.8.20
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-24: Version 5.8.19
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-24: Version 5.8.18
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-24: Version 5.8.17
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.16
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.15
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.14
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.13
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.12
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.11
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.10
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.9
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.8
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.7
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.6
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.5
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-23: Version 5.8.4
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-22: Version 5.8.3
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-21: Version 5.8.2
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-20: Version 5.8.1
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.514
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.513
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.512
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.511
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.510
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.509
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.508
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.507
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.506
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.505
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.504
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.503
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.502
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.501
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.500
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.499
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.498
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.497
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-18: Version 5.7.496
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.495
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.494
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.493
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.492
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.491
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.490
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.489
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.488
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.487
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.486
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.485
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.484
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.483
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.482
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.481
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.480
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.479
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-17: Version 5.7.478
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.477
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.476
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.475
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.474
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.473
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.472
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.471
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.470
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.469
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.468
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.467
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.466
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.465
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.464
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.463
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.462
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-16: Version 5.7.461
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-15: Version 5.7.460
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-15: Version 5.7.459
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-15: Version 5.7.458
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-15: Version 5.7.457
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-14: Version 5.7.456
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-13: Version 5.7.455
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-13: Version 5.7.454
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-13: Version 5.7.453
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-13: Version 5.7.452
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-13: Version 5.7.451
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-13: Version 5.7.450
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-13: Version 5.7.449
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-13: Version 5.7.448
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-13: Version 5.7.447
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-13: Version 5.7.446
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-13: Version 5.7.445
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-13: Version 5.7.444
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-12: Version 5.7.443
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-12: Version 5.7.442
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-11: Version 5.7.441
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-11: Version 5.7.440
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-11: Version 5.7.439
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-10: Version 5.7.438
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-10: Version 5.7.437
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-10: Version 5.7.436
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-10: Version 5.7.435
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-10: Version 5.7.434
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-10: Version 5.7.433
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-10: Version 5.7.432
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-10: Version 5.7.431
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-10: Version 5.7.430
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-10: Version 5.7.429
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-10: Version 5.7.428
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.427
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.426
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.425
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.424
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.423
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.422
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.421
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.420
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.419
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.418
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.417
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.416
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.415
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.414
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.413
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.412
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-09: Version 5.7.411
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-08: Version 5.7.410
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-06: Version 5.7.409
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-06: Version 5.7.408
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-06: Version 5.7.407
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-06: Version 5.7.406
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-06: Version 5.7.405
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-06: Version 5.7.404
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-06: Version 5.7.403
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-05: Version 5.7.402
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-05: Version 5.7.401
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-05: Version 5.7.400
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-05: Version 5.7.399
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-05: Version 5.7.398
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-04: Version 5.7.397
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-04: Version 5.7.396
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-04: Version 5.7.395
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-04: Version 5.7.394
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.393
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.392
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.391
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.390
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.389
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.388
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.387
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.386
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.385
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.384
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.383
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.382
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.381
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-03: Version 5.7.380
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-02: Version 5.7.379
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-02: Version 5.7.378
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-02: Version 5.7.377
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-02: Version 5.7.376
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-02: Version 5.7.375
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-02: Version 5.7.374
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-02: Version 5.7.373
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-02: Version 5.7.372
+
+        Performance and stability improvements on all platforms.
+
+
+2017-01-02: Version 5.7.371
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-30: Version 5.7.370
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-30: Version 5.7.369
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-30: Version 5.7.368
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-30: Version 5.7.367
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-30: Version 5.7.366
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-28: Version 5.7.365
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-28: Version 5.7.364
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-28: Version 5.7.363
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-28: Version 5.7.362
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-28: Version 5.7.361
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-27: Version 5.7.360
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-27: Version 5.7.359
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-27: Version 5.7.358
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-27: Version 5.7.357
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-27: Version 5.7.356
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-27: Version 5.7.355
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-27: Version 5.7.354
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-27: Version 5.7.353
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-26: Version 5.7.352
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-24: Version 5.7.351
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-24: Version 5.7.350
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-24: Version 5.7.349
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-23: Version 5.7.348
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-23: Version 5.7.347
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-23: Version 5.7.346
+
+        [intl] Add new semantics + compat fallback to Intl constructor (issues
+        4360, 4870).
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-23: Version 5.7.345
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-23: Version 5.7.344
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-23: Version 5.7.343
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-23: Version 5.7.342
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-22: Version 5.7.341
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-22: Version 5.7.340
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-22: Version 5.7.339
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-22: Version 5.7.338
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-22: Version 5.7.337
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-22: Version 5.7.336
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-22: Version 5.7.335
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-22: Version 5.7.334
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-22: Version 5.7.333
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-22: Version 5.7.332
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-22: Version 5.7.331
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-22: Version 5.7.330
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-21: Version 5.7.329
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-21: Version 5.7.328
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-21: Version 5.7.327
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-21: Version 5.7.326
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-21: Version 5.7.325
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-21: Version 5.7.324
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-21: Version 5.7.323
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-21: Version 5.7.322
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-21: Version 5.7.321
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-21: Version 5.7.320
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-21: Version 5.7.319
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.318
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.317
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.316
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.315
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.314
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.313
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.312
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.311
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.310
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.309
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.308
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.307
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.306
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.305
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.304
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.303
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.302
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.301
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.300
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-20: Version 5.7.299
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-19: Version 5.7.298
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-19: Version 5.7.297
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-19: Version 5.7.296
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-19: Version 5.7.295
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-19: Version 5.7.294
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-19: Version 5.7.293
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-19: Version 5.7.292
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-19: Version 5.7.291
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-19: Version 5.7.290
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-19: Version 5.7.289
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-19: Version 5.7.288
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-19: Version 5.7.287
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-18: Version 5.7.286
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-18: Version 5.7.285
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-17: Version 5.7.284
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-17: Version 5.7.283
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-17: Version 5.7.282
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.281
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.280
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.279
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.278
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.277
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.276
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.275
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.274
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.273
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.272
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.271
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.270
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.269
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.268
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.267
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.266
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.265
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.264
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.263
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.262
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.261
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-16: Version 5.7.260
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.259
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.258
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.257
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.256
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.255
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.254
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.253
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.252
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.251
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.250
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.249
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.248
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.247
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-15: Version 5.7.246
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.245
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.244
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.243
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.242
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.241
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.240
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.239
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.238
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.237
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.236
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.235
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.234
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.233
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.232
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.231
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-14: Version 5.7.230
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.229
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.228
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.227
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.226
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.225
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.224
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.223
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.222
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.221
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.220
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.219
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.218
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.217
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.216
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-13: Version 5.7.215
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-12: Version 5.7.214
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-12: Version 5.7.213
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-12: Version 5.7.212
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-12: Version 5.7.211
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-12: Version 5.7.210
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-12: Version 5.7.209
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-12: Version 5.7.208
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-12: Version 5.7.207
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-12: Version 5.7.206
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-12: Version 5.7.205
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-12: Version 5.7.204
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-11: Version 5.7.203
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.202
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.201
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.200
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.199
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.198
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.197
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.196
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.195
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.194
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.193
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.192
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.191
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.190
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.189
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.188
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-09: Version 5.7.187
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.186
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.185
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.184
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.183
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.182
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.181
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.180
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.179
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.178
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.177
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.176
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.175
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.174
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.173
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.172
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.171
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.170
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.169
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.168
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.167
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.166
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-08: Version 5.7.165
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.164
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.163
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.162
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.161
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.160
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.159
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.158
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.157
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.156
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.155
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.154
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.153
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.152
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.151
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.150
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.149
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.148
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-07: Version 5.7.147
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-06: Version 5.7.146
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-06: Version 5.7.145
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-06: Version 5.7.144
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-06: Version 5.7.143
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-06: Version 5.7.142
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-06: Version 5.7.141
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-06: Version 5.7.140
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-06: Version 5.7.139
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-06: Version 5.7.138
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.137
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.136
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.135
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.134
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.133
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.132
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.131
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.130
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.129
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.128
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.127
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.126
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.125
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.124
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.123
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-05: Version 5.7.122
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-02: Version 5.7.121
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-02: Version 5.7.120
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-02: Version 5.7.119
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-02: Version 5.7.118
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-02: Version 5.7.117
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-02: Version 5.7.116
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-02: Version 5.7.115
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.114
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.113
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.112
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.111
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.110
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.109
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.108
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.107
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.106
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.105
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.104
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.103
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.102
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.101
+
+        Performance and stability improvements on all platforms.
+
+
+2016-12-01: Version 5.7.100
+
+        [build] Use MSVS 2015 by default (Chromium issue 603131).
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.99
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.98
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.97
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.96
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.95
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.94
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.93
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.92
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.91
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.90
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.89
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.88
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-30: Version 5.7.87
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.86
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.85
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.84
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.83
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.82
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.81
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.80
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.79
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.78
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.77
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.76
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.75
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.74
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.73
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.72
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-29: Version 5.7.71
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-28: Version 5.7.70
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-28: Version 5.7.69
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-28: Version 5.7.68
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-28: Version 5.7.67
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-28: Version 5.7.66
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-28: Version 5.7.65
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-28: Version 5.7.64
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-25: Version 5.7.63
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-25: Version 5.7.62
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-25: Version 5.7.61
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-25: Version 5.7.60
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-25: Version 5.7.59
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-25: Version 5.7.58
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-24: Version 5.7.57
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-24: Version 5.7.56
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-24: Version 5.7.55
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-24: Version 5.7.54
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-24: Version 5.7.53
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.52
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.51
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.50
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.49
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.48
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.47
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.46
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.45
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.44
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.43
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.42
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.41
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.40
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-23: Version 5.7.39
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.38
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.37
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.36
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.35
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.34
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.33
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.32
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.31
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.30
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.29
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.28
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.27
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.26
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.25
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.24
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.23
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-22: Version 5.7.22
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.21
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.20
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.19
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.18
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.17
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.16
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.15
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.14
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.13
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.12
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.11
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.10
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.9
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.8
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.7
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.6
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.5
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-21: Version 5.7.4
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-20: Version 5.7.3
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-20: Version 5.7.2
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-20: Version 5.7.1
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-17: Version 5.6.331
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-17: Version 5.6.330
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-17: Version 5.6.329
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-17: Version 5.6.328
+
+        Performance and stability improvements on all platforms.
+
+
+2016-11-16: Version 5.6.327
+
+        Performance and stability improvements on all platforms.
+
+
 2016-11-15: Version 5.6.326
 
         Performance and stability improvements on all platforms.
diff --git a/DEPS b/DEPS
index 161015d..f8e0085 100644
--- a/DEPS
+++ b/DEPS
@@ -8,23 +8,23 @@
 
 deps = {
   "v8/build":
-    Var("chromium_url") + "/chromium/src/build.git" + "@" + "a3b623a6eff6dc9d58a03251ae22bccf92f67cb2",
+    Var("chromium_url") + "/chromium/src/build.git" + "@" + "c7c2db69cd571523ce728c4d3dceedbd1896b519",
   "v8/tools/gyp":
     Var("chromium_url") + "/external/gyp.git" + "@" + "e7079f0e0e14108ab0dba58728ff219637458563",
   "v8/third_party/icu":
-    Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "c1a237113f525a1561d4b322d7653e1083f79aaa",
+    Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "450be73c9ee8ae29d43d4fdc82febb2a5f62bfb5",
   "v8/third_party/instrumented_libraries":
-    Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "45f5814b1543e41ea0be54c771e3840ea52cca4a",
+    Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "5b6f777da671be977f56f0e8fc3469a3ccbb4474",
   "v8/buildtools":
-    Var("chromium_url") + "/chromium/buildtools.git" + "@" + "39b1db2ab4aa4b2ccaa263c29bdf63e7c1ee28aa",
+    Var("chromium_url") + "/chromium/buildtools.git" + "@" + "94cdccbebc7a634c27145a3d84089e85fbb42e69",
   "v8/base/trace_event/common":
     Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "06294c8a4a6f744ef284cd63cfe54dbf61eea290",
   "v8/third_party/jinja2":
-    Var("chromium_url") + "/chromium/src/third_party/jinja2.git" + "@" + "b61a2c009a579593a259c1b300e0ad02bf48fd78",
+    Var("chromium_url") + "/chromium/src/third_party/jinja2.git" + "@" + "d34383206fa42d52faa10bb9931d6d538f3a57e0",
   "v8/third_party/markupsafe":
-    Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "484a5661041cac13bfc688a26ec5434b05d18961",
+    Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "8f45f5cfa0009d2a70589bcda0349b8cb2b72783",
   "v8/tools/swarming_client":
-    Var('chromium_url') + '/external/swarming.client.git' + '@' + "380e32662312eb107f06fcba6409b0409f8fef72",
+    Var('chromium_url') + '/external/swarming.client.git' + '@' + "11e31afa5d330756ff87aa12064bb5d032896cb5",
   "v8/testing/gtest":
     Var("chromium_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
   "v8/testing/gmock":
@@ -33,21 +33,22 @@
     Var("chromium_url") + "/v8/deps/third_party/benchmarks.git" + "@" + "05d7188267b4560491ff9155c5ee13e207ecd65f",
   "v8/test/mozilla/data":
     Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
-  "v8/test/simdjs/data": Var("chromium_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "baf493985cb9ea7cdbd0d68704860a8156de9556",
   "v8/test/test262/data":
-    Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "fb61ab44eb1bbc2699d714fc00e33af2a19411ce",
+    Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "a72ee6d91275aa6524e84a9b7070103411ef2689",
   "v8/test/test262/harness":
-    Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "cbd968f54f7a95c6556d53ba852292a4c49d11d8",
+    Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
   "v8/tools/clang":
-    Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "75350a858c51ad69e2aae051a8727534542da29f",
+    Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "9913fb19b687b0c858f697efd7bd2468d789a3d5",
+  "v8/test/wasm-js":
+    Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "b8b919e4a0d52db4d3d762e731e615bc3a38b3b2",
 }
 
 deps_os = {
   "android": {
     "v8/third_party/android_tools":
-      Var("chromium_url") + "/android_tools.git" + "@" + "25d57ead05d3dfef26e9c19b13ed10b0a69829cf",
+      Var("chromium_url") + "/android_tools.git" + "@" + "b43a6a289a7588b1769814f04dd6c7d7176974cc",
     "v8/third_party/catapult":
-      Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "6962f5c0344a79b152bf84460a93e1b2e11ea0f4",
+      Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "246a39a82c2213d913a96fff020a263838dc76e6",
   },
   "win": {
     "v8/third_party/cygwin":
@@ -263,7 +264,7 @@
     # Update the Windows toolchain if necessary.
     'name': 'win_toolchain',
     'pattern': '.',
-    'action': ['python', 'v8/gypfiles/vs_toolchain.py', 'update'],
+    'action': ['python', 'v8/build/vs_toolchain.py', 'update'],
   },
   # Pull binutils for linux, enabled debug fission for faster linking /
   # debugging when used with clang on Ubuntu Precise.
diff --git a/Makefile b/Makefile
index 6eeac09..299d4aa 100644
--- a/Makefile
+++ b/Makefile
@@ -51,6 +51,10 @@
 ifeq ($(objectprint), on)
   GYPFLAGS += -Dv8_object_print=1
 endif
+# verifycsa=on
+ifeq ($(verifycsa), on)
+  GYPFLAGS += -Dv8_enable_verify_csa=1
+endif
 # verifyheap=on
 ifeq ($(verifyheap), on)
   GYPFLAGS += -Dv8_enable_verify_heap=1
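Note (hedged): assuming the standard V8 make workflow, the new flag is passed the same way as the neighboring ones, e.g. `make x64.release verifycsa=on`; the resulting `-Dv8_enable_verify_csa=1` define is mapped to `ENABLE_VERIFY_CSA` in gypfiles/features.gypi further down in this patch.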
diff --git a/OWNERS b/OWNERS
index 028f4ff..e375fa6 100644
--- a/OWNERS
+++ b/OWNERS
@@ -5,14 +5,19 @@
 bmeurer@chromium.org
 bradnelson@chromium.org
 cbruni@chromium.org
+clemensh@chromium.org
 danno@chromium.org
 epertoso@chromium.org
+franzih@chromium.org
+gsathya@chromium.org
 hablich@chromium.org
 hpayer@chromium.org
 ishell@chromium.org
 jarin@chromium.org
+jgruber@chromium.org
 jkummerow@chromium.org
 jochen@chromium.org
+leszeks@chromium.org
 littledan@chromium.org
 machenbach@chromium.org
 marja@chromium.org
@@ -21,9 +26,11 @@
 mtrofin@chromium.org
 mvstanton@chromium.org
 mythria@chromium.org
+petermarshall@chromium.org
 neis@chromium.org
 rmcilroy@chromium.org
 rossberg@chromium.org
+tebbi@chromium.org
 titzer@chromium.org
 ulan@chromium.org
 verwaest@chromium.org
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index ad21833..4cacf81 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -67,19 +67,22 @@
         input_api.PresubmitLocalPath(), 'tools'))
   from presubmit import CppLintProcessor
   from presubmit import SourceProcessor
-  from presubmit import CheckAuthorizedAuthor
-  from presubmit import CheckStatusFiles
+  from presubmit import StatusFilesProcessor
 
   results = []
-  if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
+  if not CppLintProcessor().RunOnFiles(
+      input_api.AffectedFiles(include_deletes=False)):
     results.append(output_api.PresubmitError("C++ lint check failed"))
-  if not SourceProcessor().Run(input_api.PresubmitLocalPath()):
+  if not SourceProcessor().RunOnFiles(
+      input_api.AffectedFiles(include_deletes=False)):
     results.append(output_api.PresubmitError(
         "Copyright header, trailing whitespaces and two empty lines " \
         "between declarations check failed"))
-  if not CheckStatusFiles(input_api.PresubmitLocalPath()):
+  if not StatusFilesProcessor().RunOnFiles(
+      input_api.AffectedFiles(include_deletes=True)):
     results.append(output_api.PresubmitError("Status file check failed"))
-  results.extend(CheckAuthorizedAuthor(input_api, output_api))
+  results.extend(input_api.canned_checks.CheckAuthorizedAuthor(
+      input_api, output_api))
   return results
 
 
diff --git a/V8_MERGE_REVISION b/V8_MERGE_REVISION
index 3a30d8d..3d47ee3 100644
--- a/V8_MERGE_REVISION
+++ b/V8_MERGE_REVISION
@@ -1,2 +1,2 @@
-v8 5.6.326.50
-https://chromium.googlesource.com/v8/v8/+/5.6.326.50
+v8 5.8.283.32
+https://chromium.googlesource.com/v8/v8/+/5.8.283.32
\ No newline at end of file
diff --git a/base/trace_event/common/trace_event_common.h b/base/trace_event/common/trace_event_common.h
index 0db9269..e87665b 100644
--- a/base/trace_event/common/trace_event_common.h
+++ b/base/trace_event/common/trace_event_common.h
@@ -223,49 +223,6 @@
                                             flow_flags, arg1_name, arg1_val, \
                                             arg2_name, arg2_val)
 
-// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
-// included in official builds.
-
-#if OFFICIAL_BUILD
-#undef TRACING_IS_OFFICIAL_BUILD
-#define TRACING_IS_OFFICIAL_BUILD 1
-#elif !defined(TRACING_IS_OFFICIAL_BUILD)
-#define TRACING_IS_OFFICIAL_BUILD 0
-#endif
-
-#if TRACING_IS_OFFICIAL_BUILD
-#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
-#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
-  (void)0
-#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
-                               arg2_name, arg2_val)                       \
-  (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
-                                       arg1_val)                               \
-  (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
-                                       arg1_val, arg2_name, arg2_val)          \
-  (void)0
-#else
-#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
-  TRACE_EVENT0(category_group, name)
-#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
-  TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
-#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
-                               arg2_name, arg2_val)                       \
-  TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
-#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
-  TRACE_EVENT_INSTANT0(category_group, name, scope)
-#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
-                                       arg1_val)                               \
-  TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
-#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
-                                       arg1_val, arg2_name, arg2_val)          \
-  TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val,       \
-                       arg2_name, arg2_val)
-#endif
-
 // Records a single event called "name" immediately, with 0, 1 or 2
 // associated arguments. If the category is not enabled, then this
 // does nothing.
@@ -301,16 +258,6 @@
       TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp,        \
       TRACE_EVENT_FLAG_NONE | scope)
 
-// Syntactic sugars for the sampling tracing in the main thread.
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
-  TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_GET_SAMPLING_STATE() \
-  TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
-#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
-  TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(category_and_name) \
-  TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, category_and_name)
-
 // Records a single BEGIN event called "name" immediately, with 0, 1 or 2
 // associated arguments. If the category is not enabled, then this
 // does nothing.
@@ -1006,15 +953,15 @@
   INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context)
 
 // Macro to specify that two trace IDs are identical. For example,
-// TRACE_BIND_IDS(
+// TRACE_LINK_IDS(
 //     "category", "name",
 //     TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000),
 //     TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000))
 // tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from
 // the current process have the same ID as events with ID
 // ("blink::ResourceFetcher::FetchRequest", 0x2000).
-#define TRACE_BIND_IDS(category_group, name, id, bind_id) \
-  INTERNAL_TRACE_EVENT_ADD_BIND_IDS(category_group, name, id, bind_id);
+#define TRACE_LINK_IDS(category_group, name, id, linked_id) \
+  INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id, linked_id);
 
 // Macro to efficiently determine if a given category group is enabled.
 #define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret)             \
@@ -1081,7 +1028,7 @@
 #define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
 #define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
 #define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
-#define TRACE_EVENT_PHASE_BIND_IDS ('=')
+#define TRACE_EVENT_PHASE_LINK_IDS ('=')
 
 // Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
 #define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
diff --git a/build_overrides/build.gni b/build_overrides/build.gni
index 6b8a4ff..8dcaf3a 100644
--- a/build_overrides/build.gni
+++ b/build_overrides/build.gni
@@ -24,3 +24,9 @@
 asan_suppressions_file = "//build/sanitizers/asan_suppressions.cc"
 lsan_suppressions_file = "//build/sanitizers/lsan_suppressions.cc"
 tsan_suppressions_file = "//build/sanitizers/tsan_suppressions.cc"
+
+# Skip assertions about the 4GiB file size limit.
+ignore_elf32_limitations = true
+
+# Use the system install of Xcode for tools like ibtool, libtool, etc.
+use_system_xcode = true
diff --git a/build_overrides/v8.gni b/build_overrides/v8.gni
deleted file mode 100644
index df8320d..0000000
--- a/build_overrides/v8.gni
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright 2015 The V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/config/features.gni")
-import("//build/config/ui.gni")
-import("//build/config/v8_target_cpu.gni")
-import("//gni/v8.gni")
-
-if (is_android) {
-  import("//build/config/android/config.gni")
-}
-
-if (((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
-      v8_current_cpu == "x87") && (is_linux || is_mac)) ||
-    (v8_current_cpu == "ppc64" && is_linux)) {
-  v8_enable_gdbjit_default = true
-}
-
-v8_imminent_deprecation_warnings_default = true
-
-# Add simple extras solely for the purpose of the cctests.
-v8_extra_library_files = [ "//test/cctest/test-extra.js" ]
-v8_experimental_extra_library_files =
-    [ "//test/cctest/test-experimental-extra.js" ]
-
-declare_args() {
-  # Enable inspector. See include/v8-inspector.h.
-  v8_enable_inspector = true
-}
-
-v8_enable_inspector_override = v8_enable_inspector
diff --git a/genmakefiles.py b/genmakefiles.py
index 10fa752..361e0f7 100644
--- a/genmakefiles.py
+++ b/genmakefiles.py
@@ -274,32 +274,29 @@
     out.write('LOCAL_C_INCLUDES := $(LOCAL_PATH)/src\n')
     out.write('include $(BUILD_HOST_STATIC_LIBRARY)\n')
 
+def GenerateMakefiles():
+  # Slurp in the content of the V8 gyp file.
+  with open(os.path.join(os.getcwd(), './src/v8.gyp'), 'r') as f:
+    gyp = eval(f.read())
 
-# Slurp in the content of the V8 gyp file.
-with open(os.path.join(os.getcwd(), './src/v8.gyp'), 'r') as f:
-  gyp = eval(f.read())
+  # Find the targets that we're interested in and write out the makefiles.
+  for target in gyp['targets']:
+    name = target['target_name']
+    sources = None
+    if target.get('sources'):
+      sources = [x for x in target['sources'] if x.endswith('.cc')]
+      sources.sort()
 
-# Find the targets that we're interested in and write out the makefiles.
-for target in gyp['targets']:
-  name = target['target_name']
-  sources = None
-  if target.get('sources'):
-    sources = [x for x in target['sources'] if x.endswith('.cc')]
-    sources.sort()
-
-  if name == 'v8_libplatform':
-    _writeMakefile('Android.platform.mk', 'libv8platform', sources)
-  elif name == 'v8_libsampler':
-    _writeMakefile('Android.sampler.mk', 'libv8sampler', sources)
-  elif name == 'v8_base':
-    _writeV8SrcMakefile(target)
-  elif name == 'mkpeephole':
-    _writeMkpeepholeMakefile(target)
-  elif name == 'js2c':
-    _writeGeneratedFilesMakfile(target)
-  elif name == 'v8_libbase':
-    _writeLibBaseMakefile(target)
-
-
-
+    if name == 'v8_libplatform':
+      _writeMakefile('Android.platform.mk', 'libv8platform', sources)
+    elif name == 'v8_libsampler':
+      _writeMakefile('Android.sampler.mk', 'libv8sampler', sources)
+    elif name == 'v8_base':
+      _writeV8SrcMakefile(target)
+    elif name == 'mkpeephole':
+      _writeMkpeepholeMakefile(target)
+    elif name == 'js2c':
+      _writeGeneratedFilesMakfile(target)
+    elif name == 'v8_libbase':
+      _writeLibBaseMakefile(target)
 
diff --git a/gni/isolate.gni b/gni/isolate.gni
index 1cc3a38..a347eea 100644
--- a/gni/isolate.gni
+++ b/gni/isolate.gni
@@ -3,7 +3,6 @@
 # found in the LICENSE file.
 
 import("//build/config/sanitizers/sanitizers.gni")
-import("//build_overrides/v8.gni")
 import("//third_party/icu/config.gni")
 import("v8.gni")
 
@@ -62,6 +61,11 @@
       } else {
         asan = "0"
       }
+      if (is_lsan) {
+        lsan = "1"
+      } else {
+        lsan = "0"
+      }
       if (is_msan) {
         msan = "1"
       } else {
@@ -97,7 +101,7 @@
       } else {
         icu_use_data_file_flag = "0"
       }
-      if (v8_enable_inspector_override) {
+      if (v8_enable_inspector) {
         enable_inspector = "1"
       } else {
         enable_inspector = "0"
@@ -159,6 +163,8 @@
         "--config-variable",
         "is_gn=1",
         "--config-variable",
+        "lsan=$lsan",
+        "--config-variable",
         "msan=$msan",
         "--config-variable",
         "tsan=$tsan",
@@ -181,7 +187,7 @@
       if (is_win) {
         args += [
           "--config-variable",
-          "msvs_version=2013",
+          "msvs_version=2015",
         ]
       } else {
         args += [
diff --git a/gni/v8.gni b/gni/v8.gni
index 3759572..ea628e0 100644
--- a/gni/v8.gni
+++ b/gni/v8.gni
@@ -6,6 +6,12 @@
 import("//build/config/v8_target_cpu.gni")
 
 declare_args() {
+  # Includes files needed for correctness fuzzing.
+  v8_correctness_fuzzer = false
+
+  # Adds an additional compile target for building multiple architectures at once.
+  v8_multi_arch_build = false
+
   # Indicate if valgrind was fetched as a custom dep to make it available on
   # swarming.
   v8_has_valgrind = false
@@ -30,6 +36,12 @@
   # Enable ECMAScript Internationalization API. Enabling this feature will
   # add a dependency on the ICU library.
   v8_enable_i18n_support = true
+
+  # Enable inspector. See include/v8-inspector.h.
+  v8_enable_inspector = true
+
+  # Use static libraries instead of source_sets.
+  v8_static_library = false
 }
 
 if (v8_use_external_startup_data == "") {
@@ -83,6 +95,24 @@
 
 # All templates should be kept in sync.
 template("v8_source_set") {
+  if (defined(v8_static_library) && v8_static_library) {
+    static_library(target_name) {
+      forward_variables_from(invoker, "*", [ "configs" ])
+      configs += invoker.configs
+      configs -= v8_remove_configs
+      configs += v8_add_configs
+    }
+  } else {
+    source_set(target_name) {
+      forward_variables_from(invoker, "*", [ "configs" ])
+      configs += invoker.configs
+      configs -= v8_remove_configs
+      configs += v8_add_configs
+    }
+  }
+}
+
+template("v8_header_set") {
   source_set(target_name) {
     forward_variables_from(invoker, "*", [ "configs" ])
     configs += invoker.configs
diff --git a/gypfiles/all.gyp b/gypfiles/all.gyp
index a3f2eed..d3e275e 100644
--- a/gypfiles/all.gyp
+++ b/gypfiles/all.gyp
@@ -27,10 +27,14 @@
         }],
         ['v8_enable_inspector==1', {
           'dependencies': [
-            '../test/debugger/debugger.gyp:*',
             '../test/inspector/inspector.gyp:*',
           ],
         }],
+        ['v8_enable_inspector==1 and test_isolation_mode != "noop"', {
+          'dependencies': [
+            '../test/debugger/debugger.gyp:*',
+          ],
+        }],
         ['test_isolation_mode != "noop"', {
           'dependencies': [
             '../test/bot_default.gyp:*',
@@ -43,7 +47,6 @@
             '../test/optimize_for_size.gyp:*',
             '../test/perf.gyp:*',
             '../test/preparser/preparser.gyp:*',
-            '../test/simdjs/simdjs.gyp:*',
             '../test/test262/test262.gyp:*',
             '../test/webkit/webkit.gyp:*',
             '../tools/check-static-initializers.gyp:*',
diff --git a/gypfiles/features.gypi b/gypfiles/features.gypi
index 5a21a63..0c4873c 100644
--- a/gypfiles/features.gypi
+++ b/gypfiles/features.gypi
@@ -33,6 +33,8 @@
 
     'v8_enable_gdbjit%': 0,
 
+    'v8_enable_verify_csa%': 0,
+
     'v8_object_print%': 0,
 
     'v8_enable_verify_heap%': 0,
@@ -78,6 +80,9 @@
       ['v8_enable_gdbjit==1', {
         'defines': ['ENABLE_GDB_JIT_INTERFACE',],
       }],
+      ['v8_enable_verify_csa==1', {
+        'defines': ['ENABLE_VERIFY_CSA',],
+      }],
       ['v8_object_print==1', {
         'defines': ['OBJECT_PRINT',],
       }],
diff --git a/gypfiles/get_landmines.py b/gypfiles/get_landmines.py
index e6b6da6..6137648 100755
--- a/gypfiles/get_landmines.py
+++ b/gypfiles/get_landmines.py
@@ -31,6 +31,7 @@
   print 'Clober to fix windows build problems.'
   print 'Clober again to fix windows build problems.'
   print 'Clobber to possibly resolve failure on win-32 bot.'
+  print 'Clobber for http://crbug.com/668958.'
   return 0
 
 
diff --git a/gypfiles/isolate.gypi b/gypfiles/isolate.gypi
index 8f53a15..c55f3ca 100644
--- a/gypfiles/isolate.gypi
+++ b/gypfiles/isolate.gypi
@@ -75,6 +75,7 @@
         '--config-variable', 'has_valgrind=<(has_valgrind)',
         '--config-variable', 'icu_use_data_file_flag=<(icu_use_data_file_flag)',
         '--config-variable', 'is_gn=0',
+        '--config-variable', 'lsan=<(lsan)',
         '--config-variable', 'msan=<(msan)',
         '--config-variable', 'tsan=<(tsan)',
         '--config-variable', 'coverage=<(coverage)',
diff --git a/gypfiles/toolchain.gypi b/gypfiles/toolchain.gypi
index 95eb1d9..88afb86 100644
--- a/gypfiles/toolchain.gypi
+++ b/gypfiles/toolchain.gypi
@@ -315,6 +315,8 @@
             'defines': [
               'V8_TARGET_ARCH_S390_LE_SIM',
             ],
+          }, {
+            'cflags': [ '-march=z196' ],
           }],
           ],
       }],  # s390
@@ -989,8 +991,6 @@
         #       present in VS 2003 and earlier.
         'msvs_disabled_warnings': [4351],
         'msvs_configuration_attributes': {
-          'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
-          'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
           'CharacterSet': '1',
         },
       }],
diff --git a/gypfiles/win/msvs_dependencies.isolate b/gypfiles/win/msvs_dependencies.isolate
new file mode 100644
index 0000000..79ae11a
--- /dev/null
+++ b/gypfiles/win/msvs_dependencies.isolate
@@ -0,0 +1,97 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# TODO(machenbach): Remove this when crbug.com/669910 is resolved.
+{
+  'conditions': [
+    # Copy the VS runtime DLLs into the isolate so that they
+    # don't have to be preinstalled on the target machine.
+    #
+    # VS2013 runtimes
+    ['OS=="win" and msvs_version==2013 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/msvcp120d.dll',
+          '<(PRODUCT_DIR)/msvcr120d.dll',
+        ],
+      },
+    }],
+    ['OS=="win" and msvs_version==2013 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/msvcp120.dll',
+          '<(PRODUCT_DIR)/msvcr120.dll',
+        ],
+      },
+    }],
+    # VS2015 runtimes
+    ['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/msvcp140d.dll',
+          '<(PRODUCT_DIR)/vccorlib140d.dll',
+          '<(PRODUCT_DIR)/vcruntime140d.dll',
+          '<(PRODUCT_DIR)/ucrtbased.dll',
+        ],
+      },
+    }],
+    ['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/msvcp140.dll',
+          '<(PRODUCT_DIR)/vccorlib140.dll',
+          '<(PRODUCT_DIR)/vcruntime140.dll',
+          '<(PRODUCT_DIR)/ucrtbase.dll',
+        ],
+      },
+    }],
+    ['OS=="win" and msvs_version==2015 and component=="shared_library"', {
+      # Windows 10 Universal C Runtime binaries.
+      'variables': {
+        'files': [
+          '<(PRODUCT_DIR)/api-ms-win-core-console-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-datetime-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-debug-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-errorhandling-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-file-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-file-l1-2-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-file-l2-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-handle-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-heap-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-interlocked-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-libraryloader-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-localization-l1-2-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-memory-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-namedpipe-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-processenvironment-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-processthreads-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-processthreads-l1-1-1.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-profile-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-rtlsupport-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-string-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-synch-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-synch-l1-2-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-sysinfo-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-timezone-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-core-util-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-conio-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-convert-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-environment-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-filesystem-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-heap-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-locale-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-math-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-multibyte-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-private-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-process-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-runtime-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-stdio-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-string-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-time-l1-1-0.dll',
+          '<(PRODUCT_DIR)/api-ms-win-crt-utility-l1-1-0.dll',
+        ],
+      },
+    }],
+  ],
+}
diff --git a/include/libplatform/libplatform.h b/include/libplatform/libplatform.h
index 40f3f66..cab467f 100644
--- a/include/libplatform/libplatform.h
+++ b/include/libplatform/libplatform.h
@@ -35,6 +35,17 @@
                                         v8::Isolate* isolate);
 
 /**
+ * Runs pending idle tasks for at most |idle_time_in_seconds| seconds.
+ *
+ * The caller has to make sure that this is called from the right thread.
+ * This call does not block if no task is pending. The |platform| has to be
+ * created using |CreateDefaultPlatform|.
+ */
+V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform,
+                                     v8::Isolate* isolate,
+                                     double idle_time_in_seconds);
+
+/**
  * Attempts to set the tracing controller for the given platform.
  *
  * The |platform| has to be created using |CreateDefaultPlatform|.
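
A minimal usage sketch for the new entry point, composed with the existing PumpMessageLoop (assumes `platform` was created via CreateDefaultPlatform and `isolate` is the embedder's live isolate):

    // Drain all pending foreground tasks first.
    while (v8::platform::PumpMessageLoop(platform, isolate)) {
    }
    // Then give pending idle tasks up to 10 ms of budget.
    v8::platform::RunIdleTasks(platform, isolate, 0.01 /* seconds */);
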
diff --git a/include/v8-debug.h b/include/v8-debug.h
index 6385a31..797d233 100644
--- a/include/v8-debug.h
+++ b/include/v8-debug.h
@@ -16,11 +16,9 @@
 enum DebugEvent {
   Break = 1,
   Exception = 2,
-  NewFunction = 3,
-  BeforeCompile = 4,
-  AfterCompile = 5,
-  CompileError = 6,
-  AsyncTaskEvent = 7,
+  AfterCompile = 3,
+  CompileError = 4,
+  AsyncTaskEvent = 5,
 };
 
 class V8_EXPORT Debug {
@@ -87,7 +85,6 @@
     virtual ~Message() {}
   };
 
-
   /**
    * An event details object passed to the debug event listener.
    */
@@ -119,9 +116,7 @@
     virtual Local<Value> GetCallbackData() const = 0;
 
     /**
-     * Client data passed to DebugBreakForCommand function. The
-     * debugger takes ownership of the data and will delete it even if
-     * there is no message handler.
+     * This is now a dummy that returns nullptr.
      */
     virtual ClientData* GetClientData() const = 0;
 
@@ -135,23 +130,18 @@
    *
    * \param event_details object providing information about the debug event
    *
-   * A EventCallback2 does not take possession of the event data,
+   * An EventCallback does not take possession of the event data,
    * and must not rely on the data persisting after the handler returns.
    */
   typedef void (*EventCallback)(const EventDetails& event_details);
 
   /**
-   * Debug message callback function.
-   *
-   * \param message the debug message handler message object
-   *
-   * A MessageHandler2 does not take possession of the message data,
-   * and must not rely on the data persisting after the handler returns.
+   * This is now a no-op.
    */
   typedef void (*MessageHandler)(const Message& message);
 
   /**
-   * Callback function for the host to ensure debug messages are processed.
+   * This is now a no-op.
    */
   typedef void (*DebugMessageDispatchHandler)();
 
@@ -167,76 +157,46 @@
   static void CancelDebugBreak(Isolate* isolate);
 
   // Check if a debugger break is scheduled in the given isolate.
-  static bool CheckDebugBreak(Isolate* isolate);
+  V8_DEPRECATED("No longer supported",
+                static bool CheckDebugBreak(Isolate* isolate));
 
-  // Message based interface. The message protocol is JSON.
-  static void SetMessageHandler(Isolate* isolate, MessageHandler handler);
+  // This is now a no-op.
+  V8_DEPRECATED("No longer supported",
+                static void SetMessageHandler(Isolate* isolate,
+                                              MessageHandler handler));
 
-  static void SendCommand(Isolate* isolate,
-                          const uint16_t* command, int length,
-                          ClientData* client_data = NULL);
+  // This is now a no-op.
+  V8_DEPRECATED("No longer supported",
+                static void SendCommand(Isolate* isolate,
+                                        const uint16_t* command, int length,
+                                        ClientData* client_data = NULL));
 
- /**
-  * Run a JavaScript function in the debugger.
-  * \param fun the function to call
-  * \param data passed as second argument to the function
-  * With this call the debugger is entered and the function specified is called
-  * with the execution state as the first argument. This makes it possible to
-  * get access to information otherwise not available during normal JavaScript
-  * execution e.g. details on stack frames. Receiver of the function call will
-  * be the debugger context global object, however this is a subject to change.
-  * The following example shows a JavaScript function which when passed to
-  * v8::Debug::Call will return the current line of JavaScript execution.
-  *
-  * \code
-  *   function frame_source_line(exec_state) {
-  *     return exec_state.frame(0).sourceLine();
-  *   }
-  * \endcode
-  */
+  /**
+   * Run a JavaScript function in the debugger.
+   * \param fun the function to call
+   * \param data passed as second argument to the function
+   * With this call the debugger is entered and the function specified is called
+   * with the execution state as the first argument. This makes it possible to
+   * get access to information otherwise not available during normal JavaScript
+   * execution e.g. details on stack frames. Receiver of the function call will
+   * be the debugger context global object; however, this is subject to change.
+   * The following example shows a JavaScript function which when passed to
+   * v8::Debug::Call will return the current line of JavaScript execution.
+   *
+   * \code
+   *   function frame_source_line(exec_state) {
+   *     return exec_state.frame(0).sourceLine();
+   *   }
+   * \endcode
+   */
   // TODO(dcarney): data arg should be a MaybeLocal
   static MaybeLocal<Value> Call(Local<Context> context,
                                 v8::Local<v8::Function> fun,
                                 Local<Value> data = Local<Value>());
 
-  /**
-   * Returns a mirror object for the given object.
-   */
-  static MaybeLocal<Value> GetMirror(Local<Context> context,
-                                     v8::Local<v8::Value> obj);
-
-  /**
-   * Makes V8 process all pending debug messages.
-   *
-   * From V8 point of view all debug messages come asynchronously (e.g. from
-   * remote debugger) but they all must be handled synchronously: V8 cannot
-   * do 2 things at one time so normal script execution must be interrupted
-   * for a while.
-   *
-   * Generally when message arrives V8 may be in one of 3 states:
-   * 1. V8 is running script; V8 will automatically interrupt and process all
-   * pending messages;
-   * 2. V8 is suspended on debug breakpoint; in this state V8 is dedicated
-   * to reading and processing debug messages;
-   * 3. V8 is not running at all or has called some long-working C++ function;
-   * by default it means that processing of all debug messages will be deferred
-   * until V8 gets control again; however, embedding application may improve
-   * this by manually calling this method.
-   *
-   * Technically this method in many senses is equivalent to executing empty
-   * script:
-   * 1. It does nothing except for processing all pending debug messages.
-   * 2. It should be invoked with the same precautions and from the same context
-   * as V8 script would be invoked from, because:
-   *   a. with "evaluate" command it can do whatever normal script can do,
-   *   including all native calls;
-   *   b. no other thread should call V8 while this method is running
-   *   (v8::Locker may be used here).
-   *
-   * "Evaluate" debug command behavior currently is not specified in scope
-   * of this method.
-   */
-  static void ProcessDebugMessages(Isolate* isolate);
+  // This is now a no-op.
+  V8_DEPRECATED("No longer supported",
+                static void ProcessDebugMessages(Isolate* isolate));
 
   /**
    * Debugger is running in its own context which is entered while debugger
@@ -245,13 +205,16 @@
    * to change. The Context exists only when the debugger is active, i.e. at
    * least one DebugEventListener or MessageHandler is set.
    */
-  static Local<Context> GetDebugContext(Isolate* isolate);
+  V8_DEPRECATED("Use v8-inspector",
+                static Local<Context> GetDebugContext(Isolate* isolate));
 
   /**
    * While in the debug context, this method returns the top-most non-debug
    * context, if it exists.
    */
-  static MaybeLocal<Context> GetDebuggedContext(Isolate* isolate);
+  V8_DEPRECATED(
+      "No longer supported",
+      static MaybeLocal<Context> GetDebuggedContext(Isolate* isolate));
 
   /**
    * Enable/disable LiveEdit functionality for the given Isolate
diff --git a/include/v8-inspector.h b/include/v8-inspector.h
index 0855ac1..c7209ed 100644
--- a/include/v8-inspector.h
+++ b/include/v8-inspector.h
@@ -85,6 +85,8 @@
   StringView auxData;
   bool hasMemoryOnConsole;
 
+  static int executionContextId(v8::Local<v8::Context> context);
+
  private:
   // Disallow copying and allocating this one.
   enum NotNullTagEnum { NotNullLiteral };
@@ -156,8 +158,6 @@
   virtual void releaseObjectGroup(const StringView&) = 0;
 };
 
-enum class V8ConsoleAPIType { kClear, kDebug, kLog, kInfo, kWarning, kError };
-
 class V8_EXPORT V8InspectorClient {
  public:
   virtual ~V8InspectorClient() {}
@@ -189,7 +189,8 @@
 
   virtual void installAdditionalCommandLineAPI(v8::Local<v8::Context>,
                                                v8::Local<v8::Object>) {}
-  virtual void consoleAPIMessage(int contextGroupId, V8ConsoleAPIType,
+  virtual void consoleAPIMessage(int contextGroupId,
+                                 v8::Isolate::MessageErrorLevel level,
                                  const StringView& message,
                                  const StringView& url, unsigned lineNumber,
                                  unsigned columnNumber, V8StackTrace*) {}
@@ -201,6 +202,7 @@
   virtual void consoleTime(const StringView& title) {}
   virtual void consoleTimeEnd(const StringView& title) {}
   virtual void consoleTimeStamp(const StringView& title) {}
+  virtual void consoleClear(int contextGroupId) {}
   virtual double currentTimeMS() { return 0; }
   typedef void (*TimerCallback)(void*);
   virtual void startRepeatingTimer(double, TimerCallback, void* data) {}
@@ -248,9 +250,9 @@
   class V8_EXPORT Channel {
    public:
     virtual ~Channel() {}
-    virtual void sendProtocolResponse(int callId,
-                                      const StringView& message) = 0;
-    virtual void sendProtocolNotification(const StringView& message) = 0;
+    virtual void sendResponse(int callId,
+                              std::unique_ptr<StringBuffer> message) = 0;
+    virtual void sendNotification(std::unique_ptr<StringBuffer> message) = 0;
     virtual void flushProtocolNotifications() = 0;
   };
   virtual std::unique_ptr<V8InspectorSession> connect(
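
A hedged sketch of a Channel implementation against the new ownership-taking signatures; `Transport` is a hypothetical embedder hook, and `StringBuffer::string()` is the accessor for the serialized message:

    class EmbedderChannel : public v8_inspector::V8Inspector::Channel {
     public:
      void sendResponse(
          int callId,
          std::unique_ptr<v8_inspector::StringBuffer> message) override {
        Transport(message->string());  // StringView over the message bytes
      }
      void sendNotification(
          std::unique_ptr<v8_inspector::StringBuffer> message) override {
        Transport(message->string());
      }
      void flushProtocolNotifications() override {}

     private:
      void Transport(const v8_inspector::StringView& view);  // hypothetical
    };
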
diff --git a/include/v8-profiler.h b/include/v8-profiler.h
index 74c0613..f7d182f 100644
--- a/include/v8-profiler.h
+++ b/include/v8-profiler.h
@@ -5,6 +5,7 @@
 #ifndef V8_V8_PROFILER_H_
 #define V8_V8_PROFILER_H_
 
+#include <unordered_set>
 #include <vector>
 #include "v8.h"  // NOLINT(build/include)
 
@@ -392,8 +393,7 @@
                          // snapshot items together.
     kConsString = 10,    // Concatenated string. A pair of pointers to strings.
     kSlicedString = 11,  // Sliced string. A fragment of another string.
-    kSymbol = 12,        // A Symbol (ES6).
-    kSimdValue = 13      // A SIMD value stored in the heap (Proposed ES7).
+    kSymbol = 12         // A Symbol (ES6).
   };
 
   /** Returns node type (see HeapGraphNode::Type). */
@@ -630,6 +630,24 @@
     kSamplingForceGC = 1 << 0,
   };
 
+  typedef std::unordered_set<const v8::PersistentBase<v8::Value>*>
+      RetainerChildren;
+  typedef std::vector<std::pair<v8::RetainedObjectInfo*, RetainerChildren>>
+      RetainerGroups;
+  typedef std::vector<std::pair<const v8::PersistentBase<v8::Value>*,
+                                const v8::PersistentBase<v8::Value>*>>
+      RetainerEdges;
+
+  struct RetainerInfos {
+    RetainerGroups groups;
+    RetainerEdges edges;
+  };
+
+  /**
+   * Callback function invoked to retrieve all RetainerInfos from the embedder.
+   */
+  typedef RetainerInfos (*GetRetainerInfosCallback)(v8::Isolate* isolate);
+
   /**
    * Callback function invoked for obtaining RetainedObjectInfo for
    * the given JavaScript wrapper object. It is prohibited to enter V8
@@ -782,6 +800,8 @@
       uint16_t class_id,
       WrapperInfoCallback callback);
 
+  void SetGetRetainerInfosCallback(GetRetainerInfosCallback callback);
+
   /**
    * Default value of persistent handle class ID. Must not be used to
    * define a class. Can be used to reset a class of a persistent
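
A hedged sketch of wiring the new callback; registration goes through the standard v8::Isolate::GetHeapProfiler() accessor, and this trivial callback simply reports no embedder retainers:

    v8::HeapProfiler::RetainerInfos NoEmbedderRetainers(v8::Isolate* isolate) {
      return v8::HeapProfiler::RetainerInfos();  // empty groups and edges
    }

    // At embedder startup, with a live isolate:
    isolate->GetHeapProfiler()->SetGetRetainerInfosCallback(NoEmbedderRetainers);
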
diff --git a/include/v8-util.h b/include/v8-util.h
index 8133fdd..a04a5e8 100644
--- a/include/v8-util.h
+++ b/include/v8-util.h
@@ -6,6 +6,7 @@
 #define V8_UTIL_H_
 
 #include "v8.h"  // NOLINT(build/include)
+#include <assert.h>
 #include <map>
 #include <vector>
 
@@ -210,7 +211,7 @@
    * key.
    */
   void RegisterExternallyReferencedObject(K& key) {
-    DCHECK(Contains(key));
+    assert(Contains(key));
     V8::RegisterExternallyReferencedObject(
         reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))),
         reinterpret_cast<internal::Isolate*>(GetIsolate()));
diff --git a/include/v8-version-string.h b/include/v8-version-string.h
new file mode 100644
index 0000000..075282d
--- /dev/null
+++ b/include/v8-version-string.h
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_VERSION_STRING_H_
+#define V8_VERSION_STRING_H_
+
+#include "v8-version.h"  // NOLINT(build/include)
+
+// This is here rather than v8-version.h to keep that file simple and
+// machine-processable.
+
+#if V8_IS_CANDIDATE_VERSION
+#define V8_CANDIDATE_STRING " (candidate)"
+#else
+#define V8_CANDIDATE_STRING ""
+#endif
+
+#define V8_SX(x) #x
+#define V8_S(x) V8_SX(x)
+
+#if V8_PATCH_LEVEL > 0
+#define V8_VERSION_STRING                                        \
+  V8_S(V8_MAJOR_VERSION)                                         \
+  "." V8_S(V8_MINOR_VERSION) "." V8_S(V8_BUILD_NUMBER) "." V8_S( \
+      V8_PATCH_LEVEL) V8_CANDIDATE_STRING
+#else
+#define V8_VERSION_STRING \
+  V8_S(V8_MAJOR_VERSION)  \
+  "." V8_S(V8_MINOR_VERSION) "." V8_S(V8_BUILD_NUMBER) V8_CANDIDATE_STRING
+#endif
+
+#endif  // V8_VERSION_STRING_H_
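
A worked expansion check for the new header: with the version macros from this merge (5.8.283.32, patch level > 0, and assuming V8_IS_CANDIDATE_VERSION is 0), the two-level V8_S/V8_SX stringification yields a single literal:

    // Include path assumes compiling from the repository root.
    #include <cstdio>
    #include "include/v8-version-string.h"

    int main() {
      // Expands to "5" "." "8" "." "283" "." "32" "" == "5.8.283.32".
      std::printf("V8 %s\n", V8_VERSION_STRING);
      return 0;
    }
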
diff --git a/include/v8-version.h b/include/v8-version.h
index eda966b..3bd8e04 100644
--- a/include/v8-version.h
+++ b/include/v8-version.h
@@ -9,9 +9,9 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define V8_MAJOR_VERSION 5
-#define V8_MINOR_VERSION 6
-#define V8_BUILD_NUMBER 326
-#define V8_PATCH_LEVEL 50
+#define V8_MINOR_VERSION 8
+#define V8_BUILD_NUMBER 283
+#define V8_PATCH_LEVEL 32
 
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/include/v8.h b/include/v8.h
index 5348ba7..baf4417 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -666,7 +666,7 @@
 /**
  * A PersistentBase which allows copy and assignment.
  *
- * Copy, assignment and destructor bevavior is controlled by the traits
+ * Copy, assignment and destructor behavior is controlled by the traits
  * class M.
  *
  * Note: Persistent class hierarchy is subject to future changes.
@@ -867,8 +867,8 @@
 
   HandleScope(const HandleScope&) = delete;
   void operator=(const HandleScope&) = delete;
-  void* operator new(size_t size) = delete;
-  void operator delete(void*, size_t) = delete;
+  void* operator new(size_t size);
+  void operator delete(void*, size_t);
 
  protected:
   V8_INLINE HandleScope() {}
@@ -919,8 +919,8 @@
 
   EscapableHandleScope(const EscapableHandleScope&) = delete;
   void operator=(const EscapableHandleScope&) = delete;
-  void* operator new(size_t size) = delete;
-  void operator delete(void*, size_t) = delete;
+  void* operator new(size_t size);
+  void operator delete(void*, size_t);
 
  private:
   internal::Object** Escape(internal::Object** escape_value);
@@ -934,8 +934,8 @@
 
   SealHandleScope(const SealHandleScope&) = delete;
   void operator=(const SealHandleScope&) = delete;
-  void* operator new(size_t size) = delete;
-  void operator delete(void*, size_t) = delete;
+  void* operator new(size_t size);
+  void operator delete(void*, size_t);
 
  private:
   internal::Isolate* const isolate_;
@@ -961,29 +961,31 @@
  */
 class ScriptOriginOptions {
  public:
-  V8_INLINE ScriptOriginOptions(bool is_embedder_debug_script = false,
-                                bool is_shared_cross_origin = false,
-                                bool is_opaque = false)
-      : flags_((is_embedder_debug_script ? kIsEmbedderDebugScript : 0) |
-               (is_shared_cross_origin ? kIsSharedCrossOrigin : 0) |
-               (is_opaque ? kIsOpaque : 0)) {}
+  V8_INLINE ScriptOriginOptions(bool is_shared_cross_origin = false,
+                                bool is_opaque = false, bool is_wasm = false,
+                                bool is_module = false)
+      : flags_((is_shared_cross_origin ? kIsSharedCrossOrigin : 0) |
+               (is_wasm ? kIsWasm : 0) | (is_opaque ? kIsOpaque : 0) |
+               (is_module ? kIsModule : 0)) {}
   V8_INLINE ScriptOriginOptions(int flags)
       : flags_(flags &
-               (kIsEmbedderDebugScript | kIsSharedCrossOrigin | kIsOpaque)) {}
-  bool IsEmbedderDebugScript() const {
-    return (flags_ & kIsEmbedderDebugScript) != 0;
-  }
+               (kIsSharedCrossOrigin | kIsOpaque | kIsWasm | kIsModule)) {}
+
   bool IsSharedCrossOrigin() const {
     return (flags_ & kIsSharedCrossOrigin) != 0;
   }
   bool IsOpaque() const { return (flags_ & kIsOpaque) != 0; }
+  bool IsWasm() const { return (flags_ & kIsWasm) != 0; }
+  bool IsModule() const { return (flags_ & kIsModule) != 0; }
+
   int Flags() const { return flags_; }
 
  private:
   enum {
-    kIsEmbedderDebugScript = 1,
-    kIsSharedCrossOrigin = 1 << 1,
-    kIsOpaque = 1 << 2
+    kIsSharedCrossOrigin = 1,
+    kIsOpaque = 1 << 1,
+    kIsWasm = 1 << 2,
+    kIsModule = 1 << 3
   };
   const int flags_;
 };
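
A small sketch of the new bit layout; the values follow the enum above, and the result comments are illustrative:

    v8::ScriptOriginOptions opts(/*is_shared_cross_origin=*/true,
                                 /*is_opaque=*/false,
                                 /*is_wasm=*/false,
                                 /*is_module=*/true);
    // opts.IsSharedCrossOrigin() and opts.IsModule() are true;
    // opts.Flags() == (1 /*kIsSharedCrossOrigin*/ | 1 << 3 /*kIsModule*/) == 9.
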
@@ -999,9 +1001,11 @@
       Local<Integer> resource_column_offset = Local<Integer>(),
       Local<Boolean> resource_is_shared_cross_origin = Local<Boolean>(),
       Local<Integer> script_id = Local<Integer>(),
-      Local<Boolean> resource_is_embedder_debug_script = Local<Boolean>(),
       Local<Value> source_map_url = Local<Value>(),
-      Local<Boolean> resource_is_opaque = Local<Boolean>());
+      Local<Boolean> resource_is_opaque = Local<Boolean>(),
+      Local<Boolean> is_wasm = Local<Boolean>(),
+      Local<Boolean> is_module = Local<Boolean>());
+
   V8_INLINE Local<Value> ResourceName() const;
   V8_INLINE Local<Integer> ResourceLineOffset() const;
   V8_INLINE Local<Integer> ResourceColumnOffset() const;
@@ -1191,6 +1195,8 @@
     // alive.
     V8_INLINE const CachedData* GetCachedData() const;
 
+    V8_INLINE const ScriptOriginOptions& GetResourceOptions() const;
+
     // Prevent copying.
     Source(const Source&) = delete;
     Source& operator=(const Source&) = delete;
@@ -1433,7 +1439,7 @@
 
  private:
   static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundInternal(
-      Isolate* isolate, Source* source, CompileOptions options, bool is_module);
+      Isolate* isolate, Source* source, CompileOptions options);
 };
 
 
@@ -1486,6 +1492,11 @@
   int GetEndPosition() const;
 
   /**
+   * Returns the error level of the message.
+   */
+  int ErrorLevel() const;
+
+  /**
    * Returns the index within the line of the first character where
    * the error occurred.
    */
@@ -1713,9 +1724,26 @@
     virtual Maybe<bool> WriteHostObject(Isolate* isolate, Local<Object> object);
 
     /*
+     * Called when the ValueSerializer is going to serialize a
+     * SharedArrayBuffer object. The embedder must return an ID for the
+     * object, using the same ID if this SharedArrayBuffer has already been
+     * serialized in this buffer. When deserializing, this ID will be passed to
+     * ValueDeserializer::TransferSharedArrayBuffer as |transfer_id|.
+     *
+     * If the object cannot be serialized, an
+     * exception should be thrown and Nothing<uint32_t>() returned.
+     */
+    virtual Maybe<uint32_t> GetSharedArrayBufferId(
+        Isolate* isolate, Local<SharedArrayBuffer> shared_array_buffer);
+
+    /*
      * Allocates memory for the buffer of at least the size provided. The actual
      * size (which may be greater or equal) is written to |actual_size|. If no
      * buffer has been allocated yet, nullptr will be provided.
+     *
+     * If the memory cannot be allocated, nullptr should be returned.
+     * |actual_size| will be ignored. It is assumed that |old_buffer| is still
+     * valid in this case and has not been modified.
      */
     virtual void* ReallocateBufferMemory(void* old_buffer, size_t size,
                                          size_t* actual_size);
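
A hedged sketch of a delegate implementing the new GetSharedArrayBufferId hook; the `seen_` table is a hypothetical embedder detail for keeping IDs stable when the same SharedArrayBuffer is serialized more than once:

    #include <vector>
    #include "include/v8.h"

    class EmbedderDelegate : public v8::ValueSerializer::Delegate {
     public:
      v8::Maybe<uint32_t> GetSharedArrayBufferId(
          v8::Isolate* isolate,
          v8::Local<v8::SharedArrayBuffer> sab) override {
        for (size_t i = 0; i < seen_.size(); ++i) {
          if (seen_[i] == sab) return v8::Just(static_cast<uint32_t>(i));
        }
        seen_.emplace_back(isolate, sab);
        return v8::Just(static_cast<uint32_t>(seen_.size() - 1));
      }
      void ThrowDataCloneError(v8::Local<v8::String> message) override {
        // Hypothetical: a real embedder would rethrow via the isolate.
      }

     private:
      std::vector<v8::Global<v8::SharedArrayBuffer>> seen_;
    };
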
@@ -1757,7 +1785,7 @@
 
   /*
   * Marks an ArrayBuffer as having its contents transferred out of band.
-   * Pass the corresponding JSArrayBuffer in the deserializing context to
+   * Pass the corresponding ArrayBuffer in the deserializing context to
    * ValueDeserializer::TransferArrayBuffer.
    */
   void TransferArrayBuffer(uint32_t transfer_id,
@@ -1766,8 +1794,19 @@
   /*
    * Similar to TransferArrayBuffer, but for SharedArrayBuffer.
    */
-  void TransferSharedArrayBuffer(uint32_t transfer_id,
-                                 Local<SharedArrayBuffer> shared_array_buffer);
+  V8_DEPRECATE_SOON("Use Delegate::GetSharedArrayBufferId",
+                    void TransferSharedArrayBuffer(
+                        uint32_t transfer_id,
+                        Local<SharedArrayBuffer> shared_array_buffer));
+
+  /*
+   * Indicate whether to treat ArrayBufferView objects as host objects,
+   * i.e. pass them to Delegate::WriteHostObject. This should not be
+   * called when no Delegate was passed.
+   *
+   * The default is not to treat ArrayBufferViews as host objects.
+   */
+  void SetTreatArrayBufferViewsAsHostObjects(bool mode);
 
   /*
    * Write raw data in various common formats to the buffer.
@@ -1834,9 +1873,10 @@
 
   /*
    * Similar to TransferArrayBuffer, but for SharedArrayBuffer.
-   * transfer_id exists in the same namespace as unshared ArrayBuffer objects.
+   * The id is not necessarily in the same namespace as unshared ArrayBuffer
+   * objects.
    */
-  void TransferSharedArrayBuffer(uint32_t transfer_id,
+  void TransferSharedArrayBuffer(uint32_t id,
                                  Local<SharedArrayBuffer> shared_array_buffer);
 
   /*
@@ -1908,9 +1948,16 @@
    */
   V8_INLINE bool IsNull() const;
 
-   /**
-   * Returns true if this value is true.
+  /**
+   * Returns true if this value is either the null or the undefined value.
+   * See ECMA-262 sections 4.3.11 and 4.3.12.
    */
+  V8_INLINE bool IsNullOrUndefined() const;
+
+  /**
+   * Returns true if this value is true.
+   */
   bool IsTrue() const;
 
   /**
@@ -1920,7 +1967,6 @@
 
   /**
    * Returns true if this value is a symbol or a string.
-   * This is an experimental feature.
    */
   bool IsName() const;
 
@@ -1932,7 +1978,6 @@
 
   /**
    * Returns true if this value is a symbol.
-   * This is an experimental feature.
    */
   bool IsSymbol() const;
 
@@ -2004,7 +2049,6 @@
 
   /**
    * Returns true if this value is a Symbol object.
-   * This is an experimental feature.
    */
   bool IsSymbolObject() const;
 
@@ -2025,19 +2069,16 @@
 
   /**
    * Returns true if this value is a Generator function.
-   * This is an experimental feature.
    */
   bool IsGeneratorFunction() const;
 
   /**
    * Returns true if this value is a Generator object (iterator).
-   * This is an experimental feature.
    */
   bool IsGeneratorObject() const;
 
   /**
    * Returns true if this value is a Promise.
-   * This is an experimental feature.
    */
   bool IsPromise() const;
 
@@ -2073,85 +2114,66 @@
 
   /**
    * Returns true if this value is an ArrayBuffer.
-   * This is an experimental feature.
    */
   bool IsArrayBuffer() const;
 
   /**
    * Returns true if this value is an ArrayBufferView.
-   * This is an experimental feature.
    */
   bool IsArrayBufferView() const;
 
   /**
    * Returns true if this value is one of TypedArrays.
-   * This is an experimental feature.
    */
   bool IsTypedArray() const;
 
   /**
    * Returns true if this value is an Uint8Array.
-   * This is an experimental feature.
    */
   bool IsUint8Array() const;
 
   /**
    * Returns true if this value is an Uint8ClampedArray.
-   * This is an experimental feature.
    */
   bool IsUint8ClampedArray() const;
 
   /**
    * Returns true if this value is an Int8Array.
-   * This is an experimental feature.
    */
   bool IsInt8Array() const;
 
   /**
    * Returns true if this value is an Uint16Array.
-   * This is an experimental feature.
    */
   bool IsUint16Array() const;
 
   /**
    * Returns true if this value is an Int16Array.
-   * This is an experimental feature.
    */
   bool IsInt16Array() const;
 
   /**
    * Returns true if this value is an Uint32Array.
-   * This is an experimental feature.
    */
   bool IsUint32Array() const;
 
   /**
    * Returns true if this value is an Int32Array.
-   * This is an experimental feature.
    */
   bool IsInt32Array() const;
 
   /**
    * Returns true if this value is a Float32Array.
-   * This is an experimental feature.
    */
   bool IsFloat32Array() const;
 
   /**
    * Returns true if this value is a Float64Array.
-   * This is an experimental feature.
    */
   bool IsFloat64Array() const;
 
   /**
-   * Returns true if this value is a SIMD Float32x4.
-   * This is an experimental feature.
-   */
-  bool IsFloat32x4() const;
-
-  /**
    * Returns true if this value is a DataView.
-   * This is an experimental feature.
    */
   bool IsDataView() const;
 
@@ -2244,11 +2266,12 @@
 
   template <class T> V8_INLINE static Value* Cast(T* value);
 
-  Local<String> TypeOf(v8::Isolate*);
+  Local<String> TypeOf(Isolate*);
 
  private:
   V8_INLINE bool QuickIsUndefined() const;
   V8_INLINE bool QuickIsNull() const;
+  V8_INLINE bool QuickIsNullOrUndefined() const;
   V8_INLINE bool QuickIsString() const;
   bool FullIsUndefined() const;
   bool FullIsNull() const;
@@ -2291,9 +2314,10 @@
    */
   int GetIdentityHash();
 
-  V8_INLINE static Name* Cast(v8::Value* obj);
+  V8_INLINE static Name* Cast(Value* obj);
+
  private:
-  static void CheckCast(v8::Value* obj);
+  static void CheckCast(Value* obj);
 };
 
 
@@ -2310,7 +2334,7 @@
   enum Encoding {
     UNKNOWN_ENCODING = 0x1,
     TWO_BYTE_ENCODING = 0x0,
-    ONE_BYTE_ENCODING = 0x4
+    ONE_BYTE_ENCODING = 0x8
   };
   /**
    * Returns the number of characters in this string.
@@ -2391,7 +2415,7 @@
   /**
    * A zero length string.
    */
-  V8_INLINE static v8::Local<v8::String> Empty(Isolate* isolate);
+  V8_INLINE static Local<String> Empty(Isolate* isolate);
 
   /**
    * Returns true if the string is external
@@ -2425,7 +2449,8 @@
     void operator=(const ExternalStringResourceBase&) = delete;
 
    private:
-    friend class v8::internal::Heap;
+    friend class internal::Heap;
+    friend class v8::String;
   };
 
   /**
@@ -2669,8 +2694,6 @@
 
 /**
  * A JavaScript symbol (ECMA-262 edition 6)
- *
- * This is an experimental feature. Use at your own risk.
  */
 class V8_EXPORT Symbol : public Name {
  public:
@@ -2695,14 +2718,15 @@
   // Well-known symbols
   static Local<Symbol> GetIterator(Isolate* isolate);
   static Local<Symbol> GetUnscopables(Isolate* isolate);
+  static Local<Symbol> GetToPrimitive(Isolate* isolate);
   static Local<Symbol> GetToStringTag(Isolate* isolate);
   static Local<Symbol> GetIsConcatSpreadable(Isolate* isolate);
 
-  V8_INLINE static Symbol* Cast(v8::Value* obj);
+  V8_INLINE static Symbol* Cast(Value* obj);
 
  private:
   Symbol();
-  static void CheckCast(v8::Value* obj);
+  static void CheckCast(Value* obj);
 };
 
 
@@ -3695,7 +3719,7 @@
   /**
    * Tells whether this function is builtin.
    */
-  bool IsBuiltin() const;
+  V8_DEPRECATED("this should no longer be used.", bool IsBuiltin() const);
 
   /**
    * Returns scriptId.
@@ -3720,10 +3744,15 @@
 
 /**
  * An instance of the built-in Promise constructor (ES6 draft).
- * This API is experimental. Only works with --harmony flag.
  */
 class V8_EXPORT Promise : public Object {
  public:
+  /**
+   * State of the promise. Each value corresponds to one of the possible values
+   * of the [[PromiseState]] field.
+   */
+  enum PromiseState { kPending, kFulfilled, kRejected };
+
   class V8_EXPORT Resolver : public Object {
    public:
     /**
@@ -3780,6 +3809,17 @@
    */
   bool HasHandler();
 
+  /**
+   * Returns the content of the [[PromiseResult]] field. The Promise must not
+   * be pending.
+   */
+  Local<Value> Result();
+
+  /**
+   * Returns the value of the [[PromiseState]] field.
+   */
+  PromiseState State();
+
   V8_INLINE static Promise* Cast(Value* obj);
 
  private:
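
A brief usage sketch for the new state accessors (the |promise| handle is
hypothetical; per the comment above, Result() must not be called on a pending
promise):

  v8::Local<v8::Promise> promise = ...;  // obtained from the embedder
  if (promise->State() != v8::Promise::kPending) {
    v8::Local<v8::Value> result = promise->Result();
    bool rejected = (promise->State() == v8::Promise::kRejected);
  }
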
@@ -3926,7 +3966,6 @@
 
 /**
  * An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5).
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT ArrayBuffer : public Object {
  public:
@@ -3982,8 +4021,6 @@
    *
    * The Data pointer of ArrayBuffer::Contents is always allocated with
    * Allocator::Allocate that is set via Isolate::CreateParams.
-   *
-   * This API is experimental and may change significantly.
    */
   class V8_EXPORT Contents { // NOLINT
    public:
@@ -4084,8 +4121,6 @@
 /**
  * A base class for an instance of one of "views" over ArrayBuffer,
  * including TypedArrays and DataView (ES6 draft 15.13).
- *
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT ArrayBufferView : public Object {
  public:
@@ -4133,7 +4168,6 @@
 /**
  * A base class for an instance of TypedArray series of constructors
  * (ES6 draft 15.13.6).
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT TypedArray : public ArrayBufferView {
  public:
@@ -4153,7 +4187,6 @@
 
 /**
  * An instance of Uint8Array constructor (ES6 draft 15.13.6).
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT Uint8Array : public TypedArray {
  public:
@@ -4171,7 +4204,6 @@
 
 /**
  * An instance of Uint8ClampedArray constructor (ES6 draft 15.13.6).
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT Uint8ClampedArray : public TypedArray {
  public:
@@ -4189,7 +4221,6 @@
 
 /**
  * An instance of Int8Array constructor (ES6 draft 15.13.6).
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT Int8Array : public TypedArray {
  public:
@@ -4207,7 +4238,6 @@
 
 /**
  * An instance of Uint16Array constructor (ES6 draft 15.13.6).
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT Uint16Array : public TypedArray {
  public:
@@ -4225,7 +4255,6 @@
 
 /**
  * An instance of Int16Array constructor (ES6 draft 15.13.6).
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT Int16Array : public TypedArray {
  public:
@@ -4243,7 +4272,6 @@
 
 /**
  * An instance of Uint32Array constructor (ES6 draft 15.13.6).
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT Uint32Array : public TypedArray {
  public:
@@ -4261,7 +4289,6 @@
 
 /**
  * An instance of Int32Array constructor (ES6 draft 15.13.6).
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT Int32Array : public TypedArray {
  public:
@@ -4279,7 +4306,6 @@
 
 /**
  * An instance of Float32Array constructor (ES6 draft 15.13.6).
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT Float32Array : public TypedArray {
  public:
@@ -4297,7 +4323,6 @@
 
 /**
  * An instance of Float64Array constructor (ES6 draft 15.13.6).
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT Float64Array : public TypedArray {
  public:
@@ -4315,7 +4340,6 @@
 
 /**
  * An instance of DataView constructor (ES6 draft 15.13.7).
- * This API is experimental and may change significantly.
  */
 class V8_EXPORT DataView : public ArrayBufferView {
  public:
@@ -4446,7 +4470,7 @@
    */
   double ValueOf() const;
 
-  V8_INLINE static Date* Cast(v8::Value* obj);
+  V8_INLINE static Date* Cast(Value* obj);
 
   /**
    * Notification that the embedder has changed the time zone,
@@ -4463,7 +4487,7 @@
   static void DateTimeConfigurationChangeNotification(Isolate* isolate);
 
  private:
-  static void CheckCast(v8::Value* obj);
+  static void CheckCast(Value* obj);
 };
 
 
@@ -4476,10 +4500,10 @@
 
   double ValueOf() const;
 
-  V8_INLINE static NumberObject* Cast(v8::Value* obj);
+  V8_INLINE static NumberObject* Cast(Value* obj);
 
  private:
-  static void CheckCast(v8::Value* obj);
+  static void CheckCast(Value* obj);
 };
 
 
@@ -4493,10 +4517,10 @@
 
   bool ValueOf() const;
 
-  V8_INLINE static BooleanObject* Cast(v8::Value* obj);
+  V8_INLINE static BooleanObject* Cast(Value* obj);
 
  private:
-  static void CheckCast(v8::Value* obj);
+  static void CheckCast(Value* obj);
 };
 
 
@@ -4509,17 +4533,15 @@
 
   Local<String> ValueOf() const;
 
-  V8_INLINE static StringObject* Cast(v8::Value* obj);
+  V8_INLINE static StringObject* Cast(Value* obj);
 
  private:
-  static void CheckCast(v8::Value* obj);
+  static void CheckCast(Value* obj);
 };
 
 
 /**
  * A Symbol object (ECMA-262 edition 6).
- *
- * This is an experimental feature. Use at your own risk.
  */
 class V8_EXPORT SymbolObject : public Object {
  public:
@@ -4527,10 +4549,10 @@
 
   Local<Symbol> ValueOf() const;
 
-  V8_INLINE static SymbolObject* Cast(v8::Value* obj);
+  V8_INLINE static SymbolObject* Cast(Value* obj);
 
  private:
-  static void CheckCast(v8::Value* obj);
+  static void CheckCast(Value* obj);
 };
 
 
@@ -4580,10 +4602,10 @@
    */
   Flags GetFlags() const;
 
-  V8_INLINE static RegExp* Cast(v8::Value* obj);
+  V8_INLINE static RegExp* Cast(Value* obj);
 
  private:
-  static void CheckCast(v8::Value* obj);
+  static void CheckCast(Value* obj);
 };
 
 
@@ -4600,8 +4622,11 @@
   static void CheckCast(v8::Value* obj);
 };
 
-
-#define V8_INTRINSICS_LIST(F) F(ArrayProto_values, array_values_iterator)
+#define V8_INTRINSICS_LIST(F)                    \
+  F(ArrayProto_entries, array_entries_iterator)  \
+  F(ArrayProto_forEach, array_for_each_iterator) \
+  F(ArrayProto_keys, array_keys_iterator)        \
+  F(ArrayProto_values, array_values_iterator)
 
 enum Intrinsic {
 #define V8_DECL_INTRINSIC(name, iname) k##name,
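
For reference, with the expanded intrinsics list the X-macro above now
generates roughly the following enumerators (sketched preprocessor output, not
literal header text):

  enum Intrinsic {
    kArrayProto_entries,
    kArrayProto_forEach,
    kArrayProto_keys,
    kArrayProto_values
  };
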
@@ -5144,7 +5169,11 @@
   /** Get the InstanceTemplate. */
   Local<ObjectTemplate> InstanceTemplate();
 
-  /** Causes the function template to inherit from a parent function template.*/
+  /**
+   * Causes the function template to inherit from a parent function template.
+   * This means that the function's prototype.__proto__ is set to the parent
+   * function's prototype.
+   **/
   void Inherit(Local<FunctionTemplate> parent);
 
   /**
@@ -5154,6 +5183,14 @@
   Local<ObjectTemplate> PrototypeTemplate();
 
   /**
+   * A PrototypeProviderTemplate is another function template whose prototype
+   * property is used for this template. This is mutually exclusive with setting
+   * a prototype template indirectly by calling PrototypeTemplate() or using
+   * Inherit().
+   **/
+  void SetPrototypeProviderTemplate(Local<FunctionTemplate> prototype_provider);
+
+  /**
    * Set the class name of the FunctionTemplate.  This is used for
    * printing objects created with the function created from the
    * FunctionTemplate as its constructor.
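
A short sketch of what Inherit() does to the prototype chain (the |base| and
|derived| handles are hypothetical):

  v8::Local<v8::FunctionTemplate> base = v8::FunctionTemplate::New(isolate);
  v8::Local<v8::FunctionTemplate> derived = v8::FunctionTemplate::New(isolate);
  derived->Inherit(base);
  // After instantiation, in JavaScript terms:
  //   derived_fn.prototype.__proto__ === base_fn.prototype
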
@@ -5611,9 +5648,9 @@
             const char** deps = 0,
             int source_length = -1);
   virtual ~Extension() { }
-  virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
-      v8::Isolate* isolate, v8::Local<v8::String> name) {
-    return v8::Local<v8::FunctionTemplate>();
+  virtual Local<FunctionTemplate> GetNativeFunctionTemplate(
+      Isolate* isolate, Local<String> name) {
+    return Local<FunctionTemplate>();
   }
 
   const char* name() const { return name_; }
@@ -5718,7 +5755,7 @@
 
 typedef void (*OOMErrorCallback)(const char* location, bool is_heap_oom);
 
-typedef void (*MessageCallback)(Local<Message> message, Local<Value> error);
+typedef void (*MessageCallback)(Local<Message> message, Local<Value> data);
 
 // --- Tracing ---
 
@@ -5787,6 +5824,27 @@
 typedef void (*CallCompletedCallback)(Isolate*);
 typedef void (*DeprecatedCallCompletedCallback)();
 
+/**
+ * PromiseHook with type kInit is called when a new promise is
+ * created. When a new promise is created as part of the chain in the
+ * case of Promise.then or in the intermediate promises created by
+ * Promise.{race, all}/AsyncFunctionAwait, we pass the parent promise
+ * otherwise we pass undefined.
+ *
+ * PromiseHook with type kResolve is called at the beginning of
+ * resolve or reject function defined by CreateResolvingFunctions.
+ *
+ * PromiseHook with type kBefore is called at the beginning of the
+ * PromiseReactionJob.
+ *
+ * PromiseHook with type kAfter is called right at the end of the
+ * PromiseReactionJob.
+ */
+enum class PromiseHookType { kInit, kResolve, kBefore, kAfter };
+
+typedef void (*PromiseHook)(PromiseHookType type, Local<Promise> promise,
+                            Local<Value> parent);
+
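
A minimal hook sketch (the body is hypothetical; the hook is installed via
Isolate::SetPromiseHook(), declared further below):

  void MyPromiseHook(v8::PromiseHookType type, v8::Local<v8::Promise> promise,
                     v8::Local<v8::Value> parent) {
    if (type == v8::PromiseHookType::kInit && !parent->IsUndefined()) {
      // |promise| was created as part of a chain; |parent| is its parent.
    }
  }
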
 // --- Promise Reject Callback ---
 enum PromiseRejectEvent {
   kPromiseRejectWithNoHandler = 0,
@@ -5889,6 +5947,21 @@
  */
 typedef bool (*AllowCodeGenerationFromStringsCallback)(Local<Context> context);
 
+// --- WASM compilation callbacks ---
+
+/**
+ * Callback to check if a buffer source may be compiled to WASM; |as_promise|
+ * indicates whether the compilation is attempted as a promise (that is,
+ * asynchronously) or not.
+ */
+typedef bool (*AllowWasmCompileCallback)(Isolate* isolate, Local<Value> source,
+                                         bool as_promise);
+
+typedef bool (*AllowWasmInstantiateCallback)(Isolate* isolate,
+                                             Local<Value> module_or_bytes,
+                                             MaybeLocal<Value> ffi,
+                                             bool as_promise);
+
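
An embedder policy sketch for these callbacks (the only-async policy is an
example, not a recommendation):

  bool AllowOnlyAsyncWasmCompile(v8::Isolate* isolate,
                                 v8::Local<v8::Value> source,
                                 bool as_promise) {
    // Permit only promise-based (asynchronous) compilation.
    return as_promise;
  }
  // Installed with Isolate::SetAllowWasmCompileCallback(), declared below.
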
 // --- Garbage Collection Callbacks ---
 
 /**
@@ -6249,17 +6322,33 @@
 };
 
 /**
- * Callback to the embedder used in SnapshotCreator to handle internal fields.
+ * Callback and supporting data used in SnapshotCreator to implement embedder
+ * logic to serialize internal fields.
  */
-typedef StartupData (*SerializeInternalFieldsCallback)(Local<Object> holder,
-                                                       int index);
+struct SerializeInternalFieldsCallback {
+  typedef StartupData (*CallbackFunction)(Local<Object> holder, int index,
+                                          void* data);
+  SerializeInternalFieldsCallback(CallbackFunction function = nullptr,
+                                  void* data_arg = nullptr)
+      : callback(function), data(data_arg) {}
+  CallbackFunction callback;
+  void* data;
+};
 
 /**
- * Callback to the embedder used to deserialize internal fields.
+ * Callback and supporting data used to implement embedder logic to deserialize
+ * internal fields.
  */
-typedef void (*DeserializeInternalFieldsCallback)(Local<Object> holder,
-                                                  int index,
-                                                  StartupData payload);
+struct DeserializeInternalFieldsCallback {
+  typedef void (*CallbackFunction)(Local<Object> holder, int index,
+                                   StartupData payload, void* data);
+  DeserializeInternalFieldsCallback(CallbackFunction function = nullptr,
+                                    void* data_arg = nullptr)
+      : callback(function), data(data_arg) {}
+  CallbackFunction callback;
+  void* data;
+};
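
A sketch of a matching callback pair using the new structs (the payload
handling and |user_data| are hypothetical):

  v8::StartupData SerializeField(v8::Local<v8::Object> holder, int index,
                                 void* data) {
    // Encode the internal field at |index|; empty payload if unmanaged.
    return {nullptr, 0};
  }

  void DeserializeField(v8::Local<v8::Object> holder, int index,
                        v8::StartupData payload, void* data) {
    // Decode |payload| and restore the internal field at |index|.
  }

  // Wired up as:
  //   v8::SerializeInternalFieldsCallback(SerializeField, user_data)
  //   v8::DeserializeInternalFieldsCallback(DeserializeField, user_data)
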
 
 /**
  * Isolate represents an isolated instance of the V8 engine.  V8 isolates have
@@ -6284,7 +6373,7 @@
           add_histogram_sample_callback(nullptr),
           array_buffer_allocator(nullptr),
           external_references(nullptr),
-          deserialize_internal_fields_callback(nullptr) {}
+          allow_atomics_wait(true) {}
 
     /**
      * The optional entry_hook allows the host application to provide the
@@ -6342,10 +6431,10 @@
     intptr_t* external_references;
 
     /**
-     * Specifies an optional callback to deserialize internal fields. It
-     * should match the SerializeInternalFieldCallback used to serialize.
+     * Whether calling Atomics.wait (a function that may block) is allowed in
+     * this isolate.
      */
-    DeserializeInternalFieldsCallback deserialize_internal_fields_callback;
+    bool allow_atomics_wait;
   };
 
 
@@ -6481,12 +6570,25 @@
     kLegacyDateParser = 33,
     kDefineGetterOrSetterWouldThrow = 34,
     kFunctionConstructorReturnedUndefined = 35,
+    kAssigmentExpressionLHSIsCallInSloppy = 36,
+    kAssigmentExpressionLHSIsCallInStrict = 37,
+    kPromiseConstructorReturnedUndefined = 38,
 
     // If you add new values here, you'll also need to update Chromium's:
     // UseCounter.h, V8PerIsolateData.cpp, histograms.xml
     kUseCounterFeatureCount  // This enum value must be last.
   };
 
+  enum MessageErrorLevel {
+    kMessageLog = (1 << 0),
+    kMessageDebug = (1 << 1),
+    kMessageInfo = (1 << 2),
+    kMessageError = (1 << 3),
+    kMessageWarning = (1 << 4),
+    kMessageAll = kMessageLog | kMessageDebug | kMessageInfo | kMessageError |
+                  kMessageWarning,
+  };
+
   typedef void (*UseCounterCallback)(Isolate* isolate,
                                      UseCounterFeature feature);
 
@@ -6707,6 +6809,14 @@
   Local<Context> GetEnteredContext();
 
   /**
+   * Returns either the last context entered through V8's C++ API, or the
+   * context of the currently running microtask while processing microtasks.
+   * If a context is entered while executing a microtask, that context is
+   * returned.
+   */
+  Local<Context> GetEnteredOrMicrotaskContext();
+
+  /**
    * Schedules an exception to be thrown when returning to JavaScript.  When an
    * exception has been scheduled it is illegal to invoke any JavaScript
    * operation; the caller must return immediately and only after the exception
@@ -6725,8 +6835,10 @@
    * garbage collection types it is sufficient to provide object groups
    * for partially dependent handles only.
    */
-  template<typename T> void SetObjectGroupId(const Persistent<T>& object,
-                                             UniqueId id);
+  template <typename T>
+  V8_DEPRECATED("Use EmbedderHeapTracer",
+                void SetObjectGroupId(const Persistent<T>& object,
+                                      UniqueId id));
 
   /**
    * Allows the host application to declare implicit references from an object
@@ -6735,8 +6847,10 @@
    * are removed. It is intended to be used in the before-garbage-collection
    * callback function.
    */
-  template<typename T> void SetReferenceFromGroup(UniqueId id,
-                                                  const Persistent<T>& child);
+  template <typename T>
+  V8_DEPRECATED("Use EmbedderHeapTracer",
+                void SetReferenceFromGroup(UniqueId id,
+                                           const Persistent<T>& child));
 
   /**
    * Allows the host application to declare implicit references from an object
@@ -6744,8 +6858,10 @@
    * too. After each garbage collection, all implicit references are removed. It
    * is intended to be used in the before-garbage-collection callback function.
    */
-  template<typename T, typename S>
-  void SetReference(const Persistent<T>& parent, const Persistent<S>& child);
+  template <typename T, typename S>
+  V8_DEPRECATED("Use EmbedderHeapTracer",
+                void SetReference(const Persistent<T>& parent,
+                                  const Persistent<S>& child));
 
   typedef void (*GCCallback)(Isolate* isolate, GCType type,
                              GCCallbackFlags flags);
@@ -6888,6 +7004,12 @@
           DeprecatedCallCompletedCallback callback));
 
   /**
+   * Experimental: Set the PromiseHook callback for various promise
+   * lifecycle events.
+   */
+  void SetPromiseHook(PromiseHook hook);
+
+  /**
    * Set callback to notify about promise reject with no handler, or
    * revocation of such a previous notification once the handler is added.
    */
@@ -7021,6 +7143,23 @@
   void SetRAILMode(RAILMode rail_mode);
 
   /**
+   * Optional notification to tell V8 the current isolate is used for debugging
+   * and requires a higher heap limit.
+   */
+  void IncreaseHeapLimitForDebugging();
+
+  /**
+   * Restores the original heap limit after IncreaseHeapLimitForDebugging().
+   */
+  void RestoreOriginalHeapLimit();
+
+  /**
+   * Returns true if the heap limit was increased for debugging and the
+   * original heap limit has not yet been restored.
+   */
+  bool IsHeapLimitIncreasedForDebugging();
+
+  /**
    * Allows the host application to provide the address of a function that is
    * notified each time code is added, moved or removed.
    *
@@ -7085,13 +7224,23 @@
       AllowCodeGenerationFromStringsCallback callback);
 
   /**
+   * Set the callback to invoke to check if wasm compilation from
+   * the specified object is allowed. By default, wasm compilation
+   * is allowed.
+   *
+   * SetAllowWasmInstantiateCallback behaves analogously for instantiation.
+   */
+  void SetAllowWasmCompileCallback(AllowWasmCompileCallback callback);
+  void SetAllowWasmInstantiateCallback(AllowWasmInstantiateCallback callback);
+
+  /**
   * Check if V8 is dead and therefore unusable.  This is the case after
   * fatal errors such as out-of-memory situations.
   */
   bool IsDead();
 
   /**
-   * Adds a message listener.
+   * Adds a message listener (errors only).
    *
    * The same message listener can be added more than once and in that
    * case it will be called more than once for each message.
@@ -7103,6 +7252,21 @@
                           Local<Value> data = Local<Value>());
 
   /**
+   * Adds a message listener.
+   *
+   * The same message listener can be added more than once and in that
+   * case it will be called more than once for each message.
+   *
+   * If data is specified, it will be passed to the callback when it is called.
+   * Otherwise, the exception object will be passed to the callback instead.
+   *
+   * A listener can listen for particular error levels by providing a mask.
+   */
+  bool AddMessageListenerWithErrorLevel(MessageCallback that,
+                                        int message_levels,
+                                        Local<Value> data = Local<Value>());
+
+  /**
    * Remove all message listeners from the specified callback function.
    */
   void RemoveMessageListeners(MessageCallback that);
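
A registration sketch for the level-filtered listener (the listener body is
hypothetical; Message::ErrorLevel(), declared earlier in this header, reports
one of Isolate::MessageErrorLevel):

  void OnMessage(v8::Local<v8::Message> message, v8::Local<v8::Value> data) {
    int level = message->ErrorLevel();
    // React to the message according to |level|.
  }

  isolate->AddMessageListenerWithErrorLevel(
      OnMessage, v8::Isolate::kMessageError | v8::Isolate::kMessageWarning);
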
@@ -7598,10 +7762,23 @@
   Isolate* GetIsolate();
 
   /**
-   * Add a context to be included in the snapshot blob.
+   * Set the default context to be included in the snapshot blob.
+   * The snapshot will not contain the global proxy; we expect either a global
+   * proxy or a global object template (from which one can be created) to be
+   * provided upon deserialization.
+   */
+  void SetDefaultContext(Local<Context> context);
+
+  /**
+   * Add an additional context to be included in the snapshot blob.
+   * The snapshot will include the global proxy.
+   *
+   * \param callback optional callback to serialize internal fields.
+   *
    * \returns the index of the context in the snapshot blob.
    */
-  size_t AddContext(Local<Context> context);
+  size_t AddContext(Local<Context> context,
+                    SerializeInternalFieldsCallback callback =
+                        SerializeInternalFieldsCallback());
 
   /**
    * Add a template to be included in the snapshot blob.
@@ -7614,12 +7791,10 @@
    * This must not be called from within a handle scope.
    * \param function_code_handling whether to include compiled function code
    *        in the snapshot.
-   * \param callback to serialize embedder-set internal fields.
    * \returns { nullptr, 0 } on failure, and a startup snapshot on success. The
    *        caller acquires ownership of the data array in the return value.
    */
-  StartupData CreateBlob(FunctionCodeHandling function_code_handling,
-                         SerializeInternalFieldsCallback callback = nullptr);
+  StartupData CreateBlob(FunctionCodeHandling function_code_handling);
 
   // Disallow copying and assigning.
   SnapshotCreator(const SnapshotCreator&) = delete;
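
The revised creation flow, sketched end to end (the contexts, SerializeField,
and |user_data| are hypothetical; note that CreateBlob runs outside any handle
scope, as required above):

  v8::SnapshotCreator creator;
  v8::Isolate* isolate = creator.GetIsolate();
  {
    v8::HandleScope scope(isolate);
    creator.SetDefaultContext(default_context);  // no global proxy in blob
    size_t index = creator.AddContext(
        extra_context,
        v8::SerializeInternalFieldsCallback(SerializeField, user_data));
    // |index| is later passed to v8::Context::FromSnapshot().
  }
  v8::StartupData blob =
      creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
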
@@ -7825,21 +8000,21 @@
    * UseAfterReturn is enabled, then the address returned will be the address
    * of the C++ try catch handler itself.
    */
-  static void* JSStackComparableAddress(v8::TryCatch* handler) {
+  static void* JSStackComparableAddress(TryCatch* handler) {
     if (handler == NULL) return NULL;
     return handler->js_stack_comparable_address_;
   }
 
   TryCatch(const TryCatch&) = delete;
   void operator=(const TryCatch&) = delete;
-  void* operator new(size_t size) = delete;
-  void operator delete(void*, size_t) = delete;
+  void* operator new(size_t size);
+  void operator delete(void*, size_t);
 
  private:
   void ResetInternal();
 
-  v8::internal::Isolate* isolate_;
-  v8::TryCatch* next_;
+  internal::Isolate* isolate_;
+  TryCatch* next_;
   void* exception_;
   void* message_obj_;
   void* js_stack_comparable_address_;
@@ -7849,7 +8024,7 @@
   bool rethrow_ : 1;
   bool has_terminated_ : 1;
 
-  friend class v8::internal::Isolate;
+  friend class internal::Isolate;
 };
 
 
@@ -7922,10 +8097,30 @@
       MaybeLocal<ObjectTemplate> global_template = MaybeLocal<ObjectTemplate>(),
       MaybeLocal<Value> global_object = MaybeLocal<Value>());
 
+  /**
+   * Create a new context from a (non-default) context snapshot. There
+   * is no way to provide a global object template since we do not create
+   * a new global object from a template, but we can reuse a global object.
+   *
+   * \param isolate See v8::Context::New.
+   *
+   * \param context_snapshot_index The index of the context snapshot to
+   * deserialize from. Use v8::Context::New for the default snapshot.
+   *
+   * \param internal_fields_deserializer Optional callback to deserialize
+   * internal fields. It should match the SerializeInternalFieldsCallback used
+   * to serialize.
+   *
+   * \param extensions See v8::Context::New.
+   *
+   * \param global_object See v8::Context::New.
+   */
   static MaybeLocal<Context> FromSnapshot(
       Isolate* isolate, size_t context_snapshot_index,
+      DeserializeInternalFieldsCallback internal_fields_deserializer =
+          DeserializeInternalFieldsCallback(),
       ExtensionConfiguration* extensions = nullptr,
-      MaybeLocal<ObjectTemplate> global_template = MaybeLocal<ObjectTemplate>(),
       MaybeLocal<Value> global_object = MaybeLocal<Value>());
 
   /**
@@ -7976,7 +8171,7 @@
   void Exit();
 
   /** Returns an isolate associated with a current context. */
-  v8::Isolate* GetIsolate();
+  Isolate* GetIsolate();
 
   /**
    * The field at kDebugIdIndex is reserved for V8 debugger implementation.
@@ -8308,10 +8503,10 @@
   static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
   static const int kContextHeaderSize = 2 * kApiPointerSize;
   static const int kContextEmbedderDataIndex = 5;
-  static const int kFullStringRepresentationMask = 0x07;
-  static const int kStringEncodingMask = 0x4;
+  static const int kFullStringRepresentationMask = 0x0f;
+  static const int kStringEncodingMask = 0x8;
   static const int kExternalTwoByteRepresentationTag = 0x02;
-  static const int kExternalOneByteRepresentationTag = 0x06;
+  static const int kExternalOneByteRepresentationTag = 0x0a;
 
   static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
   static const int kExternalMemoryOffset = 4 * kApiPointerSize;
@@ -8336,11 +8531,11 @@
   static const int kNodeIsIndependentShift = 3;
   static const int kNodeIsActiveShift = 4;
 
-  static const int kJSObjectType = 0xbc;
-  static const int kJSApiObjectType = 0xbb;
+  static const int kJSApiObjectType = 0xb9;
+  static const int kJSObjectType = 0xba;
   static const int kFirstNonstringType = 0x80;
-  static const int kOddballType = 0x83;
-  static const int kForeignType = 0x87;
+  static const int kOddballType = 0x82;
+  static const int kForeignType = 0x86;
 
   static const int kUndefinedOddballKind = 5;
   static const int kNullOddballKind = 3;
@@ -8856,17 +9051,17 @@
                            Local<Integer> resource_column_offset,
                            Local<Boolean> resource_is_shared_cross_origin,
                            Local<Integer> script_id,
-                           Local<Boolean> resource_is_embedder_debug_script,
                            Local<Value> source_map_url,
-                           Local<Boolean> resource_is_opaque)
+                           Local<Boolean> resource_is_opaque,
+                           Local<Boolean> is_wasm, Local<Boolean> is_module)
     : resource_name_(resource_name),
       resource_line_offset_(resource_line_offset),
       resource_column_offset_(resource_column_offset),
-      options_(!resource_is_embedder_debug_script.IsEmpty() &&
-                   resource_is_embedder_debug_script->IsTrue(),
-               !resource_is_shared_cross_origin.IsEmpty() &&
+      options_(!resource_is_shared_cross_origin.IsEmpty() &&
                    resource_is_shared_cross_origin->IsTrue(),
-               !resource_is_opaque.IsEmpty() && resource_is_opaque->IsTrue()),
+               !resource_is_opaque.IsEmpty() && resource_is_opaque->IsTrue(),
+               !is_wasm.IsEmpty() && is_wasm->IsTrue(),
+               !is_module.IsEmpty() && is_module->IsTrue()),
       script_id_(script_id),
       source_map_url_(source_map_url) {}
 
@@ -8915,14 +9110,16 @@
   return cached_data;
 }
 
+const ScriptOriginOptions& ScriptCompiler::Source::GetResourceOptions() const {
+  return resource_options;
+}
 
 Local<Boolean> Boolean::New(Isolate* isolate, bool value) {
   return value ? True(isolate) : False(isolate);
 }
 
-
-void Template::Set(Isolate* isolate, const char* name, v8::Local<Data> value) {
-  Set(v8::String::NewFromUtf8(isolate, name, NewStringType::kNormal)
+void Template::Set(Isolate* isolate, const char* name, Local<Data> value) {
+  Set(String::NewFromUtf8(isolate, name, NewStringType::kInternalized)
           .ToLocalChecked(),
       value);
 }
@@ -9056,6 +9253,23 @@
   return (I::GetOddballKind(obj) == I::kNullOddballKind);
 }
 
+bool Value::IsNullOrUndefined() const {
+#ifdef V8_ENABLE_CHECKS
+  return FullIsNull() || FullIsUndefined();
+#else
+  return QuickIsNullOrUndefined();
+#endif
+}
+
+bool Value::QuickIsNullOrUndefined() const {
+  typedef internal::Object O;
+  typedef internal::Internals I;
+  O* obj = *reinterpret_cast<O* const*>(this);
+  if (!I::HasHeapObjectTag(obj)) return false;
+  if (I::GetInstanceType(obj) != I::kOddballType) return false;
+  int kind = I::GetOddballKind(obj);
+  return kind == I::kNullOddballKind || kind == I::kUndefinedOddballKind;
+}
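
A usage sketch for the new predicate (the |value| handle is hypothetical):

  if (value->IsNullOrUndefined()) {
    // On the fast path this inspects the oddball kind once instead of
    // performing separate IsNull() and IsUndefined() checks.
  }
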
 
 bool Value::IsString() const {
 #ifdef V8_ENABLE_CHECKS
@@ -9531,7 +9745,7 @@
 void Isolate::SetObjectGroupId(const Persistent<T>& object,
                                UniqueId id) {
   TYPE_CHECK(Value, T);
-  SetObjectGroupId(reinterpret_cast<v8::internal::Object**>(object.val_), id);
+  SetObjectGroupId(reinterpret_cast<internal::Object**>(object.val_), id);
 }
 
 
@@ -9539,8 +9753,7 @@
 void Isolate::SetReferenceFromGroup(UniqueId id,
                                     const Persistent<T>& object) {
   TYPE_CHECK(Value, T);
-  SetReferenceFromGroup(id,
-                        reinterpret_cast<v8::internal::Object**>(object.val_));
+  SetReferenceFromGroup(id, reinterpret_cast<internal::Object**>(object.val_));
 }
 
 
@@ -9549,8 +9762,8 @@
                            const Persistent<S>& child) {
   TYPE_CHECK(Object, T);
   TYPE_CHECK(Value, S);
-  SetReference(reinterpret_cast<v8::internal::Object**>(parent.val_),
-               reinterpret_cast<v8::internal::Object**>(child.val_));
+  SetReference(reinterpret_cast<internal::Object**>(parent.val_),
+               reinterpret_cast<internal::Object**>(child.val_));
 }
 
 
@@ -9627,14 +9840,14 @@
 void V8::RemoveGCPrologueCallback(GCCallback callback) {
   Isolate* isolate = Isolate::GetCurrent();
   isolate->RemoveGCPrologueCallback(
-      reinterpret_cast<v8::Isolate::GCCallback>(callback));
+      reinterpret_cast<Isolate::GCCallback>(callback));
 }
 
 
 void V8::RemoveGCEpilogueCallback(GCCallback callback) {
   Isolate* isolate = Isolate::GetCurrent();
   isolate->RemoveGCEpilogueCallback(
-      reinterpret_cast<v8::Isolate::GCCallback>(callback));
+      reinterpret_cast<Isolate::GCCallback>(callback));
 }
 
 void V8::TerminateExecution(Isolate* isolate) { isolate->TerminateExecution(); }
diff --git a/infra/config/cq.cfg b/infra/config/cq.cfg
index e93895f..72381ee 100644
--- a/infra/config/cq.cfg
+++ b/infra/config/cq.cfg
@@ -4,11 +4,11 @@
 version: 1
 cq_name: "v8"
 cq_status_url: "https://chromium-cq-status.appspot.com"
-hide_ref_in_committed_msg: true
+git_repo_url: "https://chromium.googlesource.com/v8/v8"
 commit_burst_delay: 60
 max_commit_burst: 1
-target_ref: "refs/pending/heads/master"
 
+gerrit {}
 rietveld {
   url: "https://codereview.chromium.org"
 }
@@ -19,6 +19,11 @@
     dry_run_access_list: "project-v8-tryjob-access"
   }
 
+  gerrit_cq_ability {
+    committer_list: "project-v8-committers"
+    dry_run_access_list: "project-v8-tryjob-access"
+  }
+
   tree_status {
     tree_status_url: "https://v8-status.appspot.com"
   }
@@ -47,6 +52,11 @@
         name: "v8_linux64_rel_ng_triggered"
         triggered_by: "v8_linux64_rel_ng"
       }
+      builders { name: "v8_linux64_verify_csa_rel_ng" }
+      builders {
+        name: "v8_linux64_verify_csa_rel_ng_triggered"
+        triggered_by: "v8_linux64_verify_csa_rel_ng"
+      }
       builders { name: "v8_linux_arm64_rel_ng" }
       builders {
         name: "v8_linux_arm64_rel_ng_triggered"
@@ -76,6 +86,11 @@
         name: "v8_linux_rel_ng_triggered"
         triggered_by: "v8_linux_rel_ng"
       }
+      builders { name: "v8_linux_verify_csa_rel_ng" }
+      builders {
+        name: "v8_linux_verify_csa_rel_ng_triggered"
+        triggered_by: "v8_linux_verify_csa_rel_ng"
+      }
       builders { name: "v8_mac_rel_ng" }
       builders {
         name: "v8_mac_rel_ng_triggered"
diff --git a/infra/mb/mb_config.pyl b/infra/mb/mb_config.pyl
index d6a2a2d..2a78d86 100644
--- a/infra/mb/mb_config.pyl
+++ b/infra/mb/mb_config.pyl
@@ -18,6 +18,12 @@
       'ia32.debug': 'default_debug_x86',
       'ia32.optdebug': 'default_optdebug_x86',
       'ia32.release': 'default_release_x86',
+      'mipsel.debug': 'default_debug_mipsel',
+      'mipsel.optdebug': 'default_optdebug_mipsel',
+      'mipsel.release': 'default_release_mipsel',
+      'mips64el.debug': 'default_debug_mips64el',
+      'mips64el.optdebug': 'default_optdebug_mips64el',
+      'mips64el.release': 'default_release_mips64el',
       'x64.debug': 'default_debug_x64',
       'x64.optdebug': 'default_optdebug_x64',
       'x64.release': 'default_release_x64',
@@ -39,12 +45,14 @@
       'V8 Linux - nosnap debug builder': 'gn_debug_x86_no_snap',
       'V8 Linux - shared': 'gn_release_x86_shared_verify_heap',
       'V8 Linux - noi18n - debug': 'gn_debug_x86_no_i18n',
+      'V8 Linux - verify csa': 'gn_release_x86_verify_csa',
       # Linux64.
       'V8 Linux64 - builder': 'gn_release_x64_valgrind',
       'V8 Linux64 - debug builder': 'gn_debug_x64_valgrind',
       'V8 Linux64 - custom snapshot - debug builder': 'gn_debug_x64_custom',
       'V8 Linux64 - internal snapshot': 'gn_release_x64_internal',
       'V8 Linux64 - gyp': 'gyp_release_x64',
+      'V8 Linux64 - verify csa': 'gn_release_x64_verify_csa',
       # Windows.
       'V8 Win32 - builder': 'gn_release_x86_minimal_symbols',
       'V8 Win32 - debug builder': 'gn_debug_x86_minimal_symbols',
@@ -66,6 +74,7 @@
       'V8 Linux64 TSAN': 'gn_release_x64_tsan',
       'V8 Linux - arm64 - sim - MSAN': 'gn_release_simulate_arm64_msan',
       # Clusterfuzz.
+      'V8 Linux64 - release builder': 'gn_release_x64_correctness_fuzzer',
       'V8 Linux64 ASAN no inline - release builder':
           'gn_release_x64_asan_symbolized_edge_verify_heap',
       'V8 Linux64 ASAN - debug builder': 'gn_debug_x64_asan_edge',
@@ -116,8 +125,7 @@
       'V8 Linux - s390 - sim': 'gyp_release_simulate_s390',
       'V8 Linux - s390x - sim': 'gyp_release_simulate_s390x',
       # X87.
-      'V8 Linux - x87 - nosnap - debug builder':
-          'gyp_debug_simulate_x87_no_snap',
+      'V8 Linux - x87 - nosnap - debug builder': 'gyp_debug_simulate_x87',
     },
     'client.v8.branches': {
       'V8 Linux - beta branch': 'gn_release_x86',
@@ -147,6 +155,7 @@
     },
     'tryserver.v8': {
       'v8_linux_rel_ng': 'gn_release_x86_gcmole_trybot',
+      'v8_linux_verify_csa_rel_ng': 'gn_release_x86_verify_csa',
       'v8_linux_avx2_dbg': 'gn_debug_x86_trybot',
       'v8_linux_nodcheck_rel_ng': 'gn_release_x86_minimal_symbols',
       'v8_linux_dbg_ng': 'gn_debug_x86_trybot',
@@ -157,6 +166,7 @@
       'v8_linux_gcc_compile_rel': 'gn_release_x86_gcc_minimal_symbols',
       'v8_linux_gcc_rel': 'gn_release_x86_gcc_minimal_symbols',
       'v8_linux64_rel_ng': 'gn_release_x64_valgrind_trybot',
+      'v8_linux64_verify_csa_rel_ng': 'gn_release_x64_verify_csa',
       'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
       'v8_linux64_avx2_rel_ng': 'gn_release_x64_trybot',
       'v8_linux64_avx2_dbg': 'gn_debug_x64_trybot',
@@ -210,6 +220,20 @@
       'gn', 'debug', 'simulate_arm64', 'v8_enable_slow_dchecks'],
     'default_release_arm64': [
       'gn', 'release', 'simulate_arm64'],
+    'default_debug_mipsel': [
+      'gn', 'debug', 'simulate_mipsel', 'v8_enable_slow_dchecks',
+      'v8_full_debug'],
+    'default_optdebug_mipsel': [
+      'gn', 'debug', 'simulate_mipsel', 'v8_enable_slow_dchecks'],
+    'default_release_mipsel': [
+      'gn', 'release', 'simulate_mipsel'],
+    'default_debug_mips64el': [
+      'gn', 'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks',
+      'v8_full_debug'],
+    'default_optdebug_mips64el': [
+      'gn', 'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks'],
+    'default_release_mips64el': [
+      'gn', 'release', 'simulate_mips64el'],
     'default_debug_x64': [
       'gn', 'debug', 'x64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
     'default_optdebug_x64': [
@@ -286,6 +310,8 @@
       'v8_verify_heap'],
     'gn_release_x64_clang': [
       'gn', 'release_bot', 'x64', 'clang', 'swarming'],
+    'gn_release_x64_correctness_fuzzer' : [
+      'gn', 'release_bot', 'x64', 'v8_correctness_fuzzer'],
     'gn_release_x64_internal': [
       'gn', 'release_bot', 'x64', 'swarming', 'v8_snapshot_internal'],
     'gn_release_x64_minimal_symbols': [
@@ -300,6 +326,9 @@
       'gn', 'release_bot', 'x64', 'swarming', 'valgrind'],
     'gn_release_x64_valgrind_trybot': [
       'gn', 'release_trybot', 'x64', 'swarming', 'valgrind'],
+    'gn_release_x64_verify_csa': [
+      'gn', 'release_bot', 'x64', 'swarming', 'dcheck_always_on',
+      'v8_enable_slow_dchecks', 'v8_verify_csa'],
 
     # GN debug configs for x64.
     'gn_debug_x64': [
@@ -357,11 +386,13 @@
       'gn', 'release', 'x86', 'goma', 'shared', 'swarming', 'v8_verify_heap'],
     'gn_release_x86_trybot': [
       'gn', 'release_trybot', 'x86', 'swarming'],
+    'gn_release_x86_verify_csa': [
+      'gn', 'release_bot', 'x86', 'swarming', 'dcheck_always_on',
+      'v8_enable_slow_dchecks', 'v8_verify_csa'],
 
     # Gyp debug configs for simulators.
-    'gyp_debug_simulate_x87_no_snap': [
-      'gyp', 'debug_bot_static', 'simulate_x87', 'swarming',
-      'v8_snapshot_none'],
+    'gyp_debug_simulate_x87': [
+      'gyp', 'debug_bot_static', 'simulate_x87', 'swarming'],
 
     # Gyp debug configs for x86.
     'gyp_debug_x86': [
@@ -626,6 +657,10 @@
       'gyp_defines': 'v8_enable_i18n_support=0 icu_use_data_file_flag=0',
     },
 
+    'v8_correctness_fuzzer': {
+      'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
+    },
+
     'v8_disable_inspector': {
       'gn_args': 'v8_enable_inspector=false',
       'gyp_defines': 'v8_enable_inspector=0 ',
@@ -689,6 +724,10 @@
       'gyp_defines': 'v8_enable_verify_heap=1',
     },
 
+    'v8_verify_csa': {
+      'gn_args': 'v8_enable_verify_csa=true',
+    },
+
     'x64': {
       'gn_args': 'target_cpu="x64"',
       'gyp_defines': 'target_arch=x64',
diff --git a/src/DEPS b/src/DEPS
index 9114669..e9026b1 100644
--- a/src/DEPS
+++ b/src/DEPS
@@ -10,7 +10,9 @@
   "+src/heap/heap-inl.h",
   "-src/inspector",
   "-src/interpreter",
+  "+src/interpreter/bytecode-array-accessor.h",
   "+src/interpreter/bytecode-array-iterator.h",
+  "+src/interpreter/bytecode-array-random-iterator.h",
   "+src/interpreter/bytecode-decoder.h",
   "+src/interpreter/bytecode-flags.h",
   "+src/interpreter/bytecode-register.h",
diff --git a/src/accessors.cc b/src/accessors.cc
index 9ec24b8..1f2ce97 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -167,16 +167,38 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
 
+  DCHECK(Utils::OpenHandle(*name)->SameValue(isolate->heap()->length_string()));
+
   Handle<JSReceiver> object = Utils::OpenHandle(*info.Holder());
   Handle<JSArray> array = Handle<JSArray>::cast(object);
   Handle<Object> length_obj = Utils::OpenHandle(*val);
 
+  bool was_readonly = JSArray::HasReadOnlyLength(array);
+
   uint32_t length = 0;
   if (!JSArray::AnythingToArrayLength(isolate, length_obj, &length)) {
     isolate->OptionalRescheduleException(false);
     return;
   }
 
+  if (!was_readonly && V8_UNLIKELY(JSArray::HasReadOnlyLength(array)) &&
+      length != array->length()->Number()) {
+    // AnythingToArrayLength() may have called setter re-entrantly and modified
+    // its property descriptor. Don't perform this check if "length" was
+    // previously readonly, as this may have been called during
+    // DefineOwnPropertyIgnoreAttributes().
+    if (info.ShouldThrowOnError()) {
+      Factory* factory = isolate->factory();
+      isolate->Throw(*factory->NewTypeError(
+          MessageTemplate::kStrictReadOnlyProperty, Utils::OpenHandle(*name),
+          i::Object::TypeOf(isolate, object), object));
+      isolate->OptionalRescheduleException(false);
+    } else {
+      info.GetReturnValue().Set(false);
+    }
+    return;
+  }
+
   JSArray::SetLength(array, length);
 
   uint32_t actual_new_len = 0;
@@ -518,34 +540,6 @@
 
 
 //
-// Accessors::ScriptIsEmbedderDebugScript
-//
-
-
-void Accessors::ScriptIsEmbedderDebugScriptGetter(
-    v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
-  DisallowHeapAllocation no_allocation;
-  HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.Holder());
-  bool is_embedder_debug_script = Script::cast(JSValue::cast(object)->value())
-                                      ->origin_options()
-                                      .IsEmbedderDebugScript();
-  Object* res = *isolate->factory()->ToBoolean(is_embedder_debug_script);
-  info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
-}
-
-
-Handle<AccessorInfo> Accessors::ScriptIsEmbedderDebugScriptInfo(
-    Isolate* isolate, PropertyAttributes attributes) {
-  Handle<String> name(isolate->factory()->InternalizeOneByteString(
-      STATIC_CHAR_VECTOR("is_debugger_script")));
-  return MakeAccessor(isolate, name, &ScriptIsEmbedderDebugScriptGetter,
-                      nullptr, attributes);
-}
-
-
-//
 // Accessors::ScriptGetContextData
 //
 
@@ -829,8 +823,8 @@
   Handle<FixedArray> array = factory->NewFixedArray(argument_count);
   bool should_deoptimize = false;
   for (int i = 0; i < argument_count; ++i) {
-    // If we materialize any object, we should deopt because we might alias
-    // an object that was eliminated by escape analysis.
+    // If we materialize any object, we should deoptimize the frame because we
+    // might alias an object that was eliminated by escape analysis.
     should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
     Handle<Object> value = iter->GetValue();
     array->set(i, *value);
@@ -839,7 +833,7 @@
   arguments->set_elements(*array);
 
   if (should_deoptimize) {
-    translated_values.StoreMaterializedValuesAndDeopt();
+    translated_values.StoreMaterializedValuesAndDeopt(frame);
   }
 
   // Return the freshly allocated arguments object.
@@ -850,10 +844,10 @@
 static int FindFunctionInFrame(JavaScriptFrame* frame,
                                Handle<JSFunction> function) {
   DisallowHeapAllocation no_allocation;
-  List<JSFunction*> functions(2);
-  frame->GetFunctions(&functions);
-  for (int i = functions.length() - 1; i >= 0; i--) {
-    if (functions[i] == *function) return i;
+  List<FrameSummary> frames(2);
+  frame->Summarize(&frames);
+  for (int i = frames.length() - 1; i >= 0; i--) {
+    if (*frames[i].AsJavaScript().function() == *function) return i;
   }
   return -1;
 }
@@ -957,19 +951,16 @@
 class FrameFunctionIterator {
  public:
   FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise)
-      : isolate_(isolate),
-        frame_iterator_(isolate),
-        functions_(2),
-        index_(0) {
-    GetFunctions();
+      : isolate_(isolate), frame_iterator_(isolate), frames_(2), index_(0) {
+    GetFrames();
   }
   JSFunction* next() {
     while (true) {
-      if (functions_.length() == 0) return NULL;
-      JSFunction* next_function = functions_[index_];
+      if (frames_.length() == 0) return NULL;
+      JSFunction* next_function = *frames_[index_].AsJavaScript().function();
       index_--;
       if (index_ < 0) {
-        GetFunctions();
+        GetFrames();
       }
       // Skip functions from other origins.
       if (!AllowAccessToFunction(isolate_->context(), next_function)) continue;
@@ -990,18 +981,18 @@
   }
 
  private:
-  void GetFunctions() {
-    functions_.Rewind(0);
+  void GetFrames() {
+    frames_.Rewind(0);
     if (frame_iterator_.done()) return;
     JavaScriptFrame* frame = frame_iterator_.frame();
-    frame->GetFunctions(&functions_);
-    DCHECK(functions_.length() > 0);
+    frame->Summarize(&frames_);
+    DCHECK(frames_.length() > 0);
     frame_iterator_.Advance();
-    index_ = functions_.length() - 1;
+    index_ = frames_.length() - 1;
   }
   Isolate* isolate_;
   JavaScriptFrameIterator frame_iterator_;
-  List<JSFunction*> functions_;
+  List<FrameSummary> frames_;
   int index_;
 };
 
@@ -1025,10 +1016,11 @@
     if (caller == NULL) return MaybeHandle<JSFunction>();
   } while (caller->shared()->is_toplevel());
 
-  // If caller is a built-in function and caller's caller is also built-in,
+  // If caller is not user code and caller's caller is also not user code,
   // use that instead.
   JSFunction* potential_caller = caller;
-  while (potential_caller != NULL && potential_caller->shared()->IsBuiltin()) {
+  while (potential_caller != NULL &&
+         !potential_caller->shared()->IsUserJavaScript()) {
     caller = potential_caller;
     potential_caller = it.next();
   }
@@ -1210,7 +1202,8 @@
   // If stack is still an accessor (this could have changed in the meantime
   // since FormatStackTrace can execute arbitrary JS), replace it with a data
   // property.
-  Handle<Object> receiver = Utils::OpenHandle(*info.This());
+  Handle<Object> receiver =
+      Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
   Handle<Name> name = Utils::OpenHandle(*key);
   if (IsAccessor(receiver, name, holder)) {
     result = ReplaceAccessorWithDataProperty(isolate, receiver, holder, name,
@@ -1236,8 +1229,8 @@
     const v8::PropertyCallbackInfo<v8::Boolean>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<JSObject> obj =
-      Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
+  Handle<JSObject> obj = Handle<JSObject>::cast(
+      Utils::OpenHandle(*v8::Local<v8::Value>(info.This())));
 
   // Clear internal properties to avoid memory leaks.
   Handle<Symbol> stack_trace_symbol = isolate->factory()->stack_trace_symbol();
diff --git a/src/accessors.h b/src/accessors.h
index f53d309..a4d51fd 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -8,7 +8,6 @@
 #include "include/v8.h"
 #include "src/allocation.h"
 #include "src/globals.h"
-#include "src/handles.h"
 #include "src/property-details.h"
 
 namespace v8 {
@@ -16,6 +15,8 @@
 
 // Forward declarations.
 class AccessorInfo;
+template <typename T>
+class Handle;
 
 // The list of accessor descriptors. This is a second-order macro
 // taking a macro to be applied to all accessor descriptor names.
@@ -43,7 +44,6 @@
   V(ScriptType)                   \
   V(ScriptSourceUrl)              \
   V(ScriptSourceMappingUrl)       \
-  V(ScriptIsEmbedderDebugScript)  \
   V(StringLength)
 
 #define ACCESSOR_SETTER_LIST(V) \
diff --git a/src/allocation.cc b/src/allocation.cc
index 195a544..fde01f6 100644
--- a/src/allocation.cc
+++ b/src/allocation.cc
@@ -32,23 +32,6 @@
 }
 
 
-#ifdef DEBUG
-
-static void* invalid = static_cast<void*>(NULL);
-
-void* Embedded::operator new(size_t size) {
-  UNREACHABLE();
-  return invalid;
-}
-
-
-void Embedded::operator delete(void* p) {
-  UNREACHABLE();
-}
-
-#endif
-
-
 char* StrDup(const char* str) {
   int length = StrLength(str);
   char* result = NewArray<char>(length + 1);
diff --git a/src/allocation.h b/src/allocation.h
index e87a3f1..36019d9 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -26,24 +26,9 @@
   static void Delete(void* p);
 };
 
-
-// A macro is used for defining the base class used for embedded instances.
-// The reason is some compilers allocate a minimum of one word for the
-// superclass. The macro prevents the use of new & delete in debug mode.
-// In release mode we are not willing to pay this overhead.
-
-#ifdef DEBUG
-// Superclass for classes with instances allocated inside stack
-// activations or inside other objects.
-class Embedded {
- public:
-  void* operator new(size_t size);
-  void  operator delete(void* p);
-};
-#define BASE_EMBEDDED : public NON_EXPORTED_BASE(Embedded)
-#else
+// DEPRECATED
+// TODO(leszeks): Delete this during a quiet period
 #define BASE_EMBEDDED
-#endif
 
 
 // Superclass for classes only using static method functions.
diff --git a/src/api-arguments-inl.h b/src/api-arguments-inl.h
index bf72fc4..91ac253 100644
--- a/src/api-arguments-inl.h
+++ b/src/api-arguments-inl.h
@@ -10,6 +10,14 @@
 namespace v8 {
 namespace internal {
 
+#define SIDE_EFFECT_CHECK(ISOLATE, F, RETURN_TYPE)            \
+  do {                                                        \
+    if (ISOLATE->needs_side_effect_check() &&                 \
+        !PerformSideEffectCheck(ISOLATE, FUNCTION_ADDR(F))) { \
+      return Handle<RETURN_TYPE>();                           \
+    }                                                         \
+  } while (false)
+
 #define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F)                  \
   F(AccessorNameGetterCallback, "get", v8::Value, Object)          \
   F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
@@ -19,6 +27,7 @@
   Handle<InternalReturn> PropertyCallbackArguments::Call(Function f,          \
                                                          Handle<Name> name) { \
     Isolate* isolate = this->isolate();                                       \
+    SIDE_EFFECT_CHECK(isolate, f, InternalReturn);                            \
     RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function);        \
     VMState<EXTERNAL> state(isolate);                                         \
     ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));              \
@@ -43,6 +52,7 @@
   Handle<InternalReturn> PropertyCallbackArguments::Call(Function f,       \
                                                          uint32_t index) { \
     Isolate* isolate = this->isolate();                                    \
+    SIDE_EFFECT_CHECK(isolate, f, InternalReturn);                         \
     RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function);     \
     VMState<EXTERNAL> state(isolate);                                      \
     ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));           \
@@ -62,6 +72,7 @@
     GenericNamedPropertySetterCallback f, Handle<Name> name,
     Handle<Object> value) {
   Isolate* isolate = this->isolate();
+  SIDE_EFFECT_CHECK(isolate, f, Object);
   RuntimeCallTimerScope timer(
       isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
   VMState<EXTERNAL> state(isolate);
@@ -77,6 +88,7 @@
     GenericNamedPropertyDefinerCallback f, Handle<Name> name,
     const v8::PropertyDescriptor& desc) {
   Isolate* isolate = this->isolate();
+  SIDE_EFFECT_CHECK(isolate, f, Object);
   RuntimeCallTimerScope timer(
       isolate, &RuntimeCallStats::GenericNamedPropertyDefinerCallback);
   VMState<EXTERNAL> state(isolate);
@@ -92,6 +104,7 @@
                                                uint32_t index,
                                                Handle<Object> value) {
   Isolate* isolate = this->isolate();
+  SIDE_EFFECT_CHECK(isolate, f, Object);
   RuntimeCallTimerScope timer(isolate,
                               &RuntimeCallStats::IndexedPropertySetterCallback);
   VMState<EXTERNAL> state(isolate);
@@ -107,6 +120,7 @@
     IndexedPropertyDefinerCallback f, uint32_t index,
     const v8::PropertyDescriptor& desc) {
   Isolate* isolate = this->isolate();
+  SIDE_EFFECT_CHECK(isolate, f, Object);
   RuntimeCallTimerScope timer(
       isolate, &RuntimeCallStats::IndexedPropertyDefinerCallback);
   VMState<EXTERNAL> state(isolate);
@@ -121,6 +135,10 @@
 void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
                                      Handle<Name> name, Handle<Object> value) {
   Isolate* isolate = this->isolate();
+  if (isolate->needs_side_effect_check() &&
+      !PerformSideEffectCheck(isolate, FUNCTION_ADDR(f))) {
+    return;
+  }
   RuntimeCallTimerScope timer(isolate,
                               &RuntimeCallStats::AccessorNameSetterCallback);
   VMState<EXTERNAL> state(isolate);
@@ -131,5 +149,7 @@
   f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
 }
 
+#undef SIDE_EFFECT_CHECK
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/api-arguments.cc b/src/api-arguments.cc
index f8d6c8f..c7c54e5 100644
--- a/src/api-arguments.cc
+++ b/src/api-arguments.cc
@@ -4,6 +4,8 @@
 
 #include "src/api-arguments.h"
 
+#include "src/debug/debug.h"
+#include "src/objects-inl.h"
 #include "src/tracing/trace-event.h"
 #include "src/vm-state-inl.h"
 
@@ -12,6 +14,10 @@
 
 Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
   Isolate* isolate = this->isolate();
+  if (isolate->needs_side_effect_check() &&
+      !isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
+    return Handle<Object>();
+  }
   RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionCallback);
   VMState<EXTERNAL> state(isolate);
   ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
@@ -23,6 +29,10 @@
 Handle<JSObject> PropertyCallbackArguments::Call(
     IndexedPropertyEnumeratorCallback f) {
   Isolate* isolate = this->isolate();
+  if (isolate->needs_side_effect_check() &&
+      !isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
+    return Handle<JSObject>();
+  }
   RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::PropertyCallback);
   VMState<EXTERNAL> state(isolate);
   ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
@@ -31,5 +41,10 @@
   return GetReturnValue<JSObject>(isolate);
 }
 
+bool PropertyCallbackArguments::PerformSideEffectCheck(Isolate* isolate,
+                                                       Address function) {
+  return isolate->debug()->PerformSideEffectCheckForCallback(function);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/api-arguments.h b/src/api-arguments.h
index d6d1b95..6c9ad7a 100644
--- a/src/api-arguments.h
+++ b/src/api-arguments.h
@@ -136,6 +136,8 @@
   inline JSObject* holder() {
     return JSObject::cast(this->begin()[T::kHolderIndex]);
   }
+
+  bool PerformSideEffectCheck(Isolate* isolate, Address function);
 };
 
 class FunctionCallbackArguments
diff --git a/src/api-experimental.cc b/src/api-experimental.cc
index 934b27a..a9b5bd0 100644
--- a/src/api-experimental.cc
+++ b/src/api-experimental.cc
@@ -8,10 +8,11 @@
 
 #include "src/api-experimental.h"
 
-#include "include/v8.h"
 #include "include/v8-experimental.h"
+#include "include/v8.h"
 #include "src/api.h"
 #include "src/fast-accessor-assembler.h"
+#include "src/objects-inl.h"
 
 namespace {
 
diff --git a/src/api-experimental.h b/src/api-experimental.h
index bc0bc55..5b1bc1b 100644
--- a/src/api-experimental.h
+++ b/src/api-experimental.h
@@ -5,11 +5,11 @@
 #ifndef V8_API_EXPERIMENTAL_H_
 #define V8_API_EXPERIMENTAL_H_
 
-#include "src/handles.h"
-
 namespace v8 {
 namespace internal {
 class Code;
+template <typename T>
+class MaybeHandle;
 }  // internal;
 namespace experimental {
 class FastAccessorBuilder;
diff --git a/src/api-natives.cc b/src/api-natives.cc
index 3fe59e2..045ff47 100644
--- a/src/api-natives.cc
+++ b/src/api-natives.cc
@@ -395,6 +395,28 @@
   return result;
 }
 
+namespace {
+MaybeHandle<Object> GetInstancePrototype(Isolate* isolate,
+                                         Object* function_template) {
+  // Enter a new scope.  Recursion could otherwise create a lot of handles.
+  HandleScope scope(isolate);
+  Handle<JSFunction> parent_instance;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, parent_instance,
+      InstantiateFunction(
+          isolate,
+          handle(FunctionTemplateInfo::cast(function_template), isolate)),
+      JSFunction);
+  Handle<Object> instance_prototype;
+  // TODO(cbruni): decide what to do here.
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, instance_prototype,
+      JSObject::GetProperty(parent_instance,
+                            isolate->factory()->prototype_string()),
+      JSFunction);
+  return scope.CloseAndEscape(instance_prototype);
+}
+}  // namespace
 
 MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
                                             Handle<FunctionTemplateInfo> data,
@@ -406,11 +428,18 @@
       return Handle<JSFunction>::cast(result);
     }
   }
-  Handle<JSObject> prototype;
+  Handle<Object> prototype;
   if (!data->remove_prototype()) {
     Object* prototype_templ = data->prototype_template();
     if (prototype_templ->IsUndefined(isolate)) {
-      prototype = isolate->factory()->NewJSObject(isolate->object_function());
+      Object* prototype_provider_templ = data->prototype_provider_template();
+      if (prototype_provider_templ->IsUndefined(isolate)) {
+        prototype = isolate->factory()->NewJSObject(isolate->object_function());
+      } else {
+        ASSIGN_RETURN_ON_EXCEPTION(
+            isolate, prototype,
+            GetInstancePrototype(isolate, prototype_provider_templ), JSFunction);
+      }
     } else {
       ASSIGN_RETURN_ON_EXCEPTION(
           isolate, prototype,
@@ -422,22 +451,12 @@
     }
     Object* parent = data->parent_template();
     if (!parent->IsUndefined(isolate)) {
-      // Enter a new scope.  Recursion could otherwise create a lot of handles.
-      HandleScope scope(isolate);
-      Handle<JSFunction> parent_instance;
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, parent_instance,
-          InstantiateFunction(
-              isolate, handle(FunctionTemplateInfo::cast(parent), isolate)),
-          JSFunction);
-      // TODO(dcarney): decide what to do here.
       Handle<Object> parent_prototype;
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, parent_prototype,
-          JSObject::GetProperty(parent_instance,
-                                isolate->factory()->prototype_string()),
-          JSFunction);
-      JSObject::ForceSetPrototype(prototype, parent_prototype);
+      ASSIGN_RETURN_ON_EXCEPTION(isolate, parent_prototype,
+                                 GetInstancePrototype(isolate, parent),
+                                 JSFunction);
+      JSObject::ForceSetPrototype(Handle<JSObject>::cast(prototype),
+                                  parent_prototype);
     }
   }
   Handle<JSFunction> function = ApiNatives::CreateApiFunction(
@@ -519,8 +538,6 @@
   JSFunction::SetInitialMap(object_function, object_map,
                             isolate->factory()->null_value());
   object_map->set_is_access_check_needed(true);
-  object_map->set_is_callable();
-  object_map->set_is_constructor(true);
 
   Handle<JSObject> object = isolate->factory()->NewJSObject(object_function);
   JSObject::ForceSetPrototype(object, isolate->factory()->null_value());
@@ -531,7 +548,7 @@
 void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
                                  Handle<Name> name, Handle<Object> value,
                                  PropertyAttributes attributes) {
-  PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+  PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
   auto details_handle = handle(details.AsSmi(), isolate);
   Handle<Object> data[] = {name, details_handle, value};
   AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -543,7 +560,7 @@
                                  PropertyAttributes attributes) {
   auto value = handle(Smi::FromInt(intrinsic), isolate);
   auto intrinsic_marker = isolate->factory()->true_value();
-  PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+  PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
   auto details_handle = handle(details.AsSmi(), isolate);
   Handle<Object> data[] = {name, intrinsic_marker, details_handle, value};
   AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -556,7 +573,7 @@
                                      Handle<FunctionTemplateInfo> getter,
                                      Handle<FunctionTemplateInfo> setter,
                                      PropertyAttributes attributes) {
-  PropertyDetails details(attributes, ACCESSOR, 0, PropertyCellType::kNoCell);
+  PropertyDetails details(kAccessor, attributes, 0, PropertyCellType::kNoCell);
   auto details_handle = handle(details.AsSmi(), isolate);
   Handle<Object> data[] = {name, details_handle, getter, setter};
   AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -606,7 +623,7 @@
 
   if (prototype->IsTheHole(isolate)) {
     prototype = isolate->factory()->NewFunctionPrototype(result);
-  } else {
+  } else if (obj->prototype_provider_template()->IsUndefined(isolate)) {
     JSObject::AddProperty(Handle<JSObject>::cast(prototype),
                           isolate->factory()->constructor_string(), result,
                           DONT_ENUM);
@@ -656,6 +673,12 @@
 
   // Mark as undetectable if needed.
   if (obj->undetectable()) {
+    // We only allow callable undetectable receivers here, since this whole
+    // undetectable business is only to support document.all, which is both
+    // undetectable and callable. If we ever see the need to have an object
+    // that is undetectable but not callable, we need to update types.h to
+    // allow encoding this.
+    CHECK(!obj->instance_call_handler()->IsUndefined(isolate));
     map->set_is_undetectable();
   }
 
diff --git a/src/api.cc b/src/api.cc
index da7f2ef..beefd61 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -29,11 +29,13 @@
 #include "src/bootstrapper.h"
 #include "src/char-predicates-inl.h"
 #include "src/code-stubs.h"
+#include "src/compiler-dispatcher/compiler-dispatcher.h"
 #include "src/compiler.h"
 #include "src/context-measure.h"
 #include "src/contexts.h"
 #include "src/conversions-inl.h"
 #include "src/counters.h"
+#include "src/debug/debug-coverage.h"
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
 #include "src/execution.h"
@@ -46,6 +48,7 @@
 #include "src/json-parser.h"
 #include "src/json-stringifier.h"
 #include "src/messages.h"
+#include "src/objects-inl.h"
 #include "src/parsing/parser.h"
 #include "src/parsing/scanner-character-streams.h"
 #include "src/pending-compilation-error-handler.h"
@@ -97,6 +100,15 @@
   ENTER_V8(isolate);                                                 \
   bool has_pending_exception = false
 
+#define PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, T)       \
+  if (IsExecutionTerminatingCheck(isolate)) {                                \
+    return MaybeLocal<T>();                                                  \
+  }                                                                          \
+  InternalEscapableScope handle_scope(isolate);                              \
+  CallDepthScope<false> call_depth_scope(isolate, v8::Local<v8::Context>()); \
+  ENTER_V8(isolate);                                                         \
+  bool has_pending_exception = false
+
 #define PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
                                            bailout_value, HandleScopeClass,    \
                                            do_callback)                        \
@@ -141,6 +153,23 @@
   PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
                                      false, i::HandleScope, false)
 
+#ifdef DEBUG
+#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate)                    \
+  i::VMState<v8::OTHER> __state__((isolate));                       \
+  i::DisallowJavascriptExecutionDebugOnly __no_script__((isolate)); \
+  i::DisallowExceptions __no_exceptions__((isolate))
+
+#define ENTER_V8_FOR_NEW_CONTEXT(isolate)     \
+  i::VMState<v8::OTHER> __state__((isolate)); \
+  i::DisallowExceptions __no_exceptions__((isolate))
+#else
+#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate) \
+  i::VMState<v8::OTHER> __state__((isolate));
+
+#define ENTER_V8_FOR_NEW_CONTEXT(isolate) \
+  i::VMState<v8::OTHER> __state__((isolate));
+#endif  // DEBUG
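
These two macros split the former ENTER_V8 call sites by contract:
ENTER_V8_NO_SCRIPT_NO_EXCEPTION is for API entry points that must neither
execute script nor leave a pending exception (the template accessors converted
below), while ENTER_V8_FOR_NEW_CONTEXT still allows script, since context
bootstrapping runs JS, but keeps the no-exception assertion. Both reduce to
the plain VMState change in release builds; the Disallow* scopes are
debug-only checks:

  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);  // simple getters/setters
  ENTER_V8_FOR_NEW_CONTEXT(isolate);         // bootstrapping may run script
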
+
 #define EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, value) \
   do {                                                 \
     if (has_pending_exception) {                       \
@@ -243,7 +272,7 @@
 
 static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
                                              i::Handle<i::Script> script) {
-  i::Handle<i::Object> scriptName(i::Script::GetNameOrSourceURL(script));
+  i::Handle<i::Object> scriptName(script->GetNameOrSourceURL(), isolate);
   i::Handle<i::Object> source_map_url(script->source_mapping_url(), isolate);
   v8::Isolate* v8_isolate =
       reinterpret_cast<v8::Isolate*>(script->GetIsolate());
@@ -254,9 +283,10 @@
       v8::Integer::New(v8_isolate, script->column_offset()),
       v8::Boolean::New(v8_isolate, options.IsSharedCrossOrigin()),
       v8::Integer::New(v8_isolate, script->id()),
-      v8::Boolean::New(v8_isolate, options.IsEmbedderDebugScript()),
       Utils::ToLocal(source_map_url),
-      v8::Boolean::New(v8_isolate, options.IsOpaque()));
+      v8::Boolean::New(v8_isolate, options.IsOpaque()),
+      v8::Boolean::New(v8_isolate, script->type() == i::Script::TYPE_WASM),
+      v8::Boolean::New(v8_isolate, options.IsModule()));
   return origin;
 }
 
@@ -452,6 +482,7 @@
 struct SnapshotCreatorData {
   explicit SnapshotCreatorData(Isolate* isolate)
       : isolate_(isolate),
+        default_context_(),
         contexts_(isolate),
         templates_(isolate),
         created_(false) {}
@@ -462,8 +493,10 @@
 
   ArrayBufferAllocator allocator_;
   Isolate* isolate_;
+  Persistent<Context> default_context_;
   PersistentValueVector<Context> contexts_;
   PersistentValueVector<Template> templates_;
+  std::vector<SerializeInternalFieldsCallback> internal_fields_serializers_;
   bool created_;
 };
 
@@ -500,7 +533,18 @@
   return SnapshotCreatorData::cast(data_)->isolate_;
 }
 
-size_t SnapshotCreator::AddContext(Local<Context> context) {
+void SnapshotCreator::SetDefaultContext(Local<Context> context) {
+  DCHECK(!context.IsEmpty());
+  SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
+  DCHECK(!data->created_);
+  DCHECK(data->default_context_.IsEmpty());
+  Isolate* isolate = data->isolate_;
+  CHECK_EQ(isolate, context->GetIsolate());
+  data->default_context_.Reset(isolate, context);
+}
+
+size_t SnapshotCreator::AddContext(Local<Context> context,
+                                   SerializeInternalFieldsCallback callback) {
   DCHECK(!context.IsEmpty());
   SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
   DCHECK(!data->created_);
@@ -508,6 +552,7 @@
   CHECK_EQ(isolate, context->GetIsolate());
   size_t index = static_cast<int>(data->contexts_.Size());
   data->contexts_.Append(context);
+  data->internal_fields_serializers_.push_back(callback);
   return index;
 }
 
@@ -523,11 +568,13 @@
 }
 
 StartupData SnapshotCreator::CreateBlob(
-    SnapshotCreator::FunctionCodeHandling function_code_handling,
-    SerializeInternalFieldsCallback callback) {
+    SnapshotCreator::FunctionCodeHandling function_code_handling) {
   SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(data->isolate_);
   DCHECK(!data->created_);
+  DCHECK(!data->default_context_.IsEmpty());
+
+  int num_additional_contexts = static_cast<int>(data->contexts_.Size());
 
   {
     int num_templates = static_cast<int>(data->templates_.Size());
@@ -539,6 +586,18 @@
     }
     isolate->heap()->SetSerializedTemplates(*templates);
     data->templates_.Clear();
+
+    // We need to store the global proxy size upfront in case we need the
+    // bootstrapper to create a global proxy before we deserialize the context.
+    i::Handle<i::FixedArray> global_proxy_sizes =
+        isolate->factory()->NewFixedArray(num_additional_contexts, i::TENURED);
+    for (int i = 0; i < num_additional_contexts; i++) {
+      i::Handle<i::Context> context =
+          v8::Utils::OpenHandle(*data->contexts_.Get(i));
+      global_proxy_sizes->set(i,
+                              i::Smi::FromInt(context->global_proxy()->Size()));
+    }
+    isolate->heap()->SetSerializedGlobalProxySizes(*global_proxy_sizes);
   }
 
   // If we don't do this then we end up with a stray root pointing at the
@@ -549,15 +608,20 @@
 
   i::DisallowHeapAllocation no_gc_from_here_on;
 
-  int num_contexts = static_cast<int>(data->contexts_.Size());
-  i::List<i::Object*> contexts(num_contexts);
-  for (int i = 0; i < num_contexts; i++) {
+  i::List<i::Object*> contexts(num_additional_contexts);
+  i::Object* default_context;
+  {
     i::HandleScope scope(isolate);
-    i::Handle<i::Context> context =
-        v8::Utils::OpenHandle(*data->contexts_.Get(i));
-    contexts.Add(*context);
+    default_context =
+        *v8::Utils::OpenHandle(*data->default_context_.Get(data->isolate_));
+    data->default_context_.Reset();
+    for (int i = 0; i < num_additional_contexts; i++) {
+      i::Handle<i::Context> context =
+          v8::Utils::OpenHandle(*data->contexts_.Get(i));
+      contexts.Add(*context);
+    }
+    data->contexts_.Clear();
   }
-  data->contexts_.Clear();
 
 #ifdef DEBUG
   i::ExternalReferenceTable::instance(isolate)->ResetCount();
@@ -567,11 +631,20 @@
   startup_serializer.SerializeStrongReferences();
 
   // Serialize each context with a new partial serializer.
-  i::List<i::SnapshotData*> context_snapshots(num_contexts);
-  for (int i = 0; i < num_contexts; i++) {
-    i::PartialSerializer partial_serializer(isolate, &startup_serializer,
-                                            callback);
-    partial_serializer.Serialize(&contexts[i]);
+  i::List<i::SnapshotData*> context_snapshots(num_additional_contexts + 1);
+
+  {
+    // The default snapshot does not support internal fields.
+    i::PartialSerializer partial_serializer(
+        isolate, &startup_serializer, v8::SerializeInternalFieldsCallback());
+    partial_serializer.Serialize(&default_context, false);
+    context_snapshots.Add(new i::SnapshotData(&partial_serializer));
+  }
+
+  for (int i = 0; i < num_additional_contexts; i++) {
+    i::PartialSerializer partial_serializer(
+        isolate, &startup_serializer, data->internal_fields_serializers_[i]);
+    partial_serializer.Serialize(&contexts[i], true);
     context_snapshots.Add(new i::SnapshotData(&partial_serializer));
   }
 
@@ -611,7 +684,7 @@
           !RunExtraCode(isolate, context, embedded_source, "<embedded>")) {
         return result;
       }
-      snapshot_creator.AddContext(context);
+      snapshot_creator.SetDefaultContext(context);
     }
     result = snapshot_creator.CreateBlob(
         SnapshotCreator::FunctionCodeHandling::kClear);
@@ -652,7 +725,7 @@
       HandleScope handle_scope(isolate);
       isolate->ContextDisposedNotification(false);
       Local<Context> context = Context::New(isolate);
-      snapshot_creator.AddContext(context);
+      snapshot_creator.SetDefaultContext(context);
     }
     result = snapshot_creator.CreateBlob(
         SnapshotCreator::FunctionCodeHandling::kKeep);
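
Taken together, the SnapshotCreator changes require exactly one default
context per blob and allow any number of additional contexts, each with its
own internal-fields serializer. A minimal sketch, assuming the v8.h
declarations in this tree (error handling elided):

  v8::SnapshotCreator creator;
  v8::Isolate* isolate = creator.GetIsolate();
  size_t index;
  {
    v8::HandleScope scope(isolate);
    // Mandatory: serialized without internal-field support.
    creator.SetDefaultContext(v8::Context::New(isolate));
    // Optional extras; the returned index is 0-based over these only.
    index = creator.AddContext(v8::Context::New(isolate),
                               v8::SerializeInternalFieldsCallback());
  }
  v8::StartupData blob =
      creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);

  // In an isolate later created with CreateParams::snapshot_blob = &blob
  // (call it isolate2), the same index is passed straight back;
  // FromSnapshot applies the +1 shift past the default context internally.
  v8::MaybeLocal<v8::Context> ctx = v8::Context::FromSnapshot(
      isolate2, index, v8::DeserializeInternalFieldsCallback(),
      nullptr, v8::MaybeLocal<v8::Value>());
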
@@ -929,6 +1002,12 @@
   i::HandleScope::CloseScope(isolate_, prev_next_, prev_limit_);
 }
 
+V8_NORETURN void* HandleScope::operator new(size_t) {
+  base::OS::Abort();
+  abort();
+}
+
+void HandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
 
 int HandleScope::NumberOfHandles(Isolate* isolate) {
   return i::HandleScope::NumberOfHandles(
@@ -967,6 +1046,13 @@
   return escape_slot_;
 }
 
+V8_NORETURN void* EscapableHandleScope::operator new(size_t) {
+  base::OS::Abort();
+  abort();
+}
+
+void EscapableHandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
+
 SealHandleScope::SealHandleScope(Isolate* isolate)
     : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
   i::HandleScopeData* current = isolate_->handle_scope_data();
@@ -985,11 +1071,17 @@
   current->sealed_level = prev_sealed_level_;
 }
 
+V8_NORETURN void* SealHandleScope::operator new(size_t) {
+  base::OS::Abort();
+  abort();
+}
+
+void SealHandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
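
HandleScope, EscapableHandleScope, SealHandleScope (and TryCatch further down)
are RAII types whose correctness depends on strict stack discipline; declaring
aborting operator new/delete turns accidental heap allocation into an
immediate abort rather than corrupted handle bookkeeping:

  v8::HandleScope scope(isolate);   // OK: automatic storage
  // new v8::HandleScope(isolate);  // would hit base::OS::Abort()
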
 
 void Context::Enter() {
   i::Handle<i::Context> env = Utils::OpenHandle(this);
   i::Isolate* isolate = env->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
   impl->EnterContext(env);
   impl->SaveContext(isolate->context());
@@ -1000,7 +1092,7 @@
 void Context::Exit() {
   i::Handle<i::Context> env = Utils::OpenHandle(this);
   i::Isolate* isolate = env->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
   if (!Utils::ApiCheck(impl->LastEnteredContextWas(env),
                        "v8::Context::Exit()",
@@ -1099,7 +1191,7 @@
                    v8::PropertyAttribute attribute) {
   auto templ = Utils::OpenHandle(this);
   i::Isolate* isolate = templ->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   auto value_obj = Utils::OpenHandle(*value);
   CHECK(!value_obj->IsJSReceiver() || value_obj->IsTemplateInfo());
@@ -1130,7 +1222,7 @@
   DCHECK_EQ(v8::DEFAULT, access_control);
   auto templ = Utils::OpenHandle(this);
   auto isolate = templ->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   DCHECK(!name.IsEmpty());
   DCHECK(!getter.IsEmpty() || !setter.IsEmpty());
   i::HandleScope scope(isolate);
@@ -1154,7 +1246,7 @@
 
 Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
   i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
                               i_isolate);
   if (result->IsUndefined(i_isolate)) {
@@ -1166,6 +1258,16 @@
   return ToApiHandle<ObjectTemplate>(result);
 }
 
+void FunctionTemplate::SetPrototypeProviderTemplate(
+    Local<FunctionTemplate> prototype_provider) {
+  i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+  i::Handle<i::Object> result = Utils::OpenHandle(*prototype_provider);
+  auto info = Utils::OpenHandle(this);
+  CHECK(info->prototype_template()->IsUndefined(i_isolate));
+  CHECK(info->parent_template()->IsUndefined(i_isolate));
+  info->set_prototype_provider_template(*result);
+}
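
A sketch of the new API from the embedder side; the CHECKs make
SetPrototypeProviderTemplate mutually exclusive with PrototypeTemplate() and
Inherit() (names below are illustrative):

  v8::Local<v8::FunctionTemplate> provider = v8::FunctionTemplate::New(isolate);
  v8::Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
  // The function created from templ shares the prototype object of the
  // function created from provider (resolved via GetInstancePrototype in
  // api-natives.cc) instead of allocating its own.
  templ->SetPrototypeProviderTemplate(provider);
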
 
 static void EnsureNotInstantiated(i::Handle<i::FunctionTemplateInfo> info,
                                   const char* func) {
@@ -1177,8 +1279,9 @@
 void FunctionTemplate::Inherit(v8::Local<FunctionTemplate> value) {
   auto info = Utils::OpenHandle(this);
   EnsureNotInstantiated(info, "v8::FunctionTemplate::Inherit");
-  i::Isolate* isolate = info->GetIsolate();
-  ENTER_V8(isolate);
+  i::Isolate* i_isolate = info->GetIsolate();
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+  CHECK(info->prototype_provider_template()->IsUndefined(i_isolate));
   info->set_parent_template(*Utils::OpenHandle(*value));
 }
 
@@ -1193,7 +1296,7 @@
       i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
   InitializeFunctionTemplate(obj);
   obj->set_do_not_cache(do_not_cache);
-  int next_serial_number = 0;
+  int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber;
   if (!do_not_cache) {
     next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
   }
@@ -1224,7 +1327,7 @@
   // Changes to the environment cannot be captured in the snapshot. Expect no
   // function templates when the isolate is created for serialization.
   LOG_API(i_isolate, FunctionTemplate, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   auto templ = FunctionTemplateNew(i_isolate, callback, nullptr, data,
                                    signature, length, false);
   if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
@@ -1252,7 +1355,7 @@
     v8::Local<Signature> signature, int length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, FunctionTemplate, NewWithFastHandler);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   return FunctionTemplateNew(i_isolate, callback, fast_handler, data, signature,
                              length, false);
 }
@@ -1261,8 +1364,8 @@
     Isolate* isolate, FunctionCallback callback, Local<Private> cache_property,
     Local<Value> data, Local<Signature> signature, int length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  LOG_API(i_isolate, FunctionTemplate, NewWithFastHandler);
-  ENTER_V8(i_isolate);
+  LOG_API(i_isolate, FunctionTemplate, NewWithCache);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   return FunctionTemplateNew(i_isolate, callback, nullptr, data, signature,
                              length, false, cache_property);
 }
@@ -1291,7 +1394,7 @@
   auto info = Utils::OpenHandle(this);
   EnsureNotInstantiated(info, "v8::FunctionTemplate::SetCallHandler");
   i::Isolate* isolate = info->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::Struct> struct_obj =
       isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
@@ -1363,7 +1466,7 @@
     return Local<ObjectTemplate>();
   }
   i::Isolate* isolate = handle->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   if (handle->instance_template()->IsUndefined(isolate)) {
     Local<ObjectTemplate> templ =
         ObjectTemplate::New(isolate, ToApiHandle<FunctionTemplate>(handle));
@@ -1379,7 +1482,7 @@
   auto info = Utils::OpenHandle(this);
   EnsureNotInstantiated(info, "v8::FunctionTemplate::SetLength");
   auto isolate = info->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   info->set_length(length);
 }
 
@@ -1388,7 +1491,7 @@
   auto info = Utils::OpenHandle(this);
   EnsureNotInstantiated(info, "v8::FunctionTemplate::SetClassName");
   auto isolate = info->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   info->set_class_name(*Utils::OpenHandle(*name));
 }
 
@@ -1397,7 +1500,7 @@
   auto info = Utils::OpenHandle(this);
   EnsureNotInstantiated(info, "v8::FunctionTemplate::SetAcceptAnyReceiver");
   auto isolate = info->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   info->set_accept_any_receiver(value);
 }
 
@@ -1406,7 +1509,7 @@
   auto info = Utils::OpenHandle(this);
   EnsureNotInstantiated(info, "v8::FunctionTemplate::SetHiddenPrototype");
   auto isolate = info->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   info->set_hidden_prototype(value);
 }
 
@@ -1415,7 +1518,7 @@
   auto info = Utils::OpenHandle(this);
   EnsureNotInstantiated(info, "v8::FunctionTemplate::ReadOnlyPrototype");
   auto isolate = info->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   info->set_read_only_prototype(true);
 }
 
@@ -1424,7 +1527,7 @@
   auto info = Utils::OpenHandle(this);
   EnsureNotInstantiated(info, "v8::FunctionTemplate::RemovePrototype");
   auto isolate = info->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   info->set_remove_prototype(true);
 }
 
@@ -1446,7 +1549,7 @@
     i::Isolate* isolate, v8::Local<FunctionTemplate> constructor,
     bool do_not_cache) {
   LOG_API(isolate, ObjectTemplate, New);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::Handle<i::Struct> struct_obj =
       isolate->factory()->NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
   i::Handle<i::ObjectTemplateInfo> obj =
@@ -1511,7 +1614,7 @@
                                 bool replace_on_access) {
   auto info = Utils::OpenHandle(template_obj);
   auto isolate = info->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   auto obj =
       MakeAccessorInfo(name, getter, setter, data, settings, attribute,
@@ -1558,7 +1661,7 @@
                                         PropertyAttribute attribute) {
   auto templ = Utils::OpenHandle(this);
   i::Isolate* isolate = templ->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
                                  intrinsic,
@@ -1631,7 +1734,7 @@
     Descriptor descriptor, Deleter remover, Enumerator enumerator,
     Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
   i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   auto cons = EnsureConstructor(isolate, templ);
   EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
@@ -1660,7 +1763,7 @@
 
 void ObjectTemplate::MarkAsUndetectable() {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   auto cons = EnsureConstructor(isolate, this);
   EnsureNotInstantiated(cons, "v8::ObjectTemplate::MarkAsUndetectable");
@@ -1671,7 +1774,7 @@
 void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
                                             Local<Value> data) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   auto cons = EnsureConstructor(isolate, this);
   EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetAccessCheckCallback");
@@ -1700,7 +1803,7 @@
     const IndexedPropertyHandlerConfiguration& indexed_handler,
     Local<Value> data) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   auto cons = EnsureConstructor(isolate, this);
   EnsureNotInstantiated(
@@ -1736,7 +1839,7 @@
 void ObjectTemplate::SetHandler(
     const IndexedPropertyHandlerConfiguration& config) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   auto cons = EnsureConstructor(isolate, this);
   EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetHandler");
@@ -1751,7 +1854,7 @@
 void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
                                               Local<Value> data) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   auto cons = EnsureConstructor(isolate, this);
   EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetCallAsFunctionHandler");
@@ -1780,7 +1883,7 @@
                        "Invalid internal field count")) {
     return;
   }
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   if (value > 0) {
     // The internal field count is set by the constructor function's
     // construct code, so we ensure that there is a constructor
@@ -1796,7 +1899,7 @@
 
 void ObjectTemplate::SetImmutableProto() {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   Utils::OpenHandle(this)->set_immutable_proto(true);
 }
 
@@ -2003,8 +2106,7 @@
 }
 
 MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
-    Isolate* v8_isolate, Source* source, CompileOptions options,
-    bool is_module) {
+    Isolate* v8_isolate, Source* source, CompileOptions options) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
   PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, ScriptCompiler, CompileUnbound,
                                      UnboundScript);
@@ -2049,7 +2151,7 @@
     result = i::Compiler::GetSharedFunctionInfoForScript(
         str, name_obj, line_offset, column_offset, source->resource_options,
         source_map_url, isolate->native_context(), NULL, &script_data, options,
-        i::NOT_NATIVES_CODE, is_module);
+        i::NOT_NATIVES_CODE);
     has_pending_exception = result.is_null();
     if (has_pending_exception && script_data != NULL) {
       // This case won't happen during normal operation; we have compiled
@@ -2078,24 +2180,34 @@
 
 MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundScript(
     Isolate* v8_isolate, Source* source, CompileOptions options) {
-  return CompileUnboundInternal(v8_isolate, source, options, false);
+  Utils::ApiCheck(
+      !source->GetResourceOptions().IsModule(),
+      "v8::ScriptCompiler::CompileUnboundScript",
+      "v8::ScriptCompiler::CompileModule must be used to compile modules");
+  return CompileUnboundInternal(v8_isolate, source, options);
 }
 
 
 Local<UnboundScript> ScriptCompiler::CompileUnbound(Isolate* v8_isolate,
                                                     Source* source,
                                                     CompileOptions options) {
-  RETURN_TO_LOCAL_UNCHECKED(
-      CompileUnboundInternal(v8_isolate, source, options, false),
-      UnboundScript);
+  Utils::ApiCheck(
+      !source->GetResourceOptions().IsModule(),
+      "v8::ScriptCompiler::CompileUnbound",
+      "v8::ScriptCompiler::CompileModule must be used to compile modules");
+  RETURN_TO_LOCAL_UNCHECKED(CompileUnboundInternal(v8_isolate, source, options),
+                            UnboundScript);
 }
 
 
 MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
                                            Source* source,
                                            CompileOptions options) {
+  Utils::ApiCheck(
+      !source->GetResourceOptions().IsModule(), "v8::ScriptCompiler::Compile",
+      "v8::ScriptCompiler::CompileModule must be used to compile modules");
   auto isolate = context->GetIsolate();
-  auto maybe = CompileUnboundInternal(isolate, source, options, false);
+  auto maybe = CompileUnboundInternal(isolate, source, options);
   Local<UnboundScript> result;
   if (!maybe.ToLocal(&result)) return MaybeLocal<Script>();
   v8::Context::Scope scope(context);
@@ -2115,7 +2227,10 @@
                                                  Source* source) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
 
-  auto maybe = CompileUnboundInternal(isolate, source, kNoCompileOptions, true);
+  Utils::ApiCheck(source->GetResourceOptions().IsModule(),
+                  "v8::ScriptCompiler::CompileModule",
+                  "Invalid ScriptOrigin: is_module must be true");
+  auto maybe = CompileUnboundInternal(isolate, source, kNoCompileOptions);
   Local<UnboundScript> unbound;
   if (!maybe.ToLocal(&unbound)) return MaybeLocal<Module>();
 
@@ -2171,9 +2286,14 @@
                         Function);
   TRACE_EVENT0("v8", "V8.ScriptCompiler");
   i::Handle<i::String> source_string;
+  int parameters_end_pos = i::kNoSourcePosition;
   auto factory = isolate->factory();
   if (arguments_count) {
-    source_string = factory->NewStringFromStaticChars("(function(");
+    if (i::FLAG_harmony_function_tostring) {
+      source_string = factory->NewStringFromStaticChars("(function anonymous(");
+    } else {
+      source_string = factory->NewStringFromStaticChars("(function(");
+    }
     for (size_t i = 0; i < arguments_count; ++i) {
       IsIdentifierHelper helper;
       if (!helper.Check(*Utils::OpenHandle(*arguments[i]))) {
@@ -2191,12 +2311,24 @@
                                       ',')).ToHandle(&source_string);
       RETURN_ON_FAILED_EXECUTION(Function);
     }
-    auto brackets = factory->NewStringFromStaticChars("){");
+    i::Handle<i::String> brackets;
+    if (i::FLAG_harmony_function_tostring) {
+      brackets = factory->NewStringFromStaticChars("\n) {");
+      parameters_end_pos = source_string->length() + 1;
+    } else {
+      brackets = factory->NewStringFromStaticChars("){");
+    }
     has_pending_exception = !factory->NewConsString(source_string, brackets)
                                  .ToHandle(&source_string);
     RETURN_ON_FAILED_EXECUTION(Function);
   } else {
-    source_string = factory->NewStringFromStaticChars("(function(){");
+    if (i::FLAG_harmony_function_tostring) {
+      source_string =
+          factory->NewStringFromStaticChars("(function anonymous(\n) {");
+      parameters_end_pos = source_string->length() - 3;
+    } else {
+      source_string = factory->NewStringFromStaticChars("(function(){");
+    }
   }
 
   int scope_position = source_string->length();
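
For reference, the two wrapper shapes synthesized around the user-supplied
body (a sketch; the body and closing "})" are concatenated further below):

  // --harmony-function-tostring:  (function anonymous(a,b
  //                               ) { <body> })
  // default:                      (function(a,b){ <body> })

parameters_end_pos records the position of the ')' closing the parameter
list, letting the parser separate synthesized text from user text so that
Function.prototype.toString() can reproduce a faithful source.
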
@@ -2246,9 +2378,9 @@
   has_pending_exception =
       !i::Compiler::GetFunctionFromEval(
            source_string, outer_info, context, i::SLOPPY,
-           i::ONLY_SINGLE_FUNCTION_LITERAL, eval_scope_position, eval_position,
-           line_offset, column_offset - scope_position, name_obj,
-           source->resource_options)
+           i::ONLY_SINGLE_FUNCTION_LITERAL, parameters_end_pos,
+           eval_scope_position, eval_position, line_offset,
+           column_offset - scope_position, name_obj, source->resource_options)
            .ToHandle(&fun);
   if (has_pending_exception) {
     isolate->ReportPendingMessages();
@@ -2315,12 +2447,19 @@
   }
 
   source->info->set_script(script);
+  if (source->info->literal() == nullptr) {
+    source->parser->ReportErrors(isolate, script);
+  }
+  source->parser->UpdateStatistics(isolate, script);
 
-  // Do the parsing tasks which need to be done on the main thread. This will
-  // also handle parse errors.
-  source->parser->Internalize(isolate, script,
-                              source->info->literal() == nullptr);
-  source->parser->HandleSourceURLComments(isolate, script);
+  i::DeferredHandleScope deferred_handle_scope(isolate);
+  {
+    // Internalize AST values on the main thread.
+    source->info->ReopenHandlesInNewHandleScope();
+    source->info->ast_value_factory()->Internalize(isolate);
+    source->parser->HandleSourceURLComments(isolate, script);
+  }
+  source->info->set_deferred_handles(deferred_handle_scope.Detach());
 
   i::Handle<i::SharedFunctionInfo> result;
   if (source->info->literal() != nullptr) {
@@ -2454,6 +2593,12 @@
   }
 }
 
+V8_NORETURN void* v8::TryCatch::operator new(size_t) {
+  base::OS::Abort();
+  abort();
+}
+
+void v8::TryCatch::operator delete(void*, size_t) { base::OS::Abort(); }
 
 bool v8::TryCatch::HasCaught() const {
   return !reinterpret_cast<i::Object*>(exception_)->IsTheHole(isolate_);
@@ -2620,6 +2765,10 @@
   return self->end_position();
 }
 
+int Message::ErrorLevel() const {
+  auto self = Utils::OpenHandle(this);
+  return self->error_level();
+}
 
 Maybe<int> Message::GetStartColumn(Local<Context> context) const {
   auto self = Utils::OpenHandle(this);
@@ -2821,7 +2970,7 @@
 
 Local<NativeWeakMap> NativeWeakMap::New(Isolate* v8_isolate) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::Handle<i::JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
   i::JSWeakCollection::Initialize(weakmap, isolate);
   return Utils::NativeWeakMapToLocal(weakmap);
@@ -2875,7 +3024,7 @@
 bool NativeWeakMap::Has(Local<Value> v8_key) {
   i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
   i::Isolate* isolate = weak_collection->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
   if (!key->IsJSReceiver() && !key->IsSymbol()) {
@@ -2896,7 +3045,7 @@
 bool NativeWeakMap::Delete(Local<Value> v8_key) {
   i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
   i::Isolate* isolate = weak_collection->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
   if (!key->IsJSReceiver() && !key->IsSymbol()) {
@@ -2982,6 +3131,15 @@
   return Nothing<bool>();
 }
 
+Maybe<uint32_t> ValueSerializer::Delegate::GetSharedArrayBufferId(
+    Isolate* v8_isolate, Local<SharedArrayBuffer> shared_array_buffer) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  isolate->ScheduleThrow(*isolate->factory()->NewError(
+      isolate->error_function(), i::MessageTemplate::kDataCloneError,
+      Utils::OpenHandle(*shared_array_buffer)));
+  return Nothing<uint32_t>();
+}
+
 void* ValueSerializer::Delegate::ReallocateBufferMemory(void* old_buffer,
                                                         size_t size,
                                                         size_t* actual_size) {
@@ -3011,6 +3169,10 @@
 
 void ValueSerializer::WriteHeader() { private_->serializer.WriteHeader(); }
 
+void ValueSerializer::SetTreatArrayBufferViewsAsHostObjects(bool mode) {
+  private_->serializer.SetTreatArrayBufferViewsAsHostObjects(mode);
+}
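
The default GetSharedArrayBufferId above deliberately schedules a
DataCloneError; embedders that can transfer shared memory override it. A
hypothetical delegate (the index-table scheme is illustrative only):

  #include <vector>

  class SharingDelegate : public v8::ValueSerializer::Delegate {
   public:
    void ThrowDataCloneError(v8::Local<v8::String> message) override {
      // A real embedder would rethrow this on its isolate; elided here.
    }
    v8::Maybe<uint32_t> GetSharedArrayBufferId(
        v8::Isolate* isolate,
        v8::Local<v8::SharedArrayBuffer> shared) override {
      shared_.emplace_back(isolate, shared);  // keep alive for the receiver
      return v8::Just(static_cast<uint32_t>(shared_.size() - 1));
    }

   private:
    std::vector<v8::Global<v8::SharedArrayBuffer>> shared_;
  };

SetTreatArrayBufferViewsAsHostObjects(true), added just above, complements
this by routing ArrayBufferViews through the WriteHostObject path as well.
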
+
 Maybe<bool> ValueSerializer::WriteValue(Local<Context> context,
                                         Local<Value> value) {
   PREPARE_FOR_EXECUTION_PRIMITIVE(context, ValueSerializer, WriteValue, bool);
@@ -3940,7 +4102,7 @@
 
 Local<String> Value::TypeOf(v8::Isolate* external_isolate) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   LOG_API(isolate, Value, TypeOf);
   return Utils::ToLocal(i::Object::TypeOf(isolate, Utils::OpenHandle(this)));
 }
@@ -4343,12 +4505,11 @@
   return SetPrototype(context, value).FromMaybe(false);
 }
 
-
 Local<Object> v8::Object::FindInstanceInPrototypeChain(
     v8::Local<FunctionTemplate> tmpl) {
-  auto isolate = Utils::OpenHandle(this)->GetIsolate();
-  i::PrototypeIterator iter(isolate, *Utils::OpenHandle(this),
-                            i::kStartAtReceiver);
+  auto self = Utils::OpenHandle(this);
+  auto isolate = self->GetIsolate();
+  i::PrototypeIterator iter(isolate, *self, i::kStartAtReceiver);
   auto tmpl_info = *Utils::OpenHandle(*tmpl);
   while (!tmpl_info->IsTemplateFor(iter.GetCurrent<i::JSObject>())) {
     iter.Advance();
@@ -4411,12 +4572,14 @@
 
 MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
   PREPARE_FOR_EXECUTION(context, Object, ObjectProtoToString, String);
-  auto obj = Utils::OpenHandle(this);
-  Local<String> result;
+  auto self = Utils::OpenHandle(this);
+  Local<Value> result;
   has_pending_exception =
-      !ToLocal<String>(i::JSObject::ObjectProtoToString(isolate, obj), &result);
+      !ToLocal<Value>(i::Execution::Call(isolate, isolate->object_to_string(),
+                                         self, 0, nullptr),
+                      &result);
   RETURN_ON_FAILED_EXECUTION(String);
-  RETURN_ESCAPED(result);
+  RETURN_ESCAPED(Local<String>::Cast(result));
 }
 
 
@@ -4554,7 +4717,7 @@
   has_pending_exception =
       !i::JSObject::SetAccessor(obj, info).ToHandle(&result);
   RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
-  if (result->IsUndefined(obj->GetIsolate())) return Nothing<bool>();
+  if (result->IsUndefined(obj->GetIsolate())) return Just(false);
   if (fast) {
     i::JSObject::MigrateSlowToFast(obj, 0, "APISetAccessor");
   }
@@ -4826,7 +4989,7 @@
 Local<v8::Object> v8::Object::Clone() {
   auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
   auto isolate = self->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   auto result = isolate->factory()->CopyJSObject(self);
   CHECK(!result.is_null());
   return Utils::ToLocal(result);
@@ -4835,8 +4998,7 @@
 
 Local<v8::Context> v8::Object::CreationContext() {
   auto self = Utils::OpenHandle(this);
-  auto context = handle(self->GetCreationContext());
-  return Utils::ToLocal(context);
+  return Utils::ToLocal(self->GetCreationContext());
 }
 
 
@@ -4915,7 +5077,7 @@
                                    int length, ConstructorBehavior behavior) {
   i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
   LOG_API(isolate, Function, New);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   auto templ = FunctionTemplateNew(isolate, callback, nullptr, data,
                                    Local<Signature>(), length, true);
   if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
@@ -5109,7 +5271,7 @@
     return false;
   }
   auto func = i::Handle<i::JSFunction>::cast(self);
-  return func->shared()->IsBuiltin();
+  return !func->shared()->IsUserJavaScript();
 }
 
 
@@ -5680,7 +5842,7 @@
   i::Handle<i::String> str = Utils::OpenHandle(this);
   i::Isolate* isolate = str->GetIsolate();
   LOG_API(isolate, String, WriteUtf8);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   if (options & HINT_MANY_WRITES_EXPECTED) {
     str = i::String::Flatten(str);  // Flatten the string for efficiency.
   }
@@ -5732,7 +5894,7 @@
                               int options) {
   i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
   LOG_API(isolate, String, Write);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   DCHECK(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(string);
   if (options & String::HINT_MANY_WRITES_EXPECTED) {
@@ -6069,10 +6231,11 @@
   i::Handle<i::Context> Invoke(
       i::Isolate* isolate, i::MaybeHandle<i::JSGlobalProxy> maybe_global_proxy,
       v8::Local<v8::ObjectTemplate> global_object_template,
-      v8::ExtensionConfiguration* extensions, size_t context_snapshot_index) {
+      v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
+      v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
     return isolate->bootstrapper()->CreateEnvironment(
         maybe_global_proxy, global_object_template, extensions,
-        context_snapshot_index);
+        context_snapshot_index, internal_fields_deserializer);
   }
 };
 
@@ -6081,7 +6244,8 @@
   i::Handle<i::JSGlobalProxy> Invoke(
       i::Isolate* isolate, i::MaybeHandle<i::JSGlobalProxy> maybe_global_proxy,
       v8::Local<v8::ObjectTemplate> global_object_template,
-      v8::ExtensionConfiguration* extensions, size_t context_snapshot_index) {
+      v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
+      v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
     USE(extensions);
     USE(context_snapshot_index);
     return isolate->bootstrapper()->NewRemoteContext(maybe_global_proxy,
@@ -6093,15 +6257,19 @@
 static i::Handle<ObjectType> CreateEnvironment(
     i::Isolate* isolate, v8::ExtensionConfiguration* extensions,
     v8::MaybeLocal<ObjectTemplate> maybe_global_template,
-    v8::MaybeLocal<Value> maybe_global_proxy, size_t context_snapshot_index) {
+    v8::MaybeLocal<Value> maybe_global_proxy, size_t context_snapshot_index,
+    v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
   i::Handle<ObjectType> result;
 
-  // Enter V8 via an ENTER_V8 scope.
   {
-    ENTER_V8(isolate);
+    ENTER_V8_FOR_NEW_CONTEXT(isolate);
     v8::Local<ObjectTemplate> proxy_template;
     i::Handle<i::FunctionTemplateInfo> proxy_constructor;
     i::Handle<i::FunctionTemplateInfo> global_constructor;
+    i::Handle<i::Object> named_interceptor(
+        isolate->factory()->undefined_value());
+    i::Handle<i::Object> indexed_interceptor(
+        isolate->factory()->undefined_value());
 
     if (!maybe_global_template.IsEmpty()) {
       v8::Local<v8::ObjectTemplate> global_template =
@@ -6134,6 +6302,24 @@
         global_constructor->set_access_check_info(
             isolate->heap()->undefined_value());
       }
+
+      // Same for other interceptors. If the global constructor has
+      // interceptors, we need to replace them temporarily with noop
+      // interceptors, so the map is correctly marked as having interceptors,
+      // but we don't invoke any.
+      if (!global_constructor->named_property_handler()->IsUndefined(isolate)) {
+        named_interceptor =
+            handle(global_constructor->named_property_handler(), isolate);
+        global_constructor->set_named_property_handler(
+            isolate->heap()->noop_interceptor_info());
+      }
+      if (!global_constructor->indexed_property_handler()->IsUndefined(
+              isolate)) {
+        indexed_interceptor =
+            handle(global_constructor->indexed_property_handler(), isolate);
+        global_constructor->set_indexed_property_handler(
+            isolate->heap()->noop_interceptor_info());
+      }
     }
 
     i::MaybeHandle<i::JSGlobalProxy> maybe_proxy;
@@ -6143,10 +6329,11 @@
     }
     // Create the environment.
     InvokeBootstrapper<ObjectType> invoke;
-    result = invoke.Invoke(isolate, maybe_proxy, proxy_template, extensions,
-                           context_snapshot_index);
+    result =
+        invoke.Invoke(isolate, maybe_proxy, proxy_template, extensions,
+                      context_snapshot_index, internal_fields_deserializer);
 
-    // Restore the access check info on the global template.
+    // Restore the access check info and interceptors on the global template.
     if (!maybe_global_template.IsEmpty()) {
       DCHECK(!global_constructor.is_null());
       DCHECK(!proxy_constructor.is_null());
@@ -6154,6 +6341,8 @@
           proxy_constructor->access_check_info());
       global_constructor->set_needs_access_check(
           proxy_constructor->needs_access_check());
+      global_constructor->set_named_property_handler(*named_interceptor);
+      global_constructor->set_indexed_property_handler(*indexed_interceptor);
     }
   }
   // Leave V8.
@@ -6161,20 +6350,20 @@
   return result;
 }
 
-Local<Context> NewContext(v8::Isolate* external_isolate,
-                          v8::ExtensionConfiguration* extensions,
-                          v8::MaybeLocal<ObjectTemplate> global_template,
-                          v8::MaybeLocal<Value> global_object,
-                          size_t context_snapshot_index) {
+Local<Context> NewContext(
+    v8::Isolate* external_isolate, v8::ExtensionConfiguration* extensions,
+    v8::MaybeLocal<ObjectTemplate> global_template,
+    v8::MaybeLocal<Value> global_object, size_t context_snapshot_index,
+    v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
   TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.NewContext");
   LOG_API(isolate, Context, New);
   i::HandleScope scope(isolate);
   ExtensionConfiguration no_extensions;
   if (extensions == NULL) extensions = &no_extensions;
-  i::Handle<i::Context> env =
-      CreateEnvironment<i::Context>(isolate, extensions, global_template,
-                                    global_object, context_snapshot_index);
+  i::Handle<i::Context> env = CreateEnvironment<i::Context>(
+      isolate, extensions, global_template, global_object,
+      context_snapshot_index, internal_fields_deserializer);
   if (env.is_null()) {
     if (isolate->has_pending_exception()) {
       isolate->OptionalRescheduleException(true);
@@ -6189,21 +6378,22 @@
                                 v8::MaybeLocal<ObjectTemplate> global_template,
                                 v8::MaybeLocal<Value> global_object) {
   return NewContext(external_isolate, extensions, global_template,
-                    global_object, 0);
+                    global_object, 0, DeserializeInternalFieldsCallback());
 }
 
 MaybeLocal<Context> v8::Context::FromSnapshot(
     v8::Isolate* external_isolate, size_t context_snapshot_index,
-    v8::ExtensionConfiguration* extensions,
-    v8::MaybeLocal<ObjectTemplate> global_template,
-    v8::MaybeLocal<Value> global_object) {
+    v8::DeserializeInternalFieldsCallback internal_fields_deserializer,
+    v8::ExtensionConfiguration* extensions, MaybeLocal<Value> global_object) {
+  size_t index_including_default_context = context_snapshot_index + 1;
   if (!i::Snapshot::HasContextSnapshot(
           reinterpret_cast<i::Isolate*>(external_isolate),
-          context_snapshot_index)) {
+          index_including_default_context)) {
     return MaybeLocal<Context>();
   }
-  return NewContext(external_isolate, extensions, global_template,
-                    global_object, context_snapshot_index);
+  return NewContext(external_isolate, extensions, MaybeLocal<ObjectTemplate>(),
+                    global_object, index_including_default_context,
+                    internal_fields_deserializer);
 }
 
 MaybeLocal<Object> v8::Context::NewRemoteContext(
@@ -6225,7 +6415,8 @@
                   "Global template needs to have access check handlers.");
   i::Handle<i::JSGlobalProxy> global_proxy =
       CreateEnvironment<i::JSGlobalProxy>(isolate, nullptr, global_template,
-                                          global_object, 0);
+                                          global_object, 0,
+                                          DeserializeInternalFieldsCallback());
   if (global_proxy.is_null()) {
     if (isolate->has_pending_exception()) {
       isolate->OptionalRescheduleException(true);
@@ -6263,7 +6454,6 @@
   return reinterpret_cast<Isolate*>(env->GetIsolate());
 }
 
-
 v8::Local<v8::Object> Context::Global() {
   i::Handle<i::Context> context = Utils::OpenHandle(this);
   i::Isolate* isolate = context->GetIsolate();
@@ -6281,7 +6471,7 @@
 void Context::DetachGlobal() {
   i::Handle<i::Context> context = Utils::OpenHandle(this);
   i::Isolate* isolate = context->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   isolate->bootstrapper()->DetachGlobal(context);
 }
 
@@ -6297,7 +6487,7 @@
 void Context::AllowCodeGenerationFromStrings(bool allow) {
   i::Handle<i::Context> context = Utils::OpenHandle(this);
   i::Isolate* isolate = context->GetIsolate();
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   context->set_allow_code_gen_from_strings(
       allow ? isolate->heap()->true_value() : isolate->heap()->false_value());
 }
@@ -6390,10 +6580,13 @@
     return true;
   }
   if (obj->IsJSGlobalProxy()) {
-    // If it's a global proxy object, then test with the global object.
+    // If it's a global proxy, then test with the global object. Note that the
+    // inner global object may not necessarily be a JSGlobalObject.
     i::PrototypeIterator iter(i::JSObject::cast(*obj)->map());
-    if (iter.IsAtEnd()) return false;
-    return self->IsTemplateFor(iter.GetCurrent<i::JSGlobalObject>());
+    // The global proxy should always have a prototype, as it is a bug to call
+    // this on a detached JSGlobalProxy.
+    DCHECK(!iter.IsAtEnd());
+    return self->IsTemplateFor(iter.GetCurrent<i::JSObject>());
   }
   return false;
 }
@@ -6403,7 +6596,7 @@
   STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, External, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::JSObject> external = i_isolate->factory()->NewExternal(value);
   return Utils::ExternalToLocal(external);
 }
@@ -6482,7 +6675,7 @@
     result = MaybeLocal<String>();                                         \
   } else {                                                                 \
     i::Isolate* i_isolate = reinterpret_cast<internal::Isolate*>(isolate); \
-    ENTER_V8(i_isolate);                                                   \
+    ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);                            \
     LOG_API(i_isolate, class_name, function_name);                         \
     if (length < 0) length = StringLength(data);                           \
     i::Handle<i::String> handle_result =                                   \
@@ -6569,13 +6762,19 @@
     return MaybeLocal<String>();
   }
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   LOG_API(i_isolate, String, NewExternalTwoByte);
-  i::Handle<i::String> string = i_isolate->factory()
-                                    ->NewExternalStringFromTwoByte(resource)
-                                    .ToHandleChecked();
-  i_isolate->heap()->RegisterExternalString(*string);
-  return Utils::ToLocal(string);
+  if (resource->length() > 0) {
+    i::Handle<i::String> string = i_isolate->factory()
+                                      ->NewExternalStringFromTwoByte(resource)
+                                      .ToHandleChecked();
+    i_isolate->heap()->RegisterExternalString(*string);
+    return Utils::ToLocal(string);
+  } else {
+    // The resource isn't going to be used; free it immediately.
+    resource->Dispose();
+    return Utils::ToLocal(i_isolate->factory()->empty_string());
+  }
 }
 
 
@@ -6593,13 +6792,19 @@
     return MaybeLocal<String>();
   }
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   LOG_API(i_isolate, String, NewExternalOneByte);
-  i::Handle<i::String> string = i_isolate->factory()
-                                    ->NewExternalStringFromOneByte(resource)
-                                    .ToHandleChecked();
-  i_isolate->heap()->RegisterExternalString(*string);
-  return Utils::ToLocal(string);
+  if (resource->length() > 0) {
+    i::Handle<i::String> string = i_isolate->factory()
+                                      ->NewExternalStringFromOneByte(resource)
+                                      .ToHandleChecked();
+    i_isolate->heap()->RegisterExternalString(*string);
+    return Utils::ToLocal(string);
+  } else {
+    // The resource isn't going to be used, so free it immediately and
+    // return the canonical empty string instead.
+    resource->Dispose();
+    return Utils::ToLocal(i_isolate->factory()->empty_string());
+  }
 }
 
 
@@ -6615,7 +6820,7 @@
   if (i::StringShape(*obj).IsExternal()) {
     return false;  // Already an external string.
   }
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   if (isolate->heap()->IsInGCPostProcessing()) {
     return false;
   }
@@ -6639,7 +6844,7 @@
   if (i::StringShape(*obj).IsExternal()) {
     return false;  // Already an external string.
   }
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   if (isolate->heap()->IsInGCPostProcessing()) {
     return false;
   }
@@ -6675,7 +6880,7 @@
 Local<v8::Object> v8::Object::New(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, Object, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::JSObject> obj =
       i_isolate->factory()->NewJSObject(i_isolate->object_function());
   return Utils::ToLocal(obj);
@@ -6685,7 +6890,7 @@
 Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, NumberObject, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::Object> number = i_isolate->factory()->NewNumber(value);
   i::Handle<i::Object> obj =
       i::Object::ToObject(i_isolate, number).ToHandleChecked();
@@ -6705,7 +6910,7 @@
 Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, BooleanObject, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::Object> boolean(value ? i_isolate->heap()->true_value()
                                      : i_isolate->heap()->false_value(),
                                i_isolate);
@@ -6733,7 +6938,7 @@
   i::Handle<i::String> string = Utils::OpenHandle(*value);
   i::Isolate* isolate = string->GetIsolate();
   LOG_API(isolate, StringObject, New);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::Handle<i::Object> obj =
       i::Object::ToObject(isolate, string).ToHandleChecked();
   return Utils::ToLocal(obj);
@@ -6753,7 +6958,7 @@
 Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Local<Symbol> value) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, SymbolObject, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::Object> obj = i::Object::ToObject(
       i_isolate, Utils::OpenHandle(*value)).ToHandleChecked();
   return Utils::ToLocal(obj);
@@ -6803,7 +7008,7 @@
 void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, Date, DateTimeConfigurationChangeNotification);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i_isolate->date_cache()->ResetDateCache();
   if (!i_isolate->eternal_handles()->Exists(
           i::EternalHandles::DATE_CACHE_VERSION)) {
@@ -6868,7 +7073,7 @@
 Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, Array, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   int real_length = length > 0 ? length : 0;
   i::Handle<i::JSArray> obj = i_isolate->factory()->NewJSArray(real_length);
   i::Handle<i::Object> length_obj =
@@ -6913,7 +7118,7 @@
 Local<v8::Map> v8::Map::New(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, Map, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::JSMap> obj = i_isolate->factory()->NewJSMap();
   return Utils::ToLocal(obj);
 }
@@ -6929,7 +7134,7 @@
   auto self = Utils::OpenHandle(this);
   i::Isolate* isolate = self->GetIsolate();
   LOG_API(isolate, Map, Clear);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::JSMap::Clear(self);
 }
 
@@ -6988,15 +7193,14 @@
   return Just(result->IsTrue(isolate));
 }
 
-
-Local<Array> Map::AsArray() const {
-  i::Handle<i::JSMap> obj = Utils::OpenHandle(this);
-  i::Isolate* isolate = obj->GetIsolate();
+namespace {
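+// Shared helper for Map::AsArray and debug::EntriesPreview below. |offset|
+// skips that many live entries; |kind| selects keys, values, or key/value
+// pairs (i::JSMapIterator::kKind*).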
+i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object* table_obj,
+                                 int offset, int kind) {
   i::Factory* factory = isolate->factory();
-  LOG_API(isolate, Map, AsArray);
-  ENTER_V8(isolate);
-  i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(obj->table()));
-  int length = table->NumberOfElements() * 2;
+  i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(table_obj));
+  if (offset >= table->NumberOfElements()) return factory->NewJSArray(0);
+  int length = (table->NumberOfElements() - offset) *
+               (kind == i::JSMapIterator::kKindEntries ? 2 : 1);
   i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
   int result_index = 0;
   {
@@ -7006,22 +7210,37 @@
     for (int i = 0; i < capacity; ++i) {
       i::Object* key = table->KeyAt(i);
       if (key == the_hole) continue;
-      result->set(result_index++, key);
-      result->set(result_index++, table->ValueAt(i));
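+      // Skip the first |offset| live entries; once exhausted, |offset| just
+      // keeps decrementing below zero, which is harmless.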
+      if (offset-- > 0) continue;
+      if (kind == i::JSMapIterator::kKindEntries ||
+          kind == i::JSMapIterator::kKindKeys) {
+        result->set(result_index++, key);
+      }
+      if (kind == i::JSMapIterator::kKindEntries ||
+          kind == i::JSMapIterator::kKindValues) {
+        result->set(result_index++, table->ValueAt(i));
+      }
     }
   }
   DCHECK_EQ(result_index, result->length());
   DCHECK_EQ(result_index, length);
-  i::Handle<i::JSArray> result_array =
-      factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
-  return Utils::ToLocal(result_array);
+  return factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
+}
+}  // namespace
+
+Local<Array> Map::AsArray() const {
+  i::Handle<i::JSMap> obj = Utils::OpenHandle(this);
+  i::Isolate* isolate = obj->GetIsolate();
+  LOG_API(isolate, Map, AsArray);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+  return Utils::ToLocal(
+      MapAsArray(isolate, obj->table(), 0, i::JSMapIterator::kKindEntries));
 }
 
 
 Local<v8::Set> v8::Set::New(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, Set, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::JSSet> obj = i_isolate->factory()->NewJSSet();
   return Utils::ToLocal(obj);
 }
@@ -7037,7 +7256,7 @@
   auto self = Utils::OpenHandle(this);
   i::Isolate* isolate = self->GetIsolate();
   LOG_API(isolate, Set, Clear);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::JSSet::Clear(self);
 }
 
@@ -7080,15 +7299,13 @@
   return Just(result->IsTrue(isolate));
 }
 
-
-Local<Array> Set::AsArray() const {
-  i::Handle<i::JSSet> obj = Utils::OpenHandle(this);
-  i::Isolate* isolate = obj->GetIsolate();
+namespace {
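+// Shared helper for Set::AsArray and debug::EntriesPreview below. |offset|
+// skips that many live entries.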
+i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object* table_obj,
+                                 int offset) {
   i::Factory* factory = isolate->factory();
-  LOG_API(isolate, Set, AsArray);
-  ENTER_V8(isolate);
-  i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(obj->table()));
-  int length = table->NumberOfElements();
+  i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(table_obj));
+  int length = table->NumberOfElements() - offset;
+  if (length <= 0) return factory->NewJSArray(0);
   i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
   int result_index = 0;
   {
@@ -7098,14 +7315,22 @@
     for (int i = 0; i < capacity; ++i) {
       i::Object* key = table->KeyAt(i);
       if (key == the_hole) continue;
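+      // Skip the first |offset| live entries.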
+      if (offset-- > 0) continue;
       result->set(result_index++, key);
     }
   }
   DCHECK_EQ(result_index, result->length());
   DCHECK_EQ(result_index, length);
-  i::Handle<i::JSArray> result_array =
-      factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
-  return Utils::ToLocal(result_array);
+  return factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
+}
+}  // namespace
+
+Local<Array> Set::AsArray() const {
+  i::Handle<i::JSSet> obj = Utils::OpenHandle(this);
+  i::Isolate* isolate = obj->GetIsolate();
+  LOG_API(isolate, Set, AsArray);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+  return Utils::ToLocal(SetAsArray(isolate, obj->table(), 0));
 }
 
 
@@ -7113,7 +7338,7 @@
   PREPARE_FOR_EXECUTION(context, Promise_Resolver, New, Resolver);
   i::Handle<i::Object> result;
   has_pending_exception =
-      !i::Execution::Call(isolate, isolate->promise_create(),
+      !i::Execution::Call(isolate, isolate->promise_internal_constructor(),
                           isolate->factory()->undefined_value(), 0, NULL)
            .ToHandle(&result);
   RETURN_ON_FAILED_EXECUTION(Promise::Resolver);
@@ -7158,9 +7383,12 @@
                                       Local<Value> value) {
   PREPARE_FOR_EXECUTION_PRIMITIVE(context, Promise_Resolver, Resolve, bool);
   auto self = Utils::OpenHandle(this);
-  i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
+
+  // We pass true to trigger the debugger's on-exception handler.
+  i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value),
+                                 isolate->factory()->ToBoolean(true)};
   has_pending_exception =
-      i::Execution::Call(isolate, isolate->promise_reject(),
+      i::Execution::Call(isolate, isolate->promise_internal_reject(),
                          isolate->factory()->undefined_value(), arraysize(argv),
                          argv)
           .is_null();
@@ -7219,11 +7447,32 @@
   i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
   i::Isolate* isolate = promise->GetIsolate();
   LOG_API(isolate, Promise, HasRejectHandler);
-  ENTER_V8(isolate);
-  i::Handle<i::Symbol> key = isolate->factory()->promise_has_handler_symbol();
-  return i::JSReceiver::GetDataProperty(promise, key)->IsTrue(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+  if (promise->IsJSPromise()) {
+    i::Handle<i::JSPromise> js_promise = i::Handle<i::JSPromise>::cast(promise);
+    return js_promise->has_handler();
+  }
+  return false;
 }
 
+Local<Value> Promise::Result() {
+  i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
+  i::Isolate* isolate = promise->GetIsolate();
+  LOG_API(isolate, Promise, Result);
+  i::Handle<i::JSPromise> js_promise = i::Handle<i::JSPromise>::cast(promise);
+  Utils::ApiCheck(js_promise->status() != kPending, "v8_Promise_Result",
+                  "Promise is still pending");
+  i::Handle<i::Object> result(js_promise->result(), isolate);
+  return Utils::ToLocal(result);
+}
+
+Promise::PromiseState Promise::State() {
+  i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
+  i::Isolate* isolate = promise->GetIsolate();
+  LOG_API(isolate, Promise, Status);
+  i::Handle<i::JSPromise> js_promise = i::Handle<i::JSPromise>::cast(promise);
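+  // The public PromiseState enum mirrors the internal status values
+  // (pending/fulfilled/rejected), so a direct cast suffices.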
+  return static_cast<PromiseState>(js_promise->status());
+}
 
 Local<Object> Proxy::GetTarget() {
   i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
@@ -7268,7 +7517,7 @@
       i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
   i::Handle<i::WasmCompiledModule> compiled_part =
       i::handle(i::WasmCompiledModule::cast(obj->GetInternalField(0)));
-  i::Handle<i::String> wire_bytes = compiled_part->module_bytes();
+  i::Handle<i::String> wire_bytes(compiled_part->module_bytes());
   return Local<String>::Cast(Utils::ToLocal(wire_bytes));
 }
 
@@ -7326,11 +7575,8 @@
                                                            size_t length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   i::wasm::ErrorThrower thrower(i_isolate, "WasmCompiledModule::Deserialize()");
-  i::MaybeHandle<i::JSObject> maybe_compiled =
-      i::wasm::CreateModuleObjectFromBytes(
-          i_isolate, start, start + length, &thrower,
-          i::wasm::ModuleOrigin::kWasmOrigin, i::Handle<i::Script>::null(),
-          nullptr, nullptr);
+  i::MaybeHandle<i::JSObject> maybe_compiled = i::wasm::SyncCompile(
+      i_isolate, &thrower, i::wasm::ModuleWireBytes(start, start + length));
   if (maybe_compiled.is_null()) return MaybeLocal<WasmCompiledModule>();
   return Local<WasmCompiledModule>::Cast(
       Utils::ToLocal(maybe_compiled.ToHandleChecked()));
@@ -7382,7 +7628,7 @@
   Utils::ApiCheck(obj->is_neuterable(), "v8::ArrayBuffer::Neuter",
                   "Only neuterable ArrayBuffers can be neutered");
   LOG_API(isolate, ArrayBuffer, Neuter);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   obj->Neuter();
 }
 
@@ -7396,7 +7642,7 @@
 Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, ArrayBuffer, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::JSArrayBuffer> obj =
       i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
   // TODO(jbroman): It may be useful in the future to provide a MaybeLocal
@@ -7415,7 +7661,7 @@
   CHECK(byte_length == 0 || data != NULL);
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, ArrayBuffer, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::JSArrayBuffer> obj =
       i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
   i::JSArrayBuffer::Setup(obj, i_isolate,
@@ -7491,7 +7737,7 @@
                                       size_t byte_offset, size_t length) { \
     i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate();  \
     LOG_API(isolate, Type##Array, New);                                    \
-    ENTER_V8(isolate);                                                     \
+    ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);                              \
     if (!Utils::ApiCheck(length <= static_cast<size_t>(i::Smi::kMaxValue), \
                          "v8::" #Type                                      \
                          "Array::New(Local<ArrayBuffer>, size_t, size_t)", \
@@ -7510,7 +7756,7 @@
     i::Isolate* isolate =                                                  \
         Utils::OpenHandle(*shared_array_buffer)->GetIsolate();             \
     LOG_API(isolate, Type##Array, New);                                    \
-    ENTER_V8(isolate);                                                     \
+    ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);                              \
     if (!Utils::ApiCheck(                                                  \
             length <= static_cast<size_t>(i::Smi::kMaxValue),              \
             "v8::" #Type                                                   \
@@ -7533,7 +7779,7 @@
   i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
   i::Isolate* isolate = buffer->GetIsolate();
   LOG_API(isolate, DataView, New);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::Handle<i::JSDataView> obj =
       isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
   return Utils::ToLocal(obj);
@@ -7546,7 +7792,7 @@
   i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*shared_array_buffer);
   i::Isolate* isolate = buffer->GetIsolate();
   LOG_API(isolate, DataView, New);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::Handle<i::JSDataView> obj =
       isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
   return Utils::ToLocal(obj);
@@ -7590,7 +7836,7 @@
   CHECK(i::FLAG_harmony_sharedarraybuffer);
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, SharedArrayBuffer, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::JSArrayBuffer> obj =
       i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
   // TODO(jbroman): It may be useful in the future to provide a MaybeLocal
@@ -7611,7 +7857,7 @@
   CHECK(byte_length == 0 || data != NULL);
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, SharedArrayBuffer, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::JSArrayBuffer> obj =
       i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
   i::JSArrayBuffer::Setup(obj, i_isolate,
@@ -7624,49 +7870,26 @@
 Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, Symbol, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
   if (!name.IsEmpty()) result->set_name(*Utils::OpenHandle(*name));
   return Utils::ToLocal(result);
 }
 
 
-static i::Handle<i::Symbol> SymbolFor(i::Isolate* isolate,
-                                      i::Handle<i::String> name,
-                                      i::Handle<i::String> part,
-                                      bool private_symbol) {
-  i::Handle<i::JSObject> registry = isolate->GetSymbolRegistry();
-  i::Handle<i::JSObject> symbols =
-      i::Handle<i::JSObject>::cast(
-          i::Object::GetPropertyOrElement(registry, part).ToHandleChecked());
-  i::Handle<i::Object> symbol =
-      i::Object::GetPropertyOrElement(symbols, name).ToHandleChecked();
-  if (!symbol->IsSymbol()) {
-    DCHECK(symbol->IsUndefined(isolate));
-    if (private_symbol)
-      symbol = isolate->factory()->NewPrivateSymbol();
-    else
-      symbol = isolate->factory()->NewSymbol();
-    i::Handle<i::Symbol>::cast(symbol)->set_name(*name);
-    i::Object::SetPropertyOrElement(symbols, name, symbol, i::STRICT).Assert();
-  }
-  return i::Handle<i::Symbol>::cast(symbol);
-}
-
-
 Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   i::Handle<i::String> i_name = Utils::OpenHandle(*name);
-  i::Handle<i::String> part = i_isolate->factory()->for_string();
-  return Utils::ToLocal(SymbolFor(i_isolate, i_name, part, false));
+  return Utils::ToLocal(i_isolate->SymbolFor(
+      i::Heap::kPublicSymbolTableRootIndex, i_name, false));
 }
 
 
 Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   i::Handle<i::String> i_name = Utils::OpenHandle(*name);
-  i::Handle<i::String> part = i_isolate->factory()->for_api_string();
-  return Utils::ToLocal(SymbolFor(i_isolate, i_name, part, false));
+  return Utils::ToLocal(
+      i_isolate->SymbolFor(i::Heap::kApiSymbolTableRootIndex, i_name, false));
 }
 
 
@@ -7681,6 +7904,10 @@
   return Utils::ToLocal(i_isolate->factory()->unscopables_symbol());
 }
 
+Local<Symbol> v8::Symbol::GetToPrimitive(Isolate* isolate) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  return Utils::ToLocal(i_isolate->factory()->to_primitive_symbol());
+}
 
 Local<Symbol> v8::Symbol::GetToStringTag(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -7697,7 +7924,7 @@
 Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, Private, New);
-  ENTER_V8(i_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
   i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
   if (!name.IsEmpty()) symbol->set_name(*Utils::OpenHandle(*name));
   Local<Symbol> result = Utils::ToLocal(symbol);
@@ -7708,9 +7935,8 @@
 Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   i::Handle<i::String> i_name = Utils::OpenHandle(*name);
-  i::Handle<i::String> part = i_isolate->factory()->private_api_string();
-  Local<Symbol> result =
-      Utils::ToLocal(SymbolFor(i_isolate, i_name, part, true));
+  Local<Symbol> result = Utils::ToLocal(i_isolate->SymbolFor(
+      i::Heap::kApiPrivateSymbolTableRootIndex, i_name, true));
   return v8::Local<Private>(reinterpret_cast<Private*>(*result));
 }
 
@@ -7721,7 +7947,7 @@
     // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
     value = std::numeric_limits<double>::quiet_NaN();
   }
-  ENTER_V8(internal_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate);
   i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
   return Utils::NumberToLocal(result);
 }
@@ -7733,7 +7959,7 @@
     return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
                                                       internal_isolate));
   }
-  ENTER_V8(internal_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate);
   i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
   return Utils::IntegerToLocal(result);
 }
@@ -7745,7 +7971,7 @@
   if (fits_into_int32_t) {
     return Integer::New(isolate, static_cast<int32_t>(value));
   }
-  ENTER_V8(internal_isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate);
   i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
   return Utils::IntegerToLocal(result);
 }
@@ -7804,6 +8030,18 @@
   return Utils::ToLocal(i::Handle<i::Context>::cast(last));
 }
 
+v8::Local<v8::Context> Isolate::GetEnteredOrMicrotaskContext() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  i::Handle<i::Object> last;
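+  // Prefer the microtask context when it is more recent than the last
+  // explicitly entered context.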
+  if (isolate->handle_scope_implementer()
+          ->MicrotaskContextIsLastEnteredContext()) {
+    last = isolate->handle_scope_implementer()->MicrotaskContext();
+  } else {
+    last = isolate->handle_scope_implementer()->LastEnteredContext();
+  }
+  if (last.is_null()) return Local<Context>();
+  return Utils::ToLocal(i::Handle<i::Context>::cast(last));
+}
 
 v8::Local<Value> Isolate::ThrowException(v8::Local<v8::Value> value) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -7969,8 +8207,7 @@
   }
 
   isolate->set_api_external_references(params.external_references);
-  isolate->set_deserialize_internal_fields_callback(
-      params.deserialize_internal_fields_callback);
+  isolate->set_allow_atomics_wait(params.allow_atomics_wait);
   SetResourceConstraints(isolate, params.constraints);
   // TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
   Isolate::Scope isolate_scope(v8_isolate);
@@ -8227,6 +8464,10 @@
       reinterpret_cast<CallCompletedCallback>(callback));
 }
 
+void Isolate::SetPromiseHook(PromiseHook hook) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->SetPromiseHook(hook);
+}
 
 void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
   if (callback == NULL) return;
@@ -8376,8 +8617,14 @@
 
 void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  isolate->heap()->MemoryPressureNotification(level, Locker::IsLocked(this));
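+  // Determine whether we are on the isolate's own thread: with an active
+  // Locker, check lock ownership; otherwise compare thread ids.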
+  bool on_isolate_thread =
+      v8::Locker::IsActive()
+          ? isolate->thread_manager()->IsLockedByCurrentThread()
+          : i::ThreadId::Current().Equals(isolate->thread_id());
+  isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
   isolate->allocator()->MemoryPressureNotification(level);
+  isolate->compiler_dispatcher()->MemoryPressureNotification(level,
+                                                             on_isolate_thread);
 }
 
 void Isolate::SetRAILMode(RAILMode rail_mode) {
@@ -8385,6 +8632,21 @@
   return isolate->SetRAILMode(rail_mode);
 }
 
+void Isolate::IncreaseHeapLimitForDebugging() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->heap()->IncreaseHeapLimitForDebugging();
+}
+
+void Isolate::RestoreOriginalHeapLimit() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->heap()->RestoreOriginalHeapLimit();
+}
+
+bool Isolate::IsHeapLimitIncreasedForDebugging() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  return isolate->heap()->IsHeapLimitIncreasedForDebugging();
+}
+
 void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
                                      JitCodeEventHandler event_handler) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8400,7 +8662,6 @@
   isolate->stack_guard()->SetStackLimit(stack_limit);
 }
 
-
 void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
   if (isolate->heap()->memory_allocator()->code_range()->valid()) {
@@ -8430,24 +8691,40 @@
   isolate->set_allow_code_gen_callback(callback);
 }
 
+void Isolate::SetAllowWasmCompileCallback(AllowWasmCompileCallback callback) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->set_allow_wasm_compile_callback(callback);
+}
+
+void Isolate::SetAllowWasmInstantiateCallback(
+    AllowWasmInstantiateCallback callback) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  isolate->set_allow_wasm_instantiate_callback(callback);
+}
 
 bool Isolate::IsDead() {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
   return isolate->IsDead();
 }
 
-
 bool Isolate::AddMessageListener(MessageCallback that, Local<Value> data) {
+  return AddMessageListenerWithErrorLevel(that, kMessageError, data);
+}
+
+bool Isolate::AddMessageListenerWithErrorLevel(MessageCallback that,
+                                               int message_levels,
+                                               Local<Value> data) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::TemplateList> list = isolate->factory()->message_listeners();
-  i::Handle<i::FixedArray> listener = isolate->factory()->NewFixedArray(2);
+  i::Handle<i::FixedArray> listener = isolate->factory()->NewFixedArray(3);
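+  // Listener layout: slot 0 is the callback, slot 1 the data (or undefined),
+  // slot 2 the message-level mask.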
   i::Handle<i::Foreign> foreign =
       isolate->factory()->NewForeign(FUNCTION_ADDR(that));
   listener->set(0, *foreign);
   listener->set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
                                   : *Utils::OpenHandle(*data));
+  listener->set(2, i::Smi::FromInt(message_levels));
   list = i::TemplateList::Add(isolate, list, listener);
   isolate->heap()->SetMessageListeners(*list);
   return true;
@@ -8456,7 +8733,7 @@
 
 void Isolate::RemoveMessageListeners(MessageCallback that) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  ENTER_V8(isolate);
+  ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
   i::HandleScope scope(isolate);
   i::DisallowHeapAllocation no_gc;
   i::TemplateList* listeners = isolate->heap()->message_listeners();
@@ -8638,7 +8915,7 @@
   Local<Value> Exception::NAME(v8::Local<v8::String> raw_message) {      \
     i::Isolate* isolate = i::Isolate::Current();                         \
     LOG_API(isolate, NAME, New);                                         \
-    ENTER_V8(isolate);                                                   \
+    ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);                            \
     i::Object* error;                                                    \
     {                                                                    \
       i::HandleScope scope(isolate);                                     \
@@ -8695,15 +8972,18 @@
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   ENTER_V8(i_isolate);
   i::HandleScope scope(i_isolate);
-  i::Handle<i::Object> foreign = i_isolate->factory()->undefined_value();
-  if (that != NULL) {
-    foreign = i_isolate->factory()->NewForeign(FUNCTION_ADDR(that));
+  if (that == nullptr) {
+    i_isolate->debug()->SetDebugDelegate(nullptr, false);
+  } else {
+    i::Handle<i::Object> i_data = i_isolate->factory()->undefined_value();
+    if (!data.IsEmpty()) i_data = Utils::OpenHandle(*data);
+    i::NativeDebugDelegate* delegate =
+        new i::NativeDebugDelegate(i_isolate, that, i_data);
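+    // The debugger takes ownership of the heap-allocated delegate.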
+    i_isolate->debug()->SetDebugDelegate(delegate, true);
   }
-  i_isolate->debug()->SetEventListener(foreign, Utils::OpenHandle(*data, true));
   return true;
 }
 
-
 void Debug::DebugBreak(Isolate* isolate) {
   reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->RequestDebugBreak();
 }
@@ -8720,24 +9000,11 @@
   return internal_isolate->stack_guard()->CheckDebugBreak();
 }
 
-
 void Debug::SetMessageHandler(Isolate* isolate,
-                              v8::Debug::MessageHandler handler) {
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  ENTER_V8(i_isolate);
-  i_isolate->debug()->SetMessageHandler(handler);
-}
+                              v8::Debug::MessageHandler handler) {}
 
-
-void Debug::SendCommand(Isolate* isolate,
-                        const uint16_t* command,
-                        int length,
-                        ClientData* client_data) {
-  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  internal_isolate->debug()->EnqueueCommandMessage(
-      i::Vector<const uint16_t>(command, length), client_data);
-}
-
+void Debug::SendCommand(Isolate* isolate, const uint16_t* command, int length,
+                        ClientData* client_data) {}
 
 MaybeLocal<Value> Debug::Call(Local<Context> context,
                               v8::Local<v8::Function> fun,
@@ -8758,36 +9025,10 @@
 }
 
 
-MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
-                                   v8::Local<v8::Value> obj) {
-  PREPARE_FOR_EXECUTION(context, Debug, GetMirror, Value);
-  i::Debug* isolate_debug = isolate->debug();
-  has_pending_exception = !isolate_debug->Load();
-  RETURN_ON_FAILED_EXECUTION(Value);
-  i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
-  auto name = isolate->factory()->NewStringFromStaticChars("MakeMirror");
-  auto fun_obj = i::JSReceiver::GetProperty(debug, name).ToHandleChecked();
-  auto v8_fun = Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(fun_obj));
-  const int kArgc = 1;
-  v8::Local<v8::Value> argv[kArgc] = {obj};
-  Local<Value> result;
-  has_pending_exception =
-      !v8_fun->Call(context, Utils::ToLocal(debug), kArgc, argv)
-           .ToLocal(&result);
-  RETURN_ON_FAILED_EXECUTION(Value);
-  RETURN_ESCAPED(result);
-}
-
-
-void Debug::ProcessDebugMessages(Isolate* isolate) {
-  reinterpret_cast<i::Isolate*>(isolate)->debug()->ProcessDebugMessages(true);
-}
-
+void Debug::ProcessDebugMessages(Isolate* isolate) {}
 
 Local<Context> Debug::GetDebugContext(Isolate* isolate) {
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  ENTER_V8(i_isolate);
-  return Utils::ToLocal(i_isolate->debug()->GetDebugContext());
+  return debug::GetDebugContext(isolate);
 }
 
 
@@ -8826,49 +9067,34 @@
   return Utils::ToLocal(result);
 }
 
-bool DebugInterface::SetDebugEventListener(Isolate* isolate,
-                                           DebugInterface::EventCallback that,
-                                           Local<Value> data) {
+Local<Context> debug::GetDebugContext(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   ENTER_V8(i_isolate);
-  i::HandleScope scope(i_isolate);
-  i::Handle<i::Object> foreign = i_isolate->factory()->undefined_value();
-  if (that != NULL) {
-    foreign = i_isolate->factory()->NewForeign(FUNCTION_ADDR(that));
-  }
-  i_isolate->debug()->SetEventListener(foreign, Utils::OpenHandle(*data, true));
-  return true;
+  return Utils::ToLocal(i_isolate->debug()->GetDebugContext());
 }
 
-Local<Context> DebugInterface::GetDebugContext(Isolate* isolate) {
-  return Debug::GetDebugContext(isolate);
-}
-
-MaybeLocal<Value> DebugInterface::Call(Local<Context> context,
-                                       v8::Local<v8::Function> fun,
-                                       v8::Local<v8::Value> data) {
+MaybeLocal<Value> debug::Call(Local<Context> context,
+                              v8::Local<v8::Function> fun,
+                              v8::Local<v8::Value> data) {
   return Debug::Call(context, fun, data);
 }
 
-void DebugInterface::SetLiveEditEnabled(Isolate* isolate, bool enable) {
+void debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
   Debug::SetLiveEditEnabled(isolate, enable);
 }
 
-void DebugInterface::DebugBreak(Isolate* isolate) {
-  Debug::DebugBreak(isolate);
-}
+void debug::DebugBreak(Isolate* isolate) { Debug::DebugBreak(isolate); }
 
-void DebugInterface::CancelDebugBreak(Isolate* isolate) {
+void debug::CancelDebugBreak(Isolate* isolate) {
   Debug::CancelDebugBreak(isolate);
 }
 
-MaybeLocal<Array> DebugInterface::GetInternalProperties(Isolate* isolate,
-                                                        Local<Value> value) {
+MaybeLocal<Array> debug::GetInternalProperties(Isolate* isolate,
+                                               Local<Value> value) {
   return Debug::GetInternalProperties(isolate, value);
 }
 
-void DebugInterface::ChangeBreakOnException(Isolate* isolate,
-                                            ExceptionBreakState type) {
+void debug::ChangeBreakOnException(Isolate* isolate, ExceptionBreakState type) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
   internal_isolate->debug()->ChangeBreakOnException(
       i::BreakException, type == BreakOnAnyException);
@@ -8876,7 +9102,20 @@
                                                     type != NoBreakOnException);
 }
 
-void DebugInterface::PrepareStep(Isolate* v8_isolate, StepAction action) {
+void debug::SetBreakPointsActive(Isolate* v8_isolate, bool is_active) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  ENTER_V8(isolate);
+  isolate->debug()->set_break_points_active(is_active);
+}
+
+void debug::SetOutOfMemoryCallback(Isolate* isolate,
+                                   OutOfMemoryCallback callback, void* data) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  ENTER_V8(i_isolate);
+  i_isolate->heap()->SetOutOfMemoryCallback(callback, data);
+}
+
+void debug::PrepareStep(Isolate* v8_isolate, StepAction action) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
   ENTER_V8(isolate);
   CHECK(isolate->debug()->CheckExecutionState());
@@ -8886,38 +9125,45 @@
   isolate->debug()->PrepareStep(static_cast<i::StepAction>(action));
 }
 
-void DebugInterface::ClearStepping(Isolate* v8_isolate) {
+bool debug::HasNonBlackboxedFrameOnStack(Isolate* v8_isolate) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
   ENTER_V8(isolate);
-  // Clear all current stepping setup.
-  isolate->debug()->ClearStepping();
+  i::HandleScope scope(isolate);
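+  // Walk the JavaScript frames on the stack; a single non-blackboxed frame
+  // is enough to answer true.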
+  for (i::StackTraceFrameIterator it(isolate); !it.done(); it.Advance()) {
+    if (!it.is_javascript()) continue;
+    if (!isolate->debug()->IsFrameBlackboxed(it.javascript_frame())) {
+      return true;
+    }
+  }
+  return false;
 }
 
-v8::Isolate* DebugInterface::Script::GetIsolate() const {
+v8::Isolate* debug::Script::GetIsolate() const {
   return reinterpret_cast<v8::Isolate*>(Utils::OpenHandle(this)->GetIsolate());
 }
 
-ScriptOriginOptions DebugInterface::Script::OriginOptions() const {
+ScriptOriginOptions debug::Script::OriginOptions() const {
   return Utils::OpenHandle(this)->origin_options();
 }
 
-bool DebugInterface::Script::WasCompiled() const {
+bool debug::Script::WasCompiled() const {
   return Utils::OpenHandle(this)->compilation_state() ==
          i::Script::COMPILATION_STATE_COMPILED;
 }
 
-int DebugInterface::Script::Id() const { return Utils::OpenHandle(this)->id(); }
+int debug::Script::Id() const { return Utils::OpenHandle(this)->id(); }
 
-int DebugInterface::Script::LineOffset() const {
+int debug::Script::LineOffset() const {
   return Utils::OpenHandle(this)->line_offset();
 }
 
-int DebugInterface::Script::ColumnOffset() const {
+int debug::Script::ColumnOffset() const {
   return Utils::OpenHandle(this)->column_offset();
 }
 
-std::vector<int> DebugInterface::Script::LineEnds() const {
+std::vector<int> debug::Script::LineEnds() const {
   i::Handle<i::Script> script = Utils::OpenHandle(this);
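+  // Wasm scripts have no source text, hence no line ends to report.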
+  if (script->type() == i::Script::TYPE_WASM) return std::vector<int>();
   i::Isolate* isolate = script->GetIsolate();
   i::HandleScope scope(isolate);
   i::Script::InitLineEnds(script);
@@ -8931,7 +9177,7 @@
   return result;
 }
 
-MaybeLocal<String> DebugInterface::Script::Name() const {
+MaybeLocal<String> debug::Script::Name() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   i::HandleScope handle_scope(isolate);
   i::Handle<i::Script> script = Utils::OpenHandle(this);
@@ -8941,7 +9187,7 @@
       handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
 }
 
-MaybeLocal<String> DebugInterface::Script::SourceURL() const {
+MaybeLocal<String> debug::Script::SourceURL() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   i::HandleScope handle_scope(isolate);
   i::Handle<i::Script> script = Utils::OpenHandle(this);
@@ -8951,7 +9197,7 @@
       handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
 }
 
-MaybeLocal<String> DebugInterface::Script::SourceMappingURL() const {
+MaybeLocal<String> debug::Script::SourceMappingURL() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   i::HandleScope handle_scope(isolate);
   i::Handle<i::Script> script = Utils::OpenHandle(this);
@@ -8961,17 +9207,15 @@
       handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
 }
 
-MaybeLocal<String> DebugInterface::Script::ContextData() const {
+MaybeLocal<Value> debug::Script::ContextData() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   i::HandleScope handle_scope(isolate);
   i::Handle<i::Script> script = Utils::OpenHandle(this);
   i::Handle<i::Object> value(script->context_data(), isolate);
-  if (!value->IsString()) return MaybeLocal<String>();
-  return Utils::ToLocal(
-      handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+  return Utils::ToLocal(handle_scope.CloseAndEscape(value));
 }
 
-MaybeLocal<String> DebugInterface::Script::Source() const {
+MaybeLocal<String> debug::Script::Source() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   i::HandleScope handle_scope(isolate);
   i::Handle<i::Script> script = Utils::OpenHandle(this);
@@ -8981,17 +9225,30 @@
       handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
 }
 
+bool debug::Script::IsWasm() const {
+  return Utils::OpenHandle(this)->type() == i::Script::TYPE_WASM;
+}
+
+bool debug::Script::IsModule() const {
+  return Utils::OpenHandle(this)->origin_options().IsModule();
+}
+
 namespace {
 int GetSmiValue(i::Handle<i::FixedArray> array, int index) {
   return i::Smi::cast(array->get(index))->value();
 }
 }  // namespace
 
-bool DebugInterface::Script::GetPossibleBreakpoints(
-    const Location& start, const Location& end,
-    std::vector<Location>* locations) const {
+bool debug::Script::GetPossibleBreakpoints(
+    const debug::Location& start, const debug::Location& end,
+    std::vector<debug::Location>* locations) const {
   CHECK(!start.IsEmpty());
   i::Handle<i::Script> script = Utils::OpenHandle(this);
+  if (script->type() == i::Script::TYPE_WASM) {
+    i::Handle<i::WasmCompiledModule> compiled_module(
+        i::WasmCompiledModule::cast(script->wasm_compiled_module()));
+    return compiled_module->GetPossibleBreakpoints(start, end, locations);
+  }
 
   i::Script::InitLineEnds(script);
   CHECK(script->line_ends()->IsFixedArray());
@@ -9027,7 +9284,7 @@
     if (current_line_end_index > 0) {
       line_offset = GetSmiValue(line_ends, current_line_end_index - 1) + 1;
     }
-    locations->push_back(Location(
+    locations->push_back(debug::Location(
         current_line_end_index + script->line_offset(),
         offset - line_offset +
             (current_line_end_index == 0 ? script->column_offset() : 0)));
@@ -9035,8 +9292,12 @@
   return true;
 }
 
-int DebugInterface::Script::GetSourcePosition(const Location& location) const {
+int debug::Script::GetSourcePosition(const debug::Location& location) const {
   i::Handle<i::Script> script = Utils::OpenHandle(this);
+  if (script->type() == i::Script::TYPE_WASM) {
+    // TODO(clemensh): Return the proper source position for wasm scripts.
+    return 0;
+  }
 
   int line = std::max(location.GetLineNumber() - script->line_offset(), 0);
   int column = location.GetColumnNumber();
@@ -9057,49 +9318,84 @@
   return std::min(prev_line_offset + column + 1, line_offset);
 }
 
-MaybeLocal<DebugInterface::Script> DebugInterface::Script::Wrap(
-    v8::Isolate* v8_isolate, v8::Local<v8::Object> script) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
-  ENTER_V8(isolate);
-  i::HandleScope handle_scope(isolate);
-  i::Handle<i::JSReceiver> script_receiver(Utils::OpenHandle(*script));
-  if (!script_receiver->IsJSValue()) return MaybeLocal<Script>();
-  i::Handle<i::Object> script_value(
-      i::Handle<i::JSValue>::cast(script_receiver)->value(), isolate);
-  if (!script_value->IsScript()) {
-    return MaybeLocal<Script>();
-  }
-  i::Handle<i::Script> script_obj = i::Handle<i::Script>::cast(script_value);
-  if (script_obj->type() != i::Script::TYPE_NORMAL) return MaybeLocal<Script>();
-  return ToApiHandle<DebugInterface::Script>(
-      handle_scope.CloseAndEscape(script_obj));
+debug::WasmScript* debug::WasmScript::Cast(debug::Script* script) {
+  CHECK(script->IsWasm());
+  return static_cast<WasmScript*>(script);
 }
 
-DebugInterface::Location::Location(int lineNumber, int columnNumber)
-    : lineNumber_(lineNumber), columnNumber_(columnNumber) {
-  CHECK(lineNumber >= 0);
-  CHECK(columnNumber >= 0);
+int debug::WasmScript::NumFunctions() const {
+  i::DisallowHeapAllocation no_gc;
+  i::Handle<i::Script> script = Utils::OpenHandle(this);
+  DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+  i::WasmCompiledModule* compiled_module =
+      i::WasmCompiledModule::cast(script->wasm_compiled_module());
+  DCHECK_GE(i::kMaxInt, compiled_module->module()->functions.size());
+  return static_cast<int>(compiled_module->module()->functions.size());
 }
 
-DebugInterface::Location::Location() : lineNumber_(-1), columnNumber_(-1) {}
-
-int DebugInterface::Location::GetLineNumber() const {
-  CHECK(lineNumber_ >= 0);
-  return lineNumber_;
+int debug::WasmScript::NumImportedFunctions() const {
+  i::DisallowHeapAllocation no_gc;
+  i::Handle<i::Script> script = Utils::OpenHandle(this);
+  DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+  i::WasmCompiledModule* compiled_module =
+      i::WasmCompiledModule::cast(script->wasm_compiled_module());
+  DCHECK_GE(i::kMaxInt, compiled_module->module()->num_imported_functions);
+  return static_cast<int>(compiled_module->module()->num_imported_functions);
 }
 
-int DebugInterface::Location::GetColumnNumber() const {
-  CHECK(columnNumber_ >= 0);
-  return columnNumber_;
+std::pair<int, int> debug::WasmScript::GetFunctionRange(
+    int function_index) const {
+  i::DisallowHeapAllocation no_gc;
+  i::Handle<i::Script> script = Utils::OpenHandle(this);
+  DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+  i::WasmCompiledModule* compiled_module =
+      i::WasmCompiledModule::cast(script->wasm_compiled_module());
+  DCHECK_LE(0, function_index);
+  DCHECK_GT(compiled_module->module()->functions.size(), function_index);
+  i::wasm::WasmFunction& func =
+      compiled_module->module()->functions[function_index];
+  DCHECK_GE(i::kMaxInt, func.code_start_offset);
+  DCHECK_GE(i::kMaxInt, func.code_end_offset);
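+  // The returned offsets are byte positions within the module's wire bytes.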
+  return std::make_pair(static_cast<int>(func.code_start_offset),
+                        static_cast<int>(func.code_end_offset));
 }
 
-bool DebugInterface::Location::IsEmpty() const {
-  return lineNumber_ == -1 && columnNumber_ == -1;
+debug::WasmDisassembly debug::WasmScript::DisassembleFunction(
+    int function_index) const {
+  i::DisallowHeapAllocation no_gc;
+  i::Handle<i::Script> script = Utils::OpenHandle(this);
+  DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+  i::WasmCompiledModule* compiled_module =
+      i::WasmCompiledModule::cast(script->wasm_compiled_module());
+  return compiled_module->DisassembleFunction(function_index);
 }
 
-void DebugInterface::GetLoadedScripts(
-    v8::Isolate* v8_isolate,
-    PersistentValueVector<DebugInterface::Script>& scripts) {
+debug::Location::Location(int line_number, int column_number)
+    : line_number_(line_number), column_number_(column_number) {
+  CHECK(line_number >= 0);
+  CHECK(column_number >= 0);
+}
+
+debug::Location::Location()
+    : line_number_(v8::Function::kLineOffsetNotFound),
+      column_number_(v8::Function::kLineOffsetNotFound) {}
+
+int debug::Location::GetLineNumber() const {
+  CHECK(line_number_ >= 0);
+  return line_number_;
+}
+
+int debug::Location::GetColumnNumber() const {
+  CHECK(column_number_ >= 0);
+  return column_number_;
+}
+
+bool debug::Location::IsEmpty() const {
+  return line_number_ == -1 && column_number_ == -1;
+}
+
+void debug::GetLoadedScripts(v8::Isolate* v8_isolate,
+                             PersistentValueVector<debug::Script>& scripts) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
   ENTER_V8(isolate);
   // TODO(kozyatinskiy): remove this GC once tests are dealt with.
@@ -9120,6 +9416,127 @@
   }
 }
 
+MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
+                                                        Local<String> source) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, UnboundScript);
+  i::ScriptData* script_data = NULL;
+  i::Handle<i::String> str = Utils::OpenHandle(*source);
+  i::Handle<i::SharedFunctionInfo> result;
+  {
+    ScriptOriginOptions origin_options;
+    result = i::Compiler::GetSharedFunctionInfoForScript(
+        str, i::Handle<i::Object>(), 0, 0, origin_options,
+        i::Handle<i::Object>(), isolate->native_context(), NULL, &script_data,
+        ScriptCompiler::kNoCompileOptions, i::INSPECTOR_CODE);
+    has_pending_exception = result.is_null();
+    RETURN_ON_FAILED_EXECUTION(UnboundScript);
+  }
+  RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
+}
+
+void debug::SetDebugDelegate(Isolate* v8_isolate,
+                             debug::DebugDelegate* delegate) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  ENTER_V8(isolate);
+  isolate->debug()->SetDebugDelegate(delegate, false);
+}
+
+void debug::ResetBlackboxedStateCache(Isolate* v8_isolate,
+                                      v8::Local<debug::Script> script) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  ENTER_V8(isolate);
+  i::DisallowHeapAllocation no_gc;
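+  // Clear the cached blackboxed state on every function belonging to the
+  // script, forcing it to be recomputed on next use.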
+  i::SharedFunctionInfo::ScriptIterator iter(Utils::OpenHandle(*script));
+  while (i::SharedFunctionInfo* info = iter.Next()) {
+    info->set_computed_debug_is_blackboxed(false);
+  }
+}
+
+int debug::EstimatedValueSize(Isolate* v8_isolate, v8::Local<v8::Value> value) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  ENTER_V8(isolate);
+  i::Handle<i::Object> object = Utils::OpenHandle(*value);
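+  // Smis are immediate values; everything else reports its heap object size.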
+  if (object->IsSmi()) return i::kPointerSize;
+  CHECK(object->IsHeapObject());
+  return i::Handle<i::HeapObject>::cast(object)->Size();
+}
+
+v8::MaybeLocal<v8::Array> debug::EntriesPreview(Isolate* v8_isolate,
+                                                v8::Local<v8::Value> value,
+                                                bool* is_key_value) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  ENTER_V8(isolate);
+  if (value->IsMap()) {
+    *is_key_value = true;
+    return value.As<Map>()->AsArray();
+  }
+  if (value->IsSet()) {
+    *is_key_value = false;
+    return value.As<Set>()->AsArray();
+  }
+
+  i::Handle<i::Object> object = Utils::OpenHandle(*value);
+  if (object->IsJSWeakCollection()) {
+    *is_key_value = object->IsJSWeakMap();
+    return Utils::ToLocal(i::JSWeakCollection::GetEntries(
+        i::Handle<i::JSWeakCollection>::cast(object), 0));
+  }
+  if (object->IsJSMapIterator()) {
+    i::Handle<i::JSMapIterator> iterator =
+        i::Handle<i::JSMapIterator>::cast(object);
+    int iterator_kind = i::Smi::cast(iterator->kind())->value();
+    *is_key_value = iterator_kind == i::JSMapIterator::kKindEntries;
+    if (!iterator->HasMore()) return v8::Array::New(v8_isolate);
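+    // Preview only the entries the iterator has not yet produced, starting
+    // at its current index.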
+    return Utils::ToLocal(MapAsArray(isolate, iterator->table(),
+                                     i::Smi::cast(iterator->index())->value(),
+                                     iterator_kind));
+  }
+  if (object->IsJSSetIterator()) {
+    i::Handle<i::JSSetIterator> it = i::Handle<i::JSSetIterator>::cast(object);
+    *is_key_value = false;
+    if (!it->HasMore()) return v8::Array::New(v8_isolate);
+    return Utils::ToLocal(
+        SetAsArray(isolate, it->table(), i::Smi::cast(it->index())->value()));
+  }
+  return v8::MaybeLocal<v8::Array>();
+}
+
+MaybeLocal<debug::Script> debug::GeneratorObject::Script() {
+  i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
+  i::Object* maybe_script = obj->function()->shared()->script();
+  if (!maybe_script->IsScript()) return MaybeLocal<debug::Script>();
+  i::Handle<i::Script> script(i::Script::cast(maybe_script), obj->GetIsolate());
+  return ToApiHandle<debug::Script>(script);
+}
+
+Local<Function> debug::GeneratorObject::Function() {
+  i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
+  return Utils::ToLocal(handle(obj->function()));
+}
+
+debug::Location debug::GeneratorObject::SuspendedLocation() {
+  i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
+  CHECK(obj->is_suspended());
+  i::Object* maybe_script = obj->function()->shared()->script();
+  if (!maybe_script->IsScript()) return debug::Location();
+  i::Handle<i::Script> script(i::Script::cast(maybe_script), obj->GetIsolate());
+  i::Script::PositionInfo info;
+  i::Script::GetPositionInfo(script, obj->source_position(), &info,
+                             i::Script::WITH_OFFSET);
+  return debug::Location(info.line, info.column);
+}
+
+bool debug::GeneratorObject::IsSuspended() {
+  return Utils::OpenHandle(this)->is_suspended();
+}
+
+v8::Local<debug::GeneratorObject> debug::GeneratorObject::Cast(
+    v8::Local<v8::Value> value) {
+  CHECK(value->IsGeneratorObject());
+  return ToApiHandle<debug::GeneratorObject>(Utils::OpenHandle(*value));
+}
+
 Local<String> CpuProfileNode::GetFunctionName() const {
   const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
   i::Isolate* isolate = node->isolate();
@@ -9137,6 +9554,56 @@
   }
 }
 
+debug::Coverage::FunctionData::FunctionData(i::CoverageFunction* function,
+                                            Local<debug::Script> script)
+    : function_(function) {
+  i::Handle<i::Script> i_script = v8::Utils::OpenHandle(*script);
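+  // Translate the function's character offsets into line/column locations.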
+  i::Script::PositionInfo start;
+  i::Script::PositionInfo end;
+  i::Script::GetPositionInfo(i_script, function->start, &start,
+                             i::Script::WITH_OFFSET);
+  i::Script::GetPositionInfo(i_script, function->end, &end,
+                             i::Script::WITH_OFFSET);
+  start_ = Location(start.line, start.column);
+  end_ = Location(end.line, end.column);
+}
+
+uint32_t debug::Coverage::FunctionData::Count() { return function_->count; }
+
+MaybeLocal<String> debug::Coverage::FunctionData::Name() {
+  return ToApiHandle<String>(function_->name);
+}
+
+Local<debug::Script> debug::Coverage::ScriptData::GetScript() {
+  return ToApiHandle<debug::Script>(script_->script);
+}
+
+size_t debug::Coverage::ScriptData::FunctionCount() {
+  return script_->functions.size();
+}
+
+debug::Coverage::FunctionData debug::Coverage::ScriptData::GetFunctionData(
+    size_t i) {
+  return FunctionData(&script_->functions.at(i), GetScript());
+}
+
+debug::Coverage::~Coverage() { delete coverage_; }
+
+size_t debug::Coverage::ScriptCount() { return coverage_->size(); }
+
+debug::Coverage::ScriptData debug::Coverage::GetScriptData(size_t i) {
+  return ScriptData(&coverage_->at(i));
+}
+
+debug::Coverage debug::Coverage::Collect(Isolate* isolate, bool reset_count) {
+  return Coverage(i::Coverage::Collect(reinterpret_cast<i::Isolate*>(isolate),
+                                       reset_count));
+}
+
+void debug::Coverage::TogglePrecise(Isolate* isolate, bool enable) {
+  i::Coverage::TogglePrecise(reinterpret_cast<i::Isolate*>(isolate), enable);
+}
+
 const char* CpuProfileNode::GetFunctionNameStr() const {
   const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
   return node->entry()->name();
@@ -9392,13 +9859,12 @@
 
 
 int HeapGraphNode::GetChildrenCount() const {
-  return ToInternal(this)->children().length();
+  return ToInternal(this)->children_count();
 }
 
 
 const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
-  return reinterpret_cast<const HeapGraphEdge*>(
-      ToInternal(this)->children()[index]);
+  return reinterpret_cast<const HeapGraphEdge*>(ToInternal(this)->child(index));
 }
 
 
@@ -9548,6 +10014,11 @@
                                                                callback);
 }
 
+void HeapProfiler::SetGetRetainerInfosCallback(
+    GetRetainerInfosCallback callback) {
+  reinterpret_cast<i::HeapProfiler*>(this)->SetGetRetainerInfosCallback(
+      callback);
+}
 
 size_t HeapProfiler::GetProfilerMemorySize() {
   return reinterpret_cast<i::HeapProfiler*>(this)->
diff --git a/src/api.h b/src/api.h
index 6fcaa90..8deb117 100644
--- a/src/api.h
+++ b/src/api.h
@@ -11,7 +11,6 @@
 #include "src/factory.h"
 #include "src/isolate.h"
 #include "src/list.h"
-#include "src/objects-inl.h"
 
 namespace v8 {
 
@@ -70,47 +69,49 @@
   static RegisteredExtension* first_extension_;
 };
 
-#define OPEN_HANDLE_LIST(V)                  \
-  V(Template, TemplateInfo)                  \
-  V(FunctionTemplate, FunctionTemplateInfo)  \
-  V(ObjectTemplate, ObjectTemplateInfo)      \
-  V(Signature, FunctionTemplateInfo)         \
-  V(AccessorSignature, FunctionTemplateInfo) \
-  V(Data, Object)                            \
-  V(RegExp, JSRegExp)                        \
-  V(Object, JSReceiver)                      \
-  V(Array, JSArray)                          \
-  V(Map, JSMap)                              \
-  V(Set, JSSet)                              \
-  V(ArrayBuffer, JSArrayBuffer)              \
-  V(ArrayBufferView, JSArrayBufferView)      \
-  V(TypedArray, JSTypedArray)                \
-  V(Uint8Array, JSTypedArray)                \
-  V(Uint8ClampedArray, JSTypedArray)         \
-  V(Int8Array, JSTypedArray)                 \
-  V(Uint16Array, JSTypedArray)               \
-  V(Int16Array, JSTypedArray)                \
-  V(Uint32Array, JSTypedArray)               \
-  V(Int32Array, JSTypedArray)                \
-  V(Float32Array, JSTypedArray)              \
-  V(Float64Array, JSTypedArray)              \
-  V(DataView, JSDataView)                    \
-  V(SharedArrayBuffer, JSArrayBuffer)        \
-  V(Name, Name)                              \
-  V(String, String)                          \
-  V(Symbol, Symbol)                          \
-  V(Script, JSFunction)                      \
-  V(UnboundScript, SharedFunctionInfo)       \
-  V(Module, Module)                          \
-  V(Function, JSReceiver)                    \
-  V(Message, JSMessageObject)                \
-  V(Context, Context)                        \
-  V(External, Object)                        \
-  V(StackTrace, JSArray)                     \
-  V(StackFrame, JSObject)                    \
-  V(Proxy, JSProxy)                          \
-  V(NativeWeakMap, JSWeakMap)                \
-  V(DebugInterface::Script, Script)
+#define OPEN_HANDLE_LIST(V)                    \
+  V(Template, TemplateInfo)                    \
+  V(FunctionTemplate, FunctionTemplateInfo)    \
+  V(ObjectTemplate, ObjectTemplateInfo)        \
+  V(Signature, FunctionTemplateInfo)           \
+  V(AccessorSignature, FunctionTemplateInfo)   \
+  V(Data, Object)                              \
+  V(RegExp, JSRegExp)                          \
+  V(Object, JSReceiver)                        \
+  V(Array, JSArray)                            \
+  V(Map, JSMap)                                \
+  V(Set, JSSet)                                \
+  V(ArrayBuffer, JSArrayBuffer)                \
+  V(ArrayBufferView, JSArrayBufferView)        \
+  V(TypedArray, JSTypedArray)                  \
+  V(Uint8Array, JSTypedArray)                  \
+  V(Uint8ClampedArray, JSTypedArray)           \
+  V(Int8Array, JSTypedArray)                   \
+  V(Uint16Array, JSTypedArray)                 \
+  V(Int16Array, JSTypedArray)                  \
+  V(Uint32Array, JSTypedArray)                 \
+  V(Int32Array, JSTypedArray)                  \
+  V(Float32Array, JSTypedArray)                \
+  V(Float64Array, JSTypedArray)                \
+  V(DataView, JSDataView)                      \
+  V(SharedArrayBuffer, JSArrayBuffer)          \
+  V(Name, Name)                                \
+  V(String, String)                            \
+  V(Symbol, Symbol)                            \
+  V(Script, JSFunction)                        \
+  V(UnboundScript, SharedFunctionInfo)         \
+  V(Module, Module)                            \
+  V(Function, JSReceiver)                      \
+  V(Message, JSMessageObject)                  \
+  V(Context, Context)                          \
+  V(External, Object)                          \
+  V(StackTrace, JSArray)                       \
+  V(StackFrame, JSObject)                      \
+  V(Proxy, JSProxy)                            \
+  V(NativeWeakMap, JSWeakMap)                  \
+  V(debug::GeneratorObject, JSGeneratorObject) \
+  V(debug::Script, Script)                     \
+  V(Promise, JSPromise)
 
 class Utils {
  public:
@@ -349,8 +350,7 @@
 
 namespace internal {
 
-
-class DeferredHandles {
+class V8_EXPORT_PRIVATE DeferredHandles {
  public:
   ~DeferredHandles();
 
diff --git a/src/arguments.cc b/src/arguments.cc
index 815f5de..d246aad 100644
--- a/src/arguments.cc
+++ b/src/arguments.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/arguments.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/arguments.h b/src/arguments.h
index d5d2c02..1d91b20 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -6,7 +6,7 @@
 #define V8_ARGUMENTS_H_
 
 #include "src/allocation.h"
-#include "src/objects-inl.h"
+#include "src/objects.h"
 #include "src/tracing/trace-event.h"
 
 namespace v8 {
@@ -41,7 +41,8 @@
                                         index * kPointerSize));
   }
 
-  template <class S> Handle<S> at(int index) {
+  template <class S = Object>
+  Handle<S> at(int index) {
     Object** value = &((*this)[index]);
     // This cast checks that the object we're accessing does indeed have the
     // expected type.
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index bc501b1..5608256 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -41,14 +41,14 @@
 
 #include "src/assembler.h"
 #include "src/debug/debug.h"
-
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
 bool CpuFeatures::SupportsCrankshaft() { return true; }
 
-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsSimd128() { return true; }
 
 int DoubleRegister::NumRegisters() {
   return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
@@ -590,6 +590,17 @@
   }
 }
 
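+// These two Code* overloads were previously defined inline in
+// assembler-arm.h; defining them here instead plausibly lets the header
+// avoid depending on the Code definition (objects-inl.h is now included
+// above).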
+Address Assembler::target_address_at(Address pc, Code* code) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+                                      Address target,
+                                      ICacheFlushMode icache_flush_mode) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index d90dc76..ec75b7d 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -351,13 +351,18 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+  return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+}
+
 void RelocInfo::unchecked_update_wasm_memory_reference(
     Address address, ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
 }
 
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
-                                                  ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+                                           ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_,
                                    reinterpret_cast<Address>(size), flush_mode);
 }
@@ -483,30 +488,6 @@
   }
 }
 
-
-NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
-  base_ = base;
-  switch (registers_count) {
-    case 1:
-      type_ = nlt_1;
-      break;
-    case 2:
-      type_ = nlt_2;
-      break;
-    case 3:
-      type_ = nlt_3;
-      break;
-    case 4:
-      type_ = nlt_4;
-      break;
-    default:
-      UNREACHABLE();
-      type_ = nlt_1;
-      break;
-  }
-}
-
-
 // -----------------------------------------------------------------------------
 // Specific instructions, constants, and masks.
 
@@ -2873,7 +2854,6 @@
        vm);
 }
 
-
 void Assembler::vmov(const DwVfpRegister dst,
                      const VmovIndex index,
                      const Register src,
@@ -2969,7 +2949,6 @@
   emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
 }
 
-
 // Type of data to read from or write to VFP register.
 // Used as specifier in generic vcvt instruction.
 enum VFPType { S32, U32, F32, F64 };
@@ -3899,32 +3878,805 @@
   dst.split_code(&vd, &d);
   int vm, m;
   src.split_code(&vm, &m);
-  emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
-        (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
+  int U = NeonU(dt);
+  int imm3 = 1 << NeonSz(dt);
+  emit(0xFU * B28 | B25 | U * B24 | B23 | d * B22 | imm3 * B19 | vd * B12 |
+       0xA * B8 | m * B5 | B4 | vm);
 }
 
-void Assembler::vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
-  DCHECK(VfpRegisterIsAvailable(srcdst0));
-  DCHECK(VfpRegisterIsAvailable(srcdst1));
-  DCHECK(!srcdst0.is(kScratchDoubleReg));
-  DCHECK(!srcdst1.is(kScratchDoubleReg));
-
-  if (srcdst0.is(srcdst1)) return;  // Swapping aliased registers emits nothing.
-
-  if (CpuFeatures::IsSupported(NEON)) {
-    // Instruction details available in ARM DDI 0406C.b, A8.8.418.
-    // 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
-    // Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
-    int vd, d;
-    srcdst0.split_code(&vd, &d);
-    int vm, m;
-    srcdst1.split_code(&vm, &m);
-    emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
-  } else {
-    vmov(kScratchDoubleReg, srcdst0);
-    vmov(srcdst0, srcdst1);
-    vmov(srcdst1, kScratchDoubleReg);
+static int EncodeScalar(NeonDataType dt, int index) {
+  int opc1_opc2 = 0;
+  DCHECK_LE(0, index);
+  switch (dt) {
+    case NeonS8:
+    case NeonU8:
+      DCHECK_GT(8, index);
+      opc1_opc2 = 0x8 | index;
+      break;
+    case NeonS16:
+    case NeonU16:
+      DCHECK_GT(4, index);
+      opc1_opc2 = 0x1 | (index << 1);
+      break;
+    case NeonS32:
+    case NeonU32:
+      DCHECK_GT(2, index);
+      opc1_opc2 = index << 2;
+      break;
+    default:
+      UNREACHABLE();
+      break;
   }
+  return (opc1_opc2 >> 2) * B21 | (opc1_opc2 & 0x3) * B5;
+}
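+
+// Worked example (illustrative): for NeonS16 with index 2, opc1_opc2 =
+// 0x1 | (2 << 1) = 0x5; the top two bits (0b01) form the opc1 field at
+// bit 21 and the low two bits (0b01) form the opc2 field at bit 5.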
+
+void Assembler::vmov(NeonDataType dt, DwVfpRegister dst, int index,
+                     Register src) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.940.
+  // vmov ARM core register to scalar.
+  DCHECK(dt == NeonS32 || dt == NeonU32 || IsEnabled(NEON));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int opc1_opc2 = EncodeScalar(dt, index);
+  emit(0xEEu * B24 | vd * B16 | src.code() * B12 | 0xB * B8 | d * B7 | B4 |
+       opc1_opc2);
+}
+
+void Assembler::vmov(NeonDataType dt, Register dst, DwVfpRegister src,
+                     int index) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.942.
+  // vmov ARM scalar to core register.
+  DCHECK(dt == NeonS32 || dt == NeonU32 || IsEnabled(NEON));
+  int vn, n;
+  src.split_code(&vn, &n);
+  int opc1_opc2 = EncodeScalar(dt, index);
+  int u = NeonU(dt);
+  emit(0xEEu * B24 | u * B23 | B20 | vn * B16 | dst.code() * B12 | 0xB * B8 |
+       n * B7 | B4 | opc1_opc2);
+}
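+
+// Illustrative usage of the scalar moves above (register choices are
+// hypothetical): read lane 1 of d0 into r0 as a signed 16-bit value, then
+// write r1 back into the same lane:
+//   vmov(NeonS16, r0, d0, 1);
+//   vmov(NeonS16, d0, 1, r1);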
+
+void Assembler::vmov(const QwNeonRegister dst, const QwNeonRegister src) {
+  // Instruction details available in ARM DDI 0406C.b, A8-938.
+  // vmov is encoded as vorr.
+  vorr(dst, src, src);
+}
+
+void Assembler::vmvn(const QwNeonRegister dst, const QwNeonRegister src) {
+  DCHECK(IsEnabled(NEON));
+  // Instruction details available in ARM DDI 0406C.b, A8-966.
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  emit(0x1E7U * B23 | d * B22 | 3 * B20 | vd * B12 | 0x17 * B6 | m * B5 | vm);
+}
+
+void Assembler::vswp(DwVfpRegister dst, DwVfpRegister src) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.418.
+  // 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
+  // Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(IsEnabled(NEON));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
+}
+
+void Assembler::vswp(QwNeonRegister dst, QwNeonRegister src) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.418.
+  // 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
+  // Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(IsEnabled(NEON));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | B6 | m * B5 |
+       vm);
+}
+
+void Assembler::vdup(NeonSize size, const QwNeonRegister dst,
+                     const Register src) {
+  DCHECK(IsEnabled(NEON));
+  // Instruction details available in ARM DDI 0406C.b, A8-886.
+  int B = 0, E = 0;
+  switch (size) {
+    case Neon8:
+      B = 1;
+      break;
+    case Neon16:
+      E = 1;
+      break;
+    case Neon32:
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  int vd, d;
+  dst.split_code(&vd, &d);
+
+  emit(al | 0x1D * B23 | B * B22 | B21 | vd * B16 | src.code() * B12 |
+       0xB * B8 | d * B7 | E * B5 | B4);
+}
+
+void Assembler::vdup(const QwNeonRegister dst, const SwVfpRegister src) {
+  DCHECK(IsEnabled(NEON));
+  // Instruction details available in ARM DDI 0406C.b, A8-884.
+  int index = src.code() & 1;
+  int d_reg = src.code() / 2;
+  int imm4 = 4 | index << 3;  // esize = 32, index in bit 3.
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  DwVfpRegister::from_code(d_reg).split_code(&vm, &m);
+
+  emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | imm4 * B16 | vd * B12 | 0x18 * B7 |
+       B6 | m * B5 | vm);
+}
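+
+// Worked example (illustrative): for src = s5, index = 1 and d_reg = 2, so
+// the duplicated lane is d2[1] (the upper half of d2, i.e. s5) and
+// imm4 = 4 | (1 << 3) = 0xC.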
+
+// Encode NEON vcvt.src_type.dst_type instruction.
+static Instr EncodeNeonVCVT(const VFPType dst_type, const QwNeonRegister dst,
+                            const VFPType src_type, const QwNeonRegister src) {
+  DCHECK(src_type != dst_type);
+  DCHECK(src_type == F32 || dst_type == F32);
+  // Instruction details available in ARM DDI 0406C.b, A8.8.868.
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+
+  int op = 0;
+  if (src_type == F32) {
+    DCHECK(dst_type == S32 || dst_type == U32);
+    op = dst_type == U32 ? 3 : 2;
+  } else {
+    DCHECK(src_type == S32 || src_type == U32);
+    op = src_type == U32 ? 1 : 0;
+  }
+
+  return 0x1E7U * B23 | d * B22 | 0x3B * B16 | vd * B12 | 0x3 * B9 | op * B7 |
+         B6 | m * B5 | vm;
+}
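+
+// The op field above selects the conversion: 0 = f32 <- s32, 1 = f32 <- u32,
+// 2 = s32 <- f32, 3 = u32 <- f32.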
+
+void Assembler::vcvt_f32_s32(const QwNeonRegister dst,
+                             const QwNeonRegister src) {
+  DCHECK(IsEnabled(NEON));
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src));
+  emit(EncodeNeonVCVT(F32, dst, S32, src));
+}
+
+void Assembler::vcvt_f32_u32(const QwNeonRegister dst,
+                             const QwNeonRegister src) {
+  DCHECK(IsEnabled(NEON));
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src));
+  emit(EncodeNeonVCVT(F32, dst, U32, src));
+}
+
+void Assembler::vcvt_s32_f32(const QwNeonRegister dst,
+                             const QwNeonRegister src) {
+  DCHECK(IsEnabled(NEON));
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src));
+  emit(EncodeNeonVCVT(S32, dst, F32, src));
+}
+
+void Assembler::vcvt_u32_f32(const QwNeonRegister dst,
+                             const QwNeonRegister src) {
+  DCHECK(IsEnabled(NEON));
+  DCHECK(VfpRegisterIsAvailable(dst));
+  DCHECK(VfpRegisterIsAvailable(src));
+  emit(EncodeNeonVCVT(U32, dst, F32, src));
+}
+
+// op is instr->Bits(11, 7).
+static Instr EncodeNeonUnaryOp(int op, bool is_float, NeonSize size,
+                               const QwNeonRegister dst,
+                               const QwNeonRegister src) {
+  DCHECK_IMPLIES(is_float, size == Neon32);
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  int F = is_float ? 1 : 0;
+  return 0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | B16 | vd * B12 |
+         F * B10 | B8 | op * B7 | B6 | m * B5 | vm;
+}
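+
+// The callers below pass op = 0x6 for vabs and op = 0x7 for vneg.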
+
+void Assembler::vabs(const QwNeonRegister dst, const QwNeonRegister src) {
+  // Qd = vabs.f<size>(Qn, Qm) SIMD floating point absolute value.
+  // Instruction details available in ARM DDI 0406C.b, A8.8.824.
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonUnaryOp(0x6, true, Neon32, dst, src));
+}
+
+void Assembler::vabs(NeonSize size, const QwNeonRegister dst,
+                     const QwNeonRegister src) {
+  // Qd = vabs.s<size>(Qn, Qm) SIMD integer absolute value.
+  // Instruction details available in ARM DDI 0406C.b, A8.8.824.
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonUnaryOp(0x6, false, size, dst, src));
+}
+
+void Assembler::vneg(const QwNeonRegister dst, const QwNeonRegister src) {
+  // Qd = vneg.f<size>(Qn, Qm) SIMD floating point negate.
+  // Instruction details available in ARM DDI 0406C.b, A8.8.968.
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonUnaryOp(0x7, true, Neon32, dst, src));
+}
+
+void Assembler::vneg(NeonSize size, const QwNeonRegister dst,
+                     const QwNeonRegister src) {
+  // Qd = vneg.s<size>(Qn, Qm) SIMD integer negate.
+  // Instruction details available in ARM DDI 0406C.b, A8.8.968.
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonUnaryOp(0x7, false, size, dst, src));
+}
+
+void Assembler::veor(DwVfpRegister dst, DwVfpRegister src1,
+                     DwVfpRegister src2) {
+  // Dd = veor(Dn, Dm) 64-bit integer exclusive OR.
+  // Instruction details available in ARM DDI 0406C.b, A8.8.888.
+  DCHECK(IsEnabled(NEON));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  src1.split_code(&vn, &n);
+  int vm, m;
+  src2.split_code(&vm, &m);
+  emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | B8 | n * B7 | m * B5 |
+       B4 | vm);
+}
+
+enum BinaryBitwiseOp { VAND, VBIC, VBIF, VBIT, VBSL, VEOR, VORR, VORN };
+
+static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op,
+                                       const QwNeonRegister dst,
+                                       const QwNeonRegister src1,
+                                       const QwNeonRegister src2) {
+  int op_encoding = 0;
+  switch (op) {
+    case VBIC:
+      op_encoding = 0x1 * B20;
+      break;
+    case VBIF:
+      op_encoding = B24 | 0x3 * B20;
+      break;
+    case VBIT:
+      op_encoding = B24 | 0x2 * B20;
+      break;
+    case VBSL:
+      op_encoding = B24 | 0x1 * B20;
+      break;
+    case VEOR:
+      op_encoding = B24;
+      break;
+    case VORR:
+      op_encoding = 0x2 * B20;
+      break;
+    case VORN:
+      op_encoding = 0x3 * B20;
+      break;
+    case VAND:
+      // op_encoding is 0.
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  src1.split_code(&vn, &n);
+  int vm, m;
+  src2.split_code(&vm, &m);
+  return 0x1E4U * B23 | op_encoding | d * B22 | vn * B16 | vd * B12 | B8 |
+         n * B7 | B6 | m * B5 | B4 | vm;
+}
+
+void Assembler::vand(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  // Qd = vand(Qn, Qm) SIMD AND.
+  // Instruction details available in ARM DDI 0406C.b, A8.8.836.
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonBinaryBitwiseOp(VAND, dst, src1, src2));
+}
+
+void Assembler::vbsl(QwNeonRegister dst, const QwNeonRegister src1,
+                     const QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vbsl(Qn, Qm) SIMD bitwise select.
+  // Instruction details available in ARM DDI 0406C.b, A8-844.
+  emit(EncodeNeonBinaryBitwiseOp(VBSL, dst, src1, src2));
+}
+
+void Assembler::veor(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  // Qd = veor(Qn, Qm) SIMD exclusive OR.
+  // Instruction details available in ARM DDI 0406C.b, A8.8.888.
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonBinaryBitwiseOp(VEOR, dst, src1, src2));
+}
+
+void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  // Qd = vorr(Qn, Qm) SIMD OR.
+  // Instruction details available in ARM DDI 0406C.b, A8.8.976.
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonBinaryBitwiseOp(VORR, dst, src1, src2));
+}
+
+enum FPBinOp {
+  VADDF,
+  VSUBF,
+  VMULF,
+  VMINF,
+  VMAXF,
+  VRECPS,
+  VRSQRTS,
+  VCEQF,
+  VCGEF,
+  VCGTF
+};
+
+static Instr EncodeNeonBinOp(FPBinOp op, QwNeonRegister dst,
+                             QwNeonRegister src1, QwNeonRegister src2) {
+  int op_encoding = 0;
+  switch (op) {
+    case VADDF:
+      op_encoding = 0xD * B8;
+      break;
+    case VSUBF:
+      op_encoding = B21 | 0xD * B8;
+      break;
+    case VMULF:
+      op_encoding = B24 | 0xD * B8 | B4;
+      break;
+    case VMINF:
+      op_encoding = B21 | 0xF * B8;
+      break;
+    case VMAXF:
+      op_encoding = 0xF * B8;
+      break;
+    case VRECPS:
+      op_encoding = 0xF * B8 | B4;
+      break;
+    case VRSQRTS:
+      op_encoding = B21 | 0xF * B8 | B4;
+      break;
+    case VCEQF:
+      op_encoding = 0xE * B8;
+      break;
+    case VCGEF:
+      op_encoding = B24 | 0xE * B8;
+      break;
+    case VCGTF:
+      op_encoding = B24 | B21 | 0xE * B8;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  src1.split_code(&vn, &n);
+  int vm, m;
+  src2.split_code(&vm, &m);
+  return 0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | n * B7 | B6 | m * B5 |
+         vm | op_encoding;
+}
+
+enum IntegerBinOp {
+  VADD,
+  VQADD,
+  VSUB,
+  VQSUB,
+  VMUL,
+  VMIN,
+  VMAX,
+  VTST,
+  VCEQ,
+  VCGE,
+  VCGT
+};
+
+static Instr EncodeNeonBinOp(IntegerBinOp op, NeonDataType dt,
+                             const QwNeonRegister dst,
+                             const QwNeonRegister src1,
+                             const QwNeonRegister src2) {
+  int op_encoding = 0;
+  switch (op) {
+    case VADD:
+      op_encoding = 0x8 * B8;
+      break;
+    case VQADD:
+      op_encoding = B4;
+      break;
+    case VSUB:
+      op_encoding = B24 | 0x8 * B8;
+      break;
+    case VQSUB:
+      op_encoding = 0x2 * B8 | B4;
+      break;
+    case VMUL:
+      op_encoding = 0x9 * B8 | B4;
+      break;
+    case VMIN:
+      op_encoding = 0x6 * B8 | B4;
+      break;
+    case VMAX:
+      op_encoding = 0x6 * B8;
+      break;
+    case VTST:
+      op_encoding = 0x8 * B8 | B4;
+      break;
+    case VCEQ:
+      op_encoding = B24 | 0x8 * B8 | B4;
+      break;
+    case VCGE:
+      op_encoding = 0x3 * B8 | B4;
+      break;
+    case VCGT:
+      op_encoding = 0x3 * B8;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  src1.split_code(&vn, &n);
+  int vm, m;
+  src2.split_code(&vm, &m);
+  int size = NeonSz(dt);
+  int u = NeonU(dt);
+  return 0x1E4U * B23 | u * B24 | d * B22 | size * B20 | vn * B16 | vd * B12 |
+         n * B7 | B6 | m * B5 | vm | op_encoding;
+}
+
+static Instr EncodeNeonBinOp(IntegerBinOp op, NeonSize size,
+                             const QwNeonRegister dst,
+                             const QwNeonRegister src1,
+                             const QwNeonRegister src2) {
+  // Map NeonSize values to the signed values in NeonDataType, so the U bit
+  // will be 0.
+  return EncodeNeonBinOp(op, static_cast<NeonDataType>(size), dst, src1, src2);
+}
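+
+// Illustrative note: this cast assumes the NeonSize values coincide with the
+// signed NeonDataType values (e.g. Neon16 == NeonS16), so NeonU() of the
+// result is 0.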
+
+void Assembler::vadd(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vadd(Qn, Qm) SIMD floating point addition.
+  // Instruction details available in ARM DDI 0406C.b, A8-830.
+  emit(EncodeNeonBinOp(VADDF, dst, src1, src2));
+}
+
+void Assembler::vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vadd(Qn, Qm) SIMD integer addition.
+  // Instruction details available in ARM DDI 0406C.b, A8-828.
+  emit(EncodeNeonBinOp(VADD, size, dst, src1, src2));
+}
+
+void Assembler::vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+                      QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vqadd(Qn, Qm) SIMD integer saturating addition.
+  // Instruction details available in ARM DDI 0406C.b, A8-996.
+  emit(EncodeNeonBinOp(VQADD, dt, dst, src1, src2));
+}
+
+void Assembler::vsub(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vsub(Qn, Qm) SIMD floating point subtraction.
+  // Instruction details available in ARM DDI 0406C.b, A8-1086.
+  emit(EncodeNeonBinOp(VSUBF, dst, src1, src2));
+}
+
+void Assembler::vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vsub(Qn, Qm) SIMD integer subtraction.
+  // Instruction details available in ARM DDI 0406C.b, A8-1084.
+  emit(EncodeNeonBinOp(VSUB, size, dst, src1, src2));
+}
+
+void Assembler::vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+                      QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vqsub(Qn, Qm) SIMD integer saturating subtraction.
+  // Instruction details available in ARM DDI 0406C.b, A8-1020.
+  emit(EncodeNeonBinOp(VQSUB, dt, dst, src1, src2));
+}
+
+void Assembler::vmul(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vmul(Qn, Qm) SIMD floating point multiply.
+  // Instruction details available in ARM DDI 0406C.b, A8-958.
+  emit(EncodeNeonBinOp(VMULF, dst, src1, src2));
+}
+
+void Assembler::vmul(NeonSize size, QwNeonRegister dst,
+                     const QwNeonRegister src1, const QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vmul(Qn, Qm) SIMD integer multiply.
+  // Instruction details available in ARM DDI 0406C.b, A8-960.
+  emit(EncodeNeonBinOp(VMUL, size, dst, src1, src2));
+}
+
+void Assembler::vmin(const QwNeonRegister dst, const QwNeonRegister src1,
+                     const QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vmin(Qn, Qm) SIMD floating point MIN.
+  // Instruction details available in ARM DDI 0406C.b, A8-928.
+  emit(EncodeNeonBinOp(VMINF, dst, src1, src2));
+}
+
+void Assembler::vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vmin(Qn, Qm) SIMD integer MIN.
+  // Instruction details available in ARM DDI 0406C.b, A8-926.
+  emit(EncodeNeonBinOp(VMIN, dt, dst, src1, src2));
+}
+
+void Assembler::vmax(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vmax(Qn, Qm) SIMD floating point MAX.
+  // Instruction details available in ARM DDI 0406C.b, A8-928.
+  emit(EncodeNeonBinOp(VMAXF, dst, src1, src2));
+}
+
+void Assembler::vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vmax(Qn, Qm) SIMD integer MAX.
+  // Instruction details available in ARM DDI 0406C.b, A8-926.
+  emit(EncodeNeonBinOp(VMAX, dt, dst, src1, src2));
+}
+
+enum NeonShiftOp { VSHL, VSHR };
+
+static Instr EncodeNeonShiftOp(NeonShiftOp op, NeonDataType dt,
+                               QwNeonRegister dst, QwNeonRegister src,
+                               int shift) {
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  int size_in_bits = kBitsPerByte << NeonSz(dt);
+  int op_encoding = 0;
+  int imm6 = 0;
+  if (op == VSHL) {
+    DCHECK(shift >= 0 && size_in_bits > shift);
+    imm6 = size_in_bits + shift;
+    op_encoding = 0x5 * B8;
+  } else {
+    DCHECK_EQ(VSHR, op);
+    DCHECK(shift > 0 && size_in_bits >= shift);
+    imm6 = 2 * size_in_bits - shift;
+    op_encoding = NeonU(dt) * B24;
+  }
+  return 0x1E5U * B23 | d * B22 | imm6 * B16 | vd * B12 | B6 | m * B5 | B4 |
+         vm | op_encoding;
+}
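+
+// Worked example (illustrative): vshl with NeonS8 and shift 3 yields
+// imm6 = 8 + 3 = 11; vshr with NeonU32 and shift 5 yields
+// imm6 = 2 * 32 - 5 = 59, with the U bit set.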
+
+void Assembler::vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
+                     int shift) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vshl(Qm, bits) SIMD shift left immediate.
+  // Instruction details available in ARM DDI 0406C.b, A8-1046.
+  emit(EncodeNeonShiftOp(VSHL, dt, dst, src, shift));
+}
+
+void Assembler::vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
+                     int shift) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vshr(Qm, bits) SIMD shift right immediate.
+  // Instruction details available in ARM DDI 0406C.b, A8-1052.
+  emit(EncodeNeonShiftOp(VSHR, dt, dst, src, shift));
+}
+
+static Instr EncodeNeonEstimateOp(bool is_rsqrt, QwNeonRegister dst,
+                                  QwNeonRegister src) {
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  int rsqrt = is_rsqrt ? 1 : 0;
+  return 0x1E7U * B23 | d * B22 | 0x3B * B16 | vd * B12 | 0x5 * B8 |
+         rsqrt * B7 | B6 | m * B5 | vm;
+}
+
+void Assembler::vrecpe(QwNeonRegister dst, QwNeonRegister src) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vrecpe(Qm) SIMD reciprocal estimate.
+  // Instruction details available in ARM DDI 0406C.b, A8-1024.
+  emit(EncodeNeonEstimateOp(false, dst, src));
+}
+
+void Assembler::vrsqrte(QwNeonRegister dst, QwNeonRegister src) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vrsqrte(Qm) SIMD reciprocal square root estimate.
+  // Instruction details available in ARM DDI 0406C.b, A8-1038.
+  emit(EncodeNeonEstimateOp(true, dst, src));
+}
+
+void Assembler::vrecps(QwNeonRegister dst, QwNeonRegister src1,
+                       QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vrecps(Qn, Qm) SIMD reciprocal refinement step.
+  // Instruction details available in ARM DDI 0406C.b, A8-1026.
+  emit(EncodeNeonBinOp(VRECPS, dst, src1, src2));
+}
+
+void Assembler::vrsqrts(QwNeonRegister dst, QwNeonRegister src1,
+                        QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vrsqrts(Qn, Qm) SIMD reciprocal square root refinement step.
+  // Instruction details available in ARM DDI 0406C.b, A8-1040.
+  emit(EncodeNeonBinOp(VRSQRTS, dst, src1, src2));
+}
+
+void Assembler::vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vtst(Qn, Qm) SIMD test integer operands.
+  // Instruction details available in ARM DDI 0406C.b, A8-1098.
+  emit(EncodeNeonBinOp(VTST, size, dst, src1, src2));
+}
+
+void Assembler::vceq(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vceq(Qn, Qm) SIMD floating point compare equal.
+  // Instruction details available in ARM DDI 0406C.b, A8-844.
+  emit(EncodeNeonBinOp(VCEQF, dst, src1, src2));
+}
+
+void Assembler::vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vceq(Qn, Qm) SIMD integer compare equal.
+  // Instruction details available in ARM DDI 0406C.b, A8-844.
+  emit(EncodeNeonBinOp(VCEQ, size, dst, src1, src2));
+}
+
+void Assembler::vcge(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vcge(Qn, Qm) SIMD floating point compare greater or equal.
+  // Instruction details available in ARM DDI 0406C.b, A8-848.
+  emit(EncodeNeonBinOp(VCGEF, dst, src1, src2));
+}
+
+void Assembler::vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vcge(Qn, Qm) SIMD integer compare greater or equal.
+  // Instruction details available in ARM DDI 0406C.b, A8-848.
+  emit(EncodeNeonBinOp(VCGE, dt, dst, src1, src2));
+}
+
+void Assembler::vcgt(QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vcgt(Qn, Qm) SIMD floating point compare greater than.
+  // Instruction details available in ARM DDI 0406C.b, A8-852.
+  emit(EncodeNeonBinOp(VCGTF, dst, src1, src2));
+}
+
+void Assembler::vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+                     QwNeonRegister src2) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vcgt(Qn, Qm) SIMD integer compare greater than.
+  // Instruction details available in ARM DDI 0406C.b, A8-852.
+  emit(EncodeNeonBinOp(VCGT, dt, dst, src1, src2));
+}
+
+void Assembler::vext(QwNeonRegister dst, const QwNeonRegister src1,
+                     const QwNeonRegister src2, int bytes) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vext(Qn, Qm) SIMD byte extract.
+  // Instruction details available in ARM DDI 0406C.b, A8-890.
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  src1.split_code(&vn, &n);
+  int vm, m;
+  src2.split_code(&vm, &m);
+  DCHECK_GT(16, bytes);
+  emit(0x1E5U * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 | bytes * B8 |
+       n * B7 | B6 | m * B5 | vm);
+}
+
+void Assembler::vzip(NeonSize size, QwNeonRegister dst,
+                     const QwNeonRegister src) {
+  DCHECK(IsEnabled(NEON));
+  // Qd = vzip.<size>(Qn, Qm) SIMD zip (interleave).
+  // Instruction details available in ARM DDI 0406C.b, A8-1102.
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  int sz = static_cast<int>(size);
+  emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | 2 * B16 | vd * B12 |
+       0x3 * B7 | B6 | m * B5 | vm);
+}
+
+static Instr EncodeNeonVREV(NeonSize op_size, NeonSize size,
+                            const QwNeonRegister dst,
+                            const QwNeonRegister src) {
+  // Qd = vrev<op_size>.<size>(Qn, Qm) SIMD element reverse.
+  // Instruction details available in ARM DDI 0406C.b, A8-1028.
+  DCHECK_GT(op_size, static_cast<int>(size));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  int sz = static_cast<int>(size);
+  int op = static_cast<int>(Neon64) - static_cast<int>(op_size);
+  return 0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | vd * B12 | op * B7 |
+         B6 | m * B5 | vm;
+}
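+
+// The op field follows from the mnemonic: vrev64 -> 0, vrev32 -> 1,
+// vrev16 -> 2 (assuming Neon16/Neon32/Neon64 have the values 1, 2, 3).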
+
+void Assembler::vrev16(NeonSize size, const QwNeonRegister dst,
+                       const QwNeonRegister src) {
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonVREV(Neon16, size, dst, src));
+}
+
+void Assembler::vrev32(NeonSize size, const QwNeonRegister dst,
+                       const QwNeonRegister src) {
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonVREV(Neon32, size, dst, src));
+}
+
+void Assembler::vrev64(NeonSize size, const QwNeonRegister dst,
+                       const QwNeonRegister src) {
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonVREV(Neon64, size, dst, src));
+}
+
+// Encode NEON vtbl / vtbx instruction.
+static Instr EncodeNeonVTB(const DwVfpRegister dst, const NeonListOperand& list,
+                           const DwVfpRegister index, bool vtbx) {
+  // Dd = vtbl(table, Dm) SIMD vector permute, zero at out of range indices.
+  // Instruction details available in ARM DDI 0406C.b, A8-1094.
+  // Dd = vtbx(table, Dm) SIMD vector permute, skip out of range indices.
+  // Instruction details available in ARM DDI 0406C.b, A8-1094.
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vn, n;
+  list.base().split_code(&vn, &n);
+  int vm, m;
+  index.split_code(&vm, &m);
+  int op = vtbx ? 1 : 0;  // vtbl = 0, vtbx = 1.
+  return 0x1E7U * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 | 0x2 * B10 |
+         list.length() * B8 | n * B7 | op * B6 | m * B5 | vm;
+}
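+
+// Illustrative usage (register choices are hypothetical): permute the bytes
+// of d4 through the two-register table {d0, d1}:
+//   vtbl(d5, NeonListOperand(q0), d4);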
+
+void Assembler::vtbl(const DwVfpRegister dst, const NeonListOperand& list,
+                     const DwVfpRegister index) {
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonVTB(dst, list, index, false));
+}
+
+void Assembler::vtbx(const DwVfpRegister dst, const NeonListOperand& list,
+                     const DwVfpRegister index) {
+  DCHECK(IsEnabled(NEON));
+  emit(EncodeNeonVTB(dst, list, index, true));
 }
 
 // Pseudo instructions.
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 1283c39..763ef71 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -150,6 +150,7 @@
 const Register no_reg = {Register::kCode_no_reg};
 
 static const bool kSimpleFPAliasing = false;
+static const bool kSimdMaskRegisters = false;
 
 // Single word VFP register.
 struct SwVfpRegister {
@@ -302,6 +303,20 @@
     *m = (encoded_code & 0x10) >> 4;
     *vm = encoded_code & 0x0F;
   }
+  DwVfpRegister low() const {
+    DwVfpRegister reg;
+    reg.reg_code = reg_code * 2;
+
+    DCHECK(reg.is_valid());
+    return reg;
+  }
+  DwVfpRegister high() const {
+    DwVfpRegister reg;
+    reg.reg_code = reg_code * 2 + 1;
+
+    DCHECK(reg.is_valid());
+    return reg;
+  }
 
   int reg_code;
 };
@@ -403,9 +418,11 @@
 // compilation unit that includes this header doesn't use the variables.
 #define kFirstCalleeSavedDoubleReg d8
 #define kLastCalleeSavedDoubleReg d15
+// kDoubleRegZero and kScratchDoubleReg must pair to form kScratchQuadReg.
 #define kDoubleRegZero d14
 #define kScratchDoubleReg d15
-
+// After using kScratchQuadReg, kDoubleRegZero must be reset to 0.
+#define kScratchQuadReg q7
 
 // Coprocessor register
 struct CRegister {
@@ -624,12 +641,26 @@
 // Class NeonListOperand represents a list of NEON registers
 class NeonListOperand BASE_EMBEDDED {
  public:
-  explicit NeonListOperand(DoubleRegister base, int registers_count = 1);
+  explicit NeonListOperand(DoubleRegister base, int register_count = 1)
+    : base_(base), register_count_(register_count) {}
+  explicit NeonListOperand(QwNeonRegister q_reg)
+    : base_(q_reg.low()), register_count_(2) {}
   DoubleRegister base() const { return base_; }
-  NeonListType type() const { return type_; }
+  int register_count() const { return register_count_; }
+  int length() const { return register_count_ - 1; }
+  NeonListType type() const {
+    switch (register_count_) {
+      default: UNREACHABLE();
+      // Fall through.
+      case 1: return nlt_1;
+      case 2: return nlt_2;
+      case 3: return nlt_3;
+      case 4: return nlt_4;
+    }
+  }
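+  // Example (illustrative): NeonListOperand(q1) covers {d2, d3}, so
+  // register_count() is 2 and type() is nlt_2.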
  private:
   DoubleRegister base_;
-  NeonListType type_;
+  int register_count_;
 };
 
 
@@ -698,17 +729,10 @@
   INLINE(static void set_target_address_at(
       Isolate* isolate, Address pc, Address constant_pool, Address target,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
-  INLINE(static Address target_address_at(Address pc, Code* code)) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    return target_address_at(pc, constant_pool);
-  }
+  INLINE(static Address target_address_at(Address pc, Code* code));
   INLINE(static void set_target_address_at(
       Isolate* isolate, Address pc, Code* code, Address target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    set_target_address_at(isolate, pc, constant_pool, target,
-                          icache_flush_mode);
-  }
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
 
   // Return the code target address at a call site from the return address
   // of that call in the instruction stream.
@@ -1133,6 +1157,8 @@
   void vmov(const DwVfpRegister dst,
             const DwVfpRegister src,
             const Condition cond = al);
+  // TODO(bbudge) Replace uses of these with the more general core register to
+  // scalar register vmov's.
   void vmov(const DwVfpRegister dst,
             const VmovIndex index,
             const Register src,
@@ -1313,8 +1339,83 @@
             const NeonMemOperand& dst);
   void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
 
-  // Currently, vswp supports only D0 to D31.
-  void vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
+  // Only unconditional core <-> scalar moves are currently supported.
+  void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
+  void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);
+
+  void vmov(const QwNeonRegister dst, const QwNeonRegister src);
+  void vmvn(const QwNeonRegister dst, const QwNeonRegister src);
+  void vswp(DwVfpRegister dst, DwVfpRegister src);
+  void vswp(QwNeonRegister dst, QwNeonRegister src);
+  // vdup does not support conditional execution.
+  void vdup(NeonSize size, const QwNeonRegister dst, const Register src);
+  void vdup(const QwNeonRegister dst, const SwVfpRegister src);
+
+  void vcvt_f32_s32(const QwNeonRegister dst, const QwNeonRegister src);
+  void vcvt_f32_u32(const QwNeonRegister dst, const QwNeonRegister src);
+  void vcvt_s32_f32(const QwNeonRegister dst, const QwNeonRegister src);
+  void vcvt_u32_f32(const QwNeonRegister dst, const QwNeonRegister src);
+
+  void vabs(const QwNeonRegister dst, const QwNeonRegister src);
+  void vabs(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
+  void vneg(const QwNeonRegister dst, const QwNeonRegister src);
+  void vneg(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
+  void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
+  void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vadd(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+            QwNeonRegister src2);
+  void vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+             QwNeonRegister src2);
+  void vsub(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+            QwNeonRegister src2);
+  void vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+             QwNeonRegister src2);
+  void vmul(QwNeonRegister dst, QwNeonRegister src1,
+            QwNeonRegister src2);
+  void vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+            QwNeonRegister src2);
+  void vmin(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vmin(NeonDataType dt, QwNeonRegister dst,
+            QwNeonRegister src1, QwNeonRegister src2);
+  void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vmax(NeonDataType dt, QwNeonRegister dst,
+            QwNeonRegister src1, QwNeonRegister src2);
+  void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
+  void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
+  // vrecpe and vrsqrte only support floating point lanes.
+  void vrecpe(QwNeonRegister dst, QwNeonRegister src);
+  void vrsqrte(QwNeonRegister dst, QwNeonRegister src);
+  void vrecps(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vrsqrts(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+            QwNeonRegister src2);
+  void vceq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
+            QwNeonRegister src2);
+  void vcge(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vcge(NeonDataType dt, QwNeonRegister dst,
+            QwNeonRegister src1, QwNeonRegister src2);
+  void vcgt(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+  void vcgt(NeonDataType dt, QwNeonRegister dst,
+            QwNeonRegister src1, QwNeonRegister src2);
+  void vext(const QwNeonRegister dst, const QwNeonRegister src1,
+            const QwNeonRegister src2, int bytes);
+  void vzip(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
+  void vrev16(NeonSize size, const QwNeonRegister dst,
+            const QwNeonRegister src);
+  void vrev32(NeonSize size, const QwNeonRegister dst,
+            const QwNeonRegister src);
+  void vrev64(NeonSize size, const QwNeonRegister dst,
+            const QwNeonRegister src);
+  void vtbl(const DwVfpRegister dst, const NeonListOperand& list,
+            const DwVfpRegister index);
+  void vtbx(const DwVfpRegister dst, const NeonListOperand& list,
+            const DwVfpRegister index);
 
   // Pseudo instructions
 
@@ -1395,9 +1496,6 @@
 
   // Debugging
 
-  // Mark generator continuation.
-  void RecordGeneratorContinuation();
-
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot(RelocInfo::Mode mode);
 
@@ -1611,6 +1709,12 @@
            (reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters);
   }
 
+  bool VfpRegisterIsAvailable(QwNeonRegister reg) {
+    DCHECK(reg.is_valid());
+    return IsEnabled(VFP32DREGS) ||
+           (reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters / 2);
+  }
+
  private:
   int next_buffer_check_;  // pc offset of next buffer check
 
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 59f304d..80ef322 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -33,17 +33,6 @@
   __ TailCallRuntime(Runtime::kNewArray);
 }
 
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
-  descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
-  descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                           Condition cond);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -206,9 +195,6 @@
     // Call runtime on identical symbols since we need to throw a TypeError.
     __ cmp(r4, Operand(SYMBOL_TYPE));
     __ b(eq, slow);
-    // Call runtime on identical SIMD values since we must throw a TypeError.
-    __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
-    __ b(eq, slow);
   } else {
     __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
     __ b(eq, &heap_number);
@@ -219,9 +205,6 @@
       // Call runtime on identical symbols since we need to throw a TypeError.
       __ cmp(r4, Operand(SYMBOL_TYPE));
       __ b(eq, slow);
-      // Call runtime on identical SIMD values since we must throw a TypeError.
-      __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
-      __ b(eq, slow);
       // Normally here we fall through to return_equal, but undefined is
       // special: (undefined == undefined) == true, but
       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
@@ -635,8 +618,11 @@
   if (cc == eq) {
     {
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ Push(lhs, rhs);
-      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+      __ Push(cp);
+      __ Call(strict() ? isolate()->builtins()->StrictEqual()
+                       : isolate()->builtins()->Equal(),
+              RelocInfo::CODE_TARGET);
+      __ Pop(cp);
     }
     // Turn true into 0 and false into some non-zero value.
     STATIC_ASSERT(EQUAL == 0);
@@ -805,7 +791,6 @@
   SaveFPRegsMode mode = kSaveFPRegs;
   CEntryStub(isolate, 1, mode).GetCode();
   StoreBufferOverflowStub(isolate, mode).GetCode();
-  isolate->set_fp_stubs_generated(true);
 }
 
 
@@ -1038,12 +1023,12 @@
   // r2: receiver
   // r3: argc
   // r4: argv
-  int marker = type();
+  StackFrame::Type marker = type();
   if (FLAG_enable_embedded_constant_pool) {
     __ mov(r8, Operand::Zero());
   }
-  __ mov(r7, Operand(Smi::FromInt(marker)));
-  __ mov(r6, Operand(Smi::FromInt(marker)));
+  __ mov(r7, Operand(StackFrame::TypeToMarker(marker)));
+  __ mov(r6, Operand(StackFrame::TypeToMarker(marker)));
   __ mov(r5,
          Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   __ ldr(r5, MemOperand(r5));
@@ -1063,11 +1048,11 @@
   __ cmp(r6, Operand::Zero());
   __ b(ne, &non_outermost_js);
   __ str(fp, MemOperand(r5));
-  __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   Label cont;
   __ b(&cont);
   __ bind(&non_outermost_js);
-  __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+  __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
   __ bind(&cont);
   __ push(ip);
 
@@ -1133,7 +1118,7 @@
   // Check if the current stack frame is marked as the outermost JS frame.
   Label non_outermost_js_2;
   __ pop(r5);
-  __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ cmp(r5, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   __ b(ne, &non_outermost_js_2);
   __ mov(r6, Operand::Zero());
   __ mov(r5, Operand(ExternalReference(js_entry_sp)));
@@ -1162,55 +1147,6 @@
   __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
 }
 
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  // Ensure that the vector and slot registers won't be clobbered before
-  // calling the miss handler.
-  DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::VectorRegister(),
-                     LoadWithVectorDescriptor::SlotRegister()));
-
-  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
-                                                          r5, &miss);
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
-  // Return address is in lr.
-  Label miss;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register index = LoadDescriptor::NameRegister();
-  Register scratch = r5;
-  Register result = r0;
-  DCHECK(!scratch.is(receiver) && !scratch.is(index));
-  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
-         result.is(LoadWithVectorDescriptor::SlotRegister()));
-
-  // StringCharAtGenerator doesn't use the result register until it's passed
-  // the different miss possibilities. If it did, we would have a conflict
-  // when FLAG_vector_ics is true.
-  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          RECEIVER_IS_STRING);
-  char_at_generator.GenerateFast(masm);
-  __ Ret();
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -1306,7 +1242,7 @@
   // (6) External string.  Make it, offset-wise, look like a sequential string.
   //     Go to (4).
   // (7) Short external string or not a string?  If yes, bail out to runtime.
-  // (8) Sliced string.  Replace subject with parent.  Go to (1).
+  // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
 
   Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
       not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
@@ -1328,6 +1264,7 @@
   // (2) Sequential or cons?  If not, go to (5).
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kThinStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmp(r1, Operand(kExternalStringTag));
@@ -1355,10 +1292,10 @@
   __ b(ls, &runtime);
   __ SmiUntag(r1);
 
-  STATIC_ASSERT(4 == kOneByteStringTag);
+  STATIC_ASSERT(8 == kOneByteStringTag);
   STATIC_ASSERT(kTwoByteStringTag == 0);
   __ and_(r0, r0, Operand(kStringEncodingMask));
-  __ mov(r3, Operand(r0, ASR, 2), SetCC);
+  __ mov(r3, Operand(r0, ASR, 3), SetCC);
   __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
          ne);
   __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
@@ -1592,12 +1529,19 @@
   __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
   __ b(ne, &runtime);
 
-  // (8) Sliced string.  Replace subject with parent.  Go to (4).
+  // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
+  Label thin_string;
+  __ cmp(r1, Operand(kThinStringTag));
+  __ b(eq, &thin_string);
   // Load offset into r9 and replace subject string with parent.
   __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   __ SmiUntag(r9);
   __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   __ jmp(&check_underlying);  // Go to (1).
+
+  __ bind(&thin_string);
+  __ ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+  __ jmp(&check_underlying);  // Go to (1).
 #endif  // V8_INTERPRETED_REGEXP
 }
 
@@ -1632,9 +1576,9 @@
   // r3 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
 
-  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+  DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
-  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+  DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
   // Load the cache state into r5.
@@ -1644,7 +1588,7 @@
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
   // We don't know if r5 is a WeakCell or a Symbol, but it's harmless to read at
-  // this position in a symbol (see static asserts in type-feedback-vector.h).
+  // this position in a symbol (see static asserts in feedback-vector.h).
   Label check_allocation_site;
   Register feedback_map = r6;
   Register weak_value = r9;
@@ -1759,192 +1703,6 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
-                               Register slot) {
-  __ add(feedback_vector, feedback_vector,
-         Operand::PointerOffsetFromSmiKey(slot));
-  __ add(feedback_vector, feedback_vector,
-         Operand(FixedArray::kHeaderSize + kPointerSize));
-  __ ldr(slot, FieldMemOperand(feedback_vector, 0));
-  __ add(slot, slot, Operand(Smi::FromInt(1)));
-  __ str(slot, FieldMemOperand(feedback_vector, 0));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
-  // r0 - number of arguments
-  // r1 - function
-  // r3 - slot id
-  // r2 - vector
-  // r4 - allocation site (loaded from vector[slot])
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
-  __ cmp(r1, r5);
-  __ b(ne, miss);
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, r2, r3);
-
-  __ mov(r2, r4);
-  __ mov(r3, r1);
-  ArrayConstructorStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
-  // r0 - number of arguments
-  // r1 - function
-  // r3 - slot id (Smi)
-  // r2 - vector
-  Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
-  // The checks. First, does r1 match the recorded monomorphic target?
-  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
-  __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
-
-  // We don't know that we have a weak cell. We might have a private symbol
-  // or an AllocationSite, but the memory is safe to examine.
-  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
-  // FixedArray.
-  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
-  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
-  // computed, meaning that it can't appear to be a pointer. If the low bit is
-  // 0, then hash is computed, but the 0 bit prevents the field from appearing
-  // to be a pointer.
-  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
-  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
-                    WeakCell::kValueOffset &&
-                WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
-  __ ldr(r5, FieldMemOperand(r4, WeakCell::kValueOffset));
-  __ cmp(r1, r5);
-  __ b(ne, &extra_checks_or_miss);
-
-  // The compare above could have been a SMI/SMI comparison. Guard against this
-  // convincing us that we have a monomorphic JSFunction.
-  __ JumpIfSmi(r1, &extra_checks_or_miss);
-
-  __ bind(&call_function);
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, r2, r3);
-
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
-                                                    tail_call_mode()),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&extra_checks_or_miss);
-  Label uninitialized, miss, not_allocation_site;
-
-  __ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
-  __ b(eq, &call);
-
-  // Verify that r4 contains an AllocationSite
-  __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
-  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
-  __ b(ne, &not_allocation_site);
-
-  // We have an allocation site.
-  HandleArrayCase(masm, &miss);
-
-  __ bind(&not_allocation_site);
-
-  // The following cases attempt to handle MISS cases without going to the
-  // runtime.
-  if (FLAG_trace_ic) {
-    __ jmp(&miss);
-  }
-
-  __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
-  __ b(eq, &uninitialized);
-
-  // We are going megamorphic. If the feedback is a JSFunction, it is fine
-  // to handle it here. More complex cases are dealt with in the runtime.
-  __ AssertNotSmi(r4);
-  __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
-  __ b(ne, &miss);
-  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
-  __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
-  __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
-
-  __ bind(&call);
-
-  // Increment the call count for megamorphic function calls.
-  IncrementCallCount(masm, r2, r3);
-
-  __ bind(&call_count_incremented);
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&uninitialized);
-
-  // We are going monomorphic, provided we actually have a JSFunction.
-  __ JumpIfSmi(r1, &miss);
-
-  // Goto miss case if we do not have a function.
-  __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
-  __ b(ne, &miss);
-
-  // Make sure the function is not the Array() function, which requires special
-  // behavior on MISS.
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r4);
-  __ cmp(r1, r4);
-  __ b(eq, &miss);
-
-  // Make sure the function belongs to the same native context.
-  __ ldr(r4, FieldMemOperand(r1, JSFunction::kContextOffset));
-  __ ldr(r4, ContextMemOperand(r4, Context::NATIVE_CONTEXT_INDEX));
-  __ ldr(ip, NativeContextMemOperand());
-  __ cmp(r4, ip);
-  __ b(ne, &miss);
-
-  // Store the function. Use a stub since we need a frame for allocation.
-  // r2 - vector
-  // r3 - slot
-  // r1 - function
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    CreateWeakCellStub create_stub(masm->isolate());
-    __ SmiTag(r0);
-    __ Push(r0, r2, r3, cp, r1);
-    __ CallStub(&create_stub);
-    __ Pop(r2, r3, cp, r1);
-    __ Pop(r0);
-    __ SmiUntag(r0);
-  }
-
-  __ jmp(&call_function);
-
-  // We are here because tracing is on or we encountered a MISS case we can't
-  // handle here.
-  __ bind(&miss);
-  GenerateMiss(masm);
-
-  __ jmp(&call_count_incremented);
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
-  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
-  // Preserve the number of arguments as Smi.
-  __ SmiTag(r0);
-
-  // Push the receiver and the function and feedback info.
-  __ Push(r0, r1, r2, r3);
-
-  // Call the entry.
-  __ CallRuntime(Runtime::kCallIC_Miss);
-
-  // Move the result to r1 and exit the internal frame.
-  __ mov(r1, r0);
-
-  // Restore number of arguments.
-  __ Pop(r0);
-  __ SmiUntag(r0);
-}
-
-
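The deleted CallICStub leaned on the guarantee that WeakCell::kValueOffset,
AllocationSite::kTransitionInfoOffset and Symbol::kHashFieldSlot coincide, so a
single load can be compared against the callee no matter which feedback object
occupies the slot. A minimal C++ sketch of why that raw compare is sound
(hypothetical helpers, not V8 API; assumes the smi tag lives in bit 0):

    #include <cstdint>

    constexpr uintptr_t kTagMask = 1;  // low bit: 0 => smi, 1 => heap pointer

    bool IsSmi(uintptr_t tagged) { return (tagged & kTagMask) == 0; }

    // slot_word is whatever sits at the shared offset: a weak cell's value,
    // an allocation site's transition info, or a symbol's hash field.
    bool IsMonomorphicHit(uintptr_t slot_word, uintptr_t tagged_function) {
      // The raw compare can only spuriously match when both sides are the
      // same smi, which the explicit smi check on the callee rules out.
      return slot_word == tagged_function && !IsSmi(tagged_function);
    }

This mirrors the cmp/JumpIfSmi pair at the top of the deleted stub.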
 // StringCharCodeAtGenerator
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   // If the receiver is a smi trigger the non-string case.
@@ -2036,85 +1794,6 @@
   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
 }
 
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
-  // Fast case of Heap::LookupSingleCharacterStringFromCode.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiShiftSize == 0);
-  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
-  __ tst(code_, Operand(kSmiTagMask |
-                        ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
-  __ b(ne, &slow_case_);
-
-  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
-  // At this point code register contains smi tagged one-byte char code.
-  __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
-  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
-  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
-  __ b(eq, &slow_case_);
-  __ bind(&exit_);
-}
-
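The fast path above folds a tag check and a range check into one 'tst'. A
hedged C++ sketch of the same mask trick (illustrative constants: smi tag in
bit 0, one-byte codes up to 0xFF; not the real V8 definitions):

    #include <cstdint>

    constexpr uintptr_t kSmiTagMaskBit = 1;
    constexpr uintptr_t kMaxOneByteCharCode = 0xFF;

    // True iff 'tagged' is a smi whose payload fits in one byte: a set tag
    // bit, or any payload bit above the low eight, trips the mask.
    bool IsOneByteCharCodeSmi(uintptr_t tagged) {
      uintptr_t mask = kSmiTagMaskBit | (~kMaxOneByteCharCode << 1);
      return (tagged & mask) == 0;
    }

E.g. code 0x41 is tagged as 0x82 and passes; code 0x100 is tagged as 0x200 and
fails, falling through to the slow case.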
-
-void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
-  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
-  __ bind(&slow_case_);
-  call_helper.BeforeCall(masm);
-  __ push(code_);
-  __ CallRuntime(Runtime::kStringCharFromCode);
-  __ Move(result_, r0);
-  call_helper.AfterCall(masm);
-  __ jmp(&exit_);
-
-  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
-
-enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
-                                          Register dest,
-                                          Register src,
-                                          Register count,
-                                          Register scratch,
-                                          String::Encoding encoding) {
-  if (FLAG_debug_code) {
-    // Check that destination is word aligned.
-    __ tst(dest, Operand(kPointerAlignmentMask));
-    __ Check(eq, kDestinationOfCopyNotAligned);
-  }
-
-  // Assumes word reads and writes are little endian.
-  // Nothing to do for zero characters.
-  Label done;
-  if (encoding == String::TWO_BYTE_ENCODING) {
-    __ add(count, count, Operand(count), SetCC);
-  }
-
-  Register limit = count;  // Read until dest equals this.
-  __ add(limit, dest, Operand(count));
-
-  Label loop_entry, loop;
-  // Copy bytes from src to dest until dest hits limit.
-  __ b(&loop_entry);
-  __ bind(&loop);
-  __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
-  __ strb(scratch, MemOperand(dest, 1, PostIndex));
-  __ bind(&loop_entry);
-  __ cmp(dest, Operand(limit));
-  __ b(lt, &loop);
-
-  __ bind(&done);
-}
-
-
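The deleted copy helper reduces to a byte loop with the count doubled for
two-byte strings. A minimal sketch in plain C++ (assumes little-endian layout,
as the original comment does, and non-overlapping regions):

    #include <cstdint>

    enum class Encoding { kOneByte, kTwoByte };

    void CopyCharacters(uint8_t* dest, const uint8_t* src, int count,
                        Encoding encoding) {
      if (encoding == Encoding::kTwoByte) count += count;  // chars -> bytes
      const uint8_t* limit = dest + count;  // copy until dest reaches this
      while (dest < limit) *dest++ = *src++;
    }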
 void StringHelper::GenerateFlatOneByteStringEquals(
     MacroAssembler* masm, Register left, Register right, Register scratch1,
     Register scratch2, Register scratch3) {
@@ -2690,84 +2369,6 @@
   __ b(ne, miss);
 }
 
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
-                                                      Label* miss,
-                                                      Label* done,
-                                                      Register elements,
-                                                      Register name,
-                                                      Register scratch1,
-                                                      Register scratch2) {
-  DCHECK(!elements.is(scratch1));
-  DCHECK(!elements.is(scratch2));
-  DCHECK(!name.is(scratch1));
-  DCHECK(!name.is(scratch2));
-
-  __ AssertName(name);
-
-  // Compute the capacity mask.
-  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
-  __ SmiUntag(scratch1);
-  __ sub(scratch1, scratch1, Operand(1));
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
-    if (i > 0) {
-      // Add the probe offset (i + i * i) left shifted to avoid right shifting
-      // the hash in a separate instruction. The value hash + i + i * i is
-      // right shifted in the following 'and' instruction.
-      DCHECK(NameDictionary::GetProbeOffset(i) <
-             1 << (32 - Name::kHashFieldOffset));
-      __ add(scratch2, scratch2, Operand(
-          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
-    }
-    __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
-
-    // Scale the index by multiplying by the entry size.
-    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
-    // scratch2 = scratch2 * 3.
-    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
-    // Check if the key is identical to the name.
-    __ add(scratch2, elements, Operand(scratch2, LSL, 2));
-    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
-    __ cmp(name, Operand(ip));
-    __ b(eq, done);
-  }
-
-  const int spill_mask =
-      (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
-       r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
-      ~(scratch1.bit() | scratch2.bit());
-
-  __ stm(db_w, sp, spill_mask);
-  if (name.is(r0)) {
-    DCHECK(!elements.is(r1));
-    __ Move(r1, name);
-    __ Move(r0, elements);
-  } else {
-    __ Move(r0, elements);
-    __ Move(r1, name);
-  }
-  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
-  __ CallStub(&stub);
-  __ cmp(r0, Operand::Zero());
-  __ mov(scratch2, Operand(r2));
-  __ ldm(ia_w, sp, spill_mask);
-
-  __ b(ne, done);
-  __ b(eq, miss);
-}
-
-
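The unrolled probes above follow NameDictionary's quadratic probe sequence,
(hash + i + i*i) & mask, with three words per entry. A hedged C++ sketch of
the full scheme whose first few steps the stub inlines (hypothetical
dictionary layout, zero as an empty-slot sentinel):

    #include <cstdint>
    #include <vector>

    constexpr int kEntrySize = 3;  // key, value, details per entry

    int FindEntry(const std::vector<uintptr_t>& elements, uint32_t capacity,
                  uint32_t hash, uintptr_t key) {
      uint32_t mask = capacity - 1;  // capacity is a power of two
      for (uint32_t i = 0; i < capacity; ++i) {
        uint32_t index = (hash + i + i * i) & mask;  // quadratic probing
        uintptr_t candidate = elements[index * kEntrySize];
        if (candidate == key) return static_cast<int>(index);
        if (candidate == 0) return -1;  // empty slot: definite miss
      }
      return -1;
    }

The stub unrolls only kInlinedProbes iterations and defers the rest to the
runtime, since two probes already cover the vast majority of lookups.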
 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   // we cannot call anything that could cause a GC from this stub.
@@ -3051,244 +2652,6 @@
   __ Ret();
 }
 
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(r2);
-  CallICStub stub(isolate(), state());
-  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
-                             Register receiver_map, Register scratch1,
-                             Register scratch2, bool is_polymorphic,
-                             Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label start_polymorphic;
-
-  Register cached_map = scratch1;
-
-  __ ldr(cached_map,
-         FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ cmp(receiver_map, cached_map);
-  __ b(ne, &start_polymorphic);
-  // found, now call handler.
-  Register handler = feedback;
-  __ ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-
-  Register length = scratch2;
-  __ bind(&start_polymorphic);
-  __ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-  if (!is_polymorphic) {
-    // If the IC could be monomorphic we have to make sure we don't go past the
-    // end of the feedback array.
-    __ cmp(length, Operand(Smi::FromInt(2)));
-    __ b(eq, miss);
-  }
-
-  Register too_far = length;
-  Register pointer_reg = feedback;
-
-  // +-----+------+------+-----+-----+ ... ----+
-  // | map | len  | wm0  | h0  | wm1 |      hN |
-  // +-----+------+------+-----+-----+ ... ----+
-  //                 0      1     2        len-1
-  //                              ^              ^
-  //                              |              |
-  //                         pointer_reg      too_far
-  //                         aka feedback     scratch2
-  // also need receiver_map
-  // use cached_map (scratch1) to look in the weak map values.
-  __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
-  __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(pointer_reg, feedback,
-         Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
-  __ bind(&next_loop);
-  __ ldr(cached_map, MemOperand(pointer_reg));
-  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ cmp(receiver_map, cached_map);
-  __ b(ne, &prepare_next);
-  __ ldr(handler, MemOperand(pointer_reg, kPointerSize));
-  __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&prepare_next);
-  __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
-  __ cmp(pointer_reg, too_far);
-  __ b(lt, &next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ jmp(miss);
-}
-
-
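HandleArrayCases above walks the (weak map, handler) pairs pictured in the
diagram. The control flow reduces to a linear scan; a minimal sketch with
hypothetical types:

    struct FeedbackPair { const void* weak_map; void* handler; };

    void* FindHandler(const FeedbackPair* pairs, int count,
                      const void* receiver_map) {
      for (int i = 0; i < count; ++i) {
        if (pairs[i].weak_map == receiver_map) return pairs[i].handler;
      }
      return nullptr;  // exhausted the array: take the miss path
    }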
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
-                                  Register receiver_map, Register feedback,
-                                  Register vector, Register slot,
-                                  Register scratch, Label* compare_map,
-                                  Label* load_smi_map, Label* try_array) {
-  __ JumpIfSmi(receiver, load_smi_map);
-  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bind(compare_map);
-  Register cached_map = scratch;
-  // Move the weak map into the weak_cell register.
-  __ ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
-  __ cmp(cached_map, receiver_map);
-  __ b(ne, try_array);
-  Register handler = feedback;
-  __ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
-  __ ldr(handler,
-         FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
-  __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  KeyedStoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
-                                       Register receiver_map, Register scratch1,
-                                       Register scratch2, Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label start_polymorphic;
-  Label transition_call;
-
-  Register cached_map = scratch1;
-  Register too_far = scratch2;
-  Register pointer_reg = feedback;
-  __ ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
-  // +-----+------+------+-----+-----+-----+ ... ----+
-  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
-  // +-----+------+------+-----+-----+ ----+ ... ----+
-  //                 0      1     2              len-1
-  //                 ^                                 ^
-  //                 |                                 |
-  //             pointer_reg                        too_far
-  //             aka feedback                       scratch2
-  // also need receiver_map
-  // use cached_map (scratch1) to look in the weak map values.
-  __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(too_far));
-  __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(pointer_reg, feedback,
-         Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
-  __ bind(&next_loop);
-  __ ldr(cached_map, MemOperand(pointer_reg));
-  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ cmp(receiver_map, cached_map);
-  __ b(ne, &prepare_next);
-  // Is it a transitioning store?
-  __ ldr(too_far, MemOperand(pointer_reg, kPointerSize));
-  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
-  __ b(ne, &transition_call);
-  __ ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
-  __ add(pc, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&transition_call);
-  __ ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
-  __ JumpIfSmi(too_far, miss);
-
-  __ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-
-  // Load the map into the correct register.
-  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
-  __ mov(feedback, too_far);
-
-  __ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  __ bind(&prepare_next);
-  __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
-  __ cmp(pointer_reg, too_far);
-  __ b(lt, &next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ jmp(miss);
-}
-
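The store variant walks triples instead of pairs: (weak map, transition map or
undefined, handler), where a real transition map must be handed to the handler
in the map register. A sketch with hypothetical types:

    struct StoreEntry { const void* weak_map; void* transition; void* handler; };
    struct Dispatch { void* handler; void* transition_map; };  // may be null

    Dispatch FindStoreHandler(const StoreEntry* entries, int count,
                              const void* receiver_map, const void* undefined) {
      for (int i = 0; i < count; ++i) {
        if (entries[i].weak_map != receiver_map) continue;
        void* t = (entries[i].transition == undefined) ? nullptr
                                                       : entries[i].transition;
        return {entries[i].handler, t};  // transitioning stores get the new map
      }
      return {nullptr, nullptr};  // exhausted: miss
    }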
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // r1
-  Register key = StoreWithVectorDescriptor::NameRegister();           // r2
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // r3
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // r4
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r0));          // r0
-  Register feedback = r5;
-  Register receiver_map = r6;
-  Register scratch1 = r9;
-
-  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
-  __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  __ b(ne, &not_array);
-
-  // We have a polymorphic element handler.
-  Label polymorphic, try_poly_name;
-  __ bind(&polymorphic);
-
-  // We are using register r8, which is used for the embedded constant pool
-  // when FLAG_enable_embedded_constant_pool is true.
-  DCHECK(!FLAG_enable_embedded_constant_pool);
-  Register scratch2 = r8;
-
-  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
-                             &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ b(ne, &try_poly_name);
-  Handle<Code> megamorphic_stub =
-      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ cmp(key, feedback);
-  __ b(ne, &miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
-  __ ldr(feedback,
-         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
-                   &miss);
-
-  __ bind(&miss);
-  KeyedStoreIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
@@ -3647,612 +3010,6 @@
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r1 : target
-  //  -- r3 : new target
-  //  -- cp : context
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(r1);
-  __ AssertReceiver(r3);
-
-  // Verify that the new target is a JSFunction.
-  Label new_object;
-  __ CompareObjectType(r3, r2, r2, JS_FUNCTION_TYPE);
-  __ b(ne, &new_object);
-
-  // Load the initial map and verify that it's in fact a map.
-  __ ldr(r2, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
-  __ JumpIfSmi(r2, &new_object);
-  __ CompareObjectType(r2, r0, r0, MAP_TYPE);
-  __ b(ne, &new_object);
-
-  // Fall back to runtime if the target differs from the new target's
-  // initial map's constructor.
-  __ ldr(r0, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
-  __ cmp(r0, r1);
-  __ b(ne, &new_object);
-
-  // Allocate the JSObject on the heap.
-  Label allocate, done_allocate;
-  __ ldrb(r4, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-  __ Allocate(r4, r0, r5, r6, &allocate, SIZE_IN_WORDS);
-  __ bind(&done_allocate);
-
-  // Initialize the JSObject fields.
-  __ str(r2, FieldMemOperand(r0, JSObject::kMapOffset));
-  __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
-  __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
-  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
-  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ add(r1, r0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
-  // ----------- S t a t e -------------
-  //  -- r0 : result (tagged)
-  //  -- r1 : result fields (untagged)
-  //  -- r5 : result end (untagged)
-  //  -- r2 : initial map
-  //  -- cp : context
-  //  -- lr : return address
-  // -----------------------------------
-
-  // Perform in-object slack tracking if requested.
-  Label slack_tracking;
-  STATIC_ASSERT(Map::kNoSlackTracking == 0);
-  __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
-  __ ldr(r3, FieldMemOperand(r2, Map::kBitField3Offset));
-  __ tst(r3, Operand(Map::ConstructionCounter::kMask));
-  __ b(ne, &slack_tracking);
-  {
-    // Initialize all in-object fields with undefined.
-    __ InitializeFieldsWithFiller(r1, r5, r6);
-    __ Ret();
-  }
-  __ bind(&slack_tracking);
-  {
-    // Decrease generous allocation count.
-    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
-    __ sub(r3, r3, Operand(1 << Map::ConstructionCounter::kShift));
-    __ str(r3, FieldMemOperand(r2, Map::kBitField3Offset));
-
-    // Initialize the in-object fields with undefined.
-    __ ldrb(r4, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
-    __ sub(r4, r5, Operand(r4, LSL, kPointerSizeLog2));
-    __ InitializeFieldsWithFiller(r1, r4, r6);
-
-    // Initialize the remaining (reserved) fields with one pointer filler map.
-    __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
-    __ InitializeFieldsWithFiller(r1, r5, r6);
-
-    // Check if we can finalize the instance size.
-    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
-    __ tst(r3, Operand(Map::ConstructionCounter::kMask));
-    __ Ret(ne);
-
-    // Finalize the instance size.
-    {
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ Push(r0, r2);
-      __ CallRuntime(Runtime::kFinalizeInstanceSize);
-      __ Pop(r0);
-    }
-    __ Ret();
-  }
-
-  // Fall back to %AllocateInNewSpace.
-  __ bind(&allocate);
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    __ mov(r4, Operand(r4, LSL, kPointerSizeLog2 + 1));
-    __ Push(r2, r4);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ Pop(r2);
-  }
-  __ ldrb(r5, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-  __ add(r5, r0, Operand(r5, LSL, kPointerSizeLog2));
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ sub(r5, r5, Operand(kHeapObjectTag));
-  __ b(&done_allocate);
-
-  // Fall back to %NewObject.
-  __ bind(&new_object);
-  __ Push(r1, r3);
-  __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
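The slack-tracking branch above implements a simple counting policy. A hedged
C++ sketch of the idea (illustrative state, not the real Map bit fields):

    struct MapState {
      int construction_counter;    // 0 means slack tracking has finished
      int unused_property_fields;  // reserved tail that may be shrunk away
    };

    enum class Fill { kAllUndefined, kUndefinedThenFiller, kFinalizeSize };

    Fill OnFastNewObject(MapState* map) {
      if (map->construction_counter == 0) return Fill::kAllUndefined;
      --map->construction_counter;  // decrease generous allocation count
      // Used fields get undefined, the reserved tail gets one-word fillers;
      // once the counter runs out, the instance size is finalized in the
      // runtime and later allocations skip the unused tail entirely.
      return map->construction_counter == 0 ? Fill::kFinalizeSize
                                            : Fill::kUndefinedThenFiller;
    }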
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r1 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(r1);
-
-  // Make r2 point to the JavaScript frame.
-  __ mov(r2, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
-    __ cmp(ip, r1);
-    __ b(eq, &ok);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have rest parameters (only possible if we have an
-  // arguments adaptor frame below the function frame).
-  Label no_rest_parameters;
-  __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(ip, MemOperand(r2, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(ne, &no_rest_parameters);
-
-  // Check if the arguments adaptor frame contains more arguments than
-  // specified by the function's internal formal parameter count.
-  Label rest_parameters;
-  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r3,
-         FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ sub(r0, r0, r3, SetCC);
-  __ b(gt, &rest_parameters);
-
-  // Return an empty rest parameter array.
-  __ bind(&no_rest_parameters);
-  {
-    // ----------- S t a t e -------------
-    //  -- cp : context
-    //  -- lr : return address
-    // -----------------------------------
-
-    // Allocate an empty rest parameter array.
-    Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, r0, r1, r2, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Setup the rest parameter array in r0.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
-    __ str(r1, FieldMemOperand(r0, JSArray::kMapOffset));
-    __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
-    __ str(r1, FieldMemOperand(r0, JSArray::kPropertiesOffset));
-    __ str(r1, FieldMemOperand(r0, JSArray::kElementsOffset));
-    __ mov(r1, Operand(0));
-    __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ Ret();
-
-    // Fall back to %AllocateInNewSpace.
-    __ bind(&allocate);
-    {
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ Push(Smi::FromInt(JSArray::kSize));
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-    }
-    __ jmp(&done_allocate);
-  }
-
-  __ bind(&rest_parameters);
-  {
-    // Compute the pointer to the first rest parameter (skipping the receiver).
-    __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
-    __ add(r2, r2,
-           Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
-
-    // ----------- S t a t e -------------
-    //  -- cp : context
-    //  -- r0 : number of rest parameters (tagged)
-    //  -- r1 : function
-    //  -- r2 : pointer to first rest parameters
-    //  -- lr : return address
-    // -----------------------------------
-
-    // Allocate space for the rest parameter array plus the backing store.
-    Label allocate, done_allocate;
-    __ mov(r6, Operand(JSArray::kSize + FixedArray::kHeaderSize));
-    __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
-    __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Setup the elements array in r3.
-    __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
-    __ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
-    __ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
-    __ add(r4, r3, Operand(FixedArray::kHeaderSize));
-    {
-      Label loop, done_loop;
-      __ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
-      __ bind(&loop);
-      __ cmp(r4, r1);
-      __ b(eq, &done_loop);
-      __ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
-      __ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
-      __ add(r4, r4, Operand(1 * kPointerSize));
-      __ b(&loop);
-      __ bind(&done_loop);
-    }
-
-    // Setup the rest parameter array in r4.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
-    __ str(r1, FieldMemOperand(r4, JSArray::kMapOffset));
-    __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
-    __ str(r1, FieldMemOperand(r4, JSArray::kPropertiesOffset));
-    __ str(r3, FieldMemOperand(r4, JSArray::kElementsOffset));
-    __ str(r0, FieldMemOperand(r4, JSArray::kLengthOffset));
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ mov(r0, r4);
-    __ Ret();
-
-    // Fall back to %AllocateInNewSpace (if not too big).
-    Label too_big_for_new_space;
-    __ bind(&allocate);
-    __ cmp(r6, Operand(kMaxRegularHeapObjectSize));
-    __ b(gt, &too_big_for_new_space);
-    {
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(r6);
-      __ Push(r0, r2, r6);
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-      __ mov(r3, r0);
-      __ Pop(r0, r2);
-    }
-    __ jmp(&done_allocate);
-
-    // Fall back to %NewRestParameter.
-    __ bind(&too_big_for_new_space);
-    __ push(r1);
-    __ TailCallRuntime(Runtime::kNewRestParameter);
-  }
-}
-
-
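The prologue above derives the rest count from the adaptor frame: the
arguments beyond the formal parameter count, or none at all. In scalar form:

    // Returns how many trailing arguments spill into the rest array.
    int RestParameterCount(int actual_args, int formal_params) {
      int rest = actual_args - formal_params;
      return rest > 0 ? rest : 0;  // no surplus => empty rest array
    }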
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r1 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(r1);
-
-  // Make r9 point to the JavaScript frame.
-  __ mov(r9, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ ldr(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ ldr(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
-    __ cmp(ip, r1);
-    __ b(eq, &ok);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
-  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r2,
-         FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ add(r3, r9, Operand(r2, LSL, kPointerSizeLog2 - 1));
-  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // r1 : function
-  // r2 : number of parameters (tagged)
-  // r3 : parameters pointer
-  // r9 : JavaScript frame pointer
-  // Registers used over whole function:
-  //  r5 : arguments count (tagged)
-  //  r6 : mapped parameter count (tagged)
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ ldr(r4, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(r0, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(eq, &adaptor_frame);
-
-  // No adaptor, parameter count = argument count.
-  __ mov(r5, r2);
-  __ mov(r6, r2);
-  __ b(&try_allocate);
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ add(r4, r4, Operand(r5, LSL, 1));
-  __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // r5 = argument count (tagged)
-  // r6 = parameter count (tagged)
-  // Compute the mapped parameter count = min(r6, r5) in r6.
-  __ mov(r6, r2);
-  __ cmp(r6, Operand(r5));
-  __ mov(r6, Operand(r5), LeaveCC, gt);
-
-  __ bind(&try_allocate);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map, has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  // If there are no mapped parameters, we do not need the parameter_map.
-  __ cmp(r6, Operand(Smi::kZero));
-  __ mov(r9, Operand::Zero(), LeaveCC, eq);
-  __ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
-  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
-
-  // 2. Backing store.
-  __ add(r9, r9, Operand(r5, LSL, 1));
-  __ add(r9, r9, Operand(FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ add(r9, r9, Operand(JSSloppyArgumentsObject::kSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(r9, r0, r9, r4, &runtime, NO_ALLOCATION_FLAGS);
-
-  // r0 = address of new object(s) (tagged)
-  // r2 = argument count (smi-tagged)
-  // Get the arguments boilerplate from the current native context into r4.
-  const int kNormalOffset =
-      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
-  const int kAliasedOffset =
-      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
-  __ ldr(r4, NativeContextMemOperand());
-  __ cmp(r6, Operand::Zero());
-  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
-  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
-
-  // r0 = address of new object (tagged)
-  // r2 = argument count (smi-tagged)
-  // r4 = address of arguments map (tagged)
-  // r6 = mapped parameter count (tagged)
-  __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
-  __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
-  __ str(r9, FieldMemOperand(r0, JSObject::kPropertiesOffset));
-  __ str(r9, FieldMemOperand(r0, JSObject::kElementsOffset));
-
-  // Set up the callee in-object property.
-  __ AssertNotSmi(r1);
-  __ str(r1, FieldMemOperand(r0, JSSloppyArgumentsObject::kCalleeOffset));
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  __ AssertSmi(r5);
-  __ str(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, r4 will point there, otherwise
-  // it will point to the backing store.
-  __ add(r4, r0, Operand(JSSloppyArgumentsObject::kSize));
-  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
-
-  // r0 = address of new object (tagged)
-  // r2 = argument count (tagged)
-  // r4 = address of parameter map or backing store (tagged)
-  // r6 = mapped parameter count (tagged)
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  __ cmp(r6, Operand(Smi::kZero));
-  // Move backing store address to r1, because it is
-  // expected there when filling in the unmapped arguments.
-  __ mov(r1, r4, LeaveCC, eq);
-  __ b(eq, &skip_parameter_map);
-
-  __ LoadRoot(r5, Heap::kSloppyArgumentsElementsMapRootIndex);
-  __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
-  __ add(r5, r6, Operand(Smi::FromInt(2)));
-  __ str(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
-  __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
-  __ add(r5, r4, Operand(r6, LSL, 1));
-  __ add(r5, r5, Operand(kParameterMapHeaderSize));
-  __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameters thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop, parameters_test;
-  __ mov(r5, r6);
-  __ add(r9, r2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
-  __ sub(r9, r9, Operand(r6));
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ add(r1, r4, Operand(r5, LSL, 1));
-  __ add(r1, r1, Operand(kParameterMapHeaderSize));
-
-  // r1 = address of backing store (tagged)
-  // r4 = address of parameter map (tagged), which is also the address of new
-  //      object + JSSloppyArgumentsObject::kSize (tagged)
-  // r0 = temporary scratch (among others, for address calculation)
-  // r5 = loop variable (tagged)
-  // ip = the hole value
-  __ jmp(&parameters_test);
-
-  __ bind(&parameters_loop);
-  __ sub(r5, r5, Operand(Smi::FromInt(1)));
-  __ mov(r0, Operand(r5, LSL, 1));
-  __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-  __ str(r9, MemOperand(r4, r0));
-  __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
-  __ str(ip, MemOperand(r1, r0));
-  __ add(r9, r9, Operand(Smi::FromInt(1)));
-  __ bind(&parameters_test);
-  __ cmp(r5, Operand(Smi::kZero));
-  __ b(ne, &parameters_loop);
-
-  // Restore r0 = new object (tagged) and r5 = argument count (tagged).
-  __ sub(r0, r4, Operand(JSSloppyArgumentsObject::kSize));
-  __ ldr(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));
-
-  __ bind(&skip_parameter_map);
-  // r0 = address of new object (tagged)
-  // r1 = address of backing store (tagged)
-  // r5 = argument count (tagged)
-  // r6 = mapped parameter count (tagged)
-  // r9 = scratch
-  // Copy arguments header and remaining slots (if there are any).
-  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
-  __ str(r9, FieldMemOperand(r1, FixedArray::kMapOffset));
-  __ str(r5, FieldMemOperand(r1, FixedArray::kLengthOffset));
-
-  Label arguments_loop, arguments_test;
-  __ sub(r3, r3, Operand(r6, LSL, 1));
-  __ jmp(&arguments_test);
-
-  __ bind(&arguments_loop);
-  __ sub(r3, r3, Operand(kPointerSize));
-  __ ldr(r4, MemOperand(r3, 0));
-  __ add(r9, r1, Operand(r6, LSL, 1));
-  __ str(r4, FieldMemOperand(r9, FixedArray::kHeaderSize));
-  __ add(r6, r6, Operand(Smi::FromInt(1)));
-
-  __ bind(&arguments_test);
-  __ cmp(r6, Operand(r5));
-  __ b(lt, &arguments_loop);
-
-  // Return.
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  // r0 = address of new object (tagged)
-  // r5 = argument count (tagged)
-  __ bind(&runtime);
-  __ Push(r1, r3, r5);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
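The try_allocate sizing above folds three allocations into one. A hedged
arithmetic sketch (word counts mirror the deleted code; the constants are
illustrative, not the real header definitions):

    constexpr int kPointerSize = 4;                 // ARM32
    constexpr int kFixedArrayHeaderWords = 2;       // map + length
    constexpr int kSloppyArgumentsObjectWords = 5;  // object + length + callee

    int SloppyArgumentsAllocationSize(int argument_count, int mapped_count) {
      int words = 0;
      if (mapped_count > 0) {
        // Parameter map: FixedArray header plus two extra words (context and
        // backing store) plus one slot per mapped parameter.
        words += kFixedArrayHeaderWords + 2 + mapped_count;
      }
      words += kFixedArrayHeaderWords + argument_count;  // backing store
      words += kSloppyArgumentsObjectWords;              // arguments object
      return words * kPointerSize;
    }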
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r1 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(r1);
-
-  // Make r2 point to the JavaScript frame.
-  __ mov(r2, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
-    __ cmp(ip, r1);
-    __ b(eq, &ok);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have an arguments adaptor frame below the function frame.
-  Label arguments_adaptor, arguments_done;
-  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
-  __ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ b(eq, &arguments_adaptor);
-  {
-    __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-    __ ldr(r0, FieldMemOperand(
-                   r4, SharedFunctionInfo::kFormalParameterCountOffset));
-    __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
-    __ add(r2, r2,
-           Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
-  }
-  __ b(&arguments_done);
-  __ bind(&arguments_adaptor);
-  {
-    __ ldr(r0, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ add(r2, r3, Operand(r0, LSL, kPointerSizeLog2 - 1));
-    __ add(r2, r2,
-           Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
-  }
-  __ bind(&arguments_done);
-
-  // ----------- S t a t e -------------
-  //  -- cp : context
-  //  -- r0 : number of rest parameters (tagged)
-  //  -- r1 : function
-  //  -- r2 : pointer to first rest parameters
-  //  -- lr : return address
-  // -----------------------------------
-
-  // Allocate space for the strict arguments object plus the backing store.
-  Label allocate, done_allocate;
-  __ mov(r6, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
-  __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
-  __ bind(&done_allocate);
-
-  // Setup the elements array in r3.
-  __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
-  __ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
-  __ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
-  __ add(r4, r3, Operand(FixedArray::kHeaderSize));
-  {
-    Label loop, done_loop;
-    __ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
-    __ bind(&loop);
-    __ cmp(r4, r1);
-    __ b(eq, &done_loop);
-    __ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
-    __ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
-    __ add(r4, r4, Operand(1 * kPointerSize));
-    __ b(&loop);
-    __ bind(&done_loop);
-  }
-
-  // Setup the strict arguments object in r4.
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r1);
-  __ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kMapOffset));
-  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
-  __ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kPropertiesOffset));
-  __ str(r3, FieldMemOperand(r4, JSStrictArgumentsObject::kElementsOffset));
-  __ str(r0, FieldMemOperand(r4, JSStrictArgumentsObject::kLengthOffset));
-  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
-  __ mov(r0, r4);
-  __ Ret();
-
-  // Fall back to %AllocateInNewSpace (if not too big).
-  Label too_big_for_new_space;
-  __ bind(&allocate);
-  __ cmp(r6, Operand(kMaxRegularHeapObjectSize));
-  __ b(gt, &too_big_for_new_space);
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(r6);
-    __ Push(r0, r2, r6);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ mov(r3, r0);
-    __ Pop(r0, r2);
-  }
-  __ b(&done_allocate);
-
-  // Fall back to %NewStrictArguments.
-  __ bind(&too_big_for_new_space);
-  __ push(r1);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 30ae358..6ec86ab 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -16,17 +16,6 @@
 
 class StringHelper : public AllStatic {
  public:
-  // Generate code for copying a large number of characters. This function
-  // is allowed to spend extra time setting up conditions to make copying
-  // faster. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharacters(MacroAssembler* masm,
-                                     Register dest,
-                                     Register src,
-                                     Register count,
-                                     Register scratch,
-                                     String::Encoding encoding);
-
   // Compares two flat one-byte strings and returns result in r0.
   static void GenerateCompareFlatOneByteStrings(
       MacroAssembler* masm, Register left, Register right, Register scratch1,
@@ -280,14 +269,6 @@
                                      Handle<Name> name,
                                      Register scratch0);
 
-  static void GeneratePositiveLookup(MacroAssembler* masm,
-                                     Label* miss,
-                                     Label* done,
-                                     Register elements,
-                                     Register name,
-                                     Register r0,
-                                     Register r1);
-
   bool SometimesSetsUpAFrame() override { return false; }
 
  private:
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index e63da5c..9348752 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -317,342 +317,14 @@
 
 #define __ ACCESS_MASM(masm)
 
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* allocation_memento_found) {
-  Register scratch_elements = r4;
-  DCHECK(!AreAliased(receiver, key, value, target_map,
-                     scratch_elements));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    DCHECK(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(
-        receiver, scratch_elements, allocation_memento_found);
-  }
-
-  // Set transitioned map.
-  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      r9,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  // Register lr contains the return address.
-  Label loop, entry, convert_hole, gc_required, only_change_map, done;
-  Register elements = r4;
-  Register length = r5;
-  Register array = r6;
-  Register array_end = array;
-
-  // target_map parameter can be clobbered.
-  Register scratch1 = target_map;
-  Register scratch2 = r9;
-
-  // Verify input registers don't conflict with locals.
-  DCHECK(!AreAliased(receiver, key, value, target_map,
-                     elements, length, array, scratch2));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
-  __ b(eq, &only_change_map);
-
-  __ push(lr);
-  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  // length: number of elements (smi-tagged)
-
-  // Allocate new FixedDoubleArray.
-  // Use lr as a temporary register.
-  __ mov(lr, Operand(length, LSL, 2));
-  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
-  __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
-  __ sub(array, array, Operand(kHeapObjectTag));
-  // array: destination FixedDoubleArray, not tagged as heap object.
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // r4: source FixedArray.
-
-  // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
-  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ str(scratch2, MemOperand(array, HeapObject::kMapOffset));
-
-  // Update receiver's map.
-  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      scratch2,
-                      kLRHasBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ add(scratch1, array, Operand(kHeapObjectTag));
-  __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ RecordWriteField(receiver,
-                      JSObject::kElementsOffset,
-                      scratch1,
-                      scratch2,
-                      kLRHasBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  // Prepare for conversion loop.
-  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
-  __ add(array_end, scratch2, Operand(length, LSL, 2));
-
-  // Repurpose registers no longer in use.
-  Register hole_lower = elements;
-  Register hole_upper = length;
-
-  __ mov(hole_lower, Operand(kHoleNanLower32));
-  __ mov(hole_upper, Operand(kHoleNanUpper32));
-  // scratch1: begin of source FixedArray element fields, not tagged
-  // hole_lower: kHoleNanLower32
-  // hole_upper: kHoleNanUpper32
-  // array_end: end of destination FixedDoubleArray, not tagged
-  // scratch2: begin of FixedDoubleArray element fields, not tagged
-
-  __ b(&entry);
-
-  __ bind(&only_change_map);
-  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      scratch2,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ b(&done);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ pop(lr);
-  __ b(fail);
-
-  // Convert and copy elements.
-  __ bind(&loop);
-  __ ldr(lr, MemOperand(scratch1, 4, PostIndex));
-  // lr: current element
-  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
-
-  // Normal smi, convert to double and store.
-  __ vmov(s0, lr);
-  __ vcvt_f64_s32(d0, s0);
-  __ vstr(d0, scratch2, 0);
-  __ add(scratch2, scratch2, Operand(8));
-  __ b(&entry);
-
-  // Hole found, store the-hole NaN.
-  __ bind(&convert_hole);
-  if (FLAG_debug_code) {
-    // Restore a "smi-untagged" heap object.
-    __ SmiTag(lr);
-    __ orr(lr, lr, Operand(1));
-    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
-    __ Assert(eq, kObjectFoundInSmiOnlyArray);
-  }
-  __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));
-
-  __ bind(&entry);
-  __ cmp(scratch2, array_end);
-  __ b(lt, &loop);
-
-  __ pop(lr);
-  __ bind(&done);
-}
-
-
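The conversion loop above rewrites each element in place: smis become real
doubles, the hole becomes a NaN with a payload ordinary arithmetic cannot
produce. A C++ sketch (the hole bit patterns here are an assumption for
illustration, not the authoritative kHoleNan constants):

    #include <cstdint>
    #include <cstring>

    constexpr uint32_t kHoleUpper = 0xFFF7FFFF;  // assumed hole-NaN pattern
    constexpr uint32_t kHoleLower = 0xFFF7FFFF;

    void ConvertElement(intptr_t tagged, uint32_t* lo, uint32_t* hi) {
      if ((tagged & 1) == 0) {  // smi: untag, then convert to double
        double d = static_cast<double>(tagged >> 1);
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        *lo = static_cast<uint32_t>(bits);
        *hi = static_cast<uint32_t>(bits >> 32);
      } else {  // the hole: store the canonical hole NaN
        *lo = kHoleLower;
        *hi = kHoleUpper;
      }
    }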
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  // Register lr contains the return address.
-  Label entry, loop, convert_hole, gc_required, only_change_map;
-  Register elements = r4;
-  Register array = r6;
-  Register length = r5;
-  Register scratch = r9;
-
-  // Verify input registers don't conflict with locals.
-  DCHECK(!AreAliased(receiver, key, value, target_map,
-                     elements, array, length, scratch));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
-  __ b(eq, &only_change_map);
-
-  __ push(lr);
-  __ Push(target_map, receiver, key, value);
-  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  // elements: source FixedDoubleArray
-  // length: number of elements (smi-tagged)
-
-  // Allocate new FixedArray.
-  // Re-use value and target_map registers, as they have been saved on the
-  // stack.
-  Register array_size = value;
-  Register allocate_scratch = target_map;
-  __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
-  __ add(array_size, array_size, Operand(length, LSL, 1));
-  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
-              NO_ALLOCATION_FLAGS);
-  // array: destination FixedArray, tagged as heap object
-  // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
-  __ str(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ str(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
-
-  __ sub(array, array, Operand(kHeapObjectTag));
-
-  // Prepare for conversion loop.
-  Register src_elements = elements;
-  Register dst_elements = target_map;
-  Register dst_end = length;
-  Register heap_number_map = scratch;
-  __ add(src_elements, elements,
-         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
-  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
-  __ add(dst_end, dst_elements, Operand(length, LSL, 1));
-
-  // Allocating heap numbers in the loop below can fail and cause a jump to
-  // gc_required. We can't leave a partly initialized FixedArray behind,
-  // so pessimistically fill it with holes now.
-  Label initialization_loop, initialization_loop_entry;
-  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-  __ b(&initialization_loop_entry);
-  __ bind(&initialization_loop);
-  __ str(scratch, MemOperand(dst_elements, kPointerSize, PostIndex));
-  __ bind(&initialization_loop_entry);
-  __ cmp(dst_elements, dst_end);
-  __ b(lt, &initialization_loop);
-
-  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
-  __ add(array, array, Operand(kHeapObjectTag));
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  // Using offsetted addresses in src_elements to fully take advantage of
-  // post-indexing.
-  // dst_elements: begin of destination FixedArray element fields, not tagged
-  // src_elements: begin of source FixedDoubleArray element fields,
-  //               not tagged, +4
-  // dst_end: end of destination FixedArray, not tagged
-  // array: destination FixedArray
-  // heap_number_map: heap number map
-  __ b(&entry);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ Pop(target_map, receiver, key, value);
-  __ pop(lr);
-  __ b(fail);
-
-  __ bind(&loop);
-  Register upper_bits = key;
-  __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
-  // upper_bits: current element's upper 32 bits
-  // src_elements: address of next element's upper 32 bits
-  __ cmp(upper_bits, Operand(kHoleNanUpper32));
-  __ b(eq, &convert_hole);
-
-  // Non-hole double, copy value into a heap number.
-  Register heap_number = receiver;
-  Register scratch2 = value;
-  __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
-                        &gc_required);
-  // heap_number: new heap number
-  __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
-  __ Strd(scratch2, upper_bits,
-          FieldMemOperand(heap_number, HeapNumber::kValueOffset));
-  __ mov(scratch2, dst_elements);
-  __ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
-  __ RecordWrite(array,
-                 scratch2,
-                 heap_number,
-                 kLRHasBeenSaved,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-  __ b(&entry);
-
-  // Replace the-hole NaN with the-hole pointer.
-  __ bind(&convert_hole);
-  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
-  __ str(scratch2, MemOperand(dst_elements, 4, PostIndex));
-
-  __ bind(&entry);
-  __ cmp(dst_elements, dst_end);
-  __ b(lt, &loop);
-
-  __ Pop(target_map, receiver, key, value);
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ RecordWriteField(receiver,
-                      JSObject::kElementsOffset,
-                      array,
-                      scratch,
-                      kLRHasBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ pop(lr);
-
-  __ bind(&only_change_map);
-  // Update receiver's map.
-  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      scratch,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                        Register string,
                                        Register index,
                                        Register result,
                                        Label* call_runtime) {
+  Label indirect_string_loaded;
+  __ bind(&indirect_string_loaded);
+
   // Fetch the instance type of the receiver into result register.
   __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
   __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -663,17 +335,24 @@
   __ b(eq, &check_sequential);
 
   // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ tst(result, Operand(kSlicedNotConsMask));
+  Label cons_string, thin_string;
+  __ and_(result, result, Operand(kStringRepresentationMask));
+  __ cmp(result, Operand(kConsStringTag));
   __ b(eq, &cons_string);
+  __ cmp(result, Operand(kThinStringTag));
+  __ b(eq, &thin_string);
 
   // Handle slices.
-  Label indirect_string_loaded;
   __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
   __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
   __ add(index, index, Operand::SmiUntag(result));
   __ jmp(&indirect_string_loaded);
 
+  // Handle thin strings.
+  __ bind(&thin_string);
+  __ ldr(string, FieldMemOperand(string, ThinString::kActualOffset));
+  __ jmp(&indirect_string_loaded);
+
   // Handle cons strings.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
@@ -685,10 +364,7 @@
   __ b(ne, call_runtime);
   // Get the first of the two strings and load its instance type.
   __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+  __ jmp(&indirect_string_loaded);
 
   // Distinguish sequential and external strings. Only these two string
   // representations can reach here (slices and flat cons strings have been
@@ -771,31 +447,23 @@
   return result;
 }
 
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
 
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
-                               MarkingParity* parity) {
-  if (IsYoungSequence(isolate, sequence)) {
-    *age = kNoAgeCodeAge;
-    *parity = NO_MARKING_PARITY;
-  } else {
-    Address target_address = Memory::Address_at(
-        sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
-    Code* stub = GetCodeFromTargetAddress(target_address);
-    GetCodeAgeAndParity(stub, age, parity);
-  }
+  Address target_address = Memory::Address_at(
+      sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+  Code* stub = GetCodeFromTargetAddress(target_address);
+  return GetAgeOfCodeAgeStub(stub);
 }
 
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
-                                byte* sequence,
-                                Code::Age age,
-                                MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+                                Code::Age age) {
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
     Assembler::FlushICache(isolate, sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age);
     CodePatcher patcher(isolate, sequence,
                         young_length / Assembler::kInstrSize);
     patcher.masm()->add(r0, pc, Operand(-8));
@@ -804,7 +472,6 @@
   }
 }
 
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 2bade20..0b86f3e 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -190,6 +190,7 @@
   B7 = 1 << 7,
   B8 = 1 << 8,
   B9 = 1 << 9,
+  B10 = 1 << 10,
   B12 = 1 << 12,
   B16 = 1 << 16,
   B17 = 1 << 17,
@@ -218,7 +219,6 @@
   kOff8Mask = (1 << 8) - 1
 };
 
-
 enum BarrierOption {
   OSHLD = 0x1,
   OSHST = 0x2,
@@ -327,16 +327,18 @@
 
 // NEON data type
 enum NeonDataType {
-  NeonS8 = 0x1,   // U = 0, imm3 = 0b001
-  NeonS16 = 0x2,  // U = 0, imm3 = 0b010
-  NeonS32 = 0x4,  // U = 0, imm3 = 0b100
-  NeonU8 = 1 << 24 | 0x1,   // U = 1, imm3 = 0b001
-  NeonU16 = 1 << 24 | 0x2,  // U = 1, imm3 = 0b010
-  NeonU32 = 1 << 24 | 0x4,   // U = 1, imm3 = 0b100
-  NeonDataTypeSizeMask = 0x7,
-  NeonDataTypeUMask = 1 << 24
+  NeonS8 = 0,
+  NeonS16 = 1,
+  NeonS32 = 2,
+  // Gap to make it easier to extract U and size.
+  NeonU8 = 4,
+  NeonU16 = 5,
+  NeonU32 = 6
 };
 
+inline int NeonU(NeonDataType dt) { return static_cast<int>(dt) >> 2; }
+inline int NeonSz(NeonDataType dt) { return static_cast<int>(dt) & 0x3; }
+
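// Editor's sketch (illustrative, not part of the patch): a standalone check
// that the regrouped encoding above separates with one shift and one mask --
// bit 2 is the unsigned flag, the low two bits are the size code.
#include <cassert>
enum NeonDT { S8 = 0, S16 = 1, S32 = 2, U8 = 4, U16 = 5, U32 = 6 };
inline int DtU(NeonDT dt) { return static_cast<int>(dt) >> 2; }
inline int DtSz(NeonDT dt) { return static_cast<int>(dt) & 0x3; }
int main() {
  assert(DtU(U16) == 1 && DtSz(U16) == 1);  // unsigned, 16-bit
  assert(DtU(S32) == 0 && DtSz(S32) == 2);  // signed, 32-bit
  return 0;
}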
 enum NeonListType {
   nlt_1 = 0x7,
   nlt_2 = 0xA,
@@ -374,10 +376,10 @@
 // Type of VFP register. Determines register encoding.
 enum VFPRegPrecision {
   kSinglePrecision = 0,
-  kDoublePrecision = 1
+  kDoublePrecision = 1,
+  kSimd128Precision = 2
 };
 
-
 // VFP FPSCR constants.
 enum VFPConversionMode {
   kFPSCRRounding = 0,
@@ -667,15 +669,22 @@
 
 
  private:
-  // Join split register codes, depending on single or double precision.
+  // Join split register codes, depending on register precision.
   // four_bit is the position of the least-significant bit of the four
   // bit specifier. one_bit is the position of the additional single bit
   // specifier.
   inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
     if (pre == kSinglePrecision) {
       return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
+    } else {
+      int reg_num = (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
+      if (pre == kDoublePrecision) {
+        return reg_num;
+      }
+      DCHECK_EQ(kSimd128Precision, pre);
+      DCHECK_EQ(reg_num & 1, 0);
+      return reg_num / 2;
     }
-    return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
   }
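// Editor's sketch (illustrative, not part of the patch): the join logic above
// on already-extracted bit fields. Single precision appends the extra bit as
// the LSB; double prepends it as the MSB; Simd128 halves an even
// double-register number to get the q-register.
inline int GlueRegValue(int precision, int four_bits, int one_bit) {
  if (precision == 0) return (four_bits << 1) | one_bit;  // kSinglePrecision
  int reg_num = (one_bit << 4) | four_bits;
  return (precision == 1) ? reg_num : reg_num / 2;  // kDouble / kSimd128
}
// GlueRegValue(1, 6, 1) == 22 -> d22; GlueRegValue(2, 6, 1) == 11 -> q11.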
 
   // We need to prevent the creation of instances of class Instruction.
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 1231355..e0e602e 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -95,7 +95,7 @@
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
   for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
-    double double_value = input_->GetDoubleRegister(i);
+    Float64 double_value = input_->GetDoubleRegister(i);
     output_frame->SetDoubleRegister(i, double_value);
   }
 }
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index e408e85..041df55 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1419,6 +1419,9 @@
 // Sd = vsqrt(Sm)
 // vmrs
 // vmsr
+// Qd = vdup.size(Rt)
+// vmov.size: Dd[i] = Rt
+// vmov.sign.size: Rt = Dn[i]
 void Decoder::DecodeTypeVFP(Instruction* instr) {
   VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
   VERIFY(instr->Bits(11, 9) == 0x5);
@@ -1531,21 +1534,71 @@
     if ((instr->VCValue() == 0x0) &&
         (instr->VAValue() == 0x0)) {
       DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
-    } else if ((instr->VLValue() == 0x0) &&
-               (instr->VCValue() == 0x1) &&
-               (instr->Bit(23) == 0x0)) {
-      if (instr->Bit(21) == 0x0) {
-        Format(instr, "vmov'cond.32 'Dd[0], 'rt");
+    } else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1)) {
+      if (instr->Bit(23) == 0) {
+        int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
+        if ((opc1_opc2 & 0xb) == 0) {
+          // NeonS32/NeonU32
+          if (instr->Bit(21) == 0x0) {
+            Format(instr, "vmov'cond.32 'Dd[0], 'rt");
+          } else {
+            Format(instr, "vmov'cond.32 'Dd[1], 'rt");
+          }
+        } else {
+          int vd = instr->VFPNRegValue(kDoublePrecision);
+          int rt = instr->RtValue();
+          if ((opc1_opc2 & 0x8) != 0) {
+            // NeonS8 / NeonU8
+            int i = opc1_opc2 & 0x7;
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "vmov.8 d%d[%d], r%d", vd, i, rt);
+          } else if ((opc1_opc2 & 0x1) != 0) {
+            // NeonS16 / NeonU16
+            int i = (opc1_opc2 >> 1) & 0x3;
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "vmov.16 d%d[%d], r%d", vd, i, rt);
+          } else {
+            Unknown(instr);
+          }
+        }
       } else {
-        Format(instr, "vmov'cond.32 'Dd[1], 'rt");
+        int size = 32;
+        if (instr->Bit(5) != 0)
+          size = 16;
+        else if (instr->Bit(22) != 0)
+          size = 8;
+        int Vd = instr->VFPNRegValue(kSimd128Precision);
+        int Rt = instr->RtValue();
+        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                    "vdup.%i q%d, r%d", size, Vd, Rt);
       }
-    } else if ((instr->VLValue() == 0x1) &&
-               (instr->VCValue() == 0x1) &&
-               (instr->Bit(23) == 0x0)) {
-      if (instr->Bit(21) == 0x0) {
-        Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
+    } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1)) {
+      int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
+      if ((opc1_opc2 & 0xb) == 0) {
+        // NeonS32 / NeonU32
+        if (instr->Bit(21) == 0x0) {
+          Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
+        } else {
+          Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
+        }
       } else {
-        Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
+        const char* sign = instr->Bit(23) != 0 ? "u" : "s";
+        int rt = instr->RtValue();
+        int vn = instr->VFPNRegValue(kDoublePrecision);
+        if ((opc1_opc2 & 0x8) != 0) {
+          // NeonS8 / NeonU8
+          int i = opc1_opc2 & 0x7;
+          out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "vmov.%s8 r%d, d%d[%d]", sign, rt, vn, i);
+        } else if ((opc1_opc2 & 0x1) != 0) {
+          // NeonS16 / NeonU16
+          int i = (opc1_opc2 >> 1) & 0x3;
+          out_buffer_pos_ +=
+              SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%s16 r%d, d%d[%d]",
+                       sign, rt, vn, i);
+        } else {
+          Unknown(instr);
+        }
       }
     } else if ((instr->VCValue() == 0x0) &&
                (instr->VAValue() == 0x7) &&
@@ -1563,6 +1616,8 @@
           Format(instr, "vmrs'cond 'rt, FPSCR");
         }
       }
+    } else {
+      Unknown(instr);  // Not used by V8.
     }
   }
 }
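// Editor's sketch (illustrative, not part of the patch): how the
// scalar-transfer decode above classifies opc1:opc2, where the field is
// (Bits(22, 21) << 2) | Bits(6, 5). Returns the element size in bits and
// writes the lane index; 0 means an unallocated encoding.
inline int ClassifyVmovLane(int opc1_opc2, int* lane) {
  if (opc1_opc2 & 0x8) {  // bit 3 set: byte lane, index in bits 2:0
    *lane = opc1_opc2 & 0x7;
    return 8;
  }
  if (opc1_opc2 & 0x1) {  // bit 0 set: half-word lane, index in bits 2:1
    *lane = (opc1_opc2 >> 1) & 0x3;
    return 16;
  }
  if ((opc1_opc2 & 0xb) == 0) {  // bits 3, 1, 0 clear: word lane, index in bit 2
    *lane = (opc1_opc2 >> 2) & 0x1;
    return 32;
  }
  return 0;  // falls through to Unknown(instr) above
}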
@@ -1801,6 +1856,150 @@
 
 void Decoder::DecodeSpecialCondition(Instruction* instr) {
   switch (instr->SpecialValue()) {
+    case 4: {
+      int Vd, Vm, Vn;
+      if (instr->Bit(6) == 0) {
+        Vd = instr->VFPDRegValue(kDoublePrecision);
+        Vm = instr->VFPMRegValue(kDoublePrecision);
+        Vn = instr->VFPNRegValue(kDoublePrecision);
+      } else {
+        Vd = instr->VFPDRegValue(kSimd128Precision);
+        Vm = instr->VFPMRegValue(kSimd128Precision);
+        Vn = instr->VFPNRegValue(kSimd128Precision);
+      }
+      switch (instr->Bits(11, 8)) {
+        case 0x0: {
+          if (instr->Bit(4) == 1) {
+            int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+            // vqadd.s<size> Qd, Qm, Qn.
+            out_buffer_pos_ +=
+                SNPrintF(out_buffer_ + out_buffer_pos_,
+                         "vqadd.s%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        case 0x1: {
+          if (instr->Bits(21, 20) == 2 && instr->Bit(6) == 1 &&
+              instr->Bit(4) == 1) {
+            if (Vm == Vn) {
+              // vmov Qd, Qm
+              out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                          "vmov q%d, q%d", Vd, Vm);
+            } else {
+              // vorr Qd, Qm, Qn.
+              out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                          "vorr q%d, q%d, q%d", Vd, Vn, Vm);
+            }
+          } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
+                     instr->Bit(4) == 1) {
+            // vand Qd, Qm, Qn.
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "vand q%d, q%d, q%d", Vd, Vn, Vm);
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        case 0x2: {
+          if (instr->Bit(4) == 1) {
+            int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+            // vqsub.s<size> Qd, Qm, Qn.
+            out_buffer_pos_ +=
+                SNPrintF(out_buffer_ + out_buffer_pos_,
+                         "vqsub.s%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        case 0x3: {
+          int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+          const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
+          // vcge/vcgt.s<size> Qd, Qm, Qn.
+          out_buffer_pos_ +=
+              SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d",
+                       op, size, Vd, Vn, Vm);
+          break;
+        }
+        case 0x6: {
+          int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+          // vmin/vmax.s<size> Qd, Qm, Qn.
+          const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
+          out_buffer_pos_ +=
+              SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d",
+                       op, size, Vd, Vn, Vm);
+          break;
+        }
+        case 0x8: {
+          const char* op = (instr->Bit(4) == 0) ? "vadd" : "vtst";
+          int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+          // vadd/vtst.i<size> Qd, Qm, Qn.
+          out_buffer_pos_ +=
+              SNPrintF(out_buffer_ + out_buffer_pos_, "%s.i%d q%d, q%d, q%d",
+                       op, size, Vd, Vn, Vm);
+          break;
+        }
+        case 0x9: {
+          if (instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+            int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+            // vmul.i<size> Qd, Qm, Qn.
+            out_buffer_pos_ +=
+                SNPrintF(out_buffer_ + out_buffer_pos_,
+                         "vmul.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        case 0xd: {
+          if (instr->Bit(4) == 0) {
+            const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
+            // vadd/vsub.f32 Qd, Qm, Qn.
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        case 0xe: {
+          if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
+            // vceq.f32 Qd, Qm, Qn.
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "vceq.f32 q%d, q%d, q%d", Vd, Vn, Vm);
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        case 0xf: {
+          if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
+            if (instr->Bit(4) == 1) {
+              // vrecps/vrsqrts.f32 Qd, Qm, Qn.
+              const char* op = instr->Bit(21) == 0 ? "vrecps" : "vrsqrts";
+              out_buffer_pos_ +=
+                  SNPrintF(out_buffer_ + out_buffer_pos_,
+                           "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+            } else {
+              // vmin/vmax.f32 Qd, Qm, Qn.
+              const char* op = instr->Bit(21) == 1 ? "vmin" : "vmax";
+              out_buffer_pos_ +=
+                  SNPrintF(out_buffer_ + out_buffer_pos_,
+                           "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+            }
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        default:
+          Unknown(instr);
+          break;
+      }
+      break;
+    }
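// Editor's note (illustrative, not part of the patch): the recurring size
// expression in these NEON cases decodes Bits(21, 20) as a log2 element-size
// code.
inline int ElementBits(int size_code) {  // size_code = instr->Bits(21, 20)
  return 8 * (1 << size_code);  // 0 -> 8, 1 -> 16, 2 -> 32 (kBitsPerByte == 8)
}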
     case 5:
       if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
           (instr->Bit(4) == 1)) {
@@ -1810,11 +2009,152 @@
         int Vm = (instr->Bit(5) << 4) | instr->VmValue();
         int imm3 = instr->Bits(21, 19);
         out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                    "vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
+                                    "vmovl.s%d q%d, d%d", imm3 * 8, Vd, Vm);
+      } else if (instr->Bits(21, 20) == 3 && instr->Bit(4) == 0) {
+        // vext.8 Qd, Qm, Qn, imm4
+        int imm4 = instr->Bits(11, 8);
+        int Vd = instr->VFPDRegValue(kSimd128Precision);
+        int Vm = instr->VFPMRegValue(kSimd128Precision);
+        int Vn = instr->VFPNRegValue(kSimd128Precision);
+        out_buffer_pos_ +=
+            SNPrintF(out_buffer_ + out_buffer_pos_, "vext.8 q%d, q%d, q%d, #%d",
+                     Vd, Vn, Vm, imm4);
+      } else if (instr->Bits(11, 7) == 0xA && instr->Bit(4) == 1) {
+        // vshl.i<size> Qd, Qm, shift
+        int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
+        int shift = instr->Bits(21, 16) - size;
+        int Vd = instr->VFPDRegValue(kSimd128Precision);
+        int Vm = instr->VFPMRegValue(kSimd128Precision);
+        out_buffer_pos_ +=
+            SNPrintF(out_buffer_ + out_buffer_pos_, "vshl.i%d q%d, q%d, #%d",
+                     size, Vd, Vm, shift);
+      } else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1) {
+        // vshr.s<size> Qd, Qm, shift
+        int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
+        int shift = 2 * size - instr->Bits(21, 16);
+        int Vd = instr->VFPDRegValue(kSimd128Precision);
+        int Vm = instr->VFPMRegValue(kSimd128Precision);
+        out_buffer_pos_ +=
+            SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.s%d q%d, q%d, #%d",
+                     size, Vd, Vm, shift);
       } else {
         Unknown(instr);
       }
       break;
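// Editor's sketch (illustrative, not part of the patch): recovering element
// size and shift amount from imm6 = Bits(21, 16) in the vshl/vshr decodes
// above. Assumes a valid quadword encoding (imm6 >= 8).
#include <cstdint>
inline uint32_t RoundDownToPowerOfTwo32(uint32_t v) {  // stand-in for base::bits
  uint32_t p = 1;
  while (p <= v / 2) p <<= 1;
  return p;
}
inline int VshlShift(int imm6) {  // vshl: imm6 = size + shift
  return imm6 - RoundDownToPowerOfTwo32(imm6);
}
inline int VshrShift(int imm6) {  // vshr: imm6 = 2 * size - shift
  return 2 * RoundDownToPowerOfTwo32(imm6) - imm6;
}
// VshlShift(37) == 5 (size 32); VshrShift(59) == 5 (size 32).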
+    case 6: {
+      int Vd, Vm, Vn;
+      if (instr->Bit(6) == 0) {
+        Vd = instr->VFPDRegValue(kDoublePrecision);
+        Vm = instr->VFPMRegValue(kDoublePrecision);
+        Vn = instr->VFPNRegValue(kDoublePrecision);
+      } else {
+        Vd = instr->VFPDRegValue(kSimd128Precision);
+        Vm = instr->VFPMRegValue(kSimd128Precision);
+        Vn = instr->VFPNRegValue(kSimd128Precision);
+      }
+      switch (instr->Bits(11, 8)) {
+        case 0x0: {
+          if (instr->Bit(4) == 1) {
+            int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+            // vqadd.u<size> Qd, Qm, Qn.
+            out_buffer_pos_ +=
+                SNPrintF(out_buffer_ + out_buffer_pos_,
+                         "vqadd.u%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        case 0x1: {
+          if (instr->Bits(21, 20) == 1 && instr->Bit(4) == 1) {
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "vbsl q%d, q%d, q%d", Vd, Vn, Vm);
+          } else if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 1) {
+            if (instr->Bit(6) == 0) {
+              // veor Dd, Dn, Dm
+              out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                          "veor d%d, d%d, d%d", Vd, Vn, Vm);
+
+            } else {
+              // veor Qd, Qn, Qm
+              out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                          "veor q%d, q%d, q%d", Vd, Vn, Vm);
+            }
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        case 0x2: {
+          if (instr->Bit(4) == 1) {
+            int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+            // vqsub.u<size> Qd, Qm, Qn.
+            out_buffer_pos_ +=
+                SNPrintF(out_buffer_ + out_buffer_pos_,
+                         "vqsub.u%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        case 0x3: {
+          int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+          const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
+          // vcge/vcgt.u<size> Qd, Qm, Qn.
+          out_buffer_pos_ +=
+              SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d",
+                       op, size, Vd, Vn, Vm);
+          break;
+        }
+        case 0x6: {
+          int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+          // vmin/vmax.u<size> Qd, Qm, Qn.
+          const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
+          out_buffer_pos_ +=
+              SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d",
+                       op, size, Vd, Vn, Vm);
+          break;
+        }
+        case 0x8: {
+          int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+          if (instr->Bit(4) == 0) {
+            out_buffer_pos_ +=
+                SNPrintF(out_buffer_ + out_buffer_pos_,
+                         "vsub.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+          } else {
+            out_buffer_pos_ +=
+                SNPrintF(out_buffer_ + out_buffer_pos_,
+                         "vceq.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+          }
+          break;
+        }
+        case 0xd: {
+          if (instr->Bit(21) == 0 && instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+            // vmul.f32 Qd, Qn, Qm
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "vmul.f32 q%d, q%d, q%d", Vd, Vn, Vm);
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        case 0xe: {
+          if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
+            const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
+            // vcge/vcgt.f32 Qd, Qm, Qn.
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+          } else {
+            Unknown(instr);
+          }
+          break;
+        }
+        default:
+          Unknown(instr);
+          break;
+      }
+      break;
+    }
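// Editor's note: case 6 is largely the unsigned twin of case 4; the one
// genuinely new operation is vbsl ("bitwise select"), whose per-bit effect is
// (illustrative sketch, not part of the patch):
inline uint32_t Vbsl(uint32_t dst, uint32_t n, uint32_t m) {
  return (dst & n) | (~dst & m);  // set bits of dst pick from Qn, clear from Qm
}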
     case 7:
       if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
           (instr->Bit(4) == 1)) {
@@ -1824,14 +2164,119 @@
         int Vm = (instr->Bit(5) << 4) | instr->VmValue();
         int imm3 = instr->Bits(21, 19);
         out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                    "vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
-      } else if ((instr->Bits(21, 16) == 0x32) && (instr->Bits(11, 7) == 0) &&
-                 (instr->Bit(4) == 0)) {
-        int Vd = instr->VFPDRegValue(kDoublePrecision);
-        int Vm = instr->VFPMRegValue(kDoublePrecision);
-        char rtype = (instr->Bit(6) == 0) ? 'd' : 'q';
-        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                    "vswp %c%d, %c%d", rtype, Vd, rtype, Vm);
+                                    "vmovl.u%d q%d, d%d", imm3 * 8, Vd, Vm);
+      } else if (instr->Opc1Value() == 7 && instr->Bits(21, 20) == 0x3 &&
+                 instr->Bit(4) == 0) {
+        if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
+          if (instr->Bit(6) == 0) {
+            int Vd = instr->VFPDRegValue(kDoublePrecision);
+            int Vm = instr->VFPMRegValue(kDoublePrecision);
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "vswp d%d, d%d", Vd, Vm);
+          } else {
+            int Vd = instr->VFPDRegValue(kSimd128Precision);
+            int Vm = instr->VFPMRegValue(kSimd128Precision);
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                        "vswp q%d, q%d", Vd, Vm);
+          }
+        } else if (instr->Bits(11, 7) == 0x18) {
+          int Vd = instr->VFPDRegValue(kSimd128Precision);
+          int Vm = instr->VFPMRegValue(kDoublePrecision);
+          int index = instr->Bit(19);
+          out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "vdup q%d, d%d[%d]", Vd, Vm, index);
+        } else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
+          int Vd = instr->VFPDRegValue(kSimd128Precision);
+          int Vm = instr->VFPMRegValue(kSimd128Precision);
+          out_buffer_pos_ +=
+              SNPrintF(out_buffer_ + out_buffer_pos_, "vmvn q%d, q%d", Vd, Vm);
+        } else if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
+                   instr->Bit(6) == 1) {
+          int Vd = instr->VFPDRegValue(kSimd128Precision);
+          int Vm = instr->VFPMRegValue(kSimd128Precision);
+          const char* suffix = nullptr;
+          int op = instr->Bits(8, 7);
+          switch (op) {
+            case 0:
+              suffix = "f32.s32";
+              break;
+            case 1:
+              suffix = "f32.u32";
+              break;
+            case 2:
+              suffix = "s32.f32";
+              break;
+            case 3:
+              suffix = "u32.f32";
+              break;
+          }
+          out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "vcvt.%s q%d, q%d", suffix, Vd, Vm);
+        } else if (instr->Bits(11, 10) == 0x2) {
+          int Vd = instr->VFPDRegValue(kDoublePrecision);
+          int Vn = instr->VFPNRegValue(kDoublePrecision);
+          int Vm = instr->VFPMRegValue(kDoublePrecision);
+          int len = instr->Bits(9, 8);
+          NeonListOperand list(DwVfpRegister::from_code(Vn), len + 1);
+          out_buffer_pos_ +=
+              SNPrintF(out_buffer_ + out_buffer_pos_, "%s d%d, ",
+                       instr->Bit(6) == 0 ? "vtbl.8" : "vtbx.8", Vd);
+          FormatNeonList(Vn, list.type());
+          Print(", ");
+          PrintDRegister(Vm);
+        } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 6) == 0x7) {
+          int Vd = instr->VFPDRegValue(kSimd128Precision);
+          int Vm = instr->VFPMRegValue(kSimd128Precision);
+          int size = kBitsPerByte * (1 << instr->Bits(19, 18));
+          // vzip.<size> Qd, Qm.
+          out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "vzip.%d q%d, q%d", size, Vd, Vm);
+        } else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0) {
+          int Vd = instr->VFPDRegValue(kSimd128Precision);
+          int Vm = instr->VFPMRegValue(kSimd128Precision);
+          int size = kBitsPerByte * (1 << instr->Bits(19, 18));
+          int op = kBitsPerByte
+                   << (static_cast<int>(Neon64) - instr->Bits(8, 7));
+          // vrev<op>.<size> Qd, Qm.
+          out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "vrev%d.%d q%d, q%d", op, size, Vd, Vm);
+        } else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0) {
+          int Vd = instr->VFPDRegValue(kSimd128Precision);
+          int Vm = instr->VFPMRegValue(kSimd128Precision);
+          int size = kBitsPerByte * (1 << instr->Bits(19, 18));
+          const char* type = instr->Bit(10) != 0 ? "f" : "s";
+          if (instr->Bits(9, 6) == 0xd) {
+            // vabs<type>.<size> Qd, Qm.
+            out_buffer_pos_ +=
+                SNPrintF(out_buffer_ + out_buffer_pos_, "vabs.%s%d q%d, q%d",
+                         type, size, Vd, Vm);
+          } else if (instr->Bits(9, 6) == 0xf) {
+            // vneg<type>.<size> Qd, Qm.
+            out_buffer_pos_ +=
+                SNPrintF(out_buffer_ + out_buffer_pos_, "vneg.%s%d q%d, q%d",
+                         type, size, Vd, Vm);
+          } else {
+            Unknown(instr);
+          }
+        } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5) {
+          // vrecpe/vrsqrte.f32 Qd, Qm.
+          int Vd = instr->VFPDRegValue(kSimd128Precision);
+          int Vm = instr->VFPMRegValue(kSimd128Precision);
+          const char* op = instr->Bit(7) == 0 ? "vrecpe" : "vrsqrte";
+          out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                      "%s.f32 q%d, q%d", op, Vd, Vm);
+        } else {
+          Unknown(instr);
+        }
+      } else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1) {
+        // vshr.u<size> Qd, Qm, shift
+        int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
+        int shift = 2 * size - instr->Bits(21, 16);
+        int Vd = instr->VFPDRegValue(kSimd128Precision);
+        int Vm = instr->VFPMRegValue(kSimd128Precision);
+        out_buffer_pos_ +=
+            SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.u%d q%d, q%d, #%d",
+                     size, Vd, Vm, shift);
       } else {
         Unknown(instr);
       }
@@ -1845,8 +2290,8 @@
         int size = instr->Bits(7, 6);
         int align = instr->Bits(5, 4);
         int Rm = instr->VmValue();
-        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                    "vst1.%d ", (1 << size) << 3);
+        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "vst1.%d ",
+                                    (1 << size) << 3);
         FormatNeonList(Vd, type);
         Print(", ");
         FormatNeonMemory(Rn, align, Rm);
@@ -1858,8 +2303,8 @@
         int size = instr->Bits(7, 6);
         int align = instr->Bits(5, 4);
         int Rm = instr->VmValue();
-        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                    "vld1.%d ", (1 << size) << 3);
+        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "vld1.%d ",
+                                    (1 << size) << 3);
         FormatNeonList(Vd, type);
         Print(", ");
         FormatNeonMemory(Rn, align, Rm);
@@ -1873,8 +2318,8 @@
         int Rn = instr->Bits(19, 16);
         int offset = instr->Bits(11, 0);
         if (offset == 0) {
-          out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                      "pld [r%d]", Rn);
+          out_buffer_pos_ +=
+              SNPrintF(out_buffer_ + out_buffer_pos_, "pld [r%d]", Rn);
         } else if (instr->Bit(23) == 0) {
           out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                       "pld [r%d, #-%d]", Rn, offset);
@@ -1886,16 +2331,16 @@
         int option = instr->Bits(3, 0);
         switch (instr->Bits(7, 4)) {
           case 4:
-            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                        "dsb %s", barrier_option_names[option]);
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dsb %s",
+                                        barrier_option_names[option]);
             break;
           case 5:
-            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                        "dmb %s", barrier_option_names[option]);
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dmb %s",
+                                        barrier_option_names[option]);
             break;
           case 6:
-            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
-                                        "isb %s", barrier_option_names[option]);
+            out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "isb %s",
+                                        barrier_option_names[option]);
             break;
           default:
             Unknown(instr);
diff --git a/src/arm/interface-descriptors-arm.cc b/src/arm/interface-descriptors-arm.cc
index 75161af..8281f2a 100644
--- a/src/arm/interface-descriptors-arm.cc
+++ b/src/arm/interface-descriptors-arm.cc
@@ -66,37 +66,10 @@
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r2};
+  Register registers[] = {r1, r2, r3};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void FastNewObjectDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r1, r3};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r1};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r1};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r1};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 // static
 const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
 
@@ -148,15 +121,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r1, r3};
+  Register registers[] = {r1, r0, r3};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r1, r0, r3, r2};
   data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -185,6 +156,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // r1 : the target to call
+  // r2 : start index (to support rest parameters)
+  Register registers[] = {r1, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void ConstructStubDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -219,13 +197,12 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
-  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
-      CallInterfaceDescriptorData* data) {                          \
-    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
-  }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+  Register registers[] = {r1, r3, r0, r2};
+  data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
+}
 
 void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -436,6 +413,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r1,  // loaded new FP
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index c67fad8..e6323e4 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -264,6 +264,35 @@
   }
 }
 
+void MacroAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
+  if (!dst.is(src)) {
+    vmov(dst, src);
+  }
+}
+
+void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
+  if (srcdst0.is(srcdst1)) return;  // Swapping aliased registers emits nothing.
+
+  DCHECK(VfpRegisterIsAvailable(srcdst0));
+  DCHECK(VfpRegisterIsAvailable(srcdst1));
+
+  if (CpuFeatures::IsSupported(NEON)) {
+    vswp(srcdst0, srcdst1);
+  } else {
+    DCHECK(!srcdst0.is(kScratchDoubleReg));
+    DCHECK(!srcdst1.is(kScratchDoubleReg));
+    vmov(kScratchDoubleReg, srcdst0);
+    vmov(srcdst0, srcdst1);
+    vmov(srcdst1, kScratchDoubleReg);
+  }
+}
+
+void MacroAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
+  if (!srcdst0.is(srcdst1)) {
+    vswp(srcdst0, srcdst1);
+  }
+}
+
 void MacroAssembler::Mls(Register dst, Register src1, Register src2,
                          Register srcA, Condition cond) {
   if (CpuFeatures::IsSupported(ARMv7)) {
@@ -1052,8 +1081,8 @@
 }
 
 void MacroAssembler::VmovExtended(Register dst, int src_code) {
-  DCHECK_LE(32, src_code);
-  DCHECK_GT(64, src_code);
+  DCHECK_LE(SwVfpRegister::kMaxNumRegisters, src_code);
+  DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
   if (src_code & 0x1) {
     VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
   } else {
@@ -1062,8 +1091,8 @@
 }
 
 void MacroAssembler::VmovExtended(int dst_code, Register src) {
-  DCHECK_LE(32, dst_code);
-  DCHECK_GT(64, dst_code);
+  DCHECK_LE(SwVfpRegister::kMaxNumRegisters, dst_code);
+  DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
   if (dst_code & 0x1) {
     VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
   } else {
@@ -1073,22 +1102,23 @@
 
 void MacroAssembler::VmovExtended(int dst_code, int src_code,
                                   Register scratch) {
-  if (src_code < 32 && dst_code < 32) {
+  if (src_code < SwVfpRegister::kMaxNumRegisters &&
+      dst_code < SwVfpRegister::kMaxNumRegisters) {
     // src and dst are both s-registers.
     vmov(SwVfpRegister::from_code(dst_code),
          SwVfpRegister::from_code(src_code));
-  } else if (src_code < 32) {
+  } else if (src_code < SwVfpRegister::kMaxNumRegisters) {
     // src is an s-register.
     vmov(scratch, SwVfpRegister::from_code(src_code));
     VmovExtended(dst_code, scratch);
-  } else if (dst_code < 32) {
+  } else if (dst_code < SwVfpRegister::kMaxNumRegisters) {
     // dst is an s-register.
     VmovExtended(scratch, src_code);
     vmov(SwVfpRegister::from_code(dst_code), scratch);
   } else {
     // Neither src or dst are s-registers.
-    DCHECK_GT(64, src_code);
-    DCHECK_GT(64, dst_code);
+    DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
+    DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
     VmovExtended(scratch, src_code);
     VmovExtended(dst_code, scratch);
   }
@@ -1096,7 +1126,7 @@
 
 void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src,
                                   Register scratch) {
-  if (dst_code >= 32) {
+  if (dst_code >= SwVfpRegister::kMaxNumRegisters) {
     ldr(scratch, src);
     VmovExtended(dst_code, scratch);
   } else {
@@ -1106,7 +1136,7 @@
 
 void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
                                   Register scratch) {
-  if (src_code >= 32) {
+  if (src_code >= SwVfpRegister::kMaxNumRegisters) {
     VmovExtended(scratch, src_code);
     str(scratch, dst);
   } else {
@@ -1114,6 +1144,103 @@
   }
 }
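// Editor's sketch (illustrative, not part of the patch): the extended
// register numbering these helpers assume. Codes 0..31 are s0..s31; codes
// 32..63 address the 32-bit halves of d16..d31, which have no s-register
// aliases.
struct ExtendedLoc { int d_reg; bool high_half; };
inline ExtendedLoc LocateExtended(int code) {  // precondition: 32 <= code < 64
  return {code / 2, (code & 0x1) != 0};
}
// LocateExtended(45) == {22, true}: the high 32 bits of d22, i.e. the
// VmovHigh path in VmovExtended above.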
 
+void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
+                                 NeonDataType dt, int lane) {
+  int size = NeonSz(dt);  // 0, 1, 2
+  int byte = lane << size;
+  int double_word = byte >> kDoubleSizeLog2;
+  int double_byte = byte & (kDoubleSize - 1);
+  int double_lane = double_byte >> size;
+  DwVfpRegister double_source =
+      DwVfpRegister::from_code(src.code() * 2 + double_word);
+  vmov(dt, dst, double_source, double_lane);
+}
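// Editor's sketch (illustrative, not part of the patch): the lane arithmetic
// above. A q-register aliases the d-register pair d(2n), d(2n+1), so the lane
// index becomes a byte offset, split into a d-register selector and a lane
// within that d-register.
struct DLane { int d_word; int d_lane; };
inline DLane MapLane(int lane, int sz) {  // sz = NeonSz(dt): 0, 1 or 2
  int byte = lane << sz;
  return {byte >> 3 /* kDoubleSizeLog2 */, (byte & 0x7) >> sz};
}
// MapLane(5, 1) == {1, 1}: 16-bit lane 5 is lane 1 of the odd d-register.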
+
+void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
+                                 Register scratch, int lane) {
+  int s_code = src.code() * 4 + lane;
+  VmovExtended(dst.code(), s_code, scratch);
+}
+
+void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
+                                 Register src_lane, NeonDataType dt, int lane) {
+  Move(dst, src);
+  int size = NeonSz(dt);  // 0, 1, 2
+  int byte = lane << size;
+  int double_word = byte >> kDoubleSizeLog2;
+  int double_byte = byte & (kDoubleSize - 1);
+  int double_lane = double_byte >> size;
+  DwVfpRegister double_dst =
+      DwVfpRegister::from_code(dst.code() * 2 + double_word);
+  vmov(dt, double_dst, double_lane, src_lane);
+}
+
+void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
+                                 SwVfpRegister src_lane, Register scratch,
+                                 int lane) {
+  Move(dst, src);
+  int s_code = dst.code() * 4 + lane;
+  VmovExtended(s_code, src_lane.code(), scratch);
+}
+
+void MacroAssembler::Swizzle(QwNeonRegister dst, QwNeonRegister src,
+                             Register scratch, NeonSize size, uint32_t lanes) {
+  // TODO(bbudge) Handle Int16x8, Int8x16 vectors.
+  DCHECK_EQ(Neon32, size);
+  DCHECK_IMPLIES(size == Neon32, lanes < 0xFFFFu);
+  if (size == Neon32) {
+    switch (lanes) {
+      // TODO(bbudge) Handle more special cases.
+      case 0x3210:  // Identity.
+        Move(dst, src);
+        return;
+      case 0x1032:  // Swap top and bottom.
+        vext(dst, src, src, 8);
+        return;
+      case 0x2103:  // Rotation.
+        vext(dst, src, src, 12);
+        return;
+      case 0x0321:  // Rotation.
+        vext(dst, src, src, 4);
+        return;
+      case 0x0000:  // Equivalent to vdup.
+      case 0x1111:
+      case 0x2222:
+      case 0x3333: {
+        int lane_code = src.code() * 4 + (lanes & 0xF);
+        if (lane_code >= SwVfpRegister::kMaxNumRegisters) {
+          // TODO(bbudge) use vdup (vdup.32 dst, D<src>[lane]) once implemented.
+          int temp_code = kScratchDoubleReg.code() * 2;
+          VmovExtended(temp_code, lane_code, scratch);
+          lane_code = temp_code;
+        }
+        vdup(dst, SwVfpRegister::from_code(lane_code));
+        return;
+      }
+      case 0x2301:  // Swap lanes 0, 1 and lanes 2, 3.
+        vrev64(Neon32, dst, src);
+        return;
+      default:  // Handle all other cases with vmovs.
+        int src_code = src.code() * 4;
+        int dst_code = dst.code() * 4;
+        bool in_place = src.is(dst);
+        if (in_place) {
+          vmov(kScratchQuadReg, src);
+          src_code = kScratchQuadReg.code() * 4;
+        }
+        for (int i = 0; i < 4; i++) {
+          int lane = (lanes >> (i * 4) & 0xF);
+          VmovExtended(dst_code + i, src_code + lane, scratch);
+        }
+        if (in_place) {
+          // Restore zero reg.
+          veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+        }
+        return;
+    }
+  }
+}
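// Editor's sketch (illustrative, not part of the patch): the packed `lanes`
// selector Swizzle consumes. Nibble i, least significant first, names the
// source lane copied into destination lane i.
inline void DecodeLanes(uint32_t lanes, int out[4]) {
  for (int i = 0; i < 4; i++) out[i] = (lanes >> (i * 4)) & 0xF;
}
// DecodeLanes(0x1032, out) yields {2, 3, 0, 1} -- swap the two halves --
// which is why that case lowers to vext(dst, src, src, 8) above.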
+
 void MacroAssembler::LslPair(Register dst_low, Register dst_high,
                              Register src_low, Register src_high,
                              Register scratch, Register shift) {
@@ -1270,7 +1397,7 @@
 }
 
 void MacroAssembler::StubPrologue(StackFrame::Type type) {
-  mov(ip, Operand(Smi::FromInt(type)));
+  mov(ip, Operand(StackFrame::TypeToMarker(type)));
   PushCommonFrame(ip);
   if (FLAG_enable_embedded_constant_pool) {
     LoadConstantPoolPointerRegister();
@@ -1300,18 +1427,17 @@
   }
 }
 
-
-void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
   ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
-  ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+  ldr(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+  ldr(vector, FieldMemOperand(vector, Cell::kValueOffset));
 }
 
 
 void MacroAssembler::EnterFrame(StackFrame::Type type,
                                 bool load_constant_pool_pointer_reg) {
   // r0-r3: preserved
-  mov(ip, Operand(Smi::FromInt(type)));
+  mov(ip, Operand(StackFrame::TypeToMarker(type)));
   PushCommonFrame(ip);
   if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
     LoadConstantPoolPointerRegister();
@@ -1366,7 +1492,7 @@
   DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
   DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
   DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
-  mov(ip, Operand(Smi::FromInt(frame_type)));
+  mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
   PushCommonFrame(ip);
   // Reserve room for saved entry sp and code object.
   sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
@@ -1411,21 +1537,6 @@
   str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
 }
 
-
-void MacroAssembler::InitializeNewString(Register string,
-                                         Register length,
-                                         Heap::RootListIndex map_index,
-                                         Register scratch1,
-                                         Register scratch2) {
-  SmiTag(scratch1, length);
-  LoadRoot(scratch2, map_index);
-  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
-  mov(scratch1, Operand(String::kEmptyHashField));
-  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
-  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
 int MacroAssembler::ActivationFrameAlignment() {
 #if V8_HOST_ARCH_ARM
   // Running on the real platform. Use the alignment as mandated by the local
@@ -1629,18 +1740,16 @@
   }
 }
 
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
-                                             const ParameterCount& expected,
-                                             const ParameterCount& actual) {
-  Label skip_flooding;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  mov(r4, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual) {
+  Label skip_hook;
+  ExternalReference debug_hook_active =
+      ExternalReference::debug_hook_on_function_call_address(isolate());
+  mov(r4, Operand(debug_hook_active));
   ldrsb(r4, MemOperand(r4));
-  cmp(r4, Operand(StepIn));
-  b(lt, &skip_flooding);
+  cmp(r4, Operand(0));
+  b(eq, &skip_hook);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1657,7 +1766,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    CallRuntime(Runtime::kDebugOnFunctionCall);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -1671,7 +1780,7 @@
       SmiUntag(expected.reg());
     }
   }
-  bind(&skip_flooding);
+  bind(&skip_hook);
 }
 
 
@@ -1685,8 +1794,8 @@
   DCHECK(function.is(r1));
   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));
 
-  if (call_wrapper.NeedsDebugStepCheck()) {
-    FloodFunctionIfStepping(function, new_target, expected, actual);
+  if (call_wrapper.NeedsDebugHookCheck()) {
+    CheckDebugHook(function, new_target, expected, actual);
   }
 
   // Clear the new.target register if not given.
@@ -1795,17 +1904,17 @@
   b(hi, fail);
 }
 
-
-void MacroAssembler::DebugBreak() {
-  mov(r0, Operand::Zero());
-  mov(r1,
-      Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
-  CEntryStub ces(isolate(), 1);
-  DCHECK(AllowThisStubCall(&ces));
-  Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+  // Check whether we need to drop frames to restart a function on the stack.
+  ExternalReference restart_fp =
+      ExternalReference::debug_restart_fp_address(isolate());
+  mov(r1, Operand(restart_fp));
+  ldr(r1, MemOperand(r1));
+  tst(r1, r1);
+  Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+       ne);
 }
 
-
 void MacroAssembler::PushStackHandler() {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
@@ -2177,112 +2286,6 @@
   add(result, result, Operand(kHeapObjectTag));
 }
 
-void MacroAssembler::AllocateTwoByteString(Register result,
-                                           Register length,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
-  add(scratch1, scratch1,
-      Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
-  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
-  // Allocate two-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  InitializeNewString(result,
-                      length,
-                      Heap::kStringMapRootIndex,
-                      scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
-                                           Register scratch1, Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  DCHECK(kCharSize == 1);
-  add(scratch1, length,
-      Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
-  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
-  // Allocate one-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
-                                               Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result,
-                      length,
-                      Heap::kConsStringMapRootIndex,
-                      scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
-                                                 Register length,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result,
-                      length,
-                      Heap::kSlicedStringMapRootIndex,
-                      scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
-                                                 Register length,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
 void MacroAssembler::CompareObjectType(Register object,
                                        Register map,
                                        Register type_reg,
@@ -2314,68 +2317,6 @@
   cmp(obj, ip);
 }
 
-void MacroAssembler::CheckFastObjectElements(Register map,
-                                             Register scratch,
-                                             Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-  b(ls, fail);
-  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
-  b(hi, fail);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
-                                          Register scratch,
-                                          Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-  b(hi, fail);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
-                                      Register value_reg,
-                                      Register key_reg,
-                                      Register elements_reg,
-                                      Register scratch1,
-                                      LowDwVfpRegister double_scratch,
-                                      Label* fail,
-                                      int elements_offset) {
-  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
-  Label smi_value, store;
-
-  // Handle smi values specially.
-  JumpIfSmi(value_reg, &smi_value);
-
-  // Ensure that the object is a heap number
-  CheckMap(value_reg,
-           scratch1,
-           isolate()->factory()->heap_number_map(),
-           fail,
-           DONT_DO_SMI_CHECK);
-
-  vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-  VFPCanonicalizeNaN(double_scratch);
-  b(&store);
-
-  bind(&smi_value);
-  SmiToDouble(double_scratch, value_reg);
-
-  bind(&store);
-  add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
-  vstr(double_scratch,
-       FieldMemOperand(scratch1,
-                       FixedDoubleArray::kHeaderSize - elements_offset));
-}
-
-
 void MacroAssembler::CompareMap(Register obj,
                                 Register scratch,
                                 Handle<Map> map,
@@ -2472,33 +2413,6 @@
   bind(&done);
 }
 
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
-                                             Register scratch, Label* miss) {
-  // Get the prototype or initial map from the function.
-  ldr(result,
-      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // If the prototype or initial map is the hole, don't return it and
-  // simply miss the cache instead. This will allow us to allocate a
-  // prototype object on-demand in the runtime system.
-  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  cmp(result, ip);
-  b(eq, miss);
-
-  // If the function does not have an initial map, we're done.
-  Label done;
-  CompareObjectType(result, scratch, scratch, MAP_TYPE);
-  b(ne, &done);
-
-  // Get the prototype from the initial map.
-  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
-  // All done.
-  bind(&done);
-}
-
-
 void MacroAssembler::CallStub(CodeStub* stub,
                               TypeFeedbackId ast_id,
                               Condition cond) {
@@ -2878,28 +2792,6 @@
   }
 }
 
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind,
-    ElementsKind transitioned_kind,
-    Register map_in_out,
-    Register scratch,
-    Label* no_map_match) {
-  DCHECK(IsFastElementsKind(expected_kind));
-  DCHECK(IsFastElementsKind(transitioned_kind));
-
-  // Check that the function's map is the same as the expected cached map.
-  ldr(scratch, NativeContextMemOperand());
-  ldr(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
-  cmp(map_in_out, ip);
-  b(ne, no_map_match);
-
-  // Use the transitioned cached map.
-  ldr(map_in_out,
-      ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
   ldr(dst, NativeContextMemOperand());
   ldr(dst, ContextMemOperand(dst, index));
@@ -2962,15 +2854,6 @@
   b(cc, smi_case);  // Shifter carry is not set for a smi.
 }
 
-
-void MacroAssembler::UntagAndJumpIfNotSmi(
-    Register dst, Register src, Label* non_smi_case) {
-  STATIC_ASSERT(kSmiTag == 0);
-  SmiUntag(dst, src, SetCC);
-  b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
-}
-
-
 void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                      Register reg2,
                                      Label* on_either_smi) {
@@ -3411,19 +3294,6 @@
   b(ne, failure);
 }
 
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
-                                                              Register scratch,
-                                                              Label* failure) {
-  const int kFlatOneByteStringMask =
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatOneByteStringTag =
-      kStringTag | kOneByteStringTag | kSeqStringTag;
-  and_(scratch, type, Operand(kFlatOneByteStringMask));
-  cmp(scratch, Operand(kFlatOneByteStringTag));
-  b(ne, failure);
-}
-
 static const int kRegisterPassedArguments = 4;
 
 
@@ -3861,45 +3731,6 @@
   return no_reg;
 }
 
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
-    Register object,
-    Register scratch0,
-    Register scratch1,
-    Label* found) {
-  DCHECK(!scratch1.is(scratch0));
-  Register current = scratch0;
-  Label loop_again, end;
-
-  // scratch contained elements pointer.
-  mov(current, object);
-  ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
-  ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
-  CompareRoot(current, Heap::kNullValueRootIndex);
-  b(eq, &end);
-
-  // Loop based on the map going up the prototype chain.
-  bind(&loop_again);
-  ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
-
-  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  ldrb(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
-  cmp(scratch1, Operand(JS_OBJECT_TYPE));
-  b(lo, found);
-
-  ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
-  DecodeField<Map::ElementsKindBits>(scratch1);
-  cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
-  b(eq, found);
-  ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
-  CompareRoot(current, Heap::kNullValueRootIndex);
-  b(ne, &loop_again);
-
-  bind(&end);
-}
-
-
 #ifdef DEBUG
 bool AreAliased(Register reg1,
                 Register reg2,
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 4f0ee82..bcba014 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -184,6 +184,10 @@
   }
   void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
   void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
+  void Move(QwNeonRegister dst, QwNeonRegister src);
+  // Register swap.
+  void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
+  void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);
 
   void Load(Register dst, const MemOperand& src, Representation r);
   void Store(Register src, const MemOperand& dst, Representation r);
@@ -557,6 +561,16 @@
   void VmovExtended(int dst_code, const MemOperand& src, Register scratch);
   void VmovExtended(const MemOperand& dst, int src_code, Register scratch);
 
+  void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
+  void ExtractLane(SwVfpRegister dst, QwNeonRegister src, Register scratch,
+                   int lane);
+  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
+                   NeonDataType dt, int lane);
+  void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
+                   SwVfpRegister src_lane, Register scratch, int lane);
+  void Swizzle(QwNeonRegister dst, QwNeonRegister src, Register scratch,
+               NeonSize size, uint32_t lanes);
+
   void LslPair(Register dst_low, Register dst_high, Register src_low,
                Register src_high, Register scratch, Register shift);
   void LslPair(Register dst_low, Register dst_high, Register src_low,
@@ -635,17 +649,6 @@
     LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
   }
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the native context if the map in register
-  // map_in_out is the cached Array map in the native context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(
-      ElementsKind expected_kind,
-      ElementsKind transitioned_kind,
-      Register map_in_out,
-      Register scratch,
-      Label* no_map_match);
-
   void LoadNativeContextSlot(int index, Register dst);
 
   // Load the initial map from the global function. The registers
@@ -678,9 +681,10 @@
                           const ParameterCount& actual, InvokeFlag flag,
                           const CallWrapper& call_wrapper);
 
-  void FloodFunctionIfStepping(Register fun, Register new_target,
-                               const ParameterCount& expected,
-                               const ParameterCount& actual);
+  // On function call, call into the debugger if necessary.
+  void CheckDebugHook(Register fun, Register new_target,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
@@ -710,12 +714,9 @@
                         Register scratch,
                         Label* fail);
 
-  // ---------------------------------------------------------------------------
-  // Debugger Support
+  // Frame restart support
+  void MaybeDropFrames();
 
-  void DebugBreak();
-
-  // ---------------------------------------------------------------------------
   // Exception handling
 
   // Push a new stack handler and link into stack handler chain.
@@ -794,32 +795,6 @@
   void FastAllocate(Register object_size, Register result, Register result_end,
                     Register scratch, AllocationFlags flags);
 
-  void AllocateTwoByteString(Register result,
-                             Register length,
-                             Register scratch1,
-                             Register scratch2,
-                             Register scratch3,
-                             Label* gc_required);
-  void AllocateOneByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-  void AllocateTwoByteConsString(Register result,
-                                 Register length,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Label* gc_required);
-  void AllocateOneByteConsString(Register result, Register length,
-                                 Register scratch1, Register scratch2,
-                                 Label* gc_required);
-  void AllocateTwoByteSlicedString(Register result,
-                                   Register length,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Label* gc_required);
-  void AllocateOneByteSlicedString(Register result, Register length,
-                                   Register scratch1, Register scratch2,
-                                   Label* gc_required);
-
   // Allocates a heap number or jumps to the gc_required label if the young
   // space is full and a scavenge is needed. All registers are also clobbered
   // when control continues at the gc_required label.
@@ -856,14 +831,6 @@
   void GetMapConstructor(Register result, Register map, Register temp,
                          Register temp2);
 
-  // Try to get function prototype of a function and puts the value in
-  // the result register. Checks that the function really is a
-  // function and jumps to the miss label if the fast checks fail. The
-  // function register will be untouched; the other registers may be
-  // clobbered.
-  void TryGetFunctionPrototype(Register function, Register result,
-                               Register scratch, Label* miss);
-
   // Compare object type for heap object.  heap_object contains a non-Smi
   // whose object type should be compared with the given type.  This both
   // sets the flags and leaves the object type in the type_reg register.
@@ -884,29 +851,6 @@
                            Register type_reg,
                            InstanceType type);
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map,
-                               Register scratch,
-                               Label* fail);
-
-  // Check if a map for a JSObject indicates that the object has fast smi only
-  // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map,
-                            Register scratch,
-                            Label* fail);
-
-  // Check to see if maybe_number can be stored as a double in
-  // FastDoubleElements. If it can, store it at the index specified by key in
-  // the FastDoubleElements array elements. Otherwise jump to fail.
-  void StoreNumberToDoubleElements(Register value_reg,
-                                   Register key_reg,
-                                   Register elements_reg,
-                                   Register scratch1,
-                                   LowDwVfpRegister double_scratch,
-                                   Label* fail,
-                                   int elements_offset = 0);
-
   // Compare an object's map with the specified map and its transitioned
   // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
   // set with result of map compare. If multiple map compares are required, the
@@ -1287,10 +1231,6 @@
   // Source and destination can be the same register.
   void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
 
-  // Untag the source value into destination and jump if source is not a smi.
-  // Souce and destination can be the same register.
-  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
   // Test if the register contains a smi (Z == 0 (eq) if true).
   inline void SmiTst(Register value) {
     tst(value, Operand(kSmiTagMask));
@@ -1380,11 +1320,6 @@
       Register first_object_instance_type, Register second_object_instance_type,
       Register scratch1, Register scratch2, Label* failure);
 
-  // Check if instance type is sequential one-byte string and jump to label if
-  // it is not.
-  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
-                                                Label* failure);
-
   void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
 
   void EmitSeqStringSetCharCheck(Register string,
@@ -1439,7 +1374,7 @@
   }
 
   // Load the type feedback vector from a JavaScript frame.
-  void EmitLoadTypeFeedbackVector(Register vector);
+  void EmitLoadFeedbackVector(Register vector);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type,
@@ -1464,20 +1399,6 @@
                                        Register scratch_reg,
                                        Label* no_memento_found);
 
-  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
-                                         Register scratch_reg,
-                                         Label* memento_found) {
-    Label no_memento_found;
-    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
-                                    &no_memento_found);
-    b(eq, memento_found);
-    bind(&no_memento_found);
-  }
-
-  // Jumps to found label if a prototype map has dictionary elements.
-  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
-                                        Register scratch1, Label* found);
-
   // Loads the constant pool pointer (pp) register.
   void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
       Register code_target_address);
@@ -1498,12 +1419,6 @@
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper);
 
-  void InitializeNewString(Register string,
-                           Register length,
-                           Heap::RootListIndex map_index,
-                           Register scratch1,
-                           Register scratch2);
-
   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
   void InNewSpace(Register object,
                   Register scratch,
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 331a7e9..3a3a902 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -22,6 +22,10 @@
 namespace v8 {
 namespace internal {
 
+// static
+base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
+    LAZY_INSTANCE_INITIALIZER;
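+// The global monitor models ARM's global exclusive-access monitor shared by
+// all simulated threads; each Simulator also keeps a local monitor
+// (local_monitor_) for its own ldrex/strex reservations.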
+
 // This macro provides a platform independent use of sscanf. The reason for
 // SScanF not being implemented in a platform independent way through
 // ::v8::internal::OS in the same way as SNPrintF is that the
@@ -569,7 +573,6 @@
   return start_page == end_page;
 }
 
-
 void Simulator::set_last_debugger_input(char* input) {
   DeleteArray(last_debugger_input_);
   last_debugger_input_ = input;
@@ -710,9 +713,10 @@
   last_debugger_input_ = NULL;
 }
 
-
-Simulator::~Simulator() { free(stack_); }
-
+Simulator::~Simulator() {
+  global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
+  free(stack_);
+}
 
 // When the generated code calls an external reference we need to catch that in
 // the simulator.  The external reference will be a function compiled for the
@@ -895,28 +899,16 @@
   memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
 }
 
-
-void Simulator::get_q_register(int qreg, uint64_t* value) {
+template <typename T>
+void Simulator::get_q_register(int qreg, T* value) {
   DCHECK((qreg >= 0) && (qreg < num_q_registers));
-  memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2);
+  memcpy(value, vfp_registers_ + qreg * 4, kSimd128Size);
 }
 
-
-void Simulator::set_q_register(int qreg, const uint64_t* value) {
+template <typename T>
+void Simulator::set_q_register(int qreg, const T* value) {
   DCHECK((qreg >= 0) && (qreg < num_q_registers));
-  memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2);
-}
-
-
-void Simulator::get_q_register(int qreg, uint32_t* value) {
-  DCHECK((qreg >= 0) && (qreg < num_q_registers));
-  memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4);
-}
-
-
-void Simulator::set_q_register(int qreg, const uint32_t* value) {
-  DCHECK((qreg >= 0) && (qreg < num_q_registers));
-  memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4);
+  memcpy(vfp_registers_ + qreg * 4, value, kSimd128Size);
 }
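+// Illustrative use of the templated accessors: any element type that tiles
+// the 16-byte register works, e.g.
+//   float lanes[4];
+//   get_q_register(0, lanes);  // read q0 as four floats
+//   lanes[2] = 1.0f;
+//   set_q_register(0, lanes);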
 
 
@@ -1052,78 +1044,166 @@
 int Simulator::ReadW(int32_t addr, Instruction* instr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoad(addr);
   intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
   return *ptr;
 }
 
+int Simulator::ReadExW(int32_t addr, Instruction* instr) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
+  global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+                                                   &global_monitor_processor_);
+  intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+  return *ptr;
+}
 
 void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyStore(addr);
+  global_monitor_.Pointer()->NotifyStore_Locked(addr,
+                                                &global_monitor_processor_);
   intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
   *ptr = value;
 }
 
+int Simulator::WriteExW(int32_t addr, int value, Instruction* instr) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
+      global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+          addr, &global_monitor_processor_)) {
+    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+    *ptr = value;
+    return 0;
+  } else {
+    return 1;
+  }
+}
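+// Illustrative (standard ARM idiom): generated atomic sequences retry until
+// the exclusive store succeeds; the status value that strex writes back is
+// exactly WriteExW's return value (0 = stored, 1 = reservation lost):
+//   loop:  ldrex r1, [r0]
+//          add   r1, r1, #1
+//          strex r2, r1, [r0]
+//          cmp   r2, #0
+//          bne   loop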
 
 uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoad(addr);
   uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
   return *ptr;
 }
 
-
 int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoad(addr);
   int16_t* ptr = reinterpret_cast<int16_t*>(addr);
   return *ptr;
 }
 
+uint16_t Simulator::ReadExHU(int32_t addr, Instruction* instr) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
+  global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+                                                   &global_monitor_processor_);
+  uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+  return *ptr;
+}
 
 void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyStore(addr);
+  global_monitor_.Pointer()->NotifyStore_Locked(addr,
+                                                &global_monitor_processor_);
   uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
   *ptr = value;
 }
 
-
 void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyStore(addr);
+  global_monitor_.Pointer()->NotifyStore_Locked(addr,
+                                                &global_monitor_processor_);
   int16_t* ptr = reinterpret_cast<int16_t*>(addr);
   *ptr = value;
 }
 
+int Simulator::WriteExH(int32_t addr, uint16_t value, Instruction* instr) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
+      global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+          addr, &global_monitor_processor_)) {
+    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+    *ptr = value;
+    return 0;
+  } else {
+    return 1;
+  }
+}
 
 uint8_t Simulator::ReadBU(int32_t addr) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoad(addr);
   uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
   return *ptr;
 }
 
-
 int8_t Simulator::ReadB(int32_t addr) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoad(addr);
   int8_t* ptr = reinterpret_cast<int8_t*>(addr);
   return *ptr;
 }
 
+uint8_t Simulator::ReadExBU(int32_t addr) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
+  global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
+                                                   &global_monitor_processor_);
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  return *ptr;
+}
 
 void Simulator::WriteB(int32_t addr, uint8_t value) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyStore(addr);
+  global_monitor_.Pointer()->NotifyStore_Locked(addr,
+                                                &global_monitor_processor_);
   uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
   *ptr = value;
 }
 
-
 void Simulator::WriteB(int32_t addr, int8_t value) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyStore(addr);
+  global_monitor_.Pointer()->NotifyStore_Locked(addr,
+                                                &global_monitor_processor_);
   int8_t* ptr = reinterpret_cast<int8_t*>(addr);
   *ptr = value;
 }
 
+int Simulator::WriteExB(int32_t addr, uint8_t value) {
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
+      global_monitor_.Pointer()->NotifyStoreExcl_Locked(
+          addr, &global_monitor_processor_)) {
+    uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+    *ptr = value;
+    return 0;
+  } else {
+    return 1;
+  }
+}
 
 int32_t* Simulator::ReadDW(int32_t addr) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyLoad(addr);
   int32_t* ptr = reinterpret_cast<int32_t*>(addr);
   return ptr;
 }
@@ -1132,6 +1212,10 @@
 void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
   // All supported ARM targets allow unaligned accesses, so we don't need to
   // check the alignment here.
+  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
+  local_monitor_.NotifyStore(addr);
+  global_monitor_.Pointer()->NotifyStore_Locked(addr,
+                                                &global_monitor_processor_);
   int32_t* ptr = reinterpret_cast<int32_t*>(addr);
   *ptr++ = value1;
   *ptr = value2;
@@ -2085,7 +2169,72 @@
           }
         }
       } else {
-        UNIMPLEMENTED();  // Not used by V8.
+        if (instr->Bits(24, 23) == 3) {
+          if (instr->Bit(20) == 1) {
+            // ldrex
+            int rt = instr->RtValue();
+            int rn = instr->RnValue();
+            int32_t addr = get_register(rn);
+            switch (instr->Bits(22, 21)) {
+              case 0: {
+                // Format(instr, "ldrex'cond 'rt, ['rn]");
+                int value = ReadExW(addr, instr);
+                set_register(rt, value);
+                break;
+              }
+              case 2: {
+                // Format(instr, "ldrexb'cond 'rt, ['rn]");
+                uint8_t value = ReadExBU(addr);
+                set_register(rt, value);
+                break;
+              }
+              case 3: {
+                // Format(instr, "ldrexh'cond 'rt, ['rn]");
+                uint16_t value = ReadExHU(addr, instr);
+                set_register(rt, value);
+                break;
+              }
+              default:
+                UNREACHABLE();
+                break;
+            }
+          } else {
+            // The instruction is documented as strex rd, rt, [rn], but the
+            // "rt" register is using the rm bits.
+            int rd = instr->RdValue();
+            int rt = instr->RmValue();
+            int rn = instr->RnValue();
+            int32_t addr = get_register(rn);
+            switch (instr->Bits(22, 21)) {
+              case 0: {
+                // Format(instr, "strex'cond 'rd, 'rm, ['rn]");
+                int value = get_register(rt);
+                int status = WriteExW(addr, value, instr);
+                set_register(rd, status);
+                break;
+              }
+              case 2: {
+                // Format(instr, "strexb'cond 'rd, 'rm, ['rn]");
+                uint8_t value = get_register(rt);
+                int status = WriteExB(addr, value);
+                set_register(rd, status);
+                break;
+              }
+              case 3: {
+                // Format(instr, "strexh'cond 'rd, 'rm, ['rn]");
+                uint16_t value = get_register(rt);
+                int status = WriteExH(addr, value, instr);
+                set_register(rd, status);
+                break;
+              }
+              default:
+                UNREACHABLE();
+                break;
+            }
+          }
+        } else {
+          UNIMPLEMENTED();  // Not used by V8.
+        }
       }
     } else {
       // extra load/store instructions
@@ -3067,6 +3216,7 @@
 // Dd = vsqrt(Dm)
 // Sd = vsqrt(Sm)
 // vmrs
+// vdup.size Qd, Rt.
 void Simulator::DecodeTypeVFP(Instruction* instr) {
   DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
   DCHECK(instr->Bits(11, 9) == 0x5);
@@ -3277,24 +3427,117 @@
     if ((instr->VCValue() == 0x0) &&
         (instr->VAValue() == 0x0)) {
       DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
-    } else if ((instr->VLValue() == 0x0) &&
-               (instr->VCValue() == 0x1) &&
-               (instr->Bit(23) == 0x0)) {
-      // vmov (ARM core register to scalar)
-      int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
-      uint32_t data[2];
-      get_d_register(vd, data);
-      data[instr->Bit(21)] = get_register(instr->RtValue());
-      set_d_register(vd, data);
-    } else if ((instr->VLValue() == 0x1) &&
-               (instr->VCValue() == 0x1) &&
-               (instr->Bit(23) == 0x0)) {
+    } else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1)) {
+      if (instr->Bit(23) == 0) {
+        // vmov (ARM core register to scalar)
+        int vd = instr->VFPNRegValue(kDoublePrecision);
+        int rt = instr->RtValue();
+        int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
+        if ((opc1_opc2 & 0xb) == 0) {
+          // NeonS32/NeonU32
+          uint32_t data[2];
+          get_d_register(vd, data);
+          data[instr->Bit(21)] = get_register(rt);
+          set_d_register(vd, data);
+        } else {
+          uint64_t data;
+          get_d_register(vd, &data);
+          uint64_t rt_value = get_register(rt);
+          if ((opc1_opc2 & 0x8) != 0) {
+            // NeonS8 / NeonU8
+            int i = opc1_opc2 & 0x7;
+            int shift = i * kBitsPerByte;
+            const uint64_t mask = 0xFF;
+            data &= ~(mask << shift);
+            data |= (rt_value & mask) << shift;
+            set_d_register(vd, &data);
+          } else if ((opc1_opc2 & 0x1) != 0) {
+            // NeonS16 / NeonU16
+            int i = (opc1_opc2 >> 1) & 0x3;
+            int shift = i * kBitsPerByte * kShortSize;
+            const uint64_t mask = 0xFFFF;
+            data &= ~(mask << shift);
+            data |= (rt_value & mask) << shift;
+            set_d_register(vd, &data);
+          } else {
+            UNREACHABLE();  // Not used by V8.
+          }
+        }
+      } else {
+        // vdup.size Qd, Rt.
+        NeonSize size = Neon32;
+        if (instr->Bit(5) != 0)
+          size = Neon16;
+        else if (instr->Bit(22) != 0)
+          size = Neon8;
+        int vd = instr->VFPNRegValue(kSimd128Precision);
+        int rt = instr->RtValue();
+        uint32_t rt_value = get_register(rt);
+        uint32_t q_data[4];
+        switch (size) {
+          case Neon8: {
+            rt_value &= 0xFF;
+            uint8_t* dst = reinterpret_cast<uint8_t*>(q_data);
+            for (int i = 0; i < 16; i++) {
+              dst[i] = rt_value;
+            }
+            break;
+          }
+          case Neon16: {
+            // Replicate the 16-bit value into both halves of a 32-bit word.
+            rt_value &= 0xFFFFu;
+            uint32_t rt_rt = (rt_value << 16) | (rt_value & 0xFFFFu);
+            for (int i = 0; i < 4; i++) {
+              q_data[i] = rt_rt;
+            }
+            break;
+          }
+          case Neon32: {
+            for (int i = 0; i < 4; i++) {
+              q_data[i] = rt_value;
+            }
+            break;
+          }
+          default:
+            UNREACHABLE();
+            break;
+        }
+        set_q_register(vd, q_data);
+      }
+    } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1)) {
       // vmov (scalar to ARM core register)
-      int vn = instr->Bits(19, 16) | (instr->Bit(7) << 4);
-      double dn_value = get_double_from_d_register(vn);
-      int32_t data[2];
-      memcpy(data, &dn_value, 8);
-      set_register(instr->RtValue(), data[instr->Bit(21)]);
+      int vn = instr->VFPNRegValue(kDoublePrecision);
+      int rt = instr->RtValue();
+      int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
+      uint64_t data;
+      get_d_register(vn, &data);
+      if ((opc1_opc2 & 0xb) == 0) {
+        // NeonS32 / NeonU32
+        int32_t int_data[2];
+        memcpy(int_data, &data, sizeof(int_data));
+        set_register(rt, int_data[instr->Bit(21)]);
+      } else {
+        bool u = instr->Bit(23) != 0;
+        if ((opc1_opc2 & 0x8) != 0) {
+          // NeonS8 / NeonU8
+          int i = opc1_opc2 & 0x7;
+          int shift = i * kBitsPerByte;
+          uint32_t scalar = (data >> shift) & 0xFFu;
+          if (!u && (scalar & 0x80) != 0) scalar |= 0xffffff00;
+          set_register(rt, scalar);
+        } else if ((opc1_opc2 & 0x1) != 0) {
+          // NeonS16 / NeonU16
+          int i = (opc1_opc2 >> 1) & 0x3;
+          int shift = i * kBitsPerByte * kShortSize;
+          uint32_t scalar = (data >> shift) & 0xFFFFu;
+          if (!u && (scalar & 0x8000) != 0) scalar |= 0xffff0000;
+          set_register(rt, scalar);
+        } else {
+          UNREACHABLE();  // Not used by V8.
+        }
+      }
     } else if ((instr->VLValue() == 0x1) &&
                (instr->VCValue() == 0x0) &&
                (instr->VAValue() == 0x7) &&
@@ -3520,6 +3763,48 @@
   }
 }
 
+int32_t Simulator::ConvertDoubleToInt(double val, bool unsigned_integer,
+                                      VFPRoundingMode mode) {
+  int32_t result =
+      unsigned_integer ? static_cast<uint32_t>(val) : static_cast<int32_t>(val);
+
+  inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
+
+  double abs_diff = unsigned_integer
+                        ? std::fabs(val - static_cast<uint32_t>(result))
+                        : std::fabs(val - result);
+
+  inexact_vfp_flag_ = (abs_diff != 0);
+
+  if (inv_op_vfp_flag_) {
+    result = VFPConversionSaturate(val, unsigned_integer);
+  } else {
+    switch (mode) {
+      case RN: {
+        int val_sign = (val > 0) ? 1 : -1;
+        if (abs_diff > 0.5) {
+          result += val_sign;
+        } else if (abs_diff == 0.5) {
+          // Round to even if exactly halfway.
+          result = ((result % 2) == 0) ? result : result + val_sign;
+        }
+        break;
+      }
+
+      case RM:
+        result = result > val ? result - 1 : result;
+        break;
+
+      case RZ:
+        // Nothing to do.
+        break;
+
+      default:
+        UNREACHABLE();
+    }
+  }
+  return result;
+}
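+// Illustrative round-to-nearest-even (RN) results:
+//   ConvertDoubleToInt(2.5, false, RN)  == 2   (halfway rounds to even)
+//   ConvertDoubleToInt(3.5, false, RN)  == 4
+//   ConvertDoubleToInt(-1.5, false, RN) == -2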
 
 void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
   DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
@@ -3556,44 +3841,7 @@
     double val = double_precision ? get_double_from_d_register(src)
                                   : get_float_from_s_register(src);
 
-    int temp = unsigned_integer ? static_cast<uint32_t>(val)
-                                : static_cast<int32_t>(val);
-
-    inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
-
-    double abs_diff =
-      unsigned_integer ? std::fabs(val - static_cast<uint32_t>(temp))
-                       : std::fabs(val - temp);
-
-    inexact_vfp_flag_ = (abs_diff != 0);
-
-    if (inv_op_vfp_flag_) {
-      temp = VFPConversionSaturate(val, unsigned_integer);
-    } else {
-      switch (mode) {
-        case RN: {
-          int val_sign = (val > 0) ? 1 : -1;
-          if (abs_diff > 0.5) {
-            temp += val_sign;
-          } else if (abs_diff == 0.5) {
-            // Round to even if exactly halfway.
-            temp = ((temp % 2) == 0) ? temp : temp + val_sign;
-          }
-          break;
-        }
-
-        case RM:
-          temp = temp > val ? temp - 1 : temp;
-          break;
-
-        case RZ:
-          // Nothing to do.
-          break;
-
-        default:
-          UNREACHABLE();
-      }
-    }
+    int32_t temp = ConvertDoubleToInt(val, unsigned_integer, mode);
 
     // Update the destination register.
     set_s_register_from_sinteger(dst, temp);
@@ -3740,9 +3988,439 @@
   }
 }
 
+// Templated operations for NEON instructions.
+// TODO(bbudge) Add more templates for use in DecodeSpecialCondition.
+template <typename T>
+int64_t Widen(T value) {
+  static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
+  return static_cast<int64_t>(value);
+}
+
+template <typename T>
+T Clamp(int64_t value) {
+  static_assert(sizeof(int64_t) > sizeof(T), "T must be int32_t or smaller");
+  int64_t min = static_cast<int64_t>(std::numeric_limits<T>::min());
+  int64_t max = static_cast<int64_t>(std::numeric_limits<T>::max());
+  int64_t clamped = std::max(min, std::min(max, value));
+  return static_cast<T>(clamped);
+}
+
+template <typename T>
+void AddSaturate(Simulator* simulator, int Vd, int Vm, int Vn) {
+  static const int kLanes = 16 / sizeof(T);
+  T src1[kLanes], src2[kLanes];
+  simulator->get_q_register(Vn, src1);
+  simulator->get_q_register(Vm, src2);
+  for (int i = 0; i < kLanes; i++) {
+    src1[i] = Clamp<T>(Widen(src1[i]) + Widen(src2[i]));
+  }
+  simulator->set_q_register(Vd, src1);
+}
+
+template <typename T>
+void SubSaturate(Simulator* simulator, int Vd, int Vm, int Vn) {
+  static const int kLanes = 16 / sizeof(T);
+  T src1[kLanes], src2[kLanes];
+  simulator->get_q_register(Vn, src1);
+  simulator->get_q_register(Vm, src2);
+  for (int i = 0; i < kLanes; i++) {
+    src1[i] = Clamp<T>(Widen(src1[i]) - Widen(src2[i]));
+  }
+  simulator->set_q_register(Vd, src1);
+}
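+// Illustrative: lanes saturate at the type bounds instead of wrapping, e.g.
+//   Clamp<int8_t>(Widen(int8_t{100}) + Widen(int8_t{100})) == 127
+//   Clamp<uint8_t>(Widen(uint8_t{1}) - Widen(uint8_t{2})) == 0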
 
 void Simulator::DecodeSpecialCondition(Instruction* instr) {
   switch (instr->SpecialValue()) {
+    case 4: {
+      int Vd, Vm, Vn;
+      if (instr->Bit(6) == 0) {
+        Vd = instr->VFPDRegValue(kDoublePrecision);
+        Vm = instr->VFPMRegValue(kDoublePrecision);
+        Vn = instr->VFPNRegValue(kDoublePrecision);
+      } else {
+        Vd = instr->VFPDRegValue(kSimd128Precision);
+        Vm = instr->VFPMRegValue(kSimd128Precision);
+        Vn = instr->VFPNRegValue(kSimd128Precision);
+      }
+      switch (instr->Bits(11, 8)) {
+        case 0x0: {
+          if (instr->Bit(4) == 1) {
+            // vqadd.s<size> Qd, Qm, Qn.
+            NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+            switch (size) {
+              case Neon8:
+                AddSaturate<int8_t>(this, Vd, Vm, Vn);
+                break;
+              case Neon16:
+                AddSaturate<int16_t>(this, Vd, Vm, Vn);
+                break;
+              case Neon32:
+                AddSaturate<int32_t>(this, Vd, Vm, Vn);
+                break;
+              default:
+                UNREACHABLE();
+                break;
+            }
+          } else {
+            UNIMPLEMENTED();
+          }
+          break;
+        }
+        case 0x1: {
+          if (instr->Bits(21, 20) == 2 && instr->Bit(6) == 1 &&
+              instr->Bit(4) == 1) {
+            // vmov Qd, Qm.
+            // vorr Qd, Qm, Qn.
+            uint32_t src1[4];
+            get_q_register(Vm, src1);
+            if (Vm != Vn) {
+              uint32_t src2[4];
+              get_q_register(Vn, src2);
+              for (int i = 0; i < 4; i++) {
+                src1[i] = src1[i] | src2[i];
+              }
+            }
+            set_q_register(Vd, src1);
+          } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
+                     instr->Bit(4) == 1) {
+            // vand Qd, Qm, Qn.
+            uint32_t src1[4], src2[4];
+            get_q_register(Vn, src1);
+            get_q_register(Vm, src2);
+            for (int i = 0; i < 4; i++) {
+              src1[i] = src1[i] & src2[i];
+            }
+            set_q_register(Vd, src1);
+          } else {
+            UNIMPLEMENTED();
+          }
+          break;
+        }
+        case 0x2: {
+          if (instr->Bit(4) == 1) {
+            // vqsub.s<size> Qd, Qm, Qn.
+            NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+            switch (size) {
+              case Neon8:
+                SubSaturate<int8_t>(this, Vd, Vm, Vn);
+                break;
+              case Neon16:
+                SubSaturate<int16_t>(this, Vd, Vm, Vn);
+                break;
+              case Neon32:
+                SubSaturate<int32_t>(this, Vd, Vm, Vn);
+                break;
+              default:
+                UNREACHABLE();
+                break;
+            }
+          } else {
+            UNIMPLEMENTED();
+          }
+          break;
+        }
+        case 0x3: {
+          // vcge/vcgt.s<size> Qd, Qm, Qn.
+          bool ge = instr->Bit(4) == 1;
+          NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+          switch (size) {
+            case Neon8: {
+              int8_t src1[16], src2[16];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 16; i++) {
+                if (ge)
+                  src1[i] = src1[i] >= src2[i] ? 0xFF : 0;
+                else
+                  src1[i] = src1[i] > src2[i] ? 0xFF : 0;
+              }
+              set_q_register(Vd, src1);
+              break;
+            }
+            case Neon16: {
+              int16_t src1[8], src2[8];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 8; i++) {
+                if (ge)
+                  src1[i] = src1[i] >= src2[i] ? 0xFFFF : 0;
+                else
+                  src1[i] = src1[i] > src2[i] ? 0xFFFF : 0;
+              }
+              set_q_register(Vd, src1);
+              break;
+            }
+            case Neon32: {
+              int32_t src1[4], src2[4];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 4; i++) {
+                if (ge)
+                  src1[i] = src1[i] >= src2[i] ? 0xFFFFFFFF : 0;
+                else
+                  src1[i] = src1[i] > src2[i] ? 0xFFFFFFFF : 0;
+              }
+              set_q_register(Vd, src1);
+              break;
+            }
+            default:
+              UNREACHABLE();
+              break;
+          }
+          break;
+        }
+        case 0x6: {
+          // vmin/vmax.s<size> Qd, Qm, Qn.
+          NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+          bool min = instr->Bit(4) != 0;
+          switch (size) {
+            case Neon8: {
+              int8_t src1[16], src2[16];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 16; i++) {
+                if (min)
+                  src1[i] = std::min(src1[i], src2[i]);
+                else
+                  src1[i] = std::max(src1[i], src2[i]);
+              }
+              set_q_register(Vd, src1);
+              break;
+            }
+            case Neon16: {
+              int16_t src1[8], src2[8];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 8; i++) {
+                if (min)
+                  src1[i] = std::min(src1[i], src2[i]);
+                else
+                  src1[i] = std::max(src1[i], src2[i]);
+              }
+              set_q_register(Vd, src1);
+              break;
+            }
+            case Neon32: {
+              int32_t src1[4], src2[4];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 4; i++) {
+                if (min)
+                  src1[i] = std::min(src1[i], src2[i]);
+                else
+                  src1[i] = std::max(src1[i], src2[i]);
+              }
+              set_q_register(Vd, src1);
+              break;
+            }
+            default:
+              UNREACHABLE();
+              break;
+          }
+          break;
+        }
+        case 0x8: {
+          // vadd/vtst
+          NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+          if (instr->Bit(4) == 0) {
+            // vadd.i<size> Qd, Qm, Qn.
+            switch (size) {
+              case Neon8: {
+                uint8_t src1[16], src2[16];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 16; i++) {
+                  src1[i] += src2[i];
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              case Neon16: {
+                uint16_t src1[8], src2[8];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 8; i++) {
+                  src1[i] += src2[i];
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              case Neon32: {
+                uint32_t src1[4], src2[4];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 4; i++) {
+                  src1[i] += src2[i];
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              default:
+                UNREACHABLE();
+                break;
+            }
+          } else {
+            // vtst.i<size> Qd, Qm, Qn.
+            switch (size) {
+              case Neon8: {
+                uint8_t src1[16], src2[16];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 16; i++) {
+                  src1[i] = (src1[i] & src2[i]) != 0 ? 0xFFu : 0;
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              case Neon16: {
+                uint16_t src1[8], src2[8];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 8; i++) {
+                  src1[i] = (src1[i] & src2[i]) != 0 ? 0xFFFFu : 0;
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              case Neon32: {
+                uint32_t src1[4], src2[4];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 4; i++) {
+                  src1[i] = (src1[i] & src2[i]) != 0 ? 0xFFFFFFFFu : 0;
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              default:
+                UNREACHABLE();
+                break;
+            }
+          }
+          break;
+        }
+        case 0x9: {
+          if (instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+            // vmul.i<size> Qd, Qm, Qn.
+            NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+            switch (size) {
+              case Neon8: {
+                uint8_t src1[16], src2[16];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 16; i++) {
+                  src1[i] *= src2[i];
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              case Neon16: {
+                uint16_t src1[8], src2[8];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 8; i++) {
+                  src1[i] *= src2[i];
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              case Neon32: {
+                uint32_t src1[4], src2[4];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 4; i++) {
+                  src1[i] *= src2[i];
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              default:
+                UNREACHABLE();
+                break;
+            }
+          } else {
+            UNIMPLEMENTED();
+          }
+          break;
+        }
+        case 0xd: {
+          if (instr->Bit(4) == 0) {
+            float src1[4], src2[4];
+            get_q_register(Vn, src1);
+            get_q_register(Vm, src2);
+            for (int i = 0; i < 4; i++) {
+              if (instr->Bit(21) == 0) {
+                // vadd.f32 Qd, Qm, Qn.
+                src1[i] = src1[i] + src2[i];
+              } else {
+                // vsub.f32 Qd, Qm, Qn.
+                src1[i] = src1[i] - src2[i];
+              }
+            }
+            set_q_register(Vd, src1);
+          } else {
+            UNIMPLEMENTED();
+          }
+          break;
+        }
+        case 0xe: {
+          if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
+            // vceq.f32.
+            float src1[4], src2[4];
+            get_q_register(Vn, src1);
+            get_q_register(Vm, src2);
+            uint32_t dst[4];
+            for (int i = 0; i < 4; i++) {
+              dst[i] = (src1[i] == src2[i]) ? 0xFFFFFFFF : 0;
+            }
+            set_q_register(Vd, dst);
+          } else {
+            UNIMPLEMENTED();
+          }
+          break;
+        }
+        case 0xf: {
+          if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
+            float src1[4], src2[4];
+            get_q_register(Vn, src1);
+            get_q_register(Vm, src2);
+            if (instr->Bit(4) == 1) {
+              if (instr->Bit(21) == 0) {
+                // vrecps.f32 Qd, Qm, Qn.
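+                // One Newton-Raphson step for 1/x: given estimate e,
+                // e * (2 - x * e) is a refined estimate.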
+                for (int i = 0; i < 4; i++) {
+                  src1[i] = 2.0f - src1[i] * src2[i];
+                }
+              } else {
+                // vrsqrts.f32 Qd, Qm, Qn.
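+                // One Newton-Raphson step for 1/sqrt(x): given estimate e,
+                // e * (3 - x * e * e) / 2 is a refined estimate.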
+                for (int i = 0; i < 4; i++) {
+                  src1[i] = (3.0f - src1[i] * src2[i]) * 0.5f;
+                }
+              }
+            } else {
+              if (instr->Bit(21) == 1) {
+                // vmin.f32 Qd, Qm, Qn.
+                for (int i = 0; i < 4; i++) {
+                  src1[i] = std::min(src1[i], src2[i]);
+                }
+              } else {
+                // vmax.f32 Qd, Qm, Qn.
+                for (int i = 0; i < 4; i++) {
+                  src1[i] = std::max(src1[i], src2[i]);
+                }
+              }
+            }
+            set_q_register(Vd, src1);
+          } else {
+            UNIMPLEMENTED();
+          }
+          break;
+        }
+        default:
+          UNIMPLEMENTED();
+          break;
+      }
+      break;
+    }
     case 5:
       if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
           (instr->Bit(4) == 1)) {
@@ -3763,10 +4441,419 @@
           e++;
         }
         set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+      } else if (instr->Bits(21, 20) == 3 && instr->Bit(4) == 0) {
+        // vext.
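+        // Extracts 16 consecutive bytes from the concatenation Vn:Vm,
+        // starting at byte imm4, e.g. imm4 == 3 gives
+        // dst = {src1[3..15], src2[0..2]}.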
+        int imm4 = instr->Bits(11, 8);
+        int Vd = instr->VFPDRegValue(kSimd128Precision);
+        int Vm = instr->VFPMRegValue(kSimd128Precision);
+        int Vn = instr->VFPNRegValue(kSimd128Precision);
+        uint8_t src1[16], src2[16], dst[16];
+        get_q_register(Vn, src1);
+        get_q_register(Vm, src2);
+        int boundary = kSimd128Size - imm4;
+        int i = 0;
+        for (; i < boundary; i++) {
+          dst[i] = src1[i + imm4];
+        }
+        for (; i < 16; i++) {
+          dst[i] = src2[i - boundary];
+        }
+        set_q_register(Vd, dst);
+      } else if (instr->Bits(11, 7) == 0xA && instr->Bit(4) == 1) {
+        // vshl.i<size> Qd, Qm, shift
+        int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
+        int shift = instr->Bits(21, 16) - size;
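+        // imm6 (bits 21:16) encodes lane size and shift together: the
+        // largest power of two not exceeding it is the size, the remainder
+        // the shift, e.g. imm6 == 0b001010 -> Neon8 lanes shifted left by 2.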
+        int Vd = instr->VFPDRegValue(kSimd128Precision);
+        int Vm = instr->VFPMRegValue(kSimd128Precision);
+        NeonSize ns = static_cast<NeonSize>(size / 16);
+        switch (ns) {
+          case Neon8: {
+            uint8_t src[16];
+            get_q_register(Vm, src);
+            for (int i = 0; i < 16; i++) {
+              src[i] <<= shift;
+            }
+            set_q_register(Vd, src);
+            break;
+          }
+          case Neon16: {
+            uint16_t src[8];
+            get_q_register(Vm, src);
+            for (int i = 0; i < 8; i++) {
+              src[i] <<= shift;
+            }
+            set_q_register(Vd, src);
+            break;
+          }
+          case Neon32: {
+            uint32_t src[4];
+            get_q_register(Vm, src);
+            for (int i = 0; i < 4; i++) {
+              src[i] <<= shift;
+            }
+            set_q_register(Vd, src);
+            break;
+          }
+          default:
+            UNREACHABLE();
+            break;
+        }
+      } else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1) {
+        // vshr.s<size> Qd, Qm, shift
+        int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
+        int shift = 2 * size - instr->Bits(21, 16);
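+        // For right shifts imm6 encodes size and (size - shift), so
+        // shift = 2 * size - imm6, e.g. imm6 == 0b001110 -> Neon8, shift 2.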
+        int Vd = instr->VFPDRegValue(kSimd128Precision);
+        int Vm = instr->VFPMRegValue(kSimd128Precision);
+        NeonSize ns = static_cast<NeonSize>(size / 16);
+        switch (ns) {
+          case Neon8: {
+            int8_t src[16];
+            get_q_register(Vm, src);
+            for (int i = 0; i < 16; i++) {
+              src[i] = ArithmeticShiftRight(src[i], shift);
+            }
+            set_q_register(Vd, src);
+            break;
+          }
+          case Neon16: {
+            int16_t src[8];
+            get_q_register(Vm, src);
+            for (int i = 0; i < 8; i++) {
+              src[i] = ArithmeticShiftRight(src[i], shift);
+            }
+            set_q_register(Vd, src);
+            break;
+          }
+          case Neon32: {
+            int32_t src[4];
+            get_q_register(Vm, src);
+            for (int i = 0; i < 4; i++) {
+              src[i] = ArithmeticShiftRight(src[i], shift);
+            }
+            set_q_register(Vd, src);
+            break;
+          }
+          default:
+            UNREACHABLE();
+            break;
+        }
       } else {
         UNIMPLEMENTED();
       }
       break;
+    case 6: {
+      int Vd, Vm, Vn;
+      if (instr->Bit(6) == 0) {
+        Vd = instr->VFPDRegValue(kDoublePrecision);
+        Vm = instr->VFPMRegValue(kDoublePrecision);
+        Vn = instr->VFPNRegValue(kDoublePrecision);
+      } else {
+        Vd = instr->VFPDRegValue(kSimd128Precision);
+        Vm = instr->VFPMRegValue(kSimd128Precision);
+        Vn = instr->VFPNRegValue(kSimd128Precision);
+      }
+      switch (instr->Bits(11, 8)) {
+        case 0x0: {
+          if (instr->Bit(4) == 1) {
+            // vqadd.u<size> Qd, Qm, Qn.
+            NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+            switch (size) {
+              case Neon8:
+                AddSaturate<uint8_t>(this, Vd, Vm, Vn);
+                break;
+              case Neon16:
+                AddSaturate<uint16_t>(this, Vd, Vm, Vn);
+                break;
+              case Neon32:
+                AddSaturate<uint32_t>(this, Vd, Vm, Vn);
+                break;
+              default:
+                UNREACHABLE();
+                break;
+            }
+          } else {
+            UNIMPLEMENTED();
+          }
+          break;
+        }
+        case 0x1: {
+          if (instr->Bits(21, 20) == 1 && instr->Bit(4) == 1) {
+            // vbsl.size Qd, Qm, Qn.
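+            // Bitwise select: result bits come from Qn where the
+            // corresponding Qd bit is set, from Qm where it is clear.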
+            uint32_t dst[4], src1[4], src2[4];
+            get_q_register(Vd, dst);
+            get_q_register(Vn, src1);
+            get_q_register(Vm, src2);
+            for (int i = 0; i < 4; i++) {
+              dst[i] = (dst[i] & src1[i]) | (~dst[i] & src2[i]);
+            }
+            set_q_register(Vd, dst);
+          } else if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 1) {
+            if (instr->Bit(6) == 0) {
+              // veor Dd, Dn, Dm
+              uint64_t src1, src2;
+              get_d_register(Vn, &src1);
+              get_d_register(Vm, &src2);
+              src1 ^= src2;
+              set_d_register(Vd, &src1);
+
+            } else {
+              // veor Qd, Qn, Qm
+              uint32_t src1[4], src2[4];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 4; i++) src1[i] ^= src2[i];
+              set_q_register(Vd, src1);
+            }
+          } else {
+            UNIMPLEMENTED();
+          }
+          break;
+        }
+        case 0x2: {
+          if (instr->Bit(4) == 1) {
+            // vqsub.u<size> Qd, Qm, Qn.
+            NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+            switch (size) {
+              case Neon8:
+                SubSaturate<uint8_t>(this, Vd, Vm, Vn);
+                break;
+              case Neon16:
+                SubSaturate<uint16_t>(this, Vd, Vm, Vn);
+                break;
+              case Neon32:
+                SubSaturate<uint32_t>(this, Vd, Vm, Vn);
+                break;
+              default:
+                UNREACHABLE();
+                break;
+            }
+          } else {
+            UNIMPLEMENTED();
+          }
+          break;
+        }
+        case 0x3: {
+          // vcge/vcgt.u<size> Qd, Qm, Qn.
+          bool ge = instr->Bit(4) == 1;
+          NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+          switch (size) {
+            case Neon8: {
+              uint8_t src1[16], src2[16];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 16; i++) {
+                if (ge)
+                  src1[i] = src1[i] >= src2[i] ? 0xFFu : 0;
+                else
+                  src1[i] = src1[i] > src2[i] ? 0xFFu : 0;
+              }
+              set_q_register(Vd, src1);
+              break;
+            }
+            case Neon16: {
+              uint16_t src1[8], src2[8];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 8; i++) {
+                if (ge)
+                  src1[i] = src1[i] >= src2[i] ? 0xFFFFu : 0;
+                else
+                  src1[i] = src1[i] > src2[i] ? 0xFFFFu : 0;
+              }
+              set_q_register(Vd, src1);
+              break;
+            }
+            case Neon32: {
+              uint32_t src1[4], src2[4];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 4; i++) {
+                if (ge)
+                  src1[i] = src1[i] >= src2[i] ? 0xFFFFFFFFu : 0;
+                else
+                  src1[i] = src1[i] > src2[i] ? 0xFFFFFFFFu : 0;
+              }
+              set_q_register(Vd, src1);
+              break;
+            }
+            default:
+              UNREACHABLE();
+              break;
+          }
+          break;
+        }
+        case 0x6: {
+          // vmin/vmax.u<size> Qd, Qm, Qn.
+          NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+          bool min = instr->Bit(4) != 0;
+          switch (size) {
+            case Neon8: {
+              uint8_t src1[16], src2[16];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 16; i++) {
+                if (min)
+                  src1[i] = std::min(src1[i], src2[i]);
+                else
+                  src1[i] = std::max(src1[i], src2[i]);
+              }
+              set_q_register(Vd, src1);
+              break;
+            }
+            case Neon16: {
+              uint16_t src1[8], src2[8];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 8; i++) {
+                if (min)
+                  src1[i] = std::min(src1[i], src2[i]);
+                else
+                  src1[i] = std::max(src1[i], src2[i]);
+              }
+              set_q_register(Vd, src1);
+              break;
+            }
+            case Neon32: {
+              uint32_t src1[4], src2[4];
+              get_q_register(Vn, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 4; i++) {
+                if (min)
+                  src1[i] = std::min(src1[i], src2[i]);
+                else
+                  src1[i] = std::max(src1[i], src2[i]);
+              }
+              set_q_register(Vd, src1);
+              break;
+            }
+            default:
+              UNREACHABLE();
+              break;
+          }
+          break;
+        }
+        case 0x8: {
+          if (instr->Bit(4) == 0) {
+            // vsub.size Qd, Qm, Qn.
+            NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
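+            // Lane-wise subtraction; unsigned arithmetic wraps modulo 2^n,
+            // matching the instruction's semantics without signed overflow.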
+            switch (size) {
+              case Neon8: {
+                uint8_t src1[16], src2[16];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 16; i++) {
+                  src1[i] -= src2[i];
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              case Neon16: {
+                uint16_t src1[8], src2[8];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 8; i++) {
+                  src1[i] -= src2[i];
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              case Neon32: {
+                uint32_t src1[4], src2[4];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 4; i++) {
+                  src1[i] -= src2[i];
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              default:
+                UNREACHABLE();
+                break;
+            }
+          } else {
+            // vceq.size Qd, Qm, Qn.
+            NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+            switch (size) {
+              case Neon8: {
+                uint8_t src1[16], src2[16];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 16; i++) {
+                  src1[i] = (src1[i] == src2[i]) ? 0xFFu : 0;
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              case Neon16: {
+                uint16_t src1[8], src2[8];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 8; i++) {
+                  src1[i] = (src1[i] == src2[i]) ? 0xFFFFu : 0;
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              case Neon32: {
+                uint32_t src1[4], src2[4];
+                get_q_register(Vn, src1);
+                get_q_register(Vm, src2);
+                for (int i = 0; i < 4; i++) {
+                  src1[i] = (src1[i] == src2[i]) ? 0xFFFFFFFFu : 0;
+                }
+                set_q_register(Vd, src1);
+                break;
+              }
+              default:
+                UNREACHABLE();
+                break;
+            }
+          }
+          break;
+        }
+        case 0xd: {
+          if (instr->Bit(21) == 0 && instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+            // vmul.f32 Qd, Qn, Qm
+            float src1[4], src2[4];
+            get_q_register(Vn, src1);
+            get_q_register(Vm, src2);
+            for (int i = 0; i < 4; i++) {
+              src1[i] = src1[i] * src2[i];
+            }
+            set_q_register(Vd, src1);
+          } else {
+            UNIMPLEMENTED();
+          }
+          break;
+        }
+        case 0xe: {
+          if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
+            // vcge/vcgt.f32 Qd, Qm, Qn
+            bool ge = instr->Bit(21) == 0;
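+            // Comparisons with NaN operands are false, so such lanes are
+            // set to all zeros.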
+            float src1[4], src2[4];
+            get_q_register(Vn, src1);
+            get_q_register(Vm, src2);
+            uint32_t dst[4];
+            for (int i = 0; i < 4; i++) {
+              if (ge) {
+                dst[i] = src1[i] >= src2[i] ? 0xFFFFFFFFu : 0;
+              } else {
+                dst[i] = src1[i] > src2[i] ? 0xFFFFFFFFu : 0;
+              }
+            }
+            set_q_register(Vd, dst);
+          } else {
+            UNIMPLEMENTED();
+          }
+          break;
+        }
+        default:
+          UNREACHABLE();
+          break;
+      }
+      break;
+    }
     case 7:
       if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
           (instr->Bit(4) == 1)) {
@@ -3787,21 +4874,400 @@
           e++;
         }
         set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
-      } else if ((instr->Bits(21, 16) == 0x32) && (instr->Bits(11, 7) == 0) &&
-                 (instr->Bit(4) == 0)) {
-        int vd = instr->VFPDRegValue(kDoublePrecision);
-        int vm = instr->VFPMRegValue(kDoublePrecision);
-        if (instr->Bit(6) == 0) {
-          // vswp Dd, Dm.
-          uint64_t dval, mval;
-          get_d_register(vd, &dval);
-          get_d_register(vm, &mval);
-          set_d_register(vm, &dval);
-          set_d_register(vd, &mval);
+      } else if (instr->Opc1Value() == 7 && instr->Bit(4) == 0) {
+        if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
+            instr->Bit(6) == 1) {
+          // vcvt.<Td>.<Tm> Qd, Qm.
+          int Vd = instr->VFPDRegValue(kSimd128Precision);
+          int Vm = instr->VFPMRegValue(kSimd128Precision);
+          uint32_t q_data[4];
+          get_q_register(Vm, q_data);
+          int op = instr->Bits(8, 7);
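+          // op selects the conversion: to-float ops round to nearest,
+          // to-integer ops round toward zero (RZ).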
+          for (int i = 0; i < 4; i++) {
+            switch (op) {
+              case 0:
+                // f32 <- s32, round towards nearest.
+                q_data[i] = bit_cast<uint32_t>(std::round(
+                    static_cast<float>(bit_cast<int32_t>(q_data[i]))));
+                break;
+              case 1:
+                // f32 <- u32, round towards nearest.
+                q_data[i] = bit_cast<uint32_t>(
+                    std::round(static_cast<float>(q_data[i])));
+                break;
+              case 2:
+                // s32 <- f32, round to zero.
+                q_data[i] = static_cast<uint32_t>(
+                    ConvertDoubleToInt(bit_cast<float>(q_data[i]), false, RZ));
+                break;
+              case 3:
+                // u32 <- f32, round to zero.
+                q_data[i] = static_cast<uint32_t>(
+                    ConvertDoubleToInt(bit_cast<float>(q_data[i]), true, RZ));
+                break;
+            }
+          }
+          set_q_register(Vd, q_data);
+        } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
+          if (instr->Bit(6) == 0) {
+            // vswp Dd, Dm.
+            uint64_t dval, mval;
+            int vd = instr->VFPDRegValue(kDoublePrecision);
+            int vm = instr->VFPMRegValue(kDoublePrecision);
+            get_d_register(vd, &dval);
+            get_d_register(vm, &mval);
+            set_d_register(vm, &dval);
+            set_d_register(vd, &mval);
+          } else {
+            // vswp Qd, Qm.
+            uint32_t dval[4], mval[4];
+            int vd = instr->VFPDRegValue(kSimd128Precision);
+            int vm = instr->VFPMRegValue(kSimd128Precision);
+            get_q_register(vd, dval);
+            get_q_register(vm, mval);
+            set_q_register(vm, dval);
+            set_q_register(vd, mval);
+          }
+        } else if (instr->Bits(11, 7) == 0x18) {
+          // vdup.32 Qd, Sm.
+          int vd = instr->VFPDRegValue(kSimd128Precision);
+          int vm = instr->VFPMRegValue(kDoublePrecision);
+          int index = instr->Bit(19);
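+          // Bit 19 selects which 32-bit lane of Dm is replicated into all
+          // four lanes of Qd.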
+          uint32_t s_data = get_s_register(vm * 2 + index);
+          uint32_t q_data[4];
+          for (int i = 0; i < 4; i++) q_data[i] = s_data;
+          set_q_register(vd, q_data);
+        } else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
+          // vmvn Qd, Qm.
+          int vd = instr->VFPDRegValue(kSimd128Precision);
+          int vm = instr->VFPMRegValue(kSimd128Precision);
+          uint32_t q_data[4];
+          get_q_register(vm, q_data);
+          for (int i = 0; i < 4; i++) q_data[i] = ~q_data[i];
+          set_q_register(vd, q_data);
+        } else if (instr->Bits(11, 10) == 0x2) {
+          // vtb[l,x] Dd, <list>, Dm.
+          int vd = instr->VFPDRegValue(kDoublePrecision);
+          int vn = instr->VFPNRegValue(kDoublePrecision);
+          int vm = instr->VFPMRegValue(kDoublePrecision);
+          int table_len = (instr->Bits(9, 8) + 1) * kDoubleSize;
+          bool vtbx = instr->Bit(6) != 0;  // vtbl / vtbx
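+          // Out-of-range indices yield 0 for vtbl but leave the existing
+          // destination byte unchanged for vtbx.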
+          uint64_t destination = 0, indices = 0, result = 0;
+          get_d_register(vd, &destination);
+          get_d_register(vm, &indices);
+          for (int i = 0; i < kDoubleSize; i++) {
+            int shift = i * kBitsPerByte;
+            int index = (indices >> shift) & 0xFF;
+            if (index < table_len) {
+              uint64_t table;
+              get_d_register(vn + index / kDoubleSize, &table);
+              result |=
+                  ((table >> ((index % kDoubleSize) * kBitsPerByte)) & 0xFF)
+                  << shift;
+            } else if (vtbx) {
+              result |= destination & (0xFFull << shift);
+            }
+          }
+          set_d_register(vd, &result);
+        } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 6) == 0x7) {
+          // vzip.<size> Qd, Qm.
+          NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
+          int Vd = instr->VFPDRegValue(kSimd128Precision);
+          int Vm = instr->VFPMRegValue(kSimd128Precision);
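+          // vzip interleaves the low halves of Vd and Vm into Vd and the
+          // high halves into Vm.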
+          switch (size) {
+            case Neon8: {
+              uint8_t src1[16], src2[16], dst1[16], dst2[16];
+              get_q_register(Vd, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 8; i++) {
+                dst1[i * 2] = src1[i];
+                dst1[i * 2 + 1] = src2[i];
+                dst2[i * 2] = src1[i + 8];
+                dst2[i * 2 + 1] = src2[i + 8];
+              }
+              set_q_register(Vd, dst1);
+              set_q_register(Vm, dst2);
+              break;
+            }
+            case Neon16: {
+              uint16_t src1[8], src2[8], dst1[8], dst2[8];
+              get_q_register(Vd, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 8; i += 2) {
+                dst1[i] = src1[i / 2];
+                dst1[i + 1] = src2[i / 2];
+                dst2[i] = src1[i / 2 + 4];
+                dst2[i + 1] = src2[i / 2 + 4];
+              }
+              set_q_register(Vd, dst1);
+              set_q_register(Vm, dst2);
+              break;
+            }
+            case Neon32: {
+              uint32_t src1[4], src2[4], dst1[4], dst2[4];
+              get_q_register(Vd, src1);
+              get_q_register(Vm, src2);
+              for (int i = 0; i < 2; i++) {
+                dst1[i * 2] = src1[i];
+                dst1[i * 2 + 1] = src2[i];
+                dst2[i * 2] = src1[i + 2];
+                dst2[i * 2 + 1] = src2[i + 2];
+              }
+              set_q_register(Vd, dst1);
+              set_q_register(Vm, dst2);
+              break;
+            }
+            default:
+              UNREACHABLE();
+              break;
+          }
+        } else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0) {
+          // vrev<op>.size Qd, Qm
+          int Vd = instr->VFPDRegValue(kSimd128Precision);
+          int Vm = instr->VFPMRegValue(kSimd128Precision);
+          NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
+          NeonSize op = static_cast<NeonSize>(static_cast<int>(Neon64) -
+                                              instr->Bits(8, 7));
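+          // Bits 8:7 encode the reversal span (0 = vrev64, 1 = vrev32,
+          // 2 = vrev16); size is the lane size within that span.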
+          switch (op) {
+            case Neon16: {
+              DCHECK_EQ(Neon8, size);
+              uint8_t src[16];
+              get_q_register(Vm, src);
+              for (int i = 0; i < 16; i += 2) {
+                std::swap(src[i], src[i + 1]);
+              }
+              set_q_register(Vd, src);
+              break;
+            }
+            case Neon32: {
+              switch (size) {
+                case Neon16: {
+                  uint16_t src[8];
+                  get_q_register(Vm, src);
+                  for (int i = 0; i < 8; i += 2) {
+                    std::swap(src[i], src[i + 1]);
+                  }
+                  set_q_register(Vd, src);
+                  break;
+                }
+                case Neon8: {
+                  uint8_t src[16];
+                  get_q_register(Vm, src);
+                  for (int i = 0; i < 4; i++) {
+                    std::swap(src[i * 4], src[i * 4 + 3]);
+                    std::swap(src[i * 4 + 1], src[i * 4 + 2]);
+                  }
+                  set_q_register(Vd, src);
+                  break;
+                }
+                default:
+                  UNREACHABLE();
+                  break;
+              }
+              break;
+            }
+            case Neon64: {
+              switch (size) {
+                case Neon32: {
+                  uint32_t src[4];
+                  get_q_register(Vm, src);
+                  std::swap(src[0], src[1]);
+                  std::swap(src[2], src[3]);
+                  set_q_register(Vd, src);
+                  break;
+                }
+                case Neon16: {
+                  uint16_t src[8];
+                  get_q_register(Vm, src);
+                  for (int i = 0; i < 2; i++) {
+                    std::swap(src[i * 4], src[i * 4 + 3]);
+                    std::swap(src[i * 4 + 1], src[i * 4 + 2]);
+                  }
+                  set_q_register(Vd, src);
+                  break;
+                }
+                case Neon8: {
+                  uint8_t src[16];
+                  get_q_register(Vm, src);
+                  for (int i = 0; i < 4; i++) {
+                    std::swap(src[i], src[7 - i]);
+                    std::swap(src[i + 8], src[15 - i]);
+                  }
+                  set_q_register(Vd, src);
+                  break;
+                }
+                default:
+                  UNREACHABLE();
+                  break;
+              }
+              break;
+            }
+            default:
+              UNREACHABLE();
+              break;
+          }
+        } else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0) {
+          int Vd = instr->VFPDRegValue(kSimd128Precision);
+          int Vm = instr->VFPMRegValue(kSimd128Precision);
+          NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
+          if (instr->Bits(9, 6) == 0xd) {
+            // vabs<type>.<size> Qd, Qm
+            if (instr->Bit(10) != 0) {
+              // floating point (clear sign bits)
+              uint32_t src[4];
+              get_q_register(Vm, src);
+              for (int i = 0; i < 4; i++) {
+                src[i] &= ~0x80000000;
+              }
+              set_q_register(Vd, src);
+            } else {
+              // signed integer
+              switch (size) {
+                case Neon8: {
+                  int8_t src[16];
+                  get_q_register(Vm, src);
+                  for (int i = 0; i < 16; i++) {
+                    src[i] = std::abs(src[i]);
+                  }
+                  set_q_register(Vd, src);
+                  break;
+                }
+                case Neon16: {
+                  int16_t src[8];
+                  get_q_register(Vm, src);
+                  for (int i = 0; i < 8; i++) {
+                    src[i] = std::abs(src[i]);
+                  }
+                  set_q_register(Vd, src);
+                  break;
+                }
+                case Neon32: {
+                  int32_t src[4];
+                  get_q_register(Vm, src);
+                  for (int i = 0; i < 4; i++) {
+                    src[i] = std::abs(src[i]);
+                  }
+                  set_q_register(Vd, src);
+                  break;
+                }
+                default:
+                  UNIMPLEMENTED();
+                  break;
+              }
+            }
+          } else if (instr->Bits(9, 6) == 0xf) {
+            // vneg<type>.<size> Qd, Qm
+            if (instr->Bit(10) != 0) {
+              // floating point (toggle sign bits)
+              uint32_t src[4];
+              get_q_register(Vm, src);
+              for (int i = 0; i < 4; i++) {
+                src[i] ^= 0x80000000;
+              }
+              set_q_register(Vd, src);
+            } else {
+              // signed integer
+              switch (size) {
+                case Neon8: {
+                  int8_t src[16];
+                  get_q_register(Vm, src);
+                  for (int i = 0; i < 16; i++) {
+                    src[i] = -src[i];
+                  }
+                  set_q_register(Vd, src);
+                  break;
+                }
+                case Neon16: {
+                  int16_t src[8];
+                  get_q_register(Vm, src);
+                  for (int i = 0; i < 8; i++) {
+                    src[i] = -src[i];
+                  }
+                  set_q_register(Vd, src);
+                  break;
+                }
+                case Neon32: {
+                  int32_t src[4];
+                  get_q_register(Vm, src);
+                  for (int i = 0; i < 4; i++) {
+                    src[i] = -src[i];
+                  }
+                  set_q_register(Vd, src);
+                  break;
+                }
+                default:
+                  UNIMPLEMENTED();
+                  break;
+              }
+            }
+          } else {
+            UNIMPLEMENTED();
+          }
+        } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5) {
+          // vrecpe/vrsqrte.f32 Qd, Qm.
+          int Vd = instr->VFPDRegValue(kSimd128Precision);
+          int Vm = instr->VFPMRegValue(kSimd128Precision);
+          uint32_t src[4];
+          get_q_register(Vm, src);
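+          // Bit 7 == 0 selects vrecpe, otherwise vrsqrte. Both estimate
+          // instructions are simulated with exact arithmetic.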
+          if (instr->Bit(7) == 0) {
+            for (int i = 0; i < 4; i++) {
+              float denom = bit_cast<float>(src[i]);
+              div_zero_vfp_flag_ = (denom == 0);
+              float result = 1.0f / denom;
+              result = canonicalizeNaN(result);
+              src[i] = bit_cast<uint32_t>(result);
+            }
+          } else {
+            lazily_initialize_fast_sqrt(isolate_);
+            for (int i = 0; i < 4; i++) {
+              float radicand = bit_cast<float>(src[i]);
+              float result = 1.0f / fast_sqrt(radicand, isolate_);
+              result = canonicalizeNaN(result);
+              src[i] = bit_cast<uint32_t>(result);
+            }
+          }
+          set_q_register(Vd, src);
         } else {
-          // Q register vswp unimplemented.
           UNIMPLEMENTED();
         }
+      } else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1) {
+        // vshr.u<size> Qd, Qm, shift
+        int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
+        int shift = 2 * size - instr->Bits(21, 16);
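+        // imm6 (bits 21:16) encodes lane size and shift together: the lane
+        // size is the largest power of two not exceeding imm6, and the
+        // shift amount is 2 * size - imm6.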
+        int Vd = instr->VFPDRegValue(kSimd128Precision);
+        int Vm = instr->VFPMRegValue(kSimd128Precision);
+        NeonSize ns = static_cast<NeonSize>(size / 16);
+        switch (ns) {
+          case Neon8: {
+            uint8_t src[16];
+            get_q_register(Vm, src);
+            for (int i = 0; i < 16; i++) {
+              src[i] >>= shift;
+            }
+            set_q_register(Vd, src);
+            break;
+          }
+          case Neon16: {
+            uint16_t src[8];
+            get_q_register(Vm, src);
+            for (int i = 0; i < 8; i++) {
+              src[i] >>= shift;
+            }
+            set_q_register(Vd, src);
+            break;
+          }
+          case Neon32: {
+            uint32_t src[4];
+            get_q_register(Vm, src);
+            for (int i = 0; i < 4; i++) {
+              src[i] >>= shift;
+            }
+            set_q_register(Vd, src);
+            break;
+          }
+          default:
+            UNREACHABLE();
+            break;
+        }
       } else {
         UNIMPLEMENTED();
       }
@@ -4316,6 +5782,207 @@
   return address;
 }
 
+Simulator::LocalMonitor::LocalMonitor()
+    : access_state_(MonitorAccess::Open),
+      tagged_addr_(0),
+      size_(TransactionSize::None) {}
+
+void Simulator::LocalMonitor::Clear() {
+  access_state_ = MonitorAccess::Open;
+  tagged_addr_ = 0;
+  size_ = TransactionSize::None;
+}
+
+void Simulator::LocalMonitor::NotifyLoad(int32_t addr) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    // A load could cause a cache eviction which will affect the monitor, so
+    // the strictest behavior is to unconditionally clear the local monitor
+    // on a load.
+    Clear();
+  }
+}
+
+void Simulator::LocalMonitor::NotifyLoadExcl(int32_t addr,
+                                             TransactionSize size) {
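+  // An exclusive load (ldrex) tags the address and size; a subsequent
+  // exclusive store is checked against this state.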
+  access_state_ = MonitorAccess::Exclusive;
+  tagged_addr_ = addr;
+  size_ = size;
+}
+
+void Simulator::LocalMonitor::NotifyStore(int32_t addr) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    // It is implementation-defined whether a non-exclusive store to an address
+    // covered by the local monitor during exclusive access transitions to open
+    // or exclusive access. See ARM DDI 0406C.b, A3.4.1.
+    //
+    // However, a store could cause a cache eviction which will affect the
+    // monitor, so the strictest behavior is to unconditionally clear the
+    // local monitor on a store.
+    Clear();
+  }
+}
+
+bool Simulator::LocalMonitor::NotifyStoreExcl(int32_t addr,
+                                              TransactionSize size) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    // It is allowed for a processor to require that the address matches
+    // exactly (A3.4.5), so this comparison does not mask addr.
+    if (addr == tagged_addr_ && size_ == size) {
+      Clear();
+      return true;
+    } else {
+      // It is implementation-defined whether an exclusive store to a
+      // non-tagged address will update memory. Behavior is unpredictable if
+      // the transaction size of the exclusive store differs from that of the
+      // exclusive load. See ARM DDI 0406C.b, A3.4.5.
+      Clear();
+      return false;
+    }
+  } else {
+    DCHECK(access_state_ == MonitorAccess::Open);
+    return false;
+  }
+}
+
+Simulator::GlobalMonitor::Processor::Processor()
+    : access_state_(MonitorAccess::Open),
+      tagged_addr_(0),
+      next_(nullptr),
+      prev_(nullptr),
+      failure_counter_(0) {}
+
+void Simulator::GlobalMonitor::Processor::Clear_Locked() {
+  access_state_ = MonitorAccess::Open;
+  tagged_addr_ = 0;
+}
+
+void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(int32_t addr) {
+  access_state_ = MonitorAccess::Exclusive;
+  tagged_addr_ = addr;
+}
+
+void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
+    int32_t addr, bool is_requesting_processor) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    // It is implementation-defined whether a non-exclusive store by the
+    // requesting processor to an address covered by the global monitor
+    // during exclusive access transitions to open or exclusive access.
+    //
+    // For any other processor, the access state always transitions to open
+    // access.
+    //
+    // See ARM DDI 0406C.b, A3.4.2.
+    //
+    // However, as with the local monitor, a store can cause a cache
+    // eviction that affects the monitor, so we conservatively always
+    // clear it.
+    Clear_Locked();
+  }
+}
+
+bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
+    int32_t addr, bool is_requesting_processor) {
+  if (access_state_ == MonitorAccess::Exclusive) {
+    if (is_requesting_processor) {
+      // It is allowed for a processor to require that the address matches
+      // exactly (A3.4.5), so this comparison does not mask addr.
+      if (addr == tagged_addr_) {
+        // The access state for the requesting processor after a successful
+        // exclusive store is implementation-defined, but according to the ARM
+        // DDI, this has no effect on the subsequent operation of the global
+        // monitor.
+        Clear_Locked();
+        // Introduce occasional strex failures. This is to simulate the
+        // behavior of hardware, which can randomly fail due to background
+        // cache evictions.
+        if (failure_counter_++ >= kMaxFailureCounter) {
+          failure_counter_ = 0;
+          return false;
+        } else {
+          return true;
+        }
+      }
+    } else if ((addr & kExclusiveTaggedAddrMask) ==
+               (tagged_addr_ & kExclusiveTaggedAddrMask)) {
+      // Check the masked addresses when responding to a successful lock by
+      // another processor, so the implementation is more conservative (i.e.
+      // the granularity of locking is as large as possible).
+      Clear_Locked();
+      return false;
+    }
+  }
+  return false;
+}
+
+Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
+
+void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(int32_t addr,
+                                                     Processor* processor) {
+  processor->NotifyLoadExcl_Locked(addr);
+  PrependProcessor_Locked(processor);
+}
+
+void Simulator::GlobalMonitor::NotifyStore_Locked(int32_t addr,
+                                                  Processor* processor) {
+  // Notify each processor of the store operation.
+  for (Processor* iter = head_; iter; iter = iter->next_) {
+    bool is_requesting_processor = iter == processor;
+    iter->NotifyStore_Locked(addr, is_requesting_processor);
+  }
+}
+
+bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(int32_t addr,
+                                                      Processor* processor) {
+  DCHECK(IsProcessorInLinkedList_Locked(processor));
+  if (processor->NotifyStoreExcl_Locked(addr, true)) {
+    // Notify the other processors that this StoreExcl succeeded.
+    for (Processor* iter = head_; iter; iter = iter->next_) {
+      if (iter != processor) {
+        iter->NotifyStoreExcl_Locked(addr, false);
+      }
+    }
+    return true;
+  } else {
+    return false;
+  }
+}
+
+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
+    Processor* processor) const {
+  return head_ == processor || processor->next_ || processor->prev_;
+}
+
+void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
+  if (IsProcessorInLinkedList_Locked(processor)) {
+    return;
+  }
+
+  if (head_) {
+    head_->prev_ = processor;
+  }
+  processor->prev_ = nullptr;
+  processor->next_ = head_;
+  head_ = processor;
+}
+
+void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
+  base::LockGuard<base::Mutex> lock_guard(&mutex);
+  if (!IsProcessorInLinkedList_Locked(processor)) {
+    return;
+  }
+
+  if (processor->prev_) {
+    processor->prev_->next_ = processor->next_;
+  } else {
+    head_ = processor->next_;
+  }
+  if (processor->next_) {
+    processor->next_->prev_ = processor->prev_;
+  }
+  processor->prev_ = nullptr;
+  processor->next_ = nullptr;
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 7435b77..39d9b7f 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -14,6 +14,8 @@
 #define V8_ARM_SIMULATOR_ARM_H_
 
 #include "src/allocation.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/mutex.h"
 
 #if !defined(USE_SIMULATOR)
 // Running without a simulator on a native arm platform.
@@ -151,10 +153,11 @@
   void set_d_register(int dreg, const uint64_t* value);
   void get_d_register(int dreg, uint32_t* value);
   void set_d_register(int dreg, const uint32_t* value);
-  void get_q_register(int qreg, uint64_t* value);
-  void set_q_register(int qreg, const uint64_t* value);
-  void get_q_register(int qreg, uint32_t* value);
-  void set_q_register(int qreg, const uint32_t* value);
+  // Support for NEON.
+  template <typename T>
+  void get_q_register(int qreg, T* value);
+  template <typename T>
+  void set_q_register(int qreg, const T* value);
 
   void set_s_register(int reg, unsigned int value);
   unsigned int get_s_register(int reg) const;
@@ -301,19 +304,27 @@
   void PrintStopInfo(uint32_t code);
 
   // Read and write memory.
+  // The *Ex functions are exclusive access. The writes return the strex status:
+  // 0 if the write succeeds, and 1 if the write fails.
   inline uint8_t ReadBU(int32_t addr);
   inline int8_t ReadB(int32_t addr);
+  uint8_t ReadExBU(int32_t addr);
   inline void WriteB(int32_t addr, uint8_t value);
   inline void WriteB(int32_t addr, int8_t value);
+  int WriteExB(int32_t addr, uint8_t value);
 
   inline uint16_t ReadHU(int32_t addr, Instruction* instr);
   inline int16_t ReadH(int32_t addr, Instruction* instr);
+  uint16_t ReadExHU(int32_t addr, Instruction* instr);
   // Note: Overloaded on the sign of the value.
   inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
   inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
+  int WriteExH(int32_t addr, uint16_t value, Instruction* instr);
 
   inline int ReadW(int32_t addr, Instruction* instr);
+  int ReadExW(int32_t addr, Instruction* instr);
   inline void WriteW(int32_t addr, int value, Instruction* instr);
+  int WriteExW(int32_t addr, int value, Instruction* instr);
 
   int32_t* ReadDW(int32_t addr);
   void WriteDW(int32_t addr, int32_t value1, int32_t value2);
@@ -339,6 +350,8 @@
   void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
   void DecodeVCMP(Instruction* instr);
   void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
+  int32_t ConvertDoubleToInt(double val, bool unsigned_integer,
+                             VFPRoundingMode mode);
   void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
 
   // Executes one instruction.
@@ -434,6 +447,94 @@
     char* desc;
   };
   StopCountAndDesc watched_stops_[kNumOfWatchedStops];
+
+  // Synchronization primitives. See ARM DDI 0406C.b, A2.9.
+  enum class MonitorAccess {
+    Open,
+    Exclusive,
+  };
+
+  enum class TransactionSize {
+    None = 0,
+    Byte = 1,
+    HalfWord = 2,
+    Word = 4,
+  };
+
+  // The least-significant bits of the address are ignored. The number of bits
+  // is implementation-defined, between 3 and 11. See ARM DDI 0406C.b, A3.4.3.
+  static const int32_t kExclusiveTaggedAddrMask = ~((1 << 11) - 1);
+
+  class LocalMonitor {
+   public:
+    LocalMonitor();
+
+    // These functions manage the state machine for the local monitor, but do
+    // not actually perform loads and stores. NotifyStoreExcl only returns
+    // true if the exclusive store is allowed; the global monitor will still
+    // have to be checked to see whether the memory should be updated.
+    void NotifyLoad(int32_t addr);
+    void NotifyLoadExcl(int32_t addr, TransactionSize size);
+    void NotifyStore(int32_t addr);
+    bool NotifyStoreExcl(int32_t addr, TransactionSize size);
+
+   private:
+    void Clear();
+
+    MonitorAccess access_state_;
+    int32_t tagged_addr_;
+    TransactionSize size_;
+  };
+
+  class GlobalMonitor {
+   public:
+    GlobalMonitor();
+
+    class Processor {
+     public:
+      Processor();
+
+     private:
+      friend class GlobalMonitor;
+      // These functions manage the state machine for the global monitor, but do
+      // not actually perform loads and stores.
+      void Clear_Locked();
+      void NotifyLoadExcl_Locked(int32_t addr);
+      void NotifyStore_Locked(int32_t addr, bool is_requesting_processor);
+      bool NotifyStoreExcl_Locked(int32_t addr, bool is_requesting_processor);
+
+      MonitorAccess access_state_;
+      int32_t tagged_addr_;
+      Processor* next_;
+      Processor* prev_;
+      // A strex can fail due to background cache evictions. Rather than
+      // simulating this, we'll just occasionally introduce cases where an
+      // exclusive store fails. This will happen once after every
+      // kMaxFailureCounter exclusive stores.
+      static const int kMaxFailureCounter = 5;
+      int failure_counter_;
+    };
+
+    // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
+    base::Mutex mutex;
+
+    void NotifyLoadExcl_Locked(int32_t addr, Processor* processor);
+    void NotifyStore_Locked(int32_t addr, Processor* processor);
+    bool NotifyStoreExcl_Locked(int32_t addr, Processor* processor);
+
+    // Called when the simulator is destroyed.
+    void RemoveProcessor(Processor* processor);
+
+   private:
+    bool IsProcessorInLinkedList_Locked(Processor* processor) const;
+    void PrependProcessor_Locked(Processor* processor);
+
+    Processor* head_;
+  };
+
+  LocalMonitor local_monitor_;
+  GlobalMonitor::Processor global_monitor_processor_;
+  static base::LazyInstance<GlobalMonitor>::type global_monitor_;
 };
 
 
diff --git a/src/arm64/assembler-arm64-inl.h b/src/arm64/assembler-arm64-inl.h
index a639e3e..5242387 100644
--- a/src/arm64/assembler-arm64-inl.h
+++ b/src/arm64/assembler-arm64-inl.h
@@ -8,7 +8,7 @@
 #include "src/arm64/assembler-arm64.h"
 #include "src/assembler.h"
 #include "src/debug/debug.h"
-
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/arm64/assembler-arm64.cc b/src/arm64/assembler-arm64.cc
index 37fdb26..3002d7c 100644
--- a/src/arm64/assembler-arm64.cc
+++ b/src/arm64/assembler-arm64.cc
@@ -194,13 +194,18 @@
   return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
 }
 
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+  return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
+}
+
 void RelocInfo::unchecked_update_wasm_memory_reference(
     Address address, ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
 }
 
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
-                                                  ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+                                           ICacheFlushMode flush_mode) {
   Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
 }
 
@@ -2950,15 +2955,13 @@
       (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
       (rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) ||
       (rmode == RelocInfo::DEOPT_INLINING_ID) ||
-      (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID) ||
-      (rmode == RelocInfo::GENERATOR_CONTINUATION)) {
+      (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID)) {
     // Adjust code for new modes.
     DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
            RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) ||
            RelocInfo::IsDeoptPosition(rmode) ||
            RelocInfo::IsInternalReference(rmode) ||
-           RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode) ||
-           RelocInfo::IsGeneratorContinuation(rmode));
+           RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
     // These modes do not need an entry in the constant pool.
   } else {
     constpool_.RecordEntry(data, rmode);
diff --git a/src/arm64/assembler-arm64.h b/src/arm64/assembler-arm64.h
index d5c2936..460ac44 100644
--- a/src/arm64/assembler-arm64.h
+++ b/src/arm64/assembler-arm64.h
@@ -198,6 +198,7 @@
 };
 
 static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
 
 struct FPRegister : public CPURegister {
   enum Code {
@@ -938,9 +939,6 @@
 
   int buffer_space() const;
 
-  // Mark generator continuation.
-  void RecordGeneratorContinuation();
-
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot(RelocInfo::Mode mode);
 
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index c0d700c..082565f 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -33,17 +33,6 @@
   __ TailCallRuntime(Runtime::kNewArray);
 }
 
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
-  descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
-  descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                                ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
@@ -174,9 +163,6 @@
     // Call runtime on identical symbols since we need to throw a TypeError.
     __ Cmp(right_type, SYMBOL_TYPE);
     __ B(eq, slow);
-    // Call runtime on identical SIMD values since we must throw a TypeError.
-    __ Cmp(right_type, SIMD128_VALUE_TYPE);
-    __ B(eq, slow);
   } else if (cond == eq) {
     __ JumpIfHeapNumber(right, &heap_number);
   } else {
@@ -188,9 +174,6 @@
     // Call runtime on identical symbols since we need to throw a TypeError.
     __ Cmp(right_type, SYMBOL_TYPE);
     __ B(eq, slow);
-    // Call runtime on identical SIMD values since we must throw a TypeError.
-    __ Cmp(right_type, SIMD128_VALUE_TYPE);
-    __ B(eq, slow);
     // Normally here we fall through to return_equal, but undefined is
     // special: (undefined == undefined) == true, but
     // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
@@ -590,8 +573,11 @@
   if (cond == eq) {
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(lhs, rhs);
-      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+      __ Push(cp);
+      __ Call(strict() ? isolate()->builtins()->StrictEqual()
+                       : isolate()->builtins()->Equal(),
+              RelocInfo::CODE_TARGET);
+      __ Pop(cp);
     }
     // Turn true into 0 and false into some non-zero value.
     STATIC_ASSERT(EQUAL == 0);
@@ -1147,10 +1133,10 @@
   __ Fmov(fp_zero, 0.0);
 
   // Build an entry frame (see layout below).
-  int marker = type();
+  StackFrame::Type marker = type();
   int64_t bad_frame_pointer = -1L;  // Bad frame pointer to fail if it is used.
   __ Mov(x13, bad_frame_pointer);
-  __ Mov(x12, Smi::FromInt(marker));
+  __ Mov(x12, StackFrame::TypeToMarker(marker));
   __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
   __ Ldr(x10, MemOperand(x11));
 
@@ -1166,12 +1152,12 @@
   __ Ldr(x11, MemOperand(x10));
   __ Cbnz(x11, &non_outermost_js);
   __ Str(fp, MemOperand(x10));
-  __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+  __ Mov(x12, StackFrame::OUTERMOST_JSENTRY_FRAME);
   __ Push(x12);
   __ B(&done);
   __ Bind(&non_outermost_js);
   // We spare one instruction by pushing xzr since the marker is 0.
-  DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
+  DCHECK(StackFrame::INNER_JSENTRY_FRAME == 0);
   __ Push(xzr);
   __ Bind(&done);
 
@@ -1253,7 +1239,7 @@
   // Check if the current stack frame is marked as the outermost JS frame.
   Label non_outermost_js_2;
   __ Pop(x10);
-  __ Cmp(x10, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+  __ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
   __ B(ne, &non_outermost_js_2);
   __ Mov(x11, ExternalReference(js_entry_sp));
   __ Str(xzr, MemOperand(x11));
@@ -1276,56 +1262,6 @@
   __ Ret();
 }
 
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  // Ensure that the vector and slot registers won't be clobbered before
-  // calling the miss handler.
-  DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::VectorRegister(),
-                     LoadWithVectorDescriptor::SlotRegister()));
-
-  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
-                                                          x11, &miss);
-
-  __ Bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
-  // Return address is in lr.
-  Label miss;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register index = LoadDescriptor::NameRegister();
-  Register result = x0;
-  Register scratch = x10;
-  DCHECK(!scratch.is(receiver) && !scratch.is(index));
-  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
-         result.is(LoadWithVectorDescriptor::SlotRegister()));
-
-  // StringCharAtGenerator doesn't use the result register until it's passed
-  // the different miss possibilities. If it did, we would have a conflict
-  // when FLAG_vector_ics is true.
-  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          RECEIVER_IS_STRING);
-  char_at_generator.GenerateFast(masm);
-  __ Ret();
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
-  __ Bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
 #ifdef V8_INTERPRETED_REGEXP
   __ TailCallRuntime(Runtime::kRegExpExec);
@@ -1453,7 +1389,7 @@
   // (6) External string.  Make it, offset-wise, look like a sequential string.
   //     Go to (4).
   // (7) Short external string or not a string?  If yes, bail out to runtime.
-  // (8) Sliced string.  Replace subject with parent.  Go to (1).
+  // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
 
   Label check_underlying;   // (1)
   Label seq_string;         // (4)
@@ -1487,6 +1423,7 @@
   // (2) Sequential or cons?  If not, go to (5).
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kThinStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ Cmp(string_representation, kExternalStringTag);
@@ -1514,10 +1451,10 @@
   // before entering the exit frame.
   __ SmiUntag(x1, x10);
 
-  // The third bit determines the string encoding in string_type.
-  STATIC_ASSERT(kOneByteStringTag == 0x04);
+  // The fourth bit determines the string encoding in string_type.
+  STATIC_ASSERT(kOneByteStringTag == 0x08);
   STATIC_ASSERT(kTwoByteStringTag == 0x00);
-  STATIC_ASSERT(kStringEncodingMask == 0x04);
+  STATIC_ASSERT(kStringEncodingMask == 0x08);
 
   // Find the code object based on the assumptions above.
   // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
@@ -1525,7 +1462,7 @@
   STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
                 JSRegExp::kDataUC16CodeOffset);
   __ Mov(x10, kPointerSize);
-  // We will need the encoding later: Latin1 = 0x04
+  // We will need the encoding later: Latin1 = 0x08
   //                                  UC16   = 0x00
   __ Ands(string_encoding, string_type, kStringEncodingMask);
   __ CzeroX(x10, ne);
@@ -1573,10 +1510,10 @@
   __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
 
   // Handle UC16 encoding, two bytes make one character.
-  //   string_encoding: if Latin1: 0x04
+  //   string_encoding: if Latin1: 0x08
   //                    if UC16:   0x00
-  STATIC_ASSERT(kStringEncodingMask == 0x04);
-  __ Ubfx(string_encoding, string_encoding, 2, 1);
+  STATIC_ASSERT(kStringEncodingMask == 0x08);
+  __ Ubfx(string_encoding, string_encoding, 3, 1);
   __ Eor(string_encoding, string_encoding, 1);
   //   string_encoding: if Latin1: 0
   //                    if UC16:   1
@@ -1789,11 +1726,18 @@
                            kShortExternalStringMask | kIsNotStringMask,
                            &runtime);
 
-  // (8) Sliced string. Replace subject with parent.
+  // (8) Sliced or thin string. Replace subject with parent.
+  Label thin_string;
+  __ Cmp(string_representation, kThinStringTag);
+  __ B(eq, &thin_string);
   __ Ldr(sliced_string_offset,
          UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
   __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   __ B(&check_underlying);  // Go to (1).
+
+  __ bind(&thin_string);
+  __ Ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+  __ B(&check_underlying);  // Go to (1).
 #endif
 }
 
@@ -1834,9 +1778,9 @@
   //  index :           slot in feedback vector (smi)
   Label initialize, done, miss, megamorphic, not_array_function;
 
-  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+  DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
-  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+  DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
   // Load the cache state.
@@ -1851,7 +1795,7 @@
   // function without changing the state.
   // We don't know if feedback value is a WeakCell or a Symbol, but it's
   // harmless to read at this position in a symbol (see static asserts in
-  // type-feedback-vector.h).
+  // feedback-vector.h).
   Label check_allocation_site;
   __ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
   __ Cmp(function, feedback_value);
@@ -1971,212 +1915,6 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
-                               Register slot) {
-  __ Add(feedback_vector, feedback_vector,
-         Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
-  __ Add(feedback_vector, feedback_vector,
-         Operand(FixedArray::kHeaderSize + kPointerSize));
-  __ Ldr(slot, FieldMemOperand(feedback_vector, 0));
-  __ Add(slot, slot, Operand(Smi::FromInt(1)));
-  __ Str(slot, FieldMemOperand(feedback_vector, 0));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
-  // x0 - number of arguments
-  // x1 - function
-  // x3 - slot id
-  // x2 - vector
-  // x4 - allocation site (loaded from vector[slot])
-  Register function = x1;
-  Register feedback_vector = x2;
-  Register index = x3;
-  Register allocation_site = x4;
-  Register scratch = x5;
-
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch);
-  __ Cmp(function, scratch);
-  __ B(ne, miss);
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, feedback_vector, index);
-
-  // Set up arguments for the array constructor stub.
-  Register allocation_site_arg = feedback_vector;
-  Register new_target_arg = index;
-  __ Mov(allocation_site_arg, allocation_site);
-  __ Mov(new_target_arg, function);
-  ArrayConstructorStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
-  ASM_LOCATION("CallICStub");
-
-  // x0 - number of arguments
-  // x1 - function
-  // x3 - slot id (Smi)
-  // x2 - vector
-  Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
-  Register function = x1;
-  Register feedback_vector = x2;
-  Register index = x3;
-
-  // The checks. First, does x1 match the recorded monomorphic target?
-  __ Add(x4, feedback_vector,
-         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
-  __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize));
-
-  // We don't know that we have a weak cell. We might have a private symbol
-  // or an AllocationSite, but the memory is safe to examine.
-  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
-  // FixedArray.
-  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
-  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
-  // computed, meaning that it can't appear to be a pointer. If the low bit is
-  // 0, then hash is computed, but the 0 bit prevents the field from appearing
-  // to be a pointer.
-  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
-  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
-                    WeakCell::kValueOffset &&
-                WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
-  __ Ldr(x5, FieldMemOperand(x4, WeakCell::kValueOffset));
-  __ Cmp(x5, function);
-  __ B(ne, &extra_checks_or_miss);
-
-  // The compare above could have been a SMI/SMI comparison. Guard against this
-  // convincing us that we have a monomorphic JSFunction.
-  __ JumpIfSmi(function, &extra_checks_or_miss);
-
-  __ Bind(&call_function);
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, feedback_vector, index);
-
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
-                                                    tail_call_mode()),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&extra_checks_or_miss);
-  Label uninitialized, miss, not_allocation_site;
-
-  __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &call);
-
-  __ Ldr(x5, FieldMemOperand(x4, HeapObject::kMapOffset));
-  __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &not_allocation_site);
-
-  HandleArrayCase(masm, &miss);
-
-  __ bind(&not_allocation_site);
-
-  // The following cases attempt to handle MISS cases without going to the
-  // runtime.
-  if (FLAG_trace_ic) {
-    __ jmp(&miss);
-  }
-
-  // TODO(mvstanton): the code below is effectively disabled. Investigate.
-  __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
-
-  // We are going megamorphic. If the feedback is a JSFunction, it is fine
-  // to handle it here. More complex cases are dealt with in the runtime.
-  __ AssertNotSmi(x4);
-  __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
-  __ Add(x4, feedback_vector,
-         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
-  __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
-  __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
-
-  __ Bind(&call);
-
-  // Increment the call count for megamorphic function calls.
-  IncrementCallCount(masm, feedback_vector, index);
-
-  __ Bind(&call_count_incremented);
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&uninitialized);
-
-  // We are going monomorphic, provided we actually have a JSFunction.
-  __ JumpIfSmi(function, &miss);
-
-  // Goto miss case if we do not have a function.
-  __ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);
-
-  // Make sure the function is not the Array() function, which requires special
-  // behavior on MISS.
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, x5);
-  __ Cmp(function, x5);
-  __ B(eq, &miss);
-
-  // Make sure the function belongs to the same native context.
-  __ Ldr(x4, FieldMemOperand(function, JSFunction::kContextOffset));
-  __ Ldr(x4, ContextMemOperand(x4, Context::NATIVE_CONTEXT_INDEX));
-  __ Ldr(x5, NativeContextMemOperand());
-  __ Cmp(x4, x5);
-  __ B(ne, &miss);
-
-  // Store the function. Use a stub since we need a frame for allocation.
-  // x2 - vector
-  // x3 - slot
-  // x1 - function
-  // x0 - number of arguments
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    CreateWeakCellStub create_stub(masm->isolate());
-    __ SmiTag(x0);
-    __ Push(x0);
-    __ Push(feedback_vector, index);
-
-    __ Push(cp, function);
-    __ CallStub(&create_stub);
-    __ Pop(cp, function);
-
-    __ Pop(feedback_vector, index);
-    __ Pop(x0);
-    __ SmiUntag(x0);
-  }
-
-  __ B(&call_function);
-
-  // We are here because tracing is on or we encountered a MISS case we can't
-  // handle here.
-  __ bind(&miss);
-  GenerateMiss(masm);
-
-  // The runtime increments the call count in the vector for us.
-  __ B(&call_count_incremented);
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
-  ASM_LOCATION("CallICStub[Miss]");
-
-  FrameScope scope(masm, StackFrame::INTERNAL);
-
-  // Preserve the number of arguments as Smi.
-  __ SmiTag(x0);
-
-  // Push the receiver and the function and feedback info.
-  __ Push(x0, x1, x2, x3);
-
-  // Call the entry.
-  __ CallRuntime(Runtime::kCallIC_Miss);
-
-  // Move result to edi and exit the internal frame.
-  __ Mov(x1, x0);
-
-  // Restore number of arguments.
-  __ Pop(x0);
-  __ SmiUntag(x0);
-}
-
-
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   // If the receiver is a smi trigger the non-string case.
   if (check_mode_ == RECEIVER_IS_UNKNOWN) {
@@ -2262,38 +2000,6 @@
   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
 }
 
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
-  __ JumpIfNotSmi(code_, &slow_case_);
-  __ Cmp(code_, Smi::FromInt(String::kMaxOneByteCharCode));
-  __ B(hi, &slow_case_);
-
-  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
-  // At this point code register contains smi tagged one-byte char code.
-  __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
-  __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
-  __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
-  __ Bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
-  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
-  __ Bind(&slow_case_);
-  call_helper.BeforeCall(masm);
-  __ Push(code_);
-  __ CallRuntime(Runtime::kStringCharFromCode);
-  __ Mov(result_, x0);
-  call_helper.AfterCall(masm);
-  __ B(&exit_);
-
-  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
-
 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
   // Inputs are in x0 (lhs) and x1 (rhs).
   DCHECK_EQ(CompareICState::BOOLEAN, state());
@@ -2974,240 +2680,6 @@
   __ Ret();
 }
 
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(x2);
-  CallICStub stub(isolate(), state());
-  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
-                             Register receiver_map, Register scratch1,
-                             Register scratch2, bool is_polymorphic,
-                             Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label load_smi_map, compare_map;
-  Label start_polymorphic;
-
-  Register cached_map = scratch1;
-
-  __ Ldr(cached_map,
-         FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-  __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ Cmp(receiver_map, cached_map);
-  __ B(ne, &start_polymorphic);
-  // found, now call handler.
-  Register handler = feedback;
-  __ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
-  __ Jump(feedback);
-
-  Register length = scratch2;
-  __ Bind(&start_polymorphic);
-  __ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-  if (!is_polymorphic) {
-    __ Cmp(length, Operand(Smi::FromInt(2)));
-    __ B(eq, miss);
-  }
-
-  Register too_far = length;
-  Register pointer_reg = feedback;
-
-  // +-----+------+------+-----+-----+ ... ----+
-  // | map | len  | wm0  | h0  | wm1 |      hN |
-  // +-----+------+------+-----+-----+ ... ----+
-  //                 0      1     2        len-1
-  //                              ^              ^
-  //                              |              |
-  //                         pointer_reg      too_far
-  //                         aka feedback     scratch2
-  // also need receiver_map
-  // use cached_map (scratch1) to look in the weak map values.
-  __ Add(too_far, feedback,
-         Operand::UntagSmiAndScale(length, kPointerSizeLog2));
-  __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Add(pointer_reg, feedback,
-         FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);
-
-  __ Bind(&next_loop);
-  __ Ldr(cached_map, MemOperand(pointer_reg));
-  __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ Cmp(receiver_map, cached_map);
-  __ B(ne, &prepare_next);
-  __ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
-  __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
-  __ Jump(handler);
-
-  __ Bind(&prepare_next);
-  __ Add(pointer_reg, pointer_reg, kPointerSize * 2);
-  __ Cmp(pointer_reg, too_far);
-  __ B(lt, &next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ jmp(miss);
-}
-
-
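The layout diagram above compresses a lot; as a hedged sketch (not V8's actual C++), the loop in HandleArrayCases walks (weak map, handler) pairs starting at element 2 and dispatches on the first map match:

#include <cstddef>

struct WeakCell { void* value; };  // value is null once the map is cleared

// `slots` is a flattened view of the feedback FixedArray body after the
// first pair: [wm0, h0, wm1, h1, ..., wmN, hN]; the stride of 2 matches
// the `kPointerSize * 2` bump in the assembly loop.
void* LookupPolymorphic(void** slots, size_t len, void* receiver_map) {
  for (size_t i = 0; i + 1 < len; i += 2) {
    WeakCell* cell = static_cast<WeakCell*>(slots[i]);
    if (cell->value == receiver_map) return slots[i + 1];  // handler hit
  }
  return nullptr;  // exhausted the map/handler pairs: miss
}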
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
-                                  Register receiver_map, Register feedback,
-                                  Register vector, Register slot,
-                                  Register scratch, Label* compare_map,
-                                  Label* load_smi_map, Label* try_array) {
-  __ JumpIfSmi(receiver, load_smi_map);
-  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bind(compare_map);
-  Register cached_map = scratch;
-  // Move the weak map into the weak_cell register.
-  __ Ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
-  __ Cmp(cached_map, receiver_map);
-  __ B(ne, try_array);
-
-  Register handler = feedback;
-  __ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
-  __ Ldr(handler,
-         FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
-  __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
-  __ Jump(handler);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  KeyedStoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
-                                       Register receiver_map, Register scratch1,
-                                       Register scratch2, Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label start_polymorphic;
-  Label transition_call;
-
-  Register cached_map = scratch1;
-  Register too_far = scratch2;
-  Register pointer_reg = feedback;
-
-  __ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
-  // +-----+------+------+-----+-----+-----+ ... ----+
-  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
-  // +-----+------+------+-----+-----+-----+ ... ----+
-  //                 0      1     2              len-1
-  //                 ^                                 ^
-  //                 |                                 |
-  //             pointer_reg                        too_far
-  //             aka feedback                       scratch2
-  // also need receiver_map
-  // use cached_map (scratch1) to look in the weak map values.
-  __ Add(too_far, feedback,
-         Operand::UntagSmiAndScale(too_far, kPointerSizeLog2));
-  __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Add(pointer_reg, feedback,
-         FixedArray::OffsetOfElementAt(0) - kHeapObjectTag);
-
-  __ Bind(&next_loop);
-  __ Ldr(cached_map, MemOperand(pointer_reg));
-  __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ Cmp(receiver_map, cached_map);
-  __ B(ne, &prepare_next);
-  // Is it a transitioning store?
-  __ Ldr(too_far, MemOperand(pointer_reg, kPointerSize));
-  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
-  __ B(ne, &transition_call);
-
-  __ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
-  __ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag);
-  __ Jump(pointer_reg);
-
-  __ Bind(&transition_call);
-  __ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
-  __ JumpIfSmi(too_far, miss);
-
-  __ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-  // Load the map into the correct register.
-  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
-  __ mov(feedback, too_far);
-  __ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
-  __ Jump(receiver_map);
-
-  __ Bind(&prepare_next);
-  __ Add(pointer_reg, pointer_reg, kPointerSize * 3);
-  __ Cmp(pointer_reg, too_far);
-  __ B(lt, &next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ jmp(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // x1
-  Register key = StoreWithVectorDescriptor::NameRegister();           // x2
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // x3
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // x4
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(x0));          // x0
-  Register feedback = x5;
-  Register receiver_map = x6;
-  Register scratch1 = x7;
-
-  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
-  __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  __ Bind(&try_array);
-  // Is it a fixed array?
-  __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
-
-  // We have a polymorphic element handler.
-  Label try_poly_name;
-  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss);
-
-  __ Bind(&not_array);
-  // Is it generic?
-  __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
-                   &try_poly_name);
-  Handle<Code> megamorphic_stub =
-      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ Bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ Cmp(key, feedback);
-  __ B(ne, &miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
-  __ Ldr(feedback,
-         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss);
-
-  __ Bind(&miss);
-  KeyedStoreIC::GenerateMiss(masm);
-
-  __ Bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-
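Taken together, the deleted KeyedStoreICStub::GenerateImpl implements a fixed dispatch order over the feedback slot's contents. A hedged summary in C++ (the enums are illustrative, not V8 types):

enum class FeedbackKind { kWeakCell, kFixedArray, kMegamorphicSymbol, kName };
enum class Outcome { kMonomorphic, kPolymorphic, kMegamorphic, kPolyName, kMiss };

Outcome DispatchKeyedStore(FeedbackKind kind, bool key_matches_name) {
  switch (kind) {
    case FeedbackKind::kWeakCell:          return Outcome::kMonomorphic;
    case FeedbackKind::kFixedArray:        return Outcome::kPolymorphic;
    case FeedbackKind::kMegamorphicSymbol: return Outcome::kMegamorphic;
    case FeedbackKind::kName:
      // The next vector slot then holds a map/handler array for this name.
      return key_matches_name ? Outcome::kPolyName : Outcome::kMiss;
  }
  return Outcome::kMiss;  // unreachable; keeps compilers happy
}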
 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
 // a "Push lr" instruction, followed by a call.
 static const unsigned int kProfileEntryHookCallSize =
@@ -3309,91 +2781,6 @@
   __ Blr(lr);
 }
 
-
-// Probe the name dictionary in the 'elements' register.
-// Jump to the 'done' label if a property with the given name is found.
-// Jump to the 'miss' label otherwise.
-//
-// If the lookup was successful, 'scratch2' will point at the matching entry,
-// i.e. elements + kPointerSize * (kEntrySize * index).
-// 'elements' and 'name' registers are preserved on miss.
-void NameDictionaryLookupStub::GeneratePositiveLookup(
-    MacroAssembler* masm,
-    Label* miss,
-    Label* done,
-    Register elements,
-    Register name,
-    Register scratch1,
-    Register scratch2) {
-  DCHECK(!AreAliased(elements, name, scratch1, scratch2));
-
-  // Assert that name contains a string.
-  __ AssertName(name);
-
-  // Compute the capacity mask.
-  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
-  __ Sub(scratch1, scratch1, 1);
-
-  // Generate an unrolled loop that performs a few probes before giving up.
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
-    if (i > 0) {
-      // Add the probe offset (i + i * i) left shifted to avoid right shifting
-      // the hash in a separate instruction. The value hash + i + i * i is right
-      // shifted in the following And instruction.
-      DCHECK(NameDictionary::GetProbeOffset(i) <
-          1 << (32 - Name::kHashFieldOffset));
-      __ Add(scratch2, scratch2, Operand(
-          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
-    }
-    __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
-
-    // Scale the index by multiplying by the element size.
-    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
-    __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
-    // Check if the key is identical to the name.
-    UseScratchRegisterScope temps(masm);
-    Register scratch3 = temps.AcquireX();
-    __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
-    __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
-    __ Cmp(name, scratch3);
-    __ B(eq, done);
-  }
-
-  // The inlined probes didn't find the entry.
-  // Call the complete stub to scan the whole dictionary.
-
-  CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
-  spill_list.Combine(lr);
-  spill_list.Remove(scratch1);
-  spill_list.Remove(scratch2);
-
-  __ PushCPURegList(spill_list);
-
-  if (name.is(x0)) {
-    DCHECK(!elements.is(x1));
-    __ Mov(x1, name);
-    __ Mov(x0, elements);
-  } else {
-    __ Mov(x0, elements);
-    __ Mov(x1, name);
-  }
-
-  Label not_found;
-  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
-  __ CallStub(&stub);
-  __ Cbz(x0, &not_found);
-  __ Mov(scratch2, x2);  // Move entry index into scratch2.
-  __ PopCPURegList(spill_list);
-  __ B(done);
-
-  __ Bind(&not_found);
-  __ PopCPURegList(spill_list);
-  __ B(miss);
-}
-
-
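The unrolled probe loop above encodes NameDictionary's quadratic probing. A hedged scalar sketch of the index arithmetic (the shift-folding trick in the assembly, which adds the probe offset pre-shifted by Name::kHashShift so a single LSR both extracts the hash and applies the offset, is omitted here):

#include <cstdint>

static const int kEntrySize = 3;  // words per dictionary entry

int ProbeOffset(int i) { return i + i * i; }  // NameDictionary::GetProbeOffset

// Word offset into the elements array of probe i for a given hash;
// capacity_mask is capacity - 1, capacity being a power of two.
int EntryWordOffset(uint32_t hash, int i, uint32_t capacity_mask) {
  uint32_t index = (hash + ProbeOffset(i)) & capacity_mask;
  return static_cast<int>(index) * kEntrySize;
}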
 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                       Label* miss,
                                                       Label* done,
@@ -3875,702 +3262,6 @@
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- x1 : target
-  //  -- x3 : new target
-  //  -- cp : context
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(x1);
-  __ AssertReceiver(x3);
-
-  // Verify that the new target is a JSFunction.
-  Label new_object;
-  __ JumpIfNotObjectType(x3, x2, x2, JS_FUNCTION_TYPE, &new_object);
-
-  // Load the initial map and verify that it's in fact a map.
-  __ Ldr(x2, FieldMemOperand(x3, JSFunction::kPrototypeOrInitialMapOffset));
-  __ JumpIfSmi(x2, &new_object);
-  __ JumpIfNotObjectType(x2, x0, x0, MAP_TYPE, &new_object);
-
-  // Fall back to runtime if the target differs from the new target's
-  // initial map constructor.
-  __ Ldr(x0, FieldMemOperand(x2, Map::kConstructorOrBackPointerOffset));
-  __ CompareAndBranch(x0, x1, ne, &new_object);
-
-  // Allocate the JSObject on the heap.
-  Label allocate, done_allocate;
-  __ Ldrb(x4, FieldMemOperand(x2, Map::kInstanceSizeOffset));
-  __ Allocate(x4, x0, x5, x6, &allocate, SIZE_IN_WORDS);
-  __ Bind(&done_allocate);
-
-  // Initialize the JSObject fields.
-  STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize);
-  __ Str(x2, FieldMemOperand(x0, JSObject::kMapOffset));
-  __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
-  STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize);
-  STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize);
-  __ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
-  __ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
-  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ Add(x1, x0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
-  // ----------- S t a t e -------------
-  //  -- x0 : result (tagged)
-  //  -- x1 : result fields (untagged)
-  //  -- x5 : result end (untagged)
-  //  -- x2 : initial map
-  //  -- cp : context
-  //  -- lr : return address
-  // -----------------------------------
-
-  // Perform in-object slack tracking if requested.
-  Label slack_tracking;
-  STATIC_ASSERT(Map::kNoSlackTracking == 0);
-  __ LoadRoot(x6, Heap::kUndefinedValueRootIndex);
-  __ Ldr(w3, FieldMemOperand(x2, Map::kBitField3Offset));
-  __ TestAndBranchIfAnySet(w3, Map::ConstructionCounter::kMask,
-                           &slack_tracking);
-  {
-    // Initialize all in-object fields with undefined.
-    __ InitializeFieldsWithFiller(x1, x5, x6);
-    __ Ret();
-  }
-  __ Bind(&slack_tracking);
-  {
-    // Decrease generous allocation count.
-    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
-    __ Sub(w3, w3, 1 << Map::ConstructionCounter::kShift);
-    __ Str(w3, FieldMemOperand(x2, Map::kBitField3Offset));
-
-    // Initialize the in-object fields with undefined.
-    __ Ldrb(x4, FieldMemOperand(x2, Map::kUnusedPropertyFieldsOffset));
-    __ Sub(x4, x5, Operand(x4, LSL, kPointerSizeLog2));
-    __ InitializeFieldsWithFiller(x1, x4, x6);
-
-    // Initialize the remaining (reserved) fields with one pointer filler map.
-    __ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex);
-    __ InitializeFieldsWithFiller(x1, x5, x6);
-
-    // Check if we can finalize the instance size.
-    Label finalize;
-    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
-    __ TestAndBranchIfAllClear(w3, Map::ConstructionCounter::kMask, &finalize);
-    __ Ret();
-
-    // Finalize the instance size.
-    __ Bind(&finalize);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(x0, x2);
-      __ CallRuntime(Runtime::kFinalizeInstanceSize);
-      __ Pop(x0);
-    }
-    __ Ret();
-  }
-
-  // Fall back to %AllocateInNewSpace.
-  __ Bind(&allocate);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    __ Mov(x4,
-           Operand(x4, LSL, kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
-    __ Push(x2, x4);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ Pop(x2);
-  }
-  __ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset));
-  __ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2));
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ Sub(x5, x5, kHeapObjectTag);  // Subtract the tag from end.
-  __ B(&done_allocate);
-
-  // Fall back to %NewObject.
-  __ Bind(&new_object);
-  __ Push(x1, x3);
-  __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
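The slack-tracking branch above is the subtle part of this deletion. A hedged model of what it does (the struct and field names are illustrative):

struct MapModel {
  int construction_counter;    // Map::ConstructionCounter bit field
  int unused_property_fields;  // trailing in-object slack, in words
};

// Returns true when the caller must call Runtime::kFinalizeInstanceSize.
// While the counter is live, the unused tail of the new object is filled
// with the one-pointer filler map so the instance can be shrunk later;
// the used prefix is filled with undefined as usual.
bool AllocateWithSlackTracking(MapModel* map) {
  if (map->construction_counter == 0) return false;  // tracking already done
  map->construction_counter--;
  return map->construction_counter == 0;             // last tracked allocation
}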
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- x1 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(x1);
-
-  // Make x2 point to the JavaScript frame.
-  __ Mov(x2, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
-    __ Cmp(x3, x1);
-    __ B(eq, &ok);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ Bind(&ok);
-  }
-
-  // Check if we have rest parameters (only possible if we have an
-  // arguments adaptor frame below the function frame).
-  Label no_rest_parameters;
-  __ Ldr(x2, MemOperand(x2, CommonFrameConstants::kCallerFPOffset));
-  __ Ldr(x3, MemOperand(x2, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ Cmp(x3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ B(ne, &no_rest_parameters);
-
-  // Check if the arguments adaptor frame contains more arguments than
-  // specified by the function's internal formal parameter count.
-  Label rest_parameters;
-  __ Ldrsw(x0, UntagSmiMemOperand(
-                   x2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
-  __ Ldrsw(
-      x3, FieldMemOperand(x3, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ Subs(x0, x0, x3);
-  __ B(gt, &rest_parameters);
-
-  // Return an empty rest parameter array.
-  __ Bind(&no_rest_parameters);
-  {
-    // ----------- S t a t e -------------
-    //  -- cp : context
-    //  -- lr : return address
-    // -----------------------------------
-
-    // Allocate an empty rest parameter array.
-    Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, x0, x1, x2, &allocate, NO_ALLOCATION_FLAGS);
-    __ Bind(&done_allocate);
-
-    // Setup the rest parameter array in x0.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
-    __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
-    __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
-    __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
-    __ Str(x1, FieldMemOperand(x0, JSArray::kElementsOffset));
-    __ Mov(x1, Smi::kZero);
-    __ Str(x1, FieldMemOperand(x0, JSArray::kLengthOffset));
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ Ret();
-
-    // Fall back to %AllocateInNewSpace.
-    __ Bind(&allocate);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(Smi::FromInt(JSArray::kSize));
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-    }
-    __ B(&done_allocate);
-  }
-
-  __ Bind(&rest_parameters);
-  {
-    // Compute the pointer to the first rest parameter (skipping the receiver).
-    __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
-    __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
-
-    // ----------- S t a t e -------------
-    //  -- cp : context
-    //  -- x0 : number of rest parameters
-    //  -- x1 : function
-    //  -- x2 : pointer to first rest parameters
-    //  -- lr : return address
-    // -----------------------------------
-
-    // Allocate space for the rest parameter array plus the backing store.
-    Label allocate, done_allocate;
-    __ Mov(x6, JSArray::kSize + FixedArray::kHeaderSize);
-    __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
-    __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
-    __ Bind(&done_allocate);
-
-    // Compute arguments.length in x6.
-    __ SmiTag(x6, x0);
-
-    // Setup the elements array in x3.
-    __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
-    __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
-    __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
-    __ Add(x4, x3, FixedArray::kHeaderSize);
-    {
-      Label loop, done_loop;
-      __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
-      __ Bind(&loop);
-      __ Cmp(x4, x0);
-      __ B(eq, &done_loop);
-      __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
-      __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
-      __ Sub(x2, x2, Operand(1 * kPointerSize));
-      __ Add(x4, x4, Operand(1 * kPointerSize));
-      __ B(&loop);
-      __ Bind(&done_loop);
-    }
-
-    // Setup the rest parameter array in x0.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, x1);
-    __ Str(x1, FieldMemOperand(x0, JSArray::kMapOffset));
-    __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
-    __ Str(x1, FieldMemOperand(x0, JSArray::kPropertiesOffset));
-    __ Str(x3, FieldMemOperand(x0, JSArray::kElementsOffset));
-    __ Str(x6, FieldMemOperand(x0, JSArray::kLengthOffset));
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ Ret();
-
-    // Fall back to %AllocateInNewSpace (if not too big).
-    Label too_big_for_new_space;
-    __ Bind(&allocate);
-    __ Cmp(x6, Operand(kMaxRegularHeapObjectSize));
-    __ B(gt, &too_big_for_new_space);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(x0);
-      __ SmiTag(x6);
-      __ Push(x0, x2, x6);
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-      __ Mov(x3, x0);
-      __ Pop(x2, x0);
-      __ SmiUntag(x0);
-    }
-    __ B(&done_allocate);
-
-    // Fall back to %NewRestParameter.
-    __ Bind(&too_big_for_new_space);
-    __ Push(x1);
-    __ TailCallRuntime(Runtime::kNewRestParameter);
-  }
-}
-
-
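The early checks in the deleted stub reduce to a small piece of arithmetic: rest parameters can only exist when an arguments adaptor frame supplied more actual arguments than the formal parameter count. A hedged sketch:

#include <algorithm>

int RestParameterCount(bool has_adaptor_frame, int actual_args,
                       int formal_params) {
  if (!has_adaptor_frame) return 0;  // no extra arguments are possible
  return std::max(0, actual_args - formal_params);
}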
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- x1 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(x1);
-
-  // Make x6 point to the JavaScript frame.
-  __ Mov(x6, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ Ldr(x6, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ Ldr(x3, MemOperand(x6, StandardFrameConstants::kFunctionOffset));
-    __ Cmp(x3, x1);
-    __ B(eq, &ok);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ Bind(&ok);
-  }
-
-  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
-  __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
-  __ Ldrsw(
-      x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ Add(x3, x6, Operand(x2, LSL, kPointerSizeLog2));
-  __ Add(x3, x3, Operand(StandardFrameConstants::kCallerSPOffset));
-  __ SmiTag(x2);
-
-  // x1 : function
-  // x2 : number of parameters (tagged)
-  // x3 : parameters pointer
-  // x6 : JavaScript frame pointer
-  //
-  // Returns pointer to result object in x0.
-
-  // Make an untagged copy of the parameter count.
-  // Note: arg_count_smi is an alias of param_count_smi.
-  Register function = x1;
-  Register arg_count_smi = x2;
-  Register param_count_smi = x2;
-  Register recv_arg = x3;
-  Register param_count = x7;
-  __ SmiUntag(param_count, param_count_smi);
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Register caller_fp = x11;
-  Register caller_ctx = x12;
-  Label runtime;
-  Label adaptor_frame, try_allocate;
-  __ Ldr(caller_fp, MemOperand(x6, StandardFrameConstants::kCallerFPOffset));
-  __ Ldr(
-      caller_ctx,
-      MemOperand(caller_fp, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ B(eq, &adaptor_frame);
-
-  // No adaptor, parameter count = argument count.
-
-  //   x1   function      function pointer
-  //   x2   arg_count_smi number of function arguments (smi)
-  //   x3   recv_arg      pointer to receiver arguments
-  //   x4   mapped_params number of mapped params, min(params, args) (uninit)
-  //   x7   param_count   number of function parameters
-  //   x11  caller_fp     caller's frame pointer
-  //   x14  arg_count     number of function arguments (uninit)
-
-  Register arg_count = x14;
-  Register mapped_params = x4;
-  __ Mov(arg_count, param_count);
-  __ Mov(mapped_params, param_count);
-  __ B(&try_allocate);
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ Bind(&adaptor_frame);
-  __ Ldr(arg_count_smi,
-         MemOperand(caller_fp,
-                    ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(arg_count, arg_count_smi);
-  __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2));
-  __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
-
-  // Compute the mapped parameter count = min(param_count, arg_count)
-  __ Cmp(param_count, arg_count);
-  __ Csel(mapped_params, param_count, arg_count, lt);
-
-  __ Bind(&try_allocate);
-
-  //   x0   alloc_obj     pointer to allocated objects: param map, backing
-  //                      store, arguments (uninit)
-  //   x1   function      function pointer
-  //   x2   arg_count_smi number of function arguments (smi)
-  //   x3   recv_arg      pointer to receiver arguments
-  //   x4   mapped_params number of mapped parameters, min(params, args)
-  //   x7   param_count   number of function parameters
-  //   x10  size          size of objects to allocate (uninit)
-  //   x14  arg_count     number of function arguments
-
-  // Compute the size of backing store, parameter map, and arguments object.
-  // 1. The parameter map has two extra words containing the context and
-  // the backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-
-  // Calculate the parameter map size, assuming it exists.
-  Register size = x10;
-  __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2));
-  __ Add(size, size, kParameterMapHeaderSize);
-
-  // If there are no mapped parameters, set the running size total to zero.
-  // Otherwise, use the parameter map size calculated earlier.
-  __ Cmp(mapped_params, 0);
-  __ CzeroX(size, eq);
-
-  // 2. Add the size of the backing store and arguments object.
-  __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2));
-  __ Add(size, size, FixedArray::kHeaderSize + JSSloppyArgumentsObject::kSize);
-
-  // Do the allocation of all three objects in one go. Assign this to x0, as it
-  // will be returned to the caller.
-  Register alloc_obj = x0;
-  __ Allocate(size, alloc_obj, x11, x12, &runtime, NO_ALLOCATION_FLAGS);
-
-  // Get the arguments boilerplate from the current (global) context.
-
-  //   x0   alloc_obj       pointer to allocated objects (param map, backing
-  //                        store, arguments)
-  //   x1   function        function pointer
-  //   x2   arg_count_smi   number of function arguments (smi)
-  //   x3   recv_arg        pointer to receiver arguments
-  //   x4   mapped_params   number of mapped parameters, min(params, args)
-  //   x7   param_count     number of function parameters
-  //   x11  sloppy_args_map offset to args (or aliased args) map (uninit)
-  //   x14  arg_count       number of function arguments
-
-  Register global_ctx = x10;
-  Register sloppy_args_map = x11;
-  Register aliased_args_map = x10;
-  __ Ldr(global_ctx, NativeContextMemOperand());
-
-  __ Ldr(sloppy_args_map,
-         ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
-  __ Ldr(
-      aliased_args_map,
-      ContextMemOperand(global_ctx, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX));
-  __ Cmp(mapped_params, 0);
-  __ CmovX(sloppy_args_map, aliased_args_map, ne);
-
-  // Copy the JS object part.
-  __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
-  __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
-  __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
-  __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
-  // Set up the callee in-object property.
-  __ AssertNotSmi(function);
-  __ Str(function,
-         FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kCalleeOffset));
-
-  // Use the length and set that as an in-object property.
-  __ Str(arg_count_smi,
-         FieldMemOperand(alloc_obj, JSSloppyArgumentsObject::kLengthOffset));
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, "elements" will point there, otherwise
-  // it will point to the backing store.
-
-  //   x0   alloc_obj     pointer to allocated objects (param map, backing
-  //                      store, arguments)
-  //   x1   function      function pointer
-  //   x2   arg_count_smi number of function arguments (smi)
-  //   x3   recv_arg      pointer to receiver arguments
-  //   x4   mapped_params number of mapped parameters, min(params, args)
-  //   x5   elements      pointer to parameter map or backing store (uninit)
-  //   x6   backing_store pointer to backing store (uninit)
-  //   x7   param_count   number of function parameters
-  //   x14  arg_count     number of function arguments
-
-  Register elements = x5;
-  __ Add(elements, alloc_obj, JSSloppyArgumentsObject::kSize);
-  __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
-
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  __ Cmp(mapped_params, 0);
-  // Set up backing store address, because it is needed later for filling in
-  // the unmapped arguments.
-  Register backing_store = x6;
-  __ CmovX(backing_store, elements, eq);
-  __ B(eq, &skip_parameter_map);
-
-  __ LoadRoot(x10, Heap::kSloppyArgumentsElementsMapRootIndex);
-  __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset));
-  __ Add(x10, mapped_params, 2);
-  __ SmiTag(x10);
-  __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Str(cp, FieldMemOperand(elements,
-                             FixedArray::kHeaderSize + 0 * kPointerSize));
-  __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2));
-  __ Add(x10, x10, kParameterMapHeaderSize);
-  __ Str(x10, FieldMemOperand(elements,
-                              FixedArray::kHeaderSize + 1 * kPointerSize));
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. Then index the context,
-  // where parameters are stored in reverse order, at:
-  //
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1
-  //
-  // The mapped parameter thus needs to get indices:
-  //
-  //   MIN_CONTEXT_SLOTS + parameter_count - 1 ..
-  //     MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count
-  //
-  // We loop from right to left.
-
-  //   x0   alloc_obj     pointer to allocated objects (param map, backing
-  //                      store, arguments)
-  //   x1   function      function pointer
-  //   x2   arg_count_smi number of function arguments (smi)
-  //   x3   recv_arg      pointer to receiver arguments
-  //   x4   mapped_params number of mapped parameters, min(params, args)
-  //   x5   elements      pointer to parameter map or backing store (uninit)
-  //   x6   backing_store pointer to backing store (uninit)
-  //   x7   param_count   number of function parameters
-  //   x11  loop_count    parameter loop counter (uninit)
-  //   x12  index         parameter index (smi, uninit)
-  //   x13  the_hole      hole value (uninit)
-  //   x14  arg_count     number of function arguments
-
-  Register loop_count = x11;
-  Register index = x12;
-  Register the_hole = x13;
-  Label parameters_loop, parameters_test;
-  __ Mov(loop_count, mapped_params);
-  __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS));
-  __ Sub(index, index, mapped_params);
-  __ SmiTag(index);
-  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
-  __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2));
-  __ Add(backing_store, backing_store, kParameterMapHeaderSize);
-
-  __ B(&parameters_test);
-
-  __ Bind(&parameters_loop);
-  __ Sub(loop_count, loop_count, 1);
-  __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2));
-  __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag);
-  __ Str(index, MemOperand(elements, x10));
-  __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize);
-  __ Str(the_hole, MemOperand(backing_store, x10));
-  __ Add(index, index, Smi::FromInt(1));
-  __ Bind(&parameters_test);
-  __ Cbnz(loop_count, &parameters_loop);
-
-  __ Bind(&skip_parameter_map);
-  // Copy the arguments header and remaining slots (if there are any).
-  __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex);
-  __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset));
-  __ Str(arg_count_smi, FieldMemOperand(backing_store,
-                                        FixedArray::kLengthOffset));
-
-  //   x0   alloc_obj     pointer to allocated objects (param map, backing
-  //                      store, arguments)
-  //   x1   function      function pointer
-  //   x2   arg_count_smi number of function arguments (smi)
-  //   x3   recv_arg      pointer to receiver arguments
-  //   x4   mapped_params number of mapped parameters, min(params, args)
-  //   x6   backing_store pointer to backing store (uninit)
-  //   x14  arg_count     number of function arguments
-
-  Label arguments_loop, arguments_test;
-  __ Mov(x10, mapped_params);
-  __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2));
-  __ B(&arguments_test);
-
-  __ Bind(&arguments_loop);
-  __ Sub(recv_arg, recv_arg, kPointerSize);
-  __ Ldr(x11, MemOperand(recv_arg));
-  __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2));
-  __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize));
-  __ Add(x10, x10, 1);
-
-  __ Bind(&arguments_test);
-  __ Cmp(x10, arg_count);
-  __ B(lt, &arguments_loop);
-
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  __ Bind(&runtime);
-  __ Push(function, recv_arg, arg_count_smi);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
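The size computation in the deleted stub packs three objects into one allocation. A hedged C++ restatement (kSloppyArgumentsObjectSize and the header sizes are assumed arm64 values, not quoted from V8 headers):

#include <algorithm>

static const int kPointerSize = 8;                          // arm64
static const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
static const int kParameterMapHeaderSize =
    kFixedArrayHeaderSize + 2 * kPointerSize;  // plus context + backing store
static const int kSloppyArgumentsObjectSize = 5 * kPointerSize;  // assumed

int SloppyArgumentsAllocationSize(int param_count, int arg_count) {
  int mapped = std::min(param_count, arg_count);
  int size =
      (mapped == 0) ? 0 : kParameterMapHeaderSize + mapped * kPointerSize;
  size += kFixedArrayHeaderSize + arg_count * kPointerSize;  // backing store
  return size + kSloppyArgumentsObjectSize;                  // JS object
}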
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- x1 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(x1);
-
-  // Make x2 point to the JavaScript frame.
-  __ Mov(x2, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kFunctionOffset));
-    __ Cmp(x3, x1);
-    __ B(eq, &ok);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ Bind(&ok);
-  }
-
-  // Check if we have an arguments adaptor frame below the function frame.
-  Label arguments_adaptor, arguments_done;
-  __ Ldr(x3, MemOperand(x2, StandardFrameConstants::kCallerFPOffset));
-  __ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ Cmp(x4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ B(eq, &arguments_adaptor);
-  {
-    __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
-    __ Ldrsw(x0, FieldMemOperand(
-                     x4, SharedFunctionInfo::kFormalParameterCountOffset));
-    __ Add(x2, x2, Operand(x0, LSL, kPointerSizeLog2));
-    __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
-  }
-  __ B(&arguments_done);
-  __ Bind(&arguments_adaptor);
-  {
-    __ Ldrsw(x0, UntagSmiMemOperand(
-                     x3, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ Add(x2, x3, Operand(x0, LSL, kPointerSizeLog2));
-    __ Add(x2, x2, StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize);
-  }
-  __ Bind(&arguments_done);
-
-  // ----------- S t a t e -------------
-  //  -- cp : context
-  //  -- x0 : number of rest parameters
-  //  -- x1 : function
-  //  -- x2 : pointer to first rest parameters
-  //  -- lr : return address
-  // -----------------------------------
-
-  // Allocate space for the strict arguments object plus the backing store.
-  Label allocate, done_allocate;
-  __ Mov(x6, JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize);
-  __ Add(x6, x6, Operand(x0, LSL, kPointerSizeLog2));
-  __ Allocate(x6, x3, x4, x5, &allocate, NO_ALLOCATION_FLAGS);
-  __ Bind(&done_allocate);
-
-  // Compute arguments.length in x6.
-  __ SmiTag(x6, x0);
-
-  // Setup the elements array in x3.
-  __ LoadRoot(x1, Heap::kFixedArrayMapRootIndex);
-  __ Str(x1, FieldMemOperand(x3, FixedArray::kMapOffset));
-  __ Str(x6, FieldMemOperand(x3, FixedArray::kLengthOffset));
-  __ Add(x4, x3, FixedArray::kHeaderSize);
-  {
-    Label loop, done_loop;
-    __ Add(x0, x4, Operand(x0, LSL, kPointerSizeLog2));
-    __ Bind(&loop);
-    __ Cmp(x4, x0);
-    __ B(eq, &done_loop);
-    __ Ldr(x5, MemOperand(x2, 0 * kPointerSize));
-    __ Str(x5, FieldMemOperand(x4, 0 * kPointerSize));
-    __ Sub(x2, x2, Operand(1 * kPointerSize));
-    __ Add(x4, x4, Operand(1 * kPointerSize));
-    __ B(&loop);
-    __ Bind(&done_loop);
-  }
-
-  // Setup the strict arguments object in x0.
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, x1);
-  __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kMapOffset));
-  __ LoadRoot(x1, Heap::kEmptyFixedArrayRootIndex);
-  __ Str(x1, FieldMemOperand(x0, JSStrictArgumentsObject::kPropertiesOffset));
-  __ Str(x3, FieldMemOperand(x0, JSStrictArgumentsObject::kElementsOffset));
-  __ Str(x6, FieldMemOperand(x0, JSStrictArgumentsObject::kLengthOffset));
-  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
-  __ Ret();
-
-  // Fall back to %AllocateInNewSpace (if not too big).
-  Label too_big_for_new_space;
-  __ Bind(&allocate);
-  __ Cmp(x6, Operand(kMaxRegularHeapObjectSize));
-  __ B(gt, &too_big_for_new_space);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(x0);
-    __ SmiTag(x6);
-    __ Push(x0, x2, x6);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ Mov(x3, x0);
-    __ Pop(x2, x0);
-    __ SmiUntag(x0);
-  }
-  __ B(&done_allocate);
-
-  // Fall back to %NewStrictArguments.
-  __ Bind(&too_big_for_new_space);
-  __ Push(x1);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
 // The number of register that CallApiFunctionAndReturn will need to save on
 // the stack. The space for these registers need to be allocated in the
 // ExitFrame before calling CallApiFunctionAndReturn.
diff --git a/src/arm64/code-stubs-arm64.h b/src/arm64/code-stubs-arm64.h
index 4b56b54..13e1b9d 100644
--- a/src/arm64/code-stubs-arm64.h
+++ b/src/arm64/code-stubs-arm64.h
@@ -355,14 +355,6 @@
                                      Handle<Name> name,
                                      Register scratch0);
 
-  static void GeneratePositiveLookup(MacroAssembler* masm,
-                                     Label* miss,
-                                     Label* done,
-                                     Register elements,
-                                     Register name,
-                                     Register scratch1,
-                                     Register scratch2);
-
   bool SometimesSetsUpAFrame() override { return false; }
 
  private:
diff --git a/src/arm64/codegen-arm64.cc b/src/arm64/codegen-arm64.cc
index edd2899..4fb9a2d 100644
--- a/src/arm64/codegen-arm64.cc
+++ b/src/arm64/codegen-arm64.cc
@@ -40,272 +40,6 @@
 // -------------------------------------------------------------------------
 // Code generators
 
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* allocation_memento_found) {
-  ASM_LOCATION(
-      "ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
-  DCHECK(!AreAliased(receiver, key, value, target_map));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    DCHECK(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
-                                         allocation_memento_found);
-  }
-
-  // Set transitioned map.
-  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      x10,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
-  Label gc_required, only_change_map;
-  Register elements = x4;
-  Register length = x5;
-  Register array_size = x6;
-  Register array = x7;
-
-  Register scratch = x6;
-
-  // Verify input registers don't conflict with locals.
-  DCHECK(!AreAliased(receiver, key, value, target_map,
-                     elements, length, array_size, array));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
-
-  __ Push(lr);
-  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
-                                           FixedArray::kLengthOffset));
-
-  // Allocate new FixedDoubleArray.
-  __ Lsl(array_size, length, kDoubleSizeLog2);
-  __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
-  __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
-  // Register array is non-tagged heap object.
-
-  // Set the destination FixedDoubleArray's length and map.
-  Register map_root = array_size;
-  __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
-  __ SmiTag(x11, length);
-  __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
-
-  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
-                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ Move(x10, array);
-  __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ RecordWriteField(receiver, JSObject::kElementsOffset, x10, scratch,
-                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  // Prepare for conversion loop.
-  Register src_elements = x10;
-  Register dst_elements = x11;
-  Register dst_end = x12;
-  __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
-  __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
-
-  FPRegister nan_d = d1;
-  __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
-
-  Label entry, done;
-  __ B(&entry);
-
-  __ Bind(&only_change_map);
-  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ B(&done);
-
-  // Call into runtime if GC is required.
-  __ Bind(&gc_required);
-  __ Pop(lr);
-  __ B(fail);
-
-  // Iterate over the array, copying and converting smis to doubles. If an
-  // element is non-smi, write a hole to the destination.
-  {
-    Label loop;
-    __ Bind(&loop);
-    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
-    __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
-    __ Tst(x13, kSmiTagMask);
-    __ Fcsel(d0, d0, nan_d, eq);
-    __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
-
-    __ Bind(&entry);
-    __ Cmp(dst_elements, dst_end);
-    __ B(lt, &loop);
-  }
-
-  __ Pop(lr);
-  __ Bind(&done);
-}
-
-
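The conversion loop deleted above turns each smi into its double value and writes the hole NaN for anything else. A hedged sketch; the concrete kHoleNanInt64 bit pattern below is a placeholder, not V8's actual constant:

#include <cstdint>
#include <cstring>

static const uint64_t kHoleNanInt64 = 0x7ff8000000000001ULL;  // placeholder

double ElementToDouble(int64_t tagged) {
  if ((tagged & 1) == 0) {                     // smi tag bit is 0
    return static_cast<double>(tagged >> 32);  // arm64 keeps the value high
  }
  double hole;  // non-smi: emit the hole NaN the runtime recognizes
  std::memcpy(&hole, &kHoleNanInt64, sizeof(hole));
  return hole;
}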
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
-  Register elements = x4;
-  Register array_size = x6;
-  Register array = x7;
-  Register length = x5;
-
-  // Verify input registers don't conflict with locals.
-  DCHECK(!AreAliased(receiver, key, value, target_map,
-                     elements, array_size, array, length));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  Label only_change_map;
-
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
-
-  __ Push(lr);
-  // TODO(all): These registers may not need to be pushed. Examine
-  // RecordWriteStub and check whether it's needed.
-  __ Push(target_map, receiver, key, value);
-  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
-                                           FixedArray::kLengthOffset));
-  // Allocate new FixedArray.
-  Label gc_required;
-  __ Mov(array_size, FixedDoubleArray::kHeaderSize);
-  __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
-  __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
-
-  // Set destination FixedDoubleArray's length and map.
-  Register map_root = array_size;
-  __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
-  __ SmiTag(x11, length);
-  __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
-
-  // Prepare for conversion loop.
-  Register src_elements = x10;
-  Register dst_elements = x11;
-  Register dst_end = x12;
-  Register the_hole = x14;
-  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
-  __ Add(src_elements, elements,
-         FixedDoubleArray::kHeaderSize - kHeapObjectTag);
-  __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
-
-  // Allocating heap numbers in the loop below can fail and cause a jump to
-  // gc_required. We can't leave a partly initialized FixedArray behind,
-  // so pessimistically fill it with holes now.
-  Label initialization_loop, initialization_loop_entry;
-  __ B(&initialization_loop_entry);
-  __ bind(&initialization_loop);
-  __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
-  __ bind(&initialization_loop_entry);
-  __ Cmp(dst_elements, dst_end);
-  __ B(lt, &initialization_loop);
-
-  __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
-
-  Register heap_num_map = x15;
-  __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
-
-  Label entry;
-  __ B(&entry);
-
-  // Call into runtime if GC is required.
-  __ Bind(&gc_required);
-  __ Pop(value, key, receiver, target_map);
-  __ Pop(lr);
-  __ B(fail);
-
-  {
-    Label loop, convert_hole;
-    __ Bind(&loop);
-    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
-    __ Cmp(x13, kHoleNanInt64);
-    __ B(eq, &convert_hole);
-
-    // Non-hole double, copy value into a heap number.
-    Register heap_num = length;
-    Register scratch = array_size;
-    Register scratch2 = elements;
-    __ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2,
-                          x13, heap_num_map);
-    __ Mov(x13, dst_elements);
-    __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
-    __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
-                   EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-    __ B(&entry);
-
-    // Replace the-hole NaN with the-hole pointer.
-    __ Bind(&convert_hole);
-    __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
-
-    __ Bind(&entry);
-    __ Cmp(dst_elements, dst_end);
-    __ B(lt, &loop);
-  }
-
-  __ Pop(value, key, receiver, target_map);
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
-                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ Pop(lr);
-
-  __ Bind(&only_change_map);
-  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
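Going the other way, the deleted GenerateDoubleToObject pre-fills the destination with holes precisely because heap-number allocation inside the loop can bail out to the GC path; it must never expose a partly initialized FixedArray. A hedged sketch:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Slot { bool is_hole; double value; };

std::vector<Slot> DoublesToObjects(const uint64_t* src, size_t n,
                                   uint64_t hole_nan_bits) {
  std::vector<Slot> dst(n, Slot{true, 0.0});  // pessimistically all holes
  for (size_t i = 0; i < n; ++i) {
    if (src[i] == hole_nan_bits) continue;    // keep the hole
    double d;
    std::memcpy(&d, &src[i], sizeof(d));
    dst[i] = Slot{false, d};  // in V8 this is where a heap number is boxed
  }
  return dst;
}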
 CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
   USE(isolate);
   DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
@@ -338,30 +72,22 @@
   return MacroAssembler::IsYoungSequence(isolate, sequence);
 }
 
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
 
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
-                               MarkingParity* parity) {
-  if (IsYoungSequence(isolate, sequence)) {
-    *age = kNoAgeCodeAge;
-    *parity = NO_MARKING_PARITY;
-  } else {
-    byte* target = sequence + kCodeAgeStubEntryOffset;
-    Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
-    GetCodeAgeAndParity(stub, age, parity);
-  }
+  byte* target = sequence + kCodeAgeStubEntryOffset;
+  Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
+  return GetAgeOfCodeAgeStub(stub);
 }
 
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
-                                byte* sequence,
-                                Code::Age age,
-                                MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+                                Code::Age age) {
   PatchingAssembler patcher(isolate, sequence,
                             kNoCodeAgeSequenceLength / kInstructionSize);
   if (age == kNoAgeCodeAge) {
     MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
   } else {
-    Code * stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age);
     MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
   }
 }
@@ -373,6 +99,9 @@
                                        Register result,
                                        Label* call_runtime) {
   DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
+  Label indirect_string_loaded;
+  __ Bind(&indirect_string_loaded);
+
   // Fetch the instance type of the receiver into result register.
   __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
   __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -382,17 +111,25 @@
   __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
 
   // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
+  Label cons_string, thin_string;
+  __ And(result, result, kStringRepresentationMask);
+  __ Cmp(result, kConsStringTag);
+  __ B(eq, &cons_string);
+  __ Cmp(result, kThinStringTag);
+  __ B(eq, &thin_string);
 
   // Handle slices.
-  Label indirect_string_loaded;
   __ Ldr(result.W(),
          UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
   __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
   __ Add(index, index, result.W());
   __ B(&indirect_string_loaded);
 
+  // Handle thin strings.
+  __ Bind(&thin_string);
+  __ Ldr(string, FieldMemOperand(string, ThinString::kActualOffset));
+  __ B(&indirect_string_loaded);
+
   // Handle cons strings.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
@@ -403,10 +140,7 @@
   __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
   // Get the first of the two strings and load its instance type.
   __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
-  __ Bind(&indirect_string_loaded);
-  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+  __ B(&indirect_string_loaded);
 
   // Distinguish sequential and external strings. Only these two string
   // representations can reach here (slices and flat cons strings have been
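The new thin-string case slots into the same indirect-string loop as slices and cons strings; all three now branch back to indirect_string_loaded until a direct string remains. A hedged model (types and names are illustrative):

enum class Shape { kDirect, kSliced, kCons, kThin };

struct Str {
  Shape shape;
  Str* target;  // sliced: parent, cons: first child, thin: actual string
  int offset;   // sliced strings only
};

// Follows indirections until a sequential/external string is reached,
// accumulating the character index adjustment for slices.
Str* ResolveIndirect(Str* s, int* index) {
  while (s->shape != Shape::kDirect) {
    if (s->shape == Shape::kSliced) *index += s->offset;
    // Cons strings take this path only when the second child is the empty
    // string; otherwise the assembly falls back to the runtime.
    s = s->target;
  }
  return s;
}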
diff --git a/src/arm64/deoptimizer-arm64.cc b/src/arm64/deoptimizer-arm64.cc
index c1d04ac..0bedceb 100644
--- a/src/arm64/deoptimizer-arm64.cc
+++ b/src/arm64/deoptimizer-arm64.cc
@@ -78,7 +78,7 @@
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
   for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
-    double double_value = input_->GetDoubleRegister(i);
+    Float64 double_value = input_->GetDoubleRegister(i);
     output_frame->SetDoubleRegister(i, double_value);
   }
 }
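Context for the one-line change above: GetDoubleRegister now yields a Float64 wrapper rather than a C++ double, because passing raw doubles through C++ can silently canonicalize signaling-NaN payloads. A hedged sketch of such a bit-preserving wrapper (V8's real class has more API):

#include <cstdint>
#include <cstring>

struct Float64 {
  uint64_t bits;  // copied verbatim, so NaN payloads survive
  static Float64 FromBits(uint64_t b) { return Float64{b}; }
  double ToDouble() const {
    double d;
    std::memcpy(&d, &bits, sizeof(d));
    return d;
  }
};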
diff --git a/src/arm64/interface-descriptors-arm64.cc b/src/arm64/interface-descriptors-arm64.cc
index 13ecc2b..988f7e9 100644
--- a/src/arm64/interface-descriptors-arm64.cc
+++ b/src/arm64/interface-descriptors-arm64.cc
@@ -64,41 +64,13 @@
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  // x2: function info
-  Register registers[] = {x2};
+  // x1: function info
+  // x2: feedback vector
+  // x3: slot
+  Register registers[] = {x1, x2, x3};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void FastNewObjectDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {x1, x3};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // x1: function
-  Register registers[] = {x1};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // x1: function
-  Register registers[] = {x1};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // x1: function
-  Register registers[] = {x1};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 // static
 const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
 
@@ -167,15 +139,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {x1, x3};
+  Register registers[] = {x1, x0, x3};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {x1, x0, x3, x2};
   data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -204,6 +174,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // x1: target
+  // x2: start index (to support rest parameters)
+  Register registers[] = {x1, x2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void ConstructStubDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -240,13 +217,12 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
-  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
-      CallInterfaceDescriptorData* data) {                          \
-    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
-  }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+  Register registers[] = {x1, x3, x0, x2};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
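As a reading aid for these descriptor changes: each InitializePlatformSpecific pins a call descriptor's parameters to fixed arm64 registers, so the ArrayConstructorDescriptor added above maps (target, new target, argc, allocation site) onto (x1, x3, x0, x2). A hedged tabular restatement (not a V8 type):

struct DescriptorRegs {
  const char* name;
  int regs[4];  // register codes: x0 == 0, x1 == 1, ...; -1 means unused
};

const DescriptorRegs kArrayConstructor = {"ArrayConstructor", {1, 3, 0, 2}};
const DescriptorRegs kCallICTrampoline = {"CallICTrampoline", {1, 0, 3, -1}};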
 
 void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -465,6 +441,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      x1,  // loaded new FP
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
 
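The descriptor hunks above all follow one pattern: each InitializePlatformSpecific override lists, in parameter order, the ARM64 registers that carry a builtin's arguments. A minimal standalone C++ sketch of that pattern follows; Register and CallInterfaceDescriptorData here are illustrative stand-ins, not V8's real types (those live in src/interface-descriptors.h and carry considerably more state).

#include <cstdio>

// Stand-in types for the sketch only.
struct Register { const char* name; };

struct CallInterfaceDescriptorData {
  const Register* registers_ = nullptr;
  int count_ = 0;
  void InitializePlatformSpecific(int count, const Register* registers) {
    count_ = count;
    registers_ = registers;
  }
};

// Mirrors the new FastNewClosureDescriptor above: function info in x1,
// feedback vector in x2, slot in x3.
void InitFastNewClosure(CallInterfaceDescriptorData* data) {
  static const Register registers[] = {{"x1"}, {"x2"}, {"x3"}};
  data->InitializePlatformSpecific(3, registers);
}

int main() {
  CallInterfaceDescriptorData data;
  InitFastNewClosure(&data);
  for (int i = 0; i < data.count_; ++i) {
    std::printf("arg %d -> %s\n", i, data.registers_[i].name);
  }
  return 0;
}
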
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index bc7a281..549db5d 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -1780,23 +1780,6 @@
   JumpToExternalReference(ExternalReference(fid, isolate()));
 }
 
-
-void MacroAssembler::InitializeNewString(Register string,
-                                         Register length,
-                                         Heap::RootListIndex map_index,
-                                         Register scratch1,
-                                         Register scratch2) {
-  DCHECK(!AreAliased(string, length, scratch1, scratch2));
-  LoadRoot(scratch2, map_index);
-  SmiTag(scratch1, length);
-  Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
-
-  Mov(scratch2, String::kEmptyHashField);
-  Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
-  Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
 int MacroAssembler::ActivationFrameAlignment() {
 #if V8_HOST_ARCH_ARM64
   // Running on the real platform. Use the alignment as mandated by the local
@@ -2203,65 +2186,6 @@
   Bind(&done);
 }
 
-
-void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
-    Register first, Register second, Register scratch1, Register scratch2,
-    Label* failure, SmiCheckType smi_check) {
-  if (smi_check == DO_SMI_CHECK) {
-    JumpIfEitherSmi(first, second, failure);
-  } else if (emit_debug_code()) {
-    DCHECK(smi_check == DONT_DO_SMI_CHECK);
-    Label not_smi;
-    JumpIfEitherSmi(first, second, NULL, &not_smi);
-
-    // At least one input is a smi, but the flags indicated a smi check wasn't
-    // needed.
-    Abort(kUnexpectedSmi);
-
-    Bind(&not_smi);
-  }
-
-  // Test that both first and second are sequential one-byte strings.
-  Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
-  Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
-  Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
-  JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
-                                                 scratch2, failure);
-}
-
-
-void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
-    Register first, Register second, Register scratch1, Register scratch2,
-    Label* failure) {
-  DCHECK(!AreAliased(scratch1, second));
-  DCHECK(!AreAliased(scratch1, scratch2));
-  const int kFlatOneByteStringMask =
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatOneByteStringTag =
-      kStringTag | kOneByteStringTag | kSeqStringTag;
-  And(scratch1, first, kFlatOneByteStringMask);
-  And(scratch2, second, kFlatOneByteStringMask);
-  Cmp(scratch1, kFlatOneByteStringTag);
-  Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
-  B(ne, failure);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
-                                                              Register scratch,
-                                                              Label* failure) {
-  const int kFlatOneByteStringMask =
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatOneByteStringTag =
-      kStringTag | kOneByteStringTag | kSeqStringTag;
-  And(scratch, type, kFlatOneByteStringMask);
-  Cmp(scratch, kFlatOneByteStringTag);
-  B(ne, failure);
-}
-
-
 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
     Register first, Register second, Register scratch1, Register scratch2,
     Label* failure) {
@@ -2425,17 +2349,15 @@
   Bind(&regular_invoke);
 }
 
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
-                                             const ParameterCount& expected,
-                                             const ParameterCount& actual) {
-  Label skip_flooding;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  Mov(x4, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual) {
+  Label skip_hook;
+  ExternalReference debug_hook_active =
+      ExternalReference::debug_hook_on_function_call_address(isolate());
+  Mov(x4, Operand(debug_hook_active));
   Ldrsb(x4, MemOperand(x4));
-  CompareAndBranch(x4, Operand(StepIn), lt, &skip_flooding);
+  CompareAndBranch(x4, Operand(0), eq, &skip_hook);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2452,7 +2374,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    CallRuntime(Runtime::kDebugOnFunctionCall);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -2466,7 +2388,7 @@
       SmiUntag(expected.reg());
     }
   }
-  bind(&skip_flooding);
+  bind(&skip_hook);
 }
 
 
@@ -2480,7 +2402,9 @@
   DCHECK(function.is(x1));
   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
 
-  FloodFunctionIfStepping(function, new_target, expected, actual);
+  if (call_wrapper.NeedsDebugHookCheck()) {
+    CheckDebugHook(function, new_target, expected, actual);
+  }
 
   // Clear the new.target register if not given.
   if (!new_target.is_valid()) {
@@ -2677,7 +2601,7 @@
   UseScratchRegisterScope temps(this);
   frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
   Register temp = temps.AcquireX();
-  Mov(temp, Smi::FromInt(type));
+  Mov(temp, StackFrame::TypeToMarker(type));
   Push(lr, fp);
   Mov(fp, StackPointer());
   Claim(frame_slots);
@@ -2693,11 +2617,10 @@
   }
 }
 
-
-void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
   Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  Ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
-  Ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+  Ldr(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+  Ldr(vector, FieldMemOperand(vector, Cell::kValueOffset));
 }
 
 
@@ -2709,13 +2632,13 @@
 
 
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
-  DCHECK(jssp.Is(StackPointer()));
   UseScratchRegisterScope temps(this);
   Register type_reg = temps.AcquireX();
   Register code_reg = temps.AcquireX();
 
   if (type == StackFrame::INTERNAL) {
-    Mov(type_reg, Smi::FromInt(type));
+    DCHECK(jssp.Is(StackPointer()));
+    Mov(type_reg, StackFrame::TypeToMarker(type));
     Push(lr, fp);
     Push(type_reg);
     Mov(code_reg, Operand(CodeObject()));
@@ -2725,8 +2648,19 @@
     // jssp[3] : fp
     // jssp[1] : type
     // jssp[0] : [code object]
+  } else if (type == StackFrame::WASM_COMPILED) {
+    DCHECK(csp.Is(StackPointer()));
+    Mov(type_reg, StackFrame::TypeToMarker(type));
+    Push(lr, fp);
+    Mov(fp, csp);
+    Push(type_reg, xzr);
+    // csp[3] : lr
+    // csp[2] : fp
+    // csp[1] : type
+    // csp[0] : for alignment
   } else {
-    Mov(type_reg, Smi::FromInt(type));
+    DCHECK(jssp.Is(StackPointer()));
+    Mov(type_reg, StackFrame::TypeToMarker(type));
     Push(lr, fp);
     Push(type_reg);
     Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
@@ -2738,12 +2672,19 @@
 
 
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
-  DCHECK(jssp.Is(StackPointer()));
-  // Drop the execution stack down to the frame pointer and restore
-  // the caller frame pointer and return address.
-  Mov(jssp, fp);
-  AssertStackConsistency();
-  Pop(fp, lr);
+  if (type == StackFrame::WASM_COMPILED) {
+    DCHECK(csp.Is(StackPointer()));
+    Mov(csp, fp);
+    AssertStackConsistency();
+    Pop(fp, lr);
+  } else {
+    DCHECK(jssp.Is(StackPointer()));
+    // Drop the execution stack down to the frame pointer and restore
+    // the caller frame pointer and return address.
+    Mov(jssp, fp);
+    AssertStackConsistency();
+    Pop(fp, lr);
+  }
 }
 
 
@@ -2790,7 +2731,7 @@
   // Set up the new stack frame.
   Push(lr, fp);
   Mov(fp, StackPointer());
-  Mov(scratch, Smi::FromInt(frame_type));
+  Mov(scratch, StackFrame::TypeToMarker(frame_type));
   Push(scratch);
   Push(xzr);
   Mov(scratch, Operand(CodeObject()));
@@ -2937,16 +2878,17 @@
   }
 }
 
-
-void MacroAssembler::DebugBreak() {
-  Mov(x0, 0);
-  Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
-  CEntryStub ces(isolate(), 1);
-  DCHECK(AllowThisStubCall(&ces));
-  Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+  // Check whether we need to drop frames to restart a function on the stack.
+  ExternalReference restart_fp =
+      ExternalReference::debug_restart_fp_address(isolate());
+  Mov(x1, Operand(restart_fp));
+  Ldr(x1, MemOperand(x1));
+  Tst(x1, x1);
+  Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+       ne);
 }
 
-
 void MacroAssembler::PushStackHandler() {
   DCHECK(jssp.Is(StackPointer()));
   // Adjust this code if the asserts don't hold.
@@ -3208,114 +3150,6 @@
   ObjectTag(result, result);
 }
 
-void MacroAssembler::AllocateTwoByteString(Register result,
-                                           Register length,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  Add(scratch1, length, length);  // Length in bytes, not chars.
-  Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
-  Bic(scratch1, scratch1, kObjectAlignmentMask);
-
-  // Allocate two-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  InitializeNewString(result,
-                      length,
-                      Heap::kStringMapRootIndex,
-                      scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
-                                           Register scratch1, Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  STATIC_ASSERT(kCharSize == 1);
-  Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
-  Bic(scratch1, scratch1, kObjectAlignmentMask);
-
-  // Allocate one-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
-                                               Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result,
-                      length,
-                      Heap::kConsStringMapRootIndex,
-                      scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
-                                                 Register length,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  DCHECK(!AreAliased(result, length, scratch1, scratch2));
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result,
-                      length,
-                      Heap::kSlicedStringMapRootIndex,
-                      scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
-                                                 Register length,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  DCHECK(!AreAliased(result, length, scratch1, scratch2));
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
 // Allocates a heap number or jumps to the need_gc label if the young space
 // is full and a scavenge is needed.
 void MacroAssembler::AllocateHeapNumber(Register result,
@@ -3564,32 +3398,6 @@
   Bind(&done);
 }
 
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
-                                             Register scratch, Label* miss) {
-  DCHECK(!AreAliased(function, result, scratch));
-
-  // Get the prototype or initial map from the function.
-  Ldr(result,
-      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // If the prototype or initial map is the hole, don't return it and simply
-  // miss the cache instead. This will allow us to allocate a prototype object
-  // on-demand in the runtime system.
-  JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
-
-  // If the function does not have an initial map, we're done.
-  Label done;
-  JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
-
-  // Get the prototype from the initial map.
-  Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
-  // All done.
-  Bind(&done);
-}
-
-
 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
   UseScratchRegisterScope temps(this);
   Register temp = temps.AcquireX();
@@ -3664,59 +3472,6 @@
   }
 }
 
-void MacroAssembler::CheckFastObjectElements(Register map,
-                                             Register scratch,
-                                             Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-  // If cond==ls, set cond=hi, otherwise compare.
-  Ccmp(scratch,
-       Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
-  B(hi, fail);
-}
-
-
-// Note: The ARM version of this clobbers elements_reg, but this version does
-// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
-                                                 Register key_reg,
-                                                 Register elements_reg,
-                                                 Register scratch1,
-                                                 FPRegister fpscratch1,
-                                                 Label* fail,
-                                                 int elements_offset) {
-  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
-  Label store_num;
-
-  // Speculatively convert the smi to a double - all smis can be exactly
-  // represented as a double.
-  SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
-
-  // If value_reg is a smi, we're done.
-  JumpIfSmi(value_reg, &store_num);
-
-  // Ensure that the object is a heap number.
-  JumpIfNotHeapNumber(value_reg, fail);
-
-  Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
-  // Canonicalize NaNs.
-  CanonicalizeNaN(fpscratch1);
-
-  // Store the result.
-  Bind(&store_num);
-  Add(scratch1, elements_reg,
-      Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
-  Str(fpscratch1,
-      FieldMemOperand(scratch1,
-                      FixedDoubleArray::kHeaderSize - elements_offset));
-}
-
-
 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
   return has_frame_ || !stub->SometimesSetsUpAFrame();
 }
@@ -4276,39 +4031,6 @@
   HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
 }
 
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
-    Register object,
-    Register scratch0,
-    Register scratch1,
-    Label* found) {
-  DCHECK(!AreAliased(object, scratch0, scratch1));
-  Register current = scratch0;
-  Label loop_again, end;
-
-  // Scratch contains elements pointer.
-  Mov(current, object);
-  Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
-  Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
-  CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);
-
-  // Loop based on the map going up the prototype chain.
-  Bind(&loop_again);
-  Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
-  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
-  B(lo, found);
-  Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
-  DecodeField<Map::ElementsKindBits>(scratch1);
-  CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
-  Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
-  CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);
-
-  Bind(&end);
-}
-
-
 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
                                  Register shift_scratch, Register load_scratch,
                                  Register length_scratch,
@@ -4471,30 +4193,6 @@
   TmpList()->set_list(old_tmp_list);
 }
 
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind,
-    ElementsKind transitioned_kind,
-    Register map_in_out,
-    Register scratch1,
-    Register scratch2,
-    Label* no_map_match) {
-  DCHECK(IsFastElementsKind(expected_kind));
-  DCHECK(IsFastElementsKind(transitioned_kind));
-
-  // Check that the function's map is the same as the expected cached map.
-  Ldr(scratch1, NativeContextMemOperand());
-  Ldr(scratch2,
-      ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
-  Cmp(map_in_out, scratch2);
-  B(ne, no_map_match);
-
-  // Use the transitioned cached map.
-  Ldr(map_in_out,
-      ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
   Ldr(dst, NativeContextMemOperand());
   Ldr(dst, ContextMemOperand(dst, index));
@@ -4912,9 +4610,8 @@
   }
 }
 
-
 InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
-    : reg_(NoReg), smi_check_(NULL) {
+    : reg_(NoReg), smi_check_delta_(0), smi_check_(NULL) {
   InstructionSequence* inline_data = InstructionSequence::At(info);
   DCHECK(inline_data->IsInlineData());
   if (inline_data->IsInlineData()) {
@@ -4926,9 +4623,9 @@
       uint32_t payload32 = static_cast<uint32_t>(payload);
       int reg_code = RegisterBits::decode(payload32);
       reg_ = Register::XRegFromCode(reg_code);
-      int smi_check_delta = DeltaBits::decode(payload32);
-      DCHECK(smi_check_delta != 0);
-      smi_check_ = inline_data->preceding(smi_check_delta);
+      smi_check_delta_ = DeltaBits::decode(payload32);
+      DCHECK_NE(0, smi_check_delta_);
+      smi_check_ = inline_data->preceding(smi_check_delta_);
     }
   }
 }
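
The invoke-path change above swaps the old step-flooding check (compare last_step_action against StepIn) for a single flag test: CheckDebugHook loads the byte at debug_hook_on_function_call_address and, when it is non-zero, calls Runtime::kDebugOnFunctionCall before the invoke proceeds; the caller additionally guards the check with call_wrapper.NeedsDebugHookCheck(). A rough C++ model of that control flow, with the flag, runtime call, and invoke as illustrative stand-ins for the generated assembly:

#include <cstdint>
#include <cstdio>

// Stand-in for the byte at debug_hook_on_function_call_address; the debugger
// writes it, generated code only reads it.
int8_t debug_hook_on_function_call = 0;

// Stand-in for Runtime::kDebugOnFunctionCall.
void RuntimeDebugOnFunctionCall() { std::puts("debug hook ran"); }

void InvokeFunction() {
  // CompareAndBranch(x4, Operand(0), eq, &skip_hook) in the assembly above:
  // fall through to the runtime call only when the hook flag is set.
  if (debug_hook_on_function_call != 0) RuntimeDebugOnFunctionCall();
  std::puts("function invoked");
}

int main() {
  InvokeFunction();                 // hook clear: plain invoke
  debug_hook_on_function_call = 1;  // debugger enables the hook
  InvokeFunction();                 // hook set: runtime is notified first
  return 0;
}
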
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index a89c106..560a824 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -1101,24 +1101,6 @@
 
   // ---- String Utilities ----
 
-
-  // Jump to label if either object is not a sequential one-byte string.
-  // Optionally perform a smi check on the objects first.
-  void JumpIfEitherIsNotSequentialOneByteStrings(
-      Register first, Register second, Register scratch1, Register scratch2,
-      Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);
-
-  // Check if instance type is sequential one-byte string and jump to label if
-  // it is not.
-  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
-                                                Label* failure);
-
-  // Checks if both instance types are sequential one-byte strings and jumps to
-  // label if either is not.
-  void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
-      Register first_object_instance_type, Register second_object_instance_type,
-      Register scratch1, Register scratch2, Label* failure);
-
   // Checks if both instance types are sequential one-byte strings and jumps to
   // label if either is not.
   void JumpIfBothInstanceTypesAreNotSequentialOneByte(
@@ -1227,9 +1209,11 @@
                       InvokeFlag flag,
                       bool* definitely_mismatches,
                       const CallWrapper& call_wrapper);
-  void FloodFunctionIfStepping(Register fun, Register new_target,
-                               const ParameterCount& expected,
-                               const ParameterCount& actual);
+
+  // On function call, call into the debugger if necessary.
+  void CheckDebugHook(Register fun, Register new_target,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual);
   void InvokeFunctionCode(Register function, Register new_target,
                           const ParameterCount& expected,
                           const ParameterCount& actual, InvokeFlag flag,
@@ -1316,12 +1300,9 @@
     MacroAssembler* masm_;
   };
 
-  // ---------------------------------------------------------------------------
-  // Debugger Support
+  // Frame restart support
+  void MaybeDropFrames();
 
-  void DebugBreak();
-
-  // ---------------------------------------------------------------------------
   // Exception handling
 
   // Push a new stack handler and link into stack handler chain.
@@ -1360,32 +1341,6 @@
   void FastAllocate(int object_size, Register result, Register scratch1,
                     Register scratch2, AllocationFlags flags);
 
-  void AllocateTwoByteString(Register result,
-                             Register length,
-                             Register scratch1,
-                             Register scratch2,
-                             Register scratch3,
-                             Label* gc_required);
-  void AllocateOneByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-  void AllocateTwoByteConsString(Register result,
-                                 Register length,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Label* gc_required);
-  void AllocateOneByteConsString(Register result, Register length,
-                                 Register scratch1, Register scratch2,
-                                 Label* gc_required);
-  void AllocateTwoByteSlicedString(Register result,
-                                   Register length,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Label* gc_required);
-  void AllocateOneByteSlicedString(Register result, Register length,
-                                   Register scratch1, Register scratch2,
-                                   Label* gc_required);
-
   // Allocates a heap number or jumps to the gc_required label if the young
   // space is full and a scavenge is needed.
   // All registers are clobbered.
@@ -1413,9 +1368,6 @@
   void GetMapConstructor(Register result, Register map, Register temp,
                          Register temp2);
 
-  void TryGetFunctionPrototype(Register function, Register result,
-                               Register scratch, Label* miss);
-
   // Compare object type for heap object.  heap_object contains a non-Smi
   // whose object type should be compared with the given type.  This both
   // sets the flags and leaves the object type in the type_reg register.
@@ -1566,21 +1518,6 @@
                     Label* if_any_set,
                     Label* fall_through);
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map, Register scratch, Label* fail);
-
-  // Check to see if number can be stored as a double in FastDoubleElements.
-  // If it can, store it at the index specified by key_reg in the array,
-  // otherwise jump to fail.
-  void StoreNumberToDoubleElements(Register value_reg,
-                                   Register key_reg,
-                                   Register elements_reg,
-                                   Register scratch1,
-                                   FPRegister fpscratch1,
-                                   Label* fail,
-                                   int elements_offset = 0);
-
   // ---------------------------------------------------------------------------
   // Inline caching support.
 
@@ -1598,7 +1535,7 @@
   // Frames.
 
   // Load the type feedback vector from a JavaScript frame.
-  void EmitLoadTypeFeedbackVector(Register vector);
+  void EmitLoadFeedbackVector(Register vector);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -1624,17 +1561,6 @@
                                        Register scratch2,
                                        Label* no_memento_found);
 
-  void JumpIfJSArrayHasAllocationMemento(Register receiver,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Label* memento_found) {
-    Label no_memento_found;
-    TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
-                                    &no_memento_found);
-    B(eq, memento_found);
-    Bind(&no_memento_found);
-  }
-
   // The stack pointer has to switch between csp and jssp when setting up and
   // destroying the exit frame. Hence preserving/restoring the registers is
   // slightly more complicated than simple push/pop operations.
@@ -1902,18 +1828,6 @@
   // Print a message to stderr and abort execution.
   void Abort(BailoutReason reason);
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the native context if the map in register
-  // map_in_out is the cached Array map in the native context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(
-      ElementsKind expected_kind,
-      ElementsKind transitioned_kind,
-      Register map_in_out,
-      Register scratch1,
-      Register scratch2,
-      Label* no_map_match);
-
   void LoadNativeContextSlot(int index, Register dst);
 
   // Load the initial map from the global function. The registers function and
@@ -2002,10 +1916,6 @@
   // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
   static bool IsYoungSequence(Isolate* isolate, byte* sequence);
 
-  // Jumps to found label if a prototype map has dictionary elements.
-  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
-                                        Register scratch1, Label* found);
-
   // Perform necessary maintenance operations before a push or after a pop.
   //
   // Note that size is specified in bytes.
@@ -2086,12 +1996,6 @@
   CPURegList tmp_list_;
   CPURegList fptmp_list_;
 
-  void InitializeNewString(Register string,
-                           Register length,
-                           Heap::RootListIndex map_index,
-                           Register scratch1,
-                           Register scratch2);
-
  public:
   // Far branches resolving.
   //
@@ -2241,6 +2145,8 @@
     return smi_check_;
   }
 
+  int SmiCheckDelta() const { return smi_check_delta_; }
+
   // Use MacroAssembler::InlineData to emit information about patchable inline
   // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
   // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
@@ -2258,6 +2164,7 @@
 
  private:
   Register reg_;
+  int smi_check_delta_;
   Instruction* smi_check_;
 
   // Fields in the data encoded by InlineData.
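
Both files also rename EmitLoadTypeFeedbackVector to EmitLoadFeedbackVector, matching a new load chain: the vector is now reached from the JSFunction through a Cell's value slot rather than through a LiteralsArray. A toy model of the two field loads; the struct layouts are illustrative, not V8's real object layout.

#include <cstdio>

struct FeedbackVector { int slot_count; };
struct Cell { FeedbackVector* value; };             // Cell::kValueOffset
struct JSFunction { Cell* feedback_vector_cell; };  // JSFunction::kFeedbackVectorOffset

FeedbackVector* EmitLoadFeedbackVector(JSFunction* function) {
  // First Ldr: JSFunction -> feedback vector cell.
  Cell* cell = function->feedback_vector_cell;
  // Second Ldr: Cell -> feedback vector.
  return cell->value;
}

int main() {
  FeedbackVector vector{8};
  Cell cell{&vector};
  JSFunction function{&cell};
  std::printf("slots: %d\n", EmitLoadFeedbackVector(&function)->slot_count);
  return 0;
}
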
diff --git a/src/asmjs/OWNERS b/src/asmjs/OWNERS
index 78e688d..b994be3 100644
--- a/src/asmjs/OWNERS
+++ b/src/asmjs/OWNERS
@@ -4,6 +4,7 @@
 
 ahaas@chromium.org
 bradnelson@chromium.org
+clemensh@chromium.org
 jpp@chromium.org
 mtrofin@chromium.org
 rossberg@chromium.org
diff --git a/src/asmjs/asm-js.cc b/src/asmjs/asm-js.cc
index 13f936d..95d1e8a 100644
--- a/src/asmjs/asm-js.cc
+++ b/src/asmjs/asm-js.cc
@@ -9,10 +9,13 @@
 #include "src/asmjs/asm-typer.h"
 #include "src/asmjs/asm-wasm-builder.h"
 #include "src/assert-scope.h"
+#include "src/base/platform/elapsed-timer.h"
+#include "src/compilation-info.h"
 #include "src/execution.h"
 #include "src/factory.h"
 #include "src/handles.h"
 #include "src/isolate.h"
+#include "src/objects-inl.h"
 #include "src/objects.h"
 #include "src/parsing/parse-info.h"
 
@@ -31,6 +34,15 @@
 namespace internal {
 
 namespace {
+enum WasmDataEntries {
+  kWasmDataCompiledModule,
+  kWasmDataForeignGlobals,
+  kWasmDataUsesArray,
+  kWasmDataScript,
+  kWasmDataScriptPosition,
+  kWasmDataEntryCount,
+};
+
 Handle<i::Object> StdlibMathMember(i::Isolate* isolate,
                                    Handle<JSReceiver> stdlib,
                                    Handle<Name> name) {
@@ -151,29 +163,41 @@
 
 }  // namespace
 
-MaybeHandle<FixedArray> AsmJs::ConvertAsmToWasm(ParseInfo* info) {
+MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
   ErrorThrower thrower(info->isolate(), "Asm.js -> WebAssembly conversion");
-  wasm::AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
-                       info->literal());
-  if (!typer.Validate()) {
+  base::ElapsedTimer asm_wasm_timer;
+  asm_wasm_timer.Start();
+  wasm::AsmWasmBuilder builder(info);
+  Handle<FixedArray> foreign_globals;
+  auto asm_wasm_result = builder.Run(&foreign_globals);
+  if (!asm_wasm_result.success) {
     DCHECK(!info->isolate()->has_pending_exception());
-    PrintF("Validation of asm.js module failed: %s", typer.error_message());
+    if (!FLAG_suppress_asm_messages) {
+      MessageHandler::ReportMessage(info->isolate(),
+                                    builder.typer()->message_location(),
+                                    builder.typer()->error_message());
+    }
     return MaybeHandle<FixedArray>();
   }
-  v8::internal::wasm::AsmWasmBuilder builder(info->isolate(), info->zone(),
-                                             info->literal(), &typer);
-  i::Handle<i::FixedArray> foreign_globals;
-  auto asm_wasm_result = builder.Run(&foreign_globals);
+  double asm_wasm_time = asm_wasm_timer.Elapsed().InMillisecondsF();
+
   wasm::ZoneBuffer* module = asm_wasm_result.module_bytes;
   wasm::ZoneBuffer* asm_offsets = asm_wasm_result.asm_offset_table;
+  Vector<const byte> asm_offsets_vec(asm_offsets->begin(),
+                                     static_cast<int>(asm_offsets->size()));
 
-  i::MaybeHandle<i::JSObject> compiled = wasm::CreateModuleObjectFromBytes(
-      info->isolate(), module->begin(), module->end(), &thrower,
-      internal::wasm::kAsmJsOrigin, info->script(), asm_offsets->begin(),
-      asm_offsets->end());
+  base::ElapsedTimer compile_timer;
+  compile_timer.Start();
+  MaybeHandle<JSObject> compiled = SyncCompileTranslatedAsmJs(
+      info->isolate(), &thrower,
+      wasm::ModuleWireBytes(module->begin(), module->end()), info->script(),
+      asm_offsets_vec);
   DCHECK(!compiled.is_null());
+  double compile_time = compile_timer.Elapsed().InMillisecondsF();
+  DCHECK_GE(module->end(), module->begin());
+  uintptr_t wasm_size = module->end() - module->begin();
 
-  wasm::AsmTyper::StdlibSet uses = typer.StdlibUses();
+  wasm::AsmTyper::StdlibSet uses = builder.typer()->StdlibUses();
   Handle<FixedArray> uses_array =
       info->isolate()->factory()->NewFixedArray(static_cast<int>(uses.size()));
   int count = 0;
@@ -181,16 +205,45 @@
     uses_array->set(count++, Smi::FromInt(i));
   }
 
-  Handle<FixedArray> result = info->isolate()->factory()->NewFixedArray(3);
-  result->set(0, *compiled.ToHandleChecked());
-  result->set(1, *foreign_globals);
-  result->set(2, *uses_array);
+  Handle<FixedArray> result =
+      info->isolate()->factory()->NewFixedArray(kWasmDataEntryCount);
+  result->set(kWasmDataCompiledModule, *compiled.ToHandleChecked());
+  result->set(kWasmDataForeignGlobals, *foreign_globals);
+  result->set(kWasmDataUsesArray, *uses_array);
+  result->set(kWasmDataScript, *info->script());
+  result->set(kWasmDataScriptPosition,
+              Smi::FromInt(info->literal()->position()));
+
+  MessageLocation location(info->script(), info->literal()->position(),
+                           info->literal()->position());
+  char text[100];
+  int length;
+  if (FLAG_predictable) {
+    length = base::OS::SNPrintF(text, arraysize(text), "success");
+  } else {
+    length = base::OS::SNPrintF(
+        text, arraysize(text),
+        "success, asm->wasm: %0.3f ms, compile: %0.3f ms, %" PRIuPTR " bytes",
+        asm_wasm_time, compile_time, wasm_size);
+  }
+  DCHECK_NE(-1, length);
+  USE(length);
+  Handle<String> stext(info->isolate()->factory()->InternalizeUtf8String(text));
+  Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
+      info->isolate(), MessageTemplate::kAsmJsCompiled, &location, stext,
+      Handle<JSArray>::null());
+  message->set_error_level(v8::Isolate::kMessageInfo);
+  if (!FLAG_suppress_asm_messages && FLAG_trace_asm_time) {
+    MessageHandler::ReportMessage(info->isolate(), &location, message);
+  }
+
   return result;
 }
 
 bool AsmJs::IsStdlibValid(i::Isolate* isolate, Handle<FixedArray> wasm_data,
                           Handle<JSReceiver> stdlib) {
-  i::Handle<i::FixedArray> uses(i::FixedArray::cast(wasm_data->get(2)));
+  i::Handle<i::FixedArray> uses(
+      i::FixedArray::cast(wasm_data->get(kWasmDataUsesArray)));
   for (int i = 0; i < uses->length(); ++i) {
     if (!IsStdlibMemberValid(isolate, stdlib,
                              uses->GetValueChecked<i::Object>(isolate, i))) {
@@ -204,28 +257,37 @@
                                               Handle<FixedArray> wasm_data,
                                               Handle<JSArrayBuffer> memory,
                                               Handle<JSReceiver> foreign) {
-  i::Handle<i::JSObject> module(i::JSObject::cast(wasm_data->get(0)));
+  base::ElapsedTimer instantiate_timer;
+  instantiate_timer.Start();
+  i::Handle<i::WasmModuleObject> module(
+      i::WasmModuleObject::cast(wasm_data->get(kWasmDataCompiledModule)));
   i::Handle<i::FixedArray> foreign_globals(
-      i::FixedArray::cast(wasm_data->get(1)));
+      i::FixedArray::cast(wasm_data->get(kWasmDataForeignGlobals)));
 
   ErrorThrower thrower(isolate, "Asm.js -> WebAssembly instantiation");
 
-  i::MaybeHandle<i::JSObject> maybe_module_object =
-      i::wasm::WasmModule::Instantiate(isolate, &thrower, module, foreign,
-                                       memory);
+  // Create the ffi object for foreign functions {"": foreign}.
+  Handle<JSObject> ffi_object;
+  if (!foreign.is_null()) {
+    Handle<JSFunction> object_function = Handle<JSFunction>(
+        isolate->native_context()->object_function(), isolate);
+    ffi_object = isolate->factory()->NewJSObject(object_function);
+    JSObject::AddProperty(ffi_object, isolate->factory()->empty_string(),
+                          foreign, NONE);
+  }
+
+  i::MaybeHandle<i::Object> maybe_module_object =
+      i::wasm::SyncInstantiate(isolate, &thrower, module, ffi_object, memory);
   if (maybe_module_object.is_null()) {
     return MaybeHandle<Object>();
   }
+  i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();
 
   i::Handle<i::Name> init_name(isolate->factory()->InternalizeUtf8String(
       wasm::AsmWasmBuilder::foreign_init_name));
+  i::Handle<i::Object> init =
+      i::Object::GetProperty(module_object, init_name).ToHandleChecked();
 
-  i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();
-  i::MaybeHandle<i::Object> maybe_init =
-      i::Object::GetProperty(module_object, init_name);
-  DCHECK(!maybe_init.is_null());
-
-  i::Handle<i::Object> init = maybe_init.ToHandleChecked();
   i::Handle<i::Object> undefined(isolate->heap()->undefined_value(), isolate);
   i::Handle<i::Object>* foreign_args_array =
       new i::Handle<i::Object>[foreign_globals->length()];
@@ -258,7 +320,35 @@
       !single_function.ToHandleChecked()->IsUndefined(isolate)) {
     return single_function;
   }
-  return module_object;
+
+  i::Handle<i::Script> script(i::Script::cast(wasm_data->get(kWasmDataScript)));
+  int32_t position = 0;
+  if (!wasm_data->get(kWasmDataScriptPosition)->ToInt32(&position)) {
+    UNREACHABLE();
+  }
+  MessageLocation location(script, position, position);
+  char text[50];
+  int length;
+  if (FLAG_predictable) {
+    length = base::OS::SNPrintF(text, arraysize(text), "success");
+  } else {
+    length = base::OS::SNPrintF(text, arraysize(text), "success, %0.3f ms",
+                                instantiate_timer.Elapsed().InMillisecondsF());
+  }
+  DCHECK_NE(-1, length);
+  USE(length);
+  Handle<String> stext(isolate->factory()->InternalizeUtf8String(text));
+  Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
+      isolate, MessageTemplate::kAsmJsInstantiated, &location, stext,
+      Handle<JSArray>::null());
+  message->set_error_level(v8::Isolate::kMessageInfo);
+  if (!FLAG_suppress_asm_messages && FLAG_trace_asm_time) {
+    MessageHandler::ReportMessage(isolate, &location, message);
+  }
+
+  Handle<String> exports_name =
+      isolate->factory()->InternalizeUtf8String("exports");
+  return i::Object::GetProperty(module_object, exports_name);
 }
 
 }  // namespace internal
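
CompileAsmViaWasm now addresses the wasm_data FixedArray through the named WasmDataEntries enum instead of the magic indices 0..2 used before, and grows it by two entries (script and source position) to support the new success messages. A minimal sketch of that layout, with plain strings standing in for V8 handles:

#include <array>
#include <cstdio>

// Same entry order as the WasmDataEntries enum added above.
enum WasmDataEntries {
  kWasmDataCompiledModule,
  kWasmDataForeignGlobals,
  kWasmDataUsesArray,
  kWasmDataScript,
  kWasmDataScriptPosition,
  kWasmDataEntryCount,
};

int main() {
  // Strings stand in for the handles stored by CompileAsmViaWasm.
  std::array<const char*, kWasmDataEntryCount> wasm_data = {
      "<compiled module>", "<foreign globals>", "<stdlib uses>",
      "<script>",          "<source position>",
  };
  // Consumers such as InstantiateAsmWasm read entries by name, e.g.:
  std::printf("module: %s\n", wasm_data[kWasmDataCompiledModule]);
  std::printf("script: %s\n", wasm_data[kWasmDataScript]);
  return 0;
}
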
diff --git a/src/asmjs/asm-js.h b/src/asmjs/asm-js.h
index a2c5cec..a7795dc 100644
--- a/src/asmjs/asm-js.h
+++ b/src/asmjs/asm-js.h
@@ -10,13 +10,13 @@
 namespace v8 {
 namespace internal {
 
+class CompilationInfo;
 class JSArrayBuffer;
-class ParseInfo;
 
 // Interface to compile and instantiate for asmjs.
 class AsmJs {
  public:
-  static MaybeHandle<FixedArray> ConvertAsmToWasm(ParseInfo* info);
+  static MaybeHandle<FixedArray> CompileAsmViaWasm(CompilationInfo* info);
   static bool IsStdlibValid(Isolate* isolate, Handle<FixedArray> wasm_data,
                             Handle<JSReceiver> stdlib);
   static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,
diff --git a/src/asmjs/asm-typer.cc b/src/asmjs/asm-typer.cc
index 55b5fc7..1d18360 100644
--- a/src/asmjs/asm-typer.cc
+++ b/src/asmjs/asm-typer.cc
@@ -9,6 +9,7 @@
 #include <memory>
 #include <string>
 
+#include "include/v8.h"
 #include "src/v8.h"
 
 #include "src/asmjs/asm-types.h"
@@ -17,18 +18,34 @@
 #include "src/base/bits.h"
 #include "src/codegen.h"
 #include "src/globals.h"
+#include "src/messages.h"
+#include "src/objects-inl.h"
 #include "src/utils.h"
+#include "src/vector.h"
 
-#define FAIL(node, msg)                                        \
-  do {                                                         \
-    int line = node->position() == kNoSourcePosition           \
-                   ? -1                                        \
-                   : script_->GetLineNumber(node->position()); \
-    base::OS::SNPrintF(error_message_, sizeof(error_message_), \
-                       "asm: line %d: %s\n", line + 1, msg);   \
-    return AsmType::None();                                    \
+#define FAIL_LOCATION_RAW(location, msg)                               \
+  do {                                                                 \
+    Handle<String> message(                                            \
+        isolate_->factory()->InternalizeOneByteString(msg));           \
+    error_message_ = MessageHandler::MakeMessageObject(                \
+        isolate_, MessageTemplate::kAsmJsInvalid, (location), message, \
+        Handle<JSArray>::null());                                      \
+    error_message_->set_error_level(v8::Isolate::kMessageWarning);     \
+    message_location_ = *(location);                                   \
+    return AsmType::None();                                            \
   } while (false)
 
+#define FAIL_RAW(node, msg)                                                \
+  do {                                                                     \
+    MessageLocation location(script_, node->position(), node->position()); \
+    FAIL_LOCATION_RAW(&location, msg);                                     \
+  } while (false)
+
+#define FAIL_LOCATION(location, msg) \
+  FAIL_LOCATION_RAW(location, STATIC_CHAR_VECTOR(msg))
+
+#define FAIL(node, msg) FAIL_RAW(node, STATIC_CHAR_VECTOR(msg))
+
 #define RECURSE(call)                                             \
   do {                                                            \
     if (GetCurrentStackPosition() < stack_limit_) {               \
@@ -91,6 +108,53 @@
 }
 
 // ----------------------------------------------------------------------------
+// Implementation of AsmTyper::SourceLayoutTracker
+
+bool AsmTyper::SourceLayoutTracker::IsValid() const {
+  const Section* kAllSections[] = {&use_asm_, &globals_, &functions_, &tables_,
+                                   &exports_};
+  for (size_t ii = 0; ii < arraysize(kAllSections); ++ii) {
+    const auto& curr_section = *kAllSections[ii];
+    for (size_t jj = ii + 1; jj < arraysize(kAllSections); ++jj) {
+      if (curr_section.IsPrecededBy(*kAllSections[jj])) {
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+void AsmTyper::SourceLayoutTracker::Section::AddNewElement(
+    const AstNode& node) {
+  const int node_pos = node.position();
+  if (start_ == kNoSourcePosition) {
+    start_ = node_pos;
+  } else {
+    start_ = std::min(start_, node_pos);
+  }
+  if (end_ == kNoSourcePosition) {
+    end_ = node_pos;
+  } else {
+    end_ = std::max(end_, node_pos);
+  }
+}
+
+bool AsmTyper::SourceLayoutTracker::Section::IsPrecededBy(
+    const Section& other) const {
+  if (start_ == kNoSourcePosition) {
+    DCHECK_EQ(end_, kNoSourcePosition);
+    return false;
+  }
+  if (other.start_ == kNoSourcePosition) {
+    DCHECK_EQ(other.end_, kNoSourcePosition);
+    return false;
+  }
+  DCHECK_LE(start_, end_);
+  DCHECK_LE(other.start_, other.end_);
+  return other.start_ <= end_;
+}
+
+// ----------------------------------------------------------------------------
 // Implementation of AsmTyper::VariableInfo
 
 AsmTyper::VariableInfo* AsmTyper::VariableInfo::ForSpecialSymbol(
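
SourceLayoutTracker is promoted from a file-local helper (deleted further down) into AsmTyper itself, and its checks are tightened: AddNewElement now keeps a true minimum for start_ where the old code took std::max, and the overlap test becomes an ordering test, IsPrecededBy, which rejects a later section that starts at or before the end of an earlier one. A standalone model of the corrected check (field names simplified):

#include <algorithm>
#include <cstdio>

constexpr int kNoSourcePosition = -1;

// Track the min/max source position of the elements added to a section.
struct Section {
  int start = kNoSourcePosition;
  int end = kNoSourcePosition;

  void AddNewElement(int pos) {
    start = (start == kNoSourcePosition) ? pos : std::min(start, pos);
    end = (end == kNoSourcePosition) ? pos : std::max(end, pos);
  }

  // True when `other` (a section that must come later) starts at or before
  // this section's end; empty sections are never out of order.
  bool IsPrecededBy(const Section& other) const {
    if (start == kNoSourcePosition || other.start == kNoSourcePosition) {
      return false;
    }
    return other.start <= end;
  }
};

int main() {
  Section globals, functions;  // required order: globals, then functions
  globals.AddNewElement(10);
  functions.AddNewElement(20);
  std::printf("invalid? %d\n", static_cast<int>(globals.IsPrecededBy(functions)));  // 0
  globals.AddNewElement(30);   // a global declared after a function
  std::printf("invalid? %d\n", static_cast<int>(globals.IsPrecededBy(functions)));  // 1
  return 0;
}
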
@@ -112,16 +176,16 @@
   return new_var_info;
 }
 
-void AsmTyper::VariableInfo::FirstForwardUseIs(VariableProxy* var) {
-  DCHECK(first_forward_use_ == nullptr);
+void AsmTyper::VariableInfo::SetFirstForwardUse(
+    const MessageLocation& source_location) {
   missing_definition_ = true;
-  first_forward_use_ = var;
+  source_location_ = source_location;
 }
 
 // ----------------------------------------------------------------------------
 // Implementation of AsmTyper
 
-AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
+AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Handle<Script> script,
                    FunctionLiteral* root)
     : isolate_(isolate),
       zone_(zone),
@@ -137,9 +201,9 @@
       local_scope_(ZoneHashMap::kDefaultHashMapCapacity,
                    ZoneAllocationPolicy(zone)),
       stack_limit_(isolate->stack_guard()->real_climit()),
-      node_types_(zone_),
       fround_type_(AsmType::FroundType(zone_)),
-      ffi_type_(AsmType::FFIType(zone_)) {
+      ffi_type_(AsmType::FFIType(zone_)),
+      function_pointer_tables_(zone_) {
   InitializeStdlib();
 }
 
@@ -283,6 +347,9 @@
 AsmTyper::VariableInfo* AsmTyper::ImportLookup(Property* import) {
   auto* obj = import->obj();
   auto* key = import->key()->AsLiteral();
+  if (key == nullptr) {
+    return nullptr;
+  }
 
   ObjectTypeMap* stdlib = &stdlib_types_;
   if (auto* obj_as_property = obj->AsProperty()) {
@@ -319,6 +386,10 @@
     return obj_info;
   }
 
+  if (!key->IsPropertyName()) {
+    return nullptr;
+  }
+
   std::unique_ptr<char[]> aname = key->AsPropertyName()->ToCString();
   ObjectTypeMap::iterator i = stdlib->find(std::string(aname.get()));
   if (i == stdlib->end()) {
@@ -345,7 +416,8 @@
 }
 
 void AsmTyper::AddForwardReference(VariableProxy* proxy, VariableInfo* info) {
-  info->FirstForwardUseIs(proxy);
+  MessageLocation location(script_, proxy->position(), proxy->position());
+  info->SetFirstForwardUse(location);
   forward_definitions_.push_back(info);
 }
 
@@ -390,22 +462,58 @@
 
 void AsmTyper::SetTypeOf(AstNode* node, AsmType* type) {
   DCHECK_NE(type, AsmType::None());
-  DCHECK(node_types_.find(node) == node_types_.end());
-  node_types_.insert(std::make_pair(node, type));
+  if (in_function_) {
+    DCHECK(function_node_types_.find(node) == function_node_types_.end());
+    function_node_types_.insert(std::make_pair(node, type));
+  } else {
+    DCHECK(module_node_types_.find(node) == module_node_types_.end());
+    module_node_types_.insert(std::make_pair(node, type));
+  }
 }
 
+namespace {
+bool IsLiteralDouble(Literal* literal) {
+  return literal->raw_value()->IsNumber() &&
+         literal->raw_value()->ContainsDot();
+}
+
+bool IsLiteralInt(Literal* literal) {
+  return literal->raw_value()->IsNumber() &&
+         !literal->raw_value()->ContainsDot();
+}
+
+bool IsLiteralMinus1(Literal* literal) {
+  return IsLiteralInt(literal) && literal->raw_value()->AsNumber() == -1.0;
+}
+
+bool IsLiteral1Dot0(Literal* literal) {
+  return IsLiteralDouble(literal) && literal->raw_value()->AsNumber() == 1.0;
+}
+
+bool IsLiteral0(Literal* literal) {
+  return IsLiteralInt(literal) && literal->raw_value()->AsNumber() == 0.0;
+}
+}  // namespace
+
 AsmType* AsmTyper::TypeOf(AstNode* node) const {
-  auto node_type_iter = node_types_.find(node);
-  if (node_type_iter != node_types_.end()) {
+  auto node_type_iter = function_node_types_.find(node);
+  if (node_type_iter != function_node_types_.end()) {
+    return node_type_iter->second;
+  }
+  node_type_iter = module_node_types_.find(node);
+  if (node_type_iter != module_node_types_.end()) {
     return node_type_iter->second;
   }
 
   // Sometimes literal nodes are not added to the node_type_ map simply because
   // they are not visited with ValidateExpression().
   if (auto* literal = node->AsLiteral()) {
-    if (literal->raw_value()->ContainsDot()) {
+    if (IsLiteralDouble(literal)) {
       return AsmType::Double();
     }
+    if (!IsLiteralInt(literal)) {
+      return AsmType::None();
+    }
     uint32_t u;
     if (literal->value()->ToUint32(&u)) {
       if (u > LargestFixNum) {
@@ -433,13 +541,41 @@
   return member;
 }
 
+AsmType* AsmTyper::FailWithMessage(const char* text) {
+  FAIL_RAW(root_, OneByteVector(text));
+}
+
 bool AsmTyper::Validate() {
-  if (!AsmType::None()->IsExactly(ValidateModule(root_))) {
+  return ValidateBeforeFunctionsPhase() &&
+         !AsmType::None()->IsExactly(ValidateModuleFunctions(root_)) &&
+         ValidateAfterFunctionsPhase();
+}
+
+bool AsmTyper::ValidateBeforeFunctionsPhase() {
+  if (!AsmType::None()->IsExactly(ValidateModuleBeforeFunctionsPhase(root_))) {
     return true;
   }
   return false;
 }
 
+bool AsmTyper::ValidateInnerFunction(FunctionDeclaration* fun_decl) {
+  if (!AsmType::None()->IsExactly(ValidateModuleFunction(fun_decl))) {
+    return true;
+  }
+  return false;
+}
+
+bool AsmTyper::ValidateAfterFunctionsPhase() {
+  if (!AsmType::None()->IsExactly(ValidateModuleAfterFunctionsPhase(root_))) {
+    return true;
+  }
+  return false;
+}
+
+void AsmTyper::ClearFunctionNodeTypes() { function_node_types_.clear(); }
+
+AsmType* AsmTyper::TriggerParsingError() { FAIL(root_, "Parsing error"); }
+
 namespace {
 bool IsUseAsmDirective(Statement* first_statement) {
   ExpressionStatement* use_asm = first_statement->AsExpressionStatement();
@@ -477,91 +613,12 @@
 }  // namespace
 
 // 6.1 ValidateModule
-namespace {
-// SourceLayoutTracker keeps track of the start and end positions of each
-// section in the asm.js source. The sections should not overlap, otherwise the
-// asm.js source is invalid.
-class SourceLayoutTracker {
- public:
-  SourceLayoutTracker() = default;
-
-  bool IsValid() const {
-    const Section* kAllSections[] = {&use_asm_, &globals_, &functions_,
-                                     &tables_, &exports_};
-    for (size_t ii = 0; ii < arraysize(kAllSections); ++ii) {
-      const auto& curr_section = *kAllSections[ii];
-      for (size_t jj = ii + 1; jj < arraysize(kAllSections); ++jj) {
-        if (curr_section.OverlapsWith(*kAllSections[jj])) {
-          return false;
-        }
-      }
-    }
-    return true;
-  }
-
-  void AddUseAsm(const AstNode& node) { use_asm_.AddNewElement(node); }
-
-  void AddGlobal(const AstNode& node) { globals_.AddNewElement(node); }
-
-  void AddFunction(const AstNode& node) { functions_.AddNewElement(node); }
-
-  void AddTable(const AstNode& node) { tables_.AddNewElement(node); }
-
-  void AddExport(const AstNode& node) { exports_.AddNewElement(node); }
-
- private:
-  class Section {
-   public:
-    Section() = default;
-    Section(const Section&) = default;
-    Section& operator=(const Section&) = default;
-
-    void AddNewElement(const AstNode& node) {
-      const int node_pos = node.position();
-      if (start_ == kNoSourcePosition) {
-        start_ = node_pos;
-      } else {
-        start_ = std::max(start_, node_pos);
-      }
-      if (end_ == kNoSourcePosition) {
-        end_ = node_pos;
-      } else {
-        end_ = std::max(end_, node_pos);
-      }
-    }
-
-    bool OverlapsWith(const Section& other) const {
-      if (start_ == kNoSourcePosition) {
-        DCHECK_EQ(end_, kNoSourcePosition);
-        return false;
-      }
-      if (other.start_ == kNoSourcePosition) {
-        DCHECK_EQ(other.end_, kNoSourcePosition);
-        return false;
-      }
-      return other.start_ < end_ || other.end_ < start_;
-    }
-
-   private:
-    int start_ = kNoSourcePosition;
-    int end_ = kNoSourcePosition;
-  };
-
-  Section use_asm_;
-  Section globals_;
-  Section functions_;
-  Section tables_;
-  Section exports_;
-
-  DISALLOW_COPY_AND_ASSIGN(SourceLayoutTracker);
-};
-}  // namespace
-
-AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
-  SourceLayoutTracker source_layout;
-
+AsmType* AsmTyper::ValidateModuleBeforeFunctionsPhase(FunctionLiteral* fun) {
   DeclarationScope* scope = fun->scope();
   if (!scope->is_function_scope()) FAIL(fun, "Not at function scope.");
+  if (scope->inner_scope_calls_eval()) {
+    FAIL(fun, "Invalid asm.js module using eval.");
+  }
   if (!ValidAsmIdentifier(fun->name()))
     FAIL(fun, "Invalid asm.js identifier in module name.");
   module_name_ = fun->name();
@@ -594,7 +651,6 @@
     }
   }
 
-  ZoneVector<Assignment*> function_pointer_tables(zone_);
   FlattenedStatements iter(zone_, fun->body());
   auto* use_asm_directive = iter.Next();
   if (use_asm_directive == nullptr) {
@@ -616,8 +672,8 @@
   if (!IsUseAsmDirective(use_asm_directive)) {
     FAIL(fun, "Missing \"use asm\".");
   }
-  source_layout.AddUseAsm(*use_asm_directive);
-  ReturnStatement* module_return = nullptr;
+  source_layout_.AddUseAsm(*use_asm_directive);
+  module_return_ = nullptr;
 
   // *VIOLATION* The spec states that globals should be followed by function
   // declarations, which should be followed by function pointer tables, followed
@@ -627,40 +683,57 @@
     if (auto* assign = ExtractInitializerExpression(current)) {
       if (assign->value()->IsArrayLiteral()) {
         // Save function tables for later validation.
-        function_pointer_tables.push_back(assign);
+        function_pointer_tables_.push_back(assign);
       } else {
         RECURSE(ValidateGlobalDeclaration(assign));
-        source_layout.AddGlobal(*assign);
+        source_layout_.AddGlobal(*assign);
       }
       continue;
     }
 
     if (auto* current_as_return = current->AsReturnStatement()) {
-      if (module_return != nullptr) {
+      if (module_return_ != nullptr) {
         FAIL(fun, "Multiple export statements.");
       }
-      module_return = current_as_return;
-      source_layout.AddExport(*module_return);
+      module_return_ = current_as_return;
+      source_layout_.AddExport(*module_return_);
       continue;
     }
 
     FAIL(current, "Invalid top-level statement in asm.js module.");
   }
 
+  return AsmType::Int();  // Any type that is not AsmType::None();
+}
+
+AsmType* AsmTyper::ValidateModuleFunction(FunctionDeclaration* fun_decl) {
+  RECURSE(ValidateFunction(fun_decl));
+  source_layout_.AddFunction(*fun_decl);
+
+  return AsmType::Int();  // Any type that is not AsmType::None();
+}
+
+AsmType* AsmTyper::ValidateModuleFunctions(FunctionLiteral* fun) {
+  DeclarationScope* scope = fun->scope();
   Declaration::List* decls = scope->declarations();
   for (Declaration* decl : *decls) {
     if (FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration()) {
-      RECURSE(ValidateFunction(fun_decl));
-      source_layout.AddFunction(*fun_decl);
+      RECURSE(ValidateModuleFunction(fun_decl));
       continue;
     }
   }
 
-  for (auto* function_table : function_pointer_tables) {
+  return AsmType::Int();  // Any type that is not AsmType::None();
+}
+
+AsmType* AsmTyper::ValidateModuleAfterFunctionsPhase(FunctionLiteral* fun) {
+  for (auto* function_table : function_pointer_tables_) {
     RECURSE(ValidateFunctionTable(function_table));
-    source_layout.AddTable(*function_table);
+    source_layout_.AddTable(*function_table);
   }
 
+  DeclarationScope* scope = fun->scope();
+  Declaration::List* decls = scope->declarations();
   for (Declaration* decl : *decls) {
     if (decl->IsFunctionDeclaration()) {
       continue;
@@ -682,20 +755,20 @@
   }
 
   // 6.2 ValidateExport
-  if (module_return == nullptr) {
+  if (module_return_ == nullptr) {
     FAIL(fun, "Missing asm.js module export.");
   }
 
   for (auto* forward_def : forward_definitions_) {
     if (forward_def->missing_definition()) {
-      FAIL(forward_def->first_forward_use(),
-           "Missing definition for forward declared identifier.");
+      FAIL_LOCATION(forward_def->source_location(),
+                    "Missing definition for forward declared identifier.");
     }
   }
 
-  RECURSE(ValidateExport(module_return));
+  RECURSE(ValidateExport(module_return_));
 
-  if (!source_layout.IsValid()) {
+  if (!source_layout_.IsValid()) {
     FAIL(fun, "Invalid asm.js source code layout.");
   }
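
Editor's note: the single-pass ValidateModule is split in this patch into before/functions/after phases so the wasm builder can interleave validation with code generation. A minimal sketch of the intended call sequence, assuming the public phase API declared later in this patch in asm-typer.h (the real driver is AsmWasmBuilderImpl::Build() in asm-wasm-builder.cc further down):

    // Sketch only: presumes the V8-internal types AsmTyper and
    // FunctionDeclaration, plus the phase methods declared in asm-typer.h.
    #include <vector>

    bool RunPhasedValidation(AsmTyper* typer,
                             const std::vector<FunctionDeclaration*>& decls) {
      if (!typer->ValidateBeforeFunctionsPhase()) return false;  // globals etc.
      for (FunctionDeclaration* decl : decls) {
        if (!typer->ValidateInnerFunction(decl)) return false;
        // ... emit wasm for `decl` here, while its node types are still live ...
        typer->ClearFunctionNodeTypes();  // per-function types are then dropped
      }
      return typer->ValidateAfterFunctionsPhase();  // tables, export, layout
    }
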
 
@@ -714,8 +787,7 @@
     return false;
   }
 
-  return right_as_literal->raw_value()->ContainsDot() &&
-         right_as_literal->raw_value()->AsNumber() == 1.0;
+  return IsLiteral1Dot0(right_as_literal);
 }
 
 bool IsIntAnnotation(BinaryOperation* binop) {
@@ -728,8 +800,7 @@
     return false;
   }
 
-  return !right_as_literal->raw_value()->ContainsDot() &&
-         right_as_literal->raw_value()->AsNumber() == 0.0;
+  return IsLiteral0(right_as_literal);
 }
 }  // namespace
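
Editor's note: the repeated raw_value()->ContainsDot() / AsNumber() checks are folded into named predicates here and in later hunks. Their definitions are not part of this section; a plausible shape, reconstructed from the exact expressions they replace, is:

    // Assumption: inferred from the replaced call sites, not the actual
    // V8 definitions.
    bool IsLiteralDouble(Literal* literal) {
      return literal->raw_value()->IsNumber() &&
             literal->raw_value()->ContainsDot();
    }
    bool IsLiteralInt(Literal* literal) {
      return literal->raw_value()->IsNumber() &&
             !literal->raw_value()->ContainsDot();
    }
    bool IsLiteral0(Literal* literal) {
      return IsLiteralInt(literal) && literal->raw_value()->AsNumber() == 0.0;
    }
    bool IsLiteral1Dot0(Literal* literal) {
      return IsLiteralDouble(literal) &&
             literal->raw_value()->AsNumber() == 1.0;
    }
    bool IsLiteralMinus1(Literal* literal) {
      return IsLiteralInt(literal) &&
             literal->raw_value()->AsNumber() == -1.0;
    }
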
 
@@ -894,6 +965,10 @@
     FAIL(fun_export, "Module export is not an asm.js function.");
   }
 
+  if (!fun_export->var()->is_function()) {
+    FAIL(fun_export, "Module exports must be function declarations.");
+  }
+
   return type;
 }
 
@@ -915,6 +990,10 @@
              "Only normal object properties may be used in the export object "
              "literal.");
       }
+      if (!prop->key()->AsLiteral()->IsPropertyName()) {
+        FAIL(prop->key(),
+             "Exported functions must have valid identifier names.");
+      }
 
       auto* export_obj = prop->value()->AsVariableProxy();
       if (export_obj == nullptr) {
@@ -1091,6 +1170,7 @@
     parameter_types.push_back(type);
     SetTypeOf(proxy, type);
     SetTypeOf(expr, type);
+    SetTypeOf(expr->value(), type);
   }
 
   if (static_cast<int>(annotated_parameters) != fun->parameter_count()) {
@@ -1146,10 +1226,12 @@
       if (as_block != nullptr) {
         statements = as_block->statements();
       } else {
-        // We don't check whether AsReturnStatement() below returns non-null --
-        // we leave that to the ReturnTypeAnnotations method.
-        RECURSE(return_type_ =
-                    ReturnTypeAnnotations(last_statement->AsReturnStatement()));
+        if (auto* ret_statement = last_statement->AsReturnStatement()) {
+          RECURSE(return_type_ =
+                      ReturnTypeAnnotations(ret_statement->expression()));
+        } else {
+          return_type_ = AsmType::Void();
+        }
       }
     }
   } while (return_type_ == AsmType::None());
@@ -1442,7 +1524,7 @@
     return false;
   }
 
-  if (lbl_expr->raw_value()->ContainsDot()) {
+  if (!IsLiteralInt(lbl_expr)) {
     return false;
   }
 
@@ -1539,8 +1621,7 @@
     return false;
   }
 
-  return !right_as_literal->raw_value()->ContainsDot() &&
-         right_as_literal->raw_value()->AsNumber() == -1.0;
+  return IsLiteralMinus1(right_as_literal);
 }
 
 bool IsUnaryMinus(BinaryOperation* binop) {
@@ -1554,8 +1635,7 @@
     return false;
   }
 
-  return !right_as_literal->raw_value()->ContainsDot() &&
-         right_as_literal->raw_value()->AsNumber() == -1.0;
+  return IsLiteralMinus1(right_as_literal);
 }
 }  // namespace
 
@@ -1684,7 +1764,7 @@
     return AsmType::Void();
   }
 
-  if (literal->raw_value()->ContainsDot()) {
+  if (IsLiteralDouble(literal)) {
     return AsmType::Double();
   }
 
@@ -1864,7 +1944,7 @@
     return false;
   }
 
-  if (literal->raw_value()->ContainsDot()) {
+  if (!IsLiteralInt(literal)) {
     return false;
   }
 
@@ -2204,12 +2284,12 @@
       RECURSE(type = ValidateCall(AsmType::Signed(), left_as_call));
       return type;
     }
-
-    // TODO(jpp): at this point we know that binop is expr|0. We could sinply
-    //
-    // RECURSE(t = ValidateExpression(left));
-    // FAIL_IF(t->IsNotA(Intish));
-    // return Signed;
+    AsmType* left_type;
+    RECURSE(left_type = ValidateExpression(left));
+    if (!left_type->IsA(AsmType::Intish())) {
+      FAIL(left, "Left side of |0 annotation must be intish.");
+    }
+    return AsmType::Signed();
   }
 
   auto* right = binop->right();
@@ -2273,7 +2353,7 @@
     return false;
   }
 
-  if (as_literal->raw_value()->ContainsDot()) {
+  if (!IsLiteralInt(as_literal)) {
     return false;
   }
 
@@ -2329,6 +2409,9 @@
         DCHECK(false);
         FAIL(call, "Redeclared global identifier.");
       }
+      if (call->GetCallType() != Call::OTHER_CALL) {
+        FAIL(call, "Invalid call of existing global function.");
+      }
       SetTypeOf(call_var_proxy, reinterpret_cast<AsmType*>(call_type));
       SetTypeOf(call, return_type);
       return return_type;
@@ -2359,6 +2442,10 @@
       FAIL(call, "Function invocation does not match function type.");
     }
 
+    if (call->GetCallType() != Call::OTHER_CALL) {
+      FAIL(call, "Invalid forward call of global function.");
+    }
+
     SetTypeOf(call_var_proxy, call_var_info->type());
     SetTypeOf(call, return_type);
     return return_type;
@@ -2417,6 +2504,9 @@
         DCHECK(false);
         FAIL(call, "Redeclared global identifier.");
       }
+      if (call->GetCallType() != Call::KEYED_PROPERTY_CALL) {
+        FAIL(call, "Invalid call of existing function table.");
+      }
       SetTypeOf(call_property, reinterpret_cast<AsmType*>(call_type));
       SetTypeOf(call, return_type);
       return return_type;
@@ -2441,6 +2531,9 @@
            "signature.");
     }
 
+    if (call->GetCallType() != Call::KEYED_PROPERTY_CALL) {
+      FAIL(call, "Invalid forward call of function table.");
+    }
     SetTypeOf(call_property, previous_type->signature());
     SetTypeOf(call, return_type);
     return return_type;
@@ -2457,7 +2550,7 @@
     return false;
   }
 
-  if (as_literal->raw_value()->ContainsDot()) {
+  if (!IsLiteralInt(as_literal)) {
     return false;
   }
 
@@ -2501,7 +2594,7 @@
   SetTypeOf(obj, obj_type);
 
   if (auto* key_as_literal = heap->key()->AsLiteral()) {
-    if (key_as_literal->raw_value()->ContainsDot()) {
+    if (!IsLiteralInt(key_as_literal)) {
       FAIL(key_as_literal, "Heap access index must be int.");
     }
 
@@ -2657,15 +2750,8 @@
 }
 
 // 5.2 ReturnTypeAnnotations
-AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
-  if (statement == nullptr) {
-    return AsmType::Void();
-  }
-
-  auto* ret_expr = statement->expression();
-  if (ret_expr == nullptr) {
-    return AsmType::Void();
-  }
+AsmType* AsmTyper::ReturnTypeAnnotations(Expression* ret_expr) {
+  DCHECK_NOT_NULL(ret_expr);
 
   if (auto* binop = ret_expr->AsBinaryOperation()) {
     if (IsDoubleAnnotation(binop)) {
@@ -2673,21 +2759,21 @@
     } else if (IsIntAnnotation(binop)) {
       return AsmType::Signed();
     }
-    FAIL(statement, "Invalid return type annotation.");
+    FAIL(ret_expr, "Invalid return type annotation.");
   }
 
   if (auto* call = ret_expr->AsCall()) {
     if (IsCallToFround(call)) {
       return AsmType::Float();
     }
-    FAIL(statement, "Invalid function call in return statement.");
+    FAIL(ret_expr, "Invalid function call in return statement.");
   }
 
   if (auto* literal = ret_expr->AsLiteral()) {
     int32_t _;
-    if (literal->raw_value()->ContainsDot()) {
+    if (IsLiteralDouble(literal)) {
       return AsmType::Double();
-    } else if (literal->value()->ToInt32(&_)) {
+    } else if (IsLiteralInt(literal) && literal->value()->ToInt32(&_)) {
       return AsmType::Signed();
     } else if (literal->IsUndefinedLiteral()) {
       // *VIOLATION* The parser changes
@@ -2699,28 +2785,46 @@
       // return undefined
       return AsmType::Void();
     }
-    FAIL(statement, "Invalid literal in return statement.");
+    FAIL(ret_expr, "Invalid literal in return statement.");
   }
 
   if (auto* proxy = ret_expr->AsVariableProxy()) {
     auto* var_info = Lookup(proxy->var());
 
     if (var_info == nullptr) {
-      FAIL(statement, "Undeclared identifier in return statement.");
+      FAIL(ret_expr, "Undeclared identifier in return statement.");
     }
 
     if (var_info->mutability() != VariableInfo::kConstGlobal) {
-      FAIL(statement, "Identifier in return statement is not const.");
+      FAIL(ret_expr, "Identifier in return statement is not const.");
     }
 
     if (!var_info->type()->IsReturnType()) {
-      FAIL(statement, "Constant in return must be signed, float, or double.");
+      FAIL(ret_expr, "Constant in return must be signed, float, or double.");
     }
 
     return var_info->type();
   }
 
-  FAIL(statement, "Invalid return type expression.");
+  // NOTE: This is not strictly valid asm.js, but is emitted by some versions of
+  // Emscripten.
+  if (auto* cond = ret_expr->AsConditional()) {
+    AsmType* a = AsmType::None();
+    AsmType* b = AsmType::None();
+    RECURSE(a = ReturnTypeAnnotations(cond->then_expression()));
+    if (a->IsA(AsmType::None())) {
+      return a;
+    }
+    RECURSE(b = ReturnTypeAnnotations(cond->else_expression()));
+    if (b->IsA(AsmType::None())) {
+      return b;
+    }
+    if (a->IsExactly(b)) {
+      return a;
+    }
+  }
+
+  FAIL(ret_expr, "Invalid return type expression.");
 }
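
Editor's note: the new Conditional branch accepts `cond ? a : b` in a return position (an Emscripten-ism, per the NOTE above) only when both arms annotate successfully and to exactly the same type. A toy model of that joining rule, with a stand-in enum instead of AsmType:

    #include <optional>

    enum class RetType { kSigned, kDouble, kFloat, kVoid };

    // Both arms must type-check and agree exactly; there is no widening.
    std::optional<RetType> JoinArms(std::optional<RetType> a,
                                    std::optional<RetType> b) {
      if (!a || !b) return std::nullopt;  // an arm failed to annotate
      if (*a != *b) return std::nullopt;  // arms disagree -> invalid
      return a;
    }
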
 
 // 5.4 VariableTypeAnnotations
@@ -2728,13 +2832,15 @@
 AsmType* AsmTyper::VariableTypeAnnotations(
     Expression* initializer, VariableInfo::Mutability mutability_type) {
   if (auto* literal = initializer->AsLiteral()) {
-    if (literal->raw_value()->ContainsDot()) {
+    if (IsLiteralDouble(literal)) {
       SetTypeOf(initializer, AsmType::Double());
       return AsmType::Double();
     }
+    if (!IsLiteralInt(literal)) {
+      FAIL(initializer, "Invalid type annotation - forbidden literal.");
+    }
     int32_t i32;
     uint32_t u32;
-
     AsmType* initializer_type = nullptr;
     if (literal->value()->ToUint32(&u32)) {
       if (u32 > LargestFixNum) {
@@ -2793,13 +2899,17 @@
          "to fround.");
   }
 
-  // Float constants must contain dots in local, but not in globals.
-  if (mutability_type == VariableInfo::kLocal) {
-    if (!src_expr->raw_value()->ContainsDot()) {
-      FAIL(initializer,
-           "Invalid float type annotation - expected literal argument to be a "
-           "floating point literal.");
-    }
+  // ERRATA: 5.4
+  // According to the spec, float constants must contain dots in locals,
+  // but not in globals.
+  // However, the errata doc (and actual programs) use integer values
+  // with fround(..).
+  // Skipping the check that would go here to enforce this.
+  // Checking instead that the literal expression is at least a number.
+  if (!src_expr->raw_value()->IsNumber()) {
+    FAIL(initializer,
+         "Invalid float type annotation - expected numeric literal for call "
+         "to fround.");
   }
 
   return AsmType::Float();
@@ -2848,19 +2958,6 @@
   return heap_view_info->type();
 }
 
-bool IsValidAsm(Isolate* isolate, Zone* zone, Script* script,
-                FunctionLiteral* root, std::string* error_message) {
-  error_message->clear();
-
-  AsmTyper typer(isolate, zone, script, root);
-  if (typer.Validate()) {
-    return true;
-  }
-
-  *error_message = typer.error_message();
-  return false;
-}
-
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/asmjs/asm-typer.h b/src/asmjs/asm-typer.h
index 2c66948..9651373 100644
--- a/src/asmjs/asm-typer.h
+++ b/src/asmjs/asm-typer.h
@@ -7,6 +7,7 @@
 
 #include <cstdint>
 #include <string>
+#include <unordered_map>
 #include <unordered_set>
 
 #include "src/allocation.h"
@@ -15,6 +16,7 @@
 #include "src/ast/ast-types.h"
 #include "src/ast/ast.h"
 #include "src/effects.h"
+#include "src/messages.h"
 #include "src/type-info.h"
 #include "src/zone/zone-containers.h"
 #include "src/zone/zone.h"
@@ -25,6 +27,7 @@
 
 class AsmType;
 class AsmTyperHarnessBuilder;
+class SourceLayoutTracker;
 
 class AsmTyper final {
  public:
@@ -66,16 +69,29 @@
   };
 
   ~AsmTyper() = default;
-  AsmTyper(Isolate* isolate, Zone* zone, Script* script, FunctionLiteral* root);
+  AsmTyper(Isolate* isolate, Zone* zone, Handle<Script> script,
+           FunctionLiteral* root);
 
   bool Validate();
+  // Do asm.js validation in phases (to interleave with conversion to wasm).
+  bool ValidateBeforeFunctionsPhase();
+  bool ValidateInnerFunction(FunctionDeclaration* decl);
+  bool ValidateAfterFunctionsPhase();
+  void ClearFunctionNodeTypes();
 
-  const char* error_message() const { return error_message_; }
+  Handle<JSMessageObject> error_message() const { return error_message_; }
+  const MessageLocation* message_location() const { return &message_location_; }
+
+  AsmType* TriggerParsingError();
 
   AsmType* TypeOf(AstNode* node) const;
   AsmType* TypeOf(Variable* v) const;
   StandardMember VariableAsStandardMember(Variable* var);
 
+  // Allow the asm-wasm-builder to trigger failures (for interleaved
+  // validation).
+  AsmType* FailWithMessage(const char* text);
+
   typedef std::unordered_set<StandardMember, std::hash<int> > StdlibSet;
 
   StdlibSet StdlibUses() const { return stdlib_uses_; }
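
Editor's note: error_message() now hands back a full Handle<JSMessageObject> plus a MessageLocation instead of the old fixed-size char buffer. A hedged caller-side sketch; forwarding to MessageHandler::ReportMessage is an assumption about the intended consumer, not something this patch shows:

    void ReportAsmTyperFailure(Isolate* isolate, AsmTyper* typer) {
      Handle<JSMessageObject> message = typer->error_message();
      const MessageLocation* location = typer->message_location();
      // Assumption: handed to V8's standard reporting path, e.g.
      // MessageHandler::ReportMessage(isolate, location, message);
    }
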
@@ -130,7 +146,7 @@
     bool IsHeap() const { return standard_member_ == kHeap; }
 
     void MarkDefined() { missing_definition_ = false; }
-    void FirstForwardUseIs(VariableProxy* var);
+    void SetFirstForwardUse(const MessageLocation& source_location);
 
     StandardMember standard_member() const { return standard_member_; }
     void set_standard_member(StandardMember standard_member) {
@@ -145,7 +161,7 @@
 
     bool missing_definition() const { return missing_definition_; }
 
-    VariableProxy* first_forward_use() const { return first_forward_use_; }
+    const MessageLocation* source_location() { return &source_location_; }
 
     static VariableInfo* ForSpecialSymbol(Zone* zone,
                                           StandardMember standard_member);
@@ -157,9 +173,8 @@
     // missing_definition_ is set to true for forward definition - i.e., use
     // before definition.
     bool missing_definition_ = false;
-    // first_forward_use_ holds the AST node that first referenced this
-    // VariableInfo. Used for error messages.
-    VariableProxy* first_forward_use_ = nullptr;
+    // Used for error messages.
+    MessageLocation source_location_;
   };
 
   // RAII-style manager for the in_function_ member variable.
@@ -199,6 +214,40 @@
     DISALLOW_IMPLICIT_CONSTRUCTORS(FlattenedStatements);
   };
 
+  class SourceLayoutTracker {
+   public:
+    SourceLayoutTracker() = default;
+    bool IsValid() const;
+    void AddUseAsm(const AstNode& node) { use_asm_.AddNewElement(node); }
+    void AddGlobal(const AstNode& node) { globals_.AddNewElement(node); }
+    void AddFunction(const AstNode& node) { functions_.AddNewElement(node); }
+    void AddTable(const AstNode& node) { tables_.AddNewElement(node); }
+    void AddExport(const AstNode& node) { exports_.AddNewElement(node); }
+
+   private:
+    class Section {
+     public:
+      Section() = default;
+      Section(const Section&) = default;
+      Section& operator=(const Section&) = default;
+
+      void AddNewElement(const AstNode& node);
+      bool IsPrecededBy(const Section& other) const;
+
+     private:
+      int start_ = kNoSourcePosition;
+      int end_ = kNoSourcePosition;
+    };
+
+    Section use_asm_;
+    Section globals_;
+    Section functions_;
+    Section tables_;
+    Section exports_;
+
+    DISALLOW_COPY_AND_ASSIGN(SourceLayoutTracker);
+  };
+
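
Editor's note: SourceLayoutTracker enforces the spec's module section order ("use asm", globals, functions, tables, export) purely from source positions. The Section method bodies live in the .cc and are not shown in this section; a plausible reconstruction from the start_/end_ members is:

    // Assumption: reconstructed behavior, not the actual V8 bodies.
    void AsmTyper::SourceLayoutTracker::Section::AddNewElement(
        const AstNode& node) {
      const int node_pos = node.position();
      if (start_ == kNoSourcePosition || node_pos < start_) start_ = node_pos;
      if (end_ == kNoSourcePosition || node_pos > end_) end_ = node_pos;
    }

    bool AsmTyper::SourceLayoutTracker::Section::IsPrecededBy(
        const Section& other) const {
      // Empty sections impose no ordering constraint.
      if (start_ == kNoSourcePosition || other.end_ == kNoSourcePosition) {
        return true;
      }
      return other.end_ < start_;
    }
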
   using ObjectTypeMap = ZoneMap<std::string, VariableInfo*>;
   void InitializeStdlib();
   void SetTypeOf(AstNode* node, AsmType* type);
@@ -220,7 +269,10 @@
   // validation failure.
 
   // 6.1 ValidateModule
-  AsmType* ValidateModule(FunctionLiteral* fun);
+  AsmType* ValidateModuleBeforeFunctionsPhase(FunctionLiteral* fun);
+  AsmType* ValidateModuleFunction(FunctionDeclaration* fun_decl);
+  AsmType* ValidateModuleFunctions(FunctionLiteral* fun);
+  AsmType* ValidateModuleAfterFunctionsPhase(FunctionLiteral* fun);
   AsmType* ValidateGlobalDeclaration(Assignment* assign);
   // 6.2 ValidateExport
   AsmType* ExportType(VariableProxy* fun_export);
@@ -312,7 +364,7 @@
   AsmType* ParameterTypeAnnotations(Variable* parameter,
                                     Expression* annotation);
   // 5.2 ReturnTypeAnnotations
-  AsmType* ReturnTypeAnnotations(ReturnStatement* statement);
+  AsmType* ReturnTypeAnnotations(Expression* ret_expr);
   // 5.4 VariableTypeAnnotations
   // 5.5 GlobalVariableTypeAnnotations
   AsmType* VariableTypeAnnotations(
@@ -323,7 +375,7 @@
 
   Isolate* isolate_;
   Zone* zone_;
-  Script* script_;
+  Handle<Script> script_;
   FunctionLiteral* root_;
   bool in_function_ = false;
 
@@ -345,13 +397,19 @@
 
   std::uintptr_t stack_limit_;
   bool stack_overflow_ = false;
-  ZoneMap<AstNode*, AsmType*> node_types_;
-  static const int kErrorMessageLimit = 100;
+  std::unordered_map<AstNode*, AsmType*> module_node_types_;
+  std::unordered_map<AstNode*, AsmType*> function_node_types_;
+  static const int kErrorMessageLimit = 128;
   AsmType* fround_type_;
   AsmType* ffi_type_;
-  char error_message_[kErrorMessageLimit];
+  Handle<JSMessageObject> error_message_;
+  MessageLocation message_location_;
   StdlibSet stdlib_uses_;
 
+  SourceLayoutTracker source_layout_;
+  ReturnStatement* module_return_;
+  ZoneVector<Assignment*> function_pointer_tables_;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(AsmTyper);
 };
 
diff --git a/src/asmjs/asm-types.cc b/src/asmjs/asm-types.cc
index 8f3c9a5..79c43a3 100644
--- a/src/asmjs/asm-types.cc
+++ b/src/asmjs/asm-types.cc
@@ -6,6 +6,7 @@
 
 #include <cinttypes>
 
+#include "src/utils.h"
 #include "src/v8.h"
 
 namespace v8 {
diff --git a/src/asmjs/asm-wasm-builder.cc b/src/asmjs/asm-wasm-builder.cc
index cac6fbd..891cba3 100644
--- a/src/asmjs/asm-wasm-builder.cc
+++ b/src/asmjs/asm-wasm-builder.cc
@@ -19,6 +19,13 @@
 
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
+#include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
+#include "src/counters.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/parsing/parse-info.h"
 
 namespace v8 {
 namespace internal {
@@ -31,19 +38,26 @@
     if (HasStackOverflow()) return; \
   } while (false)
 
+namespace {
+
 enum AsmScope { kModuleScope, kInitScope, kFuncScope, kExportScope };
 enum ValueFate { kDrop, kLeaveOnStack };
 
 struct ForeignVariable {
   Handle<Name> name;
   Variable* var;
-  LocalType type;
+  ValueType type;
 };
 
+enum TargetType : uint8_t { NoTarget, BreakTarget, ContinueTarget };
+
+}  // namespace
+
 class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
  public:
-  AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal,
-                     AsmTyper* typer)
+  AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, CompilationInfo* info,
+                     AstValueFactory* ast_value_factory, Handle<Script> script,
+                     FunctionLiteral* literal, AsmTyper* typer)
       : local_variables_(ZoneHashMap::kDefaultHashMapCapacity,
                          ZoneAllocationPolicy(zone)),
         functions_(ZoneHashMap::kDefaultHashMapCapacity,
@@ -56,15 +70,20 @@
         literal_(literal),
         isolate_(isolate),
         zone_(zone),
+        info_(info),
+        ast_value_factory_(ast_value_factory),
+        script_(script),
         typer_(typer),
+        typer_failed_(false),
+        typer_finished_(false),
         breakable_blocks_(zone),
         foreign_variables_(zone),
         init_function_(nullptr),
         foreign_init_function_(nullptr),
-        next_table_index_(0),
         function_tables_(ZoneHashMap::kDefaultHashMapCapacity,
                          ZoneAllocationPolicy(zone)),
-        imported_function_table_(this) {
+        imported_function_table_(this),
+        parent_binop_(nullptr) {
     InitializeAstVisitor(isolate);
   }
 
@@ -88,12 +107,13 @@
       foreign_init_function_->EmitGetLocal(static_cast<uint32_t>(pos));
       ForeignVariable* fv = &foreign_variables_[pos];
       uint32_t index = LookupOrInsertGlobal(fv->var, fv->type);
-      foreign_init_function_->EmitWithVarInt(kExprSetGlobal, index);
+      foreign_init_function_->EmitWithVarUint(kExprSetGlobal, index);
     }
+    foreign_init_function_->Emit(kExprEnd);
   }
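
Editor's note: the EmitVarInt to EmitVarUint renames (here and throughout this file) track that these immediates are unsigned LEB128 values. For reference, a minimal standalone unsigned-LEB128 encoder, assuming nothing about the WasmFunctionBuilder internals:

    #include <cstdint>
    #include <vector>

    // Unsigned LEB128: 7 data bits per byte, high bit set on all but the last.
    void AppendVarUint(std::vector<uint8_t>* out, uint32_t value) {
      do {
        uint8_t b = value & 0x7f;
        value >>= 7;
        if (value != 0) b |= 0x80;  // continuation bit
        out->push_back(b);
      } while (value != 0);
    }
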
 
-  i::Handle<i::FixedArray> GetForeignArgs() {
-    i::Handle<FixedArray> ret = isolate_->factory()->NewFixedArray(
+  Handle<FixedArray> GetForeignArgs() {
+    Handle<FixedArray> ret = isolate_->factory()->NewFixedArray(
         static_cast<int>(foreign_variables_.size()));
     for (size_t i = 0; i < foreign_variables_.size(); ++i) {
       ForeignVariable* fv = &foreign_variables_[i];
@@ -102,10 +122,26 @@
     return ret;
   }
 
-  void Build() {
+  bool Build() {
     InitializeInitFunction();
-    RECURSE(VisitFunctionLiteral(literal_));
+    if (!typer_->ValidateBeforeFunctionsPhase()) {
+      return false;
+    }
+    DCHECK(!HasStackOverflow());
+    VisitFunctionLiteral(literal_);
+    if (HasStackOverflow()) {
+      return false;
+    }
+    if (!typer_finished_ && !typer_failed_) {
+      typer_->FailWithMessage("Module missing export section.");
+      typer_failed_ = true;
+    }
+    if (typer_failed_) {
+      return false;
+    }
     BuildForeignInitFunction();
+    init_function_->Emit(kExprEnd);  // finish init function.
+    return true;
   }
 
   void VisitVariableDeclaration(VariableDeclaration* decl) {}
@@ -113,12 +149,70 @@
   void VisitFunctionDeclaration(FunctionDeclaration* decl) {
     DCHECK_EQ(kModuleScope, scope_);
     DCHECK_NULL(current_function_builder_);
+    FunctionLiteral* old_func = decl->fun();
+    DeclarationScope* new_func_scope = nullptr;
+    std::unique_ptr<ParseInfo> info;
+    if (decl->fun()->body() == nullptr) {
+      // TODO(titzer/bradnelson): Reuse SharedFunctionInfos used here when
+      // compiling the wasm module.
+      Handle<SharedFunctionInfo> shared =
+          Compiler::GetSharedFunctionInfo(decl->fun(), script_, info_);
+      shared->set_is_toplevel(false);
+      info.reset(new ParseInfo(script_));
+      info->set_shared_info(shared);
+      info->set_toplevel(false);
+      info->set_language_mode(decl->fun()->scope()->language_mode());
+      info->set_allow_lazy_parsing(false);
+      info->set_function_literal_id(shared->function_literal_id());
+      info->set_ast_value_factory(ast_value_factory_);
+      info->set_ast_value_factory_owned(false);
+      // Create fresh function scope to use to parse the function in.
+      new_func_scope = new (info->zone()) DeclarationScope(
+          info->zone(), decl->fun()->scope()->outer_scope(), FUNCTION_SCOPE);
+      info->set_asm_function_scope(new_func_scope);
+      if (!Compiler::ParseAndAnalyze(info.get())) {
+        decl->fun()->scope()->outer_scope()->RemoveInnerScope(new_func_scope);
+        if (isolate_->has_pending_exception()) {
+          isolate_->clear_pending_exception();
+        }
+        typer_->TriggerParsingError();
+        typer_failed_ = true;
+        return;
+      }
+      FunctionLiteral* func = info->literal();
+      DCHECK_NOT_NULL(func);
+      decl->set_fun(func);
+    }
+    if (!typer_->ValidateInnerFunction(decl)) {
+      typer_failed_ = true;
+      decl->set_fun(old_func);
+      if (new_func_scope != nullptr) {
+        DCHECK_EQ(new_func_scope, decl->scope()->inner_scope());
+        if (!decl->scope()->RemoveInnerScope(new_func_scope)) {
+          UNREACHABLE();
+        }
+      }
+      return;
+    }
     current_function_builder_ = LookupOrInsertFunction(decl->proxy()->var());
     scope_ = kFuncScope;
+
+    // Record start of the function, used as position for the stack check.
+    current_function_builder_->SetAsmFunctionStartPosition(
+        decl->fun()->start_position());
+
     RECURSE(Visit(decl->fun()));
+    decl->set_fun(old_func);
+    if (new_func_scope != nullptr) {
+      DCHECK_EQ(new_func_scope, decl->scope()->inner_scope());
+      if (!decl->scope()->RemoveInnerScope(new_func_scope)) {
+        UNREACHABLE();
+      }
+    }
     scope_ = kModuleScope;
     current_function_builder_ = nullptr;
     local_variables_.Clear();
+    typer_->ClearFunctionNodeTypes();
   }
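
Editor's note: VisitFunctionDeclaration now re-parses lazily-parsed function bodies into a fresh scope, temporarily swaps the new FunctionLiteral into the declaration, and must undo both on every exit path (hence the duplicated decl->set_fun(old_func) / RemoveInnerScope cleanup above). A hypothetical RAII helper, not part of this patch, illustrating the restore-on-exit pattern:

    // Hypothetical: restores a slot on scope exit so early returns cannot
    // leak the temporarily swapped-in value.
    template <typename T>
    class ScopedSwap {
     public:
      ScopedSwap(T* slot, T new_value) : slot_(slot), old_(*slot) {
        *slot_ = new_value;
      }
      ~ScopedSwap() { *slot_ = old_; }

     private:
      T* slot_;
      T old_;
    };
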
 
   void VisitStatements(ZoneList<Statement*>* stmts) {
@@ -129,7 +223,7 @@
         continue;
       }
       RECURSE(Visit(stmt));
-      if (stmt->IsJump()) break;
+      if (typer_failed_) break;
     }
   }
 
@@ -145,7 +239,8 @@
       }
     }
     if (scope_ == kFuncScope) {
-      BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock);
+      BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
+                           BreakTarget);
       RECURSE(VisitStatements(stmt->statements()));
     } else {
       RECURSE(VisitStatements(stmt->statements()));
@@ -158,10 +253,9 @@
 
    public:
     BlockVisitor(AsmWasmBuilderImpl* builder, BreakableStatement* stmt,
-                 WasmOpcode opcode)
+                 WasmOpcode opcode, TargetType target_type = NoTarget)
         : builder_(builder) {
-      builder_->breakable_blocks_.push_back(
-          std::make_pair(stmt, opcode == kExprLoop));
+      builder_->breakable_blocks_.emplace_back(stmt, target_type);
      // blocks and loops have a type immediate.
       builder_->current_function_builder_->EmitWithU8(opcode, kLocalVoid);
     }
@@ -204,12 +298,13 @@
 
   void VisitEmptyParentheses(EmptyParentheses* paren) { UNREACHABLE(); }
 
+  void VisitGetIterator(GetIterator* expr) { UNREACHABLE(); }
+
   void VisitIfStatement(IfStatement* stmt) {
     DCHECK_EQ(kFuncScope, scope_);
     RECURSE(Visit(stmt->condition()));
-    current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
-    // WASM ifs come with implement blocks for both arms.
-    breakable_blocks_.push_back(std::make_pair(nullptr, false));
+    // Wasm ifs come with implicit blocks for both arms.
+    BlockVisitor block(this, nullptr, kExprIf);
     if (stmt->HasThenStatement()) {
       RECURSE(Visit(stmt->then_statement()));
     }
@@ -217,18 +312,15 @@
       current_function_builder_->Emit(kExprElse);
       RECURSE(Visit(stmt->else_statement()));
     }
-    current_function_builder_->Emit(kExprEnd);
-    breakable_blocks_.pop_back();
   }
 
-  void DoBreakOrContinue(BreakableStatement* target, bool is_continue) {
+  void DoBreakOrContinue(BreakableStatement* target, TargetType type) {
     DCHECK_EQ(kFuncScope, scope_);
     for (int i = static_cast<int>(breakable_blocks_.size()) - 1; i >= 0; --i) {
       auto elem = breakable_blocks_.at(i);
-      if (elem.first == target && elem.second == is_continue) {
+      if (elem.first == target && elem.second == type) {
         int block_distance = static_cast<int>(breakable_blocks_.size() - i - 1);
-        current_function_builder_->Emit(kExprBr);
-        current_function_builder_->EmitVarInt(block_distance);
+        current_function_builder_->EmitWithVarUint(kExprBr, block_distance);
         return;
       }
     }
@@ -236,15 +328,25 @@
   }
 
   void VisitContinueStatement(ContinueStatement* stmt) {
-    DoBreakOrContinue(stmt->target(), true);
+    DoBreakOrContinue(stmt->target(), ContinueTarget);
   }
 
   void VisitBreakStatement(BreakStatement* stmt) {
-    DoBreakOrContinue(stmt->target(), false);
+    DoBreakOrContinue(stmt->target(), BreakTarget);
   }
 
   void VisitReturnStatement(ReturnStatement* stmt) {
     if (scope_ == kModuleScope) {
+      if (typer_finished_) {
+        typer_->FailWithMessage("Module has multiple returns.");
+        typer_failed_ = true;
+        return;
+      }
+      if (!typer_->ValidateAfterFunctionsPhase()) {
+        typer_failed_ = true;
+        return;
+      }
+      typer_finished_ = true;
       scope_ = kExportScope;
       RECURSE(Visit(stmt->expression()));
       scope_ = kModuleScope;
@@ -268,7 +370,7 @@
       current_function_builder_->Emit(kExprI32LtS);
       current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
       if_depth++;
-      breakable_blocks_.push_back(std::make_pair(nullptr, false));
+      breakable_blocks_.emplace_back(nullptr, NoTarget);
       HandleCase(node->left, case_to_block, tag, default_block, if_depth);
       current_function_builder_->Emit(kExprElse);
     }
@@ -278,7 +380,7 @@
       current_function_builder_->Emit(kExprI32GtS);
       current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
       if_depth++;
-      breakable_blocks_.push_back(std::make_pair(nullptr, false));
+      breakable_blocks_.emplace_back(nullptr, NoTarget);
       HandleCase(node->right, case_to_block, tag, default_block, if_depth);
       current_function_builder_->Emit(kExprElse);
     }
@@ -289,8 +391,8 @@
       current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
       DCHECK(case_to_block.find(node->begin) != case_to_block.end());
       current_function_builder_->Emit(kExprBr);
-      current_function_builder_->EmitVarInt(1 + if_depth +
-                                            case_to_block[node->begin]);
+      current_function_builder_->EmitVarUint(1 + if_depth +
+                                             case_to_block[node->begin]);
       current_function_builder_->Emit(kExprEnd);
     } else {
       if (node->begin != 0) {
@@ -301,21 +403,21 @@
         VisitVariableProxy(tag);
       }
       current_function_builder_->Emit(kExprBrTable);
-      current_function_builder_->EmitVarInt(node->end - node->begin + 1);
+      current_function_builder_->EmitVarUint(node->end - node->begin + 1);
       for (int v = node->begin; v <= node->end; ++v) {
         if (case_to_block.find(v) != case_to_block.end()) {
           uint32_t target = if_depth + case_to_block[v];
-          current_function_builder_->EmitVarInt(target);
+          current_function_builder_->EmitVarUint(target);
         } else {
           uint32_t target = if_depth + default_block;
-          current_function_builder_->EmitVarInt(target);
+          current_function_builder_->EmitVarUint(target);
         }
         if (v == kMaxInt) {
           break;
         }
       }
       uint32_t target = if_depth + default_block;
-      current_function_builder_->EmitVarInt(target);
+      current_function_builder_->EmitVarUint(target);
     }
 
     while (if_depth-- != prev_if_depth) {
@@ -332,7 +434,8 @@
     if (case_count == 0) {
       return;
     }
-    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock);
+    BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
+                         BreakTarget);
     ZoneVector<BlockVisitor*> blocks(zone_);
     ZoneVector<int32_t> cases(zone_);
     ZoneMap<int, unsigned int> case_to_block(zone_);
@@ -362,7 +465,7 @@
       if (root->left != nullptr || root->right != nullptr ||
           root->begin == root->end) {
         current_function_builder_->Emit(kExprBr);
-        current_function_builder_->EmitVarInt(default_block);
+        current_function_builder_->EmitVarUint(default_block);
       }
     }
     for (int i = 0; i < case_count; ++i) {
@@ -378,26 +481,28 @@
 
   void VisitDoWhileStatement(DoWhileStatement* stmt) {
     DCHECK_EQ(kFuncScope, scope_);
-    BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
+    BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock,
+                       BreakTarget);
     BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
-    RECURSE(Visit(stmt->body()));
+    {
+      BlockVisitor inner_block(this, stmt->AsBreakableStatement(), kExprBlock,
+                               ContinueTarget);
+      RECURSE(Visit(stmt->body()));
+    }
     RECURSE(Visit(stmt->cond()));
-    current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
-    current_function_builder_->EmitWithU8(kExprBr, 1);
-    current_function_builder_->Emit(kExprEnd);
+    current_function_builder_->EmitWithU8(kExprBrIf, 0);
   }
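
Editor's note: the new do-while lowering wraps the loop body in an extra inner block that serves as the continue target, then replaces the old if/br/end tail with a single br_if back-edge. A goto-based C++ analogy of the resulting control flow, where labels stand in for wasm block/loop labels (illustrative only, not generated code):

    #include <cstdio>

    int main() {
      int i = 0;
    loop_header:              // loop
      {                       // inner block: ContinueTarget
        ++i;
        if (i % 2 == 1) goto continue_target;  // `continue` -> inner block end
        if (i > 8) goto break_target;          // `break` -> outer block end
        std::printf("%d\n", i);
      }
    continue_target:
      if (i < 10) goto loop_header;            // condition + br_if 0 back-edge
    break_target:             // outer block end: BreakTarget
      return 0;
    }
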
 
   void VisitWhileStatement(WhileStatement* stmt) {
     DCHECK_EQ(kFuncScope, scope_);
-    BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
-    BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
+    BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock,
+                       BreakTarget);
+    BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop,
+                      ContinueTarget);
     RECURSE(Visit(stmt->cond()));
-    breakable_blocks_.push_back(std::make_pair(nullptr, false));
-    current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
+    BlockVisitor if_block(this, nullptr, kExprIf);
     RECURSE(Visit(stmt->body()));
     current_function_builder_->EmitWithU8(kExprBr, 1);
-    current_function_builder_->Emit(kExprEnd);
-    breakable_blocks_.pop_back();
   }
 
   void VisitForStatement(ForStatement* stmt) {
@@ -405,8 +510,10 @@
     if (stmt->init() != nullptr) {
       RECURSE(Visit(stmt->init()));
     }
-    BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
-    BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
+    BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock,
+                       BreakTarget);
+    BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop,
+                      ContinueTarget);
     if (stmt->cond() != nullptr) {
       RECURSE(Visit(stmt->cond()));
       current_function_builder_->Emit(kExprI32Eqz);
@@ -440,16 +547,21 @@
         // Add the parameters for the function.
         const auto& arguments = func_type->Arguments();
         for (int i = 0; i < expr->parameter_count(); ++i) {
-          LocalType type = TypeFrom(arguments[i]);
-          DCHECK_NE(kAstStmt, type);
+          ValueType type = TypeFrom(arguments[i]);
+          DCHECK_NE(kWasmStmt, type);
           InsertParameter(scope->parameter(i), type, i);
         }
       } else {
         UNREACHABLE();
       }
     }
-    RECURSE(VisitStatements(expr->body()));
     RECURSE(VisitDeclarations(scope->declarations()));
+    if (typer_failed_) return;
+    RECURSE(VisitStatements(expr->body()));
+    if (scope_ == kFuncScope) {
+      // Finish the function-body scope block.
+      current_function_builder_->Emit(kExprEnd);
+    }
   }
 
   void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
@@ -459,20 +571,20 @@
   void VisitConditional(Conditional* expr) {
     DCHECK_EQ(kFuncScope, scope_);
     RECURSE(Visit(expr->condition()));
-    // WASM ifs come with implicit blocks for both arms.
-    breakable_blocks_.push_back(std::make_pair(nullptr, false));
-    LocalTypeCode type;
+    // Wasm ifs come with implicit blocks for both arms.
+    breakable_blocks_.emplace_back(nullptr, NoTarget);
+    ValueTypeCode type;
     switch (TypeOf(expr)) {
-      case kAstI32:
+      case kWasmI32:
         type = kLocalI32;
         break;
-      case kAstI64:
+      case kWasmI64:
         type = kLocalI64;
         break;
-      case kAstF32:
+      case kWasmF32:
         type = kLocalF32;
         break;
-      case kAstF64:
+      case kWasmF64:
         type = kLocalF64;
         break;
       default:
@@ -544,10 +656,10 @@
       if (VisitStdlibConstant(var)) {
         return;
       }
-      LocalType var_type = TypeOf(expr);
-      DCHECK_NE(kAstStmt, var_type);
+      ValueType var_type = TypeOf(expr);
+      DCHECK_NE(kWasmStmt, var_type);
       if (var->IsContextSlot()) {
-        current_function_builder_->EmitWithVarInt(
+        current_function_builder_->EmitWithVarUint(
             kExprGetGlobal, LookupOrInsertGlobal(var, var_type));
       } else {
         current_function_builder_->EmitGetLocal(
@@ -573,35 +685,26 @@
 
     if (type->IsA(AsmType::Signed())) {
       int32_t i = 0;
-      if (!value->ToInt32(&i)) {
-        UNREACHABLE();
-      }
-      byte code[] = {WASM_I32V(i)};
-      current_function_builder_->EmitCode(code, sizeof(code));
+      CHECK(value->ToInt32(&i));
+      current_function_builder_->EmitI32Const(i);
     } else if (type->IsA(AsmType::Unsigned()) || type->IsA(AsmType::FixNum())) {
       uint32_t u = 0;
-      if (!value->ToUint32(&u)) {
-        UNREACHABLE();
-      }
-      int32_t i = static_cast<int32_t>(u);
-      byte code[] = {WASM_I32V(i)};
-      current_function_builder_->EmitCode(code, sizeof(code));
+      CHECK(value->ToUint32(&u));
+      current_function_builder_->EmitI32Const(bit_cast<int32_t>(u));
     } else if (type->IsA(AsmType::Int())) {
       // The parser can collapse !0, !1 etc to true / false.
       // Allow these as int literals.
       if (expr->raw_value()->IsTrue()) {
-        byte code[] = {WASM_I32V(1)};
+        byte code[] = {WASM_ONE};
         current_function_builder_->EmitCode(code, sizeof(code));
       } else if (expr->raw_value()->IsFalse()) {
-        byte code[] = {WASM_I32V(0)};
+        byte code[] = {WASM_ZERO};
         current_function_builder_->EmitCode(code, sizeof(code));
       } else if (expr->raw_value()->IsNumber()) {
         // This can happen when -x becomes x * -1 (due to the parser).
         int32_t i = 0;
-        if (!value->ToInt32(&i) || i != -1) {
-          UNREACHABLE();
-        }
-        byte code[] = {WASM_I32V(i)};
+        CHECK(value->ToInt32(&i) && i == -1);
+        byte code[] = {WASM_I32V_1(-1)};
         current_function_builder_->EmitCode(code, sizeof(code));
       } else {
         UNREACHABLE();
@@ -638,12 +741,13 @@
       Literal* name = prop->key()->AsLiteral();
       DCHECK_NOT_NULL(name);
       DCHECK(name->IsPropertyName());
-      const AstRawString* raw_name = name->AsRawPropertyName();
+      Handle<String> function_name = name->AsPropertyName();
+      int length;
+      std::unique_ptr<char[]> utf8 = function_name->ToCString(
+          DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &length);
       if (var->is_function()) {
         WasmFunctionBuilder* function = LookupOrInsertFunction(var);
-        function->Export();
-        function->SetName({reinterpret_cast<const char*>(raw_name->raw_data()),
-                           raw_name->length()});
+        function->ExportAs({utf8.get(), length});
       }
     }
   }
@@ -660,53 +764,67 @@
     current_function_builder_ = nullptr;
   }
 
-  void AddFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
-    auto* func_tbl_type = typer_->TypeOf(funcs)->AsFunctionTableType();
-    DCHECK_NOT_NULL(func_tbl_type);
-    auto* func_type = func_tbl_type->signature()->AsFunctionType();
+  struct FunctionTableIndices : public ZoneObject {
+    uint32_t start_index;
+    uint32_t signature_index;
+  };
+
+  FunctionTableIndices* LookupOrAddFunctionTable(VariableProxy* table,
+                                                 Property* p) {
+    FunctionTableIndices* indices = LookupFunctionTable(table->var());
+    if (indices != nullptr) {
+      // Already setup.
+      return indices;
+    }
+    indices = new (zone()) FunctionTableIndices();
+    auto* func_type = typer_->TypeOf(p)->AsFunctionType();
+    auto* func_table_type = typer_->TypeOf(p->obj()->AsVariableProxy()->var())
+                                ->AsFunctionTableType();
     const auto& arguments = func_type->Arguments();
-    LocalType return_type = TypeFrom(func_type->ReturnType());
-    FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
+    ValueType return_type = TypeFrom(func_type->ReturnType());
+    FunctionSig::Builder sig(zone(), return_type == kWasmStmt ? 0 : 1,
                              arguments.size());
-    if (return_type != kAstStmt) {
+    if (return_type != kWasmStmt) {
       sig.AddReturn(return_type);
     }
     for (auto* arg : arguments) {
       sig.AddParam(TypeFrom(arg));
     }
     uint32_t signature_index = builder_->AddSignature(sig.Build());
-    InsertFunctionTable(table->var(), next_table_index_, signature_index);
-    next_table_index_ += funcs->values()->length();
-    for (int i = 0; i < funcs->values()->length(); ++i) {
-      VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
-      DCHECK_NOT_NULL(func);
-      builder_->AddIndirectFunction(
-          LookupOrInsertFunction(func->var())->func_index());
-    }
-  }
-
-  struct FunctionTableIndices : public ZoneObject {
-    uint32_t start_index;
-    uint32_t signature_index;
-  };
-
-  void InsertFunctionTable(Variable* v, uint32_t start_index,
-                           uint32_t signature_index) {
-    FunctionTableIndices* container = new (zone()) FunctionTableIndices();
-    container->start_index = start_index;
-    container->signature_index = signature_index;
+    indices->start_index = builder_->AllocateIndirectFunctions(
+        static_cast<uint32_t>(func_table_type->length()));
+    indices->signature_index = signature_index;
     ZoneHashMap::Entry* entry = function_tables_.LookupOrInsert(
-        v, ComputePointerHash(v), ZoneAllocationPolicy(zone()));
-    entry->value = container;
+        table->var(), ComputePointerHash(table->var()),
+        ZoneAllocationPolicy(zone()));
+    entry->value = indices;
+    return indices;
   }
 
   FunctionTableIndices* LookupFunctionTable(Variable* v) {
     ZoneHashMap::Entry* entry =
         function_tables_.Lookup(v, ComputePointerHash(v));
-    DCHECK_NOT_NULL(entry);
+    if (entry == nullptr) {
+      return nullptr;
+    }
     return reinterpret_cast<FunctionTableIndices*>(entry->value);
   }
 
+  void PopulateFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
+    FunctionTableIndices* indices = LookupFunctionTable(table->var());
+    // Ignore unused function tables.
+    if (indices == nullptr) {
+      return;
+    }
+    for (int i = 0; i < funcs->values()->length(); ++i) {
+      VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
+      DCHECK_NOT_NULL(func);
+      builder_->SetIndirectFunction(
+          indices->start_index + i,
+          LookupOrInsertFunction(func->var())->func_index());
+    }
+  }
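
Editor's note: function tables are no longer materialized eagerly from the array literal. LookupOrAddFunctionTable reserves indirect-function slots at the first call-site use, and PopulateFunctionTable later fills them in, silently skipping tables that are never called. A simplified sketch of the reserve-on-first-use bookkeeping, assuming std::unordered_map in place of the Zone-allocated ZoneHashMap:

    #include <cstdint>
    #include <unordered_map>

    struct TableIndices {
      uint32_t start_index;      // first reserved indirect-function slot
      uint32_t signature_index;  // shared signature for all table entries
    };

    TableIndices* LookupOrAdd(
        std::unordered_map<const void*, TableIndices>* map,
        const void* table_var, uint32_t table_length,
        uint32_t signature_index, uint32_t* next_slot) {
      auto it = map->find(table_var);
      if (it != map->end()) return &it->second;  // already set up
      TableIndices indices{*next_slot, signature_index};
      *next_slot += table_length;                // reserve a contiguous range
      return &map->emplace(table_var, indices).first->second;
    }
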
+
   class ImportedFunctionTable {
    private:
     class ImportedFunctionIndices : public ZoneObject {
@@ -727,20 +845,33 @@
                  ZoneAllocationPolicy(builder->zone())),
           builder_(builder) {}
 
-    void AddImport(Variable* v, const char* name, int name_length) {
-      ImportedFunctionIndices* indices = new (builder_->zone())
-          ImportedFunctionIndices(name, name_length, builder_->zone());
+    ImportedFunctionIndices* LookupOrInsertImport(Variable* v) {
       auto* entry = table_.LookupOrInsert(
           v, ComputePointerHash(v), ZoneAllocationPolicy(builder_->zone()));
-      entry->value = indices;
+      ImportedFunctionIndices* indices;
+      if (entry->value == nullptr) {
+        indices = new (builder_->zone())
+            ImportedFunctionIndices(nullptr, 0, builder_->zone());
+        entry->value = indices;
+      } else {
+        indices = reinterpret_cast<ImportedFunctionIndices*>(entry->value);
+      }
+      return indices;
+    }
+
+    void SetImportName(Variable* v, const char* name, int name_length) {
+      auto* indices = LookupOrInsertImport(v);
+      indices->name_ = name;
+      indices->name_length_ = name_length;
+      for (auto i : indices->signature_to_index_) {
+        builder_->builder_->SetImportName(i.second, indices->name_,
+                                          indices->name_length_);
+      }
     }
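
Editor's note: AddImport is split in two. Call sites can now allocate import indices before the FFI assignment supplies the name (LookupOrInsertImportUse), and SetImportName later backfills the name onto every index already allocated for that variable. A simplified analogy with std:: containers (assumption: the real code uses Zone-allocated maps keyed by Variable*):

    #include <cstdint>
    #include <string>
    #include <vector>

    // Models the imports allocated for a single foreign variable.
    struct ImportEntries {
      std::vector<std::string> import_names;  // one slot per import index
      std::vector<uint32_t> allocated;        // indices handed out so far

      uint32_t AllocateUse() {     // seen at a call site, name still unknown
        import_names.emplace_back();
        allocated.push_back(static_cast<uint32_t>(import_names.size() - 1));
        return allocated.back();
      }
      void SetName(const std::string& name) {  // seen at the FFI assignment
        for (uint32_t i : allocated) import_names[i] = name;  // backfill
      }
    };
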
 
     // Get a function's index (or allocate if new).
-    uint32_t LookupOrInsertImport(Variable* v, FunctionSig* sig) {
-      ZoneHashMap::Entry* entry = table_.Lookup(v, ComputePointerHash(v));
-      DCHECK_NOT_NULL(entry);
-      ImportedFunctionIndices* indices =
-          reinterpret_cast<ImportedFunctionIndices*>(entry->value);
+    uint32_t LookupOrInsertImportUse(Variable* v, FunctionSig* sig) {
+      auto* indices = LookupOrInsertImport(v);
       WasmModuleBuilder::SignatureMap::iterator pos =
           indices->signature_to_index_.find(sig);
       if (pos != indices->signature_to_index_.end()) {
@@ -819,13 +950,13 @@
     if (target_var != nullptr) {
       // Left hand side is a local or a global variable.
       Variable* var = target_var->var();
-      LocalType var_type = TypeOf(expr);
-      DCHECK_NE(kAstStmt, var_type);
+      ValueType var_type = TypeOf(expr);
+      DCHECK_NE(kWasmStmt, var_type);
       if (var->IsContextSlot()) {
         uint32_t index = LookupOrInsertGlobal(var, var_type);
-        current_function_builder_->EmitWithVarInt(kExprSetGlobal, index);
+        current_function_builder_->EmitWithVarUint(kExprSetGlobal, index);
         if (fate == kLeaveOnStack) {
-          current_function_builder_->EmitWithVarInt(kExprGetGlobal, index);
+          current_function_builder_->EmitWithVarUint(kExprGetGlobal, index);
         }
       } else {
         if (fate == kDrop) {
@@ -841,7 +972,7 @@
     Property* target_prop = expr->target()->AsProperty();
     if (target_prop != nullptr) {
       // Left hand side is a property access, i.e. the asm.js heap.
-      if (TypeOf(expr->value()) == kAstF64 && expr->target()->IsProperty() &&
+      if (TypeOf(expr->value()) == kWasmF64 && expr->target()->IsProperty() &&
           typer_->TypeOf(expr->target()->AsProperty()->obj())
               ->IsA(AsmType::Float32Array())) {
         current_function_builder_->Emit(kExprF32ConvertF64);
@@ -901,7 +1032,7 @@
           if (typer_->TypeOf(target)->AsFFIType() != nullptr) {
             const AstRawString* name =
                 prop->key()->AsLiteral()->AsRawPropertyName();
-            imported_function_table_.AddImport(
+            imported_function_table_.SetImportName(
                 target->var(), reinterpret_cast<const char*>(name->raw_data()),
                 name->length());
           }
@@ -910,14 +1041,10 @@
         return;
       }
       ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
-      if (funcs != nullptr &&
-          typer_->TypeOf(funcs)
-              ->AsFunctionTableType()
-              ->signature()
-              ->AsFunctionType()) {
+      if (funcs != nullptr) {
         VariableProxy* target = expr->target()->AsVariableProxy();
         DCHECK_NOT_NULL(target);
-        AddFunctionTable(target, funcs);
+        PopulateFunctionTable(target, funcs);
         // Only add to the function table. No init needed.
         return;
       }
@@ -952,8 +1079,8 @@
     DCHECK_NOT_NULL(key_literal);
     if (!key_literal->value().is_null()) {
       Handle<Name> name =
-          i::Object::ToName(isolate_, key_literal->value()).ToHandleChecked();
-      LocalType type = is_float ? kAstF64 : kAstI32;
+          Object::ToName(isolate_, key_literal->value()).ToHandleChecked();
+      ValueType type = is_float ? kWasmF64 : kWasmI32;
       foreign_variables_.push_back({name, var, type});
     }
   }
@@ -961,7 +1088,7 @@
   void VisitPropertyAndEmitIndex(Property* expr, AsmType** atype) {
     Expression* obj = expr->obj();
     *atype = typer_->TypeOf(obj);
-    int size = (*atype)->ElementSizeInBytes();
+    int32_t size = (*atype)->ElementSizeInBytes();
     if (size == 1) {
       // Allow more general expression in byte arrays than the spec
       // strictly permits.
@@ -974,7 +1101,7 @@
     Literal* value = expr->key()->AsLiteral();
     if (value) {
       DCHECK(value->raw_value()->IsNumber());
-      DCHECK_EQ(kAstI32, TypeOf(value));
+      DCHECK_EQ(kWasmI32, TypeOf(value));
       int32_t val = static_cast<int32_t>(value->raw_value()->AsNumber());
       // TODO(titzer): handle overflow here.
       current_function_builder_->EmitI32Const(val * size);
@@ -984,14 +1111,13 @@
     if (binop) {
       DCHECK_EQ(Token::SAR, binop->op());
       DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
-      DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
+      DCHECK(kWasmI32 == TypeOf(binop->right()->AsLiteral()));
       DCHECK_EQ(size,
                 1 << static_cast<int>(
                     binop->right()->AsLiteral()->raw_value()->AsNumber()));
       // Mask bottom bits to match asm.js behavior.
-      byte mask = static_cast<byte>(~(size - 1));
       RECURSE(Visit(binop->left()));
-      current_function_builder_->EmitWithU8(kExprI8Const, mask);
+      current_function_builder_->EmitI32Const(~(size - 1));
       current_function_builder_->Emit(kExprI32And);
       return;
     }
@@ -1030,7 +1156,7 @@
     AsmTyper::StandardMember standard_object =
         typer_->VariableAsStandardMember(var);
     ZoneList<Expression*>* args = call->arguments();
-    LocalType call_type = TypeOf(call);
+    ValueType call_type = TypeOf(call);
 
     switch (standard_object) {
       case AsmTyper::kNone: {
@@ -1038,57 +1164,57 @@
       }
       case AsmTyper::kMathAcos: {
         VisitCallArgs(call);
-        DCHECK_EQ(kAstF64, call_type);
+        DCHECK_EQ(kWasmF64, call_type);
         current_function_builder_->Emit(kExprF64Acos);
         break;
       }
       case AsmTyper::kMathAsin: {
         VisitCallArgs(call);
-        DCHECK_EQ(kAstF64, call_type);
+        DCHECK_EQ(kWasmF64, call_type);
         current_function_builder_->Emit(kExprF64Asin);
         break;
       }
       case AsmTyper::kMathAtan: {
         VisitCallArgs(call);
-        DCHECK_EQ(kAstF64, call_type);
+        DCHECK_EQ(kWasmF64, call_type);
         current_function_builder_->Emit(kExprF64Atan);
         break;
       }
       case AsmTyper::kMathCos: {
         VisitCallArgs(call);
-        DCHECK_EQ(kAstF64, call_type);
+        DCHECK_EQ(kWasmF64, call_type);
         current_function_builder_->Emit(kExprF64Cos);
         break;
       }
       case AsmTyper::kMathSin: {
         VisitCallArgs(call);
-        DCHECK_EQ(kAstF64, call_type);
+        DCHECK_EQ(kWasmF64, call_type);
         current_function_builder_->Emit(kExprF64Sin);
         break;
       }
       case AsmTyper::kMathTan: {
         VisitCallArgs(call);
-        DCHECK_EQ(kAstF64, call_type);
+        DCHECK_EQ(kWasmF64, call_type);
         current_function_builder_->Emit(kExprF64Tan);
         break;
       }
       case AsmTyper::kMathExp: {
         VisitCallArgs(call);
-        DCHECK_EQ(kAstF64, call_type);
+        DCHECK_EQ(kWasmF64, call_type);
         current_function_builder_->Emit(kExprF64Exp);
         break;
       }
       case AsmTyper::kMathLog: {
         VisitCallArgs(call);
-        DCHECK_EQ(kAstF64, call_type);
+        DCHECK_EQ(kWasmF64, call_type);
         current_function_builder_->Emit(kExprF64Log);
         break;
       }
       case AsmTyper::kMathCeil: {
         VisitCallArgs(call);
-        if (call_type == kAstF32) {
+        if (call_type == kWasmF32) {
           current_function_builder_->Emit(kExprF32Ceil);
-        } else if (call_type == kAstF64) {
+        } else if (call_type == kWasmF64) {
           current_function_builder_->Emit(kExprF64Ceil);
         } else {
           UNREACHABLE();
@@ -1097,9 +1223,9 @@
       }
       case AsmTyper::kMathFloor: {
         VisitCallArgs(call);
-        if (call_type == kAstF32) {
+        if (call_type == kWasmF32) {
           current_function_builder_->Emit(kExprF32Floor);
-        } else if (call_type == kAstF64) {
+        } else if (call_type == kWasmF64) {
           current_function_builder_->Emit(kExprF64Floor);
         } else {
           UNREACHABLE();
@@ -1108,9 +1234,9 @@
       }
       case AsmTyper::kMathSqrt: {
         VisitCallArgs(call);
-        if (call_type == kAstF32) {
+        if (call_type == kWasmF32) {
           current_function_builder_->Emit(kExprF32Sqrt);
-        } else if (call_type == kAstF64) {
+        } else if (call_type == kWasmF64) {
           current_function_builder_->Emit(kExprF64Sqrt);
         } else {
           UNREACHABLE();
@@ -1119,18 +1245,18 @@
       }
       case AsmTyper::kMathClz32: {
         VisitCallArgs(call);
-        DCHECK(call_type == kAstI32);
+        DCHECK(call_type == kWasmI32);
         current_function_builder_->Emit(kExprI32Clz);
         break;
       }
       case AsmTyper::kMathAbs: {
-        if (call_type == kAstI32) {
-          WasmTemporary tmp(current_function_builder_, kAstI32);
+        if (call_type == kWasmI32) {
+          WasmTemporary tmp(current_function_builder_, kWasmI32);
 
           // if set_local(tmp, x) < 0
           Visit(call->arguments()->at(0));
           current_function_builder_->EmitTeeLocal(tmp.index());
-          byte code[] = {WASM_I8(0)};
+          byte code[] = {WASM_ZERO};
           current_function_builder_->EmitCode(code, sizeof(code));
           current_function_builder_->Emit(kExprI32LtS);
           current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
@@ -1146,10 +1272,10 @@
           // end
           current_function_builder_->Emit(kExprEnd);
 
-        } else if (call_type == kAstF32) {
+        } else if (call_type == kWasmF32) {
           VisitCallArgs(call);
           current_function_builder_->Emit(kExprF32Abs);
-        } else if (call_type == kAstF64) {
+        } else if (call_type == kWasmF64) {
           VisitCallArgs(call);
           current_function_builder_->Emit(kExprF64Abs);
         } else {
@@ -1159,9 +1285,9 @@
       }
       case AsmTyper::kMathMin: {
         // TODO(bradnelson): Change wasm to match Math.min in asm.js mode.
-        if (call_type == kAstI32) {
-          WasmTemporary tmp_x(current_function_builder_, kAstI32);
-          WasmTemporary tmp_y(current_function_builder_, kAstI32);
+        if (call_type == kWasmI32) {
+          WasmTemporary tmp_x(current_function_builder_, kWasmI32);
+          WasmTemporary tmp_y(current_function_builder_, kWasmI32);
 
           // if set_local(tmp_x, x) < set_local(tmp_y, y)
           Visit(call->arguments()->at(0));
@@ -1181,10 +1307,10 @@
           current_function_builder_->EmitGetLocal(tmp_y.index());
           current_function_builder_->Emit(kExprEnd);
 
-        } else if (call_type == kAstF32) {
+        } else if (call_type == kWasmF32) {
           VisitCallArgs(call);
           current_function_builder_->Emit(kExprF32Min);
-        } else if (call_type == kAstF64) {
+        } else if (call_type == kWasmF64) {
           VisitCallArgs(call);
           current_function_builder_->Emit(kExprF64Min);
         } else {
@@ -1194,9 +1320,9 @@
       }
       case AsmTyper::kMathMax: {
         // TODO(bradnelson): Change wasm to match Math.max in asm.js mode.
-        if (call_type == kAstI32) {
-          WasmTemporary tmp_x(current_function_builder_, kAstI32);
-          WasmTemporary tmp_y(current_function_builder_, kAstI32);
+        if (call_type == kWasmI32) {
+          WasmTemporary tmp_x(current_function_builder_, kWasmI32);
+          WasmTemporary tmp_y(current_function_builder_, kWasmI32);
 
           // if set_local(tmp_x, x) < set_local(tmp_y, y)
           Visit(call->arguments()->at(0));
@@ -1217,10 +1343,10 @@
           current_function_builder_->EmitGetLocal(tmp_x.index());
           current_function_builder_->Emit(kExprEnd);
 
-        } else if (call_type == kAstF32) {
+        } else if (call_type == kWasmF32) {
           VisitCallArgs(call);
           current_function_builder_->Emit(kExprF32Max);
-        } else if (call_type == kAstF64) {
+        } else if (call_type == kWasmF64) {
           VisitCallArgs(call);
           current_function_builder_->Emit(kExprF64Max);
         } else {
@@ -1230,13 +1356,13 @@
       }
       case AsmTyper::kMathAtan2: {
         VisitCallArgs(call);
-        DCHECK_EQ(kAstF64, call_type);
+        DCHECK_EQ(kWasmF64, call_type);
         current_function_builder_->Emit(kExprF64Atan2);
         break;
       }
       case AsmTyper::kMathPow: {
         VisitCallArgs(call);
-        DCHECK_EQ(kAstF64, call_type);
+        DCHECK_EQ(kWasmF64, call_type);
         current_function_builder_->Emit(kExprF64Pow);
         break;
       }
@@ -1298,6 +1424,10 @@
   bool VisitCallExpression(Call* expr) {
     Call::CallType call_type = expr->GetCallType();
     bool returns_value = true;
+
+    // Save the parent now; it might be overwritten in VisitCallArgs.
+    BinaryOperation* parent_binop = parent_binop_;
+
     switch (call_type) {
       case Call::OTHER_CALL: {
         VariableProxy* proxy = expr->expression()->AsVariableProxy();
@@ -1313,11 +1443,11 @@
         VariableProxy* vp = expr->expression()->AsVariableProxy();
         DCHECK_NOT_NULL(vp);
         if (typer_->TypeOf(vp)->AsFFIType() != nullptr) {
-          LocalType return_type = TypeOf(expr);
+          ValueType return_type = TypeOf(expr);
           ZoneList<Expression*>* args = expr->arguments();
-          FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
+          FunctionSig::Builder sig(zone(), return_type == kWasmStmt ? 0 : 1,
                                    args->length());
-          if (return_type != kAstStmt) {
+          if (return_type != kWasmStmt) {
             sig.AddReturn(return_type);
           } else {
             returns_value = false;
@@ -1325,16 +1455,23 @@
           for (int i = 0; i < args->length(); ++i) {
             sig.AddParam(TypeOf(args->at(i)));
           }
-          uint32_t index = imported_function_table_.LookupOrInsertImport(
+          uint32_t index = imported_function_table_.LookupOrInsertImportUse(
               vp->var(), sig.Build());
           VisitCallArgs(expr);
-          current_function_builder_->AddAsmWasmOffset(expr->position());
+          // For non-void functions, we must know the parent node.
+          DCHECK_IMPLIES(returns_value, parent_binop != nullptr);
+          DCHECK_IMPLIES(returns_value, parent_binop->left() == expr ||
+                                            parent_binop->right() == expr);
+          int pos = expr->position();
+          int parent_pos = returns_value ? parent_binop->position() : pos;
+          current_function_builder_->AddAsmWasmOffset(pos, parent_pos);
           current_function_builder_->Emit(kExprCallFunction);
-          current_function_builder_->EmitVarInt(index);
+          current_function_builder_->EmitVarUint(index);
         } else {
           WasmFunctionBuilder* function = LookupOrInsertFunction(vp->var());
           VisitCallArgs(expr);
-          current_function_builder_->AddAsmWasmOffset(expr->position());
+          current_function_builder_->AddAsmWasmOffset(expr->position(),
+                                                      expr->position());
           current_function_builder_->Emit(kExprCallFunction);
           current_function_builder_->EmitDirectCallIndex(
               function->func_index());
@@ -1348,22 +1485,23 @@
         DCHECK_NOT_NULL(p);
         VariableProxy* var = p->obj()->AsVariableProxy();
         DCHECK_NOT_NULL(var);
-        FunctionTableIndices* indices = LookupFunctionTable(var->var());
+        FunctionTableIndices* indices = LookupOrAddFunctionTable(var, p);
         Visit(p->key());  // TODO(titzer): should use RECURSE()
 
         // We have to use a temporary for the correct order of evaluation.
         current_function_builder_->EmitI32Const(indices->start_index);
         current_function_builder_->Emit(kExprI32Add);
-        WasmTemporary tmp(current_function_builder_, kAstI32);
+        WasmTemporary tmp(current_function_builder_, kWasmI32);
         current_function_builder_->EmitSetLocal(tmp.index());
 
         VisitCallArgs(expr);
 
         current_function_builder_->EmitGetLocal(tmp.index());
-        current_function_builder_->AddAsmWasmOffset(expr->position());
+        current_function_builder_->AddAsmWasmOffset(expr->position(),
+                                                    expr->position());
         current_function_builder_->Emit(kExprCallIndirect);
-        current_function_builder_->EmitVarInt(indices->signature_index);
-        current_function_builder_->EmitVarInt(0);  // table index
+        current_function_builder_->EmitVarUint(indices->signature_index);
+        current_function_builder_->EmitVarUint(0);  // table index
         returns_value =
             builder_->GetSignature(indices->signature_index)->return_count() >
             0;
@@ -1383,7 +1521,7 @@
     RECURSE(Visit(expr->expression()));
     switch (expr->op()) {
       case Token::NOT: {
-        DCHECK_EQ(kAstI32, TypeOf(expr->expression()));
+        DCHECK_EQ(kWasmI32, TypeOf(expr->expression()));
         current_function_builder_->Emit(kExprI32Eqz);
         break;
       }
@@ -1398,10 +1536,11 @@
                                int32_t val) {
     DCHECK_NOT_NULL(expr->right());
     if (expr->op() == op && expr->right()->IsLiteral() &&
-        TypeOf(expr) == kAstI32) {
+        TypeOf(expr) == kWasmI32) {
       Literal* right = expr->right()->AsLiteral();
-      DCHECK(right->raw_value()->IsNumber());
-      if (static_cast<int32_t>(right->raw_value()->AsNumber()) == val) {
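+      // The literal need not be a number here, so check rather than assert.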
+      if (right->raw_value()->IsNumber() &&
+          static_cast<int32_t>(right->raw_value()->AsNumber()) == val) {
         return true;
       }
     }
@@ -1412,7 +1550,7 @@
                                   double val) {
     DCHECK_NOT_NULL(expr->right());
     if (expr->op() == op && expr->right()->IsLiteral() &&
-        TypeOf(expr) == kAstF64) {
+        TypeOf(expr) == kWasmF64) {
       Literal* right = expr->right()->AsLiteral();
       DCHECK(right->raw_value()->IsNumber());
       if (right->raw_value()->AsNumber() == val) {
@@ -1426,7 +1564,7 @@
 
   ConvertOperation MatchOr(BinaryOperation* expr) {
     if (MatchIntBinaryOperation(expr, Token::BIT_OR, 0) &&
-        (TypeOf(expr->left()) == kAstI32)) {
+        (TypeOf(expr->left()) == kWasmI32)) {
       return kAsIs;
     } else {
       return kNone;
@@ -1436,7 +1574,7 @@
   ConvertOperation MatchShr(BinaryOperation* expr) {
     if (MatchIntBinaryOperation(expr, Token::SHR, 0)) {
       // TODO(titzer): this probably needs to be kToUint
-      return (TypeOf(expr->left()) == kAstI32) ? kAsIs : kToInt;
+      return (TypeOf(expr->left()) == kWasmI32) ? kAsIs : kToInt;
     } else {
       return kNone;
     }
@@ -1444,13 +1582,13 @@
 
   ConvertOperation MatchXor(BinaryOperation* expr) {
     if (MatchIntBinaryOperation(expr, Token::BIT_XOR, 0xffffffff)) {
-      DCHECK_EQ(kAstI32, TypeOf(expr->left()));
-      DCHECK_EQ(kAstI32, TypeOf(expr->right()));
+      DCHECK_EQ(kWasmI32, TypeOf(expr->left()));
+      DCHECK_EQ(kWasmI32, TypeOf(expr->right()));
       BinaryOperation* op = expr->left()->AsBinaryOperation();
       if (op != nullptr) {
         if (MatchIntBinaryOperation(op, Token::BIT_XOR, 0xffffffff)) {
-          DCHECK_EQ(kAstI32, TypeOf(op->right()));
-          if (TypeOf(op->left()) != kAstI32) {
+          DCHECK_EQ(kWasmI32, TypeOf(op->right()));
+          if (TypeOf(op->left()) != kWasmI32) {
             return kToInt;
           } else {
             return kAsIs;
@@ -1463,8 +1601,8 @@
 
   ConvertOperation MatchMul(BinaryOperation* expr) {
     if (MatchDoubleBinaryOperation(expr, Token::MUL, 1.0)) {
-      DCHECK_EQ(kAstF64, TypeOf(expr->right()));
-      if (TypeOf(expr->left()) != kAstF64) {
+      DCHECK_EQ(kWasmF64, TypeOf(expr->right()));
+      if (TypeOf(expr->left()) != kWasmF64) {
         return kToDouble;
       } else {
         return kAsIs;
@@ -1532,6 +1670,7 @@
   void VisitBinaryOperation(BinaryOperation* expr) {
     ConvertOperation convertOperation = MatchBinaryOperation(expr);
     static const bool kDontIgnoreSign = false;
+    parent_binop_ = expr;
     if (convertOperation == kToDouble) {
       RECURSE(Visit(expr->left()));
       TypeIndex type = TypeIndexOf(expr->left(), kDontIgnoreSign);
@@ -1694,6 +1833,9 @@
   void VisitDeclarations(Declaration::List* decls) {
     for (Declaration* decl : *decls) {
       RECURSE(Visit(decl));
+      if (typer_failed_) {
+        return;
+      }
     }
   }
 
@@ -1719,7 +1861,7 @@
     uint32_t index;
   };
 
-  uint32_t LookupOrInsertLocal(Variable* v, LocalType type) {
+  uint32_t LookupOrInsertLocal(Variable* v, ValueType type) {
     DCHECK_NOT_NULL(current_function_builder_);
     ZoneHashMap::Entry* entry =
         local_variables_.Lookup(v, ComputePointerHash(v));
@@ -1736,7 +1878,7 @@
     return (reinterpret_cast<IndexContainer*>(entry->value))->index;
   }
 
-  void InsertParameter(Variable* v, LocalType type, uint32_t index) {
+  void InsertParameter(Variable* v, ValueType type, uint32_t index) {
     DCHECK(v->IsParameter());
     DCHECK_NOT_NULL(current_function_builder_);
     ZoneHashMap::Entry* entry =
@@ -1749,7 +1891,7 @@
     entry->value = container;
   }
 
-  uint32_t LookupOrInsertGlobal(Variable* v, LocalType type) {
+  uint32_t LookupOrInsertGlobal(Variable* v, ValueType type) {
     ZoneHashMap::Entry* entry =
         global_variables_.Lookup(v, ComputePointerHash(v));
     if (entry == nullptr) {
@@ -1770,14 +1912,14 @@
       auto* func_type = typer_->TypeOf(v)->AsFunctionType();
       DCHECK_NOT_NULL(func_type);
       // Build the signature for the function.
-      LocalType return_type = TypeFrom(func_type->ReturnType());
+      ValueType return_type = TypeFrom(func_type->ReturnType());
       const auto& arguments = func_type->Arguments();
-      FunctionSig::Builder b(zone(), return_type == kAstStmt ? 0 : 1,
+      FunctionSig::Builder b(zone(), return_type == kWasmStmt ? 0 : 1,
                              arguments.size());
-      if (return_type != kAstStmt) b.AddReturn(return_type);
+      if (return_type != kWasmStmt) b.AddReturn(return_type);
       for (int i = 0; i < static_cast<int>(arguments.size()); ++i) {
-        LocalType type = TypeFrom(arguments[i]);
-        DCHECK_NE(kAstStmt, type);
+        ValueType type = TypeFrom(arguments[i]);
+        DCHECK_NE(kWasmStmt, type);
         b.AddParam(type);
       }
 
@@ -1792,22 +1934,22 @@
     return (reinterpret_cast<WasmFunctionBuilder*>(entry->value));
   }
 
-  LocalType TypeOf(Expression* expr) { return TypeFrom(typer_->TypeOf(expr)); }
+  ValueType TypeOf(Expression* expr) { return TypeFrom(typer_->TypeOf(expr)); }
 
-  LocalType TypeFrom(AsmType* type) {
+  ValueType TypeFrom(AsmType* type) {
     if (type->IsA(AsmType::Intish())) {
-      return kAstI32;
+      return kWasmI32;
     }
 
     if (type->IsA(AsmType::Floatish())) {
-      return kAstF32;
+      return kWasmF32;
     }
 
     if (type->IsA(AsmType::DoubleQ())) {
-      return kAstF64;
+      return kWasmF64;
     }
 
-    return kAstStmt;
+    return kWasmStmt;
   }
 
   Zone* zone() { return zone_; }
@@ -1821,14 +1963,22 @@
   FunctionLiteral* literal_;
   Isolate* isolate_;
   Zone* zone_;
+  CompilationInfo* info_;
+  AstValueFactory* ast_value_factory_;
+  Handle<Script> script_;
   AsmTyper* typer_;
-  ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
+  bool typer_failed_;
+  bool typer_finished_;
+  ZoneVector<std::pair<BreakableStatement*, TargetType>> breakable_blocks_;
   ZoneVector<ForeignVariable> foreign_variables_;
   WasmFunctionBuilder* init_function_;
   WasmFunctionBuilder* foreign_init_function_;
   uint32_t next_table_index_;
   ZoneHashMap function_tables_;
   ImportedFunctionTable imported_function_table_;
+  // Remember the parent node for reporting the correct location for ToNumber
+  // conversions after calls.
+  BinaryOperation* parent_binop_;
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 
@@ -1836,22 +1986,27 @@
   DISALLOW_COPY_AND_ASSIGN(AsmWasmBuilderImpl);
 };
 
-AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
-                               FunctionLiteral* literal, AsmTyper* typer)
-    : isolate_(isolate), zone_(zone), literal_(literal), typer_(typer) {}
+AsmWasmBuilder::AsmWasmBuilder(CompilationInfo* info)
+    : info_(info),
+      typer_(info->isolate(), info->zone(), info->script(), info->literal()) {}
 
 // TODO(aseemgarg): probably should take zone (to write wasm to) as input so
 // that zone in constructor may be thrown away once wasm module is written.
-AsmWasmBuilder::Result AsmWasmBuilder::Run(
-    i::Handle<i::FixedArray>* foreign_args) {
-  AsmWasmBuilderImpl impl(isolate_, zone_, literal_, typer_);
-  impl.Build();
+AsmWasmBuilder::Result AsmWasmBuilder::Run(Handle<FixedArray>* foreign_args) {
+  HistogramTimerScope asm_wasm_time_scope(
+      info_->isolate()->counters()->asm_wasm_translation_time());
+
+  Zone* zone = info_->zone();
+  AsmWasmBuilderImpl impl(info_->isolate(), zone, info_,
+                          info_->parse_info()->ast_value_factory(),
+                          info_->script(), info_->literal(), &typer_);
+  bool success = impl.Build();
   *foreign_args = impl.GetForeignArgs();
-  ZoneBuffer* module_buffer = new (zone_) ZoneBuffer(zone_);
+  ZoneBuffer* module_buffer = new (zone) ZoneBuffer(zone);
   impl.builder_->WriteTo(*module_buffer);
-  ZoneBuffer* asm_offsets_buffer = new (zone_) ZoneBuffer(zone_);
+  ZoneBuffer* asm_offsets_buffer = new (zone) ZoneBuffer(zone);
   impl.builder_->WriteAsmJsOffsetTable(*asm_offsets_buffer);
-  return {module_buffer, asm_offsets_buffer};
+  return {module_buffer, asm_offsets_buffer, success};
 }
 
 const char* AsmWasmBuilder::foreign_init_name = "__foreign_init__";
diff --git a/src/asmjs/asm-wasm-builder.h b/src/asmjs/asm-wasm-builder.h
index f234abd..a5db096 100644
--- a/src/asmjs/asm-wasm-builder.h
+++ b/src/asmjs/asm-wasm-builder.h
@@ -14,7 +14,7 @@
 namespace v8 {
 namespace internal {
 
-class FunctionLiteral;
+class CompilationInfo;
 
 namespace wasm {
 
@@ -23,20 +23,20 @@
   struct Result {
     ZoneBuffer* module_bytes;
     ZoneBuffer* asm_offset_table;
+    bool success;
   };
 
-  explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root,
-                          AsmTyper* typer);
+  explicit AsmWasmBuilder(CompilationInfo* info);
   Result Run(Handle<FixedArray>* foreign_args);
 
   static const char* foreign_init_name;
   static const char* single_function_name;
 
+  const AsmTyper* typer() { return &typer_; }
+
  private:
-  Isolate* isolate_;
-  Zone* zone_;
-  FunctionLiteral* literal_;
-  AsmTyper* typer_;
+  CompilationInfo* info_;
+  AsmTyper typer_;
 };
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/assembler-inl.h b/src/assembler-inl.h
new file mode 100644
index 0000000..24d0377
--- /dev/null
+++ b/src/assembler-inl.h
@@ -0,0 +1,34 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ASSEMBLER_INL_H_
+#define V8_ASSEMBLER_INL_H_
+
+#include "src/assembler.h"
+
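+// Dispatch to the architecture-specific assembler-*-inl.h so that clients
+// can include this single header instead of duplicating the #if ladder.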
+#if V8_TARGET_ARCH_IA32
+#include "src/ia32/assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/assembler-arm64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/arm/assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/assembler-ppc-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/assembler-mips-inl.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/assembler-mips64-inl.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/assembler-s390-inl.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/assembler-x87-inl.h"
+#else
+#error Unknown architecture.
+#endif
+
+#endif  // V8_ASSEMBLER_INL_H_
diff --git a/src/assembler.cc b/src/assembler.cc
index a2c0ebe..d945cc4 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -35,8 +35,11 @@
 #include "src/assembler.h"
 
 #include <math.h>
+#include <string.h>
 #include <cmath>
+
 #include "src/api.h"
+#include "src/assembler-inl.h"
 #include "src/base/cpu.h"
 #include "src/base/functional.h"
 #include "src/base/ieee754.h"
@@ -62,28 +65,6 @@
 #include "src/snapshot/serializer-common.h"
 #include "src/wasm/wasm-external-refs.h"
 
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/assembler-ppc-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/assembler-mips64-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/s390/assembler-s390-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/x87/assembler-x87-inl.h"  // NOLINT
-#else
-#error "Unknown architecture."
-#endif
-
 // Include native regexp-macro-assembler.
 #ifndef V8_INTERPRETED_REGEXP
 #if V8_TARGET_ARCH_IA32
@@ -254,17 +235,6 @@
 unsigned CpuFeatures::dcache_line_size_ = 0;
 
 // -----------------------------------------------------------------------------
-// Implementation of Label
-
-int Label::pos() const {
-  if (pos_ < 0) return -pos_ - 1;
-  if (pos_ > 0) return  pos_ - 1;
-  UNREACHABLE();
-  return 0;
-}
-
-
-// -----------------------------------------------------------------------------
 // Implementation of RelocInfoWriter and RelocIterator
 //
 // Relocation information is written backwards in memory, from high addresses
@@ -338,26 +308,26 @@
 const int kDeoptReasonTag = 1;
 
 void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
-  if (IsWasmMemoryReference(rmode_)) {
-    Address updated_reference;
-    DCHECK_GE(wasm_memory_reference(), old_base);
-    updated_reference = new_base + (wasm_memory_reference() - old_base);
-    // The reference is not checked here but at runtime. Validity of references
-    // may change over time.
-    unchecked_update_wasm_memory_reference(updated_reference,
-                                           icache_flush_mode);
-  } else if (IsWasmMemorySizeReference(rmode_)) {
-    uint32_t current_size_reference = wasm_memory_size_reference();
-    uint32_t updated_size_reference =
-        new_size + (current_size_reference - old_size);
-    unchecked_update_wasm_memory_size(updated_size_reference,
-                                      icache_flush_mode);
-  } else {
-    UNREACHABLE();
+    Address old_base, Address new_base, ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemoryReference(rmode_));
+  DCHECK_GE(wasm_memory_reference(), old_base);
+  Address updated_reference = new_base + (wasm_memory_reference() - old_base);
+  // The reference is not checked here but at runtime. Validity of references
+  // may change over time.
+  unchecked_update_wasm_memory_reference(updated_reference, icache_flush_mode);
+  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+    Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
   }
+}
+
+void RelocInfo::update_wasm_memory_size(uint32_t old_size, uint32_t new_size,
+                                        ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmMemorySizeReference(rmode_));
+  uint32_t current_size_reference = wasm_memory_size_reference();
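+  // Rebase the embedded size constant, preserving its offset from old_size.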
+  uint32_t updated_size_reference =
+      new_size + (current_size_reference - old_size);
+  unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
     Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
   }
@@ -378,6 +347,18 @@
   }
 }
 
+void RelocInfo::update_wasm_function_table_size_reference(
+    uint32_t old_size, uint32_t new_size, ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+  uint32_t current_size_reference = wasm_function_table_size_reference();
+  uint32_t updated_size_reference =
+      new_size + (current_size_reference - old_size);
+  unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
+  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+    Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
+  }
+}
+
 void RelocInfo::set_target_address(Address target,
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
@@ -496,7 +477,8 @@
       WriteData(rinfo->data());
     } else if (RelocInfo::IsConstPool(rmode) ||
                RelocInfo::IsVeneerPool(rmode) || RelocInfo::IsDeoptId(rmode) ||
-               RelocInfo::IsDeoptPosition(rmode)) {
+               RelocInfo::IsDeoptPosition(rmode) ||
+               RelocInfo::IsWasmProtectedLanding(rmode)) {
       WriteIntData(static_cast<int>(rinfo->data()));
     }
   }
@@ -645,7 +627,8 @@
         } else if (RelocInfo::IsConstPool(rmode) ||
                    RelocInfo::IsVeneerPool(rmode) ||
                    RelocInfo::IsDeoptId(rmode) ||
-                   RelocInfo::IsDeoptPosition(rmode)) {
+                   RelocInfo::IsDeoptPosition(rmode) ||
+                   RelocInfo::IsWasmProtectedLanding(rmode)) {
           if (SetMode(rmode)) {
             AdvanceReadInt();
             return;
@@ -742,8 +725,6 @@
       return "no reloc 64";
     case EMBEDDED_OBJECT:
       return "embedded object";
-    case DEBUGGER_STATEMENT:
-      return "debugger statement";
     case CODE_TARGET:
       return "code target";
     case CODE_TARGET_WITH_ID:
@@ -782,14 +763,16 @@
       return "debug break slot at tail call";
     case CODE_AGE_SEQUENCE:
       return "code age sequence";
-    case GENERATOR_CONTINUATION:
-      return "generator continuation";
     case WASM_MEMORY_REFERENCE:
       return "wasm memory reference";
     case WASM_MEMORY_SIZE_REFERENCE:
       return "wasm memory size reference";
     case WASM_GLOBAL_REFERENCE:
       return "wasm global value reference";
+    case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
+      return "wasm function table size reference";
+    case WASM_PROTECTED_INSTRUCTION_LANDING:
+      return "wasm protected instruction landing";
     case NUMBER_OF_MODES:
     case PC_JUMP:
       UNREACHABLE();
@@ -849,7 +832,6 @@
     case CELL:
       Object::VerifyPointer(target_cell());
       break;
-    case DEBUGGER_STATEMENT:
     case CODE_TARGET_WITH_ID:
     case CODE_TARGET: {
       // convert inline target address to code object
@@ -884,10 +866,12 @@
     case DEBUG_BREAK_SLOT_AT_RETURN:
     case DEBUG_BREAK_SLOT_AT_CALL:
     case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
-    case GENERATOR_CONTINUATION:
     case WASM_MEMORY_REFERENCE:
     case WASM_MEMORY_SIZE_REFERENCE:
     case WASM_GLOBAL_REFERENCE:
+    case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
+    case WASM_PROTECTED_INSTRUCTION_LANDING:
+    // TODO(eholk): make sure the protected instruction is in range.
     case NONE32:
     case NONE64:
       break;
@@ -1204,6 +1188,12 @@
   return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_mod_wrapper)));
 }
 
+ExternalReference ExternalReference::wasm_call_trap_callback_for_testing(
+    Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::call_trap_callback_for_testing)));
+}
+
 ExternalReference ExternalReference::log_enter_external_function(
     Isolate* isolate) {
   return ExternalReference(
@@ -1548,6 +1538,16 @@
       Redirect(isolate, FUNCTION_ADDR(base::ieee754::tanh), BUILTIN_FP_CALL));
 }
 
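+// Plain C++ wrapper around memchr, so that FUNCTION_ADDR below has an
+// ordinary function to take the address of (memchr may be a macro/builtin).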
+void* libc_memchr(void* string, int character, size_t search_length) {
+  return memchr(string, character, search_length);
+}
+
+ExternalReference ExternalReference::libc_memchr_function(Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memchr)));
+}
+
 ExternalReference ExternalReference::page_flags(Page* page) {
   return ExternalReference(reinterpret_cast<Address>(page) +
                            MemoryChunk::kFlagsOffset);
@@ -1569,18 +1567,21 @@
   return ExternalReference(isolate->is_tail_call_elimination_enabled_address());
 }
 
+ExternalReference ExternalReference::promise_hook_or_debug_is_active_address(
+    Isolate* isolate) {
+  return ExternalReference(isolate->promise_hook_or_debug_is_active_address());
+}
+
 ExternalReference ExternalReference::debug_is_active_address(
     Isolate* isolate) {
   return ExternalReference(isolate->debug()->is_active_address());
 }
 
-
-ExternalReference ExternalReference::debug_after_break_target_address(
+ExternalReference ExternalReference::debug_hook_on_function_call_address(
     Isolate* isolate) {
-  return ExternalReference(isolate->debug()->after_break_target_address());
+  return ExternalReference(isolate->debug()->hook_on_function_call_address());
 }
 
-
 ExternalReference ExternalReference::runtime_function_table_address(
     Isolate* isolate) {
   return ExternalReference(
@@ -1661,6 +1662,11 @@
   return ExternalReference(isolate->debug()->suspended_generator_address());
 }
 
+ExternalReference ExternalReference::debug_restart_fp_address(
+    Isolate* isolate) {
+  return ExternalReference(isolate->debug()->restart_fp_address());
+}
+
 ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
   return ExternalReference(reinterpret_cast<void*>(
       FixedTypedArrayBase::kDataOffset - kHeapObjectTag));
@@ -1914,12 +1920,6 @@
 }
 
 
-void Assembler::RecordGeneratorContinuation() {
-  EnsureSpace ensure_space(this);
-  RecordRelocInfo(RelocInfo::GENERATOR_CONTINUATION);
-}
-
-
 void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode) {
   EnsureSpace ensure_space(this);
   DCHECK(RelocInfo::IsDebugBreakSlot(mode));
diff --git a/src/assembler.h b/src/assembler.h
index 2169b15..856072f 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -40,6 +40,7 @@
 #include "src/deoptimize-reason.h"
 #include "src/globals.h"
 #include "src/isolate.h"
+#include "src/label.h"
 #include "src/log.h"
 #include "src/register-configuration.h"
 #include "src/runtime/runtime.h"
@@ -272,79 +273,6 @@
 };
 
 
-// -----------------------------------------------------------------------------
-// Labels represent pc locations; they are typically jump or call targets.
-// After declaration, a label can be freely used to denote known or (yet)
-// unknown pc location. Assembler::bind() is used to bind a label to the
-// current pc. A label can be bound only once.
-
-class Label {
- public:
-  enum Distance {
-    kNear, kFar
-  };
-
-  INLINE(Label()) {
-    Unuse();
-    UnuseNear();
-  }
-
-  INLINE(~Label()) {
-    DCHECK(!is_linked());
-    DCHECK(!is_near_linked());
-  }
-
-  INLINE(void Unuse()) { pos_ = 0; }
-  INLINE(void UnuseNear()) { near_link_pos_ = 0; }
-
-  INLINE(bool is_bound() const) { return pos_ <  0; }
-  INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
-  INLINE(bool is_linked() const) { return pos_ >  0; }
-  INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }
-
-  // Returns the position of bound or linked labels. Cannot be used
-  // for unused labels.
-  int pos() const;
-  int near_link_pos() const { return near_link_pos_ - 1; }
-
- private:
-  // pos_ encodes both the binding state (via its sign)
-  // and the binding position (via its value) of a label.
-  //
-  // pos_ <  0  bound label, pos() returns the jump target position
-  // pos_ == 0  unused label
-  // pos_ >  0  linked label, pos() returns the last reference position
-  int pos_;
-
-  // Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
-  int near_link_pos_;
-
-  void bind_to(int pos)  {
-    pos_ = -pos - 1;
-    DCHECK(is_bound());
-  }
-  void link_to(int pos, Distance distance = kFar) {
-    if (distance == kNear) {
-      near_link_pos_ = pos + 1;
-      DCHECK(is_near_linked());
-    } else {
-      pos_ = pos + 1;
-      DCHECK(is_linked());
-    }
-  }
-
-  friend class Assembler;
-  friend class Displacement;
-  friend class RegExpMacroAssemblerIrregexp;
-
-#if V8_TARGET_ARCH_ARM64
-  // On ARM64, the Assembler keeps track of pointers to Labels to resolve
-  // branches to distant targets. Copying labels would confuse the Assembler.
-  DISALLOW_COPY_AND_ASSIGN(Label);  // NOLINT
-#endif
-};
-
-
 enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
 
 enum ArgvMode { kArgvOnStack, kArgvInRegister };
@@ -389,12 +317,13 @@
     // Please note the order is important (see IsCodeTarget, IsGCRelocMode).
     CODE_TARGET,  // Code target which is not any of the above.
     CODE_TARGET_WITH_ID,
-    DEBUGGER_STATEMENT,  // Code target for the debugger statement.
     EMBEDDED_OBJECT,
     // To relocate pointers into the wasm memory embedded in wasm code
     WASM_MEMORY_REFERENCE,
     WASM_GLOBAL_REFERENCE,
     WASM_MEMORY_SIZE_REFERENCE,
+    WASM_FUNCTION_TABLE_SIZE_REFERENCE,
+    WASM_PROTECTED_INSTRUCTION_LANDING,
     CELL,
 
     // Everything after runtime_entry (inclusive) is not GC'ed.
@@ -413,9 +342,6 @@
     // Encoded internal reference, used only on MIPS, MIPS64 and PPC.
     INTERNAL_REFERENCE_ENCODED,
 
-    // Continuation points for a generator yield.
-    GENERATOR_CONTINUATION,
-
     // Marks constant and veneer pools. Only used on ARM and ARM64.
     // They use a custom noncompact encoding.
     CONST_POOL,
@@ -439,8 +365,8 @@
 
     FIRST_REAL_RELOC_MODE = CODE_TARGET,
     LAST_REAL_RELOC_MODE = VENEER_POOL,
-    LAST_CODE_ENUM = DEBUGGER_STATEMENT,
-    LAST_GCED_ENUM = WASM_MEMORY_SIZE_REFERENCE,
+    LAST_CODE_ENUM = CODE_TARGET_WITH_ID,
+    LAST_GCED_ENUM = WASM_FUNCTION_TABLE_SIZE_REFERENCE,
     FIRST_SHAREABLE_RELOC_MODE = CELL,
   };
 
@@ -515,18 +441,12 @@
   static inline bool IsDebugBreakSlotAtTailCall(Mode mode) {
     return mode == DEBUG_BREAK_SLOT_AT_TAIL_CALL;
   }
-  static inline bool IsDebuggerStatement(Mode mode) {
-    return mode == DEBUGGER_STATEMENT;
-  }
   static inline bool IsNone(Mode mode) {
     return mode == NONE32 || mode == NONE64;
   }
   static inline bool IsCodeAgeSequence(Mode mode) {
     return mode == CODE_AGE_SEQUENCE;
   }
-  static inline bool IsGeneratorContinuation(Mode mode) {
-    return mode == GENERATOR_CONTINUATION;
-  }
   static inline bool IsWasmMemoryReference(Mode mode) {
     return mode == WASM_MEMORY_REFERENCE;
   }
@@ -536,6 +456,25 @@
   static inline bool IsWasmGlobalReference(Mode mode) {
     return mode == WASM_GLOBAL_REFERENCE;
   }
+  static inline bool IsWasmFunctionTableSizeReference(Mode mode) {
+    return mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
+  }
+  static inline bool IsWasmReference(Mode mode) {
+    return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE ||
+           mode == WASM_MEMORY_SIZE_REFERENCE ||
+           mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
+  }
+  static inline bool IsWasmSizeReference(Mode mode) {
+    return mode == WASM_MEMORY_SIZE_REFERENCE ||
+           mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
+  }
+  static inline bool IsWasmPtrReference(Mode mode) {
+    return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE;
+  }
+  static inline bool IsWasmProtectedLanding(Mode mode) {
+    return mode == WASM_PROTECTED_INSTRUCTION_LANDING;
+  }
+
   static inline int ModeMask(Mode mode) { return 1 << mode; }
 
   // Accessors
@@ -564,13 +503,20 @@
 
   Address wasm_memory_reference();
   Address wasm_global_reference();
+  uint32_t wasm_function_table_size_reference();
   uint32_t wasm_memory_size_reference();
   void update_wasm_memory_reference(
-      Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
+      Address old_base, Address new_base,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+  void update_wasm_memory_size(
+      uint32_t old_size, uint32_t new_size,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
   void update_wasm_global_reference(
       Address old_base, Address new_base,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+  void update_wasm_function_table_size_reference(
+      uint32_t old_size, uint32_t new_size,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
   void set_target_address(
       Address target,
       WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
@@ -679,8 +625,7 @@
  private:
   void unchecked_update_wasm_memory_reference(Address address,
                                               ICacheFlushMode flush_mode);
-  void unchecked_update_wasm_memory_size(uint32_t size,
-                                         ICacheFlushMode flush_mode);
+  void unchecked_update_wasm_size(uint32_t size, ICacheFlushMode flush_mode);
 
   Isolate* isolate_;
   // On ARM, note that pc_ is the address of the constant pool entry
@@ -949,6 +894,10 @@
   static ExternalReference f64_asin_wrapper_function(Isolate* isolate);
   static ExternalReference f64_mod_wrapper_function(Isolate* isolate);
 
+  // Trap callback function for cctest/wasm/wasm-run-utils.h
+  static ExternalReference wasm_call_trap_callback_for_testing(
+      Isolate* isolate);
+
   // Log support.
   static ExternalReference log_enter_external_function(Isolate* isolate);
   static ExternalReference log_leave_external_function(Isolate* isolate);
@@ -1031,6 +980,8 @@
   static ExternalReference ieee754_tan_function(Isolate* isolate);
   static ExternalReference ieee754_tanh_function(Isolate* isolate);
 
+  static ExternalReference libc_memchr_function(Isolate* isolate);
+
   static ExternalReference page_flags(Page* page);
 
   static ExternalReference ForDeoptEntry(Address entry);
@@ -1041,12 +992,17 @@
       Isolate* isolate);
 
   static ExternalReference debug_is_active_address(Isolate* isolate);
+  static ExternalReference debug_hook_on_function_call_address(
+      Isolate* isolate);
   static ExternalReference debug_after_break_target_address(Isolate* isolate);
 
   static ExternalReference is_profiling_address(Isolate* isolate);
   static ExternalReference invoke_function_callback(Isolate* isolate);
   static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
 
+  static ExternalReference promise_hook_or_debug_is_active_address(
+      Isolate* isolate);
+
   V8_EXPORT_PRIVATE static ExternalReference runtime_function_table_address(
       Isolate* isolate);
 
@@ -1058,6 +1014,9 @@
   // Used to check for suspended generator, used for stepping across await call.
   static ExternalReference debug_suspended_generator_address(Isolate* isolate);
 
+  // Used to store the frame pointer to drop to when restarting a frame.
+  static ExternalReference debug_restart_fp_address(Isolate* isolate);
+
 #ifndef V8_INTERPRETED_REGEXP
   // C functions called from RegExp generated code.
 
@@ -1117,6 +1076,7 @@
 
 // -----------------------------------------------------------------------------
 // Utility functions
+void* libc_memchr(void* string, int character, size_t search_length);
 
 inline int NumberOfBitsSet(uint32_t x) {
   unsigned int num_bits_set;
@@ -1144,7 +1104,7 @@
   // Called just after emitting a call, i.e., at the return site for the call.
   virtual void AfterCall() const = 0;
   // Return whether call needs to check for debug stepping.
-  virtual bool NeedsDebugStepCheck() const { return false; }
+  virtual bool NeedsDebugHookCheck() const { return false; }
 };
 
 
@@ -1163,7 +1123,7 @@
   virtual ~CheckDebugStepCallWrapper() {}
   virtual void BeforeCall(int call_size) const {}
   virtual void AfterCall() const {}
-  virtual bool NeedsDebugStepCheck() const { return true; }
+  virtual bool NeedsDebugHookCheck() const { return true; }
 };
 
 
diff --git a/src/assert-scope.cc b/src/assert-scope.cc
index 3852709..8754cca 100644
--- a/src/assert-scope.cc
+++ b/src/assert-scope.cc
@@ -6,7 +6,6 @@
 
 #include "src/base/lazy-instance.h"
 #include "src/base/platform/platform.h"
-#include "src/debug/debug.h"
 #include "src/isolate.h"
 #include "src/utils.h"
 
@@ -83,15 +82,23 @@
 
 template <PerThreadAssertType kType, bool kAllow>
 PerThreadAssertScope<kType, kAllow>::~PerThreadAssertScope() {
+  if (data_ == nullptr) return;
+  Release();
+}
+
+template <PerThreadAssertType kType, bool kAllow>
+void PerThreadAssertScope<kType, kAllow>::Release() {
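+  // Restore the previous assert state and clear |data_|; this also makes the
+  // destructor a no-op afterwards.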
   DCHECK_NOT_NULL(data_);
   data_->Set(kType, old_state_);
   if (data_->DecrementLevel()) {
     PerThreadAssertData::SetCurrent(NULL);
     delete data_;
   }
+  data_ = nullptr;
 }
 
-
 // static
 template <PerThreadAssertType kType, bool kAllow>
 bool PerThreadAssertScope<kType, kAllow>::IsAllowed() {
@@ -149,6 +154,8 @@
 template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, true>;
 template class PerIsolateAssertScope<COMPILATION_ASSERT, false>;
 template class PerIsolateAssertScope<COMPILATION_ASSERT, true>;
+template class PerIsolateAssertScope<NO_EXCEPTION_ASSERT, false>;
+template class PerIsolateAssertScope<NO_EXCEPTION_ASSERT, true>;
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/assert-scope.h b/src/assert-scope.h
index fde49f8..981a037 100644
--- a/src/assert-scope.h
+++ b/src/assert-scope.h
@@ -26,12 +26,12 @@
   LAST_PER_THREAD_ASSERT_TYPE
 };
 
-
 enum PerIsolateAssertType {
   JAVASCRIPT_EXECUTION_ASSERT,
   JAVASCRIPT_EXECUTION_THROWS,
   DEOPTIMIZATION_ASSERT,
-  COMPILATION_ASSERT
+  COMPILATION_ASSERT,
+  NO_EXCEPTION_ASSERT
 };
 
 template <PerThreadAssertType kType, bool kAllow>
@@ -42,6 +42,8 @@
 
   V8_EXPORT_PRIVATE static bool IsAllowed();
 
+  void Release();
+
  private:
   PerThreadAssertData* data_;
   bool old_state_;
@@ -76,6 +78,7 @@
 class PerThreadAssertScopeDebugOnly {
  public:
   PerThreadAssertScopeDebugOnly() { }
+  void Release() {}
 #endif
 };
 
@@ -147,6 +150,14 @@
 typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>
     AllowJavascriptExecution;
 
+// Scope to document where we do not expect javascript execution (debug only)
+typedef PerIsolateAssertScopeDebugOnly<JAVASCRIPT_EXECUTION_ASSERT, false>
+    DisallowJavascriptExecutionDebugOnly;
+
+// Scope to introduce an exception to DisallowJavascriptExecutionDebugOnly.
+typedef PerIsolateAssertScopeDebugOnly<JAVASCRIPT_EXECUTION_ASSERT, true>
+    AllowJavascriptExecutionDebugOnly;
+
 // Scope in which javascript execution leads to exception being thrown.
 typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>
     ThrowOnJavascriptExecution;
@@ -170,6 +181,14 @@
 // Scope to introduce an exception to DisallowDeoptimization.
 typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, true>
     AllowCompilation;
+
+// Scope to document where we do not expect exceptions.
+typedef PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, false>
+    DisallowExceptions;
+
+// Scope to introduce an exception to DisallowExceptions.
+typedef PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, true>
+    AllowExceptions;
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/ast/OWNERS b/src/ast/OWNERS
index b4e1473..16e048a 100644
--- a/src/ast/OWNERS
+++ b/src/ast/OWNERS
@@ -5,5 +5,6 @@
 littledan@chromium.org
 marja@chromium.org
 mstarzinger@chromium.org
+neis@chromium.org
 rossberg@chromium.org
 verwaest@chromium.org
diff --git a/src/ast/ast-expression-rewriter.cc b/src/ast/ast-expression-rewriter.cc
index d0db9ea..f46e21b 100644
--- a/src/ast/ast-expression-rewriter.cc
+++ b/src/ast/ast-expression-rewriter.cc
@@ -2,8 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/ast/ast.h"
 #include "src/ast/ast-expression-rewriter.h"
+#include "src/ast/ast.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -372,6 +373,9 @@
   NOTHING();
 }
 
+void AstExpressionRewriter::VisitGetIterator(GetIterator* node) {
+  AST_REWRITE_PROPERTY(Expression, node, iterable);
+}
 
 void AstExpressionRewriter::VisitDoExpression(DoExpression* node) {
   REWRITE_THIS(node);
diff --git a/src/ast/ast-function-literal-id-reindexer.cc b/src/ast/ast-function-literal-id-reindexer.cc
new file mode 100644
index 0000000..5cb1e87
--- /dev/null
+++ b/src/ast/ast-function-literal-id-reindexer.cc
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ast/ast-function-literal-id-reindexer.h"
+#include "src/objects-inl.h"
+
+#include "src/ast/ast.h"
+
+namespace v8 {
+namespace internal {
+
+AstFunctionLiteralIdReindexer::AstFunctionLiteralIdReindexer(size_t stack_limit,
+                                                             int delta)
+    : AstTraversalVisitor(stack_limit), delta_(delta) {}
+
+AstFunctionLiteralIdReindexer::~AstFunctionLiteralIdReindexer() {}
+
+void AstFunctionLiteralIdReindexer::Reindex(Expression* pattern) {
+  Visit(pattern);
+}
+
+void AstFunctionLiteralIdReindexer::VisitFunctionLiteral(FunctionLiteral* lit) {
+  AstTraversalVisitor::VisitFunctionLiteral(lit);
+  lit->set_function_literal_id(lit->function_literal_id() + delta_);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/ast/ast-function-literal-id-reindexer.h b/src/ast/ast-function-literal-id-reindexer.h
new file mode 100644
index 0000000..837595f
--- /dev/null
+++ b/src/ast/ast-function-literal-id-reindexer.h
@@ -0,0 +1,36 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
+#define V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
+
+#include "src/ast/ast-traversal-visitor.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Changes the ID of all FunctionLiterals in the given Expression by adding the
+// given delta.
+class AstFunctionLiteralIdReindexer final
+    : public AstTraversalVisitor<AstFunctionLiteralIdReindexer> {
+ public:
+  AstFunctionLiteralIdReindexer(size_t stack_limit, int delta);
+  ~AstFunctionLiteralIdReindexer();
+
+  void Reindex(Expression* pattern);
+
+  // AstTraversalVisitor implementation.
+  void VisitFunctionLiteral(FunctionLiteral* lit);
+
+ private:
+  int delta_;
+
+  DISALLOW_COPY_AND_ASSIGN(AstFunctionLiteralIdReindexer);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
diff --git a/src/ast/ast-literal-reindexer.cc b/src/ast/ast-literal-reindexer.cc
deleted file mode 100644
index 81a5225..0000000
--- a/src/ast/ast-literal-reindexer.cc
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ast/ast-literal-reindexer.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-
-namespace v8 {
-namespace internal {
-
-
-void AstLiteralReindexer::VisitVariableDeclaration(VariableDeclaration* node) {
-  VisitVariableProxy(node->proxy());
-}
-
-
-void AstLiteralReindexer::VisitEmptyStatement(EmptyStatement* node) {}
-
-
-void AstLiteralReindexer::VisitSloppyBlockFunctionStatement(
-    SloppyBlockFunctionStatement* node) {
-  Visit(node->statement());
-}
-
-
-void AstLiteralReindexer::VisitContinueStatement(ContinueStatement* node) {}
-
-
-void AstLiteralReindexer::VisitBreakStatement(BreakStatement* node) {}
-
-
-void AstLiteralReindexer::VisitDebuggerStatement(DebuggerStatement* node) {}
-
-
-void AstLiteralReindexer::VisitNativeFunctionLiteral(
-    NativeFunctionLiteral* node) {}
-
-
-void AstLiteralReindexer::VisitDoExpression(DoExpression* node) {
-  Visit(node->block());
-  Visit(node->result());
-}
-
-
-void AstLiteralReindexer::VisitLiteral(Literal* node) {}
-
-
-void AstLiteralReindexer::VisitRegExpLiteral(RegExpLiteral* node) {
-  UpdateIndex(node);
-}
-
-
-void AstLiteralReindexer::VisitVariableProxy(VariableProxy* node) {}
-
-
-void AstLiteralReindexer::VisitThisFunction(ThisFunction* node) {}
-
-
-void AstLiteralReindexer::VisitSuperPropertyReference(
-    SuperPropertyReference* node) {
-  Visit(node->this_var());
-  Visit(node->home_object());
-}
-
-
-void AstLiteralReindexer::VisitSuperCallReference(SuperCallReference* node) {
-  Visit(node->this_var());
-  Visit(node->new_target_var());
-  Visit(node->this_function_var());
-}
-
-
-void AstLiteralReindexer::VisitRewritableExpression(
-    RewritableExpression* node) {
-  Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitExpressionStatement(ExpressionStatement* node) {
-  Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitReturnStatement(ReturnStatement* node) {
-  Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitYield(Yield* node) {
-  Visit(node->generator_object());
-  Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitThrow(Throw* node) { Visit(node->exception()); }
-
-
-void AstLiteralReindexer::VisitUnaryOperation(UnaryOperation* node) {
-  Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitCountOperation(CountOperation* node) {
-  Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitBlock(Block* node) {
-  VisitStatements(node->statements());
-}
-
-
-void AstLiteralReindexer::VisitFunctionDeclaration(FunctionDeclaration* node) {
-  VisitVariableProxy(node->proxy());
-  VisitFunctionLiteral(node->fun());
-}
-
-
-void AstLiteralReindexer::VisitCallRuntime(CallRuntime* node) {
-  VisitArguments(node->arguments());
-}
-
-
-void AstLiteralReindexer::VisitWithStatement(WithStatement* node) {
-  Visit(node->expression());
-  Visit(node->statement());
-}
-
-
-void AstLiteralReindexer::VisitDoWhileStatement(DoWhileStatement* node) {
-  Visit(node->body());
-  Visit(node->cond());
-}
-
-
-void AstLiteralReindexer::VisitWhileStatement(WhileStatement* node) {
-  Visit(node->cond());
-  Visit(node->body());
-}
-
-
-void AstLiteralReindexer::VisitTryCatchStatement(TryCatchStatement* node) {
-  Visit(node->try_block());
-  Visit(node->catch_block());
-}
-
-
-void AstLiteralReindexer::VisitTryFinallyStatement(TryFinallyStatement* node) {
-  Visit(node->try_block());
-  Visit(node->finally_block());
-}
-
-
-void AstLiteralReindexer::VisitProperty(Property* node) {
-  Visit(node->key());
-  Visit(node->obj());
-}
-
-
-void AstLiteralReindexer::VisitAssignment(Assignment* node) {
-  Visit(node->target());
-  Visit(node->value());
-}
-
-
-void AstLiteralReindexer::VisitBinaryOperation(BinaryOperation* node) {
-  Visit(node->left());
-  Visit(node->right());
-}
-
-
-void AstLiteralReindexer::VisitCompareOperation(CompareOperation* node) {
-  Visit(node->left());
-  Visit(node->right());
-}
-
-
-void AstLiteralReindexer::VisitSpread(Spread* node) {
-  // This is reachable because ParserBase::ParseArrowFunctionLiteral calls
-  // ReindexLiterals before calling RewriteDestructuringAssignments.
-  Visit(node->expression());
-}
-
-
-void AstLiteralReindexer::VisitEmptyParentheses(EmptyParentheses* node) {}
-
-
-void AstLiteralReindexer::VisitForInStatement(ForInStatement* node) {
-  Visit(node->each());
-  Visit(node->enumerable());
-  Visit(node->body());
-}
-
-
-void AstLiteralReindexer::VisitForOfStatement(ForOfStatement* node) {
-  Visit(node->assign_iterator());
-  Visit(node->next_result());
-  Visit(node->result_done());
-  Visit(node->assign_each());
-  Visit(node->body());
-}
-
-
-void AstLiteralReindexer::VisitConditional(Conditional* node) {
-  Visit(node->condition());
-  Visit(node->then_expression());
-  Visit(node->else_expression());
-}
-
-
-void AstLiteralReindexer::VisitIfStatement(IfStatement* node) {
-  Visit(node->condition());
-  Visit(node->then_statement());
-  if (node->HasElseStatement()) {
-    Visit(node->else_statement());
-  }
-}
-
-
-void AstLiteralReindexer::VisitSwitchStatement(SwitchStatement* node) {
-  Visit(node->tag());
-  ZoneList<CaseClause*>* cases = node->cases();
-  for (int i = 0; i < cases->length(); i++) {
-    VisitCaseClause(cases->at(i));
-  }
-}
-
-
-void AstLiteralReindexer::VisitCaseClause(CaseClause* node) {
-  if (!node->is_default()) Visit(node->label());
-  VisitStatements(node->statements());
-}
-
-
-void AstLiteralReindexer::VisitForStatement(ForStatement* node) {
-  if (node->init() != NULL) Visit(node->init());
-  if (node->cond() != NULL) Visit(node->cond());
-  if (node->next() != NULL) Visit(node->next());
-  Visit(node->body());
-}
-
-
-void AstLiteralReindexer::VisitClassLiteral(ClassLiteral* node) {
-  if (node->extends()) Visit(node->extends());
-  if (node->constructor()) Visit(node->constructor());
-  if (node->class_variable_proxy()) {
-    VisitVariableProxy(node->class_variable_proxy());
-  }
-  for (int i = 0; i < node->properties()->length(); i++) {
-    VisitLiteralProperty(node->properties()->at(i));
-  }
-}
-
-void AstLiteralReindexer::VisitObjectLiteral(ObjectLiteral* node) {
-  UpdateIndex(node);
-  for (int i = 0; i < node->properties()->length(); i++) {
-    VisitLiteralProperty(node->properties()->at(i));
-  }
-}
-
-void AstLiteralReindexer::VisitLiteralProperty(LiteralProperty* node) {
-  Visit(node->key());
-  Visit(node->value());
-}
-
-
-void AstLiteralReindexer::VisitArrayLiteral(ArrayLiteral* node) {
-  UpdateIndex(node);
-  for (int i = 0; i < node->values()->length(); i++) {
-    Visit(node->values()->at(i));
-  }
-}
-
-
-void AstLiteralReindexer::VisitCall(Call* node) {
-  Visit(node->expression());
-  VisitArguments(node->arguments());
-}
-
-
-void AstLiteralReindexer::VisitCallNew(CallNew* node) {
-  Visit(node->expression());
-  VisitArguments(node->arguments());
-}
-
-
-void AstLiteralReindexer::VisitStatements(ZoneList<Statement*>* statements) {
-  if (statements == NULL) return;
-  for (int i = 0; i < statements->length(); i++) {
-    Visit(statements->at(i));
-  }
-}
-
-
-void AstLiteralReindexer::VisitDeclarations(
-    ZoneList<Declaration*>* declarations) {
-  for (int i = 0; i < declarations->length(); i++) {
-    Visit(declarations->at(i));
-  }
-}
-
-
-void AstLiteralReindexer::VisitArguments(ZoneList<Expression*>* arguments) {
-  for (int i = 0; i < arguments->length(); i++) {
-    Visit(arguments->at(i));
-  }
-}
-
-
-void AstLiteralReindexer::VisitFunctionLiteral(FunctionLiteral* node) {
-  // We don't recurse into the declarations or body of the function literal:
-}
-
-void AstLiteralReindexer::Reindex(Expression* pattern) { Visit(pattern); }
-}  // namespace internal
-}  // namespace v8
diff --git a/src/ast/ast-literal-reindexer.h b/src/ast/ast-literal-reindexer.h
deleted file mode 100644
index 4e0ca6b..0000000
--- a/src/ast/ast-literal-reindexer.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_AST_AST_LITERAL_REINDEXER
-#define V8_AST_AST_LITERAL_REINDEXER
-
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-
-namespace v8 {
-namespace internal {
-
-class AstLiteralReindexer final : public AstVisitor<AstLiteralReindexer> {
- public:
-  AstLiteralReindexer() : next_index_(0) {}
-
-  int count() const { return next_index_; }
-  void Reindex(Expression* pattern);
-
- private:
-#define DEFINE_VISIT(type) void Visit##type(type* node);
-  AST_NODE_LIST(DEFINE_VISIT)
-#undef DEFINE_VISIT
-
-  void VisitStatements(ZoneList<Statement*>* statements);
-  void VisitDeclarations(ZoneList<Declaration*>* declarations);
-  void VisitArguments(ZoneList<Expression*>* arguments);
-  void VisitLiteralProperty(LiteralProperty* property);
-
-  void UpdateIndex(MaterializedLiteral* literal) {
-    literal->literal_index_ = next_index_++;
-  }
-
-  int next_index_;
-
-  DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW()
-  DISALLOW_COPY_AND_ASSIGN(AstLiteralReindexer);
-};
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_AST_AST_LITERAL_REINDEXER
diff --git a/src/ast/ast-numbering.cc b/src/ast/ast-numbering.cc
index 82f9767..499760d 100644
--- a/src/ast/ast-numbering.cc
+++ b/src/ast/ast-numbering.cc
@@ -6,22 +6,27 @@
 
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
+#include "src/compiler.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
 class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
  public:
-  AstNumberingVisitor(Isolate* isolate, Zone* zone)
-      : isolate_(isolate),
-        zone_(zone),
+  AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
+                      Compiler::EagerInnerFunctionLiterals* eager_literals)
+      : zone_(zone),
+        eager_literals_(eager_literals),
         next_id_(BailoutId::FirstUsable().ToInt()),
         yield_count_(0),
         properties_(zone),
+        language_mode_(SLOPPY),
         slot_cache_(zone),
+        disable_crankshaft_reason_(kNoReason),
         dont_optimize_reason_(kNoReason),
         catch_prediction_(HandlerTable::UNCAUGHT) {
-    InitializeAstVisitor(isolate);
+    InitializeAstVisitor(stack_limit);
   }
 
   bool Renumber(FunctionLiteral* node);
@@ -32,10 +37,12 @@
   AST_NODE_LIST(DEFINE_VISIT)
 #undef DEFINE_VISIT
 
+  void VisitVariableProxy(VariableProxy* node, TypeofMode typeof_mode);
   void VisitVariableProxyReference(VariableProxy* node);
   void VisitPropertyReference(Property* node);
   void VisitReference(Expression* expr);
 
+  void VisitStatementsAndDeclarations(Block* node);
   void VisitStatements(ZoneList<Statement*>* statements);
   void VisitDeclarations(Declaration::List* declarations);
   void VisitArguments(ZoneList<Expression*>* arguments);
@@ -55,25 +62,43 @@
     dont_optimize_reason_ = reason;
     DisableSelfOptimization();
   }
-  void DisableCrankshaft(BailoutReason reason) {
-    properties_.flags() |= AstProperties::kDontCrankshaft;
+  void DisableFullCodegenAndCrankshaft(BailoutReason reason) {
+    disable_crankshaft_reason_ = reason;
+    properties_.flags() |= AstProperties::kMustUseIgnitionTurbo;
   }
 
   template <typename Node>
   void ReserveFeedbackSlots(Node* node) {
-    node->AssignFeedbackVectorSlots(isolate_, properties_.get_spec(),
-                                    &slot_cache_);
+    node->AssignFeedbackSlots(properties_.get_spec(), language_mode_,
+                              &slot_cache_);
   }
 
+  class LanguageModeScope {
+   public:
+    LanguageModeScope(AstNumberingVisitor* visitor, LanguageMode language_mode)
+        : visitor_(visitor), outer_language_mode_(visitor->language_mode_) {
+      visitor_->language_mode_ = language_mode;
+    }
+    ~LanguageModeScope() { visitor_->language_mode_ = outer_language_mode_; }
+
+   private:
+    AstNumberingVisitor* visitor_;
+    LanguageMode outer_language_mode_;
+  };
+
   BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
 
-  Isolate* isolate_;
+  Zone* zone() const { return zone_; }
+
   Zone* zone_;
+  Compiler::EagerInnerFunctionLiterals* eager_literals_;
   int next_id_;
   int yield_count_;
   AstProperties properties_;
-  // The slot cache allows us to reuse certain feedback vector slots.
-  FeedbackVectorSlotCache slot_cache_;
+  LanguageMode language_mode_;
+  // The slot cache allows us to reuse certain feedback slots.
+  FeedbackSlotCache slot_cache_;
+  BailoutReason disable_crankshaft_reason_;
   BailoutReason dont_optimize_reason_;
   HandlerTable::CatchPrediction catch_prediction_;
 
@@ -112,8 +137,7 @@
 
 void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
   IncrementNodeCount();
-  DisableOptimization(kDebuggerStatement);
-  node->set_base_id(ReserveIdRange(DebuggerStatement::num_ids()));
+  DisableFullCodegenAndCrankshaft(kDebuggerStatement);
 }
 
 
@@ -122,6 +146,7 @@
   IncrementNodeCount();
   DisableOptimization(kNativeFunctionLiteral);
   node->set_base_id(ReserveIdRange(NativeFunctionLiteral::num_ids()));
+  ReserveFeedbackSlots(node);
 }
 
 
@@ -142,6 +167,7 @@
 void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
   IncrementNodeCount();
   node->set_base_id(ReserveIdRange(RegExpLiteral::num_ids()));
+  ReserveFeedbackSlots(node);
 }
 
 
@@ -149,10 +175,11 @@
   IncrementNodeCount();
   switch (node->var()->location()) {
     case VariableLocation::LOOKUP:
-      DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
+      DisableFullCodegenAndCrankshaft(
+          kReferenceToAVariableWhichRequiresDynamicLookup);
       break;
     case VariableLocation::MODULE:
-      DisableCrankshaft(kReferenceToModuleVariable);
+      DisableFullCodegenAndCrankshaft(kReferenceToModuleVariable);
       break;
     default:
       break;
@@ -160,10 +187,16 @@
   node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
 }
 
+void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node,
+                                             TypeofMode typeof_mode) {
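+  // The feedback slot kind for a variable load depends on whether the load
+  // occurs inside a typeof expression (see VisitUnaryOperation).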
+  VisitVariableProxyReference(node);
+  node->AssignFeedbackSlots(properties_.get_spec(), typeof_mode, &slot_cache_);
+}
 
 void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
-  VisitVariableProxyReference(node);
-  ReserveFeedbackSlots(node);
+  VisitVariableProxy(node, NOT_INSIDE_TYPEOF);
 }
 
 
@@ -176,7 +207,7 @@
 void AstNumberingVisitor::VisitSuperPropertyReference(
     SuperPropertyReference* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kSuperReference);
+  DisableFullCodegenAndCrankshaft(kSuperReference);
   node->set_base_id(ReserveIdRange(SuperPropertyReference::num_ids()));
   Visit(node->this_var());
   Visit(node->home_object());
@@ -185,7 +216,7 @@
 
 void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kSuperReference);
+  DisableFullCodegenAndCrankshaft(kSuperReference);
   node->set_base_id(ReserveIdRange(SuperCallReference::num_ids()));
   Visit(node->this_var());
   Visit(node->new_target_var());
@@ -202,6 +233,9 @@
 void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
   IncrementNodeCount();
   Visit(node->expression());
+
+  DCHECK(!node->is_async_return() ||
+         properties_.flags() & AstProperties::kMustUseIgnitionTurbo);
 }
 
 
@@ -225,7 +259,12 @@
 void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
   IncrementNodeCount();
   node->set_base_id(ReserveIdRange(UnaryOperation::num_ids()));
-  Visit(node->expression());
+  if ((node->op() == Token::TYPEOF) && node->expression()->IsVariableProxy()) {
+    VariableProxy* proxy = node->expression()->AsVariableProxy();
+    VisitVariableProxy(proxy, INSIDE_TYPEOF);
+  } else {
+    Visit(node->expression());
+  }
 }
 
 
@@ -240,10 +279,21 @@
 void AstNumberingVisitor::VisitBlock(Block* node) {
   IncrementNodeCount();
   node->set_base_id(ReserveIdRange(Block::num_ids()));
-  if (node->scope() != NULL) VisitDeclarations(node->scope()->declarations());
-  VisitStatements(node->statements());
+  Scope* scope = node->scope();
+  if (scope != nullptr) {
+    LanguageModeScope language_mode_scope(this, scope->language_mode());
+    VisitStatementsAndDeclarations(node);
+  } else {
+    VisitStatementsAndDeclarations(node);
+  }
 }
 
+void AstNumberingVisitor::VisitStatementsAndDeclarations(Block* node) {
+  Scope* scope = node->scope();
+  DCHECK(scope == nullptr || !scope->HasBeenRemoved());
+  if (scope) VisitDeclarations(scope->declarations());
+  VisitStatements(node->statements());
+}
 
 void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
   IncrementNodeCount();
@@ -282,8 +332,7 @@
 
 void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kWithStatement);
-  node->set_base_id(ReserveIdRange(WithStatement::num_ids()));
+  DisableFullCodegenAndCrankshaft(kWithStatement);
   Visit(node->expression());
   Visit(node->statement());
 }
@@ -312,8 +361,9 @@
 
 
 void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
+  DCHECK(node->scope() == nullptr || !node->scope()->HasBeenRemoved());
   IncrementNodeCount();
-  DisableCrankshaft(kTryCatchStatement);
+  DisableFullCodegenAndCrankshaft(kTryCatchStatement);
   {
     const HandlerTable::CatchPrediction old_prediction = catch_prediction_;
     // This node uses its own prediction, unless it's "uncaught", in which case
@@ -332,7 +382,7 @@
 
 void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kTryFinallyStatement);
+  DisableFullCodegenAndCrankshaft(kTryFinallyStatement);
   // We can't know whether the finally block will override ("catch") an
   // exception thrown in the try block, so we just adopt the outer prediction.
   node->set_catch_prediction(catch_prediction_);
@@ -393,14 +443,25 @@
   ReserveFeedbackSlots(node);
 }
 
-
-void AstNumberingVisitor::VisitSpread(Spread* node) { UNREACHABLE(); }
-
+void AstNumberingVisitor::VisitSpread(Spread* node) {
+  IncrementNodeCount();
+  // We can only get here from spread calls currently.
+  DisableFullCodegenAndCrankshaft(kSpreadCall);
+  node->set_base_id(ReserveIdRange(Spread::num_ids()));
+  Visit(node->expression());
+}
 
 void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
   UNREACHABLE();
 }
 
+void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
+  IncrementNodeCount();
+  DisableFullCodegenAndCrankshaft(kGetIterator);
+  node->set_base_id(ReserveIdRange(GetIterator::num_ids()));
+  Visit(node->iterable());
+  ReserveFeedbackSlots(node);
+}
 
 void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
   IncrementNodeCount();
@@ -417,7 +478,7 @@
 
 void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kForOfStatement);
+  DisableFullCodegenAndCrankshaft(kForOfStatement);
   node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
   Visit(node->assign_iterator());  // Not part of loop.
   node->set_first_yield_id(yield_count_);
@@ -484,8 +545,8 @@
 
 void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kClassLiteral);
-  node->set_base_id(ReserveIdRange(node->num_ids()));
+  DisableFullCodegenAndCrankshaft(kClassLiteral);
+  node->set_base_id(ReserveIdRange(ClassLiteral::num_ids()));
   if (node->extends()) Visit(node->extends());
   if (node->constructor()) Visit(node->constructor());
   if (node->class_variable_proxy()) {
@@ -504,7 +565,7 @@
   for (int i = 0; i < node->properties()->length(); i++) {
     VisitLiteralProperty(node->properties()->at(i));
   }
-  node->BuildConstantProperties(isolate_);
+  node->InitDepthAndFlags();
   // Mark all computed expressions that are bound to a key that
   // is shadowed by a later occurrence of the same key. For the
   // marked expressions, no store code is emitted.
@@ -513,7 +574,8 @@
 }
 
 void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
-  if (node->is_computed_name()) DisableCrankshaft(kComputedPropertyName);
+  if (node->is_computed_name())
+    DisableFullCodegenAndCrankshaft(kComputedPropertyName);
   Visit(node->key());
   Visit(node->value());
 }
@@ -524,12 +586,15 @@
   for (int i = 0; i < node->values()->length(); i++) {
     Visit(node->values()->at(i));
   }
-  node->BuildConstantElements(isolate_);
+  node->InitDepthAndFlags();
   ReserveFeedbackSlots(node);
 }
 
 
 void AstNumberingVisitor::VisitCall(Call* node) {
+  if (node->is_possibly_eval()) {
+    DisableFullCodegenAndCrankshaft(kFunctionCallsEval);
+  }
   IncrementNodeCount();
   ReserveFeedbackSlots(node);
   node->set_base_id(ReserveIdRange(Call::num_ids()));
@@ -569,8 +634,20 @@
 void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
   IncrementNodeCount();
   node->set_base_id(ReserveIdRange(FunctionLiteral::num_ids()));
-  // We don't recurse into the declarations or body of the function literal:
-  // you have to separately Renumber() each FunctionLiteral that you compile.
+  if (node->ShouldEagerCompile()) {
+    if (eager_literals_) {
+      eager_literals_->Add(new (zone())
+                               ThreadedListZoneEntry<FunctionLiteral*>(node));
+    }
+
+    // If the function literal is being eagerly compiled, recurse into the
+    // declarations and body of the function literal.
+    if (!AstNumbering::Renumber(stack_limit_, zone_, node, eager_literals_)) {
+      SetStackOverflow();
+      return;
+    }
+  }
+  ReserveFeedbackSlots(node);
 }
 
 
@@ -584,37 +661,65 @@
 
 bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
   DeclarationScope* scope = node->scope();
-  if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
-  if (scope->calls_eval()) DisableCrankshaft(kFunctionCallsEval);
-  if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
-    DisableCrankshaft(kContextAllocatedArguments);
+  DCHECK(!scope->HasBeenRemoved());
+
+  if (scope->new_target_var() != nullptr ||
+      scope->this_function_var() != nullptr) {
+    DisableFullCodegenAndCrankshaft(kSuperReference);
+  }
+
+  if (scope->arguments() != nullptr &&
+      !scope->arguments()->IsStackAllocated()) {
+    DisableFullCodegenAndCrankshaft(kContextAllocatedArguments);
   }
 
   if (scope->rest_parameter() != nullptr) {
-    DisableCrankshaft(kRestParameter);
+    DisableFullCodegenAndCrankshaft(kRestParameter);
   }
 
-  if (IsGeneratorFunction(node->kind()) || IsAsyncFunction(node->kind())) {
-    DisableCrankshaft(kGenerator);
+  if (IsResumableFunction(node->kind())) {
+    DisableFullCodegenAndCrankshaft(kGenerator);
   }
 
   if (IsClassConstructor(node->kind())) {
-    DisableCrankshaft(kClassConstructorFunction);
+    DisableFullCodegenAndCrankshaft(kClassConstructorFunction);
   }
 
+  LanguageModeScope language_mode_scope(this, node->language_mode());
+
   VisitDeclarations(scope->declarations());
   VisitStatements(node->body());
 
   node->set_ast_properties(&properties_);
   node->set_dont_optimize_reason(dont_optimize_reason());
   node->set_yield_count(yield_count_);
+
+  if (FLAG_trace_opt) {
+    if (disable_crankshaft_reason_ != kNoReason) {
+      // TODO(leszeks): This is a quick'n'dirty fix to allow the debug name of
+      // the function to be accessed in the print below. This DCHECK will fail
+      // if we move ast numbering off the main thread, but that won't be before
+      // we remove FCG, in which case this entire check isn't necessary anyway.
+      AllowHandleDereference allow_deref;
+      DCHECK(!node->debug_name().is_null());
+
+      PrintF("[enforcing Ignition and TurboFan for %s because: %s\n",
+             node->debug_name()->ToCString().get(),
+             GetBailoutReason(disable_crankshaft_reason_));
+    }
+  }
+
   return !HasStackOverflow();
 }
 
+bool AstNumbering::Renumber(
+    uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
+    Compiler::EagerInnerFunctionLiterals* eager_literals) {
+  DisallowHeapAllocation no_allocation;
+  DisallowHandleAllocation no_handles;
+  DisallowHandleDereference no_deref;
 
-bool AstNumbering::Renumber(Isolate* isolate, Zone* zone,
-                            FunctionLiteral* function) {
-  AstNumberingVisitor visitor(isolate, zone);
+  AstNumberingVisitor visitor(stack_limit, zone, eager_literals);
   return visitor.Renumber(function);
 }
 }  // namespace internal
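
For reference, VisitFunctionLiteral and AstNumbering::Renumber above now form
a simple mutual recursion over eagerly-compiled inner literals. A minimal
standalone sketch of that control flow (hypothetical types, not the V8 API):

  #include <vector>

  struct Fn {
    bool eager = false;
    std::vector<Fn*> inner;  // directly nested function literals
  };

  // Walks |f|'s inner literals; every eagerly-compiled one is both
  // recorded in |eager_list| and recursed into, so a single top-level
  // call covers the whole eager subtree. Lazy literals are skipped and
  // get their own Renumber() call when they are eventually compiled.
  void Renumber(Fn* f, std::vector<Fn*>* eager_list) {
    for (Fn* inner : f->inner) {
      if (!inner->eager) continue;
      eager_list->push_back(inner);
      Renumber(inner, eager_list);
    }
  }

This is why the old comment ("you have to separately Renumber() each
FunctionLiteral") no longer applies to the eager case.
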
diff --git a/src/ast/ast-numbering.h b/src/ast/ast-numbering.h
index 7327895..bea441d 100644
--- a/src/ast/ast-numbering.h
+++ b/src/ast/ast-numbering.h
@@ -5,6 +5,8 @@
 #ifndef V8_AST_AST_NUMBERING_H_
 #define V8_AST_AST_NUMBERING_H_
 
+#include <stdint.h>
+
 namespace v8 {
 namespace internal {
 
@@ -12,11 +14,20 @@
 class FunctionLiteral;
 class Isolate;
 class Zone;
+template <typename T>
+class ThreadedList;
+template <typename T>
+class ThreadedListZoneEntry;
+template <typename T>
+class ZoneVector;
 
 namespace AstNumbering {
 // Assign type feedback IDs, bailout IDs, and generator yield IDs to an AST node
-// tree; perform catch prediction for TryStatements.
-bool Renumber(Isolate* isolate, Zone* zone, FunctionLiteral* function);
+// tree; perform catch prediction for TryStatements. If |eager_literals| is
+// non-null, adds any eagerly-compiled inner function literals to it.
+bool Renumber(
+    uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
+    ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals);
 }
 
 // Some details on yield IDs
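
Note the forward declarations of ThreadedList, ThreadedListZoneEntry and
ZoneVector above: this header never needs their definitions, so it declares
the templates instead of including their headers. The same pattern in a
self-contained sketch (illustrative names):

  template <typename T>
  class List;                      // declaration only; no definition needed

  bool Process(List<int>* items);  // OK: the signature forms a pointer type

  template <typename T>
  class List {};                   // definition lives with the implementation

  bool Process(List<int>* items) { return items != nullptr; }

Only the translation unit that dereferences the pointer needs the full
definition, which keeps include graphs (and rebuild times) small.
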
diff --git a/src/ast/ast-traversal-visitor.h b/src/ast/ast-traversal-visitor.h
index d93e02f..6d0c386 100644
--- a/src/ast/ast-traversal-visitor.h
+++ b/src/ast/ast-traversal-visitor.h
@@ -288,7 +288,7 @@
   DeclarationScope* scope = expr->scope();
   RECURSE_EXPRESSION(VisitDeclarations(scope->declarations()));
   // A lazily parsed function literal won't have a body.
-  if (expr->scope()->is_lazily_parsed()) return;
+  if (expr->scope()->was_lazily_parsed()) return;
   RECURSE_EXPRESSION(VisitStatements(expr->body()));
 }
 
@@ -471,6 +471,12 @@
 }
 
 template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitGetIterator(GetIterator* expr) {
+  PROCESS_EXPRESSION(expr);
+  RECURSE_EXPRESSION(Visit(expr->iterable()));
+}
+
+template <class Subclass>
 void AstTraversalVisitor<Subclass>::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {
   PROCESS_EXPRESSION(expr);
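
The PROCESS_EXPRESSION/RECURSE_EXPRESSION pattern used by the new
VisitGetIterator is plain CRTP: the base class walks children and dispatches
every hook through the subclass. A self-contained sketch of that shape
(hypothetical node type; not the V8 macros):

  #include <cstdio>
  #include <vector>

  struct Node {
    const char* kind = "node";
    std::vector<Node*> children;
  };

  template <class Subclass>
  struct Walker {
    Subclass* impl() { return static_cast<Subclass*>(this); }
    void Visit(Node* n) {
      impl()->Process(n);                            // subclass hook
      for (Node* c : n->children) impl()->Visit(c);  // default recursion
    }
    void Process(Node*) {}  // default: observe nothing
  };

  // A subclass overrides only the hook it cares about.
  struct Printer : Walker<Printer> {
    void Process(Node* n) { std::printf("%s\n", n->kind); }
  };
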
diff --git a/src/ast/ast-types.cc b/src/ast/ast-types.cc
index 49551dd..3dde864 100644
--- a/src/ast/ast-types.cc
+++ b/src/ast/ast-types.cc
@@ -7,6 +7,7 @@
 #include "src/ast/ast-types.h"
 
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 #include "src/ostreams.h"
 
 namespace v8 {
@@ -156,6 +157,8 @@
     case ONE_BYTE_STRING_TYPE:
     case CONS_STRING_TYPE:
     case CONS_ONE_BYTE_STRING_TYPE:
+    case THIN_STRING_TYPE:
+    case THIN_ONE_BYTE_STRING_TYPE:
     case SLICED_STRING_TYPE:
     case SLICED_ONE_BYTE_STRING_TYPE:
     case EXTERNAL_STRING_TYPE:
@@ -192,8 +195,6 @@
     }
     case HEAP_NUMBER_TYPE:
       return kNumber & kTaggedPointer;
-    case SIMD128_VALUE_TYPE:
-      return kSimd;
     case JS_OBJECT_TYPE:
     case JS_ARGUMENTS_TYPE:
     case JS_ERROR_TYPE:
@@ -209,7 +210,6 @@
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
     case JS_MODULE_NAMESPACE_TYPE:
-    case JS_FIXED_ARRAY_ITERATOR_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
     case JS_ARRAY_TYPE:
     case JS_REGEXP_TYPE:  // TODO(rossberg): there should be a RegExp type.
@@ -220,6 +220,7 @@
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
     case JS_STRING_ITERATOR_TYPE:
+    case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
 
     case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
     case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
@@ -259,6 +260,7 @@
 
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
+    case JS_PROMISE_CAPABILITY_TYPE:
     case JS_PROMISE_TYPE:
     case JS_BOUND_FUNCTION_TYPE:
       DCHECK(!map->is_undetectable());
@@ -304,19 +306,18 @@
     case PROMISE_REACTION_JOB_INFO_TYPE:
     case FUNCTION_TEMPLATE_INFO_TYPE:
     case OBJECT_TEMPLATE_INFO_TYPE:
-    case SIGNATURE_INFO_TYPE:
-    case TYPE_SWITCH_INFO_TYPE:
     case ALLOCATION_MEMENTO_TYPE:
     case TYPE_FEEDBACK_INFO_TYPE:
     case ALIASED_ARGUMENTS_ENTRY_TYPE:
-    case BOX_TYPE:
     case DEBUG_INFO_TYPE:
     case BREAK_POINT_INFO_TYPE:
     case CELL_TYPE:
     case WEAK_CELL_TYPE:
     case PROTOTYPE_INFO_TYPE:
+    case TUPLE2_TYPE:
     case TUPLE3_TYPE:
     case CONTEXT_EXTENSION_TYPE:
+    case CONSTANT_ELEMENTS_PAIR_TYPE:
       UNREACHABLE();
       return kNone;
   }
@@ -1295,13 +1296,6 @@
   return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
 }
 
-#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
-  AstType* AstType::Name(Isolate* isolate, Zone* zone) {             \
-    return Class(i::handle(isolate->heap()->name##_map()), zone);    \
-  }
-SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
-#undef CONSTRUCT_SIMD_TYPE
-
 // -----------------------------------------------------------------------------
 // Instantiations.
 
diff --git a/src/ast/ast-types.h b/src/ast/ast-types.h
index 0b6e23f..ea0be65 100644
--- a/src/ast/ast-types.h
+++ b/src/ast/ast-types.h
@@ -156,15 +156,15 @@
 #define AST_REPRESENTATION(k) ((k) & AstBitsetType::kRepresentation)
 #define AST_SEMANTIC(k)       ((k) & AstBitsetType::kSemantic)
 
+// Bits 21-22 are available.
 #define AST_REPRESENTATION_BITSET_TYPE_LIST(V)    \
   V(None,               0)                    \
-  V(UntaggedBit,        1u << 22 | kSemantic) \
-  V(UntaggedIntegral8,  1u << 23 | kSemantic) \
-  V(UntaggedIntegral16, 1u << 24 | kSemantic) \
-  V(UntaggedIntegral32, 1u << 25 | kSemantic) \
-  V(UntaggedFloat32,    1u << 26 | kSemantic) \
-  V(UntaggedFloat64,    1u << 27 | kSemantic) \
-  V(UntaggedSimd128,    1u << 28 | kSemantic) \
+  V(UntaggedBit,        1u << 23 | kSemantic) \
+  V(UntaggedIntegral8,  1u << 24 | kSemantic) \
+  V(UntaggedIntegral16, 1u << 25 | kSemantic) \
+  V(UntaggedIntegral32, 1u << 26 | kSemantic) \
+  V(UntaggedFloat32,    1u << 27 | kSemantic) \
+  V(UntaggedFloat64,    1u << 28 | kSemantic) \
   V(UntaggedPointer,    1u << 29 | kSemantic) \
   V(TaggedSigned,       1u << 30 | kSemantic) \
   V(TaggedPointer,      1u << 31 | kSemantic) \
@@ -197,13 +197,12 @@
   V(Symbol,              1u << 12 | AST_REPRESENTATION(kTaggedPointer)) \
   V(InternalizedString,  1u << 13 | AST_REPRESENTATION(kTaggedPointer)) \
   V(OtherString,         1u << 14 | AST_REPRESENTATION(kTaggedPointer)) \
-  V(Simd,                1u << 15 | AST_REPRESENTATION(kTaggedPointer)) \
-  V(OtherObject,         1u << 17 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(OtherObject,         1u << 15 | AST_REPRESENTATION(kTaggedPointer)) \
   V(OtherUndetectable,   1u << 16 | AST_REPRESENTATION(kTaggedPointer)) \
-  V(Proxy,               1u << 18 | AST_REPRESENTATION(kTaggedPointer)) \
-  V(Function,            1u << 19 | AST_REPRESENTATION(kTaggedPointer)) \
-  V(Hole,                1u << 20 | AST_REPRESENTATION(kTaggedPointer)) \
-  V(OtherInternal,       1u << 21 |                                     \
+  V(Proxy,               1u << 17 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(Function,            1u << 18 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(Hole,                1u << 19 | AST_REPRESENTATION(kTaggedPointer)) \
+  V(OtherInternal,       1u << 20 |                                     \
                          AST_REPRESENTATION(kTagged | kUntagged))       \
   \
   V(Signed31,                   kUnsigned30 | kNegative31) \
@@ -232,11 +231,10 @@
   V(NullOrUndefined,            kNull | kUndefined) \
   V(Undetectable,               kNullOrUndefined | kOtherUndetectable) \
   V(NumberOrOddball,            kNumber | kNullOrUndefined | kBoolean | kHole) \
-  V(NumberOrSimdOrString,       kNumber | kSimd | kString) \
   V(NumberOrString,             kNumber | kString) \
   V(NumberOrUndefined,          kNumber | kUndefined) \
   V(PlainPrimitive,             kNumberOrString | kBoolean | kNullOrUndefined) \
-  V(Primitive,                  kSymbol | kSimd | kPlainPrimitive) \
+  V(Primitive,                  kSymbol | kPlainPrimitive) \
   V(DetectableReceiver,         kFunction | kOtherObject | kProxy) \
   V(Object,                     kFunction | kOtherObject | kOtherUndetectable) \
   V(Receiver,                   kObject | kProxy) \
@@ -770,11 +768,6 @@
     return tuple;
   }
 
-#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
-  static AstType* Name(Isolate* isolate, Zone* zone);
-  SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
-#undef CONSTRUCT_SIMD_TYPE
-
   static AstType* Union(AstType* type1, AstType* type2, Zone* zone);
   static AstType* Intersect(AstType* type1, AstType* type2, Zone* zone);
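
Everything in the two bitset lists above packs into one uint32_t: semantic
classes in the low bits, machine representations in the high bits (after this
change bits 23-31, leaving 21-22 spare). A standalone sketch of the masking
scheme, with the bit boundaries assumed from the macros above:

  #include <cstdint>

  // Assumed layout: semantic bits 0-20, spare bits 21-22,
  // representation bits 23-31.
  constexpr uint32_t kSemanticMask = (1u << 21) - 1;
  constexpr uint32_t kRepresentationMask = ~((1u << 23) - 1);

  constexpr uint32_t Semantic(uint32_t t) { return t & kSemanticMask; }
  constexpr uint32_t Representation(uint32_t t) {
    return t & kRepresentationMask;
  }

  // e.g. OtherString = one semantic bit OR'd with one representation bit:
  constexpr uint32_t kTaggedPointer = 1u << 31;
  constexpr uint32_t kOtherString = (1u << 14) | kTaggedPointer;
  static_assert(Representation(kOtherString) == kTaggedPointer, "");
  static_assert(Semantic(kOtherString) == (1u << 14), "");

Union and intersection of such bitset types then reduce to bitwise OR and
AND, which is what makes the renumbering above purely mechanical.
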
 
diff --git a/src/ast/ast-value-factory.cc b/src/ast/ast-value-factory.cc
index ed2976f..b160c48 100644
--- a/src/ast/ast-value-factory.cc
+++ b/src/ast/ast-value-factory.cc
@@ -28,6 +28,8 @@
 #include "src/ast/ast-value-factory.h"
 
 #include "src/api.h"
+#include "src/char-predicates-inl.h"
+#include "src/objects-inl.h"
 #include "src/objects.h"
 #include "src/utils.h"
 
@@ -127,6 +129,36 @@
   return false;
 }
 
+bool AstRawString::Compare(void* a, void* b) {
+  const AstRawString* lhs = static_cast<AstRawString*>(a);
+  const AstRawString* rhs = static_cast<AstRawString*>(b);
+  DCHECK_EQ(lhs->hash(), rhs->hash());
+  if (lhs->length() != rhs->length()) return false;
+  const unsigned char* l = lhs->raw_data();
+  const unsigned char* r = rhs->raw_data();
+  size_t length = rhs->length();
+  if (lhs->is_one_byte()) {
+    if (rhs->is_one_byte()) {
+      return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(l),
+                                  reinterpret_cast<const uint8_t*>(r),
+                                  length) == 0;
+    } else {
+      return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(l),
+                                  reinterpret_cast<const uint16_t*>(r),
+                                  length) == 0;
+    }
+  } else {
+    if (rhs->is_one_byte()) {
+      return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(l),
+                                  reinterpret_cast<const uint8_t*>(r),
+                                  length) == 0;
+    } else {
+      return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(l),
+                                  reinterpret_cast<const uint16_t*>(r),
+                                  length) == 0;
+    }
+  }
+}
 
 void AstConsString::Internalize(Isolate* isolate) {
   // AstRawStrings are internalized before AstConsStrings so left and right are
@@ -182,14 +214,10 @@
       DCHECK(!string_->string().is_null());
       break;
     case SYMBOL:
-      if (symbol_name_[0] == 'i') {
-        DCHECK_EQ(0, strcmp(symbol_name_, "iterator_symbol"));
-        set_value(isolate->factory()->iterator_symbol());
-      } else if (strcmp(symbol_name_, "hasInstance_symbol") == 0) {
-        set_value(isolate->factory()->has_instance_symbol());
-      } else {
-        DCHECK_EQ(0, strcmp(symbol_name_, "home_object_symbol"));
-        set_value(isolate->factory()->home_object_symbol());
+      switch (symbol_) {
+        case AstSymbol::kHomeObjectSymbol:
+          set_value(isolate->factory()->home_object_symbol());
+          break;
       }
       break;
     case NUMBER_WITH_DOT:
@@ -219,9 +247,17 @@
   }
 }
 
-
 AstRawString* AstValueFactory::GetOneByteStringInternal(
     Vector<const uint8_t> literal) {
+  if (literal.length() == 1 && IsInRange(literal[0], 'a', 'z')) {
+    int key = literal[0] - 'a';
+    if (one_character_strings_[key] == nullptr) {
+      uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
+          literal.start(), literal.length(), hash_seed_);
+      one_character_strings_[key] = GetString(hash, true, literal);
+    }
+    return one_character_strings_[key];
+  }
   uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
       literal.start(), literal.length(), hash_seed_);
   return GetString(hash, true, literal);
@@ -260,39 +296,6 @@
   return new_string;
 }
 
-const AstRawString* AstValueFactory::ConcatStrings(const AstRawString* left,
-                                                   const AstRawString* right) {
-  int left_length = left->length();
-  int right_length = right->length();
-  const unsigned char* left_data = left->raw_data();
-  const unsigned char* right_data = right->raw_data();
-  if (left->is_one_byte() && right->is_one_byte()) {
-    uint8_t* buffer = zone_->NewArray<uint8_t>(left_length + right_length);
-    memcpy(buffer, left_data, left_length);
-    memcpy(buffer + left_length, right_data, right_length);
-    Vector<const uint8_t> literal(buffer, left_length + right_length);
-    return GetOneByteStringInternal(literal);
-  } else {
-    uint16_t* buffer = zone_->NewArray<uint16_t>(left_length + right_length);
-    if (left->is_one_byte()) {
-      for (int i = 0; i < left_length; ++i) {
-        buffer[i] = left_data[i];
-      }
-    } else {
-      memcpy(buffer, left_data, 2 * left_length);
-    }
-    if (right->is_one_byte()) {
-      for (int i = 0; i < right_length; ++i) {
-        buffer[i + left_length] = right_data[i];
-      }
-    } else {
-      memcpy(buffer + left_length, right_data, 2 * right_length);
-    }
-    Vector<const uint16_t> literal(buffer, left_length + right_length);
-    return GetTwoByteStringInternal(literal);
-  }
-}
-
 void AstValueFactory::Internalize(Isolate* isolate) {
   // Strings need to be internalized before values, because values refer to
   // strings.
@@ -318,9 +321,8 @@
   return AddValue(value);
 }
 
-
-const AstValue* AstValueFactory::NewSymbol(const char* name) {
-  AstValue* value = new (zone_) AstValue(name);
+const AstValue* AstValueFactory::NewSymbol(AstSymbol symbol) {
+  AstValue* value = new (zone_) AstValue(symbol);
   return AddValue(value);
 }
 
@@ -379,7 +381,7 @@
   // return this AstRawString.
   AstRawString key(is_one_byte, literal_bytes, hash);
   base::HashMap::Entry* entry = string_table_.LookupOrInsert(&key, hash);
-  if (entry->value == NULL) {
+  if (entry->value == nullptr) {
     // Copy literal contents for later comparison.
     int length = literal_bytes.length();
     byte* new_literal_bytes = zone_->NewArray<byte>(length);
@@ -394,36 +396,5 @@
   return reinterpret_cast<AstRawString*>(entry->key);
 }
 
-
-bool AstValueFactory::AstRawStringCompare(void* a, void* b) {
-  const AstRawString* lhs = static_cast<AstRawString*>(a);
-  const AstRawString* rhs = static_cast<AstRawString*>(b);
-  DCHECK_EQ(lhs->hash(), rhs->hash());
-  if (lhs->length() != rhs->length()) return false;
-  const unsigned char* l = lhs->raw_data();
-  const unsigned char* r = rhs->raw_data();
-  size_t length = rhs->length();
-  if (lhs->is_one_byte()) {
-    if (rhs->is_one_byte()) {
-      return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(l),
-                                  reinterpret_cast<const uint8_t*>(r),
-                                  length) == 0;
-    } else {
-      return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(l),
-                                  reinterpret_cast<const uint16_t*>(r),
-                                  length) == 0;
-    }
-  } else {
-    if (rhs->is_one_byte()) {
-      return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(l),
-                                  reinterpret_cast<const uint8_t*>(r),
-                                  length) == 0;
-    } else {
-      return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(l),
-                                  reinterpret_cast<const uint16_t*>(r),
-                                  length) == 0;
-    }
-  }
-}
 }  // namespace internal
 }  // namespace v8
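
The new one_character_strings_ fast path in GetOneByteStringInternal is a
lazily-filled, directly-indexed cache: "a".."z" hash once, and every later
lookup is a single array load instead of a hash-table probe. The same idea in
a standalone form (std::string standing in for AstRawString):

  #include <array>
  #include <string>

  class OneCharCache {
   public:
    // Returns a canonical string for 'a'..'z'; anything else takes the
    // slow path (here: a throwaway buffer).
    const std::string& Get(char c) {
      if (c >= 'a' && c <= 'z') {
        std::string& slot = cache_[c - 'a'];
        if (slot.empty()) slot.assign(1, c);  // fill lazily, exactly once
        return slot;
      }
      fallback_.assign(1, c);
      return fallback_;
    }

   private:
    std::array<std::string, 26> cache_;
    std::string fallback_;
  };

Minified JavaScript is dominated by one-letter identifiers, which is what the
"(for minified code)" note in the header refers to.
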
diff --git a/src/ast/ast-value-factory.h b/src/ast/ast-value-factory.h
index 4ce480f..c1ed7ac 100644
--- a/src/ast/ast-value-factory.h
+++ b/src/ast/ast-value-factory.h
@@ -28,9 +28,11 @@
 #ifndef V8_AST_AST_VALUE_FACTORY_H_
 #define V8_AST_AST_VALUE_FACTORY_H_
 
-#include "src/api.h"
 #include "src/base/hashmap.h"
+#include "src/conversions.h"
+#include "src/factory.h"
 #include "src/globals.h"
+#include "src/isolate.h"
 #include "src/utils.h"
 
 // AstString, AstValue and AstValueFactory are for storing strings and values
@@ -104,14 +106,17 @@
     return *c;
   }
 
+  static bool Compare(void* a, void* b);
+
   // For storing AstRawStrings in a hash map.
   uint32_t hash() const {
     return hash_;
   }
 
  private:
-  friend class AstValueFactory;
   friend class AstRawStringInternalizationKey;
+  friend class AstStringConstants;
+  friend class AstValueFactory;
 
   AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes,
                uint32_t hash)
@@ -149,19 +154,19 @@
   const AstString* right_;
 };
 
+enum class AstSymbol : uint8_t { kHomeObjectSymbol };
 
-// AstValue is either a string, a number, a string array, a boolean, or a
-// special value (null, undefined, the hole).
+// AstValue is either a string, a symbol, a number, a string array, a boolean,
+// or a special value (null, undefined, the hole).
 class AstValue : public ZoneObject {
  public:
   bool IsString() const {
     return type_ == STRING;
   }
 
-  bool IsNumber() const {
-    return type_ == NUMBER || type_ == NUMBER_WITH_DOT || type_ == SMI ||
-           type_ == SMI_WITH_DOT;
-  }
+  bool IsSymbol() const { return type_ == SYMBOL; }
+
+  bool IsNumber() const { return IsSmi() || IsHeapNumber(); }
 
   bool ContainsDot() const {
     return type_ == NUMBER_WITH_DOT || type_ == SMI_WITH_DOT;
@@ -172,20 +177,36 @@
     return string_;
   }
 
+  AstSymbol AsSymbol() const {
+    CHECK_EQ(SYMBOL, type_);
+    return symbol_;
+  }
+
   double AsNumber() const {
-    if (type_ == NUMBER || type_ == NUMBER_WITH_DOT)
-      return number_;
-    if (type_ == SMI || type_ == SMI_WITH_DOT)
-      return smi_;
+    if (IsHeapNumber()) return number_;
+    if (IsSmi()) return smi_;
     UNREACHABLE();
     return 0;
   }
 
   Smi* AsSmi() const {
-    CHECK(type_ == SMI || type_ == SMI_WITH_DOT);
+    CHECK(IsSmi());
     return Smi::FromInt(smi_);
   }
 
+  bool ToUint32(uint32_t* value) const {
+    if (IsSmi()) {
+      int num = smi_;
+      if (num < 0) return false;
+      *value = static_cast<uint32_t>(num);
+      return true;
+    }
+    if (IsHeapNumber()) {
+      return DoubleToUint32IfEqualToSelf(number_, value);
+    }
+    return false;
+  }
+
   bool EqualsString(const AstRawString* string) const {
     return type_ == STRING && string_ == string;
   }
@@ -195,6 +216,9 @@
   bool BooleanValue() const;
 
   bool IsSmi() const { return type_ == SMI || type_ == SMI_WITH_DOT; }
+  bool IsHeapNumber() const {
+    return type_ == NUMBER || type_ == NUMBER_WITH_DOT;
+  }
   bool IsFalse() const { return type_ == BOOLEAN && !bool_; }
   bool IsTrue() const { return type_ == BOOLEAN && bool_; }
   bool IsUndefined() const { return type_ == UNDEFINED; }
@@ -235,8 +259,8 @@
     string_ = s;
   }
 
-  explicit AstValue(const char* name) : type_(SYMBOL), next_(nullptr) {
-    symbol_name_ = name;
+  explicit AstValue(AstSymbol symbol) : type_(SYMBOL), next_(nullptr) {
+    symbol_ = symbol;
   }
 
   explicit AstValue(double n, bool with_dot) : next_(nullptr) {
@@ -276,11 +300,10 @@
     double number_;
     int smi_;
     bool bool_;
-    const char* symbol_name_;
+    AstSymbol symbol_;
   };
 };
 
-
 // For generating constants.
 #define STRING_CONSTANTS(F)                     \
   F(anonymous_function, "(anonymous function)") \
@@ -291,7 +314,6 @@
   F(default, "default")                         \
   F(done, "done")                               \
   F(dot, ".")                                   \
-  F(dot_class_field_init, ".class-field-init")  \
   F(dot_for, ".for")                            \
   F(dot_generator_object, ".generator_object")  \
   F(dot_iterator, ".iterator")                  \
@@ -304,6 +326,7 @@
   F(get_space, "get ")                          \
   F(length, "length")                           \
   F(let, "let")                                 \
+  F(name, "name")                               \
   F(native, "native")                           \
   F(new_target, ".new.target")                  \
   F(next, "next")                               \
@@ -320,6 +343,55 @@
   F(use_strict, "use strict")                   \
   F(value, "value")
 
+class AstStringConstants final {
+ public:
+  AstStringConstants(Isolate* isolate, uint32_t hash_seed)
+      : zone_(isolate->allocator(), ZONE_NAME),
+        string_table_(AstRawString::Compare),
+        hash_seed_(hash_seed) {
+    DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+#define F(name, str)                                                      \
+  {                                                                       \
+    const char* data = str;                                               \
+    Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
+                                  static_cast<int>(strlen(data)));        \
+    uint32_t hash = StringHasher::HashSequentialString<uint8_t>(          \
+        literal.start(), literal.length(), hash_seed_);                   \
+    name##_string_ = new (&zone_) AstRawString(true, literal, hash);      \
+    /* The Handle returned by the factory is located on the roots */      \
+    /* array, not on the temporary HandleScope, so this is safe.  */      \
+    name##_string_->set_string(isolate->factory()->name##_string());      \
+    base::HashMap::Entry* entry =                                         \
+        string_table_.InsertNew(name##_string_, name##_string_->hash());  \
+    DCHECK(entry->value == nullptr);                                      \
+    entry->value = reinterpret_cast<void*>(1);                            \
+  }
+    STRING_CONSTANTS(F)
+#undef F
+  }
+
+#define F(name, str) \
+  const AstRawString* name##_string() const { return name##_string_; }
+  STRING_CONSTANTS(F)
+#undef F
+
+  uint32_t hash_seed() const { return hash_seed_; }
+  const base::CustomMatcherHashMap* string_table() const {
+    return &string_table_;
+  }
+
+ private:
+  Zone zone_;
+  base::CustomMatcherHashMap string_table_;
+  uint32_t hash_seed_;
+
+#define F(name, str) AstRawString* name##_string_;
+  STRING_CONSTANTS(F)
+#undef F
+
+  DISALLOW_COPY_AND_ASSIGN(AstStringConstants);
+};
+
 #define OTHER_CONSTANTS(F) \
   F(true_value)            \
   F(false_value)           \
@@ -329,21 +401,23 @@
 
 class AstValueFactory {
  public:
-  AstValueFactory(Zone* zone, uint32_t hash_seed)
-      : string_table_(AstRawStringCompare),
+  AstValueFactory(Zone* zone, const AstStringConstants* string_constants,
+                  uint32_t hash_seed)
+      : string_table_(string_constants->string_table()),
         values_(nullptr),
-        smis_(),
         strings_(nullptr),
         strings_end_(&strings_),
+        string_constants_(string_constants),
         zone_(zone),
         hash_seed_(hash_seed) {
-#define F(name, str) name##_string_ = NULL;
-    STRING_CONSTANTS(F)
-#undef F
-#define F(name) name##_ = NULL;
+#define F(name) name##_ = nullptr;
     OTHER_CONSTANTS(F)
 #undef F
+    DCHECK_EQ(hash_seed, string_constants->hash_seed());
     std::fill(smis_, smis_ + arraysize(smis_), nullptr);
+    std::fill(one_character_strings_,
+              one_character_strings_ + arraysize(one_character_strings_),
+              nullptr);
   }
 
   Zone* zone() const { return zone_; }
@@ -361,28 +435,21 @@
   const AstRawString* GetString(Handle<String> literal);
   const AstConsString* NewConsString(const AstString* left,
                                      const AstString* right);
-  const AstRawString* ConcatStrings(const AstRawString* left,
-                                    const AstRawString* right);
 
-  void Internalize(Isolate* isolate);
+  V8_EXPORT_PRIVATE void Internalize(Isolate* isolate);
 
-#define F(name, str)                                                    \
-  const AstRawString* name##_string() {                                 \
-    if (name##_string_ == NULL) {                                       \
-      const char* data = str;                                           \
-      name##_string_ = GetOneByteString(                                \
-          Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), \
-                                static_cast<int>(strlen(data))));       \
-    }                                                                   \
-    return name##_string_;                                              \
+#define F(name, str)                           \
+  const AstRawString* name##_string() {        \
+    return string_constants_->name##_string(); \
   }
   STRING_CONSTANTS(F)
 #undef F
 
-  const AstValue* NewString(const AstRawString* string);
+  V8_EXPORT_PRIVATE const AstValue* NewString(const AstRawString* string);
   // A JavaScript symbol (ECMA-262 edition 6).
-  const AstValue* NewSymbol(const char* name);
-  const AstValue* NewNumber(double number, bool with_dot = false);
+  const AstValue* NewSymbol(AstSymbol symbol);
+  V8_EXPORT_PRIVATE const AstValue* NewNumber(double number,
+                                              bool with_dot = false);
   const AstValue* NewSmi(uint32_t number);
   const AstValue* NewBoolean(bool b);
   const AstValue* NewStringList(ZoneList<const AstRawString*>* strings);
@@ -415,27 +482,29 @@
   AstRawString* GetString(uint32_t hash, bool is_one_byte,
                           Vector<const byte> literal_bytes);
 
-  static bool AstRawStringCompare(void* a, void* b);
-
   // All strings are copied here, one after another (no NULLs in between).
   base::CustomMatcherHashMap string_table_;
   // For keeping track of all AstValues and AstRawStrings we've created (so that
   // they can be internalized later).
   AstValue* values_;
 
-  AstValue* smis_[kMaxCachedSmi + 1];
   // We need to keep track of strings_ in order since cons strings require their
   // members to be internalized first.
   AstString* strings_;
   AstString** strings_end_;
+
+  // Holds constant string values which are shared across the isolate.
+  const AstStringConstants* string_constants_;
+
+  // Caches for faster access: small numbers, one-character lowercase strings
+  // (for minified code).
+  AstValue* smis_[kMaxCachedSmi + 1];
+  AstRawString* one_character_strings_[26];
+
   Zone* zone_;
 
   uint32_t hash_seed_;
 
-#define F(name, str) const AstRawString* name##_string_;
-  STRING_CONSTANTS(F)
-#undef F
-
 #define F(name) AstValue* name##_;
   OTHER_CONSTANTS(F)
 #undef F
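
AstValue::ToUint32 above accepts a heap number only when the double survives a
uint32 round trip (DoubleToUint32IfEqualToSelf). A standalone re-derivation of
that check, with an explicit range guard added because an out-of-range
double-to-integer cast is undefined behavior in portable C++:

  #include <cstdint>

  // True iff |n| is exactly representable as a uint32_t: a non-negative
  // integer no larger than 2^32 - 1, with no fractional part.
  bool LosslessToUint32(double n, uint32_t* out) {
    if (n < 0.0 || n > 4294967295.0) return false;  // guard the cast
    uint32_t u = static_cast<uint32_t>(n);
    if (static_cast<double>(u) != n) return false;  // fraction was dropped
    *out = u;
    return true;
  }

This is the property that lets InitDepthAndFlags treat such keys as array
indices without materializing heap numbers.
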
diff --git a/src/ast/ast.cc b/src/ast/ast.cc
index fc8bd8a..5705c70 100644
--- a/src/ast/ast.cc
+++ b/src/ast/ast.cc
@@ -10,11 +10,15 @@
 #include "src/ast/prettyprinter.h"
 #include "src/ast/scopes.h"
 #include "src/base/hashmap.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/builtins/builtins.h"
 #include "src/code-stubs.h"
 #include "src/contexts.h"
 #include "src/conversions.h"
+#include "src/double.h"
 #include "src/elements.h"
+#include "src/objects-inl.h"
+#include "src/objects/literal-objects.h"
 #include "src/property-details.h"
 #include "src/property.h"
 #include "src/string-stream.h"
@@ -28,6 +32,24 @@
 
 #ifdef DEBUG
 
+static const char* NameForNativeContextIntrinsicIndex(uint32_t idx) {
+  switch (idx) {
+#define NATIVE_CONTEXT_FIELDS_IDX(NAME, Type, name) \
+  case Context::NAME:                               \
+    return #name;
+
+    NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELDS_IDX)
+#undef NATIVE_CONTEXT_FIELDS_IDX
+
+    default:
+      break;
+  }
+
+  return "UnknownIntrinsicIndex";
+}
+
+void AstNode::Print() { Print(Isolate::Current()); }
+
 void AstNode::Print(Isolate* isolate) {
   AstPrinter::PrintOut(isolate, this);
 }
@@ -70,6 +92,10 @@
   return IsLiteral() && AsLiteral()->raw_value()->IsSmi();
 }
 
+bool Expression::IsNumberLiteral() const {
+  return IsLiteral() && AsLiteral()->raw_value()->IsNumber();
+}
+
 bool Expression::IsStringLiteral() const {
   return IsLiteral() && AsLiteral()->raw_value()->IsString();
 }
@@ -195,50 +221,51 @@
   set_var(var);
   set_is_resolved();
   var->set_is_used();
+  if (is_assigned()) var->set_maybe_assigned();
 }
 
-
-void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
-                                              FeedbackVectorSpec* spec,
-                                              FeedbackVectorSlotCache* cache) {
+void VariableProxy::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+                                        TypeofMode typeof_mode,
+                                        FeedbackSlotCache* cache) {
   if (UsesVariableFeedbackSlot()) {
     // VariableProxies that point to the same Variable within a function can
     // make their loads from the same IC slot.
     if (var()->IsUnallocated() || var()->mode() == DYNAMIC_GLOBAL) {
-      ZoneHashMap::Entry* entry = cache->Get(var());
-      if (entry != NULL) {
-        variable_feedback_slot_ = FeedbackVectorSlot(
-            static_cast<int>(reinterpret_cast<intptr_t>(entry->value)));
+      FeedbackSlot slot = cache->Get(typeof_mode, var());
+      if (!slot.IsInvalid()) {
+        variable_feedback_slot_ = slot;
         return;
       }
-      variable_feedback_slot_ = spec->AddLoadGlobalICSlot(var()->name());
-      cache->Put(var(), variable_feedback_slot_);
+      variable_feedback_slot_ = spec->AddLoadGlobalICSlot(typeof_mode);
+      cache->Put(typeof_mode, var(), variable_feedback_slot_);
     } else {
       variable_feedback_slot_ = spec->AddLoadICSlot();
     }
   }
 }
 
-
 static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
-                              FeedbackVectorSlot* out_slot) {
+                              LanguageMode language_mode,
+                              FeedbackSlot* out_slot) {
   Property* property = expr->AsProperty();
   LhsKind assign_type = Property::GetAssignType(property);
   if ((assign_type == VARIABLE &&
        expr->AsVariableProxy()->var()->IsUnallocated()) ||
       assign_type == NAMED_PROPERTY || assign_type == KEYED_PROPERTY) {
     // TODO(ishell): consider using ICSlotCache for variables here.
-    FeedbackVectorSlotKind kind = assign_type == KEYED_PROPERTY
-                                      ? FeedbackVectorSlotKind::KEYED_STORE_IC
-                                      : FeedbackVectorSlotKind::STORE_IC;
-    *out_slot = spec->AddSlot(kind);
+    if (assign_type == KEYED_PROPERTY) {
+      *out_slot = spec->AddKeyedStoreICSlot(language_mode);
+
+    } else {
+      *out_slot = spec->AddStoreICSlot(language_mode);
+    }
   }
 }
 
-void ForInStatement::AssignFeedbackVectorSlots(Isolate* isolate,
-                                               FeedbackVectorSpec* spec,
-                                               FeedbackVectorSlotCache* cache) {
-  AssignVectorSlots(each(), spec, &each_slot_);
+void ForInStatement::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+                                         LanguageMode language_mode,
+                                         FeedbackSlotCache* cache) {
+  AssignVectorSlots(each(), spec, language_mode, &each_slot_);
   for_in_feedback_slot_ = spec->AddGeneralSlot();
 }
 
@@ -253,17 +280,16 @@
                 StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
 }
 
-void Assignment::AssignFeedbackVectorSlots(Isolate* isolate,
-                                           FeedbackVectorSpec* spec,
-                                           FeedbackVectorSlotCache* cache) {
-  AssignVectorSlots(target(), spec, &slot_);
+void Assignment::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+                                     LanguageMode language_mode,
+                                     FeedbackSlotCache* cache) {
+  AssignVectorSlots(target(), spec, language_mode, &slot_);
 }
 
-
-void CountOperation::AssignFeedbackVectorSlots(Isolate* isolate,
-                                               FeedbackVectorSpec* spec,
-                                               FeedbackVectorSlotCache* cache) {
-  AssignVectorSlots(expression(), spec, &slot_);
+void CountOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+                                         LanguageMode language_mode,
+                                         FeedbackSlotCache* cache) {
+  AssignVectorSlots(expression(), spec, language_mode, &slot_);
   // Assign a slot to collect feedback about binary operations. Used only in
   // Ignition. Full-codegen uses AstId to record type feedback.
   binary_operation_slot_ = spec->AddInterpreterBinaryOpICSlot();
@@ -346,6 +372,16 @@
   }
 }
 
+FeedbackSlot LiteralProperty::GetStoreDataPropertySlot() const {
+  int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
+  return GetSlot(offset);
+}
+
+void LiteralProperty::SetStoreDataPropertySlot(FeedbackSlot slot) {
+  int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
+  return SetSlot(slot, offset);
+}
+
 bool LiteralProperty::NeedsSetFunctionName() const {
   return is_computed_name_ &&
          (value_->IsAnonymousFunctionDefinition() ||
@@ -360,22 +396,27 @@
       kind_(kind),
       is_static_(is_static) {}
 
-void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
-                                             FeedbackVectorSpec* spec,
-                                             FeedbackVectorSlotCache* cache) {
+void ClassLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+                                       LanguageMode language_mode,
+                                       FeedbackSlotCache* cache) {
   // This logic that computes the number of slots needed for vector store
-  // ICs must mirror FullCodeGenerator::VisitClassLiteral.
-  prototype_slot_ = spec->AddLoadICSlot();
+  // ICs must mirror BytecodeGenerator::VisitClassLiteral.
+  if (FunctionLiteral::NeedsHomeObject(constructor())) {
+    home_object_slot_ = spec->AddStoreICSlot(language_mode);
+  }
+
   if (NeedsProxySlot()) {
-    proxy_slot_ = spec->AddStoreICSlot();
+    proxy_slot_ = spec->AddStoreICSlot(language_mode);
   }
 
   for (int i = 0; i < properties()->length(); i++) {
     ClassLiteral::Property* property = properties()->at(i);
     Expression* value = property->value();
     if (FunctionLiteral::NeedsHomeObject(value)) {
-      property->SetSlot(spec->AddStoreICSlot());
+      property->SetSlot(spec->AddStoreICSlot(language_mode));
     }
+    property->SetStoreDataPropertySlot(
+        spec->AddStoreDataPropertyInLiteralICSlot());
   }
 }
 
@@ -392,9 +433,11 @@
 
 bool ObjectLiteral::Property::emit_store() const { return emit_store_; }
 
-void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
-                                              FeedbackVectorSpec* spec,
-                                              FeedbackVectorSlotCache* cache) {
+void ObjectLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+                                        LanguageMode language_mode,
+                                        FeedbackSlotCache* cache) {
+  MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, cache);
+
   // This logic that computes the number of slots needed for vector store
   // ICs must mirror FullCodeGenerator::VisitObjectLiteral.
   int property_index = 0;
@@ -406,6 +449,7 @@
     Literal* key = property->key()->AsLiteral();
     Expression* value = property->value();
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -413,29 +457,29 @@
       case ObjectLiteral::Property::COMPUTED:
         // It is safe to use [[Put]] here because the boilerplate already
         // contains computed properties with an uninitialized value.
-        if (key->value()->IsInternalizedString()) {
+        if (key->IsStringLiteral()) {
           if (property->emit_store()) {
-            property->SetSlot(spec->AddStoreICSlot());
+            property->SetSlot(spec->AddStoreOwnICSlot());
             if (FunctionLiteral::NeedsHomeObject(value)) {
-              property->SetSlot(spec->AddStoreICSlot(), 1);
+              property->SetSlot(spec->AddStoreICSlot(language_mode), 1);
             }
           }
           break;
         }
         if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
-          property->SetSlot(spec->AddStoreICSlot());
+          property->SetSlot(spec->AddStoreICSlot(language_mode));
         }
         break;
       case ObjectLiteral::Property::PROTOTYPE:
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
-          property->SetSlot(spec->AddStoreICSlot());
+          property->SetSlot(spec->AddStoreICSlot(language_mode));
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
-          property->SetSlot(spec->AddStoreICSlot());
+          property->SetSlot(spec->AddStoreICSlot(language_mode));
         }
         break;
     }
@@ -447,9 +491,11 @@
     Expression* value = property->value();
     if (property->kind() != ObjectLiteral::Property::PROTOTYPE) {
       if (FunctionLiteral::NeedsHomeObject(value)) {
-        property->SetSlot(spec->AddStoreICSlot());
+        property->SetSlot(spec->AddStoreICSlot(language_mode));
       }
     }
+    property->SetStoreDataPropertySlot(
+        spec->AddStoreDataPropertyInLiteralICSlot());
   }
 }
 
@@ -491,13 +537,8 @@
          property->kind() != ObjectLiteral::Property::PROTOTYPE;
 }
 
-
-void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
-  if (!constant_properties_.is_null()) return;
-
-  // Allocate a fixed array to hold all the constant properties.
-  Handle<FixedArray> constant_properties = isolate->factory()->NewFixedArray(
-      boilerplate_properties_ * 2, TENURED);
+void ObjectLiteral::InitDepthAndFlags() {
+  if (depth_ > 0) return;
 
   int position = 0;
   // Accumulate the value in local variables and store it at the end.
@@ -521,50 +562,43 @@
 
     MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
     if (m_literal != NULL) {
-      m_literal->BuildConstants(isolate);
+      m_literal->InitDepthAndFlags();
       if (m_literal->depth() >= depth_acc) depth_acc = m_literal->depth() + 1;
     }
 
-    // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
-    // value for COMPUTED properties, the real value is filled in at
-    // runtime. The enumeration order is maintained.
-    Handle<Object> key = property->key()->AsLiteral()->value();
-    Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
+    const AstValue* key = property->key()->AsLiteral()->raw_value();
+    Expression* value = property->value();
+
+    bool is_compile_time_value = CompileTimeValue::IsCompileTimeValue(value);
 
     // Ensure objects that may, at any point in time, contain fields with double
     // representation are always treated as nested objects. This is true for
-    // computed fields (value is undefined), and smi and double literals
-    // (value->IsNumber()).
+    // computed fields, and smi and double literals.
     // TODO(verwaest): Remove once we can store them inline.
     if (FLAG_track_double_fields &&
-        (value->IsNumber() || value->IsUninitialized(isolate))) {
+        (value->IsNumberLiteral() || !is_compile_time_value)) {
       bit_field_ = MayStoreDoublesField::update(bit_field_, true);
     }
 
-    is_simple = is_simple && !value->IsUninitialized(isolate);
+    is_simple = is_simple && is_compile_time_value;
 
     // Keep track of the number of elements in the object literal and
     // the largest element index.  If the largest element index is
     // much larger than the number of elements, creating an object
     // literal with fast elements will be a waste of space.
     uint32_t element_index = 0;
-    if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
+    if (key->IsString() && key->AsString()->AsArrayIndex(&element_index)) {
       max_element_index = Max(element_index, max_element_index);
       elements++;
-      key = isolate->factory()->NewNumberFromUint(element_index);
-    } else if (key->ToArrayIndex(&element_index)) {
+    } else if (key->ToUint32(&element_index) && element_index != kMaxUInt32) {
       max_element_index = Max(element_index, max_element_index);
       elements++;
-    } else if (key->IsNumber()) {
-      key = isolate->factory()->NumberToString(key);
     }
 
-    // Add name, value pair to the fixed array.
-    constant_properties->set(position++, *key);
-    constant_properties->set(position++, *value);
+    // Increment the position for the key and the value.
+    position += 2;
   }
 
-  constant_properties_ = constant_properties;
   bit_field_ = FastElementsField::update(
       bit_field_,
       (max_element_index <= 32) || ((2 * elements) >= max_element_index));
@@ -574,6 +608,117 @@
   set_depth(depth_acc);
 }
 
+void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
+  if (!constant_properties_.is_null()) return;
+
+  int index_keys = 0;
+  bool has_seen_proto = false;
+  for (int i = 0; i < properties()->length(); i++) {
+    ObjectLiteral::Property* property = properties()->at(i);
+    if (!IsBoilerplateProperty(property)) {
+      has_seen_proto = true;
+      continue;
+    }
+    if (property->is_computed_name()) {
+      continue;
+    }
+
+    Handle<Object> key = property->key()->AsLiteral()->value();
+
+    uint32_t element_index = 0;
+    if (key->ToArrayIndex(&element_index) ||
+        (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index))) {
+      index_keys++;
+    }
+  }
+
+  Handle<BoilerplateDescription> constant_properties =
+      isolate->factory()->NewBoilerplateDescription(boilerplate_properties_,
+                                                    properties()->length(),
+                                                    index_keys, has_seen_proto);
+
+  int position = 0;
+  for (int i = 0; i < properties()->length(); i++) {
+    ObjectLiteral::Property* property = properties()->at(i);
+    if (!IsBoilerplateProperty(property)) {
+      continue;
+    }
+
+    if (static_cast<uint32_t>(position) == boilerplate_properties_ * 2) {
+      DCHECK(property->is_computed_name());
+      break;
+    }
+    DCHECK(!property->is_computed_name());
+
+    MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
+    if (m_literal != NULL) {
+      m_literal->BuildConstants(isolate);
+    }
+
+    // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
+    // value for COMPUTED properties; the real value is filled in at
+    // runtime. The enumeration order is maintained.
+    Handle<Object> key = property->key()->AsLiteral()->value();
+    Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
+
+    uint32_t element_index = 0;
+    if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
+      key = isolate->factory()->NewNumberFromUint(element_index);
+    } else if (key->IsNumber() && !key->ToArrayIndex(&element_index)) {
+      key = isolate->factory()->NumberToString(key);
+    }
+
+    // Add name, value pair to the fixed array.
+    constant_properties->set(position++, *key);
+    constant_properties->set(position++, *value);
+  }
+
+  constant_properties_ = constant_properties;
+}
+
+bool ObjectLiteral::IsFastCloningSupported() const {
+  // The FastCloneShallowObject builtin doesn't copy elements, and object
+  // literals don't support copy-on-write (COW) elements for now.
+  // TODO(mvstanton): make object literals support COW elements.
+  return fast_elements() && has_shallow_properties() &&
+         properties_count() <= ConstructorBuiltinsAssembler::
+                                   kMaximumClonedShallowObjectProperties;
+}
+
+ElementsKind ArrayLiteral::constant_elements_kind() const {
+  return static_cast<ElementsKind>(constant_elements()->elements_kind());
+}
+
+void ArrayLiteral::InitDepthAndFlags() {
+  DCHECK_LT(first_spread_index_, 0);
+
+  if (depth_ > 0) return;
+
+  int constants_length = values()->length();
+
+  // Fill in the literals.
+  bool is_simple = true;
+  int depth_acc = 1;
+  int array_index = 0;
+  for (; array_index < constants_length; array_index++) {
+    Expression* element = values()->at(array_index);
+    DCHECK(!element->IsSpread());
+    MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
+    if (m_literal != NULL) {
+      m_literal->InitDepthAndFlags();
+      if (m_literal->depth() + 1 > depth_acc) {
+        depth_acc = m_literal->depth() + 1;
+      }
+    }
+
+    if (!CompileTimeValue::IsCompileTimeValue(element)) {
+      is_simple = false;
+    }
+  }
+
+  set_is_simple(is_simple);
+  set_depth(depth_acc);
+}
 
 void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
   DCHECK_LT(first_spread_index_, 0);
@@ -586,8 +731,6 @@
       isolate->factory()->NewFixedArrayWithHoles(constants_length);
 
   // Fill in the literals.
-  bool is_simple = true;
-  int depth_acc = 1;
   bool is_holey = false;
   int array_index = 0;
   for (; array_index < constants_length; array_index++) {
@@ -596,9 +739,6 @@
     MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
     if (m_literal != NULL) {
       m_literal->BuildConstants(isolate);
-      if (m_literal->depth() + 1 > depth_acc) {
-        depth_acc = m_literal->depth() + 1;
-      }
     }
 
     // New handle scope here, needs to be after BuildConstants().
@@ -611,7 +751,6 @@
 
     if (boilerplate_value->IsUninitialized(isolate)) {
       boilerplate_value = handle(Smi::kZero, isolate);
-      is_simple = false;
     }
 
     kind = GetMoreGeneralElementsKind(kind,
@@ -623,7 +762,7 @@
 
   // Simple and shallow arrays can be lazily copied; we transform the
   // elements array to a copy-on-write array.
-  if (is_simple && depth_acc == 1 && array_index > 0 &&
+  if (is_simple() && depth() == 1 && array_index > 0 &&
       IsFastSmiOrObjectElementsKind(kind)) {
     fixed_array->set_map(isolate->heap()->fixed_cow_array_map());
   }
@@ -637,21 +776,29 @@
     accessor->CopyElements(fixed_array, from_kind, elements, constants_length);
   }
 
-  // Remember both the literal's constant values as well as the ElementsKind
-  // in a 2-element FixedArray.
-  Handle<FixedArray> literals = isolate->factory()->NewFixedArray(2, TENURED);
-  literals->set(0, Smi::FromInt(kind));
-  literals->set(1, *elements);
+  // Remember both the literal's constant values as well as the ElementsKind.
+  Handle<ConstantElementsPair> literals =
+      isolate->factory()->NewConstantElementsPair(kind, elements);
 
   constant_elements_ = literals;
-  set_is_simple(is_simple);
-  set_depth(depth_acc);
 }
 
+bool ArrayLiteral::IsFastCloningSupported() const {
+  return depth() <= 1 &&
+         values()->length() <=
+             ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements;
+}
 
-void ArrayLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
-                                             FeedbackVectorSpec* spec,
-                                             FeedbackVectorSlotCache* cache) {
+void ArrayLiteral::RewindSpreads() {
+  values_->Rewind(first_spread_index_);
+  first_spread_index_ = -1;
+}
+
+void ArrayLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+                                       LanguageMode language_mode,
+                                       FeedbackSlotCache* cache) {
+  MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, cache);
+
   // This logic that computes the number of slots needed for vector store
   // ICs must mirror FullCodeGenerator::VisitArrayLiteral.
   for (int array_index = 0; array_index < values()->length(); array_index++) {
@@ -661,7 +808,7 @@
 
     // We'll reuse the same literal slot for all of the non-constant
     // subexpressions that use a keyed store IC.
-    literal_slot_ = spec->AddKeyedStoreICSlot();
+    literal_slot_ = spec->AddKeyedStoreICSlot(language_mode);
     return;
   }
 }
@@ -678,6 +825,16 @@
   return isolate->factory()->uninitialized_value();
 }
 
+void MaterializedLiteral::InitDepthAndFlags() {
+  if (IsArrayLiteral()) {
+    return AsArrayLiteral()->InitDepthAndFlags();
+  }
+  if (IsObjectLiteral()) {
+    return AsObjectLiteral()->InitDepthAndFlags();
+  }
+  DCHECK(IsRegExpLiteral());
+  DCHECK_LE(1, depth());  // Depth should be initialized.
+}
 
 void MaterializedLiteral::BuildConstants(Isolate* isolate) {
   if (IsArrayLiteral()) {
@@ -687,7 +844,6 @@
     return AsObjectLiteral()->BuildConstantProperties(isolate);
   }
   DCHECK(IsRegExpLiteral());
-  DCHECK(depth() >= 1);  // Depth should be initialized.
 }
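
Taken together, these hunks split literal setup into two phases:
InitDepthAndFlags() derives depth and simplicity without allocating, while
BuildConstants() materializes boilerplate objects and therefore needs an
Isolate. A minimal sketch of that split, assuming the recursion mirrors the
dispatch above ("Node" is an illustrative stand-in for MaterializedLiteral):

#include <algorithm>
#include <vector>

struct Node {
  std::vector<Node> children;
  int depth = 0;
  bool is_simple = true;

  // Phase 1: pure bookkeeping, no heap access required.
  void InitDepthAndFlags() {
    int depth_acc = 1;
    for (Node& child : children) {
      child.InitDepthAndFlags();
      depth_acc = std::max(depth_acc, child.depth + 1);
      is_simple = is_simple && child.is_simple;
    }
    depth = depth_acc;
  }

  // Phase 2: would materialize boilerplate on the heap; it may rely on the
  // depth/is_simple values computed in phase 1.
  void BuildConstants(/* Isolate* isolate */) {
    for (Node& child : children) child.BuildConstants();
  }
};

Keeping phase 1 isolate-free is what lets is_simple() and depth() be
consulted, as in the copy-on-write check above, before anything is allocated.
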
 
 
@@ -710,9 +866,9 @@
   set_to_boolean_types(oracle->ToBooleanTypes(right()->test_id()));
 }
 
-void BinaryOperation::AssignFeedbackVectorSlots(
-    Isolate* isolate, FeedbackVectorSpec* spec,
-    FeedbackVectorSlotCache* cache) {
+void BinaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+                                          LanguageMode language_mode,
+                                          FeedbackSlotCache* cache) {
   // The feedback vector slot is only used by the interpreter for binary
   // operations. Full-codegen uses AstId to record type feedback.
   switch (op()) {
@@ -722,7 +878,7 @@
     case Token::OR:
       return;
     default:
-      type_feedback_slot_ = spec->AddInterpreterBinaryOpICSlot();
+      feedback_slot_ = spec->AddInterpreterBinaryOpICSlot();
       return;
   }
 }
@@ -732,9 +888,9 @@
   return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
 }
 
-void CompareOperation::AssignFeedbackVectorSlots(
-    Isolate* isolate, FeedbackVectorSpec* spec,
-    FeedbackVectorSlotCache* cache_) {
+void CompareOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+                                           LanguageMode language_mode,
+                                           FeedbackSlotCache* cache_) {
   // The feedback vector slot is only used by the interpreter for compare
   // operations. Full-codegen uses AstId to record type feedback.
   switch (op()) {
@@ -743,7 +899,7 @@
     case Token::IN:
       return;
     default:
-      type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
+      feedback_slot_ = spec->AddInterpreterCompareICSlot();
   }
 }
 
@@ -892,8 +1048,9 @@
   }
 }
 
-void Call::AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                     FeedbackVectorSlotCache* cache) {
+void Call::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+                               LanguageMode language_mode,
+                               FeedbackSlotCache* cache) {
   ic_slot_ = spec->AddCallICSlot();
 }
 
@@ -931,10 +1088,10 @@
       statements_(statements),
       compare_type_(AstType::None()) {}
 
-void CaseClause::AssignFeedbackVectorSlots(Isolate* isolate,
-                                           FeedbackVectorSpec* spec,
-                                           FeedbackVectorSlotCache* cache) {
-  type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
+void CaseClause::AssignFeedbackSlots(FeedbackVectorSpec* spec,
+                                     LanguageMode language_mode,
+                                     FeedbackSlotCache* cache) {
+  feedback_slot_ = spec->AddInterpreterCompareICSlot();
 }
 
 uint32_t Literal::Hash() {
@@ -952,5 +1109,14 @@
          (x->IsNumber() && y->IsNumber() && x->AsNumber() == y->AsNumber());
 }
 
+const char* CallRuntime::debug_name() {
+#ifdef DEBUG
+  return is_jsruntime() ? NameForNativeContextIntrinsicIndex(context_index_)
+                        : function_->name;
+#else
+  return is_jsruntime() ? "(context function)" : function_->name;
+#endif  // DEBUG
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/ast/ast.h b/src/ast/ast.h
index 99e0672..90e94bb 100644
--- a/src/ast/ast.h
+++ b/src/ast/ast.h
@@ -14,11 +14,12 @@
 #include "src/factory.h"
 #include "src/globals.h"
 #include "src/isolate.h"
+#include "src/label.h"
 #include "src/list.h"
+#include "src/objects/literal-objects.h"
 #include "src/parsing/token.h"
 #include "src/runtime/runtime.h"
 #include "src/small-pointer-list.h"
-#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
@@ -102,6 +103,7 @@
   V(SuperCallReference)         \
   V(CaseClause)                 \
   V(EmptyParentheses)           \
+  V(GetIterator)                \
   V(DoExpression)               \
   V(RewritableExpression)
 
@@ -125,27 +127,29 @@
 AST_NODE_LIST(DEF_FORWARD_DECLARATION)
 #undef DEF_FORWARD_DECLARATION
 
-
-class FeedbackVectorSlotCache {
+class FeedbackSlotCache {
  public:
-  explicit FeedbackVectorSlotCache(Zone* zone)
-      : zone_(zone),
-        hash_map_(ZoneHashMap::kDefaultHashMapCapacity,
-                  ZoneAllocationPolicy(zone)) {}
+  typedef std::pair<TypeofMode, Variable*> Key;
 
-  void Put(Variable* variable, FeedbackVectorSlot slot) {
-    ZoneHashMap::Entry* entry = hash_map_.LookupOrInsert(
-        variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone_));
-    entry->value = reinterpret_cast<void*>(slot.ToInt());
+  explicit FeedbackSlotCache(Zone* zone) : map_(zone) {}
+
+  void Put(TypeofMode typeof_mode, Variable* variable, FeedbackSlot slot) {
+    Key key = std::make_pair(typeof_mode, variable);
+    auto entry = std::make_pair(key, slot);
+    map_.insert(entry);
   }
 
-  ZoneHashMap::Entry* Get(Variable* variable) const {
-    return hash_map_.Lookup(variable, ComputePointerHash(variable));
+  FeedbackSlot Get(TypeofMode typeof_mode, Variable* variable) const {
+    Key key = std::make_pair(typeof_mode, variable);
+    auto iter = map_.find(key);
+    if (iter != map_.end()) {
+      return iter->second;
+    }
+    return FeedbackSlot();
   }
 
  private:
-  Zone* zone_;
-  ZoneHashMap hash_map_;
+  ZoneMap<Key, FeedbackSlot> map_;
 };
 
 
@@ -154,7 +158,7 @@
   enum Flag {
     kNoFlags = 0,
     kDontSelfOptimize = 1 << 0,
-    kDontCrankshaft = 1 << 1
+    kMustUseIgnitionTurbo = 1 << 1
   };
 
   typedef base::Flags<Flag> Flags;
@@ -190,6 +194,7 @@
   int position() const { return position_; }
 
 #ifdef DEBUG
+  void Print();
   void Print(Isolate* isolate);
 #endif  // DEBUG
 
@@ -317,6 +322,9 @@
   // True iff the expression is a literal represented as a smi.
   bool IsSmiLiteral() const;
 
+  // True iff the expression is a literal represented as a number.
+  bool IsNumberLiteral() const;
+
   // True iff the expression is a string literal.
   bool IsStringLiteral() const;
 
@@ -466,9 +474,6 @@
 
   class IgnoreCompletionField
       : public BitField<bool, BreakableStatement::kNextBitFieldIndex, 1> {};
-
- protected:
-  static const uint8_t kNextBitFieldIndex = IgnoreCompletionField::kNext;
 };
 
 
@@ -484,9 +489,6 @@
   }
   bool IsAnonymousFunctionDefinition() const;
 
- protected:
-  static const uint8_t kNextBitFieldIndex = Expression::kNextBitFieldIndex;
-
  private:
   friend class AstNodeFactory;
 
@@ -518,8 +520,6 @@
   Declaration(VariableProxy* proxy, Scope* scope, int pos, NodeType type)
       : AstNode(pos, type), proxy_(proxy), scope_(scope), next_(nullptr) {}
 
-  static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
-
  private:
   VariableProxy* proxy_;
   // Nested scope from which the declaration originated.
@@ -734,10 +734,10 @@
   void set_subject(Expression* e) { subject_ = e; }
 
   // Type feedback information.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache);
-  FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
-  FeedbackVectorSlot ForInFeedbackSlot() {
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache);
+  FeedbackSlot EachFeedbackSlot() const { return each_slot_; }
+  FeedbackSlot ForInFeedbackSlot() {
     DCHECK(!for_in_feedback_slot_.IsInvalid());
     return for_in_feedback_slot_;
   }
@@ -773,14 +773,11 @@
 
   Expression* each_;
   Expression* subject_;
-  FeedbackVectorSlot each_slot_;
-  FeedbackVectorSlot for_in_feedback_slot_;
+  FeedbackSlot each_slot_;
+  FeedbackSlot for_in_feedback_slot_;
 
   class ForInTypeField
       : public BitField<ForInType, ForEachStatement::kNextBitFieldIndex, 1> {};
-
- protected:
-  static const uint8_t kNextBitFieldIndex = ForInTypeField::kNext;
 };
 
 
@@ -826,12 +823,6 @@
   void set_result_done(Expression* e) { result_done_ = e; }
   void set_assign_each(Expression* e) { assign_each_ = e; }
 
-  BailoutId ContinueId() const { return EntryId(); }
-  BailoutId StackCheckId() const { return BackEdgeId(); }
-
-  static int num_ids() { return parent_num_ids() + 1; }
-  BailoutId BackEdgeId() const { return BailoutId(local_id(0)); }
-
  private:
   friend class AstNodeFactory;
 
@@ -842,8 +833,6 @@
         next_result_(NULL),
         result_done_(NULL),
         assign_each_(NULL) {}
-  static int parent_num_ids() { return ForEachStatement::num_ids(); }
-  int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
   Variable* iterator_;
   Expression* assign_iterator_;
@@ -908,17 +897,25 @@
 
 class ReturnStatement final : public JumpStatement {
  public:
+  enum Type { kNormal, kAsyncReturn };
   Expression* expression() const { return expression_; }
 
   void set_expression(Expression* e) { expression_ = e; }
+  Type type() const { return TypeField::decode(bit_field_); }
+  bool is_async_return() const { return type() == kAsyncReturn; }
 
  private:
   friend class AstNodeFactory;
 
-  ReturnStatement(Expression* expression, int pos)
-      : JumpStatement(pos, kReturnStatement), expression_(expression) {}
+  ReturnStatement(Expression* expression, Type type, int pos)
+      : JumpStatement(pos, kReturnStatement), expression_(expression) {
+    bit_field_ |= TypeField::encode(type);
+  }
 
   Expression* expression_;
+
+  class TypeField
+      : public BitField<Type, JumpStatement::kNextBitFieldIndex, 1> {};
 };
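
ReturnStatement now packs its Type into a single bit appended after
JumpStatement's bits via the kNextBitFieldIndex chain. For reference, a
minimal standalone version of the BitField pattern this header leans on:

#include <cstdint>

// Each field occupies `size` bits starting at `shift` of a single uint32_t
// bit_field_; `kNext` is where the next field may start packing.
template <class T, int shift, int size>
struct BitField {
  static const int kNext = shift + size;
  static const uint32_t kMask = ((1u << size) - 1) << shift;

  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t bits) {
    return static_cast<T>((bits & kMask) >> shift);
  }
  static uint32_t update(uint32_t bits, T value) {
    return (bits & ~kMask) | encode(value);
  }
};
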
 
 
@@ -930,30 +927,16 @@
   Statement* statement() const { return statement_; }
   void set_statement(Statement* s) { statement_ = s; }
 
-  void set_base_id(int id) { base_id_ = id; }
-  static int num_ids() { return parent_num_ids() + 2; }
-  BailoutId ToObjectId() const { return BailoutId(local_id(0)); }
-  BailoutId EntryId() const { return BailoutId(local_id(1)); }
-
  private:
   friend class AstNodeFactory;
 
   WithStatement(Scope* scope, Expression* expression, Statement* statement,
                 int pos)
       : Statement(pos, kWithStatement),
-        base_id_(BailoutId::None().ToInt()),
         scope_(scope),
         expression_(expression),
         statement_(statement) {}
 
-  static int parent_num_ids() { return 0; }
-  int base_id() const {
-    DCHECK(!BailoutId(base_id_).IsNone());
-    return base_id_;
-  }
-  int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
-  int base_id_;
   Scope* scope_;
   Expression* expression_;
   Statement* statement_;
@@ -981,12 +964,10 @@
   // CaseClause will have both a slot in the feedback vector and the
   // TypeFeedbackId to record the type information. TypeFeedbackId is used by
   // full-codegen, and the feedback vector slot is used by the interpreter.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache);
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache);
 
-  FeedbackVectorSlot CompareOperationFeedbackSlot() {
-    return type_feedback_slot_;
-  }
+  FeedbackSlot CompareOperationFeedbackSlot() { return feedback_slot_; }
 
  private:
   friend class AstNodeFactory;
@@ -999,7 +980,7 @@
   Label body_target_;
   ZoneList<Statement*>* statements_;
   AstType* compare_type_;
-  FeedbackVectorSlot type_feedback_slot_;
+  FeedbackSlot feedback_slot_;
 };
 
 
@@ -1174,26 +1155,10 @@
 
 
 class DebuggerStatement final : public Statement {
- public:
-  void set_base_id(int id) { base_id_ = id; }
-  static int num_ids() { return parent_num_ids() + 1; }
-  BailoutId DebugBreakId() const { return BailoutId(local_id(0)); }
-
  private:
   friend class AstNodeFactory;
 
-  explicit DebuggerStatement(int pos)
-      : Statement(pos, kDebuggerStatement),
-        base_id_(BailoutId::None().ToInt()) {}
-
-  static int parent_num_ids() { return 0; }
-  int base_id() const {
-    DCHECK(!BailoutId(base_id_).IsNone());
-    return base_id_;
-  }
-  int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
-  int base_id_;
+  explicit DebuggerStatement(int pos) : Statement(pos, kDebuggerStatement) {}
 };
 
 
@@ -1212,22 +1177,15 @@
  public:
   Statement* statement() const { return statement_; }
   void set_statement(Statement* statement) { statement_ = statement; }
-  Scope* scope() const { return scope_; }
-  SloppyBlockFunctionStatement* next() { return next_; }
-  void set_next(SloppyBlockFunctionStatement* next) { next_ = next; }
 
  private:
   friend class AstNodeFactory;
 
-  SloppyBlockFunctionStatement(Statement* statement, Scope* scope)
+  explicit SloppyBlockFunctionStatement(Statement* statement)
       : Statement(kNoSourcePosition, kSloppyBlockFunctionStatement),
-        statement_(statement),
-        scope_(scope),
-        next_(nullptr) {}
+        statement_(statement) {}
 
   Statement* statement_;
-  Scope* const scope_;
-  SloppyBlockFunctionStatement* next_;
 };
 
 
@@ -1275,32 +1233,32 @@
   const AstValue* value_;
 };
 
-
-class AstLiteralReindexer;
-
-// Base class for literals that needs space in the corresponding JSFunction.
+// Base class for literals that need space in the type feedback vector.
 class MaterializedLiteral : public Expression {
  public:
-  int literal_index() { return literal_index_; }
-
   int depth() const {
     // Only callable after initialization.
     DCHECK(depth_ >= 1);
     return depth_;
   }
 
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache) {
+    literal_slot_ = spec->AddLiteralSlot();
+  }
+
+  FeedbackSlot literal_slot() const { return literal_slot_; }
+
  private:
   int depth_ : 31;
-  int literal_index_;
-
-  friend class AstLiteralReindexer;
+  FeedbackSlot literal_slot_;
 
   class IsSimpleField
       : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
 
  protected:
-  MaterializedLiteral(int literal_index, int pos, NodeType type)
-      : Expression(pos, type), depth_(0), literal_index_(literal_index) {
+  MaterializedLiteral(int pos, NodeType type)
+      : Expression(pos, type), depth_(0) {
     bit_field_ |= IsSimpleField::encode(false);
   }
 
@@ -1317,6 +1275,9 @@
     depth_ = depth;
   }
 
+  // Populate the depth field and any flags the literal has.
+  void InitDepthAndFlags();
+
   // Populate the constant properties/elements fixed array.
   void BuildConstants(Isolate* isolate);
   friend class ArrayLiteral;
@@ -1342,16 +1303,20 @@
 
   bool is_computed_name() const { return is_computed_name_; }
 
-  FeedbackVectorSlot GetSlot(int offset = 0) const {
+  FeedbackSlot GetSlot(int offset = 0) const {
     DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
     return slots_[offset];
   }
 
-  void SetSlot(FeedbackVectorSlot slot, int offset = 0) {
+  FeedbackSlot GetStoreDataPropertySlot() const;
+
+  void SetSlot(FeedbackSlot slot, int offset = 0) {
     DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
     slots_[offset] = slot;
   }
 
+  void SetStoreDataPropertySlot(FeedbackSlot slot);
+
   bool NeedsSetFunctionName() const;
 
  protected:
@@ -1360,7 +1325,7 @@
 
   Expression* key_;
   Expression* value_;
-  FeedbackVectorSlot slots_[2];
+  FeedbackSlot slots_[2];
   bool is_computed_name_;
 };
 
@@ -1374,8 +1339,9 @@
     COMPUTED,              // Property with computed value (execution time).
     MATERIALIZED_LITERAL,  // Property value is a materialized literal.
     GETTER,
-    SETTER,    // Property is an accessor function.
-    PROTOTYPE  // Property is __proto__.
+    SETTER,     // Property is an accessor function.
+    PROTOTYPE,  // Property is __proto__.
+    SPREAD
   };
 
   Kind kind() const { return kind_; }
@@ -1411,7 +1377,8 @@
  public:
   typedef ObjectLiteralProperty Property;
 
-  Handle<FixedArray> constant_properties() const {
+  Handle<BoilerplateDescription> constant_properties() const {
+    DCHECK(!constant_properties_.is_null());
     return constant_properties_;
   }
   int properties_count() const { return boilerplate_properties_; }
@@ -1424,10 +1391,25 @@
   bool has_shallow_properties() const {
     return depth() == 1 && !has_elements() && !may_store_doubles();
   }
+  bool has_rest_property() const {
+    return HasRestPropertyField::decode(bit_field_);
+  }
 
   // Decide if a property should be in the object boilerplate.
   static bool IsBoilerplateProperty(Property* property);
 
+  // Populate the depth field and flags.
+  void InitDepthAndFlags();
+
+  // Get the constant properties fixed array, populating it if necessary.
+  Handle<BoilerplateDescription> GetOrBuildConstantProperties(
+      Isolate* isolate) {
+    if (constant_properties_.is_null()) {
+      BuildConstantProperties(isolate);
+    }
+    return constant_properties();
+  }
+
   // Populate the constant properties fixed array.
   void BuildConstantProperties(Isolate* isolate);
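
GetOrBuildConstantProperties() (and its ArrayLiteral counterpart further
down) wrap the eager Build* call in a build-on-first-use check. A sketch of
that memoization shape, with std::string as an illustrative stand-in for the
BoilerplateDescription handle:

#include <memory>
#include <string>

class LazyBoilerplate {
 public:
  const std::string& GetOrBuild() {
    if (!cached_) {
      cached_ = std::make_unique<std::string>(Build());  // first use only
    }
    return *cached_;
  }

 private:
  std::string Build() { return "boilerplate"; }  // expensive in the real code
  std::unique_ptr<std::string> cached_;
};
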
 
@@ -1436,6 +1418,9 @@
   // marked expressions, no store code is emitted.
   void CalculateEmitStore(Zone* zone);
 
+  // Determines whether the {FastCloneShallowObject} builtin can be used.
+  bool IsFastCloningSupported() const;
+
   // Assemble bitfield of flags for the CreateObjectLiteral helper.
   int ComputeFlags(bool disable_mementos = false) const {
     int flags = fast_elements() ? kFastElements : kNoFlags;
@@ -1452,7 +1437,8 @@
     kNoFlags = 0,
     kFastElements = 1,
     kShallowProperties = 1 << 1,
-    kDisableMementos = 1 << 2
+    kDisableMementos = 1 << 2,
+    kHasRestProperty = 1 << 3,
   };
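
ComputeFlags() above folds these bits together for the CreateObjectLiteral
helper; with kHasRestProperty now claiming 1 << 3, a worked composition looks
as follows (ComposeFlags is an illustrative stand-in, since the full
ComputeFlags() body sits outside this hunk):

enum LiteralFlags {
  kNoFlags = 0,
  kFastElements = 1,
  kShallowProperties = 1 << 1,
  kDisableMementos = 1 << 2,
  kHasRestProperty = 1 << 3,
};

int ComposeFlags(bool fast_elements, bool shallow, bool disable_mementos,
                 bool has_rest_property) {
  int flags = fast_elements ? kFastElements : kNoFlags;
  if (shallow) flags |= kShallowProperties;
  if (disable_mementos) flags |= kDisableMementos;
  if (has_rest_property) flags |= kHasRestProperty;
  return flags;
}
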
 
   struct Accessors: public ZoneObject {
@@ -1465,43 +1451,37 @@
   BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
 
   // Return an AST id for a property that is used in simulate instructions.
-  BailoutId GetIdForPropertyName(int i) {
-    return BailoutId(local_id(2 * i + 1));
-  }
-  BailoutId GetIdForPropertySet(int i) {
-    return BailoutId(local_id(2 * i + 2));
-  }
+  BailoutId GetIdForPropertySet(int i) { return BailoutId(local_id(i + 1)); }
 
   // Unlike other AST nodes, the number of bailout IDs allocated for an
   // ObjectLiteral can vary, so num_ids() is not a static method.
-  int num_ids() const {
-    return parent_num_ids() + 1 + 2 * properties()->length();
-  }
+  int num_ids() const { return parent_num_ids() + 1 + properties()->length(); }
 
   // Object literals need one feedback slot for each non-trivial value, as well
   // as some slots for home objects.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache);
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache);
 
  private:
   friend class AstNodeFactory;
 
-  ObjectLiteral(ZoneList<Property*>* properties, int literal_index,
-                uint32_t boilerplate_properties, int pos)
-      : MaterializedLiteral(literal_index, pos, kObjectLiteral),
+  ObjectLiteral(ZoneList<Property*>* properties,
+                uint32_t boilerplate_properties, int pos,
+                bool has_rest_property)
+      : MaterializedLiteral(pos, kObjectLiteral),
         boilerplate_properties_(boilerplate_properties),
         properties_(properties) {
     bit_field_ |= FastElementsField::encode(false) |
                   HasElementsField::encode(false) |
-                  MayStoreDoublesField::encode(false);
+                  MayStoreDoublesField::encode(false) |
+                  HasRestPropertyField::encode(has_rest_property);
   }
 
   static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
   uint32_t boilerplate_properties_;
-  FeedbackVectorSlot slot_;
-  Handle<FixedArray> constant_properties_;
+  Handle<BoilerplateDescription> constant_properties_;
   ZoneList<Property*>* properties_;
 
   class FastElementsField
@@ -1510,9 +1490,8 @@
   };
   class MayStoreDoublesField
       : public BitField<bool, HasElementsField::kNext, 1> {};
-
- protected:
-  static const uint8_t kNextBitFieldIndex = MayStoreDoublesField::kNext;
+  class HasRestPropertyField
+      : public BitField<bool, MayStoreDoublesField::kNext, 1> {};
 };
 
 
@@ -1543,14 +1522,14 @@
 class RegExpLiteral final : public MaterializedLiteral {
  public:
   Handle<String> pattern() const { return pattern_->string(); }
+  const AstRawString* raw_pattern() const { return pattern_; }
   int flags() const { return flags_; }
 
  private:
   friend class AstNodeFactory;
 
-  RegExpLiteral(const AstRawString* pattern, int flags, int literal_index,
-                int pos)
-      : MaterializedLiteral(literal_index, pos, kRegExpLiteral),
+  RegExpLiteral(const AstRawString* pattern, int flags, int pos)
+      : MaterializedLiteral(pos, kRegExpLiteral),
         flags_(flags),
         pattern_(pattern) {
     set_depth(1);
@@ -1565,12 +1544,10 @@
 // for minimizing the work when constructing it at runtime.
 class ArrayLiteral final : public MaterializedLiteral {
  public:
-  Handle<FixedArray> constant_elements() const { return constant_elements_; }
-  ElementsKind constant_elements_kind() const {
-    DCHECK_EQ(2, constant_elements_->length());
-    return static_cast<ElementsKind>(
-        Smi::cast(constant_elements_->get(0))->value());
+  Handle<ConstantElementsPair> constant_elements() const {
+    return constant_elements_;
   }
+  ElementsKind constant_elements_kind() const;
 
   ZoneList<Expression*>* values() const { return values_; }
 
@@ -1583,9 +1560,23 @@
   // ArrayLiteral can vary, so num_ids() is not a static method.
   int num_ids() const { return parent_num_ids() + 1 + values()->length(); }
 
+  // Populate the depth field and flags.
+  void InitDepthAndFlags();
+
+  // Get the constant elements fixed array, populating it if necessary.
+  Handle<ConstantElementsPair> GetOrBuildConstantElements(Isolate* isolate) {
+    if (constant_elements_.is_null()) {
+      BuildConstantElements(isolate);
+    }
+    return constant_elements();
+  }
+
   // Populate the constant elements fixed array.
   void BuildConstantElements(Isolate* isolate);
 
+  // Determines whether the {FastCloneShallowArray} builtin can be used.
+  bool IsFastCloningSupported() const;
+
   // Assemble bitfield of flags for the CreateArrayLiteral helper.
   int ComputeFlags(bool disable_mementos = false) const {
     int flags = depth() == 1 ? kShallowElements : kNoFlags;
@@ -1603,10 +1594,7 @@
   ZoneList<Expression*>::iterator EndValue() const { return values_->end(); }
 
   // Rewind an array literal omitting everything from the first spread on.
-  void RewindSpreads() {
-    values_->Rewind(first_spread_index_);
-    first_spread_index_ = -1;
-  }
+  void RewindSpreads();
 
   enum Flags {
     kNoFlags = 0,
@@ -1614,16 +1602,15 @@
     kDisableMementos = 1 << 1
   };
 
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache);
-  FeedbackVectorSlot LiteralFeedbackSlot() const { return literal_slot_; }
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache);
+  FeedbackSlot LiteralFeedbackSlot() const { return literal_slot_; }
 
  private:
   friend class AstNodeFactory;
 
-  ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index,
-               int literal_index, int pos)
-      : MaterializedLiteral(literal_index, pos, kArrayLiteral),
+  ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index, int pos)
+      : MaterializedLiteral(pos, kArrayLiteral),
         first_spread_index_(first_spread_index),
         values_(values) {}
 
@@ -1631,8 +1618,8 @@
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
   int first_spread_index_;
-  FeedbackVectorSlot literal_slot_;
-  Handle<FixedArray> constant_elements_;
+  FeedbackSlot literal_slot_;
+  Handle<ConstantElementsPair> constant_elements_;
   ZoneList<Expression*>* values_;
 };
 
@@ -1663,6 +1650,9 @@
   bool is_assigned() const { return IsAssignedField::decode(bit_field_); }
   void set_is_assigned() {
     bit_field_ = IsAssignedField::update(bit_field_, true);
+    if (is_resolved()) {
+      var()->set_maybe_assigned();
+    }
   }
 
   bool is_resolved() const { return IsResolvedField::decode(bit_field_); }
@@ -1690,10 +1680,10 @@
     return var()->IsUnallocated() || var()->IsLookupSlot();
   }
 
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache);
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, TypeofMode typeof_mode,
+                           FeedbackSlotCache* cache);
 
-  FeedbackVectorSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
+  FeedbackSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
 
   static int num_ids() { return parent_num_ids() + 1; }
   BailoutId BeforeId() const { return BailoutId(local_id(0)); }
@@ -1719,7 +1709,7 @@
   class HoleCheckModeField
       : public BitField<HoleCheckMode, IsNewTargetField::kNext, 1> {};
 
-  FeedbackVectorSlot variable_feedback_slot_;
+  FeedbackSlot variable_feedback_slot_;
   union {
     const AstRawString* raw_name_;  // if !is_resolved_
     Variable* var_;                 // if is_resolved_
@@ -1786,17 +1776,16 @@
 
   bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
 
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache) {
-    FeedbackVectorSlotKind kind = key()->IsPropertyName()
-                                      ? FeedbackVectorSlotKind::LOAD_IC
-                                      : FeedbackVectorSlotKind::KEYED_LOAD_IC;
-    property_feedback_slot_ = spec->AddSlot(kind);
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache) {
+    if (key()->IsPropertyName()) {
+      property_feedback_slot_ = spec->AddLoadICSlot();
+    } else {
+      property_feedback_slot_ = spec->AddKeyedLoadICSlot();
+    }
   }
 
-  FeedbackVectorSlot PropertyFeedbackSlot() const {
-    return property_feedback_slot_;
-  }
+  FeedbackSlot PropertyFeedbackSlot() const { return property_feedback_slot_; }
 
   // Returns the properties assign type.
   static LhsKind GetAssignType(Property* property) {
@@ -1829,7 +1818,7 @@
   class InlineCacheStateField
       : public BitField<InlineCacheState, KeyTypeField::kNext, 4> {};
 
-  FeedbackVectorSlot property_feedback_slot_;
+  FeedbackSlot property_feedback_slot_;
   Expression* obj_;
   Expression* key_;
   SmallMapList receiver_types_;
@@ -1844,10 +1833,10 @@
   void set_expression(Expression* e) { expression_ = e; }
 
   // Type feedback information.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache);
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache);
 
-  FeedbackVectorSlot CallFeedbackICSlot() const { return ic_slot_; }
+  FeedbackSlot CallFeedbackICSlot() const { return ic_slot_; }
 
   SmallMapList* GetReceiverTypes() {
     if (expression()->IsProperty()) {
@@ -1876,11 +1865,9 @@
     allocation_site_ = site;
   }
 
-  static int num_ids() { return parent_num_ids() + 4; }
+  static int num_ids() { return parent_num_ids() + 2; }
   BailoutId ReturnId() const { return BailoutId(local_id(0)); }
-  BailoutId EvalId() const { return BailoutId(local_id(1)); }
-  BailoutId LookupId() const { return BailoutId(local_id(2)); }
-  BailoutId CallId() const { return BailoutId(local_id(3)); }
+  BailoutId CallId() const { return BailoutId(local_id(1)); }
 
   bool is_uninitialized() const {
     return IsUninitializedField::decode(bit_field_);
@@ -1899,6 +1886,10 @@
   }
   void MarkTail() { bit_field_ = IsTailField::update(bit_field_, true); }
 
+  bool only_last_arg_is_spread() {
+    return !arguments_->is_empty() && arguments_->last()->IsSpread();
+  }
+
   enum CallType {
     GLOBAL_CALL,
     WITH_CALL,
@@ -1948,7 +1939,7 @@
   class IsTailField : public BitField<bool, IsUninitializedField::kNext, 1> {};
   class IsPossiblyEvalField : public BitField<bool, IsTailField::kNext, 1> {};
 
-  FeedbackVectorSlot ic_slot_;
+  FeedbackSlot ic_slot_;
   Expression* expression_;
   ZoneList<Expression*>* arguments_;
   Handle<JSFunction> target_;
@@ -1964,14 +1955,14 @@
   void set_expression(Expression* e) { expression_ = e; }
 
   // Type feedback information.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache) {
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache) {
     // CallNew stores feedback in the exact same way as Call. We can
     // piggyback on the type feedback infrastructure for calls.
     callnew_feedback_slot_ = spec->AddCallICSlot();
   }
 
-  FeedbackVectorSlot CallNewFeedbackSlot() {
+  FeedbackSlot CallNewFeedbackSlot() {
     DCHECK(!callnew_feedback_slot_.IsInvalid());
     return callnew_feedback_slot_;
   }
@@ -1998,6 +1989,10 @@
     set_is_monomorphic(true);
   }
 
+  bool only_last_arg_is_spread() {
+    return !arguments_->is_empty() && arguments_->last()->IsSpread();
+  }
+
  private:
   friend class AstNodeFactory;
 
@@ -2011,7 +2006,7 @@
   static int parent_num_ids() { return Expression::num_ids(); }
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
-  FeedbackVectorSlot callnew_feedback_slot_;
+  FeedbackSlot callnew_feedback_slot_;
   Expression* expression_;
   ZoneList<Expression*>* arguments_;
   Handle<JSFunction> target_;
@@ -2046,10 +2041,7 @@
 
   static int num_ids() { return parent_num_ids() + 1; }
   BailoutId CallId() { return BailoutId(local_id(0)); }
-
-  const char* debug_name() {
-    return is_jsruntime() ? "(context function)" : function_->name;
-  }
+  const char* debug_name();
 
  private:
   friend class AstNodeFactory;
@@ -2138,12 +2130,10 @@
   // BinaryOperation will have both a slot in the feedback vector and the
   // TypeFeedbackId to record the type information. TypeFeedbackId is used
   // by full-codegen, and the feedback vector slot is used by the interpreter.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache);
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache);
 
-  FeedbackVectorSlot BinaryOperationFeedbackSlot() const {
-    return type_feedback_slot_;
-  }
+  FeedbackSlot BinaryOperationFeedbackSlot() const { return feedback_slot_; }
 
   TypeFeedbackId BinaryOperationFeedbackId() const {
     return TypeFeedbackId(local_id(1));
@@ -2181,7 +2171,7 @@
   Expression* left_;
   Expression* right_;
   Handle<AllocationSite> allocation_site_;
-  FeedbackVectorSlot type_feedback_slot_;
+  FeedbackSlot feedback_slot_;
 
   class OperatorField
       : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
@@ -2227,13 +2217,13 @@
   }
 
   // Feedback slot for binary operation is only used by ignition.
-  FeedbackVectorSlot CountBinaryOpFeedbackSlot() const {
+  FeedbackSlot CountBinaryOpFeedbackSlot() const {
     return binary_operation_slot_;
   }
 
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache);
-  FeedbackVectorSlot CountSlot() const { return slot_; }
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache);
+  FeedbackSlot CountSlot() const { return slot_; }
 
  private:
   friend class AstNodeFactory;
@@ -2255,8 +2245,8 @@
       : public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
   class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
 
-  FeedbackVectorSlot slot_;
-  FeedbackVectorSlot binary_operation_slot_;
+  FeedbackSlot slot_;
+  FeedbackSlot binary_operation_slot_;
   AstType* type_;
   Expression* expression_;
   SmallMapList receiver_types_;
@@ -2283,12 +2273,10 @@
   // CompareOperation will have both a slot in the feedback vector and the
   // TypeFeedbackId to record the type information. TypeFeedbackId is used
   // by full-codegen, and the feedback vector slot is used by the interpreter.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache);
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache);
 
-  FeedbackVectorSlot CompareOperationFeedbackSlot() const {
-    return type_feedback_slot_;
-  }
+  FeedbackSlot CompareOperationFeedbackSlot() const { return feedback_slot_; }
 
   // Match special cases.
   bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -2315,7 +2303,7 @@
   Expression* right_;
 
   AstType* combined_type_;
-  FeedbackVectorSlot type_feedback_slot_;
+  FeedbackSlot feedback_slot_;
   class OperatorField
       : public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
 };
@@ -2429,9 +2417,9 @@
     bit_field_ = StoreModeField::update(bit_field_, mode);
   }
 
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache);
-  FeedbackVectorSlot AssignmentSlot() const { return slot_; }
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache);
+  FeedbackSlot AssignmentSlot() const { return slot_; }
 
  private:
   friend class AstNodeFactory;
@@ -2449,7 +2437,7 @@
       : public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
   class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
 
-  FeedbackVectorSlot slot_;
+  FeedbackSlot slot_;
   Expression* target_;
   Expression* value_;
   BinaryOperation* binary_operation_;
@@ -2571,6 +2559,8 @@
     kAccessorOrMethod
   };
 
+  enum IdType { kIdTypeInvalid = -1, kIdTypeTopLevel = 0 };
+
   enum ParameterFlag { kNoDuplicateParameters, kHasDuplicateParameters };
 
   enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
@@ -2594,9 +2584,15 @@
   }
   LanguageMode language_mode() const;
 
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache) {
+    literal_feedback_slot_ = spec->AddCreateClosureSlot();
+  }
+
+  FeedbackSlot LiteralFeedbackSlot() const { return literal_feedback_slot_; }
+
   static bool NeedsHomeObject(Expression* expr);
 
-  int materialized_literal_count() { return materialized_literal_count_; }
   int expected_property_count() { return expected_property_count_; }
   int parameter_count() { return parameter_count_; }
   int function_length() { return function_length_; }
@@ -2644,8 +2640,6 @@
     return HasDuplicateParameters::decode(bit_field_);
   }
 
-  bool is_function() const { return IsFunction::decode(bit_field_); }
-
   // This is used as a heuristic for when to eagerly compile a function
   // literal. We consider the following constructs as hints that the
   // function will be called immediately:
@@ -2691,38 +2685,27 @@
   int yield_count() { return yield_count_; }
   void set_yield_count(int yield_count) { yield_count_ = yield_count; }
 
-  bool requires_class_field_init() {
-    return RequiresClassFieldInit::decode(bit_field_);
-  }
-  void set_requires_class_field_init(bool requires_class_field_init) {
-    bit_field_ =
-        RequiresClassFieldInit::update(bit_field_, requires_class_field_init);
-  }
-  bool is_class_field_initializer() {
-    return IsClassFieldInitializer::decode(bit_field_);
-  }
-  void set_is_class_field_initializer(bool is_class_field_initializer) {
-    bit_field_ =
-        IsClassFieldInitializer::update(bit_field_, is_class_field_initializer);
-  }
-
   int return_position() {
     return std::max(start_position(), end_position() - (has_braces_ ? 1 : 0));
   }
 
+  int function_literal_id() const { return function_literal_id_; }
+  void set_function_literal_id(int function_literal_id) {
+    function_literal_id_ = function_literal_id;
+  }
+
  private:
   friend class AstNodeFactory;
 
   FunctionLiteral(Zone* zone, const AstString* name,
                   AstValueFactory* ast_value_factory, DeclarationScope* scope,
-                  ZoneList<Statement*>* body, int materialized_literal_count,
-                  int expected_property_count, int parameter_count,
-                  int function_length, FunctionType function_type,
+                  ZoneList<Statement*>* body, int expected_property_count,
+                  int parameter_count, int function_length,
+                  FunctionType function_type,
                   ParameterFlag has_duplicate_parameters,
                   EagerCompileHint eager_compile_hint, int position,
-                  bool is_function, bool has_braces)
+                  bool has_braces, int function_literal_id)
       : Expression(position, kFunctionLiteral),
-        materialized_literal_count_(materialized_literal_count),
         expected_property_count_(expected_property_count),
         parameter_count_(parameter_count),
         function_length_(function_length),
@@ -2733,16 +2716,14 @@
         scope_(scope),
         body_(body),
         raw_inferred_name_(ast_value_factory->empty_string()),
-        ast_properties_(zone) {
-    bit_field_ |=
-        FunctionTypeBits::encode(function_type) | Pretenure::encode(false) |
-        HasDuplicateParameters::encode(has_duplicate_parameters ==
-                                       kHasDuplicateParameters) |
-        IsFunction::encode(is_function) |
-        RequiresClassFieldInit::encode(false) |
-        ShouldNotBeUsedOnceHintField::encode(false) |
-        DontOptimizeReasonField::encode(kNoReason) |
-        IsClassFieldInitializer::encode(false);
+        ast_properties_(zone),
+        function_literal_id_(function_literal_id) {
+    bit_field_ |= FunctionTypeBits::encode(function_type) |
+                  Pretenure::encode(false) |
+                  HasDuplicateParameters::encode(has_duplicate_parameters ==
+                                                 kHasDuplicateParameters) |
+                  ShouldNotBeUsedOnceHintField::encode(false) |
+                  DontOptimizeReasonField::encode(kNoReason);
     if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
   }
 
@@ -2750,17 +2731,12 @@
       : public BitField<FunctionType, Expression::kNextBitFieldIndex, 2> {};
   class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
   class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
-  class IsFunction : public BitField<bool, HasDuplicateParameters::kNext, 1> {};
   class ShouldNotBeUsedOnceHintField
-      : public BitField<bool, IsFunction::kNext, 1> {};
-  class RequiresClassFieldInit
-      : public BitField<bool, ShouldNotBeUsedOnceHintField::kNext, 1> {};
-  class IsClassFieldInitializer
-      : public BitField<bool, RequiresClassFieldInit::kNext, 1> {};
+      : public BitField<bool, HasDuplicateParameters::kNext, 1> {};
   class DontOptimizeReasonField
-      : public BitField<BailoutReason, IsClassFieldInitializer::kNext, 8> {};
+      : public BitField<BailoutReason, ShouldNotBeUsedOnceHintField::kNext, 8> {
+  };
 
-  int materialized_literal_count_;
   int expected_property_count_;
   int parameter_count_;
   int function_length_;
@@ -2774,6 +2750,8 @@
   const AstString* raw_inferred_name_;
   Handle<String> inferred_name_;
   AstProperties ast_properties_;
+  int function_literal_id_;
+  FeedbackSlot literal_feedback_slot_;
 };
 
 // Property is used for passing information
@@ -2808,62 +2786,55 @@
   ZoneList<Property*>* properties() const { return properties_; }
   int start_position() const { return position(); }
   int end_position() const { return end_position_; }
-
-  VariableProxy* static_initializer_proxy() const {
-    return static_initializer_proxy_;
+  bool has_name_static_property() const {
+    return HasNameStaticProperty::decode(bit_field_);
   }
-  void set_static_initializer_proxy(VariableProxy* proxy) {
-    static_initializer_proxy_ = proxy;
+  bool has_static_computed_names() const {
+    return HasStaticComputedNames::decode(bit_field_);
   }
 
-  BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
-  BailoutId PrototypeId() { return BailoutId(local_id(1)); }
-
-  // Return an AST id for a property that is used in simulate instructions.
-  BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 2)); }
-
-  // Unlike other AST nodes, this number of bailout IDs allocated for an
-  // ClassLiteral can vary, so num_ids() is not a static method.
-  int num_ids() const { return parent_num_ids() + 2 + properties()->length(); }
-
   // Class literals need one feedback slot for each non-trivial value, as well
   // as some slots for home objects.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache);
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache);
 
   bool NeedsProxySlot() const {
     return class_variable_proxy() != nullptr &&
            class_variable_proxy()->var()->IsUnallocated();
   }
 
-  FeedbackVectorSlot PrototypeSlot() const { return prototype_slot_; }
-  FeedbackVectorSlot ProxySlot() const { return proxy_slot_; }
+  FeedbackSlot HomeObjectSlot() const { return home_object_slot_; }
+  FeedbackSlot ProxySlot() const { return proxy_slot_; }
 
  private:
   friend class AstNodeFactory;
 
   ClassLiteral(VariableProxy* class_variable_proxy, Expression* extends,
                FunctionLiteral* constructor, ZoneList<Property*>* properties,
-               int start_position, int end_position)
+               int start_position, int end_position,
+               bool has_name_static_property, bool has_static_computed_names)
       : Expression(start_position, kClassLiteral),
         end_position_(end_position),
         class_variable_proxy_(class_variable_proxy),
         extends_(extends),
         constructor_(constructor),
-        properties_(properties),
-        static_initializer_proxy_(nullptr) {}
-
-  static int parent_num_ids() { return Expression::num_ids(); }
-  int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+        properties_(properties) {
+    bit_field_ |= HasNameStaticProperty::encode(has_name_static_property) |
+                  HasStaticComputedNames::encode(has_static_computed_names);
+  }
 
   int end_position_;
-  FeedbackVectorSlot prototype_slot_;
-  FeedbackVectorSlot proxy_slot_;
+  FeedbackSlot home_object_slot_;
+  FeedbackSlot proxy_slot_;
   VariableProxy* class_variable_proxy_;
   Expression* extends_;
   FunctionLiteral* constructor_;
   ZoneList<Property*>* properties_;
-  VariableProxy* static_initializer_proxy_;
+
+  class HasNameStaticProperty
+      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
+  class HasStaticComputedNames
+      : public BitField<bool, HasNameStaticProperty::kNext, 1> {};
 };
 
 
@@ -2871,6 +2842,14 @@
  public:
   Handle<String> name() const { return name_->string(); }
   v8::Extension* extension() const { return extension_; }
+  FeedbackSlot LiteralFeedbackSlot() const { return literal_feedback_slot_; }
+
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache) {
+    // TODO(mvstanton): The FeedbackSlotCache can be adapted
+    // to always return the same slot for this case.
+    literal_feedback_slot_ = spec->AddCreateClosureSlot();
+  }
 
  private:
   friend class AstNodeFactory;
@@ -2883,6 +2862,7 @@
 
   const AstRawString* name_;
   v8::Extension* extension_;
+  FeedbackSlot literal_feedback_slot_;
 };
 
 
@@ -2955,7 +2935,59 @@
   explicit EmptyParentheses(int pos) : Expression(pos, kEmptyParentheses) {}
 };
 
+// Represents the spec operation `GetIterator()`
+// (defined at https://tc39.github.io/ecma262/#sec-getiterator). Ignition
+// desugars this into a LoadIC / JSLoadNamed, a CallIC, and a type-check to
+// validate the return value of the Symbol.iterator() call.
+enum class IteratorType { kNormal, kAsync };
+class GetIterator final : public Expression {
+ public:
+  IteratorType hint() const { return hint_; }
 
+  Expression* iterable() const { return iterable_; }
+  void set_iterable(Expression* iterable) { iterable_ = iterable; }
+
+  static int num_ids() { return parent_num_ids(); }
+
+  void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
+                           FeedbackSlotCache* cache) {
+    iterator_property_feedback_slot_ = spec->AddLoadICSlot();
+    iterator_call_feedback_slot_ = spec->AddCallICSlot();
+    if (hint() == IteratorType::kAsync) {
+      async_iterator_property_feedback_slot_ = spec->AddLoadICSlot();
+      async_iterator_call_feedback_slot_ = spec->AddCallICSlot();
+    }
+  }
+
+  FeedbackSlot IteratorPropertyFeedbackSlot() const {
+    return iterator_property_feedback_slot_;
+  }
+
+  FeedbackSlot IteratorCallFeedbackSlot() const {
+    return iterator_call_feedback_slot_;
+  }
+
+  FeedbackSlot AsyncIteratorPropertyFeedbackSlot() const {
+    return async_iterator_property_feedback_slot_;
+  }
+
+  FeedbackSlot AsyncIteratorCallFeedbackSlot() const {
+    return async_iterator_call_feedback_slot_;
+  }
+
+ private:
+  friend class AstNodeFactory;
+
+  explicit GetIterator(Expression* iterable, IteratorType hint, int pos)
+      : Expression(pos, kGetIterator), hint_(hint), iterable_(iterable) {}
+
+  IteratorType hint_;
+  Expression* iterable_;
+  FeedbackSlot iterator_property_feedback_slot_;
+  FeedbackSlot iterator_call_feedback_slot_;
+  FeedbackSlot async_iterator_property_feedback_slot_;
+  FeedbackSlot async_iterator_call_feedback_slot_;
+};
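
The comment on GetIterator above describes a three-step desugaring: load the
@@iterator method, call it, then type-check the result. A self-contained
sketch of those steps, where "Value" and its members are illustrative
stand-ins rather than V8 types:

#include <functional>
#include <stdexcept>

struct Value {
  std::function<Value()> iterator_method;  // role of iterable[Symbol.iterator]
  bool is_receiver = false;                // role of the JSReceiver check
};

Value GetIteratorDesugared(const Value& iterable) {
  // Step 1 (the LoadIC slot): fetch iterable[Symbol.iterator].
  const std::function<Value()>& method = iterable.iterator_method;
  // Step 2 (the CallIC slot): invoke the method on the iterable.
  Value iterator = method();
  // Step 3 (the type check): the spec requires the result to be an object.
  if (!iterator.is_receiver) {
    throw std::runtime_error("Result of Symbol.iterator is not an object");
  }
  return iterator;
}

For IteratorType::kAsync, AssignFeedbackSlots() above additionally reserves a
second LoadIC/CallIC pair for the Symbol.asyncIterator lookup.
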
 
 // ----------------------------------------------------------------------------
 // Basic visitor
@@ -3170,6 +3202,11 @@
     return NULL;
   }
 
+  ForOfStatement* NewForOfStatement(ZoneList<const AstRawString*>* labels,
+                                    int pos) {
+    return new (zone_) ForOfStatement(labels, pos);
+  }
+
   ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
     return new (zone_) ExpressionStatement(expression, pos);
   }
@@ -3183,7 +3220,13 @@
   }
 
   ReturnStatement* NewReturnStatement(Expression* expression, int pos) {
-    return new (zone_) ReturnStatement(expression, pos);
+    return new (zone_)
+        ReturnStatement(expression, ReturnStatement::kNormal, pos);
+  }
+
+  ReturnStatement* NewAsyncReturnStatement(Expression* expression, int pos) {
+    return new (zone_)
+        ReturnStatement(expression, ReturnStatement::kAsyncReturn, pos);
   }
 
   WithStatement* NewWithStatement(Scope* scope,
@@ -3217,15 +3260,6 @@
         try_block, scope, variable, catch_block, HandlerTable::UNCAUGHT, pos);
   }
 
-  TryCatchStatement* NewTryCatchStatementForPromiseReject(Block* try_block,
-                                                          Scope* scope,
-                                                          Variable* variable,
-                                                          Block* catch_block,
-                                                          int pos) {
-    return new (zone_) TryCatchStatement(
-        try_block, scope, variable, catch_block, HandlerTable::PROMISE, pos);
-  }
-
   TryCatchStatement* NewTryCatchStatementForDesugaring(Block* try_block,
                                                        Scope* scope,
                                                        Variable* variable,
@@ -3258,9 +3292,9 @@
     return new (zone_) EmptyStatement(pos);
   }
 
-  SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(Scope* scope) {
-    return new (zone_) SloppyBlockFunctionStatement(
-        NewEmptyStatement(kNoSourcePosition), scope);
+  SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement() {
+    return new (zone_)
+        SloppyBlockFunctionStatement(NewEmptyStatement(kNoSourcePosition));
   }
 
   CaseClause* NewCaseClause(
@@ -3273,8 +3307,8 @@
   }
 
   // A JavaScript symbol (ECMA-262 edition 6).
-  Literal* NewSymbolLiteral(const char* name, int pos) {
-    return new (zone_) Literal(ast_value_factory_->NewSymbol(name), pos);
+  Literal* NewSymbolLiteral(AstSymbol symbol, int pos) {
+    return new (zone_) Literal(ast_value_factory_->NewSymbol(symbol), pos);
   }
 
   Literal* NewNumberLiteral(double number, int pos, bool with_dot = false) {
@@ -3303,10 +3337,10 @@
   }
 
   ObjectLiteral* NewObjectLiteral(
-      ZoneList<ObjectLiteral::Property*>* properties, int literal_index,
-      uint32_t boilerplate_properties, int pos) {
-    return new (zone_)
-        ObjectLiteral(properties, literal_index, boilerplate_properties, pos);
+      ZoneList<ObjectLiteral::Property*>* properties,
+      uint32_t boilerplate_properties, int pos, bool has_rest_property) {
+    return new (zone_) ObjectLiteral(properties, boilerplate_properties, pos,
+                                     has_rest_property);
   }
 
   ObjectLiteral::Property* NewObjectLiteralProperty(
@@ -3324,21 +3358,18 @@
   }
 
   RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, int flags,
-                                  int literal_index, int pos) {
-    return new (zone_) RegExpLiteral(pattern, flags, literal_index, pos);
+                                  int pos) {
+    return new (zone_) RegExpLiteral(pattern, flags, pos);
   }
 
   ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
-                                int literal_index,
                                 int pos) {
-    return new (zone_) ArrayLiteral(values, -1, literal_index, pos);
+    return new (zone_) ArrayLiteral(values, -1, pos);
   }
 
   ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
-                                int first_spread_index, int literal_index,
-                                int pos) {
-    return new (zone_)
-        ArrayLiteral(values, first_spread_index, literal_index, pos);
+                                int first_spread_index, int pos) {
+    return new (zone_) ArrayLiteral(values, first_spread_index, pos);
   }
 
   VariableProxy* NewVariableProxy(Variable* var,
@@ -3437,9 +3468,13 @@
                             Expression* value,
                             int pos) {
     DCHECK(Token::IsAssignmentOp(op));
+
+    if (op != Token::INIT && target->IsVariableProxy()) {
+      target->AsVariableProxy()->set_is_assigned();
+    }
+
     Assignment* assign = new (zone_) Assignment(op, target, value, pos);
     if (assign->is_compound()) {
-      DCHECK(Token::IsAssignmentOp(op));
       assign->binary_operation_ =
           NewBinaryOperation(assign->binary_op(), target, value, pos + 1);
     }
@@ -3458,32 +3493,33 @@
 
   FunctionLiteral* NewFunctionLiteral(
       const AstRawString* name, DeclarationScope* scope,
-      ZoneList<Statement*>* body, int materialized_literal_count,
-      int expected_property_count, int parameter_count, int function_length,
+      ZoneList<Statement*>* body, int expected_property_count,
+      int parameter_count, int function_length,
       FunctionLiteral::ParameterFlag has_duplicate_parameters,
       FunctionLiteral::FunctionType function_type,
       FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
-      bool has_braces) {
+      bool has_braces, int function_literal_id) {
     return new (zone_) FunctionLiteral(
-        zone_, name, ast_value_factory_, scope, body,
-        materialized_literal_count, expected_property_count, parameter_count,
-        function_length, function_type, has_duplicate_parameters,
-        eager_compile_hint, position, true, has_braces);
+        zone_, name, ast_value_factory_, scope, body, expected_property_count,
+        parameter_count, function_length, function_type,
+        has_duplicate_parameters, eager_compile_hint, position, has_braces,
+        function_literal_id);
   }
 
   // Creates a FunctionLiteral representing a top-level script, the
   // result of an eval (top-level or otherwise), or the result of calling
   // the Function constructor.
-  FunctionLiteral* NewScriptOrEvalFunctionLiteral(
-      DeclarationScope* scope, ZoneList<Statement*>* body,
-      int materialized_literal_count, int expected_property_count,
-      int parameter_count) {
+  FunctionLiteral* NewScriptOrEvalFunctionLiteral(DeclarationScope* scope,
+                                                  ZoneList<Statement*>* body,
+                                                  int expected_property_count,
+                                                  int parameter_count) {
     return new (zone_) FunctionLiteral(
         zone_, ast_value_factory_->empty_string(), ast_value_factory_, scope,
-        body, materialized_literal_count, expected_property_count,
-        parameter_count, parameter_count, FunctionLiteral::kAnonymousExpression,
+        body, expected_property_count, parameter_count, parameter_count,
+        FunctionLiteral::kAnonymousExpression,
         FunctionLiteral::kNoDuplicateParameters,
-        FunctionLiteral::kShouldLazyCompile, 0, false, true);
+        FunctionLiteral::kShouldLazyCompile, 0, true,
+        FunctionLiteral::kIdTypeTopLevel);
   }
 
   ClassLiteral::Property* NewClassLiteralProperty(
@@ -3496,9 +3532,12 @@
   ClassLiteral* NewClassLiteral(VariableProxy* proxy, Expression* extends,
                                 FunctionLiteral* constructor,
                                 ZoneList<ClassLiteral::Property*>* properties,
-                                int start_position, int end_position) {
-    return new (zone_) ClassLiteral(proxy, extends, constructor, properties,
-                                    start_position, end_position);
+                                int start_position, int end_position,
+                                bool has_name_static_property,
+                                bool has_static_computed_names) {
+    return new (zone_) ClassLiteral(
+        proxy, extends, constructor, properties, start_position, end_position,
+        has_name_static_property, has_static_computed_names);
   }
 
   NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name,
@@ -3534,6 +3573,11 @@
     return new (zone_) EmptyParentheses(pos);
   }
 
+  GetIterator* NewGetIterator(Expression* iterable, IteratorType hint,
+                              int pos) {
+    return new (zone_) GetIterator(iterable, hint, pos);
+  }
+
   Zone* zone() const { return zone_; }
   void set_zone(Zone* zone) { zone_ = zone; }
 
diff --git a/src/ast/compile-time-value.cc b/src/ast/compile-time-value.cc
index eda536b..27dd29f 100644
--- a/src/ast/compile-time-value.cc
+++ b/src/ast/compile-time-value.cc
@@ -48,8 +48,8 @@
   return static_cast<LiteralType>(literal_type->value());
 }
 
-Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
-  return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
+Handle<HeapObject> CompileTimeValue::GetElements(Handle<FixedArray> value) {
+  return Handle<HeapObject>(HeapObject::cast(value->get(kElementsSlot)));
 }
 
 }  // namespace internal
diff --git a/src/ast/compile-time-value.h b/src/ast/compile-time-value.h
index 27351b7..d61443e 100644
--- a/src/ast/compile-time-value.h
+++ b/src/ast/compile-time-value.h
@@ -31,8 +31,8 @@
   // Get the type of a compile time value returned by GetValue().
   static LiteralType GetLiteralType(Handle<FixedArray> value);
 
-  // Get the elements array of a compile time value returned by GetValue().
-  static Handle<FixedArray> GetElements(Handle<FixedArray> value);
+  // Get the elements of a compile time value returned by GetValue().
+  static Handle<HeapObject> GetElements(Handle<FixedArray> value);
 
  private:
   static const int kLiteralTypeSlot = 0;
diff --git a/src/ast/context-slot-cache.cc b/src/ast/context-slot-cache.cc
index b1387e1..4548218 100644
--- a/src/ast/context-slot-cache.cc
+++ b/src/ast/context-slot-cache.cc
@@ -12,9 +12,9 @@
 // (disallowed) include: src/factory.h -> src/objects-inl.h
 #include "src/objects-inl.h"
 // FIXME(mstarzinger, marja): This is weird, but required because of the missing
-// (disallowed) include: src/type-feedback-vector.h ->
-// src/type-feedback-vector-inl.h
-#include "src/type-feedback-vector-inl.h"
+// (disallowed) include: src/feedback-vector.h ->
+// src/feedback-vector-inl.h
+#include "src/feedback-vector-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ast/modules.cc b/src/ast/modules.cc
index 339d64c..9d3a235 100644
--- a/src/ast/modules.cc
+++ b/src/ast/modules.cc
@@ -5,6 +5,9 @@
 #include "src/ast/modules.h"
 #include "src/ast/ast-value-factory.h"
 #include "src/ast/scopes.h"
+#include "src/objects-inl.h"
+#include "src/objects/module-info.h"
+#include "src/pending-compilation-error-handler.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ast/modules.h b/src/ast/modules.h
index 94550fb..ce8aba8 100644
--- a/src/ast/modules.h
+++ b/src/ast/modules.h
@@ -6,7 +6,6 @@
 #define V8_AST_MODULES_H_
 
 #include "src/parsing/scanner.h"  // Only for Scanner::Location.
-#include "src/pending-compilation-error-handler.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -16,6 +15,7 @@
 class AstRawString;
 class ModuleInfo;
 class ModuleInfoEntry;
+class PendingCompilationErrorHandler;
 
 class ModuleDescriptor : public ZoneObject {
  public:
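
The modules.h hunk above swaps a header include for a forward declaration; a minimal sketch of why that suffices (simplified single-file rendition, member names made up for illustration):

// Forward declaration: pointers and references only need the type's name,
// so the heavy header can move into the .cc file that dereferences it.
class PendingCompilationErrorHandler;

class ModuleDescriptor {
 public:
  explicit ModuleDescriptor(PendingCompilationErrorHandler* handler)
      : handler_(handler) {}
  PendingCompilationErrorHandler* handler() const { return handler_; }

 private:
  PendingCompilationErrorHandler* handler_;  // incomplete type is fine here
};

int main() {
  ModuleDescriptor descriptor(nullptr);
  return descriptor.handler() == nullptr ? 0 : 1;
}
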
diff --git a/src/ast/prettyprinter.cc b/src/ast/prettyprinter.cc
index a3fc50a..725a8a7 100644
--- a/src/ast/prettyprinter.cc
+++ b/src/ast/prettyprinter.cc
@@ -10,18 +10,19 @@
 #include "src/ast/scopes.h"
 #include "src/base/platform/platform.h"
 #include "src/globals.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
-CallPrinter::CallPrinter(Isolate* isolate, bool is_builtin)
+CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
     : builder_(isolate) {
   isolate_ = isolate;
   position_ = 0;
   num_prints_ = 0;
   found_ = false;
   done_ = false;
-  is_builtin_ = is_builtin;
+  is_user_js_ = is_user_js;
   InitializeAstVisitor(isolate);
 }
 
@@ -239,11 +240,11 @@
 
 
 void CallPrinter::VisitVariableProxy(VariableProxy* node) {
-  if (is_builtin_) {
-    // Variable names of builtins are meaningless due to minification.
-    Print("(var)");
-  } else {
+  if (is_user_js_) {
     PrintLiteral(node->name(), false);
+  } else {
+    // Variable names of non-user code are meaningless due to minification.
+    Print("(var)");
   }
 }
 
@@ -279,9 +280,9 @@
 void CallPrinter::VisitCall(Call* node) {
   bool was_found = !found_ && node->position() == position_;
   if (was_found) {
-    // Bail out if the error is caused by a direct call to a variable in builtin
-    // code. The variable name is meaningless due to minification.
-    if (is_builtin_ && node->expression()->IsVariableProxy()) {
+    // Bail out if the error is caused by a direct call to a variable in
+    // non-user JS code. The variable name is meaningless due to minification.
+    if (!is_user_js_ && node->expression()->IsVariableProxy()) {
       done_ = true;
       return;
     }
@@ -297,9 +298,9 @@
 void CallPrinter::VisitCallNew(CallNew* node) {
   bool was_found = !found_ && node->position() == position_;
   if (was_found) {
-    // Bail out if the error is caused by a direct call to a variable in builtin
-    // code. The variable name is meaningless due to minification.
-    if (is_builtin_ && node->expression()->IsVariableProxy()) {
+    // Bail out if the error is caused by a direct call to a variable in
+    // non-user JS code. The variable name is meaningless due to minification.
+    if (!is_user_js_ && node->expression()->IsVariableProxy()) {
       done_ = true;
       return;
     }
@@ -370,6 +371,11 @@
   UNREACHABLE();
 }
 
+void CallPrinter::VisitGetIterator(GetIterator* node) {
+  Print("GetIterator(");
+  Find(node->iterable(), true);
+  Print(")");
+}
 
 void CallPrinter::VisitThisFunction(ThisFunction* node) {}
 
@@ -434,9 +440,9 @@
 
 #ifdef DEBUG
 
-// A helper for ast nodes that use FeedbackVectorSlots.
+// A helper for ast nodes that use FeedbackSlots.
 static int FormatSlotNode(Vector<char>* buf, Expression* node,
-                          const char* node_name, FeedbackVectorSlot slot) {
+                          const char* node_name, FeedbackSlot slot) {
   int pos = SNPrintF(*buf, "%s", node_name);
   if (!slot.IsInvalid()) {
     pos += SNPrintF(*buf + pos, " Slot(%d)", slot.ToInt());
@@ -874,15 +880,16 @@
     case HandlerTable::CAUGHT:
       prediction = "CAUGHT";
       break;
-    case HandlerTable::PROMISE:
-      prediction = "PROMISE";
-      break;
     case HandlerTable::DESUGARING:
       prediction = "DESUGARING";
       break;
     case HandlerTable::ASYNC_AWAIT:
       prediction = "ASYNC_AWAIT";
       break;
+    case HandlerTable::PROMISE:
+      // Catch predictions resulting in promise rejections aren't
+      // produced by the parser.
+      UNREACHABLE();
   }
   Print(" %s\n", prediction);
 }
@@ -971,7 +978,7 @@
 void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
   IndentedScope indent(this, "REGEXP LITERAL", node->position());
   EmbeddedVector<char, 128> buf;
-  SNPrintF(buf, "literal_index = %d\n", node->literal_index());
+  SNPrintF(buf, "literal_slot = %d\n", node->literal_slot().ToInt());
   PrintIndented(buf.start());
   PrintLiteralIndented("PATTERN", node->pattern(), false);
   int i = 0;
@@ -990,7 +997,7 @@
 void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
   IndentedScope indent(this, "OBJ LITERAL", node->position());
   EmbeddedVector<char, 128> buf;
-  SNPrintF(buf, "literal_index = %d\n", node->literal_index());
+  SNPrintF(buf, "literal_slot = %d\n", node->literal_slot().ToInt());
   PrintIndented(buf.start());
   PrintObjectProperties(node->properties());
 }
@@ -1019,6 +1026,9 @@
       case ObjectLiteral::Property::SETTER:
         prop_kind = "SETTER";
         break;
+      case ObjectLiteral::Property::SPREAD:
+        prop_kind = "SPREAD";
+        break;
     }
     EmbeddedVector<char, 128> buf;
     SNPrintF(buf, "PROPERTY - %s", prop_kind);
@@ -1033,7 +1043,7 @@
   IndentedScope indent(this, "ARRAY LITERAL", node->position());
 
   EmbeddedVector<char, 128> buf;
-  SNPrintF(buf, "literal_index = %d\n", node->literal_index());
+  SNPrintF(buf, "literal_slot = %d\n", node->literal_slot().ToInt());
   PrintIndented(buf.start());
   if (node->values()->length() > 0) {
     IndentedScope indent(this, "VALUES", node->position());
@@ -1136,7 +1146,14 @@
 
 void AstPrinter::VisitCallRuntime(CallRuntime* node) {
   EmbeddedVector<char, 128> buf;
-  SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
+  if (node->is_jsruntime()) {
+    SNPrintF(
+        buf, "CALL RUNTIME %s code = %p", node->debug_name(),
+        static_cast<void*>(isolate_->context()->get(node->context_index())));
+  } else {
+    SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
+  }
+
   IndentedScope indent(this, buf.start(), node->position());
   PrintArguments(node->arguments());
 }
@@ -1181,6 +1198,10 @@
   IndentedScope indent(this, "()", node->position());
 }
 
+void AstPrinter::VisitGetIterator(GetIterator* node) {
+  IndentedScope indent(this, "GET-ITERATOR", node->position());
+  Visit(node->iterable());
+}
 
 void AstPrinter::VisitThisFunction(ThisFunction* node) {
   IndentedScope indent(this, "THIS-FUNCTION", node->position());
diff --git a/src/ast/prettyprinter.h b/src/ast/prettyprinter.h
index b56c834..fdc079c 100644
--- a/src/ast/prettyprinter.h
+++ b/src/ast/prettyprinter.h
@@ -15,7 +15,7 @@
 
 class CallPrinter final : public AstVisitor<CallPrinter> {
  public:
-  explicit CallPrinter(Isolate* isolate, bool is_builtin);
+  explicit CallPrinter(Isolate* isolate, bool is_user_js);
 
   // The following routine prints the node with position |position| into a
   // string.
@@ -38,7 +38,7 @@
   int position_;  // position of ast node to print
   bool found_;
   bool done_;
-  bool is_builtin_;
+  bool is_user_js_;
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 
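
A minimal sketch of the is_user_js idea in the CallPrinter changes above: identifiers from non-user (typically minified) code are redacted when printing. Simplified types, not the real CallPrinter:

#include <cstdio>
#include <string>

class NamePrinter {
 public:
  explicit NamePrinter(bool is_user_js) : is_user_js_(is_user_js) {}

  void PrintVariable(const std::string& name) const {
    if (is_user_js_) {
      std::printf("%s\n", name.c_str());
    } else {
      // Variable names of non-user code are meaningless due to minification.
      std::printf("(var)\n");
    }
  }

 private:
  bool is_user_js_;
};

int main() {
  NamePrinter(true).PrintVariable("total");  // prints: total
  NamePrinter(false).PrintVariable("a");     // prints: (var)
  return 0;
}
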
diff --git a/src/ast/scopes.cc b/src/ast/scopes.cc
index c1679a4..225793c 100644
--- a/src/ast/scopes.cc
+++ b/src/ast/scopes.cc
@@ -9,12 +9,29 @@
 #include "src/accessors.h"
 #include "src/ast/ast.h"
 #include "src/bootstrapper.h"
+#include "src/counters.h"
 #include "src/messages.h"
+#include "src/objects-inl.h"
+#include "src/objects/module-info.h"
+#include "src/objects/scope-info.h"
 #include "src/parsing/parse-info.h"
+#include "src/parsing/preparsed-scope-data.h"
 
 namespace v8 {
 namespace internal {
 
+namespace {
+void* kDummyPreParserVariable = reinterpret_cast<void*>(0x1);
+void* kDummyPreParserLexicalVariable = reinterpret_cast<void*>(0x2);
+
+bool IsLexical(Variable* variable) {
+  if (variable == kDummyPreParserLexicalVariable) return true;
+  if (variable == kDummyPreParserVariable) return false;
+  return IsLexicalVariableMode(variable->mode());
+}
+
+}  // namespace
+
 // ----------------------------------------------------------------------------
 // Implementation of LocalsMap
 //
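
A sketch of the dummy-pointer trick introduced above: during preparsing, fixed sentinel addresses stand in for Variable* so a map can record that a name exists without allocating a Variable. Simplified stand-ins, not the V8 types:

#include <cassert>

struct Variable {
  bool is_lexical;
};

void* const kDummyVar = reinterpret_cast<void*>(0x1);
void* const kDummyLexicalVar = reinterpret_cast<void*>(0x2);

// Every Variable* that may be a sentinel must be routed through helpers like
// this one instead of being dereferenced directly.
bool IsLexical(Variable* variable) {
  if (variable == kDummyLexicalVar) return true;
  if (variable == kDummyVar) return false;
  return variable->is_lexical;
}

int main() {
  Variable real{true};
  assert(IsLexical(&real));
  assert(IsLexical(static_cast<Variable*>(kDummyLexicalVar)));
  assert(!IsLexical(static_cast<Variable*>(kDummyVar)));
  return 0;
}
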
@@ -49,6 +66,20 @@
   return reinterpret_cast<Variable*>(p->value);
 }
 
+Variable* VariableMap::DeclareName(Zone* zone, const AstRawString* name,
+                                   VariableMode mode) {
+  Entry* p =
+      ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
+                                  ZoneAllocationPolicy(zone));
+  if (p->value == nullptr) {
+    // The variable has not been declared yet -> insert it.
+    DCHECK_EQ(name, p->key);
+    p->value =
+        mode == VAR ? kDummyPreParserVariable : kDummyPreParserLexicalVariable;
+  }
+  return reinterpret_cast<Variable*>(p->value);
+}
+
 void VariableMap::Remove(Variable* var) {
   const AstRawString* name = var->raw_name();
   ZoneHashMap::Remove(const_cast<AstRawString*>(name), name->hash());
@@ -74,21 +105,27 @@
   return NULL;
 }
 
+void SloppyBlockFunctionMap::Delegate::set_statement(Statement* statement) {
+  if (statement_ != nullptr) {
+    statement_->set_statement(statement);
+  }
+}
+
 SloppyBlockFunctionMap::SloppyBlockFunctionMap(Zone* zone)
     : ZoneHashMap(8, ZoneAllocationPolicy(zone)) {}
 
-void SloppyBlockFunctionMap::Declare(Zone* zone, const AstRawString* name,
-                                     SloppyBlockFunctionStatement* stmt) {
+void SloppyBlockFunctionMap::Declare(
+    Zone* zone, const AstRawString* name,
+    SloppyBlockFunctionMap::Delegate* delegate) {
   // AstRawStrings are unambiguous, i.e., the same string is always represented
   // by the same AstRawString*.
   Entry* p =
       ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
                                   ZoneAllocationPolicy(zone));
-  stmt->set_next(static_cast<SloppyBlockFunctionStatement*>(p->value));
-  p->value = stmt;
+  delegate->set_next(static_cast<SloppyBlockFunctionMap::Delegate*>(p->value));
+  p->value = delegate;
 }
 
-
 // ----------------------------------------------------------------------------
 // Implementation of Scope
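
A sketch of the Declare() pattern used by SloppyBlockFunctionMap above: the map's value slot holds the head of an intrusive singly-linked list of delegates, so multiple declarations of one name chain together. Simplified here with std::unordered_map instead of ZoneHashMap:

#include <cassert>
#include <string>
#include <unordered_map>

struct Delegate {
  int payload;
  Delegate* next = nullptr;
};

class SloppyBlockFunctionMap {
 public:
  void Declare(const std::string& name, Delegate* delegate) {
    Delegate*& head = map_[name];  // inserts nullptr on first use
    delegate->next = head;         // push onto the per-name list
    head = delegate;
  }
  Delegate* Lookup(const std::string& name) const {
    auto it = map_.find(name);
    return it == map_.end() ? nullptr : it->second;
  }

 private:
  std::unordered_map<std::string, Delegate*> map_;
};

int main() {
  SloppyBlockFunctionMap map;
  Delegate a{1}, b{2};
  map.Declare("f", &a);
  map.Declare("f", &b);  // b now heads the list, a is b->next
  assert(map.Lookup("f") == &b && b.next == &a);
  return 0;
}
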
 
@@ -243,8 +280,7 @@
   // Cache the catch variable, even though it's also available via the
   // scope_info, as the parser expects that a catch scope always has the catch
   // variable as first and only variable.
-  Variable* variable = Declare(zone, this, catch_variable_name, VAR,
-                               NORMAL_VARIABLE, kCreatedInitialized);
+  Variable* variable = Declare(zone, catch_variable_name, VAR);
   AllocateHeapSlot(variable);
 }
 
@@ -261,9 +297,16 @@
   new_target_ = nullptr;
   function_ = nullptr;
   arguments_ = nullptr;
-  this_function_ = nullptr;
+  rare_data_ = nullptr;
   should_eager_compile_ = false;
-  is_lazily_parsed_ = false;
+  was_lazily_parsed_ = false;
+#ifdef DEBUG
+  DeclarationScope* outer_declaration_scope =
+      outer_scope_ ? outer_scope_->GetDeclarationScope() : nullptr;
+  is_being_lazily_parsed_ =
+      outer_declaration_scope ? outer_declaration_scope->is_being_lazily_parsed_
+                              : false;
+#endif
 }
 
 void Scope::SetDefaults() {
@@ -305,7 +348,7 @@
 }
 
 void DeclarationScope::set_should_eager_compile() {
-  should_eager_compile_ = !is_lazily_parsed_;
+  should_eager_compile_ = !was_lazily_parsed_;
 }
 
 void DeclarationScope::set_asm_module() {
@@ -354,17 +397,16 @@
       }
       DCHECK(!scope_info->HasOuterScopeInfo());
       break;
-    } else if (scope_info->scope_type() == FUNCTION_SCOPE ||
-               scope_info->scope_type() == EVAL_SCOPE) {
-      // TODO(neis): For an eval scope, we currently create an ordinary function
-      // context.  This is wrong and needs to be fixed.
-      // https://bugs.chromium.org/p/v8/issues/detail?id=5295
+    } else if (scope_info->scope_type() == FUNCTION_SCOPE) {
       outer_scope =
           new (zone) DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info));
       if (scope_info->IsAsmFunction())
         outer_scope->AsDeclarationScope()->set_asm_function();
       if (scope_info->IsAsmModule())
         outer_scope->AsDeclarationScope()->set_asm_module();
+    } else if (scope_info->scope_type() == EVAL_SCOPE) {
+      outer_scope =
+          new (zone) DeclarationScope(zone, EVAL_SCOPE, handle(scope_info));
     } else if (scope_info->scope_type() == BLOCK_SCOPE) {
       if (scope_info->is_declaration_scope()) {
         outer_scope =
@@ -424,11 +466,21 @@
   return is_declaration_scope() ? AsDeclarationScope()->num_parameters() : 0;
 }
 
+void DeclarationScope::DeclareSloppyBlockFunction(
+    const AstRawString* name, Scope* scope,
+    SloppyBlockFunctionStatement* statement) {
+  auto* delegate =
+      new (zone()) SloppyBlockFunctionMap::Delegate(scope, statement);
+  sloppy_block_function_map_.Declare(zone(), name, delegate);
+}
+
 void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
   DCHECK(is_sloppy(language_mode()));
   DCHECK(is_function_scope() || is_eval_scope() || is_script_scope() ||
          (is_block_scope() && outer_scope()->is_function_scope()));
-  DCHECK(HasSimpleParameters() || is_block_scope());
+  DCHECK(HasSimpleParameters() || is_block_scope() || is_being_lazily_parsed_);
+  DCHECK_EQ(factory == nullptr, is_being_lazily_parsed_);
+
   bool has_simple_parameters = HasSimpleParameters();
   // For each variable which is used as a function declaration in a sloppy
   // block,
@@ -457,10 +509,10 @@
       }
     }
 
-    bool var_created = false;
+    Variable* created_variable = nullptr;
 
     // Write in assignments to var for each block-scoped function declaration
-    auto delegates = static_cast<SloppyBlockFunctionStatement*>(p->value);
+    auto delegates = static_cast<SloppyBlockFunctionMap::Delegate*>(p->value);
 
     DeclarationScope* decl_scope = this;
     while (decl_scope->is_eval_scope()) {
@@ -468,7 +520,7 @@
     }
     Scope* outer_scope = decl_scope->outer_scope();
 
-    for (SloppyBlockFunctionStatement* delegate = delegates;
+    for (SloppyBlockFunctionMap::Delegate* delegate = delegates;
          delegate != nullptr; delegate = delegate->next()) {
       // Check if there's a conflict with a lexical declaration
       Scope* query_scope = delegate->scope()->outer_scope();
@@ -482,7 +534,7 @@
       // `{ let e; try {} catch (e) { function e(){} } }`
       do {
         var = query_scope->LookupLocal(name);
-        if (var != nullptr && IsLexicalVariableMode(var->mode())) {
+        if (var != nullptr && IsLexical(var)) {
           should_hoist = false;
           break;
         }
@@ -492,32 +544,47 @@
       if (!should_hoist) continue;
 
       // Declare a var-style binding for the function in the outer scope
-      if (!var_created) {
-        var_created = true;
-        VariableProxy* proxy = factory->NewVariableProxy(name, NORMAL_VARIABLE);
-        Declaration* declaration =
-            factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
-        // Based on the preceding check, it doesn't matter what we pass as
-        // allow_harmony_restrictive_generators and
-        // sloppy_mode_block_scope_function_redefinition.
-        bool ok = true;
-        DeclareVariable(declaration, VAR,
-                        Variable::DefaultInitializationFlag(VAR), false,
-                        nullptr, &ok);
-        CHECK(ok);  // Based on the preceding check, this should not fail
-      }
+      if (factory) {
+        DCHECK(!is_being_lazily_parsed_);
+        if (created_variable == nullptr) {
+          VariableProxy* proxy =
+              factory->NewVariableProxy(name, NORMAL_VARIABLE);
+          auto declaration =
+              factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
+          // Based on the preceding check, it doesn't matter what we pass as
+          // allow_harmony_restrictive_generators and
+          // sloppy_mode_block_scope_function_redefinition.
+          bool ok = true;
+          created_variable = DeclareVariable(
+              declaration, VAR, Variable::DefaultInitializationFlag(VAR), false,
+              nullptr, &ok);
+          CHECK(ok);  // Based on the preceding check, this should not fail
+        }
 
-      Expression* assignment = factory->NewAssignment(
-          Token::ASSIGN, NewUnresolved(factory, name),
-          delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
-      Statement* statement =
-          factory->NewExpressionStatement(assignment, kNoSourcePosition);
-      delegate->set_statement(statement);
+        Expression* assignment = factory->NewAssignment(
+            Token::ASSIGN, NewUnresolved(factory, name),
+            delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
+        Statement* statement =
+            factory->NewExpressionStatement(assignment, kNoSourcePosition);
+        delegate->set_statement(statement);
+      } else {
+        DCHECK(is_being_lazily_parsed_);
+        if (created_variable == nullptr) {
+          created_variable = DeclareVariableName(name, VAR);
+          if (created_variable != kDummyPreParserVariable &&
+              created_variable != kDummyPreParserLexicalVariable) {
+            DCHECK(FLAG_preparser_scope_analysis);
+            created_variable->set_maybe_assigned();
+          }
+        }
+      }
     }
   }
 }
 
 void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
+  RuntimeCallTimerScope runtimeTimer(info->isolate(),
+                                     &RuntimeCallStats::CompileScopeAnalysis);
   DCHECK(info->literal() != NULL);
   DeclarationScope* scope = info->literal()->scope();
 
@@ -542,13 +609,15 @@
     scope->HoistSloppyBlockFunctions(&factory);
   }
 
-  // We are compiling one of three cases:
+  // We are compiling one of four cases:
   // 1) top-level code,
   // 2) a function/eval/module on the top-level
   // 3) a function/eval in a scope that was already resolved.
+  // 4) an asm.js function
   DCHECK(scope->scope_type() == SCRIPT_SCOPE ||
          scope->outer_scope()->scope_type() == SCRIPT_SCOPE ||
-         scope->outer_scope()->already_resolved_);
+         scope->outer_scope()->already_resolved_ ||
+         (info->asm_function_scope() && scope->is_function_scope()));
 
   // The outer scope is never lazy.
   scope->set_should_eager_compile();
@@ -565,6 +634,7 @@
 #ifdef DEBUG
   if (info->script_is_native() ? FLAG_print_builtin_scopes
                                : FLAG_print_scopes) {
+    PrintF("Global scope:\n");
     scope->Print();
   }
   scope->CheckScopePositions();
@@ -577,11 +647,11 @@
   DCHECK(is_declaration_scope());
   DCHECK(has_this_declaration());
 
-  bool subclass_constructor = IsSubclassConstructor(function_kind_);
-  Variable* var = Declare(
-      zone(), this, ast_value_factory->this_string(),
-      subclass_constructor ? CONST : VAR, THIS_VARIABLE,
-      subclass_constructor ? kNeedsInitialization : kCreatedInitialized);
+  bool derived_constructor = IsDerivedConstructor(function_kind_);
+  Variable* var =
+      Declare(zone(), ast_value_factory->this_string(),
+              derived_constructor ? CONST : VAR, THIS_VARIABLE,
+              derived_constructor ? kNeedsInitialization : kCreatedInitialized);
   receiver_ = var;
 }
 
@@ -594,9 +664,8 @@
     // Declare 'arguments' variable which exists in all non arrow functions.
     // Note that it might never be accessed, in which case it won't be
     // allocated during variable allocation.
-    arguments_ = Declare(zone(), this, ast_value_factory->arguments_string(),
-                         VAR, NORMAL_VARIABLE, kCreatedInitialized);
-  } else if (IsLexicalVariableMode(arguments_->mode())) {
+    arguments_ = Declare(zone(), ast_value_factory->arguments_string(), VAR);
+  } else if (IsLexical(arguments_)) {
     // Check if there's lexically declared variable named arguments to avoid
     // redeclaration. See ES#sec-functiondeclarationinstantiation, step 20.
     arguments_ = nullptr;
@@ -609,14 +678,12 @@
   DCHECK(!is_arrow_scope());
 
   DeclareThis(ast_value_factory);
-  new_target_ = Declare(zone(), this, ast_value_factory->new_target_string(),
-                        CONST, NORMAL_VARIABLE, kCreatedInitialized);
+  new_target_ = Declare(zone(), ast_value_factory->new_target_string(), CONST);
 
   if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
       IsAccessorFunction(function_kind_)) {
-    this_function_ =
-        Declare(zone(), this, ast_value_factory->this_function_string(), CONST,
-                NORMAL_VARIABLE, kCreatedInitialized);
+    EnsureRareData()->this_function =
+        Declare(zone(), ast_value_factory->this_function_string(), CONST);
   }
 }
 
@@ -636,24 +703,32 @@
   return function_;
 }
 
+Variable* DeclarationScope::DeclareGeneratorObjectVar(
+    const AstRawString* name) {
+  DCHECK(is_function_scope() || is_module_scope());
+  DCHECK_NULL(generator_object_var());
+
+  Variable* result = EnsureRareData()->generator_object =
+      NewTemporary(name, kNotAssigned);
+  result->set_is_used();
+  return result;
+}
+
+Variable* DeclarationScope::DeclarePromiseVar(const AstRawString* name) {
+  DCHECK(is_function_scope());
+  DCHECK_NULL(promise_var());
+  Variable* result = EnsureRareData()->promise = NewTemporary(name);
+  result->set_is_used();
+  return result;
+}
+
 bool Scope::HasBeenRemoved() const {
-  // TODO(neis): Store this information somewhere instead of calculating it.
-
-  if (!is_block_scope()) return false;  // Shortcut.
-
-  Scope* parent = outer_scope();
-  if (parent == nullptr) {
-    DCHECK(is_script_scope());
-    return false;
+  if (sibling() == this) {
+    DCHECK_NULL(inner_scope_);
+    DCHECK(is_block_scope());
+    return true;
   }
-
-  Scope* sibling = parent->inner_scope();
-  for (; sibling != nullptr; sibling = sibling->sibling()) {
-    if (sibling == this) return false;
-  }
-
-  DCHECK_NULL(inner_scope_);
-  return true;
+  return false;
 }
 
 Scope* Scope::GetUnremovedScope() {
@@ -667,6 +742,7 @@
 
 Scope* Scope::FinalizeBlockScope() {
   DCHECK(is_block_scope());
+  DCHECK(!HasBeenRemoved());
 
   if (variables_.occupancy() > 0 ||
       (is_declaration_scope() && calls_sloppy_eval())) {
@@ -705,7 +781,12 @@
   PropagateUsageFlagsToScope(outer_scope_);
   // This block does not need a context.
   num_heap_slots_ = 0;
-  return NULL;
+
+  // Mark scope as removed by making it its own sibling.
+  sibling_ = this;
+  DCHECK(HasBeenRemoved());
+
+  return nullptr;
 }
 
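
A sketch of the removal marker introduced above: instead of scanning the parent's child list to decide whether a scope was removed, a removed scope points its sibling_ link at itself, turning HasBeenRemoved() into an O(1) check. Simplified node type, not the real Scope:

#include <cassert>

struct Node {
  Node* sibling = nullptr;

  // A node can never legitimately be its own sibling, so the self-pointer is
  // a free sentinel value.
  void MarkRemoved() { sibling = this; }
  bool HasBeenRemoved() const { return sibling == this; }
};

int main() {
  Node node;
  assert(!node.HasBeenRemoved());
  node.MarkRemoved();
  assert(node.HasBeenRemoved());
  return 0;
}
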
 void DeclarationScope::AddLocal(Variable* var) {
@@ -715,13 +796,13 @@
   locals_.Add(var);
 }
 
-Variable* Scope::Declare(Zone* zone, Scope* scope, const AstRawString* name,
+Variable* Scope::Declare(Zone* zone, const AstRawString* name,
                          VariableMode mode, VariableKind kind,
                          InitializationFlag initialization_flag,
                          MaybeAssignedFlag maybe_assigned_flag) {
   bool added;
   Variable* var =
-      variables_.Declare(zone, scope, name, mode, kind, initialization_flag,
+      variables_.Declare(zone, this, name, mode, kind, initialization_flag,
                          maybe_assigned_flag, &added);
   if (added) locals_.Add(var);
   return var;
@@ -796,6 +877,7 @@
   DCHECK(!already_resolved_);
   DCHECK(!other->already_resolved_);
   if (calls_eval()) other->RecordEvalCall();
+  if (inner_scope_calls_eval_) other->inner_scope_calls_eval_ = true;
 }
 
 Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
@@ -869,12 +951,14 @@
   DCHECK(is_function_scope() || is_module_scope());
   DCHECK(!has_rest_);
   DCHECK(!is_optional || !is_rest);
+  DCHECK(!is_being_lazily_parsed_);
+  DCHECK(!was_lazily_parsed_);
   Variable* var;
   if (mode == TEMPORARY) {
     var = NewTemporary(name);
   } else {
-    var =
-        Declare(zone(), this, name, mode, NORMAL_VARIABLE, kCreatedInitialized);
+    DCHECK_EQ(mode, VAR);
+    var = Declare(zone(), name, mode);
     // TODO(wingo): Avoid O(n^2) check.
     *is_duplicate = IsDeclaredParameter(name);
   }
@@ -886,6 +970,26 @@
   return var;
 }
 
+Variable* DeclarationScope::DeclareParameterName(
+    const AstRawString* name, bool is_rest,
+    AstValueFactory* ast_value_factory) {
+  DCHECK(!already_resolved_);
+  DCHECK(is_function_scope() || is_module_scope());
+  DCHECK(!has_rest_ || is_rest);
+  DCHECK(is_being_lazily_parsed_);
+  has_rest_ = is_rest;
+  if (name == ast_value_factory->arguments_string()) {
+    has_arguments_parameter_ = true;
+  }
+  if (FLAG_preparser_scope_analysis) {
+    Variable* var = Declare(zone(), name, VAR);
+    params_.Add(var, zone());
+    return var;
+  }
+  DeclareVariableName(name, VAR);
+  return nullptr;
+}
+
 Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
                               InitializationFlag init_flag, VariableKind kind,
                               MaybeAssignedFlag maybe_assigned_flag) {
@@ -894,8 +998,10 @@
   // introduced during variable allocation, and TEMPORARY variables are
   // allocated via NewTemporary().
   DCHECK(IsDeclaredVariableMode(mode));
-  return Declare(zone(), this, name, mode, kind, init_flag,
-                 maybe_assigned_flag);
+  DCHECK_IMPLIES(GetDeclarationScope()->is_being_lazily_parsed(),
+                 mode == VAR || mode == LET || mode == CONST);
+  DCHECK(!GetDeclarationScope()->was_lazily_parsed());
+  return Declare(zone(), name, mode, kind, init_flag, maybe_assigned_flag);
 }
 
 Variable* Scope::DeclareVariable(
@@ -904,6 +1010,8 @@
     bool* sloppy_mode_block_scope_function_redefinition, bool* ok) {
   DCHECK(IsDeclaredVariableMode(mode));
   DCHECK(!already_resolved_);
+  DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
+  DCHECK(!GetDeclarationScope()->was_lazily_parsed());
 
   if (mode == VAR && !is_declaration_scope()) {
     return GetDeclarationScope()->DeclareVariable(
@@ -920,15 +1028,25 @@
   const AstRawString* name = proxy->raw_name();
   bool is_function_declaration = declaration->IsFunctionDeclaration();
 
+  // Pessimistically assume that top-level variables will be assigned.
+  //
+  // Top-level variables in a script can be accessed by other scripts or even
+  // become global properties. While this does not apply to top-level variables
+  // in a module (assuming they are not exported), we must still mark these as
+  // assigned because they might be accessed by a lazily parsed top-level
+  // function, which, for efficiency, we preparse without variable tracking.
+  if (is_script_scope() || is_module_scope()) {
+    if (mode != CONST) proxy->set_is_assigned();
+  }
+
   Variable* var = nullptr;
   if (is_eval_scope() && is_sloppy(language_mode()) && mode == VAR) {
     // In a var binding in a sloppy direct eval, pollute the enclosing scope
     // with this new binding by doing the following:
     // The proxy is bound to a lookup variable to force a dynamic declaration
     // using the DeclareEvalVar or DeclareEvalFunction runtime functions.
-    VariableKind kind = NORMAL_VARIABLE;
-    // TODO(sigurds) figure out if kNotAssigned is OK here
-    var = new (zone()) Variable(this, name, mode, kind, init, kNotAssigned);
+    var = new (zone())
+        Variable(this, name, mode, NORMAL_VARIABLE, init, kMaybeAssigned);
     var->AllocateTo(VariableLocation::LOOKUP, -1);
   } else {
     // Declare the variable in the declaration scope.
@@ -1002,6 +1120,43 @@
   return var;
 }
 
+Variable* Scope::DeclareVariableName(const AstRawString* name,
+                                     VariableMode mode) {
+  DCHECK(IsDeclaredVariableMode(mode));
+  DCHECK(!already_resolved_);
+  DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
+
+  if (mode == VAR && !is_declaration_scope()) {
+    return GetDeclarationScope()->DeclareVariableName(name, mode);
+  }
+  DCHECK(!is_with_scope());
+  DCHECK(!is_eval_scope());
+  // Unlike DeclareVariable, DeclareVariableName allows declaring variables in
+  // catch scopes: Parser::RewriteCatchPattern bypasses DeclareVariable by
+  // calling DeclareLocal directly, and it doesn't make sense to add a similar
+  // bypass mechanism for PreParser.
+  DCHECK(is_declaration_scope() || (IsLexicalVariableMode(mode) &&
+                                    (is_block_scope() || is_catch_scope())));
+  DCHECK(scope_info_.is_null());
+
+  // Declare the variable in the declaration scope.
+  if (FLAG_preparser_scope_analysis) {
+    Variable* var = LookupLocal(name);
+    DCHECK_NE(var, kDummyPreParserLexicalVariable);
+    DCHECK_NE(var, kDummyPreParserVariable);
+    if (var == nullptr) {
+      var = DeclareLocal(name, mode);
+    } else if (mode == VAR) {
+      DCHECK_EQ(var->mode(), VAR);
+      var->set_maybe_assigned();
+    }
+    var->set_is_used();
+    return var;
+  } else {
+    return variables_.DeclareName(zone(), name, mode);
+  }
+}
+
 VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
                                     const AstRawString* name,
                                     int start_position, VariableKind kind) {
@@ -1009,7 +1164,7 @@
   // the same name because they may be removed selectively via
   // RemoveUnresolved().
   DCHECK(!already_resolved_);
-  DCHECK_EQ(!needs_migration_, factory->zone() == zone());
+  DCHECK_EQ(factory->zone(), zone());
   VariableProxy* proxy = factory->NewVariableProxy(name, kind, start_position);
   proxy->set_next_unresolved(unresolved_);
   unresolved_ = proxy;
@@ -1026,8 +1181,8 @@
 Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
                                                  VariableKind kind) {
   DCHECK(is_script_scope());
-  return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind,
-                            kCreatedInitialized);
+  return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind);
+  // TODO(neis): Mark variable as maybe-assigned?
 }
 
 
@@ -1050,31 +1205,17 @@
   return false;
 }
 
-bool Scope::RemoveUnresolved(const AstRawString* name) {
-  if (unresolved_ != nullptr && unresolved_->raw_name() == name) {
-    VariableProxy* removed = unresolved_;
-    unresolved_ = unresolved_->next_unresolved();
-    removed->set_next_unresolved(nullptr);
-    return true;
-  }
-  VariableProxy* current = unresolved_;
-  while (current != nullptr) {
-    VariableProxy* next = current->next_unresolved();
-    if (next != nullptr && next->raw_name() == name) {
-      current->set_next_unresolved(next->next_unresolved());
-      next->set_next_unresolved(nullptr);
-      return true;
-    }
-    current = next;
-  }
-  return false;
+Variable* Scope::NewTemporary(const AstRawString* name) {
+  return NewTemporary(name, kMaybeAssigned);
 }
 
-Variable* Scope::NewTemporary(const AstRawString* name) {
+Variable* Scope::NewTemporary(const AstRawString* name,
+                              MaybeAssignedFlag maybe_assigned) {
   DeclarationScope* scope = GetClosureScope();
   Variable* var = new (zone())
       Variable(scope, name, TEMPORARY, NORMAL_VARIABLE, kCreatedInitialized);
   scope->AddLocal(var);
+  if (maybe_assigned == kMaybeAssigned) var->set_maybe_assigned();
   return var;
 }
 
@@ -1157,9 +1298,9 @@
   // guaranteed to be correct.
   for (const Scope* s = this; s != outer; s = s->outer_scope_) {
     // Eval forces context allocation on all outer scopes, so we don't need to
-    // look at those scopes. Sloppy eval makes all top-level variables dynamic,
-    // whereas strict-mode requires context allocation.
-    if (s->is_eval_scope()) return !is_strict(s->language_mode());
+    // look at those scopes. Sloppy eval makes top-level non-lexical variables
+    // dynamic, whereas strict-mode requires context allocation.
+    if (s->is_eval_scope()) return is_sloppy(s->language_mode());
     // Catch scopes force context allocation of all variables.
     if (s->is_catch_scope()) continue;
     // With scopes do not introduce variables that need allocation.
@@ -1276,7 +1417,7 @@
 
 Handle<StringSet> DeclarationScope::CollectNonLocals(
     ParseInfo* info, Handle<StringSet> non_locals) {
-  VariableProxy* free_variables = FetchFreeVariables(this, true, info);
+  VariableProxy* free_variables = FetchFreeVariables(this, info);
   for (VariableProxy* proxy = free_variables; proxy != nullptr;
        proxy = proxy->next_unresolved()) {
     non_locals = StringSet::Add(non_locals, proxy->name());
@@ -1289,27 +1430,42 @@
   DCHECK(is_function_scope());
 
   // Reset all non-trivial members.
-  params_.Clear();
+  if (!aborted || !IsArrowFunction(function_kind_)) {
+    // Do not remove parameters when lazy parsing of an arrow function has
+    // failed, as the formal parameters are not re-parsed.
+    params_.Clear();
+  }
   decls_.Clear();
   locals_.Clear();
-  sloppy_block_function_map_.Clear();
-  variables_.Clear();
-  // Make sure we won't walk the scope tree from here on.
   inner_scope_ = nullptr;
   unresolved_ = nullptr;
 
-  if (aborted && !IsArrowFunction(function_kind_)) {
-    DeclareDefaultFunctionVariables(ast_value_factory);
+  if (aborted) {
+    // Prepare scope for use in the outer zone.
+    zone_ = ast_value_factory->zone();
+    variables_.Reset(ZoneAllocationPolicy(zone_));
+    sloppy_block_function_map_.Reset(ZoneAllocationPolicy(zone_));
+    if (!IsArrowFunction(function_kind_)) {
+      DeclareDefaultFunctionVariables(ast_value_factory);
+    }
+  } else {
+    // Make sure this scope isn't used for allocation anymore.
+    zone_ = nullptr;
+    variables_.Invalidate();
+    sloppy_block_function_map_.Invalidate();
   }
 
 #ifdef DEBUG
   needs_migration_ = false;
+  is_being_lazily_parsed_ = false;
 #endif
 
-  is_lazily_parsed_ = !aborted;
+  was_lazily_parsed_ = !aborted;
 }
 
-void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
+void DeclarationScope::AnalyzePartially(
+    AstNodeFactory* ast_node_factory,
+    PreParsedScopeData* preparsed_scope_data) {
   DCHECK(!force_eager_compilation_);
   VariableProxy* unresolved = nullptr;
 
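
A sketch of the zone handling in ResetAfterPreparsing() above: on abort the scope is re-pointed at the longer-lived outer zone so it stays usable, while on success its zone pointer is cleared so any further allocation attempt fails fast. Simplified stand-ins for Zone and the scope:

#include <cassert>
#include <cstddef>
#include <vector>

struct Zone {
  std::vector<char*> blocks;
  ~Zone() {
    for (char* block : blocks) delete[] block;
  }
};

class LazyScope {
 public:
  explicit LazyScope(Zone* zone) : zone_(zone) {}

  void ResetAfterPreparsing(Zone* outer_zone, bool aborted) {
    if (aborted) {
      zone_ = outer_zone;  // keep the scope usable, but in the outer zone
    } else {
      zone_ = nullptr;     // this scope must never allocate again
    }
  }

  bool CanAllocate() const { return zone_ != nullptr; }

 private:
  Zone* zone_;
};

int main() {
  Zone temp_zone, outer_zone;
  LazyScope succeeded(&temp_zone), aborted(&temp_zone);
  succeeded.ResetAfterPreparsing(&outer_zone, /*aborted=*/false);
  aborted.ResetAfterPreparsing(&outer_zone, /*aborted=*/true);
  assert(!succeeded.CanAllocate() && aborted.CanAllocate());
  return 0;
}
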
@@ -1317,9 +1473,8 @@
     // Try to resolve unresolved variables for this Scope and migrate those
     // which cannot be resolved inside. It doesn't make sense to try to resolve
     // them in the outer Scopes here, because they are incomplete.
-    for (VariableProxy* proxy =
-             FetchFreeVariables(this, !FLAG_lazy_inner_functions);
-         proxy != nullptr; proxy = proxy->next_unresolved()) {
+    for (VariableProxy* proxy = FetchFreeVariables(this); proxy != nullptr;
+         proxy = proxy->next_unresolved()) {
       DCHECK(!proxy->is_resolved());
       VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
       copy->set_next_unresolved(unresolved);
@@ -1331,7 +1486,20 @@
         !(MustAllocate(arguments_) && !has_arguments_parameter_)) {
       arguments_ = nullptr;
     }
+
+    if (FLAG_preparser_scope_analysis) {
+      // Decide context allocation for the locals and parameters and store the
+      // info away.
+      AllocateVariablesRecursively();
+      CollectVariableData(preparsed_scope_data);
+    }
   }
+#ifdef DEBUG
+  if (FLAG_print_scopes) {
+    PrintF("Inner function scope:\n");
+    Print();
+  }
+#endif
 
   ResetAfterPreparsing(ast_node_factory->ast_value_factory(), false);
 
@@ -1339,8 +1507,10 @@
 }
 
 #ifdef DEBUG
-static const char* Header(ScopeType scope_type, FunctionKind function_kind,
-                          bool is_declaration_scope) {
+namespace {
+
+const char* Header(ScopeType scope_type, FunctionKind function_kind,
+                   bool is_declaration_scope) {
   switch (scope_type) {
     case EVAL_SCOPE: return "eval";
     // TODO(adamk): Should we print concise method scopes specially?
@@ -1359,18 +1529,13 @@
   return NULL;
 }
 
+void Indent(int n, const char* str) { PrintF("%*s%s", n, "", str); }
 
-static void Indent(int n, const char* str) {
-  PrintF("%*s%s", n, "", str);
-}
-
-
-static void PrintName(const AstRawString* name) {
+void PrintName(const AstRawString* name) {
   PrintF("%.*s", name->length(), name->raw_data());
 }
 
-
-static void PrintLocation(Variable* var) {
+void PrintLocation(Variable* var) {
   switch (var->location()) {
     case VariableLocation::UNALLOCATED:
       break;
@@ -1392,45 +1557,52 @@
   }
 }
 
-
-static void PrintVar(int indent, Variable* var) {
-  if (var->is_used() || !var->IsUnallocated()) {
-    Indent(indent, VariableMode2String(var->mode()));
-    PrintF(" ");
-    if (var->raw_name()->IsEmpty())
-      PrintF(".%p", reinterpret_cast<void*>(var));
-    else
-      PrintName(var->raw_name());
-    PrintF(";  // ");
-    PrintLocation(var);
-    bool comma = !var->IsUnallocated();
-    if (var->has_forced_context_allocation()) {
-      if (comma) PrintF(", ");
-      PrintF("forced context allocation");
-      comma = true;
-    }
-    if (var->maybe_assigned() == kNotAssigned) {
-      if (comma) PrintF(", ");
-      PrintF("never assigned");
-    }
-    PrintF("\n");
+void PrintVar(int indent, Variable* var) {
+  Indent(indent, VariableMode2String(var->mode()));
+  PrintF(" ");
+  if (var->raw_name()->IsEmpty())
+    PrintF(".%p", reinterpret_cast<void*>(var));
+  else
+    PrintName(var->raw_name());
+  PrintF(";  // ");
+  PrintLocation(var);
+  bool comma = !var->IsUnallocated();
+  if (var->has_forced_context_allocation()) {
+    if (comma) PrintF(", ");
+    PrintF("forced context allocation");
+    comma = true;
   }
+  if (var->maybe_assigned() == kNotAssigned) {
+    if (comma) PrintF(", ");
+    PrintF("never assigned");
+  }
+  PrintF("\n");
 }
 
-static void PrintMap(int indent, VariableMap* map, bool locals) {
+void PrintMap(int indent, const char* label, VariableMap* map, bool locals,
+              Variable* function_var) {
+  bool printed_label = false;
   for (VariableMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
     Variable* var = reinterpret_cast<Variable*>(p->value);
+    if (var == function_var) continue;
+    if (var == kDummyPreParserVariable ||
+        var == kDummyPreParserLexicalVariable) {
+      continue;
+    }
     bool local = !IsDynamicVariableMode(var->mode());
-    if (locals ? local : !local) {
-      if (var == nullptr) {
-        Indent(indent, "<?>\n");
-      } else {
-        PrintVar(indent, var);
+    if ((locals ? local : !local) &&
+        (var->is_used() || !var->IsUnallocated())) {
+      if (!printed_label) {
+        Indent(indent, label);
+        printed_label = true;
       }
+      PrintVar(indent, var);
     }
   }
 }
 
+}  // anonymous namespace
+
 void DeclarationScope::PrintParameters() {
   PrintF(" (");
   for (int i = 0; i < params_.length(); i++) {
@@ -1466,6 +1638,9 @@
   }
 
   PrintF(" { // (%d, %d)\n", start_position(), end_position());
+  if (is_hidden()) {
+    Indent(n1, "// is hidden\n");
+  }
 
   // Function name, if any (named function literals, only).
   if (function != nullptr) {
@@ -1487,9 +1662,12 @@
   if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
   if (is_declaration_scope()) {
     DeclarationScope* scope = AsDeclarationScope();
-    if (scope->is_lazily_parsed()) Indent(n1, "// lazily parsed\n");
+    if (scope->was_lazily_parsed()) Indent(n1, "// lazily parsed\n");
     if (scope->ShouldEagerCompile()) Indent(n1, "// will be compiled\n");
   }
+  if (has_forced_context_allocation()) {
+    Indent(n1, "// forces context allocation\n");
+  }
   if (num_stack_slots_ > 0) {
     Indent(n1, "// ");
     PrintF("%d stack slots\n", num_stack_slots_);
@@ -1505,12 +1683,22 @@
     PrintVar(n1, function);
   }
 
-  if (variables_.Start() != NULL) {
-    Indent(n1, "// local vars:\n");
-    PrintMap(n1, &variables_, true);
+  // Print temporaries.
+  {
+    bool printed_header = false;
+    for (Variable* local : locals_) {
+      if (local->mode() != TEMPORARY) continue;
+      if (!printed_header) {
+        printed_header = true;
+        Indent(n1, "// temporary vars:\n");
+      }
+      PrintVar(n1, local);
+    }
+  }
 
-    Indent(n1, "// dynamic vars:\n");
-    PrintMap(n1, &variables_, false);
+  if (variables_.occupancy() > 0) {
+    PrintMap(n1, "// local vars:\n", &variables_, true, function);
+    PrintMap(n1, "// dynamic vars:\n", &variables_, false, function);
   }
 
   // Print inner scopes (disable by providing negative n).
@@ -1539,6 +1727,12 @@
 void Scope::CheckZones() {
   DCHECK(!needs_migration_);
   for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+    if (scope->is_declaration_scope() &&
+        scope->AsDeclarationScope()->was_lazily_parsed()) {
+      DCHECK_NULL(scope->zone());
+      DCHECK_NULL(scope->inner_scope_);
+      continue;
+    }
     CHECK_EQ(scope->zone(), zone());
     scope->CheckZones();
   }
@@ -1548,8 +1742,7 @@
 Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
   // Declare a new non-local.
   DCHECK(IsDynamicVariableMode(mode));
-  Variable* var = variables_.Declare(zone(), NULL, name, mode, NORMAL_VARIABLE,
-                                     kCreatedInitialized);
+  Variable* var = variables_.Declare(zone(), nullptr, name, mode);
   // Allocate it by giving it a dynamic lookup.
   var->AllocateTo(VariableLocation::LOOKUP, -1);
   return var;
@@ -1590,6 +1783,13 @@
   // The variable could not be resolved statically.
   if (var == nullptr) return var;
 
+  // TODO(marja): Separate LookupRecursive for preparsed scopes better.
+  if (var == kDummyPreParserVariable || var == kDummyPreParserLexicalVariable) {
+    DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
+    DCHECK(FLAG_lazy_inner_functions);
+    return var;
+  }
+
   if (is_function_scope() && !var->is_dynamic()) {
     var->ForceContextAllocation();
   }
@@ -1641,34 +1841,20 @@
   DCHECK(!proxy->is_resolved());
   Variable* var = LookupRecursive(proxy, nullptr);
   ResolveTo(info, proxy, var);
-
-  if (FLAG_lazy_inner_functions) {
-    if (info != nullptr && info->is_native()) return;
-    // Pessimistically force context allocation for all variables to which inner
-    // scope variables could potentially resolve to.
-    Scope* scope = GetClosureScope()->outer_scope_;
-    while (scope != nullptr && scope->scope_info_.is_null()) {
-      var = scope->LookupLocal(proxy->raw_name());
-      if (var != nullptr) {
-        // Since we don't lazy parse inner arrow functions, inner functions
-        // cannot refer to the outer "this".
-        if (!var->is_dynamic() && !var->is_this() &&
-            !var->has_forced_context_allocation()) {
-          var->ForceContextAllocation();
-          var->set_is_used();
-          // We don't know what the (potentially lazy parsed) inner function
-          // does with the variable; pessimistically assume that it's assigned.
-          var->set_maybe_assigned();
-        }
-      }
-      scope = scope->outer_scope_;
-    }
-  }
 }
 
 namespace {
 
 bool AccessNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
+  if (var->mode() == DYNAMIC_LOCAL) {
+    // Dynamically introduced variables never need a hole check (since they're
+    // VAR bindings, either from var or function declarations), but the variable
+    // they shadow might need a hole check, which we want to do if we decide
+    // that no shadowing variable was dynamically introduced.
+    DCHECK(!var->binding_needs_init());
+    return AccessNeedsHoleCheck(var->local_if_not_shadowed(), proxy, scope);
+  }
+
   if (!var->binding_needs_init()) {
     return false;
   }
@@ -1703,8 +1889,7 @@
   }
 
   if (var->is_this()) {
-    DCHECK(
-        IsSubclassConstructor(scope->GetDeclarationScope()->function_kind()));
+    DCHECK(IsDerivedConstructor(scope->GetDeclarationScope()->function_kind()));
     // TODO(littledan): implement 'this' hole check elimination.
     return true;
   }
@@ -1742,44 +1927,79 @@
 #endif
 
   DCHECK_NOT_NULL(var);
-  if (proxy->is_assigned()) var->set_maybe_assigned();
   if (AccessNeedsHoleCheck(var, proxy, this)) proxy->set_needs_hole_check();
   proxy->BindTo(var);
 }
 
 void Scope::ResolveVariablesRecursively(ParseInfo* info) {
   DCHECK(info->script_scope()->is_script_scope());
+  // Lazily parsed declaration scopes are already partially analyzed. If there are
+  // unresolved references remaining, they just need to be resolved in outer
+  // scopes.
+  if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
+    DCHECK(variables_.occupancy() == 0);
+    for (VariableProxy* proxy = unresolved_; proxy != nullptr;
+         proxy = proxy->next_unresolved()) {
+      Variable* var = outer_scope()->LookupRecursive(proxy, nullptr);
+      if (!var->is_dynamic()) {
+        var->set_is_used();
+        var->ForceContextAllocation();
+        if (proxy->is_assigned()) var->set_maybe_assigned();
+      }
+    }
+  } else {
+    // Resolve unresolved variables for this scope.
+    for (VariableProxy* proxy = unresolved_; proxy != nullptr;
+         proxy = proxy->next_unresolved()) {
+      ResolveVariable(info, proxy);
+    }
 
-  // Resolve unresolved variables for this scope.
-  for (VariableProxy* proxy = unresolved_; proxy != nullptr;
-       proxy = proxy->next_unresolved()) {
-    ResolveVariable(info, proxy);
-  }
-
-  // Resolve unresolved variables for inner scopes.
-  for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
-    scope->ResolveVariablesRecursively(info);
+    // Resolve unresolved variables for inner scopes.
+    for (Scope* scope = inner_scope_; scope != nullptr;
+         scope = scope->sibling_) {
+      scope->ResolveVariablesRecursively(info);
+    }
   }
 }
 
 VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
-                                         bool try_to_resolve, ParseInfo* info,
+                                         ParseInfo* info,
                                          VariableProxy* stack) {
+  // Module variables must be allocated before variable resolution
+  // to ensure that AccessNeedsHoleCheck() can detect import variables.
+  if (info != nullptr && is_module_scope()) {
+    AsModuleScope()->AllocateModuleVariables();
+  }
+  // Lazily parsed declaration scopes are already partially analyzed. If there are
+  // unresolved references remaining, they just need to be resolved in outer
+  // scopes.
+  Scope* lookup =
+      is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()
+          ? outer_scope()
+          : this;
   for (VariableProxy *proxy = unresolved_, *next = nullptr; proxy != nullptr;
        proxy = next) {
     next = proxy->next_unresolved();
     DCHECK(!proxy->is_resolved());
-    Variable* var = nullptr;
-    if (try_to_resolve) {
-      var = LookupRecursive(proxy, max_outer_scope->outer_scope());
-    }
+    Variable* var =
+        lookup->LookupRecursive(proxy, max_outer_scope->outer_scope());
     if (var == nullptr) {
       proxy->set_next_unresolved(stack);
       stack = proxy;
-    } else if (info != nullptr) {
-      ResolveTo(info, proxy, var);
-    } else {
-      var->set_is_used();
+    } else if (var != kDummyPreParserVariable &&
+               var != kDummyPreParserLexicalVariable) {
+      if (info != nullptr) {
+        // In this case we need to leave scopes in a way that they can be
+        // allocated. If we resolved variables from lazy parsed scopes, we need
+        // to context allocate the var.
+        ResolveTo(info, proxy, var);
+        if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
+      } else {
+        var->set_is_used();
+        if (proxy->is_assigned()) {
+          var->set_maybe_assigned();
+        }
+      }
     }
   }
 
@@ -1787,14 +2007,16 @@
   unresolved_ = nullptr;
 
   for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
-    stack =
-        scope->FetchFreeVariables(max_outer_scope, try_to_resolve, info, stack);
+    stack = scope->FetchFreeVariables(max_outer_scope, info, stack);
   }
 
   return stack;
 }
 
 bool Scope::MustAllocate(Variable* var) {
+  if (var == kDummyPreParserLexicalVariable || var == kDummyPreParserVariable) {
+    return true;
+  }
   DCHECK(var->location() != VariableLocation::MODULE);
   // Give var a read/write use if there is a chance it might be accessed
   // via an eval() call.  This is only possible if the variable has a
@@ -1823,7 +2045,10 @@
   if (has_forced_context_allocation()) return true;
   if (var->mode() == TEMPORARY) return false;
   if (is_catch_scope()) return true;
-  if (is_script_scope() && IsLexicalVariableMode(var->mode())) return true;
+  if ((is_script_scope() || is_eval_scope()) &&
+      IsLexicalVariableMode(var->mode())) {
+    return true;
+  }
   return var->has_forced_context_allocation() || inner_scope_calls_eval_;
 }
 
@@ -1880,6 +2105,7 @@
     DCHECK_EQ(this, var->scope());
     if (uses_sloppy_arguments) {
       var->set_is_used();
+      var->set_maybe_assigned();
       var->ForceContextAllocation();
     }
     AllocateParameter(var, i);
@@ -1946,9 +2172,8 @@
     new_target_ = nullptr;
   }
 
-  if (this_function_ != nullptr && !MustAllocate(this_function_)) {
-    this_function_ = nullptr;
-  }
+  NullifyRareVariableIf(RareVariable::kThisFunction,
+                        [=](Variable* var) { return !MustAllocate(var); });
 }
 
 void ModuleScope::AllocateModuleVariables() {
@@ -1967,9 +2192,10 @@
 
 void Scope::AllocateVariablesRecursively() {
   DCHECK(!already_resolved_);
-  DCHECK_EQ(0, num_stack_slots_);
+  DCHECK_IMPLIES(!FLAG_preparser_scope_analysis, num_stack_slots_ == 0);
+
   // Don't allocate variables of preparsed scopes.
-  if (is_declaration_scope() && AsDeclarationScope()->is_lazily_parsed()) {
+  if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
     return;
   }
 
@@ -1994,9 +2220,9 @@
   // Force allocation of a context for this scope if necessary. For a 'with'
   // scope and for a function scope that makes an 'eval' call we need a context,
   // even if no local variables were statically allocated in the scope.
-  // Likewise for modules.
+  // Likewise for modules and function scopes representing asm.js modules.
   bool must_have_context =
-      is_with_scope() || is_module_scope() ||
+      is_with_scope() || is_module_scope() || IsAsmModule() ||
       (is_function_scope() && calls_sloppy_eval()) ||
       (is_block_scope() && is_declaration_scope() && calls_sloppy_eval());
 
@@ -2043,6 +2269,17 @@
   }
 }
 
+void Scope::CollectVariableData(PreParsedScopeData* data) {
+  PreParsedScopeData::ScopeScope scope_scope(data, scope_type(),
+                                             start_position(), end_position());
+  for (Variable* local : locals_) {
+    scope_scope.MaybeAddVariable(local);
+  }
+  for (Scope* inner = inner_scope_; inner != nullptr; inner = inner->sibling_) {
+    inner->CollectVariableData(data);
+  }
+}
+
 int Scope::StackLocalCount() const {
   Variable* function =
       is_function_scope() ? AsDeclarationScope()->function_var() : nullptr;
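
A sketch of the CollectVariableData() traversal added above: scopes form a tree threaded through inner_scope_/sibling_ pointers, and data is gathered by visiting a node and then recursing over its sibling-linked children. Simplified node and collector, not the real Scope/PreParsedScopeData:

#include <cassert>
#include <vector>

struct Scope {
  int num_locals = 0;
  Scope* inner_scope = nullptr;  // first child
  Scope* sibling = nullptr;      // next child of the same parent

  void CollectVariableData(std::vector<int>* out) const {
    out->push_back(num_locals);  // record this scope's data first
    for (const Scope* inner = inner_scope; inner != nullptr;
         inner = inner->sibling) {
      inner->CollectVariableData(out);  // then recurse into each child
    }
  }
};

int main() {
  Scope root, a, b;
  root.num_locals = 2;
  a.num_locals = 1;
  b.num_locals = 3;
  a.sibling = &b;         // a and b are siblings...
  root.inner_scope = &a;  // ...and children of root
  std::vector<int> data;
  root.CollectVariableData(&data);
  assert((data == std::vector<int>{2, 1, 3}));
  return 0;
}
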
diff --git a/src/ast/scopes.h b/src/ast/scopes.h
index c7d88ac..119d77c 100644
--- a/src/ast/scopes.h
+++ b/src/ast/scopes.h
@@ -19,7 +19,9 @@
 class AstRawString;
 class Declaration;
 class ParseInfo;
+class PreParsedScopeData;
 class SloppyBlockFunctionStatement;
+class Statement;
 class StringSet;
 class VariableProxy;
 
@@ -28,11 +30,17 @@
  public:
   explicit VariableMap(Zone* zone);
 
-  Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
-                    VariableMode mode, VariableKind kind,
-                    InitializationFlag initialization_flag,
-                    MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
-                    bool* added = nullptr);
+  Variable* Declare(
+      Zone* zone, Scope* scope, const AstRawString* name, VariableMode mode,
+      VariableKind kind = NORMAL_VARIABLE,
+      InitializationFlag initialization_flag = kCreatedInitialized,
+      MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
+      bool* added = nullptr);
+
+  // Records that "name" exists (if not recorded yet) but doesn't create a
+  // Variable. Useful for preparsing.
+  Variable* DeclareName(Zone* zone, const AstRawString* name,
+                        VariableMode mode);
 
   Variable* Lookup(const AstRawString* name);
   void Remove(Variable* var);
@@ -43,9 +51,24 @@
 // Sloppy block-scoped function declarations to var-bind
 class SloppyBlockFunctionMap : public ZoneHashMap {
  public:
+  class Delegate : public ZoneObject {
+   public:
+    explicit Delegate(Scope* scope,
+                      SloppyBlockFunctionStatement* statement = nullptr)
+        : scope_(scope), statement_(statement), next_(nullptr) {}
+    void set_statement(Statement* statement);
+    void set_next(Delegate* next) { next_ = next; }
+    Delegate* next() const { return next_; }
+    Scope* scope() const { return scope_; }
+
+   private:
+    Scope* scope_;
+    SloppyBlockFunctionStatement* statement_;
+    Delegate* next_;
+  };
+
   explicit SloppyBlockFunctionMap(Zone* zone);
-  void Declare(Zone* zone, const AstRawString* name,
-               SloppyBlockFunctionStatement* statement);
+  void Declare(Zone* zone, const AstRawString* name, Delegate* delegate);
 };
 
 enum class AnalyzeMode { kRegular, kDebugger };
@@ -148,7 +171,8 @@
   // Declare a local variable in this scope. If the variable has been
   // declared before, the previously declared variable is returned.
   Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
-                         InitializationFlag init_flag, VariableKind kind,
+                         InitializationFlag init_flag = kCreatedInitialized,
+                         VariableKind kind = NORMAL_VARIABLE,
                          MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
 
   Variable* DeclareVariable(Declaration* declaration, VariableMode mode,
@@ -157,6 +181,9 @@
                             bool* sloppy_mode_block_scope_function_redefinition,
                             bool* ok);
 
+  // The return value is meaningful only if FLAG_preparser_scope_analysis is on.
+  Variable* DeclareVariableName(const AstRawString* name, VariableMode mode);
+
   // Declarations list.
   ThreadedList<Declaration>* declarations() { return &decls_; }
 
@@ -177,7 +204,6 @@
   // allocated globally as a "ghost" variable. RemoveUnresolved removes
   // such a variable again if it was added; otherwise this is a no-op.
   bool RemoveUnresolved(VariableProxy* var);
-  bool RemoveUnresolved(const AstRawString* name);
 
   // Creates a new temporary variable in this scope's TemporaryScope.  The
   // name is only used for printing and cannot be used to find the variable.
@@ -207,14 +233,11 @@
   // Scope-specific info.
 
   // Inform the scope and outer scopes that the corresponding code contains an
-  // eval call. We don't record eval calls from innner scopes in the outer most
-  // script scope, as we only see those when parsing eagerly. If we recorded the
-  // calls then, the outer most script scope would look different depending on
-  // whether we parsed eagerly or not which is undesirable.
+  // eval call.
   void RecordEvalCall() {
     scope_calls_eval_ = true;
     inner_scope_calls_eval_ = true;
-    for (Scope* scope = outer_scope(); scope && !scope->is_script_scope();
+    for (Scope* scope = outer_scope(); scope != nullptr;
          scope = scope->outer_scope()) {
       scope->inner_scope_calls_eval_ = true;
     }
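
A sketch of the widened RecordEvalCall() above: the flag now propagates to every outer scope, script scope included, rather than stopping early. Simplified node type:

#include <cassert>

struct Scope {
  Scope* outer = nullptr;
  bool scope_calls_eval = false;
  bool inner_scope_calls_eval = false;

  void RecordEvalCall() {
    scope_calls_eval = true;
    inner_scope_calls_eval = true;
    // Walk the whole outer chain; previously the walk stopped before the
    // outermost script scope.
    for (Scope* s = outer; s != nullptr; s = s->outer) {
      s->inner_scope_calls_eval = true;
    }
  }
};

int main() {
  Scope script, function, block;
  function.outer = &script;
  block.outer = &function;
  block.RecordEvalCall();
  assert(script.inner_scope_calls_eval && function.inner_scope_calls_eval);
  return 0;
}
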
@@ -303,6 +326,7 @@
   bool calls_sloppy_eval() const {
     return scope_calls_eval_ && is_sloppy(language_mode());
   }
+  bool inner_scope_calls_eval() const { return inner_scope_calls_eval_; }
   bool IsAsmModule() const;
   bool IsAsmFunction() const;
   // Does this scope have the potential to execute declarations non-linearly?
@@ -387,7 +411,7 @@
   Scope* GetOuterScopeWithContext();
 
   // Analyze() must have been called once to create the ScopeInfo.
-  Handle<ScopeInfo> scope_info() {
+  Handle<ScopeInfo> scope_info() const {
     DCHECK(!scope_info_.is_null());
     return scope_info_;
   }
@@ -423,6 +447,22 @@
   void set_is_debug_evaluate_scope() { is_debug_evaluate_scope_ = true; }
   bool is_debug_evaluate_scope() const { return is_debug_evaluate_scope_; }
 
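+  // Unlinks inner_scope from the sibling chain rooted at inner_scope_.
+  // Returns whether it was found and removed.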
+  bool RemoveInnerScope(Scope* inner_scope) {
+    DCHECK_NOT_NULL(inner_scope);
+    if (inner_scope == inner_scope_) {
+      inner_scope_ = inner_scope_->sibling_;
+      return true;
+    }
+    for (Scope* scope = inner_scope_; scope != nullptr;
+         scope = scope->sibling_) {
+      if (scope->sibling_ == inner_scope) {
+        scope->sibling_ = scope->sibling_->sibling_;
+        return true;
+      }
+    }
+    return false;
+  }
+
  protected:
   explicit Scope(Zone* zone);
 
@@ -431,10 +471,11 @@
   }
 
  private:
-  Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
-                    VariableMode mode, VariableKind kind,
-                    InitializationFlag initialization_flag,
-                    MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
+  Variable* Declare(
+      Zone* zone, const AstRawString* name, VariableMode mode,
+      VariableKind kind = NORMAL_VARIABLE,
+      InitializationFlag initialization_flag = kCreatedInitialized,
+      MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
 
   // This method should only be invoked on scopes created during parsing (i.e.,
   // not deserialized from a context). Also, since NeedsContext() is only
@@ -442,6 +483,8 @@
   // should also be invoked after resolution.
   bool NeedsScopeInfo() const;
 
+  Variable* NewTemporary(const AstRawString* name,
+                         MaybeAssignedFlag maybe_assigned);
   Zone* zone_;
 
   // Scope tree.
@@ -527,7 +570,6 @@
   // list along the way, so full resolution cannot be done afterwards.
   // If a ParseInfo* is passed, non-free variables will be resolved.
   VariableProxy* FetchFreeVariables(DeclarationScope* max_outer_scope,
-                                    bool try_to_resolve = true,
                                     ParseInfo* info = nullptr,
                                     VariableProxy* stack = nullptr);
 
@@ -548,6 +590,8 @@
   void AllocateDebuggerScopeInfos(Isolate* isolate,
                                   MaybeHandle<ScopeInfo> outer_scope);
 
+  void CollectVariableData(PreParsedScopeData* data);
+
   // Construct a scope based on the scope info.
   Scope(Zone* zone, ScopeType type, Handle<ScopeInfo> scope_info);
 
@@ -556,33 +600,18 @@
         Handle<ScopeInfo> scope_info);
 
   void AddInnerScope(Scope* inner_scope) {
-    DCHECK_EQ(!needs_migration_, inner_scope->zone() == zone());
     inner_scope->sibling_ = inner_scope_;
     inner_scope_ = inner_scope;
     inner_scope->outer_scope_ = this;
   }
 
-  void RemoveInnerScope(Scope* inner_scope) {
-    DCHECK_NOT_NULL(inner_scope);
-    if (inner_scope == inner_scope_) {
-      inner_scope_ = inner_scope_->sibling_;
-      return;
-    }
-    for (Scope* scope = inner_scope_; scope != nullptr;
-         scope = scope->sibling_) {
-      if (scope->sibling_ == inner_scope) {
-        scope->sibling_ = scope->sibling_->sibling_;
-        return;
-      }
-    }
-  }
-
   void SetDefaults();
 
   friend class DeclarationScope;
+  friend class ScopeTestHelper;
 };
 
-class DeclarationScope : public Scope {
+class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
  public:
   DeclarationScope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
                    FunctionKind function_kind = kNormalFunction);
@@ -616,7 +645,15 @@
                                         IsClassConstructor(function_kind())));
   }
 
-  bool is_lazily_parsed() const { return is_lazily_parsed_; }
+  bool was_lazily_parsed() const { return was_lazily_parsed_; }
+
+#ifdef DEBUG
+  void set_is_being_lazily_parsed(bool is_being_lazily_parsed) {
+    is_being_lazily_parsed_ = is_being_lazily_parsed;
+  }
+  bool is_being_lazily_parsed() const { return is_being_lazily_parsed_; }
+#endif
+
   bool ShouldEagerCompile() const;
   void set_should_eager_compile();
 
@@ -629,7 +666,7 @@
   bool asm_module() const { return asm_module_; }
   void set_asm_module();
   bool asm_function() const { return asm_function_; }
-  void set_asm_function() { asm_module_ = true; }
+  void set_asm_function() { asm_function_ = true; }
 
   void DeclareThis(AstValueFactory* ast_value_factory);
   void DeclareArguments(AstValueFactory* ast_value_factory);
@@ -645,6 +682,11 @@
   // calls sloppy eval.
   Variable* DeclareFunctionVar(const AstRawString* name);
 
+  // Declare some special internal variables which must be accessible to
+  // Ignition without ScopeInfo.
+  Variable* DeclareGeneratorObjectVar(const AstRawString* name);
+  Variable* DeclarePromiseVar(const AstRawString* name);
+
   // Declare a parameter in this scope.  When there are duplicated
   // parameters the rightmost one 'wins'.  However, the implementation
   // expects all parameters to be declared and from left to right.
@@ -652,6 +694,11 @@
                              bool is_optional, bool is_rest, bool* is_duplicate,
                              AstValueFactory* ast_value_factory);
 
+  // Declares that a parameter with the given name exists. Creates a Variable
+  // and returns it if FLAG_preparser_scope_analysis is on.
+  Variable* DeclareParameterName(const AstRawString* name, bool is_rest,
+                                 AstValueFactory* ast_value_factory);
+
   // Declare an implicit global variable in this scope which must be a
   // script scope.  The variable was introduced (possibly from an inner
   // scope) by a reference to an unresolved variable with no intervening
@@ -683,6 +730,17 @@
     return function_;
   }
 
+  Variable* generator_object_var() const {
+    DCHECK(is_function_scope() || is_module_scope());
+    return GetRareVariable(RareVariable::kGeneratorObject);
+  }
+
+  Variable* promise_var() const {
+    DCHECK(is_function_scope());
+    DCHECK(IsAsyncFunction(function_kind_));
+    return GetRareVariable(RareVariable::kPromise);
+  }
+
   // Parameters. The left-most parameter has index 0.
   // Only valid for function and module scopes.
   Variable* parameter(int index) const {
@@ -723,12 +781,14 @@
   }
 
   Variable* this_function_var() const {
+    Variable* this_function = GetRareVariable(RareVariable::kThisFunction);
+
     // This is only used in derived constructors atm.
-    DCHECK(this_function_ == nullptr ||
+    DCHECK(this_function == nullptr ||
            (is_function_scope() && (IsClassConstructor(function_kind()) ||
                                     IsConciseMethod(function_kind()) ||
                                     IsAccessorFunction(function_kind()))));
-    return this_function_;
+    return this_function;
   }
 
   // Adds a local variable in this scope's locals list. This is for adjusting
@@ -736,10 +796,9 @@
   // initializers.
   void AddLocal(Variable* var);
 
-  void DeclareSloppyBlockFunction(const AstRawString* name,
-                                  SloppyBlockFunctionStatement* statement) {
-    sloppy_block_function_map_.Declare(zone(), name, statement);
-  }
+  void DeclareSloppyBlockFunction(
+      const AstRawString* name, Scope* scope,
+      SloppyBlockFunctionStatement* statement = nullptr);
 
   // Go through sloppy_block_function_map_ and hoist those (into this scope)
   // which should be hoisted.
@@ -759,7 +818,8 @@
   // records variables which cannot be resolved inside the Scope (we don't yet
   // know what they will resolve to since the outer Scopes are incomplete) and
   // migrates them into migrate_to.
-  void AnalyzePartially(AstNodeFactory* ast_node_factory);
+  void AnalyzePartially(AstNodeFactory* ast_node_factory,
+                        PreParsedScopeData* preparsed_scope_data);
 
   Handle<StringSet> CollectNonLocals(ParseInfo* info,
                                      Handle<StringSet> non_locals);
@@ -819,7 +879,11 @@
   // This scope uses "super" property ('super.foo').
   bool scope_uses_super_property_ : 1;
   bool should_eager_compile_ : 1;
-  bool is_lazily_parsed_ : 1;
+  // Set to true after we have finished lazy parsing the scope.
+  bool was_lazily_parsed_ : 1;
+#if DEBUG
+  bool is_being_lazily_parsed_ : 1;
+#endif
 
   // Parameter list in source order.
   ZoneList<Variable*> params_;
@@ -833,8 +897,48 @@
   Variable* new_target_;
   // Convenience variable; function scopes only.
   Variable* arguments_;
-  // Convenience variable; Subclass constructor only
-  Variable* this_function_;
+
+  struct RareData : public ZoneObject {
+    // Convenience variable; subclass constructors only.
+    Variable* this_function = nullptr;
+
+    // Generator object, if any; generator function scopes and module scopes
+    // only.
+    Variable* generator_object = nullptr;
+    // Promise, if any; async function scopes only.
+    Variable* promise = nullptr;
+  };
+
+  enum class RareVariable {
+    kThisFunction = offsetof(RareData, this_function),
+    kGeneratorObject = offsetof(RareData, generator_object),
+    kPromise = offsetof(RareData, promise)
+  };
+
+  V8_INLINE RareData* EnsureRareData() {
+    if (rare_data_ == nullptr) {
+      rare_data_ = new (zone_) RareData;
+    }
+    return rare_data_;
+  }
+
+  V8_INLINE Variable* GetRareVariable(RareVariable id) const {
+    if (rare_data_ == nullptr) return nullptr;
+    return *reinterpret_cast<Variable**>(
+        reinterpret_cast<uint8_t*>(rare_data_) + static_cast<ptrdiff_t>(id));
+  }
+
+  // Sets the rare variable identified by `id` to null if it is non-null and
+  // the predicate (Variable*) -> bool returns true.
+  template <typename Predicate>
+  V8_INLINE void NullifyRareVariableIf(RareVariable id, Predicate predicate) {
+    if (V8_LIKELY(rare_data_ == nullptr)) return;
+    Variable** var = reinterpret_cast<Variable**>(
+        reinterpret_cast<uint8_t*>(rare_data_) + static_cast<ptrdiff_t>(id));
+    if (*var && predicate(*var)) *var = nullptr;
+  }
+
+  RareData* rare_data_ = nullptr;
 };
 
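The RareVariable enum above encodes each rare field as its byte offset inside RareData, so GetRareVariable can reach any of the pointers with plain pointer arithmetic instead of a switch. A minimal standalone sketch of that technique, not part of the patch (Variable and RareData here are simplified stand-ins):

#include <cassert>
#include <cstddef>
#include <cstdint>

struct Variable {};  // Stand-in for v8::internal::Variable.

struct RareData {
  Variable* this_function = nullptr;
  Variable* generator_object = nullptr;
  Variable* promise = nullptr;
};

// Each enumerator's value is the byte offset of its field, so a single
// accessor can address any of the pointers without a switch statement.
enum class RareVariable {
  kThisFunction = offsetof(RareData, this_function),
  kGeneratorObject = offsetof(RareData, generator_object),
  kPromise = offsetof(RareData, promise)
};

Variable* GetRareVariable(RareData* data, RareVariable id) {
  if (data == nullptr) return nullptr;
  return *reinterpret_cast<Variable**>(reinterpret_cast<uint8_t*>(data) +
                                       static_cast<ptrdiff_t>(id));
}

int main() {
  Variable v;
  RareData data;
  data.promise = &v;
  assert(GetRareVariable(&data, RareVariable::kPromise) == &v);
  assert(GetRareVariable(&data, RareVariable::kThisFunction) == nullptr);
  return 0;
}

NullifyRareVariableIf uses the same offset arithmetic to clear a slot in place.
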
 class ModuleScope final : public DeclarationScope {
diff --git a/src/ast/variables.cc b/src/ast/variables.cc
index 3771bfe..cd1d8f7 100644
--- a/src/ast/variables.cc
+++ b/src/ast/variables.cc
@@ -6,6 +6,7 @@
 
 #include "src/ast/scopes.h"
 #include "src/globals.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -36,9 +37,8 @@
 bool Variable::IsGlobalObjectProperty() const {
   // Temporaries are never global, they must always be allocated in the
   // activation frame.
-  return (IsDynamicVariableMode(mode()) ||
-          (IsDeclaredVariableMode(mode()) && !IsLexicalVariableMode(mode()))) &&
-         scope_ != NULL && scope_->is_script_scope();
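+  // A mode that is declared but not lexical is exactly VAR, hence the
+  // simpler check below.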
+  return (IsDynamicVariableMode(mode()) || mode() == VAR) &&
+         scope_ != nullptr && scope_->is_script_scope();
 }
 
 }  // namespace internal
diff --git a/src/background-parsing-task.cc b/src/background-parsing-task.cc
index e0af700..4a5b9cb 100644
--- a/src/background-parsing-task.cc
+++ b/src/background-parsing-task.cc
@@ -4,7 +4,7 @@
 
 #include "src/background-parsing-task.h"
 
-#include "src/debug/debug.h"
+#include "src/objects-inl.h"
 #include "src/parsing/parser.h"
 
 namespace v8 {
@@ -13,7 +13,6 @@
 void StreamedSource::Release() {
   parser.reset();
   info.reset();
-  zone.reset();
 }
 
 BackgroundParsingTask::BackgroundParsingTask(
@@ -29,10 +28,8 @@
 
   // Prepare the data for the internalization phase and compilation phase, which
   // will happen in the main thread after parsing.
-  Zone* zone = new Zone(isolate->allocator(), ZONE_NAME);
-  ParseInfo* info = new ParseInfo(zone);
+  ParseInfo* info = new ParseInfo(isolate->allocator());
   info->set_toplevel();
-  source->zone.reset(zone);
   source->info.reset(info);
   info->set_isolate(isolate);
   info->set_source_stream(source->source_stream.get());
diff --git a/src/background-parsing-task.h b/src/background-parsing-task.h
index d7fe6ba..061e365 100644
--- a/src/background-parsing-task.h
+++ b/src/background-parsing-task.h
@@ -38,7 +38,6 @@
   // between parsing and compilation. These need to be initialized before the
   // compilation starts.
   UnicodeCache unicode_cache;
-  std::unique_ptr<Zone> zone;
   std::unique_ptr<ParseInfo> info;
   std::unique_ptr<Parser> parser;
 
diff --git a/src/bailout-reason.h b/src/bailout-reason.h
index 247024f..b5e39c6 100644
--- a/src/bailout-reason.h
+++ b/src/bailout-reason.h
@@ -88,6 +88,7 @@
     "The function_data field should be a BytecodeArray on interpreter entry")  \
   V(kGeneratedCodeIsTooLarge, "Generated code is too large")                   \
   V(kGenerator, "Generator")                                                   \
+  V(kGetIterator, "GetIterator")                                               \
   V(kGlobalFunctionsMustHaveInitialMap,                                        \
     "Global functions must have initial map")                                  \
   V(kGraphBuildingFailed, "Optimized graph construction failed")               \
@@ -125,6 +126,7 @@
   V(kLookupVariableInCountOperation, "Lookup variable in count operation")     \
   V(kMapBecameDeprecated, "Map became deprecated")                             \
   V(kMapBecameUnstable, "Map became unstable")                                 \
+  V(kMissingBytecodeArray, "Missing bytecode array from function")             \
   V(kNativeFunctionLiteral, "Native function literal")                         \
   V(kNeedSmiLiteral, "Need a Smi literal here")                                \
   V(kNoCasesLeft, "No cases left")                                             \
@@ -138,7 +140,6 @@
   V(kNotEnoughSpillSlotsForOsr, "Not enough spill slots for OSR")              \
   V(kNotEnoughVirtualRegistersRegalloc,                                        \
     "Not enough virtual registers (regalloc)")                                 \
-  V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array")              \
   V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
   V(kOffsetOutOfRange, "Offset out of range")                                  \
   V(kOperandIsANumber, "Operand is a number")                                  \
@@ -165,7 +166,7 @@
   V(kObjectNotTagged, "The object is not tagged")                              \
   V(kOptimizationDisabled, "Optimization disabled")                            \
   V(kOptimizationDisabledForTest, "Optimization disabled for test")            \
-  V(kOptimizedTooManyTimes, "Optimized too many times")                        \
+  V(kDeoptimizedTooManyTimes, "Deoptimized too many times")                    \
   V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister,                   \
     "Out of virtual registers while trying to allocate temp register")         \
   V(kParseScopeError, "Parse/scope error")                                     \
@@ -185,6 +186,7 @@
     "Sloppy function expects JSReceiver as receiver.")                         \
   V(kSmiAdditionOverflow, "Smi addition overflow")                             \
   V(kSmiSubtractionOverflow, "Smi subtraction overflow")                       \
+  V(kSpreadCall, "Call with spread argument")                                  \
   V(kStackAccessBelowStackPointer, "Stack access below stack pointer")         \
   V(kStackFrameTypesMustMatch, "Stack frame types must match")                 \
   V(kSuperReference, "Super reference")                                        \
@@ -211,14 +213,10 @@
     "Unexpected ElementsKind in array constructor")                            \
   V(kUnexpectedFallthroughFromCharCodeAtSlowCase,                              \
     "Unexpected fallthrough from CharCodeAt slow case")                        \
-  V(kUnexpectedFallthroughFromCharFromCodeSlowCase,                            \
-    "Unexpected fallthrough from CharFromCode slow case")                      \
   V(kUnexpectedFallThroughFromStringComparison,                                \
     "Unexpected fall-through from string comparison")                          \
   V(kUnexpectedFallthroughToCharCodeAtSlowCase,                                \
     "Unexpected fallthrough to CharCodeAt slow case")                          \
-  V(kUnexpectedFallthroughToCharFromCodeSlowCase,                              \
-    "Unexpected fallthrough to CharFromCode slow case")                        \
   V(kUnexpectedFPUStackDepthAfterInstruction,                                  \
     "Unexpected FPU stack depth after instruction")                            \
   V(kUnexpectedInitialMapForArrayFunction1,                                    \
@@ -251,6 +249,8 @@
   V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments")         \
   V(kUnsupportedPhiUseOfConstVariable,                                         \
     "Unsupported phi use of const or let variable")                            \
+  V(kUnexpectedReturnFromFrameDropper,                                         \
+    "Unexpectedly returned from dropping frames")                              \
   V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw")          \
   V(kUnsupportedSwitchStatement, "Unsupported switch statement")               \
   V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate")               \
@@ -263,7 +263,9 @@
   V(kWrongArgumentCountForInvokeIntrinsic,                                     \
     "Wrong number of arguments for intrinsic")                                 \
   V(kShouldNotDirectlyEnterOsrFunction,                                        \
-    "Should not directly enter OSR-compiled function")
+    "Should not directly enter OSR-compiled function")                         \
+  V(kUnexpectedReturnFromWasmTrap,                                             \
+    "Should not return after throwing a wasm trap")
 
 #define ERROR_MESSAGES_CONSTANTS(C, T) C,
 enum BailoutReason {
diff --git a/src/base.isolate b/src/base.isolate
index c457f00..85f59ec 100644
--- a/src/base.isolate
+++ b/src/base.isolate
@@ -4,6 +4,9 @@
 {
   'includes': [
     '../third_party/icu/icu.isolate',
+
+    # MSVS runtime libraries.
+    '../gypfiles/win/msvs_dependencies.isolate',
   ],
   'conditions': [
     ['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
diff --git a/src/base/atomic-utils.h b/src/base/atomic-utils.h
index f40853c..f4f43fc 100644
--- a/src/base/atomic-utils.h
+++ b/src/base/atomic-utils.h
@@ -31,7 +31,9 @@
         &value_, -static_cast<base::AtomicWord>(decrement)));
   }
 
-  V8_INLINE T Value() { return static_cast<T>(base::Acquire_Load(&value_)); }
+  V8_INLINE T Value() const {
+    return static_cast<T>(base::Acquire_Load(&value_));
+  }
 
   V8_INLINE void SetValue(T new_value) {
     base::Release_Store(&value_, static_cast<base::AtomicWord>(new_value));
diff --git a/src/base/cpu.cc b/src/base/cpu.cc
index cf1f9c3..896c25d 100644
--- a/src/base/cpu.cc
+++ b/src/base/cpu.cc
@@ -24,6 +24,9 @@
 #ifndef POWER_8
 #define POWER_8 0x10000
 #endif
+#ifndef POWER_9
+#define POWER_9 0x20000
+#endif
 #endif
 #if V8_OS_POSIX
 #include <unistd.h>  // sysconf()
@@ -670,7 +673,9 @@
 
   part_ = -1;
   if (auxv_cpu_type) {
-    if (strcmp(auxv_cpu_type, "power8") == 0) {
+    if (strcmp(auxv_cpu_type, "power9") == 0) {
+      part_ = PPC_POWER9;
+    } else if (strcmp(auxv_cpu_type, "power8") == 0) {
       part_ = PPC_POWER8;
     } else if (strcmp(auxv_cpu_type, "power7") == 0) {
       part_ = PPC_POWER7;
@@ -689,6 +694,9 @@
 
 #elif V8_OS_AIX
   switch (_system_configuration.implementation) {
+    case POWER_9:
+      part_ = PPC_POWER9;
+      break;
     case POWER_8:
       part_ = PPC_POWER8;
       break;
diff --git a/src/base/cpu.h b/src/base/cpu.h
index e0fcea1..ef55b57 100644
--- a/src/base/cpu.h
+++ b/src/base/cpu.h
@@ -69,6 +69,7 @@
     PPC_POWER6,
     PPC_POWER7,
     PPC_POWER8,
+    PPC_POWER9,
     PPC_G4,
     PPC_G5,
     PPC_PA6T
diff --git a/src/base/hashmap.h b/src/base/hashmap.h
index d2fc133..4436a2d 100644
--- a/src/base/hashmap.h
+++ b/src/base/hashmap.h
@@ -40,6 +40,11 @@
                       MatchFun match = MatchFun(),
                       AllocationPolicy allocator = AllocationPolicy());
 
+  // Creates a copy of the given hashmap with the same entries.
+  TemplateHashMapImpl(const TemplateHashMapImpl<Key, Value, MatchFun,
+                                                AllocationPolicy>* original,
+                      AllocationPolicy allocator = AllocationPolicy());
+
   ~TemplateHashMapImpl();
 
   // If an entry with matching key is found, returns that entry.
@@ -70,6 +75,14 @@
   // Empties the hash map (occupancy() == 0).
   void Clear();
 
+  // Empties the map and makes it unusable for allocation.
+  void Invalidate() {
+    AllocationPolicy::Delete(map_);
+    map_ = nullptr;
+    occupancy_ = 0;
+    capacity_ = 0;
+  }
+
   // The number of (non-empty) entries in the table.
   uint32_t occupancy() const { return occupancy_; }
 
@@ -89,6 +102,14 @@
   Entry* Start() const;
   Entry* Next(Entry* entry) const;
 
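+  // Re-initializes the backing store at the current capacity, dropping all
+  // entries.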
+  void Reset(AllocationPolicy allocator) {
+    Initialize(capacity_, allocator);
+    occupancy_ = 0;
+  }
+
+ protected:
+  void Initialize(uint32_t capacity, AllocationPolicy allocator);
+
  private:
   Entry* map_;
   uint32_t capacity_;
@@ -102,8 +123,9 @@
   Entry* FillEmptyEntry(Entry* entry, const Key& key, const Value& value,
                         uint32_t hash,
                         AllocationPolicy allocator = AllocationPolicy());
-  void Initialize(uint32_t capacity, AllocationPolicy allocator);
   void Resize(AllocationPolicy allocator);
+
+  DISALLOW_COPY_AND_ASSIGN(TemplateHashMapImpl);
 };
 template <typename Key, typename Value, typename MatchFun,
           class AllocationPolicy>
@@ -116,6 +138,19 @@
 
 template <typename Key, typename Value, typename MatchFun,
           class AllocationPolicy>
+TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::
+    TemplateHashMapImpl(const TemplateHashMapImpl<Key, Value, MatchFun,
+                                                  AllocationPolicy>* original,
+                        AllocationPolicy allocator)
+    : capacity_(original->capacity_),
+      occupancy_(original->occupancy_),
+      match_(original->match_) {
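+  // The entries are copied bit-for-bit, so keys and values are shared with
+  // the original (a shallow copy).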
+  map_ = reinterpret_cast<Entry*>(allocator.New(capacity_ * sizeof(Entry)));
+  memcpy(map_, original->map_, capacity_ * sizeof(Entry));
+}
+
+template <typename Key, typename Value, typename MatchFun,
+          class AllocationPolicy>
 TemplateHashMapImpl<Key, Value, MatchFun,
                     AllocationPolicy>::~TemplateHashMapImpl() {
   AllocationPolicy::Delete(map_);
@@ -367,6 +402,14 @@
       AllocationPolicy allocator = AllocationPolicy())
       : Base(capacity, HashEqualityThenKeyMatcher<void*, MatchFun>(match),
              allocator) {}
+
+  CustomMatcherTemplateHashMapImpl(
+      const CustomMatcherTemplateHashMapImpl<AllocationPolicy>* original,
+      AllocationPolicy allocator = AllocationPolicy())
+      : Base(original, allocator) {}
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(CustomMatcherTemplateHashMapImpl);
 };
 
 typedef CustomMatcherTemplateHashMapImpl<DefaultAllocationPolicy>
diff --git a/src/base/iterator.h b/src/base/iterator.h
index e380dc3..175698c 100644
--- a/src/base/iterator.h
+++ b/src/base/iterator.h
@@ -7,8 +7,6 @@
 
 #include <iterator>
 
-#include "src/base/macros.h"
-
 namespace v8 {
 namespace base {
 
diff --git a/src/base/logging.cc b/src/base/logging.cc
index cadcb6f..c94fe9d 100644
--- a/src/base/logging.cc
+++ b/src/base/logging.cc
@@ -14,9 +14,8 @@
 namespace base {
 
 // Explicit instantiations for commonly used comparisons.
-#define DEFINE_MAKE_CHECK_OP_STRING(type)              \
-  template std::string* MakeCheckOpString<type, type>( \
-      type const&, type const&, char const*);
+#define DEFINE_MAKE_CHECK_OP_STRING(type) \
+  template std::string* MakeCheckOpString<type, type>(type, type, char const*);
 DEFINE_MAKE_CHECK_OP_STRING(int)
 DEFINE_MAKE_CHECK_OP_STRING(long)       // NOLINT(runtime/int)
 DEFINE_MAKE_CHECK_OP_STRING(long long)  // NOLINT(runtime/int)
@@ -29,11 +28,11 @@
 
 
 // Explicit instantiations for floating point checks.
-#define DEFINE_CHECK_OP_IMPL(NAME)                          \
-  template std::string* Check##NAME##Impl<float, float>(    \
-      float const& lhs, float const& rhs, char const* msg); \
-  template std::string* Check##NAME##Impl<double, double>(  \
-      double const& lhs, double const& rhs, char const* msg);
+#define DEFINE_CHECK_OP_IMPL(NAME)                                            \
+  template std::string* Check##NAME##Impl<float, float>(float lhs, float rhs, \
+                                                        char const* msg);     \
+  template std::string* Check##NAME##Impl<double, double>(                    \
+      double lhs, double rhs, char const* msg);
 DEFINE_CHECK_OP_IMPL(EQ)
 DEFINE_CHECK_OP_IMPL(NE)
 DEFINE_CHECK_OP_IMPL(LE)
diff --git a/src/base/logging.h b/src/base/logging.h
index 7bbb82a..e852dde 100644
--- a/src/base/logging.h
+++ b/src/base/logging.h
@@ -43,25 +43,26 @@
 //
 // We make sure CHECK et al. always evaluates their arguments, as
 // doing CHECK(FunctionWithSideEffect()) is a common idiom.
-#define CHECK(condition)                                             \
-  do {                                                               \
-    if (V8_UNLIKELY(!(condition))) {                                 \
-      V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", #condition); \
-    }                                                                \
+#define CHECK_WITH_MSG(condition, message)                        \
+  do {                                                            \
+    if (V8_UNLIKELY(!(condition))) {                              \
+      V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", message); \
+    }                                                             \
   } while (0)
-
+#define CHECK(condition) CHECK_WITH_MSG(condition, #condition)
 
 #ifdef DEBUG
 
 // Helper macro for binary operators.
 // Don't use this macro directly in your code, use CHECK_EQ et al below.
-#define CHECK_OP(name, op, lhs, rhs)                                    \
-  do {                                                                  \
-    if (std::string* _msg = ::v8::base::Check##name##Impl(              \
-            (lhs), (rhs), #lhs " " #op " " #rhs)) {                     \
-      V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \
-      delete _msg;                                                      \
-    }                                                                   \
+#define CHECK_OP(name, op, lhs, rhs)                                     \
+  do {                                                                   \
+    if (std::string* _msg =                                              \
+            ::v8::base::Check##name##Impl<decltype(lhs), decltype(rhs)>( \
+                (lhs), (rhs), #lhs " " #op " " #rhs)) {                  \
+      V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str());  \
+      delete _msg;                                                       \
+    }                                                                    \
   } while (0)
 
 #else
@@ -69,17 +70,31 @@
 // Make all CHECK functions discard their log strings to reduce code
 // bloat for official release builds.
 
-#define CHECK_OP(name, op, lhs, rhs) CHECK((lhs)op(rhs))
+#define CHECK_OP(name, op, lhs, rhs)                                         \
+  do {                                                                       \
+    bool _cmp =                                                              \
+        ::v8::base::Cmp##name##Impl<decltype(lhs), decltype(rhs)>(lhs, rhs); \
+    CHECK_WITH_MSG(_cmp, #lhs " " #op " " #rhs);                             \
+  } while (0)
 
 #endif
 
+// Helper to determine how to pass values: Pass scalars and arrays by value,
+// others by const reference. std::decay<T> provides the type which should be
+// used to pass T by value, e.g. converts array to pointer and removes const,
+// volatile and reference.
+template <typename T>
+struct PassType : public std::conditional<
+                      std::is_scalar<typename std::decay<T>::type>::value,
+                      typename std::decay<T>::type, T const&> {};
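+// For example, PassType<int[4]>::type is int* (decayed scalar, by value) and
+// PassType<std::string>::type is std::string const&.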
 
 // Build the error message string.  This is separate from the "Impl"
 // function template because it is not performance critical and so can
 // be out of line, while the "Impl" code should be inline. Caller
 // takes ownership of the returned string.
 template <typename Lhs, typename Rhs>
-std::string* MakeCheckOpString(Lhs const& lhs, Rhs const& rhs,
+std::string* MakeCheckOpString(typename PassType<Lhs>::type lhs,
+                               typename PassType<Rhs>::type rhs,
                                char const* msg) {
   std::ostringstream ss;
   ss << msg << " (" << lhs << " vs. " << rhs << ")";
@@ -90,7 +105,7 @@
 // in logging.cc.
 #define DEFINE_MAKE_CHECK_OP_STRING(type)                                    \
   extern template V8_BASE_EXPORT std::string* MakeCheckOpString<type, type>( \
-      type const&, type const&, char const*);
+      type, type, char const*);
 DEFINE_MAKE_CHECK_OP_STRING(int)
 DEFINE_MAKE_CHECK_OP_STRING(long)       // NOLINT(runtime/int)
 DEFINE_MAKE_CHECK_OP_STRING(long long)  // NOLINT(runtime/int)
@@ -101,27 +116,77 @@
 DEFINE_MAKE_CHECK_OP_STRING(void const*)
 #undef DEFINE_MAKE_CHECK_OP_STRING
 
+// is_signed_vs_unsigned::value is true if both types are integral, Lhs is
+// signed, and Rhs is unsigned. False in all other cases.
+template <typename Lhs, typename Rhs>
+struct is_signed_vs_unsigned {
+  enum : bool {
+    value = std::is_integral<Lhs>::value && std::is_integral<Rhs>::value &&
+            std::is_signed<Lhs>::value && std::is_unsigned<Rhs>::value
+  };
+};
+// Same thing, other way around: Lhs is unsigned, Rhs signed.
+template <typename Lhs, typename Rhs>
+struct is_unsigned_vs_signed : public is_signed_vs_unsigned<Rhs, Lhs> {};
+
+// Specialize the compare functions for signed vs. unsigned comparisons.
+// std::enable_if ensures that this template is only instantiable if both Lhs
+// and Rhs are integral types, and their signedness does not match.
+#define MAKE_UNSIGNED(Type, value) \
+  static_cast<typename std::make_unsigned<Type>::type>(value)
+#define DEFINE_SIGNED_MISMATCH_COMP(CHECK, NAME, IMPL)                  \
+  template <typename Lhs, typename Rhs>                                 \
+  V8_INLINE typename std::enable_if<CHECK<Lhs, Rhs>::value, bool>::type \
+      Cmp##NAME##Impl(Lhs const& lhs, Rhs const& rhs) {                 \
+    return IMPL;                                                        \
+  }
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, EQ,
+                            lhs >= 0 && MAKE_UNSIGNED(Lhs, lhs) == rhs)
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LT,
+                            lhs < 0 || MAKE_UNSIGNED(Lhs, lhs) < rhs)
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LE,
+                            lhs <= 0 || MAKE_UNSIGNED(Lhs, lhs) <= rhs)
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, NE, !CmpEQImpl(lhs, rhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, GT, !CmpLEImpl(lhs, rhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, GE, !CmpLTImpl(lhs, rhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, EQ, CmpEQImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, NE, CmpNEImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, LT, CmpGTImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, LE, CmpGEImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, GT, CmpLTImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, GE, CmpLEImpl(rhs, lhs))
+#undef MAKE_UNSIGNED
+#undef DEFINE_SIGNED_MISMATCH_COMP
 
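Why the specializations above are needed: under the usual arithmetic conversions a signed operand is converted to unsigned before a built-in comparison, which gives the wrong answer for negative values. A standalone sketch, not part of this header (CmpLTIntVsUnsigned is a hypothetical stand-in for the macro-generated CmpLTImpl):

#include <cassert>

// Mirrors DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LT, ...) for the
// concrete case of int vs. unsigned.
bool CmpLTIntVsUnsigned(int lhs, unsigned rhs) {
  return lhs < 0 || static_cast<unsigned>(lhs) < rhs;
}

int main() {
  // Built-in comparison: -1 is converted to 0xFFFFFFFFu, so the result is
  // false even though -1 < 2 mathematically (compilers typically warn here).
  assert(!(-1 < 2u));
  // The mismatch-aware helper returns the mathematically expected result.
  assert(CmpLTIntVsUnsigned(-1, 2u));
  return 0;
}
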
 // Helper functions for CHECK_OP macro.
-// The (int, int) specialization works around the issue that the compiler
-// will not instantiate the template version of the function on values of
-// unnamed enum type - see comment below.
 // The (float, float) and (double, double) instantiations are explicitly
-// externialized to ensure proper 32/64-bit comparisons on x86.
+// externalized to ensure proper 32/64-bit comparisons on x86.
+// The Cmp##NAME##Impl function is only instantiable if one of the two types is
+// not integral or their signedness matches (i.e. whenever no specialization is
+// required, see above). Otherwise it is disabled by the enable_if construct,
+// and the compiler will pick a specialization from above.
 #define DEFINE_CHECK_OP_IMPL(NAME, op)                                         \
   template <typename Lhs, typename Rhs>                                        \
-  V8_INLINE std::string* Check##NAME##Impl(Lhs const& lhs, Rhs const& rhs,     \
-                                           char const* msg) {                  \
-    return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
+  V8_INLINE                                                                    \
+      typename std::enable_if<!is_signed_vs_unsigned<Lhs, Rhs>::value &&       \
+                                  !is_unsigned_vs_signed<Lhs, Rhs>::value,     \
+                              bool>::type                                      \
+          Cmp##NAME##Impl(typename PassType<Lhs>::type lhs,                    \
+                          typename PassType<Rhs>::type rhs) {                  \
+    return lhs op rhs;                                                         \
   }                                                                            \
-  V8_INLINE std::string* Check##NAME##Impl(int lhs, int rhs,                   \
+  template <typename Lhs, typename Rhs>                                        \
+  V8_INLINE std::string* Check##NAME##Impl(typename PassType<Lhs>::type lhs,   \
+                                           typename PassType<Rhs>::type rhs,   \
                                            char const* msg) {                  \
-    return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
+    bool cmp = Cmp##NAME##Impl<Lhs, Rhs>(lhs, rhs);                            \
+    return V8_LIKELY(cmp) ? nullptr                                            \
+                          : MakeCheckOpString<Lhs, Rhs>(lhs, rhs, msg);        \
   }                                                                            \
   extern template V8_BASE_EXPORT std::string* Check##NAME##Impl<float, float>( \
-      float const& lhs, float const& rhs, char const* msg);                    \
+      float lhs, float rhs, char const* msg);                                  \
   extern template V8_BASE_EXPORT std::string*                                  \
-      Check##NAME##Impl<double, double>(double const& lhs, double const& rhs,  \
+      Check##NAME##Impl<double, double>(double lhs, double rhs,                \
                                         char const* msg);
 DEFINE_CHECK_OP_IMPL(EQ, ==)
 DEFINE_CHECK_OP_IMPL(NE, !=)
@@ -139,12 +204,8 @@
 #define CHECK_GT(lhs, rhs) CHECK_OP(GT, >, lhs, rhs)
 #define CHECK_NULL(val) CHECK((val) == nullptr)
 #define CHECK_NOT_NULL(val) CHECK((val) != nullptr)
-#define CHECK_IMPLIES(lhs, rhs) CHECK(!(lhs) || (rhs))
-
-
-// Exposed for making debugging easier (to see where your function is being
-// called, just add a call to DumpBacktrace).
-void DumpBacktrace();
+#define CHECK_IMPLIES(lhs, rhs) \
+  CHECK_WITH_MSG(!(lhs) || (rhs), #lhs " implies " #rhs)
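+// For example, CHECK_IMPLIES(ptr != nullptr, size > 0) now fails with the
+// message "Check failed: ptr != nullptr implies size > 0." instead of the
+// harder-to-read negated disjunction.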
 
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/macros.h b/src/base/macros.h
index e386617..33a0ef0 100644
--- a/src/base/macros.h
+++ b/src/base/macros.h
@@ -282,23 +282,4 @@
   return RoundDown<T>(static_cast<T>(x + m - 1), m);
 }
 
-
-namespace v8 {
-namespace base {
-
-// TODO(yangguo): This is a poor man's replacement for std::is_fundamental,
-// which requires C++11. Switch to std::is_fundamental once possible.
-template <typename T>
-inline bool is_fundamental() {
-  return false;
-}
-
-template <>
-inline bool is_fundamental<uint8_t>() {
-  return true;
-}
-
-}  // namespace base
-}  // namespace v8
-
 #endif   // V8_BASE_MACROS_H_
diff --git a/src/base/platform/platform-linux.cc b/src/base/platform/platform-linux.cc
index a35d423..cd52dfe 100644
--- a/src/base/platform/platform-linux.cc
+++ b/src/base/platform/platform-linux.cc
@@ -19,7 +19,7 @@
 // executable. Otherwise, OS raises an exception when executing code
 // in that page.
 #include <errno.h>
-#include <fcntl.h>      // open
+#include <fcntl.h>  // open
 #include <stdarg.h>
 #include <strings.h>    // index
 #include <sys/mman.h>   // mmap & munmap
@@ -30,7 +30,7 @@
 // GLibc on ARM defines mcontext_t has a typedef for 'struct sigcontext'.
 // Old versions of the C library <signal.h> didn't define the type.
 #if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
-    (defined(__arm__) || defined(__aarch64__)) && \
+    (defined(__arm__) || defined(__aarch64__)) &&                 \
     !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
 #include <asm/sigcontext.h>  // NOLINT
 #endif
@@ -49,21 +49,19 @@
 namespace v8 {
 namespace base {
 
-
 #ifdef __arm__
 
 bool OS::ArmUsingHardFloat() {
-  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
-  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
-  // We use these as well as a couple of other defines to statically determine
-  // what FP ABI used.
-  // GCC versions 4.4 and below don't support hard-fp.
-  // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
-  // __ARM_PCS_VFP.
+// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
+// the Floating Point ABI used (PCS stands for Procedure Call Standard).
+// We use these as well as a couple of other defines to statically determine
+// what FP ABI used.
+// GCC versions 4.4 and below don't support hard-fp.
+// GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
+// __ARM_PCS_VFP.
 
-#define GCC_VERSION (__GNUC__ * 10000                                          \
-                     + __GNUC_MINOR__ * 100                                    \
-                     + __GNUC_PATCHLEVEL__)
+#define GCC_VERSION \
+  (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
 #if GCC_VERSION >= 40600 && !defined(__clang__)
 #if defined(__ARM_PCS_VFP)
   return true;
@@ -78,10 +76,11 @@
 #if defined(__ARM_PCS_VFP)
   return true;
 #elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
-      !defined(__VFP_FP__)
+    !defined(__VFP_FP__)
   return false;
 #else
-#error "Your version of compiler does not report the FP ABI compiled for."     \
+#error \
+    "Your version of compiler does not report the FP ABI compiled for."     \
        "Please report it on this issue"                                        \
        "http://code.google.com/p/v8/issues/detail?id=2140"
 
@@ -92,17 +91,15 @@
 
 #endif  // def __arm__
 
-
 const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
   if (std::isnan(time)) return "";
-  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  time_t tv = static_cast<time_t>(std::floor(time / msPerSecond));
   struct tm tm;
   struct tm* t = localtime_r(&tv, &tm);
   if (!t || !t->tm_zone) return "";
   return t->tm_zone;
 }
 
-
 double OS::LocalTimeOffset(TimezoneCache* cache) {
   time_t tv = time(NULL);
   struct tm tm;
@@ -112,9 +109,7 @@
                              (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
 }
 
-
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
+void* OS::Allocate(const size_t requested, size_t* allocated,
                    bool is_executable) {
   const size_t msize = RoundUp(requested, AllocateAlignment());
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
@@ -125,7 +120,6 @@
   return mbase;
 }
 
-
 std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   std::vector<SharedLibraryAddress> result;
   // This function assumes that the layout of the file is as follows:
@@ -169,8 +163,8 @@
         lib_name[strlen(lib_name) - 1] = '\0';
       } else {
         // No library name found, just record the raw address range.
-        snprintf(lib_name, kLibNameLen,
-                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+        snprintf(lib_name, kLibNameLen, "%08" V8PRIxPTR "-%08" V8PRIxPTR, start,
+                 end);
       }
       result.push_back(SharedLibraryAddress(lib_name, start, end));
     } else {
@@ -187,7 +181,6 @@
   return result;
 }
 
-
 void OS::SignalCodeMovingGC() {
   // Support for ll_prof.py.
   //
@@ -203,38 +196,30 @@
     OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
     OS::Abort();
   }
-  void* addr = mmap(OS::GetRandomMmapAddr(), size,
-                    PROT_READ | PROT_EXEC,
+  void* addr = mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_EXEC,
                     MAP_PRIVATE, fileno(f), 0);
   DCHECK_NE(MAP_FAILED, addr);
   OS::Free(addr, size);
   fclose(f);
 }
 
-
 // Constants used for mmap.
 static const int kMmapFd = -1;
 static const int kMmapFdOffset = 0;
 
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}
 
 VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
+    : address_(ReserveRegion(size)), size_(size) {}
 
 VirtualMemory::VirtualMemory(size_t size, size_t alignment)
     : address_(NULL), size_(0) {
   DCHECK((alignment % OS::AllocateAlignment()) == 0);
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
+  size_t request_size =
+      RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation =
+      mmap(OS::GetRandomMmapAddr(), request_size, PROT_NONE,
+           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
   if (reservation == MAP_FAILED) return;
 
   uint8_t* base = static_cast<uint8_t*>(reservation);
@@ -266,7 +251,6 @@
 #endif
 }
 
-
 VirtualMemory::~VirtualMemory() {
   if (IsReserved()) {
     bool result = ReleaseRegion(address(), size());
@@ -275,44 +259,33 @@
   }
 }
 
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
+bool VirtualMemory::IsReserved() { return address_ != NULL; }
 
 void VirtualMemory::Reset() {
   address_ = NULL;
   size_ = 0;
 }
 
-
 bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
   CHECK(InVM(address, size));
   return CommitRegion(address, size, is_executable);
 }
 
-
 bool VirtualMemory::Uncommit(void* address, size_t size) {
   CHECK(InVM(address, size));
   return UncommitRegion(address, size);
 }
 
-
 bool VirtualMemory::Guard(void* address) {
   CHECK(InVM(address, OS::CommitPageSize()));
   OS::Guard(address, OS::CommitPageSize());
   return true;
 }
 
-
 void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
+  void* result =
+      mmap(OS::GetRandomMmapAddr(), size, PROT_NONE,
+           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
 
   if (result == MAP_FAILED) return NULL;
 
@@ -322,14 +295,10 @@
   return result;
 }
 
-
 bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
   int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd,
+  if (MAP_FAILED == mmap(base, size, prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
                          kMmapFdOffset)) {
     return false;
   }
@@ -337,13 +306,9 @@
   return true;
 }
 
-
 bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
+  return mmap(base, size, PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
               kMmapFdOffset) != MAP_FAILED;
 }
 
@@ -363,10 +328,7 @@
   return munmap(base, size) == 0;
 }
 
-
-bool VirtualMemory::HasLazyCommits() {
-  return true;
-}
+bool VirtualMemory::HasLazyCommits() { return true; }
 
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/platform/platform-posix.cc b/src/base/platform/platform-posix.cc
index 3f4165d..f195649 100644
--- a/src/base/platform/platform-posix.cc
+++ b/src/base/platform/platform-posix.cc
@@ -99,6 +99,20 @@
   return page_size;
 }
 
+void* OS::AllocateGuarded(const size_t requested) {
+  size_t allocated = 0;
+  const bool is_executable = false;
+  void* mbase = OS::Allocate(requested, &allocated, is_executable);
+  if (allocated != requested) {
+    OS::Free(mbase, allocated);
+    return nullptr;
+  }
+  if (mbase == nullptr) {
+    return nullptr;
+  }
+  OS::Guard(mbase, requested);
+  return mbase;
+}
 
 void OS::Free(void* address, const size_t size) {
   // TODO(1240712): munmap has a return value which is ignored here.
@@ -129,6 +143,15 @@
 #endif
 }
 
+// Make a region of memory readable and writable.
+void OS::Unprotect(void* address, const size_t size) {
+#if V8_OS_CYGWIN
+  DWORD oldprotect;
+  VirtualProtect(address, size, PAGE_READWRITE, &oldprotect);
+#else
+  mprotect(address, size, PROT_READ | PROT_WRITE);
+#endif
+}
 
 static LazyInstance<RandomNumberGenerator>::type
     platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
@@ -597,12 +620,15 @@
   result = pthread_attr_init(&attr);
   DCHECK_EQ(0, result);
   size_t stack_size = stack_size_;
-#if V8_OS_AIX
   if (stack_size == 0) {
-    // Default on AIX is 96KB -- bump up to 2MB
+#if V8_OS_MACOSX
+    // Default on Mac OS X is 512kB -- bump up to 1MB
+    stack_size = 1 * 1024 * 1024;
+#elif V8_OS_AIX
+    // Default on AIX is 96kB -- bump up to 2MB
     stack_size = 2 * 1024 * 1024;
-  }
 #endif
+  }
   if (stack_size > 0) {
     result = pthread_attr_setstacksize(&attr, stack_size);
     DCHECK_EQ(0, result);
diff --git a/src/base/platform/platform-win32.cc b/src/base/platform/platform-win32.cc
index 080e6bc..60b60fd 100644
--- a/src/base/platform/platform-win32.cc
+++ b/src/base/platform/platform-win32.cc
@@ -797,6 +797,9 @@
   return mbase;
 }
 
+void* OS::AllocateGuarded(const size_t requested) {
+  return VirtualAlloc(nullptr, requested, MEM_RESERVE, PAGE_NOACCESS);
+}
 
 void OS::Free(void* address, const size_t size) {
   // TODO(1240712): VirtualFree has a return value which is ignored here.
@@ -821,6 +824,10 @@
   VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
 }
 
+void OS::Unprotect(void* address, const size_t size) {
+  LPVOID result = VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE);
+  DCHECK_IMPLIES(result != nullptr, GetLastError() == 0);
+}
 
 void OS::Sleep(TimeDelta interval) {
   ::Sleep(static_cast<DWORD>(interval.InMilliseconds()));
diff --git a/src/base/platform/platform.h b/src/base/platform/platform.h
index 5d570e7..374cddf 100644
--- a/src/base/platform/platform.h
+++ b/src/base/platform/platform.h
@@ -178,6 +178,11 @@
                         bool is_executable);
   static void Free(void* address, const size_t size);
 
+  // Allocates a region of memory that is inaccessible. On Windows this reserves
+  // but does not commit the memory. On Linux, it is equivalent to a call to
+  // Allocate() followed by Guard().
+  static void* AllocateGuarded(const size_t requested);
+
   // This is the granularity at which the ProtectCode(...) call can set page
   // permissions.
   static intptr_t CommitPageSize();
@@ -188,6 +193,9 @@
   // Assign memory as a guard page so that access will cause an exception.
   static void Guard(void* address, const size_t size);
 
+  // Make a region of memory readable and writable.
+  static void Unprotect(void* address, const size_t size);
+
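+  // A typical pairing (sketch): reserve an inaccessible region up front with
+  // AllocateGuarded(), then make it usable on first touch:
+  //   void* region = OS::AllocateGuarded(size);
+  //   if (region != nullptr) OS::Unprotect(region, size);
+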
   // Generate a random address to be used for hinting mmap().
   static void* GetRandomMmapAddr();
 
diff --git a/src/bit-vector.cc b/src/bit-vector.cc
index 0fbb018..e6aec7e 100644
--- a/src/bit-vector.cc
+++ b/src/bit-vector.cc
@@ -5,6 +5,7 @@
 #include "src/bit-vector.h"
 
 #include "src/base/bits.h"
+#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/bit-vector.h b/src/bit-vector.h
index 13f9e97..fd61489 100644
--- a/src/bit-vector.h
+++ b/src/bit-vector.h
@@ -166,7 +166,7 @@
     return true;
   }
 
-  bool Equals(const BitVector& other) {
+  bool Equals(const BitVector& other) const {
     for (int i = 0; i < data_length_; i++) {
       if (data_[i] != other.data_[i]) return false;
     }
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index ba5f4d5..3e095ea 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -9,18 +9,24 @@
 #include "src/base/ieee754.h"
 #include "src/code-stubs.h"
 #include "src/compiler.h"
+#include "src/debug/debug.h"
 #include "src/extensions/externalize-string-extension.h"
 #include "src/extensions/free-buffer-extension.h"
 #include "src/extensions/gc-extension.h"
 #include "src/extensions/ignition-statistics-extension.h"
 #include "src/extensions/statistics-extension.h"
 #include "src/extensions/trigger-failure-extension.h"
+#include "src/ffi/ffi-compiler.h"
 #include "src/heap/heap.h"
 #include "src/isolate-inl.h"
 #include "src/snapshot/natives.h"
 #include "src/snapshot/snapshot.h"
 #include "src/wasm/wasm-js.h"
 
+#if V8_I18N_SUPPORT
+#include "src/i18n.h"
+#endif  // V8_I18N_SUPPORT
+
 namespace v8 {
 namespace internal {
 
@@ -140,7 +146,8 @@
  public:
   Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
           v8::Local<v8::ObjectTemplate> global_proxy_template,
-          v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
+          size_t context_snapshot_index,
+          v8::DeserializeInternalFieldsCallback internal_fields_deserializer,
           GlobalContextType context_type);
   Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
           v8::Local<v8::ObjectTemplate> global_proxy_template);
@@ -168,6 +175,7 @@
 
   void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
   void CreateIteratorMaps(Handle<JSFunction> empty);
+  void CreateAsyncIteratorMaps();
   void CreateAsyncFunctionMaps(Handle<JSFunction> empty);
   void CreateJSProxyMaps();
 
@@ -178,20 +186,19 @@
   // in through the API.  We call this regardless of whether we are building a
   // context from scratch or using a deserialized one from the partial snapshot
   // but in the latter case we don't use the objects it produces directly, as
-  // we have to used the deserialized ones that are linked together with the
-  // rest of the context snapshot.
+  // we have to use the deserialized ones that are linked together with the
+  // rest of the context snapshot. At the end we link the global proxy and the
+  // context to each other.
   Handle<JSGlobalObject> CreateNewGlobals(
       v8::Local<v8::ObjectTemplate> global_proxy_template,
       Handle<JSGlobalProxy> global_proxy);
-  // Hooks the given global proxy into the context.  If the context was created
-  // by deserialization then this will unhook the global proxy that was
-  // deserialized, leaving the GC to pick it up.
-  void HookUpGlobalProxy(Handle<JSGlobalObject> global_object,
-                         Handle<JSGlobalProxy> global_proxy);
   // Similarly, we want to use the global that has been created by the templates
   // passed through the API.  The global from the snapshot is detached from the
   // other objects in the snapshot.
   void HookUpGlobalObject(Handle<JSGlobalObject> global_object);
+  // Hooks the given global proxy into the context in the case we do not
+  // replace the global object from the deserialized native context.
+  void HookUpGlobalProxy(Handle<JSGlobalProxy> global_proxy);
   // The native context has a ScriptContextTable that store declarative bindings
   // made in script scopes.  Add a "this" binding to that table pointing to the
   // global proxy.
@@ -212,6 +219,8 @@
   HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
 #undef DECLARE_FEATURE_INITIALIZATION
 
+  void InitializeGlobal_enable_fast_array_builtins();
+
   Handle<JSFunction> InstallArrayBuffer(Handle<JSObject> target,
                                         const char* name, Builtins::Name call,
                                         BuiltinFunctionId id);
@@ -293,6 +302,7 @@
   // prototype, maps.
   Handle<Map> sloppy_function_map_writable_prototype_;
   Handle<Map> strict_function_map_writable_prototype_;
+  Handle<Map> class_function_map_;
   Handle<JSFunction> strict_poison_function_;
   Handle<JSFunction> restricted_function_properties_thrower_;
 
@@ -310,10 +320,12 @@
     MaybeHandle<JSGlobalProxy> maybe_global_proxy,
     v8::Local<v8::ObjectTemplate> global_proxy_template,
     v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
+    v8::DeserializeInternalFieldsCallback internal_fields_deserializer,
     GlobalContextType context_type) {
   HandleScope scope(isolate_);
   Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template,
-                  extensions, context_snapshot_index, context_type);
+                  context_snapshot_index, internal_fields_deserializer,
+                  context_type);
   Handle<Context> env = genesis.result();
   if (env.is_null() || !InstallExtensions(env, extensions)) {
     return Handle<Context>();
@@ -332,14 +344,15 @@
 }
 
 void Bootstrapper::DetachGlobal(Handle<Context> env) {
-  env->GetIsolate()->counters()->errors_thrown_per_context()->AddSample(
-    env->GetErrorsThrown());
+  Isolate* isolate = env->GetIsolate();
+  isolate->counters()->errors_thrown_per_context()->AddSample(
+      env->GetErrorsThrown());
 
-  Factory* factory = env->GetIsolate()->factory();
+  Heap* heap = isolate->heap();
   Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
-  global_proxy->set_native_context(*factory->null_value());
-  JSObject::ForceSetPrototype(global_proxy, factory->null_value());
-  global_proxy->map()->SetConstructor(*factory->null_value());
+  global_proxy->set_native_context(heap->null_value());
+  JSObject::ForceSetPrototype(global_proxy, isolate->factory()->null_value());
+  global_proxy->map()->SetConstructor(heap->null_value());
   if (FLAG_track_detached_contexts) {
     env->GetIsolate()->AddDetachedContext(env);
   }
@@ -354,7 +367,6 @@
   if (target->IsJSGlobalObject()) {
     function->shared()->set_instance_class_name(*function_name);
   }
-  function->shared()->set_native(true);
 }
 
 void InstallFunction(Handle<JSObject> target, Handle<JSFunction> function,
@@ -372,11 +384,14 @@
   Factory* factory = isolate->factory();
   Handle<Code> call_code(isolate->builtins()->builtin(call));
   Handle<JSObject> prototype;
-  return maybe_prototype.ToHandle(&prototype)
-             ? factory->NewFunction(name, call_code, prototype, type,
-                                    instance_size, strict_function_map)
-             : factory->NewFunctionWithoutPrototype(name, call_code,
-                                                    strict_function_map);
+  Handle<JSFunction> result =
+      maybe_prototype.ToHandle(&prototype)
+          ? factory->NewFunction(name, call_code, prototype, type,
+                                 instance_size, strict_function_map)
+          : factory->NewFunctionWithoutPrototype(name, call_code,
+                                                 strict_function_map);
+  result->shared()->set_native(true);
+  return result;
 }
 
 Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<Name> name,
@@ -460,14 +475,12 @@
           .ToHandleChecked();
   Handle<JSFunction> getter =
       SimpleCreateFunction(isolate, getter_name, call_getter, 0, true);
-  getter->shared()->set_native(true);
 
   Handle<String> setter_name =
       Name::ToFunctionName(name, isolate->factory()->set_string())
           .ToHandleChecked();
   Handle<JSFunction> setter =
       SimpleCreateFunction(isolate, setter_name, call_setter, 1, true);
-  setter->shared()->set_native(true);
 
   JSObject::DefineAccessor(base, name, getter, setter, attribs).Check();
 }
@@ -483,7 +496,6 @@
           .ToHandleChecked();
   Handle<JSFunction> getter =
       SimpleCreateFunction(isolate, getter_name, call, 0, adapt);
-  getter->shared()->set_native(true);
 
   Handle<Object> setter = isolate->factory()->undefined_value();
 
@@ -507,6 +519,22 @@
   return fun;
 }
 
+void InstallConstant(Isolate* isolate, Handle<JSObject> holder,
+                     const char* name, Handle<Object> value) {
+  JSObject::AddProperty(
+      holder, isolate->factory()->NewStringFromAsciiChecked(name), value,
+      static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+}
+
+void InstallSpeciesGetter(Handle<JSFunction> constructor) {
+  Factory* factory = constructor->GetIsolate()->factory();
+  // TODO(adamk): We should be able to share a SharedFunctionInfo
+  // between all these JSFunctions.
+  SimpleInstallGetter(constructor, factory->symbol_species_string(),
+                      factory->species_symbol(), Builtins::kReturnReceiver,
+                      true);
+}
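+
+// A hedged, JS-level sketch of the property this sets up (illustrative only;
+// Builtins::kReturnReceiver simply returns the receiver):
+//
+//   Object.defineProperty(C, Symbol.species,
+//                         { get() { return this; }, configurable: true });
+//
+// so a subclass of C inherits a @@species getter that yields the subclass.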
+
 }  // namespace
 
 Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
@@ -589,8 +617,11 @@
   Handle<String> source = factory->NewStringFromStaticChars("() {}");
   Handle<Script> script = factory->NewScript(source);
   script->set_type(Script::TYPE_NATIVE);
+  Handle<FixedArray> infos = factory->NewFixedArray(2);
+  script->set_shared_function_infos(*infos);
   empty_function->shared()->set_start_position(0);
   empty_function->shared()->set_end_position(source->length());
+  empty_function->shared()->set_function_literal_id(1);
   empty_function->shared()->DontAdaptArguments();
   SharedFunctionInfo::SetScript(handle(empty_function->shared()), script);
 
@@ -677,6 +708,10 @@
   strict_function_map_writable_prototype_ = factory()->CreateStrictFunctionMap(
       FUNCTION_WITH_WRITEABLE_PROTOTYPE, empty);
 
+  // Allocate map for classes
+  class_function_map_ = factory()->CreateClassFunctionMap(empty);
+  native_context()->set_class_function_map(*class_function_map_);
+
   // Now that the strict mode function map is available, set up the
   // restricted "arguments" and "caller" getters.
   AddRestrictedFunctionProperties(empty);
@@ -689,8 +724,7 @@
 
   Handle<JSFunction> iterator_prototype_iterator = SimpleCreateFunction(
       isolate(), factory()->NewStringFromAsciiChecked("[Symbol.iterator]"),
-      Builtins::kIteratorPrototypeIterator, 0, true);
-  iterator_prototype_iterator->shared()->set_native(true);
+      Builtins::kReturnReceiver, 0, true);
 
   JSObject::AddProperty(iterator_prototype, factory()->iterator_symbol(),
                         iterator_prototype_iterator, DONT_ENUM);
@@ -729,10 +763,12 @@
   SimpleInstallFunction(generator_object_prototype, "throw",
                         Builtins::kGeneratorPrototypeThrow, 1, true);
 
-  // Internal version of generator_prototype_next, flagged as non-native.
+  // Internal version of generator_prototype_next, flagged as non-native such
+  // that it doesn't show up in Error traces.
   Handle<JSFunction> generator_next_internal =
       SimpleCreateFunction(isolate(), factory()->next_string(),
                            Builtins::kGeneratorPrototypeNext, 1, true);
+  generator_next_internal->shared()->set_native(false);
   native_context()->set_generator_next_internal(*generator_next_internal);
 
   // Create maps for generator functions and their prototypes.  Store those
@@ -741,21 +777,11 @@
   // 04-14-15, section 25.2.4.3).
   Handle<Map> strict_function_map(strict_function_map_writable_prototype_);
   // Generator functions do not have "caller" or "arguments" accessors.
-  Handle<Map> sloppy_generator_function_map =
-      Map::Copy(strict_function_map, "SloppyGeneratorFunction");
-  sloppy_generator_function_map->set_is_constructor(false);
-  Map::SetPrototype(sloppy_generator_function_map,
-                    generator_function_prototype);
-  native_context()->set_sloppy_generator_function_map(
-      *sloppy_generator_function_map);
-
-  Handle<Map> strict_generator_function_map =
-      Map::Copy(strict_function_map, "StrictGeneratorFunction");
-  strict_generator_function_map->set_is_constructor(false);
-  Map::SetPrototype(strict_generator_function_map,
-                    generator_function_prototype);
-  native_context()->set_strict_generator_function_map(
-      *strict_generator_function_map);
+  Handle<Map> generator_function_map =
+      Map::Copy(strict_function_map, "GeneratorFunction");
+  generator_function_map->set_is_constructor(false);
+  Map::SetPrototype(generator_function_map, generator_function_prototype);
+  native_context()->set_generator_function_map(*generator_function_map);
 
   Handle<JSFunction> object_function(native_context()->object_function());
   Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
@@ -764,6 +790,50 @@
       *generator_object_prototype_map);
 }
 
+void Genesis::CreateAsyncIteratorMaps() {
+  // %AsyncIteratorPrototype%
+  // proposal-async-iteration/#sec-asynciteratorprototype
+  Handle<JSObject> async_iterator_prototype =
+      factory()->NewJSObject(isolate()->object_function(), TENURED);
+
+  Handle<JSFunction> async_iterator_prototype_iterator = SimpleCreateFunction(
+      isolate(), factory()->NewStringFromAsciiChecked("[Symbol.asyncIterator]"),
+      Builtins::kReturnReceiver, 0, true);
+
+  JSObject::AddProperty(async_iterator_prototype,
+                        factory()->async_iterator_symbol(),
+                        async_iterator_prototype_iterator, DONT_ENUM);
+
+  // %AsyncFromSyncIteratorPrototype%
+  // proposal-async-iteration/#sec-%asyncfromsynciteratorprototype%-object
+  Handle<JSObject> async_from_sync_iterator_prototype =
+      factory()->NewJSObject(isolate()->object_function(), TENURED);
+  SimpleInstallFunction(async_from_sync_iterator_prototype,
+                        factory()->next_string(),
+                        Builtins::kAsyncFromSyncIteratorPrototypeNext, 1, true);
+  SimpleInstallFunction(
+      async_from_sync_iterator_prototype, factory()->return_string(),
+      Builtins::kAsyncFromSyncIteratorPrototypeReturn, 1, true);
+  SimpleInstallFunction(
+      async_from_sync_iterator_prototype, factory()->throw_string(),
+      Builtins::kAsyncFromSyncIteratorPrototypeThrow, 1, true);
+
+  JSObject::AddProperty(
+      async_from_sync_iterator_prototype, factory()->to_string_tag_symbol(),
+      factory()->NewStringFromAsciiChecked("Async-from-Sync Iterator"),
+      static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+  JSObject::ForceSetPrototype(async_from_sync_iterator_prototype,
+                              async_iterator_prototype);
+
+  Handle<Map> async_from_sync_iterator_map = factory()->NewMap(
+      JS_ASYNC_FROM_SYNC_ITERATOR_TYPE, JSAsyncFromSyncIterator::kSize);
+  Map::SetPrototype(async_from_sync_iterator_map,
+                    async_from_sync_iterator_prototype);
+  native_context()->set_async_from_sync_iterator_map(
+      *async_from_sync_iterator_map);
+}
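+
+// As a rough, non-authoritative JS sketch of the adapter this map backs:
+// for-await-of over a sync iterator goes through something like
+//
+//   async next(v) {
+//     const { value, done } = syncIterator.next(v);
+//     return { value: await value, done };  // unwrap the value via a promise
+//   }
+//
+// per proposal-async-iteration, with syncIterator being the wrapped iterator.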
+
 void Genesis::CreateAsyncFunctionMaps(Handle<JSFunction> empty) {
   // %AsyncFunctionPrototype% intrinsic
   Handle<JSObject> async_function_prototype =
@@ -777,17 +847,11 @@
 
   Handle<Map> strict_function_map(
       native_context()->strict_function_without_prototype_map());
-  Handle<Map> sloppy_async_function_map =
-      Map::Copy(strict_function_map, "SloppyAsyncFunction");
-  sloppy_async_function_map->set_is_constructor(false);
-  Map::SetPrototype(sloppy_async_function_map, async_function_prototype);
-  native_context()->set_sloppy_async_function_map(*sloppy_async_function_map);
-
-  Handle<Map> strict_async_function_map =
-      Map::Copy(strict_function_map, "StrictAsyncFunction");
-  strict_async_function_map->set_is_constructor(false);
-  Map::SetPrototype(strict_async_function_map, async_function_prototype);
-  native_context()->set_strict_async_function_map(*strict_async_function_map);
+  Handle<Map> async_function_map =
+      Map::Copy(strict_function_map, "AsyncFunction");
+  async_function_map->set_is_constructor(false);
+  Map::SetPrototype(async_function_map, async_function_prototype);
+  native_context()->set_async_function_map(*async_function_map);
 }
 
 void Genesis::CreateJSProxyMaps() {
@@ -821,8 +885,8 @@
                              Handle<AccessorPair> accessor_pair) {
   DescriptorArray* descriptors = map->instance_descriptors();
   int idx = descriptors->SearchWithCache(map->GetIsolate(), *name, *map);
-  AccessorConstantDescriptor descriptor(name, accessor_pair, attributes);
-  descriptors->Replace(idx, &descriptor);
+  Descriptor d = Descriptor::AccessorConstant(name, accessor_pair, attributes);
+  descriptors->Replace(idx, &d);
 }
 
 void Genesis::AddRestrictedFunctionProperties(Handle<JSFunction> empty) {
@@ -976,30 +1040,42 @@
   global_proxy_function->shared()->set_instance_class_name(*global_name);
   global_proxy_function->initial_map()->set_is_access_check_needed(true);
   global_proxy_function->initial_map()->set_has_hidden_prototype(true);
+  native_context()->set_global_proxy_function(*global_proxy_function);
 
   // Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
   // Return the global proxy.
 
   factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
-  return global_object;
-}
 
-
-void Genesis::HookUpGlobalProxy(Handle<JSGlobalObject> global_object,
-                                Handle<JSGlobalProxy> global_proxy) {
   // Set the native context for the global object.
   global_object->set_native_context(*native_context());
   global_object->set_global_proxy(*global_proxy);
+  // Set the native context of the global proxy.
   global_proxy->set_native_context(*native_context());
-  // If we deserialized the context, the global proxy is already
-  // correctly set up. Otherwise it's undefined.
+  // Set the global proxy of the native context. If the native context has been
+  // deserialized, the global proxy is already correctly set up by the
+  // deserializer. Otherwise it's undefined.
   DCHECK(native_context()
              ->get(Context::GLOBAL_PROXY_INDEX)
              ->IsUndefined(isolate()) ||
          native_context()->global_proxy() == *global_proxy);
   native_context()->set_global_proxy(*global_proxy);
+
+  return global_object;
 }
 
+void Genesis::HookUpGlobalProxy(Handle<JSGlobalProxy> global_proxy) {
+  // Re-initialize the global proxy with the global proxy function from the
+  // snapshot, and then set up the link to the native context.
+  Handle<JSFunction> global_proxy_function(
+      native_context()->global_proxy_function());
+  factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
+  Handle<JSObject> global_object(
+      JSObject::cast(native_context()->global_object()));
+  JSObject::ForceSetPrototype(global_proxy, global_object);
+  global_proxy->set_native_context(*native_context());
+  DCHECK(native_context()->global_proxy() == *global_proxy);
+}
 
 void Genesis::HookUpGlobalObject(Handle<JSGlobalObject> global_object) {
   Handle<JSGlobalObject> global_object_from_snapshot(
@@ -1082,8 +1158,8 @@
   Handle<AccessorInfo> error_stack =
       Accessors::ErrorStackInfo(isolate, attribs);
   {
-    AccessorConstantDescriptor d(Handle<Name>(Name::cast(error_stack->name())),
-                                 error_stack, attribs);
+    Descriptor d = Descriptor::AccessorConstant(
+        Handle<Name>(Name::cast(error_stack->name())), error_stack, attribs);
     initial_map->AppendDescriptor(&d);
   }
 }
@@ -1116,6 +1192,8 @@
   Isolate* isolate = global_object->GetIsolate();
   Factory* factory = isolate->factory();
 
+  native_context()->set_osr_code_table(*factory->empty_fixed_array());
+
   Handle<ScriptContextTable> script_context_table =
       factory->NewScriptContextTable();
   native_context()->set_script_context_table(*script_context_table);
@@ -1129,8 +1207,6 @@
 
     SimpleInstallFunction(object_function, factory->assign_string(),
                           Builtins::kObjectAssign, 2, false);
-    SimpleInstallFunction(object_function, factory->create_string(),
-                          Builtins::kObjectCreate, 2, true);
     SimpleInstallFunction(object_function, "getOwnPropertyDescriptor",
                           Builtins::kObjectGetOwnPropertyDescriptor, 2, false);
     SimpleInstallFunction(object_function,
@@ -1147,6 +1223,11 @@
     SimpleInstallFunction(object_function, "seal",
                           Builtins::kObjectSeal, 1, false);
 
+    Handle<JSFunction> object_create =
+        SimpleInstallFunction(object_function, factory->create_string(),
+                              Builtins::kObjectCreate, 2, true);
+    native_context()->set_object_create(*object_create);
+
     Handle<JSFunction> object_define_properties = SimpleInstallFunction(
         object_function, "defineProperties",
         Builtins::kObjectDefineProperties, 2, true);
@@ -1232,20 +1313,8 @@
     // Setup the methods on the %FunctionPrototype%.
     SimpleInstallFunction(prototype, factory->apply_string(),
                           Builtins::kFunctionPrototypeApply, 2, false);
-
-    if (FLAG_minimal) {
-      SimpleInstallFunction(prototype, factory->bind_string(),
-                            Builtins::kFunctionPrototypeBind, 1, false);
-    } else {
-      FastFunctionBindStub bind_stub(isolate);
-      Handle<JSFunction> bind_function = factory->NewFunctionWithoutPrototype(
-          factory->bind_string(), bind_stub.GetCode(), false);
-      bind_function->shared()->DontAdaptArguments();
-      bind_function->shared()->set_length(1);
-      InstallFunction(prototype, bind_function, factory->bind_string(),
-                      DONT_ENUM);
-    }
-
+    SimpleInstallFunction(prototype, factory->bind_string(),
+                          Builtins::kFastFunctionPrototypeBind, 1, false);
     SimpleInstallFunction(prototype, factory->call_string(),
                           Builtins::kFunctionPrototypeCall, 1, false);
     SimpleInstallFunction(prototype, factory->toString_string(),
@@ -1272,6 +1341,17 @@
 
     sloppy_function_map_writable_prototype_->SetConstructor(*function_fun);
     strict_function_map_writable_prototype_->SetConstructor(*function_fun);
+    class_function_map_->SetConstructor(*function_fun);
+  }
+
+  {
+    // --- A s y n c F r o m S y n c I t e r a t o r
+    Handle<Code> code = isolate->builtins()->AsyncIteratorValueUnwrap();
+    Handle<SharedFunctionInfo> info =
+        factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
+    info->set_internal_formal_parameter_count(1);
+    info->set_length(1);
+    native_context()->set_async_iterator_value_unwrap_shared_fun(*info);
   }
 
   {  // --- A r r a y ---
@@ -1299,7 +1379,7 @@
     Handle<AccessorInfo> array_length =
         Accessors::ArrayLengthInfo(isolate, attribs);
     {  // Add length.
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(array_length->name())), array_length,
           attribs);
       initial_map->AppendDescriptor(&d);
@@ -1307,6 +1387,7 @@
 
     InstallWithIntrinsicDefaultProto(isolate, array_function,
                                      Context::ARRAY_FUNCTION_INDEX);
+    InstallSpeciesGetter(array_function);
 
     // Cache the array maps, needed by ArrayConstructorStub
     CacheInitialJSArrayMaps(native_context(), initial_map);
@@ -1349,6 +1430,7 @@
         isolate, factory->ArrayIterator_string(),
         JS_FAST_ARRAY_VALUE_ITERATOR_TYPE, JSArrayIterator::kSize,
         array_iterator_prototype, Builtins::kIllegal);
+    array_iterator_function->shared()->set_native(false);
     array_iterator_function->shared()->set_instance_class_name(
         isolate->heap()->ArrayIterator_string());
 
@@ -1523,8 +1605,8 @@
         Accessors::StringLengthInfo(isolate, attribs));
 
     {  // Add length.
-      AccessorConstantDescriptor d(factory->length_string(), string_length,
-                                   attribs);
+      Descriptor d = Descriptor::AccessorConstant(factory->length_string(),
+                                                  string_length, attribs);
       string_map->AppendDescriptor(&d);
     }
 
@@ -1563,6 +1645,10 @@
                           Builtins::kStringPrototypeLocaleCompare, 1, true);
     SimpleInstallFunction(prototype, "normalize",
                           Builtins::kStringPrototypeNormalize, 0, false);
+    SimpleInstallFunction(prototype, "replace",
+                          Builtins::kStringPrototypeReplace, 2, true);
+    SimpleInstallFunction(prototype, "split", Builtins::kStringPrototypeSplit,
+                          2, true);
     SimpleInstallFunction(prototype, "substr", Builtins::kStringPrototypeSubstr,
                           2, true);
     SimpleInstallFunction(prototype, "substring",
@@ -1577,13 +1663,22 @@
                           Builtins::kStringPrototypeTrimLeft, 0, false);
     SimpleInstallFunction(prototype, "trimRight",
                           Builtins::kStringPrototypeTrimRight, 0, false);
+    SimpleInstallFunction(prototype, "toLocaleLowerCase",
+                          Builtins::kStringPrototypeToLocaleLowerCase, 0,
+                          false);
+    SimpleInstallFunction(prototype, "toLocaleUpperCase",
+                          Builtins::kStringPrototypeToLocaleUpperCase, 0,
+                          false);
+    SimpleInstallFunction(prototype, "toLowerCase",
+                          Builtins::kStringPrototypeToLowerCase, 0, false);
+    SimpleInstallFunction(prototype, "toUpperCase",
+                          Builtins::kStringPrototypeToUpperCase, 0, false);
     SimpleInstallFunction(prototype, "valueOf",
                           Builtins::kStringPrototypeValueOf, 0, true);
 
     Handle<JSFunction> iterator = SimpleCreateFunction(
         isolate, factory->NewStringFromAsciiChecked("[Symbol.iterator]"),
         Builtins::kStringPrototypeIterator, 0, true);
-    iterator->shared()->set_native(true);
     iterator->shared()->set_builtin_function_id(kStringIterator);
     JSObject::AddProperty(prototype, factory->iterator_symbol(), iterator,
                           static_cast<PropertyAttributes>(DONT_ENUM));
@@ -1619,6 +1714,7 @@
         isolate, factory->NewStringFromAsciiChecked("StringIterator"),
         JS_STRING_ITERATOR_TYPE, JSStringIterator::kSize,
         string_iterator_prototype, Builtins::kIllegal);
+    string_iterator_function->shared()->set_native(false);
     native_context()->set_string_iterator_map(
         string_iterator_function->initial_map());
   }
@@ -1636,6 +1732,30 @@
     symbol_fun->shared()->DontAdaptArguments();
     native_context()->set_symbol_function(*symbol_fun);
 
+    // Install the Symbol.for and Symbol.keyFor functions.
+    SimpleInstallFunction(symbol_fun, "for", Builtins::kSymbolFor, 1, false);
+    SimpleInstallFunction(symbol_fun, "keyFor", Builtins::kSymbolKeyFor, 1,
+                          false);
+
+    // Install well-known symbols.
+    InstallConstant(isolate, symbol_fun, "hasInstance",
+                    factory->has_instance_symbol());
+    InstallConstant(isolate, symbol_fun, "isConcatSpreadable",
+                    factory->is_concat_spreadable_symbol());
+    InstallConstant(isolate, symbol_fun, "iterator",
+                    factory->iterator_symbol());
+    InstallConstant(isolate, symbol_fun, "match", factory->match_symbol());
+    InstallConstant(isolate, symbol_fun, "replace", factory->replace_symbol());
+    InstallConstant(isolate, symbol_fun, "search", factory->search_symbol());
+    InstallConstant(isolate, symbol_fun, "species", factory->species_symbol());
+    InstallConstant(isolate, symbol_fun, "split", factory->split_symbol());
+    InstallConstant(isolate, symbol_fun, "toPrimitive",
+                    factory->to_primitive_symbol());
+    InstallConstant(isolate, symbol_fun, "toStringTag",
+                    factory->to_string_tag_symbol());
+    InstallConstant(isolate, symbol_fun, "unscopables",
+                    factory->unscopables_symbol());
+
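+    // For example (hedged; behaviour per ECMA-262, not specific to this file):
+    //   Symbol.for("k") === Symbol.for("k")       // same registry entry
+    //   Symbol.keyFor(Symbol.for("k")) === "k"    // round-trips the key
+    //   Symbol.keyFor(Symbol("k")) === undefined  // not in the registry
+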
     // Install the @@toStringTag property on the {prototype}.
     JSObject::AddProperty(
         prototype, factory->to_string_tag_symbol(),
@@ -1770,7 +1890,7 @@
     SimpleInstallFunction(prototype, "setUTCSeconds",
                           Builtins::kDatePrototypeSetUTCSeconds, 2, false);
     SimpleInstallFunction(prototype, "valueOf", Builtins::kDatePrototypeValueOf,
-                          0, false);
+                          0, true);
     SimpleInstallFunction(prototype, "getYear", Builtins::kDatePrototypeGetYear,
                           0, true);
     SimpleInstallFunction(prototype, "setYear", Builtins::kDatePrototypeSetYear,
@@ -1800,6 +1920,155 @@
     to_primitive->shared()->set_length(1);
   }
 
+  {
+    Handle<Code> code = isolate->builtins()->PromiseGetCapabilitiesExecutor();
+    Handle<SharedFunctionInfo> info =
+        factory->NewSharedFunctionInfo(factory->empty_string(), code, true);
+    info->SetConstructStub(*isolate->builtins()->JSBuiltinsConstructStub());
+    info->set_instance_class_name(isolate->heap()->Object_string());
+    info->set_internal_formal_parameter_count(2);
+    info->set_length(2);
+    native_context()->set_promise_get_capabilities_executor_shared_fun(*info);
+
+    // %new_promise_capability(C, debugEvent)
+    Handle<JSFunction> new_promise_capability =
+        SimpleCreateFunction(isolate, factory->empty_string(),
+                             Builtins::kNewPromiseCapability, 2, false);
+    InstallWithIntrinsicDefaultProto(isolate, new_promise_capability,
+                                     Context::NEW_PROMISE_CAPABILITY_INDEX);
+  }
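+
+  // Hedged sketch of the spec operation these back; the names below are
+  // illustrative JS, not V8 identifiers:
+  //
+  //   function NewPromiseCapability(C) {
+  //     const capability = {};
+  //     capability.promise = new C((resolve, reject) => {
+  //       capability.resolve = resolve;
+  //       capability.reject = reject;
+  //     });
+  //     return capability;  // { promise, resolve, reject }
+  //   }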
+
+  {  // -- P r o m i s e
+    // Set catch prediction
+    Handle<Code> promise_code = isolate->builtins()->PromiseConstructor();
+    promise_code->set_is_promise_rejection(true);
+
+    Handle<JSObject> prototype =
+        factory->NewJSObject(isolate->object_function(), TENURED);
+    Handle<JSFunction> promise_fun =
+        InstallFunction(global, "Promise", JS_PROMISE_TYPE, JSPromise::kSize,
+                        prototype, Builtins::kPromiseConstructor);
+    InstallWithIntrinsicDefaultProto(isolate, promise_fun,
+                                     Context::PROMISE_FUNCTION_INDEX);
+
+    Handle<SharedFunctionInfo> shared(promise_fun->shared(), isolate);
+    shared->SetConstructStub(*isolate->builtins()->JSBuiltinsConstructStub());
+    shared->set_instance_class_name(isolate->heap()->Object_string());
+    shared->set_internal_formal_parameter_count(1);
+    shared->set_length(1);
+
+    // Install the "constructor" property on the {prototype}.
+    JSObject::AddProperty(prototype, factory->constructor_string(), promise_fun,
+                          DONT_ENUM);
+
+    // Install the @@toStringTag property on the {prototype}.
+    JSObject::AddProperty(
+        prototype, factory->to_string_tag_symbol(), factory->Promise_string(),
+        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+    Handle<JSFunction> promise_then =
+        SimpleInstallFunction(prototype, isolate->factory()->then_string(),
+                              Builtins::kPromiseThen, 2, true);
+    InstallWithIntrinsicDefaultProto(isolate, promise_then,
+                                     Context::PROMISE_THEN_INDEX);
+
+    Handle<JSFunction> promise_catch = SimpleInstallFunction(
+        prototype, "catch", Builtins::kPromiseCatch, 1, true, DONT_ENUM);
+    InstallWithIntrinsicDefaultProto(isolate, promise_catch,
+                                     Context::PROMISE_CATCH_INDEX);
+
+    InstallSpeciesGetter(promise_fun);
+
+    SimpleInstallFunction(promise_fun, "resolve", Builtins::kPromiseResolve, 1,
+                          true, DONT_ENUM);
+
+    SimpleInstallFunction(promise_fun, "reject", Builtins::kPromiseReject, 1,
+                          true, DONT_ENUM);
+
+    Handle<Map> prototype_map(prototype->map());
+    Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate);
+
+    // Store the initial Promise.prototype map. This is used in fast-path
+    // checks. Do not alter the prototype after this point.
+    native_context()->set_promise_prototype_map(*prototype_map);
+
+    {  // Internal: PromiseInternalConstructor
+       // Also exposed as extrasUtils.createPromise.
+      Handle<JSFunction> function =
+          SimpleCreateFunction(isolate, factory->empty_string(),
+                               Builtins::kPromiseInternalConstructor, 1, true);
+      function->shared()->set_native(false);
+      InstallWithIntrinsicDefaultProto(
+          isolate, function, Context::PROMISE_INTERNAL_CONSTRUCTOR_INDEX);
+    }
+
+    {  // Internal: IsPromise
+      Handle<JSFunction> function = SimpleCreateFunction(
+          isolate, factory->empty_string(), Builtins::kIsPromise, 1, false);
+      InstallWithIntrinsicDefaultProto(isolate, function,
+                                       Context::IS_PROMISE_INDEX);
+    }
+
+    {  // Internal: ResolvePromise
+       // Also exposed as extrasUtils.resolvePromise.
+      Handle<JSFunction> function = SimpleCreateFunction(
+          isolate, factory->empty_string(), Builtins::kResolvePromise, 2, true);
+      function->shared()->set_native(false);
+      InstallWithIntrinsicDefaultProto(isolate, function,
+                                       Context::PROMISE_RESOLVE_INDEX);
+    }
+
+    {  // Internal: PromiseHandle
+      Handle<JSFunction> function = SimpleCreateFunction(
+          isolate, factory->empty_string(), Builtins::kPromiseHandle, 5, false);
+      InstallWithIntrinsicDefaultProto(isolate, function,
+                                       Context::PROMISE_HANDLE_INDEX);
+      // Set up catch prediction
+      Handle<Code> promise_handle = isolate->builtins()->PromiseHandle();
+      promise_handle->set_is_promise_rejection(true);
+    }
+
+    {  // Internal: PromiseHandleReject
+      Handle<JSFunction> function =
+          SimpleCreateFunction(isolate, factory->empty_string(),
+                               Builtins::kPromiseHandleReject, 3, false);
+      InstallWithIntrinsicDefaultProto(isolate, function,
+                                       Context::PROMISE_HANDLE_REJECT_INDEX);
+      // Set up catch prediction
+      Handle<Code> promise_handle = isolate->builtins()->PromiseHandleReject();
+      promise_handle->set_is_exception_caught(true);
+    }
+
+    {  // Internal: InternalPromiseReject
+      Handle<JSFunction> function =
+          SimpleCreateFunction(isolate, factory->empty_string(),
+                               Builtins::kInternalPromiseReject, 3, true);
+      function->shared()->set_native(false);
+      InstallWithIntrinsicDefaultProto(isolate, function,
+                                       Context::PROMISE_INTERNAL_REJECT_INDEX);
+    }
+
+    {
+      Handle<Code> code =
+          handle(isolate->builtins()->builtin(Builtins::kPromiseResolveClosure),
+                 isolate);
+      Handle<SharedFunctionInfo> info =
+          factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
+      info->set_internal_formal_parameter_count(1);
+      info->set_length(1);
+      native_context()->set_promise_resolve_shared_fun(*info);
+
+      code =
+          handle(isolate->builtins()->builtin(Builtins::kPromiseRejectClosure),
+                 isolate);
+      info =
+          factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
+      info->set_internal_formal_parameter_count(1);
+      info->set_length(1);
+      native_context()->set_promise_reject_shared_fun(*info);
+    }
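+
+    // Roughly (an illustrative reading, not authoritative): the two shared
+    // function infos registered above back the per-promise resolving
+    // functions, i.e. the one-argument pair handed to the executor in
+    //
+    //   new Promise((resolve, reject) => { /* ... */ });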
+  }
+
   {  // -- R e g E x p
     // Builtin functions for RegExp.prototype.
     Handle<JSObject> prototype =
@@ -1811,9 +2080,9 @@
                                      Context::REGEXP_FUNCTION_INDEX);
 
     Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate);
-    shared->SetConstructStub(*isolate->builtins()->RegExpConstructor());
+    shared->SetConstructStub(*isolate->builtins()->JSBuiltinsConstructStub());
     shared->set_instance_class_name(isolate->heap()->RegExp_string());
-    shared->DontAdaptArguments();
+    shared->set_internal_formal_parameter_count(2);
     shared->set_length(2);
 
     {
@@ -1839,14 +2108,14 @@
       SimpleInstallGetter(prototype, factory->multiline_string(),
                           Builtins::kRegExpPrototypeMultilineGetter, true);
       SimpleInstallGetter(prototype, factory->source_string(),
-                          Builtins::kRegExpPrototypeSourceGetter, false);
+                          Builtins::kRegExpPrototypeSourceGetter, true);
       SimpleInstallGetter(prototype, factory->sticky_string(),
                           Builtins::kRegExpPrototypeStickyGetter, true);
       SimpleInstallGetter(prototype, factory->unicode_string(),
                           Builtins::kRegExpPrototypeUnicodeGetter, true);
 
       SimpleInstallFunction(prototype, "compile",
-                            Builtins::kRegExpPrototypeCompile, 2, false,
+                            Builtins::kRegExpPrototypeCompile, 2, true,
                             DONT_ENUM);
       SimpleInstallFunction(prototype, factory->toString_string(),
                             Builtins::kRegExpPrototypeToString, 0, false,
@@ -1857,7 +2126,7 @@
       {
         Handle<JSFunction> fun = SimpleCreateFunction(
             isolate, factory->InternalizeUtf8String("[Symbol.match]"),
-            Builtins::kRegExpPrototypeMatch, 1, false);
+            Builtins::kRegExpPrototypeMatch, 1, true);
         InstallFunction(prototype, fun, factory->match_symbol(), DONT_ENUM);
       }
 
@@ -1878,22 +2147,22 @@
       {
         Handle<JSFunction> fun = SimpleCreateFunction(
             isolate, factory->InternalizeUtf8String("[Symbol.split]"),
-            Builtins::kRegExpPrototypeSplit, 2, false);
+            Builtins::kRegExpPrototypeSplit, 2, true);
         InstallFunction(prototype, fun, factory->split_symbol(), DONT_ENUM);
       }
 
+      Handle<Map> prototype_map(prototype->map());
+      Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate);
+
       // Store the initial RegExp.prototype map. This is used in fast-path
       // checks. Do not alter the prototype after this point.
-      native_context()->set_regexp_prototype_map(prototype->map());
+      native_context()->set_regexp_prototype_map(*prototype_map);
     }
 
     {
       // RegExp getters and setters.
 
-      SimpleInstallGetter(regexp_fun,
-                          factory->InternalizeUtf8String("[Symbol.species]"),
-                          factory->species_symbol(),
-                          Builtins::kRegExpPrototypeSpeciesGetter, false);
+      InstallSpeciesGetter(regexp_fun);
 
       // Static properties set by a successful match.
 
@@ -1963,10 +2232,10 @@
     // ECMA-262, section 15.10.7.5.
     PropertyAttributes writable =
         static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
-    DataDescriptor field(factory->lastIndex_string(),
-                         JSRegExp::kLastIndexFieldIndex, writable,
-                         Representation::Tagged());
-    initial_map->AppendDescriptor(&field);
+    Descriptor d = Descriptor::DataField(factory->lastIndex_string(),
+                                         JSRegExp::kLastIndexFieldIndex,
+                                         writable, Representation::Tagged());
+    initial_map->AppendDescriptor(&d);
 
     static const int num_fields = JSRegExp::kInObjectFieldCount;
     initial_map->SetInObjectProperties(num_fields);
@@ -2043,6 +2312,10 @@
     InstallError(isolate, dummy, factory->CompileError_string(),
                  Context::WASM_COMPILE_ERROR_FUNCTION_INDEX);
 
+    // -- L i n k E r r o r
+    InstallError(isolate, dummy, factory->LinkError_string(),
+                 Context::WASM_LINK_ERROR_FUNCTION_INDEX);
+
     // -- R u n t i m e E r r o r
     InstallError(isolate, dummy, factory->RuntimeError_string(),
                  Context::WASM_RUNTIME_ERROR_FUNCTION_INDEX);
@@ -2121,48 +2394,111 @@
     // Install math constants.
     double const kE = base::ieee754::exp(1.0);
     double const kPI = 3.1415926535897932;
-    JSObject::AddProperty(
-        math, factory->NewStringFromAsciiChecked("E"), factory->NewNumber(kE),
-        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
-    JSObject::AddProperty(
-        math, factory->NewStringFromAsciiChecked("LN10"),
-        factory->NewNumber(base::ieee754::log(10.0)),
-        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
-    JSObject::AddProperty(
-        math, factory->NewStringFromAsciiChecked("LN2"),
-        factory->NewNumber(base::ieee754::log(2.0)),
-        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
-    JSObject::AddProperty(
-        math, factory->NewStringFromAsciiChecked("LOG10E"),
-        factory->NewNumber(base::ieee754::log10(kE)),
-        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
-    JSObject::AddProperty(
-        math, factory->NewStringFromAsciiChecked("LOG2E"),
-        factory->NewNumber(base::ieee754::log2(kE)),
-        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
-    JSObject::AddProperty(
-        math, factory->NewStringFromAsciiChecked("PI"), factory->NewNumber(kPI),
-        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
-    JSObject::AddProperty(
-        math, factory->NewStringFromAsciiChecked("SQRT1_2"),
-        factory->NewNumber(std::sqrt(0.5)),
-        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
-    JSObject::AddProperty(
-        math, factory->NewStringFromAsciiChecked("SQRT2"),
-        factory->NewNumber(std::sqrt(2.0)),
-        static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+    InstallConstant(isolate, math, "E", factory->NewNumber(kE));
+    InstallConstant(isolate, math, "LN10",
+                    factory->NewNumber(base::ieee754::log(10.0)));
+    InstallConstant(isolate, math, "LN2",
+                    factory->NewNumber(base::ieee754::log(2.0)));
+    InstallConstant(isolate, math, "LOG10E",
+                    factory->NewNumber(base::ieee754::log10(kE)));
+    InstallConstant(isolate, math, "LOG2E",
+                    factory->NewNumber(base::ieee754::log2(kE)));
+    InstallConstant(isolate, math, "PI", factory->NewNumber(kPI));
+    InstallConstant(isolate, math, "SQRT1_2",
+                    factory->NewNumber(std::sqrt(0.5)));
+    InstallConstant(isolate, math, "SQRT2", factory->NewNumber(std::sqrt(2.0)));
     JSObject::AddProperty(
         math, factory->to_string_tag_symbol(),
         factory->NewStringFromAsciiChecked("Math"),
         static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
   }
 
+#ifdef V8_I18N_SUPPORT
+  {  // -- I n t l
+    Handle<String> name = factory->InternalizeUtf8String("Intl");
+    Handle<JSFunction> cons = factory->NewFunction(name);
+    JSFunction::SetInstancePrototype(
+        cons,
+        Handle<Object>(native_context()->initial_object_prototype(), isolate));
+    Handle<JSObject> intl = factory->NewJSObject(cons, TENURED);
+    DCHECK(intl->IsJSObject());
+    JSObject::AddProperty(global, name, intl, DONT_ENUM);
+
+    Handle<JSObject> date_time_format_prototype =
+        factory->NewJSObject(isolate->object_function(), TENURED);
+    // Install the @@toStringTag property on the {prototype}.
+    JSObject::AddProperty(
+        date_time_format_prototype, factory->to_string_tag_symbol(),
+        factory->Object_string(),
+        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+    Handle<JSFunction> date_time_format_constructor = InstallFunction(
+        intl, "DateTimeFormat", JS_OBJECT_TYPE, DateFormat::kSize,
+        date_time_format_prototype, Builtins::kIllegal);
+    JSObject::AddProperty(date_time_format_prototype,
+                          factory->constructor_string(),
+                          date_time_format_constructor, DONT_ENUM);
+    InstallWithIntrinsicDefaultProto(
+        isolate, date_time_format_constructor,
+        Context::INTL_DATE_TIME_FORMAT_FUNCTION_INDEX);
+
+    Handle<JSObject> number_format_prototype =
+        factory->NewJSObject(isolate->object_function(), TENURED);
+    // Install the @@toStringTag property on the {prototype}.
+    JSObject::AddProperty(
+        number_format_prototype, factory->to_string_tag_symbol(),
+        factory->Object_string(),
+        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+    Handle<JSFunction> number_format_constructor = InstallFunction(
+        intl, "NumberFormat", JS_OBJECT_TYPE, NumberFormat::kSize,
+        number_format_prototype, Builtins::kIllegal);
+    JSObject::AddProperty(number_format_prototype,
+                          factory->constructor_string(),
+                          number_format_constructor, DONT_ENUM);
+    InstallWithIntrinsicDefaultProto(
+        isolate, number_format_constructor,
+        Context::INTL_NUMBER_FORMAT_FUNCTION_INDEX);
+
+    Handle<JSObject> collator_prototype =
+        factory->NewJSObject(isolate->object_function(), TENURED);
+    // Install the @@toStringTag property on the {prototype}.
+    JSObject::AddProperty(
+        collator_prototype, factory->to_string_tag_symbol(),
+        factory->Object_string(),
+        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+    Handle<JSFunction> collator_constructor =
+        InstallFunction(intl, "Collator", JS_OBJECT_TYPE, Collator::kSize,
+                        collator_prototype, Builtins::kIllegal);
+    JSObject::AddProperty(collator_prototype, factory->constructor_string(),
+                          collator_constructor, DONT_ENUM);
+    InstallWithIntrinsicDefaultProto(isolate, collator_constructor,
+                                     Context::INTL_COLLATOR_FUNCTION_INDEX);
+
+    Handle<JSObject> v8_break_iterator_prototype =
+        factory->NewJSObject(isolate->object_function(), TENURED);
+    // Install the @@toStringTag property on the {prototype}.
+    JSObject::AddProperty(
+        v8_break_iterator_prototype, factory->to_string_tag_symbol(),
+        factory->Object_string(),
+        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+    Handle<JSFunction> v8_break_iterator_constructor = InstallFunction(
+        intl, "v8BreakIterator", JS_OBJECT_TYPE, V8BreakIterator::kSize,
+        v8_break_iterator_prototype, Builtins::kIllegal);
+    JSObject::AddProperty(v8_break_iterator_prototype,
+                          factory->constructor_string(),
+                          v8_break_iterator_constructor, DONT_ENUM);
+    InstallWithIntrinsicDefaultProto(
+        isolate, v8_break_iterator_constructor,
+        Context::INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX);
+  }
+#endif  // V8_I18N_SUPPORT
+
   {  // -- A r r a y B u f f e r
     Handle<JSFunction> array_buffer_fun = InstallArrayBuffer(
         global, "ArrayBuffer", Builtins::kArrayBufferPrototypeGetByteLength,
         BuiltinFunctionId::kArrayBufferByteLength);
     InstallWithIntrinsicDefaultProto(isolate, array_buffer_fun,
                                      Context::ARRAY_BUFFER_FUN_INDEX);
+    InstallSpeciesGetter(array_buffer_fun);
   }
 
   {  // -- T y p e d A r r a y
@@ -2174,6 +2510,8 @@
         CreateFunction(isolate, factory->InternalizeUtf8String("TypedArray"),
                        JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, prototype,
                        Builtins::kIllegal);
+    typed_array_fun->shared()->set_native(false);
+    InstallSpeciesGetter(typed_array_fun);
 
     // Install the "constructor" property on the {prototype}.
     JSObject::AddProperty(prototype, factory->constructor_string(),
@@ -2211,6 +2549,10 @@
     values->shared()->set_builtin_function_id(kTypedArrayValues);
     JSObject::AddProperty(prototype, factory->iterator_symbol(), values,
                           DONT_ENUM);
+
+    // TODO(caitp): alphasort accessors/methods
+    SimpleInstallFunction(prototype, "copyWithin",
+                          Builtins::kTypedArrayPrototypeCopyWithin, 2, false);
   }
 
   {  // -- T y p e d A r r a y s
@@ -2301,6 +2643,7 @@
         isolate->initial_object_prototype(), Builtins::kIllegal);
     InstallWithIntrinsicDefaultProto(isolate, js_map_fun,
                                      Context::JS_MAP_FUN_INDEX);
+    InstallSpeciesGetter(js_map_fun);
   }
 
   {  // -- S e t
@@ -2309,33 +2652,27 @@
         isolate->initial_object_prototype(), Builtins::kIllegal);
     InstallWithIntrinsicDefaultProto(isolate, js_set_fun,
                                      Context::JS_SET_FUN_INDEX);
+    InstallSpeciesGetter(js_set_fun);
   }
 
   {  // -- J S M o d u l e N a m e s p a c e
     Handle<Map> map =
         factory->NewMap(JS_MODULE_NAMESPACE_TYPE, JSModuleNamespace::kSize);
     Map::SetPrototype(map, isolate->factory()->null_value());
-    Map::EnsureDescriptorSlack(map, 2);
+    Map::EnsureDescriptorSlack(map, 1);
     native_context()->set_js_module_namespace_map(*map);
 
     {  // Install @@toStringTag.
       PropertyAttributes attribs =
-          static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
-      DataConstantDescriptor d(factory->to_string_tag_symbol(),
-                               factory->NewStringFromAsciiChecked("Module"),
-                               attribs);
+          static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
+      Descriptor d =
+          Descriptor::DataField(factory->to_string_tag_symbol(),
+                                JSModuleNamespace::kToStringTagFieldIndex,
+                                attribs, Representation::Tagged());
       map->AppendDescriptor(&d);
     }
 
-    {  // Install @@iterator.
-      Handle<JSFunction> iterator = SimpleCreateFunction(
-          isolate, factory->NewStringFromAsciiChecked("[Symbol.iterator]"),
-          Builtins::kModuleNamespaceIterator, 0, true);
-      iterator->shared()->set_native(true);
-      // TODO(neis): Is this really supposed to be writable?
-      DataConstantDescriptor d(factory->iterator_symbol(), iterator, DONT_ENUM);
-      map->AppendDescriptor(&d);
-    }
+    map->SetInObjectProperties(JSModuleNamespace::kInObjectFieldCount);
   }
 
   {  // -- I t e r a t o r R e s u l t
@@ -2345,14 +2682,16 @@
     Map::EnsureDescriptorSlack(map, 2);
 
     {  // value
-      DataDescriptor d(factory->value_string(), JSIteratorResult::kValueIndex,
-                       NONE, Representation::Tagged());
+      Descriptor d = Descriptor::DataField(factory->value_string(),
+                                           JSIteratorResult::kValueIndex, NONE,
+                                           Representation::Tagged());
       map->AppendDescriptor(&d);
     }
 
     {  // done
-      DataDescriptor d(factory->done_string(), JSIteratorResult::kDoneIndex,
-                       NONE, Representation::Tagged());
+      Descriptor d = Descriptor::DataField(factory->done_string(),
+                                           JSIteratorResult::kDoneIndex, NONE,
+                                           Representation::Tagged());
       map->AppendDescriptor(&d);
     }
 
@@ -2458,15 +2797,15 @@
     Handle<AccessorInfo> bound_length =
         Accessors::BoundFunctionLengthInfo(isolate, roc_attribs);
     {  // length
-      AccessorConstantDescriptor d(factory->length_string(), bound_length,
-                                   roc_attribs);
+      Descriptor d = Descriptor::AccessorConstant(factory->length_string(),
+                                                  bound_length, roc_attribs);
       map->AppendDescriptor(&d);
     }
     Handle<AccessorInfo> bound_name =
         Accessors::BoundFunctionNameInfo(isolate, roc_attribs);
-    {  // length
-      AccessorConstantDescriptor d(factory->name_string(), bound_name,
-                                   roc_attribs);
+    {  // name
+      Descriptor d = Descriptor::AccessorConstant(factory->name_string(),
+                                                  bound_name, roc_attribs);
       map->AppendDescriptor(&d);
     }
     map->SetInObjectProperties(0);
@@ -2493,15 +2832,15 @@
     Map::EnsureDescriptorSlack(map, 2);
 
     {  // length
-      DataDescriptor d(factory->length_string(),
-                       JSSloppyArgumentsObject::kLengthIndex, DONT_ENUM,
-                       Representation::Tagged());
+      Descriptor d = Descriptor::DataField(
+          factory->length_string(), JSSloppyArgumentsObject::kLengthIndex,
+          DONT_ENUM, Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     {  // callee
-      DataDescriptor d(factory->callee_string(),
-                       JSSloppyArgumentsObject::kCalleeIndex, DONT_ENUM,
-                       Representation::Tagged());
+      Descriptor d = Descriptor::DataField(
+          factory->callee_string(), JSSloppyArgumentsObject::kCalleeIndex,
+          DONT_ENUM, Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     // @@iterator method is added later.
@@ -2550,14 +2889,14 @@
     Map::EnsureDescriptorSlack(map, 2);
 
     {  // length
-      DataDescriptor d(factory->length_string(),
-                       JSStrictArgumentsObject::kLengthIndex, DONT_ENUM,
-                       Representation::Tagged());
+      Descriptor d = Descriptor::DataField(
+          factory->length_string(), JSStrictArgumentsObject::kLengthIndex,
+          DONT_ENUM, Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     {  // callee
-      AccessorConstantDescriptor d(factory->callee_string(), callee,
-                                   attributes);
+      Descriptor d = Descriptor::AccessorConstant(factory->callee_string(),
+                                                  callee, attributes);
       map->AppendDescriptor(&d);
     }
     // @@iterator method is added later.
@@ -2644,6 +2983,8 @@
   HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
   HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
 #undef FEATURE_INITIALIZE_GLOBAL
+
+  InitializeGlobal_enable_fast_array_builtins();
 }
 
 
@@ -2714,7 +3055,7 @@
   // environment has been at least partially initialized. Add a stack check
   // before entering JS code to catch overflow early.
   StackLimitCheck check(isolate);
-  if (check.JsHasOverflowed(1 * KB)) {
+  if (check.JsHasOverflowed(4 * KB)) {
     isolate->StackOverflow();
     return false;
   }
@@ -2726,8 +3067,7 @@
   Handle<SharedFunctionInfo> function_info =
       Compiler::GetSharedFunctionInfoForScript(
           source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
-          context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag,
-          false);
+          context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag);
   if (function_info.is_null()) return false;
 
   DCHECK(context->IsNativeContext());
@@ -2739,12 +3079,16 @@
 
   // For non-extension scripts, run script to get the function wrapper.
   Handle<Object> wrapper;
-  if (!Execution::Call(isolate, fun, receiver, 0, NULL).ToHandle(&wrapper)) {
+  if (!Execution::TryCall(isolate, fun, receiver, 0, nullptr,
+                          Execution::MessageHandling::kKeepPending, nullptr)
+           .ToHandle(&wrapper)) {
     return false;
   }
   // Then run the function wrapper.
-  return !Execution::Call(isolate, Handle<JSFunction>::cast(wrapper), receiver,
-                          argc, argv).is_null();
+  return !Execution::TryCall(isolate, Handle<JSFunction>::cast(wrapper),
+                             receiver, argc, argv,
+                             Execution::MessageHandling::kKeepPending, nullptr)
+              .is_null();
 }
 
 
@@ -2756,7 +3100,9 @@
   Handle<Object> fun = JSObject::GetDataProperty(utils, name_string);
   Handle<Object> receiver = isolate->factory()->undefined_value();
   Handle<Object> args[] = {utils};
-  return !Execution::Call(isolate, fun, receiver, 1, args).is_null();
+  return !Execution::TryCall(isolate, fun, receiver, 1, args,
+                             Execution::MessageHandling::kKeepPending, nullptr)
+              .is_null();
 }
 
 
@@ -2784,7 +3130,7 @@
     function_info = Compiler::GetSharedFunctionInfoForScript(
         source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
         context, extension, NULL, ScriptCompiler::kNoCompileOptions,
-        EXTENSION_CODE, false);
+        EXTENSION_CODE);
     if (function_info.is_null()) return false;
     cache->Add(name, function_info);
   }
@@ -2798,7 +3144,9 @@
   // Call function using either the runtime object or the global
   // object as the receiver. Provide no parameters.
   Handle<Object> receiver = isolate->global_object();
-  return !Execution::Call(isolate, fun, receiver, 0, NULL).is_null();
+  return !Execution::TryCall(isolate, fun, receiver, 0, nullptr,
+                             Execution::MessageHandling::kKeepPending, nullptr)
+              .is_null();
 }
 
 
@@ -2858,6 +3206,8 @@
 
   // The utils object can be removed for cases that reach this point.
   native_context()->set_natives_utils_object(heap()->undefined_value());
+  native_context()->set_extras_utils_object(heap()->undefined_value());
+  native_context()->set_exports_container(heap()->undefined_value());
 }
 
 
@@ -2896,7 +3246,7 @@
                         iterator_prototype, NONE);
 
   {
-    PrototypeIterator iter(native_context->sloppy_generator_function_map());
+    PrototypeIterator iter(native_context->generator_function_map());
     Handle<JSObject> generator_function_prototype(iter.GetCurrent<JSObject>());
 
     JSObject::AddProperty(
@@ -2909,7 +3259,7 @@
         generator_function_prototype, Builtins::kGeneratorFunctionConstructor,
         kUseStrictFunctionMap);
     generator_function_function->set_prototype_or_initial_map(
-        native_context->sloppy_generator_function_map());
+        native_context->generator_function_map());
     generator_function_function->shared()->DontAdaptArguments();
     generator_function_function->shared()->SetConstructStub(
         *isolate->builtins()->GeneratorFunctionConstructor());
@@ -2925,29 +3275,8 @@
         generator_function_function,
         static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
 
-    native_context->sloppy_generator_function_map()->SetConstructor(
+    native_context->generator_function_map()->SetConstructor(
         *generator_function_function);
-    native_context->strict_generator_function_map()->SetConstructor(
-        *generator_function_function);
-  }
-
-  {  // -- F i x e d A r r a y I t e r a t o r
-    int size = JSFixedArrayIterator::kHeaderSize +
-               JSFixedArrayIterator::kInObjectPropertyCount * kPointerSize;
-    Handle<Map> map = factory->NewMap(JS_FIXED_ARRAY_ITERATOR_TYPE, size);
-    Map::SetPrototype(map, iterator_prototype);
-    Map::EnsureDescriptorSlack(map,
-                               JSFixedArrayIterator::kInObjectPropertyCount);
-    map->SetInObjectProperties(JSFixedArrayIterator::kInObjectPropertyCount);
-    map->SetConstructor(native_context->object_function());
-
-    {  // next
-      DataDescriptor d(factory->next_string(), JSFixedArrayIterator::kNextIndex,
-                       DONT_ENUM, Representation::Tagged());
-      map->AppendDescriptor(&d);
-    }
-
-    native_context->set_fixed_array_iterator_map(*map);
   }
 
   {  // -- S e t I t e r a t o r
@@ -2989,7 +3318,7 @@
     Handle<AccessorInfo> script_column =
         Accessors::ScriptColumnOffsetInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(script_column->name())), script_column,
           attribs);
       script_map->AppendDescriptor(&d);
@@ -2997,8 +3326,8 @@
 
     Handle<AccessorInfo> script_id = Accessors::ScriptIdInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(Handle<Name>(Name::cast(script_id->name())),
-                                   script_id, attribs);
+      Descriptor d = Descriptor::AccessorConstant(
+          Handle<Name>(Name::cast(script_id->name())), script_id, attribs);
       script_map->AppendDescriptor(&d);
     }
 
@@ -3006,7 +3335,7 @@
     Handle<AccessorInfo> script_name =
         Accessors::ScriptNameInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(script_name->name())), script_name, attribs);
       script_map->AppendDescriptor(&d);
     }
@@ -3014,7 +3343,7 @@
     Handle<AccessorInfo> script_line =
         Accessors::ScriptLineOffsetInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(script_line->name())), script_line, attribs);
       script_map->AppendDescriptor(&d);
     }
@@ -3022,7 +3351,7 @@
     Handle<AccessorInfo> script_source =
         Accessors::ScriptSourceInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(script_source->name())), script_source,
           attribs);
       script_map->AppendDescriptor(&d);
@@ -3031,7 +3360,7 @@
     Handle<AccessorInfo> script_type =
         Accessors::ScriptTypeInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(script_type->name())), script_type, attribs);
       script_map->AppendDescriptor(&d);
     }
@@ -3039,7 +3368,7 @@
     Handle<AccessorInfo> script_compilation_type =
         Accessors::ScriptCompilationTypeInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(script_compilation_type->name())),
           script_compilation_type, attribs);
       script_map->AppendDescriptor(&d);
@@ -3048,7 +3377,7 @@
     Handle<AccessorInfo> script_context_data =
         Accessors::ScriptContextDataInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(script_context_data->name())),
           script_context_data, attribs);
       script_map->AppendDescriptor(&d);
@@ -3057,7 +3386,7 @@
     Handle<AccessorInfo> script_eval_from_script =
         Accessors::ScriptEvalFromScriptInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(script_eval_from_script->name())),
           script_eval_from_script, attribs);
       script_map->AppendDescriptor(&d);
@@ -3066,7 +3395,7 @@
     Handle<AccessorInfo> script_eval_from_script_position =
         Accessors::ScriptEvalFromScriptPositionInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(script_eval_from_script_position->name())),
           script_eval_from_script_position, attribs);
       script_map->AppendDescriptor(&d);
@@ -3075,7 +3404,7 @@
     Handle<AccessorInfo> script_eval_from_function_name =
         Accessors::ScriptEvalFromFunctionNameInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(script_eval_from_function_name->name())),
           script_eval_from_function_name, attribs);
       script_map->AppendDescriptor(&d);
@@ -3084,7 +3413,7 @@
     Handle<AccessorInfo> script_source_url =
         Accessors::ScriptSourceUrlInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(script_source_url->name())),
           script_source_url, attribs);
       script_map->AppendDescriptor(&d);
@@ -3093,55 +3422,90 @@
     Handle<AccessorInfo> script_source_mapping_url =
         Accessors::ScriptSourceMappingUrlInfo(isolate, attribs);
     {
-      AccessorConstantDescriptor d(
+      Descriptor d = Descriptor::AccessorConstant(
           Handle<Name>(Name::cast(script_source_mapping_url->name())),
           script_source_mapping_url, attribs);
       script_map->AppendDescriptor(&d);
     }
+  }
 
-    Handle<AccessorInfo> script_is_embedder_debug_script =
-        Accessors::ScriptIsEmbedderDebugScriptInfo(isolate, attribs);
+  {  // -- A s y n c F u n c t i o n
+    // Builtin functions for AsyncFunction.
+    PrototypeIterator iter(native_context->async_function_map());
+    Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>());
+
+    static const bool kUseStrictFunctionMap = true;
+    Handle<JSFunction> async_function_constructor = InstallFunction(
+        container, "AsyncFunction", JS_FUNCTION_TYPE, JSFunction::kSize,
+        async_function_prototype, Builtins::kAsyncFunctionConstructor,
+        kUseStrictFunctionMap);
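+    // Per ES2017, %AsyncFunction% has length 1; argument adaptation is
+    // disabled because the constructor takes a variable argument count.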
+    async_function_constructor->shared()->DontAdaptArguments();
+    async_function_constructor->shared()->SetConstructStub(
+        *isolate->builtins()->AsyncFunctionConstructor());
+    async_function_constructor->shared()->set_length(1);
+    InstallWithIntrinsicDefaultProto(isolate, async_function_constructor,
+                                     Context::ASYNC_FUNCTION_FUNCTION_INDEX);
+    JSObject::ForceSetPrototype(async_function_constructor,
+                                isolate->function_function());
+
+    JSObject::AddProperty(
+        async_function_prototype, factory->constructor_string(),
+        async_function_constructor,
+        static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+    JSFunction::SetPrototype(async_function_constructor,
+                             async_function_prototype);
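+
+    // From JS this constructor is reachable as, e.g.:
+    //   const AsyncFunction = (async () => {}).constructor;
+    //   new AsyncFunction("x", "return await x;");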
+
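+    // Install the await helpers used by the async-function desugaring; the
+    // caught and uncaught variants differ only in how rejections are
+    // attributed for the debugger's catch prediction.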
     {
-      AccessorConstantDescriptor d(
-          Handle<Name>(Name::cast(script_is_embedder_debug_script->name())),
-          script_is_embedder_debug_script, attribs);
-      script_map->AppendDescriptor(&d);
+      Handle<JSFunction> function =
+          SimpleCreateFunction(isolate, factory->empty_string(),
+                               Builtins::kAsyncFunctionAwaitCaught, 3, false);
+      InstallWithIntrinsicDefaultProto(
+          isolate, function, Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX);
     }
 
     {
-      PrototypeIterator iter(native_context->sloppy_async_function_map());
-      Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>());
+      Handle<JSFunction> function =
+          SimpleCreateFunction(isolate, factory->empty_string(),
+                               Builtins::kAsyncFunctionAwaitUncaught, 3, false);
+      InstallWithIntrinsicDefaultProto(
+          isolate, function, Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX);
+    }
 
-      static const bool kUseStrictFunctionMap = true;
-      Handle<JSFunction> async_function_constructor = InstallFunction(
-          container, "AsyncFunction", JS_FUNCTION_TYPE, JSFunction::kSize,
-          async_function_prototype, Builtins::kAsyncFunctionConstructor,
-          kUseStrictFunctionMap);
-      async_function_constructor->shared()->DontAdaptArguments();
-      async_function_constructor->shared()->SetConstructStub(
-          *isolate->builtins()->AsyncFunctionConstructor());
-      async_function_constructor->shared()->set_length(1);
-      InstallWithIntrinsicDefaultProto(isolate, async_function_constructor,
-                                       Context::ASYNC_FUNCTION_FUNCTION_INDEX);
-      JSObject::ForceSetPrototype(async_function_constructor,
-                                  isolate->function_function());
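+    // The two SharedFunctionInfos below serve as templates for the per-await
+    // resolve/reject closures allocated while an async function is suspended.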
+    {
+      Handle<Code> code =
+          isolate->builtins()->AsyncFunctionAwaitRejectClosure();
+      Handle<SharedFunctionInfo> info =
+          factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
+      info->set_internal_formal_parameter_count(1);
+      info->set_length(1);
+      native_context->set_async_function_await_reject_shared_fun(*info);
+    }
 
-      JSObject::AddProperty(
-          async_function_prototype, factory->constructor_string(),
-          async_function_constructor,
-          static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+    {
+      Handle<Code> code =
+          isolate->builtins()->AsyncFunctionAwaitResolveClosure();
+      Handle<SharedFunctionInfo> info =
+          factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
+      info->set_internal_formal_parameter_count(1);
+      info->set_length(1);
+      native_context->set_async_function_await_resolve_shared_fun(*info);
+    }
 
-      JSFunction::SetPrototype(async_function_constructor,
-                               async_function_prototype);
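+    // Helpers that allocate and release the outer promise of an async
+    // function invocation (also used for the debugger's catch prediction).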
+    {
+      Handle<JSFunction> function =
+          SimpleCreateFunction(isolate, factory->empty_string(),
+                               Builtins::kAsyncFunctionPromiseCreate, 0, false);
+      InstallWithIntrinsicDefaultProto(
+          isolate, function, Context::ASYNC_FUNCTION_PROMISE_CREATE_INDEX);
+    }
 
-      Handle<JSFunction> async_function_next =
-          SimpleInstallFunction(container, "AsyncFunctionNext",
-                                Builtins::kGeneratorPrototypeNext, 1, true);
-      Handle<JSFunction> async_function_throw =
-          SimpleInstallFunction(container, "AsyncFunctionThrow",
-                                Builtins::kGeneratorPrototypeThrow, 1, true);
-      async_function_next->shared()->set_native(false);
-      async_function_throw->shared()->set_native(false);
+    {
+      Handle<JSFunction> function = SimpleCreateFunction(
+          isolate, factory->empty_string(),
+          Builtins::kAsyncFunctionPromiseRelease, 1, false);
+      InstallWithIntrinsicDefaultProto(
+          isolate, function, Context::ASYNC_FUNCTION_PROMISE_RELEASE_INDEX);
     }
   }
 
@@ -3199,24 +3563,7 @@
       Accessors::FunctionSetPrototype(callsite_fun, proto).Assert();
     }
   }
-}
-
-
-void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
-                                                 Handle<JSObject> container) {
-  HandleScope scope(isolate);
-
-#ifdef V8_I18N_SUPPORT
-#define INITIALIZE_FLAG(FLAG)                                         \
-  {                                                                   \
-    Handle<String> name =                                             \
-        isolate->factory()->NewStringFromAsciiChecked(#FLAG);         \
-    JSObject::AddProperty(container, name,                            \
-                          isolate->factory()->ToBoolean(FLAG), NONE); \
-  }
-
-#undef INITIALIZE_FLAG
-#endif
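+  // Keep the exports container on the native context: feature installers
+  // below (e.g. the i18n ones) pull natives-exported functions out of it.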
+  isolate->native_context()->set_exports_container(*container);
 }
 
 
@@ -3229,15 +3576,13 @@
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_string_padding)
-#ifdef V8_I18N_SUPPORT
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(datetime_format_to_parts)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(icu_case_mapping)
-#endif
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_async_await)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_generators)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_trailing_commas)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_tostring)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_rest_spread)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_template_escapes)
 
 void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
                          const char* name, Handle<Symbol> value) {
@@ -3252,6 +3597,31 @@
   JSObject::AddProperty(symbol, name_string, value, attributes);
 }
 
+void Genesis::InitializeGlobal_enable_fast_array_builtins() {
+  if (!FLAG_enable_fast_array_builtins) return;
+
+  Handle<JSGlobalObject> global(native_context()->global_object());
+  Isolate* isolate = global->GetIsolate();
+  Factory* factory = isolate->factory();
+
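+  // Resolve global.Array.prototype.forEach with interceptor-skipping own
+  // lookups, then point both the function and its SharedFunctionInfo at the
+  // fast ArrayForEach builtin.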
+  LookupIterator it1(global, factory->NewStringFromAsciiChecked("Array"),
+                     LookupIterator::OWN_SKIP_INTERCEPTOR);
+  Handle<Object> array_object = Object::GetProperty(&it1).ToHandleChecked();
+  LookupIterator it2(array_object,
+                     factory->NewStringFromAsciiChecked("prototype"),
+                     LookupIterator::OWN_SKIP_INTERCEPTOR);
+  Handle<Object> array_prototype = Object::GetProperty(&it2).ToHandleChecked();
+  LookupIterator it3(array_prototype,
+                     factory->NewStringFromAsciiChecked("forEach"),
+                     LookupIterator::OWN_SKIP_INTERCEPTOR);
+  Handle<Object> for_each_function =
+      Object::GetProperty(&it3).ToHandleChecked();
+  Handle<JSFunction>::cast(for_each_function)
+      ->set_code(isolate->builtins()->builtin(Builtins::kArrayForEach));
+  Handle<JSFunction>::cast(for_each_function)
+      ->shared()
+      ->set_code(isolate->builtins()->builtin(Builtins::kArrayForEach));
+}
 
 void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
   if (!FLAG_harmony_sharedarraybuffer) return;
@@ -3281,38 +3651,6 @@
                         Builtins::kAtomicsStore, 3, true);
 }
 
-
-void Genesis::InitializeGlobal_harmony_simd() {
-  if (!FLAG_harmony_simd) return;
-
-  Handle<JSGlobalObject> global(
-      JSGlobalObject::cast(native_context()->global_object()));
-  Isolate* isolate = global->GetIsolate();
-  Factory* factory = isolate->factory();
-
-  Handle<String> name = factory->InternalizeUtf8String("SIMD");
-  Handle<JSFunction> cons = factory->NewFunction(name);
-  JSFunction::SetInstancePrototype(
-      cons,
-      Handle<Object>(native_context()->initial_object_prototype(), isolate));
-  cons->shared()->set_instance_class_name(*name);
-  Handle<JSObject> simd_object = factory->NewJSObject(cons, TENURED);
-  DCHECK(simd_object->IsJSObject());
-  JSObject::AddProperty(global, name, simd_object, DONT_ENUM);
-
-// Install SIMD type functions. Set the instance class names since
-// InstallFunction only does this when we install on the JSGlobalObject.
-#define SIMD128_INSTALL_FUNCTION(TYPE, Type, type, lane_count, lane_type) \
-  Handle<JSFunction> type##_function = InstallFunction(                   \
-      simd_object, #Type, JS_VALUE_TYPE, JSValue::kSize,                  \
-      isolate->initial_object_prototype(), Builtins::kIllegal);           \
-  native_context()->set_##type##_function(*type##_function);              \
-  type##_function->shared()->set_instance_class_name(*factory->Type##_string());
-  SIMD128_TYPES(SIMD128_INSTALL_FUNCTION)
-#undef SIMD128_INSTALL_FUNCTION
-}
-
-
 void Genesis::InitializeGlobal_harmony_array_prototype_values() {
   if (!FLAG_harmony_array_prototype_values) return;
   Handle<JSFunction> array_constructor(native_context()->array_function());
@@ -3334,6 +3672,143 @@
                         NONE);
 }
 
+void Genesis::InitializeGlobal_harmony_async_iteration() {
+  if (!FLAG_harmony_async_iteration) return;
+  Handle<JSFunction> symbol_fun(native_context()->symbol_function());
+  InstallConstant(isolate(), symbol_fun, "asyncIterator",
+                  factory()->async_iterator_symbol());
+}
+
+void Genesis::InitializeGlobal_harmony_promise_finally() {
+  if (!FLAG_harmony_promise_finally) return;
+
+  Handle<JSFunction> constructor(native_context()->promise_function());
+  Handle<JSObject> prototype(JSObject::cast(constructor->instance_prototype()));
+  SimpleInstallFunction(prototype, "finally", Builtins::kPromiseFinally, 1,
+                        true, DONT_ENUM);
+
+  // The promise prototype map has changed because we added a property to the
+  // prototype, so we update the saved map.
+  Handle<Map> prototype_map(prototype->map());
+  Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate());
+  native_context()->set_promise_prototype_map(*prototype_map);
+
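+  // The SharedFunctionInfos below serve as templates for the closures that
+  // Promise.prototype.finally allocates at runtime: the then/catch wrappers
+  // around the user callback, plus the value thunk and thrower that
+  // propagate the settled value or exception.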
+  {
+    Handle<Code> code =
+        handle(isolate()->builtins()->builtin(Builtins::kPromiseThenFinally),
+               isolate());
+    Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
+        factory()->empty_string(), code, false);
+    info->set_internal_formal_parameter_count(1);
+    info->set_length(1);
+    info->set_native(true);
+    native_context()->set_promise_then_finally_shared_fun(*info);
+  }
+
+  {
+    Handle<Code> code =
+        handle(isolate()->builtins()->builtin(Builtins::kPromiseCatchFinally),
+               isolate());
+    Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
+        factory()->empty_string(), code, false);
+    info->set_internal_formal_parameter_count(1);
+    info->set_length(1);
+    info->set_native(true);
+    native_context()->set_promise_catch_finally_shared_fun(*info);
+  }
+
+  {
+    Handle<Code> code = handle(
+        isolate()->builtins()->builtin(Builtins::kPromiseValueThunkFinally),
+        isolate());
+    Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
+        factory()->empty_string(), code, false);
+    info->set_internal_formal_parameter_count(0);
+    info->set_length(0);
+    native_context()->set_promise_value_thunk_finally_shared_fun(*info);
+  }
+
+  {
+    Handle<Code> code =
+        handle(isolate()->builtins()->builtin(Builtins::kPromiseThrowerFinally),
+               isolate());
+    Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
+        factory()->empty_string(), code, false);
+    info->set_internal_formal_parameter_count(0);
+    info->set_length(0);
+    native_context()->set_promise_thrower_finally_shared_fun(*info);
+  }
+}
+
+#ifdef V8_I18N_SUPPORT
+void Genesis::InitializeGlobal_datetime_format_to_parts() {
+  if (!FLAG_datetime_format_to_parts) return;
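+
+  // Pull the natives-exported FormatDateToParts out of the exports container
+  // and install it as Intl.DateTimeFormat.prototype.formatToParts.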
+  Handle<JSReceiver> exports_container(
+      JSReceiver::cast(native_context()->exports_container()));
+  Handle<JSObject> date_time_format_prototype(JSObject::cast(
+      native_context()->intl_date_time_format_function()->prototype()));
+  Handle<JSFunction> format_date_to_parts = Handle<JSFunction>::cast(
+      JSReceiver::GetProperty(
+          exports_container,
+          factory()->InternalizeUtf8String("FormatDateToParts"))
+          .ToHandleChecked());
+  InstallFunction(date_time_format_prototype, format_date_to_parts,
+                  factory()->InternalizeUtf8String("formatToParts"));
+}
+
+namespace {
+
+void SetFunction(Handle<JSObject> target, Handle<JSFunction> function,
+                 Handle<Name> name, PropertyAttributes attributes = DONT_ENUM) {
+  JSObject::SetOwnPropertyIgnoreAttributes(target, name, function, attributes)
+      .ToHandleChecked();
+}
+
+}  // namespace
+
+void Genesis::InitializeGlobal_icu_case_mapping() {
+  if (!FLAG_icu_case_mapping) return;
+
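+  // Swap String.prototype's case-mapping methods for the ICU-backed versions
+  // exported from the natives (full Unicode, locale-aware mappings).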
+  Handle<JSReceiver> exports_container(
+      JSReceiver::cast(native_context()->exports_container()));
+
+  Handle<JSObject> string_prototype(
+      JSObject::cast(native_context()->string_function()->prototype()));
+
+  Handle<JSFunction> to_lower_case = Handle<JSFunction>::cast(
+      JSReceiver::GetProperty(
+          exports_container,
+          factory()->InternalizeUtf8String("ToLowerCaseI18N"))
+          .ToHandleChecked());
+  SetFunction(string_prototype, to_lower_case,
+              factory()->InternalizeUtf8String("toLowerCase"));
+
+  Handle<JSFunction> to_upper_case = Handle<JSFunction>::cast(
+      JSReceiver::GetProperty(
+          exports_container,
+          factory()->InternalizeUtf8String("ToUpperCaseI18N"))
+          .ToHandleChecked());
+  SetFunction(string_prototype, to_upper_case,
+              factory()->InternalizeUtf8String("toUpperCase"));
+
+  Handle<JSFunction> to_locale_lower_case = Handle<JSFunction>::cast(
+      JSReceiver::GetProperty(
+          exports_container,
+          factory()->InternalizeUtf8String("ToLocaleLowerCaseI18N"))
+          .ToHandleChecked());
+  SetFunction(string_prototype, to_locale_lower_case,
+              factory()->InternalizeUtf8String("toLocaleLowerCase"));
+
+  Handle<JSFunction> to_locale_upper_case = Handle<JSFunction>::cast(
+      JSReceiver::GetProperty(
+          exports_container,
+          factory()->InternalizeUtf8String("ToLocaleUpperCaseI18N"))
+          .ToHandleChecked());
+  SetFunction(string_prototype, to_locale_upper_case,
+              factory()->InternalizeUtf8String("toLocaleUpperCase"));
+}
+#endif
+
 Handle<JSFunction> Genesis::InstallArrayBuffer(Handle<JSObject> target,
                                                const char* name,
                                                Builtins::Name call,
@@ -3405,8 +3880,8 @@
   Handle<AccessorInfo> array_length =
       Accessors::ArrayLengthInfo(isolate(), attribs);
   {  // Add length.
-    AccessorConstantDescriptor d(Handle<Name>(Name::cast(array_length->name())),
-                                 array_length, attribs);
+    Descriptor d = Descriptor::AccessorConstant(
+        Handle<Name>(Name::cast(array_length->name())), array_length, attribs);
     initial_map->AppendDescriptor(&d);
   }
 
@@ -3430,6 +3905,11 @@
 
   InstallInternalArray(extras_utils, "InternalPackedArray", FAST_ELEMENTS);
 
+  // Expose the internal promise constructor and resolver to V8 extras.
+  InstallFunction(extras_utils, isolate()->promise_internal_constructor(),
+                  factory()->NewStringFromAsciiChecked("createPromise"));
+  InstallFunction(extras_utils, isolate()->promise_resolve(),
+                  factory()->NewStringFromAsciiChecked("resolvePromise"));
+
   int builtin_index = Natives::GetDebuggerCount();
   // Only run prologue.js and runtime.js at this point.
   DCHECK_EQ(builtin_index, Natives::GetIndex("prologue"));
@@ -3501,10 +3981,11 @@
   // Store the map for the %StringPrototype% after the natives have been compiled
   // and the String function has been set up.
   Handle<JSFunction> string_function(native_context()->string_function());
-  DCHECK(JSObject::cast(
-      string_function->initial_map()->prototype())->HasFastProperties());
+  JSObject* string_function_prototype =
+      JSObject::cast(string_function->initial_map()->prototype());
+  DCHECK(string_function_prototype->HasFastProperties());
   native_context()->set_string_function_prototype_map(
-      HeapObject::cast(string_function->initial_map()->prototype())->map());
+      string_function_prototype->map());
 
   Handle<JSGlobalObject> global_object =
       handle(native_context()->global_object());
@@ -3586,46 +4067,6 @@
     concat->shared()->set_length(1);
   }
 
-  // Set up the Promise constructor.
-  {
-    Handle<String> key = factory()->Promise_string();
-    Handle<JSFunction> function = Handle<JSFunction>::cast(
-        JSReceiver::GetProperty(global_object, key).ToHandleChecked());
-    JSFunction::EnsureHasInitialMap(function);
-    function->initial_map()->set_instance_type(JS_PROMISE_TYPE);
-    function->shared()->SetConstructStub(
-        *isolate()->builtins()->JSBuiltinsConstructStub());
-    InstallWithIntrinsicDefaultProto(isolate(), function,
-                                     Context::PROMISE_FUNCTION_INDEX);
-
-    {
-      Handle<Code> code = handle(
-          isolate()->builtins()->builtin(Builtins::kPromiseResolveClosure),
-          isolate());
-      Handle<SharedFunctionInfo> info =
-          isolate()->factory()->NewSharedFunctionInfo(factory()->empty_string(),
-                                                      code, false);
-      info->set_internal_formal_parameter_count(1);
-      info->set_length(1);
-      native_context()->set_promise_resolve_shared_fun(*info);
-
-      code = handle(
-          isolate()->builtins()->builtin(Builtins::kPromiseRejectClosure),
-          isolate());
-      info = isolate()->factory()->NewSharedFunctionInfo(
-          factory()->empty_string(), code, false);
-      info->set_internal_formal_parameter_count(2);
-      info->set_length(1);
-      native_context()->set_promise_reject_shared_fun(*info);
-    }
-
-    Handle<JSFunction> create_resolving_functions =
-        SimpleCreateFunction(isolate(), factory()->empty_string(),
-                             Builtins::kCreateResolvingFunctions, 2, false);
-    native_context()->set_create_resolving_functions(
-        *create_resolving_functions);
-  }
-
   InstallBuiltinFunctionIds();
 
   // Create a map for accessor property descriptors (a variant of JSObject
@@ -3638,27 +4079,29 @@
     Map::EnsureDescriptorSlack(map, 4);
 
     {  // get
-      DataDescriptor d(factory()->get_string(),
-                       JSAccessorPropertyDescriptor::kGetIndex, NONE,
-                       Representation::Tagged());
+      Descriptor d = Descriptor::DataField(
+          factory()->get_string(), JSAccessorPropertyDescriptor::kGetIndex,
+          NONE, Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     {  // set
-      DataDescriptor d(factory()->set_string(),
-                       JSAccessorPropertyDescriptor::kSetIndex, NONE,
-                       Representation::Tagged());
+      Descriptor d = Descriptor::DataField(
+          factory()->set_string(), JSAccessorPropertyDescriptor::kSetIndex,
+          NONE, Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     {  // enumerable
-      DataDescriptor d(factory()->enumerable_string(),
-                       JSAccessorPropertyDescriptor::kEnumerableIndex, NONE,
-                       Representation::Tagged());
+      Descriptor d =
+          Descriptor::DataField(factory()->enumerable_string(),
+                                JSAccessorPropertyDescriptor::kEnumerableIndex,
+                                NONE, Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     {  // configurable
-      DataDescriptor d(factory()->configurable_string(),
-                       JSAccessorPropertyDescriptor::kConfigurableIndex, NONE,
-                       Representation::Tagged());
+      Descriptor d = Descriptor::DataField(
+          factory()->configurable_string(),
+          JSAccessorPropertyDescriptor::kConfigurableIndex, NONE,
+          Representation::Tagged());
       map->AppendDescriptor(&d);
     }
 
@@ -3681,27 +4124,30 @@
     Map::EnsureDescriptorSlack(map, 4);
 
     {  // value
-      DataDescriptor d(factory()->value_string(),
-                       JSDataPropertyDescriptor::kValueIndex, NONE,
-                       Representation::Tagged());
+      Descriptor d = Descriptor::DataField(
+          factory()->value_string(), JSDataPropertyDescriptor::kValueIndex,
+          NONE, Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     {  // writable
-      DataDescriptor d(factory()->writable_string(),
-                       JSDataPropertyDescriptor::kWritableIndex, NONE,
-                       Representation::Tagged());
+      Descriptor d =
+          Descriptor::DataField(factory()->writable_string(),
+                                JSDataPropertyDescriptor::kWritableIndex, NONE,
+                                Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     {  // enumerable
-      DataDescriptor d(factory()->enumerable_string(),
-                       JSDataPropertyDescriptor::kEnumerableIndex, NONE,
-                       Representation::Tagged());
+      Descriptor d =
+          Descriptor::DataField(factory()->enumerable_string(),
+                                JSDataPropertyDescriptor::kEnumerableIndex,
+                                NONE, Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     {  // configurable
-      DataDescriptor d(factory()->configurable_string(),
-                       JSDataPropertyDescriptor::kConfigurableIndex, NONE,
-                       Representation::Tagged());
+      Descriptor d =
+          Descriptor::DataField(factory()->configurable_string(),
+                                JSDataPropertyDescriptor::kConfigurableIndex,
+                                NONE, Representation::Tagged());
       map->AppendDescriptor(&d);
     }
 
@@ -3743,23 +4189,23 @@
       int old = array_descriptors->SearchWithCache(
           isolate(), *length, array_function->initial_map());
       DCHECK(old != DescriptorArray::kNotFound);
-      AccessorConstantDescriptor desc(
+      Descriptor d = Descriptor::AccessorConstant(
           length, handle(array_descriptors->GetValue(old), isolate()),
           array_descriptors->GetDetails(old).attributes());
-      initial_map->AppendDescriptor(&desc);
+      initial_map->AppendDescriptor(&d);
     }
     {
-      DataDescriptor index_field(factory()->index_string(),
-                                 JSRegExpResult::kIndexIndex, NONE,
-                                 Representation::Tagged());
-      initial_map->AppendDescriptor(&index_field);
+      Descriptor d = Descriptor::DataField(factory()->index_string(),
+                                           JSRegExpResult::kIndexIndex, NONE,
+                                           Representation::Tagged());
+      initial_map->AppendDescriptor(&d);
     }
 
     {
-      DataDescriptor input_field(factory()->input_string(),
-                                 JSRegExpResult::kInputIndex, NONE,
-                                 Representation::Tagged());
-      initial_map->AppendDescriptor(&input_field);
+      Descriptor d = Descriptor::DataField(factory()->input_string(),
+                                           JSRegExpResult::kInputIndex, NONE,
+                                           Representation::Tagged());
+      initial_map->AppendDescriptor(&d);
     }
 
     initial_map->SetInObjectProperties(2);
@@ -3774,29 +4220,29 @@
     Handle<AccessorInfo> arguments_iterator =
         Accessors::ArgumentsIteratorInfo(isolate(), attribs);
     {
-      AccessorConstantDescriptor d(factory()->iterator_symbol(),
-                                   arguments_iterator, attribs);
+      Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
+                                                  arguments_iterator, attribs);
       Handle<Map> map(native_context()->sloppy_arguments_map());
       Map::EnsureDescriptorSlack(map, 1);
       map->AppendDescriptor(&d);
     }
     {
-      AccessorConstantDescriptor d(factory()->iterator_symbol(),
-                                   arguments_iterator, attribs);
+      Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
+                                                  arguments_iterator, attribs);
       Handle<Map> map(native_context()->fast_aliased_arguments_map());
       Map::EnsureDescriptorSlack(map, 1);
       map->AppendDescriptor(&d);
     }
     {
-      AccessorConstantDescriptor d(factory()->iterator_symbol(),
-                                   arguments_iterator, attribs);
+      Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
+                                                  arguments_iterator, attribs);
       Handle<Map> map(native_context()->slow_aliased_arguments_map());
       Map::EnsureDescriptorSlack(map, 1);
       map->AppendDescriptor(&d);
     }
     {
-      AccessorConstantDescriptor d(factory()->iterator_symbol(),
-                                   arguments_iterator, attribs);
+      Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
+                                                  arguments_iterator, attribs);
       Handle<Map> map(native_context()->strict_arguments_map());
       Map::EnsureDescriptorSlack(map, 1);
       map->AppendDescriptor(&d);
@@ -3811,26 +4257,25 @@
   static const char* harmony_tailcalls_natives[] = {nullptr};
   static const char* harmony_sharedarraybuffer_natives[] = {
       "native harmony-atomics.js", NULL};
-  static const char* harmony_simd_natives[] = {"native harmony-simd.js",
-                                               nullptr};
   static const char* harmony_do_expressions_natives[] = {nullptr};
   static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
   static const char* harmony_regexp_named_captures_natives[] = {nullptr};
   static const char* harmony_regexp_property_natives[] = {nullptr};
   static const char* harmony_function_sent_natives[] = {nullptr};
   static const char* harmony_array_prototype_values_natives[] = {nullptr};
-  static const char* harmony_string_padding_natives[] = {
-      "native harmony-string-padding.js", nullptr};
 #ifdef V8_I18N_SUPPORT
-  static const char* icu_case_mapping_natives[] = {"native icu-case-mapping.js",
-                                                   nullptr};
-  static const char* datetime_format_to_parts_natives[] = {
-      "native datetime-format-to-parts.js", nullptr};
+  static const char* icu_case_mapping_natives[] = {nullptr};
+  static const char* datetime_format_to_parts_natives[] = {nullptr};
 #endif
-  static const char* harmony_async_await_natives[] = {nullptr};
   static const char* harmony_restrictive_generators_natives[] = {nullptr};
   static const char* harmony_trailing_commas_natives[] = {nullptr};
+  static const char* harmony_function_tostring_natives[] = {nullptr};
   static const char* harmony_class_fields_natives[] = {nullptr};
+  static const char* harmony_object_rest_spread_natives[] = {nullptr};
+  static const char* harmony_async_iteration_natives[] = {nullptr};
+  static const char* harmony_dynamic_import_natives[] = {nullptr};
+  static const char* harmony_promise_finally_natives[] = {nullptr};
+  static const char* harmony_template_escapes_natives[] = {nullptr};
 
   for (int i = ExperimentalNatives::GetDebuggerCount();
        i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -3948,7 +4393,6 @@
   }
 }
 
-
 #undef INSTALL_BUILTIN_ID
 
 
@@ -3975,8 +4419,6 @@
 
   Factory* factory = isolate->factory();
   HandleScope scope(isolate);
-  Handle<JSGlobalObject> global(JSGlobalObject::cast(
-      native_context->global_object()));
 
   Handle<JSObject> Error = isolate->error_function();
   Handle<String> name =
@@ -3984,26 +4426,11 @@
   Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
   JSObject::AddProperty(Error, name, stack_trace_limit, NONE);
 
-  // Expose the debug global object in global if a name for it is specified.
-  if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
-    // If loading fails we just bail out without installing the
-    // debugger but without tanking the whole context.
-    Debug* debug = isolate->debug();
-    if (!debug->Load()) return true;
-    Handle<Context> debug_context = debug->debug_context();
-    // Set the security token for the debugger context to the same as
-    // the shell native context to allow calling between these (otherwise
-    // exposing debug global object doesn't make much sense).
-    debug_context->set_security_token(native_context->security_token());
-    Handle<String> debug_string =
-        factory->InternalizeUtf8String(FLAG_expose_debug_as);
-    uint32_t index;
-    if (debug_string->AsArrayIndex(&index)) return true;
-    Handle<Object> global_proxy(debug_context->global_proxy(), isolate);
-    JSObject::AddProperty(global, debug_string, global_proxy, DONT_ENUM);
+  if (FLAG_expose_wasm || FLAG_validate_asm) {
+    WasmJs::Install(isolate);
   }
 
-  WasmJs::Install(isolate, global);
+  InstallFFIMap(isolate);
 
   return true;
 }
@@ -4133,7 +4560,6 @@
     isolate->clear_pending_exception();
   }
   extension_states->set_state(current, INSTALLED);
-  isolate->NotifyExtensionInstalled();
   return result;
 }
 
@@ -4207,27 +4633,30 @@
         Handle<DescriptorArray>(from->map()->instance_descriptors());
     for (int i = 0; i < from->map()->NumberOfOwnDescriptors(); i++) {
       PropertyDetails details = descs->GetDetails(i);
-      switch (details.type()) {
-        case DATA: {
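+      // Dispatch on property location (kField vs. kDescriptor) and kind
+      // (kData vs. kAccessor) instead of the old combined PropertyType
+      // values (DATA, DATA_CONSTANT, ACCESSOR, ACCESSOR_CONSTANT).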
+      if (details.location() == kField) {
+        if (details.kind() == kData) {
           HandleScope inner(isolate());
           Handle<Name> key = Handle<Name>(descs->GetKey(i));
           FieldIndex index = FieldIndex::ForDescriptor(from->map(), i);
           DCHECK(!descs->GetDetails(i).representation().IsDouble());
-          Handle<Object> value = Handle<Object>(from->RawFastPropertyAt(index),
-                                                isolate());
+          Handle<Object> value(from->RawFastPropertyAt(index), isolate());
           JSObject::AddProperty(to, key, value, details.attributes());
-          break;
+        } else {
+          DCHECK_EQ(kAccessor, details.kind());
+          UNREACHABLE();
         }
-        case DATA_CONSTANT: {
+
+      } else {
+        DCHECK_EQ(kDescriptor, details.location());
+        if (details.kind() == kData) {
+          DCHECK(!FLAG_track_constant_fields);
           HandleScope inner(isolate());
           Handle<Name> key = Handle<Name>(descs->GetKey(i));
-          Handle<Object> constant(descs->GetConstant(i), isolate());
-          JSObject::AddProperty(to, key, constant, details.attributes());
-          break;
-        }
-        case ACCESSOR:
-          UNREACHABLE();
-        case ACCESSOR_CONSTANT: {
+          Handle<Object> value(descs->GetValue(i), isolate());
+          JSObject::AddProperty(to, key, value, details.attributes());
+
+        } else {
+          DCHECK_EQ(kAccessor, details.kind());
           Handle<Name> key(descs->GetKey(i));
           LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
           CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
@@ -4236,59 +4665,63 @@
           HandleScope inner(isolate());
           DCHECK(!to->HasFastProperties());
           // Add to dictionary.
-          Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
-          PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
+          Handle<Object> value(descs->GetValue(i), isolate());
+          PropertyDetails d(kAccessor, details.attributes(), i + 1,
                             PropertyCellType::kMutable);
-          JSObject::SetNormalizedProperty(to, key, callbacks, d);
-          break;
+          JSObject::SetNormalizedProperty(to, key, value, d);
         }
       }
     }
   } else if (from->IsJSGlobalObject()) {
+    // Copy all keys and values in enumeration order.
     Handle<GlobalDictionary> properties =
         Handle<GlobalDictionary>(from->global_dictionary());
-    int capacity = properties->Capacity();
-    for (int i = 0; i < capacity; i++) {
-      Object* raw_key(properties->KeyAt(i));
-      if (properties->IsKey(isolate(), raw_key)) {
-        DCHECK(raw_key->IsName());
-        // If the property is already there we skip it.
-        Handle<Name> key(Name::cast(raw_key));
-        LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
-        CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
-        if (it.IsFound()) continue;
-        // Set the property.
-        DCHECK(properties->ValueAt(i)->IsPropertyCell());
-        Handle<PropertyCell> cell(PropertyCell::cast(properties->ValueAt(i)));
-        Handle<Object> value(cell->value(), isolate());
-        if (value->IsTheHole(isolate())) continue;
-        PropertyDetails details = cell->property_details();
-        DCHECK_EQ(kData, details.kind());
-        JSObject::AddProperty(to, key, value, details.attributes());
-      }
+    Handle<FixedArray> key_indices =
+        GlobalDictionary::IterationIndices(properties);
+    for (int i = 0; i < key_indices->length(); i++) {
+      int key_index = Smi::cast(key_indices->get(i))->value();
+      Object* raw_key = properties->KeyAt(key_index);
+      DCHECK(properties->IsKey(isolate(), raw_key));
+      DCHECK(raw_key->IsName());
+      // If the property is already there we skip it.
+      Handle<Name> key(Name::cast(raw_key), isolate());
+      LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+      CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+      if (it.IsFound()) continue;
+      // Set the property.
+      DCHECK(properties->ValueAt(key_index)->IsPropertyCell());
+      Handle<PropertyCell> cell(
+          PropertyCell::cast(properties->ValueAt(key_index)), isolate());
+      Handle<Object> value(cell->value(), isolate());
+      if (value->IsTheHole(isolate())) continue;
+      PropertyDetails details = cell->property_details();
+      if (details.kind() != kData) continue;
+      JSObject::AddProperty(to, key, value, details.attributes());
     }
   } else {
+    // Copy all keys and values in enumeration order.
     Handle<NameDictionary> properties =
         Handle<NameDictionary>(from->property_dictionary());
-    int capacity = properties->Capacity();
-    for (int i = 0; i < capacity; i++) {
-      Object* raw_key(properties->KeyAt(i));
-      if (properties->IsKey(isolate(), raw_key)) {
-        DCHECK(raw_key->IsName());
-        // If the property is already there we skip it.
-        Handle<Name> key(Name::cast(raw_key));
-        LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
-        CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
-        if (it.IsFound()) continue;
-        // Set the property.
-        Handle<Object> value = Handle<Object>(properties->ValueAt(i),
-                                              isolate());
-        DCHECK(!value->IsCell());
-        DCHECK(!value->IsTheHole(isolate()));
-        PropertyDetails details = properties->DetailsAt(i);
-        DCHECK_EQ(kData, details.kind());
-        JSObject::AddProperty(to, key, value, details.attributes());
-      }
+    Handle<FixedArray> key_indices =
+        NameDictionary::IterationIndices(properties);
+    for (int i = 0; i < key_indices->length(); i++) {
+      int key_index = Smi::cast(key_indices->get(i))->value();
+      Object* raw_key = properties->KeyAt(key_index);
+      DCHECK(properties->IsKey(isolate(), raw_key));
+      DCHECK(raw_key->IsName());
+      // If the property is already there we skip it.
+      Handle<Name> key(Name::cast(raw_key), isolate());
+      LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+      CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+      if (it.IsFound()) continue;
+      // Set the property.
+      Handle<Object> value =
+          Handle<Object>(properties->ValueAt(key_index), isolate());
+      DCHECK(!value->IsCell());
+      DCHECK(!value->IsTheHole(isolate()));
+      PropertyDetails details = properties->DetailsAt(key_index);
+      DCHECK_EQ(kData, details.kind());
+      JSObject::AddProperty(to, key, value, details.attributes());
     }
   }
 }
@@ -4357,11 +4790,12 @@
   bool enabled_;
 };
 
-Genesis::Genesis(Isolate* isolate,
-                 MaybeHandle<JSGlobalProxy> maybe_global_proxy,
-                 v8::Local<v8::ObjectTemplate> global_proxy_template,
-                 v8::ExtensionConfiguration* extensions,
-                 size_t context_snapshot_index, GlobalContextType context_type)
+Genesis::Genesis(
+    Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+    v8::Local<v8::ObjectTemplate> global_proxy_template,
+    size_t context_snapshot_index,
+    v8::DeserializeInternalFieldsCallback internal_fields_deserializer,
+    GlobalContextType context_type)
     : isolate_(isolate), active_(isolate->bootstrapper()) {
   NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
   result_ = Handle<Context>::null();
@@ -4385,12 +4819,22 @@
   // and initialize it later in CreateNewGlobals.
   Handle<JSGlobalProxy> global_proxy;
   if (!maybe_global_proxy.ToHandle(&global_proxy)) {
-    const int internal_field_count =
-        !global_proxy_template.IsEmpty()
-            ? global_proxy_template->InternalFieldCount()
-            : 0;
-    global_proxy = isolate->factory()->NewUninitializedJSGlobalProxy(
-        JSGlobalProxy::SizeWithInternalFields(internal_field_count));
+    int instance_size = 0;
+    if (context_snapshot_index > 0) {
+      // The global proxy function to reinitialize this global proxy is in the
+      // context that is yet to be deserialized. We need to prepare a global
+      // proxy of the correct size.
+      Object* size = isolate->heap()->serialized_global_proxy_sizes()->get(
+          static_cast<int>(context_snapshot_index) - 1);
+      instance_size = Smi::cast(size)->value();
+    } else {
+      instance_size = JSGlobalProxy::SizeWithInternalFields(
+          global_proxy_template.IsEmpty()
+              ? 0
+              : global_proxy_template->InternalFieldCount());
+    }
+    global_proxy =
+        isolate->factory()->NewUninitializedJSGlobalProxy(instance_size);
   }
 
   // We can only de-serialize a context if the isolate was initialized from
@@ -4398,7 +4842,8 @@
   // Also create a context from scratch to expose natives, if required by flag.
   if (!isolate->initialized_from_snapshot() ||
       !Snapshot::NewContextFromSnapshot(isolate, global_proxy,
-                                        context_snapshot_index)
+                                        context_snapshot_index,
+                                        internal_fields_deserializer)
            .ToHandle(&native_context_)) {
     native_context_ = Handle<Context>();
   }
@@ -4416,23 +4861,29 @@
       Map::TraceAllTransitions(object_fun->initial_map());
     }
 #endif
-    Handle<JSGlobalObject> global_object =
-        CreateNewGlobals(global_proxy_template, global_proxy);
 
-    HookUpGlobalProxy(global_object, global_proxy);
-    HookUpGlobalObject(global_object);
+    if (context_snapshot_index == 0) {
+      Handle<JSGlobalObject> global_object =
+          CreateNewGlobals(global_proxy_template, global_proxy);
+      HookUpGlobalObject(global_object);
 
-    if (!ConfigureGlobalObjects(global_proxy_template)) return;
+      if (!ConfigureGlobalObjects(global_proxy_template)) return;
+    } else {
+      // The global proxy needs to be integrated into the native context.
+      HookUpGlobalProxy(global_proxy);
+    }
+    DCHECK(!global_proxy->IsDetachedFrom(native_context()->global_object()));
   } else {
+    DCHECK_EQ(0u, context_snapshot_index);
     // We get here if there was no context snapshot.
     CreateRoots();
     Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
     CreateStrictModeFunctionMaps(empty_function);
     CreateIteratorMaps(empty_function);
+    CreateAsyncIteratorMaps();
     CreateAsyncFunctionMaps(empty_function);
     Handle<JSGlobalObject> global_object =
         CreateNewGlobals(global_proxy_template, global_proxy);
-    HookUpGlobalProxy(global_object, global_proxy);
     InitializeGlobal(global_object, empty_function, context_type);
     InitializeNormalizedMapCaches();
 
@@ -4444,9 +4895,6 @@
     if (!ConfigureGlobalObjects(global_proxy_template)) return;
 
     isolate->counters()->contexts_created_from_scratch()->Increment();
-    // Re-initialize the counter because it got incremented during snapshot
-    // creation.
-    isolate->native_context()->set_errors_thrown(Smi::kZero);
   }
 
   // Install experimental natives. Do not include them into the
@@ -4460,6 +4908,15 @@
       if (FLAG_experimental_extras) {
         if (!InstallExperimentalExtraNatives()) return;
       }
+
+      // Store String.prototype's map again in case it has been changed by
+      // experimental natives.
+      Handle<JSFunction> string_function(native_context()->string_function());
+      JSObject* string_function_prototype =
+          JSObject::cast(string_function->initial_map()->prototype());
+      DCHECK(string_function_prototype->HasFastProperties());
+      native_context()->set_string_function_prototype_map(
+          string_function_prototype->map());
     }
     // The serializer cannot serialize typed arrays. Reset those typed arrays
     // for each new context.
@@ -4475,6 +4932,7 @@
   // We do not need script contexts for native scripts.
   DCHECK_EQ(1, native_context()->script_context_table()->used());
 
+  native_context()->ResetErrorsThrown();
   result_ = native_context();
 }
 
@@ -4507,11 +4965,19 @@
     global_proxy = factory()->NewUninitializedJSGlobalProxy(proxy_size);
   }
 
-  // CreateNewGlobals.
+  // Create a remote object as the global object.
   Handle<ObjectTemplateInfo> global_proxy_data =
-      v8::Utils::OpenHandle(*global_proxy_template);
+      Utils::OpenHandle(*global_proxy_template);
   Handle<FunctionTemplateInfo> global_constructor(
       FunctionTemplateInfo::cast(global_proxy_data->constructor()));
+
+  Handle<ObjectTemplateInfo> global_object_template(
+      ObjectTemplateInfo::cast(global_constructor->prototype_template()));
+  Handle<JSObject> global_object =
+      ApiNatives::InstantiateRemoteObject(
+          global_object_template).ToHandleChecked();
+
+  // (Re)initialize the global proxy object.
   Handle<SharedFunctionInfo> shared =
       FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate,
                                                           global_constructor);
@@ -4527,19 +4993,20 @@
   JSFunction::SetInitialMap(global_proxy_function, global_proxy_map,
                             factory()->null_value());
   global_proxy_map->set_is_access_check_needed(true);
-  global_proxy_map->set_is_callable();
-  global_proxy_map->set_is_constructor(true);
   global_proxy_map->set_has_hidden_prototype(true);
 
   Handle<String> global_name = factory()->global_string();
   global_proxy_function->shared()->set_instance_class_name(*global_name);
   factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
 
-  // HookUpGlobalProxy.
+  // A remote global proxy has no native context.
   global_proxy->set_native_context(heap()->null_value());
 
-  // DetachGlobal.
-  JSObject::ForceSetPrototype(global_proxy, factory()->null_value());
+  // Configure the hidden prototype chain of the global proxy.
+  JSObject::ForceSetPrototype(global_proxy, global_object);
+  // TODO(dcheng): This is a hack. Why does this need to be manually called
+  // here? Line 4812 should have taken care of it?
+  global_proxy->map()->set_has_hidden_prototype(true);
 
   global_proxy_ = global_proxy;
 }
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 51022fd..81ef396 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -80,6 +80,7 @@
       MaybeHandle<JSGlobalProxy> maybe_global_proxy,
       v8::Local<v8::ObjectTemplate> global_object_template,
       v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
+      v8::DeserializeInternalFieldsCallback internal_fields_deserializer,
       GlobalContextType context_type = FULL_CONTEXT);
 
   Handle<JSGlobalProxy> NewRemoteContext(
@@ -120,8 +121,6 @@
   static bool CompileExperimentalExtraBuiltin(Isolate* isolate, int index);
 
   static void ExportFromRuntime(Isolate* isolate, Handle<JSObject> container);
-  static void ExportExperimentalFromRuntime(Isolate* isolate,
-                                            Handle<JSObject> container);
 
  private:
   Isolate* isolate_;
diff --git a/src/builtins/arm/builtins-arm.cc b/src/builtins/arm/builtins-arm.cc
index 6103971..2d03783 100644
--- a/src/builtins/arm/builtins-arm.cc
+++ b/src/builtins/arm/builtins-arm.cc
@@ -326,11 +326,11 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    FastNewObjectStub stub(masm->isolate());
     __ SmiTag(r6);
     __ EnterBuiltinFrame(cp, r1, r6);
     __ Push(r2);  // first argument
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(r2);
     __ LeaveBuiltinFrame(cp, r1, r6);
     __ SmiUntag(r6);
@@ -474,11 +474,11 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    FastNewObjectStub stub(masm->isolate());
     __ SmiTag(r6);
     __ EnterBuiltinFrame(cp, r1, r6);
     __ Push(r2);  // first argument
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(r2);
     __ LeaveBuiltinFrame(cp, r1, r6);
     __ SmiUntag(r6);
@@ -552,6 +552,8 @@
 void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
                                     bool create_implicit_receiver,
                                     bool check_derived_construct) {
+  Label post_instantiation_deopt_entry;
+
   // ----------- S t a t e -------------
   //  -- r0     : number of arguments
   //  -- r1     : constructor function
@@ -574,8 +576,8 @@
     if (create_implicit_receiver) {
       // Allocate the new receiver object.
       __ Push(r1, r3);
-      FastNewObjectStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+              RelocInfo::CODE_TARGET);
       __ mov(r4, r0);
       __ Pop(r1, r3);
 
@@ -601,6 +603,9 @@
       __ PushRoot(Heap::kTheHoleValueRootIndex);
     }
 
+    // Deoptimizer re-enters stub code here.
+    __ bind(&post_instantiation_deopt_entry);
+
     // Set up pointer to last argument.
     __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
 
@@ -633,7 +638,8 @@
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
-      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+      masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+          masm->pc_offset());
     }
 
     // Restore context from the frame.
@@ -697,6 +703,35 @@
     __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
   }
   __ Jump(lr);
+
+  // Store offset of trampoline address for deoptimizer. This is the bailout
+  // point after the receiver instantiation but before the function invocation.
+  // We need to restore some registers in order to continue the above code.
+  if (create_implicit_receiver && !is_api_function) {
+    masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+        masm->pc_offset());
+
+    // ----------- S t a t e -------------
+    //  -- r0    : newly allocated object
+    //  -- sp[0] : constructor function
+    // -----------------------------------
+
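+    // Restore the constructor and re-push the two copies of the receiver
+    // that the stub keeps on the stack (one may be returned as the result,
+    // the other is popped by the callee).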
+    __ pop(r1);
+    __ push(r0);
+    __ push(r0);
+
+    // Retrieve smi-tagged arguments count from the stack.
+    __ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+    __ SmiUntag(r0);
+
+    // Retrieve the new target value from the stack. This was placed into the
+    // frame description in place of the receiver by the optimizing compiler.
+    __ add(r3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+    __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2));
+
+    // Continue with constructor function invocation.
+    __ b(&post_instantiation_deopt_entry);
+  }
 }
 
 }  // namespace
@@ -737,19 +772,18 @@
   __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kResumeModeOffset));
 
   // Load suspended function and context.
-  __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
   __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+  __ ldr(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
   Label stepping_prepared;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(masm->isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  __ mov(ip, Operand(last_step_action));
+  ExternalReference debug_hook =
+      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
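+  // A non-zero debug hook flag means the debugger wants to observe every
+  // function call, so take the slow path.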
+  __ mov(ip, Operand(debug_hook));
   __ ldrsb(ip, MemOperand(ip));
-  __ cmp(ip, Operand(StepIn));
-  __ b(ge, &prepare_step_in_if_stepping);
+  __ cmp(ip, Operand(0));
+  __ b(ne, &prepare_step_in_if_stepping);
 
   // Flood function if we need to continue stepping in the suspended generator.
   ExternalReference debug_suspended_generator =
@@ -790,14 +824,15 @@
     __ bind(&done_loop);
   }
 
-  // Dispatch on the kind of generator object.
-  Label old_generator;
-  __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
-  __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
-  __ b(ne, &old_generator);
+  // Underlying function needs to have bytecode available.
+  if (FLAG_debug_code) {
+    __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+    __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kMissingBytecodeArray);
+  }
 
-  // New-style (ignition/turbofan) generator object
+  // Resume (Ignition/TurboFan) generator object.
   {
     __ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
     __ ldr(r0, FieldMemOperand(
@@ -812,54 +847,11 @@
     __ Jump(r5);
   }
 
-  // Old-style (full-codegen) generator object
-  __ bind(&old_generator);
-  {
-    // Enter a new JavaScript frame, and initialize its slots as they were when
-    // the generator was suspended.
-    DCHECK(!FLAG_enable_embedded_constant_pool);
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ Push(lr, fp);
-    __ Move(fp, sp);
-    __ Push(cp, r4);
-
-    // Restore the operand stack.
-    __ ldr(r0, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
-    __ ldr(r3, FieldMemOperand(r0, FixedArray::kLengthOffset));
-    __ add(r0, r0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    __ add(r3, r0, Operand(r3, LSL, kPointerSizeLog2 - 1));
-    {
-      Label done_loop, loop;
-      __ bind(&loop);
-      __ cmp(r0, r3);
-      __ b(eq, &done_loop);
-      __ ldr(ip, MemOperand(r0, kPointerSize, PostIndex));
-      __ Push(ip);
-      __ b(&loop);
-      __ bind(&done_loop);
-    }
-
-    // Reset operand stack so we don't leak.
-    __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
-    __ str(ip, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
-
-    // Resume the generator function at the continuation.
-    __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-    __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
-    __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
-    __ add(r3, r3, Operand(r2, ASR, 1));
-    __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
-    __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
-    __ Move(r0, r1);  // Continuation expects generator object in r0.
-    __ Jump(r3);
-  }
-
   __ bind(&prepare_step_in_if_stepping);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
     __ Push(r1, r2, r4);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(r1, r2);
     __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
   }
@@ -1045,7 +1037,7 @@
   Register debug_info = kInterpreterBytecodeArrayRegister;
   DCHECK(!debug_info.is(r0));
   __ ldr(debug_info, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
-  __ cmp(debug_info, Operand(DebugInfo::uninitialized()));
+  __ SmiTst(debug_info);
   // Load original bytecode array or the debug copy.
   __ ldr(kInterpreterBytecodeArrayRegister,
          FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset), eq);
@@ -1059,15 +1051,15 @@
   __ b(ne, &switch_to_different_code_kind);
 
   // Increment invocation count for the function.
-  __ ldr(r2, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
-  __ ldr(r2, FieldMemOperand(r2, LiteralsArray::kFeedbackVectorOffset));
+  __ ldr(r2, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
+  __ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
   __ ldr(r9, FieldMemOperand(
-                 r2, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                         TypeFeedbackVector::kHeaderSize));
+                 r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                         FeedbackVector::kHeaderSize));
   __ add(r9, r9, Operand(Smi::FromInt(1)));
   __ str(r9, FieldMemOperand(
-                 r2, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                         TypeFeedbackVector::kHeaderSize));
+                 r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                         FeedbackVector::kHeaderSize));
 
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
@@ -1078,6 +1070,11 @@
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Reset code age.
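+  // Marking the bytecode as having no age treats it as freshly executed for
+  // the aging mechanism.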
+  __ mov(r9, Operand(BytecodeArray::kNoAgeBytecodeAge));
+  __ strb(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                              BytecodeArray::kBytecodeAgeOffset));
+
   // Load the initial bytecode offset.
   __ mov(kInterpreterBytecodeOffsetRegister,
          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1186,7 +1183,7 @@
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
-    CallableType function_type) {
+    InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- r0 : the number of arguments (not including the receiver)
   //  -- r2 : the address of the first argument to be pushed. Subsequent
@@ -1202,12 +1199,14 @@
   Generate_InterpreterPushArgs(masm, r3, r2, r4, r5, &stack_overflow);
 
   // Call the target.
-  if (function_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
                                                       tail_call_mode),
             RelocInfo::CODE_TARGET);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
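+    // The spread is still on the stack; CallWithSpread expands it into
+    // individual arguments before performing the call.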
+    __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(function_type, CallableType::kAny);
     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
@@ -1223,7 +1222,7 @@
 
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
-    MacroAssembler* masm, CallableType construct_type) {
+    MacroAssembler* masm, InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   // -- r0 : argument count (not including receiver)
   // -- r3 : new target
@@ -1241,7 +1240,7 @@
   Generate_InterpreterPushArgs(masm, r0, r4, r5, r6, &stack_overflow);
 
   __ AssertUndefinedOrAllocationSite(r2, r5);
-  if (construct_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ AssertFunction(r1);
 
     // Tail call to the function-specific construct stub (still in the caller
@@ -1250,9 +1249,12 @@
     __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
     // Jump to the construct function.
     __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
-
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    // Call the constructor with r0, r1, and r3 unmodified.
+    __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(construct_type, CallableType::kAny);
+    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
     // Call the constructor with r0, r1, and r3 unmodified.
     __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   }
@@ -1374,20 +1376,26 @@
   Register argument_count = r0;
   Register closure = r1;
   Register new_target = r3;
+  Register map = argument_count;
+  Register index = r2;
+
+  // Do we have a valid feedback vector?
+  __ ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ ldr(index, FieldMemOperand(index, Cell::kValueOffset));
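+  // An undefined value means no feedback vector has been allocated yet, so
+  // the runtime has to be involved.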
+  __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
+                &gotta_call_runtime_no_stack);
+
   __ push(argument_count);
   __ push(new_target);
   __ push(closure);
 
-  Register map = argument_count;
-  Register index = r2;
   __ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   __ ldr(map,
          FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
   __ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset));
   __ cmp(index, Operand(Smi::FromInt(2)));
-  __ b(lt, &gotta_call_runtime);
+  __ b(lt, &try_shared);
 
-  // Find literals.
   // r3  : native context
   // r2  : length / index
   // r0  : optimized code map
@@ -1407,26 +1415,6 @@
   __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
   __ cmp(temp, native_context);
   __ b(ne, &loop_bottom);
-  // OSR id set to none?
-  __ ldr(temp, FieldMemOperand(array_pointer,
-                               SharedFunctionInfo::kOffsetToPreviousOsrAstId));
-  const int bailout_id = BailoutId::None().ToInt();
-  __ cmp(temp, Operand(Smi::FromInt(bailout_id)));
-  __ b(ne, &loop_bottom);
-  // Literals available?
-  __ ldr(temp, FieldMemOperand(array_pointer,
-                               SharedFunctionInfo::kOffsetToPreviousLiterals));
-  __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
-
-  // Save the literals in the closure.
-  __ ldr(r4, MemOperand(sp, 0));
-  __ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
-  __ push(index);
-  __ RecordWriteField(r4, JSFunction::kLiteralsOffset, temp, index,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ pop(index);
 
   // Code available?
   Register entry = r4;
@@ -1436,7 +1424,7 @@
   __ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
 
-  // Found literals and code. Get them into the closure and return.
+  // Found code. Get it into the closure and return.
   __ pop(closure);
   // Store code entry in the closure.
   __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1471,9 +1459,7 @@
   __ cmp(index, Operand(Smi::FromInt(1)));
   __ b(gt, &loop_top);
 
-  // We found neither literals nor code.
-  __ jmp(&gotta_call_runtime);
-
+  // We found no code.
   __ bind(&try_shared);
   __ pop(closure);
   __ pop(new_target);
@@ -1485,14 +1471,14 @@
                               SharedFunctionInfo::kMarkedForTierUpByteOffset));
   __ tst(r5, Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
   __ b(ne, &gotta_call_runtime_no_stack);
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset));
-  __ and_(r5, r5, Operand(Code::KindField::kMask));
-  __ mov(r5, Operand(r5, LSR, Code::KindField::kShift));
-  __ cmp(r5, Operand(Code::BUILTIN));
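+  // masm->CodeObject() is the CompileLazy builtin itself; if the SFI still
+  // points at it, the function has no real code to install.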
+  __ Move(r5, masm->CodeObject());
+  __ cmp(entry, r5);
   __ b(eq, &gotta_call_runtime_no_stack);
-  // Yes, install the full code.
+
+  // Install the SFI's code entry.
   __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, r5);
@@ -1609,14 +1595,9 @@
   __ mov(pc, r0);
 }
 
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
-  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
-  }                                                           \
-  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                              \
+  void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+    GenerateMakeCodeYoungAgainCommon(masm);                               \
   }
 CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2112,20 +2093,20 @@
   __ bind(&target_not_constructor);
   {
     __ str(r1, MemOperand(sp, 0));
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 
   // 4c. The new.target is not a constructor, throw an appropriate TypeError.
   __ bind(&new_target_not_constructor);
   {
     __ str(r3, MemOperand(sp, 0));
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 }
 
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(r0);
-  __ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
                        (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
                        fp.bit() | lr.bit());
@@ -2158,7 +2139,8 @@
 
   // Create the list of arguments from the array-like argumentsList.
   {
-    Label create_arguments, create_array, create_runtime, done_create;
+    Label create_arguments, create_array, create_holey_array, create_runtime,
+        done_create;
     __ JumpIfSmi(r0, &create_runtime);
 
     // Load the map of argumentsList into r2.
@@ -2202,17 +2184,37 @@
     __ mov(r0, r4);
     __ b(&done_create);
 
+    // For holey JSArrays we need to check that the array prototype chain
+    // protector is intact and that our prototype really is the initial
+    // Array.prototype.
+    __ bind(&create_holey_array);
+    __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+    __ ldr(r4, ContextMemOperand(r4, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+    __ cmp(r2, r4);
+    __ b(ne, &create_runtime);
+    __ LoadRoot(r4, Heap::kArrayProtectorRootIndex);
+    __ ldr(r2, FieldMemOperand(r4, PropertyCell::kValueOffset));
+    __ cmp(r2, Operand(Smi::FromInt(Isolate::kProtectorValid)));
+    __ b(ne, &create_runtime);
+    __ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
+    __ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
+    __ SmiUntag(r2);
+    __ b(&done_create);
+
     // Try to create the list from a JSArray object.
+    //  -- r2 and r4 must be preserved until the branch to create_holey_array.
     __ bind(&create_array);
-    __ ldr(r2, FieldMemOperand(r2, Map::kBitField2Offset));
-    __ DecodeField<Map::ElementsKindBits>(r2);
+    __ ldr(r5, FieldMemOperand(r2, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(r5);
     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
     STATIC_ASSERT(FAST_ELEMENTS == 2);
-    __ cmp(r2, Operand(FAST_ELEMENTS));
+    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+    __ cmp(r5, Operand(FAST_HOLEY_ELEMENTS));
     __ b(hi, &create_runtime);
-    __ cmp(r2, Operand(FAST_HOLEY_SMI_ELEMENTS));
-    __ b(eq, &create_runtime);
+    // Only FAST_XXX kinds remain after this point; the FAST_HOLEY_XXX kinds
+    // are the odd values.
+    __ tst(r5, Operand(1));
+    __ b(ne, &create_holey_array);
+    // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
     __ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
     __ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
     __ SmiUntag(r2);
@@ -2247,12 +2249,16 @@
   // Push arguments onto the stack (thisArgument is already on the stack).
   {
     __ mov(r4, Operand(0));
+    __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
+    __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
     Label done, loop;
     __ bind(&loop);
     __ cmp(r4, r2);
     __ b(eq, &done);
     __ add(ip, r0, Operand(r4, LSL, kPointerSizeLog2));
     __ ldr(ip, FieldMemOperand(ip, FixedArray::kHeaderSize));
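+    // Holes are replaced with undefined while pushing; the protector check
+    // on the holey-array path guarantees this matches ordinary element
+    // access.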
+    __ cmp(r5, ip);
+    __ mov(ip, r6, LeaveCC, eq);
     __ Push(ip);
     __ add(r4, r4, Operand(1));
     __ b(&loop);
@@ -2268,6 +2274,72 @@
   }
 }
 
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+                                           Handle<Code> code) {
+  // ----------- S t a t e -------------
+  //  -- r1    : the target to call (can be any Object)
+  //  -- r2    : start index (to support rest parameters)
+  //  -- lr    : return address.
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ cmp(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ b(eq, &arguments_adaptor);
+  {
+    __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r0, FieldMemOperand(
+                   r0, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ mov(r3, fp);
+  }
+  __ b(&arguments_done);
+  __ bind(&arguments_adaptor);
+  {
+    // Load the length from the ArgumentsAdaptorFrame.
+    __ ldr(r0, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  }
+  __ bind(&arguments_done);
+
+  Label stack_empty, stack_done, stack_overflow;
+  __ SmiUntag(r0);
+  __ sub(r0, r0, r2, SetCC);
+  __ b(le, &stack_empty);
+  {
+    // Check for stack overflow.
+    Generate_StackOverflowCheck(masm, r0, r2, &stack_overflow);
+
+    // Forward the arguments from the caller frame.
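+    // r3 points at the frame holding the arguments; copy r0 of them onto
+    // the current stack.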
+    {
+      Label loop;
+      __ add(r3, r3, Operand(kPointerSize));
+      __ mov(r2, r0);
+      __ bind(&loop);
+      {
+        __ ldr(ip, MemOperand(r3, r2, LSL, kPointerSizeLog2));
+        __ push(ip);
+        __ sub(r2, r2, Operand(1), SetCC);
+        __ b(ne, &loop);
+      }
+    }
+  }
+  __ b(&stack_done);
+  __ bind(&stack_overflow);
+  __ TailCallRuntime(Runtime::kThrowStackOverflow);
+  __ bind(&stack_empty);
+  {
+    // We just pass the receiver, which is already on the stack.
+    __ mov(r0, Operand(0));
+  }
+  __ bind(&stack_done);
+
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
 namespace {
 
 // Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2318,7 +2390,7 @@
     Label no_interpreter_frame;
     __ ldr(scratch3,
            MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
-    __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
+    __ cmp(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
     __ b(ne, &no_interpreter_frame);
     __ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
     __ bind(&no_interpreter_frame);
@@ -2330,7 +2402,8 @@
   __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ ldr(scratch3,
          MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(scratch3,
+         Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(ne, &no_arguments_adaptor);
 
   // Drop current frame and load arguments count from arguments adaptor frame.
@@ -2638,6 +2711,161 @@
   }
 }
 
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+  Register argc = r0;
+  Register constructor = r1;
+  Register new_target = r3;
+
+  Register scratch = r2;
+  Register scratch2 = r6;
+
+  Register spread = r4;
+  Register spread_map = r5;
+
+  Register spread_len = r5;
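+  // Note: spread_len aliases spread_map (r5); the map is dead once the
+  // length has been loaded.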
+
+  Label runtime_call, push_args;
+  __ ldr(spread, MemOperand(sp, 0));
+  __ JumpIfSmi(spread, &runtime_call);
+  __ ldr(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+
+  // Check that the spread is an array.
+  __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
+  __ b(ne, &runtime_call);
+
+  // Check that we have the original ArrayPrototype.
+  __ ldr(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+  __ ldr(scratch2, NativeContextMemOperand());
+  __ ldr(scratch2,
+         ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+  __ cmp(scratch, scratch2);
+  __ b(ne, &runtime_call);
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
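+  // (A spread normally goes through the iteration protocol; the fast path
+  // below is only valid while iteration is equivalent to indexed access.)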
+  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+  __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ cmp(scratch, Operand(Smi::FromInt(Isolate::kProtectorValid)));
+  __ b(ne, &runtime_call);
+
+  // Check that the map of the initial array iterator hasn't changed.
+  __ ldr(scratch2, NativeContextMemOperand());
+  __ ldr(scratch,
+         ContextMemOperand(scratch2,
+                           Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ ldr(scratch2,
+         ContextMemOperand(
+             scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+  __ cmp(scratch, scratch2);
+  __ b(ne, &runtime_call);
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  Label no_protector_check;
+  __ ldr(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+  __ DecodeField<Map::ElementsKindBits>(scratch);
+  __ cmp(scratch, Operand(FAST_HOLEY_ELEMENTS));
+  __ b(hi, &runtime_call);
+  // For non-FastHoley kinds, we can skip the protector check.
+  __ cmp(scratch, Operand(FAST_SMI_ELEMENTS));
+  __ b(eq, &no_protector_check);
+  __ cmp(scratch, Operand(FAST_ELEMENTS));
+  __ b(eq, &no_protector_check);
+  // Check the ArrayProtector cell.
+  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+  __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ cmp(scratch, Operand(Smi::FromInt(Isolate::kProtectorValid)));
+  __ b(ne, &runtime_call);
+
+  __ bind(&no_protector_check);
+  // Load the FixedArray backing store, but use the length from the array.
+  __ ldr(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
+  __ SmiUntag(spread_len);
+  __ ldr(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+  __ b(&push_args);
+
+  __ bind(&runtime_call);
+  {
+    // Call the builtin for the result of the spread.
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(argc);
+    __ Push(constructor);
+    __ Push(new_target);
+    __ Push(argc);
+    __ Push(spread);
+    __ CallRuntime(Runtime::kSpreadIterableFixed);
+    __ mov(spread, r0);
+    __ Pop(argc);
+    __ Pop(new_target);
+    __ Pop(constructor);
+    __ SmiUntag(argc);
+  }
+
+  {
+    // Calculate the new nargs including the result of the spread.
+    __ ldr(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
+    __ SmiUntag(spread_len);
+
+    __ bind(&push_args);
+    // argc += spread_len - 1. Subtract 1 for the spread itself.
+    __ add(argc, argc, spread_len);
+    __ sub(argc, argc, Operand(1));
+
+    // Pop the spread argument off the stack.
+    __ Pop(scratch);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+    // Make scratch the space we have left. The stack might already be
+    // overflowed here, in which case scratch becomes negative.
+    __ sub(scratch, sp, scratch);
+    // Check if the arguments will overflow the stack.
+    __ cmp(scratch, Operand(spread_len, LSL, kPointerSizeLog2));
+    __ b(gt, &done);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // Put the evaluated spread onto the stack as additional arguments.
+  {
+    __ mov(scratch, Operand(0));
+    Label done, push, loop;
+    __ bind(&loop);
+    __ cmp(scratch, spread_len);
+    __ b(eq, &done);
+    __ add(scratch2, spread, Operand(scratch, LSL, kPointerSizeLog2));
+    __ ldr(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+    __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+    __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ Push(scratch2);
+    __ add(scratch, scratch, Operand(1));
+    __ b(&loop);
+    __ bind(&done);
+  }
+}
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the number of arguments (not including the receiver)
+  //  -- r1 : the constructor to call (can be any Object)
+  // -----------------------------------
+
+  // CheckSpreadAndPushToStack will push r3 to save it.
+  __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            TailCallMode::kDisallow),
+          RelocInfo::CODE_TARGET);
+}
+
 // static
 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -2753,6 +2981,19 @@
 }
 
 // static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r0 : the number of arguments (not including the receiver)
+  //  -- r1 : the constructor to call (can be any Object)
+  //  -- r3 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+// static
 void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r1 : requested object size (untagged)
diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc
index aeb0508..74e6c70 100644
--- a/src/builtins/arm64/builtins-arm64.cc
+++ b/src/builtins/arm64/builtins-arm64.cc
@@ -315,11 +315,11 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    FastNewObjectStub stub(masm->isolate());
     __ SmiTag(x6);
     __ EnterBuiltinFrame(cp, x1, x6);
     __ Push(x2);  // first argument
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(x2);
     __ LeaveBuiltinFrame(cp, x1, x6);
     __ SmiUntag(x6);
@@ -467,11 +467,11 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    FastNewObjectStub stub(masm->isolate());
     __ SmiTag(x6);
     __ EnterBuiltinFrame(cp, x1, x6);
     __ Push(x2);  // first argument
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(x2);
     __ LeaveBuiltinFrame(cp, x1, x6);
     __ SmiUntag(x6);
@@ -540,6 +540,8 @@
 void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
                                     bool create_implicit_receiver,
                                     bool check_derived_construct) {
+  Label post_instantiation_deopt_entry;
+
   // ----------- S t a t e -------------
   //  -- x0     : number of arguments
   //  -- x1     : constructor function
@@ -569,8 +571,8 @@
     if (create_implicit_receiver) {
       // Allocate the new receiver object.
       __ Push(constructor, new_target);
-      FastNewObjectStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+              RelocInfo::CODE_TARGET);
       __ Mov(x4, x0);
       __ Pop(new_target, constructor);
 
@@ -597,6 +599,9 @@
       __ PushRoot(Heap::kTheHoleValueRootIndex);
     }
 
+    // Deoptimizer re-enters stub code here.
+    __ Bind(&post_instantiation_deopt_entry);
+
     // Set up pointer to last argument.
     __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
 
@@ -635,7 +640,8 @@
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
-      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+      masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+          masm->pc_offset());
     }
 
     // Restore the context from the frame.
@@ -698,6 +704,34 @@
     __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
   }
   __ Ret();
+
+  // Store offset of trampoline address for deoptimizer. This is the bailout
+  // point after the receiver instantiation but before the function invocation.
+  // We need to restore some registers so that execution can continue in the
+  // code above.
+  if (create_implicit_receiver && !is_api_function) {
+    masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+        masm->pc_offset());
+
+    // ----------- S t a t e -------------
+    //  -- x0    : newly allocated object
+    //  -- sp[0] : constructor function
+    // -----------------------------------
+
+    __ Pop(x1);
+    __ Push(x0, x0);
+
+    // Retrieve smi-tagged arguments count from the stack.
+    __ Ldr(x0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+    __ SmiUntag(x0);
+
+    // Retrieve the new target value from the stack. This was placed into the
+    // frame description in place of the receiver by the optimizing compiler.
+    __ Add(x3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+    __ Ldr(x3, MemOperand(x3, x0, LSL, kPointerSizeLog2));
+
+    // Continue with constructor function invocation.
+    __ B(&post_instantiation_deopt_entry);
+  }
 }
 
 }  // namespace
@@ -744,18 +778,17 @@
   __ Str(x2, FieldMemOperand(x1, JSGeneratorObject::kResumeModeOffset));
 
   // Load suspended function and context.
-  __ Ldr(cp, FieldMemOperand(x1, JSGeneratorObject::kContextOffset));
   __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+  __ Ldr(cp, FieldMemOperand(x4, JSFunction::kContextOffset));
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
   Label stepping_prepared;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(masm->isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  __ Mov(x10, Operand(last_step_action));
+  ExternalReference debug_hook =
+      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
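+  // A non-zero hook value means the debugger wants to be notified of calls.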
+  __ Mov(x10, Operand(debug_hook));
   __ Ldrsb(x10, MemOperand(x10));
-  __ CompareAndBranch(x10, Operand(StepIn), ge, &prepare_step_in_if_stepping);
+  __ CompareAndBranch(x10, Operand(0), ne, &prepare_step_in_if_stepping);
 
   // Flood function if we need to continue stepping in the suspended generator.
   ExternalReference debug_suspended_generator =
@@ -789,14 +822,15 @@
   __ LoadRoot(x11, Heap::kTheHoleValueRootIndex);
   __ PushMultipleTimes(x11, w10);
 
-  // Dispatch on the kind of generator object.
-  Label old_generator;
-  __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
-  __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
-  __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
-  __ B(ne, &old_generator);
+  // Underlying function needs to have bytecode available.
+  if (FLAG_debug_code) {
+    __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+    __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+    __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kMissingBytecodeArray);
+  }
 
-  // New-style (ignition/turbofan) generator object
+  // Resume (Ignition/TurboFan) generator object.
   {
     __ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
     __ Ldr(w0, FieldMemOperand(
@@ -810,54 +844,11 @@
     __ Jump(x5);
   }
 
-  // Old-style (full-codegen) generator object
-  __ bind(&old_generator);
-  {
-    // Enter a new JavaScript frame, and initialize its slots as they were when
-    // the generator was suspended.
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ Push(lr, fp);
-    __ Move(fp, jssp);
-    __ Push(cp, x4);
-
-    // Restore the operand stack.
-    __ Ldr(x0, FieldMemOperand(x1, JSGeneratorObject::kOperandStackOffset));
-    __ Ldr(w3, UntagSmiFieldMemOperand(x0, FixedArray::kLengthOffset));
-    __ Add(x0, x0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    __ Add(x3, x0, Operand(x3, LSL, kPointerSizeLog2));
-    {
-      Label done_loop, loop;
-      __ Bind(&loop);
-      __ Cmp(x0, x3);
-      __ B(eq, &done_loop);
-      __ Ldr(x10, MemOperand(x0, kPointerSize, PostIndex));
-      __ Push(x10);
-      __ B(&loop);
-      __ Bind(&done_loop);
-    }
-
-    // Reset operand stack so we don't leak.
-    __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
-    __ Str(x10, FieldMemOperand(x1, JSGeneratorObject::kOperandStackOffset));
-
-    // Resume the generator function at the continuation.
-    __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
-    __ Ldr(x10, FieldMemOperand(x10, SharedFunctionInfo::kCodeOffset));
-    __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
-    __ Ldrsw(x11, UntagSmiFieldMemOperand(
-                      x1, JSGeneratorObject::kContinuationOffset));
-    __ Add(x10, x10, x11);
-    __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
-    __ Str(x12, FieldMemOperand(x1, JSGeneratorObject::kContinuationOffset));
-    __ Move(x0, x1);  // Continuation expects generator object in x0.
-    __ Br(x10);
-  }
-
   __ Bind(&prepare_step_in_if_stepping);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(x1, x2, x4);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(x2, x1);
     __ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
   }
@@ -1050,8 +1041,7 @@
   Label load_debug_bytecode_array, bytecode_array_loaded;
   DCHECK(!debug_info.is(x0));
   __ Ldr(debug_info, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
-  __ Cmp(debug_info, Operand(DebugInfo::uninitialized()));
-  __ B(ne, &load_debug_bytecode_array);
+  __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
   __ Ldr(kInterpreterBytecodeArrayRegister,
          FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
   __ Bind(&bytecode_array_loaded);
@@ -1063,15 +1053,15 @@
   __ B(ne, &switch_to_different_code_kind);
 
   // Increment invocation count for the function.
-  __ Ldr(x11, FieldMemOperand(x1, JSFunction::kLiteralsOffset));
-  __ Ldr(x11, FieldMemOperand(x11, LiteralsArray::kFeedbackVectorOffset));
-  __ Ldr(x10, FieldMemOperand(x11, TypeFeedbackVector::kInvocationCountIndex *
-                                           kPointerSize +
-                                       TypeFeedbackVector::kHeaderSize));
+  __ Ldr(x11, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
+  __ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
+  __ Ldr(x10, FieldMemOperand(
+                  x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                           FeedbackVector::kHeaderSize));
   __ Add(x10, x10, Operand(Smi::FromInt(1)));
-  __ Str(x10, FieldMemOperand(x11, TypeFeedbackVector::kInvocationCountIndex *
-                                           kPointerSize +
-                                       TypeFeedbackVector::kHeaderSize));
+  __ Str(x10, FieldMemOperand(
+                  x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                           FeedbackVector::kHeaderSize));
 
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
@@ -1082,6 +1072,11 @@
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Reset code age.
+  __ Mov(x10, Operand(BytecodeArray::kNoAgeBytecodeAge));
+  __ Strb(x10, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                               BytecodeArray::kBytecodeAgeOffset));
+
   // Load the initial bytecode offset.
   __ Mov(kInterpreterBytecodeOffsetRegister,
          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1201,7 +1196,7 @@
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
-    CallableType function_type) {
+    InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- x0 : the number of arguments (not including the receiver)
   //  -- x2 : the address of the first argument to be pushed. Subsequent
@@ -1218,12 +1213,14 @@
   Generate_InterpreterPushArgs(masm, x3, x2, x4, x5, x6, &stack_overflow);
 
   // Call the target.
-  if (function_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
                                                       tail_call_mode),
             RelocInfo::CODE_TARGET);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(function_type, CallableType::kAny);
     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
@@ -1238,7 +1235,7 @@
 
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
-    MacroAssembler* masm, CallableType construct_type) {
+    MacroAssembler* masm, InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   // -- x0 : argument count (not including receiver)
   // -- x3 : new target
@@ -1255,7 +1252,7 @@
   Generate_InterpreterPushArgs(masm, x0, x4, x5, x6, x7, &stack_overflow);
 
   __ AssertUndefinedOrAllocationSite(x2, x6);
-  if (construct_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ AssertFunction(x1);
 
     // Tail call to the function-specific construct stub (still in the caller
@@ -1264,8 +1261,12 @@
     __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
     __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
     __ Br(x4);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    // Call the constructor with x0, x1, and x3 unmodified.
+    __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(construct_type, CallableType::kAny);
+    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
     // Call the constructor with x0, x1, and x3 unmodified.
     __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   }
@@ -1384,14 +1385,19 @@
   Register closure = x1;
   Register map = x13;
   Register index = x2;
+
+  // Do we have a valid feedback vector?
+  __ Ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ Ldr(index, FieldMemOperand(index, Cell::kValueOffset));
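+  // An undefined value means the feedback vector is missing, which only the
+  // runtime can fix.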
+  __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+
   __ Ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   __ Ldr(map,
          FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
   __ Ldrsw(index, UntagSmiFieldMemOperand(map, FixedArray::kLengthOffset));
   __ Cmp(index, Operand(2));
-  __ B(lt, &gotta_call_runtime);
+  __ B(lt, &try_shared);
 
-  // Find literals.
   // x3  : native context
   // x2  : length / index
   // x13 : optimized code map
@@ -1411,23 +1417,6 @@
   __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
   __ Cmp(temp, native_context);
   __ B(ne, &loop_bottom);
-  // OSR id set to none?
-  __ Ldr(temp, FieldMemOperand(array_pointer,
-                               SharedFunctionInfo::kOffsetToPreviousOsrAstId));
-  const int bailout_id = BailoutId::None().ToInt();
-  __ Cmp(temp, Operand(Smi::FromInt(bailout_id)));
-  __ B(ne, &loop_bottom);
-  // Literals available?
-  __ Ldr(temp, FieldMemOperand(array_pointer,
-                               SharedFunctionInfo::kOffsetToPreviousLiterals));
-  __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
-
-  // Save the literals in the closure.
-  __ Str(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset));
-  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, x7,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
 
   // Code available?
   Register entry = x7;
@@ -1437,7 +1426,7 @@
   __ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
 
-  // Found literals and code. Get them into the closure and return.
+  // Found code. Get it into the closure and return.
   __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, x5);
@@ -1466,9 +1455,7 @@
   __ Cmp(index, Operand(1));
   __ B(gt, &loop_top);
 
-  // We found neither literals nor code.
-  __ B(&gotta_call_runtime);
-
+  // We found no code.
   __ Bind(&try_shared);
   __ Ldr(entry,
          FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -1478,14 +1465,14 @@
   __ TestAndBranchIfAnySet(
       temp, 1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte,
       &gotta_call_runtime);
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ Ldr(x5, FieldMemOperand(entry, Code::kFlagsOffset));
-  __ and_(x5, x5, Operand(Code::KindField::kMask));
-  __ Mov(x5, Operand(x5, LSR, Code::KindField::kShift));
-  __ Cmp(x5, Operand(Code::BUILTIN));
+  __ Move(temp, masm->CodeObject());
+  __ Cmp(entry, temp);
   __ B(eq, &gotta_call_runtime);
-  // Yes, install the full code.
+
+  // Install the SFI's code entry.
   __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, x5);
@@ -1599,14 +1586,9 @@
   __ Br(x0);
 }
 
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
-  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
-  }                                                           \
-  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                              \
+  void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+    GenerateMakeCodeYoungAgainCommon(masm);                               \
   }
 CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2166,20 +2148,20 @@
   __ Bind(&target_not_constructor);
   {
     __ Poke(target, 0);
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 
   // 4c. The new.target is not a constructor, throw an appropriate TypeError.
   __ Bind(&new_target_not_constructor);
   {
     __ Poke(new_target, 0);
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 }
 
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(x10, x0);
-  __ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
   __ Push(lr, fp);
   __ Push(x11, x1, x10);
   __ Add(fp, jssp,
@@ -2218,7 +2200,8 @@
 
   // Create the list of arguments from the array-like argumentsList.
   {
-    Label create_arguments, create_array, create_runtime, done_create;
+    Label create_arguments, create_array, create_holey_array, create_runtime,
+        done_create;
     __ JumpIfSmi(arguments_list, &create_runtime);
 
     // Load native context.
@@ -2240,7 +2223,7 @@
     __ B(eq, &create_arguments);
 
     // Check if argumentsList is a fast JSArray.
-    __ CompareInstanceType(arguments_list_map, native_context, JS_ARRAY_TYPE);
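+    // Use x10 as the scratch register here; native_context is still needed
+    // below by create_holey_array.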
+    __ CompareInstanceType(arguments_list_map, x10, JS_ARRAY_TYPE);
     __ B(eq, &create_array);
 
     // Ask the runtime to create the list (actually a FixedArray).
@@ -2265,14 +2248,42 @@
     __ Mov(args, x10);
     __ B(&done_create);
 
+    // For holey JSArrays we need to check that the array prototype chain
+    // protector is intact and that our prototype really is the initial
+    // Array.prototype.
+    __ Bind(&create_holey_array);
+    //  -- x2 : arguments_list_map
+    //  -- x4 : native_context
+    Register arguments_list_prototype = x2;
+    __ Ldr(arguments_list_prototype,
+           FieldMemOperand(arguments_list_map, Map::kPrototypeOffset));
+    __ Ldr(x10, ContextMemOperand(native_context,
+                                  Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+    __ Cmp(arguments_list_prototype, x10);
+    __ B(ne, &create_runtime);
+    __ LoadRoot(x10, Heap::kArrayProtectorRootIndex);
+    __ Ldrsw(x11, UntagSmiFieldMemOperand(x10, PropertyCell::kValueOffset));
+    __ Cmp(x11, Isolate::kProtectorValid);
+    __ B(ne, &create_runtime);
+    __ Ldrsw(len,
+             UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
+    __ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
+    __ B(&done_create);
+
     // Try to create the list from a JSArray object.
     __ Bind(&create_array);
     __ Ldr(x10, FieldMemOperand(arguments_list_map, Map::kBitField2Offset));
     __ DecodeField<Map::ElementsKindBits>(x10);
     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
     STATIC_ASSERT(FAST_ELEMENTS == 2);
-    // Branch for anything that's not FAST_{SMI_}ELEMENTS.
-    __ TestAndBranchIfAnySet(x10, ~FAST_ELEMENTS, &create_runtime);
+    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+    // Check if it is a holey array; the order of the cmp is important, as
+    // anything higher than FAST_HOLEY_ELEMENTS falls back to the runtime.
+    __ Cmp(x10, FAST_HOLEY_ELEMENTS);
+    __ B(hi, &create_runtime);
+    // Only FAST_XXX kinds remain after this point; the FAST_HOLEY_XXX kinds
+    // are the odd values.
+    __ Tbnz(x10, 0, &create_holey_array);
+    // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
     __ Ldrsw(len,
              UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
     __ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
@@ -2306,16 +2317,24 @@
 
   // Push arguments onto the stack (thisArgument is already on the stack).
   {
-    Label done, loop;
+    Label done, push, loop;
     Register src = x4;
 
     __ Add(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
     __ Mov(x0, len);  // The 'len' argument for Call() or Construct().
     __ Cbz(len, &done);
+    Register the_hole_value = x11;
+    Register undefined_value = x12;
+    // We do not use the CompareRoot macro, as it would do a LoadRoot behind
+    // the scenes and we want to avoid that inside the loop.
+    __ LoadRoot(the_hole_value, Heap::kTheHoleValueRootIndex);
+    __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
     __ Claim(len);
     __ Bind(&loop);
     __ Sub(len, len, 1);
     __ Ldr(x10, MemOperand(src, kPointerSize, PostIndex));
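+    // Replace the hole with undefined while copying; Csel keeps x10 when it
+    // is not the hole.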
+    __ Cmp(x10, the_hole_value);
+    __ Csel(x10, x10, undefined_value, ne);
     __ Poke(x10, Operand(len, LSL, kPointerSizeLog2));
     __ Cbnz(len, &loop);
     __ Bind(&done);
@@ -2340,6 +2359,72 @@
   }
 }
 
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+                                           Handle<Code> code) {
+  // ----------- S t a t e -------------
+  //  -- x1    : the target to call (can be any Object)
+  //  -- x2    : start index (to support rest parameters)
+  //  -- lr    : return address.
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ Ldr(x3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
+  __ B(eq, &arguments_adaptor);
+  {
+    __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    __ Ldr(x0, FieldMemOperand(x0, JSFunction::kSharedFunctionInfoOffset));
+    __ Ldrsw(x0, FieldMemOperand(
+                     x0, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ Mov(x3, fp);
+  }
+  __ B(&arguments_done);
+  __ Bind(&arguments_adaptor);
+  {
+    // Just load the length from the ArgumentsAdaptorFrame.
+    __ Ldrsw(x0, UntagSmiMemOperand(
+                     x3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  }
+  __ Bind(&arguments_done);
+
+  Label stack_empty, stack_done, stack_overflow;
+  __ Subs(x0, x0, x2);
+  __ B(le, &stack_empty);
+  {
+    // Check for stack overflow.
+    Generate_StackOverflowCheck(masm, x0, x2, &stack_overflow);
+
+    // Forward the arguments from the caller frame.
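+    // x3 points at the frame holding the arguments; copy x0 of them onto
+    // the current stack.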
+    {
+      Label loop;
+      __ Add(x3, x3, kPointerSize);
+      __ Mov(x2, x0);
+      __ bind(&loop);
+      {
+        __ Ldr(x4, MemOperand(x3, x2, LSL, kPointerSizeLog2));
+        __ Push(x4);
+        __ Subs(x2, x2, 1);
+        __ B(ne, &loop);
+      }
+    }
+  }
+  __ B(&stack_done);
+  __ Bind(&stack_overflow);
+  __ TailCallRuntime(Runtime::kThrowStackOverflow);
+  __ Bind(&stack_empty);
+  {
+    // We just pass the receiver, which is already on the stack.
+    __ Mov(x0, 0);
+  }
+  __ Bind(&stack_done);
+
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
 namespace {
 
 // Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2390,7 +2475,7 @@
     Label no_interpreter_frame;
     __ Ldr(scratch3,
            MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
-    __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
+    __ Cmp(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
     __ B(ne, &no_interpreter_frame);
     __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
     __ bind(&no_interpreter_frame);
@@ -2402,7 +2487,8 @@
   __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ Ldr(scratch3,
          MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ Cmp(scratch3,
+         Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ B(ne, &no_arguments_adaptor);
 
   // Drop current frame and load arguments count from arguments adaptor frame.
@@ -2705,6 +2791,155 @@
   }
 }
 
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+  Register argc = x0;
+  Register constructor = x1;
+  Register new_target = x3;
+
+  Register scratch = x2;
+  Register scratch2 = x6;
+
+  Register spread = x4;
+  Register spread_map = x5;
+
+  Register spread_len = x5;
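+  // Note: spread_len aliases spread_map (x5); the map is dead once the
+  // length has been loaded.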
+
+  Label runtime_call, push_args;
+  __ Peek(spread, 0);
+  __ JumpIfSmi(spread, &runtime_call);
+  __ Ldr(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+
+  // Check that the spread is an array.
+  __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
+  __ B(ne, &runtime_call);
+
+  // Check that we have the original ArrayPrototype.
+  __ Ldr(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+  __ Ldr(scratch2, NativeContextMemOperand());
+  __ Ldr(scratch2,
+         ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+  __ Cmp(scratch, scratch2);
+  __ B(ne, &runtime_call);
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
+  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+  __ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ Cmp(scratch, Smi::FromInt(Isolate::kProtectorValid));
+  __ B(ne, &runtime_call);
+
+  // Check that the map of the initial array iterator hasn't changed.
+  __ Ldr(scratch2, NativeContextMemOperand());
+  __ Ldr(scratch,
+         ContextMemOperand(scratch2,
+                           Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+  __ Ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ Ldr(scratch2,
+         ContextMemOperand(
+             scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+  __ Cmp(scratch, scratch2);
+  __ B(ne, &runtime_call);
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  Label no_protector_check;
+  __ Ldr(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+  __ DecodeField<Map::ElementsKindBits>(scratch);
+  __ Cmp(scratch, FAST_HOLEY_ELEMENTS);
+  __ B(hi, &runtime_call);
+  // For non-FastHoley kinds, we can skip the protector check.
+  __ Cmp(scratch, FAST_SMI_ELEMENTS);
+  __ B(eq, &no_protector_check);
+  __ Cmp(scratch, FAST_ELEMENTS);
+  __ B(eq, &no_protector_check);
+  // Check the ArrayProtector cell.
+  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+  __ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ Cmp(scratch, Smi::FromInt(Isolate::kProtectorValid));
+  __ B(ne, &runtime_call);
+
+  __ Bind(&no_protector_check);
+  // Load the FixedArray backing store, but use the length from the array.
+  __ Ldrsw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
+  __ Ldr(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+  __ B(&push_args);
+
+  __ Bind(&runtime_call);
+  {
+    // Call the builtin for the result of the spread.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(argc);
+    __ Push(constructor, new_target, argc, spread);
+    __ CallRuntime(Runtime::kSpreadIterableFixed);
+    __ Mov(spread, x0);
+    __ Pop(argc, new_target, constructor);
+    __ SmiUntag(argc);
+  }
+
+  {
+    // Calculate the new nargs including the result of the spread.
+    __ Ldrsw(spread_len,
+             UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
+
+    __ Bind(&push_args);
+    // argc += spread_len - 1. Subtract 1 for the spread itself.
+    __ Add(argc, argc, spread_len);
+    __ Sub(argc, argc, 1);
+
+    // Pop the spread argument off the stack.
+    __ Pop(scratch);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+    // Make scratch the space we have left. The stack might already be
+    // overflowed here, in which case scratch becomes negative.
+    __ Sub(scratch, masm->StackPointer(), scratch);
+    // Check if the arguments will overflow the stack.
+    __ Cmp(scratch, Operand(spread_len, LSL, kPointerSizeLog2));
+    __ B(gt, &done);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ Bind(&done);
+  }
+
+  // Put the evaluated spread onto the stack as additional arguments.
+  {
+    __ Mov(scratch, 0);
+    Label done, push, loop;
+    __ Bind(&loop);
+    __ Cmp(scratch, spread_len);
+    __ B(eq, &done);
+    __ Add(scratch2, spread, Operand(scratch, LSL, kPointerSizeLog2));
+    __ Ldr(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+    __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+    __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ Push(scratch2);
+    __ Add(scratch, scratch, Operand(1));
+    __ B(&loop);
+    __ Bind(&done);
+  }
+}
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x1 : the constructor to call (can be any Object)
+  // -----------------------------------
+
+  // CheckSpreadAndPushToStack will push x3 to save it.
+  __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            TailCallMode::kDisallow),
+          RelocInfo::CODE_TARGET);
+}
+
 // static
 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -2826,6 +3061,19 @@
 }
 
 // static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- x0 : the number of arguments (not including the receiver)
+  //  -- x1 : the constructor to call (can be any Object)
+  //  -- x3 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+// static
 void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
   ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
   // ----------- S t a t e -------------
diff --git a/src/builtins/builtins-api.cc b/src/builtins/builtins-api.cc
index defc4dc..eb34638 100644
--- a/src/builtins/builtins-api.cc
+++ b/src/builtins/builtins-api.cc
@@ -7,6 +7,10 @@
 #include "src/api-arguments.h"
 #include "src/api-natives.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/counters.h"
+#include "src/log.h"
+#include "src/objects-inl.h"
+#include "src/prototype.h"
 
 namespace v8 {
 namespace internal {
@@ -77,6 +81,7 @@
         !isolate->MayAccess(handle(isolate->context()), js_receiver)) {
       isolate->ReportFailedAccessCheck(js_receiver);
       RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
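+      // If no exception was scheduled, bail out with undefined instead of
+      // invoking the callback anyway.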
+      return isolate->factory()->undefined_value();
     }
 
     raw_holder = GetCompatibleReceiver(isolate, *fun_data, *js_receiver);
diff --git a/src/builtins/builtins-arguments.cc b/src/builtins/builtins-arguments.cc
new file mode 100644
index 0000000..337c862
--- /dev/null
+++ b/src/builtins/builtins-arguments.cc
@@ -0,0 +1,425 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-arguments.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/interface-descriptors.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+
+std::tuple<Node*, Node*, Node*>
+ArgumentsBuiltinsAssembler::GetArgumentsFrameAndCount(Node* function,
+                                                      ParameterMode mode) {
+  CSA_ASSERT(this, HasInstanceType(function, JS_FUNCTION_TYPE));
+
+  Variable frame_ptr(this, MachineType::PointerRepresentation());
+  frame_ptr.Bind(LoadParentFramePointer());
+  CSA_ASSERT(this,
+             WordEqual(function,
+                       LoadBufferObject(frame_ptr.value(),
+                                        StandardFrameConstants::kFunctionOffset,
+                                        MachineType::Pointer())));
+  Variable argument_count(this, ParameterRepresentation(mode));
+  VariableList list({&frame_ptr, &argument_count}, zone());
+  Label done_argument_count(this, list);
+
+  // Determine the number of passed parameters, which is either the count
+  // stored in an arguments adaptor frame or the formal parameter count
+  // fetched from the shared function info.
+  Node* frame_ptr_above = LoadBufferObject(
+      frame_ptr.value(), StandardFrameConstants::kCallerFPOffset,
+      MachineType::Pointer());
+  Node* shared =
+      LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
+  Node* formal_parameter_count = LoadSharedFunctionInfoSpecialField(
+      shared, SharedFunctionInfo::kFormalParameterCountOffset, mode);
+  argument_count.Bind(formal_parameter_count);
+  Node* marker_or_function = LoadBufferObject(
+      frame_ptr_above, CommonFrameConstants::kContextOrFrameTypeOffset);
+  GotoIf(
+      MarkerIsNotFrameType(marker_or_function, StackFrame::ARGUMENTS_ADAPTOR),
+      &done_argument_count);
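+  // Inside an arguments adaptor frame, the adapted argument count takes
+  // precedence over the formal parameter count.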
+  Node* adapted_parameter_count = LoadBufferObject(
+      frame_ptr_above, ArgumentsAdaptorFrameConstants::kLengthOffset);
+  frame_ptr.Bind(frame_ptr_above);
+  argument_count.Bind(TaggedToParameter(adapted_parameter_count, mode));
+  Goto(&done_argument_count);
+
+  Bind(&done_argument_count);
+  return std::tuple<Node*, Node*, Node*>(
+      frame_ptr.value(), argument_count.value(), formal_parameter_count);
+}
+
+std::tuple<Node*, Node*, Node*>
+ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
+                                                    Node* arguments_count,
+                                                    Node* parameter_map_count,
+                                                    ParameterMode mode,
+                                                    int base_size) {
+  // Allocate the parameter object (either a Rest parameter object, a strict
+  // arguments object or a sloppy arguments object) and the elements/mapped
+  // arguments together.
+  int elements_offset = base_size;
+  Node* element_count = arguments_count;
+  if (parameter_map_count != nullptr) {
+    base_size += FixedArray::kHeaderSize;
+    element_count = IntPtrOrSmiAdd(element_count, parameter_map_count, mode);
+  }
+  bool empty = IsIntPtrOrSmiConstantZero(arguments_count);
+  DCHECK_IMPLIES(empty, parameter_map_count == nullptr);
+  Node* size =
+      empty ? IntPtrConstant(base_size)
+            : ElementOffsetFromIndex(element_count, FAST_ELEMENTS, mode,
+                                     base_size + FixedArray::kHeaderSize);
+  Node* result = Allocate(size);
+  Comment("Initialize arguments object");
+  StoreMapNoWriteBarrier(result, map);
+  Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+  StoreObjectField(result, JSArray::kPropertiesOffset, empty_fixed_array);
+  Node* smi_arguments_count = ParameterToTagged(arguments_count, mode);
+  StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset,
+                                 smi_arguments_count);
+  Node* arguments = nullptr;
+  if (!empty) {
+    arguments = InnerAllocate(result, elements_offset);
+    StoreObjectFieldNoWriteBarrier(arguments, FixedArray::kLengthOffset,
+                                   smi_arguments_count);
+    Node* fixed_array_map = LoadRoot(Heap::kFixedArrayMapRootIndex);
+    StoreMapNoWriteBarrier(arguments, fixed_array_map);
+  }
+  Node* parameter_map = nullptr;
+  if (parameter_map_count != nullptr) {
+    Node* parameter_map_offset = ElementOffsetFromIndex(
+        arguments_count, FAST_ELEMENTS, mode, FixedArray::kHeaderSize);
+    parameter_map = InnerAllocate(arguments, parameter_map_offset);
+    StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
+                                   parameter_map);
+    Node* sloppy_elements_map =
+        LoadRoot(Heap::kSloppyArgumentsElementsMapRootIndex);
+    StoreMapNoWriteBarrier(parameter_map, sloppy_elements_map);
+    parameter_map_count = ParameterToTagged(parameter_map_count, mode);
+    StoreObjectFieldNoWriteBarrier(parameter_map, FixedArray::kLengthOffset,
+                                   parameter_map_count);
+  } else {
+    if (empty) {
+      StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
+                                     empty_fixed_array);
+    } else {
+      StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
+                                     arguments);
+    }
+  }
+  return std::tuple<Node*, Node*, Node*>(result, arguments, parameter_map);
+}
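+
+// Layout note: the allocation above is one contiguous new-space block,
+//   [arguments object (base_size)][elements FixedArray][parameter map]
+// where the trailing parameter map FixedArray exists only for sloppy
+// arguments, which is why |base_size| grows by one extra FixedArray header in
+// that case.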
+
+Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs(
+    Node* map, Node* frame_ptr, Node* arg_count, Node* first_arg,
+    Node* rest_count, ParameterMode param_mode, int base_size) {
+  // Allocate the parameter object (either a Rest parameter object, a strict
+  // arguments object or a sloppy arguments object) and the elements together,
+  // and fill in the contents with the |rest_count| arguments from the stack
+  // frame, starting at argument |first_arg|.
+  Node* result;
+  Node* elements;
+  Node* unused;
+  std::tie(result, elements, unused) =
+      AllocateArgumentsObject(map, rest_count, nullptr, param_mode, base_size);
+  DCHECK(unused == nullptr);
+  CodeStubArguments arguments(this, arg_count, frame_ptr, param_mode);
+  Variable offset(this, MachineType::PointerRepresentation());
+  offset.Bind(IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
+  VariableList list({&offset}, zone());
+  arguments.ForEach(list,
+                    [this, elements, &offset](Node* arg) {
+                      StoreNoWriteBarrier(MachineRepresentation::kTagged,
+                                          elements, offset.value(), arg);
+                      Increment(offset, kPointerSize);
+                    },
+                    first_arg, nullptr, param_mode);
+  return result;
+}
+
+Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context,
+                                                           Node* function) {
+  Node* frame_ptr;
+  Node* argument_count;
+  Node* formal_parameter_count;
+
+  ParameterMode mode = OptimalParameterMode();
+  Node* zero = IntPtrOrSmiConstant(0, mode);
+
+  std::tie(frame_ptr, argument_count, formal_parameter_count) =
+      GetArgumentsFrameAndCount(function, mode);
+
+  Variable result(this, MachineRepresentation::kTagged);
+  Label no_rest_parameters(this), runtime(this, Label::kDeferred),
+      done(this, &result);
+
+  Node* rest_count =
+      IntPtrOrSmiSub(argument_count, formal_parameter_count, mode);
+  Node* const native_context = LoadNativeContext(context);
+  Node* const array_map = LoadJSArrayElementsMap(FAST_ELEMENTS, native_context);
+  GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero, mode),
+         &no_rest_parameters);
+
+  GotoIfFixedArraySizeDoesntFitInNewSpace(
+      rest_count, &runtime, JSArray::kSize + FixedArray::kHeaderSize, mode);
+
+  // Allocate the Rest JSArray and the elements together and fill in the
+  // contents with the arguments above |formal_parameter_count|.
+  result.Bind(ConstructParametersObjectFromArgs(
+      array_map, frame_ptr, argument_count, formal_parameter_count, rest_count,
+      mode, JSArray::kSize));
+  Goto(&done);
+
+  Bind(&no_rest_parameters);
+  {
+    Node* arguments;
+    Node* elements;
+    Node* unused;
+    std::tie(arguments, elements, unused) =
+        AllocateArgumentsObject(array_map, zero, nullptr, mode, JSArray::kSize);
+    result.Bind(arguments);
+    Goto(&done);
+  }
+
+  Bind(&runtime);
+  {
+    result.Bind(CallRuntime(Runtime::kNewRestParameter, context, function));
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return result.value();
+}
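+
+// For illustration: given
+//   function f(a, ...rest) { return rest; }
+//   f(1, 2, 3);
+// argument_count is 3 and formal_parameter_count is 1, so rest_count is 2 and
+// the fast path materializes |rest| as a FAST_ELEMENTS JSArray holding the
+// two stack arguments above the formal parameter.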
+
+TF_BUILTIN(FastNewRestParameter, ArgumentsBuiltinsAssembler) {
+  Node* function = Parameter(FastNewArgumentsDescriptor::kFunction);
+  Node* context = Parameter(FastNewArgumentsDescriptor::kContext);
+  Return(EmitFastNewRestParameter(context, function));
+}
+
+Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context,
+                                                             Node* function) {
+  Variable result(this, MachineRepresentation::kTagged);
+  Label done(this, &result), empty(this), runtime(this, Label::kDeferred);
+
+  Node* frame_ptr;
+  Node* argument_count;
+  Node* formal_parameter_count;
+
+  ParameterMode mode = OptimalParameterMode();
+  Node* zero = IntPtrOrSmiConstant(0, mode);
+
+  std::tie(frame_ptr, argument_count, formal_parameter_count) =
+      GetArgumentsFrameAndCount(function, mode);
+
+  GotoIfFixedArraySizeDoesntFitInNewSpace(
+      argument_count, &runtime,
+      JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
+
+  Node* const native_context = LoadNativeContext(context);
+  Node* const map =
+      LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX);
+  GotoIf(WordEqual(argument_count, zero), &empty);
+
+  result.Bind(ConstructParametersObjectFromArgs(
+      map, frame_ptr, argument_count, zero, argument_count, mode,
+      JSStrictArgumentsObject::kSize));
+  Goto(&done);
+
+  Bind(&empty);
+  {
+    Node* arguments;
+    Node* elements;
+    Node* unused;
+    std::tie(arguments, elements, unused) = AllocateArgumentsObject(
+        map, zero, nullptr, mode, JSStrictArgumentsObject::kSize);
+    result.Bind(arguments);
+    Goto(&done);
+  }
+
+  Bind(&runtime);
+  {
+    result.Bind(CallRuntime(Runtime::kNewStrictArguments, context, function));
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return result.value();
+}
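+
+// Note: a strict arguments object never aliases the function's parameters, so
+// all |argument_count| stack values are copied verbatim and no parameter map
+// is allocated (hence the nullptr passed to AllocateArgumentsObject above).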
+
+TF_BUILTIN(FastNewStrictArguments, ArgumentsBuiltinsAssembler) {
+  Node* function = Parameter(FastNewArgumentsDescriptor::kFunction);
+  Node* context = Parameter(FastNewArgumentsDescriptor::kContext);
+  Return(EmitFastNewStrictArguments(context, function));
+}
+
+Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
+                                                             Node* function) {
+  Node* frame_ptr;
+  Node* argument_count;
+  Node* formal_parameter_count;
+  Variable result(this, MachineRepresentation::kTagged);
+
+  ParameterMode mode = OptimalParameterMode();
+  Node* zero = IntPtrOrSmiConstant(0, mode);
+
+  Label done(this, &result), empty(this), no_parameters(this),
+      runtime(this, Label::kDeferred);
+
+  std::tie(frame_ptr, argument_count, formal_parameter_count) =
+      GetArgumentsFrameAndCount(function, mode);
+
+  GotoIf(WordEqual(argument_count, zero), &empty);
+
+  GotoIf(WordEqual(formal_parameter_count, zero), &no_parameters);
+
+  {
+    Comment("Mapped parameter JSSloppyArgumentsObject");
+
+    Node* mapped_count =
+        IntPtrOrSmiMin(argument_count, formal_parameter_count, mode);
+
+    Node* parameter_map_size =
+        IntPtrOrSmiAdd(mapped_count, IntPtrOrSmiConstant(2, mode), mode);
+
+    // Verify that the overall allocation will fit in new space.
+    Node* elements_allocated =
+        IntPtrOrSmiAdd(argument_count, parameter_map_size, mode);
+    GotoIfFixedArraySizeDoesntFitInNewSpace(
+        elements_allocated, &runtime,
+        JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize * 2, mode);
+
+    Node* const native_context = LoadNativeContext(context);
+    Node* const map = LoadContextElement(
+        native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
+    Node* argument_object;
+    Node* elements;
+    Node* map_array;
+    std::tie(argument_object, elements, map_array) =
+        AllocateArgumentsObject(map, argument_count, parameter_map_size, mode,
+                                JSSloppyArgumentsObject::kSize);
+    StoreObjectFieldNoWriteBarrier(
+        argument_object, JSSloppyArgumentsObject::kCalleeOffset, function);
+    StoreFixedArrayElement(map_array, 0, context, SKIP_WRITE_BARRIER);
+    StoreFixedArrayElement(map_array, 1, elements, SKIP_WRITE_BARRIER);
+
+    Comment("Fill in non-mapped parameters");
+    Node* argument_offset =
+        ElementOffsetFromIndex(argument_count, FAST_ELEMENTS, mode,
+                               FixedArray::kHeaderSize - kHeapObjectTag);
+    Node* mapped_offset =
+        ElementOffsetFromIndex(mapped_count, FAST_ELEMENTS, mode,
+                               FixedArray::kHeaderSize - kHeapObjectTag);
+    CodeStubArguments arguments(this, argument_count, frame_ptr, mode);
+    Variable current_argument(this, MachineType::PointerRepresentation());
+    current_argument.Bind(arguments.AtIndexPtr(argument_count, mode));
+    VariableList var_list1({&current_argument}, zone());
+    mapped_offset = BuildFastLoop(
+        var_list1, argument_offset, mapped_offset,
+        [this, elements, &current_argument](Node* offset) {
+          Increment(current_argument, kPointerSize);
+          Node* arg = LoadBufferObject(current_argument.value(), 0);
+          StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
+                              arg);
+        },
+        -kPointerSize, INTPTR_PARAMETERS);
+
+    // Copy the parameter slots and the holes in the arguments.
+    // We need to fill in mapped_count slots. They index the context,
+    // where parameters are stored in reverse order, at
+    //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+    // The mapped parameters thus need to get indices
+    //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+    //       MIN_CONTEXT_SLOTS+parameter_count-mapped_count
+    // We loop from right to left.
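+    // For illustration, with formal_parameter_count == 3 and argument_count
+    // == 2: mapped_count == 2, context_index starts at MIN_CONTEXT_SLOTS + 1,
+    // and the loop maps element 1 to context slot MIN_CONTEXT_SLOTS + 1 and
+    // element 0 to context slot MIN_CONTEXT_SLOTS + 2.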
+    Comment("Fill in mapped parameters");
+    Variable context_index(this, OptimalParameterRepresentation());
+    context_index.Bind(IntPtrOrSmiSub(
+        IntPtrOrSmiAdd(IntPtrOrSmiConstant(Context::MIN_CONTEXT_SLOTS, mode),
+                       formal_parameter_count, mode),
+        mapped_count, mode));
+    Node* the_hole = TheHoleConstant();
+    VariableList var_list2({&context_index}, zone());
+    const int kParameterMapHeaderSize =
+        FixedArray::kHeaderSize + 2 * kPointerSize;
+    Node* adjusted_map_array = IntPtrAdd(
+        BitcastTaggedToWord(map_array),
+        IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+    Node* zero_offset = ElementOffsetFromIndex(
+        zero, FAST_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
+    BuildFastLoop(var_list2, mapped_offset, zero_offset,
+                  [this, the_hole, elements, adjusted_map_array, &context_index,
+                   mode](Node* offset) {
+                    StoreNoWriteBarrier(MachineRepresentation::kTagged,
+                                        elements, offset, the_hole);
+                    StoreNoWriteBarrier(
+                        MachineRepresentation::kTagged, adjusted_map_array,
+                        offset, ParameterToTagged(context_index.value(), mode));
+                    Increment(context_index, 1, mode);
+                  },
+                  -kPointerSize, INTPTR_PARAMETERS);
+
+    result.Bind(argument_object);
+    Goto(&done);
+  }
+
+  Bind(&no_parameters);
+  {
+    Comment("No parameters JSSloppyArgumentsObject");
+    GotoIfFixedArraySizeDoesntFitInNewSpace(
+        argument_count, &runtime,
+        JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
+    Node* const native_context = LoadNativeContext(context);
+    Node* const map =
+        LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+    result.Bind(ConstructParametersObjectFromArgs(
+        map, frame_ptr, argument_count, zero, argument_count, mode,
+        JSSloppyArgumentsObject::kSize));
+    StoreObjectFieldNoWriteBarrier(
+        result.value(), JSSloppyArgumentsObject::kCalleeOffset, function);
+    Goto(&done);
+  }
+
+  Bind(&empty);
+  {
+    Comment("Empty JSSloppyArgumentsObject");
+    Node* const native_context = LoadNativeContext(context);
+    Node* const map =
+        LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+    Node* arguments;
+    Node* elements;
+    Node* unused;
+    std::tie(arguments, elements, unused) = AllocateArgumentsObject(
+        map, zero, nullptr, mode, JSSloppyArgumentsObject::kSize);
+    result.Bind(arguments);
+    StoreObjectFieldNoWriteBarrier(
+        result.value(), JSSloppyArgumentsObject::kCalleeOffset, function);
+    Goto(&done);
+  }
+
+  Bind(&runtime);
+  {
+    result.Bind(CallRuntime(Runtime::kNewSloppyArguments, context, function));
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return result.value();
+}
+
+TF_BUILTIN(FastNewSloppyArguments, ArgumentsBuiltinsAssembler) {
+  Node* function = Parameter(FastNewArgumentsDescriptor::kFunction);
+  Node* context = Parameter(FastNewArgumentsDescriptor::kContext);
+  Return(EmitFastNewSloppyArguments(context, function));
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-arguments.h b/src/builtins/builtins-arguments.h
new file mode 100644
index 0000000..e7c3823
--- /dev/null
+++ b/src/builtins/builtins-arguments.h
@@ -0,0 +1,55 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
+
+class ArgumentsBuiltinsAssembler : public CodeStubAssembler {
+ public:
+  explicit ArgumentsBuiltinsAssembler(CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
+
+  Node* EmitFastNewStrictArguments(Node* context, Node* function);
+  Node* EmitFastNewSloppyArguments(Node* context, Node* function);
+  Node* EmitFastNewRestParameter(Node* context, Node* function);
+
+ private:
+  // Calculates and returns the frame pointer, argument count and formal
+  // parameter count to be used to access a function's parameters, taking
+  // arguments adaptor frames into account. The tuple is of the form:
+  // <frame_ptr, # parameters actually passed, formal parameter count>
+  std::tuple<Node*, Node*, Node*> GetArgumentsFrameAndCount(Node* function,
+                                                            ParameterMode mode);
+
+  // Allocates an arguments object (either rest, strict or sloppy) together
+  // with the FixedArray elements for the arguments and a parameter map (for
+  // sloppy arguments only). A tuple is returned with pointers to the
+  // arguments object, the elements and the parameter map in the form:
+  // <arguments object, arguments FixedArray, parameter map or nullptr>
+  std::tuple<Node*, Node*, Node*> AllocateArgumentsObject(
+      Node* map, Node* arguments, Node* mapped_arguments,
+      ParameterMode param_mode, int base_size);
+
+  // For Rest parameters and Strict arguments, the copying of parameters from
+  // the stack into the arguments object is straightforward and shares much of
+  // the same underlying logic, which is encapsulated by this function. It
+  // allocates an arguments-like object of size |base_size| with the map |map|,
+  // and then copies |rest_count| arguments from the stack frame pointed to by
+  // |frame_ptr| starting from |first_arg|. |arg_count| == |first_arg| +
+  // |rest_count|.
+  Node* ConstructParametersObjectFromArgs(Node* map, Node* frame_ptr,
+                                          Node* arg_count, Node* first_arg,
+                                          Node* rest_count,
+                                          ParameterMode param_mode,
+                                          int base_size);
+};
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-array.cc b/src/builtins/builtins-array.cc
index c09f11b..183820e 100644
--- a/src/builtins/builtins-array.cc
+++ b/src/builtins/builtins-array.cc
@@ -6,8 +6,14 @@
 #include "src/builtins/builtins-utils.h"
 
 #include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
 #include "src/contexts.h"
+#include "src/counters.h"
 #include "src/elements.h"
+#include "src/isolate.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h"
+#include "src/prototype.h"
 
 namespace v8 {
 namespace internal {
@@ -32,7 +38,7 @@
       *out = static_cast<int>(value);
     }
     return true;
-  } else if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
+  } else if (object->IsNullOrUndefined(isolate)) {
     *out = 0;
     return true;
   } else if (object->IsBoolean()) {
@@ -55,7 +61,13 @@
   Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
   if (!len_obj->IsSmi()) return false;
   *out = Max(0, Smi::cast(len_obj)->value());
-  return *out <= object->elements()->length();
+
+  FixedArray* parameters = FixedArray::cast(object->elements());
+  if (object->HasSloppyArgumentsElements()) {
+    FixedArray* arguments = FixedArray::cast(parameters->get(1));
+    return *out <= arguments->length();
+  }
+  return *out <= parameters->length();
 }
 
 inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
@@ -144,14 +156,15 @@
   int argc = args.length() - 1;
   ScopedVector<Handle<Object>> argv(argc);
   for (int i = 0; i < argc; ++i) {
-    argv[i] = args.at<Object>(i + 1);
+    argv[i] = args.at(i + 1);
   }
   RETURN_RESULT_OR_FAILURE(
       isolate,
       Execution::Call(isolate, function, args.receiver(), argc, argv.start()));
 }
+}  // namespace
 
-Object* DoArrayPush(Isolate* isolate, BuiltinArguments args) {
+BUILTIN(ArrayPush) {
   HandleScope scope(isolate);
   Handle<Object> receiver = args.receiver();
   if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
@@ -174,19 +187,175 @@
   int new_length = accessor->Push(array, &args, to_add);
   return Smi::FromInt(new_length);
 }
-}  // namespace
 
-BUILTIN(ArrayPush) { return DoArrayPush(isolate, args); }
+void Builtins::Generate_FastArrayPush(compiler::CodeAssemblerState* state) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+  CodeStubAssembler assembler(state);
+  Variable arg_index(&assembler, MachineType::PointerRepresentation());
+  Label default_label(&assembler, &arg_index);
+  Label smi_transition(&assembler);
+  Label object_push_pre(&assembler);
+  Label object_push(&assembler, &arg_index);
+  Label double_push(&assembler, &arg_index);
+  Label double_transition(&assembler);
+  Label runtime(&assembler, Label::kDeferred);
 
-// TODO(verwaest): This is a temporary helper until the FastArrayPush stub can
-// tailcall to the builtin directly.
-RUNTIME_FUNCTION(Runtime_ArrayPush) {
-  DCHECK_EQ(2, args.length());
-  Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
-  // Rewrap the arguments as builtins arguments.
-  int argc = incoming->length() + BuiltinArguments::kNumExtraArgsWithReceiver;
-  BuiltinArguments caller_args(argc, incoming->arguments() + 1);
-  return DoArrayPush(isolate, caller_args);
+  Node* argc = assembler.Parameter(BuiltinDescriptor::kArgumentsCount);
+  Node* context = assembler.Parameter(BuiltinDescriptor::kContext);
+  Node* new_target = assembler.Parameter(BuiltinDescriptor::kNewTarget);
+
+  CodeStubArguments args(&assembler, assembler.ChangeInt32ToIntPtr(argc));
+  Node* receiver = args.GetReceiver();
+  Node* kind = nullptr;
+
+  Label fast(&assembler);
+  {
+    assembler.BranchIfFastJSArray(
+        receiver, context, CodeStubAssembler::FastJSArrayAccessMode::ANY_ACCESS,
+        &fast, &runtime);
+  }
+
+  assembler.Bind(&fast);
+  {
+    // Disallow pushing onto prototypes. It might be the JSArray prototype.
+    // Disallow pushing onto non-extensible objects.
+    assembler.Comment("Disallow pushing onto prototypes");
+    Node* map = assembler.LoadMap(receiver);
+    Node* bit_field2 = assembler.LoadMapBitField2(map);
+    int mask = static_cast<int>(Map::IsPrototypeMapBits::kMask) |
+               (1 << Map::kIsExtensible);
+    Node* test = assembler.Word32And(bit_field2, assembler.Int32Constant(mask));
+    assembler.GotoIf(
+        assembler.Word32NotEqual(
+            test, assembler.Int32Constant(1 << Map::kIsExtensible)),
+        &runtime);
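+    // That is: stay on the fast path only if the is-extensible bit is set and
+    // the is-prototype-map bit is clear; any other combination takes the
+    // &runtime path.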
+
+    // Disallow pushing onto arrays in dictionary named property mode. We need
+    // to figure out whether the length property is still writable.
+    assembler.Comment(
+        "Disallow pushing onto arrays in dictionary named property mode");
+    assembler.GotoIf(assembler.IsDictionaryMap(map), &runtime);
+
+    // Check whether the length property is writable. The length property is
+    // the only default named property on arrays. It's nonconfigurable, hence
+    // it is guaranteed to stay the first property.
+    Node* descriptors = assembler.LoadMapDescriptors(map);
+    Node* details = assembler.LoadFixedArrayElement(
+        descriptors, DescriptorArray::ToDetailsIndex(0));
+    assembler.GotoIf(
+        assembler.IsSetSmi(details, PropertyDetails::kAttributesReadOnlyMask),
+        &runtime);
+
+    arg_index.Bind(assembler.IntPtrConstant(0));
+    kind = assembler.DecodeWord32<Map::ElementsKindBits>(bit_field2);
+
+    assembler.GotoIf(
+        assembler.Int32GreaterThan(
+            kind, assembler.Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
+        &object_push_pre);
+
+    Node* new_length = assembler.BuildAppendJSArray(
+        FAST_SMI_ELEMENTS, context, receiver, args, arg_index, &smi_transition);
+    args.PopAndReturn(new_length);
+  }
+
+  // If the argument is not a smi, then use a heavyweight SetProperty to
+  // transition the array for only the single next element. If the argument is
+  // a smi, the failure is due to some other reason and we should fall back on
+  // the most generic implementation for the rest of the array.
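+  // For example, pushing 1.5 onto a FAST_SMI_ELEMENTS array lands here with a
+  // non-smi argument: the single SetProperty call transitions the elements
+  // kind, and the remaining arguments continue on the &double_push path.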
+  assembler.Bind(&smi_transition);
+  {
+    Node* arg = args.AtIndex(arg_index.value());
+    assembler.GotoIf(assembler.TaggedIsSmi(arg), &default_label);
+    Node* length = assembler.LoadJSArrayLength(receiver);
+    // TODO(danno): Use the KeyedStoreGeneric stub here when possible;
+    // calling into the runtime to do the elements transition is overkill.
+    assembler.CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
+                          assembler.SmiConstant(STRICT));
+    assembler.Increment(arg_index);
+    // The runtime SetProperty call could have converted the array to
+    // dictionary mode, which must be detected to abort the fast path.
+    Node* map = assembler.LoadMap(receiver);
+    Node* bit_field2 = assembler.LoadMapBitField2(map);
+    Node* kind = assembler.DecodeWord32<Map::ElementsKindBits>(bit_field2);
+    assembler.GotoIf(assembler.Word32Equal(
+                         kind, assembler.Int32Constant(DICTIONARY_ELEMENTS)),
+                     &default_label);
+
+    assembler.GotoIfNotNumber(arg, &object_push);
+    assembler.Goto(&double_push);
+  }
+
+  assembler.Bind(&object_push_pre);
+  {
+    assembler.Branch(assembler.Int32GreaterThan(
+                         kind, assembler.Int32Constant(FAST_HOLEY_ELEMENTS)),
+                     &double_push, &object_push);
+  }
+
+  assembler.Bind(&object_push);
+  {
+    Node* new_length = assembler.BuildAppendJSArray(
+        FAST_ELEMENTS, context, receiver, args, arg_index, &default_label);
+    args.PopAndReturn(new_length);
+  }
+
+  assembler.Bind(&double_push);
+  {
+    Node* new_length =
+        assembler.BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, context, receiver,
+                                     args, arg_index, &double_transition);
+    args.PopAndReturn(new_length);
+  }
+
+  // If the argument is not a double, then use a heavyweight SetProperty to
+  // transition the array for only the single next element. If the argument is
+  // a double, the failure is due to some other reason and we should fall back
+  // on the most generic implementation for the rest of the array.
+  assembler.Bind(&double_transition);
+  {
+    Node* arg = args.AtIndex(arg_index.value());
+    assembler.GotoIfNumber(arg, &default_label);
+    Node* length = assembler.LoadJSArrayLength(receiver);
+    // TODO(danno): Use the KeyedStoreGeneric stub here when possible;
+    // calling into the runtime to do the elements transition is overkill.
+    assembler.CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
+                          assembler.SmiConstant(STRICT));
+    assembler.Increment(arg_index);
+    // The runtime SetProperty call could have converted the array to
+    // dictionary mode, which must be detected to abort the fast path.
+    Node* map = assembler.LoadMap(receiver);
+    Node* bit_field2 = assembler.LoadMapBitField2(map);
+    Node* kind = assembler.DecodeWord32<Map::ElementsKindBits>(bit_field2);
+    assembler.GotoIf(assembler.Word32Equal(
+                         kind, assembler.Int32Constant(DICTIONARY_ELEMENTS)),
+                     &default_label);
+    assembler.Goto(&object_push);
+  }
+
+  // Fallback that stores unprocessed arguments using the full, heavyweight
+  // SetProperty machinery.
+  assembler.Bind(&default_label);
+  {
+    args.ForEach(
+        [&assembler, receiver, context](Node* arg) {
+          Node* length = assembler.LoadJSArrayLength(receiver);
+          assembler.CallRuntime(Runtime::kSetProperty, context, receiver,
+                                length, arg, assembler.SmiConstant(STRICT));
+        },
+        arg_index.value());
+    args.PopAndReturn(assembler.LoadJSArrayLength(receiver));
+  }
+
+  assembler.Bind(&runtime);
+  {
+    Node* target = assembler.LoadFromFrame(
+        StandardFrameConstants::kFunctionOffset, MachineType::TaggedPointer());
+    assembler.TailCallStub(CodeFactory::ArrayPush(assembler.isolate()), context,
+                           target, new_target, argc);
+  }
 }
 
 BUILTIN(ArrayPop) {
@@ -262,6 +431,242 @@
   return Smi::FromInt(new_length);
 }
 
+class ForEachCodeStubAssembler : public CodeStubAssembler {
+ public:
+  explicit ForEachCodeStubAssembler(compiler::CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
+
+  void VisitOneElement(Node* context, Node* this_arg, Node* o, Node* k,
+                       Node* callbackfn) {
+    Comment("begin VisitOneElement");
+
+    // a. Let Pk be ToString(k).
+    Node* p_k = ToString(context, k);
+
+    // b. Let kPresent be HasProperty(O, Pk).
+    // c. ReturnIfAbrupt(kPresent).
+    Node* k_present =
+        CallStub(CodeFactory::HasProperty(isolate()), context, p_k, o);
+
+    // d. If kPresent is true, then
+    Label not_present(this);
+    GotoIf(WordNotEqual(k_present, TrueConstant()), &not_present);
+
+    // i. Let kValue be Get(O, Pk).
+    // ii. ReturnIfAbrupt(kValue).
+    Node* k_value =
+        CallStub(CodeFactory::GetProperty(isolate()), context, o, k);
+
+    // iii. Let funcResult be Call(callbackfn, T, «kValue, k, O»).
+    // iv. ReturnIfAbrupt(funcResult).
+    CallJS(CodeFactory::Call(isolate()), context, callbackfn, this_arg, k_value,
+           k, o);
+
+    Goto(&not_present);
+    Bind(&not_present);
+    Comment("end VisitOneElement");
+  }
+
+  void VisitAllFastElements(Node* context, ElementsKind kind, Node* this_arg,
+                            Node* o, Node* len, Node* callbackfn,
+                            ParameterMode mode) {
+    Comment("begin VisitAllFastElements");
+    Variable original_map(this, MachineRepresentation::kTagged);
+    original_map.Bind(LoadMap(o));
+    VariableList list({&original_map}, zone());
+    BuildFastLoop(
+        list, IntPtrOrSmiConstant(0, mode), TaggedToParameter(len, mode),
+        [context, kind, this, o, &original_map, callbackfn, this_arg,
+         mode](Node* index) {
+          Label one_element_done(this), array_changed(this, Label::kDeferred),
+              hole_element(this);
+
+          // Check if o's map has changed during the callback. If so, we have to
+          // fall back to the slower spec implementation for the rest of the
+          // iteration.
+          Node* o_map = LoadMap(o);
+          GotoIf(WordNotEqual(o_map, original_map.value()), &array_changed);
+
+          // Check if o's length has changed during the callback and if the
+          // index is now out of range of the new length.
+          Node* tagged_index = ParameterToTagged(index, mode);
+          GotoIf(SmiGreaterThanOrEqual(tagged_index, LoadJSArrayLength(o)),
+                 &array_changed);
+
+          // Re-load the elements array. It may have been resized.
+          Node* elements = LoadElements(o);
+
+          // Fast case: load the element directly from the elements FixedArray
+          // and call the callback if the element is not the hole.
+          DCHECK(kind == FAST_ELEMENTS || kind == FAST_DOUBLE_ELEMENTS);
+          int base_size = kind == FAST_ELEMENTS
+                              ? FixedArray::kHeaderSize
+                              : (FixedArray::kHeaderSize - kHeapObjectTag);
+          Node* offset = ElementOffsetFromIndex(index, kind, mode, base_size);
+          Node* value = nullptr;
+          if (kind == FAST_ELEMENTS) {
+            value = LoadObjectField(elements, offset);
+            GotoIf(WordEqual(value, TheHoleConstant()), &hole_element);
+          } else {
+            Node* double_value =
+                LoadDoubleWithHoleCheck(elements, offset, &hole_element);
+            value = AllocateHeapNumberWithValue(double_value);
+          }
+          CallJS(CodeFactory::Call(isolate()), context, callbackfn, this_arg,
+                 value, tagged_index, o);
+          Goto(&one_element_done);
+
+          Bind(&hole_element);
+          BranchIfPrototypesHaveNoElements(o_map, &one_element_done,
+                                           &array_changed);
+
+          // O has changed during the forEach. Use the implementation precisely
+          // specified in the spec for the rest of the iteration, also making
+          // the failed original_map sticky in case of a subsequent change that
+          // goes back to the original map.
+          Bind(&array_changed);
+          VisitOneElement(context, this_arg, o, ParameterToTagged(index, mode),
+                          callbackfn);
+          original_map.Bind(UndefinedConstant());
+          Goto(&one_element_done);
+
+          Bind(&one_element_done);
+        },
+        1, mode, IndexAdvanceMode::kPost);
+    Comment("end VisitAllFastElements");
+  }
+};
+
+TF_BUILTIN(ArrayForEach, ForEachCodeStubAssembler) {
+  Label non_array(this), examine_elements(this), fast_elements(this),
+      slow(this), maybe_double_elements(this), fast_double_elements(this);
+
+  Node* receiver = Parameter(ForEachDescriptor::kReceiver);
+  Node* callbackfn = Parameter(ForEachDescriptor::kCallback);
+  Node* this_arg = Parameter(ForEachDescriptor::kThisArg);
+  Node* context = Parameter(ForEachDescriptor::kContext);
+
+  // TODO(danno): Seriously? Do we really need to throw the exact error message
+  // on null and undefined so that the webkit tests pass?
+  Label throw_null_undefined_exception(this, Label::kDeferred);
+  GotoIf(WordEqual(receiver, NullConstant()), &throw_null_undefined_exception);
+  GotoIf(WordEqual(receiver, UndefinedConstant()),
+         &throw_null_undefined_exception);
+
+  // By the book: taken directly from the ECMAScript 2015 specification
+
+  // 1. Let O be ToObject(this value).
+  // 2. ReturnIfAbrupt(O)
+  Node* o = CallStub(CodeFactory::ToObject(isolate()), context, receiver);
+
+  // 3. Let len be ToLength(Get(O, "length")).
+  // 4. ReturnIfAbrupt(len).
+  Variable merged_length(this, MachineRepresentation::kTagged);
+  Label has_length(this, &merged_length), not_js_array(this);
+  GotoIf(DoesntHaveInstanceType(o, JS_ARRAY_TYPE), &not_js_array);
+  merged_length.Bind(LoadJSArrayLength(o));
+  Goto(&has_length);
+  Bind(&not_js_array);
+  Node* len_property =
+      CallStub(CodeFactory::GetProperty(isolate()), context, o,
+               HeapConstant(isolate()->factory()->length_string()));
+  merged_length.Bind(
+      CallStub(CodeFactory::ToLength(isolate()), context, len_property));
+  Goto(&has_length);
+  Bind(&has_length);
+  Node* len = merged_length.value();
+
+  // 5. If IsCallable(callbackfn) is false, throw a TypeError exception.
+  Label type_exception(this, Label::kDeferred);
+  GotoIf(TaggedIsSmi(callbackfn), &type_exception);
+  GotoIfNot(IsCallableMap(LoadMap(callbackfn)), &type_exception);
+
+  // 6. If thisArg was supplied, let T be thisArg; else let T be undefined.
+  // [Already done by the arguments adapter]
+
+  // Non-smi lengths must use the slow path.
+  GotoIf(TaggedIsNotSmi(len), &slow);
+
+  BranchIfFastJSArray(o, context,
+                      CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
+                      &examine_elements, &slow);
+
+  Bind(&examine_elements);
+
+  ParameterMode mode = OptimalParameterMode();
+
+  // Select by ElementsKind
+  Node* o_map = LoadMap(o);
+  Node* bit_field2 = LoadMapBitField2(o_map);
+  Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
+  Branch(Int32GreaterThan(kind, Int32Constant(FAST_HOLEY_ELEMENTS)),
+         &maybe_double_elements, &fast_elements);
+
+  Bind(&fast_elements);
+  {
+    VisitAllFastElements(context, FAST_ELEMENTS, this_arg, o, len, callbackfn,
+                         mode);
+
+    // No exception, return success
+    Return(UndefinedConstant());
+  }
+
+  Bind(&maybe_double_elements);
+  Branch(Int32GreaterThan(kind, Int32Constant(FAST_HOLEY_DOUBLE_ELEMENTS)),
+         &slow, &fast_double_elements);
+
+  Bind(&fast_double_elements);
+  {
+    VisitAllFastElements(context, FAST_DOUBLE_ELEMENTS, this_arg, o, len,
+                         callbackfn, mode);
+
+    // No exception, return success
+    Return(UndefinedConstant());
+  }
+
+  Bind(&slow);
+  {
+    // By the book: taken from the ECMAScript 2015 specification (cont.)
+
+    // 7. Let k be 0.
+    Variable k(this, MachineRepresentation::kTagged);
+    k.Bind(SmiConstant(0));
+
+    // 8. Repeat, while k < len
+    Label loop(this, &k);
+    Label after_loop(this);
+    Goto(&loop);
+    Bind(&loop);
+    {
+      GotoUnlessNumberLessThan(k.value(), len, &after_loop);
+
+      VisitOneElement(context, this_arg, o, k.value(), callbackfn);
+
+      // e. Increase k by 1.
+      k.Bind(NumberInc(k.value()));
+      Goto(&loop);
+    }
+    Bind(&after_loop);
+    Return(UndefinedConstant());
+  }
+
+  Bind(&throw_null_undefined_exception);
+  {
+    CallRuntime(Runtime::kThrowTypeError, context,
+                SmiConstant(MessageTemplate::kCalledOnNullOrUndefined),
+                HeapConstant(isolate()->factory()->NewStringFromAsciiChecked(
+                    "Array.prototype.forEach")));
+    Unreachable();
+  }
+
+  Bind(&type_exception);
+  {
+    CallRuntime(Runtime::kThrowTypeError, context,
+                SmiConstant(MessageTemplate::kCalledNonCallable), callbackfn);
+    Unreachable();
+  }
+}
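+
+// For illustration: the fast path above persists only while the receiver
+// keeps its original map and length. For example,
+//   [1, 2, 3].forEach((x, i, a) => { if (i == 0) a.length = 1; });
+// visits index 0 on the fast path; the length check then fails and the
+// remaining indices are handled by the spec-exact VisitOneElement loop.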
+
 BUILTIN(ArraySlice) {
   HandleScope scope(isolate);
   Handle<Object> receiver = args.receiver();
@@ -461,8 +866,9 @@
         SeededNumberDictionary::cast(*storage_));
     // The object holding this backing store has just been allocated, so
     // it cannot yet be used as a prototype.
-    Handle<SeededNumberDictionary> result =
-        SeededNumberDictionary::AtNumberPut(dict, index, elm, false);
+    Handle<JSObject> not_a_prototype_holder;
+    Handle<SeededNumberDictionary> result = SeededNumberDictionary::AtNumberPut(
+        dict, index, elm, not_a_prototype_holder);
     if (!result.is_identical_to(dict)) {
       // Dictionary needed to grow.
       clear_storage();
@@ -533,9 +939,10 @@
           if (!element->IsTheHole(isolate_)) {
             // The object holding this backing store has just been allocated, so
             // it cannot yet be used as a prototype.
+            Handle<JSObject> not_a_prototype_holder;
             Handle<SeededNumberDictionary> new_storage =
                 SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
-                                                    false);
+                                                    not_a_prototype_holder);
             if (!new_storage.is_identical_to(slow_storage)) {
               slow_storage = loop_scope.CloseAndEscape(new_storage);
             }
@@ -1001,8 +1408,9 @@
   // If estimated number of elements is more than half of length, a
   // fixed array (fast case) is more time and space-efficient than a
   // dictionary.
-  bool fast_case =
-      is_array_species && (estimate_nof_elements * 2) >= estimate_result_length;
+  bool fast_case = is_array_species &&
+                   (estimate_nof_elements * 2) >= estimate_result_length &&
+                   isolate->IsIsConcatSpreadableLookupChainIntact();
 
   if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
     Handle<FixedArrayBase> storage =
@@ -1202,7 +1610,7 @@
 
   Handle<Object> receiver = args.receiver();
   // TODO(bmeurer): Do we really care about the exact exception message here?
-  if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+  if (receiver->IsNullOrUndefined(isolate)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
                               isolate->factory()->NewStringFromAsciiChecked(
@@ -1237,591 +1645,447 @@
   return Slow_ArrayConcat(&args, species, isolate);
 }
 
-void Builtins::Generate_ArrayIsArray(CodeStubAssembler* assembler) {
+void Builtins::Generate_ArrayIsArray(compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Label Label;
+  CodeStubAssembler assembler(state);
 
-  Node* object = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
+  Node* object = assembler.Parameter(1);
+  Node* context = assembler.Parameter(4);
 
-  Label call_runtime(assembler), return_true(assembler),
-      return_false(assembler);
+  Label call_runtime(&assembler), return_true(&assembler),
+      return_false(&assembler);
 
-  assembler->GotoIf(assembler->TaggedIsSmi(object), &return_false);
-  Node* instance_type = assembler->LoadInstanceType(object);
+  assembler.GotoIf(assembler.TaggedIsSmi(object), &return_false);
+  Node* instance_type = assembler.LoadInstanceType(object);
 
-  assembler->GotoIf(assembler->Word32Equal(
-                        instance_type, assembler->Int32Constant(JS_ARRAY_TYPE)),
-                    &return_true);
+  assembler.GotoIf(assembler.Word32Equal(
+                       instance_type, assembler.Int32Constant(JS_ARRAY_TYPE)),
+                   &return_true);
 
   // TODO(verwaest): Handle proxies in-place.
-  assembler->Branch(assembler->Word32Equal(
-                        instance_type, assembler->Int32Constant(JS_PROXY_TYPE)),
-                    &call_runtime, &return_false);
+  assembler.Branch(assembler.Word32Equal(
+                       instance_type, assembler.Int32Constant(JS_PROXY_TYPE)),
+                   &call_runtime, &return_false);
 
-  assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  assembler.Bind(&return_true);
+  assembler.Return(assembler.BooleanConstant(true));
 
-  assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
+  assembler.Bind(&return_false);
+  assembler.Return(assembler.BooleanConstant(false));
 
-  assembler->Bind(&call_runtime);
-  assembler->Return(
-      assembler->CallRuntime(Runtime::kArrayIsArray, context, object));
+  assembler.Bind(&call_runtime);
+  assembler.Return(
+      assembler.CallRuntime(Runtime::kArrayIsArray, context, object));
 }
 
-void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(ArrayIncludes, CodeStubAssembler) {
+  Node* const array = Parameter(0);
+  Node* const search_element = Parameter(1);
+  Node* const start_from = Parameter(2);
+  Node* const context = Parameter(3 + 2);
 
-  Node* array = assembler->Parameter(0);
-  Node* search_element = assembler->Parameter(1);
-  Node* start_from = assembler->Parameter(2);
-  Node* context = assembler->Parameter(3 + 2);
+  Variable len_var(this, MachineType::PointerRepresentation()),
+      index_var(this, MachineType::PointerRepresentation());
 
-  Node* intptr_zero = assembler->IntPtrConstant(0);
-  Node* intptr_one = assembler->IntPtrConstant(1);
+  Label init_k(this), return_true(this), return_false(this), call_runtime(this);
+  Label init_len(this), select_loop(this);
 
-  Node* the_hole = assembler->TheHoleConstant();
-  Node* undefined = assembler->UndefinedConstant();
-  Node* heap_number_map = assembler->HeapNumberMapConstant();
-
-  Variable len_var(assembler, MachineType::PointerRepresentation()),
-      index_var(assembler, MachineType::PointerRepresentation()),
-      start_from_var(assembler, MachineType::PointerRepresentation());
-
-  Label init_k(assembler), return_true(assembler), return_false(assembler),
-      call_runtime(assembler);
-
-  Label init_len(assembler);
-
-  index_var.Bind(intptr_zero);
-  len_var.Bind(intptr_zero);
+  index_var.Bind(IntPtrConstant(0));
+  len_var.Bind(IntPtrConstant(0));
 
   // Take slow path if not a JSArray, if retrieving elements requires
   // traversing prototype, or if access checks are required.
-  assembler->BranchIfFastJSArray(array, context, &init_len, &call_runtime);
+  BranchIfFastJSArray(array, context,
+                      CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
+                      &init_len, &call_runtime);
 
-  assembler->Bind(&init_len);
+  Bind(&init_len);
   {
     // Handle case where JSArray length is not an Smi in the runtime
-    Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
-    assembler->GotoUnless(assembler->TaggedIsSmi(len), &call_runtime);
+    Node* len = LoadObjectField(array, JSArray::kLengthOffset);
+    GotoIfNot(TaggedIsSmi(len), &call_runtime);
 
-    len_var.Bind(assembler->SmiToWord(len));
-    assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
-                      &return_false, &init_k);
+    len_var.Bind(SmiToWord(len));
+
+    GotoIf(IsUndefined(start_from), &select_loop);
+
+    // Bailout to slow path if startIndex is not an Smi.
+    Branch(TaggedIsSmi(start_from), &init_k, &call_runtime);
   }
 
-  assembler->Bind(&init_k);
-  {
-    Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
-        init_k_zero(assembler), init_k_n(assembler);
-    Node* tagged_n = assembler->ToInteger(context, start_from);
+  Bind(&init_k);
+  CSA_ASSERT(this, TaggedIsSmi(start_from));
+  Node* const untagged_start_from = SmiToWord(start_from);
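+  // Compute the starting index per the spec's fromIndex handling: a
+  // non-negative start is used as-is, while a negative start counts back from
+  // the end and clamps at zero. E.g. with len == 5: 7 stays 7 (so no elements
+  // are visited), -2 becomes 3, and -9 clamps to 0.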
+  index_var.Bind(Select(
+      IntPtrGreaterThanOrEqual(untagged_start_from, IntPtrConstant(0)),
+      [=]() { return untagged_start_from; },
+      [=]() {
+        Node* const index = IntPtrAdd(len_var.value(), untagged_start_from);
+        return SelectConstant(IntPtrLessThan(index, IntPtrConstant(0)),
+                              IntPtrConstant(0), index,
+                              MachineType::PointerRepresentation());
+      },
+      MachineType::PointerRepresentation()));
 
-    assembler->Branch(assembler->TaggedIsSmi(tagged_n), &init_k_smi,
-                      &init_k_heap_num);
-
-    assembler->Bind(&init_k_smi);
-    {
-      start_from_var.Bind(assembler->SmiUntag(tagged_n));
-      assembler->Goto(&init_k_n);
-    }
-
-    assembler->Bind(&init_k_heap_num);
-    {
-      Label do_return_false(assembler);
-      // This round is lossless for all valid lengths.
-      Node* fp_len = assembler->RoundIntPtrToFloat64(len_var.value());
-      Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
-      assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
-                        &do_return_false);
-      start_from_var.Bind(assembler->ChangeInt32ToIntPtr(
-          assembler->TruncateFloat64ToWord32(fp_n)));
-      assembler->Goto(&init_k_n);
-
-      assembler->Bind(&do_return_false);
-      {
-        index_var.Bind(intptr_zero);
-        assembler->Goto(&return_false);
-      }
-    }
-
-    assembler->Bind(&init_k_n);
-    {
-      Label if_positive(assembler), if_negative(assembler), done(assembler);
-      assembler->Branch(
-          assembler->IntPtrLessThan(start_from_var.value(), intptr_zero),
-          &if_negative, &if_positive);
-
-      assembler->Bind(&if_positive);
-      {
-        index_var.Bind(start_from_var.value());
-        assembler->Goto(&done);
-      }
-
-      assembler->Bind(&if_negative);
-      {
-        index_var.Bind(
-            assembler->IntPtrAdd(len_var.value(), start_from_var.value()));
-        assembler->Branch(
-            assembler->IntPtrLessThan(index_var.value(), intptr_zero),
-            &init_k_zero, &done);
-      }
-
-      assembler->Bind(&init_k_zero);
-      {
-        index_var.Bind(intptr_zero);
-        assembler->Goto(&done);
-      }
-
-      assembler->Bind(&done);
-    }
-  }
-
+  Goto(&select_loop);
+  Bind(&select_loop);
   static int32_t kElementsKind[] = {
       FAST_SMI_ELEMENTS,   FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
       FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS,    FAST_HOLEY_DOUBLE_ELEMENTS,
   };
 
-  Label if_smiorobjects(assembler), if_packed_doubles(assembler),
-      if_holey_doubles(assembler);
+  Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);
   Label* element_kind_handlers[] = {&if_smiorobjects,   &if_smiorobjects,
                                     &if_smiorobjects,   &if_smiorobjects,
                                     &if_packed_doubles, &if_holey_doubles};
 
-  Node* map = assembler->LoadMap(array);
-  Node* elements_kind = assembler->LoadMapElementsKind(map);
-  Node* elements = assembler->LoadElements(array);
-  assembler->Switch(elements_kind, &return_false, kElementsKind,
-                    element_kind_handlers, arraysize(kElementsKind));
+  Node* map = LoadMap(array);
+  Node* elements_kind = LoadMapElementsKind(map);
+  Node* elements = LoadElements(array);
+  Switch(elements_kind, &return_false, kElementsKind, element_kind_handlers,
+         arraysize(kElementsKind));
 
-  assembler->Bind(&if_smiorobjects);
+  Bind(&if_smiorobjects);
   {
-    Variable search_num(assembler, MachineRepresentation::kFloat64);
-    Label ident_loop(assembler, &index_var),
-        heap_num_loop(assembler, &search_num),
-        string_loop(assembler, &index_var), simd_loop(assembler),
-        undef_loop(assembler, &index_var), not_smi(assembler),
-        not_heap_num(assembler);
+    Variable search_num(this, MachineRepresentation::kFloat64);
+    Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
+        string_loop(this, &index_var), undef_loop(this, &index_var),
+        not_smi(this), not_heap_num(this);
 
-    assembler->GotoUnless(assembler->TaggedIsSmi(search_element), &not_smi);
-    search_num.Bind(assembler->SmiToFloat64(search_element));
-    assembler->Goto(&heap_num_loop);
+    GotoIfNot(TaggedIsSmi(search_element), &not_smi);
+    search_num.Bind(SmiToFloat64(search_element));
+    Goto(&heap_num_loop);
 
-    assembler->Bind(&not_smi);
-    assembler->GotoIf(assembler->WordEqual(search_element, undefined),
-                      &undef_loop);
-    Node* map = assembler->LoadMap(search_element);
-    assembler->GotoIf(assembler->WordNotEqual(map, heap_number_map),
-                      &not_heap_num);
-    search_num.Bind(assembler->LoadHeapNumberValue(search_element));
-    assembler->Goto(&heap_num_loop);
+    Bind(&not_smi);
+    GotoIf(WordEqual(search_element, UndefinedConstant()), &undef_loop);
+    Node* map = LoadMap(search_element);
+    GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
+    search_num.Bind(LoadHeapNumberValue(search_element));
+    Goto(&heap_num_loop);
 
-    assembler->Bind(&not_heap_num);
-    Node* search_type = assembler->LoadMapInstanceType(map);
-    assembler->GotoIf(assembler->IsStringInstanceType(search_type),
-                      &string_loop);
-    assembler->GotoIf(
-        assembler->Word32Equal(search_type,
-                               assembler->Int32Constant(SIMD128_VALUE_TYPE)),
-        &simd_loop);
-    assembler->Goto(&ident_loop);
+    Bind(&not_heap_num);
+    Node* search_type = LoadMapInstanceType(map);
+    GotoIf(IsStringInstanceType(search_type), &string_loop);
+    Goto(&ident_loop);
 
-    assembler->Bind(&ident_loop);
+    Bind(&ident_loop);
     {
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
-          &return_false);
-      Node* element_k = assembler->LoadFixedArrayElement(
-          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->GotoIf(assembler->WordEqual(element_k, search_element),
-                        &return_true);
+      GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+                &return_false);
+      Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+      GotoIf(WordEqual(element_k, search_element), &return_true);
 
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&ident_loop);
+      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+      Goto(&ident_loop);
     }
 
-    assembler->Bind(&undef_loop);
+    Bind(&undef_loop);
     {
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
-          &return_false);
-      Node* element_k = assembler->LoadFixedArrayElement(
-          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->GotoIf(assembler->WordEqual(element_k, undefined),
-                        &return_true);
-      assembler->GotoIf(assembler->WordEqual(element_k, the_hole),
-                        &return_true);
+      GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+                &return_false);
+      Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+      GotoIf(WordEqual(element_k, UndefinedConstant()), &return_true);
+      GotoIf(WordEqual(element_k, TheHoleConstant()), &return_true);
 
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&undef_loop);
+      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+      Goto(&undef_loop);
     }
 
-    assembler->Bind(&heap_num_loop);
+    Bind(&heap_num_loop);
     {
-      Label nan_loop(assembler, &index_var),
-          not_nan_loop(assembler, &index_var);
-      assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
-                                      &not_nan_loop);
+      Label nan_loop(this, &index_var), not_nan_loop(this, &index_var);
+      BranchIfFloat64IsNaN(search_num.value(), &nan_loop, &not_nan_loop);
 
-      assembler->Bind(&not_nan_loop);
+      Bind(&not_nan_loop);
       {
-        Label continue_loop(assembler), not_smi(assembler);
-        assembler->GotoUnless(
-            assembler->UintPtrLessThan(index_var.value(), len_var.value()),
-            &return_false);
-        Node* element_k = assembler->LoadFixedArrayElement(
-            elements, index_var.value(), 0,
-            CodeStubAssembler::INTPTR_PARAMETERS);
-        assembler->GotoUnless(assembler->TaggedIsSmi(element_k), &not_smi);
-        assembler->Branch(
-            assembler->Float64Equal(search_num.value(),
-                                    assembler->SmiToFloat64(element_k)),
-            &return_true, &continue_loop);
+        Label continue_loop(this), not_smi(this);
+        GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+                  &return_false);
+        Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+        GotoIfNot(TaggedIsSmi(element_k), &not_smi);
+        Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
+               &return_true, &continue_loop);
 
-        assembler->Bind(&not_smi);
-        assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
-                                                  heap_number_map),
-                          &continue_loop);
-        assembler->Branch(
-            assembler->Float64Equal(search_num.value(),
-                                    assembler->LoadHeapNumberValue(element_k)),
-            &return_true, &continue_loop);
+        Bind(&not_smi);
+        GotoIfNot(IsHeapNumber(element_k), &continue_loop);
+        Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
+               &return_true, &continue_loop);
 
-        assembler->Bind(&continue_loop);
-        index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-        assembler->Goto(&not_nan_loop);
+        Bind(&continue_loop);
+        index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+        Goto(&not_nan_loop);
       }
 
-      assembler->Bind(&nan_loop);
+      Bind(&nan_loop);
       {
-        Label continue_loop(assembler);
-        assembler->GotoUnless(
-            assembler->UintPtrLessThan(index_var.value(), len_var.value()),
-            &return_false);
-        Node* element_k = assembler->LoadFixedArrayElement(
-            elements, index_var.value(), 0,
-            CodeStubAssembler::INTPTR_PARAMETERS);
-        assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
-        assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
-                                                  heap_number_map),
-                          &continue_loop);
-        assembler->BranchIfFloat64IsNaN(
-            assembler->LoadHeapNumberValue(element_k), &return_true,
-            &continue_loop);
+        Label continue_loop(this);
+        GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+                  &return_false);
+        Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+        GotoIf(TaggedIsSmi(element_k), &continue_loop);
+        GotoIfNot(IsHeapNumber(element_k), &continue_loop);
+        BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_true,
+                             &continue_loop);
 
-        assembler->Bind(&continue_loop);
-        index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-        assembler->Goto(&nan_loop);
+        Bind(&continue_loop);
+        index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+        Goto(&nan_loop);
       }
     }
 
-    assembler->Bind(&string_loop);
+    Bind(&string_loop);
     {
-      Label continue_loop(assembler);
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
-          &return_false);
-      Node* element_k = assembler->LoadFixedArrayElement(
-          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
-      assembler->GotoUnless(assembler->IsStringInstanceType(
-                                assembler->LoadInstanceType(element_k)),
-                            &continue_loop);
+      Label continue_loop(this);
+      GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+                &return_false);
+      Node* element_k = LoadFixedArrayElement(elements, index_var.value());
+      GotoIf(TaggedIsSmi(element_k), &continue_loop);
+      GotoIfNot(IsStringInstanceType(LoadInstanceType(element_k)),
+                &continue_loop);
 
       // TODO(bmeurer): Consider inlining the StringEqual logic here.
-      Callable callable = CodeFactory::StringEqual(assembler->isolate());
-      Node* result =
-          assembler->CallStub(callable, context, search_element, element_k);
-      assembler->Branch(
-          assembler->WordEqual(assembler->BooleanConstant(true), result),
-          &return_true, &continue_loop);
+      Node* result = CallStub(CodeFactory::StringEqual(isolate()), context,
+                              search_element, element_k);
+      Branch(WordEqual(BooleanConstant(true), result), &return_true,
+             &continue_loop);
 
-      assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&string_loop);
-    }
-
-    assembler->Bind(&simd_loop);
-    {
-      Label continue_loop(assembler, &index_var),
-          loop_body(assembler, &index_var);
-      Node* map = assembler->LoadMap(search_element);
-
-      assembler->Goto(&loop_body);
-      assembler->Bind(&loop_body);
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
-          &return_false);
-
-      Node* element_k = assembler->LoadFixedArrayElement(
-          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
-
-      Node* map_k = assembler->LoadMap(element_k);
-      assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
-                                      &return_true, &continue_loop);
-
-      assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&loop_body);
+      Bind(&continue_loop);
+      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+      Goto(&string_loop);
     }
   }
 
-  assembler->Bind(&if_packed_doubles);
+  Bind(&if_packed_doubles);
   {
-    Label nan_loop(assembler, &index_var), not_nan_loop(assembler, &index_var),
-        hole_loop(assembler, &index_var), search_notnan(assembler);
-    Variable search_num(assembler, MachineRepresentation::kFloat64);
+    Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
+        hole_loop(this, &index_var), search_notnan(this);
+    Variable search_num(this, MachineRepresentation::kFloat64);
 
-    assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
-                          &search_notnan);
-    search_num.Bind(assembler->SmiToFloat64(search_element));
-    assembler->Goto(&not_nan_loop);
+    GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
+    search_num.Bind(SmiToFloat64(search_element));
+    Goto(&not_nan_loop);
 
-    assembler->Bind(&search_notnan);
-    assembler->GotoIf(assembler->WordNotEqual(
-                          assembler->LoadMap(search_element), heap_number_map),
-                      &return_false);
+    Bind(&search_notnan);
+    GotoIfNot(IsHeapNumber(search_element), &return_false);
 
-    search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+    search_num.Bind(LoadHeapNumberValue(search_element));
 
-    assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
-                                    &not_nan_loop);
+    BranchIfFloat64IsNaN(search_num.value(), &nan_loop, &not_nan_loop);
 
     // Search for HeapNumber
-    assembler->Bind(&not_nan_loop);
+    Bind(&not_nan_loop);
     {
-      Label continue_loop(assembler);
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
-          &return_false);
-      Node* element_k = assembler->LoadFixedDoubleArrayElement(
-          elements, index_var.value(), MachineType::Float64(), 0,
-          CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
-                        &return_true, &continue_loop);
-      assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&not_nan_loop);
+      Label continue_loop(this);
+      GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+                &return_false);
+      Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
+                                                    MachineType::Float64());
+      Branch(Float64Equal(element_k, search_num.value()), &return_true,
+             &continue_loop);
+      Bind(&continue_loop);
+      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+      Goto(&not_nan_loop);
     }
 
     // Search for NaN
-    assembler->Bind(&nan_loop);
+    Bind(&nan_loop);
     {
-      Label continue_loop(assembler);
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
-          &return_false);
-      Node* element_k = assembler->LoadFixedDoubleArrayElement(
-          elements, index_var.value(), MachineType::Float64(), 0,
-          CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
-      assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&nan_loop);
+      Label continue_loop(this);
+      GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+                &return_false);
+      Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
+                                                    MachineType::Float64());
+      BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+      Bind(&continue_loop);
+      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+      Goto(&nan_loop);
     }
   }
 
-  assembler->Bind(&if_holey_doubles);
+  Bind(&if_holey_doubles);
   {
-    Label nan_loop(assembler, &index_var), not_nan_loop(assembler, &index_var),
-        hole_loop(assembler, &index_var), search_notnan(assembler);
-    Variable search_num(assembler, MachineRepresentation::kFloat64);
+    Label nan_loop(this, &index_var), not_nan_loop(this, &index_var),
+        hole_loop(this, &index_var), search_notnan(this);
+    Variable search_num(this, MachineRepresentation::kFloat64);
 
-    assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
-                          &search_notnan);
-    search_num.Bind(assembler->SmiToFloat64(search_element));
-    assembler->Goto(&not_nan_loop);
+    GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
+    search_num.Bind(SmiToFloat64(search_element));
+    Goto(&not_nan_loop);
 
-    assembler->Bind(&search_notnan);
-    assembler->GotoIf(assembler->WordEqual(search_element, undefined),
-                      &hole_loop);
-    assembler->GotoIf(assembler->WordNotEqual(
-                          assembler->LoadMap(search_element), heap_number_map),
-                      &return_false);
+    Bind(&search_notnan);
+    GotoIf(WordEqual(search_element, UndefinedConstant()), &hole_loop);
+    GotoIfNot(IsHeapNumber(search_element), &return_false);
 
-    search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+    search_num.Bind(LoadHeapNumberValue(search_element));
 
-    assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
-                                    &not_nan_loop);
+    BranchIfFloat64IsNaN(search_num.value(), &nan_loop, &not_nan_loop);
 
     // Search for HeapNumber
-    assembler->Bind(&not_nan_loop);
+    Bind(&not_nan_loop);
     {
-      Label continue_loop(assembler);
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
-          &return_false);
+      Label continue_loop(this);
+      GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+                &return_false);
 
       // Load double value or continue if it contains a double hole.
-      Node* element_k = assembler->LoadFixedDoubleArrayElement(
+      Node* element_k = LoadFixedDoubleArrayElement(
           elements, index_var.value(), MachineType::Float64(), 0,
           CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
 
-      assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
-                        &return_true, &continue_loop);
-      assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&not_nan_loop);
+      Branch(Float64Equal(element_k, search_num.value()), &return_true,
+             &continue_loop);
+      Bind(&continue_loop);
+      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+      Goto(&not_nan_loop);
     }
 
     // Search for NaN
-    assembler->Bind(&nan_loop);
+    Bind(&nan_loop);
     {
-      Label continue_loop(assembler);
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
-          &return_false);
+      Label continue_loop(this);
+      GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+                &return_false);
 
       // Load double value or continue if it contains a double hole.
-      Node* element_k = assembler->LoadFixedDoubleArrayElement(
+      Node* element_k = LoadFixedDoubleArrayElement(
           elements, index_var.value(), MachineType::Float64(), 0,
           CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
 
-      assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
-      assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&nan_loop);
+      BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+      Bind(&continue_loop);
+      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+      Goto(&nan_loop);
     }
 
     // Search for the Hole
-    assembler->Bind(&hole_loop);
+    Bind(&hole_loop);
     {
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
-          &return_false);
+      GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
+                &return_false);
 
       // Check if the element is a double hole, but don't load it.
-      assembler->LoadFixedDoubleArrayElement(
+      LoadFixedDoubleArrayElement(
           elements, index_var.value(), MachineType::None(), 0,
           CodeStubAssembler::INTPTR_PARAMETERS, &return_true);
 
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&hole_loop);
+      index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+      Goto(&hole_loop);
     }
   }
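
The hole checks above pass MachineType::None() so the element is never materialized as a double. Holey double arrays mark missing slots with a reserved NaN bit pattern, and since NaN never compares equal to itself, only a raw bit comparison can detect a hole. A minimal standalone sketch (kHoleNanBits is a placeholder value, not V8's actual constant):

#include <cstdint>
#include <cstring>

// Placeholder pattern for illustration; V8 reserves a specific NaN bit
// pattern for the hole elsewhere in the codebase.
static const uint64_t kHoleNanBits = 0x7FF7DEADDEADBEEFull;

static bool IsDoubleHole(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  // The hole is a NaN, and NaN == NaN is false for doubles, so a value
  // comparison could never detect it; compare the raw bits instead.
  return bits == kHoleNanBits;
}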
 
-  assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  Bind(&return_true);
+  Return(TrueConstant());
 
-  assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
+  Bind(&return_false);
+  Return(FalseConstant());
 
-  assembler->Bind(&call_runtime);
-  assembler->Return(assembler->CallRuntime(Runtime::kArrayIncludes_Slow,
-                                           context, array, search_element,
-                                           start_from));
+  Bind(&call_runtime);
+  Return(CallRuntime(Runtime::kArrayIncludes_Slow, context, array,
+                     search_element, start_from));
 }
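
Generate_ArrayIncludes needs its dedicated nan_loop because Array.prototype.includes compares with SameValueZero, under which NaN matches NaN; Array.prototype.indexOf below uses Strict Equality and can branch a NaN search_num straight to return_not_found. A standalone C++ contrast of the two comparisons on numbers:

#include <cmath>

// Strict Equality on numbers: NaN never equals anything, itself included.
static bool StrictEqualsNumber(double a, double b) { return a == b; }

// SameValueZero on numbers: like Strict Equality, except NaN matches NaN
// (and, as with Strict Equality, +0 and -0 compare equal).
static bool SameValueZeroNumber(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) return true;
  return a == b;
}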
 
-void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
+void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Label Label;
   typedef CodeStubAssembler::Variable Variable;
+  CodeStubAssembler assembler(state);
 
-  Node* array = assembler->Parameter(0);
-  Node* search_element = assembler->Parameter(1);
-  Node* start_from = assembler->Parameter(2);
-  Node* context = assembler->Parameter(3 + 2);
+  Node* array = assembler.Parameter(0);
+  Node* search_element = assembler.Parameter(1);
+  Node* start_from = assembler.Parameter(2);
+  Node* context = assembler.Parameter(3 + 2);
 
-  Node* intptr_zero = assembler->IntPtrConstant(0);
-  Node* intptr_one = assembler->IntPtrConstant(1);
+  Node* intptr_zero = assembler.IntPtrConstant(0);
+  Node* intptr_one = assembler.IntPtrConstant(1);
 
-  Node* undefined = assembler->UndefinedConstant();
-  Node* heap_number_map = assembler->HeapNumberMapConstant();
+  Node* undefined = assembler.UndefinedConstant();
 
-  Variable len_var(assembler, MachineType::PointerRepresentation()),
-      index_var(assembler, MachineType::PointerRepresentation()),
-      start_from_var(assembler, MachineType::PointerRepresentation());
+  Variable len_var(&assembler, MachineType::PointerRepresentation()),
+      index_var(&assembler, MachineType::PointerRepresentation()),
+      start_from_var(&assembler, MachineType::PointerRepresentation());
 
-  Label init_k(assembler), return_found(assembler), return_not_found(assembler),
-      call_runtime(assembler);
+  Label init_k(&assembler), return_found(&assembler),
+      return_not_found(&assembler), call_runtime(&assembler);
 
-  Label init_len(assembler);
+  Label init_len(&assembler);
 
   index_var.Bind(intptr_zero);
   len_var.Bind(intptr_zero);
 
   // Take the slow path if the receiver is not a JSArray, if retrieving the
   // elements requires traversing the prototype chain, or if access checks
   // are required.
-  assembler->BranchIfFastJSArray(array, context, &init_len, &call_runtime);
+  assembler.BranchIfFastJSArray(
+      array, context, CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
+      &init_len, &call_runtime);
 
-  assembler->Bind(&init_len);
+  assembler.Bind(&init_len);
   {
     // Handle the case where the JSArray length is not a Smi in the runtime.
-    Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
-    assembler->GotoUnless(assembler->TaggedIsSmi(len), &call_runtime);
+    Node* len = assembler.LoadObjectField(array, JSArray::kLengthOffset);
+    assembler.GotoIfNot(assembler.TaggedIsSmi(len), &call_runtime);
 
-    len_var.Bind(assembler->SmiToWord(len));
-    assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
-                      &return_not_found, &init_k);
+    len_var.Bind(assembler.SmiToWord(len));
+    assembler.Branch(assembler.WordEqual(len_var.value(), intptr_zero),
+                     &return_not_found, &init_k);
   }
 
-  assembler->Bind(&init_k);
+  assembler.Bind(&init_k);
   {
-    Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
-        init_k_zero(assembler), init_k_n(assembler);
-    Node* tagged_n = assembler->ToInteger(context, start_from);
+    // For now, only deal with undefined and Smis here; we must be really
+    // careful with side effects from the ToInteger conversion, as those side
+    // effects might invalidate our assumptions about the receiver being a
+    // fast JSArray and about its length.
+    Label done(&assembler), init_k_smi(&assembler), init_k_other(&assembler),
+        init_k_zero(&assembler), init_k_n(&assembler);
 
-    assembler->Branch(assembler->TaggedIsSmi(tagged_n), &init_k_smi,
-                      &init_k_heap_num);
+    assembler.Branch(assembler.TaggedIsSmi(start_from), &init_k_smi,
+                     &init_k_other);
 
-    assembler->Bind(&init_k_smi);
+    assembler.Bind(&init_k_smi);
     {
-      start_from_var.Bind(assembler->SmiUntag(tagged_n));
-      assembler->Goto(&init_k_n);
+      start_from_var.Bind(assembler.SmiUntag(start_from));
+      assembler.Goto(&init_k_n);
     }
 
-    assembler->Bind(&init_k_heap_num);
+    assembler.Bind(&init_k_other);
     {
-      Label do_return_not_found(assembler);
-      // This round is lossless for all valid lengths.
-      Node* fp_len = assembler->RoundIntPtrToFloat64(len_var.value());
-      Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
-      assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
-                        &do_return_not_found);
-      start_from_var.Bind(assembler->ChangeInt32ToIntPtr(
-          assembler->TruncateFloat64ToWord32(fp_n)));
-      assembler->Goto(&init_k_n);
-
-      assembler->Bind(&do_return_not_found);
-      {
-        index_var.Bind(intptr_zero);
-        assembler->Goto(&return_not_found);
-      }
+      // The fromIndex must be undefined here, otherwise bailout and let the
+      // runtime deal with the full ToInteger conversion.
+      assembler.GotoIfNot(assembler.IsUndefined(start_from), &call_runtime);
+      start_from_var.Bind(intptr_zero);
+      assembler.Goto(&init_k_n);
     }
 
-    assembler->Bind(&init_k_n);
+    assembler.Bind(&init_k_n);
     {
-      Label if_positive(assembler), if_negative(assembler), done(assembler);
-      assembler->Branch(
-          assembler->IntPtrLessThan(start_from_var.value(), intptr_zero),
+      Label if_positive(&assembler), if_negative(&assembler), done(&assembler);
+      assembler.Branch(
+          assembler.IntPtrLessThan(start_from_var.value(), intptr_zero),
           &if_negative, &if_positive);
 
-      assembler->Bind(&if_positive);
+      assembler.Bind(&if_positive);
       {
         index_var.Bind(start_from_var.value());
-        assembler->Goto(&done);
+        assembler.Goto(&done);
       }
 
-      assembler->Bind(&if_negative);
+      assembler.Bind(&if_negative);
       {
         index_var.Bind(
-            assembler->IntPtrAdd(len_var.value(), start_from_var.value()));
-        assembler->Branch(
-            assembler->IntPtrLessThan(index_var.value(), intptr_zero),
+            assembler.IntPtrAdd(len_var.value(), start_from_var.value()));
+        assembler.Branch(
+            assembler.IntPtrLessThan(index_var.value(), intptr_zero),
             &init_k_zero, &done);
       }
 
-      assembler->Bind(&init_k_zero);
+      assembler.Bind(&init_k_zero);
       {
         index_var.Bind(intptr_zero);
-        assembler->Goto(&done);
+        assembler.Goto(&done);
       }
 
-      assembler->Bind(&done);
+      assembler.Bind(&done);
     }
   }
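
The init_k logic above keeps to undefined and Smi start indices because a full ToInteger on an arbitrary object can call back into user JavaScript (for example via valueOf), which may shrink the array or change its elements kind after BranchIfFastJSArray has already passed. A simplified standalone model of the hazard, with invented types that are not V8 internals:

#include <functional>

struct FakeArray { int length; };

// A fromIndex is either a plain integer (no side effects) or an object
// whose conversion may run arbitrary user code, e.g. a valueOf() that
// truncates the array out from under us.
struct FromIndex {
  bool is_plain_int;
  int value;
  std::function<int(FakeArray&)> to_integer;  // may mutate the array
};

static int StartIndex(FakeArray& a, const FromIndex& f) {
  if (f.is_plain_int) return f.value;  // safe: the cached length stays valid
  // The conversion may invalidate every assumption made so far; afterwards
  // the cached length and elements must not be trusted.
  return f.to_integer(a);
}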
 
@@ -1830,384 +2094,357 @@
       FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS,    FAST_HOLEY_DOUBLE_ELEMENTS,
   };
 
-  Label if_smiorobjects(assembler), if_packed_doubles(assembler),
-      if_holey_doubles(assembler);
+  Label if_smiorobjects(&assembler), if_packed_doubles(&assembler),
+      if_holey_doubles(&assembler);
   Label* element_kind_handlers[] = {&if_smiorobjects,   &if_smiorobjects,
                                     &if_smiorobjects,   &if_smiorobjects,
                                     &if_packed_doubles, &if_holey_doubles};
 
-  Node* map = assembler->LoadMap(array);
-  Node* elements_kind = assembler->LoadMapElementsKind(map);
-  Node* elements = assembler->LoadElements(array);
-  assembler->Switch(elements_kind, &return_not_found, kElementsKind,
-                    element_kind_handlers, arraysize(kElementsKind));
+  Node* map = assembler.LoadMap(array);
+  Node* elements_kind = assembler.LoadMapElementsKind(map);
+  Node* elements = assembler.LoadElements(array);
+  assembler.Switch(elements_kind, &return_not_found, kElementsKind,
+                   element_kind_handlers, arraysize(kElementsKind));
 
-  assembler->Bind(&if_smiorobjects);
+  assembler.Bind(&if_smiorobjects);
   {
-    Variable search_num(assembler, MachineRepresentation::kFloat64);
-    Label ident_loop(assembler, &index_var),
-        heap_num_loop(assembler, &search_num),
-        string_loop(assembler, &index_var), simd_loop(assembler),
-        undef_loop(assembler, &index_var), not_smi(assembler),
-        not_heap_num(assembler);
+    Variable search_num(&assembler, MachineRepresentation::kFloat64);
+    Label ident_loop(&assembler, &index_var),
+        heap_num_loop(&assembler, &search_num),
+        string_loop(&assembler, &index_var), undef_loop(&assembler, &index_var),
+        not_smi(&assembler), not_heap_num(&assembler);
 
-    assembler->GotoUnless(assembler->TaggedIsSmi(search_element), &not_smi);
-    search_num.Bind(assembler->SmiToFloat64(search_element));
-    assembler->Goto(&heap_num_loop);
+    assembler.GotoIfNot(assembler.TaggedIsSmi(search_element), &not_smi);
+    search_num.Bind(assembler.SmiToFloat64(search_element));
+    assembler.Goto(&heap_num_loop);
 
-    assembler->Bind(&not_smi);
-    assembler->GotoIf(assembler->WordEqual(search_element, undefined),
-                      &undef_loop);
-    Node* map = assembler->LoadMap(search_element);
-    assembler->GotoIf(assembler->WordNotEqual(map, heap_number_map),
-                      &not_heap_num);
-    search_num.Bind(assembler->LoadHeapNumberValue(search_element));
-    assembler->Goto(&heap_num_loop);
+    assembler.Bind(&not_smi);
+    assembler.GotoIf(assembler.WordEqual(search_element, undefined),
+                     &undef_loop);
+    Node* map = assembler.LoadMap(search_element);
+    assembler.GotoIfNot(assembler.IsHeapNumberMap(map), &not_heap_num);
+    search_num.Bind(assembler.LoadHeapNumberValue(search_element));
+    assembler.Goto(&heap_num_loop);
 
-    assembler->Bind(&not_heap_num);
-    Node* search_type = assembler->LoadMapInstanceType(map);
-    assembler->GotoIf(assembler->IsStringInstanceType(search_type),
-                      &string_loop);
-    assembler->GotoIf(
-        assembler->Word32Equal(search_type,
-                               assembler->Int32Constant(SIMD128_VALUE_TYPE)),
-        &simd_loop);
-    assembler->Goto(&ident_loop);
+    assembler.Bind(&not_heap_num);
+    Node* search_type = assembler.LoadMapInstanceType(map);
+    assembler.GotoIf(assembler.IsStringInstanceType(search_type), &string_loop);
+    assembler.Goto(&ident_loop);
 
-    assembler->Bind(&ident_loop);
+    assembler.Bind(&ident_loop);
     {
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+      assembler.GotoIfNot(
+          assembler.UintPtrLessThan(index_var.value(), len_var.value()),
           &return_not_found);
-      Node* element_k = assembler->LoadFixedArrayElement(
-          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->GotoIf(assembler->WordEqual(element_k, search_element),
-                        &return_found);
+      Node* element_k =
+          assembler.LoadFixedArrayElement(elements, index_var.value());
+      assembler.GotoIf(assembler.WordEqual(element_k, search_element),
+                       &return_found);
 
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&ident_loop);
+      index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+      assembler.Goto(&ident_loop);
     }
 
-    assembler->Bind(&undef_loop);
+    assembler.Bind(&undef_loop);
     {
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+      assembler.GotoIfNot(
+          assembler.UintPtrLessThan(index_var.value(), len_var.value()),
           &return_not_found);
-      Node* element_k = assembler->LoadFixedArrayElement(
-          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->GotoIf(assembler->WordEqual(element_k, undefined),
-                        &return_found);
+      Node* element_k =
+          assembler.LoadFixedArrayElement(elements, index_var.value());
+      assembler.GotoIf(assembler.WordEqual(element_k, undefined),
+                       &return_found);
 
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&undef_loop);
+      index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+      assembler.Goto(&undef_loop);
     }
 
-    assembler->Bind(&heap_num_loop);
+    assembler.Bind(&heap_num_loop);
     {
-      Label not_nan_loop(assembler, &index_var);
-      assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
-                                      &not_nan_loop);
+      Label not_nan_loop(&assembler, &index_var);
+      assembler.BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+                                     &not_nan_loop);
 
-      assembler->Bind(&not_nan_loop);
+      assembler.Bind(&not_nan_loop);
       {
-        Label continue_loop(assembler), not_smi(assembler);
-        assembler->GotoUnless(
-            assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+        Label continue_loop(&assembler), not_smi(&assembler);
+        assembler.GotoIfNot(
+            assembler.UintPtrLessThan(index_var.value(), len_var.value()),
             &return_not_found);
-        Node* element_k = assembler->LoadFixedArrayElement(
-            elements, index_var.value(), 0,
-            CodeStubAssembler::INTPTR_PARAMETERS);
-        assembler->GotoUnless(assembler->TaggedIsSmi(element_k), &not_smi);
-        assembler->Branch(
-            assembler->Float64Equal(search_num.value(),
-                                    assembler->SmiToFloat64(element_k)),
+        Node* element_k =
+            assembler.LoadFixedArrayElement(elements, index_var.value());
+        assembler.GotoIfNot(assembler.TaggedIsSmi(element_k), &not_smi);
+        assembler.Branch(
+            assembler.Float64Equal(search_num.value(),
+                                   assembler.SmiToFloat64(element_k)),
             &return_found, &continue_loop);
 
-        assembler->Bind(&not_smi);
-        assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
-                                                  heap_number_map),
-                          &continue_loop);
-        assembler->Branch(
-            assembler->Float64Equal(search_num.value(),
-                                    assembler->LoadHeapNumberValue(element_k)),
+        assembler.Bind(&not_smi);
+        assembler.GotoIfNot(
+            assembler.IsHeapNumberMap(assembler.LoadMap(element_k)),
+            &continue_loop);
+        assembler.Branch(
+            assembler.Float64Equal(search_num.value(),
+                                   assembler.LoadHeapNumberValue(element_k)),
             &return_found, &continue_loop);
 
-        assembler->Bind(&continue_loop);
-        index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-        assembler->Goto(&not_nan_loop);
+        assembler.Bind(&continue_loop);
+        index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+        assembler.Goto(&not_nan_loop);
       }
     }
 
-    assembler->Bind(&string_loop);
+    assembler.Bind(&string_loop);
     {
-      Label continue_loop(assembler);
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+      Label continue_loop(&assembler);
+      assembler.GotoIfNot(
+          assembler.UintPtrLessThan(index_var.value(), len_var.value()),
           &return_not_found);
-      Node* element_k = assembler->LoadFixedArrayElement(
-          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
-      assembler->GotoUnless(assembler->IsStringInstanceType(
-                                assembler->LoadInstanceType(element_k)),
-                            &continue_loop);
+      Node* element_k =
+          assembler.LoadFixedArrayElement(elements, index_var.value());
+      assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
+      assembler.GotoIfNot(
+          assembler.IsStringInstanceType(assembler.LoadInstanceType(element_k)),
+          &continue_loop);
 
       // TODO(bmeurer): Consider inlining the StringEqual logic here.
-      Callable callable = CodeFactory::StringEqual(assembler->isolate());
+      Callable callable = CodeFactory::StringEqual(assembler.isolate());
       Node* result =
-          assembler->CallStub(callable, context, search_element, element_k);
-      assembler->Branch(
-          assembler->WordEqual(assembler->BooleanConstant(true), result),
+          assembler.CallStub(callable, context, search_element, element_k);
+      assembler.Branch(
+          assembler.WordEqual(assembler.BooleanConstant(true), result),
           &return_found, &continue_loop);
 
-      assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&string_loop);
-    }
-
-    assembler->Bind(&simd_loop);
-    {
-      Label continue_loop(assembler, &index_var),
-          loop_body(assembler, &index_var);
-      Node* map = assembler->LoadMap(search_element);
-
-      assembler->Goto(&loop_body);
-      assembler->Bind(&loop_body);
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
-          &return_not_found);
-
-      Node* element_k = assembler->LoadFixedArrayElement(
-          elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
-
-      Node* map_k = assembler->LoadMap(element_k);
-      assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
-                                      &return_found, &continue_loop);
-
-      assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&loop_body);
+      assembler.Bind(&continue_loop);
+      index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+      assembler.Goto(&string_loop);
     }
   }
 
-  assembler->Bind(&if_packed_doubles);
+  assembler.Bind(&if_packed_doubles);
   {
-    Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
-    Variable search_num(assembler, MachineRepresentation::kFloat64);
+    Label not_nan_loop(&assembler, &index_var), search_notnan(&assembler);
+    Variable search_num(&assembler, MachineRepresentation::kFloat64);
 
-    assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
-                          &search_notnan);
-    search_num.Bind(assembler->SmiToFloat64(search_element));
-    assembler->Goto(&not_nan_loop);
+    assembler.GotoIfNot(assembler.TaggedIsSmi(search_element), &search_notnan);
+    search_num.Bind(assembler.SmiToFloat64(search_element));
+    assembler.Goto(&not_nan_loop);
 
-    assembler->Bind(&search_notnan);
-    assembler->GotoIf(assembler->WordNotEqual(
-                          assembler->LoadMap(search_element), heap_number_map),
-                      &return_not_found);
+    assembler.Bind(&search_notnan);
+    assembler.GotoIfNot(
+        assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
+        &return_not_found);
 
-    search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+    search_num.Bind(assembler.LoadHeapNumberValue(search_element));
 
-    assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
-                                    &not_nan_loop);
+    assembler.BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+                                   &not_nan_loop);
 
     // Search for HeapNumber
-    assembler->Bind(&not_nan_loop);
+    assembler.Bind(&not_nan_loop);
     {
-      Label continue_loop(assembler);
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+      Label continue_loop(&assembler);
+      assembler.GotoIfNot(
+          assembler.UintPtrLessThan(index_var.value(), len_var.value()),
           &return_not_found);
-      Node* element_k = assembler->LoadFixedDoubleArrayElement(
-          elements, index_var.value(), MachineType::Float64(), 0,
-          CodeStubAssembler::INTPTR_PARAMETERS);
-      assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
-                        &return_found, &continue_loop);
-      assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&not_nan_loop);
+      Node* element_k = assembler.LoadFixedDoubleArrayElement(
+          elements, index_var.value(), MachineType::Float64());
+      assembler.Branch(assembler.Float64Equal(element_k, search_num.value()),
+                       &return_found, &continue_loop);
+      assembler.Bind(&continue_loop);
+      index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+      assembler.Goto(&not_nan_loop);
     }
   }
 
-  assembler->Bind(&if_holey_doubles);
+  assembler.Bind(&if_holey_doubles);
   {
-    Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
-    Variable search_num(assembler, MachineRepresentation::kFloat64);
+    Label not_nan_loop(&assembler, &index_var), search_notnan(&assembler);
+    Variable search_num(&assembler, MachineRepresentation::kFloat64);
 
-    assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
-                          &search_notnan);
-    search_num.Bind(assembler->SmiToFloat64(search_element));
-    assembler->Goto(&not_nan_loop);
+    assembler.GotoIfNot(assembler.TaggedIsSmi(search_element), &search_notnan);
+    search_num.Bind(assembler.SmiToFloat64(search_element));
+    assembler.Goto(&not_nan_loop);
 
-    assembler->Bind(&search_notnan);
-    assembler->GotoIf(assembler->WordNotEqual(
-                          assembler->LoadMap(search_element), heap_number_map),
-                      &return_not_found);
+    assembler.Bind(&search_notnan);
+    assembler.GotoIfNot(
+        assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
+        &return_not_found);
 
-    search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+    search_num.Bind(assembler.LoadHeapNumberValue(search_element));
 
-    assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
-                                    &not_nan_loop);
+    assembler.BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+                                   &not_nan_loop);
 
     // Search for HeapNumber
-    assembler->Bind(&not_nan_loop);
+    assembler.Bind(&not_nan_loop);
     {
-      Label continue_loop(assembler);
-      assembler->GotoUnless(
-          assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+      Label continue_loop(&assembler);
+      assembler.GotoIfNot(
+          assembler.UintPtrLessThan(index_var.value(), len_var.value()),
           &return_not_found);
 
       // Load double value or continue if it contains a double hole.
-      Node* element_k = assembler->LoadFixedDoubleArrayElement(
+      Node* element_k = assembler.LoadFixedDoubleArrayElement(
           elements, index_var.value(), MachineType::Float64(), 0,
           CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
 
-      assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
-                        &return_found, &continue_loop);
-      assembler->Bind(&continue_loop);
-      index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
-      assembler->Goto(&not_nan_loop);
+      assembler.Branch(assembler.Float64Equal(element_k, search_num.value()),
+                       &return_found, &continue_loop);
+      assembler.Bind(&continue_loop);
+      index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+      assembler.Goto(&not_nan_loop);
     }
   }
 
-  assembler->Bind(&return_found);
-  assembler->Return(assembler->ChangeInt32ToTagged(index_var.value()));
+  assembler.Bind(&return_found);
+  assembler.Return(assembler.SmiTag(index_var.value()));
 
-  assembler->Bind(&return_not_found);
-  assembler->Return(assembler->NumberConstant(-1));
+  assembler.Bind(&return_not_found);
+  assembler.Return(assembler.NumberConstant(-1));
 
-  assembler->Bind(&call_runtime);
-  assembler->Return(assembler->CallRuntime(Runtime::kArrayIndexOf, context,
-                                           array, search_element, start_from));
+  assembler.Bind(&call_runtime);
+  assembler.Return(assembler.CallRuntime(Runtime::kArrayIndexOf, context, array,
+                                         search_element, start_from));
 }
 
 namespace {
 
 template <IterationKind kIterationKind>
-void Generate_ArrayPrototypeIterationMethod(CodeStubAssembler* assembler) {
+void Generate_ArrayPrototypeIterationMethod(
+    compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Label Label;
   typedef CodeStubAssembler::Variable Variable;
+  CodeStubAssembler assembler(state);
 
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
+  Node* receiver = assembler.Parameter(0);
+  Node* context = assembler.Parameter(3);
 
-  Variable var_array(assembler, MachineRepresentation::kTagged);
-  Variable var_map(assembler, MachineRepresentation::kTagged);
-  Variable var_type(assembler, MachineRepresentation::kWord32);
+  Variable var_array(&assembler, MachineRepresentation::kTagged);
+  Variable var_map(&assembler, MachineRepresentation::kTagged);
+  Variable var_type(&assembler, MachineRepresentation::kWord32);
 
-  Label if_isnotobject(assembler, Label::kDeferred);
-  Label create_array_iterator(assembler);
+  Label if_isnotobject(&assembler, Label::kDeferred);
+  Label create_array_iterator(&assembler);
 
-  assembler->GotoIf(assembler->TaggedIsSmi(receiver), &if_isnotobject);
+  assembler.GotoIf(assembler.TaggedIsSmi(receiver), &if_isnotobject);
   var_array.Bind(receiver);
-  var_map.Bind(assembler->LoadMap(receiver));
-  var_type.Bind(assembler->LoadMapInstanceType(var_map.value()));
-  assembler->Branch(assembler->IsJSReceiverInstanceType(var_type.value()),
-                    &create_array_iterator, &if_isnotobject);
+  var_map.Bind(assembler.LoadMap(receiver));
+  var_type.Bind(assembler.LoadMapInstanceType(var_map.value()));
+  assembler.Branch(assembler.IsJSReceiverInstanceType(var_type.value()),
+                   &create_array_iterator, &if_isnotobject);
 
-  assembler->Bind(&if_isnotobject);
+  assembler.Bind(&if_isnotobject);
   {
-    Callable callable = CodeFactory::ToObject(assembler->isolate());
-    Node* result = assembler->CallStub(callable, context, receiver);
+    Callable callable = CodeFactory::ToObject(assembler.isolate());
+    Node* result = assembler.CallStub(callable, context, receiver);
     var_array.Bind(result);
-    var_map.Bind(assembler->LoadMap(result));
-    var_type.Bind(assembler->LoadMapInstanceType(var_map.value()));
-    assembler->Goto(&create_array_iterator);
+    var_map.Bind(assembler.LoadMap(result));
+    var_type.Bind(assembler.LoadMapInstanceType(var_map.value()));
+    assembler.Goto(&create_array_iterator);
   }
 
-  assembler->Bind(&create_array_iterator);
-  assembler->Return(assembler->CreateArrayIterator(
-      var_array.value(), var_map.value(), var_type.value(), context,
-      kIterationKind));
+  assembler.Bind(&create_array_iterator);
+  assembler.Return(
+      assembler.CreateArrayIterator(var_array.value(), var_map.value(),
+                                    var_type.value(), context, kIterationKind));
 }
 
 }  // namespace
 
-void Builtins::Generate_ArrayPrototypeValues(CodeStubAssembler* assembler) {
-  Generate_ArrayPrototypeIterationMethod<IterationKind::kValues>(assembler);
+void Builtins::Generate_ArrayPrototypeValues(
+    compiler::CodeAssemblerState* state) {
+  Generate_ArrayPrototypeIterationMethod<IterationKind::kValues>(state);
 }
 
-void Builtins::Generate_ArrayPrototypeEntries(CodeStubAssembler* assembler) {
-  Generate_ArrayPrototypeIterationMethod<IterationKind::kEntries>(assembler);
+void Builtins::Generate_ArrayPrototypeEntries(
+    compiler::CodeAssemblerState* state) {
+  Generate_ArrayPrototypeIterationMethod<IterationKind::kEntries>(state);
 }
 
-void Builtins::Generate_ArrayPrototypeKeys(CodeStubAssembler* assembler) {
-  Generate_ArrayPrototypeIterationMethod<IterationKind::kKeys>(assembler);
+void Builtins::Generate_ArrayPrototypeKeys(
+    compiler::CodeAssemblerState* state) {
+  Generate_ArrayPrototypeIterationMethod<IterationKind::kKeys>(state);
 }
 
 void Builtins::Generate_ArrayIteratorPrototypeNext(
-    CodeStubAssembler* assembler) {
+    compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Label Label;
   typedef CodeStubAssembler::Variable Variable;
+  CodeStubAssembler assembler(state);
 
-  Node* iterator = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
+  Handle<String> operation = assembler.factory()->NewStringFromAsciiChecked(
+      "Array Iterator.prototype.next", TENURED);
 
-  Variable var_value(assembler, MachineRepresentation::kTagged);
-  Variable var_done(assembler, MachineRepresentation::kTagged);
+  Node* iterator = assembler.Parameter(0);
+  Node* context = assembler.Parameter(3);
+
+  Variable var_value(&assembler, MachineRepresentation::kTagged);
+  Variable var_done(&assembler, MachineRepresentation::kTagged);
 
   // Required, or else `throw_bad_receiver` fails a DCHECK due to these
   // variables not being bound along all paths, despite not being used.
-  var_done.Bind(assembler->TrueConstant());
-  var_value.Bind(assembler->UndefinedConstant());
+  var_done.Bind(assembler.TrueConstant());
+  var_value.Bind(assembler.UndefinedConstant());
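
The unconditional binds above reflect a general CodeStubAssembler rule: a Variable that is live at a merge point must be bound on every path reaching it, much like an SSA phi needs a value from each predecessor. A plain C++ analogy (not CSA code):

static int MergeExample(bool fast_path) {
  int result;      // analogous to a CSA Variable
  if (fast_path) {
    result = 1;    // bound on this path
  } else {
    result = 0;    // must be bound here too, or the read below is undefined
  }
  return result;   // the "merge point" where every path must have bound it
}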
 
-  Label throw_bad_receiver(assembler, Label::kDeferred);
-  Label set_done(assembler);
-  Label allocate_key_result(assembler);
-  Label allocate_entry_if_needed(assembler);
-  Label allocate_iterator_result(assembler);
-  Label generic_values(assembler);
+  Label throw_bad_receiver(&assembler, Label::kDeferred);
+  Label set_done(&assembler);
+  Label allocate_key_result(&assembler);
+  Label allocate_entry_if_needed(&assembler);
+  Label allocate_iterator_result(&assembler);
+  Label generic_values(&assembler);
 
   // If O does not have all of the internal slots of an Array Iterator Instance
   // (22.1.5.3), throw a TypeError exception
-  assembler->GotoIf(assembler->TaggedIsSmi(iterator), &throw_bad_receiver);
-  Node* instance_type = assembler->LoadInstanceType(iterator);
-  assembler->GotoIf(
-      assembler->Uint32LessThan(
-          assembler->Int32Constant(LAST_ARRAY_ITERATOR_TYPE -
-                                   FIRST_ARRAY_ITERATOR_TYPE),
-          assembler->Int32Sub(instance_type, assembler->Int32Constant(
-                                                 FIRST_ARRAY_ITERATOR_TYPE))),
+  assembler.GotoIf(assembler.TaggedIsSmi(iterator), &throw_bad_receiver);
+  Node* instance_type = assembler.LoadInstanceType(iterator);
+  assembler.GotoIf(
+      assembler.Uint32LessThan(
+          assembler.Int32Constant(LAST_ARRAY_ITERATOR_TYPE -
+                                  FIRST_ARRAY_ITERATOR_TYPE),
+          assembler.Int32Sub(instance_type, assembler.Int32Constant(
+                                                FIRST_ARRAY_ITERATOR_TYPE))),
       &throw_bad_receiver);
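
The Uint32LessThan test above is the usual single-comparison range check: FIRST_ARRAY_ITERATOR_TYPE <= t <= LAST_ARRAY_ITERATOR_TYPE holds exactly when the unsigned value of t - FIRST is at most LAST - FIRST, because any t below FIRST wraps around to a large unsigned number. An equivalent plain C++ helper:

#include <cstdint>

static bool IsArrayIteratorType(int32_t t, int32_t first, int32_t last) {
  // Any t < first wraps to a huge unsigned value, so a single unsigned
  // comparison covers both bounds at once.
  return static_cast<uint32_t>(t) - static_cast<uint32_t>(first) <=
         static_cast<uint32_t>(last - first);
}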
 
   // Let a be O.[[IteratedObject]].
-  Node* array = assembler->LoadObjectField(
+  Node* array = assembler.LoadObjectField(
       iterator, JSArrayIterator::kIteratedObjectOffset);
 
   // Let index be O.[[ArrayIteratorNextIndex]].
   Node* index =
-      assembler->LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
-  Node* orig_map = assembler->LoadObjectField(
+      assembler.LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
+  Node* orig_map = assembler.LoadObjectField(
       iterator, JSArrayIterator::kIteratedObjectMapOffset);
-  Node* array_map = assembler->LoadMap(array);
+  Node* array_map = assembler.LoadMap(array);
 
-  Label if_isfastarray(assembler), if_isnotfastarray(assembler);
+  Label if_isfastarray(&assembler), if_isnotfastarray(&assembler),
+      if_isdetached(&assembler, Label::kDeferred);
 
-  assembler->Branch(assembler->WordEqual(orig_map, array_map), &if_isfastarray,
-                    &if_isnotfastarray);
+  assembler.Branch(assembler.WordEqual(orig_map, array_map), &if_isfastarray,
+                   &if_isnotfastarray);
 
-  assembler->Bind(&if_isfastarray);
+  assembler.Bind(&if_isfastarray);
   {
-    CSA_ASSERT(assembler,
-               assembler->Word32Equal(assembler->LoadMapInstanceType(array_map),
-                                      assembler->Int32Constant(JS_ARRAY_TYPE)));
+    CSA_ASSERT(&assembler,
+               assembler.Word32Equal(assembler.LoadMapInstanceType(array_map),
+                                     assembler.Int32Constant(JS_ARRAY_TYPE)));
 
-    Node* length = assembler->LoadObjectField(array, JSArray::kLengthOffset);
+    Node* length = assembler.LoadObjectField(array, JSArray::kLengthOffset);
 
-    CSA_ASSERT(assembler, assembler->TaggedIsSmi(length));
-    CSA_ASSERT(assembler, assembler->TaggedIsSmi(index));
+    CSA_ASSERT(&assembler, assembler.TaggedIsSmi(length));
+    CSA_ASSERT(&assembler, assembler.TaggedIsSmi(index));
 
-    assembler->GotoUnless(assembler->SmiBelow(index, length), &set_done);
+    assembler.GotoIfNot(assembler.SmiBelow(index, length), &set_done);
 
-    Node* one = assembler->SmiConstant(Smi::FromInt(1));
-    assembler->StoreObjectFieldNoWriteBarrier(
-        iterator, JSArrayIterator::kNextIndexOffset,
-        assembler->IntPtrAdd(assembler->BitcastTaggedToWord(index),
-                             assembler->BitcastTaggedToWord(one)));
+    Node* one = assembler.SmiConstant(Smi::FromInt(1));
+    assembler.StoreObjectFieldNoWriteBarrier(iterator,
+                                             JSArrayIterator::kNextIndexOffset,
+                                             assembler.SmiAdd(index, one));
 
-    var_done.Bind(assembler->FalseConstant());
-    Node* elements = assembler->LoadElements(array);
+    var_done.Bind(assembler.FalseConstant());
+    Node* elements = assembler.LoadElements(array);
 
     static int32_t kInstanceType[] = {
         JS_FAST_ARRAY_KEY_ITERATOR_TYPE,
@@ -2225,8 +2462,8 @@
         JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
     };
 
-    Label packed_object_values(assembler), holey_object_values(assembler),
-        packed_double_values(assembler), holey_double_values(assembler);
+    Label packed_object_values(&assembler), holey_object_values(&assembler),
+        packed_double_values(&assembler), holey_double_values(&assembler);
     Label* kInstanceTypeHandlers[] = {
         &allocate_key_result,  &packed_object_values, &holey_object_values,
         &packed_object_values, &holey_object_values,  &packed_double_values,
@@ -2234,216 +2471,192 @@
         &packed_object_values, &holey_object_values,  &packed_double_values,
         &holey_double_values};
 
-    assembler->Switch(instance_type, &throw_bad_receiver, kInstanceType,
-                      kInstanceTypeHandlers, arraysize(kInstanceType));
+    assembler.Switch(instance_type, &throw_bad_receiver, kInstanceType,
+                     kInstanceTypeHandlers, arraysize(kInstanceType));
 
-    assembler->Bind(&packed_object_values);
+    assembler.Bind(&packed_object_values);
     {
-      var_value.Bind(assembler->LoadFixedArrayElement(
+      var_value.Bind(assembler.LoadFixedArrayElement(
           elements, index, 0, CodeStubAssembler::SMI_PARAMETERS));
-      assembler->Goto(&allocate_entry_if_needed);
+      assembler.Goto(&allocate_entry_if_needed);
     }
 
-    assembler->Bind(&packed_double_values);
+    assembler.Bind(&packed_double_values);
     {
-      Node* value = assembler->LoadFixedDoubleArrayElement(
+      Node* value = assembler.LoadFixedDoubleArrayElement(
           elements, index, MachineType::Float64(), 0,
           CodeStubAssembler::SMI_PARAMETERS);
-      var_value.Bind(assembler->AllocateHeapNumberWithValue(value));
-      assembler->Goto(&allocate_entry_if_needed);
+      var_value.Bind(assembler.AllocateHeapNumberWithValue(value));
+      assembler.Goto(&allocate_entry_if_needed);
     }
 
-    assembler->Bind(&holey_object_values);
+    assembler.Bind(&holey_object_values);
     {
       // Check the array_protector cell, and take the slow path if it's invalid.
       Node* invalid =
-          assembler->SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
-      Node* cell = assembler->LoadRoot(Heap::kArrayProtectorRootIndex);
+          assembler.SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+      Node* cell = assembler.LoadRoot(Heap::kArrayProtectorRootIndex);
       Node* cell_value =
-          assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
-      assembler->GotoIf(assembler->WordEqual(cell_value, invalid),
-                        &generic_values);
+          assembler.LoadObjectField(cell, PropertyCell::kValueOffset);
+      assembler.GotoIf(assembler.WordEqual(cell_value, invalid),
+                       &generic_values);
 
-      var_value.Bind(assembler->UndefinedConstant());
-      Node* value = assembler->LoadFixedArrayElement(
+      var_value.Bind(assembler.UndefinedConstant());
+      Node* value = assembler.LoadFixedArrayElement(
           elements, index, 0, CodeStubAssembler::SMI_PARAMETERS);
-      assembler->GotoIf(
-          assembler->WordEqual(value, assembler->TheHoleConstant()),
-          &allocate_entry_if_needed);
+      assembler.GotoIf(assembler.WordEqual(value, assembler.TheHoleConstant()),
+                       &allocate_entry_if_needed);
       var_value.Bind(value);
-      assembler->Goto(&allocate_entry_if_needed);
+      assembler.Goto(&allocate_entry_if_needed);
     }
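
The protector pattern used in both holey cases works the same way throughout V8: a cell starts out valid, the first mutation that could make holes observable (such as an indexed property appearing on Array.prototype) invalidates it, and fast paths re-check it before treating a hole as undefined. A simplified standalone model of the pattern, with invented names (V8's protectors are PropertyCells reached via root indices):

struct Protector { bool valid = true; };

static Protector g_array_protector;

// Called when a mutation could make holes observable.
static void InvalidateArrayProtector() { g_array_protector.valid = false; }

// Fast paths consult the protector before skipping the generic prototype
// lookup that a hole would otherwise require.
static bool CanTreatHoleAsUndefined() { return g_array_protector.valid; }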
 
-    assembler->Bind(&holey_double_values);
+    assembler.Bind(&holey_double_values);
     {
       // Check the array_protector cell, and take the slow path if it's invalid.
       Node* invalid =
-          assembler->SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
-      Node* cell = assembler->LoadRoot(Heap::kArrayProtectorRootIndex);
+          assembler.SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+      Node* cell = assembler.LoadRoot(Heap::kArrayProtectorRootIndex);
       Node* cell_value =
-          assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
-      assembler->GotoIf(assembler->WordEqual(cell_value, invalid),
-                        &generic_values);
+          assembler.LoadObjectField(cell, PropertyCell::kValueOffset);
+      assembler.GotoIf(assembler.WordEqual(cell_value, invalid),
+                       &generic_values);
 
-      var_value.Bind(assembler->UndefinedConstant());
-      Node* value = assembler->LoadFixedDoubleArrayElement(
+      var_value.Bind(assembler.UndefinedConstant());
+      Node* value = assembler.LoadFixedDoubleArrayElement(
           elements, index, MachineType::Float64(), 0,
           CodeStubAssembler::SMI_PARAMETERS, &allocate_entry_if_needed);
-      var_value.Bind(assembler->AllocateHeapNumberWithValue(value));
-      assembler->Goto(&allocate_entry_if_needed);
+      var_value.Bind(assembler.AllocateHeapNumberWithValue(value));
+      assembler.Goto(&allocate_entry_if_needed);
     }
   }
 
-  assembler->Bind(&if_isnotfastarray);
+  assembler.Bind(&if_isnotfastarray);
   {
-    Label if_istypedarray(assembler), if_isgeneric(assembler);
+    Label if_istypedarray(&assembler), if_isgeneric(&assembler);
 
     // If a is undefined, return CreateIterResultObject(undefined, true)
-    assembler->GotoIf(
-        assembler->WordEqual(array, assembler->UndefinedConstant()),
-        &allocate_iterator_result);
+    assembler.GotoIf(assembler.WordEqual(array, assembler.UndefinedConstant()),
+                     &allocate_iterator_result);
 
-    Node* array_type = assembler->LoadInstanceType(array);
-    assembler->Branch(
-        assembler->Word32Equal(array_type,
-                               assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+    Node* array_type = assembler.LoadInstanceType(array);
+    assembler.Branch(
+        assembler.Word32Equal(array_type,
+                              assembler.Int32Constant(JS_TYPED_ARRAY_TYPE)),
         &if_istypedarray, &if_isgeneric);
 
-    assembler->Bind(&if_isgeneric);
+    assembler.Bind(&if_isgeneric);
     {
-      Label if_wasfastarray(assembler);
+      Label if_wasfastarray(&assembler);
 
       Node* length = nullptr;
       {
-        Variable var_length(assembler, MachineRepresentation::kTagged);
-        Label if_isarray(assembler), if_isnotarray(assembler), done(assembler);
-        assembler->Branch(
-            assembler->Word32Equal(array_type,
-                                   assembler->Int32Constant(JS_ARRAY_TYPE)),
+        Variable var_length(&assembler, MachineRepresentation::kTagged);
+        Label if_isarray(&assembler), if_isnotarray(&assembler),
+            done(&assembler);
+        assembler.Branch(
+            assembler.Word32Equal(array_type,
+                                  assembler.Int32Constant(JS_ARRAY_TYPE)),
             &if_isarray, &if_isnotarray);
 
-        assembler->Bind(&if_isarray);
+        assembler.Bind(&if_isarray);
         {
           var_length.Bind(
-              assembler->LoadObjectField(array, JSArray::kLengthOffset));
+              assembler.LoadObjectField(array, JSArray::kLengthOffset));
 
           // Invalidate protector cell if needed
-          assembler->Branch(
-              assembler->WordNotEqual(orig_map, assembler->UndefinedConstant()),
+          assembler.Branch(
+              assembler.WordNotEqual(orig_map, assembler.UndefinedConstant()),
               &if_wasfastarray, &done);
 
-          assembler->Bind(&if_wasfastarray);
+          assembler.Bind(&if_wasfastarray);
           {
-            Label if_invalid(assembler, Label::kDeferred);
+            Label if_invalid(&assembler, Label::kDeferred);
             // A fast array iterator transitioned to a slow iterator during
             // iteration. Invalidate the fast_array_iteration_protector cell to
             // prevent potential deopt loops.
-            assembler->StoreObjectFieldNoWriteBarrier(
+            assembler.StoreObjectFieldNoWriteBarrier(
                 iterator, JSArrayIterator::kIteratedObjectMapOffset,
-                assembler->UndefinedConstant());
-            assembler->GotoIf(
-                assembler->Uint32LessThanOrEqual(
-                    instance_type, assembler->Int32Constant(
+                assembler.UndefinedConstant());
+            assembler.GotoIf(
+                assembler.Uint32LessThanOrEqual(
+                    instance_type, assembler.Int32Constant(
                                        JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
                 &done);
 
-            Node* invalid = assembler->SmiConstant(
-                Smi::FromInt(Isolate::kProtectorInvalid));
-            Node* cell = assembler->LoadRoot(
-                Heap::kFastArrayIterationProtectorRootIndex);
-            assembler->StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset,
-                                                      invalid);
-            assembler->Goto(&done);
+            Node* invalid =
+                assembler.SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+            Node* cell =
+                assembler.LoadRoot(Heap::kFastArrayIterationProtectorRootIndex);
+            assembler.StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset,
+                                                     invalid);
+            assembler.Goto(&done);
           }
         }
 
-        assembler->Bind(&if_isnotarray);
+        assembler.Bind(&if_isnotarray);
         {
-          Node* length_string = assembler->HeapConstant(
-              assembler->isolate()->factory()->length_string());
-          Callable get_property =
-              CodeFactory::GetProperty(assembler->isolate());
+          Node* length_string = assembler.HeapConstant(
+              assembler.isolate()->factory()->length_string());
+          Callable get_property = CodeFactory::GetProperty(assembler.isolate());
           Node* length =
-              assembler->CallStub(get_property, context, array, length_string);
-          Callable to_length = CodeFactory::ToLength(assembler->isolate());
-          var_length.Bind(assembler->CallStub(to_length, context, length));
-          assembler->Goto(&done);
+              assembler.CallStub(get_property, context, array, length_string);
+          Callable to_length = CodeFactory::ToLength(assembler.isolate());
+          var_length.Bind(assembler.CallStub(to_length, context, length));
+          assembler.Goto(&done);
         }
 
-        assembler->Bind(&done);
+        assembler.Bind(&done);
         length = var_length.value();
       }
 
-      assembler->GotoUnlessNumberLessThan(index, length, &set_done);
+      assembler.GotoUnlessNumberLessThan(index, length, &set_done);
 
-      assembler->StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
-                                  assembler->NumberInc(index));
-      var_done.Bind(assembler->FalseConstant());
+      assembler.StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
+                                 assembler.NumberInc(index));
+      var_done.Bind(assembler.FalseConstant());
 
-      assembler->Branch(
-          assembler->Uint32LessThanOrEqual(
+      assembler.Branch(
+          assembler.Uint32LessThanOrEqual(
               instance_type,
-              assembler->Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
+              assembler.Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
           &allocate_key_result, &generic_values);
 
-      assembler->Bind(&generic_values);
+      assembler.Bind(&generic_values);
       {
-        Callable get_property = CodeFactory::GetProperty(assembler->isolate());
-        var_value.Bind(
-            assembler->CallStub(get_property, context, array, index));
-        assembler->Goto(&allocate_entry_if_needed);
+        Callable get_property = CodeFactory::GetProperty(assembler.isolate());
+        var_value.Bind(assembler.CallStub(get_property, context, array, index));
+        assembler.Goto(&allocate_entry_if_needed);
       }
     }
 
-    assembler->Bind(&if_istypedarray);
+    assembler.Bind(&if_istypedarray);
     {
-      Node* length = nullptr;
-      {
-        Variable var_length(assembler, MachineRepresentation::kTagged);
-        Label if_isdetached(assembler, Label::kDeferred),
-            if_isnotdetached(assembler), done(assembler);
+      Node* buffer =
+          assembler.LoadObjectField(array, JSTypedArray::kBufferOffset);
+      assembler.GotoIf(assembler.IsDetachedBuffer(buffer), &if_isdetached);
 
-        Node* buffer =
-            assembler->LoadObjectField(array, JSTypedArray::kBufferOffset);
-        assembler->Branch(assembler->IsDetachedBuffer(buffer), &if_isdetached,
-                          &if_isnotdetached);
+      Node* length =
+          assembler.LoadObjectField(array, JSTypedArray::kLengthOffset);
 
-        assembler->Bind(&if_isnotdetached);
-        {
-          var_length.Bind(
-              assembler->LoadObjectField(array, JSTypedArray::kLengthOffset));
-          assembler->Goto(&done);
-        }
+      CSA_ASSERT(&assembler, assembler.TaggedIsSmi(length));
+      CSA_ASSERT(&assembler, assembler.TaggedIsSmi(index));
 
-        assembler->Bind(&if_isdetached);
-        {
-          // TODO(caitp): If IsDetached(buffer) is true, throw a TypeError, per
-          // https://github.com/tc39/ecma262/issues/713
-          var_length.Bind(assembler->SmiConstant(Smi::kZero));
-          assembler->Goto(&done);
-        }
+      assembler.GotoIfNot(assembler.SmiBelow(index, length), &set_done);
 
-        assembler->Bind(&done);
-        length = var_length.value();
-      }
-      CSA_ASSERT(assembler, assembler->TaggedIsSmi(length));
-      CSA_ASSERT(assembler, assembler->TaggedIsSmi(index));
-
-      assembler->GotoUnless(assembler->SmiBelow(index, length), &set_done);
-
-      Node* one = assembler->SmiConstant(Smi::FromInt(1));
-      assembler->StoreObjectFieldNoWriteBarrier(
+      Node* one = assembler.SmiConstant(1);
+      assembler.StoreObjectFieldNoWriteBarrier(
           iterator, JSArrayIterator::kNextIndexOffset,
-          assembler->IntPtrAdd(assembler->BitcastTaggedToWord(index),
-                               assembler->BitcastTaggedToWord(one)));
-      var_done.Bind(assembler->FalseConstant());
+          assembler.SmiAdd(index, one));
+      var_done.Bind(assembler.FalseConstant());
 
-      Node* elements = assembler->LoadElements(array);
-      Node* base_ptr = assembler->LoadObjectField(
+      Node* elements = assembler.LoadElements(array);
+      Node* base_ptr = assembler.LoadObjectField(
           elements, FixedTypedArrayBase::kBasePointerOffset);
-      Node* external_ptr = assembler->LoadObjectField(
-          elements, FixedTypedArrayBase::kExternalPointerOffset);
-      Node* data_ptr = assembler->IntPtrAdd(base_ptr, external_ptr);
+      Node* external_ptr = assembler.LoadObjectField(
+          elements, FixedTypedArrayBase::kExternalPointerOffset,
+          MachineType::Pointer());
+      Node* data_ptr = assembler.IntPtrAdd(
+          assembler.BitcastTaggedToWord(base_ptr), external_ptr);
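+      // data_ptr works for both layouts: off-heap typed arrays store the
+      // absolute backing-store address in external_ptr with a zero base_ptr,
+      // while on-heap ones combine the elements object with an offset (a
+      // sketch of the invariant; it is not asserted here).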
 
       static int32_t kInstanceType[] = {
           JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
@@ -2467,10 +2680,10 @@
           JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
       };
 
-      Label uint8_values(assembler), int8_values(assembler),
-          uint16_values(assembler), int16_values(assembler),
-          uint32_values(assembler), int32_values(assembler),
-          float32_values(assembler), float64_values(assembler);
+      Label uint8_values(&assembler), int8_values(&assembler),
+          uint16_values(&assembler), int16_values(&assembler),
+          uint32_values(&assembler), int32_values(&assembler),
+          float32_values(&assembler), float64_values(&assembler);
       Label* kInstanceTypeHandlers[] = {
           &allocate_key_result, &uint8_values,  &uint8_values,
           &int8_values,         &uint16_values, &int16_values,
@@ -2481,152 +2694,156 @@
           &float64_values,
       };
 
-      var_done.Bind(assembler->FalseConstant());
-      assembler->Switch(instance_type, &throw_bad_receiver, kInstanceType,
-                        kInstanceTypeHandlers, arraysize(kInstanceType));
+      var_done.Bind(assembler.FalseConstant());
+      assembler.Switch(instance_type, &throw_bad_receiver, kInstanceType,
+                       kInstanceTypeHandlers, arraysize(kInstanceType));
 
-      assembler->Bind(&uint8_values);
+      assembler.Bind(&uint8_values);
       {
-        Node* value_uint8 = assembler->LoadFixedTypedArrayElement(
+        Node* value_uint8 = assembler.LoadFixedTypedArrayElement(
             data_ptr, index, UINT8_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
-        var_value.Bind(assembler->SmiFromWord(value_uint8));
-        assembler->Goto(&allocate_entry_if_needed);
+        var_value.Bind(assembler.SmiFromWord32(value_uint8));
+        assembler.Goto(&allocate_entry_if_needed);
       }
 
-      assembler->Bind(&int8_values);
+      assembler.Bind(&int8_values);
       {
-        Node* value_int8 = assembler->LoadFixedTypedArrayElement(
+        Node* value_int8 = assembler.LoadFixedTypedArrayElement(
             data_ptr, index, INT8_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
-        var_value.Bind(assembler->SmiFromWord(value_int8));
-        assembler->Goto(&allocate_entry_if_needed);
+        var_value.Bind(assembler.SmiFromWord32(value_int8));
+        assembler.Goto(&allocate_entry_if_needed);
       }
 
-      assembler->Bind(&uint16_values);
+      assembler.Bind(&uint16_values);
       {
-        Node* value_uint16 = assembler->LoadFixedTypedArrayElement(
+        Node* value_uint16 = assembler.LoadFixedTypedArrayElement(
             data_ptr, index, UINT16_ELEMENTS,
             CodeStubAssembler::SMI_PARAMETERS);
-        var_value.Bind(assembler->SmiFromWord(value_uint16));
-        assembler->Goto(&allocate_entry_if_needed);
+        var_value.Bind(assembler.SmiFromWord32(value_uint16));
+        assembler.Goto(&allocate_entry_if_needed);
       }
 
-      assembler->Bind(&int16_values);
+      assembler.Bind(&int16_values);
       {
-        Node* value_int16 = assembler->LoadFixedTypedArrayElement(
+        Node* value_int16 = assembler.LoadFixedTypedArrayElement(
             data_ptr, index, INT16_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
-        var_value.Bind(assembler->SmiFromWord(value_int16));
-        assembler->Goto(&allocate_entry_if_needed);
+        var_value.Bind(assembler.SmiFromWord32(value_int16));
+        assembler.Goto(&allocate_entry_if_needed);
       }
 
-      assembler->Bind(&uint32_values);
+      assembler.Bind(&uint32_values);
       {
-        Node* value_uint32 = assembler->LoadFixedTypedArrayElement(
+        Node* value_uint32 = assembler.LoadFixedTypedArrayElement(
             data_ptr, index, UINT32_ELEMENTS,
             CodeStubAssembler::SMI_PARAMETERS);
-        var_value.Bind(assembler->ChangeUint32ToTagged(value_uint32));
-        assembler->Goto(&allocate_entry_if_needed);
+        var_value.Bind(assembler.ChangeUint32ToTagged(value_uint32));
+        assembler.Goto(&allocate_entry_if_needed);
       }
-      assembler->Bind(&int32_values);
+      assembler.Bind(&int32_values);
       {
-        Node* value_int32 = assembler->LoadFixedTypedArrayElement(
+        Node* value_int32 = assembler.LoadFixedTypedArrayElement(
             data_ptr, index, INT32_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
-        var_value.Bind(assembler->ChangeInt32ToTagged(value_int32));
-        assembler->Goto(&allocate_entry_if_needed);
+        var_value.Bind(assembler.ChangeInt32ToTagged(value_int32));
+        assembler.Goto(&allocate_entry_if_needed);
       }
-      assembler->Bind(&float32_values);
+      assembler.Bind(&float32_values);
       {
-        Node* value_float32 = assembler->LoadFixedTypedArrayElement(
+        Node* value_float32 = assembler.LoadFixedTypedArrayElement(
             data_ptr, index, FLOAT32_ELEMENTS,
             CodeStubAssembler::SMI_PARAMETERS);
-        var_value.Bind(assembler->AllocateHeapNumberWithValue(
-            assembler->ChangeFloat32ToFloat64(value_float32)));
-        assembler->Goto(&allocate_entry_if_needed);
+        var_value.Bind(assembler.AllocateHeapNumberWithValue(
+            assembler.ChangeFloat32ToFloat64(value_float32)));
+        assembler.Goto(&allocate_entry_if_needed);
       }
-      assembler->Bind(&float64_values);
+      assembler.Bind(&float64_values);
       {
-        Node* value_float64 = assembler->LoadFixedTypedArrayElement(
+        Node* value_float64 = assembler.LoadFixedTypedArrayElement(
             data_ptr, index, FLOAT64_ELEMENTS,
             CodeStubAssembler::SMI_PARAMETERS);
-        var_value.Bind(assembler->AllocateHeapNumberWithValue(value_float64));
-        assembler->Goto(&allocate_entry_if_needed);
+        var_value.Bind(assembler.AllocateHeapNumberWithValue(value_float64));
+        assembler.Goto(&allocate_entry_if_needed);
       }
     }
   }
 
-  assembler->Bind(&set_done);
+  assembler.Bind(&set_done);
   {
-    assembler->StoreObjectFieldNoWriteBarrier(
+    assembler.StoreObjectFieldNoWriteBarrier(
         iterator, JSArrayIterator::kIteratedObjectOffset,
-        assembler->UndefinedConstant());
-    assembler->Goto(&allocate_iterator_result);
+        assembler.UndefinedConstant());
+    assembler.Goto(&allocate_iterator_result);
   }
 
-  assembler->Bind(&allocate_key_result);
+  assembler.Bind(&allocate_key_result);
   {
     var_value.Bind(index);
-    var_done.Bind(assembler->FalseConstant());
-    assembler->Goto(&allocate_iterator_result);
+    var_done.Bind(assembler.FalseConstant());
+    assembler.Goto(&allocate_iterator_result);
   }
 
-  assembler->Bind(&allocate_entry_if_needed);
+  assembler.Bind(&allocate_entry_if_needed);
   {
-    assembler->GotoIf(
-        assembler->Int32GreaterThan(
+    assembler.GotoIf(
+        assembler.Int32GreaterThan(
             instance_type,
-            assembler->Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)),
+            assembler.Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)),
         &allocate_iterator_result);
 
-    Node* elements = assembler->AllocateFixedArray(FAST_ELEMENTS,
-                                                   assembler->Int32Constant(2));
-    assembler->StoreFixedArrayElement(elements, assembler->Int32Constant(0),
-                                      index, SKIP_WRITE_BARRIER);
-    assembler->StoreFixedArrayElement(elements, assembler->Int32Constant(1),
-                                      var_value.value(), SKIP_WRITE_BARRIER);
+    Node* elements = assembler.AllocateFixedArray(FAST_ELEMENTS,
+                                                  assembler.IntPtrConstant(2));
+    assembler.StoreFixedArrayElement(elements, 0, index, SKIP_WRITE_BARRIER);
+    assembler.StoreFixedArrayElement(elements, 1, var_value.value(),
+                                     SKIP_WRITE_BARRIER);
 
-    Node* entry = assembler->Allocate(JSArray::kSize);
-    Node* map = assembler->LoadContextElement(
-        assembler->LoadNativeContext(context),
-        Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX);
+    Node* entry = assembler.Allocate(JSArray::kSize);
+    Node* map =
+        assembler.LoadContextElement(assembler.LoadNativeContext(context),
+                                     Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX);
 
-    assembler->StoreMapNoWriteBarrier(entry, map);
-    assembler->StoreObjectFieldRoot(entry, JSArray::kPropertiesOffset,
-                                    Heap::kEmptyFixedArrayRootIndex);
-    assembler->StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset,
-                                              elements);
-    assembler->StoreObjectFieldNoWriteBarrier(
-        entry, JSArray::kLengthOffset, assembler->SmiConstant(Smi::FromInt(2)));
+    assembler.StoreMapNoWriteBarrier(entry, map);
+    assembler.StoreObjectFieldRoot(entry, JSArray::kPropertiesOffset,
+                                   Heap::kEmptyFixedArrayRootIndex);
+    assembler.StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset,
+                                             elements);
+    assembler.StoreObjectFieldNoWriteBarrier(
+        entry, JSArray::kLengthOffset, assembler.SmiConstant(Smi::FromInt(2)));
 
     var_value.Bind(entry);
-    assembler->Goto(&allocate_iterator_result);
+    assembler.Goto(&allocate_iterator_result);
   }
 
-  assembler->Bind(&allocate_iterator_result);
+  assembler.Bind(&allocate_iterator_result);
   {
-    Node* result = assembler->Allocate(JSIteratorResult::kSize);
+    Node* result = assembler.Allocate(JSIteratorResult::kSize);
     Node* map =
-        assembler->LoadContextElement(assembler->LoadNativeContext(context),
-                                      Context::ITERATOR_RESULT_MAP_INDEX);
-    assembler->StoreMapNoWriteBarrier(result, map);
-    assembler->StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
-                                    Heap::kEmptyFixedArrayRootIndex);
-    assembler->StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
-                                    Heap::kEmptyFixedArrayRootIndex);
-    assembler->StoreObjectFieldNoWriteBarrier(
+        assembler.LoadContextElement(assembler.LoadNativeContext(context),
+                                     Context::ITERATOR_RESULT_MAP_INDEX);
+    assembler.StoreMapNoWriteBarrier(result, map);
+    assembler.StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
+                                   Heap::kEmptyFixedArrayRootIndex);
+    assembler.StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
+                                   Heap::kEmptyFixedArrayRootIndex);
+    assembler.StoreObjectFieldNoWriteBarrier(
         result, JSIteratorResult::kValueOffset, var_value.value());
-    assembler->StoreObjectFieldNoWriteBarrier(
+    assembler.StoreObjectFieldNoWriteBarrier(
         result, JSIteratorResult::kDoneOffset, var_done.value());
-    assembler->Return(result);
+    assembler.Return(result);
   }
 
-  assembler->Bind(&throw_bad_receiver);
+  assembler.Bind(&throw_bad_receiver);
   {
     // The {receiver} is not a valid JSArrayIterator.
-    Node* result = assembler->CallRuntime(
-        Runtime::kThrowIncompatibleMethodReceiver, context,
-        assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
-            "Array Iterator.prototype.next", TENURED)),
-        iterator);
-    assembler->Return(result);
+    assembler.CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
+                          assembler.HeapConstant(operation), iterator);
+    assembler.Unreachable();
+  }
+
+  assembler.Bind(&if_isdetached);
+  {
+    Node* message = assembler.SmiConstant(MessageTemplate::kDetachedOperation);
+    assembler.CallRuntime(Runtime::kThrowTypeError, context, message,
+                          assembler.HeapConstant(operation));
+    assembler.Unreachable();
   }
 }
 
diff --git a/src/builtins/builtins-arraybuffer.cc b/src/builtins/builtins-arraybuffer.cc
index ad36758..e82c385 100644
--- a/src/builtins/builtins-arraybuffer.cc
+++ b/src/builtins/builtins-arraybuffer.cc
@@ -2,8 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/builtins/builtins-async-function.cc b/src/builtins/builtins-async-function.cc
new file mode 100644
index 0000000..309d481
--- /dev/null
+++ b/src/builtins/builtins-async-function.cc
@@ -0,0 +1,208 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-async.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef CodeStubAssembler::ParameterMode ParameterMode;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+
+class AsyncFunctionBuiltinsAssembler : public AsyncBuiltinsAssembler {
+ public:
+  explicit AsyncFunctionBuiltinsAssembler(CodeAssemblerState* state)
+      : AsyncBuiltinsAssembler(state) {}
+
+ protected:
+  void AsyncFunctionAwait(Node* const context, Node* const generator,
+                          Node* const awaited, Node* const outer_promise,
+                          const bool is_predicted_as_caught);
+
+  void AsyncFunctionAwaitResumeClosure(
+      Node* const context, Node* const sent_value,
+      JSGeneratorObject::ResumeMode resume_mode);
+};
+
+namespace {
+
+// Describe fields of Context associated with AsyncFunctionAwait resume
+// closures.
+// TODO(jgruber): Refactor to reuse code for upcoming async-generators.
+class AwaitContext {
+ public:
+  enum Fields { kGeneratorSlot = Context::MIN_CONTEXT_SLOTS, kLength };
+};
+
+}  // anonymous namespace
+
+void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
+    Node* context, Node* sent_value,
+    JSGeneratorObject::ResumeMode resume_mode) {
+  DCHECK(resume_mode == JSGeneratorObject::kNext ||
+         resume_mode == JSGeneratorObject::kThrow);
+
+  Node* const generator =
+      LoadContextElement(context, AwaitContext::kGeneratorSlot);
+  CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
+
+  // Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with
+  // unnecessary runtime checks removed.
+  // TODO(jgruber): Refactor to reuse code from builtins-generator.cc.
+
+  // Ensure that the generator is neither closed nor running.
+  CSA_SLOW_ASSERT(
+      this,
+      SmiGreaterThan(
+          LoadObjectField(generator, JSGeneratorObject::kContinuationOffset),
+          SmiConstant(JSGeneratorObject::kGeneratorClosed)));
+
+  // Resume the {receiver} using our trampoline.
+  Callable callable = CodeFactory::ResumeGenerator(isolate());
+  CallStub(callable, context, sent_value, generator, SmiConstant(resume_mode));
+
+  // The resulting Promise is a throwaway, so it doesn't matter what it
+  // resolves to. What is important is that we don't end up keeping the
+  // whole chain of intermediate Promises alive by returning the return value
+  // of ResumeGenerator, as that would create a memory leak.
+}
+
+TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
+  CSA_ASSERT_JS_ARGC_EQ(this, 1);
+  Node* const sentError = Parameter(1);
+  Node* const context = Parameter(4);
+
+  AsyncFunctionAwaitResumeClosure(context, sentError,
+                                  JSGeneratorObject::kThrow);
+  Return(UndefinedConstant());
+}
+
+TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
+  CSA_ASSERT_JS_ARGC_EQ(this, 1);
+  Node* const sentValue = Parameter(1);
+  Node* const context = Parameter(4);
+
+  AsyncFunctionAwaitResumeClosure(context, sentValue, JSGeneratorObject::kNext);
+  Return(UndefinedConstant());
+}
+
+// ES#abstract-ops-async-function-await
+// AsyncFunctionAwait ( value )
+// Shared logic for the core of await. The parser desugars
+//   await awaited
+// into
+//   yield AsyncFunctionAwait{Caught,Uncaught}(.generator, awaited, .promise)
+// The 'awaited' parameter is the value; the generator stands in
+// for the asyncContext, and .promise is the larger promise under
+// construction by the enclosing async function.
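+// As a concrete sketch (not the exact internal form the parser emits),
+//   async function f() { await x; }
+// behaves like a generator in which the await becomes
+//   yield AsyncFunctionAwaitUncaught(.generator, x, .promise)
+// where .generator and .promise are hidden variables introduced by the
+// parser.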
+void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
+    Node* const context, Node* const generator, Node* const awaited,
+    Node* const outer_promise, const bool is_predicted_as_caught) {
+  CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
+  CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
+
+  NodeGenerator1 create_closure_context = [&](Node* native_context) -> Node* {
+    Node* const context =
+        CreatePromiseContext(native_context, AwaitContext::kLength);
+    StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
+                                      generator);
+    return context;
+  };
+
+  // TODO(jgruber): AsyncBuiltinsAssembler::Await currently does not reuse
+  // the awaited promise if it is already a promise. Reuse is non-spec
+  // compliant, but it is part of our old behavior and gives us a couple of
+  // percent performance boost.
+  // TODO(jgruber): Use a faster specialized version of
+  // InternalPerformPromiseThen.
+
+  Node* const result = Await(
+      context, generator, awaited, outer_promise, create_closure_context,
+      Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
+      Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, is_predicted_as_caught);
+
+  Return(result);
+}
+
+// Called by the parser from the desugaring of 'await' when catch
+// prediction indicates that there is a locally surrounding catch block.
+TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
+  CSA_ASSERT_JS_ARGC_EQ(this, 3);
+  Node* const generator = Parameter(1);
+  Node* const awaited = Parameter(2);
+  Node* const outer_promise = Parameter(3);
+  Node* const context = Parameter(6);
+
+  static const bool kIsPredictedAsCaught = true;
+
+  AsyncFunctionAwait(context, generator, awaited, outer_promise,
+                     kIsPredictedAsCaught);
+}
+
+// Called by the parser from the desugaring of 'await' when catch
+// prediction indicates no locally surrounding catch block.
+TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
+  CSA_ASSERT_JS_ARGC_EQ(this, 3);
+  Node* const generator = Parameter(1);
+  Node* const awaited = Parameter(2);
+  Node* const outer_promise = Parameter(3);
+  Node* const context = Parameter(6);
+
+  static const bool kIsPredictedAsCaught = false;
+
+  AsyncFunctionAwait(context, generator, awaited, outer_promise,
+                     kIsPredictedAsCaught);
+}
+
+TF_BUILTIN(AsyncFunctionPromiseCreate, AsyncFunctionBuiltinsAssembler) {
+  CSA_ASSERT_JS_ARGC_EQ(this, 0);
+  Node* const context = Parameter(3);
+
+  Node* const promise = AllocateAndInitJSPromise(context);
+
+  Label if_is_debug_active(this, Label::kDeferred);
+  GotoIf(IsDebugActive(), &if_is_debug_active);
+
+  // Early exit if debug is not active.
+  Return(promise);
+
+  Bind(&if_is_debug_active);
+  {
+    // Push the Promise under construction in an async function on
+    // the catch prediction stack to handle exceptions thrown before
+    // the first await.
+    // Assign ID and create a recurring task to save stack for future
+    // resumptions from await.
+    CallRuntime(Runtime::kDebugAsyncFunctionPromiseCreated, context, promise);
+    Return(promise);
+  }
+}
+
+TF_BUILTIN(AsyncFunctionPromiseRelease, AsyncFunctionBuiltinsAssembler) {
+  CSA_ASSERT_JS_ARGC_EQ(this, 1);
+  Node* const promise = Parameter(1);
+  Node* const context = Parameter(4);
+
+  Label if_is_debug_active(this, Label::kDeferred);
+  GotoIf(IsDebugActive(), &if_is_debug_active);
+
+  // Early exit if debug is not active.
+  Return(UndefinedConstant());
+
+  Bind(&if_is_debug_active);
+  {
+    // Pop the Promise under construction in an async function from the
+    // catch prediction stack.
+    CallRuntime(Runtime::kDebugPopPromise, context);
+    Return(promise);
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-async-iterator.cc b/src/builtins/builtins-async-iterator.cc
new file mode 100644
index 0000000..13d15ef
--- /dev/null
+++ b/src/builtins/builtins-async-iterator.cc
@@ -0,0 +1,326 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-async.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// Describe fields of Context associated with the AsyncIterator unwrap closure.
+class ValueUnwrapContext {
+ public:
+  enum Fields { kDoneSlot = Context::MIN_CONTEXT_SLOTS, kLength };
+};
+
+class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
+ public:
+  explicit AsyncFromSyncBuiltinsAssembler(CodeAssemblerState* state)
+      : AsyncBuiltinsAssembler(state) {}
+
+  void ThrowIfNotAsyncFromSyncIterator(Node* const context, Node* const object,
+                                       Label* if_exception,
+                                       Variable* var_exception,
+                                       const char* method_name);
+
+  typedef std::function<void(Node* const context, Node* const promise,
+                             Label* if_exception)>
+      UndefinedMethodHandler;
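+  // Common driver for the %AsyncFromSyncIteratorPrototype% methods. In
+  // outline (a summary of the code below): call `method_name` on the wrapped
+  // sync iterator, load value/done from the result, and settle the returned
+  // promise with the unwrapped value.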
+  void Generate_AsyncFromSyncIteratorMethod(
+      Node* const context, Node* const iterator, Node* const sent_value,
+      Handle<Name> method_name, UndefinedMethodHandler&& if_method_undefined,
+      const char* operation_name,
+      Label::Type reject_label_type = Label::kDeferred,
+      Node* const initial_exception_value = nullptr);
+
+  Node* AllocateAsyncIteratorValueUnwrapContext(Node* native_context,
+                                                Node* done);
+
+  // Load "value" and "done" from an iterator result object. If an exception
+  // is thrown at any point, jumps to the `if_exception` label with the
+  // exception stored in `var_exception`.
+  //
+  // Returns a Pair of Nodes, whose first element is the value of the "value"
+  // property, and whose second element is the value of the "done" property,
+  // converted to a Boolean if needed.
+  std::pair<Node*, Node*> LoadIteratorResult(Node* const context,
+                                             Node* const native_context,
+                                             Node* const iter_result,
+                                             Label* if_exception,
+                                             Variable* var_exception);
+
+  Node* CreateUnwrapClosure(Node* const native_context, Node* const done);
+};
+
+void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator(
+    Node* const context, Node* const object, Label* if_exception,
+    Variable* var_exception, const char* method_name) {
+  Label if_receiverisincompatible(this, Label::kDeferred), done(this);
+
+  GotoIf(TaggedIsSmi(object), &if_receiverisincompatible);
+  Branch(HasInstanceType(object, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE), &done,
+         &if_receiverisincompatible);
+
+  Bind(&if_receiverisincompatible);
+  {
+    // If Type(O) is not Object, or if O does not have a [[SyncIterator]]
+    // internal slot, then
+
+    // Let badIteratorError be a new TypeError exception.
+    Node* const error =
+        MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context,
+                      CStringConstant(method_name), object);
+
+    // Perform ! Call(promiseCapability.[[Reject]], undefined,
+    //                « badIteratorError »).
+    var_exception->Bind(error);
+    Goto(if_exception);
+  }
+
+  Bind(&done);
+}
+
+void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
+    Node* const context, Node* const iterator, Node* const sent_value,
+    Handle<Name> method_name, UndefinedMethodHandler&& if_method_undefined,
+    const char* operation_name, Label::Type reject_label_type,
+    Node* const initial_exception_value) {
+  Node* const native_context = LoadNativeContext(context);
+  Node* const promise = AllocateAndInitJSPromise(context);
+
+  Variable var_exception(this, MachineRepresentation::kTagged,
+                         initial_exception_value == nullptr
+                             ? UndefinedConstant()
+                             : initial_exception_value);
+  Label reject_promise(this, reject_label_type);
+
+  ThrowIfNotAsyncFromSyncIterator(context, iterator, &reject_promise,
+                                  &var_exception, operation_name);
+
+  Node* const sync_iterator =
+      LoadObjectField(iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset);
+
+  Node* const method = GetProperty(context, sync_iterator, method_name);
+
+  if (if_method_undefined) {
+    Label if_isnotundefined(this);
+
+    GotoIfNot(IsUndefined(method), &if_isnotundefined);
+    if_method_undefined(native_context, promise, &reject_promise);
+
+    Bind(&if_isnotundefined);
+  }
+
+  Node* const iter_result = CallJS(CodeFactory::Call(isolate()), context,
+                                   method, sync_iterator, sent_value);
+  GotoIfException(iter_result, &reject_promise, &var_exception);
+
+  Node* value;
+  Node* done;
+  std::tie(value, done) = LoadIteratorResult(
+      context, native_context, iter_result, &reject_promise, &var_exception);
+  Node* const wrapper = AllocateAndInitJSPromise(context);
+
+  // Perform ! Call(valueWrapperCapability.[[Resolve]], undefined, «
+  // throwValue »).
+  InternalResolvePromise(context, wrapper, value);
+
+  // Let onFulfilled be a new built-in function object as defined in
+  // Async Iterator Value Unwrap Functions.
+  // Set onFulfilled.[[Done]] to throwDone.
+  Node* const on_fulfilled = CreateUnwrapClosure(native_context, done);
+
+  // Perform ! PerformPromiseThen(valueWrapperCapability.[[Promise]],
+  //     onFulfilled, undefined, promiseCapability).
+  Node* const undefined = UndefinedConstant();
+  InternalPerformPromiseThen(context, wrapper, on_fulfilled, undefined, promise,
+                             undefined, undefined);
+  Return(promise);
+
+  Bind(&reject_promise);
+  {
+    Node* const exception = var_exception.value();
+    InternalPromiseReject(context, promise, exception, TrueConstant());
+
+    Return(promise);
+  }
+}
+
+std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
+    Node* const context, Node* const native_context, Node* const iter_result,
+    Label* if_exception, Variable* var_exception) {
+  Label if_fastpath(this), if_slowpath(this), merge(this), to_boolean(this),
+      done(this), if_notanobject(this, Label::kDeferred);
+  GotoIf(TaggedIsSmi(iter_result), &if_notanobject);
+
+  Node* const iter_result_map = LoadMap(iter_result);
+  GotoIfNot(IsJSReceiverMap(iter_result_map), &if_notanobject);
+
+  Node* const fast_iter_result_map =
+      LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+  Variable var_value(this, MachineRepresentation::kTagged);
+  Variable var_done(this, MachineRepresentation::kTagged);
+  Branch(WordEqual(iter_result_map, fast_iter_result_map), &if_fastpath,
+         &if_slowpath);
+
+  Bind(&if_fastpath);
+  {
+    var_value.Bind(
+        LoadObjectField(iter_result, JSIteratorResult::kValueOffset));
+    var_done.Bind(LoadObjectField(iter_result, JSIteratorResult::kDoneOffset));
+    Goto(&merge);
+  }
+
+  Bind(&if_slowpath);
+  {
+    // Let nextValue be IteratorValue(nextResult).
+    // IfAbruptRejectPromise(nextValue, promiseCapability).
+    Node* const value =
+        GetProperty(context, iter_result, factory()->value_string());
+    GotoIfException(value, if_exception, var_exception);
+
+    // Let nextDone be IteratorComplete(nextResult).
+    // IfAbruptRejectPromise(nextDone, promiseCapability).
+    Node* const done =
+        GetProperty(context, iter_result, factory()->done_string());
+    GotoIfException(done, if_exception, var_exception);
+
+    var_value.Bind(value);
+    var_done.Bind(done);
+    Goto(&merge);
+  }
+
+  Bind(&if_notanobject);
+  {
+    // The sync iterator result is not an object; produce a TypeError and
+    // jump to the `if_exception` path.
+    Node* const error = MakeTypeError(
+        MessageTemplate::kIteratorResultNotAnObject, context, iter_result);
+    var_exception->Bind(error);
+    Goto(if_exception);
+  }
+
+  Bind(&merge);
+  // Ensure `iterResult.done` is a Boolean.
+  GotoIf(TaggedIsSmi(var_done.value()), &to_boolean);
+  Branch(IsBoolean(var_done.value()), &done, &to_boolean);
+
+  Bind(&to_boolean);
+  {
+    Node* const result =
+        CallStub(CodeFactory::ToBoolean(isolate()), context, var_done.value());
+    var_done.Bind(result);
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return std::make_pair(var_value.value(), var_done.value());
+}
+
+Node* AsyncFromSyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context,
+                                                          Node* done) {
+  Node* const map = LoadContextElement(
+      native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+  Node* const on_fulfilled_shared = LoadContextElement(
+      native_context, Context::ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN);
+  CSA_ASSERT(this,
+             HasInstanceType(on_fulfilled_shared, SHARED_FUNCTION_INFO_TYPE));
+  Node* const closure_context =
+      AllocateAsyncIteratorValueUnwrapContext(native_context, done);
+  return AllocateFunctionWithMapAndContext(map, on_fulfilled_shared,
+                                           closure_context);
+}
+
+Node* AsyncFromSyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext(
+    Node* native_context, Node* done) {
+  CSA_ASSERT(this, IsNativeContext(native_context));
+  CSA_ASSERT(this, IsBoolean(done));
+
+  Node* const context =
+      CreatePromiseContext(native_context, ValueUnwrapContext::kLength);
+  StoreContextElementNoWriteBarrier(context, ValueUnwrapContext::kDoneSlot,
+                                    done);
+  return context;
+}
+}  // namespace
+
+// https://tc39.github.io/proposal-async-iteration/
+// Section #sec-%asyncfromsynciteratorprototype%.next
+TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
+  Node* const iterator = Parameter(0);
+  Node* const value = Parameter(1);
+  Node* const context = Parameter(4);
+
+  Generate_AsyncFromSyncIteratorMethod(
+      context, iterator, value, factory()->next_string(),
+      UndefinedMethodHandler(), "[Async-from-Sync Iterator].prototype.next");
+}
+
+// https://tc39.github.io/proposal-async-iteration/
+// Section #sec-%asyncfromsynciteratorprototype%.return
+TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
+           AsyncFromSyncBuiltinsAssembler) {
+  Node* const iterator = Parameter(0);
+  Node* const value = Parameter(1);
+  Node* const context = Parameter(4);
+
+  auto if_return_undefined = [=](Node* const native_context,
+                                 Node* const promise, Label* if_exception) {
+    // If return is undefined, then
+    // Let iterResult be ! CreateIterResultObject(value, true)
+    Node* const iter_result =
+        CallStub(CodeFactory::CreateIterResultObject(isolate()), context, value,
+                 TrueConstant());
+
+    // Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
+    // IfAbruptRejectPromise(nextDone, promiseCapability).
+    // Return promiseCapability.[[Promise]].
+    PromiseFulfill(context, promise, iter_result, v8::Promise::kFulfilled);
+    Return(promise);
+  };
+
+  Generate_AsyncFromSyncIteratorMethod(
+      context, iterator, value, factory()->return_string(), if_return_undefined,
+      "[Async-from-Sync Iterator].prototype.return");
+}
+
+// https://tc39.github.io/proposal-async-iteration/
+// Section #sec-%asyncfromsynciteratorprototype%.throw
+TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow,
+           AsyncFromSyncBuiltinsAssembler) {
+  Node* const iterator = Parameter(0);
+  Node* const reason = Parameter(1);
+  Node* const context = Parameter(4);
+
+  auto if_throw_undefined = [=](Node* const native_context, Node* const promise,
+                                Label* if_exception) { Goto(if_exception); };
+
+  Generate_AsyncFromSyncIteratorMethod(
+      context, iterator, reason, factory()->throw_string(), if_throw_undefined,
+      "[Async-from-Sync Iterator].prototype.throw", Label::kNonDeferred,
+      reason);
+}
+
+TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncFromSyncBuiltinsAssembler) {
+  Node* const value = Parameter(1);
+  Node* const context = Parameter(4);
+
+  Node* const done = LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
+  CSA_ASSERT(this, IsBoolean(done));
+
+  Node* const unwrapped_value = CallStub(
+      CodeFactory::CreateIterResultObject(isolate()), context, value, done);
+
+  Return(unwrapped_value);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-async.cc b/src/builtins/builtins-async.cc
new file mode 100644
index 0000000..4c64637
--- /dev/null
+++ b/src/builtins/builtins-async.cc
@@ -0,0 +1,92 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-async.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/frames-inl.h"
+
+namespace v8 {
+namespace internal {
+
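+// In JS-ish terms, the wiring built below is roughly (a sketch, not spec
+// text):
+//   wrapped = new Promise(); resolve(wrapped, value);
+//   throwaway = new Promise();  // its result is never observed
+//   PerformPromiseThen(wrapped, on_resolve, on_reject, throwaway)
+// where on_resolve/on_reject close over the generator and resume it.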
+Node* AsyncBuiltinsAssembler::Await(
+    Node* context, Node* generator, Node* value, Node* outer_promise,
+    const NodeGenerator1& create_closure_context, int on_resolve_context_index,
+    int on_reject_context_index, bool is_predicted_as_caught) {
+  // Let promiseCapability be ! NewPromiseCapability(%Promise%).
+  Node* const wrapped_value = AllocateAndInitJSPromise(context);
+
+  // Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
+  InternalResolvePromise(context, wrapped_value, value);
+
+  Node* const native_context = LoadNativeContext(context);
+
+  Node* const closure_context = create_closure_context(native_context);
+  Node* const map = LoadContextElement(
+      native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+
+  // Load and allocate on_resolve closure
+  Node* const on_resolve_shared_fun =
+      LoadContextElement(native_context, on_resolve_context_index);
+  CSA_SLOW_ASSERT(
+      this, HasInstanceType(on_resolve_shared_fun, SHARED_FUNCTION_INFO_TYPE));
+  Node* const on_resolve = AllocateFunctionWithMapAndContext(
+      map, on_resolve_shared_fun, closure_context);
+
+  // Load and allocate on_reject closure
+  Node* const on_reject_shared_fun =
+      LoadContextElement(native_context, on_reject_context_index);
+  CSA_SLOW_ASSERT(
+      this, HasInstanceType(on_reject_shared_fun, SHARED_FUNCTION_INFO_TYPE));
+  Node* const on_reject = AllocateFunctionWithMapAndContext(
+      map, on_reject_shared_fun, closure_context);
+
+  Node* const throwaway_promise =
+      AllocateAndInitJSPromise(context, wrapped_value);
+
+  // The Promise will be thrown away and not handled, but it shouldn't trigger
+  // unhandled reject events, as its work is done.
+  PromiseSetHasHandler(throwaway_promise);
+
+  Label do_perform_promise_then(this);
+  GotoIfNot(IsDebugActive(), &do_perform_promise_then);
+  {
+    Label common(this);
+    GotoIf(TaggedIsSmi(value), &common);
+    GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &common);
+    {
+      // Mark the reject handler callback as a forwarding edge rather than a
+      // meaningful catch handler.
+      Node* const key =
+          HeapConstant(factory()->promise_forwarding_handler_symbol());
+      CallRuntime(Runtime::kSetProperty, context, on_reject, key,
+                  TrueConstant(), SmiConstant(STRICT));
+
+      if (is_predicted_as_caught) PromiseSetHandledHint(value);
+    }
+
+    Goto(&common);
+    Bind(&common);
+    // Mark the dependency on the outer Promise in case the throwaway Promise
+    // is found on the Promise stack.
+    CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
+
+    Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
+    CallRuntime(Runtime::kSetProperty, context, throwaway_promise, key,
+                outer_promise, SmiConstant(STRICT));
+  }
+
+  Goto(&do_perform_promise_then);
+  Bind(&do_perform_promise_then);
+  InternalPerformPromiseThen(context, wrapped_value, on_resolve, on_reject,
+                             throwaway_promise, UndefinedConstant(),
+                             UndefinedConstant());
+
+  return wrapped_value;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-async.h b/src/builtins/builtins-async.h
new file mode 100644
index 0000000..9f5df6e
--- /dev/null
+++ b/src/builtins/builtins-async.h
@@ -0,0 +1,35 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_ASYNC_H_
+#define V8_BUILTINS_BUILTINS_ASYNC_H_
+
+#include "src/builtins/builtins-promise.h"
+
+namespace v8 {
+namespace internal {
+
+class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
+ public:
+  explicit AsyncBuiltinsAssembler(CodeAssemblerState* state)
+      : PromiseBuiltinsAssembler(state) {}
+
+ protected:
+  typedef std::function<Node*(Node*)> NodeGenerator1;
+
+  // Perform the steps to resume the generator once `value` is resolved.
+  // `on_resolve_context_index` and `on_reject_context_index` are indices into
+  // the Native Context which should point to the SharedFunctionInfo instances
+  // used to create the resolve and reject closures, respectively.
+  // Returns the Promise-wrapped `value`.
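+  // For example, AsyncFunctionAwait passes
+  // Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN and
+  // Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN as the two indices.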
+  Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
+              const NodeGenerator1& create_closure_context,
+              int on_resolve_context_index, int on_reject_context_index,
+              bool is_predicted_as_caught);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BUILTINS_BUILTINS_ASYNC_H_
diff --git a/src/builtins/builtins-boolean.cc b/src/builtins/builtins-boolean.cc
index e7ccf95..65bdb03 100644
--- a/src/builtins/builtins-boolean.cc
+++ b/src/builtins/builtins-boolean.cc
@@ -2,8 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -34,28 +37,32 @@
 }
 
 // ES6 section 19.3.3.2 Boolean.prototype.toString ( )
-void Builtins::Generate_BooleanPrototypeToString(CodeStubAssembler* assembler) {
+void Builtins::Generate_BooleanPrototypeToString(
+    compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
+  Node* receiver = assembler.Parameter(0);
+  Node* context = assembler.Parameter(3);
 
-  Node* value = assembler->ToThisValue(
+  Node* value = assembler.ToThisValue(
       context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.toString");
-  Node* result = assembler->LoadObjectField(value, Oddball::kToStringOffset);
-  assembler->Return(result);
+  Node* result = assembler.LoadObjectField(value, Oddball::kToStringOffset);
+  assembler.Return(result);
 }
 
 // ES6 section 19.3.3.3 Boolean.prototype.valueOf ( )
-void Builtins::Generate_BooleanPrototypeValueOf(CodeStubAssembler* assembler) {
+void Builtins::Generate_BooleanPrototypeValueOf(
+    compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
+  Node* receiver = assembler.Parameter(0);
+  Node* context = assembler.Parameter(3);
 
-  Node* result = assembler->ToThisValue(
+  Node* result = assembler.ToThisValue(
       context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.valueOf");
-  assembler->Return(result);
+  assembler.Return(result);
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-call.cc b/src/builtins/builtins-call.cc
index e3054a9..40ef3f0 100644
--- a/src/builtins/builtins-call.cc
+++ b/src/builtins/builtins-call.cc
@@ -2,8 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/isolate.h"
+#include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -147,5 +150,14 @@
   Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
 }
 
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm) {
+  Generate_CallForwardVarargs(masm, masm->isolate()->builtins()->Call());
+}
+
+void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
+  Generate_CallForwardVarargs(masm,
+                              masm->isolate()->builtins()->CallFunction());
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins/builtins-callsite.cc b/src/builtins/builtins-callsite.cc
index ae9c76d..37da78f 100644
--- a/src/builtins/builtins-callsite.cc
+++ b/src/builtins/builtins-callsite.cc
@@ -5,6 +5,8 @@
 #include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
 
+#include "src/counters.h"
+#include "src/objects-inl.h"
 #include "src/string-builder.h"
 #include "src/wasm/wasm-module.h"
 
diff --git a/src/builtins/builtins-constructor.cc b/src/builtins/builtins-constructor.cc
new file mode 100644
index 0000000..ec79e4b
--- /dev/null
+++ b/src/builtins/builtins-constructor.cc
@@ -0,0 +1,789 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-constructor.h"
+#include "src/ast/ast.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/interface-descriptors.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+
+Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
+                                                       Node* feedback_vector,
+                                                       Node* slot,
+                                                       Node* context) {
+  typedef compiler::CodeAssembler::Label Label;
+  typedef compiler::CodeAssembler::Variable Variable;
+
+  Isolate* isolate = this->isolate();
+  Factory* factory = isolate->factory();
+  IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
+
+  // Create a new closure from the given function info in new space
+  Node* result = Allocate(JSFunction::kSize);
+
+  // Calculate the index of the map we should install on the function based on
+  // the FunctionKind and LanguageMode of the function.
+  // Note: Must be kept in sync with Context::FunctionMapIndex
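+  // (Normal functions pick the strict or sloppy function map; generators,
+  // async functions and class constructors get their dedicated maps; all
+  // remaining kinds use the strict-without-prototype map. See the branches
+  // below.)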
+  Node* compiler_hints =
+      LoadObjectField(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
+                      MachineType::Uint32());
+  Node* is_strict = Word32And(
+      compiler_hints, Int32Constant(1 << SharedFunctionInfo::kStrictModeBit));
+
+  Label if_normal(this), if_generator(this), if_async(this),
+      if_class_constructor(this), if_function_without_prototype(this),
+      load_map(this);
+  Variable map_index(this, MachineType::PointerRepresentation());
+
+  STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
+  Node* is_not_normal =
+      Word32And(compiler_hints,
+                Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
+  GotoIfNot(is_not_normal, &if_normal);
+
+  Node* is_generator = Word32And(
+      compiler_hints, Int32Constant(FunctionKind::kGeneratorFunction
+                                    << SharedFunctionInfo::kFunctionKindShift));
+  GotoIf(is_generator, &if_generator);
+
+  Node* is_async = Word32And(
+      compiler_hints, Int32Constant(FunctionKind::kAsyncFunction
+                                    << SharedFunctionInfo::kFunctionKindShift));
+  GotoIf(is_async, &if_async);
+
+  Node* is_class_constructor = Word32And(
+      compiler_hints, Int32Constant(FunctionKind::kClassConstructor
+                                    << SharedFunctionInfo::kFunctionKindShift));
+  GotoIf(is_class_constructor, &if_class_constructor);
+
+  if (FLAG_debug_code) {
+    // Function must be a function without a prototype.
+    CSA_ASSERT(
+        this,
+        Word32And(compiler_hints,
+                  Int32Constant((FunctionKind::kAccessorFunction |
+                                 FunctionKind::kArrowFunction |
+                                 FunctionKind::kConciseMethod)
+                                << SharedFunctionInfo::kFunctionKindShift)));
+  }
+  Goto(&if_function_without_prototype);
+
+  Bind(&if_normal);
+  {
+    map_index.Bind(SelectIntPtrConstant(is_strict,
+                                        Context::STRICT_FUNCTION_MAP_INDEX,
+                                        Context::SLOPPY_FUNCTION_MAP_INDEX));
+    Goto(&load_map);
+  }
+
+  Bind(&if_generator);
+  {
+    map_index.Bind(IntPtrConstant(Context::GENERATOR_FUNCTION_MAP_INDEX));
+    Goto(&load_map);
+  }
+
+  Bind(&if_async);
+  {
+    map_index.Bind(IntPtrConstant(Context::ASYNC_FUNCTION_MAP_INDEX));
+    Goto(&load_map);
+  }
+
+  Bind(&if_class_constructor);
+  {
+    map_index.Bind(IntPtrConstant(Context::CLASS_FUNCTION_MAP_INDEX));
+    Goto(&load_map);
+  }
+
+  Bind(&if_function_without_prototype);
+  {
+    map_index.Bind(
+        IntPtrConstant(Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+    Goto(&load_map);
+  }
+
+  Bind(&load_map);
+
+  // Get the function map in the current native context and set that
+  // as the map of the allocated object.
+  Node* native_context = LoadNativeContext(context);
+  Node* map_slot_value =
+      LoadFixedArrayElement(native_context, map_index.value());
+  StoreMapNoWriteBarrier(result, map_slot_value);
+
+  // Initialize the rest of the function.
+  Node* empty_fixed_array = HeapConstant(factory->empty_fixed_array());
+  StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
+                                 empty_fixed_array);
+  StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
+                                 empty_fixed_array);
+  Node* literals_cell = LoadFixedArrayElement(
+      feedback_vector, slot, 0, CodeStubAssembler::SMI_PARAMETERS);
+  {
+    // Bump the closure counter encoded in the cell's map.
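+    // The cell's map encodes how many closures have been created for it:
+    // NoClosures -> OneClosure -> ManyClosures, saturating at "many".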
+    Node* cell_map = LoadMap(literals_cell);
+    Label no_closures(this), one_closure(this), cell_done(this);
+
+    GotoIf(IsNoClosuresCellMap(cell_map), &no_closures);
+    GotoIf(IsOneClosureCellMap(cell_map), &one_closure);
+    CSA_ASSERT(this, IsManyClosuresCellMap(cell_map));
+    Goto(&cell_done);
+
+    Bind(&no_closures);
+    StoreMapNoWriteBarrier(literals_cell, Heap::kOneClosureCellMapRootIndex);
+    Goto(&cell_done);
+
+    Bind(&one_closure);
+    StoreMapNoWriteBarrier(literals_cell, Heap::kManyClosuresCellMapRootIndex);
+    Goto(&cell_done);
+
+    Bind(&cell_done);
+  }
+  StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset,
+                                 literals_cell);
+  StoreObjectFieldNoWriteBarrier(
+      result, JSFunction::kPrototypeOrInitialMapOffset, TheHoleConstant());
+  StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
+                                 shared_info);
+  StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
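+  // Point the code entry at the CompileLazy trampoline so that the closure
+  // is compiled on its first invocation.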
+  Handle<Code> lazy_builtin_handle(
+      isolate->builtins()->builtin(Builtins::kCompileLazy));
+  Node* lazy_builtin = HeapConstant(lazy_builtin_handle);
+  Node* lazy_builtin_entry =
+      IntPtrAdd(BitcastTaggedToWord(lazy_builtin),
+                IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+  StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeEntryOffset,
+                                 lazy_builtin_entry,
+                                 MachineType::PointerRepresentation());
+  StoreObjectFieldNoWriteBarrier(result, JSFunction::kNextFunctionLinkOffset,
+                                 UndefinedConstant());
+
+  return result;
+}
+
+TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
+  Node* shared = Parameter(FastNewClosureDescriptor::kSharedFunctionInfo);
+  Node* context = Parameter(FastNewClosureDescriptor::kContext);
+  Node* vector = Parameter(FastNewClosureDescriptor::kVector);
+  Node* slot = Parameter(FastNewClosureDescriptor::kSlot);
+  Return(EmitFastNewClosure(shared, vector, slot, context));
+}
+
+TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
+  typedef FastNewObjectDescriptor Descriptor;
+  Node* context = Parameter(Descriptor::kContext);
+  Node* target = Parameter(Descriptor::kTarget);
+  Node* new_target = Parameter(Descriptor::kNewTarget);
+
+  Label call_runtime(this);
+
+  Node* result = EmitFastNewObject(context, target, new_target, &call_runtime);
+  Return(result);
+
+  Bind(&call_runtime);
+  TailCallRuntime(Runtime::kNewObject, context, target, new_target);
+}
+
+Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context,
+                                                      Node* target,
+                                                      Node* new_target) {
+  Variable var_obj(this, MachineRepresentation::kTagged);
+  Label call_runtime(this), end(this);
+
+  Node* result = EmitFastNewObject(context, target, new_target, &call_runtime);
+  var_obj.Bind(result);
+  Goto(&end);
+
+  Bind(&call_runtime);
+  var_obj.Bind(CallRuntime(Runtime::kNewObject, context, target, new_target));
+  Goto(&end);
+
+  Bind(&end);
+  return var_obj.value();
+}
+
+Node* ConstructorBuiltinsAssembler::EmitFastNewObject(
+    Node* context, Node* target, Node* new_target,
+    CodeAssemblerLabel* call_runtime) {
+  CSA_ASSERT(this, HasInstanceType(target, JS_FUNCTION_TYPE));
+  CSA_ASSERT(this, IsJSReceiver(new_target));
+
+  // Verify that the new target is a JSFunction.
+  Label fast(this), end(this);
+  GotoIf(HasInstanceType(new_target, JS_FUNCTION_TYPE), &fast);
+  Goto(call_runtime);
+
+  Bind(&fast);
+
+  // Load the initial map and verify that it's in fact a map.
+  Node* initial_map =
+      LoadObjectField(new_target, JSFunction::kPrototypeOrInitialMapOffset);
+  GotoIf(TaggedIsSmi(initial_map), call_runtime);
+  GotoIf(DoesntHaveInstanceType(initial_map, MAP_TYPE), call_runtime);
+
+  // Fall back to runtime if the target differs from the new target's
+  // initial map constructor.
+  Node* new_target_constructor =
+      LoadObjectField(initial_map, Map::kConstructorOrBackPointerOffset);
+  GotoIf(WordNotEqual(target, new_target_constructor), call_runtime);
+
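+  // The map records the instance size in words; scale it to bytes before
+  // allocating.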
+  Node* instance_size_words = ChangeUint32ToWord(LoadObjectField(
+      initial_map, Map::kInstanceSizeOffset, MachineType::Uint8()));
+  Node* instance_size =
+      WordShl(instance_size_words, IntPtrConstant(kPointerSizeLog2));
+
+  Node* object = Allocate(instance_size);
+  StoreMapNoWriteBarrier(object, initial_map);
+  Node* empty_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+  StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOffset,
+                                 empty_array);
+  StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset,
+                                 empty_array);
+
+  instance_size_words = ChangeUint32ToWord(LoadObjectField(
+      initial_map, Map::kInstanceSizeOffset, MachineType::Uint8()));
+  instance_size =
+      WordShl(instance_size_words, IntPtrConstant(kPointerSizeLog2));
+
+  // Perform in-object slack tracking if requested.
+  Node* bit_field3 = LoadMapBitField3(initial_map);
+  Label slack_tracking(this), finalize(this, Label::kDeferred), done(this);
+  GotoIf(IsSetWord32<Map::ConstructionCounter>(bit_field3), &slack_tracking);
+
+  // Initialize remaining fields.
+  {
+    Comment("no slack tracking");
+    InitializeFieldsWithRoot(object, IntPtrConstant(JSObject::kHeaderSize),
+                             instance_size, Heap::kUndefinedValueRootIndex);
+    Goto(&end);
+  }
+
+  {
+    Bind(&slack_tracking);
+
+    // Decrease generous allocation count.
+    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+    Comment("update allocation count");
+    Node* new_bit_field3 = Int32Sub(
+        bit_field3, Int32Constant(1 << Map::ConstructionCounter::kShift));
+    StoreObjectFieldNoWriteBarrier(initial_map, Map::kBitField3Offset,
+                                   new_bit_field3,
+                                   MachineRepresentation::kWord32);
+    GotoIf(IsClearWord32<Map::ConstructionCounter>(new_bit_field3), &finalize);
+
+    Node* unused_fields = LoadObjectField(
+        initial_map, Map::kUnusedPropertyFieldsOffset, MachineType::Uint8());
+    Node* used_size =
+        IntPtrSub(instance_size, WordShl(ChangeUint32ToWord(unused_fields),
+                                         IntPtrConstant(kPointerSizeLog2)));
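+    // Fill the unused tail of the object with one-pointer fillers so the GC
+    // treats it as free space while slack tracking is still in progress.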
+
+    Comment("initialize filler fields (no finalize)");
+    InitializeFieldsWithRoot(object, used_size, instance_size,
+                             Heap::kOnePointerFillerMapRootIndex);
+
+    Comment("initialize undefined fields (no finalize)");
+    InitializeFieldsWithRoot(object, IntPtrConstant(JSObject::kHeaderSize),
+                             used_size, Heap::kUndefinedValueRootIndex);
+    Goto(&end);
+  }
+
+  {
+    // Finalize the instance size.
+    Bind(&finalize);
+
+    Node* unused_fields = LoadObjectField(
+        initial_map, Map::kUnusedPropertyFieldsOffset, MachineType::Uint8());
+    Node* used_size =
+        IntPtrSub(instance_size, WordShl(ChangeUint32ToWord(unused_fields),
+                                         IntPtrConstant(kPointerSizeLog2)));
+
+    Comment("initialize filler fields (finalize)");
+    InitializeFieldsWithRoot(object, used_size, instance_size,
+                             Heap::kOnePointerFillerMapRootIndex);
+
+    Comment("initialize undefined fields (finalize)");
+    InitializeFieldsWithRoot(object, IntPtrConstant(JSObject::kHeaderSize),
+                             used_size, Heap::kUndefinedValueRootIndex);
+
+    CallRuntime(Runtime::kFinalizeInstanceSize, context, initial_map);
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return object;
+}
+
+Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
+    Node* function, Node* slots, Node* context, ScopeType scope_type) {
+  slots = ChangeUint32ToWord(slots);
+
+  // TODO(ishell): Use CSA::OptimalParameterMode() here.
+  CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
+  Node* min_context_slots = IntPtrConstant(Context::MIN_CONTEXT_SLOTS);
+  Node* length = IntPtrAdd(slots, min_context_slots);
+  Node* size = GetFixedArrayAllocationSize(length, FAST_ELEMENTS, mode);
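+  // A function context is laid out like a FixedArray: MIN_CONTEXT_SLOTS
+  // fixed entries followed by the requested slots.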
+
+  // Create a new function context in new space.
+  Node* function_context = Allocate(size);
+
+  Heap::RootListIndex context_type;
+  switch (scope_type) {
+    case EVAL_SCOPE:
+      context_type = Heap::kEvalContextMapRootIndex;
+      break;
+    case FUNCTION_SCOPE:
+      context_type = Heap::kFunctionContextMapRootIndex;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  StoreMapNoWriteBarrier(function_context, context_type);
+  StoreObjectFieldNoWriteBarrier(function_context, Context::kLengthOffset,
+                                 SmiTag(length));
+
+  // Set up the fixed slots.
+  StoreFixedArrayElement(function_context, Context::CLOSURE_INDEX, function,
+                         SKIP_WRITE_BARRIER);
+  StoreFixedArrayElement(function_context, Context::PREVIOUS_INDEX, context,
+                         SKIP_WRITE_BARRIER);
+  StoreFixedArrayElement(function_context, Context::EXTENSION_INDEX,
+                         TheHoleConstant(), SKIP_WRITE_BARRIER);
+
+  // Copy the native context from the previous context.
+  Node* native_context = LoadNativeContext(context);
+  StoreFixedArrayElement(function_context, Context::NATIVE_CONTEXT_INDEX,
+                         native_context, SKIP_WRITE_BARRIER);
+
+  // Initialize the rest of the slots to undefined.
+  Node* undefined = UndefinedConstant();
+  BuildFastFixedArrayForEach(
+      function_context, FAST_ELEMENTS, min_context_slots, length,
+      [this, undefined](Node* context, Node* offset) {
+        StoreNoWriteBarrier(MachineRepresentation::kTagged, context, offset,
+                            undefined);
+      },
+      mode);
+
+  return function_context;
+}
+
+// static
+int ConstructorBuiltinsAssembler::MaximumFunctionContextSlots() {
+  return FLAG_test_small_max_function_context_stub_size ? kSmallMaximumSlots
+                                                        : kMaximumSlots;
+}
+
+TF_BUILTIN(FastNewFunctionContextEval, ConstructorBuiltinsAssembler) {
+  Node* function = Parameter(FastNewFunctionContextDescriptor::kFunction);
+  Node* slots = Parameter(FastNewFunctionContextDescriptor::kSlots);
+  Node* context = Parameter(FastNewFunctionContextDescriptor::kContext);
+  Return(EmitFastNewFunctionContext(function, slots, context,
+                                    ScopeType::EVAL_SCOPE));
+}
+
+TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) {
+  Node* function = Parameter(FastNewFunctionContextDescriptor::kFunction);
+  Node* slots = Parameter(FastNewFunctionContextDescriptor::kSlots);
+  Node* context = Parameter(FastNewFunctionContextDescriptor::kContext);
+  Return(EmitFastNewFunctionContext(function, slots, context,
+                                    ScopeType::FUNCTION_SCOPE));
+}
+
+Handle<Code> Builtins::NewFunctionContext(ScopeType scope_type) {
+  switch (scope_type) {
+    case ScopeType::EVAL_SCOPE:
+      return FastNewFunctionContextEval();
+    case ScopeType::FUNCTION_SCOPE:
+      return FastNewFunctionContextFunction();
+    default:
+      UNREACHABLE();
+  }
+  return Handle<Code>::null();
+}
+
+Node* ConstructorBuiltinsAssembler::EmitFastCloneRegExp(Node* closure,
+                                                        Node* literal_index,
+                                                        Node* pattern,
+                                                        Node* flags,
+                                                        Node* context) {
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+  typedef compiler::Node Node;
+
+  Label call_runtime(this, Label::kDeferred), end(this);
+
+  Variable result(this, MachineRepresentation::kTagged);
+
+  Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
+  Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
+  Node* boilerplate = LoadFixedArrayElement(feedback_vector, literal_index, 0,
+                                            CodeStubAssembler::SMI_PARAMETERS);
+  GotoIf(IsUndefined(boilerplate), &call_runtime);
+
+  {
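+    // Fast path: the boilerplate exists, so make a field-by-field shallow
+    // copy of the JSRegExp, including its in-object fields.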
+    int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+    Node* copy = Allocate(size);
+    for (int offset = 0; offset < size; offset += kPointerSize) {
+      Node* value = LoadObjectField(boilerplate, offset);
+      StoreObjectFieldNoWriteBarrier(copy, offset, value);
+    }
+    result.Bind(copy);
+    Goto(&end);
+  }
+
+  Bind(&call_runtime);
+  {
+    result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context, closure,
+                            literal_index, pattern, flags));
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return result.value();
+}
+
+TF_BUILTIN(FastCloneRegExp, ConstructorBuiltinsAssembler) {
+  Node* closure = Parameter(FastCloneRegExpDescriptor::kClosure);
+  Node* literal_index = Parameter(FastCloneRegExpDescriptor::kLiteralIndex);
+  Node* pattern = Parameter(FastCloneRegExpDescriptor::kPattern);
+  Node* flags = Parameter(FastCloneRegExpDescriptor::kFlags);
+  Node* context = Parameter(FastCloneRegExpDescriptor::kContext);
+
+  Return(EmitFastCloneRegExp(closure, literal_index, pattern, flags, context));
+}
+
+Node* ConstructorBuiltinsAssembler::NonEmptyShallowClone(
+    Node* boilerplate, Node* boilerplate_map, Node* boilerplate_elements,
+    Node* allocation_site, Node* capacity, ElementsKind kind) {
+  typedef CodeStubAssembler::ParameterMode ParameterMode;
+
+  ParameterMode param_mode = OptimalParameterMode();
+
+  Node* length = LoadJSArrayLength(boilerplate);
+  capacity = TaggedToParameter(capacity, param_mode);
+
+  Node *array, *elements;
+  std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+      kind, boilerplate_map, length, allocation_site, capacity, param_mode);
+
+  Comment("copy elements header");
+  // Header consists of map and length.
+  STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
+  StoreMap(elements, LoadMap(boilerplate_elements));
+  {
+    int offset = FixedArrayBase::kLengthOffset;
+    StoreObjectFieldNoWriteBarrier(
+        elements, offset, LoadObjectField(boilerplate_elements, offset));
+  }
+
+  length = TaggedToParameter(length, param_mode);
+
+  Comment("copy boilerplate elements");
+  CopyFixedArrayElements(kind, boilerplate_elements, elements, length,
+                         SKIP_WRITE_BARRIER, param_mode);
+  IncrementCounter(isolate()->counters()->inlined_copied_elements(), 1);
+
+  return array;
+}
+
+Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
+    Node* closure, Node* literal_index, Node* context,
+    CodeAssemblerLabel* call_runtime, AllocationSiteMode allocation_site_mode) {
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+  typedef compiler::Node Node;
+
+  Label zero_capacity(this), cow_elements(this), fast_elements(this),
+      return_result(this);
+  Variable result(this, MachineRepresentation::kTagged);
+
+  Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
+  Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
+  Node* allocation_site = LoadFixedArrayElement(
+      feedback_vector, literal_index, 0, CodeStubAssembler::SMI_PARAMETERS);
+
+  GotoIf(IsUndefined(allocation_site), call_runtime);
+  allocation_site = LoadFixedArrayElement(feedback_vector, literal_index, 0,
+                                          CodeStubAssembler::SMI_PARAMETERS);
+
+  Node* boilerplate =
+      LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
+  Node* boilerplate_map = LoadMap(boilerplate);
+  Node* boilerplate_elements = LoadElements(boilerplate);
+  Node* capacity = LoadFixedArrayBaseLength(boilerplate_elements);
+  allocation_site =
+      allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
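+  // A null allocation site makes the allocator omit the AllocationMemento
+  // on the cloned array.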
+
+  Node* zero = SmiConstant(Smi::kZero);
+  GotoIf(SmiEqual(capacity, zero), &zero_capacity);
+
+  Node* elements_map = LoadMap(boilerplate_elements);
+  GotoIf(IsFixedCOWArrayMap(elements_map), &cow_elements);
+
+  GotoIf(IsFixedArrayMap(elements_map), &fast_elements);
+  {
+    Comment("fast double elements path");
+    if (FLAG_debug_code) {
+      Label correct_elements_map(this), abort(this, Label::kDeferred);
+      Branch(IsFixedDoubleArrayMap(elements_map), &correct_elements_map,
+             &abort);
+
+      Bind(&abort);
+      {
+        Node* abort_id = SmiConstant(
+            Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
+        CallRuntime(Runtime::kAbort, context, abort_id);
+        result.Bind(UndefinedConstant());
+        Goto(&return_result);
+      }
+      Bind(&correct_elements_map);
+    }
+
+    Node* array =
+        NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
+                             allocation_site, capacity, FAST_DOUBLE_ELEMENTS);
+    result.Bind(array);
+    Goto(&return_result);
+  }
+
+  Bind(&fast_elements);
+  {
+    Comment("fast elements path");
+    Node* array =
+        NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
+                             allocation_site, capacity, FAST_ELEMENTS);
+    result.Bind(array);
+    Goto(&return_result);
+  }
+
+  Variable length(this, MachineRepresentation::kTagged),
+      elements(this, MachineRepresentation::kTagged);
+  Label allocate_without_elements(this);
+
+  Bind(&cow_elements);
+  {
+    Comment("fixed cow path");
+    length.Bind(LoadJSArrayLength(boilerplate));
+    elements.Bind(boilerplate_elements);
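+    // Copy-on-write elements can be shared with the boilerplate; only a new
+    // JSArray header is allocated below.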
+
+    Goto(&allocate_without_elements);
+  }
+
+  Bind(&zero_capacity);
+  {
+    Comment("zero capacity path");
+    length.Bind(zero);
+    elements.Bind(LoadRoot(Heap::kEmptyFixedArrayRootIndex));
+
+    Goto(&allocate_without_elements);
+  }
+
+  Bind(&allocate_without_elements);
+  {
+    Node* array = AllocateUninitializedJSArrayWithoutElements(
+        FAST_ELEMENTS, boilerplate_map, length.value(), allocation_site);
+    StoreObjectField(array, JSObject::kElementsOffset, elements.value());
+    result.Bind(array);
+    Goto(&return_result);
+  }
+
+  Bind(&return_result);
+  return result.value();
+}
+
+void ConstructorBuiltinsAssembler::CreateFastCloneShallowArrayBuiltin(
+    AllocationSiteMode allocation_site_mode) {
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Label Label;
+
+  Node* closure = Parameter(FastCloneShallowArrayDescriptor::kClosure);
+  Node* literal_index =
+      Parameter(FastCloneShallowArrayDescriptor::kLiteralIndex);
+  Node* constant_elements =
+      Parameter(FastCloneShallowArrayDescriptor::kConstantElements);
+  Node* context = Parameter(FastCloneShallowArrayDescriptor::kContext);
+  Label call_runtime(this, Label::kDeferred);
+  Return(EmitFastCloneShallowArray(closure, literal_index, context,
+                                   &call_runtime, allocation_site_mode));
+
+  Bind(&call_runtime);
+  {
+    Comment("call runtime");
+    Node* flags =
+        SmiConstant(Smi::FromInt(ArrayLiteral::kShallowElements |
+                                 (allocation_site_mode == TRACK_ALLOCATION_SITE
+                                      ? 0
+                                      : ArrayLiteral::kDisableMementos)));
+    Return(CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
+                       literal_index, constant_elements, flags));
+  }
+}
+
+TF_BUILTIN(FastCloneShallowArrayTrack, ConstructorBuiltinsAssembler) {
+  CreateFastCloneShallowArrayBuiltin(TRACK_ALLOCATION_SITE);
+}
+
+TF_BUILTIN(FastCloneShallowArrayDontTrack, ConstructorBuiltinsAssembler) {
+  CreateFastCloneShallowArrayBuiltin(DONT_TRACK_ALLOCATION_SITE);
+}
+
+Handle<Code> Builtins::NewCloneShallowArray(
+    AllocationSiteMode allocation_mode) {
+  switch (allocation_mode) {
+    case TRACK_ALLOCATION_SITE:
+      return FastCloneShallowArrayTrack();
+    case DONT_TRACK_ALLOCATION_SITE:
+      return FastCloneShallowArrayDontTrack();
+    default:
+      UNREACHABLE();
+  }
+  return Handle<Code>::null();
+}
+
+// static
+int ConstructorBuiltinsAssembler::FastCloneShallowObjectPropertiesCount(
+    int literal_length) {
+  // This heuristic, which gives empty literals
+  // kInitialGlobalObjectUnusedPropertiesCount properties, must remain in
+  // sync with the runtime.
+  // TODO(verwaest): Unify this with the heuristic in the runtime.
+  return literal_length == 0
+             ? JSObject::kInitialGlobalObjectUnusedPropertiesCount
+             : literal_length;
+}
+
+Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
+    CodeAssemblerLabel* call_runtime, Node* closure, Node* literals_index,
+    Node* properties_count) {
+  Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
+  Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
+  Node* allocation_site = LoadFixedArrayElement(
+      feedback_vector, literals_index, 0, CodeStubAssembler::SMI_PARAMETERS);
+  GotoIf(IsUndefined(allocation_site), call_runtime);
+
+  // Calculate the object and allocation size based on the properties count.
+  Node* object_size = IntPtrAdd(WordShl(properties_count, kPointerSizeLog2),
+                                IntPtrConstant(JSObject::kHeaderSize));
+  Node* allocation_size = object_size;
+  if (FLAG_allocation_site_pretenuring) {
+    allocation_size =
+        IntPtrAdd(object_size, IntPtrConstant(AllocationMemento::kSize));
+  }
+  Node* boilerplate =
+      LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
+  Node* boilerplate_map = LoadMap(boilerplate);
+  Node* instance_size = LoadMapInstanceSize(boilerplate_map);
+  Node* size_in_words = WordShr(object_size, kPointerSizeLog2);
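+  // Take the runtime path if the boilerplate's instance size does not match
+  // the size expected from the properties count.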
+  GotoIfNot(WordEqual(instance_size, size_in_words), call_runtime);
+
+  Node* copy = Allocate(allocation_size);
+
+  // Copy the boilerplate word by word, including its map and in-object
+  // fields.
+  Variable offset(this, MachineType::PointerRepresentation());
+  offset.Bind(IntPtrConstant(-kHeapObjectTag));
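+  // The offset starts at -kHeapObjectTag so that offset 0 addresses the
+  // object's first word (its map).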
+  Node* end_offset = IntPtrAdd(object_size, offset.value());
+  Label loop_body(this, &offset), loop_check(this, &offset);
+  // The object size is always greater than zero, so the loop body can be
+  // entered without checking the bound first.
+  Goto(&loop_body);
+  Bind(&loop_body);
+  {
+    // The Allocate above guarantees that the copy lies in new space. This
+    // allows us to skip write barriers. This is necessary since we may also be
+    // copying unboxed doubles.
+    Node* field = Load(MachineType::IntPtr(), boilerplate, offset.value());
+    StoreNoWriteBarrier(MachineType::PointerRepresentation(), copy,
+                        offset.value(), field);
+    Goto(&loop_check);
+  }
+  Bind(&loop_check);
+  {
+    offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
+    GotoIfNot(IntPtrGreaterThanOrEqual(offset.value(), end_offset), &loop_body);
+  }
+
+  if (FLAG_allocation_site_pretenuring) {
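+    // Place an AllocationMemento right behind the copied object and bump the
+    // site's create count so the heap can gather pretenuring feedback.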
+    Node* memento = InnerAllocate(copy, object_size);
+    StoreMapNoWriteBarrier(memento, Heap::kAllocationMementoMapRootIndex);
+    StoreObjectFieldNoWriteBarrier(
+        memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
+    Node* memento_create_count = LoadObjectField(
+        allocation_site, AllocationSite::kPretenureCreateCountOffset);
+    memento_create_count =
+        SmiAdd(memento_create_count, SmiConstant(Smi::FromInt(1)));
+    StoreObjectFieldNoWriteBarrier(allocation_site,
+                                   AllocationSite::kPretenureCreateCountOffset,
+                                   memento_create_count);
+  }
+
+  // TODO(verwaest): Allocate and fill in double boxes.
+  return copy;
+}
+
+void ConstructorBuiltinsAssembler::CreateFastCloneShallowObjectBuiltin(
+    int properties_count) {
+  DCHECK_GE(properties_count, 0);
+  DCHECK_LE(properties_count, kMaximumClonedShallowObjectProperties);
+  Label call_runtime(this);
+  Node* closure = Parameter(0);
+  Node* literals_index = Parameter(1);
+
+  Node* properties_count_node =
+      IntPtrConstant(FastCloneShallowObjectPropertiesCount(properties_count));
+  Node* copy = EmitFastCloneShallowObject(
+      &call_runtime, closure, literals_index, properties_count_node);
+  Return(copy);
+
+  Bind(&call_runtime);
+  Node* constant_properties = Parameter(2);
+  Node* flags = Parameter(3);
+  Node* context = Parameter(4);
+  TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+                  literals_index, constant_properties, flags);
+}
+
+#define SHALLOW_OBJECT_BUILTIN(props)                                       \
+  TF_BUILTIN(FastCloneShallowObject##props, ConstructorBuiltinsAssembler) { \
+    CreateFastCloneShallowObjectBuiltin(props);                             \
+  }
+
+SHALLOW_OBJECT_BUILTIN(0);
+SHALLOW_OBJECT_BUILTIN(1);
+SHALLOW_OBJECT_BUILTIN(2);
+SHALLOW_OBJECT_BUILTIN(3);
+SHALLOW_OBJECT_BUILTIN(4);
+SHALLOW_OBJECT_BUILTIN(5);
+SHALLOW_OBJECT_BUILTIN(6);
+
+Handle<Code> Builtins::NewCloneShallowObject(int length) {
+  switch (length) {
+    case 0:
+      return FastCloneShallowObject0();
+    case 1:
+      return FastCloneShallowObject1();
+    case 2:
+      return FastCloneShallowObject2();
+    case 3:
+      return FastCloneShallowObject3();
+    case 4:
+      return FastCloneShallowObject4();
+    case 5:
+      return FastCloneShallowObject5();
+    case 6:
+      return FastCloneShallowObject6();
+    default:
+      UNREACHABLE();
+  }
+  return Handle<Code>::null();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-constructor.h b/src/builtins/builtins-constructor.h
new file mode 100644
index 0000000..68629a7
--- /dev/null
+++ b/src/builtins/builtins-constructor.h
@@ -0,0 +1,68 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
+
+class ConstructorBuiltinsAssembler : public CodeStubAssembler {
+ public:
+  explicit ConstructorBuiltinsAssembler(CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
+
+  Node* EmitFastNewClosure(Node* shared_info, Node* feedback_vector, Node* slot,
+                           Node* context);
+  Node* EmitFastNewFunctionContext(Node* closure, Node* slots, Node* context,
+                                   ScopeType scope_type);
+  static int MaximumFunctionContextSlots();
+
+  Node* EmitFastCloneRegExp(Node* closure, Node* literal_index, Node* pattern,
+                            Node* flags, Node* context);
+  Node* EmitFastCloneShallowArray(Node* closure, Node* literal_index,
+                                  Node* context,
+                                  CodeAssemblerLabel* call_runtime,
+                                  AllocationSiteMode allocation_site_mode);
+
+  // Maximum number of elements in a copied array (chosen so that even an
+  // array backed by a double backing store fits into new space).
+  static const int kMaximumClonedShallowArrayElements =
+      JSArray::kInitialMaxFastElementArray * kPointerSize / kDoubleSize;
+
+  void CreateFastCloneShallowArrayBuiltin(
+      AllocationSiteMode allocation_site_mode);
+
+  // Maximum number of properties in a copied object.
+  static const int kMaximumClonedShallowObjectProperties = 6;
+  static int FastCloneShallowObjectPropertiesCount(int literal_length);
+  Node* EmitFastCloneShallowObject(CodeAssemblerLabel* call_runtime,
+                                   Node* closure, Node* literals_index,
+                                   Node* properties_count);
+  void CreateFastCloneShallowObjectBuiltin(int properties_count);
+
+  Node* EmitFastNewObject(Node* context, Node* target, Node* new_target);
+
+  Node* EmitFastNewObject(Node* context, Node* target, Node* new_target,
+                          CodeAssemblerLabel* call_runtime);
+
+ private:
+  static const int kMaximumSlots = 0x8000;
+  static const int kSmallMaximumSlots = 10;
+
+  Node* NonEmptyShallowClone(Node* boilerplate, Node* boilerplate_map,
+                             Node* boilerplate_elements, Node* allocation_site,
+                             Node* capacity, ElementsKind kind);
+
+  // FastNewFunctionContext can only allocate contexts that fit in
+  // new space.
+  STATIC_ASSERT(((kMaximumSlots + Context::MIN_CONTEXT_SLOTS) * kPointerSize +
+                 FixedArray::kHeaderSize) < kMaxRegularHeapObjectSize);
+};
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-conversion.cc b/src/builtins/builtins-conversion.cc
index 0eaf79c..2aff1c5 100644
--- a/src/builtins/builtins-conversion.cc
+++ b/src/builtins/builtins-conversion.cc
@@ -2,9 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
 #include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -23,6 +25,7 @@
 }
 
 namespace {
+
 // ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] )
 void Generate_NonPrimitiveToPrimitive(CodeStubAssembler* assembler,
                                       ToPrimitiveHint hint) {
@@ -51,7 +54,8 @@
   {
     // Invoke the {exotic_to_prim} method on the {input} with a string
     // representation of the {hint}.
-    Callable callable = CodeFactory::Call(assembler->isolate());
+    Callable callable = CodeFactory::Call(
+        assembler->isolate(), ConvertReceiverMode::kNotNullOrUndefined);
     Node* hint_string = assembler->HeapConstant(
         assembler->factory()->ToPrimitiveHintString(hint));
     Node* result = assembler->CallJS(callable, context, exotic_to_prim, input,
@@ -92,113 +96,119 @@
     assembler->TailCallStub(callable, context, input);
   }
 }
-}  // anonymous namespace
+
+}  // namespace
 
 void Builtins::Generate_NonPrimitiveToPrimitive_Default(
-    CodeStubAssembler* assembler) {
-  Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kDefault);
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_NonPrimitiveToPrimitive(&assembler, ToPrimitiveHint::kDefault);
 }
 
 void Builtins::Generate_NonPrimitiveToPrimitive_Number(
-    CodeStubAssembler* assembler) {
-  Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kNumber);
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_NonPrimitiveToPrimitive(&assembler, ToPrimitiveHint::kNumber);
 }
 
 void Builtins::Generate_NonPrimitiveToPrimitive_String(
-    CodeStubAssembler* assembler) {
-  Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kString);
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_NonPrimitiveToPrimitive(&assembler, ToPrimitiveHint::kString);
 }
 
-void Builtins::Generate_StringToNumber(CodeStubAssembler* assembler) {
+void Builtins::Generate_StringToNumber(compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
   typedef TypeConversionDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
 
-  Node* input = assembler->Parameter(Descriptor::kArgument);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* input = assembler.Parameter(Descriptor::kArgument);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  assembler->Return(assembler->StringToNumber(context, input));
+  assembler.Return(assembler.StringToNumber(context, input));
 }
 
-void Builtins::Generate_ToName(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToName(compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
   typedef TypeConversionDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
 
-  Node* input = assembler->Parameter(Descriptor::kArgument);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* input = assembler.Parameter(Descriptor::kArgument);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  assembler->Return(assembler->ToName(context, input));
+  assembler.Return(assembler.ToName(context, input));
 }
 
 // static
-void Builtins::Generate_NonNumberToNumber(CodeStubAssembler* assembler) {
+void Builtins::Generate_NonNumberToNumber(compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
   typedef TypeConversionDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
 
-  Node* input = assembler->Parameter(Descriptor::kArgument);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* input = assembler.Parameter(Descriptor::kArgument);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  assembler->Return(assembler->NonNumberToNumber(context, input));
+  assembler.Return(assembler.NonNumberToNumber(context, input));
 }
 
 // ES6 section 7.1.3 ToNumber ( argument )
-void Builtins::Generate_ToNumber(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToNumber(compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
   typedef TypeConversionDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
 
-  Node* input = assembler->Parameter(Descriptor::kArgument);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* input = assembler.Parameter(Descriptor::kArgument);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  assembler->Return(assembler->ToNumber(context, input));
+  assembler.Return(assembler.ToNumber(context, input));
 }
 
-void Builtins::Generate_ToString(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToString(compiler::CodeAssemblerState* state) {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
   typedef TypeConversionDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
 
-  Node* input = assembler->Parameter(Descriptor::kArgument);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* input = assembler.Parameter(Descriptor::kArgument);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  Label is_number(assembler);
-  Label runtime(assembler);
+  Label is_number(&assembler);
+  Label runtime(&assembler);
 
-  assembler->GotoIf(assembler->TaggedIsSmi(input), &is_number);
+  assembler.GotoIf(assembler.TaggedIsSmi(input), &is_number);
 
-  Node* input_map = assembler->LoadMap(input);
-  Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
+  Node* input_map = assembler.LoadMap(input);
+  Node* input_instance_type = assembler.LoadMapInstanceType(input_map);
 
-  Label not_string(assembler);
-  assembler->GotoUnless(assembler->IsStringInstanceType(input_instance_type),
-                        &not_string);
-  assembler->Return(input);
+  Label not_string(&assembler);
+  assembler.GotoIfNot(assembler.IsStringInstanceType(input_instance_type),
+                      &not_string);
+  assembler.Return(input);
 
-  Label not_heap_number(assembler);
+  Label not_heap_number(&assembler);
 
-  assembler->Bind(&not_string);
+  assembler.Bind(&not_string);
   {
-    assembler->GotoUnless(
-        assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
-        &not_heap_number);
-    assembler->Goto(&is_number);
+    assembler.GotoIfNot(assembler.IsHeapNumberMap(input_map), &not_heap_number);
+    assembler.Goto(&is_number);
   }
 
-  assembler->Bind(&is_number);
-  { assembler->Return(assembler->NumberToString(context, input)); }
+  assembler.Bind(&is_number);
+  { assembler.Return(assembler.NumberToString(context, input)); }
 
-  assembler->Bind(&not_heap_number);
+  assembler.Bind(&not_heap_number);
   {
-    assembler->GotoIf(
-        assembler->Word32NotEqual(input_instance_type,
-                                  assembler->Int32Constant(ODDBALL_TYPE)),
+    assembler.GotoIf(
+        assembler.Word32NotEqual(input_instance_type,
+                                 assembler.Int32Constant(ODDBALL_TYPE)),
         &runtime);
-    assembler->Return(
-        assembler->LoadObjectField(input, Oddball::kToStringOffset));
+    assembler.Return(
+        assembler.LoadObjectField(input, Oddball::kToStringOffset));
   }
 
-  assembler->Bind(&runtime);
+  assembler.Bind(&runtime);
   {
-    assembler->Return(
-        assembler->CallRuntime(Runtime::kToString, context, input));
+    assembler.Return(assembler.CallRuntime(Runtime::kToString, context, input));
   }
 }
 
@@ -214,6 +224,7 @@
 }
 
 namespace {
+
 // 7.1.1.1 OrdinaryToPrimitive ( O, hint )
 void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
                                   OrdinaryToPrimitiveHint hint) {
@@ -256,7 +267,8 @@
     assembler->Bind(&if_methodiscallable);
     {
       // Call the {method} on the {input}.
-      Callable callable = CodeFactory::Call(assembler->isolate());
+      Callable callable = CodeFactory::Call(
+          assembler->isolate(), ConvertReceiverMode::kNotNullOrUndefined);
       Node* result = assembler->CallJS(callable, context, method, input);
       var_result.Bind(result);
 
@@ -280,197 +292,215 @@
   assembler->Bind(&return_result);
   assembler->Return(var_result.value());
 }
-}  // anonymous namespace
+
+}  // namespace
 
 void Builtins::Generate_OrdinaryToPrimitive_Number(
-    CodeStubAssembler* assembler) {
-  Generate_OrdinaryToPrimitive(assembler, OrdinaryToPrimitiveHint::kNumber);
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_OrdinaryToPrimitive(&assembler, OrdinaryToPrimitiveHint::kNumber);
 }
 
 void Builtins::Generate_OrdinaryToPrimitive_String(
-    CodeStubAssembler* assembler) {
-  Generate_OrdinaryToPrimitive(assembler, OrdinaryToPrimitiveHint::kString);
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_OrdinaryToPrimitive(&assembler, OrdinaryToPrimitiveHint::kString);
 }
 
 // ES6 section 7.1.2 ToBoolean ( argument )
-void Builtins::Generate_ToBoolean(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToBoolean(compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Label Label;
   typedef TypeConversionDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
 
-  Node* value = assembler->Parameter(Descriptor::kArgument);
+  Node* value = assembler.Parameter(Descriptor::kArgument);
 
-  Label return_true(assembler), return_false(assembler);
-  assembler->BranchIfToBooleanIsTrue(value, &return_true, &return_false);
+  Label return_true(&assembler), return_false(&assembler);
+  assembler.BranchIfToBooleanIsTrue(value, &return_true, &return_false);
 
-  assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  assembler.Bind(&return_true);
+  assembler.Return(assembler.BooleanConstant(true));
 
-  assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
+  assembler.Bind(&return_false);
+  assembler.Return(assembler.BooleanConstant(false));
 }
 
-void Builtins::Generate_ToLength(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToLength(compiler::CodeAssemblerState* state) {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Variable Variable;
+  CodeStubAssembler assembler(state);
 
-  Node* context = assembler->Parameter(1);
+  Node* context = assembler.Parameter(1);
 
   // We might need to loop once for ToNumber conversion.
-  Variable var_len(assembler, MachineRepresentation::kTagged);
-  Label loop(assembler, &var_len);
-  var_len.Bind(assembler->Parameter(0));
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  Variable var_len(&assembler, MachineRepresentation::kTagged);
+  Label loop(&assembler, &var_len);
+  var_len.Bind(assembler.Parameter(0));
+  assembler.Goto(&loop);
+  assembler.Bind(&loop);
   {
     // Shared entry points.
-    Label return_len(assembler),
-        return_two53minus1(assembler, Label::kDeferred),
-        return_zero(assembler, Label::kDeferred);
+    Label return_len(&assembler),
+        return_two53minus1(&assembler, Label::kDeferred),
+        return_zero(&assembler, Label::kDeferred);
 
     // Load the current {len} value.
     Node* len = var_len.value();
 
     // Check if {len} is a positive Smi.
-    assembler->GotoIf(assembler->WordIsPositiveSmi(len), &return_len);
+    assembler.GotoIf(assembler.TaggedIsPositiveSmi(len), &return_len);
 
     // Check if {len} is a (negative) Smi.
-    assembler->GotoIf(assembler->TaggedIsSmi(len), &return_zero);
+    assembler.GotoIf(assembler.TaggedIsSmi(len), &return_zero);
 
     // Check if {len} is a HeapNumber.
-    Label if_lenisheapnumber(assembler),
-        if_lenisnotheapnumber(assembler, Label::kDeferred);
-    assembler->Branch(assembler->IsHeapNumberMap(assembler->LoadMap(len)),
-                      &if_lenisheapnumber, &if_lenisnotheapnumber);
+    Label if_lenisheapnumber(&assembler),
+        if_lenisnotheapnumber(&assembler, Label::kDeferred);
+    assembler.Branch(assembler.IsHeapNumberMap(assembler.LoadMap(len)),
+                     &if_lenisheapnumber, &if_lenisnotheapnumber);
 
-    assembler->Bind(&if_lenisheapnumber);
+    assembler.Bind(&if_lenisheapnumber);
     {
       // Load the floating-point value of {len}.
-      Node* len_value = assembler->LoadHeapNumberValue(len);
+      Node* len_value = assembler.LoadHeapNumberValue(len);
 
       // Check if {len} is not greater than zero.
-      assembler->GotoUnless(assembler->Float64GreaterThan(
-                                len_value, assembler->Float64Constant(0.0)),
-                            &return_zero);
+      assembler.GotoIfNot(assembler.Float64GreaterThan(
+                              len_value, assembler.Float64Constant(0.0)),
+                          &return_zero);
 
       // Check if {len} is greater than or equal to 2^53-1.
-      assembler->GotoIf(
-          assembler->Float64GreaterThanOrEqual(
-              len_value, assembler->Float64Constant(kMaxSafeInteger)),
+      assembler.GotoIf(
+          assembler.Float64GreaterThanOrEqual(
+              len_value, assembler.Float64Constant(kMaxSafeInteger)),
           &return_two53minus1);
 
       // Round the {len} towards -Infinity.
-      Node* value = assembler->Float64Floor(len_value);
-      Node* result = assembler->ChangeFloat64ToTagged(value);
-      assembler->Return(result);
+      Node* value = assembler.Float64Floor(len_value);
+      Node* result = assembler.ChangeFloat64ToTagged(value);
+      assembler.Return(result);
     }
 
-    assembler->Bind(&if_lenisnotheapnumber);
+    assembler.Bind(&if_lenisnotheapnumber);
     {
       // Need to convert {len} to a Number first.
-      Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
-      var_len.Bind(assembler->CallStub(callable, context, len));
-      assembler->Goto(&loop);
+      Callable callable = CodeFactory::NonNumberToNumber(assembler.isolate());
+      var_len.Bind(assembler.CallStub(callable, context, len));
+      assembler.Goto(&loop);
     }
 
-    assembler->Bind(&return_len);
-    assembler->Return(var_len.value());
+    assembler.Bind(&return_len);
+    assembler.Return(var_len.value());
 
-    assembler->Bind(&return_two53minus1);
-    assembler->Return(assembler->NumberConstant(kMaxSafeInteger));
+    assembler.Bind(&return_two53minus1);
+    assembler.Return(assembler.NumberConstant(kMaxSafeInteger));
 
-    assembler->Bind(&return_zero);
-    assembler->Return(assembler->SmiConstant(Smi::kZero));
+    assembler.Bind(&return_zero);
+    assembler.Return(assembler.SmiConstant(Smi::kZero));
   }
 }
 
-void Builtins::Generate_ToInteger(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToInteger(compiler::CodeAssemblerState* state) {
   typedef TypeConversionDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
 
-  compiler::Node* input = assembler->Parameter(Descriptor::kArgument);
-  compiler::Node* context = assembler->Parameter(Descriptor::kContext);
+  compiler::Node* input = assembler.Parameter(Descriptor::kArgument);
+  compiler::Node* context = assembler.Parameter(Descriptor::kContext);
 
-  assembler->Return(assembler->ToInteger(context, input));
+  assembler.Return(assembler.ToInteger(context, input));
 }
 
 // ES6 section 7.1.13 ToObject (argument)
-void Builtins::Generate_ToObject(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToObject(compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Label Label;
   typedef CodeStubAssembler::Variable Variable;
   typedef TypeConversionDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
 
-  Label if_number(assembler, Label::kDeferred), if_notsmi(assembler),
-      if_jsreceiver(assembler), if_noconstructor(assembler, Label::kDeferred),
-      if_wrapjsvalue(assembler);
+  Label if_number(&assembler, Label::kDeferred), if_notsmi(&assembler),
+      if_jsreceiver(&assembler), if_noconstructor(&assembler, Label::kDeferred),
+      if_wrapjsvalue(&assembler);
 
-  Node* object = assembler->Parameter(Descriptor::kArgument);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* object = assembler.Parameter(Descriptor::kArgument);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  Variable constructor_function_index_var(assembler,
+  Variable constructor_function_index_var(&assembler,
                                           MachineType::PointerRepresentation());
 
-  assembler->Branch(assembler->TaggedIsSmi(object), &if_number, &if_notsmi);
+  assembler.Branch(assembler.TaggedIsSmi(object), &if_number, &if_notsmi);
 
-  assembler->Bind(&if_notsmi);
-  Node* map = assembler->LoadMap(object);
+  assembler.Bind(&if_notsmi);
+  Node* map = assembler.LoadMap(object);
 
-  assembler->GotoIf(assembler->IsHeapNumberMap(map), &if_number);
+  assembler.GotoIf(assembler.IsHeapNumberMap(map), &if_number);
 
-  Node* instance_type = assembler->LoadMapInstanceType(map);
-  assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
-                    &if_jsreceiver);
+  Node* instance_type = assembler.LoadMapInstanceType(map);
+  assembler.GotoIf(assembler.IsJSReceiverInstanceType(instance_type),
+                   &if_jsreceiver);
 
   Node* constructor_function_index =
-      assembler->LoadMapConstructorFunctionIndex(map);
-  assembler->GotoIf(assembler->WordEqual(constructor_function_index,
-                                         assembler->IntPtrConstant(
-                                             Map::kNoConstructorFunctionIndex)),
-                    &if_noconstructor);
+      assembler.LoadMapConstructorFunctionIndex(map);
+  assembler.GotoIf(assembler.WordEqual(constructor_function_index,
+                                       assembler.IntPtrConstant(
+                                           Map::kNoConstructorFunctionIndex)),
+                   &if_noconstructor);
   constructor_function_index_var.Bind(constructor_function_index);
-  assembler->Goto(&if_wrapjsvalue);
+  assembler.Goto(&if_wrapjsvalue);
 
-  assembler->Bind(&if_number);
+  assembler.Bind(&if_number);
   constructor_function_index_var.Bind(
-      assembler->IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
-  assembler->Goto(&if_wrapjsvalue);
+      assembler.IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
+  assembler.Goto(&if_wrapjsvalue);
 
-  assembler->Bind(&if_wrapjsvalue);
-  Node* native_context = assembler->LoadNativeContext(context);
-  Node* constructor = assembler->LoadFixedArrayElement(
-      native_context, constructor_function_index_var.value(), 0,
-      CodeStubAssembler::INTPTR_PARAMETERS);
-  Node* initial_map = assembler->LoadObjectField(
+  assembler.Bind(&if_wrapjsvalue);
+  Node* native_context = assembler.LoadNativeContext(context);
+  Node* constructor = assembler.LoadFixedArrayElement(
+      native_context, constructor_function_index_var.value());
+  Node* initial_map = assembler.LoadObjectField(
       constructor, JSFunction::kPrototypeOrInitialMapOffset);
-  Node* js_value = assembler->Allocate(JSValue::kSize);
-  assembler->StoreMapNoWriteBarrier(js_value, initial_map);
-  assembler->StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
-                                  Heap::kEmptyFixedArrayRootIndex);
-  assembler->StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
-                                  Heap::kEmptyFixedArrayRootIndex);
-  assembler->StoreObjectField(js_value, JSValue::kValueOffset, object);
-  assembler->Return(js_value);
+  Node* js_value = assembler.Allocate(JSValue::kSize);
+  assembler.StoreMapNoWriteBarrier(js_value, initial_map);
+  assembler.StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
+                                 Heap::kEmptyFixedArrayRootIndex);
+  assembler.StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
+                                 Heap::kEmptyFixedArrayRootIndex);
+  assembler.StoreObjectField(js_value, JSValue::kValueOffset, object);
+  assembler.Return(js_value);
 
-  assembler->Bind(&if_noconstructor);
-  assembler->TailCallRuntime(
+  assembler.Bind(&if_noconstructor);
+  assembler.TailCallRuntime(
       Runtime::kThrowUndefinedOrNullToObject, context,
-      assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
-          "ToObject", TENURED)));
+      assembler.HeapConstant(
+          assembler.factory()->NewStringFromAsciiChecked("ToObject", TENURED)));
 
-  assembler->Bind(&if_jsreceiver);
-  assembler->Return(object);
+  assembler.Bind(&if_jsreceiver);
+  assembler.Return(object);
+}
+
+// Deprecated ES5 [[Class]] internal property (used to implement %_ClassOf).
+void Builtins::Generate_ClassOf(compiler::CodeAssemblerState* state) {
+  typedef compiler::Node Node;
+  typedef TypeofDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
+
+  Node* object = assembler.Parameter(Descriptor::kObject);
+
+  assembler.Return(assembler.ClassOf(object));
 }
 
 // ES6 section 12.5.5 typeof operator
-void Builtins::Generate_Typeof(CodeStubAssembler* assembler) {
+void Builtins::Generate_Typeof(compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
   typedef TypeofDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
 
-  Node* object = assembler->Parameter(Descriptor::kObject);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* object = assembler.Parameter(Descriptor::kObject);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  assembler->Return(assembler->Typeof(object, context));
+  assembler.Return(assembler.Typeof(object, context));
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-dataview.cc b/src/builtins/builtins-dataview.cc
index 45a5fd9..131749c 100644
--- a/src/builtins/builtins-dataview.cc
+++ b/src/builtins/builtins-dataview.cc
@@ -2,8 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -42,8 +47,7 @@
   Handle<Object> offset;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, offset,
-      Object::ToIndex(isolate, byte_offset,
-                      MessageTemplate::kInvalidDataViewOffset));
+      Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset));
 
   // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
   // We currently violate the specification at this point.
@@ -55,8 +59,7 @@
   // 7. If offset > bufferByteLength, throw a RangeError exception
   if (offset->Number() > buffer_byte_length) {
     THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate,
-        NewRangeError(MessageTemplate::kInvalidDataViewOffset, offset));
+        isolate, NewRangeError(MessageTemplate::kInvalidOffset, offset));
   }
 
   Handle<Object> view_byte_length;
diff --git a/src/builtins/builtins-date.cc b/src/builtins/builtins-date.cc
index 949620b..1bc1dfa 100644
--- a/src/builtins/builtins-date.cc
+++ b/src/builtins/builtins-date.cc
@@ -2,10 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/conversions.h"
+#include "src/counters.h"
 #include "src/dateparser-inl.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -209,7 +213,7 @@
   if (argc == 0) {
     time_val = JSDate::CurrentTimeValue(isolate);
   } else if (argc == 1) {
-    Handle<Object> value = args.at<Object>(1);
+    Handle<Object> value = args.at(1);
     if (value->IsJSDate()) {
       time_val = Handle<JSDate>::cast(value)->value()->Number();
     } else {
@@ -226,37 +230,37 @@
   } else {
     Handle<Object> year_object;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
-                                       Object::ToNumber(args.at<Object>(1)));
+                                       Object::ToNumber(args.at(1)));
     Handle<Object> month_object;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
-                                       Object::ToNumber(args.at<Object>(2)));
+                                       Object::ToNumber(args.at(2)));
     double year = year_object->Number();
     double month = month_object->Number();
     double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
     if (argc >= 3) {
       Handle<Object> date_object;
       ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
-                                         Object::ToNumber(args.at<Object>(3)));
+                                         Object::ToNumber(args.at(3)));
       date = date_object->Number();
       if (argc >= 4) {
         Handle<Object> hours_object;
-        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-            isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hours_object,
+                                           Object::ToNumber(args.at(4)));
         hours = hours_object->Number();
         if (argc >= 5) {
           Handle<Object> minutes_object;
-          ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-              isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+          ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, minutes_object,
+                                             Object::ToNumber(args.at(5)));
           minutes = minutes_object->Number();
           if (argc >= 6) {
             Handle<Object> seconds_object;
-            ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-                isolate, seconds_object, Object::ToNumber(args.at<Object>(6)));
+            ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, seconds_object,
+                                               Object::ToNumber(args.at(6)));
             seconds = seconds_object->Number();
             if (argc >= 7) {
               Handle<Object> ms_object;
-              ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-                  isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+              ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms_object,
+                                                 Object::ToNumber(args.at(7)));
               ms = ms_object->Number();
             }
           }
@@ -301,43 +305,42 @@
   HandleScope scope(isolate);
   int const argc = args.length() - 1;
   double year = std::numeric_limits<double>::quiet_NaN();
-  double month = std::numeric_limits<double>::quiet_NaN();
-  double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
+  double month = 0.0, date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0,
+         ms = 0.0;
   if (argc >= 1) {
     Handle<Object> year_object;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
-                                       Object::ToNumber(args.at<Object>(1)));
+                                       Object::ToNumber(args.at(1)));
     year = year_object->Number();
     if (argc >= 2) {
       Handle<Object> month_object;
       ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
-                                         Object::ToNumber(args.at<Object>(2)));
+                                         Object::ToNumber(args.at(2)));
       month = month_object->Number();
       if (argc >= 3) {
         Handle<Object> date_object;
-        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-            isolate, date_object, Object::ToNumber(args.at<Object>(3)));
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
+                                           Object::ToNumber(args.at(3)));
         date = date_object->Number();
         if (argc >= 4) {
           Handle<Object> hours_object;
-          ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-              isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+          ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hours_object,
+                                             Object::ToNumber(args.at(4)));
           hours = hours_object->Number();
           if (argc >= 5) {
             Handle<Object> minutes_object;
-            ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-                isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+            ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, minutes_object,
+                                               Object::ToNumber(args.at(5)));
             minutes = minutes_object->Number();
             if (argc >= 6) {
               Handle<Object> seconds_object;
-              ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-                  isolate, seconds_object,
-                  Object::ToNumber(args.at<Object>(6)));
+              ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, seconds_object,
+                                                 Object::ToNumber(args.at(6)));
               seconds = seconds_object->Number();
               if (argc >= 7) {
                 Handle<Object> ms_object;
                 ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-                    isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+                    isolate, ms_object, Object::ToNumber(args.at(7)));
                 ms = ms_object->Number();
               }
             }
@@ -394,11 +397,11 @@
     dt = day;
   }
   if (argc >= 2) {
-    Handle<Object> month = args.at<Object>(2);
+    Handle<Object> month = args.at(2);
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
     m = month->Number();
     if (argc >= 3) {
-      Handle<Object> date = args.at<Object>(3);
+      Handle<Object> date = args.at(3);
       ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
       dt = date->Number();
     }
@@ -425,15 +428,15 @@
     double s = (time_within_day / 1000) % 60;
     double milli = time_within_day % 1000;
     if (argc >= 2) {
-      Handle<Object> min = args.at<Object>(2);
+      Handle<Object> min = args.at(2);
       ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
       m = min->Number();
       if (argc >= 3) {
-        Handle<Object> sec = args.at<Object>(3);
+        Handle<Object> sec = args.at(3);
         ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
         s = sec->Number();
         if (argc >= 4) {
-          Handle<Object> ms = args.at<Object>(4);
+          Handle<Object> ms = args.at(4);
           ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
           milli = ms->Number();
         }
@@ -482,11 +485,11 @@
     double s = (time_within_day / 1000) % 60;
     double milli = time_within_day % 1000;
     if (argc >= 2) {
-      Handle<Object> sec = args.at<Object>(2);
+      Handle<Object> sec = args.at(2);
       ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
       s = sec->Number();
       if (argc >= 3) {
-        Handle<Object> ms = args.at<Object>(3);
+        Handle<Object> ms = args.at(3);
         ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
         milli = ms->Number();
       }
@@ -514,7 +517,7 @@
     double m = month->Number();
     double dt = day;
     if (argc >= 2) {
-      Handle<Object> date = args.at<Object>(2);
+      Handle<Object> date = args.at(2);
       ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
       dt = date->Number();
     }
@@ -541,7 +544,7 @@
     double s = sec->Number();
     double milli = time_within_day % 1000;
     if (argc >= 2) {
-      Handle<Object> ms = args.at<Object>(2);
+      Handle<Object> ms = args.at(2);
       ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
       milli = ms->Number();
     }
@@ -595,11 +598,11 @@
     dt = day;
   }
   if (argc >= 2) {
-    Handle<Object> month = args.at<Object>(2);
+    Handle<Object> month = args.at(2);
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
     m = month->Number();
     if (argc >= 3) {
-      Handle<Object> date = args.at<Object>(3);
+      Handle<Object> date = args.at(3);
       ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
       dt = date->Number();
     }
@@ -625,15 +628,15 @@
     double s = (time_within_day / 1000) % 60;
     double milli = time_within_day % 1000;
     if (argc >= 2) {
-      Handle<Object> min = args.at<Object>(2);
+      Handle<Object> min = args.at(2);
       ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
       m = min->Number();
       if (argc >= 3) {
-        Handle<Object> sec = args.at<Object>(3);
+        Handle<Object> sec = args.at(3);
         ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
         s = sec->Number();
         if (argc >= 4) {
-          Handle<Object> ms = args.at<Object>(4);
+          Handle<Object> ms = args.at(4);
           ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
           milli = ms->Number();
         }
@@ -680,11 +683,11 @@
     double s = (time_within_day / 1000) % 60;
     double milli = time_within_day % 1000;
     if (argc >= 2) {
-      Handle<Object> sec = args.at<Object>(2);
+      Handle<Object> sec = args.at(2);
       ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
       s = sec->Number();
       if (argc >= 3) {
-        Handle<Object> ms = args.at<Object>(3);
+        Handle<Object> ms = args.at(3);
         ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
         milli = ms->Number();
       }
@@ -711,7 +714,7 @@
     double m = month->Number();
     double dt = day;
     if (argc >= 2) {
-      Handle<Object> date = args.at<Object>(2);
+      Handle<Object> date = args.at(2);
       ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
       dt = date->Number();
     }
@@ -737,7 +740,7 @@
     double s = sec->Number();
     double milli = time_within_day % 1000;
     if (argc >= 2) {
-      Handle<Object> ms = args.at<Object>(2);
+      Handle<Object> ms = args.at(2);
       ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
       milli = ms->Number();
     }
@@ -825,22 +828,6 @@
   return *isolate->factory()->NewStringFromAsciiChecked(buffer);
 }
 
-// ES6 section 20.3.4.44 Date.prototype.valueOf ( )
-BUILTIN(DatePrototypeValueOf) {
-  HandleScope scope(isolate);
-  CHECK_RECEIVER(JSDate, date, "Date.prototype.valueOf");
-  return date->value();
-}
-
-// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint )
-BUILTIN(DatePrototypeToPrimitive) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CHECK_RECEIVER(JSReceiver, receiver, "Date.prototype [ @@toPrimitive ]");
-  Handle<Object> hint = args.at<Object>(1);
-  RETURN_RESULT_OR_FAILURE(isolate, JSDate::ToPrimitive(receiver, hint));
-}
-
 // ES6 section B.2.4.1 Date.prototype.getYear ( )
 BUILTIN(DatePrototypeGetYear) {
   HandleScope scope(isolate);
@@ -908,9 +895,10 @@
   }
 }
 
-// static
-void Builtins::Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
-                                               int field_index) {
+namespace {
+
+void Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
+                                     int field_index) {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
 
@@ -952,7 +940,7 @@
     Node* function = assembler->ExternalConstant(
         ExternalReference::get_date_field_function(assembler->isolate()));
     Node* result = assembler->CallCFunction2(
-        MachineType::AnyTagged(), MachineType::Pointer(),
+        MachineType::AnyTagged(), MachineType::AnyTagged(),
         MachineType::AnyTagged(), function, receiver, field_index_smi);
     assembler->Return(result);
   }
@@ -960,105 +948,227 @@
   // Raise a TypeError if the receiver is not a date.
   assembler->Bind(&receiver_not_date);
   {
-    Node* result = assembler->CallRuntime(Runtime::kThrowNotDateError, context);
-    assembler->Return(result);
+    assembler->CallRuntime(Runtime::kThrowNotDateError, context);
+    assembler->Unreachable();
   }
 }
 
+}  // namespace
+
 // static
-void Builtins::Generate_DatePrototypeGetDate(CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kDay);
+void Builtins::Generate_DatePrototypeGetDate(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kDay);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetDay(CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kWeekday);
+void Builtins::Generate_DatePrototypeGetDay(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kWeekday);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetFullYear(CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kYear);
+void Builtins::Generate_DatePrototypeGetFullYear(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kYear);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetHours(CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kHour);
+void Builtins::Generate_DatePrototypeGetHours(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kHour);
 }
 
 // static
 void Builtins::Generate_DatePrototypeGetMilliseconds(
-    CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kMillisecond);
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kMillisecond);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetMinutes(CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kMinute);
+void Builtins::Generate_DatePrototypeGetMinutes(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kMinute);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetMonth(CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kMonth);
+void Builtins::Generate_DatePrototypeGetMonth(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kMonth);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetSeconds(CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kSecond);
+void Builtins::Generate_DatePrototypeGetSeconds(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kSecond);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetTime(CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kDateValue);
+void Builtins::Generate_DatePrototypeGetTime(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kDateValue);
 }
 
 // static
 void Builtins::Generate_DatePrototypeGetTimezoneOffset(
-    CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kTimezoneOffset);
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kTimezoneOffset);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetUTCDate(CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kDayUTC);
+void Builtins::Generate_DatePrototypeGetUTCDate(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kDayUTC);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetUTCDay(CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kWeekdayUTC);
+void Builtins::Generate_DatePrototypeGetUTCDay(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kWeekdayUTC);
 }
 
 // static
 void Builtins::Generate_DatePrototypeGetUTCFullYear(
-    CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kYearUTC);
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kYearUTC);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetUTCHours(CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kHourUTC);
+void Builtins::Generate_DatePrototypeGetUTCHours(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kHourUTC);
 }
 
 // static
 void Builtins::Generate_DatePrototypeGetUTCMilliseconds(
-    CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kMillisecondUTC);
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kMillisecondUTC);
 }
 
 // static
 void Builtins::Generate_DatePrototypeGetUTCMinutes(
-    CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kMinuteUTC);
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kMinuteUTC);
 }
 
 // static
-void Builtins::Generate_DatePrototypeGetUTCMonth(CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kMonthUTC);
+void Builtins::Generate_DatePrototypeGetUTCMonth(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kMonthUTC);
 }
 
 // static
 void Builtins::Generate_DatePrototypeGetUTCSeconds(
-    CodeStubAssembler* assembler) {
-  Generate_DatePrototype_GetField(assembler, JSDate::kSecondUTC);
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kSecondUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeValueOf(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  Generate_DatePrototype_GetField(&assembler, JSDate::kDateValue);
+}
+
+// static
+void Builtins::Generate_DatePrototypeToPrimitive(
+    compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+
+  Node* receiver = assembler.Parameter(0);
+  Node* hint = assembler.Parameter(1);
+  Node* context = assembler.Parameter(4);
+
+  // Check if the {receiver} is actually a JSReceiver.
+  Label receiver_is_invalid(&assembler, Label::kDeferred);
+  assembler.GotoIf(assembler.TaggedIsSmi(receiver), &receiver_is_invalid);
+  assembler.GotoIfNot(assembler.IsJSReceiver(receiver), &receiver_is_invalid);
+
+  // Dispatch to the appropriate OrdinaryToPrimitive builtin.
+  Label hint_is_number(&assembler), hint_is_string(&assembler),
+      hint_is_invalid(&assembler, Label::kDeferred);
+
+  // Fast cases for internalized strings.
+  Node* number_string = assembler.LoadRoot(Heap::knumber_stringRootIndex);
+  assembler.GotoIf(assembler.WordEqual(hint, number_string), &hint_is_number);
+  Node* default_string = assembler.LoadRoot(Heap::kdefault_stringRootIndex);
+  assembler.GotoIf(assembler.WordEqual(hint, default_string), &hint_is_string);
+  Node* string_string = assembler.LoadRoot(Heap::kstring_stringRootIndex);
+  assembler.GotoIf(assembler.WordEqual(hint, string_string), &hint_is_string);
+
+  // Slow-case with actual string comparisons.
+  Callable string_equal = CodeFactory::StringEqual(assembler.isolate());
+  assembler.GotoIf(assembler.TaggedIsSmi(hint), &hint_is_invalid);
+  assembler.GotoIfNot(assembler.IsString(hint), &hint_is_invalid);
+  assembler.GotoIf(assembler.WordEqual(assembler.CallStub(string_equal, context,
+                                                          hint, number_string),
+                                       assembler.TrueConstant()),
+                   &hint_is_number);
+  assembler.GotoIf(assembler.WordEqual(assembler.CallStub(string_equal, context,
+                                                          hint, default_string),
+                                       assembler.TrueConstant()),
+                   &hint_is_string);
+  assembler.GotoIf(assembler.WordEqual(assembler.CallStub(string_equal, context,
+                                                          hint, string_string),
+                                       assembler.TrueConstant()),
+                   &hint_is_string);
+  assembler.Goto(&hint_is_invalid);
+
+  // Use the OrdinaryToPrimitive builtin to convert to a Number.
+  assembler.Bind(&hint_is_number);
+  {
+    Callable callable = CodeFactory::OrdinaryToPrimitive(
+        assembler.isolate(), OrdinaryToPrimitiveHint::kNumber);
+    Node* result = assembler.CallStub(callable, context, receiver);
+    assembler.Return(result);
+  }
+
+  // Use the OrdinaryToPrimitive builtin to convert to a String.
+  assembler.Bind(&hint_is_string);
+  {
+    Callable callable = CodeFactory::OrdinaryToPrimitive(
+        assembler.isolate(), OrdinaryToPrimitiveHint::kString);
+    Node* result = assembler.CallStub(callable, context, receiver);
+    assembler.Return(result);
+  }
+
+  // Raise a TypeError if the {hint} is invalid.
+  assembler.Bind(&hint_is_invalid);
+  {
+    assembler.CallRuntime(Runtime::kThrowInvalidHint, context, hint);
+    assembler.Unreachable();
+  }
+
+  // Raise a TypeError if the {receiver} is not a JSReceiver instance.
+  assembler.Bind(&receiver_is_invalid);
+  {
+    assembler.CallRuntime(
+        Runtime::kThrowIncompatibleMethodReceiver, context,
+        assembler.HeapConstant(assembler.factory()->NewStringFromAsciiChecked(
+            "Date.prototype [ @@toPrimitive ]", TENURED)),
+        receiver);
+    assembler.Unreachable();
+  }
 }
 
 }  // namespace internal
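
The new Generate_DatePrototypeToPrimitive first compares the hint against the internalized "number", "default" and "string" strings, then falls back to full string comparisons, and throws for anything else. A rough stand-alone model of the resulting dispatch (plain C++, names illustrative):

#include <stdexcept>
#include <string>

enum class OrdinaryToPrimitiveHint { kNumber, kString };

// "number" selects number-first ordering; "string" and "default" both select
// string-first ordering; any other hint corresponds to
// Runtime::kThrowInvalidHint.
OrdinaryToPrimitiveHint DispatchToPrimitiveHint(const std::string& hint) {
  if (hint == "number") return OrdinaryToPrimitiveHint::kNumber;
  if (hint == "string" || hint == "default")
    return OrdinaryToPrimitiveHint::kString;
  throw std::invalid_argument("invalid @@toPrimitive hint");
}
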
diff --git a/src/builtins/builtins-debug.cc b/src/builtins/builtins-debug.cc
index 011eba3..de60328 100644
--- a/src/builtins/builtins-debug.cc
+++ b/src/builtins/builtins-debug.cc
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
 #include "src/debug/debug.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -19,8 +20,12 @@
                                        DebugCodegen::IGNORE_RESULT_REGISTER);
 }
 
-void Builtins::Generate_FrameDropper_LiveEdit(MacroAssembler* masm) {
-  DebugCodegen::GenerateFrameDropperLiveEdit(masm);
+void Builtins::Generate_FrameDropperTrampoline(MacroAssembler* masm) {
+  DebugCodegen::GenerateFrameDropperTrampoline(masm);
+}
+
+void Builtins::Generate_HandleDebuggerStatement(MacroAssembler* masm) {
+  DebugCodegen::GenerateHandleDebuggerStatement(masm);
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-error.cc b/src/builtins/builtins-error.cc
index 24ae56b..5b28863 100644
--- a/src/builtins/builtins-error.cc
+++ b/src/builtins/builtins-error.cc
@@ -5,7 +5,9 @@
 #include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
 
+#include "src/counters.h"
 #include "src/messages.h"
+#include "src/objects-inl.h"
 #include "src/property-descriptor.h"
 #include "src/string-builder.h"
 
diff --git a/src/builtins/builtins-function.cc b/src/builtins/builtins-function.cc
index 9a8ee79..e58cad3 100644
--- a/src/builtins/builtins-function.cc
+++ b/src/builtins/builtins-function.cc
@@ -2,10 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
 #include "src/compiler.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h"
 #include "src/string-builder.h"
 
 namespace v8 {
@@ -31,44 +36,55 @@
 
   // Build the source string.
   Handle<String> source;
+  int parameters_end_pos = kNoSourcePosition;
   {
     IncrementalStringBuilder builder(isolate);
     builder.AppendCharacter('(');
     builder.AppendCString(token);
-    builder.AppendCharacter('(');
+    if (FLAG_harmony_function_tostring) {
+      builder.AppendCString(" anonymous(");
+    } else {
+      builder.AppendCharacter('(');
+    }
     bool parenthesis_in_arg_string = false;
     if (argc > 1) {
       for (int i = 1; i < argc; ++i) {
         if (i > 1) builder.AppendCharacter(',');
         Handle<String> param;
         ASSIGN_RETURN_ON_EXCEPTION(
-            isolate, param, Object::ToString(isolate, args.at<Object>(i)),
-            Object);
+            isolate, param, Object::ToString(isolate, args.at(i)), Object);
         param = String::Flatten(param);
         builder.AppendString(param);
-        // If the formal parameters string include ) - an illegal
-        // character - it may make the combined function expression
-        // compile. We avoid this problem by checking for this early on.
-        DisallowHeapAllocation no_gc;  // Ensure vectors stay valid.
-        String::FlatContent param_content = param->GetFlatContent();
-        for (int i = 0, length = param->length(); i < length; ++i) {
-          if (param_content.Get(i) == ')') {
-            parenthesis_in_arg_string = true;
-            break;
+        if (!FLAG_harmony_function_tostring) {
+          // If the formal parameters string includes ')' - an illegal
+          // character - it may make the combined function expression
+          // compile. We avoid this problem by checking for it early on.
+          DisallowHeapAllocation no_gc;  // Ensure vectors stay valid.
+          String::FlatContent param_content = param->GetFlatContent();
+          for (int i = 0, length = param->length(); i < length; ++i) {
+            if (param_content.Get(i) == ')') {
+              parenthesis_in_arg_string = true;
+              break;
+            }
           }
         }
       }
-      // If the formal parameters include an unbalanced block comment, the
-      // function must be rejected. Since JavaScript does not allow nested
-      // comments we can include a trailing block comment to catch this.
-      builder.AppendCString("\n/**/");
+      if (!FLAG_harmony_function_tostring) {
+        // If the formal parameters include an unbalanced block comment, the
+        // function must be rejected. Since JavaScript does not allow nested
+        // comments we can include a trailing block comment to catch this.
+        builder.AppendCString("\n/*``*/");
+      }
+    }
+    if (FLAG_harmony_function_tostring) {
+      builder.AppendCharacter('\n');
+      parameters_end_pos = builder.Length();
     }
     builder.AppendCString(") {\n");
     if (argc > 0) {
       Handle<String> body;
       ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, body, Object::ToString(isolate, args.at<Object>(argc)),
-          Object);
+          isolate, body, Object::ToString(isolate, args.at(argc)), Object);
       builder.AppendString(body);
     }
     builder.AppendCString("\n})");
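
Under FLAG_harmony_function_tostring the parameter list is emitted as " anonymous(" followed by a newline whose builder offset is recorded in parameters_end_pos, so Function.prototype.toString can later reproduce the exact source; without the flag, the legacy ')' scan and the trailing /*``*/ comment guard against code injection through the parameter string. A hedged sketch of the text both paths assemble (illustrative helper, not the V8 builder):

#include <string>

// Returns the dynamic-function source for parameters "a,b" and body
// "return a+b"; `token` would be "function", "function*", and so on.
std::string BuildFunctionSource(const std::string& token, bool harmony) {
  std::string src = "(";
  src += token;
  src += harmony ? " anonymous(" : "(";
  src += "a,b";
  if (!harmony) src += "\n/*``*/";  // reject ')', comment and backtick injection
  if (harmony) src += "\n";         // parameters_end_pos records this offset
  src += ") {\nreturn a+b\n})";
  return src;
}
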
@@ -87,11 +103,12 @@
   // come from here.
   Handle<JSFunction> function;
   {
-    ASSIGN_RETURN_ON_EXCEPTION(isolate, function,
-                               Compiler::GetFunctionFromString(
-                                   handle(target->native_context(), isolate),
-                                   source, ONLY_SINGLE_FUNCTION_LITERAL),
-                               Object);
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, function,
+        Compiler::GetFunctionFromString(
+            handle(target->native_context(), isolate), source,
+            ONLY_SINGLE_FUNCTION_LITERAL, parameters_end_pos),
+        Object);
     Handle<Object> result;
     ASSIGN_RETURN_ON_EXCEPTION(
         isolate, result,
@@ -179,9 +196,9 @@
   Handle<Object> this_arg = isolate->factory()->undefined_value();
   ScopedVector<Handle<Object>> argv(std::max(0, args.length() - 2));
   if (args.length() > 1) {
-    this_arg = args.at<Object>(1);
+    this_arg = args.at(1);
     for (int i = 2; i < args.length(); ++i) {
-      argv[i - 2] = args.at<Object>(i);
+      argv[i - 2] = args.at(i);
     }
   }
   Handle<JSBoundFunction> function;
@@ -255,6 +272,184 @@
 // ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
 BUILTIN(FunctionPrototypeBind) { return DoFunctionBind(isolate, args); }
 
+void Builtins::Generate_FastFunctionPrototypeBind(
+    compiler::CodeAssemblerState* state) {
+  using compiler::Node;
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+  CodeStubAssembler assembler(state);
+  Label slow(&assembler);
+
+  Node* argc = assembler.Parameter(BuiltinDescriptor::kArgumentsCount);
+  Node* context = assembler.Parameter(BuiltinDescriptor::kContext);
+  Node* new_target = assembler.Parameter(BuiltinDescriptor::kNewTarget);
+
+  CodeStubArguments args(&assembler, assembler.ChangeInt32ToIntPtr(argc));
+
+  // Check that the receiver has the instance type JS_FUNCTION_TYPE.
+  Node* receiver = args.GetReceiver();
+  assembler.GotoIf(assembler.TaggedIsSmi(receiver), &slow);
+
+  Node* receiver_map = assembler.LoadMap(receiver);
+  Node* instance_type = assembler.LoadMapInstanceType(receiver_map);
+  assembler.GotoIf(
+      assembler.Word32NotEqual(instance_type,
+                               assembler.Int32Constant(JS_FUNCTION_TYPE)),
+      &slow);
+
+  // Disallow binding of slow-mode functions. We need to figure out whether
+  // the length and name properties are in their original state.
+  assembler.Comment("Disallow binding of slow-mode functions");
+  assembler.GotoIf(assembler.IsDictionaryMap(receiver_map), &slow);
+
+  // The descriptor array must be long enough to contain both the length and
+  // the name descriptors that are validated below.
+  assembler.Comment("Check descriptor array length");
+  Node* descriptors = assembler.LoadMapDescriptors(receiver_map);
+  Node* descriptors_length = assembler.LoadFixedArrayBaseLength(descriptors);
+  assembler.GotoIf(assembler.SmiLessThanOrEqual(descriptors_length,
+                                                assembler.SmiConstant(1)),
+                   &slow);
+
+  // Check whether the length and name properties are still present as
+  // AccessorInfo objects. In that case, their value can be recomputed even if
+  // the actual value on the object changes.
+  assembler.Comment("Check name and length properties");
+  const int length_index = JSFunction::kLengthDescriptorIndex;
+  Node* maybe_length = assembler.LoadFixedArrayElement(
+      descriptors, DescriptorArray::ToKeyIndex(length_index));
+  assembler.GotoIf(
+      assembler.WordNotEqual(maybe_length,
+                             assembler.LoadRoot(Heap::klength_stringRootIndex)),
+      &slow);
+
+  Node* maybe_length_accessor = assembler.LoadFixedArrayElement(
+      descriptors, DescriptorArray::ToValueIndex(length_index));
+  assembler.GotoIf(assembler.TaggedIsSmi(maybe_length_accessor), &slow);
+  Node* length_value_map = assembler.LoadMap(maybe_length_accessor);
+  assembler.GotoIfNot(assembler.IsAccessorInfoMap(length_value_map), &slow);
+
+  const int name_index = JSFunction::kNameDescriptorIndex;
+  Node* maybe_name = assembler.LoadFixedArrayElement(
+      descriptors, DescriptorArray::ToKeyIndex(name_index));
+  assembler.GotoIf(
+      assembler.WordNotEqual(maybe_name,
+                             assembler.LoadRoot(Heap::kname_stringRootIndex)),
+      &slow);
+
+  Node* maybe_name_accessor = assembler.LoadFixedArrayElement(
+      descriptors, DescriptorArray::ToValueIndex(name_index));
+  assembler.GotoIf(assembler.TaggedIsSmi(maybe_name_accessor), &slow);
+  Node* name_value_map = assembler.LoadMap(maybe_name_accessor);
+  assembler.GotoIfNot(assembler.IsAccessorInfoMap(name_value_map), &slow);
+
+  // Choose the right bound function map based on whether the target is
+  // constructable.
+  assembler.Comment("Choose the right bound function map");
+  Variable bound_function_map(&assembler, MachineRepresentation::kTagged);
+  Label with_constructor(&assembler);
+  CodeStubAssembler::VariableList vars({&bound_function_map}, assembler.zone());
+  Node* native_context = assembler.LoadNativeContext(context);
+
+  Label map_done(&assembler, vars);
+  Node* bit_field = assembler.LoadMapBitField(receiver_map);
+  int mask = static_cast<int>(1 << Map::kIsConstructor);
+  assembler.GotoIf(assembler.IsSetWord32(bit_field, mask), &with_constructor);
+
+  bound_function_map.Bind(assembler.LoadContextElement(
+      native_context, Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
+  assembler.Goto(&map_done);
+
+  assembler.Bind(&with_constructor);
+  bound_function_map.Bind(assembler.LoadContextElement(
+      native_context, Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX));
+  assembler.Goto(&map_done);
+
+  assembler.Bind(&map_done);
+
+  // Verify that __proto__ matches that of the target bound function.
+  assembler.Comment("Verify that __proto__ matches target bound function");
+  Node* prototype = assembler.LoadMapPrototype(receiver_map);
+  Node* expected_prototype =
+      assembler.LoadMapPrototype(bound_function_map.value());
+  assembler.GotoIf(assembler.WordNotEqual(prototype, expected_prototype),
+                   &slow);
+
+  // Allocate the arguments array.
+  assembler.Comment("Allocate the arguments array");
+  Variable argument_array(&assembler, MachineRepresentation::kTagged);
+  Label empty_arguments(&assembler);
+  Label arguments_done(&assembler, &argument_array);
+  assembler.GotoIf(
+      assembler.Uint32LessThanOrEqual(argc, assembler.Int32Constant(1)),
+      &empty_arguments);
+  Node* elements_length = assembler.ChangeUint32ToWord(
+      assembler.Int32Sub(argc, assembler.Int32Constant(1)));
+  Node* elements = assembler.AllocateFixedArray(FAST_ELEMENTS, elements_length);
+  Variable index(&assembler, MachineType::PointerRepresentation());
+  index.Bind(assembler.IntPtrConstant(0));
+  CodeStubAssembler::VariableList foreach_vars({&index}, assembler.zone());
+  args.ForEach(foreach_vars,
+               [&assembler, elements, &index](compiler::Node* arg) {
+                 assembler.StoreFixedArrayElement(elements, index.value(), arg);
+                 assembler.Increment(index);
+               },
+               assembler.IntPtrConstant(1));
+  argument_array.Bind(elements);
+  assembler.Goto(&arguments_done);
+
+  assembler.Bind(&empty_arguments);
+  argument_array.Bind(assembler.EmptyFixedArrayConstant());
+  assembler.Goto(&arguments_done);
+
+  assembler.Bind(&arguments_done);
+
+  // Determine bound receiver.
+  assembler.Comment("Determine bound receiver");
+  Variable bound_receiver(&assembler, MachineRepresentation::kTagged);
+  Label has_receiver(&assembler);
+  Label receiver_done(&assembler, &bound_receiver);
+  assembler.GotoIf(assembler.Word32NotEqual(argc, assembler.Int32Constant(0)),
+                   &has_receiver);
+  bound_receiver.Bind(assembler.UndefinedConstant());
+  assembler.Goto(&receiver_done);
+
+  assembler.Bind(&has_receiver);
+  bound_receiver.Bind(args.AtIndex(0));
+  assembler.Goto(&receiver_done);
+
+  assembler.Bind(&receiver_done);
+
+  // Allocate the resulting bound function.
+  assembler.Comment("Allocate the resulting bound function");
+  Node* bound_function = assembler.Allocate(JSBoundFunction::kSize);
+  assembler.StoreMapNoWriteBarrier(bound_function, bound_function_map.value());
+  assembler.StoreObjectFieldNoWriteBarrier(
+      bound_function, JSBoundFunction::kBoundTargetFunctionOffset, receiver);
+  assembler.StoreObjectFieldNoWriteBarrier(bound_function,
+                                           JSBoundFunction::kBoundThisOffset,
+                                           bound_receiver.value());
+  assembler.StoreObjectFieldNoWriteBarrier(
+      bound_function, JSBoundFunction::kBoundArgumentsOffset,
+      argument_array.value());
+  Node* empty_fixed_array = assembler.EmptyFixedArrayConstant();
+  assembler.StoreObjectFieldNoWriteBarrier(
+      bound_function, JSObject::kPropertiesOffset, empty_fixed_array);
+  assembler.StoreObjectFieldNoWriteBarrier(
+      bound_function, JSObject::kElementsOffset, empty_fixed_array);
+
+  args.PopAndReturn(bound_function);
+  assembler.Bind(&slow);
+
+  Node* target = assembler.LoadFromFrame(
+      StandardFrameConstants::kFunctionOffset, MachineType::TaggedPointer());
+  assembler.TailCallStub(
+      CodeFactory::FunctionPrototypeBind(assembler.isolate()), context, target,
+      new_target, argc);
+}
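
Every guard in the fast path above bails out to the generic FunctionPrototypeBind stub through the `slow` label; only plain JSFunctions whose length and name are still untouched AccessorInfo descriptors, and whose __proto__ matches the selected bound-function map, take the allocation fast path. Condensed into ordinary C++ (toy struct, assumption-level model):

// Each field mirrors one guard in Generate_FastFunctionPrototypeBind.
struct ReceiverChecks {
  bool is_js_function;           // instance type must be JS_FUNCTION_TYPE
  bool has_dictionary_map;       // slow-mode functions are rejected
  bool length_is_accessor_info;  // `length` descriptor still the default
  bool name_is_accessor_info;    // `name` descriptor still the default
  bool prototype_matches_map;    // __proto__ equals the bound map's prototype
};

bool CanUseFastBind(const ReceiverChecks& c) {
  return c.is_js_function && !c.has_dictionary_map &&
         c.length_is_accessor_info && c.name_is_accessor_info &&
         c.prototype_matches_map;  // otherwise: tail-call the generic stub
}
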
+
 // TODO(verwaest): This is a temporary helper until the FastFunctionBind stub
 // can tailcall to the builtin directly.
 RUNTIME_FUNCTION(Runtime_FunctionBind) {
@@ -283,14 +478,15 @@
 
 // ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
 void Builtins::Generate_FunctionPrototypeHasInstance(
-    CodeStubAssembler* assembler) {
+    compiler::CodeAssemblerState* state) {
   using compiler::Node;
+  CodeStubAssembler assembler(state);
 
-  Node* f = assembler->Parameter(0);
-  Node* v = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* result = assembler->OrdinaryHasInstance(context, f, v);
-  assembler->Return(result);
+  Node* f = assembler.Parameter(0);
+  Node* v = assembler.Parameter(1);
+  Node* context = assembler.Parameter(4);
+  Node* result = assembler.OrdinaryHasInstance(context, f, v);
+  assembler.Return(result);
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-generator.cc b/src/builtins/builtins-generator.cc
index fe1f2d2..14a11ed 100644
--- a/src/builtins/builtins-generator.cc
+++ b/src/builtins/builtins-generator.cc
@@ -2,115 +2,114 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
 #include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
-namespace {
+typedef compiler::CodeAssemblerState CodeAssemblerState;
 
-void Generate_GeneratorPrototypeResume(
-    CodeStubAssembler* assembler, JSGeneratorObject::ResumeMode resume_mode,
-    char const* const method_name) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+class GeneratorBuiltinsAssembler : public CodeStubAssembler {
+ public:
+  explicit GeneratorBuiltinsAssembler(CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
 
-  Node* receiver = assembler->Parameter(0);
-  Node* value = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* closed =
-      assembler->SmiConstant(Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+ protected:
+  void GeneratorPrototypeResume(JSGeneratorObject::ResumeMode resume_mode,
+                                char const* const method_name);
+};
+
+void GeneratorBuiltinsAssembler::GeneratorPrototypeResume(
+    JSGeneratorObject::ResumeMode resume_mode, char const* const method_name) {
+  Node* receiver = Parameter(0);
+  Node* value = Parameter(1);
+  Node* context = Parameter(4);
+  Node* closed = SmiConstant(JSGeneratorObject::kGeneratorClosed);
 
   // Check if the {receiver} is actually a JSGeneratorObject.
-  Label if_receiverisincompatible(assembler, Label::kDeferred);
-  assembler->GotoIf(assembler->TaggedIsSmi(receiver),
-                    &if_receiverisincompatible);
-  Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
-  assembler->GotoUnless(assembler->Word32Equal(
-                            receiver_instance_type,
-                            assembler->Int32Constant(JS_GENERATOR_OBJECT_TYPE)),
-                        &if_receiverisincompatible);
+  Label if_receiverisincompatible(this, Label::kDeferred);
+  GotoIf(TaggedIsSmi(receiver), &if_receiverisincompatible);
+  Node* receiver_instance_type = LoadInstanceType(receiver);
+  GotoIfNot(Word32Equal(receiver_instance_type,
+                        Int32Constant(JS_GENERATOR_OBJECT_TYPE)),
+            &if_receiverisincompatible);
 
   // Check if the {receiver} is running or already closed.
-  Node* receiver_continuation = assembler->LoadObjectField(
-      receiver, JSGeneratorObject::kContinuationOffset);
-  Label if_receiverisclosed(assembler, Label::kDeferred),
-      if_receiverisrunning(assembler, Label::kDeferred);
-  assembler->GotoIf(assembler->SmiEqual(receiver_continuation, closed),
-                    &if_receiverisclosed);
+  Node* receiver_continuation =
+      LoadObjectField(receiver, JSGeneratorObject::kContinuationOffset);
+  Label if_receiverisclosed(this, Label::kDeferred),
+      if_receiverisrunning(this, Label::kDeferred);
+  GotoIf(SmiEqual(receiver_continuation, closed), &if_receiverisclosed);
   DCHECK_LT(JSGeneratorObject::kGeneratorExecuting,
             JSGeneratorObject::kGeneratorClosed);
-  assembler->GotoIf(assembler->SmiLessThan(receiver_continuation, closed),
-                    &if_receiverisrunning);
+  GotoIf(SmiLessThan(receiver_continuation, closed), &if_receiverisrunning);
 
   // Resume the {receiver} using our trampoline.
-  Node* result = assembler->CallStub(
-      CodeFactory::ResumeGenerator(assembler->isolate()), context, value,
-      receiver, assembler->SmiConstant(Smi::FromInt(resume_mode)));
-  assembler->Return(result);
+  Node* result = CallStub(CodeFactory::ResumeGenerator(isolate()), context,
+                          value, receiver, SmiConstant(resume_mode));
+  Return(result);
 
-  assembler->Bind(&if_receiverisincompatible);
+  Bind(&if_receiverisincompatible);
   {
     // The {receiver} is not a valid JSGeneratorObject.
-    Node* result = assembler->CallRuntime(
-        Runtime::kThrowIncompatibleMethodReceiver, context,
-        assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
-            method_name, TENURED)),
-        receiver);
-    assembler->Return(result);  // Never reached.
+    CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
+                HeapConstant(
+                    factory()->NewStringFromAsciiChecked(method_name, TENURED)),
+                receiver);
+    Unreachable();
   }
 
-  assembler->Bind(&if_receiverisclosed);
+  Bind(&if_receiverisclosed);
   {
+    Callable create_iter_result_object =
+        CodeFactory::CreateIterResultObject(isolate());
+
     // The {receiver} is closed already.
     Node* result = nullptr;
     switch (resume_mode) {
       case JSGeneratorObject::kNext:
-        result = assembler->CallRuntime(Runtime::kCreateIterResultObject,
-                                        context, assembler->UndefinedConstant(),
-                                        assembler->BooleanConstant(true));
+        result = CallStub(create_iter_result_object, context,
+                          UndefinedConstant(), TrueConstant());
         break;
       case JSGeneratorObject::kReturn:
         result =
-            assembler->CallRuntime(Runtime::kCreateIterResultObject, context,
-                                   value, assembler->BooleanConstant(true));
+            CallStub(create_iter_result_object, context, value, TrueConstant());
         break;
       case JSGeneratorObject::kThrow:
-        result = assembler->CallRuntime(Runtime::kThrow, context, value);
+        result = CallRuntime(Runtime::kThrow, context, value);
         break;
     }
-    assembler->Return(result);
+    Return(result);
   }
 
-  assembler->Bind(&if_receiverisrunning);
+  Bind(&if_receiverisrunning);
   {
-    Node* result =
-        assembler->CallRuntime(Runtime::kThrowGeneratorRunning, context);
-    assembler->Return(result);  // Never reached.
+    CallRuntime(Runtime::kThrowGeneratorRunning, context);
+    Unreachable();
   }
 }
 
-}  // anonymous namespace
-
 // ES6 section 25.3.1.2 Generator.prototype.next ( value )
-void Builtins::Generate_GeneratorPrototypeNext(CodeStubAssembler* assembler) {
-  Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kNext,
-                                    "[Generator].prototype.next");
+TF_BUILTIN(GeneratorPrototypeNext, GeneratorBuiltinsAssembler) {
+  GeneratorPrototypeResume(JSGeneratorObject::kNext,
+                           "[Generator].prototype.next");
 }
 
 // ES6 section 25.3.1.3 Generator.prototype.return ( value )
-void Builtins::Generate_GeneratorPrototypeReturn(CodeStubAssembler* assembler) {
-  Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kReturn,
-                                    "[Generator].prototype.return");
+TF_BUILTIN(GeneratorPrototypeReturn, GeneratorBuiltinsAssembler) {
+  GeneratorPrototypeResume(JSGeneratorObject::kReturn,
+                           "[Generator].prototype.return");
 }
 
 // ES6 section 25.3.1.4 Generator.prototype.throw ( exception )
-void Builtins::Generate_GeneratorPrototypeThrow(CodeStubAssembler* assembler) {
-  Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kThrow,
-                                    "[Generator].prototype.throw");
+TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
+  GeneratorPrototypeResume(JSGeneratorObject::kThrow,
+                           "[Generator].prototype.throw");
 }
 
 }  // namespace internal
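
For a generator that is already closed, the three resume modes differ only in the iterator result they produce, which is why the rewrite can share a single GeneratorPrototypeResume helper and one CreateIterResultObject stub. A toy model of that closed-state behavior:

#include <optional>
#include <stdexcept>

enum class ResumeMode { kNext, kReturn, kThrow };

struct IterResult {
  std::optional<int> value;  // toy payload type
  bool done;
};

IterResult ResumeClosedGenerator(ResumeMode mode, std::optional<int> value) {
  switch (mode) {
    case ResumeMode::kNext:    // next(): { value: undefined, done: true }
      return {std::nullopt, true};
    case ResumeMode::kReturn:  // return(v): { value: v, done: true }
      return {value, true};
    case ResumeMode::kThrow:   // throw(v): rethrow v
      throw std::runtime_error("value thrown into closed generator");
  }
  return {std::nullopt, true};  // unreachable; silences compiler warnings
}
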
diff --git a/src/builtins/builtins-global.cc b/src/builtins/builtins-global.cc
index 1fa0967..2af6e99 100644
--- a/src/builtins/builtins-global.cc
+++ b/src/builtins/builtins-global.cc
@@ -2,11 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
 #include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
 #include "src/compiler.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
 #include "src/uri.h"
 
 namespace v8 {
@@ -92,120 +94,120 @@
   }
   Handle<JSFunction> function;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, function, Compiler::GetFunctionFromString(
-                             handle(target->native_context(), isolate),
-                             Handle<String>::cast(x), NO_PARSE_RESTRICTION));
+      isolate, function,
+      Compiler::GetFunctionFromString(handle(target->native_context(), isolate),
+                                      Handle<String>::cast(x),
+                                      NO_PARSE_RESTRICTION, kNoSourcePosition));
   RETURN_RESULT_OR_FAILURE(
       isolate,
       Execution::Call(isolate, function, target_global_proxy, 0, nullptr));
 }
 
 // ES6 section 18.2.2 isFinite ( number )
-void Builtins::Generate_GlobalIsFinite(CodeStubAssembler* assembler) {
+void Builtins::Generate_GlobalIsFinite(compiler::CodeAssemblerState* state) {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Variable Variable;
+  CodeStubAssembler assembler(state);
 
-  Node* context = assembler->Parameter(4);
+  Node* context = assembler.Parameter(4);
 
-  Label return_true(assembler), return_false(assembler);
+  Label return_true(&assembler), return_false(&assembler);
 
   // We might need to loop once for ToNumber conversion.
-  Variable var_num(assembler, MachineRepresentation::kTagged);
-  Label loop(assembler, &var_num);
-  var_num.Bind(assembler->Parameter(1));
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  Variable var_num(&assembler, MachineRepresentation::kTagged);
+  Label loop(&assembler, &var_num);
+  var_num.Bind(assembler.Parameter(1));
+  assembler.Goto(&loop);
+  assembler.Bind(&loop);
   {
     // Load the current {num} value.
     Node* num = var_num.value();
 
     // Check if {num} is a Smi or a HeapObject.
-    assembler->GotoIf(assembler->TaggedIsSmi(num), &return_true);
+    assembler.GotoIf(assembler.TaggedIsSmi(num), &return_true);
 
     // Check if {num} is a HeapNumber.
-    Label if_numisheapnumber(assembler),
-        if_numisnotheapnumber(assembler, Label::kDeferred);
-    assembler->Branch(assembler->WordEqual(assembler->LoadMap(num),
-                                           assembler->HeapNumberMapConstant()),
-                      &if_numisheapnumber, &if_numisnotheapnumber);
+    Label if_numisheapnumber(&assembler),
+        if_numisnotheapnumber(&assembler, Label::kDeferred);
+    assembler.Branch(assembler.IsHeapNumberMap(assembler.LoadMap(num)),
+                     &if_numisheapnumber, &if_numisnotheapnumber);
 
-    assembler->Bind(&if_numisheapnumber);
+    assembler.Bind(&if_numisheapnumber);
     {
       // Check if {num} contains a finite, non-NaN value.
-      Node* num_value = assembler->LoadHeapNumberValue(num);
-      assembler->BranchIfFloat64IsNaN(
-          assembler->Float64Sub(num_value, num_value), &return_false,
-          &return_true);
+      Node* num_value = assembler.LoadHeapNumberValue(num);
+      assembler.BranchIfFloat64IsNaN(assembler.Float64Sub(num_value, num_value),
+                                     &return_false, &return_true);
     }
 
-    assembler->Bind(&if_numisnotheapnumber);
+    assembler.Bind(&if_numisnotheapnumber);
     {
       // Need to convert {num} to a Number first.
-      Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
-      var_num.Bind(assembler->CallStub(callable, context, num));
-      assembler->Goto(&loop);
+      Callable callable = CodeFactory::NonNumberToNumber(assembler.isolate());
+      var_num.Bind(assembler.CallStub(callable, context, num));
+      assembler.Goto(&loop);
     }
   }
 
-  assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  assembler.Bind(&return_true);
+  assembler.Return(assembler.BooleanConstant(true));
 
-  assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
+  assembler.Bind(&return_false);
+  assembler.Return(assembler.BooleanConstant(false));
 }
 
 // ES6 section 18.2.3 isNaN ( number )
-void Builtins::Generate_GlobalIsNaN(CodeStubAssembler* assembler) {
+void Builtins::Generate_GlobalIsNaN(compiler::CodeAssemblerState* state) {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Variable Variable;
+  CodeStubAssembler assembler(state);
 
-  Node* context = assembler->Parameter(4);
+  Node* context = assembler.Parameter(4);
 
-  Label return_true(assembler), return_false(assembler);
+  Label return_true(&assembler), return_false(&assembler);
 
   // We might need to loop once for ToNumber conversion.
-  Variable var_num(assembler, MachineRepresentation::kTagged);
-  Label loop(assembler, &var_num);
-  var_num.Bind(assembler->Parameter(1));
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  Variable var_num(&assembler, MachineRepresentation::kTagged);
+  Label loop(&assembler, &var_num);
+  var_num.Bind(assembler.Parameter(1));
+  assembler.Goto(&loop);
+  assembler.Bind(&loop);
   {
     // Load the current {num} value.
     Node* num = var_num.value();
 
     // Check if {num} is a Smi or a HeapObject.
-    assembler->GotoIf(assembler->TaggedIsSmi(num), &return_false);
+    assembler.GotoIf(assembler.TaggedIsSmi(num), &return_false);
 
     // Check if {num} is a HeapNumber.
-    Label if_numisheapnumber(assembler),
-        if_numisnotheapnumber(assembler, Label::kDeferred);
-    assembler->Branch(assembler->WordEqual(assembler->LoadMap(num),
-                                           assembler->HeapNumberMapConstant()),
-                      &if_numisheapnumber, &if_numisnotheapnumber);
+    Label if_numisheapnumber(&assembler),
+        if_numisnotheapnumber(&assembler, Label::kDeferred);
+    assembler.Branch(assembler.IsHeapNumberMap(assembler.LoadMap(num)),
+                     &if_numisheapnumber, &if_numisnotheapnumber);
 
-    assembler->Bind(&if_numisheapnumber);
+    assembler.Bind(&if_numisheapnumber);
     {
       // Check if {num} contains a NaN.
-      Node* num_value = assembler->LoadHeapNumberValue(num);
-      assembler->BranchIfFloat64IsNaN(num_value, &return_true, &return_false);
+      Node* num_value = assembler.LoadHeapNumberValue(num);
+      assembler.BranchIfFloat64IsNaN(num_value, &return_true, &return_false);
     }
 
-    assembler->Bind(&if_numisnotheapnumber);
+    assembler.Bind(&if_numisnotheapnumber);
     {
       // Need to convert {num} to a Number first.
-      Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
-      var_num.Bind(assembler->CallStub(callable, context, num));
-      assembler->Goto(&loop);
+      Callable callable = CodeFactory::NonNumberToNumber(assembler.isolate());
+      var_num.Bind(assembler.CallStub(callable, context, num));
+      assembler.Goto(&loop);
     }
   }
 
-  assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  assembler.Bind(&return_true);
+  assembler.Return(assembler.BooleanConstant(true));
 
-  assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
+  assembler.Bind(&return_false);
+  assembler.Return(assembler.BooleanConstant(false));
 }
 
 }  // namespace internal
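
Both builtins share the same loop shape: a Smi is trivially numeric, a HeapNumber exposes its double payload, and anything else is converted once through the NonNumberToNumber stub before the loop re-tests it. The observable semantics, reduced to plain C++ (ToNumberOnce is a hypothetical stand-in for that stub):

#include <cmath>
#include <cstdlib>

// Stand-in for one round trip through the NonNumberToNumber stub.
double ToNumberOnce(const char* raw) { return raw ? std::atof(raw) : 0.0; }

// `is_number` models "Smi or HeapNumber"; `raw` is the unconverted operand.
bool GlobalIsNaN(bool is_number, double num, const char* raw) {
  if (!is_number) num = ToNumberOnce(raw);  // convert, then test the double
  return std::isnan(num);
}

bool GlobalIsFinite(bool is_number, double num, const char* raw) {
  if (!is_number) num = ToNumberOnce(raw);
  return !std::isnan(num - num);  // x - x is NaN for NaN and +/-Infinity
}
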
diff --git a/src/builtins/builtins-handler.cc b/src/builtins/builtins-handler.cc
index 88597f8..766d437 100644
--- a/src/builtins/builtins-handler.cc
+++ b/src/builtins/builtins-handler.cc
@@ -4,183 +4,263 @@
 
 #include "src/builtins/builtins-utils.h"
 #include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
 #include "src/ic/keyed-store-generic.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
-void Builtins::Generate_KeyedLoadIC_Megamorphic_TF(
-    CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
+TF_BUILTIN(KeyedLoadIC_IndexedString, CodeStubAssembler) {
   typedef LoadWithVectorDescriptor Descriptor;
 
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* index = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
 
-  CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
-  assembler->KeyedLoadICGeneric(&p);
+  Label miss(this);
+
+  Node* index_intptr = TryToIntptr(index, &miss);
+  Node* length = SmiUntag(LoadStringLength(receiver));
+  GotoIf(UintPtrGreaterThanOrEqual(index_intptr, length), &miss);
+
+  Node* code = StringCharCodeAt(receiver, index_intptr, INTPTR_PARAMETERS);
+  Node* result = StringFromCharCode(code);
+  Return(result);
+
+  Bind(&miss);
+  TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, index, slot,
+                  vector);
 }
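
KeyedLoadIC_IndexedString handles only the in-range integer-index case itself; everything else (non-integer keys, out-of-range indices) falls through to Runtime::kKeyedLoadIC_Miss. The same split in ordinary C++ (std::nullopt standing in for the miss path):

#include <optional>
#include <string>

std::optional<std::string> IndexedStringLoad(const std::string& receiver,
                                             long index) {
  if (index < 0 || static_cast<unsigned long>(index) >= receiver.size())
    return std::nullopt;  // miss: defer to Runtime::kKeyedLoadIC_Miss
  // StringCharCodeAt + StringFromCharCode, reduced to a one-char substring.
  return std::string(1, receiver[static_cast<unsigned long>(index)]);
}
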
 
-void Builtins::Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateMiss(masm);
-}
-void Builtins::Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateRuntimeGetProperty(masm);
+TF_BUILTIN(KeyedLoadIC_Miss, CodeStubAssembler) {
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, name, slot,
+                  vector);
 }
 
-void Builtins::Generate_KeyedStoreIC_Megamorphic(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateMegamorphic(masm, SLOPPY);
+TF_BUILTIN(KeyedLoadIC_Slow, CodeStubAssembler) {
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* context = Parameter(Descriptor::kContext);
+
+  TailCallRuntime(Runtime::kKeyedGetProperty, context, receiver, name);
 }
 
-void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateMegamorphic(masm, STRICT);
+void Builtins::Generate_KeyedStoreIC_Megamorphic(
+    compiler::CodeAssemblerState* state) {
+  KeyedStoreGenericGenerator::Generate(state, SLOPPY);
 }
 
-void KeyedStoreICMegamorphic(CodeStubAssembler* assembler, LanguageMode mode) {
-  typedef compiler::Node Node;
+void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict(
+    compiler::CodeAssemblerState* state) {
+  KeyedStoreGenericGenerator::Generate(state, STRICT);
+}
+
+TF_BUILTIN(KeyedStoreIC_Miss, CodeStubAssembler) {
   typedef StoreWithVectorDescriptor Descriptor;
 
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
 
-  CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
-                                         vector);
-  KeyedStoreGenericGenerator::Generate(assembler, &p, mode);
+  TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot, vector,
+                  receiver, name);
 }
 
-void Builtins::Generate_KeyedStoreIC_Megamorphic_TF(
-    CodeStubAssembler* assembler) {
-  KeyedStoreICMegamorphic(assembler, SLOPPY);
+TF_BUILTIN(KeyedStoreIC_Slow, CodeStubAssembler) {
+  typedef StoreWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  TailCallRuntime(Runtime::kKeyedStoreIC_Slow, context, value, slot, vector,
+                  receiver, name);
 }
 
-void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict_TF(
-    CodeStubAssembler* assembler) {
-  KeyedStoreICMegamorphic(assembler, STRICT);
-}
-
-void Builtins::Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateMiss(masm);
-}
-
-void Builtins::Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateSlow(masm);
-}
-
-void Builtins::Generate_LoadGlobalIC_Miss(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
+TF_BUILTIN(LoadGlobalIC_Miss, CodeStubAssembler) {
   typedef LoadGlobalWithVectorDescriptor Descriptor;
 
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
 
-  assembler->TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, slot,
-                             vector);
+  TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name, slot, vector);
 }
 
-void Builtins::Generate_LoadGlobalIC_Slow(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
+TF_BUILTIN(LoadGlobalIC_Slow, CodeStubAssembler) {
   typedef LoadGlobalWithVectorDescriptor Descriptor;
 
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
 
-  assembler->TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, slot,
-                             vector);
+  TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, name, slot, vector);
 }
 
 void Builtins::Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
   NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm);
 }
 
-void Builtins::Generate_LoadIC_Miss(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
+TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
   typedef LoadWithVectorDescriptor Descriptor;
 
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
 
-  assembler->TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
-                             slot, vector);
+  Label miss(this);
+
+  Node* proto_or_map =
+      LoadObjectField(receiver, JSFunction::kPrototypeOrInitialMapOffset);
+  GotoIf(IsTheHole(proto_or_map), &miss);
+
+  Variable var_result(this, MachineRepresentation::kTagged, proto_or_map);
+  Label done(this, &var_result);
+  GotoIfNot(IsMap(proto_or_map), &done);
+
+  var_result.Bind(LoadMapPrototype(proto_or_map));
+  Goto(&done);
+
+  Bind(&done);
+  Return(var_result.value());
+
+  Bind(&miss);
+  TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector);
 }
 
-void Builtins::Generate_LoadIC_Normal(MacroAssembler* masm) {
-  LoadIC::GenerateNormal(masm);
-}
-
-void Builtins::Generate_LoadIC_Slow(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
+TF_BUILTIN(LoadIC_Miss, CodeStubAssembler) {
   typedef LoadWithVectorDescriptor Descriptor;
 
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
 
-  assembler->TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
+  TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector);
 }
 
-void Builtins::Generate_StoreIC_Miss(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
+TF_BUILTIN(LoadIC_Normal, CodeStubAssembler) {
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* context = Parameter(Descriptor::kContext);
+
+  Label slow(this);
+  {
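+    // Dictionary-mode receivers keep their properties in a NameDictionary;
+    // probe it directly and defer to the runtime on any miss.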
+    Node* properties = LoadProperties(receiver);
+    Variable var_name_index(this, MachineType::PointerRepresentation());
+    Label found(this, &var_name_index);
+    NameDictionaryLookup<NameDictionary>(properties, name, &found,
+                                         &var_name_index, &slow);
+    Bind(&found);
+    {
+      Variable var_details(this, MachineRepresentation::kWord32);
+      Variable var_value(this, MachineRepresentation::kTagged);
+      LoadPropertyFromNameDictionary(properties, var_name_index.value(),
+                                     &var_details, &var_value);
+      Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
+                                         context, receiver, &slow);
+      Return(value);
+    }
+  }
+
+  Bind(&slow);
+  TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
+}
+
+TF_BUILTIN(LoadIC_Slow, CodeStubAssembler) {
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* context = Parameter(Descriptor::kContext);
+
+  TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
+}
+
+TF_BUILTIN(StoreIC_Miss, CodeStubAssembler) {
   typedef StoreWithVectorDescriptor Descriptor;
 
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
 
-  assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
-                             vector, receiver, name);
+  TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot, vector,
+                  receiver, name);
 }
 
-void Builtins::Generate_StoreIC_Normal(MacroAssembler* masm) {
-  StoreIC::GenerateNormal(masm);
+TF_BUILTIN(StoreIC_Normal, CodeStubAssembler) {
+  typedef StoreWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  Label slow(this);
+  {
+    Node* properties = LoadProperties(receiver);
+    Variable var_name_index(this, MachineType::PointerRepresentation());
+    Label found(this, &var_name_index);
+    NameDictionaryLookup<NameDictionary>(properties, name, &found,
+                                         &var_name_index, &slow);
+    Bind(&found);
+    {
+      Node* details = LoadDetailsByKeyIndex<NameDictionary>(
+          properties, var_name_index.value());
+      // Check that the property is a writable data property (no accessor).
+      const int kTypeAndReadOnlyMask = PropertyDetails::KindField::kMask |
+                                       PropertyDetails::kAttributesReadOnlyMask;
+      STATIC_ASSERT(kData == 0);
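+      // With kData == 0, a clear kind bit plus a clear READ_ONLY bit means a
+      // plain writable data property; anything else takes the slow path.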
+      GotoIf(IsSetWord32(details, kTypeAndReadOnlyMask), &slow);
+      StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
+                                           value);
+      Return(value);
+    }
+  }
+
+  Bind(&slow);
+  TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot, vector,
+                  receiver, name);
 }
 
 void Builtins::Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
   NamedStoreHandlerCompiler::GenerateStoreViaSetterForDeopt(masm);
 }
 
-namespace {
-void Generate_StoreIC_Slow(CodeStubAssembler* assembler,
-                           LanguageMode language_mode) {
-  typedef compiler::Node Node;
-  typedef StoreWithVectorDescriptor Descriptor;
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-  Node* lang_mode = assembler->SmiConstant(Smi::FromInt(language_mode));
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  assembler->TailCallRuntime(Runtime::kSetProperty, context, receiver, name,
-                             value, lang_mode);
-}
-}  // anonymous namespace
-
-void Builtins::Generate_StoreIC_SlowSloppy(CodeStubAssembler* assembler) {
-  Generate_StoreIC_Slow(assembler, SLOPPY);
-}
-
-void Builtins::Generate_StoreIC_SlowStrict(CodeStubAssembler* assembler) {
-  Generate_StoreIC_Slow(assembler, STRICT);
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/builtins/builtins-ic.cc b/src/builtins/builtins-ic.cc
new file mode 100644
index 0000000..e11afbe
--- /dev/null
+++ b/src/builtins/builtins-ic.cc
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/ic/accessor-assembler.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#define IC_BUILTIN(Name)                                                \
+  void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
+    AccessorAssembler assembler(state);                                 \
+    assembler.Generate##Name();                                         \
+  }
+
+#define IC_BUILTIN_PARAM(BuiltinName, GeneratorName, parameter)                \
+  void Builtins::Generate_##BuiltinName(compiler::CodeAssemblerState* state) { \
+    AccessorAssembler assembler(state);                                        \
+    assembler.Generate##GeneratorName(parameter);                              \
+  }
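+
+// For example, IC_BUILTIN(LoadIC) defines Builtins::Generate_LoadIC, which
+// wraps the incoming CodeAssemblerState in an AccessorAssembler and calls
+// AccessorAssembler::GenerateLoadIC(); the _PARAM variant forwards one extra
+// argument such as a language mode or typeof mode.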
+
+IC_BUILTIN(LoadIC)
+IC_BUILTIN(KeyedLoadIC)
+IC_BUILTIN(LoadICTrampoline)
+IC_BUILTIN(LoadField)
+IC_BUILTIN(KeyedLoadICTrampoline)
+IC_BUILTIN(KeyedLoadIC_Megamorphic)
+IC_BUILTIN(StoreIC)
+IC_BUILTIN(StoreICTrampoline)
+
+IC_BUILTIN_PARAM(StoreICStrict, StoreIC, /* no param */)
+IC_BUILTIN_PARAM(StoreICStrictTrampoline, StoreICTrampoline, /* no param */)
+
+IC_BUILTIN_PARAM(KeyedStoreIC, KeyedStoreIC, SLOPPY)
+IC_BUILTIN_PARAM(KeyedStoreICTrampoline, KeyedStoreICTrampoline, SLOPPY)
+IC_BUILTIN_PARAM(KeyedStoreICStrict, KeyedStoreIC, STRICT)
+IC_BUILTIN_PARAM(KeyedStoreICStrictTrampoline, KeyedStoreICTrampoline, STRICT)
+IC_BUILTIN_PARAM(LoadGlobalIC, LoadGlobalIC, NOT_INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LoadGlobalICInsideTypeof, LoadGlobalIC, INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LoadGlobalICTrampoline, LoadGlobalICTrampoline,
+                 NOT_INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofTrampoline, LoadGlobalICTrampoline,
+                 INSIDE_TYPEOF)
+IC_BUILTIN_PARAM(LoadICProtoArray, LoadICProtoArray, false)
+IC_BUILTIN_PARAM(LoadICProtoArrayThrowIfNonexistent, LoadICProtoArray, true)
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-internal.cc b/src/builtins/builtins-internal.cc
index bec6ff3..b1c737b 100644
--- a/src/builtins/builtins-internal.cc
+++ b/src/builtins/builtins-internal.cc
@@ -2,10 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+#include "src/counters.h"
 #include "src/interface-descriptors.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -54,86 +57,260 @@
 // TurboFan support builtins.
 
 void Builtins::Generate_CopyFastSmiOrObjectElements(
-    CodeStubAssembler* assembler) {
+    compiler::CodeAssemblerState* state) {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
   typedef CopyFastSmiOrObjectElementsDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
 
-  Node* object = assembler->Parameter(Descriptor::kObject);
+  Node* object = assembler.Parameter(Descriptor::kObject);
 
   // Load the {object}'s elements.
-  Node* source = assembler->LoadObjectField(object, JSObject::kElementsOffset);
+  Node* source = assembler.LoadObjectField(object, JSObject::kElementsOffset);
 
-  CodeStubAssembler::ParameterMode mode = assembler->OptimalParameterMode();
-  Node* length = assembler->UntagParameter(
-      assembler->LoadFixedArrayBaseLength(source), mode);
+  CodeStubAssembler::ParameterMode mode = assembler.OptimalParameterMode();
+  Node* length = assembler.TaggedToParameter(
+      assembler.LoadFixedArrayBaseLength(source), mode);
 
   // Check if we can allocate in new space.
   ElementsKind kind = FAST_ELEMENTS;
   int max_elements = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind);
-  Label if_newspace(assembler), if_oldspace(assembler);
-  assembler->Branch(
-      assembler->UintPtrLessThan(
-          length, assembler->IntPtrOrSmiConstant(max_elements, mode)),
+  Label if_newspace(&assembler), if_oldspace(&assembler);
+  assembler.Branch(
+      assembler.UintPtrOrSmiLessThan(
+          length, assembler.IntPtrOrSmiConstant(max_elements, mode), mode),
       &if_newspace, &if_oldspace);
 
+  assembler.Bind(&if_newspace);
+  {
+    Node* target = assembler.AllocateFixedArray(kind, length, mode);
+    assembler.CopyFixedArrayElements(kind, source, target, length,
+                                     SKIP_WRITE_BARRIER, mode);
+    assembler.StoreObjectField(object, JSObject::kElementsOffset, target);
+    assembler.Return(target);
+  }
+
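+  // The new-space copy above skipped the write barrier: stores into a freshly
+  // allocated new-space {target} need no old-to-new remembering. The
+  // pretenured copy below stores into old space and must keep the barrier.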
+  assembler.Bind(&if_oldspace);
+  {
+    Node* target = assembler.AllocateFixedArray(kind, length, mode,
+                                                CodeStubAssembler::kPretenured);
+    assembler.CopyFixedArrayElements(kind, source, target, length,
+                                     UPDATE_WRITE_BARRIER, mode);
+    assembler.StoreObjectField(object, JSObject::kElementsOffset, target);
+    assembler.Return(target);
+  }
+}
+
+void Builtins::Generate_GrowFastDoubleElements(
+    compiler::CodeAssemblerState* state) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef GrowArrayElementsDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
+
+  Node* object = assembler.Parameter(Descriptor::kObject);
+  Node* key = assembler.Parameter(Descriptor::kKey);
+  Node* context = assembler.Parameter(Descriptor::kContext);
+
+  Label runtime(&assembler, CodeStubAssembler::Label::kDeferred);
+  Node* elements = assembler.LoadElements(object);
+  elements = assembler.TryGrowElementsCapacity(
+      object, elements, FAST_DOUBLE_ELEMENTS, key, &runtime);
+  assembler.Return(elements);
+
+  assembler.Bind(&runtime);
+  assembler.TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+}
+
+void Builtins::Generate_GrowFastSmiOrObjectElements(
+    compiler::CodeAssemblerState* state) {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef GrowArrayElementsDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
+
+  Node* object = assembler.Parameter(Descriptor::kObject);
+  Node* key = assembler.Parameter(Descriptor::kKey);
+  Node* context = assembler.Parameter(Descriptor::kContext);
+
+  Label runtime(&assembler, CodeStubAssembler::Label::kDeferred);
+  Node* elements = assembler.LoadElements(object);
+  elements = assembler.TryGrowElementsCapacity(object, elements, FAST_ELEMENTS,
+                                               key, &runtime);
+  assembler.Return(elements);
+
+  assembler.Bind(&runtime);
+  assembler.TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+}
+
+namespace {
+
+void Generate_NewArgumentsElements(CodeStubAssembler* assembler,
+                                   compiler::Node* frame,
+                                   compiler::Node* length) {
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+  typedef compiler::Node Node;
+
+  // Check if we can allocate in new space.
+  ElementsKind kind = FAST_ELEMENTS;
+  int max_elements = FixedArray::GetMaxLengthForNewSpaceAllocation(kind);
+  Label if_newspace(assembler), if_oldspace(assembler, Label::kDeferred);
+  assembler->Branch(assembler->IntPtrLessThan(
+                        length, assembler->IntPtrConstant(max_elements)),
+                    &if_newspace, &if_oldspace);
+
   assembler->Bind(&if_newspace);
   {
-    Node* target = assembler->AllocateFixedArray(kind, length, mode);
-    assembler->CopyFixedArrayElements(kind, source, target, length,
-                                      SKIP_WRITE_BARRIER, mode);
-    assembler->StoreObjectField(object, JSObject::kElementsOffset, target);
-    assembler->Return(target);
+    // Prefer EmptyFixedArray in case of non-positive {length} (the {length}
+    // can be negative here for rest parameters).
+    Label if_empty(assembler), if_notempty(assembler);
+    assembler->Branch(
+        assembler->IntPtrLessThanOrEqual(length, assembler->IntPtrConstant(0)),
+        &if_empty, &if_notempty);
+
+    assembler->Bind(&if_empty);
+    assembler->Return(assembler->EmptyFixedArrayConstant());
+
+    assembler->Bind(&if_notempty);
+    {
+      // Allocate a FixedArray in new space.
+      Node* result = assembler->AllocateFixedArray(kind, length);
+
+      // Compute the effective {offset} into the {frame}.
+      Node* offset = assembler->IntPtrAdd(length, assembler->IntPtrConstant(1));
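+      // Parameters live above the saved frame pointer and return address, so
+      // parameter {index} is read from
+      // {frame} + ({offset} - {index}) * kPointerSize; with {offset} set to
+      // {length} + 1, the last parameter sits just above the return-address
+      // slot.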
+
+      // Copy the parameters from {frame} (starting at {offset}) to {result}.
+      Variable var_index(assembler, MachineType::PointerRepresentation());
+      Label loop(assembler, &var_index), done_loop(assembler);
+      var_index.Bind(assembler->IntPtrConstant(0));
+      assembler->Goto(&loop);
+      assembler->Bind(&loop);
+      {
+        // Load the current {index}.
+        Node* index = var_index.value();
+
+        // Check if we are done.
+        assembler->GotoIf(assembler->WordEqual(index, length), &done_loop);
+
+        // Load the parameter at the given {index}.
+        Node* value = assembler->Load(
+            MachineType::AnyTagged(), frame,
+            assembler->WordShl(assembler->IntPtrSub(offset, index),
+                               assembler->IntPtrConstant(kPointerSizeLog2)));
+
+        // Store the {value} into the {result}.
+        assembler->StoreFixedArrayElement(result, index, value,
+                                          SKIP_WRITE_BARRIER);
+
+        // Continue with next {index}.
+        var_index.Bind(
+            assembler->IntPtrAdd(index, assembler->IntPtrConstant(1)));
+        assembler->Goto(&loop);
+      }
+
+      assembler->Bind(&done_loop);
+      assembler->Return(result);
+    }
   }
 
   assembler->Bind(&if_oldspace);
   {
-    Node* target = assembler->AllocateFixedArray(
-        kind, length, mode, CodeStubAssembler::kPretenured);
-    assembler->CopyFixedArrayElements(kind, source, target, length,
-                                      UPDATE_WRITE_BARRIER, mode);
-    assembler->StoreObjectField(object, JSObject::kElementsOffset, target);
-    assembler->Return(target);
+    // Allocate in old space (or large object space).
+    assembler->TailCallRuntime(
+        Runtime::kNewArgumentsElements, assembler->NoContextConstant(),
+        assembler->BitcastWordToTagged(frame), assembler->SmiFromWord(length));
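+    // An aligned frame pointer has its low tag bits clear, so it can pass
+    // through the tagged calling convention as if it were a Smi.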
   }
 }
 
-void Builtins::Generate_GrowFastDoubleElements(CodeStubAssembler* assembler) {
+}  // namespace
+
+void Builtins::Generate_NewUnmappedArgumentsElements(
+    compiler::CodeAssemblerState* state) {
   typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
   typedef compiler::Node Node;
-  typedef GrowArrayElementsDescriptor Descriptor;
+  typedef NewArgumentsElementsDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
 
-  Node* object = assembler->Parameter(Descriptor::kObject);
-  Node* key = assembler->Parameter(Descriptor::kKey);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* formal_parameter_count =
+      assembler.Parameter(Descriptor::kFormalParameterCount);
 
-  Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
-  Node* elements = assembler->LoadElements(object);
-  elements = assembler->TryGrowElementsCapacity(
-      object, elements, FAST_DOUBLE_ELEMENTS, key, &runtime);
-  assembler->Return(elements);
+  // Determine the frame that holds the parameters.
+  Label done(&assembler);
+  Variable var_frame(&assembler, MachineType::PointerRepresentation()),
+      var_length(&assembler, MachineType::PointerRepresentation());
+  var_frame.Bind(assembler.LoadParentFramePointer());
+  var_length.Bind(formal_parameter_count);
+  Node* parent_frame = assembler.Load(
+      MachineType::Pointer(), var_frame.value(),
+      assembler.IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
+  Node* parent_frame_type =
+      assembler.Load(MachineType::AnyTagged(), parent_frame,
+                     assembler.IntPtrConstant(
+                         CommonFrameConstants::kContextOrFrameTypeOffset));
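+  // The slot at kContextOrFrameTypeOffset holds a context for ordinary JS
+  // frames but a frame-type marker for adaptor frames; only an
+  // ArgumentsAdaptorFrame records the actual argument count.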
+  assembler.GotoIfNot(assembler.MarkerIsFrameType(
+                          parent_frame_type, StackFrame::ARGUMENTS_ADAPTOR),
+                      &done);
+  {
+    // Determine the length from the ArgumentsAdaptorFrame.
+    Node* length = assembler.LoadAndUntagSmi(
+        parent_frame, ArgumentsAdaptorFrameConstants::kLengthOffset);
 
-  assembler->Bind(&runtime);
-  assembler->TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+    // Take the arguments from the ArgumentsAdaptorFrame.
+    var_frame.Bind(parent_frame);
+    var_length.Bind(length);
+  }
+  assembler.Goto(&done);
+
+  // Allocate the actual FixedArray for the elements.
+  assembler.Bind(&done);
+  Generate_NewArgumentsElements(&assembler, var_frame.value(),
+                                var_length.value());
 }
 
-void Builtins::Generate_GrowFastSmiOrObjectElements(
-    CodeStubAssembler* assembler) {
+void Builtins::Generate_NewRestParameterElements(
+    compiler::CodeAssemblerState* state) {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
-  typedef GrowArrayElementsDescriptor Descriptor;
+  typedef NewArgumentsElementsDescriptor Descriptor;
+  CodeStubAssembler assembler(state);
 
-  Node* object = assembler->Parameter(Descriptor::kObject);
-  Node* key = assembler->Parameter(Descriptor::kKey);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* formal_parameter_count =
+      assembler.Parameter(Descriptor::kFormalParameterCount);
 
-  Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
-  Node* elements = assembler->LoadElements(object);
-  elements = assembler->TryGrowElementsCapacity(object, elements, FAST_ELEMENTS,
-                                                key, &runtime);
-  assembler->Return(elements);
+  // Check if we have an ArgumentsAdaptorFrame, as we will only have rest
+  // parameters in that case.
+  Label if_empty(&assembler);
+  Node* frame = assembler.Load(
+      MachineType::Pointer(), assembler.LoadParentFramePointer(),
+      assembler.IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
+  Node* frame_type =
+      assembler.Load(MachineType::AnyTagged(), frame,
+                     assembler.IntPtrConstant(
+                         CommonFrameConstants::kContextOrFrameTypeOffset));
+  assembler.GotoIfNot(
+      assembler.MarkerIsFrameType(frame_type, StackFrame::ARGUMENTS_ADAPTOR),
+      &if_empty);
 
-  assembler->Bind(&runtime);
-  assembler->TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+  // Determine the length from the ArgumentsAdaptorFrame.
+  Node* frame_length = assembler.LoadAndUntagSmi(
+      frame, ArgumentsAdaptorFrameConstants::kLengthOffset);
+
+  // Compute the actual rest parameter length (may be negative).
+  Node* length = assembler.IntPtrSub(frame_length, formal_parameter_count);
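+  // {length} is negative when fewer actual arguments than formal parameters
+  // were passed; Generate_NewArgumentsElements returns the empty FixedArray
+  // for any non-positive length.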
+
+  // Allocate the actual FixedArray for the elements.
+  Generate_NewArgumentsElements(&assembler, frame, length);
+
+  // No rest parameters; return the empty FixedArray.
+  assembler.Bind(&if_empty);
+  assembler.Return(assembler.EmptyFixedArrayConstant());
+}
+
+void Builtins::Generate_ReturnReceiver(compiler::CodeAssemblerState* state) {
+  CodeStubAssembler assembler(state);
+  assembler.Return(assembler.Parameter(0));
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-interpreter.cc b/src/builtins/builtins-interpreter.cc
index 1609184..3cfa57b 100644
--- a/src/builtins/builtins-interpreter.cc
+++ b/src/builtins/builtins-interpreter.cc
@@ -2,24 +2,28 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
-Handle<Code> Builtins::InterpreterPushArgsAndCall(TailCallMode tail_call_mode,
-                                                  CallableType function_type) {
-  switch (tail_call_mode) {
-    case TailCallMode::kDisallow:
-      if (function_type == CallableType::kJSFunction) {
+Handle<Code> Builtins::InterpreterPushArgsAndCall(
+    TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
+  switch (mode) {
+    case InterpreterPushArgsMode::kJSFunction:
+      if (tail_call_mode == TailCallMode::kDisallow) {
         return InterpreterPushArgsAndCallFunction();
       } else {
-        return InterpreterPushArgsAndCall();
-      }
-    case TailCallMode::kAllow:
-      if (function_type == CallableType::kJSFunction) {
         return InterpreterPushArgsAndTailCallFunction();
+      }
+    case InterpreterPushArgsMode::kWithFinalSpread:
+      CHECK(tail_call_mode == TailCallMode::kDisallow);
+      return InterpreterPushArgsAndCallWithFinalSpread();
+    case InterpreterPushArgsMode::kOther:
+      if (tail_call_mode == TailCallMode::kDisallow) {
+        return InterpreterPushArgsAndCall();
       } else {
         return InterpreterPushArgsAndTailCall();
       }
@@ -29,33 +33,41 @@
 }
 
 void Builtins::Generate_InterpreterPushArgsAndCall(MacroAssembler* masm) {
-  return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kDisallow,
-                                                 CallableType::kAny);
+  return Generate_InterpreterPushArgsAndCallImpl(
+      masm, TailCallMode::kDisallow, InterpreterPushArgsMode::kOther);
 }
 
 void Builtins::Generate_InterpreterPushArgsAndCallFunction(
     MacroAssembler* masm) {
-  return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kDisallow,
-                                                 CallableType::kJSFunction);
+  return Generate_InterpreterPushArgsAndCallImpl(
+      masm, TailCallMode::kDisallow, InterpreterPushArgsMode::kJSFunction);
+}
+
+void Builtins::Generate_InterpreterPushArgsAndCallWithFinalSpread(
+    MacroAssembler* masm) {
+  return Generate_InterpreterPushArgsAndCallImpl(
+      masm, TailCallMode::kDisallow, InterpreterPushArgsMode::kWithFinalSpread);
 }
 
 void Builtins::Generate_InterpreterPushArgsAndTailCall(MacroAssembler* masm) {
-  return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kAllow,
-                                                 CallableType::kAny);
+  return Generate_InterpreterPushArgsAndCallImpl(
+      masm, TailCallMode::kAllow, InterpreterPushArgsMode::kOther);
 }
 
 void Builtins::Generate_InterpreterPushArgsAndTailCallFunction(
     MacroAssembler* masm) {
-  return Generate_InterpreterPushArgsAndCallImpl(masm, TailCallMode::kAllow,
-                                                 CallableType::kJSFunction);
+  return Generate_InterpreterPushArgsAndCallImpl(
+      masm, TailCallMode::kAllow, InterpreterPushArgsMode::kJSFunction);
 }
 
 Handle<Code> Builtins::InterpreterPushArgsAndConstruct(
-    CallableType function_type) {
-  switch (function_type) {
-    case CallableType::kJSFunction:
+    InterpreterPushArgsMode mode) {
+  switch (mode) {
+    case InterpreterPushArgsMode::kJSFunction:
       return InterpreterPushArgsAndConstructFunction();
-    case CallableType::kAny:
+    case InterpreterPushArgsMode::kWithFinalSpread:
+      return InterpreterPushArgsAndConstructWithFinalSpread();
+    case InterpreterPushArgsMode::kOther:
       return InterpreterPushArgsAndConstruct();
   }
   UNREACHABLE();
@@ -63,13 +75,20 @@
 }
 
 void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
-  return Generate_InterpreterPushArgsAndConstructImpl(masm, CallableType::kAny);
+  return Generate_InterpreterPushArgsAndConstructImpl(
+      masm, InterpreterPushArgsMode::kOther);
+}
+
+void Builtins::Generate_InterpreterPushArgsAndConstructWithFinalSpread(
+    MacroAssembler* masm) {
+  return Generate_InterpreterPushArgsAndConstructImpl(
+      masm, InterpreterPushArgsMode::kWithFinalSpread);
 }
 
 void Builtins::Generate_InterpreterPushArgsAndConstructFunction(
     MacroAssembler* masm) {
   return Generate_InterpreterPushArgsAndConstructImpl(
-      masm, CallableType::kJSFunction);
+      masm, InterpreterPushArgsMode::kJSFunction);
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-iterator.cc b/src/builtins/builtins-iterator.cc
deleted file mode 100644
index 7f74c20..0000000
--- a/src/builtins/builtins-iterator.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-utils.h"
-#include "src/builtins/builtins.h"
-#include "src/frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void Builtins::Generate_IteratorPrototypeIterator(
-    CodeStubAssembler* assembler) {
-  assembler->Return(assembler->Parameter(0));
-}
-
-BUILTIN(ModuleNamespaceIterator) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
-  Handle<Object> receiver = args.at<Object>(0);
-
-  if (!receiver->IsJSModuleNamespace()) {
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
-                              isolate->factory()->iterator_symbol(), receiver));
-  }
-  auto ns = Handle<JSModuleNamespace>::cast(receiver);
-
-  Handle<FixedArray> names =
-      KeyAccumulator::GetKeys(ns, KeyCollectionMode::kOwnOnly, SKIP_SYMBOLS)
-          .ToHandleChecked();
-  return *isolate->factory()->NewJSFixedArrayIterator(names);
-}
-
-BUILTIN(FixedArrayIteratorNext) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
-  Handle<Object> receiver = args.at<Object>(0);
-
-  // It is an error if this function is called on anything other than the
-  // particular iterator object for which the function was created.
-  if (!receiver->IsJSFixedArrayIterator() ||
-      Handle<JSFixedArrayIterator>::cast(receiver)->initial_next() !=
-          *args.target()) {
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
-                              isolate->factory()->next_string(), receiver));
-  }
-
-  auto iterator = Handle<JSFixedArrayIterator>::cast(receiver);
-  Handle<Object> value;
-  bool done;
-
-  int index = iterator->index();
-  if (index < iterator->array()->length()) {
-    value = handle(iterator->array()->get(index), isolate);
-    done = false;
-    iterator->set_index(index + 1);
-  } else {
-    value = isolate->factory()->undefined_value();
-    done = true;
-  }
-
-  return *isolate->factory()->NewJSIteratorResult(value, done);
-}
-
-}  // namespace internal
-}  // namespace v8
diff --git a/src/builtins/builtins-json.cc b/src/builtins/builtins-json.cc
index 4a8c7c5..7bc6ab0 100644
--- a/src/builtins/builtins-json.cc
+++ b/src/builtins/builtins-json.cc
@@ -5,8 +5,10 @@
 #include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
 
+#include "src/counters.h"
 #include "src/json-parser.h"
 #include "src/json-stringifier.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/builtins/builtins-math.cc b/src/builtins/builtins-math.cc
index 30f12ba..f524913 100644
--- a/src/builtins/builtins-math.cc
+++ b/src/builtins/builtins-math.cc
@@ -2,10 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
 #include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -13,332 +15,300 @@
 // -----------------------------------------------------------------------------
 // ES6 section 20.2.2 Function Properties of the Math Object
 
-// ES6 section - 20.2.2.1 Math.abs ( x )
-void Builtins::Generate_MathAbs(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
+class MathBuiltinsAssembler : public CodeStubAssembler {
+ public:
+  explicit MathBuiltinsAssembler(compiler::CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
 
-  Node* context = assembler->Parameter(4);
+ protected:
+  void MathRoundingOperation(Node* (CodeStubAssembler::*float64op)(Node*));
+  void MathUnaryOperation(Node* (CodeStubAssembler::*float64op)(Node*));
+};
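+
+// Each Math builtin below picks its float64 machine operation as a
+// pointer-to-member, e.g.
+// MathRoundingOperation(&CodeStubAssembler::Float64Ceil); the shared helpers
+// invoke it via (this->*float64op)(x_value).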
+
+// ES6 section 20.2.2.1 Math.abs ( x )
+TF_BUILTIN(MathAbs, CodeStubAssembler) {
+  Node* context = Parameter(4);
 
   // We might need to loop once for ToNumber conversion.
-  Variable var_x(assembler, MachineRepresentation::kTagged);
-  Label loop(assembler, &var_x);
-  var_x.Bind(assembler->Parameter(1));
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  Variable var_x(this, MachineRepresentation::kTagged);
+  Label loop(this, &var_x);
+  var_x.Bind(Parameter(1));
+  Goto(&loop);
+  Bind(&loop);
   {
     // Load the current {x} value.
     Node* x = var_x.value();
 
     // Check if {x} is a Smi or a HeapObject.
-    Label if_xissmi(assembler), if_xisnotsmi(assembler);
-    assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
+    Label if_xissmi(this), if_xisnotsmi(this);
+    Branch(TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
 
-    assembler->Bind(&if_xissmi);
+    Bind(&if_xissmi);
     {
       // Check if {x} is already positive.
-      Label if_xispositive(assembler), if_xisnotpositive(assembler);
-      assembler->BranchIfSmiLessThanOrEqual(
-          assembler->SmiConstant(Smi::FromInt(0)), x, &if_xispositive,
-          &if_xisnotpositive);
+      Label if_xispositive(this), if_xisnotpositive(this);
+      BranchIfSmiLessThanOrEqual(SmiConstant(Smi::FromInt(0)), x,
+                                 &if_xispositive, &if_xisnotpositive);
 
-      assembler->Bind(&if_xispositive);
+      Bind(&if_xispositive);
       {
         // Just return the input {x}.
-        assembler->Return(x);
+        Return(x);
       }
 
-      assembler->Bind(&if_xisnotpositive);
+      Bind(&if_xisnotpositive);
       {
         // Try to negate the {x} value.
-        Node* pair = assembler->IntPtrSubWithOverflow(
-            assembler->IntPtrConstant(0), assembler->BitcastTaggedToWord(x));
-        Node* overflow = assembler->Projection(1, pair);
-        Label if_overflow(assembler, Label::kDeferred),
-            if_notoverflow(assembler);
-        assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+        Node* pair =
+            IntPtrSubWithOverflow(IntPtrConstant(0), BitcastTaggedToWord(x));
+        Node* overflow = Projection(1, pair);
+        Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
+        Branch(overflow, &if_overflow, &if_notoverflow);
 
-        assembler->Bind(&if_notoverflow);
+        Bind(&if_notoverflow);
         {
           // There is a Smi representation for negated {x}.
-          Node* result = assembler->Projection(0, pair);
-          result = assembler->BitcastWordToTagged(result);
-          assembler->Return(result);
+          Node* result = Projection(0, pair);
+          Return(BitcastWordToTagged(result));
         }
 
-        assembler->Bind(&if_overflow);
-        {
-          Node* result = assembler->NumberConstant(0.0 - Smi::kMinValue);
-          assembler->Return(result);
-        }
+        Bind(&if_overflow);
+        { Return(NumberConstant(0.0 - Smi::kMinValue)); }
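+        // Negation overflows only for Smi::kMinValue, whose absolute value
+        // is not representable as a Smi; e.g. on 32-bit targets Math.abs of
+        // -2**30 must produce the HeapNumber 2**30.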
       }
     }
 
-    assembler->Bind(&if_xisnotsmi);
+    Bind(&if_xisnotsmi);
     {
       // Check if {x} is a HeapNumber.
-      Label if_xisheapnumber(assembler),
-          if_xisnotheapnumber(assembler, Label::kDeferred);
-      assembler->Branch(
-          assembler->WordEqual(assembler->LoadMap(x),
-                               assembler->HeapNumberMapConstant()),
-          &if_xisheapnumber, &if_xisnotheapnumber);
+      Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
+      Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
+             &if_xisnotheapnumber);
 
-      assembler->Bind(&if_xisheapnumber);
+      Bind(&if_xisheapnumber);
       {
-        Node* x_value = assembler->LoadHeapNumberValue(x);
-        Node* value = assembler->Float64Abs(x_value);
-        Node* result = assembler->AllocateHeapNumberWithValue(value);
-        assembler->Return(result);
+        Node* x_value = LoadHeapNumberValue(x);
+        Node* value = Float64Abs(x_value);
+        Node* result = AllocateHeapNumberWithValue(value);
+        Return(result);
       }
 
-      assembler->Bind(&if_xisnotheapnumber);
+      Bind(&if_xisnotheapnumber);
       {
         // Need to convert {x} to a Number first.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_x.Bind(assembler->CallStub(callable, context, x));
-        assembler->Goto(&loop);
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_x.Bind(CallStub(callable, context, x));
+        Goto(&loop);
       }
     }
   }
 }
 
-namespace {
-
-void Generate_MathRoundingOperation(
-    CodeStubAssembler* assembler,
-    compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Node* context = assembler->Parameter(4);
+void MathBuiltinsAssembler::MathRoundingOperation(
+    Node* (CodeStubAssembler::*float64op)(Node*)) {
+  Node* context = Parameter(4);
 
   // We might need to loop once for ToNumber conversion.
-  Variable var_x(assembler, MachineRepresentation::kTagged);
-  Label loop(assembler, &var_x);
-  var_x.Bind(assembler->Parameter(1));
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  Variable var_x(this, MachineRepresentation::kTagged);
+  Label loop(this, &var_x);
+  var_x.Bind(Parameter(1));
+  Goto(&loop);
+  Bind(&loop);
   {
     // Load the current {x} value.
     Node* x = var_x.value();
 
     // Check if {x} is a Smi or a HeapObject.
-    Label if_xissmi(assembler), if_xisnotsmi(assembler);
-    assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
+    Label if_xissmi(this), if_xisnotsmi(this);
+    Branch(TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
 
-    assembler->Bind(&if_xissmi);
+    Bind(&if_xissmi);
     {
       // Nothing to do when {x} is a Smi.
-      assembler->Return(x);
+      Return(x);
     }
 
-    assembler->Bind(&if_xisnotsmi);
+    Bind(&if_xisnotsmi);
     {
       // Check if {x} is a HeapNumber.
-      Label if_xisheapnumber(assembler),
-          if_xisnotheapnumber(assembler, Label::kDeferred);
-      assembler->Branch(
-          assembler->WordEqual(assembler->LoadMap(x),
-                               assembler->HeapNumberMapConstant()),
-          &if_xisheapnumber, &if_xisnotheapnumber);
+      Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
+      Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
+             &if_xisnotheapnumber);
 
-      assembler->Bind(&if_xisheapnumber);
+      Bind(&if_xisheapnumber);
       {
-        Node* x_value = assembler->LoadHeapNumberValue(x);
-        Node* value = (assembler->*float64op)(x_value);
-        Node* result = assembler->ChangeFloat64ToTagged(value);
-        assembler->Return(result);
+        Node* x_value = LoadHeapNumberValue(x);
+        Node* value = (this->*float64op)(x_value);
+        Node* result = ChangeFloat64ToTagged(value);
+        Return(result);
       }
 
-      assembler->Bind(&if_xisnotheapnumber);
+      Bind(&if_xisnotheapnumber);
       {
         // Need to convert {x} to a Number first.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_x.Bind(assembler->CallStub(callable, context, x));
-        assembler->Goto(&loop);
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_x.Bind(CallStub(callable, context, x));
+        Goto(&loop);
       }
     }
   }
 }
 
-void Generate_MathUnaryOperation(
-    CodeStubAssembler* assembler,
-    compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
-  typedef compiler::Node Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = (assembler->*float64op)(x_value);
-  Node* result = assembler->AllocateHeapNumberWithValue(value);
-  assembler->Return(result);
+void MathBuiltinsAssembler::MathUnaryOperation(
+    Node* (CodeStubAssembler::*float64op)(Node*)) {
+  Node* x = Parameter(1);
+  Node* context = Parameter(4);
+  Node* x_value = TruncateTaggedToFloat64(context, x);
+  Node* value = (this->*float64op)(x_value);
+  Node* result = AllocateHeapNumberWithValue(value);
+  Return(result);
 }
 
-}  // namespace
-
 // ES6 section 20.2.2.2 Math.acos ( x )
-void Builtins::Generate_MathAcos(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Acos);
+TF_BUILTIN(MathAcos, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Acos);
 }
 
 // ES6 section 20.2.2.3 Math.acosh ( x )
-void Builtins::Generate_MathAcosh(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Acosh);
+TF_BUILTIN(MathAcosh, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Acosh);
 }
 
 // ES6 section 20.2.2.4 Math.asin ( x )
-void Builtins::Generate_MathAsin(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Asin);
+TF_BUILTIN(MathAsin, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Asin);
 }
 
 // ES6 section 20.2.2.5 Math.asinh ( x )
-void Builtins::Generate_MathAsinh(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Asinh);
+TF_BUILTIN(MathAsinh, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Asinh);
 }
-
 // ES6 section 20.2.2.6 Math.atan ( x )
-void Builtins::Generate_MathAtan(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Atan);
+TF_BUILTIN(MathAtan, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Atan);
 }
 
 // ES6 section 20.2.2.7 Math.atanh ( x )
-void Builtins::Generate_MathAtanh(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Atanh);
+TF_BUILTIN(MathAtanh, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Atanh);
 }
 
 // ES6 section 20.2.2.8 Math.atan2 ( y, x )
-void Builtins::Generate_MathAtan2(CodeStubAssembler* assembler) {
-  using compiler::Node;
+TF_BUILTIN(MathAtan2, CodeStubAssembler) {
+  Node* y = Parameter(1);
+  Node* x = Parameter(2);
+  Node* context = Parameter(5);
 
-  Node* y = assembler->Parameter(1);
-  Node* x = assembler->Parameter(2);
-  Node* context = assembler->Parameter(5);
-  Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value = assembler->Float64Atan2(y_value, x_value);
-  Node* result = assembler->AllocateHeapNumberWithValue(value);
-  assembler->Return(result);
+  Node* y_value = TruncateTaggedToFloat64(context, y);
+  Node* x_value = TruncateTaggedToFloat64(context, x);
+  Node* value = Float64Atan2(y_value, x_value);
+  Node* result = AllocateHeapNumberWithValue(value);
+  Return(result);
 }
 
 // ES6 section 20.2.2.10 Math.ceil ( x )
-void Builtins::Generate_MathCeil(CodeStubAssembler* assembler) {
-  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Ceil);
+TF_BUILTIN(MathCeil, MathBuiltinsAssembler) {
+  MathRoundingOperation(&CodeStubAssembler::Float64Ceil);
 }
 
 // ES6 section 20.2.2.9 Math.cbrt ( x )
-void Builtins::Generate_MathCbrt(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cbrt);
+TF_BUILTIN(MathCbrt, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Cbrt);
 }
 
 // ES6 section 20.2.2.11 Math.clz32 ( x )
-void Builtins::Generate_MathClz32(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Node* context = assembler->Parameter(4);
+TF_BUILTIN(MathClz32, CodeStubAssembler) {
+  Node* context = Parameter(4);
 
   // Shared entry point for the clz32 operation.
-  Variable var_clz32_x(assembler, MachineRepresentation::kWord32);
-  Label do_clz32(assembler);
+  Variable var_clz32_x(this, MachineRepresentation::kWord32);
+  Label do_clz32(this);
 
   // We might need to loop once for ToNumber conversion.
-  Variable var_x(assembler, MachineRepresentation::kTagged);
-  Label loop(assembler, &var_x);
-  var_x.Bind(assembler->Parameter(1));
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  Variable var_x(this, MachineRepresentation::kTagged);
+  Label loop(this, &var_x);
+  var_x.Bind(Parameter(1));
+  Goto(&loop);
+  Bind(&loop);
   {
     // Load the current {x} value.
     Node* x = var_x.value();
 
     // Check if {x} is a Smi or a HeapObject.
-    Label if_xissmi(assembler), if_xisnotsmi(assembler);
-    assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
+    Label if_xissmi(this), if_xisnotsmi(this);
+    Branch(TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
 
-    assembler->Bind(&if_xissmi);
+    Bind(&if_xissmi);
     {
-      var_clz32_x.Bind(assembler->SmiToWord32(x));
-      assembler->Goto(&do_clz32);
+      var_clz32_x.Bind(SmiToWord32(x));
+      Goto(&do_clz32);
     }
 
-    assembler->Bind(&if_xisnotsmi);
+    Bind(&if_xisnotsmi);
     {
       // Check if {x} is a HeapNumber.
-      Label if_xisheapnumber(assembler),
-          if_xisnotheapnumber(assembler, Label::kDeferred);
-      assembler->Branch(
-          assembler->WordEqual(assembler->LoadMap(x),
-                               assembler->HeapNumberMapConstant()),
-          &if_xisheapnumber, &if_xisnotheapnumber);
+      Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
+      Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
+             &if_xisnotheapnumber);
 
-      assembler->Bind(&if_xisheapnumber);
+      Bind(&if_xisheapnumber);
       {
-        var_clz32_x.Bind(assembler->TruncateHeapNumberValueToWord32(x));
-        assembler->Goto(&do_clz32);
+        var_clz32_x.Bind(TruncateHeapNumberValueToWord32(x));
+        Goto(&do_clz32);
       }
 
-      assembler->Bind(&if_xisnotheapnumber);
+      Bind(&if_xisnotheapnumber);
       {
         // Need to convert {x} to a Number first.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_x.Bind(assembler->CallStub(callable, context, x));
-        assembler->Goto(&loop);
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_x.Bind(CallStub(callable, context, x));
+        Goto(&loop);
       }
     }
   }
 
-  assembler->Bind(&do_clz32);
+  Bind(&do_clz32);
   {
     Node* x_value = var_clz32_x.value();
-    Node* value = assembler->Word32Clz(x_value);
-    Node* result = assembler->ChangeInt32ToTagged(value);
-    assembler->Return(result);
+    Node* value = Word32Clz(x_value);
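+    // Word32Clz counts leading zero bits in the 32-bit value, e.g.
+    // Math.clz32(1) == 31 and Math.clz32(0) == 32.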
+    Node* result = ChangeInt32ToTagged(value);
+    Return(result);
   }
 }
 
 // ES6 section 20.2.2.12 Math.cos ( x )
-void Builtins::Generate_MathCos(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cos);
+TF_BUILTIN(MathCos, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Cos);
 }
 
 // ES6 section 20.2.2.13 Math.cosh ( x )
-void Builtins::Generate_MathCosh(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cosh);
+TF_BUILTIN(MathCosh, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Cosh);
 }
 
 // ES6 section 20.2.2.14 Math.exp ( x )
-void Builtins::Generate_MathExp(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Exp);
+TF_BUILTIN(MathExp, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Exp);
 }
 
 // ES6 section 20.2.2.15 Math.expm1 ( x )
-void Builtins::Generate_MathExpm1(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Expm1);
+TF_BUILTIN(MathExpm1, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Expm1);
 }
 
 // ES6 section 20.2.2.16 Math.floor ( x )
-void Builtins::Generate_MathFloor(CodeStubAssembler* assembler) {
-  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Floor);
+TF_BUILTIN(MathFloor, MathBuiltinsAssembler) {
+  MathRoundingOperation(&CodeStubAssembler::Float64Floor);
 }
 
 // ES6 section 20.2.2.17 Math.fround ( x )
-void Builtins::Generate_MathFround(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* value32 = assembler->TruncateFloat64ToFloat32(x_value);
-  Node* value = assembler->ChangeFloat32ToFloat64(value32);
-  Node* result = assembler->AllocateHeapNumberWithValue(value);
-  assembler->Return(result);
+TF_BUILTIN(MathFround, CodeStubAssembler) {
+  Node* x = Parameter(1);
+  Node* context = Parameter(4);
+  Node* x_value = TruncateTaggedToFloat64(context, x);
+  Node* value32 = TruncateFloat64ToFloat32(x_value);
+  Node* value = ChangeFloat32ToFloat64(value32);
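+  // Rounding to float32 and widening back yields the nearest single-precision
+  // value as a double, e.g. Math.fround(1.1) == 1.100000023841858.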
+  Node* result = AllocateHeapNumberWithValue(value);
+  Return(result);
 }
 
 // ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values )
@@ -351,7 +321,7 @@
   bool one_arg_is_nan = false;
   List<double> abs_values(length);
   for (int i = 0; i < length; i++) {
-    Handle<Object> x = args.at<Object>(i + 1);
+    Handle<Object> x = args.at(i + 1);
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
     double abs_value = std::abs(x->Number());
 
@@ -394,153 +364,134 @@
 }
 
 // ES6 section 20.2.2.19 Math.imul ( x, y )
-void Builtins::Generate_MathImul(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* y = assembler->Parameter(2);
-  Node* context = assembler->Parameter(5);
-  Node* x_value = assembler->TruncateTaggedToWord32(context, x);
-  Node* y_value = assembler->TruncateTaggedToWord32(context, y);
-  Node* value = assembler->Int32Mul(x_value, y_value);
-  Node* result = assembler->ChangeInt32ToTagged(value);
-  assembler->Return(result);
+TF_BUILTIN(MathImul, CodeStubAssembler) {
+  Node* x = Parameter(1);
+  Node* y = Parameter(2);
+  Node* context = Parameter(5);
+  Node* x_value = TruncateTaggedToWord32(context, x);
+  Node* y_value = TruncateTaggedToWord32(context, y);
+  Node* value = Int32Mul(x_value, y_value);
+  Node* result = ChangeInt32ToTagged(value);
+  Return(result);
 }
 
 // ES6 section 20.2.2.20 Math.log ( x )
-void Builtins::Generate_MathLog(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log);
+TF_BUILTIN(MathLog, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Log);
 }
 
 // ES6 section 20.2.2.21 Math.log1p ( x )
-void Builtins::Generate_MathLog1p(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log1p);
+TF_BUILTIN(MathLog1p, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Log1p);
 }
 
 // ES6 section 20.2.2.22 Math.log10 ( x )
-void Builtins::Generate_MathLog10(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log10);
+TF_BUILTIN(MathLog10, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Log10);
 }
 
 // ES6 section 20.2.2.23 Math.log2 ( x )
-void Builtins::Generate_MathLog2(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log2);
+TF_BUILTIN(MathLog2, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Log2);
 }
 
 // ES6 section 20.2.2.26 Math.pow ( x, y )
-void Builtins::Generate_MathPow(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* x = assembler->Parameter(1);
-  Node* y = assembler->Parameter(2);
-  Node* context = assembler->Parameter(5);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
-  Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
-  Node* value = assembler->Float64Pow(x_value, y_value);
-  Node* result = assembler->ChangeFloat64ToTagged(value);
-  assembler->Return(result);
+TF_BUILTIN(MathPow, CodeStubAssembler) {
+  Node* x = Parameter(1);
+  Node* y = Parameter(2);
+  Node* context = Parameter(5);
+  Node* x_value = TruncateTaggedToFloat64(context, x);
+  Node* y_value = TruncateTaggedToFloat64(context, y);
+  Node* value = Float64Pow(x_value, y_value);
+  Node* result = ChangeFloat64ToTagged(value);
+  Return(result);
 }
 
 // ES6 section 20.2.2.27 Math.random ( )
-void Builtins::Generate_MathRandom(CodeStubAssembler* assembler) {
-  using compiler::Node;
-
-  Node* context = assembler->Parameter(3);
-  Node* native_context = assembler->LoadNativeContext(context);
+TF_BUILTIN(MathRandom, CodeStubAssembler) {
+  Node* context = Parameter(3);
+  Node* native_context = LoadNativeContext(context);
 
   // Load cache index.
-  CodeStubAssembler::Variable smi_index(assembler,
-                                        MachineRepresentation::kTagged);
-  smi_index.Bind(assembler->LoadContextElement(
-      native_context, Context::MATH_RANDOM_INDEX_INDEX));
+  Variable smi_index(this, MachineRepresentation::kTagged);
+  smi_index.Bind(
+      LoadContextElement(native_context, Context::MATH_RANDOM_INDEX_INDEX));
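+  // The native context pairs this index with a FixedDoubleArray of
+  // pregenerated samples in MATH_RANDOM_CACHE_INDEX; index zero means the
+  // cache is exhausted.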
 
   // Cached random numbers are exhausted if index is 0. Go to slow path.
-  CodeStubAssembler::Label if_cached(assembler);
-  assembler->GotoIf(assembler->SmiAbove(smi_index.value(),
-                                        assembler->SmiConstant(Smi::kZero)),
-                    &if_cached);
+  Label if_cached(this);
+  GotoIf(SmiAbove(smi_index.value(), SmiConstant(Smi::kZero)), &if_cached);
 
   // Cache exhausted, populate the cache. Return value is the new index.
-  smi_index.Bind(
-      assembler->CallRuntime(Runtime::kGenerateRandomNumbers, context));
-  assembler->Goto(&if_cached);
+  smi_index.Bind(CallRuntime(Runtime::kGenerateRandomNumbers, context));
+  Goto(&if_cached);
 
   // Compute next index by decrement.
-  assembler->Bind(&if_cached);
-  Node* new_smi_index = assembler->SmiSub(
-      smi_index.value(), assembler->SmiConstant(Smi::FromInt(1)));
-  assembler->StoreContextElement(
-      native_context, Context::MATH_RANDOM_INDEX_INDEX, new_smi_index);
+  Bind(&if_cached);
+  Node* new_smi_index = SmiSub(smi_index.value(), SmiConstant(Smi::FromInt(1)));
+  StoreContextElement(native_context, Context::MATH_RANDOM_INDEX_INDEX,
+                      new_smi_index);
 
   // Load and return next cached random number.
-  Node* array = assembler->LoadContextElement(native_context,
-                                              Context::MATH_RANDOM_CACHE_INDEX);
-  Node* random = assembler->LoadFixedDoubleArrayElement(
-      array, new_smi_index, MachineType::Float64(), 0,
-      CodeStubAssembler::SMI_PARAMETERS);
-  assembler->Return(assembler->AllocateHeapNumberWithValue(random));
+  Node* array =
+      LoadContextElement(native_context, Context::MATH_RANDOM_CACHE_INDEX);
+  Node* random = LoadFixedDoubleArrayElement(
+      array, new_smi_index, MachineType::Float64(), 0, SMI_PARAMETERS);
+  Return(AllocateHeapNumberWithValue(random));
 }
 
 // ES6 section 20.2.2.28 Math.round ( x )
-void Builtins::Generate_MathRound(CodeStubAssembler* assembler) {
-  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Round);
+TF_BUILTIN(MathRound, MathBuiltinsAssembler) {
+  MathRoundingOperation(&CodeStubAssembler::Float64Round);
 }
 
 // ES6 section 20.2.2.29 Math.sign ( x )
-void Builtins::Generate_MathSign(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  using compiler::Node;
-
+TF_BUILTIN(MathSign, CodeStubAssembler) {
   // Convert the {x} value to a Number.
-  Node* x = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+  Node* x = Parameter(1);
+  Node* context = Parameter(4);
+  Node* x_value = TruncateTaggedToFloat64(context, x);
 
   // Return -1 if {x} is negative, 1 if {x} is positive, or {x} itself.
-  Label if_xisnegative(assembler), if_xispositive(assembler);
-  assembler->GotoIf(
-      assembler->Float64LessThan(x_value, assembler->Float64Constant(0.0)),
-      &if_xisnegative);
-  assembler->GotoIf(
-      assembler->Float64LessThan(assembler->Float64Constant(0.0), x_value),
-      &if_xispositive);
-  assembler->Return(assembler->ChangeFloat64ToTagged(x_value));
+  Label if_xisnegative(this), if_xispositive(this);
+  GotoIf(Float64LessThan(x_value, Float64Constant(0.0)), &if_xisnegative);
+  GotoIf(Float64LessThan(Float64Constant(0.0), x_value), &if_xispositive);
+  Return(ChangeFloat64ToTagged(x_value));
 
-  assembler->Bind(&if_xisnegative);
-  assembler->Return(assembler->SmiConstant(Smi::FromInt(-1)));
+  Bind(&if_xisnegative);
+  Return(SmiConstant(Smi::FromInt(-1)));
 
-  assembler->Bind(&if_xispositive);
-  assembler->Return(assembler->SmiConstant(Smi::FromInt(1)));
+  Bind(&if_xispositive);
+  Return(SmiConstant(Smi::FromInt(1)));
 }
 
 // ES6 section 20.2.2.30 Math.sin ( x )
-void Builtins::Generate_MathSin(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sin);
+TF_BUILTIN(MathSin, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Sin);
 }
 
 // ES6 section 20.2.2.31 Math.sinh ( x )
-void Builtins::Generate_MathSinh(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sinh);
+TF_BUILTIN(MathSinh, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Sinh);
 }
 
 // ES6 section 20.2.2.32 Math.sqrt ( x )
-void Builtins::Generate_MathSqrt(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sqrt);
+TF_BUILTIN(MathSqrt, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Sqrt);
 }
 
 // ES6 section 20.2.2.33 Math.tan ( x )
-void Builtins::Generate_MathTan(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Tan);
+TF_BUILTIN(MathTan, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Tan);
 }
 
 // ES6 section 20.2.2.34 Math.tanh ( x )
-void Builtins::Generate_MathTanh(CodeStubAssembler* assembler) {
-  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Tanh);
+TF_BUILTIN(MathTanh, MathBuiltinsAssembler) {
+  MathUnaryOperation(&CodeStubAssembler::Float64Tanh);
 }
 
 // ES6 section 20.2.2.35 Math.trunc ( x )
-void Builtins::Generate_MathTrunc(CodeStubAssembler* assembler) {
-  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Trunc);
+TF_BUILTIN(MathTrunc, MathBuiltinsAssembler) {
+  MathRoundingOperation(&CodeStubAssembler::Float64Trunc);
 }
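
Every TF_BUILTIN body in the block above is a one-liner that forwards a CodeStubAssembler member-function pointer to a shared generator (MathUnaryOperation or MathRoundingOperation). A minimal C++ sketch of that dispatch pattern, with invented names:

#include <cmath>

class Assembler {
 public:
  double Sin(double x) { return std::sin(x); }
  double Tan(double x) { return std::tan(x); }

  using UnaryOp = double (Assembler::*)(double);

  // The analogue of MathUnaryOperation: one shared body, parameterized
  // on which float64 operation to emit.
  double Unary(UnaryOp op, double x) { return (this->*op)(x); }
};

// Each builtin then shrinks to a line that merely names the operation.
double GenerateSin(Assembler* a, double x) { return a->Unary(&Assembler::Sin, x); }
double GenerateTan(Assembler* a, double x) { return a->Unary(&Assembler::Tan, x); }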
 
 void Builtins::Generate_MathMax(MacroAssembler* masm) {
diff --git a/src/builtins/builtins-number.cc b/src/builtins/builtins-number.cc
index 3e2bc55..90f54ef 100644
--- a/src/builtins/builtins-number.cc
+++ b/src/builtins/builtins-number.cc
@@ -5,253 +5,254 @@
 #include "src/builtins/builtins-utils.h"
 #include "src/builtins/builtins.h"
 #include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
+class NumberBuiltinsAssembler : public CodeStubAssembler {
+ public:
+  explicit NumberBuiltinsAssembler(compiler::CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
+
+ protected:
+  template <Signedness signed_result = kSigned>
+  void BitwiseOp(std::function<Node*(Node* lhs, Node* rhs)> body) {
+    Node* left = Parameter(0);
+    Node* right = Parameter(1);
+    Node* context = Parameter(2);
+
+    Node* lhs_value = TruncateTaggedToWord32(context, left);
+    Node* rhs_value = TruncateTaggedToWord32(context, right);
+    Node* value = body(lhs_value, rhs_value);
+    Node* result = signed_result == kSigned ? ChangeInt32ToTagged(value)
+                                            : ChangeUint32ToTagged(value);
+    Return(result);
+  }
+
+  template <Signedness signed_result = kSigned>
+  void BitwiseShiftOp(std::function<Node*(Node* lhs, Node* shift_count)> body) {
+    BitwiseOp<signed_result>([this, body](Node* lhs, Node* rhs) {
+      Node* shift_count = Word32And(rhs, Int32Constant(0x1f));
+      return body(lhs, shift_count);
+    });
+  }
+
+  void RelationalComparisonBuiltin(RelationalComparisonMode mode) {
+    Node* lhs = Parameter(0);
+    Node* rhs = Parameter(1);
+    Node* context = Parameter(2);
+
+    Return(RelationalComparison(mode, lhs, rhs, context));
+  }
+};
+
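
The Signedness parameter on BitwiseOp only affects how the final word32 is tagged; the same 32-bit machine operation serves both signed and unsigned JavaScript operators. A sketch of the distinction it encodes, assuming it mirrors the difference between '>>' and '>>>' (plain C++ stand-ins, not V8 code):

#include <cstdint>

// kSigned path ('>>'): 5-bit shift count, result read back as a signed
// int32 (ChangeInt32ToTagged). Arithmetic shift on mainstream toolchains.
int32_t SarSemantics(int32_t lhs, int32_t rhs) {
  return lhs >> (rhs & 0x1f);
}

// kUnsigned path ('>>>'): identical masking, but the result is tagged as
// unsigned (ChangeUint32ToTagged), which is why -1 >>> 0 yields 4294967295.
uint32_t ShrSemantics(int32_t lhs, int32_t rhs) {
  return static_cast<uint32_t>(lhs) >> (rhs & 0x1f);
}
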
 // -----------------------------------------------------------------------------
 // ES6 section 20.1 Number Objects
 
 // ES6 section 20.1.2.2 Number.isFinite ( number )
-void Builtins::Generate_NumberIsFinite(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
+  Node* number = Parameter(1);
 
-  Node* number = assembler->Parameter(1);
-
-  Label return_true(assembler), return_false(assembler);
+  Label return_true(this), return_false(this);
 
   // Check if {number} is a Smi.
-  assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
+  GotoIf(TaggedIsSmi(number), &return_true);
 
   // Check if {number} is a HeapNumber.
-  assembler->GotoUnless(
-      assembler->WordEqual(assembler->LoadMap(number),
-                           assembler->HeapNumberMapConstant()),
-      &return_false);
+  GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
 
   // Check if {number} contains a finite, non-NaN value.
-  Node* number_value = assembler->LoadHeapNumberValue(number);
-  assembler->BranchIfFloat64IsNaN(
-      assembler->Float64Sub(number_value, number_value), &return_false,
-      &return_true);
+  Node* number_value = LoadHeapNumberValue(number);
+  BranchIfFloat64IsNaN(Float64Sub(number_value, number_value), &return_false,
+                       &return_true);
 
-  assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  Bind(&return_true);
+  Return(BooleanConstant(true));
 
-  assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
+  Bind(&return_false);
+  Return(BooleanConstant(false));
 }
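
The self-subtraction above is the entire finiteness test: x - x is exactly 0.0 for every finite x and NaN when x is NaN or an infinity, so a single NaN check on the difference rules out all three non-finite values. As a sketch:

bool IsFiniteViaSelfSub(double x) {
  double d = x - x;  // 0.0 for finite x; NaN for NaN, +Infinity, -Infinity
  return !(d != d);  // d != d is the NaN test behind BranchIfFloat64IsNaN
}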
 
 // ES6 section 20.1.2.3 Number.isInteger ( number )
-void Builtins::Generate_NumberIsInteger(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+TF_BUILTIN(NumberIsInteger, CodeStubAssembler) {
+  Node* number = Parameter(1);
 
-  Node* number = assembler->Parameter(1);
-
-  Label return_true(assembler), return_false(assembler);
+  Label return_true(this), return_false(this);
 
   // Check if {number} is a Smi.
-  assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
+  GotoIf(TaggedIsSmi(number), &return_true);
 
   // Check if {number} is a HeapNumber.
-  assembler->GotoUnless(
-      assembler->WordEqual(assembler->LoadMap(number),
-                           assembler->HeapNumberMapConstant()),
-      &return_false);
+  GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
 
   // Load the actual value of {number}.
-  Node* number_value = assembler->LoadHeapNumberValue(number);
+  Node* number_value = LoadHeapNumberValue(number);
 
   // Truncate the value of {number} to an integer (or an infinity).
-  Node* integer = assembler->Float64Trunc(number_value);
+  Node* integer = Float64Trunc(number_value);
 
   // Check if {number}'s value matches the integer (ruling out the infinities).
-  assembler->Branch(
-      assembler->Float64Equal(assembler->Float64Sub(number_value, integer),
-                              assembler->Float64Constant(0.0)),
-      &return_true, &return_false);
+  Branch(Float64Equal(Float64Sub(number_value, integer), Float64Constant(0.0)),
+         &return_true, &return_false);
 
-  assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  Bind(&return_true);
+  Return(BooleanConstant(true));
 
-  assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
+  Bind(&return_false);
+  Return(BooleanConstant(false));
 }
 
 // ES6 section 20.1.2.4 Number.isNaN ( number )
-void Builtins::Generate_NumberIsNaN(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+TF_BUILTIN(NumberIsNaN, CodeStubAssembler) {
+  Node* number = Parameter(1);
 
-  Node* number = assembler->Parameter(1);
-
-  Label return_true(assembler), return_false(assembler);
+  Label return_true(this), return_false(this);
 
   // Check if {number} is a Smi.
-  assembler->GotoIf(assembler->TaggedIsSmi(number), &return_false);
+  GotoIf(TaggedIsSmi(number), &return_false);
 
   // Check if {number} is a HeapNumber.
-  assembler->GotoUnless(
-      assembler->WordEqual(assembler->LoadMap(number),
-                           assembler->HeapNumberMapConstant()),
-      &return_false);
+  GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
 
   // Check if {number} contains a NaN value.
-  Node* number_value = assembler->LoadHeapNumberValue(number);
-  assembler->BranchIfFloat64IsNaN(number_value, &return_true, &return_false);
+  Node* number_value = LoadHeapNumberValue(number);
+  BranchIfFloat64IsNaN(number_value, &return_true, &return_false);
 
-  assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  Bind(&return_true);
+  Return(BooleanConstant(true));
 
-  assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
+  Bind(&return_false);
+  Return(BooleanConstant(false));
 }
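
BranchIfFloat64IsNaN itself reduces to the classic IEEE-754 self-comparison, since NaN is the only value that compares unequal to itself. A one-line sketch (not the V8 helper):

bool IsNaNViaSelfCompare(double x) { return x != x; }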
 
 // ES6 section 20.1.2.5 Number.isSafeInteger ( number )
-void Builtins::Generate_NumberIsSafeInteger(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) {
+  Node* number = Parameter(1);
 
-  Node* number = assembler->Parameter(1);
-
-  Label return_true(assembler), return_false(assembler);
+  Label return_true(this), return_false(this);
 
   // Check if {number} is a Smi.
-  assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
+  GotoIf(TaggedIsSmi(number), &return_true);
 
   // Check if {number} is a HeapNumber.
-  assembler->GotoUnless(
-      assembler->WordEqual(assembler->LoadMap(number),
-                           assembler->HeapNumberMapConstant()),
-      &return_false);
+  GotoIfNot(IsHeapNumberMap(LoadMap(number)), &return_false);
 
   // Load the actual value of {number}.
-  Node* number_value = assembler->LoadHeapNumberValue(number);
+  Node* number_value = LoadHeapNumberValue(number);
 
   // Truncate the value of {number} to an integer (or an infinity).
-  Node* integer = assembler->Float64Trunc(number_value);
+  Node* integer = Float64Trunc(number_value);
 
   // Check if {number}'s value matches the integer (ruling out the infinities).
-  assembler->GotoUnless(
-      assembler->Float64Equal(assembler->Float64Sub(number_value, integer),
-                              assembler->Float64Constant(0.0)),
+  GotoIfNot(
+      Float64Equal(Float64Sub(number_value, integer), Float64Constant(0.0)),
       &return_false);
 
   // Check if the {integer} value is in safe integer range.
-  assembler->Branch(assembler->Float64LessThanOrEqual(
-                        assembler->Float64Abs(integer),
-                        assembler->Float64Constant(kMaxSafeInteger)),
-                    &return_true, &return_false);
+  Branch(Float64LessThanOrEqual(Float64Abs(integer),
+                                Float64Constant(kMaxSafeInteger)),
+         &return_true, &return_false);
 
-  assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  Bind(&return_true);
+  Return(BooleanConstant(true));
 
-  assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
+  Bind(&return_false);
+  Return(BooleanConstant(false));
 }
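
Two checks carry the whole predicate for a heap number: trunc(x) == x rejects fractional values and, because Infinity minus Infinity is NaN, the infinities as well; the magnitude bound then enforces the safe-integer limit. A plain-C++ sketch, assuming kMaxSafeInteger is 2^53 - 1 as in the spec:

#include <cmath>

bool IsSafeIntegerSemantics(double x) {
  double integer = std::trunc(x);
  // Fails for fractions (nonzero difference) and for NaN/infinities
  // (the subtraction yields NaN, which compares unequal to 0.0).
  if (!(x - integer == 0.0)) return false;
  return std::abs(integer) <= 9007199254740991.0;  // kMaxSafeInteger, 2^53 - 1
}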
 
 // ES6 section 20.1.2.12 Number.parseFloat ( string )
-void Builtins::Generate_NumberParseFloat(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Node* context = assembler->Parameter(4);
+TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
+  Node* context = Parameter(4);
 
   // We might need to loop once for ToString conversion.
-  Variable var_input(assembler, MachineRepresentation::kTagged);
-  Label loop(assembler, &var_input);
-  var_input.Bind(assembler->Parameter(1));
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  Variable var_input(this, MachineRepresentation::kTagged);
+  Label loop(this, &var_input);
+  var_input.Bind(Parameter(1));
+  Goto(&loop);
+  Bind(&loop);
   {
     // Load the current {input} value.
     Node* input = var_input.value();
 
     // Check if the {input} is a HeapObject or a Smi.
-    Label if_inputissmi(assembler), if_inputisnotsmi(assembler);
-    assembler->Branch(assembler->TaggedIsSmi(input), &if_inputissmi,
-                      &if_inputisnotsmi);
+    Label if_inputissmi(this), if_inputisnotsmi(this);
+    Branch(TaggedIsSmi(input), &if_inputissmi, &if_inputisnotsmi);
 
-    assembler->Bind(&if_inputissmi);
+    Bind(&if_inputissmi);
     {
       // The {input} is already a Number; no need to do anything.
-      assembler->Return(input);
+      Return(input);
     }
 
-    assembler->Bind(&if_inputisnotsmi);
+    Bind(&if_inputisnotsmi);
     {
       // The {input} is a HeapObject, check if it's already a String.
-      Label if_inputisstring(assembler), if_inputisnotstring(assembler);
-      Node* input_map = assembler->LoadMap(input);
-      Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
-      assembler->Branch(assembler->IsStringInstanceType(input_instance_type),
-                        &if_inputisstring, &if_inputisnotstring);
+      Label if_inputisstring(this), if_inputisnotstring(this);
+      Node* input_map = LoadMap(input);
+      Node* input_instance_type = LoadMapInstanceType(input_map);
+      Branch(IsStringInstanceType(input_instance_type), &if_inputisstring,
+             &if_inputisnotstring);
 
-      assembler->Bind(&if_inputisstring);
+      Bind(&if_inputisstring);
       {
         // The {input} is already a String, check if {input} contains
         // a cached array index.
-        Label if_inputcached(assembler), if_inputnotcached(assembler);
-        Node* input_hash = assembler->LoadNameHashField(input);
-        Node* input_bit = assembler->Word32And(
-            input_hash,
-            assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
-        assembler->Branch(
-            assembler->Word32Equal(input_bit, assembler->Int32Constant(0)),
-            &if_inputcached, &if_inputnotcached);
+        Label if_inputcached(this), if_inputnotcached(this);
+        Node* input_hash = LoadNameHashField(input);
+        Node* input_bit = Word32And(
+            input_hash, Int32Constant(String::kContainsCachedArrayIndexMask));
+        Branch(Word32Equal(input_bit, Int32Constant(0)), &if_inputcached,
+               &if_inputnotcached);
 
-        assembler->Bind(&if_inputcached);
+        Bind(&if_inputcached);
         {
           // Just return the {input}'s cached array index.
           Node* input_array_index =
-              assembler->DecodeWordFromWord32<String::ArrayIndexValueBits>(
-                  input_hash);
-          assembler->Return(assembler->SmiTag(input_array_index));
+              DecodeWordFromWord32<String::ArrayIndexValueBits>(input_hash);
+          Return(SmiTag(input_array_index));
         }
 
-        assembler->Bind(&if_inputnotcached);
+        Bind(&if_inputnotcached);
         {
           // Need to fall back to the runtime to convert {input} to double.
-          assembler->Return(assembler->CallRuntime(Runtime::kStringParseFloat,
-                                                   context, input));
+          Return(CallRuntime(Runtime::kStringParseFloat, context, input));
         }
       }
 
-      assembler->Bind(&if_inputisnotstring);
+      Bind(&if_inputisnotstring);
       {
         // The {input} is neither a String nor a Smi, check for HeapNumber.
-        Label if_inputisnumber(assembler),
-            if_inputisnotnumber(assembler, Label::kDeferred);
-        assembler->Branch(
-            assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
-            &if_inputisnumber, &if_inputisnotnumber);
+        Label if_inputisnumber(this),
+            if_inputisnotnumber(this, Label::kDeferred);
+        Branch(IsHeapNumberMap(input_map), &if_inputisnumber,
+               &if_inputisnotnumber);
 
-        assembler->Bind(&if_inputisnumber);
+        Bind(&if_inputisnumber);
         {
           // The {input} is already a Number; take care of -0.
-          Label if_inputiszero(assembler), if_inputisnotzero(assembler);
-          Node* input_value = assembler->LoadHeapNumberValue(input);
-          assembler->Branch(assembler->Float64Equal(
-                                input_value, assembler->Float64Constant(0.0)),
-                            &if_inputiszero, &if_inputisnotzero);
+          Label if_inputiszero(this), if_inputisnotzero(this);
+          Node* input_value = LoadHeapNumberValue(input);
+          Branch(Float64Equal(input_value, Float64Constant(0.0)),
+                 &if_inputiszero, &if_inputisnotzero);
 
-          assembler->Bind(&if_inputiszero);
-          assembler->Return(assembler->SmiConstant(0));
+          Bind(&if_inputiszero);
+          Return(SmiConstant(0));
 
-          assembler->Bind(&if_inputisnotzero);
-          assembler->Return(input);
+          Bind(&if_inputisnotzero);
+          Return(input);
         }
 
-        assembler->Bind(&if_inputisnotnumber);
+        Bind(&if_inputisnotnumber);
         {
           // Need to convert the {input} to String first.
           // TODO(bmeurer): This could be more efficient if necessary.
-          Callable callable = CodeFactory::ToString(assembler->isolate());
-          var_input.Bind(assembler->CallStub(callable, context, input));
-          assembler->Goto(&loop);
+          Callable callable = CodeFactory::ToString(isolate());
+          var_input.Bind(CallStub(callable, context, input));
+          Goto(&loop);
         }
       }
     }
@@ -259,106 +260,86 @@
 }
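
The string fast path above relies on V8 caching the parsed array index of index-like strings inside their name hash field, letting parseFloat skip parsing entirely. A sketch of the bit test and decode with an invented field layout (the real one is String::ArrayIndexValueBits; the constants below are illustrative):

#include <cstdint>

constexpr uint32_t kContainsIndexMask = 1u << 0;  // invented layout
constexpr uint32_t kIndexShift = 2;               // invented layout
constexpr uint32_t kIndexBits = 24;               // invented layout

// Mirrors the branch above: a *cleared* mask bit means an index is cached.
bool TryCachedArrayIndex(uint32_t hash_field, uint32_t* index_out) {
  if ((hash_field & kContainsIndexMask) != 0) return false;
  *index_out = (hash_field >> kIndexShift) & ((1u << kIndexBits) - 1);
  return true;
}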
 
 // ES6 section 20.1.2.13 Number.parseInt ( string, radix )
-void Builtins::Generate_NumberParseInt(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-
-  Node* input = assembler->Parameter(1);
-  Node* radix = assembler->Parameter(2);
-  Node* context = assembler->Parameter(5);
+TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
+  Node* input = Parameter(1);
+  Node* radix = Parameter(2);
+  Node* context = Parameter(5);
 
   // Check if {radix} is treated as 10 (i.e. undefined, 0 or 10).
-  Label if_radix10(assembler), if_generic(assembler, Label::kDeferred);
-  assembler->GotoIf(assembler->WordEqual(radix, assembler->UndefinedConstant()),
-                    &if_radix10);
-  assembler->GotoIf(
-      assembler->WordEqual(radix, assembler->SmiConstant(Smi::FromInt(10))),
-      &if_radix10);
-  assembler->GotoIf(
-      assembler->WordEqual(radix, assembler->SmiConstant(Smi::FromInt(0))),
-      &if_radix10);
-  assembler->Goto(&if_generic);
+  Label if_radix10(this), if_generic(this, Label::kDeferred);
+  GotoIf(WordEqual(radix, UndefinedConstant()), &if_radix10);
+  GotoIf(WordEqual(radix, SmiConstant(Smi::FromInt(10))), &if_radix10);
+  GotoIf(WordEqual(radix, SmiConstant(Smi::FromInt(0))), &if_radix10);
+  Goto(&if_generic);
 
-  assembler->Bind(&if_radix10);
+  Bind(&if_radix10);
   {
     // Check if we can avoid the ToString conversion on {input}.
-    Label if_inputissmi(assembler), if_inputisheapnumber(assembler),
-        if_inputisstring(assembler);
-    assembler->GotoIf(assembler->TaggedIsSmi(input), &if_inputissmi);
-    Node* input_map = assembler->LoadMap(input);
-    assembler->GotoIf(
-        assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
-        &if_inputisheapnumber);
-    Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
-    assembler->Branch(assembler->IsStringInstanceType(input_instance_type),
-                      &if_inputisstring, &if_generic);
+    Label if_inputissmi(this), if_inputisheapnumber(this),
+        if_inputisstring(this);
+    GotoIf(TaggedIsSmi(input), &if_inputissmi);
+    Node* input_map = LoadMap(input);
+    GotoIf(IsHeapNumberMap(input_map), &if_inputisheapnumber);
+    Node* input_instance_type = LoadMapInstanceType(input_map);
+    Branch(IsStringInstanceType(input_instance_type), &if_inputisstring,
+           &if_generic);
 
-    assembler->Bind(&if_inputissmi);
+    Bind(&if_inputissmi);
     {
       // Just return the {input}.
-      assembler->Return(input);
+      Return(input);
     }
 
-    assembler->Bind(&if_inputisheapnumber);
+    Bind(&if_inputisheapnumber);
     {
       // Check if the {input} value is in Signed32 range.
-      Label if_inputissigned32(assembler);
-      Node* input_value = assembler->LoadHeapNumberValue(input);
-      Node* input_value32 = assembler->TruncateFloat64ToWord32(input_value);
-      assembler->GotoIf(
-          assembler->Float64Equal(
-              input_value, assembler->ChangeInt32ToFloat64(input_value32)),
-          &if_inputissigned32);
+      Label if_inputissigned32(this);
+      Node* input_value = LoadHeapNumberValue(input);
+      Node* input_value32 = TruncateFloat64ToWord32(input_value);
+      GotoIf(Float64Equal(input_value, ChangeInt32ToFloat64(input_value32)),
+             &if_inputissigned32);
 
       // Check if the absolute {input} value is in the ]0.01,1e9[ range.
-      Node* input_value_abs = assembler->Float64Abs(input_value);
+      Node* input_value_abs = Float64Abs(input_value);
 
-      assembler->GotoUnless(
-          assembler->Float64LessThan(input_value_abs,
-                                     assembler->Float64Constant(1e9)),
-          &if_generic);
-      assembler->Branch(assembler->Float64LessThan(
-                            assembler->Float64Constant(0.01), input_value_abs),
-                        &if_inputissigned32, &if_generic);
+      GotoIfNot(Float64LessThan(input_value_abs, Float64Constant(1e9)),
+                &if_generic);
+      Branch(Float64LessThan(Float64Constant(0.01), input_value_abs),
+             &if_inputissigned32, &if_generic);
 
       // Return the truncated int32 value as a tagged result.
-      assembler->Bind(&if_inputissigned32);
-      Node* result = assembler->ChangeInt32ToTagged(input_value32);
-      assembler->Return(result);
+      Bind(&if_inputissigned32);
+      Node* result = ChangeInt32ToTagged(input_value32);
+      Return(result);
     }
 
-    assembler->Bind(&if_inputisstring);
+    Bind(&if_inputisstring);
     {
       // Check if the String {input} has a cached array index.
-      Node* input_hash = assembler->LoadNameHashField(input);
-      Node* input_bit = assembler->Word32And(
-          input_hash,
-          assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
-      assembler->GotoIf(
-          assembler->Word32NotEqual(input_bit, assembler->Int32Constant(0)),
-          &if_generic);
+      Node* input_hash = LoadNameHashField(input);
+      Node* input_bit = Word32And(
+          input_hash, Int32Constant(String::kContainsCachedArrayIndexMask));
+      GotoIf(Word32NotEqual(input_bit, Int32Constant(0)), &if_generic);
 
       // Return the cached array index as result.
       Node* input_index =
-          assembler->DecodeWordFromWord32<String::ArrayIndexValueBits>(
-              input_hash);
-      Node* result = assembler->SmiTag(input_index);
-      assembler->Return(result);
+          DecodeWordFromWord32<String::ArrayIndexValueBits>(input_hash);
+      Node* result = SmiTag(input_index);
+      Return(result);
     }
   }
 
-  assembler->Bind(&if_generic);
+  Bind(&if_generic);
   {
-    Node* result =
-        assembler->CallRuntime(Runtime::kStringParseInt, context, input, radix);
-    assembler->Return(result);
+    Node* result = CallRuntime(Runtime::kStringParseInt, context, input, radix);
+    Return(result);
   }
 }
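
For radix 10, a heap number can skip the ToString round-trip when it is either exactly representable as an int32, or its magnitude falls in ]0.01, 1e9[, where int32 truncation agrees with parseInt(ToString(x), 10) (the lower bound excludes values that stringify in exponential notation). A sketch of that decision; the explicit range guard is an addition here so the C++ cast stays well-defined, which TruncateFloat64ToWord32 handles internally:

#include <cmath>
#include <cstdint>

bool TryFastParseInt10(double value, int32_t* out) {
  // NaN fails both bound checks and falls through to the slow path.
  if (value >= -2147483648.0 && value <= 2147483647.0) {
    int32_t value32 = static_cast<int32_t>(value);
    if (static_cast<double>(value32) == value) {  // exactly Signed32
      *out = value32;
      return true;
    }
    double value_abs = std::abs(value);
    if (0.01 < value_abs && value_abs < 1e9) {  // the ]0.01,1e9[ window
      *out = value32;  // truncation toward zero matches parseInt here
      return true;
    }
  }
  return false;  // otherwise: Runtime::kStringParseInt
}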
 
 // ES6 section 20.1.3.2 Number.prototype.toExponential ( fractionDigits )
 BUILTIN(NumberPrototypeToExponential) {
   HandleScope scope(isolate);
-  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> value = args.at(0);
   Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
 
   // Unwrap the receiver {value}.
@@ -401,7 +382,7 @@
 // ES6 section 20.1.3.3 Number.prototype.toFixed ( fractionDigits )
 BUILTIN(NumberPrototypeToFixed) {
   HandleScope scope(isolate);
-  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> value = args.at(0);
   Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
 
   // Unwrap the receiver {value}.
@@ -444,7 +425,7 @@
 // ES6 section 20.1.3.4 Number.prototype.toLocaleString ( [ r1 [ , r2 ] ] )
 BUILTIN(NumberPrototypeToLocaleString) {
   HandleScope scope(isolate);
-  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> value = args.at(0);
 
   // Unwrap the receiver {value}.
   if (value->IsJSValue()) {
@@ -464,7 +445,7 @@
 // ES6 section 20.1.3.5 Number.prototype.toPrecision ( precision )
 BUILTIN(NumberPrototypeToPrecision) {
   HandleScope scope(isolate);
-  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> value = args.at(0);
   Handle<Object> precision = args.atOrUndefined(isolate, 1);
 
   // Unwrap the receiver {value}.
@@ -508,7 +489,7 @@
 // ES6 section 20.1.3.6 Number.prototype.toString ( [ radix ] )
 BUILTIN(NumberPrototypeToString) {
   HandleScope scope(isolate);
-  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> value = args.at(0);
   Handle<Object> radix = args.atOrUndefined(isolate, 1);
 
   // Unwrap the receiver {value}.
@@ -543,7 +524,8 @@
   }
 
   // Fast case where the result is a one-character string.
-  if (IsUint32Double(value_number) && value_number < radix_number) {
+  if ((IsUint32Double(value_number) && value_number < radix_number) ||
+      value_number == -0.0) {
     // Character array used for conversion.
     static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
     return *isolate->factory()->LookupSingleCharacterStringFromCode(
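
The added value_number == -0.0 clause exists because -0 compares equal to +0 yet (assuming IsUint32Double rejects the minus-zero bit pattern) fails the first test, while (-0).toString(radix) must still produce "0" for any radix. Routing it into the table works because the uint32 cast collapses -0.0 to index 0; a sketch:

#include <cstdint>

// Illustrative only: -0.0 converts to uint32 0 and so selects '0' from
// the same table used for the other single-digit results.
char SingleDigitChar(double value) {
  static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
  return kCharTable[static_cast<uint32_t>(value)];
}
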
@@ -564,342 +546,315 @@
 }
 
 // ES6 section 20.1.3.7 Number.prototype.valueOf ( )
-void Builtins::Generate_NumberPrototypeValueOf(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
+TF_BUILTIN(NumberPrototypeValueOf, CodeStubAssembler) {
+  Node* receiver = Parameter(0);
+  Node* context = Parameter(3);
 
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
-
-  Node* result = assembler->ToThisValue(
-      context, receiver, PrimitiveType::kNumber, "Number.prototype.valueOf");
-  assembler->Return(result);
+  Node* result = ToThisValue(context, receiver, PrimitiveType::kNumber,
+                             "Number.prototype.valueOf");
+  Return(result);
 }
 
-// static
-void Builtins::Generate_Add(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Node* left = assembler->Parameter(0);
-  Node* right = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
+TF_BUILTIN(Add, CodeStubAssembler) {
+  Node* left = Parameter(0);
+  Node* right = Parameter(1);
+  Node* context = Parameter(2);
 
   // Shared entry for floating point addition.
-  Label do_fadd(assembler);
-  Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
-      var_fadd_rhs(assembler, MachineRepresentation::kFloat64);
+  Label do_fadd(this);
+  Variable var_fadd_lhs(this, MachineRepresentation::kFloat64),
+      var_fadd_rhs(this, MachineRepresentation::kFloat64);
 
   // We might need to loop several times due to ToPrimitive, ToString and/or
   // ToNumber conversions.
-  Variable var_lhs(assembler, MachineRepresentation::kTagged),
-      var_rhs(assembler, MachineRepresentation::kTagged),
-      var_result(assembler, MachineRepresentation::kTagged);
+  Variable var_lhs(this, MachineRepresentation::kTagged),
+      var_rhs(this, MachineRepresentation::kTagged),
+      var_result(this, MachineRepresentation::kTagged);
   Variable* loop_vars[2] = {&var_lhs, &var_rhs};
-  Label loop(assembler, 2, loop_vars), end(assembler),
-      string_add_convert_left(assembler, Label::kDeferred),
-      string_add_convert_right(assembler, Label::kDeferred);
+  Label loop(this, 2, loop_vars), end(this),
+      string_add_convert_left(this, Label::kDeferred),
+      string_add_convert_right(this, Label::kDeferred);
   var_lhs.Bind(left);
   var_rhs.Bind(right);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  Goto(&loop);
+  Bind(&loop);
   {
     // Load the current {lhs} and {rhs} values.
     Node* lhs = var_lhs.value();
     Node* rhs = var_rhs.value();
 
     // Check if the {lhs} is a Smi or a HeapObject.
-    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
-    assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi,
-                      &if_lhsisnotsmi);
+    Label if_lhsissmi(this), if_lhsisnotsmi(this);
+    Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
 
-    assembler->Bind(&if_lhsissmi);
+    Bind(&if_lhsissmi);
     {
       // Check if the {rhs} is also a Smi.
-      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-      assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
-                        &if_rhsisnotsmi);
+      Label if_rhsissmi(this), if_rhsisnotsmi(this);
+      Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
 
-      assembler->Bind(&if_rhsissmi);
+      Bind(&if_rhsissmi);
       {
         // Try fast Smi addition first.
-        Node* pair = assembler->IntPtrAddWithOverflow(
-            assembler->BitcastTaggedToWord(lhs),
-            assembler->BitcastTaggedToWord(rhs));
-        Node* overflow = assembler->Projection(1, pair);
+        Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(lhs),
+                                           BitcastTaggedToWord(rhs));
+        Node* overflow = Projection(1, pair);
 
         // Check if the Smi addition overflowed.
-        Label if_overflow(assembler), if_notoverflow(assembler);
-        assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+        Label if_overflow(this), if_notoverflow(this);
+        Branch(overflow, &if_overflow, &if_notoverflow);
 
-        assembler->Bind(&if_overflow);
+        Bind(&if_overflow);
         {
-          var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
-          var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
-          assembler->Goto(&do_fadd);
+          var_fadd_lhs.Bind(SmiToFloat64(lhs));
+          var_fadd_rhs.Bind(SmiToFloat64(rhs));
+          Goto(&do_fadd);
         }
 
-        assembler->Bind(&if_notoverflow);
-        var_result.Bind(assembler->BitcastWordToTaggedSigned(
-            assembler->Projection(0, pair)));
-        assembler->Goto(&end);
+        Bind(&if_notoverflow);
+        var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+        Goto(&end);
       }
 
-      assembler->Bind(&if_rhsisnotsmi);
+      Bind(&if_rhsisnotsmi);
       {
         // Load the map of {rhs}.
-        Node* rhs_map = assembler->LoadMap(rhs);
+        Node* rhs_map = LoadMap(rhs);
 
         // Check if the {rhs} is a HeapNumber.
-        Label if_rhsisnumber(assembler),
-            if_rhsisnotnumber(assembler, Label::kDeferred);
-        assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
-                          &if_rhsisnotnumber);
+        Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+        Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
 
-        assembler->Bind(&if_rhsisnumber);
+        Bind(&if_rhsisnumber);
         {
-          var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
-          var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
-          assembler->Goto(&do_fadd);
+          var_fadd_lhs.Bind(SmiToFloat64(lhs));
+          var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+          Goto(&do_fadd);
         }
 
-        assembler->Bind(&if_rhsisnotnumber);
+        Bind(&if_rhsisnotnumber);
         {
           // Load the instance type of {rhs}.
-          Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+          Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
 
           // Check if the {rhs} is a String.
-          Label if_rhsisstring(assembler, Label::kDeferred),
-              if_rhsisnotstring(assembler, Label::kDeferred);
-          assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
-                            &if_rhsisstring, &if_rhsisnotstring);
+          Label if_rhsisstring(this, Label::kDeferred),
+              if_rhsisnotstring(this, Label::kDeferred);
+          Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+                 &if_rhsisnotstring);
 
-          assembler->Bind(&if_rhsisstring);
+          Bind(&if_rhsisstring);
           {
             var_lhs.Bind(lhs);
             var_rhs.Bind(rhs);
-            assembler->Goto(&string_add_convert_left);
+            Goto(&string_add_convert_left);
           }
 
-          assembler->Bind(&if_rhsisnotstring);
+          Bind(&if_rhsisnotstring);
           {
             // Check if {rhs} is a JSReceiver.
-            Label if_rhsisreceiver(assembler, Label::kDeferred),
-                if_rhsisnotreceiver(assembler, Label::kDeferred);
-            assembler->Branch(
-                assembler->IsJSReceiverInstanceType(rhs_instance_type),
-                &if_rhsisreceiver, &if_rhsisnotreceiver);
+            Label if_rhsisreceiver(this, Label::kDeferred),
+                if_rhsisnotreceiver(this, Label::kDeferred);
+            Branch(IsJSReceiverInstanceType(rhs_instance_type),
+                   &if_rhsisreceiver, &if_rhsisnotreceiver);
 
-            assembler->Bind(&if_rhsisreceiver);
+            Bind(&if_rhsisreceiver);
             {
               // Convert {rhs} to a primitive first passing no hint.
               Callable callable =
-                  CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
-              var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-              assembler->Goto(&loop);
+                  CodeFactory::NonPrimitiveToPrimitive(isolate());
+              var_rhs.Bind(CallStub(callable, context, rhs));
+              Goto(&loop);
             }
 
-            assembler->Bind(&if_rhsisnotreceiver);
+            Bind(&if_rhsisnotreceiver);
             {
               // Convert {rhs} to a Number first.
-              Callable callable =
-                  CodeFactory::NonNumberToNumber(assembler->isolate());
-              var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-              assembler->Goto(&loop);
+              Callable callable = CodeFactory::NonNumberToNumber(isolate());
+              var_rhs.Bind(CallStub(callable, context, rhs));
+              Goto(&loop);
             }
           }
         }
       }
     }
 
-    assembler->Bind(&if_lhsisnotsmi);
+    Bind(&if_lhsisnotsmi);
     {
       // Load the map and instance type of {lhs}.
-      Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+      Node* lhs_instance_type = LoadInstanceType(lhs);
 
       // Check if {lhs} is a String.
-      Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
-      assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
-                        &if_lhsisstring, &if_lhsisnotstring);
+      Label if_lhsisstring(this), if_lhsisnotstring(this);
+      Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
+             &if_lhsisnotstring);
 
-      assembler->Bind(&if_lhsisstring);
+      Bind(&if_lhsisstring);
       {
         var_lhs.Bind(lhs);
         var_rhs.Bind(rhs);
-        assembler->Goto(&string_add_convert_right);
+        Goto(&string_add_convert_right);
       }
 
-      assembler->Bind(&if_lhsisnotstring);
+      Bind(&if_lhsisnotstring);
       {
         // Check if {rhs} is a Smi.
-        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-        assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
-                          &if_rhsisnotsmi);
+        Label if_rhsissmi(this), if_rhsisnotsmi(this);
+        Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
 
-        assembler->Bind(&if_rhsissmi);
+        Bind(&if_rhsissmi);
         {
           // Check if {lhs} is a Number.
-          Label if_lhsisnumber(assembler),
-              if_lhsisnotnumber(assembler, Label::kDeferred);
-          assembler->Branch(assembler->Word32Equal(
-                                lhs_instance_type,
-                                assembler->Int32Constant(HEAP_NUMBER_TYPE)),
-                            &if_lhsisnumber, &if_lhsisnotnumber);
+          Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
+          Branch(
+              Word32Equal(lhs_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+              &if_lhsisnumber, &if_lhsisnotnumber);
 
-          assembler->Bind(&if_lhsisnumber);
+          Bind(&if_lhsisnumber);
           {
             // The {lhs} is a HeapNumber, the {rhs} is a Smi, just add them.
-            var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
-            var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
-            assembler->Goto(&do_fadd);
+            var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
+            var_fadd_rhs.Bind(SmiToFloat64(rhs));
+            Goto(&do_fadd);
           }
 
-          assembler->Bind(&if_lhsisnotnumber);
+          Bind(&if_lhsisnotnumber);
           {
             // The {lhs} is neither a Number nor a String, and the {rhs} is a
             // Smi.
-            Label if_lhsisreceiver(assembler, Label::kDeferred),
-                if_lhsisnotreceiver(assembler, Label::kDeferred);
-            assembler->Branch(
-                assembler->IsJSReceiverInstanceType(lhs_instance_type),
-                &if_lhsisreceiver, &if_lhsisnotreceiver);
+            Label if_lhsisreceiver(this, Label::kDeferred),
+                if_lhsisnotreceiver(this, Label::kDeferred);
+            Branch(IsJSReceiverInstanceType(lhs_instance_type),
+                   &if_lhsisreceiver, &if_lhsisnotreceiver);
 
-            assembler->Bind(&if_lhsisreceiver);
+            Bind(&if_lhsisreceiver);
             {
               // Convert {lhs} to a primitive first passing no hint.
               Callable callable =
-                  CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
-              var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-              assembler->Goto(&loop);
+                  CodeFactory::NonPrimitiveToPrimitive(isolate());
+              var_lhs.Bind(CallStub(callable, context, lhs));
+              Goto(&loop);
             }
 
-            assembler->Bind(&if_lhsisnotreceiver);
+            Bind(&if_lhsisnotreceiver);
             {
               // Convert {lhs} to a Number first.
-              Callable callable =
-                  CodeFactory::NonNumberToNumber(assembler->isolate());
-              var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-              assembler->Goto(&loop);
+              Callable callable = CodeFactory::NonNumberToNumber(isolate());
+              var_lhs.Bind(CallStub(callable, context, lhs));
+              Goto(&loop);
             }
           }
         }
 
-        assembler->Bind(&if_rhsisnotsmi);
+        Bind(&if_rhsisnotsmi);
         {
           // Load the instance type of {rhs}.
-          Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+          Node* rhs_instance_type = LoadInstanceType(rhs);
 
           // Check if {rhs} is a String.
-          Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
-          assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
-                            &if_rhsisstring, &if_rhsisnotstring);
+          Label if_rhsisstring(this), if_rhsisnotstring(this);
+          Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+                 &if_rhsisnotstring);
 
-          assembler->Bind(&if_rhsisstring);
+          Bind(&if_rhsisstring);
           {
             var_lhs.Bind(lhs);
             var_rhs.Bind(rhs);
-            assembler->Goto(&string_add_convert_left);
+            Goto(&string_add_convert_left);
           }
 
-          assembler->Bind(&if_rhsisnotstring);
+          Bind(&if_rhsisnotstring);
           {
             // Check if {lhs} is a HeapNumber.
-            Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
-            assembler->Branch(assembler->Word32Equal(
-                                  lhs_instance_type,
-                                  assembler->Int32Constant(HEAP_NUMBER_TYPE)),
-                              &if_lhsisnumber, &if_lhsisnotnumber);
+            Label if_lhsisnumber(this), if_lhsisnotnumber(this);
+            Branch(
+                Word32Equal(lhs_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+                &if_lhsisnumber, &if_lhsisnotnumber);
 
-            assembler->Bind(&if_lhsisnumber);
+            Bind(&if_lhsisnumber);
             {
               // Check if {rhs} is also a HeapNumber.
-              Label if_rhsisnumber(assembler),
-                  if_rhsisnotnumber(assembler, Label::kDeferred);
-              assembler->Branch(assembler->Word32Equal(
-                                    rhs_instance_type,
-                                    assembler->Int32Constant(HEAP_NUMBER_TYPE)),
-                                &if_rhsisnumber, &if_rhsisnotnumber);
+              Label if_rhsisnumber(this),
+                  if_rhsisnotnumber(this, Label::kDeferred);
+              Branch(Word32Equal(rhs_instance_type,
+                                 Int32Constant(HEAP_NUMBER_TYPE)),
+                     &if_rhsisnumber, &if_rhsisnotnumber);
 
-              assembler->Bind(&if_rhsisnumber);
+              Bind(&if_rhsisnumber);
               {
                 // Perform a floating point addition.
-                var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
-                var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
-                assembler->Goto(&do_fadd);
+                var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
+                var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+                Goto(&do_fadd);
               }
 
-              assembler->Bind(&if_rhsisnotnumber);
+              Bind(&if_rhsisnotnumber);
               {
                 // Check if {rhs} is a JSReceiver.
-                Label if_rhsisreceiver(assembler, Label::kDeferred),
-                    if_rhsisnotreceiver(assembler, Label::kDeferred);
-                assembler->Branch(
-                    assembler->IsJSReceiverInstanceType(rhs_instance_type),
-                    &if_rhsisreceiver, &if_rhsisnotreceiver);
+                Label if_rhsisreceiver(this, Label::kDeferred),
+                    if_rhsisnotreceiver(this, Label::kDeferred);
+                Branch(IsJSReceiverInstanceType(rhs_instance_type),
+                       &if_rhsisreceiver, &if_rhsisnotreceiver);
 
-                assembler->Bind(&if_rhsisreceiver);
+                Bind(&if_rhsisreceiver);
                 {
                   // Convert {rhs} to a primitive first passing no hint.
-                  Callable callable = CodeFactory::NonPrimitiveToPrimitive(
-                      assembler->isolate());
-                  var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-                  assembler->Goto(&loop);
+                  Callable callable =
+                      CodeFactory::NonPrimitiveToPrimitive(isolate());
+                  var_rhs.Bind(CallStub(callable, context, rhs));
+                  Goto(&loop);
                 }
 
-                assembler->Bind(&if_rhsisnotreceiver);
+                Bind(&if_rhsisnotreceiver);
                 {
                   // Convert {rhs} to a Number first.
-                  Callable callable =
-                      CodeFactory::NonNumberToNumber(assembler->isolate());
-                  var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-                  assembler->Goto(&loop);
+                  Callable callable = CodeFactory::NonNumberToNumber(isolate());
+                  var_rhs.Bind(CallStub(callable, context, rhs));
+                  Goto(&loop);
                 }
               }
             }
 
-            assembler->Bind(&if_lhsisnotnumber);
+            Bind(&if_lhsisnotnumber);
             {
               // Check if {lhs} is a JSReceiver.
-              Label if_lhsisreceiver(assembler, Label::kDeferred),
-                  if_lhsisnotreceiver(assembler);
-              assembler->Branch(
-                  assembler->IsJSReceiverInstanceType(lhs_instance_type),
-                  &if_lhsisreceiver, &if_lhsisnotreceiver);
+              Label if_lhsisreceiver(this, Label::kDeferred),
+                  if_lhsisnotreceiver(this);
+              Branch(IsJSReceiverInstanceType(lhs_instance_type),
+                     &if_lhsisreceiver, &if_lhsisnotreceiver);
 
-              assembler->Bind(&if_lhsisreceiver);
+              Bind(&if_lhsisreceiver);
               {
                 // Convert {lhs} to a primitive first passing no hint.
                 Callable callable =
-                    CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
-                var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-                assembler->Goto(&loop);
+                    CodeFactory::NonPrimitiveToPrimitive(isolate());
+                var_lhs.Bind(CallStub(callable, context, lhs));
+                Goto(&loop);
               }
 
-              assembler->Bind(&if_lhsisnotreceiver);
+              Bind(&if_lhsisnotreceiver);
               {
                 // Check if {rhs} is a JSReceiver.
-                Label if_rhsisreceiver(assembler, Label::kDeferred),
-                    if_rhsisnotreceiver(assembler, Label::kDeferred);
-                assembler->Branch(
-                    assembler->IsJSReceiverInstanceType(rhs_instance_type),
-                    &if_rhsisreceiver, &if_rhsisnotreceiver);
+                Label if_rhsisreceiver(this, Label::kDeferred),
+                    if_rhsisnotreceiver(this, Label::kDeferred);
+                Branch(IsJSReceiverInstanceType(rhs_instance_type),
+                       &if_rhsisreceiver, &if_rhsisnotreceiver);
 
-                assembler->Bind(&if_rhsisreceiver);
+                Bind(&if_rhsisreceiver);
                 {
                   // Convert {rhs} to a primitive first passing no hint.
-                  Callable callable = CodeFactory::NonPrimitiveToPrimitive(
-                      assembler->isolate());
-                  var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-                  assembler->Goto(&loop);
+                  Callable callable =
+                      CodeFactory::NonPrimitiveToPrimitive(isolate());
+                  var_rhs.Bind(CallStub(callable, context, rhs));
+                  Goto(&loop);
                 }
 
-                assembler->Bind(&if_rhsisnotreceiver);
+                Bind(&if_rhsisnotreceiver);
                 {
                   // Convert {lhs} to a Number first.
-                  Callable callable =
-                      CodeFactory::NonNumberToNumber(assembler->isolate());
-                  var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-                  assembler->Goto(&loop);
+                  Callable callable = CodeFactory::NonNumberToNumber(isolate());
+                  var_lhs.Bind(CallStub(callable, context, lhs));
+                  Goto(&loop);
                 }
               }
             }
@@ -908,910 +863,755 @@
       }
     }
   }
-  assembler->Bind(&string_add_convert_left);
+  Bind(&string_add_convert_left);
+  {
+    // Convert {lhs} to a String first, then concatenate the resulting
+    // string with the String {rhs}.
+    Callable callable =
+        CodeFactory::StringAdd(isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
+    var_result.Bind(
+        CallStub(callable, context, var_lhs.value(), var_rhs.value()));
+    Goto(&end);
+  }
+
+  Bind(&string_add_convert_right);
   {
     // Convert {rhs} to a String first, then concatenate the String {lhs}
     // with the resulting string.
     Callable callable = CodeFactory::StringAdd(
-        assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
-    var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
-                                        var_rhs.value()));
-    assembler->Goto(&end);
+        isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
+    var_result.Bind(
+        CallStub(callable, context, var_lhs.value(), var_rhs.value()));
+    Goto(&end);
   }
 
-  assembler->Bind(&string_add_convert_right);
-  {
-    // Convert {lhs}, which is a Smi, to a String and concatenate the
-    // resulting string with the String {rhs}.
-    Callable callable = CodeFactory::StringAdd(
-        assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
-    var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
-                                        var_rhs.value()));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&do_fadd);
+  Bind(&do_fadd);
   {
     Node* lhs_value = var_fadd_lhs.value();
     Node* rhs_value = var_fadd_rhs.value();
-    Node* value = assembler->Float64Add(lhs_value, rhs_value);
-    Node* result = assembler->AllocateHeapNumberWithValue(value);
+    Node* value = Float64Add(lhs_value, rhs_value);
+    Node* result = AllocateHeapNumberWithValue(value);
     var_result.Bind(result);
-    assembler->Goto(&end);
+    Goto(&end);
   }
-  assembler->Bind(&end);
-  assembler->Return(var_result.value());
+  Bind(&end);
+  Return(var_result.value());
 }
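
The fast path at the top of Add sums the two tagged words directly: with a zero Smi tag bit, tag plus tag stays zero, so the machine-level sum is already a valid tagged Smi and only the overflow flag needs inspection. A sketch assuming 32-bit Smis with one tag bit (the GCC/Clang __builtin_add_overflow intrinsic stands in for IntPtrAddWithOverflow):

#include <cstdint>
#include <optional>

std::optional<int32_t> TaggedSmiAdd(int32_t lhs_tagged, int32_t rhs_tagged) {
  int32_t sum_tagged;
  if (__builtin_add_overflow(lhs_tagged, rhs_tagged, &sum_tagged)) {
    return std::nullopt;  // if_overflow: untag both and take the float64 path
  }
  return sum_tagged;  // if_notoverflow: already a correctly tagged Smi
}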
 
-void Builtins::Generate_Subtract(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Node* left = assembler->Parameter(0);
-  Node* right = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
+TF_BUILTIN(Subtract, CodeStubAssembler) {
+  Node* left = Parameter(0);
+  Node* right = Parameter(1);
+  Node* context = Parameter(2);
 
   // Shared entry for floating point subtraction.
-  Label do_fsub(assembler), end(assembler);
-  Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
-      var_fsub_rhs(assembler, MachineRepresentation::kFloat64);
+  Label do_fsub(this), end(this);
+  Variable var_fsub_lhs(this, MachineRepresentation::kFloat64),
+      var_fsub_rhs(this, MachineRepresentation::kFloat64);
 
   // We might need to loop several times due to ToPrimitive and/or ToNumber
   // conversions.
-  Variable var_lhs(assembler, MachineRepresentation::kTagged),
-      var_rhs(assembler, MachineRepresentation::kTagged),
-      var_result(assembler, MachineRepresentation::kTagged);
+  Variable var_lhs(this, MachineRepresentation::kTagged),
+      var_rhs(this, MachineRepresentation::kTagged),
+      var_result(this, MachineRepresentation::kTagged);
   Variable* loop_vars[2] = {&var_lhs, &var_rhs};
-  Label loop(assembler, 2, loop_vars);
+  Label loop(this, 2, loop_vars);
   var_lhs.Bind(left);
   var_rhs.Bind(right);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  Goto(&loop);
+  Bind(&loop);
   {
     // Load the current {lhs} and {rhs} values.
     Node* lhs = var_lhs.value();
     Node* rhs = var_rhs.value();
 
     // Check if the {lhs} is a Smi or a HeapObject.
-    Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
-    assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi,
-                      &if_lhsisnotsmi);
+    Label if_lhsissmi(this), if_lhsisnotsmi(this);
+    Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
 
-    assembler->Bind(&if_lhsissmi);
+    Bind(&if_lhsissmi);
     {
       // Check if the {rhs} is also a Smi.
-      Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-      assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
-                        &if_rhsisnotsmi);
+      Label if_rhsissmi(this), if_rhsisnotsmi(this);
+      Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
 
-      assembler->Bind(&if_rhsissmi);
+      Bind(&if_rhsissmi);
       {
         // Try a fast Smi subtraction first.
-        Node* pair = assembler->IntPtrSubWithOverflow(
-            assembler->BitcastTaggedToWord(lhs),
-            assembler->BitcastTaggedToWord(rhs));
-        Node* overflow = assembler->Projection(1, pair);
+        Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(lhs),
+                                           BitcastTaggedToWord(rhs));
+        Node* overflow = Projection(1, pair);
 
         // Check if the Smi subtraction overflowed.
-        Label if_overflow(assembler), if_notoverflow(assembler);
-        assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+        Label if_overflow(this), if_notoverflow(this);
+        Branch(overflow, &if_overflow, &if_notoverflow);
 
-        assembler->Bind(&if_overflow);
+        Bind(&if_overflow);
         {
           // The result doesn't fit into Smi range.
-          var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
-          var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
-          assembler->Goto(&do_fsub);
+          var_fsub_lhs.Bind(SmiToFloat64(lhs));
+          var_fsub_rhs.Bind(SmiToFloat64(rhs));
+          Goto(&do_fsub);
         }
 
-        assembler->Bind(&if_notoverflow);
-        var_result.Bind(assembler->BitcastWordToTaggedSigned(
-            assembler->Projection(0, pair)));
-        assembler->Goto(&end);
+        Bind(&if_notoverflow);
+        var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+        Goto(&end);
       }
 
-      assembler->Bind(&if_rhsisnotsmi);
+      Bind(&if_rhsisnotsmi);
       {
         // Load the map of the {rhs}.
-        Node* rhs_map = assembler->LoadMap(rhs);
+        Node* rhs_map = LoadMap(rhs);
 
         // Check if {rhs} is a HeapNumber.
-        Label if_rhsisnumber(assembler),
-            if_rhsisnotnumber(assembler, Label::kDeferred);
-        assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
-                          &if_rhsisnotnumber);
+        Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+        Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
 
-        assembler->Bind(&if_rhsisnumber);
+        Bind(&if_rhsisnumber);
         {
           // Perform a floating point subtraction.
-          var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
-          var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
-          assembler->Goto(&do_fsub);
+          var_fsub_lhs.Bind(SmiToFloat64(lhs));
+          var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
+          Goto(&do_fsub);
         }
 
-        assembler->Bind(&if_rhsisnotnumber);
+        Bind(&if_rhsisnotnumber);
         {
           // Convert the {rhs} to a Number first.
-          Callable callable =
-              CodeFactory::NonNumberToNumber(assembler->isolate());
-          var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-          assembler->Goto(&loop);
+          Callable callable = CodeFactory::NonNumberToNumber(isolate());
+          var_rhs.Bind(CallStub(callable, context, rhs));
+          Goto(&loop);
         }
       }
     }
 
-    assembler->Bind(&if_lhsisnotsmi);
+    Bind(&if_lhsisnotsmi);
     {
       // Load the map of the {lhs}.
-      Node* lhs_map = assembler->LoadMap(lhs);
+      Node* lhs_map = LoadMap(lhs);
 
       // Check if the {lhs} is a HeapNumber.
-      Label if_lhsisnumber(assembler),
-          if_lhsisnotnumber(assembler, Label::kDeferred);
-      Node* number_map = assembler->HeapNumberMapConstant();
-      assembler->Branch(assembler->WordEqual(lhs_map, number_map),
-                        &if_lhsisnumber, &if_lhsisnotnumber);
+      Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
+      Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
 
-      assembler->Bind(&if_lhsisnumber);
+      Bind(&if_lhsisnumber);
       {
         // Check if the {rhs} is a Smi.
-        Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
-        assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
-                          &if_rhsisnotsmi);
+        Label if_rhsissmi(this), if_rhsisnotsmi(this);
+        Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
 
-        assembler->Bind(&if_rhsissmi);
+        Bind(&if_rhsissmi);
         {
           // Perform a floating point subtraction.
-          var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
-          var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
-          assembler->Goto(&do_fsub);
+          var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
+          var_fsub_rhs.Bind(SmiToFloat64(rhs));
+          Goto(&do_fsub);
         }
 
-        assembler->Bind(&if_rhsisnotsmi);
+        Bind(&if_rhsisnotsmi);
         {
           // Load the map of the {rhs}.
-          Node* rhs_map = assembler->LoadMap(rhs);
+          Node* rhs_map = LoadMap(rhs);
 
           // Check if the {rhs} is a HeapNumber.
-          Label if_rhsisnumber(assembler),
-              if_rhsisnotnumber(assembler, Label::kDeferred);
-          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
-                            &if_rhsisnumber, &if_rhsisnotnumber);
+          Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+          Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
 
-          assembler->Bind(&if_rhsisnumber);
+          Bind(&if_rhsisnumber);
           {
             // Perform a floating point subtraction.
-            var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
-            var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
-            assembler->Goto(&do_fsub);
+            var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
+            var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
+            Goto(&do_fsub);
           }
 
-          assembler->Bind(&if_rhsisnotnumber);
+          Bind(&if_rhsisnotnumber);
           {
             // Convert the {rhs} to a Number first.
-            Callable callable =
-                CodeFactory::NonNumberToNumber(assembler->isolate());
-            var_rhs.Bind(assembler->CallStub(callable, context, rhs));
-            assembler->Goto(&loop);
+            Callable callable = CodeFactory::NonNumberToNumber(isolate());
+            var_rhs.Bind(CallStub(callable, context, rhs));
+            Goto(&loop);
           }
         }
       }
 
-      assembler->Bind(&if_lhsisnotnumber);
+      Bind(&if_lhsisnotnumber);
       {
         // Convert the {lhs} to a Number first.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-        assembler->Goto(&loop);
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_lhs.Bind(CallStub(callable, context, lhs));
+        Goto(&loop);
       }
     }
   }
 
-  assembler->Bind(&do_fsub);
+  Bind(&do_fsub);
   {
     Node* lhs_value = var_fsub_lhs.value();
     Node* rhs_value = var_fsub_rhs.value();
-    Node* value = assembler->Float64Sub(lhs_value, rhs_value);
-    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
-    assembler->Goto(&end);
+    Node* value = Float64Sub(lhs_value, rhs_value);
+    var_result.Bind(AllocateHeapNumberWithValue(value));
+    Goto(&end);
   }
-  assembler->Bind(&end);
-  assembler->Return(var_result.value());
+  Bind(&end);
+  Return(var_result.value());
 }
 
-void Builtins::Generate_Multiply(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Node* left = assembler->Parameter(0);
-  Node* right = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
+TF_BUILTIN(Multiply, CodeStubAssembler) {
+  Node* left = Parameter(0);
+  Node* right = Parameter(1);
+  Node* context = Parameter(2);
 
   // Shared entry point for floating point multiplication.
-  Label do_fmul(assembler), return_result(assembler);
-  Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
-      var_rhs_float64(assembler, MachineRepresentation::kFloat64);
-
-  Node* number_map = assembler->HeapNumberMapConstant();
+  Label do_fmul(this), return_result(this);
+  Variable var_lhs_float64(this, MachineRepresentation::kFloat64),
+      var_rhs_float64(this, MachineRepresentation::kFloat64);
 
   // We might need to loop one or two times due to ToNumber conversions.
-  Variable var_lhs(assembler, MachineRepresentation::kTagged),
-      var_rhs(assembler, MachineRepresentation::kTagged),
-      var_result(assembler, MachineRepresentation::kTagged);
+  Variable var_lhs(this, MachineRepresentation::kTagged),
+      var_rhs(this, MachineRepresentation::kTagged),
+      var_result(this, MachineRepresentation::kTagged);
   Variable* loop_variables[] = {&var_lhs, &var_rhs};
-  Label loop(assembler, 2, loop_variables);
+  Label loop(this, 2, loop_variables);
   var_lhs.Bind(left);
   var_rhs.Bind(right);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  Goto(&loop);
+  Bind(&loop);
   {
     Node* lhs = var_lhs.value();
     Node* rhs = var_rhs.value();
 
-    Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
-    assembler->Branch(assembler->TaggedIsSmi(lhs), &lhs_is_smi,
-                      &lhs_is_not_smi);
+    Label lhs_is_smi(this), lhs_is_not_smi(this);
+    Branch(TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
 
-    assembler->Bind(&lhs_is_smi);
+    Bind(&lhs_is_smi);
     {
-      Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
-      assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
-                        &rhs_is_not_smi);
+      Label rhs_is_smi(this), rhs_is_not_smi(this);
+      Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
 
-      assembler->Bind(&rhs_is_smi);
+      Bind(&rhs_is_smi);
       {
         // Both {lhs} and {rhs} are Smis. The result is not necessarily a Smi,
         // since the multiplication may overflow.
-        var_result.Bind(assembler->SmiMul(lhs, rhs));
-        assembler->Goto(&return_result);
+        var_result.Bind(SmiMul(lhs, rhs));
+        Goto(&return_result);
       }
 
-      assembler->Bind(&rhs_is_not_smi);
+      Bind(&rhs_is_not_smi);
       {
-        Node* rhs_map = assembler->LoadMap(rhs);
+        Node* rhs_map = LoadMap(rhs);
 
         // Check if {rhs} is a HeapNumber.
-        Label rhs_is_number(assembler),
-            rhs_is_not_number(assembler, Label::kDeferred);
-        assembler->Branch(assembler->WordEqual(rhs_map, number_map),
-                          &rhs_is_number, &rhs_is_not_number);
+        Label rhs_is_number(this), rhs_is_not_number(this, Label::kDeferred);
+        Branch(IsHeapNumberMap(rhs_map), &rhs_is_number, &rhs_is_not_number);
 
-        assembler->Bind(&rhs_is_number);
+        Bind(&rhs_is_number);
         {
           // Convert {lhs} to a double and multiply it by the value of {rhs}.
-          var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
-          var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
-          assembler->Goto(&do_fmul);
+          var_lhs_float64.Bind(SmiToFloat64(lhs));
+          var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
+          Goto(&do_fmul);
         }
 
-        assembler->Bind(&rhs_is_not_number);
+        Bind(&rhs_is_not_number);
         {
           // Multiplication is commutative, swap {lhs} with {rhs} and loop.
           var_lhs.Bind(rhs);
           var_rhs.Bind(lhs);
-          assembler->Goto(&loop);
+          Goto(&loop);
         }
       }
     }
 
-    assembler->Bind(&lhs_is_not_smi);
+    Bind(&lhs_is_not_smi);
     {
-      Node* lhs_map = assembler->LoadMap(lhs);
+      Node* lhs_map = LoadMap(lhs);
 
       // Check if {lhs} is a HeapNumber.
-      Label lhs_is_number(assembler),
-          lhs_is_not_number(assembler, Label::kDeferred);
-      assembler->Branch(assembler->WordEqual(lhs_map, number_map),
-                        &lhs_is_number, &lhs_is_not_number);
+      Label lhs_is_number(this), lhs_is_not_number(this, Label::kDeferred);
+      Branch(IsHeapNumberMap(lhs_map), &lhs_is_number, &lhs_is_not_number);
 
-      assembler->Bind(&lhs_is_number);
+      Bind(&lhs_is_number);
       {
         // Check if {rhs} is a Smi.
-        Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
-        assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
-                          &rhs_is_not_smi);
+        Label rhs_is_smi(this), rhs_is_not_smi(this);
+        Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
 
-        assembler->Bind(&rhs_is_smi);
+        Bind(&rhs_is_smi);
         {
           // Convert {rhs} to a double and multiply it by the value of {lhs}.
-          var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
-          var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
-          assembler->Goto(&do_fmul);
+          var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
+          var_rhs_float64.Bind(SmiToFloat64(rhs));
+          Goto(&do_fmul);
         }
 
-        assembler->Bind(&rhs_is_not_smi);
+        Bind(&rhs_is_not_smi);
         {
-          Node* rhs_map = assembler->LoadMap(rhs);
+          Node* rhs_map = LoadMap(rhs);
 
           // Check if {rhs} is a HeapNumber.
-          Label rhs_is_number(assembler),
-              rhs_is_not_number(assembler, Label::kDeferred);
-          assembler->Branch(assembler->WordEqual(rhs_map, number_map),
-                            &rhs_is_number, &rhs_is_not_number);
+          Label rhs_is_number(this), rhs_is_not_number(this, Label::kDeferred);
+          Branch(IsHeapNumberMap(rhs_map), &rhs_is_number, &rhs_is_not_number);
 
-          assembler->Bind(&rhs_is_number);
+          Bind(&rhs_is_number);
           {
             // Both {lhs} and {rhs} are HeapNumbers. Load their values and
             // multiply them.
-            var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
-            var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
-            assembler->Goto(&do_fmul);
+            var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
+            var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
+            Goto(&do_fmul);
           }
 
-          assembler->Bind(&rhs_is_not_number);
+          Bind(&rhs_is_not_number);
           {
             // Multiplication is commutative, swap {lhs} with {rhs} and loop.
             var_lhs.Bind(rhs);
             var_rhs.Bind(lhs);
-            assembler->Goto(&loop);
+            Goto(&loop);
           }
         }
       }
 
-      assembler->Bind(&lhs_is_not_number);
+      Bind(&lhs_is_not_number);
       {
         // Convert {lhs} to a Number and loop.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_lhs.Bind(assembler->CallStub(callable, context, lhs));
-        assembler->Goto(&loop);
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_lhs.Bind(CallStub(callable, context, lhs));
+        Goto(&loop);
       }
     }
   }
 
-  assembler->Bind(&do_fmul);
+  Bind(&do_fmul);
   {
-    Node* value =
-        assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
-    Node* result = assembler->AllocateHeapNumberWithValue(value);
+    Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
+    Node* result = AllocateHeapNumberWithValue(value);
     var_result.Bind(result);
-    assembler->Goto(&return_result);
+    Goto(&return_result);
   }
 
-  assembler->Bind(&return_result);
-  assembler->Return(var_result.value());
+  Bind(&return_result);
+  Return(var_result.value());
 }
 
-void Builtins::Generate_Divide(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Node* left = assembler->Parameter(0);
-  Node* right = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
+TF_BUILTIN(Divide, CodeStubAssembler) {
+  Node* left = Parameter(0);
+  Node* right = Parameter(1);
+  Node* context = Parameter(2);
 
   // Shared entry point for floating point division.
-  Label do_fdiv(assembler), end(assembler);
-  Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
-      var_divisor_float64(assembler, MachineRepresentation::kFloat64);
-
-  Node* number_map = assembler->HeapNumberMapConstant();
+  Label do_fdiv(this), end(this);
+  Variable var_dividend_float64(this, MachineRepresentation::kFloat64),
+      var_divisor_float64(this, MachineRepresentation::kFloat64);
 
   // We might need to loop one or two times due to ToNumber conversions.
-  Variable var_dividend(assembler, MachineRepresentation::kTagged),
-      var_divisor(assembler, MachineRepresentation::kTagged),
-      var_result(assembler, MachineRepresentation::kTagged);
+  Variable var_dividend(this, MachineRepresentation::kTagged),
+      var_divisor(this, MachineRepresentation::kTagged),
+      var_result(this, MachineRepresentation::kTagged);
   Variable* loop_variables[] = {&var_dividend, &var_divisor};
-  Label loop(assembler, 2, loop_variables);
+  Label loop(this, 2, loop_variables);
   var_dividend.Bind(left);
   var_divisor.Bind(right);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  Goto(&loop);
+  Bind(&loop);
   {
     Node* dividend = var_dividend.value();
     Node* divisor = var_divisor.value();
 
-    Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
-    assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
-                      &dividend_is_not_smi);
+    Label dividend_is_smi(this), dividend_is_not_smi(this);
+    Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
 
-    assembler->Bind(&dividend_is_smi);
+    Bind(&dividend_is_smi);
     {
-      Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
-      assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
-                        &divisor_is_not_smi);
+      Label divisor_is_smi(this), divisor_is_not_smi(this);
+      Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
 
-      assembler->Bind(&divisor_is_smi);
+      Bind(&divisor_is_smi);
       {
-        Label bailout(assembler);
+        Label bailout(this);
 
         // Do floating point division if {divisor} is zero.
-        assembler->GotoIf(
-            assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
-            &bailout);
+        GotoIf(SmiEqual(divisor, SmiConstant(0)), &bailout);
 
         // Do floating point division if {dividend} is zero and {divisor} is
         // negative.
-        Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
-        assembler->Branch(
-            assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
-            &dividend_is_zero, &dividend_is_not_zero);
+        Label dividend_is_zero(this), dividend_is_not_zero(this);
+        Branch(SmiEqual(dividend, SmiConstant(0)), &dividend_is_zero,
+               &dividend_is_not_zero);
 
-        assembler->Bind(&dividend_is_zero);
+        Bind(&dividend_is_zero);
         {
-          assembler->GotoIf(
-              assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
-              &bailout);
-          assembler->Goto(&dividend_is_not_zero);
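+          // 0 / -x is -0.0, which a Smi cannot represent, so this case also
+          // falls back to floating point division.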
+          GotoIf(SmiLessThan(divisor, SmiConstant(0)), &bailout);
+          Goto(&dividend_is_not_zero);
         }
-        assembler->Bind(&dividend_is_not_zero);
+        Bind(&dividend_is_not_zero);
 
-        Node* untagged_divisor = assembler->SmiUntag(divisor);
-        Node* untagged_dividend = assembler->SmiUntag(dividend);
+        Node* untagged_divisor = SmiToWord32(divisor);
+        Node* untagged_dividend = SmiToWord32(dividend);
 
         // Do floating point division if {dividend} is kMinInt (or kMinInt >> 1
         // if the Smi size is 31) and {divisor} is -1.
-        Label divisor_is_minus_one(assembler),
-            divisor_is_not_minus_one(assembler);
-        assembler->Branch(assembler->Word32Equal(untagged_divisor,
-                                                 assembler->Int32Constant(-1)),
-                          &divisor_is_minus_one, &divisor_is_not_minus_one);
+        Label divisor_is_minus_one(this), divisor_is_not_minus_one(this);
+        Branch(Word32Equal(untagged_divisor, Int32Constant(-1)),
+               &divisor_is_minus_one, &divisor_is_not_minus_one);
 
-        assembler->Bind(&divisor_is_minus_one);
+        Bind(&divisor_is_minus_one);
         {
-          assembler->GotoIf(
-              assembler->Word32Equal(
-                  untagged_dividend,
-                  assembler->Int32Constant(
-                      kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
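+          // The quotient -kMinInt does not fit in a 32-bit integer (nor in a
+          // Smi), so this case falls back to floating point division as well.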
+          GotoIf(
+              Word32Equal(untagged_dividend,
+                          Int32Constant(kSmiValueSize == 32 ? kMinInt
+                                                            : (kMinInt >> 1))),
               &bailout);
-          assembler->Goto(&divisor_is_not_minus_one);
+          Goto(&divisor_is_not_minus_one);
         }
-        assembler->Bind(&divisor_is_not_minus_one);
+        Bind(&divisor_is_not_minus_one);
 
         // TODO(epertoso): consider adding a machine instruction that returns
         // both the result and the remainder.
-        Node* untagged_result =
-            assembler->Int32Div(untagged_dividend, untagged_divisor);
-        Node* truncated =
-            assembler->Int32Mul(untagged_result, untagged_divisor);
+        Node* untagged_result = Int32Div(untagged_dividend, untagged_divisor);
+        Node* truncated = Int32Mul(untagged_result, untagged_divisor);
         // Do floating point division if the remainder is not 0.
-        assembler->GotoIf(
-            assembler->Word32NotEqual(untagged_dividend, truncated), &bailout);
-        var_result.Bind(assembler->SmiTag(untagged_result));
-        assembler->Goto(&end);
+        GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout);
+        var_result.Bind(SmiFromWord32(untagged_result));
+        Goto(&end);
 
         // Bailout: convert {dividend} and {divisor} to double and do double
         // division.
-        assembler->Bind(&bailout);
+        Bind(&bailout);
         {
-          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
-          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
-          assembler->Goto(&do_fdiv);
+          var_dividend_float64.Bind(SmiToFloat64(dividend));
+          var_divisor_float64.Bind(SmiToFloat64(divisor));
+          Goto(&do_fdiv);
         }
       }
 
-      assembler->Bind(&divisor_is_not_smi);
+      Bind(&divisor_is_not_smi);
       {
-        Node* divisor_map = assembler->LoadMap(divisor);
+        Node* divisor_map = LoadMap(divisor);
 
         // Check if {divisor} is a HeapNumber.
-        Label divisor_is_number(assembler),
-            divisor_is_not_number(assembler, Label::kDeferred);
-        assembler->Branch(assembler->WordEqual(divisor_map, number_map),
-                          &divisor_is_number, &divisor_is_not_number);
+        Label divisor_is_number(this),
+            divisor_is_not_number(this, Label::kDeferred);
+        Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
+               &divisor_is_not_number);
 
-        assembler->Bind(&divisor_is_number);
+        Bind(&divisor_is_number);
         {
           // Convert {dividend} to a double and divide it by the value of
           // {divisor}.
-          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
-          var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
-          assembler->Goto(&do_fdiv);
+          var_dividend_float64.Bind(SmiToFloat64(dividend));
+          var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
+          Goto(&do_fdiv);
         }
 
-        assembler->Bind(&divisor_is_not_number);
+        Bind(&divisor_is_not_number);
         {
           // Convert {divisor} to a number and loop.
-          Callable callable =
-              CodeFactory::NonNumberToNumber(assembler->isolate());
-          var_divisor.Bind(assembler->CallStub(callable, context, divisor));
-          assembler->Goto(&loop);
+          Callable callable = CodeFactory::NonNumberToNumber(isolate());
+          var_divisor.Bind(CallStub(callable, context, divisor));
+          Goto(&loop);
         }
       }
     }
 
-    assembler->Bind(&dividend_is_not_smi);
+    Bind(&dividend_is_not_smi);
     {
-      Node* dividend_map = assembler->LoadMap(dividend);
+      Node* dividend_map = LoadMap(dividend);
 
       // Check if {dividend} is a HeapNumber.
-      Label dividend_is_number(assembler),
-          dividend_is_not_number(assembler, Label::kDeferred);
-      assembler->Branch(assembler->WordEqual(dividend_map, number_map),
-                        &dividend_is_number, &dividend_is_not_number);
+      Label dividend_is_number(this),
+          dividend_is_not_number(this, Label::kDeferred);
+      Branch(IsHeapNumberMap(dividend_map), &dividend_is_number,
+             &dividend_is_not_number);
 
-      assembler->Bind(&dividend_is_number);
+      Bind(&dividend_is_number);
       {
         // Check if {divisor} is a Smi.
-        Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
-        assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
-                          &divisor_is_not_smi);
+        Label divisor_is_smi(this), divisor_is_not_smi(this);
+        Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
 
-        assembler->Bind(&divisor_is_smi);
+        Bind(&divisor_is_smi);
         {
           // Convert {divisor} to a double and use it for a floating point
           // division.
-          var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
-          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
-          assembler->Goto(&do_fdiv);
+          var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
+          var_divisor_float64.Bind(SmiToFloat64(divisor));
+          Goto(&do_fdiv);
         }
 
-        assembler->Bind(&divisor_is_not_smi);
+        Bind(&divisor_is_not_smi);
         {
-          Node* divisor_map = assembler->LoadMap(divisor);
+          Node* divisor_map = LoadMap(divisor);
 
           // Check if {divisor} is a HeapNumber.
-          Label divisor_is_number(assembler),
-              divisor_is_not_number(assembler, Label::kDeferred);
-          assembler->Branch(assembler->WordEqual(divisor_map, number_map),
-                            &divisor_is_number, &divisor_is_not_number);
+          Label divisor_is_number(this),
+              divisor_is_not_number(this, Label::kDeferred);
+          Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
+                 &divisor_is_not_number);
 
-          assembler->Bind(&divisor_is_number);
+          Bind(&divisor_is_number);
           {
             // Both {dividend} and {divisor} are HeapNumbers. Load their values
             // and divide them.
-            var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
-            var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
-            assembler->Goto(&do_fdiv);
+            var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
+            var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
+            Goto(&do_fdiv);
           }
 
-          assembler->Bind(&divisor_is_not_number);
+          Bind(&divisor_is_not_number);
           {
             // Convert {divisor} to a number and loop.
-            Callable callable =
-                CodeFactory::NonNumberToNumber(assembler->isolate());
-            var_divisor.Bind(assembler->CallStub(callable, context, divisor));
-            assembler->Goto(&loop);
+            Callable callable = CodeFactory::NonNumberToNumber(isolate());
+            var_divisor.Bind(CallStub(callable, context, divisor));
+            Goto(&loop);
           }
         }
       }
 
-      assembler->Bind(&dividend_is_not_number);
+      Bind(&dividend_is_not_number);
       {
         // Convert {dividend} to a Number and loop.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_dividend.Bind(assembler->CallStub(callable, context, dividend));
-        assembler->Goto(&loop);
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_dividend.Bind(CallStub(callable, context, dividend));
+        Goto(&loop);
       }
     }
   }
 
-  assembler->Bind(&do_fdiv);
+  Bind(&do_fdiv);
   {
-    Node* value = assembler->Float64Div(var_dividend_float64.value(),
-                                        var_divisor_float64.value());
-    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
-    assembler->Goto(&end);
+    Node* value =
+        Float64Div(var_dividend_float64.value(), var_divisor_float64.value());
+    var_result.Bind(AllocateHeapNumberWithValue(value));
+    Goto(&end);
   }
-  assembler->Bind(&end);
-  assembler->Return(var_result.value());
+  Bind(&end);
+  Return(var_result.value());
 }
 
-void Builtins::Generate_Modulus(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(Modulus, CodeStubAssembler) {
+  Node* left = Parameter(0);
+  Node* right = Parameter(1);
+  Node* context = Parameter(2);
 
-  Node* left = assembler->Parameter(0);
-  Node* right = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
-
-  Variable var_result(assembler, MachineRepresentation::kTagged);
-  Label return_result(assembler, &var_result);
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Label return_result(this, &var_result);
 
   // Shared entry point for floating point modulus.
-  Label do_fmod(assembler);
-  Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
-      var_divisor_float64(assembler, MachineRepresentation::kFloat64);
-
-  Node* number_map = assembler->HeapNumberMapConstant();
+  Label do_fmod(this);
+  Variable var_dividend_float64(this, MachineRepresentation::kFloat64),
+      var_divisor_float64(this, MachineRepresentation::kFloat64);
 
   // We might need to loop one or two times due to ToNumber conversions.
-  Variable var_dividend(assembler, MachineRepresentation::kTagged),
-      var_divisor(assembler, MachineRepresentation::kTagged);
+  Variable var_dividend(this, MachineRepresentation::kTagged),
+      var_divisor(this, MachineRepresentation::kTagged);
   Variable* loop_variables[] = {&var_dividend, &var_divisor};
-  Label loop(assembler, 2, loop_variables);
+  Label loop(this, 2, loop_variables);
   var_dividend.Bind(left);
   var_divisor.Bind(right);
-  assembler->Goto(&loop);
-  assembler->Bind(&loop);
+  Goto(&loop);
+  Bind(&loop);
   {
     Node* dividend = var_dividend.value();
     Node* divisor = var_divisor.value();
 
-    Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
-    assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
-                      &dividend_is_not_smi);
+    Label dividend_is_smi(this), dividend_is_not_smi(this);
+    Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
 
-    assembler->Bind(&dividend_is_smi);
+    Bind(&dividend_is_smi);
     {
-      Label dividend_is_not_zero(assembler);
-      Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
-      assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
-                        &divisor_is_not_smi);
+      Label dividend_is_not_zero(this);
+      Label divisor_is_smi(this), divisor_is_not_smi(this);
+      Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
 
-      assembler->Bind(&divisor_is_smi);
+      Bind(&divisor_is_smi);
       {
         // Compute the modulus of two Smis.
-        var_result.Bind(assembler->SmiMod(dividend, divisor));
-        assembler->Goto(&return_result);
+        var_result.Bind(SmiMod(dividend, divisor));
+        Goto(&return_result);
       }
 
-      assembler->Bind(&divisor_is_not_smi);
+      Bind(&divisor_is_not_smi);
       {
-        Node* divisor_map = assembler->LoadMap(divisor);
+        Node* divisor_map = LoadMap(divisor);
 
         // Check if {divisor} is a HeapNumber.
-        Label divisor_is_number(assembler),
-            divisor_is_not_number(assembler, Label::kDeferred);
-        assembler->Branch(assembler->WordEqual(divisor_map, number_map),
-                          &divisor_is_number, &divisor_is_not_number);
+        Label divisor_is_number(this),
+            divisor_is_not_number(this, Label::kDeferred);
+        Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
+               &divisor_is_not_number);
 
-        assembler->Bind(&divisor_is_number);
+        Bind(&divisor_is_number);
         {
           // Convert {dividend} to a double and compute its modulus with the
           // value of {divisor}.
-          var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
-          var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
-          assembler->Goto(&do_fmod);
+          var_dividend_float64.Bind(SmiToFloat64(dividend));
+          var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
+          Goto(&do_fmod);
         }
 
-        assembler->Bind(&divisor_is_not_number);
+        Bind(&divisor_is_not_number);
         {
           // Convert {divisor} to a number and loop.
-          Callable callable =
-              CodeFactory::NonNumberToNumber(assembler->isolate());
-          var_divisor.Bind(assembler->CallStub(callable, context, divisor));
-          assembler->Goto(&loop);
+          Callable callable = CodeFactory::NonNumberToNumber(isolate());
+          var_divisor.Bind(CallStub(callable, context, divisor));
+          Goto(&loop);
         }
       }
     }
 
-    assembler->Bind(&dividend_is_not_smi);
+    Bind(&dividend_is_not_smi);
     {
-      Node* dividend_map = assembler->LoadMap(dividend);
+      Node* dividend_map = LoadMap(dividend);
 
       // Check if {dividend} is a HeapNumber.
-      Label dividend_is_number(assembler),
-          dividend_is_not_number(assembler, Label::kDeferred);
-      assembler->Branch(assembler->WordEqual(dividend_map, number_map),
-                        &dividend_is_number, &dividend_is_not_number);
+      Label dividend_is_number(this),
+          dividend_is_not_number(this, Label::kDeferred);
+      Branch(IsHeapNumberMap(dividend_map), &dividend_is_number,
+             &dividend_is_not_number);
 
-      assembler->Bind(&dividend_is_number);
+      Bind(&dividend_is_number);
       {
         // Check if {divisor} is a Smi.
-        Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
-        assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
-                          &divisor_is_not_smi);
+        Label divisor_is_smi(this), divisor_is_not_smi(this);
+        Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
 
-        assembler->Bind(&divisor_is_smi);
+        Bind(&divisor_is_smi);
         {
           // Convert {divisor} to a double and compute {dividend}'s modulus with
           // it.
-          var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
-          var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
-          assembler->Goto(&do_fmod);
+          var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
+          var_divisor_float64.Bind(SmiToFloat64(divisor));
+          Goto(&do_fmod);
         }
 
-        assembler->Bind(&divisor_is_not_smi);
+        Bind(&divisor_is_not_smi);
         {
-          Node* divisor_map = assembler->LoadMap(divisor);
+          Node* divisor_map = LoadMap(divisor);
 
           // Check if {divisor} is a HeapNumber.
-          Label divisor_is_number(assembler),
-              divisor_is_not_number(assembler, Label::kDeferred);
-          assembler->Branch(assembler->WordEqual(divisor_map, number_map),
-                            &divisor_is_number, &divisor_is_not_number);
+          Label divisor_is_number(this),
+              divisor_is_not_number(this, Label::kDeferred);
+          Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
+                 &divisor_is_not_number);
 
-          assembler->Bind(&divisor_is_number);
+          Bind(&divisor_is_number);
           {
             // Both {dividend} and {divisor} are HeapNumbers. Load their values
             // and compute their modulus.
-            var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
-            var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
-            assembler->Goto(&do_fmod);
+            var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
+            var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
+            Goto(&do_fmod);
           }
 
-          assembler->Bind(&divisor_is_not_number);
+          Bind(&divisor_is_not_number);
           {
             // Convert {divisor} to a number and loop.
-            Callable callable =
-                CodeFactory::NonNumberToNumber(assembler->isolate());
-            var_divisor.Bind(assembler->CallStub(callable, context, divisor));
-            assembler->Goto(&loop);
+            Callable callable = CodeFactory::NonNumberToNumber(isolate());
+            var_divisor.Bind(CallStub(callable, context, divisor));
+            Goto(&loop);
           }
         }
       }
 
-      assembler->Bind(&dividend_is_not_number);
+      Bind(&dividend_is_not_number);
       {
         // Convert {dividend} to a Number and loop.
-        Callable callable =
-            CodeFactory::NonNumberToNumber(assembler->isolate());
-        var_dividend.Bind(assembler->CallStub(callable, context, dividend));
-        assembler->Goto(&loop);
+        Callable callable = CodeFactory::NonNumberToNumber(isolate());
+        var_dividend.Bind(CallStub(callable, context, dividend));
+        Goto(&loop);
       }
     }
   }
 
-  assembler->Bind(&do_fmod);
+  Bind(&do_fmod);
   {
-    Node* value = assembler->Float64Mod(var_dividend_float64.value(),
-                                        var_divisor_float64.value());
-    var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
-    assembler->Goto(&return_result);
+    Node* value =
+        Float64Mod(var_dividend_float64.value(), var_divisor_float64.value());
+    var_result.Bind(AllocateHeapNumberWithValue(value));
+    Goto(&return_result);
   }
 
-  assembler->Bind(&return_result);
-  assembler->Return(var_result.value());
+  Bind(&return_result);
+  Return(var_result.value());
 }
 
-void Builtins::Generate_ShiftLeft(CodeStubAssembler* assembler) {
-  compiler::Node* left = assembler->Parameter(0);
-  compiler::Node* right = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
-
-  using compiler::Node;
-
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
-  Node* shift_count =
-      assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
-  Node* value = assembler->Word32Shl(lhs_value, shift_count);
-  Node* result = assembler->ChangeInt32ToTagged(value);
-  assembler->Return(result);
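+// Note (assumption; the helper's definition is not part of this diff):
+// BitwiseShiftOp presumably truncates both operands to Word32 and masks the
+// shift count to five bits (count & 0x1f), matching the code removed above.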
+TF_BUILTIN(ShiftLeft, NumberBuiltinsAssembler) {
+  BitwiseShiftOp([this](Node* lhs, Node* shift_count) {
+    return Word32Shl(lhs, shift_count);
+  });
 }
 
-void Builtins::Generate_ShiftRight(CodeStubAssembler* assembler) {
-  compiler::Node* left = assembler->Parameter(0);
-  compiler::Node* right = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
-
-  using compiler::Node;
-
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
-  Node* shift_count =
-      assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
-  Node* value = assembler->Word32Sar(lhs_value, shift_count);
-  Node* result = assembler->ChangeInt32ToTagged(value);
-  assembler->Return(result);
+TF_BUILTIN(ShiftRight, NumberBuiltinsAssembler) {
+  BitwiseShiftOp([this](Node* lhs, Node* shift_count) {
+    return Word32Sar(lhs, shift_count);
+  });
 }
 
-void Builtins::Generate_ShiftRightLogical(CodeStubAssembler* assembler) {
-  compiler::Node* left = assembler->Parameter(0);
-  compiler::Node* right = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
-
-  using compiler::Node;
-
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
-  Node* shift_count =
-      assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
-  Node* value = assembler->Word32Shr(lhs_value, shift_count);
-  Node* result = assembler->ChangeUint32ToTagged(value);
-  assembler->Return(result);
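+// The kUnsigned template argument presumably makes BitwiseShiftOp box the
+// result via ChangeUint32ToTagged, as the removed code did for this
+// zero-extending shift.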
+TF_BUILTIN(ShiftRightLogical, NumberBuiltinsAssembler) {
+  BitwiseShiftOp<kUnsigned>([this](Node* lhs, Node* shift_count) {
+    return Word32Shr(lhs, shift_count);
+  });
 }
 
-void Builtins::Generate_BitwiseAnd(CodeStubAssembler* assembler) {
-  compiler::Node* left = assembler->Parameter(0);
-  compiler::Node* right = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
-
-  using compiler::Node;
-
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
-  Node* value = assembler->Word32And(lhs_value, rhs_value);
-  Node* result = assembler->ChangeInt32ToTagged(value);
-  assembler->Return(result);
+TF_BUILTIN(BitwiseAnd, NumberBuiltinsAssembler) {
+  BitwiseOp([this](Node* lhs, Node* rhs) { return Word32And(lhs, rhs); });
 }
 
-void Builtins::Generate_BitwiseOr(CodeStubAssembler* assembler) {
-  compiler::Node* left = assembler->Parameter(0);
-  compiler::Node* right = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
-
-  using compiler::Node;
-
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
-  Node* value = assembler->Word32Or(lhs_value, rhs_value);
-  Node* result = assembler->ChangeInt32ToTagged(value);
-  assembler->Return(result);
+TF_BUILTIN(BitwiseOr, NumberBuiltinsAssembler) {
+  BitwiseOp([this](Node* lhs, Node* rhs) { return Word32Or(lhs, rhs); });
 }
 
-void Builtins::Generate_BitwiseXor(CodeStubAssembler* assembler) {
-  compiler::Node* left = assembler->Parameter(0);
-  compiler::Node* right = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
-
-  using compiler::Node;
-
-  Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
-  Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
-  Node* value = assembler->Word32Xor(lhs_value, rhs_value);
-  Node* result = assembler->ChangeInt32ToTagged(value);
-  assembler->Return(result);
+TF_BUILTIN(BitwiseXor, NumberBuiltinsAssembler) {
+  BitwiseOp([this](Node* lhs, Node* rhs) { return Word32Xor(lhs, rhs); });
 }
 
-void Builtins::Generate_LessThan(CodeStubAssembler* assembler) {
-  compiler::Node* lhs = assembler->Parameter(0);
-  compiler::Node* rhs = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
-
-  assembler->Return(assembler->RelationalComparison(
-      CodeStubAssembler::kLessThan, lhs, rhs, context));
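+// RelationalComparisonBuiltin is presumably a NumberBuiltinsAssembler helper
+// that loads the lhs/rhs/context parameters and returns
+// RelationalComparison(mode, lhs, rhs, context), as each removed body did
+// explicitly.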
+TF_BUILTIN(LessThan, NumberBuiltinsAssembler) {
+  RelationalComparisonBuiltin(kLessThan);
 }
 
-void Builtins::Generate_LessThanOrEqual(CodeStubAssembler* assembler) {
-  compiler::Node* lhs = assembler->Parameter(0);
-  compiler::Node* rhs = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
-
-  assembler->Return(assembler->RelationalComparison(
-      CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context));
+TF_BUILTIN(LessThanOrEqual, NumberBuiltinsAssembler) {
+  RelationalComparisonBuiltin(kLessThanOrEqual);
 }
 
-void Builtins::Generate_GreaterThan(CodeStubAssembler* assembler) {
-  compiler::Node* lhs = assembler->Parameter(0);
-  compiler::Node* rhs = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
-
-  assembler->Return(assembler->RelationalComparison(
-      CodeStubAssembler::kGreaterThan, lhs, rhs, context));
+TF_BUILTIN(GreaterThan, NumberBuiltinsAssembler) {
+  RelationalComparisonBuiltin(kGreaterThan);
 }
 
-void Builtins::Generate_GreaterThanOrEqual(CodeStubAssembler* assembler) {
-  compiler::Node* lhs = assembler->Parameter(0);
-  compiler::Node* rhs = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
-
-  assembler->Return(assembler->RelationalComparison(
-      CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context));
+TF_BUILTIN(GreaterThanOrEqual, NumberBuiltinsAssembler) {
+  RelationalComparisonBuiltin(kGreaterThanOrEqual);
 }
 
-void Builtins::Generate_Equal(CodeStubAssembler* assembler) {
-  compiler::Node* lhs = assembler->Parameter(0);
-  compiler::Node* rhs = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
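+// Equal implements JavaScript's abstract equality (==); the kNegateResult
+// variant below derives != from the same comparison.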
+TF_BUILTIN(Equal, CodeStubAssembler) {
+  Node* lhs = Parameter(0);
+  Node* rhs = Parameter(1);
+  Node* context = Parameter(2);
 
-  assembler->Return(assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs,
-                                     rhs, context));
+  Return(Equal(kDontNegateResult, lhs, rhs, context));
 }
 
-void Builtins::Generate_NotEqual(CodeStubAssembler* assembler) {
-  compiler::Node* lhs = assembler->Parameter(0);
-  compiler::Node* rhs = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
+TF_BUILTIN(NotEqual, CodeStubAssembler) {
+  Node* lhs = Parameter(0);
+  Node* rhs = Parameter(1);
+  Node* context = Parameter(2);
 
-  assembler->Return(
-      assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context));
+  Return(Equal(kNegateResult, lhs, rhs, context));
 }
 
-void Builtins::Generate_StrictEqual(CodeStubAssembler* assembler) {
-  compiler::Node* lhs = assembler->Parameter(0);
-  compiler::Node* rhs = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
+TF_BUILTIN(StrictEqual, CodeStubAssembler) {
+  Node* lhs = Parameter(0);
+  Node* rhs = Parameter(1);
+  Node* context = Parameter(2);
 
-  assembler->Return(assembler->StrictEqual(CodeStubAssembler::kDontNegateResult,
-                                           lhs, rhs, context));
+  Return(StrictEqual(kDontNegateResult, lhs, rhs, context));
 }
 
-void Builtins::Generate_StrictNotEqual(CodeStubAssembler* assembler) {
-  compiler::Node* lhs = assembler->Parameter(0);
-  compiler::Node* rhs = assembler->Parameter(1);
-  compiler::Node* context = assembler->Parameter(2);
+TF_BUILTIN(StrictNotEqual, CodeStubAssembler) {
+  Node* lhs = Parameter(0);
+  Node* rhs = Parameter(1);
+  Node* context = Parameter(2);
 
-  assembler->Return(assembler->StrictEqual(CodeStubAssembler::kNegateResult,
-                                           lhs, rhs, context));
+  Return(StrictEqual(kNegateResult, lhs, rhs, context));
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-object.cc b/src/builtins/builtins-object.cc
index abb5c47..af5a42a 100644
--- a/src/builtins/builtins-object.cc
+++ b/src/builtins/builtins-object.cc
@@ -2,149 +2,87 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-object.h"
 #include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
 #include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/keys.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h"
 #include "src/property-descriptor.h"
 
 namespace v8 {
 namespace internal {
 
+typedef compiler::Node Node;
+
+std::tuple<Node*, Node*, Node*> ObjectBuiltinsAssembler::EmitForInPrepare(
+    Node* object, Node* context, Label* call_runtime,
+    Label* nothing_to_iterate) {
+  Label use_cache(this);
+  CSA_ASSERT(this, IsJSReceiver(object));
+
+  CheckEnumCache(object, &use_cache, call_runtime);
+  Bind(&use_cache);
+  Node* map = LoadMap(object);
+  Node* enum_length = EnumLength(map);
+  GotoIf(WordEqual(enum_length, SmiConstant(0)), nothing_to_iterate);
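+  // Walk from the map's descriptor array to the enum cache bridge, and from
+  // there to the cached array of enumerable keys.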
+  Node* descriptors = LoadMapDescriptors(map);
+  Node* cache_offset =
+      LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset);
+  Node* enum_cache = LoadObjectField(
+      cache_offset, DescriptorArray::kEnumCacheBridgeCacheOffset);
+
+  return std::make_tuple(map, enum_cache, enum_length);
+}
+
 // -----------------------------------------------------------------------------
 // ES6 section 19.1 Object Objects
 
-void Builtins::Generate_ObjectHasOwnProperty(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(ObjectHasOwnProperty, ObjectBuiltinsAssembler) {
+  Node* object = Parameter(0);
+  Node* key = Parameter(1);
+  Node* context = Parameter(4);
 
-  Node* object = assembler->Parameter(0);
-  Node* key = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
-
-  Label call_runtime(assembler), return_true(assembler),
-      return_false(assembler);
+  Label call_runtime(this), return_true(this), return_false(this);
 
   // Smi receivers do not have own properties.
-  Label if_objectisnotsmi(assembler);
-  assembler->Branch(assembler->TaggedIsSmi(object), &return_false,
-                    &if_objectisnotsmi);
-  assembler->Bind(&if_objectisnotsmi);
+  Label if_objectisnotsmi(this);
+  Branch(TaggedIsSmi(object), &return_false, &if_objectisnotsmi);
+  Bind(&if_objectisnotsmi);
 
-  Node* map = assembler->LoadMap(object);
-  Node* instance_type = assembler->LoadMapInstanceType(map);
+  Node* map = LoadMap(object);
+  Node* instance_type = LoadMapInstanceType(map);
 
-  Variable var_index(assembler, MachineType::PointerRepresentation());
+  {
+    Variable var_index(this, MachineType::PointerRepresentation());
+    Variable var_unique(this, MachineRepresentation::kTagged);
 
-  Label keyisindex(assembler), if_iskeyunique(assembler);
-  assembler->TryToName(key, &keyisindex, &var_index, &if_iskeyunique,
-                       &call_runtime);
+    Label keyisindex(this), if_iskeyunique(this);
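+    // TryToName dispatches on {key}: array indices jump to &keyisindex with
+    // var_index bound, unique names to &if_iskeyunique with var_unique bound,
+    // and anything else bails out to the runtime.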
+    TryToName(key, &keyisindex, &var_index, &if_iskeyunique, &var_unique,
+              &call_runtime);
 
-  assembler->Bind(&if_iskeyunique);
-  assembler->TryHasOwnProperty(object, map, instance_type, key, &return_true,
-                               &return_false, &call_runtime);
+    Bind(&if_iskeyunique);
+    TryHasOwnProperty(object, map, instance_type, var_unique.value(),
+                      &return_true, &return_false, &call_runtime);
 
-  assembler->Bind(&keyisindex);
-  // Handle negative keys in the runtime.
-  assembler->GotoIf(assembler->IntPtrLessThan(var_index.value(),
-                                              assembler->IntPtrConstant(0)),
-                    &call_runtime);
-  assembler->TryLookupElement(object, map, instance_type, var_index.value(),
-                              &return_true, &return_false, &call_runtime);
+    Bind(&keyisindex);
+    // Handle negative keys in the runtime.
+    GotoIf(IntPtrLessThan(var_index.value(), IntPtrConstant(0)), &call_runtime);
+    TryLookupElement(object, map, instance_type, var_index.value(),
+                     &return_true, &return_false, &call_runtime);
+  }
+  Bind(&return_true);
+  Return(BooleanConstant(true));
 
-  assembler->Bind(&return_true);
-  assembler->Return(assembler->BooleanConstant(true));
+  Bind(&return_false);
+  Return(BooleanConstant(false));
 
-  assembler->Bind(&return_false);
-  assembler->Return(assembler->BooleanConstant(false));
-
-  assembler->Bind(&call_runtime);
-  assembler->Return(assembler->CallRuntime(Runtime::kObjectHasOwnProperty,
-                                           context, object, key));
+  Bind(&call_runtime);
+  Return(CallRuntime(Runtime::kObjectHasOwnProperty, context, object, key));
 }
 
-namespace {
-
-MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> to,
-                                       Handle<Object> next_source) {
-  // Non-empty strings are the only non-JSReceivers that need to be handled
-  // explicitly by Object.assign.
-  if (!next_source->IsJSReceiver()) {
-    return Just(!next_source->IsString() ||
-                String::cast(*next_source)->length() == 0);
-  }
-
-  // If the target is deprecated, the object will be updated on first store. If
-  // the source for that store equals the target, this will invalidate the
-  // cached representation of the source. Preventively upgrade the target.
-  // Do this on each iteration since any property load could cause deprecation.
-  if (to->map()->is_deprecated()) {
-    JSObject::MigrateInstance(Handle<JSObject>::cast(to));
-  }
-
-  Isolate* isolate = to->GetIsolate();
-  Handle<Map> map(JSReceiver::cast(*next_source)->map(), isolate);
-
-  if (!map->IsJSObjectMap()) return Just(false);
-  if (!map->OnlyHasSimpleProperties()) return Just(false);
-
-  Handle<JSObject> from = Handle<JSObject>::cast(next_source);
-  if (from->elements() != isolate->heap()->empty_fixed_array()) {
-    return Just(false);
-  }
-
-  Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
-  int length = map->NumberOfOwnDescriptors();
-
-  bool stable = true;
-
-  for (int i = 0; i < length; i++) {
-    Handle<Name> next_key(descriptors->GetKey(i), isolate);
-    Handle<Object> prop_value;
-    // Directly decode from the descriptor array if |from| did not change shape.
-    if (stable) {
-      PropertyDetails details = descriptors->GetDetails(i);
-      if (!details.IsEnumerable()) continue;
-      if (details.kind() == kData) {
-        if (details.location() == kDescriptor) {
-          prop_value = handle(descriptors->GetValue(i), isolate);
-        } else {
-          Representation representation = details.representation();
-          FieldIndex index = FieldIndex::ForDescriptor(*map, i);
-          prop_value = JSObject::FastPropertyAt(from, representation, index);
-        }
-      } else {
-        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-            isolate, prop_value, JSReceiver::GetProperty(from, next_key),
-            Nothing<bool>());
-        stable = from->map() == *map;
-      }
-    } else {
-      // If the map did change, do a slower lookup. We are still guaranteed that
-      // the object has a simple shape, and that the key is a name.
-      LookupIterator it(from, next_key, from,
-                        LookupIterator::OWN_SKIP_INTERCEPTOR);
-      if (!it.IsFound()) continue;
-      DCHECK(it.state() == LookupIterator::DATA ||
-             it.state() == LookupIterator::ACCESSOR);
-      if (!it.IsEnumerable()) continue;
-      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-          isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
-    }
-    LookupIterator it(to, next_key, to);
-    bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
-    Maybe<bool> result = Object::SetProperty(
-        &it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
-    if (result.IsNothing()) return result;
-    if (stable && call_to_js) stable = from->map() == *map;
-  }
-
-  return Just(true);
-}
-
-}  // namespace
-
 // ES6 19.1.2.1 Object.assign
 BUILTIN(ObjectAssign) {
   HandleScope scope(isolate);
@@ -160,44 +98,9 @@
   //    second argument.
   // 4. For each element nextSource of sources, in ascending index order,
   for (int i = 2; i < args.length(); ++i) {
-    Handle<Object> next_source = args.at<Object>(i);
-    Maybe<bool> fast_assign = FastAssign(to, next_source);
-    if (fast_assign.IsNothing()) return isolate->heap()->exception();
-    if (fast_assign.FromJust()) continue;
-    // 4a. If nextSource is undefined or null, let keys be an empty List.
-    // 4b. Else,
-    // 4b i. Let from be ToObject(nextSource).
-    // Only non-empty strings and JSReceivers have enumerable properties.
-    Handle<JSReceiver> from =
-        Object::ToObject(isolate, next_source).ToHandleChecked();
-    // 4b ii. Let keys be ? from.[[OwnPropertyKeys]]().
-    Handle<FixedArray> keys;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, keys, KeyAccumulator::GetKeys(
-                           from, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
-                           GetKeysConversion::kKeepNumbers));
-    // 4c. Repeat for each element nextKey of keys in List order,
-    for (int j = 0; j < keys->length(); ++j) {
-      Handle<Object> next_key(keys->get(j), isolate);
-      // 4c i. Let desc be ? from.[[GetOwnProperty]](nextKey).
-      PropertyDescriptor desc;
-      Maybe<bool> found =
-          JSReceiver::GetOwnPropertyDescriptor(isolate, from, next_key, &desc);
-      if (found.IsNothing()) return isolate->heap()->exception();
-      // 4c ii. If desc is not undefined and desc.[[Enumerable]] is true, then
-      if (found.FromJust() && desc.enumerable()) {
-        // 4c ii 1. Let propValue be ? Get(from, nextKey).
-        Handle<Object> prop_value;
-        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-            isolate, prop_value,
-            Runtime::GetObjectProperty(isolate, from, next_key));
-        // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
-        Handle<Object> status;
-        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-            isolate, status, Runtime::SetObjectProperty(isolate, to, next_key,
-                                                        prop_value, STRICT));
-      }
-    }
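+    // SetOrCopyDataProperties subsumes both the removed FastAssign fast path
+    // and the spec-style fallback (steps 4a-4c above).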
+    Handle<Object> next_source = args.at(i);
+    MAYBE_RETURN(JSReceiver::SetOrCopyDataProperties(isolate, to, next_source),
+                 isolate->heap()->exception());
   }
   // 5. Return to.
   return *to;
@@ -219,134 +122,90 @@
   return isolate->heap()->ToBoolean((maybe.FromJust() & DONT_ENUM) == 0);
 }
 
-namespace {  // anonymous namespace for ObjectProtoToString()
+void ObjectBuiltinsAssembler::IsString(Node* object, Label* if_string,
+                                       Label* if_notstring) {
+  Label if_notsmi(this);
+  Branch(TaggedIsSmi(object), if_notstring, &if_notsmi);
 
-void IsString(CodeStubAssembler* assembler, compiler::Node* object,
-              CodeStubAssembler::Label* if_string,
-              CodeStubAssembler::Label* if_notstring) {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-
-  Label if_notsmi(assembler);
-  assembler->Branch(assembler->TaggedIsSmi(object), if_notstring, &if_notsmi);
-
-  assembler->Bind(&if_notsmi);
+  Bind(&if_notsmi);
   {
-    Node* instance_type = assembler->LoadInstanceType(object);
+    Node* instance_type = LoadInstanceType(object);
 
-    assembler->Branch(assembler->IsStringInstanceType(instance_type), if_string,
-                      if_notstring);
+    Branch(IsStringInstanceType(instance_type), if_string, if_notstring);
   }
 }
 
-void ReturnToStringFormat(CodeStubAssembler* assembler, compiler::Node* context,
-                          compiler::Node* string) {
-  typedef compiler::Node Node;
+void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
+                                                   Node* string) {
+  Node* lhs = HeapConstant(factory()->NewStringFromStaticChars("[object "));
+  Node* rhs = HeapConstant(factory()->NewStringFromStaticChars("]"));
 
-  Node* lhs = assembler->HeapConstant(
-      assembler->factory()->NewStringFromStaticChars("[object "));
-  Node* rhs = assembler->HeapConstant(
-      assembler->factory()->NewStringFromStaticChars("]"));
+  Callable callable =
+      CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
 
-  Callable callable = CodeFactory::StringAdd(
-      assembler->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
-
-  assembler->Return(assembler->CallStub(
-      callable, context, assembler->CallStub(callable, context, lhs, string),
-      rhs));
+  Return(CallStub(callable, context, CallStub(callable, context, lhs, string),
+                  rhs));
 }
 
-void ReturnIfPrimitive(CodeStubAssembler* assembler,
-                       compiler::Node* instance_type,
-                       CodeStubAssembler::Label* return_string,
-                       CodeStubAssembler::Label* return_boolean,
-                       CodeStubAssembler::Label* return_number) {
-  assembler->GotoIf(assembler->IsStringInstanceType(instance_type),
-                    return_string);
-
-  assembler->GotoIf(assembler->Word32Equal(
-                        instance_type, assembler->Int32Constant(ODDBALL_TYPE)),
-                    return_boolean);
-
-  assembler->GotoIf(
-      assembler->Word32Equal(instance_type,
-                             assembler->Int32Constant(HEAP_NUMBER_TYPE)),
-      return_number);
-}
-
-}  // namespace
-
 // ES6 section 19.1.3.6 Object.prototype.toString
-void Builtins::Generate_ObjectProtoToString(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(ObjectProtoToString, ObjectBuiltinsAssembler) {
+  Label return_undefined(this, Label::kDeferred),
+      return_null(this, Label::kDeferred),
+      return_arguments(this, Label::kDeferred), return_array(this),
+      return_api(this, Label::kDeferred), return_object(this),
+      return_regexp(this), return_function(this), return_error(this),
+      return_date(this), return_jsvalue(this),
+      return_jsproxy(this, Label::kDeferred);
 
-  Label return_undefined(assembler, Label::kDeferred),
-      return_null(assembler, Label::kDeferred),
-      return_arguments(assembler, Label::kDeferred), return_array(assembler),
-      return_api(assembler, Label::kDeferred), return_object(assembler),
-      return_regexp(assembler), return_function(assembler),
-      return_error(assembler), return_date(assembler), return_string(assembler),
-      return_boolean(assembler), return_jsvalue(assembler),
-      return_jsproxy(assembler, Label::kDeferred), return_number(assembler);
+  Label if_isproxy(this, Label::kDeferred);
 
-  Label if_isproxy(assembler, Label::kDeferred);
+  Label checkstringtag(this);
+  Label if_tostringtag(this), if_notostringtag(this);
 
-  Label checkstringtag(assembler);
-  Label if_tostringtag(assembler), if_notostringtag(assembler);
+  Node* receiver = Parameter(0);
+  Node* context = Parameter(3);
 
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
+  GotoIf(WordEqual(receiver, UndefinedConstant()), &return_undefined);
 
-  assembler->GotoIf(
-      assembler->WordEqual(receiver, assembler->UndefinedConstant()),
-      &return_undefined);
+  GotoIf(WordEqual(receiver, NullConstant()), &return_null);
 
-  assembler->GotoIf(assembler->WordEqual(receiver, assembler->NullConstant()),
-                    &return_null);
+  Callable to_object = CodeFactory::ToObject(isolate());
+  receiver = CallStub(to_object, context, receiver);
 
-  assembler->GotoIf(assembler->TaggedIsSmi(receiver), &return_number);
-
-  Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
-  ReturnIfPrimitive(assembler, receiver_instance_type, &return_string,
-                    &return_boolean, &return_number);
+  Node* receiver_instance_type = LoadInstanceType(receiver);
 
   // For proxies, check IsArray before getting @@toStringTag.
-  Variable var_proxy_is_array(assembler, MachineRepresentation::kTagged);
-  var_proxy_is_array.Bind(assembler->BooleanConstant(false));
+  Variable var_proxy_is_array(this, MachineRepresentation::kTagged);
+  var_proxy_is_array.Bind(BooleanConstant(false));
 
-  assembler->Branch(
-      assembler->Word32Equal(receiver_instance_type,
-                             assembler->Int32Constant(JS_PROXY_TYPE)),
-      &if_isproxy, &checkstringtag);
+  Branch(Word32Equal(receiver_instance_type, Int32Constant(JS_PROXY_TYPE)),
+         &if_isproxy, &checkstringtag);
 
-  assembler->Bind(&if_isproxy);
+  Bind(&if_isproxy);
   {
     // This can throw
     var_proxy_is_array.Bind(
-        assembler->CallRuntime(Runtime::kArrayIsArray, context, receiver));
-    assembler->Goto(&checkstringtag);
+        CallRuntime(Runtime::kArrayIsArray, context, receiver));
+    Goto(&checkstringtag);
   }
 
-  assembler->Bind(&checkstringtag);
+  Bind(&checkstringtag);
   {
-    Node* to_string_tag_symbol = assembler->HeapConstant(
-        assembler->isolate()->factory()->to_string_tag_symbol());
+    Node* to_string_tag_symbol =
+        HeapConstant(isolate()->factory()->to_string_tag_symbol());
 
-    GetPropertyStub stub(assembler->isolate());
+    GetPropertyStub stub(isolate());
     Callable get_property =
         Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
-    Node* to_string_tag_value = assembler->CallStub(
-        get_property, context, receiver, to_string_tag_symbol);
+    Node* to_string_tag_value =
+        CallStub(get_property, context, receiver, to_string_tag_symbol);
 
-    IsString(assembler, to_string_tag_value, &if_tostringtag,
-             &if_notostringtag);
+    IsString(to_string_tag_value, &if_tostringtag, &if_notostringtag);
 
-    assembler->Bind(&if_tostringtag);
-    ReturnToStringFormat(assembler, context, to_string_tag_value);
+    Bind(&if_tostringtag);
+    ReturnToStringFormat(context, to_string_tag_value);
   }
-  assembler->Bind(&if_notostringtag);
+  Bind(&if_notostringtag);
   {
     size_t const kNumCases = 11;
     Label* case_labels[kNumCases];
@@ -374,178 +233,164 @@
     case_labels[10] = &return_jsproxy;
     case_values[10] = JS_PROXY_TYPE;
 
-    assembler->Switch(receiver_instance_type, &return_object, case_values,
-                      case_labels, arraysize(case_values));
+    Switch(receiver_instance_type, &return_object, case_values, case_labels,
+           arraysize(case_values));
 
-    assembler->Bind(&return_undefined);
-    assembler->Return(assembler->HeapConstant(
-        assembler->isolate()->factory()->undefined_to_string()));
+    Bind(&return_undefined);
+    Return(HeapConstant(isolate()->factory()->undefined_to_string()));
 
-    assembler->Bind(&return_null);
-    assembler->Return(assembler->HeapConstant(
-        assembler->isolate()->factory()->null_to_string()));
+    Bind(&return_null);
+    Return(HeapConstant(isolate()->factory()->null_to_string()));
 
-    assembler->Bind(&return_number);
-    assembler->Return(assembler->HeapConstant(
-        assembler->isolate()->factory()->number_to_string()));
+    Bind(&return_arguments);
+    Return(HeapConstant(isolate()->factory()->arguments_to_string()));
 
-    assembler->Bind(&return_string);
-    assembler->Return(assembler->HeapConstant(
-        assembler->isolate()->factory()->string_to_string()));
+    Bind(&return_array);
+    Return(HeapConstant(isolate()->factory()->array_to_string()));
 
-    assembler->Bind(&return_boolean);
-    assembler->Return(assembler->HeapConstant(
-        assembler->isolate()->factory()->boolean_to_string()));
+    Bind(&return_function);
+    Return(HeapConstant(isolate()->factory()->function_to_string()));
 
-    assembler->Bind(&return_arguments);
-    assembler->Return(assembler->HeapConstant(
-        assembler->isolate()->factory()->arguments_to_string()));
+    Bind(&return_error);
+    Return(HeapConstant(isolate()->factory()->error_to_string()));
 
-    assembler->Bind(&return_array);
-    assembler->Return(assembler->HeapConstant(
-        assembler->isolate()->factory()->array_to_string()));
+    Bind(&return_date);
+    Return(HeapConstant(isolate()->factory()->date_to_string()));
 
-    assembler->Bind(&return_function);
-    assembler->Return(assembler->HeapConstant(
-        assembler->isolate()->factory()->function_to_string()));
+    Bind(&return_regexp);
+    Return(HeapConstant(isolate()->factory()->regexp_to_string()));
 
-    assembler->Bind(&return_error);
-    assembler->Return(assembler->HeapConstant(
-        assembler->isolate()->factory()->error_to_string()));
-
-    assembler->Bind(&return_date);
-    assembler->Return(assembler->HeapConstant(
-        assembler->isolate()->factory()->date_to_string()));
-
-    assembler->Bind(&return_regexp);
-    assembler->Return(assembler->HeapConstant(
-        assembler->isolate()->factory()->regexp_to_string()));
-
-    assembler->Bind(&return_api);
+    Bind(&return_api);
     {
-      Node* class_name =
-          assembler->CallRuntime(Runtime::kClassOf, context, receiver);
-      ReturnToStringFormat(assembler, context, class_name);
+      Node* class_name = CallRuntime(Runtime::kClassOf, context, receiver);
+      ReturnToStringFormat(context, class_name);
     }
 
-    assembler->Bind(&return_jsvalue);
+    Bind(&return_jsvalue);
     {
-      Node* value = assembler->LoadJSValueValue(receiver);
-      assembler->GotoIf(assembler->TaggedIsSmi(value), &return_number);
+      Label return_boolean(this), return_number(this), return_string(this);
 
-      ReturnIfPrimitive(assembler, assembler->LoadInstanceType(value),
-                        &return_string, &return_boolean, &return_number);
-      assembler->Goto(&return_object);
+      Node* value = LoadJSValueValue(receiver);
+      GotoIf(TaggedIsSmi(value), &return_number);
+      Node* instance_type = LoadInstanceType(value);
+
+      GotoIf(IsStringInstanceType(instance_type), &return_string);
+      GotoIf(Word32Equal(instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+             &return_number);
+      GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)),
+             &return_boolean);
+
+      CSA_ASSERT(this, Word32Equal(instance_type, Int32Constant(SYMBOL_TYPE)));
+      Goto(&return_object);
+
+      Bind(&return_string);
+      Return(HeapConstant(isolate()->factory()->string_to_string()));
+
+      Bind(&return_number);
+      Return(HeapConstant(isolate()->factory()->number_to_string()));
+
+      Bind(&return_boolean);
+      Return(HeapConstant(isolate()->factory()->boolean_to_string()));
     }
 
-    assembler->Bind(&return_jsproxy);
+    Bind(&return_jsproxy);
     {
-      assembler->GotoIf(assembler->WordEqual(var_proxy_is_array.value(),
-                                             assembler->BooleanConstant(true)),
-                        &return_array);
+      GotoIf(WordEqual(var_proxy_is_array.value(), BooleanConstant(true)),
+             &return_array);
 
-      Node* map = assembler->LoadMap(receiver);
+      Node* map = LoadMap(receiver);
 
       // Return object if the proxy {receiver} is not callable.
-      assembler->Branch(assembler->IsCallableMap(map), &return_function,
-                        &return_object);
+      Branch(IsCallableMap(map), &return_function, &return_object);
     }
 
     // Default
-    assembler->Bind(&return_object);
-    assembler->Return(assembler->HeapConstant(
-        assembler->isolate()->factory()->object_to_string()));
+    Bind(&return_object);
+    Return(HeapConstant(isolate()->factory()->object_to_string()));
   }
 }
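+// Illustrative JS-level behavior of the dispatch above (standard ES6
+// semantics):
+//   Object.prototype.toString.call(undefined);  // "[object Undefined]"
+//   Object.prototype.toString.call([]);         // "[object Array]"
+//   Object.prototype.toString.call({[Symbol.toStringTag]: "Foo"});
+//                                               // "[object Foo]"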
 
-void Builtins::Generate_ObjectCreate(CodeStubAssembler* a) {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
+  Node* prototype = Parameter(1);
+  Node* properties = Parameter(2);
+  Node* context = Parameter(3 + 2);
 
-  Node* prototype = a->Parameter(1);
-  Node* properties = a->Parameter(2);
-  Node* context = a->Parameter(3 + 2);
-
-  Label call_runtime(a, Label::kDeferred), prototype_valid(a), no_properties(a);
+  Label call_runtime(this, Label::kDeferred), prototype_valid(this),
+      no_properties(this);
   {
-    a->Comment("Argument 1 check: prototype");
-    a->GotoIf(a->WordEqual(prototype, a->NullConstant()), &prototype_valid);
-    a->BranchIfJSReceiver(prototype, &prototype_valid, &call_runtime);
+    Comment("Argument 1 check: prototype");
+    GotoIf(WordEqual(prototype, NullConstant()), &prototype_valid);
+    BranchIfJSReceiver(prototype, &prototype_valid, &call_runtime);
   }
 
-  a->Bind(&prototype_valid);
+  Bind(&prototype_valid);
   {
-    a->Comment("Argument 2 check: properties");
+    Comment("Argument 2 check: properties");
     // Check that we have a simple object
-    a->GotoIf(a->TaggedIsSmi(properties), &call_runtime);
+    GotoIf(TaggedIsSmi(properties), &call_runtime);
     // Undefined implies no properties.
-    a->GotoIf(a->WordEqual(properties, a->UndefinedConstant()), &no_properties);
-    Node* properties_map = a->LoadMap(properties);
-    a->GotoIf(a->IsSpecialReceiverMap(properties_map), &call_runtime);
+    GotoIf(WordEqual(properties, UndefinedConstant()), &no_properties);
+    Node* properties_map = LoadMap(properties);
+    GotoIf(IsSpecialReceiverMap(properties_map), &call_runtime);
     // Stay on the fast path only if there are no elements.
-    a->GotoUnless(a->WordEqual(a->LoadElements(properties),
-                               a->LoadRoot(Heap::kEmptyFixedArrayRootIndex)),
-                  &call_runtime);
+    GotoIfNot(WordEqual(LoadElements(properties),
+                        LoadRoot(Heap::kEmptyFixedArrayRootIndex)),
+              &call_runtime);
     // Handle dictionary objects or fast objects with properties in runtime.
-    Node* bit_field3 = a->LoadMapBitField3(properties_map);
-    a->GotoIf(a->IsSetWord32<Map::DictionaryMap>(bit_field3), &call_runtime);
-    a->Branch(a->IsSetWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3),
-              &call_runtime, &no_properties);
+    Node* bit_field3 = LoadMapBitField3(properties_map);
+    GotoIf(IsSetWord32<Map::DictionaryMap>(bit_field3), &call_runtime);
+    Branch(IsSetWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3),
+           &call_runtime, &no_properties);
   }
 
   // Create a new object with the given prototype.
-  a->Bind(&no_properties);
+  Bind(&no_properties);
   {
-    Variable map(a, MachineRepresentation::kTagged);
-    Variable properties(a, MachineRepresentation::kTagged);
-    Label non_null_proto(a), instantiate_map(a), good(a);
+    Variable map(this, MachineRepresentation::kTagged);
+    Variable properties(this, MachineRepresentation::kTagged);
+    Label non_null_proto(this), instantiate_map(this), good(this);
 
-    a->Branch(a->WordEqual(prototype, a->NullConstant()), &good,
-              &non_null_proto);
+    Branch(WordEqual(prototype, NullConstant()), &good, &non_null_proto);
 
-    a->Bind(&good);
+    Bind(&good);
     {
-      map.Bind(a->LoadContextElement(
+      map.Bind(LoadContextElement(
           context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
-      properties.Bind(
-          a->AllocateNameDictionary(NameDictionary::kInitialCapacity));
-      a->Goto(&instantiate_map);
+      properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity));
+      Goto(&instantiate_map);
     }
 
-    a->Bind(&non_null_proto);
+    Bind(&non_null_proto);
     {
-      properties.Bind(a->EmptyFixedArrayConstant());
+      properties.Bind(EmptyFixedArrayConstant());
       Node* object_function =
-          a->LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX);
-      Node* object_function_map = a->LoadObjectField(
+          LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX);
+      Node* object_function_map = LoadObjectField(
           object_function, JSFunction::kPrototypeOrInitialMapOffset);
       map.Bind(object_function_map);
-      a->GotoIf(a->WordEqual(prototype, a->LoadMapPrototype(map.value())),
-                &instantiate_map);
+      GotoIf(WordEqual(prototype, LoadMapPrototype(map.value())),
+             &instantiate_map);
       // Try loading the prototype info.
       Node* prototype_info =
-          a->LoadMapPrototypeInfo(a->LoadMap(prototype), &call_runtime);
-      a->Comment("Load ObjectCreateMap from PrototypeInfo");
+          LoadMapPrototypeInfo(LoadMap(prototype), &call_runtime);
+      Comment("Load ObjectCreateMap from PrototypeInfo");
       Node* weak_cell =
-          a->LoadObjectField(prototype_info, PrototypeInfo::kObjectCreateMap);
-      a->GotoIf(a->WordEqual(weak_cell, a->UndefinedConstant()), &call_runtime);
-      map.Bind(a->LoadWeakCellValue(weak_cell, &call_runtime));
-      a->Goto(&instantiate_map);
+          LoadObjectField(prototype_info, PrototypeInfo::kObjectCreateMap);
+      GotoIf(WordEqual(weak_cell, UndefinedConstant()), &call_runtime);
+      map.Bind(LoadWeakCellValue(weak_cell, &call_runtime));
+      Goto(&instantiate_map);
     }
 
-    a->Bind(&instantiate_map);
+    Bind(&instantiate_map);
     {
-      Node* instance =
-          a->AllocateJSObjectFromMap(map.value(), properties.value());
-      a->Return(instance);
+      Node* instance = AllocateJSObjectFromMap(map.value(), properties.value());
+      Return(instance);
     }
   }
 
-  a->Bind(&call_runtime);
+  Bind(&call_runtime);
   {
-    a->Return(
-        a->CallRuntime(Runtime::kObjectCreate, context, prototype, properties));
+    Return(CallRuntime(Runtime::kObjectCreate, context, prototype, properties));
   }
 }
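+// For example, Object.create(null) stays on the fast path: it takes the
+// &good branch and allocates a dictionary-mode object with the
+// slow-object-with-null-prototype map, while
+// Object.create(proto, {x: {value: 1}}) hits &call_runtime because the
+// properties object has own descriptors.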
 
@@ -553,8 +398,8 @@
 BUILTIN(ObjectDefineProperties) {
   HandleScope scope(isolate);
   DCHECK_EQ(3, args.length());
-  Handle<Object> target = args.at<Object>(1);
-  Handle<Object> properties = args.at<Object>(2);
+  Handle<Object> target = args.at(1);
+  Handle<Object> properties = args.at(2);
 
   RETURN_RESULT_OR_FAILURE(
       isolate, JSReceiver::DefineProperties(isolate, target, properties));
@@ -564,9 +409,9 @@
 BUILTIN(ObjectDefineProperty) {
   HandleScope scope(isolate);
   DCHECK_EQ(4, args.length());
-  Handle<Object> target = args.at<Object>(1);
-  Handle<Object> key = args.at<Object>(2);
-  Handle<Object> attributes = args.at<Object>(3);
+  Handle<Object> target = args.at(1);
+  Handle<Object> key = args.at(2);
+  Handle<Object> attributes = args.at(3);
 
   return JSReceiver::DefineProperty(isolate, target, key, attributes);
 }
@@ -640,13 +485,33 @@
         RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
         return isolate->heap()->undefined_value();
 
-      case LookupIterator::JSPROXY:
-        return isolate->heap()->undefined_value();
+      case LookupIterator::JSPROXY: {
+        PropertyDescriptor desc;
+        Maybe<bool> found = JSProxy::GetOwnPropertyDescriptor(
+            isolate, it.GetHolder<JSProxy>(), it.GetName(), &desc);
+        MAYBE_RETURN(found, isolate->heap()->exception());
+        if (found.FromJust()) {
+          if (component == ACCESSOR_GETTER && desc.has_get()) {
+            return *desc.get();
+          }
+          if (component == ACCESSOR_SETTER && desc.has_set()) {
+            return *desc.set();
+          }
+          return isolate->heap()->undefined_value();
+        }
+        Handle<Object> prototype;
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+            isolate, prototype, JSProxy::GetPrototype(it.GetHolder<JSProxy>()));
+        if (prototype->IsNull(isolate)) {
+          return isolate->heap()->undefined_value();
+        }
+        return ObjectLookupAccessor(isolate, prototype, key, component);
+      }
 
       case LookupIterator::INTEGER_INDEXED_EXOTIC:
-        return isolate->heap()->undefined_value();
       case LookupIterator::DATA:
-        continue;
+        return isolate->heap()->undefined_value();
+
       case LookupIterator::ACCESSOR: {
         Handle<Object> maybe_pair = it.GetAccessors();
         if (maybe_pair->IsAccessorPair()) {
@@ -666,9 +531,9 @@
 // https://tc39.github.io/ecma262/#sec-object.prototype.__defineGetter__
 BUILTIN(ObjectDefineGetter) {
   HandleScope scope(isolate);
-  Handle<Object> object = args.at<Object>(0);  // Receiver.
-  Handle<Object> name = args.at<Object>(1);
-  Handle<Object> getter = args.at<Object>(2);
+  Handle<Object> object = args.at(0);  // Receiver.
+  Handle<Object> name = args.at(1);
+  Handle<Object> getter = args.at(2);
   return ObjectDefineAccessor<ACCESSOR_GETTER>(isolate, object, name, getter);
 }
 
@@ -676,9 +541,9 @@
 // https://tc39.github.io/ecma262/#sec-object.prototype.__defineSetter__
 BUILTIN(ObjectDefineSetter) {
   HandleScope scope(isolate);
-  Handle<Object> object = args.at<Object>(0);  // Receiver.
-  Handle<Object> name = args.at<Object>(1);
-  Handle<Object> setter = args.at<Object>(2);
+  Handle<Object> object = args.at(0);  // Receiver.
+  Handle<Object> name = args.at(1);
+  Handle<Object> setter = args.at(2);
   return ObjectDefineAccessor<ACCESSOR_SETTER>(isolate, object, name, setter);
 }
 
@@ -686,8 +551,8 @@
 // https://tc39.github.io/ecma262/#sec-object.prototype.__lookupGetter__
 BUILTIN(ObjectLookupGetter) {
   HandleScope scope(isolate);
-  Handle<Object> object = args.at<Object>(0);
-  Handle<Object> name = args.at<Object>(1);
+  Handle<Object> object = args.at(0);
+  Handle<Object> name = args.at(1);
   return ObjectLookupAccessor(isolate, object, name, ACCESSOR_GETTER);
 }
 
@@ -695,8 +560,8 @@
 // https://tc39.github.io/ecma262/#sec-object.prototype.__lookupSetter__
 BUILTIN(ObjectLookupSetter) {
   HandleScope scope(isolate);
-  Handle<Object> object = args.at<Object>(0);
-  Handle<Object> name = args.at<Object>(1);
+  Handle<Object> object = args.at(0);
+  Handle<Object> name = args.at(1);
   return ObjectLookupAccessor(isolate, object, name, ACCESSOR_SETTER);
 }
 
@@ -731,7 +596,7 @@
 
   // 1. Let O be ? RequireObjectCoercible(O).
   Handle<Object> object = args.atOrUndefined(isolate, 1);
-  if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
+  if (object->IsNullOrUndefined(isolate)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
                               isolate->factory()->NewStringFromAsciiChecked(
@@ -777,7 +642,7 @@
   HandleScope scope(isolate);
   // 1. Let O be ? RequireObjectCoercible(this value).
   Handle<Object> object = args.receiver();
-  if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
+  if (object->IsNullOrUndefined(isolate)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
                               isolate->factory()->NewStringFromAsciiChecked(
@@ -785,7 +650,7 @@
   }
 
   // 2. If Type(proto) is neither Object nor Null, return undefined.
-  Handle<Object> proto = args.at<Object>(1);
+  Handle<Object> proto = args.at(1);
   if (!proto->IsNull(isolate) && !proto->IsJSReceiver()) {
     return isolate->heap()->undefined_value();
   }
@@ -860,8 +725,8 @@
 BUILTIN(ObjectIs) {
   SealHandleScope shs(isolate);
   DCHECK_EQ(3, args.length());
-  Handle<Object> value1 = args.at<Object>(1);
-  Handle<Object> value2 = args.at<Object>(2);
+  Handle<Object> value1 = args.at(1);
+  Handle<Object> value2 = args.at(2);
   return isolate->heap()->ToBoolean(value1->SameValue(*value2));
 }
 
@@ -1022,50 +887,116 @@
   return *object;
 }
 
-void Builtins::Generate_HasProperty(CodeStubAssembler* assembler) {
-  typedef HasPropertyDescriptor Descriptor;
-  typedef compiler::Node Node;
+TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) {
+  typedef CreateIterResultObjectDescriptor Descriptor;
 
-  Node* key = assembler->Parameter(Descriptor::kKey);
-  Node* object = assembler->Parameter(Descriptor::kObject);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* const value = Parameter(Descriptor::kValue);
+  Node* const done = Parameter(Descriptor::kDone);
+  Node* const context = Parameter(Descriptor::kContext);
 
-  assembler->Return(
-      assembler->HasProperty(object, key, context, Runtime::kHasProperty));
+  Node* const native_context = LoadNativeContext(context);
+  Node* const map =
+      LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+  Node* const result = AllocateJSObjectFromMap(map);
+
+  StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, value);
+  StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset, done);
+
+  Return(result);
 }
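+// This allocates the {value, done} result objects of the iterator protocol.
+// The NoWriteBarrier stores are safe because {result} was just allocated,
+// so the stores cannot create old-to-new references.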
 
-void Builtins::Generate_ForInFilter(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
+TF_BUILTIN(HasProperty, ObjectBuiltinsAssembler) {
+  typedef HasPropertyDescriptor Descriptor;
+
+  Node* key = Parameter(Descriptor::kKey);
+  Node* object = Parameter(Descriptor::kObject);
+  Node* context = Parameter(Descriptor::kContext);
+
+  Return(HasProperty(object, key, context, Runtime::kHasProperty));
+}
+
+TF_BUILTIN(ForInFilter, ObjectBuiltinsAssembler) {
   typedef ForInFilterDescriptor Descriptor;
 
-  Node* key = assembler->Parameter(Descriptor::kKey);
-  Node* object = assembler->Parameter(Descriptor::kObject);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* key = Parameter(Descriptor::kKey);
+  Node* object = Parameter(Descriptor::kObject);
+  Node* context = Parameter(Descriptor::kContext);
 
-  assembler->Return(assembler->ForInFilter(key, object, context));
+  Return(ForInFilter(key, object, context));
 }
 
-void Builtins::Generate_InstanceOf(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
-  typedef CompareDescriptor Descriptor;
-  Node* object = assembler->Parameter(Descriptor::kLeft);
-  Node* callable = assembler->Parameter(Descriptor::kRight);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+TF_BUILTIN(ForInNext, ObjectBuiltinsAssembler) {
+  typedef ForInNextDescriptor Descriptor;
 
-  assembler->Return(assembler->InstanceOf(object, callable, context));
+  Label filter(this);
+  Node* object = Parameter(Descriptor::kObject);
+  Node* cache_array = Parameter(Descriptor::kCacheArray);
+  Node* cache_type = Parameter(Descriptor::kCacheType);
+  Node* index = Parameter(Descriptor::kIndex);
+  Node* context = Parameter(Descriptor::kContext);
+
+  Node* key = LoadFixedArrayElement(cache_array, SmiUntag(index));
+  Node* map = LoadMap(object);
+  GotoIfNot(WordEqual(map, cache_type), &filter);
+  Return(key);
+  Bind(&filter);
+  Return(ForInFilter(key, object, context));
+}
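+// The fast path returns the cached key directly while the receiver's map
+// still matches {cache_type}; the filter is only needed when the map changed
+// mid-iteration, e.g. because a property was deleted.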
+
+TF_BUILTIN(ForInPrepare, ObjectBuiltinsAssembler) {
+  typedef ForInPrepareDescriptor Descriptor;
+
+  Label call_runtime(this), nothing_to_iterate(this);
+  Node* object = Parameter(Descriptor::kObject);
+  Node* context = Parameter(Descriptor::kContext);
+
+  Node* cache_type;
+  Node* cache_array;
+  Node* cache_length;
+  std::tie(cache_type, cache_array, cache_length) =
+      EmitForInPrepare(object, context, &call_runtime, &nothing_to_iterate);
+
+  Return(cache_type, cache_array, cache_length);
+
+  Bind(&call_runtime);
+  TailCallRuntime(Runtime::kForInPrepare, context, object);
+
+  Bind(&nothing_to_iterate);
+  {
+    Node* zero = SmiConstant(0);
+    Return(zero, zero, zero);
+  }
+}
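+// Returning (0, 0, 0) for nothing_to_iterate produces a zero-length enum
+// cache, so the corresponding for-in loop exits without iterating.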
+
+TF_BUILTIN(InstanceOf, ObjectBuiltinsAssembler) {
+  typedef CompareDescriptor Descriptor;
+
+  Node* object = Parameter(Descriptor::kLeft);
+  Node* callable = Parameter(Descriptor::kRight);
+  Node* context = Parameter(Descriptor::kContext);
+
+  Return(InstanceOf(object, callable, context));
 }
 
 // ES6 section 7.3.19 OrdinaryHasInstance ( C, O )
-void Builtins::Generate_OrdinaryHasInstance(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
+TF_BUILTIN(OrdinaryHasInstance, ObjectBuiltinsAssembler) {
   typedef CompareDescriptor Descriptor;
 
-  Node* constructor = assembler->Parameter(Descriptor::kLeft);
-  Node* object = assembler->Parameter(Descriptor::kRight);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* constructor = Parameter(Descriptor::kLeft);
+  Node* object = Parameter(Descriptor::kRight);
+  Node* context = Parameter(Descriptor::kContext);
 
-  assembler->Return(
-      assembler->OrdinaryHasInstance(context, constructor, object));
+  Return(OrdinaryHasInstance(context, constructor, object));
+}
+
+TF_BUILTIN(GetSuperConstructor, ObjectBuiltinsAssembler) {
+  typedef TypeofDescriptor Descriptor;
+
+  Node* object = Parameter(Descriptor::kObject);
+  Node* context = Parameter(Descriptor::kContext);
+
+  Return(GetSuperConstructor(object, context));
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-object.h b/src/builtins/builtins-object.h
new file mode 100644
index 0000000..4943426
--- /dev/null
+++ b/src/builtins/builtins-object.h
@@ -0,0 +1,26 @@
+
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class ObjectBuiltinsAssembler : public CodeStubAssembler {
+ public:
+  explicit ObjectBuiltinsAssembler(compiler::CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
+
+  std::tuple<Node*, Node*, Node*> EmitForInPrepare(Node* object, Node* context,
+                                                   Label* call_runtime,
+                                                   Label* nothing_to_iterate);
+
+ protected:
+  void IsString(Node* object, Label* if_string, Label* if_notstring);
+  void ReturnToStringFormat(Node* context, Node* string);
+};
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins-promise.cc b/src/builtins/builtins-promise.cc
index 9f5d7c8..0d0238d 100644
--- a/src/builtins/builtins-promise.cc
+++ b/src/builtins/builtins-promise.cc
@@ -2,82 +2,1780 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/builtins/builtins-promise.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/builtins/builtins-utils.h"
 #include "src/builtins/builtins.h"
-
-#include "src/promise-utils.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
-// ES#sec-promise-resolve-functions
-// Promise Resolve Functions
-BUILTIN(PromiseResolveClosure) {
-  HandleScope scope(isolate);
+typedef compiler::Node Node;
+typedef CodeStubAssembler::ParameterMode ParameterMode;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
 
-  Handle<Context> context(isolate->context(), isolate);
+Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
+  Node* const native_context = LoadNativeContext(context);
+  Node* const promise_fun =
+      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+  Node* const initial_map =
+      LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+  Node* const instance = AllocateJSObjectFromMap(initial_map);
+  return instance;
+}
 
-  if (PromiseUtils::HasAlreadyVisited(context)) {
-    return isolate->heap()->undefined_value();
+void PromiseBuiltinsAssembler::PromiseInit(Node* promise) {
+  StoreObjectField(promise, JSPromise::kStatusOffset,
+                   SmiConstant(v8::Promise::kPending));
+  StoreObjectField(promise, JSPromise::kFlagsOffset, SmiConstant(0));
+}
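+// A freshly initialized promise is pending with no flags set, i.e. the
+// JS-visible state of a promise whose executor has not yet resolved or
+// rejected it.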
+
+Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context) {
+  return AllocateAndInitJSPromise(context, UndefinedConstant());
+}
+
+Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context,
+                                                         Node* parent) {
+  Node* const instance = AllocateJSPromise(context);
+  PromiseInit(instance);
+
+  Label out(this);
+  GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &out);
+  CallRuntime(Runtime::kPromiseHookInit, context, instance, parent);
+  Goto(&out);
+
+  Bind(&out);
+  return instance;
+}
+
+Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(Node* context,
+                                                        Node* status,
+                                                        Node* result) {
+  CSA_ASSERT(this, TaggedIsSmi(status));
+
+  Node* const instance = AllocateJSPromise(context);
+
+  StoreObjectFieldNoWriteBarrier(instance, JSPromise::kStatusOffset, status);
+  StoreObjectFieldNoWriteBarrier(instance, JSPromise::kResultOffset, result);
+  StoreObjectFieldNoWriteBarrier(instance, JSPromise::kFlagsOffset,
+                                 SmiConstant(0));
+
+  Label out(this);
+  GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &out);
+  CallRuntime(Runtime::kPromiseHookInit, context, instance,
+              UndefinedConstant());
+  Goto(&out);
+
+  Bind(&out);
+  return instance;
+}
+
+std::pair<Node*, Node*>
+PromiseBuiltinsAssembler::CreatePromiseResolvingFunctions(
+    Node* promise, Node* debug_event, Node* native_context) {
+  Node* const promise_context = CreatePromiseResolvingFunctionsContext(
+      promise, debug_event, native_context);
+  Node* const map = LoadContextElement(
+      native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+  Node* const resolve_info =
+      LoadContextElement(native_context, Context::PROMISE_RESOLVE_SHARED_FUN);
+  Node* const resolve =
+      AllocateFunctionWithMapAndContext(map, resolve_info, promise_context);
+  Node* const reject_info =
+      LoadContextElement(native_context, Context::PROMISE_REJECT_SHARED_FUN);
+  Node* const reject =
+      AllocateFunctionWithMapAndContext(map, reject_info, promise_context);
+  return std::make_pair(resolve, reject);
+}
+
+Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
+                                                     Node* constructor,
+                                                     Node* debug_event) {
+  if (debug_event == nullptr) {
+    debug_event = TrueConstant();
   }
 
-  PromiseUtils::SetAlreadyVisited(context);
-  Handle<JSObject> promise = handle(PromiseUtils::GetPromise(context), isolate);
-  Handle<Object> value = args.atOrUndefined(isolate, 1);
+  Node* native_context = LoadNativeContext(context);
 
-  MaybeHandle<Object> maybe_result;
-  Handle<Object> argv[] = {promise, value};
-  RETURN_FAILURE_ON_EXCEPTION(
-      isolate, Execution::Call(isolate, isolate->promise_resolve(),
-                               isolate->factory()->undefined_value(),
-                               arraysize(argv), argv));
-  return isolate->heap()->undefined_value();
+  Node* map = LoadRoot(Heap::kJSPromiseCapabilityMapRootIndex);
+  Node* capability = AllocateJSObjectFromMap(map);
+
+  StoreObjectFieldNoWriteBarrier(
+      capability, JSPromiseCapability::kPromiseOffset, UndefinedConstant());
+  StoreObjectFieldNoWriteBarrier(
+      capability, JSPromiseCapability::kResolveOffset, UndefinedConstant());
+  StoreObjectFieldNoWriteBarrier(capability, JSPromiseCapability::kRejectOffset,
+                                 UndefinedConstant());
+
+  Variable var_result(this, MachineRepresentation::kTagged);
+  var_result.Bind(capability);
+
+  Label if_builtin_promise(this), if_custom_promise(this, Label::kDeferred),
+      out(this);
+  Branch(WordEqual(constructor,
+                   LoadContextElement(native_context,
+                                      Context::PROMISE_FUNCTION_INDEX)),
+         &if_builtin_promise, &if_custom_promise);
+
+  Bind(&if_builtin_promise);
+  {
+    Node* promise = AllocateJSPromise(context);
+    PromiseInit(promise);
+    StoreObjectFieldNoWriteBarrier(
+        capability, JSPromiseCapability::kPromiseOffset, promise);
+
+    Node* resolve = nullptr;
+    Node* reject = nullptr;
+
+    std::tie(resolve, reject) =
+        CreatePromiseResolvingFunctions(promise, debug_event, native_context);
+    StoreObjectField(capability, JSPromiseCapability::kResolveOffset, resolve);
+    StoreObjectField(capability, JSPromiseCapability::kRejectOffset, reject);
+
+    GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &out);
+    CallRuntime(Runtime::kPromiseHookInit, context, promise,
+                UndefinedConstant());
+    Goto(&out);
+  }
+
+  Bind(&if_custom_promise);
+  {
+    Label if_notcallable(this, Label::kDeferred);
+    Node* executor_context =
+        CreatePromiseGetCapabilitiesExecutorContext(capability, native_context);
+    Node* executor_info = LoadContextElement(
+        native_context, Context::PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN);
+    Node* function_map = LoadContextElement(
+        native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+    Node* executor = AllocateFunctionWithMapAndContext(
+        function_map, executor_info, executor_context);
+
+    Node* promise = ConstructJS(CodeFactory::Construct(isolate()), context,
+                                constructor, executor);
+
+    Node* resolve =
+        LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+    GotoIf(TaggedIsSmi(resolve), &if_notcallable);
+    GotoIfNot(IsCallableMap(LoadMap(resolve)), &if_notcallable);
+
+    Node* reject =
+        LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+    GotoIf(TaggedIsSmi(reject), &if_notcallable);
+    GotoIfNot(IsCallableMap(LoadMap(reject)), &if_notcallable);
+
+    StoreObjectField(capability, JSPromiseCapability::kPromiseOffset, promise);
+
+    Goto(&out);
+
+    Bind(&if_notcallable);
+    Node* message = SmiConstant(MessageTemplate::kPromiseNonCallable);
+    StoreObjectField(capability, JSPromiseCapability::kPromiseOffset,
+                     UndefinedConstant());
+    StoreObjectField(capability, JSPromiseCapability::kResolveOffset,
+                     UndefinedConstant());
+    StoreObjectField(capability, JSPromiseCapability::kRejectOffset,
+                     UndefinedConstant());
+    CallRuntime(Runtime::kThrowTypeError, context, message);
+    Unreachable();
+  }
+
+  Bind(&out);
+  return var_result.value();
+}
+
+Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
+                                                     int slots) {
+  DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
+
+  Node* const context = Allocate(FixedArray::SizeFor(slots));
+  StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
+  StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset,
+                                 SmiConstant(slots));
+
+  Node* const empty_fn =
+      LoadContextElement(native_context, Context::CLOSURE_INDEX);
+  StoreContextElementNoWriteBarrier(context, Context::CLOSURE_INDEX, empty_fn);
+  StoreContextElementNoWriteBarrier(context, Context::PREVIOUS_INDEX,
+                                    UndefinedConstant());
+  StoreContextElementNoWriteBarrier(context, Context::EXTENSION_INDEX,
+                                    TheHoleConstant());
+  StoreContextElementNoWriteBarrier(context, Context::NATIVE_CONTEXT_INDEX,
+                                    native_context);
+  return context;
+}
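+// The result is shaped like a minimal function context: closure, previous,
+// extension and native-context slots, followed by the custom slots that the
+// two helpers below fill in (promise/debug-event or capability).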
+
+Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext(
+    Node* promise, Node* debug_event, Node* native_context) {
+  Node* const context =
+      CreatePromiseContext(native_context, kPromiseContextLength);
+  StoreContextElementNoWriteBarrier(context, kAlreadyVisitedSlot,
+                                    SmiConstant(0));
+  StoreContextElementNoWriteBarrier(context, kPromiseSlot, promise);
+  StoreContextElementNoWriteBarrier(context, kDebugEventSlot, debug_event);
+  return context;
+}
+
+Node* PromiseBuiltinsAssembler::CreatePromiseGetCapabilitiesExecutorContext(
+    Node* promise_capability, Node* native_context) {
+  int kContextLength = kCapabilitiesContextLength;
+  Node* context = CreatePromiseContext(native_context, kContextLength);
+  StoreContextElementNoWriteBarrier(context, kCapabilitySlot,
+                                    promise_capability);
+  return context;
+}
+
+Node* PromiseBuiltinsAssembler::ThrowIfNotJSReceiver(
+    Node* context, Node* value, MessageTemplate::Template msg_template,
+    const char* method_name) {
+  Label out(this), throw_exception(this, Label::kDeferred);
+  Variable var_value_map(this, MachineRepresentation::kTagged);
+
+  GotoIf(TaggedIsSmi(value), &throw_exception);
+
+  // Load the instance type of the {value}.
+  var_value_map.Bind(LoadMap(value));
+  Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
+
+  Branch(IsJSReceiverInstanceType(value_instance_type), &out, &throw_exception);
+
+  // The {value} is not a compatible receiver for this method.
+  Bind(&throw_exception);
+  {
+    Node* const method =
+        method_name == nullptr
+            ? UndefinedConstant()
+            : HeapConstant(
+                  isolate()->factory()->NewStringFromAsciiChecked(method_name));
+    Node* const message_id = SmiConstant(msg_template);
+    CallRuntime(Runtime::kThrowTypeError, context, message_id, method);
+    Unreachable();
+  }
+
+  Bind(&out);
+  return var_value_map.value();
+}
+
+Node* PromiseBuiltinsAssembler::PromiseHasHandler(Node* promise) {
+  Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
+  return IsSetWord(SmiUntag(flags), 1 << JSPromise::kHasHandlerBit);
+}
+
+void PromiseBuiltinsAssembler::PromiseSetHasHandler(Node* promise) {
+  Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
+  Node* const new_flags =
+      SmiOr(flags, SmiConstant(1 << JSPromise::kHasHandlerBit));
+  StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset, new_flags);
+}
+
+void PromiseBuiltinsAssembler::PromiseSetHandledHint(Node* promise) {
+  Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
+  Node* const new_flags =
+      SmiOr(flags, SmiConstant(1 << JSPromise::kHandledHintBit));
+  StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset, new_flags);
+}
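+// The flags field is a Smi bitfield, so the SmiOr updates above may skip the
+// write barrier: the stored value is always a Smi, never a heap pointer.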
+
+Node* PromiseBuiltinsAssembler::SpeciesConstructor(Node* context, Node* object,
+                                                   Node* default_constructor) {
+  Isolate* isolate = this->isolate();
+  Variable var_result(this, MachineRepresentation::kTagged);
+  var_result.Bind(default_constructor);
+
+  // 2. Let C be ? Get(O, "constructor").
+  Node* const constructor_str =
+      HeapConstant(isolate->factory()->constructor_string());
+  Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+  Node* const constructor =
+      CallStub(getproperty_callable, context, object, constructor_str);
+
+  // 3. If C is undefined, return defaultConstructor.
+  Label out(this);
+  GotoIf(IsUndefined(constructor), &out);
+
+  // 4. If Type(C) is not Object, throw a TypeError exception.
+  ThrowIfNotJSReceiver(context, constructor,
+                       MessageTemplate::kConstructorNotReceiver);
+
+  // 5. Let S be ? Get(C, @@species).
+  Node* const species_symbol =
+      HeapConstant(isolate->factory()->species_symbol());
+  Node* const species =
+      CallStub(getproperty_callable, context, constructor, species_symbol);
+
+  // 6. If S is either undefined or null, return defaultConstructor.
+  GotoIf(IsUndefined(species), &out);
+  GotoIf(WordEqual(species, NullConstant()), &out);
+
+  // 7. If IsConstructor(S) is true, return S.
+  Label throw_error(this);
+  Node* species_bitfield = LoadMapBitField(LoadMap(species));
+  GotoIfNot(Word32Equal(Word32And(species_bitfield,
+                                  Int32Constant((1 << Map::kIsConstructor))),
+                        Int32Constant(1 << Map::kIsConstructor)),
+            &throw_error);
+  var_result.Bind(species);
+  Goto(&out);
+
+  // 8. Throw a TypeError exception.
+  Bind(&throw_error);
+  {
+    Node* const message_id =
+        SmiConstant(MessageTemplate::kSpeciesNotConstructor);
+    CallRuntime(Runtime::kThrowTypeError, context, message_id);
+    Unreachable();
+  }
+
+  Bind(&out);
+  return var_result.value();
+}
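+// E.g. for an instance of `class MyPromise extends Promise {}` this returns
+// MyPromise (found via its "constructor" property) unless
+// MyPromise[Symbol.species] redirects to another constructor.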
+
+void PromiseBuiltinsAssembler::AppendPromiseCallback(int offset, Node* promise,
+                                                     Node* value) {
+  Node* elements = LoadObjectField(promise, offset);
+  Node* length = LoadFixedArrayBaseLength(elements);
+  CodeStubAssembler::ParameterMode mode = OptimalParameterMode();
+  length = TaggedToParameter(length, mode);
+
+  Node* delta = IntPtrOrSmiConstant(1, mode);
+  Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
+
+  const ElementsKind kind = FAST_ELEMENTS;
+  const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
+  const CodeStubAssembler::AllocationFlags flags =
+      CodeStubAssembler::kAllowLargeObjectAllocation;
+  int additional_offset = 0;
+
+  Node* new_elements = AllocateFixedArray(kind, new_capacity, mode, flags);
+
+  CopyFixedArrayElements(kind, elements, new_elements, length, barrier_mode,
+                         mode);
+  StoreFixedArrayElement(new_elements, length, value, barrier_mode,
+                         additional_offset, mode);
+
+  StoreObjectField(promise, offset, new_elements);
+}
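+// Each call grows one callback FixedArray by a single element; callers keep
+// the five parallel arrays (deferred promise/on-resolve/on-reject plus the
+// fulfill and reject reaction lists) in sync by appending to all of them.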
+
+Node* PromiseBuiltinsAssembler::InternalPromiseThen(Node* context,
+                                                    Node* promise,
+                                                    Node* on_resolve,
+                                                    Node* on_reject) {
+  Isolate* isolate = this->isolate();
+
+  // 2. If IsPromise(promise) is false, throw a TypeError exception.
+  ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
+                         "Promise.prototype.then");
+
+  Node* const native_context = LoadNativeContext(context);
+  Node* const promise_fun =
+      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+
+  // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
+  Node* constructor = SpeciesConstructor(context, promise, promise_fun);
+
+  // 4. Let resultCapability be ? NewPromiseCapability(C).
+  Callable call_callable = CodeFactory::Call(isolate);
+  Label fast_promise_capability(this), promise_capability(this),
+      perform_promise_then(this);
+  Variable var_deferred_promise(this, MachineRepresentation::kTagged),
+      var_deferred_on_resolve(this, MachineRepresentation::kTagged),
+      var_deferred_on_reject(this, MachineRepresentation::kTagged);
+
+  Branch(WordEqual(promise_fun, constructor), &fast_promise_capability,
+         &promise_capability);
+
+  Bind(&fast_promise_capability);
+  {
+    Node* const deferred_promise = AllocateAndInitJSPromise(context, promise);
+    var_deferred_promise.Bind(deferred_promise);
+    var_deferred_on_resolve.Bind(UndefinedConstant());
+    var_deferred_on_reject.Bind(UndefinedConstant());
+    Goto(&perform_promise_then);
+  }
+
+  Bind(&promise_capability);
+  {
+    Node* const capability = NewPromiseCapability(context, constructor);
+    var_deferred_promise.Bind(
+        LoadObjectField(capability, JSPromiseCapability::kPromiseOffset));
+    var_deferred_on_resolve.Bind(
+        LoadObjectField(capability, JSPromiseCapability::kResolveOffset));
+    var_deferred_on_reject.Bind(
+        LoadObjectField(capability, JSPromiseCapability::kRejectOffset));
+    Goto(&perform_promise_then);
+  }
+
+  // 5. Return PerformPromiseThen(promise, onFulfilled, onRejected,
+  //    resultCapability).
+  Bind(&perform_promise_then);
+  Node* const result = InternalPerformPromiseThen(
+      context, promise, on_resolve, on_reject, var_deferred_promise.value(),
+      var_deferred_on_resolve.value(), var_deferred_on_reject.value());
+  return result;
+}
+
+Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
+    Node* context, Node* promise, Node* on_resolve, Node* on_reject,
+    Node* deferred_promise, Node* deferred_on_resolve,
+    Node* deferred_on_reject) {
+  Variable var_on_resolve(this, MachineRepresentation::kTagged),
+      var_on_reject(this, MachineRepresentation::kTagged);
+
+  var_on_resolve.Bind(on_resolve);
+  var_on_reject.Bind(on_reject);
+
+  Label out(this), if_onresolvenotcallable(this), onrejectcheck(this),
+      append_callbacks(this);
+  GotoIf(TaggedIsSmi(on_resolve), &if_onresolvenotcallable);
+
+  Isolate* isolate = this->isolate();
+  Node* const on_resolve_map = LoadMap(on_resolve);
+  Branch(IsCallableMap(on_resolve_map), &onrejectcheck,
+         &if_onresolvenotcallable);
+
+  Bind(&if_onresolvenotcallable);
+  {
+    Node* const default_resolve_handler_symbol = HeapConstant(
+        isolate->factory()->promise_default_resolve_handler_symbol());
+    var_on_resolve.Bind(default_resolve_handler_symbol);
+    Goto(&onrejectcheck);
+  }
+
+  Bind(&onrejectcheck);
+  {
+    Label if_onrejectnotcallable(this);
+    GotoIf(TaggedIsSmi(on_reject), &if_onrejectnotcallable);
+
+    Node* const on_reject_map = LoadMap(on_reject);
+    Branch(IsCallableMap(on_reject_map), &append_callbacks,
+           &if_onrejectnotcallable);
+
+    Bind(&if_onrejectnotcallable);
+    {
+      Node* const default_reject_handler_symbol = HeapConstant(
+          isolate->factory()->promise_default_reject_handler_symbol());
+      var_on_reject.Bind(default_reject_handler_symbol);
+      Goto(&append_callbacks);
+    }
+  }
+
+  Bind(&append_callbacks);
+  {
+    Label fulfilled_check(this);
+    Node* const status = LoadObjectField(promise, JSPromise::kStatusOffset);
+    GotoIfNot(SmiEqual(status, SmiConstant(v8::Promise::kPending)),
+              &fulfilled_check);
+
+    Node* const existing_deferred_promise =
+        LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
+
+    Label if_noexistingcallbacks(this), if_existingcallbacks(this);
+    Branch(IsUndefined(existing_deferred_promise), &if_noexistingcallbacks,
+           &if_existingcallbacks);
+
+    Bind(&if_noexistingcallbacks);
+    {
+      // Store callbacks directly in the slots.
+      StoreObjectField(promise, JSPromise::kDeferredPromiseOffset,
+                       deferred_promise);
+      StoreObjectField(promise, JSPromise::kDeferredOnResolveOffset,
+                       deferred_on_resolve);
+      StoreObjectField(promise, JSPromise::kDeferredOnRejectOffset,
+                       deferred_on_reject);
+      StoreObjectField(promise, JSPromise::kFulfillReactionsOffset,
+                       var_on_resolve.value());
+      StoreObjectField(promise, JSPromise::kRejectReactionsOffset,
+                       var_on_reject.value());
+      Goto(&out);
+    }
+
+    Bind(&if_existingcallbacks);
+    {
+      Label if_singlecallback(this), if_multiplecallbacks(this);
+      BranchIfJSObject(existing_deferred_promise, &if_singlecallback,
+                       &if_multiplecallbacks);
+
+      Bind(&if_singlecallback);
+      {
+        // Create new FixedArrays to store callbacks, and migrate
+        // existing callbacks.
+        Node* const deferred_promise_arr =
+            AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+        StoreFixedArrayElement(deferred_promise_arr, 0,
+                               existing_deferred_promise);
+        StoreFixedArrayElement(deferred_promise_arr, 1, deferred_promise);
+
+        Node* const deferred_on_resolve_arr =
+            AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+        StoreFixedArrayElement(
+            deferred_on_resolve_arr, 0,
+            LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset));
+        StoreFixedArrayElement(deferred_on_resolve_arr, 1, deferred_on_resolve);
+
+        Node* const deferred_on_reject_arr =
+            AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+        StoreFixedArrayElement(
+            deferred_on_reject_arr, 0,
+            LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset));
+        StoreFixedArrayElement(deferred_on_reject_arr, 1, deferred_on_reject);
+
+        Node* const fulfill_reactions =
+            AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+        StoreFixedArrayElement(
+            fulfill_reactions, 0,
+            LoadObjectField(promise, JSPromise::kFulfillReactionsOffset));
+        StoreFixedArrayElement(fulfill_reactions, 1, var_on_resolve.value());
+
+        Node* const reject_reactions =
+            AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+        StoreFixedArrayElement(
+            reject_reactions, 0,
+            LoadObjectField(promise, JSPromise::kRejectReactionsOffset));
+        StoreFixedArrayElement(reject_reactions, 1, var_on_reject.value());
+
+        // Store new FixedArrays in promise.
+        StoreObjectField(promise, JSPromise::kDeferredPromiseOffset,
+                         deferred_promise_arr);
+        StoreObjectField(promise, JSPromise::kDeferredOnResolveOffset,
+                         deferred_on_resolve_arr);
+        StoreObjectField(promise, JSPromise::kDeferredOnRejectOffset,
+                         deferred_on_reject_arr);
+        StoreObjectField(promise, JSPromise::kFulfillReactionsOffset,
+                         fulfill_reactions);
+        StoreObjectField(promise, JSPromise::kRejectReactionsOffset,
+                         reject_reactions);
+        Goto(&out);
+      }
+
+      Bind(&if_multiplecallbacks);
+      {
+        AppendPromiseCallback(JSPromise::kDeferredPromiseOffset, promise,
+                              deferred_promise);
+        AppendPromiseCallback(JSPromise::kDeferredOnResolveOffset, promise,
+                              deferred_on_resolve);
+        AppendPromiseCallback(JSPromise::kDeferredOnRejectOffset, promise,
+                              deferred_on_reject);
+        AppendPromiseCallback(JSPromise::kFulfillReactionsOffset, promise,
+                              var_on_resolve.value());
+        AppendPromiseCallback(JSPromise::kRejectReactionsOffset, promise,
+                              var_on_reject.value());
+        Goto(&out);
+      }
+    }
+
+    Bind(&fulfilled_check);
+    {
+      Label reject(this);
+      Node* const result = LoadObjectField(promise, JSPromise::kResultOffset);
+      GotoIfNot(WordEqual(status, SmiConstant(v8::Promise::kFulfilled)),
+                &reject);
+
+      Node* info = AllocatePromiseReactionJobInfo(
+          result, var_on_resolve.value(), deferred_promise, deferred_on_resolve,
+          deferred_on_reject, context);
+      // TODO(gsathya): Move this to TF
+      CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
+      Goto(&out);
+
+      Bind(&reject);
+      {
+        Node* const has_handler = PromiseHasHandler(promise);
+        Label enqueue(this);
+
+        // TODO(gsathya): Fold these runtime calls and move to TF.
+        GotoIf(has_handler, &enqueue);
+        CallRuntime(Runtime::kPromiseRevokeReject, context, promise);
+        Goto(&enqueue);
+
+        Bind(&enqueue);
+        {
+          Node* info = AllocatePromiseReactionJobInfo(
+              result, var_on_reject.value(), deferred_promise,
+              deferred_on_resolve, deferred_on_reject, context);
+          // TODO(gsathya): Move this to TF
+          CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
+          Goto(&out);
+        }
+      }
+    }
+  }
+
+  Bind(&out);
+  PromiseSetHasHandler(promise);
+  return deferred_promise;
+}
+
+// Promise fast path implementations rely on unmodified JSPromise instances.
+// We use a fairly coarse granularity for this and simply check whether both
+// the promise itself is unmodified (i.e. its map has not changed) and its
+// prototype is unmodified.
+// TODO(gsathya): Refactor this out to prevent code dupe with builtins-regexp
+void PromiseBuiltinsAssembler::BranchIfFastPath(Node* context, Node* promise,
+                                                Label* if_isunmodified,
+                                                Label* if_ismodified) {
+  Node* const native_context = LoadNativeContext(context);
+  Node* const promise_fun =
+      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+  BranchIfFastPath(native_context, promise_fun, promise, if_isunmodified,
+                   if_ismodified);
+}
+
+void PromiseBuiltinsAssembler::BranchIfFastPath(Node* native_context,
+                                                Node* promise_fun,
+                                                Node* promise,
+                                                Label* if_isunmodified,
+                                                Label* if_ismodified) {
+  CSA_ASSERT(this, IsNativeContext(native_context));
+  CSA_ASSERT(this,
+             WordEqual(promise_fun,
+                       LoadContextElement(native_context,
+                                          Context::PROMISE_FUNCTION_INDEX)));
+
+  Node* const map = LoadMap(promise);
+  Node* const initial_map =
+      LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+  Node* const has_initialmap = WordEqual(map, initial_map);
+
+  GotoIfNot(has_initialmap, if_ismodified);
+
+  Node* const initial_proto_initial_map =
+      LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_MAP_INDEX);
+  Node* const proto_map = LoadMap(LoadMapPrototype(map));
+  Node* const proto_has_initialmap =
+      WordEqual(proto_map, initial_proto_initial_map);
+
+  Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
+}
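+// E.g. adding any property to Promise.prototype transitions the prototype
+// map away from the initial PROMISE_PROTOTYPE_MAP_INDEX map, sending
+// promises through {if_ismodified} from then on.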
+
+Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobInfo(
+    Node* thenable, Node* then, Node* resolve, Node* reject, Node* context) {
+  Node* const info = Allocate(PromiseResolveThenableJobInfo::kSize);
+  StoreMapNoWriteBarrier(info,
+                         Heap::kPromiseResolveThenableJobInfoMapRootIndex);
+  StoreObjectFieldNoWriteBarrier(
+      info, PromiseResolveThenableJobInfo::kThenableOffset, thenable);
+  StoreObjectFieldNoWriteBarrier(
+      info, PromiseResolveThenableJobInfo::kThenOffset, then);
+  StoreObjectFieldNoWriteBarrier(
+      info, PromiseResolveThenableJobInfo::kResolveOffset, resolve);
+  StoreObjectFieldNoWriteBarrier(
+      info, PromiseResolveThenableJobInfo::kRejectOffset, reject);
+  StoreObjectFieldNoWriteBarrier(
+      info, PromiseResolveThenableJobInfo::kContextOffset, context);
+  return info;
+}
+
+void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
+                                                      Node* promise,
+                                                      Node* result) {
+  Isolate* isolate = this->isolate();
+
+  Variable var_reason(this, MachineRepresentation::kTagged),
+      var_then(this, MachineRepresentation::kTagged);
+
+  Label do_enqueue(this), fulfill(this), if_cycle(this, Label::kDeferred),
+      if_rejectpromise(this, Label::kDeferred), out(this);
+
+  Label cycle_check(this);
+  GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &cycle_check);
+  CallRuntime(Runtime::kPromiseHookResolve, context, promise);
+  Goto(&cycle_check);
+
+  Bind(&cycle_check);
+  // 6. If SameValue(resolution, promise) is true, then
+  GotoIf(SameValue(promise, result, context), &if_cycle);
+
+  // 7. If Type(resolution) is not Object, then
+  GotoIf(TaggedIsSmi(result), &fulfill);
+  GotoIfNot(IsJSReceiver(result), &fulfill);
+
+  Label if_nativepromise(this), if_notnativepromise(this, Label::kDeferred);
+  Node* const native_context = LoadNativeContext(context);
+  Node* const promise_fun =
+      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+  BranchIfFastPath(native_context, promise_fun, result, &if_nativepromise,
+                   &if_notnativepromise);
+
+  // If the resolution is a native promise that is already resolved or
+  // rejected, short-circuit the resolution procedure by directly reusing
+  // the value from that promise.
+  Bind(&if_nativepromise);
+  {
+    Node* const thenable_status =
+        LoadObjectField(result, JSPromise::kStatusOffset);
+    Node* const thenable_value =
+        LoadObjectField(result, JSPromise::kResultOffset);
+
+    Label if_isnotpending(this);
+    GotoIfNot(SmiEqual(SmiConstant(v8::Promise::kPending), thenable_status),
+              &if_isnotpending);
+
+    // TODO(gsathya): Use a marker here instead of the actual then
+    // callback, and check for the marker in PromiseResolveThenableJob
+    // and perform PromiseThen.
+    Node* const then =
+        LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+    var_then.Bind(then);
+    Goto(&do_enqueue);
+
+    Bind(&if_isnotpending);
+    {
+      Label if_fulfilled(this), if_rejected(this);
+      Branch(SmiEqual(SmiConstant(v8::Promise::kFulfilled), thenable_status),
+             &if_fulfilled, &if_rejected);
+
+      Bind(&if_fulfilled);
+      {
+        PromiseFulfill(context, promise, thenable_value,
+                       v8::Promise::kFulfilled);
+        PromiseSetHasHandler(promise);
+        Goto(&out);
+      }
+
+      Bind(&if_rejected);
+      {
+        Label reject(this);
+        Node* const has_handler = PromiseHasHandler(result);
+
+        // Promise has already been rejected, but had no handler.
+        // Revoke previously triggered reject event.
+        GotoIf(has_handler, &reject);
+        CallRuntime(Runtime::kPromiseRevokeReject, context, result);
+        Goto(&reject);
+
+        Bind(&reject);
+        // Don't cause a debug event, as this case is forwarding a rejection.
+        InternalPromiseReject(context, promise, thenable_value, false);
+        PromiseSetHasHandler(result);
+        Goto(&out);
+      }
+    }
+  }
+
+  Bind(&if_notnativepromise);
+  {
+    // 8. Let then be Get(resolution, "then").
+    Node* const then_str = HeapConstant(isolate->factory()->then_string());
+    Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+    Node* const then =
+        CallStub(getproperty_callable, context, result, then_str);
+
+    // 9. If then is an abrupt completion, then
+    GotoIfException(then, &if_rejectpromise, &var_reason);
+
+    // 11. If IsCallable(thenAction) is false, then
+    GotoIf(TaggedIsSmi(then), &fulfill);
+    Node* const then_map = LoadMap(then);
+    GotoIfNot(IsCallableMap(then_map), &fulfill);
+    var_then.Bind(then);
+    Goto(&do_enqueue);
+  }
+
+  Bind(&do_enqueue);
+  {
+    // TODO(gsathya): Add fast path for native promises with unmodified
+    // PromiseThen (which don't need these resolving functions, but
+    // instead can just call resolve/reject directly).
+    Node* resolve = nullptr;
+    Node* reject = nullptr;
+    std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
+        promise, FalseConstant(), native_context);
+
+    Node* const info = AllocatePromiseResolveThenableJobInfo(
+        result, var_then.value(), resolve, reject, context);
+
+    Label enqueue(this);
+    GotoIfNot(IsDebugActive(), &enqueue);
+
+    GotoIf(TaggedIsSmi(result), &enqueue);
+    GotoIfNot(HasInstanceType(result, JS_PROMISE_TYPE), &enqueue);
+
+    // Mark the dependency of the new promise on the resolution
+    Node* const key =
+        HeapConstant(isolate->factory()->promise_handled_by_symbol());
+    CallRuntime(Runtime::kSetProperty, context, result, key, promise,
+                SmiConstant(STRICT));
+    Goto(&enqueue);
+
+    // 12. Perform EnqueueJob("PromiseJobs",
+    // PromiseResolveThenableJob, « promise, resolution, thenAction »).
+    Bind(&enqueue);
+    // TODO(gsathya): Move this to TF
+    CallRuntime(Runtime::kEnqueuePromiseResolveThenableJob, context, info);
+    Goto(&out);
+  }
+
+  // 7.a Return FulfillPromise(promise, resolution).
+  Bind(&fulfill);
+  {
+    PromiseFulfill(context, promise, result, v8::Promise::kFulfilled);
+    Goto(&out);
+  }
+
+  Bind(&if_cycle);
+  {
+    // 6.a Let selfResolutionError be a newly created TypeError object.
+    Node* const message_id = SmiConstant(MessageTemplate::kPromiseCyclic);
+    Node* const error =
+        CallRuntime(Runtime::kNewTypeError, context, message_id, result);
+    var_reason.Bind(error);
+
+    // 6.b Return RejectPromise(promise, selfResolutionError).
+    Goto(&if_rejectpromise);
+  }
+
+  // 9.a Return RejectPromise(promise, then.[[Value]]).
+  Bind(&if_rejectpromise);
+  {
+    InternalPromiseReject(context, promise, var_reason.value(), true);
+    Goto(&out);
+  }
+
+  Bind(&out);
+}
+
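+// Settles |promise| with |result| and the given |status|: enqueues any
+// pending reactions, notifies the debugger, and resets the promise's
+// reaction fields.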
+void PromiseBuiltinsAssembler::PromiseFulfill(
+    Node* context, Node* promise, Node* result,
+    v8::Promise::PromiseState status) {
+  Label do_promisereset(this), debug_async_event_enqueue_recurring(this);
+
+  Node* const status_smi = SmiConstant(static_cast<int>(status));
+  Node* const deferred_promise =
+      LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
+
+  GotoIf(IsUndefined(deferred_promise), &debug_async_event_enqueue_recurring);
+
+  Node* const tasks =
+      status == v8::Promise::kFulfilled
+          ? LoadObjectField(promise, JSPromise::kFulfillReactionsOffset)
+          : LoadObjectField(promise, JSPromise::kRejectReactionsOffset);
+
+  Node* const deferred_on_resolve =
+      LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset);
+  Node* const deferred_on_reject =
+      LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset);
+
+  Node* const info = AllocatePromiseReactionJobInfo(
+      result, tasks, deferred_promise, deferred_on_resolve, deferred_on_reject,
+      context);
+
+  CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, info);
+  Goto(&debug_async_event_enqueue_recurring);
+
+  Bind(&debug_async_event_enqueue_recurring);
+  {
+    GotoIfNot(IsDebugActive(), &do_promisereset);
+    CallRuntime(Runtime::kDebugAsyncEventEnqueueRecurring, context, promise,
+                status_smi);
+    Goto(&do_promisereset);
+  }
+
+  Bind(&do_promisereset);
+  {
+    StoreObjectField(promise, JSPromise::kStatusOffset, status_smi);
+    StoreObjectField(promise, JSPromise::kResultOffset, result);
+    StoreObjectFieldRoot(promise, JSPromise::kDeferredPromiseOffset,
+                         Heap::kUndefinedValueRootIndex);
+    StoreObjectFieldRoot(promise, JSPromise::kDeferredOnResolveOffset,
+                         Heap::kUndefinedValueRootIndex);
+    StoreObjectFieldRoot(promise, JSPromise::kDeferredOnRejectOffset,
+                         Heap::kUndefinedValueRootIndex);
+    StoreObjectFieldRoot(promise, JSPromise::kFulfillReactionsOffset,
+                         Heap::kUndefinedValueRootIndex);
+    StoreObjectFieldRoot(promise, JSPromise::kRejectReactionsOffset,
+                         Heap::kUndefinedValueRootIndex);
+  }
+}
+
+void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
+    Node* context, Node* native_context, Node* promise_constructor,
+    Node* executor, Label* if_noaccess) {
+  Variable var_executor(this, MachineRepresentation::kTagged);
+  var_executor.Bind(executor);
+  Label has_access(this), call_runtime(this, Label::kDeferred);
+
+  // If the executor is a bound function, unwrap the bound target function
+  // chain until we reach an actual JSFunction.
+  Label found_function(this), loop_over_bound_function(this, &var_executor);
+  Goto(&loop_over_bound_function);
+  Bind(&loop_over_bound_function);
+  {
+    Node* executor_type = LoadInstanceType(var_executor.value());
+    GotoIf(InstanceTypeEqual(executor_type, JS_FUNCTION_TYPE), &found_function);
+    GotoIfNot(InstanceTypeEqual(executor_type, JS_BOUND_FUNCTION_TYPE),
+              &call_runtime);
+    var_executor.Bind(LoadObjectField(
+        var_executor.value(), JSBoundFunction::kBoundTargetFunctionOffset));
+    Goto(&loop_over_bound_function);
+  }
+
+  // Load the context from the function and compare it to the Promise
+  // constructor's context. If they match, everything is fine; otherwise,
+  // bail out to the runtime.
+  Bind(&found_function);
+  {
+    Node* function_context =
+        LoadObjectField(var_executor.value(), JSFunction::kContextOffset);
+    Node* native_function_context = LoadNativeContext(function_context);
+    Branch(WordEqual(native_context, native_function_context), &has_access,
+           &call_runtime);
+  }
+
+  Bind(&call_runtime);
+  {
+    Branch(WordEqual(CallRuntime(Runtime::kAllowDynamicFunction, context,
+                                 promise_constructor),
+                     BooleanConstant(true)),
+           &has_access, if_noaccess);
+  }
+
+  Bind(&has_access);
+}
+
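+// Tagged |debug_event| overload: fires the debug event here (when the flag
+// is true and the debugger is active) and then delegates to the bool
+// overload with debug_event == false.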
+void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
+                                                     Node* promise, Node* value,
+                                                     Node* debug_event) {
+  Label out(this);
+  GotoIfNot(IsDebugActive(), &out);
+  GotoIfNot(WordEqual(TrueConstant(), debug_event), &out);
+  CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
+  Goto(&out);
+
+  Bind(&out);
+  InternalPromiseReject(context, promise, value, false);
+}
+
+// This duplicates a lot of logic from PromiseRejectEvent in
+// runtime-promise.cc.
+void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
+                                                     Node* promise, Node* value,
+                                                     bool debug_event) {
+  Label fulfill(this), report_unhandledpromise(this), run_promise_hook(this);
+
+  if (debug_event) {
+    GotoIfNot(IsDebugActive(), &run_promise_hook);
+    CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
+    Goto(&run_promise_hook);
+  } else {
+    Goto(&run_promise_hook);
+  }
+
+  Bind(&run_promise_hook);
+  {
+    GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &report_unhandledpromise);
+    CallRuntime(Runtime::kPromiseHookResolve, context, promise);
+    Goto(&report_unhandledpromise);
+  }
+
+  Bind(&report_unhandledpromise);
+  {
+    GotoIf(PromiseHasHandler(promise), &fulfill);
+    CallRuntime(Runtime::kReportPromiseReject, context, promise, value);
+    Goto(&fulfill);
+  }
+
+  Bind(&fulfill);
+  PromiseFulfill(context, promise, value, v8::Promise::kRejected);
 }
 
 // ES#sec-promise-reject-functions
 // Promise Reject Functions
-BUILTIN(PromiseRejectClosure) {
-  HandleScope scope(isolate);
+TF_BUILTIN(PromiseRejectClosure, PromiseBuiltinsAssembler) {
+  Node* const value = Parameter(1);
+  Node* const context = Parameter(4);
 
-  Handle<Context> context(isolate->context(), isolate);
+  Label out(this);
 
-  if (PromiseUtils::HasAlreadyVisited(context)) {
-    return isolate->heap()->undefined_value();
-  }
+  // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+  int has_already_visited_slot = kAlreadyVisitedSlot;
 
-  PromiseUtils::SetAlreadyVisited(context);
-  Handle<Object> value = args.atOrUndefined(isolate, 1);
-  Handle<JSObject> promise = handle(PromiseUtils::GetPromise(context), isolate);
-  Handle<Object> debug_event =
-      handle(PromiseUtils::GetDebugEvent(context), isolate);
-  MaybeHandle<Object> maybe_result;
-  Handle<Object> argv[] = {promise, value, debug_event};
-  RETURN_FAILURE_ON_EXCEPTION(
-      isolate, Execution::Call(isolate, isolate->promise_internal_reject(),
-                               isolate->factory()->undefined_value(),
-                               arraysize(argv), argv));
-  return isolate->heap()->undefined_value();
+  Node* const has_already_visited =
+      LoadContextElement(context, has_already_visited_slot);
+
+  // 4. If alreadyResolved.[[Value]] is true, return undefined.
+  GotoIf(SmiEqual(has_already_visited, SmiConstant(1)), &out);
+
+  // 5. Set alreadyResolved.[[Value]] to true.
+  StoreContextElementNoWriteBarrier(context, has_already_visited_slot,
+                                    SmiConstant(1));
+
+  // 2. Let promise be F.[[Promise]].
+  Node* const promise =
+      LoadContextElement(context, IntPtrConstant(kPromiseSlot));
+  Node* const debug_event =
+      LoadContextElement(context, IntPtrConstant(kDebugEventSlot));
+
+  InternalPromiseReject(context, promise, value, debug_event);
+  Return(UndefinedConstant());
+
+  Bind(&out);
+  Return(UndefinedConstant());
 }
 
-// ES#sec-createresolvingfunctions
-// CreateResolvingFunctions ( promise )
-BUILTIN(CreateResolvingFunctions) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(3, args.length());
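+// ES#sec-promise-executor
+// Promise ( executor )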
+TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
+  Node* const executor = Parameter(1);
+  Node* const new_target = Parameter(2);
+  Node* const context = Parameter(4);
+  Isolate* isolate = this->isolate();
 
-  Handle<JSObject> promise = args.at<JSObject>(1);
-  Handle<Object> debug_event = args.at<Object>(2);
-  Handle<JSFunction> resolve, reject;
+  Label if_targetisundefined(this, Label::kDeferred);
 
-  PromiseUtils::CreateResolvingFunctions(isolate, promise, debug_event,
-                                         &resolve, &reject);
+  GotoIf(IsUndefined(new_target), &if_targetisundefined);
 
-  Handle<FixedArray> result = isolate->factory()->NewFixedArray(2);
-  result->set(0, *resolve);
-  result->set(1, *reject);
+  Label if_notcallable(this, Label::kDeferred);
 
-  return *isolate->factory()->NewJSArrayWithElements(result, FAST_ELEMENTS, 2,
-                                                     NOT_TENURED);
+  GotoIf(TaggedIsSmi(executor), &if_notcallable);
+
+  Node* const executor_map = LoadMap(executor);
+  GotoIfNot(IsCallableMap(executor_map), &if_notcallable);
+
+  Node* const native_context = LoadNativeContext(context);
+  Node* const promise_fun =
+      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+  Node* const is_debug_active = IsDebugActive();
+  Label if_targetisnotmodified(this),
+      if_targetismodified(this, Label::kDeferred), run_executor(this),
+      debug_push(this), if_noaccess(this, Label::kDeferred);
+
+  BranchIfAccessCheckFailed(context, native_context, promise_fun, executor,
+                            &if_noaccess);
+
+  Branch(WordEqual(promise_fun, new_target), &if_targetisnotmodified,
+         &if_targetismodified);
+
+  Variable var_result(this, MachineRepresentation::kTagged),
+      var_reject_call(this, MachineRepresentation::kTagged),
+      var_reason(this, MachineRepresentation::kTagged);
+
+  Bind(&if_targetisnotmodified);
+  {
+    Node* const instance = AllocateAndInitJSPromise(context);
+    var_result.Bind(instance);
+    Goto(&debug_push);
+  }
+
+  Bind(&if_targetismodified);
+  {
+    ConstructorBuiltinsAssembler constructor_assembler(this->state());
+    Node* const instance = constructor_assembler.EmitFastNewObject(
+        context, promise_fun, new_target);
+    PromiseInit(instance);
+    var_result.Bind(instance);
+
+    GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &debug_push);
+    CallRuntime(Runtime::kPromiseHookInit, context, instance,
+                UndefinedConstant());
+    Goto(&debug_push);
+  }
+
+  Bind(&debug_push);
+  {
+    GotoIfNot(is_debug_active, &run_executor);
+    CallRuntime(Runtime::kDebugPushPromise, context, var_result.value());
+    Goto(&run_executor);
+  }
+
+  Bind(&run_executor);
+  {
+    Label out(this), if_rejectpromise(this), debug_pop(this, Label::kDeferred);
+
+    Node *resolve, *reject;
+    std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
+        var_result.value(), TrueConstant(), native_context);
+    Callable call_callable = CodeFactory::Call(isolate);
+
+    Node* const maybe_exception = CallJS(call_callable, context, executor,
+                                         UndefinedConstant(), resolve, reject);
+
+    GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
+    Branch(is_debug_active, &debug_pop, &out);
+
+    Bind(&if_rejectpromise);
+    {
+      Callable call_callable = CodeFactory::Call(isolate);
+      CallJS(call_callable, context, reject, UndefinedConstant(),
+             var_reason.value());
+      Branch(is_debug_active, &debug_pop, &out);
+    }
+
+    Bind(&debug_pop);
+    {
+      CallRuntime(Runtime::kDebugPopPromise, context);
+      Goto(&out);
+    }
+    Bind(&out);
+    Return(var_result.value());
+  }
+
+  // 1. If NewTarget is undefined, throw a TypeError exception.
+  Bind(&if_targetisundefined);
+  {
+    Node* const message_id = SmiConstant(MessageTemplate::kNotAPromise);
+    CallRuntime(Runtime::kThrowTypeError, context, message_id, new_target);
+    Unreachable();
+  }
+
+  // 2. If IsCallable(executor) is false, throw a TypeError exception.
+  Bind(&if_notcallable);
+  {
+    Node* const message_id =
+        SmiConstant(MessageTemplate::kResolverNotAFunction);
+    CallRuntime(Runtime::kThrowTypeError, context, message_id, executor);
+    Unreachable();
+  }
+
+  // Silently fail if the stack looks fishy.
+  Bind(&if_noaccess);
+  {
+    Node* const counter_id =
+        SmiConstant(v8::Isolate::kPromiseConstructorReturnedUndefined);
+    CallRuntime(Runtime::kIncrementUseCounter, context, counter_id);
+    Return(UndefinedConstant());
+  }
+}
+
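+// Not part of the spec: allocates an already-initialized native promise,
+// with an optional parent promise for the promise init hook.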
+TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) {
+  Node* const parent = Parameter(1);
+  Node* const context = Parameter(4);
+  Return(AllocateAndInitJSPromise(context, parent));
+}
+
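+// ES#sec-ispromise
+// IsPromise ( x )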
+TF_BUILTIN(IsPromise, PromiseBuiltinsAssembler) {
+  Node* const maybe_promise = Parameter(1);
+  Label if_notpromise(this, Label::kDeferred);
+
+  GotoIf(TaggedIsSmi(maybe_promise), &if_notpromise);
+
+  Node* const result =
+      SelectBooleanConstant(HasInstanceType(maybe_promise, JS_PROMISE_TYPE));
+  Return(result);
+
+  Bind(&if_notpromise);
+  Return(FalseConstant());
+}
+
+// ES#sec-promise.prototype.then
+// Promise.prototype.then ( onFulfilled, onRejected )
+TF_BUILTIN(PromiseThen, PromiseBuiltinsAssembler) {
+  // 1. Let promise be the this value.
+  Node* const promise = Parameter(0);
+  Node* const on_resolve = Parameter(1);
+  Node* const on_reject = Parameter(2);
+  Node* const context = Parameter(5);
+
+  Node* const result =
+      InternalPromiseThen(context, promise, on_resolve, on_reject);
+  Return(result);
+}
+
+// ES#sec-promise-resolve-functions
+// Promise Resolve Functions
+TF_BUILTIN(PromiseResolveClosure, PromiseBuiltinsAssembler) {
+  Node* const value = Parameter(1);
+  Node* const context = Parameter(4);
+
+  Label out(this);
+
+  // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+  int has_already_visited_slot = kAlreadyVisitedSlot;
+
+  Node* const has_already_visited =
+      LoadContextElement(context, has_already_visited_slot);
+
+  // 4. If alreadyResolved.[[Value]] is true, return undefined.
+  GotoIf(SmiEqual(has_already_visited, SmiConstant(1)), &out);
+
+  // 5. Set alreadyResolved.[[Value]] to true.
+  StoreContextElementNoWriteBarrier(context, has_already_visited_slot,
+                                    SmiConstant(1));
+
+  // 2. Let promise be F.[[Promise]].
+  Node* const promise =
+      LoadContextElement(context, IntPtrConstant(kPromiseSlot));
+
+  InternalResolvePromise(context, promise, value);
+  Return(UndefinedConstant());
+
+  Bind(&out);
+  Return(UndefinedConstant());
+}
+
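+// Thin wrapper around InternalResolvePromise, exposed as a standalone
+// builtin entry point.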
+TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
+  Node* const promise = Parameter(1);
+  Node* const result = Parameter(2);
+  Node* const context = Parameter(5);
+
+  InternalResolvePromise(context, promise, result);
+  Return(UndefinedConstant());
+}
+
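+// Helper for the reaction job: rejects |promise| through the internal
+// reject path when |on_reject| is undefined, and calls the custom
+// |on_reject| handler otherwise.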
+TF_BUILTIN(PromiseHandleReject, PromiseBuiltinsAssembler) {
+  typedef PromiseHandleRejectDescriptor Descriptor;
+
+  Node* const promise = Parameter(Descriptor::kPromise);
+  Node* const on_reject = Parameter(Descriptor::kOnReject);
+  Node* const exception = Parameter(Descriptor::kException);
+  Node* const context = Parameter(Descriptor::kContext);
+
+  Callable call_callable = CodeFactory::Call(isolate());
+  Variable var_unused(this, MachineRepresentation::kTagged);
+
+  Label if_internalhandler(this), if_customhandler(this, Label::kDeferred);
+  Branch(IsUndefined(on_reject), &if_internalhandler, &if_customhandler);
+
+  Bind(&if_internalhandler);
+  {
+    InternalPromiseReject(context, promise, exception, false);
+    Return(UndefinedConstant());
+  }
+
+  Bind(&if_customhandler);
+  {
+    CallJS(call_callable, context, on_reject, UndefinedConstant(), exception);
+    Return(UndefinedConstant());
+  }
+}
+
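+// Reaction job: applies |handler| (or the default behavior for the marker
+// symbols) to |value| and settles the deferred promise accordingly,
+// bracketed by the debug and promise-hook notifications.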
+TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
+  Node* const value = Parameter(1);
+  Node* const handler = Parameter(2);
+  Node* const deferred_promise = Parameter(3);
+  Node* const deferred_on_resolve = Parameter(4);
+  Node* const deferred_on_reject = Parameter(5);
+  Node* const context = Parameter(8);
+  Isolate* isolate = this->isolate();
+
+  Variable var_reason(this, MachineRepresentation::kTagged);
+
+  Node* const is_debug_active = IsDebugActive();
+  Label run_handler(this), if_rejectpromise(this), promisehook_before(this),
+      promisehook_after(this), debug_pop(this);
+
+  GotoIfNot(is_debug_active, &promisehook_before);
+  CallRuntime(Runtime::kDebugPushPromise, context, deferred_promise);
+  Goto(&promisehook_before);
+
+  Bind(&promisehook_before);
+  {
+    GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &run_handler);
+    CallRuntime(Runtime::kPromiseHookBefore, context, deferred_promise);
+    Goto(&run_handler);
+  }
+
+  Bind(&run_handler);
+  {
+    Label if_defaulthandler(this), if_callablehandler(this),
+        if_internalhandler(this), if_customhandler(this, Label::kDeferred);
+    Variable var_result(this, MachineRepresentation::kTagged);
+
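+    // Default handlers are represented by symbols; any callable is a
+    // user-supplied handler.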
+    Branch(IsSymbol(handler), &if_defaulthandler, &if_callablehandler);
+
+    Bind(&if_defaulthandler);
+    {
+      Label if_resolve(this), if_reject(this);
+      Node* const default_resolve_handler_symbol = HeapConstant(
+          isolate->factory()->promise_default_resolve_handler_symbol());
+      Branch(WordEqual(default_resolve_handler_symbol, handler), &if_resolve,
+             &if_reject);
+
+      Bind(&if_resolve);
+      {
+        var_result.Bind(value);
+        Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
+               &if_customhandler);
+      }
+
+      Bind(&if_reject);
+      {
+        var_reason.Bind(value);
+        Goto(&if_rejectpromise);
+      }
+    }
+
+    Bind(&if_callablehandler);
+    {
+      Callable call_callable = CodeFactory::Call(isolate);
+      Node* const result =
+          CallJS(call_callable, context, handler, UndefinedConstant(), value);
+      var_result.Bind(result);
+      GotoIfException(result, &if_rejectpromise, &var_reason);
+      Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
+             &if_customhandler);
+    }
+
+    Bind(&if_internalhandler);
+    InternalResolvePromise(context, deferred_promise, var_result.value());
+    Goto(&promisehook_after);
+
+    Bind(&if_customhandler);
+    {
+      Callable call_callable = CodeFactory::Call(isolate);
+      Node* const maybe_exception =
+          CallJS(call_callable, context, deferred_on_resolve,
+                 UndefinedConstant(), var_result.value());
+      GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
+      Goto(&promisehook_after);
+    }
+  }
+
+  Bind(&if_rejectpromise);
+  {
+    Callable promise_handle_reject = CodeFactory::PromiseHandleReject(isolate);
+    CallStub(promise_handle_reject, context, deferred_promise,
+             deferred_on_reject, var_reason.value());
+    Goto(&promisehook_after);
+  }
+
+  Bind(&promisehook_after);
+  {
+    GotoIfNot(IsPromiseHookEnabledOrDebugIsActive(), &debug_pop);
+    CallRuntime(Runtime::kPromiseHookAfter, context, deferred_promise);
+    Goto(&debug_pop);
+  }
+
+  Bind(&debug_pop);
+  {
+    Label out(this);
+
+    GotoIfNot(is_debug_active, &out);
+    CallRuntime(Runtime::kDebugPopPromise, context);
+    Goto(&out);
+
+    Bind(&out);
+    Return(UndefinedConstant());
+  }
+}
+
+// ES#sec-promise.prototype.catch
+// Promise.prototype.catch ( onRejected )
+TF_BUILTIN(PromiseCatch, PromiseBuiltinsAssembler) {
+  // 1. Let promise be the this value.
+  Node* const promise = Parameter(0);
+  Node* const on_resolve = UndefinedConstant();
+  Node* const on_reject = Parameter(1);
+  Node* const context = Parameter(4);
+
+  Label if_internalthen(this), if_customthen(this, Label::kDeferred);
+  GotoIf(TaggedIsSmi(promise), &if_customthen);
+  BranchIfFastPath(context, promise, &if_internalthen, &if_customthen);
+
+  Bind(&if_internalthen);
+  {
+    Node* const result =
+        InternalPromiseThen(context, promise, on_resolve, on_reject);
+    Return(result);
+  }
+
+  Bind(&if_customthen);
+  {
+    Isolate* isolate = this->isolate();
+    Node* const then_str = HeapConstant(isolate->factory()->then_string());
+    Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+    Node* const then =
+        CallStub(getproperty_callable, context, promise, then_str);
+    Callable call_callable = CodeFactory::Call(isolate);
+    Node* const result =
+        CallJS(call_callable, context, then, promise, on_resolve, on_reject);
+    Return(result);
+  }
+}
+
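+// ES#sec-promise.resolve
+// Promise.resolve ( x )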
+TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
+  //  1. Let C be the this value.
+  Node* receiver = Parameter(0);
+  Node* value = Parameter(1);
+  Node* context = Parameter(4);
+  Isolate* isolate = this->isolate();
+
+  // 2. If Type(C) is not Object, throw a TypeError exception.
+  ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
+                       "PromiseResolve");
+
+  Label if_valueisnativepromise(this), if_valueisnotnativepromise(this),
+      if_valueisnotpromise(this);
+
+  // 3. If IsPromise(x) is true, then
+  GotoIf(TaggedIsSmi(value), &if_valueisnotpromise);
+
+  // This short-circuits the constructor lookup.
+  GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &if_valueisnotpromise);
+
+  // This adds a fast path for non-subclassed native promises, which have
+  // no observable constructor lookup.
+  Node* const native_context = LoadNativeContext(context);
+  Node* const promise_fun =
+      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+  BranchIfFastPath(native_context, promise_fun, value, &if_valueisnativepromise,
+                   &if_valueisnotnativepromise);
+
+  Bind(&if_valueisnativepromise);
+  {
+    GotoIfNot(WordEqual(promise_fun, receiver), &if_valueisnotnativepromise);
+    Return(value);
+  }
+
+  // At this point, value and/or receiver are not native promises, but
+  // they could be of the same subclass.
+  Bind(&if_valueisnotnativepromise);
+  {
+    // 3.a Let xConstructor be ? Get(x, "constructor").
+    // The constructor lookup is observable.
+    Node* const constructor_str =
+        HeapConstant(isolate->factory()->constructor_string());
+    Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+    Node* const constructor =
+        CallStub(getproperty_callable, context, value, constructor_str);
+
+    // 3.b If SameValue(xConstructor, C) is true, return x.
+    GotoIfNot(SameValue(constructor, receiver, context), &if_valueisnotpromise);
+
+    Return(value);
+  }
+
+  Bind(&if_valueisnotpromise);
+  {
+    Label if_nativepromise(this), if_notnativepromise(this);
+    BranchIfFastPath(context, receiver, &if_nativepromise,
+                     &if_notnativepromise);
+
+    // This adds a fast path for native promises, which don't need to
+    // create a NewPromiseCapability.
+    Bind(&if_nativepromise);
+    {
+      Node* const result = AllocateAndInitJSPromise(context);
+      InternalResolvePromise(context, result, value);
+      Return(result);
+    }
+
+    Bind(&if_notnativepromise);
+    {
+      // 4. Let promiseCapability be ? NewPromiseCapability(C).
+      Node* const capability = NewPromiseCapability(context, receiver);
+
+      // 5. Perform ? Call(promiseCapability.[[Resolve]], undefined, « x »).
+      Callable call_callable = CodeFactory::Call(isolate);
+      Node* const resolve =
+          LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+      CallJS(call_callable, context, resolve, UndefinedConstant(), value);
+
+      // 6. Return promiseCapability.[[Promise]].
+      Node* const result =
+          LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+      Return(result);
+    }
+  }
+}
+
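+// ES#sec-getcapabilitiesexecutor-functions
+// GetCapabilitiesExecutor Functions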
+TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
+  Node* const resolve = Parameter(1);
+  Node* const reject = Parameter(2);
+  Node* const context = Parameter(5);
+
+  Node* const capability = LoadContextElement(context, kCapabilitySlot);
+
+  Label if_alreadyinvoked(this, Label::kDeferred);
+  GotoIf(WordNotEqual(
+             LoadObjectField(capability, JSPromiseCapability::kResolveOffset),
+             UndefinedConstant()),
+         &if_alreadyinvoked);
+  GotoIf(WordNotEqual(
+             LoadObjectField(capability, JSPromiseCapability::kRejectOffset),
+             UndefinedConstant()),
+         &if_alreadyinvoked);
+
+  StoreObjectField(capability, JSPromiseCapability::kResolveOffset, resolve);
+  StoreObjectField(capability, JSPromiseCapability::kRejectOffset, reject);
+
+  Return(UndefinedConstant());
+
+  Bind(&if_alreadyinvoked);
+  Node* message = SmiConstant(MessageTemplate::kPromiseExecutorAlreadyInvoked);
+  CallRuntime(Runtime::kThrowTypeError, context, message);
+  Unreachable();
+}
+
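+// ES#sec-newpromisecapability
+// NewPromiseCapability ( C )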
+TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
+  Node* constructor = Parameter(1);
+  Node* debug_event = Parameter(2);
+  Node* context = Parameter(5);
+
+  CSA_ASSERT_JS_ARGC_EQ(this, 2);
+
+  Return(NewPromiseCapability(context, constructor, debug_event));
+}
+
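+// ES#sec-promise.reject
+// Promise.reject ( r )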
+TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
+  // 1. Let C be the this value.
+  Node* const receiver = Parameter(0);
+  Node* const reason = Parameter(1);
+  Node* const context = Parameter(4);
+
+  // 2. If Type(C) is not Object, throw a TypeError exception.
+  ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
+                       "PromiseReject");
+
+  Label if_nativepromise(this), if_custompromise(this, Label::kDeferred);
+  Node* const native_context = LoadNativeContext(context);
+  Node* const promise_fun =
+      LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+  Branch(WordEqual(promise_fun, receiver), &if_nativepromise,
+         &if_custompromise);
+
+  Bind(&if_nativepromise);
+  {
+    Node* const promise = AllocateAndSetJSPromise(
+        context, SmiConstant(v8::Promise::kRejected), reason);
+    CallRuntime(Runtime::kPromiseRejectEventFromStack, context, promise,
+                reason);
+    Return(promise);
+  }
+
+  Bind(&if_custompromise);
+  {
+    // 3. Let promiseCapability be ? NewPromiseCapability(C).
+    Node* const capability = NewPromiseCapability(context, receiver);
+
+    // 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »).
+    Node* const reject =
+        LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+    Callable call_callable = CodeFactory::Call(isolate());
+    CallJS(call_callable, context, reject, UndefinedConstant(), reason);
+
+    // 5. Return promiseCapability.[[Promise]].
+    Node* const promise =
+        LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+    Return(promise);
+  }
+}
+
+TF_BUILTIN(InternalPromiseReject, PromiseBuiltinsAssembler) {
+  Node* const promise = Parameter(1);
+  Node* const reason = Parameter(2);
+  Node* const debug_event = Parameter(3);
+  Node* const context = Parameter(6);
+
+  InternalPromiseReject(context, promise, reason, debug_event);
+  Return(UndefinedConstant());
+}
+
+Node* PromiseBuiltinsAssembler::CreatePromiseFinallyContext(
+    Node* on_finally, Node* native_context) {
+  Node* const context =
+      CreatePromiseContext(native_context, kOnFinallyContextLength);
+  StoreContextElementNoWriteBarrier(context, kOnFinallySlot, on_finally);
+  return context;
+}
+
+std::pair<Node*, Node*> PromiseBuiltinsAssembler::CreatePromiseFinallyFunctions(
+    Node* on_finally, Node* native_context) {
+  Node* const promise_context =
+      CreatePromiseFinallyContext(on_finally, native_context);
+  Node* const map = LoadContextElement(
+      native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+  Node* const then_finally_info = LoadContextElement(
+      native_context, Context::PROMISE_THEN_FINALLY_SHARED_FUN);
+  Node* const then_finally = AllocateFunctionWithMapAndContext(
+      map, then_finally_info, promise_context);
+  Node* const catch_finally_info = LoadContextElement(
+      native_context, Context::PROMISE_CATCH_FINALLY_SHARED_FUN);
+  Node* const catch_finally = AllocateFunctionWithMapAndContext(
+      map, catch_finally_info, promise_context);
+  return std::make_pair(then_finally, catch_finally);
+}
+
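+// Returns the value captured in kOnFinallySlot by CreateValueThunkFunction.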
+TF_BUILTIN(PromiseValueThunkFinally, PromiseBuiltinsAssembler) {
+  Node* const context = Parameter(3);
+
+  Node* const value = LoadContextElement(context, kOnFinallySlot);
+  Return(value);
+}
+
+Node* PromiseBuiltinsAssembler::CreateValueThunkFunctionContext(
+    Node* value, Node* native_context) {
+  Node* const context =
+      CreatePromiseContext(native_context, kOnFinallyContextLength);
+  StoreContextElementNoWriteBarrier(context, kOnFinallySlot, value);
+  return context;
+}
+
+Node* PromiseBuiltinsAssembler::CreateValueThunkFunction(Node* value,
+                                                         Node* native_context) {
+  Node* const value_thunk_context =
+      CreateValueThunkFunctionContext(value, native_context);
+  Node* const map = LoadContextElement(
+      native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+  Node* const value_thunk_info = LoadContextElement(
+      native_context, Context::PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN);
+  Node* const value_thunk = AllocateFunctionWithMapAndContext(
+      map, value_thunk_info, value_thunk_context);
+  return value_thunk;
+}
+
+TF_BUILTIN(PromiseThenFinally, PromiseBuiltinsAssembler) {
+  CSA_ASSERT_JS_ARGC_EQ(this, 1);
+
+  Node* const value = Parameter(1);
+  Node* const context = Parameter(4);
+
+  Node* const on_finally = LoadContextElement(context, kOnFinallySlot);
+
+  // 2.a Let result be ? Call(onFinally, undefined).
+  Callable call_callable = CodeFactory::Call(isolate());
+  Node* result =
+      CallJS(call_callable, context, on_finally, UndefinedConstant());
+
+  // 2.b Let promise be ! PromiseResolve(%Promise%, result).
+  Node* const promise = AllocateAndInitJSPromise(context);
+  InternalResolvePromise(context, promise, result);
+
+  // 2.c Let valueThunk be equivalent to a function that returns value.
+  Node* native_context = LoadNativeContext(context);
+  Node* const value_thunk = CreateValueThunkFunction(value, native_context);
+
+  // 2.d Let promiseCapability be ! NewPromiseCapability(%Promise%).
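+  // (Fast path: allocate a native promise directly instead of running the
+  // generic NewPromiseCapability.)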
+  Node* const promise_capability = AllocateAndInitJSPromise(context, promise);
+
+  // 2.e Return PerformPromiseThen(promise, valueThunk, undefined,
+  // promiseCapability).
+  InternalPerformPromiseThen(context, promise, value_thunk, UndefinedConstant(),
+                             promise_capability, UndefinedConstant(),
+                             UndefinedConstant());
+  Return(promise_capability);
+}
+
+TF_BUILTIN(PromiseThrowerFinally, PromiseBuiltinsAssembler) {
+  Node* const context = Parameter(3);
+
+  Node* const reason = LoadContextElement(context, kOnFinallySlot);
+  CallRuntime(Runtime::kThrow, context, reason);
+  Unreachable();
+}
+
+Node* PromiseBuiltinsAssembler::CreateThrowerFunctionContext(
+    Node* reason, Node* native_context) {
+  Node* const context =
+      CreatePromiseContext(native_context, kOnFinallyContextLength);
+  StoreContextElementNoWriteBarrier(context, kOnFinallySlot, reason);
+  return context;
+}
+
+Node* PromiseBuiltinsAssembler::CreateThrowerFunction(Node* reason,
+                                                      Node* native_context) {
+  Node* const thrower_context =
+      CreateThrowerFunctionContext(reason, native_context);
+  Node* const map = LoadContextElement(
+      native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+  Node* const thrower_info = LoadContextElement(
+      native_context, Context::PROMISE_THROWER_FINALLY_SHARED_FUN);
+  Node* const thrower =
+      AllocateFunctionWithMapAndContext(map, thrower_info, thrower_context);
+  return thrower;
+}
+
+TF_BUILTIN(PromiseCatchFinally, PromiseBuiltinsAssembler) {
+  CSA_ASSERT_JS_ARGC_EQ(this, 1);
+
+  Node* const reason = Parameter(1);
+  Node* const context = Parameter(4);
+
+  Node* const on_finally = LoadContextElement(context, kOnFinallySlot);
+
+  // 2.a Let result be ? Call(onFinally, undefined).
+  Callable call_callable = CodeFactory::Call(isolate());
+  Node* result =
+      CallJS(call_callable, context, on_finally, UndefinedConstant());
+
+  // 2.b Let promise be ! PromiseResolve(%Promise%, result).
+  Node* const promise = AllocateAndInitJSPromise(context);
+  InternalResolvePromise(context, promise, result);
+
+  // 2.c Let thrower be equivalent to a function that throws reason.
+  Node* native_context = LoadNativeContext(context);
+  Node* const thrower = CreateThrowerFunction(reason, native_context);
+
+  // 2.d Let promiseCapability be ! NewPromiseCapability(%Promise%).
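+  // (Fast path: allocate a native promise directly instead of running the
+  // generic NewPromiseCapability.)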
+  Node* const promise_capability = AllocateAndInitJSPromise(context, promise);
+
+  // 2.e Return PerformPromiseThen(promise, thrower, undefined,
+  // promiseCapability).
+  InternalPerformPromiseThen(context, promise, thrower, UndefinedConstant(),
+                             promise_capability, UndefinedConstant(),
+                             UndefinedConstant());
+  Return(promise_capability);
+}
+
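+// ES#sec-promise.prototype.finally
+// Promise.prototype.finally ( onFinally )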
+TF_BUILTIN(PromiseFinally, PromiseBuiltinsAssembler) {
+  CSA_ASSERT_JS_ARGC_EQ(this, 1);
+
+  // 1. Let promise be the this value.
+  Node* const promise = Parameter(0);
+  Node* const on_finally = Parameter(1);
+  Node* const context = Parameter(4);
+
+  // 2. If IsPromise(promise) is false, throw a TypeError exception.
+  ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
+                         "Promise.prototype.finally");
+
+  Variable var_then_finally(this, MachineRepresentation::kTagged),
+      var_catch_finally(this, MachineRepresentation::kTagged);
+
+  Label if_notcallable(this, Label::kDeferred), perform_finally(this);
+
+  // 3. Let thenFinally be ! CreateThenFinally(onFinally).
+  // 4. Let catchFinally be ! CreateCatchFinally(onFinally).
+  GotoIf(TaggedIsSmi(on_finally), &if_notcallable);
+  Node* const on_finally_map = LoadMap(on_finally);
+  GotoIfNot(IsCallableMap(on_finally_map), &if_notcallable);
+
+  Node* const native_context = LoadNativeContext(context);
+  Node* then_finally = nullptr;
+  Node* catch_finally = nullptr;
+  std::tie(then_finally, catch_finally) =
+      CreatePromiseFinallyFunctions(on_finally, native_context);
+  var_then_finally.Bind(then_finally);
+  var_catch_finally.Bind(catch_finally);
+  Goto(&perform_finally);
+
+  Bind(&if_notcallable);
+  {
+    var_then_finally.Bind(on_finally);
+    var_catch_finally.Bind(on_finally);
+    Goto(&perform_finally);
+  }
+
+  // 5. Return ? Invoke(promise, "then", « thenFinally, catchFinally »),
+  // with a PerformPromiseThen fast path for native promises.
+  Bind(&perform_finally);
+  Label if_nativepromise(this), if_custompromise(this, Label::kDeferred);
+  BranchIfFastPath(context, promise, &if_nativepromise, &if_custompromise);
+
+  Bind(&if_nativepromise);
+  {
+    Node* deferred_promise = AllocateAndInitJSPromise(context, promise);
+    InternalPerformPromiseThen(context, promise, var_then_finally.value(),
+                               var_catch_finally.value(), deferred_promise,
+                               UndefinedConstant(), UndefinedConstant());
+    Return(deferred_promise);
+  }
+
+  Bind(&if_custompromise);
+  {
+    Isolate* isolate = this->isolate();
+    Node* const then_str = HeapConstant(isolate->factory()->then_string());
+    Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+    Node* const then =
+        CallStub(getproperty_callable, context, promise, then_str);
+    Callable call_callable = CodeFactory::Call(isolate);
+    // 5. Return ? Invoke(promise, "then", « thenFinally, catchFinally »).
+    Node* const result =
+        CallJS(call_callable, context, then, promise, var_then_finally.value(),
+               var_catch_finally.value());
+    Return(result);
+  }
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-promise.h b/src/builtins/builtins-promise.h
new file mode 100644
index 0000000..df01182
--- /dev/null
+++ b/src/builtins/builtins-promise.h
@@ -0,0 +1,147 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_PROMISE_H_
+#define V8_BUILTINS_BUILTINS_PROMISE_H_
+
+#include "src/code-stub-assembler.h"
+#include "src/contexts.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef CodeStubAssembler::ParameterMode ParameterMode;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+
+class PromiseBuiltinsAssembler : public CodeStubAssembler {
+ public:
+  enum PromiseResolvingFunctionContextSlot {
+    // Whether the resolve/reject callback was already called.
+    kAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
+
+    // The promise which resolve/reject callbacks fulfill.
+    kPromiseSlot,
+
+    // Whether to trigger a debug event or not. Used in catch
+    // prediction.
+    kDebugEventSlot,
+    kPromiseContextLength,
+  };
+
+  enum FunctionContextSlot {
+    kCapabilitySlot = Context::MIN_CONTEXT_SLOTS,
+
+    kCapabilitiesContextLength,
+  };
+
+  // Used by the PromiseThenFinally and PromiseCatchFinally builtins to
+  // store the onFinally callback, by PromiseValueThunkFinally to store the
+  // resolution value, and by PromiseThrowerFinally to store the rejection
+  // reason (all in kOnFinallySlot).
+  enum PromiseFinallyContextSlot {
+    kOnFinallySlot = Context::MIN_CONTEXT_SLOTS,
+
+    kOnFinallyContextLength,
+  };
+
+  explicit PromiseBuiltinsAssembler(CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
+  // These allocate and initialize a promise with pending state and
+  // undefined fields.
+  //
+  // This uses undefined as the parent promise for the promise init
+  // hook.
+  Node* AllocateAndInitJSPromise(Node* context);
+  // This uses the given parent as the parent promise for the promise
+  // init hook.
+  Node* AllocateAndInitJSPromise(Node* context, Node* parent);
+
+  // This allocates and initializes a promise with the given state and
+  // fields.
+  Node* AllocateAndSetJSPromise(Node* context, Node* status, Node* result);
+
+  Node* AllocatePromiseResolveThenableJobInfo(Node* thenable, Node* then,
+                                              Node* resolve, Node* reject,
+                                              Node* context);
+
+  std::pair<Node*, Node*> CreatePromiseResolvingFunctions(
+      Node* promise, Node* debug_event, Node* native_context);
+
+  Node* PromiseHasHandler(Node* promise);
+
+  Node* CreatePromiseResolvingFunctionsContext(Node* promise, Node* debug_event,
+                                               Node* native_context);
+
+  Node* CreatePromiseGetCapabilitiesExecutorContext(Node* native_context,
+                                                    Node* promise_capability);
+
+  Node* NewPromiseCapability(Node* context, Node* constructor,
+                             Node* debug_event = nullptr);
+
+ protected:
+  void PromiseInit(Node* promise);
+
+  Node* ThrowIfNotJSReceiver(Node* context, Node* value,
+                             MessageTemplate::Template msg_template,
+                             const char* method_name = nullptr);
+
+  Node* SpeciesConstructor(Node* context, Node* object,
+                           Node* default_constructor);
+
+  void PromiseSetHasHandler(Node* promise);
+  void PromiseSetHandledHint(Node* promise);
+
+  void AppendPromiseCallback(int offset, compiler::Node* promise,
+                             compiler::Node* value);
+
+  Node* InternalPromiseThen(Node* context, Node* promise, Node* on_resolve,
+                            Node* on_reject);
+
+  Node* InternalPerformPromiseThen(Node* context, Node* promise,
+                                   Node* on_resolve, Node* on_reject,
+                                   Node* deferred_promise,
+                                   Node* deferred_on_resolve,
+                                   Node* deferred_on_reject);
+
+  void InternalResolvePromise(Node* context, Node* promise, Node* result);
+
+  void BranchIfFastPath(Node* context, Node* promise, Label* if_isunmodified,
+                        Label* if_ismodified);
+
+  void BranchIfFastPath(Node* native_context, Node* promise_fun, Node* promise,
+                        Label* if_isunmodified, Label* if_ismodified);
+
+  Node* CreatePromiseContext(Node* native_context, int slots);
+  void PromiseFulfill(Node* context, Node* promise, Node* result,
+                      v8::Promise::PromiseState status);
+
+  void BranchIfAccessCheckFailed(Node* context, Node* native_context,
+                                 Node* promise_constructor, Node* executor,
+                                 Label* if_noaccess);
+
+  void InternalPromiseReject(Node* context, Node* promise, Node* value,
+                             bool debug_event);
+  void InternalPromiseReject(Node* context, Node* promise, Node* value,
+                             Node* debug_event);
+  std::pair<Node*, Node*> CreatePromiseFinallyFunctions(Node* on_finally,
+                                                        Node* native_context);
+  Node* CreatePromiseFinallyContext(Node* on_finally, Node* native_context);
+
+  Node* CreateValueThunkFunction(Node* value, Node* native_context);
+  Node* CreateValueThunkFunctionContext(Node* value, Node* native_context);
+
+  Node* CreateThrowerFunctionContext(Node* reason, Node* native_context);
+  Node* CreateThrowerFunction(Node* reason, Node* native_context);
+
+ private:
+  Node* AllocateJSPromise(Node* context);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BUILTINS_BUILTINS_PROMISE_H_
diff --git a/src/builtins/builtins-proxy.cc b/src/builtins/builtins-proxy.cc
index 05ba304..db6f7b5 100644
--- a/src/builtins/builtins-proxy.cc
+++ b/src/builtins/builtins-proxy.cc
@@ -5,6 +5,9 @@
 #include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
 
+#include "src/counters.h"
+#include "src/objects-inl.h"
+
 namespace v8 {
 namespace internal {
 
diff --git a/src/builtins/builtins-reflect.cc b/src/builtins/builtins-reflect.cc
index b4d16c4..9b29634 100644
--- a/src/builtins/builtins-reflect.cc
+++ b/src/builtins/builtins-reflect.cc
@@ -5,6 +5,10 @@
 #include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
 
+#include "src/counters.h"
+#include "src/keys.h"
+#include "src/lookup.h"
+#include "src/objects-inl.h"
 #include "src/property-descriptor.h"
 
 namespace v8 {
@@ -17,9 +21,9 @@
 BUILTIN(ReflectDefineProperty) {
   HandleScope scope(isolate);
   DCHECK_EQ(4, args.length());
-  Handle<Object> target = args.at<Object>(1);
-  Handle<Object> key = args.at<Object>(2);
-  Handle<Object> attributes = args.at<Object>(3);
+  Handle<Object> target = args.at(1);
+  Handle<Object> key = args.at(2);
+  Handle<Object> attributes = args.at(3);
 
   if (!target->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
@@ -48,8 +52,8 @@
 BUILTIN(ReflectDeleteProperty) {
   HandleScope scope(isolate);
   DCHECK_EQ(3, args.length());
-  Handle<Object> target = args.at<Object>(1);
-  Handle<Object> key = args.at<Object>(2);
+  Handle<Object> target = args.at(1);
+  Handle<Object> key = args.at(2);
 
   if (!target->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
@@ -73,7 +77,7 @@
   HandleScope scope(isolate);
   Handle<Object> target = args.atOrUndefined(isolate, 1);
   Handle<Object> key = args.atOrUndefined(isolate, 2);
-  Handle<Object> receiver = args.length() > 3 ? args.at<Object>(3) : target;
+  Handle<Object> receiver = args.length() > 3 ? args.at(3) : target;
 
   if (!target->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
@@ -95,8 +99,8 @@
 BUILTIN(ReflectGetOwnPropertyDescriptor) {
   HandleScope scope(isolate);
   DCHECK_EQ(3, args.length());
-  Handle<Object> target = args.at<Object>(1);
-  Handle<Object> key = args.at<Object>(2);
+  Handle<Object> target = args.at(1);
+  Handle<Object> key = args.at(2);
 
   if (!target->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
@@ -121,7 +125,7 @@
 BUILTIN(ReflectGetPrototypeOf) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
-  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> target = args.at(1);
 
   if (!target->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
@@ -138,8 +142,8 @@
 BUILTIN(ReflectHas) {
   HandleScope scope(isolate);
   DCHECK_EQ(3, args.length());
-  Handle<Object> target = args.at<Object>(1);
-  Handle<Object> key = args.at<Object>(2);
+  Handle<Object> target = args.at(1);
+  Handle<Object> key = args.at(2);
 
   if (!target->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
@@ -162,7 +166,7 @@
 BUILTIN(ReflectIsExtensible) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
-  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> target = args.at(1);
 
   if (!target->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
@@ -181,7 +185,7 @@
 BUILTIN(ReflectOwnKeys) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
-  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> target = args.at(1);
 
   if (!target->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
@@ -203,7 +207,7 @@
 BUILTIN(ReflectPreventExtensions) {
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
-  Handle<Object> target = args.at<Object>(1);
+  Handle<Object> target = args.at(1);
 
   if (!target->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
@@ -224,7 +228,7 @@
   Handle<Object> target = args.atOrUndefined(isolate, 1);
   Handle<Object> key = args.atOrUndefined(isolate, 2);
   Handle<Object> value = args.atOrUndefined(isolate, 3);
-  Handle<Object> receiver = args.length() > 4 ? args.at<Object>(4) : target;
+  Handle<Object> receiver = args.length() > 4 ? args.at(4) : target;
 
   if (!target->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
@@ -249,8 +253,8 @@
 BUILTIN(ReflectSetPrototypeOf) {
   HandleScope scope(isolate);
   DCHECK_EQ(3, args.length());
-  Handle<Object> target = args.at<Object>(1);
-  Handle<Object> proto = args.at<Object>(2);
+  Handle<Object> target = args.at(1);
+  Handle<Object> proto = args.at(2);
 
   if (!target->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
diff --git a/src/builtins/builtins-regexp.cc b/src/builtins/builtins-regexp.cc
index 5f8d18b..f76136b 100644
--- a/src/builtins/builtins-regexp.cc
+++ b/src/builtins/builtins-regexp.cc
@@ -2,10 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/builtins/builtins-regexp.h"
+
+#include "src/builtins/builtins-constructor.h"
 #include "src/builtins/builtins-utils.h"
 #include "src/builtins/builtins.h"
-
 #include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
+#include "src/objects/regexp-match-info.h"
 #include "src/regexp/jsregexp.h"
 #include "src/regexp/regexp-utils.h"
 #include "src/string-builder.h"
@@ -13,735 +19,942 @@
 namespace v8 {
 namespace internal {
 
+typedef CodeStubAssembler::ParameterMode ParameterMode;
+
 // -----------------------------------------------------------------------------
 // ES6 section 21.2 RegExp Objects
 
-namespace {
-
-Handle<String> PatternFlags(Isolate* isolate, Handle<JSRegExp> regexp) {
-  static const int kMaxFlagsLength = 5 + 1;  // 5 flags and '\0';
-  char flags_string[kMaxFlagsLength];
-  int i = 0;
-
-  const JSRegExp::Flags flags = regexp->GetFlags();
-
-  if ((flags & JSRegExp::kGlobal) != 0) flags_string[i++] = 'g';
-  if ((flags & JSRegExp::kIgnoreCase) != 0) flags_string[i++] = 'i';
-  if ((flags & JSRegExp::kMultiline) != 0) flags_string[i++] = 'm';
-  if ((flags & JSRegExp::kUnicode) != 0) flags_string[i++] = 'u';
-  if ((flags & JSRegExp::kSticky) != 0) flags_string[i++] = 'y';
-
-  DCHECK_LT(i, kMaxFlagsLength);
-  memset(&flags_string[i], '\0', kMaxFlagsLength - i);
-
-  return isolate->factory()->NewStringFromAsciiChecked(flags_string);
-}
-
-// ES#sec-regexpinitialize
-// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
-MUST_USE_RESULT MaybeHandle<JSRegExp> RegExpInitialize(Isolate* isolate,
-                                                       Handle<JSRegExp> regexp,
-                                                       Handle<Object> pattern,
-                                                       Handle<Object> flags) {
-  Handle<String> pattern_string;
-  if (pattern->IsUndefined(isolate)) {
-    pattern_string = isolate->factory()->empty_string();
-  } else {
-    ASSIGN_RETURN_ON_EXCEPTION(isolate, pattern_string,
-                               Object::ToString(isolate, pattern), JSRegExp);
-  }
-
-  Handle<String> flags_string;
-  if (flags->IsUndefined(isolate)) {
-    flags_string = isolate->factory()->empty_string();
-  } else {
-    ASSIGN_RETURN_ON_EXCEPTION(isolate, flags_string,
-                               Object::ToString(isolate, flags), JSRegExp);
-  }
-
-  // TODO(jgruber): We could avoid the flags back and forth conversions.
-  return JSRegExp::Initialize(regexp, pattern_string, flags_string);
-}
-
-}  // namespace
-
-// ES#sec-regexp-pattern-flags
-// RegExp ( pattern, flags )
-BUILTIN(RegExpConstructor) {
-  HandleScope scope(isolate);
-
-  Handle<HeapObject> new_target = args.new_target();
-  Handle<Object> pattern = args.atOrUndefined(isolate, 1);
-  Handle<Object> flags = args.atOrUndefined(isolate, 2);
-
-  Handle<JSFunction> target = isolate->regexp_function();
-
-  bool pattern_is_regexp;
-  {
-    Maybe<bool> maybe_pattern_is_regexp =
-        RegExpUtils::IsRegExp(isolate, pattern);
-    if (maybe_pattern_is_regexp.IsNothing()) {
-      DCHECK(isolate->has_pending_exception());
-      return isolate->heap()->exception();
-    }
-    pattern_is_regexp = maybe_pattern_is_regexp.FromJust();
-  }
-
-  if (new_target->IsUndefined(isolate)) {
-    new_target = target;
-
-    // ES6 section 21.2.3.1 step 3.b
-    if (pattern_is_regexp && flags->IsUndefined(isolate)) {
-      Handle<Object> pattern_constructor;
-      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-          isolate, pattern_constructor,
-          Object::GetProperty(pattern,
-                              isolate->factory()->constructor_string()));
-
-      if (pattern_constructor.is_identical_to(new_target)) {
-        return *pattern;
-      }
-    }
-  }
-
-  if (pattern->IsJSRegExp()) {
-    Handle<JSRegExp> regexp_pattern = Handle<JSRegExp>::cast(pattern);
-
-    if (flags->IsUndefined(isolate)) {
-      flags = PatternFlags(isolate, regexp_pattern);
-    }
-    pattern = handle(regexp_pattern->source(), isolate);
-  } else if (pattern_is_regexp) {
-    Handle<Object> pattern_source;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, pattern_source,
-        Object::GetProperty(pattern, isolate->factory()->source_string()));
-
-    if (flags->IsUndefined(isolate)) {
-      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-          isolate, flags,
-          Object::GetProperty(pattern, isolate->factory()->flags_string()));
-    }
-    pattern = pattern_source;
-  }
-
-  Handle<JSReceiver> new_target_receiver = Handle<JSReceiver>::cast(new_target);
-
-  // TODO(jgruber): Fast-path for target == new_target == unmodified JSRegExp.
-
-  Handle<JSObject> object;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, object, JSObject::New(target, new_target_receiver));
-  Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(object);
-
-  RETURN_RESULT_OR_FAILURE(isolate,
-                           RegExpInitialize(isolate, regexp, pattern, flags));
-}
-
-BUILTIN(RegExpPrototypeCompile) {
-  HandleScope scope(isolate);
-  CHECK_RECEIVER(JSRegExp, regexp, "RegExp.prototype.compile");
-
-  Handle<Object> pattern = args.atOrUndefined(isolate, 1);
-  Handle<Object> flags = args.atOrUndefined(isolate, 2);
-
-  if (pattern->IsJSRegExp()) {
-    Handle<JSRegExp> pattern_regexp = Handle<JSRegExp>::cast(pattern);
-
-    if (!flags->IsUndefined(isolate)) {
-      THROW_NEW_ERROR_RETURN_FAILURE(
-          isolate, NewTypeError(MessageTemplate::kRegExpFlags));
-    }
-
-    flags = PatternFlags(isolate, pattern_regexp);
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, pattern,
-        Object::GetProperty(pattern, isolate->factory()->source_string()));
-  }
-
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, regexp, RegExpInitialize(isolate, regexp, pattern, flags));
-
-  // Return undefined for compatibility with JSC.
-  // See http://crbug.com/585775 for web compat details.
-
-  return isolate->heap()->undefined_value();
-}
-
-namespace {
-
-compiler::Node* FastLoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
-                                  compiler::Node* regexp) {
+Node* RegExpBuiltinsAssembler::FastLoadLastIndex(Node* regexp) {
   // Load the in-object field.
   static const int field_offset =
       JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
-  return a->LoadObjectField(regexp, field_offset);
+  return LoadObjectField(regexp, field_offset);
 }
 
-compiler::Node* SlowLoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
-                                  compiler::Node* regexp) {
+Node* RegExpBuiltinsAssembler::SlowLoadLastIndex(Node* context, Node* regexp) {
   // Load through the GetProperty stub.
-  typedef compiler::Node Node;
-
-  Node* const name =
-      a->HeapConstant(a->isolate()->factory()->lastIndex_string());
-  Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
-  return a->CallStub(getproperty_callable, context, regexp, name);
+  Node* const name = HeapConstant(isolate()->factory()->lastIndex_string());
+  Callable getproperty_callable = CodeFactory::GetProperty(isolate());
+  return CallStub(getproperty_callable, context, regexp, name);
 }
 
-compiler::Node* LoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
-                              compiler::Node* has_initialmap,
-                              compiler::Node* regexp) {
-  typedef CodeStubAssembler::Variable Variable;
-  typedef CodeStubAssembler::Label Label;
-
-  Variable var_value(a, MachineRepresentation::kTagged);
-
-  Label out(a), if_unmodified(a), if_modified(a);
-  a->Branch(has_initialmap, &if_unmodified, &if_modified);
-
-  a->Bind(&if_unmodified);
-  {
-    var_value.Bind(FastLoadLastIndex(a, context, regexp));
-    a->Goto(&out);
-  }
-
-  a->Bind(&if_modified);
-  {
-    var_value.Bind(SlowLoadLastIndex(a, context, regexp));
-    a->Goto(&out);
-  }
-
-  a->Bind(&out);
-  return var_value.value();
+Node* RegExpBuiltinsAssembler::LoadLastIndex(Node* context, Node* regexp,
+                                             bool is_fastpath) {
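+  // {is_fastpath} is a C++-level constant rather than a Node, so each caller
+  // bakes exactly one of the two loads into its generated code.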
+  return is_fastpath ? FastLoadLastIndex(regexp)
+                     : SlowLoadLastIndex(context, regexp);
 }
 
 // The fast-path of StoreLastIndex when regexp is guaranteed to be an unmodified
 // JSRegExp instance.
-void FastStoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
-                        compiler::Node* regexp, compiler::Node* value) {
+void RegExpBuiltinsAssembler::FastStoreLastIndex(Node* regexp, Node* value) {
   // Store the in-object field.
   static const int field_offset =
       JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
-  a->StoreObjectField(regexp, field_offset, value);
+  StoreObjectField(regexp, field_offset, value);
 }
 
-void SlowStoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
-                        compiler::Node* regexp, compiler::Node* value) {
+void RegExpBuiltinsAssembler::SlowStoreLastIndex(Node* context, Node* regexp,
+                                                 Node* value) {
   // Store through runtime.
   // TODO(ishell): Use SetPropertyStub here once available.
-  typedef compiler::Node Node;
-
-  Node* const name =
-      a->HeapConstant(a->isolate()->factory()->lastIndex_string());
-  Node* const language_mode = a->SmiConstant(Smi::FromInt(STRICT));
-  a->CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
-                 language_mode);
+  Node* const name = HeapConstant(isolate()->factory()->lastIndex_string());
+  Node* const language_mode = SmiConstant(Smi::FromInt(STRICT));
+  CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
+              language_mode);
 }
 
-void StoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
-                    compiler::Node* has_initialmap, compiler::Node* regexp,
-                    compiler::Node* value) {
-  typedef CodeStubAssembler::Label Label;
-
-  Label out(a), if_unmodified(a), if_modified(a);
-  a->Branch(has_initialmap, &if_unmodified, &if_modified);
-
-  a->Bind(&if_unmodified);
-  {
-    FastStoreLastIndex(a, context, regexp, value);
-    a->Goto(&out);
+void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp,
+                                             Node* value, bool is_fastpath) {
+  if (is_fastpath) {
+    FastStoreLastIndex(regexp, value);
+  } else {
+    SlowStoreLastIndex(context, regexp, value);
   }
-
-  a->Bind(&if_modified);
-  {
-    SlowStoreLastIndex(a, context, regexp, value);
-    a->Goto(&out);
-  }
-
-  a->Bind(&out);
 }
 
-compiler::Node* ConstructNewResultFromMatchInfo(Isolate* isolate,
-                                                CodeStubAssembler* a,
-                                                compiler::Node* context,
-                                                compiler::Node* match_info,
-                                                compiler::Node* string) {
-  typedef CodeStubAssembler::Variable Variable;
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(
+    Node* const context, Node* const regexp, Node* const match_info,
+    Node* const string) {
+  Label named_captures(this), out(this);
 
-  Label out(a);
-
-  CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
-  Node* const num_indices = a->SmiUntag(a->LoadFixedArrayElement(
-      match_info, a->IntPtrConstant(RegExpMatchInfo::kNumberOfCapturesIndex), 0,
-      mode));
-  Node* const num_results = a->SmiTag(a->WordShr(num_indices, 1));
-  Node* const start = a->LoadFixedArrayElement(
-      match_info, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), 0,
-      mode);
-  Node* const end = a->LoadFixedArrayElement(
-      match_info, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1), 0,
-      mode);
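+  // {match_info} stores the number of capture index slots and, starting at
+  // kFirstCaptureIndex, (start, end) index pairs; the first pair spans the
+  // full match.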
+  Node* const num_indices = SmiUntag(LoadFixedArrayElement(
+      match_info, RegExpMatchInfo::kNumberOfCapturesIndex));
+  Node* const num_results = SmiTag(WordShr(num_indices, 1));
+  Node* const start =
+      LoadFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex);
+  Node* const end = LoadFixedArrayElement(
+      match_info, RegExpMatchInfo::kFirstCaptureIndex + 1);
 
   // Calculate the substring of the first match before creating the result array
   // to avoid an unnecessary write barrier storing the first result.
-  Node* const first = a->SubString(context, string, start, end);
+  Node* const first = SubString(context, string, start, end);
 
   Node* const result =
-      a->AllocateRegExpResult(context, num_results, start, string);
-  Node* const result_elements = a->LoadElements(result);
+      AllocateRegExpResult(context, num_results, start, string);
+  Node* const result_elements = LoadElements(result);
 
-  a->StoreFixedArrayElement(result_elements, a->IntPtrConstant(0), first,
-                            SKIP_WRITE_BARRIER);
+  StoreFixedArrayElement(result_elements, 0, first, SKIP_WRITE_BARRIER);
 
-  a->GotoIf(a->SmiEqual(num_results, a->SmiConstant(Smi::FromInt(1))), &out);
+  // If no captures exist we can skip named capture handling as well.
+  GotoIf(SmiEqual(num_results, SmiConstant(1)), &out);
 
   // Store all remaining captures.
-  Node* const limit = a->IntPtrAdd(
-      a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), num_indices);
+  Node* const limit = IntPtrAdd(
+      IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), num_indices);
 
-  Variable var_from_cursor(a, MachineType::PointerRepresentation());
-  Variable var_to_cursor(a, MachineType::PointerRepresentation());
-
-  var_from_cursor.Bind(
-      a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
-  var_to_cursor.Bind(a->IntPtrConstant(1));
+  Variable var_from_cursor(
+      this, MachineType::PointerRepresentation(),
+      IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
+  Variable var_to_cursor(this, MachineType::PointerRepresentation(),
+                         IntPtrConstant(1));
 
   Variable* vars[] = {&var_from_cursor, &var_to_cursor};
-  Label loop(a, 2, vars);
+  Label loop(this, 2, vars);
 
-  a->Goto(&loop);
-  a->Bind(&loop);
+  Goto(&loop);
+  Bind(&loop);
   {
     Node* const from_cursor = var_from_cursor.value();
     Node* const to_cursor = var_to_cursor.value();
-    Node* const start = a->LoadFixedArrayElement(match_info, from_cursor);
+    Node* const start = LoadFixedArrayElement(match_info, from_cursor);
 
-    Label next_iter(a);
-    a->GotoIf(a->SmiEqual(start, a->SmiConstant(Smi::FromInt(-1))), &next_iter);
+    Label next_iter(this);
+    GotoIf(SmiEqual(start, SmiConstant(-1)), &next_iter);
 
-    Node* const from_cursor_plus1 =
-        a->IntPtrAdd(from_cursor, a->IntPtrConstant(1));
-    Node* const end = a->LoadFixedArrayElement(match_info, from_cursor_plus1);
+    Node* const from_cursor_plus1 = IntPtrAdd(from_cursor, IntPtrConstant(1));
+    Node* const end = LoadFixedArrayElement(match_info, from_cursor_plus1);
 
-    Node* const capture = a->SubString(context, string, start, end);
-    a->StoreFixedArrayElement(result_elements, to_cursor, capture);
-    a->Goto(&next_iter);
+    Node* const capture = SubString(context, string, start, end);
+    StoreFixedArrayElement(result_elements, to_cursor, capture);
+    Goto(&next_iter);
 
-    a->Bind(&next_iter);
-    var_from_cursor.Bind(a->IntPtrAdd(from_cursor, a->IntPtrConstant(2)));
-    var_to_cursor.Bind(a->IntPtrAdd(to_cursor, a->IntPtrConstant(1)));
-    a->Branch(a->UintPtrLessThan(var_from_cursor.value(), limit), &loop, &out);
+    Bind(&next_iter);
+    var_from_cursor.Bind(IntPtrAdd(from_cursor, IntPtrConstant(2)));
+    var_to_cursor.Bind(IntPtrAdd(to_cursor, IntPtrConstant(1)));
+    Branch(UintPtrLessThan(var_from_cursor.value(), limit), &loop,
+           &named_captures);
   }
 
-  a->Bind(&out);
+  Bind(&named_captures);
+  {
+    // We reach this point only if captures exist, implying that this is an
+    // IRREGEXP JSRegExp.
+
+    CSA_ASSERT(this, HasInstanceType(regexp, JS_REGEXP_TYPE));
+    CSA_ASSERT(this, SmiGreaterThan(num_results, SmiConstant(1)));
+
+    // Preparations for named capture properties. Exit early if the result does
+    // not have any named captures to minimize performance impact.
+
+    Node* const data = LoadObjectField(regexp, JSRegExp::kDataOffset);
+    CSA_ASSERT(this, SmiEqual(LoadFixedArrayElement(data, JSRegExp::kTagIndex),
+                              SmiConstant(JSRegExp::IRREGEXP)));
+
+    // The names fixed array associates names at even indices with a capture
+    // index at odd indices.
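+    // E.g. for /(?<a>.)(?<b>.)/ it would contain ["a", 1, "b", 2]
+    // (illustrative; capture indices are 1-based).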
+    Node* const names =
+        LoadFixedArrayElement(data, JSRegExp::kIrregexpCaptureNameMapIndex);
+    GotoIf(SmiEqual(names, SmiConstant(0)), &out);
+
+    // Allocate a new object to store the named capture properties.
+    // TODO(jgruber): Could be optimized by adding the object map to the heap
+    // root list.
+
+    Node* const native_context = LoadNativeContext(context);
+    Node* const map = LoadContextElement(
+        native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP);
+    Node* const properties =
+        AllocateNameDictionary(NameDictionary::kInitialCapacity);
+
+    Node* const group_object = AllocateJSObjectFromMap(map, properties);
+
+    // Store it on the result as a 'group' property.
+
+    {
+      Node* const name = HeapConstant(isolate()->factory()->group_string());
+      CallRuntime(Runtime::kCreateDataProperty, context, result, name,
+                  group_object);
+    }
+
+    // One or more named captures exist; add a property for each one.
+
+    CSA_ASSERT(this, HasInstanceType(names, FIXED_ARRAY_TYPE));
+    Node* const names_length = LoadAndUntagFixedArrayBaseLength(names);
+    CSA_ASSERT(this, IntPtrGreaterThan(names_length, IntPtrConstant(0)));
+
+    Variable var_i(this, MachineType::PointerRepresentation());
+    var_i.Bind(IntPtrConstant(0));
+
+    Variable* vars[] = {&var_i};
+    const int vars_count = sizeof(vars) / sizeof(vars[0]);
+    Label loop(this, vars_count, vars);
+
+    Goto(&loop);
+    Bind(&loop);
+    {
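+      // Each iteration consumes one (name, capture index) pair from {names}.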
+      Node* const i = var_i.value();
+      Node* const i_plus_1 = IntPtrAdd(i, IntPtrConstant(1));
+      Node* const i_plus_2 = IntPtrAdd(i_plus_1, IntPtrConstant(1));
+
+      Node* const name = LoadFixedArrayElement(names, i);
+      Node* const index = LoadFixedArrayElement(names, i_plus_1);
+      Node* const capture =
+          LoadFixedArrayElement(result_elements, SmiUntag(index));
+
+      CallRuntime(Runtime::kCreateDataProperty, context, group_object, name,
+                  capture);
+
+      var_i.Bind(i_plus_2);
+      Branch(IntPtrGreaterThanOrEqual(var_i.value(), names_length), &out,
+             &loop);
+    }
+  }
+
+  Bind(&out);
   return result;
 }
 
 // ES#sec-regexp.prototype.exec
 // RegExp.prototype.exec ( string )
-compiler::Node* RegExpPrototypeExecInternal(CodeStubAssembler* a,
-                                            compiler::Node* context,
-                                            compiler::Node* maybe_receiver,
-                                            compiler::Node* maybe_string) {
-  typedef CodeStubAssembler::Variable Variable;
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+// Implements the core of RegExp.prototype.exec but without actually
+// constructing the JSRegExpResult. Returns a fixed array containing match
+// indices as returned by RegExpExecStub on a successful match, and jumps to
+// {if_didnotmatch} (with the result bound to null) otherwise.
+Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
+    Node* const context, Node* const regexp, Node* const string,
+    Label* if_didnotmatch, const bool is_fastpath) {
+  Isolate* const isolate = this->isolate();
 
-  Isolate* const isolate = a->isolate();
+  Node* const null = NullConstant();
+  Node* const int_zero = IntPtrConstant(0);
+  Node* const smi_zero = SmiConstant(Smi::kZero);
 
-  Node* const null = a->NullConstant();
-  Node* const int_zero = a->IntPtrConstant(0);
-  Node* const smi_zero = a->SmiConstant(Smi::kZero);
+  if (!is_fastpath) {
+    ThrowIfNotInstanceType(context, regexp, JS_REGEXP_TYPE,
+                           "RegExp.prototype.exec");
+  }
 
-  Variable var_result(a, MachineRepresentation::kTagged);
-  Label out(a);
+  CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(string)));
+  CSA_ASSERT(this, HasInstanceType(regexp, JS_REGEXP_TYPE));
 
-  // Ensure {maybe_receiver} is a JSRegExp.
-  Node* const regexp_map = a->ThrowIfNotInstanceType(
-      context, maybe_receiver, JS_REGEXP_TYPE, "RegExp.prototype.exec");
-  Node* const regexp = maybe_receiver;
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Label out(this);
 
-  // Check whether the regexp instance is unmodified.
-  Node* const native_context = a->LoadNativeContext(context);
-  Node* const regexp_fun =
-      a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
-  Node* const initial_map =
-      a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
-  Node* const has_initialmap = a->WordEqual(regexp_map, initial_map);
+  // Load lastIndex.
+  Variable var_lastindex(this, MachineRepresentation::kTagged);
+  {
+    Node* const regexp_lastindex = LoadLastIndex(context, regexp, is_fastpath);
+    var_lastindex.Bind(regexp_lastindex);
 
-  // Convert {maybe_string} to a string.
-  Callable tostring_callable = CodeFactory::ToString(isolate);
-  Node* const string = a->CallStub(tostring_callable, context, maybe_string);
-  Node* const string_length = a->LoadStringLength(string);
+    if (is_fastpath) {
+      // ToLength on a positive smi is a nop and can be skipped.
+      CSA_ASSERT(this, TaggedIsPositiveSmi(regexp_lastindex));
+    } else {
+      // Omit ToLength if lastindex is a non-negative smi.
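+      // (ToLength can run user code via valueOf/toString, so it is only
+      // skipped when lastIndex is already a non-negative smi.)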
+      Label call_tolength(this, Label::kDeferred), next(this);
+      Branch(TaggedIsPositiveSmi(regexp_lastindex), &next, &call_tolength);
+
+      Bind(&call_tolength);
+      {
+        Callable tolength_callable = CodeFactory::ToLength(isolate);
+        var_lastindex.Bind(
+            CallStub(tolength_callable, context, regexp_lastindex));
+        Goto(&next);
+      }
+
+      Bind(&next);
+    }
+  }
 
   // Check whether the regexp is global or sticky, which determines whether we
   // update last index later on.
-  Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
-  Node* const is_global_or_sticky =
-      a->WordAnd(a->SmiUntag(flags),
-                 a->IntPtrConstant(JSRegExp::kGlobal | JSRegExp::kSticky));
+  Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+  Node* const is_global_or_sticky = WordAnd(
+      SmiUntag(flags), IntPtrConstant(JSRegExp::kGlobal | JSRegExp::kSticky));
   Node* const should_update_last_index =
-      a->WordNotEqual(is_global_or_sticky, int_zero);
+      WordNotEqual(is_global_or_sticky, int_zero);
 
   // Grab and possibly update last index.
-  Label run_exec(a);
-  Variable var_lastindex(a, MachineRepresentation::kTagged);
+  Label run_exec(this);
   {
-    Label if_doupdate(a), if_dontupdate(a);
-    a->Branch(should_update_last_index, &if_doupdate, &if_dontupdate);
+    Label if_doupdate(this), if_dontupdate(this);
+    Branch(should_update_last_index, &if_doupdate, &if_dontupdate);
 
-    a->Bind(&if_doupdate);
+    Bind(&if_doupdate);
     {
-      Node* const regexp_lastindex =
-          LoadLastIndex(a, context, has_initialmap, regexp);
+      Node* const lastindex = var_lastindex.value();
 
-      Callable tolength_callable = CodeFactory::ToLength(isolate);
-      Node* const lastindex =
-          a->CallStub(tolength_callable, context, regexp_lastindex);
-      var_lastindex.Bind(lastindex);
+      Label if_isoob(this, Label::kDeferred);
+      GotoIfNot(TaggedIsSmi(lastindex), &if_isoob);
+      Node* const string_length = LoadStringLength(string);
+      GotoIfNot(SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
+      Goto(&run_exec);
 
-      Label if_isoob(a, Label::kDeferred);
-      a->GotoUnless(a->TaggedIsSmi(lastindex), &if_isoob);
-      a->GotoUnless(a->SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
-      a->Goto(&run_exec);
-
-      a->Bind(&if_isoob);
+      Bind(&if_isoob);
       {
-        StoreLastIndex(a, context, has_initialmap, regexp, smi_zero);
+        StoreLastIndex(context, regexp, smi_zero, is_fastpath);
         var_result.Bind(null);
-        a->Goto(&out);
+        Goto(if_didnotmatch);
       }
     }
 
-    a->Bind(&if_dontupdate);
+    Bind(&if_dontupdate);
     {
       var_lastindex.Bind(smi_zero);
-      a->Goto(&run_exec);
+      Goto(&run_exec);
     }
   }
 
   Node* match_indices;
-  Label successful_match(a);
-  a->Bind(&run_exec);
+  Label successful_match(this);
+  Bind(&run_exec);
   {
     // Get last match info from the context.
-    Node* const last_match_info = a->LoadContextElement(
+    Node* const native_context = LoadNativeContext(context);
+    Node* const last_match_info = LoadContextElement(
         native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
 
     // Call the exec stub.
     Callable exec_callable = CodeFactory::RegExpExec(isolate);
-    match_indices = a->CallStub(exec_callable, context, regexp, string,
-                                var_lastindex.value(), last_match_info);
+    match_indices = CallStub(exec_callable, context, regexp, string,
+                             var_lastindex.value(), last_match_info);
+    var_result.Bind(match_indices);
 
     // {match_indices} is either null or the RegExpMatchInfo array.
     // Return early if exec failed, possibly updating last index.
-    a->GotoUnless(a->WordEqual(match_indices, null), &successful_match);
+    GotoIfNot(WordEqual(match_indices, null), &successful_match);
 
-    Label return_null(a);
-    a->GotoUnless(should_update_last_index, &return_null);
+    GotoIfNot(should_update_last_index, if_didnotmatch);
 
-    StoreLastIndex(a, context, has_initialmap, regexp, smi_zero);
-    a->Goto(&return_null);
-
-    a->Bind(&return_null);
-    var_result.Bind(null);
-    a->Goto(&out);
+    StoreLastIndex(context, regexp, smi_zero, is_fastpath);
+    Goto(if_didnotmatch);
   }
 
-  Label construct_result(a);
-  a->Bind(&successful_match);
+  Bind(&successful_match);
   {
-    a->GotoUnless(should_update_last_index, &construct_result);
+    GotoIfNot(should_update_last_index, &out);
 
     // Update the new last index from {match_indices}.
-    Node* const new_lastindex = a->LoadFixedArrayElement(
-        match_indices,
-        a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1));
+    Node* const new_lastindex = LoadFixedArrayElement(
+        match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
 
-    StoreLastIndex(a, context, has_initialmap, regexp, new_lastindex);
-    a->Goto(&construct_result);
-
-    a->Bind(&construct_result);
-    {
-      Node* result = ConstructNewResultFromMatchInfo(isolate, a, context,
-                                                     match_indices, string);
-      var_result.Bind(result);
-      a->Goto(&out);
-    }
+    StoreLastIndex(context, regexp, new_lastindex, is_fastpath);
+    Goto(&out);
   }
 
-  a->Bind(&out);
+  Bind(&out);
   return var_result.value();
 }
 
-}  // namespace
-
 // ES#sec-regexp.prototype.exec
 // RegExp.prototype.exec ( string )
-void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
-  typedef compiler::Node Node;
+Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(Node* const context,
+                                                       Node* const regexp,
+                                                       Node* const string,
+                                                       const bool is_fastpath) {
+  Node* const null = NullConstant();
 
-  Node* const maybe_receiver = a->Parameter(0);
-  Node* const maybe_string = a->Parameter(1);
-  Node* const context = a->Parameter(4);
+  Variable var_result(this, MachineRepresentation::kTagged);
 
-  Node* const result =
-      RegExpPrototypeExecInternal(a, context, maybe_receiver, maybe_string);
-  a->Return(result);
-}
+  Label if_didnotmatch(this), out(this);
+  Node* const indices_or_null = RegExpPrototypeExecBodyWithoutResult(
+      context, regexp, string, &if_didnotmatch, is_fastpath);
 
-namespace {
-
-compiler::Node* ThrowIfNotJSReceiver(CodeStubAssembler* a, Isolate* isolate,
-                                     compiler::Node* context,
-                                     compiler::Node* value,
-                                     MessageTemplate::Template msg_template,
-                                     char const* method_name) {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Label out(a), throw_exception(a, Label::kDeferred);
-  Variable var_value_map(a, MachineRepresentation::kTagged);
-
-  a->GotoIf(a->TaggedIsSmi(value), &throw_exception);
-
-  // Load the instance type of the {value}.
-  var_value_map.Bind(a->LoadMap(value));
-  Node* const value_instance_type =
-      a->LoadMapInstanceType(var_value_map.value());
-
-  a->Branch(a->IsJSReceiverInstanceType(value_instance_type), &out,
-            &throw_exception);
-
-  // The {value} is not a compatible receiver for this method.
-  a->Bind(&throw_exception);
+  // Successful match.
   {
-    Node* const message_id = a->SmiConstant(Smi::FromInt(msg_template));
-    Node* const method_name_str = a->HeapConstant(
-        isolate->factory()->NewStringFromAsciiChecked(method_name, TENURED));
-
-    Callable callable = CodeFactory::ToString(isolate);
-    Node* const value_str = a->CallStub(callable, context, value);
-
-    a->CallRuntime(Runtime::kThrowTypeError, context, message_id,
-                   method_name_str, value_str);
-    var_value_map.Bind(a->UndefinedConstant());
-    a->Goto(&out);  // Never reached.
+    Node* const match_indices = indices_or_null;
+    Node* const result =
+        ConstructNewResultFromMatchInfo(context, regexp, match_indices, string);
+    var_result.Bind(result);
+    Goto(&out);
   }
 
-  a->Bind(&out);
+  Bind(&if_didnotmatch);
+  {
+    var_result.Bind(null);
+    Goto(&out);
+  }
+
+  Bind(&out);
+  return var_result.value();
+}
+
+Node* RegExpBuiltinsAssembler::ThrowIfNotJSReceiver(
+    Node* context, Node* maybe_receiver, MessageTemplate::Template msg_template,
+    char const* method_name) {
+  Label out(this), throw_exception(this, Label::kDeferred);
+  Variable var_value_map(this, MachineRepresentation::kTagged);
+
+  GotoIf(TaggedIsSmi(maybe_receiver), &throw_exception);
+
+  // Load the instance type of {maybe_receiver}.
+  var_value_map.Bind(LoadMap(maybe_receiver));
+  Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
+
+  Branch(IsJSReceiverInstanceType(value_instance_type), &out, &throw_exception);
+
+  // {maybe_receiver} is not a compatible receiver for this method.
+  Bind(&throw_exception);
+  {
+    Node* const message_id = SmiConstant(Smi::FromInt(msg_template));
+    Node* const method_name_str = HeapConstant(
+        isolate()->factory()->NewStringFromAsciiChecked(method_name, TENURED));
+
+    Callable callable = CodeFactory::ToString(isolate());
+    Node* const value_str = CallStub(callable, context, maybe_receiver);
+
+    CallRuntime(Runtime::kThrowTypeError, context, message_id, method_name_str,
+                value_str);
+    Unreachable();
+  }
+
+  Bind(&out);
   return var_value_map.value();
 }
 
-compiler::Node* IsInitialRegExpMap(CodeStubAssembler* a,
-                                   compiler::Node* context,
-                                   compiler::Node* map) {
-  typedef compiler::Node Node;
+Node* RegExpBuiltinsAssembler::IsInitialRegExpMap(Node* context, Node* object,
+                                                  Node* map) {
+  Label out(this);
+  Variable var_result(this, MachineRepresentation::kWord32);
 
-  Node* const native_context = a->LoadNativeContext(context);
+  Node* const native_context = LoadNativeContext(context);
   Node* const regexp_fun =
-      a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+      LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
   Node* const initial_map =
-      a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
-  Node* const has_initialmap = a->WordEqual(map, initial_map);
+      LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+  Node* const has_initialmap = WordEqual(map, initial_map);
 
-  return has_initialmap;
+  var_result.Bind(has_initialmap);
+  GotoIfNot(has_initialmap, &out);
+
+  // The smi check lets the fast path omit ToLength(lastIndex) calls, which
+  // could otherwise execute user code.
+  Node* const last_index = FastLoadLastIndex(object);
+  var_result.Bind(TaggedIsPositiveSmi(last_index));
+  Goto(&out);
+
+  Bind(&out);
+  return var_result.value();
 }
 
 // RegExp fast path implementations rely on unmodified JSRegExp instances.
 // We use a fairly coarse granularity for this and simply check whether
-// the regexp itself is unmodified (i.e. its map has not changed) and its
-// prototype is unmodified.
-void BranchIfFastPath(CodeStubAssembler* a, compiler::Node* context,
-                      compiler::Node* map,
-                      CodeStubAssembler::Label* if_isunmodified,
-                      CodeStubAssembler::Label* if_ismodified) {
-  typedef compiler::Node Node;
-
-  Node* const native_context = a->LoadNativeContext(context);
-  Node* const regexp_fun =
-      a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
-  Node* const initial_map =
-      a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
-  Node* const has_initialmap = a->WordEqual(map, initial_map);
-
-  a->GotoUnless(has_initialmap, if_ismodified);
-
-  Node* const initial_proto_initial_map = a->LoadContextElement(
-      native_context, Context::REGEXP_PROTOTYPE_MAP_INDEX);
-  Node* const proto_map = a->LoadMap(a->LoadMapPrototype(map));
-  Node* const proto_has_initialmap =
-      a->WordEqual(proto_map, initial_proto_initial_map);
+// the regexp itself is unmodified (i.e. its map has not changed), its
+// prototype is unmodified, and lastIndex is a non-negative smi.
+void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* const context,
+                                                 Node* const object,
+                                                 Node* const map,
+                                                 Label* const if_isunmodified,
+                                                 Label* const if_ismodified) {
+  CSA_ASSERT(this, WordEqual(LoadMap(object), map));
 
   // TODO(ishell): Update this check once map changes for constant field
   // tracking have landed.
 
-  a->Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
+  Node* const native_context = LoadNativeContext(context);
+  Node* const regexp_fun =
+      LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+  Node* const initial_map =
+      LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+  Node* const has_initialmap = WordEqual(map, initial_map);
+
+  GotoIfNot(has_initialmap, if_ismodified);
+
+  Node* const initial_proto_initial_map =
+      LoadContextElement(native_context, Context::REGEXP_PROTOTYPE_MAP_INDEX);
+  Node* const proto_map = LoadMap(LoadMapPrototype(map));
+  Node* const proto_has_initialmap =
+      WordEqual(proto_map, initial_proto_initial_map);
+
+  GotoIfNot(proto_has_initialmap, if_ismodified);
+
+  // The smi check lets the fast path omit ToLength(lastIndex) calls, which
+  // could otherwise execute user code.
+  Node* const last_index = FastLoadLastIndex(object);
+  Branch(TaggedIsPositiveSmi(last_index), if_isunmodified, if_ismodified);
 }
 
-}  // namespace
+Node* RegExpBuiltinsAssembler::IsFastRegExpMap(Node* const context,
+                                               Node* const object,
+                                               Node* const map) {
+  Label yup(this), nope(this), out(this);
+  Variable var_result(this, MachineRepresentation::kWord32);
 
-void Builtins::Generate_RegExpPrototypeFlagsGetter(CodeStubAssembler* a) {
-  typedef CodeStubAssembler::Variable Variable;
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+  BranchIfFastRegExp(context, object, map, &yup, &nope);
 
-  Node* const receiver = a->Parameter(0);
-  Node* const context = a->Parameter(3);
+  Bind(&yup);
+  var_result.Bind(Int32Constant(1));
+  Goto(&out);
 
-  Isolate* isolate = a->isolate();
-  Node* const int_zero = a->IntPtrConstant(0);
-  Node* const int_one = a->IntPtrConstant(1);
+  Bind(&nope);
+  var_result.Bind(Int32Constant(0));
+  Goto(&out);
 
-  Node* const map = ThrowIfNotJSReceiver(a, isolate, context, receiver,
-                                         MessageTemplate::kRegExpNonObject,
-                                         "RegExp.prototype.flags");
+  Bind(&out);
+  return var_result.value();
+}
 
-  Variable var_length(a, MachineType::PointerRepresentation());
-  Variable var_flags(a, MachineType::PointerRepresentation());
+void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* context, Node* map,
+                                                       Label* if_isunmodified,
+                                                       Label* if_ismodified) {
+  Node* const native_context = LoadNativeContext(context);
+  Node* const initial_regexp_result_map =
+      LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
+
+  Branch(WordEqual(map, initial_regexp_result_map), if_isunmodified,
+         if_ismodified);
+}
+
+// ES#sec-regexp.prototype.exec
+// RegExp.prototype.exec ( string )
+TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
+  Node* const maybe_receiver = Parameter(0);
+  Node* const maybe_string = Parameter(1);
+  Node* const context = Parameter(4);
+
+  // Ensure {maybe_receiver} is a JSRegExp.
+  ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE,
+                         "RegExp.prototype.exec");
+  Node* const receiver = maybe_receiver;
+
+  // Convert {maybe_string} to a String.
+  Node* const string = ToString(context, maybe_string);
+
+  Label if_isfastpath(this), if_isslowpath(this);
+  Branch(IsInitialRegExpMap(context, receiver, LoadMap(receiver)),
+         &if_isfastpath, &if_isslowpath);
+
+  Bind(&if_isfastpath);
+  {
+    Node* const result =
+        RegExpPrototypeExecBody(context, receiver, string, true);
+    Return(result);
+  }
+
+  Bind(&if_isslowpath);
+  {
+    Node* const result =
+        RegExpPrototypeExecBody(context, receiver, string, false);
+    Return(result);
+  }
+}
+
+Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
+                                           Node* const regexp,
+                                           bool is_fastpath) {
+  Isolate* isolate = this->isolate();
+
+  Node* const int_zero = IntPtrConstant(0);
+  Node* const int_one = IntPtrConstant(1);
+  Variable var_length(this, MachineType::PointerRepresentation(), int_zero);
+  Variable var_flags(this, MachineType::PointerRepresentation());
 
   // First, count the number of characters we will need and check which flags
   // are set.
 
-  var_length.Bind(int_zero);
-
-  Label if_isunmodifiedjsregexp(a),
-      if_isnotunmodifiedjsregexp(a, Label::kDeferred);
-  a->Branch(IsInitialRegExpMap(a, context, map), &if_isunmodifiedjsregexp,
-            &if_isnotunmodifiedjsregexp);
-
-  Label construct_string(a);
-  a->Bind(&if_isunmodifiedjsregexp);
-  {
+  if (is_fastpath) {
     // Refer to JSRegExp's flag property on the fast-path.
-    Node* const flags_smi =
-        a->LoadObjectField(receiver, JSRegExp::kFlagsOffset);
-    Node* const flags_intptr = a->SmiUntag(flags_smi);
+    Node* const flags_smi = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+    Node* const flags_intptr = SmiUntag(flags_smi);
     var_flags.Bind(flags_intptr);
 
-    Label label_global(a), label_ignorecase(a), label_multiline(a),
-        label_unicode(a), label_sticky(a);
-
-#define CASE_FOR_FLAG(FLAG, LABEL, NEXT_LABEL)                        \
-  do {                                                                \
-    a->Bind(&LABEL);                                                  \
-    Node* const mask = a->IntPtrConstant(FLAG);                       \
-    a->GotoIf(a->WordEqual(a->WordAnd(flags_intptr, mask), int_zero), \
-              &NEXT_LABEL);                                           \
-    var_length.Bind(a->IntPtrAdd(var_length.value(), int_one));       \
-    a->Goto(&NEXT_LABEL);                                             \
+#define CASE_FOR_FLAG(FLAG)                                  \
+  do {                                                       \
+    Label next(this);                                        \
+    GotoIfNot(IsSetWord(flags_intptr, FLAG), &next);         \
+    var_length.Bind(IntPtrAdd(var_length.value(), int_one)); \
+    Goto(&next);                                             \
+    Bind(&next);                                             \
   } while (false)
 
-    a->Goto(&label_global);
-    CASE_FOR_FLAG(JSRegExp::kGlobal, label_global, label_ignorecase);
-    CASE_FOR_FLAG(JSRegExp::kIgnoreCase, label_ignorecase, label_multiline);
-    CASE_FOR_FLAG(JSRegExp::kMultiline, label_multiline, label_unicode);
-    CASE_FOR_FLAG(JSRegExp::kUnicode, label_unicode, label_sticky);
-    CASE_FOR_FLAG(JSRegExp::kSticky, label_sticky, construct_string);
+    CASE_FOR_FLAG(JSRegExp::kGlobal);
+    CASE_FOR_FLAG(JSRegExp::kIgnoreCase);
+    CASE_FOR_FLAG(JSRegExp::kMultiline);
+    CASE_FOR_FLAG(JSRegExp::kUnicode);
+    CASE_FOR_FLAG(JSRegExp::kSticky);
 #undef CASE_FOR_FLAG
-  }
+  } else {
+    DCHECK(!is_fastpath);
 
-  a->Bind(&if_isnotunmodifiedjsregexp);
-  {
     // Fall back to GetProperty stub on the slow-path.
     var_flags.Bind(int_zero);
 
-    Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
-    Label label_global(a), label_ignorecase(a), label_multiline(a),
-        label_unicode(a), label_sticky(a);
+    Callable getproperty_callable = CodeFactory::GetProperty(isolate);
 
-#define CASE_FOR_FLAG(NAME, FLAG, LABEL, NEXT_LABEL)                          \
+#define CASE_FOR_FLAG(NAME, FLAG)                                             \
   do {                                                                        \
-    a->Bind(&LABEL);                                                          \
+    Label next(this);                                                         \
     Node* const name =                                                        \
-        a->HeapConstant(isolate->factory()->NewStringFromAsciiChecked(NAME)); \
-    Node* const flag =                                                        \
-        a->CallStub(getproperty_callable, context, receiver, name);           \
-    Label if_isflagset(a);                                                    \
-    a->BranchIfToBooleanIsTrue(flag, &if_isflagset, &NEXT_LABEL);             \
-    a->Bind(&if_isflagset);                                                   \
-    var_length.Bind(a->IntPtrAdd(var_length.value(), int_one));               \
-    var_flags.Bind(a->WordOr(var_flags.value(), a->IntPtrConstant(FLAG)));    \
-    a->Goto(&NEXT_LABEL);                                                     \
+        HeapConstant(isolate->factory()->InternalizeUtf8String(NAME));        \
+    Node* const flag = CallStub(getproperty_callable, context, regexp, name); \
+    Label if_isflagset(this);                                                 \
+    BranchIfToBooleanIsTrue(flag, &if_isflagset, &next);                      \
+    Bind(&if_isflagset);                                                      \
+    var_length.Bind(IntPtrAdd(var_length.value(), int_one));                  \
+    var_flags.Bind(WordOr(var_flags.value(), IntPtrConstant(FLAG)));          \
+    Goto(&next);                                                              \
+    Bind(&next);                                                              \
   } while (false)
 
-    a->Goto(&label_global);
-    CASE_FOR_FLAG("global", JSRegExp::kGlobal, label_global, label_ignorecase);
-    CASE_FOR_FLAG("ignoreCase", JSRegExp::kIgnoreCase, label_ignorecase,
-                  label_multiline);
-    CASE_FOR_FLAG("multiline", JSRegExp::kMultiline, label_multiline,
-                  label_unicode);
-    CASE_FOR_FLAG("unicode", JSRegExp::kUnicode, label_unicode, label_sticky);
-    CASE_FOR_FLAG("sticky", JSRegExp::kSticky, label_sticky, construct_string);
+    CASE_FOR_FLAG("global", JSRegExp::kGlobal);
+    CASE_FOR_FLAG("ignoreCase", JSRegExp::kIgnoreCase);
+    CASE_FOR_FLAG("multiline", JSRegExp::kMultiline);
+    CASE_FOR_FLAG("unicode", JSRegExp::kUnicode);
+    CASE_FOR_FLAG("sticky", JSRegExp::kSticky);
 #undef CASE_FOR_FLAG
   }
 
   // Allocate a string of the required length and fill it with the corresponding
   // char for each set flag.
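   // Flags appear in the fixed order g, i, m, u, y regardless of source
   // order, e.g. /re/yig has a flags string of "giy" (illustrative).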
 
-  a->Bind(&construct_string);
   {
-    Node* const result =
-        a->AllocateSeqOneByteString(context, var_length.value());
+    Node* const result = AllocateSeqOneByteString(context, var_length.value());
     Node* const flags_intptr = var_flags.value();
 
-    Variable var_offset(a, MachineType::PointerRepresentation());
-    var_offset.Bind(
-        a->IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+    Variable var_offset(
+        this, MachineType::PointerRepresentation(),
+        IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
 
-    Label label_global(a), label_ignorecase(a), label_multiline(a),
-        label_unicode(a), label_sticky(a), out(a);
-
-#define CASE_FOR_FLAG(FLAG, CHAR, LABEL, NEXT_LABEL)                  \
-  do {                                                                \
-    a->Bind(&LABEL);                                                  \
-    Node* const mask = a->IntPtrConstant(FLAG);                       \
-    a->GotoIf(a->WordEqual(a->WordAnd(flags_intptr, mask), int_zero), \
-              &NEXT_LABEL);                                           \
-    Node* const value = a->IntPtrConstant(CHAR);                      \
-    a->StoreNoWriteBarrier(MachineRepresentation::kWord8, result,     \
-                           var_offset.value(), value);                \
-    var_offset.Bind(a->IntPtrAdd(var_offset.value(), int_one));       \
-    a->Goto(&NEXT_LABEL);                                             \
+#define CASE_FOR_FLAG(FLAG, CHAR)                              \
+  do {                                                         \
+    Label next(this);                                          \
+    GotoIfNot(IsSetWord(flags_intptr, FLAG), &next);           \
+    Node* const value = Int32Constant(CHAR);                   \
+    StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
+                        var_offset.value(), value);            \
+    var_offset.Bind(IntPtrAdd(var_offset.value(), int_one));   \
+    Goto(&next);                                               \
+    Bind(&next);                                               \
   } while (false)
 
-    a->Goto(&label_global);
-    CASE_FOR_FLAG(JSRegExp::kGlobal, 'g', label_global, label_ignorecase);
-    CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i', label_ignorecase,
-                  label_multiline);
-    CASE_FOR_FLAG(JSRegExp::kMultiline, 'm', label_multiline, label_unicode);
-    CASE_FOR_FLAG(JSRegExp::kUnicode, 'u', label_unicode, label_sticky);
-    CASE_FOR_FLAG(JSRegExp::kSticky, 'y', label_sticky, out);
+    CASE_FOR_FLAG(JSRegExp::kGlobal, 'g');
+    CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i');
+    CASE_FOR_FLAG(JSRegExp::kMultiline, 'm');
+    CASE_FOR_FLAG(JSRegExp::kUnicode, 'u');
+    CASE_FOR_FLAG(JSRegExp::kSticky, 'y');
 #undef CASE_FOR_FLAG
 
-    a->Bind(&out);
-    a->Return(result);
+    return result;
   }
 }
 
-// ES6 21.2.5.10.
-BUILTIN(RegExpPrototypeSourceGetter) {
-  HandleScope scope(isolate);
+// ES#sec-isregexp IsRegExp ( argument )
+Node* RegExpBuiltinsAssembler::IsRegExp(Node* const context,
+                                        Node* const maybe_receiver) {
+  Label out(this), if_isregexp(this);
 
-  Handle<Object> recv = args.receiver();
-  if (!recv->IsJSRegExp()) {
-    Handle<JSFunction> regexp_fun = isolate->regexp_function();
-    if (*recv == regexp_fun->prototype()) {
-      isolate->CountUsage(v8::Isolate::kRegExpPrototypeSourceGetter);
-      return *isolate->factory()->NewStringFromAsciiChecked("(?:)");
-    }
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate, NewTypeError(MessageTemplate::kRegExpNonRegExp,
-                              isolate->factory()->NewStringFromAsciiChecked(
-                                  "RegExp.prototype.source")));
+  Variable var_result(this, MachineRepresentation::kWord32, Int32Constant(0));
+
+  GotoIf(TaggedIsSmi(maybe_receiver), &out);
+  GotoIfNot(IsJSReceiver(maybe_receiver), &out);
+
+  Node* const receiver = maybe_receiver;
+
+  // Check @@match.
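+  // Per the spec, any object with a truthy @@match property is treated as a
+  // RegExp, e.g. ({ [Symbol.match]: true }) (illustrative).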
+  {
+    Callable getproperty_callable = CodeFactory::GetProperty(isolate());
+    Node* const name = HeapConstant(isolate()->factory()->match_symbol());
+    Node* const value = CallStub(getproperty_callable, context, receiver, name);
+
+    Label match_isundefined(this), match_isnotundefined(this);
+    Branch(IsUndefined(value), &match_isundefined, &match_isnotundefined);
+
+    Bind(&match_isundefined);
+    Branch(HasInstanceType(receiver, JS_REGEXP_TYPE), &if_isregexp, &out);
+
+    Bind(&match_isnotundefined);
+    BranchIfToBooleanIsTrue(value, &if_isregexp, &out);
   }
 
-  Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(recv);
-  return regexp->source();
+  Bind(&if_isregexp);
+  var_result.Bind(Int32Constant(1));
+  Goto(&out);
+
+  Bind(&out);
+  return var_result.value();
+}
+
+// ES#sec-regexpinitialize
+// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
+Node* RegExpBuiltinsAssembler::RegExpInitialize(Node* const context,
+                                                Node* const regexp,
+                                                Node* const maybe_pattern,
+                                                Node* const maybe_flags) {
+  // Normalize pattern.
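+  // ToString(undefined) would yield "undefined", so undefined must be mapped
+  // to the empty string explicitly (ES#sec-regexpinitialize, steps 1-4).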
+  Node* const pattern =
+      Select(IsUndefined(maybe_pattern), [=] { return EmptyStringConstant(); },
+             [=] { return ToString(context, maybe_pattern); },
+             MachineRepresentation::kTagged);
+
+  // Normalize flags.
+  Node* const flags =
+      Select(IsUndefined(maybe_flags), [=] { return EmptyStringConstant(); },
+             [=] { return ToString(context, maybe_flags); },
+             MachineRepresentation::kTagged);
+
+  // Initialize.
+
+  return CallRuntime(Runtime::kRegExpInitializeAndCompile, context, regexp,
+                     pattern, flags);
+}
+
+TF_BUILTIN(RegExpPrototypeFlagsGetter, RegExpBuiltinsAssembler) {
+  Node* const maybe_receiver = Parameter(0);
+  Node* const context = Parameter(3);
+
+  Node* const map = ThrowIfNotJSReceiver(context, maybe_receiver,
+                                         MessageTemplate::kRegExpNonObject,
+                                         "RegExp.prototype.flags");
+  Node* const receiver = maybe_receiver;
+
+  Label if_isfastpath(this), if_isslowpath(this, Label::kDeferred);
+  Branch(IsInitialRegExpMap(context, receiver, map), &if_isfastpath,
+         &if_isslowpath);
+
+  Bind(&if_isfastpath);
+  Return(FlagsGetter(context, receiver, true));
+
+  Bind(&if_isslowpath);
+  Return(FlagsGetter(context, receiver, false));
+}
+
+// ES#sec-regexp-pattern-flags
+// RegExp ( pattern, flags )
+TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
+  Node* const pattern = Parameter(1);
+  Node* const flags = Parameter(2);
+  Node* const new_target = Parameter(3);
+  Node* const context = Parameter(5);
+
+  Isolate* isolate = this->isolate();
+
+  Variable var_flags(this, MachineRepresentation::kTagged, flags);
+  Variable var_pattern(this, MachineRepresentation::kTagged, pattern);
+  Variable var_new_target(this, MachineRepresentation::kTagged, new_target);
+
+  Node* const native_context = LoadNativeContext(context);
+  Node* const regexp_function =
+      LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+
+  Node* const pattern_is_regexp = IsRegExp(context, pattern);
+
+  {
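+    // ES6 21.2.3.1 step 3: when called without new on a pattern for which
+    // IsRegExp holds and whose .constructor is %RegExp% (with flags
+    // undefined), return the pattern unchanged, e.g. RegExp(re) === re.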
+    Label next(this);
+
+    GotoIfNot(IsUndefined(new_target), &next);
+    var_new_target.Bind(regexp_function);
+
+    GotoIfNot(pattern_is_regexp, &next);
+    GotoIfNot(IsUndefined(flags), &next);
+
+    Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+    Node* const name = HeapConstant(isolate->factory()->constructor_string());
+    Node* const value = CallStub(getproperty_callable, context, pattern, name);
+
+    GotoIfNot(WordEqual(value, regexp_function), &next);
+    Return(pattern);
+
+    Bind(&next);
+  }
+
+  {
+    Label next(this), if_patternisfastregexp(this),
+        if_patternisslowregexp(this);
+    GotoIf(TaggedIsSmi(pattern), &next);
+
+    GotoIf(HasInstanceType(pattern, JS_REGEXP_TYPE), &if_patternisfastregexp);
+
+    Branch(pattern_is_regexp, &if_patternisslowregexp, &next);
+
+    Bind(&if_patternisfastregexp);
+    {
+      Node* const source = LoadObjectField(pattern, JSRegExp::kSourceOffset);
+      var_pattern.Bind(source);
+
+      {
+        Label inner_next(this);
+        GotoIfNot(IsUndefined(flags), &inner_next);
+
+        Node* const value = FlagsGetter(context, pattern, true);
+        var_flags.Bind(value);
+        Goto(&inner_next);
+
+        Bind(&inner_next);
+      }
+
+      Goto(&next);
+    }
+
+    Bind(&if_patternisslowregexp);
+    {
+      Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+
+      {
+        Node* const name = HeapConstant(isolate->factory()->source_string());
+        Node* const value =
+            CallStub(getproperty_callable, context, pattern, name);
+        var_pattern.Bind(value);
+      }
+
+      {
+        Label inner_next(this);
+        GotoIfNot(IsUndefined(flags), &inner_next);
+
+        Node* const name = HeapConstant(isolate->factory()->flags_string());
+        Node* const value =
+            CallStub(getproperty_callable, context, pattern, name);
+        var_flags.Bind(value);
+        Goto(&inner_next);
+
+        Bind(&inner_next);
+      }
+
+      Goto(&next);
+    }
+
+    Bind(&next);
+  }
+
+  // Allocate.
+
+  Variable var_regexp(this, MachineRepresentation::kTagged);
+  {
+    Label allocate_jsregexp(this), allocate_generic(this, Label::kDeferred),
+        next(this);
+    Branch(WordEqual(var_new_target.value(), regexp_function),
+           &allocate_jsregexp, &allocate_generic);
+
+    Bind(&allocate_jsregexp);
+    {
+      Node* const initial_map = LoadObjectField(
+          regexp_function, JSFunction::kPrototypeOrInitialMapOffset);
+      Node* const regexp = AllocateJSObjectFromMap(initial_map);
+      var_regexp.Bind(regexp);
+      Goto(&next);
+    }
+
+    Bind(&allocate_generic);
+    {
+      ConstructorBuiltinsAssembler constructor_assembler(this->state());
+      Node* const regexp = constructor_assembler.EmitFastNewObject(
+          context, regexp_function, var_new_target.value());
+      var_regexp.Bind(regexp);
+      Goto(&next);
+    }
+
+    Bind(&next);
+  }
+
+  Node* const result = RegExpInitialize(context, var_regexp.value(),
+                                        var_pattern.value(), var_flags.value());
+  Return(result);
+}
+
+// ES#sec-regexp.prototype.compile
+// RegExp.prototype.compile ( pattern, flags )
+TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
+  Node* const maybe_receiver = Parameter(0);
+  Node* const maybe_pattern = Parameter(1);
+  Node* const maybe_flags = Parameter(2);
+  Node* const context = Parameter(5);
+
+  ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE,
+                         "RegExp.prototype.compile");
+  Node* const receiver = maybe_receiver;
+
+  Variable var_flags(this, MachineRepresentation::kTagged, maybe_flags);
+  Variable var_pattern(this, MachineRepresentation::kTagged, maybe_pattern);
+
+  // Handle a JSRegExp pattern.
+  {
+    Label next(this);
+
+    GotoIf(TaggedIsSmi(maybe_pattern), &next);
+    GotoIfNot(HasInstanceType(maybe_pattern, JS_REGEXP_TYPE), &next);
+
+    Node* const pattern = maybe_pattern;
+
+    // {maybe_flags} must be undefined in this case, otherwise throw.
+    {
+      Label next(this);
+      GotoIf(IsUndefined(maybe_flags), &next);
+
+      Node* const message_id = SmiConstant(MessageTemplate::kRegExpFlags);
+      TailCallRuntime(Runtime::kThrowTypeError, context, message_id);
+
+      Bind(&next);
+    }
+
+    Node* const new_flags = FlagsGetter(context, pattern, true);
+    Node* const new_pattern = LoadObjectField(pattern, JSRegExp::kSourceOffset);
+
+    var_flags.Bind(new_flags);
+    var_pattern.Bind(new_pattern);
+
+    Goto(&next);
+    Bind(&next);
+  }
+
+  Node* const result = RegExpInitialize(context, receiver, var_pattern.value(),
+                                        var_flags.value());
+  Return(result);
+}
+
+// ES6 21.2.5.10.
+TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
+  Node* const receiver = Parameter(0);
+  Node* const context = Parameter(3);
+
+  // Check whether we have an unmodified regexp instance.
+  Label if_isjsregexp(this), if_isnotjsregexp(this, Label::kDeferred);
+
+  GotoIf(TaggedIsSmi(receiver), &if_isnotjsregexp);
+  Branch(HasInstanceType(receiver, JS_REGEXP_TYPE), &if_isjsregexp,
+         &if_isnotjsregexp);
+
+  Bind(&if_isjsregexp);
+  {
+    Node* const source = LoadObjectField(receiver, JSRegExp::kSourceOffset);
+    Return(source);
+  }
+
+  Bind(&if_isnotjsregexp);
+  {
+    Isolate* isolate = this->isolate();
+    Node* const native_context = LoadNativeContext(context);
+    Node* const regexp_fun =
+        LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+    Node* const initial_map =
+        LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+    Node* const initial_prototype = LoadMapPrototype(initial_map);
+
+    Label if_isprototype(this), if_isnotprototype(this);
+    Branch(WordEqual(receiver, initial_prototype), &if_isprototype,
+           &if_isnotprototype);
+
+    Bind(&if_isprototype);
+    {
+      const int counter = v8::Isolate::kRegExpPrototypeSourceGetter;
+      Node* const counter_smi = SmiConstant(counter);
+      CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
+
+      Node* const result =
+          HeapConstant(isolate->factory()->NewStringFromAsciiChecked("(?:)"));
+      Return(result);
+    }
+
+    Bind(&if_isnotprototype);
+    {
+      Node* const message_id =
+          SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
+      Node* const method_name_str =
+          HeapConstant(isolate->factory()->NewStringFromAsciiChecked(
+              "RegExp.prototype.source"));
+      TailCallRuntime(Runtime::kThrowTypeError, context, message_id,
+                      method_name_str);
+    }
+  }
 }
 
 BUILTIN(RegExpPrototypeToString) {
@@ -781,126 +994,166 @@
   RETURN_RESULT_OR_FAILURE(isolate, builder.Finish());
 }
 
-// ES6 21.2.4.2.
-BUILTIN(RegExpPrototypeSpeciesGetter) {
-  HandleScope scope(isolate);
-  return *args.receiver();
-}
-
-namespace {
-
 // Fast-path implementation for flag checks on an unmodified JSRegExp instance.
-compiler::Node* FastFlagGetter(CodeStubAssembler* a,
-                               compiler::Node* const regexp,
-                               JSRegExp::Flag flag) {
-  typedef compiler::Node Node;
-
-  Node* const smi_zero = a->SmiConstant(Smi::kZero);
-  Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
-  Node* const mask = a->SmiConstant(Smi::FromInt(flag));
-  Node* const is_flag_set = a->WordNotEqual(a->WordAnd(flags, mask), smi_zero);
+Node* RegExpBuiltinsAssembler::FastFlagGetter(Node* const regexp,
+                                              JSRegExp::Flag flag) {
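+  // The flags are stored as a smi bitfield directly on the instance; test
+  // the requested bit without any property lookup.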
+  Node* const smi_zero = SmiConstant(Smi::kZero);
+  Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+  Node* const mask = SmiConstant(Smi::FromInt(flag));
+  Node* const is_flag_set = WordNotEqual(SmiAnd(flags, mask), smi_zero);
 
   return is_flag_set;
 }
 
-void Generate_FlagGetter(CodeStubAssembler* a, JSRegExp::Flag flag,
-                         v8::Isolate::UseCounterFeature counter,
-                         const char* method_name) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+// Load through the GetProperty stub.
+Node* RegExpBuiltinsAssembler::SlowFlagGetter(Node* const context,
+                                              Node* const regexp,
+                                              JSRegExp::Flag flag) {
+  Factory* factory = isolate()->factory();
 
-  Node* const receiver = a->Parameter(0);
-  Node* const context = a->Parameter(3);
+  Label out(this);
+  Variable var_result(this, MachineRepresentation::kWord32);
 
-  Isolate* isolate = a->isolate();
+  Node* name;
 
-  // Check whether we have an unmodified regexp instance.
-  Label if_isunmodifiedjsregexp(a),
-      if_isnotunmodifiedjsregexp(a, Label::kDeferred);
-
-  a->GotoIf(a->TaggedIsSmi(receiver), &if_isnotunmodifiedjsregexp);
-
-  Node* const receiver_map = a->LoadMap(receiver);
-  Node* const instance_type = a->LoadMapInstanceType(receiver_map);
-
-  a->Branch(a->Word32Equal(instance_type, a->Int32Constant(JS_REGEXP_TYPE)),
-            &if_isunmodifiedjsregexp, &if_isnotunmodifiedjsregexp);
-
-  a->Bind(&if_isunmodifiedjsregexp);
-  {
-    // Refer to JSRegExp's flag property on the fast-path.
-    Node* const is_flag_set = FastFlagGetter(a, receiver, flag);
-    a->Return(a->Select(is_flag_set, a->TrueConstant(), a->FalseConstant()));
+  switch (flag) {
+    case JSRegExp::kGlobal:
+      name = HeapConstant(factory->global_string());
+      break;
+    case JSRegExp::kIgnoreCase:
+      name = HeapConstant(factory->ignoreCase_string());
+      break;
+    case JSRegExp::kMultiline:
+      name = HeapConstant(factory->multiline_string());
+      break;
+    case JSRegExp::kSticky:
+      name = HeapConstant(factory->sticky_string());
+      break;
+    case JSRegExp::kUnicode:
+      name = HeapConstant(factory->unicode_string());
+      break;
+    default:
+      UNREACHABLE();
   }
 
-  a->Bind(&if_isnotunmodifiedjsregexp);
+  Callable getproperty_callable = CodeFactory::GetProperty(isolate());
+  Node* const value = CallStub(getproperty_callable, context, regexp, name);
+
+  Label if_true(this), if_false(this);
+  BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+
+  Bind(&if_true);
   {
-    Node* const native_context = a->LoadNativeContext(context);
+    var_result.Bind(Int32Constant(1));
+    Goto(&out);
+  }
+
+  Bind(&if_false);
+  {
+    var_result.Bind(Int32Constant(0));
+    Goto(&out);
+  }
+
+  Bind(&out);
+  return var_result.value();
+}
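+
+// Editorial sketch (not part of this patch): in contrast to FastFlagGetter,
+// the generic load above is observable from JS, since GetProperty can hit a
+// user-defined accessor. For example:
+//
+//   const re = /abc/;
+//   Object.defineProperty(re, 'global', { get() { return true; } });
+//   // Flag checks on {re} now take the slow path, invoke the getter, and
+//   // coerce its result with ToBoolean.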
+
+Node* RegExpBuiltinsAssembler::FlagGetter(Node* const context,
+                                          Node* const regexp,
+                                          JSRegExp::Flag flag,
+                                          bool is_fastpath) {
+  return is_fastpath ? FastFlagGetter(regexp, flag)
+                     : SlowFlagGetter(context, regexp, flag);
+}
+
+void RegExpBuiltinsAssembler::FlagGetter(JSRegExp::Flag flag,
+                                         v8::Isolate::UseCounterFeature counter,
+                                         const char* method_name) {
+  Node* const receiver = Parameter(0);
+  Node* const context = Parameter(3);
+
+  Isolate* isolate = this->isolate();
+
+  // Check whether we have an unmodified regexp instance.
+  Label if_isunmodifiedjsregexp(this),
+      if_isnotunmodifiedjsregexp(this, Label::kDeferred);
+
+  GotoIf(TaggedIsSmi(receiver), &if_isnotunmodifiedjsregexp);
+
+  Node* const receiver_map = LoadMap(receiver);
+  Node* const instance_type = LoadMapInstanceType(receiver_map);
+
+  Branch(Word32Equal(instance_type, Int32Constant(JS_REGEXP_TYPE)),
+         &if_isunmodifiedjsregexp, &if_isnotunmodifiedjsregexp);
+
+  Bind(&if_isunmodifiedjsregexp);
+  {
+    // Refer to JSRegExp's flag property on the fast-path.
+    Node* const is_flag_set = FastFlagGetter(receiver, flag);
+    Return(SelectBooleanConstant(is_flag_set));
+  }
+
+  Bind(&if_isnotunmodifiedjsregexp);
+  {
+    Node* const native_context = LoadNativeContext(context);
     Node* const regexp_fun =
-        a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
-    Node* const initial_map = a->LoadObjectField(
-        regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
-    Node* const initial_prototype = a->LoadMapPrototype(initial_map);
+        LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+    Node* const initial_map =
+        LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+    Node* const initial_prototype = LoadMapPrototype(initial_map);
 
-    Label if_isprototype(a), if_isnotprototype(a);
-    a->Branch(a->WordEqual(receiver, initial_prototype), &if_isprototype,
-              &if_isnotprototype);
+    Label if_isprototype(this), if_isnotprototype(this);
+    Branch(WordEqual(receiver, initial_prototype), &if_isprototype,
+           &if_isnotprototype);
 
-    a->Bind(&if_isprototype);
+    Bind(&if_isprototype);
     {
-      Node* const counter_smi = a->SmiConstant(Smi::FromInt(counter));
-      a->CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
-      a->Return(a->UndefinedConstant());
+      Node* const counter_smi = SmiConstant(Smi::FromInt(counter));
+      CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
+      Return(UndefinedConstant());
     }
 
-    a->Bind(&if_isnotprototype);
+    Bind(&if_isnotprototype);
     {
       Node* const message_id =
-          a->SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
-      Node* const method_name_str = a->HeapConstant(
+          SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
+      Node* const method_name_str = HeapConstant(
           isolate->factory()->NewStringFromAsciiChecked(method_name));
-      a->CallRuntime(Runtime::kThrowTypeError, context, message_id,
-                     method_name_str);
-      a->Return(a->UndefinedConstant());  // Never reached.
+      CallRuntime(Runtime::kThrowTypeError, context, message_id,
+                  method_name_str);
+      Unreachable();
     }
   }
 }
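+
+// Editorial sketch (not part of this patch): the three outcomes above map to
+// the following JS behavior (shown for the 'global' getter):
+//
+//   /x/g.global                 // true (unmodified JSRegExp fast path)
+//   const get = Object.getOwnPropertyDescriptor(
+//       RegExp.prototype, 'global').get;
+//   get.call(RegExp.prototype)  // undefined, after bumping the use counter
+//   get.call({})                // throws TypeError (kRegExpNonRegExp)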
 
-}  // namespace
-
 // ES6 21.2.5.4.
-void Builtins::Generate_RegExpPrototypeGlobalGetter(CodeStubAssembler* a) {
-  Generate_FlagGetter(a, JSRegExp::kGlobal,
-                      v8::Isolate::kRegExpPrototypeOldFlagGetter,
-                      "RegExp.prototype.global");
+TF_BUILTIN(RegExpPrototypeGlobalGetter, RegExpBuiltinsAssembler) {
+  FlagGetter(JSRegExp::kGlobal, v8::Isolate::kRegExpPrototypeOldFlagGetter,
+             "RegExp.prototype.global");
 }
 
 // ES6 21.2.5.5.
-void Builtins::Generate_RegExpPrototypeIgnoreCaseGetter(CodeStubAssembler* a) {
-  Generate_FlagGetter(a, JSRegExp::kIgnoreCase,
-                      v8::Isolate::kRegExpPrototypeOldFlagGetter,
-                      "RegExp.prototype.ignoreCase");
+TF_BUILTIN(RegExpPrototypeIgnoreCaseGetter, RegExpBuiltinsAssembler) {
+  FlagGetter(JSRegExp::kIgnoreCase, v8::Isolate::kRegExpPrototypeOldFlagGetter,
+             "RegExp.prototype.ignoreCase");
 }
 
 // ES6 21.2.5.7.
-void Builtins::Generate_RegExpPrototypeMultilineGetter(CodeStubAssembler* a) {
-  Generate_FlagGetter(a, JSRegExp::kMultiline,
-                      v8::Isolate::kRegExpPrototypeOldFlagGetter,
-                      "RegExp.prototype.multiline");
+TF_BUILTIN(RegExpPrototypeMultilineGetter, RegExpBuiltinsAssembler) {
+  FlagGetter(JSRegExp::kMultiline, v8::Isolate::kRegExpPrototypeOldFlagGetter,
+             "RegExp.prototype.multiline");
 }
 
 // ES6 21.2.5.12.
-void Builtins::Generate_RegExpPrototypeStickyGetter(CodeStubAssembler* a) {
-  Generate_FlagGetter(a, JSRegExp::kSticky,
-                      v8::Isolate::kRegExpPrototypeStickyGetter,
-                      "RegExp.prototype.sticky");
+TF_BUILTIN(RegExpPrototypeStickyGetter, RegExpBuiltinsAssembler) {
+  FlagGetter(JSRegExp::kSticky, v8::Isolate::kRegExpPrototypeStickyGetter,
+             "RegExp.prototype.sticky");
 }
 
 // ES6 21.2.5.15.
-void Builtins::Generate_RegExpPrototypeUnicodeGetter(CodeStubAssembler* a) {
-  Generate_FlagGetter(a, JSRegExp::kUnicode,
-                      v8::Isolate::kRegExpPrototypeUnicodeGetter,
-                      "RegExp.prototype.unicode");
+TF_BUILTIN(RegExpPrototypeUnicodeGetter, RegExpBuiltinsAssembler) {
+  FlagGetter(JSRegExp::kUnicode, v8::Isolate::kRegExpPrototypeUnicodeGetter,
+             "RegExp.prototype.unicode");
 }
 
 // The properties $1..$9 are the first nine capturing substrings of the last
@@ -986,722 +1239,1040 @@
   return *isolate->factory()->NewSubString(last_subject, start_index, len);
 }
 
-namespace {
-
 // ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
-compiler::Node* RegExpExec(CodeStubAssembler* a, compiler::Node* context,
-                           compiler::Node* recv, compiler::Node* string) {
-  typedef CodeStubAssembler::Variable Variable;
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+Node* RegExpBuiltinsAssembler::RegExpExec(Node* context, Node* regexp,
+                                          Node* string) {
+  Isolate* isolate = this->isolate();
 
-  Isolate* isolate = a->isolate();
+  Node* const null = NullConstant();
 
-  Node* const null = a->NullConstant();
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Label out(this), if_isfastpath(this), if_isslowpath(this);
 
-  Variable var_result(a, MachineRepresentation::kTagged);
-  Label out(a), call_builtin_exec(a), slow_path(a, Label::kDeferred);
+  Node* const map = LoadMap(regexp);
+  BranchIfFastRegExp(context, regexp, map, &if_isfastpath, &if_isslowpath);
 
-  Node* const map = a->LoadMap(recv);
-  BranchIfFastPath(a, context, map, &call_builtin_exec, &slow_path);
-
-  a->Bind(&call_builtin_exec);
+  Bind(&if_isfastpath);
   {
-    Node* const result = RegExpPrototypeExecInternal(a, context, recv, string);
+    Node* const result = RegExpPrototypeExecBody(context, regexp, string, true);
     var_result.Bind(result);
-    a->Goto(&out);
+    Goto(&out);
   }
 
-  a->Bind(&slow_path);
+  Bind(&if_isslowpath);
   {
     // Take the slow path of fetching the exec property, calling it, and
     // verifying its return value.
 
     // Get the exec property.
-    Node* const name = a->HeapConstant(isolate->factory()->exec_string());
-    Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
-    Node* const exec = a->CallStub(getproperty_callable, context, recv, name);
+    Node* const name = HeapConstant(isolate->factory()->exec_string());
+    Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+    Node* const exec = CallStub(getproperty_callable, context, regexp, name);
 
     // Is {exec} callable?
-    Label if_iscallable(a), if_isnotcallable(a);
+    Label if_iscallable(this), if_isnotcallable(this);
 
-    a->GotoIf(a->TaggedIsSmi(exec), &if_isnotcallable);
+    GotoIf(TaggedIsSmi(exec), &if_isnotcallable);
 
-    Node* const exec_map = a->LoadMap(exec);
-    a->Branch(a->IsCallableMap(exec_map), &if_iscallable, &if_isnotcallable);
+    Node* const exec_map = LoadMap(exec);
+    Branch(IsCallableMap(exec_map), &if_iscallable, &if_isnotcallable);
 
-    a->Bind(&if_iscallable);
+    Bind(&if_iscallable);
     {
       Callable call_callable = CodeFactory::Call(isolate);
-      Node* const result =
-          a->CallJS(call_callable, context, exec, recv, string);
+      Node* const result = CallJS(call_callable, context, exec, regexp, string);
 
       var_result.Bind(result);
-      a->GotoIf(a->WordEqual(result, null), &out);
+      GotoIf(WordEqual(result, null), &out);
 
-      ThrowIfNotJSReceiver(a, isolate, context, result,
+      ThrowIfNotJSReceiver(context, result,
                            MessageTemplate::kInvalidRegExpExecResult, "unused");
 
-      a->Goto(&out);
+      Goto(&out);
     }
 
-    a->Bind(&if_isnotcallable);
+    Bind(&if_isnotcallable);
     {
-      a->ThrowIfNotInstanceType(context, recv, JS_REGEXP_TYPE,
-                                "RegExp.prototype.exec");
-      a->Goto(&call_builtin_exec);
+      ThrowIfNotInstanceType(context, regexp, JS_REGEXP_TYPE,
+                             "RegExp.prototype.exec");
+
+      Node* const result =
+          RegExpPrototypeExecBody(context, regexp, string, false);
+      var_result.Bind(result);
+      Goto(&out);
     }
   }
 
-  a->Bind(&out);
+  Bind(&out);
   return var_result.value();
 }
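+
+// Editorial sketch (not part of this patch): the slow path above implements
+// the spec's RegExpExec fallback, which JS code can observe:
+//
+//   const re = /a/;
+//   re.exec = () => ({ index: 0, 0: 'fake' });  // callable and returns an
+//                                               // Object: used as the result
+//   re.exec = () => 42;   // callable but returns a non-Object, non-null
+//                         // value: kInvalidRegExpExecResult TypeError
+//   re.exec = 'nope';     // not callable: falls back to the built-in exec,
+//                         // but only if {re} really is a JSRegExp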
 
-}  // namespace
-
 // ES#sec-regexp.prototype.test
 // RegExp.prototype.test ( S )
-void Builtins::Generate_RegExpPrototypeTest(CodeStubAssembler* a) {
-  typedef compiler::Node Node;
-
-  Isolate* const isolate = a->isolate();
-
-  Node* const maybe_receiver = a->Parameter(0);
-  Node* const maybe_string = a->Parameter(1);
-  Node* const context = a->Parameter(4);
+TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
+  Node* const maybe_receiver = Parameter(0);
+  Node* const maybe_string = Parameter(1);
+  Node* const context = Parameter(4);
 
   // Ensure {maybe_receiver} is a JSReceiver.
-  ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
+  ThrowIfNotJSReceiver(context, maybe_receiver,
                        MessageTemplate::kIncompatibleMethodReceiver,
                        "RegExp.prototype.test");
   Node* const receiver = maybe_receiver;
 
   // Convert {maybe_string} to a String.
-  Node* const string = a->ToString(context, maybe_string);
+  Node* const string = ToString(context, maybe_string);
 
-  // Call exec.
-  Node* const match_indices = RegExpExec(a, context, receiver, string);
+  Label fast_path(this), slow_path(this);
+  BranchIfFastRegExp(context, receiver, LoadMap(receiver), &fast_path,
+                     &slow_path);
 
-  // Return true iff exec matched successfully.
-  Node* const result = a->Select(a->WordEqual(match_indices, a->NullConstant()),
-                                 a->FalseConstant(), a->TrueConstant());
-  a->Return(result);
+  Bind(&fast_path);
+  {
+    Label if_didnotmatch(this);
+    RegExpPrototypeExecBodyWithoutResult(context, receiver, string,
+                                         &if_didnotmatch, true);
+    Return(TrueConstant());
+
+    Bind(&if_didnotmatch);
+    Return(FalseConstant());
+  }
+
+  Bind(&slow_path);
+  {
+    // Call exec.
+    Node* const match_indices = RegExpExec(context, receiver, string);
+
+    // Return true iff exec matched successfully.
+    Node* const result =
+        SelectBooleanConstant(WordNotEqual(match_indices, NullConstant()));
+    Return(result);
+  }
+}
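+
+// Editorial sketch (not part of this patch): on both paths, test() reduces
+// to "did exec produce a match":
+//
+//   /b/.test('abc')                         // true
+//   /z/.test('abc')                         // false
+//   const re = /z/; re.exec = () => ({});   // generic path: any non-null
+//   re.test('abc')                          // object result counts -> true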
+
+Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
+                                                  Node* const index,
+                                                  Node* const is_unicode,
+                                                  bool is_fastpath) {
+  CSA_ASSERT(this, IsHeapNumberMap(LoadReceiverMap(index)));
+  if (is_fastpath) CSA_ASSERT(this, TaggedIsPositiveSmi(index));
+
+  // Default to {index} + 1.
+  Node* const index_plus_one = NumberInc(index);
+  Variable var_result(this, MachineRepresentation::kTagged, index_plus_one);
+
+  // Advancing the index has some subtle issues involving the distinction
+  // between Smis and HeapNumbers. There are three cases:
+  // * {index} is a Smi, {index_plus_one} is a Smi. The standard case.
+  // * {index} is a Smi, {index_plus_one} overflows into a HeapNumber.
+  //   In this case we can return the result early, because
+  //   {index_plus_one} > {string}.length.
+  // * {index} is a HeapNumber, {index_plus_one} is a HeapNumber. This can only
+  //   occur when {index} is outside the Smi range since we normalize
+  //   explicitly. Again we can return early.
+  if (is_fastpath) {
+    // Must be in Smi range on the fast path. We control the value of {index}
+    // on all call-sites and can never exceed the length of the string.
+    STATIC_ASSERT(String::kMaxLength + 2 < Smi::kMaxValue);
+    CSA_ASSERT(this, TaggedIsPositiveSmi(index_plus_one));
+  }
+
+  Label if_isunicode(this), out(this);
+  GotoIfNot(is_unicode, &out);
+
+  // Keep this unconditional (even on the fast path) just to be safe.
+  Branch(TaggedIsPositiveSmi(index_plus_one), &if_isunicode, &out);
+
+  Bind(&if_isunicode);
+  {
+    Node* const string_length = LoadStringLength(string);
+    GotoIfNot(SmiLessThan(index_plus_one, string_length), &out);
+
+    Node* const lead = StringCharCodeAt(string, index);
+    GotoIfNot(Word32Equal(Word32And(lead, Int32Constant(0xFC00)),
+                          Int32Constant(0xD800)),
+              &out);
+
+    Node* const trail = StringCharCodeAt(string, index_plus_one);
+    GotoIfNot(Word32Equal(Word32And(trail, Int32Constant(0xFC00)),
+                          Int32Constant(0xDC00)),
+              &out);
+
+    // At a surrogate pair, return index + 2.
+    Node* const index_plus_two = NumberInc(index_plus_one);
+    var_result.Bind(index_plus_two);
+
+    Goto(&out);
+  }
+
+  Bind(&out);
+  return var_result.value();
+}
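+
+// Editorial sketch (not part of this patch): the two mask checks above are
+// standard surrogate-pair detection. For '\u{1F600}' (encoded as the pair
+// 0xD83D 0xDE00):
+//
+//   0xD83D & 0xFC00 == 0xD800   // lead surrogate at {index}
+//   0xDE00 & 0xFC00 == 0xDC00   // trail surrogate at {index} + 1
+//
+// so with {is_unicode} set the index advances by 2; in all other cases the
+// default of {index} + 1 stands.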
+
+namespace {
+
+// Utility class implementing a growable fixed array through CSA.
+class GrowableFixedArray {
+  typedef CodeStubAssembler::Label Label;
+  typedef CodeStubAssembler::Variable Variable;
+
+ public:
+  explicit GrowableFixedArray(CodeStubAssembler* a)
+      : assembler_(a),
+        var_array_(a, MachineRepresentation::kTagged),
+        var_length_(a, MachineType::PointerRepresentation()),
+        var_capacity_(a, MachineType::PointerRepresentation()) {
+    Initialize();
+  }
+
+  Node* length() const { return var_length_.value(); }
+
+  Variable* var_array() { return &var_array_; }
+  Variable* var_length() { return &var_length_; }
+  Variable* var_capacity() { return &var_capacity_; }
+
+  void Push(Node* const value) {
+    CodeStubAssembler* a = assembler_;
+
+    Node* const length = var_length_.value();
+    Node* const capacity = var_capacity_.value();
+
+    Label grow(a), store(a);
+    a->Branch(a->IntPtrEqual(capacity, length), &grow, &store);
+
+    a->Bind(&grow);
+    {
+      Node* const new_capacity = NewCapacity(a, capacity);
+      Node* const new_array = ResizeFixedArray(length, new_capacity);
+
+      var_capacity_.Bind(new_capacity);
+      var_array_.Bind(new_array);
+      a->Goto(&store);
+    }
+
+    a->Bind(&store);
+    {
+      Node* const array = var_array_.value();
+      a->StoreFixedArrayElement(array, length, value);
+
+      Node* const new_length = a->IntPtrAdd(length, a->IntPtrConstant(1));
+      var_length_.Bind(new_length);
+    }
+  }
+
+  Node* ToJSArray(Node* const context) {
+    CodeStubAssembler* a = assembler_;
+
+    const ElementsKind kind = FAST_ELEMENTS;
+
+    Node* const native_context = a->LoadNativeContext(context);
+    Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
+
+    // Shrink to fit if necessary.
+    {
+      Label next(a);
+
+      Node* const length = var_length_.value();
+      Node* const capacity = var_capacity_.value();
+
+      a->GotoIf(a->WordEqual(length, capacity), &next);
+
+      Node* const array = ResizeFixedArray(length, length);
+      var_array_.Bind(array);
+      var_capacity_.Bind(length);
+      a->Goto(&next);
+
+      a->Bind(&next);
+    }
+
+    Node* const result_length = a->SmiTag(length());
+    Node* const result = a->AllocateUninitializedJSArrayWithoutElements(
+        kind, array_map, result_length, nullptr);
+
+    // Note: The backing store was already shrunk to the exact length above,
+    // so it can be installed as the elements of the result directly.
+
+    a->StoreObjectField(result, JSObject::kElementsOffset, var_array_.value());
+
+    return result;
+  }
+
+ private:
+  void Initialize() {
+    CodeStubAssembler* a = assembler_;
+
+    const ElementsKind kind = FAST_ELEMENTS;
+
+    static const int kInitialArraySize = 8;
+    Node* const capacity = a->IntPtrConstant(kInitialArraySize);
+    Node* const array = a->AllocateFixedArray(kind, capacity);
+
+    a->FillFixedArrayWithValue(kind, array, a->IntPtrConstant(0), capacity,
+                               Heap::kTheHoleValueRootIndex);
+
+    var_array_.Bind(array);
+    var_capacity_.Bind(capacity);
+    var_length_.Bind(a->IntPtrConstant(0));
+  }
+
+  Node* NewCapacity(CodeStubAssembler* a, Node* const current_capacity) {
+    CSA_ASSERT(a, a->IntPtrGreaterThan(current_capacity, a->IntPtrConstant(0)));
+
+    // Growth rate is analogous to JSObject::NewElementsCapacity:
+    // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
+
+    Node* const new_capacity = a->IntPtrAdd(
+        a->IntPtrAdd(current_capacity, a->WordShr(current_capacity, 1)),
+        a->IntPtrConstant(16));
+
+    return new_capacity;
+  }
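+
+  // Editorial sketch (not part of this patch): from the initial capacity of
+  // 8 this yields the sequence 8 -> 28 -> 58 -> 103 -> ..., i.e. roughly
+  // 1.5x growth per step plus a constant 16 that amortizes small arrays.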
+
+  // Creates a new array with {new_capacity} and copies the first
+  // {element_count} elements from the current array.
+  Node* ResizeFixedArray(Node* const element_count, Node* const new_capacity) {
+    CodeStubAssembler* a = assembler_;
+
+    CSA_ASSERT(a, a->IntPtrGreaterThan(element_count, a->IntPtrConstant(0)));
+    CSA_ASSERT(a, a->IntPtrGreaterThan(new_capacity, a->IntPtrConstant(0)));
+    CSA_ASSERT(a, a->IntPtrGreaterThanOrEqual(new_capacity, element_count));
+
+    const ElementsKind kind = FAST_ELEMENTS;
+    const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
+    const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
+    const CodeStubAssembler::AllocationFlags flags =
+        CodeStubAssembler::kAllowLargeObjectAllocation;
+
+    Node* const from_array = var_array_.value();
+    Node* const to_array =
+        a->AllocateFixedArray(kind, new_capacity, mode, flags);
+    a->CopyFixedArrayElements(kind, from_array, kind, to_array, element_count,
+                              new_capacity, barrier_mode, mode);
+
+    return to_array;
+  }
+
+ private:
+  CodeStubAssembler* const assembler_;
+  Variable var_array_;
+  Variable var_length_;
+  Variable var_capacity_;
+};
+
+}  // namespace
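+
+// Editorial sketch (not part of this patch): typical use of the class above
+// from assembler code (any CodeStubAssembler subclass):
+//
+//   GrowableFixedArray array(this);
+//   array.Push(value);                  // grows the backing store on demand
+//   Node* result = array.ToJSArray(context);  // shrinks to fit and wraps
+//                                              // the elements in a JSArray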
+
+void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
+                                                       Node* const regexp,
+                                                       Node* const string,
+                                                       const bool is_fastpath) {
+  Isolate* const isolate = this->isolate();
+
+  Node* const null = NullConstant();
+  Node* const int_zero = IntPtrConstant(0);
+  Node* const smi_zero = SmiConstant(Smi::kZero);
+
+  Node* const is_global =
+      FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
+
+  Label if_isglobal(this), if_isnotglobal(this);
+  Branch(is_global, &if_isglobal, &if_isnotglobal);
+
+  Bind(&if_isnotglobal);
+  {
+    Node* const result =
+        is_fastpath ? RegExpPrototypeExecBody(context, regexp, string, true)
+                    : RegExpExec(context, regexp, string);
+    Return(result);
+  }
+
+  Bind(&if_isglobal);
+  {
+    Node* const is_unicode =
+        FlagGetter(context, regexp, JSRegExp::kUnicode, is_fastpath);
+
+    StoreLastIndex(context, regexp, smi_zero, is_fastpath);
+
+    // Allocate an array to store the resulting match strings.
+
+    GrowableFixedArray array(this);
+
+    // Loop preparations. Within the loop, collect results from RegExpExec
+    // and store match strings in the array.
+
+    Variable* vars[] = {array.var_array(), array.var_length(),
+                        array.var_capacity()};
+    Label loop(this, 3, vars), out(this);
+    Goto(&loop);
+
+    Bind(&loop);
+    {
+      Variable var_match(this, MachineRepresentation::kTagged);
+
+      Label if_didmatch(this), if_didnotmatch(this);
+      if (is_fastpath) {
+        // On the fast path, grab the matching string from the raw match index
+        // array.
+        Node* const match_indices = RegExpPrototypeExecBodyWithoutResult(
+            context, regexp, string, &if_didnotmatch, true);
+
+        Node* const match_from = LoadFixedArrayElement(
+            match_indices, RegExpMatchInfo::kFirstCaptureIndex);
+        Node* const match_to = LoadFixedArrayElement(
+            match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
+
+        Node* match = SubString(context, string, match_from, match_to);
+        var_match.Bind(match);
+
+        Goto(&if_didmatch);
+      } else {
+        DCHECK(!is_fastpath);
+        Node* const result = RegExpExec(context, regexp, string);
+
+        Label load_match(this);
+        Branch(WordEqual(result, null), &if_didnotmatch, &load_match);
+
+        Bind(&load_match);
+        {
+          Label fast_result(this), slow_result(this);
+          BranchIfFastRegExpResult(context, LoadMap(result), &fast_result,
+                                   &slow_result);
+
+          Bind(&fast_result);
+          {
+            Node* const result_fixed_array = LoadElements(result);
+            Node* const match = LoadFixedArrayElement(result_fixed_array, 0);
+
+            // The match is guaranteed to be a string on the fast path.
+            CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(match)));
+
+            var_match.Bind(match);
+            Goto(&if_didmatch);
+          }
+
+          Bind(&slow_result);
+          {
+            // TODO(ishell): Use GetElement stub once it's available.
+            Node* const name = smi_zero;
+            Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+            Node* const match =
+                CallStub(getproperty_callable, context, result, name);
+
+            var_match.Bind(ToString(context, match));
+            Goto(&if_didmatch);
+          }
+        }
+      }
+
+      Bind(&if_didnotmatch);
+      {
+        // Return null if there were no matches, otherwise just exit the loop.
+        GotoIfNot(IntPtrEqual(array.length(), int_zero), &out);
+        Return(null);
+      }
+
+      Bind(&if_didmatch);
+      {
+        Node* match = var_match.value();
+
+        // Store the match, growing the fixed array if needed.
+
+        array.Push(match);
+
+        // Advance last index if the match is the empty string.
+
+        Node* const match_length = LoadStringLength(match);
+        GotoIfNot(SmiEqual(match_length, smi_zero), &loop);
+
+        Node* last_index = LoadLastIndex(context, regexp, is_fastpath);
+        if (is_fastpath) {
+          CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
+        } else {
+          Callable tolength_callable = CodeFactory::ToLength(isolate);
+          last_index = CallStub(tolength_callable, context, last_index);
+        }
+
+        Node* const new_last_index =
+            AdvanceStringIndex(string, last_index, is_unicode, is_fastpath);
+
+        if (is_fastpath) {
+          // On the fast path, we can be certain that lastIndex can never be
+          // incremented to overflow the Smi range since the maximal string
+          // length is less than the maximal Smi value.
+          STATIC_ASSERT(String::kMaxLength < Smi::kMaxValue);
+          CSA_ASSERT(this, TaggedIsPositiveSmi(new_last_index));
+        }
+
+        StoreLastIndex(context, regexp, new_last_index, is_fastpath);
+
+        Goto(&loop);
+      }
+    }
+
+    Bind(&out);
+    {
+      // Wrap the match in a JSArray.
+
+      Node* const result = array.ToJSArray(context);
+      Return(result);
+    }
+  }
 }
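+
+// Editorial sketch (not part of this patch): the global loop above produces
+// the familiar @@match results:
+//
+//   'aXbX'.match(/X/g)   // ['X', 'X']; lastIndex is reset to 0 up front
+//   'ab'.match(/(?:)/g)  // ['', '', '']; empty matches advance lastIndex
+//                        // manually via AdvanceStringIndex
+//   'ab'.match(/z/g)     // null; nothing was pushed onto the array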
 
 // ES#sec-regexp.prototype-@@match
 // RegExp.prototype [ @@match ] ( string )
-BUILTIN(RegExpPrototypeMatch) {
-  HandleScope scope(isolate);
-  CHECK_RECEIVER(JSReceiver, recv, "RegExp.prototype.@@match");
+TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
+  Node* const maybe_receiver = Parameter(0);
+  Node* const maybe_string = Parameter(1);
+  Node* const context = Parameter(4);
 
-  Handle<Object> string_obj = args.atOrUndefined(isolate, 1);
+  // Ensure {maybe_receiver} is a JSReceiver.
+  ThrowIfNotJSReceiver(context, maybe_receiver,
+                       MessageTemplate::kIncompatibleMethodReceiver,
+                       "RegExp.prototype.@@match");
+  Node* const receiver = maybe_receiver;
 
-  Handle<String> string;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
-                                     Object::ToString(isolate, string_obj));
+  // Convert {maybe_string} to a String.
+  Node* const string = ToString(context, maybe_string);
 
-  Handle<Object> global_obj;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, global_obj,
-      JSReceiver::GetProperty(recv, isolate->factory()->global_string()));
-  const bool global = global_obj->BooleanValue();
+  Label fast_path(this), slow_path(this);
+  BranchIfFastRegExp(context, receiver, LoadMap(receiver), &fast_path,
+                     &slow_path);
 
-  if (!global) {
-    RETURN_RESULT_OR_FAILURE(
-        isolate,
-        RegExpUtils::RegExpExec(isolate, recv, string,
-                                isolate->factory()->undefined_value()));
-  }
+  Bind(&fast_path);
+  RegExpPrototypeMatchBody(context, receiver, string, true);
 
-  Handle<Object> unicode_obj;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, unicode_obj,
-      JSReceiver::GetProperty(recv, isolate->factory()->unicode_string()));
-  const bool unicode = unicode_obj->BooleanValue();
-
-  RETURN_FAILURE_ON_EXCEPTION(isolate,
-                              RegExpUtils::SetLastIndex(isolate, recv, 0));
-
-  static const int kInitialArraySize = 8;
-  Handle<FixedArray> elems =
-      isolate->factory()->NewFixedArrayWithHoles(kInitialArraySize);
-
-  int n = 0;
-  for (;; n++) {
-    Handle<Object> result;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result,
-        RegExpUtils::RegExpExec(isolate, recv, string,
-                                isolate->factory()->undefined_value()));
-
-    if (result->IsNull(isolate)) {
-      if (n == 0) return isolate->heap()->null_value();
-      break;
-    }
-
-    Handle<Object> match_obj;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match_obj,
-                                       Object::GetElement(isolate, result, 0));
-
-    Handle<String> match;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match,
-                                       Object::ToString(isolate, match_obj));
-
-    elems = FixedArray::SetAndGrow(elems, n, match);
-
-    if (match->length() == 0) {
-      RETURN_FAILURE_ON_EXCEPTION(isolate, RegExpUtils::SetAdvancedStringIndex(
-                                               isolate, recv, string, unicode));
-    }
-  }
-
-  elems->Shrink(n);
-  return *isolate->factory()->NewJSArrayWithElements(elems);
+  Bind(&slow_path);
+  RegExpPrototypeMatchBody(context, receiver, string, false);
 }
 
-namespace {
-
-void Generate_RegExpPrototypeSearchBody(CodeStubAssembler* a,
-                                        compiler::Node* const receiver,
-                                        compiler::Node* const string,
-                                        compiler::Node* const context,
-                                        bool is_fastpath) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-
-  Isolate* const isolate = a->isolate();
-
-  Node* const smi_zero = a->SmiConstant(Smi::kZero);
-
+void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
+    Node* const context, Node* const regexp, Node* const string) {
   // Grab the initial value of last index.
-  Node* const previous_last_index =
-      is_fastpath ? FastLoadLastIndex(a, context, receiver)
-                  : SlowLoadLastIndex(a, context, receiver);
+  Node* const previous_last_index = FastLoadLastIndex(regexp);
 
   // Ensure last index is 0.
-  if (is_fastpath) {
-    FastStoreLastIndex(a, context, receiver, smi_zero);
-  } else {
-    Label next(a);
-    a->GotoIf(a->SameValue(previous_last_index, smi_zero, context), &next);
+  FastStoreLastIndex(regexp, SmiConstant(Smi::kZero));
 
-    SlowStoreLastIndex(a, context, receiver, smi_zero);
-    a->Goto(&next);
-    a->Bind(&next);
+  // Call exec.
+  Label if_didnotmatch(this);
+  Node* const match_indices = RegExpPrototypeExecBodyWithoutResult(
+      context, regexp, string, &if_didnotmatch, true);
+
+  // Successful match.
+  {
+    // Reset last index.
+    FastStoreLastIndex(regexp, previous_last_index);
+
+    // Return the index of the match.
+    Node* const index = LoadFixedArrayElement(
+        match_indices, RegExpMatchInfo::kFirstCaptureIndex);
+    Return(index);
+  }
+
+  Bind(&if_didnotmatch);
+  {
+    // Reset last index and return -1.
+    FastStoreLastIndex(regexp, previous_last_index);
+    Return(SmiConstant(-1));
+  }
+}
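+
+// Editorial sketch (not part of this patch): @@search saves and restores
+// lastIndex around the match, so it is side-effect free in that regard:
+//
+//   const re = /b/g;
+//   re.lastIndex = 42;
+//   'abc'.search(re)   // 1
+//   re.lastIndex       // still 42
+//   'abc'.search(/z/)  // -1 when nothing matches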
+
+void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
+    Node* const context, Node* const regexp, Node* const string) {
+  Isolate* const isolate = this->isolate();
+
+  Node* const smi_zero = SmiConstant(Smi::kZero);
+
+  // Grab the initial value of last index.
+  Node* const previous_last_index = SlowLoadLastIndex(context, regexp);
+
+  // Ensure last index is 0.
+  {
+    Label next(this);
+    GotoIf(SameValue(previous_last_index, smi_zero, context), &next);
+
+    SlowStoreLastIndex(context, regexp, smi_zero);
+    Goto(&next);
+    Bind(&next);
   }
 
   // Call exec.
-  Node* const match_indices =
-      is_fastpath ? RegExpPrototypeExecInternal(a, context, receiver, string)
-                  : RegExpExec(a, context, receiver, string);
+  Node* const exec_result = RegExpExec(context, regexp, string);
 
   // Reset last index if necessary.
-  if (is_fastpath) {
-    FastStoreLastIndex(a, context, receiver, previous_last_index);
-  } else {
-    Label next(a);
-    Node* const current_last_index = SlowLoadLastIndex(a, context, receiver);
+  {
+    Label next(this);
+    Node* const current_last_index = SlowLoadLastIndex(context, regexp);
 
-    a->GotoIf(a->SameValue(current_last_index, previous_last_index, context),
-              &next);
+    GotoIf(SameValue(current_last_index, previous_last_index, context), &next);
 
-    SlowStoreLastIndex(a, context, receiver, previous_last_index);
-    a->Goto(&next);
-    a->Bind(&next);
+    SlowStoreLastIndex(context, regexp, previous_last_index);
+    Goto(&next);
+
+    Bind(&next);
   }
 
   // Return -1 if no match was found.
   {
-    Label next(a);
-    a->GotoUnless(a->WordEqual(match_indices, a->NullConstant()), &next);
-    a->Return(a->SmiConstant(-1));
-    a->Bind(&next);
+    Label next(this);
+    GotoIfNot(WordEqual(exec_result, NullConstant()), &next);
+    Return(SmiConstant(-1));
+    Bind(&next);
   }
 
   // Return the index of the match.
   {
-    Label fast_result(a), slow_result(a, Label::kDeferred);
+    Label fast_result(this), slow_result(this, Label::kDeferred);
+    BranchIfFastRegExpResult(context, LoadMap(exec_result), &fast_result,
+                             &slow_result);
 
-    Node* const native_context = a->LoadNativeContext(context);
-    Node* const initial_regexp_result_map =
-        a->LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
-    Node* const match_indices_map = a->LoadMap(match_indices);
-
-    a->Branch(a->WordEqual(match_indices_map, initial_regexp_result_map),
-              &fast_result, &slow_result);
-
-    a->Bind(&fast_result);
+    Bind(&fast_result);
     {
       Node* const index =
-          a->LoadObjectField(match_indices, JSRegExpResult::kIndexOffset,
-                             MachineType::AnyTagged());
-      a->Return(index);
+          LoadObjectField(exec_result, JSRegExpResult::kIndexOffset);
+      Return(index);
     }
 
-    a->Bind(&slow_result);
+    Bind(&slow_result);
     {
-      Node* const name = a->HeapConstant(isolate->factory()->index_string());
-      Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
+      Node* const name = HeapConstant(isolate->factory()->index_string());
+      Callable getproperty_callable = CodeFactory::GetProperty(isolate);
       Node* const index =
-          a->CallStub(getproperty_callable, context, match_indices, name);
-      a->Return(index);
+          CallStub(getproperty_callable, context, exec_result, name);
+      Return(index);
     }
   }
 }
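+
+// Editorial sketch (not part of this patch): the SameValue guards above make
+// the lastIndex stores unobservable whenever the value would not change:
+//
+//   const o = { lastIndex: 0, exec: () => null };
+//   RegExp.prototype[Symbol.search].call(o, 'str')   // -1, and 'lastIndex'
+//                                                    // is never written to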
 
-}  // namespace
-
 // ES#sec-regexp.prototype-@@search
 // RegExp.prototype [ @@search ] ( string )
-void Builtins::Generate_RegExpPrototypeSearch(CodeStubAssembler* a) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-
-  Isolate* const isolate = a->isolate();
-
-  Node* const maybe_receiver = a->Parameter(0);
-  Node* const maybe_string = a->Parameter(1);
-  Node* const context = a->Parameter(4);
+TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
+  Node* const maybe_receiver = Parameter(0);
+  Node* const maybe_string = Parameter(1);
+  Node* const context = Parameter(4);
 
   // Ensure {maybe_receiver} is a JSReceiver.
-  Node* const map =
-      ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
-                           MessageTemplate::kIncompatibleMethodReceiver,
-                           "RegExp.prototype.@@search");
+  ThrowIfNotJSReceiver(context, maybe_receiver,
+                       MessageTemplate::kIncompatibleMethodReceiver,
+                       "RegExp.prototype.@@search");
   Node* const receiver = maybe_receiver;
 
   // Convert {maybe_string} to a String.
-  Node* const string = a->ToString(context, maybe_string);
+  Node* const string = ToString(context, maybe_string);
 
-  Label fast_path(a), slow_path(a);
-  BranchIfFastPath(a, context, map, &fast_path, &slow_path);
+  Label fast_path(this), slow_path(this);
+  BranchIfFastRegExp(context, receiver, LoadMap(receiver), &fast_path,
+                     &slow_path);
 
-  a->Bind(&fast_path);
-  Generate_RegExpPrototypeSearchBody(a, receiver, string, context, true);
+  Bind(&fast_path);
+  RegExpPrototypeSearchBodyFast(context, receiver, string);
 
-  a->Bind(&slow_path);
-  Generate_RegExpPrototypeSearchBody(a, receiver, string, context, false);
+  Bind(&slow_path);
+  RegExpPrototypeSearchBodySlow(context, receiver, string);
 }
 
-namespace {
+// Generates the fast path for @@split. {regexp} is an unmodified JSRegExp,
+// {string} is a String, and {limit} is a Smi.
+void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
+                                                       Node* const regexp,
+                                                       Node* const string,
+                                                       Node* const limit) {
+  Isolate* isolate = this->isolate();
 
-MUST_USE_RESULT MaybeHandle<Object> ToUint32(Isolate* isolate,
-                                             Handle<Object> object,
-                                             uint32_t* out) {
-  if (object->IsUndefined(isolate)) {
-    *out = kMaxUInt32;
-    return object;
+  Node* const null = NullConstant();
+  Node* const smi_zero = SmiConstant(0);
+  Node* const int_zero = IntPtrConstant(0);
+  Node* const int_limit = SmiUntag(limit);
+
+  const ElementsKind kind = FAST_ELEMENTS;
+  const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
+
+  Node* const allocation_site = nullptr;
+  Node* const native_context = LoadNativeContext(context);
+  Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
+
+  Label return_empty_array(this, Label::kDeferred);
+
+  // If limit is zero, return an empty array.
+  {
+    Label next(this);
+    Branch(SmiEqual(limit, smi_zero), &return_empty_array, &next);
+    Bind(&next);
   }
 
-  Handle<Object> number;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, number, Object::ToNumber(object), Object);
-  *out = NumberToUint32(*number);
-  return object;
-}
+  Node* const string_length = LoadStringLength(string);
 
-bool AtSurrogatePair(Isolate* isolate, Handle<String> string, int index) {
-  if (index + 1 >= string->length()) return false;
-  const uint16_t first = string->Get(index);
-  if (first < 0xD800 || first > 0xDBFF) return false;
-  const uint16_t second = string->Get(index + 1);
-  return (second >= 0xDC00 && second <= 0xDFFF);
-}
+  // If passed the empty {string}, return either an empty array or a singleton
+  // array depending on whether the {regexp} matches.
+  {
+    Label next(this), if_stringisempty(this, Label::kDeferred);
+    Branch(SmiEqual(string_length, smi_zero), &if_stringisempty, &next);
 
-Handle<JSArray> NewJSArrayWithElements(Isolate* isolate,
-                                       Handle<FixedArray> elems,
-                                       int num_elems) {
-  elems->Shrink(num_elems);
-  return isolate->factory()->NewJSArrayWithElements(elems);
-}
-
-MaybeHandle<JSArray> RegExpSplit(Isolate* isolate, Handle<JSRegExp> regexp,
-                                 Handle<String> string,
-                                 Handle<Object> limit_obj) {
-  Factory* factory = isolate->factory();
-
-  uint32_t limit;
-  RETURN_ON_EXCEPTION(isolate, ToUint32(isolate, limit_obj, &limit), JSArray);
-
-  const int length = string->length();
-
-  if (limit == 0) return factory->NewJSArray(0);
-
-  Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
-
-  if (length == 0) {
-    Handle<Object> match_indices;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, match_indices,
-        RegExpImpl::Exec(regexp, string, 0, last_match_info), JSArray);
-
-    if (!match_indices->IsNull(isolate)) return factory->NewJSArray(0);
-
-    Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
-    elems->set(0, *string);
-    return factory->NewJSArrayWithElements(elems);
-  }
-
-  int current_index = 0;
-  int start_index = 0;
-  int start_match = 0;
-
-  static const int kInitialArraySize = 8;
-  Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
-  int num_elems = 0;
-
-  while (true) {
-    if (start_index == length) {
-      Handle<String> substr =
-          factory->NewSubString(string, current_index, length);
-      elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
-      break;
-    }
-
-    Handle<Object> match_indices_obj;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, match_indices_obj,
-        RegExpImpl::Exec(regexp, string, start_index,
-                         isolate->regexp_last_match_info()),
-        JSArray);
-
-    if (match_indices_obj->IsNull(isolate)) {
-      Handle<String> substr =
-          factory->NewSubString(string, current_index, length);
-      elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
-      break;
-    }
-
-    auto match_indices = Handle<RegExpMatchInfo>::cast(match_indices_obj);
-
-    start_match = match_indices->Capture(0);
-
-    if (start_match == length) {
-      Handle<String> substr =
-          factory->NewSubString(string, current_index, length);
-      elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
-      break;
-    }
-
-    const int end_index = match_indices->Capture(1);
-
-    if (start_index == end_index && end_index == current_index) {
-      const bool unicode = (regexp->GetFlags() & JSRegExp::kUnicode) != 0;
-      if (unicode && AtSurrogatePair(isolate, string, start_index)) {
-        start_index += 2;
-      } else {
-        start_index += 1;
-      }
-      continue;
-    }
-
+    Bind(&if_stringisempty);
     {
-      Handle<String> substr =
-          factory->NewSubString(string, current_index, start_match);
-      elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
-    }
+      Node* const last_match_info = LoadContextElement(
+          native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
 
-    if (static_cast<uint32_t>(num_elems) == limit) break;
+      Callable exec_callable = CodeFactory::RegExpExec(isolate);
+      Node* const match_indices = CallStub(exec_callable, context, regexp,
+                                           string, smi_zero, last_match_info);
 
-    for (int i = 2; i < match_indices->NumberOfCaptureRegisters(); i += 2) {
-      const int start = match_indices->Capture(i);
-      const int end = match_indices->Capture(i + 1);
+      Label return_singleton_array(this);
+      Branch(WordEqual(match_indices, null), &return_singleton_array,
+             &return_empty_array);
 
-      if (end != -1) {
-        Handle<String> substr = factory->NewSubString(string, start, end);
-        elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
-      } else {
-        elems = FixedArray::SetAndGrow(elems, num_elems++,
-                                       factory->undefined_value());
-      }
+      Bind(&return_singleton_array);
+      {
+        Node* const length = SmiConstant(1);
+        Node* const capacity = IntPtrConstant(1);
+        Node* const result = AllocateJSArray(kind, array_map, capacity, length,
+                                             allocation_site, mode);
 
-      if (static_cast<uint32_t>(num_elems) == limit) {
-        return NewJSArrayWithElements(isolate, elems, num_elems);
+        Node* const fixed_array = LoadElements(result);
+        StoreFixedArrayElement(fixed_array, 0, string);
+
+        Return(result);
       }
     }
 
-    start_index = current_index = end_index;
+    Bind(&next);
   }
 
-  return NewJSArrayWithElements(isolate, elems, num_elems);
+  // Loop preparations.
+
+  GrowableFixedArray array(this);
+
+  Variable var_last_matched_until(this, MachineRepresentation::kTagged);
+  Variable var_next_search_from(this, MachineRepresentation::kTagged);
+
+  var_last_matched_until.Bind(smi_zero);
+  var_next_search_from.Bind(smi_zero);
+
+  Variable* vars[] = {array.var_array(), array.var_length(),
+                      array.var_capacity(), &var_last_matched_until,
+                      &var_next_search_from};
+  const int vars_count = sizeof(vars) / sizeof(vars[0]);
+  Label loop(this, vars_count, vars), push_suffix_and_out(this), out(this);
+  Goto(&loop);
+
+  Bind(&loop);
+  {
+    Node* const next_search_from = var_next_search_from.value();
+    Node* const last_matched_until = var_last_matched_until.value();
+
+    // We're done if we've reached the end of the string.
+    {
+      Label next(this);
+      Branch(SmiEqual(next_search_from, string_length), &push_suffix_and_out,
+             &next);
+      Bind(&next);
+    }
+
+    // Search for the given {regexp}.
+
+    Node* const last_match_info = LoadContextElement(
+        native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+
+    Callable exec_callable = CodeFactory::RegExpExec(isolate);
+    Node* const match_indices = CallStub(exec_callable, context, regexp, string,
+                                         next_search_from, last_match_info);
+
+    // We're done if no match was found.
+    {
+      Label next(this);
+      Branch(WordEqual(match_indices, null), &push_suffix_and_out, &next);
+      Bind(&next);
+    }
+
+    Node* const match_from = LoadFixedArrayElement(
+        match_indices, RegExpMatchInfo::kFirstCaptureIndex);
+
+    // We're done if the match starts beyond the string.
+    {
+      Label next(this);
+      Branch(WordEqual(match_from, string_length), &push_suffix_and_out, &next);
+      Bind(&next);
+    }
+
+    Node* const match_to = LoadFixedArrayElement(
+        match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
+
+    // Advance index and continue if the match is empty.
+    {
+      Label next(this);
+
+      GotoIfNot(SmiEqual(match_to, next_search_from), &next);
+      GotoIfNot(SmiEqual(match_to, last_matched_until), &next);
+
+      Node* const is_unicode = FastFlagGetter(regexp, JSRegExp::kUnicode);
+      Node* const new_next_search_from =
+          AdvanceStringIndex(string, next_search_from, is_unicode, true);
+      var_next_search_from.Bind(new_next_search_from);
+      Goto(&loop);
+
+      Bind(&next);
+    }
+
+    // A valid match was found, add the new substring to the array.
+    {
+      Node* const from = last_matched_until;
+      Node* const to = match_from;
+
+      Node* const substr = SubString(context, string, from, to);
+      array.Push(substr);
+
+      GotoIf(WordEqual(array.length(), int_limit), &out);
+    }
+
+    // Add all captures to the array.
+    {
+      Node* const num_registers = LoadFixedArrayElement(
+          match_indices, RegExpMatchInfo::kNumberOfCapturesIndex);
+      Node* const int_num_registers = SmiUntag(num_registers);
+
+      Variable var_reg(this, MachineType::PointerRepresentation());
+      var_reg.Bind(IntPtrConstant(2));
+
+      Variable* vars[] = {array.var_array(), array.var_length(),
+                          array.var_capacity(), &var_reg};
+      const int vars_count = sizeof(vars) / sizeof(vars[0]);
+      Label nested_loop(this, vars_count, vars), nested_loop_out(this);
+      Branch(IntPtrLessThan(var_reg.value(), int_num_registers), &nested_loop,
+             &nested_loop_out);
+
+      Bind(&nested_loop);
+      {
+        Node* const reg = var_reg.value();
+        Node* const from = LoadFixedArrayElement(
+            match_indices, reg,
+            RegExpMatchInfo::kFirstCaptureIndex * kPointerSize, mode);
+        Node* const to = LoadFixedArrayElement(
+            match_indices, reg,
+            (RegExpMatchInfo::kFirstCaptureIndex + 1) * kPointerSize, mode);
+
+        Label select_capture(this), select_undefined(this), store_value(this);
+        Variable var_value(this, MachineRepresentation::kTagged);
+        Branch(SmiEqual(to, SmiConstant(-1)), &select_undefined,
+               &select_capture);
+
+        Bind(&select_capture);
+        {
+          Node* const substr = SubString(context, string, from, to);
+          var_value.Bind(substr);
+          Goto(&store_value);
+        }
+
+        Bind(&select_undefined);
+        {
+          Node* const undefined = UndefinedConstant();
+          var_value.Bind(undefined);
+          Goto(&store_value);
+        }
+
+        Bind(&store_value);
+        {
+          array.Push(var_value.value());
+          GotoIf(WordEqual(array.length(), int_limit), &out);
+
+          Node* const new_reg = IntPtrAdd(reg, IntPtrConstant(2));
+          var_reg.Bind(new_reg);
+
+          Branch(IntPtrLessThan(new_reg, int_num_registers), &nested_loop,
+                 &nested_loop_out);
+        }
+      }
+
+      Bind(&nested_loop_out);
+    }
+
+    var_last_matched_until.Bind(match_to);
+    var_next_search_from.Bind(match_to);
+    Goto(&loop);
+  }
+
+  Bind(&push_suffix_and_out);
+  {
+    Node* const from = var_last_matched_until.value();
+    Node* const to = string_length;
+
+    Node* const substr = SubString(context, string, from, to);
+    array.Push(substr);
+
+    Goto(&out);
+  }
+
+  Bind(&out);
+  {
+    Node* const result = array.ToJSArray(context);
+    Return(result);
+  }
+
+  Bind(&return_empty_array);
+  {
+    Node* const length = smi_zero;
+    Node* const capacity = int_zero;
+    Node* const result = AllocateJSArray(kind, array_map, capacity, length,
+                                         allocation_site, mode);
+    Return(result);
+  }
 }
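+
+// Editorial sketch (not part of this patch): the loop above yields the
+// familiar @@split behavior on the fast path:
+//
+//   'a,b,,c'.split(/,/)     // ['a', 'b', '', 'c']
+//   'a1b2c'.split(/(\d)/)   // ['a', '1', 'b', '2', 'c']; captures are
+//                           // pushed between the substrings
+//   'a,b,c'.split(/,/, 2)   // ['a', 'b']; {limit} caps the array length
+//   ''.split(/x/)           // ['']; the singleton case for empty strings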
 
-// ES##sec-speciesconstructor
-// SpeciesConstructor ( O, defaultConstructor )
-MUST_USE_RESULT MaybeHandle<Object> SpeciesConstructor(
-    Isolate* isolate, Handle<JSReceiver> recv,
-    Handle<JSFunction> default_ctor) {
-  Handle<Object> ctor_obj;
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate, ctor_obj,
-      JSObject::GetProperty(recv, isolate->factory()->constructor_string()),
-      Object);
+// Fast-path helper for RegExp.prototype[@@split] that assumes the initial
+// receiver and string checks have already passed (see the asserts below).
+TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
+  typedef RegExpSplitDescriptor Descriptor;
 
-  if (ctor_obj->IsUndefined(isolate)) return default_ctor;
+  Node* const regexp = Parameter(Descriptor::kReceiver);
+  Node* const string = Parameter(Descriptor::kString);
+  Node* const maybe_limit = Parameter(Descriptor::kLimit);
+  Node* const context = Parameter(Descriptor::kContext);
 
-  if (!ctor_obj->IsJSReceiver()) {
-    THROW_NEW_ERROR(isolate,
-                    NewTypeError(MessageTemplate::kConstructorNotReceiver),
-                    Object);
+  CSA_ASSERT(this, IsFastRegExpMap(context, regexp, LoadMap(regexp)));
+  CSA_ASSERT(this, IsString(string));
+
+  // TODO(jgruber): Even if map checks send us to the fast path, we still need
+  // to verify the constructor property and jump to the slow path if it has
+  // been changed.
+
+  // Convert {maybe_limit} to a uint32, capping at the maximal smi value.
+  Variable var_limit(this, MachineRepresentation::kTagged, maybe_limit);
+  Label if_limitissmimax(this), limit_done(this), runtime(this);
+
+  GotoIf(IsUndefined(maybe_limit), &if_limitissmimax);
+  GotoIf(TaggedIsPositiveSmi(maybe_limit), &limit_done);
+
+  Node* const limit = ToUint32(context, maybe_limit);
+  {
+    // ToUint32(limit) could potentially change the shape of the RegExp
+    // object. Recheck that we are still on the fast path and bail to runtime
+    // otherwise.
+    {
+      Label next(this);
+      BranchIfFastRegExp(context, regexp, LoadMap(regexp), &next, &runtime);
+      Bind(&next);
+    }
+
+    GotoIfNot(TaggedIsSmi(limit), &if_limitissmimax);
+
+    var_limit.Bind(limit);
+    Goto(&limit_done);
   }
 
-  Handle<JSReceiver> ctor = Handle<JSReceiver>::cast(ctor_obj);
-
-  Handle<Object> species;
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate, species,
-      JSObject::GetProperty(ctor, isolate->factory()->species_symbol()),
-      Object);
-
-  if (species->IsNull(isolate) || species->IsUndefined(isolate)) {
-    return default_ctor;
+  Bind(&if_limitissmimax);
+  {
+    // TODO(jgruber): In this case, we can probably avoid generation of limit
+    // checks in RegExpPrototypeSplitBody.
+    var_limit.Bind(SmiConstant(Smi::kMaxValue));
+    Goto(&limit_done);
   }
 
-  if (species->IsConstructor()) return species;
+  Bind(&limit_done);
+  {
+    Node* const limit = var_limit.value();
+    RegExpPrototypeSplitBody(context, regexp, string, limit);
+  }
 
-  THROW_NEW_ERROR(
-      isolate, NewTypeError(MessageTemplate::kSpeciesNotConstructor), Object);
+  Bind(&runtime);
+  {
+    // The runtime call passes in limit to ensure the second ToUint32(limit)
+    // call is not observable.
+    CSA_ASSERT(this, IsHeapNumberMap(LoadReceiverMap(limit)));
+    Return(CallRuntime(Runtime::kRegExpSplit, context, regexp, string, limit));
+  }
 }
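+
+// Editorial sketch (not part of this patch): the recheck-and-bail above is
+// needed because ToUint32 can run arbitrary user code. For example:
+//
+//   const re = /,/;
+//   const limit = { valueOf() { re.exec = () => null; return 1; } };
+//   'a,b'.split(re, limit);  // coercing {limit} modifies {re}, so the stub
+//                            // must fall through to the runtime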
 
-}  // namespace
-
 // ES#sec-regexp.prototype-@@split
 // RegExp.prototype [ @@split ] ( string, limit )
-BUILTIN(RegExpPrototypeSplit) {
-  HandleScope scope(isolate);
-  CHECK_RECEIVER(JSReceiver, recv, "RegExp.prototype.@@split");
+TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
+  Node* const maybe_receiver = Parameter(0);
+  Node* const maybe_string = Parameter(1);
+  Node* const maybe_limit = Parameter(2);
+  Node* const context = Parameter(5);
 
-  Factory* factory = isolate->factory();
+  // Ensure {maybe_receiver} is a JSReceiver.
+  ThrowIfNotJSReceiver(context, maybe_receiver,
+                       MessageTemplate::kIncompatibleMethodReceiver,
+                       "RegExp.prototype.@@split");
+  Node* const receiver = maybe_receiver;
 
-  Handle<Object> string_obj = args.atOrUndefined(isolate, 1);
-  Handle<Object> limit_obj = args.atOrUndefined(isolate, 2);
+  // Convert {maybe_string} to a String.
+  Node* const string = ToString(context, maybe_string);
 
-  Handle<String> string;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
-                                     Object::ToString(isolate, string_obj));
+  Label stub(this), runtime(this, Label::kDeferred);
+  BranchIfFastRegExp(context, receiver, LoadMap(receiver), &stub, &runtime);
 
-  if (RegExpUtils::IsUnmodifiedRegExp(isolate, recv)) {
-    RETURN_RESULT_OR_FAILURE(
-        isolate,
-        RegExpSplit(isolate, Handle<JSRegExp>::cast(recv), string, limit_obj));
-  }
+  Bind(&stub);
+  Callable split_callable = CodeFactory::RegExpSplit(isolate());
+  Return(CallStub(split_callable, context, receiver, string, maybe_limit));
 
-  Handle<JSFunction> regexp_fun = isolate->regexp_function();
-  Handle<Object> ctor;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, ctor, SpeciesConstructor(isolate, recv, regexp_fun));
-
-  Handle<Object> flags_obj;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, flags_obj, JSObject::GetProperty(recv, factory->flags_string()));
-
-  Handle<String> flags;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, flags,
-                                     Object::ToString(isolate, flags_obj));
-
-  Handle<String> u_str = factory->LookupSingleCharacterStringFromCode('u');
-  const bool unicode = (String::IndexOf(isolate, flags, u_str, 0) >= 0);
-
-  Handle<String> y_str = factory->LookupSingleCharacterStringFromCode('y');
-  const bool sticky = (String::IndexOf(isolate, flags, y_str, 0) >= 0);
-
-  Handle<String> new_flags = flags;
-  if (!sticky) {
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_flags,
-                                       factory->NewConsString(flags, y_str));
-  }
-
-  Handle<JSReceiver> splitter;
-  {
-    const int argc = 2;
-
-    ScopedVector<Handle<Object>> argv(argc);
-    argv[0] = recv;
-    argv[1] = new_flags;
-
-    Handle<JSFunction> ctor_fun = Handle<JSFunction>::cast(ctor);
-    Handle<Object> splitter_obj;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, splitter_obj, Execution::New(ctor_fun, argc, argv.start()));
-
-    splitter = Handle<JSReceiver>::cast(splitter_obj);
-  }
-
-  uint32_t limit;
-  RETURN_FAILURE_ON_EXCEPTION(isolate, ToUint32(isolate, limit_obj, &limit));
-
-  const int length = string->length();
-
-  if (limit == 0) return *factory->NewJSArray(0);
-
-  if (length == 0) {
-    Handle<Object> result;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
-                                                 factory->undefined_value()));
-
-    if (!result->IsNull(isolate)) return *factory->NewJSArray(0);
-
-    Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
-    elems->set(0, *string);
-    return *factory->NewJSArrayWithElements(elems);
-  }
-
-  // TODO(jgruber): Wrap this in a helper class.
-  static const int kInitialArraySize = 8;
-  Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
-  int num_elems = 0;
-
-  int string_index = 0;
-  int prev_string_index = 0;
-  while (string_index < length) {
-    RETURN_FAILURE_ON_EXCEPTION(
-        isolate, RegExpUtils::SetLastIndex(isolate, splitter, string_index));
-
-    Handle<Object> result;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
-                                                 factory->undefined_value()));
-
-    if (result->IsNull(isolate)) {
-      string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
-                                                     string_index, unicode);
-      continue;
-    }
-
-    // TODO(jgruber): Extract toLength of some property into function.
-    Handle<Object> last_index_obj;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, last_index_obj, RegExpUtils::GetLastIndex(isolate, splitter));
-
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, last_index_obj, Object::ToLength(isolate, last_index_obj));
-    const int last_index = Handle<Smi>::cast(last_index_obj)->value();
-
-    const int end = std::min(last_index, length);
-    if (end == prev_string_index) {
-      string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
-                                                     string_index, unicode);
-      continue;
-    }
-
-    {
-      Handle<String> substr =
-          factory->NewSubString(string, prev_string_index, string_index);
-      elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
-      if (static_cast<uint32_t>(num_elems) == limit) {
-        return *NewJSArrayWithElements(isolate, elems, num_elems);
-      }
-    }
-
-    prev_string_index = end;
-
-    Handle<Object> num_captures_obj;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, num_captures_obj,
-        Object::GetProperty(result, isolate->factory()->length_string()));
-
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, num_captures_obj, Object::ToLength(isolate, num_captures_obj));
-    const int num_captures =
-        std::max(Handle<Smi>::cast(num_captures_obj)->value(), 0);
-
-    for (int i = 1; i < num_captures; i++) {
-      Handle<Object> capture;
-      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-          isolate, capture, Object::GetElement(isolate, result, i));
-      elems = FixedArray::SetAndGrow(elems, num_elems++, capture);
-      if (static_cast<uint32_t>(num_elems) == limit) {
-        return *NewJSArrayWithElements(isolate, elems, num_elems);
-      }
-    }
-
-    string_index = prev_string_index;
-  }
-
-  {
-    Handle<String> substr =
-        factory->NewSubString(string, prev_string_index, length);
-    elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
-  }
-
-  return *NewJSArrayWithElements(isolate, elems, num_elems);
+  Bind(&runtime);
+  Return(CallRuntime(Runtime::kRegExpSplit, context, receiver, string,
+                     maybe_limit));
 }
 
-namespace {
-
-compiler::Node* ReplaceGlobalCallableFastPath(
-    CodeStubAssembler* a, compiler::Node* context, compiler::Node* regexp,
-    compiler::Node* subject_string, compiler::Node* replace_callable) {
+Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
+    Node* context, Node* regexp, Node* string, Node* replace_callable) {
   // The fast path is reached only if {receiver} is a global unmodified
   // JSRegExp instance and {replace_callable} is callable.
 
-  typedef CodeStubAssembler::Variable Variable;
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+  Isolate* const isolate = this->isolate();
 
-  Isolate* const isolate = a->isolate();
+  Node* const null = NullConstant();
+  Node* const undefined = UndefinedConstant();
+  Node* const int_zero = IntPtrConstant(0);
+  Node* const int_one = IntPtrConstant(1);
+  Node* const smi_zero = SmiConstant(Smi::kZero);
 
-  Node* const null = a->NullConstant();
-  Node* const undefined = a->UndefinedConstant();
-  Node* const int_zero = a->IntPtrConstant(0);
-  Node* const int_one = a->IntPtrConstant(1);
-  Node* const smi_zero = a->SmiConstant(Smi::kZero);
+  Node* const native_context = LoadNativeContext(context);
 
-  Node* const native_context = a->LoadNativeContext(context);
-
-  Label out(a);
-  Variable var_result(a, MachineRepresentation::kTagged);
+  Label out(this);
+  Variable var_result(this, MachineRepresentation::kTagged);
 
   // Set last index to 0.
-  FastStoreLastIndex(a, context, regexp, smi_zero);
+  FastStoreLastIndex(regexp, smi_zero);
 
   // Allocate {result_array}.
   Node* result_array;
   {
     ElementsKind kind = FAST_ELEMENTS;
-    Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
-    Node* const capacity = a->IntPtrConstant(16);
+    Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
+    Node* const capacity = IntPtrConstant(16);
     Node* const length = smi_zero;
     Node* const allocation_site = nullptr;
-    CodeStubAssembler::ParameterMode capacity_mode =
-        CodeStubAssembler::INTPTR_PARAMETERS;
+    ParameterMode capacity_mode = CodeStubAssembler::INTPTR_PARAMETERS;
 
-    result_array = a->AllocateJSArray(kind, array_map, capacity, length,
-                                      allocation_site, capacity_mode);
+    result_array = AllocateJSArray(kind, array_map, capacity, length,
+                                   allocation_site, capacity_mode);
   }
 
   // Call into runtime for RegExpExecMultiple.
-  Node* last_match_info = a->LoadContextElement(
-      native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
-  Node* const res =
-      a->CallRuntime(Runtime::kRegExpExecMultiple, context, regexp,
-                     subject_string, last_match_info, result_array);
+  Node* last_match_info =
+      LoadContextElement(native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+  Node* const res = CallRuntime(Runtime::kRegExpExecMultiple, context, regexp,
+                                string, last_match_info, result_array);
 
   // Reset last index to 0.
-  FastStoreLastIndex(a, context, regexp, smi_zero);
+  FastStoreLastIndex(regexp, smi_zero);
 
   // If no matches, return the subject string.
-  var_result.Bind(subject_string);
-  a->GotoIf(a->WordEqual(res, null), &out);
+  var_result.Bind(string);
+  GotoIf(WordEqual(res, null), &out);
 
   // Reload last match info since it might have changed.
-  last_match_info = a->LoadContextElement(
-      native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+  last_match_info =
+      LoadContextElement(native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
 
-  Node* const res_length = a->LoadJSArrayLength(res);
-  Node* const res_elems = a->LoadElements(res);
-  CSA_ASSERT(a, a->HasInstanceType(res_elems, FIXED_ARRAY_TYPE));
+  Node* const res_length = LoadJSArrayLength(res);
+  Node* const res_elems = LoadElements(res);
+  CSA_ASSERT(this, HasInstanceType(res_elems, FIXED_ARRAY_TYPE));
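+  // {res_elems} holds a mix of smi-encoded subject slices and match strings
+  // as produced by RegExpExecMultiple.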
 
-  CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
-  Node* const num_capture_registers = a->LoadFixedArrayElement(
-      last_match_info,
-      a->IntPtrConstant(RegExpMatchInfo::kNumberOfCapturesIndex), 0, mode);
+  Node* const num_capture_registers = LoadFixedArrayElement(
+      last_match_info, RegExpMatchInfo::kNumberOfCapturesIndex);
 
-  Label if_hasexplicitcaptures(a), if_noexplicitcaptures(a), create_result(a);
-  a->Branch(a->SmiEqual(num_capture_registers, a->SmiConstant(Smi::FromInt(2))),
-            &if_noexplicitcaptures, &if_hasexplicitcaptures);
+  Label if_hasexplicitcaptures(this), if_noexplicitcaptures(this),
+      create_result(this);
+  Branch(SmiEqual(num_capture_registers, SmiConstant(Smi::FromInt(2))),
+         &if_noexplicitcaptures, &if_hasexplicitcaptures);
 
-  a->Bind(&if_noexplicitcaptures);
+  Bind(&if_noexplicitcaptures);
   {
     // If the number of captures is two then there are no explicit captures in
     // the regexp, just the implicit capture that captures the whole match. In
@@ -1710,394 +2281,388 @@
     // input string and some replacements that were returned from the replace
     // function.
 
-    Variable var_match_start(a, MachineRepresentation::kTagged);
+    Variable var_match_start(this, MachineRepresentation::kTagged);
     var_match_start.Bind(smi_zero);
 
-    Node* const end = a->SmiUntag(res_length);
-    Variable var_i(a, MachineType::PointerRepresentation());
+    Node* const end = SmiUntag(res_length);
+    Variable var_i(this, MachineType::PointerRepresentation());
     var_i.Bind(int_zero);
 
     Variable* vars[] = {&var_i, &var_match_start};
-    Label loop(a, 2, vars);
-    a->Goto(&loop);
-    a->Bind(&loop);
+    Label loop(this, 2, vars);
+    Goto(&loop);
+    Bind(&loop);
     {
       Node* const i = var_i.value();
-      a->GotoUnless(a->IntPtrLessThan(i, end), &create_result);
+      GotoIfNot(IntPtrLessThan(i, end), &create_result);
 
-      CodeStubAssembler::ParameterMode mode =
-          CodeStubAssembler::INTPTR_PARAMETERS;
-      Node* const elem = a->LoadFixedArrayElement(res_elems, i, 0, mode);
+      Node* const elem = LoadFixedArrayElement(res_elems, i);
 
-      Label if_issmi(a), if_isstring(a), loop_epilogue(a);
-      a->Branch(a->TaggedIsSmi(elem), &if_issmi, &if_isstring);
+      Label if_issmi(this), if_isstring(this), loop_epilogue(this);
+      Branch(TaggedIsSmi(elem), &if_issmi, &if_isstring);
 
-      a->Bind(&if_issmi);
+      Bind(&if_issmi);
       {
         // Integers represent slices of the original string.
-        Label if_isnegativeorzero(a), if_ispositive(a);
-        a->BranchIfSmiLessThanOrEqual(elem, smi_zero, &if_isnegativeorzero,
-                                      &if_ispositive);
+        Label if_isnegativeorzero(this), if_ispositive(this);
+        BranchIfSmiLessThanOrEqual(elem, smi_zero, &if_isnegativeorzero,
+                                   &if_ispositive);
 
-        a->Bind(&if_ispositive);
+        Bind(&if_ispositive);
         {
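+          // Decode the slice: the upper bits hold its start position and the
+          // low 11 bits its length, so their sum is the end of the slice and
+          // thus the start of the next match.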
-          Node* const int_elem = a->SmiUntag(elem);
+          Node* const int_elem = SmiUntag(elem);
           Node* const new_match_start =
-              a->IntPtrAdd(a->WordShr(int_elem, a->IntPtrConstant(11)),
-                           a->WordAnd(int_elem, a->IntPtrConstant(0x7ff)));
-          var_match_start.Bind(a->SmiTag(new_match_start));
-          a->Goto(&loop_epilogue);
+              IntPtrAdd(WordShr(int_elem, IntPtrConstant(11)),
+                        WordAnd(int_elem, IntPtrConstant(0x7ff)));
+          var_match_start.Bind(SmiTag(new_match_start));
+          Goto(&loop_epilogue);
         }
 
-        a->Bind(&if_isnegativeorzero);
+        Bind(&if_isnegativeorzero);
         {
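+          // Slices too long for the one-smi encoding are stored as the pair
+          // {-position, length}; subtracting recovers position + length, the
+          // slice end.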
-          Node* const next_i = a->IntPtrAdd(i, int_one);
+          Node* const next_i = IntPtrAdd(i, int_one);
           var_i.Bind(next_i);
 
-          Node* const next_elem =
-              a->LoadFixedArrayElement(res_elems, next_i, 0, mode);
+          Node* const next_elem = LoadFixedArrayElement(res_elems, next_i);
 
-          Node* const new_match_start = a->SmiSub(next_elem, elem);
+          Node* const new_match_start = SmiSub(next_elem, elem);
           var_match_start.Bind(new_match_start);
-          a->Goto(&loop_epilogue);
+          Goto(&loop_epilogue);
         }
       }
 
-      a->Bind(&if_isstring);
+      Bind(&if_isstring);
       {
-        CSA_ASSERT(a, a->IsStringInstanceType(a->LoadInstanceType(elem)));
+        CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(elem)));
 
         Callable call_callable = CodeFactory::Call(isolate);
         Node* const replacement_obj =
-            a->CallJS(call_callable, context, replace_callable, undefined, elem,
-                      var_match_start.value(), subject_string);
+            CallJS(call_callable, context, replace_callable, undefined, elem,
+                   var_match_start.value(), string);
 
-        Node* const replacement_str = a->ToString(context, replacement_obj);
-        a->StoreFixedArrayElement(res_elems, i, replacement_str);
+        Node* const replacement_str = ToString(context, replacement_obj);
+        StoreFixedArrayElement(res_elems, i, replacement_str);
 
-        Node* const elem_length = a->LoadStringLength(elem);
+        Node* const elem_length = LoadStringLength(elem);
         Node* const new_match_start =
-            a->SmiAdd(var_match_start.value(), elem_length);
+            SmiAdd(var_match_start.value(), elem_length);
         var_match_start.Bind(new_match_start);
 
-        a->Goto(&loop_epilogue);
+        Goto(&loop_epilogue);
       }
 
-      a->Bind(&loop_epilogue);
+      Bind(&loop_epilogue);
       {
-        var_i.Bind(a->IntPtrAdd(var_i.value(), int_one));
-        a->Goto(&loop);
+        var_i.Bind(IntPtrAdd(var_i.value(), int_one));
+        Goto(&loop);
       }
     }
   }
 
-  a->Bind(&if_hasexplicitcaptures);
+  Bind(&if_hasexplicitcaptures);
   {
-    CodeStubAssembler::ParameterMode mode =
-        CodeStubAssembler::INTPTR_PARAMETERS;
-
     Node* const from = int_zero;
-    Node* const to = a->SmiUntag(res_length);
+    Node* const to = SmiUntag(res_length);
     const int increment = 1;
 
-    a->BuildFastLoop(
-        MachineType::PointerRepresentation(), from, to,
-        [res_elems, isolate, native_context, context, undefined,
-         replace_callable, mode](CodeStubAssembler* a, Node* index) {
-          Node* const elem =
-              a->LoadFixedArrayElement(res_elems, index, 0, mode);
+    BuildFastLoop(
+        from, to,
+        [this, res_elems, isolate, native_context, context, undefined,
+         replace_callable](Node* index) {
+          Node* const elem = LoadFixedArrayElement(res_elems, index);
 
-          Label do_continue(a);
-          a->GotoIf(a->TaggedIsSmi(elem), &do_continue);
+          Label do_continue(this);
+          GotoIf(TaggedIsSmi(elem), &do_continue);
 
           // elem must be an Array.
           // Use the apply argument as backing for global RegExp properties.
 
-          CSA_ASSERT(a, a->HasInstanceType(elem, JS_ARRAY_TYPE));
+          CSA_ASSERT(this, HasInstanceType(elem, JS_ARRAY_TYPE));
 
           // TODO(jgruber): Remove indirection through Call->ReflectApply.
           Callable call_callable = CodeFactory::Call(isolate);
-          Node* const reflect_apply = a->LoadContextElement(
-              native_context, Context::REFLECT_APPLY_INDEX);
+          Node* const reflect_apply =
+              LoadContextElement(native_context, Context::REFLECT_APPLY_INDEX);
 
           Node* const replacement_obj =
-              a->CallJS(call_callable, context, reflect_apply, undefined,
-                        replace_callable, undefined, elem);
+              CallJS(call_callable, context, reflect_apply, undefined,
+                     replace_callable, undefined, elem);
 
           // Overwrite the i'th element in the results with the string we got
           // back from the callback function.
 
-          Node* const replacement_str = a->ToString(context, replacement_obj);
-          a->StoreFixedArrayElement(res_elems, index, replacement_str,
-                                    UPDATE_WRITE_BARRIER, mode);
+          Node* const replacement_str = ToString(context, replacement_obj);
+          StoreFixedArrayElement(res_elems, index, replacement_str);
 
-          a->Goto(&do_continue);
-          a->Bind(&do_continue);
+          Goto(&do_continue);
+          Bind(&do_continue);
         },
-        increment, CodeStubAssembler::IndexAdvanceMode::kPost);
+        increment, CodeStubAssembler::INTPTR_PARAMETERS,
+        CodeStubAssembler::IndexAdvanceMode::kPost);
 
-    a->Goto(&create_result);
+    Goto(&create_result);
   }
 
-  a->Bind(&create_result);
+  Bind(&create_result);
   {
-    Node* const result = a->CallRuntime(Runtime::kStringBuilderConcat, context,
-                                        res, res_length, subject_string);
+    Node* const result = CallRuntime(Runtime::kStringBuilderConcat, context,
+                                     res, res_length, string);
     var_result.Bind(result);
-    a->Goto(&out);
+    Goto(&out);
   }
 
-  a->Bind(&out);
+  Bind(&out);
   return var_result.value();
 }
 
-compiler::Node* ReplaceSimpleStringFastPath(CodeStubAssembler* a,
-                                            compiler::Node* context,
-                                            compiler::Node* regexp,
-                                            compiler::Node* subject_string,
-                                            compiler::Node* replace_string) {
+Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
+    Node* context, Node* regexp, Node* string, Node* replace_string) {
   // The fast path is reached only if {receiver} is an unmodified
   // JSRegExp instance, {replace_value} is non-callable, and
   // ToString({replace_value}) does not contain '$', i.e. we're doing a simple
   // string replacement.
 
-  typedef CodeStubAssembler::Variable Variable;
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+  Node* const int_zero = IntPtrConstant(0);
+  Node* const smi_zero = SmiConstant(Smi::kZero);
 
-  Isolate* const isolate = a->isolate();
-
-  Node* const null = a->NullConstant();
-  Node* const int_zero = a->IntPtrConstant(0);
-  Node* const smi_zero = a->SmiConstant(Smi::kZero);
-
-  Label out(a);
-  Variable var_result(a, MachineRepresentation::kTagged);
+  Label out(this);
+  Variable var_result(this, MachineRepresentation::kTagged);
 
   // Load the last match info.
-  Node* const native_context = a->LoadNativeContext(context);
-  Node* const last_match_info = a->LoadContextElement(
-      native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+  Node* const native_context = LoadNativeContext(context);
+  Node* const last_match_info =
+      LoadContextElement(native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
 
   // Is {regexp} global?
-  Label if_isglobal(a), if_isnonglobal(a);
-  Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+  Label if_isglobal(this), if_isnonglobal(this);
+  Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
   Node* const is_global =
-      a->WordAnd(a->SmiUntag(flags), a->IntPtrConstant(JSRegExp::kGlobal));
-  a->Branch(a->WordEqual(is_global, int_zero), &if_isnonglobal, &if_isglobal);
+      WordAnd(SmiUntag(flags), IntPtrConstant(JSRegExp::kGlobal));
+  Branch(WordEqual(is_global, int_zero), &if_isnonglobal, &if_isglobal);
 
-  a->Bind(&if_isglobal);
+  Bind(&if_isglobal);
   {
     // Hand off global regexps to runtime.
-    FastStoreLastIndex(a, context, regexp, smi_zero);
+    FastStoreLastIndex(regexp, smi_zero);
     Node* const result =
-        a->CallRuntime(Runtime::kStringReplaceGlobalRegExpWithString, context,
-                       subject_string, regexp, replace_string, last_match_info);
+        CallRuntime(Runtime::kStringReplaceGlobalRegExpWithString, context,
+                    string, regexp, replace_string, last_match_info);
     var_result.Bind(result);
-    a->Goto(&out);
+    Goto(&out);
   }
 
-  a->Bind(&if_isnonglobal);
+  Bind(&if_isnonglobal);
   {
     // Run exec, then manually construct the resulting string.
-    Callable exec_callable = CodeFactory::RegExpExec(isolate);
-    Node* const match_indices =
-        a->CallStub(exec_callable, context, regexp, subject_string, smi_zero,
-                    last_match_info);
+    Label if_didnotmatch(this);
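+    // The final 'true' argument selects the fast-path exec body; {regexp} is
+    // known to be an unmodified JSRegExp at this point.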
+    Node* const match_indices = RegExpPrototypeExecBodyWithoutResult(
+        context, regexp, string, &if_didnotmatch, true);
 
-    Label if_matched(a), if_didnotmatch(a);
-    a->Branch(a->WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
-
-    a->Bind(&if_didnotmatch);
+    // Successful match.
     {
-      FastStoreLastIndex(a, context, regexp, smi_zero);
-      var_result.Bind(subject_string);
-      a->Goto(&out);
-    }
-
-    a->Bind(&if_matched);
-    {
-      CodeStubAssembler::ParameterMode mode =
-          CodeStubAssembler::INTPTR_PARAMETERS;
-
       Node* const subject_start = smi_zero;
-      Node* const match_start = a->LoadFixedArrayElement(
-          match_indices, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex),
-          0, mode);
-      Node* const match_end = a->LoadFixedArrayElement(
-          match_indices,
-          a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1), 0, mode);
-      Node* const subject_end = a->LoadStringLength(subject_string);
+      Node* const match_start = LoadFixedArrayElement(
+          match_indices, RegExpMatchInfo::kFirstCaptureIndex);
+      Node* const match_end = LoadFixedArrayElement(
+          match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
+      Node* const subject_end = LoadStringLength(string);
 
-      Label if_replaceisempty(a), if_replaceisnotempty(a);
-      Node* const replace_length = a->LoadStringLength(replace_string);
-      a->Branch(a->SmiEqual(replace_length, smi_zero), &if_replaceisempty,
-                &if_replaceisnotempty);
+      Label if_replaceisempty(this), if_replaceisnotempty(this);
+      Node* const replace_length = LoadStringLength(replace_string);
+      Branch(SmiEqual(replace_length, smi_zero), &if_replaceisempty,
+             &if_replaceisnotempty);
 
-      a->Bind(&if_replaceisempty);
+      Bind(&if_replaceisempty);
       {
         // TODO(jgruber): We could skip many of the checks that using SubString
         // here entails.
 
         Node* const first_part =
-            a->SubString(context, subject_string, subject_start, match_start);
+            SubString(context, string, subject_start, match_start);
         Node* const second_part =
-            a->SubString(context, subject_string, match_end, subject_end);
+            SubString(context, string, match_end, subject_end);
 
-        Node* const result = a->StringAdd(context, first_part, second_part);
+        Node* const result = StringAdd(context, first_part, second_part);
         var_result.Bind(result);
-        a->Goto(&out);
+        Goto(&out);
       }
 
-      a->Bind(&if_replaceisnotempty);
+      Bind(&if_replaceisnotempty);
       {
         Node* const first_part =
-            a->SubString(context, subject_string, subject_start, match_start);
+            SubString(context, string, subject_start, match_start);
         Node* const second_part = replace_string;
         Node* const third_part =
-            a->SubString(context, subject_string, match_end, subject_end);
+            SubString(context, string, match_end, subject_end);
 
-        Node* result = a->StringAdd(context, first_part, second_part);
-        result = a->StringAdd(context, result, third_part);
+        Node* result = StringAdd(context, first_part, second_part);
+        result = StringAdd(context, result, third_part);
 
         var_result.Bind(result);
-        a->Goto(&out);
+        Goto(&out);
       }
     }
+
+    Bind(&if_didnotmatch);
+    {
+      var_result.Bind(string);
+      Goto(&out);
+    }
   }
 
-  a->Bind(&out);
+  Bind(&out);
   return var_result.value();
 }
 
-}  // namespace
+// Helper for RegExp.prototype[@@replace] that skips the initial receiver and
+// string checks; callers must pass a fast-path JSRegExp and a String (both
+// asserted below).
+TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
+  typedef RegExpReplaceDescriptor Descriptor;
 
-// ES#sec-regexp.prototype-@@replace
-// RegExp.prototype [ @@replace ] ( string, replaceValue )
-void Builtins::Generate_RegExpPrototypeReplace(CodeStubAssembler* a) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+  Node* const regexp = Parameter(Descriptor::kReceiver);
+  Node* const string = Parameter(Descriptor::kString);
+  Node* const replace_value = Parameter(Descriptor::kReplaceValue);
+  Node* const context = Parameter(Descriptor::kContext);
 
-  Isolate* const isolate = a->isolate();
+  CSA_ASSERT(this, IsFastRegExpMap(context, regexp, LoadMap(regexp)));
+  CSA_ASSERT(this, IsString(string));
 
-  Node* const maybe_receiver = a->Parameter(0);
-  Node* const maybe_string = a->Parameter(1);
-  Node* const replace_value = a->Parameter(2);
-  Node* const context = a->Parameter(5);
-
-  Node* const int_zero = a->IntPtrConstant(0);
-
-  // Ensure {maybe_receiver} is a JSReceiver.
-  Node* const map =
-      ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
-                           MessageTemplate::kIncompatibleMethodReceiver,
-                           "RegExp.prototype.@@replace");
-  Node* const receiver = maybe_receiver;
-
-  // Convert {maybe_string} to a String.
-  Callable tostring_callable = CodeFactory::ToString(isolate);
-  Node* const string = a->CallStub(tostring_callable, context, maybe_string);
-
-  // Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
-  Label checkreplacecallable(a), runtime(a, Label::kDeferred), fastpath(a);
-  BranchIfFastPath(a, context, map, &checkreplacecallable, &runtime);
-
-  a->Bind(&checkreplacecallable);
-  Node* const regexp = receiver;
+  Label checkreplacestring(this), if_iscallable(this),
+      runtime(this, Label::kDeferred);
 
   // 2. Is {replace_value} callable?
-  Label checkreplacestring(a), if_iscallable(a);
-  a->GotoIf(a->TaggedIsSmi(replace_value), &checkreplacestring);
-
-  Node* const replace_value_map = a->LoadMap(replace_value);
-  a->Branch(a->IsCallableMap(replace_value_map), &if_iscallable,
-            &checkreplacestring);
+  GotoIf(TaggedIsSmi(replace_value), &checkreplacestring);
+  Branch(IsCallableMap(LoadMap(replace_value)), &if_iscallable,
+         &checkreplacestring);
 
   // 3. Does ToString({replace_value}) contain '$'?
-  a->Bind(&checkreplacestring);
+  Bind(&checkreplacestring);
   {
+    Callable tostring_callable = CodeFactory::ToString(isolate());
     Node* const replace_string =
-        a->CallStub(tostring_callable, context, replace_value);
+        CallStub(tostring_callable, context, replace_value);
 
-    Node* const dollar_char = a->IntPtrConstant('$');
-    Node* const smi_minusone = a->SmiConstant(Smi::FromInt(-1));
-    a->GotoUnless(a->SmiEqual(a->StringIndexOfChar(context, replace_string,
-                                                   dollar_char, int_zero),
-                              smi_minusone),
-                  &runtime);
+    // ToString(replaceValue) could potentially change the shape of the RegExp
+    // object. Recheck that we are still on the fast path and bail to runtime
+    // otherwise.
+    {
+      Label next(this);
+      BranchIfFastRegExp(context, regexp, LoadMap(regexp), &next, &runtime);
+      Bind(&next);
+    }
 
-    a->Return(ReplaceSimpleStringFastPath(a, context, regexp, string,
-                                          replace_string));
+    Callable indexof_callable = CodeFactory::StringIndexOf(isolate());
+    Node* const dollar_string = HeapConstant(
+        isolate()->factory()->LookupSingleCharacterStringFromCode('$'));
+    Node* const dollar_ix = CallStub(indexof_callable, context, replace_string,
+                                     dollar_string, SmiConstant(0));
+    GotoIfNot(SmiEqual(dollar_ix, SmiConstant(-1)), &runtime);
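+    // No '$' found, so the replacement contains no substitution patterns
+    // ($1, $&, ...) and the simple string fast path applies.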
+
+    Return(
+        ReplaceSimpleStringFastPath(context, regexp, string, replace_string));
   }
 
   // {regexp} is unmodified and {replace_value} is callable.
-  a->Bind(&if_iscallable);
+  Bind(&if_iscallable);
   {
-    Node* const replace_callable = replace_value;
+    Node* const replace_fn = replace_value;
 
     // Check if the {regexp} is global.
-    Label if_isglobal(a), if_isnotglobal(a);
-    Node* const is_global = FastFlagGetter(a, regexp, JSRegExp::kGlobal);
-    a->Branch(is_global, &if_isglobal, &if_isnotglobal);
+    Label if_isglobal(this), if_isnotglobal(this);
 
-    a->Bind(&if_isglobal);
-    {
-      Node* const result = ReplaceGlobalCallableFastPath(
-          a, context, regexp, string, replace_callable);
-      a->Return(result);
-    }
+    Node* const is_global = FastFlagGetter(regexp, JSRegExp::kGlobal);
+    Branch(is_global, &if_isglobal, &if_isnotglobal);
 
-    a->Bind(&if_isnotglobal);
-    {
-      Node* const result =
-          a->CallRuntime(Runtime::kStringReplaceNonGlobalRegExpWithFunction,
-                         context, string, regexp, replace_callable);
-      a->Return(result);
-    }
+    Bind(&if_isglobal);
+    Return(ReplaceGlobalCallableFastPath(context, regexp, string, replace_fn));
+
+    Bind(&if_isnotglobal);
+    Return(CallRuntime(Runtime::kStringReplaceNonGlobalRegExpWithFunction,
+                       context, string, regexp, replace_fn));
   }
 
-  a->Bind(&runtime);
-  {
-    Node* const result = a->CallRuntime(Runtime::kRegExpReplace, context,
-                                        receiver, string, replace_value);
-    a->Return(result);
-  }
+  Bind(&runtime);
+  Return(CallRuntime(Runtime::kRegExpReplace, context, regexp, string,
+                     replace_value));
+}
+
+// ES#sec-regexp.prototype-@@replace
+// RegExp.prototype [ @@replace ] ( string, replaceValue )
+TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
+  Node* const maybe_receiver = Parameter(0);
+  Node* const maybe_string = Parameter(1);
+  Node* const replace_value = Parameter(2);
+  Node* const context = Parameter(5);
+
+  // RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
+  //
+  // if (!IsFastRegExp(receiver)) CallRuntime(RegExpReplace)
+  // if (IsCallable(replace)) {
+  //   if (IsGlobal(receiver)) {
+  //     // Called 'fast-path' but contains several runtime calls.
+  //     ReplaceGlobalCallableFastPath()
+  //   } else {
+  //     CallRuntime(StringReplaceNonGlobalRegExpWithFunction)
+  //   }
+  // } else {
+  //   if (replace.contains("$")) {
+  //     CallRuntime(RegExpReplace)
+  //   } else {
+  //     ReplaceSimpleStringFastPath()  // Bails to runtime for global regexps.
+  //   }
+  // }
+
+  // Ensure {maybe_receiver} is a JSReceiver.
+  ThrowIfNotJSReceiver(context, maybe_receiver,
+                       MessageTemplate::kIncompatibleMethodReceiver,
+                       "RegExp.prototype.@@replace");
+  Node* const receiver = maybe_receiver;
+
+  // Convert {maybe_string} to a String.
+  Callable tostring_callable = CodeFactory::ToString(isolate());
+  Node* const string = CallStub(tostring_callable, context, maybe_string);
+
+  // Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
+  Label stub(this), runtime(this, Label::kDeferred);
+  BranchIfFastRegExp(context, receiver, LoadMap(receiver), &stub, &runtime);
+
+  Bind(&stub);
+  Callable replace_callable = CodeFactory::RegExpReplace(isolate());
+  Return(CallStub(replace_callable, context, receiver, string, replace_value));
+
+  Bind(&runtime);
+  Return(CallRuntime(Runtime::kRegExpReplace, context, receiver, string,
+                     replace_value));
 }
 
 // Simple string matching functionality for internal use which does not modify
 // the last match info.
-void Builtins::Generate_RegExpInternalMatch(CodeStubAssembler* a) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
+TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
+  Node* const regexp = Parameter(1);
+  Node* const string = Parameter(2);
+  Node* const context = Parameter(5);
 
-  Isolate* const isolate = a->isolate();
+  Node* const null = NullConstant();
+  Node* const smi_zero = SmiConstant(Smi::FromInt(0));
 
-  Node* const regexp = a->Parameter(1);
-  Node* const string = a->Parameter(2);
-  Node* const context = a->Parameter(5);
-
-  Node* const null = a->NullConstant();
-  Node* const smi_zero = a->SmiConstant(Smi::FromInt(0));
-
-  Node* const native_context = a->LoadNativeContext(context);
-  Node* const internal_match_info = a->LoadContextElement(
+  Node* const native_context = LoadNativeContext(context);
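+  // Use the dedicated internal match info so the externally visible
+  // last-match state (RegExp.lastMatch and friends) is not clobbered.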
+  Node* const internal_match_info = LoadContextElement(
       native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX);
 
-  Callable exec_callable = CodeFactory::RegExpExec(isolate);
-  Node* const match_indices = a->CallStub(
-      exec_callable, context, regexp, string, smi_zero, internal_match_info);
+  Callable exec_callable = CodeFactory::RegExpExec(isolate());
+  Node* const match_indices = CallStub(exec_callable, context, regexp, string,
+                                       smi_zero, internal_match_info);
 
-  Label if_matched(a), if_didnotmatch(a);
-  a->Branch(a->WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
+  Label if_matched(this), if_didnotmatch(this);
+  Branch(WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
 
-  a->Bind(&if_didnotmatch);
-  a->Return(null);
+  Bind(&if_didnotmatch);
+  Return(null);
 
-  a->Bind(&if_matched);
+  Bind(&if_matched);
   {
-    Node* result = ConstructNewResultFromMatchInfo(isolate, a, context,
-                                                   match_indices, string);
-    a->Return(result);
+    Node* result =
+        ConstructNewResultFromMatchInfo(context, regexp, match_indices, string);
+    Return(result);
   }
 }
 
diff --git a/src/builtins/builtins-regexp.h b/src/builtins/builtins-regexp.h
new file mode 100644
index 0000000..9e1bfdf
--- /dev/null
+++ b/src/builtins/builtins-regexp.h
@@ -0,0 +1,99 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_REGEXP_H_
+#define V8_BUILTINS_BUILTINS_REGEXP_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
+
+class RegExpBuiltinsAssembler : public CodeStubAssembler {
+ public:
+  explicit RegExpBuiltinsAssembler(CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
+
+  void BranchIfFastRegExp(Node* const context, Node* const object,
+                          Node* const map, Label* const if_isunmodified,
+                          Label* const if_ismodified);
+
+ protected:
+  Node* FastLoadLastIndex(Node* regexp);
+  Node* SlowLoadLastIndex(Node* context, Node* regexp);
+  Node* LoadLastIndex(Node* context, Node* regexp, bool is_fastpath);
+
+  void FastStoreLastIndex(Node* regexp, Node* value);
+  void SlowStoreLastIndex(Node* context, Node* regexp, Node* value);
+  void StoreLastIndex(Node* context, Node* regexp, Node* value,
+                      bool is_fastpath);
+
+  Node* ConstructNewResultFromMatchInfo(Node* const context, Node* const regexp,
+                                        Node* const match_info,
+                                        Node* const string);
+
+  Node* RegExpPrototypeExecBodyWithoutResult(Node* const context,
+                                             Node* const regexp,
+                                             Node* const string,
+                                             Label* if_didnotmatch,
+                                             const bool is_fastpath);
+  Node* RegExpPrototypeExecBody(Node* const context, Node* const regexp,
+                                Node* const string, const bool is_fastpath);
+
+  Node* ThrowIfNotJSReceiver(Node* context, Node* maybe_receiver,
+                             MessageTemplate::Template msg_template,
+                             char const* method_name);
+
+  // Analogous to BranchIfFastRegExp, for use in asserts.
+  Node* IsFastRegExpMap(Node* const context, Node* const object,
+                        Node* const map);
+
+  Node* IsInitialRegExpMap(Node* context, Node* object, Node* map);
+  void BranchIfFastRegExpResult(Node* context, Node* map,
+                                Label* if_isunmodified, Label* if_ismodified);
+
+  Node* FlagsGetter(Node* const context, Node* const regexp, bool is_fastpath);
+
+  Node* FastFlagGetter(Node* const regexp, JSRegExp::Flag flag);
+  Node* SlowFlagGetter(Node* const context, Node* const regexp,
+                       JSRegExp::Flag flag);
+  Node* FlagGetter(Node* const context, Node* const regexp, JSRegExp::Flag flag,
+                   bool is_fastpath);
+  void FlagGetter(JSRegExp::Flag flag, v8::Isolate::UseCounterFeature counter,
+                  const char* method_name);
+
+  Node* IsRegExp(Node* const context, Node* const maybe_receiver);
+  Node* RegExpInitialize(Node* const context, Node* const regexp,
+                         Node* const maybe_pattern, Node* const maybe_flags);
+
+  Node* RegExpExec(Node* context, Node* regexp, Node* string);
+
+  Node* AdvanceStringIndex(Node* const string, Node* const index,
+                           Node* const is_unicode, bool is_fastpath);
+
+  void RegExpPrototypeMatchBody(Node* const context, Node* const regexp,
+                                Node* const string, const bool is_fastpath);
+
+  void RegExpPrototypeSearchBodyFast(Node* const context, Node* const regexp,
+                                     Node* const string);
+  void RegExpPrototypeSearchBodySlow(Node* const context, Node* const regexp,
+                                     Node* const string);
+
+  void RegExpPrototypeSplitBody(Node* const context, Node* const regexp,
+                                Node* const string, Node* const limit);
+
+  Node* ReplaceGlobalCallableFastPath(Node* context, Node* regexp, Node* string,
+                                      Node* replace_callable);
+  Node* ReplaceSimpleStringFastPath(Node* context, Node* regexp, Node* string,
+                                    Node* replace_string);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_BUILTINS_BUILTINS_REGEXP_H_
diff --git a/src/builtins/builtins-sharedarraybuffer.cc b/src/builtins/builtins-sharedarraybuffer.cc
index 2b5bf49..b918078 100644
--- a/src/builtins/builtins-sharedarraybuffer.cc
+++ b/src/builtins/builtins-sharedarraybuffer.cc
@@ -2,10 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
 #include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -31,7 +31,7 @@
                               compiler::Node* context,
                               compiler::Node** out_instance_type,
                               compiler::Node** out_backing_store) {
-  using namespace compiler;
+  using compiler::Node;
   CodeStubAssembler::Label is_smi(a), not_smi(a), is_typed_array(a),
       not_typed_array(a), is_shared(a), not_shared(a), is_float_or_clamped(a),
       not_float_or_clamped(a), invalid(a);
@@ -43,8 +43,8 @@
 
   // Fail if the array's instance type is not JSTypedArray.
   a->Bind(&not_smi);
-  a->Branch(a->WordEqual(a->LoadInstanceType(tagged),
-                         a->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+  a->Branch(a->Word32Equal(a->LoadInstanceType(tagged),
+                           a->Int32Constant(JS_TYPED_ARRAY_TYPE)),
             &is_typed_array, &not_typed_array);
   a->Bind(&not_typed_array);
   a->Goto(&invalid);
@@ -78,7 +78,7 @@
   a->Bind(&invalid);
   a->CallRuntime(Runtime::kThrowNotIntegerSharedTypedArrayError, context,
                  tagged);
-  a->Return(a->UndefinedConstant());
+  a->Unreachable();
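+  // The runtime call above always throws, so this point is never reached;
+  // marking it unreachable avoids returning a dummy value.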
 
   a->Bind(&not_float_or_clamped);
   *out_instance_type = elements_instance_type;
@@ -88,14 +88,15 @@
   Node* byte_offset = a->ChangeUint32ToWord(a->TruncateTaggedToWord32(
       context,
       a->LoadObjectField(tagged, JSArrayBufferView::kByteOffsetOffset)));
-  *out_backing_store = a->IntPtrAdd(backing_store, byte_offset);
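+  // The backing store was loaded as a tagged value; bitcast it to a word
+  // before doing untagged pointer arithmetic with the byte offset.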
+  *out_backing_store =
+      a->IntPtrAdd(a->BitcastTaggedToWord(backing_store), byte_offset);
 }
 
 // https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess
 compiler::Node* ConvertTaggedAtomicIndexToWord32(CodeStubAssembler* a,
                                                  compiler::Node* tagged,
                                                  compiler::Node* context) {
-  using namespace compiler;
+  using compiler::Node;
   CodeStubAssembler::Variable var_result(a, MachineRepresentation::kWord32);
 
   Callable to_number = CodeFactory::ToNumber(a->isolate());
@@ -128,8 +129,8 @@
     }
 
     a->Bind(&if_indexesarenotequal);
-    a->Return(
-        a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context));
+    a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
+    a->Unreachable();
   }
 
   a->Bind(&done);
@@ -139,40 +140,41 @@
 void ValidateAtomicIndex(CodeStubAssembler* a, compiler::Node* index_word,
                          compiler::Node* array_length_word,
                          compiler::Node* context) {
-  using namespace compiler;
+  using compiler::Node;
   // Check if the index is in bounds. If not, throw RangeError.
   CodeStubAssembler::Label if_inbounds(a), if_notinbounds(a);
   // TODO(jkummerow): Use unsigned comparison instead of "i<0 || i>length".
   a->Branch(
-      a->WordOr(a->Int32LessThan(index_word, a->Int32Constant(0)),
-                a->Int32GreaterThanOrEqual(index_word, array_length_word)),
+      a->Word32Or(a->Int32LessThan(index_word, a->Int32Constant(0)),
+                  a->Int32GreaterThanOrEqual(index_word, array_length_word)),
       &if_notinbounds, &if_inbounds);
   a->Bind(&if_notinbounds);
-  a->Return(
-      a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context));
+  a->CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
+  a->Unreachable();
   a->Bind(&if_inbounds);
 }
 
 }  // anonymous namespace
 
-void Builtins::Generate_AtomicsLoad(CodeStubAssembler* a) {
-  using namespace compiler;
-  Node* array = a->Parameter(1);
-  Node* index = a->Parameter(2);
-  Node* context = a->Parameter(3 + 2);
+void Builtins::Generate_AtomicsLoad(compiler::CodeAssemblerState* state) {
+  using compiler::Node;
+  CodeStubAssembler a(state);
+  Node* array = a.Parameter(1);
+  Node* index = a.Parameter(2);
+  Node* context = a.Parameter(3 + 2);
 
   Node* instance_type;
   Node* backing_store;
-  ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
+  ValidateSharedTypedArray(&a, array, context, &instance_type, &backing_store);
 
-  Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
-  Node* array_length_word32 = a->TruncateTaggedToWord32(
-      context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
-  ValidateAtomicIndex(a, index_word32, array_length_word32, context);
-  Node* index_word = a->ChangeUint32ToWord(index_word32);
+  Node* index_word32 = ConvertTaggedAtomicIndexToWord32(&a, index, context);
+  Node* array_length_word32 = a.TruncateTaggedToWord32(
+      context, a.LoadObjectField(array, JSTypedArray::kLengthOffset));
+  ValidateAtomicIndex(&a, index_word32, array_length_word32, context);
+  Node* index_word = a.ChangeUint32ToWord(index_word32);
 
-  CodeStubAssembler::Label i8(a), u8(a), i16(a), u16(a), i32(a), u32(a),
-      other(a);
+  CodeStubAssembler::Label i8(&a), u8(&a), i16(&a), u16(&a), i32(&a), u32(&a),
+      other(&a);
   int32_t case_values[] = {
       FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
       FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
@@ -180,59 +182,60 @@
   CodeStubAssembler::Label* case_labels[] = {
       &i8, &u8, &i16, &u16, &i32, &u32,
   };
-  a->Switch(instance_type, &other, case_values, case_labels,
-            arraysize(case_labels));
+  a.Switch(instance_type, &other, case_values, case_labels,
+           arraysize(case_labels));
 
-  a->Bind(&i8);
-  a->Return(
-      a->SmiTag(a->AtomicLoad(MachineType::Int8(), backing_store, index_word)));
+  a.Bind(&i8);
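+  // AtomicLoad yields a word32 value; SmiFromWord32 (rather than SmiTag,
+  // which expects a full word) tags it correctly on 64-bit targets.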
+  a.Return(a.SmiFromWord32(
+      a.AtomicLoad(MachineType::Int8(), backing_store, index_word)));
 
-  a->Bind(&u8);
-  a->Return(a->SmiTag(
-      a->AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
+  a.Bind(&u8);
+  a.Return(a.SmiFromWord32(
+      a.AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
 
-  a->Bind(&i16);
-  a->Return(a->SmiTag(a->AtomicLoad(MachineType::Int16(), backing_store,
-                                    a->WordShl(index_word, 1))));
+  a.Bind(&i16);
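+  // 16-bit elements: shift the index left by one to scale it to a byte
+  // offset.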
+  a.Return(a.SmiFromWord32(a.AtomicLoad(MachineType::Int16(), backing_store,
+                                        a.WordShl(index_word, 1))));
 
-  a->Bind(&u16);
-  a->Return(a->SmiTag(a->AtomicLoad(MachineType::Uint16(), backing_store,
-                                    a->WordShl(index_word, 1))));
+  a.Bind(&u16);
+  a.Return(a.SmiFromWord32(a.AtomicLoad(MachineType::Uint16(), backing_store,
+                                        a.WordShl(index_word, 1))));
 
-  a->Bind(&i32);
-  a->Return(a->ChangeInt32ToTagged(a->AtomicLoad(
-      MachineType::Int32(), backing_store, a->WordShl(index_word, 2))));
+  a.Bind(&i32);
+  a.Return(a.ChangeInt32ToTagged(a.AtomicLoad(
+      MachineType::Int32(), backing_store, a.WordShl(index_word, 2))));
 
-  a->Bind(&u32);
-  a->Return(a->ChangeUint32ToTagged(a->AtomicLoad(
-      MachineType::Uint32(), backing_store, a->WordShl(index_word, 2))));
+  a.Bind(&u32);
+  a.Return(a.ChangeUint32ToTagged(a.AtomicLoad(
+      MachineType::Uint32(), backing_store, a.WordShl(index_word, 2))));
 
   // This shouldn't happen, we've already validated the type.
-  a->Bind(&other);
-  a->Return(a->Int32Constant(0));
+  a.Bind(&other);
+  a.Return(a.SmiConstant(0));
 }
 
-void Builtins::Generate_AtomicsStore(CodeStubAssembler* a) {
-  using namespace compiler;
-  Node* array = a->Parameter(1);
-  Node* index = a->Parameter(2);
-  Node* value = a->Parameter(3);
-  Node* context = a->Parameter(4 + 2);
+void Builtins::Generate_AtomicsStore(compiler::CodeAssemblerState* state) {
+  using compiler::Node;
+  CodeStubAssembler a(state);
+  Node* array = a.Parameter(1);
+  Node* index = a.Parameter(2);
+  Node* value = a.Parameter(3);
+  Node* context = a.Parameter(4 + 2);
 
   Node* instance_type;
   Node* backing_store;
-  ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
+  ValidateSharedTypedArray(&a, array, context, &instance_type, &backing_store);
 
-  Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
-  Node* array_length_word32 = a->TruncateTaggedToWord32(
-      context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
-  ValidateAtomicIndex(a, index_word32, array_length_word32, context);
-  Node* index_word = a->ChangeUint32ToWord(index_word32);
+  Node* index_word32 = ConvertTaggedAtomicIndexToWord32(&a, index, context);
+  Node* array_length_word32 = a.TruncateTaggedToWord32(
+      context, a.LoadObjectField(array, JSTypedArray::kLengthOffset));
+  ValidateAtomicIndex(&a, index_word32, array_length_word32, context);
+  Node* index_word = a.ChangeUint32ToWord(index_word32);
 
-  Node* value_integer = a->ToInteger(context, value);
-  Node* value_word32 = a->TruncateTaggedToWord32(context, value_integer);
+  Node* value_integer = a.ToInteger(context, value);
+  Node* value_word32 = a.TruncateTaggedToWord32(context, value_integer);
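+  // Atomics.store returns the ToInteger-converted value rather than the raw
+  // input, hence each case below returns {value_integer}.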
 
-  CodeStubAssembler::Label u8(a), u16(a), u32(a), other(a);
+  CodeStubAssembler::Label u8(&a), u16(&a), u32(&a), other(&a);
   int32_t case_values[] = {
       FIXED_INT8_ARRAY_TYPE,   FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
       FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
@@ -240,27 +243,27 @@
   CodeStubAssembler::Label* case_labels[] = {
       &u8, &u8, &u16, &u16, &u32, &u32,
   };
-  a->Switch(instance_type, &other, case_values, case_labels,
-            arraysize(case_labels));
+  a.Switch(instance_type, &other, case_values, case_labels,
+           arraysize(case_labels));
 
-  a->Bind(&u8);
-  a->AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
-                 value_word32);
-  a->Return(value_integer);
+  a.Bind(&u8);
+  a.AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
+                value_word32);
+  a.Return(value_integer);
 
-  a->Bind(&u16);
-  a->AtomicStore(MachineRepresentation::kWord16, backing_store,
-                 a->WordShl(index_word, 1), value_word32);
-  a->Return(value_integer);
+  a.Bind(&u16);
+  a.AtomicStore(MachineRepresentation::kWord16, backing_store,
+                a.WordShl(index_word, 1), value_word32);
+  a.Return(value_integer);
 
-  a->Bind(&u32);
-  a->AtomicStore(MachineRepresentation::kWord32, backing_store,
-                 a->WordShl(index_word, 2), value_word32);
-  a->Return(value_integer);
+  a.Bind(&u32);
+  a.AtomicStore(MachineRepresentation::kWord32, backing_store,
+                a.WordShl(index_word, 2), value_word32);
+  a.Return(value_integer);
 
   // This shouldn't happen, we've already validated the type.
-  a->Bind(&other);
-  a->Return(a->Int32Constant(0));
+  a.Bind(&other);
+  a.Return(a.SmiConstant(0));
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-string.cc b/src/builtins/builtins-string.cc
index 4ccccbc..7cef567 100644
--- a/src/builtins/builtins-string.cc
+++ b/src/builtins/builtins-string.cc
@@ -2,11 +2,18 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
+#include "src/builtins/builtins-regexp.h"
 #include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
 #include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
 #include "src/regexp/regexp-utils.h"
+#include "src/string-case.h"
+#include "src/unicode-inl.h"
+#include "src/unicode.h"
 
 namespace v8 {
 namespace internal {
@@ -14,9 +21,130 @@
 typedef CodeStubAssembler::ResultMode ResultMode;
 typedef CodeStubAssembler::RelationalComparisonMode RelationalComparisonMode;
 
-namespace {
+class StringBuiltinsAssembler : public CodeStubAssembler {
+ public:
+  explicit StringBuiltinsAssembler(compiler::CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
 
-void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
+ protected:
+  Node* DirectStringData(Node* string, Node* string_instance_type) {
+    // Compute a pointer to the first character of the string's data.
+    Variable var_data(this, MachineType::PointerRepresentation());
+    Label if_sequential(this), if_external(this), if_join(this);
+    Branch(Word32Equal(Word32And(string_instance_type,
+                                 Int32Constant(kStringRepresentationMask)),
+                       Int32Constant(kSeqStringTag)),
+           &if_sequential, &if_external);
+
+    Bind(&if_sequential);
+    {
+      var_data.Bind(IntPtrAdd(
+          IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
+          BitcastTaggedToWord(string)));
+      Goto(&if_join);
+    }
+
+    Bind(&if_external);
+    {
+      // This is only valid for ExternalStrings where the resource data
+      // pointer is cached (i.e. no short external strings).
+      CSA_ASSERT(this, Word32NotEqual(
+                           Word32And(string_instance_type,
+                                     Int32Constant(kShortExternalStringMask)),
+                           Int32Constant(kShortExternalStringTag)));
+      var_data.Bind(LoadObjectField(string, ExternalString::kResourceDataOffset,
+                                    MachineType::Pointer()));
+      Goto(&if_join);
+    }
+
+    Bind(&if_join);
+    return var_data.value();
+  }
+
+  Node* LoadOneByteChar(Node* string, Node* index) {
+    return Load(MachineType::Uint8(), string, OneByteCharOffset(index));
+  }
+
+  Node* OneByteCharAddress(Node* string, Node* index) {
+    Node* offset = OneByteCharOffset(index);
+    return IntPtrAdd(string, offset);
+  }
+
+  Node* OneByteCharOffset(Node* index) {
+    return CharOffset(String::ONE_BYTE_ENCODING, index);
+  }
+
+  Node* CharOffset(String::Encoding encoding, Node* index) {
+    const int header = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+    Node* offset = index;
+    if (encoding == String::TWO_BYTE_ENCODING) {
+      offset = IntPtrAdd(offset, offset);
+    }
+    offset = IntPtrAdd(offset, IntPtrConstant(header));
+    return offset;
+  }
+
+  void DispatchOnStringInstanceType(Node* const instance_type,
+                                    Label* if_onebyte_sequential,
+                                    Label* if_onebyte_external,
+                                    Label* if_otherwise) {
+    const int kMask = kStringRepresentationMask | kStringEncodingMask;
+    Node* const encoding_and_representation =
+        Word32And(instance_type, Int32Constant(kMask));
+
+    int32_t values[] = {
+        kOneByteStringTag | kSeqStringTag,
+        kOneByteStringTag | kExternalStringTag,
+    };
+    Label* labels[] = {
+        if_onebyte_sequential, if_onebyte_external,
+    };
+    STATIC_ASSERT(arraysize(values) == arraysize(labels));
+
+    Switch(encoding_and_representation, if_otherwise, values, labels,
+           arraysize(values));
+  }
+
+  void GenerateStringEqual(ResultMode mode);
+  void GenerateStringRelationalComparison(RelationalComparisonMode mode);
+
+  Node* ToSmiBetweenZeroAnd(Node* context, Node* value, Node* limit);
+
+  Node* LoadSurrogatePairAt(Node* string, Node* length, Node* index,
+                            UnicodeEncoding encoding);
+
+  void StringIndexOf(Node* receiver, Node* instance_type, Node* search_string,
+                     Node* search_string_instance_type, Node* position,
+                     std::function<void(Node*)> f_return);
+
+  Node* IsNullOrUndefined(Node* const value);
+  void RequireObjectCoercible(Node* const context, Node* const value,
+                              const char* method_name);
+
+  Node* SmiIsNegative(Node* const value) {
+    return SmiLessThan(value, SmiConstant(0));
+  }
+
+  // Implements boilerplate logic for {match, split, replace, search} of the
+  // form:
+  //
+  //  if (!IS_NULL_OR_UNDEFINED(object)) {
+  //    var maybe_function = object[symbol];
+  //    if (!IS_UNDEFINED(maybe_function)) {
+  //      return %_Call(maybe_function, ...);
+  //    }
+  //  }
+  //
+  // Contains fast paths for Smi and RegExp objects.
+  typedef std::function<Node*()> NodeFunction0;
+  typedef std::function<Node*(Node* fn)> NodeFunction1;
+  void MaybeCallFunctionAtSymbol(Node* const context, Node* const object,
+                                 Handle<Symbol> symbol,
+                                 const NodeFunction0& regexp_call,
+                                 const NodeFunction1& generic_call);
+};
+
+void StringBuiltinsAssembler::GenerateStringEqual(ResultMode mode) {
   // Here's pseudo-code for the algorithm below in case of kDontNegateResult
   // mode; for kNegateResult mode we properly negate the result.
   //
@@ -31,506 +159,442 @@
   //   }
   //   return true;
   // }
+  // if (lhs and/or rhs are indirect strings) {
+  //   unwrap them and restart from the beginning;
+  // }
   // return %StringEqual(lhs, rhs);
 
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
+  Variable var_left(this, MachineRepresentation::kTagged);
+  Variable var_right(this, MachineRepresentation::kTagged);
+  var_left.Bind(Parameter(0));
+  var_right.Bind(Parameter(1));
+  Node* context = Parameter(2);
 
-  Node* lhs = assembler->Parameter(0);
-  Node* rhs = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
-
-  Label if_equal(assembler), if_notequal(assembler);
+  Variable* input_vars[2] = {&var_left, &var_right};
+  Label if_equal(this), if_notequal(this), restart(this, 2, input_vars);
+  Goto(&restart);
+  Bind(&restart);
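+  // Control returns to {restart} after indirect (cons, sliced, thin) strings
+  // are unwrapped, retrying the comparison on the underlying direct strings.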
+  Node* lhs = var_left.value();
+  Node* rhs = var_right.value();
 
   // Fast check to see if {lhs} and {rhs} refer to the same String object.
-  Label if_same(assembler), if_notsame(assembler);
-  assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+  GotoIf(WordEqual(lhs, rhs), &if_equal);
 
-  assembler->Bind(&if_same);
-  assembler->Goto(&if_equal);
+  // Load the length of {lhs} and {rhs}.
+  Node* lhs_length = LoadStringLength(lhs);
+  Node* rhs_length = LoadStringLength(rhs);
 
-  assembler->Bind(&if_notsame);
+  // Strings with different lengths cannot be equal.
+  GotoIf(WordNotEqual(lhs_length, rhs_length), &if_notequal);
+
+  // Load instance types of {lhs} and {rhs}.
+  Node* lhs_instance_type = LoadInstanceType(lhs);
+  Node* rhs_instance_type = LoadInstanceType(rhs);
+
+  // Combine the instance types into a single 16-bit value, so we can check
+  // both of them at once.
+  Node* both_instance_types = Word32Or(
+      lhs_instance_type, Word32Shl(rhs_instance_type, Int32Constant(8)));
+
+  // Check if both {lhs} and {rhs} are internalized. Since we already know
+  // that they're not the same object, they're not equal in that case.
+  int const kBothInternalizedMask =
+      kIsNotInternalizedMask | (kIsNotInternalizedMask << 8);
+  int const kBothInternalizedTag = kInternalizedTag | (kInternalizedTag << 8);
+  GotoIf(Word32Equal(Word32And(both_instance_types,
+                               Int32Constant(kBothInternalizedMask)),
+                     Int32Constant(kBothInternalizedTag)),
+         &if_notequal);
+
+  // Check that both {lhs} and {rhs} are flat one-byte strings, and that
+  // in case of ExternalStrings the data pointer is cached.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  int const kBothDirectOneByteStringMask =
+      kStringEncodingMask | kIsIndirectStringMask | kShortExternalStringMask |
+      ((kStringEncodingMask | kIsIndirectStringMask | kShortExternalStringMask)
+       << 8);
+  int const kBothDirectOneByteStringTag =
+      kOneByteStringTag | (kOneByteStringTag << 8);
+  Label if_bothdirectonebytestrings(this), if_notbothdirectonebytestrings(this);
+  Branch(Word32Equal(Word32And(both_instance_types,
+                               Int32Constant(kBothDirectOneByteStringMask)),
+                     Int32Constant(kBothDirectOneByteStringTag)),
+         &if_bothdirectonebytestrings, &if_notbothdirectonebytestrings);
+
+  Bind(&if_bothdirectonebytestrings);
   {
-    // The {lhs} and {rhs} don't refer to the exact same String object.
+    // Load pointers to the raw character data of {lhs} and {rhs}.
+    Node* lhs_data = DirectStringData(lhs, lhs_instance_type);
+    Node* rhs_data = DirectStringData(rhs, rhs_instance_type);
 
-    // Load the length of {lhs} and {rhs}.
-    Node* lhs_length = assembler->LoadStringLength(lhs);
-    Node* rhs_length = assembler->LoadStringLength(rhs);
+    // Untag the length; it serves as the loop bound below.
+    Node* length = SmiUntag(lhs_length);
 
-    // Check if the lengths of {lhs} and {rhs} are equal.
-    Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
-    assembler->Branch(assembler->WordEqual(lhs_length, rhs_length),
-                      &if_lengthisequal, &if_lengthisnotequal);
-
-    assembler->Bind(&if_lengthisequal);
+    // Loop over the {lhs} and {rhs} strings to see if they are equal.
+    Variable var_offset(this, MachineType::PointerRepresentation());
+    Label loop(this, &var_offset);
+    var_offset.Bind(IntPtrConstant(0));
+    Goto(&loop);
+    Bind(&loop);
     {
-      // Load instance types of {lhs} and {rhs}.
-      Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
-      Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+      // If {offset} equals {length}, no difference was found, so the
+      // strings are equal.
+      Node* offset = var_offset.value();
+      GotoIf(WordEqual(offset, length), &if_equal);
 
-      // Combine the instance types into a single 16-bit value, so we can check
-      // both of them at once.
-      Node* both_instance_types = assembler->Word32Or(
-          lhs_instance_type,
-          assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
+      // Load the next characters from {lhs} and {rhs}.
+      Node* lhs_value = Load(MachineType::Uint8(), lhs_data, offset);
+      Node* rhs_value = Load(MachineType::Uint8(), rhs_data, offset);
 
-      // Check if both {lhs} and {rhs} are internalized.
-      int const kBothInternalizedMask =
-          kIsNotInternalizedMask | (kIsNotInternalizedMask << 8);
-      int const kBothInternalizedTag =
-          kInternalizedTag | (kInternalizedTag << 8);
-      Label if_bothinternalized(assembler), if_notbothinternalized(assembler);
-      assembler->Branch(assembler->Word32Equal(
-                            assembler->Word32And(both_instance_types,
-                                                 assembler->Int32Constant(
-                                                     kBothInternalizedMask)),
-                            assembler->Int32Constant(kBothInternalizedTag)),
-                        &if_bothinternalized, &if_notbothinternalized);
+      // Check if the characters match.
+      GotoIf(Word32NotEqual(lhs_value, rhs_value), &if_notequal);
 
-      assembler->Bind(&if_bothinternalized);
-      {
-        // Fast negative check for internalized-to-internalized equality.
-        assembler->Goto(&if_notequal);
-      }
-
-      assembler->Bind(&if_notbothinternalized);
-      {
-        // Check that both {lhs} and {rhs} are flat one-byte strings.
-        int const kBothSeqOneByteStringMask =
-            kStringEncodingMask | kStringRepresentationMask |
-            ((kStringEncodingMask | kStringRepresentationMask) << 8);
-        int const kBothSeqOneByteStringTag =
-            kOneByteStringTag | kSeqStringTag |
-            ((kOneByteStringTag | kSeqStringTag) << 8);
-        Label if_bothonebyteseqstrings(assembler),
-            if_notbothonebyteseqstrings(assembler);
-        assembler->Branch(
-            assembler->Word32Equal(
-                assembler->Word32And(
-                    both_instance_types,
-                    assembler->Int32Constant(kBothSeqOneByteStringMask)),
-                assembler->Int32Constant(kBothSeqOneByteStringTag)),
-            &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
-
-        assembler->Bind(&if_bothonebyteseqstrings);
-        {
-          // Compute the effective offset of the first character.
-          Node* begin = assembler->IntPtrConstant(
-              SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
-          // Compute the first offset after the string from the length.
-          Node* end =
-              assembler->IntPtrAdd(begin, assembler->SmiUntag(lhs_length));
-
-          // Loop over the {lhs} and {rhs} strings to see if they are equal.
-          Variable var_offset(assembler, MachineType::PointerRepresentation());
-          Label loop(assembler, &var_offset);
-          var_offset.Bind(begin);
-          assembler->Goto(&loop);
-          assembler->Bind(&loop);
-          {
-            // Check if {offset} equals {end}.
-            Node* offset = var_offset.value();
-            Label if_done(assembler), if_notdone(assembler);
-            assembler->Branch(assembler->WordEqual(offset, end), &if_done,
-                              &if_notdone);
-
-            assembler->Bind(&if_notdone);
-            {
-              // Load the next characters from {lhs} and {rhs}.
-              Node* lhs_value =
-                  assembler->Load(MachineType::Uint8(), lhs, offset);
-              Node* rhs_value =
-                  assembler->Load(MachineType::Uint8(), rhs, offset);
-
-              // Check if the characters match.
-              Label if_valueissame(assembler), if_valueisnotsame(assembler);
-              assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
-                                &if_valueissame, &if_valueisnotsame);
-
-              assembler->Bind(&if_valueissame);
-              {
-                // Advance to next character.
-                var_offset.Bind(
-                    assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
-              }
-              assembler->Goto(&loop);
-
-              assembler->Bind(&if_valueisnotsame);
-              assembler->Goto(&if_notequal);
-            }
-
-            assembler->Bind(&if_done);
-            assembler->Goto(&if_equal);
-          }
-        }
-
-        assembler->Bind(&if_notbothonebyteseqstrings);
-        {
-          // TODO(bmeurer): Add fast case support for flattened cons strings;
-          // also add support for two byte string equality checks.
-          Runtime::FunctionId function_id =
-              (mode == ResultMode::kDontNegateResult)
-                  ? Runtime::kStringEqual
-                  : Runtime::kStringNotEqual;
-          assembler->TailCallRuntime(function_id, context, lhs, rhs);
-        }
-      }
-    }
-
-    assembler->Bind(&if_lengthisnotequal);
-    {
-      // Mismatch in length of {lhs} and {rhs}, cannot be equal.
-      assembler->Goto(&if_notequal);
+      // Advance to next character.
+      var_offset.Bind(IntPtrAdd(offset, IntPtrConstant(1)));
+      Goto(&loop);
     }
   }
 
-  assembler->Bind(&if_equal);
-  assembler->Return(
-      assembler->BooleanConstant(mode == ResultMode::kDontNegateResult));
+  Bind(&if_notbothdirectonebytestrings);
+  {
+    // Try to unwrap indirect strings; restart the above attempt on success.
+    MaybeDerefIndirectStrings(&var_left, lhs_instance_type, &var_right,
+                              rhs_instance_type, &restart);
+    // TODO(bmeurer): Add support for two byte string equality checks.
 
-  assembler->Bind(&if_notequal);
-  assembler->Return(
-      assembler->BooleanConstant(mode == ResultMode::kNegateResult));
+    Runtime::FunctionId function_id = (mode == ResultMode::kDontNegateResult)
+                                          ? Runtime::kStringEqual
+                                          : Runtime::kStringNotEqual;
+    TailCallRuntime(function_id, context, lhs, rhs);
+  }
+
+  Bind(&if_equal);
+  Return(BooleanConstant(mode == ResultMode::kDontNegateResult));
+
+  Bind(&if_notequal);
+  Return(BooleanConstant(mode == ResultMode::kNegateResult));
 }
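+
+// The combined-instance-type trick used above, as a standalone sketch
+// (hypothetical names; instance types are assumed to fit in 8 bits):
+//
+//   uint32_t both = lhs_type | (rhs_type << 8);
+//   bool both_match = (both & (mask | (mask << 8))) == (tag | (tag << 8));
+//
+// Since the low and high bytes are independent, both_match holds exactly when
+// (lhs_type & mask) == tag holds for {lhs} and for {rhs} simultaneously.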
 
+void StringBuiltinsAssembler::GenerateStringRelationalComparison(
+    RelationalComparisonMode mode) {
+  Variable var_left(this, MachineRepresentation::kTagged);
+  Variable var_right(this, MachineRepresentation::kTagged);
+  var_left.Bind(Parameter(0));
+  var_right.Bind(Parameter(1));
+  Node* context = Parameter(2);
 
-void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
-                                        RelationalComparisonMode mode) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
+  Variable* input_vars[2] = {&var_left, &var_right};
+  Label if_less(this), if_equal(this), if_greater(this);
+  Label restart(this, 2, input_vars);
+  Goto(&restart);
+  Bind(&restart);
 
-  Node* lhs = assembler->Parameter(0);
-  Node* rhs = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
-
-  Label if_less(assembler), if_equal(assembler), if_greater(assembler);
-
+  Node* lhs = var_left.value();
+  Node* rhs = var_right.value();
   // Fast check to see if {lhs} and {rhs} refer to the same String object.
-  Label if_same(assembler), if_notsame(assembler);
-  assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+  GotoIf(WordEqual(lhs, rhs), &if_equal);
 
-  assembler->Bind(&if_same);
-  assembler->Goto(&if_equal);
+  // Load instance types of {lhs} and {rhs}.
+  Node* lhs_instance_type = LoadInstanceType(lhs);
+  Node* rhs_instance_type = LoadInstanceType(rhs);
 
-  assembler->Bind(&if_notsame);
+  // Combine the instance types into a single 16-bit value, so we can check
+  // both of them at once.
+  Node* both_instance_types = Word32Or(
+      lhs_instance_type, Word32Shl(rhs_instance_type, Int32Constant(8)));
+
+  // Check that both {lhs} and {rhs} are flat one-byte strings.
+  int const kBothSeqOneByteStringMask =
+      kStringEncodingMask | kStringRepresentationMask |
+      ((kStringEncodingMask | kStringRepresentationMask) << 8);
+  int const kBothSeqOneByteStringTag =
+      kOneByteStringTag | kSeqStringTag |
+      ((kOneByteStringTag | kSeqStringTag) << 8);
+  Label if_bothonebyteseqstrings(this), if_notbothonebyteseqstrings(this);
+  Branch(Word32Equal(Word32And(both_instance_types,
+                               Int32Constant(kBothSeqOneByteStringMask)),
+                     Int32Constant(kBothSeqOneByteStringTag)),
+         &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+  Bind(&if_bothonebyteseqstrings);
   {
-    // Load instance types of {lhs} and {rhs}.
-    Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
-    Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+    // Load the length of {lhs} and {rhs}.
+    Node* lhs_length = LoadStringLength(lhs);
+    Node* rhs_length = LoadStringLength(rhs);
 
-    // Combine the instance types into a single 16-bit value, so we can check
-    // both of them at once.
-    Node* both_instance_types = assembler->Word32Or(
-        lhs_instance_type,
-        assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
+    // Determine the minimum length.
+    Node* length = SmiMin(lhs_length, rhs_length);
 
-    // Check that both {lhs} and {rhs} are flat one-byte strings.
-    int const kBothSeqOneByteStringMask =
-        kStringEncodingMask | kStringRepresentationMask |
-        ((kStringEncodingMask | kStringRepresentationMask) << 8);
-    int const kBothSeqOneByteStringTag =
-        kOneByteStringTag | kSeqStringTag |
-        ((kOneByteStringTag | kSeqStringTag) << 8);
-    Label if_bothonebyteseqstrings(assembler),
-        if_notbothonebyteseqstrings(assembler);
-    assembler->Branch(assembler->Word32Equal(
-                          assembler->Word32And(both_instance_types,
-                                               assembler->Int32Constant(
-                                                   kBothSeqOneByteStringMask)),
-                          assembler->Int32Constant(kBothSeqOneByteStringTag)),
-                      &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+    // Compute the effective offset of the first character.
+    Node* begin =
+        IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag);
 
-    assembler->Bind(&if_bothonebyteseqstrings);
+    // Compute the first offset after the string from the length.
+    Node* end = IntPtrAdd(begin, SmiUntag(length));
+
+    // Loop over the {lhs} and {rhs} strings to see if they are equal.
+    Variable var_offset(this, MachineType::PointerRepresentation());
+    Label loop(this, &var_offset);
+    var_offset.Bind(begin);
+    Goto(&loop);
+    Bind(&loop);
     {
-      // Load the length of {lhs} and {rhs}.
-      Node* lhs_length = assembler->LoadStringLength(lhs);
-      Node* rhs_length = assembler->LoadStringLength(rhs);
+      // Check if {offset} equals {end}.
+      Node* offset = var_offset.value();
+      Label if_done(this), if_notdone(this);
+      Branch(WordEqual(offset, end), &if_done, &if_notdone);
 
-      // Determine the minimum length.
-      Node* length = assembler->SmiMin(lhs_length, rhs_length);
-
-      // Compute the effective offset of the first character.
-      Node* begin = assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
-                                              kHeapObjectTag);
-
-      // Compute the first offset after the string from the length.
-      Node* end = assembler->IntPtrAdd(begin, assembler->SmiUntag(length));
-
-      // Loop over the {lhs} and {rhs} strings to see if they are equal.
-      Variable var_offset(assembler, MachineType::PointerRepresentation());
-      Label loop(assembler, &var_offset);
-      var_offset.Bind(begin);
-      assembler->Goto(&loop);
-      assembler->Bind(&loop);
+      Bind(&if_notdone);
       {
-        // Check if {offset} equals {end}.
-        Node* offset = var_offset.value();
-        Label if_done(assembler), if_notdone(assembler);
-        assembler->Branch(assembler->WordEqual(offset, end), &if_done,
-                          &if_notdone);
+        // Load the next characters from {lhs} and {rhs}.
+        Node* lhs_value = Load(MachineType::Uint8(), lhs, offset);
+        Node* rhs_value = Load(MachineType::Uint8(), rhs, offset);
 
-        assembler->Bind(&if_notdone);
+        // Check if the characters match.
+        Label if_valueissame(this), if_valueisnotsame(this);
+        Branch(Word32Equal(lhs_value, rhs_value), &if_valueissame,
+               &if_valueisnotsame);
+
+        Bind(&if_valueissame);
         {
-          // Load the next characters from {lhs} and {rhs}.
-          Node* lhs_value = assembler->Load(MachineType::Uint8(), lhs, offset);
-          Node* rhs_value = assembler->Load(MachineType::Uint8(), rhs, offset);
-
-          // Check if the characters match.
-          Label if_valueissame(assembler), if_valueisnotsame(assembler);
-          assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
-                            &if_valueissame, &if_valueisnotsame);
-
-          assembler->Bind(&if_valueissame);
-          {
-            // Advance to next character.
-            var_offset.Bind(
-                assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
-          }
-          assembler->Goto(&loop);
-
-          assembler->Bind(&if_valueisnotsame);
-          assembler->Branch(assembler->Uint32LessThan(lhs_value, rhs_value),
-                            &if_less, &if_greater);
+          // Advance to next character.
+          var_offset.Bind(IntPtrAdd(offset, IntPtrConstant(1)));
         }
+        Goto(&loop);
 
-        assembler->Bind(&if_done);
-        {
-          // All characters up to the min length are equal, decide based on
-          // string length.
-          Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
-          assembler->Branch(assembler->SmiEqual(lhs_length, rhs_length),
-                            &if_lengthisequal, &if_lengthisnotequal);
+        Bind(&if_valueisnotsame);
+        Branch(Uint32LessThan(lhs_value, rhs_value), &if_less, &if_greater);
+      }
 
-          assembler->Bind(&if_lengthisequal);
-          assembler->Goto(&if_equal);
-
-          assembler->Bind(&if_lengthisnotequal);
-          assembler->BranchIfSmiLessThan(lhs_length, rhs_length, &if_less,
-                                         &if_greater);
-        }
+      Bind(&if_done);
+      {
+        // All characters up to the min length are equal; decide based on
+        // string length.
+        GotoIf(SmiEqual(lhs_length, rhs_length), &if_equal);
+        BranchIfSmiLessThan(lhs_length, rhs_length, &if_less, &if_greater);
       }
     }
+    }
 
-    assembler->Bind(&if_notbothonebyteseqstrings);
+    Bind(&if_notbothonebyteseqstrings);
     {
-      // TODO(bmeurer): Add fast case support for flattened cons strings;
-      // also add support for two byte string relational comparisons.
+      // Try to unwrap indirect strings; restart the above attempt on success.
+      MaybeDerefIndirectStrings(&var_left, lhs_instance_type, &var_right,
+                                rhs_instance_type, &restart);
+      // TODO(bmeurer): Add support for two byte string relational comparisons.
       switch (mode) {
         case RelationalComparisonMode::kLessThan:
-          assembler->TailCallRuntime(Runtime::kStringLessThan, context, lhs,
-                                     rhs);
+          TailCallRuntime(Runtime::kStringLessThan, context, lhs, rhs);
           break;
         case RelationalComparisonMode::kLessThanOrEqual:
-          assembler->TailCallRuntime(Runtime::kStringLessThanOrEqual, context,
-                                     lhs, rhs);
+          TailCallRuntime(Runtime::kStringLessThanOrEqual, context, lhs, rhs);
           break;
         case RelationalComparisonMode::kGreaterThan:
-          assembler->TailCallRuntime(Runtime::kStringGreaterThan, context, lhs,
-                                     rhs);
+          TailCallRuntime(Runtime::kStringGreaterThan, context, lhs, rhs);
           break;
         case RelationalComparisonMode::kGreaterThanOrEqual:
-          assembler->TailCallRuntime(Runtime::kStringGreaterThanOrEqual,
-                                     context, lhs, rhs);
+          TailCallRuntime(Runtime::kStringGreaterThanOrEqual, context, lhs,
+                          rhs);
           break;
       }
     }
+
+    Bind(&if_less);
+    switch (mode) {
+      case RelationalComparisonMode::kLessThan:
+      case RelationalComparisonMode::kLessThanOrEqual:
+        Return(BooleanConstant(true));
+        break;
+
+      case RelationalComparisonMode::kGreaterThan:
+      case RelationalComparisonMode::kGreaterThanOrEqual:
+        Return(BooleanConstant(false));
+        break;
   }
 
-  assembler->Bind(&if_less);
-  switch (mode) {
-    case RelationalComparisonMode::kLessThan:
-    case RelationalComparisonMode::kLessThanOrEqual:
-      assembler->Return(assembler->BooleanConstant(true));
-      break;
-
-    case RelationalComparisonMode::kGreaterThan:
-    case RelationalComparisonMode::kGreaterThanOrEqual:
-      assembler->Return(assembler->BooleanConstant(false));
-      break;
-  }
-
-  assembler->Bind(&if_equal);
+  Bind(&if_equal);
   switch (mode) {
     case RelationalComparisonMode::kLessThan:
     case RelationalComparisonMode::kGreaterThan:
-      assembler->Return(assembler->BooleanConstant(false));
+      Return(BooleanConstant(false));
       break;
 
     case RelationalComparisonMode::kLessThanOrEqual:
     case RelationalComparisonMode::kGreaterThanOrEqual:
-      assembler->Return(assembler->BooleanConstant(true));
+      Return(BooleanConstant(true));
       break;
   }
 
-  assembler->Bind(&if_greater);
+  Bind(&if_greater);
   switch (mode) {
     case RelationalComparisonMode::kLessThan:
     case RelationalComparisonMode::kLessThanOrEqual:
-      assembler->Return(assembler->BooleanConstant(false));
+      Return(BooleanConstant(false));
       break;
 
     case RelationalComparisonMode::kGreaterThan:
     case RelationalComparisonMode::kGreaterThanOrEqual:
-      assembler->Return(assembler->BooleanConstant(true));
+      Return(BooleanConstant(true));
       break;
   }
 }
 
-}  // namespace
-
-// static
-void Builtins::Generate_StringEqual(CodeStubAssembler* assembler) {
-  GenerateStringEqual(assembler, ResultMode::kDontNegateResult);
+TF_BUILTIN(StringEqual, StringBuiltinsAssembler) {
+  GenerateStringEqual(ResultMode::kDontNegateResult);
 }
 
-// static
-void Builtins::Generate_StringNotEqual(CodeStubAssembler* assembler) {
-  GenerateStringEqual(assembler, ResultMode::kNegateResult);
+TF_BUILTIN(StringNotEqual, StringBuiltinsAssembler) {
+  GenerateStringEqual(ResultMode::kNegateResult);
 }
 
-// static
-void Builtins::Generate_StringLessThan(CodeStubAssembler* assembler) {
-  GenerateStringRelationalComparison(assembler,
-                                     RelationalComparisonMode::kLessThan);
+TF_BUILTIN(StringLessThan, StringBuiltinsAssembler) {
+  GenerateStringRelationalComparison(RelationalComparisonMode::kLessThan);
 }
 
-// static
-void Builtins::Generate_StringLessThanOrEqual(CodeStubAssembler* assembler) {
+TF_BUILTIN(StringLessThanOrEqual, StringBuiltinsAssembler) {
   GenerateStringRelationalComparison(
-      assembler, RelationalComparisonMode::kLessThanOrEqual);
+      RelationalComparisonMode::kLessThanOrEqual);
 }
 
-// static
-void Builtins::Generate_StringGreaterThan(CodeStubAssembler* assembler) {
-  GenerateStringRelationalComparison(assembler,
-                                     RelationalComparisonMode::kGreaterThan);
+TF_BUILTIN(StringGreaterThan, StringBuiltinsAssembler) {
+  GenerateStringRelationalComparison(RelationalComparisonMode::kGreaterThan);
 }
 
-// static
-void Builtins::Generate_StringGreaterThanOrEqual(CodeStubAssembler* assembler) {
+TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
   GenerateStringRelationalComparison(
-      assembler, RelationalComparisonMode::kGreaterThanOrEqual);
+      RelationalComparisonMode::kGreaterThanOrEqual);
+}
+
+TF_BUILTIN(StringCharAt, CodeStubAssembler) {
+  Node* receiver = Parameter(0);
+  Node* position = Parameter(1);
+
+  // Load the character code at the {position} from the {receiver}.
+  Node* code = StringCharCodeAt(receiver, position, INTPTR_PARAMETERS);
+
+  // And return the single character string with only that {code}.
+  Node* result = StringFromCharCode(code);
+  Return(result);
+}
+
+TF_BUILTIN(StringCharCodeAt, CodeStubAssembler) {
+  Node* receiver = Parameter(0);
+  Node* position = Parameter(1);
+
+  // Load the character code at the {position} from the {receiver}.
+  Node* code = StringCharCodeAt(receiver, position, INTPTR_PARAMETERS);
+
+  // And return it as a TaggedSigned value.
+  // TODO(turbofan): Allow builtins to return values untagged.
+  Node* result = SmiFromWord32(code);
+  Return(result);
 }
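+
+// Both stubs above take an already-untagged (intptr) {position} and perform
+// no receiver checks; illustratively, once validation has happened elsewhere,
+//
+//   "a".charCodeAt(0)  // yields 97, returned here as a tagged Smi.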
 
 // -----------------------------------------------------------------------------
 // ES6 section 21.1 String Objects
 
 // ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
-void Builtins::Generate_StringFromCharCode(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
+  Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+  Node* context = Parameter(BuiltinDescriptor::kContext);
 
-  Node* argc = assembler->ChangeInt32ToIntPtr(
-      assembler->Parameter(BuiltinDescriptor::kArgumentsCount));
-  Node* context = assembler->Parameter(BuiltinDescriptor::kContext);
-
-  CodeStubArguments arguments(assembler, argc);
+  CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
+  // From now on, use the word-sized argc value.
+  argc = arguments.GetLength();
 
   // Check if we have exactly one argument (plus the implicit receiver), i.e.
   // if the parent frame is not an arguments adaptor frame.
-  Label if_oneargument(assembler), if_notoneargument(assembler);
-  assembler->Branch(assembler->WordEqual(argc, assembler->IntPtrConstant(1)),
-                    &if_oneargument, &if_notoneargument);
+  Label if_oneargument(this), if_notoneargument(this);
+  Branch(WordEqual(argc, IntPtrConstant(1)), &if_oneargument,
+         &if_notoneargument);
 
-  assembler->Bind(&if_oneargument);
+  Bind(&if_oneargument);
   {
     // Single argument case, perform fast single character string cache lookup
     // for one-byte code units, or fall back to creating a single character
     // string on the fly otherwise.
     Node* code = arguments.AtIndex(0);
-    Node* code32 = assembler->TruncateTaggedToWord32(context, code);
-    Node* code16 = assembler->Word32And(
-        code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
-    Node* result = assembler->StringFromCharCode(code16);
+    Node* code32 = TruncateTaggedToWord32(context, code);
+    Node* code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
+    Node* result = StringFromCharCode(code16);
     arguments.PopAndReturn(result);
   }
 
   Node* code16 = nullptr;
-  assembler->Bind(&if_notoneargument);
+  Bind(&if_notoneargument);
   {
-    Label two_byte(assembler);
+    Label two_byte(this);
     // Assume that the resulting string contains only one-byte characters.
-    Node* one_byte_result = assembler->AllocateSeqOneByteString(context, argc);
+    Node* one_byte_result = AllocateSeqOneByteString(context, argc);
 
-    Variable max_index(assembler, MachineType::PointerRepresentation());
-    max_index.Bind(assembler->IntPtrConstant(0));
+    Variable max_index(this, MachineType::PointerRepresentation());
+    max_index.Bind(IntPtrConstant(0));
 
     // Iterate over the incoming arguments, converting them to 8-bit character
     // codes. Stop if any of the conversions generates a code that doesn't fit
     // in 8 bits.
-    CodeStubAssembler::VariableList vars({&max_index}, assembler->zone());
-    arguments.ForEach(vars, [context, &two_byte, &max_index, &code16,
-                             one_byte_result](CodeStubAssembler* assembler,
-                                              Node* arg) {
-      Node* code32 = assembler->TruncateTaggedToWord32(context, arg);
-      code16 = assembler->Word32And(
-          code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+    CodeStubAssembler::VariableList vars({&max_index}, zone());
+    arguments.ForEach(vars, [this, context, &two_byte, &max_index, &code16,
+                             one_byte_result](Node* arg) {
+      Node* code32 = TruncateTaggedToWord32(context, arg);
+      code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
 
-      assembler->GotoIf(
-          assembler->Int32GreaterThan(
-              code16, assembler->Int32Constant(String::kMaxOneByteCharCode)),
+      GotoIf(
+          Int32GreaterThan(code16, Int32Constant(String::kMaxOneByteCharCode)),
           &two_byte);
 
       // The {code16} fits into the SeqOneByteString {one_byte_result}.
-      Node* offset = assembler->ElementOffsetFromIndex(
+      Node* offset = ElementOffsetFromIndex(
           max_index.value(), UINT8_ELEMENTS,
           CodeStubAssembler::INTPTR_PARAMETERS,
           SeqOneByteString::kHeaderSize - kHeapObjectTag);
-      assembler->StoreNoWriteBarrier(MachineRepresentation::kWord8,
-                                     one_byte_result, offset, code16);
-      max_index.Bind(assembler->IntPtrAdd(max_index.value(),
-                                          assembler->IntPtrConstant(1)));
+      StoreNoWriteBarrier(MachineRepresentation::kWord8, one_byte_result,
+                          offset, code16);
+      max_index.Bind(IntPtrAdd(max_index.value(), IntPtrConstant(1)));
     });
     arguments.PopAndReturn(one_byte_result);
 
-    assembler->Bind(&two_byte);
+    Bind(&two_byte);
 
     // At least one of the characters in the string requires a 16-bit
     // representation.  Allocate a SeqTwoByteString to hold the resulting
     // string.
-    Node* two_byte_result = assembler->AllocateSeqTwoByteString(context, argc);
+    Node* two_byte_result = AllocateSeqTwoByteString(context, argc);
 
     // Copy the characters that have already been put in the 8-bit string into
     // their corresponding positions in the new 16-bit string.
-    Node* zero = assembler->IntPtrConstant(0);
-    assembler->CopyStringCharacters(
-        one_byte_result, two_byte_result, zero, zero, max_index.value(),
-        String::ONE_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
-        CodeStubAssembler::INTPTR_PARAMETERS);
+    Node* zero = IntPtrConstant(0);
+    CopyStringCharacters(one_byte_result, two_byte_result, zero, zero,
+                         max_index.value(), String::ONE_BYTE_ENCODING,
+                         String::TWO_BYTE_ENCODING,
+                         CodeStubAssembler::INTPTR_PARAMETERS);
 
     // Write the character that caused the 8-bit to 16-bit fault.
-    Node* max_index_offset = assembler->ElementOffsetFromIndex(
-        max_index.value(), UINT16_ELEMENTS,
-        CodeStubAssembler::INTPTR_PARAMETERS,
-        SeqTwoByteString::kHeaderSize - kHeapObjectTag);
-    assembler->StoreNoWriteBarrier(MachineRepresentation::kWord16,
-                                   two_byte_result, max_index_offset, code16);
-    max_index.Bind(
-        assembler->IntPtrAdd(max_index.value(), assembler->IntPtrConstant(1)));
+    Node* max_index_offset =
+        ElementOffsetFromIndex(max_index.value(), UINT16_ELEMENTS,
+                               CodeStubAssembler::INTPTR_PARAMETERS,
+                               SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+    StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
+                        max_index_offset, code16);
+    max_index.Bind(IntPtrAdd(max_index.value(), IntPtrConstant(1)));
 
     // Resume copying the passed-in arguments from the same place where the
     // 8-bit copy stopped, but this time copying over all of the characters
     // using a 16-bit representation.
     arguments.ForEach(
         vars,
-        [context, two_byte_result, &max_index](CodeStubAssembler* assembler,
-                                               Node* arg) {
-          Node* code32 = assembler->TruncateTaggedToWord32(context, arg);
-          Node* code16 = assembler->Word32And(
-              code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+        [this, context, two_byte_result, &max_index](Node* arg) {
+          Node* code32 = TruncateTaggedToWord32(context, arg);
+          Node* code16 =
+              Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
 
-          Node* offset = assembler->ElementOffsetFromIndex(
+          Node* offset = ElementOffsetFromIndex(
               max_index.value(), UINT16_ELEMENTS,
               CodeStubAssembler::INTPTR_PARAMETERS,
               SeqTwoByteString::kHeaderSize - kHeapObjectTag);
-          assembler->StoreNoWriteBarrier(MachineRepresentation::kWord16,
-                                         two_byte_result, offset, code16);
-          max_index.Bind(assembler->IntPtrAdd(max_index.value(),
-                                              assembler->IntPtrConstant(1)));
+          StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
+                              offset, code16);
+          max_index.Bind(IntPtrAdd(max_index.value(), IntPtrConstant(1)));
         },
         max_index.value());
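+
+    // Illustratively: String.fromCharCode(0x61, 0x62) stays entirely on the
+    // one-byte path above, while String.fromCharCode(0x61, 0x3B1) hits the
+    // > 0xFF check at the second code unit and resumes here on the two-byte
+    // path.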
 
@@ -558,7 +622,7 @@
 }
 
 uc32 NextCodePoint(Isolate* isolate, BuiltinArguments args, int index) {
-  Handle<Object> value = args.at<Object>(1 + index);
+  Handle<Object> value = args.at(1 + index);
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, Object::ToNumber(value), -1);
   if (!IsValidCodePoint(isolate, value)) {
     isolate->Throw(*isolate->factory()->NewRangeError(
@@ -632,91 +696,79 @@
 }
 
 // ES6 section 21.1.3.1 String.prototype.charAt ( pos )
-void Builtins::Generate_StringPrototypeCharAt(CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(0);
-  Node* position = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
+TF_BUILTIN(StringPrototypeCharAt, CodeStubAssembler) {
+  Node* receiver = Parameter(0);
+  Node* position = Parameter(1);
+  Node* context = Parameter(4);
 
   // Check that {receiver} is coercible to Object and convert it to a String.
-  receiver =
-      assembler->ToThisString(context, receiver, "String.prototype.charAt");
+  receiver = ToThisString(context, receiver, "String.prototype.charAt");
 
   // Convert the {position} to a Smi and check that it's in bounds of the
   // {receiver}.
   {
-    Label return_emptystring(assembler, Label::kDeferred);
-    position = assembler->ToInteger(context, position,
-                                    CodeStubAssembler::kTruncateMinusZero);
-    assembler->GotoUnless(assembler->TaggedIsSmi(position),
-                          &return_emptystring);
+    Label return_emptystring(this, Label::kDeferred);
+    position =
+        ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
+    GotoIfNot(TaggedIsSmi(position), &return_emptystring);
 
     // Determine the actual length of the {receiver} String.
-    Node* receiver_length =
-        assembler->LoadObjectField(receiver, String::kLengthOffset);
+    Node* receiver_length = LoadObjectField(receiver, String::kLengthOffset);
 
     // Return "" if the Smi {position} is outside the bounds of the {receiver}.
-    Label if_positioninbounds(assembler);
-    assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
-                      &return_emptystring, &if_positioninbounds);
+    Label if_positioninbounds(this);
+    Branch(SmiAboveOrEqual(position, receiver_length), &return_emptystring,
+           &if_positioninbounds);
 
-    assembler->Bind(&return_emptystring);
-    assembler->Return(assembler->EmptyStringConstant());
+    Bind(&return_emptystring);
+    Return(EmptyStringConstant());
 
-    assembler->Bind(&if_positioninbounds);
+    Bind(&if_positioninbounds);
   }
 
   // Load the character code at the {position} from the {receiver}.
-  Node* code = assembler->StringCharCodeAt(receiver, position);
+  Node* code = StringCharCodeAt(receiver, position);
 
   // And return the single character string with only that {code}.
-  Node* result = assembler->StringFromCharCode(code);
-  assembler->Return(result);
+  Node* result = StringFromCharCode(code);
+  Return(result);
 }
 
 // ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
-void Builtins::Generate_StringPrototypeCharCodeAt(
-    CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(0);
-  Node* position = assembler->Parameter(1);
-  Node* context = assembler->Parameter(4);
+TF_BUILTIN(StringPrototypeCharCodeAt, CodeStubAssembler) {
+  Node* receiver = Parameter(0);
+  Node* position = Parameter(1);
+  Node* context = Parameter(4);
 
   // Check that {receiver} is coercible to Object and convert it to a String.
-  receiver =
-      assembler->ToThisString(context, receiver, "String.prototype.charCodeAt");
+  receiver = ToThisString(context, receiver, "String.prototype.charCodeAt");
 
   // Convert the {position} to a Smi and check that it's in bounds of the
   // {receiver}.
   {
-    Label return_nan(assembler, Label::kDeferred);
-    position = assembler->ToInteger(context, position,
-                                    CodeStubAssembler::kTruncateMinusZero);
-    assembler->GotoUnless(assembler->TaggedIsSmi(position), &return_nan);
+    Label return_nan(this, Label::kDeferred);
+    position =
+        ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
+    GotoIfNot(TaggedIsSmi(position), &return_nan);
 
     // Determine the actual length of the {receiver} String.
-    Node* receiver_length =
-        assembler->LoadObjectField(receiver, String::kLengthOffset);
+    Node* receiver_length = LoadObjectField(receiver, String::kLengthOffset);
 
     // Return NaN if the Smi {position} is outside the bounds of the {receiver}.
-    Label if_positioninbounds(assembler);
-    assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
-                      &return_nan, &if_positioninbounds);
+    Label if_positioninbounds(this);
+    Branch(SmiAboveOrEqual(position, receiver_length), &return_nan,
+           &if_positioninbounds);
 
-    assembler->Bind(&return_nan);
-    assembler->Return(assembler->NaNConstant());
+    Bind(&return_nan);
+    Return(NaNConstant());
 
-    assembler->Bind(&if_positioninbounds);
+    Bind(&if_positioninbounds);
   }
 
   // Load the character at the {position} from the {receiver}.
-  Node* value = assembler->StringCharCodeAt(receiver, position);
-  Node* result = assembler->SmiFromWord32(value);
-  assembler->Return(result);
+  Node* value = StringCharCodeAt(receiver, position);
+  Node* result = SmiFromWord32(value);
+  Return(result);
 }
 
 // ES6 section 21.1.3.6
@@ -750,16 +802,30 @@
   } else {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
                                        Object::ToInteger(isolate, position));
-    double index = std::max(position->Number(), 0.0);
-    index = std::min(index, static_cast<double>(str->length()));
-    end = static_cast<uint32_t>(index);
+    end = str->ToValidIndex(*position);
   }
 
   int start = end - search_string->length();
   if (start < 0) return isolate->heap()->false_value();
 
-  FlatStringReader str_reader(isolate, String::Flatten(str));
-  FlatStringReader search_reader(isolate, String::Flatten(search_string));
+  str = String::Flatten(str);
+  search_string = String::Flatten(search_string);
+
+  DisallowHeapAllocation no_gc;  // ensure vectors stay valid
+  String::FlatContent str_content = str->GetFlatContent();
+  String::FlatContent search_content = search_string->GetFlatContent();
+
+  if (str_content.IsOneByte() && search_content.IsOneByte()) {
+    Vector<const uint8_t> str_vector = str_content.ToOneByteVector();
+    Vector<const uint8_t> search_vector = search_content.ToOneByteVector();
+
+    return isolate->heap()->ToBoolean(memcmp(str_vector.start() + start,
+                                             search_vector.start(),
+                                             search_string->length()) == 0);
+  }
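+  // Illustratively, for one-byte inputs "foobar".endsWith("bar") takes the
+  // branch above: a 3-byte memcmp at start == 3 against the search string.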
+
+  FlatStringReader str_reader(isolate, str);
+  FlatStringReader search_reader(isolate, search_string);
 
   for (int i = 0; i < search_string->length(); i++) {
     if (str_reader.Get(start + i) != search_reader.Get(i)) {
@@ -796,21 +862,218 @@
       isolate, position,
       Object::ToInteger(isolate, args.atOrUndefined(isolate, 2)));
 
-  double index = std::max(position->Number(), 0.0);
-  index = std::min(index, static_cast<double>(str->length()));
-
-  int index_in_str = String::IndexOf(isolate, str, search_string,
-                                     static_cast<uint32_t>(index));
+  uint32_t index = str->ToValidIndex(*position);
+  int index_in_str = String::IndexOf(isolate, str, search_string, index);
   return *isolate->factory()->ToBoolean(index_in_str != -1);
 }
 
-// ES6 section 21.1.3.8 String.prototype.indexOf ( searchString [ , position ] )
-BUILTIN(StringPrototypeIndexOf) {
-  HandleScope handle_scope(isolate);
+void StringBuiltinsAssembler::StringIndexOf(
+    Node* receiver, Node* instance_type, Node* search_string,
+    Node* search_string_instance_type, Node* position,
+    std::function<void(Node*)> f_return) {
+  CSA_ASSERT(this, IsString(receiver));
+  CSA_ASSERT(this, IsString(search_string));
+  CSA_ASSERT(this, TaggedIsSmi(position));
 
-  return String::IndexOf(isolate, args.receiver(),
-                         args.atOrUndefined(isolate, 1),
-                         args.atOrUndefined(isolate, 2));
+  Label zero_length_needle(this),
+      call_runtime_unchecked(this, Label::kDeferred), return_minus_1(this),
+      check_search_string(this), continue_fast_path(this);
+
+  Node* const int_zero = IntPtrConstant(0);
+  Variable var_needle_byte(this, MachineType::PointerRepresentation(),
+                           int_zero);
+  Variable var_string_addr(this, MachineType::PointerRepresentation(),
+                           int_zero);
+
+  Node* needle_length = SmiUntag(LoadStringLength(search_string));
+  // Use the faster (but more complex) runtime fallback for long search
+  // strings.
+  GotoIf(IntPtrLessThan(IntPtrConstant(1), needle_length),
+         &call_runtime_unchecked);
+  Node* string_length = SmiUntag(LoadStringLength(receiver));
+  Node* start_position = IntPtrMax(SmiUntag(position), int_zero);
+
+  GotoIf(IntPtrEqual(int_zero, needle_length), &zero_length_needle);
+  // Check that the needle fits into the string beyond {start_position}.
+  GotoIfNot(IntPtrLessThanOrEqual(needle_length,
+                                  IntPtrSub(string_length, start_position)),
+            &return_minus_1);
+
+  // Load the string address.
+  {
+    Label if_onebyte_sequential(this);
+    Label if_onebyte_external(this, Label::kDeferred);
+
+    // Only support one-byte strings on the fast path.
+    DispatchOnStringInstanceType(instance_type, &if_onebyte_sequential,
+                                 &if_onebyte_external, &call_runtime_unchecked);
+
+    Bind(&if_onebyte_sequential);
+    {
+      var_string_addr.Bind(
+          OneByteCharAddress(BitcastTaggedToWord(receiver), start_position));
+      Goto(&check_search_string);
+    }
+
+    Bind(&if_onebyte_external);
+    {
+      Node* const unpacked = TryDerefExternalString(receiver, instance_type,
+                                                    &call_runtime_unchecked);
+      var_string_addr.Bind(OneByteCharAddress(unpacked, start_position));
+      Goto(&check_search_string);
+    }
+  }
+
+  // Load the needle character.
+  Bind(&check_search_string);
+  {
+    Label if_onebyte_sequential(this);
+    Label if_onebyte_external(this, Label::kDeferred);
+
+    DispatchOnStringInstanceType(search_string_instance_type,
+                                 &if_onebyte_sequential, &if_onebyte_external,
+                                 &call_runtime_unchecked);
+
+    Bind(&if_onebyte_sequential);
+    {
+      var_needle_byte.Bind(
+          ChangeInt32ToIntPtr(LoadOneByteChar(search_string, int_zero)));
+      Goto(&continue_fast_path);
+    }
+
+    Bind(&if_onebyte_external);
+    {
+      Node* const unpacked = TryDerefExternalString(
+          search_string, search_string_instance_type, &call_runtime_unchecked);
+      var_needle_byte.Bind(
+          ChangeInt32ToIntPtr(LoadOneByteChar(unpacked, int_zero)));
+      Goto(&continue_fast_path);
+    }
+  }
+
+  Bind(&continue_fast_path);
+  {
+    Node* needle_byte = var_needle_byte.value();
+    Node* string_addr = var_string_addr.value();
+    Node* search_length = IntPtrSub(string_length, start_position);
+    // Call out to the highly optimized memchr to perform the actual byte
+    // search.
+    Node* memchr =
+        ExternalConstant(ExternalReference::libc_memchr_function(isolate()));
+    Node* result_address =
+        CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
+                       MachineType::IntPtr(), MachineType::UintPtr(), memchr,
+                       string_addr, needle_byte, search_length);
+    GotoIf(WordEqual(result_address, int_zero), &return_minus_1);
+    Node* result_index =
+        IntPtrAdd(IntPtrSub(result_address, string_addr), start_position);
+    f_return(SmiTag(result_index));
+  }
+
+  Bind(&return_minus_1);
+  f_return(SmiConstant(-1));
+
+  Bind(&zero_length_needle);
+  {
+    Comment("0-length search_string");
+    f_return(SmiTag(IntPtrMin(string_length, start_position)));
+  }
+
+  Bind(&call_runtime_unchecked);
+  {
+    // Simplified version of the runtime call where the types of the arguments
+    // are already known due to type checks in this stub.
+    Comment("Call Runtime Unchecked");
+    Node* result = CallRuntime(Runtime::kStringIndexOfUnchecked, SmiConstant(0),
+                               receiver, search_string, position);
+    f_return(result);
+  }
+}
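+
+// As a plain-C sketch of the fast path above (illustrative, not the stub
+// itself; {base} points at the first character of the subject string):
+//
+//   const void* p = memchr(base + start, needle_byte, length - start);
+//   intptr_t ix = (p == NULL) ? -1 : (const char*)p - (const char*)base;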
+
+// ES6 String.prototype.indexOf(searchString [, position])
+// #sec-string.prototype.indexof
+// Unchecked helper for builtins lowering.
+TF_BUILTIN(StringIndexOf, StringBuiltinsAssembler) {
+  Node* receiver = Parameter(0);
+  Node* search_string = Parameter(1);
+  Node* position = Parameter(2);
+
+  Node* instance_type = LoadInstanceType(receiver);
+  Node* search_string_instance_type = LoadInstanceType(search_string);
+
+  StringIndexOf(receiver, instance_type, search_string,
+                search_string_instance_type, position,
+                [this](Node* result) { this->Return(result); });
+}
+
+// ES6 String.prototype.indexOf(searchString [, position])
+// #sec-string.prototype.indexof
+TF_BUILTIN(StringPrototypeIndexOf, StringBuiltinsAssembler) {
+  Variable search_string(this, MachineRepresentation::kTagged),
+      position(this, MachineRepresentation::kTagged);
+  Label call_runtime(this), call_runtime_unchecked(this), argc_0(this),
+      no_argc_0(this), argc_1(this), no_argc_1(this), argc_2(this),
+      fast_path(this), return_minus_1(this);
+
+  Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+  Node* context = Parameter(BuiltinDescriptor::kContext);
+
+  CodeStubArguments arguments(this, ChangeInt32ToIntPtr(argc));
+  Node* receiver = arguments.GetReceiver();
+  // From now on, use the word-sized argc value.
+  argc = arguments.GetLength();
+
+  GotoIf(IntPtrEqual(argc, IntPtrConstant(0)), &argc_0);
+  GotoIf(IntPtrEqual(argc, IntPtrConstant(1)), &argc_1);
+  Goto(&argc_2);
+  Bind(&argc_0);
+  {
+    Comment("0 Argument case");
+    Node* undefined = UndefinedConstant();
+    search_string.Bind(undefined);
+    position.Bind(undefined);
+    Goto(&call_runtime);
+  }
+  Bind(&argc_1);
+  {
+    Comment("1 Argument case");
+    search_string.Bind(arguments.AtIndex(0));
+    position.Bind(SmiConstant(0));
+    Goto(&fast_path);
+  }
+  Bind(&argc_2);
+  {
+    Comment("2 Argument case");
+    search_string.Bind(arguments.AtIndex(0));
+    position.Bind(arguments.AtIndex(1));
+    GotoIfNot(TaggedIsSmi(position.value()), &call_runtime);
+    Goto(&fast_path);
+  }
+
+  Bind(&fast_path);
+  {
+    Comment("Fast Path");
+    GotoIf(TaggedIsSmi(receiver), &call_runtime);
+    Node* needle = search_string.value();
+    GotoIf(TaggedIsSmi(needle), &call_runtime);
+
+    Node* instance_type = LoadInstanceType(receiver);
+    GotoIfNot(IsStringInstanceType(instance_type), &call_runtime);
+
+    Node* needle_instance_type = LoadInstanceType(needle);
+    GotoIfNot(IsStringInstanceType(needle_instance_type), &call_runtime);
+
+    StringIndexOf(
+        receiver, instance_type, needle, needle_instance_type, position.value(),
+        [&arguments](Node* result) { arguments.PopAndReturn(result); });
+  }
+
+  Bind(&call_runtime);
+  {
+    Comment("Call Runtime");
+    Node* result = CallRuntime(Runtime::kStringIndexOf, context, receiver,
+                               search_string.value(), position.value());
+    arguments.PopAndReturn(result);
+  }
 }
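+
+// Illustrative dispatch for the builtin above: "abc".indexOf("b") takes the
+// one-argument fast path with position 0; "abc".indexOf("b", 1.5) bails to
+// the runtime because the position is a HeapNumber rather than a Smi.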
 
 // ES6 section 21.1.3.9
@@ -834,8 +1097,8 @@
 
   TO_THIS_STRING(str1, "String.prototype.localeCompare");
   Handle<String> str2;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, str2, Object::ToString(isolate, args.at<Object>(1)));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str2,
+                                     Object::ToString(isolate, args.at(1)));
 
   if (str1.is_identical_to(str2)) return Smi::kZero;  // Equal.
   int str1_length = str1->length();
@@ -907,237 +1170,573 @@
   return *string;
 }
 
+compiler::Node* StringBuiltinsAssembler::IsNullOrUndefined(Node* const value) {
+  return Word32Or(IsUndefined(value), IsNull(value));
+}
+
+void StringBuiltinsAssembler::RequireObjectCoercible(Node* const context,
+                                                     Node* const value,
+                                                     const char* method_name) {
+  Label out(this), throw_exception(this, Label::kDeferred);
+  Branch(IsNullOrUndefined(value), &throw_exception, &out);
+
+  Bind(&throw_exception);
+  TailCallRuntime(
+      Runtime::kThrowCalledOnNullOrUndefined, context,
+      HeapConstant(factory()->NewStringFromAsciiChecked(method_name, TENURED)));
+
+  Bind(&out);
+}
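+
+// Illustrative effect of the check above:
+//
+//   String.prototype.replace.call(null, "a", "b")
+//
+// throws a TypeError naming the method before any further coercion happens.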
+
+void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
+    Node* const context, Node* const object, Handle<Symbol> symbol,
+    const NodeFunction0& regexp_call, const NodeFunction1& generic_call) {
+  Label out(this);
+
+  // Smis definitely don't have an attached symbol.
+  GotoIf(TaggedIsSmi(object), &out);
+
+  Node* const object_map = LoadMap(object);
+
+  // Skip the slow lookup for Strings.
+  {
+    Label next(this);
+
+    GotoIfNot(IsStringInstanceType(LoadMapInstanceType(object_map)), &next);
+
+    Node* const native_context = LoadNativeContext(context);
+    Node* const initial_proto_initial_map = LoadContextElement(
+        native_context, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX);
+
+    Node* const string_fun =
+        LoadContextElement(native_context, Context::STRING_FUNCTION_INDEX);
+    Node* const initial_map =
+        LoadObjectField(string_fun, JSFunction::kPrototypeOrInitialMapOffset);
+    Node* const proto_map = LoadMap(LoadMapPrototype(initial_map));
+
+    Branch(WordEqual(proto_map, initial_proto_initial_map), &out, &next);
+
+    Bind(&next);
+  }
+
+  // Take the fast path for RegExps.
+  {
+    Label stub_call(this), slow_lookup(this);
+
+    RegExpBuiltinsAssembler regexp_asm(state());
+    regexp_asm.BranchIfFastRegExp(context, object, object_map, &stub_call,
+                                  &slow_lookup);
+
+    Bind(&stub_call);
+    Return(regexp_call());
+
+    Bind(&slow_lookup);
+  }
+
+  GotoIf(IsNullOrUndefined(object), &out);
+
+  // Fall back to a slow lookup of {object[symbol]}.
+
+  Callable getproperty_callable = CodeFactory::GetProperty(isolate());
+  Node* const key = HeapConstant(symbol);
+  Node* const maybe_func = CallStub(getproperty_callable, context, object, key);
+
+  GotoIf(IsUndefined(maybe_func), &out);
+
+  // Attempt to call the function.
+
+  Node* const result = generic_call(maybe_func);
+  Return(result);
+
+  Bind(&out);
+}
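+
+// Illustrative consequence of the string shortcut above: for a string search
+// value whose map still points at the unmodified initial String.prototype,
+//
+//   "abc".replace("b", "x");
+//
+// never performs the {"b"[Symbol.replace]} lookup at all.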
+
+// ES6 section 21.1.3.16 String.prototype.replace ( search, replace )
+TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
+  Label out(this);
+
+  Node* const receiver = Parameter(0);
+  Node* const search = Parameter(1);
+  Node* const replace = Parameter(2);
+  Node* const context = Parameter(5);
+
+  Node* const smi_zero = SmiConstant(0);
+
+  RequireObjectCoercible(context, receiver, "String.prototype.replace");
+
+  // Redirect to replacer method if {search[@@replace]} is not undefined.
+
+  MaybeCallFunctionAtSymbol(
+      context, search, isolate()->factory()->replace_symbol(),
+      [=]() {
+        Callable tostring_callable = CodeFactory::ToString(isolate());
+        Node* const subject_string =
+            CallStub(tostring_callable, context, receiver);
+
+        Callable replace_callable = CodeFactory::RegExpReplace(isolate());
+        return CallStub(replace_callable, context, search, subject_string,
+                        replace);
+      },
+      [=](Node* fn) {
+        Callable call_callable = CodeFactory::Call(isolate());
+        return CallJS(call_callable, context, fn, search, receiver, replace);
+      });
+
+  // Convert {receiver} and {search} to strings.
+
+  Callable tostring_callable = CodeFactory::ToString(isolate());
+  Callable indexof_callable = CodeFactory::StringIndexOf(isolate());
+
+  Node* const subject_string = CallStub(tostring_callable, context, receiver);
+  Node* const search_string = CallStub(tostring_callable, context, search);
+
+  Node* const subject_length = LoadStringLength(subject_string);
+  Node* const search_length = LoadStringLength(search_string);
+
+  // Fast-path single-char {search}, long {receiver}, and simple string
+  // {replace}.
+  {
+    Label next(this);
+
+    GotoIfNot(SmiEqual(search_length, SmiConstant(1)), &next);
+    GotoIfNot(SmiGreaterThan(subject_length, SmiConstant(0xFF)), &next);
+    GotoIf(TaggedIsSmi(replace), &next);
+    GotoIfNot(IsString(replace), &next);
+
+    Node* const dollar_string = HeapConstant(
+        isolate()->factory()->LookupSingleCharacterStringFromCode('$'));
+    Node* const dollar_ix =
+        CallStub(indexof_callable, context, replace, dollar_string, smi_zero);
+    GotoIfNot(SmiIsNegative(dollar_ix), &next);
+
+    // Searching by traversing a cons string tree and replacing with a cons of
+    // slices works only when the search string is a single character that is
+    // being replaced by a simple string, and it only pays off for long
+    // subject strings.
+    // TODO(jgruber): Reevaluate if this is still beneficial.
+    // TODO(jgruber): TailCallRuntime when it correctly handles adapter frames.
+    Return(CallRuntime(Runtime::kStringReplaceOneCharWithString, context,
+                       subject_string, search_string, replace));
+
+    Bind(&next);
+  }
+
+  // TODO(jgruber): Extend StringIndexOf to handle two-byte strings and
+  // longer substrings - we can handle up to 8 chars (one-byte) / 4 chars
+  // (2-byte).
+
+  Node* const match_start_index = CallStub(
+      indexof_callable, context, subject_string, search_string, smi_zero);
+  CSA_ASSERT(this, TaggedIsSmi(match_start_index));
+
+  // Early exit if no match found.
+  {
+    Label next(this), return_subject(this);
+
+    GotoIfNot(SmiIsNegative(match_start_index), &next);
+
+    // The spec requires ToString(replace) to be performed if {replace} is not
+    // callable, even though we are going to exit here.
+    // Since applying ToString() to a Smi has no side effects, we can skip it
+    // for numbers.
+    GotoIf(TaggedIsSmi(replace), &return_subject);
+    GotoIf(IsCallableMap(LoadMap(replace)), &return_subject);
+
+    // TODO(jgruber): Could introduce ToStringSideeffectsStub which only
+    // performs observable parts of ToString.
+    CallStub(tostring_callable, context, replace);
+    Goto(&return_subject);
+
+    Bind(&return_subject);
+    Return(subject_string);
+
+    Bind(&next);
+  }
+
+  Node* const match_end_index = SmiAdd(match_start_index, search_length);
+
+  Callable substring_callable = CodeFactory::SubString(isolate());
+  Callable stringadd_callable =
+      CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+
+  Variable var_result(this, MachineRepresentation::kTagged,
+                      EmptyStringConstant());
+
+  // Compute the prefix.
+  {
+    Label next(this);
+
+    GotoIf(SmiEqual(match_start_index, smi_zero), &next);
+    Node* const prefix = CallStub(substring_callable, context, subject_string,
+                                  smi_zero, match_start_index);
+    var_result.Bind(prefix);
+
+    Goto(&next);
+    Bind(&next);
+  }
+
+  // Compute the string to replace with.
+
+  Label if_iscallablereplace(this), if_notcallablereplace(this);
+  GotoIf(TaggedIsSmi(replace), &if_notcallablereplace);
+  Branch(IsCallableMap(LoadMap(replace)), &if_iscallablereplace,
+         &if_notcallablereplace);
+
+  Bind(&if_iscallablereplace);
+  {
+    Callable call_callable = CodeFactory::Call(isolate());
+    Node* const replacement =
+        CallJS(call_callable, context, replace, UndefinedConstant(),
+               search_string, match_start_index, subject_string);
+    Node* const replacement_string =
+        CallStub(tostring_callable, context, replacement);
+    var_result.Bind(CallStub(stringadd_callable, context, var_result.value(),
+                             replacement_string));
+    Goto(&out);
+  }
+
+  Bind(&if_notcallablereplace);
+  {
+    Node* const replace_string = CallStub(tostring_callable, context, replace);
+
+    // TODO(jgruber): Simplified GetSubstitution implementation in CSA.
+    Node* const matched = CallStub(substring_callable, context, subject_string,
+                                   match_start_index, match_end_index);
+    Node* const replacement_string =
+        CallRuntime(Runtime::kGetSubstitution, context, matched, subject_string,
+                    match_start_index, replace_string);
+    var_result.Bind(CallStub(stringadd_callable, context, var_result.value(),
+                             replacement_string));
+    Goto(&out);
+  }
+
+  Bind(&out);
+  {
+    Node* const suffix = CallStub(substring_callable, context, subject_string,
+                                  match_end_index, subject_length);
+    Node* const result =
+        CallStub(stringadd_callable, context, var_result.value(), suffix);
+    Return(result);
+  }
+}
+
+// ES6 section 21.1.3.19 String.prototype.split ( separator, limit )
+TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
+  Label out(this);
+
+  Node* const receiver = Parameter(0);
+  Node* const separator = Parameter(1);
+  Node* const limit = Parameter(2);
+  Node* const context = Parameter(5);
+
+  Node* const smi_zero = SmiConstant(0);
+
+  RequireObjectCoercible(context, receiver, "String.prototype.split");
+
+  // Redirect to splitter method if {separator[@@split]} is not undefined.
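+  // For example, "a,b".split({ [Symbol.split]: fn }, limit) invokes fn with
+  // the separator object as receiver and (string, limit) as arguments.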
+
+  MaybeCallFunctionAtSymbol(
+      context, separator, isolate()->factory()->split_symbol(),
+      [=]() {
+        Callable tostring_callable = CodeFactory::ToString(isolate());
+        Node* const subject_string =
+            CallStub(tostring_callable, context, receiver);
+
+        Callable split_callable = CodeFactory::RegExpSplit(isolate());
+        return CallStub(split_callable, context, separator, subject_string,
+                        limit);
+      },
+      [=](Node* fn) {
+        Callable call_callable = CodeFactory::Call(isolate());
+        return CallJS(call_callable, context, fn, separator, receiver, limit);
+      });
+
+  // String and integer conversions.
+  // TODO(jgruber): The old implementation used Uint32Max instead of SmiMax -
+  // but AFAIK there should be no observable difference, since array lengths
+  // are capped at Smi values.
+
+  Callable tostring_callable = CodeFactory::ToString(isolate());
+  Node* const subject_string = CallStub(tostring_callable, context, receiver);
+  Node* const limit_number =
+      Select(IsUndefined(limit), [=]() { return SmiConstant(Smi::kMaxValue); },
+             [=]() { return ToUint32(context, limit); },
+             MachineRepresentation::kTagged);
+  Node* const separator_string =
+      CallStub(tostring_callable, context, separator);
+
+  // Shortcut for {limit} == 0.
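+  // In that case the result is always an empty array, e.g.
+  // "abc".split("", 0) yields [].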
+  {
+    Label next(this);
+    GotoIfNot(SmiEqual(limit_number, smi_zero), &next);
+
+    const ElementsKind kind = FAST_ELEMENTS;
+    Node* const native_context = LoadNativeContext(context);
+    Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
+
+    Node* const length = smi_zero;
+    Node* const capacity = IntPtrConstant(0);
+    Node* const result = AllocateJSArray(kind, array_map, capacity, length);
+
+    Return(result);
+
+    Bind(&next);
+  }
+
+  // ECMA-262 says that if {separator} is undefined, the result should
+  // be an array of size 1 containing the entire string.
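+  // For example, "abc".split(undefined) yields ["abc"].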
+  {
+    Label next(this);
+    GotoIfNot(IsUndefined(separator), &next);
+
+    const ElementsKind kind = FAST_ELEMENTS;
+    Node* const native_context = LoadNativeContext(context);
+    Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
+
+    Node* const length = SmiConstant(1);
+    Node* const capacity = IntPtrConstant(1);
+    Node* const result = AllocateJSArray(kind, array_map, capacity, length);
+
+    Node* const fixed_array = LoadElements(result);
+    StoreFixedArrayElement(fixed_array, 0, subject_string);
+
+    Return(result);
+
+    Bind(&next);
+  }
+
+  // If the separator string is empty, return an array containing the
+  // individual characters of the subject string.
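+  // For example, "abc".split("") yields ["a", "b", "c"].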
+  {
+    Label next(this);
+    GotoIfNot(SmiEqual(LoadStringLength(separator_string), smi_zero), &next);
+
+    Node* const result = CallRuntime(Runtime::kStringToArray, context,
+                                     subject_string, limit_number);
+    Return(result);
+
+    Bind(&next);
+  }
+
+  Node* const result =
+      CallRuntime(Runtime::kStringSplit, context, subject_string,
+                  separator_string, limit_number);
+  Return(result);
+}
+
 // ES6 section B.2.3.1 String.prototype.substr ( start, length )
-void Builtins::Generate_StringPrototypeSubstr(CodeStubAssembler* a) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(StringPrototypeSubstr, CodeStubAssembler) {
+  Label out(this), handle_length(this);
 
-  Label out(a), handle_length(a);
+  Variable var_start(this, MachineRepresentation::kTagged);
+  Variable var_length(this, MachineRepresentation::kTagged);
 
-  Variable var_start(a, MachineRepresentation::kTagged);
-  Variable var_length(a, MachineRepresentation::kTagged);
+  Node* const receiver = Parameter(0);
+  Node* const start = Parameter(1);
+  Node* const length = Parameter(2);
+  Node* const context = Parameter(5);
 
-  Node* const receiver = a->Parameter(0);
-  Node* const start = a->Parameter(1);
-  Node* const length = a->Parameter(2);
-  Node* const context = a->Parameter(5);
-
-  Node* const zero = a->SmiConstant(Smi::kZero);
+  Node* const zero = SmiConstant(Smi::kZero);
 
   // Check that {receiver} is coercible to Object and convert it to a String.
   Node* const string =
-      a->ToThisString(context, receiver, "String.prototype.substr");
+      ToThisString(context, receiver, "String.prototype.substr");
 
-  Node* const string_length = a->LoadStringLength(string);
+  Node* const string_length = LoadStringLength(string);
 
   // Conversions and bounds-checks for {start}.
   {
     Node* const start_int =
-        a->ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
+        ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
 
-    Label if_issmi(a), if_isheapnumber(a, Label::kDeferred);
-    a->Branch(a->TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
+    Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
+    Branch(TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
 
-    a->Bind(&if_issmi);
+    Bind(&if_issmi);
     {
-      Node* const length_plus_start = a->SmiAdd(string_length, start_int);
-      var_start.Bind(a->Select(a->SmiLessThan(start_int, zero),
-                               a->SmiMax(length_plus_start, zero), start_int));
-      a->Goto(&handle_length);
+      Node* const length_plus_start = SmiAdd(string_length, start_int);
+      var_start.Bind(Select(SmiLessThan(start_int, zero),
+                            [&] { return SmiMax(length_plus_start, zero); },
+                            [&] { return start_int; },
+                            MachineRepresentation::kTagged));
+      Goto(&handle_length);
     }
 
-    a->Bind(&if_isheapnumber);
+    Bind(&if_isheapnumber);
     {
       // If {start} is a heap number, it is definitely out of bounds. If it
       // is negative, {start} = max({string_length} + {start}, 0) = 0. If it
       // is positive, set {start} to {string_length}, which ultimately
       // results in returning an empty string.
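       // For example, "abcdef".substr(1e21) is "" and "abcdef".substr(-1e21)
       // is "abcdef".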
-      Node* const float_zero = a->Float64Constant(0.);
-      Node* const start_float = a->LoadHeapNumberValue(start_int);
-      var_start.Bind(a->Select(a->Float64LessThan(start_float, float_zero),
-                               zero, string_length));
-      a->Goto(&handle_length);
+      Node* const float_zero = Float64Constant(0.);
+      Node* const start_float = LoadHeapNumberValue(start_int);
+      var_start.Bind(SelectTaggedConstant(
+          Float64LessThan(start_float, float_zero), zero, string_length));
+      Goto(&handle_length);
     }
   }
 
   // Conversions and bounds-checks for {length}.
-  a->Bind(&handle_length);
+  Bind(&handle_length);
   {
-    Label if_issmi(a), if_isheapnumber(a, Label::kDeferred);
+    Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
 
     // Default to {string_length} if {length} is undefined.
     {
-      Label if_isundefined(a, Label::kDeferred), if_isnotundefined(a);
-      a->Branch(a->WordEqual(length, a->UndefinedConstant()), &if_isundefined,
-                &if_isnotundefined);
+      Label if_isundefined(this, Label::kDeferred), if_isnotundefined(this);
+      Branch(WordEqual(length, UndefinedConstant()), &if_isundefined,
+             &if_isnotundefined);
 
-      a->Bind(&if_isundefined);
+      Bind(&if_isundefined);
       var_length.Bind(string_length);
-      a->Goto(&if_issmi);
+      Goto(&if_issmi);
 
-      a->Bind(&if_isnotundefined);
+      Bind(&if_isnotundefined);
       var_length.Bind(
-          a->ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
+          ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
     }
 
-    a->Branch(a->TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
+    Branch(TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
 
     // Set {length} to min(max({length}, 0), {string_length} - {start}).
-    a->Bind(&if_issmi);
+    Bind(&if_issmi);
     {
-      Node* const positive_length = a->SmiMax(var_length.value(), zero);
+      Node* const positive_length = SmiMax(var_length.value(), zero);
 
-      Node* const minimal_length = a->SmiSub(string_length, var_start.value());
-      var_length.Bind(a->SmiMin(positive_length, minimal_length));
+      Node* const minimal_length = SmiSub(string_length, var_start.value());
+      var_length.Bind(SmiMin(positive_length, minimal_length));
 
-      a->GotoUnless(a->SmiLessThanOrEqual(var_length.value(), zero), &out);
-      a->Return(a->EmptyStringConstant());
+      GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
+      Return(EmptyStringConstant());
     }
 
-    a->Bind(&if_isheapnumber);
+    Bind(&if_isheapnumber);
     {
       // If {length} is a heap number, it is definitely out of bounds. There are
       // two cases according to the spec: if it is negative, "" is returned; if
       // it is positive, then length is set to {string_length} - {start}.
 
-      CSA_ASSERT(a, a->WordEqual(a->LoadMap(var_length.value()),
-                                 a->HeapNumberMapConstant()));
+      CSA_ASSERT(this, IsHeapNumberMap(LoadMap(var_length.value())));
 
-      Label if_isnegative(a), if_ispositive(a);
-      Node* const float_zero = a->Float64Constant(0.);
-      Node* const length_float = a->LoadHeapNumberValue(var_length.value());
-      a->Branch(a->Float64LessThan(length_float, float_zero), &if_isnegative,
-                &if_ispositive);
+      Label if_isnegative(this), if_ispositive(this);
+      Node* const float_zero = Float64Constant(0.);
+      Node* const length_float = LoadHeapNumberValue(var_length.value());
+      Branch(Float64LessThan(length_float, float_zero), &if_isnegative,
+             &if_ispositive);
 
-      a->Bind(&if_isnegative);
-      a->Return(a->EmptyStringConstant());
+      Bind(&if_isnegative);
+      Return(EmptyStringConstant());
 
-      a->Bind(&if_ispositive);
+      Bind(&if_ispositive);
       {
-        var_length.Bind(a->SmiSub(string_length, var_start.value()));
-        a->GotoUnless(a->SmiLessThanOrEqual(var_length.value(), zero), &out);
-        a->Return(a->EmptyStringConstant());
+        var_length.Bind(SmiSub(string_length, var_start.value()));
+        GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
+        Return(EmptyStringConstant());
       }
     }
   }
 
-  a->Bind(&out);
+  Bind(&out);
   {
-    Node* const end = a->SmiAdd(var_start.value(), var_length.value());
-    Node* const result = a->SubString(context, string, var_start.value(), end);
-    a->Return(result);
+    Node* const end = SmiAdd(var_start.value(), var_length.value());
+    Node* const result = SubString(context, string, var_start.value(), end);
+    Return(result);
   }
 }
 
-namespace {
-
-compiler::Node* ToSmiBetweenZeroAnd(CodeStubAssembler* a,
-                                    compiler::Node* context,
-                                    compiler::Node* value,
-                                    compiler::Node* limit) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Label out(a);
-  Variable var_result(a, MachineRepresentation::kTagged);
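+// Clamps {value} to the inclusive range [0, {limit}]: negative inputs map to
+// 0 and inputs above {limit} map to {limit}.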
+compiler::Node* StringBuiltinsAssembler::ToSmiBetweenZeroAnd(Node* context,
+                                                             Node* value,
+                                                             Node* limit) {
+  Label out(this);
+  Variable var_result(this, MachineRepresentation::kTagged);
 
   Node* const value_int =
-      a->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
+      this->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
 
-  Label if_issmi(a), if_isnotsmi(a, Label::kDeferred);
-  a->Branch(a->TaggedIsSmi(value_int), &if_issmi, &if_isnotsmi);
+  Label if_issmi(this), if_isnotsmi(this, Label::kDeferred);
+  Branch(TaggedIsSmi(value_int), &if_issmi, &if_isnotsmi);
 
-  a->Bind(&if_issmi);
+  Bind(&if_issmi);
   {
-    Label if_isinbounds(a), if_isoutofbounds(a, Label::kDeferred);
-    a->Branch(a->SmiAbove(value_int, limit), &if_isoutofbounds, &if_isinbounds);
+    Label if_isinbounds(this), if_isoutofbounds(this, Label::kDeferred);
+    Branch(SmiAbove(value_int, limit), &if_isoutofbounds, &if_isinbounds);
 
-    a->Bind(&if_isinbounds);
+    Bind(&if_isinbounds);
     {
       var_result.Bind(value_int);
-      a->Goto(&out);
+      Goto(&out);
     }
 
-    a->Bind(&if_isoutofbounds);
+    Bind(&if_isoutofbounds);
     {
-      Node* const zero = a->SmiConstant(Smi::kZero);
-      var_result.Bind(a->Select(a->SmiLessThan(value_int, zero), zero, limit));
-      a->Goto(&out);
+      Node* const zero = SmiConstant(Smi::kZero);
+      var_result.Bind(
+          SelectTaggedConstant(SmiLessThan(value_int, zero), zero, limit));
+      Goto(&out);
     }
   }
 
-  a->Bind(&if_isnotsmi);
+  Bind(&if_isnotsmi);
   {
     // {value} is a heap number - in this case, it is definitely out of bounds.
-    CSA_ASSERT(a,
-               a->WordEqual(a->LoadMap(value_int), a->HeapNumberMapConstant()));
+    CSA_ASSERT(this, IsHeapNumberMap(LoadMap(value_int)));
 
-    Node* const float_zero = a->Float64Constant(0.);
-    Node* const smi_zero = a->SmiConstant(Smi::kZero);
-    Node* const value_float = a->LoadHeapNumberValue(value_int);
-    var_result.Bind(a->Select(a->Float64LessThan(value_float, float_zero),
-                              smi_zero, limit));
-    a->Goto(&out);
+    Node* const float_zero = Float64Constant(0.);
+    Node* const smi_zero = SmiConstant(Smi::kZero);
+    Node* const value_float = LoadHeapNumberValue(value_int);
+    var_result.Bind(SelectTaggedConstant(
+        Float64LessThan(value_float, float_zero), smi_zero, limit));
+    Goto(&out);
   }
 
-  a->Bind(&out);
+  Bind(&out);
   return var_result.value();
 }
 
-}  // namespace
-
 // ES6 section 21.1.3.19 String.prototype.substring ( start, end )
-void Builtins::Generate_StringPrototypeSubstring(CodeStubAssembler* a) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
+  Label out(this);
 
-  Label out(a);
+  Variable var_start(this, MachineRepresentation::kTagged);
+  Variable var_end(this, MachineRepresentation::kTagged);
 
-  Variable var_start(a, MachineRepresentation::kTagged);
-  Variable var_end(a, MachineRepresentation::kTagged);
-
-  Node* const receiver = a->Parameter(0);
-  Node* const start = a->Parameter(1);
-  Node* const end = a->Parameter(2);
-  Node* const context = a->Parameter(5);
+  Node* const receiver = Parameter(0);
+  Node* const start = Parameter(1);
+  Node* const end = Parameter(2);
+  Node* const context = Parameter(5);
 
   // Check that {receiver} is coercible to Object and convert it to a String.
   Node* const string =
-      a->ToThisString(context, receiver, "String.prototype.substring");
+      ToThisString(context, receiver, "String.prototype.substring");
 
-  Node* const length = a->LoadStringLength(string);
+  Node* const length = LoadStringLength(string);
 
   // Conversion and bounds-checks for {start}.
-  var_start.Bind(ToSmiBetweenZeroAnd(a, context, start, length));
+  var_start.Bind(ToSmiBetweenZeroAnd(context, start, length));
 
   // Conversion and bounds-checks for {end}.
   {
     var_end.Bind(length);
-    a->GotoIf(a->WordEqual(end, a->UndefinedConstant()), &out);
+    GotoIf(WordEqual(end, UndefinedConstant()), &out);
 
-    var_end.Bind(ToSmiBetweenZeroAnd(a, context, end, length));
+    var_end.Bind(ToSmiBetweenZeroAnd(context, end, length));
 
-    Label if_endislessthanstart(a);
-    a->Branch(a->SmiLessThan(var_end.value(), var_start.value()),
-              &if_endislessthanstart, &out);
+    Label if_endislessthanstart(this);
+    Branch(SmiLessThan(var_end.value(), var_start.value()),
+           &if_endislessthanstart, &out);
 
-    a->Bind(&if_endislessthanstart);
+    Bind(&if_endislessthanstart);
     {
       Node* const tmp = var_end.value();
       var_end.Bind(var_start.value());
       var_start.Bind(tmp);
-      a->Goto(&out);
+      Goto(&out);
     }
   }
 
-  a->Bind(&out);
+  Bind(&out);
   {
     Node* result =
-        a->SubString(context, string, var_start.value(), var_end.value());
-    a->Return(result);
+        SubString(context, string, var_start.value(), var_end.value());
+    Return(result);
   }
 }
 
@@ -1170,9 +1769,7 @@
   } else {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
                                        Object::ToInteger(isolate, position));
-    double index = std::max(position->Number(), 0.0);
-    index = std::min(index, static_cast<double>(str->length()));
-    start = static_cast<uint32_t>(index);
+    start = str->ToValidIndex(*position);
   }
 
   if (start + search_string->length() > str->length()) {
@@ -1191,15 +1788,13 @@
 }
 
 // ES6 section 21.1.3.25 String.prototype.toString ()
-void Builtins::Generate_StringPrototypeToString(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
+TF_BUILTIN(StringPrototypeToString, CodeStubAssembler) {
+  Node* receiver = Parameter(0);
+  Node* context = Parameter(3);
 
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
-
-  Node* result = assembler->ToThisValue(
-      context, receiver, PrimitiveType::kString, "String.prototype.toString");
-  assembler->Return(result);
+  Node* result = ToThisValue(context, receiver, PrimitiveType::kString,
+                             "String.prototype.toString");
+  Return(result);
 }
 
 // ES6 section 21.1.3.27 String.prototype.trim ()
@@ -1224,103 +1819,82 @@
 }
 
 // ES6 section 21.1.3.28 String.prototype.valueOf ( )
-void Builtins::Generate_StringPrototypeValueOf(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
+TF_BUILTIN(StringPrototypeValueOf, CodeStubAssembler) {
+  Node* receiver = Parameter(0);
+  Node* context = Parameter(3);
 
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
-
-  Node* result = assembler->ToThisValue(
-      context, receiver, PrimitiveType::kString, "String.prototype.valueOf");
-  assembler->Return(result);
+  Node* result = ToThisValue(context, receiver, PrimitiveType::kString,
+                             "String.prototype.valueOf");
+  Return(result);
 }
 
-void Builtins::Generate_StringPrototypeIterator(CodeStubAssembler* assembler) {
-  typedef compiler::Node Node;
+TF_BUILTIN(StringPrototypeIterator, CodeStubAssembler) {
+  Node* receiver = Parameter(0);
+  Node* context = Parameter(3);
 
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
+  Node* string =
+      ToThisString(context, receiver, "String.prototype[Symbol.iterator]");
 
-  Node* string = assembler->ToThisString(context, receiver,
-                                         "String.prototype[Symbol.iterator]");
-
-  Node* native_context = assembler->LoadNativeContext(context);
-  Node* map = assembler->LoadFixedArrayElement(
-      native_context,
-      assembler->IntPtrConstant(Context::STRING_ITERATOR_MAP_INDEX), 0,
-      CodeStubAssembler::INTPTR_PARAMETERS);
-  Node* iterator = assembler->Allocate(JSStringIterator::kSize);
-  assembler->StoreMapNoWriteBarrier(iterator, map);
-  assembler->StoreObjectFieldRoot(iterator, JSValue::kPropertiesOffset,
-                                  Heap::kEmptyFixedArrayRootIndex);
-  assembler->StoreObjectFieldRoot(iterator, JSObject::kElementsOffset,
-                                  Heap::kEmptyFixedArrayRootIndex);
-  assembler->StoreObjectFieldNoWriteBarrier(
-      iterator, JSStringIterator::kStringOffset, string);
-  Node* index = assembler->SmiConstant(Smi::kZero);
-  assembler->StoreObjectFieldNoWriteBarrier(
-      iterator, JSStringIterator::kNextIndexOffset, index);
-  assembler->Return(iterator);
+  Node* native_context = LoadNativeContext(context);
+  Node* map =
+      LoadContextElement(native_context, Context::STRING_ITERATOR_MAP_INDEX);
+  Node* iterator = Allocate(JSStringIterator::kSize);
+  StoreMapNoWriteBarrier(iterator, map);
+  StoreObjectFieldRoot(iterator, JSValue::kPropertiesOffset,
+                       Heap::kEmptyFixedArrayRootIndex);
+  StoreObjectFieldRoot(iterator, JSObject::kElementsOffset,
+                       Heap::kEmptyFixedArrayRootIndex);
+  StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kStringOffset,
+                                 string);
+  Node* index = SmiConstant(Smi::kZero);
+  StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
+                                 index);
+  Return(iterator);
 }
 
-namespace {
-
 // Return the |word32| codepoint at {index}. Supports SeqStrings and
 // ExternalStrings.
-compiler::Node* LoadSurrogatePairInternal(CodeStubAssembler* assembler,
-                                          compiler::Node* string,
-                                          compiler::Node* length,
-                                          compiler::Node* index,
-                                          UnicodeEncoding encoding) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-  Label handle_surrogate_pair(assembler), return_result(assembler);
-  Variable var_result(assembler, MachineRepresentation::kWord32);
-  Variable var_trail(assembler, MachineRepresentation::kWord16);
-  var_result.Bind(assembler->StringCharCodeAt(string, index));
-  var_trail.Bind(assembler->Int32Constant(0));
+compiler::Node* StringBuiltinsAssembler::LoadSurrogatePairAt(
+    compiler::Node* string, compiler::Node* length, compiler::Node* index,
+    UnicodeEncoding encoding) {
+  Label handle_surrogate_pair(this), return_result(this);
+  Variable var_result(this, MachineRepresentation::kWord32);
+  Variable var_trail(this, MachineRepresentation::kWord32);
+  var_result.Bind(StringCharCodeAt(string, index));
+  var_trail.Bind(Int32Constant(0));
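+
+  // (c & 0xFC00) == 0xD800 identifies a lead surrogate in [0xD800, 0xDBFF];
+  // trail surrogates satisfy (c & 0xFC00) == 0xDC00, i.e. [0xDC00, 0xDFFF].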
 
-  assembler->GotoIf(assembler->Word32NotEqual(
-                        assembler->Word32And(var_result.value(),
-                                             assembler->Int32Constant(0xFC00)),
-                        assembler->Int32Constant(0xD800)),
-                    &return_result);
-  Node* next_index =
-      assembler->SmiAdd(index, assembler->SmiConstant(Smi::FromInt(1)));
+  GotoIf(Word32NotEqual(Word32And(var_result.value(), Int32Constant(0xFC00)),
+                        Int32Constant(0xD800)),
+         &return_result);
+  Node* next_index = SmiAdd(index, SmiConstant(Smi::FromInt(1)));
 
-  assembler->GotoUnless(assembler->SmiLessThan(next_index, length),
-                        &return_result);
-  var_trail.Bind(assembler->StringCharCodeAt(string, next_index));
-  assembler->Branch(assembler->Word32Equal(
-                        assembler->Word32And(var_trail.value(),
-                                             assembler->Int32Constant(0xFC00)),
-                        assembler->Int32Constant(0xDC00)),
-                    &handle_surrogate_pair, &return_result);
+  GotoIfNot(SmiLessThan(next_index, length), &return_result);
+  var_trail.Bind(StringCharCodeAt(string, next_index));
+  Branch(Word32Equal(Word32And(var_trail.value(), Int32Constant(0xFC00)),
+                     Int32Constant(0xDC00)),
+         &handle_surrogate_pair, &return_result);
 
-  assembler->Bind(&handle_surrogate_pair);
+  Bind(&handle_surrogate_pair);
   {
     Node* lead = var_result.value();
     Node* trail = var_trail.value();
 
     // Check that this path is only taken if a surrogate pair is found
-    CSA_SLOW_ASSERT(assembler, assembler->Uint32GreaterThanOrEqual(
-                                   lead, assembler->Int32Constant(0xD800)));
-    CSA_SLOW_ASSERT(assembler, assembler->Uint32LessThan(
-                                   lead, assembler->Int32Constant(0xDC00)));
-    CSA_SLOW_ASSERT(assembler, assembler->Uint32GreaterThanOrEqual(
-                                   trail, assembler->Int32Constant(0xDC00)));
-    CSA_SLOW_ASSERT(assembler, assembler->Uint32LessThan(
-                                   trail, assembler->Int32Constant(0xE000)));
+    CSA_SLOW_ASSERT(this,
+                    Uint32GreaterThanOrEqual(lead, Int32Constant(0xD800)));
+    CSA_SLOW_ASSERT(this, Uint32LessThan(lead, Int32Constant(0xDC00)));
+    CSA_SLOW_ASSERT(this,
+                    Uint32GreaterThanOrEqual(trail, Int32Constant(0xDC00)));
+    CSA_SLOW_ASSERT(this, Uint32LessThan(trail, Int32Constant(0xE000)));
 
     switch (encoding) {
       case UnicodeEncoding::UTF16:
-        var_result.Bind(assembler->WordOr(
+        var_result.Bind(Word32Or(
 // Need to swap the order for big-endian platforms
 #if V8_TARGET_BIG_ENDIAN
-            assembler->WordShl(lead, assembler->Int32Constant(16)), trail));
+            Word32Shl(lead, Int32Constant(16)), trail));
 #else
-            assembler->WordShl(trail, assembler->Int32Constant(16)), lead));
+            Word32Shl(trail, Int32Constant(16)), lead));
 #endif
         break;
 
@@ -1328,108 +1902,277 @@
         // Convert UTF16 surrogate pair into |word32| code point, encoded as
         // UTF32.
         Node* surrogate_offset =
-            assembler->Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
+            Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
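+        // For example, U+1F600 has lead 0xD83D and trail 0xDE00:
+        // (0xD83D << 10) + 0xDE00 + surrogate_offset == 0x1F600.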
 
         // (lead << 10) + trail + SURROGATE_OFFSET
-        var_result.Bind(assembler->Int32Add(
-            assembler->WordShl(lead, assembler->Int32Constant(10)),
-            assembler->Int32Add(trail, surrogate_offset)));
+        var_result.Bind(Int32Add(WordShl(lead, Int32Constant(10)),
+                                 Int32Add(trail, surrogate_offset)));
         break;
       }
     }
-    assembler->Goto(&return_result);
+    Goto(&return_result);
   }
 
-  assembler->Bind(&return_result);
+  Bind(&return_result);
   return var_result.value();
 }
 
-compiler::Node* LoadSurrogatePairAt(CodeStubAssembler* assembler,
-                                    compiler::Node* string,
-                                    compiler::Node* length,
-                                    compiler::Node* index) {
-  return LoadSurrogatePairInternal(assembler, string, length, index,
-                                   UnicodeEncoding::UTF16);
+TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
+  Variable var_value(this, MachineRepresentation::kTagged);
+  Variable var_done(this, MachineRepresentation::kTagged);
+
+  var_value.Bind(UndefinedConstant());
+  var_done.Bind(BooleanConstant(true));
+
+  Label throw_bad_receiver(this), next_codepoint(this), return_result(this);
+
+  Node* iterator = Parameter(0);
+  Node* context = Parameter(3);
+
+  GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
+  GotoIfNot(Word32Equal(LoadInstanceType(iterator),
+                        Int32Constant(JS_STRING_ITERATOR_TYPE)),
+            &throw_bad_receiver);
+
+  Node* string = LoadObjectField(iterator, JSStringIterator::kStringOffset);
+  Node* position =
+      LoadObjectField(iterator, JSStringIterator::kNextIndexOffset);
+  Node* length = LoadObjectField(string, String::kLengthOffset);
+
+  Branch(SmiLessThan(position, length), &next_codepoint, &return_result);
+
+  Bind(&next_codepoint);
+  {
+    UnicodeEncoding encoding = UnicodeEncoding::UTF16;
+    Node* ch = LoadSurrogatePairAt(string, length, position, encoding);
+    Node* value = StringFromCodePoint(ch, encoding);
+    var_value.Bind(value);
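+    // The next index advances by the length of {value}: 1 for a BMP code
+    // point and 2 for a surrogate pair, i.e. one code point per step.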
+    Node* length = LoadObjectField(value, String::kLengthOffset);
+    StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
+                                   SmiAdd(position, length));
+    var_done.Bind(BooleanConstant(false));
+    Goto(&return_result);
+  }
+
+  Bind(&return_result);
+  {
+    Node* native_context = LoadNativeContext(context);
+    Node* map =
+        LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+    Node* result = Allocate(JSIteratorResult::kSize);
+    StoreMapNoWriteBarrier(result, map);
+    StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
+                         Heap::kEmptyFixedArrayRootIndex);
+    StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
+                         Heap::kEmptyFixedArrayRootIndex);
+    StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset,
+                                   var_value.value());
+    StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset,
+                                   var_done.value());
+    Return(result);
+  }
+
+  Bind(&throw_bad_receiver);
+  {
+    // The {receiver} is not a valid JSStringIterator.
+    CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
+                HeapConstant(factory()->NewStringFromAsciiChecked(
+                    "String Iterator.prototype.next", TENURED)),
+                iterator);
+    Unreachable();
+  }
+}
+
+namespace {
+
+inline bool ToUpperOverflows(uc32 character) {
+  // y with umlaut (Ăż) and the micro sign (µ) are the only characters that
+  // stop fitting into one byte when converted to uppercase.
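+  // For example, Ăż (U+00FF) uppercases to Ÿ (U+0178) and µ (U+00B5) to
+  // Μ (U+039C), both outside the one-byte range.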
+  static const uc32 yuml_code = 0xff;
+  static const uc32 micro_code = 0xb5;
+  return (character == yuml_code || character == micro_code);
+}
+
+template <class Converter>
+MUST_USE_RESULT static Object* ConvertCaseHelper(
+    Isolate* isolate, String* string, SeqString* result, int result_length,
+    unibrow::Mapping<Converter, 128>* mapping) {
+  DisallowHeapAllocation no_gc;
+  // We try this twice, once with the assumption that the result is no longer
+  // than the input and, if that assumption breaks, again with the exact
+  // length.  This may not be pretty, but it is nicer than what was here
+  // before and I hereby claim my vaffel-is (Norwegian: waffle ice cream).
+  //
+  // NOTE: This assumes that the upper/lower case of an ASCII
+  // character is also ASCII.  This is currently the case, but it
+  // might break in the future if we implement more context and locale
+  // dependent upper/lower conversions.
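+  //
+  // For example, ß (U+00DF) uppercases to "SS", so the result can be longer
+  // than the input and a second pass with the exact length is then required.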
+  bool has_changed_character = false;
+
+  // Convert all characters to upper (or lower) case, assuming that they
+  // will fit in the buffer.
+  StringCharacterStream stream(string);
+  unibrow::uchar chars[Converter::kMaxWidth];
+  // We can assume that the string is not empty
+  uc32 current = stream.GetNext();
+  bool ignore_overflow = Converter::kIsToLower || result->IsSeqTwoByteString();
+  for (int i = 0; i < result_length;) {
+    bool has_next = stream.HasMore();
+    uc32 next = has_next ? stream.GetNext() : 0;
+    int char_length = mapping->get(current, next, chars);
+    if (char_length == 0) {
+      // The case conversion of this character is the character itself.
+      result->Set(i, current);
+      i++;
+    } else if (char_length == 1 &&
+               (ignore_overflow || !ToUpperOverflows(current))) {
+      // Common case: converting the letter resulted in one character.
+      DCHECK(static_cast<uc32>(chars[0]) != current);
+      result->Set(i, chars[0]);
+      has_changed_character = true;
+      i++;
+    } else if (result_length == string->length()) {
+      bool overflows = ToUpperOverflows(current);
+      // We've assumed that the result would be as long as the
+      // input but here is a character that converts to several
+      // characters.  No matter, we calculate the exact length
+      // of the result and try the whole thing again.
+      //
+      // Note that this leaves room for optimization.  We could just
+      // memcpy what we already have to the result string.  Also,
+      // the result string is the last object allocated we could
+      // "realloc" it and probably, in the vast majority of cases,
+      // extend the existing string to be able to hold the full
+      // result.
+      int next_length = 0;
+      if (has_next) {
+        next_length = mapping->get(next, 0, chars);
+        if (next_length == 0) next_length = 1;
+      }
+      int current_length = i + char_length + next_length;
+      while (stream.HasMore()) {
+        current = stream.GetNext();
+        overflows |= ToUpperOverflows(current);
+        // NOTE: we use 0 as the next character here because, while the next
+        // character may affect what a character converts to, it does not in
+        // any case affect the length of what it converts to.
+        int char_length = mapping->get(current, 0, chars);
+        if (char_length == 0) char_length = 1;
+        current_length += char_length;
+        if (current_length > String::kMaxLength) {
+          AllowHeapAllocation allocate_error_and_return;
+          THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+                                         NewInvalidStringLengthError());
+        }
+      }
+      // Try again with the real length.  Return a negative length if we
+      // need to allocate a two-byte string for the case-converted result.
+      return (overflows && !ignore_overflow) ? Smi::FromInt(-current_length)
+                                             : Smi::FromInt(current_length);
+    } else {
+      for (int j = 0; j < char_length; j++) {
+        result->Set(i, chars[j]);
+        i++;
+      }
+      has_changed_character = true;
+    }
+    current = next;
+  }
+  if (has_changed_character) {
+    return result;
+  } else {
+    // If we didn't actually change anything in doing the conversion
+    // we simply return the original string and let the converted string
+    // become garbage; there is no reason to keep two identical strings
+    // alive.
+    return string;
+  }
+}
+
+template <class Converter>
+MUST_USE_RESULT static Object* ConvertCase(
+    Handle<String> s, Isolate* isolate,
+    unibrow::Mapping<Converter, 128>* mapping) {
+  s = String::Flatten(s);
+  int length = s->length();
+  // Assume that the string is not empty; we need this assumption later
+  if (length == 0) return *s;
+
+  // Simpler handling of ASCII strings.
+  //
+  // NOTE: This assumes that the upper/lower case of an ASCII
+  // character is also ASCII.  This is currently the case, but it
+  // might break in the future if we implement more context and locale
+  // dependent upper/lower conversions.
+  if (s->IsOneByteRepresentationUnderneath()) {
+    // Same length as input.
+    Handle<SeqOneByteString> result =
+        isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+    DisallowHeapAllocation no_gc;
+    String::FlatContent flat_content = s->GetFlatContent();
+    DCHECK(flat_content.IsFlat());
+    bool has_changed_character = false;
+    int index_to_first_unprocessed = FastAsciiConvert<Converter::kIsToLower>(
+        reinterpret_cast<char*>(result->GetChars()),
+        reinterpret_cast<const char*>(flat_content.ToOneByteVector().start()),
+        length, &has_changed_character);
+    // If the string is not pure ASCII, we discard the result and take the
+    // two-byte path below.
+    if (index_to_first_unprocessed == length)
+      return has_changed_character ? *result : *s;
+  }
+
+  Handle<SeqString> result;  // Same length as input.
+  if (s->IsOneByteRepresentation()) {
+    result = isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+  } else {
+    result = isolate->factory()->NewRawTwoByteString(length).ToHandleChecked();
+  }
+
+  Object* answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
+  if (answer->IsException(isolate) || answer->IsString()) return answer;
+
+  DCHECK(answer->IsSmi());
+  length = Smi::cast(answer)->value();
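+  // A negative length signals that the result needs a two-byte string even
+  // though the input was one-byte.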
+  if (s->IsOneByteRepresentation() && length > 0) {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, isolate->factory()->NewRawOneByteString(length));
+  } else {
+    if (length < 0) length = -length;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, isolate->factory()->NewRawTwoByteString(length));
+  }
+  return ConvertCaseHelper(isolate, *s, *result, length, mapping);
 }
 
 }  // namespace
 
-void Builtins::Generate_StringIteratorPrototypeNext(
-    CodeStubAssembler* assembler) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
+BUILTIN(StringPrototypeToLocaleLowerCase) {
+  HandleScope scope(isolate);
+  TO_THIS_STRING(string, "String.prototype.toLocaleLowerCase");
+  return ConvertCase(string, isolate,
+                     isolate->runtime_state()->to_lower_mapping());
+}
 
-  Variable var_value(assembler, MachineRepresentation::kTagged);
-  Variable var_done(assembler, MachineRepresentation::kTagged);
+BUILTIN(StringPrototypeToLocaleUpperCase) {
+  HandleScope scope(isolate);
+  TO_THIS_STRING(string, "String.prototype.toLocaleUpperCase");
+  return ConvertCase(string, isolate,
+                     isolate->runtime_state()->to_upper_mapping());
+}
 
-  var_value.Bind(assembler->UndefinedConstant());
-  var_done.Bind(assembler->BooleanConstant(true));
+BUILTIN(StringPrototypeToLowerCase) {
+  HandleScope scope(isolate);
+  TO_THIS_STRING(string, "String.prototype.toLowerCase");
+  return ConvertCase(string, isolate,
+                     isolate->runtime_state()->to_lower_mapping());
+}
 
-  Label throw_bad_receiver(assembler), next_codepoint(assembler),
-      return_result(assembler);
-
-  Node* iterator = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
-
-  assembler->GotoIf(assembler->TaggedIsSmi(iterator), &throw_bad_receiver);
-  assembler->GotoUnless(
-      assembler->WordEqual(assembler->LoadInstanceType(iterator),
-                           assembler->Int32Constant(JS_STRING_ITERATOR_TYPE)),
-      &throw_bad_receiver);
-
-  Node* string =
-      assembler->LoadObjectField(iterator, JSStringIterator::kStringOffset);
-  Node* position =
-      assembler->LoadObjectField(iterator, JSStringIterator::kNextIndexOffset);
-  Node* length = assembler->LoadObjectField(string, String::kLengthOffset);
-
-  assembler->Branch(assembler->SmiLessThan(position, length), &next_codepoint,
-                    &return_result);
-
-  assembler->Bind(&next_codepoint);
-  {
-    Node* ch = LoadSurrogatePairAt(assembler, string, length, position);
-    Node* value = assembler->StringFromCodePoint(ch, UnicodeEncoding::UTF16);
-    var_value.Bind(value);
-    Node* length = assembler->LoadObjectField(value, String::kLengthOffset);
-    assembler->StoreObjectFieldNoWriteBarrier(
-        iterator, JSStringIterator::kNextIndexOffset,
-        assembler->SmiAdd(position, length));
-    var_done.Bind(assembler->BooleanConstant(false));
-    assembler->Goto(&return_result);
-  }
-
-  assembler->Bind(&return_result);
-  {
-    Node* native_context = assembler->LoadNativeContext(context);
-    Node* map = assembler->LoadFixedArrayElement(
-        native_context,
-        assembler->IntPtrConstant(Context::ITERATOR_RESULT_MAP_INDEX), 0,
-        CodeStubAssembler::INTPTR_PARAMETERS);
-    Node* result = assembler->Allocate(JSIteratorResult::kSize);
-    assembler->StoreMapNoWriteBarrier(result, map);
-    assembler->StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
-                                    Heap::kEmptyFixedArrayRootIndex);
-    assembler->StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
-                                    Heap::kEmptyFixedArrayRootIndex);
-    assembler->StoreObjectFieldNoWriteBarrier(
-        result, JSIteratorResult::kValueOffset, var_value.value());
-    assembler->StoreObjectFieldNoWriteBarrier(
-        result, JSIteratorResult::kDoneOffset, var_done.value());
-    assembler->Return(result);
-  }
-
-  assembler->Bind(&throw_bad_receiver);
-  {
-    // The {receiver} is not a valid JSGeneratorObject.
-    Node* result = assembler->CallRuntime(
-        Runtime::kThrowIncompatibleMethodReceiver, context,
-        assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
-            "String Iterator.prototype.next", TENURED)),
-        iterator);
-    assembler->Return(result);  // Never reached.
-  }
+BUILTIN(StringPrototypeToUpperCase) {
+  HandleScope scope(isolate);
+  TO_THIS_STRING(string, "String.prototype.toUpperCase");
+  return ConvertCase(string, isolate,
+                     isolate->runtime_state()->to_upper_mapping());
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-symbol.cc b/src/builtins/builtins-symbol.cc
index 8dd8a1f..f57d0bf 100644
--- a/src/builtins/builtins-symbol.cc
+++ b/src/builtins/builtins-symbol.cc
@@ -2,8 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -32,44 +35,81 @@
                             isolate->factory()->Symbol_string()));
 }
 
+// ES6 section 19.4.2.1 Symbol.for.
+BUILTIN(SymbolFor) {
+  HandleScope scope(isolate);
+  Handle<Object> key_obj = args.atOrUndefined(isolate, 1);
+  Handle<String> key;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+                                     Object::ToString(isolate, key_obj));
+  return *isolate->SymbolFor(Heap::kPublicSymbolTableRootIndex, key, false);
+}
+
+// ES6 section 19.4.2.5 Symbol.keyFor.
+BUILTIN(SymbolKeyFor) {
+  HandleScope scope(isolate);
+  Handle<Object> obj = args.atOrUndefined(isolate, 1);
+  if (!obj->IsSymbol()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kSymbolKeyFor, obj));
+  }
+  Handle<Symbol> symbol = Handle<Symbol>::cast(obj);
+  DisallowHeapAllocation no_gc;
+  Object* result;
+  if (symbol->is_public()) {
+    result = symbol->name();
+    DCHECK(result->IsString());
+  } else {
+    result = isolate->heap()->undefined_value();
+  }
+  DCHECK_EQ(isolate->heap()->public_symbol_table()->SlowReverseLookup(*symbol),
+            result);
+  return result;
+}
+
 // ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint )
 void Builtins::Generate_SymbolPrototypeToPrimitive(
-    CodeStubAssembler* assembler) {
+    compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(4);
+  Node* receiver = assembler.Parameter(0);
+  Node* context = assembler.Parameter(4);
 
   Node* result =
-      assembler->ToThisValue(context, receiver, PrimitiveType::kSymbol,
-                             "Symbol.prototype [ @@toPrimitive ]");
-  assembler->Return(result);
+      assembler.ToThisValue(context, receiver, PrimitiveType::kSymbol,
+                            "Symbol.prototype [ @@toPrimitive ]");
+  assembler.Return(result);
 }
 
 // ES6 section 19.4.3.2 Symbol.prototype.toString ( )
-void Builtins::Generate_SymbolPrototypeToString(CodeStubAssembler* assembler) {
+void Builtins::Generate_SymbolPrototypeToString(
+    compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
+  Node* receiver = assembler.Parameter(0);
+  Node* context = assembler.Parameter(3);
 
-  Node* value = assembler->ToThisValue(
-      context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.toString");
+  Node* value = assembler.ToThisValue(context, receiver, PrimitiveType::kSymbol,
+                                      "Symbol.prototype.toString");
   Node* result =
-      assembler->CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
-  assembler->Return(result);
+      assembler.CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
+  assembler.Return(result);
 }
 
 // ES6 section 19.4.3.3 Symbol.prototype.valueOf ( )
-void Builtins::Generate_SymbolPrototypeValueOf(CodeStubAssembler* assembler) {
+void Builtins::Generate_SymbolPrototypeValueOf(
+    compiler::CodeAssemblerState* state) {
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
+  Node* receiver = assembler.Parameter(0);
+  Node* context = assembler.Parameter(3);
 
-  Node* result = assembler->ToThisValue(
+  Node* result = assembler.ToThisValue(
       context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.valueOf");
-  assembler->Return(result);
+  assembler.Return(result);
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-typedarray.cc b/src/builtins/builtins-typedarray.cc
index 94173fa..9a9ec59 100644
--- a/src/builtins/builtins-typedarray.cc
+++ b/src/builtins/builtins-typedarray.cc
@@ -2,12 +2,27 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/builtins/builtins.h"
 #include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
+class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
+ public:
+  explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
+
+ protected:
+  void GenerateTypedArrayPrototypeGetter(const char* method_name,
+                                         int object_offset);
+  template <IterationKind kIterationKind>
+  void GenerateTypedArrayPrototypeIterationMethod(const char* method_name);
+};
+
 // -----------------------------------------------------------------------------
 // ES6 section 22.2 TypedArray Objects
 
@@ -18,151 +33,204 @@
   return *typed_array->GetBuffer();
 }
 
-namespace {
-
-void Generate_TypedArrayProtoypeGetter(CodeStubAssembler* assembler,
-                                       const char* method_name,
-                                       int object_offset) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
+void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeGetter(
+    const char* method_name, int object_offset) {
+  Node* receiver = Parameter(0);
+  Node* context = Parameter(3);
 
   // Check if the {receiver} is actually a JSTypedArray.
-  Label if_receiverisincompatible(assembler, Label::kDeferred);
-  assembler->GotoIf(assembler->TaggedIsSmi(receiver),
-                    &if_receiverisincompatible);
-  Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
-  assembler->GotoUnless(
-      assembler->Word32Equal(receiver_instance_type,
-                             assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
-      &if_receiverisincompatible);
+  Label receiver_is_incompatible(this, Label::kDeferred);
+  GotoIf(TaggedIsSmi(receiver), &receiver_is_incompatible);
+  GotoIfNot(HasInstanceType(receiver, JS_TYPED_ARRAY_TYPE),
+            &receiver_is_incompatible);
 
   // Check if the {receiver}'s JSArrayBuffer was neutered.
   Node* receiver_buffer =
-      assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
-  Label if_receiverisneutered(assembler, Label::kDeferred);
-  assembler->GotoIf(assembler->IsDetachedBuffer(receiver_buffer),
-                    &if_receiverisneutered);
-  assembler->Return(assembler->LoadObjectField(receiver, object_offset));
+      LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+  Label if_receiverisneutered(this, Label::kDeferred);
+  GotoIf(IsDetachedBuffer(receiver_buffer), &if_receiverisneutered);
+  Return(LoadObjectField(receiver, object_offset));
 
-  assembler->Bind(&if_receiverisneutered);
+  Bind(&if_receiverisneutered);
   {
+    // The {receiver}'s buffer was neutered; default to zero.
-    assembler->Return(assembler->SmiConstant(0));
+    Return(SmiConstant(0));
   }
 
-  assembler->Bind(&if_receiverisincompatible);
+  Bind(&receiver_is_incompatible);
   {
-    // The {receiver} is not a valid JSGeneratorObject.
-    Node* result = assembler->CallRuntime(
-        Runtime::kThrowIncompatibleMethodReceiver, context,
-        assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
-            method_name, TENURED)),
-        receiver);
-    assembler->Return(result);  // Never reached.
+    // The {receiver} is not a valid JSTypedArray.
+    CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
+                HeapConstant(
+                    factory()->NewStringFromAsciiChecked(method_name, TENURED)),
+                receiver);
+    Unreachable();
   }
 }
 
-}  // namespace
-
 // ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength
-void Builtins::Generate_TypedArrayPrototypeByteLength(
-    CodeStubAssembler* assembler) {
-  Generate_TypedArrayProtoypeGetter(assembler,
-                                    "get TypedArray.prototype.byteLength",
+TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
+  GenerateTypedArrayPrototypeGetter("get TypedArray.prototype.byteLength",
                                     JSTypedArray::kByteLengthOffset);
 }
 
 // ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset
-void Builtins::Generate_TypedArrayPrototypeByteOffset(
-    CodeStubAssembler* assembler) {
-  Generate_TypedArrayProtoypeGetter(assembler,
-                                    "get TypedArray.prototype.byteOffset",
+TF_BUILTIN(TypedArrayPrototypeByteOffset, TypedArrayBuiltinsAssembler) {
+  GenerateTypedArrayPrototypeGetter("get TypedArray.prototype.byteOffset",
                                     JSTypedArray::kByteOffsetOffset);
 }
 
 // ES6 section 22.2.3.18 get %TypedArray%.prototype.length
-void Builtins::Generate_TypedArrayPrototypeLength(
-    CodeStubAssembler* assembler) {
-  Generate_TypedArrayProtoypeGetter(assembler,
-                                    "get TypedArray.prototype.length",
+TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
+  GenerateTypedArrayPrototypeGetter("get TypedArray.prototype.length",
                                     JSTypedArray::kLengthOffset);
 }
 
+template <IterationKind kIterationKind>
+void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod(
+    const char* method_name) {
+  Node* receiver = Parameter(0);
+  Node* context = Parameter(3);
+
+  Label throw_bad_receiver(this, Label::kDeferred);
+  Label throw_typeerror(this, Label::kDeferred);
+
+  GotoIf(TaggedIsSmi(receiver), &throw_bad_receiver);
+
+  Node* map = LoadMap(receiver);
+  Node* instance_type = LoadMapInstanceType(map);
+  GotoIf(Word32NotEqual(instance_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
+         &throw_bad_receiver);
+
+  // Check if the {receiver}'s JSArrayBuffer was neutered.
+  Node* receiver_buffer =
+      LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+  Label if_receiverisneutered(this, Label::kDeferred);
+  GotoIf(IsDetachedBuffer(receiver_buffer), &if_receiverisneutered);
+
+  Return(CreateArrayIterator(receiver, map, instance_type, context,
+                             kIterationKind));
+
+  Variable var_message(this, MachineRepresentation::kTagged);
+  Bind(&throw_bad_receiver);
+  var_message.Bind(SmiConstant(MessageTemplate::kNotTypedArray));
+  Goto(&throw_typeerror);
+
+  Bind(&if_receiverisneutered);
+  var_message.Bind(
+      SmiConstant(Smi::FromInt(MessageTemplate::kDetachedOperation)));
+  Goto(&throw_typeerror);
+
+  Bind(&throw_typeerror);
+  {
+    Node* method_arg = HeapConstant(
+        isolate()->factory()->NewStringFromAsciiChecked(method_name, TENURED));
+    Node* result = CallRuntime(Runtime::kThrowTypeError, context,
+                               var_message.value(), method_arg);
+    Return(result);
+  }
+}
+
+TF_BUILTIN(TypedArrayPrototypeValues, TypedArrayBuiltinsAssembler) {
+  GenerateTypedArrayPrototypeIterationMethod<IterationKind::kValues>(
+      "%TypedArray%.prototype.values()");
+}
+
+TF_BUILTIN(TypedArrayPrototypeEntries, TypedArrayBuiltinsAssembler) {
+  GenerateTypedArrayPrototypeIterationMethod<IterationKind::kEntries>(
+      "%TypedArray%.prototype.entries()");
+}
+
+TF_BUILTIN(TypedArrayPrototypeKeys, TypedArrayBuiltinsAssembler) {
+  GenerateTypedArrayPrototypeIterationMethod<IterationKind::kKeys>(
+      "%TypedArray%.prototype.keys()");
+}
+
 namespace {
 
-template <IterationKind kIterationKind>
-void Generate_TypedArrayPrototypeIterationMethod(CodeStubAssembler* assembler,
-                                                 const char* method_name) {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-
-  Node* receiver = assembler->Parameter(0);
-  Node* context = assembler->Parameter(3);
-
-  Label throw_bad_receiver(assembler, Label::kDeferred);
-  Label throw_typeerror(assembler, Label::kDeferred);
-
-  assembler->GotoIf(assembler->TaggedIsSmi(receiver), &throw_bad_receiver);
-
-  Node* map = assembler->LoadMap(receiver);
-  Node* instance_type = assembler->LoadMapInstanceType(map);
-  assembler->GotoIf(
-      assembler->Word32NotEqual(instance_type,
-                                assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
-      &throw_bad_receiver);
-
-  // Check if the {receiver}'s JSArrayBuffer was neutered.
-  Node* receiver_buffer =
-      assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
-  Label if_receiverisneutered(assembler, Label::kDeferred);
-  assembler->GotoIf(assembler->IsDetachedBuffer(receiver_buffer),
-                    &if_receiverisneutered);
-
-  assembler->Return(assembler->CreateArrayIterator(receiver, map, instance_type,
-                                                   context, kIterationKind));
-
-  Variable var_message(assembler, MachineRepresentation::kTagged);
-  assembler->Bind(&throw_bad_receiver);
-  var_message.Bind(
-      assembler->SmiConstant(Smi::FromInt(MessageTemplate::kNotTypedArray)));
-  assembler->Goto(&throw_typeerror);
-
-  assembler->Bind(&if_receiverisneutered);
-  var_message.Bind(assembler->SmiConstant(
-      Smi::FromInt(MessageTemplate::kDetachedOperation)));
-  assembler->Goto(&throw_typeerror);
-
-  assembler->Bind(&throw_typeerror);
-  {
-    Node* arg1 = assembler->HeapConstant(
-        assembler->isolate()->factory()->NewStringFromAsciiChecked(method_name,
-                                                                   TENURED));
-    Node* result = assembler->CallRuntime(Runtime::kThrowTypeError, context,
-                                          var_message.value(), arg1);
-    assembler->Return(result);
+int64_t CapRelativeIndex(Handle<Object> num, int64_t minimum, int64_t maximum) {
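+  // For example, with minimum = 0 and maximum = 10: an index of -3 maps to
+  // 7, -15 clamps to 0, and 15 clamps to 10.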
+  int64_t relative;
+  if (V8_LIKELY(num->IsSmi())) {
+    relative = Smi::cast(*num)->value();
+  } else {
+    DCHECK(num->IsHeapNumber());
+    double fp = HeapNumber::cast(*num)->value();
+    if (V8_UNLIKELY(!std::isfinite(fp))) {
+      // +Infinity / -Infinity
+      DCHECK(!std::isnan(fp));
+      return fp < 0 ? minimum : maximum;
+    }
+    relative = static_cast<int64_t>(fp);
   }
+  return relative < 0 ? std::max<int64_t>(relative + maximum, minimum)
+                      : std::min<int64_t>(relative, maximum);
 }
+
 }  // namespace
 
-void Builtins::Generate_TypedArrayPrototypeValues(
-    CodeStubAssembler* assembler) {
-  Generate_TypedArrayPrototypeIterationMethod<IterationKind::kValues>(
-      assembler, "%TypedArray%.prototype.values()");
-}
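+// ES6 #sec-%typedarray%.prototype.copywithin
+// %TypedArray%.prototype.copyWithin ( target, start [ , end ] )
+// For example, new Int8Array([1, 2, 3, 4, 5]).copyWithin(0, 3) yields
+// [4, 5, 3, 4, 5].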
+BUILTIN(TypedArrayPrototypeCopyWithin) {
+  HandleScope scope(isolate);
 
-void Builtins::Generate_TypedArrayPrototypeEntries(
-    CodeStubAssembler* assembler) {
-  Generate_TypedArrayPrototypeIterationMethod<IterationKind::kEntries>(
-      assembler, "%TypedArray%.prototype.entries()");
-}
+  Handle<JSTypedArray> array;
+  const char* method = "%TypedArray%.prototype.copyWithin";
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
 
-void Builtins::Generate_TypedArrayPrototypeKeys(CodeStubAssembler* assembler) {
-  Generate_TypedArrayPrototypeIterationMethod<IterationKind::kKeys>(
-      assembler, "%TypedArray%.prototype.keys()");
+  if (V8_UNLIKELY(array->WasNeutered())) return *array;
+
+  int64_t len = array->length_value();
+  int64_t to = 0;
+  int64_t from = 0;
+  int64_t final = len;
+
+  if (V8_LIKELY(args.length() > 1)) {
+    Handle<Object> num;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, num, Object::ToInteger(isolate, args.at<Object>(1)));
+    to = CapRelativeIndex(num, 0, len);
+
+    if (args.length() > 2) {
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, num, Object::ToInteger(isolate, args.at<Object>(2)));
+      from = CapRelativeIndex(num, 0, len);
+
+      Handle<Object> end = args.atOrUndefined(isolate, 3);
+      if (!end->IsUndefined(isolate)) {
+        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, num,
+                                           Object::ToInteger(isolate, end));
+        final = CapRelativeIndex(num, 0, len);
+      }
+    }
+  }
+
+  int64_t count = std::min<int64_t>(final - from, len - to);
+  if (count <= 0) return *array;
+
+  // The TypedArray's buffer may have been transferred/detached during the
+  // parameter processing above. Return early in this case, to prevent a
+  // potential use-after-free (UAF) error.
+  // TODO(caitp): throw here, as though the full algorithm were performed (the
+  // throw would have come from ecma262/#sec-integerindexedelementget)
+  // (see )
+  if (V8_UNLIKELY(array->WasNeutered())) return *array;
+
+  // Ensure the processed indices are within the array's bounds.
+  DCHECK_GE(from, 0);
+  DCHECK_LT(from, len);
+  DCHECK_GE(to, 0);
+  DCHECK_LT(to, len);
+  DCHECK_GE(len - count, 0);
+
+  Handle<FixedTypedArrayBase> elements(
+      FixedTypedArrayBase::cast(array->elements()));
+  size_t element_size = array->element_size();
+  to = to * element_size;
+  from = from * element_size;
+  count = count * element_size;
+
+  uint8_t* data = static_cast<uint8_t*>(elements->DataPtr());
+  std::memmove(data + to, data + from, count);
+
+  return *array;
 }
 
 }  // namespace internal
diff --git a/src/builtins/builtins-utils.h b/src/builtins/builtins-utils.h
index 6378fdf..7a2424d 100644
--- a/src/builtins/builtins-utils.h
+++ b/src/builtins/builtins-utils.h
@@ -8,11 +8,16 @@
 #include "src/arguments.h"
 #include "src/base/logging.h"
 #include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
+#include "src/factory.h"
+#include "src/isolate.h"
 
 namespace v8 {
 namespace internal {
 
+namespace compiler {
+class CodeAssemblerState;
+}
+
 // Arguments object passed to C++ builtins.
 class BuiltinArguments : public Arguments {
  public:
@@ -27,7 +32,7 @@
     return Arguments::operator[](index);
   }
 
-  template <class S>
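+  // S defaults to Object so callers can write args.at(i) without an explicit
+  // template argument.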
+  template <class S = Object>
   Handle<S> at(int index) {
     DCHECK_LT(index, length());
     return Arguments::at<S>(index);
@@ -102,6 +107,31 @@
                                                      Isolate* isolate)
 
 // ----------------------------------------------------------------------------
+// Support macro for defining builtins with Turbofan.
+// ----------------------------------------------------------------------------
+//
+// A builtin function is defined by writing:
+//
+//   TF_BUILTIN(name, code_assembler_base_class) {
+//     ...
+//   }
+//
+// In the body of the builtin function the arguments can be accessed
+// as "Parameter(n)".
+#define TF_BUILTIN(Name, AssemblerBase)                                 \
+  class Name##Assembler : public AssemblerBase {                        \
+   public:                                                              \
+    explicit Name##Assembler(compiler::CodeAssemblerState* state)       \
+        : AssemblerBase(state) {}                                       \
+    void Generate##Name##Impl();                                        \
+  };                                                                    \
+  void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
+    Name##Assembler assembler(state);                                   \
+    assembler.Generate##Name##Impl();                                   \
+  }                                                                     \
+  void Name##Assembler::Generate##Name##Impl()
+
+// ----------------------------------------------------------------------------
 
 #define CHECK_RECEIVER(Type, name, method)                                  \
   if (!args.receiver()->Is##Type()) {                                       \
@@ -117,8 +147,7 @@
 // or converts the receiver to a String otherwise and assigns it to a new var
 // with the given {name}.
 #define TO_THIS_STRING(name, method)                                          \
-  if (args.receiver()->IsNull(isolate) ||                                     \
-      args.receiver()->IsUndefined(isolate)) {                                \
+  if (args.receiver()->IsNullOrUndefined(isolate)) {                          \
     THROW_NEW_ERROR_RETURN_FAILURE(                                           \
         isolate,                                                              \
         NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,               \
diff --git a/src/builtins/builtins-wasm.cc b/src/builtins/builtins-wasm.cc
new file mode 100644
index 0000000..c809ccc
--- /dev/null
+++ b/src/builtins/builtins-wasm.cc
@@ -0,0 +1,30 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/code-stub-assembler.h"
+#include "src/objects-inl.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+
+TF_BUILTIN(WasmStackGuard, CodeStubAssembler) {
+  Node* context = SmiConstant(Smi::kZero);
+  TailCallRuntime(Runtime::kWasmStackGuard, context);
+}
+
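+// Define a ThrowWasm<TrapReason> builtin for each WASM trap reason; each one
+// tail-calls into the runtime with the corresponding message id.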
+#define DECLARE_ENUM(name)                                                    \
+  TF_BUILTIN(ThrowWasm##name, CodeStubAssembler) {                            \
+    int message_id = wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \
+    TailCallRuntime(Runtime::kThrowWasmErrorFromTrapIf,                       \
+                    SmiConstant(Smi::kZero), SmiConstant(message_id));        \
+  }
+FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
+#undef DECLARE_ENUM
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/builtins/builtins.cc b/src/builtins/builtins.cc
index ec981fe..518075a 100644
--- a/src/builtins/builtins.cc
+++ b/src/builtins/builtins.cc
@@ -3,13 +3,14 @@
 // found in the LICENSE file.
 
 #include "src/builtins/builtins.h"
+#include "src/api.h"
 #include "src/code-events.h"
-#include "src/code-stub-assembler.h"
+#include "src/compiler/code-assembler.h"
 #include "src/ic/ic-state.h"
 #include "src/interface-descriptors.h"
 #include "src/isolate.h"
 #include "src/macro-assembler.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -42,7 +43,7 @@
 }
 
 typedef void (*MacroAssemblerGenerator)(MacroAssembler*);
-typedef void (*CodeAssemblerGenerator)(CodeStubAssembler*);
+typedef void (*CodeAssemblerGenerator)(compiler::CodeAssemblerState*);
 
 Code* BuildWithMacroAssembler(Isolate* isolate,
                               MacroAssemblerGenerator generator,
@@ -86,9 +87,10 @@
   Zone zone(isolate->allocator(), ZONE_NAME);
   const int argc_with_recv =
       (argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
-  CodeStubAssembler assembler(isolate, &zone, argc_with_recv, flags, name);
-  generator(&assembler);
-  Handle<Code> code = assembler.GenerateCode();
+  compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv, flags,
+                                     name);
+  generator(&state);
+  Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
   PostBuildProfileAndTracing(isolate, *code, name);
   return *code;
 }
@@ -97,7 +99,8 @@
 Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
                                    CodeAssemblerGenerator generator,
                                    CallDescriptors::Key interface_descriptor,
-                                   Code::Flags flags, const char* name) {
+                                   Code::Flags flags, const char* name,
+                                   int result_size) {
   HandleScope scope(isolate);
   Zone zone(isolate->allocator(), ZONE_NAME);
   // The interface descriptor with given key must be initialized at this point
@@ -105,9 +108,10 @@
   CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
   // Ensure descriptor is already initialized.
   DCHECK_LE(0, descriptor.GetRegisterParameterCount());
-  CodeStubAssembler assembler(isolate, &zone, descriptor, flags, name);
-  generator(&assembler);
-  Handle<Code> code = assembler.GenerateCode();
+  compiler::CodeAssemblerState state(isolate, &zone, descriptor, flags, name,
+                                     result_size);
+  generator(&state);
+  Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
   PostBuildProfileAndTracing(isolate, *code, name);
   return *code;
 }
@@ -135,11 +139,11 @@
   code = BuildWithCodeStubAssemblerJS(isolate, &Generate_##Name, Argc, \
                                       kBuiltinFlags, #Name);           \
   builtins_[index++] = code;
-#define BUILD_TFS(Name, Kind, Extra, InterfaceDescriptor)              \
+#define BUILD_TFS(Name, Kind, Extra, InterfaceDescriptor, result_size) \
   { InterfaceDescriptor##Descriptor descriptor(isolate); }             \
   code = BuildWithCodeStubAssemblerCS(                                 \
       isolate, &Generate_##Name, CallDescriptors::InterfaceDescriptor, \
-      Code::ComputeFlags(Code::Kind, Extra), #Name);                   \
+      Code::ComputeFlags(Code::Kind, Extra), #Name, result_size);      \
   builtins_[index++] = code;
 #define BUILD_ASM(Name)                                                        \
   code =                                                                       \
diff --git a/src/builtins/builtins.h b/src/builtins/builtins.h
index a6b126d..f2b0c4f 100644
--- a/src/builtins/builtins.h
+++ b/src/builtins/builtins.h
@@ -6,11 +6,15 @@
 #define V8_BUILTINS_BUILTINS_H_
 
 #include "src/base/flags.h"
-#include "src/handles.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
 
+template <typename T>
+class Handle;
+class Isolate;
+
 #define CODE_AGE_LIST_WITH_ARG(V, A) \
   V(Quadragenarian, A)               \
   V(Quinquagenarian, A)              \
@@ -29,9 +33,7 @@
   V(NoAge)                        \
   CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
 
-#define DECLARE_CODE_AGE_BUILTIN(C, V) \
-  V(Make##C##CodeYoungAgainOddMarking) \
-  V(Make##C##CodeYoungAgainEvenMarking)
+#define DECLARE_CODE_AGE_BUILTIN(C, V) V(Make##C##CodeYoungAgain)
 
 // CPP: Builtin in C++. Entered via BUILTIN_EXIT frame.
 //      Args: name
@@ -40,665 +42,812 @@
 // TFJ: Builtin in Turbofan, with JS linkage (callable as a JavaScript function).
 //      Args: name, arguments count
 // TFS: Builtin in Turbofan, with CodeStub linkage.
-//      Args: name, code kind, extra IC state, interface descriptor
+//      Args: name, code kind, extra IC state, interface descriptor, result_size
 // ASM: Builtin in platform-dependent assembly.
 //      Args: name
 // ASH: Handlers implemented in platform-dependent assembly.
 //      Args: name, code kind, extra IC state
 // DBG: Builtin in platform-dependent assembly, used by the debugger.
 //      Args: name
-#define BUILTIN_LIST(CPP, API, TFJ, TFS, ASM, ASH, DBG)                       \
-  ASM(Abort)                                                                  \
-  /* Code aging */                                                            \
-  CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM)                       \
-                                                                              \
-  TFS(ToObject, BUILTIN, kNoExtraICState, TypeConversion)                     \
-                                                                              \
-  /* Calls */                                                                 \
-  ASM(ArgumentsAdaptorTrampoline)                                             \
-  /* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */             \
-  ASM(CallFunction_ReceiverIsNullOrUndefined)                                 \
-  ASM(CallFunction_ReceiverIsNotNullOrUndefined)                              \
-  ASM(CallFunction_ReceiverIsAny)                                             \
-  ASM(TailCallFunction_ReceiverIsNullOrUndefined)                             \
-  ASM(TailCallFunction_ReceiverIsNotNullOrUndefined)                          \
-  ASM(TailCallFunction_ReceiverIsAny)                                         \
-  /* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */           \
-  ASM(CallBoundFunction)                                                      \
-  ASM(TailCallBoundFunction)                                                  \
-  /* ES6 section 7.3.12 Call(F, V, [argumentsList]) */                        \
-  ASM(Call_ReceiverIsNullOrUndefined)                                         \
-  ASM(Call_ReceiverIsNotNullOrUndefined)                                      \
-  ASM(Call_ReceiverIsAny)                                                     \
-  ASM(TailCall_ReceiverIsNullOrUndefined)                                     \
-  ASM(TailCall_ReceiverIsNotNullOrUndefined)                                  \
-  ASM(TailCall_ReceiverIsAny)                                                 \
-                                                                              \
-  /* Construct */                                                             \
-  /* ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget) */           \
-  ASM(ConstructFunction)                                                      \
-  /* ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget) */          \
-  ASM(ConstructBoundFunction)                                                 \
-  ASM(ConstructedNonConstructable)                                            \
-  /* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */          \
-  ASM(ConstructProxy)                                                         \
-  /* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */        \
-  ASM(Construct)                                                              \
-  ASM(JSConstructStubApi)                                                     \
-  ASM(JSConstructStubGeneric)                                                 \
-  ASM(JSBuiltinsConstructStub)                                                \
-  ASM(JSBuiltinsConstructStubForDerived)                                      \
-                                                                              \
-  /* Apply and entries */                                                     \
-  ASM(Apply)                                                                  \
-  ASM(JSEntryTrampoline)                                                      \
-  ASM(JSConstructEntryTrampoline)                                             \
-  ASM(ResumeGeneratorTrampoline)                                              \
-                                                                              \
-  /* Stack and interrupt check */                                             \
-  ASM(InterruptCheck)                                                         \
-  ASM(StackCheck)                                                             \
-                                                                              \
-  /* String helpers */                                                        \
-  TFS(StringEqual, BUILTIN, kNoExtraICState, Compare)                         \
-  TFS(StringNotEqual, BUILTIN, kNoExtraICState, Compare)                      \
-  TFS(StringLessThan, BUILTIN, kNoExtraICState, Compare)                      \
-  TFS(StringLessThanOrEqual, BUILTIN, kNoExtraICState, Compare)               \
-  TFS(StringGreaterThan, BUILTIN, kNoExtraICState, Compare)                   \
-  TFS(StringGreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare)            \
-                                                                              \
-  /* Interpreter */                                                           \
-  ASM(InterpreterEntryTrampoline)                                             \
-  ASM(InterpreterPushArgsAndCall)                                             \
-  ASM(InterpreterPushArgsAndCallFunction)                                     \
-  ASM(InterpreterPushArgsAndTailCall)                                         \
-  ASM(InterpreterPushArgsAndTailCallFunction)                                 \
-  ASM(InterpreterPushArgsAndConstruct)                                        \
-  ASM(InterpreterPushArgsAndConstructFunction)                                \
-  ASM(InterpreterPushArgsAndConstructArray)                                   \
-  ASM(InterpreterEnterBytecodeAdvance)                                        \
-  ASM(InterpreterEnterBytecodeDispatch)                                       \
-  ASM(InterpreterOnStackReplacement)                                          \
-                                                                              \
-  /* Code life-cycle */                                                       \
-  ASM(CompileLazy)                                                            \
-  ASM(CompileBaseline)                                                        \
-  ASM(CompileOptimized)                                                       \
-  ASM(CompileOptimizedConcurrent)                                             \
-  ASM(InOptimizationQueue)                                                    \
-  ASM(InstantiateAsmJs)                                                       \
-  ASM(MarkCodeAsToBeExecutedOnce)                                             \
-  ASM(MarkCodeAsExecutedOnce)                                                 \
-  ASM(MarkCodeAsExecutedTwice)                                                \
-  ASM(NotifyDeoptimized)                                                      \
-  ASM(NotifySoftDeoptimized)                                                  \
-  ASM(NotifyLazyDeoptimized)                                                  \
-  ASM(NotifyStubFailure)                                                      \
-  ASM(NotifyStubFailureSaveDoubles)                                           \
-  ASM(OnStackReplacement)                                                     \
-                                                                              \
-  /* API callback handling */                                                 \
-  API(HandleApiCall)                                                          \
-  API(HandleApiCallAsFunction)                                                \
-  API(HandleApiCallAsConstructor)                                             \
-  ASM(HandleFastApiCall)                                                      \
-                                                                              \
-  /* Adapters for Turbofan into runtime */                                    \
-  ASM(AllocateInNewSpace)                                                     \
-  ASM(AllocateInOldSpace)                                                     \
-                                                                              \
-  /* TurboFan support builtins */                                             \
-  TFS(CopyFastSmiOrObjectElements, BUILTIN, kNoExtraICState,                  \
-      CopyFastSmiOrObjectElements)                                            \
-  TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements)    \
-  TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState,                  \
-      GrowArrayElements)                                                      \
-                                                                              \
-  /* Debugger */                                                              \
-  DBG(FrameDropper_LiveEdit)                                                  \
-  DBG(Return_DebugBreak)                                                      \
-  DBG(Slot_DebugBreak)                                                        \
-                                                                              \
-  /* Type conversions */                                                      \
-  TFS(ToBoolean, BUILTIN, kNoExtraICState, TypeConversion)                    \
-  TFS(OrdinaryToPrimitive_Number, BUILTIN, kNoExtraICState, TypeConversion)   \
-  TFS(OrdinaryToPrimitive_String, BUILTIN, kNoExtraICState, TypeConversion)   \
-  TFS(NonPrimitiveToPrimitive_Default, BUILTIN, kNoExtraICState,              \
-      TypeConversion)                                                         \
-  TFS(NonPrimitiveToPrimitive_Number, BUILTIN, kNoExtraICState,               \
-      TypeConversion)                                                         \
-  TFS(NonPrimitiveToPrimitive_String, BUILTIN, kNoExtraICState,               \
-      TypeConversion)                                                         \
-  TFS(StringToNumber, BUILTIN, kNoExtraICState, TypeConversion)               \
-  TFS(ToName, BUILTIN, kNoExtraICState, TypeConversion)                       \
-  TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion)            \
-  TFS(ToNumber, BUILTIN, kNoExtraICState, TypeConversion)                     \
-  TFS(ToString, BUILTIN, kNoExtraICState, TypeConversion)                     \
-  TFS(ToInteger, BUILTIN, kNoExtraICState, TypeConversion)                    \
-  TFS(ToLength, BUILTIN, kNoExtraICState, TypeConversion)                     \
-  TFS(Typeof, BUILTIN, kNoExtraICState, Typeof)                               \
-                                                                              \
-  /* Handlers */                                                              \
-  TFS(KeyedLoadIC_Megamorphic_TF, KEYED_LOAD_IC, kNoExtraICState,             \
-      LoadWithVector)                                                         \
-  ASM(KeyedLoadIC_Miss)                                                       \
-  ASH(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC)                         \
-  ASH(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState)              \
-  ASH(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC,                        \
-      StoreICState::kStrictModeState)                                         \
-  TFS(KeyedStoreIC_Megamorphic_TF, KEYED_STORE_IC, kNoExtraICState,           \
-      StoreWithVector)                                                        \
-  TFS(KeyedStoreIC_Megamorphic_Strict_TF, KEYED_STORE_IC,                     \
-      StoreICState::kStrictModeState, StoreWithVector)                        \
-  ASM(KeyedStoreIC_Miss)                                                      \
-  ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC)                       \
-  TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector)      \
-  TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
-  ASH(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState)                       \
-  TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector)                  \
-  ASH(LoadIC_Normal, HANDLER, Code::LOAD_IC)                                  \
-  TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector)                    \
-  TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector)                \
-  ASH(StoreIC_Normal, HANDLER, Code::STORE_IC)                                \
-  ASH(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState)      \
-  TFS(StoreIC_SlowSloppy, HANDLER, Code::STORE_IC, StoreWithVector)           \
-  TFS(StoreIC_SlowStrict, HANDLER, Code::STORE_IC, StoreWithVector)           \
-                                                                              \
-  /* Built-in functions for JavaScript */                                     \
-  /* Special internal builtins */                                             \
-  CPP(EmptyFunction)                                                          \
-  CPP(Illegal)                                                                \
-  CPP(RestrictedFunctionPropertiesThrower)                                    \
-  CPP(RestrictedStrictArgumentsPropertiesThrower)                             \
-  CPP(UnsupportedThrower)                                                     \
-                                                                              \
-  /* Array */                                                                 \
-  ASM(ArrayCode)                                                              \
-  ASM(InternalArrayCode)                                                      \
-  CPP(ArrayConcat)                                                            \
-  /* ES6 section 22.1.2.2 Array.isArray */                                    \
-  TFJ(ArrayIsArray, 1)                                                        \
-  /* ES7 #sec-array.prototype.includes */                                     \
-  TFJ(ArrayIncludes, 2)                                                       \
-  TFJ(ArrayIndexOf, 2)                                                        \
-  CPP(ArrayPop)                                                               \
-  CPP(ArrayPush)                                                              \
-  CPP(ArrayShift)                                                             \
-  CPP(ArraySlice)                                                             \
-  CPP(ArraySplice)                                                            \
-  CPP(ArrayUnshift)                                                           \
-  /* ES6 #sec-array.prototype.entries */                                      \
-  TFJ(ArrayPrototypeEntries, 0)                                               \
-  /* ES6 #sec-array.prototype.keys */                                         \
-  TFJ(ArrayPrototypeKeys, 0)                                                  \
-  /* ES6 #sec-array.prototype.values */                                       \
-  TFJ(ArrayPrototypeValues, 0)                                                \
-  /* ES6 #sec-%arrayiteratorprototype%.next */                                \
-  TFJ(ArrayIteratorPrototypeNext, 0)                                          \
-                                                                              \
-  /* ArrayBuffer */                                                           \
-  CPP(ArrayBufferConstructor)                                                 \
-  CPP(ArrayBufferConstructor_ConstructStub)                                   \
-  CPP(ArrayBufferPrototypeGetByteLength)                                      \
-  CPP(ArrayBufferIsView)                                                      \
-                                                                              \
-  /* Boolean */                                                               \
-  CPP(BooleanConstructor)                                                     \
-  CPP(BooleanConstructor_ConstructStub)                                       \
-  /* ES6 section 19.3.3.2 Boolean.prototype.toString ( ) */                   \
-  TFJ(BooleanPrototypeToString, 0)                                            \
-  /* ES6 section 19.3.3.3 Boolean.prototype.valueOf ( ) */                    \
-  TFJ(BooleanPrototypeValueOf, 0)                                             \
-                                                                              \
-  /* CallSite */                                                              \
-  CPP(CallSitePrototypeGetColumnNumber)                                       \
-  CPP(CallSitePrototypeGetEvalOrigin)                                         \
-  CPP(CallSitePrototypeGetFileName)                                           \
-  CPP(CallSitePrototypeGetFunction)                                           \
-  CPP(CallSitePrototypeGetFunctionName)                                       \
-  CPP(CallSitePrototypeGetLineNumber)                                         \
-  CPP(CallSitePrototypeGetMethodName)                                         \
-  CPP(CallSitePrototypeGetPosition)                                           \
-  CPP(CallSitePrototypeGetScriptNameOrSourceURL)                              \
-  CPP(CallSitePrototypeGetThis)                                               \
-  CPP(CallSitePrototypeGetTypeName)                                           \
-  CPP(CallSitePrototypeIsConstructor)                                         \
-  CPP(CallSitePrototypeIsEval)                                                \
-  CPP(CallSitePrototypeIsNative)                                              \
-  CPP(CallSitePrototypeIsToplevel)                                            \
-  CPP(CallSitePrototypeToString)                                              \
-                                                                              \
-  /* DataView */                                                              \
-  CPP(DataViewConstructor)                                                    \
-  CPP(DataViewConstructor_ConstructStub)                                      \
-  CPP(DataViewPrototypeGetBuffer)                                             \
-  CPP(DataViewPrototypeGetByteLength)                                         \
-  CPP(DataViewPrototypeGetByteOffset)                                         \
-  CPP(DataViewPrototypeGetInt8)                                               \
-  CPP(DataViewPrototypeSetInt8)                                               \
-  CPP(DataViewPrototypeGetUint8)                                              \
-  CPP(DataViewPrototypeSetUint8)                                              \
-  CPP(DataViewPrototypeGetInt16)                                              \
-  CPP(DataViewPrototypeSetInt16)                                              \
-  CPP(DataViewPrototypeGetUint16)                                             \
-  CPP(DataViewPrototypeSetUint16)                                             \
-  CPP(DataViewPrototypeGetInt32)                                              \
-  CPP(DataViewPrototypeSetInt32)                                              \
-  CPP(DataViewPrototypeGetUint32)                                             \
-  CPP(DataViewPrototypeSetUint32)                                             \
-  CPP(DataViewPrototypeGetFloat32)                                            \
-  CPP(DataViewPrototypeSetFloat32)                                            \
-  CPP(DataViewPrototypeGetFloat64)                                            \
-  CPP(DataViewPrototypeSetFloat64)                                            \
-                                                                              \
-  /* Date */                                                                  \
-  CPP(DateConstructor)                                                        \
-  CPP(DateConstructor_ConstructStub)                                          \
-  /* ES6 section 20.3.4.2 Date.prototype.getDate ( ) */                       \
-  TFJ(DatePrototypeGetDate, 0)                                                \
-  /* ES6 section 20.3.4.3 Date.prototype.getDay ( ) */                        \
-  TFJ(DatePrototypeGetDay, 0)                                                 \
-  /* ES6 section 20.3.4.4 Date.prototype.getFullYear ( ) */                   \
-  TFJ(DatePrototypeGetFullYear, 0)                                            \
-  /* ES6 section 20.3.4.5 Date.prototype.getHours ( ) */                      \
-  TFJ(DatePrototypeGetHours, 0)                                               \
-  /* ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( ) */               \
-  TFJ(DatePrototypeGetMilliseconds, 0)                                        \
-  /* ES6 section 20.3.4.7 Date.prototype.getMinutes ( ) */                    \
-  TFJ(DatePrototypeGetMinutes, 0)                                             \
-  /* ES6 section 20.3.4.8 Date.prototype.getMonth */                          \
-  TFJ(DatePrototypeGetMonth, 0)                                               \
-  /* ES6 section 20.3.4.9 Date.prototype.getSeconds ( ) */                    \
-  TFJ(DatePrototypeGetSeconds, 0)                                             \
-  /* ES6 section 20.3.4.10 Date.prototype.getTime ( ) */                      \
-  TFJ(DatePrototypeGetTime, 0)                                                \
-  /* ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( ) */            \
-  TFJ(DatePrototypeGetTimezoneOffset, 0)                                      \
-  /* ES6 section 20.3.4.12 Date.prototype.getUTCDate ( ) */                   \
-  TFJ(DatePrototypeGetUTCDate, 0)                                             \
-  /* ES6 section 20.3.4.13 Date.prototype.getUTCDay ( ) */                    \
-  TFJ(DatePrototypeGetUTCDay, 0)                                              \
-  /* ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( ) */               \
-  TFJ(DatePrototypeGetUTCFullYear, 0)                                         \
-  /* ES6 section 20.3.4.15 Date.prototype.getUTCHours ( ) */                  \
-  TFJ(DatePrototypeGetUTCHours, 0)                                            \
-  /* ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( ) */           \
-  TFJ(DatePrototypeGetUTCMilliseconds, 0)                                     \
-  /* ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( ) */                \
-  TFJ(DatePrototypeGetUTCMinutes, 0)                                          \
-  /* ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( ) */                  \
-  TFJ(DatePrototypeGetUTCMonth, 0)                                            \
-  /* ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( ) */                \
-  TFJ(DatePrototypeGetUTCSeconds, 0)                                          \
-  CPP(DatePrototypeGetYear)                                                   \
-  CPP(DatePrototypeSetYear)                                                   \
-  CPP(DateNow)                                                                \
-  CPP(DateParse)                                                              \
-  CPP(DatePrototypeSetDate)                                                   \
-  CPP(DatePrototypeSetFullYear)                                               \
-  CPP(DatePrototypeSetHours)                                                  \
-  CPP(DatePrototypeSetMilliseconds)                                           \
-  CPP(DatePrototypeSetMinutes)                                                \
-  CPP(DatePrototypeSetMonth)                                                  \
-  CPP(DatePrototypeSetSeconds)                                                \
-  CPP(DatePrototypeSetTime)                                                   \
-  CPP(DatePrototypeSetUTCDate)                                                \
-  CPP(DatePrototypeSetUTCFullYear)                                            \
-  CPP(DatePrototypeSetUTCHours)                                               \
-  CPP(DatePrototypeSetUTCMilliseconds)                                        \
-  CPP(DatePrototypeSetUTCMinutes)                                             \
-  CPP(DatePrototypeSetUTCMonth)                                               \
-  CPP(DatePrototypeSetUTCSeconds)                                             \
-  CPP(DatePrototypeToDateString)                                              \
-  CPP(DatePrototypeToISOString)                                               \
-  CPP(DatePrototypeToPrimitive)                                               \
-  CPP(DatePrototypeToUTCString)                                               \
-  CPP(DatePrototypeToString)                                                  \
-  CPP(DatePrototypeToTimeString)                                              \
-  CPP(DatePrototypeValueOf)                                                   \
-  CPP(DatePrototypeToJson)                                                    \
-  CPP(DateUTC)                                                                \
-                                                                              \
-  /* Error */                                                                 \
-  CPP(ErrorConstructor)                                                       \
-  CPP(ErrorCaptureStackTrace)                                                 \
-  CPP(ErrorPrototypeToString)                                                 \
-  CPP(MakeError)                                                              \
-  CPP(MakeRangeError)                                                         \
-  CPP(MakeSyntaxError)                                                        \
-  CPP(MakeTypeError)                                                          \
-  CPP(MakeURIError)                                                           \
-                                                                              \
-  /* Function */                                                              \
-  CPP(FunctionConstructor)                                                    \
-  ASM(FunctionPrototypeApply)                                                 \
-  CPP(FunctionPrototypeBind)                                                  \
-  ASM(FunctionPrototypeCall)                                                  \
-  /* ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V ) */       \
-  TFJ(FunctionPrototypeHasInstance, 1)                                        \
-  CPP(FunctionPrototypeToString)                                              \
-                                                                              \
-  /* Generator and Async */                                                   \
-  CPP(GeneratorFunctionConstructor)                                           \
-  /* ES6 section 25.3.1.2 Generator.prototype.next ( value ) */               \
-  TFJ(GeneratorPrototypeNext, 1)                                              \
-  /* ES6 section 25.3.1.3 Generator.prototype.return ( value ) */             \
-  TFJ(GeneratorPrototypeReturn, 1)                                            \
-  /* ES6 section 25.3.1.4 Generator.prototype.throw ( exception ) */          \
-  TFJ(GeneratorPrototypeThrow, 1)                                             \
-  CPP(AsyncFunctionConstructor)                                               \
-                                                                              \
-  /* Global object */                                                         \
-  CPP(GlobalDecodeURI)                                                        \
-  CPP(GlobalDecodeURIComponent)                                               \
-  CPP(GlobalEncodeURI)                                                        \
-  CPP(GlobalEncodeURIComponent)                                               \
-  CPP(GlobalEscape)                                                           \
-  CPP(GlobalUnescape)                                                         \
-  CPP(GlobalEval)                                                             \
-  /* ES6 section 18.2.2 isFinite ( number ) */                                \
-  TFJ(GlobalIsFinite, 1)                                                      \
-  /* ES6 section 18.2.3 isNaN ( number ) */                                   \
-  TFJ(GlobalIsNaN, 1)                                                         \
-                                                                              \
-  /* ES6 #sec-%iteratorprototype%-@@iterator */                               \
-  TFJ(IteratorPrototypeIterator, 0)                                           \
-                                                                              \
-  /* JSON */                                                                  \
-  CPP(JsonParse)                                                              \
-  CPP(JsonStringify)                                                          \
-                                                                              \
-  /* Math */                                                                  \
-  /* ES6 section 20.2.2.1 Math.abs ( x ) */                                   \
-  TFJ(MathAbs, 1)                                                             \
-  /* ES6 section 20.2.2.2 Math.acos ( x ) */                                  \
-  TFJ(MathAcos, 1)                                                            \
-  /* ES6 section 20.2.2.3 Math.acosh ( x ) */                                 \
-  TFJ(MathAcosh, 1)                                                           \
-  /* ES6 section 20.2.2.4 Math.asin ( x ) */                                  \
-  TFJ(MathAsin, 1)                                                            \
-  /* ES6 section 20.2.2.5 Math.asinh ( x ) */                                 \
-  TFJ(MathAsinh, 1)                                                           \
-  /* ES6 section 20.2.2.6 Math.atan ( x ) */                                  \
-  TFJ(MathAtan, 1)                                                            \
-  /* ES6 section 20.2.2.7 Math.atanh ( x ) */                                 \
-  TFJ(MathAtanh, 1)                                                           \
-  /* ES6 section 20.2.2.8 Math.atan2 ( y, x ) */                              \
-  TFJ(MathAtan2, 2)                                                           \
-  /* ES6 section 20.2.2.9 Math.cbrt ( x ) */                                  \
-  TFJ(MathCbrt, 1)                                                            \
-  /* ES6 section 20.2.2.10 Math.ceil ( x ) */                                 \
-  TFJ(MathCeil, 1)                                                            \
-  /* ES6 section 20.2.2.11 Math.clz32 ( x ) */                                \
-  TFJ(MathClz32, 1)                                                           \
-  /* ES6 section 20.2.2.12 Math.cos ( x ) */                                  \
-  TFJ(MathCos, 1)                                                             \
-  /* ES6 section 20.2.2.13 Math.cosh ( x ) */                                 \
-  TFJ(MathCosh, 1)                                                            \
-  /* ES6 section 20.2.2.14 Math.exp ( x ) */                                  \
-  TFJ(MathExp, 1)                                                             \
-  /* ES6 section 20.2.2.15 Math.expm1 ( x ) */                                \
-  TFJ(MathExpm1, 1)                                                           \
-  /* ES6 section 20.2.2.16 Math.floor ( x ) */                                \
-  TFJ(MathFloor, 1)                                                           \
-  /* ES6 section 20.2.2.17 Math.fround ( x ) */                               \
-  TFJ(MathFround, 1)                                                          \
-  /* ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values ) */        \
-  CPP(MathHypot)                                                              \
-  /* ES6 section 20.2.2.19 Math.imul ( x, y ) */                              \
-  TFJ(MathImul, 2)                                                            \
-  /* ES6 section 20.2.2.20 Math.log ( x ) */                                  \
-  TFJ(MathLog, 1)                                                             \
-  /* ES6 section 20.2.2.21 Math.log1p ( x ) */                                \
-  TFJ(MathLog1p, 1)                                                           \
-  /* ES6 section 20.2.2.22 Math.log10 ( x ) */                                \
-  TFJ(MathLog10, 1)                                                           \
-  /* ES6 section 20.2.2.23 Math.log2 ( x ) */                                 \
-  TFJ(MathLog2, 1)                                                            \
-  /* ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values ) */         \
-  ASM(MathMax)                                                                \
-  /* ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values ) */         \
-  ASM(MathMin)                                                                \
-  /* ES6 section 20.2.2.26 Math.pow ( x, y ) */                               \
-  TFJ(MathPow, 2)                                                             \
-  /* ES6 section 20.2.2.27 Math.random */                                     \
-  TFJ(MathRandom, 0)                                                          \
-  /* ES6 section 20.2.2.28 Math.round ( x ) */                                \
-  TFJ(MathRound, 1)                                                           \
-  /* ES6 section 20.2.2.29 Math.sign ( x ) */                                 \
-  TFJ(MathSign, 1)                                                            \
-  /* ES6 section 20.2.2.30 Math.sin ( x ) */                                  \
-  TFJ(MathSin, 1)                                                             \
-  /* ES6 section 20.2.2.31 Math.sinh ( x ) */                                 \
-  TFJ(MathSinh, 1)                                                            \
-  /* ES6 section 20.2.2.32 Math.sqrt ( x ) */                                 \
-  TFJ(MathSqrt, 1)                                                            \
-  /* ES6 section 20.2.2.33 Math.tan ( x ) */                                  \
-  TFJ(MathTan, 1)                                                             \
-  /* ES6 section 20.2.2.34 Math.tanh ( x ) */                                 \
-  TFJ(MathTanh, 1)                                                            \
-  /* ES6 section 20.2.2.35 Math.trunc ( x ) */                                \
-  TFJ(MathTrunc, 1)                                                           \
-                                                                              \
-  /* Number */                                                                \
-  /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */       \
-  ASM(NumberConstructor)                                                      \
-  /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */  \
-  ASM(NumberConstructor_ConstructStub)                                        \
-  /* ES6 section 20.1.2.2 Number.isFinite ( number ) */                       \
-  TFJ(NumberIsFinite, 1)                                                      \
-  /* ES6 section 20.1.2.3 Number.isInteger ( number ) */                      \
-  TFJ(NumberIsInteger, 1)                                                     \
-  /* ES6 section 20.1.2.4 Number.isNaN ( number ) */                          \
-  TFJ(NumberIsNaN, 1)                                                         \
-  /* ES6 section 20.1.2.5 Number.isSafeInteger ( number ) */                  \
-  TFJ(NumberIsSafeInteger, 1)                                                 \
-  /* ES6 section 20.1.2.12 Number.parseFloat ( string ) */                    \
-  TFJ(NumberParseFloat, 1)                                                    \
-  /* ES6 section 20.1.2.13 Number.parseInt ( string, radix ) */               \
-  TFJ(NumberParseInt, 2)                                                      \
-  CPP(NumberPrototypeToExponential)                                           \
-  CPP(NumberPrototypeToFixed)                                                 \
-  CPP(NumberPrototypeToLocaleString)                                          \
-  CPP(NumberPrototypeToPrecision)                                             \
-  CPP(NumberPrototypeToString)                                                \
-  /* ES6 section 20.1.3.7 Number.prototype.valueOf ( ) */                     \
-  TFJ(NumberPrototypeValueOf, 0)                                              \
-  TFS(Add, BUILTIN, kNoExtraICState, BinaryOp)                                \
-  TFS(Subtract, BUILTIN, kNoExtraICState, BinaryOp)                           \
-  TFS(Multiply, BUILTIN, kNoExtraICState, BinaryOp)                           \
-  TFS(Divide, BUILTIN, kNoExtraICState, BinaryOp)                             \
-  TFS(Modulus, BUILTIN, kNoExtraICState, BinaryOp)                            \
-  TFS(BitwiseAnd, BUILTIN, kNoExtraICState, BinaryOp)                         \
-  TFS(BitwiseOr, BUILTIN, kNoExtraICState, BinaryOp)                          \
-  TFS(BitwiseXor, BUILTIN, kNoExtraICState, BinaryOp)                         \
-  TFS(ShiftLeft, BUILTIN, kNoExtraICState, BinaryOp)                          \
-  TFS(ShiftRight, BUILTIN, kNoExtraICState, BinaryOp)                         \
-  TFS(ShiftRightLogical, BUILTIN, kNoExtraICState, BinaryOp)                  \
-  TFS(LessThan, BUILTIN, kNoExtraICState, Compare)                            \
-  TFS(LessThanOrEqual, BUILTIN, kNoExtraICState, Compare)                     \
-  TFS(GreaterThan, BUILTIN, kNoExtraICState, Compare)                         \
-  TFS(GreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare)                  \
-  TFS(Equal, BUILTIN, kNoExtraICState, Compare)                               \
-  TFS(NotEqual, BUILTIN, kNoExtraICState, Compare)                            \
-  TFS(StrictEqual, BUILTIN, kNoExtraICState, Compare)                         \
-  TFS(StrictNotEqual, BUILTIN, kNoExtraICState, Compare)                      \
-                                                                              \
-  /* Object */                                                                \
-  CPP(ObjectAssign)                                                           \
-  TFJ(ObjectCreate, 2)                                                        \
-  CPP(ObjectDefineGetter)                                                     \
-  CPP(ObjectDefineProperties)                                                 \
-  CPP(ObjectDefineProperty)                                                   \
-  CPP(ObjectDefineSetter)                                                     \
-  CPP(ObjectEntries)                                                          \
-  CPP(ObjectFreeze)                                                           \
-  CPP(ObjectGetOwnPropertyDescriptor)                                         \
-  CPP(ObjectGetOwnPropertyDescriptors)                                        \
-  CPP(ObjectGetOwnPropertyNames)                                              \
-  CPP(ObjectGetOwnPropertySymbols)                                            \
-  CPP(ObjectGetPrototypeOf)                                                   \
-  CPP(ObjectSetPrototypeOf)                                                   \
-  /* ES6 section 19.1.3.2 Object.prototype.hasOwnProperty */                  \
-  TFJ(ObjectHasOwnProperty, 1)                                                \
-  CPP(ObjectIs)                                                               \
-  CPP(ObjectIsExtensible)                                                     \
-  CPP(ObjectIsFrozen)                                                         \
-  CPP(ObjectIsSealed)                                                         \
-  CPP(ObjectKeys)                                                             \
-  CPP(ObjectLookupGetter)                                                     \
-  CPP(ObjectLookupSetter)                                                     \
-  CPP(ObjectPreventExtensions)                                                \
-  /* ES6 section 19.1.3.6 Object.prototype.toString () */                     \
-  TFJ(ObjectProtoToString, 0)                                                 \
-  CPP(ObjectPrototypePropertyIsEnumerable)                                    \
-  CPP(ObjectPrototypeGetProto)                                                \
-  CPP(ObjectPrototypeSetProto)                                                \
-  CPP(ObjectSeal)                                                             \
-  CPP(ObjectValues)                                                           \
-                                                                              \
-  TFS(HasProperty, BUILTIN, kNoExtraICState, HasProperty)                     \
-  TFS(InstanceOf, BUILTIN, kNoExtraICState, Compare)                          \
-  TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare)                 \
-  TFS(ForInFilter, BUILTIN, kNoExtraICState, ForInFilter)                     \
-                                                                              \
-  /* Promise */                                                               \
-  CPP(CreateResolvingFunctions)                                               \
-  CPP(PromiseResolveClosure)                                                  \
-  CPP(PromiseRejectClosure)                                                   \
-                                                                              \
-  /* Proxy */                                                                 \
-  CPP(ProxyConstructor)                                                       \
-  CPP(ProxyConstructor_ConstructStub)                                         \
-                                                                              \
-  /* Reflect */                                                               \
-  ASM(ReflectApply)                                                           \
-  ASM(ReflectConstruct)                                                       \
-  CPP(ReflectDefineProperty)                                                  \
-  CPP(ReflectDeleteProperty)                                                  \
-  CPP(ReflectGet)                                                             \
-  CPP(ReflectGetOwnPropertyDescriptor)                                        \
-  CPP(ReflectGetPrototypeOf)                                                  \
-  CPP(ReflectHas)                                                             \
-  CPP(ReflectIsExtensible)                                                    \
-  CPP(ReflectOwnKeys)                                                         \
-  CPP(ReflectPreventExtensions)                                               \
-  CPP(ReflectSet)                                                             \
-  CPP(ReflectSetPrototypeOf)                                                  \
-                                                                              \
-  /* RegExp */                                                                \
-  CPP(RegExpCapture1Getter)                                                   \
-  CPP(RegExpCapture2Getter)                                                   \
-  CPP(RegExpCapture3Getter)                                                   \
-  CPP(RegExpCapture4Getter)                                                   \
-  CPP(RegExpCapture5Getter)                                                   \
-  CPP(RegExpCapture6Getter)                                                   \
-  CPP(RegExpCapture7Getter)                                                   \
-  CPP(RegExpCapture8Getter)                                                   \
-  CPP(RegExpCapture9Getter)                                                   \
-  CPP(RegExpConstructor)                                                      \
-  TFJ(RegExpInternalMatch, 2)                                                 \
-  CPP(RegExpInputGetter)                                                      \
-  CPP(RegExpInputSetter)                                                      \
-  CPP(RegExpLastMatchGetter)                                                  \
-  CPP(RegExpLastParenGetter)                                                  \
-  CPP(RegExpLeftContextGetter)                                                \
-  CPP(RegExpPrototypeCompile)                                                 \
-  TFJ(RegExpPrototypeExec, 1)                                                 \
-  TFJ(RegExpPrototypeFlagsGetter, 0)                                          \
-  TFJ(RegExpPrototypeGlobalGetter, 0)                                         \
-  TFJ(RegExpPrototypeIgnoreCaseGetter, 0)                                     \
-  CPP(RegExpPrototypeMatch)                                                   \
-  TFJ(RegExpPrototypeMultilineGetter, 0)                                      \
-  TFJ(RegExpPrototypeReplace, 2)                                              \
-  TFJ(RegExpPrototypeSearch, 1)                                               \
-  CPP(RegExpPrototypeSourceGetter)                                            \
-  CPP(RegExpPrototypeSpeciesGetter)                                           \
-  CPP(RegExpPrototypeSplit)                                                   \
-  TFJ(RegExpPrototypeStickyGetter, 0)                                         \
-  TFJ(RegExpPrototypeTest, 1)                                                 \
-  CPP(RegExpPrototypeToString)                                                \
-  TFJ(RegExpPrototypeUnicodeGetter, 0)                                        \
-  CPP(RegExpRightContextGetter)                                               \
-                                                                              \
-  /* SharedArrayBuffer */                                                     \
-  CPP(SharedArrayBufferPrototypeGetByteLength)                                \
-  TFJ(AtomicsLoad, 2)                                                         \
-  TFJ(AtomicsStore, 3)                                                        \
-                                                                              \
-  /* String */                                                                \
-  ASM(StringConstructor)                                                      \
-  ASM(StringConstructor_ConstructStub)                                        \
-  CPP(StringFromCodePoint)                                                    \
-  /* ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits ) */             \
-  TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel)    \
-  /* ES6 section 21.1.3.1 String.prototype.charAt ( pos ) */                  \
-  TFJ(StringPrototypeCharAt, 1)                                               \
-  /* ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos ) */              \
-  TFJ(StringPrototypeCharCodeAt, 1)                                           \
-  /* ES6 section 21.1.3.6 */                                                  \
-  /* String.prototype.endsWith ( searchString [ , endPosition ] ) */          \
-  CPP(StringPrototypeEndsWith)                                                \
-  /* ES6 section 21.1.3.7 */                                                  \
-  /* String.prototype.includes ( searchString [ , position ] ) */             \
-  CPP(StringPrototypeIncludes)                                                \
-  /* ES6 section 21.1.3.8 */                                                  \
-  /* String.prototype.indexOf ( searchString [ , position ] ) */              \
-  CPP(StringPrototypeIndexOf)                                                 \
-  /* ES6 section 21.1.3.9 */                                                  \
-  /* String.prototype.lastIndexOf ( searchString [ , position ] ) */          \
-  CPP(StringPrototypeLastIndexOf)                                             \
-  /* ES6 section 21.1.3.10 String.prototype.localeCompare ( that ) */         \
-  CPP(StringPrototypeLocaleCompare)                                           \
-  /* ES6 section 21.1.3.12 String.prototype.normalize ( [form] ) */           \
-  CPP(StringPrototypeNormalize)                                               \
-  /* ES6 section B.2.3.1 String.prototype.substr ( start, length ) */         \
-  TFJ(StringPrototypeSubstr, 2)                                               \
-  /* ES6 section 21.1.3.19 String.prototype.substring ( start, end ) */       \
-  TFJ(StringPrototypeSubstring, 2)                                            \
-  /* ES6 section 21.1.3.20 */                                                 \
-  /* String.prototype.startsWith ( searchString [ , position ] ) */           \
-  CPP(StringPrototypeStartsWith)                                              \
-  /* ES6 section 21.1.3.25 String.prototype.toString () */                    \
-  TFJ(StringPrototypeToString, 0)                                             \
-  CPP(StringPrototypeTrim)                                                    \
-  CPP(StringPrototypeTrimLeft)                                                \
-  CPP(StringPrototypeTrimRight)                                               \
-  /* ES6 section 21.1.3.28 String.prototype.valueOf () */                     \
-  TFJ(StringPrototypeValueOf, 0)                                              \
-  /* ES6 #sec-string.prototype-@@iterator */                                  \
-  TFJ(StringPrototypeIterator, 0)                                             \
-                                                                              \
-  /* StringIterator */                                                        \
-  TFJ(StringIteratorPrototypeNext, 0)                                         \
-                                                                              \
-  /* Symbol */                                                                \
-  CPP(SymbolConstructor)                                                      \
-  CPP(SymbolConstructor_ConstructStub)                                        \
-  /* ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint ) */      \
-  TFJ(SymbolPrototypeToPrimitive, 1)                                          \
-  /* ES6 section 19.4.3.2 Symbol.prototype.toString ( ) */                    \
-  TFJ(SymbolPrototypeToString, 0)                                             \
-  /* ES6 section 19.4.3.3 Symbol.prototype.valueOf ( ) */                     \
-  TFJ(SymbolPrototypeValueOf, 0)                                              \
-                                                                              \
-  /* TypedArray */                                                            \
-  CPP(TypedArrayPrototypeBuffer)                                              \
-  /* ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength */            \
-  TFJ(TypedArrayPrototypeByteLength, 0)                                       \
-  /* ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset */            \
-  TFJ(TypedArrayPrototypeByteOffset, 0)                                       \
-  /* ES6 section 22.2.3.18 get %TypedArray%.prototype.length */               \
-  TFJ(TypedArrayPrototypeLength, 0)                                           \
-  /* ES6 #sec-%typedarray%.prototype.entries */                               \
-  TFJ(TypedArrayPrototypeEntries, 0)                                          \
-  /* ES6 #sec-%typedarray%.prototype.keys */                                  \
-  TFJ(TypedArrayPrototypeKeys, 0)                                             \
-  /* ES6 #sec-%typedarray%.prototype.values */                                \
-  TFJ(TypedArrayPrototypeValues, 0)                                           \
-                                                                              \
-  CPP(ModuleNamespaceIterator)                                                \
-  CPP(FixedArrayIteratorNext)
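+// Legend for BUILTIN_LIST below (editor's sketch: the argument shapes are
+// inferred from the entries in this list, not stated by this patch):
+//   CPP: builtin implemented in C++.                Args: name
+//   API: C++ builtin used for API callbacks.        Args: name
+//   TFJ: TurboFan builtin with JS linkage.          Args: name, JS parameter
+//        count (or SharedFunctionInfo::kDontAdaptArgumentsSentinel)
+//   TFS: TurboFan builtin with code-stub linkage.   Args: name, code kind,
+//        extra IC state, interface descriptor, result size
+//   ASM: builtin in platform-dependent assembly.    Args: name
+//   ASH: handler in platform-dependent assembly.    Args: name, code kind,
+//        extra IC state
+//   DBG: assembly builtin used by the debugger.     Args: name
+// A consumer expands the list by passing one adapter macro per column; a
+// hypothetical declaration pass (IGNORE_BUILTIN and DECLARE_TF are
+// illustrative names only, not defined by this patch) might look like:
+//   #define IGNORE_BUILTIN(...)
+//   #define DECLARE_TF(Name, ...) void Generate_##Name();
+//   BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF,
+//                IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)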
+#define BUILTIN_LIST(CPP, API, TFJ, TFS, ASM, ASH, DBG)                        \
+  ASM(Abort)                                                                   \
+  /* Code aging */                                                             \
+  CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM)                        \
+                                                                               \
+  /* Declared first for dependency reasons */                                  \
+  ASM(CompileLazy)                                                             \
+  TFS(ToObject, BUILTIN, kNoExtraICState, TypeConversion, 1)                   \
+  TFS(FastNewObject, BUILTIN, kNoExtraICState, FastNewObject, 1)               \
+  TFS(HasProperty, BUILTIN, kNoExtraICState, HasProperty, 1)                   \
+                                                                               \
+  /* Calls */                                                                  \
+  ASM(ArgumentsAdaptorTrampoline)                                              \
+  /* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */              \
+  ASM(CallFunction_ReceiverIsNullOrUndefined)                                  \
+  ASM(CallFunction_ReceiverIsNotNullOrUndefined)                               \
+  ASM(CallFunction_ReceiverIsAny)                                              \
+  ASM(TailCallFunction_ReceiverIsNullOrUndefined)                              \
+  ASM(TailCallFunction_ReceiverIsNotNullOrUndefined)                           \
+  ASM(TailCallFunction_ReceiverIsAny)                                          \
+  /* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */            \
+  ASM(CallBoundFunction)                                                       \
+  ASM(TailCallBoundFunction)                                                   \
+  /* ES6 section 7.3.12 Call(F, V, [argumentsList]) */                         \
+  ASM(Call_ReceiverIsNullOrUndefined)                                          \
+  ASM(Call_ReceiverIsNotNullOrUndefined)                                       \
+  ASM(Call_ReceiverIsAny)                                                      \
+  ASM(TailCall_ReceiverIsNullOrUndefined)                                      \
+  ASM(TailCall_ReceiverIsNotNullOrUndefined)                                   \
+  ASM(TailCall_ReceiverIsAny)                                                  \
+  ASM(CallWithSpread)                                                          \
+  ASM(CallForwardVarargs)                                                      \
+  ASM(CallFunctionForwardVarargs)                                              \
+                                                                               \
+  /* Construct */                                                              \
+  /* ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget) */            \
+  ASM(ConstructFunction)                                                       \
+  /* ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget) */           \
+  ASM(ConstructBoundFunction)                                                  \
+  ASM(ConstructedNonConstructable)                                             \
+  /* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */           \
+  ASM(ConstructProxy)                                                          \
+  /* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */         \
+  ASM(Construct)                                                               \
+  ASM(ConstructWithSpread)                                                     \
+  ASM(JSConstructStubApi)                                                      \
+  ASM(JSConstructStubGeneric)                                                  \
+  ASM(JSBuiltinsConstructStub)                                                 \
+  ASM(JSBuiltinsConstructStubForDerived)                                       \
+  TFS(FastNewClosure, BUILTIN, kNoExtraICState, FastNewClosure, 1)             \
+  TFS(FastNewFunctionContextEval, BUILTIN, kNoExtraICState,                    \
+      FastNewFunctionContext, 1)                                               \
+  TFS(FastNewFunctionContextFunction, BUILTIN, kNoExtraICState,                \
+      FastNewFunctionContext, 1)                                               \
+  TFS(FastNewStrictArguments, BUILTIN, kNoExtraICState, FastNewArguments, 1)   \
+  TFS(FastNewSloppyArguments, BUILTIN, kNoExtraICState, FastNewArguments, 1)   \
+  TFS(FastNewRestParameter, BUILTIN, kNoExtraICState, FastNewArguments, 1)     \
+  TFS(FastCloneRegExp, BUILTIN, kNoExtraICState, FastCloneRegExp, 1)           \
+  TFS(FastCloneShallowArrayTrack, BUILTIN, kNoExtraICState,                    \
+      FastCloneShallowArray, 1)                                                \
+  TFS(FastCloneShallowArrayDontTrack, BUILTIN, kNoExtraICState,                \
+      FastCloneShallowArray, 1)                                                \
+  TFS(FastCloneShallowObject0, BUILTIN, kNoExtraICState,                       \
+      FastCloneShallowObject, 1)                                               \
+  TFS(FastCloneShallowObject1, BUILTIN, kNoExtraICState,                       \
+      FastCloneShallowObject, 1)                                               \
+  TFS(FastCloneShallowObject2, BUILTIN, kNoExtraICState,                       \
+      FastCloneShallowObject, 1)                                               \
+  TFS(FastCloneShallowObject3, BUILTIN, kNoExtraICState,                       \
+      FastCloneShallowObject, 1)                                               \
+  TFS(FastCloneShallowObject4, BUILTIN, kNoExtraICState,                       \
+      FastCloneShallowObject, 1)                                               \
+  TFS(FastCloneShallowObject5, BUILTIN, kNoExtraICState,                       \
+      FastCloneShallowObject, 1)                                               \
+  TFS(FastCloneShallowObject6, BUILTIN, kNoExtraICState,                       \
+      FastCloneShallowObject, 1)                                               \
+                                                                               \
+  /* Apply and entries */                                                      \
+  ASM(Apply)                                                                   \
+  ASM(JSEntryTrampoline)                                                       \
+  ASM(JSConstructEntryTrampoline)                                              \
+  ASM(ResumeGeneratorTrampoline)                                               \
+                                                                               \
+  /* Stack and interrupt check */                                              \
+  ASM(InterruptCheck)                                                          \
+  ASM(StackCheck)                                                              \
+                                                                               \
+  /* String helpers */                                                         \
+  TFS(StringCharAt, BUILTIN, kNoExtraICState, StringCharAt, 1)                 \
+  TFS(StringCharCodeAt, BUILTIN, kNoExtraICState, StringCharCodeAt, 1)         \
+  TFS(StringEqual, BUILTIN, kNoExtraICState, Compare, 1)                       \
+  TFS(StringGreaterThan, BUILTIN, kNoExtraICState, Compare, 1)                 \
+  TFS(StringGreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare, 1)          \
+  TFS(StringIndexOf, BUILTIN, kNoExtraICState, StringIndexOf, 1)               \
+  TFS(StringLessThan, BUILTIN, kNoExtraICState, Compare, 1)                    \
+  TFS(StringLessThanOrEqual, BUILTIN, kNoExtraICState, Compare, 1)             \
+  TFS(StringNotEqual, BUILTIN, kNoExtraICState, Compare, 1)                    \
+                                                                               \
+  /* Interpreter */                                                            \
+  ASM(InterpreterEntryTrampoline)                                              \
+  ASM(InterpreterPushArgsAndCall)                                              \
+  ASM(InterpreterPushArgsAndCallFunction)                                      \
+  ASM(InterpreterPushArgsAndCallWithFinalSpread)                               \
+  ASM(InterpreterPushArgsAndTailCall)                                          \
+  ASM(InterpreterPushArgsAndTailCallFunction)                                  \
+  ASM(InterpreterPushArgsAndConstruct)                                         \
+  ASM(InterpreterPushArgsAndConstructFunction)                                 \
+  ASM(InterpreterPushArgsAndConstructArray)                                    \
+  ASM(InterpreterPushArgsAndConstructWithFinalSpread)                          \
+  ASM(InterpreterEnterBytecodeAdvance)                                         \
+  ASM(InterpreterEnterBytecodeDispatch)                                        \
+  ASM(InterpreterOnStackReplacement)                                           \
+                                                                               \
+  /* Code life-cycle */                                                        \
+  ASM(CompileBaseline)                                                         \
+  ASM(CompileOptimized)                                                        \
+  ASM(CompileOptimizedConcurrent)                                              \
+  ASM(InOptimizationQueue)                                                     \
+  ASM(InstantiateAsmJs)                                                        \
+  ASM(MarkCodeAsToBeExecutedOnce)                                              \
+  ASM(MarkCodeAsExecutedOnce)                                                  \
+  ASM(MarkCodeAsExecutedTwice)                                                 \
+  ASM(NotifyDeoptimized)                                                       \
+  ASM(NotifySoftDeoptimized)                                                   \
+  ASM(NotifyLazyDeoptimized)                                                   \
+  ASM(NotifyStubFailure)                                                       \
+  ASM(NotifyStubFailureSaveDoubles)                                            \
+  ASM(OnStackReplacement)                                                      \
+                                                                               \
+  /* API callback handling */                                                  \
+  API(HandleApiCall)                                                           \
+  API(HandleApiCallAsFunction)                                                 \
+  API(HandleApiCallAsConstructor)                                              \
+  ASM(HandleFastApiCall)                                                       \
+                                                                               \
+  /* Adapters for TurboFan into runtime */                                     \
+  ASM(AllocateInNewSpace)                                                      \
+  ASM(AllocateInOldSpace)                                                      \
+                                                                               \
+  /* TurboFan support builtins */                                              \
+  TFS(CopyFastSmiOrObjectElements, BUILTIN, kNoExtraICState,                   \
+      CopyFastSmiOrObjectElements, 1)                                          \
+  TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements, 1)  \
+  TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState,                   \
+      GrowArrayElements, 1)                                                    \
+  TFS(NewUnmappedArgumentsElements, BUILTIN, kNoExtraICState,                  \
+      NewArgumentsElements, 1)                                                 \
+  TFS(NewRestParameterElements, BUILTIN, kNoExtraICState,                      \
+      NewArgumentsElements, 1)                                                 \
+                                                                               \
+  /* Debugger */                                                               \
+  DBG(FrameDropperTrampoline)                                                  \
+  DBG(HandleDebuggerStatement)                                                 \
+  DBG(Return_DebugBreak)                                                       \
+  DBG(Slot_DebugBreak)                                                         \
+                                                                               \
+  /* Type conversions */                                                       \
+  TFS(ToBoolean, BUILTIN, kNoExtraICState, TypeConversion, 1)                  \
+  TFS(OrdinaryToPrimitive_Number, BUILTIN, kNoExtraICState, TypeConversion, 1) \
+  TFS(OrdinaryToPrimitive_String, BUILTIN, kNoExtraICState, TypeConversion, 1) \
+  TFS(NonPrimitiveToPrimitive_Default, BUILTIN, kNoExtraICState,               \
+      TypeConversion, 1)                                                       \
+  TFS(NonPrimitiveToPrimitive_Number, BUILTIN, kNoExtraICState,                \
+      TypeConversion, 1)                                                       \
+  TFS(NonPrimitiveToPrimitive_String, BUILTIN, kNoExtraICState,                \
+      TypeConversion, 1)                                                       \
+  TFS(StringToNumber, BUILTIN, kNoExtraICState, TypeConversion, 1)             \
+  TFS(ToName, BUILTIN, kNoExtraICState, TypeConversion, 1)                     \
+  TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion, 1)          \
+  TFS(ToNumber, BUILTIN, kNoExtraICState, TypeConversion, 1)                   \
+  TFS(ToString, BUILTIN, kNoExtraICState, TypeConversion, 1)                   \
+  TFS(ToInteger, BUILTIN, kNoExtraICState, TypeConversion, 1)                  \
+  TFS(ToLength, BUILTIN, kNoExtraICState, TypeConversion, 1)                   \
+  TFS(ClassOf, BUILTIN, kNoExtraICState, Typeof, 1)                            \
+  TFS(Typeof, BUILTIN, kNoExtraICState, Typeof, 1)                             \
+  TFS(GetSuperConstructor, BUILTIN, kNoExtraICState, TypeConversion, 1)        \
+                                                                               \
+  /* Handlers */                                                               \
+  TFS(LoadICProtoArray, BUILTIN, kNoExtraICState, LoadICProtoArray, 1)         \
+  TFS(LoadICProtoArrayThrowIfNonexistent, BUILTIN, kNoExtraICState,            \
+      LoadICProtoArray, 1)                                                     \
+  TFS(KeyedLoadIC_Megamorphic, BUILTIN, kNoExtraICState, LoadWithVector, 1)    \
+  TFS(KeyedLoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector, 1)           \
+  TFS(KeyedLoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector, 1)             \
+  TFS(KeyedLoadIC_IndexedString, HANDLER, Code::LOAD_IC, LoadWithVector, 1)    \
+  TFS(KeyedStoreIC_Megamorphic, BUILTIN, kNoExtraICState, StoreWithVector, 1)  \
+  TFS(KeyedStoreIC_Megamorphic_Strict, BUILTIN, kNoExtraICState,               \
+      StoreWithVector, 1)                                                      \
+  TFS(KeyedStoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector, 1)         \
+  TFS(KeyedStoreIC_Slow, HANDLER, Code::STORE_IC, StoreWithVector, 1)          \
+  TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector, 1)    \
+  TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector,  \
+      1)                                                                       \
+  TFS(LoadField, BUILTIN, kNoExtraICState, LoadField, 1)                       \
+  TFS(LoadIC_FunctionPrototype, HANDLER, Code::LOAD_IC, LoadWithVector, 1)     \
+  ASH(LoadIC_Getter_ForDeopt, BUILTIN, kNoExtraICState)                        \
+  TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector, 1)                \
+  TFS(LoadIC_Normal, HANDLER, Code::LOAD_IC, LoadWithVector, 1)                \
+  TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector, 1)                  \
+  TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector, 1)              \
+  TFS(StoreIC_Normal, HANDLER, Code::STORE_IC, StoreWithVector, 1)             \
+  ASH(StoreIC_Setter_ForDeopt, BUILTIN, kNoExtraICState)                       \
+                                                                               \
+  /* Built-in functions for JavaScript */                                      \
+  /* Special internal builtins */                                              \
+  CPP(EmptyFunction)                                                           \
+  CPP(Illegal)                                                                 \
+  CPP(RestrictedFunctionPropertiesThrower)                                     \
+  CPP(RestrictedStrictArgumentsPropertiesThrower)                              \
+  CPP(UnsupportedThrower)                                                      \
+  TFJ(ReturnReceiver, 0)                                                       \
+                                                                               \
+  /* Array */                                                                  \
+  ASM(ArrayCode)                                                               \
+  ASM(InternalArrayCode)                                                       \
+  CPP(ArrayConcat)                                                             \
+  /* ES6 section 22.1.2.2 Array.isArray */                                     \
+  TFJ(ArrayIsArray, 1)                                                         \
+  /* ES7 #sec-array.prototype.includes */                                      \
+  TFJ(ArrayIncludes, 2)                                                        \
+  TFJ(ArrayIndexOf, 2)                                                         \
+  CPP(ArrayPop)                                                                \
+  CPP(ArrayPush)                                                               \
+  TFJ(FastArrayPush, -1)                                                       \
+  CPP(ArrayShift)                                                              \
+  CPP(ArraySlice)                                                              \
+  CPP(ArraySplice)                                                             \
+  CPP(ArrayUnshift)                                                            \
+  TFJ(ArrayForEach, 2)                                                         \
+  /* ES6 #sec-array.prototype.entries */                                       \
+  TFJ(ArrayPrototypeEntries, 0)                                                \
+  /* ES6 #sec-array.prototype.keys */                                          \
+  TFJ(ArrayPrototypeKeys, 0)                                                   \
+  /* ES6 #sec-array.prototype.values */                                        \
+  TFJ(ArrayPrototypeValues, 0)                                                 \
+  /* ES6 #sec-%arrayiteratorprototype%.next */                                 \
+  TFJ(ArrayIteratorPrototypeNext, 0)                                           \
+                                                                               \
+  /* ArrayBuffer */                                                            \
+  CPP(ArrayBufferConstructor)                                                  \
+  CPP(ArrayBufferConstructor_ConstructStub)                                    \
+  CPP(ArrayBufferPrototypeGetByteLength)                                       \
+  CPP(ArrayBufferIsView)                                                       \
+                                                                               \
+  /* AsyncFunction */                                                          \
+  TFJ(AsyncFunctionAwaitCaught, 3)                                             \
+  TFJ(AsyncFunctionAwaitUncaught, 3)                                           \
+  TFJ(AsyncFunctionAwaitRejectClosure, 1)                                      \
+  TFJ(AsyncFunctionAwaitResolveClosure, 1)                                     \
+  TFJ(AsyncFunctionPromiseCreate, 0)                                           \
+  TFJ(AsyncFunctionPromiseRelease, 1)                                          \
+                                                                               \
+  /* Boolean */                                                                \
+  CPP(BooleanConstructor)                                                      \
+  CPP(BooleanConstructor_ConstructStub)                                        \
+  /* ES6 section 19.3.3.2 Boolean.prototype.toString ( ) */                    \
+  TFJ(BooleanPrototypeToString, 0)                                             \
+  /* ES6 section 19.3.3.3 Boolean.prototype.valueOf ( ) */                     \
+  TFJ(BooleanPrototypeValueOf, 0)                                              \
+                                                                               \
+  /* CallSite */                                                               \
+  CPP(CallSitePrototypeGetColumnNumber)                                        \
+  CPP(CallSitePrototypeGetEvalOrigin)                                          \
+  CPP(CallSitePrototypeGetFileName)                                            \
+  CPP(CallSitePrototypeGetFunction)                                            \
+  CPP(CallSitePrototypeGetFunctionName)                                        \
+  CPP(CallSitePrototypeGetLineNumber)                                          \
+  CPP(CallSitePrototypeGetMethodName)                                          \
+  CPP(CallSitePrototypeGetPosition)                                            \
+  CPP(CallSitePrototypeGetScriptNameOrSourceURL)                               \
+  CPP(CallSitePrototypeGetThis)                                                \
+  CPP(CallSitePrototypeGetTypeName)                                            \
+  CPP(CallSitePrototypeIsConstructor)                                          \
+  CPP(CallSitePrototypeIsEval)                                                 \
+  CPP(CallSitePrototypeIsNative)                                               \
+  CPP(CallSitePrototypeIsToplevel)                                             \
+  CPP(CallSitePrototypeToString)                                               \
+                                                                               \
+  /* DataView */                                                               \
+  CPP(DataViewConstructor)                                                     \
+  CPP(DataViewConstructor_ConstructStub)                                       \
+  CPP(DataViewPrototypeGetBuffer)                                              \
+  CPP(DataViewPrototypeGetByteLength)                                          \
+  CPP(DataViewPrototypeGetByteOffset)                                          \
+  CPP(DataViewPrototypeGetInt8)                                                \
+  CPP(DataViewPrototypeSetInt8)                                                \
+  CPP(DataViewPrototypeGetUint8)                                               \
+  CPP(DataViewPrototypeSetUint8)                                               \
+  CPP(DataViewPrototypeGetInt16)                                               \
+  CPP(DataViewPrototypeSetInt16)                                               \
+  CPP(DataViewPrototypeGetUint16)                                              \
+  CPP(DataViewPrototypeSetUint16)                                              \
+  CPP(DataViewPrototypeGetInt32)                                               \
+  CPP(DataViewPrototypeSetInt32)                                               \
+  CPP(DataViewPrototypeGetUint32)                                              \
+  CPP(DataViewPrototypeSetUint32)                                              \
+  CPP(DataViewPrototypeGetFloat32)                                             \
+  CPP(DataViewPrototypeSetFloat32)                                             \
+  CPP(DataViewPrototypeGetFloat64)                                             \
+  CPP(DataViewPrototypeSetFloat64)                                             \
+                                                                               \
+  /* Date */                                                                   \
+  CPP(DateConstructor)                                                         \
+  CPP(DateConstructor_ConstructStub)                                           \
+  /* ES6 section 20.3.4.2 Date.prototype.getDate ( ) */                        \
+  TFJ(DatePrototypeGetDate, 0)                                                 \
+  /* ES6 section 20.3.4.3 Date.prototype.getDay ( ) */                         \
+  TFJ(DatePrototypeGetDay, 0)                                                  \
+  /* ES6 section 20.3.4.4 Date.prototype.getFullYear ( ) */                    \
+  TFJ(DatePrototypeGetFullYear, 0)                                             \
+  /* ES6 section 20.3.4.5 Date.prototype.getHours ( ) */                       \
+  TFJ(DatePrototypeGetHours, 0)                                                \
+  /* ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( ) */                \
+  TFJ(DatePrototypeGetMilliseconds, 0)                                         \
+  /* ES6 section 20.3.4.7 Date.prototype.getMinutes ( ) */                     \
+  TFJ(DatePrototypeGetMinutes, 0)                                              \
+  /* ES6 section 20.3.4.8 Date.prototype.getMonth */                           \
+  TFJ(DatePrototypeGetMonth, 0)                                                \
+  /* ES6 section 20.3.4.9 Date.prototype.getSeconds ( ) */                     \
+  TFJ(DatePrototypeGetSeconds, 0)                                              \
+  /* ES6 section 20.3.4.10 Date.prototype.getTime ( ) */                       \
+  TFJ(DatePrototypeGetTime, 0)                                                 \
+  /* ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( ) */             \
+  TFJ(DatePrototypeGetTimezoneOffset, 0)                                       \
+  /* ES6 section 20.3.4.12 Date.prototype.getUTCDate ( ) */                    \
+  TFJ(DatePrototypeGetUTCDate, 0)                                              \
+  /* ES6 section 20.3.4.13 Date.prototype.getUTCDay ( ) */                     \
+  TFJ(DatePrototypeGetUTCDay, 0)                                               \
+  /* ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( ) */                \
+  TFJ(DatePrototypeGetUTCFullYear, 0)                                          \
+  /* ES6 section 20.3.4.15 Date.prototype.getUTCHours ( ) */                   \
+  TFJ(DatePrototypeGetUTCHours, 0)                                             \
+  /* ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( ) */            \
+  TFJ(DatePrototypeGetUTCMilliseconds, 0)                                      \
+  /* ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( ) */                 \
+  TFJ(DatePrototypeGetUTCMinutes, 0)                                           \
+  /* ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( ) */                   \
+  TFJ(DatePrototypeGetUTCMonth, 0)                                             \
+  /* ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( ) */                 \
+  TFJ(DatePrototypeGetUTCSeconds, 0)                                           \
+  /* ES6 section 20.3.4.44 Date.prototype.valueOf ( ) */                       \
+  TFJ(DatePrototypeValueOf, 0)                                                 \
+  /* ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint ) */        \
+  TFJ(DatePrototypeToPrimitive, 1)                                             \
+  CPP(DatePrototypeGetYear)                                                    \
+  CPP(DatePrototypeSetYear)                                                    \
+  CPP(DateNow)                                                                 \
+  CPP(DateParse)                                                               \
+  CPP(DatePrototypeSetDate)                                                    \
+  CPP(DatePrototypeSetFullYear)                                                \
+  CPP(DatePrototypeSetHours)                                                   \
+  CPP(DatePrototypeSetMilliseconds)                                            \
+  CPP(DatePrototypeSetMinutes)                                                 \
+  CPP(DatePrototypeSetMonth)                                                   \
+  CPP(DatePrototypeSetSeconds)                                                 \
+  CPP(DatePrototypeSetTime)                                                    \
+  CPP(DatePrototypeSetUTCDate)                                                 \
+  CPP(DatePrototypeSetUTCFullYear)                                             \
+  CPP(DatePrototypeSetUTCHours)                                                \
+  CPP(DatePrototypeSetUTCMilliseconds)                                         \
+  CPP(DatePrototypeSetUTCMinutes)                                              \
+  CPP(DatePrototypeSetUTCMonth)                                                \
+  CPP(DatePrototypeSetUTCSeconds)                                              \
+  CPP(DatePrototypeToDateString)                                               \
+  CPP(DatePrototypeToISOString)                                                \
+  CPP(DatePrototypeToUTCString)                                                \
+  CPP(DatePrototypeToString)                                                   \
+  CPP(DatePrototypeToTimeString)                                               \
+  CPP(DatePrototypeToJson)                                                     \
+  CPP(DateUTC)                                                                 \
+                                                                               \
+  /* Error */                                                                  \
+  CPP(ErrorConstructor)                                                        \
+  CPP(ErrorCaptureStackTrace)                                                  \
+  CPP(ErrorPrototypeToString)                                                  \
+  CPP(MakeError)                                                               \
+  CPP(MakeRangeError)                                                          \
+  CPP(MakeSyntaxError)                                                         \
+  CPP(MakeTypeError)                                                           \
+  CPP(MakeURIError)                                                            \
+                                                                               \
+  /* Function */                                                               \
+  CPP(FunctionConstructor)                                                     \
+  ASM(FunctionPrototypeApply)                                                  \
+  CPP(FunctionPrototypeBind)                                                   \
+  TFJ(FastFunctionPrototypeBind,                                               \
+      SharedFunctionInfo::kDontAdaptArgumentsSentinel)                         \
+  ASM(FunctionPrototypeCall)                                                   \
+  /* ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V ) */        \
+  TFJ(FunctionPrototypeHasInstance, 1)                                         \
+  CPP(FunctionPrototypeToString)                                               \
+                                                                               \
+  /* Belongs to Objects but is a dependency of GeneratorPrototypeResume */     \
+  TFS(CreateIterResultObject, BUILTIN, kNoExtraICState,                        \
+      CreateIterResultObject, 1)                                               \
+                                                                               \
+  /* Generator and Async */                                                    \
+  CPP(GeneratorFunctionConstructor)                                            \
+  /* ES6 section 25.3.1.2 Generator.prototype.next ( value ) */                \
+  TFJ(GeneratorPrototypeNext, 1)                                               \
+  /* ES6 section 25.3.1.3 Generator.prototype.return ( value ) */              \
+  TFJ(GeneratorPrototypeReturn, 1)                                             \
+  /* ES6 section 25.3.1.4 Generator.prototype.throw ( exception ) */           \
+  TFJ(GeneratorPrototypeThrow, 1)                                              \
+  CPP(AsyncFunctionConstructor)                                                \
+                                                                               \
+  /* Global object */                                                          \
+  CPP(GlobalDecodeURI)                                                         \
+  CPP(GlobalDecodeURIComponent)                                                \
+  CPP(GlobalEncodeURI)                                                         \
+  CPP(GlobalEncodeURIComponent)                                                \
+  CPP(GlobalEscape)                                                            \
+  CPP(GlobalUnescape)                                                          \
+  CPP(GlobalEval)                                                              \
+  /* ES6 section 18.2.2 isFinite ( number ) */                                 \
+  TFJ(GlobalIsFinite, 1)                                                       \
+  /* ES6 section 18.2.3 isNaN ( number ) */                                    \
+  TFJ(GlobalIsNaN, 1)                                                          \
+                                                                               \
+  /* JSON */                                                                   \
+  CPP(JsonParse)                                                               \
+  CPP(JsonStringify)                                                           \
+                                                                               \
+  /* ICs */                                                                    \
+  TFS(LoadIC, LOAD_IC, kNoExtraICState, LoadWithVector, 1)                     \
+  TFS(LoadICTrampoline, LOAD_IC, kNoExtraICState, Load, 1)                     \
+  TFS(KeyedLoadIC, KEYED_LOAD_IC, kNoExtraICState, LoadWithVector, 1)          \
+  TFS(KeyedLoadICTrampoline, KEYED_LOAD_IC, kNoExtraICState, Load, 1)          \
+  TFS(StoreIC, STORE_IC, kNoExtraICState, StoreWithVector, 1)                  \
+  TFS(StoreICTrampoline, STORE_IC, kNoExtraICState, Store, 1)                  \
+  TFS(StoreICStrict, STORE_IC, kNoExtraICState, StoreWithVector, 1)            \
+  TFS(StoreICStrictTrampoline, STORE_IC, kNoExtraICState, Store, 1)            \
+  TFS(KeyedStoreIC, KEYED_STORE_IC, kNoExtraICState, StoreWithVector, 1)       \
+  TFS(KeyedStoreICTrampoline, KEYED_STORE_IC, kNoExtraICState, Store, 1)       \
+  TFS(KeyedStoreICStrict, KEYED_STORE_IC, kNoExtraICState, StoreWithVector, 1) \
+  TFS(KeyedStoreICStrictTrampoline, KEYED_STORE_IC, kNoExtraICState, Store, 1) \
+  TFS(LoadGlobalIC, LOAD_GLOBAL_IC, kNoExtraICState, LoadGlobalWithVector, 1)  \
+  TFS(LoadGlobalICInsideTypeof, LOAD_GLOBAL_IC, kNoExtraICState,               \
+      LoadGlobalWithVector, 1)                                                 \
+  TFS(LoadGlobalICTrampoline, LOAD_GLOBAL_IC, kNoExtraICState, LoadGlobal, 1)  \
+  TFS(LoadGlobalICInsideTypeofTrampoline, LOAD_GLOBAL_IC, kNoExtraICState,     \
+      LoadGlobal, 1)                                                           \
+                                                                               \
+  /* Math */                                                                   \
+  /* ES6 section 20.2.2.1 Math.abs ( x ) */                                    \
+  TFJ(MathAbs, 1)                                                              \
+  /* ES6 section 20.2.2.2 Math.acos ( x ) */                                   \
+  TFJ(MathAcos, 1)                                                             \
+  /* ES6 section 20.2.2.3 Math.acosh ( x ) */                                  \
+  TFJ(MathAcosh, 1)                                                            \
+  /* ES6 section 20.2.2.4 Math.asin ( x ) */                                   \
+  TFJ(MathAsin, 1)                                                             \
+  /* ES6 section 20.2.2.5 Math.asinh ( x ) */                                  \
+  TFJ(MathAsinh, 1)                                                            \
+  /* ES6 section 20.2.2.6 Math.atan ( x ) */                                   \
+  TFJ(MathAtan, 1)                                                             \
+  /* ES6 section 20.2.2.7 Math.atanh ( x ) */                                  \
+  TFJ(MathAtanh, 1)                                                            \
+  /* ES6 section 20.2.2.8 Math.atan2 ( y, x ) */                               \
+  TFJ(MathAtan2, 2)                                                            \
+  /* ES6 section 20.2.2.9 Math.cbrt ( x ) */                                   \
+  TFJ(MathCbrt, 1)                                                             \
+  /* ES6 section 20.2.2.10 Math.ceil ( x ) */                                  \
+  TFJ(MathCeil, 1)                                                             \
+  /* ES6 section 20.2.2.11 Math.clz32 ( x ) */                                 \
+  TFJ(MathClz32, 1)                                                            \
+  /* ES6 section 20.2.2.12 Math.cos ( x ) */                                   \
+  TFJ(MathCos, 1)                                                              \
+  /* ES6 section 20.2.2.13 Math.cosh ( x ) */                                  \
+  TFJ(MathCosh, 1)                                                             \
+  /* ES6 section 20.2.2.14 Math.exp ( x ) */                                   \
+  TFJ(MathExp, 1)                                                              \
+  /* ES6 section 20.2.2.15 Math.expm1 ( x ) */                                 \
+  TFJ(MathExpm1, 1)                                                            \
+  /* ES6 section 20.2.2.16 Math.floor ( x ) */                                 \
+  TFJ(MathFloor, 1)                                                            \
+  /* ES6 section 20.2.2.17 Math.fround ( x ) */                                \
+  TFJ(MathFround, 1)                                                           \
+  /* ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values ) */         \
+  CPP(MathHypot)                                                               \
+  /* ES6 section 20.2.2.19 Math.imul ( x, y ) */                               \
+  TFJ(MathImul, 2)                                                             \
+  /* ES6 section 20.2.2.20 Math.log ( x ) */                                   \
+  TFJ(MathLog, 1)                                                              \
+  /* ES6 section 20.2.2.21 Math.log1p ( x ) */                                 \
+  TFJ(MathLog1p, 1)                                                            \
+  /* ES6 section 20.2.2.22 Math.log10 ( x ) */                                 \
+  TFJ(MathLog10, 1)                                                            \
+  /* ES6 section 20.2.2.23 Math.log2 ( x ) */                                  \
+  TFJ(MathLog2, 1)                                                             \
+  /* ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values ) */          \
+  ASM(MathMax)                                                                 \
+  /* ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values ) */          \
+  ASM(MathMin)                                                                 \
+  /* ES6 section 20.2.2.26 Math.pow ( x, y ) */                                \
+  TFJ(MathPow, 2)                                                              \
+  /* ES6 section 20.2.2.27 Math.random */                                      \
+  TFJ(MathRandom, 0)                                                           \
+  /* ES6 section 20.2.2.28 Math.round ( x ) */                                 \
+  TFJ(MathRound, 1)                                                            \
+  /* ES6 section 20.2.2.29 Math.sign ( x ) */                                  \
+  TFJ(MathSign, 1)                                                             \
+  /* ES6 section 20.2.2.30 Math.sin ( x ) */                                   \
+  TFJ(MathSin, 1)                                                              \
+  /* ES6 section 20.2.2.31 Math.sinh ( x ) */                                  \
+  TFJ(MathSinh, 1)                                                             \
+  /* ES6 section 20.2.2.32 Math.sqrt ( x ) */                                 \
+  TFJ(MathSqrt, 1)                                                            \
+  /* ES6 section 20.2.2.33 Math.tan ( x ) */                                  \
+  TFJ(MathTan, 1)                                                             \
+  /* ES6 section 20.2.2.34 Math.tanh ( x ) */                                 \
+  TFJ(MathTanh, 1)                                                            \
+  /* ES6 section 20.2.2.35 Math.trunc ( x ) */                                 \
+  TFJ(MathTrunc, 1)                                                            \
+                                                                               \
+  /* Number */                                                                 \
+  /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */        \
+  ASM(NumberConstructor)                                                       \
+  /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */   \
+  ASM(NumberConstructor_ConstructStub)                                         \
+  /* ES6 section 20.1.2.2 Number.isFinite ( number ) */                        \
+  TFJ(NumberIsFinite, 1)                                                       \
+  /* ES6 section 20.1.2.3 Number.isInteger ( number ) */                       \
+  TFJ(NumberIsInteger, 1)                                                      \
+  /* ES6 section 20.1.2.4 Number.isNaN ( number ) */                           \
+  TFJ(NumberIsNaN, 1)                                                          \
+  /* ES6 section 20.1.2.5 Number.isSafeInteger ( number ) */                   \
+  TFJ(NumberIsSafeInteger, 1)                                                  \
+  /* ES6 section 20.1.2.12 Number.parseFloat ( string ) */                     \
+  TFJ(NumberParseFloat, 1)                                                     \
+  /* ES6 section 20.1.2.13 Number.parseInt ( string, radix ) */                \
+  TFJ(NumberParseInt, 2)                                                       \
+  CPP(NumberPrototypeToExponential)                                            \
+  CPP(NumberPrototypeToFixed)                                                  \
+  CPP(NumberPrototypeToLocaleString)                                           \
+  CPP(NumberPrototypeToPrecision)                                              \
+  CPP(NumberPrototypeToString)                                                 \
+  /* ES6 section 20.1.3.7 Number.prototype.valueOf ( ) */                      \
+  TFJ(NumberPrototypeValueOf, 0)                                               \
+  TFS(Add, BUILTIN, kNoExtraICState, BinaryOp, 1)                              \
+  TFS(Subtract, BUILTIN, kNoExtraICState, BinaryOp, 1)                         \
+  TFS(Multiply, BUILTIN, kNoExtraICState, BinaryOp, 1)                         \
+  TFS(Divide, BUILTIN, kNoExtraICState, BinaryOp, 1)                           \
+  TFS(Modulus, BUILTIN, kNoExtraICState, BinaryOp, 1)                          \
+  TFS(BitwiseAnd, BUILTIN, kNoExtraICState, BinaryOp, 1)                       \
+  TFS(BitwiseOr, BUILTIN, kNoExtraICState, BinaryOp, 1)                        \
+  TFS(BitwiseXor, BUILTIN, kNoExtraICState, BinaryOp, 1)                       \
+  TFS(ShiftLeft, BUILTIN, kNoExtraICState, BinaryOp, 1)                        \
+  TFS(ShiftRight, BUILTIN, kNoExtraICState, BinaryOp, 1)                       \
+  TFS(ShiftRightLogical, BUILTIN, kNoExtraICState, BinaryOp, 1)                \
+  TFS(LessThan, BUILTIN, kNoExtraICState, Compare, 1)                          \
+  TFS(LessThanOrEqual, BUILTIN, kNoExtraICState, Compare, 1)                   \
+  TFS(GreaterThan, BUILTIN, kNoExtraICState, Compare, 1)                       \
+  TFS(GreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare, 1)                \
+  TFS(Equal, BUILTIN, kNoExtraICState, Compare, 1)                             \
+  TFS(NotEqual, BUILTIN, kNoExtraICState, Compare, 1)                          \
+  TFS(StrictEqual, BUILTIN, kNoExtraICState, Compare, 1)                       \
+  TFS(StrictNotEqual, BUILTIN, kNoExtraICState, Compare, 1)                    \
+                                                                               \
+  /* Object */                                                                 \
+  CPP(ObjectAssign)                                                            \
+  TFJ(ObjectCreate, 2)                                                         \
+  CPP(ObjectDefineGetter)                                                      \
+  CPP(ObjectDefineProperties)                                                  \
+  CPP(ObjectDefineProperty)                                                    \
+  CPP(ObjectDefineSetter)                                                      \
+  CPP(ObjectEntries)                                                           \
+  CPP(ObjectFreeze)                                                            \
+  CPP(ObjectGetOwnPropertyDescriptor)                                          \
+  CPP(ObjectGetOwnPropertyDescriptors)                                         \
+  CPP(ObjectGetOwnPropertyNames)                                               \
+  CPP(ObjectGetOwnPropertySymbols)                                             \
+  CPP(ObjectGetPrototypeOf)                                                    \
+  CPP(ObjectSetPrototypeOf)                                                    \
+  /* ES6 section 19.1.3.2 Object.prototype.hasOwnProperty */                   \
+  TFJ(ObjectHasOwnProperty, 1)                                                 \
+  CPP(ObjectIs)                                                                \
+  CPP(ObjectIsExtensible)                                                      \
+  CPP(ObjectIsFrozen)                                                          \
+  CPP(ObjectIsSealed)                                                          \
+  CPP(ObjectKeys)                                                              \
+  CPP(ObjectLookupGetter)                                                      \
+  CPP(ObjectLookupSetter)                                                      \
+  CPP(ObjectPreventExtensions)                                                 \
+  /* ES6 section 19.1.3.6 Object.prototype.toString () */                      \
+  TFJ(ObjectProtoToString, 0)                                                  \
+  CPP(ObjectPrototypePropertyIsEnumerable)                                     \
+  CPP(ObjectPrototypeGetProto)                                                 \
+  CPP(ObjectPrototypeSetProto)                                                 \
+  CPP(ObjectSeal)                                                              \
+  CPP(ObjectValues)                                                            \
+                                                                               \
+  /* instanceof */                                                             \
+  TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare, 1)               \
+  TFS(InstanceOf, BUILTIN, kNoExtraICState, Compare, 1)                        \
+                                                                               \
+  /* for-in */                                                                 \
+  TFS(ForInFilter, BUILTIN, kNoExtraICState, ForInFilter, 1)                   \
+  TFS(ForInNext, BUILTIN, kNoExtraICState, ForInNext, 1)                       \
+  TFS(ForInPrepare, BUILTIN, kNoExtraICState, ForInPrepare, 3)                 \
+                                                                               \
+  /* Promise */                                                                \
+  TFJ(PromiseGetCapabilitiesExecutor, 2)                                       \
+  TFJ(NewPromiseCapability, 2)                                                 \
+  TFJ(PromiseConstructor, 1)                                                   \
+  TFJ(PromiseInternalConstructor, 1)                                           \
+  TFJ(IsPromise, 1)                                                            \
+  TFJ(PromiseResolveClosure, 1)                                                \
+  TFJ(PromiseRejectClosure, 1)                                                 \
+  TFJ(PromiseThen, 2)                                                          \
+  TFJ(PromiseCatch, 1)                                                         \
+  TFJ(ResolvePromise, 2)                                                       \
+  TFS(PromiseHandleReject, BUILTIN, kNoExtraICState, PromiseHandleReject, 1)   \
+  TFJ(PromiseHandle, 5)                                                        \
+  TFJ(PromiseResolve, 1)                                                       \
+  TFJ(PromiseReject, 1)                                                        \
+  TFJ(InternalPromiseReject, 3)                                                \
+  TFJ(PromiseFinally, 1)                                                       \
+  TFJ(PromiseThenFinally, 1)                                                   \
+  TFJ(PromiseCatchFinally, 1)                                                  \
+  TFJ(PromiseValueThunkFinally, 0)                                             \
+  TFJ(PromiseThrowerFinally, 0)                                                \
+                                                                               \
+  /* Proxy */                                                                  \
+  CPP(ProxyConstructor)                                                        \
+  CPP(ProxyConstructor_ConstructStub)                                          \
+                                                                               \
+  /* Reflect */                                                                \
+  ASM(ReflectApply)                                                            \
+  ASM(ReflectConstruct)                                                        \
+  CPP(ReflectDefineProperty)                                                   \
+  CPP(ReflectDeleteProperty)                                                   \
+  CPP(ReflectGet)                                                              \
+  CPP(ReflectGetOwnPropertyDescriptor)                                         \
+  CPP(ReflectGetPrototypeOf)                                                   \
+  CPP(ReflectHas)                                                              \
+  CPP(ReflectIsExtensible)                                                     \
+  CPP(ReflectOwnKeys)                                                          \
+  CPP(ReflectPreventExtensions)                                                \
+  CPP(ReflectSet)                                                              \
+  CPP(ReflectSetPrototypeOf)                                                   \
+                                                                               \
+  /* RegExp */                                                                 \
+  CPP(RegExpCapture1Getter)                                                    \
+  CPP(RegExpCapture2Getter)                                                    \
+  CPP(RegExpCapture3Getter)                                                    \
+  CPP(RegExpCapture4Getter)                                                    \
+  CPP(RegExpCapture5Getter)                                                    \
+  CPP(RegExpCapture6Getter)                                                    \
+  CPP(RegExpCapture7Getter)                                                    \
+  CPP(RegExpCapture8Getter)                                                    \
+  CPP(RegExpCapture9Getter)                                                    \
+  TFJ(RegExpConstructor, 2)                                                    \
+  TFJ(RegExpInternalMatch, 2)                                                  \
+  CPP(RegExpInputGetter)                                                       \
+  CPP(RegExpInputSetter)                                                       \
+  CPP(RegExpLastMatchGetter)                                                   \
+  CPP(RegExpLastParenGetter)                                                   \
+  CPP(RegExpLeftContextGetter)                                                 \
+  TFJ(RegExpPrototypeCompile, 2)                                               \
+  TFJ(RegExpPrototypeExec, 1)                                                  \
+  TFJ(RegExpPrototypeFlagsGetter, 0)                                           \
+  TFJ(RegExpPrototypeGlobalGetter, 0)                                          \
+  TFJ(RegExpPrototypeIgnoreCaseGetter, 0)                                      \
+  TFJ(RegExpPrototypeMatch, 1)                                                 \
+  TFJ(RegExpPrototypeMultilineGetter, 0)                                       \
+  TFJ(RegExpPrototypeSearch, 1)                                                \
+  TFJ(RegExpPrototypeSourceGetter, 0)                                          \
+  TFJ(RegExpPrototypeStickyGetter, 0)                                          \
+  TFJ(RegExpPrototypeTest, 1)                                                  \
+  CPP(RegExpPrototypeToString)                                                 \
+  TFJ(RegExpPrototypeUnicodeGetter, 0)                                         \
+  CPP(RegExpRightContextGetter)                                                \
+                                                                               \
+  TFS(RegExpReplace, BUILTIN, kNoExtraICState, RegExpReplace, 1)               \
+  TFJ(RegExpPrototypeReplace, 2)                                               \
+                                                                               \
+  TFS(RegExpSplit, BUILTIN, kNoExtraICState, RegExpSplit, 1)                   \
+  TFJ(RegExpPrototypeSplit, 2)                                                 \
+                                                                               \
+  /* SharedArrayBuffer */                                                      \
+  CPP(SharedArrayBufferPrototypeGetByteLength)                                 \
+  TFJ(AtomicsLoad, 2)                                                          \
+  TFJ(AtomicsStore, 3)                                                         \
+                                                                               \
+  /* String */                                                                 \
+  ASM(StringConstructor)                                                       \
+  ASM(StringConstructor_ConstructStub)                                         \
+  CPP(StringFromCodePoint)                                                     \
+  /* ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits ) */              \
+  TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel)     \
+  /* ES6 section 21.1.3.1 String.prototype.charAt ( pos ) */                   \
+  TFJ(StringPrototypeCharAt, 1)                                                \
+  /* ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos ) */               \
+  TFJ(StringPrototypeCharCodeAt, 1)                                            \
+  /* ES6 section 21.1.3.6 */                                                   \
+  /* String.prototype.endsWith ( searchString [ , endPosition ] ) */           \
+  CPP(StringPrototypeEndsWith)                                                 \
+  /* ES6 section 21.1.3.7 */                                                   \
+  /* String.prototype.includes ( searchString [ , position ] ) */              \
+  CPP(StringPrototypeIncludes)                                                 \
+  /* ES6 section #sec-string.prototype.indexof */                              \
+  /* String.prototype.indexOf ( searchString [ , position ] ) */               \
+  TFJ(StringPrototypeIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+  /* ES6 section 21.1.3.9 */                                                   \
+  /* String.prototype.lastIndexOf ( searchString [ , position ] ) */           \
+  CPP(StringPrototypeLastIndexOf)                                              \
+  /* ES6 section 21.1.3.10 String.prototype.localeCompare ( that ) */          \
+  CPP(StringPrototypeLocaleCompare)                                            \
+  /* ES6 section 21.1.3.12 String.prototype.normalize ( [form] ) */            \
+  CPP(StringPrototypeNormalize)                                                \
+  /* ES6 section 21.1.3.16 String.prototype.replace ( search, replace ) */     \
+  TFJ(StringPrototypeReplace, 2)                                               \
+  /* ES6 section 21.1.3.19 String.prototype.split ( separator, limit ) */      \
+  TFJ(StringPrototypeSplit, 2)                                                 \
+  /* ES6 section B.2.3.1 String.prototype.substr ( start, length ) */          \
+  TFJ(StringPrototypeSubstr, 2)                                                \
+  /* ES6 section 21.1.3.21 String.prototype.substring ( start, end ) */        \
+  TFJ(StringPrototypeSubstring, 2)                                             \
+  /* ES6 section 21.1.3.20 */                                                  \
+  /* String.prototype.startsWith ( searchString [ , position ] ) */            \
+  CPP(StringPrototypeStartsWith)                                               \
+  /* ES6 section 21.1.3.25 String.prototype.toString () */                     \
+  TFJ(StringPrototypeToString, 0)                                              \
+  /* ES #sec-string.prototype.tolocalelowercase */                             \
+  CPP(StringPrototypeToLocaleLowerCase)                                        \
+  /* ES #sec-string.prototype.tolocaleuppercase */                             \
+  CPP(StringPrototypeToLocaleUpperCase)                                        \
+  /* ES #sec-string.prototype.tolowercase */                                   \
+  CPP(StringPrototypeToLowerCase)                                              \
+  /* ES #sec-string.prototype.touppercase */                                   \
+  CPP(StringPrototypeToUpperCase)                                              \
+  CPP(StringPrototypeTrim)                                                     \
+  CPP(StringPrototypeTrimLeft)                                                 \
+  CPP(StringPrototypeTrimRight)                                                \
+  /* ES6 section 21.1.3.28 String.prototype.valueOf () */                      \
+  TFJ(StringPrototypeValueOf, 0)                                               \
+  /* ES6 #sec-string.prototype-@@iterator */                                   \
+  TFJ(StringPrototypeIterator, 0)                                              \
+                                                                               \
+  /* StringIterator */                                                         \
+  TFJ(StringIteratorPrototypeNext, 0)                                          \
+                                                                               \
+  /* Symbol */                                                                 \
+  CPP(SymbolConstructor)                                                       \
+  CPP(SymbolConstructor_ConstructStub)                                         \
+  /* ES6 section 19.4.2.1 Symbol.for */                                        \
+  CPP(SymbolFor)                                                               \
+  /* ES6 section 19.4.2.5 Symbol.keyFor */                                     \
+  CPP(SymbolKeyFor)                                                            \
+  /* ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint ) */       \
+  TFJ(SymbolPrototypeToPrimitive, 1)                                           \
+  /* ES6 section 19.4.3.2 Symbol.prototype.toString ( ) */                     \
+  TFJ(SymbolPrototypeToString, 0)                                              \
+  /* ES6 section 19.4.3.3 Symbol.prototype.valueOf ( ) */                      \
+  TFJ(SymbolPrototypeValueOf, 0)                                               \
+                                                                               \
+  /* TypedArray */                                                             \
+  CPP(TypedArrayPrototypeBuffer)                                               \
+  /* ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength */             \
+  TFJ(TypedArrayPrototypeByteLength, 0)                                        \
+  /* ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset */             \
+  TFJ(TypedArrayPrototypeByteOffset, 0)                                        \
+  /* ES6 section 22.2.3.18 get %TypedArray%.prototype.length */                \
+  TFJ(TypedArrayPrototypeLength, 0)                                            \
+  /* ES6 #sec-%typedarray%.prototype.entries */                                \
+  TFJ(TypedArrayPrototypeEntries, 0)                                           \
+  /* ES6 #sec-%typedarray%.prototype.keys */                                   \
+  TFJ(TypedArrayPrototypeKeys, 0)                                              \
+  /* ES6 #sec-%typedarray%.prototype.values */                                 \
+  TFJ(TypedArrayPrototypeValues, 0)                                            \
+  /* ES6 #sec-%typedarray%.prototype.copywithin */                             \
+  CPP(TypedArrayPrototypeCopyWithin)                                           \
+                                                                               \
+  /* Wasm */                                                                   \
+  TFS(WasmStackGuard, BUILTIN, kNoExtraICState, WasmRuntimeCall, 1)            \
+  TFS(ThrowWasmTrapUnreachable, BUILTIN, kNoExtraICState, WasmRuntimeCall, 1)  \
+  TFS(ThrowWasmTrapMemOutOfBounds, BUILTIN, kNoExtraICState, WasmRuntimeCall,  \
+      1)                                                                       \
+  TFS(ThrowWasmTrapDivByZero, BUILTIN, kNoExtraICState, WasmRuntimeCall, 1)    \
+  TFS(ThrowWasmTrapDivUnrepresentable, BUILTIN, kNoExtraICState,               \
+      WasmRuntimeCall, 1)                                                      \
+  TFS(ThrowWasmTrapRemByZero, BUILTIN, kNoExtraICState, WasmRuntimeCall, 1)    \
+  TFS(ThrowWasmTrapFloatUnrepresentable, BUILTIN, kNoExtraICState,             \
+      WasmRuntimeCall, 1)                                                      \
+  TFS(ThrowWasmTrapFuncInvalid, BUILTIN, kNoExtraICState, WasmRuntimeCall, 1)  \
+  TFS(ThrowWasmTrapFuncSigMismatch, BUILTIN, kNoExtraICState, WasmRuntimeCall, \
+      1)                                                                       \
+                                                                               \
+  /* Async-from-Sync Iterator */                                               \
+                                                                               \
+  /* %AsyncFromSyncIteratorPrototype% */                                       \
+  /* (proposal-async-iteration/#sec-%asyncfromsynciteratorprototype%-object) */ \
+  TFJ(AsyncFromSyncIteratorPrototypeNext, 1)                                   \
+  TFJ(AsyncFromSyncIteratorPrototypeThrow, 1)                                  \
+  TFJ(AsyncFromSyncIteratorPrototypeReturn, 1)                                 \
+                                                                               \
+  /* proposal-async-iteration/#sec-async-iterator-value-unwrap-functions */    \
+  TFJ(AsyncIteratorValueUnwrap, 1)
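
The list above is a classic X-macro: each builtin appears exactly once, tagged with its kind (CPP for C++ builtins, TFJ/TFS for TurboFan-generated code with JavaScript or stub linkage, ASM for hand-written assembly) plus an argument count or interface descriptor, and every consumer re-expands the list with its own per-kind adapters. A minimal, self-contained sketch of the pattern; the list contents and names below are illustrative, not V8's actual definitions:

#include <cstdio>

// Illustrative list; each entry is KIND(identifier, argument-count).
#define DEMO_BUILTIN_LIST(CPP, TFJ) \
  CPP(ObjectKeys, 1)                \
  TFJ(StringPrototypeCharAt, 1)     \
  TFJ(ObjectCreate, 2)

// Consumer 1: generate an enum with one constant per builtin.
enum class DemoName {
#define DEF_ENUM(Name, ...) k##Name,
  DEMO_BUILTIN_LIST(DEF_ENUM, DEF_ENUM)
#undef DEF_ENUM
      kCount
};

// Consumer 2: generate a name table, ignoring entries of one kind.
#define IGNORE_ENTRY(...)
#define DEF_NAME(Name, ...) #Name,
static const char* kTurboFanBuiltinNames[] = {
    DEMO_BUILTIN_LIST(IGNORE_ENTRY, DEF_NAME)};
#undef DEF_NAME
#undef IGNORE_ENTRY

int main() {
  std::printf("total builtins: %d\n", static_cast<int>(DemoName::kCount));
  for (const char* name : kTurboFanBuiltinNames) std::printf("TFJ: %s\n", name);
}

The IGNORE_BUILTIN helper defined below plays the same role as IGNORE_ENTRY here: it selects a subset of kinds when re-expanding the list.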
 
 #define IGNORE_BUILTIN(...)
 
@@ -717,8 +866,11 @@
                IGNORE_BUILTIN, IGNORE_BUILTIN, V)
 
 // Forward declarations.
-class CodeStubAssembler;
 class ObjectVisitor;
+enum class InterpreterPushArgsMode : unsigned;
+namespace compiler {
+class CodeAssemblerState;
+}
 
 class Builtins {
  public:
@@ -735,7 +887,7 @@
   // Disassembler support.
   const char* Lookup(byte* pc);
 
-  enum Name {
+  enum Name : int32_t {
 #define DEF_ENUM(Name, ...) k##Name,
     BUILTIN_LIST_ALL(DEF_ENUM)
 #undef DEF_ENUM
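
Note the new fixed underlying type on the enum. Only an enum with a fixed underlying type may be forward-declared (as this patch does above with `enum class InterpreterPushArgsMode : unsigned;`), and pinning Name to int32_t also keeps its size stable no matter how many enumerators the list expands to. A small illustrative sketch with invented names:

#include <cstdint>

// Only an enum with a fixed underlying type may be forward-declared; the
// pinned type also keeps the enum's size stable as the list grows.
enum class DemoMode : unsigned;  // OK: opaque declaration, definition later
enum DemoName : int32_t;         // OK even unscoped, because the type is fixed
// enum Broken;                  // ill-formed: underlying type unknown

void TakeMode(DemoMode mode);    // usable before the full definition

enum class DemoMode : unsigned { kA, kB };
enum DemoName : int32_t { kFirst, kLast };

void TakeMode(DemoMode) {}

int main() { TakeMode(DemoMode::kA); }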
@@ -757,10 +909,12 @@
   Handle<Code> NonPrimitiveToPrimitive(
       ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
   Handle<Code> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
-  Handle<Code> InterpreterPushArgsAndCall(
-      TailCallMode tail_call_mode,
-      CallableType function_type = CallableType::kAny);
-  Handle<Code> InterpreterPushArgsAndConstruct(CallableType function_type);
+  Handle<Code> InterpreterPushArgsAndCall(TailCallMode tail_call_mode,
+                                          InterpreterPushArgsMode mode);
+  Handle<Code> InterpreterPushArgsAndConstruct(InterpreterPushArgsMode mode);
+  Handle<Code> NewFunctionContext(ScopeType scope_type);
+  Handle<Code> NewCloneShallowArray(AllocationSiteMode allocation_mode);
+  Handle<Code> NewCloneShallowObject(int length);
 
   Code* builtin(Name name) {
     // Code::cast cannot be used here since we access builtins
@@ -809,16 +963,15 @@
 
   static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
                             TailCallMode tail_call_mode);
+  static void Generate_CallForwardVarargs(MacroAssembler* masm,
+                                          Handle<Code> code);
 
   static void Generate_InterpreterPushArgsAndCallImpl(
       MacroAssembler* masm, TailCallMode tail_call_mode,
-      CallableType function_type);
+      InterpreterPushArgsMode mode);
 
   static void Generate_InterpreterPushArgsAndConstructImpl(
-      MacroAssembler* masm, CallableType function_type);
-
-  static void Generate_DatePrototype_GetField(CodeStubAssembler* masm,
-                                              int field_index);
+      MacroAssembler* masm, InterpreterPushArgsMode mode);
 
   enum class MathMaxMinKind { kMax, kMin };
   static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
@@ -826,7 +979,7 @@
 #define DECLARE_ASM(Name, ...) \
   static void Generate_##Name(MacroAssembler* masm);
 #define DECLARE_TF(Name, ...) \
-  static void Generate_##Name(CodeStubAssembler* csasm);
+  static void Generate_##Name(compiler::CodeAssemblerState* state);
 
   BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF,
                DECLARE_ASM, DECLARE_ASM, DECLARE_ASM)
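
The TF builtin generators now take a compiler::CodeAssemblerState* instead of a CodeStubAssembler*, and the header gets by with a forward declaration of that class inside its namespace, so including builtins.h no longer drags in the code-assembler headers. A toy sketch of the pattern; the names below stand in for the real classes:

#include <cstdio>

// "Header" part: a pointer parameter only needs a forward declaration,
// so this header stays cheap to include.
namespace compiler {
class CodeAssemblerState;
}

struct DemoBuiltins {
  static void Generate_Demo(compiler::CodeAssemblerState* state);
};

// "Implementation" part: only here is the full definition required.
namespace compiler {
class CodeAssemblerState {
 public:
  const char* name() const { return "Demo"; }
};
}  // namespace compiler

void DemoBuiltins::Generate_Demo(compiler::CodeAssemblerState* state) {
  std::printf("generating builtin %s\n", state->name());
}

int main() {
  compiler::CodeAssemblerState state;
  DemoBuiltins::Generate_Demo(&state);
}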
diff --git a/src/builtins/ia32/builtins-ia32.cc b/src/builtins/ia32/builtins-ia32.cc
index 4287333..c074dd8 100644
--- a/src/builtins/ia32/builtins-ia32.cc
+++ b/src/builtins/ia32/builtins-ia32.cc
@@ -115,6 +115,8 @@
 void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
                                     bool create_implicit_receiver,
                                     bool check_derived_construct) {
+  Label post_instantiation_deopt_entry;
+
   // ----------- S t a t e -------------
   //  -- eax: number of arguments
   //  -- esi: context
@@ -135,8 +137,8 @@
       // Allocate the new receiver object.
       __ Push(edi);
       __ Push(edx);
-      FastNewObjectStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+              RelocInfo::CODE_TARGET);
       __ mov(ebx, eax);
       __ Pop(edx);
       __ Pop(edi);
@@ -163,6 +165,9 @@
       __ PushRoot(Heap::kTheHoleValueRootIndex);
     }
 
+    // Deoptimizer re-enters stub code here.
+    __ bind(&post_instantiation_deopt_entry);
+
     // Set up pointer to last argument.
     __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
 
@@ -183,7 +188,8 @@
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
-      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+      masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+          masm->pc_offset());
     }
 
     // Restore context from the frame.
@@ -240,6 +246,35 @@
     __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
   }
   __ ret(0);
+
+  // Store the offset of the trampoline address for the deoptimizer. This is
+  // the bailout point after the receiver instantiation but before the
+  // function invocation. We need to restore some registers before continuing
+  // with the code above.
+  if (create_implicit_receiver && !is_api_function) {
+    masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+        masm->pc_offset());
+
+    // ----------- S t a t e -------------
+    //  -- eax    : newly allocated object
+    //  -- esp[0] : constructor function
+    // -----------------------------------
+
+    __ pop(edi);
+    __ push(eax);
+    __ push(eax);
+
+    // Retrieve smi-tagged arguments count from the stack.
+    __ mov(eax, Operand(ebp, ConstructFrameConstants::kLengthOffset));
+    __ SmiUntag(eax);
+
+    // Retrieve the new target value from the stack. This was placed into the
+    // frame description in place of the receiver by the optimizing compiler.
+    __ mov(edx, Operand(ebp, eax, times_pointer_size,
+                        StandardFrameConstants::kCallerSPOffset));
+
+    // Continue with constructor function invocation.
+    __ jmp(&post_instantiation_deopt_entry);
+  }
 }
 
 }  // namespace
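
The construct stub now records two deopt PC offsets instead of one: SetConstructStubCreateDeoptPCOffset marks the re-entry point after the receiver is allocated but before the constructor body runs, and SetConstructStubInvokeDeoptPCOffset marks the point after the invocation. A schematic sketch of the record-an-offset-while-emitting pattern; ToyAssembler and DeoptEntryPoints are invented stand-ins, not V8's deoptimizer interface:

#include <cstdint>
#include <cstdio>
#include <vector>

// While emitting code, remember the buffer offsets at which the deoptimizer
// may re-enter the construct stub.
class ToyAssembler {
 public:
  int pc_offset() const { return static_cast<int>(buffer_.size()); }
  void Emit(uint8_t byte) { buffer_.push_back(byte); }

 private:
  std::vector<uint8_t> buffer_;
};

struct DeoptEntryPoints {
  int create_reentry = -1;  // bailout after instantiation, before invocation
  int invoke_reentry = -1;  // bailout after the constructor call returns
};

int main() {
  ToyAssembler masm;
  DeoptEntryPoints points;

  masm.Emit(0x01);  // ... allocate the receiver ...
  points.create_reentry = masm.pc_offset();
  masm.Emit(0x02);  // ... push arguments, call the constructor ...
  points.invoke_reentry = masm.pc_offset();
  masm.Emit(0x03);  // ... epilogue ...

  std::printf("create re-entry at %d, invoke re-entry at %d\n",
              points.create_reentry, points.invoke_reentry);
}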
@@ -386,17 +421,16 @@
   __ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
 
   // Load suspended function and context.
-  __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
   __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
   Label stepping_prepared;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(masm->isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  __ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
-  __ j(greater_equal, &prepare_step_in_if_stepping);
+  ExternalReference debug_hook =
+      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+  __ cmpb(Operand::StaticVariable(debug_hook), Immediate(0));
+  __ j(not_equal, &prepare_step_in_if_stepping);
 
   // Flood function if we need to continue stepping in the suspended generator.
   ExternalReference debug_suspended_generator =
@@ -437,19 +471,20 @@
     __ bind(&done_loop);
   }
 
-  // Dispatch on the kind of generator object.
-  Label old_generator;
-  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
-  __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
-  __ j(not_equal, &old_generator);
+  // Underlying function needs to have bytecode available.
+  if (FLAG_debug_code) {
+    __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+    __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+    __ Assert(equal, kMissingBytecodeArray);
+  }
 
-  // New-style (ignition/turbofan) generator object
+  // Resume (Ignition/TurboFan) generator object.
   {
     __ PushReturnAddressFrom(eax);
     __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
     __ mov(eax,
-           FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+           FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
     // We abuse new.target both to indicate that this is a resume call and to
     // pass in the generator object.  In ordinary calls, new.target is always
     // undefined because generator functions are non-constructable.
@@ -457,56 +492,13 @@
     __ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
   }
 
-  // Old-style (full-codegen) generator object
-  __ bind(&old_generator);
-  {
-    // Enter a new JavaScript frame, and initialize its slots as they were when
-    // the generator was suspended.
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ PushReturnAddressFrom(eax);  // Return address.
-    __ Push(ebp);                   // Caller's frame pointer.
-    __ Move(ebp, esp);
-    __ Push(esi);  // Callee's context.
-    __ Push(edi);  // Callee's JS Function.
-
-    // Restore the operand stack.
-    __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
-    {
-      Label done_loop, loop;
-      __ Move(ecx, Smi::kZero);
-      __ bind(&loop);
-      __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
-      __ j(equal, &done_loop, Label::kNear);
-      __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
-                           FixedArray::kHeaderSize));
-      __ add(ecx, Immediate(Smi::FromInt(1)));
-      __ jmp(&loop);
-      __ bind(&done_loop);
-    }
-
-    // Reset operand stack so we don't leak.
-    __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
-           Immediate(masm->isolate()->factory()->empty_fixed_array()));
-
-    // Resume the generator function at the continuation.
-    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-    __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
-    __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
-    __ SmiUntag(ecx);
-    __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
-    __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
-           Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
-    __ mov(eax, ebx);  // Continuation expects generator object in eax.
-    __ jmp(edx);
-  }
-
   __ bind(&prepare_step_in_if_stepping);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(ebx);
     __ Push(edx);
     __ Push(edi);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(edx);
     __ Pop(ebx);
     __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
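
The stepping check collapses into a single byte: instead of comparing the last step action against StepIn, the generated code tests the debug_hook_on_function_call flag against zero and calls Runtime::kDebugOnFunctionCall when it is set. A plain C++ sketch of that fast-path shape, with invented names:

#include <cstdint>
#include <cstdio>

// The debugger keeps one byte that is nonzero whenever any function-call
// interception is needed, so generated code needs only a compare-with-zero
// instead of reasoning about individual stepping states.
namespace {
uint8_t debug_hook_on_function_call = 0;

void ResumeGenerator() {
  if (debug_hook_on_function_call != 0) {
    std::puts("-> Runtime::kDebugOnFunctionCall (prepare stepping)");
  }
  std::puts("-> resume generator body");
}
}  // namespace

int main() {
  ResumeGenerator();              // fast path
  debug_hook_on_function_call = 1;
  ResumeGenerator();              // debugger attached / stepping
}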
@@ -577,9 +569,8 @@
   // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   Label load_debug_bytecode_array, bytecode_array_loaded;
-  __ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
-         Immediate(DebugInfo::uninitialized()));
-  __ j(not_equal, &load_debug_bytecode_array);
+  __ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
+                  &load_debug_bytecode_array);
   __ mov(kInterpreterBytecodeArrayRegister,
          FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
   __ bind(&bytecode_array_loaded);
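
The DebugInfo check also gets cheaper: the SharedFunctionInfo slot either holds a smi (no debug info) or a heap pointer, so JumpIfNotSmi is a single tag test. A sketch of 32-bit smi tagging as assumed by that check; the constants are illustrative:

#include <cassert>
#include <cstdint>
#include <cstdio>

// 32-bit smi tagging: small integers carry a 0 in the low bit, heap object
// pointers carry a 1, so "is there a DebugInfo attached?" is one bit test.
constexpr intptr_t kSmiTagMask = 1;
constexpr intptr_t kSmiTag = 0;
constexpr intptr_t kHeapObjectTag = 1;

inline bool IsSmi(intptr_t value) { return (value & kSmiTagMask) == kSmiTag; }
inline intptr_t SmiFromInt(int32_t v) { return static_cast<intptr_t>(v) << 1; }

int main() {
  intptr_t no_debug_info = SmiFromInt(0);         // slot holds a smi
  intptr_t debug_info = 0x1000 | kHeapObjectTag;  // slot holds a tagged pointer
  assert(IsSmi(no_debug_info));
  assert(!IsSmi(debug_info));
  std::printf("smi? %d %d\n", IsSmi(no_debug_info), IsSmi(debug_info));
}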
@@ -591,11 +582,11 @@
   __ j(not_equal, &switch_to_different_code_kind);
 
   // Increment invocation count for the function.
-  __ EmitLoadTypeFeedbackVector(ecx);
-  __ add(FieldOperand(ecx,
-                      TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                          TypeFeedbackVector::kHeaderSize),
-         Immediate(Smi::FromInt(1)));
+  __ EmitLoadFeedbackVector(ecx);
+  __ add(
+      FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                            FeedbackVector::kHeaderSize),
+      Immediate(Smi::FromInt(1)));
 
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
@@ -605,6 +596,11 @@
     __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Reset code age.
+  __ mov_b(FieldOperand(kInterpreterBytecodeArrayRegister,
+                        BytecodeArray::kBytecodeAgeOffset),
+           Immediate(BytecodeArray::kNoAgeBytecodeAge));
+
   // Push bytecode array.
   __ push(kInterpreterBytecodeArrayRegister);
   // Push Smi tagged initial bytecode array offset.
@@ -732,7 +728,7 @@
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
-    CallableType function_type) {
+    InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- ebx : the address of the first argument to be pushed. Subsequent
@@ -764,12 +760,14 @@
   // Call the target.
   __ Push(edx);  // Re-push return address.
 
-  if (function_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
                                                       tail_call_mode),
             RelocInfo::CODE_TARGET);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(function_type, CallableType::kAny);
     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
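
The old two-valued CallableType parameter becomes the three-valued InterpreterPushArgsMode, and the dispatch gains a dedicated spread branch. A condensed sketch of the same dispatch shape; the returned target names are placeholders for the builtins selected above:

#include <cstdio>

// Three-way dispatch replacing the old two-valued CallableType; the final
// branch asserts it really is the remaining enumerator, as the DCHECK does.
enum class InterpreterPushArgsMode : unsigned {
  kJSFunction,
  kWithFinalSpread,
  kOther
};

const char* CallTargetFor(InterpreterPushArgsMode mode) {
  if (mode == InterpreterPushArgsMode::kJSFunction) return "CallFunction";
  if (mode == InterpreterPushArgsMode::kWithFinalSpread) return "CallWithSpread";
  // DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
  return "Call";
}

int main() {
  std::printf("%s\n", CallTargetFor(InterpreterPushArgsMode::kWithFinalSpread));
}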
@@ -882,7 +880,7 @@
 
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
-    MacroAssembler* masm, CallableType construct_type) {
+    MacroAssembler* masm, InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- edx : the new target
@@ -908,7 +906,7 @@
   __ Pop(edi);
 
   __ AssertUndefinedOrAllocationSite(ebx);
-  if (construct_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     // Tail call to the function-specific construct stub (still in the caller
     // context at this point).
     __ AssertFunction(edi);
@@ -917,9 +915,12 @@
     __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
     __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
     __ jmp(ecx);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    // Call the constructor with unmodified eax, edi, edx values.
+    __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(construct_type, CallableType::kAny);
-
+    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
     // Call the constructor with unmodified eax, edi, edx values.
     __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   }
@@ -1062,6 +1063,12 @@
   Register new_target = edx;
   Register argument_count = eax;
 
+  // Do we have a valid feedback vector?
+  __ mov(ebx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
+  __ JumpIfRoot(ebx, Heap::kUndefinedValueRootIndex,
+                &gotta_call_runtime_no_stack);
+
   __ push(argument_count);
   __ push(new_target);
   __ push(closure);
@@ -1072,9 +1079,8 @@
   __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
   __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
   __ cmp(index, Immediate(Smi::FromInt(2)));
-  __ j(less, &gotta_call_runtime);
+  __ j(less, &try_shared);
 
-  // Find literals.
   // edx : native context
   // ebx : length / index
   // eax : optimized code map
@@ -1092,25 +1098,6 @@
   __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
   __ cmp(temp, native_context);
   __ j(not_equal, &loop_bottom);
-  // OSR id set to none?
-  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
-                            SharedFunctionInfo::kOffsetToPreviousOsrAstId));
-  const int bailout_id = BailoutId::None().ToInt();
-  __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
-  __ j(not_equal, &loop_bottom);
-  // Literals available?
-  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
-                            SharedFunctionInfo::kOffsetToPreviousLiterals));
-  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
-
-  // Save the literals in the closure.
-  __ mov(ecx, Operand(esp, 0));
-  __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
-  __ push(index);
-  __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
-                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ pop(index);
 
   // Code available?
   Register entry = ecx;
@@ -1119,7 +1106,7 @@
   __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
 
-  // Found literals and code. Get them into the closure and return.
+  // Found code. Get it into the closure and return.
   __ pop(closure);
   // Store code entry in the closure.
   __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
@@ -1153,9 +1140,7 @@
   __ cmp(index, Immediate(Smi::FromInt(1)));
   __ j(greater, &loop_top);
 
-  // We found neither literals nor code.
-  __ jmp(&gotta_call_runtime);
-
+  // We found no code.
   __ bind(&try_shared);
   __ pop(closure);
   __ pop(new_target);
@@ -1165,14 +1150,14 @@
   __ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
             Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
   __ j(not_zero, &gotta_call_runtime_no_stack);
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
-  __ and_(ebx, Code::KindField::kMask);
-  __ shr(ebx, Code::KindField::kShift);
-  __ cmp(ebx, Immediate(Code::BUILTIN));
+  __ Move(ebx, masm->CodeObject());
+  __ cmp(entry, ebx);
   __ j(equal, &gotta_call_runtime_no_stack);
-  // Yes, install the full code.
+
+  // Install the SFI's code entry.
   __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
   __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
   __ RecordWriteCodeEntryField(closure, entry, ebx);
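
With literals folded into the feedback vector, Generate_CompileLazy reduces to: bail out to the runtime when the closure has no feedback vector yet, search the optimized code map for an entry matching the native context, and otherwise install whatever non-CompileLazy code the SharedFunctionInfo already has. A condensed control-flow sketch under those assumptions; all types and helpers here are invented stand-ins:

#include <cstdio>

// Invented stand-ins for the objects the stub walks.
struct Code { const char* name; };

struct SharedFunctionInfo {
  Code* code;  // may be the CompileLazy trampoline itself
  bool marked_for_tier_up;
  Code* FindOptimizedCode(int /*native_context_id*/) {
    return nullptr;  // optimized code map search elided in this sketch
  }
};

struct JSFunction {
  SharedFunctionInfo* shared;
  bool has_feedback_vector;
  Code* code_entry = nullptr;
};

Code kCompileLazy{"CompileLazy"};

const char* CompileLazy(JSFunction* closure, int native_context_id) {
  // gotta_call_runtime: no feedback vector means nothing to reuse yet.
  if (!closure->has_feedback_vector) return "runtime";
  if (Code* optimized = closure->shared->FindOptimizedCode(native_context_id)) {
    closure->code_entry = optimized;  // found code, install it
    return "optimized";
  }
  // try_shared: a tier-up request must go through the runtime.
  if (closure->shared->marked_for_tier_up) return "runtime";
  // If the SFI still points at CompileLazy there is nothing to install.
  if (closure->shared->code == &kCompileLazy) return "runtime";
  closure->code_entry = closure->shared->code;  // install the SFI's code entry
  return "shared";
}

int main() {
  SharedFunctionInfo sfi{&kCompileLazy, false};
  JSFunction fn{&sfi, true};
  std::printf("%s\n", CompileLazy(&fn, 0));  // prints "runtime"
}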
@@ -1294,14 +1279,9 @@
   __ ret(0);
 }
 
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
-  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
-  }                                                           \
-  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                              \
+  void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+    GenerateMakeCodeYoungAgainCommon(masm);                               \
   }
 CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -1670,14 +1650,14 @@
   __ bind(&target_not_constructor);
   {
     __ mov(Operand(esp, kPointerSize), edi);
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 
   // 4c. The new.target is not a constructor, throw an appropriate TypeError.
   __ bind(&new_target_not_constructor);
   {
     __ mov(Operand(esp, kPointerSize), edx);
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 }
 
@@ -1969,8 +1949,8 @@
     FrameScope scope(masm, StackFrame::MANUAL);
     __ EnterBuiltinFrame(esi, edi, ecx);
     __ Push(ebx);  // the first argument
-    FastNewObjectStub stub(masm->isolate());
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(FieldOperand(eax, JSValue::kValueOffset));
     __ LeaveBuiltinFrame(esi, edi, ecx);
   }
@@ -2132,8 +2112,8 @@
     __ SmiTag(ebx);
     __ EnterBuiltinFrame(esi, edi, ebx);
     __ Push(eax);  // the first argument
-    FastNewObjectStub stub(masm->isolate());
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(FieldOperand(eax, JSValue::kValueOffset));
     __ LeaveBuiltinFrame(esi, edi, ebx);
     __ SmiUntag(ebx);
@@ -2154,7 +2134,7 @@
   __ mov(ebp, esp);
 
   // Store the arguments adaptor context sentinel.
-  __ push(Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ push(Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // Push the function on the stack.
   __ push(edi);
@@ -2193,7 +2173,8 @@
 
   // Create the list of arguments from the array-like argumentsList.
   {
-    Label create_arguments, create_array, create_runtime, done_create;
+    Label create_arguments, create_array, create_holey_array, create_runtime,
+        done_create;
     __ JumpIfSmi(eax, &create_runtime);
 
     // Load the map of argumentsList into ecx.
@@ -2237,6 +2218,22 @@
     __ mov(eax, ecx);
     __ jmp(&done_create);
 
+    // For holey JSArrays we need to check that the array prototype chain
+    // protector is intact and that the prototype is actually the initial
+    // Array.prototype.
+    __ bind(&create_holey_array);
+    __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+    __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+    __ cmp(ecx, ContextOperand(ebx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+    __ j(not_equal, &create_runtime);
+    __ LoadRoot(ecx, Heap::kArrayProtectorRootIndex);
+    __ cmp(FieldOperand(ecx, PropertyCell::kValueOffset),
+           Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+    __ j(not_equal, &create_runtime);
+    __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+    __ SmiUntag(ebx);
+    __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+    __ jmp(&done_create);
+
     // Try to create the list from a JSArray object.
     __ bind(&create_array);
     __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
@@ -2244,10 +2241,12 @@
     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
     STATIC_ASSERT(FAST_ELEMENTS == 2);
-    __ cmp(ecx, Immediate(FAST_ELEMENTS));
-    __ j(above, &create_runtime);
+    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
     __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
-    __ j(equal, &create_runtime);
+    __ j(equal, &create_holey_array, Label::kNear);
+    __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+    __ j(equal, &create_holey_array, Label::kNear);
+    __ j(above, &create_runtime);
     __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
     __ SmiUntag(ebx);
     __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
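
The STATIC_ASSERTs pin the numeric layout of the fast elements kinds, so the stub can route both holey kinds to create_holey_array with two equality checks and reject everything above FAST_HOLEY_ELEMENTS with one comparison. A sketch of that layout; the parity helper is an equivalent formulation under this encoding, not the exact checks emitted above:

#include <cassert>

// The fast elements kinds are laid out so that each packed kind is
// immediately followed by its holey variant.
enum ElementsKind {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,
};

static_assert(FAST_SMI_ELEMENTS == 0, "layout assumed by the stub");
static_assert(FAST_HOLEY_SMI_ELEMENTS == 1, "layout assumed by the stub");
static_assert(FAST_ELEMENTS == 2, "layout assumed by the stub");
static_assert(FAST_HOLEY_ELEMENTS == 3, "layout assumed by the stub");

// With this layout, range and parity checks replace per-kind switches.
inline bool IsFastKind(ElementsKind kind) { return kind <= FAST_HOLEY_ELEMENTS; }
inline bool IsHoleyKind(ElementsKind kind) { return (kind & 1) != 0; }

int main() {
  assert(IsHoleyKind(FAST_HOLEY_SMI_ELEMENTS));
  assert(!IsHoleyKind(FAST_ELEMENTS));
  assert(IsFastKind(FAST_HOLEY_ELEMENTS));
}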
@@ -2287,18 +2286,26 @@
   // Push arguments onto the stack (thisArgument is already on the stack).
   {
     __ movd(xmm0, edx);
+    __ movd(xmm1, edi);
     __ PopReturnAddressTo(edx);
     __ Move(ecx, Immediate(0));
-    Label done, loop;
+    Label done, push, loop;
     __ bind(&loop);
     __ cmp(ecx, ebx);
     __ j(equal, &done, Label::kNear);
-    __ Push(
-        FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+    // Turn the hole into undefined as we go.
+    __ mov(edi,
+           FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+    __ CompareRoot(edi, Heap::kTheHoleValueRootIndex);
+    __ j(not_equal, &push, Label::kNear);
+    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ Push(edi);
     __ inc(ecx);
     __ jmp(&loop);
     __ bind(&done);
     __ PushReturnAddressFrom(edx);
+    __ movd(edi, xmm1);
     __ movd(edx, xmm0);
     __ Move(eax, ebx);
   }
@@ -2311,6 +2318,86 @@
   }
 }
 
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+                                           Handle<Code> code) {
+  // ----------- S t a t e -------------
+  //  -- edi    : the target to call (can be any Object)
+  //  -- ecx    : start index (to support rest parameters)
+  //  -- esp[0] : return address.
+  //  -- esp[4] : thisArgument
+  // -----------------------------------
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
+         Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &arguments_adaptor, Label::kNear);
+  {
+    __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+    __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(eax,
+           FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ mov(ebx, ebp);
+  }
+  __ jmp(&arguments_done, Label::kNear);
+  __ bind(&arguments_adaptor);
+  {
+    // Just load the length from the ArgumentsAdaptorFrame.
+    __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  }
+  __ bind(&arguments_done);
+
+  Label stack_empty, stack_done;
+  __ SmiUntag(eax);
+  __ sub(eax, ecx);
+  __ j(less_equal, &stack_empty);
+  {
+    // Check for stack overflow.
+    {
+      // Check the stack for overflow. We are not trying to catch interruptions
+      // (i.e. debug break and preemption) here, so check the "real stack
+      // limit".
+      Label done;
+      __ LoadRoot(ecx, Heap::kRealStackLimitRootIndex);
+      // Make ecx the space we have left. The stack might already be
+      // overflowed here which will cause ecx to become negative.
+      __ neg(ecx);
+      __ add(ecx, esp);
+      __ sar(ecx, kPointerSizeLog2);
+      // Check if the arguments will overflow the stack.
+      __ cmp(ecx, eax);
+      __ j(greater, &done, Label::kNear);  // Signed comparison.
+      __ TailCallRuntime(Runtime::kThrowStackOverflow);
+      __ bind(&done);
+    }
+
+    // Forward the arguments from the caller frame.
+    {
+      Label loop;
+      __ mov(ecx, eax);
+      __ pop(edx);
+      __ bind(&loop);
+      {
+        __ Push(Operand(ebx, ecx, times_pointer_size, 1 * kPointerSize));
+        __ dec(ecx);
+        __ j(not_zero, &loop);
+      }
+      __ push(edx);
+    }
+  }
+  __ jmp(&stack_done, Label::kNear);
+  __ bind(&stack_empty);
+  {
+    // We just pass the receiver, which is already on the stack.
+    __ Move(eax, Immediate(0));
+  }
+  __ bind(&stack_done);
+
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
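
The stack check inside Generate_CallForwardVarargs computes the remaining headroom in pointer-sized slots and compares it signed against the argument count, so a stack that is already past the real limit (negative headroom) also fails. The same arithmetic in plain C++, assuming an ia32-style 4-byte slot:

#include <cstdint>
#include <cstdio>

// slots_left = (sp - real_stack_limit) / kPointerSize, then a signed
// comparison against argc (the stub uses neg/add/sar/cmp for the same).
constexpr int kPointerSizeLog2 = 2;  // ia32: 4-byte pointers

bool ArgumentsFitOnStack(intptr_t sp, intptr_t real_stack_limit, int argc) {
  intptr_t slots_left = (sp - real_stack_limit) >> kPointerSizeLog2;
  return slots_left > argc;  // false when headroom is negative (overflowed)
}

int main() {
  std::printf("%d\n", ArgumentsFitOnStack(0x8000, 0x7000, 10));  // 1: fits
  std::printf("%d\n", ArgumentsFitOnStack(0x7000, 0x8000, 10));  // 0: fails
}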
 namespace {
 
 // Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2361,7 +2448,7 @@
   {
     Label no_interpreter_frame;
     __ cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
-           Immediate(Smi::FromInt(StackFrame::STUB)));
+           Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
     __ j(not_equal, &no_interpreter_frame, Label::kNear);
     __ mov(ebp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
     __ bind(&no_interpreter_frame);
@@ -2372,7 +2459,7 @@
   Label no_arguments_adaptor, formal_parameter_count_loaded;
   __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+         Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &no_arguments_adaptor, Label::kNear);
 
   // Drop current frame and load arguments count from arguments adaptor frame.
@@ -2693,6 +2780,178 @@
   }
 }
 
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+  // Free up some registers.
+  __ movd(xmm0, edx);
+  __ movd(xmm1, edi);
+
+  Register argc = eax;
+
+  Register scratch = ecx;
+  Register scratch2 = edi;
+
+  Register spread = ebx;
+  Register spread_map = edx;
+
+  Register spread_len = edx;
+
+  Label runtime_call, push_args;
+  __ mov(spread, Operand(esp, kPointerSize));
+  __ JumpIfSmi(spread, &runtime_call);
+  __ mov(spread_map, FieldOperand(spread, HeapObject::kMapOffset));
+
+  // Check that the spread is an array.
+  __ CmpInstanceType(spread_map, JS_ARRAY_TYPE);
+  __ j(not_equal, &runtime_call);
+
+  // Check that we have the original ArrayPrototype.
+  __ mov(scratch, FieldOperand(spread_map, Map::kPrototypeOffset));
+  __ mov(scratch2, NativeContextOperand());
+  __ cmp(scratch,
+         ContextOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+  __ j(not_equal, &runtime_call);
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
+  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+  __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+         Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+  __ j(not_equal, &runtime_call);
+
+  // Check that the map of the initial array iterator hasn't changed.
+  __ mov(scratch2, NativeContextOperand());
+  __ mov(scratch,
+         ContextOperand(scratch2,
+                        Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+  __ cmp(scratch,
+         ContextOperand(scratch2,
+                        Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+  __ j(not_equal, &runtime_call);
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  Label no_protector_check;
+  __ mov(scratch, FieldOperand(spread_map, Map::kBitField2Offset));
+  __ DecodeField<Map::ElementsKindBits>(scratch);
+  __ cmp(scratch, Immediate(FAST_HOLEY_ELEMENTS));
+  __ j(above, &runtime_call);
+  // For non-FastHoley kinds, we can skip the protector check.
+  __ cmp(scratch, Immediate(FAST_SMI_ELEMENTS));
+  __ j(equal, &no_protector_check);
+  __ cmp(scratch, Immediate(FAST_ELEMENTS));
+  __ j(equal, &no_protector_check);
+  // Check the ArrayProtector cell.
+  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+  __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+         Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+  __ j(not_equal, &runtime_call);
+
+  __ bind(&no_protector_check);
+  // Load the FixedArray backing store, but use the length from the array.
+  __ mov(spread_len, FieldOperand(spread, JSArray::kLengthOffset));
+  __ SmiUntag(spread_len);
+  __ mov(spread, FieldOperand(spread, JSArray::kElementsOffset));
+  __ jmp(&push_args);
+
+  __ bind(&runtime_call);
+  {
+    // Call the builtin for the result of the spread.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Need to save these on the stack.
+    __ movd(edi, xmm1);
+    __ movd(edx, xmm0);
+    __ Push(edi);
+    __ Push(edx);
+    __ SmiTag(argc);
+    __ Push(argc);
+    __ Push(spread);
+    __ CallRuntime(Runtime::kSpreadIterableFixed);
+    __ mov(spread, eax);
+    __ Pop(argc);
+    __ SmiUntag(argc);
+    __ Pop(edx);
+    __ Pop(edi);
+    // Free up some registers.
+    __ movd(xmm0, edx);
+    __ movd(xmm1, edi);
+  }
+
+  {
+    // Calculate the new nargs including the result of the spread.
+    __ mov(spread_len, FieldOperand(spread, FixedArray::kLengthOffset));
+    __ SmiUntag(spread_len);
+
+    __ bind(&push_args);
+    // argc += spread_len - 1. Subtract 1 for the spread itself.
+    __ lea(argc, Operand(argc, spread_len, times_1, -1));
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+    // Make scratch the space we have left. The stack might already be
+    // overflowed here which will cause scratch to become negative.
+    __ neg(scratch);
+    __ add(scratch, esp);
+    __ sar(scratch, kPointerSizeLog2);
+    // Check if the arguments will overflow the stack.
+    __ cmp(scratch, spread_len);
+    __ j(greater, &done, Label::kNear);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // Put the evaluated spread onto the stack as additional arguments.
+  {
+    Register return_address = edi;
+    // Pop the return address and spread argument.
+    __ PopReturnAddressTo(return_address);
+    __ Pop(scratch);
+
+    Register scratch2 = esi;
+    __ movd(xmm2, esi);
+
+    __ mov(scratch, Immediate(0));
+    Label done, push, loop;
+    __ bind(&loop);
+    __ cmp(scratch, spread_len);
+    __ j(equal, &done, Label::kNear);
+    __ mov(scratch2, FieldOperand(spread, scratch, times_pointer_size,
+                                  FixedArray::kHeaderSize));
+    __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+    __ j(not_equal, &push, Label::kNear);
+    __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ Push(scratch2);
+    __ inc(scratch);
+    __ jmp(&loop);
+    __ bind(&done);
+    __ PushReturnAddressFrom(return_address);
+    __ movd(esi, xmm2);
+    __ movd(edi, xmm1);
+    __ movd(edx, xmm0);
+  }
+}
+
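
On the fast path, CheckSpreadAndPushToStack copies straight out of the backing FixedArray and rewrites each hole sentinel to undefined as it pushes; the protector checks above are what make it sound to treat a hole as plain undefined without consulting the prototype chain. A minimal sketch of the copy loop, with invented tagged-value stand-ins:

#include <cstdio>
#include <vector>

// Invented stand-ins for tagged values in this sketch.
constexpr int kTheHole = -1;
constexpr int kUndefined = 0;

// Mirrors the push loop: every element is forwarded, holes become undefined.
// The length comes from the JSArray, not the backing store, as in the stub.
std::vector<int> SpreadOntoStack(const std::vector<int>& elements, int length) {
  std::vector<int> stack;
  for (int i = 0; i < length; ++i) {
    int value = elements[i];
    stack.push_back(value == kTheHole ? kUndefined : value);
  }
  return stack;
}

int main() {
  std::vector<int> backing = {1, kTheHole, 3};
  for (int v : SpreadOntoStack(backing, 3)) std::printf("%d ", v);
  std::printf("\n");  // prints: 1 0 3
}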
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the target to call (can be any Object)
+  // -----------------------------------
+
+  // CheckSpreadAndPushToStack will push edx to save it.
+  __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            TailCallMode::kDisallow),
+          RelocInfo::CODE_TARGET);
+}
+
 // static
 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -2816,6 +3075,19 @@
 }
 
 // static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target (either the same as the constructor or
+  //           the JSFunction on which new was invoked initially)
+  //  -- edi : the constructor to call (can be any Object)
+  // -----------------------------------
+
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+// static
 void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- edx    : requested object size (untagged)
diff --git a/src/builtins/mips/builtins-mips.cc b/src/builtins/mips/builtins-mips.cc
index b9c4a72..fe975e2 100644
--- a/src/builtins/mips/builtins-mips.cc
+++ b/src/builtins/mips/builtins-mips.cc
@@ -139,7 +139,7 @@
   __ LoadRoot(t2, root_index);
   __ ldc1(f0, FieldMemOperand(t2, HeapNumber::kValueOffset));
 
-  Label done_loop, loop;
+  Label done_loop, loop, done;
   __ mov(a3, a0);
   __ bind(&loop);
   {
@@ -195,15 +195,25 @@
     // accumulator value on the left hand side (f0) and the next parameter value
     // on the right hand side (f2).
     // We need to work out which HeapNumber (or smi) the result came from.
-    Label compare_nan, set_value;
+    Label compare_nan, set_value, ool_min, ool_max;
     __ BranchF(nullptr, &compare_nan, eq, f0, f2);
     __ Move(t0, t1, f0);
     if (kind == MathMaxMinKind::kMin) {
-      __ MinNaNCheck_d(f0, f0, f2);
+      __ Float64Min(f0, f0, f2, &ool_min);
     } else {
       DCHECK(kind == MathMaxMinKind::kMax);
-      __ MaxNaNCheck_d(f0, f0, f2);
+      __ Float64Max(f0, f0, f2, &ool_max);
     }
+    __ jmp(&done);
+
+    __ bind(&ool_min);
+    __ Float64MinOutOfLine(f0, f0, f2);
+    __ jmp(&done);
+
+    __ bind(&ool_max);
+    __ Float64MaxOutOfLine(f0, f0, f2);
+
+    __ bind(&done);
     __ Move(at, t8, f0);
     __ Branch(&set_value, ne, t0, Operand(at));
     __ Branch(&set_value, ne, t1, Operand(t8));
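
Float64Min/Float64Max take an out-of-line label for the inputs the inline instruction sequence cannot decide, namely NaN operands and the -0/+0 tie, where min must return -0 and max +0. A plain C++ statement of the required result (this describes the semantics, not the MIPS instruction sequence):

#include <cmath>
#include <cstdio>
#include <limits>

// Any NaN operand yields NaN; on the -0/+0 tie, min picks -0 and max +0
// (plain '<' cannot distinguish the two zeros, hence the extra cases).
double Float64Min(double a, double b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<double>::quiet_NaN();
  if (a == b) return std::signbit(a) ? a : b;  // -0 wins the tie
  return a < b ? a : b;
}

double Float64Max(double a, double b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<double>::quiet_NaN();
  if (a == b) return std::signbit(a) ? b : a;  // +0 wins the tie
  return a > b ? a : b;
}

int main() {
  std::printf("min(1.5, 2.0) = %g\n", Float64Min(1.5, 2.0));
  std::printf("signbit(min(-0.0, 0.0)) = %d\n",
              (int)std::signbit(Float64Min(-0.0, 0.0)));  // 1: result is -0
}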
@@ -331,11 +341,11 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    FastNewObjectStub stub(masm->isolate());
     __ SmiTag(t0);
     __ EnterBuiltinFrame(cp, a1, t0);
     __ Push(a0);  // first argument
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(a0);
     __ LeaveBuiltinFrame(cp, a1, t0);
     __ SmiUntag(t0);
@@ -482,11 +492,11 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    FastNewObjectStub stub(masm->isolate());
     __ SmiTag(t0);
     __ EnterBuiltinFrame(cp, a1, t0);
     __ Push(a0);  // first argument
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(a0);
     __ LeaveBuiltinFrame(cp, a1, t0);
     __ SmiUntag(t0);
@@ -553,6 +563,8 @@
 void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
                                     bool create_implicit_receiver,
                                     bool check_derived_construct) {
+  Label post_instantiation_deopt_entry;
+
   // ----------- S t a t e -------------
   //  -- a0     : number of arguments
   //  -- a1     : constructor function
@@ -575,8 +587,8 @@
     if (create_implicit_receiver) {
       // Allocate the new receiver object.
       __ Push(a1, a3);
-      FastNewObjectStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+              RelocInfo::CODE_TARGET);
       __ mov(t4, v0);
       __ Pop(a1, a3);
 
@@ -601,6 +613,9 @@
       __ PushRoot(Heap::kTheHoleValueRootIndex);
     }
 
+    // Deoptimizer re-enters stub code here.
+    __ bind(&post_instantiation_deopt_entry);
+
     // Set up pointer to last argument.
     __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
 
@@ -634,7 +649,8 @@
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
-      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+      masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+          masm->pc_offset());
     }
 
     // Restore context from the frame.
@@ -695,6 +711,35 @@
     __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
   }
   __ Ret();
+
+  // Store the offset of the trampoline address for the deoptimizer. This is
+  // the bailout point after the receiver instantiation but before the
+  // function invocation. We need to restore some registers before continuing
+  // with the code above.
+  if (create_implicit_receiver && !is_api_function) {
+    masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+        masm->pc_offset());
+
+    // ----------- S t a t e -------------
+    //  -- a0    : newly allocated object
+    //  -- sp[0] : constructor function
+    // -----------------------------------
+
+    __ Pop(a1);
+    __ Push(a0, a0);
+
+    // Retrieve smi-tagged arguments count from the stack.
+    __ lw(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+    __ SmiUntag(a0);
+
+    // Retrieve the new target value from the stack. This was placed into the
+    // frame description in place of the receiver by the optimizing compiler.
+    __ Addu(a3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+    __ Lsa(a3, a3, a0, kPointerSizeLog2);
+    __ lw(a3, MemOperand(a3));
+
+    // Continue with constructor function invocation.
+    __ jmp(&post_instantiation_deopt_entry);
+  }
 }
 
 }  // namespace
@@ -854,18 +899,17 @@
   __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
 
   // Load suspended function and context.
-  __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
   __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+  __ lw(cp, FieldMemOperand(t0, JSFunction::kContextOffset));
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
   Label stepping_prepared;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(masm->isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  __ li(t1, Operand(last_step_action));
+  ExternalReference debug_hook =
+      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+  __ li(t1, Operand(debug_hook));
   __ lb(t1, MemOperand(t1));
-  __ Branch(&prepare_step_in_if_stepping, ge, t1, Operand(StepIn));
+  __ Branch(&prepare_step_in_if_stepping, ne, t1, Operand(zero_reg));
 
   // Flood function if we need to continue stepping in the suspended generator.
   ExternalReference debug_suspended_generator =
@@ -905,14 +949,15 @@
     __ bind(&done_loop);
   }
 
-  // Dispatch on the kind of generator object.
-  Label old_generator;
-  __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
-  __ GetObjectType(a3, a3, a3);
-  __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+  // Underlying function needs to have bytecode available.
+  if (FLAG_debug_code) {
+    __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+    __ GetObjectType(a3, a3, a3);
+    __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
+  }
 
-  // New-style (ignition/turbofan) generator object.
+  // Resume (Ignition/TurboFan) generator object.
   {
     __ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
     __ lw(a0,
@@ -927,54 +972,11 @@
     __ Jump(a2);
   }
 
-  // Old-style (full-codegen) generator object
-  __ bind(&old_generator);
-  {
-    // Enter a new JavaScript frame, and initialize its slots as they were when
-    // the generator was suspended.
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ Push(ra, fp);
-    __ Move(fp, sp);
-    __ Push(cp, t0);
-
-    // Restore the operand stack.
-    __ lw(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
-    __ lw(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
-    __ Addu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    __ Lsa(a3, a0, a3, kPointerSizeLog2 - 1);
-    {
-      Label done_loop, loop;
-      __ bind(&loop);
-      __ Branch(&done_loop, eq, a0, Operand(a3));
-      __ lw(t1, MemOperand(a0));
-      __ Push(t1);
-      __ Branch(USE_DELAY_SLOT, &loop);
-      __ addiu(a0, a0, kPointerSize);  // In delay slot.
-      __ bind(&done_loop);
-    }
-
-    // Reset operand stack so we don't leak.
-    __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
-    __ sw(t1, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
-
-    // Resume the generator function at the continuation.
-    __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
-    __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
-    __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
-    __ SmiUntag(a2);
-    __ Addu(a3, a3, Operand(a2));
-    __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
-    __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
-    __ Move(v0, a1);  // Continuation expects generator object in v0.
-    __ Jump(a3);
-  }
-
   __ bind(&prepare_step_in_if_stepping);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(a1, a2, t0);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(a1, a2);
   }
   __ Branch(USE_DELAY_SLOT, &stepping_prepared);
@@ -1038,8 +1040,7 @@
   Register debug_info = kInterpreterBytecodeArrayRegister;
   DCHECK(!debug_info.is(a0));
   __ lw(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
-  __ Branch(&load_debug_bytecode_array, ne, debug_info,
-            Operand(DebugInfo::uninitialized()));
+  __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
   __ lw(kInterpreterBytecodeArrayRegister,
         FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
   __ bind(&bytecode_array_loaded);
@@ -1051,15 +1052,15 @@
             Operand(masm->CodeObject()));  // Self-reference to this code.
 
   // Increment invocation count for the function.
-  __ lw(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
-  __ lw(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+  __ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+  __ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
   __ lw(t0, FieldMemOperand(
-                a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                        TypeFeedbackVector::kHeaderSize));
+                a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                        FeedbackVector::kHeaderSize));
   __ Addu(t0, t0, Operand(Smi::FromInt(1)));
   __ sw(t0, FieldMemOperand(
-                a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                        TypeFeedbackVector::kHeaderSize));
+                a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                        FeedbackVector::kHeaderSize));
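
The two dependent loads above reflect the new indirection this patch introduces: the closure now points at a Cell, which points at the FeedbackVector holding the count. A minimal sketch with stand-in types (not V8 declarations):

// Stand-in layout for JSFunction -> Cell -> FeedbackVector.
struct FeedbackVector { int invocation_count; };
struct Cell { FeedbackVector* value; };
struct JSFunction { Cell* feedback_vector_cell; };

// Mirrors the assembly: two loads, add one, store back.
void IncrementInvocationCount(JSFunction* function) {
  FeedbackVector* vector = function->feedback_vector_cell->value;
  vector->invocation_count += 1;  // kept as a Smi in the real vector slot
}
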
 
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
@@ -1071,6 +1072,11 @@
               Operand(BYTECODE_ARRAY_TYPE));
   }
 
+  // Reset code age.
+  DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
+  __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                  BytecodeArray::kBytecodeAgeOffset));
+
   // Load initial bytecode offset.
   __ li(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1187,7 +1193,7 @@
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
-    CallableType function_type) {
+    InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
   //  -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1203,12 +1209,14 @@
   Generate_InterpreterPushArgs(masm, t0, a2, t4, t1, &stack_overflow);
 
   // Call the target.
-  if (function_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
                                                       tail_call_mode),
             RelocInfo::CODE_TARGET);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(function_type, CallableType::kAny);
     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
@@ -1224,7 +1232,7 @@
 
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
-    MacroAssembler* masm, CallableType construct_type) {
+    MacroAssembler* masm, InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   // -- a0 : argument count (not including receiver)
   // -- a3 : new target
@@ -1241,7 +1249,7 @@
   Generate_InterpreterPushArgs(masm, a0, t4, t1, t0, &stack_overflow);
 
   __ AssertUndefinedOrAllocationSite(a2, t0);
-  if (construct_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ AssertFunction(a1);
 
     // Tail call to the function-specific construct stub (still in the caller
@@ -1250,8 +1258,12 @@
     __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
     __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
     __ Jump(at);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    // Call the constructor with a0, a1, and a3 unmodified.
+    __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(construct_type, CallableType::kAny);
+    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
     // Call the constructor with a0, a1, and a3 unmodified.
     __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   }
@@ -1376,18 +1388,24 @@
   Register argument_count = a0;
   Register closure = a1;
   Register new_target = a3;
+  Register map = a0;
+  Register index = a2;
+
+  // Do we have a valid feedback vector?
+  __ lw(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ lw(index, FieldMemOperand(index, Cell::kValueOffset));
+  __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
+                &gotta_call_runtime_no_stack);
+
   __ push(argument_count);
   __ push(new_target);
   __ push(closure);
 
-  Register map = a0;
-  Register index = a2;
   __ lw(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   __ lw(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
   __ lw(index, FieldMemOperand(map, FixedArray::kLengthOffset));
-  __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
+  __ Branch(&try_shared, lt, index, Operand(Smi::FromInt(2)));
 
-  // Find literals.
   // a3  : native context
   // a2  : length / index
   // a0  : optimized code map
@@ -1407,25 +1425,6 @@
                               SharedFunctionInfo::kOffsetToPreviousContext));
   __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
   __ Branch(&loop_bottom, ne, temp, Operand(native_context));
-  // OSR id set to none?
-  __ lw(temp, FieldMemOperand(array_pointer,
-                              SharedFunctionInfo::kOffsetToPreviousOsrAstId));
-  const int bailout_id = BailoutId::None().ToInt();
-  __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
-  // Literals available?
-  __ lw(temp, FieldMemOperand(array_pointer,
-                              SharedFunctionInfo::kOffsetToPreviousLiterals));
-  __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
-
-  // Save the literals in the closure.
-  __ lw(t0, MemOperand(sp, 0));
-  __ sw(temp, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
-  __ push(index);
-  __ RecordWriteField(t0, JSFunction::kLiteralsOffset, temp, index,
-                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ pop(index);
 
   // Code available?
   Register entry = t0;
@@ -1435,7 +1434,7 @@
   __ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
 
-  // Found literals and code. Get them into the closure and return.
+  // Found code. Get it into the closure and return.
   __ pop(closure);
   // Store code entry in the closure.
   __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1470,9 +1469,7 @@
           Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
   __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
 
-  // We found neither literals nor code.
-  __ jmp(&gotta_call_runtime);
-
+  // We found no code.
   __ bind(&try_shared);
   __ pop(closure);
   __ pop(new_target);
@@ -1484,13 +1481,13 @@
   __ And(t1, t1,
          Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
   __ Branch(&gotta_call_runtime_no_stack, ne, t1, Operand(zero_reg));
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset));
-  __ And(t1, t1, Operand(Code::KindField::kMask));
-  __ srl(t1, t1, Code::KindField::kShift);
-  __ Branch(&gotta_call_runtime_no_stack, eq, t1, Operand(Code::BUILTIN));
-  // Yes, install the full code.
+  __ Move(t1, masm->CodeObject());
+  __ Branch(&gotta_call_runtime_no_stack, eq, entry, Operand(t1));
+
+  // Install the SFI's code entry.
   __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, t1);
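
Putting the branches of this builtin together, the lookup order is: bail to the runtime when there is no valid feedback vector, prefer context-specific optimized code from the code map, then fall back to the SharedFunctionInfo's code unless that is still the CompileLazy builtin itself. A hedged sketch of that order (stand-in types, not V8's):

struct Code { bool is_compile_lazy_builtin; };
struct SharedFunctionInfo {
  Code* code;
  Code* optimized_code_for_context;  // hit in the optimized code map, or null
  bool marked_for_tier_up;
};
struct JSFunction {
  bool has_feedback_vector;
  SharedFunctionInfo* shared;
  Code* code_entry;
};

Code* CompileLazyRuntime(JSFunction*) { return nullptr; }  // stub slow path

void InstallCode(JSFunction* fn) {
  SharedFunctionInfo* sfi = fn->shared;
  if (!fn->has_feedback_vector) {             // gotta_call_runtime_no_stack
    fn->code_entry = CompileLazyRuntime(fn);
  } else if (Code* opt = sfi->optimized_code_for_context) {
    fn->code_entry = opt;                     // found code; install and return
  } else if (sfi->marked_for_tier_up || sfi->code->is_compile_lazy_builtin) {
    fn->code_entry = CompileLazyRuntime(fn);  // nothing usable yet
  } else {
    fn->code_entry = sfi->code;               // install the SFI's code entry
  }
}
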
@@ -1605,14 +1602,9 @@
   __ Jump(a0);
 }
 
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
-  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
-  }                                                           \
-  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                              \
+  void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+    GenerateMakeCodeYoungAgainCommon(masm);                               \
   }
 CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2128,20 +2120,20 @@
   __ bind(&target_not_constructor);
   {
     __ sw(a1, MemOperand(sp));
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 
   // 4c. The new.target is not a constructor, throw an appropriate TypeError.
   __ bind(&new_target_not_constructor);
   {
     __ sw(a3, MemOperand(sp));
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 }
 
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ sll(a0, a0, kSmiTagSize);
-  __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ li(t0, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
   __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
                           kPointerSize));
@@ -2173,7 +2165,8 @@
 
   // Create the list of arguments from the array-like argumentsList.
   {
-    Label create_arguments, create_array, create_runtime, done_create;
+    Label create_arguments, create_array, create_holey_array, create_runtime,
+        done_create;
     __ JumpIfSmi(a0, &create_runtime);
 
     // Load the map of argumentsList into a2.
@@ -2189,8 +2182,7 @@
     __ Branch(&create_arguments, eq, a2, Operand(at));
 
     // Check if argumentsList is a fast JSArray.
-    __ lw(v0, FieldMemOperand(a2, HeapObject::kMapOffset));
-    __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+    __ lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
     __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
 
     // Ask the runtime to create the list (actually a FixedArray).
@@ -2216,15 +2208,32 @@
     __ mov(a0, t0);
     __ Branch(&done_create);
 
+    // For holey JSArrays we need to check that the array prototype chain
+    // protector is intact and that the prototype actually is the initial
+    // Array.prototype.
+    __ bind(&create_holey_array);
+    __ lw(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
+    __ lw(at, ContextMemOperand(t0, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+    __ Branch(&create_runtime, ne, a2, Operand(at));
+    __ LoadRoot(at, Heap::kArrayProtectorRootIndex);
+    __ lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
+    __ Branch(&create_runtime, ne, a2,
+              Operand(Smi::FromInt(Isolate::kProtectorValid)));
+    __ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
+    __ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
+    __ SmiUntag(a2);
+    __ Branch(&done_create);
+
     // Try to create the list from a JSArray object.
     __ bind(&create_array);
-    __ lw(a2, FieldMemOperand(a2, Map::kBitField2Offset));
-    __ DecodeField<Map::ElementsKindBits>(a2);
+    __ lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(t1);
     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
     STATIC_ASSERT(FAST_ELEMENTS == 2);
-    __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS));
-    __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS));
+    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+    __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_SMI_ELEMENTS));
+    __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_ELEMENTS));
+    __ Branch(&create_runtime, hi, t1, Operand(FAST_ELEMENTS));
     __ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
     __ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
     __ SmiUntag(a2);
@@ -2259,11 +2268,15 @@
   // Push arguments onto the stack (thisArgument is already on the stack).
   {
     __ mov(t0, zero_reg);
-    Label done, loop;
+    Label done, push, loop;
+    __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
     __ bind(&loop);
     __ Branch(&done, eq, t0, Operand(a2));
     __ Lsa(at, a0, t0, kPointerSizeLog2);
     __ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
+    __ Branch(&push, ne, t1, Operand(at));
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
     __ Push(at);
     __ Addu(t0, t0, Operand(1));
     __ Branch(&loop);
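
The extra `push` label implements hole elision: each element loaded from the backing store is compared against the-hole and replaced by undefined before being pushed, since holey arrays mark absent elements with a sentinel. Roughly, in standalone C++ (sentinel objects are stand-ins for V8's heap roots):

#include <vector>

struct Object {};
Object kTheHoleSentinel, kUndefinedSentinel;  // stand-ins for heap roots

// Push each element, mapping the-hole to undefined, as the loop above does.
void PushElements(Object* const* elements, int length,
                  std::vector<Object*>* stack) {
  for (int i = 0; i < length; ++i) {
    Object* value = elements[i];
    if (value == &kTheHoleSentinel) value = &kUndefinedSentinel;
    stack->push_back(value);
  }
}
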
@@ -2282,6 +2295,72 @@
   }
 }
 
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+                                           Handle<Code> code) {
+  // ----------- S t a t e -------------
+  //  -- a1    : the target to call (can be any Object)
+  //  -- a2    : start index (to support rest parameters)
+  //  -- ra    : return address.
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ Branch(&arguments_adaptor, eq, a0,
+            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+  {
+    __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    __ lw(a0, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a0,
+          FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ mov(a3, fp);
+  }
+  __ Branch(&arguments_done);
+  __ bind(&arguments_adaptor);
+  {
+    // Just get the length from the ArgumentsAdaptorFrame.
+    __ lw(a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  }
+  __ bind(&arguments_done);
+
+  Label stack_empty, stack_done, stack_overflow;
+  __ SmiUntag(a0);
+  __ Subu(a0, a0, a2);
+  __ Branch(&stack_empty, le, a0, Operand(zero_reg));
+  {
+    // Check for stack overflow.
+    Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow);
+
+    // Forward the arguments from the caller frame.
+    {
+      Label loop;
+      __ mov(a2, a0);
+      __ bind(&loop);
+      {
+        __ Lsa(at, a3, a2, kPointerSizeLog2);
+        __ lw(at, MemOperand(at, 1 * kPointerSize));
+        __ push(at);
+        __ Subu(a2, a2, Operand(1));
+        __ Branch(&loop, ne, a2, Operand(zero_reg));
+      }
+    }
+  }
+  __ Branch(&stack_done);
+  __ bind(&stack_overflow);
+  __ TailCallRuntime(Runtime::kThrowStackOverflow);
+  __ bind(&stack_empty);
+  {
+    // We just pass the receiver, which is already on the stack.
+    __ li(a0, Operand(0));
+  }
+  __ bind(&stack_done);
+
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
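
In outline, the builtin picks its argument count from one of two places: the adaptor frame's length slot when the caller was invoked with a mismatched arity, or the callee's declared formal parameter count otherwise, then forwards everything past the start index. A condensed sketch (field names are illustrative):

struct CallerFrame {
  bool is_arguments_adaptor;
  int adaptor_length;          // actual argument count, smi-untagged
  int formal_parameter_count;  // from the SharedFunctionInfo
};

// Number of arguments to copy out of the caller frame (never negative).
int ArgumentsToForward(const CallerFrame& frame, int start_index) {
  int count = frame.is_arguments_adaptor ? frame.adaptor_length
                                         : frame.formal_parameter_count;
  int remaining = count - start_index;
  return remaining > 0 ? remaining : 0;  // the stack_empty case
}
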
+
 namespace {
 
 // Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2332,7 +2411,7 @@
     __ lw(scratch3,
           MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ Branch(&no_interpreter_frame, ne, scratch3,
-              Operand(Smi::FromInt(StackFrame::STUB)));
+              Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
     __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
     __ bind(&no_interpreter_frame);
   }
@@ -2344,7 +2423,7 @@
   __ lw(scratch3,
         MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Branch(&no_arguments_adaptor, ne, scratch3,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // Drop current frame and load arguments count from arguments adaptor frame.
   __ mov(fp, scratch2);
@@ -2640,6 +2719,151 @@
   }
 }
 
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+  Register argc = a0;
+  Register constructor = a1;
+  Register new_target = a3;
+
+  Register scratch = t0;
+  Register scratch2 = t1;
+
+  Register spread = a2;
+  Register spread_map = t3;
+
+  Register spread_len = t3;
+
+  Register native_context = t4;
+
+  Label runtime_call, push_args;
+  __ lw(spread, MemOperand(sp, 0));
+  __ JumpIfSmi(spread, &runtime_call);
+  __ lw(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+  __ lw(native_context, NativeContextMemOperand());
+
+  // Check that the spread is an array.
+  __ lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
+  __ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+  // Check that we have the original ArrayPrototype.
+  __ lw(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+  __ lw(scratch2, ContextMemOperand(native_context,
+                                    Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+  __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
+  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+  __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ Branch(&runtime_call, ne, scratch,
+            Operand(Smi::FromInt(Isolate::kProtectorValid)));
+
+  // Check that the map of the initial array iterator hasn't changed.
+  __ lw(scratch,
+        ContextMemOperand(native_context,
+                          Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ lw(scratch2,
+        ContextMemOperand(native_context,
+                          Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+  __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  Label no_protector_check;
+  __ lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+  __ DecodeField<Map::ElementsKindBits>(scratch);
+  __ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
+  // For non-FastHoley kinds, we can skip the protector check.
+  __ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
+  __ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
+  // Check the ArrayProtector cell.
+  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+  __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ Branch(&runtime_call, ne, scratch,
+            Operand(Smi::FromInt(Isolate::kProtectorValid)));
+
+  __ bind(&no_protector_check);
+  // Load the FixedArray backing store, but use the length from the array.
+  __ lw(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
+  __ SmiUntag(spread_len);
+  __ lw(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+  __ Branch(&push_args);
+
+  __ bind(&runtime_call);
+  {
+    // Call the builtin for the result of the spread.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(argc);
+    __ Push(constructor, new_target, argc, spread);
+    __ CallRuntime(Runtime::kSpreadIterableFixed);
+    __ mov(spread, v0);
+    __ Pop(constructor, new_target, argc);
+    __ SmiUntag(argc);
+  }
+
+  {
+    // Calculate the new nargs including the result of the spread.
+    __ lw(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
+    __ SmiUntag(spread_len);
+
+    __ bind(&push_args);
+    // argc += spread_len - 1. Subtract 1 for the spread itself.
+    __ Addu(argc, argc, spread_len);
+    __ Subu(argc, argc, Operand(1));
+
+    // Pop the spread argument off the stack.
+    __ Pop(scratch);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+    // Make scratch the space we have left. The stack might already be
+    // overflowed here, which will cause scratch to become negative.
+    __ Subu(scratch, sp, scratch);
+    // Check if the arguments will overflow the stack.
+    __ sll(at, spread_len, kPointerSizeLog2);
+    __ Branch(&done, gt, scratch, Operand(at));  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
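
The comparison above is signed on purpose: `sp - real_stack_limit` can already be negative when the stack is blown, and a signed `gt` still routes that case to the runtime throw. The same arithmetic in C++, assuming a downward-growing stack and 4-byte pointers as on 32-bit MIPS:

#include <cstdint>

constexpr int kPointerSizeLog2 = 2;  // 32-bit MIPS assumption

bool ArgumentsFit(intptr_t sp, intptr_t real_stack_limit, int arg_count) {
  intptr_t space_left = sp - real_stack_limit;  // may be negative
  intptr_t bytes_needed = static_cast<intptr_t>(arg_count) << kPointerSizeLog2;
  return space_left > bytes_needed;             // signed comparison
}
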
+
+  // Put the evaluated spread onto the stack as additional arguments.
+  {
+    __ mov(scratch, zero_reg);
+    Label done, push, loop;
+    __ bind(&loop);
+    __ Branch(&done, eq, scratch, Operand(spread_len));
+    __ Lsa(scratch2, spread, scratch, kPointerSizeLog2);
+    __ lw(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+    __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+    __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ Push(scratch2);
+    __ Addu(scratch, scratch, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done);
+  }
+}
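
All of the checks in this helper answer one question: is iterating the spread observably identical to reading its elements in order? A compact restatement of the eligibility test, with booleans standing in for the map, protector-cell, and elements-kind probes above:

enum ElementsKind {  // ordering matches the STATIC_ASSERTs above
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,
};

struct SpreadChecks {
  bool is_js_array;
  bool has_initial_array_prototype;
  bool array_iterator_protector_valid;
  bool initial_iterator_map_unchanged;
  bool array_protector_valid;  // guards hole reads through the prototype chain
  ElementsKind kind;
};

bool CanSpreadWithoutIterating(const SpreadChecks& c) {
  if (!c.is_js_array || !c.has_initial_array_prototype) return false;
  if (!c.array_iterator_protector_valid) return false;
  if (!c.initial_iterator_map_unchanged) return false;
  if (c.kind > FAST_HOLEY_ELEMENTS) return false;  // only fast kinds qualify
  bool holey = c.kind == FAST_HOLEY_SMI_ELEMENTS ||
               c.kind == FAST_HOLEY_ELEMENTS;
  return !holey || c.array_protector_valid;  // holey kinds need the protector
}
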
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the target to call (can be any Object).
+  // -----------------------------------
+
+  // CheckSpreadAndPushToStack will push a3 to save it.
+  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            TailCallMode::kDisallow),
+          RelocInfo::CODE_TARGET);
+}
+
 // static
 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -2822,6 +3046,19 @@
 }
 
 // static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the constructor to call (can be any Object)
+  //  -- a3 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+// static
 void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0 : requested object size (untagged)
diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc
index a6abb55..8fcce9f 100644
--- a/src/builtins/mips64/builtins-mips64.cc
+++ b/src/builtins/mips64/builtins-mips64.cc
@@ -139,7 +139,7 @@
   __ LoadRoot(t1, root_index);
   __ ldc1(f0, FieldMemOperand(t1, HeapNumber::kValueOffset));
 
-  Label done_loop, loop;
+  Label done_loop, loop, done;
   __ mov(a3, a0);
   __ bind(&loop);
   {
@@ -195,15 +195,25 @@
     // accumulator value on the left hand side (f0) and the next parameter value
     // on the right hand side (f2).
     // We need to work out which HeapNumber (or smi) the result came from.
-    Label compare_nan;
+    Label compare_nan, ool_min, ool_max;
     __ BranchF(nullptr, &compare_nan, eq, f0, f2);
     __ Move(a4, f0);
     if (kind == MathMaxMinKind::kMin) {
-      __ MinNaNCheck_d(f0, f0, f2);
+      __ Float64Min(f0, f0, f2, &ool_min);
     } else {
       DCHECK(kind == MathMaxMinKind::kMax);
-      __ MaxNaNCheck_d(f0, f0, f2);
+      __ Float64Max(f0, f0, f2, &ool_max);
     }
+    __ jmp(&done);
+
+    __ bind(&ool_min);
+    __ Float64MinOutOfLine(f0, f0, f2);
+    __ jmp(&done);
+
+    __ bind(&ool_max);
+    __ Float64MaxOutOfLine(f0, f0, f2);
+
+    __ bind(&done);
     __ Move(at, f0);
     __ Branch(&loop, eq, a4, Operand(at));
     __ mov(t1, a2);
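
The `Float64Min`/`Float64Max` pair used here splits the work: the macro instruction handles the ordinary ordered case inline and branches out of line for the awkward inputs (NaNs, and equal values where +0 and -0 must be distinguished). One plausible reading of the resulting semantics, matching Math.min; the exact out-of-line trigger condition is an assumption here, not lifted from the macro-assembler sources:

#include <cmath>
#include <limits>

double Float64Min(double a, double b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<double>::quiet_NaN();  // NaN propagates
  if (a == b)                        // covers +0 vs -0, which compare equal
    return std::signbit(a) ? a : b;  // min prefers -0.0
  return a < b ? a : b;              // the inline fast path
}
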
@@ -329,11 +339,11 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    FastNewObjectStub stub(masm->isolate());
     __ SmiTag(t0);
     __ EnterBuiltinFrame(cp, a1, t0);
     __ Push(a0);
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(a0);
     __ LeaveBuiltinFrame(cp, a1, t0);
     __ SmiUntag(t0);
@@ -481,11 +491,11 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    FastNewObjectStub stub(masm->isolate());
     __ SmiTag(t0);
     __ EnterBuiltinFrame(cp, a1, t0);
     __ Push(a0);
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(a0);
     __ LeaveBuiltinFrame(cp, a1, t0);
     __ SmiUntag(t0);
@@ -551,6 +561,8 @@
 void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
                                     bool create_implicit_receiver,
                                     bool check_derived_construct) {
+  Label post_instantiation_deopt_entry;
+
   // ----------- S t a t e -------------
   //  -- a0     : number of arguments
   //  -- a1     : constructor function
@@ -572,8 +584,8 @@
 
     if (create_implicit_receiver) {
       __ Push(a1, a3);
-      FastNewObjectStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+              RelocInfo::CODE_TARGET);
       __ mov(t0, v0);
       __ Pop(a1, a3);
 
@@ -595,6 +607,9 @@
       __ PushRoot(Heap::kTheHoleValueRootIndex);
     }
 
+    // Deoptimizer re-enters stub code here.
+    __ bind(&post_instantiation_deopt_entry);
+
     // Set up pointer to last argument.
     __ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
 
@@ -628,7 +643,8 @@
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
-      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+      masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+          masm->pc_offset());
     }
 
     // Restore context from the frame.
@@ -690,6 +706,35 @@
     __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
   }
   __ Ret();
+
+  // Store offset of trampoline address for deoptimizer. This is the bailout
+  // point after the receiver instantiation but before the function invocation.
+  // We need to restore some registers so that execution can resume in the
+  // code above.
+  if (create_implicit_receiver && !is_api_function) {
+    masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+        masm->pc_offset());
+
+    // ----------- S t a t e -------------
+    //  -- a0    : newly allocated object
+    //  -- sp[0] : constructor function
+    // -----------------------------------
+
+    __ Pop(a1);
+    __ Push(a0, a0);
+
+    // Retrieve smi-tagged arguments count from the stack.
+    __ ld(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+    __ SmiUntag(a0);
+
+    // Retrieve the new target value from the stack. This was placed into the
+    // frame description in place of the receiver by the optimizing compiler.
+    __ Daddu(a3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+    __ Dlsa(a3, a3, a0, kPointerSizeLog2);
+    __ ld(a3, MemOperand(a3));
+
+    // Continue with constructor function invocation.
+    __ jmp(&post_instantiation_deopt_entry);
+  }
 }
 
 }  // namespace
@@ -730,18 +775,17 @@
   __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
 
   // Load suspended function and context.
-  __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
   __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+  __ ld(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
   Label stepping_prepared;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(masm->isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  __ li(a5, Operand(last_step_action));
+  ExternalReference debug_hook =
+      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+  __ li(a5, Operand(debug_hook));
   __ lb(a5, MemOperand(a5));
-  __ Branch(&prepare_step_in_if_stepping, ge, a5, Operand(StepIn));
+  __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
 
   // Flood function if we need to continue stepping in the suspended generator.
   ExternalReference debug_suspended_generator =
@@ -781,14 +825,15 @@
     __ bind(&done_loop);
   }
 
-  // Dispatch on the kind of generator object.
-  Label old_generator;
-  __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
-  __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
-  __ GetObjectType(a3, a3, a3);
-  __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+  // Underlying function needs to have bytecode available.
+  if (FLAG_debug_code) {
+    __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+    __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+    __ GetObjectType(a3, a3, a3);
+    __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
+  }
 
-  // New-style (ignition/turbofan) generator object.
+  // Resume (Ignition/TurboFan) generator object.
   {
     __ ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
     __ lw(a0,
@@ -802,55 +847,11 @@
     __ Jump(a2);
   }
 
-  // Old-style (full-codegen) generator object
-  __ bind(&old_generator);
-  {
-    // Enter a new JavaScript frame, and initialize its slots as they were when
-    // the generator was suspended.
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ Push(ra, fp);
-    __ Move(fp, sp);
-    __ Push(cp, a4);
-
-    // Restore the operand stack.
-    __ ld(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
-    __ ld(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
-    __ SmiUntag(a3);
-    __ Daddu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-    __ Dlsa(a3, a0, a3, kPointerSizeLog2);
-    {
-      Label done_loop, loop;
-      __ bind(&loop);
-      __ Branch(&done_loop, eq, a0, Operand(a3));
-      __ ld(a5, MemOperand(a0));
-      __ Push(a5);
-      __ Branch(USE_DELAY_SLOT, &loop);
-      __ daddiu(a0, a0, kPointerSize);  // In delay slot.
-      __ bind(&done_loop);
-    }
-
-    // Reset operand stack so we don't leak.
-    __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
-    __ sd(a5, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
-
-    // Resume the generator function at the continuation.
-    __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
-    __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
-    __ Daddu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
-    __ SmiUntag(a2);
-    __ Daddu(a3, a3, Operand(a2));
-    __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
-    __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
-    __ Move(v0, a1);  // Continuation expects generator object in v0.
-    __ Jump(a3);
-  }
-
   __ bind(&prepare_step_in_if_stepping);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(a1, a2, a4);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(a1, a2);
   }
   __ Branch(USE_DELAY_SLOT, &stepping_prepared);
@@ -1030,8 +1031,7 @@
   Register debug_info = kInterpreterBytecodeArrayRegister;
   DCHECK(!debug_info.is(a0));
   __ ld(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
-  __ Branch(&load_debug_bytecode_array, ne, debug_info,
-            Operand(DebugInfo::uninitialized()));
+  __ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
   __ ld(kInterpreterBytecodeArrayRegister,
         FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
   __ bind(&bytecode_array_loaded);
@@ -1043,15 +1043,15 @@
             Operand(masm->CodeObject()));  // Self-reference to this code.
 
   // Increment invocation count for the function.
-  __ ld(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
-  __ ld(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+  __ ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+  __ ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
   __ ld(a4, FieldMemOperand(
-                a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                        TypeFeedbackVector::kHeaderSize));
+                a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                        FeedbackVector::kHeaderSize));
   __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
   __ sd(a4, FieldMemOperand(
-                a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                        TypeFeedbackVector::kHeaderSize));
+                a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                        FeedbackVector::kHeaderSize));
 
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
@@ -1063,6 +1063,11 @@
               Operand(BYTECODE_ARRAY_TYPE));
   }
 
+  // Reset code age.
+  DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
+  __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                  BytecodeArray::kBytecodeAgeOffset));
+
   // Load initial bytecode offset.
   __ li(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1179,7 +1184,7 @@
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
-    CallableType function_type) {
+    InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
   //  -- a2 : the address of the first argument to be pushed. Subsequent
@@ -1195,12 +1200,14 @@
   Generate_InterpreterPushArgs(masm, a3, a2, a4, t0, &stack_overflow);
 
   // Call the target.
-  if (function_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
                                                       tail_call_mode),
             RelocInfo::CODE_TARGET);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(function_type, CallableType::kAny);
     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
@@ -1216,7 +1223,7 @@
 
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
-    MacroAssembler* masm, CallableType construct_type) {
+    MacroAssembler* masm, InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   // -- a0 : argument count (not including receiver)
   // -- a3 : new target
@@ -1233,7 +1240,7 @@
   Generate_InterpreterPushArgs(masm, a0, a4, a5, t0, &stack_overflow);
 
   __ AssertUndefinedOrAllocationSite(a2, t0);
-  if (construct_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ AssertFunction(a1);
 
     // Tail call to the function-specific construct stub (still in the caller
@@ -1242,8 +1249,12 @@
     __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
     __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
     __ Jump(at);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    // Call the constructor with a0, a1, and a3 unmodified.
+    __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(construct_type, CallableType::kAny);
+    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
     // Call the constructor with a0, a1, and a3 unmodified.
     __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   }
@@ -1318,9 +1329,9 @@
   }
 
   // Get the target bytecode offset from the frame.
-  __ ld(kInterpreterBytecodeOffsetRegister,
-        MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+  __ lw(
+      kInterpreterBytecodeOffsetRegister,
+      UntagSmiMemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
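
`UntagSmiMemOperand` folds the untagging into the load: on 64-bit V8 a Smi keeps its 32-bit payload in the upper half of the word, so a 32-bit `lw` aimed at the high half yields the untagged value without a separate shift. A little-endian sketch of that assumed layout:

#include <cassert>
#include <cstdint>
#include <cstring>

// Assumed encoding: payload in the upper 32 bits, zero tag in the lower 32.
uint64_t SmiTag(int32_t value) {
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
}

// Equivalent of lw from UntagSmiMemOperand: read only the upper word,
// which sits at byte offset +4 on a little-endian target.
int32_t LoadUntaggedSmi(const uint64_t* slot) {
  int32_t payload;
  std::memcpy(&payload, reinterpret_cast<const char*>(slot) + 4,
              sizeof(payload));
  return payload;
}

int main() {
  uint64_t slot = SmiTag(-42);
  assert(LoadUntaggedSmi(&slot) == -42);
}
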
 
   // Dispatch to the target bytecode.
   __ Daddu(a1, kInterpreterBytecodeArrayRegister,
@@ -1368,18 +1379,24 @@
   Register argument_count = a0;
   Register closure = a1;
   Register new_target = a3;
+  Register map = a0;
+  Register index = a2;
+
+  // Do we have a valid feedback vector?
+  __ ld(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ ld(index, FieldMemOperand(index, Cell::kValueOffset));
+  __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
+                &gotta_call_runtime_no_stack);
+
   __ push(argument_count);
   __ push(new_target);
   __ push(closure);
 
-  Register map = a0;
-  Register index = a2;
   __ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   __ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
   __ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset));
-  __ Branch(&gotta_call_runtime, lt, index, Operand(Smi::FromInt(2)));
+  __ Branch(&try_shared, lt, index, Operand(Smi::FromInt(2)));
 
-  // Find literals.
   // a3  : native context
   // a2  : length / index
   // a0  : optimized code map
@@ -1399,25 +1416,6 @@
                               SharedFunctionInfo::kOffsetToPreviousContext));
   __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
   __ Branch(&loop_bottom, ne, temp, Operand(native_context));
-  // OSR id set to none?
-  __ ld(temp, FieldMemOperand(array_pointer,
-                              SharedFunctionInfo::kOffsetToPreviousOsrAstId));
-  const int bailout_id = BailoutId::None().ToInt();
-  __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
-  // Literals available?
-  __ ld(temp, FieldMemOperand(array_pointer,
-                              SharedFunctionInfo::kOffsetToPreviousLiterals));
-  __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
-
-  // Save the literals in the closure.
-  __ ld(a4, MemOperand(sp, 0));
-  __ sd(temp, FieldMemOperand(a4, JSFunction::kLiteralsOffset));
-  __ push(index);
-  __ RecordWriteField(a4, JSFunction::kLiteralsOffset, temp, index,
-                      kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ pop(index);
 
   // Code available?
   Register entry = a4;
@@ -1427,7 +1425,7 @@
   __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
 
-  // Found literals and code. Get them into the closure and return.
+  // Found code. Get it into the closure and return.
   __ pop(closure);
   // Store code entry in the closure.
   __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1462,9 +1460,7 @@
            Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
   __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
 
-  // We found neither literals nor code.
-  __ jmp(&gotta_call_runtime);
-
+  // We found no code.
   __ bind(&try_shared);
   __ pop(closure);
   __ pop(new_target);
@@ -1476,13 +1472,13 @@
   __ And(a5, a5,
          Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
   __ Branch(&gotta_call_runtime_no_stack, ne, a5, Operand(zero_reg));
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ lw(a5, FieldMemOperand(entry, Code::kFlagsOffset));
-  __ And(a5, a5, Operand(Code::KindField::kMask));
-  __ dsrl(a5, a5, Code::KindField::kShift);
-  __ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN));
-  // Yes, install the full code.
+  __ Move(t1, masm->CodeObject());
+  __ Branch(&gotta_call_runtime_no_stack, eq, entry, Operand(t1));
+
+  // Install the SFI's code entry.
   __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
   __ RecordWriteCodeEntryField(closure, entry, a5);
@@ -1596,14 +1592,9 @@
   __ Jump(a0);
 }
 
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
-  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
-  }                                                           \
-  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                              \
+  void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+    GenerateMakeCodeYoungAgainCommon(masm);                               \
   }
 CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -1686,8 +1677,7 @@
   }
 
   // Get the full codegen state from the stack and untag it -> a6.
-  __ ld(a6, MemOperand(sp, 0 * kPointerSize));
-  __ SmiUntag(a6);
+  __ lw(a6, UntagSmiMemOperand(sp, 0 * kPointerSize));
   // Switch on the state.
   Label with_tos_register, unknown_state;
   __ Branch(
@@ -1855,10 +1845,10 @@
 
   // Load the OSR entrypoint offset from the deoptimization data.
   // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
-  __ ld(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
-                               DeoptimizationInputData::kOsrPcOffsetIndex) -
-                               kHeapObjectTag));
-  __ SmiUntag(a1);
+  __ lw(a1,
+        UntagSmiMemOperand(a1, FixedArray::OffsetOfElementAt(
+                                   DeoptimizationInputData::kOsrPcOffsetIndex) -
+                                   kHeapObjectTag));
 
   // Compute the target address = code_obj + header_size + osr_offset
   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -1886,52 +1876,56 @@
   //  -- sp[8] : receiver
   // -----------------------------------
 
+  Register argc = a0;
+  Register arg_array = a0;
+  Register receiver = a1;
+  Register this_arg = a2;
+  Register undefined_value = a3;
+  Register scratch = a4;
+
+  __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
   // 1. Load receiver into a1, argArray into a0 (if present), remove all
   // arguments from the stack (including the receiver), and push thisArg (if
   // present) instead.
   {
-    Label no_arg;
-    Register scratch = a4;
-    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-    __ mov(a3, a2);
-    // Dlsa() cannot be used hare as scratch value used later.
-    __ dsll(scratch, a0, kPointerSizeLog2);
-    __ Daddu(a0, sp, Operand(scratch));
-    __ ld(a1, MemOperand(a0));  // receiver
-    __ Dsubu(a0, a0, Operand(kPointerSize));
-    __ Branch(&no_arg, lt, a0, Operand(sp));
-    __ ld(a2, MemOperand(a0));  // thisArg
-    __ Dsubu(a0, a0, Operand(kPointerSize));
-    __ Branch(&no_arg, lt, a0, Operand(sp));
-    __ ld(a3, MemOperand(a0));  // argArray
-    __ bind(&no_arg);
-    __ Daddu(sp, sp, Operand(scratch));
-    __ sd(a2, MemOperand(sp));
-    __ mov(a0, a3);
+    // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+
+    __ Dsubu(sp, sp, Operand(2 * kPointerSize));
+    __ Dlsa(sp, sp, argc, kPointerSizeLog2);
+    __ mov(scratch, argc);
+    __ Pop(this_arg, arg_array);                   // Overwrite argc
+    __ Movz(arg_array, undefined_value, scratch);  // if argc == 0
+    __ Movz(this_arg, undefined_value, scratch);   // if argc == 0
+    __ Dsubu(scratch, scratch, Operand(1));
+    __ Movz(arg_array, undefined_value, scratch);  // if argc == 1
+    __ ld(receiver, MemOperand(sp));
+    __ sd(this_arg, MemOperand(sp));
   }
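
The rewritten prologue trades the old branchy argument scan for a branch-free pattern: pad the stack so a fixed two-slot pop is always safe, then let `Movz` (move-if-zero) substitute undefined for whichever arguments were absent, keyed on `argc` counting down. The trick in plain C++:

// movz semantics: dest = (cond == 0) ? src : dest.
static void Movz(int* dest, int src, int cond) {
  if (cond == 0) *dest = src;
}

// padded_args must have two readable slots; the assembly guarantees this by
// claiming dummy stack slots before popping. kUndefined is a stand-in value.
void NormalizeApplyArgs(const int* padded_args, int argc, int kUndefined,
                        int* this_arg, int* arg_array) {
  *this_arg = padded_args[0];
  *arg_array = padded_args[1];
  int scratch = argc;
  Movz(arg_array, kUndefined, scratch);      // absent if argc == 0
  Movz(this_arg, kUndefined, scratch);       // absent if argc == 0
  Movz(arg_array, kUndefined, scratch - 1);  // absent if argc == 1
}

ReflectApply and ReflectConstruct below use the same scheme with a three-slot pop; ReflectConstruct additionally defaults new.target to target via `Movz(new_target, target, ...)`.
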
 
   // ----------- S t a t e -------------
   //  -- a0    : argArray
   //  -- a1    : receiver
+  //  -- a3    : undefined root value
   //  -- sp[0] : thisArg
   // -----------------------------------
 
   // 2. Make sure the receiver is actually callable.
   Label receiver_not_callable;
-  __ JumpIfSmi(a1, &receiver_not_callable);
-  __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ JumpIfSmi(receiver, &receiver_not_callable);
+  __ ld(a4, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
   __ And(a4, a4, Operand(1 << Map::kIsCallable));
   __ Branch(&receiver_not_callable, eq, a4, Operand(zero_reg));
 
   // 3. Tail call with no arguments if argArray is null or undefined.
   Label no_arguments;
-  __ JumpIfRoot(a0, Heap::kNullValueRootIndex, &no_arguments);
-  __ JumpIfRoot(a0, Heap::kUndefinedValueRootIndex, &no_arguments);
+  __ JumpIfRoot(arg_array, Heap::kNullValueRootIndex, &no_arguments);
+  __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
 
   // 4a. Apply the receiver to the given argArray (passing undefined for
   // new.target).
-  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+  DCHECK(undefined_value.is(a3));
   __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
 
   // 4b. The argArray is either null or undefined, so we tail call without any
@@ -1939,13 +1933,14 @@
   __ bind(&no_arguments);
   {
     __ mov(a0, zero_reg);
+    DCHECK(receiver.is(a1));
     __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
   }
 
   // 4c. The receiver is not callable, throw an appropriate TypeError.
   __ bind(&receiver_not_callable);
   {
-    __ sd(a1, MemOperand(sp));
+    __ sd(receiver, MemOperand(sp));
     __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
   }
 }
@@ -1995,62 +1990,67 @@
 void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0     : argc
-  //  -- sp[0]  : argumentsList
-  //  -- sp[4]  : thisArgument
-  //  -- sp[8]  : target
+  //  -- sp[0]  : argumentsList (if argc == 3)
+  //  -- sp[4]  : thisArgument  (if argc >= 2)
+  //  -- sp[8]  : target        (if argc >= 1)
   //  -- sp[12] : receiver
   // -----------------------------------
 
+  Register argc = a0;
+  Register arguments_list = a0;
+  Register target = a1;
+  Register this_argument = a2;
+  Register undefined_value = a3;
+  Register scratch = a4;
+
+  __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
   // 1. Load target into a1 (if present), argumentsList into a0 (if present),
   // remove all arguments from the stack (including the receiver), and push
   // thisArgument (if present) instead.
   {
-    Label no_arg;
-    Register scratch = a4;
-    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
-    __ mov(a2, a1);
-    __ mov(a3, a1);
-    __ dsll(scratch, a0, kPointerSizeLog2);
-    __ mov(a0, scratch);
-    __ Dsubu(a0, a0, Operand(kPointerSize));
-    __ Branch(&no_arg, lt, a0, Operand(zero_reg));
-    __ Daddu(a0, sp, Operand(a0));
-    __ ld(a1, MemOperand(a0));  // target
-    __ Dsubu(a0, a0, Operand(kPointerSize));
-    __ Branch(&no_arg, lt, a0, Operand(sp));
-    __ ld(a2, MemOperand(a0));  // thisArgument
-    __ Dsubu(a0, a0, Operand(kPointerSize));
-    __ Branch(&no_arg, lt, a0, Operand(sp));
-    __ ld(a3, MemOperand(a0));  // argumentsList
-    __ bind(&no_arg);
-    __ Daddu(sp, sp, Operand(scratch));
-    __ sd(a2, MemOperand(sp));
-    __ mov(a0, a3);
+    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+
+    __ Dsubu(sp, sp, Operand(3 * kPointerSize));
+    __ Dlsa(sp, sp, argc, kPointerSizeLog2);
+    __ mov(scratch, argc);
+    __ Pop(target, this_argument, arguments_list);
+    __ Movz(arguments_list, undefined_value, scratch);  // if argc == 0
+    __ Movz(this_argument, undefined_value, scratch);   // if argc == 0
+    __ Movz(target, undefined_value, scratch);          // if argc == 0
+    __ Dsubu(scratch, scratch, Operand(1));
+    __ Movz(arguments_list, undefined_value, scratch);  // if argc == 1
+    __ Movz(this_argument, undefined_value, scratch);   // if argc == 1
+    __ Dsubu(scratch, scratch, Operand(1));
+    __ Movz(arguments_list, undefined_value, scratch);  // if argc == 2
+
+    __ sd(this_argument, MemOperand(sp, 0));  // Overwrite receiver
   }
 
   // ----------- S t a t e -------------
   //  -- a0    : argumentsList
   //  -- a1    : target
+  //  -- a3    : undefined root value
   //  -- sp[0] : thisArgument
   // -----------------------------------
 
   // 2. Make sure the target is actually callable.
   Label target_not_callable;
-  __ JumpIfSmi(a1, &target_not_callable);
-  __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ JumpIfSmi(target, &target_not_callable);
+  __ ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
   __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
   __ And(a4, a4, Operand(1 << Map::kIsCallable));
   __ Branch(&target_not_callable, eq, a4, Operand(zero_reg));
 
   // 3a. Apply the target to the given argumentsList (passing undefined for
   // new.target).
-  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+  DCHECK(undefined_value.is(a3));
   __ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
 
   // 3b. The target is not callable, throw an appropriate TypeError.
   __ bind(&target_not_callable);
   {
-    __ sd(a1, MemOperand(sp));
+    __ sd(target, MemOperand(sp));
     __ TailCallRuntime(Runtime::kThrowApplyNonFunction);
   }
 }
@@ -2058,59 +2058,61 @@
 void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0     : argc
-  //  -- sp[0]  : new.target (optional)
-  //  -- sp[4]  : argumentsList
-  //  -- sp[8]  : target
+  //  -- sp[0]  : new.target (optional) (dummy value if argc <= 2)
+  //  -- sp[4]  : argumentsList         (dummy value if argc <= 1)
+  //  -- sp[8]  : target                (dummy value if argc == 0)
   //  -- sp[12] : receiver
   // -----------------------------------
+  Register argc = a0;
+  Register arguments_list = a0;
+  Register target = a1;
+  Register new_target = a3;
+  Register undefined_value = a4;
+  Register scratch = a5;
 
   // 1. Load target into a1 (if present), argumentsList into a0 (if present),
   // new.target into a3 (if present, otherwise use target), remove all
   // arguments from the stack (including the receiver), and push thisArgument
   // (if present) instead.
   {
-    Label no_arg;
-    Register scratch = a4;
-    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
-    __ mov(a2, a1);
-    // Dlsa() cannot be used hare as scratch value used later.
-    __ dsll(scratch, a0, kPointerSizeLog2);
-    __ Daddu(a0, sp, Operand(scratch));
-    __ sd(a2, MemOperand(a0));  // receiver
-    __ Dsubu(a0, a0, Operand(kPointerSize));
-    __ Branch(&no_arg, lt, a0, Operand(sp));
-    __ ld(a1, MemOperand(a0));  // target
-    __ mov(a3, a1);             // new.target defaults to target
-    __ Dsubu(a0, a0, Operand(kPointerSize));
-    __ Branch(&no_arg, lt, a0, Operand(sp));
-    __ ld(a2, MemOperand(a0));  // argumentsList
-    __ Dsubu(a0, a0, Operand(kPointerSize));
-    __ Branch(&no_arg, lt, a0, Operand(sp));
-    __ ld(a3, MemOperand(a0));  // new.target
-    __ bind(&no_arg);
-    __ Daddu(sp, sp, Operand(scratch));
-    __ mov(a0, a2);
+    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+
+    __ Dsubu(sp, sp, Operand(3 * kPointerSize));
+    __ Dlsa(sp, sp, argc, kPointerSizeLog2);
+    __ mov(scratch, argc);
+    __ Pop(target, arguments_list, new_target);
+    __ Movz(arguments_list, undefined_value, scratch);  // if argc == 0
+    __ Movz(new_target, undefined_value, scratch);      // if argc == 0
+    __ Movz(target, undefined_value, scratch);          // if argc == 0
+    __ Dsubu(scratch, scratch, Operand(1));
+    __ Movz(arguments_list, undefined_value, scratch);  // if argc == 1
+    __ Movz(new_target, target, scratch);               // if argc == 1
+    __ Dsubu(scratch, scratch, Operand(1));
+    __ Movz(new_target, target, scratch);  // if argc == 2
+
+    __ sd(undefined_value, MemOperand(sp, 0));  // Overwrite receiver
   }
 
   // ----------- S t a t e -------------
   //  -- a0    : argumentsList
-  //  -- a3    : new.target
   //  -- a1    : target
+  //  -- a3    : new.target
   //  -- sp[0] : receiver (undefined)
   // -----------------------------------
 
   // 2. Make sure the target is actually a constructor.
   Label target_not_constructor;
-  __ JumpIfSmi(a1, &target_not_constructor);
-  __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ JumpIfSmi(target, &target_not_constructor);
+  __ ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
   __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
   __ And(a4, a4, Operand(1 << Map::kIsConstructor));
   __ Branch(&target_not_constructor, eq, a4, Operand(zero_reg));
 
   // 3. Make sure the new.target is actually a constructor.
   Label new_target_not_constructor;
-  __ JumpIfSmi(a3, &new_target_not_constructor);
-  __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ JumpIfSmi(new_target, &new_target_not_constructor);
+  __ ld(a4, FieldMemOperand(new_target, HeapObject::kMapOffset));
   __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
   __ And(a4, a4, Operand(1 << Map::kIsConstructor));
   __ Branch(&new_target_not_constructor, eq, a4, Operand(zero_reg));
@@ -2121,22 +2123,22 @@
   // 4b. The target is not a constructor, throw an appropriate TypeError.
   __ bind(&target_not_constructor);
   {
-    __ sd(a1, MemOperand(sp));
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ sd(target, MemOperand(sp));
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 
   // 4c. The new.target is not a constructor, throw an appropriate TypeError.
   __ bind(&new_target_not_constructor);
   {
-    __ sd(a3, MemOperand(sp));
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ sd(new_target, MemOperand(sp));
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 }
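
The dummy-slot claim that opens this builtin (and Generate_ReflectApply above) replaces a branchy scan over the optional arguments with fixed pointer arithmetic: lower sp by three slots, then give back argc of them. A sketch of the math, assuming 8-byte stack slots:

#include <cstdint>

// After this adjustment exactly three argument slots (plus the receiver
// beneath them) sit on top of the stack for any argc in [0, 3], so a
// single Pop of three registers is always in bounds.
uintptr_t ClaimDummySlots(uintptr_t sp, uint64_t argc) {
  const uintptr_t kPointerSize = 8;  // assumption: 64-bit target
  sp -= 3 * kPointerSize;            // Dsubu(sp, sp, 3 * kPointerSize)
  sp += argc * kPointerSize;         // Dlsa(sp, sp, argc, kPointerSizeLog2)
  return sp;                         // net move down: (3 - argc) slots
}
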
 
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   // __ sll(a0, a0, kSmiTagSize);
   __ dsll32(a0, a0, 0);
-  __ li(a4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ li(a4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
   __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
                            kPointerSize));
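
Several hunks in this change swap Smi::FromInt(frame_type) for StackFrame::TypeToMarker(frame_type) when tagging frames. The intent is a marker value that cannot be mistaken for a Smi or a heap pointer when frames are walked. The sketch below assumes the shift-and-set-low-bit encoding; treat the exact shape as an assumption, the authoritative definition lives in src/frames.h:

#include <cstdint>

// Presumed encoding: shift the type and set the low bit. The result is an
// odd small integer, so it can collide with neither an aligned heap
// pointer nor (on 64-bit, where Smi payloads live in the upper word) a
// valid Smi left on the stack.
constexpr int32_t TypeToMarkerSketch(int32_t frame_type) {
  return (frame_type << 1) | 1;  // assumption; see src/frames.h
}
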
@@ -2167,63 +2169,90 @@
   //  -- sp[0] : thisArgument
   // -----------------------------------
 
+  Register arguments_list = a0;
+  Register target = a1;
+  Register new_target = a3;
+
+  Register args = a0;
+  Register len = a2;
+
   // Create the list of arguments from the array-like argumentsList.
   {
-    Label create_arguments, create_array, create_runtime, done_create;
-    __ JumpIfSmi(a0, &create_runtime);
+    Label create_arguments, create_array, create_holey_array, create_runtime,
+        done_create;
+    __ JumpIfSmi(arguments_list, &create_runtime);
 
     // Load the map of argumentsList into a2.
-    __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+    Register arguments_list_map = a2;
+    __ ld(arguments_list_map,
+          FieldMemOperand(arguments_list, HeapObject::kMapOffset));
 
     // Load native context into a4.
-    __ ld(a4, NativeContextMemOperand());
+    Register native_context = a4;
+    __ ld(native_context, NativeContextMemOperand());
 
     // Check if argumentsList is an (unmodified) arguments object.
-    __ ld(at, ContextMemOperand(a4, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
-    __ Branch(&create_arguments, eq, a2, Operand(at));
-    __ ld(at, ContextMemOperand(a4, Context::STRICT_ARGUMENTS_MAP_INDEX));
-    __ Branch(&create_arguments, eq, a2, Operand(at));
+    __ ld(at, ContextMemOperand(native_context,
+                                Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+    __ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
+    __ ld(at, ContextMemOperand(native_context,
+                                Context::STRICT_ARGUMENTS_MAP_INDEX));
+    __ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
 
     // Check if argumentsList is a fast JSArray.
-    __ ld(v0, FieldMemOperand(a2, HeapObject::kMapOffset));
-    __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+    __ lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
     __ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
 
     // Ask the runtime to create the list (actually a FixedArray).
     __ bind(&create_runtime);
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(a1, a3, a0);
+      __ Push(target, new_target, arguments_list);
       __ CallRuntime(Runtime::kCreateListFromArrayLike);
-      __ mov(a0, v0);
-      __ Pop(a1, a3);
-      __ ld(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
-      __ SmiUntag(a2);
+      __ mov(arguments_list, v0);
+      __ Pop(target, new_target);
+      __ lw(len, UntagSmiFieldMemOperand(v0, FixedArray::kLengthOffset));
     }
     __ Branch(&done_create);
 
     // Try to create the list from an arguments object.
     __ bind(&create_arguments);
-    __ ld(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
-    __ ld(a4, FieldMemOperand(a0, JSObject::kElementsOffset));
-    __ ld(at, FieldMemOperand(a4, FixedArray::kLengthOffset));
+    __ lw(len, UntagSmiFieldMemOperand(arguments_list,
+                                       JSArgumentsObject::kLengthOffset));
+    __ ld(a4, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
+    __ lw(at, UntagSmiFieldMemOperand(a4, FixedArray::kLengthOffset));
+    __ Branch(&create_runtime, ne, len, Operand(at));
+    __ mov(args, a4);
+
+    __ Branch(&done_create);
+
+    // For holey JSArrays we need to check that the array prototype chain
+    // protector is intact and that the prototype is the initial
+    // Array.prototype.
+    __ bind(&create_holey_array);
+    __ ld(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
+    __ ld(at, ContextMemOperand(native_context,
+                                Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
     __ Branch(&create_runtime, ne, a2, Operand(at));
-    __ SmiUntag(a2);
-    __ mov(a0, a4);
+    __ LoadRoot(at, Heap::kArrayProtectorRootIndex);
+    __ lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
+    __ Branch(&create_runtime, ne, a2,
+              Operand(Smi::FromInt(Isolate::kProtectorValid)));
+    __ lw(a2, UntagSmiFieldMemOperand(a0, JSArray::kLengthOffset));
+    __ ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
     __ Branch(&done_create);
 
     // Try to create the list from a JSArray object.
     __ bind(&create_array);
-    __ ld(a2, FieldMemOperand(a2, Map::kBitField2Offset));
-    __ DecodeField<Map::ElementsKindBits>(a2);
+    __ lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(t1);
     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
     STATIC_ASSERT(FAST_ELEMENTS == 2);
-    __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS));
-    __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS));
-    __ ld(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
-    __ ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
-    __ SmiUntag(a2);
+    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+    __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_SMI_ELEMENTS));
+    __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_ELEMENTS));
+    __ Branch(&create_runtime, hi, t1, Operand(FAST_ELEMENTS));
+    __ lw(a2, UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
+    __ ld(a0, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
 
     __ bind(&done_create);
   }
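
The create_array path above sorts each elements kind into one of three outcomes. A scalar sketch of that routing, with enum values taken from the STATIC_ASSERTs (the helper name is hypothetical):

enum ElementsKindSketch {  // values mirror the STATIC_ASSERTs above
  kFastSmiElements = 0,
  kFastHoleySmiElements = 1,
  kFastElements = 2,
  kFastHoleyElements = 3
};
enum CreatePath { kDirectFromElements, kHoleyWithProtectorCheck, kRuntime };

// Packed kinds read the backing store directly; holey kinds must first
// prove the array protector is valid and the prototype is the initial
// Array.prototype; everything slower goes through the runtime.
CreatePath ClassifyArgumentsList(int kind) {
  if (kind == kFastHoleySmiElements || kind == kFastHoleyElements)
    return kHoleyWithProtectorCheck;
  if (kind > kFastElements) return kRuntime;  // dictionary modes, etc.
  return kDirectFromElements;
}
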
@@ -2238,7 +2267,7 @@
     // here which will cause a4 to become negative.
     __ Dsubu(a4, sp, a4);
     // Check if the arguments will overflow the stack.
-    __ dsll(at, a2, kPointerSizeLog2);
+    __ dsll(at, len, kPointerSizeLog2);
     __ Branch(&done, gt, a4, Operand(at));  // Signed comparison.
     __ TailCallRuntime(Runtime::kThrowStackOverflow);
     __ bind(&done);
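
The limit test above deliberately uses a signed comparison: sp minus the real stack limit can already be negative if the stack has overflowed, and a signed compare against len * kPointerSize then fails as required. Sketch:

#include <cstdint>

// True when len pointer-sized arguments still fit between sp and the real
// stack limit. Signed on purpose: if the stack has already overflowed,
// space is negative and the comparison fails for any non-negative len.
bool ArgumentsFitOnStack(intptr_t sp, intptr_t real_stack_limit,
                         intptr_t len) {
  intptr_t space = sp - real_stack_limit;  // may already be negative
  return space > (len << 3);               // kPointerSizeLog2 == 3 assumed
}
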
@@ -2254,19 +2283,38 @@
 
   // Push arguments onto the stack (thisArgument is already on the stack).
   {
-    __ mov(a4, zero_reg);
-    Label done, loop;
+    Label done, push, loop;
+    Register src = a4;
+    Register scratch = len;
+
+    __ daddiu(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
+    __ Branch(&done, eq, len, Operand(zero_reg), i::USE_DELAY_SLOT);
+    __ mov(a0, len);  // The 'len' argument for Call() or Construct().
+    __ dsll(scratch, len, kPointerSizeLog2);
+    __ Dsubu(scratch, sp, Operand(scratch));
+    __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
     __ bind(&loop);
-    __ Branch(&done, eq, a4, Operand(a2));
-    __ Dlsa(at, a0, a4, kPointerSizeLog2);
-    __ ld(at, FieldMemOperand(at, FixedArray::kHeaderSize));
-    __ Push(at);
-    __ Daddu(a4, a4, Operand(1));
-    __ Branch(&loop);
+    __ ld(a5, MemOperand(src));
+    __ Branch(&push, ne, a5, Operand(t1));
+    __ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ daddiu(src, src, kPointerSize);
+    __ Push(a5);
+    __ Branch(&loop, ne, scratch, Operand(sp));
     __ bind(&done);
-    __ Move(a0, a4);
   }
 
+  // ----------- S t a t e -------------
+  //  -- a0             : argument count (len)
+  //  -- a1             : target
+  //  -- a3             : new.target (checked to be constructor or undefined)
+  //  -- sp[0]          : args[len-1]
+  //  -- sp[8]          : args[len-2]
+  //     ...            : ...
+  //  -- sp[8*(len-2)]  : args[1]
+  //  -- sp[8*(len-1)]  : args[0]
+  // -----------------------------------
+
   // Dispatch to Call or Construct depending on whether new.target is undefined.
   {
     Label construct;
@@ -2278,6 +2326,72 @@
   }
 }
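
Note that the push loop above rewrites the_hole to undefined while copying, which is what makes the holey fast path sound once the array protector has been verified: any hole would otherwise have to be looked up on Array.prototype. A sketch of the copy (the container type is illustrative):

#include <cstdint>
#include <vector>

// Copy len elements from the FixedArray backing store onto the stack,
// replacing the_hole with undefined. This is only legal because the array
// protector guarantees Array.prototype contributes no elements that a
// hole would otherwise resolve to.
void PushArgumentsSketch(const uint64_t* elements, int len,
                         uint64_t the_hole, uint64_t undefined,
                         std::vector<uint64_t>* stack) {
  for (int i = 0; i < len; ++i) {
    uint64_t value = elements[i];
    stack->push_back(value == the_hole ? undefined : value);
  }
}
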
 
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+                                           Handle<Code> code) {
+  // ----------- S t a t e -------------
+  //  -- a1    : the target to call (can be any Object)
+  //  -- a2    : start index (to support rest parameters)
+  //  -- ra    : return address.
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ Branch(&arguments_adaptor, eq, a0,
+            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+  {
+    __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    __ ld(a0, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(a0,
+          FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ mov(a3, fp);
+  }
+  __ Branch(&arguments_done);
+  __ bind(&arguments_adaptor);
+  {
+    // Just get the length from the ArgumentsAdaptorFrame.
+    __ lw(a0, UntagSmiMemOperand(
+                  a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  }
+  __ bind(&arguments_done);
+
+  Label stack_empty, stack_done, stack_overflow;
+  __ Subu(a0, a0, a2);
+  __ Branch(&stack_empty, le, a0, Operand(zero_reg));
+  {
+    // Check for stack overflow.
+    Generate_StackOverflowCheck(masm, a0, a4, a5, &stack_overflow);
+
+    // Forward the arguments from the caller frame.
+    {
+      Label loop;
+      __ mov(a2, a0);
+      __ bind(&loop);
+      {
+        __ Dlsa(at, a3, a2, kPointerSizeLog2);
+        __ ld(at, MemOperand(at, 1 * kPointerSize));
+        __ push(at);
+        __ Subu(a2, a2, Operand(1));
+        __ Branch(&loop, ne, a2, Operand(zero_reg));
+      }
+    }
+  }
+  __ Branch(&stack_done);
+  __ bind(&stack_overflow);
+  __ TailCallRuntime(Runtime::kThrowStackOverflow);
+  __ bind(&stack_empty);
+  {
+    // We just pass the receiver, which is already on the stack.
+    __ mov(a0, zero_reg);
+  }
+  __ bind(&stack_done);
+
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
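
The count arithmetic in CallForwardVarargs reduces to one subtraction with a floor at zero, the caller's argument count coming from the arguments adaptor frame when one exists and from SharedFunctionInfo's formal parameter count otherwise. Sketched:

// Arguments to forward for rest parameters: everything the caller passed
// at or beyond start_index. An empty (or negative) range forwards nothing
// but the receiver that is already on the stack.
int ForwardedArgumentCount(int caller_argc, int start_index) {
  int n = caller_argc - start_index;
  return n > 0 ? n : 0;
}
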
+
 namespace {
 
 // Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2328,7 +2442,7 @@
     __ ld(scratch3,
           MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ Branch(&no_interpreter_frame, ne, scratch3,
-              Operand(Smi::FromInt(StackFrame::STUB)));
+              Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
     __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
     __ bind(&no_interpreter_frame);
   }
@@ -2340,13 +2454,12 @@
   __ ld(scratch3,
         MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
   __ Branch(&no_arguments_adaptor, ne, scratch3,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // Drop current frame and load arguments count from arguments adaptor frame.
   __ mov(fp, scratch2);
-  __ ld(caller_args_count_reg,
-        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiUntag(caller_args_count_reg);
+  __ lw(caller_args_count_reg,
+        UntagSmiMemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ Branch(&formal_parameter_count_loaded);
 
   __ bind(&no_arguments_adaptor);
@@ -2503,8 +2616,7 @@
 
   // Load [[BoundArguments]] into a2 and length of that into a4.
   __ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
-  __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
-  __ SmiUntag(a4);
+  __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
 
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
@@ -2551,8 +2663,7 @@
   // Copy [[BoundArguments]] to the stack (below the arguments).
   {
     Label loop, done_loop;
-    __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
-    __ SmiUntag(a4);
+    __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
     __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
     __ bind(&loop);
     __ Dsubu(a4, a4, Operand(1));
@@ -2634,6 +2745,150 @@
   }
 }
 
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+  Register argc = a0;
+  Register constructor = a1;
+  Register new_target = a3;
+
+  Register scratch = t0;
+  Register scratch2 = t1;
+
+  Register spread = a2;
+  Register spread_map = a4;
+
+  Register spread_len = a4;
+
+  Register native_context = a5;
+
+  Label runtime_call, push_args;
+  __ ld(spread, MemOperand(sp, 0));
+  __ JumpIfSmi(spread, &runtime_call);
+  __ ld(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+  __ ld(native_context, NativeContextMemOperand());
+
+  // Check that the spread is an array.
+  __ lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
+  __ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+  // Check that we have the original ArrayPrototype.
+  __ ld(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+  __ ld(scratch2, ContextMemOperand(native_context,
+                                    Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+  __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
+  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+  __ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ Branch(&runtime_call, ne, scratch,
+            Operand(Smi::FromInt(Isolate::kProtectorValid)));
+
+  // Check that the map of the initial array iterator hasn't changed.
+  __ ld(scratch,
+        ContextMemOperand(native_context,
+                          Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+  __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ ld(scratch2,
+        ContextMemOperand(native_context,
+                          Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+  __ Branch(&runtime_call, ne, scratch, Operand(scratch2));
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  Label no_protector_check;
+  __ lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+  __ DecodeField<Map::ElementsKindBits>(scratch);
+  __ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
+  // For non-FastHoley kinds, we can skip the protector check.
+  __ Branch(&no_protector_check, eq, scratch, Operand(FAST_SMI_ELEMENTS));
+  __ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
+  // Check the ArrayProtector cell.
+  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+  __ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ Branch(&runtime_call, ne, scratch,
+            Operand(Smi::FromInt(Isolate::kProtectorValid)));
+
+  __ bind(&no_protector_check);
+  // Load the FixedArray backing store, but use the length from the array.
+  __ lw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
+  __ ld(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+  __ Branch(&push_args);
+
+  __ bind(&runtime_call);
+  {
+    // Call the builtin for the result of the spread.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(argc);
+    __ Push(constructor, new_target, argc, spread);
+    __ CallRuntime(Runtime::kSpreadIterableFixed);
+    __ mov(spread, v0);
+    __ Pop(constructor, new_target, argc);
+    __ SmiUntag(argc);
+  }
+
+  {
+    // Calculate the new nargs including the result of the spread.
+    __ lw(spread_len,
+          UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
+
+    __ bind(&push_args);
+    // argc += spread_len - 1. Subtract 1 for the spread itself.
+    __ Daddu(argc, argc, spread_len);
+    __ Dsubu(argc, argc, Operand(1));
+
+    // Pop the spread argument off the stack.
+    __ Pop(scratch);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+    // Make scratch the space we have left. The stack might already be
+    // overflowed here, which will cause scratch to become negative.
+    __ Dsubu(scratch, sp, scratch);
+    // Check if the arguments will overflow the stack.
+    __ dsll(at, spread_len, kPointerSizeLog2);
+    __ Branch(&done, gt, scratch, Operand(at));  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // Put the evaluated spread onto the stack as additional arguments.
+  {
+    __ mov(scratch, zero_reg);
+    Label done, push, loop;
+    __ bind(&loop);
+    __ Branch(&done, eq, scratch, Operand(spread_len));
+    __ Dlsa(scratch2, spread, scratch, kPointerSizeLog2);
+    __ ld(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+    __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+    __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ Push(scratch2);
+    __ Daddu(scratch, scratch, Operand(1));
+    __ Branch(&loop);
+    __ bind(&done);
+  }
+}
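
CheckSpreadAndPushToStack takes its fast path only when iterating the spread is provably equivalent to indexed access over the backing store. The conjunction of guards above, restated as a scalar predicate (kind numbering per the elements-kind enum; holey kinds are the odd values):

// All conditions must hold to skip the iterator protocol and read the
// JSArray backing store directly; any failure falls back to the
// Runtime::kSpreadIterableFixed call.
bool SpreadFastPathAllowed(bool is_js_array, bool prototype_is_initial,
                           bool iterator_protector_valid,
                           bool iterator_map_unchanged, int elements_kind,
                           bool array_protector_valid) {
  if (!is_js_array || !prototype_is_initial) return false;
  if (!iterator_protector_valid || !iterator_map_unchanged) return false;
  if (elements_kind > 3) return false;      // beyond FAST_HOLEY_ELEMENTS
  bool holey = (elements_kind & 1) != 0;    // holey kinds are odd
  return !holey || array_protector_valid;
}
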
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the target to call (can be any Object).
+  // -----------------------------------
+
+  // CheckSpreadAndPushToStack will push a3 to save it.
+  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            TailCallMode::kDisallow),
+          RelocInfo::CODE_TARGET);
+}
+
 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
@@ -2665,8 +2920,7 @@
 
   // Load [[BoundArguments]] into a2 and length of that into a4.
   __ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
-  __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
-  __ SmiUntag(a4);
+  __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
 
   // ----------- S t a t e -------------
   //  -- a0 : the number of arguments (not including the receiver)
@@ -2714,8 +2968,7 @@
   // Copy [[BoundArguments]] to the stack (below the arguments).
   {
     Label loop, done_loop;
-    __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
-    __ SmiUntag(a4);
+    __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
     __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
     __ bind(&loop);
     __ Dsubu(a4, a4, Operand(1));
@@ -2815,6 +3068,19 @@
 }
 
 // static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : the number of arguments (not including the receiver)
+  //  -- a1 : the constructor to call (can be any Object)
+  //  -- a3 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+// static
 void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- a0 : requested object size (untagged)
diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc
index be1e67c..be07f74 100644
--- a/src/builtins/ppc/builtins-ppc.cc
+++ b/src/builtins/ppc/builtins-ppc.cc
@@ -338,8 +338,8 @@
     __ SmiTag(r9);
     __ EnterBuiltinFrame(cp, r4, r9);
     __ Push(r5);  // first argument
-    FastNewObjectStub stub(masm->isolate());
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(r5);
     __ LeaveBuiltinFrame(cp, r4, r9);
     __ SmiUntag(r9);
@@ -490,8 +490,8 @@
     __ SmiTag(r9);
     __ EnterBuiltinFrame(cp, r4, r9);
     __ Push(r5);  // first argument
-    FastNewObjectStub stub(masm->isolate());
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(r5);
     __ LeaveBuiltinFrame(cp, r4, r9);
     __ SmiUntag(r9);
@@ -560,6 +560,7 @@
 void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
                                     bool create_implicit_receiver,
                                     bool check_derived_construct) {
+  Label post_instantiation_deopt_entry;
   // ----------- S t a t e -------------
   //  -- r3     : number of arguments
   //  -- r4     : constructor function
@@ -587,8 +588,8 @@
 
       // Allocate the new receiver object.
       __ Push(r4, r6);
-      FastNewObjectStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+              RelocInfo::CODE_TARGET);
       __ mr(r7, r3);
       __ Pop(r4, r6);
 
@@ -608,6 +609,9 @@
       __ Push(r7, r7);
     }
 
+    // Deoptimizer re-enters stub code here.
+    __ bind(&post_instantiation_deopt_entry);
+
     // Set up pointer to last argument.
     __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
 
@@ -636,14 +640,17 @@
     // r3: number of arguments
     // r4: constructor function
     // r6: new target
-
-    ParameterCount actual(r3);
-    __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
-                      CheckDebugStepCallWrapper());
+    {
+      ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+      ParameterCount actual(r3);
+      __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
+                        CheckDebugStepCallWrapper());
+    }
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
-      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+      masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+          masm->pc_offset());
     }
 
     // Restore context from the frame.
@@ -708,6 +715,34 @@
     __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5);
   }
   __ blr();
+  // Store offset of trampoline address for deoptimizer. This is the bailout
+  // point after the receiver instantiation but before the function invocation.
+  // We need to restore some registers in order to continue the above code.
+  if (create_implicit_receiver && !is_api_function) {
+    masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+        masm->pc_offset());
+
+    // ----------- S t a t e -------------
+    //  -- r3    : newly allocated object
+    //  -- sp[0] : constructor function
+    // -----------------------------------
+
+    __ pop(r4);
+    __ Push(r3, r3);
+
+    // Retrieve smi-tagged arguments count from the stack.
+    __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+    __ SmiUntag(r3);
+
+    // Retrieve the new target value from the stack. This was placed into the
+    // frame description in place of the receiver by the optimizing compiler.
+    __ addi(r6, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+    __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+    __ LoadPX(r6, MemOperand(r6, ip));
+
+    // Continue with constructor function invocation.
+    __ b(&post_instantiation_deopt_entry);
+  }
 }
 
 }  // namespace
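
The re-entry trampoline added above must rebuild register state from the frame alone. new.target is recovered from the receiver slot, which the optimizing compiler repurposes when writing the frame description; a sketch of the address computation, assuming 8-byte slots:

#include <cstdint>

// The receiver slot sits above the argc pushed arguments, at
// caller_sp + argc * kPointerSize. The deoptimizer writes new.target
// there, in place of the receiver, for this trampoline to reload.
uintptr_t NewTargetSlotAddress(uintptr_t fp, uintptr_t caller_sp_offset,
                               uintptr_t argc) {
  const uintptr_t kPointerSize = 8;  // assumption: 64-bit target
  uintptr_t caller_sp = fp + caller_sp_offset;
  return caller_sp + argc * kPointerSize;
}
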
@@ -749,20 +784,19 @@
   __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kResumeModeOffset), r0);
 
   // Load suspended function and context.
-  __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
   __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+  __ LoadP(cp, FieldMemOperand(r7, JSFunction::kContextOffset));
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
   Label stepping_prepared;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(masm->isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  __ mov(ip, Operand(last_step_action));
+  ExternalReference debug_hook =
+      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+  __ mov(ip, Operand(debug_hook));
   __ LoadByte(ip, MemOperand(ip), r0);
   __ extsb(ip, ip);
-  __ cmpi(ip, Operand(StepIn));
-  __ bge(&prepare_step_in_if_stepping);
+  __ CmpSmiLiteral(ip, Smi::kZero, r0);
+  __ bne(&prepare_step_in_if_stepping);
 
   // Flood function if we need to continue stepping in the suspended generator.
 
@@ -812,13 +846,14 @@
     __ bind(&done_loop);
   }
 
-  // Dispatch on the kind of generator object.
-  Label old_generator;
-  __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
-  __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
-  __ bne(&old_generator);
+  // Underlying function needs to have bytecode available.
+  if (FLAG_debug_code) {
+    __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+    __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kMissingBytecodeArray);
+  }
 
-  // New-style (ignition/turbofan) generator object
+  // Resume (Ignition/TurboFan) generator object.
   {
     // We abuse new.target both to indicate that this is a resume call and to
     // pass in the generator object.  In ordinary calls, new.target is always
@@ -829,62 +864,11 @@
     __ JumpToJSEntry(ip);
   }
 
-  // Old-style (full-codegen) generator object
-  __ bind(&old_generator);
-  {
-    // Enter a new JavaScript frame, and initialize its slots as they were when
-    // the generator was suspended.
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ PushStandardFrame(r7);
-
-    // Restore the operand stack.
-    __ LoadP(r3, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
-    __ LoadP(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
-    __ addi(r3, r3,
-            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-    {
-      Label loop, done_loop;
-      __ SmiUntag(r6, SetRC);
-      __ beq(&done_loop, cr0);
-      __ mtctr(r6);
-      __ bind(&loop);
-      __ LoadPU(ip, MemOperand(r3, kPointerSize));
-      __ Push(ip);
-      __ bdnz(&loop);
-      __ bind(&done_loop);
-    }
-
-    // Reset operand stack so we don't leak.
-    __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
-    __ StoreP(ip, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset),
-              r0);
-
-    // Resume the generator function at the continuation.
-    __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
-    __ addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
-    {
-      ConstantPoolUnavailableScope constant_pool_unavailable(masm);
-      if (FLAG_enable_embedded_constant_pool) {
-        __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r6);
-      }
-      __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
-      __ SmiUntag(r5);
-      __ add(r6, r6, r5);
-      __ LoadSmiLiteral(r5,
-                        Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
-      __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
-                r0);
-      __ mr(r3, r4);  // Continuation expects generator object in r3.
-      __ Jump(r6);
-    }
-  }
-
   __ bind(&prepare_step_in_if_stepping);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
     __ Push(r4, r5, r7);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(r4, r5);
     __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
   }
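
The stepping changes in this file replace the last-step-action comparison with a single debug hook: one byte at a well-known address, sign-extended and compared against zero (Smi::kZero encodes as 0), with any non-zero value routing the call through Runtime::kDebugOnFunctionCall. As a predicate:

#include <cstdint>

// One sign-extended byte read from the debug-hook address. Non-zero means
// the debugger wants Runtime::kDebugOnFunctionCall before the call
// proceeds; zero skips straight to the fast path.
bool DebugHookRequested(int8_t debug_hook_flag) {
  return debug_hook_flag != 0;
}
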
@@ -1070,8 +1054,8 @@
   // Load original bytecode array or the debug copy.
   __ LoadP(kInterpreterBytecodeArrayRegister,
            FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
-  __ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
-  __ beq(&array_done);
+  __ TestIfSmi(debug_info, r0);
+  __ beq(&array_done, cr0);
   __ LoadP(kInterpreterBytecodeArrayRegister,
            FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
   __ bind(&array_done);
@@ -1084,27 +1068,33 @@
   __ bne(&switch_to_different_code_kind);
 
   // Increment invocation count for the function.
-  __ LoadP(r7, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
-  __ LoadP(r7, FieldMemOperand(r7, LiteralsArray::kFeedbackVectorOffset));
-  __ LoadP(r8, FieldMemOperand(r7, TypeFeedbackVector::kInvocationCountIndex *
-                                           kPointerSize +
-                                       TypeFeedbackVector::kHeaderSize));
+  __ LoadP(r7, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
+  __ LoadP(r7, FieldMemOperand(r7, Cell::kValueOffset));
+  __ LoadP(r8, FieldMemOperand(
+                   r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                           FeedbackVector::kHeaderSize));
   __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
-  __ StoreP(r8, FieldMemOperand(r7, TypeFeedbackVector::kInvocationCountIndex *
-                                            kPointerSize +
-                                        TypeFeedbackVector::kHeaderSize),
+  __ StoreP(r8, FieldMemOperand(
+                    r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                            FeedbackVector::kHeaderSize),
             r0);
 
   // Check function data field is actually a BytecodeArray object.
 
   if (FLAG_debug_code) {
     __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
-    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
                          BYTECODE_ARRAY_TYPE);
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Reset code age.
+  __ mov(r8, Operand(BytecodeArray::kNoAgeBytecodeAge));
+  __ StoreByte(r8, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                   BytecodeArray::kBytecodeAgeOffset),
+               r0);
+
   // Load initial bytecode offset.
   __ mov(kInterpreterBytecodeOffsetRegister,
          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1208,7 +1198,7 @@
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
-    CallableType function_type) {
+    InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- r3 : the number of arguments (not including the receiver)
   //  -- r5 : the address of the first argument to be pushed. Subsequent
@@ -1225,12 +1215,14 @@
   Generate_InterpreterPushArgs(masm, r6, r5, r6, r7, &stack_overflow);
 
   // Call the target.
-  if (function_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
                                                       tail_call_mode),
             RelocInfo::CODE_TARGET);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(function_type, CallableType::kAny);
     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
@@ -1246,7 +1238,7 @@
 
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
-    MacroAssembler* masm, CallableType construct_type) {
+    MacroAssembler* masm, InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   // -- r3 : argument count (not including receiver)
   // -- r6 : new target
@@ -1269,7 +1261,7 @@
   __ bind(&skip);
 
   __ AssertUndefinedOrAllocationSite(r5, r8);
-  if (construct_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ AssertFunction(r4);
 
     // Tail call to the function-specific construct stub (still in the caller
@@ -1279,9 +1271,12 @@
     // Jump to the construct function.
     __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
     __ Jump(ip);
-
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    // Call the constructor with r3, r4, and r6 unmodified.
+    __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(construct_type, CallableType::kAny);
+    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
     // Call the constructor with r3, r4, and r6 unmodified.
     __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   }
@@ -1347,7 +1342,7 @@
   if (FLAG_debug_code) {
     // Check function data field is actually a BytecodeArray object.
     __ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
-    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+    __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
     __ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
                          BYTECODE_ARRAY_TYPE);
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
@@ -1405,15 +1400,20 @@
   Register closure = r4;
   Register map = r9;
   Register index = r5;
+
+  // Do we have a valid feedback vector?
+  __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
+  __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+
   __ LoadP(map,
            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   __ LoadP(map,
            FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
   __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
   __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
-  __ blt(&gotta_call_runtime);
+  __ blt(&try_shared);
 
-  // Find literals.
   // r10 : native context
   // r5  : length / index
   // r9  : optimized code map
@@ -1434,25 +1434,6 @@
   __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
   __ cmp(temp, native_context);
   __ bne(&loop_bottom);
-  // OSR id set to none?
-  __ LoadP(temp,
-           FieldMemOperand(array_pointer,
-                           SharedFunctionInfo::kOffsetToPreviousOsrAstId));
-  const int bailout_id = BailoutId::None().ToInt();
-  __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
-  __ bne(&loop_bottom);
-  // Literals available?
-  __ LoadP(temp,
-           FieldMemOperand(array_pointer,
-                           SharedFunctionInfo::kOffsetToPreviousLiterals));
-  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
-
-  // Save the literals in the closure.
-  __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
-  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r7,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
 
   // Code available?
   Register entry = r7;
@@ -1462,7 +1443,7 @@
   __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
 
-  // Found literals and code. Get them into the closure and return.
+  // Found code. Get it into the closure and return.
   // Store code entry in the closure.
   __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
@@ -1496,7 +1477,7 @@
   __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
   __ bgt(&loop_top);
 
-  // We found neither literals nor code.
+  // We found no code.
   __ b(&gotta_call_runtime);
 
   __ bind(&try_shared);
@@ -1507,13 +1488,14 @@
                              SharedFunctionInfo::kMarkedForTierUpByteOffset));
   __ TestBit(r8, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
   __ bne(&gotta_call_runtime, cr0);
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ lwz(r8, FieldMemOperand(entry, Code::kFlagsOffset));
-  __ DecodeField<Code::KindField>(r8);
-  __ cmpi(r8, Operand(Code::BUILTIN));
+  __ mov(r8, Operand(masm->CodeObject()));
+  __ cmp(entry, r8);
   __ beq(&gotta_call_runtime);
-  // Yes, install the full code.
+
+  // Install the SFI's code entry.
   __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
   __ RecordWriteCodeEntryField(closure, entry, r8);
@@ -1627,14 +1609,9 @@
   __ Jump(ip);
 }
 
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
-  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
-  }                                                           \
-  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                              \
+  void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+    GenerateMakeCodeYoungAgainCommon(masm);                               \
   }
 CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2169,20 +2146,20 @@
   __ bind(&target_not_constructor);
   {
     __ StoreP(r4, MemOperand(sp, 0));
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 
   // 4c. The new.target is not a constructor, throw an appropriate TypeError.
   __ bind(&new_target_not_constructor);
   {
     __ StoreP(r6, MemOperand(sp, 0));
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 }
 
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(r3);
-  __ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ mov(r7, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ mflr(r0);
   __ push(r0);
   if (FLAG_enable_embedded_constant_pool) {
@@ -2219,7 +2196,8 @@
 
   // Create the list of arguments from the array-like argumentsList.
   {
-    Label create_arguments, create_array, create_runtime, done_create;
+    Label create_arguments, create_array, create_holey_array, create_runtime,
+        done_create;
     __ JumpIfSmi(r3, &create_runtime);
 
     // Load the map of argumentsList into r5.
@@ -2263,17 +2241,37 @@
     __ mr(r3, r7);
     __ b(&done_create);
 
+    // For holey JSArrays we need to check that the array prototype chain
+    // protector is intact and that the prototype is the initial
+    // Array.prototype.
+    __ bind(&create_holey_array);
+    __ LoadP(r5, FieldMemOperand(r5, Map::kPrototypeOffset));
+    __ LoadP(r7, ContextMemOperand(r7, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+    __ cmp(r5, r7);
+    __ bne(&create_runtime);
+    __ LoadRoot(r7, Heap::kArrayProtectorRootIndex);
+    __ LoadP(r5, FieldMemOperand(r7, PropertyCell::kValueOffset));
+    __ CmpSmiLiteral(r5, Smi::FromInt(Isolate::kProtectorValid), r0);
+    __ bne(&create_runtime);
+    __ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
+    __ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+    __ SmiUntag(r5);
+    __ b(&done_create);
+
     // Try to create the list from a JSArray object.
+    // -- r5 and r7 must be preserved until the branch to create_holey_array.
     __ bind(&create_array);
-    __ lbz(r5, FieldMemOperand(r5, Map::kBitField2Offset));
-    __ DecodeField<Map::ElementsKindBits>(r5);
+    __ lbz(r8, FieldMemOperand(r5, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(r8);
     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
     STATIC_ASSERT(FAST_ELEMENTS == 2);
-    __ cmpi(r5, Operand(FAST_ELEMENTS));
+    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+    __ cmpi(r8, Operand(FAST_HOLEY_ELEMENTS));
     __ bgt(&create_runtime);
-    __ cmpi(r5, Operand(FAST_HOLEY_SMI_ELEMENTS));
-    __ beq(&create_runtime);
+    // Only the FAST_* kinds remain at this point; the FAST_HOLEY_* kinds
+    // are the odd values, which the low-bit test below relies on.
+    __ TestBit(r8, Map::kHasNonInstancePrototype, r0);
+    __ bne(&create_holey_array, cr0);
+    // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
     __ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
     __ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
     __ SmiUntag(r5);
@@ -2308,15 +2306,20 @@
 
   // Push arguments onto the stack (thisArgument is already on the stack).
   {
-    Label loop, no_args;
+    __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+    Label loop, no_args, skip;
     __ cmpi(r5, Operand::Zero());
     __ beq(&no_args);
     __ addi(r3, r3,
             Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
     __ mtctr(r5);
     __ bind(&loop);
-    __ LoadPU(r0, MemOperand(r3, kPointerSize));
-    __ push(r0);
+    __ LoadPU(ip, MemOperand(r3, kPointerSize));
+    __ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ bne(&skip);
+    __ mr(ip, r9);
+    __ bind(&skip);
+    __ push(ip);
     __ bdnz(&loop);
     __ bind(&no_args);
     __ mr(r3, r5);
@@ -2330,6 +2333,76 @@
   }
 }
 
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+                                           Handle<Code> code) {
+  // ----------- S t a t e -------------
+  //  -- r4    : the target to call (can be any Object)
+  //  -- r5    : start index (to support rest parameters)
+  //  -- lr    : return address.
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ cmpi(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ beq(&arguments_adaptor);
+  {
+    __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadWordArith(
+        r3,
+        FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ mr(r6, fp);
+  }
+  __ b(&arguments_done);
+  __ bind(&arguments_adaptor);
+  {
+    // Load the length from the ArgumentsAdaptorFrame.
+    __ LoadP(r3, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  }
+  __ bind(&arguments_done);
+
+  Label stack_empty, stack_done, stack_overflow;
+  __ SmiUntag(r3);
+  __ sub(r3, r3, r5);
+  __ cmpi(r3, Operand::Zero());
+  __ ble(&stack_empty);
+  {
+    // Check for stack overflow.
+    Generate_StackOverflowCheck(masm, r3, r5, &stack_overflow);
+
+    // Forward the arguments from the caller frame.
+    {
+      Label loop;
+      __ addi(r6, r6, Operand(kPointerSize));
+      __ mr(r5, r3);
+      __ bind(&loop);
+      {
+        __ ShiftLeftImm(ip, r5, Operand(kPointerSizeLog2));
+        __ LoadPX(ip, MemOperand(r6, ip));
+        __ push(ip);
+        __ subi(r5, r5, Operand(1));
+        __ cmpi(r5, Operand::Zero());
+        __ bne(&loop);
+      }
+    }
+  }
+  __ b(&stack_done);
+  __ bind(&stack_overflow);
+  __ TailCallRuntime(Runtime::kThrowStackOverflow);
+  __ bind(&stack_empty);
+  {
+    // We just pass the receiver, which is already on the stack.
+    __ mov(r3, Operand::Zero());
+  }
+  __ bind(&stack_done);
+
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
 namespace {
 
 // Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2380,7 +2453,7 @@
     Label no_interpreter_frame;
     __ LoadP(scratch3,
              MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
-    __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
+    __ cmpi(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
     __ bne(&no_interpreter_frame);
     __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
     __ bind(&no_interpreter_frame);
@@ -2393,7 +2466,8 @@
   __ LoadP(
       scratch3,
       MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ cmpi(scratch3,
+          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ bne(&no_arguments_adaptor);
 
   // Drop current frame and load arguments count from arguments adaptor frame.
@@ -2715,6 +2789,156 @@
   }
 }
 
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+  Register argc = r3;
+  Register constructor = r4;
+  Register new_target = r6;
+
+  Register scratch = r5;
+  Register scratch2 = r9;
+
+  Register spread = r7;
+  Register spread_map = r8;
+  Register spread_len = r8;
+  Label runtime_call, push_args;
+  __ LoadP(spread, MemOperand(sp, 0));
+  __ JumpIfSmi(spread, &runtime_call);
+  __ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+
+  // Check that the spread is an array.
+  __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
+  __ bne(&runtime_call);
+
+  // Check that we have the original ArrayPrototype.
+  __ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+  __ LoadP(scratch2, NativeContextMemOperand());
+  __ LoadP(scratch2,
+           ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+  __ cmp(scratch, scratch2);
+  __ bne(&runtime_call);
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
+  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+  __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
+  __ bne(&runtime_call);
+
+  // Check that the map of the initial array iterator hasn't changed.
+  __ LoadP(scratch2, NativeContextMemOperand());
+  __ LoadP(scratch,
+           ContextMemOperand(scratch2,
+                             Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+  __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ LoadP(scratch2,
+           ContextMemOperand(
+               scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+  __ cmp(scratch, scratch2);
+  __ bne(&runtime_call);
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  Label no_protector_check;
+  __ lbz(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+  __ DecodeField<Map::ElementsKindBits>(scratch);
+  __ cmpi(scratch, Operand(FAST_HOLEY_ELEMENTS));
+  __ bgt(&runtime_call);
+  // For non-FastHoley kinds, we can skip the protector check.
+  __ cmpi(scratch, Operand(FAST_SMI_ELEMENTS));
+  __ beq(&no_protector_check);
+  __ cmpi(scratch, Operand(FAST_ELEMENTS));
+  __ beq(&no_protector_check);
+  // Check the ArrayProtector cell.
+  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+  __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
+  __ bne(&runtime_call);
+
+  __ bind(&no_protector_check);
+  // Load the FixedArray backing store, but use the length from the array.
+  __ LoadP(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
+  __ SmiUntag(spread_len);
+  __ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+  __ b(&push_args);
+
+  __ bind(&runtime_call);
+  {
+    // Call the builtin for the result of the spread.
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(argc);
+    __ Push(constructor, new_target, argc, spread);
+    __ CallRuntime(Runtime::kSpreadIterableFixed);
+    __ mr(spread, r3);
+    __ Pop(constructor, new_target, argc);
+    __ SmiUntag(argc);
+  }
+
+  {
+    // Calculate the new nargs including the result of the spread.
+    __ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
+    __ SmiUntag(spread_len);
+
+    __ bind(&push_args);
+    // argc += spread_len - 1. Subtract 1 for the spread itself.
+    __ add(argc, argc, spread_len);
+    __ subi(argc, argc, Operand(1));
+
+    // Pop the spread argument off the stack.
+    __ Pop(scratch);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+    // Make scratch the space we have left. The stack might already be
+    // overflowed here, which will cause scratch to become negative.
+    __ sub(scratch, sp, scratch);
+    // Check if the arguments will overflow the stack.
+    __ ShiftLeftImm(r0, spread_len, Operand(kPointerSizeLog2));
+    __ cmp(scratch, r0);
+    __ bgt(&done);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // Put the evaluated spread onto the stack as additional arguments.
+  {
+    __ li(scratch, Operand::Zero());
+    Label done, push, loop;
+    __ bind(&loop);
+    __ cmp(scratch, spread_len);
+    __ beq(&done);
+    __ ShiftLeftImm(r0, scratch, Operand(kPointerSizeLog2));
+    __ add(scratch2, spread, r0);
+    __ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+    __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+    __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ Push(scratch2);
+    __ addi(scratch, scratch, Operand(1));
+    __ b(&loop);
+    __ bind(&done);
+  }
+}
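
Both spread builtins then apply the same count fix-up after materializing the spread: the spread value itself occupied one argument slot, and its elements replace it. Trivially:

// Receiver-exclusive argument count once the spread is expanded: the
// spread's elements are added and the slot the spread itself occupied
// (popped above) is subtracted.
int AdjustedArgumentCount(int argc, int spread_len) {
  return argc + spread_len - 1;
}
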
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the number of arguments (not including the receiver)
+  //  -- r4 : the constructor to call (can be any Object)
+  // -----------------------------------
+
+  // CheckSpreadAndPushToStack will push r6 to save it.
+  __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            TailCallMode::kDisallow),
+          RelocInfo::CODE_TARGET);
+}
+
 // static
 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -2835,6 +3059,18 @@
           RelocInfo::CODE_TARGET);
 }
 
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : the number of arguments (not including the receiver)
+  //  -- r4 : the constructor to call (can be any Object)
+  //  -- r6 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
 // static
 void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
   // ----------- S t a t e -------------
diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc
index 8655ab8..429282d 100644
--- a/src/builtins/s390/builtins-s390.cc
+++ b/src/builtins/s390/builtins-s390.cc
@@ -334,11 +334,11 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    FastNewObjectStub stub(masm->isolate());
     __ SmiTag(r8);
     __ EnterBuiltinFrame(cp, r3, r8);
     __ Push(r4);  // first argument
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(r4);
     __ LeaveBuiltinFrame(cp, r3, r8);
     __ SmiUntag(r8);
@@ -484,11 +484,11 @@
   __ bind(&new_object);
   {
     FrameScope scope(masm, StackFrame::MANUAL);
-    FastNewObjectStub stub(masm->isolate());
     __ SmiTag(r8);
     __ EnterBuiltinFrame(cp, r3, r8);
     __ Push(r4);  // first argument
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(r4);
     __ LeaveBuiltinFrame(cp, r3, r8);
     __ SmiUntag(r8);
@@ -556,6 +556,7 @@
 void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
                                     bool create_implicit_receiver,
                                     bool check_derived_construct) {
+  Label post_instantiation_deopt_entry;
   // ----------- S t a t e -------------
   //  -- r2     : number of arguments
   //  -- r3     : constructor function
@@ -584,8 +585,8 @@
 
       // Allocate the new receiver object.
       __ Push(r3, r5);
-      FastNewObjectStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+              RelocInfo::CODE_TARGET);
       __ LoadRR(r6, r2);
       __ Pop(r3, r5);
 
@@ -606,6 +607,9 @@
       __ Push(r6, r6);
     }
 
+    // Deoptimizer re-enters stub code here.
+    __ bind(&post_instantiation_deopt_entry);
+
     // Set up pointer to last argument.
     __ la(r4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
 
@@ -641,7 +645,8 @@
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
-      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+      masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+          masm->pc_offset());
     }
 
     // Restore context from the frame.
@@ -707,6 +712,35 @@
     __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r3, r4);
   }
   __ Ret();
+
+  // Store offset of trampoline address for deoptimizer. This is the bailout
+  // point after the receiver instantiation but before the function invocation.
+  // We need to restore some registers so that execution can continue in
+  // the code above.
+  if (create_implicit_receiver && !is_api_function) {
+    masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+        masm->pc_offset());
+
+    // ----------- S t a t e -------------
+    //  -- r2    : newly allocated object
+    //  -- sp[0] : constructor function
+    // -----------------------------------
+
+    __ pop(r3);
+    __ Push(r2, r2);
+
+    // Retrieve smi-tagged arguments count from the stack.
+    __ LoadP(r2, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+    __ SmiUntag(r2);
+
+    // Retrieve the new target value from the stack. This was placed into the
+    // frame description in place of the receiver by the optimizing compiler.
+    __ la(r5, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
+    __ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
+    __ LoadP(r5, MemOperand(r5, ip));
+
+    // Continue with constructor function invocation.
+    __ b(&post_instantiation_deopt_entry);
+  }
 }
 
 }  // namespace
@@ -748,19 +782,18 @@
   __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
 
   // Load suspended function and context.
-  __ LoadP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset));
   __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+  __ LoadP(cp, FieldMemOperand(r6, JSFunction::kContextOffset));
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
   Label stepping_prepared;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(masm->isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  __ mov(ip, Operand(last_step_action));
+  ExternalReference debug_hook =
+      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+  __ mov(ip, Operand(debug_hook));
   __ LoadB(ip, MemOperand(ip));
-  __ CmpP(ip, Operand(StepIn));
-  __ bge(&prepare_step_in_if_stepping);
+  __ CmpSmiLiteral(ip, Smi::kZero, r0);
+  __ bne(&prepare_step_in_if_stepping);
 
   // Flood function if we need to continue stepping in the suspended generator.
 
@@ -811,13 +844,14 @@
     __ bind(&done_loop);
   }
 
-  // Dispatch on the kind of generator object.
-  Label old_generator;
-  __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
-  __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
-  __ bne(&old_generator, Label::kNear);
+  // Underlying function needs to have bytecode available.
+  if (FLAG_debug_code) {
+    __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+    __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
+    __ Assert(eq, kMissingBytecodeArray);
+  }
 
-  // New-style (ignition/turbofan) generator object
+  // Resume (Ignition/TurboFan) generator object.
   {
     // We abuse new.target both to indicate that this is a resume call and to
     // pass in the generator object.  In ordinary calls, new.target is always
@@ -827,61 +861,12 @@
     __ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeEntryOffset));
     __ JumpToJSEntry(ip);
   }
-  // Old-style (full-codegen) generator object
-  __ bind(&old_generator);
-  {
-    // Enter a new JavaScript frame, and initialize its slots as they were when
-    // the generator was suspended.
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ PushStandardFrame(r6);
-
-    // Restore the operand stack.
-    __ LoadP(r2, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
-    __ LoadP(r5, FieldMemOperand(r2, FixedArray::kLengthOffset));
-    __ AddP(r2, r2,
-            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-    {
-      Label loop, done_loop;
-      __ SmiUntag(r5);
-      __ LoadAndTestP(r5, r5);
-      __ beq(&done_loop);
-      __ LoadRR(r1, r5);
-      __ bind(&loop);
-      __ LoadP(ip, MemOperand(r2, kPointerSize));
-      __ la(r2, MemOperand(r2, kPointerSize));
-      __ Push(ip);
-      __ BranchOnCount(r1, &loop);
-      __ bind(&done_loop);
-    }
-
-    // Reset operand stack so we don't leak.
-    __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
-    __ StoreP(ip, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset),
-              r0);
-
-    // Resume the generator function at the continuation.
-    __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
-    __ AddP(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
-    {
-      ConstantPoolUnavailableScope constant_pool_unavailable(masm);
-      __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
-      __ SmiUntag(r4);
-      __ AddP(r5, r5, r4);
-      __ LoadSmiLiteral(r4,
-                        Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
-      __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
-                r0);
-      __ LoadRR(r2, r3);  // Continuation expects generator object in r2.
-      __ Jump(r5);
-    }
-  }
 
   __ bind(&prepare_step_in_if_stepping);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
     __ Push(r3, r4, r6);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(r3, r4);
     __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
   }
@@ -1074,7 +1059,7 @@
   // Load original bytecode array or the debug copy.
   __ LoadP(kInterpreterBytecodeArrayRegister,
            FieldMemOperand(r2, SharedFunctionInfo::kFunctionDataOffset));
-  __ CmpSmiLiteral(debug_info, DebugInfo::uninitialized(), r0);
+  __ TestIfSmi(debug_info);
   __ beq(&array_done);
   __ LoadP(kInterpreterBytecodeArrayRegister,
            FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
@@ -1087,15 +1072,15 @@
   __ bne(&switch_to_different_code_kind);
 
   // Increment invocation count for the function.
-  __ LoadP(r6, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
-  __ LoadP(r6, FieldMemOperand(r6, LiteralsArray::kFeedbackVectorOffset));
-  __ LoadP(r1, FieldMemOperand(r6, TypeFeedbackVector::kInvocationCountIndex *
-                                           kPointerSize +
-                                       TypeFeedbackVector::kHeaderSize));
+  __ LoadP(r6, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
+  __ LoadP(r6, FieldMemOperand(r6, Cell::kValueOffset));
+  __ LoadP(r1, FieldMemOperand(
+                   r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                           FeedbackVector::kHeaderSize));
   __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
-  __ StoreP(r1, FieldMemOperand(r6, TypeFeedbackVector::kInvocationCountIndex *
-                                            kPointerSize +
-                                        TypeFeedbackVector::kHeaderSize));
+  __ StoreP(r1, FieldMemOperand(
+                    r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                            FeedbackVector::kHeaderSize));
 
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
@@ -1106,6 +1091,12 @@
     __ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Reset code age.
+  __ mov(r1, Operand(BytecodeArray::kNoAgeBytecodeAge));
+  __ StoreByte(r1, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                   BytecodeArray::kBytecodeAgeOffset),
+               r0);
+
   // Load the initial bytecode offset.
   __ mov(kInterpreterBytecodeOffsetRegister,
          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1213,7 +1204,7 @@
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
-    CallableType function_type) {
+    InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- r2 : the number of arguments (not including the receiver)
   //  -- r4 : the address of the first argument to be pushed. Subsequent
@@ -1230,12 +1221,14 @@
   Generate_InterpreterPushArgs(masm, r5, r4, r5, r6, &stack_overflow);
 
   // Call the target.
-  if (function_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
                                                       tail_call_mode),
             RelocInfo::CODE_TARGET);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(function_type, CallableType::kAny);
     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
@@ -1251,7 +1244,7 @@
 
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
-    MacroAssembler* masm, CallableType construct_type) {
+    MacroAssembler* masm, InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   // -- r2 : argument count (not including receiver)
   // -- r5 : new target
@@ -1273,7 +1266,7 @@
   __ bind(&skip);
 
   __ AssertUndefinedOrAllocationSite(r4, r7);
-  if (construct_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ AssertFunction(r3);
 
     // Tail call to the function-specific construct stub (still in the caller
@@ -1283,9 +1276,12 @@
     // Jump to the construct function.
     __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
     __ Jump(ip);
-
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    // Call the constructor with r2, r3, and r5 unmodified.
+    __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(construct_type, CallableType::kAny);
+    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
     // Call the constructor with r2, r3, and r5 unmodified.
     __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   }
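
Note on the dispatch above: InterpreterPushArgsMode replaces the old CallableType
parameter so that a trailing spread gets its own target. A minimal C++ sketch of
the three-way selection; the enum values follow the code, but the Target names
are illustrative placeholders, not V8 API:

#include <cassert>

enum class InterpreterPushArgsMode { kJSFunction, kWithFinalSpread, kOther };
enum class Target { kConstructStub, kConstructWithSpread, kConstruct };

// Mirrors the if/else chain above: JSFunction constructors tail-call their
// function-specific construct stub, a trailing spread goes through
// ConstructWithSpread, and everything else uses the generic Construct.
Target SelectConstructTarget(InterpreterPushArgsMode mode) {
  if (mode == InterpreterPushArgsMode::kJSFunction) {
    return Target::kConstructStub;
  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    return Target::kConstructWithSpread;
  }
  assert(mode == InterpreterPushArgsMode::kOther);
  return Target::kConstruct;
}
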
@@ -1408,13 +1404,19 @@
   Register closure = r3;
   Register map = r8;
   Register index = r4;
+
+  // Do we have a valid feedback vector?
+  __ LoadP(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
+  __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+
   __ LoadP(map,
            FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   __ LoadP(map,
            FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
   __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
   __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
-  __ blt(&gotta_call_runtime);
+  __ blt(&try_shared);
 
   // Find literals.
   // r9 : native context
@@ -1437,25 +1439,6 @@
   __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
   __ CmpP(temp, native_context);
   __ bne(&loop_bottom, Label::kNear);
-  // OSR id set to none?
-  __ LoadP(temp,
-           FieldMemOperand(array_pointer,
-                           SharedFunctionInfo::kOffsetToPreviousOsrAstId));
-  const int bailout_id = BailoutId::None().ToInt();
-  __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
-  __ bne(&loop_bottom, Label::kNear);
-  // Literals available?
-  __ LoadP(temp,
-           FieldMemOperand(array_pointer,
-                           SharedFunctionInfo::kOffsetToPreviousLiterals));
-  __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
-
-  // Save the literals in the closure.
-  __ StoreP(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset), r0);
-  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r6,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
 
   // Code available?
   Register entry = r6;
@@ -1465,7 +1448,7 @@
   __ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
 
-  // Found literals and code. Get them into the closure and return.
+  // Found code. Get it into the closure and return.
   // Store code entry in the closure.
   __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
@@ -1499,7 +1482,7 @@
   __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
   __ bgt(&loop_top);
 
-  // We found neither literals nor code.
+  // We found no code.
   __ b(&gotta_call_runtime);
 
   __ bind(&try_shared);
@@ -1510,13 +1493,14 @@
                       entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
   __ TestBit(temp, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
   __ bne(&gotta_call_runtime);
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ LoadlW(r7, FieldMemOperand(entry, Code::kFlagsOffset));
-  __ DecodeField<Code::KindField>(r7);
-  __ CmpP(r7, Operand(Code::BUILTIN));
+  __ mov(r7, Operand(masm->CodeObject()));
+  __ CmpP(entry, r7);
   __ beq(&gotta_call_runtime);
-  // Yes, install the full code.
+
+  // Install the SFI's code entry.
   __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
   __ RecordWriteCodeEntryField(closure, entry, r7);
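
The CompileLazy changes above replace the literals lookup with a feedback-vector
validity check and allow the shared code to be installed whenever it is anything
other than the CompileLazy builtin itself. A hedged sketch of the resulting
decision chain; the booleans abstract the checks performed in the assembly and
the names are illustrative, not V8 API:

enum class LazyResult { kInstallOptimized, kInstallSharedCode, kCallRuntime };

// Mirrors the control flow above: no feedback vector -> runtime; a
// context-matching entry in the optimized code map -> install it; otherwise
// try_shared installs the SharedFunctionInfo's code unless the function is
// marked for tier-up or its code is still the CompileLazy builtin.
LazyResult DecideLazyCompile(bool has_feedback_vector,
                             bool found_optimized_code_for_context,
                             bool marked_for_tier_up,
                             bool shared_code_is_compile_lazy) {
  if (!has_feedback_vector) return LazyResult::kCallRuntime;
  if (found_optimized_code_for_context) return LazyResult::kInstallOptimized;
  if (marked_for_tier_up) return LazyResult::kCallRuntime;
  if (shared_code_is_compile_lazy) return LazyResult::kCallRuntime;
  return LazyResult::kInstallSharedCode;
}
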
@@ -1632,14 +1616,9 @@
   __ Jump(ip);
 }
 
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
-  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
-  }                                                           \
-  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                              \
+  void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+    GenerateMakeCodeYoungAgainCommon(masm);                               \
   }
 CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2169,20 +2148,20 @@
   __ bind(&target_not_constructor);
   {
     __ StoreP(r3, MemOperand(sp, 0));
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 
   // 4c. The new.target is not a constructor, throw an appropriate TypeError.
   __ bind(&new_target_not_constructor);
   {
     __ StoreP(r5, MemOperand(sp, 0));
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 }
 
 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
   __ SmiTag(r2);
-  __ LoadSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ Load(r6, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   // Stack updated as such:
   //    old SP --->
   //                 R14 Return Addr
@@ -2228,7 +2207,8 @@
 
   // Create the list of arguments from the array-like argumentsList.
   {
-    Label create_arguments, create_array, create_runtime, done_create;
+    Label create_arguments, create_array, create_holey_array, create_runtime,
+        done_create;
     __ JumpIfSmi(r2, &create_runtime);
 
     // Load the map of argumentsList into r4.
@@ -2272,17 +2252,37 @@
     __ LoadRR(r2, r6);
     __ b(&done_create);
 
+    // For holey JSArrays we need to check that the array prototype chain
+    // protector is intact and that our prototype actually is Array.prototype.
+    __ bind(&create_holey_array);
+    __ LoadP(r4, FieldMemOperand(r4, Map::kPrototypeOffset));
+    __ LoadP(r6, ContextMemOperand(r6, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+    __ CmpP(r4, r6);
+    __ bne(&create_runtime);
+    __ LoadRoot(r6, Heap::kArrayProtectorRootIndex);
+    __ LoadP(r4, FieldMemOperand(r6, PropertyCell::kValueOffset));
+    __ CmpSmiLiteral(r4, Smi::FromInt(Isolate::kProtectorValid), r0);
+    __ bne(&create_runtime);
+    __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
+    __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
+    __ SmiUntag(r4);
+    __ b(&done_create);
+
     // Try to create the list from a JSArray object.
+    // -- r4 and r6 must be preserved until the branch to create_holey_array.
     __ bind(&create_array);
-    __ LoadlB(r4, FieldMemOperand(r4, Map::kBitField2Offset));
-    __ DecodeField<Map::ElementsKindBits>(r4);
+    __ LoadlB(r7, FieldMemOperand(r4, Map::kBitField2Offset));
+    __ DecodeField<Map::ElementsKindBits>(r7);
     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
     STATIC_ASSERT(FAST_ELEMENTS == 2);
-    __ CmpP(r4, Operand(FAST_ELEMENTS));
+    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+    __ CmpP(r7, Operand(FAST_HOLEY_ELEMENTS));
     __ bgt(&create_runtime);
-    __ CmpP(r4, Operand(FAST_HOLEY_SMI_ELEMENTS));
-    __ beq(&create_runtime);
+    // Only the FAST_* kinds remain at this point; the FAST_HOLEY_* kinds
+    // are the odd values, so testing the low bit identifies them.
+    __ TestBit(r7, Map::kHasNonInstancePrototype, r0);
+    __ bne(&create_holey_array);
+    // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
     __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
     __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
     __ SmiUntag(r4);
@@ -2317,16 +2317,21 @@
 
   // Push arguments onto the stack (thisArgument is already on the stack).
   {
-    Label loop, no_args;
+    __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+    Label loop, no_args, skip;
     __ CmpP(r4, Operand::Zero());
     __ beq(&no_args);
     __ AddP(r2, r2,
             Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
     __ LoadRR(r1, r4);
     __ bind(&loop);
-    __ LoadP(r0, MemOperand(r2, kPointerSize));
+    __ LoadP(ip, MemOperand(r2, kPointerSize));
     __ la(r2, MemOperand(r2, kPointerSize));
-    __ push(r0);
+    __ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ bne(&skip, Label::kNear);
+    __ LoadRR(ip, r8);
+    __ bind(&skip);
+    __ push(ip);
     __ BranchOnCount(r1, &loop);
     __ bind(&no_args);
     __ LoadRR(r2, r4);
@@ -2340,6 +2345,75 @@
   }
 }
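
The push loop above rewrites each hole in the backing store to undefined before
it reaches the stack, so holey arrays can be applied without the_hole leaking
out. A hedged C++ sketch of the per-element rule; the sentinel constants are
placeholders for the real oddball objects, which the assembly compares against
root-list entries:

#include <cstdint>
#include <vector>

// Placeholders for Heap::kTheHoleValueRootIndex / kUndefinedValueRootIndex.
constexpr intptr_t kTheHole = -1;
constexpr intptr_t kUndefined = 0;

// Mirrors the loop above: every element is pushed, with holes rewritten to
// undefined on the way to the stack.
void PushApplyArguments(const std::vector<intptr_t>& elements,
                        std::vector<intptr_t>& stack) {
  for (intptr_t value : elements) {
    stack.push_back(value == kTheHole ? kUndefined : value);
  }
}
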
 
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+                                           Handle<Code> code) {
+  // ----------- S t a t e -------------
+  //  -- r3    : the target to call (can be any Object)
+  //  -- r4    : start index (to support rest parameters)
+  //  -- lr    : return address.
+  //  -- sp[0] : thisArgument
+  // -----------------------------------
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
+  __ CmpP(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ beq(&arguments_adaptor);
+  {
+    __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    __ LoadP(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadW(r2, FieldMemOperand(
+                     r2, SharedFunctionInfo::kFormalParameterCountOffset));
+    __ LoadRR(r5, fp);
+  }
+  __ b(&arguments_done);
+  __ bind(&arguments_adaptor);
+  {
+    // Load the length from the ArgumentsAdaptorFrame.
+    __ LoadP(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  }
+  __ bind(&arguments_done);
+
+  Label stack_empty, stack_done, stack_overflow;
+  __ SmiUntag(r2);
+  __ SubP(r2, r2, r4);
+  __ CmpP(r2, Operand::Zero());
+  __ ble(&stack_empty);
+  {
+    // Check for stack overflow.
+    Generate_StackOverflowCheck(masm, r2, r4, &stack_overflow);
+
+    // Forward the arguments from the caller frame.
+    {
+      Label loop;
+      __ AddP(r5, r5, Operand(kPointerSize));
+      __ LoadRR(r4, r2);
+      __ bind(&loop);
+      {
+        __ ShiftLeftP(ip, r4, Operand(kPointerSizeLog2));
+        __ LoadP(ip, MemOperand(r5, ip));
+        __ push(ip);
+        __ SubP(r4, r4, Operand(1));
+        __ CmpP(r4, Operand::Zero());
+        __ bne(&loop);
+      }
+    }
+  }
+  __ b(&stack_done);
+  __ bind(&stack_overflow);
+  __ TailCallRuntime(Runtime::kThrowStackOverflow);
+  __ bind(&stack_empty);
+  {
+    // We just pass the receiver, which is already on the stack.
+    __ mov(r2, Operand::Zero());
+  }
+  __ bind(&stack_done);
+
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
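
CallForwardVarargs forwards the caller's actual arguments starting at a given
index, which is what rest-parameter forwarding compiles to. A hedged sketch of
the count computation; the argument count comes from the arguments adaptor
frame when one exists, otherwise from the formal parameter count, and the x64
port below computes the same value:

#include <algorithm>

// Illustrative only, not V8 API: r2 (s390) / rax (x64) ends up holding
// max(caller_arg_count - start_index, 0); zero or fewer arguments takes the
// &stack_empty path and passes just the receiver.
int ArgumentsToForward(int caller_arg_count, int start_index) {
  return std::max(caller_arg_count - start_index, 0);
}
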
+
 namespace {
 
 // Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2390,7 +2464,7 @@
     Label no_interpreter_frame;
     __ LoadP(scratch3,
              MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
-    __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::STUB), r0);
+    __ CmpP(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
     __ bne(&no_interpreter_frame);
     __ LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
     __ bind(&no_interpreter_frame);
@@ -2403,7 +2477,8 @@
   __ LoadP(
       scratch3,
       MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ CmpP(scratch3,
+          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ bne(&no_arguments_adaptor);
 
   // Drop current frame and load arguments count from arguments adaptor frame.
@@ -2726,6 +2801,156 @@
   }
 }
 
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+  Register argc = r2;
+  Register constructor = r3;
+  Register new_target = r5;
+
+  Register scratch = r4;
+  Register scratch2 = r8;
+
+  Register spread = r6;
+  Register spread_map = r7;
+  Register spread_len = r7;
+  Label runtime_call, push_args;
+  __ LoadP(spread, MemOperand(sp, 0));
+  __ JumpIfSmi(spread, &runtime_call);
+  __ LoadP(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+
+  // Check that the spread is an array.
+  __ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
+  __ bne(&runtime_call);
+
+  // Check that we have the original ArrayPrototype.
+  __ LoadP(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+  __ LoadP(scratch2, NativeContextMemOperand());
+  __ LoadP(scratch2,
+           ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+  __ CmpP(scratch, scratch2);
+  __ bne(&runtime_call);
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
+  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+  __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
+  __ bne(&runtime_call);
+
+  // Check that the map of the initial array iterator hasn't changed.
+  __ LoadP(scratch2, NativeContextMemOperand());
+  __ LoadP(scratch,
+           ContextMemOperand(scratch2,
+                             Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+  __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ LoadP(scratch2,
+           ContextMemOperand(
+               scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+  __ CmpP(scratch, scratch2);
+  __ bne(&runtime_call);
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  Label no_protector_check;
+  __ LoadlB(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+  __ DecodeField<Map::ElementsKindBits>(scratch);
+  __ CmpP(scratch, Operand(FAST_HOLEY_ELEMENTS));
+  __ bgt(&runtime_call);
+  // For non-FastHoley kinds, we can skip the protector check.
+  __ CmpP(scratch, Operand(FAST_SMI_ELEMENTS));
+  __ beq(&no_protector_check);
+  __ CmpP(scratch, Operand(FAST_ELEMENTS));
+  __ beq(&no_protector_check);
+  // Check the ArrayProtector cell.
+  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+  __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+  __ CmpSmiLiteral(scratch, Smi::FromInt(Isolate::kProtectorValid), r0);
+  __ bne(&runtime_call);
+
+  __ bind(&no_protector_check);
+  // Load the FixedArray backing store, but use the length from the array.
+  __ LoadP(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
+  __ SmiUntag(spread_len);
+  __ LoadP(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+  __ b(&push_args);
+
+  __ bind(&runtime_call);
+  {
+    // Call the builtin for the result of the spread.
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ SmiTag(argc);
+    __ Push(constructor, new_target, argc, spread);
+    __ CallRuntime(Runtime::kSpreadIterableFixed);
+    __ LoadRR(spread, r2);
+    __ Pop(constructor, new_target, argc);
+    __ SmiUntag(argc);
+  }
+
+  {
+    // Calculate the new nargs including the result of the spread.
+    __ LoadP(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
+    __ SmiUntag(spread_len);
+
+    __ bind(&push_args);
+    // argc += spread_len - 1. Subtract 1 for the spread itself.
+    __ AddP(argc, argc, spread_len);
+    __ SubP(argc, argc, Operand(1));
+
+    // Pop the spread argument off the stack.
+    __ Pop(scratch);
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (e.g. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+    // Make scratch the space we have left. The stack might already be
+    // overflowed here which will cause scratch to become negative.
+    __ SubP(scratch, sp, scratch);
+    // Check if the arguments will overflow the stack.
+    __ ShiftLeftP(r0, spread_len, Operand(kPointerSizeLog2));
+    __ CmpP(scratch, r0);
+    __ bgt(&done);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // Put the evaluated spread onto the stack as additional arguments.
+  {
+    __ LoadImmP(scratch, Operand::Zero());
+    Label done, push, loop;
+    __ bind(&loop);
+    __ CmpP(scratch, spread_len);
+    __ beq(&done);
+    __ ShiftLeftP(r0, scratch, Operand(kPointerSizeLog2));
+    __ AddP(scratch2, spread, r0);
+    __ LoadP(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
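+    // Turn the hole into undefined as we go.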
+    __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+    __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ Push(scratch2);
+    __ AddP(scratch, scratch, Operand(1));
+    __ b(&loop);
+    __ bind(&done);
+  }
+}
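
CheckSpreadAndPushToStack takes its fast path only when iterating the spread
cannot run user code; otherwise it calls Runtime::kSpreadIterableFixed and
spreads the resulting FixedArray. In either case the argument count becomes
argc + spread_len - 1, since the spread itself is popped. A hedged C++ sketch
of the guard chain; each boolean abstracts one of the checks above, and the
elements-kind values follow the STATIC_ASSERTs in the code:

// Illustrative only, not V8 API.
bool SpreadFastPathAllowed(bool is_js_array,
                           bool has_initial_array_prototype,
                           bool array_iterator_protector_valid,
                           bool initial_iterator_map_unchanged,
                           int elements_kind,  // FAST_* kinds, 0..3.
                           bool array_protector_valid) {
  if (!is_js_array) return false;                     // JS_ARRAY_TYPE check.
  if (!has_initial_array_prototype) return false;     // prototype check.
  if (!array_iterator_protector_valid) return false;  // iteration protector.
  if (!initial_iterator_map_unchanged) return false;  // iterator map check.
  if (elements_kind > 3) return false;  // > FAST_HOLEY_ELEMENTS -> runtime.
  // Packed kinds (FAST_SMI_ELEMENTS == 0, FAST_ELEMENTS == 2) skip the Array
  // protector; the holey kinds (odd values) also need it, because holes are
  // loaded as undefined when pushing.
  const bool is_holey = (elements_kind & 1) != 0;
  return !is_holey || array_protector_valid;
}
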
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the target to call (can be any Object)
+  // -----------------------------------
+
+  // CheckSpreadAndPushToStack will push r5 (the new.target register) to
+  // save it, so preload it with undefined.
+  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            TailCallMode::kDisallow),
+          RelocInfo::CODE_TARGET);
+}
+
 // static
 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -2846,6 +3071,18 @@
           RelocInfo::CODE_TARGET);
 }
 
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r2 : the number of arguments (not including the receiver)
+  //  -- r3 : the constructor to call (can be any Object)
+  //  -- r5 : the new target (either the same as the constructor or
+  //          the JSFunction on which new was invoked initially)
+  // -----------------------------------
+
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
 // static
 void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
   // ----------- S t a t e -------------
diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc
index cde0264..703a7e7 100644
--- a/src/builtins/x64/builtins-x64.cc
+++ b/src/builtins/x64/builtins-x64.cc
@@ -6,8 +6,10 @@
 
 #include "src/code-factory.h"
 #include "src/codegen.h"
+#include "src/counters.h"
 #include "src/deoptimizer.h"
 #include "src/full-codegen/full-codegen.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -117,6 +119,8 @@
 void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
                                     bool create_implicit_receiver,
                                     bool check_derived_construct) {
+  Label post_instantiation_deopt_entry;
+
   // ----------- S t a t e -------------
   //  -- rax: number of arguments
   //  -- rsi: context
@@ -137,8 +141,8 @@
       // Allocate the new receiver object.
       __ Push(rdi);
       __ Push(rdx);
-      FastNewObjectStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+              RelocInfo::CODE_TARGET);
       __ movp(rbx, rax);
       __ Pop(rdx);
       __ Pop(rdi);
@@ -151,9 +155,7 @@
 
       // Retrieve smi-tagged arguments count from the stack.
       __ SmiToInteger32(rax, Operand(rsp, 0 * kPointerSize));
-    }
 
-    if (create_implicit_receiver) {
       // Push the allocated receiver to the stack. We need two copies
       // because we may have to return the original one and the calling
       // conventions dictate that the called function pops the receiver.
@@ -163,6 +165,9 @@
       __ PushRoot(Heap::kTheHoleValueRootIndex);
     }
 
+    // Deoptimizer re-enters stub code here.
+    __ bind(&post_instantiation_deopt_entry);
+
     // Set up pointer to last argument.
     __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
 
@@ -183,7 +188,8 @@
 
     // Store offset of return address for deoptimizer.
     if (create_implicit_receiver && !is_api_function) {
-      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+      masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+          masm->pc_offset());
     }
 
     // Restore context from the frame.
@@ -242,6 +248,35 @@
     __ IncrementCounter(counters->constructed_objects(), 1);
   }
   __ ret(0);
+
+  // Store offset of trampoline address for deoptimizer. This is the bailout
+  // point after the receiver instantiation but before the function invocation.
+  // We need to restore some registers so that execution can continue in
+  // the code above.
+  if (create_implicit_receiver && !is_api_function) {
+    masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+        masm->pc_offset());
+
+    // ----------- S t a t e -------------
+    //  -- rax    : newly allocated object
+    //  -- rsp[0] : constructor function
+    // -----------------------------------
+
+    __ Pop(rdi);
+    __ Push(rax);
+    __ Push(rax);
+
+    // Retrieve smi-tagged arguments count from the stack.
+    __ SmiToInteger32(rax,
+                      Operand(rbp, ConstructFrameConstants::kLengthOffset));
+
+    // Retrieve the new target value from the stack. This was placed into the
+    // frame description in place of the receiver by the optimizing compiler.
+    __ movp(rdx, Operand(rbp, rax, times_pointer_size,
+                         StandardFrameConstants::kCallerSPOffset));
+
+    // Continue with constructor function invocation.
+    __ jmp(&post_instantiation_deopt_entry);
+  }
 }
 
 }  // namespace
@@ -460,18 +495,17 @@
   __ movp(FieldOperand(rbx, JSGeneratorObject::kResumeModeOffset), rdx);
 
   // Load suspended function and context.
-  __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
   __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
   Label stepping_prepared;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(masm->isolate());
-  Operand last_step_action_operand = masm->ExternalOperand(last_step_action);
-  STATIC_ASSERT(StepFrame > StepIn);
-  __ cmpb(last_step_action_operand, Immediate(StepIn));
-  __ j(greater_equal, &prepare_step_in_if_stepping);
+  ExternalReference debug_hook =
+      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+  Operand debug_hook_operand = masm->ExternalOperand(debug_hook);
+  __ cmpb(debug_hook_operand, Immediate(0));
+  __ j(not_equal, &prepare_step_in_if_stepping);
 
   // Flood function if we need to continue stepping in the suspended generator.
   ExternalReference debug_suspended_generator =
@@ -514,14 +548,15 @@
     __ bind(&done_loop);
   }
 
-  // Dispatch on the kind of generator object.
-  Label old_generator;
-  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
-  __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
-  __ j(not_equal, &old_generator);
+  // Underlying function needs to have bytecode available.
+  if (FLAG_debug_code) {
+    __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+    __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
+    __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
+    __ Assert(equal, kMissingBytecodeArray);
+  }
 
-  // New-style (ignition/turbofan) generator object.
+  // Resume (Ignition/TurboFan) generator object.
   {
     __ PushReturnAddressFrom(rax);
     __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -534,60 +569,13 @@
     __ jmp(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
   }
 
-  // Old-style (full-codegen) generator object.
-  __ bind(&old_generator);
-  {
-    // Enter a new JavaScript frame, and initialize its slots as they were when
-    // the generator was suspended.
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ PushReturnAddressFrom(rax);  // Return address.
-    __ Push(rbp);                   // Caller's frame pointer.
-    __ Move(rbp, rsp);
-    __ Push(rsi);  // Callee's context.
-    __ Push(rdi);  // Callee's JS Function.
-
-    // Restore the operand stack.
-    __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
-    __ SmiToInteger32(rax, FieldOperand(rsi, FixedArray::kLengthOffset));
-    {
-      Label done_loop, loop;
-      __ Set(rcx, 0);
-      __ bind(&loop);
-      __ cmpl(rcx, rax);
-      __ j(equal, &done_loop, Label::kNear);
-      __ Push(
-          FieldOperand(rsi, rcx, times_pointer_size, FixedArray::kHeaderSize));
-      __ addl(rcx, Immediate(1));
-      __ jmp(&loop);
-      __ bind(&done_loop);
-    }
-
-    // Reset operand stack so we don't leak.
-    __ LoadRoot(FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset),
-                Heap::kEmptyFixedArrayRootIndex);
-
-    // Restore context.
-    __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
-
-    // Resume the generator function at the continuation.
-    __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-    __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
-    __ SmiToInteger64(
-        rcx, FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
-    __ leap(rdx, FieldOperand(rdx, rcx, times_1, Code::kHeaderSize));
-    __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
-            Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
-    __ movp(rax, rbx);  // Continuation expects generator object in rax.
-    __ jmp(rdx);
-  }
-
   __ bind(&prepare_step_in_if_stepping);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(rbx);
     __ Push(rdx);
     __ Push(rdi);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(rdx);
     __ Pop(rbx);
     __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
@@ -658,10 +646,8 @@
   // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   Label load_debug_bytecode_array, bytecode_array_loaded;
-  DCHECK_EQ(Smi::kZero, DebugInfo::uninitialized());
-  __ cmpp(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
-          Immediate(0));
-  __ j(not_equal, &load_debug_bytecode_array);
+  __ JumpIfNotSmi(FieldOperand(rax, SharedFunctionInfo::kDebugInfoOffset),
+                  &load_debug_bytecode_array);
   __ movp(kInterpreterBytecodeArrayRegister,
           FieldOperand(rax, SharedFunctionInfo::kFunctionDataOffset));
   __ bind(&bytecode_array_loaded);
@@ -673,12 +659,11 @@
   __ j(not_equal, &switch_to_different_code_kind);
 
   // Increment invocation count for the function.
-  __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
-  __ movp(rcx, FieldOperand(rcx, LiteralsArray::kFeedbackVectorOffset));
+  __ movp(rcx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
+  __ movp(rcx, FieldOperand(rcx, Cell::kValueOffset));
   __ SmiAddConstant(
-      FieldOperand(rcx,
-                   TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                       TypeFeedbackVector::kHeaderSize),
+      FieldOperand(rcx, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                            FeedbackVector::kHeaderSize),
       Smi::FromInt(1));
 
   // Check function data field is actually a BytecodeArray object.
@@ -689,6 +674,11 @@
     __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Reset code age.
+  __ movb(FieldOperand(kInterpreterBytecodeArrayRegister,
+                       BytecodeArray::kBytecodeAgeOffset),
+          Immediate(BytecodeArray::kNoAgeBytecodeAge));
+
   // Load initial bytecode offset.
   __ movp(kInterpreterBytecodeOffsetRegister,
           Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -766,24 +756,23 @@
   __ jmp(rcx);
 }
 
-static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
-                                        Register scratch1, Register scratch2,
-                                        Label* stack_overflow) {
+static void Generate_StackOverflowCheck(
+    MacroAssembler* masm, Register num_args, Register scratch,
+    Label* stack_overflow,
+    Label::Distance stack_overflow_distance = Label::kFar) {
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
-  __ LoadRoot(scratch1, Heap::kRealStackLimitRootIndex);
-  __ movp(scratch2, rsp);
-  // Make scratch2 the space we have left. The stack might already be overflowed
-  // here which will cause scratch2 to become negative.
-  __ subp(scratch2, scratch1);
-  // Make scratch1 the space we need for the array when it is unrolled onto the
-  // stack.
-  __ movp(scratch1, num_args);
-  __ shlp(scratch1, Immediate(kPointerSizeLog2));
+  __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+  __ movp(scratch, rsp);
+  // Make scratch the space we have left. The stack might already be overflowed
+  // here which will cause scratch to become negative.
+  __ subp(scratch, kScratchRegister);
+  __ sarp(scratch, Immediate(kPointerSizeLog2));
   // Check if the arguments will overflow the stack.
-  __ cmpp(scratch2, scratch1);
-  __ j(less_equal, stack_overflow);  // Signed comparison.
+  __ cmpp(scratch, num_args);
+  // Signed comparison.
+  __ j(less_equal, stack_overflow, stack_overflow_distance);
 }
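
The rewritten check needs one scratch register instead of two: rather than
scaling the argument count up to bytes, it scales the remaining stack space
down to pointer-sized slots. A hedged sketch of the arithmetic; signedness
matters because the stack may already be past the limit:

#include <cstdint>

// Illustrative only: mirrors sarp(scratch, kPointerSizeLog2) followed by a
// signed cmpp / j(less_equal). kPointerSizeLog2 is 3 on x64; the shift of a
// possibly negative value assumes arithmetic right shift, as sarp provides.
bool StackWouldOverflow(intptr_t sp, intptr_t real_stack_limit,
                        intptr_t num_args) {
  intptr_t free_slots = (sp - real_stack_limit) >> 3;
  return free_slots <= num_args;  // Signed comparison, as in the assembly.
}
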
 
 static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@@ -810,7 +799,7 @@
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
-    CallableType function_type) {
+    InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- rax : the number of arguments (not including the receiver)
   //  -- rbx : the address of the first argument to be pushed. Subsequent
@@ -825,7 +814,7 @@
   __ addp(rcx, Immediate(1));  // Add one for receiver.
 
   // Add a stack check before pushing arguments.
-  Generate_StackOverflowCheck(masm, rcx, rdx, r8, &stack_overflow);
+  Generate_StackOverflowCheck(masm, rcx, rdx, &stack_overflow);
 
   // Pop return address to allow tail-call after pushing arguments.
   __ PopReturnAddressTo(kScratchRegister);
@@ -836,12 +825,14 @@
   // Call the target.
   __ PushReturnAddressFrom(kScratchRegister);  // Re-push return address.
 
-  if (function_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
                                                       tail_call_mode),
             RelocInfo::CODE_TARGET);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(function_type, CallableType::kAny);
     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
@@ -858,7 +849,7 @@
 
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
-    MacroAssembler* masm, CallableType construct_type) {
+    MacroAssembler* masm, InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- rax : the number of arguments (not including the receiver)
   //  -- rdx : the new target (either the same as the constructor or
@@ -872,7 +863,7 @@
   Label stack_overflow;
 
   // Add a stack check before pushing arguments.
-  Generate_StackOverflowCheck(masm, rax, r8, r9, &stack_overflow);
+  Generate_StackOverflowCheck(masm, rax, r8, &stack_overflow);
 
   // Pop return address to allow tail-call after pushing arguments.
   __ PopReturnAddressTo(kScratchRegister);
@@ -887,7 +878,7 @@
   __ PushReturnAddressFrom(kScratchRegister);
 
   __ AssertUndefinedOrAllocationSite(rbx);
-  if (construct_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     // Tail call to the function-specific construct stub (still in the caller
     // context at this point).
     __ AssertFunction(rdi);
@@ -897,8 +888,12 @@
     __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
     // Jump to the constructor function (rax, rbx, rdx passed on).
     __ jmp(rcx);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    // Call the constructor (rax, rdx, rdi passed on).
+    __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(construct_type, CallableType::kAny);
+    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
     // Call the constructor (rax, rdx, rdi passed on).
     __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   }
@@ -930,7 +925,7 @@
   __ addp(r8, Immediate(1));  // Add one for receiver.
 
   // Add a stack check before pushing arguments.
-  Generate_StackOverflowCheck(masm, r8, rdi, r9, &stack_overflow);
+  Generate_StackOverflowCheck(masm, r8, rdi, &stack_overflow);
 
   // Pop return address to allow tail-call after pushing arguments.
   __ PopReturnAddressTo(kScratchRegister);
@@ -1037,13 +1032,18 @@
   Register closure = rdi;
   Register map = r8;
   Register index = r9;
+
+  // Do we have a valid feedback vector?
+  __ movp(rbx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
+  __ JumpIfRoot(rbx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+
   __ movp(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   __ movp(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
   __ SmiToInteger32(index, FieldOperand(map, FixedArray::kLengthOffset));
   __ cmpl(index, Immediate(2));
-  __ j(less, &gotta_call_runtime);
+  __ j(less, &try_shared);
 
-  // Find literals.
   // r14 : native context
   // r9  : length / index
   // r8  : optimized code map
@@ -1060,24 +1060,6 @@
   __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
   __ cmpp(temp, native_context);
   __ j(not_equal, &loop_bottom);
-  // OSR id set to none?
-  __ movp(temp, FieldOperand(map, index, times_pointer_size,
-                             SharedFunctionInfo::kOffsetToPreviousOsrAstId));
-  __ SmiToInteger32(temp, temp);
-  const int bailout_id = BailoutId::None().ToInt();
-  __ cmpl(temp, Immediate(bailout_id));
-  __ j(not_equal, &loop_bottom);
-  // Literals available?
-  __ movp(temp, FieldOperand(map, index, times_pointer_size,
-                             SharedFunctionInfo::kOffsetToPreviousLiterals));
-  __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
-
-  // Save the literals in the closure.
-  __ movp(FieldOperand(closure, JSFunction::kLiteralsOffset), temp);
-  __ movp(r15, index);
-  __ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, r15,
-                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 
   // Code available?
   Register entry = rcx;
@@ -1086,7 +1068,7 @@
   __ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
 
-  // Found literals and code. Get them into the closure and return.
+  // Found code. Get it into the closure and return.
   __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
   __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
   __ RecordWriteCodeEntryField(closure, entry, r15);
@@ -1117,23 +1099,21 @@
   __ cmpl(index, Immediate(1));
   __ j(greater, &loop_top);
 
-  // We found neither literals nor code.
-  __ jmp(&gotta_call_runtime);
-
+  // We found no code.
   __ bind(&try_shared);
   __ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   // Is the shared function marked for tier up?
   __ testb(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
            Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
   __ j(not_zero, &gotta_call_runtime);
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ movl(rbx, FieldOperand(entry, Code::kFlagsOffset));
-  __ andl(rbx, Immediate(Code::KindField::kMask));
-  __ shrl(rbx, Immediate(Code::KindField::kShift));
-  __ cmpl(rbx, Immediate(Code::BUILTIN));
+  __ Move(rbx, masm->CodeObject());
+  __ cmpp(entry, rbx);
   __ j(equal, &gotta_call_runtime);
-  // Yes, install the full code.
+
+  // Install the SFI's code entry.
   __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
   __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
   __ RecordWriteCodeEntryField(closure, entry, r15);
@@ -1166,7 +1146,7 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     // Preserve argument count for later compare.
-    __ movp(kScratchRegister, rax);
+    __ movp(rcx, rax);
     // Push the number of arguments to the callee.
     __ Integer32ToSmi(rax, rax);
     __ Push(rax);
@@ -1181,7 +1161,7 @@
     for (int j = 0; j < 4; ++j) {
       Label over;
       if (j < 3) {
-        __ cmpp(kScratchRegister, Immediate(j));
+        __ cmpp(rcx, Immediate(j));
         __ j(not_equal, &over, Label::kNear);
       }
       for (int i = j - 1; i >= 0; --i) {
@@ -1204,13 +1184,13 @@
     __ JumpIfSmi(rax, &failed, Label::kNear);
 
     __ Drop(2);
-    __ Pop(kScratchRegister);
-    __ SmiToInteger32(kScratchRegister, kScratchRegister);
+    __ Pop(rcx);
+    __ SmiToInteger32(rcx, rcx);
     scope.GenerateLeaveFrame();
 
     __ PopReturnAddressTo(rbx);
-    __ incp(kScratchRegister);
-    __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+    __ incp(rcx);
+    __ leap(rsp, Operand(rsp, rcx, times_pointer_size, 0));
     __ PushReturnAddressFrom(rbx);
     __ ret(0);
 
@@ -1248,14 +1228,9 @@
   __ ret(0);
 }
 
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
-  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
-  }                                                           \
-  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                              \
+  void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+    GenerateMakeCodeYoungAgainCommon(masm);                               \
   }
 CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -1637,7 +1612,7 @@
   {
     StackArgumentsAccessor args(rsp, 0);
     __ movp(args.GetReceiverOperand(), rdi);
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 
   // 4c. The new.target is not a constructor, throw an appropriate TypeError.
@@ -1645,7 +1620,7 @@
   {
     StackArgumentsAccessor args(rsp, 0);
     __ movp(args.GetReceiverOperand(), rdx);
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 }
 
@@ -1931,8 +1906,8 @@
     FrameScope scope(masm, StackFrame::MANUAL);
     __ EnterBuiltinFrame(rsi, rdi, r8);
     __ Push(rbx);  // the first argument
-    FastNewObjectStub stub(masm->isolate());
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(FieldOperand(rax, JSValue::kValueOffset));
     __ LeaveBuiltinFrame(rsi, rdi, r8);
   }
@@ -2086,8 +2061,8 @@
     FrameScope scope(masm, StackFrame::MANUAL);
     __ EnterBuiltinFrame(rsi, rdi, r8);
     __ Push(rbx);  // the first argument
-    FastNewObjectStub stub(masm->isolate());
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(FieldOperand(rax, JSValue::kValueOffset));
     __ LeaveBuiltinFrame(rsi, rdi, r8);
   }
@@ -2108,7 +2083,7 @@
   __ movp(rbp, rsp);
 
   // Store the arguments adaptor context sentinel.
-  __ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // Push the function on the stack.
   __ Push(rdi);
@@ -2199,7 +2174,7 @@
     __ bind(&enough);
     EnterArgumentsAdaptorFrame(masm);
     // The registers rcx and r8 will be modified. The register rbx is only read.
-    Generate_StackOverflowCheck(masm, rbx, rcx, r8, &stack_overflow);
+    Generate_StackOverflowCheck(masm, rbx, rcx, &stack_overflow);
 
     // Copy receiver and all expected arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -2221,7 +2196,7 @@
 
     EnterArgumentsAdaptorFrame(masm);
     // The registers rcx and r8 will be modified. The register rbx is only read.
-    Generate_StackOverflowCheck(masm, rbx, rcx, r8, &stack_overflow);
+    Generate_StackOverflowCheck(masm, rbx, rcx, &stack_overflow);
 
     // Copy receiver and all actual arguments.
     const int offset = StandardFrameConstants::kCallerSPOffset;
@@ -2292,7 +2267,8 @@
 
   // Create the list of arguments from the array-like argumentsList.
   {
-    Label create_arguments, create_array, create_runtime, done_create;
+    Label create_arguments, create_array, create_holey_array, create_runtime,
+        done_create;
     __ JumpIfSmi(rax, &create_runtime);
 
     // Load the map of argumentsList into rcx.
@@ -2335,6 +2311,21 @@
     __ movp(rax, rcx);
     __ jmp(&done_create);
 
+    __ bind(&create_holey_array);
+    // For holey JSArrays we need to check that the array prototype chain
+    // protector is intact and that our prototype actually is Array.prototype.
+    __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+    __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+    __ cmpp(rcx, ContextOperand(rbx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+    __ j(not_equal, &create_runtime);
+    __ LoadRoot(rcx, Heap::kArrayProtectorRootIndex);
+    __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
+           Smi::FromInt(Isolate::kProtectorValid));
+    __ j(not_equal, &create_runtime);
+    __ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
+    __ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
+    __ jmp(&done_create);
+
     // Try to create the list from a JSArray object.
     __ bind(&create_array);
     __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
@@ -2342,10 +2333,12 @@
     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
     STATIC_ASSERT(FAST_ELEMENTS == 2);
-    __ cmpl(rcx, Immediate(FAST_ELEMENTS));
-    __ j(above, &create_runtime);
+    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
     __ cmpl(rcx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
-    __ j(equal, &create_runtime);
+    __ j(equal, &create_holey_array);
+    __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
+    __ j(equal, &create_holey_array);
+    __ j(above, &create_runtime);
     __ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
     __ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
 
@@ -2383,12 +2376,18 @@
   {
     __ PopReturnAddressTo(r8);
     __ Set(rcx, 0);
-    Label done, loop;
+    Label done, push, loop;
     __ bind(&loop);
     __ cmpl(rcx, rbx);
     __ j(equal, &done, Label::kNear);
-    __ Push(
-        FieldOperand(rax, rcx, times_pointer_size, FixedArray::kHeaderSize));
+    // Turn the hole into undefined as we go.
+    __ movp(r9, FieldOperand(rax, rcx, times_pointer_size,
+                             FixedArray::kHeaderSize));
+    __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+    __ j(not_equal, &push, Label::kNear);
+    __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ Push(r9);
     __ incl(rcx);
     __ jmp(&loop);
     __ bind(&done);
@@ -2404,6 +2403,72 @@
   }
 }
 
+// static
+void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
+                                           Handle<Code> code) {
+  // ----------- S t a t e -------------
+  //  -- rdi    : the target to call (can be any Object)
+  //  -- rcx    : start index (to support rest parameters)
+  //  -- rsp[0] : return address.
+  //  -- rsp[8] : thisArgument
+  // -----------------------------------
+
+  // Check if we have an arguments adaptor frame below the function frame.
+  Label arguments_adaptor, arguments_done;
+  __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ cmpp(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
+          Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ j(equal, &arguments_adaptor, Label::kNear);
+  {
+    __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+    __ movp(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+    __ LoadSharedFunctionInfoSpecialField(
+        rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
+    __ movp(rbx, rbp);
+  }
+  __ jmp(&arguments_done, Label::kNear);
+  __ bind(&arguments_adaptor);
+  {
+    __ SmiToInteger32(
+        rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  }
+  __ bind(&arguments_done);
+
+  Label stack_empty, stack_done, stack_overflow;
+  __ subl(rax, rcx);
+  __ j(less_equal, &stack_empty);
+  {
+    // Check for stack overflow.
+    Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kNear);
+
+    // Forward the arguments from the caller frame.
+    {
+      Label loop;
+      __ movl(rcx, rax);
+      __ Pop(r8);
+      __ bind(&loop);
+      {
+        StackArgumentsAccessor args(rbx, rcx, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+        __ Push(args.GetArgumentOperand(0));
+        __ decl(rcx);
+        __ j(not_zero, &loop);
+      }
+      __ Push(r8);
+    }
+  }
+  __ jmp(&stack_done, Label::kNear);
+  __ bind(&stack_overflow);
+  __ TailCallRuntime(Runtime::kThrowStackOverflow);
+  __ bind(&stack_empty);
+  {
+    // We just pass the receiver, which is already on the stack.
+    __ Set(rax, 0);
+  }
+  __ bind(&stack_done);
+
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
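Generate_CallForwardVarargs forwards the caller's actual arguments starting at a given index; the available count comes from the formal parameter count for a standard frame, or from the adaptor frame's length when one is present. The count arithmetic in plain C++ (a sketch under those assumptions):

#include <algorithm>

int ForwardedArgumentCount(int caller_arg_count, int start_index) {
  // Matches the subl / j(less_equal, &stack_empty) sequence above: a
  // non-positive difference means only the receiver is passed on.
  return std::max(0, caller_arg_count - start_index);
}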
 namespace {
 
 // Drops top JavaScript frame and an arguments adaptor frame below it (if
@@ -2452,8 +2517,8 @@
   // Drop possible interpreter handler/stub frame.
   {
     Label no_interpreter_frame;
-    __ Cmp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
-           Smi::FromInt(StackFrame::STUB));
+    __ cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
+            Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
     __ j(not_equal, &no_interpreter_frame, Label::kNear);
     __ movp(rbp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
     __ bind(&no_interpreter_frame);
@@ -2463,8 +2528,8 @@
   Register caller_args_count_reg = scratch1;
   Label no_arguments_adaptor, formal_parameter_count_loaded;
   __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ cmpp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
+          Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &no_arguments_adaptor, Label::kNear);
 
   // Drop current frame and load arguments count from arguments adaptor frame.
@@ -2790,6 +2855,148 @@
   }
 }
 
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+  Label runtime_call, push_args;
+  // Load the spread argument into rbx.
+  __ movp(rbx, Operand(rsp, kPointerSize));
+  __ JumpIfSmi(rbx, &runtime_call);
+  // Load the map of the spread into r15.
+  __ movp(r15, FieldOperand(rbx, HeapObject::kMapOffset));
+  // Load native context into r14.
+  __ movp(r14, NativeContextOperand());
+
+  // Check that the spread is an array.
+  __ CmpInstanceType(r15, JS_ARRAY_TYPE);
+  __ j(not_equal, &runtime_call);
+
+  // Check that we have the original ArrayPrototype.
+  __ movp(rcx, FieldOperand(r15, Map::kPrototypeOffset));
+  __ cmpp(rcx, ContextOperand(r14, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+  __ j(not_equal, &runtime_call);
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
+  __ LoadRoot(rcx, Heap::kArrayIteratorProtectorRootIndex);
+  __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
+         Smi::FromInt(Isolate::kProtectorValid));
+  __ j(not_equal, &runtime_call);
+
+  // Check that the map of the initial array iterator hasn't changed.
+  __ movp(rcx,
+          ContextOperand(r14, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+  __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+  __ cmpp(rcx, ContextOperand(
+                   r14, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+  __ j(not_equal, &runtime_call);
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  Label no_protector_check;
+  __ movzxbp(rcx, FieldOperand(r15, Map::kBitField2Offset));
+  __ DecodeField<Map::ElementsKindBits>(rcx);
+  __ cmpp(rcx, Immediate(FAST_HOLEY_ELEMENTS));
+  __ j(above, &runtime_call);
+  // For non-FastHoley kinds, we can skip the protector check.
+  __ cmpp(rcx, Immediate(FAST_SMI_ELEMENTS));
+  __ j(equal, &no_protector_check);
+  __ cmpp(rcx, Immediate(FAST_ELEMENTS));
+  __ j(equal, &no_protector_check);
+  // Check the ArrayProtector cell.
+  __ LoadRoot(rcx, Heap::kArrayProtectorRootIndex);
+  __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
+         Smi::FromInt(Isolate::kProtectorValid));
+  __ j(not_equal, &runtime_call);
+
+  __ bind(&no_protector_check);
+  // Load the FixedArray backing store, but use the length from the array.
+  __ SmiToInteger32(r9, FieldOperand(rbx, JSArray::kLengthOffset));
+  __ movp(rbx, FieldOperand(rbx, JSArray::kElementsOffset));
+  __ jmp(&push_args);
+
+  __ bind(&runtime_call);
+  {
+    // Call the builtin for the result of the spread.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(rdi);  // target
+    __ Push(rdx);  // new target
+    __ Integer32ToSmi(rax, rax);
+    __ Push(rax);  // nargs
+    __ Push(rbx);
+    __ CallRuntime(Runtime::kSpreadIterableFixed);
+    __ movp(rbx, rax);
+    __ Pop(rax);  // nargs
+    __ SmiToInteger32(rax, rax);
+    __ Pop(rdx);  // new target
+    __ Pop(rdi);  // target
+  }
+
+  {
+    // Calculate the new nargs including the result of the spread.
+    __ SmiToInteger32(r9, FieldOperand(rbx, FixedArray::kLengthOffset));
+
+    __ bind(&push_args);
+    // rax += r9 - 1. Subtract 1 for the spread itself.
+    __ leap(rax, Operand(rax, r9, times_1, -1));
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+    __ movp(rcx, rsp);
+    // Make rcx the space we have left. The stack might already have
+    // overflowed here, which would make rcx negative.
+    __ subp(rcx, kScratchRegister);
+    __ sarp(rcx, Immediate(kPointerSizeLog2));
+    // Check if the arguments will overflow the stack.
+    __ cmpp(rcx, r9);
+    __ j(greater, &done, Label::kNear);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
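The overflow check divides the distance between rsp and the real stack limit by the pointer size and compares it, signed, against the number of slots needed. The same computation in C++ (a sketch; the arithmetic right shift mirrors sarp):

#include <cstdint>

bool ArgumentsFitOnStack(intptr_t sp, intptr_t real_stack_limit,
                         intptr_t needed_slots,
                         int pointer_size_log2 = 3 /* x64 */) {
  // slots_left goes negative when the stack has already overflowed; the
  // signed comparison then fails and the caller throws a stack overflow.
  intptr_t slots_left = (sp - real_stack_limit) >> pointer_size_log2;
  return slots_left > needed_slots;
}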
+
+  // Put the evaluated spread onto the stack as additional arguments.
+  {
+    // Pop the return address and spread argument.
+    __ PopReturnAddressTo(r8);
+    __ Pop(rcx);
+
+    __ Set(rcx, 0);
+    Label done, push, loop;
+    __ bind(&loop);
+    __ cmpl(rcx, r9);
+    __ j(equal, &done, Label::kNear);
+    __ movp(kScratchRegister, FieldOperand(rbx, rcx, times_pointer_size,
+                                           FixedArray::kHeaderSize));
+    __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+    __ j(not_equal, &push, Label::kNear);
+    __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ Push(kScratchRegister);
+    __ incl(rcx);
+    __ jmp(&loop);
+    __ bind(&done);
+    __ PushReturnAddressFrom(r8);
+  }
+}
+
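Taken together, the checks in CheckSpreadAndPushToStack amount to one predicate: the spread must be a JSArray with the initial Array.prototype, iteration must be unobservable (iterator protector and iterator map intact), and for holey kinds the ArrayProtector must additionally be valid. A sketch of that decision tree (illustrative flags; the ElementsKind numbering matches the STATIC_ASSERTs above):

enum ElementsKind {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3
};

bool CanSpreadWithoutIterating(bool is_js_array, bool initial_array_prototype,
                               bool iterator_protector_valid,
                               bool iterator_map_unchanged, ElementsKind kind,
                               bool array_protector_valid) {
  if (!is_js_array || !initial_array_prototype || !iterator_protector_valid ||
      !iterator_map_unchanged) {
    return false;
  }
  if (kind > FAST_HOLEY_ELEMENTS) return false;  // not a fast kind
  // Packed kinds never read through the prototype chain, so only the holey
  // kinds require the ArrayProtector cell to be valid.
  bool holey =
      kind == FAST_HOLEY_SMI_ELEMENTS || kind == FAST_HOLEY_ELEMENTS;
  return !holey || array_protector_valid;
}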
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdi : the target to call (can be any Object)
+  // -----------------------------------
+
+  // CheckSpreadAndPushToStack will push rdx to save it.
+  __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            TailCallMode::kDisallow),
+          RelocInfo::CODE_TARGET);
+}
+
 // static
 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -2912,6 +3119,19 @@
           RelocInfo::CODE_TARGET);
 }
 
+// static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- rax : the number of arguments (not including the receiver)
+  //  -- rdx : the new target (either the same as the constructor or
+  //           the JSFunction on which new was invoked initially)
+  //  -- rdi : the constructor to call (can be any Object)
+  // -----------------------------------
+
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
 static void CompatibleReceiverCheck(MacroAssembler* masm, Register receiver,
                                     Register function_template_info,
                                     Register scratch0, Register scratch1,
diff --git a/src/builtins/x87/OWNERS b/src/builtins/x87/OWNERS
index dd9998b..61245ae 100644
--- a/src/builtins/x87/OWNERS
+++ b/src/builtins/x87/OWNERS
@@ -1 +1,2 @@
 weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/src/builtins/x87/builtins-x87.cc b/src/builtins/x87/builtins-x87.cc
index 2187f86..d13e868 100644
--- a/src/builtins/x87/builtins-x87.cc
+++ b/src/builtins/x87/builtins-x87.cc
@@ -135,8 +135,8 @@
       // Allocate the new receiver object.
       __ Push(edi);
       __ Push(edx);
-      FastNewObjectStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+              RelocInfo::CODE_TARGET);
       __ mov(ebx, eax);
       __ Pop(edx);
       __ Pop(edi);
@@ -387,17 +387,16 @@
   __ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
 
   // Load suspended function and context.
-  __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
   __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
   Label stepping_prepared;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(masm->isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  __ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
-  __ j(greater_equal, &prepare_step_in_if_stepping);
+  ExternalReference debug_hook =
+      ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+  __ cmpb(Operand::StaticVariable(debug_hook), Immediate(0));
+  __ j(not_equal, &prepare_step_in_if_stepping);
 
   // Flood function if we need to continue stepping in the suspended generator.
   ExternalReference debug_suspended_generator =
@@ -438,19 +437,20 @@
     __ bind(&done_loop);
   }
 
-  // Dispatch on the kind of generator object.
-  Label old_generator;
-  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
-  __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
-  __ j(not_equal, &old_generator);
+  // The underlying function needs to have bytecode available.
+  if (FLAG_debug_code) {
+    __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+    __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+    __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+    __ Assert(equal, kMissingBytecodeArray);
+  }
 
-  // New-style (ignition/turbofan) generator object
+  // Resume (Ignition/TurboFan) generator object.
   {
     __ PushReturnAddressFrom(eax);
     __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
     __ mov(eax,
-           FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+           FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
     // We abuse new.target both to indicate that this is a resume call and to
     // pass in the generator object.  In ordinary calls, new.target is always
     // undefined because generator functions are non-constructable.
@@ -458,56 +458,13 @@
     __ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
   }
 
-  // Old-style (full-codegen) generator object
-  __ bind(&old_generator);
-  {
-    // Enter a new JavaScript frame, and initialize its slots as they were when
-    // the generator was suspended.
-    FrameScope scope(masm, StackFrame::MANUAL);
-    __ PushReturnAddressFrom(eax);  // Return address.
-    __ Push(ebp);                   // Caller's frame pointer.
-    __ Move(ebp, esp);
-    __ Push(esi);  // Callee's context.
-    __ Push(edi);  // Callee's JS Function.
-
-    // Restore the operand stack.
-    __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
-    {
-      Label done_loop, loop;
-      __ Move(ecx, Smi::kZero);
-      __ bind(&loop);
-      __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
-      __ j(equal, &done_loop, Label::kNear);
-      __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
-                           FixedArray::kHeaderSize));
-      __ add(ecx, Immediate(Smi::FromInt(1)));
-      __ jmp(&loop);
-      __ bind(&done_loop);
-    }
-
-    // Reset operand stack so we don't leak.
-    __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
-           Immediate(masm->isolate()->factory()->empty_fixed_array()));
-
-    // Resume the generator function at the continuation.
-    __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-    __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
-    __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
-    __ SmiUntag(ecx);
-    __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
-    __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
-           Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
-    __ mov(eax, ebx);  // Continuation expects generator object in eax.
-    __ jmp(edx);
-  }
-
   __ bind(&prepare_step_in_if_stepping);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(ebx);
     __ Push(edx);
     __ Push(edi);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(edx);
     __ Pop(ebx);
     __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
@@ -578,9 +535,8 @@
   // it is present) and load it into kInterpreterBytecodeArrayRegister.
   __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   Label load_debug_bytecode_array, bytecode_array_loaded;
-  __ cmp(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
-         Immediate(DebugInfo::uninitialized()));
-  __ j(not_equal, &load_debug_bytecode_array);
+  __ JumpIfNotSmi(FieldOperand(eax, SharedFunctionInfo::kDebugInfoOffset),
+                  &load_debug_bytecode_array);
   __ mov(kInterpreterBytecodeArrayRegister,
          FieldOperand(eax, SharedFunctionInfo::kFunctionDataOffset));
   __ bind(&bytecode_array_loaded);
@@ -592,11 +548,11 @@
   __ j(not_equal, &switch_to_different_code_kind);
 
   // Increment invocation count for the function.
-  __ EmitLoadTypeFeedbackVector(ecx);
-  __ add(FieldOperand(ecx,
-                      TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                          TypeFeedbackVector::kHeaderSize),
-         Immediate(Smi::FromInt(1)));
+  __ EmitLoadFeedbackVector(ecx);
+  __ add(
+      FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                            FeedbackVector::kHeaderSize),
+      Immediate(Smi::FromInt(1)));
 
   // Check function data field is actually a BytecodeArray object.
   if (FLAG_debug_code) {
@@ -606,6 +562,11 @@
     __ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
   }
 
+  // Reset code age.
+  __ mov_b(FieldOperand(kInterpreterBytecodeArrayRegister,
+                        BytecodeArray::kBytecodeAgeOffset),
+           Immediate(BytecodeArray::kNoAgeBytecodeAge));
+
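Together with the invocation-count increment in the previous hunk, the interpreter entry now records two pieces of per-call bookkeeping. In plain C++ terms (field names assumed for illustration):

struct FeedbackVector { int invocation_count = 0; };
struct BytecodeArray { int bytecode_age = 0; };
constexpr int kNoAgeBytecodeAge = 0;  // assumed encoding

void OnInterpreterEntry(FeedbackVector& feedback, BytecodeArray& bytecode) {
  feedback.invocation_count += 1;             // feeds tier-up heuristics
  bytecode.bytecode_age = kNoAgeBytecodeAge;  // marks the bytecode as in use
}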
   // Push bytecode array.
   __ push(kInterpreterBytecodeArrayRegister);
   // Push Smi tagged initial bytecode array offset.
@@ -733,7 +694,7 @@
 // static
 void Builtins::Generate_InterpreterPushArgsAndCallImpl(
     MacroAssembler* masm, TailCallMode tail_call_mode,
-    CallableType function_type) {
+    InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- ebx : the address of the first argument to be pushed. Subsequent
@@ -765,12 +726,14 @@
   // Call the target.
   __ Push(edx);  // Re-push return address.
 
-  if (function_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     __ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
                                                       tail_call_mode),
             RelocInfo::CODE_TARGET);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    __ Jump(masm->isolate()->builtins()->CallWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(function_type, CallableType::kAny);
     __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
                                               tail_call_mode),
             RelocInfo::CODE_TARGET);
@@ -883,7 +846,7 @@
 
 // static
 void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
-    MacroAssembler* masm, CallableType construct_type) {
+    MacroAssembler* masm, InterpreterPushArgsMode mode) {
   // ----------- S t a t e -------------
   //  -- eax : the number of arguments (not including the receiver)
   //  -- edx : the new target
@@ -909,7 +872,7 @@
   __ Pop(edi);
 
   __ AssertUndefinedOrAllocationSite(ebx);
-  if (construct_type == CallableType::kJSFunction) {
+  if (mode == InterpreterPushArgsMode::kJSFunction) {
     // Tail call to the function-specific construct stub (still in the caller
     // context at this point).
     __ AssertFunction(edi);
@@ -918,9 +881,12 @@
     __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
     __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
     __ jmp(ecx);
+  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+    // Call the constructor with unmodified eax, edi, edx values.
+    __ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
+            RelocInfo::CODE_TARGET);
   } else {
-    DCHECK_EQ(construct_type, CallableType::kAny);
-
+    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
     // Call the constructor with unmodified eax, edi, edx values.
     __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
   }
@@ -1063,6 +1029,12 @@
   Register new_target = edx;
   Register argument_count = eax;
 
+  // Do we have a valid feedback vector?
+  __ mov(ebx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
+  __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
+  __ cmp(ebx, masm->isolate()->factory()->undefined_value());
+  __ j(equal, &gotta_call_runtime_no_stack);
+
   __ push(argument_count);
   __ push(new_target);
   __ push(closure);
@@ -1073,9 +1045,8 @@
   __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
   __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
   __ cmp(index, Immediate(Smi::FromInt(2)));
-  __ j(less, &gotta_call_runtime);
+  __ j(less, &try_shared);
 
-  // Find literals.
   // edx : native context
   // ebx : length / index
   // eax : optimized code map
@@ -1093,26 +1064,6 @@
   __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
   __ cmp(temp, native_context);
   __ j(not_equal, &loop_bottom);
-  // OSR id set to none?
-  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
-                            SharedFunctionInfo::kOffsetToPreviousOsrAstId));
-  const int bailout_id = BailoutId::None().ToInt();
-  __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
-  __ j(not_equal, &loop_bottom);
-  // Literals available?
-  __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
-                            SharedFunctionInfo::kOffsetToPreviousLiterals));
-  __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
-
-  // Save the literals in the closure.
-  __ mov(ecx, Operand(esp, 0));
-  __ mov(FieldOperand(ecx, JSFunction::kLiteralsOffset), temp);
-  __ push(index);
-  __ RecordWriteField(ecx, JSFunction::kLiteralsOffset, temp, index,
-                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ pop(index);
-
   // Code available?
   Register entry = ecx;
   __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
@@ -1120,7 +1071,7 @@
   __ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
   __ JumpIfSmi(entry, &try_shared);
 
-  // Found literals and code. Get them into the closure and return.
+  // Found code. Get it into the closure and return.
   __ pop(closure);
   // Store code entry in the closure.
   __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
@@ -1154,7 +1105,7 @@
   __ cmp(index, Immediate(Smi::FromInt(1)));
   __ j(greater, &loop_top);
 
-  // We found neither literals nor code.
+  // We found no code.
   __ jmp(&gotta_call_runtime);
 
   __ bind(&try_shared);
@@ -1166,14 +1117,14 @@
   __ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
             Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
   __ j(not_zero, &gotta_call_runtime_no_stack);
-  // Is the full code valid?
+
+  // If SFI points to anything other than CompileLazy, install that.
   __ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
-  __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
-  __ and_(ebx, Code::KindField::kMask);
-  __ shr(ebx, Code::KindField::kShift);
-  __ cmp(ebx, Immediate(Code::BUILTIN));
+  __ Move(ebx, masm->CodeObject());
+  __ cmp(entry, ebx);
   __ j(equal, &gotta_call_runtime_no_stack);
-  // Yes, install the full code.
+
+  // Install the SFI's code entry.
   __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
   __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
   __ RecordWriteCodeEntryField(closure, entry, ebx);
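With literals gone from the optimized code map, the search above reduces to: walk the map from the back, match the current native context's weak cell, and require a still-live code weak cell. As a standalone model (illustrative types; the real map is a flattened FixedArray):

#include <vector>

struct Code;  // opaque
struct CodeMapEntry {
  const void* native_context;  // weak cell in the real map
  Code* code;                  // nullptr once the weak cell is cleared
};

Code* FindOptimizedCode(const std::vector<CodeMapEntry>& map,
                        const void* native_context) {
  for (auto it = map.rbegin(); it != map.rend(); ++it) {
    if (it->native_context == native_context && it->code != nullptr) {
      return it->code;
    }
  }
  return nullptr;
}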
@@ -1295,14 +1246,9 @@
   __ ret(0);
 }
 
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
-  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
-  }                                                           \
-  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
-      MacroAssembler* masm) {                                 \
-    GenerateMakeCodeYoungAgainCommon(masm);                   \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                              \
+  void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+    GenerateMakeCodeYoungAgainCommon(masm);                               \
   }
 CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -1671,14 +1617,14 @@
   __ bind(&target_not_constructor);
   {
     __ mov(Operand(esp, kPointerSize), edi);
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 
   // 4c. The new.target is not a constructor, throw an appropriate TypeError.
   __ bind(&new_target_not_constructor);
   {
     __ mov(Operand(esp, kPointerSize), edx);
-    __ TailCallRuntime(Runtime::kThrowCalledNonCallable);
+    __ TailCallRuntime(Runtime::kThrowNotConstructor);
   }
 }
 
@@ -1986,8 +1932,8 @@
     FrameScope scope(masm, StackFrame::MANUAL);
     __ EnterBuiltinFrame(esi, edi, ecx);
     __ Push(ebx);  // the first argument
-    FastNewObjectStub stub(masm->isolate());
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(FieldOperand(eax, JSValue::kValueOffset));
     __ LeaveBuiltinFrame(esi, edi, ecx);
   }
@@ -2149,8 +2095,8 @@
     __ SmiTag(ebx);
     __ EnterBuiltinFrame(esi, edi, ebx);
     __ Push(eax);  // the first argument
-    FastNewObjectStub stub(masm->isolate());
-    __ CallStub(&stub);
+    __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+            RelocInfo::CODE_TARGET);
     __ Pop(FieldOperand(eax, JSValue::kValueOffset));
     __ LeaveBuiltinFrame(esi, edi, ebx);
     __ SmiUntag(ebx);
@@ -2210,7 +2156,8 @@
 
   // Create the list of arguments from the array-like argumentsList.
   {
-    Label create_arguments, create_array, create_runtime, done_create;
+    Label create_arguments, create_array, create_holey_array, create_runtime,
+        done_create;
     __ JumpIfSmi(eax, &create_runtime);
 
     // Load the map of argumentsList into ecx.
@@ -2254,6 +2201,22 @@
     __ mov(eax, ecx);
     __ jmp(&done_create);
 
+    // For holey JSArrays we need to check that the array prototype chain
+    // protector is intact and that our prototype really is Array.prototype.
+    __ bind(&create_holey_array);
+    __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+    __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+    __ cmp(ecx, ContextOperand(ebx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+    __ j(not_equal, &create_runtime);
+    __ LoadRoot(ecx, Heap::kArrayProtectorRootIndex);
+    __ cmp(FieldOperand(ecx, PropertyCell::kValueOffset),
+           Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+    __ j(not_equal, &create_runtime);
+    __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+    __ SmiUntag(ebx);
+    __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+    __ jmp(&done_create);
+
     // Try to create the list from a JSArray object.
     __ bind(&create_array);
     __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
@@ -2261,10 +2224,12 @@
     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
     STATIC_ASSERT(FAST_ELEMENTS == 2);
-    __ cmp(ecx, Immediate(FAST_ELEMENTS));
-    __ j(above, &create_runtime);
+    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
     __ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
-    __ j(equal, &create_runtime);
+    __ j(equal, &create_holey_array, Label::kNear);
+    __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+    __ j(equal, &create_holey_array, Label::kNear);
+    __ j(above, &create_runtime);
     __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
     __ SmiUntag(ebx);
     __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
@@ -2303,26 +2268,38 @@
 
   // Push arguments onto the stack (thisArgument is already on the stack).
   {
+    // Save edx/edi to stX0/stX1.
     __ push(edx);
+    __ push(edi);
     __ fld_s(MemOperand(esp, 0));
-    __ lea(esp, Operand(esp, kFloatSize));
+    __ fld_s(MemOperand(esp, 4));
+    __ lea(esp, Operand(esp, 2 * kFloatSize));
 
     __ PopReturnAddressTo(edx);
     __ Move(ecx, Immediate(0));
-    Label done, loop;
+    Label done, push, loop;
     __ bind(&loop);
     __ cmp(ecx, ebx);
     __ j(equal, &done, Label::kNear);
-    __ Push(
-        FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+    // Turn the hole into undefined as we go.
+    __ mov(edi,
+           FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+    __ CompareRoot(edi, Heap::kTheHoleValueRootIndex);
+    __ j(not_equal, &push, Label::kNear);
+    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ Push(edi);
     __ inc(ecx);
     __ jmp(&loop);
     __ bind(&done);
     __ PushReturnAddressFrom(edx);
 
-    __ lea(esp, Operand(esp, -kFloatSize));
+    // Restore edx/edi from stX0/stX1.
+    __ lea(esp, Operand(esp, -2 * kFloatSize));
     __ fstp_s(MemOperand(esp, 0));
+    __ fstp_s(MemOperand(esp, 4));
     __ pop(edx);
+    __ pop(edi);
 
     __ Move(eax, ebx);
   }
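The fld_s/fstp_s pairs above are a register-spill trick: ia32 has no spare general-purpose registers here, so edx/edi are parked in the x87 register stack and recovered in LIFO order. A simplified standalone model of that pairing (the real code round-trips the bits through memory; here a std::stack stands in for st0..st7):

#include <cassert>
#include <cstdint>
#include <stack>

int main() {
  std::stack<uint32_t> x87;  // models the FPU register stack
  uint32_t edx = 0x11111111, edi = 0x22222222;
  x87.push(edi);  // after both fld_s loads, edi sits in st1...
  x87.push(edx);  // ...and edx in st0
  edx = edi = 0;  // the body is now free to clobber both registers
  edx = x87.top(); x87.pop();  // first fstp_s
  edi = x87.top(); x87.pop();  // second fstp_s
  assert(edx == 0x11111111 && edi == 0x22222222);
  return 0;
}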
@@ -2717,6 +2694,199 @@
   }
 }
 
+static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
+  // Free up some registers.
+  // Save edx/edi to stX0/stX1.
+  __ push(edx);
+  __ push(edi);
+  __ fld_s(MemOperand(esp, 0));
+  __ fld_s(MemOperand(esp, 4));
+  __ lea(esp, Operand(esp, 2 * kFloatSize));
+
+  Register argc = eax;
+
+  Register scratch = ecx;
+  Register scratch2 = edi;
+
+  Register spread = ebx;
+  Register spread_map = edx;
+
+  Register spread_len = edx;
+
+  Label runtime_call, push_args;
+  __ mov(spread, Operand(esp, kPointerSize));
+  __ JumpIfSmi(spread, &runtime_call);
+  __ mov(spread_map, FieldOperand(spread, HeapObject::kMapOffset));
+
+  // Check that the spread is an array.
+  __ CmpInstanceType(spread_map, JS_ARRAY_TYPE);
+  __ j(not_equal, &runtime_call);
+
+  // Check that we have the original ArrayPrototype.
+  __ mov(scratch, FieldOperand(spread_map, Map::kPrototypeOffset));
+  __ mov(scratch2, NativeContextOperand());
+  __ cmp(scratch,
+         ContextOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+  __ j(not_equal, &runtime_call);
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
+  __ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
+  __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+         Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+  __ j(not_equal, &runtime_call);
+
+  // Check that the map of the initial array iterator hasn't changed.
+  __ mov(scratch2, NativeContextOperand());
+  __ mov(scratch,
+         ContextOperand(scratch2,
+                        Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
+  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+  __ cmp(scratch,
+         ContextOperand(scratch2,
+                        Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
+  __ j(not_equal, &runtime_call);
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  Label no_protector_check;
+  __ mov(scratch, FieldOperand(spread_map, Map::kBitField2Offset));
+  __ DecodeField<Map::ElementsKindBits>(scratch);
+  __ cmp(scratch, Immediate(FAST_HOLEY_ELEMENTS));
+  __ j(above, &runtime_call);
+  // For non-FastHoley kinds, we can skip the protector check.
+  __ cmp(scratch, Immediate(FAST_SMI_ELEMENTS));
+  __ j(equal, &no_protector_check);
+  __ cmp(scratch, Immediate(FAST_ELEMENTS));
+  __ j(equal, &no_protector_check);
+  // Check the ArrayProtector cell.
+  __ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
+  __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+         Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+  __ j(not_equal, &runtime_call);
+
+  __ bind(&no_protector_check);
+  // Load the FixedArray backing store, but use the length from the array.
+  __ mov(spread_len, FieldOperand(spread, JSArray::kLengthOffset));
+  __ SmiUntag(spread_len);
+  __ mov(spread, FieldOperand(spread, JSArray::kElementsOffset));
+  __ jmp(&push_args);
+
+  __ bind(&runtime_call);
+  {
+    // Call the builtin for the result of the spread.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // edx/edi must survive the runtime call, so move them back from
+    // stX0/stX1 and save them on the stack instead.
+    __ lea(esp, Operand(esp, -2 * kFloatSize));
+    __ fstp_s(MemOperand(esp, 0));
+    __ fstp_s(MemOperand(esp, 4));
+    __ pop(edx);
+    __ pop(edi);
+
+    __ Push(edi);
+    __ Push(edx);
+    __ SmiTag(argc);
+    __ Push(argc);
+    __ Push(spread);
+    __ CallRuntime(Runtime::kSpreadIterableFixed);
+    __ mov(spread, eax);
+    __ Pop(argc);
+    __ SmiUntag(argc);
+    __ Pop(edx);
+    __ Pop(edi);
+    // Free up some registers.
+    // Save edx/edi to stX0/stX1.
+    __ push(edx);
+    __ push(edi);
+    __ fld_s(MemOperand(esp, 0));
+    __ fld_s(MemOperand(esp, 4));
+    __ lea(esp, Operand(esp, 2 * kFloatSize));
+  }
+
+  {
+    // Calculate the new nargs including the result of the spread.
+    __ mov(spread_len, FieldOperand(spread, FixedArray::kLengthOffset));
+    __ SmiUntag(spread_len);
+
+    __ bind(&push_args);
+    // argc += spread_len - 1. Subtract 1 for the spread itself.
+    __ lea(argc, Operand(argc, spread_len, times_1, -1));
+  }
+
+  // Check for stack overflow.
+  {
+    // Check the stack for overflow. We are not trying to catch interruptions
+    // (i.e. debug break and preemption) here, so check the "real stack limit".
+    Label done;
+    __ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
+    // Make scratch the space we have left. The stack might already have
+    // overflowed here, which would make scratch negative.
+    __ neg(scratch);
+    __ add(scratch, esp);
+    __ sar(scratch, kPointerSizeLog2);
+    // Check if the arguments will overflow the stack.
+    __ cmp(scratch, spread_len);
+    __ j(greater, &done, Label::kNear);  // Signed comparison.
+    __ TailCallRuntime(Runtime::kThrowStackOverflow);
+    __ bind(&done);
+  }
+
+  // Put the evaluated spread onto the stack as additional arguments.
+  {
+    Register return_address = edi;
+    // Pop the return address and spread argument.
+    __ PopReturnAddressTo(return_address);
+    __ Pop(scratch);
+
+    Register scratch2 = esi;
+    // Save esi to stX0; edx/edi now sit in stX1/stX2.
+    __ push(esi);
+    __ fld_s(MemOperand(esp, 0));
+    __ lea(esp, Operand(esp, 1 * kFloatSize));
+
+    __ mov(scratch, Immediate(0));
+    Label done, push, loop;
+    __ bind(&loop);
+    __ cmp(scratch, spread_len);
+    __ j(equal, &done, Label::kNear);
+    __ mov(scratch2, FieldOperand(spread, scratch, times_pointer_size,
+                                  FixedArray::kHeaderSize));
+    __ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
+    __ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
+    __ bind(&push);
+    __ Push(scratch2);
+    __ inc(scratch);
+    __ jmp(&loop);
+    __ bind(&done);
+    __ PushReturnAddressFrom(return_address);
+
+    // Now restore esi from stX0 and edx/edi from stX1/stX2.
+    __ lea(esp, Operand(esp, -3 * kFloatSize));
+    __ fstp_s(MemOperand(esp, 0));
+    __ fstp_s(MemOperand(esp, 4));
+    __ fstp_s(MemOperand(esp, 8));
+    __ pop(esi);
+    __ pop(edx);
+    __ pop(edi);
+  }
+}
+
+// static
+void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edi : the target to call (can be any Object)
+  // -----------------------------------
+
+  // CheckSpreadAndPushToStack will push edx to save it.
+  __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
+                                            TailCallMode::kDisallow),
+          RelocInfo::CODE_TARGET);
+}
+
 // static
 void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   // ----------- S t a t e -------------
@@ -2840,6 +3010,19 @@
 }
 
 // static
+void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- eax : the number of arguments (not including the receiver)
+  //  -- edx : the new target (either the same as the constructor or
+  //           the JSFunction on which new was invoked initially)
+  //  -- edi : the constructor to call (can be any Object)
+  // -----------------------------------
+
+  CheckSpreadAndPushToStack(masm);
+  __ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
+}
+
+// static
 void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- edx    : requested object size (untagged)
diff --git a/src/cancelable-task.cc b/src/cancelable-task.cc
index ea351f8..b0387f4 100644
--- a/src/cancelable-task.cc
+++ b/src/cancelable-task.cc
@@ -93,13 +93,36 @@
   }
 }
 
+CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbortAll() {
+  // Clean up all cancelable foreground and background tasks. Tasks are
+  // canceled along the way if possible, i.e., if they have not started yet.
+  base::LockGuard<base::Mutex> guard(&mutex_);
+
+  if (cancelable_tasks_.empty()) return kTaskRemoved;
+
+  for (auto it = cancelable_tasks_.begin(); it != cancelable_tasks_.end();) {
+    if (it->second->Cancel()) {
+      it = cancelable_tasks_.erase(it);
+    } else {
+      ++it;
+    }
+  }
+
+  return cancelable_tasks_.empty() ? kTaskAborted : kTaskRunning;
+}
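A hypothetical caller distinguishing the three outcomes (a usage sketch only; it assumes the TryAbortResult enumerators are class-scoped, as their unqualified use above suggests):

void ShutdownTasks(CancelableTaskManager* manager) {
  switch (manager->TryAbortAll()) {
    case CancelableTaskManager::kTaskRemoved:
      break;  // nothing was registered, nothing to do
    case CancelableTaskManager::kTaskAborted:
      break;  // every task was canceled before it started
    case CancelableTaskManager::kTaskRunning:
      // At least one task is already executing; fall back to the blocking
      // CancelAndWait() when full teardown is required.
      manager->CancelAndWait();
      break;
  }
}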
 
 CancelableTask::CancelableTask(Isolate* isolate)
-    : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
+    : CancelableTask(isolate, isolate->cancelable_task_manager()) {}
 
+CancelableTask::CancelableTask(Isolate* isolate, CancelableTaskManager* manager)
+    : Cancelable(manager), isolate_(isolate) {}
 
 CancelableIdleTask::CancelableIdleTask(Isolate* isolate)
-    : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
+    : CancelableIdleTask(isolate, isolate->cancelable_task_manager()) {}
+
+CancelableIdleTask::CancelableIdleTask(Isolate* isolate,
+                                       CancelableTaskManager* manager)
+    : Cancelable(manager), isolate_(isolate) {}
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/cancelable-task.h b/src/cancelable-task.h
index 65f98e7..5b1a5f1 100644
--- a/src/cancelable-task.h
+++ b/src/cancelable-task.h
@@ -45,6 +45,17 @@
   // already running. This disallows subsequent Register calls.
   void CancelAndWait();
 
+  // Tries to cancel all remaining registered tasks. The return value
+  // indicates whether
+  //
+  // 1) no tasks were registered (kTaskRemoved), or
+  //
+  // 2) at least one remaining task could not be canceled (kTaskRunning), or
+  //
+  // 3) all registered tasks were canceled (kTaskAborted).
+  TryAbortResult TryAbortAll();
+
  private:
   // Only called by {Cancelable} destructor. The task is done with executing,
   // but needs to be removed.
@@ -123,9 +134,11 @@
 
 
 // Multiple inheritance can be used because Task is a pure interface.
-class CancelableTask : public Cancelable, public Task {
+class V8_EXPORT_PRIVATE CancelableTask : public Cancelable,
+                                         NON_EXPORTED_BASE(public Task) {
  public:
   explicit CancelableTask(Isolate* isolate);
+  CancelableTask(Isolate* isolate, CancelableTaskManager* manager);
 
   // Task overrides.
   void Run() final {
@@ -148,6 +161,7 @@
 class CancelableIdleTask : public Cancelable, public IdleTask {
  public:
   explicit CancelableIdleTask(Isolate* isolate);
+  CancelableIdleTask(Isolate* isolate, CancelableTaskManager* manager);
 
   // IdleTask overrides.
   void Run(double deadline_in_seconds) final {
diff --git a/src/code-events.h b/src/code-events.h
index 94f7dbd..db43d88 100644
--- a/src/code-events.h
+++ b/src/code-events.h
@@ -90,7 +90,7 @@
   virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
                                Name* name) = 0;
   virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
-                               SharedFunctionInfo* shared, Name* name) = 0;
+                               SharedFunctionInfo* shared, Name* source) = 0;
   virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
                                SharedFunctionInfo* shared, Name* source,
                                int line, int column) = 0;
diff --git a/src/code-factory.cc b/src/code-factory.cc
index 128c709..3ebfad0 100644
--- a/src/code-factory.cc
+++ b/src/code-factory.cc
@@ -6,6 +6,7 @@
 
 #include "src/bootstrapper.h"
 #include "src/ic/ic.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -22,9 +23,25 @@
 }  // namespace
 
 // static
+Handle<Code> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
+  CEntryStub stub(isolate, result_size);
+  return stub.GetCode();
+}
+
+// static
 Callable CodeFactory::LoadIC(Isolate* isolate) {
-  LoadICTrampolineStub stub(isolate);
-  return make_callable(stub);
+  return Callable(isolate->builtins()->LoadICTrampoline(),
+                  LoadDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::LoadICProtoArray(Isolate* isolate,
+                                       bool throw_if_nonexistent) {
+  return Callable(
+      throw_if_nonexistent
+          ? isolate->builtins()->LoadICProtoArrayThrowIfNonexistent()
+          : isolate->builtins()->LoadICProtoArray(),
+      LoadICProtoArrayDescriptor(isolate));
 }
 
 // static
@@ -35,101 +52,106 @@
 
 // static
 Callable CodeFactory::LoadICInOptimizedCode(Isolate* isolate) {
-  LoadICStub stub(isolate);
-  return make_callable(stub);
+  return Callable(isolate->builtins()->LoadIC(),
+                  LoadWithVectorDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
-  LoadGlobalICTrampolineStub stub(isolate, LoadGlobalICState(typeof_mode));
-  return make_callable(stub);
+  return Callable(
+      typeof_mode == NOT_INSIDE_TYPEOF
+          ? isolate->builtins()->LoadGlobalICTrampoline()
+          : isolate->builtins()->LoadGlobalICInsideTypeofTrampoline(),
+      LoadGlobalDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
                                                   TypeofMode typeof_mode) {
-  LoadGlobalICStub stub(isolate, LoadGlobalICState(typeof_mode));
-  return make_callable(stub);
+  return Callable(typeof_mode == NOT_INSIDE_TYPEOF
+                      ? isolate->builtins()->LoadGlobalIC()
+                      : isolate->builtins()->LoadGlobalICInsideTypeof(),
+                  LoadGlobalWithVectorDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
-  KeyedLoadICTrampolineTFStub stub(isolate);
-  return make_callable(stub);
+  return Callable(isolate->builtins()->KeyedLoadICTrampoline(),
+                  LoadDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
-  KeyedLoadICTFStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::KeyedLoadIC_Megamorphic(Isolate* isolate) {
-  return Callable(isolate->builtins()->KeyedLoadIC_Megamorphic_TF(),
+  return Callable(isolate->builtins()->KeyedLoadIC(),
                   LoadWithVectorDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::CallIC(Isolate* isolate, ConvertReceiverMode mode,
                              TailCallMode tail_call_mode) {
-  CallICTrampolineStub stub(isolate, CallICState(mode, tail_call_mode));
+  CallICStub stub(isolate, mode, tail_call_mode);
   return make_callable(stub);
 }
 
 // static
-Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate,
-                                            ConvertReceiverMode mode,
-                                            TailCallMode tail_call_mode) {
-  CallICStub stub(isolate, CallICState(mode, tail_call_mode));
+Callable CodeFactory::CallICTrampoline(Isolate* isolate,
+                                       ConvertReceiverMode mode,
+                                       TailCallMode tail_call_mode) {
+  CallICTrampolineStub stub(isolate, mode, tail_call_mode);
   return make_callable(stub);
 }
 
 // static
 Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
-  StoreICTrampolineStub stub(isolate, StoreICState(language_mode));
-  return make_callable(stub);
+  return Callable(language_mode == STRICT
+                      ? isolate->builtins()->StoreICStrictTrampoline()
+                      : isolate->builtins()->StoreICTrampoline(),
+                  StoreDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::StoreICInOptimizedCode(Isolate* isolate,
                                              LanguageMode language_mode) {
-  StoreICStub stub(isolate, StoreICState(language_mode));
-  return make_callable(stub);
+  return Callable(language_mode == STRICT ? isolate->builtins()->StoreICStrict()
+                                          : isolate->builtins()->StoreIC(),
+                  StoreWithVectorDescriptor(isolate));
+}
+
+Callable CodeFactory::StoreOwnIC(Isolate* isolate) {
+  // TODO(ishell): Currently we use StoreOwnIC only for storing properties
+  // that already exist in the boilerplate, so we can use StoreIC.
+  return Callable(isolate->builtins()->StoreICStrictTrampoline(),
+                  StoreDescriptor(isolate));
+}
+
+Callable CodeFactory::StoreOwnICInOptimizedCode(Isolate* isolate) {
+  // TODO(ishell): Currently we use StoreOwnIC only for storing properties
+  // that already exist in the boilerplate, so we can use StoreIC.
+  return Callable(isolate->builtins()->StoreICStrict(),
+                  StoreWithVectorDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
                                    LanguageMode language_mode) {
-  if (FLAG_tf_store_ic_stub) {
-    KeyedStoreICTrampolineTFStub stub(isolate, StoreICState(language_mode));
-    return make_callable(stub);
-  }
-  KeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
-  return make_callable(stub);
+  return Callable(language_mode == STRICT
+                      ? isolate->builtins()->KeyedStoreICStrictTrampoline()
+                      : isolate->builtins()->KeyedStoreICTrampoline(),
+                  StoreDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::KeyedStoreICInOptimizedCode(Isolate* isolate,
                                                   LanguageMode language_mode) {
-  if (FLAG_tf_store_ic_stub) {
-    KeyedStoreICTFStub stub(isolate, StoreICState(language_mode));
-    return make_callable(stub);
-  }
-  KeyedStoreICStub stub(isolate, StoreICState(language_mode));
-  return make_callable(stub);
+  return Callable(language_mode == STRICT
+                      ? isolate->builtins()->KeyedStoreICStrict()
+                      : isolate->builtins()->KeyedStoreIC(),
+                  StoreWithVectorDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::KeyedStoreIC_Megamorphic(Isolate* isolate,
                                                LanguageMode language_mode) {
-  if (FLAG_tf_store_ic_stub) {
-    return Callable(
-        language_mode == STRICT
-            ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict_TF()
-            : isolate->builtins()->KeyedStoreIC_Megamorphic_TF(),
-        StoreWithVectorDescriptor(isolate));
-  }
   return Callable(language_mode == STRICT
                       ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
                       : isolate->builtins()->KeyedStoreIC_Megamorphic(),
@@ -155,36 +177,6 @@
 }
 
 // static
-Callable CodeFactory::ToBoolean(Isolate* isolate) {
-  return Callable(isolate->builtins()->ToBoolean(),
-                  TypeConversionDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ToNumber(Isolate* isolate) {
-  return Callable(isolate->builtins()->ToNumber(),
-                  TypeConversionDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::NonNumberToNumber(Isolate* isolate) {
-  return Callable(isolate->builtins()->NonNumberToNumber(),
-                  TypeConversionDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StringToNumber(Isolate* isolate) {
-  return Callable(isolate->builtins()->StringToNumber(),
-                  TypeConversionDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ToName(Isolate* isolate) {
-  return Callable(isolate->builtins()->ToName(),
-                  TypeConversionDescriptor(isolate));
-}
-
-// static
 Callable CodeFactory::NonPrimitiveToPrimitive(Isolate* isolate,
                                               ToPrimitiveHint hint) {
   return Callable(isolate->builtins()->NonPrimitiveToPrimitive(hint),
@@ -216,7 +208,7 @@
   return Callable(code, BuiltinDescriptor(isolate));
 }
 
-#define DECLARE_TFS(Name, Kind, Extra, InterfaceDescriptor) \
+#define DECLARE_TFS(Name, Kind, Extra, InterfaceDescriptor, result_size) \
   typedef InterfaceDescriptor##Descriptor Name##Descriptor;
 BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TFS,
              IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
@@ -248,30 +240,46 @@
 TFS_BUILTIN(NotEqual)
 TFS_BUILTIN(StrictEqual)
 TFS_BUILTIN(StrictNotEqual)
+TFS_BUILTIN(CreateIterResultObject)
 TFS_BUILTIN(HasProperty)
+TFS_BUILTIN(NonNumberToNumber)
+TFS_BUILTIN(StringToNumber)
+TFS_BUILTIN(ToBoolean)
 TFS_BUILTIN(ToInteger)
 TFS_BUILTIN(ToLength)
+TFS_BUILTIN(ToName)
+TFS_BUILTIN(ToNumber)
 TFS_BUILTIN(ToObject)
+TFS_BUILTIN(ClassOf)
 TFS_BUILTIN(Typeof)
 TFS_BUILTIN(InstanceOf)
 TFS_BUILTIN(OrdinaryHasInstance)
+TFS_BUILTIN(CopyFastSmiOrObjectElements)
+TFS_BUILTIN(GrowFastDoubleElements)
+TFS_BUILTIN(GrowFastSmiOrObjectElements)
+TFS_BUILTIN(NewUnmappedArgumentsElements)
+TFS_BUILTIN(NewRestParameterElements)
+TFS_BUILTIN(FastCloneRegExp)
+TFS_BUILTIN(FastNewClosure)
+TFS_BUILTIN(FastNewObject)
 TFS_BUILTIN(ForInFilter)
+TFS_BUILTIN(GetSuperConstructor)
+TFS_BUILTIN(KeyedLoadIC_Megamorphic)
+TFS_BUILTIN(PromiseHandleReject)
+TFS_BUILTIN(RegExpReplace)
+TFS_BUILTIN(RegExpSplit)
+TFS_BUILTIN(StringCharAt)
+TFS_BUILTIN(StringCharCodeAt)
+TFS_BUILTIN(StringEqual)
+TFS_BUILTIN(StringNotEqual)
+TFS_BUILTIN(StringLessThan)
+TFS_BUILTIN(StringLessThanOrEqual)
+TFS_BUILTIN(StringGreaterThan)
+TFS_BUILTIN(StringGreaterThanOrEqual)
 
 #undef TFS_BUILTIN
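The TFS_BUILTIN list replaces the hand-written accessors deleted elsewhere in this file. The macro body is not shown in this diff, but given the DECLARE_TFS typedefs above, each entry plausibly expands to a one-line Callable factory along these lines (assumed shape, shown as a comment):

//   Callable CodeFactory::HasProperty(Isolate* isolate) {
//     return Callable(isolate->builtins()->HasProperty(),
//                     HasPropertyDescriptor(isolate));
//   }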
 
 // static
-Callable CodeFactory::Inc(Isolate* isolate) {
-  IncStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::Dec(Isolate* isolate) {
-  DecStub stub(isolate);
-  return make_callable(stub);
-}
-
-// static
 Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
                                 PretenureFlag pretenure_flag) {
   StringAddStub stub(isolate, flags, pretenure_flag);
@@ -303,39 +311,9 @@
 }
 
 // static
-Callable CodeFactory::StringEqual(Isolate* isolate) {
-  return Callable(isolate->builtins()->StringEqual(),
-                  CompareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StringNotEqual(Isolate* isolate) {
-  return Callable(isolate->builtins()->StringNotEqual(),
-                  CompareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StringLessThan(Isolate* isolate) {
-  return Callable(isolate->builtins()->StringLessThan(),
-                  CompareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StringLessThanOrEqual(Isolate* isolate) {
-  return Callable(isolate->builtins()->StringLessThanOrEqual(),
-                  CompareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StringGreaterThan(Isolate* isolate) {
-  return Callable(isolate->builtins()->StringGreaterThan(),
-                  CompareDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::StringGreaterThanOrEqual(Isolate* isolate) {
-  return Callable(isolate->builtins()->StringGreaterThanOrEqual(),
-                  CompareDescriptor(isolate));
+Callable CodeFactory::StringIndexOf(Isolate* isolate) {
+  return Callable(isolate->builtins()->StringIndexOf(),
+                  StringIndexOfDescriptor(isolate));
 }
 
 // static
@@ -351,80 +329,65 @@
 }
 
 // static
-Callable CodeFactory::FastCloneRegExp(Isolate* isolate) {
-  FastCloneRegExpStub stub(isolate);
-  return make_callable(stub);
+Callable CodeFactory::FrameDropperTrampoline(Isolate* isolate) {
+  return Callable(isolate->builtins()->FrameDropperTrampoline(),
+                  FrameDropperTrampolineDescriptor(isolate));
 }
 
 // static
-Callable CodeFactory::FastCloneShallowArray(Isolate* isolate) {
-  // TODO(mstarzinger): Thread through AllocationSiteMode at some point.
-  FastCloneShallowArrayStub stub(isolate, DONT_TRACK_ALLOCATION_SITE);
-  return make_callable(stub);
+Callable CodeFactory::HandleDebuggerStatement(Isolate* isolate) {
+  return Callable(isolate->builtins()->HandleDebuggerStatement(),
+                  ContextOnlyDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::FastCloneShallowArray(
+    Isolate* isolate, AllocationSiteMode allocation_mode) {
+  return Callable(isolate->builtins()->NewCloneShallowArray(allocation_mode),
+                  FastCloneShallowArrayDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::FastCloneShallowObject(Isolate* isolate, int length) {
-  FastCloneShallowObjectStub stub(isolate, length);
-  return make_callable(stub);
-}
-
-
-// static
-Callable CodeFactory::FastNewFunctionContext(Isolate* isolate) {
-  FastNewFunctionContextStub stub(isolate);
-  return make_callable(stub);
+  return Callable(isolate->builtins()->NewCloneShallowObject(length),
+                  FastCloneShallowObjectDescriptor(isolate));
 }
 
 // static
-Callable CodeFactory::FastNewClosure(Isolate* isolate) {
-  FastNewClosureStub stub(isolate);
-  return make_callable(stub);
+Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
+                                             ScopeType scope_type) {
+  return Callable(isolate->builtins()->NewFunctionContext(scope_type),
+                  FastNewFunctionContextDescriptor(isolate));
 }
 
 // static
-Callable CodeFactory::FastNewObject(Isolate* isolate) {
-  FastNewObjectStub stub(isolate);
-  return make_callable(stub);
+Callable CodeFactory::FastNewRestParameter(Isolate* isolate) {
+  return Callable(isolate->builtins()->FastNewRestParameter(),
+                  FastNewRestParameterDescriptor(isolate));
 }
 
 // static
-Callable CodeFactory::FastNewRestParameter(Isolate* isolate,
-                                           bool skip_stub_frame) {
-  FastNewRestParameterStub stub(isolate, skip_stub_frame);
-  return make_callable(stub);
+Callable CodeFactory::FastNewSloppyArguments(Isolate* isolate) {
+  return Callable(isolate->builtins()->FastNewSloppyArguments(),
+                  FastNewRestParameterDescriptor(isolate));
 }
 
 // static
-Callable CodeFactory::FastNewSloppyArguments(Isolate* isolate,
-                                             bool skip_stub_frame) {
-  FastNewSloppyArgumentsStub stub(isolate, skip_stub_frame);
-  return make_callable(stub);
+Callable CodeFactory::FastNewStrictArguments(Isolate* isolate) {
+  return Callable(isolate->builtins()->FastNewStrictArguments(),
+                  FastNewRestParameterDescriptor(isolate));
 }
 
 // static
-Callable CodeFactory::FastNewStrictArguments(Isolate* isolate,
-                                             bool skip_stub_frame) {
-  FastNewStrictArgumentsStub stub(isolate, skip_stub_frame);
-  return make_callable(stub);
+Callable CodeFactory::ForInPrepare(Isolate* isolate) {
+  return Callable(isolate->builtins()->ForInPrepare(),
+                  ForInPrepareDescriptor(isolate));
 }
 
 // static
-Callable CodeFactory::CopyFastSmiOrObjectElements(Isolate* isolate) {
-  return Callable(isolate->builtins()->CopyFastSmiOrObjectElements(),
-                  CopyFastSmiOrObjectElementsDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::GrowFastDoubleElements(Isolate* isolate) {
-  return Callable(isolate->builtins()->GrowFastDoubleElements(),
-                  GrowArrayElementsDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::GrowFastSmiOrObjectElements(Isolate* isolate) {
-  return Callable(isolate->builtins()->GrowFastSmiOrObjectElements(),
-                  GrowArrayElementsDescriptor(isolate));
+Callable CodeFactory::ForInNext(Isolate* isolate) {
+  return Callable(isolate->builtins()->ForInNext(),
+                  ForInNextDescriptor(isolate));
 }
 
 // static
@@ -433,14 +396,6 @@
   return make_callable(stub);
 }
 
-#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
-  Callable CodeFactory::Allocate##Type(Isolate* isolate) {     \
-    Allocate##Type##Stub stub(isolate);                        \
-    return make_callable(stub);                                \
-  }
-SIMD128_TYPES(SIMD128_ALLOC)
-#undef SIMD128_ALLOC
-
 // static
 Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
   return Callable(isolate->builtins()->ArgumentsAdaptorTrampoline(),
@@ -455,18 +410,43 @@
 }
 
 // static
-Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode) {
-  return Callable(isolate->builtins()->CallFunction(mode),
+Callable CodeFactory::CallWithSpread(Isolate* isolate) {
+  return Callable(isolate->builtins()->CallWithSpread(),
                   CallTrampolineDescriptor(isolate));
 }
 
 // static
+Callable CodeFactory::CallFunction(Isolate* isolate, ConvertReceiverMode mode,
+                                   TailCallMode tail_call_mode) {
+  return Callable(isolate->builtins()->CallFunction(mode, tail_call_mode),
+                  CallTrampolineDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::CallForwardVarargs(Isolate* isolate) {
+  return Callable(isolate->builtins()->CallForwardVarargs(),
+                  CallForwardVarargsDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::CallFunctionForwardVarargs(Isolate* isolate) {
+  return Callable(isolate->builtins()->CallFunctionForwardVarargs(),
+                  CallForwardVarargsDescriptor(isolate));
+}
+
+// static
 Callable CodeFactory::Construct(Isolate* isolate) {
   return Callable(isolate->builtins()->Construct(),
                   ConstructTrampolineDescriptor(isolate));
 }
 
 // static
+Callable CodeFactory::ConstructWithSpread(Isolate* isolate) {
+  return Callable(isolate->builtins()->ConstructWithSpread(),
+                  ConstructTrampolineDescriptor(isolate));
+}
+
+// static
 Callable CodeFactory::ConstructFunction(Isolate* isolate) {
   return Callable(isolate->builtins()->ConstructFunction(),
                   ConstructTrampolineDescriptor(isolate));
@@ -475,18 +455,17 @@
 // static
 Callable CodeFactory::InterpreterPushArgsAndCall(Isolate* isolate,
                                                  TailCallMode tail_call_mode,
-                                                 CallableType function_type) {
-  return Callable(isolate->builtins()->InterpreterPushArgsAndCall(
-                      tail_call_mode, function_type),
-                  InterpreterPushArgsAndCallDescriptor(isolate));
+                                                 InterpreterPushArgsMode mode) {
+  return Callable(
+      isolate->builtins()->InterpreterPushArgsAndCall(tail_call_mode, mode),
+      InterpreterPushArgsAndCallDescriptor(isolate));
 }
 
 // static
 Callable CodeFactory::InterpreterPushArgsAndConstruct(
-    Isolate* isolate, CallableType function_type) {
-  return Callable(
-      isolate->builtins()->InterpreterPushArgsAndConstruct(function_type),
-      InterpreterPushArgsAndConstructDescriptor(isolate));
+    Isolate* isolate, InterpreterPushArgsMode mode) {
+  return Callable(isolate->builtins()->InterpreterPushArgsAndConstruct(mode),
+                  InterpreterPushArgsAndConstructDescriptor(isolate));
 }
 
 // static
@@ -509,5 +488,22 @@
                   ContextOnlyDescriptor(isolate));
 }
 
+// static
+Callable CodeFactory::ArrayConstructor(Isolate* isolate) {
+  ArrayConstructorStub stub(isolate);
+  return make_callable(stub);
+}
+
+// static
+Callable CodeFactory::ArrayPush(Isolate* isolate) {
+  return Callable(isolate->builtins()->ArrayPush(), BuiltinDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::FunctionPrototypeBind(Isolate* isolate) {
+  return Callable(isolate->builtins()->FunctionPrototypeBind(),
+                  BuiltinDescriptor(isolate));
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/code-factory.h b/src/code-factory.h
index 033e5d5..d50c7f4 100644
--- a/src/code-factory.h
+++ b/src/code-factory.h
@@ -30,9 +30,16 @@
 
 class V8_EXPORT_PRIVATE CodeFactory final {
  public:
+  // CEntryStub has var-args semantics (all the arguments are passed on the
+  // stack and the argument count is passed via a register), which currently
+  // can't be expressed in CallInterfaceDescriptor. Therefore only the code
+  // is exported here.
+  static Handle<Code> RuntimeCEntry(Isolate* isolate, int result_size = 1);
+
   // Initial states for ICs.
   static Callable LoadIC(Isolate* isolate);
   static Callable LoadICInOptimizedCode(Isolate* isolate);
+  static Callable LoadICProtoArray(Isolate* isolate, bool throw_if_nonexistent);
   static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
   static Callable LoadGlobalICInOptimizedCode(Isolate* isolate,
                                               TypeofMode typeof_mode);
@@ -42,11 +49,13 @@
   static Callable CallIC(Isolate* isolate,
                          ConvertReceiverMode mode = ConvertReceiverMode::kAny,
                          TailCallMode tail_call_mode = TailCallMode::kDisallow);
-  static Callable CallICInOptimizedCode(
+  static Callable CallICTrampoline(
       Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny,
       TailCallMode tail_call_mode = TailCallMode::kDisallow);
   static Callable StoreIC(Isolate* isolate, LanguageMode mode);
   static Callable StoreICInOptimizedCode(Isolate* isolate, LanguageMode mode);
+  static Callable StoreOwnIC(Isolate* isolate);
+  static Callable StoreOwnICInOptimizedCode(Isolate* isolate);
   static Callable KeyedStoreIC(Isolate* isolate, LanguageMode mode);
   static Callable KeyedStoreICInOptimizedCode(Isolate* isolate,
                                               LanguageMode mode);
@@ -54,6 +63,9 @@
 
   static Callable ResumeGenerator(Isolate* isolate);
 
+  static Callable FrameDropperTrampoline(Isolate* isolate);
+  static Callable HandleDebuggerStatement(Isolate* isolate);
+
   static Callable CompareIC(Isolate* isolate, Token::Value op);
   static Callable CompareNilIC(Isolate* isolate, NilValue nil_value);
 
@@ -99,8 +111,6 @@
   static Callable BitwiseAnd(Isolate* isolate);
   static Callable BitwiseOr(Isolate* isolate);
   static Callable BitwiseXor(Isolate* isolate);
-  static Callable Inc(Isolate* isolate);
-  static Callable Dec(Isolate* isolate);
   static Callable LessThan(Isolate* isolate);
   static Callable LessThanOrEqual(Isolate* isolate);
   static Callable GreaterThan(Isolate* isolate);
@@ -112,6 +122,8 @@
 
   static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
                             PretenureFlag pretenure_flag);
+  static Callable StringCharAt(Isolate* isolate);
+  static Callable StringCharCodeAt(Isolate* isolate);
   static Callable StringCompare(Isolate* isolate, Token::Value token);
   static Callable StringEqual(Isolate* isolate);
   static Callable StringNotEqual(Isolate* isolate);
@@ -120,52 +132,70 @@
   static Callable StringGreaterThan(Isolate* isolate);
   static Callable StringGreaterThanOrEqual(Isolate* isolate);
   static Callable SubString(Isolate* isolate);
+  static Callable StringIndexOf(Isolate* isolate);
 
+  static Callable RegExpReplace(Isolate* isolate);
+  static Callable RegExpSplit(Isolate* isolate);
+
+  static Callable ClassOf(Isolate* isolate);
   static Callable Typeof(Isolate* isolate);
+  static Callable GetSuperConstructor(Isolate* isolate);
 
   static Callable FastCloneRegExp(Isolate* isolate);
-  static Callable FastCloneShallowArray(Isolate* isolate);
+  static Callable FastCloneShallowArray(Isolate* isolate,
+                                        AllocationSiteMode allocation_mode);
   static Callable FastCloneShallowObject(Isolate* isolate, int length);
 
-  static Callable FastNewFunctionContext(Isolate* isolate);
+  static Callable FastNewFunctionContext(Isolate* isolate,
+                                         ScopeType scope_type);
   static Callable FastNewClosure(Isolate* isolate);
   static Callable FastNewObject(Isolate* isolate);
-  static Callable FastNewRestParameter(Isolate* isolate,
-                                       bool skip_stub_frame = false);
-  static Callable FastNewSloppyArguments(Isolate* isolate,
-                                         bool skip_stub_frame = false);
-  static Callable FastNewStrictArguments(Isolate* isolate,
-                                         bool skip_stub_frame = false);
+  static Callable FastNewRestParameter(Isolate* isolate);
+  static Callable FastNewSloppyArguments(Isolate* isolate);
+  static Callable FastNewStrictArguments(Isolate* isolate);
+
+  static Callable ForInPrepare(Isolate* isolate);
+  static Callable ForInNext(Isolate* isolate);
 
   static Callable CopyFastSmiOrObjectElements(Isolate* isolate);
   static Callable GrowFastDoubleElements(Isolate* isolate);
   static Callable GrowFastSmiOrObjectElements(Isolate* isolate);
 
+  static Callable NewUnmappedArgumentsElements(Isolate* isolate);
+  static Callable NewRestParameterElements(Isolate* isolate);
+
   static Callable AllocateHeapNumber(Isolate* isolate);
-#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
-  static Callable Allocate##Type(Isolate* isolate);
-  SIMD128_TYPES(SIMD128_ALLOC)
-#undef SIMD128_ALLOC
 
   static Callable ArgumentAdaptor(Isolate* isolate);
   static Callable Call(Isolate* isolate,
                        ConvertReceiverMode mode = ConvertReceiverMode::kAny,
                        TailCallMode tail_call_mode = TailCallMode::kDisallow);
+  static Callable CallWithSpread(Isolate* isolate);
   static Callable CallFunction(
-      Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
+      Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny,
+      TailCallMode tail_call_mode = TailCallMode::kDisallow);
+  static Callable CallForwardVarargs(Isolate* isolate);
+  static Callable CallFunctionForwardVarargs(Isolate* isolate);
   static Callable Construct(Isolate* isolate);
+  static Callable ConstructWithSpread(Isolate* isolate);
   static Callable ConstructFunction(Isolate* isolate);
+  static Callable CreateIterResultObject(Isolate* isolate);
   static Callable HasProperty(Isolate* isolate);
   static Callable ForInFilter(Isolate* isolate);
 
-  static Callable InterpreterPushArgsAndCall(
-      Isolate* isolate, TailCallMode tail_call_mode,
-      CallableType function_type = CallableType::kAny);
-  static Callable InterpreterPushArgsAndConstruct(
-      Isolate* isolate, CallableType function_type = CallableType::kAny);
+  static Callable InterpreterPushArgsAndCall(Isolate* isolate,
+                                             TailCallMode tail_call_mode,
+                                             InterpreterPushArgsMode mode);
+  static Callable InterpreterPushArgsAndConstruct(Isolate* isolate,
+                                                  InterpreterPushArgsMode mode);
   static Callable InterpreterPushArgsAndConstructArray(Isolate* isolate);
   static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
   static Callable InterpreterOnStackReplacement(Isolate* isolate);
+
+  static Callable ArrayConstructor(Isolate* isolate);
+  static Callable ArrayPush(Isolate* isolate);
+  static Callable FunctionPrototypeBind(Isolate* isolate);
+  static Callable PromiseHandleReject(Isolate* isolate);
 };
 
 }  // namespace internal
diff --git a/src/code-stub-assembler.cc b/src/code-stub-assembler.cc
index b1ed2f1..e1ab040 100644
--- a/src/code-stub-assembler.cc
+++ b/src/code-stub-assembler.cc
@@ -5,59 +5,142 @@
 #include "src/code-factory.h"
 #include "src/frames-inl.h"
 #include "src/frames.h"
-#include "src/ic/handler-configuration.h"
-#include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 using compiler::Node;
 
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
-                                     const CallInterfaceDescriptor& descriptor,
-                                     Code::Flags flags, const char* name,
-                                     size_t result_size)
-    : compiler::CodeAssembler(isolate, zone, descriptor, flags, name,
-                              result_size) {}
+CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
+    : compiler::CodeAssembler(state) {
+  if (DEBUG_BOOL && FLAG_csa_trap_on_node != nullptr) {
+    HandleBreakOnNode();
+  }
+}
 
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
-                                     int parameter_count, Code::Flags flags,
-                                     const char* name)
-    : compiler::CodeAssembler(isolate, zone, parameter_count, flags, name) {}
+void CodeStubAssembler::HandleBreakOnNode() {
+  // FLAG_csa_trap_on_node should be in the form "STUB,NODE", where STUB is a
+  // string specifying the name of a stub and NODE is a number specifying the
+  // node id.
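+  // For example (the values are illustrative): --csa-trap-on-node="LoadIC,42"
+  // triggers BreakOnNode(42) while the stub named "LoadIC" is being built.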
+  const char* name = state()->name();
+  size_t name_length = strlen(name);
+  if (strncmp(FLAG_csa_trap_on_node, name, name_length) != 0) {
+    // Different name.
+    return;
+  }
+  size_t option_length = strlen(FLAG_csa_trap_on_node);
+  if (option_length < name_length + 2 ||
+      FLAG_csa_trap_on_node[name_length] != ',') {
+    // Option is too short.
+    return;
+  }
+  const char* start = &FLAG_csa_trap_on_node[name_length + 1];
+  char* end;
+  int node_id = static_cast<int>(strtol(start, &end, 10));
+  if (start == end) {
+    // Bad node id.
+    return;
+  }
+  BreakOnNode(node_id);
+}
 
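+// CSA assertions are compiled into DEBUG builds only, and are additionally
+// gated on --debug-code at stub-generation time.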
-void CodeStubAssembler::Assert(ConditionBody codition_body, const char* message,
-                               const char* file, int line) {
+void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
+                               const char* message, const char* file,
+                               int line) {
 #if defined(DEBUG)
-  Label ok(this);
-  Label not_ok(this, Label::kDeferred);
-  if (message != nullptr && FLAG_code_comments) {
-    Comment("[ Assert: %s", message);
-  } else {
-    Comment("[ Assert");
-  }
-  Node* condition = codition_body();
-  DCHECK_NOT_NULL(condition);
-  Branch(condition, &ok, &not_ok);
-  Bind(&not_ok);
-  if (message != nullptr) {
-    char chars[1024];
-    Vector<char> buffer(chars);
-    if (file != nullptr) {
-      SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
+  if (FLAG_debug_code) {
+    Label ok(this);
+    Label not_ok(this, Label::kDeferred);
+    if (message != nullptr && FLAG_code_comments) {
+      Comment("[ Assert: %s", message);
     } else {
-      SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
+      Comment("[ Assert");
     }
-    CallRuntime(
-        Runtime::kGlobalPrint, SmiConstant(Smi::kZero),
-        HeapConstant(factory()->NewStringFromAsciiChecked(&(buffer[0]))));
+    Node* condition = condition_body();
+    DCHECK_NOT_NULL(condition);
+    Branch(condition, &ok, &not_ok);
+    Bind(&not_ok);
+    if (message != nullptr) {
+      char chars[1024];
+      Vector<char> buffer(chars);
+      if (file != nullptr) {
+        SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file,
+                 line);
+      } else {
+        SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
+      }
+      CallRuntime(
+          Runtime::kGlobalPrint, SmiConstant(Smi::kZero),
+          HeapConstant(factory()->NewStringFromAsciiChecked(&(buffer[0]))));
+    }
+    DebugBreak();
+    Goto(&ok);
+    Bind(&ok);
+    Comment("] Assert");
   }
-  DebugBreak();
-  Goto(&ok);
-  Bind(&ok);
-  Comment("] Assert");
 #endif
 }
 
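+// Select branches on {condition} and evaluates exactly one of {true_body} or
+// {false_body}, merging the two results into a single value of the given
+// machine representation.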
+Node* CodeStubAssembler::Select(Node* condition, const NodeGenerator& true_body,
+                                const NodeGenerator& false_body,
+                                MachineRepresentation rep) {
+  Variable value(this, rep);
+  Label vtrue(this), vfalse(this), end(this);
+  Branch(condition, &vtrue, &vfalse);
+
+  Bind(&vtrue);
+  {
+    value.Bind(true_body());
+    Goto(&end);
+  }
+  Bind(&vfalse);
+  {
+    value.Bind(false_body());
+    Goto(&end);
+  }
+
+  Bind(&end);
+  return value.value();
+}
+
+Node* CodeStubAssembler::SelectConstant(Node* condition, Node* true_value,
+                                        Node* false_value,
+                                        MachineRepresentation rep) {
+  return Select(condition, [=] { return true_value; },
+                [=] { return false_value; }, rep);
+}
+
+Node* CodeStubAssembler::SelectInt32Constant(Node* condition, int true_value,
+                                             int false_value) {
+  return SelectConstant(condition, Int32Constant(true_value),
+                        Int32Constant(false_value),
+                        MachineRepresentation::kWord32);
+}
+
+Node* CodeStubAssembler::SelectIntPtrConstant(Node* condition, int true_value,
+                                              int false_value) {
+  return SelectConstant(condition, IntPtrConstant(true_value),
+                        IntPtrConstant(false_value),
+                        MachineType::PointerRepresentation());
+}
+
+Node* CodeStubAssembler::SelectBooleanConstant(Node* condition) {
+  return SelectConstant(condition, TrueConstant(), FalseConstant(),
+                        MachineRepresentation::kTagged);
+}
+
+Node* CodeStubAssembler::SelectTaggedConstant(Node* condition, Node* true_value,
+                                              Node* false_value) {
+  return SelectConstant(condition, true_value, false_value,
+                        MachineRepresentation::kTagged);
+}
+
+Node* CodeStubAssembler::SelectSmiConstant(Node* condition, Smi* true_value,
+                                           Smi* false_value) {
+  return SelectConstant(condition, SmiConstant(true_value),
+                        SmiConstant(false_value),
+                        MachineRepresentation::kTaggedSigned);
+}
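+// For example, SelectBooleanConstant(TaggedIsSmi(object)) yields the tagged
+// true/false constant for a Smi check without spelling out the labels.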
+
 Node* CodeStubAssembler::NoContextConstant() { return NumberConstant(0); }
 
 #define HEAP_CONSTANT_ACCESSOR(rootName, name)     \
@@ -86,46 +169,19 @@
   if (mode == SMI_PARAMETERS) {
     return SmiConstant(Smi::FromInt(value));
   } else {
-    DCHECK(mode == INTEGER_PARAMETERS || mode == INTPTR_PARAMETERS);
+    DCHECK_EQ(INTPTR_PARAMETERS, mode);
     return IntPtrConstant(value);
   }
 }
 
-Node* CodeStubAssembler::IntPtrAddFoldConstants(Node* left, Node* right) {
-  int32_t left_constant;
-  bool is_left_constant = ToInt32Constant(left, left_constant);
-  int32_t right_constant;
-  bool is_right_constant = ToInt32Constant(right, right_constant);
-  if (is_left_constant) {
-    if (is_right_constant) {
-      return IntPtrConstant(left_constant + right_constant);
-    }
-    if (left_constant == 0) {
-      return right;
-    }
-  } else if (is_right_constant) {
-    if (right_constant == 0) {
-      return left;
-    }
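+// Returns true iff {test} is a compile-time constant zero, whether it is
+// encoded as an int32 constant or as a Smi constant.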
+bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test) {
+  int32_t constant_test;
+  Smi* smi_test;
+  if ((ToInt32Constant(test, constant_test) && constant_test == 0) ||
+      (ToSmiConstant(test, smi_test) && smi_test->value() == 0)) {
+    return true;
   }
-  return IntPtrAdd(left, right);
-}
-
-Node* CodeStubAssembler::IntPtrSubFoldConstants(Node* left, Node* right) {
-  int32_t left_constant;
-  bool is_left_constant = ToInt32Constant(left, left_constant);
-  int32_t right_constant;
-  bool is_right_constant = ToInt32Constant(right, right_constant);
-  if (is_left_constant) {
-    if (is_right_constant) {
-      return IntPtrConstant(left_constant - right_constant);
-    }
-  } else if (is_right_constant) {
-    if (right_constant == 0) {
-      return left;
-    }
-  }
-  return IntPtrSub(left, right);
+  return false;
 }
 
 Node* CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(Node* value) {
@@ -141,9 +197,11 @@
 Node* CodeStubAssembler::WordIsPowerOfTwo(Node* value) {
   // value && !(value & (value - 1))
   return WordEqual(
-      Select(WordEqual(value, IntPtrConstant(0)), IntPtrConstant(1),
-             WordAnd(value, IntPtrSub(value, IntPtrConstant(1))),
-             MachineType::PointerRepresentation()),
+      Select(
+          WordEqual(value, IntPtrConstant(0)),
+          [=] { return IntPtrConstant(1); },
+          [=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); },
+          MachineType::PointerRepresentation()),
       IntPtrConstant(0));
 }
 
@@ -151,11 +209,10 @@
   Node* one = Float64Constant(1.0);
   Node* one_half = Float64Constant(0.5);
 
-  Variable var_x(this, MachineRepresentation::kFloat64);
   Label return_x(this);
 
   // Round up {x} towards Infinity.
-  var_x.Bind(Float64Ceil(x));
+  Variable var_x(this, MachineRepresentation::kFloat64, Float64Ceil(x));
 
   GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
          &return_x);
@@ -176,9 +233,8 @@
   Node* two_52 = Float64Constant(4503599627370496.0E0);
   Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
 
-  Variable var_x(this, MachineRepresentation::kFloat64);
+  Variable var_x(this, MachineRepresentation::kFloat64, x);
   Label return_x(this), return_minus_x(this);
-  var_x.Bind(x);
 
   // Check if {x} is greater than zero.
   Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
@@ -192,7 +248,7 @@
 
     // Round positive {x} towards Infinity.
     var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
-    GotoUnless(Float64LessThan(var_x.value(), x), &return_x);
+    GotoIfNot(Float64LessThan(var_x.value(), x), &return_x);
     var_x.Bind(Float64Add(var_x.value(), one));
     Goto(&return_x);
   }
@@ -201,12 +257,12 @@
   {
     // Just return {x} unless it's in the range ]-2^52,0[
     GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
-    GotoUnless(Float64LessThan(x, zero), &return_x);
+    GotoIfNot(Float64LessThan(x, zero), &return_x);
 
     // Round negated {x} towards Infinity and return the result negated.
     Node* minus_x = Float64Neg(x);
     var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
-    GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+    GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
     var_x.Bind(Float64Sub(var_x.value(), one));
     Goto(&return_minus_x);
   }
@@ -229,9 +285,8 @@
   Node* two_52 = Float64Constant(4503599627370496.0E0);
   Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
 
-  Variable var_x(this, MachineRepresentation::kFloat64);
+  Variable var_x(this, MachineRepresentation::kFloat64, x);
   Label return_x(this), return_minus_x(this);
-  var_x.Bind(x);
 
   // Check if {x} is greater than zero.
   Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
@@ -245,7 +300,7 @@
 
     // Round positive {x} towards -Infinity.
     var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
-    GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+    GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
     var_x.Bind(Float64Sub(var_x.value(), one));
     Goto(&return_x);
   }
@@ -254,12 +309,12 @@
   {
     // Just return {x} unless it's in the range ]-2^52,0[
     GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
-    GotoUnless(Float64LessThan(x, zero), &return_x);
+    GotoIfNot(Float64LessThan(x, zero), &return_x);
 
     // Round negated {x} towards -Infinity and return the result negated.
     Node* minus_x = Float64Neg(x);
     var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
-    GotoUnless(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
+    GotoIfNot(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
     var_x.Bind(Float64Add(var_x.value(), one));
     Goto(&return_minus_x);
   }
@@ -313,9 +368,8 @@
   Node* two_52 = Float64Constant(4503599627370496.0E0);
   Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
 
-  Variable var_x(this, MachineRepresentation::kFloat64);
+  Variable var_x(this, MachineRepresentation::kFloat64, x);
   Label return_x(this), return_minus_x(this);
-  var_x.Bind(x);
 
   // Check if {x} is greater than 0.
   Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
@@ -332,7 +386,7 @@
 
       // Round positive {x} towards -Infinity.
       var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
-      GotoUnless(Float64GreaterThan(var_x.value(), x), &return_x);
+      GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
       var_x.Bind(Float64Sub(var_x.value(), one));
     }
     Goto(&return_x);
@@ -346,12 +400,12 @@
     } else {
       // Just return {x} unless it's in the range ]-2^52,0[.
       GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
-      GotoUnless(Float64LessThan(x, zero), &return_x);
+      GotoIfNot(Float64LessThan(x, zero), &return_x);
 
       // Round negated {x} towards -Infinity and return result negated.
       Node* minus_x = Float64Neg(x);
       var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
-      GotoUnless(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
+      GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
       var_x.Bind(Float64Sub(var_x.value(), one));
       Goto(&return_minus_x);
     }
@@ -388,57 +442,19 @@
 
 Node* CodeStubAssembler::SmiToWord32(Node* value) {
   Node* result = SmiUntag(value);
-  if (Is64()) {
-    result = TruncateInt64ToInt32(result);
-  }
-  return result;
+  return TruncateWordToWord32(result);
 }
 
 Node* CodeStubAssembler::SmiToFloat64(Node* value) {
   return ChangeInt32ToFloat64(SmiToWord32(value));
 }
 
-Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) {
-  return BitcastWordToTaggedSigned(
-      IntPtrAdd(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
-}
-
-Node* CodeStubAssembler::SmiSub(Node* a, Node* b) {
-  return BitcastWordToTaggedSigned(
-      IntPtrSub(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
-}
-
-Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) {
-  return WordEqual(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
-}
-
-Node* CodeStubAssembler::SmiAbove(Node* a, Node* b) {
-  return UintPtrGreaterThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
-}
-
-Node* CodeStubAssembler::SmiAboveOrEqual(Node* a, Node* b) {
-  return UintPtrGreaterThanOrEqual(BitcastTaggedToWord(a),
-                                   BitcastTaggedToWord(b));
-}
-
-Node* CodeStubAssembler::SmiBelow(Node* a, Node* b) {
-  return UintPtrLessThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
-}
-
-Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
-  return IntPtrLessThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
-}
-
-Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
-  return IntPtrLessThanOrEqual(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
-}
-
 Node* CodeStubAssembler::SmiMax(Node* a, Node* b) {
-  return Select(SmiLessThan(a, b), b, a);
+  return SelectTaggedConstant(SmiLessThan(a, b), b, a);
 }
 
 Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
-  return Select(SmiLessThan(a, b), a, b);
+  return SelectTaggedConstant(SmiLessThan(a, b), a, b);
 }
 
 Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
@@ -473,7 +489,7 @@
       // Check if {a} is kMinInt and {b} is -1 (only relevant if the
       // kMinInt is actually representable as a Smi).
       Label join(this);
-      GotoUnless(Word32Equal(a, Int32Constant(kMinInt)), &join);
+      GotoIfNot(Word32Equal(a, Int32Constant(kMinInt)), &join);
       GotoIf(Word32Equal(b, Int32Constant(-1)), &return_minuszero);
       Goto(&join);
       Bind(&join);
@@ -527,7 +543,7 @@
     Label answer_zero(this), answer_not_zero(this);
     Node* answer = Projection(0, pair);
     Node* zero = Int32Constant(0);
-    Branch(WordEqual(answer, zero), &answer_zero, &answer_not_zero);
+    Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero);
     Bind(&answer_not_zero);
     {
       var_result.Bind(ChangeInt32ToTagged(answer));
@@ -546,7 +562,7 @@
       }
       Bind(&if_should_be_zero);
       {
-        var_result.Bind(zero);
+        var_result.Bind(SmiConstant(0));
         Goto(&return_result);
       }
     }
@@ -565,13 +581,27 @@
   return var_result.value();
 }
 
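+// On 64-bit targets a word is wider than 32 bits, so an explicit truncation
+// is needed; on 32-bit targets this is a no-op.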
+Node* CodeStubAssembler::TruncateWordToWord32(Node* value) {
+  if (Is64()) {
+    return TruncateInt64ToInt32(value);
+  }
+  return value;
+}
+
 Node* CodeStubAssembler::TaggedIsSmi(Node* a) {
   return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
                    IntPtrConstant(0));
 }
 
-Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
-  return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask | kSmiSignMask)),
+Node* CodeStubAssembler::TaggedIsNotSmi(Node* a) {
+  return WordNotEqual(
+      WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
+      IntPtrConstant(0));
+}
+
+Node* CodeStubAssembler::TaggedIsPositiveSmi(Node* a) {
+  return WordEqual(WordAnd(BitcastTaggedToWord(a),
+                           IntPtrConstant(kSmiTagMask | kSmiSignMask)),
                    IntPtrConstant(0));
 }
 
@@ -580,82 +610,10 @@
                    WordAnd(word, IntPtrConstant((1 << kPointerSizeLog2) - 1)));
 }
 
-void CodeStubAssembler::BranchIfSimd128Equal(Node* lhs, Node* lhs_map,
-                                             Node* rhs, Node* rhs_map,
-                                             Label* if_equal,
-                                             Label* if_notequal) {
-  Label if_mapsame(this), if_mapnotsame(this);
-  Branch(WordEqual(lhs_map, rhs_map), &if_mapsame, &if_mapnotsame);
-
-  Bind(&if_mapsame);
-  {
-    // Both {lhs} and {rhs} are Simd128Values with the same map, need special
-    // handling for Float32x4 because of NaN comparisons.
-    Label if_float32x4(this), if_notfloat32x4(this);
-    Node* float32x4_map = HeapConstant(factory()->float32x4_map());
-    Branch(WordEqual(lhs_map, float32x4_map), &if_float32x4, &if_notfloat32x4);
-
-    Bind(&if_float32x4);
-    {
-      // Both {lhs} and {rhs} are Float32x4, compare the lanes individually
-      // using a floating point comparison.
-      for (int offset = Float32x4::kValueOffset - kHeapObjectTag;
-           offset < Float32x4::kSize - kHeapObjectTag;
-           offset += sizeof(float)) {
-        // Load the floating point values for {lhs} and {rhs}.
-        Node* lhs_value =
-            Load(MachineType::Float32(), lhs, IntPtrConstant(offset));
-        Node* rhs_value =
-            Load(MachineType::Float32(), rhs, IntPtrConstant(offset));
-
-        // Perform a floating point comparison.
-        Label if_valueequal(this), if_valuenotequal(this);
-        Branch(Float32Equal(lhs_value, rhs_value), &if_valueequal,
-               &if_valuenotequal);
-        Bind(&if_valuenotequal);
-        Goto(if_notequal);
-        Bind(&if_valueequal);
-      }
-
-      // All 4 lanes match, {lhs} and {rhs} considered equal.
-      Goto(if_equal);
-    }
-
-    Bind(&if_notfloat32x4);
-    {
-      // For other Simd128Values we just perform a bitwise comparison.
-      for (int offset = Simd128Value::kValueOffset - kHeapObjectTag;
-           offset < Simd128Value::kSize - kHeapObjectTag;
-           offset += kPointerSize) {
-        // Load the word values for {lhs} and {rhs}.
-        Node* lhs_value =
-            Load(MachineType::Pointer(), lhs, IntPtrConstant(offset));
-        Node* rhs_value =
-            Load(MachineType::Pointer(), rhs, IntPtrConstant(offset));
-
-        // Perform a bitwise word-comparison.
-        Label if_valueequal(this), if_valuenotequal(this);
-        Branch(WordEqual(lhs_value, rhs_value), &if_valueequal,
-               &if_valuenotequal);
-        Bind(&if_valuenotequal);
-        Goto(if_notequal);
-        Bind(&if_valueequal);
-      }
-
-      // Bitwise comparison succeeded, {lhs} and {rhs} considered equal.
-      Goto(if_equal);
-    }
-  }
-
-  Bind(&if_mapnotsame);
-  Goto(if_notequal);
-}
-
 void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
     Node* receiver_map, Label* definitely_no_elements,
     Label* possibly_elements) {
-  Variable var_map(this, MachineRepresentation::kTagged);
-  var_map.Bind(receiver_map);
+  Variable var_map(this, MachineRepresentation::kTagged, receiver_map);
   Label loop_body(this, &var_map);
   Node* empty_elements = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
   Goto(&loop_body);
@@ -698,25 +656,27 @@
          if_true, if_false);
 }
 
-void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
-                                            Label* if_true, Label* if_false) {
+void CodeStubAssembler::BranchIfFastJSArray(
+    Node* object, Node* context, CodeStubAssembler::FastJSArrayAccessMode mode,
+    Label* if_true, Label* if_false) {
   // Bail out if receiver is a Smi.
   GotoIf(TaggedIsSmi(object), if_false);
 
   Node* map = LoadMap(object);
 
   // Bailout if instance type is not JS_ARRAY_TYPE.
-  GotoIf(WordNotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
+  GotoIf(Word32NotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
          if_false);
 
   Node* elements_kind = LoadMapElementsKind(map);
 
   // Bail out if receiver has slow elements.
-  GotoUnless(IsFastElementsKind(elements_kind), if_false);
+  GotoIfNot(IsFastElementsKind(elements_kind), if_false);
 
   // Check prototype chain if receiver does not have packed elements.
-  GotoUnless(IsHoleyFastElementsKind(elements_kind), if_true);
-
+  if (mode == FastJSArrayAccessMode::INBOUNDS_READ) {
+    GotoIfNot(IsHoleyFastElementsKind(elements_kind), if_true);
+  }
   BranchIfPrototypesHaveNoElements(map, if_true, if_false);
 }
 
@@ -732,6 +692,22 @@
   Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
   Label merge_runtime(this, &result);
 
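+  // Objects exceeding the regular heap object size limit cannot be
+  // bump-pointer allocated; hand them to the large-object-space runtime
+  // allocator up front.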
+  if (flags & kAllowLargeObjectAllocation) {
+    Label next(this);
+    GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next);
+
+    Node* runtime_flags = SmiConstant(
+        Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+                     AllocateTargetSpace::encode(AllocationSpace::LO_SPACE)));
+    Node* const runtime_result =
+        CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
+                    SmiTag(size_in_bytes), runtime_flags);
+    result.Bind(runtime_result);
+    Goto(&merge_runtime);
+
+    Bind(&next);
+  }
+
   Node* new_top = IntPtrAdd(top, size_in_bytes);
   Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call,
          &no_runtime_call);
@@ -772,10 +748,9 @@
                                             Node* limit_address) {
   Node* top = Load(MachineType::Pointer(), top_address);
   Node* limit = Load(MachineType::Pointer(), limit_address);
-  Variable adjusted_size(this, MachineType::PointerRepresentation());
-  adjusted_size.Bind(size_in_bytes);
+  Variable adjusted_size(this, MachineType::PointerRepresentation(),
+                         size_in_bytes);
   if (flags & kDoubleAlignment) {
-    // TODO(epertoso): Simd128 alignment.
     Label aligned(this), not_aligned(this), merge(this, &adjusted_size);
     Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
            &aligned);
@@ -792,8 +767,9 @@
     Bind(&merge);
   }
 
-  Variable address(this, MachineRepresentation::kTagged);
-  address.Bind(AllocateRawUnaligned(adjusted_size.value(), kNone, top, limit));
+  Variable address(
+      this, MachineRepresentation::kTagged,
+      AllocateRawUnaligned(adjusted_size.value(), kNone, top, limit));
 
   Label needs_filler(this), doesnt_need_filler(this),
       merge_address(this, &address);
@@ -802,8 +778,6 @@
 
   Bind(&needs_filler);
   // Store a filler and increase the address by kPointerSize.
-  // TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
-  // it when Simd128 alignment is supported.
   StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
                       LoadRoot(Heap::kOnePointerFillerMapRootIndex));
   address.Bind(BitcastWordToTagged(
@@ -827,10 +801,17 @@
       new_space
           ? ExternalReference::new_space_allocation_top_address(isolate())
           : ExternalReference::old_space_allocation_top_address(isolate()));
-  Node* limit_address = ExternalConstant(
-      new_space
-          ? ExternalReference::new_space_allocation_limit_address(isolate())
-          : ExternalReference::old_space_allocation_limit_address(isolate()));
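+  // The allocation limit address is assumed to be stored one word after the
+  // corresponding top address; the DCHECKs below verify that layout so the
+  // limit can be derived with a single add.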
+  DCHECK_EQ(kPointerSize,
+            ExternalReference::new_space_allocation_limit_address(isolate())
+                    .address() -
+                ExternalReference::new_space_allocation_top_address(isolate())
+                    .address());
+  DCHECK_EQ(kPointerSize,
+            ExternalReference::old_space_allocation_limit_address(isolate())
+                    .address() -
+                ExternalReference::old_space_allocation_top_address(isolate())
+                    .address());
+  Node* limit_address = IntPtrAdd(top_address, IntPtrConstant(kPointerSize));
 
 #ifdef V8_HOST_ARCH_32_BIT
   if (flags & kDoubleAlignment) {
@@ -846,7 +827,7 @@
 }
 
 Node* CodeStubAssembler::InnerAllocate(Node* previous, Node* offset) {
-  return BitcastWordToTagged(IntPtrAdd(previous, offset));
+  return BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset));
 }
 
 Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
@@ -860,11 +841,10 @@
 
 void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
                                                 Label* if_false) {
-  Label if_valueissmi(this), if_valueisnotsmi(this), if_valueisstring(this),
-      if_valueisheapnumber(this), if_valueisother(this);
+  Label if_valueissmi(this), if_valueisnotsmi(this),
+      if_valueisheapnumber(this, Label::kDeferred);
 
-  // Fast check for Boolean {value}s (common case).
-  GotoIf(WordEqual(value, BooleanConstant(true)), if_true);
+  // Rule out false {value}.
   GotoIf(WordEqual(value, BooleanConstant(false)), if_false);
 
   // Check if {value} is a Smi or a HeapObject.
@@ -878,27 +858,24 @@
 
   Bind(&if_valueisnotsmi);
   {
+    // Check if {value} is the empty string.
+    GotoIf(IsEmptyString(value), if_false);
+
     // The {value} is a HeapObject, load its map.
     Node* value_map = LoadMap(value);
 
-    // Load the {value}s instance type.
-    Node* value_instance_type = LoadMapInstanceType(value_map);
+    // Only null, undefined and document.all have the undetectable bit set,
+    // so we can return false immediately when that bit is set.
+    Node* value_map_bitfield = LoadMapBitField(value_map);
+    Node* value_map_undetectable =
+        Word32And(value_map_bitfield, Int32Constant(1 << Map::kIsUndetectable));
 
-    // Dispatch based on the instance type; we distinguish all String instance
-    // types, the HeapNumber type and everything else.
-    GotoIf(Word32Equal(value_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
-           &if_valueisheapnumber);
-    Branch(IsStringInstanceType(value_instance_type), &if_valueisstring,
-           &if_valueisother);
+    // Check if the {value} is undetectable.
+    GotoIfNot(Word32Equal(value_map_undetectable, Int32Constant(0)), if_false);
 
-    Bind(&if_valueisstring);
-    {
-      // Load the string length field of the {value}.
-      Node* value_length = LoadObjectField(value, String::kLengthOffset);
-
-      // Check if the {value} is the empty string.
-      BranchIfSmiEqual(value_length, SmiConstant(0), if_false, if_true);
-    }
+    // We still need to handle numbers specially, but all other {value}s
+    // that make it here yield true.
+    Branch(IsHeapNumberMap(value_map), &if_valueisheapnumber, if_true);
 
     Bind(&if_valueisheapnumber);
     {
@@ -910,32 +887,15 @@
       Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)),
              if_true, if_false);
     }
-
-    Bind(&if_valueisother);
-    {
-      // Load the bit field from the {value}s map. The {value} is now either
-      // Null or Undefined, which have the undetectable bit set (so we always
-      // return false for those), or a Symbol or Simd128Value, whose maps never
-      // have the undetectable bit set (so we always return true for those), or
-      // a JSReceiver, which may or may not have the undetectable bit set.
-      Node* value_map_bitfield = LoadMapBitField(value_map);
-      Node* value_map_undetectable = Word32And(
-          value_map_bitfield, Int32Constant(1 << Map::kIsUndetectable));
-
-      // Check if the {value} is undetectable.
-      Branch(Word32Equal(value_map_undetectable, Int32Constant(0)), if_true,
-             if_false);
-    }
   }
 }
 
-compiler::Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) {
+Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) {
   Node* frame_pointer = LoadFramePointer();
   return Load(rep, frame_pointer, IntPtrConstant(offset));
 }
 
-compiler::Node* CodeStubAssembler::LoadFromParentFrame(int offset,
-                                                       MachineType rep) {
+Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType rep) {
   Node* frame_pointer = LoadParentFramePointer();
   return Load(rep, frame_pointer, IntPtrConstant(offset));
 }
@@ -1009,6 +969,24 @@
   }
 }
 
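+// Stores an intptr-sized {value} into the field at {offset} as a tagged Smi.
+// On 64-bit targets the zero upper half-word and the 32-bit payload are
+// written as two separate word32 stores (swapped for little-endian layouts).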
+Node* CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) {
+  if (Is64()) {
+    int zero_offset = offset + kPointerSize / 2;
+    int payload_offset = offset;
+#if V8_TARGET_LITTLE_ENDIAN
+    std::swap(zero_offset, payload_offset);
+#endif
+    StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
+                        IntPtrConstant(zero_offset), Int32Constant(0));
+    return StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
+                               IntPtrConstant(payload_offset),
+                               TruncateInt64ToInt32(value));
+  } else {
+    return StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
+                               IntPtrConstant(offset), SmiTag(value));
+  }
+}
+
 Node* CodeStubAssembler::LoadHeapNumberValue(Node* object) {
   return LoadObjectField(object, HeapNumber::kValueOffset,
                          MachineType::Float64());
@@ -1027,6 +1005,11 @@
   return Word32Equal(LoadInstanceType(object), Int32Constant(instance_type));
 }
 
+Node* CodeStubAssembler::DoesntHaveInstanceType(Node* object,
+                                                InstanceType instance_type) {
+  return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type));
+}
+
 Node* CodeStubAssembler::LoadProperties(Node* object) {
   return LoadObjectField(object, JSObject::kPropertiesOffset);
 }
@@ -1089,9 +1072,9 @@
   Node* prototype_info =
       LoadObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
   GotoIf(TaggedIsSmi(prototype_info), if_no_proto_info);
-  GotoUnless(WordEqual(LoadMap(prototype_info),
-                       LoadRoot(Heap::kPrototypeInfoMapRootIndex)),
-             if_no_proto_info);
+  GotoIfNot(WordEqual(LoadMap(prototype_info),
+                      LoadRoot(Heap::kPrototypeInfoMapRootIndex)),
+            if_no_proto_info);
   return prototype_info;
 }
 
@@ -1126,8 +1109,8 @@
 
 Node* CodeStubAssembler::LoadMapConstructor(Node* map) {
   CSA_SLOW_ASSERT(this, IsMap(map));
-  Variable result(this, MachineRepresentation::kTagged);
-  result.Bind(LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
+  Variable result(this, MachineRepresentation::kTagged,
+                  LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
 
   Label done(this), loop(this, &result);
   Goto(&loop);
@@ -1136,7 +1119,7 @@
     GotoIf(TaggedIsSmi(result.value()), &done);
     Node* is_map_type =
         Word32Equal(LoadInstanceType(result.value()), Int32Constant(MAP_TYPE));
-    GotoUnless(is_map_type, &done);
+    GotoIfNot(is_map_type, &done);
     result.Bind(
         LoadObjectField(result.value(), Map::kConstructorOrBackPointerOffset));
     Goto(&loop);
@@ -1145,6 +1128,25 @@
   return result.value();
 }
 
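+// SharedFunctionInfo keeps several of its counters as raw 32-bit values on
+// 64-bit targets and as Smis on 32-bit targets; this loads such a field and
+// converts it to the requested ParameterMode.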
+Node* CodeStubAssembler::LoadSharedFunctionInfoSpecialField(
+    Node* shared, int offset, ParameterMode mode) {
+  if (Is64()) {
+    Node* result = LoadObjectField(shared, offset, MachineType::Int32());
+    if (mode == SMI_PARAMETERS) {
+      result = SmiTag(result);
+    } else {
+      result = ChangeUint32ToWord(result);
+    }
+    return result;
+  } else {
+    Node* result = LoadObjectField(shared, offset);
+    if (mode != SMI_PARAMETERS) {
+      result = SmiUntag(result);
+    }
+    return result;
+  }
+}
+
 Node* CodeStubAssembler::LoadNameHashField(Node* name) {
   CSA_ASSERT(this, IsName(name));
   return LoadObjectField(name, Name::kHashFieldOffset, MachineType::Uint32());
@@ -1303,8 +1305,7 @@
 Node* CodeStubAssembler::StoreContextElement(Node* context, int slot_index,
                                              Node* value) {
   int offset = Context::SlotOffset(slot_index);
-  return Store(MachineRepresentation::kTagged, context, IntPtrConstant(offset),
-               value);
+  return Store(context, IntPtrConstant(offset), value);
 }
 
 Node* CodeStubAssembler::StoreContextElement(Node* context, Node* slot_index,
@@ -1312,7 +1313,15 @@
   Node* offset =
       IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
                 IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
-  return Store(MachineRepresentation::kTagged, context, offset, value);
+  return Store(context, offset, value);
+}
+
+Node* CodeStubAssembler::StoreContextElementNoWriteBarrier(Node* context,
+                                                           int slot_index,
+                                                           Node* value) {
+  int offset = Context::SlotOffset(slot_index);
+  return StoreNoWriteBarrier(MachineRepresentation::kTagged, context,
+                             IntPtrConstant(offset), value);
 }
 
 Node* CodeStubAssembler::LoadNativeContext(Node* context) {
@@ -1322,8 +1331,7 @@
 Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
                                                 Node* native_context) {
   CSA_ASSERT(this, IsNativeContext(native_context));
-  return LoadFixedArrayElement(native_context,
-                               IntPtrConstant(Context::ArrayMapIndex(kind)));
+  return LoadContextElement(native_context, Context::ArrayMapIndex(kind));
 }
 
 Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
@@ -1333,8 +1341,8 @@
 
 Node* CodeStubAssembler::StoreObjectField(
     Node* object, int offset, Node* value) {
-  return Store(MachineRepresentation::kTagged, object,
-               IntPtrConstant(offset - kHeapObjectTag), value);
+  DCHECK_NE(HeapObject::kMapOffset, offset);  // Use StoreMap instead.
+  return Store(object, IntPtrConstant(offset - kHeapObjectTag), value);
 }
 
 Node* CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
@@ -1343,8 +1351,8 @@
   if (ToInt32Constant(offset, const_offset)) {
     return StoreObjectField(object, const_offset, value);
   }
-  return Store(MachineRepresentation::kTagged, object,
-               IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
+  return Store(object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)),
+               value);
 }
 
 Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
@@ -1363,10 +1371,22 @@
       rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
 }
 
+Node* CodeStubAssembler::StoreMap(Node* object, Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
+  return StoreWithMapWriteBarrier(
+      object, IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map);
+}
+
+Node* CodeStubAssembler::StoreMapNoWriteBarrier(
+    Node* object, Heap::RootListIndex map_root_index) {
+  return StoreMapNoWriteBarrier(object, LoadRoot(map_root_index));
+}
+
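+// Note: skipping the map write barrier is only safe for maps that can never
+// move, e.g. the immortal immovable root maps (cf. the
+// Heap::RootIsImmortalImmovable DCHECKs in the allocation helpers below).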
 Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
+  CSA_SLOW_ASSERT(this, IsMap(map));
   return StoreNoWriteBarrier(
       MachineRepresentation::kTagged, object,
-      IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
+      IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map);
 }
 
 Node* CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
@@ -1381,17 +1401,19 @@
 Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
                                                 Node* value,
                                                 WriteBarrierMode barrier_mode,
+                                                int additional_offset,
                                                 ParameterMode parameter_mode) {
   DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
          barrier_mode == UPDATE_WRITE_BARRIER);
-  Node* offset =
-      ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS, parameter_mode,
-                             FixedArray::kHeaderSize - kHeapObjectTag);
-  MachineRepresentation rep = MachineRepresentation::kTagged;
+  int header_size =
+      FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
+  Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
+                                        parameter_mode, header_size);
   if (barrier_mode == SKIP_WRITE_BARRIER) {
-    return StoreNoWriteBarrier(rep, object, offset, value);
+    return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
+                               value);
   } else {
-    return Store(rep, object, offset, value);
+    return Store(object, offset, value);
   }
 }
 
@@ -1405,13 +1427,90 @@
   return StoreNoWriteBarrier(rep, object, offset, value);
 }
 
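+// Attempts to append the remaining arguments {args}, starting at {arg_index},
+// to {array} on the fast path. On failure it jumps to {bailout} with
+// {arg_index} updated to the first argument that was not consumed.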
+Node* CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* context,
+                                            Node* array,
+                                            CodeStubArguments& args,
+                                            Variable& arg_index,
+                                            Label* bailout) {
+  Comment("BuildAppendJSArray: %s", ElementsKindToString(kind));
+  Label pre_bailout(this);
+  Label success(this);
+  Variable var_tagged_length(this, MachineRepresentation::kTagged);
+  ParameterMode mode = OptimalParameterMode();
+  Variable var_length(this, OptimalParameterRepresentation(),
+                      TaggedToParameter(LoadJSArrayLength(array), mode));
+  Variable var_elements(this, MachineRepresentation::kTagged,
+                        LoadElements(array));
+  Node* capacity =
+      TaggedToParameter(LoadFixedArrayBaseLength(var_elements.value()), mode);
+
+  // Resize the capacity of the fixed array if it doesn't fit.
+  Label fits(this, &var_elements);
+  Node* first = arg_index.value();
+  Node* growth = IntPtrSub(args.GetLength(), first);
+  Node* new_length =
+      IntPtrOrSmiAdd(WordToParameter(growth, mode), var_length.value(), mode);
+  GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits);
+  Node* new_capacity = CalculateNewElementsCapacity(new_length, mode);
+  var_elements.Bind(GrowElementsCapacity(array, var_elements.value(), kind,
+                                         kind, capacity, new_capacity, mode,
+                                         &pre_bailout));
+  Goto(&fits);
+  Bind(&fits);
+  Node* elements = var_elements.value();
+
+  // Push each argument onto the end of the array now that there is enough
+  // capacity.
+  CodeStubAssembler::VariableList push_vars({&var_length}, zone());
+  args.ForEach(
+      push_vars,
+      [this, kind, mode, elements, &var_length, &pre_bailout](Node* arg) {
+        if (IsFastSmiElementsKind(kind)) {
+          GotoIf(TaggedIsNotSmi(arg), &pre_bailout);
+        } else if (IsFastDoubleElementsKind(kind)) {
+          GotoIfNotNumber(arg, &pre_bailout);
+        }
+        if (IsFastDoubleElementsKind(kind)) {
+          Node* double_value = ChangeNumberToFloat64(arg);
+          StoreFixedDoubleArrayElement(elements, var_length.value(),
+                                       Float64SilenceNaN(double_value), mode);
+        } else {
+          WriteBarrierMode barrier_mode = IsFastSmiElementsKind(kind)
+                                              ? SKIP_WRITE_BARRIER
+                                              : UPDATE_WRITE_BARRIER;
+          StoreFixedArrayElement(elements, var_length.value(), arg,
+                                 barrier_mode, 0, mode);
+        }
+        Increment(var_length, 1, mode);
+      },
+      first, nullptr);
+  {
+    Node* length = ParameterToTagged(var_length.value(), mode);
+    var_tagged_length.Bind(length);
+    StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
+    Goto(&success);
+  }
+
+  Bind(&pre_bailout);
+  {
+    Node* length = ParameterToTagged(var_length.value(), mode);
+    var_tagged_length.Bind(length);
+    Node* diff = SmiSub(length, LoadJSArrayLength(array));
+    StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
+    arg_index.Bind(IntPtrAdd(arg_index.value(), SmiUntag(diff)));
+    Goto(bailout);
+  }
+
+  Bind(&success);
+  return var_tagged_length.value();
+}
+
 Node* CodeStubAssembler::AllocateHeapNumber(MutableMode mode) {
   Node* result = Allocate(HeapNumber::kSize, kNone);
   Heap::RootListIndex heap_map_index =
       mode == IMMUTABLE ? Heap::kHeapNumberMapRootIndex
                         : Heap::kMutableHeapNumberMapRootIndex;
-  Node* map = LoadRoot(heap_map_index);
-  StoreMapNoWriteBarrier(result, map);
+  StoreMapNoWriteBarrier(result, heap_map_index);
   return result;
 }
 
@@ -1425,14 +1524,18 @@
 Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
                                                   AllocationFlags flags) {
   Comment("AllocateSeqOneByteString");
+  if (length == 0) {
+    return LoadRoot(Heap::kempty_stringRootIndex);
+  }
   Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
   DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
-  StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
+  StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
   StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
                                  SmiConstant(Smi::FromInt(length)));
-  StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
+  // Initialize both the used and unused parts of the hash field slot at once.
+  StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
                                  IntPtrConstant(String::kEmptyHashField),
-                                 MachineRepresentation::kWord32);
+                                 MachineType::PointerRepresentation());
   return result;
 }
 
@@ -1443,8 +1546,10 @@
   Variable var_result(this, MachineRepresentation::kTagged);
 
   // Compute the SeqOneByteString size and check if it fits into new space.
-  Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
-      if_join(this);
+  Label if_lengthiszero(this), if_sizeissmall(this),
+      if_notsizeissmall(this, Label::kDeferred), if_join(this);
+  GotoIf(WordEqual(length, IntPtrOrSmiConstant(0, mode)), &if_lengthiszero);
+
   Node* raw_size = GetArrayAllocationSize(
       length, UINT8_ELEMENTS, mode,
       SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
@@ -1457,13 +1562,13 @@
     // Just allocate the SeqOneByteString in new space.
     Node* result = Allocate(size, flags);
     DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
-    StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
-    StoreObjectFieldNoWriteBarrier(
-        result, SeqOneByteString::kLengthOffset,
-        mode == SMI_PARAMETERS ? length : SmiFromWord(length));
-    StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
+    StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
+    StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
+                                   ParameterToTagged(length, mode));
+    // Initialize both the used and unused parts of the hash field slot at
+    // once.
+    StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
                                    IntPtrConstant(String::kEmptyHashField),
-                                   MachineRepresentation::kWord32);
+                                   MachineType::PointerRepresentation());
     var_result.Bind(result);
     Goto(&if_join);
   }
@@ -1471,13 +1576,18 @@
   Bind(&if_notsizeissmall);
   {
     // We might need to allocate in large object space, go to the runtime.
-    Node* result =
-        CallRuntime(Runtime::kAllocateSeqOneByteString, context,
-                    mode == SMI_PARAMETERS ? length : SmiFromWord(length));
+    Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context,
+                               ParameterToTagged(length, mode));
     var_result.Bind(result);
     Goto(&if_join);
   }
 
+  Bind(&if_lengthiszero);
+  {
+    var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex));
+    Goto(&if_join);
+  }
+
   Bind(&if_join);
   return var_result.value();
 }
@@ -1485,14 +1595,18 @@
 Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
                                                   AllocationFlags flags) {
   Comment("AllocateSeqTwoByteString");
+  if (length == 0) {
+    return LoadRoot(Heap::kempty_stringRootIndex);
+  }
   Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
   DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
-  StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
+  StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
   StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
                                  SmiConstant(Smi::FromInt(length)));
-  StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
+  // Initialize both the used and unused parts of the hash field slot at once.
+  StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
                                  IntPtrConstant(String::kEmptyHashField),
-                                 MachineRepresentation::kWord32);
+                                 MachineType::PointerRepresentation());
   return result;
 }
 
@@ -1503,8 +1617,10 @@
   Variable var_result(this, MachineRepresentation::kTagged);
 
   // Compute the SeqTwoByteString size and check if it fits into new space.
-  Label if_sizeissmall(this), if_notsizeissmall(this, Label::kDeferred),
-      if_join(this);
+  Label if_lengthiszero(this), if_sizeissmall(this),
+      if_notsizeissmall(this, Label::kDeferred), if_join(this);
+  GotoIf(WordEqual(length, IntPtrOrSmiConstant(0, mode)), &if_lengthiszero);
+
   Node* raw_size = GetArrayAllocationSize(
       length, UINT16_ELEMENTS, mode,
       SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
@@ -1517,13 +1633,14 @@
     // Just allocate the SeqTwoByteString in new space.
     Node* result = Allocate(size, flags);
     DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
-    StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
+    StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
     StoreObjectFieldNoWriteBarrier(
         result, SeqTwoByteString::kLengthOffset,
         mode == SMI_PARAMETERS ? length : SmiFromWord(length));
-    StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
+    // Initialize both used and unused parts of hash field slot at once.
+    StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
                                    IntPtrConstant(String::kEmptyHashField),
-                                   MachineRepresentation::kWord32);
+                                   MachineType::PointerRepresentation());
     var_result.Bind(result);
     Goto(&if_join);
   }
@@ -1538,6 +1655,12 @@
     Goto(&if_join);
   }
 
+  Bind(&if_lengthiszero);
+  {
+    var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex));
+    Goto(&if_join);
+  }
+
   Bind(&if_join);
   return var_result.value();
 }
@@ -1547,14 +1670,14 @@
     Node* offset) {
   CSA_ASSERT(this, TaggedIsSmi(length));
   Node* result = Allocate(SlicedString::kSize);
-  Node* map = LoadRoot(map_root_index);
   DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
-  StoreMapNoWriteBarrier(result, map);
+  StoreMapNoWriteBarrier(result, map_root_index);
   StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
                                  MachineRepresentation::kTagged);
-  StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset,
-                                 Int32Constant(String::kEmptyHashField),
-                                 MachineRepresentation::kWord32);
+  // Initialize both used and unused parts of hash field slot at once.
+  StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldSlot,
+                                 IntPtrConstant(String::kEmptyHashField),
+                                 MachineType::PointerRepresentation());
   StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent,
                                  MachineRepresentation::kTagged);
   StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
@@ -1580,14 +1703,14 @@
                                             AllocationFlags flags) {
   CSA_ASSERT(this, TaggedIsSmi(length));
   Node* result = Allocate(ConsString::kSize, flags);
-  Node* map = LoadRoot(map_root_index);
   DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
-  StoreMapNoWriteBarrier(result, map);
+  StoreMapNoWriteBarrier(result, map_root_index);
   StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
                                  MachineRepresentation::kTagged);
-  StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset,
-                                 Int32Constant(String::kEmptyHashField),
-                                 MachineRepresentation::kWord32);
+  // Initialize both used and unused parts of hash field slot at once.
+  StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldSlot,
+                                 IntPtrConstant(String::kEmptyHashField),
+                                 MachineType::PointerRepresentation());
   bool const new_space = !(flags & kPretenured);
   if (new_space) {
     StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, first,
@@ -1624,8 +1747,10 @@
   Node* right_instance_type = LoadInstanceType(right);
 
   // Compute intersection and difference of instance types.
-  Node* anded_instance_types = WordAnd(left_instance_type, right_instance_type);
-  Node* xored_instance_types = WordXor(left_instance_type, right_instance_type);
+  Node* anded_instance_types =
+      Word32And(left_instance_type, right_instance_type);
+  Node* xored_instance_types =
+      Word32Xor(left_instance_type, right_instance_type);
 
   // We create a one-byte cons string if
   // 1. both strings are one-byte, or
@@ -1642,15 +1767,15 @@
   Label two_byte_map(this);
   Variable result(this, MachineRepresentation::kTagged);
   Label done(this, &result);
-  GotoIf(WordNotEqual(
-             WordAnd(anded_instance_types,
-                     IntPtrConstant(kStringEncodingMask | kOneByteDataHintTag)),
-             IntPtrConstant(0)),
+  GotoIf(Word32NotEqual(Word32And(anded_instance_types,
+                                  Int32Constant(kStringEncodingMask |
+                                                kOneByteDataHintTag)),
+                        Int32Constant(0)),
          &one_byte_map);
-  Branch(WordNotEqual(WordAnd(xored_instance_types,
-                              IntPtrConstant(kStringEncodingMask |
-                                             kOneByteDataHintMask)),
-                      IntPtrConstant(kOneByteStringTag | kOneByteDataHintTag)),
+  Branch(Word32NotEqual(Word32And(xored_instance_types,
+                                  Int32Constant(kStringEncodingMask |
+                                                kOneByteDataHintMask)),
+                        Int32Constant(kOneByteStringTag | kOneByteDataHintTag)),
          &two_byte_map, &one_byte_map);
 
   Bind(&one_byte_map);
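 // Editorial sketch: the two 32-bit tests above implement the rule from the
 // comment -- if (left & right) has the encoding bit or the one-byte data
 // hint set, both inputs are (or are known to contain only) one-byte data,
 // so the one-byte cons map is used; otherwise the XOR of the instance types
 // distinguishes the remaining mixed-encoding case.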
@@ -1700,15 +1825,13 @@
   Node* const zero = IntPtrConstant(0);
   Node* const length_intptr = SmiUntag(length);
   const ElementsKind elements_kind = FAST_ELEMENTS;
-  const ParameterMode parameter_mode = INTPTR_PARAMETERS;
 
-  Node* const elements =
-      AllocateFixedArray(elements_kind, length_intptr, parameter_mode);
+  Node* const elements = AllocateFixedArray(elements_kind, length_intptr);
   StoreObjectField(result, JSArray::kElementsOffset, elements);
 
   // Fill in the elements with undefined.
   FillFixedArrayWithValue(elements_kind, elements, zero, length_intptr,
-                          Heap::kUndefinedValueRootIndex, parameter_mode);
+                          Heap::kUndefinedValueRootIndex);
 
   return result;
 }
@@ -1727,14 +1850,14 @@
 
   Node* length = EntryToIndex<NameDictionary>(capacity);
   Node* store_size =
-      IntPtrAddFoldConstants(WordShl(length, IntPtrConstant(kPointerSizeLog2)),
-                             IntPtrConstant(NameDictionary::kHeaderSize));
+      IntPtrAdd(WordShl(length, IntPtrConstant(kPointerSizeLog2)),
+                IntPtrConstant(NameDictionary::kHeaderSize));
 
   Node* result = Allocate(store_size);
   Comment("Initialize NameDictionary");
   // Initialize FixedArray fields.
-  StoreObjectFieldRoot(result, FixedArray::kMapOffset,
-                       Heap::kHashTableMapRootIndex);
+  DCHECK(Heap::RootIsImmortalImmovable(Heap::kHashTableMapRootIndex));
+  StoreMapNoWriteBarrier(result, Heap::kHashTableMapRootIndex);
   StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
                                  SmiFromWord(length));
   // Initialize HashTable fields.
@@ -1754,25 +1877,25 @@
                          SKIP_WRITE_BARRIER);
 
   // Initialize NameDictionary elements.
-  result = BitcastTaggedToWord(result);
+  Node* result_word = BitcastTaggedToWord(result);
   Node* start_address = IntPtrAdd(
-      result, IntPtrConstant(NameDictionary::OffsetOfElementAt(
-                                 NameDictionary::kElementsStartIndex) -
-                             kHeapObjectTag));
+      result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt(
+                                      NameDictionary::kElementsStartIndex) -
+                                  kHeapObjectTag));
   Node* end_address = IntPtrAdd(
-      result,
-      IntPtrSubFoldConstants(store_size, IntPtrConstant(kHeapObjectTag)));
+      result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag)));
   StoreFieldsNoWriteBarrier(start_address, end_address, filler);
   return result;
 }
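 // Editorial sketch: store_size above works out to
 //   EntryToIndex(capacity) * kPointerSize + NameDictionary::kHeaderSize,
 // i.e. the dictionary header plus one tagged slot for every array index up
 // to the end of the last possible entry.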
 
 Node* CodeStubAssembler::AllocateJSObjectFromMap(Node* map, Node* properties,
-                                                 Node* elements) {
+                                                 Node* elements,
+                                                 AllocationFlags flags) {
   CSA_ASSERT(this, IsMap(map));
   Node* size =
       IntPtrMul(LoadMapInstanceSize(map), IntPtrConstant(kPointerSize));
   CSA_ASSERT(this, IsRegularHeapObjectSize(size));
-  Node* object = Allocate(size);
+  Node* object = Allocate(size, flags);
   StoreMapNoWriteBarrier(object, map);
   InitializeJSObjectFromMap(object, map, size, properties, elements);
   return object;
@@ -1806,6 +1929,7 @@
   Comment("InitializeJSObjectBody");
   Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
   // Calculate the untagged field addresses.
+  object = BitcastTaggedToWord(object);
   Node* start_address =
       IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
   Node* end_address =
@@ -1819,12 +1943,12 @@
   Comment("StoreFieldsNoWriteBarrier");
   CSA_ASSERT(this, WordIsWordAligned(start_address));
   CSA_ASSERT(this, WordIsWordAligned(end_address));
-  BuildFastLoop(
-      MachineType::PointerRepresentation(), start_address, end_address,
-      [value](CodeStubAssembler* a, Node* current) {
-        a->StoreNoWriteBarrier(MachineRepresentation::kTagged, current, value);
-      },
-      kPointerSize, IndexAdvanceMode::kPost);
+  BuildFastLoop(start_address, end_address,
+                [this, value](Node* current) {
+                  StoreNoWriteBarrier(MachineRepresentation::kTagged, current,
+                                      value);
+                },
+                kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
 }
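 // Editorial sketch: the rewritten loop is equivalent to
 //   for (addr = start_address; addr != end_address; addr += kPointerSize)
 //     StoreNoWriteBarrier(kTagged, addr, value);
 // Skipping the write barrier is safe here only because the callers target
 // freshly allocated (new-space) objects.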
 
 Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
@@ -1861,9 +1985,8 @@
   Node* array = AllocateUninitializedJSArray(kind, array_map, length,
                                              allocation_site, size);
 
-  // The bitcast here is safe because InnerAllocate doesn't actually allocate.
-  Node* elements = InnerAllocate(BitcastTaggedToWord(array), elements_offset);
-  StoreObjectField(array, JSObject::kElementsOffset, elements);
+  Node* elements = InnerAllocate(array, elements_offset);
+  StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset, elements);
 
   return {array, elements};
 }
@@ -1878,6 +2001,7 @@
   Comment("write JSArray headers");
   StoreMapNoWriteBarrier(array, array_map);
 
+  CSA_ASSERT(this, TaggedIsSmi(length));
   StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
 
   StoreObjectFieldRoot(array, JSArray::kPropertiesOffset,
@@ -1893,25 +2017,31 @@
                                          Node* capacity, Node* length,
                                          Node* allocation_site,
                                          ParameterMode capacity_mode) {
-  bool is_double = IsFastDoubleElementsKind(kind);
-
-  // Allocate both array and elements object, and initialize the JSArray.
-  Node *array, *elements;
-  std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
-      kind, array_map, length, allocation_site, capacity, capacity_mode);
-  // Setup elements object.
-  Heap* heap = isolate()->heap();
-  Handle<Map> elements_map(is_double ? heap->fixed_double_array_map()
-                                     : heap->fixed_array_map());
-  StoreMapNoWriteBarrier(elements, HeapConstant(elements_map));
-  StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
-                                 TagParameter(capacity, capacity_mode));
-
-  // Fill in the elements with holes.
-  FillFixedArrayWithValue(
-      kind, elements, capacity_mode == SMI_PARAMETERS ? SmiConstant(Smi::kZero)
-                                                      : IntPtrConstant(0),
-      capacity, Heap::kTheHoleValueRootIndex, capacity_mode);
+  Node *array = nullptr, *elements = nullptr;
+  if (IsIntPtrOrSmiConstantZero(capacity)) {
+    // Array is empty. Use the shared empty fixed array instead of allocating a
+    // new one.
+    array = AllocateUninitializedJSArrayWithoutElements(kind, array_map, length,
+                                                        nullptr);
+    StoreObjectFieldRoot(array, JSArray::kElementsOffset,
+                         Heap::kEmptyFixedArrayRootIndex);
+  } else {
+    // Allocate both array and elements object, and initialize the JSArray.
+    std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+        kind, array_map, length, allocation_site, capacity, capacity_mode);
+    // Set up the elements object.
+    Heap::RootListIndex elements_map_index =
+        IsFastDoubleElementsKind(kind) ? Heap::kFixedDoubleArrayMapRootIndex
+                                       : Heap::kFixedArrayMapRootIndex;
+    DCHECK(Heap::RootIsImmortalImmovable(elements_map_index));
+    StoreMapNoWriteBarrier(elements, elements_map_index);
+    StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
+                                   ParameterToTagged(capacity, capacity_mode));
+    // Fill in the elements with holes.
+    FillFixedArrayWithValue(kind, elements,
+                            IntPtrOrSmiConstant(0, capacity_mode), capacity,
+                            Heap::kTheHoleValueRootIndex, capacity_mode);
+  }
 
   return array;
 }
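 // Editorial note: when `capacity` is a compile-time zero, the branch above
 // skips allocating a backing store entirely and points kElementsOffset at
 // the shared empty FixedArray root, so an empty JSArray costs a single
 // object.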
@@ -1920,23 +2050,19 @@
                                             Node* capacity_node,
                                             ParameterMode mode,
                                             AllocationFlags flags) {
-  CSA_ASSERT(this,
-             IntPtrGreaterThan(capacity_node, IntPtrOrSmiConstant(0, mode)));
+  CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node,
+                                          IntPtrOrSmiConstant(0, mode), mode));
   Node* total_size = GetFixedArrayAllocationSize(capacity_node, kind, mode);
 
   // Allocate both array and elements object, and initialize the JSArray.
   Node* array = Allocate(total_size, flags);
-  Heap* heap = isolate()->heap();
-  Handle<Map> map(IsFastDoubleElementsKind(kind)
-                      ? heap->fixed_double_array_map()
-                      : heap->fixed_array_map());
-  if (flags & kPretenured) {
-    StoreObjectField(array, JSObject::kMapOffset, HeapConstant(map));
-  } else {
-    StoreMapNoWriteBarrier(array, HeapConstant(map));
-  }
+  Heap::RootListIndex map_index = IsFastDoubleElementsKind(kind)
+                                      ? Heap::kFixedDoubleArrayMapRootIndex
+                                      : Heap::kFixedArrayMapRootIndex;
+  DCHECK(Heap::RootIsImmortalImmovable(map_index));
+  StoreMapNoWriteBarrier(array, map_index);
   StoreObjectFieldNoWriteBarrier(array, FixedArray::kLengthOffset,
-                                 TagParameter(capacity_node, mode));
+                                 ParameterToTagged(capacity_node, mode));
   return array;
 }
 
@@ -1954,8 +2080,7 @@
 
   BuildFastFixedArrayForEach(
       array, kind, from_node, to_node,
-      [value, is_double, double_hole](CodeStubAssembler* assembler, Node* array,
-                                      Node* offset) {
+      [this, value, is_double, double_hole](Node* array, Node* offset) {
         if (is_double) {
           // Don't use doubles to store the hole double, since manipulating the
           // signaling NaN used for the hole in C++, e.g. with bit_cast, will
@@ -1965,21 +2090,19 @@
           // TODO(danno): When we have a Float32/Float64 wrapper class that
           // preserves double bits during manipulation, remove this code/change
           // this to an indexed Float64 store.
-          if (assembler->Is64()) {
-            assembler->StoreNoWriteBarrier(MachineRepresentation::kWord64,
-                                           array, offset, double_hole);
+          if (Is64()) {
+            StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset,
+                                double_hole);
           } else {
-            assembler->StoreNoWriteBarrier(MachineRepresentation::kWord32,
-                                           array, offset, double_hole);
-            assembler->StoreNoWriteBarrier(
-                MachineRepresentation::kWord32, array,
-                assembler->IntPtrAdd(offset,
-                                     assembler->IntPtrConstant(kPointerSize)),
-                double_hole);
+            StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
+                                double_hole);
+            StoreNoWriteBarrier(MachineRepresentation::kWord32, array,
+                                IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+                                double_hole);
           }
         } else {
-          assembler->StoreNoWriteBarrier(MachineRepresentation::kTagged, array,
-                                         offset, value);
+          StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset,
+                              value);
         }
       },
       mode);
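 // Editorial note: the two kWord32 stores above write the hole pattern into
 // both halves of the 8-byte slot (at `offset` and `offset + kPointerSize`,
 // with kPointerSize == 4 on 32-bit targets), deliberately bypassing float
 // registers so the signaling NaN cannot be silently canonicalized.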
@@ -2024,9 +2147,9 @@
 
   Node* limit_offset = ElementOffsetFromIndex(
       IntPtrOrSmiConstant(0, mode), from_kind, mode, first_element_offset);
-  Variable var_from_offset(this, MachineType::PointerRepresentation());
-  var_from_offset.Bind(ElementOffsetFromIndex(element_count, from_kind, mode,
-                                              first_element_offset));
+  Variable var_from_offset(this, MachineType::PointerRepresentation(),
+                           ElementOffsetFromIndex(element_count, from_kind,
+                                                  mode, first_element_offset));
   // This second variable is used only when the element sizes of source and
   // destination arrays do not match.
   Variable var_to_offset(this, MachineType::PointerRepresentation());
@@ -2076,7 +2199,7 @@
         from_array, var_from_offset.value(), from_kind, to_kind, if_hole);
 
     if (needs_write_barrier) {
-      Store(MachineRepresentation::kTagged, to_array, to_offset, value);
+      Store(to_array, to_offset, value);
     } else if (to_double_elements) {
       StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array, to_offset,
                           value);
@@ -2119,11 +2242,12 @@
   Comment("] CopyFixedArrayElements");
 }
 
-void CodeStubAssembler::CopyStringCharacters(
-    compiler::Node* from_string, compiler::Node* to_string,
-    compiler::Node* from_index, compiler::Node* to_index,
-    compiler::Node* character_count, String::Encoding from_encoding,
-    String::Encoding to_encoding, ParameterMode mode) {
+void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
+                                             Node* from_index, Node* to_index,
+                                             Node* character_count,
+                                             String::Encoding from_encoding,
+                                             String::Encoding to_encoding,
+                                             ParameterMode mode) {
   bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING;
   bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING;
   DCHECK_IMPLIES(to_one_byte, from_one_byte);
@@ -2140,7 +2264,7 @@
   Node* to_offset =
       ElementOffsetFromIndex(to_index, to_kind, mode, header_size);
   Node* byte_count = ElementOffsetFromIndex(character_count, from_kind, mode);
-  Node* limit_offset = IntPtrAddFoldConstants(from_offset, byte_count);
+  Node* limit_offset = IntPtrAdd(from_offset, byte_count);
 
   // Prepare the fast loop.
   MachineType type =
@@ -2150,9 +2274,9 @@
   int from_increment = 1 << ElementsKindToShiftSize(from_kind);
   int to_increment = 1 << ElementsKindToShiftSize(to_kind);
 
-  Variable current_to_offset(this, MachineType::PointerRepresentation());
+  Variable current_to_offset(this, MachineType::PointerRepresentation(),
+                             to_offset);
   VariableList vars({&current_to_offset}, zone());
-  current_to_offset.Bind(to_offset);
   int to_index_constant = 0, from_index_constant = 0;
   Smi* to_index_smi = nullptr;
   Smi* from_index_smi = nullptr;
@@ -2164,21 +2288,18 @@
                      (ToSmiConstant(from_index, from_index_smi) &&
                       ToSmiConstant(to_index, to_index_smi) &&
                       to_index_smi == from_index_smi));
-  BuildFastLoop(vars, MachineType::PointerRepresentation(), from_offset,
-                limit_offset,
-                [from_string, to_string, &current_to_offset, to_increment, type,
-                 rep, index_same](CodeStubAssembler* assembler, Node* offset) {
-                  Node* value = assembler->Load(type, from_string, offset);
-                  assembler->StoreNoWriteBarrier(
+  BuildFastLoop(vars, from_offset, limit_offset,
+                [this, from_string, to_string, &current_to_offset, to_increment,
+                 type, rep, index_same](Node* offset) {
+                  Node* value = Load(type, from_string, offset);
+                  StoreNoWriteBarrier(
                       rep, to_string,
                       index_same ? offset : current_to_offset.value(), value);
                   if (!index_same) {
-                    current_to_offset.Bind(assembler->IntPtrAdd(
-                        current_to_offset.value(),
-                        assembler->IntPtrConstant(to_increment)));
+                    Increment(current_to_offset, to_increment);
                   }
                 },
-                from_increment, IndexAdvanceMode::kPost);
+                from_increment, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
 }
 
 Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
@@ -2212,17 +2333,10 @@
 
 Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
                                                       ParameterMode mode) {
-  Node* half_old_capacity = WordShr(old_capacity, IntPtrConstant(1));
-  Node* new_capacity = IntPtrAdd(half_old_capacity, old_capacity);
-  Node* unconditioned_result =
-      IntPtrAdd(new_capacity, IntPtrOrSmiConstant(16, mode));
-  if (mode == INTEGER_PARAMETERS || mode == INTPTR_PARAMETERS) {
-    return unconditioned_result;
-  } else {
-    int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
-    return WordAnd(unconditioned_result,
-                   IntPtrConstant(static_cast<size_t>(-1) << kSmiShiftBits));
-  }
+  Node* half_old_capacity = WordOrSmiShr(old_capacity, 1, mode);
+  Node* new_capacity = IntPtrOrSmiAdd(half_old_capacity, old_capacity, mode);
+  Node* padding = IntPtrOrSmiConstant(16, mode);
+  return IntPtrOrSmiAdd(new_capacity, padding, mode);
 }
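 // Editorial sketch: the growth rule above is
 //   new_capacity = old_capacity + (old_capacity >> 1) + 16,
 // e.g. 0 -> 16, 16 -> 40, 100 -> 166: roughly 1.5x growth plus a fixed
 // padding of 16 elements, in whichever representation (Smi or intptr)
 // `mode` selects.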
 
 Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
@@ -2231,8 +2345,8 @@
   Node* capacity = LoadFixedArrayBaseLength(elements);
 
   ParameterMode mode = OptimalParameterMode();
-  capacity = UntagParameter(capacity, mode);
-  key = UntagParameter(key, mode);
+  capacity = TaggedToParameter(capacity, mode);
+  key = TaggedToParameter(key, mode);
 
   return TryGrowElementsCapacity(object, elements, kind, key, capacity, mode,
                                  bailout);
@@ -2247,12 +2361,12 @@
 
   // If the gap growth is too big, fall back to the runtime.
   Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode);
-  Node* max_capacity = IntPtrAdd(capacity, max_gap);
-  GotoIf(UintPtrGreaterThanOrEqual(key, max_capacity), bailout);
+  Node* max_capacity = IntPtrOrSmiAdd(capacity, max_gap, mode);
+  GotoIf(UintPtrOrSmiGreaterThanOrEqual(key, max_capacity, mode), bailout);
 
   // Calculate the capacity of the new backing store.
   Node* new_capacity = CalculateNewElementsCapacity(
-      IntPtrAdd(key, IntPtrOrSmiConstant(1, mode)), mode);
+      IntPtrOrSmiAdd(key, IntPtrOrSmiConstant(1, mode), mode), mode);
   return GrowElementsCapacity(object, elements, kind, kind, capacity,
                               new_capacity, mode, bailout);
 }
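 // Editorial note: the kMaxGap check above bounds how far `key` may point
 // past the current capacity; anything larger bails out to the runtime,
 // which may normalize the elements instead of materializing a huge holey
 // backing store.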
@@ -2264,8 +2378,8 @@
   // If size of the allocation for the new capacity doesn't fit in a page
   // that we can bump-pointer allocate from, fall back to the runtime.
   int max_size = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(to_kind);
-  GotoIf(UintPtrGreaterThanOrEqual(new_capacity,
-                                   IntPtrOrSmiConstant(max_size, mode)),
+  GotoIf(UintPtrOrSmiGreaterThanOrEqual(
+             new_capacity, IntPtrOrSmiConstant(max_size, mode), mode),
          bailout);
 
   // Allocate the new backing store.
@@ -2282,9 +2396,9 @@
   return new_elements;
 }
 
-void CodeStubAssembler::InitializeAllocationMemento(
-    compiler::Node* base_allocation, int base_allocation_size,
-    compiler::Node* allocation_site) {
+void CodeStubAssembler::InitializeAllocationMemento(Node* base_allocation,
+                                                    int base_allocation_size,
+                                                    Node* allocation_site) {
   StoreObjectFieldNoWriteBarrier(
       base_allocation, AllocationMemento::kMapOffset + base_allocation_size,
       HeapConstant(Handle<Map>(isolate()->heap()->allocation_memento_map())));
@@ -2370,10 +2484,9 @@
 
 Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
   // We might need to loop once due to ToNumber conversion.
-  Variable var_value(this, MachineRepresentation::kTagged),
+  Variable var_value(this, MachineRepresentation::kTagged, value),
       var_result(this, MachineRepresentation::kWord32);
   Label loop(this, &var_value), done_loop(this, &var_result);
-  var_value.Bind(value);
   Goto(&loop);
   Bind(&loop);
   {
@@ -2396,8 +2509,8 @@
       // Check if {value} is a HeapNumber.
       Label if_valueisheapnumber(this),
           if_valueisnotheapnumber(this, Label::kDeferred);
-      Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
-             &if_valueisheapnumber, &if_valueisnotheapnumber);
+      Branch(IsHeapNumberMap(LoadMap(value)), &if_valueisheapnumber,
+             &if_valueisnotheapnumber);
 
       Bind(&if_valueisheapnumber);
       {
@@ -2434,7 +2547,7 @@
   Branch(Float64Equal(value, value64), &if_valueisequal, &if_valueisnotequal);
   Bind(&if_valueisequal);
   {
-    GotoUnless(Word32Equal(value32, Int32Constant(0)), &if_valueisint32);
+    GotoIfNot(Word32Equal(value32, Int32Constant(0)), &if_valueisint32);
     Branch(Int32LessThan(Float64ExtractHighWord32(value), Int32Constant(0)),
            &if_valueisheapnumber, &if_valueisint32);
   }
@@ -2457,7 +2570,7 @@
       Goto(&if_valueisheapnumber);
       Bind(&if_notoverflow);
       {
-        Node* result = Projection(0, pair);
+        Node* result = BitcastWordToTaggedSigned(Projection(0, pair));
         var_result.Bind(result);
         Goto(&if_join);
       }
@@ -2492,7 +2605,7 @@
   Goto(&if_join);
   Bind(&if_notoverflow);
   {
-    Node* result = Projection(0, pair);
+    Node* result = BitcastWordToTaggedSigned(Projection(0, pair));
     var_result.Bind(result);
   }
   Goto(&if_join);
@@ -2519,7 +2632,7 @@
       Node* overflow = Projection(1, pair);
       GotoIf(overflow, &if_overflow);
 
-      Node* result = Projection(0, pair);
+      Node* result = BitcastWordToTaggedSigned(Projection(0, pair));
       var_result.Bind(result);
     }
   }
@@ -2538,8 +2651,7 @@
 
 Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
                                       char const* method_name) {
-  Variable var_value(this, MachineRepresentation::kTagged);
-  var_value.Bind(value);
+  Variable var_value(this, MachineRepresentation::kTagged, value);
 
   // Check if the {value} is a Smi or a HeapObject.
   Label if_valueissmi(this, Label::kDeferred), if_valueisnotsmi(this),
@@ -2582,7 +2694,7 @@
         CallRuntime(Runtime::kThrowCalledOnNullOrUndefined, context,
                     HeapConstant(factory()->NewStringFromAsciiChecked(
                         method_name, TENURED)));
-        Goto(&if_valueisstring);  // Never reached.
+        Unreachable();
       }
     }
   }
@@ -2597,14 +2709,32 @@
   return var_value.value();
 }
 
+Node* CodeStubAssembler::ChangeNumberToFloat64(compiler::Node* value) {
+  Variable result(this, MachineRepresentation::kFloat64);
+  Label smi(this);
+  Label done(this, &result);
+  GotoIf(TaggedIsSmi(value), &smi);
+  result.Bind(
+      LoadObjectField(value, HeapNumber::kValueOffset, MachineType::Float64()));
+  Goto(&done);
+
+  Bind(&smi);
+  {
+    result.Bind(SmiToFloat64(value));
+    Goto(&done);
+  }
+
+  Bind(&done);
+  return result.value();
+}
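+// Editorial note: ChangeNumberToFloat64 assumes `value` is already a Number
+// (Smi or HeapNumber) and performs no ToNumber conversion; callers are
+// expected to have type-checked the input.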
+
 Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
                                      PrimitiveType primitive_type,
                                      char const* method_name) {
   // We might need to loop once due to JSValue unboxing.
-  Variable var_value(this, MachineRepresentation::kTagged);
+  Variable var_value(this, MachineRepresentation::kTagged, value);
   Label loop(this, &var_value), done_loop(this),
       done_throw(this, Label::kDeferred);
-  var_value.Bind(value);
   Goto(&loop);
   Bind(&loop);
   {
@@ -2663,7 +2793,7 @@
     CallRuntime(Runtime::kThrowNotGeneric, context,
                 HeapConstant(factory()->NewStringFromAsciiChecked(method_name,
                                                                   TENURED)));
-    Goto(&done_loop);  // Never reached.
+    Unreachable();
   }
 
   Bind(&done_loop);
@@ -2691,21 +2821,25 @@
       Runtime::kThrowIncompatibleMethodReceiver, context,
       HeapConstant(factory()->NewStringFromAsciiChecked(method_name, TENURED)),
       value);
-  var_value_map.Bind(UndefinedConstant());
-  Goto(&out);  // Never reached.
+  Unreachable();
 
   Bind(&out);
   return var_value_map.value();
 }
 
+Node* CodeStubAssembler::InstanceTypeEqual(Node* instance_type, int type) {
+  return Word32Equal(instance_type, Int32Constant(type));
+}
+
 Node* CodeStubAssembler::IsSpecialReceiverMap(Node* map) {
   Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
   uint32_t mask =
       1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
   USE(mask);
   // Interceptors or access checks imply special receiver.
-  CSA_ASSERT(this, Select(IsSetWord32(LoadMapBitField(map), mask), is_special,
-                          Int32Constant(1), MachineRepresentation::kWord32));
+  CSA_ASSERT(this,
+             SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
+                            Int32Constant(1), MachineRepresentation::kWord32));
   return is_special;
 }
 
@@ -2723,6 +2857,17 @@
       Int32Constant(0));
 }
 
+Node* CodeStubAssembler::IsCallable(Node* object) {
+  return IsCallableMap(LoadMap(object));
+}
+
+Node* CodeStubAssembler::IsConstructorMap(Node* map) {
+  CSA_ASSERT(this, IsMap(map));
+  return Word32NotEqual(
+      Word32And(LoadMapBitField(map), Int32Constant(1 << Map::kIsConstructor)),
+      Int32Constant(0));
+}
+
 Node* CodeStubAssembler::IsSpecialReceiverInstanceType(Node* instance_type) {
   STATIC_ASSERT(JS_GLOBAL_OBJECT_TYPE <= LAST_SPECIAL_RECEIVER_TYPE);
   return Int32LessThanOrEqual(instance_type,
@@ -2745,6 +2890,11 @@
   return IsJSReceiverInstanceType(LoadInstanceType(object));
 }
 
+Node* CodeStubAssembler::IsJSReceiverMap(Node* map) {
+  STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+  return IsJSReceiverInstanceType(LoadMapInstanceType(map));
+}
+
 Node* CodeStubAssembler::IsJSObject(Node* object) {
   STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
   return Int32GreaterThanOrEqual(LoadInstanceType(object),
@@ -2772,6 +2922,14 @@
   return HasInstanceType(object, WEAK_CELL_TYPE);
 }
 
+Node* CodeStubAssembler::IsBoolean(Node* object) {
+  return IsBooleanMap(LoadMap(object));
+}
+
+Node* CodeStubAssembler::IsHeapNumber(Node* object) {
+  return IsHeapNumberMap(LoadMap(object));
+}
+
 Node* CodeStubAssembler::IsName(Node* object) {
   return Int32LessThanOrEqual(LoadInstanceType(object),
                               Int32Constant(LAST_NAME_TYPE));
@@ -2782,6 +2940,22 @@
                               Int32Constant(FIRST_NONSTRING_TYPE));
 }
 
+Node* CodeStubAssembler::IsSymbol(Node* object) {
+  return IsSymbolMap(LoadMap(object));
+}
+
+Node* CodeStubAssembler::IsPrivateSymbol(Node* object) {
+  return Select(
+      IsSymbol(object),
+      [=] {
+        Node* const flags =
+            SmiToWord32(LoadObjectField(object, Symbol::kFlagsOffset));
+        const int kPrivateMask = 1 << Symbol::kPrivateBit;
+        return IsSetWord32(flags, kPrivateMask);
+      },
+      [=] { return Int32Constant(0); }, MachineRepresentation::kWord32);
+}
+
 Node* CodeStubAssembler::IsNativeContext(Node* object) {
   return WordEqual(LoadMap(object), LoadRoot(Heap::kNativeContextMapRootIndex));
 }
@@ -2795,7 +2969,7 @@
 }
 
 Node* CodeStubAssembler::IsDictionary(Node* object) {
-  return WordOr(IsHashTable(object), IsUnseededNumberDictionary(object));
+  return Word32Or(IsHashTable(object), IsUnseededNumberDictionary(object));
 }
 
 Node* CodeStubAssembler::IsUnseededNumberDictionary(Node* object) {
@@ -2803,19 +2977,22 @@
                    LoadRoot(Heap::kUnseededNumberDictionaryMapRootIndex));
 }
 
-Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
+Node* CodeStubAssembler::IsJSFunction(Node* object) {
+  return HasInstanceType(object, JS_FUNCTION_TYPE);
+}
+
+Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index,
+                                          ParameterMode parameter_mode) {
   CSA_ASSERT(this, IsString(string));
   // Translate the {index} into a Word.
-  index = SmiToWord(index);
+  index = ParameterToWord(index, parameter_mode);
 
-  // We may need to loop in case of cons or sliced strings.
-  Variable var_index(this, MachineType::PointerRepresentation());
+  // We may need to loop in case of cons, thin, or sliced strings.
+  Variable var_index(this, MachineType::PointerRepresentation(), index);
+  Variable var_string(this, MachineRepresentation::kTagged, string);
   Variable var_result(this, MachineRepresentation::kWord32);
-  Variable var_string(this, MachineRepresentation::kTagged);
   Variable* loop_vars[] = {&var_index, &var_string};
   Label done_loop(this, &var_result), loop(this, 2, loop_vars);
-  var_string.Bind(string);
-  var_index.Bind(index);
   Goto(&loop);
   Bind(&loop);
   {
@@ -2960,14 +3137,29 @@
 
         Bind(&if_stringisnotexternal);
         {
-          // The {string} is a SlicedString, continue with its parent.
-          Node* string_offset =
-              LoadAndUntagObjectField(string, SlicedString::kOffsetOffset);
-          Node* string_parent =
-              LoadObjectField(string, SlicedString::kParentOffset);
-          var_index.Bind(IntPtrAdd(index, string_offset));
-          var_string.Bind(string_parent);
-          Goto(&loop);
+          Label if_stringissliced(this), if_stringisthin(this);
+          Branch(
+              Word32Equal(Word32And(string_instance_type,
+                                    Int32Constant(kStringRepresentationMask)),
+                          Int32Constant(kSlicedStringTag)),
+              &if_stringissliced, &if_stringisthin);
+          Bind(&if_stringissliced);
+          {
+            // The {string} is a SlicedString, continue with its parent.
+            Node* string_offset =
+                LoadAndUntagObjectField(string, SlicedString::kOffsetOffset);
+            Node* string_parent =
+                LoadObjectField(string, SlicedString::kParentOffset);
+            var_index.Bind(IntPtrAdd(index, string_offset));
+            var_string.Bind(string_parent);
+            Goto(&loop);
+          }
+          Bind(&if_stringisthin);
+          {
+            // The {string} is a ThinString, continue with its actual value.
+            var_string.Bind(LoadObjectField(string, ThinString::kActualOffset));
+            Goto(&loop);
+          }
         }
       }
     }
@@ -2989,12 +3181,13 @@
   {
     // Load the isolate wide single character string cache.
     Node* cache = LoadRoot(Heap::kSingleCharacterStringCacheRootIndex);
+    Node* code_index = ChangeUint32ToWord(code);
 
     // Check if we have an entry for the {code} in the single character string
     // cache already.
     Label if_entryisundefined(this, Label::kDeferred),
         if_entryisnotundefined(this);
-    Node* entry = LoadFixedArrayElement(cache, code);
+    Node* entry = LoadFixedArrayElement(cache, code_index);
     Branch(WordEqual(entry, UndefinedConstant()), &if_entryisundefined,
            &if_entryisnotundefined);
 
@@ -3005,7 +3198,7 @@
       StoreNoWriteBarrier(
           MachineRepresentation::kWord8, result,
           IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag), code);
-      StoreFixedArrayElement(cache, code, result);
+      StoreFixedArrayElement(cache, code_index, result);
       var_result.Bind(result);
       Goto(&if_done);
     }
@@ -3096,31 +3289,28 @@
   Label end(this);
   Label runtime(this);
 
-  Variable var_instance_type(this, MachineRepresentation::kWord8);  // Int32.
-  Variable var_result(this, MachineRepresentation::kTagged);        // String.
-  Variable var_from(this, MachineRepresentation::kTagged);          // Smi.
-  Variable var_string(this, MachineRepresentation::kTagged);        // String.
+  Node* const int_zero = Int32Constant(0);
 
-  var_instance_type.Bind(Int32Constant(0));
-  var_string.Bind(string);
-  var_from.Bind(from);
+  // Int32 variables.
+  Variable var_instance_type(this, MachineRepresentation::kWord32, int_zero);
+  Variable var_representation(this, MachineRepresentation::kWord32, int_zero);
+
+  Variable var_from(this, MachineRepresentation::kTagged, from);      // Smi.
+  Variable var_string(this, MachineRepresentation::kTagged, string);  // String.
+  Variable var_result(this, MachineRepresentation::kTagged);          // String.
 
   // Make sure first argument is a string.
-
-  // Bailout if receiver is a Smi.
-  GotoIf(TaggedIsSmi(string), &runtime);
+  CSA_ASSERT(this, TaggedIsNotSmi(string));
+  CSA_ASSERT(this, IsString(string));
 
   // Load the instance type of the {string}.
   Node* const instance_type = LoadInstanceType(string);
   var_instance_type.Bind(instance_type);
 
-  // Check if {string} is a String.
-  GotoUnless(IsStringInstanceType(instance_type), &runtime);
-
   // Make sure that both from and to are non-negative smis.
 
-  GotoUnless(WordIsPositiveSmi(from), &runtime);
-  GotoUnless(WordIsPositiveSmi(to), &runtime);
+  GotoIfNot(TaggedIsPositiveSmi(from), &runtime);
+  GotoIfNot(TaggedIsPositiveSmi(to), &runtime);
 
   Node* const substr_length = SmiSub(to, from);
   Node* const string_length = LoadStringLength(string);
@@ -3142,7 +3332,8 @@
   // and put the underlying string into var_string.
 
   // If the string is not indirect, it can only be sequential or external.
-  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+  STATIC_ASSERT(kIsIndirectStringMask ==
+                (kSlicedStringTag & kConsStringTag & kThinStringTag));
   STATIC_ASSERT(kIsIndirectStringMask != 0);
   Label underlying_unpacked(this);
   GotoIf(Word32Equal(
@@ -3150,13 +3341,14 @@
              Int32Constant(0)),
          &underlying_unpacked);
 
-  // The subject string is either a sliced or cons string.
+  // The subject string is a sliced, cons, or thin string.
 
-  Label sliced_string(this);
-  GotoIf(Word32NotEqual(
-             Word32And(instance_type, Int32Constant(kSlicedNotConsMask)),
-             Int32Constant(0)),
-         &sliced_string);
+  Label thin_string(this), thin_or_sliced(this);
+  var_representation.Bind(
+      Word32And(instance_type, Int32Constant(kStringRepresentationMask)));
+  GotoIf(
+      Word32NotEqual(var_representation.value(), Int32Constant(kConsStringTag)),
+      &thin_or_sliced);
 
   // Cons string.  Check whether it is flat, then fetch first part.
   // Flat cons strings have an empty second part.
@@ -3168,14 +3360,25 @@
     Node* first_string_part = LoadObjectField(string, ConsString::kFirstOffset);
     var_string.Bind(first_string_part);
     var_instance_type.Bind(LoadInstanceType(first_string_part));
+    var_representation.Bind(Word32And(
+        var_instance_type.value(), Int32Constant(kStringRepresentationMask)));
 
-    Goto(&underlying_unpacked);
+    // The loaded first part might be a thin string.
+    Branch(Word32Equal(Word32And(var_instance_type.value(),
+                                 Int32Constant(kIsIndirectStringMask)),
+                       Int32Constant(0)),
+           &underlying_unpacked, &thin_string);
   }
 
-  Bind(&sliced_string);
+  Bind(&thin_or_sliced);
   {
+    GotoIf(
+        Word32Equal(var_representation.value(), Int32Constant(kThinStringTag)),
+        &thin_string);
+    // Otherwise it's a sliced string.
     // Fetch parent and correct start index by offset.
-    Node* sliced_offset = LoadObjectField(string, SlicedString::kOffsetOffset);
+    Node* sliced_offset =
+        LoadObjectField(var_string.value(), SlicedString::kOffsetOffset);
     var_from.Bind(SmiAdd(from, sliced_offset));
 
     Node* slice_parent = LoadObjectField(string, SlicedString::kParentOffset);
@@ -3184,6 +3387,19 @@
     Node* slice_parent_instance_type = LoadInstanceType(slice_parent);
     var_instance_type.Bind(slice_parent_instance_type);
 
+    // The loaded parent might be a thin string.
+    Branch(Word32Equal(Word32And(var_instance_type.value(),
+                                 Int32Constant(kIsIndirectStringMask)),
+                       Int32Constant(0)),
+           &underlying_unpacked, &thin_string);
+  }
+
+  Bind(&thin_string);
+  {
+    Node* actual_string =
+        LoadObjectField(var_string.value(), ThinString::kActualOffset);
+    var_string.Bind(actual_string);
+    var_instance_type.Bind(LoadInstanceType(actual_string));
     Goto(&underlying_unpacked);
   }
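 // Editorial note: at most two unwrapping steps are needed here -- a cons
 // first part or a slice parent may itself be thin, but a thin string's
 // actual value is always a direct (sequential or external) string.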
 
@@ -3231,10 +3447,10 @@
     // encoding at this point.
     STATIC_ASSERT(kExternalStringTag != 0);
     STATIC_ASSERT(kSeqStringTag == 0);
-    GotoUnless(Word32Equal(Word32And(var_instance_type.value(),
-                                     Int32Constant(kExternalStringTag)),
-                           Int32Constant(0)),
-               &external_string);
+    GotoIfNot(Word32Equal(Word32And(var_instance_type.value(),
+                                    Int32Constant(kExternalStringTag)),
+                          Int32Constant(0)),
+              &external_string);
 
     var_result.Bind(AllocAndCopyStringCharacters(
         this, context, var_string.value(), var_instance_type.value(),
@@ -3249,22 +3465,8 @@
   // Handle external string.
   Bind(&external_string);
   {
-    // Rule out short external strings.
-    STATIC_ASSERT(kShortExternalStringTag != 0);
-    GotoIf(Word32NotEqual(Word32And(var_instance_type.value(),
-                                    Int32Constant(kShortExternalStringMask)),
-                          Int32Constant(0)),
-           &runtime);
-
-    // Move the pointer so that offset-wise, it looks like a sequential string.
-    STATIC_ASSERT(SeqTwoByteString::kHeaderSize ==
-                  SeqOneByteString::kHeaderSize);
-
-    Node* resource_data = LoadObjectField(var_string.value(),
-                                          ExternalString::kResourceDataOffset);
-    Node* const fake_sequential_string = IntPtrSub(
-        resource_data,
-        IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+    Node* const fake_sequential_string = TryDerefExternalString(
+        var_string.value(), var_instance_type.value(), &runtime);
 
     var_result.Bind(AllocAndCopyStringCharacters(
         this, context, fake_sequential_string, var_instance_type.value(),
@@ -3313,12 +3515,91 @@
   return var_result.value();
 }
 
+namespace {
+
+Node* IsExternalStringInstanceType(CodeStubAssembler* a,
+                                   Node* const instance_type) {
+  CSA_ASSERT(a, a->IsStringInstanceType(instance_type));
+  return a->Word32Equal(
+      a->Word32And(instance_type, a->Int32Constant(kStringRepresentationMask)),
+      a->Int32Constant(kExternalStringTag));
+}
+
+Node* IsShortExternalStringInstanceType(CodeStubAssembler* a,
+                                        Node* const instance_type) {
+  CSA_ASSERT(a, a->IsStringInstanceType(instance_type));
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  return a->Word32NotEqual(
+      a->Word32And(instance_type, a->Int32Constant(kShortExternalStringMask)),
+      a->Int32Constant(0));
+}
+
+}  // namespace
+
+Node* CodeStubAssembler::TryDerefExternalString(Node* const string,
+                                                Node* const instance_type,
+                                                Label* if_bailout) {
+  Label out(this);
+
+  USE(IsExternalStringInstanceType);
+  CSA_ASSERT(this, IsExternalStringInstanceType(this, instance_type));
+  GotoIf(IsShortExternalStringInstanceType(this, instance_type), if_bailout);
+
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+
+  Node* resource_data = LoadObjectField(
+      string, ExternalString::kResourceDataOffset, MachineType::Pointer());
+  Node* const fake_sequential_string =
+      IntPtrSub(resource_data,
+                IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  return fake_sequential_string;
+}
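+// Editorial sketch: after the subtraction above,
+//   fake_sequential_string + SeqTwoByteString::kHeaderSize - kHeapObjectTag
+// points exactly at the external resource data, so sequential-string copy
+// loops can run unchanged over an external string's contents.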
+
+void CodeStubAssembler::MaybeDerefIndirectString(Variable* var_string,
+                                                 Node* instance_type,
+                                                 Variable* var_did_something) {
+  Label deref(this), done(this, var_did_something);
+  Node* representation =
+      Word32And(instance_type, Int32Constant(kStringRepresentationMask));
+  GotoIf(Word32Equal(representation, Int32Constant(kThinStringTag)), &deref);
+  GotoIf(Word32NotEqual(representation, Int32Constant(kConsStringTag)), &done);
+  // Cons string.
+  Node* rhs = LoadObjectField(var_string->value(), ConsString::kSecondOffset);
+  GotoIf(WordEqual(rhs, EmptyStringConstant()), &deref);
+  Goto(&done);
+
+  Bind(&deref);
+  STATIC_ASSERT(ThinString::kActualOffset == ConsString::kFirstOffset);
+  var_string->Bind(
+      LoadObjectField(var_string->value(), ThinString::kActualOffset));
+  var_did_something->Bind(IntPtrConstant(1));
+  Goto(&done);
+
+  Bind(&done);
+}
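+// Editorial note: the deref above relies on ThinString::kActualOffset ==
+// ConsString::kFirstOffset (see the STATIC_ASSERT), so a thin string and a
+// flat cons string (one with an empty second part) are unwrapped by the same
+// field load.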
+
+void CodeStubAssembler::MaybeDerefIndirectStrings(Variable* var_left,
+                                                  Node* left_instance_type,
+                                                  Variable* var_right,
+                                                  Node* right_instance_type,
+                                                  Label* did_something) {
+  Variable var_did_something(this, MachineType::PointerRepresentation(),
+                             IntPtrConstant(0));
+  MaybeDerefIndirectString(var_left, left_instance_type, &var_did_something);
+  MaybeDerefIndirectString(var_right, right_instance_type, &var_did_something);
+
+  GotoIf(WordNotEqual(var_did_something.value(), IntPtrConstant(0)),
+         did_something);
+  // Fall through if neither string was an indirect string.
+}
+
 Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
                                    AllocationFlags flags) {
   Label check_right(this);
   Label runtime(this, Label::kDeferred);
   Label cons(this);
-  Label non_cons(this);
   Variable result(this, MachineRepresentation::kTagged);
   Label done(this, &result);
   Label done_native(this, &result);
@@ -3336,73 +3617,90 @@
   Goto(&done_native);
 
   Bind(&cons);
-  CSA_ASSERT(this, TaggedIsSmi(left_length));
-  CSA_ASSERT(this, TaggedIsSmi(right_length));
-  Node* new_length = SmiAdd(left_length, right_length);
-  GotoIf(UintPtrGreaterThanOrEqual(
-             new_length, SmiConstant(Smi::FromInt(String::kMaxLength))),
-         &runtime);
-
-  GotoIf(IntPtrLessThan(new_length,
-                        SmiConstant(Smi::FromInt(ConsString::kMinLength))),
-         &non_cons);
-
-  result.Bind(NewConsString(context, new_length, left, right, flags));
-  Goto(&done_native);
-
-  Bind(&non_cons);
-
-  Comment("Full string concatenate");
-  Node* left_instance_type = LoadInstanceType(left);
-  Node* right_instance_type = LoadInstanceType(right);
-  // Compute intersection and difference of instance types.
-
-  Node* ored_instance_types = WordOr(left_instance_type, right_instance_type);
-  Node* xored_instance_types = WordXor(left_instance_type, right_instance_type);
-
-  // Check if both strings have the same encoding and both are sequential.
-  GotoIf(WordNotEqual(
-             WordAnd(xored_instance_types, IntPtrConstant(kStringEncodingMask)),
-             IntPtrConstant(0)),
-         &runtime);
-  GotoIf(WordNotEqual(WordAnd(ored_instance_types,
-                              IntPtrConstant(kStringRepresentationMask)),
-                      IntPtrConstant(0)),
-         &runtime);
-
-  Label two_byte(this);
-  GotoIf(WordEqual(
-             WordAnd(ored_instance_types, IntPtrConstant(kStringEncodingMask)),
-             IntPtrConstant(kTwoByteStringTag)),
-         &two_byte);
-  // One-byte sequential string case
-  Node* new_string =
-      AllocateSeqOneByteString(context, new_length, SMI_PARAMETERS);
-  CopyStringCharacters(left, new_string, SmiConstant(Smi::kZero),
-                       SmiConstant(Smi::kZero), left_length,
-                       String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
-                       SMI_PARAMETERS);
-  CopyStringCharacters(right, new_string, SmiConstant(Smi::kZero), left_length,
-                       right_length, String::ONE_BYTE_ENCODING,
-                       String::ONE_BYTE_ENCODING, SMI_PARAMETERS);
-  result.Bind(new_string);
-  Goto(&done_native);
-
-  Bind(&two_byte);
   {
-    // Two-byte sequential string case
-    new_string = AllocateSeqTwoByteString(context, new_length, SMI_PARAMETERS);
-    CopyStringCharacters(left, new_string, SmiConstant(Smi::kZero),
+    CSA_ASSERT(this, TaggedIsSmi(left_length));
+    CSA_ASSERT(this, TaggedIsSmi(right_length));
+    Node* new_length = SmiAdd(left_length, right_length);
+    GotoIf(SmiAboveOrEqual(new_length, SmiConstant(String::kMaxLength)),
+           &runtime);
+
+    Variable var_left(this, MachineRepresentation::kTagged, left);
+    Variable var_right(this, MachineRepresentation::kTagged, right);
+    Variable* input_vars[2] = {&var_left, &var_right};
+    Label non_cons(this, 2, input_vars);
+    Label slow(this, Label::kDeferred);
+    GotoIf(SmiLessThan(new_length, SmiConstant(ConsString::kMinLength)),
+           &non_cons);
+
+    result.Bind(NewConsString(context, new_length, var_left.value(),
+                              var_right.value(), flags));
+    Goto(&done_native);
+
+    Bind(&non_cons);
+
+    Comment("Full string concatenate");
+    Node* left_instance_type = LoadInstanceType(var_left.value());
+    Node* right_instance_type = LoadInstanceType(var_right.value());
+    // Compute intersection and difference of instance types.
+
+    Node* ored_instance_types =
+        Word32Or(left_instance_type, right_instance_type);
+    Node* xored_instance_types =
+        Word32Xor(left_instance_type, right_instance_type);
+
+    // Check if both strings have the same encoding and both are sequential.
+    GotoIf(Word32NotEqual(Word32And(xored_instance_types,
+                                    Int32Constant(kStringEncodingMask)),
+                          Int32Constant(0)),
+           &runtime);
+    GotoIf(Word32NotEqual(Word32And(ored_instance_types,
+                                    Int32Constant(kStringRepresentationMask)),
+                          Int32Constant(0)),
+           &slow);
+
+    Label two_byte(this);
+    GotoIf(Word32Equal(Word32And(ored_instance_types,
+                                 Int32Constant(kStringEncodingMask)),
+                       Int32Constant(kTwoByteStringTag)),
+           &two_byte);
+    // One-byte sequential string case
+    Node* new_string =
+        AllocateSeqOneByteString(context, new_length, SMI_PARAMETERS);
+    CopyStringCharacters(var_left.value(), new_string, SmiConstant(Smi::kZero),
                          SmiConstant(Smi::kZero), left_length,
-                         String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
+                         String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
                          SMI_PARAMETERS);
-    CopyStringCharacters(right, new_string, SmiConstant(Smi::kZero),
-                         left_length, right_length, String::TWO_BYTE_ENCODING,
-                         String::TWO_BYTE_ENCODING, SMI_PARAMETERS);
+    CopyStringCharacters(var_right.value(), new_string, SmiConstant(Smi::kZero),
+                         left_length, right_length, String::ONE_BYTE_ENCODING,
+                         String::ONE_BYTE_ENCODING, SMI_PARAMETERS);
     result.Bind(new_string);
     Goto(&done_native);
-  }
 
+    Bind(&two_byte);
+    {
+      // Two-byte sequential string case
+      new_string =
+          AllocateSeqTwoByteString(context, new_length, SMI_PARAMETERS);
+      CopyStringCharacters(var_left.value(), new_string,
+                           SmiConstant(Smi::kZero), SmiConstant(Smi::kZero),
+                           left_length, String::TWO_BYTE_ENCODING,
+                           String::TWO_BYTE_ENCODING, SMI_PARAMETERS);
+      CopyStringCharacters(var_right.value(), new_string,
+                           SmiConstant(Smi::kZero), left_length, right_length,
+                           String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
+                           SMI_PARAMETERS);
+      result.Bind(new_string);
+      Goto(&done_native);
+    }
+
+    Bind(&slow);
+    {
+      // Try to unwrap indirect strings; on success, restart the attempt
+      // above.
+      MaybeDerefIndirectStrings(&var_left, left_instance_type, &var_right,
+                                right_instance_type, &non_cons);
+      Goto(&runtime);
+    }
+  }
   Bind(&runtime);
   {
     result.Bind(CallRuntime(Runtime::kStringAdd, context, left, right));
@@ -3419,76 +3717,10 @@
   return result.value();
 }
 
-Node* CodeStubAssembler::StringIndexOfChar(Node* context, Node* string,
-                                           Node* needle_char, Node* from) {
-  CSA_ASSERT(this, IsString(string));
-  Variable var_result(this, MachineRepresentation::kTagged);
-
-  Label out(this), runtime(this, Label::kDeferred);
-
-  // Let runtime handle non-one-byte {needle_char}.
-
-  Node* const one_byte_char_mask = IntPtrConstant(0xFF);
-  GotoUnless(WordEqual(WordAnd(needle_char, one_byte_char_mask), needle_char),
-             &runtime);
-
-  // TODO(jgruber): Handle external and two-byte strings.
-
-  Node* const one_byte_seq_mask = Int32Constant(
-      kIsIndirectStringMask | kExternalStringTag | kStringEncodingMask);
-  Node* const expected_masked = Int32Constant(kOneByteStringTag);
-
-  Node* const string_instance_type = LoadInstanceType(string);
-  GotoUnless(Word32Equal(Word32And(string_instance_type, one_byte_seq_mask),
-                         expected_masked),
-             &runtime);
-
-  // If we reach this, {string} is a non-indirect, non-external one-byte string.
-
-  Node* const length = LoadStringLength(string);
-  Node* const search_range_length = SmiUntag(SmiSub(length, from));
-
-  const int offset = SeqOneByteString::kHeaderSize - kHeapObjectTag;
-  Node* const begin = IntPtrConstant(offset);
-  Node* const cursor = IntPtrAdd(begin, SmiUntag(from));
-  Node* const end = IntPtrAdd(cursor, search_range_length);
-
-  var_result.Bind(SmiConstant(Smi::FromInt(-1)));
-
-  BuildFastLoop(MachineType::PointerRepresentation(), cursor, end,
-                [string, needle_char, begin, &var_result, &out](
-                    CodeStubAssembler* csa, Node* cursor) {
-                  Label next(csa);
-                  Node* value = csa->Load(MachineType::Uint8(), string, cursor);
-                  csa->GotoUnless(csa->WordEqual(value, needle_char), &next);
-
-                  // Found a match.
-                  Node* index = csa->SmiTag(csa->IntPtrSub(cursor, begin));
-                  var_result.Bind(index);
-                  csa->Goto(&out);
-
-                  csa->Bind(&next);
-                },
-                1, IndexAdvanceMode::kPost);
-  Goto(&out);
-
-  Bind(&runtime);
-  {
-    Node* const pattern = StringFromCharCode(needle_char);
-    Node* const result =
-        CallRuntime(Runtime::kStringIndexOf, context, string, pattern, from);
-    var_result.Bind(result);
-    Goto(&out);
-  }
-
-  Bind(&out);
-  return var_result.value();
-}
-
-Node* CodeStubAssembler::StringFromCodePoint(compiler::Node* codepoint,
+Node* CodeStubAssembler::StringFromCodePoint(Node* codepoint,
                                              UnicodeEncoding encoding) {
-  Variable var_result(this, MachineRepresentation::kTagged);
-  var_result.Bind(EmptyStringConstant());
+  Variable var_result(this, MachineRepresentation::kTagged,
+                      EmptyStringConstant());
 
   Label if_isword16(this), if_isword32(this), return_result(this);
 
@@ -3563,8 +3795,7 @@
   return var_result.value();
 }
 
-Node* CodeStubAssembler::NumberToString(compiler::Node* context,
-                                        compiler::Node* argument) {
+Node* CodeStubAssembler::NumberToString(Node* context, Node* argument) {
   Variable result(this, MachineRepresentation::kTagged);
   Label runtime(this, Label::kDeferred);
   Label smi(this);
@@ -3575,7 +3806,9 @@
 
   // Make the hash mask from the length of the number string cache. It
   // contains two elements (number and string) for each cache entry.
-  Node* mask = LoadFixedArrayBaseLength(number_string_cache);
+  // TODO(ishell): cleanup mask handling.
+  Node* mask =
+      BitcastTaggedToWord(LoadFixedArrayBaseLength(number_string_cache));
   Node* one = IntPtrConstant(1);
   mask = IntPtrSub(mask, one);
 
@@ -3583,7 +3816,7 @@
 
   // Argument isn't smi, check to see if it's a heap-number.
   Node* map = LoadMap(argument);
-  GotoUnless(WordEqual(map, HeapNumberMapConstant()), &runtime);
+  GotoIfNot(IsHeapNumberMap(map), &runtime);
 
   // Make a hash from the two 32-bit values of the double.
   Node* low =
@@ -3591,29 +3824,27 @@
   Node* high = LoadObjectField(argument, HeapNumber::kValueOffset + kIntSize,
                                MachineType::Int32());
   Node* hash = Word32Xor(low, high);
-  if (Is64()) hash = ChangeInt32ToInt64(hash);
+  hash = ChangeInt32ToIntPtr(hash);
   hash = WordShl(hash, one);
-  Node* index = WordAnd(hash, SmiToWord(mask));
+  Node* index = WordAnd(hash, SmiUntag(BitcastWordToTagged(mask)));
 
   // Cache entry's key must be a heap number
-  Node* number_key =
-      LoadFixedArrayElement(number_string_cache, index, 0, INTPTR_PARAMETERS);
+  Node* number_key = LoadFixedArrayElement(number_string_cache, index);
   GotoIf(TaggedIsSmi(number_key), &runtime);
   map = LoadMap(number_key);
-  GotoUnless(WordEqual(map, HeapNumberMapConstant()), &runtime);
+  GotoIfNot(IsHeapNumberMap(map), &runtime);
 
   // Cache entry's key must match the heap number value we're looking for.
   Node* low_compare = LoadObjectField(number_key, HeapNumber::kValueOffset,
                                       MachineType::Int32());
   Node* high_compare = LoadObjectField(
       number_key, HeapNumber::kValueOffset + kIntSize, MachineType::Int32());
-  GotoUnless(WordEqual(low, low_compare), &runtime);
-  GotoUnless(WordEqual(high, high_compare), &runtime);
+  GotoIfNot(Word32Equal(low, low_compare), &runtime);
+  GotoIfNot(Word32Equal(high, high_compare), &runtime);
 
-  // Heap number match, return value fro cache entry.
+  // Heap number match, return value from cache entry.
   IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
-  result.Bind(LoadFixedArrayElement(number_string_cache, index, kPointerSize,
-                                    INTPTR_PARAMETERS));
+  result.Bind(LoadFixedArrayElement(number_string_cache, index, kPointerSize));
   Goto(&done);
 
   Bind(&runtime);
@@ -3626,7 +3857,8 @@
   Bind(&smi);
   {
     // Load the smi key, make sure it matches the smi we're looking for.
-    Node* smi_index = WordAnd(WordShl(argument, one), mask);
+    Node* smi_index = BitcastWordToTagged(
+        WordAnd(WordShl(BitcastTaggedToWord(argument), one), mask));
     Node* smi_key = LoadFixedArrayElement(number_string_cache, smi_index, 0,
                                           SMI_PARAMETERS);
     GotoIf(WordNotEqual(smi_key, argument), &runtime);
@@ -3643,9 +3875,6 @@
 }
 
 Node* CodeStubAssembler::ToName(Node* context, Node* value) {
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-
   Label end(this);
   Variable var_result(this, MachineRepresentation::kTagged);
 
@@ -3694,14 +3923,13 @@
 Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
   // Assert input is a HeapObject (not smi or heap number)
   CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(input)));
-  CSA_ASSERT(this, Word32NotEqual(LoadMap(input), HeapNumberMapConstant()));
+  CSA_ASSERT(this, Word32BinaryNot(IsHeapNumberMap(LoadMap(input))));
 
   // We might need to loop once here due to ToPrimitive conversions.
-  Variable var_input(this, MachineRepresentation::kTagged);
+  Variable var_input(this, MachineRepresentation::kTagged, input);
   Variable var_result(this, MachineRepresentation::kTagged);
   Label loop(this, &var_input);
   Label end(this);
-  var_input.Bind(input);
   Goto(&loop);
   Bind(&loop);
   {
@@ -3745,7 +3973,7 @@
       Label if_resultisnumber(this), if_resultisnotnumber(this);
       GotoIf(TaggedIsSmi(result), &if_resultisnumber);
       Node* result_map = LoadMap(result);
-      Branch(WordEqual(result_map, HeapNumberMapConstant()), &if_resultisnumber,
+      Branch(IsHeapNumberMap(result_map), &if_resultisnumber,
              &if_resultisnotnumber);
 
       Bind(&if_resultisnumber);
@@ -3765,8 +3993,8 @@
 
     Bind(&if_inputisother);
     {
-      // The {input} is something else (i.e. Symbol or Simd128Value), let the
-      // runtime figure out the correct exception.
+      // The {input} is something else (e.g. a Symbol); let the runtime figure
+      // out the correct exception.
       // Note: We cannot tail call to the runtime here, as js-to-wasm
       // trampolines also use this code currently, and they declare all
       // outgoing parameters as untagged, while we would push a tagged
@@ -3785,7 +4013,7 @@
   Label end(this);
 
   Label not_smi(this, Label::kDeferred);
-  GotoUnless(TaggedIsSmi(input), &not_smi);
+  GotoIfNot(TaggedIsSmi(input), &not_smi);
   var_result.Bind(input);
   Goto(&end);
 
@@ -3793,8 +4021,7 @@
   {
     Label not_heap_number(this, Label::kDeferred);
     Node* input_map = LoadMap(input);
-    GotoIf(Word32NotEqual(input_map, HeapNumberMapConstant()),
-           &not_heap_number);
+    GotoIfNot(IsHeapNumberMap(input_map), &not_heap_number);
 
     var_result.Bind(input);
     Goto(&end);
@@ -3810,6 +4037,107 @@
   return var_result.value();
 }
 
+Node* CodeStubAssembler::ToUint32(Node* context, Node* input) {
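+  // Implements ES#sec-touint32: maps {input} onto an integer in [0, 2^32).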
+  Node* const float_zero = Float64Constant(0.0);
+  Node* const float_two_32 = Float64Constant(static_cast<double>(1ULL << 32));
+
+  Label out(this);
+
+  Variable var_result(this, MachineRepresentation::kTagged, input);
+
+  // Early exit for positive smis.
+  {
+    // TODO(jgruber): This branch and the recheck below can be removed once we
+    // have a ToNumber with multiple exits.
+    Label next(this, Label::kDeferred);
+    Branch(TaggedIsPositiveSmi(input), &out, &next);
+    Bind(&next);
+  }
+
+  Node* const number = ToNumber(context, input);
+  var_result.Bind(number);
+
+  // Perhaps we have a positive smi now.
+  {
+    Label next(this, Label::kDeferred);
+    Branch(TaggedIsPositiveSmi(number), &out, &next);
+    Bind(&next);
+  }
+
+  Label if_isnegativesmi(this), if_isheapnumber(this);
+  Branch(TaggedIsSmi(number), &if_isnegativesmi, &if_isheapnumber);
+
+  Bind(&if_isnegativesmi);
+  {
+    // floor({input}) mod 2^32 === {input} + 2^32.
+    Node* const float_number = SmiToFloat64(number);
+    Node* const float_result = Float64Add(float_number, float_two_32);
+    Node* const result = ChangeFloat64ToTagged(float_result);
+    var_result.Bind(result);
+    Goto(&out);
+  }
+
+  Bind(&if_isheapnumber);
+  {
+    Label return_zero(this);
+    Node* const value = LoadHeapNumberValue(number);
+
+    {
+      // +-0.
+      Label next(this);
+      Branch(Float64Equal(value, float_zero), &return_zero, &next);
+      Bind(&next);
+    }
+
+    {
+      // NaN.
+      Label next(this);
+      Branch(Float64Equal(value, value), &next, &return_zero);
+      Bind(&next);
+    }
+
+    {
+      // +Infinity.
+      Label next(this);
+      Node* const positive_infinity =
+          Float64Constant(std::numeric_limits<double>::infinity());
+      Branch(Float64Equal(value, positive_infinity), &return_zero, &next);
+      Bind(&next);
+    }
+
+    {
+      // -Infinity.
+      Label next(this);
+      Node* const negative_infinity =
+          Float64Constant(-1.0 * std::numeric_limits<double>::infinity());
+      Branch(Float64Equal(value, negative_infinity), &return_zero, &next);
+      Bind(&next);
+    }
+
+    // Return floor({input}) mod 2^32, using mod semantics that always yield a
+    // positive result.
+    {
+      Node* x = Float64Floor(value);
+      x = Float64Mod(x, float_two_32);
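+      // Float64Mod may yield a negative result for a negative {value}, so the
+      // add and second mod below normalize the result into [0, 2^32).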
+      x = Float64Add(x, float_two_32);
+      x = Float64Mod(x, float_two_32);
+
+      Node* const result = ChangeFloat64ToTagged(x);
+      var_result.Bind(result);
+      Goto(&out);
+    }
+
+    Bind(&return_zero);
+    {
+      var_result.Bind(SmiConstant(Smi::kZero));
+      Goto(&out);
+    }
+  }
+
+  Bind(&out);
+  return var_result.value();
+}
+
 Node* CodeStubAssembler::ToString(Node* context, Node* input) {
   Label is_number(this);
   Label runtime(this, Label::kDeferred);
@@ -3825,8 +4153,7 @@
   GotoIf(IsStringInstanceType(input_instance_type), &done);
 
   Label not_heap_number(this);
-  Branch(WordNotEqual(input_map, HeapNumberMapConstant()), &not_heap_number,
-         &is_number);
+  Branch(IsHeapNumberMap(input_map), &is_number, &not_heap_number);
 
   Bind(&is_number);
   result.Bind(NumberToString(context, input));
@@ -3850,45 +4177,6 @@
   return result.value();
 }
 
-Node* CodeStubAssembler::FlattenString(Node* string) {
-  CSA_ASSERT(this, IsString(string));
-  Variable var_result(this, MachineRepresentation::kTagged);
-  var_result.Bind(string);
-
-  Node* instance_type = LoadInstanceType(string);
-
-  // Check if the {string} is not a ConsString (i.e. already flat).
-  Label is_cons(this, Label::kDeferred), is_flat_in_cons(this), end(this);
-  {
-    GotoUnless(Word32Equal(Word32And(instance_type,
-                                     Int32Constant(kStringRepresentationMask)),
-                           Int32Constant(kConsStringTag)),
-               &end);
-
-    // Check whether the right hand side is the empty string (i.e. if
-    // this is really a flat string in a cons string).
-    Node* rhs = LoadObjectField(string, ConsString::kSecondOffset);
-    Branch(WordEqual(rhs, EmptyStringConstant()), &is_flat_in_cons, &is_cons);
-  }
-
-  // Bail out to the runtime.
-  Bind(&is_cons);
-  {
-    var_result.Bind(
-        CallRuntime(Runtime::kFlattenString, NoContextConstant(), string));
-    Goto(&end);
-  }
-
-  Bind(&is_flat_in_cons);
-  {
-    var_result.Bind(LoadObjectField(string, ConsString::kFirstOffset));
-    Goto(&end);
-  }
-
-  Bind(&end);
-  return var_result.value();
-}
-
 Node* CodeStubAssembler::JSReceiverToPrimitive(Node* context, Node* input) {
   Label if_isreceiver(this, Label::kDeferred), if_isnotreceiver(this);
   Variable result(this, MachineRepresentation::kTagged);
@@ -3917,9 +4205,8 @@
 Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
                                    ToIntegerTruncationMode mode) {
   // We might need to loop once for ToNumber conversion.
-  Variable var_arg(this, MachineRepresentation::kTagged);
+  Variable var_arg(this, MachineRepresentation::kTagged, input);
   Label loop(this, &var_arg), out(this);
-  var_arg.Bind(input);
   Goto(&loop);
   Bind(&loop);
   {
@@ -3935,8 +4222,8 @@
     // Check if {arg} is a HeapNumber.
     Label if_argisheapnumber(this),
         if_argisnotheapnumber(this, Label::kDeferred);
-    Branch(WordEqual(LoadMap(arg), HeapNumberMapConstant()),
-           &if_argisheapnumber, &if_argisnotheapnumber);
+    Branch(IsHeapNumberMap(LoadMap(arg)), &if_argisheapnumber,
+           &if_argisnotheapnumber);
 
     Bind(&if_argisheapnumber);
     {
@@ -3944,7 +4231,7 @@
       Node* arg_value = LoadHeapNumberValue(arg);
 
       // Check if {arg} is NaN.
-      GotoUnless(Float64Equal(arg_value, arg_value), &return_zero);
+      GotoIfNot(Float64Equal(arg_value, arg_value), &return_zero);
 
       // Truncate {arg} towards zero.
       Node* value = Float64Trunc(arg_value);
@@ -4013,29 +4300,42 @@
   }
 }
 
+void CodeStubAssembler::Increment(Variable& variable, int value,
+                                  ParameterMode mode) {
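+  // Adds {value} to {variable} in place, using Smi or IntPtr arithmetic as
+  // selected by {mode}.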
+  DCHECK_IMPLIES(mode == INTPTR_PARAMETERS,
+                 variable.rep() == MachineType::PointerRepresentation());
+  DCHECK_IMPLIES(mode == SMI_PARAMETERS,
+                 variable.rep() == MachineRepresentation::kTagged ||
+                     variable.rep() == MachineRepresentation::kTaggedSigned);
+  variable.Bind(
+      IntPtrOrSmiAdd(variable.value(), IntPtrOrSmiConstant(value, mode), mode));
+}
+
 void CodeStubAssembler::Use(Label* label) {
   GotoIf(Word32Equal(Int32Constant(0), Int32Constant(1)), label);
 }
 
 void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
                                   Variable* var_index, Label* if_keyisunique,
-                                  Label* if_bailout) {
+                                  Variable* var_unique, Label* if_bailout) {
   DCHECK_EQ(MachineType::PointerRepresentation(), var_index->rep());
+  DCHECK_EQ(MachineRepresentation::kTagged, var_unique->rep());
   Comment("TryToName");
 
-  Label if_hascachedindex(this), if_keyisnotindex(this);
+  Label if_hascachedindex(this), if_keyisnotindex(this), if_thinstring(this);
   // Handle Smi and HeapNumber keys.
   var_index->Bind(TryToIntptr(key, &if_keyisnotindex));
   Goto(if_keyisindex);
 
   Bind(&if_keyisnotindex);
-  Node* key_instance_type = LoadInstanceType(key);
+  Node* key_map = LoadMap(key);
+  var_unique->Bind(key);
   // Symbols are unique.
-  GotoIf(Word32Equal(key_instance_type, Int32Constant(SYMBOL_TYPE)),
-         if_keyisunique);
+  GotoIf(IsSymbolMap(key_map), if_keyisunique);
+  Node* key_instance_type = LoadMapInstanceType(key_map);
   // Miss if |key| is not a String.
   STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
-  GotoUnless(IsStringInstanceType(key_instance_type), if_bailout);
+  GotoIfNot(IsStringInstanceType(key_instance_type), if_bailout);
   // |key| is a String. Check if it has a cached array index.
   Node* hash = LoadNameHashField(key);
   Node* contains_index =
@@ -4046,6 +4346,12 @@
   Node* not_an_index =
       Word32And(hash, Int32Constant(Name::kIsNotArrayIndexMask));
   GotoIf(Word32Equal(not_an_index, Int32Constant(0)), if_bailout);
+  // Check if we have a ThinString.
+  GotoIf(Word32Equal(key_instance_type, Int32Constant(THIN_STRING_TYPE)),
+         &if_thinstring);
+  GotoIf(
+      Word32Equal(key_instance_type, Int32Constant(THIN_ONE_BYTE_STRING_TYPE)),
+      &if_thinstring);
   // Finally, check if |key| is internalized.
   STATIC_ASSERT(kNotInternalizedTag != 0);
   Node* not_internalized =
@@ -4053,6 +4359,10 @@
   GotoIf(Word32NotEqual(not_internalized, Int32Constant(0)), if_bailout);
   Goto(if_keyisunique);
 
+  Bind(&if_thinstring);
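+  // A ThinString forwards to the internalized string stored in its actual
+  // field; use that string as the unique name.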
+  var_unique->Bind(LoadObjectField(key, ThinString::kActualOffset));
+  Goto(if_keyisunique);
+
   Bind(&if_hascachedindex);
   var_index->Bind(DecodeWordFromWord32<Name::ArrayIndexValueBits>(hash));
   Goto(if_keyisindex);
@@ -4067,6 +4377,8 @@
 
 template Node* CodeStubAssembler::EntryToIndex<NameDictionary>(Node*, int);
 template Node* CodeStubAssembler::EntryToIndex<GlobalDictionary>(Node*, int);
+template Node* CodeStubAssembler::EntryToIndex<SeededNumberDictionary>(Node*,
+                                                                       int);
 
 Node* CodeStubAssembler::HashTableComputeCapacity(Node* at_least_space_for) {
   Node* capacity = IntPtrRoundUpToPowerOfTwo32(
@@ -4075,8 +4387,49 @@
 }
 
 Node* CodeStubAssembler::IntPtrMax(Node* left, Node* right) {
-  return Select(IntPtrGreaterThanOrEqual(left, right), left, right,
-                MachineType::PointerRepresentation());
+  return SelectConstant(IntPtrGreaterThanOrEqual(left, right), left, right,
+                        MachineType::PointerRepresentation());
+}
+
+Node* CodeStubAssembler::IntPtrMin(Node* left, Node* right) {
+  return SelectConstant(IntPtrLessThanOrEqual(left, right), left, right,
+                        MachineType::PointerRepresentation());
+}
+
+template <class Dictionary>
+Node* CodeStubAssembler::GetNumberOfElements(Node* dictionary) {
+  return LoadFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex);
+}
+
+template <class Dictionary>
+void CodeStubAssembler::SetNumberOfElements(Node* dictionary,
+                                            Node* num_elements_smi) {
+  StoreFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex,
+                         num_elements_smi, SKIP_WRITE_BARRIER);
+}
+
+template <class Dictionary>
+Node* CodeStubAssembler::GetNumberOfDeletedElements(Node* dictionary) {
+  return LoadFixedArrayElement(dictionary,
+                               Dictionary::kNumberOfDeletedElementsIndex);
+}
+
+template <class Dictionary>
+Node* CodeStubAssembler::GetCapacity(Node* dictionary) {
+  return LoadFixedArrayElement(dictionary, Dictionary::kCapacityIndex);
+}
+
+template <class Dictionary>
+Node* CodeStubAssembler::GetNextEnumerationIndex(Node* dictionary) {
+  return LoadFixedArrayElement(dictionary,
+                               Dictionary::kNextEnumerationIndexIndex);
+}
+
+template <class Dictionary>
+void CodeStubAssembler::SetNextEnumerationIndex(Node* dictionary,
+                                                Node* next_enum_index_smi) {
+  StoreFixedArrayElement(dictionary, Dictionary::kNextEnumerationIndexIndex,
+                         next_enum_index_smi, SKIP_WRITE_BARRIER);
 }
 
 template <typename Dictionary>
@@ -4084,14 +4437,15 @@
                                              Node* unique_name, Label* if_found,
                                              Variable* var_name_index,
                                              Label* if_not_found,
-                                             int inlined_probes) {
+                                             int inlined_probes,
+                                             LookupMode mode) {
   CSA_ASSERT(this, IsDictionary(dictionary));
   DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
+  DCHECK_IMPLIES(mode == kFindInsertionIndex,
+                 inlined_probes == 0 && if_found == nullptr);
   Comment("NameDictionaryLookup");
 
-  Node* capacity = SmiUntag(LoadFixedArrayElement(
-      dictionary, IntPtrConstant(Dictionary::kCapacityIndex), 0,
-      INTPTR_PARAMETERS));
+  Node* capacity = SmiUntag(GetCapacity<Dictionary>(dictionary));
   Node* mask = IntPtrSub(capacity, IntPtrConstant(1));
   Node* hash = ChangeUint32ToWord(LoadNameHash(unique_name));
 
@@ -4103,42 +4457,46 @@
     Node* index = EntryToIndex<Dictionary>(entry);
     var_name_index->Bind(index);
 
-    Node* current =
-        LoadFixedArrayElement(dictionary, index, 0, INTPTR_PARAMETERS);
+    Node* current = LoadFixedArrayElement(dictionary, index);
     GotoIf(WordEqual(current, unique_name), if_found);
 
     // See Dictionary::NextProbe().
     count = IntPtrConstant(i + 1);
     entry = WordAnd(IntPtrAdd(entry, count), mask);
   }
+  if (mode == kFindInsertionIndex) {
+    // Appease the variable merging algorithm for "Goto(&loop)" below.
+    var_name_index->Bind(IntPtrConstant(0));
+  }
 
   Node* undefined = UndefinedConstant();
+  Node* the_hole = mode == kFindExisting ? nullptr : TheHoleConstant();
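+  // In kFindInsertionIndex mode, a hole marks a deleted entry whose slot can
+  // be reused as the insertion point.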
 
-  Variable var_count(this, MachineType::PointerRepresentation());
-  Variable var_entry(this, MachineType::PointerRepresentation());
+  Variable var_count(this, MachineType::PointerRepresentation(), count);
+  Variable var_entry(this, MachineType::PointerRepresentation(), entry);
   Variable* loop_vars[] = {&var_count, &var_entry, var_name_index};
   Label loop(this, 3, loop_vars);
-  var_count.Bind(count);
-  var_entry.Bind(entry);
   Goto(&loop);
   Bind(&loop);
   {
-    Node* count = var_count.value();
     Node* entry = var_entry.value();
 
     Node* index = EntryToIndex<Dictionary>(entry);
     var_name_index->Bind(index);
 
-    Node* current =
-        LoadFixedArrayElement(dictionary, index, 0, INTPTR_PARAMETERS);
+    Node* current = LoadFixedArrayElement(dictionary, index);
     GotoIf(WordEqual(current, undefined), if_not_found);
-    GotoIf(WordEqual(current, unique_name), if_found);
+    if (mode == kFindExisting) {
+      GotoIf(WordEqual(current, unique_name), if_found);
+    } else {
+      DCHECK_EQ(kFindInsertionIndex, mode);
+      GotoIf(WordEqual(current, the_hole), if_not_found);
+    }
 
     // See Dictionary::NextProbe().
-    count = IntPtrAdd(count, IntPtrConstant(1));
-    entry = WordAnd(IntPtrAdd(entry, count), mask);
+    Increment(var_count);
+    entry = WordAnd(IntPtrAdd(entry, var_count.value()), mask);
 
-    var_count.Bind(count);
     var_entry.Bind(entry);
     Goto(&loop);
   }
@@ -4146,13 +4504,13 @@
 
 // Instantiate template methods to workaround GCC compilation issue.
 template void CodeStubAssembler::NameDictionaryLookup<NameDictionary>(
-    Node*, Node*, Label*, Variable*, Label*, int);
+    Node*, Node*, Label*, Variable*, Label*, int, LookupMode);
 template void CodeStubAssembler::NameDictionaryLookup<GlobalDictionary>(
-    Node*, Node*, Label*, Variable*, Label*, int);
+    Node*, Node*, Label*, Variable*, Label*, int, LookupMode);
 
 Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) {
   // See v8::internal::ComputeIntegerHash()
-  Node* hash = key;
+  Node* hash = TruncateWordToWord32(key);
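+  // Only the low 32 bits of the word-sized {key} feed into the hash.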
   hash = Word32Xor(hash, seed);
   hash = Int32Add(Word32Xor(hash, Int32Constant(0xffffffff)),
                   Word32Shl(hash, Int32Constant(15)));
@@ -4174,9 +4532,7 @@
   DCHECK_EQ(MachineType::PointerRepresentation(), var_entry->rep());
   Comment("NumberDictionaryLookup");
 
-  Node* capacity = SmiUntag(LoadFixedArrayElement(
-      dictionary, IntPtrConstant(Dictionary::kCapacityIndex), 0,
-      INTPTR_PARAMETERS));
+  Node* capacity = SmiUntag(GetCapacity<Dictionary>(dictionary));
   Node* mask = IntPtrSub(capacity, IntPtrConstant(1));
 
   Node* int32_seed;
@@ -4195,20 +4551,17 @@
   Node* undefined = UndefinedConstant();
   Node* the_hole = TheHoleConstant();
 
-  Variable var_count(this, MachineType::PointerRepresentation());
+  Variable var_count(this, MachineType::PointerRepresentation(), count);
   Variable* loop_vars[] = {&var_count, var_entry};
   Label loop(this, 2, loop_vars);
-  var_count.Bind(count);
   var_entry->Bind(entry);
   Goto(&loop);
   Bind(&loop);
   {
-    Node* count = var_count.value();
     Node* entry = var_entry->value();
 
     Node* index = EntryToIndex<Dictionary>(entry);
-    Node* current =
-        LoadFixedArrayElement(dictionary, index, 0, INTPTR_PARAMETERS);
+    Node* current = LoadFixedArrayElement(dictionary, index);
     GotoIf(WordEqual(current, undefined), if_not_found);
     Label next_probe(this);
     {
@@ -4231,38 +4584,273 @@
 
     Bind(&next_probe);
     // See Dictionary::NextProbe().
-    count = IntPtrAdd(count, IntPtrConstant(1));
-    entry = WordAnd(IntPtrAdd(entry, count), mask);
+    Increment(var_count);
+    entry = WordAnd(IntPtrAdd(entry, var_count.value()), mask);
 
-    var_count.Bind(count);
     var_entry->Bind(entry);
     Goto(&loop);
   }
 }
 
+template <class Dictionary>
+void CodeStubAssembler::FindInsertionEntry(Node* dictionary, Node* key,
+                                           Variable* var_key_index) {
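+  // Use the NameDictionary specialization below instead.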
+  UNREACHABLE();
+}
+
+template <>
+void CodeStubAssembler::FindInsertionEntry<NameDictionary>(
+    Node* dictionary, Node* key, Variable* var_key_index) {
+  Label done(this);
+  NameDictionaryLookup<NameDictionary>(dictionary, key, nullptr, var_key_index,
+                                       &done, 0, kFindInsertionIndex);
+  Bind(&done);
+}
+
+template <class Dictionary>
+void CodeStubAssembler::InsertEntry(Node* dictionary, Node* key, Node* value,
+                                    Node* index, Node* enum_index) {
+  UNREACHABLE();  // Use specializations instead.
+}
+
+template <>
+void CodeStubAssembler::InsertEntry<NameDictionary>(Node* dictionary,
+                                                    Node* name, Node* value,
+                                                    Node* index,
+                                                    Node* enum_index) {
+  // Store name and value.
+  StoreFixedArrayElement(dictionary, index, name);
+  StoreValueByKeyIndex<NameDictionary>(dictionary, index, value);
+
+  // Prepare details of the new property.
+  const int kInitialIndex = 0;
+  PropertyDetails d(kData, NONE, kInitialIndex, PropertyCellType::kNoCell);
+  enum_index =
+      SmiShl(enum_index, PropertyDetails::DictionaryStorageField::kShift);
+  STATIC_ASSERT(kInitialIndex == 0);
+  Variable var_details(this, MachineRepresentation::kTaggedSigned,
+                       SmiOr(SmiConstant(d.AsSmi()), enum_index));
+
+  // Private names must be marked non-enumerable.
+  Label not_private(this, &var_details);
+  GotoIfNot(IsSymbolMap(LoadMap(name)), &not_private);
+  Node* flags = SmiToWord32(LoadObjectField(name, Symbol::kFlagsOffset));
+  const int kPrivateMask = 1 << Symbol::kPrivateBit;
+  GotoIfNot(IsSetWord32(flags, kPrivateMask), &not_private);
+  Node* dont_enum =
+      SmiShl(SmiConstant(DONT_ENUM), PropertyDetails::AttributesField::kShift);
+  var_details.Bind(SmiOr(var_details.value(), dont_enum));
+  Goto(&not_private);
+  Bind(&not_private);
+
+  // Finally, store the details.
+  StoreDetailsByKeyIndex<NameDictionary>(dictionary, index,
+                                         var_details.value());
+}
+
+template <>
+void CodeStubAssembler::InsertEntry<GlobalDictionary>(Node* dictionary,
+                                                      Node* key, Node* value,
+                                                      Node* index,
+                                                      Node* enum_index) {
+  UNIMPLEMENTED();
+}
+
+template <class Dictionary>
+void CodeStubAssembler::Add(Node* dictionary, Node* key, Node* value,
+                            Label* bailout) {
+  Node* capacity = GetCapacity<Dictionary>(dictionary);
+  Node* nof = GetNumberOfElements<Dictionary>(dictionary);
+  Node* new_nof = SmiAdd(nof, SmiConstant(1));
+  // Require 33% of the capacity to still be free after adding the new entry.
+  // Computing "x + (x >> 1)" on a Smi x does not return a valid Smi!
+  // That's OK here because the result is only used for a comparison.
+  Node* required_capacity_pseudo_smi = SmiAdd(new_nof, SmiShr(new_nof, 1));
+  GotoIf(SmiBelow(capacity, required_capacity_pseudo_smi), bailout);
+  // Require rehashing if more than 50% of free elements are deleted elements.
+  Node* deleted = GetNumberOfDeletedElements<Dictionary>(dictionary);
+  CSA_ASSERT(this, SmiAbove(capacity, new_nof));
+  Node* half_of_free_elements = SmiShr(SmiSub(capacity, new_nof), 1);
+  GotoIf(SmiAbove(deleted, half_of_free_elements), bailout);
+  Node* enum_index = nullptr;
+  if (Dictionary::kIsEnumerable) {
+    enum_index = GetNextEnumerationIndex<Dictionary>(dictionary);
+    Node* new_enum_index = SmiAdd(enum_index, SmiConstant(1));
+    Node* max_enum_index =
+        SmiConstant(PropertyDetails::DictionaryStorageField::kMax);
+    GotoIf(SmiAbove(new_enum_index, max_enum_index), bailout);
+
+    // No more bailouts after this point.
+    // Operations from here on can have side effects.
+
+    SetNextEnumerationIndex<Dictionary>(dictionary, new_enum_index);
+  } else {
+    USE(enum_index);
+  }
+  SetNumberOfElements<Dictionary>(dictionary, new_nof);
+
+  Variable var_key_index(this, MachineType::PointerRepresentation());
+  FindInsertionEntry<Dictionary>(dictionary, key, &var_key_index);
+  InsertEntry<Dictionary>(dictionary, key, value, var_key_index.value(),
+                          enum_index);
+}
+
+template void CodeStubAssembler::Add<NameDictionary>(Node*, Node*, Node*,
+                                                     Label*);
+
 void CodeStubAssembler::DescriptorLookupLinear(Node* unique_name,
                                                Node* descriptors, Node* nof,
                                                Label* if_found,
                                                Variable* var_name_index,
                                                Label* if_not_found) {
+  Comment("DescriptorLookupLinear");
   Node* first_inclusive = IntPtrConstant(DescriptorArray::ToKeyIndex(0));
-  Node* factor = IntPtrConstant(DescriptorArray::kDescriptorSize);
+  Node* factor = IntPtrConstant(DescriptorArray::kEntrySize);
   Node* last_exclusive = IntPtrAdd(first_inclusive, IntPtrMul(nof, factor));
 
-  BuildFastLoop(
-      MachineType::PointerRepresentation(), last_exclusive, first_inclusive,
-      [descriptors, unique_name, if_found, var_name_index](
-          CodeStubAssembler* assembler, Node* name_index) {
-        Node* candidate_name = assembler->LoadFixedArrayElement(
-            descriptors, name_index, 0, INTPTR_PARAMETERS);
-        var_name_index->Bind(name_index);
-        assembler->GotoIf(assembler->WordEqual(candidate_name, unique_name),
-                          if_found);
-      },
-      -DescriptorArray::kDescriptorSize, IndexAdvanceMode::kPre);
+  BuildFastLoop(last_exclusive, first_inclusive,
+                [this, descriptors, unique_name, if_found,
+                 var_name_index](Node* name_index) {
+                  Node* candidate_name =
+                      LoadFixedArrayElement(descriptors, name_index);
+                  var_name_index->Bind(name_index);
+                  GotoIf(WordEqual(candidate_name, unique_name), if_found);
+                },
+                -DescriptorArray::kEntrySize, INTPTR_PARAMETERS,
+                IndexAdvanceMode::kPre);
   Goto(if_not_found);
 }
 
+Node* CodeStubAssembler::DescriptorArrayNumberOfEntries(Node* descriptors) {
+  return LoadAndUntagToWord32FixedArrayElement(
+      descriptors, IntPtrConstant(DescriptorArray::kDescriptorLengthIndex));
+}
+
+namespace {
+
+Node* DescriptorNumberToIndex(CodeStubAssembler* a, Node* descriptor_number) {
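+  // Scale the descriptor number by kEntrySize to obtain a FixedArray index.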
+  Node* descriptor_size = a->Int32Constant(DescriptorArray::kEntrySize);
+  Node* index = a->Int32Mul(descriptor_number, descriptor_size);
+  return a->ChangeInt32ToIntPtr(index);
+}
+
+}  // namespace
+
+Node* CodeStubAssembler::DescriptorArrayToKeyIndex(Node* descriptor_number) {
+  return IntPtrAdd(IntPtrConstant(DescriptorArray::ToKeyIndex(0)),
+                   DescriptorNumberToIndex(this, descriptor_number));
+}
+
+Node* CodeStubAssembler::DescriptorArrayGetSortedKeyIndex(
+    Node* descriptors, Node* descriptor_number) {
+  const int details_offset = DescriptorArray::ToDetailsIndex(0) * kPointerSize;
+  Node* details = LoadAndUntagToWord32FixedArrayElement(
+      descriptors, DescriptorNumberToIndex(this, descriptor_number),
+      details_offset);
+  return DecodeWord32<PropertyDetails::DescriptorPointer>(details);
+}
+
+Node* CodeStubAssembler::DescriptorArrayGetKey(Node* descriptors,
+                                               Node* descriptor_number) {
+  const int key_offset = DescriptorArray::ToKeyIndex(0) * kPointerSize;
+  return LoadFixedArrayElement(descriptors,
+                               DescriptorNumberToIndex(this, descriptor_number),
+                               key_offset);
+}
+
+void CodeStubAssembler::DescriptorLookupBinary(Node* unique_name,
+                                               Node* descriptors, Node* nof,
+                                               Label* if_found,
+                                               Variable* var_name_index,
+                                               Label* if_not_found) {
+  Comment("DescriptorLookupBinary");
+  Variable var_low(this, MachineRepresentation::kWord32, Int32Constant(0));
+  Node* limit =
+      Int32Sub(DescriptorArrayNumberOfEntries(descriptors), Int32Constant(1));
+  Variable var_high(this, MachineRepresentation::kWord32, limit);
+  Node* hash = LoadNameHashField(unique_name);
+  CSA_ASSERT(this, Word32NotEqual(hash, Int32Constant(0)));
+
+  // Assume non-empty array.
+  CSA_ASSERT(this, Uint32LessThanOrEqual(var_low.value(), var_high.value()));
+
+  Variable* loop_vars[] = {&var_high, &var_low};
+  Label binary_loop(this, 2, loop_vars);
+  Goto(&binary_loop);
+  Bind(&binary_loop);
+  {
+    // mid = low + (high - low) / 2 (to avoid overflow in "(low + high) / 2").
+    Node* mid =
+        Int32Add(var_low.value(),
+                 Word32Shr(Int32Sub(var_high.value(), var_low.value()), 1));
+    // mid_name = descriptors->GetSortedKey(mid).
+    Node* sorted_key_index = DescriptorArrayGetSortedKeyIndex(descriptors, mid);
+    Node* mid_name = DescriptorArrayGetKey(descriptors, sorted_key_index);
+
+    Node* mid_hash = LoadNameHashField(mid_name);
+
+    Label mid_greater(this), mid_less(this), merge(this);
+    Branch(Uint32GreaterThanOrEqual(mid_hash, hash), &mid_greater, &mid_less);
+    Bind(&mid_greater);
+    {
+      var_high.Bind(mid);
+      Goto(&merge);
+    }
+    Bind(&mid_less);
+    {
+      var_low.Bind(Int32Add(mid, Int32Constant(1)));
+      Goto(&merge);
+    }
+    Bind(&merge);
+    GotoIf(Word32NotEqual(var_low.value(), var_high.value()), &binary_loop);
+  }
+
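+  // {var_low} now points at the first entry whose hash is >= the hash of
+  // {unique_name}; scan forward over equal-hash entries for an exact match.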
+  Label scan_loop(this, &var_low);
+  Goto(&scan_loop);
+  Bind(&scan_loop);
+  {
+    GotoIf(Int32GreaterThan(var_low.value(), limit), if_not_found);
+
+    Node* sort_index =
+        DescriptorArrayGetSortedKeyIndex(descriptors, var_low.value());
+    Node* current_name = DescriptorArrayGetKey(descriptors, sort_index);
+    Node* current_hash = LoadNameHashField(current_name);
+    GotoIf(Word32NotEqual(current_hash, hash), if_not_found);
+    Label next(this);
+    GotoIf(WordNotEqual(current_name, unique_name), &next);
+    GotoIf(Int32GreaterThanOrEqual(sort_index, nof), if_not_found);
+    var_name_index->Bind(DescriptorArrayToKeyIndex(sort_index));
+    Goto(if_found);
+
+    Bind(&next);
+    var_low.Bind(Int32Add(var_low.value(), Int32Constant(1)));
+    Goto(&scan_loop);
+  }
+}
+
+void CodeStubAssembler::DescriptorLookup(Node* unique_name, Node* descriptors,
+                                         Node* bitfield3, Label* if_found,
+                                         Variable* var_name_index,
+                                         Label* if_not_found) {
+  Comment("DescriptorArrayLookup");
+  Node* nof = DecodeWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
+  GotoIf(Word32Equal(nof, Int32Constant(0)), if_not_found);
+  Label linear_search(this), binary_search(this);
+  const int kMaxElementsForLinearSearch = 32;
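+  // For small descriptor arrays, a linear scan is cheaper than binary search.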
+  Branch(Int32LessThanOrEqual(nof, Int32Constant(kMaxElementsForLinearSearch)),
+         &linear_search, &binary_search);
+  Bind(&linear_search);
+  {
+    DescriptorLookupLinear(unique_name, descriptors, ChangeInt32ToIntPtr(nof),
+                           if_found, var_name_index, if_not_found);
+  }
+  Bind(&binary_search);
+  {
+    DescriptorLookupBinary(unique_name, descriptors, nof, if_found,
+                           var_name_index, if_not_found);
+  }
+}
+
 void CodeStubAssembler::TryLookupProperty(
     Node* object, Node* map, Node* instance_type, Node* unique_name,
     Label* if_found_fast, Label* if_found_dict, Label* if_found_global,
@@ -4288,20 +4876,11 @@
          &if_isfastmap);
   Bind(&if_isfastmap);
   {
-    Comment("DescriptorArrayLookup");
-    Node* nof =
-        DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3);
-    // Bail out to the runtime for large numbers of own descriptors. The stub
-    // only does linear search, which becomes too expensive in that case.
-    {
-      static const int32_t kMaxLinear = 210;
-      GotoIf(UintPtrGreaterThan(nof, IntPtrConstant(kMaxLinear)), if_bailout);
-    }
     Node* descriptors = LoadMapDescriptors(map);
     var_meta_storage->Bind(descriptors);
 
-    DescriptorLookupLinear(unique_name, descriptors, nof, if_found_fast,
-                           var_name_index, if_not_found);
+    DescriptorLookup(unique_name, descriptors, bit_field3, if_found_fast,
+                     var_name_index, if_not_found);
   }
   Bind(&if_isslowmap);
   {
@@ -4314,8 +4893,8 @@
   Bind(&if_objectisspecial);
   {
     // Handle global object here and other special objects in runtime.
-    GotoUnless(Word32Equal(instance_type, Int32Constant(JS_GLOBAL_OBJECT_TYPE)),
-               if_bailout);
+    GotoIfNot(Word32Equal(instance_type, Int32Constant(JS_GLOBAL_OBJECT_TYPE)),
+              if_bailout);
 
     // Handle interceptors and access checks in runtime.
     Node* bit_field = LoadMapBitField(map);
@@ -4332,11 +4911,10 @@
   }
 }
 
-void CodeStubAssembler::TryHasOwnProperty(compiler::Node* object,
-                                          compiler::Node* map,
-                                          compiler::Node* instance_type,
-                                          compiler::Node* unique_name,
-                                          Label* if_found, Label* if_not_found,
+void CodeStubAssembler::TryHasOwnProperty(Node* object, Node* map,
+                                          Node* instance_type,
+                                          Node* unique_name, Label* if_found,
+                                          Label* if_not_found,
                                           Label* if_bailout) {
   Comment("TryHasOwnProperty");
   Variable var_meta_storage(this, MachineRepresentation::kTagged);
@@ -4367,15 +4945,8 @@
   DCHECK_EQ(MachineRepresentation::kTagged, var_value->rep());
   Comment("[ LoadPropertyFromFastObject");
 
-  const int name_to_details_offset =
-      (DescriptorArray::kDescriptorDetails - DescriptorArray::kDescriptorKey) *
-      kPointerSize;
-  const int name_to_value_offset =
-      (DescriptorArray::kDescriptorValue - DescriptorArray::kDescriptorKey) *
-      kPointerSize;
-
-  Node* details = LoadAndUntagToWord32FixedArrayElement(descriptors, name_index,
-                                                        name_to_details_offset);
+  Node* details =
+      LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
   var_details->Bind(details);
 
   Node* location = DecodeWord32<PropertyDetails::LocationField>(details);
@@ -4458,9 +5029,8 @@
   }
   Bind(&if_in_descriptor);
   {
-    Node* value =
-        LoadFixedArrayElement(descriptors, name_index, name_to_value_offset);
-    var_value->Bind(value);
+    var_value->Bind(
+        LoadValueByKeyIndex<DescriptorArray>(descriptors, name_index));
     Goto(&done);
   }
   Bind(&done);
@@ -4474,19 +5044,10 @@
                                                        Variable* var_value) {
   Comment("LoadPropertyFromNameDictionary");
   CSA_ASSERT(this, IsDictionary(dictionary));
-  const int name_to_details_offset =
-      (NameDictionary::kEntryDetailsIndex - NameDictionary::kEntryKeyIndex) *
-      kPointerSize;
-  const int name_to_value_offset =
-      (NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
-      kPointerSize;
 
-  Node* details = LoadAndUntagToWord32FixedArrayElement(dictionary, name_index,
-                                                        name_to_details_offset);
-
-  var_details->Bind(details);
-  var_value->Bind(
-      LoadFixedArrayElement(dictionary, name_index, name_to_value_offset));
+  var_details->Bind(
+      LoadDetailsByKeyIndex<NameDictionary>(dictionary, name_index));
+  var_value->Bind(LoadValueByKeyIndex<NameDictionary>(dictionary, name_index));
 
   Comment("] LoadPropertyFromNameDictionary");
 }
@@ -4499,12 +5060,8 @@
   Comment("[ LoadPropertyFromGlobalDictionary");
   CSA_ASSERT(this, IsDictionary(dictionary));
 
-  const int name_to_value_offset =
-      (GlobalDictionary::kEntryValueIndex - GlobalDictionary::kEntryKeyIndex) *
-      kPointerSize;
-
   Node* property_cell =
-      LoadFixedArrayElement(dictionary, name_index, name_to_value_offset);
+      LoadValueByKeyIndex<GlobalDictionary>(dictionary, name_index);
 
   Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
   GotoIf(WordEqual(value, TheHoleConstant()), if_deleted);
@@ -4524,8 +5081,7 @@
 Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
                                               Node* context, Node* receiver,
                                               Label* if_bailout) {
-  Variable var_value(this, MachineRepresentation::kTagged);
-  var_value.Bind(value);
+  Variable var_value(this, MachineRepresentation::kTagged, value);
   Label done(this);
 
   Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
@@ -4548,7 +5104,7 @@
 
     // Return undefined if the {getter} is not callable.
     var_value.Bind(UndefinedConstant());
-    GotoUnless(IsCallableMap(getter_map), &done);
+    GotoIfNot(IsCallableMap(getter_map), &done);
 
     // Call the accessor.
     Callable callable = CodeFactory::Call(isolate());
@@ -4664,10 +5220,9 @@
     Node* elements = LoadElements(object);
     Node* length = LoadAndUntagFixedArrayBaseLength(elements);
 
-    GotoUnless(UintPtrLessThan(intptr_index, length), &if_oob);
+    GotoIfNot(UintPtrLessThan(intptr_index, length), &if_oob);
 
-    Node* element =
-        LoadFixedArrayElement(elements, intptr_index, 0, INTPTR_PARAMETERS);
+    Node* element = LoadFixedArrayElement(elements, intptr_index);
     Node* the_hole = TheHoleConstant();
     Branch(WordEqual(element, the_hole), if_not_found, if_found);
   }
@@ -4676,7 +5231,7 @@
     Node* elements = LoadElements(object);
     Node* length = LoadAndUntagFixedArrayBaseLength(elements);
 
-    GotoUnless(UintPtrLessThan(intptr_index, length), &if_oob);
+    GotoIfNot(UintPtrLessThan(intptr_index, length), &if_oob);
 
     // Check if the element is a double hole, but don't load it.
     LoadFixedDoubleArrayElement(elements, intptr_index, MachineType::None(), 0,
@@ -4727,8 +5282,8 @@
     UnseededNumberDictionary>(Node*, Node*, Label*, Variable*, Label*);
 
 void CodeStubAssembler::TryPrototypeChainLookup(
-    Node* receiver, Node* key, LookupInHolder& lookup_property_in_holder,
-    LookupInHolder& lookup_element_in_holder, Label* if_end,
+    Node* receiver, Node* key, const LookupInHolder& lookup_property_in_holder,
+    const LookupInHolder& lookup_element_in_holder, Label* if_end,
     Label* if_bailout) {
   // Ensure receiver is JSReceiver, otherwise bailout.
   Label if_objectisnotsmi(this);
@@ -4748,22 +5303,22 @@
   }
 
   Variable var_index(this, MachineType::PointerRepresentation());
+  Variable var_unique(this, MachineRepresentation::kTagged);
 
   Label if_keyisindex(this), if_iskeyunique(this);
-  TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, if_bailout);
+  TryToName(key, &if_keyisindex, &var_index, &if_iskeyunique, &var_unique,
+            if_bailout);
 
   Bind(&if_iskeyunique);
   {
-    Variable var_holder(this, MachineRepresentation::kTagged);
-    Variable var_holder_map(this, MachineRepresentation::kTagged);
-    Variable var_holder_instance_type(this, MachineRepresentation::kWord8);
+    Variable var_holder(this, MachineRepresentation::kTagged, receiver);
+    Variable var_holder_map(this, MachineRepresentation::kTagged, map);
+    Variable var_holder_instance_type(this, MachineRepresentation::kWord32,
+                                      instance_type);
 
     Variable* merged_variables[] = {&var_holder, &var_holder_map,
                                     &var_holder_instance_type};
     Label loop(this, arraysize(merged_variables), merged_variables);
-    var_holder.Bind(receiver);
-    var_holder_map.Bind(map);
-    var_holder_instance_type.Bind(instance_type);
     Goto(&loop);
     Bind(&loop);
     {
@@ -4772,8 +5327,8 @@
 
       Label next_proto(this);
       lookup_property_in_holder(receiver, var_holder.value(), holder_map,
-                                holder_instance_type, key, &next_proto,
-                                if_bailout);
+                                holder_instance_type, var_unique.value(),
+                                &next_proto, if_bailout);
       Bind(&next_proto);
 
       // Bailout if it can be an integer indexed exotic case.
@@ -4798,16 +5353,14 @@
   }
   Bind(&if_keyisindex);
   {
-    Variable var_holder(this, MachineRepresentation::kTagged);
-    Variable var_holder_map(this, MachineRepresentation::kTagged);
-    Variable var_holder_instance_type(this, MachineRepresentation::kWord8);
+    Variable var_holder(this, MachineRepresentation::kTagged, receiver);
+    Variable var_holder_map(this, MachineRepresentation::kTagged, map);
+    Variable var_holder_instance_type(this, MachineRepresentation::kWord32,
+                                      instance_type);
 
     Variable* merged_variables[] = {&var_holder, &var_holder_map,
                                     &var_holder_instance_type};
     Label loop(this, arraysize(merged_variables), merged_variables);
-    var_holder.Bind(receiver);
-    var_holder_map.Bind(map);
-    var_holder_instance_type.Bind(instance_type);
     Goto(&loop);
     Bind(&loop);
     {
@@ -4855,10 +5408,10 @@
   Node* instanceof_cache_map = LoadRoot(Heap::kInstanceofCacheMapRootIndex);
   {
     Label instanceof_cache_miss(this);
-    GotoUnless(WordEqual(instanceof_cache_function, callable),
-               &instanceof_cache_miss);
-    GotoUnless(WordEqual(instanceof_cache_map, object_map),
-               &instanceof_cache_miss);
+    GotoIfNot(WordEqual(instanceof_cache_function, callable),
+              &instanceof_cache_miss);
+    GotoIfNot(WordEqual(instanceof_cache_map, object_map),
+              &instanceof_cache_miss);
     var_result.Bind(LoadRoot(Heap::kInstanceofCacheAnswerRootIndex));
     Goto(&return_result);
     Bind(&instanceof_cache_miss);
@@ -4872,14 +5425,14 @@
 
   // Goto runtime if {callable} is not a JSFunction.
   Node* callable_instance_type = LoadMapInstanceType(callable_map);
-  GotoUnless(
+  GotoIfNot(
       Word32Equal(callable_instance_type, Int32Constant(JS_FUNCTION_TYPE)),
       &return_runtime);
 
   // Goto runtime if {callable} is not a constructor or has
   // a non-instance "prototype".
   Node* callable_bitfield = LoadMapBitField(callable_map);
-  GotoUnless(
+  GotoIfNot(
       Word32Equal(Word32And(callable_bitfield,
                             Int32Constant((1 << Map::kHasNonInstancePrototype) |
                                           (1 << Map::kIsConstructor))),
@@ -4890,9 +5443,9 @@
   Node* callable_prototype =
       LoadObjectField(callable, JSFunction::kPrototypeOrInitialMapOffset);
   {
-    Variable var_callable_prototype(this, MachineRepresentation::kTagged);
     Label callable_prototype_valid(this);
-    var_callable_prototype.Bind(callable_prototype);
+    Variable var_callable_prototype(this, MachineRepresentation::kTagged,
+                                    callable_prototype);
 
     // Resolve the "prototype" if the {callable} has an initial map.  Afterwards
     // the {callable_prototype} will be either the JSReceiver prototype object
@@ -4900,7 +5453,7 @@
     // created so far and hence we should return false.
     Node* callable_prototype_instance_type =
         LoadInstanceType(callable_prototype);
-    GotoUnless(
+    GotoIfNot(
         Word32Equal(callable_prototype_instance_type, Int32Constant(MAP_TYPE)),
         &callable_prototype_valid);
     var_callable_prototype.Bind(
@@ -4916,8 +5469,7 @@
   StoreRoot(Heap::kInstanceofCacheMapRootIndex, object_map);
 
   // Loop through the prototype chain looking for the {callable} prototype.
-  Variable var_object_map(this, MachineRepresentation::kTagged);
-  var_object_map.Bind(object_map);
+  Variable var_object_map(this, MachineRepresentation::kTagged, object_map);
   Label loop(this, &var_object_map);
   Goto(&loop);
   Bind(&loop);
@@ -4926,7 +5478,7 @@
 
     // Check if the current {object} needs to be access checked.
     Node* object_bitfield = LoadMapBitField(object_map);
-    GotoUnless(
+    GotoIfNot(
         Word32Equal(Word32And(object_bitfield,
                               Int32Constant(1 << Map::kIsAccessCheckNeeded)),
                     Int32Constant(0)),
@@ -4971,10 +5523,10 @@
   return var_result.value();
 }
 
-compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
-                                                          ElementsKind kind,
-                                                          ParameterMode mode,
-                                                          int base_size) {
+Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
+                                                ElementsKind kind,
+                                                ParameterMode mode,
+                                                int base_size) {
   int element_size_shift = ElementsKindToShiftSize(kind);
   int element_size = 1 << element_size_shift;
   int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
@@ -4986,10 +5538,6 @@
     constant_index = ToSmiConstant(index_node, smi_index);
     if (constant_index) index = smi_index->value();
     index_node = BitcastTaggedToWord(index_node);
-  } else if (mode == INTEGER_PARAMETERS) {
-    int32_t temp = 0;
-    constant_index = ToInt32Constant(index_node, temp);
-    index = static_cast<intptr_t>(temp);
   } else {
     DCHECK(mode == INTPTR_PARAMETERS);
     constant_index = ToIntPtrConstant(index_node, index);
@@ -4997,9 +5545,6 @@
   if (constant_index) {
     return IntPtrConstant(base_size + element_size * index);
   }
-  if (Is64() && mode == INTEGER_PARAMETERS) {
-    index_node = ChangeInt32ToInt64(index_node);
-  }
 
   Node* shifted_index =
       (element_size_shift == 0)
@@ -5007,32 +5552,28 @@
           : ((element_size_shift > 0)
                  ? WordShl(index_node, IntPtrConstant(element_size_shift))
                  : WordShr(index_node, IntPtrConstant(-element_size_shift)));
-  return IntPtrAddFoldConstants(IntPtrConstant(base_size), shifted_index);
+  return IntPtrAdd(IntPtrConstant(base_size), shifted_index);
 }
 
-compiler::Node* CodeStubAssembler::LoadTypeFeedbackVectorForStub() {
+Node* CodeStubAssembler::LoadFeedbackVectorForStub() {
   Node* function =
       LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset);
-  Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
-  return LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
+  Node* cell = LoadObjectField(function, JSFunction::kFeedbackVectorOffset);
+  return LoadObjectField(cell, Cell::kValueOffset);
 }
 
-void CodeStubAssembler::UpdateFeedback(compiler::Node* feedback,
-                                       compiler::Node* type_feedback_vector,
-                                       compiler::Node* slot_id) {
+void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* feedback_vector,
+                                       Node* slot_id) {
   // This method is used for binary op and compare feedback. These
   // vector nodes are initialized with a smi 0, so we can simply OR
   // our new feedback in place.
-  // TODO(interpreter): Consider passing the feedback as Smi already to avoid
-  // the tagging completely.
-  Node* previous_feedback =
-      LoadFixedArrayElement(type_feedback_vector, slot_id);
-  Node* combined_feedback = SmiOr(previous_feedback, SmiFromWord32(feedback));
-  StoreFixedArrayElement(type_feedback_vector, slot_id, combined_feedback,
+  Node* previous_feedback = LoadFixedArrayElement(feedback_vector, slot_id);
+  Node* combined_feedback = SmiOr(previous_feedback, feedback);
+  StoreFixedArrayElement(feedback_vector, slot_id, combined_feedback,
                          SKIP_WRITE_BARRIER);
 }
 
-compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
+Node* CodeStubAssembler::LoadReceiverMap(Node* receiver) {
   Variable var_receiver_map(this, MachineRepresentation::kTagged);
   Label load_smi_map(this, Label::kDeferred), load_receiver_map(this),
       if_result(this);
@@ -5052,252 +5593,16 @@
   return var_receiver_map.value();
 }
 
-compiler::Node* CodeStubAssembler::TryMonomorphicCase(
-    compiler::Node* slot, compiler::Node* vector, compiler::Node* receiver_map,
-    Label* if_handler, Variable* var_handler, Label* if_miss) {
-  Comment("TryMonomorphicCase");
-  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
-
-  // TODO(ishell): add helper class that hides offset computations for a series
-  // of loads.
-  int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
-  // Adding |header_size| with a separate IntPtrAdd rather than passing it
-  // into ElementOffsetFromIndex() allows it to be folded into a single
-  // [base, index, offset] indirect memory access on x64.
-  Node* offset =
-      ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS);
-  Node* feedback = Load(MachineType::AnyTagged(), vector,
-                        IntPtrAdd(offset, IntPtrConstant(header_size)));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  GotoIf(WordNotEqual(receiver_map, LoadWeakCellValueUnchecked(feedback)),
-         if_miss);
-
-  Node* handler =
-      Load(MachineType::AnyTagged(), vector,
-           IntPtrAdd(offset, IntPtrConstant(header_size + kPointerSize)));
-
-  var_handler->Bind(handler);
-  Goto(if_handler);
-  return feedback;
-}
-
-void CodeStubAssembler::HandlePolymorphicCase(
-    compiler::Node* receiver_map, compiler::Node* feedback, Label* if_handler,
-    Variable* var_handler, Label* if_miss, int unroll_count) {
-  Comment("HandlePolymorphicCase");
-  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
-
-  // Iterate {feedback} array.
-  const int kEntrySize = 2;
-
-  for (int i = 0; i < unroll_count; i++) {
-    Label next_entry(this);
-    Node* cached_map = LoadWeakCellValue(LoadFixedArrayElement(
-        feedback, IntPtrConstant(i * kEntrySize), 0, INTPTR_PARAMETERS));
-    GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
-
-    // Found, now call handler.
-    Node* handler = LoadFixedArrayElement(
-        feedback, IntPtrConstant(i * kEntrySize + 1), 0, INTPTR_PARAMETERS);
-    var_handler->Bind(handler);
-    Goto(if_handler);
-
-    Bind(&next_entry);
-  }
-
-  // Loop from {unroll_count}*kEntrySize to {length}.
-  Node* init = IntPtrConstant(unroll_count * kEntrySize);
-  Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
-  BuildFastLoop(
-      MachineType::PointerRepresentation(), init, length,
-      [receiver_map, feedback, if_handler, var_handler](CodeStubAssembler* csa,
-                                                        Node* index) {
-        Node* cached_map = csa->LoadWeakCellValue(
-            csa->LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
-
-        Label next_entry(csa);
-        csa->GotoIf(csa->WordNotEqual(receiver_map, cached_map), &next_entry);
-
-        // Found, now call handler.
-        Node* handler = csa->LoadFixedArrayElement(
-            feedback, index, kPointerSize, INTPTR_PARAMETERS);
-        var_handler->Bind(handler);
-        csa->Goto(if_handler);
-
-        csa->Bind(&next_entry);
-      },
-      kEntrySize, IndexAdvanceMode::kPost);
-  // The loop falls through if no handler was found.
-  Goto(if_miss);
-}
-
-void CodeStubAssembler::HandleKeyedStorePolymorphicCase(
-    compiler::Node* receiver_map, compiler::Node* feedback, Label* if_handler,
-    Variable* var_handler, Label* if_transition_handler,
-    Variable* var_transition_map_cell, Label* if_miss) {
-  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
-  DCHECK_EQ(MachineRepresentation::kTagged, var_transition_map_cell->rep());
-
-  const int kEntrySize = 3;
-
-  Node* init = IntPtrConstant(0);
-  Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
-  BuildFastLoop(
-      MachineType::PointerRepresentation(), init, length,
-      [receiver_map, feedback, if_handler, var_handler, if_transition_handler,
-       var_transition_map_cell](CodeStubAssembler* csa, Node* index) {
-        Node* cached_map = csa->LoadWeakCellValue(
-            csa->LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
-        Label next_entry(csa);
-        csa->GotoIf(csa->WordNotEqual(receiver_map, cached_map), &next_entry);
-
-        Node* maybe_transition_map_cell = csa->LoadFixedArrayElement(
-            feedback, index, kPointerSize, INTPTR_PARAMETERS);
-
-        var_handler->Bind(csa->LoadFixedArrayElement(
-            feedback, index, 2 * kPointerSize, INTPTR_PARAMETERS));
-        csa->GotoIf(
-            csa->WordEqual(maybe_transition_map_cell,
-                           csa->LoadRoot(Heap::kUndefinedValueRootIndex)),
-            if_handler);
-        var_transition_map_cell->Bind(maybe_transition_map_cell);
-        csa->Goto(if_transition_handler);
-
-        csa->Bind(&next_entry);
-      },
-      kEntrySize, IndexAdvanceMode::kPost);
-  // The loop falls through if no handler was found.
-  Goto(if_miss);
-}
-
-compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
-                                                          compiler::Node* map) {
-  // See v8::internal::StubCache::PrimaryOffset().
-  STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
-  // Compute the hash of the name (use entire hash field).
-  Node* hash_field = LoadNameHashField(name);
-  CSA_ASSERT(this,
-             Word32Equal(Word32And(hash_field,
-                                   Int32Constant(Name::kHashNotComputedMask)),
-                         Int32Constant(0)));
-
-  // Using only the low bits in 64-bit mode is unlikely to increase the
-  // risk of collision even if the heap is spread over an area larger than
-  // 4GB (and not at all if it isn't).
-  Node* hash = Int32Add(hash_field, map);
-  // Base the offset on a simple combination of name and map.
-  hash = Word32Xor(hash, Int32Constant(StubCache::kPrimaryMagic));
-  uint32_t mask = (StubCache::kPrimaryTableSize - 1)
-                  << StubCache::kCacheIndexShift;
-  return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
-}
-
-compiler::Node* CodeStubAssembler::StubCacheSecondaryOffset(
-    compiler::Node* name, compiler::Node* seed) {
-  // See v8::internal::StubCache::SecondaryOffset().
-
-  // Use the seed from the primary cache in the secondary cache.
-  Node* hash = Int32Sub(seed, name);
-  hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic));
-  int32_t mask = (StubCache::kSecondaryTableSize - 1)
-                 << StubCache::kCacheIndexShift;
-  return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
-}
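
The two offset computations above, restated as standalone C++. The shift, table sizes, and magic constants below are placeholders rather than V8's actual values; only the shape of the arithmetic matches the removed code.

    #include <cstdint>

    constexpr uint32_t kCacheIndexShift = 2;          // stands in for Name::kHashShift
    constexpr uint32_t kPrimaryTableSize = 2048;      // placeholder
    constexpr uint32_t kSecondaryTableSize = 512;     // placeholder
    constexpr uint32_t kPrimaryMagic = 0x3d532433;    // placeholder
    constexpr uint32_t kSecondaryMagic = 0xb16ca6e5;  // placeholder

    uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_bits) {
      uint32_t hash = (name_hash_field + map_bits) ^ kPrimaryMagic;
      return hash & ((kPrimaryTableSize - 1) << kCacheIndexShift);
    }

    // |seed| is the primary offset, reused as input, exactly as above.
    uint32_t SecondaryOffset(uint32_t name_bits, uint32_t seed) {
      uint32_t hash = (seed - name_bits) + kSecondaryMagic;
      return hash & ((kSecondaryTableSize - 1) << kCacheIndexShift);
    }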
-
-enum CodeStubAssembler::StubCacheTable : int {
-  kPrimary = static_cast<int>(StubCache::kPrimary),
-  kSecondary = static_cast<int>(StubCache::kSecondary)
-};
-
-void CodeStubAssembler::TryProbeStubCacheTable(
-    StubCache* stub_cache, StubCacheTable table_id,
-    compiler::Node* entry_offset, compiler::Node* name, compiler::Node* map,
-    Label* if_handler, Variable* var_handler, Label* if_miss) {
-  StubCache::Table table = static_cast<StubCache::Table>(table_id);
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    Goto(if_miss);
-    return;
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    Goto(if_miss);
-    return;
-  }
-#endif
-  // The {entry_offset} holds the entry offset times four (due to masking
-  // and shifting optimizations).
-  const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
-  entry_offset = IntPtrMul(entry_offset, IntPtrConstant(kMultiplier));
-
-  // Check that the key in the entry matches the name.
-  Node* key_base =
-      ExternalConstant(ExternalReference(stub_cache->key_reference(table)));
-  Node* entry_key = Load(MachineType::Pointer(), key_base, entry_offset);
-  GotoIf(WordNotEqual(name, entry_key), if_miss);
-
-  // Get the map entry from the cache.
-  DCHECK_EQ(kPointerSize * 2, stub_cache->map_reference(table).address() -
-                                  stub_cache->key_reference(table).address());
-  Node* entry_map =
-      Load(MachineType::Pointer(), key_base,
-           IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize * 2)));
-  GotoIf(WordNotEqual(map, entry_map), if_miss);
-
-  DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
-                              stub_cache->key_reference(table).address());
-  Node* handler = Load(MachineType::TaggedPointer(), key_base,
-                       IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
-
-  // We found the handler.
-  var_handler->Bind(handler);
-  Goto(if_handler);
-}
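
Per the DCHECKs in the probe above, each stub cache entry is three pointers: the key at +0, the value (handler) at +kPointerSize, and the map at +2*kPointerSize. A simplified model of one table probe:

    #include <cstddef>

    struct StubCacheEntry {
      const void* key;    // Name
      const void* value;  // handler code
      const void* map;    // receiver map
    };

    // Returns the handler, or nullptr for the miss path.
    const void* ProbeTable(const StubCacheEntry* table, size_t entry_index,
                           const void* name, const void* map) {
      const StubCacheEntry& entry = table[entry_index];
      if (entry.key != name) return nullptr;
      if (entry.map != map) return nullptr;
      return entry.value;
    }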
-
-void CodeStubAssembler::TryProbeStubCache(
-    StubCache* stub_cache, compiler::Node* receiver, compiler::Node* name,
-    Label* if_handler, Variable* var_handler, Label* if_miss) {
-  Label try_secondary(this), miss(this);
-
-  Counters* counters = isolate()->counters();
-  IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
-  // Check that the {receiver} isn't a smi.
-  GotoIf(TaggedIsSmi(receiver), &miss);
-
-  Node* receiver_map = LoadMap(receiver);
-
-  // Probe the primary table.
-  Node* primary_offset = StubCachePrimaryOffset(name, receiver_map);
-  TryProbeStubCacheTable(stub_cache, kPrimary, primary_offset, name,
-                         receiver_map, if_handler, var_handler, &try_secondary);
-
-  Bind(&try_secondary);
-  {
-    // Probe the secondary table.
-    Node* secondary_offset = StubCacheSecondaryOffset(name, primary_offset);
-    TryProbeStubCacheTable(stub_cache, kSecondary, secondary_offset, name,
-                           receiver_map, if_handler, var_handler, &miss);
-  }
-
-  Bind(&miss);
-  {
-    IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-    Goto(if_miss);
-  }
-}
-
 Node* CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
   Variable var_intptr_key(this, MachineType::PointerRepresentation());
   Label done(this, &var_intptr_key), key_is_smi(this);
   GotoIf(TaggedIsSmi(key), &key_is_smi);
   // Try to convert a heap number to a Smi.
-  GotoUnless(WordEqual(LoadMap(key), HeapNumberMapConstant()), miss);
+  GotoIfNot(IsHeapNumberMap(LoadMap(key)), miss);
   {
     Node* value = LoadHeapNumberValue(key);
     Node* int_value = RoundFloat64ToInt32(value);
-    GotoUnless(Float64Equal(value, ChangeInt32ToFloat64(int_value)), miss);
+    GotoIfNot(Float64Equal(value, ChangeInt32ToFloat64(int_value)), miss);
     var_intptr_key.Bind(ChangeInt32ToIntPtr(int_value));
     Goto(&done);
   }
@@ -5312,1377 +5617,6 @@
   return var_intptr_key.value();
 }
 
-void CodeStubAssembler::EmitFastElementsBoundsCheck(Node* object,
-                                                    Node* elements,
-                                                    Node* intptr_index,
-                                                    Node* is_jsarray_condition,
-                                                    Label* miss) {
-  Variable var_length(this, MachineType::PointerRepresentation());
-  Comment("Fast elements bounds check");
-  Label if_array(this), length_loaded(this, &var_length);
-  GotoIf(is_jsarray_condition, &if_array);
-  {
-    var_length.Bind(SmiUntag(LoadFixedArrayBaseLength(elements)));
-    Goto(&length_loaded);
-  }
-  Bind(&if_array);
-  {
-    var_length.Bind(SmiUntag(LoadJSArrayLength(object)));
-    Goto(&length_loaded);
-  }
-  Bind(&length_loaded);
-  GotoUnless(UintPtrLessThan(intptr_index, var_length.value()), miss);
-}
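
In plain terms, the check above picks its length from one of two places, a JSArray's own length (which may be smaller than its backing store) or the backing FixedArray's length, then does a single unsigned comparison. A minimal sketch:

    #include <cstdint>

    bool FastElementsInBounds(uintptr_t index, bool is_jsarray,
                              intptr_t js_array_length,
                              intptr_t elements_length) {
      intptr_t length = is_jsarray ? js_array_length : elements_length;
      // Unsigned compare, like UintPtrLessThan: a negative length or index
      // cannot sneak through.
      return index < static_cast<uintptr_t>(length);
    }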
-
-void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
-                                        Node* elements_kind, Node* intptr_index,
-                                        Node* is_jsarray_condition,
-                                        Label* if_hole, Label* rebox_double,
-                                        Variable* var_double_value,
-                                        Label* unimplemented_elements_kind,
-                                        Label* out_of_bounds, Label* miss) {
-  Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
-      if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
-      if_dictionary(this);
-  GotoIf(
-      IntPtrGreaterThan(elements_kind, IntPtrConstant(LAST_FAST_ELEMENTS_KIND)),
-      &if_nonfast);
-
-  EmitFastElementsBoundsCheck(object, elements, intptr_index,
-                              is_jsarray_condition, out_of_bounds);
-  int32_t kinds[] = {// Handled by if_fast_packed.
-                     FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                     // Handled by if_fast_holey.
-                     FAST_HOLEY_SMI_ELEMENTS, FAST_HOLEY_ELEMENTS,
-                     // Handled by if_fast_double.
-                     FAST_DOUBLE_ELEMENTS,
-                     // Handled by if_fast_holey_double.
-                     FAST_HOLEY_DOUBLE_ELEMENTS};
-  Label* labels[] = {// FAST_{SMI,}_ELEMENTS
-                     &if_fast_packed, &if_fast_packed,
-                     // FAST_HOLEY_{SMI,}_ELEMENTS
-                     &if_fast_holey, &if_fast_holey,
-                     // FAST_DOUBLE_ELEMENTS
-                     &if_fast_double,
-                     // FAST_HOLEY_DOUBLE_ELEMENTS
-                     &if_fast_holey_double};
-  Switch(elements_kind, unimplemented_elements_kind, kinds, labels,
-         arraysize(kinds));
-
-  Bind(&if_fast_packed);
-  {
-    Comment("fast packed elements");
-    Return(LoadFixedArrayElement(elements, intptr_index, 0, INTPTR_PARAMETERS));
-  }
-
-  Bind(&if_fast_holey);
-  {
-    Comment("fast holey elements");
-    Node* element =
-        LoadFixedArrayElement(elements, intptr_index, 0, INTPTR_PARAMETERS);
-    GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
-    Return(element);
-  }
-
-  Bind(&if_fast_double);
-  {
-    Comment("packed double elements");
-    var_double_value->Bind(LoadFixedDoubleArrayElement(
-        elements, intptr_index, MachineType::Float64(), 0, INTPTR_PARAMETERS));
-    Goto(rebox_double);
-  }
-
-  Bind(&if_fast_holey_double);
-  {
-    Comment("holey double elements");
-    Node* value = LoadFixedDoubleArrayElement(elements, intptr_index,
-                                              MachineType::Float64(), 0,
-                                              INTPTR_PARAMETERS, if_hole);
-    var_double_value->Bind(value);
-    Goto(rebox_double);
-  }
-
-  Bind(&if_nonfast);
-  {
-    STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
-    GotoIf(IntPtrGreaterThanOrEqual(
-               elements_kind,
-               IntPtrConstant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
-           &if_typed_array);
-    GotoIf(IntPtrEqual(elements_kind, IntPtrConstant(DICTIONARY_ELEMENTS)),
-           &if_dictionary);
-    Goto(unimplemented_elements_kind);
-  }
-
-  Bind(&if_dictionary);
-  {
-    Comment("dictionary elements");
-    GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), out_of_bounds);
-    Variable var_entry(this, MachineType::PointerRepresentation());
-    Label if_found(this);
-    NumberDictionaryLookup<SeededNumberDictionary>(
-        elements, intptr_index, &if_found, &var_entry, if_hole);
-    Bind(&if_found);
-    // Check that the value is a data property.
-    Node* details_index = EntryToIndex<SeededNumberDictionary>(
-        var_entry.value(), SeededNumberDictionary::kEntryDetailsIndex);
-    Node* details = SmiToWord32(
-        LoadFixedArrayElement(elements, details_index, 0, INTPTR_PARAMETERS));
-    Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
-    // TODO(jkummerow): Support accessors without missing?
-    GotoUnless(Word32Equal(kind, Int32Constant(kData)), miss);
-    // Finally, load the value.
-    Node* value_index = EntryToIndex<SeededNumberDictionary>(
-        var_entry.value(), SeededNumberDictionary::kEntryValueIndex);
-    Return(LoadFixedArrayElement(elements, value_index, 0, INTPTR_PARAMETERS));
-  }
-
-  Bind(&if_typed_array);
-  {
-    Comment("typed elements");
-    // Check if buffer has been neutered.
-    Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
-    Node* bitfield = LoadObjectField(buffer, JSArrayBuffer::kBitFieldOffset,
-                                     MachineType::Uint32());
-    Node* neutered_bit =
-        Word32And(bitfield, Int32Constant(JSArrayBuffer::WasNeutered::kMask));
-    GotoUnless(Word32Equal(neutered_bit, Int32Constant(0)), miss);
-
-    // Bounds check.
-    Node* length =
-        SmiUntag(LoadObjectField(object, JSTypedArray::kLengthOffset));
-    GotoUnless(UintPtrLessThan(intptr_index, length), out_of_bounds);
-
-    // Backing store = external_pointer + base_pointer.
-    Node* external_pointer =
-        LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
-                        MachineType::Pointer());
-    Node* base_pointer =
-        LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
-    Node* backing_store = IntPtrAdd(external_pointer, base_pointer);
-
-    Label uint8_elements(this), int8_elements(this), uint16_elements(this),
-        int16_elements(this), uint32_elements(this), int32_elements(this),
-        float32_elements(this), float64_elements(this);
-    Label* elements_kind_labels[] = {
-        &uint8_elements,  &uint8_elements,   &int8_elements,
-        &uint16_elements, &int16_elements,   &uint32_elements,
-        &int32_elements,  &float32_elements, &float64_elements};
-    int32_t elements_kinds[] = {
-        UINT8_ELEMENTS,  UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
-        UINT16_ELEMENTS, INT16_ELEMENTS,         UINT32_ELEMENTS,
-        INT32_ELEMENTS,  FLOAT32_ELEMENTS,       FLOAT64_ELEMENTS};
-    const size_t kTypedElementsKindCount =
-        LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
-        FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
-    DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
-    DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
-    Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
-           kTypedElementsKindCount);
-    Bind(&uint8_elements);
-    {
-      Comment("UINT8_ELEMENTS");  // Handles UINT8_CLAMPED_ELEMENTS too.
-      Return(SmiTag(Load(MachineType::Uint8(), backing_store, intptr_index)));
-    }
-    Bind(&int8_elements);
-    {
-      Comment("INT8_ELEMENTS");
-      Return(SmiTag(Load(MachineType::Int8(), backing_store, intptr_index)));
-    }
-    Bind(&uint16_elements);
-    {
-      Comment("UINT16_ELEMENTS");
-      Node* index = WordShl(intptr_index, IntPtrConstant(1));
-      Return(SmiTag(Load(MachineType::Uint16(), backing_store, index)));
-    }
-    Bind(&int16_elements);
-    {
-      Comment("INT16_ELEMENTS");
-      Node* index = WordShl(intptr_index, IntPtrConstant(1));
-      Return(SmiTag(Load(MachineType::Int16(), backing_store, index)));
-    }
-    Bind(&uint32_elements);
-    {
-      Comment("UINT32_ELEMENTS");
-      Node* index = WordShl(intptr_index, IntPtrConstant(2));
-      Node* element = Load(MachineType::Uint32(), backing_store, index);
-      Return(ChangeUint32ToTagged(element));
-    }
-    Bind(&int32_elements);
-    {
-      Comment("INT32_ELEMENTS");
-      Node* index = WordShl(intptr_index, IntPtrConstant(2));
-      Node* element = Load(MachineType::Int32(), backing_store, index);
-      Return(ChangeInt32ToTagged(element));
-    }
-    Bind(&float32_elements);
-    {
-      Comment("FLOAT32_ELEMENTS");
-      Node* index = WordShl(intptr_index, IntPtrConstant(2));
-      Node* element = Load(MachineType::Float32(), backing_store, index);
-      var_double_value->Bind(ChangeFloat32ToFloat64(element));
-      Goto(rebox_double);
-    }
-    Bind(&float64_elements);
-    {
-      Comment("FLOAT64_ELEMENTS");
-      Node* index = WordShl(intptr_index, IntPtrConstant(3));
-      Node* element = Load(MachineType::Float64(), backing_store, index);
-      var_double_value->Bind(element);
-      Goto(rebox_double);
-    }
-  }
-}
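
The typed-array tail of the function above computes the backing store as external_pointer + base_pointer and scales the index by the element width (the WordShl amounts 1/2/3). One case, FLOAT64_ELEMENTS, as standalone C++; memcpy is used for a well-defined unaligned-safe load:

    #include <cstdint>
    #include <cstring>

    double LoadFloat64Element(const uint8_t* external_pointer,
                              uintptr_t base_pointer, uintptr_t index) {
      const uint8_t* backing_store = external_pointer + base_pointer;
      double value;
      std::memcpy(&value, backing_store + (index << 3), sizeof(value));
      return value;
    }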
-
-void CodeStubAssembler::HandleLoadICHandlerCase(
-    const LoadICParameters* p, Node* handler, Label* miss,
-    ElementSupport support_elements) {
-  Comment("have_handler");
-  Variable var_holder(this, MachineRepresentation::kTagged);
-  var_holder.Bind(p->receiver);
-  Variable var_smi_handler(this, MachineRepresentation::kTagged);
-  var_smi_handler.Bind(handler);
-
-  Variable* vars[] = {&var_holder, &var_smi_handler};
-  Label if_smi_handler(this, 2, vars);
-  Label try_proto_handler(this), call_handler(this);
-
-  Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
-
-  // |handler| is a Smi, encoding what to do. See SmiHandler methods
-  // for the encoding format.
-  Bind(&if_smi_handler);
-  {
-    HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
-                               miss, support_elements);
-  }
-
-  Bind(&try_proto_handler);
-  {
-    GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
-    HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
-                             &if_smi_handler, miss);
-  }
-
-  Bind(&call_handler);
-  {
-    typedef LoadWithVectorDescriptor Descriptor;
-    TailCallStub(Descriptor(isolate()), handler, p->context,
-                 Arg(Descriptor::kReceiver, p->receiver),
-                 Arg(Descriptor::kName, p->name),
-                 Arg(Descriptor::kSlot, p->slot),
-                 Arg(Descriptor::kVector, p->vector));
-  }
-}
-
-void CodeStubAssembler::HandleLoadICSmiHandlerCase(
-    const LoadICParameters* p, Node* holder, Node* smi_handler, Label* miss,
-    ElementSupport support_elements) {
-  Variable var_double_value(this, MachineRepresentation::kFloat64);
-  Label rebox_double(this, &var_double_value);
-
-  Node* handler_word = SmiUntag(smi_handler);
-  Node* handler_kind = DecodeWord<LoadHandler::KindBits>(handler_word);
-  if (support_elements == kSupportElements) {
-    Label property(this);
-    GotoUnless(
-        WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForElements)),
-        &property);
-
-    Comment("element_load");
-    Node* intptr_index = TryToIntptr(p->name, miss);
-    Node* elements = LoadElements(holder);
-    Node* is_jsarray_condition =
-        IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
-    Node* elements_kind =
-        DecodeWord<LoadHandler::ElementsKindBits>(handler_word);
-    Label if_hole(this), unimplemented_elements_kind(this);
-    Label* out_of_bounds = miss;
-    EmitElementLoad(holder, elements, elements_kind, intptr_index,
-                    is_jsarray_condition, &if_hole, &rebox_double,
-                    &var_double_value, &unimplemented_elements_kind,
-                    out_of_bounds, miss);
-
-    Bind(&unimplemented_elements_kind);
-    {
-      // Smi handlers should only be installed for supported elements kinds.
-      // Crash if we get here.
-      DebugBreak();
-      Goto(miss);
-    }
-
-    Bind(&if_hole);
-    {
-      Comment("convert hole");
-      GotoUnless(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
-      Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
-      DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
-      GotoUnless(
-          WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
-                    SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
-          miss);
-      Return(UndefinedConstant());
-    }
-
-    Bind(&property);
-    Comment("property_load");
-  }
-
-  Label constant(this), field(this);
-  Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForFields)),
-         &field, &constant);
-
-  Bind(&field);
-  {
-    Comment("field_load");
-    Node* offset = DecodeWord<LoadHandler::FieldOffsetBits>(handler_word);
-
-    Label inobject(this), out_of_object(this);
-    Branch(IsSetWord<LoadHandler::IsInobjectBits>(handler_word), &inobject,
-           &out_of_object);
-
-    Bind(&inobject);
-    {
-      Label is_double(this);
-      GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
-      Return(LoadObjectField(holder, offset));
-
-      Bind(&is_double);
-      if (FLAG_unbox_double_fields) {
-        var_double_value.Bind(
-            LoadObjectField(holder, offset, MachineType::Float64()));
-      } else {
-        Node* mutable_heap_number = LoadObjectField(holder, offset);
-        var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
-      }
-      Goto(&rebox_double);
-    }
-
-    Bind(&out_of_object);
-    {
-      Label is_double(this);
-      Node* properties = LoadProperties(holder);
-      Node* value = LoadObjectField(properties, offset);
-      GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
-      Return(value);
-
-      Bind(&is_double);
-      var_double_value.Bind(LoadHeapNumberValue(value));
-      Goto(&rebox_double);
-    }
-
-    Bind(&rebox_double);
-    Return(AllocateHeapNumberWithValue(var_double_value.value()));
-  }
-
-  Bind(&constant);
-  {
-    Comment("constant_load");
-    Node* descriptors = LoadMapDescriptors(LoadMap(holder));
-    Node* descriptor =
-        DecodeWord<LoadHandler::DescriptorValueIndexBits>(handler_word);
-    CSA_ASSERT(this,
-               UintPtrLessThan(descriptor,
-                               LoadAndUntagFixedArrayBaseLength(descriptors)));
-    Node* value =
-        LoadFixedArrayElement(descriptors, descriptor, 0, INTPTR_PARAMETERS);
-
-    Label if_accessor_info(this);
-    GotoIf(IsSetWord<LoadHandler::IsAccessorInfoBits>(handler_word),
-           &if_accessor_info);
-    Return(value);
-
-    Bind(&if_accessor_info);
-    Callable callable = CodeFactory::ApiGetter(isolate());
-    TailCallStub(callable, p->context, p->receiver, holder, value);
-  }
-}
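
The smi_handler decoded throughout this function is one machine word of packed bit fields. The field positions below are invented for illustration (the real ones live in LoadHandler); only the decode pattern, shift-and-mask per DecodeWord/IsSetWord, matches:

    #include <cstdint>

    // Hypothetical field layout, low bits first: kind, flags, field offset.
    constexpr uintptr_t kKindShift = 0, kKindMask = 0xf;
    constexpr uintptr_t kIsInobjectBit = uintptr_t{1} << 4;
    constexpr uintptr_t kIsDoubleBit = uintptr_t{1} << 5;
    constexpr uintptr_t kFieldOffsetShift = 6;

    uintptr_t DecodeKind(uintptr_t handler_word) {
      return (handler_word >> kKindShift) & kKindMask;
    }
    bool IsInobject(uintptr_t handler_word) {
      return (handler_word & kIsInobjectBit) != 0;
    }
    bool IsDouble(uintptr_t handler_word) {
      return (handler_word & kIsDoubleBit) != 0;
    }
    uintptr_t DecodeFieldOffset(uintptr_t handler_word) {
      return handler_word >> kFieldOffsetShift;
    }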
-
-void CodeStubAssembler::HandleLoadICProtoHandler(
-    const LoadICParameters* p, Node* handler, Variable* var_holder,
-    Variable* var_smi_handler, Label* if_smi_handler, Label* miss) {
-  DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
-  DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
-
-  // IC dispatchers rely on these assumptions to be held.
-  STATIC_ASSERT(FixedArray::kLengthOffset == LoadHandler::kHolderCellOffset);
-  DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kSmiHandlerIndex),
-            LoadHandler::kSmiHandlerOffset);
-  DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kValidityCellIndex),
-            LoadHandler::kValidityCellOffset);
-
-  // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
-  Label validity_cell_check_done(this);
-  Node* validity_cell =
-      LoadObjectField(handler, LoadHandler::kValidityCellOffset);
-  GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
-         &validity_cell_check_done);
-  Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
-  GotoIf(WordNotEqual(cell_value,
-                      SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
-         miss);
-  Goto(&validity_cell_check_done);
-
-  Bind(&validity_cell_check_done);
-  Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
-  CSA_ASSERT(this, TaggedIsSmi(smi_handler));
-  Node* handler_flags = SmiUntag(smi_handler);
-
-  Label check_prototypes(this);
-  GotoUnless(
-      IsSetWord<LoadHandler::DoNegativeLookupOnReceiverBits>(handler_flags),
-      &check_prototypes);
-  {
-    CSA_ASSERT(this, Word32BinaryNot(
-                         HasInstanceType(p->receiver, JS_GLOBAL_OBJECT_TYPE)));
-    // We have a dictionary-mode receiver; do a negative lookup check.
-    NameDictionaryNegativeLookup(p->receiver, p->name, miss);
-    Goto(&check_prototypes);
-  }
-
-  Bind(&check_prototypes);
-  Node* maybe_holder_cell =
-      LoadObjectField(handler, LoadHandler::kHolderCellOffset);
-  Label array_handler(this), tuple_handler(this);
-  Branch(TaggedIsSmi(maybe_holder_cell), &array_handler, &tuple_handler);
-
-  Bind(&tuple_handler);
-  {
-    Label load_existent(this);
-    GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
-    // This is a handler for a load of a non-existent value.
-    Return(UndefinedConstant());
-
-    Bind(&load_existent);
-    Node* holder = LoadWeakCellValue(maybe_holder_cell);
-    // The |holder| is guaranteed to be alive at this point since we passed
-    // both the receiver map check and the validity cell check.
-    CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
-
-    var_holder->Bind(holder);
-    var_smi_handler->Bind(smi_handler);
-    Goto(if_smi_handler);
-  }
-
-  Bind(&array_handler);
-  {
-    typedef LoadICProtoArrayDescriptor Descriptor;
-    LoadICProtoArrayStub stub(isolate());
-    Node* target = HeapConstant(stub.GetCode());
-    TailCallStub(Descriptor(isolate()), target, p->context,
-                 Arg(Descriptor::kReceiver, p->receiver),
-                 Arg(Descriptor::kName, p->name),
-                 Arg(Descriptor::kSlot, p->slot),
-                 Arg(Descriptor::kVector, p->vector),
-                 Arg(Descriptor::kHandler, handler));
-  }
-}
-
-void CodeStubAssembler::LoadICProtoArray(const LoadICParameters* p,
-                                         Node* handler) {
-  Label miss(this);
-  CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
-  CSA_ASSERT(this, IsFixedArrayMap(LoadMap(handler)));
-
-  Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
-  Node* handler_flags = SmiUntag(smi_handler);
-
-  Node* handler_length = LoadAndUntagFixedArrayBaseLength(handler);
-
-  Node* holder = EmitLoadICProtoArrayCheck(p, handler, handler_length,
-                                           handler_flags, &miss);
-
-  HandleLoadICSmiHandlerCase(p, holder, smi_handler, &miss, kOnlyProperties);
-
-  Bind(&miss);
-  {
-    TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
-                    p->slot, p->vector);
-  }
-}
-
-Node* CodeStubAssembler::EmitLoadICProtoArrayCheck(const LoadICParameters* p,
-                                                   Node* handler,
-                                                   Node* handler_length,
-                                                   Node* handler_flags,
-                                                   Label* miss) {
-  Variable start_index(this, MachineType::PointerRepresentation());
-  start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
-
-  Label can_access(this);
-  GotoUnless(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
-             &can_access);
-  {
-    // Skip the first entry of the handler: it holds the expected native
-    // context (checked below), not a prototype cell.
-    start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
-
-    int offset =
-        FixedArray::OffsetOfElementAt(LoadHandler::kFirstPrototypeIndex);
-    Node* expected_native_context =
-        LoadWeakCellValue(LoadObjectField(handler, offset), miss);
-    CSA_ASSERT(this, IsNativeContext(expected_native_context));
-
-    Node* native_context = LoadNativeContext(p->context);
-    GotoIf(WordEqual(expected_native_context, native_context), &can_access);
-    // If the receiver is not a JSGlobalProxy then we miss.
-    GotoUnless(IsJSGlobalProxy(p->receiver), miss);
-    // For JSGlobalProxy receiver try to compare security tokens of current
-    // and expected native contexts.
-    Node* expected_token = LoadContextElement(expected_native_context,
-                                              Context::SECURITY_TOKEN_INDEX);
-    Node* current_token =
-        LoadContextElement(native_context, Context::SECURITY_TOKEN_INDEX);
-    Branch(WordEqual(expected_token, current_token), &can_access, miss);
-  }
-  Bind(&can_access);
-
-  BuildFastLoop(
-      MachineType::PointerRepresentation(), start_index.value(), handler_length,
-      [this, p, handler, miss](CodeStubAssembler*, Node* current) {
-        Node* prototype_cell =
-            LoadFixedArrayElement(handler, current, 0, INTPTR_PARAMETERS);
-        CheckPrototype(prototype_cell, p->name, miss);
-      },
-      1, IndexAdvanceMode::kPost);
-
-  Node* maybe_holder_cell = LoadFixedArrayElement(
-      handler, IntPtrConstant(LoadHandler::kHolderCellIndex), 0,
-      INTPTR_PARAMETERS);
-  Label load_existent(this);
-  GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
-  // This is a handler for a load of a non-existent value.
-  Return(UndefinedConstant());
-
-  Bind(&load_existent);
-  Node* holder = LoadWeakCellValue(maybe_holder_cell);
-  // The |holder| is guaranteed to be alive at this point since we passed
-  // the receiver map check, the validity cell check and the prototype chain
-  // check.
-  CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
-  return holder;
-}
-
-void CodeStubAssembler::CheckPrototype(Node* prototype_cell, Node* name,
-                                       Label* miss) {
-  Node* maybe_prototype = LoadWeakCellValue(prototype_cell, miss);
-
-  Label done(this);
-  Label if_property_cell(this), if_dictionary_object(this);
-
-  // |maybe_prototype| is either a PropertyCell or a slow-mode prototype.
-  Branch(WordEqual(LoadMap(maybe_prototype),
-                   LoadRoot(Heap::kGlobalPropertyCellMapRootIndex)),
-         &if_property_cell, &if_dictionary_object);
-
-  Bind(&if_dictionary_object);
-  {
-    CSA_ASSERT(this, IsDictionaryMap(LoadMap(maybe_prototype)));
-    NameDictionaryNegativeLookup(maybe_prototype, name, miss);
-    Goto(&done);
-  }
-
-  Bind(&if_property_cell);
-  {
-    // Ensure the property cell still contains the hole.
-    Node* value = LoadObjectField(maybe_prototype, PropertyCell::kValueOffset);
-    GotoIf(WordNotEqual(value, LoadRoot(Heap::kTheHoleValueRootIndex)), miss);
-    Goto(&done);
-  }
-
-  Bind(&done);
-}
-
-void CodeStubAssembler::NameDictionaryNegativeLookup(Node* object, Node* name,
-                                                     Label* miss) {
-  CSA_ASSERT(this, IsDictionaryMap(LoadMap(object)));
-  Node* properties = LoadProperties(object);
-  // Ensure the property does not exist in a dictionary-mode object.
-  Variable var_name_index(this, MachineType::PointerRepresentation());
-  Label done(this);
-  NameDictionaryLookup<NameDictionary>(properties, name, miss, &var_name_index,
-                                       &done);
-  Bind(&done);
-}
-
-void CodeStubAssembler::LoadIC(const LoadICParameters* p) {
-  Variable var_handler(this, MachineRepresentation::kTagged);
-  // TODO(ishell): defer blocks when it works.
-  Label if_handler(this, &var_handler), try_polymorphic(this),
-      try_megamorphic(this /*, Label::kDeferred*/),
-      miss(this /*, Label::kDeferred*/);
-
-  Node* receiver_map = LoadReceiverMap(p->receiver);
-
-  // Check monomorphic case.
-  Node* feedback =
-      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
-                         &var_handler, &try_polymorphic);
-  Bind(&if_handler);
-  {
-    HandleLoadICHandlerCase(p, var_handler.value(), &miss);
-  }
-
-  Bind(&try_polymorphic);
-  {
-    // Check polymorphic case.
-    Comment("LoadIC_try_polymorphic");
-    GotoUnless(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
-               &try_megamorphic);
-    HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
-                          &miss, 2);
-  }
-
-  Bind(&try_megamorphic);
-  {
-    // Check megamorphic case.
-    GotoUnless(
-        WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
-        &miss);
-
-    TryProbeStubCache(isolate()->load_stub_cache(), p->receiver, p->name,
-                      &if_handler, &var_handler, &miss);
-  }
-  Bind(&miss);
-  {
-    TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
-                    p->slot, p->vector);
-  }
-}
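
Flattened, LoadIC's dispatch is a four-state check on the feedback slot: a WeakCell whose value matches the receiver map (monomorphic), a FixedArray of map/handler pairs (polymorphic), the megamorphic sentinel symbol (probe the stub cache), else a miss to the runtime. A boolean-level sketch:

    enum class IcState { kMonomorphic, kPolymorphic, kMegamorphic, kMiss };

    IcState ClassifyFeedback(bool weak_cell_matches_receiver_map,
                             bool feedback_is_fixed_array,
                             bool feedback_is_megamorphic_sentinel) {
      if (weak_cell_matches_receiver_map) return IcState::kMonomorphic;
      if (feedback_is_fixed_array) return IcState::kPolymorphic;
      if (feedback_is_megamorphic_sentinel) return IcState::kMegamorphic;
      return IcState::kMiss;
    }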
-
-void CodeStubAssembler::KeyedLoadIC(const LoadICParameters* p) {
-  Variable var_handler(this, MachineRepresentation::kTagged);
-  // TODO(ishell): defer blocks when it works.
-  Label if_handler(this, &var_handler), try_polymorphic(this),
-      try_megamorphic(this /*, Label::kDeferred*/),
-      try_polymorphic_name(this /*, Label::kDeferred*/),
-      miss(this /*, Label::kDeferred*/);
-
-  Node* receiver_map = LoadReceiverMap(p->receiver);
-
-  // Check monomorphic case.
-  Node* feedback =
-      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
-                         &var_handler, &try_polymorphic);
-  Bind(&if_handler);
-  {
-    HandleLoadICHandlerCase(p, var_handler.value(), &miss, kSupportElements);
-  }
-
-  Bind(&try_polymorphic);
-  {
-    // Check polymorphic case.
-    Comment("KeyedLoadIC_try_polymorphic");
-    GotoUnless(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
-               &try_megamorphic);
-    HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
-                          &miss, 2);
-  }
-
-  Bind(&try_megamorphic);
-  {
-    // Check megamorphic case.
-    Comment("KeyedLoadIC_try_megamorphic");
-    GotoUnless(
-        WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
-        &try_polymorphic_name);
-    // TODO(jkummerow): Inline this? Or some of it?
-    TailCallStub(CodeFactory::KeyedLoadIC_Megamorphic(isolate()), p->context,
-                 p->receiver, p->name, p->slot, p->vector);
-  }
-  Bind(&try_polymorphic_name);
-  {
-    // We might have a name in feedback, and a fixed array in the next slot.
-    Comment("KeyedLoadIC_try_polymorphic_name");
-    GotoUnless(WordEqual(feedback, p->name), &miss);
-    // If the name comparison succeeded, we know we have a fixed array with
-    // at least one map/handler pair.
-    Node* offset = ElementOffsetFromIndex(
-        p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
-        FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
-    Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
-    HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
-                          1);
-  }
-  Bind(&miss);
-  {
-    Comment("KeyedLoadIC_miss");
-    TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
-                    p->name, p->slot, p->vector);
-  }
-}
-
-void CodeStubAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
-  Variable var_index(this, MachineType::PointerRepresentation());
-  Variable var_details(this, MachineRepresentation::kWord32);
-  Variable var_value(this, MachineRepresentation::kTagged);
-  Label if_index(this), if_unique_name(this), if_element_hole(this),
-      if_oob(this), slow(this), stub_cache_miss(this),
-      if_property_dictionary(this), if_found_on_receiver(this);
-
-  Node* receiver = p->receiver;
-  GotoIf(TaggedIsSmi(receiver), &slow);
-  Node* receiver_map = LoadMap(receiver);
-  Node* instance_type = LoadMapInstanceType(receiver_map);
-  // Receivers requiring non-standard element accesses (interceptors, access
-  // checks, strings and string wrappers, proxies) are handled in the runtime.
-  GotoIf(Int32LessThanOrEqual(instance_type,
-                              Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
-         &slow);
-
-  Node* key = p->name;
-  TryToName(key, &if_index, &var_index, &if_unique_name, &slow);
-
-  Bind(&if_index);
-  {
-    Comment("integer index");
-    Node* index = var_index.value();
-    Node* elements = LoadElements(receiver);
-    Node* elements_kind = LoadMapElementsKind(receiver_map);
-    Node* is_jsarray_condition =
-        Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE));
-    Variable var_double_value(this, MachineRepresentation::kFloat64);
-    Label rebox_double(this, &var_double_value);
-
-    // Unimplemented elements kinds fall back to a runtime call.
-    Label* unimplemented_elements_kind = &slow;
-    IncrementCounter(isolate()->counters()->ic_keyed_load_generic_smi(), 1);
-    EmitElementLoad(receiver, elements, elements_kind, index,
-                    is_jsarray_condition, &if_element_hole, &rebox_double,
-                    &var_double_value, unimplemented_elements_kind, &if_oob,
-                    &slow);
-
-    Bind(&rebox_double);
-    Return(AllocateHeapNumberWithValue(var_double_value.value()));
-  }
-
-  Bind(&if_oob);
-  {
-    Comment("out of bounds");
-    Node* index = var_index.value();
-    // Negative keys can't take the fast OOB path.
-    GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), &slow);
-    // Positive OOB indices are effectively the same as hole loads.
-    Goto(&if_element_hole);
-  }
-
-  Bind(&if_element_hole);
-  {
-    Comment("found the hole");
-    Label return_undefined(this);
-    BranchIfPrototypesHaveNoElements(receiver_map, &return_undefined, &slow);
-
-    Bind(&return_undefined);
-    Return(UndefinedConstant());
-  }
-
-  Node* properties = nullptr;
-  Bind(&if_unique_name);
-  {
-    Comment("key is unique name");
-    // Check if the receiver has fast or slow properties.
-    properties = LoadProperties(receiver);
-    Node* properties_map = LoadMap(properties);
-    GotoIf(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
-           &if_property_dictionary);
-
-    // Try looking up the property on the receiver; if unsuccessful, look
-    // for a handler in the stub cache.
-    Comment("DescriptorArray lookup");
-
-    // Skip linear search if there are too many descriptors.
-    // TODO(jkummerow): Consider implementing binary search.
-    // See also TryLookupProperty() which has the same limitation.
-    const int32_t kMaxLinear = 210;
-    Label stub_cache(this);
-    Node* bitfield3 = LoadMapBitField3(receiver_map);
-    Node* nof =
-        DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
-    GotoIf(UintPtrGreaterThan(nof, IntPtrConstant(kMaxLinear)), &stub_cache);
-    Node* descriptors = LoadMapDescriptors(receiver_map);
-    Variable var_name_index(this, MachineType::PointerRepresentation());
-    Label if_descriptor_found(this);
-    DescriptorLookupLinear(key, descriptors, nof, &if_descriptor_found,
-                           &var_name_index, &stub_cache);
-
-    Bind(&if_descriptor_found);
-    {
-      LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
-                                 var_name_index.value(), &var_details,
-                                 &var_value);
-      Goto(&if_found_on_receiver);
-    }
-
-    Bind(&stub_cache);
-    {
-      Comment("stub cache probe for fast property load");
-      Variable var_handler(this, MachineRepresentation::kTagged);
-      Label found_handler(this, &var_handler), stub_cache_miss(this);
-      TryProbeStubCache(isolate()->load_stub_cache(), receiver, key,
-                        &found_handler, &var_handler, &stub_cache_miss);
-      Bind(&found_handler);
-      { HandleLoadICHandlerCase(p, var_handler.value(), &slow); }
-
-      Bind(&stub_cache_miss);
-      {
-        Comment("KeyedLoadGeneric_miss");
-        TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
-                        p->name, p->slot, p->vector);
-      }
-    }
-  }
-
-  Bind(&if_property_dictionary);
-  {
-    Comment("dictionary property load");
-    // We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
-    // seeing global objects here (which would need special handling).
-
-    Variable var_name_index(this, MachineType::PointerRepresentation());
-    Label dictionary_found(this, &var_name_index);
-    NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
-                                         &var_name_index, &slow);
-    Bind(&dictionary_found);
-    {
-      LoadPropertyFromNameDictionary(properties, var_name_index.value(),
-                                     &var_details, &var_value);
-      Goto(&if_found_on_receiver);
-    }
-  }
-
-  Bind(&if_found_on_receiver);
-  {
-    Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
-                                       p->context, receiver, &slow);
-    IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
-    Return(value);
-  }
-
-  Bind(&slow);
-  {
-    Comment("KeyedLoadGeneric_slow");
-    IncrementCounter(isolate()->counters()->ic_keyed_load_generic_slow(), 1);
-    // TODO(jkummerow): Should we use the GetProperty TF stub instead?
-    TailCallRuntime(Runtime::kKeyedGetProperty, p->context, p->receiver,
-                    p->name);
-  }
-}
-
-void CodeStubAssembler::HandleStoreFieldAndReturn(Node* handler_word,
-                                                  Node* holder,
-                                                  Representation representation,
-                                                  Node* value, Node* transition,
-                                                  Label* miss) {
-  bool transition_to_field = transition != nullptr;
-  Node* prepared_value = PrepareValueForWrite(value, representation, miss);
-
-  if (transition_to_field) {
-    Label storage_extended(this);
-    GotoUnless(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word),
-               &storage_extended);
-    Comment("[ Extend storage");
-    ExtendPropertiesBackingStore(holder);
-    Comment("] Extend storage");
-    Goto(&storage_extended);
-
-    Bind(&storage_extended);
-  }
-
-  Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
-  Label if_inobject(this), if_out_of_object(this);
-  Branch(IsSetWord<StoreHandler::IsInobjectBits>(handler_word), &if_inobject,
-         &if_out_of_object);
-
-  Bind(&if_inobject);
-  {
-    StoreNamedField(holder, offset, true, representation, prepared_value,
-                    transition_to_field);
-    if (transition_to_field) {
-      StoreObjectField(holder, JSObject::kMapOffset, transition);
-    }
-    Return(value);
-  }
-
-  Bind(&if_out_of_object);
-  {
-    StoreNamedField(holder, offset, false, representation, prepared_value,
-                    transition_to_field);
-    if (transition_to_field) {
-      StoreObjectField(holder, JSObject::kMapOffset, transition);
-    }
-    Return(value);
-  }
-}
-
-void CodeStubAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
-                                                    Node* holder, Node* value,
-                                                    Node* transition,
-                                                    Label* miss) {
-  Comment(transition ? "transitioning field store" : "field store");
-
-#ifdef DEBUG
-  Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
-  if (transition) {
-    CSA_ASSERT(
-        this,
-        WordOr(WordEqual(handler_kind,
-                         IntPtrConstant(StoreHandler::kTransitionToField)),
-               WordEqual(handler_kind,
-                         IntPtrConstant(StoreHandler::kTransitionToConstant))));
-  } else {
-    CSA_ASSERT(this, WordEqual(handler_kind,
-                               IntPtrConstant(StoreHandler::kStoreField)));
-  }
-#endif
-
-  Node* field_representation =
-      DecodeWord<StoreHandler::FieldRepresentationBits>(handler_word);
-
-  Label if_smi_field(this), if_double_field(this), if_heap_object_field(this),
-      if_tagged_field(this);
-
-  GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kTagged)),
-         &if_tagged_field);
-  GotoIf(WordEqual(field_representation,
-                   IntPtrConstant(StoreHandler::kHeapObject)),
-         &if_heap_object_field);
-  GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kDouble)),
-         &if_double_field);
-  CSA_ASSERT(this, WordEqual(field_representation,
-                             IntPtrConstant(StoreHandler::kSmi)));
-  Goto(&if_smi_field);
-
-  Bind(&if_tagged_field);
-  {
-    Comment("store tagged field");
-    HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
-                              value, transition, miss);
-  }
-
-  Bind(&if_double_field);
-  {
-    Comment("store double field");
-    HandleStoreFieldAndReturn(handler_word, holder, Representation::Double(),
-                              value, transition, miss);
-  }
-
-  Bind(&if_heap_object_field);
-  {
-    Comment("store heap object field");
-    // Generate full field type check here and then store value as Tagged.
-    Node* prepared_value =
-        PrepareValueForWrite(value, Representation::HeapObject(), miss);
-    Node* value_index_in_descriptor =
-        DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
-    Node* descriptors =
-        LoadMapDescriptors(transition ? transition : LoadMap(holder));
-    Node* maybe_field_type = LoadFixedArrayElement(
-        descriptors, value_index_in_descriptor, 0, INTPTR_PARAMETERS);
-    Label do_store(this);
-    GotoIf(TaggedIsSmi(maybe_field_type), &do_store);
-    // Check that value type matches the field type.
-    {
-      Node* field_type = LoadWeakCellValue(maybe_field_type, miss);
-      Branch(WordEqual(LoadMap(prepared_value), field_type), &do_store, miss);
-    }
-    Bind(&do_store);
-    HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
-                              prepared_value, transition, miss);
-  }
-
-  Bind(&if_smi_field);
-  {
-    Comment("store smi field");
-    HandleStoreFieldAndReturn(handler_word, holder, Representation::Smi(),
-                              value, transition, miss);
-  }
-}
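
The four-way branch above, keyed on FieldRepresentationBits, restated as a switch; the enum mirrors StoreHandler's kSmi/kDouble/kHeapObject/kTagged encodings in name only, and the strings are the Comment() markers from the removed code:

    enum class FieldRepresentation { kSmi, kDouble, kHeapObject, kTagged };

    const char* StoreComment(FieldRepresentation representation) {
      switch (representation) {
        case FieldRepresentation::kTagged:     return "store tagged field";
        case FieldRepresentation::kDouble:     return "store double field";
        case FieldRepresentation::kHeapObject: return "store heap object field";
        case FieldRepresentation::kSmi:        return "store smi field";
      }
      return nullptr;  // unreachable; all enumerators handled
    }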
-
-void CodeStubAssembler::HandleStoreICHandlerCase(const StoreICParameters* p,
-                                                 Node* handler, Label* miss) {
-  Label if_smi_handler(this);
-  Label try_proto_handler(this), call_handler(this);
-
-  Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
-
-  // |handler| is a Smi, encoding what to do. See SmiHandler methods
-  // for the encoding format.
-  Bind(&if_smi_handler);
-  {
-    Node* holder = p->receiver;
-    Node* handler_word = SmiUntag(handler);
-
-    // Handle non-transitioning field stores.
-    HandleStoreICSmiHandlerCase(handler_word, holder, p->value, nullptr, miss);
-  }
-
-  Bind(&try_proto_handler);
-  {
-    GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
-    HandleStoreICProtoHandler(p, handler, miss);
-  }
-
-  // |handler| is a heap object; it must be a Code object, so call it.
-  Bind(&call_handler);
-  {
-    StoreWithVectorDescriptor descriptor(isolate());
-    TailCallStub(descriptor, handler, p->context, p->receiver, p->name,
-                 p->value, p->slot, p->vector);
-  }
-}
-
-void CodeStubAssembler::HandleStoreICProtoHandler(const StoreICParameters* p,
-                                                  Node* handler, Label* miss) {
-  // IC dispatchers rely on these assumptions to be held.
-  STATIC_ASSERT(FixedArray::kLengthOffset ==
-                StoreHandler::kTransitionCellOffset);
-  DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kSmiHandlerIndex),
-            StoreHandler::kSmiHandlerOffset);
-  DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kValidityCellIndex),
-            StoreHandler::kValidityCellOffset);
-
-  // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
-  Label validity_cell_check_done(this);
-  Node* validity_cell =
-      LoadObjectField(handler, StoreHandler::kValidityCellOffset);
-  GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
-         &validity_cell_check_done);
-  Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
-  GotoIf(WordNotEqual(cell_value,
-                      SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
-         miss);
-  Goto(&validity_cell_check_done);
-
-  Bind(&validity_cell_check_done);
-  Node* smi_handler = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
-  CSA_ASSERT(this, TaggedIsSmi(smi_handler));
-
-  Node* maybe_transition_cell =
-      LoadObjectField(handler, StoreHandler::kTransitionCellOffset);
-  Label array_handler(this), tuple_handler(this);
-  Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &tuple_handler);
-
-  Variable var_transition(this, MachineRepresentation::kTagged);
-  Label if_transition(this), if_transition_to_constant(this);
-  Bind(&tuple_handler);
-  {
-    Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
-    var_transition.Bind(transition);
-    Goto(&if_transition);
-  }
-
-  Bind(&array_handler);
-  {
-    Node* length = SmiUntag(maybe_transition_cell);
-    BuildFastLoop(MachineType::PointerRepresentation(),
-                  IntPtrConstant(StoreHandler::kFirstPrototypeIndex), length,
-                  [this, p, handler, miss](CodeStubAssembler*, Node* current) {
-                    Node* prototype_cell = LoadFixedArrayElement(
-                        handler, current, 0, INTPTR_PARAMETERS);
-                    CheckPrototype(prototype_cell, p->name, miss);
-                  },
-                  1, IndexAdvanceMode::kPost);
-
-    Node* maybe_transition_cell = LoadFixedArrayElement(
-        handler, IntPtrConstant(StoreHandler::kTransitionCellIndex), 0,
-        INTPTR_PARAMETERS);
-    Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
-    var_transition.Bind(transition);
-    Goto(&if_transition);
-  }
-
-  Bind(&if_transition);
-  {
-    Node* holder = p->receiver;
-    Node* transition = var_transition.value();
-    Node* handler_word = SmiUntag(smi_handler);
-
-    GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(transition)), miss);
-
-    Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
-    GotoIf(WordEqual(handler_kind,
-                     IntPtrConstant(StoreHandler::kTransitionToConstant)),
-           &if_transition_to_constant);
-
-    // Handle transitioning field stores.
-    HandleStoreICSmiHandlerCase(handler_word, holder, p->value, transition,
-                                miss);
-
-    Bind(&if_transition_to_constant);
-    {
-      // Check that constant matches value.
-      Node* value_index_in_descriptor =
-          DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
-      Node* descriptors = LoadMapDescriptors(transition);
-      Node* constant = LoadFixedArrayElement(
-          descriptors, value_index_in_descriptor, 0, INTPTR_PARAMETERS);
-      GotoIf(WordNotEqual(p->value, constant), miss);
-
-      StoreObjectField(p->receiver, JSObject::kMapOffset, transition);
-      Return(p->value);
-    }
-  }
-}
-
-void CodeStubAssembler::StoreIC(const StoreICParameters* p) {
-  Variable var_handler(this, MachineRepresentation::kTagged);
-  // TODO(ishell): defer blocks when it works.
-  Label if_handler(this, &var_handler), try_polymorphic(this),
-      try_megamorphic(this /*, Label::kDeferred*/),
-      miss(this /*, Label::kDeferred*/);
-
-  Node* receiver_map = LoadReceiverMap(p->receiver);
-
-  // Check monomorphic case.
-  Node* feedback =
-      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
-                         &var_handler, &try_polymorphic);
-  Bind(&if_handler);
-  {
-    Comment("StoreIC_if_handler");
-    HandleStoreICHandlerCase(p, var_handler.value(), &miss);
-  }
-
-  Bind(&try_polymorphic);
-  {
-    // Check polymorphic case.
-    Comment("StoreIC_try_polymorphic");
-    GotoUnless(
-        WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
-        &try_megamorphic);
-    HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
-                          &miss, 2);
-  }
-
-  Bind(&try_megamorphic);
-  {
-    // Check megamorphic case.
-    GotoUnless(
-        WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
-        &miss);
-
-    TryProbeStubCache(isolate()->store_stub_cache(), p->receiver, p->name,
-                      &if_handler, &var_handler, &miss);
-  }
-  Bind(&miss);
-  {
-    TailCallRuntime(Runtime::kStoreIC_Miss, p->context, p->value, p->slot,
-                    p->vector, p->receiver, p->name);
-  }
-}
-
-void CodeStubAssembler::KeyedStoreIC(const StoreICParameters* p,
-                                     LanguageMode language_mode) {
-  Variable var_handler(this, MachineRepresentation::kTagged);
-  // Bind |var_handler| up front so the |miss| label sees it bound on all
-  // paths.
-  var_handler.Bind(IntPtrConstant(0));
-
-  // TODO(ishell): defer blocks when it works.
-  Label if_handler(this, &var_handler), try_polymorphic(this),
-      try_megamorphic(this /*, Label::kDeferred*/),
-      try_polymorphic_name(this /*, Label::kDeferred*/),
-      miss(this /*, Label::kDeferred*/);
-
-  Node* receiver_map = LoadReceiverMap(p->receiver);
-
-  // Check monomorphic case.
-  Node* feedback =
-      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
-                         &var_handler, &try_polymorphic);
-  Bind(&if_handler);
-  {
-    Comment("KeyedStoreIC_if_handler");
-    HandleStoreICHandlerCase(p, var_handler.value(), &miss);
-  }
-
-  Bind(&try_polymorphic);
-  {
-    // Check polymorphic case.
-    Comment("KeyedStoreIC_try_polymorphic");
-    GotoUnless(
-        WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
-        &try_megamorphic);
-    Label if_transition_handler(this);
-    Variable var_transition_map_cell(this, MachineRepresentation::kTagged);
-    HandleKeyedStorePolymorphicCase(receiver_map, feedback, &if_handler,
-                                    &var_handler, &if_transition_handler,
-                                    &var_transition_map_cell, &miss);
-    Bind(&if_transition_handler);
-    Comment("KeyedStoreIC_polymorphic_transition");
-    Node* transition_map =
-        LoadWeakCellValue(var_transition_map_cell.value(), &miss);
-    StoreTransitionDescriptor descriptor(isolate());
-    TailCallStub(descriptor, var_handler.value(), p->context, p->receiver,
-                 p->name, transition_map, p->value, p->slot, p->vector);
-  }
-
-  Bind(&try_megamorphic);
-  {
-    // Check megamorphic case.
-    Comment("KeyedStoreIC_try_megamorphic");
-    GotoUnless(
-        WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
-        &try_polymorphic_name);
-    TailCallStub(
-        CodeFactory::KeyedStoreIC_Megamorphic(isolate(), language_mode),
-        p->context, p->receiver, p->name, p->value, p->slot, p->vector);
-  }
-
-  Bind(&try_polymorphic_name);
-  {
-    // We might have a name in feedback, and a fixed array in the next slot.
-    Comment("KeyedStoreIC_try_polymorphic_name");
-    GotoUnless(WordEqual(feedback, p->name), &miss);
-    // If the name comparison succeeded, we know we have a FixedArray with
-    // at least one map/handler pair.
-    Node* offset = ElementOffsetFromIndex(
-        p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
-        FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
-    Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
-    HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
-                          1);
-  }
-
-  Bind(&miss);
-  {
-    Comment("KeyedStoreIC_miss");
-    TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
-                    p->vector, p->receiver, p->name);
-  }
-}
-
-void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
-  Label try_handler(this), miss(this);
-  Node* weak_cell =
-      LoadFixedArrayElement(p->vector, p->slot, 0, SMI_PARAMETERS);
-  CSA_ASSERT(this, HasInstanceType(weak_cell, WEAK_CELL_TYPE));
-
-  // Load value or try handler case if the {weak_cell} is cleared.
-  Node* property_cell = LoadWeakCellValue(weak_cell, &try_handler);
-  CSA_ASSERT(this, HasInstanceType(property_cell, PROPERTY_CELL_TYPE));
-
-  Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
-  GotoIf(WordEqual(value, TheHoleConstant()), &miss);
-  Return(value);
-
-  Bind(&try_handler);
-  {
-    Node* handler =
-        LoadFixedArrayElement(p->vector, p->slot, kPointerSize, SMI_PARAMETERS);
-    GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
-           &miss);
-
-    // In this case {handler} must be a Code object.
-    CSA_ASSERT(this, HasInstanceType(handler, CODE_TYPE));
-    LoadWithVectorDescriptor descriptor(isolate());
-    Node* native_context = LoadNativeContext(p->context);
-    Node* receiver =
-        LoadContextElement(native_context, Context::EXTENSION_INDEX);
-    Node* fake_name = IntPtrConstant(0);
-    TailCallStub(descriptor, handler, p->context, receiver, fake_name, p->slot,
-                 p->vector);
-  }
-  Bind(&miss);
-  {
-    TailCallRuntime(Runtime::kLoadGlobalIC_Miss, p->context, p->slot,
-                    p->vector);
-  }
-}
-
-void CodeStubAssembler::ExtendPropertiesBackingStore(compiler::Node* object) {
-  Node* properties = LoadProperties(object);
-  Node* length = LoadFixedArrayBaseLength(properties);
-
-  ParameterMode mode = OptimalParameterMode();
-  length = UntagParameter(length, mode);
-
-  Node* delta = IntPtrOrSmiConstant(JSObject::kFieldsAdded, mode);
-  Node* new_capacity = IntPtrAdd(length, delta);
-
-  // Grow properties array.
-  ElementsKind kind = FAST_ELEMENTS;
-  DCHECK(kMaxNumberOfDescriptors + JSObject::kFieldsAdded <
-         FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind));
-  // The size of a new properties backing store is guaranteed to be small
-  // enough that the new backing store will be allocated in new space.
-  CSA_ASSERT(this, UintPtrLessThan(new_capacity,
-                                   IntPtrConstant(kMaxNumberOfDescriptors +
-                                                  JSObject::kFieldsAdded)));
-
-  Node* new_properties = AllocateFixedArray(kind, new_capacity, mode);
-
-  FillFixedArrayWithValue(kind, new_properties, length, new_capacity,
-                          Heap::kUndefinedValueRootIndex, mode);
-
-  // |new_properties| is guaranteed to be in new space, so we can skip
-  // the write barrier.
-  CopyFixedArrayElements(kind, properties, new_properties, length,
-                         SKIP_WRITE_BARRIER, mode);
-
-  StoreObjectField(object, JSObject::kPropertiesOffset, new_properties);
-}
-
-Node* CodeStubAssembler::PrepareValueForWrite(Node* value,
-                                              Representation representation,
-                                              Label* bailout) {
-  if (representation.IsDouble()) {
-    value = TryTaggedToFloat64(value, bailout);
-  } else if (representation.IsHeapObject()) {
-    // Field type is checked by the handler, here we only check if the value
-    // is a heap object.
-    GotoIf(TaggedIsSmi(value), bailout);
-  } else if (representation.IsSmi()) {
-    GotoUnless(TaggedIsSmi(value), bailout);
-  } else {
-    DCHECK(representation.IsTagged());
-  }
-  return value;
-}
-
-void CodeStubAssembler::StoreNamedField(Node* object, FieldIndex index,
-                                        Representation representation,
-                                        Node* value, bool transition_to_field) {
-  DCHECK_EQ(index.is_double(), representation.IsDouble());
-
-  StoreNamedField(object, IntPtrConstant(index.offset()), index.is_inobject(),
-                  representation, value, transition_to_field);
-}
-
-void CodeStubAssembler::StoreNamedField(Node* object, Node* offset,
-                                        bool is_inobject,
-                                        Representation representation,
-                                        Node* value, bool transition_to_field) {
-  bool store_value_as_double = representation.IsDouble();
-  Node* property_storage = object;
-  if (!is_inobject) {
-    property_storage = LoadProperties(object);
-  }
-
-  if (representation.IsDouble()) {
-    if (!FLAG_unbox_double_fields || !is_inobject) {
-      if (transition_to_field) {
-        Node* heap_number = AllocateHeapNumberWithValue(value, MUTABLE);
-        // Store the new mutable heap number into the object.
-        value = heap_number;
-        store_value_as_double = false;
-      } else {
-        // Load the heap number.
-        property_storage = LoadObjectField(property_storage, offset);
-        // Store the double value into it.
-        offset = IntPtrConstant(HeapNumber::kValueOffset);
-      }
-    }
-  }
-
-  if (store_value_as_double) {
-    StoreObjectFieldNoWriteBarrier(property_storage, offset, value,
-                                   MachineRepresentation::kFloat64);
-  } else if (representation.IsSmi()) {
-    StoreObjectFieldNoWriteBarrier(property_storage, offset, value);
-  } else {
-    StoreObjectField(property_storage, offset, value);
-  }
-}
-
 Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
                                                   Node* value, Label* bailout) {
   // Mapped arguments are actual arguments. Unmapped arguments are values added
@@ -6713,7 +5647,7 @@
 
   bool is_load = value == nullptr;
 
-  GotoUnless(TaggedIsSmi(key), bailout);
+  GotoIfNot(TaggedIsSmi(key), bailout);
   key = SmiUntag(key);
   GotoIf(IntPtrLessThan(key, IntPtrConstant(0)), bailout);
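// A minimal standalone sketch (not V8 code; names are illustrative) of the
// Smi tagging scheme that TaggedIsSmi/SmiUntag above rely on: a Smi carries
// tag value 0 in its low bit, so Smi-ness is a mask test and untagging is an
// arithmetic shift. The 1-bit shift matches the 32-bit layout; 64-bit builds
// of this era shift by 32 instead.
#include <cstdint>

constexpr intptr_t kSmiTagMaskSketch = 1;  // low bit: 0 = Smi, 1 = heap pointer

inline bool TaggedIsSmiSketch(intptr_t tagged) {
  return (tagged & kSmiTagMaskSketch) == 0;
}

inline intptr_t SmiUntagSketch(intptr_t tagged) {
  return tagged >> 1;  // arithmetic shift drops the tag and keeps the sign
}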
 
@@ -6730,37 +5664,33 @@
 
   GotoIf(UintPtrGreaterThanOrEqual(key, adjusted_length), &if_unmapped);
 
-  Node* mapped_index = LoadFixedArrayElement(
-      elements, IntPtrAdd(key, intptr_two), 0, INTPTR_PARAMETERS);
+  Node* mapped_index =
+      LoadFixedArrayElement(elements, IntPtrAdd(key, intptr_two));
   Branch(WordEqual(mapped_index, TheHoleConstant()), &if_unmapped, &if_mapped);
 
   Bind(&if_mapped);
   {
     CSA_ASSERT(this, TaggedIsSmi(mapped_index));
     mapped_index = SmiUntag(mapped_index);
-    Node* the_context = LoadFixedArrayElement(elements, IntPtrConstant(0), 0,
-                                              INTPTR_PARAMETERS);
+    Node* the_context = LoadFixedArrayElement(elements, 0);
     // Assert that we can use LoadFixedArrayElement/StoreFixedArrayElement
     // methods for accessing Context.
     STATIC_ASSERT(Context::kHeaderSize == FixedArray::kHeaderSize);
     DCHECK_EQ(Context::SlotOffset(0) + kHeapObjectTag,
               FixedArray::OffsetOfElementAt(0));
     if (is_load) {
-      Node* result = LoadFixedArrayElement(the_context, mapped_index, 0,
-                                           INTPTR_PARAMETERS);
+      Node* result = LoadFixedArrayElement(the_context, mapped_index);
       CSA_ASSERT(this, WordNotEqual(result, TheHoleConstant()));
       var_result.Bind(result);
     } else {
-      StoreFixedArrayElement(the_context, mapped_index, value,
-                             UPDATE_WRITE_BARRIER, INTPTR_PARAMETERS);
+      StoreFixedArrayElement(the_context, mapped_index, value);
     }
     Goto(&end);
   }
 
   Bind(&if_unmapped);
   {
-    Node* backing_store = LoadFixedArrayElement(elements, IntPtrConstant(1), 0,
-                                                INTPTR_PARAMETERS);
+    Node* backing_store = LoadFixedArrayElement(elements, 1);
     GotoIf(WordNotEqual(LoadMap(backing_store), FixedArrayMapConstant()),
            bailout);
 
@@ -6770,13 +5700,11 @@
 
     // The key falls into unmapped range.
     if (is_load) {
-      Node* result =
-          LoadFixedArrayElement(backing_store, key, 0, INTPTR_PARAMETERS);
+      Node* result = LoadFixedArrayElement(backing_store, key);
       GotoIf(WordEqual(result, TheHoleConstant()), bailout);
       var_result.Bind(result);
     } else {
-      StoreFixedArrayElement(backing_store, key, value, UPDATE_WRITE_BARRIER,
-                             INTPTR_PARAMETERS);
+      StoreFixedArrayElement(backing_store, key, value);
     }
     Goto(&end);
   }
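// Layout of the parameter map walked by the mapped/unmapped split above (a
// hedged sketch; the slot names are descriptive, not the V8 declarations):
//
//   elements[0]       -> Context holding the live parameter values
//   elements[1]       -> FixedArray backing store for unmapped arguments
//   elements[2 + key] -> Smi index into the context, or the-hole if unmapped
//
// which is why the fast path adds two to the key before the hole check:
#include <cstddef>

constexpr std::size_t kParameterMapHeaderSlots = 2;  // context + backing store

inline std::size_t MappedEntrySlotSketch(std::size_t key) {
  return kParameterMapHeaderSlots + key;  // mirrors IntPtrAdd(key, intptr_two)
}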
@@ -6844,7 +5772,7 @@
     value = Float64SilenceNaN(value);
     StoreFixedDoubleArrayElement(elements, index, value, mode);
   } else {
-    StoreFixedArrayElement(elements, index, value, barrier_mode, mode);
+    StoreFixedArrayElement(elements, index, value, barrier_mode, 0, mode);
   }
 }
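// The extra `0` passed to StoreFixedArrayElement above is an additional byte
// offset folded into the element address. A standalone sketch of that address
// arithmetic (64-bit constants assumed): tagged pointers point one byte past
// the true object start, so the header size is corrected by the tag first.
#include <cstdint>

constexpr intptr_t kPointerSizeSketch = 8;
constexpr intptr_t kHeapObjectTagSketch = 1;
constexpr intptr_t kFixedArrayHeaderSketch = 2 * kPointerSizeSketch;  // map + length

inline intptr_t ElementOffsetSketch(intptr_t index, intptr_t additional_offset) {
  return kFixedArrayHeaderSketch - kHeapObjectTagSketch +
         index * kPointerSizeSketch + additional_offset;
}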
 
@@ -6852,8 +5780,7 @@
   Label done(this);
   Node* int32_zero = Int32Constant(0);
   Node* int32_255 = Int32Constant(255);
-  Variable var_value(this, MachineRepresentation::kWord32);
-  var_value.Bind(int32_value);
+  Variable var_value(this, MachineRepresentation::kWord32, int32_value);
   GotoIf(Uint32LessThanOrEqual(int32_value, int32_255), &done);
   var_value.Bind(int32_zero);
   GotoIf(Int32LessThan(int32_value, int32_zero), &done);
@@ -6865,8 +5792,7 @@
 
 Node* CodeStubAssembler::Float64ToUint8Clamped(Node* float64_value) {
   Label done(this);
-  Variable var_value(this, MachineRepresentation::kWord32);
-  var_value.Bind(Int32Constant(0));
+  Variable var_value(this, MachineRepresentation::kWord32, Int32Constant(0));
   GotoIf(Float64LessThanOrEqual(float64_value, Float64Constant(0.0)), &done);
   var_value.Bind(Int32Constant(255));
   GotoIf(Float64LessThanOrEqual(Float64Constant(255.0), float64_value), &done);
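// Plain C++ restatement of the two clamping helpers above (a sketch, not the
// V8 code): CLAMPED_UINT8 stores saturate at the [0, 255] bounds, doubles
// additionally round ties to even, and NaN maps to 0.
#include <cmath>
#include <cstdint>

inline uint8_t Int32ToUint8ClampedSketch(int32_t value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return static_cast<uint8_t>(value);
}

inline uint8_t Float64ToUint8ClampedSketch(double value) {
  if (!(value > 0.0)) return 0;  // also catches NaN, which compares false
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(value));  // ties-to-even rounding
}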
@@ -6909,7 +5835,7 @@
   Label done(this, &var_result), if_smi(this);
   GotoIf(TaggedIsSmi(input), &if_smi);
   // Try to convert a heap number to a Smi.
-  GotoUnless(IsHeapNumberMap(LoadMap(input)), bailout);
+  GotoIfNot(IsHeapNumberMap(LoadMap(input)), bailout);
   {
     Node* value = LoadHeapNumberValue(input);
     if (rep == MachineRepresentation::kWord32) {
@@ -6979,24 +5905,20 @@
 
     // Check if buffer has been neutered.
     Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
-    Node* bitfield = LoadObjectField(buffer, JSArrayBuffer::kBitFieldOffset,
-                                     MachineType::Uint32());
-    Node* neutered_bit =
-        Word32And(bitfield, Int32Constant(JSArrayBuffer::WasNeutered::kMask));
-    GotoUnless(Word32Equal(neutered_bit, Int32Constant(0)), bailout);
+    GotoIf(IsDetachedBuffer(buffer), bailout);
 
     // Bounds check.
-    Node* length = UntagParameter(
+    Node* length = TaggedToParameter(
         LoadObjectField(object, JSTypedArray::kLengthOffset), parameter_mode);
 
     if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
       // Skip the store if we write beyond the length.
-      GotoUnless(IntPtrLessThan(key, length), &done);
+      GotoIfNot(IntPtrLessThan(key, length), &done);
      // ... but bail out if the key is negative.
     } else {
       DCHECK_EQ(STANDARD_STORE, store_mode);
     }
-    GotoUnless(UintPtrLessThan(key, length), bailout);
+    GotoIfNot(UintPtrLessThan(key, length), bailout);
 
     // Backing store = external_pointer + base_pointer.
     Node* external_pointer =
@@ -7004,7 +5926,8 @@
                         MachineType::Pointer());
     Node* base_pointer =
         LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
-    Node* backing_store = IntPtrAdd(external_pointer, base_pointer);
+    Node* backing_store =
+        IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer));
     StoreElement(backing_store, elements_kind, key, value, parameter_mode);
     Goto(&done);
 
@@ -7016,13 +5939,13 @@
 
   Node* length = is_jsarray ? LoadObjectField(object, JSArray::kLengthOffset)
                             : LoadFixedArrayBaseLength(elements);
-  length = UntagParameter(length, parameter_mode);
+  length = TaggedToParameter(length, parameter_mode);
 
  // If the value is stored into a fast Smi array, ensure that it is a Smi
  // before manipulating the backing store; otherwise the backing store may
  // be left in an invalid state.
   if (IsFastSmiElementsKind(elements_kind)) {
-    GotoUnless(TaggedIsSmi(value), bailout);
+    GotoIfNot(TaggedIsSmi(value), bailout);
   } else if (IsFastDoubleElementsKind(elements_kind)) {
     value = TryTaggedToFloat64(value, bailout);
   }
@@ -7031,7 +5954,7 @@
     elements = CheckForCapacityGrow(object, elements, elements_kind, length,
                                     key, parameter_mode, is_jsarray, bailout);
   } else {
-    GotoUnless(UintPtrLessThan(key, length), bailout);
+    GotoIfNot(UintPtrLessThan(key, length), bailout);
 
     if ((store_mode == STORE_NO_TRANSITION_HANDLE_COW) &&
         IsFastSmiOrObjectElementsKind(elements_kind)) {
@@ -7061,7 +5984,7 @@
   Bind(&grow_case);
   {
     Node* current_capacity =
-        UntagParameter(LoadFixedArrayBaseLength(elements), mode);
+        TaggedToParameter(LoadFixedArrayBaseLength(elements), mode);
 
     checked_elements.Bind(elements);
 
@@ -7079,14 +6002,14 @@
     if (is_js_array) {
       Node* new_length = IntPtrAdd(key, IntPtrOrSmiConstant(1, mode));
       StoreObjectFieldNoWriteBarrier(object, JSArray::kLengthOffset,
-                                     TagParameter(new_length, mode));
+                                     ParameterToTagged(new_length, mode));
     }
     Goto(&done);
   }
 
   Bind(&no_grow_case);
   {
-    GotoUnless(UintPtrLessThan(key, length), bailout);
+    GotoIfNot(UintPtrLessThan(key, length), bailout);
     checked_elements.Bind(elements);
     Goto(&done);
   }
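// The grow decision above, restated over std::vector as a stand-in for the
// elements backing store (a sketch; STORE_AND_GROW semantics assumed): a
// store at `key` may enlarge the capacity, and for a JSArray the observable
// length then becomes key + 1.
#include <cstddef>
#include <vector>

inline void StoreWithGrowSketch(std::vector<int>& elements, std::size_t key,
                                int value, std::size_t* js_array_length) {
  if (key >= elements.size()) {
    elements.resize(key + 1);  // plays the role of GrowElementsCapacity
  }
  elements[key] = value;
  if (js_array_length != nullptr && key + 1 > *js_array_length) {
    *js_array_length = key + 1;  // StoreObjectFieldNoWriteBarrier(kLengthOffset)
  }
}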
@@ -7099,15 +6022,15 @@
                                              ElementsKind kind, Node* length,
                                              ParameterMode mode,
                                              Label* bailout) {
-  Variable new_elements_var(this, MachineRepresentation::kTagged);
+  Variable new_elements_var(this, MachineRepresentation::kTagged, elements);
   Label done(this);
 
-  new_elements_var.Bind(elements);
-  GotoUnless(
+  GotoIfNot(
       WordEqual(LoadMap(elements), LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
       &done);
   {
-    Node* capacity = UntagParameter(LoadFixedArrayBaseLength(elements), mode);
+    Node* capacity =
+        TaggedToParameter(LoadFixedArrayBaseLength(elements), mode);
     Node* new_elements = GrowElementsCapacity(object, elements, kind, kind,
                                               length, capacity, mode, bailout);
 
@@ -7119,9 +6042,11 @@
   return new_elements_var.value();
 }
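// A minimal sketch of the copy-on-write handling above: boilerplate arrays
// can share one FixedArray marked with the COW map, so a store must first
// swap in a private copy. std::shared_ptr stands in for the shared store.
#include <memory>
#include <vector>

using ElementsSketch = std::vector<int>;

inline void EnsureWritableSketch(std::shared_ptr<ElementsSketch>& elements) {
  if (elements.use_count() > 1) {  // analogue of "map == FixedCOWArrayMap"
    elements = std::make_shared<ElementsSketch>(*elements);  // private copy
  }
}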
 
-void CodeStubAssembler::TransitionElementsKind(
-    compiler::Node* object, compiler::Node* map, ElementsKind from_kind,
-    ElementsKind to_kind, bool is_jsarray, Label* bailout) {
+void CodeStubAssembler::TransitionElementsKind(Node* object, Node* map,
+                                               ElementsKind from_kind,
+                                               ElementsKind to_kind,
+                                               bool is_jsarray,
+                                               Label* bailout) {
   DCHECK(!IsFastHoleyElementsKind(from_kind) ||
          IsFastHoleyElementsKind(to_kind));
   if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
@@ -7151,7 +6076,7 @@
     Bind(&done);
   }
 
-  StoreObjectField(object, JSObject::kMapOffset, map);
+  StoreMap(object, map);
 }
 
 void CodeStubAssembler::TrapAllocationMemento(Node* object,
@@ -7167,7 +6092,8 @@
       kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
 
   // Bail out if the object is not in new space.
-  Node* object_page = PageFromAddress(object);
+  Node* object_word = BitcastTaggedToWord(object);
+  Node* object_page = PageFromAddress(object_word);
   {
     Node* page_flags = Load(MachineType::IntPtr(), object_page,
                             IntPtrConstant(Page::kFlagsOffset));
@@ -7178,7 +6104,7 @@
   }
 
   Node* memento_last_word = IntPtrAdd(
-      object, IntPtrConstant(kMementoLastWordOffset - kHeapObjectTag));
+      object_word, IntPtrConstant(kMementoLastWordOffset - kHeapObjectTag));
   Node* memento_last_word_page = PageFromAddress(memento_last_word);
 
   Node* new_space_top = Load(MachineType::Pointer(), new_space_top_address);
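// Sketch of the PageFromAddress arithmetic used above: heap pages are
// power-of-two sized and aligned, so the owning page header is recovered by
// masking the low bits of any interior address (the 19-bit page size is an
// assumption based on this era's heap layout).
#include <cstdint>

constexpr uintptr_t kPageSizeBitsSketch = 19;  // 512 KB pages
constexpr uintptr_t kPageAlignmentMaskSketch =
    (uintptr_t{1} << kPageSizeBitsSketch) - 1;

inline uintptr_t PageFromAddressSketch(uintptr_t address) {
  return address & ~kPageAlignmentMaskSketch;
}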
@@ -7227,11 +6153,10 @@
 
 void CodeStubAssembler::CheckEnumCache(Node* receiver, Label* use_cache,
                                        Label* use_runtime) {
-  Variable current_js_object(this, MachineRepresentation::kTagged);
-  current_js_object.Bind(receiver);
+  Variable current_js_object(this, MachineRepresentation::kTagged, receiver);
 
-  Variable current_map(this, MachineRepresentation::kTagged);
-  current_map.Bind(LoadMap(current_js_object.value()));
+  Variable current_map(this, MachineRepresentation::kTagged,
+                       LoadMap(current_js_object.value()));
 
   // These variables are updated in the loop below.
   Variable* loop_vars[2] = {&current_js_object, &current_map};
@@ -7291,15 +6216,13 @@
   Node* size = IntPtrConstant(AllocationSite::kSize);
   Node* site = Allocate(size, CodeStubAssembler::kPretenured);
 
-  // Store the map
-  StoreObjectFieldRoot(site, AllocationSite::kMapOffset,
-                       Heap::kAllocationSiteMapRootIndex);
-  Node* kind = SmiConstant(Smi::FromInt(GetInitialFastElementsKind()));
+  StoreMap(site, AllocationSiteMapConstant());
+  Node* kind = SmiConstant(GetInitialFastElementsKind());
   StoreObjectFieldNoWriteBarrier(site, AllocationSite::kTransitionInfoOffset,
                                  kind);
 
   // Unlike literals, constructed arrays don't have nested sites.
-  Node* zero = IntPtrConstant(0);
+  Node* zero = SmiConstant(0);
   StoreObjectFieldNoWriteBarrier(site, AllocationSite::kNestedSiteOffset, zero);
 
   // Pretenuring calculation field.
@@ -7327,7 +6250,7 @@
   StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site);
   StoreNoWriteBarrier(MachineRepresentation::kTagged, site_list, site);
 
-  StoreFixedArrayElement(feedback_vector, slot, site, UPDATE_WRITE_BARRIER,
+  StoreFixedArrayElement(feedback_vector, slot, site, UPDATE_WRITE_BARRIER, 0,
                          CodeStubAssembler::SMI_PARAMETERS);
   return site;
 }
@@ -7339,26 +6262,28 @@
   Node* cell = Allocate(size, CodeStubAssembler::kPretenured);
 
   // Initialize the WeakCell.
-  StoreObjectFieldRoot(cell, WeakCell::kMapOffset, Heap::kWeakCellMapRootIndex);
+  DCHECK(Heap::RootIsImmortalImmovable(Heap::kWeakCellMapRootIndex));
+  StoreMapNoWriteBarrier(cell, Heap::kWeakCellMapRootIndex);
   StoreObjectField(cell, WeakCell::kValueOffset, value);
   StoreObjectFieldRoot(cell, WeakCell::kNextOffset,
                        Heap::kTheHoleValueRootIndex);
 
   // Store the WeakCell in the feedback vector.
-  StoreFixedArrayElement(feedback_vector, slot, cell, UPDATE_WRITE_BARRIER,
+  StoreFixedArrayElement(feedback_vector, slot, cell, UPDATE_WRITE_BARRIER, 0,
                          CodeStubAssembler::SMI_PARAMETERS);
   return cell;
 }
 
-void CodeStubAssembler::BuildFastLoop(
-    const CodeStubAssembler::VariableList& vars,
-    MachineRepresentation index_rep, Node* start_index, Node* end_index,
-    std::function<void(CodeStubAssembler* assembler, Node* index)> body,
-    int increment, IndexAdvanceMode mode) {
-  Variable var(this, index_rep);
+Node* CodeStubAssembler::BuildFastLoop(
+    const CodeStubAssembler::VariableList& vars, Node* start_index,
+    Node* end_index, const FastLoopBody& body, int increment,
+    ParameterMode parameter_mode, IndexAdvanceMode advance_mode) {
+  MachineRepresentation index_rep = (parameter_mode == INTPTR_PARAMETERS)
+                                        ? MachineType::PointerRepresentation()
+                                        : MachineRepresentation::kTaggedSigned;
+  Variable var(this, index_rep, start_index);
   VariableList vars_copy(vars, zone());
   vars_copy.Add(&var, zone());
-  var.Bind(start_index);
   Label loop(this, vars_copy);
   Label after_loop(this);
   // Introduce an explicit second check of the termination condition before the
@@ -7371,25 +6296,23 @@
   Branch(WordEqual(var.value(), end_index), &after_loop, &loop);
   Bind(&loop);
   {
-    if (mode == IndexAdvanceMode::kPre) {
-      var.Bind(IntPtrAdd(var.value(), IntPtrConstant(increment)));
+    if (advance_mode == IndexAdvanceMode::kPre) {
+      Increment(var, increment, parameter_mode);
     }
-    body(this, var.value());
-    if (mode == IndexAdvanceMode::kPost) {
-      var.Bind(IntPtrAdd(var.value(), IntPtrConstant(increment)));
+    body(var.value());
+    if (advance_mode == IndexAdvanceMode::kPost) {
+      Increment(var, increment, parameter_mode);
     }
     Branch(WordNotEqual(var.value(), end_index), &loop, &after_loop);
   }
   Bind(&after_loop);
+  return var.value();
 }
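// The control flow BuildFastLoop emits, restated as plain C++ (a sketch; the
// real method assembles a CSA graph): advance_mode picks whether the index is
// bumped before or after the body, and the final index value is returned,
// which is what the new Node* return type exposes.
#include <cstdint>
#include <functional>

inline intptr_t BuildFastLoopSketch(intptr_t start, intptr_t end, int increment,
                                    bool advance_before_body,
                                    const std::function<void(intptr_t)>& body) {
  intptr_t index = start;
  while (index != end) {  // explicit check before the first iteration
    if (advance_before_body) index += increment;   // IndexAdvanceMode::kPre
    body(index);
    if (!advance_before_body) index += increment;  // IndexAdvanceMode::kPost
  }
  return index;
}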
 
 void CodeStubAssembler::BuildFastFixedArrayForEach(
-    compiler::Node* fixed_array, ElementsKind kind,
-    compiler::Node* first_element_inclusive,
-    compiler::Node* last_element_exclusive,
-    std::function<void(CodeStubAssembler* assembler,
-                       compiler::Node* fixed_array, compiler::Node* offset)>
-        body,
+    const CodeStubAssembler::VariableList& vars, Node* fixed_array,
+    ElementsKind kind, Node* first_element_inclusive,
+    Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
     ParameterMode mode, ForEachDirection direction) {
   STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
   int32_t first_val;
@@ -7406,7 +6329,7 @@
           Node* offset =
               ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
                                      FixedArray::kHeaderSize - kHeapObjectTag);
-          body(this, fixed_array, offset);
+          body(fixed_array, offset);
         }
       } else {
         for (int i = last_val - 1; i >= first_val; --i) {
@@ -7414,7 +6337,7 @@
           Node* offset =
               ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
                                      FixedArray::kHeaderSize - kHeapObjectTag);
-          body(this, fixed_array, offset);
+          body(fixed_array, offset);
         }
       }
       return;
@@ -7431,20 +6354,42 @@
 
   int increment = IsFastDoubleElementsKind(kind) ? kDoubleSize : kPointerSize;
   BuildFastLoop(
-      MachineType::PointerRepresentation(), start, limit,
-      [fixed_array, body](CodeStubAssembler* assembler, Node* offset) {
-        body(assembler, fixed_array, offset);
-      },
+      vars, start, limit,
+      [fixed_array, &body](Node* offset) { body(fixed_array, offset); },
       direction == ForEachDirection::kReverse ? -increment : increment,
+      INTPTR_PARAMETERS,
       direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
                                               : IndexAdvanceMode::kPost);
 }
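// Sketch of the constant-bounds fast path above: when first/last are known at
// stub-generation time (the two C++ `for` loops), the body is emitted once
// per element at a pre-computed constant byte offset, i.e. straight-line code
// with no materialized loop. 64-bit constants assumed.
#include <cstdint>
#include <functional>

constexpr int64_t kPtrSketch = 8;
constexpr int64_t kHeaderMinusTagSketch = 2 * kPtrSketch - 1;  // FixedArray header

inline void EmitUnrolledSketch(int64_t first, int64_t last,
                               const std::function<void(int64_t)>& emit_body) {
  for (int64_t i = first; i < last; ++i) {
    emit_body(kHeaderMinusTagSketch + i * kPtrSketch);  // constant per element
  }
}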
 
-void CodeStubAssembler::BranchIfNumericRelationalComparison(
-    RelationalComparisonMode mode, compiler::Node* lhs, compiler::Node* rhs,
-    Label* if_true, Label* if_false) {
-  typedef compiler::Node Node;
+void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace(
+    Node* element_count, Label* doesnt_fit, int base_size, ParameterMode mode) {
+  int max_newspace_parameters =
+      (kMaxRegularHeapObjectSize - base_size) / kPointerSize;
+  GotoIf(IntPtrOrSmiGreaterThan(
+             element_count, IntPtrOrSmiConstant(max_newspace_parameters, mode),
+             mode),
+         doesnt_fit);
+}
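// The bound above, restated: an array of element_count pointer-sized slots
// plus base_size bytes of header fits a new-space allocation only while it
// stays under the regular-object size limit (value left symbolic here; the
// 8-byte pointer size is an assumption).
#include <cstdint>

inline bool FitsInNewSpaceSketch(int64_t element_count, int64_t base_size,
                                 int64_t max_regular_heap_object_size) {
  int64_t max_elements = (max_regular_heap_object_size - base_size) / 8;
  return element_count <= max_elements;
}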
 
+void CodeStubAssembler::InitializeFieldsWithRoot(
+    Node* object, Node* start_offset, Node* end_offset,
+    Heap::RootListIndex root_index) {
+  start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
+  end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
+  Node* root_value = LoadRoot(root_index);
+  BuildFastLoop(end_offset, start_offset,
+                [this, object, root_value](Node* current) {
+                  StoreNoWriteBarrier(MachineRepresentation::kTagged, object,
+                                      current, root_value);
+                },
+                -kPointerSize, INTPTR_PARAMETERS,
+                CodeStubAssembler::IndexAdvanceMode::kPre);
+}
+
+void CodeStubAssembler::BranchIfNumericRelationalComparison(
+    RelationalComparisonMode mode, Node* lhs, Node* rhs, Label* if_true,
+    Label* if_false) {
   Label end(this);
   Variable result(this, MachineRepresentation::kTagged);
 
@@ -7484,7 +6429,7 @@
 
     Bind(&if_rhsisnotsmi);
     {
-      CSA_ASSERT(this, WordEqual(LoadMap(rhs), HeapNumberMapConstant()));
+      CSA_ASSERT(this, IsHeapNumberMap(LoadMap(rhs)));
       // Convert the {lhs} and {rhs} to floating point values, and
       // perform a floating point comparison.
       var_fcmp_lhs.Bind(SmiToFloat64(lhs));
@@ -7495,7 +6440,7 @@
 
   Bind(&if_lhsisnotsmi);
   {
-    CSA_ASSERT(this, WordEqual(LoadMap(lhs), HeapNumberMapConstant()));
+    CSA_ASSERT(this, IsHeapNumberMap(LoadMap(lhs)));
 
     // Check if {rhs} is a Smi or a HeapObject.
     Label if_rhsissmi(this), if_rhsisnotsmi(this);
@@ -7512,7 +6457,7 @@
 
     Bind(&if_rhsisnotsmi);
     {
-      CSA_ASSERT(this, WordEqual(LoadMap(rhs), HeapNumberMapConstant()));
+      CSA_ASSERT(this, IsHeapNumberMap(LoadMap(rhs)));
 
       // Convert the {lhs} and {rhs} to floating point values, and
       // perform a floating point comparison.
@@ -7546,19 +6491,16 @@
   }
 }
 
-void CodeStubAssembler::GotoUnlessNumberLessThan(compiler::Node* lhs,
-                                                 compiler::Node* rhs,
+void CodeStubAssembler::GotoUnlessNumberLessThan(Node* lhs, Node* rhs,
                                                  Label* if_false) {
   Label if_true(this);
   BranchIfNumericRelationalComparison(kLessThan, lhs, rhs, &if_true, if_false);
   Bind(&if_true);
 }
 
-compiler::Node* CodeStubAssembler::RelationalComparison(
-    RelationalComparisonMode mode, compiler::Node* lhs, compiler::Node* rhs,
-    compiler::Node* context) {
-  typedef compiler::Node Node;
-
+Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
+                                              Node* lhs, Node* rhs,
+                                              Node* context) {
   Label return_true(this), return_false(this), end(this);
   Variable result(this, MachineRepresentation::kTagged);
 
@@ -7569,12 +6511,10 @@
 
   // We might need to loop several times due to ToPrimitive and/or ToNumber
   // conversions.
-  Variable var_lhs(this, MachineRepresentation::kTagged),
-      var_rhs(this, MachineRepresentation::kTagged);
+  Variable var_lhs(this, MachineRepresentation::kTagged, lhs),
+      var_rhs(this, MachineRepresentation::kTagged, rhs);
   Variable* loop_vars[2] = {&var_lhs, &var_rhs};
   Label loop(this, 2, loop_vars);
-  var_lhs.Bind(lhs);
-  var_rhs.Bind(rhs);
   Goto(&loop);
   Bind(&loop);
   {
@@ -7644,9 +6584,6 @@
 
     Bind(&if_lhsisnotsmi);
     {
-      // Load the HeapNumber map for later comparisons.
-      Node* number_map = HeapNumberMapConstant();
-
       // Load the map of {lhs}.
       Node* lhs_map = LoadMap(lhs);
 
@@ -7658,8 +6595,7 @@
       {
         // Check if the {lhs} is a HeapNumber.
         Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
-        Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
-               &if_lhsisnotnumber);
+        Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
 
         Bind(&if_lhsisnumber);
         {
@@ -7689,8 +6625,7 @@
 
         // Check if {lhs} is a HeapNumber.
         Label if_lhsisnumber(this), if_lhsisnotnumber(this);
-        Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
-               &if_lhsisnotnumber);
+        Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
 
         Bind(&if_lhsisnumber);
         {
@@ -7879,17 +6814,14 @@
 
 namespace {
 
-void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
+void GenerateEqual_Same(CodeStubAssembler* assembler, Node* value,
                         CodeStubAssembler::Label* if_equal,
                         CodeStubAssembler::Label* if_notequal) {
  // In case of abstract or strict equality checks, we need additional checks
  // for NaN values because they are not considered equal, even if both the
  // left- and the right-hand side reference exactly the same value.
-  // TODO(bmeurer): This seems to violate the SIMD.js specification, but it
-  // seems to be what is tested in the current SIMD.js testsuite.
 
   typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
 
   // Check if {value} is a Smi or a HeapObject.
   Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
@@ -7922,26 +6854,15 @@
   assembler->Bind(&if_valueissmi);
   assembler->Goto(if_equal);
 }
-
-void GenerateEqual_Simd128Value_HeapObject(
-    CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* lhs_map,
-    compiler::Node* rhs, compiler::Node* rhs_map,
-    CodeStubAssembler::Label* if_equal, CodeStubAssembler::Label* if_notequal) {
-  assembler->BranchIfSimd128Equal(lhs, lhs_map, rhs, rhs_map, if_equal,
-                                  if_notequal);
-}
-
 }  // namespace
 
 // ES6 section 7.2.12 Abstract Equality Comparison
-compiler::Node* CodeStubAssembler::Equal(ResultMode mode, compiler::Node* lhs,
-                                         compiler::Node* rhs,
-                                         compiler::Node* context) {
+Node* CodeStubAssembler::Equal(ResultMode mode, Node* lhs, Node* rhs,
+                               Node* context) {
  // This is a slightly optimized version of Object::Equals, represented as a
  // scheduled TurboFan graph built with the CodeStubAssembler. Whenever you
  // change anything functionality-wise in here, remember to update the
  // Object::Equals method as well.
-  typedef compiler::Node Node;
 
   Label if_equal(this), if_notequal(this),
       do_rhsstringtonumber(this, Label::kDeferred), end(this);
@@ -7954,12 +6875,10 @@
 
   // We might need to loop several times due to ToPrimitive and/or ToNumber
   // conversions.
-  Variable var_lhs(this, MachineRepresentation::kTagged),
-      var_rhs(this, MachineRepresentation::kTagged);
+  Variable var_lhs(this, MachineRepresentation::kTagged, lhs),
+      var_rhs(this, MachineRepresentation::kTagged, rhs);
   Variable* loop_vars[2] = {&var_lhs, &var_rhs};
   Label loop(this, 2, loop_vars);
-  var_lhs.Bind(lhs);
-  var_rhs.Bind(rhs);
   Goto(&loop);
   Bind(&loop);
   {
@@ -8001,10 +6920,8 @@
           Node* rhs_map = LoadMap(rhs);
 
           // Check if {rhs} is a HeapNumber.
-          Node* number_map = HeapNumberMapConstant();
           Label if_rhsisnumber(this), if_rhsisnotnumber(this);
-          Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
-                 &if_rhsisnotnumber);
+          Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
 
           Bind(&if_rhsisnumber);
           {
@@ -8094,8 +7011,8 @@
         Bind(&if_rhsisnotsmi);
         {
           Label if_lhsisstring(this), if_lhsisnumber(this),
-              if_lhsissymbol(this), if_lhsissimd128value(this),
-              if_lhsisoddball(this), if_lhsisreceiver(this);
+              if_lhsissymbol(this), if_lhsisoddball(this),
+              if_lhsisreceiver(this);
 
           // Both {lhs} and {rhs} are HeapObjects, load their maps
           // and their instance types.
@@ -8107,7 +7024,7 @@
           Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
 
           // Dispatch based on the instance type of {lhs}.
-          size_t const kNumCases = FIRST_NONSTRING_TYPE + 4;
+          size_t const kNumCases = FIRST_NONSTRING_TYPE + 3;
           Label* case_labels[kNumCases];
           int32_t case_values[kNumCases];
           for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
@@ -8118,10 +7035,8 @@
           case_values[FIRST_NONSTRING_TYPE + 0] = HEAP_NUMBER_TYPE;
           case_labels[FIRST_NONSTRING_TYPE + 1] = &if_lhsissymbol;
           case_values[FIRST_NONSTRING_TYPE + 1] = SYMBOL_TYPE;
-          case_labels[FIRST_NONSTRING_TYPE + 2] = &if_lhsissimd128value;
-          case_values[FIRST_NONSTRING_TYPE + 2] = SIMD128_VALUE_TYPE;
-          case_labels[FIRST_NONSTRING_TYPE + 3] = &if_lhsisoddball;
-          case_values[FIRST_NONSTRING_TYPE + 3] = ODDBALL_TYPE;
+          case_labels[FIRST_NONSTRING_TYPE + 2] = &if_lhsisoddball;
+          case_values[FIRST_NONSTRING_TYPE + 2] = ODDBALL_TYPE;
           Switch(lhs_instance_type, &if_lhsisreceiver, case_values, case_labels,
                  arraysize(case_values));
           for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
@@ -8305,47 +7220,6 @@
             }
           }
 
-          Bind(&if_lhsissimd128value);
-          {
-            // Check if the {rhs} is also a Simd128Value.
-            Label if_rhsissimd128value(this), if_rhsisnotsimd128value(this);
-            Branch(Word32Equal(lhs_instance_type, rhs_instance_type),
-                   &if_rhsissimd128value, &if_rhsisnotsimd128value);
-
-            Bind(&if_rhsissimd128value);
-            {
-              // Both {lhs} and {rhs} is a Simd128Value.
-              GenerateEqual_Simd128Value_HeapObject(
-                  this, lhs, lhs_map, rhs, rhs_map, &if_equal, &if_notequal);
-            }
-
-            Bind(&if_rhsisnotsimd128value);
-            {
-              // Check if the {rhs} is a JSReceiver.
-              Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
-              STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-              Branch(IsJSReceiverInstanceType(rhs_instance_type),
-                     &if_rhsisreceiver, &if_rhsisnotreceiver);
-
-              Bind(&if_rhsisreceiver);
-              {
-                // The {lhs} is a Primitive and the {rhs} is a JSReceiver.
-                // Swapping {lhs} and {rhs} is not observable and doesn't
-                // matter for the result, so we can just swap them and use
-                // the JSReceiver handling below (for {lhs} being a JSReceiver).
-                var_lhs.Bind(rhs);
-                var_rhs.Bind(lhs);
-                Goto(&loop);
-              }
-
-              Bind(&if_rhsisnotreceiver);
-              {
-                // The {rhs} is some other Primitive.
-                Goto(&if_notequal);
-              }
-            }
-          }
-
           Bind(&if_lhsisreceiver);
           {
             // Check if the {rhs} is also a JSReceiver.
@@ -8435,10 +7309,8 @@
   return result.value();
 }
 
-compiler::Node* CodeStubAssembler::StrictEqual(ResultMode mode,
-                                               compiler::Node* lhs,
-                                               compiler::Node* rhs,
-                                               compiler::Node* context) {
+Node* CodeStubAssembler::StrictEqual(ResultMode mode, Node* lhs, Node* rhs,
+                                     Node* context) {
   // Here's pseudo-code for the algorithm below in case of kDontNegateResult
   // mode; for kNegateResult mode we properly negate the result.
   //
@@ -8466,10 +7338,6 @@
   //         } else {
   //           return false;
   //         }
-  //       } else if (lhs->IsSimd128()) {
-  //         if (rhs->IsSimd128()) {
-  //           return %StrictEqual(lhs, rhs);
-  //         }
   //       } else {
   //         return false;
   //       }
@@ -8487,8 +7355,6 @@
   //   }
   // }
 
-  typedef compiler::Node Node;
-
   Label if_equal(this), if_notequal(this), end(this);
   Variable result(this, MachineRepresentation::kTagged);
 
@@ -8505,9 +7371,8 @@
 
   Bind(&if_notsame);
   {
-    // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber,
-    // String and Simd128Value they can still be considered equal.
-    Node* number_map = HeapNumberMapConstant();
+    // The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber
+    // and String they can still be considered equal.
 
     // Check if {lhs} is a Smi or a HeapObject.
     Label if_lhsissmi(this), if_lhsisnotsmi(this);
@@ -8520,8 +7385,7 @@
 
       // Check if {lhs} is a HeapNumber.
       Label if_lhsisnumber(this), if_lhsisnotnumber(this);
-      Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
-             &if_lhsisnotnumber);
+      Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
 
       Bind(&if_lhsisnumber);
       {
@@ -8546,8 +7410,7 @@
 
           // Check if {rhs} is also a HeapNumber.
           Label if_rhsisnumber(this), if_rhsisnotnumber(this);
-          Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
-                 &if_rhsisnotnumber);
+          Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
 
           Bind(&if_rhsisnumber);
           {
@@ -8608,26 +7471,7 @@
           }
 
           Bind(&if_lhsisnotstring);
-          {
-            // Check if {lhs} is a Simd128Value.
-            Label if_lhsissimd128value(this), if_lhsisnotsimd128value(this);
-            Branch(Word32Equal(lhs_instance_type,
-                               Int32Constant(SIMD128_VALUE_TYPE)),
-                   &if_lhsissimd128value, &if_lhsisnotsimd128value);
-
-            Bind(&if_lhsissimd128value);
-            {
-              // Load the map of {rhs}.
-              Node* rhs_map = LoadMap(rhs);
-
-              // Check if {rhs} is also a Simd128Value that is equal to {lhs}.
-              GenerateEqual_Simd128Value_HeapObject(
-                  this, lhs, lhs_map, rhs, rhs_map, &if_equal, &if_notequal);
-            }
-
-            Bind(&if_lhsisnotsimd128value);
-            Goto(&if_notequal);
-          }
+          Goto(&if_notequal);
         }
       }
     }
@@ -8652,8 +7496,7 @@
 
         // The {rhs} could be a HeapNumber with the same value as {lhs}.
         Label if_rhsisnumber(this), if_rhsisnotnumber(this);
-        Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
-               &if_rhsisnotnumber);
+        Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
 
         Bind(&if_rhsisnumber);
         {
@@ -8690,14 +7533,12 @@
 // ECMA#sec-samevalue
 // This algorithm differs from the Strict Equality Comparison Algorithm in its
 // treatment of signed zeroes and NaNs.
-compiler::Node* CodeStubAssembler::SameValue(compiler::Node* lhs,
-                                             compiler::Node* rhs,
-                                             compiler::Node* context) {
-  Variable var_result(this, MachineType::PointerRepresentation());
+Node* CodeStubAssembler::SameValue(Node* lhs, Node* rhs, Node* context) {
+  Variable var_result(this, MachineRepresentation::kWord32);
   Label strict_equal(this), out(this);
 
-  Node* const int_false = IntPtrConstant(0);
-  Node* const int_true = IntPtrConstant(1);
+  Node* const int_false = Int32Constant(0);
+  Node* const int_true = Int32Constant(1);
 
   Label if_equal(this), if_notequal(this);
   Branch(WordEqual(lhs, rhs), &if_equal, &if_notequal);
@@ -8727,8 +7568,8 @@
       // Return true iff {rhs} is NaN.
 
       Node* const result =
-          Select(Float64Equal(rhs_float, rhs_float), int_false, int_true,
-                 MachineType::PointerRepresentation());
+          SelectConstant(Float64Equal(rhs_float, rhs_float), int_false,
+                         int_true, MachineRepresentation::kWord32);
       var_result.Bind(result);
       Goto(&out);
     }
@@ -8776,9 +7617,7 @@
   return var_result.value();
 }
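// The numeric core of SameValue above as runnable C++ (a sketch of the spec
// semantics, not the V8 helper): unlike ==, NaN matches itself and +0 is
// distinguished from -0 via the sign bit.
#include <cmath>

inline bool SameValueDoubleSketch(double lhs, double rhs) {
  if (std::isnan(lhs) || std::isnan(rhs)) {
    return std::isnan(lhs) && std::isnan(rhs);  // NaN is same-value as NaN
  }
  if (lhs == 0.0 && rhs == 0.0) {
    return std::signbit(lhs) == std::signbit(rhs);  // keep +0 and -0 apart
  }
  return lhs == rhs;
}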
 
-compiler::Node* CodeStubAssembler::ForInFilter(compiler::Node* key,
-                                               compiler::Node* object,
-                                               compiler::Node* context) {
+Node* CodeStubAssembler::ForInFilter(Node* key, Node* object, Node* context) {
   Label return_undefined(this, Label::kDeferred), return_to_name(this),
       end(this);
 
@@ -8806,13 +7645,9 @@
   return var_result.value();
 }
 
-compiler::Node* CodeStubAssembler::HasProperty(
-    compiler::Node* object, compiler::Node* key, compiler::Node* context,
+Node* CodeStubAssembler::HasProperty(
+    Node* object, Node* key, Node* context,
     Runtime::FunctionId fallback_runtime_function_id) {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-
   Label call_runtime(this, Label::kDeferred), return_true(this),
       return_false(this), end(this);
 
@@ -8860,8 +7695,58 @@
   return result.value();
 }
 
-compiler::Node* CodeStubAssembler::Typeof(compiler::Node* value,
-                                          compiler::Node* context) {
+Node* CodeStubAssembler::ClassOf(Node* value) {
+  Variable var_result(this, MachineRepresentation::kTaggedPointer);
+  Label if_function(this, Label::kDeferred), if_object(this, Label::kDeferred),
+      if_primitive(this, Label::kDeferred), return_result(this);
+
+  // Check if {value} is a Smi.
+  GotoIf(TaggedIsSmi(value), &if_primitive);
+
+  Node* value_map = LoadMap(value);
+  Node* value_instance_type = LoadMapInstanceType(value_map);
+
+  // Check if {value} is a JSFunction or JSBoundFunction.
+  STATIC_ASSERT(LAST_TYPE == LAST_FUNCTION_TYPE);
+  GotoIf(Uint32LessThanOrEqual(Int32Constant(FIRST_FUNCTION_TYPE),
+                               value_instance_type),
+         &if_function);
+
+  // Check if {value} is a primitive HeapObject.
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  GotoIf(Uint32LessThan(value_instance_type,
+                        Int32Constant(FIRST_JS_RECEIVER_TYPE)),
+         &if_primitive);
+
+  // Load the {value}'s constructor, and check that it's a JSFunction.
+  Node* constructor = LoadMapConstructor(value_map);
+  GotoIfNot(IsJSFunction(constructor), &if_object);
+
+  // Return the instance class name for the {constructor}.
+  Node* shared_info =
+      LoadObjectField(constructor, JSFunction::kSharedFunctionInfoOffset);
+  Node* instance_class_name = LoadObjectField(
+      shared_info, SharedFunctionInfo::kInstanceClassNameOffset);
+  var_result.Bind(instance_class_name);
+  Goto(&return_result);
+
+  Bind(&if_function);
+  var_result.Bind(LoadRoot(Heap::kFunction_stringRootIndex));
+  Goto(&return_result);
+
+  Bind(&if_object);
+  var_result.Bind(LoadRoot(Heap::kObject_stringRootIndex));
+  Goto(&return_result);
+
+  Bind(&if_primitive);
+  var_result.Bind(NullConstant());
+  Goto(&return_result);
+
+  Bind(&return_result);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::Typeof(Node* value, Node* context) {
   Variable result_var(this, MachineRepresentation::kTagged);
 
   Label return_number(this, Label::kDeferred), if_oddball(this),
@@ -8886,20 +7771,13 @@
                      Int32Constant(1 << Map::kIsCallable)),
          &return_function);
 
-  GotoUnless(Word32Equal(callable_or_undetectable_mask, Int32Constant(0)),
-             &return_undefined);
+  GotoIfNot(Word32Equal(callable_or_undetectable_mask, Int32Constant(0)),
+            &return_undefined);
 
   GotoIf(IsJSReceiverInstanceType(instance_type), &return_object);
 
   GotoIf(IsStringInstanceType(instance_type), &return_string);
 
-#define SIMD128_BRANCH(TYPE, Type, type, lane_count, lane_type) \
-  Label return_##type(this);                                    \
-  Node* type##_map = HeapConstant(factory()->type##_map());     \
-  GotoIf(WordEqual(map, type##_map), &return_##type);
-  SIMD128_TYPES(SIMD128_BRANCH)
-#undef SIMD128_BRANCH
-
   CSA_ASSERT(this, Word32Equal(instance_type, Int32Constant(SYMBOL_TYPE)));
   result_var.Bind(HeapConstant(isolate()->factory()->symbol_string()));
   Goto(&return_result);
@@ -8941,52 +7819,121 @@
     Goto(&return_result);
   }
 
-#define SIMD128_BIND_RETURN(TYPE, Type, type, lane_count, lane_type)      \
-  Bind(&return_##type);                                                   \
-  {                                                                       \
-    result_var.Bind(HeapConstant(isolate()->factory()->type##_string())); \
-    Goto(&return_result);                                                 \
-  }
-  SIMD128_TYPES(SIMD128_BIND_RETURN)
-#undef SIMD128_BIND_RETURN
-
   Bind(&return_result);
   return result_var.value();
 }
 
-compiler::Node* CodeStubAssembler::InstanceOf(compiler::Node* object,
-                                              compiler::Node* callable,
-                                              compiler::Node* context) {
-  Label return_runtime(this, Label::kDeferred), end(this);
+Node* CodeStubAssembler::GetSuperConstructor(Node* active_function,
+                                             Node* context) {
+  CSA_ASSERT(this, IsJSFunction(active_function));
+
+  Label is_not_constructor(this, Label::kDeferred), out(this);
   Variable result(this, MachineRepresentation::kTagged);
 
-  // Check if no one installed @@hasInstance somewhere.
-  GotoUnless(
-      WordEqual(LoadObjectField(LoadRoot(Heap::kHasInstanceProtectorRootIndex),
-                                PropertyCell::kValueOffset),
-                SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
-      &return_runtime);
+  Node* map = LoadMap(active_function);
+  Node* prototype = LoadMapPrototype(map);
+  Node* prototype_map = LoadMap(prototype);
+  GotoIfNot(IsConstructorMap(prototype_map), &is_not_constructor);
 
-  // Check if {callable} is a valid receiver.
-  GotoIf(TaggedIsSmi(callable), &return_runtime);
-  GotoUnless(IsCallableMap(LoadMap(callable)), &return_runtime);
+  result.Bind(prototype);
+  Goto(&out);
 
-  // Use the inline OrdinaryHasInstance directly.
-  result.Bind(OrdinaryHasInstance(context, callable, object));
-  Goto(&end);
-
-  // TODO(bmeurer): Use GetPropertyStub here once available.
-  Bind(&return_runtime);
+  Bind(&is_not_constructor);
   {
-    result.Bind(CallRuntime(Runtime::kInstanceOf, context, object, callable));
-    Goto(&end);
+    CallRuntime(Runtime::kThrowNotSuperConstructor, context, prototype,
+                active_function);
+    Unreachable();
   }
 
-  Bind(&end);
+  Bind(&out);
   return result.value();
 }
 
-compiler::Node* CodeStubAssembler::NumberInc(compiler::Node* value) {
+Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
+                                    Node* context) {
+  Variable var_result(this, MachineRepresentation::kTagged);
+  Label if_notcallable(this, Label::kDeferred),
+      if_notreceiver(this, Label::kDeferred), if_otherhandler(this),
+      if_nohandler(this, Label::kDeferred), return_true(this),
+      return_false(this), return_result(this, &var_result);
+
+  // Ensure that the {callable} is actually a JSReceiver.
+  GotoIf(TaggedIsSmi(callable), &if_notreceiver);
+  GotoIfNot(IsJSReceiver(callable), &if_notreceiver);
+
+  // Load the @@hasInstance property from {callable}.
+  Node* inst_of_handler = CallStub(CodeFactory::GetProperty(isolate()), context,
+                                   callable, HasInstanceSymbolConstant());
+
+  // Optimize for the likely case where {inst_of_handler} is the builtin
+  // Function.prototype[@@hasInstance] method, and emit a direct call in
+  // that case without any additional checking.
+  Node* native_context = LoadNativeContext(context);
+  Node* function_has_instance =
+      LoadContextElement(native_context, Context::FUNCTION_HAS_INSTANCE_INDEX);
+  GotoIfNot(WordEqual(inst_of_handler, function_has_instance),
+            &if_otherhandler);
+  {
+    // Call to Function.prototype[@@hasInstance] directly.
+    Callable builtin(isolate()->builtins()->FunctionPrototypeHasInstance(),
+                     CallTrampolineDescriptor(isolate()));
+    Node* result = CallJS(builtin, context, inst_of_handler, callable, object);
+    var_result.Bind(result);
+    Goto(&return_result);
+  }
+
+  Bind(&if_otherhandler);
+  {
+    // Check if there's actually an {inst_of_handler}.
+    GotoIf(IsNull(inst_of_handler), &if_nohandler);
+    GotoIf(IsUndefined(inst_of_handler), &if_nohandler);
+
+    // Call the {inst_of_handler} for {callable} and {object}.
+    Node* result = CallJS(
+        CodeFactory::Call(isolate(), ConvertReceiverMode::kNotNullOrUndefined),
+        context, inst_of_handler, callable, object);
+
+    // Convert the {result} to a Boolean.
+    BranchIfToBooleanIsTrue(result, &return_true, &return_false);
+  }
+
+  Bind(&if_nohandler);
+  {
+    // Ensure that the {callable} is actually Callable.
+    GotoIfNot(IsCallable(callable), &if_notcallable);
+
+    // Use the OrdinaryHasInstance algorithm.
+    Node* result = CallStub(CodeFactory::OrdinaryHasInstance(isolate()),
+                            context, callable, object);
+    var_result.Bind(result);
+    Goto(&return_result);
+  }
+
+  Bind(&if_notcallable);
+  {
+    CallRuntime(Runtime::kThrowNonCallableInInstanceOfCheck, context);
+    Unreachable();
+  }
+
+  Bind(&if_notreceiver);
+  {
+    CallRuntime(Runtime::kThrowNonObjectInInstanceOfCheck, context);
+    Unreachable();
+  }
+
+  Bind(&return_true);
+  var_result.Bind(TrueConstant());
+  Goto(&return_result);
+
+  Bind(&return_false);
+  var_result.Bind(FalseConstant());
+  Goto(&return_result);
+
+  Bind(&return_result);
+  return var_result.value();
+}
+
+Node* CodeStubAssembler::NumberInc(Node* value) {
   Variable var_result(this, MachineRepresentation::kTagged),
       var_finc_value(this, MachineRepresentation::kFloat64);
   Label if_issmi(this), if_isnotsmi(this), do_finc(this), end(this);
@@ -9005,7 +7952,7 @@
     Branch(overflow, &if_overflow, &if_notoverflow);
 
     Bind(&if_notoverflow);
-    var_result.Bind(Projection(0, pair));
+    var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
     Goto(&end);
 
     Bind(&if_overflow);
@@ -9038,9 +7985,23 @@
   return var_result.value();
 }
 
-compiler::Node* CodeStubAssembler::CreateArrayIterator(
-    compiler::Node* array, compiler::Node* array_map,
-    compiler::Node* array_type, compiler::Node* context, IterationKind mode) {
+void CodeStubAssembler::GotoIfNotNumber(Node* input, Label* is_not_number) {
+  Label is_number(this);
+  GotoIf(TaggedIsSmi(input), &is_number);
+  Node* input_map = LoadMap(input);
+  Branch(IsHeapNumberMap(input_map), &is_number, is_not_number);
+  Bind(&is_number);
+}
+
+void CodeStubAssembler::GotoIfNumber(Node* input, Label* is_number) {
+  GotoIf(TaggedIsSmi(input), is_number);
+  Node* input_map = LoadMap(input);
+  GotoIf(IsHeapNumberMap(input_map), is_number);
+}
+
+Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
+                                             Node* array_type, Node* context,
+                                             IterationKind mode) {
   int kBaseMapIndex = 0;
   switch (mode) {
     case IterationKind::kKeys:
@@ -9094,7 +8055,8 @@
     Bind(&if_isgeneric);
     {
       Label if_isfast(this), if_isslow(this);
-      BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
+      BranchIfFastJSArray(array, context, FastJSArrayAccessMode::INBOUNDS_READ,
+                          &if_isfast, &if_isslow);
 
       Bind(&if_isfast);
       {
@@ -9128,7 +8090,8 @@
     Bind(&if_isgeneric);
     {
       Label if_isfast(this), if_isslow(this);
-      BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
+      BranchIfFastJSArray(array, context, FastJSArrayAccessMode::INBOUNDS_READ,
+                          &if_isfast, &if_isslow);
 
       Bind(&if_isfast);
       {
@@ -9146,7 +8109,7 @@
           // here, and take the slow path if any fail.
           Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
           DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
-          GotoUnless(
+          GotoIfNot(
               WordEqual(
                   LoadObjectField(protector_cell, PropertyCell::kValueOffset),
                   SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
@@ -9157,13 +8120,13 @@
           Node* prototype = LoadMapPrototype(array_map);
           Node* array_prototype = LoadContextElement(
               native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
-          GotoUnless(WordEqual(prototype, array_prototype), &if_isslow);
+          GotoIfNot(WordEqual(prototype, array_prototype), &if_isslow);
 
           Node* map = LoadMap(prototype);
           prototype = LoadMapPrototype(map);
           Node* object_prototype = LoadContextElement(
               native_context, Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
-          GotoUnless(WordEqual(prototype, object_prototype), &if_isslow);
+          GotoIfNot(WordEqual(prototype, object_prototype), &if_isslow);
 
           map = LoadMap(prototype);
           prototype = LoadMapPrototype(map);
@@ -9173,7 +8136,7 @@
         {
           Node* map_index =
               IntPtrAdd(IntPtrConstant(kBaseMapIndex + kFastIteratorOffset),
-                        LoadMapElementsKind(array_map));
+                        ChangeUint32ToWord(LoadMapElementsKind(array_map)));
           CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
                                map_index, IntPtrConstant(kBaseMapIndex +
                                                          kFastIteratorOffset)));
@@ -9201,7 +8164,7 @@
     {
       Node* map_index =
           IntPtrAdd(IntPtrConstant(kBaseMapIndex - UINT8_ELEMENTS),
-                    LoadMapElementsKind(array_map));
+                    ChangeUint32ToWord(LoadMapElementsKind(array_map)));
       CSA_ASSERT(
           this, IntPtrLessThan(map_index, IntPtrConstant(kBaseMapIndex +
                                                          kFastIteratorOffset)));
@@ -9215,9 +8178,8 @@
 
   Bind(&allocate_iterator);
   {
-    Node* map =
-        LoadFixedArrayElement(LoadNativeContext(context), var_map_index.value(),
-                              0, CodeStubAssembler::INTPTR_PARAMETERS);
+    Node* map = LoadFixedArrayElement(LoadNativeContext(context),
+                                      var_map_index.value());
     var_result.Bind(AllocateJSArrayIterator(array, var_array_map.value(), map));
     Goto(&return_result);
   }
@@ -9226,8 +8188,8 @@
   return var_result.value();
 }
 
-compiler::Node* CodeStubAssembler::AllocateJSArrayIterator(
-    compiler::Node* array, compiler::Node* array_map, compiler::Node* map) {
+Node* CodeStubAssembler::AllocateJSArrayIterator(Node* array, Node* array_map,
+                                                 Node* map) {
   Node* iterator = Allocate(JSArrayIterator::kSize);
   StoreMapNoWriteBarrier(iterator, map);
   StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOffset,
@@ -9243,96 +8205,94 @@
   return iterator;
 }
 
-compiler::Node* CodeStubAssembler::IsDetachedBuffer(compiler::Node* buffer) {
+Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
   CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE));
 
   Node* buffer_bit_field = LoadObjectField(
       buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32());
-  Node* was_neutered_mask = Int32Constant(JSArrayBuffer::WasNeutered::kMask);
-
-  return Word32NotEqual(Word32And(buffer_bit_field, was_neutered_mask),
-                        Int32Constant(0));
+  return IsSetWord32<JSArrayBuffer::WasNeutered>(buffer_bit_field);
 }
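// Sketch of the IsSetWord32 pattern the rewritten IsDetachedBuffer relies on:
// a one-bit BitField in the buffer's bit_field word is tested by masking
// (JSArrayBuffer::WasNeutered supplies the mask in the real code; the names
// below are illustrative).
#include <cstdint>

inline bool IsSetWord32Sketch(uint32_t word, uint32_t mask) {
  return (word & mask) != 0;
}
// e.g. bool detached = IsSetWord32Sketch(bit_field, kWasNeuteredMaskSketch);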
 
-CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
-                                     compiler::Node* argc,
+CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler, Node* argc,
+                                     Node* fp,
                                      CodeStubAssembler::ParameterMode mode)
     : assembler_(assembler),
+      argc_mode_(mode),
       argc_(argc),
       arguments_(nullptr),
-      fp_(assembler->LoadFramePointer()) {
-  compiler::Node* offset = assembler->ElementOffsetFromIndex(
+      fp_(fp != nullptr ? fp : assembler->LoadFramePointer()) {
+  Node* offset = assembler->ElementOffsetFromIndex(
       argc_, FAST_ELEMENTS, mode,
       (StandardFrameConstants::kFixedSlotCountAboveFp - 1) * kPointerSize);
-  arguments_ = assembler_->IntPtrAddFoldConstants(fp_, offset);
-  if (mode == CodeStubAssembler::INTEGER_PARAMETERS) {
-    argc_ = assembler->ChangeInt32ToIntPtr(argc_);
-  } else if (mode == CodeStubAssembler::SMI_PARAMETERS) {
-    argc_ = assembler->SmiUntag(argc_);
-  }
+  arguments_ = assembler_->IntPtrAdd(fp_, offset);
 }
 
-compiler::Node* CodeStubArguments::GetReceiver() {
+Node* CodeStubArguments::GetReceiver() const {
   return assembler_->Load(MachineType::AnyTagged(), arguments_,
                           assembler_->IntPtrConstant(kPointerSize));
 }
 
-compiler::Node* CodeStubArguments::AtIndex(
-    compiler::Node* index, CodeStubAssembler::ParameterMode mode) {
+Node* CodeStubArguments::AtIndexPtr(
+    Node* index, CodeStubAssembler::ParameterMode mode) const {
   typedef compiler::Node Node;
-  Node* negated_index = assembler_->IntPtrSubFoldConstants(
-      assembler_->IntPtrOrSmiConstant(0, mode), index);
+  Node* negated_index = assembler_->IntPtrOrSmiSub(
+      assembler_->IntPtrOrSmiConstant(0, mode), index, mode);
   Node* offset =
       assembler_->ElementOffsetFromIndex(negated_index, FAST_ELEMENTS, mode, 0);
-  return assembler_->Load(MachineType::AnyTagged(), arguments_, offset);
+  return assembler_->IntPtrAdd(arguments_, offset);
 }
 
-compiler::Node* CodeStubArguments::AtIndex(int index) {
+Node* CodeStubArguments::AtIndex(Node* index,
+                                 CodeStubAssembler::ParameterMode mode) const {
+  DCHECK_EQ(argc_mode_, mode);
+  CSA_ASSERT(assembler_,
+             assembler_->UintPtrOrSmiLessThan(index, GetLength(), mode));
+  return assembler_->Load(MachineType::AnyTagged(), AtIndexPtr(index, mode));
+}
+
+Node* CodeStubArguments::AtIndex(int index) const {
   return AtIndex(assembler_->IntPtrConstant(index));
 }
 
-void CodeStubArguments::ForEach(const CodeStubAssembler::VariableList& vars,
-                                CodeStubArguments::ForEachBodyFunction body,
-                                compiler::Node* first, compiler::Node* last,
-                                CodeStubAssembler::ParameterMode mode) {
+void CodeStubArguments::ForEach(
+    const CodeStubAssembler::VariableList& vars,
+    const CodeStubArguments::ForEachBodyFunction& body, Node* first, Node* last,
+    CodeStubAssembler::ParameterMode mode) {
   assembler_->Comment("CodeStubArguments::ForEach");
-  DCHECK_IMPLIES(first == nullptr || last == nullptr,
-                 mode == CodeStubAssembler::INTPTR_PARAMETERS);
   if (first == nullptr) {
     first = assembler_->IntPtrOrSmiConstant(0, mode);
   }
   if (last == nullptr) {
+    DCHECK_EQ(mode, argc_mode_);
     last = argc_;
   }
-  compiler::Node* start = assembler_->IntPtrSubFoldConstants(
+  Node* start = assembler_->IntPtrSub(
       arguments_,
       assembler_->ElementOffsetFromIndex(first, FAST_ELEMENTS, mode));
-  compiler::Node* end = assembler_->IntPtrSubFoldConstants(
+  Node* end = assembler_->IntPtrSub(
       arguments_,
       assembler_->ElementOffsetFromIndex(last, FAST_ELEMENTS, mode));
-  assembler_->BuildFastLoop(
-      vars, MachineType::PointerRepresentation(), start, end,
-      [body](CodeStubAssembler* assembler, compiler::Node* current) {
-        Node* arg = assembler->Load(MachineType::AnyTagged(), current);
-        body(assembler, arg);
-      },
-      -kPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost);
+  assembler_->BuildFastLoop(vars, start, end,
+                            [this, &body](Node* current) {
+                              Node* arg = assembler_->Load(
+                                  MachineType::AnyTagged(), current);
+                              body(arg);
+                            },
+                            -kPointerSize, CodeStubAssembler::INTPTR_PARAMETERS,
+                            CodeStubAssembler::IndexAdvanceMode::kPost);
 }
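+
+// Usage sketch (illustrative only): summing the arguments of a builtin into
+// a tagged variable. |argc| is assumed to be an intptr, and the trailing
+// constructor parameters are assumed to take their defaults.
+//
+//   CodeStubArguments args(this, argc);
+//   Variable var_sum(this, MachineRepresentation::kTagged);
+//   var_sum.Bind(SmiConstant(0));
+//   args.ForEach(CodeStubAssembler::VariableList({&var_sum}, zone()),
+//                [&](Node* arg) {
+//                  var_sum.Bind(SmiAdd(var_sum.value(), arg));
+//                });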
 
-void CodeStubArguments::PopAndReturn(compiler::Node* value) {
+void CodeStubArguments::PopAndReturn(Node* value) {
   assembler_->PopAndReturn(
-      assembler_->IntPtrAddFoldConstants(argc_, assembler_->IntPtrConstant(1)),
-      value);
+      assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1)), value);
 }
 
-compiler::Node* CodeStubAssembler::IsFastElementsKind(
-    compiler::Node* elements_kind) {
+Node* CodeStubAssembler::IsFastElementsKind(Node* elements_kind) {
   return Uint32LessThanOrEqual(elements_kind,
                                Int32Constant(LAST_FAST_ELEMENTS_KIND));
 }
 
-compiler::Node* CodeStubAssembler::IsHoleyFastElementsKind(
-    compiler::Node* elements_kind) {
+Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) {
   CSA_ASSERT(this, IsFastElementsKind(elements_kind));
 
   STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
@@ -9344,5 +8304,110 @@
   return Word32Equal(holey_elements, Int32Constant(1));
 }
 
+Node* CodeStubAssembler::IsDebugActive() {
+  Node* is_debug_active = Load(
+      MachineType::Uint8(),
+      ExternalConstant(ExternalReference::debug_is_active_address(isolate())));
+  return Word32NotEqual(is_debug_active, Int32Constant(0));
+}
+
+Node* CodeStubAssembler::IsPromiseHookEnabledOrDebugIsActive() {
+  Node* const promise_hook_or_debug_is_active =
+      Load(MachineType::Uint8(),
+           ExternalConstant(
+               ExternalReference::promise_hook_or_debug_is_active_address(
+                   isolate())));
+  return Word32NotEqual(promise_hook_or_debug_is_active, Int32Constant(0));
+}
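+
+// Usage sketch (illustrative only; the label is hypothetical):
+//
+//   GotoIf(IsPromiseHookEnabledOrDebugIsActive(), &run_slow_path);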
+
+Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
+                                                           Node* shared_info,
+                                                           Node* context) {
+  Node* const code = BitcastTaggedToWord(
+      LoadObjectField(shared_info, SharedFunctionInfo::kCodeOffset));
+  Node* const code_entry =
+      IntPtrAdd(code, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+
+  Node* const fun = Allocate(JSFunction::kSize);
+  StoreMapNoWriteBarrier(fun, map);
+  StoreObjectFieldRoot(fun, JSObject::kPropertiesOffset,
+                       Heap::kEmptyFixedArrayRootIndex);
+  StoreObjectFieldRoot(fun, JSObject::kElementsOffset,
+                       Heap::kEmptyFixedArrayRootIndex);
+  StoreObjectFieldRoot(fun, JSFunction::kFeedbackVectorOffset,
+                       Heap::kUndefinedCellRootIndex);
+  StoreObjectFieldRoot(fun, JSFunction::kPrototypeOrInitialMapOffset,
+                       Heap::kTheHoleValueRootIndex);
+  StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
+                                 shared_info);
+  StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
+  StoreObjectFieldNoWriteBarrier(fun, JSFunction::kCodeEntryOffset, code_entry,
+                                 MachineType::PointerRepresentation());
+  StoreObjectFieldRoot(fun, JSFunction::kNextFunctionLinkOffset,
+                       Heap::kUndefinedValueRootIndex);
+
+  return fun;
+}
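+
+// Usage sketch (illustrative only): allocating a closure for a known
+// SharedFunctionInfo; the choice of map index here is an assumption.
+//
+//   Node* const native_context = LoadNativeContext(context);
+//   Node* const map = LoadContextElement(
+//       native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+//   Node* const fun = AllocateFunctionWithMapAndContext(map, shared, context);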
+
+Node* CodeStubAssembler::AllocatePromiseReactionJobInfo(
+    Node* value, Node* tasks, Node* deferred_promise, Node* deferred_on_resolve,
+    Node* deferred_on_reject, Node* context) {
+  Node* const result = Allocate(PromiseReactionJobInfo::kSize);
+  StoreMapNoWriteBarrier(result, Heap::kPromiseReactionJobInfoMapRootIndex);
+  StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kValueOffset,
+                                 value);
+  StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kTasksOffset,
+                                 tasks);
+  StoreObjectFieldNoWriteBarrier(
+      result, PromiseReactionJobInfo::kDeferredPromiseOffset, deferred_promise);
+  StoreObjectFieldNoWriteBarrier(
+      result, PromiseReactionJobInfo::kDeferredOnResolveOffset,
+      deferred_on_resolve);
+  StoreObjectFieldNoWriteBarrier(
+      result, PromiseReactionJobInfo::kDeferredOnRejectOffset,
+      deferred_on_reject);
+  StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kContextOffset,
+                                 context);
+  return result;
+}
+
+Node* CodeStubAssembler::MarkerIsFrameType(Node* marker_or_function,
+                                           StackFrame::Type frame_type) {
+  return WordEqual(marker_or_function,
+                   IntPtrConstant(StackFrame::TypeToMarker(frame_type)));
+}
+
+Node* CodeStubAssembler::MarkerIsNotFrameType(Node* marker_or_function,
+                                              StackFrame::Type frame_type) {
+  return WordNotEqual(marker_or_function,
+                      IntPtrConstant(StackFrame::TypeToMarker(frame_type)));
+}
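+
+// Usage sketch (illustrative only; the marker load and labels are
+// hypothetical):
+//
+//   Node* marker_or_function = Load(
+//       MachineType::AnyTagged(), parent_frame_pointer,
+//       IntPtrConstant(CommonFrameConstants::kContextOrFrameTypeOffset));
+//   Branch(
+//       MarkerIsFrameType(marker_or_function, StackFrame::ARGUMENTS_ADAPTOR),
+//       &if_adaptor_frame, &if_normal_frame);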
+
+void CodeStubAssembler::Print(const char* s) {
+#ifdef DEBUG
+  std::string formatted(s);
+  formatted += "\n";
+  Handle<String> string = isolate()->factory()->NewStringFromAsciiChecked(
+      formatted.c_str(), TENURED);
+  CallRuntime(Runtime::kGlobalPrint, NoContextConstant(), HeapConstant(string));
+#endif
+}
+
+void CodeStubAssembler::Print(const char* prefix, Node* tagged_value) {
+#ifdef DEBUG
+  if (prefix != nullptr) {
+    std::string formatted(prefix);
+    formatted += ": ";
+    Handle<String> string = isolate()->factory()->NewStringFromAsciiChecked(
+        formatted.c_str(), TENURED);
+    CallRuntime(Runtime::kGlobalPrint, NoContextConstant(),
+                HeapConstant(string));
+  }
+  CallRuntime(Runtime::kDebugPrint, NoContextConstant(), tagged_value);
+#endif
+}
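+
+// Debugging sketch (illustrative only): both Print overloads emit code only
+// in DEBUG builds, so they can be left in place while bisecting a stub.
+//
+//   Print("reached fast path");
+//   Print("receiver", receiver);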
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/code-stub-assembler.h b/src/code-stub-assembler.h
index f8f2686..52e8583 100644
--- a/src/code-stub-assembler.h
+++ b/src/code-stub-assembler.h
@@ -15,26 +15,37 @@
 namespace internal {
 
 class CallInterfaceDescriptor;
+class CodeStubArguments;
 class StatsCounter;
 class StubCache;
 
 enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
 
-#define HEAP_CONSTANT_LIST(V)                 \
-  V(BooleanMap, BooleanMap)                   \
-  V(CodeMap, CodeMap)                         \
-  V(empty_string, EmptyString)                \
-  V(EmptyFixedArray, EmptyFixedArray)         \
-  V(FalseValue, False)                        \
-  V(FixedArrayMap, FixedArrayMap)             \
-  V(FixedCOWArrayMap, FixedCOWArrayMap)       \
-  V(FixedDoubleArrayMap, FixedDoubleArrayMap) \
-  V(HeapNumberMap, HeapNumberMap)             \
-  V(MinusZeroValue, MinusZero)                \
-  V(NanValue, Nan)                            \
-  V(NullValue, Null)                          \
-  V(TheHoleValue, TheHole)                    \
-  V(TrueValue, True)                          \
+#define HEAP_CONSTANT_LIST(V)                         \
+  V(AccessorInfoMap, AccessorInfoMap)                 \
+  V(AllocationSiteMap, AllocationSiteMap)             \
+  V(BooleanMap, BooleanMap)                           \
+  V(CodeMap, CodeMap)                                 \
+  V(empty_string, EmptyString)                        \
+  V(EmptyFixedArray, EmptyFixedArray)                 \
+  V(FalseValue, False)                                \
+  V(FixedArrayMap, FixedArrayMap)                     \
+  V(FixedCOWArrayMap, FixedCOWArrayMap)               \
+  V(FixedDoubleArrayMap, FixedDoubleArrayMap)         \
+  V(FunctionTemplateInfoMap, FunctionTemplateInfoMap) \
+  V(has_instance_symbol, HasInstanceSymbol)           \
+  V(HeapNumberMap, HeapNumberMap)                     \
+  V(NoClosuresCellMap, NoClosuresCellMap)             \
+  V(OneClosureCellMap, OneClosureCellMap)             \
+  V(ManyClosuresCellMap, ManyClosuresCellMap)         \
+  V(MinusZeroValue, MinusZero)                        \
+  V(NanValue, Nan)                                    \
+  V(NullValue, Null)                                  \
+  V(SymbolMap, SymbolMap)                             \
+  V(TheHoleValue, TheHole)                            \
+  V(TrueValue, True)                                  \
+  V(Tuple2Map, Tuple2Map)                             \
+  V(Tuple3Map, Tuple3Map)                             \
   V(UndefinedValue, Undefined)
 
 // Provides JavaScript-specific "macro-assembler" functionality on top of the
@@ -44,29 +55,20 @@
 // from a compiler directory OWNER).
 class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
  public:
-  // Create with CallStub linkage.
-  // |result_size| specifies the number of results returned by the stub.
-  // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
-  CodeStubAssembler(Isolate* isolate, Zone* zone,
-                    const CallInterfaceDescriptor& descriptor,
-                    Code::Flags flags, const char* name,
-                    size_t result_size = 1);
+  typedef compiler::Node Node;
 
-  // Create with JSCall linkage.
-  CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
-                    Code::Flags flags, const char* name);
+  explicit CodeStubAssembler(compiler::CodeAssemblerState* state);
 
   enum AllocationFlag : uint8_t {
     kNone = 0,
     kDoubleAlignment = 1,
-    kPretenured = 1 << 1
+    kPretenured = 1 << 1,
+    kAllowLargeObjectAllocation = 1 << 2,
   };
 
   typedef base::Flags<AllocationFlag> AllocationFlags;
 
-  // TODO(ishell): Fix all loads/stores from arrays by int32 offsets/indices
-  // and eventually remove INTEGER_PARAMETERS in favour of INTPTR_PARAMETERS.
-  enum ParameterMode { INTEGER_PARAMETERS, SMI_PARAMETERS, INTPTR_PARAMETERS };
+  enum ParameterMode { SMI_PARAMETERS, INTPTR_PARAMETERS };
 
   // On 32-bit platforms, there is a slight performance advantage to doing all
   // of the array offset/index arithmetic with SMIs, since it's possible
@@ -78,437 +80,518 @@
     return Is64() ? INTPTR_PARAMETERS : SMI_PARAMETERS;
   }
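+
+  // Usage sketch (illustrative only): pick the cheapest mode once, then
+  // thread it through the index arithmetic helpers below.
+  //
+  //   ParameterMode mode = OptimalParameterMode();
+  //   Node* length = TaggedToParameter(LoadJSArrayLength(array), mode);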
 
-  compiler::Node* UntagParameter(compiler::Node* value, ParameterMode mode) {
-    if (mode != SMI_PARAMETERS) value = SmiUntag(value);
+  MachineRepresentation ParameterRepresentation(ParameterMode mode) const {
+    return mode == INTPTR_PARAMETERS ? MachineType::PointerRepresentation()
+                                     : MachineRepresentation::kTaggedSigned;
+  }
+
+  MachineRepresentation OptimalParameterRepresentation() const {
+    return ParameterRepresentation(OptimalParameterMode());
+  }
+
+  Node* ParameterToWord(Node* value, ParameterMode mode) {
+    if (mode == SMI_PARAMETERS) value = SmiUntag(value);
     return value;
   }
 
-  compiler::Node* TagParameter(compiler::Node* value, ParameterMode mode) {
+  Node* WordToParameter(Node* value, ParameterMode mode) {
+    if (mode == SMI_PARAMETERS) value = SmiTag(value);
+    return value;
+  }
+
+  Node* ParameterToTagged(Node* value, ParameterMode mode) {
     if (mode != SMI_PARAMETERS) value = SmiTag(value);
     return value;
   }
 
-  compiler::Node* NoContextConstant();
-#define HEAP_CONSTANT_ACCESSOR(rootName, name) compiler::Node* name##Constant();
+  Node* TaggedToParameter(Node* value, ParameterMode mode) {
+    if (mode != SMI_PARAMETERS) value = SmiUntag(value);
+    return value;
+  }
+
+#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
+  Node* OpName(Node* a, Node* b, ParameterMode mode) {   \
+    if (mode == SMI_PARAMETERS) {                        \
+      return SmiOpName(a, b);                            \
+    } else {                                             \
+      DCHECK_EQ(INTPTR_PARAMETERS, mode);                \
+      return IntPtrOpName(a, b);                         \
+    }                                                    \
+  }
+  PARAMETER_BINOP(IntPtrOrSmiMin, IntPtrMin, SmiMin)
+  PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd)
+  PARAMETER_BINOP(IntPtrOrSmiSub, IntPtrSub, SmiSub)
+  PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan)
+  PARAMETER_BINOP(IntPtrOrSmiLessThanOrEqual, IntPtrLessThanOrEqual,
+                  SmiLessThanOrEqual)
+  PARAMETER_BINOP(IntPtrOrSmiGreaterThan, IntPtrGreaterThan, SmiGreaterThan)
+  PARAMETER_BINOP(IntPtrOrSmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual,
+                  SmiGreaterThanOrEqual)
+  PARAMETER_BINOP(UintPtrOrSmiLessThan, UintPtrLessThan, SmiBelow)
+  PARAMETER_BINOP(UintPtrOrSmiGreaterThanOrEqual, UintPtrGreaterThanOrEqual,
+                  SmiAboveOrEqual)
+#undef PARAMETER_BINOP
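+
+  // Usage sketch (illustrative only): advancing an index by one regardless
+  // of the parameter mode in effect.
+  //
+  //   Node* next = IntPtrOrSmiAdd(index, IntPtrOrSmiConstant(1, mode), mode);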
+
+  Node* NoContextConstant();
+#define HEAP_CONSTANT_ACCESSOR(rootName, name) Node* name##Constant();
   HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR)
 #undef HEAP_CONSTANT_ACCESSOR
 
-#define HEAP_CONSTANT_TEST(rootName, name) \
-  compiler::Node* Is##name(compiler::Node* value);
+#define HEAP_CONSTANT_TEST(rootName, name) Node* Is##name(Node* value);
   HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST)
 #undef HEAP_CONSTANT_TEST
 
-  compiler::Node* HashSeed();
-  compiler::Node* StaleRegisterConstant();
+  Node* HashSeed();
+  Node* StaleRegisterConstant();
 
-  compiler::Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
+  Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
 
-  compiler::Node* IntPtrAddFoldConstants(compiler::Node* left,
-                                         compiler::Node* right);
-  compiler::Node* IntPtrSubFoldConstants(compiler::Node* left,
-                                         compiler::Node* right);
+  bool IsIntPtrOrSmiConstantZero(Node* test);
+
   // Round the 32-bit payload of the provided word up to the next power of two.
-  compiler::Node* IntPtrRoundUpToPowerOfTwo32(compiler::Node* value);
-  compiler::Node* IntPtrMax(compiler::Node* left, compiler::Node* right);
+  Node* IntPtrRoundUpToPowerOfTwo32(Node* value);
+  // Select the maximum of the two provided IntPtr values.
+  Node* IntPtrMax(Node* left, Node* right);
+  // Select the minimum of the two provided IntPtr values.
+  Node* IntPtrMin(Node* left, Node* right);
 
   // Float64 operations.
-  compiler::Node* Float64Ceil(compiler::Node* x);
-  compiler::Node* Float64Floor(compiler::Node* x);
-  compiler::Node* Float64Round(compiler::Node* x);
-  compiler::Node* Float64RoundToEven(compiler::Node* x);
-  compiler::Node* Float64Trunc(compiler::Node* x);
+  Node* Float64Ceil(Node* x);
+  Node* Float64Floor(Node* x);
+  Node* Float64Round(Node* x);
+  Node* Float64RoundToEven(Node* x);
+  Node* Float64Trunc(Node* x);
 
   // Tag a Word as a Smi value.
-  compiler::Node* SmiTag(compiler::Node* value);
+  Node* SmiTag(Node* value);
   // Untag a Smi value as a Word.
-  compiler::Node* SmiUntag(compiler::Node* value);
+  Node* SmiUntag(Node* value);
 
   // Smi conversions.
-  compiler::Node* SmiToFloat64(compiler::Node* value);
-  compiler::Node* SmiFromWord(compiler::Node* value) { return SmiTag(value); }
-  compiler::Node* SmiFromWord32(compiler::Node* value);
-  compiler::Node* SmiToWord(compiler::Node* value) { return SmiUntag(value); }
-  compiler::Node* SmiToWord32(compiler::Node* value);
+  Node* SmiToFloat64(Node* value);
+  Node* SmiFromWord(Node* value) { return SmiTag(value); }
+  Node* SmiFromWord32(Node* value);
+  Node* SmiToWord(Node* value) { return SmiUntag(value); }
+  Node* SmiToWord32(Node* value);
 
   // Smi operations.
-  compiler::Node* SmiAdd(compiler::Node* a, compiler::Node* b);
-  compiler::Node* SmiSub(compiler::Node* a, compiler::Node* b);
-  compiler::Node* SmiEqual(compiler::Node* a, compiler::Node* b);
-  compiler::Node* SmiAbove(compiler::Node* a, compiler::Node* b);
-  compiler::Node* SmiAboveOrEqual(compiler::Node* a, compiler::Node* b);
-  compiler::Node* SmiBelow(compiler::Node* a, compiler::Node* b);
-  compiler::Node* SmiLessThan(compiler::Node* a, compiler::Node* b);
-  compiler::Node* SmiLessThanOrEqual(compiler::Node* a, compiler::Node* b);
-  compiler::Node* SmiMax(compiler::Node* a, compiler::Node* b);
-  compiler::Node* SmiMin(compiler::Node* a, compiler::Node* b);
-  // Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
-  compiler::Node* SmiMod(compiler::Node* a, compiler::Node* b);
-  // Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
-  compiler::Node* SmiMul(compiler::Node* a, compiler::Node* b);
-  compiler::Node* SmiOr(compiler::Node* a, compiler::Node* b) {
-    return BitcastWordToTaggedSigned(
-        WordOr(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
+#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName)                  \
+  Node* SmiOpName(Node* a, Node* b) {                                  \
+    return BitcastWordToTaggedSigned(                                  \
+        IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b))); \
+  }
+  SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd)
+  SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub)
+  SMI_ARITHMETIC_BINOP(SmiAnd, WordAnd)
+  SMI_ARITHMETIC_BINOP(SmiOr, WordOr)
+#undef SMI_ARITHMETIC_BINOP
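+
+  // The helpers above are sound because a Smi keeps its payload in the upper
+  // bits above a zero tag bit, so word-level add/sub/and/or on two Smis
+  // yields the correctly tagged Smi result (assuming no overflow).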
+
+  Node* SmiShl(Node* a, int shift) {
+    return BitcastWordToTaggedSigned(WordShl(BitcastTaggedToWord(a), shift));
   }
 
+  Node* SmiShr(Node* a, int shift) {
+    return BitcastWordToTaggedSigned(
+        WordAnd(WordShr(BitcastTaggedToWord(a), shift),
+                BitcastTaggedToWord(SmiConstant(-1))));
+  }
+
+  Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) {
+    if (mode == SMI_PARAMETERS) {
+      return SmiShl(a, shift);
+    } else {
+      DCHECK_EQ(INTPTR_PARAMETERS, mode);
+      return WordShl(a, shift);
+    }
+  }
+
+  Node* WordOrSmiShr(Node* a, int shift, ParameterMode mode) {
+    if (mode == SMI_PARAMETERS) {
+      return SmiShr(a, shift);
+    } else {
+      DCHECK_EQ(INTPTR_PARAMETERS, mode);
+      return WordShr(a, shift);
+    }
+  }
+
+#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName)                       \
+  Node* SmiOpName(Node* a, Node* b) {                                    \
+    return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \
+  }
+  SMI_COMPARISON_OP(SmiEqual, WordEqual)
+  SMI_COMPARISON_OP(SmiNotEqual, WordNotEqual)
+  SMI_COMPARISON_OP(SmiAbove, UintPtrGreaterThan)
+  SMI_COMPARISON_OP(SmiAboveOrEqual, UintPtrGreaterThanOrEqual)
+  SMI_COMPARISON_OP(SmiBelow, UintPtrLessThan)
+  SMI_COMPARISON_OP(SmiLessThan, IntPtrLessThan)
+  SMI_COMPARISON_OP(SmiLessThanOrEqual, IntPtrLessThanOrEqual)
+  SMI_COMPARISON_OP(SmiGreaterThan, IntPtrGreaterThan)
+  SMI_COMPARISON_OP(SmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual)
+#undef SMI_COMPARISON_OP
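+
+  // Word-level comparison of two Smis preserves the ordering of the untagged
+  // values, since Smi tagging is a left shift.
+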
+  Node* SmiMax(Node* a, Node* b);
+  Node* SmiMin(Node* a, Node* b);
+  // Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
+  Node* SmiMod(Node* a, Node* b);
+  // Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
+  Node* SmiMul(Node* a, Node* b);
+
   // Smi | HeapNumber operations.
-  compiler::Node* NumberInc(compiler::Node* value);
+  Node* NumberInc(Node* value);
+  void GotoIfNotNumber(Node* value, Label* is_not_number);
+  void GotoIfNumber(Node* value, Label* is_number);
 
   // Allocate an object of the given size.
-  compiler::Node* Allocate(compiler::Node* size, AllocationFlags flags = kNone);
-  compiler::Node* Allocate(int size, AllocationFlags flags = kNone);
-  compiler::Node* InnerAllocate(compiler::Node* previous, int offset);
-  compiler::Node* InnerAllocate(compiler::Node* previous,
-                                compiler::Node* offset);
-  compiler::Node* IsRegularHeapObjectSize(compiler::Node* size);
+  Node* Allocate(Node* size, AllocationFlags flags = kNone);
+  Node* Allocate(int size, AllocationFlags flags = kNone);
+  Node* InnerAllocate(Node* previous, int offset);
+  Node* InnerAllocate(Node* previous, Node* offset);
+  Node* IsRegularHeapObjectSize(Node* size);
 
-  typedef std::function<compiler::Node*()> ConditionBody;
-  void Assert(ConditionBody condition_body, const char* string = nullptr,
+  typedef std::function<Node*()> NodeGenerator;
+
+  void Assert(const NodeGenerator& condition_body, const char* string = nullptr,
               const char* file = nullptr, int line = 0);
 
-  // Check a value for smi-ness
-  compiler::Node* TaggedIsSmi(compiler::Node* a);
-  // Check that the value is a non-negative smi.
-  compiler::Node* WordIsPositiveSmi(compiler::Node* a);
-  // Check that a word has a word-aligned address.
-  compiler::Node* WordIsWordAligned(compiler::Node* word);
-  compiler::Node* WordIsPowerOfTwo(compiler::Node* value);
+  Node* Select(Node* condition, const NodeGenerator& true_body,
+               const NodeGenerator& false_body, MachineRepresentation rep);
 
-  void BranchIfSmiEqual(compiler::Node* a, compiler::Node* b, Label* if_true,
-                        Label* if_false) {
+  Node* SelectConstant(Node* condition, Node* true_value, Node* false_value,
+                       MachineRepresentation rep);
+
+  Node* SelectInt32Constant(Node* condition, int true_value, int false_value);
+  Node* SelectIntPtrConstant(Node* condition, int true_value, int false_value);
+  Node* SelectBooleanConstant(Node* condition);
+  Node* SelectTaggedConstant(Node* condition, Node* true_value,
+                             Node* false_value);
+  Node* SelectSmiConstant(Node* condition, Smi* true_value, Smi* false_value);
+  Node* SelectSmiConstant(Node* condition, int true_value, Smi* false_value) {
+    return SelectSmiConstant(condition, Smi::FromInt(true_value), false_value);
+  }
+  Node* SelectSmiConstant(Node* condition, Smi* true_value, int false_value) {
+    return SelectSmiConstant(condition, true_value, Smi::FromInt(false_value));
+  }
+  Node* SelectSmiConstant(Node* condition, int true_value, int false_value) {
+    return SelectSmiConstant(condition, Smi::FromInt(true_value),
+                             Smi::FromInt(false_value));
+  }
+
+  Node* TruncateWordToWord32(Node* value);
+
+  // Check a value for smi-ness.
+  Node* TaggedIsSmi(Node* a);
+  Node* TaggedIsNotSmi(Node* a);
+  // Check that the value is a non-negative smi.
+  Node* TaggedIsPositiveSmi(Node* a);
+  // Check that the given word is word-aligned.
+  Node* WordIsWordAligned(Node* word);
+  Node* WordIsPowerOfTwo(Node* value);
+
+  void BranchIfSmiEqual(Node* a, Node* b, Label* if_true, Label* if_false) {
     Branch(SmiEqual(a, b), if_true, if_false);
   }
 
-  void BranchIfSmiLessThan(compiler::Node* a, compiler::Node* b, Label* if_true,
-                           Label* if_false) {
+  void BranchIfSmiLessThan(Node* a, Node* b, Label* if_true, Label* if_false) {
     Branch(SmiLessThan(a, b), if_true, if_false);
   }
 
-  void BranchIfSmiLessThanOrEqual(compiler::Node* a, compiler::Node* b,
-                                  Label* if_true, Label* if_false) {
+  void BranchIfSmiLessThanOrEqual(Node* a, Node* b, Label* if_true,
+                                  Label* if_false) {
     Branch(SmiLessThanOrEqual(a, b), if_true, if_false);
   }
 
-  void BranchIfFloat64IsNaN(compiler::Node* value, Label* if_true,
-                            Label* if_false) {
+  void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
     Branch(Float64Equal(value, value), if_false, if_true);
   }
 
   // Branches to {if_true} if ToBoolean applied to {value} yields true,
   // otherwise goes to {if_false}.
-  void BranchIfToBooleanIsTrue(compiler::Node* value, Label* if_true,
-                               Label* if_false);
+  void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false);
 
-  void BranchIfSimd128Equal(compiler::Node* lhs, compiler::Node* lhs_map,
-                            compiler::Node* rhs, compiler::Node* rhs_map,
-                            Label* if_equal, Label* if_notequal);
-  void BranchIfSimd128Equal(compiler::Node* lhs, compiler::Node* rhs,
-                            Label* if_equal, Label* if_notequal) {
-    BranchIfSimd128Equal(lhs, LoadMap(lhs), rhs, LoadMap(rhs), if_equal,
-                         if_notequal);
-  }
+  void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false);
+  void BranchIfJSObject(Node* object, Label* if_true, Label* if_false);
 
-  void BranchIfJSReceiver(compiler::Node* object, Label* if_true,
-                          Label* if_false);
-  void BranchIfJSObject(compiler::Node* object, Label* if_true,
-                        Label* if_false);
-  void BranchIfFastJSArray(compiler::Node* object, compiler::Node* context,
-                           Label* if_true, Label* if_false);
+  enum class FastJSArrayAccessMode { INBOUNDS_READ, ANY_ACCESS };
+  void BranchIfFastJSArray(Node* object, Node* context,
+                           FastJSArrayAccessMode mode, Label* if_true,
+                           Label* if_false);
 
   // Load value from current frame by given offset in bytes.
-  compiler::Node* LoadFromFrame(int offset,
-                                MachineType rep = MachineType::AnyTagged());
+  Node* LoadFromFrame(int offset, MachineType rep = MachineType::AnyTagged());
   // Load value from current parent frame by given offset in bytes.
-  compiler::Node* LoadFromParentFrame(
-      int offset, MachineType rep = MachineType::AnyTagged());
+  Node* LoadFromParentFrame(int offset,
+                            MachineType rep = MachineType::AnyTagged());
 
   // Load an object pointer from a buffer that isn't in the heap.
-  compiler::Node* LoadBufferObject(compiler::Node* buffer, int offset,
-                                   MachineType rep = MachineType::AnyTagged());
+  Node* LoadBufferObject(Node* buffer, int offset,
+                         MachineType rep = MachineType::AnyTagged());
   // Load a field from an object on the heap.
-  compiler::Node* LoadObjectField(compiler::Node* object, int offset,
-                                  MachineType rep = MachineType::AnyTagged());
-  compiler::Node* LoadObjectField(compiler::Node* object,
-                                  compiler::Node* offset,
-                                  MachineType rep = MachineType::AnyTagged());
+  Node* LoadObjectField(Node* object, int offset,
+                        MachineType rep = MachineType::AnyTagged());
+  Node* LoadObjectField(Node* object, Node* offset,
+                        MachineType rep = MachineType::AnyTagged());
   // Load a SMI field and untag it.
-  compiler::Node* LoadAndUntagObjectField(compiler::Node* object, int offset);
+  Node* LoadAndUntagObjectField(Node* object, int offset);
   // Load a SMI field, untag it, and convert to Word32.
-  compiler::Node* LoadAndUntagToWord32ObjectField(compiler::Node* object,
-                                                  int offset);
+  Node* LoadAndUntagToWord32ObjectField(Node* object, int offset);
   // Load a SMI and untag it.
-  compiler::Node* LoadAndUntagSmi(compiler::Node* base, int index);
+  Node* LoadAndUntagSmi(Node* base, int index);
   // Load a SMI root, untag it, and convert to Word32.
-  compiler::Node* LoadAndUntagToWord32Root(Heap::RootListIndex root_index);
+  Node* LoadAndUntagToWord32Root(Heap::RootListIndex root_index);
+
+  // Tag a word as a Smi and store it.
+  Node* StoreAndTagSmi(Node* base, int offset, Node* value);
 
   // Load the floating point value of a HeapNumber.
-  compiler::Node* LoadHeapNumberValue(compiler::Node* object);
+  Node* LoadHeapNumberValue(Node* object);
   // Load the Map of a HeapObject.
-  compiler::Node* LoadMap(compiler::Node* object);
+  Node* LoadMap(Node* object);
   // Load the instance type of a HeapObject.
-  compiler::Node* LoadInstanceType(compiler::Node* object);
+  Node* LoadInstanceType(Node* object);
   // Compare the instance type of the object against the provided one.
-  compiler::Node* HasInstanceType(compiler::Node* object, InstanceType type);
+  Node* HasInstanceType(Node* object, InstanceType type);
+  Node* DoesntHaveInstanceType(Node* object, InstanceType type);
   // Load the properties backing store of a JSObject.
-  compiler::Node* LoadProperties(compiler::Node* object);
+  Node* LoadProperties(Node* object);
   // Load the elements backing store of a JSObject.
-  compiler::Node* LoadElements(compiler::Node* object);
+  Node* LoadElements(Node* object);
   // Load the length of a JSArray instance.
-  compiler::Node* LoadJSArrayLength(compiler::Node* array);
+  Node* LoadJSArrayLength(Node* array);
   // Load the length of a fixed array base instance.
-  compiler::Node* LoadFixedArrayBaseLength(compiler::Node* array);
+  Node* LoadFixedArrayBaseLength(Node* array);
   // Load and untag the length of a fixed array base instance.
-  compiler::Node* LoadAndUntagFixedArrayBaseLength(compiler::Node* array);
+  Node* LoadAndUntagFixedArrayBaseLength(Node* array);
   // Load the bit field of a Map.
-  compiler::Node* LoadMapBitField(compiler::Node* map);
+  Node* LoadMapBitField(Node* map);
   // Load bit field 2 of a map.
-  compiler::Node* LoadMapBitField2(compiler::Node* map);
+  Node* LoadMapBitField2(Node* map);
   // Load bit field 3 of a map.
-  compiler::Node* LoadMapBitField3(compiler::Node* map);
+  Node* LoadMapBitField3(Node* map);
   // Load the instance type of a map.
-  compiler::Node* LoadMapInstanceType(compiler::Node* map);
+  Node* LoadMapInstanceType(Node* map);
   // Load the ElementsKind of a map.
-  compiler::Node* LoadMapElementsKind(compiler::Node* map);
+  Node* LoadMapElementsKind(Node* map);
   // Load the instance descriptors of a map.
-  compiler::Node* LoadMapDescriptors(compiler::Node* map);
+  Node* LoadMapDescriptors(Node* map);
   // Load the prototype of a map.
-  compiler::Node* LoadMapPrototype(compiler::Node* map);
+  Node* LoadMapPrototype(Node* map);
   // Load the prototype info of a map. The result must be checked to
   // determine whether it is a prototype info object.
-  compiler::Node* LoadMapPrototypeInfo(compiler::Node* map,
-                                       Label* if_has_no_proto_info);
+  Node* LoadMapPrototypeInfo(Node* map, Label* if_has_no_proto_info);
   // Load the instance size of a Map.
-  compiler::Node* LoadMapInstanceSize(compiler::Node* map);
+  Node* LoadMapInstanceSize(Node* map);
   // Load the inobject properties count of a Map (valid only for JSObjects).
-  compiler::Node* LoadMapInobjectProperties(compiler::Node* map);
+  Node* LoadMapInobjectProperties(Node* map);
   // Load the constructor function index of a Map (only for primitive maps).
-  compiler::Node* LoadMapConstructorFunctionIndex(compiler::Node* map);
+  Node* LoadMapConstructorFunctionIndex(Node* map);
   // Load the constructor of a Map (equivalent to Map::GetConstructor()).
-  compiler::Node* LoadMapConstructor(compiler::Node* map);
+  Node* LoadMapConstructor(Node* map);
+  // Loads a value from the specially encoded integer fields in the
+  // SharedFunctionInfo object.
+  // TODO(danno): This currently only works for the integer fields that are
+  // mapped to the upper part of 64-bit words. We should customize
+  // SFI::BodyDescriptor and store int32 values directly.
+  Node* LoadSharedFunctionInfoSpecialField(Node* shared, int offset,
+                                           ParameterMode param_mode);
+
   // Check if the map is set for slow properties.
-  compiler::Node* IsDictionaryMap(compiler::Node* map);
+  Node* IsDictionaryMap(Node* map);
 
   // Load the hash field of a name as a uint32 value.
-  compiler::Node* LoadNameHashField(compiler::Node* name);
+  Node* LoadNameHashField(Node* name);
   // Load the hash value of a name as a uint32 value.
   // If the {if_hash_not_computed} label is specified, this also checks
   // whether the hash has actually been computed.
-  compiler::Node* LoadNameHash(compiler::Node* name,
-                               Label* if_hash_not_computed = nullptr);
+  Node* LoadNameHash(Node* name, Label* if_hash_not_computed = nullptr);
 
   // Load length field of a String object.
-  compiler::Node* LoadStringLength(compiler::Node* object);
+  Node* LoadStringLength(Node* object);
   // Load value field of a JSValue object.
-  compiler::Node* LoadJSValueValue(compiler::Node* object);
+  Node* LoadJSValueValue(Node* object);
   // Load value field of a WeakCell object.
-  compiler::Node* LoadWeakCellValueUnchecked(compiler::Node* weak_cell);
-  compiler::Node* LoadWeakCellValue(compiler::Node* weak_cell,
-                                    Label* if_cleared = nullptr);
+  Node* LoadWeakCellValueUnchecked(Node* weak_cell);
+  Node* LoadWeakCellValue(Node* weak_cell, Label* if_cleared = nullptr);
 
   // Load an array element from a FixedArray.
-  compiler::Node* LoadFixedArrayElement(
-      compiler::Node* object, compiler::Node* index, int additional_offset = 0,
-      ParameterMode parameter_mode = INTEGER_PARAMETERS);
+  Node* LoadFixedArrayElement(Node* object, Node* index,
+                              int additional_offset = 0,
+                              ParameterMode parameter_mode = INTPTR_PARAMETERS);
+  Node* LoadFixedArrayElement(Node* object, int index,
+                              int additional_offset = 0) {
+    return LoadFixedArrayElement(object, IntPtrConstant(index),
+                                 additional_offset);
+  }
   // Load an array element from a FixedArray, untag it and return it as Word32.
-  compiler::Node* LoadAndUntagToWord32FixedArrayElement(
-      compiler::Node* object, compiler::Node* index, int additional_offset = 0,
-      ParameterMode parameter_mode = INTEGER_PARAMETERS);
+  Node* LoadAndUntagToWord32FixedArrayElement(
+      Node* object, Node* index, int additional_offset = 0,
+      ParameterMode parameter_mode = INTPTR_PARAMETERS);
   // Load an array element from a FixedDoubleArray.
-  compiler::Node* LoadFixedDoubleArrayElement(
-      compiler::Node* object, compiler::Node* index, MachineType machine_type,
+  Node* LoadFixedDoubleArrayElement(
+      Node* object, Node* index, MachineType machine_type,
       int additional_offset = 0,
-      ParameterMode parameter_mode = INTEGER_PARAMETERS,
+      ParameterMode parameter_mode = INTPTR_PARAMETERS,
       Label* if_hole = nullptr);
 
   // Load a Float64 value from the address |base| + |offset|. If the value is
   // a double hole then jump to |if_hole|. If |machine_type| is None then only
   // the hole check is generated.
-  compiler::Node* LoadDoubleWithHoleCheck(
-      compiler::Node* base, compiler::Node* offset, Label* if_hole,
+  Node* LoadDoubleWithHoleCheck(
+      Node* base, Node* offset, Label* if_hole,
       MachineType machine_type = MachineType::Float64());
-  compiler::Node* LoadFixedTypedArrayElement(
-      compiler::Node* data_pointer, compiler::Node* index_node,
-      ElementsKind elements_kind,
-      ParameterMode parameter_mode = INTEGER_PARAMETERS);
+  Node* LoadFixedTypedArrayElement(
+      Node* data_pointer, Node* index_node, ElementsKind elements_kind,
+      ParameterMode parameter_mode = INTPTR_PARAMETERS);
 
   // Context manipulation
-  compiler::Node* LoadContextElement(compiler::Node* context, int slot_index);
-  compiler::Node* LoadContextElement(compiler::Node* context,
-                                     compiler::Node* slot_index);
-  compiler::Node* StoreContextElement(compiler::Node* context, int slot_index,
-                                      compiler::Node* value);
-  compiler::Node* StoreContextElement(compiler::Node* context,
-                                      compiler::Node* slot_index,
-                                      compiler::Node* value);
-  compiler::Node* LoadNativeContext(compiler::Node* context);
+  Node* LoadContextElement(Node* context, int slot_index);
+  Node* LoadContextElement(Node* context, Node* slot_index);
+  Node* StoreContextElement(Node* context, int slot_index, Node* value);
+  Node* StoreContextElement(Node* context, Node* slot_index, Node* value);
+  Node* StoreContextElementNoWriteBarrier(Node* context, int slot_index,
+                                          Node* value);
+  Node* LoadNativeContext(Node* context);
 
-  compiler::Node* LoadJSArrayElementsMap(ElementsKind kind,
-                                         compiler::Node* native_context);
+  Node* LoadJSArrayElementsMap(ElementsKind kind, Node* native_context);
 
   // Store the floating point value of a HeapNumber.
-  compiler::Node* StoreHeapNumberValue(compiler::Node* object,
-                                       compiler::Node* value);
+  Node* StoreHeapNumberValue(Node* object, Node* value);
   // Store a field to an object on the heap.
-  compiler::Node* StoreObjectField(
-      compiler::Node* object, int offset, compiler::Node* value);
-  compiler::Node* StoreObjectField(compiler::Node* object,
-                                   compiler::Node* offset,
-                                   compiler::Node* value);
-  compiler::Node* StoreObjectFieldNoWriteBarrier(
-      compiler::Node* object, int offset, compiler::Node* value,
+  Node* StoreObjectField(Node* object, int offset, Node* value);
+  Node* StoreObjectField(Node* object, Node* offset, Node* value);
+  Node* StoreObjectFieldNoWriteBarrier(
+      Node* object, int offset, Node* value,
       MachineRepresentation rep = MachineRepresentation::kTagged);
-  compiler::Node* StoreObjectFieldNoWriteBarrier(
-      compiler::Node* object, compiler::Node* offset, compiler::Node* value,
+  Node* StoreObjectFieldNoWriteBarrier(
+      Node* object, Node* offset, Node* value,
       MachineRepresentation rep = MachineRepresentation::kTagged);
   // Store the Map of a HeapObject.
-  compiler::Node* StoreMapNoWriteBarrier(compiler::Node* object,
-                                         compiler::Node* map);
-  compiler::Node* StoreObjectFieldRoot(compiler::Node* object, int offset,
-                                       Heap::RootListIndex root);
+  Node* StoreMap(Node* object, Node* map);
+  Node* StoreMapNoWriteBarrier(Node* object,
+                               Heap::RootListIndex map_root_index);
+  Node* StoreMapNoWriteBarrier(Node* object, Node* map);
+  Node* StoreObjectFieldRoot(Node* object, int offset,
+                             Heap::RootListIndex root);
   // Store an array element to a FixedArray.
-  compiler::Node* StoreFixedArrayElement(
-      compiler::Node* object, int index, compiler::Node* value,
-      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
-      ParameterMode parameter_mode = INTEGER_PARAMETERS) {
-    return StoreFixedArrayElement(object, Int32Constant(index), value,
-                                  barrier_mode, parameter_mode);
+  Node* StoreFixedArrayElement(
+      Node* object, int index, Node* value,
+      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
+    return StoreFixedArrayElement(object, IntPtrConstant(index), value,
+                                  barrier_mode);
   }
 
-  compiler::Node* StoreFixedArrayElement(
-      compiler::Node* object, compiler::Node* index, compiler::Node* value,
+  Node* StoreFixedArrayElement(
+      Node* object, Node* index, Node* value,
       WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
-      ParameterMode parameter_mode = INTEGER_PARAMETERS);
+      int additional_offset = 0,
+      ParameterMode parameter_mode = INTPTR_PARAMETERS);
 
-  compiler::Node* StoreFixedDoubleArrayElement(
-      compiler::Node* object, compiler::Node* index, compiler::Node* value,
-      ParameterMode parameter_mode = INTEGER_PARAMETERS);
+  Node* StoreFixedDoubleArrayElement(
+      Node* object, Node* index, Node* value,
+      ParameterMode parameter_mode = INTPTR_PARAMETERS);
 
-  void StoreFieldsNoWriteBarrier(compiler::Node* start_address,
-                                 compiler::Node* end_address,
-                                 compiler::Node* value);
+  Node* BuildAppendJSArray(ElementsKind kind, Node* context, Node* array,
+                           CodeStubArguments& args, Variable& arg_index,
+                           Label* bailout);
+
+  void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address,
+                                 Node* value);
 
   // Allocate a HeapNumber without initializing its value.
-  compiler::Node* AllocateHeapNumber(MutableMode mode = IMMUTABLE);
+  Node* AllocateHeapNumber(MutableMode mode = IMMUTABLE);
   // Allocate a HeapNumber with a specific value.
-  compiler::Node* AllocateHeapNumberWithValue(compiler::Node* value,
-                                              MutableMode mode = IMMUTABLE);
+  Node* AllocateHeapNumberWithValue(Node* value, MutableMode mode = IMMUTABLE);
   // Allocate a SeqOneByteString with the given length.
-  compiler::Node* AllocateSeqOneByteString(int length,
-                                           AllocationFlags flags = kNone);
-  compiler::Node* AllocateSeqOneByteString(
-      compiler::Node* context, compiler::Node* length,
-      ParameterMode mode = INTPTR_PARAMETERS, AllocationFlags flags = kNone);
+  Node* AllocateSeqOneByteString(int length, AllocationFlags flags = kNone);
+  Node* AllocateSeqOneByteString(Node* context, Node* length,
+                                 ParameterMode mode = INTPTR_PARAMETERS,
+                                 AllocationFlags flags = kNone);
   // Allocate a SeqTwoByteString with the given length.
-  compiler::Node* AllocateSeqTwoByteString(int length,
-                                           AllocationFlags flags = kNone);
-  compiler::Node* AllocateSeqTwoByteString(
-      compiler::Node* context, compiler::Node* length,
-      ParameterMode mode = INTPTR_PARAMETERS, AllocationFlags flags = kNone);
+  Node* AllocateSeqTwoByteString(int length, AllocationFlags flags = kNone);
+  Node* AllocateSeqTwoByteString(Node* context, Node* length,
+                                 ParameterMode mode = INTPTR_PARAMETERS,
+                                 AllocationFlags flags = kNone);
 
   // Allocate a SlicedOneByteString with the given length, parent and offset.
   // |length| and |offset| are expected to be tagged.
-  compiler::Node* AllocateSlicedOneByteString(compiler::Node* length,
-                                              compiler::Node* parent,
-                                              compiler::Node* offset);
+  Node* AllocateSlicedOneByteString(Node* length, Node* parent, Node* offset);
   // Allocate a SlicedTwoByteString with the given length, parent and offset.
   // |length| and |offset| are expected to be tagged.
-  compiler::Node* AllocateSlicedTwoByteString(compiler::Node* length,
-                                              compiler::Node* parent,
-                                              compiler::Node* offset);
+  Node* AllocateSlicedTwoByteString(Node* length, Node* parent, Node* offset);
 
   // Allocate a one-byte ConsString with the given length, first and second
   // parts. |length| is expected to be tagged, and |first| and |second| are
   // expected to be one-byte strings.
-  compiler::Node* AllocateOneByteConsString(compiler::Node* length,
-                                            compiler::Node* first,
-                                            compiler::Node* second,
-                                            AllocationFlags flags = kNone);
+  Node* AllocateOneByteConsString(Node* length, Node* first, Node* second,
+                                  AllocationFlags flags = kNone);
   // Allocate a two-byte ConsString with the given length, first and second
   // parts. |length| is expected to be tagged, and |first| and |second| are
   // expected to be two-byte strings.
-  compiler::Node* AllocateTwoByteConsString(compiler::Node* length,
-                                            compiler::Node* first,
-                                            compiler::Node* second,
-                                            AllocationFlags flags = kNone);
+  Node* AllocateTwoByteConsString(Node* length, Node* first, Node* second,
+                                  AllocationFlags flags = kNone);
 
   // Allocate an appropriate one- or two-byte ConsString with the first and
   // second parts specified by |first| and |second|.
-  compiler::Node* NewConsString(compiler::Node* context, compiler::Node* length,
-                                compiler::Node* left, compiler::Node* right,
-                                AllocationFlags flags = kNone);
+  Node* NewConsString(Node* context, Node* length, Node* left, Node* right,
+                      AllocationFlags flags = kNone);
 
   // Allocate a RegExpResult with the given length (the number of captures,
   // including the match itself), index (the index where the match starts),
   // and input string. |length| and |index| are expected to be tagged, and
   // |input| must be a string.
-  compiler::Node* AllocateRegExpResult(compiler::Node* context,
-                                       compiler::Node* length,
-                                       compiler::Node* index,
-                                       compiler::Node* input);
+  Node* AllocateRegExpResult(Node* context, Node* length, Node* index,
+                             Node* input);
 
-  compiler::Node* AllocateNameDictionary(int capacity);
-  compiler::Node* AllocateNameDictionary(compiler::Node* capacity);
+  Node* AllocateNameDictionary(int capacity);
+  Node* AllocateNameDictionary(Node* capacity);
 
-  compiler::Node* AllocateJSObjectFromMap(compiler::Node* map,
-                                          compiler::Node* properties = nullptr,
-                                          compiler::Node* elements = nullptr);
+  Node* AllocateJSObjectFromMap(Node* map, Node* properties = nullptr,
+                                Node* elements = nullptr,
+                                AllocationFlags flags = kNone);
 
-  void InitializeJSObjectFromMap(compiler::Node* object, compiler::Node* map,
-                                 compiler::Node* size,
-                                 compiler::Node* properties = nullptr,
-                                 compiler::Node* elements = nullptr);
+  void InitializeJSObjectFromMap(Node* object, Node* map, Node* size,
+                                 Node* properties = nullptr,
+                                 Node* elements = nullptr);
 
-  void InitializeJSObjectBody(compiler::Node* object, compiler::Node* map,
-                              compiler::Node* size,
+  void InitializeJSObjectBody(Node* object, Node* map, Node* size,
                               int start_offset = JSObject::kHeaderSize);
 
   // Allocate a JSArray without elements and initialize the header fields.
-  compiler::Node* AllocateUninitializedJSArrayWithoutElements(
-      ElementsKind kind, compiler::Node* array_map, compiler::Node* length,
-      compiler::Node* allocation_site);
+  Node* AllocateUninitializedJSArrayWithoutElements(ElementsKind kind,
+                                                    Node* array_map,
+                                                    Node* length,
+                                                    Node* allocation_site);
   // Allocate and return a JSArray with initialized header fields and its
   // uninitialized elements.
   // The ParameterMode argument is only used for the capacity parameter.
-  std::pair<compiler::Node*, compiler::Node*>
-  AllocateUninitializedJSArrayWithElements(
-      ElementsKind kind, compiler::Node* array_map, compiler::Node* length,
-      compiler::Node* allocation_site, compiler::Node* capacity,
-      ParameterMode capacity_mode = INTEGER_PARAMETERS);
+  std::pair<Node*, Node*> AllocateUninitializedJSArrayWithElements(
+      ElementsKind kind, Node* array_map, Node* length, Node* allocation_site,
+      Node* capacity, ParameterMode capacity_mode = INTPTR_PARAMETERS);
   // Allocate a JSArray and fill elements with the hole.
   // The ParameterMode argument is only used for the capacity parameter.
-  compiler::Node* AllocateJSArray(
-      ElementsKind kind, compiler::Node* array_map, compiler::Node* capacity,
-      compiler::Node* length, compiler::Node* allocation_site = nullptr,
-      ParameterMode capacity_mode = INTEGER_PARAMETERS);
+  Node* AllocateJSArray(ElementsKind kind, Node* array_map, Node* capacity,
+                        Node* length, Node* allocation_site = nullptr,
+                        ParameterMode capacity_mode = INTPTR_PARAMETERS);
 
-  compiler::Node* AllocateFixedArray(ElementsKind kind,
-                                     compiler::Node* capacity,
-                                     ParameterMode mode = INTEGER_PARAMETERS,
-                                     AllocationFlags flags = kNone);
+  Node* AllocateFixedArray(ElementsKind kind, Node* capacity,
+                           ParameterMode mode = INTPTR_PARAMETERS,
+                           AllocationFlags flags = kNone);
 
   // Perform CreateArrayIterator (ES6 #sec-createarrayiterator).
-  compiler::Node* CreateArrayIterator(compiler::Node* array,
-                                      compiler::Node* array_map,
-                                      compiler::Node* array_type,
-                                      compiler::Node* context,
-                                      IterationKind mode);
+  Node* CreateArrayIterator(Node* array, Node* array_map, Node* array_type,
+                            Node* context, IterationKind mode);
 
-  compiler::Node* AllocateJSArrayIterator(compiler::Node* array,
-                                          compiler::Node* array_map,
-                                          compiler::Node* map);
+  Node* AllocateJSArrayIterator(Node* array, Node* array_map, Node* map);
 
-  void FillFixedArrayWithValue(ElementsKind kind, compiler::Node* array,
-                               compiler::Node* from_index,
-                               compiler::Node* to_index,
+  void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index,
+                               Node* to_index,
                                Heap::RootListIndex value_root_index,
-                               ParameterMode mode = INTEGER_PARAMETERS);
+                               ParameterMode mode = INTPTR_PARAMETERS);
 
   // Copies all elements from |from_array| of |length| size to
   // |to_array| of the same size respecting the elements kind.
   void CopyFixedArrayElements(
-      ElementsKind kind, compiler::Node* from_array, compiler::Node* to_array,
-      compiler::Node* length,
+      ElementsKind kind, Node* from_array, Node* to_array, Node* length,
       WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
-      ParameterMode mode = INTEGER_PARAMETERS) {
+      ParameterMode mode = INTPTR_PARAMETERS) {
     CopyFixedArrayElements(kind, from_array, kind, to_array, length, length,
                            barrier_mode, mode);
   }
@@ -516,11 +599,10 @@
   // Copies |element_count| elements from |from_array| to |to_array| of
   // |capacity| size respecting both array's elements kinds.
   void CopyFixedArrayElements(
-      ElementsKind from_kind, compiler::Node* from_array, ElementsKind to_kind,
-      compiler::Node* to_array, compiler::Node* element_count,
-      compiler::Node* capacity,
+      ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
+      Node* to_array, Node* element_count, Node* capacity,
       WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
-      ParameterMode mode = INTEGER_PARAMETERS);
+      ParameterMode mode = INTPTR_PARAMETERS);
 
   // Copies |character_count| elements from |from_string| to |to_string|
   // starting at the |from_index|'th character. |from_string| and |to_string|
@@ -530,11 +612,9 @@
   // intptr_ts depending on |mode| s.t. 0 <= |from_index| <= |from_index| +
   // |character_count| <= from_string.length and 0 <= |to_index| <= |to_index| +
   // |character_count| <= to_string.length.
-  void CopyStringCharacters(compiler::Node* from_string,
-                            compiler::Node* to_string,
-                            compiler::Node* from_index,
-                            compiler::Node* to_index,
-                            compiler::Node* character_count,
+  void CopyStringCharacters(Node* from_string, Node* to_string,
+                            Node* from_index, Node* to_index,
+                            Node* character_count,
                             String::Encoding from_encoding,
                             String::Encoding to_encoding, ParameterMode mode);
 
@@ -542,154 +622,157 @@
   // (NOTE: not index!), does a hole check if |if_hole| is provided and
   // converts the value so that it becomes ready for storing to an array of
   // |to_kind| elements.
-  compiler::Node* LoadElementAndPrepareForStore(compiler::Node* array,
-                                                compiler::Node* offset,
-                                                ElementsKind from_kind,
-                                                ElementsKind to_kind,
-                                                Label* if_hole);
+  Node* LoadElementAndPrepareForStore(Node* array, Node* offset,
+                                      ElementsKind from_kind,
+                                      ElementsKind to_kind, Label* if_hole);
 
-  compiler::Node* CalculateNewElementsCapacity(
-      compiler::Node* old_capacity, ParameterMode mode = INTEGER_PARAMETERS);
+  Node* CalculateNewElementsCapacity(Node* old_capacity,
+                                     ParameterMode mode = INTPTR_PARAMETERS);
 
   // Tries to grow the |elements| array of given |object| to store the |key|
   // or bails out if the growing gap is too big. Returns new elements.
-  compiler::Node* TryGrowElementsCapacity(compiler::Node* object,
-                                          compiler::Node* elements,
-                                          ElementsKind kind,
-                                          compiler::Node* key, Label* bailout);
+  Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind,
+                                Node* key, Label* bailout);
 
   // Tries to grow the |capacity|-length |elements| array of given |object|
   // to store the |key| or bails out if the growing gap is too big. Returns
   // new elements.
-  compiler::Node* TryGrowElementsCapacity(compiler::Node* object,
-                                          compiler::Node* elements,
-                                          ElementsKind kind,
-                                          compiler::Node* key,
-                                          compiler::Node* capacity,
-                                          ParameterMode mode, Label* bailout);
+  Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind,
+                                Node* key, Node* capacity, ParameterMode mode,
+                                Label* bailout);
 
   // Grows elements capacity of given object. Returns new elements.
-  compiler::Node* GrowElementsCapacity(
-      compiler::Node* object, compiler::Node* elements, ElementsKind from_kind,
-      ElementsKind to_kind, compiler::Node* capacity,
-      compiler::Node* new_capacity, ParameterMode mode, Label* bailout);
+  Node* GrowElementsCapacity(Node* object, Node* elements,
+                             ElementsKind from_kind, ElementsKind to_kind,
+                             Node* capacity, Node* new_capacity,
+                             ParameterMode mode, Label* bailout);
 
   // Allocation site manipulation
-  void InitializeAllocationMemento(compiler::Node* base_allocation,
+  void InitializeAllocationMemento(Node* base_allocation,
                                    int base_allocation_size,
-                                   compiler::Node* allocation_site);
+                                   Node* allocation_site);
 
-  compiler::Node* TryTaggedToFloat64(compiler::Node* value,
-                                     Label* if_valueisnotnumber);
-  compiler::Node* TruncateTaggedToFloat64(compiler::Node* context,
-                                          compiler::Node* value);
-  compiler::Node* TruncateTaggedToWord32(compiler::Node* context,
-                                         compiler::Node* value);
+  Node* TryTaggedToFloat64(Node* value, Label* if_valueisnotnumber);
+  Node* TruncateTaggedToFloat64(Node* context, Node* value);
+  Node* TruncateTaggedToWord32(Node* context, Node* value);
   // Truncate the floating point value of a HeapNumber to an Int32.
-  compiler::Node* TruncateHeapNumberValueToWord32(compiler::Node* object);
+  Node* TruncateHeapNumberValueToWord32(Node* object);
 
   // Conversions.
-  compiler::Node* ChangeFloat64ToTagged(compiler::Node* value);
-  compiler::Node* ChangeInt32ToTagged(compiler::Node* value);
-  compiler::Node* ChangeUint32ToTagged(compiler::Node* value);
+  Node* ChangeFloat64ToTagged(Node* value);
+  Node* ChangeInt32ToTagged(Node* value);
+  Node* ChangeUint32ToTagged(Node* value);
+  Node* ChangeNumberToFloat64(Node* value);
 
   // Type conversions.
   // Throws a TypeError for {method_name} if {value} is not coercible to Object,
   // or returns the {value} converted to a String otherwise.
-  compiler::Node* ToThisString(compiler::Node* context, compiler::Node* value,
-                               char const* method_name);
+  Node* ToThisString(Node* context, Node* value, char const* method_name);
   // Throws a TypeError for {method_name} if {value} is neither of the given
   // {primitive_type} nor a JSValue wrapping a value of {primitive_type}, or
   // returns the {value} (or wrapped value) otherwise.
-  compiler::Node* ToThisValue(compiler::Node* context, compiler::Node* value,
-                              PrimitiveType primitive_type,
-                              char const* method_name);
+  Node* ToThisValue(Node* context, Node* value, PrimitiveType primitive_type,
+                    char const* method_name);
 
   // Throws a TypeError for {method_name} if {value} is not of the given
   // instance type. Returns {value}'s map.
-  compiler::Node* ThrowIfNotInstanceType(compiler::Node* context,
-                                         compiler::Node* value,
-                                         InstanceType instance_type,
-                                         char const* method_name);
+  Node* ThrowIfNotInstanceType(Node* context, Node* value,
+                               InstanceType instance_type,
+                               char const* method_name);
 
   // Type checks.
   // Check whether the map is for an object with special properties, such as a
   // JSProxy or an object with interceptors.
-  compiler::Node* IsSpecialReceiverMap(compiler::Node* map);
-  compiler::Node* IsSpecialReceiverInstanceType(compiler::Node* instance_type);
-  compiler::Node* IsStringInstanceType(compiler::Node* instance_type);
-  compiler::Node* IsString(compiler::Node* object);
-  compiler::Node* IsJSObject(compiler::Node* object);
-  compiler::Node* IsJSGlobalProxy(compiler::Node* object);
-  compiler::Node* IsJSReceiverInstanceType(compiler::Node* instance_type);
-  compiler::Node* IsJSReceiver(compiler::Node* object);
-  compiler::Node* IsMap(compiler::Node* object);
-  compiler::Node* IsCallableMap(compiler::Node* map);
-  compiler::Node* IsName(compiler::Node* object);
-  compiler::Node* IsJSValue(compiler::Node* object);
-  compiler::Node* IsJSArray(compiler::Node* object);
-  compiler::Node* IsNativeContext(compiler::Node* object);
-  compiler::Node* IsWeakCell(compiler::Node* object);
-  compiler::Node* IsFixedDoubleArray(compiler::Node* object);
-  compiler::Node* IsHashTable(compiler::Node* object);
-  compiler::Node* IsDictionary(compiler::Node* object);
-  compiler::Node* IsUnseededNumberDictionary(compiler::Node* object);
+  Node* InstanceTypeEqual(Node* instance_type, int type);
+  Node* IsSpecialReceiverMap(Node* map);
+  Node* IsSpecialReceiverInstanceType(Node* instance_type);
+  Node* IsStringInstanceType(Node* instance_type);
+  Node* IsString(Node* object);
+  Node* IsJSObject(Node* object);
+  Node* IsJSGlobalProxy(Node* object);
+  Node* IsJSReceiverInstanceType(Node* instance_type);
+  Node* IsJSReceiver(Node* object);
+  Node* IsJSReceiverMap(Node* map);
+  Node* IsMap(Node* object);
+  Node* IsCallableMap(Node* map);
+  Node* IsCallable(Node* object);
+  Node* IsBoolean(Node* object);
+  Node* IsHeapNumber(Node* object);
+  Node* IsName(Node* object);
+  Node* IsSymbol(Node* object);
+  Node* IsPrivateSymbol(Node* object);
+  Node* IsJSValue(Node* object);
+  Node* IsJSArray(Node* object);
+  Node* IsNativeContext(Node* object);
+  Node* IsWeakCell(Node* object);
+  Node* IsFixedDoubleArray(Node* object);
+  Node* IsHashTable(Node* object);
+  Node* IsDictionary(Node* object);
+  Node* IsUnseededNumberDictionary(Node* object);
+  Node* IsConstructorMap(Node* map);
+  Node* IsJSFunction(Node* object);
 
   // ElementsKind helpers:
-  compiler::Node* IsFastElementsKind(compiler::Node* elements_kind);
-  compiler::Node* IsHoleyFastElementsKind(compiler::Node* elements_kind);
+  Node* IsFastElementsKind(Node* elements_kind);
+  Node* IsHoleyFastElementsKind(Node* elements_kind);
 
   // String helpers.
   // Load a character from a String (might flatten a ConsString).
-  compiler::Node* StringCharCodeAt(compiler::Node* string,
-                                   compiler::Node* smi_index);
+  Node* StringCharCodeAt(Node* string, Node* index,
+                         ParameterMode parameter_mode = SMI_PARAMETERS);
   // Return the single character string with only {code}.
-  compiler::Node* StringFromCharCode(compiler::Node* code);
+  Node* StringFromCharCode(Node* code);
   // Return a new string object which holds a substring containing the range
   // [from,to[ of string.  |from| and |to| are expected to be tagged.
-  compiler::Node* SubString(compiler::Node* context, compiler::Node* string,
-                            compiler::Node* from, compiler::Node* to);
+  Node* SubString(Node* context, Node* string, Node* from, Node* to);
 
   // Return a new string object produced by concatenating |first| with |second|.
-  compiler::Node* StringAdd(compiler::Node* context, compiler::Node* first,
-                            compiler::Node* second,
-                            AllocationFlags flags = kNone);
+  Node* StringAdd(Node* context, Node* first, Node* second,
+                  AllocationFlags flags = kNone);
 
-  // Return the first index >= {from} at which {needle_char} was found in
-  // {string}, or -1 if such an index does not exist. The returned value is
-  // a Smi, {string} is expected to be a String, {needle_char} is an intptr,
-  // and {from} is expected to be tagged.
-  compiler::Node* StringIndexOfChar(compiler::Node* context,
-                                    compiler::Node* string,
-                                    compiler::Node* needle_char,
-                                    compiler::Node* from);
+  // Unpack the external string, returning a pointer that (offset-wise) looks
+  // like a sequential string.
+  // Note that this pointer is not tagged and does not point to a real
+  // sequential string instance, and may only be used to access the string
+  // data. The pointer is GC-safe as long as a reference to the containing
+  // ExternalString is live.
+  // |string| must be an external string. Bails out for short external strings.
+  Node* TryDerefExternalString(Node* const string, Node* const instance_type,
+                               Label* if_bailout);
 
-  compiler::Node* StringFromCodePoint(compiler::Node* codepoint,
-                                      UnicodeEncoding encoding);
+  // Check if |var_string| has an indirect (thin or flat cons) string type,
+  // and unpack it if so.
+  void MaybeDerefIndirectString(Variable* var_string, Node* instance_type,
+                                Variable* var_did_something);
+  // Check if |var_left| or |var_right| has an indirect (thin or flat cons)
+  // string type, and unpack it/them if so. Fall through if nothing was done.
+  void MaybeDerefIndirectStrings(Variable* var_left, Node* left_instance_type,
+                                 Variable* var_right, Node* right_instance_type,
+                                 Label* did_something);
+
+  Node* StringFromCodePoint(Node* codepoint, UnicodeEncoding encoding);
 
   // Type conversion helpers.
   // Convert a String to a Number.
-  compiler::Node* StringToNumber(compiler::Node* context,
-                                 compiler::Node* input);
-  compiler::Node* NumberToString(compiler::Node* context,
-                                 compiler::Node* input);
+  Node* StringToNumber(Node* context, Node* input);
+  Node* NumberToString(Node* context, Node* input);
   // Convert an object to a name.
-  compiler::Node* ToName(compiler::Node* context, compiler::Node* input);
+  Node* ToName(Node* context, Node* input);
   // Convert a Non-Number object to a Number.
-  compiler::Node* NonNumberToNumber(compiler::Node* context,
-                                    compiler::Node* input);
+  Node* NonNumberToNumber(Node* context, Node* input);
   // Convert any object to a Number.
-  compiler::Node* ToNumber(compiler::Node* context, compiler::Node* input);
+  Node* ToNumber(Node* context, Node* input);
+
+  // Converts |input| to one of 2^32 integer values in the range 0 through
+  // 2^32-1, inclusive.
+  // ES#sec-touint32
+  Node* ToUint32(Node* context, Node* input);
 
   // Convert any object to a String.
-  compiler::Node* ToString(compiler::Node* context, compiler::Node* input);
+  Node* ToString(Node* context, Node* input);
 
   // Convert any object to a Primitive.
-  compiler::Node* JSReceiverToPrimitive(compiler::Node* context,
-                                        compiler::Node* input);
-
-  // Convert a String to a flat String.
-  compiler::Node* FlattenString(compiler::Node* string);
+  Node* JSReceiverToPrimitive(Node* context, Node* input);
 
   enum ToIntegerTruncationMode {
     kNoTruncation,
@@ -697,127 +780,244 @@
   };
 
   // Convert any object to an Integer.
-  compiler::Node* ToInteger(compiler::Node* context, compiler::Node* input,
-                            ToIntegerTruncationMode mode = kNoTruncation);
+  Node* ToInteger(Node* context, Node* input,
+                  ToIntegerTruncationMode mode = kNoTruncation);
 
   // Returns a node that contains a decoded (unsigned!) value of a bit
   // field |T| in |word32|. Returns result as a uint32 node.
   template <typename T>
-  compiler::Node* DecodeWord32(compiler::Node* word32) {
+  Node* DecodeWord32(Node* word32) {
     return DecodeWord32(word32, T::kShift, T::kMask);
   }
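A minimal usage sketch for the templatized decoder, assuming a BitField-style
type such as Map::ElementsKindBits (anything exposing kShift/kMask constants
works), inside a CodeStubAssembler method:

    Node* bit_field2 = LoadMapBitField2(map);
    Node* elements_kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);

The decoder shifts |word32| right by T::kShift and masks with T::kMask, so the
result is the raw unsigned field value.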
 
   // Returns a node that contains a decoded (unsigned!) value of a bit
   // field |T| in |word|. Returns result as a word-size node.
   template <typename T>
-  compiler::Node* DecodeWord(compiler::Node* word) {
+  Node* DecodeWord(Node* word) {
     return DecodeWord(word, T::kShift, T::kMask);
   }
 
   // Returns a node that contains a decoded (unsigned!) value of a bit
   // field |T| in |word32|. Returns result as a word-size node.
   template <typename T>
-  compiler::Node* DecodeWordFromWord32(compiler::Node* word32) {
+  Node* DecodeWordFromWord32(Node* word32) {
     return DecodeWord<T>(ChangeUint32ToWord(word32));
   }
 
+  // Returns a node that contains a decoded (unsigned!) value of a bit
+  // field |T| in |word|. Returns result as a uint32 node.
+  template <typename T>
+  Node* DecodeWord32FromWord(Node* word) {
+    return TruncateWordToWord32(DecodeWord<T>(word));
+  }
+
   // Decodes an unsigned (!) value from |word32| to a uint32 node.
-  compiler::Node* DecodeWord32(compiler::Node* word32, uint32_t shift,
-                               uint32_t mask);
+  Node* DecodeWord32(Node* word32, uint32_t shift, uint32_t mask);
 
   // Decodes an unsigned (!) value from |word| to a word-size node.
-  compiler::Node* DecodeWord(compiler::Node* word, uint32_t shift,
-                             uint32_t mask);
+  Node* DecodeWord(Node* word, uint32_t shift, uint32_t mask);
 
   // Returns true if any of the |T|'s bits in given |word32| are set.
   template <typename T>
-  compiler::Node* IsSetWord32(compiler::Node* word32) {
+  Node* IsSetWord32(Node* word32) {
     return IsSetWord32(word32, T::kMask);
   }
 
   // Returns true if any of the mask's bits in given |word32| are set.
-  compiler::Node* IsSetWord32(compiler::Node* word32, uint32_t mask) {
+  Node* IsSetWord32(Node* word32, uint32_t mask) {
     return Word32NotEqual(Word32And(word32, Int32Constant(mask)),
                           Int32Constant(0));
   }
 
   // Returns true if any of the |T|'s bits in given |word| are set.
   template <typename T>
-  compiler::Node* IsSetWord(compiler::Node* word) {
-    return WordNotEqual(WordAnd(word, IntPtrConstant(T::kMask)),
-                        IntPtrConstant(0));
+  Node* IsSetWord(Node* word) {
+    return IsSetWord(word, T::kMask);
+  }
+
+  // Returns true if any of the mask's bits in given |word| are set.
+  Node* IsSetWord(Node* word, uint32_t mask) {
+    return WordNotEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0));
+  }
+
+  // Returns true if any of the mask's bits are set in the given Smi.
+  // Smi-encoding of the mask is performed implicitly!
+  Node* IsSetSmi(Node* smi, int untagged_mask) {
+    intptr_t mask_word = bit_cast<intptr_t>(Smi::FromInt(untagged_mask));
+    return WordNotEqual(
+        WordAnd(BitcastTaggedToWord(smi), IntPtrConstant(mask_word)),
+        IntPtrConstant(0));
+  }
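To make the implicit Smi-encoding concrete: with 31-bit Smis a tagged Smi is
the value shifted left by one, so bit_cast<intptr_t>(Smi::FromInt(0x4)) is 0x8,
and IsSetSmi(smi, 0x4) tests bit 3 of the tagged word directly, sparing a
SmiUntag of |smi|. The same reasoning holds for the 32-bit shift used on
64-bit Smi targets.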
+
+  // Returns true if all of the |T|'s bits in given |word32| are clear.
+  template <typename T>
+  Node* IsClearWord32(Node* word32) {
+    return IsClearWord32(word32, T::kMask);
+  }
+
+  // Returns true if all of the mask's bits in given |word32| are clear.
+  Node* IsClearWord32(Node* word32, uint32_t mask) {
+    return Word32Equal(Word32And(word32, Int32Constant(mask)),
+                       Int32Constant(0));
+  }
+
+  // Returns true if all of the |T|'s bits in given |word| are clear.
+  template <typename T>
+  Node* IsClearWord(Node* word) {
+    return IsClearWord(word, T::kMask);
+  }
+
+  // Returns true if all of the mask's bits in given |word| are clear.
+  Node* IsClearWord(Node* word, uint32_t mask) {
+    return WordEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0));
   }
 
   void SetCounter(StatsCounter* counter, int value);
   void IncrementCounter(StatsCounter* counter, int delta);
   void DecrementCounter(StatsCounter* counter, int delta);
 
+  void Increment(Variable& variable, int value = 1,
+                 ParameterMode mode = INTPTR_PARAMETERS);
+
   // Generates "if (false) goto label" code. Useful for marking a label as
   // "live" to avoid assertion failures during graph building. In the resulting
   // code this check will be eliminated.
   void Use(Label* label);
 
   // Various building blocks for stubs doing property lookups.
-  void TryToName(compiler::Node* key, Label* if_keyisindex, Variable* var_index,
-                 Label* if_keyisunique, Label* if_bailout);
+  void TryToName(Node* key, Label* if_keyisindex, Variable* var_index,
+                 Label* if_keyisunique, Variable* var_unique,
+                 Label* if_bailout);
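A sketch of the call pattern under the new signature, assuming a tagged |key|
inside a CodeStubAssembler method; |var_unique| is the new out-parameter that
receives the key in the unique-name case:

    Label if_index(this), if_unique(this), bailout(this);
    Variable var_index(this, MachineType::PointerRepresentation());
    Variable var_unique(this, MachineRepresentation::kTagged);
    TryToName(key, &if_index, &var_index, &if_unique, &var_unique, &bailout);
    Bind(&if_index);  // var_index now holds an intptr element index.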
 
   // Calculates array index for given dictionary entry and entry field.
   // See Dictionary::EntryToIndex().
   template <typename Dictionary>
-  compiler::Node* EntryToIndex(compiler::Node* entry, int field_index);
+  Node* EntryToIndex(Node* entry, int field_index);
   template <typename Dictionary>
-  compiler::Node* EntryToIndex(compiler::Node* entry) {
+  Node* EntryToIndex(Node* entry) {
     return EntryToIndex<Dictionary>(entry, Dictionary::kEntryKeyIndex);
   }
+
+  // Loads the details for the entry with the given key_index.
+  // Returns an untagged int32.
+  template <class ContainerType>
+  Node* LoadDetailsByKeyIndex(Node* container, Node* key_index) {
+    const int kKeyToDetailsOffset =
+        (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
+        kPointerSize;
+    return LoadAndUntagToWord32FixedArrayElement(container, key_index,
+                                                 kKeyToDetailsOffset);
+  }
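As a worked example of the offset arithmetic, assuming the NameDictionary
layout of this era (key at entry slot 0, details at entry slot 2),
kKeyToDetailsOffset evaluates to 2 * kPointerSize, and a lookup result is
consumed as:

    Node* details = LoadDetailsByKeyIndex<NameDictionary>(dictionary, name_index);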
+
+  // Loads the value for the entry with the given key_index.
+  // Returns a tagged value.
+  template <class ContainerType>
+  Node* LoadValueByKeyIndex(Node* container, Node* key_index) {
+    const int kKeyToValueOffset =
+        (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
+        kPointerSize;
+    return LoadFixedArrayElement(container, key_index, kKeyToValueOffset);
+  }
+
+  // Stores the details for the entry with the given key_index.
+  // |details| must be a Smi.
+  template <class ContainerType>
+  void StoreDetailsByKeyIndex(Node* container, Node* key_index, Node* details) {
+    const int kKeyToDetailsOffset =
+        (ContainerType::kEntryDetailsIndex - ContainerType::kEntryKeyIndex) *
+        kPointerSize;
+    StoreFixedArrayElement(container, key_index, details, SKIP_WRITE_BARRIER,
+                           kKeyToDetailsOffset);
+  }
+
+  // Stores the value for the entry with the given key_index.
+  template <class ContainerType>
+  void StoreValueByKeyIndex(Node* container, Node* key_index, Node* value) {
+    const int kKeyToValueOffset =
+        (ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
+        kPointerSize;
+    StoreFixedArrayElement(container, key_index, value, UPDATE_WRITE_BARRIER,
+                           kKeyToValueOffset);
+  }
+
   // Calculate a valid size for a hash table.
-  compiler::Node* HashTableComputeCapacity(compiler::Node* at_least_space_for);
+  Node* HashTableComputeCapacity(Node* at_least_space_for);
+
+  template <class Dictionary>
+  Node* GetNumberOfElements(Node* dictionary);
+
+  template <class Dictionary>
+  void SetNumberOfElements(Node* dictionary, Node* num_elements_smi);
+
+  template <class Dictionary>
+  Node* GetNumberOfDeletedElements(Node* dictionary);
+
+  template <class Dictionary>
+  Node* GetCapacity(Node* dictionary);
+
+  template <class Dictionary>
+  Node* GetNextEnumerationIndex(Node* dictionary);
+
+  template <class Dictionary>
+  void SetNextEnumerationIndex(Node* dictionary, Node* next_enum_index_smi);
 
   // Looks up an entry in a NameDictionaryBase successor. If the entry is found
   // control goes to {if_found} and {var_name_index} contains an index of the
   // key field of the entry found. If the key is not found control goes to
   // {if_not_found}.
   static const int kInlinedDictionaryProbes = 4;
+  enum LookupMode { kFindExisting, kFindInsertionIndex };
   template <typename Dictionary>
-  void NameDictionaryLookup(compiler::Node* dictionary,
-                            compiler::Node* unique_name, Label* if_found,
-                            Variable* var_name_index, Label* if_not_found,
-                            int inlined_probes = kInlinedDictionaryProbes);
+  void NameDictionaryLookup(Node* dictionary, Node* unique_name,
+                            Label* if_found, Variable* var_name_index,
+                            Label* if_not_found,
+                            int inlined_probes = kInlinedDictionaryProbes,
+                            LookupMode mode = kFindExisting);
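A hedged sketch of a lookup, assuming |dictionary| and |unique_name| nodes are
at hand; passing kFindInsertionIndex instead routes control flow to a free
slot suitable for insertion rather than to an existing entry:

    Label found(this), not_found(this);
    Variable var_name_index(this, MachineType::PointerRepresentation());
    NameDictionaryLookup<NameDictionary>(dictionary, unique_name, &found,
                                         &var_name_index, &not_found);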
 
-  compiler::Node* ComputeIntegerHash(compiler::Node* key, compiler::Node* seed);
+  Node* ComputeIntegerHash(Node* key, Node* seed);
 
   template <typename Dictionary>
-  void NumberDictionaryLookup(compiler::Node* dictionary,
-                              compiler::Node* intptr_index, Label* if_found,
-                              Variable* var_entry, Label* if_not_found);
+  void NumberDictionaryLookup(Node* dictionary, Node* intptr_index,
+                              Label* if_found, Variable* var_entry,
+                              Label* if_not_found);
+
+  template <class Dictionary>
+  void FindInsertionEntry(Node* dictionary, Node* key, Variable* var_key_index);
+
+  template <class Dictionary>
+  void InsertEntry(Node* dictionary, Node* key, Node* value, Node* index,
+                   Node* enum_index);
+
+  template <class Dictionary>
+  void Add(Node* dictionary, Node* key, Node* value, Label* bailout);
 
   // Tries to check if {object} has own {unique_name} property.
-  void TryHasOwnProperty(compiler::Node* object, compiler::Node* map,
-                         compiler::Node* instance_type,
-                         compiler::Node* unique_name, Label* if_found,
+  void TryHasOwnProperty(Node* object, Node* map, Node* instance_type,
+                         Node* unique_name, Label* if_found,
                          Label* if_not_found, Label* if_bailout);
 
   // Tries to get {object}'s own {unique_name} property value. If the property
   // is an accessor then it also calls a getter. If the property is a double
   // field it re-wraps value in an immutable heap number.
-  void TryGetOwnProperty(compiler::Node* context, compiler::Node* receiver,
-                         compiler::Node* object, compiler::Node* map,
-                         compiler::Node* instance_type,
-                         compiler::Node* unique_name, Label* if_found,
-                         Variable* var_value, Label* if_not_found,
-                         Label* if_bailout);
+  void TryGetOwnProperty(Node* context, Node* receiver, Node* object, Node* map,
+                         Node* instance_type, Node* unique_name,
+                         Label* if_found, Variable* var_value,
+                         Label* if_not_found, Label* if_bailout);
 
-  void LoadPropertyFromFastObject(compiler::Node* object, compiler::Node* map,
-                                  compiler::Node* descriptors,
-                                  compiler::Node* name_index,
-                                  Variable* var_details, Variable* var_value);
+  Node* GetProperty(Node* context, Node* receiver, Handle<Name> name) {
+    return CallStub(CodeFactory::GetProperty(isolate()), context, receiver,
+                    HeapConstant(name));
+  }
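The GetProperty wrapper above dispatches through the GetProperty stub; a
plausible use, assuming |context| and |receiver| nodes are available
(length_string() is the canonical "length" root string):

    Node* length = GetProperty(context, receiver, factory()->length_string());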
 
-  void LoadPropertyFromNameDictionary(compiler::Node* dictionary,
-                                      compiler::Node* entry,
+  void LoadPropertyFromFastObject(Node* object, Node* map, Node* descriptors,
+                                  Node* name_index, Variable* var_details,
+                                  Variable* var_value);
+
+  void LoadPropertyFromNameDictionary(Node* dictionary, Node* entry,
                                       Variable* var_details,
                                       Variable* var_value);
 
-  void LoadPropertyFromGlobalDictionary(compiler::Node* dictionary,
-                                        compiler::Node* entry,
+  void LoadPropertyFromGlobalDictionary(Node* dictionary, Node* entry,
                                         Variable* var_details,
                                         Variable* var_value, Label* if_deleted);
 
@@ -833,24 +1033,21 @@
   //
   // Note: this code does not check if the global dictionary points to a
   // deleted entry! This has to be done by the caller.
-  void TryLookupProperty(compiler::Node* object, compiler::Node* map,
-                         compiler::Node* instance_type,
-                         compiler::Node* unique_name, Label* if_found_fast,
+  void TryLookupProperty(Node* object, Node* map, Node* instance_type,
+                         Node* unique_name, Label* if_found_fast,
                          Label* if_found_dict, Label* if_found_global,
                          Variable* var_meta_storage, Variable* var_name_index,
                          Label* if_not_found, Label* if_bailout);
 
-  void TryLookupElement(compiler::Node* object, compiler::Node* map,
-                        compiler::Node* instance_type,
-                        compiler::Node* intptr_index, Label* if_found,
+  void TryLookupElement(Node* object, Node* map, Node* instance_type,
+                        Node* intptr_index, Label* if_found,
                         Label* if_not_found, Label* if_bailout);
 
   // This is the type of a lookup-in-holder generator function. In case of a
   // property lookup the {key} is guaranteed to be a unique name and in case of
   // element lookup the key is an Int32 index.
-  typedef std::function<void(compiler::Node* receiver, compiler::Node* holder,
-                             compiler::Node* map, compiler::Node* instance_type,
-                             compiler::Node* key, Label* next_holder,
+  typedef std::function<void(Node* receiver, Node* holder, Node* map,
+                             Node* instance_type, Node* key, Label* next_holder,
                              Label* if_bailout)>
       LookupInHolder;
 
@@ -860,231 +1057,140 @@
   // Upon reaching the end of the prototype chain the control goes to {if_end}.
   // If it can't handle the {receiver}/{key} case then the control goes
   // to {if_bailout}.
-  void TryPrototypeChainLookup(compiler::Node* receiver, compiler::Node* key,
-                               LookupInHolder& lookup_property_in_holder,
-                               LookupInHolder& lookup_element_in_holder,
+  void TryPrototypeChainLookup(Node* receiver, Node* key,
+                               const LookupInHolder& lookup_property_in_holder,
+                               const LookupInHolder& lookup_element_in_holder,
                                Label* if_end, Label* if_bailout);
 
   // Instanceof helpers.
   // ES6 section 7.3.19 OrdinaryHasInstance (C, O)
-  compiler::Node* OrdinaryHasInstance(compiler::Node* context,
-                                      compiler::Node* callable,
-                                      compiler::Node* object);
-
-  // Load/StoreIC helpers.
-  struct LoadICParameters {
-    LoadICParameters(compiler::Node* context, compiler::Node* receiver,
-                     compiler::Node* name, compiler::Node* slot,
-                     compiler::Node* vector)
-        : context(context),
-          receiver(receiver),
-          name(name),
-          slot(slot),
-          vector(vector) {}
-
-    compiler::Node* context;
-    compiler::Node* receiver;
-    compiler::Node* name;
-    compiler::Node* slot;
-    compiler::Node* vector;
-  };
-
-  struct StoreICParameters : public LoadICParameters {
-    StoreICParameters(compiler::Node* context, compiler::Node* receiver,
-                      compiler::Node* name, compiler::Node* value,
-                      compiler::Node* slot, compiler::Node* vector)
-        : LoadICParameters(context, receiver, name, slot, vector),
-          value(value) {}
-    compiler::Node* value;
-  };
+  Node* OrdinaryHasInstance(Node* context, Node* callable, Node* object);
 
   // Load type feedback vector from the stub caller's frame.
-  compiler::Node* LoadTypeFeedbackVectorForStub();
+  Node* LoadFeedbackVectorForStub();
 
   // Update the type feedback vector.
-  void UpdateFeedback(compiler::Node* feedback,
-                      compiler::Node* type_feedback_vector,
-                      compiler::Node* slot_id);
+  void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id);
 
-  compiler::Node* LoadReceiverMap(compiler::Node* receiver);
-
-  // Checks monomorphic case. Returns {feedback} entry of the vector.
-  compiler::Node* TryMonomorphicCase(compiler::Node* slot,
-                                     compiler::Node* vector,
-                                     compiler::Node* receiver_map,
-                                     Label* if_handler, Variable* var_handler,
-                                     Label* if_miss);
-  void HandlePolymorphicCase(compiler::Node* receiver_map,
-                             compiler::Node* feedback, Label* if_handler,
-                             Variable* var_handler, Label* if_miss,
-                             int unroll_count);
-  void HandleKeyedStorePolymorphicCase(compiler::Node* receiver_map,
-                                       compiler::Node* feedback,
-                                       Label* if_handler, Variable* var_handler,
-                                       Label* if_transition_handler,
-                                       Variable* var_transition_map_cell,
-                                       Label* if_miss);
-
-  compiler::Node* StubCachePrimaryOffset(compiler::Node* name,
-                                         compiler::Node* map);
-
-  compiler::Node* StubCacheSecondaryOffset(compiler::Node* name,
-                                           compiler::Node* seed);
-
-  // This enum is used here as a replacement for StubCache::Table to avoid
-  // including stub cache header.
-  enum StubCacheTable : int;
-
-  void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
-                              compiler::Node* entry_offset,
-                              compiler::Node* name, compiler::Node* map,
-                              Label* if_handler, Variable* var_handler,
-                              Label* if_miss);
-
-  void TryProbeStubCache(StubCache* stub_cache, compiler::Node* receiver,
-                         compiler::Node* name, Label* if_handler,
-                         Variable* var_handler, Label* if_miss);
-
-  // Extends properties backing store by JSObject::kFieldsAdded elements.
-  void ExtendPropertiesBackingStore(compiler::Node* object);
-
-  compiler::Node* PrepareValueForWrite(compiler::Node* value,
-                                       Representation representation,
-                                       Label* bailout);
-
-  void StoreNamedField(compiler::Node* object, FieldIndex index,
-                       Representation representation, compiler::Node* value,
-                       bool transition_to_field);
-
-  void StoreNamedField(compiler::Node* object, compiler::Node* offset,
-                       bool is_inobject, Representation representation,
-                       compiler::Node* value, bool transition_to_field);
+  Node* LoadReceiverMap(Node* receiver);
 
   // Emits a keyed sloppy arguments load. Returns the loaded value.
-  compiler::Node* LoadKeyedSloppyArguments(compiler::Node* receiver,
-                                           compiler::Node* key,
-                                           Label* bailout) {
+  Node* LoadKeyedSloppyArguments(Node* receiver, Node* key, Label* bailout) {
     return EmitKeyedSloppyArguments(receiver, key, nullptr, bailout);
   }
 
   // Emits a keyed sloppy arguments store.
-  void StoreKeyedSloppyArguments(compiler::Node* receiver, compiler::Node* key,
-                                 compiler::Node* value, Label* bailout) {
+  void StoreKeyedSloppyArguments(Node* receiver, Node* key, Node* value,
+                                 Label* bailout) {
     DCHECK_NOT_NULL(value);
     EmitKeyedSloppyArguments(receiver, key, value, bailout);
   }
 
   // Loads script context from the script context table.
-  compiler::Node* LoadScriptContext(compiler::Node* context, int context_index);
+  Node* LoadScriptContext(Node* context, int context_index);
 
-  compiler::Node* Int32ToUint8Clamped(compiler::Node* int32_value);
-  compiler::Node* Float64ToUint8Clamped(compiler::Node* float64_value);
+  Node* Int32ToUint8Clamped(Node* int32_value);
+  Node* Float64ToUint8Clamped(Node* float64_value);
 
-  compiler::Node* PrepareValueForWriteToTypedArray(compiler::Node* key,
-                                                   ElementsKind elements_kind,
-                                                   Label* bailout);
+  Node* PrepareValueForWriteToTypedArray(Node* key, ElementsKind elements_kind,
+                                         Label* bailout);
 
   // Store value to an elements array with given elements kind.
-  void StoreElement(compiler::Node* elements, ElementsKind kind,
-                    compiler::Node* index, compiler::Node* value,
+  void StoreElement(Node* elements, ElementsKind kind, Node* index, Node* value,
                     ParameterMode mode);
 
-  void EmitElementStore(compiler::Node* object, compiler::Node* key,
-                        compiler::Node* value, bool is_jsarray,
+  void EmitElementStore(Node* object, Node* key, Node* value, bool is_jsarray,
                         ElementsKind elements_kind,
                         KeyedAccessStoreMode store_mode, Label* bailout);
 
-  compiler::Node* CheckForCapacityGrow(compiler::Node* object,
-                                       compiler::Node* elements,
-                                       ElementsKind kind,
-                                       compiler::Node* length,
-                                       compiler::Node* key, ParameterMode mode,
-                                       bool is_js_array, Label* bailout);
+  Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind,
+                             Node* length, Node* key, ParameterMode mode,
+                             bool is_js_array, Label* bailout);
 
-  compiler::Node* CopyElementsOnWrite(compiler::Node* object,
-                                      compiler::Node* elements,
-                                      ElementsKind kind, compiler::Node* length,
-                                      ParameterMode mode, Label* bailout);
+  Node* CopyElementsOnWrite(Node* object, Node* elements, ElementsKind kind,
+                            Node* length, ParameterMode mode, Label* bailout);
 
-  void LoadIC(const LoadICParameters* p);
-  void LoadICProtoArray(const LoadICParameters* p, compiler::Node* handler);
-  void LoadGlobalIC(const LoadICParameters* p);
-  void KeyedLoadIC(const LoadICParameters* p);
-  void KeyedLoadICGeneric(const LoadICParameters* p);
-  void StoreIC(const StoreICParameters* p);
-  void KeyedStoreIC(const StoreICParameters* p, LanguageMode language_mode);
+  void TransitionElementsKind(Node* object, Node* map, ElementsKind from_kind,
+                              ElementsKind to_kind, bool is_jsarray,
+                              Label* bailout);
 
-  void TransitionElementsKind(compiler::Node* object, compiler::Node* map,
-                              ElementsKind from_kind, ElementsKind to_kind,
-                              bool is_jsarray, Label* bailout);
+  void TrapAllocationMemento(Node* object, Label* memento_found);
 
-  void TrapAllocationMemento(compiler::Node* object, Label* memento_found);
-
-  compiler::Node* PageFromAddress(compiler::Node* address);
+  Node* PageFromAddress(Node* address);
 
   // Get the enumerable length from |map| and return the result as a Smi.
-  compiler::Node* EnumLength(compiler::Node* map);
+  Node* EnumLength(Node* map);
 
   // Check the cache validity for |receiver|. Branch to |use_cache| if
   // the cache is valid, otherwise branch to |use_runtime|.
-  void CheckEnumCache(compiler::Node* receiver,
-                      CodeStubAssembler::Label* use_cache,
+  void CheckEnumCache(Node* receiver, CodeStubAssembler::Label* use_cache,
                       CodeStubAssembler::Label* use_runtime);
 
   // Create a new weak cell with a specified value and install it into a
   // feedback vector.
-  compiler::Node* CreateWeakCellInFeedbackVector(
-      compiler::Node* feedback_vector, compiler::Node* slot,
-      compiler::Node* value);
+  Node* CreateWeakCellInFeedbackVector(Node* feedback_vector, Node* slot,
+                                       Node* value);
 
   // Create a new AllocationSite and install it into a feedback vector.
-  compiler::Node* CreateAllocationSiteInFeedbackVector(
-      compiler::Node* feedback_vector, compiler::Node* slot);
+  Node* CreateAllocationSiteInFeedbackVector(Node* feedback_vector, Node* slot);
 
   enum class IndexAdvanceMode { kPre, kPost };
 
-  void BuildFastLoop(
-      const VariableList& var_list, MachineRepresentation index_rep,
-      compiler::Node* start_index, compiler::Node* end_index,
-      std::function<void(CodeStubAssembler* assembler, compiler::Node* index)>
-          body,
-      int increment, IndexAdvanceMode mode = IndexAdvanceMode::kPre);
+  typedef std::function<void(Node* index)> FastLoopBody;
 
-  void BuildFastLoop(
-      MachineRepresentation index_rep, compiler::Node* start_index,
-      compiler::Node* end_index,
-      std::function<void(CodeStubAssembler* assembler, compiler::Node* index)>
-          body,
-      int increment, IndexAdvanceMode mode = IndexAdvanceMode::kPre) {
-    BuildFastLoop(VariableList(0, zone()), index_rep, start_index, end_index,
-                  body, increment, mode);
+  Node* BuildFastLoop(const VariableList& var_list, Node* start_index,
+                      Node* end_index, const FastLoopBody& body, int increment,
+                      ParameterMode parameter_mode,
+                      IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
+
+  Node* BuildFastLoop(Node* start_index, Node* end_index,
+                      const FastLoopBody& body, int increment,
+                      ParameterMode parameter_mode,
+                      IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
+    return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body,
+                         increment, parameter_mode, advance_mode);
   }
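A minimal sketch of the reworked BuildFastLoop, assuming intptr bounds and a
FixedArray node |array|; the body receives the current index, and the returned
node is the index's final value:

    BuildFastLoop(IntPtrConstant(0), length,
                  [this, array](Node* index) {
                    StoreFixedArrayElement(array, index, UndefinedConstant());
                  },
                  1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);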
 
   enum class ForEachDirection { kForward, kReverse };
 
+  typedef std::function<void(Node* fixed_array, Node* offset)>
+      FastFixedArrayForEachBody;
+
   void BuildFastFixedArrayForEach(
-      compiler::Node* fixed_array, ElementsKind kind,
-      compiler::Node* first_element_inclusive,
-      compiler::Node* last_element_exclusive,
-      std::function<void(CodeStubAssembler* assembler,
-                         compiler::Node* fixed_array, compiler::Node* offset)>
-          body,
+      const CodeStubAssembler::VariableList& vars, Node* fixed_array,
+      ElementsKind kind, Node* first_element_inclusive,
+      Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
       ParameterMode mode = INTPTR_PARAMETERS,
       ForEachDirection direction = ForEachDirection::kReverse);
 
-  compiler::Node* GetArrayAllocationSize(compiler::Node* element_count,
-                                         ElementsKind kind, ParameterMode mode,
-                                         int header_size) {
+  void BuildFastFixedArrayForEach(
+      Node* fixed_array, ElementsKind kind, Node* first_element_inclusive,
+      Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
+      ParameterMode mode = INTPTR_PARAMETERS,
+      ForEachDirection direction = ForEachDirection::kReverse) {
+    CodeStubAssembler::VariableList list(0, zone());
+    BuildFastFixedArrayForEach(list, fixed_array, kind, first_element_inclusive,
+                               last_element_exclusive, body, mode, direction);
+  }
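A matching sketch for the ForEach helper: the body gets the array plus the
untagged byte offset of each element (header included), and iteration runs in
reverse unless ForEachDirection::kForward is requested:

    BuildFastFixedArrayForEach(
        array, FAST_ELEMENTS, IntPtrConstant(0), length,
        [this](Node* fixed_array, Node* offset) {
          // Pair |offset| with offset-based accessors such as
          // StoreNoWriteBarrier, not with index-based ones.
        });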
+
+  Node* GetArrayAllocationSize(Node* element_count, ElementsKind kind,
+                               ParameterMode mode, int header_size) {
     return ElementOffsetFromIndex(element_count, kind, mode, header_size);
   }
 
-  compiler::Node* GetFixedArrayAllocationSize(compiler::Node* element_count,
-                                              ElementsKind kind,
-                                              ParameterMode mode) {
+  Node* GetFixedArrayAllocationSize(Node* element_count, ElementsKind kind,
+                                    ParameterMode mode) {
     return GetArrayAllocationSize(element_count, kind, mode,
                                   FixedArray::kHeaderSize);
   }
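Reading the size helpers concretely: in INTPTR_PARAMETERS mode with
FAST_ELEMENTS, ElementOffsetFromIndex computes index * kPointerSize +
base_size, so GetFixedArrayAllocationSize(IntPtrConstant(4), FAST_ELEMENTS,
INTPTR_PARAMETERS) yields 4 * kPointerSize + FixedArray::kHeaderSize, exactly
the byte size needed to allocate a four-element FixedArray.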
 
+  void GotoIfFixedArraySizeDoesntFitInNewSpace(Node* element_count,
+                                               Label* doesnt_fit, int base_size,
+                                               ParameterMode mode);
+
+  void InitializeFieldsWithRoot(Node* object, Node* start_offset,
+                                Node* end_offset, Heap::RootListIndex root);
+
   enum RelationalComparisonMode {
     kLessThan,
     kLessThanOrEqual,
@@ -1092,222 +1198,219 @@
     kGreaterThanOrEqual
   };
 
-  compiler::Node* RelationalComparison(RelationalComparisonMode mode,
-                                       compiler::Node* lhs, compiler::Node* rhs,
-                                       compiler::Node* context);
+  Node* RelationalComparison(RelationalComparisonMode mode, Node* lhs,
+                             Node* rhs, Node* context);
 
   void BranchIfNumericRelationalComparison(RelationalComparisonMode mode,
-                                           compiler::Node* lhs,
-                                           compiler::Node* rhs, Label* if_true,
+                                           Node* lhs, Node* rhs, Label* if_true,
                                            Label* if_false);
 
-  void GotoUnlessNumberLessThan(compiler::Node* lhs, compiler::Node* rhs,
-                                Label* if_false);
+  void GotoUnlessNumberLessThan(Node* lhs, Node* rhs, Label* if_false);
 
   enum ResultMode { kDontNegateResult, kNegateResult };
 
-  compiler::Node* Equal(ResultMode mode, compiler::Node* lhs,
-                        compiler::Node* rhs, compiler::Node* context);
+  Node* Equal(ResultMode mode, Node* lhs, Node* rhs, Node* context);
 
-  compiler::Node* StrictEqual(ResultMode mode, compiler::Node* lhs,
-                              compiler::Node* rhs, compiler::Node* context);
+  Node* StrictEqual(ResultMode mode, Node* lhs, Node* rhs, Node* context);
 
   // ECMA#sec-samevalue
   // Similar to StrictEqual except that NaNs are treated as equal and minus zero
   // differs from positive zero.
   // Unlike Equal and StrictEqual, returns a value suitable for use in Branch
   // instructions, e.g. Branch(SameValue(...), &if_true, &if_false).
-  compiler::Node* SameValue(compiler::Node* lhs, compiler::Node* rhs,
-                            compiler::Node* context);
+  Node* SameValue(Node* lhs, Node* rhs, Node* context);
 
-  compiler::Node* HasProperty(
-      compiler::Node* object, compiler::Node* key, compiler::Node* context,
+  Node* HasProperty(
+      Node* object, Node* key, Node* context,
       Runtime::FunctionId fallback_runtime_function_id = Runtime::kHasProperty);
-  compiler::Node* ForInFilter(compiler::Node* key, compiler::Node* object,
-                              compiler::Node* context);
+  Node* ForInFilter(Node* key, Node* object, Node* context);
 
-  compiler::Node* Typeof(compiler::Node* value, compiler::Node* context);
+  Node* ClassOf(Node* object);
 
-  compiler::Node* InstanceOf(compiler::Node* object, compiler::Node* callable,
-                             compiler::Node* context);
+  Node* Typeof(Node* value, Node* context);
+
+  Node* GetSuperConstructor(Node* value, Node* context);
+
+  Node* InstanceOf(Node* object, Node* callable, Node* context);
+
+  // Debug helpers
+  Node* IsDebugActive();
 
   // TypedArray/ArrayBuffer helpers
-  compiler::Node* IsDetachedBuffer(compiler::Node* buffer);
+  Node* IsDetachedBuffer(Node* buffer);
 
-  compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
-                                         ElementsKind kind, ParameterMode mode,
-                                         int base_size = 0);
+  Node* ElementOffsetFromIndex(Node* index, ElementsKind kind,
+                               ParameterMode mode, int base_size = 0);
+
+  Node* AllocateFunctionWithMapAndContext(Node* map, Node* shared_info,
+                                          Node* context);
+
+  // Promise helpers
+  Node* IsPromiseHookEnabledOrDebugIsActive();
+
+  Node* AllocatePromiseReactionJobInfo(Node* value, Node* tasks,
+                                       Node* deferred_promise,
+                                       Node* deferred_on_resolve,
+                                       Node* deferred_on_reject, Node* context);
+
+  // Helpers for StackFrame markers.
+  Node* MarkerIsFrameType(Node* marker_or_function,
+                          StackFrame::Type frame_type);
+  Node* MarkerIsNotFrameType(Node* marker_or_function,
+                             StackFrame::Type frame_type);
+
+  // Support for printf-style debugging
+  void Print(const char* s);
+  void Print(const char* prefix, Node* tagged_value);
+  inline void Print(Node* tagged_value) { return Print(nullptr, tagged_value); }
+
+  template <class... TArgs>
+  Node* MakeTypeError(MessageTemplate::Template message, Node* context,
+                      TArgs... args) {
+    STATIC_ASSERT(sizeof...(TArgs) <= 3);
+    Node* const make_type_error = LoadContextElement(
+        LoadNativeContext(context), Context::MAKE_TYPE_ERROR_INDEX);
+    return CallJS(CodeFactory::Call(isolate()), context, make_type_error,
+                  UndefinedConstant(), SmiConstant(message), args...);
+  }
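A plausible sketch of MakeTypeError in a stub body; kCalledNonCallable and
Runtime::kThrow are existing identifiers, but treat the pairing as
illustrative rather than prescribed:

    Node* error =
        MakeTypeError(MessageTemplate::kCalledNonCallable, context, callable);
    CallRuntime(Runtime::kThrow, context, error);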
 
  protected:
-  void HandleStoreICHandlerCase(const StoreICParameters* p,
-                                compiler::Node* handler, Label* miss);
+  void DescriptorLookup(Node* unique_name, Node* descriptors, Node* bitfield3,
+                        Label* if_found, Variable* var_name_index,
+                        Label* if_not_found);
+  void DescriptorLookupLinear(Node* unique_name, Node* descriptors, Node* nof,
+                              Label* if_found, Variable* var_name_index,
+                              Label* if_not_found);
+  void DescriptorLookupBinary(Node* unique_name, Node* descriptors, Node* nof,
+                              Label* if_found, Variable* var_name_index,
+                              Label* if_not_found);
+
+  Node* CallGetterIfAccessor(Node* value, Node* details, Node* context,
+                             Node* receiver, Label* if_bailout);
+
+  Node* TryToIntptr(Node* key, Label* miss);
+
+  void BranchIfPrototypesHaveNoElements(Node* receiver_map,
+                                        Label* definitely_no_elements,
+                                        Label* possibly_elements);
 
  private:
   friend class CodeStubArguments;
 
-  enum ElementSupport { kOnlyProperties, kSupportElements };
+  void HandleBreakOnNode();
 
-  void DescriptorLookupLinear(compiler::Node* unique_name,
-                              compiler::Node* descriptors, compiler::Node* nof,
-                              Label* if_found, Variable* var_name_index,
-                              Label* if_not_found);
-  compiler::Node* CallGetterIfAccessor(compiler::Node* value,
-                                       compiler::Node* details,
-                                       compiler::Node* context,
-                                       compiler::Node* receiver,
-                                       Label* if_bailout);
-
-  void HandleLoadICHandlerCase(
-      const LoadICParameters* p, compiler::Node* handler, Label* miss,
-      ElementSupport support_elements = kOnlyProperties);
-
-  void HandleLoadICSmiHandlerCase(const LoadICParameters* p,
-                                  compiler::Node* holder,
-                                  compiler::Node* smi_handler, Label* miss,
-                                  ElementSupport support_elements);
-
-  void HandleLoadICProtoHandler(const LoadICParameters* p,
-                                compiler::Node* handler, Variable* var_holder,
-                                Variable* var_smi_handler,
-                                Label* if_smi_handler, Label* miss);
-
-  compiler::Node* EmitLoadICProtoArrayCheck(const LoadICParameters* p,
-                                            compiler::Node* handler,
-                                            compiler::Node* handler_length,
-                                            compiler::Node* handler_flags,
-                                            Label* miss);
-
-  void CheckPrototype(compiler::Node* prototype_cell, compiler::Node* name,
-                      Label* miss);
-
-  void NameDictionaryNegativeLookup(compiler::Node* object,
-                                    compiler::Node* name, Label* miss);
-
-  // If |transition| is nullptr then the normal field store is generated or
-  // transitioning store otherwise.
-  void HandleStoreFieldAndReturn(compiler::Node* handler_word,
-                                 compiler::Node* holder,
-                                 Representation representation,
-                                 compiler::Node* value,
-                                 compiler::Node* transition, Label* miss);
-
-  // If |transition| is nullptr then the normal field store is generated or
-  // transitioning store otherwise.
-  void HandleStoreICSmiHandlerCase(compiler::Node* handler_word,
-                                   compiler::Node* holder,
-                                   compiler::Node* value,
-                                   compiler::Node* transition, Label* miss);
-
-  void HandleStoreICProtoHandler(const StoreICParameters* p,
-                                 compiler::Node* handler, Label* miss);
-
-  compiler::Node* TryToIntptr(compiler::Node* key, Label* miss);
-  void EmitFastElementsBoundsCheck(compiler::Node* object,
-                                   compiler::Node* elements,
-                                   compiler::Node* intptr_index,
-                                   compiler::Node* is_jsarray_condition,
-                                   Label* miss);
-  void EmitElementLoad(compiler::Node* object, compiler::Node* elements,
-                       compiler::Node* elements_kind, compiler::Node* key,
-                       compiler::Node* is_jsarray_condition, Label* if_hole,
-                       Label* rebox_double, Variable* var_double_value,
-                       Label* unimplemented_elements_kind, Label* out_of_bounds,
-                       Label* miss);
-  void BranchIfPrototypesHaveNoElements(compiler::Node* receiver_map,
-                                        Label* definitely_no_elements,
-                                        Label* possibly_elements);
-
-  compiler::Node* AllocateRawAligned(compiler::Node* size_in_bytes,
-                                     AllocationFlags flags,
-                                     compiler::Node* top_address,
-                                     compiler::Node* limit_address);
-  compiler::Node* AllocateRawUnaligned(compiler::Node* size_in_bytes,
-                                       AllocationFlags flags,
-                                       compiler::Node* top_adddress,
-                                       compiler::Node* limit_address);
+  Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
+                           Node* top_address, Node* limit_address);
+  Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
+                             Node* top_address, Node* limit_address);
   // Allocate and return a JSArray of given total size in bytes with header
   // fields initialized.
-  compiler::Node* AllocateUninitializedJSArray(ElementsKind kind,
-                                               compiler::Node* array_map,
-                                               compiler::Node* length,
-                                               compiler::Node* allocation_site,
-                                               compiler::Node* size_in_bytes);
+  Node* AllocateUninitializedJSArray(ElementsKind kind, Node* array_map,
+                                     Node* length, Node* allocation_site,
+                                     Node* size_in_bytes);
 
-  compiler::Node* SmiShiftBitsConstant();
+  Node* SmiShiftBitsConstant();
 
   // Emits keyed sloppy arguments load if the |value| is nullptr or store
   // otherwise. Returns either the loaded value or |value|.
-  compiler::Node* EmitKeyedSloppyArguments(compiler::Node* receiver,
-                                           compiler::Node* key,
-                                           compiler::Node* value,
-                                           Label* bailout);
+  Node* EmitKeyedSloppyArguments(Node* receiver, Node* key, Node* value,
+                                 Label* bailout);
 
-  compiler::Node* AllocateSlicedString(Heap::RootListIndex map_root_index,
-                                       compiler::Node* length,
-                                       compiler::Node* parent,
-                                       compiler::Node* offset);
+  Node* AllocateSlicedString(Heap::RootListIndex map_root_index, Node* length,
+                             Node* parent, Node* offset);
 
-  compiler::Node* AllocateConsString(Heap::RootListIndex map_root_index,
-                                     compiler::Node* length,
-                                     compiler::Node* first,
-                                     compiler::Node* second,
-                                     AllocationFlags flags);
+  Node* AllocateConsString(Heap::RootListIndex map_root_index, Node* length,
+                           Node* first, Node* second, AllocationFlags flags);
+
+  // Implements DescriptorArray::number_of_entries.
+  // Returns an untagged int32.
+  Node* DescriptorArrayNumberOfEntries(Node* descriptors);
+  // Implements DescriptorArray::ToKeyIndex.
+  // Returns an untagged IntPtr.
+  Node* DescriptorArrayToKeyIndex(Node* descriptor_number);
+  // Implements DescriptorArray::GetSortedKeyIndex.
+  // Returns an untagged int32.
+  Node* DescriptorArrayGetSortedKeyIndex(Node* descriptors,
+                                         Node* descriptor_number);
+  // Implements DescriptorArray::GetKey.
+  Node* DescriptorArrayGetKey(Node* descriptors, Node* descriptor_number);
 
   static const int kElementLoopUnrollThreshold = 8;
 };
 
 class CodeStubArguments {
  public:
-  // |argc| specifies the number of arguments passed to the builtin excluding
-  // the receiver.
-  CodeStubArguments(CodeStubAssembler* assembler, compiler::Node* argc,
-                    CodeStubAssembler::ParameterMode mode =
-                        CodeStubAssembler::INTPTR_PARAMETERS);
+  typedef compiler::Node Node;
 
-  compiler::Node* GetReceiver();
+  // |argc| is an intptr value which specifies the number of arguments passed
+  // to the builtin excluding the receiver.
+  CodeStubArguments(CodeStubAssembler* assembler, Node* argc)
+      : CodeStubArguments(assembler, argc, nullptr,
+                          CodeStubAssembler::INTPTR_PARAMETERS) {}
+  CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp,
+                    CodeStubAssembler::ParameterMode param_mode);
+
+  Node* GetReceiver() const;
+
+  Node* AtIndexPtr(Node* index, CodeStubAssembler::ParameterMode mode =
+                                    CodeStubAssembler::INTPTR_PARAMETERS) const;
 
   // |index| is zero-based and does not include the receiver.
-  compiler::Node* AtIndex(compiler::Node* index,
-                          CodeStubAssembler::ParameterMode mode =
-                              CodeStubAssembler::INTPTR_PARAMETERS);
+  Node* AtIndex(Node* index, CodeStubAssembler::ParameterMode mode =
+                                 CodeStubAssembler::INTPTR_PARAMETERS) const;
 
-  compiler::Node* AtIndex(int index);
+  Node* AtIndex(int index) const;
 
-  typedef std::function<void(CodeStubAssembler* assembler, compiler::Node* arg)>
-      ForEachBodyFunction;
+  Node* GetLength() const { return argc_; }
+
+  typedef std::function<void(Node* arg)> ForEachBodyFunction;
 
   // Iteration doesn't include the receiver. |first| and |last| are zero-based.
-  void ForEach(ForEachBodyFunction body, compiler::Node* first = nullptr,
-               compiler::Node* last = nullptr,
-               CodeStubAssembler::ParameterMode mode =
-                   CodeStubAssembler::INTPTR_PARAMETERS) {
+  void ForEach(const ForEachBodyFunction& body, Node* first = nullptr,
+               Node* last = nullptr, CodeStubAssembler::ParameterMode mode =
+                                         CodeStubAssembler::INTPTR_PARAMETERS) {
     CodeStubAssembler::VariableList list(0, assembler_->zone());
     ForEach(list, body, first, last);
   }
 
   // Iteration doesn't include the receiver. |first| and |last| are zero-based.
   void ForEach(const CodeStubAssembler::VariableList& vars,
-               ForEachBodyFunction body, compiler::Node* first = nullptr,
-               compiler::Node* last = nullptr,
-               CodeStubAssembler::ParameterMode mode =
-                   CodeStubAssembler::INTPTR_PARAMETERS);
+               const ForEachBodyFunction& body, Node* first = nullptr,
+               Node* last = nullptr, CodeStubAssembler::ParameterMode mode =
+                                         CodeStubAssembler::INTPTR_PARAMETERS);
 
-  void PopAndReturn(compiler::Node* value);
+  void PopAndReturn(Node* value);
 
  private:
-  compiler::Node* GetArguments();
+  Node* GetArguments();
 
   CodeStubAssembler* assembler_;
-  compiler::Node* argc_;
-  compiler::Node* arguments_;
-  compiler::Node* fp_;
+  CodeStubAssembler::ParameterMode argc_mode_;
+  Node* argc_;
+  Node* arguments_;
+  Node* fp_;
 };
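
Two interface changes above are worth calling out: ForEach bodies no longer
receive the assembler (callers capture it instead), and GetLength() exposes
the untagged argument count. A rough usage sketch, with the enclosing builtin
and the source of |argc| left hypothetical:

    // |argc| is the untagged argument count, excluding the receiver.
    CodeStubArguments args(&assembler, argc);
    Node* receiver = args.GetReceiver();
    args.ForEach([&](Node* arg) {
      // Visit each argument; the lambda captures whatever it needs,
      // including the assembler, instead of being handed it per call.
    });
    args.PopAndReturn(receiver);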
 
 #ifdef DEBUG
 #define CSA_ASSERT(csa, x) \
   (csa)->Assert([&] { return (x); }, #x, __FILE__, __LINE__)
+#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected)               \
+  (csa)->Assert(                                                   \
+      [&] {                                                        \
+        const CodeAssemblerState* state = (csa)->state();          \
+        /* See Linkage::GetJSCallDescriptor(). */                  \
+        int argc_index = state->parameter_count() - 2;             \
+        compiler::Node* const argc = (csa)->Parameter(argc_index); \
+        return (csa)->Op(argc, (csa)->Int32Constant(expected));    \
+      },                                                           \
+      "argc " #op " " #expected, __FILE__, __LINE__)
+
+#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
+  CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected)
+
 #else
 #define CSA_ASSERT(csa, x) ((void)0)
+#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
 #endif
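
CSA_ASSERT_JS_ARGC_OP reads the actual JavaScript argument count out of the
stub's parameters (argc sits at parameter_count() - 2, as noted above via
Linkage::GetJSCallDescriptor()) and checks it in debug builds only. A short
hypothetical builtin generator showing the intended use:

    // Hypothetical builtin that expects exactly two JS arguments.
    void Generate_Example(compiler::CodeAssemblerState* state) {
      CodeStubAssembler assembler(state);
      CSA_ASSERT_JS_ARGC_EQ(&assembler, 2);  // expands to ((void)0) in release
      assembler.Return(assembler.UndefinedConstant());
    }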
 
 #ifdef ENABLE_SLOW_DCHECKS
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 790f687..4c10e20 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -8,10 +8,12 @@
 
 #include "src/bailout-reason.h"
 #include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
 #include "src/crankshaft/hydrogen.h"
 #include "src/crankshaft/lithium.h"
 #include "src/field-index.h"
 #include "src/ic/ic.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -77,17 +79,6 @@
   HContext* context() { return context_; }
   Isolate* isolate() { return info_->isolate(); }
 
-  HLoadNamedField* BuildLoadNamedField(HValue* object, FieldIndex index);
-  void BuildStoreNamedField(HValue* object, HValue* value, FieldIndex index,
-                            Representation representation,
-                            bool transition_to_field);
-
-  HValue* BuildPushElement(HValue* object, HValue* argc,
-                           HValue* argument_elements, ElementsKind kind);
-
-  HValue* BuildToString(HValue* input, bool convert);
-  HValue* BuildToPrimitive(HValue* input, HValue* input_map);
-
  private:
   std::unique_ptr<HParameter* []> parameters_;
   HValue* arguments_length_;
@@ -251,8 +242,9 @@
   const char* name = CodeStub::MajorName(MajorKey());
   Zone zone(isolate()->allocator(), ZONE_NAME);
   CallInterfaceDescriptor interface_descriptor(GetCallInterfaceDescriptor());
-  CodeStubAssembler assembler(isolate(), &zone, interface_descriptor,
-                              GetCodeFlags(), name);
+  compiler::CodeAssemblerState state(isolate(), &zone, interface_descriptor,
+                                     GetCodeFlags(), name);
+  CodeStubAssembler assembler(&state);
   int total_params = interface_descriptor.GetStackParameterCount() +
                      interface_descriptor.GetRegisterParameterCount();
   switch (total_params) {
@@ -284,7 +276,7 @@
       UNIMPLEMENTED();
       break;
   }
-  return assembler.GenerateCode();
+  return compiler::CodeAssembler::GenerateCode(&state);
 }
 
 template <class Stub>
@@ -327,502 +319,6 @@
   return code;
 }
 
-
-HValue* CodeStubGraphBuilderBase::BuildPushElement(HValue* object, HValue* argc,
-                                                   HValue* argument_elements,
-                                                   ElementsKind kind) {
-  // Precheck whether all elements fit into the array.
-  if (!IsFastObjectElementsKind(kind)) {
-    LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
-    HValue* start = graph()->GetConstant0();
-    HValue* key = builder.BeginBody(start, argc, Token::LT);
-    {
-      HInstruction* argument =
-          Add<HAccessArgumentsAt>(argument_elements, argc, key);
-      IfBuilder can_store(this);
-      can_store.IfNot<HIsSmiAndBranch>(argument);
-      if (IsFastDoubleElementsKind(kind)) {
-        can_store.And();
-        can_store.IfNot<HCompareMap>(argument,
-                                     isolate()->factory()->heap_number_map());
-      }
-      can_store.ThenDeopt(DeoptimizeReason::kFastPathFailed);
-      can_store.End();
-    }
-    builder.EndBody();
-  }
-
-  HValue* length = Add<HLoadNamedField>(object, nullptr,
-                                        HObjectAccess::ForArrayLength(kind));
-  HValue* new_length = AddUncasted<HAdd>(length, argc);
-  HValue* max_key = AddUncasted<HSub>(new_length, graph()->GetConstant1());
-
-  HValue* elements = Add<HLoadNamedField>(object, nullptr,
-                                          HObjectAccess::ForElementsPointer());
-  elements = BuildCheckForCapacityGrow(object, elements, kind, length, max_key,
-                                       true, STORE);
-
-  LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
-  HValue* start = graph()->GetConstant0();
-  HValue* key = builder.BeginBody(start, argc, Token::LT);
-  {
-    HValue* argument = Add<HAccessArgumentsAt>(argument_elements, argc, key);
-    HValue* index = AddUncasted<HAdd>(key, length);
-    AddElementAccess(elements, index, argument, object, nullptr, kind, STORE);
-  }
-  builder.EndBody();
-  return new_length;
-}
-
-template <>
-HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
-  // TODO(verwaest): Fix deoptimizer messages.
-  HValue* argc = GetArgumentsLength();
-
-  HInstruction* argument_elements = Add<HArgumentsElements>(false, false);
-  HInstruction* object = Add<HAccessArgumentsAt>(argument_elements, argc,
-                                                 graph()->GetConstantMinus1());
-  BuildCheckHeapObject(object);
-  HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
-  Add<HCheckInstanceType>(object, HCheckInstanceType::IS_JS_ARRAY);
-
-  // Disallow pushing onto prototypes. It might be the JSArray prototype.
-  // Disallow pushing onto non-extensible objects.
-  {
-    HValue* bit_field2 =
-        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
-    HValue* mask =
-        Add<HConstant>(static_cast<int>(Map::IsPrototypeMapBits::kMask) |
-                       (1 << Map::kIsExtensible));
-    HValue* bits = AddUncasted<HBitwise>(Token::BIT_AND, bit_field2, mask);
-    IfBuilder check(this);
-    check.If<HCompareNumericAndBranch>(
-        bits, Add<HConstant>(1 << Map::kIsExtensible), Token::NE);
-    check.ThenDeopt(DeoptimizeReason::kFastPathFailed);
-    check.End();
-  }
-
-  // Disallow pushing onto arrays in dictionary named property mode. We need to
-  // figure out whether the length property is still writable.
-  {
-    HValue* bit_field3 =
-        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField3());
-    HValue* mask = Add<HConstant>(static_cast<int>(Map::DictionaryMap::kMask));
-    HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
-    IfBuilder check(this);
-    check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
-    check.ThenDeopt(DeoptimizeReason::kFastPathFailed);
-    check.End();
-  }
-
-  // Check whether the length property is writable. The length property is the
-  // only default named property on arrays. It's nonconfigurable, hence is
-  // guaranteed to stay the first property.
-  {
-    HValue* descriptors =
-        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapDescriptors());
-    HValue* details = Add<HLoadKeyed>(
-        descriptors, Add<HConstant>(DescriptorArray::ToDetailsIndex(0)),
-        nullptr, nullptr, FAST_SMI_ELEMENTS);
-    HValue* mask =
-        Add<HConstant>(READ_ONLY << PropertyDetails::AttributesField::kShift);
-    HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, details, mask);
-    IfBuilder readonly(this);
-    readonly.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
-    readonly.ThenDeopt(DeoptimizeReason::kFastPathFailed);
-    readonly.End();
-  }
-
-  HValue* null = Add<HLoadRoot>(Heap::kNullValueRootIndex);
-  HValue* empty = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
-  environment()->Push(map);
-  LoopBuilder check_prototypes(this);
-  check_prototypes.BeginBody(1);
-  {
-    HValue* parent_map = environment()->Pop();
-    HValue* prototype = Add<HLoadNamedField>(parent_map, nullptr,
-                                             HObjectAccess::ForPrototype());
-
-    IfBuilder is_null(this);
-    is_null.If<HCompareObjectEqAndBranch>(prototype, null);
-    is_null.Then();
-    check_prototypes.Break();
-    is_null.End();
-
-    HValue* prototype_map =
-        Add<HLoadNamedField>(prototype, nullptr, HObjectAccess::ForMap());
-    HValue* instance_type = Add<HLoadNamedField>(
-        prototype_map, nullptr, HObjectAccess::ForMapInstanceType());
-    IfBuilder check_instance_type(this);
-    check_instance_type.If<HCompareNumericAndBranch>(
-        instance_type, Add<HConstant>(LAST_CUSTOM_ELEMENTS_RECEIVER),
-        Token::LTE);
-    check_instance_type.ThenDeopt(DeoptimizeReason::kFastPathFailed);
-    check_instance_type.End();
-
-    HValue* elements = Add<HLoadNamedField>(
-        prototype, nullptr, HObjectAccess::ForElementsPointer());
-    IfBuilder no_elements(this);
-    no_elements.IfNot<HCompareObjectEqAndBranch>(elements, empty);
-    no_elements.ThenDeopt(DeoptimizeReason::kFastPathFailed);
-    no_elements.End();
-
-    environment()->Push(prototype_map);
-  }
-  check_prototypes.EndBody();
-
-  HValue* bit_field2 =
-      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
-  HValue* kind = BuildDecodeField<Map::ElementsKindBits>(bit_field2);
-
-  // Below we only check the upper bound of the relevant ranges to include both
-  // holey and non-holey versions. We check them in order smi, object, double
-  // since smi < object < double.
-  STATIC_ASSERT(FAST_SMI_ELEMENTS < FAST_HOLEY_SMI_ELEMENTS);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS < FAST_HOLEY_ELEMENTS);
-  STATIC_ASSERT(FAST_ELEMENTS < FAST_HOLEY_ELEMENTS);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FAST_HOLEY_DOUBLE_ELEMENTS);
-  STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FAST_HOLEY_DOUBLE_ELEMENTS);
-  IfBuilder has_smi_elements(this);
-  has_smi_elements.If<HCompareNumericAndBranch>(
-      kind, Add<HConstant>(FAST_HOLEY_SMI_ELEMENTS), Token::LTE);
-  has_smi_elements.Then();
-  {
-    HValue* new_length = BuildPushElement(object, argc, argument_elements,
-                                          FAST_HOLEY_SMI_ELEMENTS);
-    environment()->Push(new_length);
-  }
-  has_smi_elements.Else();
-  {
-    IfBuilder has_object_elements(this);
-    has_object_elements.If<HCompareNumericAndBranch>(
-        kind, Add<HConstant>(FAST_HOLEY_ELEMENTS), Token::LTE);
-    has_object_elements.Then();
-    {
-      HValue* new_length = BuildPushElement(object, argc, argument_elements,
-                                            FAST_HOLEY_ELEMENTS);
-      environment()->Push(new_length);
-    }
-    has_object_elements.Else();
-    {
-      IfBuilder has_double_elements(this);
-      has_double_elements.If<HCompareNumericAndBranch>(
-          kind, Add<HConstant>(FAST_HOLEY_DOUBLE_ELEMENTS), Token::LTE);
-      has_double_elements.Then();
-      {
-        HValue* new_length = BuildPushElement(object, argc, argument_elements,
-                                              FAST_HOLEY_DOUBLE_ELEMENTS);
-        environment()->Push(new_length);
-      }
-      has_double_elements.ElseDeopt(DeoptimizeReason::kFastPathFailed);
-      has_double_elements.End();
-    }
-    has_object_elements.End();
-  }
-  has_smi_elements.End();
-
-  return environment()->Pop();
-}
-
-Handle<Code> FastArrayPushStub::GenerateCode() { return DoGenerateCode(this); }
-
-template <>
-HValue* CodeStubGraphBuilder<FastFunctionBindStub>::BuildCodeStub() {
-  // TODO(verwaest): Fix deoptimizer messages.
-  HValue* argc = GetArgumentsLength();
-  HInstruction* argument_elements = Add<HArgumentsElements>(false, false);
-  HInstruction* object = Add<HAccessArgumentsAt>(argument_elements, argc,
-                                                 graph()->GetConstantMinus1());
-  BuildCheckHeapObject(object);
-  HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
-  Add<HCheckInstanceType>(object, HCheckInstanceType::IS_JS_FUNCTION);
-
-  // Disallow binding of slow-mode functions. We need to figure out whether the
-  // length and name properties are in the original state.
-  {
-    HValue* bit_field3 =
-        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField3());
-    HValue* mask = Add<HConstant>(static_cast<int>(Map::DictionaryMap::kMask));
-    HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
-    IfBuilder check(this);
-    check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
-    check.ThenDeopt(DeoptimizeReason::kFastPathFailed);
-    check.End();
-  }
-
-  // Check whether the length and name properties are still present as
-  // AccessorInfo objects. In that case, their value can be recomputed even if
-  // the actual value on the object changes.
-  {
-    HValue* descriptors =
-        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapDescriptors());
-
-    HValue* descriptors_length = Add<HLoadNamedField>(
-        descriptors, nullptr, HObjectAccess::ForFixedArrayLength());
-    IfBuilder range(this);
-    range.If<HCompareNumericAndBranch>(descriptors_length,
-                                       graph()->GetConstant1(), Token::LTE);
-    range.ThenDeopt(DeoptimizeReason::kFastPathFailed);
-    range.End();
-
-    // Verify .length.
-    const int length_index = JSFunction::kLengthDescriptorIndex;
-    HValue* maybe_length = Add<HLoadKeyed>(
-        descriptors, Add<HConstant>(DescriptorArray::ToKeyIndex(length_index)),
-        nullptr, nullptr, FAST_ELEMENTS);
-    Unique<Name> length_string = Unique<Name>::CreateUninitialized(
-        isolate()->factory()->length_string());
-    Add<HCheckValue>(maybe_length, length_string, false);
-
-    HValue* maybe_length_accessor = Add<HLoadKeyed>(
-        descriptors,
-        Add<HConstant>(DescriptorArray::ToValueIndex(length_index)), nullptr,
-        nullptr, FAST_ELEMENTS);
-    BuildCheckHeapObject(maybe_length_accessor);
-    Add<HCheckMaps>(maybe_length_accessor,
-                    isolate()->factory()->accessor_info_map());
-
-    // Verify .name.
-    const int name_index = JSFunction::kNameDescriptorIndex;
-    HValue* maybe_name = Add<HLoadKeyed>(
-        descriptors, Add<HConstant>(DescriptorArray::ToKeyIndex(name_index)),
-        nullptr, nullptr, FAST_ELEMENTS);
-    Unique<Name> name_string =
-        Unique<Name>::CreateUninitialized(isolate()->factory()->name_string());
-    Add<HCheckValue>(maybe_name, name_string, false);
-
-    HValue* maybe_name_accessor = Add<HLoadKeyed>(
-        descriptors, Add<HConstant>(DescriptorArray::ToValueIndex(name_index)),
-        nullptr, nullptr, FAST_ELEMENTS);
-    BuildCheckHeapObject(maybe_name_accessor);
-    Add<HCheckMaps>(maybe_name_accessor,
-                    isolate()->factory()->accessor_info_map());
-  }
-
-  // Choose the right bound function map based on whether the target is
-  // constructable.
-  {
-    HValue* bit_field =
-        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
-    HValue* mask = Add<HConstant>(static_cast<int>(1 << Map::kIsConstructor));
-    HValue* bits = AddUncasted<HBitwise>(Token::BIT_AND, bit_field, mask);
-
-    HValue* native_context = BuildGetNativeContext();
-    IfBuilder is_constructor(this);
-    is_constructor.If<HCompareNumericAndBranch>(bits, mask, Token::EQ);
-    is_constructor.Then();
-    {
-      HValue* map = Add<HLoadNamedField>(
-          native_context, nullptr,
-          HObjectAccess::ForContextSlot(
-              Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX));
-      environment()->Push(map);
-    }
-    is_constructor.Else();
-    {
-      HValue* map = Add<HLoadNamedField>(
-          native_context, nullptr,
-          HObjectAccess::ForContextSlot(
-              Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
-      environment()->Push(map);
-    }
-    is_constructor.End();
-  }
-  HValue* bound_function_map = environment()->Pop();
-
-  // Verify that __proto__ matches that of the target bound function.
-  {
-    HValue* prototype =
-        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForPrototype());
-    HValue* expected_prototype = Add<HLoadNamedField>(
-        bound_function_map, nullptr, HObjectAccess::ForPrototype());
-    IfBuilder equal_prototype(this);
-    equal_prototype.IfNot<HCompareObjectEqAndBranch>(prototype,
-                                                     expected_prototype);
-    equal_prototype.ThenDeopt(DeoptimizeReason::kFastPathFailed);
-    equal_prototype.End();
-  }
-
-  // Allocate the arguments array.
-  IfBuilder empty_args(this);
-  empty_args.If<HCompareNumericAndBranch>(argc, graph()->GetConstant1(),
-                                          Token::LTE);
-  empty_args.Then();
-  { environment()->Push(Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex)); }
-  empty_args.Else();
-  {
-    HValue* elements_length = AddUncasted<HSub>(argc, graph()->GetConstant1());
-    HValue* elements =
-        BuildAllocateAndInitializeArray(FAST_ELEMENTS, elements_length);
-
-    LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
-    HValue* start = graph()->GetConstant1();
-    HValue* key = builder.BeginBody(start, argc, Token::LT);
-    {
-      HValue* argument = Add<HAccessArgumentsAt>(argument_elements, argc, key);
-      HValue* index = AddUncasted<HSub>(key, graph()->GetConstant1());
-      AddElementAccess(elements, index, argument, elements, nullptr,
-                       FAST_ELEMENTS, STORE);
-    }
-    builder.EndBody();
-    environment()->Push(elements);
-  }
-  empty_args.End();
-  HValue* elements = environment()->Pop();
-
-  // Find the 'this' to bind.
-  IfBuilder no_receiver(this);
-  no_receiver.If<HCompareNumericAndBranch>(argc, graph()->GetConstant0(),
-                                           Token::EQ);
-  no_receiver.Then();
-  { environment()->Push(Add<HLoadRoot>(Heap::kUndefinedValueRootIndex)); }
-  no_receiver.Else();
-  {
-    environment()->Push(Add<HAccessArgumentsAt>(argument_elements, argc,
-                                                graph()->GetConstant0()));
-  }
-  no_receiver.End();
-  HValue* receiver = environment()->Pop();
-
-  // Allocate the resulting bound function.
-  HValue* size = Add<HConstant>(JSBoundFunction::kSize);
-  HValue* bound_function =
-      Add<HAllocate>(size, HType::JSObject(), NOT_TENURED,
-                     JS_BOUND_FUNCTION_TYPE, graph()->GetConstant0());
-  Add<HStoreNamedField>(bound_function, HObjectAccess::ForMap(),
-                        bound_function_map);
-  HValue* empty_fixed_array = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
-  Add<HStoreNamedField>(bound_function, HObjectAccess::ForPropertiesPointer(),
-                        empty_fixed_array);
-  Add<HStoreNamedField>(bound_function, HObjectAccess::ForElementsPointer(),
-                        empty_fixed_array);
-  Add<HStoreNamedField>(bound_function, HObjectAccess::ForBoundTargetFunction(),
-                        object);
-
-  Add<HStoreNamedField>(bound_function, HObjectAccess::ForBoundThis(),
-                        receiver);
-  Add<HStoreNamedField>(bound_function, HObjectAccess::ForBoundArguments(),
-                        elements);
-
-  return bound_function;
-}
-
-Handle<Code> FastFunctionBindStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-template <>
-HValue* CodeStubGraphBuilder<LoadFastElementStub>::BuildCodeStub() {
-  LoadKeyedHoleMode hole_mode = casted_stub()->convert_hole_to_undefined()
-                                    ? CONVERT_HOLE_TO_UNDEFINED
-                                    : NEVER_RETURN_HOLE;
-
-  HInstruction* load = BuildUncheckedMonomorphicElementAccess(
-      GetParameter(Descriptor::kReceiver), GetParameter(Descriptor::kName),
-      NULL, casted_stub()->is_js_array(), casted_stub()->elements_kind(), LOAD,
-      hole_mode, STANDARD_STORE);
-  return load;
-}
-
-
-Handle<Code> LoadFastElementStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
-    HValue* object, FieldIndex index) {
-  Representation representation = index.is_double()
-      ? Representation::Double()
-      : Representation::Tagged();
-  int offset = index.offset();
-  HObjectAccess access = index.is_inobject()
-      ? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
-      : HObjectAccess::ForBackingStoreOffset(offset, representation);
-  if (index.is_double() &&
-      (!FLAG_unbox_double_fields || !index.is_inobject())) {
-    // Load the heap number.
-    object = Add<HLoadNamedField>(
-        object, nullptr, access.WithRepresentation(Representation::Tagged()));
-    // Load the double value from it.
-    access = HObjectAccess::ForHeapNumberValue();
-  }
-  return Add<HLoadNamedField>(object, nullptr, access);
-}
-
-
-template<>
-HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
-  return BuildLoadNamedField(GetParameter(Descriptor::kReceiver),
-                             casted_stub()->index());
-}
-
-
-Handle<Code> LoadFieldStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() {
-  HValue* map = AddLoadMap(GetParameter(Descriptor::kReceiver), NULL);
-  HObjectAccess descriptors_access = HObjectAccess::ForObservableJSObjectOffset(
-      Map::kDescriptorsOffset, Representation::Tagged());
-  HValue* descriptors = Add<HLoadNamedField>(map, nullptr, descriptors_access);
-  HObjectAccess value_access = HObjectAccess::ForObservableJSObjectOffset(
-      DescriptorArray::GetValueOffset(casted_stub()->constant_index()));
-  return Add<HLoadNamedField>(descriptors, nullptr, value_access);
-}
-
-
-Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
-
-
-void CodeStubGraphBuilderBase::BuildStoreNamedField(
-    HValue* object, HValue* value, FieldIndex index,
-    Representation representation, bool transition_to_field) {
-  DCHECK(!index.is_double() || representation.IsDouble());
-  int offset = index.offset();
-  HObjectAccess access =
-      index.is_inobject()
-          ? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
-          : HObjectAccess::ForBackingStoreOffset(offset, representation);
-
-  if (representation.IsDouble()) {
-    if (!FLAG_unbox_double_fields || !index.is_inobject()) {
-      HObjectAccess heap_number_access =
-          access.WithRepresentation(Representation::Tagged());
-      if (transition_to_field) {
-        // The store requires a mutable HeapNumber to be allocated.
-        NoObservableSideEffectsScope no_side_effects(this);
-        HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
-
-        // TODO(hpayer): Allocation site pretenuring support.
-        HInstruction* heap_number =
-            Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
-                           MUTABLE_HEAP_NUMBER_TYPE, graph()->GetConstant0());
-        AddStoreMapConstant(heap_number,
-                            isolate()->factory()->mutable_heap_number_map());
-        Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
-                              value);
-        // Store the new mutable heap number into the object.
-        access = heap_number_access;
-        value = heap_number;
-      } else {
-        // Load the heap number.
-        object = Add<HLoadNamedField>(object, nullptr, heap_number_access);
-        // Store the double value into it.
-        access = HObjectAccess::ForHeapNumberValue();
-      }
-    }
-  } else if (representation.IsHeapObject()) {
-    BuildCheckHeapObject(value);
-  }
-
-  Add<HStoreNamedField>(object, access, value, INITIALIZING_STORE);
-}
-
-
 template <>
 HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
   ElementsKind const from_kind = casted_stub()->from_kind();
@@ -988,138 +484,6 @@
 }
 
 
-HValue* CodeStubGraphBuilderBase::BuildToString(HValue* input, bool convert) {
-  if (!convert) return BuildCheckString(input);
-  IfBuilder if_inputissmi(this);
-  HValue* inputissmi = if_inputissmi.If<HIsSmiAndBranch>(input);
-  if_inputissmi.Then();
-  {
-    // Convert the input smi to a string.
-    Push(BuildNumberToString(input, AstType::SignedSmall()));
-  }
-  if_inputissmi.Else();
-  {
-    HValue* input_map =
-        Add<HLoadNamedField>(input, inputissmi, HObjectAccess::ForMap());
-    HValue* input_instance_type = Add<HLoadNamedField>(
-        input_map, inputissmi, HObjectAccess::ForMapInstanceType());
-    IfBuilder if_inputisstring(this);
-    if_inputisstring.If<HCompareNumericAndBranch>(
-        input_instance_type, Add<HConstant>(FIRST_NONSTRING_TYPE), Token::LT);
-    if_inputisstring.Then();
-    {
-      // The input is already a string.
-      Push(input);
-    }
-    if_inputisstring.Else();
-    {
-      // Convert to primitive first (if necessary), see
-      // ES6 section 12.7.3 The Addition operator.
-      IfBuilder if_inputisprimitive(this);
-      STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
-      if_inputisprimitive.If<HCompareNumericAndBranch>(
-          input_instance_type, Add<HConstant>(LAST_PRIMITIVE_TYPE), Token::LTE);
-      if_inputisprimitive.Then();
-      {
-        // The input is already a primitive.
-        Push(input);
-      }
-      if_inputisprimitive.Else();
-      {
-        // Convert the input to a primitive.
-        Push(BuildToPrimitive(input, input_map));
-      }
-      if_inputisprimitive.End();
-      // Convert the primitive to a string value.
-      HValue* values[] = {Pop()};
-      Callable toString = CodeFactory::ToString(isolate());
-      Push(AddUncasted<HCallWithDescriptor>(Add<HConstant>(toString.code()), 0,
-                                            toString.descriptor(),
-                                            ArrayVector(values)));
-    }
-    if_inputisstring.End();
-  }
-  if_inputissmi.End();
-  return Pop();
-}
-
-
-HValue* CodeStubGraphBuilderBase::BuildToPrimitive(HValue* input,
-                                                   HValue* input_map) {
-  // Get the native context of the caller.
-  HValue* native_context = BuildGetNativeContext();
-
-  // Determine the initial map of the %ObjectPrototype%.
-  HValue* object_function_prototype_map =
-      Add<HLoadNamedField>(native_context, nullptr,
-                           HObjectAccess::ForContextSlot(
-                               Context::OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX));
-
-  // Determine the initial map of the %StringPrototype%.
-  HValue* string_function_prototype_map =
-      Add<HLoadNamedField>(native_context, nullptr,
-                           HObjectAccess::ForContextSlot(
-                               Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
-
-  // Determine the initial map of the String function.
-  HValue* string_function = Add<HLoadNamedField>(
-      native_context, nullptr,
-      HObjectAccess::ForContextSlot(Context::STRING_FUNCTION_INDEX));
-  HValue* string_function_initial_map = Add<HLoadNamedField>(
-      string_function, nullptr, HObjectAccess::ForPrototypeOrInitialMap());
-
-  // Determine the map of the [[Prototype]] of {input}.
-  HValue* input_prototype =
-      Add<HLoadNamedField>(input_map, nullptr, HObjectAccess::ForPrototype());
-  HValue* input_prototype_map =
-      Add<HLoadNamedField>(input_prototype, nullptr, HObjectAccess::ForMap());
-
-  // For string wrappers (JSValue instances with [[StringData]] internal
-  // fields), we can short-circuit the ToPrimitive if
-  //
-  //  (a) the {input} map matches the initial map of the String function,
-  //  (b) the {input} [[Prototype]] is the unmodified %StringPrototype% (i.e.
-  //      no one monkey-patched toString, @@toPrimitive or valueOf), and
-  //  (c) the %ObjectPrototype% (i.e. the [[Prototype]] of the
-  //      %StringPrototype%) is also unmodified, that is no one sneaked a
-  //      @@toPrimitive into the %ObjectPrototype%.
-  //
-  // If all these assumptions hold, we can just take the [[StringData]] value
-  // and return it.
-  // TODO(bmeurer): This just repairs a regression introduced by removing the
-  // weird (and broken) intrinsic %_IsStringWrapperSafeForDefaultValue, which
-  // was intended to do something similar to this, although less efficient
-  // and wrong in the presence of @@toPrimitive. Long-term we might want to
-  // move in the direction of having a ToPrimitiveStub that can do common cases
-  // while staying in JavaScript land (i.e. not going to C++).
-  IfBuilder if_inputisstringwrapper(this);
-  if_inputisstringwrapper.If<HCompareObjectEqAndBranch>(
-      input_map, string_function_initial_map);
-  if_inputisstringwrapper.And();
-  if_inputisstringwrapper.If<HCompareObjectEqAndBranch>(
-      input_prototype_map, string_function_prototype_map);
-  if_inputisstringwrapper.And();
-  if_inputisstringwrapper.If<HCompareObjectEqAndBranch>(
-      Add<HLoadNamedField>(Add<HLoadNamedField>(input_prototype_map, nullptr,
-                                                HObjectAccess::ForPrototype()),
-                           nullptr, HObjectAccess::ForMap()),
-      object_function_prototype_map);
-  if_inputisstringwrapper.Then();
-  {
-    Push(BuildLoadNamedField(
-        input, FieldIndex::ForInObjectOffset(JSValue::kValueOffset)));
-  }
-  if_inputisstringwrapper.Else();
-  {
-    // TODO(bmeurer): Add support for fast ToPrimitive conversion using
-    // a dedicated ToPrimitiveStub.
-    Add<HPushArguments>(input);
-    Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kToPrimitive), 1));
-  }
-  if_inputisstringwrapper.End();
-  return Pop();
-}
-
 template <>
 HValue* CodeStubGraphBuilder<ToBooleanICStub>::BuildCodeInitializedStub() {
   ToBooleanICStub* stub = casted_stub();
@@ -1134,24 +498,5 @@
 
 Handle<Code> ToBooleanICStub::GenerateCode() { return DoGenerateCode(this); }
 
-template <>
-HValue* CodeStubGraphBuilder<LoadDictionaryElementStub>::BuildCodeStub() {
-  HValue* receiver = GetParameter(Descriptor::kReceiver);
-  HValue* key = GetParameter(Descriptor::kName);
-
-  Add<HCheckSmi>(key);
-
-  HValue* elements = AddLoadElements(receiver);
-
-  HValue* hash = BuildElementIndexHash(key);
-
-  return BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash);
-}
-
-
-Handle<Code> LoadDictionaryElementStub::GenerateCode() {
-  return DoGenerateCode(this);
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 2ee5ece..48d24f8 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -6,19 +6,25 @@
 
 #include <sstream>
 
+#include "src/arguments.h"
 #include "src/ast/ast.h"
 #include "src/bootstrapper.h"
 #include "src/code-factory.h"
 #include "src/code-stub-assembler.h"
+#include "src/counters.h"
 #include "src/factory.h"
 #include "src/gdb-jit.h"
-#include "src/ic/handler-compiler.h"
+#include "src/heap/heap-inl.h"
+#include "src/ic/ic-stats.h"
 #include "src/ic/ic.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
+#include "src/tracing/tracing-category-observer.h"
 
 namespace v8 {
 namespace internal {
 
+using compiler::CodeAssemblerState;
 
 RUNTIME_FUNCTION(UnexpectedStubMiss) {
   FATAL("Unexpected deopt of a stub");
@@ -69,7 +75,7 @@
 
 bool CodeStub::FindCodeInCache(Code** code_out) {
   UnseededNumberDictionary* stubs = isolate()->heap()->code_stubs();
-  int index = stubs->FindEntry(GetKey());
+  int index = stubs->FindEntry(isolate(), GetKey());
   if (index != UnseededNumberDictionary::kNotFound) {
     *code_out = Code::cast(stubs->ValueAt(index));
     return true;
@@ -101,8 +107,7 @@
   return Code::ComputeFlags(GetCodeKind(), GetExtraICState());
 }
 
-
-Handle<Code> CodeStub::GetCodeCopy(const Code::FindAndReplacePattern& pattern) {
+Handle<Code> CodeStub::GetCodeCopy(const FindAndReplacePattern& pattern) {
   Handle<Code> ic = GetCode();
   ic = isolate()->factory()->CopyCode(ic);
   ic->FindAndReplace(pattern);
@@ -110,6 +115,12 @@
   return ic;
 }
 
+void CodeStub::DeleteStubFromCacheForTesting() {
+  Heap* heap = isolate_->heap();
+  Handle<UnseededNumberDictionary> dict(heap->code_stubs());
+  dict = UnseededNumberDictionary::DeleteKey(dict, GetKey());
+  heap->SetRootCodeStubs(*dict);
+}
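
DeleteStubFromCacheForTesting evicts a stub's compiled code from the isolate's
stub cache, forcing the next GetCode() to regenerate it. Sketch of intended
test usage (SomeStub stands in for a concrete stub type):

    SomeStub stub(isolate);
    Handle<Code> first = stub.GetCode();   // compiles and caches the stub
    stub.DeleteStubFromCacheForTesting();  // drops it from the cache
    Handle<Code> second = stub.GetCode();  // regenerated from scratch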
 
 Handle<Code> PlatformCodeStub::GenerateCode() {
   Factory* factory = isolate()->factory();
@@ -183,8 +194,7 @@
   }
 
   Activate(code);
-  DCHECK(!NeedsImmovableCode() ||
-         heap->lo_space()->Contains(code) ||
+  DCHECK(!NeedsImmovableCode() || Heap::IsImmovable(code) ||
          heap->code_space()->FirstPage()->Contains(code->address()));
   return Handle<Code>(code, isolate());
 }
@@ -322,36 +332,38 @@
   os << "StringAddStub_" << flags() << "_" << pretenure_flag();
 }
 
-void StringAddStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void StringAddStub::GenerateAssembly(
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
-  Node* left = assembler->Parameter(Descriptor::kLeft);
-  Node* right = assembler->Parameter(Descriptor::kRight);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  CodeStubAssembler assembler(state);
+  Node* left = assembler.Parameter(Descriptor::kLeft);
+  Node* right = assembler.Parameter(Descriptor::kRight);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
   if ((flags() & STRING_ADD_CHECK_LEFT) != 0) {
     DCHECK((flags() & STRING_ADD_CONVERT) != 0);
     // TODO(danno): The ToString and JSReceiverToPrimitive below could be
     // combined to avoid duplicate smi and instance type checks.
-    left = assembler->ToString(context,
-                               assembler->JSReceiverToPrimitive(context, left));
+    left = assembler.ToString(context,
+                              assembler.JSReceiverToPrimitive(context, left));
   }
   if ((flags() & STRING_ADD_CHECK_RIGHT) != 0) {
     DCHECK((flags() & STRING_ADD_CONVERT) != 0);
     // TODO(danno): The ToString and JSReceiverToPrimitive below could be
     // combined to avoid duplicate smi and instance type checks.
-    right = assembler->ToString(
-        context, assembler->JSReceiverToPrimitive(context, right));
+    right = assembler.ToString(context,
+                               assembler.JSReceiverToPrimitive(context, right));
   }
 
   if ((flags() & STRING_ADD_CHECK_BOTH) == 0) {
     CodeStubAssembler::AllocationFlag flags =
         (pretenure_flag() == TENURED) ? CodeStubAssembler::kPretenured
                                       : CodeStubAssembler::kNone;
-    assembler->Return(assembler->StringAdd(context, left, right, flags));
+    assembler.Return(assembler.StringAdd(context, left, right, flags));
   } else {
     Callable callable = CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE,
                                                pretenure_flag());
-    assembler->TailCallStub(callable, context, left, right);
+    assembler.TailCallStub(callable, context, left, right);
   }
 }
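
The rewrite of StringAddStub above is the template for the rest of this file:
GenerateAssembly now takes a compiler::CodeAssemblerState* and constructs the
CodeStubAssembler by value, so every assembler-> call becomes assembler. and
code generation finishes via the static CodeAssembler::GenerateCode(&state).
Schematically, with FooStub and its descriptor field as placeholders:

    // Before this change:
    void FooStub::GenerateAssembly(CodeStubAssembler* assembler) const {
      assembler->Return(assembler->Parameter(Descriptor::kValue));
    }
    // After this change:
    void FooStub::GenerateAssembly(compiler::CodeAssemblerState* state) const {
      CodeStubAssembler assembler(state);
      assembler.Return(assembler.Parameter(Descriptor::kValue));
    }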
 
@@ -422,305 +434,96 @@
   const char* name = CodeStub::MajorName(MajorKey());
   Zone zone(isolate()->allocator(), ZONE_NAME);
   CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
-  CodeStubAssembler assembler(isolate(), &zone, descriptor, GetCodeFlags(),
-                              name);
-  GenerateAssembly(&assembler);
-  return assembler.GenerateCode();
-}
-
-void LoadICTrampolineStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-  Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
-  CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
-  assembler->LoadIC(&p);
-}
-
-void LoadICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-
-  CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
-  assembler->LoadIC(&p);
-}
-
-void LoadICProtoArrayStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* handler = assembler->Parameter(Descriptor::kHandler);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-
-  CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
-  assembler->LoadICProtoArray(&p, handler);
-}
-
-void LoadGlobalICTrampolineStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-  Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
-  CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
-                                        vector);
-  assembler->LoadGlobalIC(&p);
-}
-
-void LoadGlobalICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-
-  CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
-                                        vector);
-  assembler->LoadGlobalIC(&p);
-}
-
-void KeyedLoadICTrampolineTFStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-  Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
-  CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
-  assembler->KeyedLoadIC(&p);
-}
-
-void KeyedLoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-
-  CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
-  assembler->KeyedLoadIC(&p);
-}
-
-void StoreICTrampolineStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-  Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
-  CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
-                                         vector);
-  assembler->StoreIC(&p);
-}
-
-void StoreICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-
-  CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
-                                         vector);
-  assembler->StoreIC(&p);
-}
-
-void KeyedStoreICTrampolineTFStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-  Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
-  CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
-                                         vector);
-  assembler->KeyedStoreIC(&p, StoreICState::GetLanguageMode(GetExtraICState()));
-}
-
-void KeyedStoreICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-
-  CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
-                                         vector);
-  assembler->KeyedStoreIC(&p, StoreICState::GetLanguageMode(GetExtraICState()));
-}
-
-void StoreMapStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* map = assembler->Parameter(Descriptor::kMap);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-
-  assembler->StoreObjectField(receiver, JSObject::kMapOffset, map);
-  assembler->Return(value);
-}
-
-void StoreTransitionStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* offset =
-      assembler->SmiUntag(assembler->Parameter(Descriptor::kFieldOffset));
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* map = assembler->Parameter(Descriptor::kMap);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-
-  Label miss(assembler);
-
-  Representation representation = this->representation();
-  assembler->Comment("StoreTransitionStub: is_inobject: %d: representation: %s",
-                     is_inobject(), representation.Mnemonic());
-
-  Node* prepared_value =
-      assembler->PrepareValueForWrite(value, representation, &miss);
-
-  if (store_mode() == StoreTransitionStub::ExtendStorageAndStoreMapAndValue) {
-    assembler->Comment("Extend storage");
-    assembler->ExtendPropertiesBackingStore(receiver);
-  } else {
-    DCHECK(store_mode() == StoreTransitionStub::StoreMapAndValue);
-  }
-
-  // Store the new value into the "extended" object.
-  assembler->Comment("Store value");
-  assembler->StoreNamedField(receiver, offset, is_inobject(), representation,
-                             prepared_value, true);
-
-  // And finally update the map.
-  assembler->Comment("Store map");
-  assembler->StoreObjectField(receiver, JSObject::kMapOffset, map);
-  assembler->Return(value);
-
-  // Only a store to a tagged field never bails out.
-  if (!representation.IsTagged()) {
-    assembler->Bind(&miss);
-    {
-      assembler->Comment("Miss");
-      assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
-                                 vector, receiver, name);
-    }
-  }
+  compiler::CodeAssemblerState state(isolate(), &zone, descriptor,
+                                     GetCodeFlags(), name);
+  GenerateAssembly(&state);
+  return compiler::CodeAssembler::GenerateCode(&state);
 }
 
 void ElementsTransitionAndStoreStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* key = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* map = assembler->Parameter(Descriptor::kMap);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+  Node* key = assembler.Parameter(Descriptor::kName);
+  Node* value = assembler.Parameter(Descriptor::kValue);
+  Node* map = assembler.Parameter(Descriptor::kMap);
+  Node* slot = assembler.Parameter(Descriptor::kSlot);
+  Node* vector = assembler.Parameter(Descriptor::kVector);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  assembler->Comment(
+  assembler.Comment(
       "ElementsTransitionAndStoreStub: from_kind=%s, to_kind=%s,"
       " is_jsarray=%d, store_mode=%d",
       ElementsKindToString(from_kind()), ElementsKindToString(to_kind()),
       is_jsarray(), store_mode());
 
-  Label miss(assembler);
+  Label miss(&assembler);
 
   if (FLAG_trace_elements_transitions) {
     // Tracing elements transitions is the job of the runtime.
-    assembler->Goto(&miss);
+    assembler.Goto(&miss);
   } else {
-    assembler->TransitionElementsKind(receiver, map, from_kind(), to_kind(),
-                                      is_jsarray(), &miss);
-    assembler->EmitElementStore(receiver, key, value, is_jsarray(), to_kind(),
-                                store_mode(), &miss);
-    assembler->Return(value);
+    assembler.TransitionElementsKind(receiver, map, from_kind(), to_kind(),
+                                     is_jsarray(), &miss);
+    assembler.EmitElementStore(receiver, key, value, is_jsarray(), to_kind(),
+                               store_mode(), &miss);
+    assembler.Return(value);
   }
 
-  assembler->Bind(&miss);
+  assembler.Bind(&miss);
   {
-    assembler->Comment("Miss");
-    assembler->TailCallRuntime(Runtime::kElementsTransitionAndStoreIC_Miss,
-                               context, receiver, key, value, map, slot,
-                               vector);
+    assembler.Comment("Miss");
+    assembler.TailCallRuntime(Runtime::kElementsTransitionAndStoreIC_Miss,
+                              context, receiver, key, value, map, slot, vector);
   }
 }
 
 void AllocateHeapNumberStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  Node* result = assembler->AllocateHeapNumber();
-  assembler->Return(result);
+  Node* result = assembler.AllocateHeapNumber();
+  assembler.Return(result);
 }
 
-#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type)            \
-  void Allocate##Type##Stub::GenerateAssembly(CodeStubAssembler* assembler) \
-      const {                                                               \
-    compiler::Node* result =                                                \
-        assembler->Allocate(Simd128Value::kSize, CodeStubAssembler::kNone); \
-    compiler::Node* map = assembler->LoadMap(result);                       \
-    assembler->StoreNoWriteBarrier(                                         \
-        MachineRepresentation::kTagged, map,                                \
-        assembler->HeapConstant(isolate()->factory()->type##_map()));       \
-    assembler->Return(result);                                              \
+void StringLengthStub::GenerateAssembly(
+    compiler::CodeAssemblerState* state) const {
+  CodeStubAssembler assembler(state);
+  compiler::Node* value = assembler.Parameter(0);
+  compiler::Node* string = assembler.LoadJSValueValue(value);
+  compiler::Node* result = assembler.LoadStringLength(string);
+  assembler.Return(result);
+}
+
+#define BINARY_OP_STUB(Name)                                                  \
+  void Name::GenerateAssembly(compiler::CodeAssemblerState* state) const {    \
+    typedef BinaryOpWithVectorDescriptor Descriptor;                          \
+    CodeStubAssembler assembler(state);                                       \
+    assembler.Return(Generate(                                                \
+        &assembler, assembler.Parameter(Descriptor::kLeft),                   \
+        assembler.Parameter(Descriptor::kRight),                              \
+        assembler.ChangeUint32ToWord(assembler.Parameter(Descriptor::kSlot)), \
+        assembler.Parameter(Descriptor::kVector),                             \
+        assembler.Parameter(Descriptor::kContext)));                          \
   }
-SIMD128_TYPES(SIMD128_GEN_ASM)
-#undef SIMD128_GEN_ASM
-
-void StringLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  compiler::Node* value = assembler->Parameter(0);
-  compiler::Node* string = assembler->LoadJSValueValue(value);
-  compiler::Node* result = assembler->LoadStringLength(string);
-  assembler->Return(result);
-}
+BINARY_OP_STUB(AddWithFeedbackStub)
+BINARY_OP_STUB(SubtractWithFeedbackStub)
+BINARY_OP_STUB(MultiplyWithFeedbackStub)
+BINARY_OP_STUB(DivideWithFeedbackStub)
+BINARY_OP_STUB(ModulusWithFeedbackStub)
+#undef BINARY_OP_STUB
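
For reference, BINARY_OP_STUB expands for AddWithFeedbackStub to the following
(modulo whitespace), wiring the five descriptor parameters into the static
Generate helper defined below:

    void AddWithFeedbackStub::GenerateAssembly(
        compiler::CodeAssemblerState* state) const {
      typedef BinaryOpWithVectorDescriptor Descriptor;
      CodeStubAssembler assembler(state);
      assembler.Return(Generate(
          &assembler, assembler.Parameter(Descriptor::kLeft),
          assembler.Parameter(Descriptor::kRight),
          assembler.ChangeUint32ToWord(assembler.Parameter(Descriptor::kSlot)),
          assembler.Parameter(Descriptor::kVector),
          assembler.Parameter(Descriptor::kContext)));
    }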
 
 // static
-compiler::Node* AddWithFeedbackStub::Generate(
-    CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
-    compiler::Node* slot_id, compiler::Node* type_feedback_vector,
-    compiler::Node* context) {
+compiler::Node* AddWithFeedbackStub::Generate(CodeStubAssembler* assembler,
+                                              compiler::Node* lhs,
+                                              compiler::Node* rhs,
+                                              compiler::Node* slot_id,
+                                              compiler::Node* feedback_vector,
+                                              compiler::Node* context) {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Variable Variable;
@@ -732,7 +535,7 @@
       call_add_stub(assembler), end(assembler);
   Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
       var_fadd_rhs(assembler, MachineRepresentation::kFloat64),
-      var_type_feedback(assembler, MachineRepresentation::kWord32),
+      var_type_feedback(assembler, MachineRepresentation::kTaggedSigned),
       var_result(assembler, MachineRepresentation::kTagged);
 
   // Check if the {lhs} is a Smi or a HeapObject.
@@ -768,7 +571,7 @@
       assembler->Bind(&if_notoverflow);
       {
         var_type_feedback.Bind(
-            assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
+            assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall));
         var_result.Bind(assembler->BitcastWordToTaggedSigned(
             assembler->Projection(0, pair)));
         assembler->Goto(&end);
@@ -781,8 +584,8 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if the {rhs} is a HeapNumber.
-      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
-                            &check_rhsisoddball);
+      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+                           &check_rhsisoddball);
 
       var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
       var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
@@ -796,8 +599,8 @@
     Node* lhs_map = assembler->LoadMap(lhs);
 
     // Check if {lhs} is a HeapNumber.
-    assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
-                          &if_lhsisnotnumber);
+    assembler->GotoIfNot(assembler->IsHeapNumberMap(lhs_map),
+                         &if_lhsisnotnumber);
 
     // Check if the {rhs} is Smi.
     Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
@@ -817,8 +620,8 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if the {rhs} is a HeapNumber.
-      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
-                            &check_rhsisoddball);
+      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+                           &check_rhsisoddball);
 
       var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
       var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
@@ -829,7 +632,7 @@
   assembler->Bind(&do_fadd);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+        assembler->SmiConstant(BinaryOperationFeedback::kNumber));
     Node* value =
         assembler->Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
     Node* result = assembler->AllocateHeapNumberWithValue(value);
@@ -862,8 +665,8 @@
     assembler->Bind(&if_lhsisnotoddball);
     {
       // Exit unless {lhs} is a string
-      assembler->GotoUnless(assembler->IsStringInstanceType(lhs_instance_type),
-                            &call_with_any_feedback);
+      assembler->GotoIfNot(assembler->IsStringInstanceType(lhs_instance_type),
+                           &call_with_any_feedback);
 
       // Check if the {rhs} is a smi, and exit the string check early if it is.
       assembler->GotoIf(assembler->TaggedIsSmi(rhs), &call_with_any_feedback);
@@ -872,11 +675,11 @@
 
       // Exit unless {rhs} is a string. Since {lhs} is a string we no longer
       // need an Oddball check.
-      assembler->GotoUnless(assembler->IsStringInstanceType(rhs_instance_type),
-                            &call_with_any_feedback);
+      assembler->GotoIfNot(assembler->IsStringInstanceType(rhs_instance_type),
+                           &call_with_any_feedback);
 
       var_type_feedback.Bind(
-          assembler->Int32Constant(BinaryOperationFeedback::kString));
+          assembler->SmiConstant(BinaryOperationFeedback::kString));
       Callable callable = CodeFactory::StringAdd(
           assembler->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
       var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
@@ -899,14 +702,14 @@
   assembler->Bind(&call_with_oddball_feedback);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+        assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
     assembler->Goto(&call_add_stub);
   }
 
   assembler->Bind(&call_with_any_feedback);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kAny));
+        assembler->SmiConstant(BinaryOperationFeedback::kAny));
     assembler->Goto(&call_add_stub);
   }
 
@@ -918,7 +721,7 @@
   }
 
   assembler->Bind(&end);
-  assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+  assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
                             slot_id);
   return var_result.value();
 }
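
Two mechanical renames run through the Generate functions here: GotoUnless is
now GotoIfNot, and the collected type feedback is kept as a tagged Smi
(MachineRepresentation::kTaggedSigned) instead of a raw word32, presumably so
UpdateFeedback can consume it directly. The before/after pattern, shown for
one binding:

    // Before: raw word32 feedback.
    Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
    var_type_feedback.Bind(
        assembler->Int32Constant(BinaryOperationFeedback::kNumber));

    // After: Smi-tagged feedback.
    Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
    var_type_feedback.Bind(
        assembler->SmiConstant(BinaryOperationFeedback::kNumber));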
@@ -926,7 +729,7 @@
 // static
 compiler::Node* SubtractWithFeedbackStub::Generate(
     CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
-    compiler::Node* slot_id, compiler::Node* type_feedback_vector,
+    compiler::Node* slot_id, compiler::Node* feedback_vector,
     compiler::Node* context) {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
@@ -938,7 +741,7 @@
       call_with_any_feedback(assembler);
   Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
       var_fsub_rhs(assembler, MachineRepresentation::kFloat64),
-      var_type_feedback(assembler, MachineRepresentation::kWord32),
+      var_type_feedback(assembler, MachineRepresentation::kTaggedSigned),
       var_result(assembler, MachineRepresentation::kTagged);
 
   // Check if the {lhs} is a Smi or a HeapObject.
@@ -976,7 +779,7 @@
       assembler->Bind(&if_notoverflow);
       // {lhs}, {rhs} and the result are all Smis, so the combined
       // feedback is kSignedSmall.
       var_type_feedback.Bind(
-          assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
+          assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall));
       var_result.Bind(
           assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
       assembler->Goto(&end);
@@ -988,8 +791,8 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if {rhs} is a HeapNumber.
-      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
-                            &check_rhsisoddball);
+      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+                           &check_rhsisoddball);
 
       // Perform a floating point subtraction.
       var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
@@ -1004,8 +807,8 @@
     Node* lhs_map = assembler->LoadMap(lhs);
 
     // Check if the {lhs} is a HeapNumber.
-    assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
-                          &if_lhsisnotnumber);
+    assembler->GotoIfNot(assembler->IsHeapNumberMap(lhs_map),
+                         &if_lhsisnotnumber);
 
     // Check if the {rhs} is a Smi.
     Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
@@ -1026,8 +829,8 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if the {rhs} is a HeapNumber.
-      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
-                            &check_rhsisoddball);
+      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+                           &check_rhsisoddball);
 
       // Perform a floating point subtraction.
       var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
@@ -1039,7 +842,7 @@
   assembler->Bind(&do_fsub);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+        assembler->SmiConstant(BinaryOperationFeedback::kNumber));
     Node* lhs_value = var_fsub_lhs.value();
     Node* rhs_value = var_fsub_rhs.value();
     Node* value = assembler->Float64Sub(lhs_value, rhs_value);
@@ -1054,7 +857,7 @@
     Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
     Node* lhs_is_oddball = assembler->Word32Equal(
         lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
-    assembler->GotoUnless(lhs_is_oddball, &call_with_any_feedback);
+    assembler->GotoIfNot(lhs_is_oddball, &call_with_any_feedback);
 
     Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
     assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
@@ -1063,7 +866,7 @@
     assembler->Bind(&if_rhsissmi);
     {
       var_type_feedback.Bind(
-          assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+          assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
       assembler->Goto(&call_subtract_stub);
     }
 
@@ -1073,11 +876,11 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if {rhs} is a HeapNumber.
-      assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
-                            &check_rhsisoddball);
+      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+                           &check_rhsisoddball);
 
       var_type_feedback.Bind(
-          assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+          assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
       assembler->Goto(&call_subtract_stub);
     }
   }
@@ -1089,17 +892,17 @@
     Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
     Node* rhs_is_oddball = assembler->Word32Equal(
         rhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
-    assembler->GotoUnless(rhs_is_oddball, &call_with_any_feedback);
+    assembler->GotoIfNot(rhs_is_oddball, &call_with_any_feedback);
 
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+        assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
     assembler->Goto(&call_subtract_stub);
   }
 
   assembler->Bind(&call_with_any_feedback);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kAny));
+        assembler->SmiConstant(BinaryOperationFeedback::kAny));
     assembler->Goto(&call_subtract_stub);
   }
 
@@ -1111,7 +914,7 @@
   }
 
   assembler->Bind(&end);
-  assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+  assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
                             slot_id);
   return var_result.value();
 }
@@ -1120,7 +923,7 @@
 // static
 compiler::Node* MultiplyWithFeedbackStub::Generate(
     CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* rhs,
-    compiler::Node* slot_id, compiler::Node* type_feedback_vector,
+    compiler::Node* slot_id, compiler::Node* feedback_vector,
     compiler::Node* context) {
   using compiler::Node;
   typedef CodeStubAssembler::Label Label;
@@ -1134,9 +937,7 @@
   Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
       var_rhs_float64(assembler, MachineRepresentation::kFloat64),
       var_result(assembler, MachineRepresentation::kTagged),
-      var_type_feedback(assembler, MachineRepresentation::kWord32);
-
-  Node* number_map = assembler->HeapNumberMapConstant();
+      var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
 
   Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
   assembler->Branch(assembler->TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
@@ -1152,11 +953,10 @@
       // Both {lhs} and {rhs} are Smis. The result is not necessarily a Smi,
       // since the multiplication may overflow into a HeapNumber.
       var_result.Bind(assembler->SmiMul(lhs, rhs));
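+      // SelectSmiConstant(condition, t, f) yields SmiConstant(t) when the
+      // condition holds and SmiConstant(f) otherwise; SmiMul may overflow
+      // into a HeapNumber, hence the kNumber fallback below.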
-      var_type_feedback.Bind(assembler->Select(
+      var_type_feedback.Bind(assembler->SelectSmiConstant(
           assembler->TaggedIsSmi(var_result.value()),
-          assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
-          assembler->Int32Constant(BinaryOperationFeedback::kNumber),
-          MachineRepresentation::kWord32));
+          BinaryOperationFeedback::kSignedSmall,
+          BinaryOperationFeedback::kNumber));
       assembler->Goto(&end);
     }
 
@@ -1165,8 +965,8 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if {rhs} is a HeapNumber.
-      assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
-                            &check_rhsisoddball);
+      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+                           &check_rhsisoddball);
 
       // Convert {lhs} to a double and multiply it with the value of {rhs}.
       var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
@@ -1180,8 +980,8 @@
     Node* lhs_map = assembler->LoadMap(lhs);
 
     // Check if {lhs} is a HeapNumber.
-    assembler->GotoUnless(assembler->WordEqual(lhs_map, number_map),
-                          &if_lhsisnotnumber);
+    assembler->GotoIfNot(assembler->IsHeapNumberMap(lhs_map),
+                         &if_lhsisnotnumber);
 
     // Check if {rhs} is a Smi.
     Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
@@ -1201,8 +1001,8 @@
       Node* rhs_map = assembler->LoadMap(rhs);
 
       // Check if {rhs} is a HeapNumber.
-      assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
-                            &check_rhsisoddball);
+      assembler->GotoIfNot(assembler->IsHeapNumberMap(rhs_map),
+                           &check_rhsisoddball);
 
       // Both {lhs} and {rhs} are HeapNumbers. Load their values and
       // multiply them.
@@ -1215,7 +1015,7 @@
   assembler->Bind(&do_fmul);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+        assembler->SmiConstant(BinaryOperationFeedback::kNumber));
     Node* value =
         assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
     Node* result = assembler->AllocateHeapNumberWithValue(value);
@@ -1230,7 +1030,7 @@
     Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
     Node* lhs_is_oddball = assembler->Word32Equal(
         lhs_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
-    assembler->GotoUnless(lhs_is_oddball, &call_with_any_feedback);
+    assembler->GotoIfNot(lhs_is_oddball, &call_with_any_feedback);
 
     assembler->GotoIf(assembler->TaggedIsSmi(rhs), &call_with_oddball_feedback);
 
@@ -1256,14 +1056,14 @@
   assembler->Bind(&call_with_oddball_feedback);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+        assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
     assembler->Goto(&call_multiply_stub);
   }
 
   assembler->Bind(&call_with_any_feedback);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kAny));
+        assembler->SmiConstant(BinaryOperationFeedback::kAny));
     assembler->Goto(&call_multiply_stub);
   }
 
@@ -1275,7 +1075,7 @@
   }
 
   assembler->Bind(&end);
-  assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+  assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
                             slot_id);
   return var_result.value();
 }
@@ -1285,7 +1085,7 @@
 compiler::Node* DivideWithFeedbackStub::Generate(
     CodeStubAssembler* assembler, compiler::Node* dividend,
     compiler::Node* divisor, compiler::Node* slot_id,
-    compiler::Node* type_feedback_vector, compiler::Node* context) {
+    compiler::Node* feedback_vector, compiler::Node* context) {
   using compiler::Node;
   typedef CodeStubAssembler::Label Label;
   typedef CodeStubAssembler::Variable Variable;
@@ -1298,9 +1098,7 @@
   Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
       var_divisor_float64(assembler, MachineRepresentation::kFloat64),
       var_result(assembler, MachineRepresentation::kTagged),
-      var_type_feedback(assembler, MachineRepresentation::kWord32);
-
-  Node* number_map = assembler->HeapNumberMapConstant();
+      var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
 
   Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
   assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
@@ -1318,27 +1116,26 @@
 
       // Do floating point division if {divisor} is zero.
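+      // A zero Smi is bit-identical to IntPtrConstant(0), but SmiConstant(0)
+      // keeps the comparison typed as a tagged value.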
       assembler->GotoIf(
-          assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
-          &bailout);
+          assembler->WordEqual(divisor, assembler->SmiConstant(0)), &bailout);
 
       // Do floating point division if {dividend} is zero and {divisor} is
       // negative.
       Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
       assembler->Branch(
-          assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
+          assembler->WordEqual(dividend, assembler->SmiConstant(0)),
           &dividend_is_zero, &dividend_is_not_zero);
 
       assembler->Bind(&dividend_is_zero);
       {
         assembler->GotoIf(
-            assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
+            assembler->SmiLessThan(divisor, assembler->SmiConstant(0)),
             &bailout);
         assembler->Goto(&dividend_is_not_zero);
       }
       assembler->Bind(&dividend_is_not_zero);
 
-      Node* untagged_divisor = assembler->SmiUntag(divisor);
-      Node* untagged_dividend = assembler->SmiUntag(dividend);
+      Node* untagged_divisor = assembler->SmiToWord32(divisor);
+      Node* untagged_dividend = assembler->SmiToWord32(dividend);
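+      // SmiToWord32 (rather than SmiUntag) makes the 32-bit division below
+      // explicit: on 64-bit targets an untagged Smi is a full word, while
+      // the division operates on 32-bit values.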
 
       // Do floating point division if {dividend} is kMinInt (or kMinInt - 1
       // if the Smi size is 31) and {divisor} is -1.
@@ -1367,8 +1164,8 @@
       assembler->GotoIf(assembler->Word32NotEqual(untagged_dividend, truncated),
                         &bailout);
       var_type_feedback.Bind(
-          assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
-      var_result.Bind(assembler->SmiTag(untagged_result));
+          assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall));
+      var_result.Bind(assembler->SmiFromWord32(untagged_result));
       assembler->Goto(&end);
 
       // Bailout: convert {dividend} and {divisor} to double and do double
@@ -1386,8 +1183,8 @@
       Node* divisor_map = assembler->LoadMap(divisor);
 
       // Check if {divisor} is a HeapNumber.
-      assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
-                            &check_divisor_for_oddball);
+      assembler->GotoIfNot(assembler->IsHeapNumberMap(divisor_map),
+                           &check_divisor_for_oddball);
 
       // Convert {dividend} to a double and divide it with the value of
       // {divisor}.
@@ -1401,8 +1198,8 @@
       Node* dividend_map = assembler->LoadMap(dividend);
 
       // Check if {dividend} is a HeapNumber.
-      assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
-                            &dividend_is_not_number);
+      assembler->GotoIfNot(assembler->IsHeapNumberMap(dividend_map),
+                           &dividend_is_not_number);
 
       // Check if {divisor} is a Smi.
       Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
@@ -1423,8 +1220,8 @@
         Node* divisor_map = assembler->LoadMap(divisor);
 
         // Check if {divisor} is a HeapNumber.
-        assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
-                              &check_divisor_for_oddball);
+        assembler->GotoIfNot(assembler->IsHeapNumberMap(divisor_map),
+                             &check_divisor_for_oddball);
 
         // Both {dividend} and {divisor} are HeapNumbers. Load their values
         // and divide them.
@@ -1438,7 +1235,7 @@
   assembler->Bind(&do_fdiv);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+        assembler->SmiConstant(BinaryOperationFeedback::kNumber));
     Node* value = assembler->Float64Div(var_dividend_float64.value(),
                                         var_divisor_float64.value());
     var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
@@ -1452,7 +1249,7 @@
     Node* dividend_instance_type = assembler->LoadInstanceType(dividend);
     Node* dividend_is_oddball = assembler->Word32Equal(
         dividend_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
-    assembler->GotoUnless(dividend_is_oddball, &call_with_any_feedback);
+    assembler->GotoIfNot(dividend_is_oddball, &call_with_any_feedback);
 
     assembler->GotoIf(assembler->TaggedIsSmi(divisor),
                       &call_with_oddball_feedback);
@@ -1479,14 +1276,14 @@
   assembler->Bind(&call_with_oddball_feedback);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+        assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
     assembler->Goto(&call_divide_stub);
   }
 
   assembler->Bind(&call_with_any_feedback);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kAny));
+        assembler->SmiConstant(BinaryOperationFeedback::kAny));
     assembler->Goto(&call_divide_stub);
   }
 
@@ -1498,7 +1295,7 @@
   }
 
   assembler->Bind(&end);
-  assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+  assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
                             slot_id);
   return var_result.value();
 }
@@ -1507,7 +1304,7 @@
 compiler::Node* ModulusWithFeedbackStub::Generate(
     CodeStubAssembler* assembler, compiler::Node* dividend,
     compiler::Node* divisor, compiler::Node* slot_id,
-    compiler::Node* type_feedback_vector, compiler::Node* context) {
+    compiler::Node* feedback_vector, compiler::Node* context) {
   using compiler::Node;
   typedef CodeStubAssembler::Label Label;
   typedef CodeStubAssembler::Variable Variable;
@@ -1520,9 +1317,7 @@
   Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
       var_divisor_float64(assembler, MachineRepresentation::kFloat64),
       var_result(assembler, MachineRepresentation::kTagged),
-      var_type_feedback(assembler, MachineRepresentation::kWord32);
-
-  Node* number_map = assembler->HeapNumberMapConstant();
+      var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
 
   Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
   assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
@@ -1537,10 +1332,10 @@
     assembler->Bind(&divisor_is_smi);
     {
       var_result.Bind(assembler->SmiMod(dividend, divisor));
-      var_type_feedback.Bind(assembler->Select(
+      var_type_feedback.Bind(assembler->SelectSmiConstant(
           assembler->TaggedIsSmi(var_result.value()),
-          assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
-          assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
+          BinaryOperationFeedback::kSignedSmall,
+          BinaryOperationFeedback::kNumber));
       assembler->Goto(&end);
     }
 
@@ -1549,8 +1344,8 @@
       Node* divisor_map = assembler->LoadMap(divisor);
 
       // Check if {divisor} is a HeapNumber.
-      assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
-                            &check_divisor_for_oddball);
+      assembler->GotoIfNot(assembler->IsHeapNumberMap(divisor_map),
+                           &check_divisor_for_oddball);
 
       // Convert {dividend} to a double and divide it with the value of
       // {divisor}.
@@ -1565,8 +1360,8 @@
     Node* dividend_map = assembler->LoadMap(dividend);
 
     // Check if {dividend} is a HeapNumber.
-    assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
-                          &dividend_is_not_number);
+    assembler->GotoIfNot(assembler->IsHeapNumberMap(dividend_map),
+                         &dividend_is_not_number);
 
     // Check if {divisor} is a Smi.
     Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
@@ -1587,8 +1382,8 @@
       Node* divisor_map = assembler->LoadMap(divisor);
 
       // Check if {divisor} is a HeapNumber.
-      assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
-                            &check_divisor_for_oddball);
+      assembler->GotoIfNot(assembler->IsHeapNumberMap(divisor_map),
+                           &check_divisor_for_oddball);
 
       // Both {dividend} and {divisor} are HeapNumbers. Load their values
       // and divide them.
@@ -1601,7 +1396,7 @@
   assembler->Bind(&do_fmod);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+        assembler->SmiConstant(BinaryOperationFeedback::kNumber));
     Node* value = assembler->Float64Mod(var_dividend_float64.value(),
                                         var_divisor_float64.value());
     var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
@@ -1615,7 +1410,7 @@
     Node* dividend_instance_type = assembler->LoadInstanceType(dividend);
     Node* dividend_is_oddball = assembler->Word32Equal(
         dividend_instance_type, assembler->Int32Constant(ODDBALL_TYPE));
-    assembler->GotoUnless(dividend_is_oddball, &call_with_any_feedback);
+    assembler->GotoIfNot(dividend_is_oddball, &call_with_any_feedback);
 
     assembler->GotoIf(assembler->TaggedIsSmi(divisor),
                       &call_with_oddball_feedback);
@@ -1642,14 +1437,14 @@
   assembler->Bind(&call_with_oddball_feedback);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+        assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
     assembler->Goto(&call_modulus_stub);
   }
 
   assembler->Bind(&call_with_any_feedback);
   {
     var_type_feedback.Bind(
-        assembler->Int32Constant(BinaryOperationFeedback::kAny));
+        assembler->SmiConstant(BinaryOperationFeedback::kAny));
     assembler->Goto(&call_modulus_stub);
   }
 
@@ -1661,282 +1456,18 @@
   }
 
   assembler->Bind(&end);
-  assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+  assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
                             slot_id);
   return var_result.value();
 }
 
-// static
-compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
-                                  compiler::Node* value,
-                                  compiler::Node* context,
-                                  compiler::Node* type_feedback_vector,
-                                  compiler::Node* slot_id) {
-  typedef CodeStubAssembler::Label Label;
+void NumberToStringStub::GenerateAssembly(
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  // Shared entry for floating point increment.
-  Label do_finc(assembler), end(assembler);
-  Variable var_finc_value(assembler, MachineRepresentation::kFloat64);
-
-  // We might need to try again due to ToNumber conversion.
-  Variable value_var(assembler, MachineRepresentation::kTagged);
-  Variable result_var(assembler, MachineRepresentation::kTagged);
-  Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
-  Variable* loop_vars[] = {&value_var, &var_type_feedback};
-  Label start(assembler, 2, loop_vars);
-  value_var.Bind(value);
-  var_type_feedback.Bind(
-      assembler->Int32Constant(BinaryOperationFeedback::kNone));
-  assembler->Goto(&start);
-  assembler->Bind(&start);
-  {
-    value = value_var.value();
-
-    Label if_issmi(assembler), if_isnotsmi(assembler);
-    assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
-
-    assembler->Bind(&if_issmi);
-    {
-      // Try fast Smi addition first.
-      Node* one = assembler->SmiConstant(Smi::FromInt(1));
-      Node* pair = assembler->IntPtrAddWithOverflow(
-          assembler->BitcastTaggedToWord(value),
-          assembler->BitcastTaggedToWord(one));
-      Node* overflow = assembler->Projection(1, pair);
-
-      // Check if the Smi addition overflowed.
-      Label if_overflow(assembler), if_notoverflow(assembler);
-      assembler->Branch(overflow, &if_overflow, &if_notoverflow);
-
-      assembler->Bind(&if_notoverflow);
-      var_type_feedback.Bind(assembler->Word32Or(
-          var_type_feedback.value(),
-          assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
-      result_var.Bind(
-          assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
-      assembler->Goto(&end);
-
-      assembler->Bind(&if_overflow);
-      {
-        var_finc_value.Bind(assembler->SmiToFloat64(value));
-        assembler->Goto(&do_finc);
-      }
-    }
-
-    assembler->Bind(&if_isnotsmi);
-    {
-      // Check if the value is a HeapNumber.
-      Label if_valueisnumber(assembler),
-          if_valuenotnumber(assembler, Label::kDeferred);
-      Node* value_map = assembler->LoadMap(value);
-      assembler->Branch(assembler->IsHeapNumberMap(value_map),
-                        &if_valueisnumber, &if_valuenotnumber);
-
-      assembler->Bind(&if_valueisnumber);
-      {
-        // Load the HeapNumber value.
-        var_finc_value.Bind(assembler->LoadHeapNumberValue(value));
-        assembler->Goto(&do_finc);
-      }
-
-      assembler->Bind(&if_valuenotnumber);
-      {
-        // We do not require an Or with earlier feedback here because once we
-        // convert the value to a number, we cannot reach this path. We can
-        // only reach this path on the first pass when the feedback is kNone.
-        CSA_ASSERT(assembler,
-                   assembler->Word32Equal(var_type_feedback.value(),
-                                          assembler->Int32Constant(
-                                              BinaryOperationFeedback::kNone)));
-
-        Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
-        Node* instance_type = assembler->LoadMapInstanceType(value_map);
-        Node* is_oddball = assembler->Word32Equal(
-            instance_type, assembler->Int32Constant(ODDBALL_TYPE));
-        assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
-
-        assembler->Bind(&if_valueisoddball);
-        {
-          // Convert Oddball to Number and check again.
-          value_var.Bind(
-              assembler->LoadObjectField(value, Oddball::kToNumberOffset));
-          var_type_feedback.Bind(assembler->Int32Constant(
-              BinaryOperationFeedback::kNumberOrOddball));
-          assembler->Goto(&start);
-        }
-
-        assembler->Bind(&if_valuenotoddball);
-        {
-          // Convert to a Number first and try again.
-          Callable callable =
-              CodeFactory::NonNumberToNumber(assembler->isolate());
-          var_type_feedback.Bind(
-              assembler->Int32Constant(BinaryOperationFeedback::kAny));
-          value_var.Bind(assembler->CallStub(callable, context, value));
-          assembler->Goto(&start);
-        }
-      }
-    }
-  }
-
-  assembler->Bind(&do_finc);
-  {
-    Node* finc_value = var_finc_value.value();
-    Node* one = assembler->Float64Constant(1.0);
-    Node* finc_result = assembler->Float64Add(finc_value, one);
-    var_type_feedback.Bind(assembler->Word32Or(
-        var_type_feedback.value(),
-        assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
-    result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&end);
-  assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
-                            slot_id);
-  return result_var.value();
-}
-
-void NumberToStringStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-  Node* argument = assembler->Parameter(Descriptor::kArgument);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-  assembler->Return(assembler->NumberToString(context, argument));
-}
-
-// static
-compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
-                                  compiler::Node* value,
-                                  compiler::Node* context,
-                                  compiler::Node* type_feedback_vector,
-                                  compiler::Node* slot_id) {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Variable Variable;
-
-  // Shared entry for floating point decrement.
-  Label do_fdec(assembler), end(assembler);
-  Variable var_fdec_value(assembler, MachineRepresentation::kFloat64);
-
-  // We might need to try again due to ToNumber conversion.
-  Variable value_var(assembler, MachineRepresentation::kTagged);
-  Variable result_var(assembler, MachineRepresentation::kTagged);
-  Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
-  Variable* loop_vars[] = {&value_var, &var_type_feedback};
-  Label start(assembler, 2, loop_vars);
-  var_type_feedback.Bind(
-      assembler->Int32Constant(BinaryOperationFeedback::kNone));
-  value_var.Bind(value);
-  assembler->Goto(&start);
-  assembler->Bind(&start);
-  {
-    value = value_var.value();
-
-    Label if_issmi(assembler), if_isnotsmi(assembler);
-    assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
-
-    assembler->Bind(&if_issmi);
-    {
-      // Try fast Smi subtraction first.
-      Node* one = assembler->SmiConstant(Smi::FromInt(1));
-      Node* pair = assembler->IntPtrSubWithOverflow(
-          assembler->BitcastTaggedToWord(value),
-          assembler->BitcastTaggedToWord(one));
-      Node* overflow = assembler->Projection(1, pair);
-
-      // Check if the Smi subtraction overflowed.
-      Label if_overflow(assembler), if_notoverflow(assembler);
-      assembler->Branch(overflow, &if_overflow, &if_notoverflow);
-
-      assembler->Bind(&if_notoverflow);
-      var_type_feedback.Bind(assembler->Word32Or(
-          var_type_feedback.value(),
-          assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
-      result_var.Bind(
-          assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
-      assembler->Goto(&end);
-
-      assembler->Bind(&if_overflow);
-      {
-        var_fdec_value.Bind(assembler->SmiToFloat64(value));
-        assembler->Goto(&do_fdec);
-      }
-    }
-
-    assembler->Bind(&if_isnotsmi);
-    {
-      // Check if the value is a HeapNumber.
-      Label if_valueisnumber(assembler),
-          if_valuenotnumber(assembler, Label::kDeferred);
-      Node* value_map = assembler->LoadMap(value);
-      assembler->Branch(assembler->IsHeapNumberMap(value_map),
-                        &if_valueisnumber, &if_valuenotnumber);
-
-      assembler->Bind(&if_valueisnumber);
-      {
-        // Load the HeapNumber value.
-        var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
-        assembler->Goto(&do_fdec);
-      }
-
-      assembler->Bind(&if_valuenotnumber);
-      {
-        // We do not require an Or with earlier feedback here because once we
-        // convert the value to a number, we cannot reach this path. We can
-        // only reach this path on the first pass when the feedback is kNone.
-        CSA_ASSERT(assembler,
-                   assembler->Word32Equal(var_type_feedback.value(),
-                                          assembler->Int32Constant(
-                                              BinaryOperationFeedback::kNone)));
-
-        Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
-        Node* instance_type = assembler->LoadMapInstanceType(value_map);
-        Node* is_oddball = assembler->Word32Equal(
-            instance_type, assembler->Int32Constant(ODDBALL_TYPE));
-        assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
-
-        assembler->Bind(&if_valueisoddball);
-        {
-          // Convert Oddball to Number and check again.
-          value_var.Bind(
-              assembler->LoadObjectField(value, Oddball::kToNumberOffset));
-          var_type_feedback.Bind(assembler->Int32Constant(
-              BinaryOperationFeedback::kNumberOrOddball));
-          assembler->Goto(&start);
-        }
-
-        assembler->Bind(&if_valuenotoddball);
-        {
-          // Convert to a Number first and try again.
-          Callable callable =
-              CodeFactory::NonNumberToNumber(assembler->isolate());
-          var_type_feedback.Bind(
-              assembler->Int32Constant(BinaryOperationFeedback::kAny));
-          value_var.Bind(assembler->CallStub(callable, context, value));
-          assembler->Goto(&start);
-        }
-      }
-    }
-  }
-
-  assembler->Bind(&do_fdec);
-  {
-    Node* fdec_value = var_fdec_value.value();
-    Node* one = assembler->Float64Constant(1.0);
-    Node* fdec_result = assembler->Float64Sub(fdec_value, one);
-    var_type_feedback.Bind(assembler->Word32Or(
-        var_type_feedback.value(),
-        assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
-    result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&end);
-  assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
-                            slot_id);
-  return result_var.value();
+  CodeStubAssembler assembler(state);
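+  // Stubs now receive a compiler::CodeAssemblerState and construct the
+  // CodeStubAssembler locally, so its methods are called directly rather
+  // than through a passed-in pointer.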
+  Node* argument = assembler.Parameter(Descriptor::kArgument);
+  Node* context = assembler.Parameter(Descriptor::kContext);
+  assembler.Return(assembler.NumberToString(context, argument));
 }
 
 // ES6 section 21.1.3.19 String.prototype.substring ( start, end )
@@ -1948,110 +1479,68 @@
   return assembler->SubString(context, string, from, to);
 }
 
-void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-  Node* context = assembler->Parameter(Descriptor::kContext);
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  // For now we only support receiver_is_holder.
-  DCHECK(receiver_is_holder());
-  Node* holder = receiver;
-  Node* map = assembler->LoadMap(receiver);
-  Node* descriptors = assembler->LoadMapDescriptors(map);
-  Node* value_index =
-      assembler->IntPtrConstant(DescriptorArray::ToValueIndex(index()));
-  Node* callback = assembler->LoadFixedArrayElement(
-      descriptors, value_index, 0, CodeStubAssembler::INTPTR_PARAMETERS);
-  assembler->TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
-                          holder, callback);
+void SubStringStub::GenerateAssembly(
+    compiler::CodeAssemblerState* state) const {
+  CodeStubAssembler assembler(state);
+  assembler.Return(Generate(&assembler,
+                            assembler.Parameter(Descriptor::kString),
+                            assembler.Parameter(Descriptor::kFrom),
+                            assembler.Parameter(Descriptor::kTo),
+                            assembler.Parameter(Descriptor::kContext)));
 }
 
-void StoreFieldStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void StoreGlobalStub::GenerateAssembly(
+    compiler::CodeAssemblerState* state) const {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  FieldIndex index = this->index();
-  Representation representation = this->representation();
-
-  assembler->Comment("StoreFieldStub: inobject=%d, offset=%d, rep=%s",
-                     index.is_inobject(), index.offset(),
-                     representation.Mnemonic());
-
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-
-  Label miss(assembler);
-
-  Node* prepared_value =
-      assembler->PrepareValueForWrite(value, representation, &miss);
-  assembler->StoreNamedField(receiver, index, representation, prepared_value,
-                             false);
-  assembler->Return(value);
-
-  // Only stores to tagged field can't bailout.
-  if (!representation.IsTagged()) {
-    assembler->Bind(&miss);
-    {
-      assembler->Comment("Miss");
-      assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
-                                 vector, receiver, name);
-    }
-  }
-}
-
-void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-
-  assembler->Comment(
+  assembler.Comment(
       "StoreGlobalStub: cell_type=%d, constant_type=%d, check_global=%d",
       cell_type(), PropertyCellType::kConstantType == cell_type()
                        ? static_cast<int>(constant_type())
                        : -1,
       check_global());
 
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+  Node* name = assembler.Parameter(Descriptor::kName);
+  Node* value = assembler.Parameter(Descriptor::kValue);
+  Node* slot = assembler.Parameter(Descriptor::kSlot);
+  Node* vector = assembler.Parameter(Descriptor::kVector);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  Label miss(assembler);
+  Label miss(&assembler);
 
   if (check_global()) {
     // Check that the map of the global has not changed: use a placeholder map
     // that will be replaced later with the global object's map.
-    Node* proxy_map = assembler->LoadMap(receiver);
-    Node* global = assembler->LoadObjectField(proxy_map, Map::kPrototypeOffset);
-    Node* map_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
+    Node* proxy_map = assembler.LoadMap(receiver);
+    Node* global = assembler.LoadObjectField(proxy_map, Map::kPrototypeOffset);
+    Node* map_cell = assembler.HeapConstant(isolate()->factory()->NewWeakCell(
         StoreGlobalStub::global_map_placeholder(isolate())));
-    Node* expected_map = assembler->LoadWeakCellValueUnchecked(map_cell);
-    Node* map = assembler->LoadMap(global);
-    assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
+    Node* expected_map = assembler.LoadWeakCellValueUnchecked(map_cell);
+    Node* map = assembler.LoadMap(global);
+    assembler.GotoIf(assembler.WordNotEqual(expected_map, map), &miss);
   }
 
-  Node* weak_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
+  Node* weak_cell = assembler.HeapConstant(isolate()->factory()->NewWeakCell(
       StoreGlobalStub::property_cell_placeholder(isolate())));
-  Node* cell = assembler->LoadWeakCellValue(weak_cell);
-  assembler->GotoIf(assembler->TaggedIsSmi(cell), &miss);
+  Node* cell = assembler.LoadWeakCellValue(weak_cell);
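+  // A cleared WeakCell reads as Smi zero, so a Smi here means the property
+  // cell has been collected and the store must be handled in the miss path.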
+  assembler.GotoIf(assembler.TaggedIsSmi(cell), &miss);
 
   // Load the payload of the global parameter cell. A hole indicates that the
   // cell has been invalidated and that the store must be handled by the
   // runtime.
   Node* cell_contents =
-      assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
+      assembler.LoadObjectField(cell, PropertyCell::kValueOffset);
 
   PropertyCellType cell_type = this->cell_type();
   if (cell_type == PropertyCellType::kConstant ||
       cell_type == PropertyCellType::kUndefined) {
     // This is always valid for all states a cell can be in.
-    assembler->GotoIf(assembler->WordNotEqual(cell_contents, value), &miss);
+    assembler.GotoIf(assembler.WordNotEqual(cell_contents, value), &miss);
   } else {
-    assembler->GotoIf(assembler->IsTheHole(cell_contents), &miss);
+    assembler.GotoIf(assembler.IsTheHole(cell_contents), &miss);
 
     // When dealing with constant types, the type may be allowed to change, as
     // long as optimized code remains valid.
@@ -2059,7 +1548,7 @@
     if (cell_type == PropertyCellType::kConstantType) {
       switch (constant_type()) {
         case PropertyCellConstantType::kSmi:
-          assembler->GotoUnless(assembler->TaggedIsSmi(value), &miss);
+          assembler.GotoIfNot(assembler.TaggedIsSmi(value), &miss);
           value_is_smi = true;
           break;
         case PropertyCellConstantType::kStableMap: {
@@ -2068,291 +1557,350 @@
           // are the maps that were originally in the cell or not. If optimized
           // code will deopt when a cell has an unstable map and if it has a
           // dependency on a stable map, it will deopt if the map destabilizes.
-          assembler->GotoIf(assembler->TaggedIsSmi(value), &miss);
-          assembler->GotoIf(assembler->TaggedIsSmi(cell_contents), &miss);
-          Node* expected_map = assembler->LoadMap(cell_contents);
-          Node* map = assembler->LoadMap(value);
-          assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
+          assembler.GotoIf(assembler.TaggedIsSmi(value), &miss);
+          assembler.GotoIf(assembler.TaggedIsSmi(cell_contents), &miss);
+          Node* expected_map = assembler.LoadMap(cell_contents);
+          Node* map = assembler.LoadMap(value);
+          assembler.GotoIf(assembler.WordNotEqual(expected_map, map), &miss);
           break;
         }
       }
     }
     if (value_is_smi) {
-      assembler->StoreObjectFieldNoWriteBarrier(
-          cell, PropertyCell::kValueOffset, value);
+      assembler.StoreObjectFieldNoWriteBarrier(cell, PropertyCell::kValueOffset,
+                                               value);
     } else {
-      assembler->StoreObjectField(cell, PropertyCell::kValueOffset, value);
+      assembler.StoreObjectField(cell, PropertyCell::kValueOffset, value);
     }
   }
 
-  assembler->Return(value);
+  assembler.Return(value);
 
-  assembler->Bind(&miss);
+  assembler.Bind(&miss);
   {
-    assembler->Comment("Miss");
-    assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
-                               vector, receiver, name);
+    assembler.Comment("Miss");
+    assembler.TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+                              vector, receiver, name);
   }
 }
 
 void KeyedLoadSloppyArgumentsStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* key = assembler->Parameter(Descriptor::kName);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+  Node* key = assembler.Parameter(Descriptor::kName);
+  Node* slot = assembler.Parameter(Descriptor::kSlot);
+  Node* vector = assembler.Parameter(Descriptor::kVector);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  Label miss(assembler);
+  Label miss(&assembler);
 
-  Node* result = assembler->LoadKeyedSloppyArguments(receiver, key, &miss);
-  assembler->Return(result);
+  Node* result = assembler.LoadKeyedSloppyArguments(receiver, key, &miss);
+  assembler.Return(result);
 
-  assembler->Bind(&miss);
+  assembler.Bind(&miss);
   {
-    assembler->Comment("Miss");
-    assembler->TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver,
-                               key, slot, vector);
+    assembler.Comment("Miss");
+    assembler.TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver,
+                              key, slot, vector);
   }
 }
 
 void KeyedStoreSloppyArgumentsStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* key = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+  Node* key = assembler.Parameter(Descriptor::kName);
+  Node* value = assembler.Parameter(Descriptor::kValue);
+  Node* slot = assembler.Parameter(Descriptor::kSlot);
+  Node* vector = assembler.Parameter(Descriptor::kVector);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  Label miss(assembler);
+  Label miss(&assembler);
 
-  assembler->StoreKeyedSloppyArguments(receiver, key, value, &miss);
-  assembler->Return(value);
+  assembler.StoreKeyedSloppyArguments(receiver, key, value, &miss);
+  assembler.Return(value);
 
-  assembler->Bind(&miss);
+  assembler.Bind(&miss);
   {
-    assembler->Comment("Miss");
-    assembler->TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value,
-                               slot, vector, receiver, key);
+    assembler.Comment("Miss");
+    assembler.TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot,
+                              vector, receiver, key);
   }
 }
 
 void LoadScriptContextFieldStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  assembler->Comment("LoadScriptContextFieldStub: context_index=%d, slot=%d",
-                     context_index(), slot_index());
+  assembler.Comment("LoadScriptContextFieldStub: context_index=%d, slot=%d",
+                    context_index(), slot_index());
 
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  Node* script_context = assembler->LoadScriptContext(context, context_index());
-  Node* result = assembler->LoadFixedArrayElement(
-      script_context, assembler->IntPtrConstant(slot_index()), 0,
-      CodeStubAssembler::INTPTR_PARAMETERS);
-  assembler->Return(result);
+  Node* script_context = assembler.LoadScriptContext(context, context_index());
+  Node* result = assembler.LoadFixedArrayElement(script_context, slot_index());
+  assembler.Return(result);
 }
 
 void StoreScriptContextFieldStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  assembler->Comment("StoreScriptContextFieldStub: context_index=%d, slot=%d",
-                     context_index(), slot_index());
+  assembler.Comment("StoreScriptContextFieldStub: context_index=%d, slot=%d",
+                    context_index(), slot_index());
 
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* value = assembler.Parameter(Descriptor::kValue);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  Node* script_context = assembler->LoadScriptContext(context, context_index());
-  assembler->StoreFixedArrayElement(
-      script_context, assembler->IntPtrConstant(slot_index()), value,
-      UPDATE_WRITE_BARRIER, CodeStubAssembler::INTPTR_PARAMETERS);
-  assembler->Return(value);
+  Node* script_context = assembler.LoadScriptContext(context, context_index());
+  assembler.StoreFixedArrayElement(
+      script_context, assembler.IntPtrConstant(slot_index()), value);
+  assembler.Return(value);
 }
 
 void StoreInterceptorStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* name = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-  assembler->TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
-                             receiver, name, value);
+  Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+  Node* name = assembler.Parameter(Descriptor::kName);
+  Node* value = assembler.Parameter(Descriptor::kValue);
+  Node* slot = assembler.Parameter(Descriptor::kSlot);
+  Node* vector = assembler.Parameter(Descriptor::kVector);
+  Node* context = assembler.Parameter(Descriptor::kContext);
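+  // The interceptor store now also threads {slot} and {vector} through to
+  // the runtime, matching the argument order of Runtime::kStoreIC_Miss.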
+  assembler.TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
+                            value, slot, vector, receiver, name);
 }
 
 void LoadIndexedInterceptorStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Label Label;
+  CodeStubAssembler assembler(state);
 
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* key = assembler->Parameter(Descriptor::kName);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+  Node* key = assembler.Parameter(Descriptor::kName);
+  Node* slot = assembler.Parameter(Descriptor::kSlot);
+  Node* vector = assembler.Parameter(Descriptor::kVector);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  Label if_keyispositivesmi(assembler), if_keyisinvalid(assembler);
-  assembler->Branch(assembler->WordIsPositiveSmi(key), &if_keyispositivesmi,
-                    &if_keyisinvalid);
-  assembler->Bind(&if_keyispositivesmi);
-  assembler->TailCallRuntime(Runtime::kLoadElementWithInterceptor, context,
-                             receiver, key);
+  Label if_keyispositivesmi(&assembler), if_keyisinvalid(&assembler);
+  assembler.Branch(assembler.TaggedIsPositiveSmi(key), &if_keyispositivesmi,
+                   &if_keyisinvalid);
+  assembler.Bind(&if_keyispositivesmi);
+  assembler.TailCallRuntime(Runtime::kLoadElementWithInterceptor, context,
+                            receiver, key);
 
-  assembler->Bind(&if_keyisinvalid);
-  assembler->TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key,
-                             slot, vector);
-}
-
-// static
-bool FastCloneShallowObjectStub::IsSupported(ObjectLiteral* expr) {
-  // FastCloneShallowObjectStub doesn't copy elements, and object literals don't
-  // support copy-on-write (COW) elements for now.
-  // TODO(mvstanton): make object literals support COW elements.
-  return expr->fast_elements() && expr->has_shallow_properties() &&
-         expr->properties_count() <= kMaximumClonedProperties;
-}
-
-// static
-int FastCloneShallowObjectStub::PropertiesCount(int literal_length) {
-  // This heuristic of setting empty literals to have
-  // kInitialGlobalObjectUnusedPropertiesCount must remain in-sync with the
-  // runtime.
-  // TODO(verwaest): Unify this with the heuristic in the runtime.
-  return literal_length == 0
-             ? JSObject::kInitialGlobalObjectUnusedPropertiesCount
-             : literal_length;
-}
-
-// static
-compiler::Node* FastCloneShallowObjectStub::GenerateFastPath(
-    CodeStubAssembler* assembler, compiler::CodeAssembler::Label* call_runtime,
-    compiler::Node* closure, compiler::Node* literals_index,
-    compiler::Node* properties_count) {
-  typedef compiler::Node Node;
-  typedef compiler::CodeAssembler::Label Label;
-  typedef compiler::CodeAssembler::Variable Variable;
-
-  Node* literals_array =
-      assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
-  Node* allocation_site = assembler->LoadFixedArrayElement(
-      literals_array, literals_index,
-      LiteralsArray::kFirstLiteralIndex * kPointerSize,
-      CodeStubAssembler::SMI_PARAMETERS);
-  assembler->GotoIf(assembler->IsUndefined(allocation_site), call_runtime);
-
-  // Calculate the object and allocation size based on the properties count.
-  Node* object_size = assembler->IntPtrAdd(
-      assembler->WordShl(properties_count, kPointerSizeLog2),
-      assembler->IntPtrConstant(JSObject::kHeaderSize));
-  Node* allocation_size = object_size;
-  if (FLAG_allocation_site_pretenuring) {
-    allocation_size = assembler->IntPtrAdd(
-        object_size, assembler->IntPtrConstant(AllocationMemento::kSize));
-  }
-  Node* boilerplate = assembler->LoadObjectField(
-      allocation_site, AllocationSite::kTransitionInfoOffset);
-  Node* boilerplate_map = assembler->LoadMap(boilerplate);
-  Node* instance_size = assembler->LoadMapInstanceSize(boilerplate_map);
-  Node* size_in_words = assembler->WordShr(object_size, kPointerSizeLog2);
-  assembler->GotoUnless(assembler->Word32Equal(instance_size, size_in_words),
-                        call_runtime);
-
-  Node* copy = assembler->Allocate(allocation_size);
-
-  // Copy boilerplate elements.
-  Variable offset(assembler, MachineType::PointerRepresentation());
-  offset.Bind(assembler->IntPtrConstant(-kHeapObjectTag));
-  Node* end_offset = assembler->IntPtrAdd(object_size, offset.value());
-  Label loop_body(assembler, &offset), loop_check(assembler, &offset);
-  // We should always have an object size greater than zero.
-  assembler->Goto(&loop_body);
-  assembler->Bind(&loop_body);
-  {
-    // The Allocate above guarantees that the copy lies in new space. This
-    // allows us to skip write barriers. This is necessary since we may also be
-    // copying unboxed doubles.
-    Node* field =
-        assembler->Load(MachineType::IntPtr(), boilerplate, offset.value());
-    assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(), copy,
-                                   offset.value(), field);
-    assembler->Goto(&loop_check);
-  }
-  assembler->Bind(&loop_check);
-  {
-    offset.Bind(assembler->IntPtrAdd(offset.value(),
-                                     assembler->IntPtrConstant(kPointerSize)));
-    assembler->GotoUnless(
-        assembler->IntPtrGreaterThanOrEqual(offset.value(), end_offset),
-        &loop_body);
-  }
-
-  if (FLAG_allocation_site_pretenuring) {
-    Node* memento = assembler->InnerAllocate(copy, object_size);
-    assembler->StoreObjectFieldNoWriteBarrier(
-        memento, HeapObject::kMapOffset,
-        assembler->LoadRoot(Heap::kAllocationMementoMapRootIndex));
-    assembler->StoreObjectFieldNoWriteBarrier(
-        memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
-    Node* memento_create_count = assembler->LoadObjectField(
-        allocation_site, AllocationSite::kPretenureCreateCountOffset);
-    memento_create_count = assembler->SmiAdd(
-        memento_create_count, assembler->SmiConstant(Smi::FromInt(1)));
-    assembler->StoreObjectFieldNoWriteBarrier(
-        allocation_site, AllocationSite::kPretenureCreateCountOffset,
-        memento_create_count);
-  }
-
-  // TODO(verwaest): Allocate and fill in double boxes.
-  return copy;
-}
-
-void FastCloneShallowObjectStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  typedef CodeStubAssembler::Label Label;
-  typedef compiler::Node Node;
-  Label call_runtime(assembler);
-  Node* closure = assembler->Parameter(0);
-  Node* literals_index = assembler->Parameter(1);
-
-  Node* properties_count =
-      assembler->IntPtrConstant(PropertiesCount(this->length()));
-  Node* copy = GenerateFastPath(assembler, &call_runtime, closure,
-                                literals_index, properties_count);
-  assembler->Return(copy);
-
-  assembler->Bind(&call_runtime);
-  Node* constant_properties = assembler->Parameter(2);
-  Node* flags = assembler->Parameter(3);
-  Node* context = assembler->Parameter(4);
-  assembler->TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
-                             literals_index, constant_properties, flags);
-}
-
-template<class StateType>
-void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
-  // Note: Although a no-op transition is semantically OK, it is hinting at a
-  // bug somewhere in our state transition machinery.
-  DCHECK(from != to);
-  if (!FLAG_trace_ic) return;
-  OFStream os(stdout);
-  os << "[";
-  PrintBaseName(os);
-  os << ": " << from << "=>" << to << "]" << std::endl;
+  assembler.Bind(&if_keyisinvalid);
+  assembler.TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key,
+                            slot, vector);
 }
 
 void CallICStub::PrintState(std::ostream& os) const {  // NOLINT
-  os << state();
+  os << convert_mode() << ", " << tail_call_mode();
 }
 
+void CallICStub::GenerateAssembly(compiler::CodeAssemblerState* state) const {
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
+
+  Node* context = assembler.Parameter(Descriptor::kContext);
+  Node* target = assembler.Parameter(Descriptor::kTarget);
+  Node* argc = assembler.Parameter(Descriptor::kActualArgumentsCount);
+  Node* slot = assembler.Parameter(Descriptor::kSlot);
+  Node* vector = assembler.Parameter(Descriptor::kVector);
+
+  // TODO(bmeurer): The slot should actually be an IntPtr, but TurboFan's
+  // SimplifiedLowering cannot deal with IntPtr machine type properly yet.
+  slot = assembler.ChangeInt32ToIntPtr(slot);
+
+  // Static checks to assert it is safe to examine the type feedback element.
+  // We don't know that we have a weak cell. We might have a private symbol
+  // or an AllocationSite, but the memory is safe to examine.
+  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
+  // FixedArray.
+  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
+  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
+  // computed, meaning that it can't appear to be a pointer. If the low bit is
+  // 0, then hash is computed, but the 0 bit prevents the field from appearing
+  // to be a pointer.
+  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
+  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
+                    WeakCell::kValueOffset &&
+                WeakCell::kValueOffset == Symbol::kHashFieldSlot);
+
+  // Increment the call count.
+  // TODO(bmeurer): Would it be beneficial to use Int32Add on 64-bit?
+  assembler.Comment("increment call count");
+  Node* call_count =
+      assembler.LoadFixedArrayElement(vector, slot, 1 * kPointerSize);
+  Node* new_count = assembler.SmiAdd(call_count, assembler.SmiConstant(1));
+  // Count is Smi, so we don't need a write barrier.
+  assembler.StoreFixedArrayElement(vector, slot, new_count, SKIP_WRITE_BARRIER,
+                                   1 * kPointerSize);
+
+  Label call_function(&assembler), extra_checks(&assembler), call(&assembler);
+
+  // The checks. First, does the function match the recorded monomorphic
+  // target?
+  Node* feedback_element = assembler.LoadFixedArrayElement(vector, slot);
+  Node* feedback_value = assembler.LoadWeakCellValueUnchecked(feedback_element);
+  Node* is_monomorphic = assembler.WordEqual(target, feedback_value);
+  assembler.GotoIfNot(is_monomorphic, &extra_checks);
+
+  // The compare above could have been a SMI/SMI comparison. Guard against
+  // this convincing us that we have a monomorphic JSFunction.
+  Node* is_smi = assembler.TaggedIsSmi(target);
+  assembler.Branch(is_smi, &extra_checks, &call_function);
+
+  assembler.Bind(&call_function);
+  {
+    // Call using CallFunction builtin.
+    Callable callable =
+        CodeFactory::CallFunction(isolate(), convert_mode(), tail_call_mode());
+    assembler.TailCallStub(callable, context, target, argc);
+  }
+
+  assembler.Bind(&extra_checks);
+  {
+    Label check_initialized(&assembler), mark_megamorphic(&assembler),
+        create_allocation_site(&assembler, Label::kDeferred),
+        create_weak_cell(&assembler, Label::kDeferred);
+
+    assembler.Comment("check if megamorphic");
+    // Check if it is a megamorphic target.
+    Node* is_megamorphic = assembler.WordEqual(
+        feedback_element,
+        assembler.HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
+    assembler.GotoIf(is_megamorphic, &call);
+
+    assembler.Comment("check if it is an allocation site");
+    assembler.GotoIfNot(
+        assembler.IsAllocationSiteMap(assembler.LoadMap(feedback_element)),
+        &check_initialized);
+
+    // If it is not the Array() function, mark megamorphic.
+    Node* context_slot = assembler.LoadContextElement(
+        assembler.LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
+    Node* is_array_function = assembler.WordEqual(context_slot, target);
+    assembler.GotoIfNot(is_array_function, &mark_megamorphic);
+
+    // Call ArrayConstructorStub.
+    Callable callable = CodeFactory::ArrayConstructor(isolate());
+    assembler.TailCallStub(callable, context, target, target, argc,
+                           feedback_element);
+
+    assembler.Bind(&check_initialized);
+    {
+      assembler.Comment("check if uninitialized");
+      // Check if it is an uninitialized target first.
+      Node* is_uninitialized = assembler.WordEqual(
+          feedback_element,
+          assembler.HeapConstant(
+              FeedbackVector::UninitializedSentinel(isolate())));
+      assembler.GotoIfNot(is_uninitialized, &mark_megamorphic);
+
+      assembler.Comment("handle unitinitialized");
+      // If it is not a JSFunction mark it as megamorphic.
+      Node* is_smi = assembler.TaggedIsSmi(target);
+      assembler.GotoIf(is_smi, &mark_megamorphic);
+
+      // Check if function is an object of JSFunction type.
+      Node* is_js_function = assembler.IsJSFunction(target);
+      assembler.GotoIfNot(is_js_function, &mark_megamorphic);
+
+      // Check if it is the Array() function.
+      Node* context_slot = assembler.LoadContextElement(
+          assembler.LoadNativeContext(context), Context::ARRAY_FUNCTION_INDEX);
+      Node* is_array_function = assembler.WordEqual(context_slot, target);
+      assembler.GotoIf(is_array_function, &create_allocation_site);
+
+      // Check if the function belongs to the same native context.
+      Node* native_context = assembler.LoadNativeContext(
+          assembler.LoadObjectField(target, JSFunction::kContextOffset));
+      Node* is_same_native_context = assembler.WordEqual(
+          native_context, assembler.LoadNativeContext(context));
+      assembler.Branch(is_same_native_context, &create_weak_cell,
+                       &mark_megamorphic);
+    }
+
+    assembler.Bind(&create_weak_cell);
+    {
+      // Wrap the {target} in a WeakCell and remember it.
+      assembler.Comment("create weak cell");
+      assembler.CreateWeakCellInFeedbackVector(vector, assembler.SmiTag(slot),
+                                               target);
+
+      // Call using CallFunction builtin.
+      assembler.Goto(&call_function);
+    }
+
+    assembler.Bind(&create_allocation_site);
+    {
+      // Create an AllocationSite for the {target}.
+      assembler.Comment("create allocation site");
+      assembler.CreateAllocationSiteInFeedbackVector(vector,
+                                                     assembler.SmiTag(slot));
+
+      // Call using CallFunction builtin. CallICs have a PREMONOMORPHIC state.
+      // They start collecting feedback only when a call is executed the second
+      // time. So, do not pass any feedback here.
+      assembler.Goto(&call_function);
+    }
+
+    assembler.Bind(&mark_megamorphic);
+    {
+      // Mark it as megamorphic.
+      // MegamorphicSentinel is created as part of Heap::InitialObjects and
+      // will not move during a GC, so it is safe to skip the write barrier.
+      DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
+      assembler.StoreFixedArrayElement(
+          vector, slot, assembler.HeapConstant(
+                            FeedbackVector::MegamorphicSentinel(isolate())),
+          SKIP_WRITE_BARRIER);
+      assembler.Goto(&call);
+    }
+  }
+
+  assembler.Bind(&call);
+  {
+    // Call using call builtin.
+    assembler.Comment("call using Call builtin");
+    Callable callable_call =
+        CodeFactory::Call(isolate(), convert_mode(), tail_call_mode());
+    assembler.TailCallStub(callable_call, context, target, argc);
+  }
+}
+
+void CallICTrampolineStub::PrintState(std::ostream& os) const {  // NOLINT
+  os << convert_mode() << ", " << tail_call_mode();
+}
+
+void CallICTrampolineStub::GenerateAssembly(
+    compiler::CodeAssemblerState* state) const {
+  typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
+
+  Node* context = assembler.Parameter(Descriptor::kContext);
+  Node* target = assembler.Parameter(Descriptor::kTarget);
+  Node* argc = assembler.Parameter(Descriptor::kActualArgumentsCount);
+  Node* slot = assembler.Parameter(Descriptor::kSlot);
+  Node* vector = assembler.LoadFeedbackVectorForStub();
+
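+  // The trampoline differs from CallICStub only in that it loads the
+  // feedback vector itself before forwarding to the vector-taking CallIC.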
+  Callable callable =
+      CodeFactory::CallIC(isolate(), convert_mode(), tail_call_mode());
+  assembler.TailCallStub(callable, context, target, argc, slot, vector);
+}
 
 void JSEntryStub::FinishCode(Handle<Code> code) {
   Handle<FixedArray> handler_table =
@@ -2361,31 +1909,6 @@
   code->set_handler_table(*handler_table);
 }
 
-
-void LoadDictionaryElementStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  descriptor->Initialize(
-      FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
-}
-
-void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  DCHECK(kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC);
-  if (kind() == Code::KEYED_LOAD_IC) {
-    descriptor->Initialize(
-        FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
-  }
-}
-
-
-CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() const {
-  if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
-    return LoadWithVectorDescriptor(isolate());
-  } else {
-    DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
-    return StoreWithVectorDescriptor(isolate());
-  }
-}
-
 void TransitionElementsKindStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
   descriptor->Initialize(
@@ -2400,15 +1923,6 @@
 }
 
 
-#define SIMD128_INIT_DESC(TYPE, Type, type, lane_count, lane_type) \
-  void Allocate##Type##Stub::InitializeDescriptor(                 \
-      CodeStubDescriptor* descriptor) {                            \
-    descriptor->Initialize(                                        \
-        Runtime::FunctionForId(Runtime::kCreate##Type)->entry);    \
-  }
-SIMD128_TYPES(SIMD128_INIT_DESC)
-#undef SIMD128_INIT_DESC
-
 void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
   descriptor->SetMissHandler(Runtime::kToBooleanIC_Miss);
@@ -2427,528 +1941,67 @@
       FUNCTION_ADDR(Runtime_BinaryOpIC_MissWithAllocationSite));
 }
 
-void GetPropertyStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void GetPropertyStub::GenerateAssembly(
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
   typedef CodeStubAssembler::Label Label;
   typedef CodeStubAssembler::Variable Variable;
+  CodeStubAssembler assembler(state);
 
-  Label call_runtime(assembler, Label::kDeferred), return_undefined(assembler),
-      end(assembler);
+  Label call_runtime(&assembler, Label::kDeferred),
+      return_undefined(&assembler), end(&assembler);
 
-  Node* object = assembler->Parameter(0);
-  Node* key = assembler->Parameter(1);
-  Node* context = assembler->Parameter(2);
-  Variable var_result(assembler, MachineRepresentation::kTagged);
+  Node* object = assembler.Parameter(0);
+  Node* key = assembler.Parameter(1);
+  Node* context = assembler.Parameter(2);
+  Variable var_result(&assembler, MachineRepresentation::kTagged);
 
   CodeStubAssembler::LookupInHolder lookup_property_in_holder =
-      [assembler, context, &var_result, &end](
+      [&assembler, context, &var_result, &end](
           Node* receiver, Node* holder, Node* holder_map,
           Node* holder_instance_type, Node* unique_name, Label* next_holder,
           Label* if_bailout) {
-        Variable var_value(assembler, MachineRepresentation::kTagged);
-        Label if_found(assembler);
-        assembler->TryGetOwnProperty(
+        Variable var_value(&assembler, MachineRepresentation::kTagged);
+        Label if_found(&assembler);
+        assembler.TryGetOwnProperty(
             context, receiver, holder, holder_map, holder_instance_type,
             unique_name, &if_found, &var_value, next_holder, if_bailout);
-        assembler->Bind(&if_found);
+        assembler.Bind(&if_found);
         {
           var_result.Bind(var_value.value());
-          assembler->Goto(&end);
+          assembler.Goto(&end);
         }
       };
 
   CodeStubAssembler::LookupInHolder lookup_element_in_holder =
-      [assembler, context, &var_result, &end](
+      [&assembler](
           Node* receiver, Node* holder, Node* holder_map,
           Node* holder_instance_type, Node* index, Label* next_holder,
           Label* if_bailout) {
         // Not supported yet.
-        assembler->Use(next_holder);
-        assembler->Goto(if_bailout);
+        assembler.Use(next_holder);
+        assembler.Goto(if_bailout);
       };
 
-  assembler->TryPrototypeChainLookup(object, key, lookup_property_in_holder,
-                                     lookup_element_in_holder,
-                                     &return_undefined, &call_runtime);
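+  // Walk the prototype chain, applying the two lookup callbacks per holder;
+  // an exhausted chain yields undefined, and anything the fast path cannot
+  // handle bails out to the runtime.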
+  assembler.TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+                                    lookup_element_in_holder, &return_undefined,
+                                    &call_runtime);
 
-  assembler->Bind(&return_undefined);
+  assembler.Bind(&return_undefined);
   {
-    var_result.Bind(assembler->UndefinedConstant());
-    assembler->Goto(&end);
+    var_result.Bind(assembler.UndefinedConstant());
+    assembler.Goto(&end);
   }
 
-  assembler->Bind(&call_runtime);
+  assembler.Bind(&call_runtime);
   {
     var_result.Bind(
-        assembler->CallRuntime(Runtime::kGetProperty, context, object, key));
-    assembler->Goto(&end);
+        assembler.CallRuntime(Runtime::kGetProperty, context, object, key));
+    assembler.Goto(&end);
   }
 
-  assembler->Bind(&end);
-  assembler->Return(var_result.value());
-}
-
-// static
-compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
-                                             compiler::Node* shared_info,
-                                             compiler::Node* context) {
-  typedef compiler::Node Node;
-  typedef compiler::CodeAssembler::Label Label;
-  typedef compiler::CodeAssembler::Variable Variable;
-
-  Isolate* isolate = assembler->isolate();
-  Factory* factory = assembler->isolate()->factory();
-  assembler->IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
-
-  // Create a new closure from the given function info in new space
-  Node* result = assembler->Allocate(JSFunction::kSize);
-
-  // Calculate the index of the map we should install on the function based on
-  // the FunctionKind and LanguageMode of the function.
-  // Note: Must be kept in sync with Context::FunctionMapIndex
-  Node* compiler_hints = assembler->LoadObjectField(
-      shared_info, SharedFunctionInfo::kCompilerHintsOffset,
-      MachineType::Uint32());
-  Node* is_strict = assembler->Word32And(
-      compiler_hints,
-      assembler->Int32Constant(1 << SharedFunctionInfo::kStrictModeBit));
-
-  Label if_normal(assembler), if_generator(assembler), if_async(assembler),
-      if_class_constructor(assembler), if_function_without_prototype(assembler),
-      load_map(assembler);
-  Variable map_index(assembler, MachineType::PointerRepresentation());
-
-  STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
-  Node* is_not_normal = assembler->Word32And(
-      compiler_hints,
-      assembler->Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
-  assembler->GotoUnless(is_not_normal, &if_normal);
-
-  Node* is_generator = assembler->Word32And(
-      compiler_hints,
-      assembler->Int32Constant(FunctionKind::kGeneratorFunction
-                               << SharedFunctionInfo::kFunctionKindShift));
-  assembler->GotoIf(is_generator, &if_generator);
-
-  Node* is_async = assembler->Word32And(
-      compiler_hints,
-      assembler->Int32Constant(FunctionKind::kAsyncFunction
-                               << SharedFunctionInfo::kFunctionKindShift));
-  assembler->GotoIf(is_async, &if_async);
-
-  Node* is_class_constructor = assembler->Word32And(
-      compiler_hints,
-      assembler->Int32Constant(FunctionKind::kClassConstructor
-                               << SharedFunctionInfo::kFunctionKindShift));
-  assembler->GotoIf(is_class_constructor, &if_class_constructor);
-
-  if (FLAG_debug_code) {
-    // Function must be a function without a prototype.
-    CSA_ASSERT(assembler, assembler->Word32And(
-                              compiler_hints,
-                              assembler->Int32Constant(
-                                  (FunctionKind::kAccessorFunction |
-                                   FunctionKind::kArrowFunction |
-                                   FunctionKind::kConciseMethod)
-                                  << SharedFunctionInfo::kFunctionKindShift)));
-  }
-  assembler->Goto(&if_function_without_prototype);
-
-  assembler->Bind(&if_normal);
-  {
-    map_index.Bind(assembler->Select(
-        is_strict,
-        assembler->IntPtrConstant(Context::STRICT_FUNCTION_MAP_INDEX),
-        assembler->IntPtrConstant(Context::SLOPPY_FUNCTION_MAP_INDEX)));
-    assembler->Goto(&load_map);
-  }
-
-  assembler->Bind(&if_generator);
-  {
-    map_index.Bind(assembler->Select(
-        is_strict,
-        assembler->IntPtrConstant(Context::STRICT_GENERATOR_FUNCTION_MAP_INDEX),
-        assembler->IntPtrConstant(
-            Context::SLOPPY_GENERATOR_FUNCTION_MAP_INDEX)));
-    assembler->Goto(&load_map);
-  }
-
-  assembler->Bind(&if_async);
-  {
-    map_index.Bind(assembler->Select(
-        is_strict,
-        assembler->IntPtrConstant(Context::STRICT_ASYNC_FUNCTION_MAP_INDEX),
-        assembler->IntPtrConstant(Context::SLOPPY_ASYNC_FUNCTION_MAP_INDEX)));
-    assembler->Goto(&load_map);
-  }
-
-  assembler->Bind(&if_class_constructor);
-  {
-    map_index.Bind(
-        assembler->IntPtrConstant(Context::STRICT_FUNCTION_MAP_INDEX));
-    assembler->Goto(&load_map);
-  }
-
-  assembler->Bind(&if_function_without_prototype);
-  {
-    map_index.Bind(assembler->IntPtrConstant(
-        Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
-    assembler->Goto(&load_map);
-  }
-
-  assembler->Bind(&load_map);
-
-  // Get the function map in the current native context and set that
-  // as the map of the allocated object.
-  Node* native_context = assembler->LoadNativeContext(context);
-  Node* map_slot_value =
-      assembler->LoadFixedArrayElement(native_context, map_index.value(), 0,
-                                       CodeStubAssembler::INTPTR_PARAMETERS);
-  assembler->StoreMapNoWriteBarrier(result, map_slot_value);
-
-  // Initialize the rest of the function.
-  Node* empty_fixed_array =
-      assembler->HeapConstant(factory->empty_fixed_array());
-  Node* empty_literals_array =
-      assembler->HeapConstant(factory->empty_literals_array());
-  assembler->StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
-                                            empty_fixed_array);
-  assembler->StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
-                                            empty_fixed_array);
-  assembler->StoreObjectFieldNoWriteBarrier(result, JSFunction::kLiteralsOffset,
-                                            empty_literals_array);
-  assembler->StoreObjectFieldNoWriteBarrier(
-      result, JSFunction::kPrototypeOrInitialMapOffset,
-      assembler->TheHoleConstant());
-  assembler->StoreObjectFieldNoWriteBarrier(
-      result, JSFunction::kSharedFunctionInfoOffset, shared_info);
-  assembler->StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset,
-                                            context);
-  Handle<Code> lazy_builtin_handle(
-      assembler->isolate()->builtins()->builtin(Builtins::kCompileLazy));
-  Node* lazy_builtin = assembler->HeapConstant(lazy_builtin_handle);
-  Node* lazy_builtin_entry = assembler->IntPtrAdd(
-      lazy_builtin,
-      assembler->IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
-  assembler->StoreObjectFieldNoWriteBarrier(
-      result, JSFunction::kCodeEntryOffset, lazy_builtin_entry);
-  assembler->StoreObjectFieldNoWriteBarrier(result,
-                                            JSFunction::kNextFunctionLinkOffset,
-                                            assembler->UndefinedConstant());
-
-  return result;
-}
-
-void FastNewClosureStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  assembler->Return(
-      Generate(assembler, assembler->Parameter(0), assembler->Parameter(1)));
-}
-
-// static
-compiler::Node* FastNewFunctionContextStub::Generate(
-    CodeStubAssembler* assembler, compiler::Node* function,
-    compiler::Node* slots, compiler::Node* context) {
-  typedef compiler::Node Node;
-
-  Node* min_context_slots =
-      assembler->Int32Constant(Context::MIN_CONTEXT_SLOTS);
-  Node* length = assembler->Int32Add(slots, min_context_slots);
-  Node* size = assembler->Int32Add(
-      assembler->Word32Shl(length, assembler->Int32Constant(kPointerSizeLog2)),
-      assembler->Int32Constant(FixedArray::kHeaderSize));
-
-  // Create a new closure from the given function info in new space
-  Node* function_context = assembler->Allocate(size);
-
-  Isolate* isolate = assembler->isolate();
-  assembler->StoreMapNoWriteBarrier(
-      function_context,
-      assembler->HeapConstant(isolate->factory()->function_context_map()));
-  assembler->StoreObjectFieldNoWriteBarrier(function_context,
-                                            Context::kLengthOffset,
-                                            assembler->SmiFromWord32(length));
-
-  // Set up the fixed slots.
-  assembler->StoreFixedArrayElement(
-      function_context, assembler->Int32Constant(Context::CLOSURE_INDEX),
-      function, SKIP_WRITE_BARRIER);
-  assembler->StoreFixedArrayElement(
-      function_context, assembler->Int32Constant(Context::PREVIOUS_INDEX),
-      context, SKIP_WRITE_BARRIER);
-  assembler->StoreFixedArrayElement(
-      function_context, assembler->Int32Constant(Context::EXTENSION_INDEX),
-      assembler->TheHoleConstant(), SKIP_WRITE_BARRIER);
-
-  // Copy the native context from the previous context.
-  Node* native_context = assembler->LoadNativeContext(context);
-  assembler->StoreFixedArrayElement(
-      function_context, assembler->Int32Constant(Context::NATIVE_CONTEXT_INDEX),
-      native_context, SKIP_WRITE_BARRIER);
-
-  // Initialize the rest of the slots to undefined.
-  Node* undefined = assembler->UndefinedConstant();
-  assembler->BuildFastFixedArrayForEach(
-      function_context, FAST_ELEMENTS, min_context_slots, length,
-      [undefined](CodeStubAssembler* assembler, Node* context, Node* offset) {
-        assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(),
-                                       context, offset, undefined);
-      });
-
-  return function_context;
-}
-
-void FastNewFunctionContextStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-  Node* function = assembler->Parameter(Descriptor::kFunction);
-  Node* slots = assembler->Parameter(FastNewFunctionContextDescriptor::kSlots);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-
-  assembler->Return(Generate(assembler, function, slots, context));
-}
-
-// static
-compiler::Node* FastCloneRegExpStub::Generate(CodeStubAssembler* assembler,
-                                              compiler::Node* closure,
-                                              compiler::Node* literal_index,
-                                              compiler::Node* pattern,
-                                              compiler::Node* flags,
-                                              compiler::Node* context) {
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-  typedef compiler::Node Node;
-
-  Label call_runtime(assembler, Label::kDeferred), end(assembler);
-
-  Variable result(assembler, MachineRepresentation::kTagged);
-
-  Node* literals_array =
-      assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
-  Node* boilerplate = assembler->LoadFixedArrayElement(
-      literals_array, literal_index,
-      LiteralsArray::kFirstLiteralIndex * kPointerSize,
-      CodeStubAssembler::SMI_PARAMETERS);
-  assembler->GotoIf(assembler->IsUndefined(boilerplate), &call_runtime);
-
-  {
-    int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
-    Node* copy = assembler->Allocate(size);
-    for (int offset = 0; offset < size; offset += kPointerSize) {
-      Node* value = assembler->LoadObjectField(boilerplate, offset);
-      assembler->StoreObjectFieldNoWriteBarrier(copy, offset, value);
-    }
-    result.Bind(copy);
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&call_runtime);
-  {
-    result.Bind(assembler->CallRuntime(Runtime::kCreateRegExpLiteral, context,
-                                       closure, literal_index, pattern, flags));
-    assembler->Goto(&end);
-  }
-
-  assembler->Bind(&end);
-  return result.value();
-}
-
-void FastCloneRegExpStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-  Node* closure = assembler->Parameter(Descriptor::kClosure);
-  Node* literal_index = assembler->Parameter(Descriptor::kLiteralIndex);
-  Node* pattern = assembler->Parameter(Descriptor::kPattern);
-  Node* flags = assembler->Parameter(Descriptor::kFlags);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-
-  assembler->Return(
-      Generate(assembler, closure, literal_index, pattern, flags, context));
-}
-
-namespace {
-
-compiler::Node* NonEmptyShallowClone(CodeStubAssembler* assembler,
-                                     compiler::Node* boilerplate,
-                                     compiler::Node* boilerplate_map,
-                                     compiler::Node* boilerplate_elements,
-                                     compiler::Node* allocation_site,
-                                     compiler::Node* capacity,
-                                     ElementsKind kind) {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::ParameterMode ParameterMode;
-
-  ParameterMode param_mode = CodeStubAssembler::SMI_PARAMETERS;
-
-  Node* length = assembler->LoadJSArrayLength(boilerplate);
-
-  if (assembler->Is64()) {
-    capacity = assembler->SmiUntag(capacity);
-    param_mode = CodeStubAssembler::INTEGER_PARAMETERS;
-  }
-
-  Node *array, *elements;
-  std::tie(array, elements) =
-      assembler->AllocateUninitializedJSArrayWithElements(
-          kind, boilerplate_map, length, allocation_site, capacity, param_mode);
-
-  assembler->Comment("copy elements header");
-  for (int offset = 0; offset < FixedArrayBase::kHeaderSize;
-       offset += kPointerSize) {
-    Node* value = assembler->LoadObjectField(boilerplate_elements, offset);
-    assembler->StoreObjectField(elements, offset, value);
-  }
-
-  if (assembler->Is64()) {
-    length = assembler->SmiUntag(length);
-  }
-
-  assembler->Comment("copy boilerplate elements");
-  assembler->CopyFixedArrayElements(kind, boilerplate_elements, elements,
-                                    length, SKIP_WRITE_BARRIER, param_mode);
-  assembler->IncrementCounter(
-      assembler->isolate()->counters()->inlined_copied_elements(), 1);
-
-  return array;
-}
-
-}  // namespace
-
-// static
-compiler::Node* FastCloneShallowArrayStub::Generate(
-    CodeStubAssembler* assembler, compiler::Node* closure,
-    compiler::Node* literal_index, compiler::Node* context,
-    CodeStubAssembler::Label* call_runtime,
-    AllocationSiteMode allocation_site_mode) {
-  typedef CodeStubAssembler::Label Label;
-  typedef CodeStubAssembler::Variable Variable;
-  typedef compiler::Node Node;
-
-  Label zero_capacity(assembler), cow_elements(assembler),
-      fast_elements(assembler), return_result(assembler);
-  Variable result(assembler, MachineRepresentation::kTagged);
-
-  Node* literals_array =
-      assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
-  Node* allocation_site = assembler->LoadFixedArrayElement(
-      literals_array, literal_index,
-      LiteralsArray::kFirstLiteralIndex * kPointerSize,
-      CodeStubAssembler::SMI_PARAMETERS);
-
-  assembler->GotoIf(assembler->IsUndefined(allocation_site), call_runtime);
-  allocation_site = assembler->LoadFixedArrayElement(
-      literals_array, literal_index,
-      LiteralsArray::kFirstLiteralIndex * kPointerSize,
-      CodeStubAssembler::SMI_PARAMETERS);
-
-  Node* boilerplate = assembler->LoadObjectField(
-      allocation_site, AllocationSite::kTransitionInfoOffset);
-  Node* boilerplate_map = assembler->LoadMap(boilerplate);
-  Node* boilerplate_elements = assembler->LoadElements(boilerplate);
-  Node* capacity = assembler->LoadFixedArrayBaseLength(boilerplate_elements);
-  allocation_site =
-      allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
-
-  Node* zero = assembler->SmiConstant(Smi::kZero);
-  assembler->GotoIf(assembler->SmiEqual(capacity, zero), &zero_capacity);
-
-  Node* elements_map = assembler->LoadMap(boilerplate_elements);
-  assembler->GotoIf(assembler->IsFixedCOWArrayMap(elements_map), &cow_elements);
-
-  assembler->GotoIf(assembler->IsFixedArrayMap(elements_map), &fast_elements);
-  {
-    assembler->Comment("fast double elements path");
-    if (FLAG_debug_code) {
-      Label correct_elements_map(assembler), abort(assembler, Label::kDeferred);
-      assembler->Branch(assembler->IsFixedDoubleArrayMap(elements_map),
-                        &correct_elements_map, &abort);
-
-      assembler->Bind(&abort);
-      {
-        Node* abort_id = assembler->SmiConstant(
-            Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
-        assembler->CallRuntime(Runtime::kAbort, context, abort_id);
-        result.Bind(assembler->UndefinedConstant());
-        assembler->Goto(&return_result);
-      }
-      assembler->Bind(&correct_elements_map);
-    }
-
-    Node* array = NonEmptyShallowClone(assembler, boilerplate, boilerplate_map,
-                                       boilerplate_elements, allocation_site,
-                                       capacity, FAST_DOUBLE_ELEMENTS);
-    result.Bind(array);
-    assembler->Goto(&return_result);
-  }
-
-  assembler->Bind(&fast_elements);
-  {
-    assembler->Comment("fast elements path");
-    Node* array = NonEmptyShallowClone(assembler, boilerplate, boilerplate_map,
-                                       boilerplate_elements, allocation_site,
-                                       capacity, FAST_ELEMENTS);
-    result.Bind(array);
-    assembler->Goto(&return_result);
-  }
-
-  Variable length(assembler, MachineRepresentation::kTagged),
-      elements(assembler, MachineRepresentation::kTagged);
-  Label allocate_without_elements(assembler);
-
-  assembler->Bind(&cow_elements);
-  {
-    assembler->Comment("fixed cow path");
-    length.Bind(assembler->LoadJSArrayLength(boilerplate));
-    elements.Bind(boilerplate_elements);
-
-    assembler->Goto(&allocate_without_elements);
-  }
-
-  assembler->Bind(&zero_capacity);
-  {
-    assembler->Comment("zero capacity path");
-    length.Bind(zero);
-    elements.Bind(assembler->LoadRoot(Heap::kEmptyFixedArrayRootIndex));
-
-    assembler->Goto(&allocate_without_elements);
-  }
-
-  assembler->Bind(&allocate_without_elements);
-  {
-    Node* array = assembler->AllocateUninitializedJSArrayWithoutElements(
-        FAST_ELEMENTS, boilerplate_map, length.value(), allocation_site);
-    assembler->StoreObjectField(array, JSObject::kElementsOffset,
-                                elements.value());
-    result.Bind(array);
-    assembler->Goto(&return_result);
-  }
-
-  assembler->Bind(&return_result);
-  return result.value();
-}
-
-void FastCloneShallowArrayStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  typedef compiler::Node Node;
-  typedef CodeStubAssembler::Label Label;
-  Node* closure = assembler->Parameter(Descriptor::kClosure);
-  Node* literal_index = assembler->Parameter(Descriptor::kLiteralIndex);
-  Node* constant_elements = assembler->Parameter(Descriptor::kConstantElements);
-  Node* context = assembler->Parameter(Descriptor::kContext);
-  Label call_runtime(assembler, Label::kDeferred);
-  assembler->Return(Generate(assembler, closure, literal_index, context,
-                             &call_runtime, allocation_site_mode()));
-
-  assembler->Bind(&call_runtime);
-  {
-    assembler->Comment("call runtime");
-    Node* flags = assembler->SmiConstant(
-        Smi::FromInt(ArrayLiteral::kShallowElements |
-                     (allocation_site_mode() == TRACK_ALLOCATION_SITE
-                          ? 0
-                          : ArrayLiteral::kDisableMementos)));
-    assembler->Return(assembler->CallRuntime(Runtime::kCreateArrayLiteral,
-                                             context, closure, literal_index,
-                                             constant_elements, flags));
-  }
+  assembler.Bind(&end);
+  assembler.Return(var_result.value());
 }
 
 void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
@@ -2962,39 +2015,50 @@
   stub.GetCode();
 }
 
+void StoreSlowElementStub::GenerateAssembly(
+    compiler::CodeAssemblerState* state) const {
+  typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-void StoreElementStub::Generate(MacroAssembler* masm) {
-  DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind());
-  KeyedStoreIC::GenerateSlow(masm);
+  Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+  Node* name = assembler.Parameter(Descriptor::kName);
+  Node* value = assembler.Parameter(Descriptor::kValue);
+  Node* slot = assembler.Parameter(Descriptor::kSlot);
+  Node* vector = assembler.Parameter(Descriptor::kVector);
+  Node* context = assembler.Parameter(Descriptor::kContext);
+
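+  // No fast path here: unconditionally tail-call the slow keyed-store
+  // runtime entry.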
+  assembler.TailCallRuntime(Runtime::kKeyedStoreIC_Slow, context, value, slot,
+                            vector, receiver, name);
 }
 
 void StoreFastElementStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef CodeStubAssembler::Label Label;
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
 
-  assembler->Comment(
+  assembler.Comment(
       "StoreFastElementStub: js_array=%d, elements_kind=%s, store_mode=%d",
       is_js_array(), ElementsKindToString(elements_kind()), store_mode());
 
-  Node* receiver = assembler->Parameter(Descriptor::kReceiver);
-  Node* key = assembler->Parameter(Descriptor::kName);
-  Node* value = assembler->Parameter(Descriptor::kValue);
-  Node* slot = assembler->Parameter(Descriptor::kSlot);
-  Node* vector = assembler->Parameter(Descriptor::kVector);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+  Node* key = assembler.Parameter(Descriptor::kName);
+  Node* value = assembler.Parameter(Descriptor::kValue);
+  Node* slot = assembler.Parameter(Descriptor::kSlot);
+  Node* vector = assembler.Parameter(Descriptor::kVector);
+  Node* context = assembler.Parameter(Descriptor::kContext);
 
-  Label miss(assembler);
+  Label miss(&assembler);
 
-  assembler->EmitElementStore(receiver, key, value, is_js_array(),
-                              elements_kind(), store_mode(), &miss);
-  assembler->Return(value);
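+  // EmitElementStore performs the inlined fast-path store; any case it
+  // cannot handle jumps to |miss| below.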
+  assembler.EmitElementStore(receiver, key, value, is_js_array(),
+                             elements_kind(), store_mode(), &miss);
+  assembler.Return(value);
 
-  assembler->Bind(&miss);
+  assembler.Bind(&miss);
   {
-    assembler->Comment("Miss");
-    assembler->TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value,
-                               slot, vector, receiver, key);
+    assembler.Comment("Miss");
+    assembler.TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot,
+                              vector, receiver, key);
   }
 }
 
@@ -3044,15 +2108,12 @@
     new_hints |= ToBooleanHint::kHeapNumber;
     double value = HeapNumber::cast(*object)->value();
     to_boolean_value = value != 0 && !std::isnan(value);
-  } else if (object->IsSimd128Value()) {
-    new_hints |= ToBooleanHint::kSimdValue;
-    to_boolean_value = true;
   } else {
     // We should never see an internal object at runtime here!
     UNREACHABLE();
     to_boolean_value = true;
   }
-  TraceTransition(old_hints, new_hints);
+
   set_sub_minor_key(HintsBits::update(sub_minor_key(), new_hints));
   return to_boolean_value;
 }
@@ -3078,50 +2139,55 @@
 }
 
 void CreateAllocationSiteStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
-  assembler->Return(assembler->CreateAllocationSiteInFeedbackVector(
-      assembler->Parameter(Descriptor::kVector),
-      assembler->Parameter(Descriptor::kSlot)));
+    compiler::CodeAssemblerState* state) const {
+  CodeStubAssembler assembler(state);
+  assembler.Return(assembler.CreateAllocationSiteInFeedbackVector(
+      assembler.Parameter(Descriptor::kVector),
+      assembler.Parameter(Descriptor::kSlot)));
 }
 
-void CreateWeakCellStub::GenerateAssembly(CodeStubAssembler* assembler) const {
-  assembler->Return(assembler->CreateWeakCellInFeedbackVector(
-      assembler->Parameter(Descriptor::kVector),
-      assembler->Parameter(Descriptor::kSlot),
-      assembler->Parameter(Descriptor::kValue)));
+void CreateWeakCellStub::GenerateAssembly(
+    compiler::CodeAssemblerState* state) const {
+  CodeStubAssembler assembler(state);
+  assembler.Return(assembler.CreateWeakCellInFeedbackVector(
+      assembler.Parameter(Descriptor::kVector),
+      assembler.Parameter(Descriptor::kSlot),
+      assembler.Parameter(Descriptor::kValue)));
 }
 
 void ArrayNoArgumentConstructorStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
-  Node* native_context = assembler->LoadObjectField(
-      assembler->Parameter(Descriptor::kFunction), JSFunction::kContextOffset);
+  CodeStubAssembler assembler(state);
+  Node* native_context = assembler.LoadObjectField(
+      assembler.Parameter(Descriptor::kFunction), JSFunction::kContextOffset);
   bool track_allocation_site =
       AllocationSite::GetMode(elements_kind()) == TRACK_ALLOCATION_SITE &&
       override_mode() != DISABLE_ALLOCATION_SITES;
-  Node* allocation_site =
-      track_allocation_site ? assembler->Parameter(Descriptor::kAllocationSite)
-                            : nullptr;
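+  // Only thread the AllocationSite through when this elements kind is
+  // tracked and tracking has not been disabled via the override mode.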
+  Node* allocation_site = track_allocation_site
+                              ? assembler.Parameter(Descriptor::kAllocationSite)
+                              : nullptr;
   Node* array_map =
-      assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
-  Node* array = assembler->AllocateJSArray(
+      assembler.LoadJSArrayElementsMap(elements_kind(), native_context);
+  Node* array = assembler.AllocateJSArray(
       elements_kind(), array_map,
-      assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
-      assembler->SmiConstant(Smi::kZero), allocation_site);
-  assembler->Return(array);
+      assembler.IntPtrConstant(JSArray::kPreallocatedArrayElements),
+      assembler.SmiConstant(Smi::kZero), allocation_site);
+  assembler.Return(array);
 }
 
 void InternalArrayNoArgumentConstructorStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
+  CodeStubAssembler assembler(state);
   Node* array_map =
-      assembler->LoadObjectField(assembler->Parameter(Descriptor::kFunction),
-                                 JSFunction::kPrototypeOrInitialMapOffset);
-  Node* array = assembler->AllocateJSArray(
+      assembler.LoadObjectField(assembler.Parameter(Descriptor::kFunction),
+                                JSFunction::kPrototypeOrInitialMapOffset);
+  Node* array = assembler.AllocateJSArray(
       elements_kind(), array_map,
-      assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
-      assembler->SmiConstant(Smi::kZero), nullptr);
-  assembler->Return(array);
+      assembler.IntPtrConstant(JSArray::kPreallocatedArrayElements),
+      assembler.SmiConstant(Smi::kZero));
+  assembler.Return(array);
 }
 
 namespace {
@@ -3191,49 +2257,52 @@
 }  // namespace
 
 void ArraySingleArgumentConstructorStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
-  Node* function = assembler->Parameter(Descriptor::kFunction);
+  CodeStubAssembler assembler(state);
+  Node* function = assembler.Parameter(Descriptor::kFunction);
   Node* native_context =
-      assembler->LoadObjectField(function, JSFunction::kContextOffset);
+      assembler.LoadObjectField(function, JSFunction::kContextOffset);
   Node* array_map =
-      assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
+      assembler.LoadJSArrayElementsMap(elements_kind(), native_context);
   AllocationSiteMode mode = override_mode() == DISABLE_ALLOCATION_SITES
                                 ? DONT_TRACK_ALLOCATION_SITE
                                 : AllocationSite::GetMode(elements_kind());
-  Node* allocation_site = assembler->Parameter(Descriptor::kAllocationSite);
-  SingleArgumentConstructorCommon<Descriptor>(assembler, elements_kind(),
+  Node* allocation_site = assembler.Parameter(Descriptor::kAllocationSite);
+  SingleArgumentConstructorCommon<Descriptor>(&assembler, elements_kind(),
                                               array_map, allocation_site, mode);
 }
 
 void InternalArraySingleArgumentConstructorStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
-  Node* function = assembler->Parameter(Descriptor::kFunction);
-  Node* array_map = assembler->LoadObjectField(
+  CodeStubAssembler assembler(state);
+  Node* function = assembler.Parameter(Descriptor::kFunction);
+  Node* array_map = assembler.LoadObjectField(
       function, JSFunction::kPrototypeOrInitialMapOffset);
   SingleArgumentConstructorCommon<Descriptor>(
-      assembler, elements_kind(), array_map, assembler->UndefinedConstant(),
+      &assembler, elements_kind(), array_map, assembler.UndefinedConstant(),
       DONT_TRACK_ALLOCATION_SITE);
 }
 
 void GrowArrayElementsStub::GenerateAssembly(
-    CodeStubAssembler* assembler) const {
+    compiler::CodeAssemblerState* state) const {
   typedef compiler::Node Node;
-  CodeStubAssembler::Label runtime(assembler,
+  CodeStubAssembler assembler(state);
+  CodeStubAssembler::Label runtime(&assembler,
                                    CodeStubAssembler::Label::kDeferred);
 
-  Node* object = assembler->Parameter(Descriptor::kObject);
-  Node* key = assembler->Parameter(Descriptor::kKey);
-  Node* context = assembler->Parameter(Descriptor::kContext);
+  Node* object = assembler.Parameter(Descriptor::kObject);
+  Node* key = assembler.Parameter(Descriptor::kKey);
+  Node* context = assembler.Parameter(Descriptor::kContext);
   ElementsKind kind = elements_kind();
 
-  Node* elements = assembler->LoadElements(object);
+  Node* elements = assembler.LoadElements(object);
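+  // Try to grow the elements backing store inline; on failure the deferred
+  // |runtime| label below takes over.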
   Node* new_elements =
-      assembler->TryGrowElementsCapacity(object, elements, kind, key, &runtime);
-  assembler->Return(new_elements);
+      assembler.TryGrowElementsCapacity(object, elements, kind, key, &runtime);
+  assembler.Return(new_elements);
 
-  assembler->Bind(&runtime);
+  assembler.Bind(&runtime);
   // TODO(danno): Make this a tail call when the stub is only used from TurboFan
   // code. This mustn't be a tail call for now, since the caller site in lithium
   // creates a safepoint. This safepoint mustn't have a different number of
@@ -3243,8 +2312,8 @@
   // tail call pushing the arguments on the stack for the runtime call). By not
   // tail-calling, the runtime call case also has zero arguments on the stack
   // for the stub frame.
-  assembler->Return(assembler->CallRuntime(Runtime::kGrowArrayElements, context,
-                                           object, key));
+  assembler.Return(
+      assembler.CallRuntime(Runtime::kGrowArrayElements, context, object, key));
 }
 
 ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 450d0c1..fca830c 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -7,8 +7,9 @@
 
 #include "src/allocation.h"
 #include "src/assembler.h"
-#include "src/code-stub-assembler.h"
 #include "src/codegen.h"
+#include "src/factory.h"
+#include "src/find-and-replace-pattern.h"
 #include "src/globals.h"
 #include "src/ic/ic-state.h"
 #include "src/interface-descriptors.h"
@@ -19,7 +20,13 @@
 namespace v8 {
 namespace internal {
 
-class ObjectLiteral;
+// Forward declarations.
+class CodeStubAssembler;
+namespace compiler {
+class CodeAssemblerLabel;
+class CodeAssemblerState;
+class Node;
+}
 
 // List of code stubs used on all platforms.
 #define CODE_STUB_LIST_ALL_PLATFORMS(V)       \
@@ -33,23 +40,15 @@
   V(CEntry)                                   \
   V(CompareIC)                                \
   V(DoubleToI)                                \
-  V(FunctionPrototype)                        \
   V(InternalArrayConstructor)                 \
   V(JSEntry)                                  \
-  V(LoadIndexedString)                        \
   V(MathPow)                                  \
   V(ProfileEntryHook)                         \
   V(RecordWrite)                              \
   V(RegExpExec)                               \
   V(StoreBufferOverflow)                      \
-  V(StoreElement)                             \
+  V(StoreSlowElement)                         \
   V(SubString)                                \
-  V(KeyedStoreIC)                             \
-  V(LoadGlobalIC)                             \
-  V(FastNewObject)                            \
-  V(FastNewRestParameter)                     \
-  V(FastNewSloppyArguments)                   \
-  V(FastNewStrictArguments)                   \
   V(NameDictionaryLookup)                     \
   /* This can be removed once there are no */ \
   /* more deopting Hydrogen stubs. */         \
@@ -59,20 +58,7 @@
   /* version of the corresponding stub is  */ \
   /* used universally */                      \
   V(CallICTrampoline)                         \
-  V(KeyedStoreICTrampoline)                   \
   /* --- HydrogenCodeStubs --- */             \
-  /* These builtins w/ JS linkage are */      \
-  /* just fast-cases of C++ builtins. They */ \
-  /* require varg support from TF */          \
-  V(FastArrayPush)                            \
-  V(FastFunctionBind)                         \
-  /* These will be ported/eliminated */       \
-  /* as part of the new IC system, ask */     \
-  /* ishell before doing anything  */         \
-  V(LoadConstant)                             \
-  V(LoadDictionaryElement)                    \
-  V(LoadFastElement)                          \
-  V(LoadField)                                \
   /* These should never be ported to TF */    \
   /* because they are either used only by */  \
   /* FCG/Crankshaft or are deprecated */      \
@@ -82,16 +68,6 @@
   V(TransitionElementsKind)                   \
   /* --- TurboFanCodeStubs --- */             \
   V(AllocateHeapNumber)                       \
-  V(AllocateFloat32x4)                        \
-  V(AllocateInt32x4)                          \
-  V(AllocateUint32x4)                         \
-  V(AllocateBool32x4)                         \
-  V(AllocateInt16x8)                          \
-  V(AllocateUint16x8)                         \
-  V(AllocateBool16x8)                         \
-  V(AllocateInt8x16)                          \
-  V(AllocateUint8x16)                         \
-  V(AllocateBool8x16)                         \
   V(ArrayNoArgumentConstructor)               \
   V(ArraySingleArgumentConstructor)           \
   V(ArrayNArgumentsConstructor)               \
@@ -103,16 +79,9 @@
   V(MultiplyWithFeedback)                     \
   V(DivideWithFeedback)                       \
   V(ModulusWithFeedback)                      \
-  V(Inc)                                      \
   V(InternalArrayNoArgumentConstructor)       \
   V(InternalArraySingleArgumentConstructor)   \
-  V(Dec)                                      \
   V(ElementsTransitionAndStore)               \
-  V(FastCloneRegExp)                          \
-  V(FastCloneShallowArray)                    \
-  V(FastCloneShallowObject)                   \
-  V(FastNewClosure)                           \
-  V(FastNewFunctionContext)                   \
   V(KeyedLoadSloppyArguments)                 \
   V(KeyedStoreSloppyArguments)                \
   V(LoadScriptContextField)                   \
@@ -120,28 +89,11 @@
   V(NumberToString)                           \
   V(StringAdd)                                \
   V(GetProperty)                              \
-  V(LoadIC)                                   \
-  V(LoadICProtoArray)                         \
-  V(KeyedLoadICTF)                            \
   V(StoreFastElement)                         \
-  V(StoreField)                               \
   V(StoreGlobal)                              \
-  V(StoreIC)                                  \
-  V(KeyedStoreICTF)                           \
   V(StoreInterceptor)                         \
-  V(StoreMap)                                 \
-  V(StoreTransition)                          \
-  V(LoadApiGetter)                            \
   V(LoadIndexedInterceptor)                   \
-  V(GrowArrayElements)                        \
-  /* These are only called from FGC and */    \
-  /* can be removed when we use ignition */   \
-  /* only */                                  \
-  V(LoadICTrampoline)                         \
-  V(LoadGlobalICTrampoline)                   \
-  V(KeyedLoadICTrampolineTF)                  \
-  V(StoreICTrampoline)                        \
-  V(KeyedStoreICTrampolineTF)
+  V(GrowArrayElements)
 
 // List of code stubs only used on ARM 32 bits platforms.
 #if V8_TARGET_ARCH_ARM
@@ -225,7 +177,7 @@
   Handle<Code> GetCode();
 
   // Retrieve the code for the stub, make and return a copy of the code.
-  Handle<Code> GetCodeCopy(const Code::FindAndReplacePattern& pattern);
+  Handle<Code> GetCodeCopy(const FindAndReplacePattern& pattern);
 
   static Major MajorKeyFromKey(uint32_t key) {
     return static_cast<Major>(MajorKeyBits::decode(key));
@@ -291,6 +243,8 @@
 
   Isolate* isolate() const { return isolate_; }
 
+  void DeleteStubFromCacheForTesting();
+
  protected:
   CodeStub(uint32_t key, Isolate* isolate)
       : minor_key_(MinorKeyFromKey(key)), isolate_(isolate) {}
@@ -370,7 +324,6 @@
  public:                                                   \
   inline Major MajorKey() const override { return NAME; }; \
                                                            \
- protected:                                                \
   DEFINE_CODE_STUB_BASE(NAME##Stub, SUPER)
 
 
@@ -386,59 +339,27 @@
   Handle<Code> GenerateCode() override;                               \
   DEFINE_CODE_STUB(NAME, SUPER)
 
-#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER)                        \
- public:                                                              \
-  void GenerateAssembly(CodeStubAssembler* assembler) const override; \
+#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER)                               \
+ public:                                                                     \
+  void GenerateAssembly(compiler::CodeAssemblerState* state) const override; \
   DEFINE_CODE_STUB(NAME, SUPER)
 
-#define DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(NAME, SUPER)                       \
- public:                                                                       \
-  static compiler::Node* Generate(CodeStubAssembler* assembler,                \
-                                  compiler::Node* left, compiler::Node* right, \
-                                  compiler::Node* context);                    \
-  void GenerateAssembly(CodeStubAssembler* assembler) const override {         \
-    assembler->Return(Generate(assembler, assembler->Parameter(0),             \
-                               assembler->Parameter(1),                        \
-                               assembler->Parameter(2)));                      \
-  }                                                                            \
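+// With-feedback stubs now only declare Generate() and GenerateAssembly();
+// their bodies live in the .cc file, so this header can rely on the forward
+// declarations above instead of including code-stub-assembler.h.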
+#define DEFINE_TURBOFAN_BINARY_OP_CODE_STUB_WITH_FEEDBACK(NAME, SUPER)       \
+ public:                                                                     \
+  static compiler::Node* Generate(                                           \
+      CodeStubAssembler* assembler, compiler::Node* left,                    \
+      compiler::Node* right, compiler::Node* slot_id,                        \
+      compiler::Node* feedback_vector, compiler::Node* context);             \
+  void GenerateAssembly(compiler::CodeAssemblerState* state) const override; \
   DEFINE_CODE_STUB(NAME, SUPER)
 
-#define DEFINE_TURBOFAN_BINARY_OP_CODE_STUB_WITH_FEEDBACK(NAME, SUPER)        \
- public:                                                                      \
-  static compiler::Node* Generate(                                            \
-      CodeStubAssembler* assembler, compiler::Node* left,                     \
-      compiler::Node* right, compiler::Node* slot_id,                         \
-      compiler::Node* type_feedback_vector, compiler::Node* context);         \
-  void GenerateAssembly(CodeStubAssembler* assembler) const override {        \
-    assembler->Return(                                                        \
-        Generate(assembler, assembler->Parameter(0), assembler->Parameter(1), \
-                 assembler->Parameter(2), assembler->Parameter(3),            \
-                 assembler->Parameter(4)));                                   \
-  }                                                                           \
-  DEFINE_CODE_STUB(NAME, SUPER)
-
-#define DEFINE_TURBOFAN_UNARY_OP_CODE_STUB(NAME, SUPER)                \
- public:                                                               \
-  static compiler::Node* Generate(CodeStubAssembler* assembler,        \
-                                  compiler::Node* value,               \
-                                  compiler::Node* context);            \
-  void GenerateAssembly(CodeStubAssembler* assembler) const override { \
-    assembler->Return(Generate(assembler, assembler->Parameter(0),     \
-                               assembler->Parameter(1)));              \
-  }                                                                    \
-  DEFINE_CODE_STUB(NAME, SUPER)
-
-#define DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(NAME, SUPER)         \
- public:                                                                      \
-  static compiler::Node* Generate(                                            \
-      CodeStubAssembler* assembler, compiler::Node* value,                    \
-      compiler::Node* context, compiler::Node* type_feedback_vector,          \
-      compiler::Node* slot_id);                                               \
-  void GenerateAssembly(CodeStubAssembler* assembler) const override {        \
-    assembler->Return(                                                        \
-        Generate(assembler, assembler->Parameter(0), assembler->Parameter(1), \
-                 assembler->Parameter(2), assembler->Parameter(3)));          \
-  }                                                                           \
+#define DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(NAME, SUPER)        \
+ public:                                                                     \
+  static compiler::Node* Generate(                                           \
+      CodeStubAssembler* assembler, compiler::Node* value,                   \
+      compiler::Node* context, compiler::Node* feedback_vector,              \
+      compiler::Node* slot_id);                                              \
+  void GenerateAssembly(compiler::CodeAssemblerState* state) const override; \
   DEFINE_CODE_STUB(NAME, SUPER)
 
 #define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
@@ -638,7 +559,7 @@
  protected:
   explicit TurboFanCodeStub(Isolate* isolate) : CodeStub(isolate) {}
 
-  virtual void GenerateAssembly(CodeStubAssembler* assembler) const = 0;
+  virtual void GenerateAssembly(compiler::CodeAssemblerState* state) const = 0;
 
  private:
   DEFINE_CODE_STUB_BASE(TurboFanCodeStub, CodeStub);
@@ -774,33 +695,15 @@
                                                     TurboFanCodeStub);
 };
 
-class IncStub final : public TurboFanCodeStub {
- public:
-  explicit IncStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(CountOp);
-  DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(Inc, TurboFanCodeStub);
-};
-
-class DecStub final : public TurboFanCodeStub {
- public:
-  explicit DecStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(CountOp);
-  DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(Dec, TurboFanCodeStub);
-};
-
 class StoreInterceptorStub : public TurboFanCodeStub {
  public:
   explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
 
-  void GenerateAssembly(CodeStubAssembler* assember) const override;
-
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
   ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
-  DEFINE_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
+  DEFINE_TURBOFAN_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
 };
 
 class LoadIndexedInterceptorStub : public TurboFanCodeStub {
@@ -832,184 +735,6 @@
   DEFINE_TURBOFAN_CODE_STUB(NumberToString, TurboFanCodeStub);
 };
 
-class FastNewClosureStub : public TurboFanCodeStub {
- public:
-  explicit FastNewClosureStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  static compiler::Node* Generate(CodeStubAssembler* assembler,
-                                  compiler::Node* shared_info,
-                                  compiler::Node* context);
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
-  DEFINE_TURBOFAN_CODE_STUB(FastNewClosure, TurboFanCodeStub);
-};
-
-class FastNewFunctionContextStub final : public TurboFanCodeStub {
- public:
-  static const int kMaximumSlots = 0x8000;
-
-  explicit FastNewFunctionContextStub(Isolate* isolate)
-      : TurboFanCodeStub(isolate) {}
-
-  static compiler::Node* Generate(CodeStubAssembler* assembler,
-                                  compiler::Node* function,
-                                  compiler::Node* slots,
-                                  compiler::Node* context);
-
- private:
-  // FastNewFunctionContextStub can only allocate closures which fit in the
-  // new space.
-  STATIC_ASSERT(((kMaximumSlots + Context::MIN_CONTEXT_SLOTS) * kPointerSize +
-                 FixedArray::kHeaderSize) < kMaxRegularHeapObjectSize);
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewFunctionContext);
-  DEFINE_TURBOFAN_CODE_STUB(FastNewFunctionContext, TurboFanCodeStub);
-};
-
-
-class FastNewObjectStub final : public PlatformCodeStub {
- public:
-  explicit FastNewObjectStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewObject);
-  DEFINE_PLATFORM_CODE_STUB(FastNewObject, PlatformCodeStub);
-};
-
-
-// TODO(turbofan): This stub should be possible to write in TurboFan
-// using the CodeStubAssembler very soon in a way that is as efficient
-// and easy as the current handwritten version, which is partly a copy
-// of the strict arguments object materialization code.
-class FastNewRestParameterStub final : public PlatformCodeStub {
- public:
-  explicit FastNewRestParameterStub(Isolate* isolate,
-                                    bool skip_stub_frame = false)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = SkipStubFrameBits::encode(skip_stub_frame);
-  }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewRestParameter);
-  DEFINE_PLATFORM_CODE_STUB(FastNewRestParameter, PlatformCodeStub);
-
-  int skip_stub_frame() const { return SkipStubFrameBits::decode(minor_key_); }
-
- private:
-  class SkipStubFrameBits : public BitField<bool, 0, 1> {};
-};
-
-
-// TODO(turbofan): This stub should be possible to write in TurboFan
-// using the CodeStubAssembler very soon in a way that is as efficient
-// and easy as the current handwritten version.
-class FastNewSloppyArgumentsStub final : public PlatformCodeStub {
- public:
-  explicit FastNewSloppyArgumentsStub(Isolate* isolate,
-                                      bool skip_stub_frame = false)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = SkipStubFrameBits::encode(skip_stub_frame);
-  }
-
-  int skip_stub_frame() const { return SkipStubFrameBits::decode(minor_key_); }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewSloppyArguments);
-  DEFINE_PLATFORM_CODE_STUB(FastNewSloppyArguments, PlatformCodeStub);
-
- private:
-  class SkipStubFrameBits : public BitField<bool, 0, 1> {};
-};
-
-
-// TODO(turbofan): This stub should be possible to write in TurboFan
-// using the CodeStubAssembler very soon in a way that is as efficient
-// and easy as the current handwritten version.
-class FastNewStrictArgumentsStub final : public PlatformCodeStub {
- public:
-  explicit FastNewStrictArgumentsStub(Isolate* isolate,
-                                      bool skip_stub_frame = false)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = SkipStubFrameBits::encode(skip_stub_frame);
-  }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewStrictArguments);
-  DEFINE_PLATFORM_CODE_STUB(FastNewStrictArguments, PlatformCodeStub);
-
-  int skip_stub_frame() const { return SkipStubFrameBits::decode(minor_key_); }
-
- private:
-  class SkipStubFrameBits : public BitField<bool, 0, 1> {};
-};
-
-class FastCloneRegExpStub final : public TurboFanCodeStub {
- public:
-  explicit FastCloneRegExpStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  static compiler::Node* Generate(CodeStubAssembler* assembler,
-                                  compiler::Node* closure,
-                                  compiler::Node* literal_index,
-                                  compiler::Node* pattern,
-                                  compiler::Node* flags,
-                                  compiler::Node* context);
-
- private:
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneRegExp);
-  DEFINE_TURBOFAN_CODE_STUB(FastCloneRegExp, TurboFanCodeStub);
-};
-
-class FastCloneShallowArrayStub : public TurboFanCodeStub {
- public:
-  FastCloneShallowArrayStub(Isolate* isolate,
-                            AllocationSiteMode allocation_site_mode)
-      : TurboFanCodeStub(isolate) {
-    minor_key_ = AllocationSiteModeBits::encode(allocation_site_mode);
-  }
-
-  static compiler::Node* Generate(CodeStubAssembler* assembler,
-                                  compiler::Node* closure,
-                                  compiler::Node* literal_index,
-                                  compiler::Node* context,
-                                  CodeStubAssembler::Label* call_runtime,
-                                  AllocationSiteMode allocation_site_mode);
-
-  AllocationSiteMode allocation_site_mode() const {
-    return AllocationSiteModeBits::decode(minor_key_);
-  }
-
- private:
-  class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {};
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneShallowArray);
-  DEFINE_TURBOFAN_CODE_STUB(FastCloneShallowArray, TurboFanCodeStub);
-};
-
-class FastCloneShallowObjectStub : public TurboFanCodeStub {
- public:
-  // Maximum number of properties in copied object.
-  static const int kMaximumClonedProperties = 6;
-
-  FastCloneShallowObjectStub(Isolate* isolate, int length)
-      : TurboFanCodeStub(isolate) {
-    DCHECK_GE(length, 0);
-    DCHECK_LE(length, kMaximumClonedProperties);
-    minor_key_ = LengthBits::encode(LengthBits::encode(length));
-  }
-
-  static compiler::Node* GenerateFastPath(
-      CodeStubAssembler* assembler,
-      compiler::CodeAssembler::Label* call_runtime, compiler::Node* closure,
-      compiler::Node* literals_index, compiler::Node* properties_count);
-
-  static bool IsSupported(ObjectLiteral* expr);
-  static int PropertiesCount(int literal_length);
-
-  int length() const { return LengthBits::decode(minor_key_); }
-
- private:
-  class LengthBits : public BitField<int, 0, 4> {};
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneShallowObject);
-  DEFINE_TURBOFAN_CODE_STUB(FastCloneShallowObject, TurboFanCodeStub);
-};
-
 class CreateAllocationSiteStub : public TurboFanCodeStub {
  public:
   explicit CreateAllocationSiteStub(Isolate* isolate)
@@ -1048,24 +773,6 @@
   DEFINE_TURBOFAN_CODE_STUB(GrowArrayElements, TurboFanCodeStub);
 };
 
-class FastArrayPushStub : public HydrogenCodeStub {
- public:
-  explicit FastArrayPushStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
-
- private:
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(VarArgFunction);
-  DEFINE_HYDROGEN_CODE_STUB(FastArrayPush, HydrogenCodeStub);
-};
-
-class FastFunctionBindStub : public HydrogenCodeStub {
- public:
-  explicit FastFunctionBindStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
-
- private:
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(VarArgFunction);
-  DEFINE_HYDROGEN_CODE_STUB(FastFunctionBind, HydrogenCodeStub);
-};
-
 enum AllocationSiteOverrideMode {
   DONT_OVERRIDE,
   DISABLE_ALLOCATION_SITES,
@@ -1081,7 +788,7 @@
   void GenerateDispatchToArrayStub(MacroAssembler* masm,
                                    AllocationSiteOverrideMode mode);
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayNArgumentsConstructor);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
   DEFINE_PLATFORM_CODE_STUB(ArrayConstructor, PlatformCodeStub);
 };
 
@@ -1129,110 +836,31 @@
   DEFINE_PLATFORM_CODE_STUB(MathPow, PlatformCodeStub);
 };
 
-
-class CallICStub: public PlatformCodeStub {
+class CallICStub : public TurboFanCodeStub {
  public:
-  CallICStub(Isolate* isolate, const CallICState& state)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
-
-  Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
-
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
+  CallICStub(Isolate* isolate, ConvertReceiverMode convert_mode,
+             TailCallMode tail_call_mode)
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = ConvertModeBits::encode(convert_mode) |
+                 TailCallModeBits::encode(tail_call_mode);
   }
 
  protected:
-  ConvertReceiverMode convert_mode() const { return state().convert_mode(); }
-  TailCallMode tail_call_mode() const { return state().tail_call_mode(); }
+  typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
+  typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
 
-  CallICState state() const { return CallICState(GetExtraICState()); }
-
-  // Code generation helpers.
-  void GenerateMiss(MacroAssembler* masm);
-  void HandleArrayCase(MacroAssembler* masm, Label* miss);
+  ConvertReceiverMode convert_mode() const {
+    return ConvertModeBits::decode(minor_key_);
+  }
+  TailCallMode tail_call_mode() const {
+    return TailCallModeBits::decode(minor_key_);
+  }
 
  private:
-  void PrintState(std::ostream& os) const override;  // NOLINT
+  void PrintState(std::ostream& os) const final;  // NOLINT
 
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunctionWithFeedbackAndVector);
-  DEFINE_PLATFORM_CODE_STUB(CallIC, PlatformCodeStub);
-};
-
-
-// TODO(verwaest): Translate to hydrogen code stub.
-class FunctionPrototypeStub : public PlatformCodeStub {
- public:
-  explicit FunctionPrototypeStub(Isolate* isolate)
-      : PlatformCodeStub(isolate) {}
-
-  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
-  ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
-
-  // TODO(mvstanton): only the receiver register is accessed. When this is
-  // translated to a hydrogen code stub, a new CallInterfaceDescriptor
-  // should be created that just uses that register for more efficient code.
-  CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
-    return LoadWithVectorDescriptor(isolate());
-  }
-
-  DEFINE_PLATFORM_CODE_STUB(FunctionPrototype, PlatformCodeStub);
-};
-
-
-class LoadIndexedStringStub : public PlatformCodeStub {
- public:
-  explicit LoadIndexedStringStub(Isolate* isolate)
-      : PlatformCodeStub(isolate) {}
-
-  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
-  ExtraICState GetExtraICState() const override { return Code::KEYED_LOAD_IC; }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
-  DEFINE_PLATFORM_CODE_STUB(LoadIndexedString, PlatformCodeStub);
-};
-
-
-class HandlerStub : public HydrogenCodeStub {
- public:
-  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
-  ExtraICState GetExtraICState() const override { return kind(); }
-
-  void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
-
-  CallInterfaceDescriptor GetCallInterfaceDescriptor() const override;
-
- protected:
-  explicit HandlerStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
-
-  virtual Code::Kind kind() const = 0;
-
-  DEFINE_CODE_STUB_BASE(HandlerStub, HydrogenCodeStub);
-};
-
-
-class LoadFieldStub: public HandlerStub {
- public:
-  LoadFieldStub(Isolate* isolate, FieldIndex index) : HandlerStub(isolate) {
-    int property_index_key = index.GetFieldAccessStubKey();
-    set_sub_minor_key(LoadFieldByIndexBits::encode(property_index_key));
-  }
-
-  FieldIndex index() const {
-    int property_index_key = LoadFieldByIndexBits::decode(sub_minor_key());
-    return FieldIndex::FromFieldAccessStubKey(property_index_key);
-  }
-
- protected:
-  Code::Kind kind() const override { return Code::LOAD_IC; }
-
- private:
-  class LoadFieldByIndexBits : public BitField<int, 0, 13> {};
-
-  // TODO(ishell): The stub uses only kReceiver parameter.
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
-  DEFINE_HANDLER_CODE_STUB(LoadField, HandlerStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(CallIC);
+  DEFINE_TURBOFAN_CODE_STUB(CallIC, TurboFanCodeStub);
 };
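The new CallICStub packs both of its modes into minor_key_ through chained BitField ranges (TailCallModeBits starts at ConvertModeBits::kNext). A standalone sketch of that encode/decode scheme, assuming only the C++ standard library; the BitField template and the enum values below are simplified stand-ins, not V8's definitions:

#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static const int kNext = kShift + kSize;
  static const uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  // Shift the value into its reserved bit range of the key.
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  // Mask out this field's bits and shift them back down.
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> kShift);
  }
};

enum class ConvertReceiverMode { kNullOrUndefined, kNotNullOrUndefined, kAny };
enum class TailCallMode { kDisallow, kAllow };

using ConvertModeBits = BitField<ConvertReceiverMode, 0, 2>;
// Chained placement: this field starts right after the previous one.
using TailCallModeBits = BitField<TailCallMode, ConvertModeBits::kNext, 1>;

int main() {
  uint32_t minor_key = ConvertModeBits::encode(ConvertReceiverMode::kAny) |
                       TailCallModeBits::encode(TailCallMode::kAllow);
  assert(ConvertModeBits::decode(minor_key) == ConvertReceiverMode::kAny);
  assert(TailCallModeBits::decode(minor_key) == TailCallMode::kAllow);
  return 0;
}

Because each field's kNext feeds the next field's shift, the ranges stay disjoint by construction, and one integer key can round-trip all of the stub's parameters.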
 
 class KeyedLoadSloppyArgumentsStub : public TurboFanCodeStub {
@@ -1267,141 +895,6 @@
   DEFINE_TURBOFAN_CODE_STUB(KeyedStoreSloppyArguments, TurboFanCodeStub);
 };
 
-
-class LoadConstantStub : public HandlerStub {
- public:
-  LoadConstantStub(Isolate* isolate, int constant_index)
-      : HandlerStub(isolate) {
-    set_sub_minor_key(ConstantIndexBits::encode(constant_index));
-  }
-
-  int constant_index() const {
-    return ConstantIndexBits::decode(sub_minor_key());
-  }
-
- protected:
-  Code::Kind kind() const override { return Code::LOAD_IC; }
-
- private:
-  class ConstantIndexBits : public BitField<int, 0, kSubMinorKeyBits> {};
-
-  // TODO(ishell): The stub uses only kReceiver parameter.
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
-  DEFINE_HANDLER_CODE_STUB(LoadConstant, HandlerStub);
-};
-
-class LoadApiGetterStub : public TurboFanCodeStub {
- public:
-  LoadApiGetterStub(Isolate* isolate, bool receiver_is_holder, int index)
-      : TurboFanCodeStub(isolate) {
-    // If that's not true, we need to ensure that the receiver is actually a
-    // JSReceiver. http://crbug.com/609134
-    DCHECK(receiver_is_holder);
-    minor_key_ = IndexBits::encode(index) |
-                 ReceiverIsHolderBits::encode(receiver_is_holder);
-  }
-
-  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
-  ExtraICState GetExtraICState() const override { return Code::LOAD_IC; }
-
-  int index() const { return IndexBits::decode(minor_key_); }
-  bool receiver_is_holder() const {
-    return ReceiverIsHolderBits::decode(minor_key_);
-  }
-
- private:
-  class ReceiverIsHolderBits : public BitField<bool, 0, 1> {};
-  class IndexBits : public BitField<int, 1, kDescriptorIndexBitCount> {};
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
-  DEFINE_TURBOFAN_CODE_STUB(LoadApiGetter, TurboFanCodeStub);
-};
-
-class StoreFieldStub : public TurboFanCodeStub {
- public:
-  StoreFieldStub(Isolate* isolate, FieldIndex index,
-                 Representation representation)
-      : TurboFanCodeStub(isolate) {
-    int property_index_key = index.GetFieldAccessStubKey();
-    minor_key_ = StoreFieldByIndexBits::encode(property_index_key) |
-                 RepresentationBits::encode(representation.kind());
-  }
-
-  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
-  ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
-
-  FieldIndex index() const {
-    int property_index_key = StoreFieldByIndexBits::decode(minor_key_);
-    return FieldIndex::FromFieldAccessStubKey(property_index_key);
-  }
-
-  Representation representation() const {
-    return Representation::FromKind(RepresentationBits::decode(minor_key_));
-  }
-
- private:
-  class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
-  class RepresentationBits
-      : public BitField<Representation::Kind, StoreFieldByIndexBits::kNext, 4> {
-  };
-  STATIC_ASSERT(Representation::kNumRepresentations - 1 <
-                RepresentationBits::kMax);
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
-  DEFINE_TURBOFAN_CODE_STUB(StoreField, TurboFanCodeStub);
-};
-
-class StoreMapStub : public TurboFanCodeStub {
- public:
-  explicit StoreMapStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
-  ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
-
- private:
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreTransition);
-  DEFINE_TURBOFAN_CODE_STUB(StoreMap, TurboFanCodeStub);
-};
-
-class StoreTransitionStub : public TurboFanCodeStub {
- public:
-  enum StoreMode {
-    StoreMapAndValue,
-    ExtendStorageAndStoreMapAndValue
-  };
-
-  StoreTransitionStub(Isolate* isolate, bool is_inobject,
-                      Representation representation, StoreMode store_mode)
-      : TurboFanCodeStub(isolate) {
-    minor_key_ = IsInobjectBits::encode(is_inobject) |
-                 RepresentationBits::encode(representation.kind()) |
-                 StoreModeBits::encode(store_mode);
-  }
-
-  Code::Kind GetCodeKind() const override { return Code::HANDLER; }
-  ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
-
-  bool is_inobject() const { return IsInobjectBits::decode(minor_key_); }
-
-  Representation representation() const {
-    return Representation::FromKind(RepresentationBits::decode(minor_key_));
-  }
-
-  StoreMode store_mode() const { return StoreModeBits::decode(minor_key_); }
-
- private:
-  class IsInobjectBits : public BitField<bool, 0, 1> {};
-  class RepresentationBits
-      : public BitField<Representation::Kind, IsInobjectBits::kNext, 4> {};
-  STATIC_ASSERT(Representation::kNumRepresentations - 1 <
-                RepresentationBits::kMax);
-  class StoreModeBits
-      : public BitField<StoreMode, RepresentationBits::kNext, 1> {};
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreNamedTransition);
-  DEFINE_TURBOFAN_CODE_STUB(StoreTransition, TurboFanCodeStub);
-};
-
 class StoreGlobalStub : public TurboFanCodeStub {
  public:
   StoreGlobalStub(Isolate* isolate, PropertyCellType type,
@@ -1428,7 +921,7 @@
 
   Handle<Code> GetCodeCopyFromTemplate(Handle<JSGlobalObject> global,
                                        Handle<PropertyCell> cell) {
-    Code::FindAndReplacePattern pattern;
+    FindAndReplacePattern pattern;
     if (check_global()) {
       pattern.Add(handle(global_map_placeholder(isolate())->map()),
                   Map::WeakCellForMap(Handle<Map>(global->map())));
@@ -1562,7 +1055,7 @@
   static void GenerateAheadOfTime(Isolate* isolate);
 
   Handle<Code> GetCodeCopyFromTemplate(Handle<AllocationSite> allocation_site) {
-    Code::FindAndReplacePattern pattern;
+    FindAndReplacePattern pattern;
     pattern.Add(isolate()->factory()->undefined_map(), allocation_site);
     return CodeStub::GetCodeCopy(pattern);
   }
@@ -1847,13 +1340,6 @@
   void GenerateSlow(MacroAssembler* masm, EmbedMode embed_mode,
                     const RuntimeCallHelper& call_helper);
 
-  // Skip handling slow case and directly jump to bailout.
-  void SkipSlow(MacroAssembler* masm, Label* bailout) {
-    masm->bind(&index_not_smi_);
-    masm->bind(&call_runtime_);
-    masm->jmp(bailout);
-  }
-
  private:
   Register object_;
   Register index_;
@@ -1873,327 +1359,31 @@
   DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
 };
 
-
-// Generates code for creating a one-char string from a char code.
-class StringCharFromCodeGenerator {
+class CallICTrampolineStub : public TurboFanCodeStub {
  public:
-  StringCharFromCodeGenerator(Register code,
-                              Register result)
-      : code_(code),
-        result_(result) {
-    DCHECK(!code_.is(result_));
+  CallICTrampolineStub(Isolate* isolate, ConvertReceiverMode convert_mode,
+                       TailCallMode tail_call_mode)
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = ConvertModeBits::encode(convert_mode) |
+                 TailCallModeBits::encode(tail_call_mode);
   }
 
-  // Generates the fast case code. On the fallthrough path |result|
-  // register contains the result.
-  void GenerateFast(MacroAssembler* masm);
+ protected:
+  typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
+  typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
 
-  // Generates the slow case code. Must not be naturally
-  // reachable. Expected to be put after a ret instruction (e.g., in
-  // deferred code). Always jumps back to the fast case.
-  void GenerateSlow(MacroAssembler* masm,
-                    const RuntimeCallHelper& call_helper);
-
-  // Skip handling slow case and directly jump to bailout.
-  void SkipSlow(MacroAssembler* masm, Label* bailout) {
-    masm->bind(&slow_case_);
-    masm->jmp(bailout);
+  ConvertReceiverMode convert_mode() const {
+    return ConvertModeBits::decode(minor_key_);
+  }
+  TailCallMode tail_call_mode() const {
+    return TailCallModeBits::decode(minor_key_);
   }
 
  private:
-  Register code_;
-  Register result_;
+  void PrintState(std::ostream& os) const override;  // NOLINT
 
-  Label slow_case_;
-  Label exit_;
-
-  DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
-};
-
-
-// Generates code implementing String.prototype.charAt.
-//
-// Only supports the case when the receiver is a string and the index
-// is a number (smi or heap number) that is a valid index into the
-// string. Additional index constraints are specified by the
-// flags. Otherwise, bails out to the provided labels.
-//
-// Register usage: |object| may be changed to another string in a way
-// that doesn't affect charCodeAt/charAt semantics, |index| is
-// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
-class StringCharAtGenerator {
- public:
-  StringCharAtGenerator(Register object, Register index, Register scratch,
-                        Register result, Label* receiver_not_string,
-                        Label* index_not_number, Label* index_out_of_range,
-                        ReceiverCheckMode check_mode = RECEIVER_IS_UNKNOWN)
-      : char_code_at_generator_(object, index, scratch, receiver_not_string,
-                                index_not_number, index_out_of_range,
-                                check_mode),
-        char_from_code_generator_(scratch, result) {}
-
-  // Generates the fast case code. On the fallthrough path |result|
-  // register contains the result.
-  void GenerateFast(MacroAssembler* masm) {
-    char_code_at_generator_.GenerateFast(masm);
-    char_from_code_generator_.GenerateFast(masm);
-  }
-
-  // Generates the slow case code. Must not be naturally
-  // reachable. Expected to be put after a ret instruction (e.g., in
-  // deferred code). Always jumps back to the fast case.
-  void GenerateSlow(MacroAssembler* masm, EmbedMode embed_mode,
-                    const RuntimeCallHelper& call_helper) {
-    char_code_at_generator_.GenerateSlow(masm, embed_mode, call_helper);
-    char_from_code_generator_.GenerateSlow(masm, call_helper);
-  }
-
-  // Skip handling slow case and directly jump to bailout.
-  void SkipSlow(MacroAssembler* masm, Label* bailout) {
-    char_code_at_generator_.SkipSlow(masm, bailout);
-    char_from_code_generator_.SkipSlow(masm, bailout);
-  }
-
- private:
-  StringCharCodeAtGenerator char_code_at_generator_;
-  StringCharFromCodeGenerator char_from_code_generator_;
-
-  DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
-};
-
-
-class LoadDictionaryElementStub : public HydrogenCodeStub {
- public:
-  explicit LoadDictionaryElementStub(Isolate* isolate)
-      : HydrogenCodeStub(isolate) {}
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
-  DEFINE_HYDROGEN_CODE_STUB(LoadDictionaryElement, HydrogenCodeStub);
-};
-
-class LoadICTrampolineStub : public TurboFanCodeStub {
- public:
-  explicit LoadICTrampolineStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
-  Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
-  DEFINE_CODE_STUB(LoadICTrampoline, TurboFanCodeStub);
-};
-
-class LoadGlobalICTrampolineStub : public TurboFanCodeStub {
- public:
-  explicit LoadGlobalICTrampolineStub(Isolate* isolate,
-                                      const LoadGlobalICState& state)
-      : TurboFanCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
-
-  void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
-  Code::Kind GetCodeKind() const override { return Code::LOAD_GLOBAL_IC; }
-
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
-  }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadGlobal);
-  DEFINE_CODE_STUB(LoadGlobalICTrampoline, TurboFanCodeStub);
-};
-
-class KeyedLoadICTrampolineTFStub : public LoadICTrampolineStub {
- public:
-  explicit KeyedLoadICTrampolineTFStub(Isolate* isolate)
-      : LoadICTrampolineStub(isolate) {}
-
-  void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
-  Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
-
-  DEFINE_CODE_STUB(KeyedLoadICTrampolineTF, LoadICTrampolineStub);
-};
-
-class StoreICTrampolineStub : public TurboFanCodeStub {
- public:
-  StoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
-      : TurboFanCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
-
-  void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
-  Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
-
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
-  }
-
- protected:
-  StoreICState state() const { return StoreICState(GetExtraICState()); }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
-  DEFINE_CODE_STUB(StoreICTrampoline, TurboFanCodeStub);
-};
-
-class KeyedStoreICTrampolineStub : public PlatformCodeStub {
- public:
-  KeyedStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
-
-  Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
-
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
-  }
-
- protected:
-  StoreICState state() const { return StoreICState(GetExtraICState()); }
-
- private:
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
-  DEFINE_PLATFORM_CODE_STUB(KeyedStoreICTrampoline, PlatformCodeStub);
-};
-
-class KeyedStoreICTrampolineTFStub : public StoreICTrampolineStub {
- public:
-  KeyedStoreICTrampolineTFStub(Isolate* isolate, const StoreICState& state)
-      : StoreICTrampolineStub(isolate, state) {}
-
-  void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
-  Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
-
-  DEFINE_CODE_STUB(KeyedStoreICTrampolineTF, StoreICTrampolineStub);
-};
-
-class CallICTrampolineStub : public PlatformCodeStub {
- public:
-  CallICTrampolineStub(Isolate* isolate, const CallICState& state)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
-
-  Code::Kind GetCodeKind() const override { return Code::CALL_IC; }
-
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
-  }
-
- protected:
-  CallICState state() const {
-    return CallICState(static_cast<ExtraICState>(minor_key_));
-  }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunctionWithFeedback);
-  DEFINE_PLATFORM_CODE_STUB(CallICTrampoline, PlatformCodeStub);
-};
-
-class LoadICStub : public TurboFanCodeStub {
- public:
-  explicit LoadICStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
-  Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
-  DEFINE_CODE_STUB(LoadIC, TurboFanCodeStub);
-};
-
-class LoadICProtoArrayStub : public TurboFanCodeStub {
- public:
-  explicit LoadICProtoArrayStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
-  void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadICProtoArray);
-  DEFINE_CODE_STUB(LoadICProtoArray, TurboFanCodeStub);
-};
-
-class LoadGlobalICStub : public TurboFanCodeStub {
- public:
-  explicit LoadGlobalICStub(Isolate* isolate, const LoadGlobalICState& state)
-      : TurboFanCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
-
-  void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
-  Code::Kind GetCodeKind() const override { return Code::LOAD_GLOBAL_IC; }
-
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
-  }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadGlobalWithVector);
-  DEFINE_CODE_STUB(LoadGlobalIC, TurboFanCodeStub);
-};
-
-class KeyedLoadICTFStub : public LoadICStub {
- public:
-  explicit KeyedLoadICTFStub(Isolate* isolate) : LoadICStub(isolate) {}
-
-  void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
-  Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
-
-  DEFINE_CODE_STUB(KeyedLoadICTF, LoadICStub);
-};
-
-class StoreICStub : public TurboFanCodeStub {
- public:
-  StoreICStub(Isolate* isolate, const StoreICState& state)
-      : TurboFanCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
-
-  void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
-  Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
-  }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
-  DEFINE_CODE_STUB(StoreIC, TurboFanCodeStub);
-};
-
-class KeyedStoreICStub : public PlatformCodeStub {
- public:
-  KeyedStoreICStub(Isolate* isolate, const StoreICState& state)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = state.GetExtraICState();
-  }
-
-  void GenerateForTrampoline(MacroAssembler* masm);
-
-  Code::Kind GetCodeKind() const final { return Code::KEYED_STORE_IC; }
-
-  ExtraICState GetExtraICState() const final {
-    return static_cast<ExtraICState>(minor_key_);
-  }
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
-  DEFINE_PLATFORM_CODE_STUB(KeyedStoreIC, PlatformCodeStub);
-
- protected:
-  void GenerateImpl(MacroAssembler* masm, bool in_frame);
-};
-
-class KeyedStoreICTFStub : public StoreICStub {
- public:
-  KeyedStoreICTFStub(Isolate* isolate, const StoreICState& state)
-      : StoreICStub(isolate, state) {}
-
-  void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
-  Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
-
-  DEFINE_CODE_STUB(KeyedStoreICTF, StoreICStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(CallICTrampoline);
+  DEFINE_TURBOFAN_CODE_STUB(CallICTrampoline, TurboFanCodeStub);
 };
 
 class DoubleToIStub : public PlatformCodeStub {
@@ -2301,39 +1491,6 @@
   DEFINE_TURBOFAN_CODE_STUB(StoreScriptContextField, ScriptContextFieldStub);
 };
 
-
-class LoadFastElementStub : public HandlerStub {
- public:
-  LoadFastElementStub(Isolate* isolate, bool is_js_array,
-                      ElementsKind elements_kind,
-                      bool convert_hole_to_undefined = false)
-      : HandlerStub(isolate) {
-    set_sub_minor_key(
-        ElementsKindBits::encode(elements_kind) |
-        IsJSArrayBits::encode(is_js_array) |
-        CanConvertHoleToUndefined::encode(convert_hole_to_undefined));
-  }
-
-  Code::Kind kind() const override { return Code::KEYED_LOAD_IC; }
-
-  bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
-  bool convert_hole_to_undefined() const {
-    return CanConvertHoleToUndefined::decode(sub_minor_key());
-  }
-
-  ElementsKind elements_kind() const {
-    return ElementsKindBits::decode(sub_minor_key());
-  }
-
- private:
-  class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
-  class IsJSArrayBits: public BitField<bool, 8, 1> {};
-  class CanConvertHoleToUndefined : public BitField<bool, 9, 1> {};
-
-  DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
-  DEFINE_HANDLER_CODE_STUB(LoadFastElement, HandlerStub);
-};
-
 class StoreFastElementStub : public TurboFanCodeStub {
  public:
   StoreFastElementStub(Isolate* isolate, bool is_js_array,
@@ -2398,27 +1555,11 @@
       : TurboFanCodeStub(isolate) {}
 
   void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
-  void GenerateAssembly(CodeStubAssembler* assembler) const override;
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateHeapNumber);
-  DEFINE_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
+  DEFINE_TURBOFAN_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
 };
 
-#define SIMD128_ALLOC_STUB(TYPE, Type, type, lane_count, lane_type)     \
-  class Allocate##Type##Stub : public TurboFanCodeStub {                \
-   public:                                                              \
-    explicit Allocate##Type##Stub(Isolate* isolate)                     \
-        : TurboFanCodeStub(isolate) {}                                  \
-                                                                        \
-    void InitializeDescriptor(CodeStubDescriptor* descriptor) override; \
-    void GenerateAssembly(CodeStubAssembler* assembler) const override; \
-                                                                        \
-    DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate##Type);                   \
-    DEFINE_CODE_STUB(Allocate##Type, TurboFanCodeStub);                 \
-  };
-SIMD128_TYPES(SIMD128_ALLOC_STUB)
-#undef SIMD128_ALLOC_STUB
-
 class CommonArrayConstructorStub : public TurboFanCodeStub {
  protected:
   CommonArrayConstructorStub(Isolate* isolate, ElementsKind kind,
@@ -2540,31 +1681,19 @@
   DEFINE_PLATFORM_CODE_STUB(ArrayNArgumentsConstructor, PlatformCodeStub);
 };
 
-class StoreElementStub : public PlatformCodeStub {
+class StoreSlowElementStub : public TurboFanCodeStub {
  public:
-  StoreElementStub(Isolate* isolate, ElementsKind elements_kind,
-                   KeyedAccessStoreMode mode)
-      : PlatformCodeStub(isolate) {
-    // TODO(jkummerow): Rename this stub to StoreSlowElementStub,
-    // drop elements_kind parameter.
-    DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind);
-    minor_key_ = ElementsKindBits::encode(elements_kind) |
-                 CommonStoreModeBits::encode(mode);
+  StoreSlowElementStub(Isolate* isolate, KeyedAccessStoreMode mode)
+      : TurboFanCodeStub(isolate) {
+    minor_key_ = CommonStoreModeBits::encode(mode);
   }
 
   Code::Kind GetCodeKind() const override { return Code::HANDLER; }
   ExtraICState GetExtraICState() const override { return Code::KEYED_STORE_IC; }
 
  private:
-  ElementsKind elements_kind() const {
-    return ElementsKindBits::decode(minor_key_);
-  }
-
-  class ElementsKindBits
-      : public BitField<ElementsKind, CommonStoreModeBits::kNext, 8> {};
-
   DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
-  DEFINE_PLATFORM_CODE_STUB(StoreElement, PlatformCodeStub);
+  DEFINE_TURBOFAN_CODE_STUB(StoreSlowElement, TurboFanCodeStub);
 };
 
 class ToBooleanICStub : public HydrogenCodeStub {
@@ -2602,7 +1731,7 @@
   ToBooleanICStub(Isolate* isolate, InitializationState init_state)
       : HydrogenCodeStub(isolate, init_state) {}
 
-  static const int kNumHints = 9;
+  static const int kNumHints = 8;
   STATIC_ASSERT(static_cast<int>(ToBooleanHint::kAny) ==
                 ((1 << kNumHints) - 1));
   class HintsBits : public BitField<uint16_t, 0, kNumHints> {};
@@ -2713,16 +1842,8 @@
                                   compiler::Node* string, compiler::Node* from,
                                   compiler::Node* to, compiler::Node* context);
 
-  void GenerateAssembly(CodeStubAssembler* assembler) const override {
-    assembler->Return(Generate(assembler,
-                               assembler->Parameter(Descriptor::kString),
-                               assembler->Parameter(Descriptor::kFrom),
-                               assembler->Parameter(Descriptor::kTo),
-                               assembler->Parameter(Descriptor::kContext)));
-  }
-
   DEFINE_CALL_INTERFACE_DESCRIPTOR(SubString);
-  DEFINE_CODE_STUB(SubString, TurboFanCodeStub);
+  DEFINE_TURBOFAN_CODE_STUB(SubString, TurboFanCodeStub);
 };
 
 
diff --git a/src/codegen.cc b/src/codegen.cc
index afd8a6f..11837e9 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -13,8 +13,10 @@
 #include "src/ast/prettyprinter.h"
 #include "src/bootstrapper.h"
 #include "src/compilation-info.h"
+#include "src/counters.h"
 #include "src/debug/debug.h"
 #include "src/eh-frame.h"
+#include "src/objects-inl.h"
 #include "src/runtime/runtime.h"
 
 namespace v8 {
@@ -132,13 +134,103 @@
       info->prologue_offset(), info->is_debug() && !is_crankshafted);
   isolate->counters()->total_compiled_code_size()->Increment(
       code->instruction_size());
-  isolate->heap()->IncrementCodeGeneratedBytes(is_crankshafted,
-      code->instruction_size());
   return code;
 }
 
+// Print function's source if it was not printed before.
+// Return a sequential id under which this function was printed.
+static int PrintFunctionSource(CompilationInfo* info,
+                               std::vector<Handle<SharedFunctionInfo>>* printed,
+                               int inlining_id,
+                               Handle<SharedFunctionInfo> shared) {
+  // Outermost function has source id -1 and inlined functions take
+  // source ids starting from 0.
+  int source_id = -1;
+  if (inlining_id != SourcePosition::kNotInlined) {
+    for (unsigned i = 0; i < printed->size(); i++) {
+      if (printed->at(i).is_identical_to(shared)) {
+        return i;
+      }
+    }
+    source_id = static_cast<int>(printed->size());
+    printed->push_back(shared);
+  }
+
+  Isolate* isolate = info->isolate();
+  if (!shared->script()->IsUndefined(isolate)) {
+    Handle<Script> script(Script::cast(shared->script()), isolate);
+
+    if (!script->source()->IsUndefined(isolate)) {
+      CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+      Object* source_name = script->name();
+      OFStream os(tracing_scope.file());
+      os << "--- FUNCTION SOURCE (";
+      if (source_name->IsString()) {
+        os << String::cast(source_name)->ToCString().get() << ":";
+      }
+      os << shared->DebugName()->ToCString().get() << ") id{";
+      os << info->optimization_id() << "," << source_id << "} start{";
+      os << shared->start_position() << "} ---\n";
+      {
+        DisallowHeapAllocation no_allocation;
+        int start = shared->start_position();
+        int len = shared->end_position() - start;
+        String::SubStringRange source(String::cast(script->source()), start,
+                                      len);
+        for (const auto& c : source) {
+          os << AsReversiblyEscapedUC16(c);
+        }
+      }
+
+      os << "\n--- END ---\n";
+    }
+  }
+
+  return source_id;
+}
+
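PrintFunctionSource assigns the outermost function source id -1 and gives each distinct inlined function the index of its first appearance in the printed list, so repeated inlinings of the same function reuse one id. A standalone sketch of that id-assignment logic, with std::string standing in for Handle<SharedFunctionInfo>:

#include <cassert>
#include <string>
#include <vector>

constexpr int kNotInlined = -1;  // stand-in for SourcePosition::kNotInlined

int AssignSourceId(std::vector<std::string>* printed, int inlining_id,
                   const std::string& fn) {
  if (inlining_id == kNotInlined) return -1;  // outermost function
  for (size_t i = 0; i < printed->size(); i++) {
    if ((*printed)[i] == fn) return static_cast<int>(i);  // already printed
  }
  printed->push_back(fn);  // first appearance: new sequential id
  return static_cast<int>(printed->size()) - 1;
}

int main() {
  std::vector<std::string> printed;
  assert(AssignSourceId(&printed, kNotInlined, "outer") == -1);
  assert(AssignSourceId(&printed, 0, "f") == 0);
  assert(AssignSourceId(&printed, 1, "g") == 1);
  assert(AssignSourceId(&printed, 2, "f") == 0);  // deduplicated
  return 0;
}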
+// Print information for the given inlining: which function was inlined and
+// where the inlining occurred.
+static void PrintInlinedFunctionInfo(
+    CompilationInfo* info, int source_id, int inlining_id,
+    const CompilationInfo::InlinedFunctionHolder& h) {
+  CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+  OFStream os(tracing_scope.file());
+  os << "INLINE (" << h.shared_info->DebugName()->ToCString().get() << ") id{"
+     << info->optimization_id() << "," << source_id << "} AS " << inlining_id
+     << " AT ";
+  const SourcePosition position = h.position.position;
+  if (position.IsKnown()) {
+    os << "<" << position.InliningId() << ":" << position.ScriptOffset() << ">";
+  } else {
+    os << "<?>";
+  }
+  os << std::endl;
+}
+
+// Print the source of all functions that participated in this optimizing
+// compilation. For inlined functions print source position of their inlining.
+static void DumpParticipatingSource(CompilationInfo* info) {
+  AllowDeferredHandleDereference allow_deference_for_print_code;
+
+  std::vector<Handle<SharedFunctionInfo>> printed;
+  printed.reserve(info->inlined_functions().size());
+
+  PrintFunctionSource(info, &printed, SourcePosition::kNotInlined,
+                      info->shared_info());
+  const auto& inlined = info->inlined_functions();
+  for (unsigned id = 0; id < inlined.size(); id++) {
+    const int source_id =
+        PrintFunctionSource(info, &printed, id, inlined[id].shared_info);
+    PrintInlinedFunctionInfo(info, source_id, id, inlined[id]);
+  }
+}
 
 void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
+  if (FLAG_print_opt_source && info->IsOptimizing()) {
+    DumpParticipatingSource(info);
+  }
+
 #ifdef ENABLE_DISASSEMBLER
   AllowDeferredHandleDereference allow_deference_for_print_code;
   Isolate* isolate = info->isolate();
@@ -147,7 +239,8 @@
           ? FLAG_print_builtin_code
           : (FLAG_print_code || (info->IsStub() && FLAG_print_code_stubs) ||
              (info->IsOptimizing() && FLAG_print_opt_code &&
-              info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)));
+              info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)) ||
+             (info->IsWasm() && FLAG_print_wasm_code));
   if (print_code) {
     std::unique_ptr<char[]> debug_name = info->GetDebugName();
     CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
diff --git a/src/codegen.h b/src/codegen.h
index a17ad2a..b909edc 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -104,43 +104,6 @@
 double fast_sqrt(double input, Isolate* isolate);
 void lazily_initialize_fast_sqrt(Isolate* isolate);
 
-
-class ElementsTransitionGenerator : public AllStatic {
- public:
-  // If |mode| is set to DONT_TRACK_ALLOCATION_SITE,
-  // |allocation_memento_found| may be NULL.
-  static void GenerateMapChangeElementsTransition(
-      MacroAssembler* masm,
-      Register receiver,
-      Register key,
-      Register value,
-      Register target_map,
-      AllocationSiteMode mode,
-      Label* allocation_memento_found);
-  static void GenerateSmiToDouble(
-      MacroAssembler* masm,
-      Register receiver,
-      Register key,
-      Register value,
-      Register target_map,
-      AllocationSiteMode mode,
-      Label* fail);
-  static void GenerateDoubleToObject(
-      MacroAssembler* masm,
-      Register receiver,
-      Register key,
-      Register value,
-      Register target_map,
-      AllocationSiteMode mode,
-      Label* fail);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
-};
-
-static const int kNumberDictionaryProbes = 4;
-
-
 class CodeAgingHelper {
  public:
   explicit CodeAgingHelper(Isolate* isolate);
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index af9fbb5..8b2e51e 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -19,12 +19,11 @@
 // Initial size of each compilation cache table allocated.
 static const int kInitialCacheSize = 64;
 
-
 CompilationCache::CompilationCache(Isolate* isolate)
     : isolate_(isolate),
-      script_(isolate, 1),
-      eval_global_(isolate, 1),
-      eval_contextual_(isolate, 1),
+      script_(isolate),
+      eval_global_(isolate),
+      eval_contextual_(isolate),
       reg_exp_(isolate, kRegExpGenerations),
       enabled_(true) {
   CompilationSubCache* subcaches[kSubCacheCount] =
@@ -103,11 +102,8 @@
   }
 }
 
-
-CompilationCacheScript::CompilationCacheScript(Isolate* isolate,
-                                               int generations)
-    : CompilationSubCache(isolate, generations) {}
-
+CompilationCacheScript::CompilationCacheScript(Isolate* isolate)
+    : CompilationSubCache(isolate, 1) {}
 
 // We only re-use a cached function for some script source code if the
 // script originates from the same place. This is to avoid issues
@@ -141,29 +137,31 @@
 // be cached in the same script generation. Currently the first use
 // will be cached, but subsequent code from different source / line
 // won't.
-Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(
+InfoVectorPair CompilationCacheScript::Lookup(
     Handle<String> source, Handle<Object> name, int line_offset,
     int column_offset, ScriptOriginOptions resource_options,
     Handle<Context> context, LanguageMode language_mode) {
-  Object* result = NULL;
-  int generation;
+  InfoVectorPair result;
 
   // Probe the script generation tables. Make sure not to leak handles
   // into the caller's handle scope.
   { HandleScope scope(isolate());
-    for (generation = 0; generation < generations(); generation++) {
-      Handle<CompilationCacheTable> table = GetTable(generation);
-      Handle<Object> probe = table->Lookup(source, context, language_mode);
-      if (probe->IsSharedFunctionInfo()) {
-        Handle<SharedFunctionInfo> function_info =
-            Handle<SharedFunctionInfo>::cast(probe);
-        // Break when we've found a suitable shared function info that
-        // matches the origin.
-        if (HasOrigin(function_info, name, line_offset, column_offset,
-                      resource_options)) {
-          result = *function_info;
-          break;
-        }
+    const int generation = 0;
+    DCHECK(generations() == 1);
+    Handle<CompilationCacheTable> table = GetTable(generation);
+    InfoVectorPair probe = table->LookupScript(source, context, language_mode);
+    if (probe.has_shared()) {
+      Handle<SharedFunctionInfo> function_info(probe.shared(), isolate());
+      Handle<Cell> vector_handle;
+      if (probe.has_vector()) {
+        vector_handle = Handle<Cell>(probe.vector(), isolate());
+      }
+      // Break when we've found a suitable shared function info that
+      // matches the origin.
+      if (HasOrigin(function_info, name, line_offset, column_offset,
+                    resource_options)) {
+        result = InfoVectorPair(*function_info,
+                                probe.has_vector() ? *vector_handle : nullptr);
       }
     }
   }
@@ -171,72 +169,60 @@
   // Once outside the manacles of the handle scope, we need to recheck
   // to see if we actually found a cached script. If so, we return a
   // handle created in the caller's handle scope.
-  if (result != NULL) {
-    Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result),
-                                      isolate());
+  if (result.has_shared()) {
+    Handle<SharedFunctionInfo> shared(result.shared(), isolate());
+    // TODO(mvstanton): Make sure HasOrigin can't allocate, or it will
+    // mess up our InfoVectorPair.
     DCHECK(
         HasOrigin(shared, name, line_offset, column_offset, resource_options));
-    // If the script was found in a later generation, we promote it to
-    // the first generation to let it survive longer in the cache.
-    if (generation != 0) Put(source, context, language_mode, shared);
     isolate()->counters()->compilation_cache_hits()->Increment();
-    return shared;
   } else {
     isolate()->counters()->compilation_cache_misses()->Increment();
-    return Handle<SharedFunctionInfo>::null();
   }
+  return result;
 }
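The lookup now returns an InfoVectorPair instead of a MaybeHandle<SharedFunctionInfo>, so a hit can carry a feedback vector cell alongside the shared function info, or arrive without one. A standalone analogue of that result type, with plain pointers standing in for the real handle and heap types (a sketch, not V8's definition):

#include <cassert>

struct SharedInfo {};  // hypothetical stand-in for SharedFunctionInfo
struct Cell {};        // hypothetical stand-in for the literals/vector cell

class InfoVectorPairSketch {
 public:
  InfoVectorPairSketch() : shared_(nullptr), vector_(nullptr) {}
  InfoVectorPairSketch(SharedInfo* shared, Cell* vector)
      : shared_(shared), vector_(vector) {}
  bool has_shared() const { return shared_ != nullptr; }
  bool has_vector() const { return vector_ != nullptr; }
  SharedInfo* shared() const { return shared_; }
  Cell* vector() const { return vector_; }

 private:
  SharedInfo* shared_;
  Cell* vector_;
};

int main() {
  InfoVectorPairSketch miss;  // neither field set: a cache miss
  assert(!miss.has_shared() && !miss.has_vector());
  SharedInfo info;
  InfoVectorPairSketch hit(&info, nullptr);  // hit without a vector
  assert(hit.has_shared() && !hit.has_vector());
  return 0;
}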
 
-
-void CompilationCacheScript::Put(Handle<String> source,
-                                 Handle<Context> context,
+void CompilationCacheScript::Put(Handle<String> source, Handle<Context> context,
                                  LanguageMode language_mode,
-                                 Handle<SharedFunctionInfo> function_info) {
+                                 Handle<SharedFunctionInfo> function_info,
+                                 Handle<Cell> literals) {
   HandleScope scope(isolate());
   Handle<CompilationCacheTable> table = GetFirstTable();
-  SetFirstTable(CompilationCacheTable::Put(table, source, context,
-                                           language_mode, function_info));
+  SetFirstTable(CompilationCacheTable::PutScript(
+      table, source, context, language_mode, function_info, literals));
 }
 
-
-MaybeHandle<SharedFunctionInfo> CompilationCacheEval::Lookup(
+InfoVectorPair CompilationCacheEval::Lookup(
     Handle<String> source, Handle<SharedFunctionInfo> outer_info,
-    LanguageMode language_mode, int scope_position) {
+    Handle<Context> native_context, LanguageMode language_mode, int position) {
   HandleScope scope(isolate());
   // Make sure not to leak the table into the surrounding handle
   // scope. Otherwise, we risk keeping old tables around even after
   // having cleared the cache.
-  Handle<Object> result = isolate()->factory()->undefined_value();
-  int generation;
-  for (generation = 0; generation < generations(); generation++) {
-    Handle<CompilationCacheTable> table = GetTable(generation);
-    result =
-        table->LookupEval(source, outer_info, language_mode, scope_position);
-    if (result->IsSharedFunctionInfo()) break;
-  }
-  if (result->IsSharedFunctionInfo()) {
-    Handle<SharedFunctionInfo> function_info =
-        Handle<SharedFunctionInfo>::cast(result);
-    if (generation != 0) {
-      Put(source, outer_info, function_info, scope_position);
-    }
+  InfoVectorPair result;
+  const int generation = 0;
+  DCHECK(generations() == 1);
+  Handle<CompilationCacheTable> table = GetTable(generation);
+  result = table->LookupEval(source, outer_info, native_context, language_mode,
+                             position);
+  if (result.has_shared()) {
     isolate()->counters()->compilation_cache_hits()->Increment();
-    return scope.CloseAndEscape(function_info);
   } else {
     isolate()->counters()->compilation_cache_misses()->Increment();
-    return MaybeHandle<SharedFunctionInfo>();
   }
+  return result;
 }
 
-
 void CompilationCacheEval::Put(Handle<String> source,
                                Handle<SharedFunctionInfo> outer_info,
                                Handle<SharedFunctionInfo> function_info,
-                               int scope_position) {
+                               Handle<Context> native_context,
+                               Handle<Cell> literals, int position) {
   HandleScope scope(isolate());
   Handle<CompilationCacheTable> table = GetFirstTable();
-  table = CompilationCacheTable::PutEval(table, source, outer_info,
-                                         function_info, scope_position);
+  table =
+      CompilationCacheTable::PutEval(table, source, outer_info, function_info,
+                                     native_context, literals, position);
   SetFirstTable(table);
 }
 
@@ -286,32 +272,33 @@
   script_.Remove(function_info);
 }
 
-
-MaybeHandle<SharedFunctionInfo> CompilationCache::LookupScript(
+InfoVectorPair CompilationCache::LookupScript(
     Handle<String> source, Handle<Object> name, int line_offset,
     int column_offset, ScriptOriginOptions resource_options,
     Handle<Context> context, LanguageMode language_mode) {
-  if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
+  InfoVectorPair empty_result;
+  if (!IsEnabled()) return empty_result;
 
   return script_.Lookup(source, name, line_offset, column_offset,
                         resource_options, context, language_mode);
 }
 
-
-MaybeHandle<SharedFunctionInfo> CompilationCache::LookupEval(
+InfoVectorPair CompilationCache::LookupEval(
     Handle<String> source, Handle<SharedFunctionInfo> outer_info,
-    Handle<Context> context, LanguageMode language_mode, int scope_position) {
-  if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
+    Handle<Context> context, LanguageMode language_mode, int position) {
+  InfoVectorPair result;
+  if (!IsEnabled()) return result;
 
-  MaybeHandle<SharedFunctionInfo> result;
   if (context->IsNativeContext()) {
-    result =
-        eval_global_.Lookup(source, outer_info, language_mode, scope_position);
+    result = eval_global_.Lookup(source, outer_info, context, language_mode,
+                                 position);
   } else {
-    DCHECK(scope_position != kNoSourcePosition);
-    result = eval_contextual_.Lookup(source, outer_info, language_mode,
-                                     scope_position);
+    DCHECK(position != kNoSourcePosition);
+    Handle<Context> native_context(context->native_context(), isolate());
+    result = eval_contextual_.Lookup(source, outer_info, native_context,
+                                     language_mode, position);
   }
+
   return result;
 }
 
@@ -323,30 +310,31 @@
   return reg_exp_.Lookup(source, flags);
 }
 
-
-void CompilationCache::PutScript(Handle<String> source,
-                                 Handle<Context> context,
+void CompilationCache::PutScript(Handle<String> source, Handle<Context> context,
                                  LanguageMode language_mode,
-                                 Handle<SharedFunctionInfo> function_info) {
+                                 Handle<SharedFunctionInfo> function_info,
+                                 Handle<Cell> literals) {
   if (!IsEnabled()) return;
 
-  script_.Put(source, context, language_mode, function_info);
+  script_.Put(source, context, language_mode, function_info, literals);
 }
 
-
 void CompilationCache::PutEval(Handle<String> source,
                                Handle<SharedFunctionInfo> outer_info,
                                Handle<Context> context,
                                Handle<SharedFunctionInfo> function_info,
-                               int scope_position) {
+                               Handle<Cell> literals, int position) {
   if (!IsEnabled()) return;
 
   HandleScope scope(isolate());
   if (context->IsNativeContext()) {
-    eval_global_.Put(source, outer_info, function_info, scope_position);
+    eval_global_.Put(source, outer_info, function_info, context, literals,
+                     position);
   } else {
-    DCHECK(scope_position != kNoSourcePosition);
-    eval_contextual_.Put(source, outer_info, function_info, scope_position);
+    DCHECK(position != kNoSourcePosition);
+    Handle<Context> native_context(context->native_context(), isolate());
+    eval_contextual_.Put(source, outer_info, function_info, native_context,
+                         literals, position);
   }
 }
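PutEval and LookupEval above route between the global and contextual eval sub-caches on context->IsNativeContext(), with the contextual path additionally keyed by the source position of the calling scope. A standalone sketch of that routing, assuming only the C++ standard library (the types and the int payload are simplified stand-ins):

#include <cassert>
#include <map>
#include <string>
#include <utility>

class EvalCacheRouter {
 public:
  void Put(const std::string& source, bool is_native_context, int position,
           int value) {
    if (is_native_context) {
      global_[source] = value;
    } else {
      // The position disambiguates evals of identical source text that
      // occur in different scopes.
      contextual_[{source, position}] = value;
    }
  }

  bool Lookup(const std::string& source, bool is_native_context, int position,
              int* value) {
    if (is_native_context) {
      auto it = global_.find(source);
      if (it == global_.end()) return false;
      *value = it->second;
      return true;
    }
    auto it = contextual_.find({source, position});
    if (it == contextual_.end()) return false;
    *value = it->second;
    return true;
  }

 private:
  std::map<std::string, int> global_;
  std::map<std::pair<std::string, int>, int> contextual_;
};

int main() {
  EvalCacheRouter cache;
  cache.Put("x + 1", true, 0, 1);    // eval at top level
  cache.Put("x + 1", false, 17, 2);  // same source inside a function scope
  int v = 0;
  assert(cache.Lookup("x + 1", true, 0, &v) && v == 1);
  assert(cache.Lookup("x + 1", false, 17, &v) && v == 2);
  return 0;
}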
 
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index 973673c..229fe07 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -6,12 +6,14 @@
 #define V8_COMPILATION_CACHE_H_
 
 #include "src/allocation.h"
-#include "src/handles.h"
 #include "src/objects.h"
 
 namespace v8 {
 namespace internal {
 
+template <typename T>
+class Handle;
+
 // The compilation cache consists of several generational sub-caches which use
 // this class as a base class. A sub-cache contains a compilation cache table
 // for each generation of the sub-cache. Since the same source code string has
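Note that the surrounding hunks shrink the script and eval sub-caches to a single generation (their constructors now hard-code 1, and Lookup no longer promotes hits into a younger generation), while the regexp sub-cache keeps kRegExpGenerations. A standalone sketch of the generational scheme this comment describes, assuming only the C++ standard library; the class and method names here are illustrative, not V8's:

#include <cassert>
#include <map>
#include <string>
#include <vector>

class GenerationalCache {
 public:
  explicit GenerationalCache(int generations) : tables_(generations) {}

  // New entries always land in the first (youngest) generation.
  void Put(const std::string& source, int value) { tables_[0][source] = value; }

  bool Lookup(const std::string& source, int* value) {
    for (auto& table : tables_) {
      auto it = table.find(source);
      if (it != table.end()) {
        *value = it->second;
        Put(source, *value);  // promote hits back to the first generation
        return true;
      }
    }
    return false;
  }

  // Aging shifts every table one generation older and drops the oldest,
  // so entries that are never looked up eventually expire.
  void Age() {
    for (size_t i = tables_.size() - 1; i > 0; i--) tables_[i] = tables_[i - 1];
    tables_[0].clear();
  }

 private:
  std::vector<std::map<std::string, int>> tables_;
};

int main() {
  GenerationalCache cache(2);
  cache.Put("src", 42);
  cache.Age();  // the entry survives one aging step in the older generation
  int v = 0;
  assert(cache.Lookup("src", &v) && v == 42);  // hit promotes it again
  cache.Age();
  cache.Age();  // without further hits, the entry expires
  assert(!cache.Lookup("src", &v));
  return 0;
}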
@@ -74,17 +76,16 @@
 // Sub-cache for scripts.
 class CompilationCacheScript : public CompilationSubCache {
  public:
-  CompilationCacheScript(Isolate* isolate, int generations);
+  explicit CompilationCacheScript(Isolate* isolate);
 
-  Handle<SharedFunctionInfo> Lookup(Handle<String> source, Handle<Object> name,
-                                    int line_offset, int column_offset,
-                                    ScriptOriginOptions resource_options,
-                                    Handle<Context> context,
-                                    LanguageMode language_mode);
-  void Put(Handle<String> source,
-           Handle<Context> context,
-           LanguageMode language_mode,
-           Handle<SharedFunctionInfo> function_info);
+  InfoVectorPair Lookup(Handle<String> source, Handle<Object> name,
+                        int line_offset, int column_offset,
+                        ScriptOriginOptions resource_options,
+                        Handle<Context> context, LanguageMode language_mode);
+
+  void Put(Handle<String> source, Handle<Context> context,
+           LanguageMode language_mode, Handle<SharedFunctionInfo> function_info,
+           Handle<Cell> literals);
 
  private:
   bool HasOrigin(Handle<SharedFunctionInfo> function_info, Handle<Object> name,
@@ -109,16 +110,17 @@
 // 4. The start position of the calling scope.
 class CompilationCacheEval: public CompilationSubCache {
  public:
-  CompilationCacheEval(Isolate* isolate, int generations)
-      : CompilationSubCache(isolate, generations) { }
+  explicit CompilationCacheEval(Isolate* isolate)
+      : CompilationSubCache(isolate, 1) {}
 
-  MaybeHandle<SharedFunctionInfo> Lookup(Handle<String> source,
-                                         Handle<SharedFunctionInfo> outer_info,
-                                         LanguageMode language_mode,
-                                         int scope_position);
+  InfoVectorPair Lookup(Handle<String> source,
+                        Handle<SharedFunctionInfo> outer_info,
+                        Handle<Context> native_context,
+                        LanguageMode language_mode, int position);
 
   void Put(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
-           Handle<SharedFunctionInfo> function_info, int scope_position);
+           Handle<SharedFunctionInfo> function_info,
+           Handle<Context> native_context, Handle<Cell> literals, int position);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
@@ -140,7 +142,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
 };
 
-
 // The compilation cache keeps shared function infos for compiled
 // scripts and evals. The shared function infos are looked up using
 // the source string as the key. For regular expressions the
@@ -150,17 +151,19 @@
   // Finds the script shared function info for a source
   // string. Returns an empty handle if the cache doesn't contain a
   // script for the given source string with the right origin.
-  MaybeHandle<SharedFunctionInfo> LookupScript(
-      Handle<String> source, Handle<Object> name, int line_offset,
-      int column_offset, ScriptOriginOptions resource_options,
-      Handle<Context> context, LanguageMode language_mode);
+  InfoVectorPair LookupScript(Handle<String> source, Handle<Object> name,
+                              int line_offset, int column_offset,
+                              ScriptOriginOptions resource_options,
+                              Handle<Context> context,
+                              LanguageMode language_mode);
 
   // Finds the shared function info for a source string for eval in a
   // given context.  Returns an empty handle if the cache doesn't
   // contain a script for the given source string.
-  MaybeHandle<SharedFunctionInfo> LookupEval(
-      Handle<String> source, Handle<SharedFunctionInfo> outer_info,
-      Handle<Context> context, LanguageMode language_mode, int scope_position);
+  InfoVectorPair LookupEval(Handle<String> source,
+                            Handle<SharedFunctionInfo> outer_info,
+                            Handle<Context> context, LanguageMode language_mode,
+                            int position);
 
   // Returns the regexp data associated with the given regexp if it
   // is in cache, otherwise an empty handle.
@@ -169,16 +172,17 @@
 
   // Associate the (source, kind) pair with the shared function
   // info. This may overwrite an existing mapping.
-  void PutScript(Handle<String> source,
-                 Handle<Context> context,
+  void PutScript(Handle<String> source, Handle<Context> context,
                  LanguageMode language_mode,
-                 Handle<SharedFunctionInfo> function_info);
+                 Handle<SharedFunctionInfo> function_info,
+                 Handle<Cell> literals);
 
   // Associate the (source, context->closure()->shared(), kind) triple
   // with the shared function info. This may overwrite an existing mapping.
   void PutEval(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
                Handle<Context> context,
-               Handle<SharedFunctionInfo> function_info, int scope_position);
+               Handle<SharedFunctionInfo> function_info, Handle<Cell> literals,
+               int position);
 
   // Associate the (source, flags) pair to the given regexp data.
   // This may overwrite an existing mapping.
diff --git a/src/compilation-info.cc b/src/compilation-info.cc
index 5c9fa58..b0dda6c 100644
--- a/src/compilation-info.cc
+++ b/src/compilation-info.cc
@@ -7,7 +7,9 @@
 #include "src/api.h"
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
+#include "src/debug/debug.h"
 #include "src/isolate.h"
+#include "src/objects-inl.h"
 #include "src/parsing/parse-info.h"
 #include "src/source-position.h"
 
@@ -51,10 +53,10 @@
   return parse_info_ && !parse_info_->shared_info().is_null();
 }
 
-CompilationInfo::CompilationInfo(ParseInfo* parse_info,
+CompilationInfo::CompilationInfo(Zone* zone, ParseInfo* parse_info,
                                  Handle<JSFunction> closure)
     : CompilationInfo(parse_info, {}, Code::ComputeFlags(Code::FUNCTION), BASE,
-                      parse_info->isolate(), parse_info->zone()) {
+                      parse_info->isolate(), zone) {
   closure_ = closure;
 
   // Compiling for the snapshot typically results in different code than
@@ -68,8 +70,10 @@
   if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
   if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
 
-  if (FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
-      FLAG_turbo_profiling || isolate_->is_profiling()) {
+  // Collect source positions for optimized code when profiling or when the
+  // debugger is active, trading additional memory consumption for more
+  // precise source positions.
+  if (isolate_->NeedsSourcePositionsForProfiling()) {
     MarkAsSourcePositionsEnabled();
   }
 }
@@ -104,7 +108,6 @@
     shared_info()->DisableOptimization(bailout_reason());
   }
   dependencies()->Rollback();
-  delete deferred_handles_;
 }
 
 int CompilationInfo::num_parameters() const {
@@ -121,15 +124,28 @@
 // profiler, so they trigger their own optimization when they're called
 // for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
 bool CompilationInfo::ShouldSelfOptimize() {
-  return FLAG_crankshaft &&
+  return FLAG_opt && FLAG_crankshaft &&
          !(literal()->flags() & AstProperties::kDontSelfOptimize) &&
          !literal()->dont_optimize() &&
          literal()->scope()->AllowsLazyCompilation() &&
          !shared_info()->optimization_disabled();
 }
 
+void CompilationInfo::set_deferred_handles(
+    std::shared_ptr<DeferredHandles> deferred_handles) {
+  DCHECK(deferred_handles_.get() == nullptr);
+  deferred_handles_.swap(deferred_handles);
+}
+
+void CompilationInfo::set_deferred_handles(DeferredHandles* deferred_handles) {
+  DCHECK(deferred_handles_.get() == nullptr);
+  deferred_handles_.reset(deferred_handles);
+}
+
 void CompilationInfo::ReopenHandlesInNewHandleScope() {
-  closure_ = Handle<JSFunction>(*closure_);
+  if (!closure_.is_null()) {
+    closure_ = Handle<JSFunction>(*closure_);
+  }
 }
 
 bool CompilationInfo::has_simple_parameters() {
@@ -163,11 +179,13 @@
 #undef CASE_KIND
       return StackFrame::STUB;
     case Code::WASM_FUNCTION:
-      return StackFrame::WASM;
+      return StackFrame::WASM_COMPILED;
     case Code::JS_TO_WASM_FUNCTION:
       return StackFrame::JS_TO_WASM;
     case Code::WASM_TO_JS_FUNCTION:
       return StackFrame::WASM_TO_JS;
+    case Code::WASM_INTERPRETER_ENTRY:
+      return StackFrame::WASM_INTERPRETER_ENTRY;
     default:
       UNIMPLEMENTED();
       return StackFrame::NONE;
diff --git a/src/compilation-info.h b/src/compilation-info.h
index 77b9e34..a3938d2 100644
--- a/src/compilation-info.h
+++ b/src/compilation-info.h
@@ -9,6 +9,7 @@
 
 #include "src/compilation-dependencies.h"
 #include "src/frames.h"
+#include "src/globals.h"
 #include "src/handles.h"
 #include "src/objects.h"
 #include "src/source-position-table.h"
@@ -28,7 +29,7 @@
 
 // CompilationInfo encapsulates some information known at compile time.  It
 // is constructed based on the resources available at compile-time.
-class CompilationInfo final {
+class V8_EXPORT_PRIVATE CompilationInfo final {
  public:
   // Various configuration flags for a compilation, as well as some properties
   // of the compiled code produced by a compilation.
@@ -49,10 +50,11 @@
     kSourcePositionsEnabled = 1 << 13,
     kBailoutOnUninitialized = 1 << 14,
     kOptimizeFromBytecode = 1 << 15,
-    kTypeFeedbackEnabled = 1 << 16,
+    kLoopPeelingEnabled = 1 << 16,
   };
 
-  CompilationInfo(ParseInfo* parse_info, Handle<JSFunction> closure);
+  CompilationInfo(Zone* zone, ParseInfo* parse_info,
+                  Handle<JSFunction> closure);
   CompilationInfo(Vector<const char> debug_name, Isolate* isolate, Zone* zone,
                   Code::Flags code_flags);
   ~CompilationInfo();
@@ -141,12 +143,6 @@
     return GetFlag(kDeoptimizationEnabled);
   }
 
-  void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
-
-  bool is_type_feedback_enabled() const {
-    return GetFlag(kTypeFeedbackEnabled);
-  }
-
   void MarkAsAccessorInliningEnabled() { SetFlag(kAccessorInliningEnabled); }
 
   bool is_accessor_inlining_enabled() const {
@@ -179,6 +175,10 @@
     return GetFlag(kOptimizeFromBytecode);
   }
 
+  void MarkAsLoopPeelingEnabled() { SetFlag(kLoopPeelingEnabled); }
+
+  bool is_loop_peeling_enabled() const { return GetFlag(kLoopPeelingEnabled); }
+
   bool GeneratePreagedPrologue() const {
     // Generate a pre-aged prologue if we are optimizing for size, which
     // will make code flushing more aggressive. Only apply to Code::FUNCTION,
@@ -210,6 +210,7 @@
   // Accessors for the different compilation modes.
   bool IsOptimizing() const { return mode_ == OPTIMIZE; }
   bool IsStub() const { return mode_ == STUB; }
+  bool IsWasm() const { return output_code_kind() == Code::WASM_FUNCTION; }
   void SetOptimizing();
   void SetOptimizingForOsr(BailoutId osr_ast_id, JavaScriptFrame* osr_frame) {
     SetOptimizing();
@@ -232,9 +233,10 @@
   // Determines whether or not to insert a self-optimization header.
   bool ShouldSelfOptimize();
 
-  void set_deferred_handles(DeferredHandles* deferred_handles) {
-    DCHECK(deferred_handles_ == NULL);
-    deferred_handles_ = deferred_handles;
+  void set_deferred_handles(std::shared_ptr<DeferredHandles> deferred_handles);
+  void set_deferred_handles(DeferredHandles* deferred_handles);
+  std::shared_ptr<DeferredHandles> deferred_handles() {
+    return deferred_handles_;
   }
 
   void ReopenHandlesInNewHandleScope();
@@ -364,7 +366,7 @@
   // CompilationInfo allocates.
   Zone* zone_;
 
-  DeferredHandles* deferred_handles_;
+  std::shared_ptr<DeferredHandles> deferred_handles_;
 
   // Dependencies for this compilation, e.g. stable maps.
   CompilationDependencies dependencies_;
diff --git a/src/compilation-statistics.cc b/src/compilation-statistics.cc
index d4ca39d..16ab3b3 100644
--- a/src/compilation-statistics.cc
+++ b/src/compilation-statistics.cc
@@ -14,6 +14,8 @@
 void CompilationStatistics::RecordPhaseStats(const char* phase_kind_name,
                                              const char* phase_name,
                                              const BasicStats& stats) {
+  base::LockGuard<base::Mutex> guard(&record_mutex_);
+
   std::string phase_name_str(phase_name);
   auto it = phase_map_.find(phase_name_str);
   if (it == phase_map_.end()) {
@@ -26,6 +28,8 @@
 
 void CompilationStatistics::RecordPhaseKindStats(const char* phase_kind_name,
                                                  const BasicStats& stats) {
+  base::LockGuard<base::Mutex> guard(&record_mutex_);
+
   std::string phase_kind_name_str(phase_kind_name);
   auto it = phase_kind_map_.find(phase_kind_name_str);
   if (it == phase_kind_map_.end()) {
@@ -39,6 +43,8 @@
 
 void CompilationStatistics::RecordTotalStats(size_t source_size,
                                              const BasicStats& stats) {
+  base::LockGuard<base::Mutex> guard(&record_mutex_);
+
   total_stats_.source_size_ += source_size;
   total_stats_.Accumulate(stats);
 }
@@ -128,10 +134,10 @@
   }
 
   if (!ps.machine_output) WriteHeader(os);
-  for (auto phase_kind_it : sorted_phase_kinds) {
+  for (const auto& phase_kind_it : sorted_phase_kinds) {
     const auto& phase_kind_name = phase_kind_it->first;
     if (!ps.machine_output) {
-      for (auto phase_it : sorted_phases) {
+      for (const auto& phase_it : sorted_phases) {
         const auto& phase_stats = phase_it->second;
         if (phase_stats.phase_kind_name_ != phase_kind_name) continue;
         const auto& phase_name = phase_it->first;
diff --git a/src/compilation-statistics.h b/src/compilation-statistics.h
index ceffc2e..388117b 100644
--- a/src/compilation-statistics.h
+++ b/src/compilation-statistics.h
@@ -80,6 +80,7 @@
   TotalStats total_stats_;
   PhaseKindMap phase_kind_map_;
   PhaseMap phase_map_;
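+  // Guards the recording of phase stats, which can happen concurrently from
+  // multiple compiler threads.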
+  base::Mutex record_mutex_;
 
   DISALLOW_COPY_AND_ASSIGN(CompilationStatistics);
 };
diff --git a/src/compiler-dispatcher/compiler-dispatcher-job.cc b/src/compiler-dispatcher/compiler-dispatcher-job.cc
index b87a4a5..56d166f 100644
--- a/src/compiler-dispatcher/compiler-dispatcher-job.cc
+++ b/src/compiler-dispatcher/compiler-dispatcher-job.cc
@@ -8,6 +8,7 @@
 #include "src/compilation-info.h"
 #include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
 #include "src/compiler.h"
+#include "src/flags.h"
 #include "src/global-handles.h"
 #include "src/isolate.h"
 #include "src/objects-inl.h"
@@ -15,26 +16,104 @@
 #include "src/parsing/parser.h"
 #include "src/parsing/scanner-character-streams.h"
 #include "src/unicode-cache.h"
-#include "src/zone/zone.h"
+#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
 
+namespace {
+
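+// Adapters that expose raw character data as external string resources, so
+// the scanner can stream source from non-moving heap strings or
+// zone-allocated copies without copying it again.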
+class OneByteWrapper : public v8::String::ExternalOneByteStringResource {
+ public:
+  OneByteWrapper(const void* data, int length) : data_(data), length_(length) {}
+  ~OneByteWrapper() override = default;
+
+  const char* data() const override {
+    return reinterpret_cast<const char*>(data_);
+  }
+
+  size_t length() const override { return static_cast<size_t>(length_); }
+
+ private:
+  const void* data_;
+  int length_;
+
+  DISALLOW_COPY_AND_ASSIGN(OneByteWrapper);
+};
+
+class TwoByteWrapper : public v8::String::ExternalStringResource {
+ public:
+  TwoByteWrapper(const void* data, int length) : data_(data), length_(length) {}
+  ~TwoByteWrapper() override = default;
+
+  const uint16_t* data() const override {
+    return reinterpret_cast<const uint16_t*>(data_);
+  }
+
+  size_t length() const override { return static_cast<size_t>(length_); }
+
+ private:
+  const void* data_;
+  int length_;
+
+  DISALLOW_COPY_AND_ASSIGN(TwoByteWrapper);
+};
+
+}  // namespace
+
 CompilerDispatcherJob::CompilerDispatcherJob(Isolate* isolate,
+                                             CompilerDispatcherTracer* tracer,
                                              Handle<SharedFunctionInfo> shared,
                                              size_t max_stack_size)
-    : isolate_(isolate),
-      tracer_(isolate_->compiler_dispatcher_tracer()),
+    : status_(CompileJobStatus::kInitial),
+      isolate_(isolate),
+      tracer_(tracer),
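+      // Keep the current context alive as a global handle so the dispatcher
+      // can re-enter it when stepping this job on the main thread.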
+      context_(Handle<Context>::cast(
+          isolate_->global_handles()->Create(isolate->context()))),
       shared_(Handle<SharedFunctionInfo>::cast(
           isolate_->global_handles()->Create(*shared))),
       max_stack_size_(max_stack_size),
-      can_compile_on_background_thread_(false) {
+      trace_compiler_dispatcher_jobs_(FLAG_trace_compiler_dispatcher_jobs) {
+  DCHECK(!shared_->is_toplevel());
   HandleScope scope(isolate_);
-  DCHECK(!shared_->outer_scope_info()->IsTheHole(isolate_));
   Handle<Script> script(Script::cast(shared_->script()), isolate_);
   Handle<String> source(String::cast(script->source()), isolate_);
-  can_parse_on_background_thread_ =
-      source->IsExternalTwoByteString() || source->IsExternalOneByteString();
+  if (trace_compiler_dispatcher_jobs_) {
+    PrintF("CompilerDispatcherJob[%p] created for ", static_cast<void*>(this));
+    shared_->ShortPrint();
+    PrintF(" in initial state.\n");
+  }
+}
+
+CompilerDispatcherJob::CompilerDispatcherJob(
+    Isolate* isolate, CompilerDispatcherTracer* tracer, Handle<Script> script,
+    Handle<SharedFunctionInfo> shared, FunctionLiteral* literal,
+    std::shared_ptr<Zone> parse_zone,
+    std::shared_ptr<DeferredHandles> parse_handles,
+    std::shared_ptr<DeferredHandles> compile_handles, size_t max_stack_size)
+    : status_(CompileJobStatus::kAnalyzed),
+      isolate_(isolate),
+      tracer_(tracer),
+      context_(Handle<Context>::cast(
+          isolate_->global_handles()->Create(isolate->context()))),
+      shared_(Handle<SharedFunctionInfo>::cast(
+          isolate_->global_handles()->Create(*shared))),
+      max_stack_size_(max_stack_size),
+      parse_info_(new ParseInfo(shared_)),
+      parse_zone_(parse_zone),
+      compile_info_(new CompilationInfo(parse_info_->zone(), parse_info_.get(),
+                                        Handle<JSFunction>::null())),
+      trace_compiler_dispatcher_jobs_(FLAG_trace_compiler_dispatcher_jobs) {
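+  // Adopt the parse result and the deferred handles produced during parsing
+  // and analysis, so this job can proceed directly to compilation.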
+  parse_info_->set_literal(literal);
+  parse_info_->set_script(script);
+  parse_info_->set_deferred_handles(parse_handles);
+  compile_info_->set_deferred_handles(compile_handles);
+
+  if (trace_compiler_dispatcher_jobs_) {
+    PrintF("CompilerDispatcherJob[%p] created for ", static_cast<void*>(this));
+    shared_->ShortPrint();
+    PrintF(" in Analyzed state.\n");
+  }
 }
 
 CompilerDispatcherJob::~CompilerDispatcherJob() {
@@ -42,31 +121,97 @@
   DCHECK(status_ == CompileJobStatus::kInitial ||
          status_ == CompileJobStatus::kDone);
   i::GlobalHandles::Destroy(Handle<Object>::cast(shared_).location());
+  i::GlobalHandles::Destroy(Handle<Object>::cast(context_).location());
+}
+
+bool CompilerDispatcherJob::IsAssociatedWith(
+    Handle<SharedFunctionInfo> shared) const {
+  return *shared_ == *shared;
 }
 
 void CompilerDispatcherJob::PrepareToParseOnMainThread() {
   DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
   DCHECK(status() == CompileJobStatus::kInitial);
   COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToParse);
+  if (trace_compiler_dispatcher_jobs_) {
+    PrintF("CompilerDispatcherJob[%p]: Preparing to parse\n",
+           static_cast<void*>(this));
+  }
   HandleScope scope(isolate_);
   unicode_cache_.reset(new UnicodeCache());
-  zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
   Handle<Script> script(Script::cast(shared_->script()), isolate_);
   DCHECK(script->type() != Script::TYPE_NATIVE);
 
   Handle<String> source(String::cast(script->source()), isolate_);
+  parse_info_.reset(new ParseInfo(isolate_->allocator()));
   if (source->IsExternalTwoByteString() || source->IsExternalOneByteString()) {
     character_stream_.reset(ScannerStream::For(
         source, shared_->start_position(), shared_->end_position()));
   } else {
     source = String::Flatten(source);
-    // Have to globalize the reference here, so it survives between function
-    // calls.
-    source_ = Handle<String>::cast(isolate_->global_handles()->Create(*source));
-    character_stream_.reset(ScannerStream::For(
-        source_, shared_->start_position(), shared_->end_position()));
+    const void* data;
+    int offset = 0;
+    int length = source->length();
+
+    // Objects in lo_space don't move, so we can just read the contents from
+    // any thread.
+    if (isolate_->heap()->lo_space()->Contains(*source)) {
+      // We need to globalize the handle to the flattened string here, in
+      // case it's not referenced from anywhere else.
+      source_ =
+          Handle<String>::cast(isolate_->global_handles()->Create(*source));
+      DisallowHeapAllocation no_allocation;
+      String::FlatContent content = source->GetFlatContent();
+      DCHECK(content.IsFlat());
+      data =
+          content.IsOneByte()
+              ? reinterpret_cast<const void*>(content.ToOneByteVector().start())
+              : reinterpret_cast<const void*>(content.ToUC16Vector().start());
+    } else {
+      // Otherwise, copy the part of the string we'll parse into the zone.
+      length = (shared_->end_position() - shared_->start_position());
+      offset = shared_->start_position();
+
+      int byte_len = length * (source->IsOneByteRepresentation() ? 1 : 2);
+      data = parse_info_->zone()->New(byte_len);
+
+      DisallowHeapAllocation no_allocation;
+      String::FlatContent content = source->GetFlatContent();
+      DCHECK(content.IsFlat());
+      if (content.IsOneByte()) {
+        MemCopy(const_cast<void*>(data),
+                &content.ToOneByteVector().at(shared_->start_position()),
+                byte_len);
+      } else {
+        MemCopy(const_cast<void*>(data),
+                &content.ToUC16Vector().at(shared_->start_position()),
+                byte_len);
+      }
+    }
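+    // Expose the raw data as an external V8 string; source_wrapper_ keeps
+    // the underlying resource alive.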
+    Handle<String> wrapper;
+    if (source->IsOneByteRepresentation()) {
+      ExternalOneByteString::Resource* resource =
+          new OneByteWrapper(data, length);
+      source_wrapper_.reset(resource);
+      wrapper = isolate_->factory()
+                    ->NewExternalStringFromOneByte(resource)
+                    .ToHandleChecked();
+    } else {
+      ExternalTwoByteString::Resource* resource =
+          new TwoByteWrapper(data, length);
+      source_wrapper_.reset(resource);
+      wrapper = isolate_->factory()
+                    ->NewExternalStringFromTwoByte(resource)
+                    .ToHandleChecked();
+    }
+    wrapper_ =
+        Handle<String>::cast(isolate_->global_handles()->Create(*wrapper));
+
+    character_stream_.reset(
+        ScannerStream::For(wrapper_, shared_->start_position() - offset,
+                           shared_->end_position() - offset));
   }
-  parse_info_.reset(new ParseInfo(zone_.get()));
   parse_info_->set_isolate(isolate_);
   parse_info_->set_character_stream(character_stream_.get());
   parse_info_->set_hash_seed(isolate_->heap()->HashSeed());
@@ -76,14 +221,15 @@
   parse_info_->set_end_position(shared_->end_position());
   parse_info_->set_unicode_cache(unicode_cache_.get());
   parse_info_->set_language_mode(shared_->language_mode());
+  parse_info_->set_function_literal_id(shared_->function_literal_id());
 
   parser_.reset(new Parser(parse_info_.get()));
-  Handle<ScopeInfo> outer_scope_info(
-      handle(ScopeInfo::cast(shared_->outer_scope_info())));
-  parser_->DeserializeScopeChain(parse_info_.get(),
-                                 outer_scope_info->length() > 0
-                                     ? MaybeHandle<ScopeInfo>(outer_scope_info)
-                                     : MaybeHandle<ScopeInfo>());
+  MaybeHandle<ScopeInfo> outer_scope_info;
+  if (!shared_->outer_scope_info()->IsTheHole(isolate_) &&
+      ScopeInfo::cast(shared_->outer_scope_info())->length() > 0) {
+    outer_scope_info = handle(ScopeInfo::cast(shared_->outer_scope_info()));
+  }
+  parser_->DeserializeScopeChain(parse_info_.get(), outer_scope_info);
 
   Handle<String> name(String::cast(shared_->name()));
   parse_info_->set_function_name(
@@ -92,21 +238,17 @@
 }
 
 void CompilerDispatcherJob::Parse() {
-  DCHECK(can_parse_on_background_thread_ ||
-         ThreadId::Current().Equals(isolate_->thread_id()));
   DCHECK(status() == CompileJobStatus::kReadyToParse);
   COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(
       tracer_, kParse,
       parse_info_->end_position() - parse_info_->start_position());
+  if (trace_compiler_dispatcher_jobs_) {
+    PrintF("CompilerDispatcherJob[%p]: Parsing\n", static_cast<void*>(this));
+  }
 
   DisallowHeapAllocation no_allocation;
   DisallowHandleAllocation no_handles;
-  std::unique_ptr<DisallowHandleDereference> no_deref;
-  // If we can't parse on a background thread, we need to be able to deref the
-  // source string.
-  if (can_parse_on_background_thread_) {
-    no_deref.reset(new DisallowHandleDereference());
-  }
+  DisallowHandleDereference no_deref;
 
   // Nullify the Isolate temporarily so that the parser doesn't accidentally
   // use it.
@@ -126,33 +268,44 @@
   DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
   DCHECK(status() == CompileJobStatus::kParsed);
   COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalizeParsing);
+  if (trace_compiler_dispatcher_jobs_) {
+    PrintF("CompilerDispatcherJob[%p]: Finalizing parsing\n",
+           static_cast<void*>(this));
+  }
 
   if (!source_.is_null()) {
     i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
     source_ = Handle<String>::null();
   }
+  if (!wrapper_.is_null()) {
+    i::GlobalHandles::Destroy(Handle<Object>::cast(wrapper_).location());
+    wrapper_ = Handle<String>::null();
+  }
 
+  Handle<Script> script(Script::cast(shared_->script()), isolate_);
+  parse_info_->set_script(script);
   if (parse_info_->literal() == nullptr) {
+    parser_->ReportErrors(isolate_, script);
     status_ = CompileJobStatus::kFailed;
   } else {
-    status_ = CompileJobStatus::kReadyToAnalyse;
+    status_ = CompileJobStatus::kReadyToAnalyze;
   }
+  parser_->UpdateStatistics(isolate_, script);
 
   DeferredHandleScope scope(isolate_);
   {
-    Handle<Script> script(Script::cast(shared_->script()), isolate_);
+    parse_info_->ReopenHandlesInNewHandleScope();
 
-    parse_info_->set_script(script);
-    Handle<ScopeInfo> outer_scope_info(
-        handle(ScopeInfo::cast(shared_->outer_scope_info())));
-    if (outer_scope_info->length() > 0) {
+    if (!shared_->outer_scope_info()->IsTheHole(isolate_) &&
+        ScopeInfo::cast(shared_->outer_scope_info())->length() > 0) {
+      Handle<ScopeInfo> outer_scope_info(
+          handle(ScopeInfo::cast(shared_->outer_scope_info())));
       parse_info_->set_outer_scope_info(outer_scope_info);
     }
     parse_info_->set_shared_info(shared_);
 
-    // Do the parsing tasks which need to be done on the main thread. This
-    // will also handle parse errors.
-    parser_->Internalize(isolate_, script, parse_info_->literal() == nullptr);
+    // Internalize ast values on the main thread.
+    parse_info_->ast_value_factory()->Internalize(isolate_);
     parser_->HandleSourceURLComments(isolate_, script);
 
     parse_info_->set_character_stream(nullptr);
@@ -161,44 +314,61 @@
     unicode_cache_.reset();
     character_stream_.reset();
   }
-  handles_from_parsing_.reset(scope.Detach());
+  parse_info_->set_deferred_handles(scope.Detach());
+
+  return status_ != CompileJobStatus::kFailed;
+}
+
+bool CompilerDispatcherJob::AnalyzeOnMainThread() {
+  DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+  DCHECK(status() == CompileJobStatus::kReadyToAnalyze);
+  COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kAnalyze);
+  if (trace_compiler_dispatcher_jobs_) {
+    PrintF("CompilerDispatcherJob[%p]: Analyzing\n", static_cast<void*>(this));
+  }
+
+  compile_info_.reset(new CompilationInfo(
+      parse_info_->zone(), parse_info_.get(), Handle<JSFunction>::null()));
+
+  DeferredHandleScope scope(isolate_);
+  {
+    if (Compiler::Analyze(parse_info_.get())) {
+      status_ = CompileJobStatus::kAnalyzed;
+    } else {
+      status_ = CompileJobStatus::kFailed;
+      if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
+    }
+  }
+  compile_info_->set_deferred_handles(scope.Detach());
 
   return status_ != CompileJobStatus::kFailed;
 }
 
 bool CompilerDispatcherJob::PrepareToCompileOnMainThread() {
   DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
-  DCHECK(status() == CompileJobStatus::kReadyToAnalyse);
+  DCHECK(status() == CompileJobStatus::kAnalyzed);
   COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToCompile);
 
-  compile_info_.reset(
-      new CompilationInfo(parse_info_.get(), Handle<JSFunction>::null()));
-
-  DeferredHandleScope scope(isolate_);
-  if (Compiler::Analyze(parse_info_.get())) {
-    compile_job_.reset(
-        Compiler::PrepareUnoptimizedCompilationJob(compile_info_.get()));
-  }
-  compile_info_->set_deferred_handles(scope.Detach());
-
+  compile_job_.reset(
+      Compiler::PrepareUnoptimizedCompilationJob(compile_info_.get()));
   if (!compile_job_.get()) {
     if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
     status_ = CompileJobStatus::kFailed;
     return false;
   }
 
-  can_compile_on_background_thread_ =
-      compile_job_->can_execute_on_background_thread();
+  CHECK(compile_job_->can_execute_on_background_thread());
   status_ = CompileJobStatus::kReadyToCompile;
   return true;
 }
 
 void CompilerDispatcherJob::Compile() {
   DCHECK(status() == CompileJobStatus::kReadyToCompile);
-  DCHECK(can_compile_on_background_thread_ ||
-         ThreadId::Current().Equals(isolate_->thread_id()));
   COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(
       tracer_, kCompile, parse_info_->literal()->ast_node_count());
+  if (trace_compiler_dispatcher_jobs_) {
+    PrintF("CompilerDispatcherJob[%p]: Compiling\n", static_cast<void*>(this));
+  }
 
   // Disallowing handle dereference and heap access is dealt with in
   // CompilationJob::ExecuteJob.
@@ -218,19 +388,25 @@
   DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
   DCHECK(status() == CompileJobStatus::kCompiled);
   COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalizeCompiling);
-
-  if (compile_job_->state() == CompilationJob::State::kFailed ||
-      !Compiler::FinalizeCompilationJob(compile_job_.release())) {
-    if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
-    status_ = CompileJobStatus::kFailed;
-    return false;
+  if (trace_compiler_dispatcher_jobs_) {
+    PrintF("CompilerDispatcherJob[%p]: Finalizing compiling\n",
+           static_cast<void*>(this));
   }
 
-  zone_.reset();
-  parse_info_.reset();
-  compile_info_.reset();
+  {
+    HandleScope scope(isolate_);
+    if (compile_job_->state() == CompilationJob::State::kFailed ||
+        !Compiler::FinalizeCompilationJob(compile_job_.release())) {
+      if (!isolate_->has_pending_exception()) isolate_->StackOverflow();
+      status_ = CompileJobStatus::kFailed;
+      return false;
+    }
+  }
+
   compile_job_.reset();
-  handles_from_parsing_.reset();
+  compile_info_.reset();
+  parse_zone_.reset();
+  parse_info_.reset();
 
   status_ = CompileJobStatus::kDone;
   return true;
@@ -239,22 +415,68 @@
 void CompilerDispatcherJob::ResetOnMainThread() {
   DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
 
+  if (trace_compiler_dispatcher_jobs_) {
+    PrintF("CompilerDispatcherJob[%p]: Resetting\n", static_cast<void*>(this));
+  }
+
+  compile_job_.reset();
+  compile_info_.reset();
+  parse_zone_.reset();
   parser_.reset();
   unicode_cache_.reset();
   character_stream_.reset();
   parse_info_.reset();
-  zone_.reset();
-  handles_from_parsing_.reset();
-  compile_info_.reset();
-  compile_job_.reset();
 
   if (!source_.is_null()) {
     i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
     source_ = Handle<String>::null();
   }
+  if (!wrapper_.is_null()) {
+    i::GlobalHandles::Destroy(Handle<Object>::cast(wrapper_).location());
+    wrapper_ = Handle<String>::null();
+  }
 
   status_ = CompileJobStatus::kInitial;
 }
 
+double CompilerDispatcherJob::EstimateRuntimeOfNextStepInMs() const {
+  switch (status_) {
+    case CompileJobStatus::kInitial:
+      return tracer_->EstimatePrepareToParseInMs();
+
+    case CompileJobStatus::kReadyToParse:
+      return tracer_->EstimateParseInMs(parse_info_->end_position() -
+                                        parse_info_->start_position());
+
+    case CompileJobStatus::kParsed:
+      return tracer_->EstimateFinalizeParsingInMs();
+
+    case CompileJobStatus::kReadyToAnalyze:
+      return tracer_->EstimateAnalyzeInMs();
+
+    case CompileJobStatus::kAnalyzed:
+      return tracer_->EstimatePrepareToCompileInMs();
+
+    case CompileJobStatus::kReadyToCompile:
+      return tracer_->EstimateCompileInMs(
+          parse_info_->literal()->ast_node_count());
+
+    case CompileJobStatus::kCompiled:
+      return tracer_->EstimateFinalizeCompilingInMs();
+
+    case CompileJobStatus::kFailed:
+    case CompileJobStatus::kDone:
+      return 0.0;
+  }
+
+  UNREACHABLE();
+  return 0.0;
+}
+
+void CompilerDispatcherJob::ShortPrint() {
+  DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+  shared_->ShortPrint();
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler-dispatcher/compiler-dispatcher-job.h b/src/compiler-dispatcher/compiler-dispatcher-job.h
index 7f4c6ce..aea4847 100644
--- a/src/compiler-dispatcher/compiler-dispatcher-job.h
+++ b/src/compiler-dispatcher/compiler-dispatcher-job.h
@@ -7,6 +7,7 @@
 
 #include <memory>
 
+#include "include/v8.h"
 #include "src/base/macros.h"
 #include "src/globals.h"
 #include "src/handles.h"
@@ -15,9 +16,12 @@
 namespace v8 {
 namespace internal {
 
+class AstValueFactory;
 class CompilerDispatcherTracer;
 class CompilationInfo;
 class CompilationJob;
+class DeferredHandles;
+class FunctionLiteral;
 class Isolate;
 class ParseInfo;
 class Parser;
@@ -25,13 +29,13 @@
 class String;
 class UnicodeCache;
 class Utf16CharacterStream;
-class Zone;
 
 enum class CompileJobStatus {
   kInitial,
   kReadyToParse,
   kParsed,
-  kReadyToAnalyse,
+  kReadyToAnalyze,
+  kAnalyzed,
   kReadyToCompile,
   kCompiled,
   kFailed,
@@ -40,19 +44,28 @@
 
 class V8_EXPORT_PRIVATE CompilerDispatcherJob {
  public:
-  CompilerDispatcherJob(Isolate* isolate, Handle<SharedFunctionInfo> shared,
+  // Creates a CompilerDispatcherJob in the initial state.
+  CompilerDispatcherJob(Isolate* isolate, CompilerDispatcherTracer* tracer,
+                        Handle<SharedFunctionInfo> shared,
+                        size_t max_stack_size);
+  // Creates a CompilerDispatcherJob in the analyzed state.
+  CompilerDispatcherJob(Isolate* isolate, CompilerDispatcherTracer* tracer,
+                        Handle<Script> script,
+                        Handle<SharedFunctionInfo> shared,
+                        FunctionLiteral* literal,
+                        std::shared_ptr<Zone> parse_zone,
+                        std::shared_ptr<DeferredHandles> parse_handles,
+                        std::shared_ptr<DeferredHandles> compile_handles,
                         size_t max_stack_size);
   ~CompilerDispatcherJob();
 
   CompileJobStatus status() const { return status_; }
-  bool can_parse_on_background_thread() const {
-    return can_parse_on_background_thread_;
-  }
-  // Should only be called after kReadyToCompile.
-  bool can_compile_on_background_thread() const {
-    DCHECK(compile_job_.get());
-    return can_compile_on_background_thread_;
-  }
+
+  Context* context() { return *context_; }
+
+  // Returns true if this CompilerDispatcherJob was created for the given
+  // function.
+  bool IsAssociatedWith(Handle<SharedFunctionInfo> shared) const;
 
   // Transition from kInitial to kReadyToParse.
   void PrepareToParseOnMainThread();
@@ -60,11 +73,15 @@
   // Transition from kReadyToParse to kParsed.
   void Parse();
 
-  // Transition from kParsed to kReadyToAnalyse (or kFailed). Returns false
+  // Transition from kParsed to kReadyToAnalyze (or kFailed). Returns false
   // when transitioning to kFailed. In that case, an exception is pending.
   bool FinalizeParsingOnMainThread();
 
-  // Transition from kReadyToAnalyse to kReadyToCompile (or kFailed). Returns
+  // Transition from kReadyToAnalyze to kAnalyzed (or kFailed). Returns
+  // false when transitioning to kFailed. In that case, an exception is pending.
+  bool AnalyzeOnMainThread();
+
+  // Transition from kAnalyzed to kReadyToCompile (or kFailed). Returns
   // false when transitioning to kFailed. In that case, an exception is pending.
   bool PrepareToCompileOnMainThread();
 
@@ -78,30 +95,40 @@
   // Transition from any state to kInitial and free all resources.
   void ResetOnMainThread();
 
+  // Estimate how long the next step will take using the tracer.
+  double EstimateRuntimeOfNextStepInMs() const;
+
+  // Even though the name does not imply this, ShortPrint() must only be invoked
+  // on the main thread.
+  void ShortPrint();
+
  private:
   FRIEND_TEST(CompilerDispatcherJobTest, ScopeChain);
 
-  CompileJobStatus status_ = CompileJobStatus::kInitial;
+  CompileJobStatus status_;
   Isolate* isolate_;
   CompilerDispatcherTracer* tracer_;
+  Handle<Context> context_;            // Global handle.
   Handle<SharedFunctionInfo> shared_;  // Global handle.
   Handle<String> source_;        // Global handle.
+  Handle<String> wrapper_;       // Global handle.
+  std::unique_ptr<v8::String::ExternalStringResourceBase> source_wrapper_;
   size_t max_stack_size_;
 
   // Members required for parsing.
   std::unique_ptr<UnicodeCache> unicode_cache_;
-  std::unique_ptr<Zone> zone_;
   std::unique_ptr<Utf16CharacterStream> character_stream_;
   std::unique_ptr<ParseInfo> parse_info_;
   std::unique_ptr<Parser> parser_;
-  std::unique_ptr<DeferredHandles> handles_from_parsing_;
+
+  // Members required for compiling a parsed function.
+  std::shared_ptr<Zone> parse_zone_;
 
   // Members required for compiling.
   std::unique_ptr<CompilationInfo> compile_info_;
   std::unique_ptr<CompilationJob> compile_job_;
 
-  bool can_parse_on_background_thread_;
-  bool can_compile_on_background_thread_;
+  bool trace_compiler_dispatcher_jobs_;
 
   DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherJob);
 };
diff --git a/src/compiler-dispatcher/compiler-dispatcher-tracer.cc b/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
index f8af05f..d98209b 100644
--- a/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
+++ b/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
@@ -5,6 +5,7 @@
 #include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
 
 #include "src/isolate.h"
+#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
@@ -16,17 +17,14 @@
          static_cast<double>(base::Time::kMillisecondsPerSecond);
 }
 
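+// Returned when no timing samples have been recorded yet, so a step with
+// unknown cost still counts as taking some time.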
+const double kEstimatedRuntimeWithoutData = 1.0;
+
 }  // namespace
 
 CompilerDispatcherTracer::Scope::Scope(CompilerDispatcherTracer* tracer,
                                        ScopeID scope_id, size_t num)
     : tracer_(tracer), scope_id_(scope_id), num_(num) {
   start_time_ = MonotonicallyIncreasingTimeInMs();
-  // TODO(cbruni): remove once we fully moved to a trace-based system.
-  if (V8_UNLIKELY(FLAG_runtime_stats)) {
-    RuntimeCallStats::Enter(tracer_->runtime_call_stats_, &timer_,
-                            &RuntimeCallStats::CompilerDispatcher);
-  }
 }
 
 CompilerDispatcherTracer::Scope::~Scope() {
@@ -41,6 +39,9 @@
     case ScopeID::kFinalizeParsing:
       tracer_->RecordFinalizeParsing(elapsed);
       break;
+    case ScopeID::kAnalyze:
+      tracer_->RecordAnalyze(elapsed);
+      break;
     case ScopeID::kPrepareToCompile:
       tracer_->RecordPrepareToCompile(elapsed);
       break;
@@ -51,10 +52,6 @@
       tracer_->RecordFinalizeCompiling(elapsed);
       break;
   }
-  // TODO(cbruni): remove once we fully moved to a trace-based system.
-  if (V8_UNLIKELY(FLAG_runtime_stats)) {
-    RuntimeCallStats::Leave(tracer_->runtime_call_stats_, &timer_);
-  }
 }
 
 // static
@@ -66,6 +63,8 @@
       return "V8.BackgroundCompile_Parse";
     case ScopeID::kFinalizeParsing:
       return "V8.BackgroundCompile_FinalizeParsing";
+    case ScopeID::kAnalyze:
+      return "V8.BackgroundCompile_Analyze";
     case ScopeID::kPrepareToCompile:
       return "V8.BackgroundCompile_PrepareToCompile";
     case ScopeID::kCompile:
@@ -103,6 +102,11 @@
   finalize_parsing_events_.Push(duration_ms);
 }
 
+void CompilerDispatcherTracer::RecordAnalyze(double duration_ms) {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  analyze_events_.Push(duration_ms);
+}
+
 void CompilerDispatcherTracer::RecordPrepareToCompile(double duration_ms) {
   base::LockGuard<base::Mutex> lock(&mutex_);
   prepare_compile_events_.Push(duration_ms);
@@ -129,26 +133,44 @@
   return Estimate(parse_events_, source_length);
 }
 
-double CompilerDispatcherTracer::EstimateFinalizeParsingInMs() {
+double CompilerDispatcherTracer::EstimateFinalizeParsingInMs() const {
   base::LockGuard<base::Mutex> lock(&mutex_);
   return Average(finalize_parsing_events_);
 }
 
-double CompilerDispatcherTracer::EstimatePrepareToCompileInMs() {
+double CompilerDispatcherTracer::EstimateAnalyzeInMs() const {
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  return Average(analyze_events_);
+}
+
+double CompilerDispatcherTracer::EstimatePrepareToCompileInMs() const {
   base::LockGuard<base::Mutex> lock(&mutex_);
   return Average(prepare_compile_events_);
 }
 
-double CompilerDispatcherTracer::EstimateCompileInMs(size_t ast_size_in_bytes) {
+double CompilerDispatcherTracer::EstimateCompileInMs(
+    size_t ast_size_in_bytes) const {
   base::LockGuard<base::Mutex> lock(&mutex_);
   return Estimate(compile_events_, ast_size_in_bytes);
 }
 
-double CompilerDispatcherTracer::EstimateFinalizeCompilingInMs() {
+double CompilerDispatcherTracer::EstimateFinalizeCompilingInMs() const {
   base::LockGuard<base::Mutex> lock(&mutex_);
   return Average(finalize_compiling_events_);
 }
 
+void CompilerDispatcherTracer::DumpStatistics() const {
+  PrintF(
+      "CompilerDispatcherTracer: "
+      "prepare_parsing=%.2lfms parsing=%.2lfms/kb finalize_parsing=%.2lfms "
+      "analyze=%.2lfms prepare_compiling=%.2lfms compiling=%.2lfms/kb "
+      "finalize_compiling=%.2lfms\n",
+      EstimatePrepareToParseInMs(), EstimateParseInMs(1 * KB),
+      EstimateFinalizeParsingInMs(), EstimateAnalyzeInMs(),
+      EstimatePrepareToCompileInMs(), EstimateCompileInMs(1 * KB),
+      EstimateFinalizeCompilingInMs());
+}
+
 double CompilerDispatcherTracer::Average(
     const base::RingBuffer<double>& buffer) {
   if (buffer.Count() == 0) return 0.0;
@@ -158,7 +180,7 @@
 
 double CompilerDispatcherTracer::Estimate(
     const base::RingBuffer<std::pair<size_t, double>>& buffer, size_t num) {
-  if (buffer.Count() == 0) return 0.0;
+  if (buffer.Count() == 0) return kEstimatedRuntimeWithoutData;
   std::pair<size_t, double> sum = buffer.Sum(
       [](std::pair<size_t, double> a, std::pair<size_t, double> b) {
         return std::make_pair(a.first + b.first, a.second + b.second);
diff --git a/src/compiler-dispatcher/compiler-dispatcher-tracer.h b/src/compiler-dispatcher/compiler-dispatcher-tracer.h
index b505511..7bbd5d9 100644
--- a/src/compiler-dispatcher/compiler-dispatcher-tracer.h
+++ b/src/compiler-dispatcher/compiler-dispatcher-tracer.h
@@ -35,6 +35,7 @@
     kPrepareToParse,
     kParse,
     kFinalizeParsing,
+    kAnalyze,
     kPrepareToCompile,
     kCompile,
     kFinalizeCompiling
@@ -52,7 +53,6 @@
     ScopeID scope_id_;
     size_t num_;
     double start_time_;
-    RuntimeCallTimer timer_;
 
     DISALLOW_COPY_AND_ASSIGN(Scope);
   };
@@ -63,16 +63,20 @@
   void RecordPrepareToParse(double duration_ms);
   void RecordParse(double duration_ms, size_t source_length);
   void RecordFinalizeParsing(double duration_ms);
+  void RecordAnalyze(double duration_ms);
   void RecordPrepareToCompile(double duration_ms);
   void RecordCompile(double duration_ms, size_t ast_size_in_bytes);
   void RecordFinalizeCompiling(double duration_ms);
 
   double EstimatePrepareToParseInMs() const;
   double EstimateParseInMs(size_t source_length) const;
-  double EstimateFinalizeParsingInMs();
-  double EstimatePrepareToCompileInMs();
-  double EstimateCompileInMs(size_t ast_size_in_bytes);
-  double EstimateFinalizeCompilingInMs();
+  double EstimateFinalizeParsingInMs() const;
+  double EstimateAnalyzeInMs() const;
+  double EstimatePrepareToCompileInMs() const;
+  double EstimateCompileInMs(size_t ast_size_in_bytes) const;
+  double EstimateFinalizeCompilingInMs() const;
+
+  void DumpStatistics() const;
 
  private:
   static double Average(const base::RingBuffer<double>& buffer);
@@ -83,6 +87,7 @@
   base::RingBuffer<double> prepare_parse_events_;
   base::RingBuffer<std::pair<size_t, double>> parse_events_;
   base::RingBuffer<double> finalize_parsing_events_;
+  base::RingBuffer<double> analyze_events_;
   base::RingBuffer<double> prepare_compile_events_;
   base::RingBuffer<std::pair<size_t, double>> compile_events_;
   base::RingBuffer<double> finalize_compiling_events_;
diff --git a/src/compiler-dispatcher/compiler-dispatcher.cc b/src/compiler-dispatcher/compiler-dispatcher.cc
new file mode 100644
index 0000000..802142b
--- /dev/null
+++ b/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -0,0 +1,714 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler-dispatcher/compiler-dispatcher.h"
+
+#include "include/v8-platform.h"
+#include "include/v8.h"
+#include "src/base/platform/time.h"
+#include "src/cancelable-task.h"
+#include "src/compilation-info.h"
+#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
+#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
+#include "src/flags.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+enum class ExceptionHandling { kSwallow, kThrow };
+
+bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
+                            ExceptionHandling exception_handling) {
+  DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.CompilerDispatcherForgroundStep");
+
+  // Ensure we are in the correct context for the job.
+  SaveContext save(isolate);
+  isolate->set_context(job->context());
+
+  switch (job->status()) {
+    case CompileJobStatus::kInitial:
+      job->PrepareToParseOnMainThread();
+      break;
+
+    case CompileJobStatus::kReadyToParse:
+      job->Parse();
+      break;
+
+    case CompileJobStatus::kParsed:
+      job->FinalizeParsingOnMainThread();
+      break;
+
+    case CompileJobStatus::kReadyToAnalyze:
+      job->AnalyzeOnMainThread();
+      break;
+
+    case CompileJobStatus::kAnalyzed:
+      job->PrepareToCompileOnMainThread();
+      break;
+
+    case CompileJobStatus::kReadyToCompile:
+      job->Compile();
+      break;
+
+    case CompileJobStatus::kCompiled:
+      job->FinalizeCompilingOnMainThread();
+      break;
+
+    case CompileJobStatus::kFailed:
+    case CompileJobStatus::kDone:
+      break;
+  }
+
+  DCHECK_EQ(job->status() == CompileJobStatus::kFailed,
+            isolate->has_pending_exception());
+  if (job->status() == CompileJobStatus::kFailed &&
+      exception_handling == ExceptionHandling::kSwallow) {
+    isolate->clear_pending_exception();
+  }
+  return job->status() != CompileJobStatus::kFailed;
+}
+
+bool IsFinished(CompilerDispatcherJob* job) {
+  return job->status() == CompileJobStatus::kDone ||
+         job->status() == CompileJobStatus::kFailed;
+}
+
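+// Parsing and compiling are the only steps that can run on a background
+// thread; every other transition has to happen on the main thread.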
+bool CanRunOnAnyThread(CompilerDispatcherJob* job) {
+  return job->status() == CompileJobStatus::kReadyToParse ||
+         job->status() == CompileJobStatus::kReadyToCompile;
+}
+
+void DoNextStepOnBackgroundThread(CompilerDispatcherJob* job) {
+  DCHECK(CanRunOnAnyThread(job));
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.CompilerDispatcherBackgroundStep");
+
+  switch (job->status()) {
+    case CompileJobStatus::kReadyToParse:
+      job->Parse();
+      break;
+
+    case CompileJobStatus::kReadyToCompile:
+      job->Compile();
+      break;
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+// Theoretically we get 50ms of idle time max; however, it's unlikely that
+// we'll get all of it, so try to be conservative.
+const double kMaxIdleTimeToExpectInMs = 40;
+
+class MemoryPressureTask : public CancelableTask {
+ public:
+  MemoryPressureTask(Isolate* isolate, CancelableTaskManager* task_manager,
+                     CompilerDispatcher* dispatcher);
+  ~MemoryPressureTask() override;
+
+  // CancelableTask implementation.
+  void RunInternal() override;
+
+ private:
+  CompilerDispatcher* dispatcher_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryPressureTask);
+};
+
+MemoryPressureTask::MemoryPressureTask(Isolate* isolate,
+                                       CancelableTaskManager* task_manager,
+                                       CompilerDispatcher* dispatcher)
+    : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}
+
+MemoryPressureTask::~MemoryPressureTask() {}
+
+void MemoryPressureTask::RunInternal() {
+  dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kDontBlock);
+}
+
+}  // namespace
+
+class CompilerDispatcher::AbortTask : public CancelableTask {
+ public:
+  AbortTask(Isolate* isolate, CancelableTaskManager* task_manager,
+            CompilerDispatcher* dispatcher);
+  ~AbortTask() override;
+
+  // CancelableTask implementation.
+  void RunInternal() override;
+
+ private:
+  CompilerDispatcher* dispatcher_;
+
+  DISALLOW_COPY_AND_ASSIGN(AbortTask);
+};
+
+CompilerDispatcher::AbortTask::AbortTask(Isolate* isolate,
+                                         CancelableTaskManager* task_manager,
+                                         CompilerDispatcher* dispatcher)
+    : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}
+
+CompilerDispatcher::AbortTask::~AbortTask() {}
+
+void CompilerDispatcher::AbortTask::RunInternal() {
+  dispatcher_->AbortInactiveJobs();
+}
+
+class CompilerDispatcher::BackgroundTask : public CancelableTask {
+ public:
+  BackgroundTask(Isolate* isolate, CancelableTaskManager* task_manager,
+                 CompilerDispatcher* dispatcher);
+  ~BackgroundTask() override;
+
+  // CancelableTask implementation.
+  void RunInternal() override;
+
+ private:
+  CompilerDispatcher* dispatcher_;
+
+  DISALLOW_COPY_AND_ASSIGN(BackgroundTask);
+};
+
+CompilerDispatcher::BackgroundTask::BackgroundTask(
+    Isolate* isolate, CancelableTaskManager* task_manager,
+    CompilerDispatcher* dispatcher)
+    : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}
+
+CompilerDispatcher::BackgroundTask::~BackgroundTask() {}
+
+void CompilerDispatcher::BackgroundTask::RunInternal() {
+  dispatcher_->DoBackgroundWork();
+}
+
+class CompilerDispatcher::IdleTask : public CancelableIdleTask {
+ public:
+  IdleTask(Isolate* isolate, CancelableTaskManager* task_manager,
+           CompilerDispatcher* dispatcher);
+  ~IdleTask() override;
+
+  // CancelableIdleTask implementation.
+  void RunInternal(double deadline_in_seconds) override;
+
+ private:
+  CompilerDispatcher* dispatcher_;
+
+  DISALLOW_COPY_AND_ASSIGN(IdleTask);
+};
+
+CompilerDispatcher::IdleTask::IdleTask(Isolate* isolate,
+                                       CancelableTaskManager* task_manager,
+                                       CompilerDispatcher* dispatcher)
+    : CancelableIdleTask(isolate, task_manager), dispatcher_(dispatcher) {}
+
+CompilerDispatcher::IdleTask::~IdleTask() {}
+
+void CompilerDispatcher::IdleTask::RunInternal(double deadline_in_seconds) {
+  dispatcher_->DoIdleWork(deadline_in_seconds);
+}
+
+CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
+                                       size_t max_stack_size)
+    : isolate_(isolate),
+      platform_(platform),
+      max_stack_size_(max_stack_size),
+      trace_compiler_dispatcher_(FLAG_trace_compiler_dispatcher),
+      tracer_(new CompilerDispatcherTracer(isolate_)),
+      task_manager_(new CancelableTaskManager()),
+      memory_pressure_level_(MemoryPressureLevel::kNone),
+      abort_(false),
+      idle_task_scheduled_(false),
+      num_scheduled_background_tasks_(0),
+      main_thread_blocking_on_job_(nullptr),
+      block_for_testing_(false),
+      semaphore_for_testing_(0) {
+  if (trace_compiler_dispatcher_ && !IsEnabled()) {
+    PrintF("CompilerDispatcher: dispatcher is disabled\n");
+  }
+}
+
+CompilerDispatcher::~CompilerDispatcher() {
+  // To avoid crashing in unit tests due to unfinished jobs.
+  AbortAll(BlockingBehavior::kBlock);
+  task_manager_->CancelAndWait();
+}
+
+bool CompilerDispatcher::CanEnqueue(Handle<SharedFunctionInfo> function) {
+  if (!IsEnabled()) return false;
+
+  DCHECK(FLAG_ignition);
+
+  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
+    return false;
+  }
+
+  {
+    base::LockGuard<base::Mutex> lock(&mutex_);
+    if (abort_) return false;
+  }
+
+  // We only handle functions (no eval / top-level code / wasm) that are
+  // attached to a script.
+  if (!function->script()->IsScript() || function->is_toplevel() ||
+      function->asm_function() || function->native()) {
+    return false;
+  }
+
+  return true;
+}
+
+bool CompilerDispatcher::Enqueue(Handle<SharedFunctionInfo> function) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.CompilerDispatcherEnqueue");
+  if (!CanEnqueue(function)) return false;
+  if (IsEnqueued(function)) return true;
+
+  if (trace_compiler_dispatcher_) {
+    PrintF("CompilerDispatcher: enqueuing ");
+    function->ShortPrint();
+    PrintF(" for parse and compile\n");
+  }
+
+  std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+      isolate_, tracer_.get(), function, max_stack_size_));
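+  // Jobs are keyed by (script id, function literal id), which uniquely
+  // identifies the function within its script.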
+  std::pair<int, int> key(Script::cast(function->script())->id(),
+                          function->function_literal_id());
+  jobs_.insert(std::make_pair(key, std::move(job)));
+  ScheduleIdleTaskIfNeeded();
+  return true;
+}
+
+bool CompilerDispatcher::EnqueueAndStep(Handle<SharedFunctionInfo> function) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.CompilerDispatcherEnqueueAndStep");
+  if (IsEnqueued(function)) return true;
+  if (!Enqueue(function)) return false;
+
+  if (trace_compiler_dispatcher_) {
+    PrintF("CompilerDispatcher: stepping ");
+    function->ShortPrint();
+    PrintF("\n");
+  }
+  JobMap::const_iterator job = GetJobFor(function);
+  DoNextStepOnMainThread(isolate_, job->second.get(),
+                         ExceptionHandling::kSwallow);
+  ConsiderJobForBackgroundProcessing(job->second.get());
+  return true;
+}
+
+bool CompilerDispatcher::Enqueue(
+    Handle<Script> script, Handle<SharedFunctionInfo> function,
+    FunctionLiteral* literal, std::shared_ptr<Zone> parse_zone,
+    std::shared_ptr<DeferredHandles> parse_handles,
+    std::shared_ptr<DeferredHandles> compile_handles) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.CompilerDispatcherEnqueue");
+  if (!CanEnqueue(function)) return false;
+  if (IsEnqueued(function)) return true;
+
+  if (trace_compiler_dispatcher_) {
+    PrintF("CompilerDispatcher: enqueuing ");
+    function->ShortPrint();
+    PrintF(" for compile\n");
+  }
+
+  std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+      isolate_, tracer_.get(), script, function, literal, parse_zone,
+      parse_handles, compile_handles, max_stack_size_));
+  std::pair<int, int> key(Script::cast(function->script())->id(),
+                          function->function_literal_id());
+  jobs_.insert(std::make_pair(key, std::move(job)));
+  ScheduleIdleTaskIfNeeded();
+  return true;
+}
+
+bool CompilerDispatcher::EnqueueAndStep(
+    Handle<Script> script, Handle<SharedFunctionInfo> function,
+    FunctionLiteral* literal, std::shared_ptr<Zone> parse_zone,
+    std::shared_ptr<DeferredHandles> parse_handles,
+    std::shared_ptr<DeferredHandles> compile_handles) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.CompilerDispatcherEnqueueAndStep");
+  if (IsEnqueued(function)) return true;
+  if (!Enqueue(script, function, literal, parse_zone, parse_handles,
+               compile_handles)) {
+    return false;
+  }
+
+  if (trace_compiler_dispatcher_) {
+    PrintF("CompilerDispatcher: stepping ");
+    function->ShortPrint();
+    PrintF("\n");
+  }
+  JobMap::const_iterator job = GetJobFor(function);
+  DoNextStepOnMainThread(isolate_, job->second.get(),
+                         ExceptionHandling::kSwallow);
+  ConsiderJobForBackgroundProcessing(job->second.get());
+  return true;
+}
+
+bool CompilerDispatcher::IsEnabled() const { return FLAG_compiler_dispatcher; }
+
+bool CompilerDispatcher::IsEnqueued(Handle<SharedFunctionInfo> function) const {
+  if (jobs_.empty()) return false;
+  return GetJobFor(function) != jobs_.end();
+}
+
+void CompilerDispatcher::WaitForJobIfRunningOnBackground(
+    CompilerDispatcherJob* job) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.CompilerDispatcherWaitForBackgroundJob");
+  RuntimeCallTimerScope runtimeTimer(
+      isolate_, &RuntimeCallStats::CompileWaitForDispatcher);
+
+  base::LockGuard<base::Mutex> lock(&mutex_);
+  if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
+    pending_background_jobs_.erase(job);
+    return;
+  }
+  DCHECK_NULL(main_thread_blocking_on_job_);
+  main_thread_blocking_on_job_ = job;
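+  // Wait until the background thread finishes the job, resets
+  // main_thread_blocking_on_job_ and signals main_thread_blocking_signal_.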
+  while (main_thread_blocking_on_job_ != nullptr) {
+    main_thread_blocking_signal_.Wait(&mutex_);
+  }
+  DCHECK(pending_background_jobs_.find(job) == pending_background_jobs_.end());
+  DCHECK(running_background_jobs_.find(job) == running_background_jobs_.end());
+}
+
+bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.CompilerDispatcherFinishNow");
+  JobMap::const_iterator job = GetJobFor(function);
+  CHECK(job != jobs_.end());
+
+  if (trace_compiler_dispatcher_) {
+    PrintF("CompilerDispatcher: finishing ");
+    function->ShortPrint();
+    PrintF(" now\n");
+  }
+
+  WaitForJobIfRunningOnBackground(job->second.get());
+  while (!IsFinished(job->second.get())) {
+    DoNextStepOnMainThread(isolate_, job->second.get(),
+                           ExceptionHandling::kThrow);
+  }
+  bool result = job->second->status() != CompileJobStatus::kFailed;
+
+  if (trace_compiler_dispatcher_) {
+    PrintF("CompilerDispatcher: finished working on ");
+    function->ShortPrint();
+    PrintF(": %s\n", result ? "success" : "failure");
+    tracer_->DumpStatistics();
+  }
+
+  job->second->ResetOnMainThread();
+  jobs_.erase(job);
+  if (jobs_.empty()) {
+    base::LockGuard<base::Mutex> lock(&mutex_);
+    abort_ = false;
+  }
+  return result;
+}
+
+void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
+  bool background_tasks_running =
+      task_manager_->TryAbortAll() == CancelableTaskManager::kTaskRunning;
+  if (!background_tasks_running || blocking == BlockingBehavior::kBlock) {
+    for (auto& it : jobs_) {
+      WaitForJobIfRunningOnBackground(it.second.get());
+      if (trace_compiler_dispatcher_) {
+        PrintF("CompilerDispatcher: aborted ");
+        it.second->ShortPrint();
+        PrintF("\n");
+      }
+      it.second->ResetOnMainThread();
+    }
+    jobs_.clear();
+    {
+      base::LockGuard<base::Mutex> lock(&mutex_);
+      DCHECK(pending_background_jobs_.empty());
+      DCHECK(running_background_jobs_.empty());
+      abort_ = false;
+    }
+    return;
+  }
+
+  {
+    base::LockGuard<base::Mutex> lock(&mutex_);
+    abort_ = true;
+    pending_background_jobs_.clear();
+  }
+  AbortInactiveJobs();
+
+  // All running background jobs might already have scheduled idle tasks
+  // instead of abort tasks. Schedule a single abort task here to make sure
+  // they get processed as soon as possible (and not only once we next get
+  // idle time).
+  ScheduleAbortTask();
+}
+
+void CompilerDispatcher::AbortInactiveJobs() {
+  {
+    base::LockGuard<base::Mutex> lock(&mutex_);
+    // Since we schedule two abort tasks per async abort, we might end up
+    // here with nothing left to do.
+    if (!abort_) return;
+  }
+  for (auto it = jobs_.begin(); it != jobs_.end();) {
+    auto job = it;
+    ++it;
+    {
+      base::LockGuard<base::Mutex> lock(&mutex_);
+      if (running_background_jobs_.find(job->second.get()) !=
+          running_background_jobs_.end()) {
+        continue;
+      }
+    }
+    if (trace_compiler_dispatcher_) {
+      PrintF("CompilerDispatcher: aborted ");
+      job->second->ShortPrint();
+      PrintF("\n");
+    }
+    job->second->ResetOnMainThread();
+    jobs_.erase(job);
+  }
+  if (jobs_.empty()) {
+    base::LockGuard<base::Mutex> lock(&mutex_);
+    abort_ = false;
+  }
+}
+
+void CompilerDispatcher::MemoryPressureNotification(
+    v8::MemoryPressureLevel level, bool is_isolate_locked) {
+  MemoryPressureLevel previous = memory_pressure_level_.Value();
+  memory_pressure_level_.SetValue(level);
+  // If we're already under pressure, we haven't accepted new tasks meanwhile
+  // and can just return. If we're no longer under pressure, we're also done.
+  if (previous != MemoryPressureLevel::kNone ||
+      level == MemoryPressureLevel::kNone) {
+    return;
+  }
+  if (trace_compiler_dispatcher_) {
+    PrintF("CompilerDispatcher: received memory pressure notification\n");
+  }
+  if (is_isolate_locked) {
+    AbortAll(BlockingBehavior::kDontBlock);
+  } else {
+    {
+      base::LockGuard<base::Mutex> lock(&mutex_);
+      if (abort_) return;
+      // By going into abort mode here, and clearing the
+      // pending_background_jobs_, we at least keep existing background jobs
+      // from picking up more work before the MemoryPressureTask gets executed.
+      abort_ = true;
+      pending_background_jobs_.clear();
+    }
+    platform_->CallOnForegroundThread(
+        reinterpret_cast<v8::Isolate*>(isolate_),
+        new MemoryPressureTask(isolate_, task_manager_.get(), this));
+  }
+}
+
+CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
+    Handle<SharedFunctionInfo> shared) const {
+  if (!shared->script()->IsScript()) return jobs_.end();
+  std::pair<int, int> key(Script::cast(shared->script())->id(),
+                          shared->function_literal_id());
+  auto range = jobs_.equal_range(key);
+  for (auto job = range.first; job != range.second; ++job) {
+    if (job->second->IsAssociatedWith(shared)) return job;
+  }
+  return jobs_.end();
+}
+
+void CompilerDispatcher::ScheduleIdleTaskFromAnyThread() {
+  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+  DCHECK(platform_->IdleTasksEnabled(v8_isolate));
+  {
+    base::LockGuard<base::Mutex> lock(&mutex_);
+    if (idle_task_scheduled_) return;
+    idle_task_scheduled_ = true;
+  }
+  platform_->CallIdleOnForegroundThread(
+      v8_isolate, new IdleTask(isolate_, task_manager_.get(), this));
+}
+
+void CompilerDispatcher::ScheduleIdleTaskIfNeeded() {
+  if (jobs_.empty()) return;
+  ScheduleIdleTaskFromAnyThread();
+}
+
+void CompilerDispatcher::ScheduleAbortTask() {
+  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+  platform_->CallOnForegroundThread(
+      v8_isolate, new AbortTask(isolate_, task_manager_.get(), this));
+}
+
+void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
+    CompilerDispatcherJob* job) {
+  if (!CanRunOnAnyThread(job)) return;
+  {
+    base::LockGuard<base::Mutex> lock(&mutex_);
+    pending_background_jobs_.insert(job);
+  }
+  ScheduleMoreBackgroundTasksIfNeeded();
+}
+
+void CompilerDispatcher::ScheduleMoreBackgroundTasksIfNeeded() {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.CompilerDispatcherScheduleMoreBackgroundTasksIfNeeded");
+  if (FLAG_single_threaded) return;
+  {
+    base::LockGuard<base::Mutex> lock(&mutex_);
+    if (pending_background_jobs_.empty()) return;
+    if (platform_->NumberOfAvailableBackgroundThreads() <=
+        num_scheduled_background_tasks_) {
+      return;
+    }
+    ++num_scheduled_background_tasks_;
+  }
+  platform_->CallOnBackgroundThread(
+      new BackgroundTask(isolate_, task_manager_.get(), this),
+      v8::Platform::kShortRunningTask);
+}
+
+void CompilerDispatcher::DoBackgroundWork() {
+  CompilerDispatcherJob* job = nullptr;
+  {
+    base::LockGuard<base::Mutex> lock(&mutex_);
+    --num_scheduled_background_tasks_;
+    if (!pending_background_jobs_.empty()) {
+      auto it = pending_background_jobs_.begin();
+      job = *it;
+      pending_background_jobs_.erase(it);
+      running_background_jobs_.insert(job);
+    }
+  }
+  if (job == nullptr) return;
+
+  if (V8_UNLIKELY(block_for_testing_.Value())) {
+    block_for_testing_.SetValue(false);
+    semaphore_for_testing_.Wait();
+  }
+
+  if (trace_compiler_dispatcher_) {
+    PrintF("CompilerDispatcher: doing background work\n");
+  }
+
+  DoNextStepOnBackgroundThread(job);
+
+  ScheduleMoreBackgroundTasksIfNeeded();
+  // Unconditionally schedule an idle task, as all background steps have to be
+  // followed by a main thread step.
+  ScheduleIdleTaskFromAnyThread();
+
+  {
+    base::LockGuard<base::Mutex> lock(&mutex_);
+    running_background_jobs_.erase(job);
+
+    if (running_background_jobs_.empty() && abort_) {
+      // This is the last background job that finished. The abort task
+      // scheduled by AbortAll might already have run, so schedule another
+      // one to be on the safe side.
+      ScheduleAbortTask();
+    }
+
+    if (main_thread_blocking_on_job_ == job) {
+      main_thread_blocking_on_job_ = nullptr;
+      main_thread_blocking_signal_.NotifyOne();
+    }
+  }
+  // Don't touch |this| anymore after this point, as it might have been
+  // deleted.
+}
+
+void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
+  bool aborted = false;
+  {
+    base::LockGuard<base::Mutex> lock(&mutex_);
+    idle_task_scheduled_ = false;
+    aborted = abort_;
+  }
+
+  if (aborted) {
+    AbortInactiveJobs();
+    return;
+  }
+
+  // Number of jobs that are unlikely to make progress during any idle callback
+  // due to their estimated duration.
+  size_t too_long_jobs = 0;
+
+  // Iterate over all available jobs & remaining time. For each job, decide
+  // whether to 1) skip it (if it would take too long), 2) erase it (if it's
+  // finished), or 3) make progress on it.
+  double idle_time_in_seconds =
+      deadline_in_seconds - platform_->MonotonicallyIncreasingTime();
+
+  if (trace_compiler_dispatcher_) {
+    PrintF("CompilerDispatcher: received %0.1lfms of idle time\n",
+           idle_time_in_seconds *
+               static_cast<double>(base::Time::kMillisecondsPerSecond));
+  }
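+  // Note that the loop header has no increment expression on purpose: |job| is
+  // only advanced in the branches that don't erase it, and the remaining idle
+  // time is recomputed before each iteration.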
+  for (auto job = jobs_.begin();
+       job != jobs_.end() && idle_time_in_seconds > 0.0;
+       idle_time_in_seconds =
+           deadline_in_seconds - platform_->MonotonicallyIncreasingTime()) {
+    // Don't work on jobs that are being worked on by background tasks.
+    // Similarly, remove jobs we work on from the set of available background
+    // jobs.
+    std::unique_ptr<base::LockGuard<base::Mutex>> lock(
+        new base::LockGuard<base::Mutex>(&mutex_));
+    if (running_background_jobs_.find(job->second.get()) !=
+        running_background_jobs_.end()) {
+      ++job;
+      continue;
+    }
+    auto it = pending_background_jobs_.find(job->second.get());
+    double estimate_in_ms = job->second->EstimateRuntimeOfNextStepInMs();
+    if (idle_time_in_seconds <
+        (estimate_in_ms /
+         static_cast<double>(base::Time::kMillisecondsPerSecond))) {
+      // There's not enough time left to run the next step. Estimate whether
+      // the job would have fit into a maximally long idle task, so that we
+      // can later decide whether to ask for another idle callback at all.
+      if (estimate_in_ms > kMaxIdleTimeToExpectInMs) ++too_long_jobs;
+      if (it == pending_background_jobs_.end()) {
+        lock.reset();
+        ConsiderJobForBackgroundProcessing(job->second.get());
+      }
+      ++job;
+    } else if (IsFinished(job->second.get())) {
+      DCHECK(it == pending_background_jobs_.end());
+      if (trace_compiler_dispatcher_) {
+        PrintF("CompilerDispatcher: finished working on ");
+        job->second->ShortPrint();
+        PrintF(": %s\n", job->second->status() == CompileJobStatus::kDone
+                             ? "success"
+                             : "failure");
+        tracer_->DumpStatistics();
+      }
+      job->second->ResetOnMainThread();
+      job = jobs_.erase(job);
+      continue;
+    } else {
+      // Do one step, and keep processing the job (as we don't advance the
+      // iterator).
+      if (it != pending_background_jobs_.end()) {
+        pending_background_jobs_.erase(it);
+      }
+      lock.reset();
+      DoNextStepOnMainThread(isolate_, job->second.get(),
+                             ExceptionHandling::kSwallow);
+    }
+  }
+  if (jobs_.size() > too_long_jobs) ScheduleIdleTaskIfNeeded();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler-dispatcher/compiler-dispatcher.h b/src/compiler-dispatcher/compiler-dispatcher.h
new file mode 100644
index 0000000..6347aa8
--- /dev/null
+++ b/src/compiler-dispatcher/compiler-dispatcher.h
@@ -0,0 +1,201 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_H_
+#define V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_H_
+
+#include <map>
+#include <memory>
+#include <unordered_set>
+#include <utility>
+
+#include "src/base/atomic-utils.h"
+#include "src/base/macros.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+#include "src/globals.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"
+
+namespace v8 {
+
+class Platform;
+enum class MemoryPressureLevel;
+
+namespace internal {
+
+class CancelableTaskManager;
+class CompilerDispatcherJob;
+class CompilerDispatcherTracer;
+class DeferredHandles;
+class FunctionLiteral;
+class Isolate;
+class SharedFunctionInfo;
+class Zone;
+
+template <typename T>
+class Handle;
+
+// The CompilerDispatcher uses a combination of idle tasks and background tasks
+// to parse and compile lazily parsed functions.
+//
+// As both parsing and compilation currently require a preparation and
+// finalization step that happens on the main thread, every task has to be
+// advanced during idle time first. Depending on the properties of the task, it
+// can then be parsed or compiled on either background threads, or during idle
+// time. Finally, it has to be finalized during idle time again.
+//
+// CompilerDispatcher::jobs_ maintains the list of all CompilerDispatcherJobs
+// the CompilerDispatcher knows about.
+//
+// CompilerDispatcher::pending_background_jobs_ contains the set of
+// CompilerDispatcherJobs that can be processed on a background thread.
+//
+// CompilerDispatcher::running_background_jobs_ contains the set of
+// CompilerDispatcherJobs that are currently being processed on a background
+// thread.
+//
+// CompilerDispatcher::DoIdleWork tries to advance as many jobs out of jobs_ as
+// possible during idle time. If a job can't be advanced, but is suitable for
+// background processing, it schedules a background task to work on it.
+//
+// CompilerDispatcher::DoBackgroundWork advances one of the pending jobs, and
+// then spins off another idle task to potentially do the final step on the main
+// thread.
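+//
+// A minimal usage sketch (assuming |shared| is a Handle<SharedFunctionInfo>
+// for a lazily parsed function; error handling omitted):
+//
+//   CompilerDispatcher* dispatcher = isolate->compiler_dispatcher();
+//   if (dispatcher->IsEnabled() && dispatcher->Enqueue(shared)) {
+//     // Parsing and compilation proceed during idle time and on background
+//     // threads; block for the result only when it is actually needed:
+//     bool success = dispatcher->FinishNow(shared);
+//   }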
+class V8_EXPORT_PRIVATE CompilerDispatcher {
+ public:
+  enum class BlockingBehavior { kBlock, kDontBlock };
+
+  CompilerDispatcher(Isolate* isolate, Platform* platform,
+                     size_t max_stack_size);
+  ~CompilerDispatcher();
+
+  // Returns true if the compiler dispatcher is enabled.
+  bool IsEnabled() const;
+
+  // Enqueue a job for parse and compile. Returns true if a job was enqueued.
+  bool Enqueue(Handle<SharedFunctionInfo> function);
+
+  // Like Enqueue, but also advances the job so that it can potentially
+  // continue running on a background thread (if at all possible). Returns
+  // true if the job was enqueued.
+  bool EnqueueAndStep(Handle<SharedFunctionInfo> function);
+
+  // Enqueue a job for compilation. Function must have already been parsed and
+  // analyzed and be ready for compilation. Returns true if a job was enqueued.
+  bool Enqueue(Handle<Script> script, Handle<SharedFunctionInfo> function,
+               FunctionLiteral* literal, std::shared_ptr<Zone> parse_zone,
+               std::shared_ptr<DeferredHandles> parse_handles,
+               std::shared_ptr<DeferredHandles> compile_handles);
+
+  // Like Enqueue, but also advances the job so that it can potentially
+  // continue running on a background thread (if at all possible). Returns
+  // true if the job was enqueued.
+  bool EnqueueAndStep(Handle<Script> script,
+                      Handle<SharedFunctionInfo> function,
+                      FunctionLiteral* literal,
+                      std::shared_ptr<Zone> parse_zone,
+                      std::shared_ptr<DeferredHandles> parse_handles,
+                      std::shared_ptr<DeferredHandles> compile_handles);
+
+  // Returns true if there is a pending job for the given function.
+  bool IsEnqueued(Handle<SharedFunctionInfo> function) const;
+
+  // Blocks until the given function is compiled (and does so as fast as
+  // possible). Returns true if the compile job was successful.
+  bool FinishNow(Handle<SharedFunctionInfo> function);
+
+  // Aborts a given job. Blocks if requested.
+  void Abort(Handle<SharedFunctionInfo> function, BlockingBehavior blocking);
+
+  // Aborts all jobs. Blocks if requested.
+  void AbortAll(BlockingBehavior blocking);
+
+  // Memory pressure notifications from the embedder.
+  void MemoryPressureNotification(v8::MemoryPressureLevel level,
+                                  bool is_isolate_locked);
+
+ private:
+  FRIEND_TEST(CompilerDispatcherTest, EnqueueAndStep);
+  FRIEND_TEST(CompilerDispatcherTest, EnqueueAndStepTwice);
+  FRIEND_TEST(CompilerDispatcherTest, EnqueueParsed);
+  FRIEND_TEST(CompilerDispatcherTest, EnqueueAndStepParsed);
+  FRIEND_TEST(CompilerDispatcherTest, IdleTaskSmallIdleTime);
+  FRIEND_TEST(CompilerDispatcherTest, CompileOnBackgroundThread);
+  FRIEND_TEST(CompilerDispatcherTest, FinishNowWithBackgroundTask);
+  FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllPendingBackgroundTask);
+  FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllRunningBackgroundTask);
+  FRIEND_TEST(CompilerDispatcherTest, FinishNowDuringAbortAll);
+
+  typedef std::multimap<std::pair<int, int>,
+                        std::unique_ptr<CompilerDispatcherJob>>
+      JobMap;
+  class AbortTask;
+  class BackgroundTask;
+  class IdleTask;
+
+  void WaitForJobIfRunningOnBackground(CompilerDispatcherJob* job);
+  void AbortInactiveJobs();
+  bool CanEnqueue(Handle<SharedFunctionInfo> function);
+  JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const;
+  void ConsiderJobForBackgroundProcessing(CompilerDispatcherJob* job);
+  void ScheduleMoreBackgroundTasksIfNeeded();
+  void ScheduleIdleTaskFromAnyThread();
+  void ScheduleIdleTaskIfNeeded();
+  void ScheduleAbortTask();
+  void DoBackgroundWork();
+  void DoIdleWork(double deadline_in_seconds);
+
+  Isolate* isolate_;
+  Platform* platform_;
+  size_t max_stack_size_;
+
+  // Copy of FLAG_trace_compiler_dispatcher to allow access from any thread.
+  bool trace_compiler_dispatcher_;
+
+  std::unique_ptr<CompilerDispatcherTracer> tracer_;
+
+  std::unique_ptr<CancelableTaskManager> task_manager_;
+
+  // Mapping from (script id, function literal id) to job. We use a multimap,
+  // as script id is not necessarily unique.
+  JobMap jobs_;
+
+  base::AtomicValue<v8::MemoryPressureLevel> memory_pressure_level_;
+
+  // The following members can be accessed from any thread. Methods need to hold
+  // the mutex |mutex_| while accessing them.
+  base::Mutex mutex_;
+
+  // True if the dispatcher is in the process of aborting running tasks.
+  bool abort_;
+
+  bool idle_task_scheduled_;
+
+  // Number of currently scheduled BackgroundTask objects.
+  size_t num_scheduled_background_tasks_;
+
+  // The set of CompilerDispatcherJobs that can be advanced on any thread.
+  std::unordered_set<CompilerDispatcherJob*> pending_background_jobs_;
+
+  // The set of CompilerDispatcherJobs currently processed on background
+  // threads.
+  std::unordered_set<CompilerDispatcherJob*> running_background_jobs_;
+
+  // If not nullptr, then the main thread waits for the task processing
+  // this job, and blocks on the ConditionVariable main_thread_blocking_signal_.
+  CompilerDispatcherJob* main_thread_blocking_on_job_;
+  base::ConditionVariable main_thread_blocking_signal_;
+
+  // Test support.
+  base::AtomicValue<bool> block_for_testing_;
+  base::Semaphore semaphore_for_testing_;
+
+  DISALLOW_COPY_AND_ASSIGN(CompilerDispatcher);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_H_
diff --git a/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 1169506..04df928 100644
--- a/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -9,6 +9,7 @@
 #include "src/compiler.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/isolate.h"
+#include "src/objects-inl.h"
 #include "src/tracing/trace-event.h"
 #include "src/v8.h"
 
@@ -33,11 +34,11 @@
 
 class OptimizingCompileDispatcher::CompileTask : public v8::Task {
  public:
-  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
-    OptimizingCompileDispatcher* dispatcher =
-        isolate_->optimizing_compile_dispatcher();
-    base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
-    ++dispatcher->ref_count_;
+  explicit CompileTask(Isolate* isolate,
+                       OptimizingCompileDispatcher* dispatcher)
+      : isolate_(isolate), dispatcher_(dispatcher) {
+    base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
+    ++dispatcher_->ref_count_;
   }
 
   virtual ~CompileTask() {}
@@ -49,30 +50,29 @@
     DisallowHandleAllocation no_handles;
     DisallowHandleDereference no_deref;
 
-    OptimizingCompileDispatcher* dispatcher =
-        isolate_->optimizing_compile_dispatcher();
     {
       TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
 
       TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                    "V8.RecompileConcurrent");
 
-      if (dispatcher->recompilation_delay_ != 0) {
+      if (dispatcher_->recompilation_delay_ != 0) {
         base::OS::Sleep(base::TimeDelta::FromMilliseconds(
-            dispatcher->recompilation_delay_));
+            dispatcher_->recompilation_delay_));
       }
 
-      dispatcher->CompileNext(dispatcher->NextInput(true));
+      dispatcher_->CompileNext(dispatcher_->NextInput(true));
     }
     {
-      base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
-      if (--dispatcher->ref_count_ == 0) {
-        dispatcher->ref_count_zero_.NotifyOne();
+      base::LockGuard<base::Mutex> lock_guard(&dispatcher_->ref_count_mutex_);
+      if (--dispatcher_->ref_count_ == 0) {
+        dispatcher_->ref_count_zero_.NotifyOne();
       }
     }
   }
 
   Isolate* isolate_;
+  OptimizingCompileDispatcher* dispatcher_;
 
   DISALLOW_COPY_AND_ASSIGN(CompileTask);
 };
@@ -222,14 +222,14 @@
     blocked_jobs_++;
   } else {
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
+        new CompileTask(isolate_, this), v8::Platform::kShortRunningTask);
   }
 }
 
 void OptimizingCompileDispatcher::Unblock() {
   while (blocked_jobs_ > 0) {
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
+        new CompileTask(isolate_, this), v8::Platform::kShortRunningTask);
     blocked_jobs_--;
   }
 }
diff --git a/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 7e08161..5a9486d 100644
--- a/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -12,6 +12,7 @@
 #include "src/base/platform/mutex.h"
 #include "src/base/platform/platform.h"
 #include "src/flags.h"
+#include "src/globals.h"
 #include "src/list.h"
 
 namespace v8 {
@@ -20,7 +21,7 @@
 class CompilationJob;
 class SharedFunctionInfo;
 
-class OptimizingCompileDispatcher {
+class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
  public:
   enum class BlockingBehavior { kBlock, kDontBlock };
 
@@ -38,9 +39,9 @@
 
   ~OptimizingCompileDispatcher();
 
-  void Run();
   void Stop();
   void Flush(BlockingBehavior blocking_behavior);
+  // Takes ownership of |job|.
   void QueueForOptimization(CompilationJob* job);
   void Unblock();
   void InstallOptimizedFunctions();
diff --git a/src/compiler.cc b/src/compiler.cc
index 3435f53..6767c75 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -9,12 +9,14 @@
 
 #include "src/asmjs/asm-js.h"
 #include "src/asmjs/asm-typer.h"
+#include "src/assembler-inl.h"
 #include "src/ast/ast-numbering.h"
 #include "src/ast/prettyprinter.h"
 #include "src/ast/scopes.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/compilation-cache.h"
+#include "src/compiler-dispatcher/compiler-dispatcher.h"
 #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
 #include "src/compiler/pipeline.h"
 #include "src/crankshaft/hydrogen.h"
@@ -28,7 +30,7 @@
 #include "src/isolate-inl.h"
 #include "src/log-inl.h"
 #include "src/messages.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/parsing.h"
 #include "src/parsing/rewriter.h"
 #include "src/parsing/scanner-character-streams.h"
 #include "src/runtime-profiler.h"
@@ -38,7 +40,18 @@
 namespace v8 {
 namespace internal {
 
+// A wrapper around a ParseInfo that detaches the parser handles from the
+// underlying DeferredHandleScope and stores them in info_ on destruction.
+class ParseHandleScope final {
+ public:
+  explicit ParseHandleScope(ParseInfo* info)
+      : deferred_(info->isolate()), info_(info) {}
+  ~ParseHandleScope() { info_->set_deferred_handles(deferred_.Detach()); }
 
+ private:
+  DeferredHandleScope deferred_;
+  ParseInfo* info_;
+};
 
 // A wrapper around a CompilationInfo that detaches the Handles from
 // the underlying DeferredHandleScope and stores them in info_ on
@@ -70,6 +83,15 @@
 // ----------------------------------------------------------------------------
 // Implementation of CompilationJob
 
+CompilationJob::CompilationJob(Isolate* isolate, CompilationInfo* info,
+                               const char* compiler_name, State initial_state)
+    : info_(info),
+      isolate_thread_id_(isolate->thread_id()),
+      compiler_name_(compiler_name),
+      state_(initial_state),
+      stack_limit_(isolate->stack_guard()->real_climit()),
+      executed_on_background_thread_(false) {}
+
 CompilationJob::Status CompilationJob::PrepareJob() {
   DCHECK(ThreadId::Current().Equals(info()->isolate()->thread_id()));
   DisallowJavascriptExecution no_js(isolate());
@@ -98,8 +120,10 @@
     no_handles.reset(new DisallowHandleAllocation());
     no_deref.reset(new DisallowHandleDereference());
     no_dependency_change.reset(new DisallowCodeDependencyChange());
+    executed_on_background_thread_ =
+        !ThreadId::Current().Equals(isolate_thread_id_);
   } else {
-    DCHECK(ThreadId::Current().Equals(info()->isolate()->thread_id()));
+    DCHECK(ThreadId::Current().Equals(isolate_thread_id_));
   }
 
   // Delegate to the underlying implementation.
@@ -284,7 +308,7 @@
 void EnsureFeedbackMetadata(CompilationInfo* info) {
   DCHECK(info->has_shared_info());
 
-  // If no type feedback metadata exists, we create it now. At this point the
+  // If no type feedback metadata exists, create it. At this point the
   // AstNumbering pass has already run. Note the snapshot can contain outdated
   // vectors for a different configuration, hence we also recreate a new vector
   // when the function is not compiled (i.e. no code was serialized).
@@ -292,7 +316,7 @@
   // TODO(mvstanton): reintroduce is_empty() predicate to feedback_metadata().
   if (info->shared_info()->feedback_metadata()->length() == 0 ||
       !info->shared_info()->is_compiled()) {
-    Handle<TypeFeedbackMetadata> feedback_metadata = TypeFeedbackMetadata::New(
+    Handle<FeedbackMetadata> feedback_metadata = FeedbackMetadata::New(
         info->isolate(), info->literal()->feedback_vector_spec());
     info->shared_info()->set_feedback_metadata(*feedback_metadata);
   }
@@ -304,18 +328,19 @@
 }
 
 bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
-  bool optimization_disabled = shared->optimization_disabled();
-  bool dont_crankshaft = shared->dont_crankshaft();
+  if (shared->optimization_disabled()) {
+    return false;
+  }
+
+  bool must_use_ignition_turbo = shared->must_use_ignition_turbo();
 
   // Check the enabling conditions for Turbofan.
   // 1. "use asm" code.
-  bool is_turbofanable_asm =
-      FLAG_turbo_asm && shared->asm_function() && !optimization_disabled;
+  bool is_turbofanable_asm = FLAG_turbo_asm && shared->asm_function();
 
   // 2. Fallback for features unsupported by Crankshaft.
   bool is_unsupported_by_crankshaft_but_turbofanable =
-      dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0 &&
-      !optimization_disabled;
+      must_use_ignition_turbo && strcmp(FLAG_turbo_filter, "~~") == 0;
 
   // 3. Explicitly enabled by the command-line filter.
   bool passes_turbo_filter = shared->PassesFilter(FLAG_turbo_filter);
@@ -324,34 +349,57 @@
          passes_turbo_filter;
 }
 
-bool ShouldUseIgnition(CompilationInfo* info) {
-  DCHECK(info->has_shared_info());
+bool ShouldUseIgnition(Handle<SharedFunctionInfo> shared,
+                       bool marked_as_debug) {
+  // Code which can't be supported by the old pipeline should use Ignition.
+  if (shared->must_use_ignition_turbo()) return true;
+
+  // Resumable functions are not supported by {FullCodeGenerator}; suspended
+  // activations stored as {JSGeneratorObject} on the heap always assume the
+  // underlying code to be based on the bytecode array.
+  DCHECK(!IsResumableFunction(shared->kind()));
 
   // Skip Ignition for asm.js functions.
-  if (info->shared_info()->asm_function()) {
+  if (shared->asm_function()) return false;
+
+  // Skip Ignition for asm wasm code.
+  if (FLAG_validate_asm && shared->HasAsmWasmData()) {
     return false;
   }
 
   // When requesting debug code as a replacement for existing code, we provide
   // the same kind as the existing code (to prevent implicit tier-change).
-  if (info->is_debug() && info->shared_info()->is_compiled()) {
-    return !info->shared_info()->HasBaselineCode();
+  if (marked_as_debug && shared->is_compiled()) {
+    return !shared->HasBaselineCode();
   }
 
   // Code destined for TurboFan should be compiled with Ignition first.
-  if (UseTurboFan(info->shared_info())) return true;
+  if (UseTurboFan(shared)) return true;
 
   // Only use Ignition for any other function if FLAG_ignition is true.
-  if (!FLAG_ignition) return false;
+  return FLAG_ignition;
+}
 
-  // Checks whether top level functions should be passed by the filter.
-  if (info->shared_info()->is_toplevel()) {
-    Vector<const char> filter = CStrVector(FLAG_ignition_filter);
-    return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
-  }
+bool ShouldUseIgnition(CompilationInfo* info) {
+  DCHECK(info->has_shared_info());
+  return ShouldUseIgnition(info->shared_info(), info->is_debug());
+}
 
-  // Finally respect the filter.
-  return info->shared_info()->PassesFilter(FLAG_ignition_filter);
+bool UseAsmWasm(DeclarationScope* scope, Handle<SharedFunctionInfo> shared_info,
+                bool is_debug) {
+  return FLAG_validate_asm && scope->asm_module() &&
+         !shared_info->is_asm_wasm_broken() && !is_debug;
+}
+
+bool UseCompilerDispatcher(Compiler::ConcurrencyMode inner_function_mode,
+                           CompilerDispatcher* dispatcher,
+                           DeclarationScope* scope,
+                           Handle<SharedFunctionInfo> shared_info,
+                           bool is_debug, bool will_serialize) {
+  return FLAG_compiler_dispatcher_eager_inner &&
+         inner_function_mode == Compiler::CONCURRENT &&
+         dispatcher->IsEnabled() && !is_debug && !will_serialize &&
+         !UseAsmWasm(scope, shared_info, is_debug);
 }
 
 CompilationJob* GetUnoptimizedCompilationJob(CompilationInfo* info) {
@@ -360,7 +408,6 @@
   DCHECK_NOT_NULL(info->literal());
   DCHECK_NOT_NULL(info->scope());
 
-  EnsureFeedbackMetadata(info);
   if (ShouldUseIgnition(info)) {
     return interpreter::Interpreter::NewCompilationJob(info);
   } else {
@@ -407,18 +454,51 @@
 CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
   CompilationJob::Status status = job->FinalizeJob();
   if (status == CompilationJob::SUCCEEDED) {
-    InstallUnoptimizedCode(job->info());
+    CompilationInfo* info = job->info();
+    EnsureFeedbackMetadata(info);
+    DCHECK(!info->code().is_null());
+    if (info->parse_info()->literal()->should_be_used_once_hint()) {
+      info->code()->MarkToBeExecutedOnce(info->isolate());
+    }
+    InstallUnoptimizedCode(info);
+    RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, info);
     job->RecordUnoptimizedCompilationStats();
   }
   return status;
 }
 
+void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
+                                       Handle<SharedFunctionInfo> shared_info) {
+  shared_info->set_ast_node_count(literal->ast_node_count());
+  if (literal->dont_optimize_reason() != kNoReason) {
+    shared_info->DisableOptimization(literal->dont_optimize_reason());
+  }
+  if (literal->flags() & AstProperties::kMustUseIgnitionTurbo) {
+    shared_info->set_must_use_ignition_turbo(true);
+  }
+}
+
+bool Renumber(ParseInfo* parse_info,
+              Compiler::EagerInnerFunctionLiterals* eager_literals) {
+  RuntimeCallTimerScope runtimeTimer(parse_info->isolate(),
+                                     &RuntimeCallStats::CompileRenumber);
+  if (!AstNumbering::Renumber(
+          parse_info->isolate()->stack_guard()->real_climit(),
+          parse_info->zone(), parse_info->literal(), eager_literals)) {
+    return false;
+  }
+  if (!parse_info->shared_info().is_null()) {
+    SetSharedFunctionFlagsFromLiteral(parse_info->literal(),
+                                      parse_info->shared_info());
+  }
+  return true;
+}
+
 bool GenerateUnoptimizedCode(CompilationInfo* info) {
-  if (FLAG_validate_asm && info->scope()->asm_module() &&
-      !info->shared_info()->is_asm_wasm_broken() && !info->is_debug()) {
+  if (UseAsmWasm(info->scope(), info->shared_info(), info->is_debug())) {
     EnsureFeedbackMetadata(info);
     MaybeHandle<FixedArray> wasm_data;
-    wasm_data = AsmJs::ConvertAsmToWasm(info->parse_info());
+    wasm_data = AsmJs::CompileAsmViaWasm(info);
     if (!wasm_data.is_null()) {
       info->shared_info()->set_asm_wasm_data(*wasm_data.ToHandleChecked());
       info->SetCode(info->isolate()->builtins()->InstantiateAsmJs());
@@ -437,28 +517,164 @@
   return true;
 }
 
-bool CompileUnoptimizedCode(CompilationInfo* info) {
-  DCHECK(AllowCompilation::IsAllowed(info->isolate()));
-  if (!Compiler::Analyze(info->parse_info()) ||
-      !GenerateUnoptimizedCode(info)) {
-    Isolate* isolate = info->isolate();
-    if (!isolate->has_pending_exception()) isolate->StackOverflow();
-    return false;
+bool CompileUnoptimizedInnerFunctions(
+    Compiler::EagerInnerFunctionLiterals* literals,
+    Compiler::ConcurrencyMode inner_function_mode,
+    std::shared_ptr<Zone> parse_zone, CompilationInfo* outer_info) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
+               "V8.CompileUnoptimizedInnerFunctions");
+  Isolate* isolate = outer_info->isolate();
+  Handle<Script> script = outer_info->script();
+  bool is_debug = outer_info->is_debug();
+  bool will_serialize = outer_info->will_serialize();
+  RuntimeCallTimerScope runtimeTimer(isolate,
+                                     &RuntimeCallStats::CompileInnerFunction);
+
+  for (auto it : *literals) {
+    FunctionLiteral* literal = it->value();
+    Handle<SharedFunctionInfo> shared =
+        Compiler::GetSharedFunctionInfo(literal, script, outer_info);
+    if (shared->is_compiled()) continue;
+
+    // The {literal} has already been numbered because AstNumbering descends
+    // into
+    // eagerly compiled function literals.
+    SetSharedFunctionFlagsFromLiteral(literal, shared);
+
+    // Try to enqueue the eager function on the compiler dispatcher.
+    CompilerDispatcher* dispatcher = isolate->compiler_dispatcher();
+    if (UseCompilerDispatcher(inner_function_mode, dispatcher, literal->scope(),
+                              shared, is_debug, will_serialize) &&
+        dispatcher->EnqueueAndStep(outer_info->script(), shared, literal,
+                                   parse_zone,
+                                   outer_info->parse_info()->deferred_handles(),
+                                   outer_info->deferred_handles())) {
+      // If we have successfully queued up the function for compilation on the
+      // compiler dispatcher, then we are done.
+      continue;
+    } else {
+      // Otherwise generate unoptimized code now.
+      ParseInfo parse_info(script);
+      CompilationInfo info(parse_info.zone(), &parse_info,
+                           Handle<JSFunction>::null());
+
+      parse_info.set_literal(literal);
+      parse_info.set_shared_info(shared);
+      parse_info.set_function_literal_id(shared->function_literal_id());
+      parse_info.set_language_mode(literal->scope()->language_mode());
+      parse_info.set_ast_value_factory(
+          outer_info->parse_info()->ast_value_factory());
+      parse_info.set_ast_value_factory_owned(false);
+
+      if (will_serialize) info.PrepareForSerializing();
+      if (is_debug) info.MarkAsDebug();
+
+      if (!GenerateUnoptimizedCode(&info)) {
+        if (!isolate->has_pending_exception()) isolate->StackOverflow();
+        return false;
+      }
+    }
   }
   return true;
 }
 
-MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
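+// Returns true if any of the given inner function literals is an asm.js
+// module; CompileUnoptimizedCode uses this to disable concurrent compilation
+// of inner functions for asm-wasm code.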
+bool InnerFunctionIsAsmModule(
+    ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* literals) {
+  for (auto it : *literals) {
+    FunctionLiteral* literal = it->value();
+    if (literal->scope()->IsAsmModule()) return true;
+  }
+  return false;
+}
+
+bool CompileUnoptimizedCode(CompilationInfo* info,
+                            Compiler::ConcurrencyMode inner_function_mode) {
+  Isolate* isolate = info->isolate();
+  DCHECK(AllowCompilation::IsAllowed(isolate));
+
+  Compiler::EagerInnerFunctionLiterals inner_literals;
+  {
+    std::unique_ptr<CompilationHandleScope> compilation_handle_scope;
+    if (inner_function_mode == Compiler::CONCURRENT) {
+      compilation_handle_scope.reset(new CompilationHandleScope(info));
+    }
+    if (!Compiler::Analyze(info->parse_info(), &inner_literals)) {
+      if (!isolate->has_pending_exception()) isolate->StackOverflow();
+      return false;
+    }
+  }
+
+  // Disable concurrent inner compilation for asm-wasm code.
+  // TODO(rmcilroy,bradnelson): Remove this AsmWasm check once the asm-wasm
+  // builder doesn't do parsing when visiting function declarations.
+  if (info->scope()->IsAsmModule() ||
+      InnerFunctionIsAsmModule(&inner_literals)) {
+    inner_function_mode = Compiler::NOT_CONCURRENT;
+  }
+
+  std::shared_ptr<Zone> parse_zone;
+  if (inner_function_mode == Compiler::CONCURRENT) {
+    // Seal the parse zone so that it can be shared by parallel inner function
+    // compilation jobs.
+    DCHECK_NE(info->parse_info()->zone(), info->zone());
+    parse_zone = info->parse_info()->zone_shared();
+    parse_zone->Seal();
+  }
+
+  if (!CompileUnoptimizedInnerFunctions(&inner_literals, inner_function_mode,
+                                        parse_zone, info) ||
+      !GenerateUnoptimizedCode(info)) {
+    if (!isolate->has_pending_exception()) isolate->StackOverflow();
+    return false;
+  }
+
+  return true;
+}
+
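+// Makes sure the script owns a shared_function_infos array with one slot per
+// function literal, i.e. max_function_literal_id() + 1 entries.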
+void EnsureSharedFunctionInfosArrayOnScript(ParseInfo* info) {
+  DCHECK(info->is_toplevel());
+  DCHECK(!info->script().is_null());
+  if (info->script()->shared_function_infos()->length() > 0) {
+    DCHECK_EQ(info->script()->shared_function_infos()->length(),
+              info->max_function_literal_id() + 1);
+    return;
+  }
+  Isolate* isolate = info->isolate();
+  Handle<FixedArray> infos(
+      isolate->factory()->NewFixedArray(info->max_function_literal_id() + 1));
+  info->script()->set_shared_function_infos(*infos);
+}
+
+MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(
+    CompilationInfo* info, Compiler::ConcurrencyMode inner_function_mode) {
+  RuntimeCallTimerScope runtimeTimer(
+      info->isolate(), &RuntimeCallStats::CompileGetUnoptimizedCode);
   VMState<COMPILER> state(info->isolate());
   PostponeInterruptsScope postpone(info->isolate());
 
-  // Parse and update CompilationInfo with the results.
-  if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
+  // Parse and update ParseInfo with the results.
+  {
+    if (!parsing::ParseAny(info->parse_info(),
+                           inner_function_mode != Compiler::CONCURRENT)) {
+      return MaybeHandle<Code>();
+    }
+
+    if (inner_function_mode == Compiler::CONCURRENT) {
+      ParseHandleScope parse_handles(info->parse_info());
+      info->parse_info()->ReopenHandlesInNewHandleScope();
+      info->parse_info()->ast_value_factory()->Internalize(info->isolate());
+    }
+  }
+
+  if (info->parse_info()->is_toplevel()) {
+    EnsureSharedFunctionInfosArrayOnScript(info->parse_info());
+  }
   DCHECK_EQ(info->shared_info()->language_mode(),
             info->literal()->language_mode());
 
   // Compile either unoptimized code or bytecode for the interpreter.
-  if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
+  if (!CompileUnoptimizedCode(info, inner_function_mode)) {
+    return MaybeHandle<Code>();
+  }
 
   // Record the function compilation event.
   RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
@@ -468,16 +684,18 @@
 
 MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
     Handle<JSFunction> function, BailoutId osr_ast_id) {
+  RuntimeCallTimerScope runtimeTimer(
+      function->GetIsolate(),
+      &RuntimeCallStats::CompileGetFromOptimizedCodeMap);
   Handle<SharedFunctionInfo> shared(function->shared());
   DisallowHeapAllocation no_gc;
-  CodeAndLiterals cached = shared->SearchOptimizedCodeMap(
+  Code* code = shared->SearchOptimizedCodeMap(
       function->context()->native_context(), osr_ast_id);
-  if (cached.code != nullptr) {
+  if (code != nullptr) {
     // Caching of optimized code enabled and optimized code found.
-    if (cached.literals != nullptr) function->set_literals(cached.literals);
-    DCHECK(!cached.code->marked_for_deoptimization());
+    DCHECK(!code->marked_for_deoptimization());
     DCHECK(function->shared()->is_compiled());
-    return Handle<Code>(cached.code);
+    return Handle<Code>(code);
   }
   return MaybeHandle<Code>();
 }
@@ -499,29 +717,9 @@
   // Cache optimized context-specific code.
   Handle<JSFunction> function = info->closure();
   Handle<SharedFunctionInfo> shared(function->shared());
-  Handle<LiteralsArray> literals(function->literals());
   Handle<Context> native_context(function->context()->native_context());
   SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
-                                            literals, info->osr_ast_id());
-}
-
-bool Renumber(ParseInfo* parse_info) {
-  if (!AstNumbering::Renumber(parse_info->isolate(), parse_info->zone(),
-                              parse_info->literal())) {
-    return false;
-  }
-  Handle<SharedFunctionInfo> shared_info = parse_info->shared_info();
-  if (!shared_info.is_null()) {
-    FunctionLiteral* lit = parse_info->literal();
-    shared_info->set_ast_node_count(lit->ast_node_count());
-    if (lit->dont_optimize_reason() != kNoReason) {
-      shared_info->DisableOptimization(lit->dont_optimize_reason());
-    }
-    if (lit->flags() & AstProperties::kDontCrankshaft) {
-      shared_info->set_dont_crankshaft(true);
-    }
-  }
-  return true;
+                                            info->osr_ast_id());
 }
 
 bool GetOptimizedCodeNow(CompilationJob* job) {
@@ -640,6 +838,7 @@
   }
 
   // Reset profiler ticks, function is no longer considered hot.
+  DCHECK(shared->is_compiled());
   if (shared->HasBaselineCode()) {
     shared->code()->set_profiler_ticks(0);
   } else if (shared->HasBytecodeArray()) {
@@ -650,8 +849,12 @@
   DCHECK(!isolate->has_pending_exception());
   PostponeInterruptsScope postpone(isolate);
   bool use_turbofan = UseTurboFan(shared) || ignition_osr;
+  bool has_script = shared->script()->IsScript();
+  // BUG(5946): This DCHECK makes certain that functions without a script are
+  // only tolerated if they are compiled with bytecode (via Ignition).
+  DCHECK_IMPLIES(!has_script, ShouldUseIgnition(shared, false));
   std::unique_ptr<CompilationJob> job(
-      use_turbofan ? compiler::Pipeline::NewCompilationJob(function)
+      use_turbofan ? compiler::Pipeline::NewCompilationJob(function, has_script)
                    : new HCompilationJob(function));
   CompilationInfo* info = job->info();
   ParseInfo* parse_info = info->parse_info();
@@ -668,7 +871,7 @@
   const int kMaxOptCount =
       FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
   if (info->shared_info()->opt_count() > kMaxOptCount) {
-    info->AbortOptimization(kOptimizedTooManyTimes);
+    info->AbortOptimization(kDeoptimizedTooManyTimes);
     return MaybeHandle<Code>();
   }
 
@@ -679,10 +882,7 @@
   // TurboFan can optimize directly from existing bytecode.
   if (use_turbofan && ShouldUseIgnition(info)) {
     if (info->is_osr() && !ignition_osr) return MaybeHandle<Code>();
-    if (!Compiler::EnsureBytecode(info)) {
-      if (isolate->has_pending_exception()) isolate->clear_pending_exception();
-      return MaybeHandle<Code>();
-    }
+    DCHECK(shared->HasBytecodeArray());
     info->MarkAsOptimizeFromBytecode();
   }
 
@@ -759,10 +959,8 @@
     } else if (job->FinalizeJob() == CompilationJob::SUCCEEDED) {
       job->RecordOptimizedCompilationStats();
       RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
-      if (shared
-              ->SearchOptimizedCodeMap(info->context()->native_context(),
-                                       info->osr_ast_id())
-              .code == nullptr) {
+      if (shared->SearchOptimizedCodeMap(info->context()->native_context(),
+                                         info->osr_ast_id()) == nullptr) {
         InsertCodeIntoOptimizedCodeMap(info);
       }
       if (FLAG_trace_opt) {
@@ -789,9 +987,10 @@
   Isolate* isolate = function->GetIsolate();
   VMState<COMPILER> state(isolate);
   PostponeInterruptsScope postpone(isolate);
-  Zone zone(isolate->allocator(), ZONE_NAME);
-  ParseInfo parse_info(&zone, handle(function->shared()));
-  CompilationInfo info(&parse_info, function);
+  ParseInfo parse_info(handle(function->shared()));
+  CompilationInfo info(parse_info.zone(), &parse_info, function);
+
+  DCHECK(function->shared()->is_compiled());
 
   // Function no longer needs to be tiered up
   function->shared()->set_marked_for_tier_up(false);
@@ -812,13 +1011,11 @@
     return MaybeHandle<Code>();
   }
 
-  // TODO(4280): For now we do not switch generators or async functions to
-  // baseline code because there might be suspended activations stored in
-  // generator objects on the heap. We could eventually go directly to
-  // TurboFan in this case.
-  if (IsResumableFunction(function->shared()->kind())) {
+  // Don't generate full-codegen code for functions full-codegen can't support.
+  if (function->shared()->must_use_ignition_turbo()) {
     return MaybeHandle<Code>();
   }
+  DCHECK(!IsResumableFunction(function->shared()->kind()));
 
   if (FLAG_trace_opt) {
     OFStream os(stdout);
@@ -827,7 +1024,7 @@
   }
 
   // Parse and update CompilationInfo with the results.
-  if (!Parser::ParseStatic(info.parse_info())) return MaybeHandle<Code>();
+  if (!parsing::ParseFunction(info.parse_info())) return MaybeHandle<Code>();
   Handle<SharedFunctionInfo> shared = info.shared_info();
   DCHECK_EQ(shared->language_mode(), info.literal()->language_mode());
 
@@ -856,7 +1053,7 @@
   DCHECK(!function->is_compiled());
   TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
   RuntimeCallTimerScope runtimeTimer(isolate,
-                                     &RuntimeCallStats::CompileCodeLazy);
+                                     &RuntimeCallStats::CompileFunction);
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
   AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
 
@@ -872,24 +1069,18 @@
     return cached_code;
   }
 
-  if (function->shared()->marked_for_tier_up()) {
+  if (function->shared()->is_compiled() &&
+      function->shared()->marked_for_tier_up()) {
     DCHECK(FLAG_mark_shared_functions_for_tier_up);
 
     function->shared()->set_marked_for_tier_up(false);
 
     switch (Compiler::NextCompilationTier(*function)) {
       case Compiler::BASELINE: {
-        if (FLAG_trace_opt) {
-          PrintF("[recompiling function ");
-          function->ShortPrint();
-          PrintF(
-              " to baseline eagerly (shared function marked for tier up)]\n");
-        }
-
-        Handle<Code> code;
-        if (GetBaselineCode(function).ToHandle(&code)) {
-          return code;
-        }
+        // We don't try to handle baseline here because GetBaselineCode()
+        // doesn't handle top-level code. We aren't supporting
+        // the hybrid pipeline going forward (where Ignition is a first
+        // tier followed by full-code).
         break;
       }
       case Compiler::OPTIMIZED: {
@@ -922,13 +1113,14 @@
     return entry;
   }
 
-  Zone zone(isolate->allocator(), ZONE_NAME);
-  ParseInfo parse_info(&zone, handle(function->shared()));
-  CompilationInfo info(&parse_info, function);
+  ParseInfo parse_info(handle(function->shared()));
+  Zone compile_zone(isolate->allocator(), ZONE_NAME);
+  CompilationInfo info(&compile_zone, &parse_info, function);
   Handle<Code> result;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCode(&info), Code);
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, result, GetUnoptimizedCode(&info, Compiler::CONCURRENT), Code);
 
-  if (FLAG_always_opt) {
+  if (FLAG_always_opt && !info.shared_info()->HasAsmWasmData()) {
     Handle<Code> opt_code;
     if (GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
             .ToHandle(&opt_code)) {
@@ -940,49 +1132,44 @@
 }
 
 
-Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
-    Isolate* isolate, FunctionLiteral* literal, Handle<Script> script) {
-  Handle<Code> code = isolate->builtins()->CompileLazy();
-  Handle<ScopeInfo> scope_info = handle(ScopeInfo::Empty(isolate));
-  Handle<SharedFunctionInfo> result = isolate->factory()->NewSharedFunctionInfo(
-      literal->name(), literal->materialized_literal_count(), literal->kind(),
-      code, scope_info);
-  SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
-  SharedFunctionInfo::SetScript(result, script);
-  return result;
-}
-
 Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
   TimerEventScope<TimerEventCompileCode> timer(isolate);
-  RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::CompileCode);
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
   PostponeInterruptsScope postpone(isolate);
   DCHECK(!isolate->native_context().is_null());
   ParseInfo* parse_info = info->parse_info();
+
+  RuntimeCallTimerScope runtimeTimer(
+      isolate, parse_info->is_eval() ? &RuntimeCallStats::CompileEval
+                                     : &RuntimeCallStats::CompileScript);
+
   Handle<Script> script = parse_info->script();
 
   // TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
   FixedArray* array = isolate->native_context()->embedder_data();
   script->set_context_data(array->get(v8::Context::kDebugIdIndex));
 
-  isolate->debug()->OnBeforeCompile(script);
-
   Handle<SharedFunctionInfo> result;
 
   { VMState<COMPILER> state(info->isolate());
-    if (parse_info->literal() == nullptr && !Parser::ParseStatic(parse_info)) {
-      return Handle<SharedFunctionInfo>::null();
+    if (parse_info->literal() == nullptr) {
+      if (!parsing::ParseProgram(parse_info, false)) {
+        return Handle<SharedFunctionInfo>::null();
+      }
+
+      {
+        ParseHandleScope parse_handles(parse_info);
+        parse_info->ReopenHandlesInNewHandleScope();
+        parse_info->ast_value_factory()->Internalize(info->isolate());
+      }
     }
 
-    FunctionLiteral* lit = parse_info->literal();
+    EnsureSharedFunctionInfosArrayOnScript(parse_info);
 
     // Measure how long it takes to do the compilation; only take the
     // rest of the function into account to avoid overlap with the
     // parsing statistics.
-    RuntimeCallTimerScope runtimeTimer(
-        isolate, parse_info->is_eval() ? &RuntimeCallStats::CompileEval
-                                       : &RuntimeCallStats::Compile);
     HistogramTimer* rate = parse_info->is_eval()
                                ? info->isolate()->counters()->compile_eval()
                                : info->isolate()->counters()->compile();
@@ -991,13 +1178,15 @@
                  parse_info->is_eval() ? "V8.CompileEval" : "V8.Compile");
 
     // Allocate a shared function info object.
+    FunctionLiteral* lit = parse_info->literal();
     DCHECK_EQ(kNoSourcePosition, lit->function_token_position());
-    result = NewSharedFunctionInfoForLiteral(isolate, lit, script);
+    result = isolate->factory()->NewSharedFunctionInfoForLiteral(lit, script);
     result->set_is_toplevel(true);
     parse_info->set_shared_info(result);
+    parse_info->set_function_literal_id(result->function_literal_id());
 
     // Compile the code.
-    if (!CompileUnoptimizedCode(info)) {
+    if (!CompileUnoptimizedCode(info, Compiler::CONCURRENT)) {
       return Handle<SharedFunctionInfo>::null();
     }
 
@@ -1025,17 +1214,23 @@
 // ----------------------------------------------------------------------------
 // Implementation of Compiler
 
-bool Compiler::Analyze(ParseInfo* info) {
+bool Compiler::Analyze(ParseInfo* info,
+                       EagerInnerFunctionLiterals* eager_literals) {
   DCHECK_NOT_NULL(info->literal());
+  RuntimeCallTimerScope runtimeTimer(info->isolate(),
+                                     &RuntimeCallStats::CompileAnalyse);
   if (!Rewriter::Rewrite(info)) return false;
   DeclarationScope::Analyze(info, AnalyzeMode::kRegular);
-  if (!Renumber(info)) return false;
+  if (!Renumber(info, eager_literals)) {
+    return false;
+  }
   DCHECK_NOT_NULL(info->scope());
   return true;
 }
 
 bool Compiler::ParseAndAnalyze(ParseInfo* info) {
-  if (!Parser::ParseStatic(info)) return false;
+  if (!parsing::ParseAny(info)) return false;
+  if (info->is_toplevel()) EnsureSharedFunctionInfosArrayOnScript(info);
   if (!Compiler::Analyze(info)) return false;
   DCHECK_NOT_NULL(info->literal());
   DCHECK_NOT_NULL(info->scope());
@@ -1047,13 +1242,25 @@
   Isolate* isolate = function->GetIsolate();
   DCHECK(AllowCompilation::IsAllowed(isolate));
 
-  // Start a compilation.
+  CompilerDispatcher* dispatcher = isolate->compiler_dispatcher();
+  Handle<SharedFunctionInfo> shared(function->shared(), isolate);
   Handle<Code> code;
-  if (!GetLazyCode(function).ToHandle(&code)) {
-    if (flag == CLEAR_EXCEPTION) {
-      isolate->clear_pending_exception();
+  if (dispatcher->IsEnqueued(shared)) {
+    if (!dispatcher->FinishNow(shared)) {
+      if (flag == CLEAR_EXCEPTION) {
+        isolate->clear_pending_exception();
+      }
+      return false;
     }
-    return false;
+    code = handle(shared->code(), isolate);
+  } else {
+    // Start a compilation.
+    if (!GetLazyCode(function).ToHandle(&code)) {
+      if (flag == CLEAR_EXCEPTION) {
+        isolate->clear_pending_exception();
+      }
+      return false;
+    }
   }
 
   // Install code on closure.
@@ -1100,21 +1307,11 @@
   // Start a compilation.
   Handle<Code> code;
   if (!GetOptimizedCode(function, mode).ToHandle(&code)) {
-    // Optimization failed, get unoptimized code.
+    // Optimization failed; get unoptimized code. Unoptimized code must already
+    // exist if we are optimizing.
     DCHECK(!isolate->has_pending_exception());
-    if (function->shared()->is_compiled()) {
-      code = handle(function->shared()->code(), isolate);
-    } else if (function->shared()->HasBytecodeArray()) {
-      code = isolate->builtins()->InterpreterEntryTrampoline();
-      function->shared()->ReplaceCode(*code);
-    } else {
-      Zone zone(isolate->allocator(), ZONE_NAME);
-      ParseInfo parse_info(&zone, handle(function->shared()));
-      CompilationInfo info(&parse_info, function);
-      if (!GetUnoptimizedCode(&info).ToHandle(&code)) {
-        return false;
-      }
-    }
+    DCHECK(function->shared()->is_compiled());
+    code = handle(function->shared()->code(), isolate);
   }
 
   // Install code on closure.
@@ -1133,11 +1330,11 @@
   DCHECK(AllowCompilation::IsAllowed(isolate));
 
   // Start a compilation.
-  Zone zone(isolate->allocator(), ZONE_NAME);
-  ParseInfo parse_info(&zone, shared);
-  CompilationInfo info(&parse_info, Handle<JSFunction>::null());
+  ParseInfo parse_info(shared);
+  CompilationInfo info(parse_info.zone(), &parse_info,
+                       Handle<JSFunction>::null());
   info.MarkAsDebug();
-  if (GetUnoptimizedCode(&info).is_null()) {
+  if (GetUnoptimizedCode(&info, Compiler::NOT_CONCURRENT).is_null()) {
     isolate->clear_pending_exception();
     return false;
   }
@@ -1156,13 +1353,14 @@
   // In order to ensure that live edit function info collection finds the newly
   // generated shared function infos, clear the script's list temporarily
   // and restore it at the end of this method.
-  Handle<Object> old_function_infos(script->shared_function_infos(), isolate);
-  script->set_shared_function_infos(Smi::kZero);
+  Handle<FixedArray> old_function_infos(script->shared_function_infos(),
+                                        isolate);
+  script->set_shared_function_infos(isolate->heap()->empty_fixed_array());
 
   // Start a compilation.
-  Zone zone(isolate->allocator(), ZONE_NAME);
-  ParseInfo parse_info(&zone, script);
-  CompilationInfo info(&parse_info, Handle<JSFunction>::null());
+  ParseInfo parse_info(script);
+  Zone compile_zone(isolate->allocator(), ZONE_NAME);
+  CompilationInfo info(&compile_zone, &parse_info, Handle<JSFunction>::null());
   info.MarkAsDebug();
 
   // TODO(635): support extensions.
@@ -1172,7 +1370,7 @@
     // Check postconditions on success.
     DCHECK(!isolate->has_pending_exception());
     infos = LiveEditFunctionTracker::Collect(parse_info.literal(), script,
-                                             &zone, isolate);
+                                             parse_info.zone(), isolate);
   }
 
   // Restore the original function info list in order to remain side-effect
@@ -1184,21 +1382,20 @@
 }
 
 bool Compiler::EnsureBytecode(CompilationInfo* info) {
-  if (!ShouldUseIgnition(info)) return false;
-  if (!info->shared_info()->HasBytecodeArray()) {
-    Handle<Code> original_code(info->shared_info()->code());
-    if (GetUnoptimizedCode(info).is_null()) return false;
-    if (info->shared_info()->HasAsmWasmData()) return false;
-    DCHECK(info->shared_info()->is_compiled());
-    if (original_code->kind() == Code::FUNCTION) {
-      // Generating bytecode will install the {InterpreterEntryTrampoline} as
-      // shared code on the function. To avoid an implicit tier down we restore
-      // original baseline code in case it existed beforehand.
-      info->shared_info()->ReplaceCode(*original_code);
+  if (!info->shared_info()->is_compiled()) {
+    CompilerDispatcher* dispatcher = info->isolate()->compiler_dispatcher();
+    if (dispatcher->IsEnqueued(info->shared_info())) {
+      if (!dispatcher->FinishNow(info->shared_info())) return false;
+    } else if (GetUnoptimizedCode(info, Compiler::NOT_CONCURRENT).is_null()) {
+      return false;
     }
   }
-  DCHECK(info->shared_info()->HasBytecodeArray());
-  return true;
+  DCHECK(info->shared_info()->is_compiled());
+
+  if (info->shared_info()->HasAsmWasmData()) return false;
+
+  DCHECK_EQ(ShouldUseIgnition(info), info->shared_info()->HasBytecodeArray());
+  return info->shared_info()->HasBytecodeArray();
 }
 
 // TODO(turbofan): In the future, unoptimized code with deopt support could
@@ -1207,16 +1404,21 @@
   DCHECK_NOT_NULL(info->literal());
   DCHECK_NOT_NULL(info->scope());
   Handle<SharedFunctionInfo> shared = info->shared_info();
+
+  CompilerDispatcher* dispatcher = info->isolate()->compiler_dispatcher();
+  if (dispatcher->IsEnqueued(shared)) {
+    if (!dispatcher->FinishNow(shared)) return false;
+  }
+
   if (!shared->has_deoptimization_support()) {
-    Zone zone(info->isolate()->allocator(), ZONE_NAME);
-    CompilationInfo unoptimized(info->parse_info(), info->closure());
+    Zone compile_zone(info->isolate()->allocator(), ZONE_NAME);
+    CompilationInfo unoptimized(&compile_zone, info->parse_info(),
+                                info->closure());
     unoptimized.EnableDeoptimizationSupport();
 
-    // TODO(4280): For now we do not switch generators or async functions to
-    // baseline code because there might be suspended activations stored in
-    // generator objects on the heap. We could eventually go directly to
-    // TurboFan in this case.
-    if (IsResumableFunction(shared->kind())) return false;
+    // Don't generate full-codegen code for functions that full-codegen
+    // can't support.
+    if (shared->must_use_ignition_turbo()) return false;
+    DCHECK(!IsResumableFunction(shared->kind()));
 
     // When we call PrepareForSerializing below, we will change the shared
     // ParseInfo. Make sure to reset it.
@@ -1230,6 +1432,14 @@
       unoptimized.PrepareForSerializing();
     }
     EnsureFeedbackMetadata(&unoptimized);
+
+    // Ensure we generate and install bytecode first if the function should
+    // use Ignition, to avoid an implicit tier-down.
+    if (!shared->is_compiled() && ShouldUseIgnition(info) &&
+        !GenerateUnoptimizedCode(info)) {
+      return false;
+    }
+
     if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
 
     info->parse_info()->set_will_serialize(old_will_serialize_value);
@@ -1267,24 +1477,50 @@
 MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
     Handle<String> source, Handle<SharedFunctionInfo> outer_info,
     Handle<Context> context, LanguageMode language_mode,
-    ParseRestriction restriction, int eval_scope_position, int eval_position,
-    int line_offset, int column_offset, Handle<Object> script_name,
+    ParseRestriction restriction, int parameters_end_pos,
+    int eval_scope_position, int eval_position, int line_offset,
+    int column_offset, Handle<Object> script_name,
     ScriptOriginOptions options) {
   Isolate* isolate = source->GetIsolate();
   int source_length = source->length();
   isolate->counters()->total_eval_size()->Increment(source_length);
   isolate->counters()->total_compile_size()->Increment(source_length);
 
+  // The cache lookup key needs to be aware of the separation between the
+  // parameters and the body to prevent this valid invocation:
+  //   Function("", "function anonymous(\n/**/) {\n}");
+  // from adding an entry that falsely approves this invalid invocation:
+  //   Function("\n/**/) {\nfunction anonymous(", "}");
+  // The actual eval_scope_position for indirect eval and CreateDynamicFunction
+  // is unused (always 0), so that field is free to encode this separation. To
+  // make sure we're not causing other false hits, we negate the scope
+  // position.
+  int position = eval_scope_position;
+  if (FLAG_harmony_function_tostring &&
+      restriction == ONLY_SINGLE_FUNCTION_LITERAL &&
+      parameters_end_pos != kNoSourcePosition) {
+    // Use parameters_end_pos as the eval_scope_position in the eval cache.
+    DCHECK_EQ(eval_scope_position, 0);
+    position = -parameters_end_pos;
+  }
   CompilationCache* compilation_cache = isolate->compilation_cache();
-  MaybeHandle<SharedFunctionInfo> maybe_shared_info =
-      compilation_cache->LookupEval(source, outer_info, context, language_mode,
-                                    eval_scope_position);
+  InfoVectorPair eval_result = compilation_cache->LookupEval(
+      source, outer_info, context, language_mode, position);
   Handle<SharedFunctionInfo> shared_info;
+  if (eval_result.has_shared()) {
+    shared_info = Handle<SharedFunctionInfo>(eval_result.shared(), isolate);
+  }
+  Handle<Cell> vector;
+  if (eval_result.has_vector()) {
+    vector = Handle<Cell>(eval_result.vector(), isolate);
+  }
 
   Handle<Script> script;
-  if (!maybe_shared_info.ToHandle(&shared_info)) {
+  if (!eval_result.has_shared()) {
     script = isolate->factory()->NewScript(source);
-    if (FLAG_trace_deopt) Script::InitLineEnds(script);
+    if (isolate->NeedsSourcePositionsForProfiling()) {
+      Script::InitLineEnds(script);
+    }
     if (!script_name.is_null()) {
       script->set_name(*script_name);
       script->set_line_offset(line_offset);
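The negation trick above works because a genuine eval_scope_position is never negative, while indirect eval and CreateDynamicFunction always pass 0, so a negated parameters_end_pos occupies a key range that cannot collide with real scope positions. Pulled out of the hunk, the key computation reads as follows (the helper name is illustrative):

// Illustrative helper mirroring the cache-key computation in
// GetFunctionFromEval. Keys < 0 encode the parameters/body boundary of
// CreateDynamicFunction sources; keys >= 0 are real eval scope positions.
int EvalCacheKeyPosition(ParseRestriction restriction, int eval_scope_position,
                         int parameters_end_pos) {
  if (FLAG_harmony_function_tostring &&
      restriction == ONLY_SINGLE_FUNCTION_LITERAL &&
      parameters_end_pos != kNoSourcePosition) {
    DCHECK_EQ(eval_scope_position, 0);
    return -parameters_end_pos;
  }
  return eval_scope_position;
}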
@@ -1294,32 +1530,51 @@
     script->set_compilation_type(Script::COMPILATION_TYPE_EVAL);
     Script::SetEvalOrigin(script, outer_info, eval_position);
 
-    Zone zone(isolate->allocator(), ZONE_NAME);
-    ParseInfo parse_info(&zone, script);
-    CompilationInfo info(&parse_info, Handle<JSFunction>::null());
+    ParseInfo parse_info(script);
+    Zone compile_zone(isolate->allocator(), ZONE_NAME);
+    CompilationInfo info(&compile_zone, &parse_info,
+                         Handle<JSFunction>::null());
     parse_info.set_eval();
     parse_info.set_language_mode(language_mode);
     parse_info.set_parse_restriction(restriction);
+    parse_info.set_parameters_end_pos(parameters_end_pos);
     if (!context->IsNativeContext()) {
       parse_info.set_outer_scope_info(handle(context->scope_info()));
     }
 
     shared_info = CompileToplevel(&info);
-
     if (shared_info.is_null()) {
       return MaybeHandle<JSFunction>();
-    } else {
-      // If caller is strict mode, the result must be in strict mode as well.
-      DCHECK(is_sloppy(language_mode) ||
-             is_strict(shared_info->language_mode()));
-      compilation_cache->PutEval(source, outer_info, context, shared_info,
-                                 eval_scope_position);
     }
   }
 
-  Handle<JSFunction> result =
-      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+  // If caller is strict mode, the result must be in strict mode as well.
+  DCHECK(is_sloppy(language_mode) || is_strict(shared_info->language_mode()));
+
+  Handle<JSFunction> result;
+  if (eval_result.has_shared()) {
+    if (eval_result.has_vector()) {
+      result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          shared_info, context, vector, NOT_TENURED);
+    } else {
+      result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
           shared_info, context, NOT_TENURED);
+      JSFunction::EnsureLiterals(result);
+      // Make sure to cache this result.
+      Handle<Cell> new_vector(result->feedback_vector_cell(), isolate);
+      compilation_cache->PutEval(source, outer_info, context, shared_info,
+                                 new_vector, eval_scope_position);
+    }
+  } else {
+    result = isolate->factory()->NewFunctionFromSharedFunctionInfo(
+        shared_info, context, NOT_TENURED);
+    JSFunction::EnsureLiterals(result);
+    // Add the SharedFunctionInfo and the feedback vector to the eval cache
+    // if we didn't retrieve them from there.
+    Handle<Cell> vector(result->feedback_vector_cell(), isolate);
+    compilation_cache->PutEval(source, outer_info, context, shared_info, vector,
+                               eval_scope_position);
+  }
 
   // OnAfterCompile has to be called after we create the JSFunction, which we
   // may require to recompile the eval for debugging, if we find a function
@@ -1347,11 +1602,20 @@
   }
 }
 
+bool ContainsAsmModule(Handle<Script> script) {
+  DisallowHeapAllocation no_gc;
+  SharedFunctionInfo::ScriptIterator iter(script);
+  while (SharedFunctionInfo* info = iter.Next()) {
+    if (info->HasAsmWasmData()) return true;
+  }
+  return false;
+}
+
 }  // namespace
 
 MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
     Handle<Context> context, Handle<String> source,
-    ParseRestriction restriction) {
+    ParseRestriction restriction, int parameters_end_pos) {
   Isolate* const isolate = context->GetIsolate();
   Handle<Context> native_context(context->native_context(), isolate);
 
@@ -1371,8 +1635,8 @@
   int eval_position = kNoSourcePosition;
   Handle<SharedFunctionInfo> outer_info(native_context->closure()->shared());
   return Compiler::GetFunctionFromEval(source, outer_info, native_context,
-                                       SLOPPY, restriction, eval_scope_position,
-                                       eval_position);
+                                       SLOPPY, restriction, parameters_end_pos,
+                                       eval_scope_position, eval_position);
 }
 
 Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
@@ -1380,8 +1644,7 @@
     int column_offset, ScriptOriginOptions resource_options,
     Handle<Object> source_map_url, Handle<Context> context,
     v8::Extension* extension, ScriptData** cached_data,
-    ScriptCompiler::CompileOptions compile_options, NativesFlag natives,
-    bool is_module) {
+    ScriptCompiler::CompileOptions compile_options, NativesFlag natives) {
   Isolate* isolate = source->GetIsolate();
   if (compile_options == ScriptCompiler::kNoCompileOptions) {
     cached_data = NULL;
@@ -1404,14 +1667,14 @@
   CompilationCache* compilation_cache = isolate->compilation_cache();
 
   // Do a lookup in the compilation cache but not for extensions.
-  MaybeHandle<SharedFunctionInfo> maybe_result;
   Handle<SharedFunctionInfo> result;
+  Handle<Cell> vector;
   if (extension == NULL) {
     // First check per-isolate compilation cache.
-    maybe_result = compilation_cache->LookupScript(
+    InfoVectorPair pair = compilation_cache->LookupScript(
         source, script_name, line_offset, column_offset, resource_options,
         context, language_mode);
-    if (maybe_result.is_null() && FLAG_serialize_toplevel &&
+    if (!pair.has_shared() && FLAG_serialize_toplevel &&
         compile_options == ScriptCompiler::kConsumeCodeCache &&
         !isolate->debug()->is_loaded()) {
       // Then check cached code provided by embedder.
@@ -1420,14 +1683,27 @@
                                          &RuntimeCallStats::CompileDeserialize);
       TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
                    "V8.CompileDeserialize");
-      Handle<SharedFunctionInfo> result;
+      Handle<SharedFunctionInfo> inner_result;
       if (CodeSerializer::Deserialize(isolate, *cached_data, source)
-              .ToHandle(&result)) {
+              .ToHandle(&inner_result)) {
         // Promote to per-isolate compilation cache.
-        compilation_cache->PutScript(source, context, language_mode, result);
-        return result;
+        // TODO(mvstanton): create a feedback vector array here.
+        DCHECK(inner_result->is_compiled());
+        Handle<FeedbackVector> feedback_vector =
+            FeedbackVector::New(isolate, inner_result);
+        vector = isolate->factory()->NewCell(feedback_vector);
+        compilation_cache->PutScript(source, context, language_mode,
+                                     inner_result, vector);
+        return inner_result;
       }
       // Deserializer failed. Fall through to compile.
+    } else {
+      if (pair.has_shared()) {
+        result = Handle<SharedFunctionInfo>(pair.shared(), isolate);
+      }
+      if (pair.has_vector()) {
+        vector = Handle<Cell>(pair.vector(), isolate);
+      }
     }
   }
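Both cache-population sites in this function (the deserializer promotion above and the fresh-compile path further down) now store a Cell wrapping a FeedbackVector alongside the SharedFunctionInfo, so a later cache hit can instantiate a JSFunction without re-allocating feedback slots. The shared step, in sketch form using the calls from the hunks:

// Sketch of the caching step common to both paths: allocate a feedback
// vector for the compiled top-level code, box it in a Cell, and store the
// pair in the per-isolate compilation cache.
DCHECK(result->is_compiled());
Handle<FeedbackVector> feedback_vector = FeedbackVector::New(isolate, result);
Handle<Cell> vector = isolate->factory()->NewCell(feedback_vector);
compilation_cache->PutScript(source, context, language_mode, result, vector);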
 
@@ -1437,20 +1713,22 @@
     timer.Start();
   }
 
-  if (!maybe_result.ToHandle(&result) ||
+  if (result.is_null() ||
       (FLAG_serialize_toplevel &&
        compile_options == ScriptCompiler::kProduceCodeCache)) {
     // No cache entry found, or embedder wants a code cache. Compile the script.
 
     // Create a script object describing the script to be compiled.
     Handle<Script> script = isolate->factory()->NewScript(source);
-    if (FLAG_trace_deopt) Script::InitLineEnds(script);
+    if (isolate->NeedsSourcePositionsForProfiling()) {
+      Script::InitLineEnds(script);
+    }
     if (natives == NATIVES_CODE) {
       script->set_type(Script::TYPE_NATIVE);
-      script->set_hide_source(true);
     } else if (natives == EXTENSION_CODE) {
       script->set_type(Script::TYPE_EXTENSION);
-      script->set_hide_source(true);
+    } else if (natives == INSPECTOR_CODE) {
+      script->set_type(Script::TYPE_INSPECTOR);
     }
     if (!script_name.is_null()) {
       script->set_name(*script_name);
@@ -1463,10 +1741,11 @@
     }
 
     // Compile the function and add it to the cache.
-    Zone zone(isolate->allocator(), ZONE_NAME);
-    ParseInfo parse_info(&zone, script);
-    CompilationInfo info(&parse_info, Handle<JSFunction>::null());
-    if (is_module) parse_info.set_module();
+    ParseInfo parse_info(script);
+    Zone compile_zone(isolate->allocator(), ZONE_NAME);
+    CompilationInfo info(&compile_zone, &parse_info,
+                         Handle<JSFunction>::null());
+    if (resource_options.IsModule()) parse_info.set_module();
     if (compile_options != ScriptCompiler::kNoCompileOptions) {
       parse_info.set_cached_data(cached_data);
     }
@@ -1484,9 +1763,16 @@
         static_cast<LanguageMode>(parse_info.language_mode() | language_mode));
     result = CompileToplevel(&info);
     if (extension == NULL && !result.is_null()) {
-      compilation_cache->PutScript(source, context, language_mode, result);
+      // We need a feedback vector.
+      DCHECK(result->is_compiled());
+      Handle<FeedbackVector> feedback_vector =
+          FeedbackVector::New(isolate, result);
+      vector = isolate->factory()->NewCell(feedback_vector);
+      compilation_cache->PutScript(source, context, language_mode, result,
+                                   vector);
       if (FLAG_serialize_toplevel &&
-          compile_options == ScriptCompiler::kProduceCodeCache) {
+          compile_options == ScriptCompiler::kProduceCodeCache &&
+          !ContainsAsmModule(script)) {
         HistogramTimerScope histogram_timer(
             isolate->counters()->compile_serialize());
         RuntimeCallTimerScope runtimeTimer(isolate,
@@ -1502,7 +1788,9 @@
     }
 
     if (result.is_null()) {
-      isolate->ReportPendingMessages();
+      if (natives != EXTENSION_CODE && natives != NATIVES_CODE) {
+        isolate->ReportPendingMessages();
+      }
     } else {
       isolate->debug()->OnAfterCompile(script);
     }
@@ -1523,7 +1811,9 @@
   parse_info->set_language_mode(
       static_cast<LanguageMode>(parse_info->language_mode() | language_mode));
 
-  CompilationInfo compile_info(parse_info, Handle<JSFunction>::null());
+  Zone compile_zone(isolate->allocator(), ZONE_NAME);
+  CompilationInfo compile_info(&compile_zone, parse_info,
+                               Handle<JSFunction>::null());
 
   // The source was parsed lazily, so compiling for debugging is not possible.
   DCHECK(!compile_info.is_debug());
@@ -1533,7 +1823,6 @@
   return result;
 }
 
-
 Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
     FunctionLiteral* literal, Handle<Script> script,
     CompilationInfo* outer_info) {
@@ -1542,92 +1831,23 @@
   MaybeHandle<SharedFunctionInfo> maybe_existing;
 
   // Find any previously allocated shared function info for the given literal.
-  if (outer_info->shared_info()->never_compiled()) {
-    // On the first compile, there are no existing shared function info for
-    // inner functions yet, so do not try to find them. All bets are off for
-    // live edit though.
-    SLOW_DCHECK(script->FindSharedFunctionInfo(literal).is_null() ||
-                isolate->debug()->live_edit_enabled());
-  } else {
-    maybe_existing = script->FindSharedFunctionInfo(literal);
-  }
+  maybe_existing = script->FindSharedFunctionInfo(isolate, literal);
 
-  // We found an existing shared function info. If it has any sort of code
-  // attached, don't worry about compiling and simply return it. Otherwise,
-  // continue to decide whether to eagerly compile.
-  // Note that we also carry on if we are compiling eager to obtain code for
-  // debugging, unless we already have code with debug break slots.
+  // If we found an existing shared function info, return it.
   Handle<SharedFunctionInfo> existing;
   if (maybe_existing.ToHandle(&existing)) {
     DCHECK(!existing->is_toplevel());
-    if (existing->HasBaselineCode() || existing->HasBytecodeArray()) {
-      if (!outer_info->is_debug() || existing->HasDebugCode()) {
-        return existing;
-      }
-    }
+    return existing;
   }
 
-  // Allocate a shared function info object.
-  Handle<SharedFunctionInfo> result;
-  if (!maybe_existing.ToHandle(&result)) {
-    result = NewSharedFunctionInfoForLiteral(isolate, literal, script);
-    result->set_is_toplevel(false);
-
-    // If the outer function has been compiled before, we cannot be sure that
-    // shared function info for this function literal has been created for the
-    // first time. It may have already been compiled previously.
-    result->set_never_compiled(outer_info->shared_info()->never_compiled());
+  // Allocate a shared function info object which will be compiled lazily.
+  Handle<SharedFunctionInfo> result =
+      isolate->factory()->NewSharedFunctionInfoForLiteral(literal, script);
+  result->set_is_toplevel(false);
+  Scope* outer_scope = literal->scope()->GetOuterScopeWithContext();
+  if (outer_scope) {
+    result->set_outer_scope_info(*outer_scope->scope_info());
   }
-
-  Zone zone(isolate->allocator(), ZONE_NAME);
-  ParseInfo parse_info(&zone, script);
-  CompilationInfo info(&parse_info, Handle<JSFunction>::null());
-  parse_info.set_literal(literal);
-  parse_info.set_shared_info(result);
-  parse_info.set_language_mode(literal->scope()->language_mode());
-  parse_info.set_ast_value_factory(
-      outer_info->parse_info()->ast_value_factory());
-  parse_info.set_ast_value_factory_owned(false);
-
-  if (outer_info->will_serialize()) info.PrepareForSerializing();
-  if (outer_info->is_debug()) info.MarkAsDebug();
-
-  // If this inner function is already compiled, we don't need to compile
-  // again. When compiling for debug, we are not interested in having debug
-  // break slots in inner functions, neither for setting break points nor
-  // for revealing inner functions.
-  // This is especially important for generators. We must not replace the
-  // code for generators, as there may be suspended generator objects.
-  if (!result->is_compiled()) {
-    if (!literal->ShouldEagerCompile()) {
-      info.SetCode(isolate->builtins()->CompileLazy());
-      Scope* outer_scope = literal->scope()->GetOuterScopeWithContext();
-      if (outer_scope) {
-        result->set_outer_scope_info(*outer_scope->scope_info());
-      }
-    } else {
-      // Generate code
-      TimerEventScope<TimerEventCompileCode> timer(isolate);
-      RuntimeCallTimerScope runtimeTimer(isolate,
-                                         &RuntimeCallStats::CompileCode);
-      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
-      if (Renumber(info.parse_info()) && GenerateUnoptimizedCode(&info)) {
-        // Code generation will ensure that the feedback vector is present and
-        // appropriately sized.
-        DCHECK(!info.code().is_null());
-        if (literal->should_be_used_once_hint()) {
-          info.code()->MarkToBeExecutedOnce(isolate);
-        }
-      } else {
-        return Handle<SharedFunctionInfo>::null();
-      }
-    }
-  }
-
-  if (maybe_existing.is_null()) {
-    RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, &info);
-  }
-
   return result;
 }
 
@@ -1649,7 +1869,7 @@
   Handle<Code> code = Handle<Code>(fun->shared()->code());
   Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
   Handle<SharedFunctionInfo> shared = isolate->factory()->NewSharedFunctionInfo(
-      name, fun->shared()->num_literals(), FunctionKind::kNormalFunction, code,
+      name, FunctionKind::kNormalFunction, code,
       Handle<ScopeInfo>(fun->shared()->scope_info()));
   shared->set_outer_scope_info(fun->shared()->outer_scope_info());
   shared->SetConstructStub(*construct_stub);
@@ -1690,13 +1910,8 @@
     return FinalizeOptimizedCompilationJob(job.get()) ==
            CompilationJob::SUCCEEDED;
   } else {
-    if (FinalizeUnoptimizedCompilationJob(job.get()) ==
-        CompilationJob::SUCCEEDED) {
-      RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
-                                job->info());
-      return true;
-    }
-    return false;
+    return FinalizeUnoptimizedCompilationJob(job.get()) ==
+           CompilationJob::SUCCEEDED;
   }
 }
 
@@ -1704,23 +1919,22 @@
                                  PretenureFlag pretenure) {
   Handle<SharedFunctionInfo> shared(function->shared());
 
-  if (FLAG_always_opt && shared->allows_lazy_compilation()) {
+  if (FLAG_always_opt && shared->allows_lazy_compilation() &&
+      !function->shared()->HasAsmWasmData() &&
+      function->shared()->is_compiled()) {
     function->MarkForOptimization();
   }
 
-  CodeAndLiterals cached = shared->SearchOptimizedCodeMap(
+  Code* code = shared->SearchOptimizedCodeMap(
       function->context()->native_context(), BailoutId::None());
-  if (cached.code != nullptr) {
+  if (code != nullptr) {
     // Caching of optimized code enabled and optimized code found.
-    DCHECK(!cached.code->marked_for_deoptimization());
+    DCHECK(!code->marked_for_deoptimization());
     DCHECK(function->shared()->is_compiled());
-    function->ReplaceCode(cached.code);
+    function->ReplaceCode(code);
   }
 
-  if (cached.literals != nullptr) {
-    DCHECK(shared->is_compiled());
-    function->set_literals(cached.literals);
-  } else if (shared->is_compiled()) {
+  if (shared->is_compiled()) {
     // TODO(mvstanton): pass pretenure flag to EnsureLiterals.
     JSFunction::EnsureLiterals(function);
   }
diff --git a/src/compiler.h b/src/compiler.h
index 03c6f81..e26484a 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -22,6 +22,10 @@
 class JavaScriptFrame;
 class ParseInfo;
 class ScriptData;
+template <typename T>
+class ThreadedList;
+template <typename T>
+class ThreadedListZoneEntry;
 
 // The V8 compiler API.
 //
@@ -33,7 +37,7 @@
 // parameters which then can be executed. If the source code contains other
 // functions, they might be compiled and allocated as part of the compilation
 // of the source code or deferred for lazy compilation at a later point.
-class Compiler : public AllStatic {
+class V8_EXPORT_PRIVATE Compiler : public AllStatic {
  public:
   enum ClearExceptionFlag { KEEP_EXCEPTION, CLEAR_EXCEPTION };
   enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
@@ -63,10 +67,15 @@
   // offer this chance, optimized closure instantiation will not call this.
   static void PostInstantiation(Handle<JSFunction> function, PretenureFlag);
 
+  typedef ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>
+      EagerInnerFunctionLiterals;
+
   // Parser::Parse, then Compiler::Analyze.
   static bool ParseAndAnalyze(ParseInfo* info);
-  // Rewrite, analyze scopes, and renumber.
-  static bool Analyze(ParseInfo* info);
+  // Rewrite, analyze scopes, and renumber. If |eager_literals| is non-null,
+  // inner function literals to be compiled eagerly are appended to it.
+  static bool Analyze(ParseInfo* info,
+                      EagerInnerFunctionLiterals* eager_literals = nullptr);
   // Adds deoptimization support, requires ParseAndAnalyze.
   static bool EnsureDeoptimizationSupport(CompilationInfo* info);
   // Ensures that bytecode is generated, calls ParseAndAnalyze internally.
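A caller of the new overload walks the threaded list to visit inner functions marked for eager compilation. A minimal usage sketch, assuming ThreadedList iteration yields ThreadedListZoneEntry<FunctionLiteral*> nodes exposing a value() accessor:

// Minimal usage sketch for the |eager_literals| out-parameter (iteration
// API assumed; entries are allocated in the ParseInfo zone).
Compiler::EagerInnerFunctionLiterals inner_literals;
if (Compiler::Analyze(parse_info, &inner_literals)) {
  for (auto it : inner_literals) {
    FunctionLiteral* literal = it->value();
    // |literal| is an inner function that should be compiled eagerly.
  }
}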
@@ -89,15 +98,15 @@
   MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
       Handle<String> source, Handle<SharedFunctionInfo> outer_info,
       Handle<Context> context, LanguageMode language_mode,
-      ParseRestriction restriction, int eval_scope_position, int eval_position,
-      int line_offset = 0, int column_offset = 0,
-      Handle<Object> script_name = Handle<Object>(),
+      ParseRestriction restriction, int parameters_end_pos,
+      int eval_scope_position, int eval_position, int line_offset = 0,
+      int column_offset = 0, Handle<Object> script_name = Handle<Object>(),
       ScriptOriginOptions options = ScriptOriginOptions());
 
   // Create a (bound) function for a String source within a context for eval.
   MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromString(
       Handle<Context> context, Handle<String> source,
-      ParseRestriction restriction);
+      ParseRestriction restriction, int parameters_end_pos);
 
   // Create a shared function info object for a String source within a context.
   static Handle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
@@ -106,7 +115,7 @@
       Handle<Object> source_map_url, Handle<Context> context,
       v8::Extension* extension, ScriptData** cached_data,
       ScriptCompiler::CompileOptions compile_options,
-      NativesFlag is_natives_code, bool is_module);
+      NativesFlag is_natives_code);
 
   // Create a shared function info object for a Script that has already been
   // parsed while the script was being loaded from a streamed source.
@@ -145,7 +154,7 @@
 //
 // Each of the three phases can either fail or succeed. The current state of
 // the job can be checked using {state()}.
-class CompilationJob {
+class V8_EXPORT_PRIVATE CompilationJob {
  public:
   enum Status { SUCCEEDED, FAILED };
   enum class State {
@@ -158,11 +167,7 @@
 
   CompilationJob(Isolate* isolate, CompilationInfo* info,
                  const char* compiler_name,
-                 State initial_state = State::kReadyToPrepare)
-      : info_(info),
-        compiler_name_(compiler_name),
-        state_(initial_state),
-        stack_limit_(isolate->stack_guard()->real_climit()) {}
+                 State initial_state = State::kReadyToPrepare);
   virtual ~CompilationJob() {}
 
   // Prepare the compile job. Must be called on the main thread.
@@ -191,6 +196,11 @@
   void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
   uintptr_t stack_limit() const { return stack_limit_; }
 
+  bool executed_on_background_thread() const {
+    DCHECK_IMPLIES(!can_execute_on_background_thread(),
+                   !executed_on_background_thread_);
+    return executed_on_background_thread_;
+  }
   State state() const { return state_; }
   CompilationInfo* info() const { return info_; }
   Isolate* isolate() const;
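The DCHECK_IMPLIES above encodes the invariant that a job which cannot execute on a background thread must never report that it did. The flag is maintained by comparing the current thread against the isolate thread recorded at construction; an assumed sketch of the out-of-line definitions follows (the member list matches this hunk, the ExecuteJob body is reconstructed):

// Assumed sketch of the out-of-line constructor and the bookkeeping in
// ExecuteJob; member names match the fields added in the next hunk.
CompilationJob::CompilationJob(Isolate* isolate, CompilationInfo* info,
                               const char* compiler_name, State initial_state)
    : info_(info),
      isolate_thread_id_(isolate->thread_id()),
      compiler_name_(compiler_name),
      state_(initial_state),
      stack_limit_(isolate->stack_guard()->real_climit()),
      executed_on_background_thread_(false) {}

CompilationJob::Status CompilationJob::ExecuteJob() {
  // Record whether Execute() is running off the isolate's main thread.
  executed_on_background_thread_ =
      !ThreadId::Current().Equals(isolate_thread_id_);
  return UpdateState(ExecuteJobImpl(), State::kReadyToFinalize);
}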
@@ -207,12 +217,14 @@
 
  private:
   CompilationInfo* info_;
+  ThreadId isolate_thread_id_;
   base::TimeDelta time_taken_to_prepare_;
   base::TimeDelta time_taken_to_execute_;
   base::TimeDelta time_taken_to_finalize_;
   const char* compiler_name_;
   State state_;
   uintptr_t stack_limit_;
+  bool executed_on_background_thread_;
 
   MUST_USE_RESULT Status UpdateState(Status status, State next_state) {
     if (status == SUCCEEDED) {
diff --git a/src/compiler/OWNERS b/src/compiler/OWNERS
index 02de4ed..10ffcb0 100644
--- a/src/compiler/OWNERS
+++ b/src/compiler/OWNERS
@@ -6,3 +6,4 @@
 mstarzinger@chromium.org
 mtrofin@chromium.org
 titzer@chromium.org
+danno@chromium.org
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index 540eb37..2722590 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -9,6 +9,7 @@
 #include "src/frames.h"
 #include "src/handles-inl.h"
 #include "src/heap/heap.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -16,47 +17,67 @@
 
 // static
 FieldAccess AccessBuilder::ForExternalDoubleValue() {
-  FieldAccess access = {kUntaggedBase,          0,
-                        MaybeHandle<Name>(),    Type::Number(),
-                        MachineType::Float64(), kNoWriteBarrier};
+  FieldAccess access = {kUntaggedBase,       0,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::Number(),      MachineType::Float64(),
+                        kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForExternalTaggedValue() {
+  FieldAccess access = {kUntaggedBase,       0,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::Any(),         MachineType::AnyTagged(),
+                        kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForExternalUint8Value() {
+  FieldAccess access = {kUntaggedBase,           0,
+                        MaybeHandle<Name>(),     MaybeHandle<Map>(),
+                        TypeCache::Get().kUint8, MachineType::Uint8(),
+                        kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForMap() {
-  FieldAccess access = {
-      kTaggedBase,           HeapObject::kMapOffset,       MaybeHandle<Name>(),
-      Type::OtherInternal(), MachineType::TaggedPointer(), kMapWriteBarrier};
+  FieldAccess access = {kTaggedBase,           HeapObject::kMapOffset,
+                        MaybeHandle<Name>(),   MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::TaggedPointer(),
+                        kMapWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForHeapNumberValue() {
-  FieldAccess access = {kTaggedBase,
-                        HeapNumber::kValueOffset,
-                        MaybeHandle<Name>(),
-                        TypeCache::Get().kFloat64,
-                        MachineType::Float64(),
-                        kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,        HeapNumber::kValueOffset,  MaybeHandle<Name>(),
+      MaybeHandle<Map>(), TypeCache::Get().kFloat64, MachineType::Float64(),
+      kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSObjectProperties() {
-  FieldAccess access = {
-      kTaggedBase,      JSObject::kPropertiesOffset,  MaybeHandle<Name>(),
-      Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSObject::kPropertiesOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::Internal(),    MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSObjectElements() {
-  FieldAccess access = {
-      kTaggedBase,      JSObject::kElementsOffset,    MaybeHandle<Name>(),
-      Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSObject::kElementsOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::Internal(),    MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
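Every FieldAccess initializer in this file gains a fourth entry, a MaybeHandle<Map>, inserted between the field name and its type. In struct form the new initializer order is roughly the following (a sketch; the actual declaration lives in src/compiler/simplified-operator.h, and the comments are descriptive rather than the header's own):

// Sketch of the extended FieldAccess layout implied by the initializers
// in this file.
struct FieldAccess {
  BaseTaggedness base_is_tagged;        // kTaggedBase or kUntaggedBase
  int offset;                           // field offset in bytes
  MaybeHandle<Name> name;               // field name, if any
  MaybeHandle<Map> map;                 // new: stable map of the value, if known
  Type* type;                           // static type of the field value
  MachineType machine_type;             // machine representation
  WriteBarrierKind write_barrier_kind;  // barrier required on stores
};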
 
@@ -65,126 +86,136 @@
 FieldAccess AccessBuilder::ForJSObjectInObjectProperty(Handle<Map> map,
                                                        int index) {
   int const offset = map->GetInObjectPropertyOffset(index);
-  FieldAccess access = {kTaggedBase,
-                        offset,
-                        MaybeHandle<Name>(),
-                        Type::NonInternal(),
-                        MachineType::AnyTagged(),
+  FieldAccess access = {kTaggedBase,         offset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
                         kFullWriteBarrier};
   return access;
 }
 
+// static
+FieldAccess AccessBuilder::ForJSObjectOffset(
+    int offset, WriteBarrierKind write_barrier_kind) {
+  FieldAccess access = {kTaggedBase,         offset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        write_barrier_kind};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSCollectionTable() {
+  FieldAccess access = {kTaggedBase,           JSCollection::kTableOffset,
+                        MaybeHandle<Name>(),   MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
+  return access;
+}
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
-  FieldAccess access = {kTaggedBase,
-                        JSFunction::kPrototypeOrInitialMapOffset,
-                        MaybeHandle<Name>(),
-                        Type::Any(),
-                        MachineType::AnyTagged(),
-                        kFullWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSFunction::kPrototypeOrInitialMapOffset,
+      MaybeHandle<Name>(), MaybeHandle<Map>(),
+      Type::Any(),         MachineType::AnyTagged(),
+      kFullWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionContext() {
-  FieldAccess access = {
-      kTaggedBase,      JSFunction::kContextOffset, MaybeHandle<Name>(),
-      Type::Internal(), MachineType::AnyTagged(),   kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSFunction::kContextOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::Internal(),    MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
-  FieldAccess access = {kTaggedBase,
-                        JSFunction::kSharedFunctionInfoOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           JSFunction::kSharedFunctionInfoOffset,
+      Handle<Name>(),        MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::TaggedPointer(),
+      kPointerWriteBarrier};
   return access;
 }
 
 // static
-FieldAccess AccessBuilder::ForJSFunctionLiterals() {
-  FieldAccess access = {
-      kTaggedBase,      JSFunction::kLiteralsOffset,  Handle<Name>(),
-      Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+FieldAccess AccessBuilder::ForJSFunctionFeedbackVector() {
+  FieldAccess access = {kTaggedBase,         JSFunction::kFeedbackVectorOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::Internal(),    MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionCodeEntry() {
-  FieldAccess access = {
-      kTaggedBase,           JSFunction::kCodeEntryOffset, Handle<Name>(),
-      Type::OtherInternal(), MachineType::Pointer(),       kNoWriteBarrier};
+  FieldAccess access = {kTaggedBase,           JSFunction::kCodeEntryOffset,
+                        Handle<Name>(),        MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::Pointer(),
+                        kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionNextFunctionLink() {
-  FieldAccess access = {kTaggedBase,
-                        JSFunction::kNextFunctionLinkOffset,
-                        Handle<Name>(),
-                        Type::Any(),
-                        MachineType::AnyTagged(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSFunction::kNextFunctionLinkOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::Any(),         MachineType::AnyTagged(),
+      kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
-  FieldAccess access = {kTaggedBase,
-                        JSGeneratorObject::kContextOffset,
-                        Handle<Name>(),
-                        Type::Internal(),
-                        MachineType::TaggedPointer(),
+  FieldAccess access = {kTaggedBase,         JSGeneratorObject::kContextOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::Internal(),    MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
-  FieldAccess access = {kTaggedBase,
-                        JSGeneratorObject::kContinuationOffset,
-                        Handle<Name>(),
-                        Type::SignedSmall(),
-                        MachineType::TaggedSigned(),
-                        kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSGeneratorObject::kContinuationOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::SignedSmall(), MachineType::TaggedSigned(),
+      kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
-  FieldAccess access = {kTaggedBase,
-                        JSGeneratorObject::kInputOrDebugPosOffset,
-                        Handle<Name>(),
-                        Type::NonInternal(),
-                        MachineType::AnyTagged(),
-                        kFullWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSGeneratorObject::kInputOrDebugPosOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::NonInternal(), MachineType::AnyTagged(),
+      kFullWriteBarrier};
   return access;
 }
 
 // static
-FieldAccess AccessBuilder::ForJSGeneratorObjectOperandStack() {
-  FieldAccess access = {kTaggedBase,
-                        JSGeneratorObject::kOperandStackOffset,
-                        Handle<Name>(),
-                        Type::Internal(),
-                        MachineType::AnyTagged(),
-                        kPointerWriteBarrier};
+FieldAccess AccessBuilder::ForJSGeneratorObjectRegisterFile() {
+  FieldAccess access = {
+      kTaggedBase,         JSGeneratorObject::kRegisterFileOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::Internal(),    MachineType::AnyTagged(),
+      kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
-  FieldAccess access = {kTaggedBase,
-                        JSGeneratorObject::kResumeModeOffset,
-                        Handle<Name>(),
-                        Type::SignedSmall(),
-                        MachineType::TaggedSigned(),
-                        kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSGeneratorObject::kResumeModeOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::SignedSmall(), MachineType::TaggedSigned(),
+      kNoWriteBarrier};
   return access;
 }
 
@@ -194,6 +225,7 @@
   FieldAccess access = {kTaggedBase,
                         JSArray::kLengthOffset,
                         Handle<Name>(),
+                        MaybeHandle<Map>(),
                         type_cache.kJSArrayLengthType,
                         MachineType::TaggedSigned(),
                         kFullWriteBarrier};
@@ -210,30 +242,28 @@
 
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
-  FieldAccess access = {kTaggedBase,
-                        JSArrayBuffer::kBackingStoreOffset,
-                        MaybeHandle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::Pointer(),
-                        kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           JSArrayBuffer::kBackingStoreOffset,
+      MaybeHandle<Name>(),   MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::Pointer(),
+      kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
-  FieldAccess access = {kTaggedBase,           JSArrayBuffer::kBitFieldOffset,
-                        MaybeHandle<Name>(),   TypeCache::Get().kUint8,
-                        MachineType::Uint32(), kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,        JSArrayBuffer::kBitFieldOffset, MaybeHandle<Name>(),
+      MaybeHandle<Map>(), TypeCache::Get().kUint8,        MachineType::Uint32(),
+      kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
-  FieldAccess access = {kTaggedBase,
-                        JSArrayBufferView::kBufferOffset,
-                        MaybeHandle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
+  FieldAccess access = {kTaggedBase,           JSArrayBufferView::kBufferOffset,
+                        MaybeHandle<Name>(),   MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
@@ -243,6 +273,7 @@
   FieldAccess access = {kTaggedBase,
                         JSArrayBufferView::kByteLengthOffset,
                         MaybeHandle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kPositiveInteger,
                         MachineType::AnyTagged(),
                         kFullWriteBarrier};
@@ -254,6 +285,7 @@
   FieldAccess access = {kTaggedBase,
                         JSArrayBufferView::kByteOffsetOffset,
                         MaybeHandle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kPositiveInteger,
                         MachineType::AnyTagged(),
                         kFullWriteBarrier};
@@ -265,6 +297,7 @@
   FieldAccess access = {kTaggedBase,
                         JSTypedArray::kLengthOffset,
                         MaybeHandle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kJSTypedArrayLengthType,
                         MachineType::TaggedSigned(),
                         kNoWriteBarrier};
@@ -276,6 +309,7 @@
   FieldAccess access = {kTaggedBase,
                         JSDate::kValueOffset,
                         MaybeHandle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kJSDateValueType,
                         MachineType::AnyTagged(),
                         kFullWriteBarrier};
@@ -284,48 +318,51 @@
 
 // static
 FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
-  FieldAccess access = {kTaggedBase,
-                        JSDate::kValueOffset + index * kPointerSize,
-                        MaybeHandle<Name>(),
-                        Type::Number(),
-                        MachineType::AnyTagged(),
-                        kFullWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSDate::kValueOffset + index * kPointerSize,
+      MaybeHandle<Name>(), MaybeHandle<Map>(),
+      Type::Number(),      MachineType::AnyTagged(),
+      kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSIteratorResultDone() {
-  FieldAccess access = {
-      kTaggedBase,         JSIteratorResult::kDoneOffset, MaybeHandle<Name>(),
-      Type::NonInternal(), MachineType::AnyTagged(),      kFullWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSIteratorResult::kDoneOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSIteratorResultValue() {
-  FieldAccess access = {
-      kTaggedBase,         JSIteratorResult::kValueOffset, MaybeHandle<Name>(),
-      Type::NonInternal(), MachineType::AnyTagged(),       kFullWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSIteratorResult::kValueOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSRegExpFlags() {
-  FieldAccess access = {
-      kTaggedBase,         JSRegExp::kFlagsOffset,   MaybeHandle<Name>(),
-      Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSRegExp::kFlagsOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSRegExpSource() {
-  FieldAccess access = {
-      kTaggedBase,         JSRegExp::kSourceOffset,  MaybeHandle<Name>(),
-      Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSRegExp::kSourceOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
@@ -335,6 +372,7 @@
   FieldAccess access = {kTaggedBase,
                         FixedArray::kLengthOffset,
                         MaybeHandle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kFixedArrayLengthType,
                         MachineType::TaggedSigned(),
                         kNoWriteBarrier};
@@ -343,12 +381,11 @@
 
 // static
 FieldAccess AccessBuilder::ForFixedTypedArrayBaseBasePointer() {
-  FieldAccess access = {kTaggedBase,
-                        FixedTypedArrayBase::kBasePointerOffset,
-                        MaybeHandle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::AnyTagged(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           FixedTypedArrayBase::kBasePointerOffset,
+      MaybeHandle<Name>(),   MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::AnyTagged(),
+      kPointerWriteBarrier};
   return access;
 }
 
@@ -357,6 +394,7 @@
   FieldAccess access = {kTaggedBase,
                         FixedTypedArrayBase::kExternalPointerOffset,
                         MaybeHandle<Name>(),
+                        MaybeHandle<Map>(),
                         Type::ExternalPointer(),
                         MachineType::Pointer(),
                         kNoWriteBarrier};
@@ -365,53 +403,51 @@
 
 // static
 FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
-  FieldAccess access = {kTaggedBase,
-                        DescriptorArray::kEnumCacheOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           DescriptorArray::kEnumCacheOffset,
+      Handle<Name>(),        MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::TaggedPointer(),
+      kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
-  FieldAccess access = {kTaggedBase,
-                        DescriptorArray::kEnumCacheBridgeCacheOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           DescriptorArray::kEnumCacheBridgeCacheOffset,
+      Handle<Name>(),        MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::TaggedPointer(),
+      kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapBitField() {
-  FieldAccess access = {kTaggedBase,          Map::kBitFieldOffset,
-                        Handle<Name>(),       TypeCache::Get().kUint8,
-                        MachineType::Uint8(), kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,        Map::kBitFieldOffset,    Handle<Name>(),
+      MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint8(),
+      kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapBitField3() {
-  FieldAccess access = {kTaggedBase,          Map::kBitField3Offset,
-                        Handle<Name>(),       TypeCache::Get().kInt32,
-                        MachineType::Int32(), kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,        Map::kBitField3Offset,   Handle<Name>(),
+      MaybeHandle<Map>(), TypeCache::Get().kInt32, MachineType::Int32(),
+      kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapDescriptors() {
-  FieldAccess access = {kTaggedBase,
-                        Map::kDescriptorsOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
+  FieldAccess access = {kTaggedBase,           Map::kDescriptorsOffset,
+                        Handle<Name>(),        MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
@@ -419,48 +455,47 @@
 
 // static
 FieldAccess AccessBuilder::ForMapInstanceType() {
-  FieldAccess access = {kTaggedBase,          Map::kInstanceTypeOffset,
-                        Handle<Name>(),       TypeCache::Get().kUint8,
-                        MachineType::Uint8(), kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,        Map::kInstanceTypeOffset, Handle<Name>(),
+      MaybeHandle<Map>(), TypeCache::Get().kUint8,  MachineType::Uint8(),
+      kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapPrototype() {
-  FieldAccess access = {
-      kTaggedBase, Map::kPrototypeOffset,        Handle<Name>(),
-      Type::Any(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         Map::kPrototypeOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::Any(),         MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForModuleRegularExports() {
-  FieldAccess access = {kTaggedBase,
-                        Module::kRegularExportsOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
+  FieldAccess access = {kTaggedBase,           Module::kRegularExportsOffset,
+                        Handle<Name>(),        MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForModuleRegularImports() {
-  FieldAccess access = {kTaggedBase,
-                        Module::kRegularImportsOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
+  FieldAccess access = {kTaggedBase,           Module::kRegularImportsOffset,
+                        Handle<Name>(),        MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForNameHashField() {
-  FieldAccess access = {kTaggedBase,           Name::kHashFieldOffset,
-                        Handle<Name>(),        Type::Internal(),
-                        MachineType::Uint32(), kNoWriteBarrier};
+  FieldAccess access = {kTaggedBase,        Name::kHashFieldOffset,
+                        Handle<Name>(),     MaybeHandle<Map>(),
+                        Type::Unsigned32(), MachineType::Uint32(),
+                        kNoWriteBarrier};
   return access;
 }
 
@@ -469,6 +504,7 @@
   FieldAccess access = {kTaggedBase,
                         String::kLengthOffset,
                         Handle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kStringLengthType,
                         MachineType::TaggedSigned(),
                         kNoWriteBarrier};
@@ -477,33 +513,46 @@
 
 // static
 FieldAccess AccessBuilder::ForConsStringFirst() {
-  FieldAccess access = {
-      kTaggedBase,    ConsString::kFirstOffset,     Handle<Name>(),
-      Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         ConsString::kFirstOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::String(),      MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForConsStringSecond() {
-  FieldAccess access = {
-      kTaggedBase,    ConsString::kSecondOffset,    Handle<Name>(),
-      Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         ConsString::kSecondOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::String(),      MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForThinStringActual() {
+  FieldAccess access = {kTaggedBase,         ThinString::kActualOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::String(),      MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForSlicedStringOffset() {
-  FieldAccess access = {
-      kTaggedBase,         SlicedString::kOffsetOffset, Handle<Name>(),
-      Type::SignedSmall(), MachineType::TaggedSigned(), kNoWriteBarrier};
+  FieldAccess access = {kTaggedBase,         SlicedString::kOffsetOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::SignedSmall(), MachineType::TaggedSigned(),
+                        kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForSlicedStringParent() {
-  FieldAccess access = {
-      kTaggedBase,    SlicedString::kParentOffset,  Handle<Name>(),
-      Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         SlicedString::kParentOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::String(),      MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
@@ -512,6 +561,7 @@
   FieldAccess access = {kTaggedBase,
                         ExternalString::kResourceDataOffset,
                         Handle<Name>(),
+                        MaybeHandle<Map>(),
                         Type::ExternalPointer(),
                         MachineType::Pointer(),
                         kNoWriteBarrier};
@@ -550,23 +600,20 @@
 
 // static
 FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
-  FieldAccess access = {kTaggedBase,
-                        JSGlobalObject::kGlobalProxyOffset,
-                        Handle<Name>(),
-                        Type::Receiver(),
-                        MachineType::TaggedPointer(),
+  FieldAccess access = {kTaggedBase,         JSGlobalObject::kGlobalProxyOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::Receiver(),    MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
-  FieldAccess access = {kTaggedBase,
-                        JSGlobalObject::kNativeContextOffset,
-                        Handle<Name>(),
-                        Type::Internal(),
-                        MachineType::TaggedPointer(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSGlobalObject::kNativeContextOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::Internal(),    MachineType::TaggedPointer(),
+      kPointerWriteBarrier};
   return access;
 }
 
@@ -575,6 +622,7 @@
   FieldAccess access = {kTaggedBase,
                         JSArrayIterator::kIteratedObjectOffset,
                         Handle<Name>(),
+                        MaybeHandle<Map>(),
                         Type::ReceiverOrUndefined(),
                         MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
@@ -589,6 +637,7 @@
   FieldAccess access = {kTaggedBase,
                         JSArrayIterator::kNextIndexOffset,
                         Handle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kPositiveSafeInteger,
                         MachineType::AnyTagged(),
                         kFullWriteBarrier};
@@ -614,20 +663,20 @@
 
 // static
 FieldAccess AccessBuilder::ForJSArrayIteratorObjectMap() {
-  FieldAccess access = {kTaggedBase,
-                        JSArrayIterator::kIteratedObjectMapOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           JSArrayIterator::kIteratedObjectMapOffset,
+      Handle<Name>(),        MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::TaggedPointer(),
+      kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSStringIteratorString() {
-  FieldAccess access = {
-      kTaggedBase,    JSStringIterator::kStringOffset, Handle<Name>(),
-      Type::String(), MachineType::TaggedPointer(),    kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSStringIterator::kStringOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::String(),      MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
@@ -636,6 +685,7 @@
   FieldAccess access = {kTaggedBase,
                         JSStringIterator::kNextIndexOffset,
                         Handle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kStringLengthType,
                         MachineType::TaggedSigned(),
                         kNoWriteBarrier};
@@ -644,52 +694,53 @@
 
 // static
 FieldAccess AccessBuilder::ForValue() {
-  FieldAccess access = {
-      kTaggedBase,         JSValue::kValueOffset,    Handle<Name>(),
-      Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
-  return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForArgumentsLength() {
-  FieldAccess access = {
-      kTaggedBase,         JSArgumentsObject::kLengthOffset, Handle<Name>(),
-      Type::NonInternal(), MachineType::AnyTagged(),         kFullWriteBarrier};
-  return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForArgumentsCallee() {
-  FieldAccess access = {kTaggedBase,
-                        JSSloppyArgumentsObject::kCalleeOffset,
-                        Handle<Name>(),
-                        Type::NonInternal(),
-                        MachineType::AnyTagged(),
-                        kPointerWriteBarrier};
-  return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForFixedArraySlot(size_t index) {
-  int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
-  FieldAccess access = {kTaggedBase,
-                        offset,
-                        Handle<Name>(),
-                        Type::NonInternal(),
-                        MachineType::AnyTagged(),
+  FieldAccess access = {kTaggedBase,         JSValue::kValueOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
                         kFullWriteBarrier};
   return access;
 }
 
 
 // static
-FieldAccess AccessBuilder::ForCellValue() {
+FieldAccess AccessBuilder::ForArgumentsLength() {
+  FieldAccess access = {kTaggedBase,         JSArgumentsObject::kLengthOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        kFullWriteBarrier};
+  return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForArgumentsCallee() {
   FieldAccess access = {
-      kTaggedBase, Cell::kValueOffset,       Handle<Name>(),
-      Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+      kTaggedBase,         JSSloppyArgumentsObject::kCalleeOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::NonInternal(), MachineType::AnyTagged(),
+      kPointerWriteBarrier};
+  return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForFixedArraySlot(
+    size_t index, WriteBarrierKind write_barrier_kind) {
+  int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
+  FieldAccess access = {kTaggedBase,         offset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        write_barrier_kind};
+  return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForCellValue() {
+  FieldAccess access = {kTaggedBase,      Cell::kValueOffset,
+                        Handle<Name>(),   MaybeHandle<Map>(),
+                        Type::Any(),      MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
@@ -698,31 +749,29 @@
   int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
   DCHECK_EQ(offset,
             Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
-  FieldAccess access = {kTaggedBase,
-                        offset,
-                        Handle<Name>(),
-                        Type::Any(),
-                        MachineType::AnyTagged(),
+  FieldAccess access = {kTaggedBase,      offset,
+                        Handle<Name>(),   MaybeHandle<Map>(),
+                        Type::Any(),      MachineType::AnyTagged(),
                         kFullWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForContextExtensionScopeInfo() {
-  FieldAccess access = {kTaggedBase,
-                        ContextExtension::kScopeInfoOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::AnyTagged(),
-                        kFullWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           ContextExtension::kScopeInfoOffset,
+      Handle<Name>(),        MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::AnyTagged(),
+      kFullWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForContextExtensionExtension() {
-  FieldAccess access = {
-      kTaggedBase, ContextExtension::kExtensionOffset, Handle<Name>(),
-      Type::Any(), MachineType::AnyTagged(),           kFullWriteBarrier};
+  FieldAccess access = {kTaggedBase,      ContextExtension::kExtensionOffset,
+                        Handle<Name>(),   MaybeHandle<Map>(),
+                        Type::Any(),      MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
@@ -831,6 +880,68 @@
   return access;
 }
 
+// static
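+// These accessors address HashTable bookkeeping fields, which live in tagged
+// slots of the backing FixedArray; their offsets are therefore computed with
+// FixedArray::OffsetOfElementAt().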
+FieldAccess AccessBuilder::ForHashTableBaseNumberOfElements() {
+  FieldAccess access = {
+      kTaggedBase,
+      FixedArray::OffsetOfElementAt(HashTableBase::kNumberOfElementsIndex),
+      MaybeHandle<Name>(),
+      MaybeHandle<Map>(),
+      Type::SignedSmall(),
+      MachineType::TaggedSigned(),
+      kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForHashTableBaseNumberOfDeletedElement() {
+  FieldAccess access = {
+      kTaggedBase,
+      FixedArray::OffsetOfElementAt(
+          HashTableBase::kNumberOfDeletedElementsIndex),
+      MaybeHandle<Name>(),
+      MaybeHandle<Map>(),
+      Type::SignedSmall(),
+      MachineType::TaggedSigned(),
+      kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForHashTableBaseCapacity() {
+  FieldAccess access = {
+      kTaggedBase,
+      FixedArray::OffsetOfElementAt(HashTableBase::kCapacityIndex),
+      MaybeHandle<Name>(),
+      MaybeHandle<Map>(),
+      Type::SignedSmall(),
+      MachineType::TaggedSigned(),
+      kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForDictionaryMaxNumberKey() {
+  FieldAccess access = {
+      kTaggedBase,
+      FixedArray::OffsetOfElementAt(NameDictionary::kMaxNumberKeyIndex),
+      MaybeHandle<Name>(),
+      MaybeHandle<Map>(),
+      Type::Any(),
+      MachineType::AnyTagged(),
+      kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForDictionaryNextEnumerationIndex() {
+  FieldAccess access = {
+      kTaggedBase,
+      FixedArray::OffsetOfElementAt(NameDictionary::kNextEnumerationIndexIndex),
+      MaybeHandle<Name>(),
+      MaybeHandle<Map>(),
+      Type::SignedSmall(),
+      MachineType::TaggedSigned(),
+      kNoWriteBarrier};
+  return access;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
index eb8e78f..9d23220 100644
--- a/src/compiler/access-builder.h
+++ b/src/compiler/access-builder.h
@@ -26,6 +26,12 @@
   // Provides access to a double field identified by an external reference.
   static FieldAccess ForExternalDoubleValue();
 
+  // Provides access to a tagged field identified by an external reference.
+  static FieldAccess ForExternalTaggedValue();
+
+  // Provides access to a uint8 field identified by an external reference.
+  static FieldAccess ForExternalUint8Value();
+
   // ===========================================================================
   // Access to heap object fields and elements (based on tagged pointer).
 
@@ -43,6 +49,11 @@
 
   // Provides access to JSObject inobject property fields.
   static FieldAccess ForJSObjectInObjectProperty(Handle<Map> map, int index);
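+  // Provides access to a JSObject field at the given offset.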
+  static FieldAccess ForJSObjectOffset(
+      int offset, WriteBarrierKind write_barrier_kind = kFullWriteBarrier);
+
+  // Provides access to JSCollection::table() field.
+  static FieldAccess ForJSCollectionTable();
 
   // Provides access to JSFunction::prototype_or_initial_map() field.
   static FieldAccess ForJSFunctionPrototypeOrInitialMap();
@@ -53,8 +64,8 @@
   // Provides access to JSFunction::shared() field.
   static FieldAccess ForJSFunctionSharedFunctionInfo();
 
-  // Provides access to JSFunction::literals() field.
-  static FieldAccess ForJSFunctionLiterals();
+  // Provides access to JSFunction::feedback_vector() field.
+  static FieldAccess ForJSFunctionFeedbackVector();
 
   // Provides access to JSFunction::code() field.
   static FieldAccess ForJSFunctionCodeEntry();
@@ -71,8 +82,8 @@
   // Provides access to JSGeneratorObject::input_or_debug_pos() field.
   static FieldAccess ForJSGeneratorObjectInputOrDebugPos();
 
-  // Provides access to JSGeneratorObject::operand_stack() field.
-  static FieldAccess ForJSGeneratorObjectOperandStack();
+  // Provides access to JSGeneratorObject::register_file() field.
+  static FieldAccess ForJSGeneratorObjectRegisterFile();
 
   // Provides access to JSGeneratorObject::resume_mode() field.
   static FieldAccess ForJSGeneratorObjectResumeMode();
@@ -164,6 +175,9 @@
   // Provides access to ConsString::second() field.
   static FieldAccess ForConsStringSecond();
 
+  // Provides access to ThinString::actual() field.
+  static FieldAccess ForThinStringActual();
+
   // Provides access to SlicedString::offset() field.
   static FieldAccess ForSlicedStringOffset();
 
@@ -218,7 +232,8 @@
   static FieldAccess ForArgumentsCallee();
 
   // Provides access to FixedArray slots.
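+  // Callers that can guarantee the stored value needs no write barrier
+  // (e.g. it is always a Smi) may pass a weaker barrier kind.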
-  static FieldAccess ForFixedArraySlot(size_t index);
+  static FieldAccess ForFixedArraySlot(
+      size_t index, WriteBarrierKind write_barrier_kind = kFullWriteBarrier);
 
   // Provides access to Context slots.
   static FieldAccess ForContextSlot(size_t index);
@@ -238,6 +253,15 @@
   static ElementAccess ForTypedArrayElement(ExternalArrayType type,
                                             bool is_external);
 
+  // Provides access to HashTable fields.
+  static FieldAccess ForHashTableBaseNumberOfElements();
+  static FieldAccess ForHashTableBaseNumberOfDeletedElement();
+  static FieldAccess ForHashTableBaseCapacity();
+
+  // Provides access to Dictionary fields.
+  static FieldAccess ForDictionaryMaxNumberKey();
+  static FieldAccess ForDictionaryNextEnumerationIndex();
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
 };
diff --git a/src/compiler/access-info.cc b/src/compiler/access-info.cc
index 866b060..8fef2f0 100644
--- a/src/compiler/access-info.cc
+++ b/src/compiler/access-info.cc
@@ -52,6 +52,8 @@
       return os << "Load";
     case AccessMode::kStore:
       return os << "Store";
+    case AccessMode::kStoreInLiteral:
+      return os << "StoreInLiteral";
   }
   UNREACHABLE();
   return os;
@@ -78,11 +80,12 @@
 
 // static
 PropertyAccessInfo PropertyAccessInfo::DataField(
-    MapList const& receiver_maps, FieldIndex field_index,
-    MachineRepresentation field_representation, Type* field_type,
-    MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
+    PropertyConstness constness, MapList const& receiver_maps,
+    FieldIndex field_index, MachineRepresentation field_representation,
+    Type* field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
     MaybeHandle<Map> transition_map) {
-  return PropertyAccessInfo(holder, transition_map, field_index,
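+  // Constant fields get their own access-info kind.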
+  Kind kind = constness == kConst ? kDataConstantField : kDataField;
+  return PropertyAccessInfo(kind, holder, transition_map, field_index,
                             field_representation, field_type, field_map,
                             receiver_maps);
 }
@@ -124,10 +127,10 @@
       field_type_(Type::Any()) {}
 
 PropertyAccessInfo::PropertyAccessInfo(
-    MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
+    Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
     FieldIndex field_index, MachineRepresentation field_representation,
     Type* field_type, MaybeHandle<Map> field_map, MapList const& receiver_maps)
-    : kind_(kDataField),
+    : kind_(kind),
       receiver_maps_(receiver_maps),
       transition_map_(transition_map),
       holder_(holder),
@@ -144,13 +147,13 @@
     case kInvalid:
       break;
 
-    case kNotFound:
-      return true;
-
-    case kDataField: {
+    case kDataField:
+    case kDataConstantField: {
       // Check if we actually access the same field.
-      if (this->transition_map_.address() == that->transition_map_.address() &&
+      if (this->kind_ == that->kind_ &&
+          this->transition_map_.address() == that->transition_map_.address() &&
           this->field_index_ == that->field_index_ &&
+          this->field_map_.address() == that->field_map_.address() &&
           this->field_type_->Is(that->field_type_) &&
           that->field_type_->Is(this->field_type_) &&
           this->field_representation_ == that->field_representation_) {
@@ -173,6 +176,8 @@
       }
       return false;
     }
+
+    case kNotFound:
     case kGeneric: {
       this->receiver_maps_.insert(this->receiver_maps_.end(),
                                   that->receiver_maps_.begin(),
@@ -282,7 +287,8 @@
     int const number = descriptors->SearchWithCache(isolate(), *name, *map);
     if (number != DescriptorArray::kNotFound) {
       PropertyDetails const details = descriptors->GetDetails(number);
-      if (access_mode == AccessMode::kStore) {
+      if (access_mode == AccessMode::kStore ||
+          access_mode == AccessMode::kStoreInLiteral) {
         // Don't bother optimizing stores to read-only properties.
         if (details.IsReadOnly()) {
           return false;
@@ -295,14 +301,8 @@
           return LookupTransition(receiver_map, name, holder, access_info);
         }
       }
-      switch (details.type()) {
-        case DATA_CONSTANT: {
-          *access_info = PropertyAccessInfo::DataConstant(
-              MapList{receiver_map},
-              handle(descriptors->GetValue(number), isolate()), holder);
-          return true;
-        }
-        case DATA: {
+      if (details.location() == kField) {
+        if (details.kind() == kData) {
           int index = descriptors->GetFieldIndex(number);
           Representation details_representation = details.representation();
           FieldIndex field_index = FieldIndex::ForPropertyIndex(
@@ -341,11 +341,25 @@
             }
           }
           *access_info = PropertyAccessInfo::DataField(
-              MapList{receiver_map}, field_index, field_representation,
-              field_type, field_map, holder);
+              details.constness(), MapList{receiver_map}, field_index,
+              field_representation, field_type, field_map, holder);
           return true;
+        } else {
+          DCHECK_EQ(kAccessor, details.kind());
+          // TODO(turbofan): Add support for general accessors?
+          return false;
         }
-        case ACCESSOR_CONSTANT: {
+
+      } else {
+        DCHECK_EQ(kDescriptor, details.location());
+        if (details.kind() == kData) {
+          DCHECK(!FLAG_track_constant_fields);
+          *access_info = PropertyAccessInfo::DataConstant(
+              MapList{receiver_map},
+              handle(descriptors->GetValue(number), isolate()), holder);
+          return true;
+        } else {
+          DCHECK_EQ(kAccessor, details.kind());
           Handle<Object> accessors(descriptors->GetValue(number), isolate());
           if (!accessors->IsAccessorPair()) return false;
           Handle<Object> accessor(
@@ -361,15 +375,23 @@
             if (optimization.api_call_info()->fast_handler()->IsCode()) {
               return false;
             }
+            if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
+          }
+          if (access_mode == AccessMode::kLoad) {
+            Handle<Name> cached_property_name;
+            if (FunctionTemplateInfo::TryGetCachedPropertyName(isolate(),
+                                                               accessor)
+                    .ToHandle(&cached_property_name)) {
+              if (ComputePropertyAccessInfo(map, cached_property_name,
+                                            access_mode, access_info)) {
+                return true;
+              }
+            }
           }
           *access_info = PropertyAccessInfo::AccessorConstant(
               MapList{receiver_map}, accessor, holder);
           return true;
         }
-        case ACCESSOR: {
-          // TODO(turbofan): Add support for general accessors?
-          return false;
-        }
       }
       UNREACHABLE();
       return false;
@@ -382,6 +404,11 @@
       return false;
     }
 
+    // Don't search the prototype chain when storing in literals.
+    if (access_mode == AccessMode::kStoreInLiteral) {
+      return LookupTransition(receiver_map, name, holder, access_info);
+    }
+
     // Don't lookup private symbols on the prototype chain.
     if (name->IsPrivate()) return false;
 
@@ -478,8 +505,9 @@
         field_type = type_cache_.kJSArrayLengthType;
       }
     }
+    // Special fields are always mutable.
     *access_info = PropertyAccessInfo::DataField(
-        MapList{map}, field_index, field_representation, field_type);
+        kMutable, MapList{map}, field_index, field_representation, field_type);
     return true;
   }
   return false;
@@ -503,7 +531,7 @@
     // Don't bother optimizing stores to read-only properties.
     if (details.IsReadOnly()) return false;
     // TODO(bmeurer): Handle transition to data constant?
-    if (details.type() != DATA) return false;
+    if (details.location() != kField) return false;
     int const index = details.field_index();
     Representation details_representation = details.representation();
     FieldIndex field_index = FieldIndex::ForPropertyIndex(
@@ -539,9 +567,10 @@
       }
     }
     dependencies()->AssumeMapNotDeprecated(transition_map);
+    // Transitioning stores are never stores to constant fields.
     *access_info = PropertyAccessInfo::DataField(
-        MapList{map}, field_index, field_representation, field_type, field_map,
-        holder, transition_map);
+        kMutable, MapList{map}, field_index, field_representation, field_type,
+        field_map, holder, transition_map);
     return true;
   }
   return false;
diff --git a/src/compiler/access-info.h b/src/compiler/access-info.h
index 1d485dd..42fa1db 100644
--- a/src/compiler/access-info.h
+++ b/src/compiler/access-info.h
@@ -26,7 +26,8 @@
 class TypeCache;
 
 // Whether we are loading a property or storing to a property.
-enum class AccessMode { kLoad, kStore };
+// For a store during literal creation, do not walk up the prototype chain.
+enum class AccessMode { kLoad, kStore, kStoreInLiteral };
 
 std::ostream& operator<<(std::ostream&, AccessMode);
 
@@ -61,6 +62,7 @@
     kNotFound,
     kDataConstant,
     kDataField,
+    kDataConstantField,
     kAccessorConstant,
     kGeneric
   };
@@ -71,9 +73,9 @@
                                          Handle<Object> constant,
                                          MaybeHandle<JSObject> holder);
   static PropertyAccessInfo DataField(
-      MapList const& receiver_maps, FieldIndex field_index,
-      MachineRepresentation field_representation, Type* field_type,
-      MaybeHandle<Map> field_map = MaybeHandle<Map>(),
+      PropertyConstness constness, MapList const& receiver_maps,
+      FieldIndex field_index, MachineRepresentation field_representation,
+      Type* field_type, MaybeHandle<Map> field_map = MaybeHandle<Map>(),
       MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
       MaybeHandle<Map> transition_map = MaybeHandle<Map>());
   static PropertyAccessInfo AccessorConstant(MapList const& receiver_maps,
@@ -88,6 +90,9 @@
   bool IsNotFound() const { return kind() == kNotFound; }
   bool IsDataConstant() const { return kind() == kDataConstant; }
   bool IsDataField() const { return kind() == kDataField; }
+  // TODO(ishell): rename to IsDataConstant() once constant field tracking
+  // is done.
+  bool IsDataConstantField() const { return kind() == kDataConstantField; }
   bool IsAccessorConstant() const { return kind() == kAccessorConstant; }
   bool IsGeneric() const { return kind() == kGeneric; }
 
@@ -110,7 +115,7 @@
                      MapList const& receiver_maps);
   PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
                      Handle<Object> constant, MapList const& receiver_maps);
-  PropertyAccessInfo(MaybeHandle<JSObject> holder,
+  PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
                      MaybeHandle<Map> transition_map, FieldIndex field_index,
                      MachineRepresentation field_representation,
                      Type* field_type, MaybeHandle<Map> field_map,
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index c473b9b..82039c8 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -32,6 +32,7 @@
       case kFlags_branch:
       case kFlags_deoptimize:
       case kFlags_set:
+      case kFlags_trap:
         return SetCC;
       case kFlags_none:
         return LeaveCC;
@@ -473,7 +474,8 @@
 
   // Check if current frame is an arguments adaptor frame.
   __ ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(scratch1,
+         Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(ne, &done);
 
   // Load arguments count from current arguments adaptor frame (note, it
@@ -736,10 +738,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1504,6 +1504,438 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmFloat32x4Splat: {
+      __ vdup(i.OutputSimd128Register(), i.InputFloatRegister(0));
+      break;
+    }
+    case kArmFloat32x4ExtractLane: {
+      __ ExtractLane(i.OutputFloatRegister(), i.InputSimd128Register(0),
+                     kScratchReg, i.InputInt8(1));
+      break;
+    }
+    case kArmFloat32x4ReplaceLane: {
+      __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                     i.InputFloatRegister(2), kScratchReg, i.InputInt8(1));
+      break;
+    }
+    case kArmFloat32x4FromInt32x4: {
+      __ vcvt_f32_s32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmFloat32x4FromUint32x4: {
+      __ vcvt_f32_u32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmFloat32x4Abs: {
+      __ vabs(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmFloat32x4Neg: {
+      __ vneg(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmFloat32x4Add: {
+      __ vadd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmFloat32x4Sub: {
+      __ vsub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmFloat32x4Equal: {
+      __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmFloat32x4NotEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
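+      // There is no lane-wise != on NEON: compare for equality, then invert.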
+      __ vceq(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+      __ vmvn(dst, dst);
+      break;
+    }
+    case kArmInt32x4Splat: {
+      __ vdup(Neon32, i.OutputSimd128Register(), i.InputRegister(0));
+      break;
+    }
+    case kArmInt32x4ExtractLane: {
+      __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS32,
+                     i.InputInt8(1));
+      break;
+    }
+    case kArmInt32x4ReplaceLane: {
+      __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                     i.InputRegister(2), NeonS32, i.InputInt8(1));
+      break;
+    }
+    case kArmInt32x4FromFloat32x4: {
+      __ vcvt_s32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmUint32x4FromFloat32x4: {
+      __ vcvt_u32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmInt32x4Neg: {
+      __ vneg(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmInt32x4ShiftLeftByScalar: {
+      __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt5(1));
+      break;
+    }
+    case kArmInt32x4ShiftRightByScalar: {
+      __ vshr(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt5(1));
+      break;
+    }
+    case kArmInt32x4Add: {
+      __ vadd(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4Sub: {
+      __ vsub(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4Mul: {
+      __ vmul(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4Min: {
+      __ vmin(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4Max: {
+      __ vmax(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4Equal: {
+      __ vceq(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4NotEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vceq(Neon32, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      __ vmvn(dst, dst);
+      break;
+    }
+    case kArmInt32x4GreaterThan: {
+      __ vcgt(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4GreaterThanOrEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vcge(NeonS32, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint32x4ShiftRightByScalar: {
+      __ vshr(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt5(1));
+      break;
+    }
+    case kArmUint32x4Min: {
+      __ vmin(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint32x4Max: {
+      __ vmax(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint32x4GreaterThan: {
+      __ vcgt(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint32x4GreaterThanOrEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vcge(NeonU32, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8Splat: {
+      __ vdup(Neon16, i.OutputSimd128Register(), i.InputRegister(0));
+      break;
+    }
+    case kArmInt16x8ExtractLane: {
+      __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS16,
+                     i.InputInt8(1));
+      break;
+    }
+    case kArmInt16x8ReplaceLane: {
+      __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                     i.InputRegister(2), NeonS16, i.InputInt8(1));
+      break;
+    }
+    case kArmInt16x8Neg: {
+      __ vneg(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmInt16x8ShiftLeftByScalar: {
+      __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt4(1));
+      break;
+    }
+    case kArmInt16x8ShiftRightByScalar: {
+      __ vshr(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt4(1));
+      break;
+    }
+    case kArmInt16x8Add: {
+      __ vadd(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8AddSaturate: {
+      __ vqadd(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8Sub: {
+      __ vsub(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8SubSaturate: {
+      __ vqsub(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8Mul: {
+      __ vmul(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8Min: {
+      __ vmin(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8Max: {
+      __ vmax(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8Equal: {
+      __ vceq(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8NotEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vceq(Neon16, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      __ vmvn(dst, dst);
+      break;
+    }
+    case kArmInt16x8GreaterThan: {
+      __ vcgt(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8GreaterThanOrEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vcge(NeonS16, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint16x8ShiftRightByScalar: {
+      __ vshr(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt4(1));
+      break;
+    }
+    case kArmUint16x8AddSaturate: {
+      __ vqadd(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint16x8SubSaturate: {
+      __ vqsub(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint16x8Min: {
+      __ vmin(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint16x8Max: {
+      __ vmax(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint16x8GreaterThan: {
+      __ vcgt(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint16x8GreaterThanOrEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vcge(NeonU16, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16Splat: {
+      __ vdup(Neon8, i.OutputSimd128Register(), i.InputRegister(0));
+      break;
+    }
+    case kArmInt8x16ExtractLane: {
+      __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS8,
+                     i.InputInt8(1));
+      break;
+    }
+    case kArmInt8x16ReplaceLane: {
+      __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                     i.InputRegister(2), NeonS8, i.InputInt8(1));
+      break;
+    }
+    case kArmInt8x16Neg: {
+      __ vneg(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmInt8x16ShiftLeftByScalar: {
+      __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt3(1));
+      break;
+    }
+    case kArmInt8x16ShiftRightByScalar: {
+      __ vshr(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt3(1));
+      break;
+    }
+    case kArmInt8x16Add: {
+      __ vadd(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16AddSaturate: {
+      __ vqadd(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16Sub: {
+      __ vsub(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16SubSaturate: {
+      __ vqsub(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16Mul: {
+      __ vmul(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16Min: {
+      __ vmin(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16Max: {
+      __ vmax(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16Equal: {
+      __ vceq(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16NotEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vceq(Neon8, dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+      __ vmvn(dst, dst);
+      break;
+    }
+    case kArmInt8x16GreaterThan: {
+      __ vcgt(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16GreaterThanOrEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vcge(NeonS8, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint8x16ShiftRightByScalar: {
+      __ vshr(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt3(1));
+      break;
+    }
+    case kArmUint8x16AddSaturate: {
+      __ vqadd(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint8x16SubSaturate: {
+      __ vqsub(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint8x16Min: {
+      __ vmin(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint8x16Max: {
+      __ vmax(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint8x16GreaterThan: {
+      __ vcgt(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint8x16GreaterThanOrEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vcge(NeonU8, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmSimd128And: {
+      __ vand(i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmSimd128Or: {
+      __ vorr(i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmSimd128Xor: {
+      __ veor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmSimd128Not: {
+      __ vmvn(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmSimd32x4Select:
+    case kArmSimd16x8Select:
+    case kArmSimd8x16Select: {
+      // vbsl clobbers the mask input so make sure it was DefineSameAsFirst.
+      DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
+      __ vbsl(i.OutputSimd128Register(), i.InputSimd128Register(1),
+              i.InputSimd128Register(2));
+      break;
+    }
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
       break;
@@ -1590,6 +2022,69 @@
   if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      ArmOperandConverter i(gen_, instr_);
+
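+      // The id of the trap builtin is passed as the instruction's last input.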
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        // We use the context register as the scratch register, because we do
+        // not have a context here.
+        __ PrepareCallCFunction(0, 0, cp);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
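+        // Call the trap builtin and record a safepoint for the call.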
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+        }
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
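+  // When the frame was elided, the out-of-line code sets one up before the
+  // call.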
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Condition cc = FlagsConditionToCondition(condition);
+  __ b(cc, tlabel);
+}
 
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -1633,16 +2128,19 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
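+  // Map the deoptimization kind onto the corresponding bailout type.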
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   // TODO(turbofan): We should be able to generate better code by sharing the
   // actual final call site and just bl'ing to it here, similar to what we do
   // in the lithium backend.
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   __ CheckConstPool(false, false);
@@ -1824,9 +2322,7 @@
           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
       switch (src.type()) {
         case Constant::kInt32:
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmReference(src.rmode())) {
             __ mov(dst, Operand(src.ToInt32(), src.rmode()));
           } else {
             __ mov(dst, Operand(src.ToInt32()));
@@ -1891,8 +2387,7 @@
         DCHECK(destination->IsDoubleStackSlot());
         __ vstr(src, g.ToMemOperand(destination));
       }
-    } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+    } else if (rep == MachineRepresentation::kFloat32) {
       // GapResolver may give us reg codes that don't map to actual s-registers.
       // Generate code to work around those cases.
       int src_code = LocationOperand::cast(source)->register_code();
@@ -1903,6 +2398,19 @@
         DCHECK(destination->IsFloatStackSlot());
         __ VmovExtended(g.ToMemOperand(destination), src_code, kScratchReg);
       }
+    } else {
+      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+      QwNeonRegister src = g.ToSimd128Register(source);
+      if (destination->IsSimd128Register()) {
+        QwNeonRegister dst = g.ToSimd128Register(destination);
+        __ Move(dst, src);
+      } else {
+        DCHECK(destination->IsSimd128StackSlot());
+        MemOperand dst = g.ToMemOperand(destination);
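+        // vst1 addresses memory through a register, so compute the
+        // destination address in the scratch register first.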
+        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+        __ vst1(Neon8, NeonListOperand(src.low(), 2),
+                NeonMemOperand(kScratchReg));
+      }
     }
   } else if (source->IsFPStackSlot()) {
     MemOperand src = g.ToMemOperand(source);
@@ -1911,24 +2419,38 @@
     if (destination->IsFPRegister()) {
       if (rep == MachineRepresentation::kFloat64) {
         __ vldr(g.ToDoubleRegister(destination), src);
-      } else {
-        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      } else if (rep == MachineRepresentation::kFloat32) {
         // GapResolver may give us reg codes that don't map to actual
         // s-registers. Generate code to work around those cases.
         int dst_code = LocationOperand::cast(destination)->register_code();
         __ VmovExtended(dst_code, src, kScratchReg);
+      } else {
+        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+        QwNeonRegister dst = g.ToSimd128Register(destination);
+        __ add(kScratchReg, src.rn(), Operand(src.offset()));
+        __ vld1(Neon8, NeonListOperand(dst.low(), 2),
+                NeonMemOperand(kScratchReg));
       }
     } else {
       DCHECK(destination->IsFPStackSlot());
       if (rep == MachineRepresentation::kFloat64) {
         DwVfpRegister temp = kScratchDoubleReg;
         __ vldr(temp, src);
         __ vstr(temp, g.ToMemOperand(destination));
-      } else {
-        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      } else if (rep == MachineRepresentation::kFloat32) {
         SwVfpRegister temp = kScratchDoubleReg.low();
         __ vldr(temp, src);
         __ vstr(temp, g.ToMemOperand(destination));
+      } else {
+        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+        MemOperand dst = g.ToMemOperand(destination);
+        __ add(kScratchReg, src.rn(), Operand(src.offset()));
+        __ vld1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+                NeonMemOperand(kScratchReg));
+        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+        __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+                NeonMemOperand(kScratchReg));
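+        // kScratchQuadReg overlaps kDoubleRegZero, so restore the zero
+        // register afterwards.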
+        __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
       }
     }
   } else {
@@ -1936,7 +2458,6 @@
   }
 }
 
-
 void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                  InstructionOperand* destination) {
   ArmOperandConverter g(this, nullptr);
@@ -1975,7 +2496,7 @@
       DwVfpRegister src = g.ToDoubleRegister(source);
       if (destination->IsFPRegister()) {
         DwVfpRegister dst = g.ToDoubleRegister(destination);
-        __ vswp(src, dst);
+        __ Swap(src, dst);
       } else {
         DCHECK(destination->IsFPStackSlot());
         MemOperand dst = g.ToMemOperand(destination);
@@ -1983,8 +2504,7 @@
         __ vldr(src, dst);
         __ vstr(temp, dst);
       }
-    } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+    } else if (rep == MachineRepresentation::kFloat32) {
       int src_code = LocationOperand::cast(source)->register_code();
       if (destination->IsFPRegister()) {
         int dst_code = LocationOperand::cast(destination)->register_code();
@@ -1998,29 +2518,55 @@
         __ VmovExtended(src_code, dst, kScratchReg);
         __ vstr(temp.low(), dst);
       }
+    } else {
+      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+      QwNeonRegister src = g.ToSimd128Register(source);
+      if (destination->IsFPRegister()) {
+        QwNeonRegister dst = g.ToSimd128Register(destination);
+        __ Swap(src, dst);
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        MemOperand dst = g.ToMemOperand(destination);
+        __ Move(kScratchQuadReg, src);
+        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+        __ vld1(Neon8, NeonListOperand(src.low(), 2),
+                NeonMemOperand(kScratchReg));
+        __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+                NeonMemOperand(kScratchReg));
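+        // Restore the 0 register, which aliases half of kScratchQuadReg.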
+        __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+      }
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPStackSlot());
-    Register temp_0 = kScratchReg;
-    LowDwVfpRegister temp_1 = kScratchDoubleReg;
-    MemOperand src0 = g.ToMemOperand(source);
-    MemOperand dst0 = g.ToMemOperand(destination);
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
     MachineRepresentation rep = LocationOperand::cast(source)->representation();
     if (rep == MachineRepresentation::kFloat64) {
-      MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
-      MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
-      __ vldr(temp_1, dst0);  // Save destination in temp_1.
-      __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
-      __ str(temp_0, dst0);
-      __ ldr(temp_0, src1);
-      __ str(temp_0, dst1);
-      __ vstr(temp_1, src0);
+      __ vldr(kScratchDoubleReg, dst);
+      __ vldr(kDoubleRegZero, src);
+      __ vstr(kScratchDoubleReg, src);
+      __ vstr(kDoubleRegZero, dst);
+      // Restore the 0 register.
+      __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+    } else if (rep == MachineRepresentation::kFloat32) {
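+      // Swap via the two single-precision halves of the scratch D-register.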
+      __ vldr(kScratchDoubleReg.low(), dst);
+      __ vldr(kScratchDoubleReg.high(), src);
+      __ vstr(kScratchDoubleReg.low(), src);
+      __ vstr(kScratchDoubleReg.high(), dst);
     } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
-      __ vldr(temp_1.low(), dst0);  // Save destination in temp_1.
-      __ ldr(temp_0, src0);  // Then use temp_0 to copy source to destination.
-      __ str(temp_0, dst0);
-      __ vstr(temp_1.low(), src0);
+      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
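+      // Swap the 16-byte value as two 8-byte halves, reusing both scratch
+      // D-registers for each half.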
+      __ vldr(kScratchDoubleReg, dst);
+      __ vldr(kDoubleRegZero, src);
+      __ vstr(kScratchDoubleReg, src);
+      __ vstr(kDoubleRegZero, dst);
+      src.set_offset(src.offset() + kDoubleSize);
+      dst.set_offset(dst.offset() + kDoubleSize);
+      __ vldr(kScratchDoubleReg, dst);
+      __ vldr(kDoubleRegZero, src);
+      __ vstr(kScratchDoubleReg, src);
+      __ vstr(kDoubleRegZero, dst);
+      // Restore the 0 register.
+      __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
     }
   } else {
     // No other combinations are possible.
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index 07c4033..0c19deb 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -119,7 +119,95 @@
   V(ArmLdr)                        \
   V(ArmStr)                        \
   V(ArmPush)                       \
-  V(ArmPoke)
+  V(ArmPoke)                       \
+  V(ArmFloat32x4Splat)             \
+  V(ArmFloat32x4ExtractLane)       \
+  V(ArmFloat32x4ReplaceLane)       \
+  V(ArmFloat32x4FromInt32x4)       \
+  V(ArmFloat32x4FromUint32x4)      \
+  V(ArmFloat32x4Abs)               \
+  V(ArmFloat32x4Neg)               \
+  V(ArmFloat32x4Add)               \
+  V(ArmFloat32x4Sub)               \
+  V(ArmFloat32x4Equal)             \
+  V(ArmFloat32x4NotEqual)          \
+  V(ArmInt32x4Splat)               \
+  V(ArmInt32x4ExtractLane)         \
+  V(ArmInt32x4ReplaceLane)         \
+  V(ArmInt32x4FromFloat32x4)       \
+  V(ArmUint32x4FromFloat32x4)      \
+  V(ArmInt32x4Neg)                 \
+  V(ArmInt32x4ShiftLeftByScalar)   \
+  V(ArmInt32x4ShiftRightByScalar)  \
+  V(ArmInt32x4Add)                 \
+  V(ArmInt32x4Sub)                 \
+  V(ArmInt32x4Mul)                 \
+  V(ArmInt32x4Min)                 \
+  V(ArmInt32x4Max)                 \
+  V(ArmInt32x4Equal)               \
+  V(ArmInt32x4NotEqual)            \
+  V(ArmInt32x4GreaterThan)         \
+  V(ArmInt32x4GreaterThanOrEqual)  \
+  V(ArmUint32x4ShiftRightByScalar) \
+  V(ArmUint32x4Min)                \
+  V(ArmUint32x4Max)                \
+  V(ArmUint32x4GreaterThan)        \
+  V(ArmUint32x4GreaterThanOrEqual) \
+  V(ArmInt16x8Splat)               \
+  V(ArmInt16x8ExtractLane)         \
+  V(ArmInt16x8ReplaceLane)         \
+  V(ArmInt16x8Neg)                 \
+  V(ArmInt16x8ShiftLeftByScalar)   \
+  V(ArmInt16x8ShiftRightByScalar)  \
+  V(ArmInt16x8Add)                 \
+  V(ArmInt16x8AddSaturate)         \
+  V(ArmInt16x8Sub)                 \
+  V(ArmInt16x8SubSaturate)         \
+  V(ArmInt16x8Mul)                 \
+  V(ArmInt16x8Min)                 \
+  V(ArmInt16x8Max)                 \
+  V(ArmInt16x8Equal)               \
+  V(ArmInt16x8NotEqual)            \
+  V(ArmInt16x8GreaterThan)         \
+  V(ArmInt16x8GreaterThanOrEqual)  \
+  V(ArmUint16x8ShiftRightByScalar) \
+  V(ArmUint16x8AddSaturate)        \
+  V(ArmUint16x8SubSaturate)        \
+  V(ArmUint16x8Min)                \
+  V(ArmUint16x8Max)                \
+  V(ArmUint16x8GreaterThan)        \
+  V(ArmUint16x8GreaterThanOrEqual) \
+  V(ArmInt8x16Splat)               \
+  V(ArmInt8x16ExtractLane)         \
+  V(ArmInt8x16ReplaceLane)         \
+  V(ArmInt8x16Neg)                 \
+  V(ArmInt8x16ShiftLeftByScalar)   \
+  V(ArmInt8x16ShiftRightByScalar)  \
+  V(ArmInt8x16Add)                 \
+  V(ArmInt8x16AddSaturate)         \
+  V(ArmInt8x16Sub)                 \
+  V(ArmInt8x16SubSaturate)         \
+  V(ArmInt8x16Mul)                 \
+  V(ArmInt8x16Min)                 \
+  V(ArmInt8x16Max)                 \
+  V(ArmInt8x16Equal)               \
+  V(ArmInt8x16NotEqual)            \
+  V(ArmInt8x16GreaterThan)         \
+  V(ArmInt8x16GreaterThanOrEqual)  \
+  V(ArmUint8x16ShiftRightByScalar) \
+  V(ArmUint8x16AddSaturate)        \
+  V(ArmUint8x16SubSaturate)        \
+  V(ArmUint8x16Min)                \
+  V(ArmUint8x16Max)                \
+  V(ArmUint8x16GreaterThan)        \
+  V(ArmUint8x16GreaterThanOrEqual) \
+  V(ArmSimd128And)                 \
+  V(ArmSimd128Or)                  \
+  V(ArmSimd128Xor)                 \
+  V(ArmSimd128Not)                 \
+  V(ArmSimd32x4Select)             \
+  V(ArmSimd16x8Select)             \
+  V(ArmSimd8x16Select)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
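
Note: the hunk above extends the ArmPoke-terminated V(...) list with the new SIMD opcodes. This is the classic X-macro idiom: a single backslash-continued list that client code expands several times, e.g. once into enum values and once into strings, so the two can never drift apart. A minimal self-contained sketch of the idiom (names are illustrative, not V8's actual macros):

    #include <cstdio>

    // Illustrative X-macro list in the same shape as the opcode list above.
    #define DEMO_OPCODE_LIST(V) \
      V(Float32x4Add)           \
      V(Int32x4Splat)           \
      V(Simd128And)

    // One expansion produces the enum...
    enum DemoOpcode {
    #define DECLARE(Name) kDemo##Name,
      DEMO_OPCODE_LIST(DECLARE)
    #undef DECLARE
      kDemoOpcodeCount
    };

    // ...another produces a parallel name table, kept in sync for free.
    static const char* const kDemoOpcodeNames[] = {
    #define NAME(Name) #Name,
      DEMO_OPCODE_LIST(NAME)
    #undef NAME
    };

    int main() {
      for (int i = 0; i < kDemoOpcodeCount; ++i)
        printf("%d: %s\n", i, kDemoOpcodeNames[i]);
      return 0;
    }
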
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
index 3f38e5d..ba2f219 100644
--- a/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -108,6 +108,94 @@
     case kArmFloat32Min:
     case kArmFloat64Min:
     case kArmFloat64SilenceNaN:
+    case kArmFloat32x4Splat:
+    case kArmFloat32x4ExtractLane:
+    case kArmFloat32x4ReplaceLane:
+    case kArmFloat32x4FromInt32x4:
+    case kArmFloat32x4FromUint32x4:
+    case kArmFloat32x4Abs:
+    case kArmFloat32x4Neg:
+    case kArmFloat32x4Add:
+    case kArmFloat32x4Sub:
+    case kArmFloat32x4Equal:
+    case kArmFloat32x4NotEqual:
+    case kArmInt32x4Splat:
+    case kArmInt32x4ExtractLane:
+    case kArmInt32x4ReplaceLane:
+    case kArmInt32x4FromFloat32x4:
+    case kArmUint32x4FromFloat32x4:
+    case kArmInt32x4Neg:
+    case kArmInt32x4ShiftLeftByScalar:
+    case kArmInt32x4ShiftRightByScalar:
+    case kArmInt32x4Add:
+    case kArmInt32x4Sub:
+    case kArmInt32x4Mul:
+    case kArmInt32x4Min:
+    case kArmInt32x4Max:
+    case kArmInt32x4Equal:
+    case kArmInt32x4NotEqual:
+    case kArmInt32x4GreaterThan:
+    case kArmInt32x4GreaterThanOrEqual:
+    case kArmUint32x4ShiftRightByScalar:
+    case kArmUint32x4Min:
+    case kArmUint32x4Max:
+    case kArmUint32x4GreaterThan:
+    case kArmUint32x4GreaterThanOrEqual:
+    case kArmInt16x8Splat:
+    case kArmInt16x8ExtractLane:
+    case kArmInt16x8ReplaceLane:
+    case kArmInt16x8Neg:
+    case kArmInt16x8ShiftLeftByScalar:
+    case kArmInt16x8ShiftRightByScalar:
+    case kArmInt16x8Add:
+    case kArmInt16x8AddSaturate:
+    case kArmInt16x8Sub:
+    case kArmInt16x8SubSaturate:
+    case kArmInt16x8Mul:
+    case kArmInt16x8Min:
+    case kArmInt16x8Max:
+    case kArmInt16x8Equal:
+    case kArmInt16x8NotEqual:
+    case kArmInt16x8GreaterThan:
+    case kArmInt16x8GreaterThanOrEqual:
+    case kArmUint16x8ShiftRightByScalar:
+    case kArmUint16x8AddSaturate:
+    case kArmUint16x8SubSaturate:
+    case kArmUint16x8Min:
+    case kArmUint16x8Max:
+    case kArmUint16x8GreaterThan:
+    case kArmUint16x8GreaterThanOrEqual:
+    case kArmInt8x16Splat:
+    case kArmInt8x16ExtractLane:
+    case kArmInt8x16ReplaceLane:
+    case kArmInt8x16Neg:
+    case kArmInt8x16ShiftLeftByScalar:
+    case kArmInt8x16ShiftRightByScalar:
+    case kArmInt8x16Add:
+    case kArmInt8x16AddSaturate:
+    case kArmInt8x16Sub:
+    case kArmInt8x16SubSaturate:
+    case kArmInt8x16Mul:
+    case kArmInt8x16Min:
+    case kArmInt8x16Max:
+    case kArmInt8x16Equal:
+    case kArmInt8x16NotEqual:
+    case kArmInt8x16GreaterThan:
+    case kArmInt8x16GreaterThanOrEqual:
+    case kArmUint8x16ShiftRightByScalar:
+    case kArmUint8x16AddSaturate:
+    case kArmUint8x16SubSaturate:
+    case kArmUint8x16Min:
+    case kArmUint8x16Max:
+    case kArmUint8x16GreaterThan:
+    case kArmUint8x16GreaterThanOrEqual:
+    case kArmSimd128And:
+    case kArmSimd128Or:
+    case kArmSimd128Xor:
+    case kArmSimd128Not:
+    case kArmSimd32x4Select:
+    case kArmSimd16x8Select:
+    case kArmSimd8x16Select:
       return kNoOpcodeFlags;
 
     case kArmVldrF32:
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index 5279d1e..0cffff7 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -84,7 +84,6 @@
                  g.UseRegister(node->InputAt(0)));
 }
 
-
 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
   ArmOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
@@ -92,6 +91,29 @@
                  g.UseRegister(node->InputAt(1)));
 }
 
+void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+  ArmOperandGenerator g(selector);
+  // Use DefineSameAsFirst for ternary ops that clobber their first input,
+  // e.g. the NEON vbsl instruction.
+  selector->Emit(
+      opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+      g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+}
+
+void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+  ArmOperandGenerator g(selector);
+  int32_t imm = OpParameter<int32_t>(node);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
+
+void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+  ArmOperandGenerator g(selector);
+  int32_t imm = OpParameter<int32_t>(node);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+                 g.UseRegister(node->InputAt(1)));
+}
 
 template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
           AddressingMode kImmMode, AddressingMode kRegMode>
@@ -266,7 +288,10 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -403,6 +428,9 @@
       break;
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -488,6 +516,9 @@
         break;
       case MachineRepresentation::kWord64:   // Fall through.
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -501,6 +532,11 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk): Protected stores are not implemented on ARM yet.
+  UNIMPLEMENTED();
+}
+
 void InstructionSelector::VisitUnalignedLoad(Node* node) {
   UnalignedLoadRepresentation load_rep =
       UnalignedLoadRepresentationOf(node->op());
@@ -646,6 +682,9 @@
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -690,6 +729,9 @@
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -884,7 +926,10 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -1079,15 +1124,8 @@
   VisitShift(this, node, TryMatchROR);
 }
 
-
-void InstructionSelector::VisitWord32Clz(Node* node) {
-  VisitRR(this, kArmClz, node);
-}
-
-
 void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
 
-
 void InstructionSelector::VisitWord32ReverseBits(Node* node) {
   DCHECK(IsSupported(ARMv7));
   VisitRR(this, kArmRbit, node);
@@ -1250,12 +1288,16 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     InstructionOperand in[] = {temp_operand, result_operand, shift_31};
-    selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
                    result_operand, shift_31);
+  } else {
+    DCHECK(cont->IsTrap());
+    InstructionOperand in[] = {temp_operand, result_operand, shift_31,
+                               g.UseImmediate(cont->trap_id())};
+    selector->Emit(opcode, 0, nullptr, 4, in);
   }
 }
 
@@ -1284,12 +1326,6 @@
   VisitRRR(this, kArmMul, node);
 }
 
-
-void InstructionSelector::VisitInt32MulHigh(Node* node) {
-  VisitRRR(this, kArmSmmul, node);
-}
-
-
 void InstructionSelector::VisitUint32MulHigh(Node* node) {
   ArmOperandGenerator g(this);
   InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
@@ -1318,73 +1354,76 @@
   VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
 }
 
+#define RR_OP_LIST(V)                                \
+  V(Word32Clz, kArmClz)                              \
+  V(ChangeFloat32ToFloat64, kArmVcvtF64F32)          \
+  V(RoundInt32ToFloat32, kArmVcvtF32S32)             \
+  V(RoundUint32ToFloat32, kArmVcvtF32U32)            \
+  V(ChangeInt32ToFloat64, kArmVcvtF64S32)            \
+  V(ChangeUint32ToFloat64, kArmVcvtF64U32)           \
+  V(TruncateFloat32ToInt32, kArmVcvtS32F32)          \
+  V(TruncateFloat32ToUint32, kArmVcvtU32F32)         \
+  V(ChangeFloat64ToInt32, kArmVcvtS32F64)            \
+  V(ChangeFloat64ToUint32, kArmVcvtU32F64)           \
+  V(TruncateFloat64ToUint32, kArmVcvtU32F64)         \
+  V(TruncateFloat64ToFloat32, kArmVcvtF32F64)        \
+  V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
+  V(RoundFloat64ToInt32, kArmVcvtS32F64)             \
+  V(BitcastFloat32ToInt32, kArmVmovU32F32)           \
+  V(BitcastInt32ToFloat32, kArmVmovF32U32)           \
+  V(Float64ExtractLowWord32, kArmVmovLowU32F64)      \
+  V(Float64ExtractHighWord32, kArmVmovHighU32F64)    \
+  V(Float64SilenceNaN, kArmFloat64SilenceNaN)        \
+  V(Float32Abs, kArmVabsF32)                         \
+  V(Float64Abs, kArmVabsF64)                         \
+  V(Float32Neg, kArmVnegF32)                         \
+  V(Float64Neg, kArmVnegF64)                         \
+  V(Float32Sqrt, kArmVsqrtF32)                       \
+  V(Float64Sqrt, kArmVsqrtF64)
 
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
-  VisitRR(this, kArmVcvtF64F32, node);
-}
+#define RR_OP_LIST_V8(V)                 \
+  V(Float32RoundDown, kArmVrintmF32)     \
+  V(Float64RoundDown, kArmVrintmF64)     \
+  V(Float32RoundUp, kArmVrintpF32)       \
+  V(Float64RoundUp, kArmVrintpF64)       \
+  V(Float32RoundTruncate, kArmVrintzF32) \
+  V(Float64RoundTruncate, kArmVrintzF64) \
+  V(Float64RoundTiesAway, kArmVrintaF64) \
+  V(Float32RoundTiesEven, kArmVrintnF32) \
+  V(Float64RoundTiesEven, kArmVrintnF64)
 
+#define RRR_OP_LIST(V)          \
+  V(Int32MulHigh, kArmSmmul)    \
+  V(Float32Mul, kArmVmulF32)    \
+  V(Float64Mul, kArmVmulF64)    \
+  V(Float32Div, kArmVdivF32)    \
+  V(Float64Div, kArmVdivF64)    \
+  V(Float32Max, kArmFloat32Max) \
+  V(Float64Max, kArmFloat64Max) \
+  V(Float32Min, kArmFloat32Min) \
+  V(Float64Min, kArmFloat64Min)
 
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
-  VisitRR(this, kArmVcvtF32S32, node);
-}
+#define RR_VISITOR(Name, opcode)                      \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRR(this, opcode, node);                      \
+  }
+RR_OP_LIST(RR_VISITOR)
+#undef RR_VISITOR
 
+#define RR_VISITOR_V8(Name, opcode)                   \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    DCHECK(CpuFeatures::IsSupported(ARMv8));          \
+    VisitRR(this, opcode, node);                      \
+  }
+RR_OP_LIST_V8(RR_VISITOR_V8)
+#undef RR_VISITOR_V8
 
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
-  VisitRR(this, kArmVcvtF32U32, node);
-}
-
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
-  VisitRR(this, kArmVcvtF64S32, node);
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
-  VisitRR(this, kArmVcvtF64U32, node);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
-  VisitRR(this, kArmVcvtS32F32, node);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
-  VisitRR(this, kArmVcvtU32F32, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
-  VisitRR(this, kArmVcvtS32F64, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
-  VisitRR(this, kArmVcvtU32F64, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
-  VisitRR(this, kArmVcvtU32F64, node);
-}
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
-  VisitRR(this, kArmVcvtF32F64, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
-  VisitRR(this, kArchTruncateDoubleToI, node);
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
-  VisitRR(this, kArmVcvtS32F64, node);
-}
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
-  VisitRR(this, kArmVmovU32F32, node);
-}
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
-  VisitRR(this, kArmVmovF32U32, node);
-}
+#define RRR_VISITOR(Name, opcode)                     \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRRR(this, opcode, node);                     \
+  }
+RRR_OP_LIST(RRR_VISITOR)
+#undef RRR_VISITOR
 
 void InstructionSelector::VisitFloat32Add(Node* node) {
   ArmOperandGenerator g(this);
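
Note: each (Name, opcode) pair in RR_OP_LIST and RRR_OP_LIST, fed through RR_VISITOR or RRR_VISITOR, regenerates exactly one of the hand-written visitors deleted below; for example V(Word32Clz, kArmClz) expands back into the removed VisitWord32Clz body. A standalone model of that expansion (stand-in names and opcodes, not V8's):

    #include <cassert>

    int VisitRR(int opcode) { return opcode; }  // stand-in for the helper

    // Each list entry becomes one trivial forwarding function.
    #define DEMO_RR_LIST(V) \
      V(Word32Clz, 100)     \
      V(Float64Sqrt, 101)

    #define DEMO_RR_VISITOR(Name, opcode) \
      int Visit##Name() { return VisitRR(opcode); }
    DEMO_RR_LIST(DEMO_RR_VISITOR)
    #undef DEMO_RR_VISITOR

    int main() {
      assert(VisitWord32Clz() == 100);
      assert(VisitFloat64Sqrt() == 101);
      return 0;
    }
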
@@ -1453,132 +1492,12 @@
   VisitRRR(this, kArmVsubF64, node);
 }
 
-void InstructionSelector::VisitFloat32Mul(Node* node) {
-  VisitRRR(this, kArmVmulF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
-  VisitRRR(this, kArmVmulF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
-  VisitRRR(this, kArmVdivF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
-  VisitRRR(this, kArmVdivF64, node);
-}
-
-
 void InstructionSelector::VisitFloat64Mod(Node* node) {
   ArmOperandGenerator g(this);
   Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
        g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
 }
 
-void InstructionSelector::VisitFloat32Max(Node* node) {
-  VisitRRR(this, kArmFloat32Max, node);
-}
-
-void InstructionSelector::VisitFloat64Max(Node* node) {
-  VisitRRR(this, kArmFloat64Max, node);
-}
-
-void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
-  VisitRR(this, kArmFloat64SilenceNaN, node);
-}
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
-  VisitRRR(this, kArmFloat32Min, node);
-}
-
-void InstructionSelector::VisitFloat64Min(Node* node) {
-  VisitRRR(this, kArmFloat64Min, node);
-}
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
-  VisitRR(this, kArmVabsF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
-  VisitRR(this, kArmVabsF64, node);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
-  VisitRR(this, kArmVsqrtF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
-  VisitRR(this, kArmVsqrtF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintmF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintmF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintpF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintpF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintzF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintzF64, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintaF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintnF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintnF64, node);
-}
-
-void InstructionSelector::VisitFloat32Neg(Node* node) {
-  VisitRR(this, kArmVnegF32, node);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
-  VisitRR(this, kArmVnegF64, node);
-}
-
 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                    InstructionCode opcode) {
   ArmOperandGenerator g(this);
@@ -1641,11 +1560,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1835,7 +1757,10 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -1991,11 +1916,14 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
-                             cont->reason(), cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    value_operand);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -2008,14 +1936,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
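
Note: VisitTrapIf and VisitTrapUnless reuse the flags-continuation machinery: where a branch continuation carries labels and a deoptimize continuation carries a frame state, a trap continuation carries only a trap id, which the emit paths above append as an extra immediate input via g.UseImmediate(cont->trap_id()). A rough model of that three-way dispatch, with all names illustrative:

    #include <cstdio>

    // Illustrative continuation: one object describing what to do with a
    // computed condition: branch to a label, set a register, or trap.
    struct DemoContinuation {
      enum Kind { kBranch, kSet, kTrap } kind;
      int payload;  // label id, result register, or trap id
    };

    void EmitWithContinuation(const char* cmp, const DemoContinuation& cont) {
      switch (cont.kind) {
        case DemoContinuation::kBranch:
          printf("%s; b.cond label_%d\n", cmp, cont.payload);
          break;
        case DemoContinuation::kSet:
          printf("%s; cset r%d\n", cmp, cont.payload);
          break;
        case DemoContinuation::kTrap:
          // The trap id rides along as an immediate operand.
          printf("%s [trap_id=%d as immediate]\n", cmp, cont.payload);
          break;
      }
    }

    int main() {
      EmitWithContinuation("cmp r0, #0", {DemoContinuation::kTrap, 7});
      return 0;
    }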
 
@@ -2151,17 +2094,6 @@
   VisitFloat64Compare(this, node, &cont);
 }
 
-
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
-  VisitRR(this, kArmVmovLowU32F64, node);
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
-  VisitRR(this, kArmVmovHighU32F64, node);
-}
-
-
 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   ArmOperandGenerator g(this);
   Node* left = node->InputAt(0);
@@ -2249,6 +2181,145 @@
   Emit(code, 0, nullptr, input_count, inputs);
 }
 
+#define SIMD_TYPE_LIST(V) \
+  V(Float32x4)            \
+  V(Int32x4)              \
+  V(Int16x8)              \
+  V(Int8x16)
+
+#define SIMD_FORMAT_LIST(V) \
+  V(32x4)                   \
+  V(16x8)                   \
+  V(8x16)
+
+#define SIMD_UNOP_LIST(V)  \
+  V(Float32x4FromInt32x4)  \
+  V(Float32x4FromUint32x4) \
+  V(Float32x4Abs)          \
+  V(Float32x4Neg)          \
+  V(Int32x4FromFloat32x4)  \
+  V(Uint32x4FromFloat32x4) \
+  V(Int32x4Neg)            \
+  V(Int16x8Neg)            \
+  V(Int8x16Neg)            \
+  V(Simd128Not)
+
+#define SIMD_BINOP_LIST(V)      \
+  V(Float32x4Add)               \
+  V(Float32x4Sub)               \
+  V(Float32x4Equal)             \
+  V(Float32x4NotEqual)          \
+  V(Int32x4Add)                 \
+  V(Int32x4Sub)                 \
+  V(Int32x4Mul)                 \
+  V(Int32x4Min)                 \
+  V(Int32x4Max)                 \
+  V(Int32x4Equal)               \
+  V(Int32x4NotEqual)            \
+  V(Int32x4GreaterThan)         \
+  V(Int32x4GreaterThanOrEqual)  \
+  V(Uint32x4Min)                \
+  V(Uint32x4Max)                \
+  V(Uint32x4GreaterThan)        \
+  V(Uint32x4GreaterThanOrEqual) \
+  V(Int16x8Add)                 \
+  V(Int16x8AddSaturate)         \
+  V(Int16x8Sub)                 \
+  V(Int16x8SubSaturate)         \
+  V(Int16x8Mul)                 \
+  V(Int16x8Min)                 \
+  V(Int16x8Max)                 \
+  V(Int16x8Equal)               \
+  V(Int16x8NotEqual)            \
+  V(Int16x8GreaterThan)         \
+  V(Int16x8GreaterThanOrEqual)  \
+  V(Uint16x8AddSaturate)        \
+  V(Uint16x8SubSaturate)        \
+  V(Uint16x8Min)                \
+  V(Uint16x8Max)                \
+  V(Uint16x8GreaterThan)        \
+  V(Uint16x8GreaterThanOrEqual) \
+  V(Int8x16Add)                 \
+  V(Int8x16AddSaturate)         \
+  V(Int8x16Sub)                 \
+  V(Int8x16SubSaturate)         \
+  V(Int8x16Mul)                 \
+  V(Int8x16Min)                 \
+  V(Int8x16Max)                 \
+  V(Int8x16Equal)               \
+  V(Int8x16NotEqual)            \
+  V(Int8x16GreaterThan)         \
+  V(Int8x16GreaterThanOrEqual)  \
+  V(Uint8x16AddSaturate)        \
+  V(Uint8x16SubSaturate)        \
+  V(Uint8x16Min)                \
+  V(Uint8x16Max)                \
+  V(Uint8x16GreaterThan)        \
+  V(Uint8x16GreaterThanOrEqual) \
+  V(Simd128And)                 \
+  V(Simd128Or)                  \
+  V(Simd128Xor)
+
+#define SIMD_SHIFT_OP_LIST(V)   \
+  V(Int32x4ShiftLeftByScalar)   \
+  V(Int32x4ShiftRightByScalar)  \
+  V(Uint32x4ShiftRightByScalar) \
+  V(Int16x8ShiftLeftByScalar)   \
+  V(Int16x8ShiftRightByScalar)  \
+  V(Uint16x8ShiftRightByScalar) \
+  V(Int8x16ShiftLeftByScalar)   \
+  V(Int8x16ShiftRightByScalar)  \
+  V(Uint8x16ShiftRightByScalar)
+
+#define SIMD_VISIT_SPLAT(Type)                              \
+  void InstructionSelector::VisitCreate##Type(Node* node) { \
+    VisitRR(this, kArm##Type##Splat, node);                 \
+  }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
+
+#define SIMD_VISIT_EXTRACT_LANE(Type)                              \
+  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+    VisitRRI(this, kArm##Type##ExtractLane, node);                 \
+  }
+SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
+#undef SIMD_VISIT_EXTRACT_LANE
+
+#define SIMD_VISIT_REPLACE_LANE(Type)                              \
+  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+    VisitRRIR(this, kArm##Type##ReplaceLane, node);                \
+  }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_UNOP(Name)                         \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRR(this, kArm##Name, node);                  \
+  }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+
+#define SIMD_VISIT_BINOP(Name)                        \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRRR(this, kArm##Name, node);                 \
+  }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+
+#define SIMD_VISIT_SHIFT_OP(Name)                     \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRRI(this, kArm##Name, node);                 \
+  }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
+
+#define SIMD_VISIT_SELECT_OP(format)                                \
+  void InstructionSelector::VisitSimd##format##Select(Node* node) { \
+    VisitRRRR(this, kArmSimd##format##Select, node);                \
+  }
+SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
+#undef SIMD_VISIT_SELECT_OP
+
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
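
Note: the SIMD_VISIT_EXTRACT_LANE and SIMD_VISIT_REPLACE_LANE expansions route the lane index through VisitRRI/VisitRRIR as an immediate operand. The lane operations themselves are simple to state; a scalar model for the Int32x4 case (illustrative, not V8 code):

    #include <cassert>
    #include <cstdint>

    // Scalar model of Int32x4 ExtractLane/ReplaceLane: four 32-bit lanes,
    // indexed by a compile-time-constant lane number in [0, 3].
    struct DemoInt32x4 { int32_t lanes[4]; };

    int32_t ExtractLane(const DemoInt32x4& v, int lane) {
      assert(0 <= lane && lane < 4);
      return v.lanes[lane];
    }

    DemoInt32x4 ReplaceLane(DemoInt32x4 v, int lane, int32_t value) {
      assert(0 <= lane && lane < 4);
      v.lanes[lane] = value;  // every other lane passes through unchanged
      return v;
    }

    int main() {
      DemoInt32x4 v = {{1, 2, 3, 4}};
      assert(ExtractLane(v, 2) == 3);
      assert(ExtractLane(ReplaceLane(v, 0, 9), 0) == 9);
      return 0;
    }
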
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 8b1cb57..1cdedb0 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -209,17 +209,16 @@
     Constant constant = ToConstant(operand);
     switch (constant.type()) {
       case Constant::kInt32:
-        if (constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+        if (RelocInfo::IsWasmSizeReference(constant.rmode())) {
           return Operand(constant.ToInt32(), constant.rmode());
         } else {
           return Operand(constant.ToInt32());
         }
       case Constant::kInt64:
-        if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-            constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+        if (RelocInfo::IsWasmPtrReference(constant.rmode())) {
           return Operand(constant.ToInt64(), constant.rmode());
         } else {
-          DCHECK(constant.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+          DCHECK(!RelocInfo::IsWasmSizeReference(constant.rmode()));
           return Operand(constant.ToInt64());
         }
       case Constant::kFloat32:
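
Note: the hunk above folds the spelled-out rmode comparisons into the RelocInfo::IsWasmSizeReference and IsWasmPtrReference predicates; going by the removed conditions, the pointer predicate covers WASM_MEMORY_REFERENCE and WASM_GLOBAL_REFERENCE, and the size predicate covers at least WASM_MEMORY_SIZE_REFERENCE. A sketch of that consolidation under those assumptions (enum and names illustrative):

    #include <cassert>

    enum class DemoRelocMode {
      WASM_MEMORY_REFERENCE,
      WASM_GLOBAL_REFERENCE,
      WASM_MEMORY_SIZE_REFERENCE,
      NONE,
    };

    // Predicates replacing the one-off equality checks at each call site.
    bool IsWasmPtrReference(DemoRelocMode m) {
      return m == DemoRelocMode::WASM_MEMORY_REFERENCE ||
             m == DemoRelocMode::WASM_GLOBAL_REFERENCE;
    }
    bool IsWasmSizeReference(DemoRelocMode m) {
      return m == DemoRelocMode::WASM_MEMORY_SIZE_REFERENCE;
    }

    int main() {
      assert(IsWasmPtrReference(DemoRelocMode::WASM_GLOBAL_REFERENCE));
      assert(!IsWasmSizeReference(DemoRelocMode::NONE));
      return 0;
    }
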
@@ -571,7 +570,8 @@
 
   // Check if current frame is an arguments adaptor frame.
   __ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ Cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ Cmp(scratch1,
+         Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ B(ne, &done);
 
   // Load arguments count from current arguments adaptor frame (note, it
@@ -775,10 +775,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1702,6 +1700,67 @@
   if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+    void Generate() final {
+      Arm64OperandConverter i(gen_, instr_);
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        DCHECK(csp.Is(__ StackPointer()));
+        // Initialize the jssp because it is required for the runtime call.
+        __ Mov(jssp, csp);
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          // The trap code should never return.
+          __ Brk(0);
+        }
+      }
+    }
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Condition cc = FlagsConditionToCondition(condition);
+  __ B(cc, tlabel);
+}
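
Note: AssembleArchTrap follows the out-of-line-code pattern: the hot path emits only a conditional branch to ool->entry(), and the trap call sequence is materialized after the function body so the common path stays straight-line. A toy model of that emission order (illustrative, not V8's OutOfLineCode API):

    #include <cstdio>
    #include <functional>
    #include <vector>

    // Collect slow paths during main emission; flush them after the body.
    struct DemoAssembler {
      std::vector<std::function<void()>> out_of_line;
      void BranchToOutOfLine(std::function<void()> slow_path) {
        printf("  b.cond ool_%zu\n", out_of_line.size());
        out_of_line.push_back(std::move(slow_path));
      }
      void FlushOutOfLine() {
        for (size_t i = 0; i < out_of_line.size(); ++i) {
          printf("ool_%zu:\n", i);
          out_of_line[i]();
        }
      }
    };

    int main() {
      DemoAssembler masm;
      printf("body:\n");
      masm.BranchToOutOfLine([] { printf("  call trap_builtin\n"); });
      printf("  ret\n");
      masm.FlushOutOfLine();  // trap code lands after the return
      return 0;
    }
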
 
 // Assemble boolean materializations after this instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -1749,13 +1808,16 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -1828,7 +1890,6 @@
       osr_pc_offset_ = __ pc_offset();
       shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
     }
-
     // Build remainder of frame, including accounting for and filling-in
     // frame-specific header information, e.g. claiming the extra slot that
     // other platforms explicitly push for STUB frames and frames recording
@@ -1843,7 +1904,7 @@
     if (is_stub_frame) {
       UseScratchRegisterScope temps(masm());
       Register temp = temps.AcquireX();
-      __ Mov(temp, Smi::FromInt(info()->GetOutputStackFrameType()));
+      __ Mov(temp, StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
       __ Str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
     }
   }
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 0eef53c..bacf792 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -123,7 +123,7 @@
 
   bool CanBeLoadStoreShiftImmediate(Node* node, MachineRepresentation rep) {
     // TODO(arm64): Load and Store on 128-bit Q registers are not supported yet.
-    DCHECK_NE(MachineRepresentation::kSimd128, rep);
+    DCHECK_GT(MachineRepresentation::kSimd128, rep);
     return IsIntegerConstant(node) &&
            (GetIntegerConstantValue(node) == ElementSizeLog2Of(rep));
   }
@@ -436,14 +436,18 @@
     Matcher m_shift(right_node);
     inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
-    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+    // We only need at most the last 6 bits of the shift.
+    inputs[input_count++] =
+        g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
   } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
                                              !is_add_sub)) {
     if (must_commute_cond) cont->Commute();
     Matcher m_shift(left_node);
     inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
-    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+    // We only need at most the last 6 bits of the shift.
+    inputs[input_count++] =
+        g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
   } else {
     inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
     inputs[input_count++] = g.UseRegister(right_node);
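
Note: masking the matched shift constant with 0x3F before emitting it follows from the comment that only the last 6 bits of the shift matter on arm64; larger constants would otherwise leak through as out-of-range immediates. A one-line check of the wrap-around (illustrative):

    #include <cassert>
    #include <cstdint>

    // Only the low 6 bits of a 64-bit shift amount are meaningful; mask
    // before encoding, exactly as the selector now does with & 0x3F.
    int EncodeShiftAmount(int64_t raw) { return static_cast<int>(raw & 0x3F); }

    int main() {
      assert(EncodeShiftAmount(65) == 1);   // 65 mod 64
      assert(EncodeShiftAmount(63) == 63);  // in-range values are unchanged
      return 0;
    }
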
@@ -470,7 +474,10 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -586,6 +593,9 @@
       immediate_mode = kLoadStoreImm64;
       break;
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -682,6 +692,9 @@
         immediate_mode = kLoadStoreImm64;
         break;
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -708,6 +721,11 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk): Protected stores are not implemented on ARM64 yet.
+  UNIMPLEMENTED();
+}
+
 // The architecture supports unaligned access, so VisitLoad is used instead.
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 
@@ -745,6 +763,9 @@
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -797,6 +818,9 @@
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -934,7 +958,8 @@
     uint64_t mask = m.right().Value();
     uint64_t mask_width = base::bits::CountPopulation64(mask);
     uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
-    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+    if ((mask_width != 0) && (mask_width != 64) &&
+        (mask_msb + mask_width == 64)) {
       // The mask must be contiguous, and occupy the least-significant bits.
       DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
 
@@ -1061,6 +1086,7 @@
     // OP is >>> or >> and (K & 0x1f) != 0.
     Int32BinopMatcher mleft(m.left().node());
     if (mleft.right().HasValue() && m.right().HasValue() &&
+        (mleft.right().Value() & 0x1f) != 0 &&
         (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
       DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
       ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
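
Note: both guards added above protect the bitfield-extract rewrites from degenerate widths: a 64-bit mask of width 64 satisfies mask_msb + mask_width == 64 yet names the whole register rather than a proper ubfx field, and a shift constant with (K & 0x1f) == 0 turns the (x << K) >> K pattern into the identity. A small check of both facts (illustrative):

    #include <cassert>
    #include <cstdint>

    // Valid ubfx-style mask: a contiguous run of ones in the low bits,
    // neither empty nor covering all 64 bits (GCC/Clang builtins).
    bool IsLowContiguousMask(uint64_t mask) {
      if (mask == 0 || mask == ~uint64_t{0}) return false;  // width 0 or 64
      return __builtin_clzll(mask) + __builtin_popcountll(mask) == 64;
    }

    // The (x << K) >> K pattern extracts the low (32 - K) bits, but only
    // names a proper bitfield when K & 0x1f is non-zero.
    uint32_t LowBitsViaShifts(uint32_t x, int k) { return (x << k) >> k; }

    int main() {
      assert(IsLowContiguousMask(0x0000FFFFull));
      assert(!IsLowContiguousMask(0xFFFF0000ull));  // not least-significant
      assert(!IsLowContiguousMask(~uint64_t{0}));   // width 64, now rejected
      assert(LowBitsViaShifts(0xDEADBEEFu, 16) == 0x0000BEEFu);
      assert(LowBitsViaShifts(0xDEADBEEFu, 0) == 0xDEADBEEFu);  // identity
      return 0;
    }
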
@@ -1218,44 +1244,99 @@
   VisitRRO(this, kArm64Ror, node, kShift64Imm);
 }
 
+#define RR_OP_LIST(V)                                         \
+  V(Word64Clz, kArm64Clz)                                     \
+  V(Word32Clz, kArm64Clz32)                                   \
+  V(Word32ReverseBits, kArm64Rbit32)                          \
+  V(Word64ReverseBits, kArm64Rbit)                            \
+  V(ChangeFloat32ToFloat64, kArm64Float32ToFloat64)           \
+  V(RoundInt32ToFloat32, kArm64Int32ToFloat32)                \
+  V(RoundUint32ToFloat32, kArm64Uint32ToFloat32)              \
+  V(ChangeInt32ToFloat64, kArm64Int32ToFloat64)               \
+  V(ChangeUint32ToFloat64, kArm64Uint32ToFloat64)             \
+  V(TruncateFloat32ToInt32, kArm64Float32ToInt32)             \
+  V(ChangeFloat64ToInt32, kArm64Float64ToInt32)               \
+  V(TruncateFloat32ToUint32, kArm64Float32ToUint32)           \
+  V(ChangeFloat64ToUint32, kArm64Float64ToUint32)             \
+  V(TruncateFloat64ToUint32, kArm64Float64ToUint32)           \
+  V(TruncateFloat64ToFloat32, kArm64Float64ToFloat32)         \
+  V(TruncateFloat64ToWord32, kArchTruncateDoubleToI)          \
+  V(RoundFloat64ToInt32, kArm64Float64ToInt32)                \
+  V(RoundInt64ToFloat32, kArm64Int64ToFloat32)                \
+  V(RoundInt64ToFloat64, kArm64Int64ToFloat64)                \
+  V(RoundUint64ToFloat32, kArm64Uint64ToFloat32)              \
+  V(RoundUint64ToFloat64, kArm64Uint64ToFloat64)              \
+  V(BitcastFloat32ToInt32, kArm64Float64ExtractLowWord32)     \
+  V(BitcastFloat64ToInt64, kArm64U64MoveFloat64)              \
+  V(BitcastInt32ToFloat32, kArm64Float64MoveU64)              \
+  V(BitcastInt64ToFloat64, kArm64Float64MoveU64)              \
+  V(Float32Abs, kArm64Float32Abs)                             \
+  V(Float64Abs, kArm64Float64Abs)                             \
+  V(Float32Sqrt, kArm64Float32Sqrt)                           \
+  V(Float64Sqrt, kArm64Float64Sqrt)                           \
+  V(Float32RoundDown, kArm64Float32RoundDown)                 \
+  V(Float64RoundDown, kArm64Float64RoundDown)                 \
+  V(Float32RoundUp, kArm64Float32RoundUp)                     \
+  V(Float64RoundUp, kArm64Float64RoundUp)                     \
+  V(Float32RoundTruncate, kArm64Float32RoundTruncate)         \
+  V(Float64RoundTruncate, kArm64Float64RoundTruncate)         \
+  V(Float64RoundTiesAway, kArm64Float64RoundTiesAway)         \
+  V(Float32RoundTiesEven, kArm64Float32RoundTiesEven)         \
+  V(Float64RoundTiesEven, kArm64Float64RoundTiesEven)         \
+  V(Float32Neg, kArm64Float32Neg)                             \
+  V(Float64Neg, kArm64Float64Neg)                             \
+  V(Float64ExtractLowWord32, kArm64Float64ExtractLowWord32)   \
+  V(Float64ExtractHighWord32, kArm64Float64ExtractHighWord32) \
+  V(Float64SilenceNaN, kArm64Float64SilenceNaN)
 
-void InstructionSelector::VisitWord64Clz(Node* node) {
-  Arm64OperandGenerator g(this);
-  Emit(kArm64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
-}
+#define RRR_OP_LIST(V)            \
+  V(Int32Div, kArm64Idiv32)       \
+  V(Int64Div, kArm64Idiv)         \
+  V(Uint32Div, kArm64Udiv32)      \
+  V(Uint64Div, kArm64Udiv)        \
+  V(Int32Mod, kArm64Imod32)       \
+  V(Int64Mod, kArm64Imod)         \
+  V(Uint32Mod, kArm64Umod32)      \
+  V(Uint64Mod, kArm64Umod)        \
+  V(Float32Add, kArm64Float32Add) \
+  V(Float64Add, kArm64Float64Add) \
+  V(Float32Sub, kArm64Float32Sub) \
+  V(Float64Sub, kArm64Float64Sub) \
+  V(Float32Mul, kArm64Float32Mul) \
+  V(Float64Mul, kArm64Float64Mul) \
+  V(Float32Div, kArm64Float32Div) \
+  V(Float64Div, kArm64Float64Div) \
+  V(Float32Max, kArm64Float32Max) \
+  V(Float64Max, kArm64Float64Max) \
+  V(Float32Min, kArm64Float32Min) \
+  V(Float64Min, kArm64Float64Min)
 
+#define RR_VISITOR(Name, opcode)                      \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRR(this, opcode, node);                      \
+  }
+RR_OP_LIST(RR_VISITOR)
+#undef RR_VISITOR
 
-void InstructionSelector::VisitWord32Clz(Node* node) {
-  Arm64OperandGenerator g(this);
-  Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
-}
-
+#define RRR_VISITOR(Name, opcode)                     \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRRR(this, opcode, node);                     \
+  }
+RRR_OP_LIST(RRR_VISITOR)
+#undef RRR_VISITOR
 
 void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
 
-
 void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
 
-
-void InstructionSelector::VisitWord32ReverseBits(Node* node) {
-  VisitRR(this, kArm64Rbit32, node);
-}
-
-
-void InstructionSelector::VisitWord64ReverseBits(Node* node) {
-  VisitRR(this, kArm64Rbit, node);
-}
-
 void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
 
-
 void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
 
-
 void InstructionSelector::VisitInt32Add(Node* node) {
   Arm64OperandGenerator g(this);
   Int32BinopMatcher m(node);
@@ -1377,11 +1458,14 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     InstructionOperand in[] = {result, result};
-    selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), result, result,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1487,94 +1571,6 @@
 }
 
 
-void InstructionSelector::VisitInt32Div(Node* node) {
-  VisitRRR(this, kArm64Idiv32, node);
-}
-
-
-void InstructionSelector::VisitInt64Div(Node* node) {
-  VisitRRR(this, kArm64Idiv, node);
-}
-
-
-void InstructionSelector::VisitUint32Div(Node* node) {
-  VisitRRR(this, kArm64Udiv32, node);
-}
-
-
-void InstructionSelector::VisitUint64Div(Node* node) {
-  VisitRRR(this, kArm64Udiv, node);
-}
-
-
-void InstructionSelector::VisitInt32Mod(Node* node) {
-  VisitRRR(this, kArm64Imod32, node);
-}
-
-
-void InstructionSelector::VisitInt64Mod(Node* node) {
-  VisitRRR(this, kArm64Imod, node);
-}
-
-
-void InstructionSelector::VisitUint32Mod(Node* node) {
-  VisitRRR(this, kArm64Umod32, node);
-}
-
-
-void InstructionSelector::VisitUint64Mod(Node* node) {
-  VisitRRR(this, kArm64Umod, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
-  VisitRR(this, kArm64Float32ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
-  VisitRR(this, kArm64Int32ToFloat32, node);
-}
-
-
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
-  VisitRR(this, kArm64Uint32ToFloat32, node);
-}
-
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
-  VisitRR(this, kArm64Int32ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
-  VisitRR(this, kArm64Uint32ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
-  VisitRR(this, kArm64Float32ToInt32, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
-  VisitRR(this, kArm64Float64ToInt32, node);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
-  VisitRR(this, kArm64Float32ToUint32, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
-  VisitRR(this, kArm64Float64ToUint32, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
-  VisitRR(this, kArm64Float64ToUint32, node);
-}
-
 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
   Arm64OperandGenerator g(this);
 
@@ -1729,20 +1725,6 @@
   Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
 }
 
-
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
-  VisitRR(this, kArm64Float64ToFloat32, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
-  VisitRR(this, kArchTruncateDoubleToI, node);
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
-  VisitRR(this, kArm64Float64ToInt32, node);
-}
-
-
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   Arm64OperandGenerator g(this);
   Node* value = node->InputAt(0);
@@ -1751,85 +1733,6 @@
   Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value));
 }
 
-
-void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
-  VisitRR(this, kArm64Int64ToFloat32, node);
-}
-
-
-void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
-  VisitRR(this, kArm64Int64ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
-  VisitRR(this, kArm64Uint64ToFloat32, node);
-}
-
-
-void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
-  VisitRR(this, kArm64Uint64ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
-  VisitRR(this, kArm64Float64ExtractLowWord32, node);
-}
-
-
-void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
-  VisitRR(this, kArm64U64MoveFloat64, node);
-}
-
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
-  VisitRR(this, kArm64Float64MoveU64, node);
-}
-
-
-void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
-  VisitRR(this, kArm64Float64MoveU64, node);
-}
-
-
-void InstructionSelector::VisitFloat32Add(Node* node) {
-  VisitRRR(this, kArm64Float32Add, node);
-}
-
-
-void InstructionSelector::VisitFloat64Add(Node* node) {
-  VisitRRR(this, kArm64Float64Add, node);
-}
-
-
-void InstructionSelector::VisitFloat32Sub(Node* node) {
-  VisitRRR(this, kArm64Float32Sub, node);
-}
-
-void InstructionSelector::VisitFloat64Sub(Node* node) {
-  VisitRRR(this, kArm64Float64Sub, node);
-}
-
-void InstructionSelector::VisitFloat32Mul(Node* node) {
-  VisitRRR(this, kArm64Float32Mul, node);
-}
-
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
-  VisitRRR(this, kArm64Float64Mul, node);
-}
-
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
-  VisitRRR(this, kArm64Float32Div, node);
-}
-
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
-  VisitRRR(this, kArm64Float64Div, node);
-}
-
-
 void InstructionSelector::VisitFloat64Mod(Node* node) {
   Arm64OperandGenerator g(this);
   Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
@@ -1837,94 +1740,6 @@
        g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
 }
 
-void InstructionSelector::VisitFloat32Max(Node* node) {
-  VisitRRR(this, kArm64Float32Max, node);
-}
-
-void InstructionSelector::VisitFloat64Max(Node* node) {
-  VisitRRR(this, kArm64Float64Max, node);
-}
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
-  VisitRRR(this, kArm64Float32Min, node);
-}
-
-void InstructionSelector::VisitFloat64Min(Node* node) {
-  VisitRRR(this, kArm64Float64Min, node);
-}
-
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
-  VisitRR(this, kArm64Float32Abs, node);
-}
-
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
-  VisitRR(this, kArm64Float64Abs, node);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
-  VisitRR(this, kArm64Float32Sqrt, node);
-}
-
-
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
-  VisitRR(this, kArm64Float64Sqrt, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
-  VisitRR(this, kArm64Float32RoundDown, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
-  VisitRR(this, kArm64Float64RoundDown, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
-  VisitRR(this, kArm64Float32RoundUp, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
-  VisitRR(this, kArm64Float64RoundUp, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
-  VisitRR(this, kArm64Float32RoundTruncate, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
-  VisitRR(this, kArm64Float64RoundTruncate, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
-  VisitRR(this, kArm64Float64RoundTiesAway, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
-  VisitRR(this, kArm64Float32RoundTiesEven, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
-  VisitRR(this, kArm64Float64RoundTiesEven, node);
-}
-
-void InstructionSelector::VisitFloat32Neg(Node* node) {
-  VisitRR(this, kArm64Float32Neg, node);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
-  VisitRR(this, kArm64Float64Neg, node);
-}
-
 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                    InstructionCode opcode) {
   Arm64OperandGenerator g(this);
@@ -1993,11 +1808,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -2162,7 +1980,7 @@
   } else {
     DCHECK(cont->IsDeoptimize());
     selector->EmitDeoptimize(cont->Encode(opcode), g.NoOutput(), value,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
   }
 }
 
@@ -2513,11 +2331,15 @@
     selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
                    g.UseRegister(value), g.Label(cont->true_block()),
                    g.Label(cont->false_block()));
-  } else {
-    DCHECK(cont->IsDeoptimize());
+  } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
                              g.UseRegister(value), g.UseRegister(value),
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(cont->Encode(kArm64Tst32), g.NoOutput(),
+                   g.UseRegister(value), g.UseRegister(value),
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -2530,14 +2352,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -2774,21 +2611,6 @@
   VisitFloat64Compare(this, node, &cont);
 }
 
-
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
-  Arm64OperandGenerator g(this);
-  Emit(kArm64Float64ExtractLowWord32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
-  Arm64OperandGenerator g(this);
-  Emit(kArm64Float64ExtractHighWord32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   Arm64OperandGenerator g(this);
   Node* left = node->InputAt(0);
@@ -2823,10 +2645,6 @@
        g.UseRegister(left), g.UseRegister(right));
 }
 
-void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
-  VisitRR(this, kArm64Float64SilenceNaN, node);
-}
-
 void InstructionSelector::VisitAtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   Arm64OperandGenerator g(this);
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index 1b7d116..e199a03 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -17,7 +17,9 @@
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/state-values-utils.h"
-#include "src/compiler/type-hint-analyzer.h"
+#include "src/feedback-vector.h"
+#include "src/objects-inl.h"
+#include "src/objects/literal-objects.h"
 
 namespace v8 {
 namespace internal {
@@ -166,8 +168,6 @@
   void ReturnValue(Node* return_value);
   void ThrowValue(Node* exception_value);
 
-  class DeferredCommands;
-
  protected:
   enum Command { CMD_BREAK, CMD_CONTINUE, CMD_RETURN, CMD_THROW };
 
@@ -207,93 +207,6 @@
   int stack_height_;
 };
 
-// Helper class for a try-finally control scope. It records intercepted
-// control-flow commands that cause entry into a finally-block and re-applies
-// them after leaving that block again. Special tokens identify each path
-// through the finally-block so that the right command is dispatched afterwards.
-class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
- public:
-  explicit DeferredCommands(AstGraphBuilder* owner)
-      : owner_(owner),
-        deferred_(owner->local_zone()),
-        return_token_(nullptr),
-        throw_token_(nullptr) {}
-
-  // One recorded control-flow command.
-  struct Entry {
-    Command command;       // The command type being applied on this path.
-    Statement* statement;  // The target statement for the command or {nullptr}.
-    Node* token;           // A token identifying this particular path.
-  };
-
-  // Records a control-flow command while entering the finally-block. This also
-  // generates a new dispatch token that identifies one particular path.
-  Node* RecordCommand(Command cmd, Statement* stmt, Node* value) {
-    Node* token = nullptr;
-    switch (cmd) {
-      case CMD_BREAK:
-      case CMD_CONTINUE:
-        token = NewPathToken(dispenser_.GetBreakContinueToken());
-        break;
-      case CMD_THROW:
-        if (throw_token_) return throw_token_;
-        token = NewPathToken(TokenDispenserForFinally::kThrowToken);
-        throw_token_ = token;
-        break;
-      case CMD_RETURN:
-        if (return_token_) return return_token_;
-        token = NewPathToken(TokenDispenserForFinally::kReturnToken);
-        return_token_ = token;
-        break;
-    }
-    DCHECK_NOT_NULL(token);
-    deferred_.push_back({cmd, stmt, token});
-    return token;
-  }
-
-  // Returns the dispatch token to be used to identify the implicit fall-through
-  // path at the end of a try-block into the corresponding finally-block.
-  Node* GetFallThroughToken() { return NewPathTokenForImplicitFallThrough(); }
-
-  // Applies all recorded control-flow commands after the finally-block again.
-  // This generates a dynamic dispatch on the token from the entry point.
-  void ApplyDeferredCommands(Node* token, Node* value) {
-    SwitchBuilder dispatch(owner_, static_cast<int>(deferred_.size()));
-    dispatch.BeginSwitch();
-    for (size_t i = 0; i < deferred_.size(); ++i) {
-      Node* condition = NewPathDispatchCondition(token, deferred_[i].token);
-      dispatch.BeginLabel(static_cast<int>(i), condition);
-      dispatch.EndLabel();
-    }
-    for (size_t i = 0; i < deferred_.size(); ++i) {
-      dispatch.BeginCase(static_cast<int>(i));
-      owner_->execution_control()->PerformCommand(
-          deferred_[i].command, deferred_[i].statement, value);
-      dispatch.EndCase();
-    }
-    dispatch.EndSwitch();
-  }
-
- protected:
-  Node* NewPathToken(int token_id) {
-    return owner_->jsgraph()->Constant(token_id);
-  }
-  Node* NewPathTokenForImplicitFallThrough() {
-    return NewPathToken(TokenDispenserForFinally::kFallThroughToken);
-  }
-  Node* NewPathDispatchCondition(Node* t1, Node* t2) {
-    return owner_->NewNode(
-        owner_->javascript()->StrictEqual(CompareOperationHint::kAny), t1, t2);
-  }
-
- private:
-  TokenDispenserForFinally dispenser_;
-  AstGraphBuilder* owner_;
-  ZoneVector<Entry> deferred_;
-  Node* return_token_;
-  Node* throw_token_;
-};
-
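
// [Editor's note] Illustrative sketch only. It models the token dispatch that
// the deleted DeferredCommands class performed: every way of leaving a
// try-block gets a small-integer token, the finally-block runs exactly once,
// and the token is then matched to resume the deferred command. Names are
// hypothetical.
#include <cstdio>
#include <functional>
#include <vector>

enum Token { kFallThrough = 0, kReturn = 1, kThrow = 2 /* break/continue: 3+ */ };

struct Deferred { int token; std::function<void()> resume; };

void RunTryFinally(int token, const std::vector<Deferred>& deferred,
                   const std::function<void()>& finally_block) {
  finally_block();  // the finally-block runs once, on every path
  for (const Deferred& d : deferred) {
    if (d.token == token) { d.resume(); return; }  // re-apply the command
  }
  // kFallThrough: nothing deferred, execution simply continues.
}

int main() {
  std::vector<Deferred> deferred = {
      {kReturn, [] { std::puts("resume: return"); }},
      {kThrow,  [] { std::puts("resume: rethrow"); }}};
  RunTryFinally(kReturn, deferred, [] { std::puts("finally"); });
}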
 
 // Control scope implementation for a BreakableStatement.
 class AstGraphBuilder::ControlScopeForBreakable : public ControlScope {
@@ -356,65 +269,9 @@
 };
 
 
-// Control scope implementation for a TryCatchStatement.
-class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
- public:
-  ControlScopeForCatch(AstGraphBuilder* owner, TryCatchStatement* stmt,
-                       TryCatchBuilder* control)
-      : ControlScope(owner), control_(control) {
-    builder()->try_nesting_level_++;  // Increment nesting.
-  }
-  ~ControlScopeForCatch() {
-    builder()->try_nesting_level_--;  // Decrement nesting.
-  }
-
- protected:
-  bool Execute(Command cmd, Statement* target, Node** value) override {
-    switch (cmd) {
-      case CMD_THROW:
-        control_->Throw(*value);
-        return true;
-      case CMD_BREAK:
-      case CMD_CONTINUE:
-      case CMD_RETURN:
-        break;
-    }
-    return false;
-  }
-
- private:
-  TryCatchBuilder* control_;
-};
-
-
-// Control scope implementation for a TryFinallyStatement.
-class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
- public:
-  ControlScopeForFinally(AstGraphBuilder* owner, TryFinallyStatement* stmt,
-                         DeferredCommands* commands, TryFinallyBuilder* control)
-      : ControlScope(owner), commands_(commands), control_(control) {
-    builder()->try_nesting_level_++;  // Increment nesting.
-  }
-  ~ControlScopeForFinally() {
-    builder()->try_nesting_level_--;  // Decrement nesting.
-  }
-
- protected:
-  bool Execute(Command cmd, Statement* target, Node** value) override {
-    Node* token = commands_->RecordCommand(cmd, target, *value);
-    control_->LeaveTry(token, *value);
-    return true;
-  }
-
- private:
-  DeferredCommands* commands_;
-  TryFinallyBuilder* control_;
-};
-
 AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
                                  JSGraph* jsgraph, float invocation_frequency,
-                                 LoopAssignmentAnalysis* loop,
-                                 TypeHintAnalysis* type_hint_analysis)
+                                 LoopAssignmentAnalysis* loop)
     : isolate_(info->isolate()),
       local_zone_(local_zone),
       info_(info),
@@ -425,12 +282,10 @@
       globals_(0, local_zone),
       execution_control_(nullptr),
       execution_context_(nullptr),
-      try_nesting_level_(0),
       input_buffer_size_(0),
       input_buffer_(nullptr),
       exit_controls_(local_zone),
       loop_assignment_analysis_(loop),
-      type_hint_analysis_(type_hint_analysis),
       state_values_cache_(jsgraph),
       liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
                          false, local_zone),
@@ -453,7 +308,7 @@
     // calling eval, not the anonymous closure containing the eval code.
     const Operator* op =
         javascript()->LoadContext(0, Context::CLOSURE_INDEX, false);
-    return NewNode(op, current_context());
+    return NewNode(op);
   } else {
     DCHECK(closure_scope->is_function_scope());
     return GetFunctionClosure();
@@ -483,18 +338,6 @@
   return function_context_.get();
 }
 
-
-Node* AstGraphBuilder::GetNewTarget() {
-  if (!new_target_.is_set()) {
-    int params = info()->num_parameters_including_this();
-    int index = Linkage::GetJSCallNewTargetParamIndex(params);
-    const Operator* op = common()->Parameter(index, "%new.target");
-    Node* node = NewNode(op, graph()->start());
-    new_target_.set(node);
-  }
-  return new_target_.get();
-}
-
 Node* AstGraphBuilder::GetEmptyFrameState() {
   if (!empty_frame_state_.is_set()) {
     const Operator* op = common()->FrameState(
@@ -573,15 +416,10 @@
   // Build the arguments object if it is used.
   BuildArgumentsObject(scope->arguments());
 
-  // Build rest arguments array if it is used.
-  Variable* rest_parameter = scope->rest_parameter();
-  BuildRestArgumentsArray(rest_parameter);
-
-  // Build assignment to {.this_function} variable if it is used.
-  BuildThisFunctionVariable(scope->this_function_var());
-
-  // Build assignment to {new.target} variable if it is used.
-  BuildNewTargetVariable(scope->new_target_var());
+  // We don't support new.target and rest parameters here.
+  DCHECK_NULL(scope->new_target_var());
+  DCHECK_NULL(scope->rest_parameter());
+  DCHECK_NULL(scope->this_function_var());
 
   // Emit tracing call if requested to do so.
   if (FLAG_trace) {
@@ -835,7 +673,7 @@
     }
   }
   if (should_update) {
-    const Operator* op = common()->StateValues(count);
+    const Operator* op = common()->StateValues(count, SparseInputMask::Dense());
     (*state_values) = graph()->NewNode(op, count, env_values);
   }
 }
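
// [Editor's note] Illustrative sketch only. common()->StateValues now also
// takes a SparseInputMask; Dense() presumably marks the case where every slot
// has a real input (no liveness holes). A minimal bitmask model, with
// hypothetical names:
#include <cassert>
#include <cstdint>

struct InputMask {
  static constexpr uint64_t kDense = ~uint64_t{0};  // sentinel: all present
  uint64_t bits;
  static InputMask Dense() { return {kDense}; }
  bool IsPresent(int i) const {
    return bits == kDense || ((bits >> i) & 1) != 0;
  }
};

int main() {
  InputMask dense = InputMask::Dense();
  assert(dense.IsPresent(0) && dense.IsPresent(17));
  InputMask sparse{0b101};  // inputs 0 and 2 live, input 1 optimized out
  assert(sparse.IsPresent(2) && !sparse.IsPresent(1));
}
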
@@ -1092,10 +930,12 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+      globals()->push_back(variable->name());
+      FeedbackSlot slot = decl->proxy()->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
       globals()->push_back(isolate()->factory()->undefined_value());
+      globals()->push_back(isolate()->factory()->undefined_value());
       break;
     }
     case VariableLocation::PARAMETER:
@@ -1109,17 +949,10 @@
       if (variable->binding_needs_init()) {
         Node* value = jsgraph()->TheHoleConstant();
         const Operator* op = javascript()->StoreContext(0, variable->index());
-        NewNode(op, current_context(), value);
+        NewNode(op, value);
       }
       break;
-    case VariableLocation::LOOKUP: {
-      DCHECK(!variable->binding_needs_init());
-      Node* name = jsgraph()->Constant(variable->name());
-      const Operator* op = javascript()->CallRuntime(Runtime::kDeclareEvalVar);
-      Node* store = NewNode(op, name);
-      PrepareFrameState(store, decl->proxy()->id());
-      break;
-    }
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -1134,9 +967,16 @@
           decl->fun(), info()->script(), info());
       // Check for stack-overflow exception.
       if (function.is_null()) return SetStackOverflow();
-      FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+      globals()->push_back(variable->name());
+      FeedbackSlot slot = decl->proxy()->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
+
+      // We need the slot where the literals array lives, too.
+      slot = decl->fun()->LiteralFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
+
       globals()->push_back(function);
       break;
     }
@@ -1151,19 +991,10 @@
       VisitForValue(decl->fun());
       Node* value = environment()->Pop();
       const Operator* op = javascript()->StoreContext(0, variable->index());
-      NewNode(op, current_context(), value);
+      NewNode(op, value);
       break;
     }
-    case VariableLocation::LOOKUP: {
-      VisitForValue(decl->fun());
-      Node* value = environment()->Pop();
-      Node* name = jsgraph()->Constant(variable->name());
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kDeclareEvalFunction);
-      Node* store = NewNode(op, name, value);
-      PrepareFrameState(store, decl->proxy()->id());
-      break;
-    }
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
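
// [Editor's note] Illustrative sketch only. After this change each global
// declaration contributes four entries to the flat globals() array handed to
// Runtime::kDeclareGlobals: name, variable feedback slot, literal feedback
// slot (or undefined), and the initial value (or undefined). A hypothetical
// model of that record layout:
#include <cstdio>
#include <string>
#include <vector>

struct Entry { std::string repr; };

// One record per declaration, flattened into a FixedArray-like vector.
void PushDeclaration(std::vector<Entry>* globals, const std::string& name,
                     int feedback_slot, int literal_slot /* -1 if none */,
                     const std::string& value /* "undefined" for vars */) {
  globals->push_back({name});
  globals->push_back({std::to_string(feedback_slot)});
  globals->push_back({literal_slot >= 0 ? std::to_string(literal_slot)
                                        : "undefined"});
  globals->push_back({value});
}

int main() {
  std::vector<Entry> globals;
  PushDeclaration(&globals, "x", 3, -1, "undefined");    // variable decl
  PushDeclaration(&globals, "f", 5, 6, "<function f>");  // function decl
  std::printf("%zu entries, %zu per declaration\n", globals.size(),
              globals.size() / 2);
}
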
@@ -1240,14 +1071,8 @@
 
 
 void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
-  VisitForValue(stmt->expression());
-  Node* value = environment()->Pop();
-  Node* object = BuildToObject(value, stmt->ToObjectId());
-  Handle<ScopeInfo> scope_info = stmt->scope()->scope_info();
-  const Operator* op = javascript()->CreateWithContext(scope_info);
-  Node* context = NewNode(op, object, GetFunctionClosureForContext());
-  PrepareFrameState(context, stmt->EntryId());
-  VisitInScope(stmt->statement(), stmt->scope(), context);
+  // Dynamic scoping is supported only by going through Ignition first.
+  UNREACHABLE();
 }
 
 
@@ -1277,13 +1102,7 @@
     Node* label = environment()->Pop();
     Node* tag = environment()->Top();
 
-    CompareOperationHint hint;
-    if (!type_hint_analysis_ ||
-        !type_hint_analysis_->GetCompareOperationHint(clause->CompareId(),
-                                                      &hint)) {
-      hint = CompareOperationHint::kAny;
-    }
-
+    CompareOperationHint hint = CompareOperationHint::kAny;
     const Operator* op = javascript()->StrictEqual(hint);
     Node* condition = NewNode(op, tag, label);
     compare_switch.BeginLabel(i, condition);
@@ -1354,218 +1173,32 @@
 
 
 void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
-  VisitForValue(stmt->subject());
-  Node* object = environment()->Pop();
-  BlockBuilder for_block(this);
-  for_block.BeginBlock();
-  // Check for null or undefined before entering loop.
-  Node* is_null_cond =
-      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), object,
-              jsgraph()->NullConstant());
-  for_block.BreakWhen(is_null_cond, BranchHint::kFalse);
-  Node* is_undefined_cond =
-      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), object,
-              jsgraph()->UndefinedConstant());
-  for_block.BreakWhen(is_undefined_cond, BranchHint::kFalse);
-  {
-    // Convert object to jsobject.
-    object = BuildToObject(object, stmt->ToObjectId());
-    environment()->Push(object);
-
-    // Prepare for-in cache.
-    Node* prepare = NewNode(javascript()->ForInPrepare(), object);
-    PrepareFrameState(prepare, stmt->PrepareId(),
-                      OutputFrameStateCombine::Push(3));
-    Node* cache_type = NewNode(common()->Projection(0), prepare);
-    Node* cache_array = NewNode(common()->Projection(1), prepare);
-    Node* cache_length = NewNode(common()->Projection(2), prepare);
-
-    // Construct the rest of the environment.
-    environment()->Push(cache_type);
-    environment()->Push(cache_array);
-    environment()->Push(cache_length);
-    environment()->Push(jsgraph()->ZeroConstant());
-
-    // Build the actual loop body.
-    LoopBuilder for_loop(this);
-    for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
-    {
-      // These stack values are renamed in the case of OSR, so reload them
-      // from the environment.
-      Node* index = environment()->Peek(0);
-      Node* cache_length = environment()->Peek(1);
-      Node* cache_array = environment()->Peek(2);
-      Node* cache_type = environment()->Peek(3);
-      Node* object = environment()->Peek(4);
-
-      // Check loop termination condition (we know that the {index} is always
-      // in Smi range, so we can just set the hint on the comparison below).
-      PrepareEagerCheckpoint(stmt->EntryId());
-      Node* exit_cond =
-          NewNode(javascript()->LessThan(CompareOperationHint::kSignedSmall),
-                  index, cache_length);
-      PrepareFrameState(exit_cond, BailoutId::None());
-      for_loop.BreakUnless(exit_cond);
-
-      // Compute the next enumerated value.
-      Node* value = NewNode(javascript()->ForInNext(), object, cache_array,
-                            cache_type, index);
-      PrepareFrameState(value, stmt->FilterId(),
-                        OutputFrameStateCombine::Push());
-      IfBuilder test_value(this);
-      Node* test_value_cond =
-          NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), value,
-                  jsgraph()->UndefinedConstant());
-      test_value.If(test_value_cond, BranchHint::kFalse);
-      test_value.Then();
-      test_value.Else();
-      {
-        environment()->Push(value);
-        PrepareEagerCheckpoint(stmt->FilterId());
-        value = environment()->Pop();
-        // Bind value and do loop body.
-        VectorSlotPair feedback =
-            CreateVectorSlotPair(stmt->EachFeedbackSlot());
-        VisitForInAssignment(stmt->each(), value, feedback,
-                             stmt->AssignmentId());
-        VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
-      }
-      test_value.End();
-      for_loop.EndBody();
-
-      // Increment counter and continue (we know that the {index} is always
-      // in Smi range, so we can just set the hint on the increment below).
-      index = environment()->Peek(0);
-      PrepareEagerCheckpoint(stmt->IncrementId());
-      index = NewNode(javascript()->Add(BinaryOperationHint::kSignedSmall),
-                      index, jsgraph()->OneConstant());
-      PrepareFrameState(index, BailoutId::None());
-      environment()->Poke(0, index);
-    }
-    for_loop.EndLoop();
-    environment()->Drop(5);
-  }
-  for_block.EndBlock();
+  // Only the BytecodeGraphBuilder supports for-in.
+  return SetStackOverflow();
 }
 
 
 void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
-  LoopBuilder for_loop(this);
-  VisitForEffect(stmt->assign_iterator());
-  for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
-  VisitForEffect(stmt->next_result());
-  VisitForTest(stmt->result_done());
-  Node* condition = environment()->Pop();
-  for_loop.BreakWhen(condition);
-  VisitForEffect(stmt->assign_each());
-  VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
-  for_loop.EndBody();
-  for_loop.EndLoop();
+  // Iterator looping is supported only by going through Ignition first.
+  UNREACHABLE();
 }
 
 
 void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  TryCatchBuilder try_control(this);
-
-  // Evaluate the try-block inside a control scope. This simulates a handler
-  // that is intercepting 'throw' control commands.
-  try_control.BeginTry();
-  {
-    ControlScopeForCatch scope(this, stmt, &try_control);
-    STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
-    environment()->Push(current_context());
-    Visit(stmt->try_block());
-    environment()->Pop();
-  }
-  try_control.EndTry();
-
-  // If requested, clear message object as we enter the catch block.
-  if (stmt->clear_pending_message()) {
-    Node* the_hole = jsgraph()->TheHoleConstant();
-    NewNode(javascript()->StoreMessage(), the_hole);
-  }
-
-  // Create a catch scope that binds the exception.
-  Node* exception = try_control.GetExceptionNode();
-  Handle<String> name = stmt->variable()->name();
-  Handle<ScopeInfo> scope_info = stmt->scope()->scope_info();
-  const Operator* op = javascript()->CreateCatchContext(name, scope_info);
-  Node* context = NewNode(op, exception, GetFunctionClosureForContext());
-
-  // Evaluate the catch-block.
-  VisitInScope(stmt->catch_block(), stmt->scope(), context);
-  try_control.EndCatch();
+  // Exception handling is supported only by going through Ignition first.
+  UNREACHABLE();
 }
 
 
 void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  TryFinallyBuilder try_control(this);
-
-  // We keep a record of all paths that enter the finally-block to be able to
-  // dispatch to the correct continuation point after the statements in the
-  // finally-block have been evaluated.
-  //
-  // The try-finally construct can enter the finally-block in three ways:
-  // 1. By exiting the try-block normally, falling through at the end.
-  // 2. By exiting the try-block with a function-local control flow transfer
-  //    (i.e. through break/continue/return statements).
-  // 3. By exiting the try-block with a thrown exception.
-  Node* fallthrough_result = jsgraph()->TheHoleConstant();
-  ControlScope::DeferredCommands* commands =
-      new (local_zone()) ControlScope::DeferredCommands(this);
-
-  // Evaluate the try-block inside a control scope. This simulates a handler
-  // that is intercepting all control commands.
-  try_control.BeginTry();
-  {
-    ControlScopeForFinally scope(this, stmt, commands, &try_control);
-    STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
-    environment()->Push(current_context());
-    Visit(stmt->try_block());
-    environment()->Pop();
-  }
-  try_control.EndTry(commands->GetFallThroughToken(), fallthrough_result);
-
-  // The result value semantics depend on how the block was entered:
-  //  - ReturnStatement: It represents the return value being returned.
-  //  - ThrowStatement: It represents the exception being thrown.
-  //  - BreakStatement/ContinueStatement: Filled with the hole.
-  //  - Falling through into finally-block: Filled with the hole.
-  Node* result = try_control.GetResultValueNode();
-  Node* token = try_control.GetDispatchTokenNode();
-
-  // The result value, dispatch token and message are expected on the operand
-  // stack (this is in sync with FullCodeGenerator::EnterFinallyBlock).
-  Node* message = NewNode(javascript()->LoadMessage());
-  environment()->Push(token);
-  environment()->Push(result);
-  environment()->Push(message);
-
-  // Clear message object as we enter the finally block.
-  Node* the_hole = jsgraph()->TheHoleConstant();
-  NewNode(javascript()->StoreMessage(), the_hole);
-
-  // Evaluate the finally-block.
-  Visit(stmt->finally_block());
-  try_control.EndFinally();
-
-  // The result value, dispatch token and message are restored from the operand
-  // stack (this is in sync with FullCodeGenerator::ExitFinallyBlock).
-  message = environment()->Pop();
-  result = environment()->Pop();
-  token = environment()->Pop();
-  NewNode(javascript()->StoreMessage(), message);
-
-  // Dynamic dispatch after the finally-block.
-  commands->ApplyDeferredCommands(token, result);
+  // Exception handling is supported only by going through Ignition first.
+  UNREACHABLE();
 }
 
 
 void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
-  Node* node =
-      NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
-  PrepareFrameState(node, stmt->DebugBreakId());
-  environment()->MarkAllLocalsLive();
+  // Debugger statement is supported only by going through Ignition first.
+  UNREACHABLE();
 }
 
 
@@ -1577,112 +1210,14 @@
 
   // Create node to instantiate a new closure.
   PretenureFlag pretenure = expr->pretenure() ? TENURED : NOT_TENURED;
-  const Operator* op = javascript()->CreateClosure(shared_info, pretenure);
+  VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
+  const Operator* op =
+      javascript()->CreateClosure(shared_info, pair, pretenure);
   Node* value = NewNode(op);
   ast_context()->ProduceValue(expr, value);
 }
 
-
-void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
-  VisitForValueOrTheHole(expr->extends());
-  VisitForValue(expr->constructor());
-
-  // Create node to instantiate a new class.
-  Node* constructor = environment()->Pop();
-  Node* extends = environment()->Pop();
-  Node* start = jsgraph()->Constant(expr->start_position());
-  Node* end = jsgraph()->Constant(expr->end_position());
-  const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass);
-  Node* literal = NewNode(opc, extends, constructor, start, end);
-  PrepareFrameState(literal, expr->CreateLiteralId(),
-                    OutputFrameStateCombine::Push());
-  environment()->Push(literal);
-
-  // Load the "prototype" from the constructor.
-  PrepareEagerCheckpoint(expr->CreateLiteralId());
-  Handle<Name> name = isolate()->factory()->prototype_string();
-  VectorSlotPair pair = CreateVectorSlotPair(expr->PrototypeSlot());
-  Node* prototype = BuildNamedLoad(literal, name, pair);
-  PrepareFrameState(prototype, expr->PrototypeId(),
-                    OutputFrameStateCombine::Push());
-  environment()->Push(prototype);
-
-  // Create nodes to store method values into the literal.
-  for (int i = 0; i < expr->properties()->length(); i++) {
-    ClassLiteral::Property* property = expr->properties()->at(i);
-    environment()->Push(environment()->Peek(property->is_static() ? 1 : 0));
-
-    VisitForValue(property->key());
-    Node* name = BuildToName(environment()->Pop(), expr->GetIdForProperty(i));
-    environment()->Push(name);
-
-    // The static prototype property is read-only. We handle the non-computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read-only property, we special-case it here so
-    // we do not need to perform this check for every property.
-    if (property->is_static() && property->is_computed_name()) {
-      Node* check = BuildThrowIfStaticPrototype(environment()->Pop(),
-                                                expr->GetIdForProperty(i));
-      environment()->Push(check);
-    }
-
-    VisitForValue(property->value());
-    Node* value = environment()->Pop();
-    Node* key = environment()->Pop();
-    Node* receiver = environment()->Pop();
-
-    BuildSetHomeObject(value, receiver, property);
-
-    switch (property->kind()) {
-      case ClassLiteral::Property::METHOD: {
-        Node* attr = jsgraph()->Constant(DONT_ENUM);
-        Node* set_function_name =
-            jsgraph()->Constant(property->NeedsSetFunctionName());
-        const Operator* op =
-            javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
-        Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
-        PrepareFrameState(call, BailoutId::None());
-        break;
-      }
-      case ClassLiteral::Property::GETTER: {
-        Node* attr = jsgraph()->Constant(DONT_ENUM);
-        const Operator* op = javascript()->CallRuntime(
-            Runtime::kDefineGetterPropertyUnchecked, 4);
-        NewNode(op, receiver, key, value, attr);
-        break;
-      }
-      case ClassLiteral::Property::SETTER: {
-        Node* attr = jsgraph()->Constant(DONT_ENUM);
-        const Operator* op = javascript()->CallRuntime(
-            Runtime::kDefineSetterPropertyUnchecked, 4);
-        NewNode(op, receiver, key, value, attr);
-        break;
-      }
-      case ClassLiteral::Property::FIELD: {
-        UNREACHABLE();
-        break;
-      }
-    }
-  }
-
-  // Set the constructor to have fast properties.
-  prototype = environment()->Pop();
-  literal = environment()->Pop();
-  const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
-  literal = NewNode(op, literal);
-
-  // Assign to class variable.
-  if (expr->class_variable_proxy() != nullptr) {
-    Variable* var = expr->class_variable_proxy()->var();
-    VectorSlotPair feedback = CreateVectorSlotPair(
-        expr->NeedsProxySlot() ? expr->ProxySlot()
-                               : FeedbackVectorSlot::Invalid());
-    BuildVariableAssignment(var, literal, Token::INIT, feedback,
-                            BailoutId::None());
-  }
-  ast_context()->ProduceValue(expr, literal);
-}
-
+void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) { UNREACHABLE(); }
 
 void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
   UNREACHABLE();
@@ -1734,7 +1269,8 @@
 
   // Create node to materialize a regular expression literal.
   const Operator* op = javascript()->CreateLiteralRegExp(
-      expr->pattern(), expr->flags(), expr->literal_index());
+      expr->pattern(), expr->flags(),
+      FeedbackVector::GetIndex(expr->literal_slot()));
   Node* literal = NewNode(op, closure);
   PrepareFrameState(literal, expr->id(), ast_context()->GetStateCombine());
   ast_context()->ProduceValue(expr, literal);
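
// [Editor's note] Illustrative sketch only. Literals are now addressed by a
// FeedbackSlot translated to a flat vector index, rather than by a separate
// literal_index(). A minimal stand-in for that translation (the real layout
// of FeedbackVector is not reproduced here, and kHeaderSize is hypothetical):
#include <cassert>

struct FeedbackSlot {
  int id;
  bool IsInvalid() const { return id < 0; }
};

constexpr int kHeaderSize = 1;  // assumed reserved metadata entries
int GetIndex(FeedbackSlot slot) {
  assert(!slot.IsInvalid());
  return kHeaderSize + slot.id;
}

int main() {
  assert(GetIndex({0}) == 1);
  assert(GetIndex({4}) == 5);
}
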
@@ -1746,8 +1282,8 @@
 
   // Create node to deep-copy the literal boilerplate.
   const Operator* op = javascript()->CreateLiteralObject(
-      expr->constant_properties(), expr->ComputeFlags(true),
-      expr->literal_index(), expr->properties_count());
+      expr->GetOrBuildConstantProperties(isolate()), expr->ComputeFlags(true),
+      FeedbackVector::GetIndex(expr->literal_slot()), expr->properties_count());
   Node* literal = NewNode(op, closure);
   PrepareFrameState(literal, expr->CreateLiteralId(),
                     OutputFrameStateCombine::Push());
@@ -1757,15 +1293,15 @@
   environment()->Push(literal);
 
   // Create nodes to store computed values into the literal.
-  int property_index = 0;
   AccessorTable accessor_table(local_zone());
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-    if (property->is_computed_name()) break;
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    DCHECK(!property->is_computed_name());
     if (property->IsCompileTimeValue()) continue;
 
     Literal* key = property->key()->AsLiteral();
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1783,7 +1319,7 @@
             Handle<Name> name = key->AsPropertyName();
             VectorSlotPair feedback =
                 CreateVectorSlotPair(property->GetSlot(0));
-            Node* store = BuildNamedStore(literal, name, value, feedback);
+            Node* store = BuildNamedStoreOwn(literal, name, value, feedback);
             PrepareFrameState(store, key->id(),
                               OutputFrameStateCombine::Ignore());
             BuildSetHomeObject(value, literal, property, 1);
@@ -1818,21 +1354,20 @@
             javascript()->CallRuntime(Runtime::kInternalSetPrototype);
         Node* set_prototype = NewNode(op, receiver, value);
         // SetPrototype should not lazy deopt on an object literal.
-        PrepareFrameState(set_prototype,
-                          expr->GetIdForPropertySet(property_index));
+        PrepareFrameState(set_prototype, expr->GetIdForPropertySet(i));
         break;
       }
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->setter = property;
         }
         break;
@@ -1856,77 +1391,6 @@
     Node* call = NewNode(op, literal, name, getter, setter, attr);
     PrepareFrameState(call, it->second->bailout_id);
   }
-
-  // Object literals have two parts. The "static" part on the left contains no
-  // computed property names, and so we can compute its map ahead of time; see
-  // Runtime_CreateObjectLiteralBoilerplate. The second "dynamic" part starts
-  // with the first computed property name and continues with all properties to
-  // its right. All the code from above initializes the static component of the
-  // object literal, and arranges for the map of the result to reflect the
-  // static order in which the keys appear. For the dynamic properties, we
-  // compile them into a series of "SetOwnProperty" runtime calls. This will
-  // preserve insertion order.
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
-    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
-      environment()->Push(environment()->Top());  // Duplicate receiver.
-      VisitForValue(property->value());
-      Node* value = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kInternalSetPrototype);
-      Node* call = NewNode(op, receiver, value);
-      PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
-      continue;
-    }
-
-    environment()->Push(environment()->Top());  // Duplicate receiver.
-    VisitForValue(property->key());
-    Node* name = BuildToName(environment()->Pop(),
-                             expr->GetIdForPropertyName(property_index));
-    environment()->Push(name);
-    VisitForValue(property->value());
-    Node* value = environment()->Pop();
-    Node* key = environment()->Pop();
-    Node* receiver = environment()->Pop();
-    BuildSetHomeObject(value, receiver, property);
-    switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::COMPUTED:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
-        if (!property->emit_store()) continue;
-        Node* attr = jsgraph()->Constant(NONE);
-        Node* set_function_name =
-            jsgraph()->Constant(property->NeedsSetFunctionName());
-        const Operator* op =
-            javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
-        Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
-        PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
-        break;
-      }
-      case ObjectLiteral::Property::PROTOTYPE:
-        UNREACHABLE();  // Handled specially above.
-        break;
-      case ObjectLiteral::Property::GETTER: {
-        Node* attr = jsgraph()->Constant(NONE);
-        const Operator* op = javascript()->CallRuntime(
-            Runtime::kDefineGetterPropertyUnchecked, 4);
-        Node* call = NewNode(op, receiver, key, value, attr);
-        PrepareFrameState(call, BailoutId::None());
-        break;
-      }
-      case ObjectLiteral::Property::SETTER: {
-        Node* attr = jsgraph()->Constant(NONE);
-        const Operator* op = javascript()->CallRuntime(
-            Runtime::kDefineSetterPropertyUnchecked, 4);
-        Node* call = NewNode(op, receiver, key, value, attr);
-        PrepareFrameState(call, BailoutId::None());
-        break;
-      }
-    }
-  }
-
   ast_context()->ProduceValue(expr, environment()->Pop());
 }
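
// [Editor's note] Illustrative sketch only. The deleted second loop handled
// the "dynamic" tail of an object literal (everything from the first computed
// property name onward); this builder now DCHECKs that no computed names reach
// it at all. A hypothetical model of that static/dynamic split:
#include <cstdio>
#include <vector>

struct Property { bool is_computed_name; };

// Properties before the first computed name can share a precomputed map
// ("static" part); the rest must be defined one at a time, in order.
size_t StaticPrefixLength(const std::vector<Property>& props) {
  size_t i = 0;
  while (i < props.size() && !props[i].is_computed_name) ++i;
  return i;
}

int main() {
  std::vector<Property> props = {{false}, {false}, {true}, {false}};
  std::printf("static prefix: %zu of %zu properties\n",
              StaticPrefixLength(props), props.size());  // 2 of 4
}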
 
@@ -1947,8 +1411,8 @@
 
   // Create node to deep-copy the literal boilerplate.
   const Operator* op = javascript()->CreateLiteralArray(
-      expr->constant_elements(), expr->ComputeFlags(true),
-      expr->literal_index(), expr->values()->length());
+      expr->GetOrBuildConstantElements(isolate()), expr->ComputeFlags(true),
+      FeedbackVector::GetIndex(expr->literal_slot()), expr->values()->length());
   Node* literal = NewNode(op, closure);
   PrepareFrameState(literal, expr->CreateLiteralId(),
                     OutputFrameStateCombine::Push());
@@ -1978,72 +1442,6 @@
   ast_context()->ProduceValue(expr, environment()->Pop());
 }
 
-void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
-                                           const VectorSlotPair& feedback,
-                                           BailoutId bailout_id) {
-  DCHECK(expr->IsValidReferenceExpressionOrThis());
-
-  // Left-hand side can only be a property, a global or a variable slot.
-  Property* property = expr->AsProperty();
-  LhsKind assign_type = Property::GetAssignType(property);
-
-  // Evaluate LHS expression and store the value.
-  switch (assign_type) {
-    case VARIABLE: {
-      Variable* var = expr->AsVariableProxy()->var();
-      BuildVariableAssignment(var, value, Token::ASSIGN, feedback, bailout_id);
-      break;
-    }
-    case NAMED_PROPERTY: {
-      environment()->Push(value);
-      VisitForValue(property->obj());
-      Node* object = environment()->Pop();
-      value = environment()->Pop();
-      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      Node* store = BuildNamedStore(object, name, value, feedback);
-      PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
-      break;
-    }
-    case KEYED_PROPERTY: {
-      environment()->Push(value);
-      VisitForValue(property->obj());
-      VisitForValue(property->key());
-      Node* key = environment()->Pop();
-      Node* object = environment()->Pop();
-      value = environment()->Pop();
-      Node* store = BuildKeyedStore(object, key, value, feedback);
-      PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
-      break;
-    }
-    case NAMED_SUPER_PROPERTY: {
-      environment()->Push(value);
-      VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      value = environment()->Pop();
-      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
-      PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      environment()->Push(value);
-      VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForValue(property->key());
-      Node* key = environment()->Pop();
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      value = environment()->Pop();
-      Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
-      PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
-      break;
-    }
-  }
-}
-
-
 void AstGraphBuilder::VisitAssignment(Assignment* expr) {
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
 
@@ -2071,13 +1469,8 @@
       VisitForValue(property->key());
       break;
     case NAMED_SUPER_PROPERTY:
-      VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      break;
     case KEYED_SUPER_PROPERTY:
-      VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForValue(property->key());
+      UNREACHABLE();
       break;
   }
 
@@ -2115,28 +1508,10 @@
                           OutputFrameStateCombine::Push());
         break;
       }
-      case NAMED_SUPER_PROPERTY: {
-        Node* home_object = environment()->Top();
-        Node* receiver = environment()->Peek(1);
-        Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-        VectorSlotPair pair =
-            CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
-        PrepareFrameState(old_value, property->LoadId(),
-                          OutputFrameStateCombine::Push());
+      case NAMED_SUPER_PROPERTY:
+      case KEYED_SUPER_PROPERTY:
+        UNREACHABLE();
         break;
-      }
-      case KEYED_SUPER_PROPERTY: {
-        Node* key = environment()->Top();
-        Node* home_object = environment()->Peek(1);
-        Node* receiver = environment()->Peek(2);
-        VectorSlotPair pair =
-            CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
-        PrepareFrameState(old_value, property->LoadId(),
-                          OutputFrameStateCombine::Push());
-        break;
-      }
     }
     environment()->Push(old_value);
     VisitForValue(expr->value());
@@ -2181,22 +1556,10 @@
                         OutputFrameStateCombine::Push());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
-      PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
       break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      Node* key = environment()->Pop();
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
-      PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
-      break;
-    }
   }
 
   ast_context()->ProduceValue(expr, value);
@@ -2205,8 +1568,7 @@
 
 void AstGraphBuilder::VisitYield(Yield* expr) {
   // Generator functions are supported only by going through Ignition first.
-  SetStackOverflow();
-  ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
+  UNREACHABLE();
 }
 
 
@@ -2243,27 +1605,10 @@
       PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
-      value = BuildNamedSuperLoad(receiver, home_object, name, pair);
-      PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
       break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
-      VisitForValue(expr->key());
-      Node* key = environment()->Pop();
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
-      PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
-      break;
-    }
   }
   ast_context()->ProduceValue(expr, value);
 }
@@ -2272,140 +1617,70 @@
 void AstGraphBuilder::VisitCall(Call* expr) {
   Expression* callee = expr->expression();
   Call::CallType call_type = expr->GetCallType();
+  CHECK(!expr->is_possibly_eval());
 
   // Prepare the callee and the receiver to the function call. This depends on
   // the semantics of the underlying call type.
   ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
   Node* receiver_value = nullptr;
   Node* callee_value = nullptr;
-  if (expr->is_possibly_eval()) {
-    if (callee->AsVariableProxy()->var()->IsLookupSlot()) {
-      Variable* variable = callee->AsVariableProxy()->var();
-      Node* name = jsgraph()->Constant(variable->name());
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
-      Node* pair = NewNode(op, name);
-      callee_value = NewNode(common()->Projection(0), pair);
-      receiver_value = NewNode(common()->Projection(1), pair);
-      PrepareFrameState(pair, expr->LookupId(),
-                        OutputFrameStateCombine::Push(2));
-    } else {
+  switch (call_type) {
+    case Call::GLOBAL_CALL: {
+      VariableProxy* proxy = callee->AsVariableProxy();
+      VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+      PrepareEagerCheckpoint(BeforeId(proxy));
+      callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
+                                       pair, OutputFrameStateCombine::Push());
+      receiver_hint = ConvertReceiverMode::kNullOrUndefined;
+      receiver_value = jsgraph()->UndefinedConstant();
+      break;
+    }
+    case Call::NAMED_PROPERTY_CALL: {
+      Property* property = callee->AsProperty();
+      VectorSlotPair feedback =
+          CreateVectorSlotPair(property->PropertyFeedbackSlot());
+      VisitForValue(property->obj());
+      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+      Node* object = environment()->Top();
+      callee_value = BuildNamedLoad(object, name, feedback);
+      PrepareFrameState(callee_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
+      // Note that a property call requires the receiver to be wrapped into
+      // an object for sloppy callees. However, the receiver is guaranteed
+      // not to be null or undefined at this point.
+      receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+      receiver_value = environment()->Pop();
+      break;
+    }
+    case Call::KEYED_PROPERTY_CALL: {
+      Property* property = callee->AsProperty();
+      VectorSlotPair feedback =
+          CreateVectorSlotPair(property->PropertyFeedbackSlot());
+      VisitForValue(property->obj());
+      VisitForValue(property->key());
+      Node* key = environment()->Pop();
+      Node* object = environment()->Top();
+      callee_value = BuildKeyedLoad(object, key, feedback);
+      PrepareFrameState(callee_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
+      // Note that a property call requires the receiver to be wrapped into
+      // an object for sloppy callees. However, the receiver is guaranteed
+      // not to be null or undefined at this point.
+      receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+      receiver_value = environment()->Pop();
+      break;
+    }
+    case Call::OTHER_CALL:
       VisitForValue(callee);
       callee_value = environment()->Pop();
       receiver_hint = ConvertReceiverMode::kNullOrUndefined;
       receiver_value = jsgraph()->UndefinedConstant();
-    }
-  } else {
-    switch (call_type) {
-      case Call::GLOBAL_CALL: {
-        VariableProxy* proxy = callee->AsVariableProxy();
-        VectorSlotPair pair =
-            CreateVectorSlotPair(proxy->VariableFeedbackSlot());
-        PrepareEagerCheckpoint(BeforeId(proxy));
-        callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
-                                         pair, OutputFrameStateCombine::Push());
-        receiver_hint = ConvertReceiverMode::kNullOrUndefined;
-        receiver_value = jsgraph()->UndefinedConstant();
-        break;
-      }
-      case Call::WITH_CALL: {
-        Variable* variable = callee->AsVariableProxy()->var();
-        Node* name = jsgraph()->Constant(variable->name());
-        const Operator* op =
-            javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
-        Node* pair = NewNode(op, name);
-        callee_value = NewNode(common()->Projection(0), pair);
-        receiver_value = NewNode(common()->Projection(1), pair);
-        PrepareFrameState(pair, expr->LookupId(),
-                          OutputFrameStateCombine::Push(2));
-        break;
-      }
-      case Call::NAMED_PROPERTY_CALL: {
-        Property* property = callee->AsProperty();
-        VectorSlotPair feedback =
-            CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        VisitForValue(property->obj());
-        Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-        Node* object = environment()->Top();
-        callee_value = BuildNamedLoad(object, name, feedback);
-        PrepareFrameState(callee_value, property->LoadId(),
-                          OutputFrameStateCombine::Push());
-        // Note that a property call requires the receiver to be wrapped into
-        // an object for sloppy callees. However the receiver is guaranteed
-        // not to be null or undefined at this point.
-        receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
-        receiver_value = environment()->Pop();
-        break;
-      }
-      case Call::KEYED_PROPERTY_CALL: {
-        Property* property = callee->AsProperty();
-        VectorSlotPair feedback =
-            CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        VisitForValue(property->obj());
-        VisitForValue(property->key());
-        Node* key = environment()->Pop();
-        Node* object = environment()->Top();
-        callee_value = BuildKeyedLoad(object, key, feedback);
-        PrepareFrameState(callee_value, property->LoadId(),
-                          OutputFrameStateCombine::Push());
-        // Note that a property call requires the receiver to be wrapped into
-        // an object for sloppy callees. However the receiver is guaranteed
-        // not to be null or undefined at this point.
-        receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
-        receiver_value = environment()->Pop();
-        break;
-      }
-      case Call::NAMED_SUPER_PROPERTY_CALL: {
-        Property* property = callee->AsProperty();
-        SuperPropertyReference* super_ref =
-            property->obj()->AsSuperPropertyReference();
-        VisitForValue(super_ref->home_object());
-        VisitForValue(super_ref->this_var());
-        Node* home = environment()->Peek(1);
-        Node* object = environment()->Top();
-        Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-        callee_value =
-            BuildNamedSuperLoad(object, home, name, VectorSlotPair());
-        PrepareFrameState(callee_value, property->LoadId(),
-                          OutputFrameStateCombine::Push());
-        // Note that a property call requires the receiver to be wrapped into
-        // an object for sloppy callees. Since the receiver is not the target of
-        // the load, it could very well be null or undefined at this point.
-        receiver_value = environment()->Pop();
-        environment()->Drop(1);
-        break;
-      }
-      case Call::KEYED_SUPER_PROPERTY_CALL: {
-        Property* property = callee->AsProperty();
-        SuperPropertyReference* super_ref =
-            property->obj()->AsSuperPropertyReference();
-        VisitForValue(super_ref->home_object());
-        VisitForValue(super_ref->this_var());
-        environment()->Push(environment()->Top());    // Duplicate this_var.
-        environment()->Push(environment()->Peek(2));  // Duplicate home_obj.
-        VisitForValue(property->key());
-        Node* key = environment()->Pop();
-        Node* home = environment()->Pop();
-        Node* object = environment()->Pop();
-        callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
-        PrepareFrameState(callee_value, property->LoadId(),
-                          OutputFrameStateCombine::Push());
-        // Note that a property call requires the receiver to be wrapped into
-        // an object for sloppy callees. Since the receiver is not the target of
-        // the load, it could very well be null or undefined at this point.
-        receiver_value = environment()->Pop();
-        environment()->Drop(1);
-        break;
-      }
-      case Call::SUPER_CALL:
-        return VisitCallSuper(expr);
-      case Call::OTHER_CALL:
-        VisitForValue(callee);
-        callee_value = environment()->Pop();
-        receiver_hint = ConvertReceiverMode::kNullOrUndefined;
-        receiver_value = jsgraph()->UndefinedConstant();
-        break;
-    }
+      break;
+    case Call::NAMED_SUPER_PROPERTY_CALL:
+    case Call::KEYED_SUPER_PROPERTY_CALL:
+    case Call::SUPER_CALL:
+    case Call::WITH_CALL:
+      UNREACHABLE();
   }
 
   // The callee and the receiver both have to be pushed onto the operand stack
@@ -2417,41 +1692,13 @@
   ZoneList<Expression*>* args = expr->arguments();
   VisitForValues(args);
 
-  // Resolve callee for a potential direct eval call. This block will mutate the
-  // callee value pushed onto the environment.
-  if (expr->is_possibly_eval() && args->length() > 0) {
-    int arg_count = args->length();
-
-    // Extract callee and source string from the environment.
-    Node* callee = environment()->Peek(arg_count + 1);
-    Node* source = environment()->Peek(arg_count - 1);
-
-    // Create node to ask for help resolving potential eval call. This will
-    // provide a fully resolved callee to patch into the environment.
-    Node* function = GetFunctionClosure();
-    Node* language = jsgraph()->Constant(language_mode());
-    Node* eval_scope_position =
-        jsgraph()->Constant(current_scope()->start_position());
-    Node* eval_position = jsgraph()->Constant(expr->position());
-    const Operator* op =
-        javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval);
-    Node* new_callee = NewNode(op, callee, source, function, language,
-                               eval_scope_position, eval_position);
-    PrepareFrameState(new_callee, expr->EvalId(),
-                      OutputFrameStateCombine::PokeAt(arg_count + 1));
-
-    // Patch callee on the environment.
-    environment()->Poke(arg_count + 1, new_callee);
-  }
-
   // Create node to perform the function call.
   float const frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
   VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
   const Operator* call =
-      javascript()->CallFunction(args->length() + 2, frequency, feedback,
-                                 receiver_hint, expr->tail_call_mode());
-  PrepareEagerCheckpoint(expr->is_possibly_eval() ? expr->EvalId()
-                                                  : expr->CallId());
+      javascript()->Call(args->length() + 2, frequency, feedback, receiver_hint,
+                         expr->tail_call_mode());
+  PrepareEagerCheckpoint(expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
  // The callee was passed to the call; we just need to push something here to
   // satisfy the bailout location contract. The fullcodegen code will not
@@ -2463,34 +1710,6 @@
 }
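
// [Editor's note] Illustrative sketch only. The Call/Construct operators above
// count callee and receiver in their arity, hence the recurring
// "args->length() + 2". A tiny model of that operand-stack layout:
#include <cassert>
#include <string>
#include <vector>

// Stack layout for a JS call: [callee, receiver, arg0, ..., argN-1].
std::vector<std::string> BuildCallOperands(
    const std::string& callee, const std::string& receiver,
    const std::vector<std::string>& args) {
  std::vector<std::string> operands = {callee, receiver};
  operands.insert(operands.end(), args.begin(), args.end());
  return operands;
}

int main() {
  auto ops = BuildCallOperands("f", "undefined", {"x", "y"});
  assert(ops.size() == 2 /*args*/ + 2);  // the "+ 2" in javascript()->Call
}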
 
 
-void AstGraphBuilder::VisitCallSuper(Call* expr) {
-  SuperCallReference* super = expr->expression()->AsSuperCallReference();
-  DCHECK_NOT_NULL(super);
-
-  // Prepare the callee to the super call.
-  VisitForValue(super->this_function_var());
-  Node* this_function = environment()->Pop();
-  const Operator* op =
-      javascript()->CallRuntime(Runtime::kInlineGetSuperConstructor, 1);
-  Node* super_function = NewNode(op, this_function);
-  environment()->Push(super_function);
-
-  // Evaluate all arguments to the super call.
-  ZoneList<Expression*>* args = expr->arguments();
-  VisitForValues(args);
-
-  // The new target is loaded from the {new.target} variable.
-  VisitForValue(super->new_target_var());
-
-  // Create node to perform the super call.
-  const Operator* call =
-      javascript()->CallConstruct(args->length() + 2, 0.0f, VectorSlotPair());
-  Node* value = ProcessArguments(call, args->length() + 2);
-  PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
-  ast_context()->ProduceValue(expr, value);
-}
-
-
 void AstGraphBuilder::VisitCallNew(CallNew* expr) {
   VisitForValue(expr->expression());
 
@@ -2505,7 +1724,7 @@
   float const frequency = ComputeCallFrequency(expr->CallNewFeedbackSlot());
   VectorSlotPair feedback = CreateVectorSlotPair(expr->CallNewFeedbackSlot());
   const Operator* call =
-      javascript()->CallConstruct(args->length() + 2, frequency, feedback);
+      javascript()->Construct(args->length() + 2, frequency, feedback);
   Node* value = ProcessArguments(call, args->length() + 2);
   PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
   ast_context()->ProduceValue(expr, value);
@@ -2526,7 +1745,7 @@
   VisitForValues(args);
 
   // Create node to perform the JS runtime call.
-  const Operator* call = javascript()->CallFunction(args->length() + 2);
+  const Operator* call = javascript()->Call(args->length() + 2);
   PrepareEagerCheckpoint(expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
   PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
@@ -2625,35 +1844,10 @@
       stack_depth = 2;
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      Node* home_object = environment()->Top();
-      Node* receiver = environment()->Peek(1);
-      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      VectorSlotPair pair =
-          CreateVectorSlotPair(property->PropertyFeedbackSlot());
-      old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
-      PrepareFrameState(old_value, property->LoadId(),
-                        OutputFrameStateCombine::Push());
-      stack_depth = 2;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
       break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForValue(property->key());
-      Node* key = environment()->Top();
-      Node* home_object = environment()->Peek(1);
-      Node* receiver = environment()->Peek(2);
-      VectorSlotPair pair =
-          CreateVectorSlotPair(property->PropertyFeedbackSlot());
-      old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
-      PrepareFrameState(old_value, property->LoadId(),
-                        OutputFrameStateCombine::Push());
-      stack_depth = 3;
-      break;
-    }
   }
 
   // Convert old value into a number.
@@ -2708,24 +1902,10 @@
                         OutputFrameStateCombine::Push());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
-      PrepareFrameState(store, expr->AssignmentId(),
-                        OutputFrameStateCombine::Push());
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
       break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      Node* key = environment()->Pop();
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
-      PrepareFrameState(store, expr->AssignmentId(),
-                        OutputFrameStateCombine::Push());
-      break;
-    }
   }
 
   // Restore old value for postfix expressions.
@@ -2804,13 +1984,7 @@
     return VisitLiteralCompareNil(expr, sub_expr, jsgraph()->NullConstant());
   }
 
-  CompareOperationHint hint;
-  if (!type_hint_analysis_ ||
-      !type_hint_analysis_->GetCompareOperationHint(
-          expr->CompareOperationFeedbackId(), &hint)) {
-    hint = CompareOperationHint::kAny;
-  }
-
+  CompareOperationHint hint = CompareOperationHint::kAny;
   const Operator* op;
   switch (expr->op()) {
     case Token::EQ:
@@ -2868,6 +2042,10 @@
   UNREACHABLE();
 }
 
+void AstGraphBuilder::VisitGetIterator(GetIterator* expr) {
+  // GetIterator is supported only by going through Ignition first.
+  UNREACHABLE();
+}
 
 void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
   Node* value = GetFunctionClosure();
@@ -2877,8 +2055,7 @@
 
 void AstGraphBuilder::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {
-  Node* value = BuildThrowUnsupportedSuperError(expr->id());
-  ast_context()->ProduceValue(expr, value);
+  UNREACHABLE();
 }
 
 
@@ -2898,17 +2075,16 @@
   AstVisitor<AstGraphBuilder>::VisitDeclarations(declarations);
   if (globals()->empty()) return;
   int array_index = 0;
-  Handle<TypeFeedbackVector> feedback_vector(
-      info()->closure()->feedback_vector());
+  Handle<FeedbackVector> feedback_vector(info()->closure()->feedback_vector());
   Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
       static_cast<int>(globals()->size()), TENURED);
   for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
   int encoded_flags = info()->GetDeclareGlobalsFlags();
   Node* flags = jsgraph()->Constant(encoded_flags);
-  Node* pairs = jsgraph()->Constant(data);
+  Node* decls = jsgraph()->Constant(data);
   Node* vector = jsgraph()->Constant(feedback_vector);
   const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
-  Node* call = NewNode(op, pairs, flags, vector);
+  Node* call = NewNode(op, decls, flags, vector);
   PrepareFrameState(call, BailoutId::Declarations());
   globals()->clear();
 }
@@ -2920,20 +2096,12 @@
 }
 
 
-void AstGraphBuilder::VisitInScope(Statement* stmt, Scope* s, Node* context) {
-  ContextScope scope(this, s, context);
-  DCHECK(s->declarations()->is_empty());
-  Visit(stmt);
-}
-
 void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
                                          LoopBuilder* loop,
                                          BailoutId stack_check_id) {
   ControlScopeForIteration scope(this, stmt, loop);
-  if (FLAG_turbo_loop_stackcheck || !info()->shared_info()->asm_function()) {
-    Node* node = NewNode(javascript()->StackCheck());
-    PrepareFrameState(node, stack_check_id);
-  }
+  Node* node = NewNode(javascript()->StackCheck());
+  PrepareFrameState(node, stack_check_id);
   Visit(stmt->body());
 }
 
@@ -3063,9 +2231,7 @@
   return current_scope()->language_mode();
 }
 
-
-VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
-    FeedbackVectorSlot slot) const {
+VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(FeedbackSlot slot) const {
   return VectorSlotPair(handle(info()->closure()->feedback_vector()), slot);
 }
 
@@ -3074,50 +2240,10 @@
   Visit(node->expression());
 }
 
-
-namespace {
-
-// Limit of context chain length to which inline check is possible.
-const int kMaxCheckDepth = 30;
-
-// Sentinel for {TryLoadDynamicVariable} disabling inline checks.
-const uint32_t kFullCheckRequired = -1;
-
-}  // namespace
-
-
-uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
-  DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
-  uint32_t check_depths = 0;
-  for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (!s->calls_sloppy_eval()) continue;
-    int depth = current_scope()->ContextChainLength(s);
-    if (depth > kMaxCheckDepth) return kFullCheckRequired;
-    check_depths |= 1 << depth;
-  }
-  return check_depths;
-}
-
-
-uint32_t AstGraphBuilder::ComputeBitsetForDynamicContext(Variable* variable) {
-  DCHECK_EQ(DYNAMIC_LOCAL, variable->mode());
-  uint32_t check_depths = 0;
-  for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (!s->calls_sloppy_eval() && s != variable->scope()) continue;
-    int depth = current_scope()->ContextChainLength(s);
-    if (depth > kMaxCheckDepth) return kFullCheckRequired;
-    check_depths |= 1 << depth;
-    if (s == variable->scope()) break;
-  }
-  return check_depths;
-}
-
-float AstGraphBuilder::ComputeCallFrequency(FeedbackVectorSlot slot) const {
+float AstGraphBuilder::ComputeCallFrequency(FeedbackSlot slot) const {
   if (slot.IsInvalid()) return 0.0f;
-  Handle<TypeFeedbackVector> feedback_vector(
-      info()->closure()->feedback_vector(), isolate());
+  Handle<FeedbackVector> feedback_vector(info()->closure()->feedback_vector(),
+                                         isolate());
   CallICNexus nexus(feedback_vector, slot);
   return nexus.ComputeCallFrequency() * invocation_frequency_;
 }
@@ -3147,7 +2273,8 @@
     Variable* variable = scope->receiver();
     DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
     const Operator* op = javascript()->StoreContext(0, variable->index());
-    NewNode(op, local_context, receiver);
+    Node* node = NewNode(op, receiver);
+    NodeProperties::ReplaceContextInput(node, local_context);
   }
 
   // Copy parameters into context if necessary.
@@ -3159,7 +2286,8 @@
     // Context variable (at bottom of the context chain).
     DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
     const Operator* op = javascript()->StoreContext(0, variable->index());
-    NewNode(op, local_context, parameter);
+    Node* node = NewNode(op, parameter);
+    NodeProperties::ReplaceContextInput(node, local_context);
   }
 
   return local_context;
@@ -3171,7 +2299,8 @@
 
   // Allocate a new local context.
   int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  const Operator* op = javascript()->CreateFunctionContext(slot_count);
+  const Operator* op =
+      javascript()->CreateFunctionContext(slot_count, scope->scope_type());
   Node* local_context = NewNode(op, GetFunctionClosure());
 
   return local_context;
@@ -3224,52 +2353,6 @@
   return object;
 }
 
-Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest) {
-  if (rest == nullptr) return nullptr;
-
-  // Allocate and initialize a new arguments object.
-  CreateArgumentsType type = CreateArgumentsType::kRestParameter;
-  const Operator* op = javascript()->CreateArguments(type);
-  Node* object = NewNode(op, GetFunctionClosure());
-  PrepareFrameState(object, BailoutId::None());
-
-  // Assign the object to the {rest} variable. This should never lazy
-  // deopt, so it is fine to send invalid bailout id.
-  DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
-  BuildVariableAssignment(rest, object, Token::ASSIGN, VectorSlotPair(),
-                          BailoutId::None());
-  return object;
-}
-
-
-Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
-  if (this_function_var == nullptr) return nullptr;
-
-  // Retrieve the closure we were called with.
-  Node* this_function = GetFunctionClosure();
-
-  // Assign the object to the {.this_function} variable. This should never lazy
-  // deopt, so it is fine to send invalid bailout id.
-  BuildVariableAssignment(this_function_var, this_function, Token::INIT,
-                          VectorSlotPair(), BailoutId::None());
-  return this_function;
-}
-
-
-Node* AstGraphBuilder::BuildNewTargetVariable(Variable* new_target_var) {
-  if (new_target_var == nullptr) return nullptr;
-
-  // Retrieve the new target we were called with.
-  Node* object = GetNewTarget();
-
-  // Assign the object to the {new.target} variable. This should never lazy
-  // deopt, so it is fine to send invalid bailout id.
-  BuildVariableAssignment(new_target_var, object, Token::INIT, VectorSlotPair(),
-                          BailoutId::None());
-  return object;
-}
-
-
 Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
                                                Node* not_hole,
                                                BailoutId bailout_id) {
@@ -3305,25 +2388,6 @@
   return environment()->Pop();
 }
 
-
-Node* AstGraphBuilder::BuildThrowIfStaticPrototype(Node* name,
-                                                   BailoutId bailout_id) {
-  IfBuilder prototype_check(this);
-  Node* prototype_string =
-      jsgraph()->Constant(isolate()->factory()->prototype_string());
-  Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
-                        name, prototype_string);
-  prototype_check.If(check);
-  prototype_check.Then();
-  Node* error = BuildThrowStaticPrototypeError(bailout_id);
-  environment()->Push(error);
-  prototype_check.Else();
-  environment()->Push(name);
-  prototype_check.End();
-  return environment()->Pop();
-}
-
-
 Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
                                          BailoutId bailout_id,
                                          const VectorSlotPair& feedback,
@@ -3363,7 +2427,7 @@
                        info()->is_function_context_specializing();
       const Operator* op =
           javascript()->LoadContext(depth, variable->index(), immutable);
-      Node* value = NewNode(op, current_context());
+      Node* value = NewNode(op);
       // TODO(titzer): initialization checks are redundant for already
       // initialized immutable context loads, but only specialization knows.
       // Maybe specializer should be a parameter to the graph builder?
@@ -3373,17 +2437,7 @@
       }
       return value;
     }
-    case VariableLocation::LOOKUP: {
-      // Dynamic lookup of context variable (anywhere in the chain).
-      Handle<String> name = variable->name();
-      if (Node* node = TryLoadDynamicVariable(variable, name, bailout_id,
-                                              feedback, combine, typeof_mode)) {
-        return node;
-      }
-      Node* value = BuildDynamicLoad(name, typeof_mode);
-      PrepareFrameState(value, bailout_id, combine);
-      return value;
-    }
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -3411,15 +2465,7 @@
       // Local var, const, or let variable or context variable.
       return jsgraph()->BooleanConstant(variable->is_this());
     }
-    case VariableLocation::LOOKUP: {
-      // Dynamic lookup of context variable (anywhere in the chain).
-      Node* name = jsgraph()->Constant(variable->name());
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kDeleteLookupSlot);
-      Node* result = NewNode(op, name);
-      PrepareFrameState(result, bailout_id, combine);
-      return result;
-    }
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -3498,7 +2544,7 @@
         // Perform an initialization check for let declared variables.
         const Operator* op =
             javascript()->LoadContext(depth, variable->index(), false);
-        Node* current = NewNode(op, current_context());
+        Node* current = NewNode(op);
         value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
       } else if (mode == CONST && op == Token::INIT) {
         // Perform an initialization check for const {this} variables.
@@ -3507,7 +2553,7 @@
         if (variable->is_this()) {
           const Operator* op =
               javascript()->LoadContext(depth, variable->index(), false);
-          Node* current = NewNode(op, current_context());
+          Node* current = NewNode(op);
           value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
         }
       } else if (mode == CONST && op != Token::INIT &&
@@ -3524,22 +2570,16 @@
         if (variable->binding_needs_init()) {
           const Operator* op =
               javascript()->LoadContext(depth, variable->index(), false);
-          Node* current = NewNode(op, current_context());
+          Node* current = NewNode(op);
           BuildHoleCheckThenThrow(current, variable, value, bailout_id);
         }
         // Assignment to const is exception in all modes.
         return BuildThrowConstAssignError(bailout_id);
       }
       const Operator* op = javascript()->StoreContext(depth, variable->index());
-      return NewNode(op, current_context(), value);
+      return NewNode(op, value);
     }
-    case VariableLocation::LOOKUP: {
-      // Dynamic lookup of context variable (anywhere in the chain).
-      Handle<Name> name = variable->name();
-      Node* store = BuildDynamicStore(name, value);
-      PrepareFrameState(store, bailout_id, combine);
-      return store;
-    }
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -3551,7 +2591,7 @@
 Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
                                       const VectorSlotPair& feedback) {
   const Operator* op = javascript()->LoadProperty(feedback);
-  Node* node = NewNode(op, object, key, GetFunctionClosure());
+  Node* node = NewNode(op, object, key);
   return node;
 }
 
@@ -3559,15 +2599,17 @@
 Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
                                       const VectorSlotPair& feedback) {
   const Operator* op = javascript()->LoadNamed(name, feedback);
-  Node* node = NewNode(op, object, GetFunctionClosure());
+  Node* node = NewNode(op, object);
   return node;
 }
 
 
 Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
                                        const VectorSlotPair& feedback) {
+  DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()),
+            language_mode());
   const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
-  Node* node = NewNode(op, object, key, value, GetFunctionClosure());
+  Node* node = NewNode(op, object, key, value);
   return node;
 }
 
@@ -3575,60 +2617,30 @@
 Node* AstGraphBuilder::BuildNamedStore(Node* object, Handle<Name> name,
                                        Node* value,
                                        const VectorSlotPair& feedback) {
+  DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()),
+            language_mode());
   const Operator* op =
       javascript()->StoreNamed(language_mode(), name, feedback);
-  Node* node = NewNode(op, object, value, GetFunctionClosure());
+  Node* node = NewNode(op, object, value);
   return node;
 }
 
-
-Node* AstGraphBuilder::BuildNamedSuperLoad(Node* receiver, Node* home_object,
-                                           Handle<Name> name,
-                                           const VectorSlotPair& feedback) {
-  Node* name_node = jsgraph()->Constant(name);
-  const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper);
-  Node* node = NewNode(op, receiver, home_object, name_node);
+Node* AstGraphBuilder::BuildNamedStoreOwn(Node* object, Handle<Name> name,
+                                          Node* value,
+                                          const VectorSlotPair& feedback) {
+  DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
+            feedback.vector()->GetKind(feedback.slot()));
+  const Operator* op = javascript()->StoreNamedOwn(name, feedback);
+  Node* node = NewNode(op, object, value);
   return node;
 }
 
-
-Node* AstGraphBuilder::BuildKeyedSuperLoad(Node* receiver, Node* home_object,
-                                           Node* key,
-                                           const VectorSlotPair& feedback) {
-  const Operator* op = javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper);
-  Node* node = NewNode(op, receiver, home_object, key);
-  return node;
-}
-
-
-Node* AstGraphBuilder::BuildKeyedSuperStore(Node* receiver, Node* home_object,
-                                            Node* key, Node* value) {
-  Runtime::FunctionId function_id = is_strict(language_mode())
-                                        ? Runtime::kStoreKeyedToSuper_Strict
-                                        : Runtime::kStoreKeyedToSuper_Sloppy;
-  const Operator* op = javascript()->CallRuntime(function_id, 4);
-  Node* node = NewNode(op, receiver, home_object, key, value);
-  return node;
-}
-
-
-Node* AstGraphBuilder::BuildNamedSuperStore(Node* receiver, Node* home_object,
-                                            Handle<Name> name, Node* value) {
-  Node* name_node = jsgraph()->Constant(name);
-  Runtime::FunctionId function_id = is_strict(language_mode())
-                                        ? Runtime::kStoreToSuper_Strict
-                                        : Runtime::kStoreToSuper_Sloppy;
-  const Operator* op = javascript()->CallRuntime(function_id, 4);
-  Node* node = NewNode(op, receiver, home_object, name_node, value);
-  return node;
-}
-
-
 Node* AstGraphBuilder::BuildGlobalLoad(Handle<Name> name,
                                        const VectorSlotPair& feedback,
                                        TypeofMode typeof_mode) {
+  DCHECK_EQ(feedback.vector()->GetTypeofMode(feedback.slot()), typeof_mode);
   const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
-  Node* node = NewNode(op, GetFunctionClosure());
+  Node* node = NewNode(op);
   return node;
 }
 
@@ -3637,33 +2649,10 @@
                                         const VectorSlotPair& feedback) {
   const Operator* op =
       javascript()->StoreGlobal(language_mode(), name, feedback);
-  Node* node = NewNode(op, value, GetFunctionClosure());
+  Node* node = NewNode(op, value);
   return node;
 }
 
-
-Node* AstGraphBuilder::BuildDynamicLoad(Handle<Name> name,
-                                        TypeofMode typeof_mode) {
-  Node* name_node = jsgraph()->Constant(name);
-  const Operator* op =
-      javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
-                                    ? Runtime::kLoadLookupSlot
-                                    : Runtime::kLoadLookupSlotInsideTypeof);
-  Node* node = NewNode(op, name_node);
-  return node;
-}
-
-
-Node* AstGraphBuilder::BuildDynamicStore(Handle<Name> name, Node* value) {
-  Node* name_node = jsgraph()->Constant(name);
-  const Operator* op = javascript()->CallRuntime(
-      is_strict(language_mode()) ? Runtime::kStoreLookupSlot_Strict
-                                 : Runtime::kStoreLookupSlot_Sloppy);
-  Node* node = NewNode(op, name_node, value);
-  return node;
-}
-
-
 Node* AstGraphBuilder::BuildLoadGlobalObject() {
   return BuildLoadNativeContextField(Context::EXTENSION_INDEX);
 }
@@ -3672,30 +2661,20 @@
 Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
   const Operator* op =
       javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
-  Node* native_context = NewNode(op, current_context());
-  return NewNode(javascript()->LoadContext(0, index, true), native_context);
+  Node* native_context = NewNode(op);
+  Node* result = NewNode(javascript()->LoadContext(0, index, true));
+  NodeProperties::ReplaceContextInput(result, native_context);
+  return result;
 }
 
 
 Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
   if (Node* node = TryFastToBoolean(input)) return node;
-  ToBooleanHints hints;
-  if (!type_hint_analysis_ ||
-      !type_hint_analysis_->GetToBooleanHints(feedback_id, &hints)) {
-    hints = ToBooleanHint::kAny;
-  }
+  ToBooleanHints hints = ToBooleanHint::kAny;
   return NewNode(javascript()->ToBoolean(hints), input);
 }
 
 
-Node* AstGraphBuilder::BuildToName(Node* input, BailoutId bailout_id) {
-  if (Node* node = TryFastToName(input)) return node;
-  Node* name = NewNode(javascript()->ToName(), input);
-  PrepareFrameState(name, bailout_id, OutputFrameStateCombine::Push());
-  return name;
-}
-
-
 Node* AstGraphBuilder::BuildToObject(Node* input, BailoutId bailout_id) {
   Node* object = NewNode(javascript()->ToObject(), input);
   PrepareFrameState(object, bailout_id, OutputFrameStateCombine::Push());
@@ -3750,28 +2729,6 @@
 }
 
 
-Node* AstGraphBuilder::BuildThrowStaticPrototypeError(BailoutId bailout_id) {
-  const Operator* op =
-      javascript()->CallRuntime(Runtime::kThrowStaticPrototypeError);
-  Node* call = NewNode(op);
-  PrepareFrameState(call, bailout_id);
-  Node* control = NewNode(common()->Throw(), call);
-  UpdateControlDependencyToLeaveFunction(control);
-  return call;
-}
-
-
-Node* AstGraphBuilder::BuildThrowUnsupportedSuperError(BailoutId bailout_id) {
-  const Operator* op =
-      javascript()->CallRuntime(Runtime::kThrowUnsupportedSuperError);
-  Node* call = NewNode(op);
-  PrepareFrameState(call, bailout_id);
-  Node* control = NewNode(common()->Throw(), call);
-  UpdateControlDependencyToLeaveFunction(control);
-  return call;
-}
-
-
 Node* AstGraphBuilder::BuildReturn(Node* return_value) {
   // Emit tracing call if requested to do so.
   if (FLAG_trace) {
@@ -3796,44 +2753,40 @@
 Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
                                      TypeFeedbackId feedback_id) {
   const Operator* js_op;
-  BinaryOperationHint hint;
-  if (!type_hint_analysis_ ||
-      !type_hint_analysis_->GetBinaryOperationHint(feedback_id, &hint)) {
-    hint = BinaryOperationHint::kAny;
-  }
+  BinaryOperationHint hint = BinaryOperationHint::kAny;
   switch (op) {
     case Token::BIT_OR:
-      js_op = javascript()->BitwiseOr(hint);
+      js_op = javascript()->BitwiseOr();
       break;
     case Token::BIT_AND:
-      js_op = javascript()->BitwiseAnd(hint);
+      js_op = javascript()->BitwiseAnd();
       break;
     case Token::BIT_XOR:
-      js_op = javascript()->BitwiseXor(hint);
+      js_op = javascript()->BitwiseXor();
       break;
     case Token::SHL:
-      js_op = javascript()->ShiftLeft(hint);
+      js_op = javascript()->ShiftLeft();
       break;
     case Token::SAR:
-      js_op = javascript()->ShiftRight(hint);
+      js_op = javascript()->ShiftRight();
       break;
     case Token::SHR:
-      js_op = javascript()->ShiftRightLogical(hint);
+      js_op = javascript()->ShiftRightLogical();
       break;
     case Token::ADD:
       js_op = javascript()->Add(hint);
       break;
     case Token::SUB:
-      js_op = javascript()->Subtract(hint);
+      js_op = javascript()->Subtract();
       break;
     case Token::MUL:
-      js_op = javascript()->Multiply(hint);
+      js_op = javascript()->Multiply();
       break;
     case Token::DIV:
-      js_op = javascript()->Divide(hint);
+      js_op = javascript()->Divide();
       break;
     case Token::MOD:
-      js_op = javascript()->Modulus(hint);
+      js_op = javascript()->Modulus();
       break;
     default:
       UNREACHABLE();
@@ -3850,109 +2803,6 @@
   return nullptr;
 }
 
-Node* AstGraphBuilder::TryLoadDynamicVariable(Variable* variable,
-                                              Handle<String> name,
-                                              BailoutId bailout_id,
-                                              const VectorSlotPair& feedback,
-                                              OutputFrameStateCombine combine,
-                                              TypeofMode typeof_mode) {
-  VariableMode mode = variable->mode();
-
-  if (mode == DYNAMIC_GLOBAL) {
-    uint32_t bitset = ComputeBitsetForDynamicGlobal(variable);
-    if (bitset == kFullCheckRequired) return nullptr;
-
-    // We are using two blocks to model fast and slow cases.
-    BlockBuilder fast_block(this);
-    BlockBuilder slow_block(this);
-    environment()->Push(jsgraph()->TheHoleConstant());
-    slow_block.BeginBlock();
-    environment()->Pop();
-    fast_block.BeginBlock();
-
-    // Perform checks whether the fast mode applies, by looking for any
-    // extension object which might shadow the optimistic declaration.
-    for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
-      if ((bitset & 1) == 0) continue;
-      Node* load = NewNode(
-          javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
-          current_context());
-      Node* check =
-          NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), load,
-                  jsgraph()->TheHoleConstant());
-      fast_block.BreakUnless(check, BranchHint::kTrue);
-    }
-
-    // Fast case, because variable is not shadowed.
-    if (Node* constant = TryLoadGlobalConstant(name)) {
-      environment()->Push(constant);
-    } else {
-      // Perform global slot load.
-      Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
-      PrepareFrameState(fast, bailout_id, combine);
-      environment()->Push(fast);
-    }
-    slow_block.Break();
-    environment()->Pop();
-    fast_block.EndBlock();
-
-    // Slow case, because variable potentially shadowed. Perform dynamic lookup.
-    Node* slow = BuildDynamicLoad(name, typeof_mode);
-    PrepareFrameState(slow, bailout_id, combine);
-    environment()->Push(slow);
-    slow_block.EndBlock();
-
-    return environment()->Pop();
-  }
-
-  if (mode == DYNAMIC_LOCAL) {
-    uint32_t bitset = ComputeBitsetForDynamicContext(variable);
-    if (bitset == kFullCheckRequired) return nullptr;
-
-    // We are using two blocks to model fast and slow cases.
-    BlockBuilder fast_block(this);
-    BlockBuilder slow_block(this);
-    environment()->Push(jsgraph()->TheHoleConstant());
-    slow_block.BeginBlock();
-    environment()->Pop();
-    fast_block.BeginBlock();
-
-    // Perform checks whether the fast mode applies, by looking for any
-    // extension object which might shadow the optimistic declaration.
-    for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
-      if ((bitset & 1) == 0) continue;
-      Node* load = NewNode(
-          javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
-          current_context());
-      Node* check =
-          NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), load,
-                  jsgraph()->TheHoleConstant());
-      fast_block.BreakUnless(check, BranchHint::kTrue);
-    }
-
-    // Fast case, because variable is not shadowed. Perform context slot load.
-    Variable* local = variable->local_if_not_shadowed();
-    DCHECK(local->location() == VariableLocation::CONTEXT);  // Must be context.
-    Node* fast =
-        BuildVariableLoad(local, bailout_id, feedback, combine, typeof_mode);
-    environment()->Push(fast);
-    slow_block.Break();
-    environment()->Pop();
-    fast_block.EndBlock();
-
-    // Slow case, because variable potentially shadowed. Perform dynamic lookup.
-    Node* slow = BuildDynamicLoad(name, typeof_mode);
-    PrepareFrameState(slow, bailout_id, combine);
-    environment()->Push(slow);
-    slow_block.EndBlock();
-
-    return environment()->Pop();
-  }
-
-  return nullptr;
-}
-
-
 Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
   switch (input->opcode()) {
     case IrOpcode::kNumberConstant: {
@@ -3983,24 +2833,6 @@
 }
 
 
-Node* AstGraphBuilder::TryFastToName(Node* input) {
-  switch (input->opcode()) {
-    case IrOpcode::kHeapConstant: {
-      Handle<HeapObject> object = HeapObjectMatcher(input).Value();
-      if (object->IsName()) return input;
-      break;
-    }
-    case IrOpcode::kJSToString:
-    case IrOpcode::kJSToName:
-    case IrOpcode::kJSTypeOf:
-      return input;
-    default:
-      break;
-  }
-  return nullptr;
-}
-
-
 bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
   if (info()->osr_ast_id() == stmt->OsrEntryId()) {
     DCHECK_EQ(-1, info()->osr_expr_stack_height());
@@ -4073,7 +2905,6 @@
   if (!has_context && !has_frame_state && !has_control && !has_effect) {
     result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
   } else {
-    bool inside_try_scope = try_nesting_level_ > 0;
     int input_count_with_deps = value_input_count;
     if (has_context) ++input_count_with_deps;
     if (has_frame_state) ++input_count_with_deps;
@@ -4107,18 +2938,6 @@
       if (result->op()->EffectOutputCount() > 0) {
         environment_->UpdateEffectDependency(result);
       }
-      // Add implicit exception continuation for throwing nodes.
-      if (!result->op()->HasProperty(Operator::kNoThrow) && inside_try_scope) {
-        // Copy the environment for the success continuation.
-        Environment* success_env = environment()->CopyForConditional();
-        const Operator* op = common()->IfException();
-        Node* effect = environment()->GetEffectDependency();
-        Node* on_exception = graph()->NewNode(op, effect, result);
-        environment_->UpdateControlDependency(on_exception);
-        environment_->UpdateEffectDependency(on_exception);
-        execution_control()->ThrowValue(on_exception);
-        set_environment(success_env);
-      }
       // Add implicit success continuation for throwing nodes.
       if (!result->op()->HasProperty(Operator::kNoThrow)) {
         const Operator* op = common()->IfSuccess();
@@ -4244,8 +3063,7 @@
   Node* osr_context = effect = contexts()->back();
   int last = static_cast<int>(contexts()->size() - 1);
   for (int i = last - 1; i >= 0; i--) {
-    osr_context = effect =
-        graph->NewNode(load_op, osr_context, osr_context, effect);
+    osr_context = effect = graph->NewNode(load_op, osr_context, effect);
     contexts()->at(i) = osr_context;
   }
   UpdateEffectDependency(effect);
@@ -4364,10 +3182,9 @@
 AstGraphBuilderWithPositions::AstGraphBuilderWithPositions(
     Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
     float invocation_frequency, LoopAssignmentAnalysis* loop_assignment,
-    TypeHintAnalysis* type_hint_analysis, SourcePositionTable* source_positions,
-    int inlining_id)
+    SourcePositionTable* source_positions, int inlining_id)
     : AstGraphBuilder(local_zone, info, jsgraph, invocation_frequency,
-                      loop_assignment, type_hint_analysis),
+                      loop_assignment),
       source_positions_(source_positions),
       start_position_(info->shared_info()->start_position(), inlining_id) {}
 
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 2013f50..4fd3f35 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -26,7 +26,6 @@
 class LoopAssignmentAnalysis;
 class LoopBuilder;
 class Node;
-class TypeHintAnalysis;
 
 
 // The AstGraphBuilder produces a high-level IR graph, based on an
@@ -39,8 +38,7 @@
  public:
   AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
                   float invocation_frequency,
-                  LoopAssignmentAnalysis* loop_assignment = nullptr,
-                  TypeHintAnalysis* type_hint_analysis = nullptr);
+                  LoopAssignmentAnalysis* loop_assignment = nullptr);
   virtual ~AstGraphBuilder() {}
 
   // Creates a graph by visiting the entire AST.
@@ -73,8 +71,6 @@
   class ControlScope;
   class ControlScopeForBreakable;
   class ControlScopeForIteration;
-  class ControlScopeForCatch;
-  class ControlScopeForFinally;
   class Environment;
   friend class ControlBuilder;
 
@@ -98,10 +94,6 @@
   // Nodes representing values in the activation record.
   SetOncePointer<Node> function_closure_;
   SetOncePointer<Node> function_context_;
-  SetOncePointer<Node> new_target_;
-
-  // Tracks how many try-blocks are currently entered.
-  int try_nesting_level_;
 
   // Temporary storage for building node input lists.
   int input_buffer_size_;
@@ -119,9 +111,6 @@
   // Result of loop assignment analysis performed before graph creation.
   LoopAssignmentAnalysis* loop_assignment_analysis_;
 
-  // Result of type hint analysis performed before graph creation.
-  TypeHintAnalysis* type_hint_analysis_;
-
   // Cache for StateValues nodes for frame states.
   StateValuesCache state_values_cache_;
 
@@ -171,9 +160,6 @@
   // Get or create the node that represents the incoming function context.
   Node* GetFunctionContext();
 
-  // Get or create the node that represents the incoming new target value.
-  Node* GetNewTarget();
-
   // Get or create the node that represents the empty frame state.
   Node* GetEmptyFrameState();
 
@@ -260,15 +246,10 @@
   Node** EnsureInputBufferSize(int size);
 
   // Named and keyed loads require a VectorSlotPair for successful lowering.
-  VectorSlotPair CreateVectorSlotPair(FeedbackVectorSlot slot) const;
+  VectorSlotPair CreateVectorSlotPair(FeedbackSlot slot) const;
 
-  // Determine which contexts need to be checked for extension objects that
-  // might shadow the optimistic declaration of dynamic lookup variables.
-  uint32_t ComputeBitsetForDynamicGlobal(Variable* variable);
-  uint32_t ComputeBitsetForDynamicContext(Variable* variable);
-
-  // Computes the frequency for JSCallFunction and JSCallConstruct nodes.
-  float ComputeCallFrequency(FeedbackVectorSlot slot) const;
+  // Computes the frequency for JSCall and JSConstruct nodes.
+  float ComputeCallFrequency(FeedbackSlot slot) const;
 
   // ===========================================================================
   // The following build methods all generate graph fragments and return one
@@ -284,15 +265,6 @@
   // Builder to create an arguments object if it is used.
   Node* BuildArgumentsObject(Variable* arguments);
 
-  // Builder to create an array of rest parameters if used.
-  Node* BuildRestArgumentsArray(Variable* rest);
-
-  // Builder that assigns to the {.this_function} internal variable if needed.
-  Node* BuildThisFunctionVariable(Variable* this_function_var);
-
-  // Builder that assigns to the {new.target} internal variable if needed.
-  Node* BuildNewTargetVariable(Variable* new_target_var);
-
   // Builders for variable load and assignment.
   Node* BuildVariableAssignment(Variable* variable, Node* value,
                                 Token::Value op, const VectorSlotPair& slot,
@@ -315,16 +287,8 @@
                         const VectorSlotPair& feedback);
   Node* BuildNamedStore(Node* receiver, Handle<Name> name, Node* value,
                         const VectorSlotPair& feedback);
-
-  // Builders for super property loads and stores.
-  Node* BuildKeyedSuperStore(Node* receiver, Node* home_object, Node* key,
-                             Node* value);
-  Node* BuildNamedSuperStore(Node* receiver, Node* home_object,
-                             Handle<Name> name, Node* value);
-  Node* BuildNamedSuperLoad(Node* receiver, Node* home_object,
-                            Handle<Name> name, const VectorSlotPair& feedback);
-  Node* BuildKeyedSuperLoad(Node* receiver, Node* home_object, Node* key,
-                            const VectorSlotPair& feedback);
+  Node* BuildNamedStoreOwn(Node* receiver, Handle<Name> name, Node* value,
+                           const VectorSlotPair& feedback);
 
   // Builders for global variable loads and stores.
   Node* BuildGlobalLoad(Handle<Name> name, const VectorSlotPair& feedback,
@@ -332,17 +296,12 @@
   Node* BuildGlobalStore(Handle<Name> name, Node* value,
                          const VectorSlotPair& feedback);
 
-  // Builders for dynamic variable loads and stores.
-  Node* BuildDynamicLoad(Handle<Name> name, TypeofMode typeof_mode);
-  Node* BuildDynamicStore(Handle<Name> name, Node* value);
-
   // Builders for accessing the function context.
   Node* BuildLoadGlobalObject();
   Node* BuildLoadNativeContextField(int index);
 
   // Builders for automatic type conversion.
   Node* BuildToBoolean(Node* input, TypeFeedbackId feedback_id);
-  Node* BuildToName(Node* input, BailoutId bailout_id);
   Node* BuildToObject(Node* input, BailoutId bailout_id);
 
   // Builder for adding the [[HomeObject]] to a value if the value came from a
@@ -354,8 +313,6 @@
   Node* BuildThrowError(Node* exception, BailoutId bailout_id);
   Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
   Node* BuildThrowConstAssignError(BailoutId bailout_id);
-  Node* BuildThrowStaticPrototypeError(BailoutId bailout_id);
-  Node* BuildThrowUnsupportedSuperError(BailoutId bailout_id);
 
   // Builders for dynamic hole-checks at runtime.
   Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole,
@@ -363,9 +320,6 @@
   Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole,
                                 BailoutId bailout_id);
 
-  // Builders for conditional errors.
-  Node* BuildThrowIfStaticPrototype(Node* name, BailoutId bailout_id);
-
   // Builders for non-local control flow.
   Node* BuildReturn(Node* return_value);
   Node* BuildThrow(Node* exception_value);
@@ -387,17 +341,8 @@
   // Optimization for variable load from global object.
   Node* TryLoadGlobalConstant(Handle<Name> name);
 
-  // Optimization for variable load of dynamic lookup slot that is most likely
-  // to resolve to a global slot or context slot (inferred from scope chain).
-  Node* TryLoadDynamicVariable(Variable* variable, Handle<String> name,
-                               BailoutId bailout_id,
-                               const VectorSlotPair& feedback,
-                               OutputFrameStateCombine combine,
-                               TypeofMode typeof_mode);
-
   // Optimizations for automatic type conversion.
   Node* TryFastToBoolean(Node* input);
-  Node* TryFastToName(Node* input);
 
   // ===========================================================================
   // The following visitation methods all recursively visit a subtree of the
@@ -408,7 +353,6 @@
 
   // Visit statements.
   void VisitIfNotNull(Statement* stmt);
-  void VisitInScope(Statement* stmt, Scope* scope, Node* context);
 
   // Visit expressions.
   void Visit(Expression* expr);
@@ -449,11 +393,6 @@
   void VisitLiteralCompareTypeof(CompareOperation* expr, Expression* sub_expr,
                                  Handle<String> check);
 
-  // Dispatched from VisitForInStatement.
-  void VisitForInAssignment(Expression* expr, Node* value,
-                            const VectorSlotPair& feedback,
-                            BailoutId bailout_id);
-
   // Dispatched from VisitObjectLiteral.
   void VisitObjectLiteralAccessor(Node* home_object,
                                   ObjectLiteralProperty* property);
@@ -622,7 +561,6 @@
   AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
                                JSGraph* jsgraph, float invocation_frequency,
                                LoopAssignmentAnalysis* loop_assignment,
-                               TypeHintAnalysis* type_hint_analysis,
                                SourcePositionTable* source_positions,
                                int inlining_id = SourcePosition::kNotInlined);
 
diff --git a/src/compiler/ast-loop-assignment-analyzer.cc b/src/compiler/ast-loop-assignment-analyzer.cc
index 82eaeb2..8239e3a 100644
--- a/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/src/compiler/ast-loop-assignment-analyzer.cc
@@ -5,6 +5,7 @@
 #include "src/compiler/ast-loop-assignment-analyzer.h"
 #include "src/ast/scopes.h"
 #include "src/compilation-info.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -201,6 +202,7 @@
 
 void ALAA::VisitEmptyParentheses(EmptyParentheses* e) { UNREACHABLE(); }
 
+void ALAA::VisitGetIterator(GetIterator* e) { UNREACHABLE(); }
 
 void ALAA::VisitCaseClause(CaseClause* cc) {
   if (!cc->is_default()) Visit(cc->label());
diff --git a/src/compiler/branch-elimination.cc b/src/compiler/branch-elimination.cc
index 9b36eb1..2d9a084 100644
--- a/src/compiler/branch-elimination.cc
+++ b/src/compiler/branch-elimination.cc
@@ -18,7 +18,9 @@
       jsgraph_(js_graph),
       node_conditions_(zone, js_graph->graph()->NodeCount()),
       zone_(zone),
-      dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {}
+      dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {
+  NodeProperties::SetType(dead_, Type::None());
+}
 
 BranchElimination::~BranchElimination() {}
 
@@ -83,7 +85,7 @@
   DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
          node->opcode() == IrOpcode::kDeoptimizeUnless);
   bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
-  DeoptimizeReason reason = DeoptimizeReasonOf(node->op());
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   Node* condition = NodeProperties::GetValueInput(node, 0);
   Node* frame_state = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -103,9 +105,8 @@
       // with the {control} node that already contains the right information.
       ReplaceWithValue(node, dead(), effect, control);
     } else {
-      control =
-          graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager, reason),
-                           frame_state, effect, control);
+      control = graph()->NewNode(common()->Deoptimize(p.kind(), p.reason()),
+                                 frame_state, effect, control);
       // TODO(bmeurer): This should be on the AdvancedReducer somehow.
       NodeProperties::MergeControlToEnd(graph(), common(), control);
       Revisit(graph()->end());
@@ -143,20 +144,27 @@
 Reduction BranchElimination::ReduceMerge(Node* node) {
   // Shortcut for the case when we do not know anything about some
   // input.
-  for (int i = 0; i < node->InputCount(); i++) {
-    if (node_conditions_.Get(node->InputAt(i)) == nullptr) {
+  Node::Inputs inputs = node->inputs();
+  for (Node* input : inputs) {
+    if (node_conditions_.Get(input) == nullptr) {
       return UpdateConditions(node, nullptr);
     }
   }
 
-  const ControlPathConditions* first = node_conditions_.Get(node->InputAt(0));
+  auto input_it = inputs.begin();
+
+  DCHECK_GT(inputs.count(), 0);
+
+  const ControlPathConditions* first = node_conditions_.Get(*input_it);
+  ++input_it;
   // Make a copy of the first input's conditions and merge with the conditions
   // from other inputs.
   ControlPathConditions* conditions =
       new (zone_->New(sizeof(ControlPathConditions)))
           ControlPathConditions(*first);
-  for (int i = 1; i < node->InputCount(); i++) {
-    conditions->Merge(*(node_conditions_.Get(node->InputAt(i))));
+  auto input_end = inputs.end();
+  for (; input_it != input_end; ++input_it) {
+    conditions->Merge(*(node_conditions_.Get(*input_it)));
   }
 
   return UpdateConditions(node, conditions);
diff --git a/src/compiler/bytecode-analysis.cc b/src/compiler/bytecode-analysis.cc
new file mode 100644
index 0000000..6d8afe1
--- /dev/null
+++ b/src/compiler/bytecode-analysis.cc
@@ -0,0 +1,621 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-analysis.h"
+
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-array-random-iterator.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+using namespace interpreter;
+
+BytecodeLoopAssignments::BytecodeLoopAssignments(int parameter_count,
+                                                 int register_count, Zone* zone)
+    : parameter_count_(parameter_count),
+      bit_vector_(new (zone)
+                      BitVector(parameter_count + register_count, zone)) {}
+
+void BytecodeLoopAssignments::Add(interpreter::Register r) {
+  if (r.is_parameter()) {
+    bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+  } else {
+    bit_vector_->Add(parameter_count_ + r.index());
+  }
+}
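+
+// A minimal sketch of the index layout used above (illustrative, with
+// parameter_count_ == 3): the three parameters map to bit indices 0..2 via
+// ToParameterIndex, while locals r0, r1, ... map to indices 3, 4, ... via
+// parameter_count_ + r.index(). ContainsParameter and ContainsLocal below
+// read back the same mapping.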
+
+void BytecodeLoopAssignments::AddPair(interpreter::Register r) {
+  if (r.is_parameter()) {
+    DCHECK(interpreter::Register(r.index() + 1).is_parameter());
+    bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+    bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 1);
+  } else {
+    DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+    bit_vector_->Add(parameter_count_ + r.index());
+    bit_vector_->Add(parameter_count_ + r.index() + 1);
+  }
+}
+
+void BytecodeLoopAssignments::AddTriple(interpreter::Register r) {
+  if (r.is_parameter()) {
+    DCHECK(interpreter::Register(r.index() + 1).is_parameter());
+    DCHECK(interpreter::Register(r.index() + 2).is_parameter());
+    bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+    bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 1);
+    bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 2);
+  } else {
+    DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+    DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
+    bit_vector_->Add(parameter_count_ + r.index());
+    bit_vector_->Add(parameter_count_ + r.index() + 1);
+    bit_vector_->Add(parameter_count_ + r.index() + 2);
+  }
+}
+
+void BytecodeLoopAssignments::AddAll() { bit_vector_->AddAll(); }
+
+void BytecodeLoopAssignments::Union(const BytecodeLoopAssignments& other) {
+  bit_vector_->Union(*other.bit_vector_);
+}
+
+bool BytecodeLoopAssignments::ContainsParameter(int index) const {
+  DCHECK_GE(index, 0);
+  DCHECK_LT(index, parameter_count());
+  return bit_vector_->Contains(index);
+}
+
+bool BytecodeLoopAssignments::ContainsLocal(int index) const {
+  DCHECK_GE(index, 0);
+  DCHECK_LT(index, local_count());
+  return bit_vector_->Contains(parameter_count_ + index);
+}
+
+bool BytecodeLoopAssignments::ContainsAccumulator() const {
+  // TODO(leszeks): This assumes the accumulator is always assigned. This is
+  // probably correct, but that assignment is also probably dead, so we should
+  // check liveness.
+  return true;
+}
+
+BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
+                                   Zone* zone, bool do_liveness_analysis)
+    : bytecode_array_(bytecode_array),
+      do_liveness_analysis_(do_liveness_analysis),
+      zone_(zone),
+      loop_stack_(zone),
+      loop_end_index_queue_(zone),
+      end_to_header_(zone),
+      header_to_info_(zone),
+      liveness_map_(bytecode_array->length(), zone) {}
+
+namespace {
+
+void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
+                      const BytecodeArrayAccessor& accessor) {
+  int num_operands = Bytecodes::NumberOfOperands(bytecode);
+  const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+
+  if (Bytecodes::WritesAccumulator(bytecode)) {
+    in_liveness.MarkAccumulatorDead();
+  }
+  for (int i = 0; i < num_operands; ++i) {
+    switch (operand_types[i]) {
+      case OperandType::kRegOut: {
+        interpreter::Register r = accessor.GetRegisterOperand(i);
+        if (!r.is_parameter()) {
+          in_liveness.MarkRegisterDead(r.index());
+        }
+        break;
+      }
+      case OperandType::kRegOutPair: {
+        interpreter::Register r = accessor.GetRegisterOperand(i);
+        if (!r.is_parameter()) {
+          DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+          in_liveness.MarkRegisterDead(r.index());
+          in_liveness.MarkRegisterDead(r.index() + 1);
+        }
+        break;
+      }
+      case OperandType::kRegOutTriple: {
+        interpreter::Register r = accessor.GetRegisterOperand(i);
+        if (!r.is_parameter()) {
+          DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+          DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
+          in_liveness.MarkRegisterDead(r.index());
+          in_liveness.MarkRegisterDead(r.index() + 1);
+          in_liveness.MarkRegisterDead(r.index() + 2);
+        }
+        break;
+      }
+      default:
+        DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i]));
+        break;
+    }
+  }
+
+  if (Bytecodes::ReadsAccumulator(bytecode)) {
+    in_liveness.MarkAccumulatorLive();
+  }
+  for (int i = 0; i < num_operands; ++i) {
+    switch (operand_types[i]) {
+      case OperandType::kReg: {
+        interpreter::Register r = accessor.GetRegisterOperand(i);
+        if (!r.is_parameter()) {
+          in_liveness.MarkRegisterLive(r.index());
+        }
+        break;
+      }
+      case OperandType::kRegPair: {
+        interpreter::Register r = accessor.GetRegisterOperand(i);
+        if (!r.is_parameter()) {
+          DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+          in_liveness.MarkRegisterLive(r.index());
+          in_liveness.MarkRegisterLive(r.index() + 1);
+        }
+        break;
+      }
+      case OperandType::kRegList: {
+        interpreter::Register r = accessor.GetRegisterOperand(i++);
+        uint32_t reg_count = accessor.GetRegisterCountOperand(i);
+        if (!r.is_parameter()) {
+          for (uint32_t j = 0; j < reg_count; ++j) {
+            DCHECK(!interpreter::Register(r.index() + j).is_parameter());
+            in_liveness.MarkRegisterLive(r.index() + j);
+          }
+        }
+        // Avoid falling through into the default case's DCHECK.
+        break;
+      }
+      default:
+        DCHECK(!Bytecodes::IsRegisterInputOperandType(operand_types[i]));
+        break;
+    }
+  }
+}
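+
+// Note on the ordering above: in_liveness arrives as a copy of the bytecode's
+// out-liveness, so UpdateInLiveness implements the standard backward dataflow
+// update in = (out - defs) | uses by first killing written registers and then
+// marking read registers live. Killing before marking keeps a register live
+// when a bytecode both reads and writes it, since the read happens before the
+// write at runtime.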
+
+void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
+                       BytecodeLivenessState* next_bytecode_in_liveness,
+                       const BytecodeArrayAccessor& accessor,
+                       const BytecodeLivenessMap& liveness_map) {
+  int current_offset = accessor.current_offset();
+  const Handle<BytecodeArray>& bytecode_array = accessor.bytecode_array();
+
+  // Update from jump target (if any). Skip loops; we update these manually in
+  // the liveness iterations.
+  if (Bytecodes::IsForwardJump(bytecode)) {
+    int target_offset = accessor.GetJumpTargetOffset();
+    out_liveness.Union(*liveness_map.GetInLiveness(target_offset));
+  }
+
+  // Update from next bytecode (unless there isn't one or this is an
+  // unconditional jump).
+  if (next_bytecode_in_liveness != nullptr &&
+      !Bytecodes::IsUnconditionalJump(bytecode)) {
+    out_liveness.Union(*next_bytecode_in_liveness);
+  }
+
+  // Update from exception handler (if any).
+  if (!interpreter::Bytecodes::IsWithoutExternalSideEffects(bytecode)) {
+    int handler_context;
+    // TODO(leszeks): We should look up this range only once per entry.
+    HandlerTable* table = HandlerTable::cast(bytecode_array->handler_table());
+    int handler_offset =
+        table->LookupRange(current_offset, &handler_context, nullptr);
+
+    if (handler_offset != -1) {
+      out_liveness.Union(*liveness_map.GetInLiveness(handler_offset));
+      out_liveness.MarkRegisterLive(handler_context);
+    }
+  }
+}
+
+void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
+                       const BytecodeArrayAccessor& accessor) {
+  int num_operands = Bytecodes::NumberOfOperands(bytecode);
+  const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+
+  for (int i = 0; i < num_operands; ++i) {
+    switch (operand_types[i]) {
+      case OperandType::kRegOut: {
+        assignments.Add(accessor.GetRegisterOperand(i));
+        break;
+      }
+      case OperandType::kRegOutPair: {
+        assignments.AddPair(accessor.GetRegisterOperand(i));
+        break;
+      }
+      case OperandType::kRegOutTriple: {
+        assignments.AddTriple(accessor.GetRegisterOperand(i));
+        break;
+      }
+      default:
+        DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i]));
+        break;
+    }
+  }
+}
+
+}  // namespace
+
+void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
+  loop_stack_.push({-1, nullptr});
+
+  BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
+
+  int osr_loop_end_offset =
+      osr_bailout_id.IsNone() ? -1 : osr_bailout_id.ToInt();
+
+  BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+  for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
+    Bytecode bytecode = iterator.current_bytecode();
+    int current_offset = iterator.current_offset();
+
+    if (bytecode == Bytecode::kJumpLoop) {
+      // Every byte up to and including the last byte within the backwards jump
+      // instruction is considered part of the loop; set the loop end
+      // accordingly.
+      int loop_end = current_offset + iterator.current_bytecode_size();
+      PushLoop(iterator.GetJumpTargetOffset(), loop_end);
+
+      // Normally prefixed bytecodes are treated as if the prefix's offset was
+      // the actual bytecode's offset. However, the OSR id is the offset of the
+      // actual JumpLoop bytecode, so we need to find the location of that
+      // bytecode ignoring the prefix.
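+      // For example (hypothetical offsets): for a Wide-prefixed JumpLoop with
+      // the prefix at offset 10, current_offset() is 10 and
+      // current_prefix_offset() is 1, so jump_loop_offset is 11, the offset
+      // of the JumpLoop bytecode itself that the OSR bailout id records.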
+      int jump_loop_offset = current_offset + iterator.current_prefix_offset();
+      bool is_osr_loop = (jump_loop_offset == osr_loop_end_offset);
+
+      // Check that is_osr_loop is set iff the osr_loop_end_offset is within
+      // this bytecode.
+      DCHECK(!is_osr_loop ||
+             iterator.OffsetWithinBytecode(osr_loop_end_offset));
+
+      // OSR "assigns" everything to OSR values on entry into an OSR loop, so we
+      // need to make sure to considered everything to be assigned.
+      if (is_osr_loop) {
+        loop_stack_.top().loop_info->assignments().AddAll();
+      }
+
+      // Save the index so that we can do another pass later.
+      if (do_liveness_analysis_) {
+        loop_end_index_queue_.push_back(iterator.current_index());
+      }
+    } else if (loop_stack_.size() > 1) {
+      LoopStackEntry& current_loop = loop_stack_.top();
+      LoopInfo* current_loop_info = current_loop.loop_info;
+
+      // TODO(leszeks): Ideally, we'd only set values that were assigned in
+      // the loop *and* are live when the loop exits. However, this requires
+      // tracking the out-liveness of *all* loop exits, which is not
+      // information we currently have.
+      UpdateAssignments(bytecode, current_loop_info->assignments(), iterator);
+
+      if (current_offset == current_loop.header_offset) {
+        loop_stack_.pop();
+        if (loop_stack_.size() > 1) {
+          // Propagate inner loop assignments to outer loop.
+          loop_stack_.top().loop_info->assignments().Union(
+              current_loop_info->assignments());
+        }
+      }
+    }
+
+    if (do_liveness_analysis_) {
+      BytecodeLiveness& liveness = liveness_map_.InitializeLiveness(
+          current_offset, bytecode_array()->register_count(), zone());
+
+      UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+                        iterator, liveness_map_);
+      liveness.in->CopyFrom(*liveness.out);
+      UpdateInLiveness(bytecode, *liveness.in, iterator);
+
+      next_bytecode_in_liveness = liveness.in;
+    }
+  }
+
+  DCHECK_EQ(loop_stack_.size(), 1u);
+  DCHECK_EQ(loop_stack_.top().header_offset, -1);
+
+  if (!do_liveness_analysis_) return;
+
+  // At this point, every bytecode has a valid in and out liveness, except for
+  // propagating liveness across back edges (i.e. JumpLoop). Subsequent liveness
+  // analysis iterations can only add additional liveness bits that are pulled
+  // across these back edges.
+  //
+  // Furthermore, a loop header's in-liveness can only change based on
+  // bytecodes *after* the loop end; it cannot change as a result of the
+  // JumpLoop liveness being updated, as the only liveness bits that can be
+  // added to the loop body are those of the loop header.
+  //
+  // So, if we know that the liveness of bytecodes after a loop header won't
+  // change (e.g. because there are no loops in them, or we have already
+  // ensured those loops are valid), we can safely update the loop end and
+  // pass over the loop body. We then never have to pass over that loop end
+  // again: we have shown that its target, the loop header, can't change from
+  // the entries after the loop, and can't change from any loop body pass.
+  //
+  // This means that in a pass, we can iterate backwards over the bytecode
+  // array, process any loops that we encounter, and on subsequent passes we can
+  // skip processing those loops (though we still have to process inner loops).
+  //
+  // Equivalently, we can queue up loop ends from back to front, and pass over
+  // the loops in that order, as this preserves both the bottom-to-top and
+  // outer-to-inner requirements.
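+  //
+  // Illustration (not part of the algorithm): for two sibling loops A and B
+  // (A first in the bytecode), each containing one inner loop (Ai, Bi), the
+  // backwards pass above pushes the loop ends in the order
+  //   [B, Bi, A, Ai]
+  // so the fixpoint pass below revisits each outer loop before its inner
+  // loops, and each later loop before each earlier one -- exactly the
+  // bottom-to-top, outer-to-inner order argued for above.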
+
+  for (int loop_end_index : loop_end_index_queue_) {
+    iterator.GoToIndex(loop_end_index);
+
+    DCHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
+
+    int header_offset = iterator.GetJumpTargetOffset();
+    int end_offset = iterator.current_offset();
+
+    BytecodeLiveness& header_liveness =
+        liveness_map_.GetLiveness(header_offset);
+    BytecodeLiveness& end_liveness = liveness_map_.GetLiveness(end_offset);
+
+    if (!end_liveness.out->UnionIsChanged(*header_liveness.in)) {
+      // Only update the loop body if the loop end liveness changed.
+      continue;
+    }
+    end_liveness.in->CopyFrom(*end_liveness.out);
+    next_bytecode_in_liveness = end_liveness.in;
+
+    // Advance into the loop body.
+    --iterator;
+    for (; iterator.current_offset() > header_offset; --iterator) {
+      Bytecode bytecode = iterator.current_bytecode();
+
+      int current_offset = iterator.current_offset();
+      BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
+
+      UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+                        iterator, liveness_map_);
+      liveness.in->CopyFrom(*liveness.out);
+      UpdateInLiveness(bytecode, *liveness.in, iterator);
+
+      next_bytecode_in_liveness = liveness.in;
+    }
+    // Now we are at the loop header. Since the in-liveness of the header
+    // can't change, we only need to update the out-liveness.
+    UpdateOutLiveness(iterator.current_bytecode(), *header_liveness.out,
+                      next_bytecode_in_liveness, iterator, liveness_map_);
+  }
+
+  DCHECK(LivenessIsValid());
+}
+
+void BytecodeAnalysis::PushLoop(int loop_header, int loop_end) {
+  DCHECK(loop_header < loop_end);
+  DCHECK(loop_stack_.top().header_offset < loop_header);
+  DCHECK(end_to_header_.find(loop_end) == end_to_header_.end());
+  DCHECK(header_to_info_.find(loop_header) == header_to_info_.end());
+
+  int parent_offset = loop_stack_.top().header_offset;
+
+  end_to_header_.insert({loop_end, loop_header});
+  auto it = header_to_info_.insert(
+      {loop_header, LoopInfo(parent_offset, bytecode_array_->parameter_count(),
+                             bytecode_array_->register_count(), zone_)});
+  // Get the loop info pointer from the output of insert.
+  LoopInfo* loop_info = &it.first->second;
+
+  loop_stack_.push({loop_header, loop_info});
+}
+
+bool BytecodeAnalysis::IsLoopHeader(int offset) const {
+  return header_to_info_.find(offset) != header_to_info_.end();
+}
+
+int BytecodeAnalysis::GetLoopOffsetFor(int offset) const {
+  auto loop_end_to_header = end_to_header_.upper_bound(offset);
+  // If there is no loop end after the offset, the offset is not in a loop.
+  if (loop_end_to_header == end_to_header_.end()) {
+    return -1;
+  }
+  // If the header precedes the offset, this is the loop:
+  //
+  //   .> header  <--loop_end_to_header
+  //   |
+  //   |  <--offset
+  //   |
+  //   `- end
+  if (loop_end_to_header->second <= offset) {
+    return loop_end_to_header->second;
+  }
+  // Otherwise there is a (potentially nested) loop after this offset.
+  //
+  //    <--offset
+  //
+  //   .> header
+  //   |
+  //   | .> header  <--loop_end_to_header
+  //   | |
+  //   | `- end
+  //   |
+  //   `- end
+  // We just return the parent of the next loop (might be -1).
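+  //
+  // Worked example (hypothetical offsets): with an outer loop
+  // {header 4, end 20} and a nested loop {header 8, end 14},
+  // GetLoopOffsetFor(10) sees upper_bound(10) == {14 -> 8} and, since
+  // 8 <= 10, returns the innermost containing header 8 above, whereas
+  // GetLoopOffsetFor(6) also sees {14 -> 8} but falls through to here and
+  // returns the parent of header 8, i.e. the outer header 4.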
+  DCHECK(header_to_info_.upper_bound(offset) != header_to_info_.end());
+
+  return header_to_info_.upper_bound(offset)->second.parent_offset();
+}
+
+const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const {
+  DCHECK(IsLoopHeader(header_offset));
+
+  return header_to_info_.find(header_offset)->second;
+}
+
+const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor(
+    int offset) const {
+  if (!do_liveness_analysis_) return nullptr;
+
+  return liveness_map_.GetInLiveness(offset);
+}
+
+const BytecodeLivenessState* BytecodeAnalysis::GetOutLivenessFor(
+    int offset) const {
+  if (!do_liveness_analysis_) return nullptr;
+
+  return liveness_map_.GetOutLiveness(offset);
+}
+
+std::ostream& BytecodeAnalysis::PrintLivenessTo(std::ostream& os) const {
+  interpreter::BytecodeArrayIterator iterator(bytecode_array());
+
+  for (; !iterator.done(); iterator.Advance()) {
+    int current_offset = iterator.current_offset();
+
+    const BitVector& in_liveness =
+        GetInLivenessFor(current_offset)->bit_vector();
+    const BitVector& out_liveness =
+        GetOutLivenessFor(current_offset)->bit_vector();
+
+    for (int i = 0; i < in_liveness.length(); ++i) {
+      os << (in_liveness.Contains(i) ? "L" : ".");
+    }
+    os << " -> ";
+
+    for (int i = 0; i < out_liveness.length(); ++i) {
+      os << (out_liveness.Contains(i) ? "L" : ".");
+    }
+
+    os << " | " << current_offset << ": ";
+    iterator.PrintTo(os) << std::endl;
+  }
+
+  return os;
+}
+
+#if DEBUG
+bool BytecodeAnalysis::LivenessIsValid() {
+  BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+
+  BytecodeLivenessState previous_liveness(bytecode_array()->register_count(),
+                                          zone());
+
+  int invalid_offset = -1;
+  int which_invalid = -1;
+
+  BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
+
+  // Ensure that there are no liveness changes if we iterate one more time.
+  for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
+    Bytecode bytecode = iterator.current_bytecode();
+
+    int current_offset = iterator.current_offset();
+
+    BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
+
+    previous_liveness.CopyFrom(*liveness.out);
+
+    UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+                      iterator, liveness_map_);
+    // UpdateOutLiveness skips kJumpLoop, so we update it manually.
+    if (bytecode == Bytecode::kJumpLoop) {
+      int target_offset = iterator.GetJumpTargetOffset();
+      liveness.out->Union(*liveness_map_.GetInLiveness(target_offset));
+    }
+
+    if (!liveness.out->Equals(previous_liveness)) {
+      // Reset the invalid liveness.
+      liveness.out->CopyFrom(previous_liveness);
+      invalid_offset = current_offset;
+      which_invalid = 1;
+      break;
+    }
+
+    previous_liveness.CopyFrom(*liveness.in);
+
+    liveness.in->CopyFrom(*liveness.out);
+    UpdateInLiveness(bytecode, *liveness.in, iterator);
+
+    if (!liveness.in->Equals(previous_liveness)) {
+      // Reset the invalid liveness.
+      liveness.in->CopyFrom(previous_liveness);
+      invalid_offset = current_offset;
+      which_invalid = 0;
+      break;
+    }
+
+    next_bytecode_in_liveness = liveness.in;
+  }
+
+  if (invalid_offset != -1) {
+    OFStream of(stderr);
+    of << "Invalid liveness:" << std::endl;
+
+    // Dump the bytecode, annotated with the liveness and with loops marked.
+
+    int loop_indent = 0;
+
+    BytecodeArrayIterator forward_iterator(bytecode_array());
+    for (; !forward_iterator.done(); forward_iterator.Advance()) {
+      int current_offset = forward_iterator.current_offset();
+      const BitVector& in_liveness =
+          GetInLivenessFor(current_offset)->bit_vector();
+      const BitVector& out_liveness =
+          GetOutLivenessFor(current_offset)->bit_vector();
+
+      for (int i = 0; i < in_liveness.length(); ++i) {
+        of << (in_liveness.Contains(i) ? 'L' : '.');
+      }
+
+      of << " | ";
+
+      for (int i = 0; i < out_liveness.length(); ++i) {
+        of << (out_liveness.Contains(i) ? 'L' : '.');
+      }
+
+      of << " : " << current_offset << " : ";
+
+      // Draw loop back edges by indenting everything between loop headers and
+      // jump loop instructions.
+      if (forward_iterator.current_bytecode() == Bytecode::kJumpLoop) {
+        loop_indent--;
+      }
+      for (int i = 0; i < loop_indent; ++i) {
+        of << " | ";
+      }
+      if (forward_iterator.current_bytecode() == Bytecode::kJumpLoop) {
+        of << " `-" << current_offset;
+      } else if (IsLoopHeader(current_offset)) {
+        of << " .>" << current_offset;
+        loop_indent++;
+      }
+      forward_iterator.PrintTo(of) << std::endl;
+
+      if (current_offset == invalid_offset) {
+        // Underline the invalid liveness.
+        if (which_invalid == 0) {
+          for (int i = 0; i < in_liveness.length(); ++i) {
+            of << '^';
+          }
+        } else {
+          for (int i = 0; i < in_liveness.length() + 3; ++i) {
+            of << ' ';
+          }
+          for (int i = 0; i < out_liveness.length(); ++i) {
+            of << '^';
+          }
+        }
+
+        // Make sure to draw the loop indentation marks on this additional line.
+        of << " : " << current_offset << " : ";
+        for (int i = 0; i < loop_indent; ++i) {
+          of << " | ";
+        }
+
+        of << std::endl;
+      }
+    }
+  }
+
+  return invalid_offset == -1;
+}
+#endif
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/bytecode-analysis.h b/src/compiler/bytecode-analysis.h
new file mode 100644
index 0000000..ad93f8a
--- /dev/null
+++ b/src/compiler/bytecode-analysis.h
@@ -0,0 +1,126 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_ANALYSIS_H_
+#define V8_COMPILER_BYTECODE_ANALYSIS_H_
+
+#include "src/base/hashmap.h"
+#include "src/bit-vector.h"
+#include "src/compiler/bytecode-liveness-map.h"
+#include "src/handles.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+
+namespace compiler {
+
+class V8_EXPORT_PRIVATE BytecodeLoopAssignments {
+ public:
+  BytecodeLoopAssignments(int parameter_count, int register_count, Zone* zone);
+
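+  // Add marks {r} as assigned in the loop; AddPair and AddTriple are assumed
+  // to mark the two or three consecutive registers starting at {r}, for
+  // bytecodes that write register pairs or triples.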
+  void Add(interpreter::Register r);
+  void AddPair(interpreter::Register r);
+  void AddTriple(interpreter::Register r);
+  void AddAll();
+  void Union(const BytecodeLoopAssignments& other);
+
+  bool ContainsParameter(int index) const;
+  bool ContainsLocal(int index) const;
+  bool ContainsAccumulator() const;
+
+  int parameter_count() const { return parameter_count_; }
+  int local_count() const { return bit_vector_->length() - parameter_count_; }
+
+ private:
+  int parameter_count_;
+  BitVector* bit_vector_;
+};
+
+struct V8_EXPORT_PRIVATE LoopInfo {
+ public:
+  LoopInfo(int parent_offset, int parameter_count, int register_count,
+           Zone* zone)
+      : parent_offset_(parent_offset),
+        assignments_(parameter_count, register_count, zone) {}
+
+  int parent_offset() const { return parent_offset_; }
+
+  BytecodeLoopAssignments& assignments() { return assignments_; }
+  const BytecodeLoopAssignments& assignments() const { return assignments_; }
+
+ private:
+  // The offset to the parent loop, or -1 if there is no parent.
+  int parent_offset_;
+  BytecodeLoopAssignments assignments_;
+};
+
+class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
+ public:
+  BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
+                   bool do_liveness_analysis);
+
+  // Analyze the bytecodes to find the loop ranges, loop nesting, loop
+  // assignments and liveness, under the assumption that there is an OSR bailout
+  // at {osr_bailout_id}.
+  //
+  // No other methods in this class return valid information until this has been
+  // called.
+  void Analyze(BailoutId osr_bailout_id);
+
+  // Returns true if the given offset is a loop header.
+  bool IsLoopHeader(int offset) const;
+  // Gets the loop header offset of the innermost loop containing the
+  // arbitrary {offset}, or -1 if the {offset} is not inside any loop.
+  int GetLoopOffsetFor(int offset) const;
+  // Gets the loop info of the loop header at {header_offset}.
+  const LoopInfo& GetLoopInfoFor(int header_offset) const;
+
+  // Gets the in-liveness for the bytecode at {offset}.
+  const BytecodeLivenessState* GetInLivenessFor(int offset) const;
+
+  // Gets the out-liveness for the bytecode at {offset}.
+  const BytecodeLivenessState* GetOutLivenessFor(int offset) const;
+
+  std::ostream& PrintLivenessTo(std::ostream& os) const;
+
+ private:
+  struct LoopStackEntry {
+    int header_offset;
+    LoopInfo* loop_info;
+  };
+
+  void PushLoop(int loop_header, int loop_end);
+
+#if DEBUG
+  bool LivenessIsValid();
+#endif
+
+  Zone* zone() const { return zone_; }
+  Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
+ private:
+  Handle<BytecodeArray> bytecode_array_;
+  bool do_liveness_analysis_;
+  Zone* zone_;
+
+  ZoneStack<LoopStackEntry> loop_stack_;
+  ZoneVector<int> loop_end_index_queue_;
+
+  ZoneMap<int, int> end_to_header_;
+  ZoneMap<int, LoopInfo> header_to_info_;
+
+  BytecodeLivenessMap liveness_map_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysis);
+};
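+
+// Sketch of typical usage (illustrative only, not part of this patch):
+//
+//   BytecodeAnalysis analysis(bytecode_array, zone,
+//                             true /* do_liveness_analysis */);
+//   analysis.Analyze(BailoutId::None());
+//   if (analysis.IsLoopHeader(offset)) {
+//     const LoopInfo& info = analysis.GetLoopInfoFor(offset);
+//     // ... use info.assignments() to decide which values need phis.
+//   }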
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_BYTECODE_ANALYSIS_H_
diff --git a/src/compiler/bytecode-branch-analysis.cc b/src/compiler/bytecode-branch-analysis.cc
deleted file mode 100644
index 4e96a53..0000000
--- a/src/compiler/bytecode-branch-analysis.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/bytecode-branch-analysis.h"
-
-#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-BytecodeBranchAnalysis::BytecodeBranchAnalysis(
-    Handle<BytecodeArray> bytecode_array, Zone* zone)
-    : bytecode_array_(bytecode_array),
-      is_backward_target_(bytecode_array->length(), zone),
-      is_forward_target_(bytecode_array->length(), zone),
-      zone_(zone) {}
-
-void BytecodeBranchAnalysis::Analyze() {
-  interpreter::BytecodeArrayIterator iterator(bytecode_array());
-  while (!iterator.done()) {
-    interpreter::Bytecode bytecode = iterator.current_bytecode();
-    int current_offset = iterator.current_offset();
-    if (interpreter::Bytecodes::IsJump(bytecode)) {
-      AddBranch(current_offset, iterator.GetJumpTargetOffset());
-    }
-    iterator.Advance();
-  }
-}
-
-void BytecodeBranchAnalysis::AddBranch(int source_offset, int target_offset) {
-  if (source_offset < target_offset) {
-    is_forward_target_.Add(target_offset);
-  } else {
-    is_backward_target_.Add(target_offset);
-  }
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/bytecode-branch-analysis.h b/src/compiler/bytecode-branch-analysis.h
deleted file mode 100644
index 7d32da8..0000000
--- a/src/compiler/bytecode-branch-analysis.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
-#define V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
-
-#include "src/bit-vector.h"
-#include "src/handles.h"
-
-namespace v8 {
-namespace internal {
-
-class BytecodeArray;
-
-namespace compiler {
-
-// A class for identifying branch targets within a bytecode array.
-// This information can be used to construct the local control flow
-// logic for high-level IR graphs built from bytecode.
-//
-// N.B. If this class is used to determine loop headers, then such a
-// usage relies on the only backwards branches in bytecode being jumps
-// back to loop headers.
-class BytecodeBranchAnalysis BASE_EMBEDDED {
- public:
-  BytecodeBranchAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone);
-
-  // Analyze the bytecodes to find the branch sites and their
-  // targets. No other methods in this class return valid information
-  // until this has been called.
-  void Analyze();
-
-  // Returns true if there are any forward branches to the bytecode at
-  // |offset|.
-  bool forward_branches_target(int offset) const {
-    return is_forward_target_.Contains(offset);
-  }
-
-  // Returns true if there are any backward branches to the bytecode
-  // at |offset|.
-  bool backward_branches_target(int offset) const {
-    return is_backward_target_.Contains(offset);
-  }
-
- private:
-  void AddBranch(int origin_offset, int target_offset);
-
-  Zone* zone() const { return zone_; }
-  Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
-
-  Handle<BytecodeArray> bytecode_array_;
-  BitVector is_backward_target_;
-  BitVector is_forward_target_;
-  Zone* zone_;
-
-  DISALLOW_COPY_AND_ASSIGN(BytecodeBranchAnalysis);
-};
-
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index 34b50df..aaeee66 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -7,12 +7,14 @@
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
 #include "src/compilation-info.h"
-#include "src/compiler/bytecode-branch-analysis.h"
 #include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-type-hint-lowering.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/objects-inl.h"
+#include "src/objects/literal-objects.h"
 
 namespace v8 {
 namespace internal {
@@ -36,7 +38,6 @@
 
   Node* LookupAccumulator() const;
   Node* LookupRegister(interpreter::Register the_register) const;
-  void MarkAllRegistersLive();
 
   void BindAccumulator(Node* node,
                        FrameStateAttachmentMode mode = kDontAttachFrameState);
@@ -57,7 +58,8 @@
   // Preserve a checkpoint of the environment for the IR graph. Any
   // further mutation of the environment will not affect checkpoints.
   Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine,
-                   bool owner_has_exception);
+                   bool owner_has_exception,
+                   const BytecodeLivenessState* liveness);
 
   // Control dependency tracked by this environment.
   Node* GetControlDependency() const { return control_dependency_; }
@@ -68,30 +70,29 @@
   Node* Context() const { return context_; }
   void SetContext(Node* new_context) { context_ = new_context; }
 
-  Environment* CopyForConditional();
-  Environment* CopyForLoop();
-  Environment* CopyForOsrEntry();
+  Environment* Copy();
   void Merge(Environment* other);
-  void PrepareForOsrEntry();
 
-  void PrepareForLoopExit(Node* loop);
+  void PrepareForOsrEntry();
+  void PrepareForLoop(const BytecodeLoopAssignments& assignments);
+  void PrepareForLoopExit(Node* loop,
+                          const BytecodeLoopAssignments& assignments);
 
  private:
-  Environment(const Environment* copy, LivenessAnalyzerBlock* liveness_block);
-  void PrepareForLoop();
+  explicit Environment(const Environment* copy);
 
-  bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
-  void UpdateStateValues(Node** state_values, int offset, int count);
+  bool StateValuesRequireUpdate(Node** state_values, Node** values, int count);
+  void UpdateStateValues(Node** state_values, Node** values, int count);
+  void UpdateStateValuesWithCache(Node** state_values, Node** values, int count,
+                                  const BitVector* liveness,
+                                  int liveness_offset);
 
   int RegisterToValuesIndex(interpreter::Register the_register) const;
 
-  bool IsLivenessBlockConsistent() const;
-
   Zone* zone() const { return builder_->local_zone(); }
   Graph* graph() const { return builder_->graph(); }
   CommonOperatorBuilder* common() const { return builder_->common(); }
   BytecodeGraphBuilder* builder() const { return builder_; }
-  LivenessAnalyzerBlock* liveness_block() const { return liveness_block_; }
   const NodeVector* values() const { return &values_; }
   NodeVector* values() { return &values_; }
   int register_base() const { return register_base_; }
@@ -100,7 +101,6 @@
   BytecodeGraphBuilder* builder_;
   int register_count_;
   int parameter_count_;
-  LivenessAnalyzerBlock* liveness_block_;
   Node* context_;
   Node* control_dependency_;
   Node* effect_dependency_;
@@ -124,9 +124,6 @@
     : builder_(builder),
       register_count_(register_count),
       parameter_count_(parameter_count),
-      liveness_block_(builder->is_liveness_analysis_enabled_
-                          ? builder_->liveness_analyzer()->NewBlock()
-                          : nullptr),
       context_(context),
       control_dependency_(control_dependency),
       effect_dependency_(control_dependency),
@@ -161,12 +158,10 @@
 }
 
 BytecodeGraphBuilder::Environment::Environment(
-    const BytecodeGraphBuilder::Environment* other,
-    LivenessAnalyzerBlock* liveness_block)
+    const BytecodeGraphBuilder::Environment* other)
     : builder_(other->builder_),
       register_count_(other->register_count_),
       parameter_count_(other->parameter_count_),
-      liveness_block_(liveness_block),
       context_(other->context_),
       control_dependency_(other->control_dependency_),
       effect_dependency_(other->effect_dependency_),
@@ -189,16 +184,7 @@
   }
 }
 
-bool BytecodeGraphBuilder::Environment::IsLivenessBlockConsistent() const {
-  return !builder_->IsLivenessAnalysisEnabled() ==
-         (liveness_block() == nullptr);
-}
-
 Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
-  DCHECK(IsLivenessBlockConsistent());
-  if (liveness_block() != nullptr) {
-    liveness_block()->LookupAccumulator();
-  }
   return values()->at(accumulator_base_);
 }
 
@@ -213,32 +199,15 @@
     return builder()->GetNewTarget();
   } else {
     int values_index = RegisterToValuesIndex(the_register);
-    if (liveness_block() != nullptr && !the_register.is_parameter()) {
-      DCHECK(IsLivenessBlockConsistent());
-      liveness_block()->Lookup(the_register.index());
-    }
     return values()->at(values_index);
   }
 }
 
-void BytecodeGraphBuilder::Environment::MarkAllRegistersLive() {
-  DCHECK(IsLivenessBlockConsistent());
-  if (liveness_block() != nullptr) {
-    for (int i = 0; i < register_count(); ++i) {
-      liveness_block()->Lookup(i);
-    }
-  }
-}
-
 void BytecodeGraphBuilder::Environment::BindAccumulator(
     Node* node, FrameStateAttachmentMode mode) {
   if (mode == FrameStateAttachmentMode::kAttachFrameState) {
     builder()->PrepareFrameState(node, OutputFrameStateCombine::PokeAt(0));
   }
-  DCHECK(IsLivenessBlockConsistent());
-  if (liveness_block() != nullptr) {
-    liveness_block()->BindAccumulator();
-  }
   values()->at(accumulator_base_) = node;
 }
 
@@ -251,10 +220,6 @@
                                            accumulator_base_ - values_index));
   }
   values()->at(values_index) = node;
-  if (liveness_block() != nullptr && !the_register.is_parameter()) {
-    DCHECK(IsLivenessBlockConsistent());
-    liveness_block()->Bind(the_register.index());
-  }
 }
 
 void BytecodeGraphBuilder::Environment::BindRegistersToProjections(
@@ -278,45 +243,13 @@
   }
 }
 
-
-BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForLoop() {
-  PrepareForLoop();
-  if (liveness_block() != nullptr) {
-    // Finish the current block before copying.
-    liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
-  }
-  return new (zone()) Environment(this, liveness_block());
-}
-
-BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForOsrEntry() {
-  return new (zone())
-      Environment(this, builder_->liveness_analyzer()->NewBlock());
-}
-
-BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForConditional() {
-  LivenessAnalyzerBlock* copy_liveness_block = nullptr;
-  if (liveness_block() != nullptr) {
-    copy_liveness_block =
-        builder_->liveness_analyzer()->NewBlock(liveness_block());
-    liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
-  }
-  return new (zone()) Environment(this, copy_liveness_block);
+BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::Environment::Copy() {
+  return new (zone()) Environment(this);
 }
 
 
 void BytecodeGraphBuilder::Environment::Merge(
     BytecodeGraphBuilder::Environment* other) {
-  if (builder_->is_liveness_analysis_enabled_) {
-    if (GetControlDependency()->opcode() != IrOpcode::kLoop) {
-      liveness_block_ =
-          builder()->liveness_analyzer()->NewBlock(liveness_block());
-    }
-    liveness_block()->AddPredecessor(other->liveness_block());
-  }
-
   // Create a merge of the control dependencies of both environments and update
   // the current environment's control dependency accordingly.
   Node* control = builder()->MergeControl(GetControlDependency(),
@@ -337,8 +270,8 @@
   }
 }
 
-
-void BytecodeGraphBuilder::Environment::PrepareForLoop() {
+void BytecodeGraphBuilder::Environment::PrepareForLoop(
+    const BytecodeLoopAssignments& assignments) {
   // Create a control node for the loop header.
   Node* control = builder()->NewLoop();
 
@@ -346,11 +279,23 @@
   Node* effect = builder()->NewEffectPhi(1, GetEffectDependency(), control);
   UpdateEffectDependency(effect);
 
-  // Assume everything in the loop is updated.
+  // Create Phis for any values that may be updated by the end of the loop.
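+  // (Values the loop never assigns stay loop-invariant, so threading them
+  // through a Phi would only add redundant nodes.)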
   context_ = builder()->NewPhi(1, context_, control);
-  int size = static_cast<int>(values()->size());
-  for (int i = 0; i < size; i++) {
-    values()->at(i) = builder()->NewPhi(1, values()->at(i), control);
+  for (int i = 0; i < parameter_count(); i++) {
+    if (assignments.ContainsParameter(i)) {
+      values_[i] = builder()->NewPhi(1, values_[i], control);
+    }
+  }
+  for (int i = 0; i < register_count(); i++) {
+    if (assignments.ContainsLocal(i)) {
+      int index = register_base() + i;
+      values_[index] = builder()->NewPhi(1, values_[index], control);
+    }
+  }
+
+  if (assignments.ContainsAccumulator()) {
+    values_[accumulator_base()] =
+        builder()->NewPhi(1, values_[accumulator_base()], control);
   }
 
   // Connect to the loop end.
@@ -384,7 +329,7 @@
 
   BailoutId loop_id(builder_->bytecode_iterator().current_offset());
   Node* frame_state =
-      Checkpoint(loop_id, OutputFrameStateCombine::Ignore(), false);
+      Checkpoint(loop_id, OutputFrameStateCombine::Ignore(), false, nullptr);
   Node* checkpoint =
       graph()->NewNode(common()->Checkpoint(), frame_state, entry, entry);
   UpdateEffectDependency(checkpoint);
@@ -402,22 +347,22 @@
 }
 
 bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
-    Node** state_values, int offset, int count) {
+    Node** state_values, Node** values, int count) {
   if (*state_values == nullptr) {
     return true;
   }
-  DCHECK_EQ((*state_values)->InputCount(), count);
-  DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
-  Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
+  Node::Inputs inputs = (*state_values)->inputs();
+  if (inputs.count() != count) return true;
   for (int i = 0; i < count; i++) {
-    if ((*state_values)->InputAt(i) != env_values[i]) {
+    if (inputs[i] != values[i]) {
       return true;
     }
   }
   return false;
 }
 
-void BytecodeGraphBuilder::Environment::PrepareForLoopExit(Node* loop) {
+void BytecodeGraphBuilder::Environment::PrepareForLoopExit(
+    Node* loop, const BytecodeLoopAssignments& assignments) {
   DCHECK_EQ(loop->opcode(), IrOpcode::kLoop);
 
   Node* control = GetControlDependency();
@@ -431,34 +376,80 @@
                                          GetEffectDependency(), loop_exit);
   UpdateEffectDependency(effect_rename);
 
-  // TODO(jarin) We should also rename context here. However, uncoditional
+  // TODO(jarin) We should also rename context here. However, unconditional
   // renaming confuses global object and native context specialization.
   // We should only rename if the context is assigned in the loop.
 
-  // Rename the environmnent values.
-  for (size_t i = 0; i < values_.size(); i++) {
-    Node* rename =
-        graph()->NewNode(common()->LoopExitValue(), values_[i], loop_exit);
-    values_[i] = rename;
+  // Rename the environment values if they were assigned in the loop.
+  for (int i = 0; i < parameter_count(); i++) {
+    if (assignments.ContainsParameter(i)) {
+      Node* rename =
+          graph()->NewNode(common()->LoopExitValue(), values_[i], loop_exit);
+      values_[i] = rename;
+    }
+  }
+  for (int i = 0; i < register_count(); i++) {
+    if (assignments.ContainsLocal(i)) {
+      Node* rename = graph()->NewNode(common()->LoopExitValue(),
+                                      values_[register_base() + i], loop_exit);
+      values_[register_base() + i] = rename;
+    }
+  }
+
+  if (assignments.ContainsAccumulator()) {
+    Node* rename = graph()->NewNode(common()->LoopExitValue(),
+                                    values_[accumulator_base()], loop_exit);
+    values_[accumulator_base()] = rename;
   }
 }
 
 void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
-                                                          int offset,
+                                                          Node** values,
                                                           int count) {
-  if (StateValuesRequireUpdate(state_values, offset, count)) {
-    const Operator* op = common()->StateValues(count);
-    (*state_values) = graph()->NewNode(op, count, &values()->at(offset));
+  if (StateValuesRequireUpdate(state_values, values, count)) {
+    const Operator* op = common()->StateValues(count, SparseInputMask::Dense());
+    (*state_values) = graph()->NewNode(op, count, values);
   }
 }
 
+void BytecodeGraphBuilder::Environment::UpdateStateValuesWithCache(
+    Node** state_values, Node** values, int count, const BitVector* liveness,
+    int liveness_offset) {
+  *state_values = builder_->state_values_cache_.GetNodeForValues(
+      values, static_cast<size_t>(count), liveness, liveness_offset);
+}
+
 Node* BytecodeGraphBuilder::Environment::Checkpoint(
     BailoutId bailout_id, OutputFrameStateCombine combine,
-    bool owner_has_exception) {
-  UpdateStateValues(&parameters_state_values_, 0, parameter_count());
-  UpdateStateValues(&registers_state_values_, register_base(),
-                    register_count());
-  UpdateStateValues(&accumulator_state_values_, accumulator_base(), 1);
+    bool owner_has_exception, const BytecodeLivenessState* liveness) {
+  if (parameter_count() == register_count()) {
+    // Re-use the state-value cache if the number of local registers happens
+    // to match the parameter count.
+    UpdateStateValuesWithCache(&parameters_state_values_, &values()->at(0),
+                               parameter_count(), nullptr, 0);
+  } else {
+    UpdateStateValues(&parameters_state_values_, &values()->at(0),
+                      parameter_count());
+  }
+
+  UpdateStateValuesWithCache(&registers_state_values_,
+                             &values()->at(register_base()), register_count(),
+                             liveness ? &liveness->bit_vector() : nullptr, 0);
+
+  bool accumulator_is_live = !liveness || liveness->AccumulatorIsLive();
+  if (parameter_count() == 1 && accumulator_is_live &&
+      values()->at(accumulator_base()) == values()->at(0)) {
+    // Re-use the parameter state values if there happens to only be one
+    // parameter and the accumulator is live and holds that parameter's value.
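+    // (Illustration: right after the sole parameter is loaded into the
+    // accumulator, both slots hold the same node, so one StateValues node
+    // can serve both FrameState inputs.)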
+    accumulator_state_values_ = parameters_state_values_;
+  } else {
+    // Otherwise, use the state values cache to hopefully re-use local register
+    // state values (if there is only one local register), or at the very least
+    // re-use previous accumulator state values.
+    UpdateStateValuesWithCache(
+        &accumulator_state_values_, &values()->at(accumulator_base()), 1,
+        liveness ? &liveness->bit_vector() : nullptr, register_count());
+  }
 
   const Operator* op = common()->FrameState(
       bailout_id, combine, builder()->frame_state_function_info());
@@ -467,51 +458,40 @@
       accumulator_state_values_, Context(), builder()->GetFunctionClosure(),
       builder()->graph()->start());
 
-  if (liveness_block() != nullptr) {
-    // If the owning node has an exception, register the checkpoint to the
-    // predecessor so that the checkpoint is used for both the normal and the
-    // exceptional paths. Yes, this is a terrible hack and we might want
-    // to use an explicit frame state for the exceptional path.
-    if (owner_has_exception) {
-      liveness_block()->GetPredecessor()->Checkpoint(result);
-    } else {
-      liveness_block()->Checkpoint(result);
-    }
-  }
-
   return result;
 }
 
 BytecodeGraphBuilder::BytecodeGraphBuilder(
-    Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
-    float invocation_frequency, SourcePositionTable* source_positions,
-    int inlining_id)
+    Zone* local_zone, Handle<SharedFunctionInfo> shared_info,
+    Handle<FeedbackVector> feedback_vector, BailoutId osr_ast_id,
+    JSGraph* jsgraph, float invocation_frequency,
+    SourcePositionTable* source_positions, int inlining_id)
     : local_zone_(local_zone),
       jsgraph_(jsgraph),
       invocation_frequency_(invocation_frequency),
-      bytecode_array_(handle(info->shared_info()->bytecode_array())),
+      bytecode_array_(handle(shared_info->bytecode_array())),
       exception_handler_table_(
           handle(HandlerTable::cast(bytecode_array()->handler_table()))),
-      feedback_vector_(handle(info->closure()->feedback_vector())),
+      feedback_vector_(feedback_vector),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kInterpretedFunction,
           bytecode_array()->parameter_count(),
-          bytecode_array()->register_count(), info->shared_info())),
-      osr_ast_id_(info->osr_ast_id()),
+          bytecode_array()->register_count(), shared_info)),
+      bytecode_iterator_(nullptr),
+      bytecode_analysis_(nullptr),
+      environment_(nullptr),
+      osr_ast_id_(osr_ast_id),
+      osr_loop_offset_(-1),
       merge_environments_(local_zone),
       exception_handlers_(local_zone),
       current_exception_handler_(0),
       input_buffer_size_(0),
       input_buffer_(nullptr),
+      needs_eager_checkpoint_(true),
       exit_controls_(local_zone),
-      is_liveness_analysis_enabled_(FLAG_analyze_environment_liveness &&
-                                    info->is_deoptimization_enabled()),
       state_values_cache_(jsgraph),
-      liveness_analyzer_(
-          static_cast<size_t>(bytecode_array()->register_count()), true,
-          local_zone),
       source_positions_(source_positions),
-      start_position_(info->shared_info()->start_position(), inlining_id) {}
+      start_position_(shared_info->start_position(), inlining_id) {}
 
 Node* BytecodeGraphBuilder::GetNewTarget() {
   if (!new_target_.is_set()) {
@@ -551,14 +531,16 @@
 Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
   const Operator* op =
       javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
-  Node* native_context = NewNode(op, environment()->Context());
-  return NewNode(javascript()->LoadContext(0, index, true), native_context);
+  Node* native_context = NewNode(op);
+  Node* result = NewNode(javascript()->LoadContext(0, index, true));
+  NodeProperties::ReplaceContextInput(result, native_context);
+  return result;
 }
 
 
 VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
-  FeedbackVectorSlot slot;
-  if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
+  FeedbackSlot slot;
+  if (slot_id >= FeedbackVector::kReservedIndexCount) {
     slot = feedback_vector()->ToSlot(slot_id);
   }
   return VectorSlotPair(feedback_vector(), slot);
@@ -587,24 +569,42 @@
   Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
   graph()->SetEnd(end);
 
-  ClearNonLiveSlotsInFrameStates();
-
   return true;
 }
 
 void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
-  if (environment()->GetEffectDependency()->opcode() != IrOpcode::kCheckpoint) {
+  if (needs_eager_checkpoint()) {
     // Create an explicit checkpoint node for before the operation. This only
     // needs to happen if we aren't effect-dominated by a {Checkpoint} already.
+    mark_as_needing_eager_checkpoint(false);
     Node* node = NewNode(common()->Checkpoint());
     DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
     DCHECK_EQ(IrOpcode::kDead,
               NodeProperties::GetFrameStateInput(node)->opcode());
     BailoutId bailout_id(bytecode_iterator().current_offset());
+
+    const BytecodeLivenessState* liveness_before =
+        bytecode_analysis()->GetInLivenessFor(
+            bytecode_iterator().current_offset());
+
     Node* frame_state_before = environment()->Checkpoint(
-        bailout_id, OutputFrameStateCombine::Ignore(), false);
+        bailout_id, OutputFrameStateCombine::Ignore(), false, liveness_before);
     NodeProperties::ReplaceFrameStateInput(node, frame_state_before);
+#ifdef DEBUG
+  } else {
+    // In case we skipped checkpoint creation above, we must be able to find an
+    // existing checkpoint that effect-dominates the nodes about to be created.
+    // Starting a search from the current effect-dependency has to succeed.
+    Node* effect = environment()->GetEffectDependency();
+    while (effect->opcode() != IrOpcode::kCheckpoint) {
+      DCHECK(effect->op()->HasProperty(Operator::kNoWrite));
+      DCHECK_EQ(1, effect->op()->EffectInputCount());
+      effect = NodeProperties::GetEffectInput(effect);
+    }
   }
+#else
+  }
+#endif  // DEBUG
 }
 
 void BytecodeGraphBuilder::PrepareFrameState(Node* node,
@@ -617,40 +617,36 @@
               NodeProperties::GetFrameStateInput(node)->opcode());
     BailoutId bailout_id(bytecode_iterator().current_offset());
     bool has_exception = NodeProperties::IsExceptionalCall(node);
-    Node* frame_state_after =
-        environment()->Checkpoint(bailout_id, combine, has_exception);
+
+    const BytecodeLivenessState* liveness_after =
+        bytecode_analysis()->GetOutLivenessFor(
+            bytecode_iterator().current_offset());
+
+    Node* frame_state_after = environment()->Checkpoint(
+        bailout_id, combine, has_exception, liveness_after);
     NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
   }
 }
 
-void BytecodeGraphBuilder::ClearNonLiveSlotsInFrameStates() {
-  if (!IsLivenessAnalysisEnabled()) {
-    return;
-  }
-  NonLiveFrameStateSlotReplacer replacer(
-      &state_values_cache_, jsgraph()->OptimizedOutConstant(),
-      liveness_analyzer()->local_count(), true, local_zone());
-  liveness_analyzer()->Run(&replacer);
-  if (FLAG_trace_environment_liveness) {
-    OFStream os(stdout);
-    liveness_analyzer()->Print(os);
-  }
-}
-
 void BytecodeGraphBuilder::VisitBytecodes(bool stack_check) {
-  BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
-  BytecodeLoopAnalysis loop_analysis(bytecode_array(), &analysis, local_zone());
-  analysis.Analyze();
-  loop_analysis.Analyze();
-  set_branch_analysis(&analysis);
-  set_loop_analysis(&loop_analysis);
+  BytecodeAnalysis bytecode_analysis(bytecode_array(), local_zone(),
+                                     FLAG_analyze_environment_liveness);
+  bytecode_analysis.Analyze(osr_ast_id_);
+  set_bytecode_analysis(&bytecode_analysis);
 
   interpreter::BytecodeArrayIterator iterator(bytecode_array());
   set_bytecode_iterator(&iterator);
   SourcePositionTableIterator source_position_iterator(
       bytecode_array()->source_position_table());
 
+  if (FLAG_trace_environment_liveness) {
+    OFStream of(stdout);
+
+    bytecode_analysis.PrintLivenessTo(of);
+  }
+
   BuildOSRNormalEntryPoint();
+
   for (; !iterator.done(); iterator.Advance()) {
     int current_offset = iterator.current_offset();
     UpdateCurrentSourcePosition(&source_position_iterator, current_offset);
@@ -658,7 +654,6 @@
     SwitchToMergeEnvironment(current_offset);
     if (environment() != nullptr) {
       BuildLoopHeaderEnvironment(current_offset);
-      BuildOSRLoopEntryPoint(current_offset);
 
       // Skip the first stack check if stack_check is false
       if (!stack_check &&
@@ -677,8 +672,7 @@
       }
     }
   }
-
-  set_branch_analysis(nullptr);
+  set_bytecode_analysis(nullptr);
   set_bytecode_iterator(nullptr);
   DCHECK(exception_handlers_.empty());
 }
@@ -741,27 +735,32 @@
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
 }
 
-Node* BytecodeGraphBuilder::BuildLoadGlobal(uint32_t feedback_slot_index,
+Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle<Name> name,
+                                            uint32_t feedback_slot_index,
                                             TypeofMode typeof_mode) {
   VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index);
-  DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
-            feedback_vector()->GetKind(feedback.slot()));
-  Handle<Name> name(feedback_vector()->GetName(feedback.slot()));
+  DCHECK(IsLoadGlobalICKind(feedback_vector()->GetKind(feedback.slot())));
   const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
-  return NewNode(op, GetFunctionClosure());
+  return NewNode(op);
 }
 
 void BytecodeGraphBuilder::VisitLdaGlobal() {
   PrepareEagerCheckpoint();
-  Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
-                               TypeofMode::NOT_INSIDE_TYPEOF);
+  Handle<Name> name =
+      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+  uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
+  Node* node =
+      BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF);
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
   PrepareEagerCheckpoint();
-  Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
-                               TypeofMode::INSIDE_TYPEOF);
+  Handle<Name> name =
+      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+  uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
+  Node* node =
+      BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF);
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
@@ -774,7 +773,7 @@
   Node* value = environment()->LookupAccumulator();
 
   const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
-  Node* node = NewNode(op, value, GetFunctionClosure());
+  Node* node = NewNode(op, value);
   environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
@@ -786,27 +785,56 @@
   BuildStoreGlobal(LanguageMode::STRICT);
 }
 
+void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
+  PrepareEagerCheckpoint();
+
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Node* name =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+  Node* value = environment()->LookupAccumulator();
+  int flags = bytecode_iterator().GetFlagOperand(2);
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3));
+
+  const Operator* op = javascript()->StoreDataPropertyInLiteral(feedback);
+  Node* node = NewNode(op, object, name, value, jsgraph()->Constant(flags));
+  environment()->RecordAfterState(node, Environment::kAttachFrameState);
+}
+
 void BytecodeGraphBuilder::VisitLdaContextSlot() {
-  // TODO(mythria): immutable flag is also set to false. This information is not
-  // available in bytecode array. update this code when the implementation
-  // changes.
   const Operator* op = javascript()->LoadContext(
       bytecode_iterator().GetUnsignedImmediateOperand(2),
       bytecode_iterator().GetIndexOperand(1), false);
+  Node* node = NewNode(op);
   Node* context =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  Node* node = NewNode(op, context);
+  NodeProperties::ReplaceContextInput(node, context);
+  environment()->BindAccumulator(node);
+}
+
+void BytecodeGraphBuilder::VisitLdaImmutableContextSlot() {
+  const Operator* op = javascript()->LoadContext(
+      bytecode_iterator().GetUnsignedImmediateOperand(2),
+      bytecode_iterator().GetIndexOperand(1), true);
+  Node* node = NewNode(op);
+  Node* context =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  NodeProperties::ReplaceContextInput(node, context);
   environment()->BindAccumulator(node);
 }
 
 void BytecodeGraphBuilder::VisitLdaCurrentContextSlot() {
-  // TODO(mythria): immutable flag is also set to false. This information is not
-  // available in bytecode array. update this code when the implementation
-  // changes.
   const Operator* op = javascript()->LoadContext(
       0, bytecode_iterator().GetIndexOperand(0), false);
-  Node* context = environment()->Context();
-  Node* node = NewNode(op, context);
+  Node* node = NewNode(op);
+  environment()->BindAccumulator(node);
+}
+
+void BytecodeGraphBuilder::VisitLdaImmutableCurrentContextSlot() {
+  const Operator* op = javascript()->LoadContext(
+      0, bytecode_iterator().GetIndexOperand(0), true);
+  Node* node = NewNode(op);
   environment()->BindAccumulator(node);
 }
 
@@ -814,18 +842,18 @@
   const Operator* op = javascript()->StoreContext(
       bytecode_iterator().GetUnsignedImmediateOperand(2),
       bytecode_iterator().GetIndexOperand(1));
+  Node* value = environment()->LookupAccumulator();
+  Node* node = NewNode(op, value);
   Node* context =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  Node* value = environment()->LookupAccumulator();
-  NewNode(op, context, value);
+  NodeProperties::ReplaceContextInput(node, context);
 }
 
 void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
   const Operator* op =
       javascript()->StoreContext(0, bytecode_iterator().GetIndexOperand(0));
-  Node* context = environment()->Context();
   Node* value = environment()->LookupAccumulator();
-  NewNode(op, context, value);
+  NewNode(op, value);
 }
 
 void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
@@ -857,15 +885,14 @@
   // the same scope as the variable itself has no way of shadowing it.
   for (uint32_t d = 0; d < depth; d++) {
     Node* extension_slot =
-        NewNode(javascript()->LoadContext(d, Context::EXTENSION_INDEX, false),
-                environment()->Context());
+        NewNode(javascript()->LoadContext(d, Context::EXTENSION_INDEX, false));
 
     Node* check_no_extension =
-        NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
-                extension_slot, jsgraph()->TheHoleConstant());
+        NewNode(simplified()->ReferenceEqual(), extension_slot,
+                jsgraph()->TheHoleConstant());
 
     NewBranch(check_no_extension);
-    Environment* true_environment = environment()->CopyForConditional();
+    Environment* true_environment = environment()->Copy();
 
     {
       NewIfFalse();
@@ -904,8 +931,7 @@
     uint32_t slot_index = bytecode_iterator().GetIndexOperand(1);
 
     const Operator* op = javascript()->LoadContext(depth, slot_index, false);
-    Node* context = environment()->Context();
-    environment()->BindAccumulator(NewNode(op, context));
+    environment()->BindAccumulator(NewNode(op));
   }
 
   // Only build the slow path if there were any slow-path checks.
@@ -930,6 +956,7 @@
 
     fast_environment->Merge(environment());
     set_environment(fast_environment);
+    mark_as_needing_eager_checkpoint(true);
   }
 }
 
@@ -950,8 +977,10 @@
   // Fast path, do a global load.
   {
     PrepareEagerCheckpoint();
-    Node* node =
-        BuildLoadGlobal(bytecode_iterator().GetIndexOperand(1), typeof_mode);
+    Handle<Name> name =
+        Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+    uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
+    Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode);
     environment()->BindAccumulator(node, Environment::kAttachFrameState);
   }
 
@@ -977,6 +1006,7 @@
 
     fast_environment->Merge(environment());
     set_environment(fast_environment);
+    mark_as_needing_eager_checkpoint(true);
   }
 }
 
@@ -1018,7 +1048,7 @@
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
   const Operator* op = javascript()->LoadNamed(name, feedback);
-  Node* node = NewNode(op, object, GetFunctionClosure());
+  Node* node = NewNode(op, object);
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
@@ -1031,11 +1061,12 @@
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
 
   const Operator* op = javascript()->LoadProperty(feedback);
-  Node* node = NewNode(op, object, key, GetFunctionClosure());
+  Node* node = NewNode(op, object, key);
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
-void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
+void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode,
+                                           StoreMode store_mode) {
   PrepareEagerCheckpoint();
   Node* value = environment()->LookupAccumulator();
   Node* object =
@@ -1045,17 +1076,31 @@
   VectorSlotPair feedback =
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
-  const Operator* op = javascript()->StoreNamed(language_mode, name, feedback);
-  Node* node = NewNode(op, object, value, GetFunctionClosure());
+  const Operator* op;
+  if (store_mode == StoreMode::kOwn) {
+    DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
+              feedback.vector()->GetKind(feedback.slot()));
+    op = javascript()->StoreNamedOwn(name, feedback);
+  } else {
+    DCHECK(store_mode == StoreMode::kNormal);
+    DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()),
+              language_mode);
+    op = javascript()->StoreNamed(language_mode, name, feedback);
+  }
+  Node* node = NewNode(op, object, value);
   environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitStaNamedPropertySloppy() {
-  BuildNamedStore(LanguageMode::SLOPPY);
+  BuildNamedStore(LanguageMode::SLOPPY, StoreMode::kNormal);
 }
 
 void BytecodeGraphBuilder::VisitStaNamedPropertyStrict() {
-  BuildNamedStore(LanguageMode::STRICT);
+  BuildNamedStore(LanguageMode::STRICT, StoreMode::kNormal);
+}
+
+void BytecodeGraphBuilder::VisitStaNamedOwnProperty() {
+  BuildNamedStore(LanguageMode::STRICT, StoreMode::kOwn);
 }
 
 void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
@@ -1068,8 +1113,9 @@
   VectorSlotPair feedback =
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
+  DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()), language_mode);
   const Operator* op = javascript()->StoreProperty(language_mode, feedback);
-  Node* node = NewNode(op, object, key, value, GetFunctionClosure());
+  Node* node = NewNode(op, object, key, value);
   environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
@@ -1085,8 +1131,7 @@
   int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
   uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
   Node* module =
-      NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
-              environment()->Context());
+      NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, true));
   Node* value = NewNode(javascript()->LoadModule(cell_index), module);
   environment()->BindAccumulator(value);
 }
@@ -1095,8 +1140,7 @@
   int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
   uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
   Node* module =
-      NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
-              environment()->Context());
+      NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, true));
   Node* value = environment()->LookupAccumulator();
   NewNode(javascript()->StoreModule(cell_index), module, value);
 }
@@ -1117,12 +1161,14 @@
 void BytecodeGraphBuilder::VisitCreateClosure() {
   Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
       bytecode_iterator().GetConstantForIndexOperand(0));
+  int const slot_id = bytecode_iterator().GetIndexOperand(1);
+  VectorSlotPair pair = CreateVectorSlotPair(slot_id);
   PretenureFlag tenured =
       interpreter::CreateClosureFlags::PretenuredBit::decode(
-          bytecode_iterator().GetFlagOperand(1))
+          bytecode_iterator().GetFlagOperand(2))
           ? TENURED
           : NOT_TENURED;
-  const Operator* op = javascript()->CreateClosure(shared_info, tenured);
+  const Operator* op = javascript()->CreateClosure(shared_info, pair, tenured);
   Node* closure = NewNode(op);
   environment()->BindAccumulator(closure);
 }
@@ -1138,7 +1184,15 @@
 
 void BytecodeGraphBuilder::VisitCreateFunctionContext() {
   uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(0);
-  const Operator* op = javascript()->CreateFunctionContext(slots);
+  const Operator* op =
+      javascript()->CreateFunctionContext(slots, FUNCTION_SCOPE);
+  Node* context = NewNode(op, GetFunctionClosure());
+  environment()->BindAccumulator(context);
+}
+
+void BytecodeGraphBuilder::VisitCreateEvalContext() {
+  uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(0);
+  const Operator* op = javascript()->CreateFunctionContext(slots, EVAL_SCOPE);
   Node* context = NewNode(op, GetFunctionClosure());
   environment()->BindAccumulator(context);
 }
@@ -1198,16 +1252,21 @@
 }
 
 void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
-  Handle<FixedArray> constant_elements = Handle<FixedArray>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0));
+  Handle<ConstantElementsPair> constant_elements =
+      Handle<ConstantElementsPair>::cast(
+          bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
-  int literal_flags = bytecode_iterator().GetFlagOperand(2);
+  int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
+  int literal_flags =
+      interpreter::CreateArrayLiteralFlags::FlagsBits::decode(bytecode_flags);
   // Disable allocation site mementos. Only unoptimized code will collect
   // feedback about allocation site. Once the code is optimized we expect the
   // data to converge. So, we disable allocation site mementos in optimized
   // code. We can revisit this when we have data to the contrary.
   literal_flags |= ArrayLiteral::kDisableMementos;
-  int number_of_elements = constant_elements->length();
+  // TODO(mstarzinger): Thread through number of elements. The below number is
+  // only an estimate and does not match {ArrayLiteral::values::length}.
+  int number_of_elements = constant_elements->constant_values()->length();
   Node* literal = NewNode(
       javascript()->CreateLiteralArray(constant_elements, literal_flags,
                                        literal_index, number_of_elements),
@@ -1216,15 +1275,16 @@
 }
 
 void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
-  PrepareEagerCheckpoint();
-  Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0));
+  Handle<BoilerplateDescription> constant_properties =
+      Handle<BoilerplateDescription>::cast(
+          bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
   int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
   int literal_flags =
       interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
-  // TODO(mstarzinger): Thread through number of properties.
-  int number_of_properties = constant_properties->length() / 2;
+  // TODO(mstarzinger): Thread through the number of properties. The number
+  // below is only an estimate and does not match
+  // {ObjectLiteral::properties_count}.
+  int number_of_properties = constant_properties->size();
   Node* literal = NewNode(
       javascript()->CreateLiteralObject(constant_properties, literal_flags,
                                         literal_index, number_of_properties),
@@ -1260,13 +1320,13 @@
 
   // Slot index of 0 is used to indicate no feedback slot is available. Assert
   // the assumption that slot index 0 is never a valid feedback slot.
-  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+  STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
   int const slot_id = bytecode_iterator().GetIndexOperand(3);
   VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
 
   float const frequency = ComputeCallFrequency(slot_id);
-  const Operator* call = javascript()->CallFunction(
-      arg_count + 1, frequency, feedback, receiver_hint, tail_call_mode);
+  const Operator* call = javascript()->Call(arg_count + 1, frequency, feedback,
+                                            receiver_hint, tail_call_mode);
   Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
   environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
@@ -1275,6 +1335,19 @@
   BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kAny);
 }
 
+void BytecodeGraphBuilder::VisitCallWithSpread() {
+  PrepareEagerCheckpoint();
+  Node* callee =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+  const Operator* call =
+      javascript()->CallWithSpread(static_cast<int>(arg_count + 1));
+
+  Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
+  environment()->BindAccumulator(value, Environment::kAttachFrameState);
+}
+
 void BytecodeGraphBuilder::VisitCallProperty() {
   BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined);
 }
@@ -1295,7 +1368,7 @@
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
   // Create node to perform the JS runtime call.
-  const Operator* call = javascript()->CallFunction(arg_count + 1);
+  const Operator* call = javascript()->Call(arg_count + 1);
   Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
   environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
@@ -1340,6 +1413,37 @@
                                             Environment::kAttachFrameState);
 }
 
+Node* BytecodeGraphBuilder::ProcessConstructWithSpreadArguments(
+    const Operator* op, Node* callee, Node* new_target,
+    interpreter::Register first_arg, size_t arity) {
+  Node** all = local_zone()->NewArray<Node*>(arity);
+  all[0] = callee;
+  int first_arg_index = first_arg.index();
+  for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
+    all[i] = environment()->LookupRegister(
+        interpreter::Register(first_arg_index + i - 1));
+  }
+  all[arity - 1] = new_target;
+  Node* value = MakeNode(op, static_cast<int>(arity), all, false);
+  return value;
+}
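
The function above packs its inputs as [callee, arg_0 ... arg_{n-1}, new_target]: slot 0 is the callee, the middle slots are read from consecutive registers starting at first_arg, and the last slot carries new.target. A minimal standalone sketch of that packing, with PackConstructInputs as a hypothetical name (this is not V8 API):

    #include <vector>

    // Mirrors the layout built by ProcessConstructWithSpreadArguments:
    // all[0] = callee, all[1..n] = spread args, all[arity - 1] = new_target.
    std::vector<int> PackConstructInputs(int callee,
                                         const std::vector<int>& args,
                                         int new_target) {
      std::vector<int> all;
      all.reserve(args.size() + 2);
      all.push_back(callee);
      for (int arg : args) all.push_back(arg);
      all.push_back(new_target);
      return all;
    }
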
+
+void BytecodeGraphBuilder::VisitConstructWithSpread() {
+  PrepareEagerCheckpoint();
+  interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
+  interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+
+  Node* new_target = environment()->LookupAccumulator();
+  Node* callee = environment()->LookupRegister(callee_reg);
+
+  const Operator* op =
+      javascript()->ConstructWithSpread(static_cast<int>(arg_count) + 2);
+  Node* value = ProcessConstructWithSpreadArguments(op, callee, new_target,
+                                                    first_arg, arg_count + 2);
+  environment()->BindAccumulator(value, Environment::kAttachFrameState);
+}
+
 void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
   PrepareEagerCheckpoint();
   Runtime::FunctionId functionId = bytecode_iterator().GetIntrinsicIdOperand(0);
@@ -1353,7 +1457,7 @@
   environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
 
-Node* BytecodeGraphBuilder::ProcessCallNewArguments(
+Node* BytecodeGraphBuilder::ProcessConstructArguments(
     const Operator* call_new_op, Node* callee, Node* new_target,
     interpreter::Register first_arg, size_t arity) {
   Node** all = local_zone()->NewArray<Node*>(arity);
@@ -1368,14 +1472,14 @@
   return value;
 }
 
-void BytecodeGraphBuilder::VisitNew() {
+void BytecodeGraphBuilder::VisitConstruct() {
   PrepareEagerCheckpoint();
   interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
   // Slot index of 0 is used to indicate no feedback slot is available. Assert
   // the assumption that slot index 0 is never a valid feedback slot.
-  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+  STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
   int const slot_id = bytecode_iterator().GetIndexOperand(3);
   VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
 
@@ -1383,24 +1487,18 @@
   Node* callee = environment()->LookupRegister(callee_reg);
 
   float const frequency = ComputeCallFrequency(slot_id);
-  const Operator* call = javascript()->CallConstruct(
+  const Operator* call = javascript()->Construct(
       static_cast<int>(arg_count) + 2, frequency, feedback);
-  Node* value = ProcessCallNewArguments(call, callee, new_target, first_arg,
-                                        arg_count + 2);
+  Node* value = ProcessConstructArguments(call, callee, new_target, first_arg,
+                                          arg_count + 2);
   environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
 
-void BytecodeGraphBuilder::BuildThrow() {
-  PrepareEagerCheckpoint();
-  Node* value = environment()->LookupAccumulator();
-  Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
-  environment()->BindAccumulator(call, Environment::kAttachFrameState);
-}
-
 void BytecodeGraphBuilder::VisitThrow() {
   BuildLoopExitsForFunctionExit();
-  BuildThrow();
-  Node* call = environment()->LookupAccumulator();
+  Node* value = environment()->LookupAccumulator();
+  Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
+  environment()->BindAccumulator(call, Environment::kAttachFrameState);
   Node* control = NewNode(common()->Throw(), call);
   MergeControlToLeaveFunction(control);
 }
@@ -1413,12 +1511,39 @@
   MergeControlToLeaveFunction(control);
 }
 
-void BytecodeGraphBuilder::BuildBinaryOp(const Operator* js_op) {
+Node* BytecodeGraphBuilder::TryBuildSimplifiedBinaryOp(const Operator* op,
+                                                       Node* left, Node* right,
+                                                       FeedbackSlot slot) {
+  Node* effect = environment()->GetEffectDependency();
+  Node* control = environment()->GetControlDependency();
+  JSTypeHintLowering type_hint_lowering(jsgraph(), feedback_vector());
+  Reduction early_reduction = type_hint_lowering.ReduceBinaryOperation(
+      op, left, right, effect, control, slot);
+  if (early_reduction.Changed()) {
+    Node* node = early_reduction.replacement();
+    if (node->op()->EffectOutputCount() > 0) {
+      environment()->UpdateEffectDependency(node);
+    }
+    return node;
+  }
+  return nullptr;
+}
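
TryBuildSimplifiedBinaryOp follows a try-then-fallback shape that the binary-op visitors below rely on: attempt a feedback-directed lowering to a simplified operator, and fall back to the generic JS operator when the feedback does not permit it. A minimal sketch of the pattern, where Reduce() stands in for JSTypeHintLowering::ReduceBinaryOperation and MakeGeneric() for NewNode (both names are placeholders):

    struct Node {};
    Node* Reduce(Node* left, Node* right);       // nullptr when feedback is
                                                 // too generic to lower early
    Node* MakeGeneric(Node* left, Node* right);  // the generic JS-level op

    Node* BuildOp(Node* left, Node* right) {
      if (Node* simplified = Reduce(left, right)) {
        return simplified;               // cheaper, feedback-backed operator
      }
      return MakeGeneric(left, right);   // safe fallback
    }
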
+
+void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
   PrepareEagerCheckpoint();
   Node* left =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* right = environment()->LookupAccumulator();
-  Node* node = NewNode(js_op, left, right);
+
+  Node* node = nullptr;
+  FeedbackSlot slot = feedback_vector()->ToSlot(
+      bytecode_iterator().GetIndexOperand(kBinaryOperationHintIndex));
+  if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+    node = simplified;
+  } else {
+    node = NewNode(op, left, right);
+  }
+
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
@@ -1426,10 +1551,9 @@
 // feedback.
 BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
     int operand_index) {
-  FeedbackVectorSlot slot = feedback_vector()->ToSlot(
+  FeedbackSlot slot = feedback_vector()->ToSlot(
       bytecode_iterator().GetIndexOperand(operand_index));
-  DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
-            feedback_vector()->GetKind(slot));
+  DCHECK_EQ(FeedbackSlotKind::kBinaryOp, feedback_vector()->GetKind(slot));
   BinaryOpICNexus nexus(feedback_vector(), slot);
   return nexus.GetBinaryOperationFeedback();
 }
@@ -1441,10 +1565,9 @@
   if (slot_index == 0) {
     return CompareOperationHint::kAny;
   }
-  FeedbackVectorSlot slot =
+  FeedbackSlot slot =
       feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(1));
-  DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
-            feedback_vector()->GetKind(slot));
+  DCHECK_EQ(FeedbackSlotKind::kCompareOp, feedback_vector()->GetKind(slot));
   CompareICNexus nexus(feedback_vector(), slot);
   return nexus.GetCompareOperationFeedback();
 }
@@ -1460,61 +1583,58 @@
 }
 
 void BytecodeGraphBuilder::VisitSub() {
-  BuildBinaryOp(javascript()->Subtract(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->Subtract());
 }
 
 void BytecodeGraphBuilder::VisitMul() {
-  BuildBinaryOp(javascript()->Multiply(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->Multiply());
 }
 
-void BytecodeGraphBuilder::VisitDiv() {
-  BuildBinaryOp(
-      javascript()->Divide(GetBinaryOperationHint(kBinaryOperationHintIndex)));
-}
+void BytecodeGraphBuilder::VisitDiv() { BuildBinaryOp(javascript()->Divide()); }
 
 void BytecodeGraphBuilder::VisitMod() {
-  BuildBinaryOp(
-      javascript()->Modulus(GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->Modulus());
 }
 
 void BytecodeGraphBuilder::VisitBitwiseOr() {
-  BuildBinaryOp(javascript()->BitwiseOr(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->BitwiseOr());
 }
 
 void BytecodeGraphBuilder::VisitBitwiseXor() {
-  BuildBinaryOp(javascript()->BitwiseXor(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->BitwiseXor());
 }
 
 void BytecodeGraphBuilder::VisitBitwiseAnd() {
-  BuildBinaryOp(javascript()->BitwiseAnd(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->BitwiseAnd());
 }
 
 void BytecodeGraphBuilder::VisitShiftLeft() {
-  BuildBinaryOp(javascript()->ShiftLeft(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->ShiftLeft());
 }
 
 void BytecodeGraphBuilder::VisitShiftRight() {
-  BuildBinaryOp(javascript()->ShiftRight(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->ShiftRight());
 }
 
 void BytecodeGraphBuilder::VisitShiftRightLogical() {
-  BuildBinaryOp(javascript()->ShiftRightLogical(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->ShiftRightLogical());
 }
 
-void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* js_op) {
+void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* op) {
   PrepareEagerCheckpoint();
   Node* left =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
   Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
-  Node* node = NewNode(js_op, left, right);
+
+  Node* node = nullptr;
+  FeedbackSlot slot = feedback_vector()->ToSlot(
+      bytecode_iterator().GetIndexOperand(kBinaryOperationSmiHintIndex));
+  if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+    node = simplified;
+  } else {
+    node = NewNode(op, left, right);
+  }
+
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
@@ -1524,62 +1644,73 @@
 }
 
 void BytecodeGraphBuilder::VisitSubSmi() {
-  BuildBinaryOpWithImmediate(javascript()->Subtract(
-      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+  BuildBinaryOpWithImmediate(javascript()->Subtract());
 }
 
 void BytecodeGraphBuilder::VisitBitwiseOrSmi() {
-  BuildBinaryOpWithImmediate(javascript()->BitwiseOr(
-      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+  BuildBinaryOpWithImmediate(javascript()->BitwiseOr());
 }
 
 void BytecodeGraphBuilder::VisitBitwiseAndSmi() {
-  BuildBinaryOpWithImmediate(javascript()->BitwiseAnd(
-      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+  BuildBinaryOpWithImmediate(javascript()->BitwiseAnd());
 }
 
 void BytecodeGraphBuilder::VisitShiftLeftSmi() {
-  BuildBinaryOpWithImmediate(javascript()->ShiftLeft(
-      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+  BuildBinaryOpWithImmediate(javascript()->ShiftLeft());
 }
 
 void BytecodeGraphBuilder::VisitShiftRightSmi() {
-  BuildBinaryOpWithImmediate(javascript()->ShiftRight(
-      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+  BuildBinaryOpWithImmediate(javascript()->ShiftRight());
 }
 
 void BytecodeGraphBuilder::VisitInc() {
   PrepareEagerCheckpoint();
   // Note: we subtract -1 here instead of adding 1 to ensure we always convert
   // to a number, not a string (JS addition would concatenate string operands).
-  const Operator* js_op =
-      javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
-  Node* node = NewNode(js_op, environment()->LookupAccumulator(),
-                       jsgraph()->Constant(-1));
+  Node* left = environment()->LookupAccumulator();
+  Node* right = jsgraph()->Constant(-1);
+  const Operator* op = javascript()->Subtract();
+
+  Node* node = nullptr;
+  FeedbackSlot slot = feedback_vector()->ToSlot(
+      bytecode_iterator().GetIndexOperand(kCountOperationHintIndex));
+  if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+    node = simplified;
+  } else {
+    node = NewNode(op, left, right);
+  }
+
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitDec() {
   PrepareEagerCheckpoint();
-  const Operator* js_op =
-      javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
-  Node* node = NewNode(js_op, environment()->LookupAccumulator(),
-                       jsgraph()->OneConstant());
+  Node* left = environment()->LookupAccumulator();
+  Node* right = jsgraph()->OneConstant();
+  const Operator* op = javascript()->Subtract();
+
+  Node* node = nullptr;
+  FeedbackSlot slot = feedback_vector()->ToSlot(
+      bytecode_iterator().GetIndexOperand(kCountOperationHintIndex));
+  if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+    node = simplified;
+  } else {
+    node = NewNode(op, left, right);
+  }
+
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitLogicalNot() {
   Node* value = environment()->LookupAccumulator();
-  Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
-                       jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+  Node* node = NewNode(simplified()->BooleanNot(), value);
   environment()->BindAccumulator(node);
 }
 
 void BytecodeGraphBuilder::VisitToBooleanLogicalNot() {
   Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
                         environment()->LookupAccumulator());
-  Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
-                       jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+  Node* node = NewNode(simplified()->BooleanNot(), value);
   environment()->BindAccumulator(node);
 }
 
@@ -1607,6 +1738,13 @@
   BuildDelete(LanguageMode::SLOPPY);
 }
 
+void BytecodeGraphBuilder::VisitGetSuperConstructor() {
+  Node* node = NewNode(javascript()->GetSuperConstructor(),
+                       environment()->LookupAccumulator());
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node,
+                              Environment::kAttachFrameState);
+}
+
 void BytecodeGraphBuilder::BuildCompareOp(const Operator* js_op) {
   PrepareEagerCheckpoint();
   Node* left =
@@ -1652,8 +1790,30 @@
   BuildCompareOp(javascript()->InstanceOf());
 }
 
+void BytecodeGraphBuilder::VisitTestUndetectable() {
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Node* node = NewNode(jsgraph()->simplified()->ObjectIsUndetectable(), object);
+  environment()->BindAccumulator(node);
+}
+
+void BytecodeGraphBuilder::VisitTestNull() {
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Node* result = NewNode(simplified()->ReferenceEqual(), object,
+                         jsgraph()->NullConstant());
+  environment()->BindAccumulator(result);
+}
+
+void BytecodeGraphBuilder::VisitTestUndefined() {
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Node* result = NewNode(simplified()->ReferenceEqual(), object,
+                         jsgraph()->UndefinedConstant());
+  environment()->BindAccumulator(result);
+}
+
 void BytecodeGraphBuilder::BuildCastOperator(const Operator* js_op) {
-  PrepareEagerCheckpoint();
   Node* value = NewNode(js_op, environment()->LookupAccumulator());
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value,
                               Environment::kAttachFrameState);
@@ -1705,6 +1865,12 @@
   BuildJumpIfNotHole();
 }
 
+void BytecodeGraphBuilder::VisitJumpIfJSReceiver() { BuildJumpIfJSReceiver(); }
+
+void BytecodeGraphBuilder::VisitJumpIfJSReceiverConstant() {
+  BuildJumpIfJSReceiver();
+}
+
 void BytecodeGraphBuilder::VisitJumpIfNull() {
   BuildJumpIfEqual(jsgraph()->NullConstant());
 }
@@ -1729,6 +1895,12 @@
   environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
+void BytecodeGraphBuilder::VisitSetPendingMessage() {
+  Node* previous_message = NewNode(javascript()->LoadMessage());
+  NewNode(javascript()->StoreMessage(), environment()->LookupAccumulator());
+  environment()->BindAccumulator(previous_message);
+}
+
 void BytecodeGraphBuilder::VisitReturn() {
   BuildLoopExitsForFunctionExit();
   Node* pop_node = jsgraph()->ZeroConstant();
@@ -1739,10 +1911,8 @@
 
 void BytecodeGraphBuilder::VisitDebugger() {
   PrepareEagerCheckpoint();
-  Node* call =
-      NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
-  environment()->BindAccumulator(call, Environment::kAttachFrameState);
-  environment()->MarkAllRegistersLive();
+  Node* call = NewNode(javascript()->Debugger());
+  environment()->RecordAfterState(call, Environment::kAttachFrameState);
 }
 
 // We cannot create a graph from the debugger copy of the bytecode array.
@@ -1798,8 +1968,9 @@
   PrepareEagerCheckpoint();
   Node* index =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  index = NewNode(javascript()->Add(BinaryOperationHint::kSignedSmall), index,
-                  jsgraph()->OneConstant());
+  index = NewNode(
+      simplified()->SpeculativeNumberAdd(NumberOperationHint::kSignedSmall),
+      index, jsgraph()->OneConstant());
   environment()->BindAccumulator(index, Environment::kAttachFrameState);
 }
 
@@ -1866,33 +2037,45 @@
 void BytecodeGraphBuilder::VisitNop() {}
 
 void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
-  if (merge_environments_[current_offset] != nullptr) {
+  auto it = merge_environments_.find(current_offset);
+  if (it != merge_environments_.end()) {
+    mark_as_needing_eager_checkpoint(true);
     if (environment() != nullptr) {
-      merge_environments_[current_offset]->Merge(environment());
+      it->second->Merge(environment());
     }
-    set_environment(merge_environments_[current_offset]);
+    set_environment(it->second);
   }
 }
 
 void BytecodeGraphBuilder::BuildLoopHeaderEnvironment(int current_offset) {
-  if (branch_analysis()->backward_branches_target(current_offset)) {
-    // Add loop header and store a copy so we can connect merged back
-    // edge inputs to the loop header.
-    merge_environments_[current_offset] = environment()->CopyForLoop();
+  if (bytecode_analysis()->IsLoopHeader(current_offset)) {
+    mark_as_needing_eager_checkpoint(true);
+    const LoopInfo& loop_info =
+        bytecode_analysis()->GetLoopInfoFor(current_offset);
+
+    // Add loop header.
+    environment()->PrepareForLoop(loop_info.assignments());
+
+    BuildOSRLoopEntryPoint(current_offset);
+
+    // Store a copy of the environment so we can connect merged back edge inputs
+    // to the loop header.
+    merge_environments_[current_offset] = environment()->Copy();
   }
 }
 
 void BytecodeGraphBuilder::MergeIntoSuccessorEnvironment(int target_offset) {
   BuildLoopExitsForBranch(target_offset);
-  if (merge_environments_[target_offset] == nullptr) {
+  Environment*& merge_environment = merge_environments_[target_offset];
+  if (merge_environment == nullptr) {
     // Append merge nodes to the environment. We may merge here with another
     // environment, so add a placeholder for merge nodes. Redundant merge
     // nodes may be added, but they will be eliminated in a later pass.
     // TODO(mstarzinger): Be smarter about this!
     NewMerge();
-    merge_environments_[target_offset] = environment();
+    merge_environment = environment();
   } else {
-    merge_environments_[target_offset]->Merge(environment());
+    merge_environment->Merge(environment());
   }
   set_environment(nullptr);
 }
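
The Environment*& rewrite above trades two hash lookups (a test followed by an insert or merge) for a single one, by binding a reference to the slot that operator[] creates on demand. The same idiom in isolation, sketched over std::map (the ZoneMap used here behaves the same way for this purpose):

    #include <map>
    #include <string>

    void InsertOrMerge(std::map<int, std::string>& m, int key,
                       const std::string& value) {
      std::string& slot = m[key];  // one lookup; default-constructs if absent
      if (slot.empty()) {
        slot = value;              // first arrival claims the slot
      } else {
        slot += "|" + value;       // later arrivals merge into it
      }
    }
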
@@ -1903,13 +2086,14 @@
 }
 
 void BytecodeGraphBuilder::BuildOSRLoopEntryPoint(int current_offset) {
-  if (!osr_ast_id_.IsNone() && osr_ast_id_.ToInt() == current_offset) {
+  DCHECK(bytecode_analysis()->IsLoopHeader(current_offset));
+
+  if (!osr_ast_id_.IsNone() && osr_loop_offset_ == current_offset) {
     // For OSR add a special {OsrLoopEntry} node into the current loop header.
     // It will be turned into a usable entry by the OSR deconstruction.
-    Environment* loop_env = merge_environments_[current_offset];
-    Environment* osr_env = loop_env->CopyForOsrEntry();
+    Environment* osr_env = environment()->Copy();
     osr_env->PrepareForOsrEntry();
-    loop_env->Merge(osr_env);
+    environment()->Merge(osr_env);
   }
 }
 
@@ -1918,9 +2102,11 @@
     // For OSR add an {OsrNormalEntry} as the top-level environment start.
     // It will be replaced with {Dead} by the OSR deconstruction.
     NewNode(common()->OsrNormalEntry());
-    // Note that the requested OSR entry point must be the target of a backward
-    // branch, otherwise there will not be a proper loop header available.
-    DCHECK(branch_analysis()->backward_branches_target(osr_ast_id_.ToInt()));
+    // Translate the offset of the jump instruction to the jump target offset of
+    // that instruction so that the derived BailoutId points to the loop header.
+    osr_loop_offset_ =
+        bytecode_analysis()->GetLoopOffsetFor(osr_ast_id_.ToInt());
+    DCHECK(bytecode_analysis()->IsLoopHeader(osr_loop_offset_));
   }
 }
 
@@ -1928,17 +2114,20 @@
   int origin_offset = bytecode_iterator().current_offset();
   // Only build loop exits for forward edges.
   if (target_offset > origin_offset) {
-    BuildLoopExitsUntilLoop(loop_analysis()->GetLoopOffsetFor(target_offset));
+    BuildLoopExitsUntilLoop(
+        bytecode_analysis()->GetLoopOffsetFor(target_offset));
   }
 }
 
 void BytecodeGraphBuilder::BuildLoopExitsUntilLoop(int loop_offset) {
   int origin_offset = bytecode_iterator().current_offset();
-  int current_loop = loop_analysis()->GetLoopOffsetFor(origin_offset);
+  int current_loop = bytecode_analysis()->GetLoopOffsetFor(origin_offset);
   while (loop_offset < current_loop) {
     Node* loop_node = merge_environments_[current_loop]->GetControlDependency();
-    environment()->PrepareForLoopExit(loop_node);
-    current_loop = loop_analysis()->GetParentLoopFor(current_loop);
+    const LoopInfo& loop_info =
+        bytecode_analysis()->GetLoopInfoFor(current_loop);
+    environment()->PrepareForLoopExit(loop_node, loop_info.assignments());
+    current_loop = loop_info.parent_offset();
   }
 }
 
@@ -1952,7 +2141,7 @@
 
 void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
   NewBranch(condition);
-  Environment* if_false_environment = environment()->CopyForConditional();
+  Environment* if_false_environment = environment()->Copy();
   NewIfTrue();
   MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
   set_environment(if_false_environment);
@@ -1961,7 +2150,7 @@
 
 void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
   NewBranch(condition);
-  Environment* if_true_environment = environment()->CopyForConditional();
+  Environment* if_true_environment = environment()->Copy();
   NewIfFalse();
   MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
   set_environment(if_true_environment);
@@ -1971,17 +2160,30 @@
 void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comperand) {
   Node* accumulator = environment()->LookupAccumulator();
   Node* condition =
-      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
-              accumulator, comperand);
+      NewNode(simplified()->ReferenceEqual(), accumulator, comperand);
   BuildJumpIf(condition);
 }
 
 void BytecodeGraphBuilder::BuildJumpIfFalse() {
-  BuildJumpIfNot(environment()->LookupAccumulator());
+  NewBranch(environment()->LookupAccumulator());
+  Environment* if_true_environment = environment()->Copy();
+  environment()->BindAccumulator(jsgraph()->FalseConstant());
+  NewIfFalse();
+  MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+  if_true_environment->BindAccumulator(jsgraph()->TrueConstant());
+  set_environment(if_true_environment);
+  NewIfTrue();
 }
 
 void BytecodeGraphBuilder::BuildJumpIfTrue() {
-  BuildJumpIf(environment()->LookupAccumulator());
+  NewBranch(environment()->LookupAccumulator());
+  Environment* if_false_environment = environment()->Copy();
+  environment()->BindAccumulator(jsgraph()->TrueConstant());
+  NewIfTrue();
+  MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+  if_false_environment->BindAccumulator(jsgraph()->FalseConstant());
+  set_environment(if_false_environment);
+  NewIfFalse();
 }
 
 void BytecodeGraphBuilder::BuildJumpIfToBooleanTrue() {
@@ -2000,12 +2202,17 @@
 
 void BytecodeGraphBuilder::BuildJumpIfNotHole() {
   Node* accumulator = environment()->LookupAccumulator();
-  Node* condition =
-      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
-              accumulator, jsgraph()->TheHoleConstant());
+  Node* condition = NewNode(simplified()->ReferenceEqual(), accumulator,
+                            jsgraph()->TheHoleConstant());
   BuildJumpIfNot(condition);
 }
 
+void BytecodeGraphBuilder::BuildJumpIfJSReceiver() {
+  Node* accumulator = environment()->LookupAccumulator();
+  Node* condition = NewNode(simplified()->ObjectIsReceiver(), accumulator);
+  BuildJumpIf(condition);
+}
+
 Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
   if (size > input_buffer_size_) {
     size = size + kInputBufferSizeIncrement + input_buffer_size_;
@@ -2093,7 +2300,7 @@
       int handler_offset = exception_handlers_.top().handler_offset_;
       int context_index = exception_handlers_.top().context_register_;
       interpreter::Register context_register(context_index);
-      Environment* success_env = environment()->CopyForConditional();
+      Environment* success_env = environment()->Copy();
       const Operator* op = common()->IfException();
       Node* effect = environment()->GetEffectDependency();
       Node* on_exception = graph()->NewNode(op, effect, result);
@@ -2111,6 +2318,10 @@
       Node* on_success = graph()->NewNode(if_success, result);
       environment()->UpdateControlDependency(on_success);
     }
+    // Ensure checkpoints are created after operations with side effects.
+    if (has_effect && !result->op()->HasProperty(Operator::kNoWrite)) {
+      mark_as_needing_eager_checkpoint(true);
+    }
   }
 
   return result;
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
index 6994226..41fcf68 100644
--- a/src/compiler/bytecode-graph-builder.h
+++ b/src/compiler/bytecode-graph-builder.h
@@ -5,12 +5,10 @@
 #ifndef V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
 #define V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
 
-#include "src/compiler/bytecode-branch-analysis.h"
-#include "src/compiler/bytecode-loop-analysis.h"
+#include "src/compiler/bytecode-analysis.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/liveness-analyzer.h"
 #include "src/compiler/state-values-utils.h"
-#include "src/compiler/type-hint-analyzer.h"
 #include "src/interpreter/bytecode-array-iterator.h"
 #include "src/interpreter/bytecode-flags.h"
 #include "src/interpreter/bytecodes.h"
@@ -18,9 +16,6 @@
 
 namespace v8 {
 namespace internal {
-
-class CompilationInfo;
-
 namespace compiler {
 
 class SourcePositionTable;
@@ -29,8 +24,10 @@
 // interpreter bytecodes.
 class BytecodeGraphBuilder {
  public:
-  BytecodeGraphBuilder(Zone* local_zone, CompilationInfo* info,
-                       JSGraph* jsgraph, float invocation_frequency,
+  BytecodeGraphBuilder(Zone* local_zone, Handle<SharedFunctionInfo> shared,
+                       Handle<FeedbackVector> feedback_vector,
+                       BailoutId osr_ast_id, JSGraph* jsgraph,
+                       float invocation_frequency,
                        SourcePositionTable* source_positions,
                        int inlining_id = SourcePosition::kNotInlined);
 
@@ -114,9 +111,14 @@
 
   Node* ProcessCallArguments(const Operator* call_op, Node* callee,
                              interpreter::Register receiver, size_t arity);
-  Node* ProcessCallNewArguments(const Operator* call_new_op, Node* callee,
-                                Node* new_target,
-                                interpreter::Register first_arg, size_t arity);
+  Node* ProcessConstructArguments(const Operator* call_new_op, Node* callee,
+                                  Node* new_target,
+                                  interpreter::Register first_arg,
+                                  size_t arity);
+  Node* ProcessConstructWithSpreadArguments(const Operator* op, Node* callee,
+                                            Node* new_target,
+                                            interpreter::Register first_arg,
+                                            size_t arity);
   Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
                                     interpreter::Register first_arg,
                                     size_t arity);
@@ -131,14 +133,18 @@
   // Conceptually this frame state is "after" a given operation.
   void PrepareFrameState(Node* node, OutputFrameStateCombine combine);
 
-  // Computes register liveness and replaces dead ones in frame states with the
-  // undefined values.
-  void ClearNonLiveSlotsInFrameStates();
-
   void BuildCreateArguments(CreateArgumentsType type);
-  Node* BuildLoadGlobal(uint32_t feedback_slot_index, TypeofMode typeof_mode);
+  Node* BuildLoadGlobal(Handle<Name> name, uint32_t feedback_slot_index,
+                        TypeofMode typeof_mode);
   void BuildStoreGlobal(LanguageMode language_mode);
-  void BuildNamedStore(LanguageMode language_mode);
+
+  enum class StoreMode {
+    // Check the prototype chain before storing.
+    kNormal,
+    // Store value to the receiver without checking the prototype chain.
+    kOwn,
+  };
+  void BuildNamedStore(LanguageMode language_mode, StoreMode store_mode);
   void BuildKeyedStore(LanguageMode language_mode);
   void BuildLdaLookupSlot(TypeofMode typeof_mode);
   void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
@@ -146,7 +152,6 @@
   void BuildStaLookupSlot(LanguageMode language_mode);
   void BuildCall(TailCallMode tail_call_mode,
                  ConvertReceiverMode receiver_hint);
-  void BuildThrow();
   void BuildBinaryOp(const Operator* op);
   void BuildBinaryOpWithImmediate(const Operator* op);
   void BuildCompareOp(const Operator* op);
@@ -156,6 +161,13 @@
   void BuildForInNext();
   void BuildInvokeIntrinsic();
 
+  // Optional early lowering to the simplified operator level. Returns the node
+  // representing the lowered operation, or {nullptr} if no lowering is
+  // available. Note that the result has already been wired into the
+  // environment, just as any other invocation of {NewNode} would be.
+  Node* TryBuildSimplifiedBinaryOp(const Operator* op, Node* left, Node* right,
+                                   FeedbackSlot slot);
+
   // Check the context chain for extensions, for lookup fast paths.
   Environment* CheckContextExtensions(uint32_t depth);
 
@@ -181,6 +193,7 @@
   void BuildJumpIfToBooleanTrue();
   void BuildJumpIfToBooleanFalse();
   void BuildJumpIfNotHole();
+  void BuildJumpIfJSReceiver();
 
   // Simulates control flow by forward-propagating environments.
   void MergeIntoSuccessorEnvironment(int target_offset);
@@ -203,6 +216,10 @@
   // Simulates entry and exit of exception handlers.
   void EnterAndExitExceptionHandlers(int current_offset);
 
+  // Update the current position of the {SourcePositionTable} to that of the
+  // bytecode at {offset}, if any.
+  void UpdateCurrentSourcePosition(SourcePositionTableIterator* it, int offset);
+
   // Growth increment for the temporary buffer used to construct input lists to
   // new nodes.
   static const int kInputBufferSizeIncrement = 64;
@@ -224,6 +241,9 @@
   Zone* graph_zone() const { return graph()->zone(); }
   JSGraph* jsgraph() const { return jsgraph_; }
   JSOperatorBuilder* javascript() const { return jsgraph_->javascript(); }
+  SimplifiedOperatorBuilder* simplified() const {
+    return jsgraph_->simplified();
+  }
   Zone* local_zone() const { return local_zone_; }
   const Handle<BytecodeArray>& bytecode_array() const {
     return bytecode_array_;
@@ -231,7 +251,7 @@
   const Handle<HandlerTable>& exception_handler_table() const {
     return exception_handler_table_;
   }
-  const Handle<TypeFeedbackVector>& feedback_vector() const {
+  const Handle<FeedbackVector>& feedback_vector() const {
     return feedback_vector_;
   }
   const FrameStateFunctionInfo* frame_state_function_info() const {
@@ -247,24 +267,17 @@
     bytecode_iterator_ = bytecode_iterator;
   }
 
-  const BytecodeBranchAnalysis* branch_analysis() const {
-    return branch_analysis_;
+  const BytecodeAnalysis* bytecode_analysis() const {
+    return bytecode_analysis_;
   }
 
-  void set_branch_analysis(const BytecodeBranchAnalysis* branch_analysis) {
-    branch_analysis_ = branch_analysis;
+  void set_bytecode_analysis(const BytecodeAnalysis* bytecode_analysis) {
+    bytecode_analysis_ = bytecode_analysis;
   }
 
-  const BytecodeLoopAnalysis* loop_analysis() const { return loop_analysis_; }
-
-  void set_loop_analysis(const BytecodeLoopAnalysis* loop_analysis) {
-    loop_analysis_ = loop_analysis;
-  }
-
-  LivenessAnalyzer* liveness_analyzer() { return &liveness_analyzer_; }
-
-  bool IsLivenessAnalysisEnabled() const {
-    return this->is_liveness_analysis_enabled_;
+  bool needs_eager_checkpoint() const { return needs_eager_checkpoint_; }
+  void mark_as_needing_eager_checkpoint(bool value) {
+    needs_eager_checkpoint_ = value;
   }
 
 #define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
@@ -276,13 +289,13 @@
   float const invocation_frequency_;
   Handle<BytecodeArray> bytecode_array_;
   Handle<HandlerTable> exception_handler_table_;
-  Handle<TypeFeedbackVector> feedback_vector_;
+  Handle<FeedbackVector> feedback_vector_;
   const FrameStateFunctionInfo* frame_state_function_info_;
   const interpreter::BytecodeArrayIterator* bytecode_iterator_;
-  const BytecodeBranchAnalysis* branch_analysis_;
-  const BytecodeLoopAnalysis* loop_analysis_;
+  const BytecodeAnalysis* bytecode_analysis_;
   Environment* environment_;
   BailoutId osr_ast_id_;
+  int osr_loop_offset_;
 
   // Merge environments are snapshots of the environment at points where the
   // control flow merges. This models a forward data flow propagation of all
@@ -297,6 +310,11 @@
   int input_buffer_size_;
   Node** input_buffer_;
 
+  // Optimization to create checkpoints only when the current position in the
+  // control flow is not already effect-dominated by another checkpoint. All
+  // operations that do not have observable side effects can be re-evaluated.
+  bool needs_eager_checkpoint_;
+
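
A toy model of the checkpoint elision this flag drives, under the semantics stated above (a checkpoint is emitted only when no dominating one exists, and any effectful operation invalidates the current one); emit and all names below are illustrative, not V8 API:

    #include <functional>

    struct CheckpointTracker {
      bool needs_checkpoint = true;

      void PrepareEagerCheckpoint(const std::function<void()>& emit) {
        if (needs_checkpoint) {
          emit();                    // materialize a frame-state checkpoint
          needs_checkpoint = false;  // pure operations downstream reuse it
        }
      }

      // Called after any operation with observable side effects.
      void OnEffectfulOperation() { needs_checkpoint = true; }
    };
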
   // Nodes representing values in the activation record.
   SetOncePointer<Node> function_context_;
   SetOncePointer<Node> function_closure_;
@@ -305,22 +323,13 @@
   // Control nodes that exit the function body.
   ZoneVector<Node*> exit_controls_;
 
-  bool const is_liveness_analysis_enabled_;
-
   StateValuesCache state_values_cache_;
 
-  // Analyzer of register liveness.
-  LivenessAnalyzer liveness_analyzer_;
-
-  // The Turbofan source position table, to be populated.
+  // The source position table, to be populated.
   SourcePositionTable* source_positions_;
 
   SourcePosition const start_position_;
 
-  // Update [source_positions_]'s current position to that of the bytecode at
-  // [offset], if any.
-  void UpdateCurrentSourcePosition(SourcePositionTableIterator* it, int offset);
-
   static int const kBinaryOperationHintIndex = 1;
   static int const kCountOperationHintIndex = 0;
   static int const kBinaryOperationSmiHintIndex = 2;
diff --git a/src/compiler/bytecode-liveness-map.cc b/src/compiler/bytecode-liveness-map.cc
new file mode 100644
index 0000000..ba98dec
--- /dev/null
+++ b/src/compiler/bytecode-liveness-map.cc
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-liveness-map.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+BytecodeLiveness::BytecodeLiveness(int register_count, Zone* zone)
+    : in(new (zone) BytecodeLivenessState(register_count, zone)),
+      out(new (zone) BytecodeLivenessState(register_count, zone)) {}
+
+BytecodeLivenessMap::BytecodeLivenessMap(int bytecode_size, Zone* zone)
+    : liveness_map_(base::bits::RoundUpToPowerOfTwo32(bytecode_size / 4 + 1),
+                    base::KeyEqualityMatcher<int>(),
+                    ZoneAllocationPolicy(zone)) {}
+
+uint32_t OffsetHash(int offset) { return offset; }
+
+BytecodeLiveness& BytecodeLivenessMap::InitializeLiveness(int offset,
+                                                          int register_count,
+                                                          Zone* zone) {
+  return liveness_map_
+      .LookupOrInsert(offset, OffsetHash(offset),
+                      [&]() { return BytecodeLiveness(register_count, zone); },
+                      ZoneAllocationPolicy(zone))
+      ->value;
+}
+
+BytecodeLiveness& BytecodeLivenessMap::GetLiveness(int offset) {
+  return liveness_map_.Lookup(offset, OffsetHash(offset))->value;
+}
+
+const BytecodeLiveness& BytecodeLivenessMap::GetLiveness(int offset) const {
+  return liveness_map_.Lookup(offset, OffsetHash(offset))->value;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
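
The BytecodeLivenessMap constructor above pre-sizes the hash map on the heuristic of roughly one liveness entry per four bytes of bytecode, rounded up to a power of two. The arithmetic in isolation (RoundUpToPowerOfTwo32 is reimplemented here with the usual bit trick; V8 ships its own in base::bits):

    #include <cstdint>

    uint32_t RoundUpToPowerOfTwo32(uint32_t v) {
      v--;
      v |= v >> 1;  v |= v >> 2;  v |= v >> 4;
      v |= v >> 8;  v |= v >> 16;
      return v + 1;
    }

    uint32_t InitialBucketCount(int bytecode_size) {
      // One bucket per ~4 bytecode bytes, never zero.
      return RoundUpToPowerOfTwo32(
          static_cast<uint32_t>(bytecode_size / 4 + 1));
    }
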
diff --git a/src/compiler/bytecode-liveness-map.h b/src/compiler/bytecode-liveness-map.h
new file mode 100644
index 0000000..03251f1
--- /dev/null
+++ b/src/compiler/bytecode-liveness-map.h
@@ -0,0 +1,119 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
+#define V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
+
+#include "src/base/hashmap.h"
+#include "src/bit-vector.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class BytecodeLivenessState : public ZoneObject {
+ public:
+  BytecodeLivenessState(int register_count, Zone* zone)
+      : bit_vector_(register_count + 1, zone) {}
+
+  const BitVector& bit_vector() const { return bit_vector_; }
+
+  BitVector& bit_vector() { return bit_vector_; }
+
+  bool RegisterIsLive(int index) const {
+    DCHECK_GE(index, 0);
+    DCHECK_LT(index, bit_vector_.length() - 1);
+    return bit_vector_.Contains(index);
+  }
+
+  bool AccumulatorIsLive() const {
+    return bit_vector_.Contains(bit_vector_.length() - 1);
+  }
+
+  bool Equals(const BytecodeLivenessState& other) const {
+    return bit_vector_.Equals(other.bit_vector_);
+  }
+
+  void MarkRegisterLive(int index) {
+    DCHECK_GE(index, 0);
+    DCHECK_LT(index, bit_vector_.length() - 1);
+    bit_vector_.Add(index);
+  }
+
+  void MarkRegisterDead(int index) {
+    DCHECK_GE(index, 0);
+    DCHECK_LT(index, bit_vector_.length() - 1);
+    bit_vector_.Remove(index);
+  }
+
+  void MarkAccumulatorLive() { bit_vector_.Add(bit_vector_.length() - 1); }
+
+  void MarkAccumulatorDead() { bit_vector_.Remove(bit_vector_.length() - 1); }
+
+  void MarkAllLive() { bit_vector_.AddAll(); }
+
+  void Union(const BytecodeLivenessState& other) {
+    bit_vector_.Union(other.bit_vector_);
+  }
+
+  bool UnionIsChanged(const BytecodeLivenessState& other) {
+    return bit_vector_.UnionIsChanged(other.bit_vector_);
+  }
+
+  void CopyFrom(const BytecodeLivenessState& other) {
+    bit_vector_.CopyFrom(other.bit_vector_);
+  }
+
+ private:
+  BitVector bit_vector_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeLivenessState);
+};
+
+struct BytecodeLiveness {
+  BytecodeLivenessState* in;
+  BytecodeLivenessState* out;
+
+  BytecodeLiveness(int register_count, Zone* zone);
+};
+
+class V8_EXPORT_PRIVATE BytecodeLivenessMap {
+ public:
+  BytecodeLivenessMap(int size, Zone* zone);
+
+  BytecodeLiveness& InitializeLiveness(int offset, int register_count,
+                                       Zone* zone);
+
+  BytecodeLiveness& GetLiveness(int offset);
+  const BytecodeLiveness& GetLiveness(int offset) const;
+
+  BytecodeLivenessState* GetInLiveness(int offset) {
+    return GetLiveness(offset).in;
+  }
+  const BytecodeLivenessState* GetInLiveness(int offset) const {
+    return GetLiveness(offset).in;
+  }
+
+  BytecodeLivenessState* GetOutLiveness(int offset) {
+    return GetLiveness(offset).out;
+  }
+  const BytecodeLivenessState* GetOutLiveness(int offset) const {
+    return GetLiveness(offset).out;
+  }
+
+ private:
+  base::TemplateHashMapImpl<int, BytecodeLiveness,
+                            base::KeyEqualityMatcher<int>, ZoneAllocationPolicy>
+      liveness_map_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
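
BytecodeLivenessState stores one bit per register plus a trailing bit for the accumulator, which is why every accumulator accessor above addresses length() - 1. A toy equivalent over std::vector<bool>, for illustration only:

    #include <vector>

    class LivenessBits {
     public:
      explicit LivenessBits(int register_count) : bits_(register_count + 1) {}
      void MarkRegisterLive(int i) { bits_[i] = true; }
      void MarkRegisterDead(int i) { bits_[i] = false; }
      bool RegisterIsLive(int i) const { return bits_[i]; }
      // The accumulator rides in the last bit, after all registers.
      void MarkAccumulatorLive() { bits_.back() = true; }
      bool AccumulatorIsLive() const { return bits_.back(); }

     private:
      std::vector<bool> bits_;
    };
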
diff --git a/src/compiler/bytecode-loop-analysis.cc b/src/compiler/bytecode-loop-analysis.cc
deleted file mode 100644
index 03c11f7..0000000
--- a/src/compiler/bytecode-loop-analysis.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/bytecode-loop-analysis.h"
-
-#include "src/compiler/bytecode-branch-analysis.h"
-#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-BytecodeLoopAnalysis::BytecodeLoopAnalysis(
-    Handle<BytecodeArray> bytecode_array,
-    const BytecodeBranchAnalysis* branch_analysis, Zone* zone)
-    : bytecode_array_(bytecode_array),
-      branch_analysis_(branch_analysis),
-      zone_(zone),
-      current_loop_offset_(-1),
-      found_current_backedge_(false),
-      backedge_to_header_(zone),
-      loop_header_to_parent_(zone) {}
-
-void BytecodeLoopAnalysis::Analyze() {
-  current_loop_offset_ = -1;
-  found_current_backedge_ = false;
-  interpreter::BytecodeArrayIterator iterator(bytecode_array());
-  while (!iterator.done()) {
-    interpreter::Bytecode bytecode = iterator.current_bytecode();
-    int current_offset = iterator.current_offset();
-    if (branch_analysis_->backward_branches_target(current_offset)) {
-      AddLoopEntry(current_offset);
-    } else if (interpreter::Bytecodes::IsJump(bytecode)) {
-      AddBranch(current_offset, iterator.GetJumpTargetOffset());
-    }
-    iterator.Advance();
-  }
-}
-
-void BytecodeLoopAnalysis::AddLoopEntry(int entry_offset) {
-  if (found_current_backedge_) {
-    // We assume that all backedges of a loop must occur together and before
-    // another loop entry or an outer loop backedge.
-    // This is guaranteed by the invariants from AddBranch, such that every
-    // backedge must either go to the current loop or be the first of the
-    // backedges to the parent loop.
-    // Thus here, the current loop actually ended before and we have a loop
-    // with the same parent.
-    current_loop_offset_ = loop_header_to_parent_[current_loop_offset_];
-    found_current_backedge_ = false;
-  }
-  loop_header_to_parent_[entry_offset] = current_loop_offset_;
-  current_loop_offset_ = entry_offset;
-}
-
-void BytecodeLoopAnalysis::AddBranch(int origin_offset, int target_offset) {
-  // If this is a backedge, record it.
-  if (target_offset < origin_offset) {
-    backedge_to_header_[origin_offset] = target_offset;
-    // Check whether this is actually a backedge of the outer loop and we have
-    // already finished the current loop.
-    if (target_offset < current_loop_offset_) {
-      DCHECK(found_current_backedge_);
-      int parent_offset = loop_header_to_parent_[current_loop_offset_];
-      DCHECK_EQ(target_offset, parent_offset);
-      current_loop_offset_ = parent_offset;
-    } else {
-      DCHECK_EQ(target_offset, current_loop_offset_);
-      found_current_backedge_ = true;
-    }
-  }
-}
-
-int BytecodeLoopAnalysis::GetLoopOffsetFor(int offset) const {
-  auto next_backedge = backedge_to_header_.lower_bound(offset);
-  // If there is no next backedge => offset is not in a loop.
-  if (next_backedge == backedge_to_header_.end()) {
-    return -1;
-  }
-  // If the header precedes the offset, it is the backedge of the containing
-  // loop.
-  if (next_backedge->second <= offset) {
-    return next_backedge->second;
-  }
-  // Otherwise there is a nested loop after this offset. We just return the
-  // parent of the next nested loop.
-  return loop_header_to_parent_.upper_bound(offset)->second;
-}
-
-int BytecodeLoopAnalysis::GetParentLoopFor(int header_offset) const {
-  auto parent = loop_header_to_parent_.find(header_offset);
-  DCHECK(parent != loop_header_to_parent_.end());
-  return parent->second;
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/bytecode-loop-analysis.h b/src/compiler/bytecode-loop-analysis.h
deleted file mode 100644
index 1a86d7b..0000000
--- a/src/compiler/bytecode-loop-analysis.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
-#define V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
-
-#include "src/handles.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-class BytecodeArray;
-
-namespace compiler {
-
-class BytecodeBranchAnalysis;
-
-class BytecodeLoopAnalysis BASE_EMBEDDED {
- public:
-  BytecodeLoopAnalysis(Handle<BytecodeArray> bytecode_array,
-                       const BytecodeBranchAnalysis* branch_analysis,
-                       Zone* zone);
-
-  // Analyze the bytecodes to find the branch sites and their
-  // targets. No other methods in this class return valid information
-  // until this has been called.
-  void Analyze();
-
-  // Get the loop header offset of the containing loop for arbitrary
-  // {offset}, or -1 if the {offset} is not inside any loop.
-  int GetLoopOffsetFor(int offset) const;
-  // Gets the loop header offset of the parent loop of the loop header
-  // at {header_offset}, or -1 for outer-most loops.
-  int GetParentLoopFor(int header_offset) const;
-
- private:
-  void AddLoopEntry(int entry_offset);
-  void AddBranch(int origin_offset, int target_offset);
-
-  Zone* zone() const { return zone_; }
-  Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
-
-  Handle<BytecodeArray> bytecode_array_;
-  const BytecodeBranchAnalysis* branch_analysis_;
-  Zone* zone_;
-
-  int current_loop_offset_;
-  bool found_current_backedge_;
-
-  // Map from the offset of a backedge jump to the offset of the corresponding
-  // loop header. There might be multiple backedges for do-while loops.
-  ZoneMap<int, int> backedge_to_header_;
-  // Map from the offset of a loop header to the offset of its parent's loop
-  // header. This map will have as many entries as there are loops in the
-  // function.
-  ZoneMap<int, int> loop_header_to_parent_;
-
-  DISALLOW_COPY_AND_ASSIGN(BytecodeLoopAnalysis);
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
diff --git a/src/compiler/code-assembler.cc b/src/compiler/code-assembler.cc
index 3431098..1ace7da 100644
--- a/src/compiler/code-assembler.cc
+++ b/src/compiler/code-assembler.cc
@@ -19,18 +19,27 @@
 #include "src/interpreter/bytecodes.h"
 #include "src/machine-type.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 #include "src/utils.h"
 #include "src/zone/zone.h"
 
+#define REPEAT_1_TO_2(V, T) V(T) V(T, T)
+#define REPEAT_1_TO_3(V, T) REPEAT_1_TO_2(V, T) V(T, T, T)
+#define REPEAT_1_TO_4(V, T) REPEAT_1_TO_3(V, T) V(T, T, T, T)
+#define REPEAT_1_TO_5(V, T) REPEAT_1_TO_4(V, T) V(T, T, T, T, T)
+#define REPEAT_1_TO_6(V, T) REPEAT_1_TO_5(V, T) V(T, T, T, T, T, T)
+#define REPEAT_1_TO_7(V, T) REPEAT_1_TO_6(V, T) V(T, T, T, T, T, T, T)
+#define REPEAT_1_TO_8(V, T) REPEAT_1_TO_7(V, T) V(T, T, T, T, T, T, T, T)
+#define REPEAT_1_TO_9(V, T) REPEAT_1_TO_8(V, T) V(T, T, T, T, T, T, T, T, T)
+
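
The REPEAT_1_TO_N helpers expand a macro V once per arity from 1 up to N; later in this file (beyond this excerpt) they stamp out call overloads without hand-writing each arity. A standalone demo of the expansion, with Sum as a made-up example:

    #define DEMO_REPEAT_1_TO_2(V, T) V(T) V(T, T)
    #define DEMO_REPEAT_1_TO_3(V, T) DEMO_REPEAT_1_TO_2(V, T) V(T, T, T)
    #define DECLARE_SUM(...) int Sum(__VA_ARGS__);

    // Expands to: int Sum(int); int Sum(int, int); int Sum(int, int, int);
    DEMO_REPEAT_1_TO_3(DECLARE_SUM, int)
    #undef DECLARE_SUM
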
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
-                             const CallInterfaceDescriptor& descriptor,
-                             Code::Flags flags, const char* name,
-                             size_t result_size)
-    : CodeAssembler(
+CodeAssemblerState::CodeAssemblerState(
+    Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+    Code::Flags flags, const char* name, size_t result_size)
+    : CodeAssemblerState(
           isolate, zone,
           Linkage::GetStubCallDescriptor(
               isolate, zone, descriptor, descriptor.GetStackParameterCount(),
@@ -38,19 +47,20 @@
               MachineType::AnyTagged(), result_size),
           flags, name) {}
 
-CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
-                             Code::Flags flags, const char* name)
-    : CodeAssembler(isolate, zone,
-                    Linkage::GetJSCallDescriptor(
-                        zone, false, parameter_count,
-                        Code::ExtractKindFromFlags(flags) == Code::BUILTIN
-                            ? CallDescriptor::kPushArgumentCount
-                            : CallDescriptor::kNoFlags),
-                    flags, name) {}
+CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
+                                       int parameter_count, Code::Flags flags,
+                                       const char* name)
+    : CodeAssemblerState(isolate, zone,
+                         Linkage::GetJSCallDescriptor(
+                             zone, false, parameter_count,
+                             Code::ExtractKindFromFlags(flags) == Code::BUILTIN
+                                 ? CallDescriptor::kPushArgumentCount
+                                 : CallDescriptor::kNoFlags),
+                         flags, name) {}
 
-CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
-                             CallDescriptor* call_descriptor, Code::Flags flags,
-                             const char* name)
+CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
+                                       CallDescriptor* call_descriptor,
+                                       Code::Flags flags, const char* name)
     : raw_assembler_(new RawMachineAssembler(
           isolate, new (zone) Graph(zone), call_descriptor,
           MachineType::PointerRepresentation(),
@@ -61,56 +71,109 @@
       code_generated_(false),
       variables_(zone) {}
 
+CodeAssemblerState::~CodeAssemblerState() {}
+
+int CodeAssemblerState::parameter_count() const {
+  return static_cast<int>(raw_assembler_->call_descriptor()->ParameterCount());
+}
+
 CodeAssembler::~CodeAssembler() {}
 
-void CodeAssembler::CallPrologue() {}
+class BreakOnNodeDecorator final : public GraphDecorator {
+ public:
+  explicit BreakOnNodeDecorator(NodeId node_id) : node_id_(node_id) {}
 
-void CodeAssembler::CallEpilogue() {}
+  void Decorate(Node* node) final {
+    if (node->id() == node_id_) {
+      base::OS::DebugBreak();
+    }
+  }
 
-Handle<Code> CodeAssembler::GenerateCode() {
-  DCHECK(!code_generated_);
+ private:
+  NodeId node_id_;
+};
 
-  Schedule* schedule = raw_assembler_->Export();
+void CodeAssembler::BreakOnNode(int node_id) {
+  Graph* graph = raw_assembler()->graph();
+  Zone* zone = graph->zone();
+  GraphDecorator* decorator =
+      new (zone) BreakOnNodeDecorator(static_cast<NodeId>(node_id));
+  graph->AddDecorator(decorator);
+}
+
+void CodeAssembler::RegisterCallGenerationCallbacks(
+    const CodeAssemblerCallback& call_prologue,
+    const CodeAssemblerCallback& call_epilogue) {
+  // The callback can be registered only once.
+  DCHECK(!state_->call_prologue_);
+  DCHECK(!state_->call_epilogue_);
+  state_->call_prologue_ = call_prologue;
+  state_->call_epilogue_ = call_epilogue;
+}
+
+void CodeAssembler::UnregisterCallGenerationCallbacks() {
+  state_->call_prologue_ = nullptr;
+  state_->call_epilogue_ = nullptr;
+}
+
+void CodeAssembler::CallPrologue() {
+  if (state_->call_prologue_) {
+    state_->call_prologue_();
+  }
+}
+
+void CodeAssembler::CallEpilogue() {
+  if (state_->call_epilogue_) {
+    state_->call_epilogue_();
+  }
+}
+
+// static
+Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
+  DCHECK(!state->code_generated_);
+
+  RawMachineAssembler* rasm = state->raw_assembler_.get();
+  Schedule* schedule = rasm->Export();
   Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
-      isolate(), raw_assembler_->call_descriptor(), raw_assembler_->graph(),
-      schedule, flags_, name_);
+      rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
+      state->flags_, state->name_);
 
-  code_generated_ = true;
+  state->code_generated_ = true;
   return code;
 }
 
-bool CodeAssembler::Is64() const { return raw_assembler_->machine()->Is64(); }
+bool CodeAssembler::Is64() const { return raw_assembler()->machine()->Is64(); }
 
 bool CodeAssembler::IsFloat64RoundUpSupported() const {
-  return raw_assembler_->machine()->Float64RoundUp().IsSupported();
+  return raw_assembler()->machine()->Float64RoundUp().IsSupported();
 }
 
 bool CodeAssembler::IsFloat64RoundDownSupported() const {
-  return raw_assembler_->machine()->Float64RoundDown().IsSupported();
+  return raw_assembler()->machine()->Float64RoundDown().IsSupported();
 }
 
 bool CodeAssembler::IsFloat64RoundTiesEvenSupported() const {
-  return raw_assembler_->machine()->Float64RoundTiesEven().IsSupported();
+  return raw_assembler()->machine()->Float64RoundTiesEven().IsSupported();
 }
 
 bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
-  return raw_assembler_->machine()->Float64RoundTruncate().IsSupported();
+  return raw_assembler()->machine()->Float64RoundTruncate().IsSupported();
 }
 
 Node* CodeAssembler::Int32Constant(int32_t value) {
-  return raw_assembler_->Int32Constant(value);
+  return raw_assembler()->Int32Constant(value);
 }
 
 Node* CodeAssembler::Int64Constant(int64_t value) {
-  return raw_assembler_->Int64Constant(value);
+  return raw_assembler()->Int64Constant(value);
 }
 
 Node* CodeAssembler::IntPtrConstant(intptr_t value) {
-  return raw_assembler_->IntPtrConstant(value);
+  return raw_assembler()->IntPtrConstant(value);
 }
 
 Node* CodeAssembler::NumberConstant(double value) {
-  return raw_assembler_->NumberConstant(value);
+  return raw_assembler()->NumberConstant(value);
 }
 
 Node* CodeAssembler::SmiConstant(Smi* value) {
@@ -122,19 +185,23 @@
 }
 
 Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
-  return raw_assembler_->HeapConstant(object);
+  return raw_assembler()->HeapConstant(object);
+}
+
+Node* CodeAssembler::CStringConstant(const char* str) {
+  return HeapConstant(factory()->NewStringFromAsciiChecked(str, TENURED));
 }
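+// Editor's note: CStringConstant() materializes the C string as an old-space
+// (TENURED) String constant at assembly time, so it is suited to static,
+// ASCII-only literals such as error and debug messages.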
 
 Node* CodeAssembler::BooleanConstant(bool value) {
-  return raw_assembler_->BooleanConstant(value);
+  return raw_assembler()->BooleanConstant(value);
 }
 
 Node* CodeAssembler::ExternalConstant(ExternalReference address) {
-  return raw_assembler_->ExternalConstant(address);
+  return raw_assembler()->ExternalConstant(address);
 }
 
 Node* CodeAssembler::Float64Constant(double value) {
-  return raw_assembler_->Float64Constant(value);
+  return raw_assembler()->Float64Constant(value);
 }
 
 Node* CodeAssembler::NaNConstant() {
@@ -174,24 +241,48 @@
 }
 
 bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
+  if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned ||
+      node->opcode() == IrOpcode::kBitcastWordToTagged) {
+    node = node->InputAt(0);
+  }
   IntPtrMatcher m(node);
   if (m.HasValue()) out_value = m.Value();
   return m.HasValue();
 }
 
 Node* CodeAssembler::Parameter(int value) {
-  return raw_assembler_->Parameter(value);
+  return raw_assembler()->Parameter(value);
+}
+
+Node* CodeAssembler::GetJSContextParameter() {
+  CallDescriptor* desc = raw_assembler()->call_descriptor();
+  DCHECK(desc->IsJSFunctionCall());
+  return Parameter(Linkage::GetJSCallContextParamIndex(
+      static_cast<int>(desc->JSParameterCount())));
 }
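+// Editor's note: this works because the JS calling convention passes the
+// context as an extra parameter after the receiver and arguments;
+// Linkage::GetJSCallContextParamIndex() computes that slot from the JS
+// parameter count.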
 
 void CodeAssembler::Return(Node* value) {
-  return raw_assembler_->Return(value);
+  return raw_assembler()->Return(value);
+}
+
+void CodeAssembler::Return(Node* value1, Node* value2) {
+  return raw_assembler()->Return(value1, value2);
+}
+
+void CodeAssembler::Return(Node* value1, Node* value2, Node* value3) {
+  return raw_assembler()->Return(value1, value2, value3);
 }
 
 void CodeAssembler::PopAndReturn(Node* pop, Node* value) {
-  return raw_assembler_->PopAndReturn(pop, value);
+  return raw_assembler()->PopAndReturn(pop, value);
 }
 
-void CodeAssembler::DebugBreak() { raw_assembler_->DebugBreak(); }
+void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }
+
+void CodeAssembler::Unreachable() {
+  DebugBreak();
+  raw_assembler()->Unreachable();
+}
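+// Editor's note: Unreachable() deliberately emits a DebugBreak() first, so
+// that control ever reaching supposedly dead code traps immediately rather
+// than exhibiting undefined behavior.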
 
 void CodeAssembler::Comment(const char* format, ...) {
   if (!FLAG_code_comments) return;
@@ -210,81 +301,118 @@
   MemCopy(copy + prefix_len, builder.Finalize(), length);
   copy[0] = ';';
   copy[1] = ' ';
-  raw_assembler_->Comment(copy);
+  raw_assembler()->Comment(copy);
 }
 
-void CodeAssembler::Bind(CodeAssembler::Label* label) { return label->Bind(); }
+void CodeAssembler::Bind(Label* label) { return label->Bind(); }
 
 Node* CodeAssembler::LoadFramePointer() {
-  return raw_assembler_->LoadFramePointer();
+  return raw_assembler()->LoadFramePointer();
 }
 
 Node* CodeAssembler::LoadParentFramePointer() {
-  return raw_assembler_->LoadParentFramePointer();
+  return raw_assembler()->LoadParentFramePointer();
 }
 
 Node* CodeAssembler::LoadStackPointer() {
-  return raw_assembler_->LoadStackPointer();
+  return raw_assembler()->LoadStackPointer();
 }
 
 #define DEFINE_CODE_ASSEMBLER_BINARY_OP(name)   \
   Node* CodeAssembler::name(Node* a, Node* b) { \
-    return raw_assembler_->name(a, b);          \
+    return raw_assembler()->name(a, b);         \
   }
 CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
 #undef DEFINE_CODE_ASSEMBLER_BINARY_OP
 
+Node* CodeAssembler::IntPtrAdd(Node* left, Node* right) {
+  intptr_t left_constant;
+  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  intptr_t right_constant;
+  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  if (is_left_constant) {
+    if (is_right_constant) {
+      return IntPtrConstant(left_constant + right_constant);
+    }
+    if (left_constant == 0) {
+      return right;
+    }
+  } else if (is_right_constant) {
+    if (right_constant == 0) {
+      return left;
+    }
+  }
+  return raw_assembler()->IntPtrAdd(left, right);
+}
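+// Editor's note: with the folding above, IntPtrAdd(IntPtrConstant(8),
+// IntPtrConstant(24)) emits a single IntPtrConstant(32) node, and a zero
+// constant on either side returns the other operand unchanged; IntPtrSub()
+// below folds analogously (two constants, or subtracting zero).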
+
+Node* CodeAssembler::IntPtrSub(Node* left, Node* right) {
+  intptr_t left_constant;
+  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  intptr_t right_constant;
+  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  if (is_left_constant) {
+    if (is_right_constant) {
+      return IntPtrConstant(left_constant - right_constant);
+    }
+  } else if (is_right_constant) {
+    if (right_constant == 0) {
+      return left;
+    }
+  }
+  return raw_assembler()->IntPtrSub(left, right);
+}
+
 Node* CodeAssembler::WordShl(Node* value, int shift) {
-  return (shift != 0) ? raw_assembler_->WordShl(value, IntPtrConstant(shift))
+  return (shift != 0) ? raw_assembler()->WordShl(value, IntPtrConstant(shift))
                       : value;
 }
 
 Node* CodeAssembler::WordShr(Node* value, int shift) {
-  return (shift != 0) ? raw_assembler_->WordShr(value, IntPtrConstant(shift))
+  return (shift != 0) ? raw_assembler()->WordShr(value, IntPtrConstant(shift))
                       : value;
 }
 
 Node* CodeAssembler::Word32Shr(Node* value, int shift) {
-  return (shift != 0) ? raw_assembler_->Word32Shr(value, Int32Constant(shift))
+  return (shift != 0) ? raw_assembler()->Word32Shr(value, Int32Constant(shift))
                       : value;
 }
 
 Node* CodeAssembler::ChangeUint32ToWord(Node* value) {
-  if (raw_assembler_->machine()->Is64()) {
-    value = raw_assembler_->ChangeUint32ToUint64(value);
+  if (raw_assembler()->machine()->Is64()) {
+    value = raw_assembler()->ChangeUint32ToUint64(value);
   }
   return value;
 }
 
 Node* CodeAssembler::ChangeInt32ToIntPtr(Node* value) {
-  if (raw_assembler_->machine()->Is64()) {
-    value = raw_assembler_->ChangeInt32ToInt64(value);
+  if (raw_assembler()->machine()->Is64()) {
+    value = raw_assembler()->ChangeInt32ToInt64(value);
   }
   return value;
 }
 
 Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
-  if (raw_assembler_->machine()->Is64()) {
-    return raw_assembler_->RoundInt64ToFloat64(value);
+  if (raw_assembler()->machine()->Is64()) {
+    return raw_assembler()->RoundInt64ToFloat64(value);
   }
-  return raw_assembler_->ChangeInt32ToFloat64(value);
+  return raw_assembler()->ChangeInt32ToFloat64(value);
 }
 
 #define DEFINE_CODE_ASSEMBLER_UNARY_OP(name) \
-  Node* CodeAssembler::name(Node* a) { return raw_assembler_->name(a); }
+  Node* CodeAssembler::name(Node* a) { return raw_assembler()->name(a); }
 CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
 #undef DEFINE_CODE_ASSEMBLER_UNARY_OP
 
 Node* CodeAssembler::Load(MachineType rep, Node* base) {
-  return raw_assembler_->Load(rep, base);
+  return raw_assembler()->Load(rep, base);
 }
 
-Node* CodeAssembler::Load(MachineType rep, Node* base, Node* index) {
-  return raw_assembler_->Load(rep, base, index);
+Node* CodeAssembler::Load(MachineType rep, Node* base, Node* offset) {
+  return raw_assembler()->Load(rep, base, offset);
 }
 
-Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* index) {
-  return raw_assembler_->AtomicLoad(rep, base, index);
+Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* offset) {
+  return raw_assembler()->AtomicLoad(rep, base, offset);
 }
 
 Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
@@ -303,28 +431,35 @@
               IntPtrConstant(root_index * kPointerSize));
 }
 
-Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* value) {
-  return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
+Node* CodeAssembler::Store(Node* base, Node* value) {
+  return raw_assembler()->Store(MachineRepresentation::kTagged, base, value,
+                                kFullWriteBarrier);
 }
 
-Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* index,
-                           Node* value) {
-  return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
+Node* CodeAssembler::Store(Node* base, Node* offset, Node* value) {
+  return raw_assembler()->Store(MachineRepresentation::kTagged, base, offset,
+                                value, kFullWriteBarrier);
+}
+
+Node* CodeAssembler::StoreWithMapWriteBarrier(Node* base, Node* offset,
+                                              Node* value) {
+  return raw_assembler()->Store(MachineRepresentation::kTagged, base, offset,
+                                value, kMapWriteBarrier);
 }
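+// Editor's note: the Store() overloads above now hard-code
+// MachineRepresentation::kTagged with a full write barrier; non-tagged
+// values go through StoreNoWriteBarrier() with an explicit representation.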
 
 Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
                                          Node* value) {
-  return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
+  return raw_assembler()->Store(rep, base, value, kNoWriteBarrier);
 }
 
 Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
-                                         Node* index, Node* value) {
-  return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
+                                         Node* offset, Node* value) {
+  return raw_assembler()->Store(rep, base, offset, value, kNoWriteBarrier);
 }
 
 Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
-                                 Node* index, Node* value) {
-  return raw_assembler_->AtomicStore(rep, base, index, value);
+                                 Node* offset, Node* value) {
+  return raw_assembler()->AtomicStore(rep, base, offset, value);
 }
 
 Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
@@ -336,11 +471,11 @@
 }
 
 Node* CodeAssembler::Retain(Node* value) {
-  return raw_assembler_->Retain(value);
+  return raw_assembler()->Retain(value);
 }
 
 Node* CodeAssembler::Projection(int index, Node* value) {
-  return raw_assembler_->Projection(index, value);
+  return raw_assembler()->Projection(index, value);
 }
 
 void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
@@ -350,11 +485,11 @@
   exception.MergeVariables();
   DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
 
-  raw_assembler_->Continuations(node, success.label_, exception.label_);
+  raw_assembler()->Continuations(node, success.label_, exception.label_);
 
   Bind(&exception);
-  const Operator* op = raw_assembler_->common()->IfException();
-  Node* exception_value = raw_assembler_->AddNode(op, node, node);
+  const Operator* op = raw_assembler()->common()->IfException();
+  Node* exception_value = raw_assembler()->AddNode(op, node, node);
   if (exception_var != nullptr) {
     exception_var->Bind(exception_value);
   }
@@ -363,627 +498,161 @@
   Bind(&success);
 }
 
-Node* CodeAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
-                           Node** args) {
+template <class... TArgs>
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function, Node* context,
+                                 TArgs... args) {
+  int argc = static_cast<int>(sizeof...(args));
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, argc, Operator::kNoProperties,
+      CallDescriptor::kNoFlags);
+  int return_count = static_cast<int>(desc->ReturnCount());
+
+  Node* centry =
+      HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
+  Node* ref = ExternalConstant(ExternalReference(function, isolate()));
+  Node* arity = Int32Constant(argc);
+
+  Node* nodes[] = {centry, args..., ref, arity, context};
+
   CallPrologue();
-  Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
+  Node* return_value = raw_assembler()->CallN(desc, arraysize(nodes), nodes);
   CallEpilogue();
   return return_value;
 }
 
-Node* CodeAssembler::TailCallN(CallDescriptor* descriptor, Node* code_target,
-                               Node** args) {
-  return raw_assembler_->TailCallN(descriptor, code_target, args);
+// Instantiate CallRuntime() with up to 6 arguments.
+#define INSTANTIATE(...)                                       \
+  template V8_EXPORT_PRIVATE Node* CodeAssembler::CallRuntime( \
+      Runtime::FunctionId, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
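+// Editor's sketch (illustrative; the runtime function id is only an
+// example): call sites now pass arguments variadically, e.g.
+//
+//   Node* result = CallRuntime(Runtime::kToName, context, input);
+//
+// which expands into the {centry, input, ref, arity, context} input array
+// built above.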
+
+template <class... TArgs>
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function,
+                                     Node* context, TArgs... args) {
+  int argc = static_cast<int>(sizeof...(args));
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, argc, Operator::kNoProperties,
+      CallDescriptor::kSupportsTailCalls);
+  int return_count = static_cast<int>(desc->ReturnCount());
+
+  Node* centry =
+      HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
+  Node* ref = ExternalConstant(ExternalReference(function, isolate()));
+  Node* arity = Int32Constant(argc);
+
+  Node* nodes[] = {centry, args..., ref, arity, context};
+
+  return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
 }
 
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                 Node* context) {
-  CallPrologue();
-  Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
-  CallEpilogue();
-  return return_value;
+// Instantiate TailCallRuntime() with up to 6 arguments.
+#define INSTANTIATE(...)                                           \
+  template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallRuntime( \
+      Runtime::FunctionId, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
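+// Editor's note: unlike CallRuntime(), the tail-call variant requests
+// CallDescriptor::kSupportsTailCalls and skips the call prologue/epilogue,
+// since control never returns to the current frame.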
+
+template <class... TArgs>
+Node* CodeAssembler::CallStubR(const CallInterfaceDescriptor& descriptor,
+                               size_t result_size, Node* target, Node* context,
+                               TArgs... args) {
+  Node* nodes[] = {target, args..., context};
+  return CallStubN(descriptor, result_size, arraysize(nodes), nodes);
 }
 
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
-                                 Node* arg1) {
-  CallPrologue();
-  Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
-                                 Node* arg1, Node* arg2) {
-  CallPrologue();
-  Node* return_value =
-      raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
-                                 Node* arg1, Node* arg2, Node* arg3) {
-  CallPrologue();
-  Node* return_value =
-      raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
-                                 Node* arg1, Node* arg2, Node* arg3,
-                                 Node* arg4) {
-  CallPrologue();
-  Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
-                                                    arg3, arg4, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
-                                 Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                                 Node* arg5) {
-  CallPrologue();
-  Node* return_value = raw_assembler_->CallRuntime5(function_id, arg1, arg2,
-                                                    arg3, arg4, arg5, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context) {
-  return raw_assembler_->TailCallRuntime0(function_id, context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1) {
-  return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2) {
-  return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2,
-                                     Node* arg3) {
-  return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
-                                          context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2,
-                                     Node* arg3, Node* arg4) {
-  return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
-                                          context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2,
-                                     Node* arg3, Node* arg4, Node* arg5) {
-  return raw_assembler_->TailCallRuntime5(function_id, arg1, arg2, arg3, arg4,
-                                          arg5, context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2,
-                                     Node* arg3, Node* arg4, Node* arg5,
-                                     Node* arg6) {
-  return raw_assembler_->TailCallRuntime6(function_id, arg1, arg2, arg3, arg4,
-                                          arg5, arg6, context);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
-                              Node* arg1, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return CallStub(callable.descriptor(), target, context, arg1, result_size);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
-                              Node* arg1, Node* arg2, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return CallStub(callable.descriptor(), target, context, arg1, arg2,
-                  result_size);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
-                              Node* arg1, Node* arg2, Node* arg3,
-                              size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
-                  result_size);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
-                              Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                              size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
-                  arg4, result_size);
-}
-
-Node* CodeAssembler::CallStubN(Callable const& callable, Node** args,
-                               size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return CallStubN(callable.descriptor(), target, args, result_size);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(1);
-  args[0] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, Node* arg1,
-                              size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(2);
-  args[0] = arg1;
-  args[1] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, Node* arg1,
-                              Node* arg2, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(3);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, Node* arg1,
-                              Node* arg2, Node* arg3, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(4);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, Node* arg1,
-                              Node* arg2, Node* arg3, Node* arg4,
-                              size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(5);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, Node* arg1,
-                              Node* arg2, Node* arg3, Node* arg4, Node* arg5,
-                              size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(6);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = arg5;
-  args[5] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, const Arg& arg1,
-                              const Arg& arg2, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  const int kArgsCount = 3;
-  Node** args = zone()->NewArray<Node*>(kArgsCount);
-  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
-  args[arg1.index] = arg1.value;
-  args[arg2.index] = arg2.value;
-  args[kArgsCount - 1] = context;
-  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, const Arg& arg1,
-                              const Arg& arg2, const Arg& arg3,
-                              size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  const int kArgsCount = 4;
-  Node** args = zone()->NewArray<Node*>(kArgsCount);
-  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
-  args[arg1.index] = arg1.value;
-  args[arg2.index] = arg2.value;
-  args[arg3.index] = arg3.value;
-  args[kArgsCount - 1] = context;
-  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, const Arg& arg1,
-                              const Arg& arg2, const Arg& arg3, const Arg& arg4,
-                              size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  const int kArgsCount = 5;
-  Node** args = zone()->NewArray<Node*>(kArgsCount);
-  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
-  args[arg1.index] = arg1.value;
-  args[arg2.index] = arg2.value;
-  args[arg3.index] = arg3.value;
-  args[arg4.index] = arg4.value;
-  args[kArgsCount - 1] = context;
-  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, const Arg& arg1,
-                              const Arg& arg2, const Arg& arg3, const Arg& arg4,
-                              const Arg& arg5, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  const int kArgsCount = 6;
-  Node** args = zone()->NewArray<Node*>(kArgsCount);
-  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
-  args[arg1.index] = arg1.value;
-  args[arg2.index] = arg2.value;
-  args[arg3.index] = arg3.value;
-  args[arg4.index] = arg4.value;
-  args[arg5.index] = arg5.value;
-  args[kArgsCount - 1] = context;
-  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
-  return CallN(call_descriptor, target, args);
-}
+// Instantiate CallStubR() with up to 6 arguments.
+#define INSTANTIATE(...)                                     \
+  template V8_EXPORT_PRIVATE Node* CodeAssembler::CallStubR( \
+      const CallInterfaceDescriptor& descriptor, size_t, Node*, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
 
 Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
-                               int js_parameter_count, Node* target,
-                               Node** args, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor,
-      descriptor.GetStackParameterCount() + js_parameter_count,
+                               size_t result_size, int input_count,
+                               Node* const* inputs) {
+  // Every call has at least two inputs: the code target and the context.
+  DCHECK_LE(2, input_count);
+  int argc = input_count - 2;
+  DCHECK_LE(descriptor.GetParameterCount(), argc);
+  // Extra arguments not mentioned in the descriptor are passed on the stack.
+  int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
+  DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, stack_parameter_count,
       CallDescriptor::kNoFlags, Operator::kNoProperties,
       MachineType::AnyTagged(), result_size);
 
-  return CallN(call_descriptor, target, args);
+  CallPrologue();
+  Node* return_value = raw_assembler()->CallN(desc, input_count, inputs);
+  CallEpilogue();
+  return return_value;
 }
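+// Editor's note (worked example): with a descriptor declaring 3 register
+// parameters and input_count == 7 (target + 5 arguments + context), argc is
+// 5, so stack_parameter_count is 2 and those two extra arguments are passed
+// on the stack, as the comment above describes.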
 
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
-                                  Node* arg1, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return TailCallStub(callable.descriptor(), target, context, arg1,
-                      result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
-                                  Node* arg1, Node* arg2, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
-                      result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
-                                  Node* arg1, Node* arg2, Node* arg3,
-                                  size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
-                      result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
-                                  Node* arg1, Node* arg2, Node* arg3,
-                                  Node* arg4, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
-                      arg4, result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
-                                  Node* arg1, Node* arg2, Node* arg3,
-                                  Node* arg4, Node* arg5, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
-                      arg4, arg5, result_size);
-}
-
+template <class... TArgs>
 Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+                                  Node* target, Node* context, TArgs... args) {
+  DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
+  size_t result_size = 1;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
       isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
       CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
       MachineType::AnyTagged(), result_size);
 
-  Node** args = zone()->NewArray<Node*>(2);
-  args[0] = arg1;
-  args[1] = context;
+  Node* nodes[] = {target, args..., context};
 
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
+  return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
 }
 
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
+// Instantiate TailCallStub() with up to 6 arguments.
+#define INSTANTIATE(...)                                        \
+  template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallStub( \
+      const CallInterfaceDescriptor& descriptor, Node*, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
 
-  Node** args = zone()->NewArray<Node*>(3);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = context;
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, Node* arg3, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(4);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = context;
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, Node* arg3, Node* arg4,
-                                  size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(5);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = context;
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, Node* arg3, Node* arg4,
-                                  Node* arg5, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(6);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = arg5;
-  args[5] = context;
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, Node* arg3, Node* arg4,
-                                  Node* arg5, Node* arg6, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(7);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = arg5;
-  args[5] = arg6;
-  args[6] = context;
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, const Arg& arg1,
-                                  const Arg& arg2, const Arg& arg3,
-                                  const Arg& arg4, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  const int kArgsCount = 5;
-  Node** args = zone()->NewArray<Node*>(kArgsCount);
-  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
-  args[arg1.index] = arg1.value;
-  args[arg2.index] = arg2.value;
-  args[arg3.index] = arg3.value;
-  args[arg4.index] = arg4.value;
-  args[kArgsCount - 1] = context;
-  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, const Arg& arg1,
-                                  const Arg& arg2, const Arg& arg3,
-                                  const Arg& arg4, const Arg& arg5,
-                                  size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  const int kArgsCount = 6;
-  Node** args = zone()->NewArray<Node*>(kArgsCount);
-  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
-  args[arg1.index] = arg1.value;
-  args[arg2.index] = arg2.value;
-  args[arg3.index] = arg3.value;
-  args[arg4.index] = arg4.value;
-  args[arg5.index] = arg5.value;
-  args[kArgsCount - 1] = context;
-  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
+template <class... TArgs>
 Node* CodeAssembler::TailCallBytecodeDispatch(
-    const CallInterfaceDescriptor& interface_descriptor,
-    Node* code_target_address, Node** args) {
-  CallDescriptor* descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
-      isolate(), zone(), interface_descriptor,
-      interface_descriptor.GetStackParameterCount());
-  return raw_assembler_->TailCallN(descriptor, code_target_address, args);
+    const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) {
+  DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
+  CallDescriptor* desc = Linkage::GetBytecodeDispatchCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount());
+
+  Node* nodes[] = {target, args...};
+  return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
 }
 
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
-                            Node* function, Node* receiver,
-                            size_t result_size) {
-  const int argc = 0;
-  Node* target = HeapConstant(callable.code());
+// Instantiate TailCallBytecodeDispatch() with 4 arguments.
+template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
+    const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*,
+    Node*, Node*);
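+// Editor's note: bytecode dispatch is the one call path here without a
+// trailing context input; its nodes array is just {target, args...}.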
 
-  Node** args = zone()->NewArray<Node*>(argc + 4);
-  args[0] = function;
-  args[1] = Int32Constant(argc);
-  args[2] = receiver;
-  args[3] = context;
-
-  return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
-}
-
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
-                            Node* function, Node* receiver, Node* arg1,
-                            size_t result_size) {
-  const int argc = 1;
-  Node* target = HeapConstant(callable.code());
-
-  Node** args = zone()->NewArray<Node*>(argc + 4);
-  args[0] = function;
-  args[1] = Int32Constant(argc);
-  args[2] = receiver;
-  args[3] = arg1;
-  args[4] = context;
-
-  return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
-}
-
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
-                            Node* function, Node* receiver, Node* arg1,
-                            Node* arg2, size_t result_size) {
-  const int argc = 2;
-  Node* target = HeapConstant(callable.code());
-
-  Node** args = zone()->NewArray<Node*>(argc + 4);
-  args[0] = function;
-  args[1] = Int32Constant(argc);
-  args[2] = receiver;
-  args[3] = arg1;
-  args[4] = arg2;
-  args[5] = context;
-
-  return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
-}
-
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
-                            Node* function, Node* receiver, Node* arg1,
-                            Node* arg2, Node* arg3, size_t result_size) {
-  const int argc = 3;
-  Node* target = HeapConstant(callable.code());
-
-  Node** args = zone()->NewArray<Node*>(argc + 4);
-  args[0] = function;
-  args[1] = Int32Constant(argc);
-  args[2] = receiver;
-  args[3] = arg1;
-  args[4] = arg2;
-  args[5] = arg3;
-  args[6] = context;
-
-  return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
+Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
+                                    int input_count, Node* const* inputs) {
+  CallDescriptor* desc = Linkage::GetSimplifiedCDescriptor(zone(), signature);
+  return raw_assembler()->CallN(desc, input_count, inputs);
 }
 
 Node* CodeAssembler::CallCFunction2(MachineType return_type,
                                     MachineType arg0_type,
                                     MachineType arg1_type, Node* function,
                                     Node* arg0, Node* arg1) {
-  return raw_assembler_->CallCFunction2(return_type, arg0_type, arg1_type,
-                                        function, arg0, arg1);
+  return raw_assembler()->CallCFunction2(return_type, arg0_type, arg1_type,
+                                         function, arg0, arg1);
 }
 
-void CodeAssembler::Goto(CodeAssembler::Label* label) {
+Node* CodeAssembler::CallCFunction3(MachineType return_type,
+                                    MachineType arg0_type,
+                                    MachineType arg1_type,
+                                    MachineType arg2_type, Node* function,
+                                    Node* arg0, Node* arg1, Node* arg2) {
+  return raw_assembler()->CallCFunction3(return_type, arg0_type, arg1_type,
+                                         arg2_type, function, arg0, arg1, arg2);
+}
+
+void CodeAssembler::Goto(Label* label) {
   label->MergeVariables();
-  raw_assembler_->Goto(label->label_);
+  raw_assembler()->Goto(label->label_);
 }
 
 void CodeAssembler::GotoIf(Node* condition, Label* true_label) {
@@ -992,18 +661,18 @@
   Bind(&false_label);
 }
 
-void CodeAssembler::GotoUnless(Node* condition, Label* false_label) {
+void CodeAssembler::GotoIfNot(Node* condition, Label* false_label) {
   Label true_label(this);
   Branch(condition, &true_label, false_label);
   Bind(&true_label);
 }
 
-void CodeAssembler::Branch(Node* condition, CodeAssembler::Label* true_label,
-                           CodeAssembler::Label* false_label) {
+void CodeAssembler::Branch(Node* condition, Label* true_label,
+                           Label* false_label) {
   true_label->MergeVariables();
   false_label->MergeVariables();
-  return raw_assembler_->Branch(condition, true_label->label_,
-                                false_label->label_);
+  return raw_assembler()->Branch(condition, true_label->label_,
+                                 false_label->label_);
 }
 
 void CodeAssembler::Switch(Node* index, Label* default_label,
@@ -1017,75 +686,68 @@
     case_labels[i]->MergeVariables();
     default_label->MergeVariables();
   }
-  return raw_assembler_->Switch(index, default_label->label_, case_values,
-                                labels, case_count);
-}
-
-Node* CodeAssembler::Select(Node* condition, Node* true_value,
-                            Node* false_value, MachineRepresentation rep) {
-  Variable value(this, rep);
-  Label vtrue(this), vfalse(this), end(this);
-  Branch(condition, &vtrue, &vfalse);
-
-  Bind(&vtrue);
-  {
-    value.Bind(true_value);
-    Goto(&end);
-  }
-  Bind(&vfalse);
-  {
-    value.Bind(false_value);
-    Goto(&end);
-  }
-
-  Bind(&end);
-  return value.value();
+  return raw_assembler()->Switch(index, default_label->label_, case_values,
+                                 labels, case_count);
 }
 
 // RawMachineAssembler delegate helpers:
-Isolate* CodeAssembler::isolate() const { return raw_assembler_->isolate(); }
+Isolate* CodeAssembler::isolate() const { return raw_assembler()->isolate(); }
 
 Factory* CodeAssembler::factory() const { return isolate()->factory(); }
 
-Zone* CodeAssembler::zone() const { return raw_assembler_->zone(); }
+Zone* CodeAssembler::zone() const { return raw_assembler()->zone(); }
+
+RawMachineAssembler* CodeAssembler::raw_assembler() const {
+  return state_->raw_assembler_.get();
+}
 
 // The core implementation of Variable is stored through an indirection so
 // that it can outlive the Variable declarations themselves, which are often
 // block-scoped. This is needed to ensure that variable binding and merging
 // through phis can be properly verified.
-class CodeAssembler::Variable::Impl : public ZoneObject {
+class CodeAssemblerVariable::Impl : public ZoneObject {
  public:
   explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
   Node* value_;
   MachineRepresentation rep_;
 };
 
-CodeAssembler::Variable::Variable(CodeAssembler* assembler,
-                                  MachineRepresentation rep)
-    : impl_(new (assembler->zone()) Impl(rep)), assembler_(assembler) {
-  assembler->variables_.insert(impl_);
+CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
+                                             MachineRepresentation rep)
+    : impl_(new (assembler->zone()) Impl(rep)), state_(assembler->state()) {
+  state_->variables_.insert(impl_);
 }
 
-CodeAssembler::Variable::~Variable() { assembler_->variables_.erase(impl_); }
+CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
+                                             MachineRepresentation rep,
+                                             Node* initial_value)
+    : CodeAssemblerVariable(assembler, rep) {
+  Bind(initial_value);
+}
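+// Editor's sketch (illustrative): the new constructor lets a variable be
+// declared and bound in one step, e.g.
+//
+//   Variable var(this, MachineRepresentation::kTagged, SmiConstant(0));
+//
+// instead of a declaration followed by a separate Bind() call.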
 
-void CodeAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
+CodeAssemblerVariable::~CodeAssemblerVariable() {
+  state_->variables_.erase(impl_);
+}
 
-Node* CodeAssembler::Variable::value() const {
+void CodeAssemblerVariable::Bind(Node* value) { impl_->value_ = value; }
+
+Node* CodeAssemblerVariable::value() const {
   DCHECK_NOT_NULL(impl_->value_);
   return impl_->value_;
 }
 
-MachineRepresentation CodeAssembler::Variable::rep() const {
-  return impl_->rep_;
-}
+MachineRepresentation CodeAssemblerVariable::rep() const { return impl_->rep_; }
 
-bool CodeAssembler::Variable::IsBound() const {
-  return impl_->value_ != nullptr;
-}
+bool CodeAssemblerVariable::IsBound() const { return impl_->value_ != nullptr; }
 
-CodeAssembler::Label::Label(CodeAssembler* assembler, size_t vars_count,
-                            Variable** vars, CodeAssembler::Label::Type type)
-    : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+CodeAssemblerLabel::CodeAssemblerLabel(CodeAssembler* assembler,
+                                       size_t vars_count,
+                                       CodeAssemblerVariable** vars,
+                                       CodeAssemblerLabel::Type type)
+    : bound_(false),
+      merge_count_(0),
+      state_(assembler->state()),
+      label_(nullptr) {
   void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
   label_ = new (buffer)
       RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
@@ -1095,9 +757,11 @@
   }
 }
 
-void CodeAssembler::Label::MergeVariables() {
+CodeAssemblerLabel::~CodeAssemblerLabel() { label_->~RawMachineLabel(); }
+
+void CodeAssemblerLabel::MergeVariables() {
   ++merge_count_;
-  for (auto var : assembler_->variables_) {
+  for (auto var : state_->variables_) {
     size_t count = 0;
     Node* node = var->value_;
     if (node != nullptr) {
@@ -1122,7 +786,7 @@
       auto phi = variable_phis_.find(var);
       if (phi != variable_phis_.end()) {
         DCHECK_NOT_NULL(phi->second);
-        assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
+        state_->raw_assembler_->AppendPhiInput(phi->second, node);
       } else {
         auto i = variable_merges_.find(var);
         if (i != variable_merges_.end()) {
@@ -1141,13 +805,13 @@
   }
 }
 
-void CodeAssembler::Label::Bind() {
+void CodeAssemblerLabel::Bind() {
   DCHECK(!bound_);
-  assembler_->raw_assembler_->Bind(label_);
+  state_->raw_assembler_->Bind(label_);
 
   // Make sure that all variables that have changed along any path up to this
   // point are marked as merge variables.
-  for (auto var : assembler_->variables_) {
+  for (auto var : state_->variables_) {
     Node* shared_value = nullptr;
     auto i = variable_merges_.find(var);
     if (i != variable_merges_.end()) {
@@ -1165,22 +829,23 @@
   }
 
   for (auto var : variable_phis_) {
-    CodeAssembler::Variable::Impl* var_impl = var.first;
+    CodeAssemblerVariable::Impl* var_impl = var.first;
     auto i = variable_merges_.find(var_impl);
-    // If the following assert fires, then a variable that has been marked as
+    // If the following asserts fire, then a variable that has been marked as
     // being merged at the label--either by explicitly marking it so in the
     // label constructor or by having seen different bound values at branches
     // into the label--doesn't have a bound value along all of the paths that
     // have been merged into the label up to this point.
-    DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
-    Node* phi = assembler_->raw_assembler_->Phi(
+    DCHECK(i != variable_merges_.end());
+    DCHECK_EQ(i->second.size(), merge_count_);
+    Node* phi = state_->raw_assembler_->Phi(
         var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
     variable_phis_[var_impl] = phi;
   }
 
   // Bind all variables to a merge phi, the common value along all paths or
   // null.
-  for (auto var : assembler_->variables_) {
+  for (auto var : state_->variables_) {
     auto i = variable_phis_.find(var);
     if (i != variable_phis_.end()) {
       var->value_ = i->second;
diff --git a/src/compiler/code-assembler.h b/src/compiler/code-assembler.h
index 1f364d9..8808a82 100644
--- a/src/compiler/code-assembler.h
+++ b/src/compiler/code-assembler.h
@@ -12,6 +12,7 @@
 // Do not include anything from src/compiler here!
 #include "src/allocation.h"
 #include "src/builtins/builtins.h"
+#include "src/code-factory.h"
 #include "src/globals.h"
 #include "src/heap/heap.h"
 #include "src/machine-type.h"
@@ -30,10 +31,17 @@
 namespace compiler {
 
 class CallDescriptor;
+class CodeAssemblerLabel;
+class CodeAssemblerVariable;
+class CodeAssemblerState;
 class Node;
 class RawMachineAssembler;
 class RawMachineLabel;
 
+typedef ZoneList<CodeAssemblerVariable*> CodeAssemblerVariableList;
+
+typedef std::function<void()> CodeAssemblerCallback;
+
 #define CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
   V(Float32Equal)                                \
   V(Float32LessThan)                             \
@@ -79,9 +87,7 @@
   V(Float64Pow)                            \
   V(Float64InsertLowWord32)                \
   V(Float64InsertHighWord32)               \
-  V(IntPtrAdd)                             \
   V(IntPtrAddWithOverflow)                 \
-  V(IntPtrSub)                             \
   V(IntPtrSubWithOverflow)                 \
   V(IntPtrMul)                             \
   V(Int32Add)                              \
@@ -157,6 +163,7 @@
   V(Float64RoundTiesEven)               \
   V(Float64RoundTruncate)               \
   V(Word32Clz)                          \
+  V(Word32Not)                          \
   V(Word32BinaryNot)
 
 // A "public" interface used by components outside of compiler directory to
@@ -175,22 +182,16 @@
 // clients, CodeAssembler also provides an abstraction for creating variables
 // and enhanced Label functionality to merge variable values along paths where
 // they have differing values, including loops.
+//
+// The CodeAssembler itself is stateless (and instances are expected to be
+// temporary-scoped and short-lived); all its state is encapsulated into
+// a CodeAssemblerState instance.
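+// (Editor's note: this split means several temporary CodeAssembler instances
+// may share one CodeAssemblerState, with code generated once from the state
+// via the static GenerateCode() below.)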
 class V8_EXPORT_PRIVATE CodeAssembler {
  public:
-  // Create with CallStub linkage.
-  // |result_size| specifies the number of results returned by the stub.
-  // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
-  CodeAssembler(Isolate* isolate, Zone* zone,
-                const CallInterfaceDescriptor& descriptor, Code::Flags flags,
-                const char* name, size_t result_size = 1);
+  explicit CodeAssembler(CodeAssemblerState* state) : state_(state) {}
+  ~CodeAssembler();
 
-  // Create with JSCall linkage.
-  CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
-                Code::Flags flags, const char* name);
-
-  virtual ~CodeAssembler();
-
-  Handle<Code> GenerateCode();
+  static Handle<Code> GenerateCode(CodeAssemblerState* state);
 
   bool Is64() const;
   bool IsFloat64RoundUpSupported() const;
@@ -198,24 +199,10 @@
   bool IsFloat64RoundTiesEvenSupported() const;
   bool IsFloat64RoundTruncateSupported() const;
 
-  class Label;
-  class Variable {
-   public:
-    explicit Variable(CodeAssembler* assembler, MachineRepresentation rep);
-    ~Variable();
-    void Bind(Node* value);
-    Node* value() const;
-    MachineRepresentation rep() const;
-    bool IsBound() const;
-
-   private:
-    friend class CodeAssembler;
-    class Impl;
-    Impl* impl_;
-    CodeAssembler* assembler_;
-  };
-
-  typedef ZoneList<Variable*> VariableList;
+  // Shortened aliases for use in CodeAssembler subclasses.
+  typedef CodeAssemblerLabel Label;
+  typedef CodeAssemblerVariable Variable;
+  typedef CodeAssemblerVariableList VariableList;
 
   // ===========================================================================
   // Base Assembler
@@ -229,6 +216,7 @@
   Node* SmiConstant(Smi* value);
   Node* SmiConstant(int value);
   Node* HeapConstant(Handle<HeapObject> object);
+  Node* CStringConstant(const char* str);
   Node* BooleanConstant(bool value);
   Node* ExternalConstant(ExternalReference address);
   Node* Float64Constant(double value);
@@ -240,24 +228,25 @@
   bool ToIntPtrConstant(Node* node, intptr_t& out_value);
 
   Node* Parameter(int value);
+  Node* GetJSContextParameter();
   void Return(Node* value);
+  void Return(Node* value1, Node* value2);
+  void Return(Node* value1, Node* value2, Node* value3);
   void PopAndReturn(Node* pop, Node* value);
 
   void DebugBreak();
+  void Unreachable();
   void Comment(const char* format, ...);
 
   void Bind(Label* label);
   void Goto(Label* label);
   void GotoIf(Node* condition, Label* true_label);
-  void GotoUnless(Node* condition, Label* false_label);
+  void GotoIfNot(Node* condition, Label* false_label);
   void Branch(Node* condition, Label* true_label, Label* false_label);
 
   void Switch(Node* index, Label* default_label, const int32_t* case_values,
               Label** case_labels, size_t case_count);
 
-  Node* Select(Node* condition, Node* true_value, Node* false_value,
-               MachineRepresentation rep = MachineRepresentation::kTagged);
-
   // Access to the frame pointer
   Node* LoadFramePointer();
   Node* LoadParentFramePointer();
@@ -267,19 +256,20 @@
 
   // Load raw memory location.
   Node* Load(MachineType rep, Node* base);
-  Node* Load(MachineType rep, Node* base, Node* index);
-  Node* AtomicLoad(MachineType rep, Node* base, Node* index);
+  Node* Load(MachineType rep, Node* base, Node* offset);
+  Node* AtomicLoad(MachineType rep, Node* base, Node* offset);
 
   // Load a value from the root array.
   Node* LoadRoot(Heap::RootListIndex root_index);
 
   // Store value to raw memory location.
-  Node* Store(MachineRepresentation rep, Node* base, Node* value);
-  Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
+  Node* Store(Node* base, Node* value);
+  Node* Store(Node* base, Node* offset, Node* value);
+  Node* StoreWithMapWriteBarrier(Node* base, Node* offset, Node* value);
   Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
-  Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
+  Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* offset,
                             Node* value);
-  Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+  Node* AtomicStore(MachineRepresentation rep, Node* base, Node* offset,
                     Node* value);
 
   // Store a value to the root array.
@@ -290,6 +280,9 @@
   CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP)
 #undef DECLARE_CODE_ASSEMBLER_BINARY_OP
 
+  Node* IntPtrAdd(Node* left, Node* right);
+  Node* IntPtrSub(Node* left, Node* right);
+
   Node* WordShl(Node* value, int shift);
   Node* WordShr(Node* value, int shift);
   Node* Word32Shr(Node* value, int shift);
@@ -316,149 +309,79 @@
   Node* Projection(int index, Node* value);
 
   // Calls
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
-                    Node* arg2);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
-                    Node* arg2, Node* arg3);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
-                    Node* arg2, Node* arg3, Node* arg4);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
-                    Node* arg2, Node* arg3, Node* arg4, Node* arg5);
+  template <class... TArgs>
+  Node* CallRuntime(Runtime::FunctionId function, Node* context, TArgs... args);
 
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2, Node* arg3);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2, Node* arg3, Node* arg4);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                        Node* arg5);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                        Node* arg5, Node* arg6);
+  template <class... TArgs>
+  Node* TailCallRuntime(Runtime::FunctionId function, Node* context,
+                        TArgs... args);
 
-  // A pair of a zero-based argument index and a value.
-  // It helps to write argument-order-independent code.
-  struct Arg {
-    Arg(int index, Node* value) : index(index), value(value) {}
-
-    int const index;
-    Node* const value;
-  };
-
-  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
-                 size_t result_size = 1);
-  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
-                 Node* arg2, size_t result_size = 1);
-  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
-                 Node* arg2, Node* arg3, size_t result_size = 1);
-  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
-                 Node* arg2, Node* arg3, Node* arg4, size_t result_size = 1);
-  Node* CallStubN(Callable const& callable, Node** args,
-                  size_t result_size = 1);
-
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, Node* arg2, Node* arg3,
-                 size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                 size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                 Node* arg5, size_t result_size = 1);
-
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, const Arg& arg1, const Arg& arg2,
-                 size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, const Arg& arg1, const Arg& arg2,
-                 const Arg& arg3, size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, const Arg& arg1, const Arg& arg2,
-                 const Arg& arg3, const Arg& arg4, size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, const Arg& arg1, const Arg& arg2,
-                 const Arg& arg3, const Arg& arg4, const Arg& arg5,
-                 size_t result_size = 1);
-
-  Node* CallStubN(const CallInterfaceDescriptor& descriptor,
-                  int js_parameter_count, Node* target, Node** args,
-                  size_t result_size = 1);
-  Node* CallStubN(const CallInterfaceDescriptor& descriptor, Node* target,
-                  Node** args, size_t result_size = 1) {
-    return CallStubN(descriptor, 0, target, args, result_size);
+  template <class... TArgs>
+  Node* CallStub(Callable const& callable, Node* context, TArgs... args) {
+    Node* target = HeapConstant(callable.code());
+    return CallStub(callable.descriptor(), target, context, args...);
   }
 
-  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
-                     size_t result_size = 1);
-  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
-                     Node* arg2, size_t result_size = 1);
-  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
-                     Node* arg2, Node* arg3, size_t result_size = 1);
-  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
-                     Node* arg2, Node* arg3, Node* arg4,
-                     size_t result_size = 1);
-  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
-                     Node* arg2, Node* arg3, Node* arg4, Node* arg5,
-                     size_t result_size = 1);
+  template <class... TArgs>
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, TArgs... args) {
+    return CallStubR(descriptor, 1, target, context, args...);
+  }
 
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, size_t result_size = 1);
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, Node* arg2,
-                     size_t result_size = 1);
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, Node* arg2, Node* arg3,
-                     size_t result_size = 1);
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, Node* arg2, Node* arg3,
-                     Node* arg4, size_t result_size = 1);
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, Node* arg2, Node* arg3,
-                     Node* arg4, Node* arg5, size_t result_size = 1);
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, Node* arg2, Node* arg3,
-                     Node* arg4, Node* arg5, Node* arg6,
-                     size_t result_size = 1);
+  template <class... TArgs>
+  Node* CallStubR(const CallInterfaceDescriptor& descriptor, size_t result_size,
+                  Node* target, Node* context, TArgs... args);
 
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, const Arg& arg1, const Arg& arg2,
-                     const Arg& arg3, const Arg& arg4, size_t result_size = 1);
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, const Arg& arg1, const Arg& arg2,
-                     const Arg& arg3, const Arg& arg4, const Arg& arg5,
-                     size_t result_size = 1);
+  Node* CallStubN(const CallInterfaceDescriptor& descriptor, size_t result_size,
+                  int input_count, Node* const* inputs);
 
+  template <class... TArgs>
+  Node* TailCallStub(Callable const& callable, Node* context, TArgs... args) {
+    Node* target = HeapConstant(callable.code());
+    return TailCallStub(callable.descriptor(), target, context, args...);
+  }
+
+  template <class... TArgs>
+  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                     Node* context, TArgs... args);
+
+  template <class... TArgs>
   Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
-                                 Node* code_target_address, Node** args);
+                                 Node* target, TArgs... args);
 
+  template <class... TArgs>
   Node* CallJS(Callable const& callable, Node* context, Node* function,
-               Node* receiver, size_t result_size = 1);
-  Node* CallJS(Callable const& callable, Node* context, Node* function,
-               Node* receiver, Node* arg1, size_t result_size = 1);
-  Node* CallJS(Callable const& callable, Node* context, Node* function,
-               Node* receiver, Node* arg1, Node* arg2, size_t result_size = 1);
-  Node* CallJS(Callable const& callable, Node* context, Node* function,
-               Node* receiver, Node* arg1, Node* arg2, Node* arg3,
-               size_t result_size = 1);
+               Node* receiver, TArgs... args) {
+    int argc = static_cast<int>(sizeof...(args));
+    Node* arity = Int32Constant(argc);
+    return CallStub(callable, context, function, arity, receiver, args...);
+  }
+
+  template <class... TArgs>
+  Node* ConstructJS(Callable const& callable, Node* context, Node* new_target,
+                    TArgs... args) {
+    int argc = static_cast<int>(sizeof...(args));
+    Node* arity = Int32Constant(argc);
+    Node* receiver = LoadRoot(Heap::kUndefinedValueRootIndex);
+
+    // Construct(target, new_target, arity, receiver, arguments...)
+    return CallStub(callable, context, new_target, new_target, arity, receiver,
+                    args...);
+  }
+
+  Node* CallCFunctionN(Signature<MachineType>* signature, int input_count,
+                       Node* const* inputs);
 
   // Call to a C function with two arguments.
   Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
                        MachineType arg1_type, Node* function, Node* arg0,
                        Node* arg1);
 
+  // Call to a C function with three arguments.
+  Node* CallCFunction3(MachineType return_type, MachineType arg0_type,
+                       MachineType arg1_type, MachineType arg2_type,
+                       Node* function, Node* arg0, Node* arg1, Node* arg2);
+
   // Exception handling support.
   void GotoIfException(Node* node, Label* if_exception,
                        Variable* exception_var = nullptr);
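
The fixed-arity CallRuntime, TailCallRuntime, CallStub, TailCallStub and
CallJS overloads above are collapsed into single variadic templates, with the
arity recovered at compile time from the parameter pack. A minimal call-site
sketch, assuming a CodeAssembler subclass and existing Node* values (the
runtime function ids are illustrative only):

    // One template now serves every arity; the pack expands inline.
    Node* str = CallRuntime(Runtime::kToString, context, input);
    Node* sum = CallRuntime(Runtime::kAdd, context, lhs, rhs);
    // CallJS derives its arity constant the same way:
    //   int argc = static_cast<int>(sizeof...(args));
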
@@ -468,45 +391,70 @@
   Isolate* isolate() const;
   Zone* zone() const;
 
+  CodeAssemblerState* state() { return state_; }
+
+  void BreakOnNode(int node_id);
+
  protected:
-  // Enables subclasses to perform operations before and after a call.
-  virtual void CallPrologue();
-  virtual void CallEpilogue();
+  void RegisterCallGenerationCallbacks(
+      const CodeAssemblerCallback& call_prologue,
+      const CodeAssemblerCallback& call_epilogue);
+  void UnregisterCallGenerationCallbacks();
 
  private:
-  CodeAssembler(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor,
-                Code::Flags flags, const char* name);
+  RawMachineAssembler* raw_assembler() const;
 
-  Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
-  Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+  // Calls respective callback registered in the state.
+  void CallPrologue();
+  void CallEpilogue();
 
-  std::unique_ptr<RawMachineAssembler> raw_assembler_;
-  Code::Flags flags_;
-  const char* name_;
-  bool code_generated_;
-  ZoneSet<Variable::Impl*> variables_;
+  CodeAssemblerState* state_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
 };
 
-class CodeAssembler::Label {
+class CodeAssemblerVariable {
+ public:
+  explicit CodeAssemblerVariable(CodeAssembler* assembler,
+                                 MachineRepresentation rep);
+  CodeAssemblerVariable(CodeAssembler* assembler, MachineRepresentation rep,
+                        Node* initial_value);
+  ~CodeAssemblerVariable();
+  void Bind(Node* value);
+  Node* value() const;
+  MachineRepresentation rep() const;
+  bool IsBound() const;
+
+ private:
+  friend class CodeAssemblerLabel;
+  friend class CodeAssemblerState;
+  class Impl;
+  Impl* impl_;
+  CodeAssemblerState* state_;
+};
+
+class CodeAssemblerLabel {
  public:
   enum Type { kDeferred, kNonDeferred };
 
-  explicit Label(
+  explicit CodeAssemblerLabel(
       CodeAssembler* assembler,
-      CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
-      : CodeAssembler::Label(assembler, 0, nullptr, type) {}
-  Label(CodeAssembler* assembler, const VariableList& merged_variables,
-        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
-      : CodeAssembler::Label(assembler, merged_variables.length(),
-                             &(merged_variables[0]), type) {}
-  Label(CodeAssembler* assembler, size_t count, Variable** vars,
-        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred);
-  Label(CodeAssembler* assembler, CodeAssembler::Variable* merged_variable,
-        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
-      : Label(assembler, 1, &merged_variable, type) {}
-  ~Label() {}
+      CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+      : CodeAssemblerLabel(assembler, 0, nullptr, type) {}
+  CodeAssemblerLabel(
+      CodeAssembler* assembler,
+      const CodeAssemblerVariableList& merged_variables,
+      CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+      : CodeAssemblerLabel(assembler, merged_variables.length(),
+                           &(merged_variables[0]), type) {}
+  CodeAssemblerLabel(
+      CodeAssembler* assembler, size_t count, CodeAssemblerVariable** vars,
+      CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred);
+  CodeAssemblerLabel(
+      CodeAssembler* assembler, CodeAssemblerVariable* merged_variable,
+      CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+      : CodeAssemblerLabel(assembler, 1, &merged_variable, type) {}
+  ~CodeAssemblerLabel();
 
  private:
   friend class CodeAssembler;
@@ -516,14 +464,53 @@
 
   bool bound_;
   size_t merge_count_;
-  CodeAssembler* assembler_;
+  CodeAssemblerState* state_;
   RawMachineLabel* label_;
   // Map of variables that need to be merged to their phi nodes (or placeholders
   // for those phis).
-  std::map<Variable::Impl*, Node*> variable_phis_;
+  std::map<CodeAssemblerVariable::Impl*, Node*> variable_phis_;
   // Map of variables to the list of value nodes that have been added from each
   // merge path in their order of merging.
-  std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
+  std::map<CodeAssemblerVariable::Impl*, std::vector<Node*>> variable_merges_;
+};
+
+class V8_EXPORT_PRIVATE CodeAssemblerState {
+ public:
+  // Create with CallStub linkage.
+  // |result_size| specifies the number of results returned by the stub.
+  // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
+  CodeAssemblerState(Isolate* isolate, Zone* zone,
+                     const CallInterfaceDescriptor& descriptor,
+                     Code::Flags flags, const char* name,
+                     size_t result_size = 1);
+
+  // Create with JSCall linkage.
+  CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
+                     Code::Flags flags, const char* name);
+
+  ~CodeAssemblerState();
+
+  const char* name() const { return name_; }
+  int parameter_count() const;
+
+ private:
+  friend class CodeAssembler;
+  friend class CodeAssemblerLabel;
+  friend class CodeAssemblerVariable;
+
+  CodeAssemblerState(Isolate* isolate, Zone* zone,
+                     CallDescriptor* call_descriptor, Code::Flags flags,
+                     const char* name);
+
+  std::unique_ptr<RawMachineAssembler> raw_assembler_;
+  Code::Flags flags_;
+  const char* name_;
+  bool code_generated_;
+  ZoneSet<CodeAssemblerVariable::Impl*> variables_;
+  CodeAssemblerCallback call_prologue_;
+  CodeAssemblerCallback call_epilogue_;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeAssemblerState);
 };
 
 }  // namespace compiler
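
Taken together, these code-assembler.h changes split the former monolithic
builder into a thin CodeAssembler view over an explicitly owned
CodeAssemblerState, so the state can outlive any particular assembler and be
handed to the static GenerateCode(). A minimal usage sketch under that
reading (the descriptor, flags, and stub body are placeholders):

    // All mutable build state lives in CodeAssemblerState.
    CodeAssemblerState state(isolate, zone, descriptor,
                             Code::ComputeFlags(Code::STUB), "ExampleStub");
    {
      CodeAssembler assembler(&state);             // non-owning view
      assembler.Return(assembler.SmiConstant(0));
    }                                              // assembler may go away...
    Handle<Code> code =
        CodeAssembler::GenerateCode(&state);       // ...before code generation
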
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index 8bf3a9e..bdedbec 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -67,6 +67,14 @@
     return static_cast<int16_t>(InputInt32(index));
   }
 
+  uint8_t InputInt3(size_t index) {
+    return static_cast<uint8_t>(InputInt32(index) & 0x7);
+  }
+
+  uint8_t InputInt4(size_t index) {
+    return static_cast<uint8_t>(InputInt32(index) & 0xF);
+  }
+
   uint8_t InputInt5(size_t index) {
     return static_cast<uint8_t>(InputInt32(index) & 0x1F);
   }
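
InputInt3 and InputInt4 follow the existing InputInt5 pattern: truncate the
32-bit immediate operand to its low n bits. A worked example of the masking
arithmetic (not V8 code, just the two new constants in action):

    // 0x7 keeps the low 3 bits, 0xF the low 4.
    uint8_t low3 = static_cast<uint8_t>(10 & 0x7);  // 0b1010 -> 0b010 == 2
    uint8_t low4 = static_cast<uint8_t>(10 & 0xF);  // 0b1010 -> 0b1010 == 10
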
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index c69e86e..bbd9452 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -56,6 +56,7 @@
       jump_tables_(nullptr),
       ools_(nullptr),
       osr_pc_offset_(-1),
+      optimized_out_literal_id_(-1),
       source_position_table_builder_(code->zone(),
                                      info->SourcePositionRecordingMode()) {
   for (int i = 0; i < code->InstructionBlockCount(); ++i) {
@@ -71,6 +72,7 @@
   frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
 }
 
+
 Handle<Code> CodeGenerator::GenerateCode() {
   CompilationInfo* info = this->info();
 
@@ -79,6 +81,11 @@
   // the frame (that is done in AssemblePrologue).
   FrameScope frame_scope(masm(), StackFrame::MANUAL);
 
+  if (info->is_source_positions_enabled()) {
+    SourcePosition source_position(info->shared_info()->start_position());
+    AssembleSourcePosition(source_position);
+  }
+
   // Place function entry hook if requested to do so.
   if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm());
@@ -189,8 +196,7 @@
   // Assemble all eager deoptimization exits.
   for (DeoptimizationExit* exit : deoptimization_exits_) {
     masm()->bind(exit->label());
-    AssembleDeoptimizerCall(exit->deoptimization_id(), Deoptimizer::EAGER,
-                            exit->pos());
+    AssembleDeoptimizerCall(exit->deoptimization_id(), exit->pos());
   }
 
   // Ensure there is space for lazy deoptimization in the code.
@@ -392,6 +398,10 @@
 CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
     Instruction* instr, const InstructionBlock* block) {
   int first_unused_stack_slot;
+  FlagsMode mode = FlagsModeField::decode(instr->opcode());
+  if (mode != kFlags_trap) {
+    AssembleSourcePosition(instr);
+  }
   bool adjust_stack =
       GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot);
   if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
@@ -404,12 +414,10 @@
   if (instr->IsJump() && block->must_deconstruct_frame()) {
     AssembleDeconstructFrame();
   }
-  AssembleSourcePosition(instr);
   // Assemble architecture-specific code for the instruction.
   CodeGenResult result = AssembleArchInstruction(instr);
   if (result != kSuccess) return result;
 
-  FlagsMode mode = FlagsModeField::decode(instr->opcode());
   FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
   switch (mode) {
     case kFlags_branch: {
@@ -461,6 +469,10 @@
       AssembleArchBoolean(instr, condition);
       break;
     }
+    case kFlags_trap: {
+      AssembleArchTrap(instr, condition);
+      break;
+    }
     case kFlags_none: {
       break;
     }
@@ -468,10 +480,14 @@
   return kSuccess;
 }
 
-
 void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
   SourcePosition source_position = SourcePosition::Unknown();
+  if (instr->IsNop() && instr->AreMovesRedundant()) return;
   if (!code()->GetSourcePosition(instr, &source_position)) return;
+  AssembleSourcePosition(source_position);
+}
+
+void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
   if (source_position == current_source_position_) return;
   current_source_position_ = source_position;
   if (!source_position.IsKnown()) return;
@@ -481,7 +497,13 @@
     CompilationInfo* info = this->info();
     if (!info->parse_info()) return;
     std::ostringstream buffer;
-    buffer << "-- " << source_position.InliningStack(info) << " --";
+    buffer << "-- ";
+    if (FLAG_trace_turbo) {
+      buffer << source_position;
+    } else {
+      buffer << source_position.InliningStack(info);
+    }
+    buffer << " --";
     masm()->RecordComment(StrDup(buffer.str().c_str()));
   }
 }
@@ -628,15 +650,6 @@
       deopt_state_id = BuildTranslation(instr, -1, frame_state_offset,
                                         OutputFrameStateCombine::Ignore());
     }
-#if DEBUG
-    // Make sure all the values live in stack slots or they are immediates.
-    // (The values should not live in register because registers are clobbered
-    // by calls.)
-    for (size_t i = 0; i < descriptor->GetSize(); i++) {
-      InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
-      CHECK(op->IsStackSlot() || op->IsFPStackSlot() || op->IsImmediate());
-    }
-#endif
     safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
   }
 }
@@ -658,6 +671,13 @@
   return code()->GetDeoptimizationEntry(state_id);
 }
 
+DeoptimizeKind CodeGenerator::GetDeoptimizationKind(
+    int deoptimization_id) const {
+  size_t const index = static_cast<size_t>(deoptimization_id);
+  DCHECK_LT(index, deoptimization_states_.size());
+  return deoptimization_states_[index]->kind();
+}
+
 DeoptimizeReason CodeGenerator::GetDeoptimizationReason(
     int deoptimization_id) const {
   size_t const index = static_cast<size_t>(deoptimization_id);
@@ -666,19 +686,41 @@
 }
 
 void CodeGenerator::TranslateStateValueDescriptor(
-    StateValueDescriptor* desc, Translation* translation,
-    InstructionOperandIterator* iter) {
+    StateValueDescriptor* desc, StateValueList* nested,
+    Translation* translation, InstructionOperandIterator* iter) {
+  // Note: if {translation} is nullptr, we just skip over the relevant
+  // instruction operands without emitting anything.
   if (desc->IsNested()) {
-    translation->BeginCapturedObject(static_cast<int>(desc->size()));
-    for (size_t index = 0; index < desc->fields().size(); index++) {
-      TranslateStateValueDescriptor(&desc->fields()[index], translation, iter);
+    if (translation != nullptr) {
+      translation->BeginCapturedObject(static_cast<int>(nested->size()));
+    }
+    for (auto field : *nested) {
+      TranslateStateValueDescriptor(field.desc, field.nested, translation,
+                                    iter);
+    }
+  } else if (desc->IsArguments()) {
+    if (translation != nullptr) {
+      translation->BeginArgumentsObject(0);
     }
   } else if (desc->IsDuplicate()) {
-    translation->DuplicateObject(static_cast<int>(desc->id()));
+    if (translation != nullptr) {
+      translation->DuplicateObject(static_cast<int>(desc->id()));
+    }
+  } else if (desc->IsPlain()) {
+    InstructionOperand* op = iter->Advance();
+    if (translation != nullptr) {
+      AddTranslationForOperand(translation, iter->instruction(), op,
+                               desc->type());
+    }
   } else {
-    DCHECK(desc->IsPlain());
-    AddTranslationForOperand(translation, iter->instruction(), iter->Advance(),
-                             desc->type());
+    DCHECK(desc->IsOptimizedOut());
+    if (translation != nullptr) {
+      if (optimized_out_literal_id_ == -1) {
+        optimized_out_literal_id_ =
+            DefineDeoptimizationLiteral(isolate()->factory()->optimized_out());
+      }
+      translation->StoreLiteral(optimized_out_literal_id_);
+    }
   }
 }
 
@@ -686,44 +728,41 @@
 void CodeGenerator::TranslateFrameStateDescriptorOperands(
     FrameStateDescriptor* desc, InstructionOperandIterator* iter,
     OutputFrameStateCombine combine, Translation* translation) {
-  for (size_t index = 0; index < desc->GetSize(combine); index++) {
-    switch (combine.kind()) {
-      case OutputFrameStateCombine::kPushOutput: {
-        DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
-        size_t size_without_output =
-            desc->GetSize(OutputFrameStateCombine::Ignore());
-        // If the index is past the existing stack items in values_.
-        if (index >= size_without_output) {
-          // Materialize the result of the call instruction in this slot.
-          AddTranslationForOperand(
-              translation, iter->instruction(),
-              iter->instruction()->OutputAt(index - size_without_output),
-              MachineType::AnyTagged());
-          continue;
-        }
-        break;
+  size_t index = 0;
+  StateValueList* values = desc->GetStateValueDescriptors();
+  for (StateValueList::iterator it = values->begin(); it != values->end();
+       ++it, ++index) {
+    StateValueDescriptor* value_desc = (*it).desc;
+    if (combine.kind() == OutputFrameStateCombine::kPokeAt) {
+      // The result of the call should be placed at position
+      // [index_from_top] in the stack (overwriting whatever was
+      // previously there).
+      size_t index_from_top =
+          desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
+      if (index >= index_from_top &&
+          index < index_from_top + iter->instruction()->OutputCount()) {
+        DCHECK_NOT_NULL(translation);
+        AddTranslationForOperand(
+            translation, iter->instruction(),
+            iter->instruction()->OutputAt(index - index_from_top),
+            MachineType::AnyTagged());
+        // Skip the instruction operands.
+        TranslateStateValueDescriptor(value_desc, (*it).nested, nullptr, iter);
+        continue;
       }
-      case OutputFrameStateCombine::kPokeAt:
-        // The result of the call should be placed at position
-        // [index_from_top] in the stack (overwriting whatever was
-        // previously there).
-        size_t index_from_top =
-            desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
-        if (index >= index_from_top &&
-            index < index_from_top + iter->instruction()->OutputCount()) {
-          AddTranslationForOperand(
-              translation, iter->instruction(),
-              iter->instruction()->OutputAt(index - index_from_top),
-              MachineType::AnyTagged());
-          iter->Advance();  // We do not use this input, but we need to
-                            // advance, as the input got replaced.
-          continue;
-        }
-        break;
     }
-    StateValueDescriptor* value_desc = desc->GetStateValueDescriptor();
-    TranslateStateValueDescriptor(&value_desc->fields()[index], translation,
-                                  iter);
+    TranslateStateValueDescriptor(value_desc, (*it).nested, translation, iter);
+  }
+  DCHECK_EQ(desc->GetSize(OutputFrameStateCombine::Ignore()), index);
+
+  if (combine.kind() == OutputFrameStateCombine::kPushOutput) {
+    DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
+    for (size_t output = 0; output < combine.GetPushCount(); output++) {
+      // Materialize the result of the call instruction in this slot.
+      AddTranslationForOperand(translation, iter->instruction(),
+                               iter->instruction()->OutputAt(output),
+                               MachineType::AnyTagged());
+    }
   }
 }
 
@@ -768,8 +807,9 @@
       translation->BeginTailCallerFrame(shared_info_id);
       break;
     case FrameStateType::kConstructStub:
+      DCHECK(descriptor->bailout_id().IsValidForConstructStub());
       translation->BeginConstructStubFrame(
-          shared_info_id,
+          descriptor->bailout_id(), shared_info_id,
           static_cast<unsigned int>(descriptor->parameters_count()));
       break;
     case FrameStateType::kGetterStub:
@@ -803,7 +843,7 @@
   int deoptimization_id = static_cast<int>(deoptimization_states_.size());
 
   deoptimization_states_.push_back(new (zone()) DeoptimizationState(
-      descriptor->bailout_id(), translation.index(), pc_offset,
+      descriptor->bailout_id(), translation.index(), pc_offset, entry.kind(),
       entry.reason()));
 
   return deoptimization_id;
@@ -823,16 +863,15 @@
     } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
                type == MachineType::Uint32()) {
       translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
-    } else if (IsAnyTagged(type.representation())) {
-      translation->StoreStackSlot(LocationOperand::cast(op)->index());
     } else {
-      CHECK(false);
+      CHECK_EQ(MachineRepresentation::kTagged, type.representation());
+      translation->StoreStackSlot(LocationOperand::cast(op)->index());
     }
   } else if (op->IsFPStackSlot()) {
     if (type.representation() == MachineRepresentation::kFloat64) {
       translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
     } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
+      CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
       translation->StoreFloatStackSlot(LocationOperand::cast(op)->index());
     }
   } else if (op->IsRegister()) {
@@ -845,27 +884,26 @@
     } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
                type == MachineType::Uint32()) {
       translation->StoreUint32Register(converter.ToRegister(op));
-    } else if (IsAnyTagged(type.representation())) {
-      translation->StoreRegister(converter.ToRegister(op));
     } else {
-      CHECK(false);
+      CHECK_EQ(MachineRepresentation::kTagged, type.representation());
+      translation->StoreRegister(converter.ToRegister(op));
     }
   } else if (op->IsFPRegister()) {
     InstructionOperandConverter converter(this, instr);
     if (type.representation() == MachineRepresentation::kFloat64) {
       translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
     } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
+      CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
       translation->StoreFloatRegister(converter.ToFloatRegister(op));
     }
-  } else if (op->IsImmediate()) {
+  } else {
+    CHECK(op->IsImmediate());
     InstructionOperandConverter converter(this, instr);
     Constant constant = converter.ToConstant(op);
     Handle<Object> constant_object;
     switch (constant.type()) {
       case Constant::kInt32:
-        if (type.representation() == MachineRepresentation::kTagged ||
-            type.representation() == MachineRepresentation::kTaggedSigned) {
+        if (type.representation() == MachineRepresentation::kTagged) {
           // When pointers are 4 bytes, we can use int32 constants to represent
           // Smis.
           DCHECK_EQ(4, kPointerSize);
@@ -888,9 +926,13 @@
                  type.representation() == MachineRepresentation::kNone);
           DCHECK(type.representation() != MachineRepresentation::kNone ||
                  constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
-
-          constant_object =
-              isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+          if (type == MachineType::Uint32()) {
+            constant_object =
+                isolate()->factory()->NewNumberFromUint(constant.ToInt32());
+          } else {
+            constant_object =
+                isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+          }
         }
         break;
       case Constant::kInt64:
@@ -899,37 +941,28 @@
         // TODO(jarin,bmeurer): We currently pass in raw pointers to the
         // JSFunction::entry here. We should really consider fixing this.
         DCHECK(type.representation() == MachineRepresentation::kWord64 ||
-               type.representation() == MachineRepresentation::kTagged ||
-               type.representation() == MachineRepresentation::kTaggedSigned);
+               type.representation() == MachineRepresentation::kTagged);
         DCHECK_EQ(8, kPointerSize);
         constant_object =
             handle(reinterpret_cast<Smi*>(constant.ToInt64()), isolate());
         DCHECK(constant_object->IsSmi());
         break;
       case Constant::kFloat32:
-        if (type.representation() == MachineRepresentation::kTaggedSigned) {
-          DCHECK(IsSmiDouble(constant.ToFloat32()));
-        } else {
-          DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
-                 CanBeTaggedPointer(type.representation()));
-        }
+        DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
+               type.representation() == MachineRepresentation::kTagged);
         constant_object = isolate()->factory()->NewNumber(constant.ToFloat32());
         break;
       case Constant::kFloat64:
-        if (type.representation() == MachineRepresentation::kTaggedSigned) {
-          DCHECK(IsSmiDouble(constant.ToFloat64()));
-        } else {
-          DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
-                 CanBeTaggedPointer(type.representation()));
-        }
+        DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
+               type.representation() == MachineRepresentation::kTagged);
         constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
         break;
       case Constant::kHeapObject:
-        DCHECK(CanBeTaggedPointer(type.representation()));
+        DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
         constant_object = constant.ToHeapObject();
         break;
       default:
-        CHECK(false);
+        UNREACHABLE();
     }
     if (constant_object.is_identical_to(info()->closure())) {
       translation->StoreJSFrameFunction();
@@ -937,8 +970,6 @@
       int literal_id = DefineDeoptimizationLiteral(constant_object);
       translation->StoreLiteral(literal_id);
     }
-  } else {
-    CHECK(false);
   }
 }
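
Two patterns recur in the code-generator.cc hunks above:
TranslateStateValueDescriptor now accepts a null Translation, meaning
"consume the matching instruction operands but emit nothing", and the literal
id for the "optimized out" sentinel is cached in optimized_out_literal_id_
(-1 until first use) so it is defined at most once per code object. The
caching idiom, condensed from the hunk above:

    // Define the shared literal on first use; reuse its id afterwards.
    if (optimized_out_literal_id_ == -1) {
      optimized_out_literal_id_ =
          DefineDeoptimizationLiteral(isolate()->factory()->optimized_out());
    }
    translation->StoreLiteral(optimized_out_literal_id_);
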
 
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index 7aed85a..74958d0 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -12,6 +12,7 @@
 #include "src/macro-assembler.h"
 #include "src/safepoint-table.h"
 #include "src/source-position-table.h"
+#include "src/trap-handler/trap-handler.h"
 
 namespace v8 {
 namespace internal {
@@ -65,6 +66,14 @@
 
   Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
 
+  void AssembleSourcePosition(Instruction* instr);
+
+  void AssembleSourcePosition(SourcePosition source_position);
+
+  // Record a safepoint with the given pointer map.
+  void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
+                       int arguments, Safepoint::DeoptMode deopt_mode);
+
  private:
   MacroAssembler* masm() { return &masm_; }
   GapResolver* resolver() { return &resolver_; }
@@ -82,10 +91,6 @@
   // assembling code, in which case, a fall-through can be used.
   bool IsNextInAssemblyOrder(RpoNumber block) const;
 
-  // Record a safepoint with the given pointer map.
-  void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
-                       int arguments, Safepoint::DeoptMode deopt_mode);
-
   // Check if a heap object can be materialized by loading from a heap root,
   // which is cheaper on some platforms than materializing the actual heap
   // object constant.
@@ -100,7 +105,6 @@
   // Assemble code for the specified instruction.
   CodeGenResult AssembleInstruction(Instruction* instr,
                                     const InstructionBlock* block);
-  void AssembleSourcePosition(Instruction* instr);
   void AssembleGaps(Instruction* instr);
 
   // Returns true if an instruction is a tail call that needs to adjust the stack
@@ -116,11 +120,11 @@
   void AssembleArchJump(RpoNumber target);
   void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
   void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+  void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
   void AssembleArchLookupSwitch(Instruction* instr);
   void AssembleArchTableSwitch(Instruction* instr);
 
   CodeGenResult AssembleDeoptimizerCall(int deoptimization_id,
-                                        Deoptimizer::BailoutType bailout_type,
                                         SourcePosition pos);
 
   // Generates an architecture-specific, descriptor-specific prologue
@@ -205,6 +209,7 @@
   int DefineDeoptimizationLiteral(Handle<Object> literal);
   DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
                                                     size_t frame_state_offset);
+  DeoptimizeKind GetDeoptimizationKind(int deoptimization_id) const;
   DeoptimizeReason GetDeoptimizationReason(int deoptimization_id) const;
   int BuildTranslation(Instruction* instr, int pc_offset,
                        size_t frame_state_offset,
@@ -213,6 +218,7 @@
       FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
       Translation* translation, OutputFrameStateCombine state_combine);
   void TranslateStateValueDescriptor(StateValueDescriptor* desc,
+                                     StateValueList* nested,
                                      Translation* translation,
                                      InstructionOperandIterator* iter);
   void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
@@ -232,21 +238,24 @@
   class DeoptimizationState final : public ZoneObject {
    public:
     DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset,
-                        DeoptimizeReason reason)
+                        DeoptimizeKind kind, DeoptimizeReason reason)
         : bailout_id_(bailout_id),
           translation_id_(translation_id),
           pc_offset_(pc_offset),
+          kind_(kind),
           reason_(reason) {}
 
     BailoutId bailout_id() const { return bailout_id_; }
     int translation_id() const { return translation_id_; }
     int pc_offset() const { return pc_offset_; }
+    DeoptimizeKind kind() const { return kind_; }
     DeoptimizeReason reason() const { return reason_; }
 
    private:
     BailoutId bailout_id_;
     int translation_id_;
     int pc_offset_;
+    DeoptimizeKind kind_;
     DeoptimizeReason reason_;
   };
 
@@ -279,6 +288,7 @@
   JumpTable* jump_tables_;
   OutOfLineCode* ools_;
   int osr_pc_offset_;
+  int optimized_out_literal_id_;
   SourcePositionTableBuilder source_position_table_builder_;
 };
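
DeoptimizationState now carries the DeoptimizeKind next to the reason, and
the architecture-specific AssembleDeoptimizerCall loses its explicit
Deoptimizer::BailoutType parameter; backends can recover it from the
recorded kind instead. A hypothetical backend-side sketch (the mapping shown
is an assumption, not code from this patch):

    DeoptimizeKind kind = GetDeoptimizationKind(deoptimization_id);
    Deoptimizer::BailoutType bailout_type = kind == DeoptimizeKind::kSoft
                                                ? Deoptimizer::SOFT
                                                : Deoptimizer::EAGER;
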
 
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index 9a36816..70fdf71 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -36,7 +36,6 @@
 
 }  // namespace
 
-
 CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
                                              CommonOperatorBuilder* common,
                                              MachineOperatorBuilder* machine)
@@ -44,8 +43,9 @@
       graph_(graph),
       common_(common),
       machine_(machine),
-      dead_(graph->NewNode(common->Dead())) {}
-
+      dead_(graph->NewNode(common->Dead())) {
+  NodeProperties::SetType(dead_, Type::None());
+}
 
 Reduction CommonOperatorReducer::Reduce(Node* node) {
   switch (node->opcode()) {
@@ -126,7 +126,7 @@
   DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
          node->opcode() == IrOpcode::kDeoptimizeUnless);
   bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
-  DeoptimizeReason reason = DeoptimizeReasonOf(node->op());
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   Node* condition = NodeProperties::GetValueInput(node, 0);
   Node* frame_state = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -137,9 +137,10 @@
   // (as guaranteed by the graph reduction logic).
   if (condition->opcode() == IrOpcode::kBooleanNot) {
     NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
-    NodeProperties::ChangeOp(node, condition_is_true
-                                       ? common()->DeoptimizeIf(reason)
-                                       : common()->DeoptimizeUnless(reason));
+    NodeProperties::ChangeOp(
+        node, condition_is_true
+                  ? common()->DeoptimizeIf(p.kind(), p.reason())
+                  : common()->DeoptimizeUnless(p.kind(), p.reason()));
     return Changed(node);
   }
   Decision const decision = DecideCondition(condition);
@@ -147,9 +148,8 @@
   if (condition_is_true == (decision == Decision::kTrue)) {
     ReplaceWithValue(node, dead(), effect, control);
   } else {
-    control =
-        graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager, reason),
-                         frame_state, effect, control);
+    control = graph()->NewNode(common()->Deoptimize(p.kind(), p.reason()),
+                               frame_state, effect, control);
     // TODO(bmeurer): This should be on the AdvancedReducer somehow.
     NodeProperties::MergeControlToEnd(graph(), common(), control);
     Revisit(graph()->end());
@@ -195,15 +195,16 @@
 
 Reduction CommonOperatorReducer::ReduceEffectPhi(Node* node) {
   DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
-  int const input_count = node->InputCount() - 1;
-  DCHECK_LE(1, input_count);
-  Node* const merge = node->InputAt(input_count);
+  Node::Inputs inputs = node->inputs();
+  int const effect_input_count = inputs.count() - 1;
+  DCHECK_LE(1, effect_input_count);
+  Node* const merge = inputs[effect_input_count];
   DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
-  DCHECK_EQ(input_count, merge->InputCount());
-  Node* const effect = node->InputAt(0);
+  DCHECK_EQ(effect_input_count, merge->InputCount());
+  Node* const effect = inputs[0];
   DCHECK_NE(node, effect);
-  for (int i = 1; i < input_count; ++i) {
-    Node* const input = node->InputAt(i);
+  for (int i = 1; i < effect_input_count; ++i) {
+    Node* const input = inputs[i];
     if (input == node) {
       // Ignore redundant inputs.
       DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
@@ -219,16 +220,18 @@
 
 Reduction CommonOperatorReducer::ReducePhi(Node* node) {
   DCHECK_EQ(IrOpcode::kPhi, node->opcode());
-  int const input_count = node->InputCount() - 1;
-  DCHECK_LE(1, input_count);
-  Node* const merge = node->InputAt(input_count);
+  Node::Inputs inputs = node->inputs();
+  int const value_input_count = inputs.count() - 1;
+  DCHECK_LE(1, value_input_count);
+  Node* const merge = inputs[value_input_count];
   DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
-  DCHECK_EQ(input_count, merge->InputCount());
-  if (input_count == 2) {
-    Node* vtrue = node->InputAt(0);
-    Node* vfalse = node->InputAt(1);
-    Node* if_true = merge->InputAt(0);
-    Node* if_false = merge->InputAt(1);
+  DCHECK_EQ(value_input_count, merge->InputCount());
+  if (value_input_count == 2) {
+    Node* vtrue = inputs[0];
+    Node* vfalse = inputs[1];
+    Node::Inputs merge_inputs = merge->inputs();
+    Node* if_true = merge_inputs[0];
+    Node* if_false = merge_inputs[1];
     if (if_true->opcode() != IrOpcode::kIfTrue) {
       std::swap(if_true, if_false);
       std::swap(vtrue, vfalse);
@@ -265,10 +268,10 @@
       }
     }
   }
-  Node* const value = node->InputAt(0);
+  Node* const value = inputs[0];
   DCHECK_NE(node, value);
-  for (int i = 1; i < input_count; ++i) {
-    Node* const input = node->InputAt(i);
+  for (int i = 1; i < value_input_count; ++i) {
+    Node* const input = inputs[i];
     if (input == node) {
       // Ignore redundant inputs.
       DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
@@ -281,49 +284,91 @@
   return Replace(value);
 }
 
-
 Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
   DCHECK_EQ(IrOpcode::kReturn, node->opcode());
-  Node* const value = node->InputAt(1);
   Node* effect = NodeProperties::GetEffectInput(node);
-  Node* const control = NodeProperties::GetControlInput(node);
-  bool changed = false;
   if (effect->opcode() == IrOpcode::kCheckpoint) {
     // Any {Return} node can never be used to insert a deoptimization point,
     // hence checkpoints can be cut out of the effect chain flowing into it.
     effect = NodeProperties::GetEffectInput(effect);
     NodeProperties::ReplaceEffectInput(node, effect);
-    changed = true;
+    Reduction const reduction = ReduceReturn(node);
+    return reduction.Changed() ? reduction : Changed(node);
   }
+  // TODO(ahaas): Extend the reduction below to multiple return values.
+  if (ValueInputCountOfReturn(node->op()) != 1) {
+    return NoChange();
+  }
+  Node* pop_count = NodeProperties::GetValueInput(node, 0);
+  Node* value = NodeProperties::GetValueInput(node, 1);
+  Node* control = NodeProperties::GetControlInput(node);
   if (value->opcode() == IrOpcode::kPhi &&
       NodeProperties::GetControlInput(value) == control &&
-      effect->opcode() == IrOpcode::kEffectPhi &&
-      NodeProperties::GetControlInput(effect) == control &&
       control->opcode() == IrOpcode::kMerge) {
-    int const control_input_count = control->InputCount();
-    DCHECK_NE(0, control_input_count);
-    DCHECK_EQ(control_input_count, value->InputCount() - 1);
-    DCHECK_EQ(control_input_count, effect->InputCount() - 1);
+    // This optimization pushes {Return} nodes through merges. It checks that
+    // the return value is actually a {Phi} and the return control dependency
+    // is the {Merge} to which the {Phi} belongs.
+
+    // Value1 ... ValueN Control1 ... ControlN
+    //   ^          ^       ^            ^
+    //   |          |       |            |
+    //   +----+-----+       +------+-----+
+    //        |                    |
+    //       Phi --------------> Merge
+    //        ^                    ^
+    //        |                    |
+    //        |  +-----------------+
+    //        |  |
+    //       Return -----> Effect
+    //         ^
+    //         |
+    //        End
+
+    // Now the effect input to the {Return} node can be either an {EffectPhi}
+    // hanging off the same {Merge}, or the {Merge} node is only connected to
+    // the {Return} and the {Phi}, in which case we know that the effect input
+    // must somehow dominate all merged branches.
+
+    Node::Inputs control_inputs = control->inputs();
+    Node::Inputs value_inputs = value->inputs();
+    DCHECK_NE(0, control_inputs.count());
+    DCHECK_EQ(control_inputs.count(), value_inputs.count() - 1);
     DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode());
     DCHECK_NE(0, graph()->end()->InputCount());
-    for (int i = 0; i < control_input_count; ++i) {
-      // Create a new {Return} and connect it to {end}. We don't need to mark
-      // {end} as revisit, because we mark {node} as {Dead} below, which was
-      // previously connected to {end}, so we know for sure that at some point
-      // the reducer logic will visit {end} again.
-      Node* ret = graph()->NewNode(common()->Return(), node->InputAt(0),
-                                   value->InputAt(i), effect->InputAt(i),
-                                   control->InputAt(i));
-      NodeProperties::MergeControlToEnd(graph(), common(), ret);
+    if (control->OwnedBy(node, value)) {
+      for (int i = 0; i < control_inputs.count(); ++i) {
+        // Create a new {Return} and connect it to {end}. We don't need to mark
+        // {end} as revisit, because we mark {node} as {Dead} below, which was
+        // previously connected to {end}, so we know for sure that at some point
+        // the reducer logic will visit {end} again.
+        Node* ret = graph()->NewNode(node->op(), pop_count, value_inputs[i],
+                                     effect, control_inputs[i]);
+        NodeProperties::MergeControlToEnd(graph(), common(), ret);
+      }
+      // Mark the Merge {control} and Return {node} as {dead}.
+      Replace(control, dead());
+      return Replace(dead());
+    } else if (effect->opcode() == IrOpcode::kEffectPhi &&
+               NodeProperties::GetControlInput(effect) == control) {
+      Node::Inputs effect_inputs = effect->inputs();
+      DCHECK_EQ(control_inputs.count(), effect_inputs.count() - 1);
+      for (int i = 0; i < control_inputs.count(); ++i) {
+        // Create a new {Return} and connect it to {end}. We don't need to mark
+        // {end} as revisit, because we mark {node} as {Dead} below, which was
+        // previously connected to {end}, so we know for sure that at some point
+        // the reducer logic will visit {end} again.
+        Node* ret = graph()->NewNode(node->op(), pop_count, value_inputs[i],
+                                     effect_inputs[i], control_inputs[i]);
+        NodeProperties::MergeControlToEnd(graph(), common(), ret);
+      }
+      // Mark the Merge {control} and Return {node} as {dead}.
+      Replace(control, dead());
+      return Replace(dead());
     }
-    // Mark the merge {control} and return {node} as {dead}.
-    Replace(control, dead());
-    return Replace(dead());
   }
-  return changed ? Changed(node) : NoChange();
+  return NoChange();
 }
 
-
 Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
   DCHECK_EQ(IrOpcode::kSelect, node->opcode());
   Node* const cond = node->InputAt(0);
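
The Phi, EffectPhi and Return reducers above switch from repeated InputAt()
calls to a single Node::Inputs snapshot that is indexed directly; for an
(Effect)Phi the last input is always the owning Merge. The shared shape,
roughly:

    Node::Inputs inputs = node->inputs();
    int const value_input_count = inputs.count() - 1;  // last input == Merge
    Node* const merge = inputs[value_input_count];
    for (int i = 1; i < value_input_count; ++i) {
      Node* const input = inputs[i];  // cheap repeated indexing
      // ... per-input reduction logic ...
    }
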
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index 9ce6f71..637b064 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -7,9 +7,11 @@
 #include "src/assembler.h"
 #include "src/base/lazy-instance.h"
 #include "src/compiler/linkage.h"
+#include "src/compiler/node.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 #include "src/zone/zone.h"
 
 namespace v8 {
@@ -35,23 +37,11 @@
   return OpParameter<BranchHint>(op);
 }
 
-DeoptimizeReason DeoptimizeReasonOf(Operator const* const op) {
-  DCHECK(op->opcode() == IrOpcode::kDeoptimizeIf ||
-         op->opcode() == IrOpcode::kDeoptimizeUnless);
-  return OpParameter<DeoptimizeReason>(op);
-}
-
-size_t hash_value(DeoptimizeKind kind) { return static_cast<size_t>(kind); }
-
-std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
-  switch (kind) {
-    case DeoptimizeKind::kEager:
-      return os << "Eager";
-    case DeoptimizeKind::kSoft:
-      return os << "Soft";
-  }
-  UNREACHABLE();
-  return os;
+int ValueInputCountOfReturn(Operator const* const op) {
+  DCHECK(op->opcode() == IrOpcode::kReturn);
+  // Return nodes carry the pop count as a hidden input at index 0, which we
+  // exclude from the value input count.
+  return op->ValueInputCount() - 1;
 }
 
 bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@@ -71,7 +61,9 @@
 }
 
 DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
-  DCHECK_EQ(IrOpcode::kDeoptimize, op->opcode());
+  DCHECK(op->opcode() == IrOpcode::kDeoptimize ||
+         op->opcode() == IrOpcode::kDeoptimizeIf ||
+         op->opcode() == IrOpcode::kDeoptimizeUnless);
   return OpParameter<DeoptimizeParameters>(op);
 }
 
@@ -171,6 +163,106 @@
   return os << p.value() << "|" << p.rmode() << "|" << p.type();
 }
 
+SparseInputMask::InputIterator::InputIterator(
+    SparseInputMask::BitMaskType bit_mask, Node* parent)
+    : bit_mask_(bit_mask), parent_(parent), real_index_(0) {
+#if DEBUG
+  if (bit_mask_ != SparseInputMask::kDenseBitMask) {
+    DCHECK_EQ(base::bits::CountPopulation(bit_mask_) -
+                  base::bits::CountPopulation(kEndMarker),
+              parent->InputCount());
+  }
+#endif
+}
+
+void SparseInputMask::InputIterator::Advance() {
+  DCHECK(!IsEnd());
+
+  if (IsReal()) {
+    ++real_index_;
+  }
+  bit_mask_ >>= 1;
+}
+
+Node* SparseInputMask::InputIterator::GetReal() const {
+  DCHECK(IsReal());
+  return parent_->InputAt(real_index_);
+}
+
+bool SparseInputMask::InputIterator::IsReal() const {
+  return bit_mask_ == SparseInputMask::kDenseBitMask ||
+         (bit_mask_ & kEntryMask);
+}
+
+bool SparseInputMask::InputIterator::IsEnd() const {
+  return (bit_mask_ == kEndMarker) ||
+         (bit_mask_ == SparseInputMask::kDenseBitMask &&
+          real_index_ >= parent_->InputCount());
+}
+
+int SparseInputMask::CountReal() const {
+  DCHECK(!IsDense());
+  return base::bits::CountPopulation(bit_mask_) -
+         base::bits::CountPopulation(kEndMarker);
+}
+
+SparseInputMask::InputIterator SparseInputMask::IterateOverInputs(Node* node) {
+  DCHECK(IsDense() || CountReal() == node->InputCount());
+  return InputIterator(bit_mask_, node);
+}
+
+bool operator==(SparseInputMask const& lhs, SparseInputMask const& rhs) {
+  return lhs.mask() == rhs.mask();
+}
+
+bool operator!=(SparseInputMask const& lhs, SparseInputMask const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(SparseInputMask const& p) {
+  return base::hash_value(p.mask());
+}
+
+std::ostream& operator<<(std::ostream& os, SparseInputMask const& p) {
+  if (p.IsDense()) {
+    return os << "dense";
+  } else {
+    SparseInputMask::BitMaskType mask = p.mask();
+    DCHECK_NE(mask, SparseInputMask::kDenseBitMask);
+
+    os << "sparse:";
+
+    while (mask != SparseInputMask::kEndMarker) {
+      if (mask & SparseInputMask::kEntryMask) {
+        os << "^";
+      } else {
+        os << ".";
+      }
+      mask >>= 1;
+    }
+    return os;
+  }
+}
+
+bool operator==(TypedStateValueInfo const& lhs,
+                TypedStateValueInfo const& rhs) {
+  return lhs.machine_types() == rhs.machine_types() &&
+         lhs.sparse_input_mask() == rhs.sparse_input_mask();
+}
+
+bool operator!=(TypedStateValueInfo const& lhs,
+                TypedStateValueInfo const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(TypedStateValueInfo const& p) {
+  return base::hash_combine(p.machine_types(), p.sparse_input_mask());
+}
+
+std::ostream& operator<<(std::ostream& os, TypedStateValueInfo const& p) {
+  return os << p.machine_types() << "|" << p.sparse_input_mask();
+}
+
 size_t hash_value(RegionObservability observability) {
   return static_cast<size_t>(observability);
 }
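
SparseInputMask encodes which state-value slots carry a real node input:
reading from the low bit, each 1 bit is a real input, each 0 bit an elided
("optimized out") slot, terminated by kEndMarker; kDenseBitMask means every
slot is real. A consumption sketch using only the iterator API declared in
the header:

    // With mask bits 1,0,1 (low bit first) before the end marker:
    // slot 0 -> node input 0, slot 1 -> elided, slot 2 -> node input 1.
    SparseInputMask::InputIterator it = mask.IterateOverInputs(node);
    while (!it.IsEnd()) {
      if (it.IsReal()) {
        Node* input = it.GetReal();  // real_index_ advances on real slots only
        // ... consume {input} ...
      }  // an elided slot yields no node; callers substitute "optimized out".
      it.Advance();
    }
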
@@ -235,9 +327,23 @@
   return OpParameter<OsrGuardType>(op);
 }
 
+SparseInputMask SparseInputMaskOf(Operator const* op) {
+  DCHECK(op->opcode() == IrOpcode::kStateValues ||
+         op->opcode() == IrOpcode::kTypedStateValues);
+
+  if (op->opcode() == IrOpcode::kTypedStateValues) {
+    return OpParameter<TypedStateValueInfo>(op).sparse_input_mask();
+  }
+  return OpParameter<SparseInputMask>(op);
+}
+
 ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
   DCHECK(op->opcode() == IrOpcode::kTypedObjectState ||
          op->opcode() == IrOpcode::kTypedStateValues);
+
+  if (op->opcode() == IrOpcode::kTypedStateValues) {
+    return OpParameter<TypedStateValueInfo>(op).machine_types();
+  }
   return OpParameter<const ZoneVector<MachineType>*>(op);
 }
 
@@ -313,22 +419,37 @@
   V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
 
 #define CACHED_DEOPTIMIZE_IF_LIST(V) \
-  V(DivisionByZero)                  \
-  V(Hole)                            \
-  V(MinusZero)                       \
-  V(Overflow)                        \
-  V(Smi)
+  V(Eager, DivisionByZero)           \
+  V(Eager, Hole)                     \
+  V(Eager, MinusZero)                \
+  V(Eager, Overflow)                 \
+  V(Eager, Smi)
 
 #define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
-  V(LostPrecision)                       \
-  V(LostPrecisionOrNaN)                  \
-  V(NoReason)                            \
-  V(NotAHeapNumber)                      \
-  V(NotANumberOrOddball)                 \
-  V(NotASmi)                             \
-  V(OutOfBounds)                         \
-  V(WrongInstanceType)                   \
-  V(WrongMap)
+  V(Eager, LostPrecision)                \
+  V(Eager, LostPrecisionOrNaN)           \
+  V(Eager, NoReason)                     \
+  V(Eager, NotAHeapNumber)               \
+  V(Eager, NotANumberOrOddball)          \
+  V(Eager, NotASmi)                      \
+  V(Eager, OutOfBounds)                  \
+  V(Eager, WrongInstanceType)            \
+  V(Eager, WrongMap)
+
+#define CACHED_TRAP_IF_LIST(V) \
+  V(TrapDivUnrepresentable)    \
+  V(TrapFloatUnrepresentable)
+
+// Trap reasons (identified by their ThrowWasm* builtin) for which cached
+// TrapUnless operators are created.
+#define CACHED_TRAP_UNLESS_LIST(V) \
+  V(TrapUnreachable)               \
+  V(TrapMemOutOfBounds)            \
+  V(TrapDivByZero)                 \
+  V(TrapDivUnrepresentable)        \
+  V(TrapRemByZero)                 \
+  V(TrapFloatUnrepresentable)      \
+  V(TrapFuncInvalid)               \
+  V(TrapFuncSigMismatch)
 
 #define CACHED_PARAMETER_LIST(V) \
   V(0)                           \
@@ -497,38 +618,72 @@
   CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
 #undef CACHED_DEOPTIMIZE
 
-  template <DeoptimizeReason kReason>
-  struct DeoptimizeIfOperator final : public Operator1<DeoptimizeReason> {
+  template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+  struct DeoptimizeIfOperator final : public Operator1<DeoptimizeParameters> {
     DeoptimizeIfOperator()
-        : Operator1<DeoptimizeReason>(                   // --
+        : Operator1<DeoptimizeParameters>(               // --
               IrOpcode::kDeoptimizeIf,                   // opcode
               Operator::kFoldable | Operator::kNoThrow,  // properties
               "DeoptimizeIf",                            // name
               2, 1, 1, 0, 1, 1,                          // counts
-              kReason) {}                                // parameter
+              DeoptimizeParameters(kKind, kReason)) {}   // parameter
   };
-#define CACHED_DEOPTIMIZE_IF(Reason)                \
-  DeoptimizeIfOperator<DeoptimizeReason::k##Reason> \
-      kDeoptimizeIf##Reason##Operator;
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason)                                   \
+  DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
+      kDeoptimizeIf##Kind##Reason##Operator;
   CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
 #undef CACHED_DEOPTIMIZE_IF
 
-  template <DeoptimizeReason kReason>
-  struct DeoptimizeUnlessOperator final : public Operator1<DeoptimizeReason> {
+  template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+  struct DeoptimizeUnlessOperator final
+      : public Operator1<DeoptimizeParameters> {
     DeoptimizeUnlessOperator()
-        : Operator1<DeoptimizeReason>(                   // --
+        : Operator1<DeoptimizeParameters>(               // --
               IrOpcode::kDeoptimizeUnless,               // opcode
               Operator::kFoldable | Operator::kNoThrow,  // properties
               "DeoptimizeUnless",                        // name
               2, 1, 1, 0, 1, 1,                          // counts
-              kReason) {}                                // parameter
+              DeoptimizeParameters(kKind, kReason)) {}   // parameter
   };
-#define CACHED_DEOPTIMIZE_UNLESS(Reason)                \
-  DeoptimizeUnlessOperator<DeoptimizeReason::k##Reason> \
-      kDeoptimizeUnless##Reason##Operator;
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason)          \
+  DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind,     \
+                           DeoptimizeReason::k##Reason> \
+      kDeoptimizeUnless##Kind##Reason##Operator;
   CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
 #undef CACHED_DEOPTIMIZE_UNLESS
 
+  template <int32_t trap_id>
+  struct TrapIfOperator final : public Operator1<int32_t> {
+    TrapIfOperator()
+        : Operator1<int32_t>(                            // --
+              IrOpcode::kTrapIf,                         // opcode
+              Operator::kFoldable | Operator::kNoThrow,  // properties
+              "TrapIf",                                  // name
+              1, 1, 1, 0, 0, 1,                          // counts
+              trap_id) {}                                // parameter
+  };
+#define CACHED_TRAP_IF(Trap)                                       \
+  TrapIfOperator<static_cast<int32_t>(Builtins::kThrowWasm##Trap)> \
+      kTrapIf##Trap##Operator;
+  CACHED_TRAP_IF_LIST(CACHED_TRAP_IF)
+#undef CACHED_TRAP_IF
+
+  template <int32_t trap_id>
+  struct TrapUnlessOperator final : public Operator1<int32_t> {
+    TrapUnlessOperator()
+        : Operator1<int32_t>(                            // --
+              IrOpcode::kTrapUnless,                     // opcode
+              Operator::kFoldable | Operator::kNoThrow,  // properties
+              "TrapUnless",                              // name
+              1, 1, 1, 0, 0, 1,                          // counts
+              trap_id) {}                                // parameter
+  };
+#define CACHED_TRAP_UNLESS(Trap)                                       \
+  TrapUnlessOperator<static_cast<int32_t>(Builtins::kThrowWasm##Trap)> \
+      kTrapUnless##Trap##Operator;
+  CACHED_TRAP_UNLESS_LIST(CACHED_TRAP_UNLESS)
+#undef CACHED_TRAP_UNLESS
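+  // Expansion sketch (illustrative): CACHED_TRAP_UNLESS(TrapDivByZero) above
+  // declares the cache member
+  //   TrapUnlessOperator<
+  //       static_cast<int32_t>(Builtins::kThrowWasmTrapDivByZero)>
+  //       kTrapUnlessTrapDivByZeroOperator;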
+
   template <MachineRepresentation kRep, int kInputCount>
   struct PhiOperator final : public Operator1<MachineRepresentation> {
     PhiOperator()
@@ -588,13 +743,14 @@
 #undef CACHED_PROJECTION
 
   template <int kInputCount>
-  struct StateValuesOperator final : public Operator {
+  struct StateValuesOperator final : public Operator1<SparseInputMask> {
     StateValuesOperator()
-        : Operator(                           // --
-              IrOpcode::kStateValues,         // opcode
-              Operator::kPure,                // flags
-              "StateValues",                  // name
-              kInputCount, 0, 0, 1, 0, 0) {}  // counts
+        : Operator1<SparseInputMask>(       // --
+              IrOpcode::kStateValues,       // opcode
+              Operator::kPure,              // flags
+              "StateValues",                // name
+              kInputCount, 0, 0, 1, 0, 0,   // counts
+              SparseInputMask::Dense()) {}  // parameter
   };
 #define CACHED_STATE_VALUES(input_count) \
   StateValuesOperator<input_count> kStateValues##input_count##Operator;
@@ -688,45 +844,81 @@
       parameter);                                       // parameter
 }
 
-const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeReason reason) {
-  switch (reason) {
-#define CACHED_DEOPTIMIZE_IF(Reason) \
-  case DeoptimizeReason::k##Reason:  \
-    return &cache_.kDeoptimizeIf##Reason##Operator;
-    CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
-#undef CACHED_DEOPTIMIZE_IF
-    default:
-      break;
+const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeKind kind,
+                                                    DeoptimizeReason reason) {
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason)                \
+  if (kind == DeoptimizeKind::k##Kind &&                  \
+      reason == DeoptimizeReason::k##Reason) {            \
+    return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
   }
+  CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
+#undef CACHED_DEOPTIMIZE_IF
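+  // Expansion sketch (illustrative): CACHED_DEOPTIMIZE_IF(Eager, Hole) above
+  // expanded to the guarded early return
+  //   if (kind == DeoptimizeKind::kEager &&
+  //       reason == DeoptimizeReason::kHole) {
+  //     return &cache_.kDeoptimizeIfEagerHoleOperator;
+  //   }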
   // Uncached
-  return new (zone()) Operator1<DeoptimizeReason>(  // --
-      IrOpcode::kDeoptimizeIf,                      // opcode
-      Operator::kFoldable | Operator::kNoThrow,     // properties
-      "DeoptimizeIf",                               // name
-      2, 1, 1, 0, 1, 1,                             // counts
-      reason);                                      // parameter
+  DeoptimizeParameters parameter(kind, reason);
+  return new (zone()) Operator1<DeoptimizeParameters>(  // --
+      IrOpcode::kDeoptimizeIf,                          // opcode
+      Operator::kFoldable | Operator::kNoThrow,         // properties
+      "DeoptimizeIf",                                   // name
+      2, 1, 1, 0, 1, 1,                                 // counts
+      parameter);                                       // parameter
 }
 
 const Operator* CommonOperatorBuilder::DeoptimizeUnless(
-    DeoptimizeReason reason) {
-  switch (reason) {
-#define CACHED_DEOPTIMIZE_UNLESS(Reason) \
-  case DeoptimizeReason::k##Reason:      \
-    return &cache_.kDeoptimizeUnless##Reason##Operator;
-    CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
+    DeoptimizeKind kind, DeoptimizeReason reason) {
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason)                \
+  if (kind == DeoptimizeKind::k##Kind &&                      \
+      reason == DeoptimizeReason::k##Reason) {                \
+    return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
+  }
+  CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
 #undef CACHED_DEOPTIMIZE_UNLESS
+  // Uncached
+  DeoptimizeParameters parameter(kind, reason);
+  return new (zone()) Operator1<DeoptimizeParameters>(  // --
+      IrOpcode::kDeoptimizeUnless,                      // opcode
+      Operator::kFoldable | Operator::kNoThrow,         // properties
+      "DeoptimizeUnless",                               // name
+      2, 1, 1, 0, 1, 1,                                 // counts
+      parameter);                                       // parameter
+}
+
+const Operator* CommonOperatorBuilder::TrapIf(int32_t trap_id) {
+  switch (trap_id) {
+#define CACHED_TRAP_IF(Trap)       \
+  case Builtins::kThrowWasm##Trap: \
+    return &cache_.kTrapIf##Trap##Operator;
+    CACHED_TRAP_IF_LIST(CACHED_TRAP_IF)
+#undef CACHED_TRAP_IF
     default:
       break;
   }
   // Uncached
-  return new (zone()) Operator1<DeoptimizeReason>(  // --
-      IrOpcode::kDeoptimizeUnless,                  // opcode
-      Operator::kFoldable | Operator::kNoThrow,     // properties
-      "DeoptimizeUnless",                           // name
-      2, 1, 1, 0, 1, 1,                             // counts
-      reason);                                      // parameter
+  return new (zone()) Operator1<int32_t>(        // --
+      IrOpcode::kTrapIf,                         // opcode
+      Operator::kFoldable | Operator::kNoThrow,  // properties
+      "TrapIf",                                  // name
+      1, 1, 1, 0, 0, 1,                          // counts
+      trap_id);                                  // parameter
 }
 
+const Operator* CommonOperatorBuilder::TrapUnless(int32_t trap_id) {
+  switch (trap_id) {
+#define CACHED_TRAP_UNLESS(Trap)   \
+  case Builtins::kThrowWasm##Trap: \
+    return &cache_.kTrapUnless##Trap##Operator;
+    CACHED_TRAP_UNLESS_LIST(CACHED_TRAP_UNLESS)
+#undef CACHED_TRAP_UNLESS
+    default:
+      break;
+  }
+  // Uncached
+  return new (zone()) Operator1<int32_t>(        // --
+      IrOpcode::kTrapUnless,                     // opcode
+      Operator::kFoldable | Operator::kNoThrow,  // properties
+      "TrapUnless",                              // name
+      1, 1, 1, 0, 0, 1,                          // counts
+      trap_id);                                  // parameter
+}
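+// Illustrative (assumed) call site for the trap operators, e.g. from a wasm
+// lowering:
+//   Node* trap = graph()->NewNode(
+//       common()->TrapUnless(Builtins::kThrowWasmTrapDivByZero),
+//       condition, effect, control);
+// Per the counts (1, 1, 1, 0, 0, 1), the node consumes one value, one effect
+// and one control input, and produces a single control output.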
 
 const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
   return new (zone()) Operator(               // --
@@ -1000,30 +1192,51 @@
   return nullptr;
 }
 
-const Operator* CommonOperatorBuilder::StateValues(int arguments) {
-  switch (arguments) {
+const Operator* CommonOperatorBuilder::StateValues(int arguments,
+                                                   SparseInputMask bitmask) {
+  if (bitmask.IsDense()) {
+    switch (arguments) {
 #define CACHED_STATE_VALUES(arguments) \
   case arguments:                      \
     return &cache_.kStateValues##arguments##Operator;
-    CACHED_STATE_VALUES_LIST(CACHED_STATE_VALUES)
+      CACHED_STATE_VALUES_LIST(CACHED_STATE_VALUES)
 #undef CACHED_STATE_VALUES
-    default:
-      break;
+      default:
+        break;
+    }
   }
+
+#if DEBUG
+  DCHECK(bitmask.IsDense() || bitmask.CountReal() == arguments);
+#endif
+
   // Uncached.
-  return new (zone()) Operator(                 // --
-      IrOpcode::kStateValues, Operator::kPure,  // opcode
-      "StateValues",                            // name
-      arguments, 0, 0, 1, 0, 0);                // counts
+  return new (zone()) Operator1<SparseInputMask>(  // --
+      IrOpcode::kStateValues, Operator::kPure,     // opcode
+      "StateValues",                               // name
+      arguments, 0, 0, 1, 0, 0,                    // counts
+      bitmask);                                    // parameter
 }
 
 const Operator* CommonOperatorBuilder::TypedStateValues(
-    const ZoneVector<MachineType>* types) {
-  return new (zone()) Operator1<const ZoneVector<MachineType>*>(  // --
-      IrOpcode::kTypedStateValues, Operator::kPure,               // opcode
-      "TypedStateValues",                                         // name
-      static_cast<int>(types->size()), 0, 0, 1, 0, 0,             // counts
-      types);                                                     // parameter
+    const ZoneVector<MachineType>* types, SparseInputMask bitmask) {
+#if DEBUG
+  DCHECK(bitmask.IsDense() ||
+         bitmask.CountReal() == static_cast<int>(types->size()));
+#endif
+
+  return new (zone()) Operator1<TypedStateValueInfo>(  // --
+      IrOpcode::kTypedStateValues, Operator::kPure,    // opcode
+      "TypedStateValues",                              // name
+      static_cast<int>(types->size()), 0, 0, 1, 0, 0,  // counts
+      TypedStateValueInfo(types, bitmask));            // parameters
+}
+
+const Operator* CommonOperatorBuilder::ArgumentsObjectState() {
+  return new (zone()) Operator(                          // --
+      IrOpcode::kArgumentsObjectState, Operator::kPure,  // opcode
+      "ArgumentsObjectState",                            // name
+      0, 0, 0, 1, 0, 0);                                 // counts
 }
 
 const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots) {
@@ -1131,7 +1344,6 @@
   }
 }
 
-
 const FrameStateFunctionInfo*
 CommonOperatorBuilder::CreateFrameStateFunctionInfo(
     FrameStateType type, int parameter_count, int local_count,
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 1f258a0..4682959 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -22,6 +22,7 @@
 struct CommonOperatorGlobalCache;
 class Operator;
 class Type;
+class Node;
 
 // Prediction hint for branches.
 enum class BranchHint : uint8_t { kNone, kTrue, kFalse };
@@ -45,15 +46,8 @@
 
 V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const);
 
-// Deoptimize reason for Deoptimize, DeoptimizeIf and DeoptimizeUnless.
-DeoptimizeReason DeoptimizeReasonOf(Operator const* const);
-
-// Deoptimize bailout kind.
-enum class DeoptimizeKind : uint8_t { kEager, kSoft };
-
-size_t hash_value(DeoptimizeKind kind);
-
-std::ostream& operator<<(std::ostream&, DeoptimizeKind);
+// Helper function for return nodes: returns have a hidden input at index 0
+// (the pop count), which is excluded from the reported value input count.
+int ValueInputCountOfReturn(Operator const* const op);
 
 // Parameters for the {Deoptimize} operator.
 class DeoptimizeParameters final {
@@ -158,6 +152,123 @@
 
 size_t hash_value(RelocatablePtrConstantInfo const& p);
 
+// Used to define a sparse set of inputs. This can efficiently encode nodes
+// that have many inputs, where many of those inputs share the same value and
+// therefore do not all need to be stored explicitly.
+class SparseInputMask final {
+ public:
+  typedef uint32_t BitMaskType;
+
+  // The mask representing a dense input set.
+  static const BitMaskType kDenseBitMask = 0x0;
+  // The bits representing the end of a sparse input set.
+  static const BitMaskType kEndMarker = 0x1;
+  // The mask for accessing a sparse input entry in the bitmask.
+  static const BitMaskType kEntryMask = 0x1;
+
+  // The number of bits in the mask, minus one for the end marker.
+  static const int kMaxSparseInputs = (sizeof(BitMaskType) * kBitsPerByte - 1);
+
+  // An iterator over a node's sparse inputs.
+  class InputIterator final {
+   public:
+    InputIterator() {}
+    InputIterator(BitMaskType bit_mask, Node* parent);
+
+    Node* parent() const { return parent_; }
+    int real_index() const { return real_index_; }
+
+    // Advance the iterator to the next sparse input. Only valid if the iterator
+    // has not reached the end.
+    void Advance();
+
+    // Get the current sparse input's real node value. Only valid if the
+    // current sparse input is real.
+    Node* GetReal() const;
+
+    // Get the current sparse input, returning either a real input node if
+    // the current sparse input is real, or the given {empty_value} if the
+    // current sparse input is empty.
+    Node* Get(Node* empty_value) const {
+      return IsReal() ? GetReal() : empty_value;
+    }
+
+    // True if the current sparse input is a real input node.
+    bool IsReal() const;
+
+    // True if the current sparse input is an empty value.
+    bool IsEmpty() const { return !IsReal(); }
+
+    // True if the iterator has reached the end of the sparse inputs.
+    bool IsEnd() const;
+
+   private:
+    BitMaskType bit_mask_;
+    Node* parent_;
+    int real_index_;
+  };
+
+  explicit SparseInputMask(BitMaskType bit_mask) : bit_mask_(bit_mask) {}
+
+  // Provides a SparseInputMask representing a dense input set.
+  static SparseInputMask Dense() { return SparseInputMask(kDenseBitMask); }
+
+  BitMaskType mask() const { return bit_mask_; }
+
+  bool IsDense() const { return bit_mask_ == SparseInputMask::kDenseBitMask; }
+
+  // Counts how many real values are in the sparse array. Only valid for
+  // non-dense masks.
+  int CountReal() const;
+
+  // Returns an iterator over the sparse inputs of {node}.
+  InputIterator IterateOverInputs(Node* node);
+
+ private:
+  //
+  // The sparse input mask has a bitmask specifying if the node's inputs are
+  // represented sparsely. If the bitmask value is 0, then the inputs are dense;
+  // otherwise, they should be interpreted as follows:
+  //
+  //   * The bitmask represents which values are real, with 1 for real values
+  //     and 0 for empty values.
+  //   * The inputs to the node are the real values, in the order of the 1s from
+  //     least- to most-significant.
+  //   * The top bit of the bitmask is a guard indicating the end of the values,
+  //     whether real or empty (and is not representative of a real input
+  //     itself). This is used so that we don't have to additionally store a
+  //     value count.
+  //
+  // So, for N 1s in the bitmask, there are N - 1 inputs into the node.
+  BitMaskType bit_mask_;
+};
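+// A minimal usage sketch (illustrative only; {node} and {empty_value} are
+// assumed to be supplied by the caller):
+//
+//   SparseInputMask mask(0xB);  // binary 1011: real, real, empty, end marker
+//   DCHECK_EQ(2, mask.CountReal());  // {node} must then have two inputs
+//   for (SparseInputMask::InputIterator it = mask.IterateOverInputs(node);
+//        !it.IsEnd(); it.Advance()) {
+//     Node* input = it.Get(empty_value);  // a real input, or {empty_value}
+//   }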
+
+bool operator==(SparseInputMask const& lhs, SparseInputMask const& rhs);
+bool operator!=(SparseInputMask const& lhs, SparseInputMask const& rhs);
+
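+// Parameter for a TypedStateValues operator: pairs the machine types of the
+// inputs with their sparse input mask, so that both travel in a single
+// Operator1 parameter.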
+class TypedStateValueInfo final {
+ public:
+  TypedStateValueInfo(ZoneVector<MachineType> const* machine_types,
+                      SparseInputMask sparse_input_mask)
+      : machine_types_(machine_types), sparse_input_mask_(sparse_input_mask) {}
+
+  ZoneVector<MachineType> const* machine_types() const {
+    return machine_types_;
+  }
+  SparseInputMask sparse_input_mask() const { return sparse_input_mask_; }
+
+ private:
+  ZoneVector<MachineType> const* machine_types_;
+  SparseInputMask sparse_input_mask_;
+};
+
+bool operator==(TypedStateValueInfo const& lhs, TypedStateValueInfo const& rhs);
+bool operator!=(TypedStateValueInfo const& lhs, TypedStateValueInfo const& rhs);
+
+std::ostream& operator<<(std::ostream&, TypedStateValueInfo const&);
+
+size_t hash_value(TypedStateValueInfo const& p);
+
 // Used to mark a region (as identified by BeginRegion/FinishRegion) as either
 // JavaScript-observable or not (i.e. allocations are not JavaScript observable
 // themselves, but transitioning stores are).
@@ -181,6 +292,8 @@
 std::ostream& operator<<(std::ostream&, OsrGuardType);
 OsrGuardType OsrGuardTypeOf(Operator const*);
 
+SparseInputMask SparseInputMaskOf(Operator const*);
+
 ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
     WARN_UNUSED_RESULT;
 
@@ -203,8 +316,11 @@
   const Operator* IfDefault();
   const Operator* Throw();
   const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason);
-  const Operator* DeoptimizeIf(DeoptimizeReason reason);
-  const Operator* DeoptimizeUnless(DeoptimizeReason reason);
+  const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason);
+  const Operator* DeoptimizeUnless(DeoptimizeKind kind,
+                                   DeoptimizeReason reason);
+  const Operator* TrapIf(int32_t trap_id);
+  const Operator* TrapUnless(int32_t trap_id);
   const Operator* Return(int value_input_count = 1);
   const Operator* Terminate();
 
@@ -243,8 +359,10 @@
   const Operator* Checkpoint();
   const Operator* BeginRegion(RegionObservability);
   const Operator* FinishRegion();
-  const Operator* StateValues(int arguments);
-  const Operator* TypedStateValues(const ZoneVector<MachineType>* types);
+  const Operator* StateValues(int arguments, SparseInputMask bitmask);
+  const Operator* TypedStateValues(const ZoneVector<MachineType>* types,
+                                   SparseInputMask bitmask);
+  const Operator* ArgumentsObjectState();
   const Operator* ObjectState(int pointer_slots);
   const Operator* TypedObjectState(const ZoneVector<MachineType>* types);
   const Operator* FrameState(BailoutId bailout_id,
diff --git a/src/compiler/control-builders.cc b/src/compiler/control-builders.cc
index b159bb2..a0b3ebd 100644
--- a/src/compiler/control-builders.cc
+++ b/src/compiler/control-builders.cc
@@ -4,6 +4,8 @@
 
 #include "src/compiler/control-builders.h"
 
+#include "src/objects-inl.h"
+
 namespace v8 {
 namespace internal {
 namespace compiler {
@@ -180,65 +182,6 @@
   set_environment(break_environment_);
 }
 
-
-void TryCatchBuilder::BeginTry() {
-  exit_environment_ = environment()->CopyAsUnreachable();
-  catch_environment_ = environment()->CopyAsUnreachable();
-  catch_environment_->Push(the_hole());
-}
-
-
-void TryCatchBuilder::Throw(Node* exception) {
-  environment()->Push(exception);
-  catch_environment_->Merge(environment());
-  environment()->Pop();
-  environment()->MarkAsUnreachable();
-}
-
-
-void TryCatchBuilder::EndTry() {
-  exit_environment_->Merge(environment());
-  exception_node_ = catch_environment_->Pop();
-  set_environment(catch_environment_);
-}
-
-
-void TryCatchBuilder::EndCatch() {
-  exit_environment_->Merge(environment());
-  set_environment(exit_environment_);
-}
-
-
-void TryFinallyBuilder::BeginTry() {
-  finally_environment_ = environment()->CopyAsUnreachable();
-  finally_environment_->Push(the_hole());
-  finally_environment_->Push(the_hole());
-}
-
-
-void TryFinallyBuilder::LeaveTry(Node* token, Node* value) {
-  environment()->Push(value);
-  environment()->Push(token);
-  finally_environment_->Merge(environment());
-  environment()->Drop(2);
-}
-
-
-void TryFinallyBuilder::EndTry(Node* fallthrough_token, Node* value) {
-  environment()->Push(value);
-  environment()->Push(fallthrough_token);
-  finally_environment_->Merge(environment());
-  environment()->Drop(2);
-  token_node_ = finally_environment_->Pop();
-  value_node_ = finally_environment_->Pop();
-  set_environment(finally_environment_);
-}
-
-
-void TryFinallyBuilder::EndFinally() {
-  // Nothing to be done here.
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/control-builders.h b/src/compiler/control-builders.h
index a59dcb6..88efd27 100644
--- a/src/compiler/control-builders.h
+++ b/src/compiler/control-builders.h
@@ -145,59 +145,6 @@
   Environment* break_environment_;  // Environment after the block exits.
 };
 
-
-// Tracks control flow for a try-catch statement.
-class TryCatchBuilder final : public ControlBuilder {
- public:
-  explicit TryCatchBuilder(AstGraphBuilder* builder)
-      : ControlBuilder(builder),
-        catch_environment_(nullptr),
-        exit_environment_(nullptr),
-        exception_node_(nullptr) {}
-
-  // Primitive control commands.
-  void BeginTry();
-  void Throw(Node* exception);
-  void EndTry();
-  void EndCatch();
-
-  // Returns the exception value inside the 'catch' body.
-  Node* GetExceptionNode() const { return exception_node_; }
-
- private:
-  Environment* catch_environment_;  // Environment for the 'catch' body.
-  Environment* exit_environment_;   // Environment after the statement.
-  Node* exception_node_;            // Node for exception in 'catch' body.
-};
-
-
-// Tracks control flow for a try-finally statement.
-class TryFinallyBuilder final : public ControlBuilder {
- public:
-  explicit TryFinallyBuilder(AstGraphBuilder* builder)
-      : ControlBuilder(builder),
-        finally_environment_(nullptr),
-        token_node_(nullptr),
-        value_node_(nullptr) {}
-
-  // Primitive control commands.
-  void BeginTry();
-  void LeaveTry(Node* token, Node* value);
-  void EndTry(Node* token, Node* value);
-  void EndFinally();
-
-  // Returns the dispatch token value inside the 'finally' body.
-  Node* GetDispatchTokenNode() const { return token_node_; }
-
-  // Returns the saved result value inside the 'finally' body.
-  Node* GetResultValueNode() const { return value_node_; }
-
- private:
-  Environment* finally_environment_;  // Environment for the 'finally' body.
-  Node* token_node_;                  // Node for token in 'finally' body.
-  Node* value_node_;                  // Node for value in 'finally' body.
-};
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/control-equivalence.h b/src/compiler/control-equivalence.h
index 05777d7..b76e04f 100644
--- a/src/compiler/control-equivalence.h
+++ b/src/compiler/control-equivalence.h
@@ -124,7 +124,11 @@
   void DetermineParticipation(Node* exit);
 
  private:
-  NodeData* GetData(Node* node) { return &node_data_[node->id()]; }
+  NodeData* GetData(Node* node) {
+    size_t const index = node->id();
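+    // Lazily grow the table so that nodes created after construction still
+    // map to a default {EmptyData()} entry.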
+    if (index >= node_data_.size()) node_data_.resize(index + 1, EmptyData());
+    return &node_data_[index];
+  }
   int NewClassNumber() { return class_number_++; }
   int NewDFSNumber() { return dfs_number_++; }
 
diff --git a/src/compiler/dead-code-elimination.cc b/src/compiler/dead-code-elimination.cc
index 81bf299..d66a9c5 100644
--- a/src/compiler/dead-code-elimination.cc
+++ b/src/compiler/dead-code-elimination.cc
@@ -18,8 +18,9 @@
     : AdvancedReducer(editor),
       graph_(graph),
       common_(common),
-      dead_(graph->NewNode(common->Dead())) {}
-
+      dead_(graph->NewNode(common->Dead())) {
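+  // Give {dead_} the empty type, so that uses replaced by it stay well typed
+  // (Type::None() is a subtype of every other type).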
+  NodeProperties::SetType(dead_, Type::None());
+}
 
 Reduction DeadCodeElimination::Reduce(Node* node) {
   switch (node->opcode()) {
@@ -40,11 +41,11 @@
 
 Reduction DeadCodeElimination::ReduceEnd(Node* node) {
   DCHECK_EQ(IrOpcode::kEnd, node->opcode());
-  int const input_count = node->InputCount();
-  DCHECK_LE(1, input_count);
+  Node::Inputs inputs = node->inputs();
+  DCHECK_LE(1, inputs.count());
   int live_input_count = 0;
-  for (int i = 0; i < input_count; ++i) {
-    Node* const input = node->InputAt(i);
+  for (int i = 0; i < inputs.count(); ++i) {
+    Node* const input = inputs[i];
     // Skip dead inputs.
     if (input->opcode() == IrOpcode::kDead) continue;
     // Compact live inputs.
@@ -53,20 +54,20 @@
   }
   if (live_input_count == 0) {
     return Replace(dead());
-  } else if (live_input_count < input_count) {
+  } else if (live_input_count < inputs.count()) {
     node->TrimInputCount(live_input_count);
     NodeProperties::ChangeOp(node, common()->End(live_input_count));
     return Changed(node);
   }
-  DCHECK_EQ(input_count, live_input_count);
+  DCHECK_EQ(inputs.count(), live_input_count);
   return NoChange();
 }
 
 
 Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
   DCHECK(IrOpcode::IsMergeOpcode(node->opcode()));
-  int const input_count = node->InputCount();
-  DCHECK_LE(1, input_count);
+  Node::Inputs inputs = node->inputs();
+  DCHECK_LE(1, inputs.count());
   // Count the number of live inputs to {node} and compact them on the fly, also
   // compacting the inputs of the associated {Phi} and {EffectPhi} uses at the
   // same time.  We consider {Loop}s dead even if only the first control input
@@ -74,8 +75,8 @@
   int live_input_count = 0;
   if (node->opcode() != IrOpcode::kLoop ||
       node->InputAt(0)->opcode() != IrOpcode::kDead) {
-    for (int i = 0; i < input_count; ++i) {
-      Node* const input = node->InputAt(i);
+    for (int i = 0; i < inputs.count(); ++i) {
+      Node* const input = inputs[i];
       // Skip dead inputs.
       if (input->opcode() == IrOpcode::kDead) continue;
       // Compact live inputs.
@@ -83,7 +84,7 @@
         node->ReplaceInput(live_input_count, input);
         for (Node* const use : node->uses()) {
           if (NodeProperties::IsPhi(use)) {
-            DCHECK_EQ(input_count + 1, use->InputCount());
+            DCHECK_EQ(inputs.count() + 1, use->InputCount());
             use->ReplaceInput(live_input_count, use->InputAt(i));
           }
         }
@@ -109,9 +110,9 @@
     return Replace(node->InputAt(0));
   }
   DCHECK_LE(2, live_input_count);
-  DCHECK_LE(live_input_count, input_count);
+  DCHECK_LE(live_input_count, inputs.count());
   // Trim input count for the {Merge} or {Loop} node.
-  if (live_input_count < input_count) {
+  if (live_input_count < inputs.count()) {
     // Trim input counts for all phi uses and revisit them.
     for (Node* const use : node->uses()) {
       if (NodeProperties::IsPhi(use)) {
diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc
index d4b0576..865e909 100644
--- a/src/compiler/effect-control-linearizer.cc
+++ b/src/compiler/effect-control-linearizer.cc
@@ -13,6 +13,7 @@
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node.h"
 #include "src/compiler/schedule.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -24,7 +25,8 @@
     : js_graph_(js_graph),
       schedule_(schedule),
       temp_zone_(temp_zone),
-      source_positions_(source_positions) {}
+      source_positions_(source_positions),
+      graph_assembler_(js_graph, nullptr, nullptr, temp_zone) {}
 
 Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
 CommonOperatorBuilder* EffectControlLinearizer::common() const {
@@ -596,829 +598,727 @@
                                                    Node* frame_state,
                                                    Node** effect,
                                                    Node** control) {
-  ValueEffectControl state(nullptr, nullptr, nullptr);
+  gasm()->Reset(*effect, *control);
+  Node* result = nullptr;
   switch (node->opcode()) {
     case IrOpcode::kChangeBitToTagged:
-      state = LowerChangeBitToTagged(node, *effect, *control);
+      result = LowerChangeBitToTagged(node);
       break;
     case IrOpcode::kChangeInt31ToTaggedSigned:
-      state = LowerChangeInt31ToTaggedSigned(node, *effect, *control);
+      result = LowerChangeInt31ToTaggedSigned(node);
       break;
     case IrOpcode::kChangeInt32ToTagged:
-      state = LowerChangeInt32ToTagged(node, *effect, *control);
+      result = LowerChangeInt32ToTagged(node);
       break;
     case IrOpcode::kChangeUint32ToTagged:
-      state = LowerChangeUint32ToTagged(node, *effect, *control);
+      result = LowerChangeUint32ToTagged(node);
       break;
     case IrOpcode::kChangeFloat64ToTagged:
-      state = LowerChangeFloat64ToTagged(node, *effect, *control);
+      result = LowerChangeFloat64ToTagged(node);
       break;
     case IrOpcode::kChangeFloat64ToTaggedPointer:
-      state = LowerChangeFloat64ToTaggedPointer(node, *effect, *control);
+      result = LowerChangeFloat64ToTaggedPointer(node);
       break;
     case IrOpcode::kChangeTaggedSignedToInt32:
-      state = LowerChangeTaggedSignedToInt32(node, *effect, *control);
+      result = LowerChangeTaggedSignedToInt32(node);
       break;
     case IrOpcode::kChangeTaggedToBit:
-      state = LowerChangeTaggedToBit(node, *effect, *control);
+      result = LowerChangeTaggedToBit(node);
       break;
     case IrOpcode::kChangeTaggedToInt32:
-      state = LowerChangeTaggedToInt32(node, *effect, *control);
+      result = LowerChangeTaggedToInt32(node);
       break;
     case IrOpcode::kChangeTaggedToUint32:
-      state = LowerChangeTaggedToUint32(node, *effect, *control);
+      result = LowerChangeTaggedToUint32(node);
       break;
     case IrOpcode::kChangeTaggedToFloat64:
-      state = LowerChangeTaggedToFloat64(node, *effect, *control);
+      result = LowerChangeTaggedToFloat64(node);
+      break;
+    case IrOpcode::kChangeTaggedToTaggedSigned:
+      result = LowerChangeTaggedToTaggedSigned(node);
       break;
     case IrOpcode::kTruncateTaggedToBit:
-      state = LowerTruncateTaggedToBit(node, *effect, *control);
+      result = LowerTruncateTaggedToBit(node);
       break;
     case IrOpcode::kTruncateTaggedToFloat64:
-      state = LowerTruncateTaggedToFloat64(node, *effect, *control);
+      result = LowerTruncateTaggedToFloat64(node);
       break;
     case IrOpcode::kCheckBounds:
-      state = LowerCheckBounds(node, frame_state, *effect, *control);
+      result = LowerCheckBounds(node, frame_state);
       break;
     case IrOpcode::kCheckMaps:
-      state = LowerCheckMaps(node, frame_state, *effect, *control);
+      result = LowerCheckMaps(node, frame_state);
       break;
     case IrOpcode::kCheckNumber:
-      state = LowerCheckNumber(node, frame_state, *effect, *control);
+      result = LowerCheckNumber(node, frame_state);
+      break;
+    case IrOpcode::kCheckReceiver:
+      result = LowerCheckReceiver(node, frame_state);
       break;
     case IrOpcode::kCheckString:
-      state = LowerCheckString(node, frame_state, *effect, *control);
+      result = LowerCheckString(node, frame_state);
+      break;
+    case IrOpcode::kCheckInternalizedString:
+      result = LowerCheckInternalizedString(node, frame_state);
       break;
     case IrOpcode::kCheckIf:
-      state = LowerCheckIf(node, frame_state, *effect, *control);
+      result = LowerCheckIf(node, frame_state);
       break;
     case IrOpcode::kCheckedInt32Add:
-      state = LowerCheckedInt32Add(node, frame_state, *effect, *control);
+      result = LowerCheckedInt32Add(node, frame_state);
       break;
     case IrOpcode::kCheckedInt32Sub:
-      state = LowerCheckedInt32Sub(node, frame_state, *effect, *control);
+      result = LowerCheckedInt32Sub(node, frame_state);
       break;
     case IrOpcode::kCheckedInt32Div:
-      state = LowerCheckedInt32Div(node, frame_state, *effect, *control);
+      result = LowerCheckedInt32Div(node, frame_state);
       break;
     case IrOpcode::kCheckedInt32Mod:
-      state = LowerCheckedInt32Mod(node, frame_state, *effect, *control);
+      result = LowerCheckedInt32Mod(node, frame_state);
       break;
     case IrOpcode::kCheckedUint32Div:
-      state = LowerCheckedUint32Div(node, frame_state, *effect, *control);
+      result = LowerCheckedUint32Div(node, frame_state);
       break;
     case IrOpcode::kCheckedUint32Mod:
-      state = LowerCheckedUint32Mod(node, frame_state, *effect, *control);
+      result = LowerCheckedUint32Mod(node, frame_state);
       break;
     case IrOpcode::kCheckedInt32Mul:
-      state = LowerCheckedInt32Mul(node, frame_state, *effect, *control);
+      result = LowerCheckedInt32Mul(node, frame_state);
       break;
     case IrOpcode::kCheckedInt32ToTaggedSigned:
-      state =
-          LowerCheckedInt32ToTaggedSigned(node, frame_state, *effect, *control);
+      result = LowerCheckedInt32ToTaggedSigned(node, frame_state);
       break;
     case IrOpcode::kCheckedUint32ToInt32:
-      state = LowerCheckedUint32ToInt32(node, frame_state, *effect, *control);
+      result = LowerCheckedUint32ToInt32(node, frame_state);
       break;
     case IrOpcode::kCheckedUint32ToTaggedSigned:
-      state = LowerCheckedUint32ToTaggedSigned(node, frame_state, *effect,
-                                               *control);
+      result = LowerCheckedUint32ToTaggedSigned(node, frame_state);
       break;
     case IrOpcode::kCheckedFloat64ToInt32:
-      state = LowerCheckedFloat64ToInt32(node, frame_state, *effect, *control);
+      result = LowerCheckedFloat64ToInt32(node, frame_state);
       break;
     case IrOpcode::kCheckedTaggedSignedToInt32:
-      state =
-          LowerCheckedTaggedSignedToInt32(node, frame_state, *effect, *control);
+      result = LowerCheckedTaggedSignedToInt32(node, frame_state);
       break;
     case IrOpcode::kCheckedTaggedToInt32:
-      state = LowerCheckedTaggedToInt32(node, frame_state, *effect, *control);
+      result = LowerCheckedTaggedToInt32(node, frame_state);
       break;
     case IrOpcode::kCheckedTaggedToFloat64:
-      state = LowerCheckedTaggedToFloat64(node, frame_state, *effect, *control);
+      result = LowerCheckedTaggedToFloat64(node, frame_state);
       break;
     case IrOpcode::kCheckedTaggedToTaggedSigned:
-      state = LowerCheckedTaggedToTaggedSigned(node, frame_state, *effect,
-                                               *control);
+      result = LowerCheckedTaggedToTaggedSigned(node, frame_state);
       break;
     case IrOpcode::kCheckedTaggedToTaggedPointer:
-      state = LowerCheckedTaggedToTaggedPointer(node, frame_state, *effect,
-                                                *control);
+      result = LowerCheckedTaggedToTaggedPointer(node, frame_state);
       break;
     case IrOpcode::kTruncateTaggedToWord32:
-      state = LowerTruncateTaggedToWord32(node, *effect, *control);
+      result = LowerTruncateTaggedToWord32(node);
       break;
     case IrOpcode::kCheckedTruncateTaggedToWord32:
-      state = LowerCheckedTruncateTaggedToWord32(node, frame_state, *effect,
-                                                 *control);
+      result = LowerCheckedTruncateTaggedToWord32(node, frame_state);
       break;
-    case IrOpcode::kObjectIsCallable:
-      state = LowerObjectIsCallable(node, *effect, *control);
+    case IrOpcode::kObjectIsDetectableCallable:
+      result = LowerObjectIsDetectableCallable(node);
+      break;
+    case IrOpcode::kObjectIsNonCallable:
+      result = LowerObjectIsNonCallable(node);
       break;
     case IrOpcode::kObjectIsNumber:
-      state = LowerObjectIsNumber(node, *effect, *control);
+      result = LowerObjectIsNumber(node);
       break;
     case IrOpcode::kObjectIsReceiver:
-      state = LowerObjectIsReceiver(node, *effect, *control);
+      result = LowerObjectIsReceiver(node);
       break;
     case IrOpcode::kObjectIsSmi:
-      state = LowerObjectIsSmi(node, *effect, *control);
+      result = LowerObjectIsSmi(node);
       break;
     case IrOpcode::kObjectIsString:
-      state = LowerObjectIsString(node, *effect, *control);
+      result = LowerObjectIsString(node);
       break;
     case IrOpcode::kObjectIsUndetectable:
-      state = LowerObjectIsUndetectable(node, *effect, *control);
+      result = LowerObjectIsUndetectable(node);
+      break;
+    case IrOpcode::kNewRestParameterElements:
+      result = LowerNewRestParameterElements(node);
+      break;
+    case IrOpcode::kNewUnmappedArgumentsElements:
+      result = LowerNewUnmappedArgumentsElements(node);
       break;
     case IrOpcode::kArrayBufferWasNeutered:
-      state = LowerArrayBufferWasNeutered(node, *effect, *control);
+      result = LowerArrayBufferWasNeutered(node);
       break;
     case IrOpcode::kStringFromCharCode:
-      state = LowerStringFromCharCode(node, *effect, *control);
+      result = LowerStringFromCharCode(node);
       break;
     case IrOpcode::kStringFromCodePoint:
-      state = LowerStringFromCodePoint(node, *effect, *control);
+      result = LowerStringFromCodePoint(node);
+      break;
+    case IrOpcode::kStringIndexOf:
+      result = LowerStringIndexOf(node);
+      break;
+    case IrOpcode::kStringCharAt:
+      result = LowerStringCharAt(node);
       break;
     case IrOpcode::kStringCharCodeAt:
-      state = LowerStringCharCodeAt(node, *effect, *control);
+      result = LowerStringCharCodeAt(node);
       break;
     case IrOpcode::kStringEqual:
-      state = LowerStringEqual(node, *effect, *control);
+      result = LowerStringEqual(node);
       break;
     case IrOpcode::kStringLessThan:
-      state = LowerStringLessThan(node, *effect, *control);
+      result = LowerStringLessThan(node);
       break;
     case IrOpcode::kStringLessThanOrEqual:
-      state = LowerStringLessThanOrEqual(node, *effect, *control);
+      result = LowerStringLessThanOrEqual(node);
       break;
     case IrOpcode::kCheckFloat64Hole:
-      state = LowerCheckFloat64Hole(node, frame_state, *effect, *control);
+      result = LowerCheckFloat64Hole(node, frame_state);
       break;
     case IrOpcode::kCheckTaggedHole:
-      state = LowerCheckTaggedHole(node, frame_state, *effect, *control);
+      result = LowerCheckTaggedHole(node, frame_state);
       break;
     case IrOpcode::kConvertTaggedHoleToUndefined:
-      state = LowerConvertTaggedHoleToUndefined(node, *effect, *control);
+      result = LowerConvertTaggedHoleToUndefined(node);
       break;
     case IrOpcode::kPlainPrimitiveToNumber:
-      state = LowerPlainPrimitiveToNumber(node, *effect, *control);
+      result = LowerPlainPrimitiveToNumber(node);
       break;
     case IrOpcode::kPlainPrimitiveToWord32:
-      state = LowerPlainPrimitiveToWord32(node, *effect, *control);
+      result = LowerPlainPrimitiveToWord32(node);
       break;
     case IrOpcode::kPlainPrimitiveToFloat64:
-      state = LowerPlainPrimitiveToFloat64(node, *effect, *control);
+      result = LowerPlainPrimitiveToFloat64(node);
       break;
     case IrOpcode::kEnsureWritableFastElements:
-      state = LowerEnsureWritableFastElements(node, *effect, *control);
+      result = LowerEnsureWritableFastElements(node);
       break;
     case IrOpcode::kMaybeGrowFastElements:
-      state = LowerMaybeGrowFastElements(node, frame_state, *effect, *control);
+      result = LowerMaybeGrowFastElements(node, frame_state);
       break;
     case IrOpcode::kTransitionElementsKind:
-      state = LowerTransitionElementsKind(node, *effect, *control);
+      LowerTransitionElementsKind(node);
       break;
     case IrOpcode::kLoadTypedElement:
-      state = LowerLoadTypedElement(node, *effect, *control);
+      result = LowerLoadTypedElement(node);
       break;
     case IrOpcode::kStoreTypedElement:
-      state = LowerStoreTypedElement(node, *effect, *control);
+      LowerStoreTypedElement(node);
       break;
     case IrOpcode::kFloat64RoundUp:
-      state = LowerFloat64RoundUp(node, *effect, *control);
+      if (!LowerFloat64RoundUp(node).To(&result)) {
+        return false;
+      }
       break;
     case IrOpcode::kFloat64RoundDown:
-      state = LowerFloat64RoundDown(node, *effect, *control);
+      if (!LowerFloat64RoundDown(node).To(&result)) {
+        return false;
+      }
       break;
     case IrOpcode::kFloat64RoundTruncate:
-      state = LowerFloat64RoundTruncate(node, *effect, *control);
+      if (!LowerFloat64RoundTruncate(node).To(&result)) {
+        return false;
+      }
       break;
     case IrOpcode::kFloat64RoundTiesEven:
-      state = LowerFloat64RoundTiesEven(node, *effect, *control);
+      if (!LowerFloat64RoundTiesEven(node).To(&result)) {
+        return false;
+      }
       break;
     default:
       return false;
   }
-  NodeProperties::ReplaceUses(node, state.value, state.effect, state.control);
-  *effect = state.effect;
-  *control = state.control;
+  *effect = gasm()->ExtractCurrentEffect();
+  *control = gasm()->ExtractCurrentControl();
+  NodeProperties::ReplaceUses(node, result, *effect, *control);
   return true;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node, Node* effect,
-                                                    Node* control) {
+#define __ gasm()->
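+// Note: {__} abbreviates the graph assembler; every node built through it is
+// threaded onto the current effect/control chain seeded by the
+// gasm()->Reset() call above.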
+
+Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
   Node* value = node->InputAt(0);
-  return AllocateHeapNumberWithValue(value, effect, control);
+  return AllocateHeapNumberWithValue(value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node,
-                                                           Node* effect,
-                                                           Node* control) {
+Node* EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node) {
   Node* value = node->InputAt(0);
-  return AllocateHeapNumberWithValue(value, effect, control);
+  return AllocateHeapNumberWithValue(value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeBitToTagged(Node* node, Node* effect,
-                                                Node* control) {
+Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
   Node* value = node->InputAt(0);
 
-  Node* branch = graph()->NewNode(common()->Branch(), value, control);
+  auto if_true = __ MakeLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = jsgraph()->TrueConstant();
+  __ GotoIf(value, &if_true);
+  __ Goto(&done, __ FalseConstant());
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = jsgraph()->FalseConstant();
+  __ Bind(&if_true);
+  __ Goto(&done, __ TrueConstant());
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node,
-                                                        Node* effect,
-                                                        Node* control) {
+Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
   Node* value = node->InputAt(0);
-  value = ChangeInt32ToSmi(value);
-  return ValueEffectControl(value, effect, control);
+  return ChangeInt32ToSmi(value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node, Node* effect,
-                                                  Node* control) {
+Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
   Node* value = node->InputAt(0);
 
   if (machine()->Is64()) {
-    return ValueEffectControl(ChangeInt32ToSmi(value), effect, control);
+    return ChangeInt32ToSmi(value);
   }
 
-  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
-                               control);
+  auto if_overflow = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
 
-  Node* ovf = graph()->NewNode(common()->Projection(1), add, control);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
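+  // On 32-bit platforms a Smi is the 31-bit value shifted left by one, so
+  // adding {value} to itself both tags it and, via the overflow flag, detects
+  // values outside the Smi range.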
+  Node* add = __ Int32AddWithOverflow(value, value);
+  Node* ovf = __ Projection(1, add);
+  __ GotoIf(ovf, &if_overflow);
+  __ Goto(&done, __ Projection(0, add));
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  ValueEffectControl alloc =
-      AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), effect, if_true);
+  __ Bind(&if_overflow);
+  Node* number = AllocateHeapNumberWithValue(__ ChangeInt32ToFloat64(value));
+  __ Goto(&done, number);
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = graph()->NewNode(common()->Projection(0), add, if_false);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), alloc.control, if_false);
-  Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                               alloc.value, vfalse, merge);
-  Node* ephi =
-      graph()->NewNode(common()->EffectPhi(2), alloc.effect, effect, merge);
-
-  return ValueEffectControl(phi, ephi, merge);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node, Node* effect,
-                                                   Node* control) {
+Node* EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node) {
   Node* value = node->InputAt(0);
 
-  Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
-                                 SmiMaxValueConstant());
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+  auto if_not_in_smi_range = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = ChangeUint32ToSmi(value);
+  Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
+  __ GotoUnless(check, &if_not_in_smi_range);
+  __ Goto(&done, ChangeUint32ToSmi(value));
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  ValueEffectControl alloc = AllocateHeapNumberWithValue(
-      ChangeUint32ToFloat64(value), effect, if_false);
+  __ Bind(&if_not_in_smi_range);
+  Node* number = AllocateHeapNumberWithValue(__ ChangeUint32ToFloat64(value));
 
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, alloc.control);
-  Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                               vtrue, alloc.value, merge);
-  Node* ephi =
-      graph()->NewNode(common()->EffectPhi(2), effect, alloc.effect, merge);
+  __ Goto(&done, number);
+  __ Bind(&done);
 
-  return ValueEffectControl(phi, ephi, merge);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node,
-                                                        Node* effect,
-                                                        Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
   Node* value = node->InputAt(0);
-  value = ChangeSmiToInt32(value);
-  return ValueEffectControl(value, effect, control);
+  return ChangeSmiToInt32(value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToBit(Node* node, Node* effect,
-                                                Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) {
   Node* value = node->InputAt(0);
-  value = graph()->NewNode(machine()->WordEqual(), value,
-                           jsgraph()->TrueConstant());
-  return ValueEffectControl(value, effect, control);
+  return __ WordEqual(value, __ TrueConstant());
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node, Node* effect,
-                                                  Node* control) {
+Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
   Node* value = node->InputAt(0);
-  Node* zero = jsgraph()->Int32Constant(0);
-  Node* fzero = jsgraph()->Float64Constant(0.0);
 
-  // Collect effect/control/value triples.
-  int count = 0;
-  Node* values[6];
-  Node* effects[6];
-  Node* controls[5];
+  auto if_smi = __ MakeDeferredLabel<1>();
+  auto if_heapnumber = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<6>(MachineRepresentation::kBit);
+
+  Node* zero = __ Int32Constant(0);
+  Node* fzero = __ Float64Constant(0.0);
+
+  // Check if {value} is false.
+  __ GotoIf(__ WordEqual(value, __ FalseConstant()), &done, zero);
 
   // Check if {value} is a Smi.
   Node* check_smi = ObjectIsSmi(value);
-  Node* branch_smi = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                      check_smi, control);
+  __ GotoIf(check_smi, &if_smi);
 
-  // If {value} is a Smi, then we only need to check that it's not zero.
-  Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_smi);
-  Node* esmi = effect;
-  {
-    controls[count] = if_smi;
-    effects[count] = esmi;
-    values[count] =
-        graph()->NewNode(machine()->Word32Equal(),
-                         graph()->NewNode(machine()->WordEqual(), value,
-                                          jsgraph()->IntPtrConstant(0)),
-                         zero);
-    count++;
-  }
-  control = graph()->NewNode(common()->IfFalse(), branch_smi);
+  // Check if {value} is the empty string.
+  __ GotoIf(__ WordEqual(value, __ EmptyStringConstant()), &done, zero);
 
-  // Load the map instance type of {value}.
-  Node* value_map = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
-  Node* value_instance_type = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
-      effect, control);
+  // Load the map of {value}.
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
 
-  // Check if {value} is an Oddball.
-  Node* check_oddball =
-      graph()->NewNode(machine()->Word32Equal(), value_instance_type,
-                       jsgraph()->Int32Constant(ODDBALL_TYPE));
-  Node* branch_oddball = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                          check_oddball, control);
-
-  // The only Oddball {value} that is trueish is true itself.
-  Node* if_oddball = graph()->NewNode(common()->IfTrue(), branch_oddball);
-  Node* eoddball = effect;
-  {
-    controls[count] = if_oddball;
-    effects[count] = eoddball;
-    values[count] = graph()->NewNode(machine()->WordEqual(), value,
-                                     jsgraph()->TrueConstant());
-    count++;
-  }
-  control = graph()->NewNode(common()->IfFalse(), branch_oddball);
-
-  // Check if {value} is a String.
-  Node* check_string =
-      graph()->NewNode(machine()->Int32LessThan(), value_instance_type,
-                       jsgraph()->Int32Constant(FIRST_NONSTRING_TYPE));
-  Node* branch_string =
-      graph()->NewNode(common()->Branch(), check_string, control);
-
-  // For String {value}, we need to check that the length is not zero.
-  Node* if_string = graph()->NewNode(common()->IfTrue(), branch_string);
-  Node* estring = effect;
-  {
-    // Load the {value} length.
-    Node* value_length = estring = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForStringLength()), value,
-        estring, if_string);
-
-    controls[count] = if_string;
-    effects[count] = estring;
-    values[count] =
-        graph()->NewNode(machine()->Word32Equal(),
-                         graph()->NewNode(machine()->WordEqual(), value_length,
-                                          jsgraph()->IntPtrConstant(0)),
-                         zero);
-    count++;
-  }
-  control = graph()->NewNode(common()->IfFalse(), branch_string);
+  // Check if the {value} is undetectable and immediately return false.
+  Node* value_map_bitfield =
+      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
+  __ GotoUnless(
+      __ Word32Equal(__ Word32And(value_map_bitfield,
+                                  __ Int32Constant(1 << Map::kIsUndetectable)),
+                     zero),
+      &done, zero);
 
   // Check if {value} is a HeapNumber.
-  Node* check_heapnumber =
-      graph()->NewNode(machine()->Word32Equal(), value_instance_type,
-                       jsgraph()->Int32Constant(HEAP_NUMBER_TYPE));
-  Node* branch_heapnumber =
-      graph()->NewNode(common()->Branch(), check_heapnumber, control);
+  __ GotoIf(__ WordEqual(value_map, __ HeapNumberMapConstant()),
+            &if_heapnumber);
 
-  // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or NaN.
-  Node* if_heapnumber = graph()->NewNode(common()->IfTrue(), branch_heapnumber);
-  Node* eheapnumber = effect;
+  // All other values that reach here are true.
+  __ Goto(&done, __ Int32Constant(1));
+
+  __ Bind(&if_heapnumber);
   {
-    // Load the raw value of {value}.
-    Node* value_value = eheapnumber = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-        eheapnumber, if_heapnumber);
-
-    // Check if {value} is not one of 0, -0, or NaN.
-    controls[count] = if_heapnumber;
-    effects[count] = eheapnumber;
-    values[count] = graph()->NewNode(
-        machine()->Float64LessThan(), fzero,
-        graph()->NewNode(machine()->Float64Abs(), value_value));
-    count++;
-  }
-  control = graph()->NewNode(common()->IfFalse(), branch_heapnumber);
-
-  // The {value} is either a JSReceiver, a Symbol or some Simd128Value. In
-  // those cases we can just check the undetectable bit on the map, which will
-  // only be set for certain JSReceivers, i.e. document.all.
-  {
-    // Load the {value} map bit field.
-    Node* value_map_bitfield = effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
-        effect, control);
-
-    controls[count] = control;
-    effects[count] = effect;
-    values[count] = graph()->NewNode(
-        machine()->Word32Equal(),
-        graph()->NewNode(machine()->Word32And(), value_map_bitfield,
-                         jsgraph()->Int32Constant(1 << Map::kIsUndetectable)),
-        zero);
-    count++;
+    // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or
+    // NaN.
+    Node* value_value =
+        __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+    __ Goto(&done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
   }
 
-  // Merge the different controls.
-  control = graph()->NewNode(common()->Merge(count), count, controls);
-  effects[count] = control;
-  effect = graph()->NewNode(common()->EffectPhi(count), count + 1, effects);
-  values[count] = control;
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, count),
-                           count + 1, values);
+  __ Bind(&if_smi);
+  {
+    // If {value} is a Smi, then we only need to check that it's not zero.
+    __ Goto(&done,
+            __ Word32Equal(__ WordEqual(value, __ IntPtrConstant(0)), zero));
+  }
 
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node, Node* effect,
-                                                  Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node) {
   Node* value = node->InputAt(0);
 
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
+  Node* check = ObjectIsSmi(value);
+  __ GotoUnless(check, &if_not_smi);
+  __ Goto(&done, ChangeSmiToInt32(value));
+
+  __ Bind(&if_not_smi);
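+  // HeapNumber and Oddball store their float64 payload at the same offset
+  // (asserted below), so a single field load covers both cases.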
+  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+  vfalse = __ ChangeFloat64ToInt32(vfalse);
+  __ Goto(&done, vfalse);
+
+  __ Bind(&done);
+  return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node) {
+  Node* value = node->InputAt(0);
+
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+
+  Node* check = ObjectIsSmi(value);
+  __ GotoUnless(check, &if_not_smi);
+  __ Goto(&done, ChangeSmiToInt32(value));
+
+  __ Bind(&if_not_smi);
+  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+  vfalse = __ ChangeFloat64ToUint32(vfalse);
+  __ Goto(&done, vfalse);
+
+  __ Bind(&done);
+  return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node) {
+  return LowerTruncateTaggedToFloat64(node);
+}
+
+Node* EffectControlLinearizer::LowerChangeTaggedToTaggedSigned(Node* node) {
+  Node* value = node->InputAt(0);
+
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+
+  Node* check = ObjectIsSmi(value);
+  __ GotoUnless(check, &if_not_smi);
+  __ Goto(&done, value);
+
+  __ Bind(&if_not_smi);
+  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+  vfalse = __ ChangeFloat64ToInt32(vfalse);
+  vfalse = ChangeInt32ToSmi(vfalse);
+  __ Goto(&done, vfalse);
+
+  __ Bind(&done);
+  return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
+  Node* value = node->InputAt(0);
+
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+
+  Node* check = ObjectIsSmi(value);
+  __ GotoUnless(check, &if_not_smi);
   Node* vtrue = ChangeSmiToInt32(value);
+  vtrue = __ ChangeInt32ToFloat64(vtrue);
+  __ Goto(&done, vtrue);
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
-    vfalse = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-        efalse, if_false);
-    vfalse = graph()->NewNode(machine()->ChangeFloat64ToInt32(), vfalse);
-  }
+  __ Bind(&if_not_smi);
+  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+  __ Goto(&done, vfalse);
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                           vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node, Node* effect,
-                                                   Node* control) {
-  Node* value = node->InputAt(0);
-
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = ChangeSmiToInt32(value);
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
-    vfalse = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-        efalse, if_false);
-    vfalse = graph()->NewNode(machine()->ChangeFloat64ToUint32(), vfalse);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                           vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node, Node* effect,
-                                                    Node* control) {
-  return LowerTruncateTaggedToFloat64(node, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node, Node* effect,
-                                                      Node* control) {
-  Node* value = node->InputAt(0);
-
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue;
-  {
-    vtrue = ChangeSmiToInt32(value);
-    vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
-  }
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
-    vfalse = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-        efalse, if_false);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state,
-                                          Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state) {
   Node* index = node->InputAt(0);
   Node* limit = node->InputAt(1);
 
-  Node* check = graph()->NewNode(machine()->Uint32LessThan(), index, limit);
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check,
-      frame_state, effect, control);
-
-  return ValueEffectControl(index, effect, control);
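+  // The unsigned comparison also rejects negative indices: a negative int32
+  // reinterpreted as uint32 is larger than any valid limit.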
+  Node* check = __ Uint32LessThan(index, limit);
+  __ DeoptimizeUnless(DeoptimizeReason::kOutOfBounds, check, frame_state);
+  return index;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state,
-                                        Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
+  CheckMapsParameters const& p = CheckMapsParametersOf(node->op());
   Node* value = node->InputAt(0);
 
-  // Load the current map of the {value}.
-  Node* value_map = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+  ZoneHandleSet<Map> const& maps = p.maps();
+  size_t const map_count = maps.size();
 
-  int const map_count = node->op()->ValueInputCount() - 1;
-  Node** controls = temp_zone()->NewArray<Node*>(map_count);
-  Node** effects = temp_zone()->NewArray<Node*>(map_count + 1);
+  if (p.flags() & CheckMapsFlag::kTryMigrateInstance) {
+    auto done =
+        __ MakeLabelFor(GraphAssemblerLabelType::kNonDeferred, map_count * 2);
+    auto migrate = __ MakeDeferredLabel<1>();
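+    // {done} can be reached once per map from each of the two check loops
+    // (before and after the migration attempt), hence map_count * 2 inputs.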
 
-  for (int i = 0; i < map_count; ++i) {
-    Node* map = node->InputAt(1 + i);
+    // Load the current map of the {value}.
+    Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
 
-    Node* check = graph()->NewNode(machine()->WordEqual(), value_map, map);
-    if (i == map_count - 1) {
-      controls[i] = effects[i] = graph()->NewNode(
-          common()->DeoptimizeUnless(DeoptimizeReason::kWrongMap), check,
-          frame_state, effect, control);
-    } else {
-      control = graph()->NewNode(common()->Branch(), check, control);
-      controls[i] = graph()->NewNode(common()->IfTrue(), control);
-      control = graph()->NewNode(common()->IfFalse(), control);
-      effects[i] = effect;
+    // Perform the map checks.
+    for (size_t i = 0; i < map_count; ++i) {
+      Node* map = __ HeapConstant(maps[i]);
+      Node* check = __ WordEqual(value_map, map);
+      if (i == map_count - 1) {
+        __ GotoUnless(check, &migrate);
+        __ Goto(&done);
+      } else {
+        __ GotoIf(check, &done);
+      }
     }
+
+    // Perform the (deferred) instance migration.
+    __ Bind(&migrate);
+    {
+      // If the map is not deprecated, the migration attempt does not make
+      // sense.
+      Node* bitfield3 =
+          __ LoadField(AccessBuilder::ForMapBitField3(), value_map);
+      Node* if_not_deprecated = __ WordEqual(
+          __ Word32And(bitfield3, __ Int32Constant(Map::Deprecated::kMask)),
+          __ Int32Constant(0));
+      __ DeoptimizeIf(DeoptimizeReason::kWrongMap, if_not_deprecated,
+                      frame_state);
+
+      Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+      Runtime::FunctionId id = Runtime::kTryMigrateInstance;
+      CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+          graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
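+      // Call the runtime through the CEntry stub: pass the argument {value},
+      // the runtime function reference, the argument count (1) and the
+      // (empty) context.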
+      Node* result =
+          __ Call(desc, __ CEntryStubConstant(1), value,
+                  __ ExternalConstant(ExternalReference(id, isolate())),
+                  __ Int32Constant(1), __ NoContextConstant());
+      Node* check = ObjectIsSmi(result);
+      __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, check,
+                      frame_state);
+    }
+
+    // Reload the current map of the {value}.
+    value_map = __ LoadField(AccessBuilder::ForMap(), value);
+
+    // Perform the map checks again.
+    for (size_t i = 0; i < map_count; ++i) {
+      Node* map = __ HeapConstant(maps[i]);
+      Node* check = __ WordEqual(value_map, map);
+      if (i == map_count - 1) {
+        __ DeoptimizeUnless(DeoptimizeReason::kWrongMap, check, frame_state);
+      } else {
+        __ GotoIf(check, &done);
+      }
+    }
+
+    __ Goto(&done);
+    __ Bind(&done);
+  } else {
+    auto done =
+        __ MakeLabelFor(GraphAssemblerLabelType::kNonDeferred, map_count);
+
+    // Load the current map of the {value}.
+    Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+
+    for (size_t i = 0; i < map_count; ++i) {
+      Node* map = __ HeapConstant(maps[i]);
+      Node* check = __ WordEqual(value_map, map);
+      if (i == map_count - 1) {
+        __ DeoptimizeUnless(DeoptimizeReason::kWrongMap, check, frame_state);
+      } else {
+        __ GotoIf(check, &done);
+      }
+    }
+    __ Goto(&done);
+    __ Bind(&done);
   }
-
-  control = graph()->NewNode(common()->Merge(map_count), map_count, controls);
-  effects[map_count] = control;
-  effect =
-      graph()->NewNode(common()->EffectPhi(map_count), map_count + 1, effects);
-
-  return ValueEffectControl(value, effect, control);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state,
-                                          Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
   Node* value = node->InputAt(0);
 
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>();
+
   Node* check0 = ObjectIsSmi(value);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  __ GotoUnless(check0, &if_not_smi);
+  __ Goto(&done);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
+  __ Bind(&if_not_smi);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* check1 = __ WordEqual(value_map, __ HeapNumberMapConstant());
+  __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check1, frame_state);
+  __ Goto(&done);
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  {
-    Node* value_map = efalse0 =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse0, if_false0);
-    Node* check1 = graph()->NewNode(machine()->WordEqual(), value_map,
-                                    jsgraph()->HeapNumberMapConstant());
-    if_false0 = efalse0 = graph()->NewNode(
-        common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check1,
-        frame_state, efalse0, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state,
-                                          Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
+                                                  Node* frame_state) {
   Node* value = node->InputAt(0);
 
-  Node* check0 = ObjectIsSmi(value);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check0,
-                       frame_state, effect, control);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_instance_type =
+      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
 
-  Node* value_map = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
-  Node* value_instance_type = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
-      effect, control);
-
-  Node* check1 =
-      graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
-                       jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType), check1,
-      frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
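+  // JSReceiver instance types form the last block of instance types, so a
+  // single lower-bound check against FIRST_JS_RECEIVER_TYPE suffices.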
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  Node* check = __ Uint32LessThanOrEqual(
+      __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
+  __ DeoptimizeUnless(DeoptimizeReason::kNotAJavaScriptObject, check,
+                      frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state,
-                                      Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
   Node* value = node->InputAt(0);
 
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNoReason),
-                       value, frame_state, effect, control);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_instance_type =
+      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
 
-  return ValueEffectControl(value, effect, control);
+  Node* check = __ Uint32LessThan(value_instance_type,
+                                  __ Uint32Constant(FIRST_NONSTRING_TYPE));
+  __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
+                                                            Node* frame_state) {
+  Node* value = node->InputAt(0);
+
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_instance_type =
+      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+
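+  // A single masked compare checks both conditions at once: {value} must be
+  // a string and that string must be internalized.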
+  Node* check = __ Word32Equal(
+      __ Word32And(value_instance_type,
+                   __ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
+      __ Int32Constant(kInternalizedTag));
+  __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+
+  return value;
+}
+
+Node* EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
+  Node* value = node->InputAt(0);
+  __ DeoptimizeUnless(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason,
+                      value, frame_state);
+  return value;
+}
+
+Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
+                                                    Node* frame_state) {
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
-  Node* value =
-      graph()->NewNode(machine()->Int32AddWithOverflow(), lhs, rhs, control);
-
-  Node* check = graph()->NewNode(common()->Projection(1), value, control);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
-                       check, frame_state, effect, control);
-
-  value = graph()->NewNode(common()->Projection(0), value, control);
-
-  return ValueEffectControl(value, effect, control);
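+  // Int32AddWithOverflow produces a value pair: projection 0 is the result
+  // and projection 1 is the overflow bit that feeds the deopt check.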
+  Node* value = __ Int32AddWithOverflow(lhs, rhs);
+  Node* check = __ Projection(1, value);
+  __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+  return __ Projection(0, value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Sub(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
+                                                    Node* frame_state) {
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
-  Node* value =
-      graph()->NewNode(machine()->Int32SubWithOverflow(), lhs, rhs, control);
-
-  Node* check = graph()->NewNode(common()->Projection(1), value, control);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
-                       check, frame_state, effect, control);
-
-  value = graph()->NewNode(common()->Projection(0), value, control);
-
-  return ValueEffectControl(value, effect, control);
+  Node* value = __ Int32SubWithOverflow(lhs, rhs);
+  Node* check = __ Projection(1, value);
+  __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+  return __ Projection(0, value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Div(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
-  Node* zero = jsgraph()->Int32Constant(0);
-  Node* minusone = jsgraph()->Int32Constant(-1);
-  Node* minint = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::min());
-
+Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
+                                                    Node* frame_state) {
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
+  auto if_not_positive = __ MakeDeferredLabel<1>();
+  auto if_is_minint = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+  auto minint_check_done = __ MakeLabel<2>();
+
+  Node* zero = __ Int32Constant(0);
+
   // Check if {rhs} is positive (and not zero).
-  Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  Node* check0 = __ Int32LessThan(zero, rhs);
+  __ GotoUnless(check0, &if_not_positive);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0;
-  {
-    // Fast case, no additional checking required.
-    vtrue0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true0);
-  }
+  // Fast case, no additional checking required.
+  __ Goto(&done, __ Int32Div(lhs, rhs));
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0;
   {
+    __ Bind(&if_not_positive);
+
     // Check if {rhs} is zero.
-    Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
-    if_false0 = efalse0 = graph()->NewNode(
-        common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
-        frame_state, efalse0, if_false0);
+    Node* check = __ Word32Equal(rhs, zero);
+    __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
 
     // Check if {lhs} is zero, as that would produce minus zero.
-    check = graph()->NewNode(machine()->Word32Equal(), lhs, zero);
-    if_false0 = efalse0 =
-        graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
-                         check, frame_state, efalse0, if_false0);
+    check = __ Word32Equal(lhs, zero);
+    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
 
     // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
     // to return -kMinInt, which is not representable.
+    Node* minint = __ Int32Constant(std::numeric_limits<int32_t>::min());
-    Node* check1 = graph()->NewNode(machine()->Word32Equal(), lhs, minint);
+    Node* check1 = __ Word32Equal(lhs, minint);
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
+    __ GotoIf(check1, &if_is_minint);
+    __ Goto(&minint_check_done);
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = efalse0;
-    {
-      // Check if {rhs} is -1.
-      Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, minusone);
-      if_true1 = etrue1 =
-          graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
-                           check, frame_state, etrue1, if_true1);
-    }
+    __ Bind(&if_is_minint);
+    // Check if {rhs} is -1.
+    Node* minusone = __ Int32Constant(-1);
+    Node* is_minus_one = __ Word32Equal(rhs, minusone);
+    __ DeoptimizeIf(DeoptimizeReason::kOverflow, is_minus_one, frame_state);
+    __ Goto(&minint_check_done);
 
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = efalse0;
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    efalse0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
-
+    __ Bind(&minint_check_done);
     // Perform the actual integer division.
-    vfalse0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_false0);
+    __ Goto(&done, __ Int32Div(lhs, rhs));
   }
 
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
-                       vfalse0, control);
+  __ Bind(&done);
+  Node* value = done.PhiAt(0);
 
   // Check if the remainder is non-zero.
-  Node* check =
-      graph()->NewNode(machine()->Word32Equal(), lhs,
-                       graph()->NewNode(machine()->Int32Mul(), rhs, value));
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
-      frame_state, effect, control);
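+  // {lhs} == {rhs} * ({lhs} / {rhs}) holds exactly when the division left no
+  // remainder; otherwise we lost precision and have to deopt.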
+  Node* check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
+  __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
 
-  return ValueEffectControl(value, effect, control);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
-  Node* zero = jsgraph()->Int32Constant(0);
-  Node* one = jsgraph()->Int32Constant(1);
-
+Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
+                                                    Node* frame_state) {
   // General case for signed integer modulus, with optimization for (unknown)
   // power of 2 right hand side.
   //
@@ -1439,1226 +1339,704 @@
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
-  // Check if {rhs} is not strictly positive.
-  Node* check0 = graph()->NewNode(machine()->Int32LessThanOrEqual(), rhs, zero);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+  auto if_rhs_not_positive = __ MakeDeferredLabel<1>();
+  auto if_lhs_negative = __ MakeDeferredLabel<1>();
+  auto if_power_of_two = __ MakeLabel<1>();
+  auto rhs_checked = __ MakeLabel<2>(MachineRepresentation::kWord32);
+  auto done = __ MakeLabel<3>(MachineRepresentation::kWord32);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0;
+  Node* zero = __ Int32Constant(0);
+
+  // Check if {rhs} is not strictly positive.
+  Node* check0 = __ Int32LessThanOrEqual(rhs, zero);
+  __ GotoIf(check0, &if_rhs_not_positive);
+  __ Goto(&rhs_checked, rhs);
+
+  __ Bind(&if_rhs_not_positive);
   {
     // Negate {rhs}; this might still produce a negative result in the case
     // of -2^31, but that is handled safely below.
-    vtrue0 = graph()->NewNode(machine()->Int32Sub(), zero, rhs);
+    Node* vtrue0 = __ Int32Sub(zero, rhs);
 
     // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
-    Node* check = graph()->NewNode(machine()->Word32Equal(), vtrue0, zero);
-    if_true0 = etrue0 = graph()->NewNode(
-        common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
-        frame_state, etrue0, if_true0);
+    Node* check = __ Word32Equal(vtrue0, zero);
+    __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+    __ Goto(&rhs_checked, vtrue0);
   }
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0 = rhs;
-
-  // At this point {rhs} is either greater than zero or -2^31, both are
-  // fine for the code that follows.
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  rhs = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                         vtrue0, vfalse0, control);
+  __ Bind(&rhs_checked);
+  rhs = rhs_checked.PhiAt(0);
 
   // Check if {lhs} is negative.
-  Node* check1 = graph()->NewNode(machine()->Int32LessThan(), lhs, zero);
-  Node* branch1 =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
+  Node* check1 = __ Int32LessThan(lhs, zero);
+  __ GotoIf(check1, &if_lhs_negative);
 
-  Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-  Node* etrue1 = effect;
-  Node* vtrue1;
+  // {lhs} is non-negative.
   {
-    // Compute the remainder using {lhs % msk}.
-    vtrue1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
-
-    // Check if we would have to return -0.
-    Node* check = graph()->NewNode(machine()->Word32Equal(), vtrue1, zero);
-    if_true1 = etrue1 =
-        graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
-                         check, frame_state, etrue1, if_true1);
-  }
-
-  Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-  Node* efalse1 = effect;
-  Node* vfalse1;
-  {
-    Node* msk = graph()->NewNode(machine()->Int32Sub(), rhs, one);
+    Node* one = __ Int32Constant(1);
+    Node* msk = __ Int32Sub(rhs, one);
 
     // Check if {rhs} minus one is a valid mask.
-    Node* check2 = graph()->NewNode(
-        machine()->Word32Equal(),
-        graph()->NewNode(machine()->Word32And(), rhs, msk), zero);
-    Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
-
-    // Compute the remainder using {lhs & msk}.
-    Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-    Node* vtrue2 = graph()->NewNode(machine()->Word32And(), lhs, msk);
-
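+    // rhs & (rhs - 1) is zero exactly when {rhs} is a power of two, in which
+    // case the modulus reduces to a bitwise AND with the mask.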
+    Node* check2 = __ Word32Equal(__ Word32And(rhs, msk), zero);
+    __ GotoIf(check2, &if_power_of_two);
     // Compute the remainder using the generic {lhs % rhs}.
-    Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-    Node* vfalse2 =
-        graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_false2);
+    __ Goto(&done, __ Int32Mod(lhs, rhs));
 
-    if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-    vfalse1 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                               vtrue2, vfalse2, if_false1);
+    __ Bind(&if_power_of_two);
+    // Compute the remainder using {lhs & msk}.
+    __ Goto(&done, __ Word32And(lhs, msk));
   }
 
-  control = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, control);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue1,
-                       vfalse1, control);
+  __ Bind(&if_lhs_negative);
+  {
+    // Compute the remainder using {lhs % msk}.
+    Node* vtrue1 = __ Int32Mod(lhs, rhs);
 
-  return ValueEffectControl(value, effect, control);
+    // Check if we would have to return -0.
+    Node* check = __ Word32Equal(vtrue1, zero);
+    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
+    __ Goto(&done, vtrue1);
+  }
+
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32Div(Node* node, Node* frame_state,
-                                               Node* effect, Node* control) {
-  Node* zero = jsgraph()->Int32Constant(0);
-
+Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
+                                                     Node* frame_state) {
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
+  Node* zero = __ Int32Constant(0);
+
   // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
-  Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
-      frame_state, effect, control);
+  Node* check = __ Word32Equal(rhs, zero);
+  __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
 
   // Perform the actual unsigned integer division.
-  Node* value = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, control);
+  Node* value = __ Uint32Div(lhs, rhs);
 
   // Check if the remainder is non-zero.
-  check = graph()->NewNode(machine()->Word32Equal(), lhs,
-                           graph()->NewNode(machine()->Int32Mul(), rhs, value));
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
-      frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
+  check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
+  __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32Mod(Node* node, Node* frame_state,
-                                               Node* effect, Node* control) {
-  Node* zero = jsgraph()->Int32Constant(0);
-
+Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
+                                                     Node* frame_state) {
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
+  Node* zero = __ Int32Constant(0);
+
   // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
-  Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
-      frame_state, effect, control);
+  Node* check = __ Word32Equal(rhs, zero);
+  __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
 
   // Perform the actual unsigned integer modulus.
-  Node* value = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, control);
-
-  return ValueEffectControl(value, effect, control);
+  return __ Uint32Mod(lhs, rhs);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
+                                                    Node* frame_state) {
   CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
-  Node* zero = jsgraph()->Int32Constant(0);
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
-  Node* projection =
-      graph()->NewNode(machine()->Int32MulWithOverflow(), lhs, rhs, control);
+  Node* projection = __ Int32MulWithOverflow(lhs, rhs);
+  Node* check = __ Projection(1, projection);
+  __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
 
-  Node* check = graph()->NewNode(common()->Projection(1), projection, control);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
-                       check, frame_state, effect, control);
-
-  Node* value = graph()->NewNode(common()->Projection(0), projection, control);
+  Node* value = __ Projection(0, projection);
 
   if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
-    Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value, zero);
-    Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                         check_zero, control);
+    auto if_zero = __ MakeDeferredLabel<1>();
+    auto check_done = __ MakeLabel<2>();
+    Node* zero = __ Int32Constant(0);
+    Node* check_zero = __ Word32Equal(value, zero);
+    __ GotoIf(check_zero, &if_zero);
+    __ Goto(&check_done);
 
-    Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
-    Node* e_if_zero = effect;
-    {
-      // We may need to return negative zero.
-      Node* or_inputs = graph()->NewNode(machine()->Word32Or(), lhs, rhs);
-      Node* check_or =
-          graph()->NewNode(machine()->Int32LessThan(), or_inputs, zero);
-      if_zero = e_if_zero =
-          graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
-                           check_or, frame_state, e_if_zero, if_zero);
-    }
+    __ Bind(&if_zero);
+    // We may need to return negative zero.
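+    // A zero product is -0 only if one of the inputs is negative; OR-ing the
+    // inputs makes the sign bit of either one visible.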
+    Node* check_or = __ Int32LessThan(__ Word32Or(lhs, rhs), zero);
+    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_or, frame_state);
+    __ Goto(&check_done);
 
-    Node* if_not_zero = graph()->NewNode(common()->IfFalse(), branch_zero);
-    Node* e_if_not_zero = effect;
-
-    control = graph()->NewNode(common()->Merge(2), if_zero, if_not_zero);
-    effect = graph()->NewNode(common()->EffectPhi(2), e_if_zero, e_if_not_zero,
-                              control);
+    __ Bind(&check_done);
   }
 
-  return ValueEffectControl(value, effect, control);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(Node* node,
-                                                         Node* frame_state,
-                                                         Node* effect,
-                                                         Node* control) {
+Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
+    Node* node, Node* frame_state) {
   DCHECK(SmiValuesAre31Bits());
   Node* value = node->InputAt(0);
 
-  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
-                               control);
-
-  Node* check = graph()->NewNode(common()->Projection(1), add, control);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
-                       check, frame_state, effect, control);
-
-  value = graph()->NewNode(common()->Projection(0), add, control);
-
-  return ValueEffectControl(value, effect, control);
+  Node* add = __ Int32AddWithOverflow(value, value);
+  Node* check = __ Projection(1, add);
+  __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+  return __ Projection(0, add);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
-                                                   Node* frame_state,
-                                                   Node* effect,
-                                                   Node* control) {
+Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
+                                                         Node* frame_state) {
   Node* value = node->InputAt(0);
-  Node* max_int = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::max());
-  Node* is_safe =
-      graph()->NewNode(machine()->Uint32LessThanOrEqual(), value, max_int);
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), is_safe,
-      frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
+  Node* max_int = __ Int32Constant(std::numeric_limits<int32_t>::max());
+  Node* is_safe = __ Uint32LessThanOrEqual(value, max_int);
+  __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, is_safe, frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(Node* node,
-                                                          Node* frame_state,
-                                                          Node* effect,
-                                                          Node* control) {
+Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
+    Node* node, Node* frame_state) {
   Node* value = node->InputAt(0);
-  Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
-                                 SmiMaxValueConstant());
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
-      frame_state, effect, control);
-  value = ChangeUint32ToSmi(value);
-
-  return ValueEffectControl(value, effect, control);
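+  // Unsigned values above the Smi maximum cannot be represented as a tagged
+  // signed value, so deopt before tagging.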
+  Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
+  __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
+  return ChangeUint32ToSmi(value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
-                                                    Node* value,
-                                                    Node* frame_state,
-                                                    Node* effect,
-                                                    Node* control) {
-  Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
-  Node* check_same = graph()->NewNode(
-      machine()->Float64Equal(), value,
-      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecisionOrNaN),
-      check_same, frame_state, effect, control);
+Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
+    CheckForMinusZeroMode mode, Node* value, Node* frame_state) {
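+  // Round-trip the value through int32: the equality check below fails both
+  // for NaN and for any value that does not convert exactly.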
+  Node* value32 = __ RoundFloat64ToInt32(value);
+  Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
+  __ DeoptimizeUnless(DeoptimizeReason::kLostPrecisionOrNaN, check_same,
+                      frame_state);
 
   if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
     // Check if {value} is -0.
-    Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
-                                        jsgraph()->Int32Constant(0));
-    Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                         check_zero, control);
+    auto if_zero = __ MakeDeferredLabel<1>();
+    auto check_done = __ MakeLabel<2>();
 
-    Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
-    Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+    Node* check_zero = __ Word32Equal(value32, __ Int32Constant(0));
+    __ GotoIf(check_zero, &if_zero);
+    __ Goto(&check_done);
 
+    __ Bind(&if_zero);
     // In case of 0, we need to check the high bits for the IEEE -0 pattern.
-    Node* check_negative = graph()->NewNode(
-        machine()->Int32LessThan(),
-        graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
-        jsgraph()->Int32Constant(0));
+    Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
+                                            __ Int32Constant(0));
+    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_negative, frame_state);
+    __ Goto(&check_done);
 
-    Node* deopt_minus_zero =
-        graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
-                         check_negative, frame_state, effect, if_zero);
-
-    control =
-        graph()->NewNode(common()->Merge(2), deopt_minus_zero, if_notzero);
-    effect = graph()->NewNode(common()->EffectPhi(2), deopt_minus_zero, effect,
-                              control);
+    __ Bind(&check_done);
   }
-
-  return ValueEffectControl(value32, effect, control);
+  return value32;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
-                                                    Node* frame_state,
-                                                    Node* effect,
-                                                    Node* control) {
+Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
+                                                          Node* frame_state) {
+  CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+  Node* value = node->InputAt(0);
+  return BuildCheckedFloat64ToInt32(mode, value, frame_state);
+}
+
+Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
+    Node* node, Node* frame_state) {
+  Node* value = node->InputAt(0);
+  Node* check = ObjectIsSmi(value);
+  __ DeoptimizeUnless(DeoptimizeReason::kNotASmi, check, frame_state);
+  return ChangeSmiToInt32(value);
+}
+
+Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
+                                                         Node* frame_state) {
   CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
   Node* value = node->InputAt(0);
 
-  return BuildCheckedFloat64ToInt32(mode, value, frame_state, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(Node* node,
-                                                         Node* frame_state,
-                                                         Node* effect,
-                                                         Node* control) {
-  Node* value = node->InputAt(0);
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
 
   Node* check = ObjectIsSmi(value);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
-                       check, frame_state, effect, control);
-  value = ChangeSmiToInt32(value);
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
-                                                   Node* frame_state,
-                                                   Node* effect,
-                                                   Node* control) {
-  CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
-  Node* value = node->InputAt(0);
-
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
+  __ GotoUnless(check, &if_not_smi);
   // In the Smi case, just convert to int32.
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = ChangeSmiToInt32(value);
+  __ Goto(&done, ChangeSmiToInt32(value));
 
   // In the non-Smi case, check the heap numberness, load the number and convert
   // to int32.
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    Node* value_map = efalse =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse, if_false);
-    Node* check = graph()->NewNode(machine()->WordEqual(), value_map,
-                                   jsgraph()->HeapNumberMapConstant());
-    if_false = efalse = graph()->NewNode(
-        common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check,
-        frame_state, efalse, if_false);
-    vfalse = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-        efalse, if_false);
-    ValueEffectControl state =
-        BuildCheckedFloat64ToInt32(mode, vfalse, frame_state, efalse, if_false);
-    if_false = state.control;
-    efalse = state.effect;
-    vfalse = state.value;
-  }
+  __ Bind(&if_not_smi);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant());
+  __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check_map,
+                      frame_state);
+  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+  vfalse = BuildCheckedFloat64ToInt32(mode, vfalse, frame_state);
+  __ Goto(&done, vfalse);
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                           vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
-    CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
-    Node* control) {
-  Node* value_map = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
-
-  Node* check_number = graph()->NewNode(machine()->WordEqual(), value_map,
-                                        jsgraph()->HeapNumberMapConstant());
-
+Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
+    CheckTaggedInputMode mode, Node* value, Node* frame_state) {
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* check_number = __ WordEqual(value_map, __ HeapNumberMapConstant());
   switch (mode) {
     case CheckTaggedInputMode::kNumber: {
-      control = effect = graph()->NewNode(
-          common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber),
-          check_number, frame_state, effect, control);
+      __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check_number,
+                          frame_state);
       break;
     }
     case CheckTaggedInputMode::kNumberOrOddball: {
-      Node* branch =
-          graph()->NewNode(common()->Branch(), check_number, control);
+      auto check_done = __ MakeLabel<2>();
 
-      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-      Node* etrue = effect;
-
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+      __ GotoIf(check_number, &check_done);
       // Oddballs also contain the numeric value, so we just need to check
       // that we have an oddball here.
-      Node* efalse = effect;
-      Node* instance_type = efalse = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
-          value_map, efalse, if_false);
+      Node* instance_type =
+          __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
       Node* check_oddball =
-          graph()->NewNode(machine()->Word32Equal(), instance_type,
-                           jsgraph()->Int32Constant(ODDBALL_TYPE));
-      if_false = efalse = graph()->NewNode(
-          common()->DeoptimizeUnless(DeoptimizeReason::kNotANumberOrOddball),
-          check_oddball, frame_state, efalse, if_false);
+          __ Word32Equal(instance_type, __ Int32Constant(ODDBALL_TYPE));
+      __ DeoptimizeUnless(DeoptimizeReason::kNotANumberOrOddball, check_oddball,
+                          frame_state);
       STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+      __ Goto(&check_done);
 
-      control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-      effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+      __ Bind(&check_done);
       break;
     }
   }
-
-  value = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-      effect, control);
-  return ValueEffectControl(value, effect, control);
+  return __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
-                                                     Node* frame_state,
-                                                     Node* effect,
-                                                     Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
+                                                           Node* frame_state) {
   CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
   Node* value = node->InputAt(0);
 
+  auto if_smi = __ MakeLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+
   Node* check = ObjectIsSmi(value);
-  Node* branch = graph()->NewNode(common()->Branch(), check, control);
+  __ GotoIf(check, &if_smi);
 
   // In the Smi case, just convert to int32 and then float64.
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = ChangeSmiToInt32(value);
-  vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
-
   // Otherwise, check heap numberness and load the number.
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  ValueEffectControl number_state = BuildCheckedHeapNumberOrOddballToFloat64(
-      mode, value, frame_state, effect, if_false);
+  Node* number =
+      BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
+  __ Goto(&done, number);
 
-  Node* merge =
-      graph()->NewNode(common()->Merge(2), if_true, number_state.control);
-  Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
-                                      number_state.effect, merge);
-  Node* result =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2), vtrue,
-                       number_state.value, merge);
+  __ Bind(&if_smi);
+  Node* from_smi = ChangeSmiToInt32(value);
+  from_smi = __ ChangeInt32ToFloat64(from_smi);
+  __ Goto(&done, from_smi);
 
-  return ValueEffectControl(result, effect_phi, merge);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(Node* node,
-                                                          Node* frame_state,
-                                                          Node* effect,
-                                                          Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
+    Node* node, Node* frame_state) {
   Node* value = node->InputAt(0);
 
   Node* check = ObjectIsSmi(value);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
-                       check, frame_state, effect, control);
+  __ DeoptimizeUnless(DeoptimizeReason::kNotASmi, check, frame_state);
 
-  return ValueEffectControl(value, effect, control);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(Node* node,
-                                                           Node* frame_state,
-                                                           Node* effect,
-                                                           Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
+    Node* node, Node* frame_state) {
   Node* value = node->InputAt(0);
 
   Node* check = ObjectIsSmi(value);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check,
-                       frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ DeoptimizeIf(DeoptimizeReason::kSmi, check, frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
-                                                     Node* control) {
+Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
   Node* value = node->InputAt(0);
 
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
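+  // The not-smi path is deferred, i.e. scheduled out of line as the unlikely
+  // case (the GraphAssembler counterpart of the old BranchHint::kTrue).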
+
   Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+  __ GotoUnless(check, &if_not_smi);
+  __ Goto(&done, ChangeSmiToInt32(value));
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = ChangeSmiToInt32(value);
+  __ Bind(&if_not_smi);
+  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
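+  // HeapNumbers and oddballs store their numeric value at the same offset
+  // (asserted above), so a single field load covers both cases.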
+  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+  vfalse = __ TruncateFloat64ToWord32(vfalse);
+  __ Goto(&done, vfalse);
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
-    vfalse = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-        efalse, if_false);
-    vfalse = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                           vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(Node* node,
-                                                            Node* frame_state,
-                                                            Node* effect,
-                                                            Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
+    Node* node, Node* frame_state) {
   Node* value = node->InputAt(0);
 
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+  auto if_not_smi = __ MakeLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
 
+  Node* check = ObjectIsSmi(value);
+  __ GotoUnless(check, &if_not_smi);
   // In the Smi case, just convert to int32.
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = ChangeSmiToInt32(value);
+  __ Goto(&done, ChangeSmiToInt32(value));
 
   // Otherwise, check that it's a heap number or oddball and truncate the value
   // to int32.
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  ValueEffectControl false_state = BuildCheckedHeapNumberOrOddballToFloat64(
-      CheckTaggedInputMode::kNumberOrOddball, value, frame_state, effect,
-      if_false);
-  false_state.value =
-      graph()->NewNode(machine()->TruncateFloat64ToWord32(), false_state.value);
+  __ Bind(&if_not_smi);
+  Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
+      CheckTaggedInputMode::kNumberOrOddball, value, frame_state);
+  number = __ TruncateFloat64ToWord32(number);
+  __ Goto(&done, number);
 
-  Node* merge =
-      graph()->NewNode(common()->Merge(2), if_true, false_state.control);
-  Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
-                                      false_state.effect, merge);
-  Node* result =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue,
-                       false_state.value, merge);
-
-  return ValueEffectControl(result, effect_phi, merge);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsCallable(Node* node, Node* effect,
-                                               Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
   Node* value = node->InputAt(0);
 
+  auto if_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
   Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+  __ GotoIf(check, &if_smi);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = jsgraph()->Int32Constant(0);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_bit_field =
+      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
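+  // A detectable callable has the kIsCallable bit set and the
+  // kIsUndetectable bit cleared; the masked compare checks both at once.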
+  Node* vfalse = __ Word32Equal(
+      __ Int32Constant(1 << Map::kIsCallable),
+      __ Word32And(value_bit_field,
+                   __ Int32Constant((1 << Map::kIsCallable) |
+                                    (1 << Map::kIsUndetectable))));
+  __ Goto(&done, vfalse);
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    Node* value_map = efalse =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse, if_false);
-    Node* value_bit_field = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
-        efalse, if_false);
-    vfalse = graph()->NewNode(
-        machine()->Word32Equal(),
-        jsgraph()->Int32Constant(1 << Map::kIsCallable),
-        graph()->NewNode(
-            machine()->Word32And(), value_bit_field,
-            jsgraph()->Int32Constant((1 << Map::kIsCallable) |
-                                     (1 << Map::kIsUndetectable))));
-  }
+  __ Bind(&if_smi);
+  __ Goto(&done, __ Int32Constant(0));
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
-                           vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsNumber(Node* node, Node* effect,
-                                             Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
   Node* value = node->InputAt(0);
 
+  auto if_primitive = __ MakeDeferredLabel<2>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
+  Node* check0 = ObjectIsSmi(value);
+  __ GotoIf(check0, &if_primitive);
+
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_instance_type =
+      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  Node* check1 = __ Uint32LessThanOrEqual(
+      __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
+  __ GotoUnless(check1, &if_primitive);
+
+  Node* value_bit_field =
+      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
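+  // The object is non-callable iff it is a JSReceiver (checked above) whose
+  // map has the kIsCallable bit cleared.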
+  Node* check2 = __ Word32Equal(
+      __ Int32Constant(0),
+      __ Word32And(value_bit_field, __ Int32Constant(1 << Map::kIsCallable)));
+  __ Goto(&done, check2);
+
+  __ Bind(&if_primitive);
+  __ Goto(&done, __ Int32Constant(0));
+
+  __ Bind(&done);
+  return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerObjectIsNumber(Node* node) {
+  Node* value = node->InputAt(0);
+
+  auto if_smi = __ MakeLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
+  __ GotoIf(ObjectIsSmi(value), &if_smi);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
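+  // A heap object is a number iff its map is the canonical HeapNumber map.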
+  __ Goto(&done, __ WordEqual(value_map, __ HeapNumberMapConstant()));
+
+  __ Bind(&if_smi);
+  __ Goto(&done, __ Int32Constant(1));
+
+  __ Bind(&done);
+  return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerObjectIsReceiver(Node* node) {
+  Node* value = node->InputAt(0);
+
+  auto if_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
+  __ GotoIf(ObjectIsSmi(value), &if_smi);
+
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_instance_type =
+      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+  Node* result = __ Uint32LessThanOrEqual(
+      __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
+  __ Goto(&done, result);
+
+  __ Bind(&if_smi);
+  __ Goto(&done, __ Int32Constant(0));
+
+  __ Bind(&done);
+  return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerObjectIsSmi(Node* node) {
+  Node* value = node->InputAt(0);
+  return ObjectIsSmi(value);
+}
+
+Node* EffectControlLinearizer::LowerObjectIsString(Node* node) {
+  Node* value = node->InputAt(0);
+
+  auto if_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
   Node* check = ObjectIsSmi(value);
-  Node* branch = graph()->NewNode(common()->Branch(), check, control);
+  __ GotoIf(check, &if_smi);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_instance_type =
+      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+  Node* vfalse = __ Uint32LessThan(value_instance_type,
+                                   __ Uint32Constant(FIRST_NONSTRING_TYPE));
+  __ Goto(&done, vfalse);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = jsgraph()->Int32Constant(1);
+  __ Bind(&if_smi);
+  __ Goto(&done, __ Int32Constant(0));
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    Node* value_map = efalse =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse, if_false);
-    vfalse = graph()->NewNode(machine()->WordEqual(), value_map,
-                              jsgraph()->HeapNumberMapConstant());
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
-                           vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsReceiver(Node* node, Node* effect,
-                                               Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
   Node* value = node->InputAt(0);
 
+  auto if_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
   Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+  __ GotoIf(check, &if_smi);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = jsgraph()->Int32Constant(0);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_bit_field =
+      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
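+  // Compute (bit_field & kIsUndetectable) != 0 as a bit: the inner compare
+  // tests the masked word for zero, the outer compare negates the result.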
+  Node* vfalse = __ Word32Equal(
+      __ Word32Equal(__ Int32Constant(0),
+                     __ Word32And(value_bit_field,
+                                  __ Int32Constant(1 << Map::kIsUndetectable))),
+      __ Int32Constant(0));
+  __ Goto(&done, vfalse);
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-    Node* value_map = efalse =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse, if_false);
-    Node* value_instance_type = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
-        efalse, if_false);
-    vfalse = graph()->NewNode(machine()->Uint32LessThanOrEqual(),
-                              jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
-                              value_instance_type);
-  }
+  __ Bind(&if_smi);
+  __ Goto(&done, __ Int32Constant(0));
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
-                           vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsSmi(Node* node, Node* effect,
-                                          Node* control) {
-  Node* value = node->InputAt(0);
-  value = ObjectIsSmi(value);
-  return ValueEffectControl(value, effect, control);
+Node* EffectControlLinearizer::LowerNewRestParameterElements(Node* node) {
+  int const formal_parameter_count = ParameterCountOf(node->op());
+
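+  // The actual work happens in the NewRestParameterElements stub; the formal
+  // parameter count taken from the operator is its sole argument.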
+  Callable const callable = CodeFactory::NewRestParameterElements(isolate());
+  Operator::Properties const properties = node->op()->properties();
+  CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+  return __ Call(desc, __ HeapConstant(callable.code()),
+                 __ IntPtrConstant(formal_parameter_count),
+                 __ NoContextConstant());
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsString(Node* node, Node* effect,
-                                             Node* control) {
+Node* EffectControlLinearizer::LowerNewUnmappedArgumentsElements(Node* node) {
+  int const formal_parameter_count = ParameterCountOf(node->op());
+
+  Callable const callable =
+      CodeFactory::NewUnmappedArgumentsElements(isolate());
+  Operator::Properties const properties = node->op()->properties();
+  CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+  return __ Call(desc, __ HeapConstant(callable.code()),
+                 __ IntPtrConstant(formal_parameter_count),
+                 __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node) {
   Node* value = node->InputAt(0);
 
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = jsgraph()->Int32Constant(0);
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    Node* value_map = efalse =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse, if_false);
-    Node* value_instance_type = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
-        efalse, if_false);
-    vfalse = graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
-                              jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
-                           vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  Node* value_bit_field =
+      __ LoadField(AccessBuilder::ForJSArrayBufferBitField(), value);
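+  // Return 1 iff the WasNeutered flag is set: the inner compare tests the
+  // masked bit field for zero and the outer compare negates the result.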
+  return __ Word32Equal(
+      __ Word32Equal(
+          __ Word32And(value_bit_field,
+                       __ Int32Constant(JSArrayBuffer::WasNeutered::kMask)),
+          __ Int32Constant(0)),
+      __ Int32Constant(0));
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsUndetectable(Node* node, Node* effect,
-                                                   Node* control) {
+Node* EffectControlLinearizer::LowerStringCharAt(Node* node) {
+  Node* receiver = node->InputAt(0);
+  Node* position = node->InputAt(1);
+
+  Callable const callable = CodeFactory::StringCharAt(isolate());
+  Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
+  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+  return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
+                 __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
+  Node* receiver = node->InputAt(0);
+  Node* position = node->InputAt(1);
+
+  Callable const callable = CodeFactory::StringCharCodeAt(isolate());
+  Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
+  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
+      MachineType::TaggedSigned());
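+  // The StringCharCodeAt stub returns the character code as a Smi, hence the
+  // TaggedSigned return representation on the call descriptor.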
+  return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
+                 __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
   Node* value = node->InputAt(0);
 
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = jsgraph()->Int32Constant(0);
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    Node* value_map = efalse =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse, if_false);
-    Node* value_bit_field = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
-        efalse, if_false);
-    vfalse = graph()->NewNode(
-        machine()->Word32Equal(),
-        graph()->NewNode(
-            machine()->Word32Equal(), jsgraph()->Int32Constant(0),
-            graph()->NewNode(
-                machine()->Word32And(), value_bit_field,
-                jsgraph()->Int32Constant(1 << Map::kIsUndetectable))),
-        jsgraph()->Int32Constant(0));
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
-                           vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node, Node* effect,
-                                                     Node* control) {
-  Node* value = node->InputAt(0);
-
-  Node* value_bit_field = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()), value,
-      effect, control);
-  value = graph()->NewNode(
-      machine()->Word32Equal(),
-      graph()->NewNode(machine()->Word32Equal(),
-                       graph()->NewNode(machine()->Word32And(), value_bit_field,
-                                        jsgraph()->Int32Constant(
-                                            JSArrayBuffer::WasNeutered::kMask)),
-                       jsgraph()->Int32Constant(0)),
-      jsgraph()->Int32Constant(0));
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringCharCodeAt(Node* node, Node* effect,
-                                               Node* control) {
-  Node* subject = node->InputAt(0);
-  Node* index = node->InputAt(1);
-
-  // We may need to loop several times for ConsString/SlicedString {subject}s.
-  Node* loop =
-      graph()->NewNode(common()->Loop(4), control, control, control, control);
-  Node* lsubject =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 4),
-                       subject, subject, subject, subject, loop);
-  Node* lindex =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 4), index,
-                       index, index, index, loop);
-  Node* leffect = graph()->NewNode(common()->EffectPhi(4), effect, effect,
-                                   effect, effect, loop);
-
-  control = loop;
-  effect = leffect;
-
-  // Determine the instance type of {lsubject}.
-  Node* lsubject_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                       lsubject, effect, control);
-  Node* lsubject_instance_type = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
-      lsubject_map, effect, control);
-
-  // Check if {lsubject} is a SeqString.
-  Node* check0 = graph()->NewNode(
-      machine()->Word32Equal(),
-      graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
-                       jsgraph()->Int32Constant(kStringRepresentationMask)),
-      jsgraph()->Int32Constant(kSeqStringTag));
-  Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0;
-  {
-    // Check if the {lsubject} is a TwoByteSeqString or a OneByteSeqString.
-    Node* check1 = graph()->NewNode(
-        machine()->Word32Equal(),
-        graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
-                         jsgraph()->Int32Constant(kStringEncodingMask)),
-        jsgraph()->Int32Constant(kTwoByteStringTag));
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = etrue0;
-    Node* vtrue1 = etrue1 =
-        graph()->NewNode(simplified()->LoadElement(
-                             AccessBuilder::ForSeqTwoByteStringCharacter()),
-                         lsubject, lindex, etrue1, if_true1);
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = etrue0;
-    Node* vfalse1 = efalse1 =
-        graph()->NewNode(simplified()->LoadElement(
-                             AccessBuilder::ForSeqOneByteStringCharacter()),
-                         lsubject, lindex, efalse1, if_false1);
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    etrue0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                              vtrue1, vfalse1, if_true0);
-  }
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0;
-  {
-    // Check if the {lsubject} is a ConsString.
-    Node* check1 = graph()->NewNode(
-        machine()->Word32Equal(),
-        graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
-                         jsgraph()->Int32Constant(kStringRepresentationMask)),
-        jsgraph()->Int32Constant(kConsStringTag));
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = efalse0;
-    {
-      // Load the right hand side of the {lsubject} ConsString.
-      Node* lsubject_second = etrue1 = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForConsStringSecond()),
-          lsubject, etrue1, if_true1);
-
-      // Check whether the right hand side is the empty string (i.e. if
-      // this is really a flat string in a cons string). If that is not
-      // the case we flatten the string first.
-      Node* check2 = graph()->NewNode(machine()->WordEqual(), lsubject_second,
-                                      jsgraph()->EmptyStringConstant());
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                       check2, if_true1);
-
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* etrue2 = etrue1;
-      Node* vtrue2 = etrue2 = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForConsStringFirst()),
-          lsubject, etrue2, if_true2);
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* efalse2 = etrue1;
-      Node* vfalse2;
-      {
-        // Flatten the {lsubject} ConsString first.
-        Operator::Properties properties =
-            Operator::kNoDeopt | Operator::kNoThrow;
-        Runtime::FunctionId id = Runtime::kFlattenString;
-        CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
-            graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
-        vfalse2 = efalse2 = graph()->NewNode(
-            common()->Call(desc), jsgraph()->CEntryStubConstant(1), lsubject,
-            jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
-            jsgraph()->Int32Constant(1), jsgraph()->NoContextConstant(),
-            efalse2, if_false2);
-      }
-
-      // Retry the {loop} with the new subject.
-      loop->ReplaceInput(1, if_true2);
-      lindex->ReplaceInput(1, lindex);
-      leffect->ReplaceInput(1, etrue2);
-      lsubject->ReplaceInput(1, vtrue2);
-      loop->ReplaceInput(2, if_false2);
-      lindex->ReplaceInput(2, lindex);
-      leffect->ReplaceInput(2, efalse2);
-      lsubject->ReplaceInput(2, vfalse2);
-    }
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = efalse0;
-    Node* vfalse1;
-    {
-      // Check if the {lsubject} is an ExternalString.
-      Node* check2 = graph()->NewNode(
-          machine()->Word32Equal(),
-          graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
-                           jsgraph()->Int32Constant(kStringRepresentationMask)),
-          jsgraph()->Int32Constant(kExternalStringTag));
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                       check2, if_false1);
-
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* etrue2 = efalse1;
-      Node* vtrue2;
-      {
-        // Check if the {lsubject} is a short external string.
-        Node* check3 = graph()->NewNode(
-            machine()->Word32Equal(),
-            graph()->NewNode(
-                machine()->Word32And(), lsubject_instance_type,
-                jsgraph()->Int32Constant(kShortExternalStringMask)),
-            jsgraph()->Int32Constant(0));
-        Node* branch3 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                         check3, if_true2);
-
-        Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
-        Node* etrue3 = etrue2;
-        Node* vtrue3;
-        {
-          // Load the actual resource data from the {lsubject}.
-          Node* lsubject_resource_data = etrue3 = graph()->NewNode(
-              simplified()->LoadField(
-                  AccessBuilder::ForExternalStringResourceData()),
-              lsubject, etrue3, if_true3);
-
-          // Check if the {lsubject} is a TwoByteExternalString or a
-          // OneByteExternalString.
-          Node* check4 = graph()->NewNode(
-              machine()->Word32Equal(),
-              graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
-                               jsgraph()->Int32Constant(kStringEncodingMask)),
-              jsgraph()->Int32Constant(kTwoByteStringTag));
-          Node* branch4 =
-              graph()->NewNode(common()->Branch(), check4, if_true3);
-
-          Node* if_true4 = graph()->NewNode(common()->IfTrue(), branch4);
-          Node* etrue4 = etrue3;
-          Node* vtrue4 = etrue4 = graph()->NewNode(
-              simplified()->LoadElement(
-                  AccessBuilder::ForExternalTwoByteStringCharacter()),
-              lsubject_resource_data, lindex, etrue4, if_true4);
-
-          Node* if_false4 = graph()->NewNode(common()->IfFalse(), branch4);
-          Node* efalse4 = etrue3;
-          Node* vfalse4 = efalse4 = graph()->NewNode(
-              simplified()->LoadElement(
-                  AccessBuilder::ForExternalOneByteStringCharacter()),
-              lsubject_resource_data, lindex, efalse4, if_false4);
-
-          if_true3 = graph()->NewNode(common()->Merge(2), if_true4, if_false4);
-          etrue3 = graph()->NewNode(common()->EffectPhi(2), etrue4, efalse4,
-                                    if_true3);
-          vtrue3 =
-              graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                               vtrue4, vfalse4, if_true3);
-        }
-
-        Node* if_false3 = graph()->NewNode(common()->IfFalse(), branch3);
-        Node* efalse3 = etrue2;
-        Node* vfalse3;
-        {
-          // The {lsubject} might be compressed, call the runtime.
-          Operator::Properties properties =
-              Operator::kNoDeopt | Operator::kNoThrow;
-          Runtime::FunctionId id = Runtime::kExternalStringGetChar;
-          CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
-              graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
-          vfalse3 = efalse3 = graph()->NewNode(
-              common()->Call(desc), jsgraph()->CEntryStubConstant(1), lsubject,
-              ChangeInt32ToSmi(lindex),
-              jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
-              jsgraph()->Int32Constant(2), jsgraph()->NoContextConstant(),
-              efalse3, if_false3);
-          vfalse3 = ChangeSmiToInt32(vfalse3);
-        }
-
-        if_true2 = graph()->NewNode(common()->Merge(2), if_true3, if_false3);
-        etrue2 =
-            graph()->NewNode(common()->EffectPhi(2), etrue3, efalse3, if_true2);
-        vtrue2 =
-            graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                             vtrue3, vfalse3, if_true2);
-      }
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* efalse2 = efalse1;
-      {
-        // The {lsubject} is a SlicedString, continue with its parent.
-        Node* lsubject_parent = efalse2 = graph()->NewNode(
-            simplified()->LoadField(AccessBuilder::ForSlicedStringParent()),
-            lsubject, efalse2, if_false2);
-        Node* lsubject_offset = efalse2 = graph()->NewNode(
-            simplified()->LoadField(AccessBuilder::ForSlicedStringOffset()),
-            lsubject, efalse2, if_false2);
-        Node* lsubject_index = graph()->NewNode(
-            machine()->Int32Add(), lindex, ChangeSmiToInt32(lsubject_offset));
-
-        // Retry the {loop} with the parent subject.
-        loop->ReplaceInput(3, if_false2);
-        leffect->ReplaceInput(3, efalse2);
-        lindex->ReplaceInput(3, lsubject_index);
-        lsubject->ReplaceInput(3, lsubject_parent);
-      }
-
-      if_false1 = if_true2;
-      efalse1 = etrue2;
-      vfalse1 = vtrue2;
-    }
-
-    if_false0 = if_false1;
-    efalse0 = efalse1;
-    vfalse0 = vfalse1;
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
-                       vfalse0, control);
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringFromCharCode(Node* node, Node* effect,
-                                                 Node* control) {
-  Node* value = node->InputAt(0);
+  auto runtime_call = __ MakeDeferredLabel<2>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
 
   // Compute the character code.
-  Node* code =
-      graph()->NewNode(machine()->Word32And(), value,
-                       jsgraph()->Int32Constant(String::kMaxUtf16CodeUnit));
+  Node* code = __ Word32And(value, __ Int32Constant(String::kMaxUtf16CodeUnit));
 
   // Check if the {code} is a one-byte char code.
-  Node* check0 =
-      graph()->NewNode(machine()->Int32LessThanOrEqual(), code,
-                       jsgraph()->Int32Constant(String::kMaxOneByteCharCode));
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
+  Node* check0 = __ Int32LessThanOrEqual(
+      code, __ Int32Constant(String::kMaxOneByteCharCode));
+  __ GotoUnless(check0, &runtime_call);
 
   // Load the isolate-wide single-character string cache.
-  Node* cache =
-      jsgraph()->HeapConstant(factory()->single_character_string_cache());
+  Node* cache = __ HeapConstant(factory()->single_character_string_cache());
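+  // The cache is a FixedArray indexed by character code; an undefined entry
+  // means the corresponding one-character string has not been created yet.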
 
   // Compute the {cache} index for {code}.
-  Node* index = machine()->Is32()
-                    ? code
-                    : graph()->NewNode(machine()->ChangeUint32ToUint64(), code);
+  Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
 
   // Check if we have an entry for the {code} in the single character string
   // cache already.
-  Node* entry = etrue0 = graph()->NewNode(
-      simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), cache,
-      index, etrue0, if_true0);
+  Node* entry =
+      __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
 
-  Node* check1 = graph()->NewNode(machine()->WordEqual(), entry,
-                                  jsgraph()->UndefinedConstant());
-  Node* branch1 =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, if_true0);
-
-  // Use the {entry} from the {cache}.
-  Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-  Node* efalse1 = etrue0;
-  Node* vfalse1 = entry;
+  Node* check1 = __ WordEqual(entry, __ UndefinedConstant());
+  __ GotoIf(check1, &runtime_call);
+  __ Goto(&done, entry);
 
   // Let %StringCharFromCode handle this case.
   // TODO(turbofan): At some point we may consider adding a stub for this
   // deferred case, so that we don't need to call to C++ here.
-  Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-  Node* etrue1 = etrue0;
-  Node* vtrue1;
+  __ Bind(&runtime_call);
   {
-    if_true1 = graph()->NewNode(common()->Merge(2), if_true1, if_false0);
-    etrue1 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse0, if_true1);
     Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
     Runtime::FunctionId id = Runtime::kStringCharFromCode;
     CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
         graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
-    vtrue1 = etrue1 = graph()->NewNode(
-        common()->Call(desc), jsgraph()->CEntryStubConstant(1),
-        ChangeInt32ToSmi(code),
-        jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
-        jsgraph()->Int32Constant(1), jsgraph()->NoContextConstant(), etrue1,
-        if_true1);
+    Node* vtrue1 =
+        __ Call(desc, __ CEntryStubConstant(1), ChangeInt32ToSmi(code),
+                __ ExternalConstant(ExternalReference(id, isolate())),
+                __ Int32Constant(1), __ NoContextConstant());
+    __ Goto(&done, vtrue1);
   }
-
-  control = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue1, vfalse1, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringFromCodePoint(Node* node, Node* effect,
-                                                  Node* control) {
+Node* EffectControlLinearizer::LowerStringFromCodePoint(Node* node) {
   Node* value = node->InputAt(0);
   Node* code = value;
 
-  Node* etrue0 = effect;
-  Node* vtrue0;
+  auto if_not_single_code = __ MakeDeferredLabel<1>();
+  auto if_not_one_byte = __ MakeDeferredLabel<1>();
+  auto cache_miss = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<4>(MachineRepresentation::kTagged);
 
   // Check if the {code} is a single code unit.
-  Node* check0 = graph()->NewNode(machine()->Uint32LessThanOrEqual(), code,
-                                  jsgraph()->Uint32Constant(0xFFFF));
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  Node* check0 = __ Uint32LessThanOrEqual(code, __ Uint32Constant(0xFFFF));
+  __ GotoUnless(check0, &if_not_single_code);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
   {
     // Check if the {code} is a one-byte character.
-    Node* check1 = graph()->NewNode(
-        machine()->Uint32LessThanOrEqual(), code,
-        jsgraph()->Uint32Constant(String::kMaxOneByteCharCode));
-    Node* branch1 =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = etrue0;
-    Node* vtrue1;
+    Node* check1 = __ Uint32LessThanOrEqual(
+        code, __ Uint32Constant(String::kMaxOneByteCharCode));
+    __ GotoUnless(check1, &if_not_one_byte);
     {
       // Load the isolate-wide single-character string cache.
-      Node* cache =
-          jsgraph()->HeapConstant(factory()->single_character_string_cache());
+      Node* cache = __ HeapConstant(factory()->single_character_string_cache());
 
       // Compute the {cache} index for {code}.
-      Node* index =
-          machine()->Is32()
-              ? code
-              : graph()->NewNode(machine()->ChangeUint32ToUint64(), code);
+      Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
 
       // Check if we have an entry for the {code} in the single character string
       // cache already.
-      Node* entry = etrue1 = graph()->NewNode(
-          simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
-          cache, index, etrue1, if_true1);
+      Node* entry =
+          __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
 
-      Node* check2 = graph()->NewNode(machine()->WordEqual(), entry,
-                                      jsgraph()->UndefinedConstant());
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check2, if_true1);
-
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* etrue2 = etrue1;
-      Node* vtrue2;
-      {
-        // Allocate a new SeqOneByteString for {code}.
-        vtrue2 = etrue2 = graph()->NewNode(
-            simplified()->Allocate(NOT_TENURED),
-            jsgraph()->Int32Constant(SeqOneByteString::SizeFor(1)), etrue2,
-            if_true2);
-        etrue2 = graph()->NewNode(
-            simplified()->StoreField(AccessBuilder::ForMap()), vtrue2,
-            jsgraph()->HeapConstant(factory()->one_byte_string_map()), etrue2,
-            if_true2);
-        etrue2 = graph()->NewNode(
-            simplified()->StoreField(AccessBuilder::ForNameHashField()), vtrue2,
-            jsgraph()->IntPtrConstant(Name::kEmptyHashField), etrue2, if_true2);
-        etrue2 = graph()->NewNode(
-            simplified()->StoreField(AccessBuilder::ForStringLength()), vtrue2,
-            jsgraph()->SmiConstant(1), etrue2, if_true2);
-        etrue2 = graph()->NewNode(
-            machine()->Store(StoreRepresentation(MachineRepresentation::kWord8,
-                                                 kNoWriteBarrier)),
-            vtrue2, jsgraph()->IntPtrConstant(SeqOneByteString::kHeaderSize -
-                                              kHeapObjectTag),
-            code, etrue2, if_true2);
-
-        // Remember it in the {cache}.
-        etrue2 = graph()->NewNode(
-            simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()),
-            cache, index, vtrue2, etrue2, if_true2);
-      }
+      Node* check2 = __ WordEqual(entry, __ UndefinedConstant());
+      __ GotoIf(check2, &cache_miss);
 
       // Use the {entry} from the {cache}.
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* efalse2 = etrue0;
-      Node* vfalse2 = entry;
+      __ Goto(&done, entry);
 
-      if_true1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-      etrue1 =
-          graph()->NewNode(common()->EffectPhi(2), etrue2, efalse2, if_true1);
-      vtrue1 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue2, vfalse2, if_true1);
+      __ Bind(&cache_miss);
+      {
+        // Allocate a new SeqOneByteString for {code}.
+        Node* vtrue2 = __ Allocate(
+            NOT_TENURED, __ Int32Constant(SeqOneByteString::SizeFor(1)));
+        __ StoreField(AccessBuilder::ForMap(), vtrue2,
+                      __ HeapConstant(factory()->one_byte_string_map()));
+        __ StoreField(AccessBuilder::ForNameHashField(), vtrue2,
+                      __ IntPtrConstant(Name::kEmptyHashField));
+        __ StoreField(AccessBuilder::ForStringLength(), vtrue2,
+                      __ SmiConstant(1));
+        __ Store(
+            StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
+            vtrue2,
+            __ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
+            code);
+
+        // Remember it in the {cache}.
+        __ StoreElement(AccessBuilder::ForFixedArrayElement(), cache, index,
+                        vtrue2);
+        __ Goto(&done, vtrue2);
+      }
     }
 
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = effect;
-    Node* vfalse1;
+    __ Bind(&if_not_one_byte);
     {
       // Allocate a new SeqTwoByteString for {code}.
-      vfalse1 = efalse1 = graph()->NewNode(
-          simplified()->Allocate(NOT_TENURED),
-          jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(1)), efalse1,
-          if_false1);
-      efalse1 = graph()->NewNode(
-          simplified()->StoreField(AccessBuilder::ForMap()), vfalse1,
-          jsgraph()->HeapConstant(factory()->string_map()), efalse1, if_false1);
-      efalse1 = graph()->NewNode(
-          simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse1,
-          jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse1, if_false1);
-      efalse1 = graph()->NewNode(
-          simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse1,
-          jsgraph()->SmiConstant(1), efalse1, if_false1);
-      efalse1 = graph()->NewNode(
-          machine()->Store(StoreRepresentation(MachineRepresentation::kWord16,
-                                               kNoWriteBarrier)),
-          vfalse1, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
-                                             kHeapObjectTag),
-          code, efalse1, if_false1);
+      Node* vfalse1 = __ Allocate(
+          NOT_TENURED, __ Int32Constant(SeqTwoByteString::SizeFor(1)));
+      __ StoreField(AccessBuilder::ForMap(), vfalse1,
+                    __ HeapConstant(factory()->string_map()));
+      __ StoreField(AccessBuilder::ForNameHashField(), vfalse1,
+                    __ IntPtrConstant(Name::kEmptyHashField));
+      __ StoreField(AccessBuilder::ForStringLength(), vfalse1,
+                    __ SmiConstant(1));
+      __ Store(
+          StoreRepresentation(MachineRepresentation::kWord16, kNoWriteBarrier),
+          vfalse1,
+          __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
+          code);
+      __ Goto(&done, vfalse1);
     }
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    etrue0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                              vtrue1, vfalse1, if_true0);
   }
 
+  __ Bind(&if_not_single_code);
   // Generate a surrogate pair string.
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0;
   {
     switch (UnicodeEncodingOf(node->op())) {
       case UnicodeEncoding::UTF16:
@@ -2666,553 +2044,373 @@
 
       case UnicodeEncoding::UTF32: {
         // Convert UTF32 to UTF16 code units and store them as a 32-bit word.
-        Node* lead_offset = jsgraph()->Int32Constant(0xD800 - (0x10000 >> 10));
+        Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
 
         // lead = (codepoint >> 10) + LEAD_OFFSET
         Node* lead =
-            graph()->NewNode(machine()->Int32Add(),
-                             graph()->NewNode(machine()->Word32Shr(), code,
-                                              jsgraph()->Int32Constant(10)),
-                             lead_offset);
+            __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
 
         // trail = (codepoint & 0x3FF) + 0xDC00;
-        Node* trail =
-            graph()->NewNode(machine()->Int32Add(),
-                             graph()->NewNode(machine()->Word32And(), code,
-                                              jsgraph()->Int32Constant(0x3FF)),
-                             jsgraph()->Int32Constant(0xDC00));
+        Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
+                                  __ Int32Constant(0xDC00));
 
         // codepoint = (trail << 16) | lead;
-        code = graph()->NewNode(machine()->Word32Or(),
-                                graph()->NewNode(machine()->Word32Shl(), trail,
-                                                 jsgraph()->Int32Constant(16)),
-                                lead);
+        code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
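+        // With the trail unit in the upper half word, the single 32-bit
+        // store below writes lead then trail in memory on little-endian
+        // targets.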
         break;
       }
     }
 
     // Allocate a new SeqTwoByteString for {code}.
-    vfalse0 = efalse0 =
-        graph()->NewNode(simplified()->Allocate(NOT_TENURED),
-                         jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(2)),
-                         efalse0, if_false0);
-    efalse0 = graph()->NewNode(
-        simplified()->StoreField(AccessBuilder::ForMap()), vfalse0,
-        jsgraph()->HeapConstant(factory()->string_map()), efalse0, if_false0);
-    efalse0 = graph()->NewNode(
-        simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse0,
-        jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse0, if_false0);
-    efalse0 = graph()->NewNode(
-        simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse0,
-        jsgraph()->SmiConstant(2), efalse0, if_false0);
-    efalse0 = graph()->NewNode(
-        machine()->Store(StoreRepresentation(MachineRepresentation::kWord32,
-                                             kNoWriteBarrier)),
-        vfalse0, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
-                                           kHeapObjectTag),
-        code, efalse0, if_false0);
+    Node* vfalse0 = __ Allocate(NOT_TENURED,
+                                __ Int32Constant(SeqTwoByteString::SizeFor(2)));
+    __ StoreField(AccessBuilder::ForMap(), vfalse0,
+                  __ HeapConstant(factory()->string_map()));
+    __ StoreField(AccessBuilder::ForNameHashField(), vfalse0,
+                  __ IntPtrConstant(Name::kEmptyHashField));
+    __ StoreField(AccessBuilder::ForStringLength(), vfalse0, __ SmiConstant(2));
+    __ Store(
+        StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
+        vfalse0,
+        __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
+        code);
+    __ Goto(&done, vfalse0);
   }
 
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue0, vfalse0, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringComparison(Callable const& callable,
-                                               Node* node, Node* effect,
-                                               Node* control) {
+Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
+  Node* subject = node->InputAt(0);
+  Node* search_string = node->InputAt(1);
+  Node* position = node->InputAt(2);
+
+  Callable callable = CodeFactory::StringIndexOf(isolate());
   Operator::Properties properties = Operator::kEliminatable;
   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
       isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
-  node->InsertInput(graph()->zone(), 0,
-                    jsgraph()->HeapConstant(callable.code()));
-  node->AppendInput(graph()->zone(), jsgraph()->NoContextConstant());
-  node->AppendInput(graph()->zone(), effect);
-  NodeProperties::ChangeOp(node, common()->Call(desc));
-  return ValueEffectControl(node, node, control);
+  return __ Call(desc, __ HeapConstant(callable.code()), subject, search_string,
+                 position, __ NoContextConstant());
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringEqual(Node* node, Node* effect,
-                                          Node* control) {
-  return LowerStringComparison(CodeFactory::StringEqual(isolate()), node,
-                               effect, control);
+Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
+                                                     Node* node) {
+  Node* lhs = node->InputAt(0);
+  Node* rhs = node->InputAt(1);
+
+  Operator::Properties properties = Operator::kEliminatable;
+  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+  return __ Call(desc, __ HeapConstant(callable.code()), lhs, rhs,
+                 __ NoContextConstant());
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringLessThan(Node* node, Node* effect,
-                                             Node* control) {
-  return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node,
-                               effect, control);
+Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
+  return LowerStringComparison(CodeFactory::StringEqual(isolate()), node);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node, Node* effect,
-                                                    Node* control) {
+Node* EffectControlLinearizer::LowerStringLessThan(Node* node) {
+  return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node);
+}
+
+Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
   return LowerStringComparison(CodeFactory::StringLessThanOrEqual(isolate()),
-                               node, effect, control);
+                               node);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, Node* frame_state,
-                                               Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
+                                                     Node* frame_state) {
   // If we reach this point w/o eliminating the {node} that's marked
   // with allow-return-hole, we cannot do anything, so just deoptimize
   // in case of the hole NaN (similar to Crankshaft).
   Node* value = node->InputAt(0);
-  Node* check = graph()->NewNode(
-      machine()->Word32Equal(),
-      graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
-      jsgraph()->Int32Constant(kHoleNanUpper32));
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
-                       frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
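+  // Array holes are encoded as a NaN with a distinguished upper word, so
+  // comparing the high 32 bits against kHoleNanUpper32 detects the hole.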
+  Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
+                               __ Int32Constant(kHoleNanUpper32));
+  __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckTaggedHole(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckTaggedHole(Node* node,
+                                                    Node* frame_state) {
   Node* value = node->InputAt(0);
-  Node* check = graph()->NewNode(machine()->WordEqual(), value,
-                                 jsgraph()->TheHoleConstant());
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
-                       frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
+  Node* check = __ WordEqual(value, __ TheHoleConstant());
+  __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node,
-                                                           Node* effect,
-                                                           Node* control) {
+Node* EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node) {
   Node* value = node->InputAt(0);
-  Node* check = graph()->NewNode(machine()->WordEqual(), value,
-                                 jsgraph()->TheHoleConstant());
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = jsgraph()->UndefinedConstant();
+  auto if_is_hole = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = value;
+  Node* check = __ WordEqual(value, __ TheHoleConstant());
+  __ GotoIf(check, &if_is_hole);
+  __ Goto(&done, value);
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue, vfalse, control);
+  __ Bind(&if_is_hole);
+  __ Goto(&done, __ UndefinedConstant());
 
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value, Node* effect,
-                                                     Node* control) {
-  Node* result = effect = graph()->NewNode(
-      simplified()->Allocate(NOT_TENURED),
-      jsgraph()->Int32Constant(HeapNumber::kSize), effect, control);
-  effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
-                            result, jsgraph()->HeapNumberMapConstant(), effect,
-                            control);
-  effect = graph()->NewNode(
-      simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
-      value, effect, control);
-  return ValueEffectControl(result, effect, control);
+Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
+  Node* result = __ Allocate(NOT_TENURED, __ Int32Constant(HeapNumber::kSize));
+  __ StoreField(AccessBuilder::ForMap(), result, __ HeapNumberMapConstant());
+  __ StoreField(AccessBuilder::ForHeapNumberValue(), result, value);
+  return result;
 }
 
 Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
   if (machine()->Is64()) {
-    value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
+    value = __ ChangeInt32ToInt64(value);
   }
-  return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+  return __ WordShl(value, SmiShiftBitsConstant());
 }
 
 Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
   if (machine()->Is64()) {
-    value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
+    value = __ ChangeUint32ToUint64(value);
   }
-  return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
-}
-
-Node* EffectControlLinearizer::ChangeInt32ToFloat64(Node* value) {
-  return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
-}
-
-Node* EffectControlLinearizer::ChangeUint32ToFloat64(Node* value) {
-  return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
+  return __ WordShl(value, SmiShiftBitsConstant());
 }
 
 Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
-  value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
+  value = __ WordSar(value, SmiShiftBitsConstant());
   if (machine()->Is64()) {
-    value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
+    value = __ TruncateInt64ToInt32(value);
   }
   return value;
 }
+
 Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
-  return graph()->NewNode(
-      machine()->WordEqual(),
-      graph()->NewNode(machine()->WordAnd(), value,
-                       jsgraph()->IntPtrConstant(kSmiTagMask)),
-      jsgraph()->IntPtrConstant(kSmiTag));
+  return __ WordEqual(__ WordAnd(value, __ IntPtrConstant(kSmiTagMask)),
+                      __ IntPtrConstant(kSmiTag));
 }
 
 Node* EffectControlLinearizer::SmiMaxValueConstant() {
-  return jsgraph()->Int32Constant(Smi::kMaxValue);
+  return __ Int32Constant(Smi::kMaxValue);
 }
 
 Node* EffectControlLinearizer::SmiShiftBitsConstant() {
-  return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+  return __ IntPtrConstant(kSmiShiftSize + kSmiTagSize);
 }
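
// A hedged sketch (plain C++, not part of this patch) of the Smi arithmetic
// the helpers above emit for a 64-bit target, where kSmiTagSize == 1 and
// kSmiShiftSize == 31: the 32-bit payload lives in the upper half word and
// the low bit is the (zero) Smi tag.
#include <cstdint>
constexpr int kSmiTag = 0;
constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = 31;
constexpr intptr_t kSmiTagMask = (intptr_t{1} << kSmiTagSize) - 1;

intptr_t ChangeInt32ToSmi(int32_t value) {
  // ChangeInt32ToInt64, then WordShl by SmiShiftBitsConstant().
  return static_cast<intptr_t>(value) << (kSmiShiftSize + kSmiTagSize);
}

int32_t ChangeSmiToInt32(intptr_t smi) {
  // WordSar by SmiShiftBitsConstant(), then TruncateInt64ToInt32.
  return static_cast<int32_t>(smi >> (kSmiShiftSize + kSmiTagSize));
}

bool ObjectIsSmi(intptr_t word) {
  return (word & kSmiTagMask) == kSmiTag;
}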
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node, Node* effect,
-                                                     Node* control) {
+Node* EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node) {
   Node* value = node->InputAt(0);
-  Node* result = effect =
-      graph()->NewNode(ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(),
-                       value, jsgraph()->NoContextConstant(), effect);
-  return ValueEffectControl(result, effect, control);
+  return __ ToNumber(value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node, Node* effect,
-                                                     Node* control) {
+Node* EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node) {
   Node* value = node->InputAt(0);
 
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto if_to_number_smi = __ MakeLabel<1>();
+  auto done = __ MakeLabel<3>(MachineRepresentation::kWord32);
+
   Node* check0 = ObjectIsSmi(value);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  __ GotoUnless(check0, &if_not_smi);
+  __ Goto(&done, ChangeSmiToInt32(value));
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0 = ChangeSmiToInt32(value);
+  __ Bind(&if_not_smi);
+  Node* to_number = __ ToNumber(value);
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0;
-  {
-    vfalse0 = efalse0 = graph()->NewNode(
-        ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
-        jsgraph()->NoContextConstant(), efalse0);
+  Node* check1 = ObjectIsSmi(to_number);
+  __ GotoIf(check1, &if_to_number_smi);
+  Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
+  __ Goto(&done, __ TruncateFloat64ToWord32(number));
 
-    Node* check1 = ObjectIsSmi(vfalse0);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+  __ Bind(&if_to_number_smi);
+  __ Goto(&done, ChangeSmiToInt32(to_number));
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = efalse0;
-    Node* vtrue1 = ChangeSmiToInt32(vfalse0);
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = efalse0;
-    Node* vfalse1;
-    {
-      vfalse1 = efalse1 = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
-          efalse1, if_false1);
-      vfalse1 = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse1);
-    }
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    efalse0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
-    vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                               vtrue1, vfalse1, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                           vtrue0, vfalse0, control);
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
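
// A hedged sketch (plain C++, not part of this patch) of the truncation the
// slow path above relies on: TruncateFloat64ToWord32 follows JavaScript's
// ToInt32, i.e. truncate toward zero, wrap modulo 2^32, reinterpret as signed.
#include <cmath>
#include <cstdint>

int32_t TruncateFloat64ToWord32(double input) {
  if (std::isnan(input) || std::isinf(input)) return 0;
  double wrapped = std::fmod(std::trunc(input), 4294967296.0);  // mod 2^32
  if (wrapped < 0) wrapped += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}
// e.g. TruncateFloat64ToWord32(-1.9) == -1, and 4294967297.0 maps to 1.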
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
-                                                      Node* control) {
+Node* EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node) {
   Node* value = node->InputAt(0);
 
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto if_to_number_smi = __ MakeLabel<1>();
+  auto done = __ MakeLabel<3>(MachineRepresentation::kFloat64);
+
   Node* check0 = ObjectIsSmi(value);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  __ GotoUnless(check0, &if_not_smi);
+  Node* from_smi = ChangeSmiToInt32(value);
+  __ Goto(&done, __ ChangeInt32ToFloat64(from_smi));
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0;
-  {
-    vtrue0 = ChangeSmiToInt32(value);
-    vtrue0 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue0);
-  }
+  __ Bind(&if_not_smi);
+  Node* to_number = __ ToNumber(value);
+  Node* check1 = ObjectIsSmi(to_number);
+  __ GotoIf(check1, &if_to_number_smi);
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0;
-  {
-    vfalse0 = efalse0 = graph()->NewNode(
-        ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
-        jsgraph()->NoContextConstant(), efalse0);
+  Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
+  __ Goto(&done, number);
 
-    Node* check1 = ObjectIsSmi(vfalse0);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+  __ Bind(&if_to_number_smi);
+  Node* number_from_smi = ChangeSmiToInt32(to_number);
+  number_from_smi = __ ChangeInt32ToFloat64(number_from_smi);
+  __ Goto(&done, number_from_smi);
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = efalse0;
-    Node* vtrue1;
-    {
-      vtrue1 = ChangeSmiToInt32(vfalse0);
-      vtrue1 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue1);
-    }
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = efalse0;
-    Node* vfalse1;
-    {
-      vfalse1 = efalse1 = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
-          efalse1, if_false1);
-    }
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    efalse0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
-    vfalse0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                         vtrue1, vfalse1, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue0, vfalse0, control);
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node,
-                                                         Node* effect,
-                                                         Node* control) {
+Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
   Node* object = node->InputAt(0);
   Node* elements = node->InputAt(1);
 
+  auto if_not_fixed_array = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
+
   // Load the current map of {elements}.
-  Node* elements_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                       elements, effect, control);
+  Node* elements_map = __ LoadField(AccessBuilder::ForMap(), elements);
 
   // Check if {elements} is not a copy-on-write FixedArray.
-  Node* check = graph()->NewNode(machine()->WordEqual(), elements_map,
-                                 jsgraph()->FixedArrayMapConstant());
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
+  Node* check = __ WordEqual(elements_map, __ FixedArrayMapConstant());
+  __ GotoUnless(check, &if_not_fixed_array);
   // Nothing to do if the {elements} are not copy-on-write.
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = elements;
+  __ Goto(&done, elements);
 
+  __ Bind(&if_not_fixed_array);
   // We need to take a copy of the {elements} and set them up for {object}.
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    // We need to create a copy of the {elements} for {object}.
-    Operator::Properties properties = Operator::kEliminatable;
-    Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
-    CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-    CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
-        isolate(), graph()->zone(), callable.descriptor(), 0, flags,
-        properties);
-    vfalse = efalse = graph()->NewNode(
-        common()->Call(desc), jsgraph()->HeapConstant(callable.code()), object,
-        jsgraph()->NoContextConstant(), efalse);
-  }
+  Operator::Properties properties = Operator::kEliminatable;
+  Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
+  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+  CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+  Node* result = __ Call(desc, __ HeapConstant(callable.code()), object,
+                         __ NoContextConstant());
+  __ Goto(&done, result);
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  Node* value = graph()->NewNode(
-      common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
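
// A hedged model (plain C++, not part of this patch) of the copy-on-write
// handling above: a COW FixedArray is identified by its map; writable backing
// stores pass through untouched, shared ones are copied before the first
// in-place store. shared_ptr use-counting stands in for the map check here.
#include <memory>
#include <vector>

using Elements = std::vector<double>;

std::shared_ptr<Elements> EnsureWritableFastElements(
    std::shared_ptr<Elements> elements) {
  if (elements.use_count() == 1) {
    return elements;  // "fixed_array_map" case: already writable
  }
  // CopyFastSmiOrObjectElements stub call: take a private copy.
  return std::make_shared<Elements>(*elements);
}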
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
-                                                    Node* frame_state,
-                                                    Node* effect,
-                                                    Node* control) {
+Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
+                                                          Node* frame_state) {
   GrowFastElementsFlags flags = GrowFastElementsFlagsOf(node->op());
   Node* object = node->InputAt(0);
   Node* elements = node->InputAt(1);
   Node* index = node->InputAt(2);
   Node* length = node->InputAt(3);
 
-  Node* check0 = graph()->NewNode((flags & GrowFastElementsFlag::kHoleyElements)
-                                      ? machine()->Uint32LessThanOrEqual()
-                                      : machine()->Word32Equal(),
-                                  length, index);
-  Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
+  auto done_grow = __ MakeLabel<2>(MachineRepresentation::kTagged);
+  auto if_grow = __ MakeDeferredLabel<1>();
+  auto if_not_grow = __ MakeLabel<1>();
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0 = elements;
+  Node* check0 = (flags & GrowFastElementsFlag::kHoleyElements)
+                     ? __ Uint32LessThanOrEqual(length, index)
+                     : __ Word32Equal(length, index);
+  __ GotoUnless(check0, &if_not_grow);
   {
     // Load the length of the {elements} backing store.
-    Node* elements_length = etrue0 = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
-        etrue0, if_true0);
+    Node* elements_length =
+        __ LoadField(AccessBuilder::ForFixedArrayLength(), elements);
     elements_length = ChangeSmiToInt32(elements_length);
 
     // Check if we need to grow the {elements} backing store.
-    Node* check1 =
-        graph()->NewNode(machine()->Uint32LessThan(), index, elements_length);
-    Node* branch1 =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
+    Node* check1 = __ Uint32LessThan(index, elements_length);
+    __ GotoUnless(check1, &if_grow);
+    __ Goto(&done_grow, elements);
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = etrue0;
-    Node* vtrue1 = vtrue0;
+    __ Bind(&if_grow);
+    // We need to grow the {elements} for {object}.
+    Operator::Properties properties = Operator::kEliminatable;
+    Callable callable =
+        (flags & GrowFastElementsFlag::kDoubleElements)
+            ? CodeFactory::GrowFastDoubleElements(isolate())
+            : CodeFactory::GrowFastSmiOrObjectElements(isolate());
+    CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
+    CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+        isolate(), graph()->zone(), callable.descriptor(), 0, call_flags,
+        properties);
+    Node* new_object = __ Call(desc, __ HeapConstant(callable.code()), object,
+                               ChangeInt32ToSmi(index), __ NoContextConstant());
 
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = etrue0;
-    Node* vfalse1 = vtrue0;
-    {
-      // We need to grow the {elements} for {object}.
-      Operator::Properties properties = Operator::kEliminatable;
-      Callable callable =
-          (flags & GrowFastElementsFlag::kDoubleElements)
-              ? CodeFactory::GrowFastDoubleElements(isolate())
-              : CodeFactory::GrowFastSmiOrObjectElements(isolate());
-      CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-      CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
-          isolate(), graph()->zone(), callable.descriptor(), 0, flags,
-          properties);
-      vfalse1 = efalse1 = graph()->NewNode(
-          common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
-          object, ChangeInt32ToSmi(index), jsgraph()->NoContextConstant(),
-          efalse1);
+    // Ensure that we were able to grow the {elements}.
+    // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
+    // but maybe we should just introduce a reason that makes sense.
+    __ DeoptimizeIf(DeoptimizeReason::kSmi, ObjectIsSmi(new_object),
+                    frame_state);
+    __ Goto(&done_grow, new_object);
 
-      // Ensure that we were able to grow the {elements}.
-      // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
-      // but maybe we should just introduce a reason that makes sense.
-      efalse1 = if_false1 = graph()->NewNode(
-          common()->DeoptimizeIf(DeoptimizeReason::kSmi), ObjectIsSmi(vfalse1),
-          frame_state, efalse1, if_false1);
-    }
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    etrue0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                              vtrue1, vfalse1, if_true0);
+    __ Bind(&done_grow);
 
     // For JSArray {object}s we also need to update the "length".
     if (flags & GrowFastElementsFlag::kArrayObject) {
       // Compute the new {length}.
-      Node* object_length = ChangeInt32ToSmi(graph()->NewNode(
-          machine()->Int32Add(), index, jsgraph()->Int32Constant(1)));
+      Node* object_length =
+          ChangeInt32ToSmi(__ Int32Add(index, __ Int32Constant(1)));
 
       // Update the "length" property of the {object}.
-      etrue0 =
-          graph()->NewNode(simplified()->StoreField(
-                               AccessBuilder::ForJSArrayLength(FAST_ELEMENTS)),
-                           object, object_length, etrue0, if_true0);
+      __ StoreField(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), object,
+                    object_length);
     }
+    __ Goto(&done, done_grow.PhiAt(0));
   }
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0 = elements;
+  __ Bind(&if_not_grow);
   {
    // In case of non-holey {elements}, we need to verify that the {index} is
    // in-bounds; for holey {elements}, the check above already guards the
    // index (and the operator forces {index} to be unsigned).
     if (!(flags & GrowFastElementsFlag::kHoleyElements)) {
-      Node* check1 =
-          graph()->NewNode(machine()->Uint32LessThan(), index, length);
-      efalse0 = if_false0 = graph()->NewNode(
-          common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check1,
-          frame_state, efalse0, if_false0);
+      Node* check1 = __ Uint32LessThan(index, length);
+      __ DeoptimizeUnless(DeoptimizeReason::kOutOfBounds, check1, frame_state);
     }
+    __ Goto(&done, elements);
   }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0,
-                       vfalse0, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
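
// A hedged sketch (plain C++, illustrative names, not part of this patch) of
// the decision flow above: grow only when the store is at/past {length},
// deopt if the grow stub fails, and bail out for an out-of-bounds store into
// non-holey elements. std::vector stands in for the FixedArray backing store.
#include <cstdint>
#include <stdexcept>
#include <vector>

std::vector<double>* MaybeGrowFastElements(std::vector<double>* elements,
                                           uint32_t index, uint32_t length,
                                           bool holey, bool is_js_array,
                                           uint32_t* array_length) {
  bool grow_path = holey ? length <= index : length == index;
  if (grow_path) {
    if (index >= elements->size()) {
      elements->resize(index + 1);  // GrowFast*Elements stub; Smi => deopt
    }
    if (is_js_array) *array_length = index + 1;  // update "length" property
  } else if (!holey && index >= length) {
    throw std::out_of_range("deopt: kOutOfBounds");  // DeoptimizeUnless
  }
  return elements;
}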
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTransitionElementsKind(Node* node, Node* effect,
-                                                     Node* control) {
+void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
   ElementsTransition const transition = ElementsTransitionOf(node->op());
   Node* object = node->InputAt(0);
-  Node* source_map = node->InputAt(1);
-  Node* target_map = node->InputAt(2);
+
+  auto if_map_same = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>();
+
+  Node* source_map = __ HeapConstant(transition.source());
+  Node* target_map = __ HeapConstant(transition.target());
 
   // Load the current map of {object}.
-  Node* object_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), object,
-                       effect, control);
+  Node* object_map = __ LoadField(AccessBuilder::ForMap(), object);
 
   // Check if {object_map} is the same as {source_map}.
-  Node* check =
-      graph()->NewNode(machine()->WordEqual(), object_map, source_map);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+  Node* check = __ WordEqual(object_map, source_map);
+  __ GotoIf(check, &if_map_same);
+  __ Goto(&done);
 
-  // Migrate the {object} from {source_map} to {target_map}.
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  {
-    switch (transition) {
-      case ElementsTransition::kFastTransition: {
-        // In-place migration of {object}, just store the {target_map}.
-        etrue =
-            graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
-                             object, target_map, etrue, if_true);
-        break;
-      }
-      case ElementsTransition::kSlowTransition: {
-        // Instance migration, call out to the runtime for {object}.
-        Operator::Properties properties =
-            Operator::kNoDeopt | Operator::kNoThrow;
-        Runtime::FunctionId id = Runtime::kTransitionElementsKind;
-        CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
-            graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
-        etrue = graph()->NewNode(
-            common()->Call(desc), jsgraph()->CEntryStubConstant(1), object,
-            target_map,
-            jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
-            jsgraph()->Int32Constant(2), jsgraph()->NoContextConstant(), etrue,
-            if_true);
-        break;
-      }
+  __ Bind(&if_map_same);
+  switch (transition.mode()) {
+    case ElementsTransition::kFastTransition:
+      // In-place migration of {object}, just store the {target_map}.
+      __ StoreField(AccessBuilder::ForMap(), object, target_map);
+      break;
+    case ElementsTransition::kSlowTransition: {
+      // Instance migration, call out to the runtime for {object}.
+      Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+      Runtime::FunctionId id = Runtime::kTransitionElementsKind;
+      CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+          graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
+      __ Call(desc, __ CEntryStubConstant(1), object, target_map,
+              __ ExternalConstant(ExternalReference(id, isolate())),
+              __ Int32Constant(2), __ NoContextConstant());
+      break;
     }
   }
+  __ Goto(&done);
 
-  // Nothing to do if the {object} doesn't have the {source_map}.
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-
-  return ValueEffectControl(nullptr, effect, control);
+  __ Bind(&done);
 }
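
// A hedged model (plain C++, illustrative types, not part of this patch) of
// the transition above: a fast elements-kind transition only retags the
// object with the target map in place, while a slow one must also rewrite
// the backing store and therefore calls into the runtime.
#include <functional>

struct Map;
struct HeapObject { const Map* map; };
enum class TransitionMode { kFastTransition, kSlowTransition };

void TransitionElementsKind(HeapObject* object, const Map* source_map,
                            const Map* target_map, TransitionMode mode,
                            const std::function<void(HeapObject*)>& runtime) {
  if (object->map != source_map) return;  // nothing to do
  if (mode == TransitionMode::kFastTransition) {
    object->map = target_map;             // in-place StoreField of the map
  } else {
    runtime(object);                      // Runtime::kTransitionElementsKind
  }
}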
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerLoadTypedElement(Node* node, Node* effect,
-                                               Node* control) {
+Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
   ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
   Node* buffer = node->InputAt(0);
   Node* base = node->InputAt(1);
@@ -3221,24 +2419,20 @@
 
   // We need to keep the {buffer} alive so that the GC will not release the
   // ArrayBuffer (if there's any) as long as we are still operating on it.
-  effect = graph()->NewNode(common()->Retain(), buffer, effect);
+  __ Retain(buffer);
 
-  // Compute the effective storage pointer.
-  Node* storage = effect = graph()->NewNode(machine()->UnsafePointerAdd(), base,
-                                            external, effect, control);
+  // Compute the effective storage pointer, handling the case where the
+  // {external} pointer is the effective storage pointer (i.e. the {base}
+  // is Smi zero).
+  Node* storage = NumberMatcher(base).Is(0)
+                      ? external
+                      : __ UnsafePointerAdd(base, external);
 
   // Perform the actual typed element access.
-  Node* value = effect = graph()->NewNode(
-      simplified()->LoadElement(
-          AccessBuilder::ForTypedArrayElement(array_type, true)),
-      storage, index, effect, control);
-
-  return ValueEffectControl(value, effect, control);
+  return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
+                        storage, index);
 }
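
// A hedged sketch (plain C++, not part of this patch) of the effective
// storage pointer computed above: element storage is addressed as {base} +
// {external}; when {base} is Smi zero (off-heap buffer), {external} already
// holds the absolute address and the add can be skipped.
#include <cstdint>

uint8_t* EffectiveStorage(uint8_t* base, uintptr_t external) {
  return base == nullptr ? reinterpret_cast<uint8_t*>(external)
                         : base + external;
}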
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStoreTypedElement(Node* node, Node* effect,
-                                                Node* control) {
+void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
   ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
   Node* buffer = node->InputAt(0);
   Node* base = node->InputAt(1);
@@ -3248,34 +2442,25 @@
 
   // We need to keep the {buffer} alive so that the GC will not release the
   // ArrayBuffer (if there's any) as long as we are still operating on it.
-  effect = graph()->NewNode(common()->Retain(), buffer, effect);
+  __ Retain(buffer);
 
-  // Compute the effective storage pointer.
-  Node* storage = effect = graph()->NewNode(machine()->UnsafePointerAdd(), base,
-                                            external, effect, control);
+  // Compute the effective storage pointer, handling the case where the
+  // {external} pointer is the effective storage pointer (i.e. the {base}
+  // is Smi zero).
+  Node* storage = NumberMatcher(base).Is(0)
+                      ? external
+                      : __ UnsafePointerAdd(base, external);
 
   // Perform the actual typed element access.
-  effect = graph()->NewNode(
-      simplified()->StoreElement(
-          AccessBuilder::ForTypedArrayElement(array_type, true)),
-      storage, index, value, effect, control);
-
-  return ValueEffectControl(nullptr, effect, control);
+  __ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true),
+                  storage, index, value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundUp(Node* node, Node* effect,
-                                             Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundUp(Node* node) {
   // Nothing to be done if a fast hardware instruction is available.
   if (machine()->Float64RoundUp().IsSupported()) {
-    return ValueEffectControl(node, effect, control);
+    return Nothing<Node*>();
   }
 
-  Node* const one = jsgraph()->Float64Constant(1.0);
-  Node* const zero = jsgraph()->Float64Constant(0.0);
-  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
-  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
-  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
   Node* const input = node->InputAt(0);
 
   // General case for ceil.
@@ -3300,251 +2485,169 @@
   //         let temp2 = (2^52 + temp1) - 2^52 in
   //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
   //         -0 - temp3
+
+  auto if_not_positive = __ MakeDeferredLabel<1>();
+  auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
+  auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
+  auto if_zero = __ MakeDeferredLabel<1>();
+  auto done_temp3 = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+  auto done = __ MakeLabel<6>(MachineRepresentation::kFloat64);
+
+  Node* const zero = __ Float64Constant(0.0);
+  Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
+  Node* const one = __ Float64Constant(1.0);
+
+  Node* check0 = __ Float64LessThan(zero, input);
+  __ GotoUnless(check0, &if_not_positive);
+  {
+    Node* check1 = __ Float64LessThanOrEqual(two_52, input);
+    __ GotoIf(check1, &if_greater_than_two_52);
+    {
+      Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
+      __ GotoUnless(__ Float64LessThan(temp1, input), &done, temp1);
+      __ Goto(&done, __ Float64Add(temp1, one));
+    }
+
+    __ Bind(&if_greater_than_two_52);
+    __ Goto(&done, input);
+  }
+
+  __ Bind(&if_not_positive);
+  {
+    Node* check1 = __ Float64Equal(input, zero);
+    __ GotoIf(check1, &if_zero);
+
+    Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
+    Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
+    __ GotoIf(check2, &if_less_than_minus_two_52);
+
+    {
+      Node* const minus_zero = __ Float64Constant(-0.0);
+      Node* temp1 = __ Float64Sub(minus_zero, input);
+      Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
+      Node* check3 = __ Float64LessThan(temp1, temp2);
+      __ GotoUnless(check3, &done_temp3, temp2);
+      __ Goto(&done_temp3, __ Float64Sub(temp2, one));
+
+      __ Bind(&done_temp3);
+      Node* temp3 = done_temp3.PhiAt(0);
+      __ Goto(&done, __ Float64Sub(minus_zero, temp3));
+    }
+    __ Bind(&if_less_than_minus_two_52);
+    __ Goto(&done, input);
+
+    __ Bind(&if_zero);
+    __ Goto(&done, input);
+  }
+  __ Bind(&done);
+  return Just(done.PhiAt(0));
+}
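
// A hedged, self-contained C++ rendering (not part of this patch) of the
// "2^52 trick" the fallback above emits when no Float64RoundUp instruction
// exists: adding and then subtracting 2^52 makes the FPU round any double of
// smaller magnitude to an integer, after which one compare/adjust gives ceil.
double Float64RoundUp(double input) {
  const double two_52 = 4503599627370496.0;  // 2^52
  if (0.0 < input) {
    if (two_52 <= input) return input;  // already integral
    double temp1 = (two_52 + input) - two_52;
    return temp1 < input ? temp1 + 1.0 : temp1;
  }
  if (input == 0.0) return input;       // preserves -0.0
  if (input <= -two_52) return input;
  double temp1 = -0.0 - input;
  double temp2 = (two_52 + temp1) - two_52;
  double temp3 = temp1 < temp2 ? temp2 - 1.0 : temp2;
  return -0.0 - temp3;
}
// e.g. Float64RoundUp(0.5) == 1.0 and Float64RoundUp(-2.5) == -2.0.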
+
+Node* EffectControlLinearizer::BuildFloat64RoundDown(Node* value) {
+  Node* round_down = __ Float64RoundDown(value);
+  if (round_down != nullptr) {
+    return round_down;
+  }
+
+  Node* const input = value;
+
+  // General case for floor.
   //
-  // Note: We do not use the Diamond helper class here, because it really hurts
-  // readability with nested diamonds.
+  //   if 0.0 < input then
+  //     if 2^52 <= input then
+  //       input
+  //     else
+  //       let temp1 = (2^52 + input) - 2^52 in
+  //       if input < temp1 then
+  //         temp1 - 1
+  //       else
+  //         temp1
+  //   else
+  //     if input == 0 then
+  //       input
+  //     else
+  //       if input <= -2^52 then
+  //         input
+  //       else
+  //         let temp1 = -0 - input in
+  //         let temp2 = (2^52 + temp1) - 2^52 in
+  //         if temp2 < temp1 then
+  //           -1 - temp2
+  //         else
+  //           -0 - temp2
 
-  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  auto if_not_positive = __ MakeDeferredLabel<1>();
+  auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
+  auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
+  auto if_temp2_lt_temp1 = __ MakeLabel<1>();
+  auto if_zero = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<7>(MachineRepresentation::kFloat64);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* vtrue0;
+  Node* const zero = __ Float64Constant(0.0);
+  Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
+
+  Node* check0 = __ Float64LessThan(zero, input);
+  __ GotoUnless(check0, &if_not_positive);
   {
-    Node* check1 =
-        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
+    Node* check1 = __ Float64LessThanOrEqual(two_52, input);
+    __ GotoIf(check1, &if_greater_than_two_52);
     {
-      Node* temp1 = graph()->NewNode(
-          machine()->Float64Sub(),
-          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
-      vfalse1 = graph()->NewNode(
-          common()->Select(MachineRepresentation::kFloat64),
-          graph()->NewNode(machine()->Float64LessThan(), temp1, input),
-          graph()->NewNode(machine()->Float64Add(), temp1, one), temp1);
+      Node* const one = __ Float64Constant(1.0);
+      Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
+      __ GotoUnless(__ Float64LessThan(input, temp1), &done, temp1);
+      __ Goto(&done, __ Float64Sub(temp1, one));
     }
 
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                              vtrue1, vfalse1, if_true0);
+    __ Bind(&if_greater_than_two_52);
+    __ Goto(&done, input);
   }
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* vfalse0;
+  __ Bind(&if_not_positive);
   {
-    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
+    Node* check1 = __ Float64Equal(input, zero);
+    __ GotoIf(check1, &if_zero);
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
+    Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
+    Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
+    __ GotoIf(check2, &if_less_than_minus_two_52);
 
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
     {
-      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
-                                      input, minus_two_52);
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check2, if_false1);
+      Node* const minus_zero = __ Float64Constant(-0.0);
+      Node* temp1 = __ Float64Sub(minus_zero, input);
+      Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
+      Node* check3 = __ Float64LessThan(temp2, temp1);
+      __ GotoIf(check3, &if_temp2_lt_temp1);
+      __ Goto(&done, __ Float64Sub(minus_zero, temp2));
 
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* vtrue2 = input;
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* vfalse2;
-      {
-        Node* temp1 =
-            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
-        Node* temp2 = graph()->NewNode(
-            machine()->Float64Sub(),
-            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
-        Node* temp3 = graph()->NewNode(
-            common()->Select(MachineRepresentation::kFloat64),
-            graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
-            graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
-        vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
-      }
-
-      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-      vfalse1 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue2, vfalse2, if_false1);
+      __ Bind(&if_temp2_lt_temp1);
+      __ Goto(&done, __ Float64Sub(__ Float64Constant(-1.0), temp2));
     }
+    __ Bind(&if_less_than_minus_two_52);
+    __ Goto(&done, input);
 
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vfalse0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                         vtrue1, vfalse1, if_false0);
+    __ Bind(&if_zero);
+    __ Goto(&done, input);
   }
-
-  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                       vtrue0, vfalse0, merge0);
-  return ValueEffectControl(value, effect, merge0);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildFloat64RoundDown(Node* value, Node* effect,
-                                               Node* control) {
-  if (machine()->Float64RoundDown().IsSupported()) {
-    value = graph()->NewNode(machine()->Float64RoundDown().op(), value);
-  } else {
-    Node* const one = jsgraph()->Float64Constant(1.0);
-    Node* const zero = jsgraph()->Float64Constant(0.0);
-    Node* const minus_one = jsgraph()->Float64Constant(-1.0);
-    Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
-    Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
-    Node* const minus_two_52 =
-        jsgraph()->Float64Constant(-4503599627370496.0E0);
-    Node* const input = value;
-
-    // General case for floor.
-    //
-    //   if 0.0 < input then
-    //     if 2^52 <= input then
-    //       input
-    //     else
-    //       let temp1 = (2^52 + input) - 2^52 in
-    //       if input < temp1 then
-    //         temp1 - 1
-    //       else
-    //         temp1
-    //   else
-    //     if input == 0 then
-    //       input
-    //     else
-    //       if input <= -2^52 then
-    //         input
-    //       else
-    //         let temp1 = -0 - input in
-    //         let temp2 = (2^52 + temp1) - 2^52 in
-    //         if temp2 < temp1 then
-    //           -1 - temp2
-    //         else
-    //           -0 - temp2
-    //
-    // Note: We do not use the Diamond helper class here, because it really
-    // hurts
-    // readability with nested diamonds.
-
-    Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
-    Node* branch0 =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
-    Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-    Node* vtrue0;
-    {
-      Node* check1 =
-          graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
-      Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
-      Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-      Node* vtrue1 = input;
-
-      Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-      Node* vfalse1;
-      {
-        Node* temp1 = graph()->NewNode(
-            machine()->Float64Sub(),
-            graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
-        vfalse1 = graph()->NewNode(
-            common()->Select(MachineRepresentation::kFloat64),
-            graph()->NewNode(machine()->Float64LessThan(), input, temp1),
-            graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
-      }
-
-      if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-      vtrue0 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue1, vfalse1, if_true0);
-    }
-
-    Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-    Node* vfalse0;
-    {
-      Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
-      Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check1, if_false0);
-
-      Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-      Node* vtrue1 = input;
-
-      Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-      Node* vfalse1;
-      {
-        Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
-                                        input, minus_two_52);
-        Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                         check2, if_false1);
-
-        Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-        Node* vtrue2 = input;
-
-        Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-        Node* vfalse2;
-        {
-          Node* temp1 =
-              graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
-          Node* temp2 = graph()->NewNode(
-              machine()->Float64Sub(),
-              graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
-          vfalse2 = graph()->NewNode(
-              common()->Select(MachineRepresentation::kFloat64),
-              graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
-              graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
-              graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
-        }
-
-        if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-        vfalse1 =
-            graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                             vtrue2, vfalse2, if_false1);
-      }
-
-      if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-      vfalse0 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue1, vfalse1, if_false0);
-    }
-
-    control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-    value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                             vtrue0, vfalse0, control);
-  }
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundDown(Node* node, Node* effect,
-                                               Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundDown(Node* node) {
   // Nothing to be done if a fast hardware instruction is available.
   if (machine()->Float64RoundDown().IsSupported()) {
-    return ValueEffectControl(node, effect, control);
+    return Nothing<Node*>();
   }
 
   Node* const input = node->InputAt(0);
-  return BuildFloat64RoundDown(input, effect, control);
+  return Just(BuildFloat64RoundDown(input));
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node, Node* effect,
-                                                   Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node) {
   // Nothing to be done if a fast hardware instruction is available.
   if (machine()->Float64RoundTiesEven().IsSupported()) {
-    return ValueEffectControl(node, effect, control);
+    return Nothing<Node*>();
   }
 
-  Node* const one = jsgraph()->Float64Constant(1.0);
-  Node* const two = jsgraph()->Float64Constant(2.0);
-  Node* const half = jsgraph()->Float64Constant(0.5);
-  Node* const zero = jsgraph()->Float64Constant(0.0);
   Node* const input = node->InputAt(0);
 
   // General case for round ties to even:
@@ -3561,79 +2664,38 @@
   //       value
   //     else
   //       value + 1.0
-  //
-  // Note: We do not use the Diamond helper class here, because it really hurts
-  // readability with nested diamonds.
 
-  ValueEffectControl continuation =
-      BuildFloat64RoundDown(input, effect, control);
-  Node* value = continuation.value;
-  effect = continuation.effect;
-  control = continuation.control;
+  auto if_is_half = __ MakeLabel<1>();
+  auto done = __ MakeLabel<4>(MachineRepresentation::kFloat64);
 
-  Node* temp1 = graph()->NewNode(machine()->Float64Sub(), input, value);
+  Node* value = BuildFloat64RoundDown(input);
+  Node* temp1 = __ Float64Sub(input, value);
 
-  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), temp1, half);
-  Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+  Node* const half = __ Float64Constant(0.5);
+  Node* check0 = __ Float64LessThan(temp1, half);
+  __ GotoIf(check0, &done, value);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* vtrue0 = value;
+  Node* const one = __ Float64Constant(1.0);
+  Node* check1 = __ Float64LessThan(half, temp1);
+  __ GotoUnless(check1, &if_is_half);
+  __ Goto(&done, __ Float64Add(value, one));
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* vfalse0;
-  {
-    Node* check1 = graph()->NewNode(machine()->Float64LessThan(), half, temp1);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+  __ Bind(&if_is_half);
+  Node* temp2 = __ Float64Mod(value, __ Float64Constant(2.0));
+  Node* check2 = __ Float64Equal(temp2, __ Float64Constant(0.0));
+  __ GotoIf(check2, &done, value);
+  __ Goto(&done, __ Float64Add(value, one));
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = graph()->NewNode(machine()->Float64Add(), value, one);
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
-    {
-      Node* temp2 = graph()->NewNode(machine()->Float64Mod(), value, two);
-
-      Node* check2 = graph()->NewNode(machine()->Float64Equal(), temp2, zero);
-      Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
-
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* vtrue2 = value;
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* vfalse2 = graph()->NewNode(machine()->Float64Add(), value, one);
-
-      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-      vfalse1 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue2, vfalse2, if_false1);
-    }
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vfalse0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                         vtrue1, vfalse1, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue0, vfalse0, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return Just(done.PhiAt(0));
 }
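
// A hedged sketch (plain C++, not part of this patch) of the ties-to-even
// fallback above, built on the Float64RoundDown sketch: floor first, then
// pick floor or floor + 1 from the fractional part, breaking exact .5 ties
// toward the even neighbour.
#include <cmath>

double Float64RoundDown(double input);  // the sketch shown earlier

double Float64RoundTiesEven(double input) {
  double value = Float64RoundDown(input);
  double temp1 = input - value;
  if (temp1 < 0.5) return value;
  if (0.5 < temp1) return value + 1.0;
  return std::fmod(value, 2.0) == 0.0 ? value : value + 1.0;  // exact tie
}
// e.g. Float64RoundTiesEven(2.5) == 2.0 but Float64RoundTiesEven(3.5) == 4.0.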
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node, Node* effect,
-                                                   Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
   // Nothing to be done if a fast hardware instruction is available.
   if (machine()->Float64RoundTruncate().IsSupported()) {
-    return ValueEffectControl(node, effect, control);
+    return Nothing<Node*>();
   }
 
-  Node* const one = jsgraph()->Float64Constant(1.0);
-  Node* const zero = jsgraph()->Float64Constant(0.0);
-  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
-  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
-  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
   Node* const input = node->InputAt(0);
 
   // General case for trunc.
@@ -3662,92 +2724,65 @@
   // Note: We do not use the Diamond helper class here, because it really hurts
   // readability with nested diamonds.
 
-  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  auto if_not_positive = __ MakeDeferredLabel<1>();
+  auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
+  auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
+  auto if_zero = __ MakeDeferredLabel<1>();
+  auto done_temp3 = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+  auto done = __ MakeLabel<6>(MachineRepresentation::kFloat64);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* vtrue0;
+  Node* const zero = __ Float64Constant(0.0);
+  Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
+  Node* const one = __ Float64Constant(1.0);
+
+  Node* check0 = __ Float64LessThan(zero, input);
+  __ GotoUnless(check0, &if_not_positive);
   {
-    Node* check1 =
-        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
+    Node* check1 = __ Float64LessThanOrEqual(two_52, input);
+    __ GotoIf(check1, &if_greater_than_two_52);
     {
-      Node* temp1 = graph()->NewNode(
-          machine()->Float64Sub(),
-          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
-      vfalse1 = graph()->NewNode(
-          common()->Select(MachineRepresentation::kFloat64),
-          graph()->NewNode(machine()->Float64LessThan(), input, temp1),
-          graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+      Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
+      __ GotoUnless(__ Float64LessThan(input, temp1), &done, temp1);
+      __ Goto(&done, __ Float64Sub(temp1, one));
     }
 
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                              vtrue1, vfalse1, if_true0);
+    __ Bind(&if_greater_than_two_52);
+    __ Goto(&done, input);
   }
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* vfalse0;
+  __ Bind(&if_not_positive);
   {
-    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
+    Node* check1 = __ Float64Equal(input, zero);
+    __ GotoIf(check1, &if_zero);
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
+    Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
+    Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
+    __ GotoIf(check2, &if_less_than_minus_two_52);
 
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
     {
-      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
-                                      input, minus_two_52);
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check2, if_false1);
+      Node* const minus_zero = __ Float64Constant(-0.0);
+      Node* temp1 = __ Float64Sub(minus_zero, input);
+      Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
+      Node* check3 = __ Float64LessThan(temp1, temp2);
+      __ GotoUnless(check3, &done_temp3, temp2);
+      __ Goto(&done_temp3, __ Float64Sub(temp2, one));
 
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* vtrue2 = input;
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* vfalse2;
-      {
-        Node* temp1 =
-            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
-        Node* temp2 = graph()->NewNode(
-            machine()->Float64Sub(),
-            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
-        Node* temp3 = graph()->NewNode(
-            common()->Select(MachineRepresentation::kFloat64),
-            graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
-            graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
-        vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
-      }
-
-      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-      vfalse1 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue2, vfalse2, if_false1);
+      __ Bind(&done_temp3);
+      Node* temp3 = done_temp3.PhiAt(0);
+      __ Goto(&done, __ Float64Sub(minus_zero, temp3));
     }
+    __ Bind(&if_less_than_minus_two_52);
+    __ Goto(&done, input);
 
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vfalse0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                         vtrue1, vfalse1, if_false0);
+    __ Bind(&if_zero);
+    __ Goto(&done, input);
   }
-
-  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                       vtrue0, vfalse0, merge0);
-  return ValueEffectControl(value, effect, merge0);
+  __ Bind(&done);
+  return Just(done.PhiAt(0));
 }
 
+#undef __
+
 Factory* EffectControlLinearizer::factory() const {
   return isolate()->factory();
 }
@@ -3756,18 +2791,6 @@
   return jsgraph()->isolate();
 }
 
-Operator const* EffectControlLinearizer::ToNumberOperator() {
-  if (!to_number_operator_.is_set()) {
-    Callable callable = CodeFactory::ToNumber(isolate());
-    CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate(), graph()->zone(), callable.descriptor(), 0, flags,
-        Operator::kEliminatable);
-    to_number_operator_.set(common()->Call(desc));
-  }
-  return to_number_operator_.get();
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/effect-control-linearizer.h b/src/compiler/effect-control-linearizer.h
index 4ed03c6..016d602 100644
--- a/src/compiler/effect-control-linearizer.h
+++ b/src/compiler/effect-control-linearizer.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
 
 #include "src/compiler/common-operator.h"
+#include "src/compiler/graph-assembler.h"
 #include "src/compiler/node.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/globals.h"
@@ -38,174 +39,94 @@
   void ProcessNode(Node* node, Node** frame_state, Node** effect,
                    Node** control);
 
-  struct ValueEffectControl {
-    Node* value;
-    Node* effect;
-    Node* control;
-    ValueEffectControl(Node* value, Node* effect, Node* control)
-        : value(value), effect(effect), control(control) {}
-  };
-
   bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
                             Node** control);
-  ValueEffectControl LowerChangeBitToTagged(Node* node, Node* effect,
-                                            Node* control);
-  ValueEffectControl LowerChangeInt31ToTaggedSigned(Node* node, Node* effect,
-                                                    Node* control);
-  ValueEffectControl LowerChangeInt32ToTagged(Node* node, Node* effect,
-                                              Node* control);
-  ValueEffectControl LowerChangeUint32ToTagged(Node* node, Node* effect,
-                                               Node* control);
-  ValueEffectControl LowerChangeFloat64ToTagged(Node* node, Node* effect,
-                                                Node* control);
-  ValueEffectControl LowerChangeFloat64ToTaggedPointer(Node* node, Node* effect,
-                                                       Node* control);
-  ValueEffectControl LowerChangeTaggedSignedToInt32(Node* node, Node* effect,
-                                                    Node* control);
-  ValueEffectControl LowerChangeTaggedToBit(Node* node, Node* effect,
-                                            Node* control);
-  ValueEffectControl LowerChangeTaggedToInt32(Node* node, Node* effect,
-                                              Node* control);
-  ValueEffectControl LowerChangeTaggedToUint32(Node* node, Node* effect,
-                                               Node* control);
-  ValueEffectControl LowerCheckBounds(Node* node, Node* frame_state,
-                                      Node* effect, Node* control);
-  ValueEffectControl LowerCheckMaps(Node* node, Node* frame_state, Node* effect,
-                                    Node* control);
-  ValueEffectControl LowerCheckNumber(Node* node, Node* frame_state,
-                                      Node* effect, Node* control);
-  ValueEffectControl LowerCheckString(Node* node, Node* frame_state,
-                                      Node* effect, Node* control);
-  ValueEffectControl LowerCheckIf(Node* node, Node* frame_state, Node* effect,
-                                  Node* control);
-  ValueEffectControl LowerCheckedInt32Add(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
-  ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
-  ValueEffectControl LowerCheckedInt32Div(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
-  ValueEffectControl LowerCheckedInt32Mod(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
-  ValueEffectControl LowerCheckedUint32Div(Node* node, Node* frame_state,
-                                           Node* effect, Node* control);
-  ValueEffectControl LowerCheckedUint32Mod(Node* node, Node* frame_state,
-                                           Node* effect, Node* control);
-  ValueEffectControl LowerCheckedInt32Mul(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
-  ValueEffectControl LowerCheckedInt32ToTaggedSigned(Node* node,
-                                                     Node* frame_state,
-                                                     Node* effect,
-                                                     Node* control);
-  ValueEffectControl LowerCheckedUint32ToInt32(Node* node, Node* frame_state,
-                                               Node* effect, Node* control);
-  ValueEffectControl LowerCheckedUint32ToTaggedSigned(Node* node,
-                                                      Node* frame_state,
-                                                      Node* effect,
-                                                      Node* control);
-  ValueEffectControl LowerCheckedFloat64ToInt32(Node* node, Node* frame_state,
-                                                Node* effect, Node* control);
-  ValueEffectControl LowerCheckedTaggedSignedToInt32(Node* node,
-                                                     Node* frame_state,
-                                                     Node* effect,
-                                                     Node* control);
-  ValueEffectControl LowerCheckedTaggedToInt32(Node* node, Node* frame_state,
-                                               Node* effect, Node* control);
-  ValueEffectControl LowerCheckedTaggedToFloat64(Node* node, Node* frame_state,
-                                                 Node* effect, Node* control);
-  ValueEffectControl LowerCheckedTaggedToTaggedSigned(Node* node,
-                                                      Node* frame_state,
-                                                      Node* effect,
-                                                      Node* control);
-  ValueEffectControl LowerCheckedTaggedToTaggedPointer(Node* node,
-                                                       Node* frame_state,
-                                                       Node* effect,
-                                                       Node* control);
-  ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
-                                                Node* control);
-  ValueEffectControl LowerTruncateTaggedToBit(Node* node, Node* effect,
-                                              Node* control);
-  ValueEffectControl LowerTruncateTaggedToFloat64(Node* node, Node* effect,
-                                                  Node* control);
-  ValueEffectControl LowerTruncateTaggedToWord32(Node* node, Node* effect,
-                                                 Node* control);
-  ValueEffectControl LowerCheckedTruncateTaggedToWord32(Node* node,
-                                                        Node* frame_state,
-                                                        Node* effect,
-                                                        Node* control);
-  ValueEffectControl LowerObjectIsCallable(Node* node, Node* effect,
-                                           Node* control);
-  ValueEffectControl LowerObjectIsNumber(Node* node, Node* effect,
-                                         Node* control);
-  ValueEffectControl LowerObjectIsReceiver(Node* node, Node* effect,
-                                           Node* control);
-  ValueEffectControl LowerObjectIsSmi(Node* node, Node* effect, Node* control);
-  ValueEffectControl LowerObjectIsString(Node* node, Node* effect,
-                                         Node* control);
-  ValueEffectControl LowerObjectIsUndetectable(Node* node, Node* effect,
-                                               Node* control);
-  ValueEffectControl LowerArrayBufferWasNeutered(Node* node, Node* effect,
-                                                 Node* control);
-  ValueEffectControl LowerStringCharCodeAt(Node* node, Node* effect,
-                                           Node* control);
-  ValueEffectControl LowerStringFromCharCode(Node* node, Node* effect,
-                                             Node* control);
-  ValueEffectControl LowerStringFromCodePoint(Node* node, Node* effect,
-                                              Node* control);
-  ValueEffectControl LowerStringEqual(Node* node, Node* effect, Node* control);
-  ValueEffectControl LowerStringLessThan(Node* node, Node* effect,
-                                         Node* control);
-  ValueEffectControl LowerStringLessThanOrEqual(Node* node, Node* effect,
-                                                Node* control);
-  ValueEffectControl LowerCheckFloat64Hole(Node* node, Node* frame_state,
-                                           Node* effect, Node* control);
-  ValueEffectControl LowerCheckTaggedHole(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
-  ValueEffectControl LowerConvertTaggedHoleToUndefined(Node* node, Node* effect,
-                                                       Node* control);
-  ValueEffectControl LowerPlainPrimitiveToNumber(Node* node, Node* effect,
-                                                 Node* control);
-  ValueEffectControl LowerPlainPrimitiveToWord32(Node* node, Node* effect,
-                                                 Node* control);
-  ValueEffectControl LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
-                                                  Node* control);
-  ValueEffectControl LowerEnsureWritableFastElements(Node* node, Node* effect,
-                                                     Node* control);
-  ValueEffectControl LowerMaybeGrowFastElements(Node* node, Node* frame_state,
-                                                Node* effect, Node* control);
-  ValueEffectControl LowerTransitionElementsKind(Node* node, Node* effect,
-                                                 Node* control);
-  ValueEffectControl LowerLoadTypedElement(Node* node, Node* effect,
-                                           Node* control);
-  ValueEffectControl LowerStoreTypedElement(Node* node, Node* effect,
-                                            Node* control);
+  Node* LowerChangeBitToTagged(Node* node);
+  Node* LowerChangeInt31ToTaggedSigned(Node* node);
+  Node* LowerChangeInt32ToTagged(Node* node);
+  Node* LowerChangeUint32ToTagged(Node* node);
+  Node* LowerChangeFloat64ToTagged(Node* node);
+  Node* LowerChangeFloat64ToTaggedPointer(Node* node);
+  Node* LowerChangeTaggedSignedToInt32(Node* node);
+  Node* LowerChangeTaggedToBit(Node* node);
+  Node* LowerChangeTaggedToInt32(Node* node);
+  Node* LowerChangeTaggedToUint32(Node* node);
+  Node* LowerChangeTaggedToTaggedSigned(Node* node);
+  Node* LowerCheckBounds(Node* node, Node* frame_state);
+  Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
+  Node* LowerCheckMaps(Node* node, Node* frame_state);
+  Node* LowerCheckNumber(Node* node, Node* frame_state);
+  Node* LowerCheckReceiver(Node* node, Node* frame_state);
+  Node* LowerCheckString(Node* node, Node* frame_state);
+  Node* LowerCheckIf(Node* node, Node* frame_state);
+  Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
+  Node* LowerCheckedInt32Sub(Node* node, Node* frame_state);
+  Node* LowerCheckedInt32Div(Node* node, Node* frame_state);
+  Node* LowerCheckedInt32Mod(Node* node, Node* frame_state);
+  Node* LowerCheckedUint32Div(Node* node, Node* frame_state);
+  Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
+  Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
+  Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
+  Node* LowerCheckedUint32ToInt32(Node* node, Node* frame_state);
+  Node* LowerCheckedUint32ToTaggedSigned(Node* node, Node* frame_state);
+  Node* LowerCheckedFloat64ToInt32(Node* node, Node* frame_state);
+  Node* LowerCheckedTaggedSignedToInt32(Node* node, Node* frame_state);
+  Node* LowerCheckedTaggedToInt32(Node* node, Node* frame_state);
+  Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
+  Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
+  Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
+  Node* LowerChangeTaggedToFloat64(Node* node);
+  Node* LowerTruncateTaggedToBit(Node* node);
+  Node* LowerTruncateTaggedToFloat64(Node* node);
+  Node* LowerTruncateTaggedToWord32(Node* node);
+  Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
+  Node* LowerObjectIsDetectableCallable(Node* node);
+  Node* LowerObjectIsNonCallable(Node* node);
+  Node* LowerObjectIsNumber(Node* node);
+  Node* LowerObjectIsReceiver(Node* node);
+  Node* LowerObjectIsSmi(Node* node);
+  Node* LowerObjectIsString(Node* node);
+  Node* LowerObjectIsUndetectable(Node* node);
+  Node* LowerNewRestParameterElements(Node* node);
+  Node* LowerNewUnmappedArgumentsElements(Node* node);
+  Node* LowerArrayBufferWasNeutered(Node* node);
+  Node* LowerStringCharAt(Node* node);
+  Node* LowerStringCharCodeAt(Node* node);
+  Node* LowerStringFromCharCode(Node* node);
+  Node* LowerStringFromCodePoint(Node* node);
+  Node* LowerStringIndexOf(Node* node);
+  Node* LowerStringEqual(Node* node);
+  Node* LowerStringLessThan(Node* node);
+  Node* LowerStringLessThanOrEqual(Node* node);
+  Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
+  Node* LowerCheckTaggedHole(Node* node, Node* frame_state);
+  Node* LowerConvertTaggedHoleToUndefined(Node* node);
+  Node* LowerPlainPrimitiveToNumber(Node* node);
+  Node* LowerPlainPrimitiveToWord32(Node* node);
+  Node* LowerPlainPrimitiveToFloat64(Node* node);
+  Node* LowerEnsureWritableFastElements(Node* node);
+  Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
+  void LowerTransitionElementsKind(Node* node);
+  Node* LowerLoadTypedElement(Node* node);
+  void LowerStoreTypedElement(Node* node);
 
   // Lowering of optional operators.
-  ValueEffectControl LowerFloat64RoundUp(Node* node, Node* effect,
-                                         Node* control);
-  ValueEffectControl LowerFloat64RoundDown(Node* node, Node* effect,
-                                           Node* control);
-  ValueEffectControl LowerFloat64RoundTiesEven(Node* node, Node* effect,
-                                               Node* control);
-  ValueEffectControl LowerFloat64RoundTruncate(Node* node, Node* effect,
-                                               Node* control);
+  Maybe<Node*> LowerFloat64RoundUp(Node* node);
+  Maybe<Node*> LowerFloat64RoundDown(Node* node);
+  Maybe<Node*> LowerFloat64RoundTiesEven(Node* node);
+  Maybe<Node*> LowerFloat64RoundTruncate(Node* node);
 
-  ValueEffectControl AllocateHeapNumberWithValue(Node* node, Node* effect,
-                                                 Node* control);
-  ValueEffectControl BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
-                                                Node* value, Node* frame_state,
-                                                Node* effect, Node* control);
-  ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(
-      CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
-      Node* control);
-  ValueEffectControl BuildFloat64RoundDown(Node* value, Node* effect,
-                                           Node* control);
-  ValueEffectControl LowerStringComparison(Callable const& callable, Node* node,
-                                           Node* effect, Node* control);
+  Node* AllocateHeapNumberWithValue(Node* node);
+  Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode, Node* value,
+                                   Node* frame_state);
+  Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
+                                                 Node* value,
+                                                 Node* frame_state);
+  Node* BuildFloat64RoundDown(Node* value);
+  Node* LowerStringComparison(Callable const& callable, Node* node);
 
   Node* ChangeInt32ToSmi(Node* value);
   Node* ChangeUint32ToSmi(Node* value);
-  Node* ChangeInt32ToFloat64(Node* value);
-  Node* ChangeUint32ToFloat64(Node* value);
   Node* ChangeSmiToInt32(Node* value);
   Node* ObjectIsSmi(Node* value);
 
@@ -222,15 +143,14 @@
   SimplifiedOperatorBuilder* simplified() const;
   MachineOperatorBuilder* machine() const;
 
-  Operator const* ToNumberOperator();
+  GraphAssembler* gasm() { return &graph_assembler_; }
 
   JSGraph* js_graph_;
   Schedule* schedule_;
   Zone* temp_zone_;
   RegionObservability region_observability_ = RegionObservability::kObservable;
   SourcePositionTable* source_positions_;
-
-  SetOncePointer<Operator const> to_number_operator_;
+  GraphAssembler graph_assembler_;
 };
 
 }  // namespace compiler
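Note on the header change above: the per-method ValueEffectControl triple is replaced by a GraphAssembler that tracks the current effect and control internally, so each lowering returns only the value node, and the optional operators return Maybe<Node*> to signal missing machine support. A minimal sketch of the new shape of a lowering, assuming the usual local shorthand `#define __ gasm()->`; the opcode choice and node names are illustrative, not copied from the implementation:

  Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
    Node* value = node->InputAt(0);
    auto if_true = __ MakeLabel<1>();
    auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
    __ GotoIf(value, &if_true);          // effect/control threaded implicitly
    __ Goto(&done, __ FalseConstant());
    __ Bind(&if_true);
    __ Goto(&done, __ TrueConstant());
    __ Bind(&done);
    return done.PhiAt(0);                // only the value is returned
  }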
diff --git a/src/compiler/escape-analysis-reducer.cc b/src/compiler/escape-analysis-reducer.cc
index f7708f8..c05092e 100644
--- a/src/compiler/escape-analysis-reducer.cc
+++ b/src/compiler/escape-analysis-reducer.cc
@@ -31,7 +31,7 @@
       fully_reduced_(static_cast<int>(jsgraph->graph()->NodeCount() * 2), zone),
       exists_virtual_allocate_(escape_analysis->ExistsVirtualAllocate()) {}
 
-Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+Reduction EscapeAnalysisReducer::ReduceNode(Node* node) {
   if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
       fully_reduced_.Contains(node->id())) {
     return NoChange();
@@ -61,8 +61,7 @@
         break;
       }
       bool depends_on_object_state = false;
-      for (int i = 0; i < node->InputCount(); i++) {
-        Node* input = node->InputAt(i);
+      for (Node* input : node->inputs()) {
         switch (input->opcode()) {
           case IrOpcode::kAllocate:
           case IrOpcode::kFinishRegion:
@@ -97,9 +96,18 @@
   return NoChange();
 }
 
+Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+  Reduction reduction = ReduceNode(node);
+  if (reduction.Changed() && node != reduction.replacement()) {
+    escape_analysis()->SetReplacement(node, reduction.replacement());
+  }
+  return reduction;
+}
+
 namespace {
 
-Node* MaybeGuard(JSGraph* jsgraph, Node* original, Node* replacement) {
+Node* MaybeGuard(JSGraph* jsgraph, Zone* zone, Node* original,
+                 Node* replacement) {
   // We might need to guard the replacement if the type of the {replacement}
   // node is not in a sub-type relation to the type of the {original} node.
   Type* const replacement_type = NodeProperties::GetType(replacement);
@@ -108,10 +116,18 @@
     Node* const control = NodeProperties::GetControlInput(original);
     replacement = jsgraph->graph()->NewNode(
         jsgraph->common()->TypeGuard(original_type), replacement, control);
+    NodeProperties::SetType(replacement, original_type);
   }
   return replacement;
 }
 
+Node* SkipTypeGuards(Node* node) {
+  while (node->opcode() == IrOpcode::kTypeGuard) {
+    node = NodeProperties::GetValueInput(node, 0);
+  }
+  return node;
+}
+
 }  // namespace
 
 Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
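For context on the helpers above: MaybeGuard now also types the TypeGuard node it inserts, and the new SkipTypeGuards lets the reducer look through such guards when it queries the escape analysis about the underlying allocation. A small illustration of the invariant, with made-up types:

  // If type(replacement) is not a subtype of type(original), e.g.
  //   type(original) = Number, type(replacement) = Any,
  // MaybeGuard wraps the replacement:
  //   guarded = TypeGuard[Number](replacement, control);  // now typed Number
  // and SkipTypeGuards recovers the underlying node for IsVirtual():
  //   SkipTypeGuards(guarded) == SkipTypeGuards(replacement)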
@@ -120,12 +136,12 @@
   if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
     fully_reduced_.Add(node->id());
   }
-  if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+  if (escape_analysis()->IsVirtual(
+          SkipTypeGuards(NodeProperties::GetValueInput(node, 0)))) {
     if (Node* rep = escape_analysis()->GetReplacement(node)) {
-      isolate()->counters()->turbo_escape_loads_replaced()->Increment();
       TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
             node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
-      rep = MaybeGuard(jsgraph(), node, rep);
+      rep = MaybeGuard(jsgraph(), zone(), node, rep);
       ReplaceWithValue(node, rep);
       return Replace(rep);
     }
@@ -140,7 +156,8 @@
   if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
     fully_reduced_.Add(node->id());
   }
-  if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+  if (escape_analysis()->IsVirtual(
+          SkipTypeGuards(NodeProperties::GetValueInput(node, 0)))) {
     TRACE("Removed #%d (%s) from effect chain\n", node->id(),
           node->op()->mnemonic());
     RelaxEffectsAndControls(node);
@@ -157,7 +174,6 @@
   }
   if (escape_analysis()->IsVirtual(node)) {
     RelaxEffectsAndControls(node);
-    isolate()->counters()->turbo_escape_allocs_replaced()->Increment();
     TRACE("Removed allocate #%d from effect chain\n", node->id());
     return Changed(node);
   }
@@ -195,14 +211,14 @@
 
 Reduction EscapeAnalysisReducer::ReduceReferenceEqual(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kReferenceEqual);
-  Node* left = NodeProperties::GetValueInput(node, 0);
-  Node* right = NodeProperties::GetValueInput(node, 1);
+  Node* left = SkipTypeGuards(NodeProperties::GetValueInput(node, 0));
+  Node* right = SkipTypeGuards(NodeProperties::GetValueInput(node, 1));
   if (escape_analysis()->IsVirtual(left)) {
     if (escape_analysis()->IsVirtual(right) &&
         escape_analysis()->CompareVirtualObjects(left, right)) {
       ReplaceWithValue(node, jsgraph()->TrueConstant());
       TRACE("Replaced ref eq #%d with true\n", node->id());
-      Replace(jsgraph()->TrueConstant());
+      return Replace(jsgraph()->TrueConstant());
     }
     // Right-hand side is not a virtual object, or a different one.
     ReplaceWithValue(node, jsgraph()->FalseConstant());
@@ -220,7 +236,7 @@
 
 Reduction EscapeAnalysisReducer::ReduceObjectIsSmi(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kObjectIsSmi);
-  Node* input = NodeProperties::GetValueInput(node, 0);
+  Node* input = SkipTypeGuards(NodeProperties::GetValueInput(node, 0));
   if (escape_analysis()->IsVirtual(input)) {
     ReplaceWithValue(node, jsgraph()->FalseConstant());
     TRACE("Replaced ObjectIsSmi #%d with false\n", node->id());
@@ -313,7 +329,7 @@
                                                    bool node_multiused,
                                                    bool already_cloned,
                                                    bool multiple_users) {
-  Node* input = NodeProperties::GetValueInput(node, node_index);
+  Node* input = SkipTypeGuards(NodeProperties::GetValueInput(node, node_index));
   if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
       fully_reduced_.Contains(node->id())) {
     return nullptr;
@@ -364,8 +380,6 @@
 #endif  // DEBUG
 }
 
-Isolate* EscapeAnalysisReducer::isolate() const { return jsgraph_->isolate(); }
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/escape-analysis-reducer.h b/src/compiler/escape-analysis-reducer.h
index 61e7607..01c2ae1 100644
--- a/src/compiler/escape-analysis-reducer.h
+++ b/src/compiler/escape-analysis-reducer.h
@@ -33,6 +33,7 @@
   bool compilation_failed() const { return compilation_failed_; }
 
  private:
+  Reduction ReduceNode(Node* node);
   Reduction ReduceLoad(Node* node);
   Reduction ReduceStore(Node* node);
   Reduction ReduceAllocate(Node* node);
@@ -48,7 +49,6 @@
   JSGraph* jsgraph() const { return jsgraph_; }
   EscapeAnalysis* escape_analysis() const { return escape_analysis_; }
   Zone* zone() const { return zone_; }
-  Isolate* isolate() const;
 
   JSGraph* const jsgraph_;
   EscapeAnalysis* escape_analysis_;
diff --git a/src/compiler/escape-analysis.cc b/src/compiler/escape-analysis.cc
index 0218045..255e74e 100644
--- a/src/compiler/escape-analysis.cc
+++ b/src/compiler/escape-analysis.cc
@@ -12,6 +12,7 @@
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node.h"
@@ -201,7 +202,7 @@
   }
   bool UpdateFrom(const VirtualObject& other);
   bool MergeFrom(MergeCache* cache, Node* at, Graph* graph,
-                 CommonOperatorBuilder* common);
+                 CommonOperatorBuilder* common, bool initialMerge);
   void SetObjectState(Node* node) { object_state_ = node; }
   Node* GetObjectState() const { return object_state_; }
   bool IsCopyRequired() const { return status_ & kCopyRequired; }
@@ -252,10 +253,14 @@
 class VirtualState : public ZoneObject {
  public:
   VirtualState(Node* owner, Zone* zone, size_t size)
-      : info_(size, nullptr, zone), owner_(owner) {}
+      : info_(size, nullptr, zone),
+        initialized_(static_cast<int>(size), zone),
+        owner_(owner) {}
 
   VirtualState(Node* owner, const VirtualState& state)
       : info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
+        initialized_(state.initialized_.length(),
+                     state.info_.get_allocator().zone()),
         owner_(owner) {
     for (size_t i = 0; i < info_.size(); ++i) {
       if (state.info_[i]) {
@@ -280,6 +285,7 @@
 
  private:
   ZoneVector<VirtualObject*> info_;
+  BitVector initialized_;
   Node* owner_;
 
   DISALLOW_COPY_AND_ASSIGN(VirtualState);
@@ -375,6 +381,7 @@
 
 void VirtualState::SetVirtualObject(Alias alias, VirtualObject* obj) {
   info_[alias] = obj;
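+  // Record that this alias has held an object at least once; merges use this
+  // to tell an initial merge apart from an update of an existing object.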
+  if (obj) initialized_.Add(alias);
 }
 
 bool VirtualState::UpdateFrom(VirtualState* from, Zone* zone) {
@@ -431,7 +438,6 @@
   }
   return true;
 }
-
 }  // namespace
 
 bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
@@ -440,12 +446,21 @@
   int value_input_count = static_cast<int>(cache->fields().size());
   Node* rep = GetField(i);
   if (!rep || !IsCreatedPhi(i)) {
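+    // Give the newly created phi a type: conservatively, the union of the
+    // types of all merged inputs, since later phases expect typed nodes.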
+    Type* phi_type = Type::None();
+    for (Node* input : cache->fields()) {
+      CHECK_NOT_NULL(input);
+      CHECK(!input->IsDead());
+      Type* input_type = NodeProperties::GetType(input);
+      phi_type = Type::Union(phi_type, input_type, graph->zone());
+    }
     Node* control = NodeProperties::GetControlInput(at);
     cache->fields().push_back(control);
     Node* phi = graph->NewNode(
         common->Phi(MachineRepresentation::kTagged, value_input_count),
         value_input_count + 1, &cache->fields().front());
+    NodeProperties::SetType(phi, phi_type);
     SetField(i, phi, true);
+
 #ifdef DEBUG
     if (FLAG_trace_turbo_escape) {
       PrintF("    Creating Phi #%d as merge of", phi->id());
@@ -471,12 +486,15 @@
 }
 
 bool VirtualObject::MergeFrom(MergeCache* cache, Node* at, Graph* graph,
-                              CommonOperatorBuilder* common) {
+                              CommonOperatorBuilder* common,
+                              bool initialMerge) {
   DCHECK(at->opcode() == IrOpcode::kEffectPhi ||
          at->opcode() == IrOpcode::kPhi);
   bool changed = false;
   for (size_t i = 0; i < field_count(); ++i) {
-    if (Node* field = cache->GetFields(i)) {
+    if (!initialMerge && GetField(i) == nullptr) continue;
+    Node* field = cache->GetFields(i);
+    if (field && !IsCreatedPhi(i)) {
       changed = changed || GetField(i) != field;
       SetField(i, field);
       TRACE("    Field %zu agree on rep #%d\n", i, field->id());
@@ -516,8 +534,11 @@
         fields = std::min(obj->field_count(), fields);
       }
     }
-    if (cache->objects().size() == cache->states().size()) {
+    if (cache->objects().size() == cache->states().size() &&
+        (mergeObject || !initialized_.Contains(alias))) {
+      bool initialMerge = false;
       if (!mergeObject) {
+        initialMerge = true;
         VirtualObject* obj = new (zone)
             VirtualObject(cache->objects().front()->id(), this, zone, fields,
                           cache->objects().front()->IsInitialized());
@@ -542,7 +563,9 @@
         PrintF("\n");
       }
 #endif  // DEBUG
-      changed = mergeObject->MergeFrom(cache, at, graph, common) || changed;
+      changed =
+          mergeObject->MergeFrom(cache, at, graph, common, initialMerge) ||
+          changed;
     } else {
       if (mergeObject) {
         TRACE("  Alias %d, virtual object removed\n", alias);
@@ -671,6 +694,15 @@
           RevisitInputs(rep);
           RevisitUses(rep);
         }
+      } else {
+        Node* from = NodeProperties::GetValueInput(node, 0);
+        from = object_analysis_->ResolveReplacement(from);
+        if (SetEscaped(from)) {
+          TRACE("Setting #%d (%s) to escaped because of unresolved load #%i\n",
+                from->id(), from->op()->mnemonic(), node->id());
+          RevisitInputs(from);
+          RevisitUses(from);
+        }
       }
       RevisitUses(node);
       break;
@@ -795,6 +827,7 @@
       case IrOpcode::kSelect:
       // TODO(mstarzinger): The following list of operators will eventually be
       // handled by the EscapeAnalysisReducer (similar to ObjectIsSmi).
+      case IrOpcode::kConvertTaggedHoleToUndefined:
       case IrOpcode::kStringEqual:
       case IrOpcode::kStringLessThan:
       case IrOpcode::kStringLessThanOrEqual:
@@ -802,8 +835,11 @@
       case IrOpcode::kPlainPrimitiveToNumber:
       case IrOpcode::kPlainPrimitiveToWord32:
       case IrOpcode::kPlainPrimitiveToFloat64:
+      case IrOpcode::kStringCharAt:
       case IrOpcode::kStringCharCodeAt:
-      case IrOpcode::kObjectIsCallable:
+      case IrOpcode::kStringIndexOf:
+      case IrOpcode::kObjectIsDetectableCallable:
+      case IrOpcode::kObjectIsNonCallable:
       case IrOpcode::kObjectIsNumber:
       case IrOpcode::kObjectIsReceiver:
       case IrOpcode::kObjectIsString:
@@ -819,9 +855,9 @@
         if (use->op()->EffectInputCount() == 0 &&
             uses->op()->EffectInputCount() > 0 &&
             !IrOpcode::IsJsOpcode(use->opcode())) {
-          TRACE("Encountered unaccounted use by #%d (%s)\n", use->id(),
-                use->op()->mnemonic());
-          UNREACHABLE();
+          V8_Fatal(__FILE__, __LINE__,
+                   "Encountered unaccounted use by #%d (%s)\n", use->id(),
+                   use->op()->mnemonic());
         }
         if (SetEscaped(rep)) {
           TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
@@ -842,6 +878,7 @@
   }
   if (CheckUsesForEscape(node, true)) {
     RevisitInputs(node);
+    RevisitUses(node);
   }
 }
 
@@ -863,11 +900,15 @@
       virtual_states_(zone),
       replacements_(zone),
       cycle_detection_(zone),
-      cache_(nullptr) {}
+      cache_(nullptr) {
+  // Type slot_not_analyzed_ manually.
+  double v = OpParameter<double>(slot_not_analyzed_);
+  NodeProperties::SetType(slot_not_analyzed_, Type::Range(v, v, zone));
+}
 
 EscapeAnalysis::~EscapeAnalysis() {}
 
-void EscapeAnalysis::Run() {
+bool EscapeAnalysis::Run() {
   replacements_.resize(graph()->NodeCount());
   status_analysis_->AssignAliases();
   if (status_analysis_->AliasCount() > 0) {
@@ -876,6 +917,9 @@
     status_analysis_->ResizeStatusVector();
     RunObjectAnalysis();
     status_analysis_->RunStatusAnalysis();
+    return true;
+  } else {
+    return false;
   }
 }
 
@@ -966,6 +1010,7 @@
           // VirtualObjects, and we want to delay phis to improve performance.
           if (use->opcode() == IrOpcode::kEffectPhi) {
             if (!status_analysis_->IsInQueue(use->id())) {
+              status_analysis_->SetInQueue(use->id(), true);
               queue.push_front(use);
             }
           } else if ((use->opcode() != IrOpcode::kLoadField &&
@@ -1044,6 +1089,19 @@
   return false;
 }
 
+namespace {
+
+bool HasFrameStateInput(const Operator* op) {
+  if (op->opcode() == IrOpcode::kCall || op->opcode() == IrOpcode::kTailCall) {
+    const CallDescriptor* d = CallDescriptorOf(op);
+    return d->NeedsFrameState();
+  } else {
+    return OperatorProperties::HasFrameStateInput(op);
+  }
+}
+
+}  // namespace
+
 bool EscapeAnalysis::Process(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kAllocate:
@@ -1080,6 +1138,9 @@
       ProcessAllocationUsers(node);
       break;
   }
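+  // The frame state attached to this node may capture the current virtual
+  // state for deoptimization, so it must be copied before further mutation.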
+  if (HasFrameStateInput(node->op())) {
+    virtual_states_[node->id()]->SetCopyRequired();
+  }
   return true;
 }
 
@@ -1173,8 +1234,7 @@
           static_cast<void*>(virtual_states_[effect->id()]),
           effect->op()->mnemonic(), effect->id(), node->op()->mnemonic(),
           node->id());
-    if (status_analysis_->IsEffectBranchPoint(effect) ||
-        OperatorProperties::HasFrameStateInput(node->op())) {
+    if (status_analysis_->IsEffectBranchPoint(effect)) {
       virtual_states_[node->id()]->SetCopyRequired();
       TRACE(", effect input %s#%d is branch point", effect->op()->mnemonic(),
             effect->id());
@@ -1393,10 +1453,16 @@
       Node* rep = replacement(load);
       if (!rep || !IsEquivalentPhi(rep, cache_->fields())) {
         int value_input_count = static_cast<int>(cache_->fields().size());
+        Type* phi_type = Type::None();
+        for (Node* input : cache_->fields()) {
+          Type* input_type = NodeProperties::GetType(input);
+          phi_type = Type::Union(phi_type, input_type, graph()->zone());
+        }
         cache_->fields().push_back(NodeProperties::GetControlInput(from));
         Node* phi = graph()->NewNode(
             common()->Phi(MachineRepresentation::kTagged, value_input_count),
             value_input_count + 1, &cache_->fields().front());
+        NodeProperties::SetType(phi, phi_type);
         status_analysis_->ResizeStatusVector();
         SetReplacement(load, phi);
         TRACE(" got phi created.\n");
@@ -1583,13 +1649,14 @@
         cache_->fields().clear();
         for (size_t i = 0; i < vobj->field_count(); ++i) {
           if (Node* field = vobj->GetField(i)) {
-            cache_->fields().push_back(field);
+            cache_->fields().push_back(ResolveReplacement(field));
           }
         }
         int input_count = static_cast<int>(cache_->fields().size());
         Node* new_object_state =
             graph()->NewNode(common()->ObjectState(input_count), input_count,
                              &cache_->fields().front());
+        NodeProperties::SetType(new_object_state, Type::OtherInternal());
         vobj->SetObjectState(new_object_state);
         TRACE(
             "Creating object state #%d for vobj %p (from node #%d) at effect "
diff --git a/src/compiler/escape-analysis.h b/src/compiler/escape-analysis.h
index b85efe7..52edc4b 100644
--- a/src/compiler/escape-analysis.h
+++ b/src/compiler/escape-analysis.h
@@ -26,15 +26,17 @@
   EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
   ~EscapeAnalysis();
 
-  void Run();
+  bool Run();
 
   Node* GetReplacement(Node* node);
+  Node* ResolveReplacement(Node* node);
   bool IsVirtual(Node* node);
   bool IsEscaped(Node* node);
   bool CompareVirtualObjects(Node* left, Node* right);
   Node* GetOrCreateObjectState(Node* effect, Node* node);
   bool IsCyclicObjectState(Node* effect, Node* node);
   bool ExistsVirtualAllocate();
+  bool SetReplacement(Node* node, Node* rep);
 
  private:
   void RunObjectAnalysis();
@@ -58,8 +60,6 @@
                                        Node* node);
 
   Node* replacement(Node* node);
-  Node* ResolveReplacement(Node* node);
-  bool SetReplacement(Node* node, Node* rep);
   bool UpdateReplacement(VirtualState* state, Node* node, Node* rep);
 
   VirtualObject* GetVirtualObject(VirtualState* state, Node* node);
diff --git a/src/compiler/frame-elider.cc b/src/compiler/frame-elider.cc
index bb17d12..35d292b 100644
--- a/src/compiler/frame-elider.cc
+++ b/src/compiler/frame-elider.cc
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/base/adapters.h"
 #include "src/compiler/frame-elider.h"
 
+#include "src/base/adapters.h"
+
 namespace v8 {
 namespace internal {
 namespace compiler {
@@ -114,13 +115,36 @@
     }
   }
 
-  // Propagate towards start ("upwards") if there are successors and all of
-  // them need a frame.
-  for (RpoNumber& succ : block->successors()) {
-    if (!InstructionBlockAt(succ)->needs_frame()) return false;
+  // Propagate towards start ("upwards").
+  bool need_frame_successors = false;
+  if (block->SuccessorCount() == 1) {
+    // For single successors, propagate the needs_frame information.
+    need_frame_successors =
+        InstructionBlockAt(block->successors()[0])->needs_frame();
+  } else {
+    // For multiple successors, each successor must only have a single
+    // predecessor (because the graph is in edge-split form), so each successor
+    // can independently create/dismantle a frame if needed. Given this
+    // independent control, only propagate needs_frame if all non-deferred
+    // blocks need a frame.
+    for (RpoNumber& succ : block->successors()) {
+      InstructionBlock* successor_block = InstructionBlockAt(succ);
+      DCHECK_EQ(1, successor_block->PredecessorCount());
+      if (!successor_block->IsDeferred()) {
+        if (successor_block->needs_frame()) {
+          need_frame_successors = true;
+        } else {
+          return false;
+        }
+      }
+    }
   }
-  block->mark_needs_frame();
-  return true;
+  if (need_frame_successors) {
+    block->mark_needs_frame();
+    return true;
+  } else {
+    return false;
+  }
 }
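A worked example of the new upward-propagation rule, on an assumed three-block shape (illustrative, not taken from the source):

  //          B0
  //         /  \
  //   B1 (deferred)  B2
  //
  // Old rule: B0 needs a frame only if both B1 and B2 need one.
  // New rule: the deferred B1 no longer vetoes. If the non-deferred B2 needs
  // a frame, B0 is marked too, while B1 can build its own frame on entry;
  // this is sound because in edge-split form each successor has exactly one
  // predecessor.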
 
 
diff --git a/src/compiler/frame-states.cc b/src/compiler/frame-states.cc
index a02fb01..ec014da 100644
--- a/src/compiler/frame-states.cc
+++ b/src/compiler/frame-states.cc
@@ -6,6 +6,7 @@
 
 #include "src/base/functional.h"
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index 8d463df..a4d6829 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -113,9 +113,9 @@
 
   int AllocateSpillSlot(int width) {
     int frame_slot_count_before = frame_slot_count_;
-    int slot = AllocateAlignedFrameSlot(width);
-    spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
-    return slot;
+    AllocateAlignedFrameSlots(width);
+    spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
+    return frame_slot_count_ - 1;
   }
 
   int AlignFrame(int alignment = kDoubleSize);
@@ -131,23 +131,15 @@
   static const int kJSFunctionSlot = 3 + StandardFrameConstants::kCPSlotCount;
 
  private:
-  int AllocateAlignedFrameSlot(int width) {
-    DCHECK(width == 4 || width == 8 || width == 16);
-    if (kPointerSize == 4) {
-      // Skip one slot if necessary.
-      if (width > kPointerSize) {
-        frame_slot_count_++;
-        frame_slot_count_ |= 1;
-        // 2 extra slots if width == 16.
-        frame_slot_count_ += (width & 16) / 8;
-      }
-    } else {
-      // No alignment when slots are 8 bytes.
-      DCHECK_EQ(8, kPointerSize);
-      // 1 extra slot if width == 16.
-      frame_slot_count_ += (width & 16) / 16;
-    }
-    return frame_slot_count_++;
+  void AllocateAlignedFrameSlots(int width) {
+    DCHECK_LT(0, width);
+    int new_frame_slots = (width + kPointerSize - 1) / kPointerSize;
+    // Align to 8 bytes if width is a multiple of 8 bytes, and to 16 bytes
+    // if it is a multiple of 16.
+    int align_to = (width & 15) == 0 ? 16 : (width & 7) == 0 ? 8 : kPointerSize;
+    frame_slot_count_ =
+        RoundUp(frame_slot_count_ + new_frame_slots, align_to / kPointerSize);
+    DCHECK_LT(0, frame_slot_count_);
   }
 
  private:
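A worked instance of the new AllocateAlignedFrameSlots, assuming a 32-bit target (kPointerSize == 4) and a 16-byte-wide slot; the numbers are only illustrative:

  // width = 16:
  //   new_frame_slots = (16 + 4 - 1) / 4 = 4 slots
  //   align_to        = 16                  // width is a multiple of 16
  //   frame_slot_count_ = RoundUp(frame_slot_count_ + 4, 16 / 4)
  // AllocateSpillSlot then returns frame_slot_count_ - 1, the highest slot of
  // the aligned group, and counts every newly added slot (padding included)
  // towards spill_slot_count_.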
diff --git a/src/compiler/graph-assembler.cc b/src/compiler/graph-assembler.cc
new file mode 100644
index 0000000..dbeff87
--- /dev/null
+++ b/src/compiler/graph-assembler.cc
@@ -0,0 +1,295 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-assembler.h"
+
+#include "src/code-factory.h"
+#include "src/compiler/linkage.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+GraphAssembler::GraphAssembler(JSGraph* jsgraph, Node* effect, Node* control,
+                               Zone* zone)
+    : temp_zone_(zone),
+      jsgraph_(jsgraph),
+      current_effect_(effect),
+      current_control_(control) {}
+
+Node* GraphAssembler::IntPtrConstant(intptr_t value) {
+  return jsgraph()->IntPtrConstant(value);
+}
+
+Node* GraphAssembler::Int32Constant(int32_t value) {
+  return jsgraph()->Int32Constant(value);
+}
+
+Node* GraphAssembler::UniqueInt32Constant(int32_t value) {
+  return graph()->NewNode(common()->Int32Constant(value));
+}
+
+Node* GraphAssembler::SmiConstant(int32_t value) {
+  return jsgraph()->SmiConstant(value);
+}
+
+Node* GraphAssembler::Uint32Constant(int32_t value) {
+  return jsgraph()->Uint32Constant(value);
+}
+
+Node* GraphAssembler::Float64Constant(double value) {
+  return jsgraph()->Float64Constant(value);
+}
+
+Node* GraphAssembler::HeapConstant(Handle<HeapObject> object) {
+  return jsgraph()->HeapConstant(object);
+}
+
+Node* GraphAssembler::ExternalConstant(ExternalReference ref) {
+  return jsgraph()->ExternalConstant(ref);
+}
+
+Node* GraphAssembler::CEntryStubConstant(int result_size) {
+  return jsgraph()->CEntryStubConstant(result_size);
+}
+
+#define SINGLETON_CONST_DEF(Name) \
+  Node* GraphAssembler::Name() { return jsgraph()->Name(); }
+JSGRAPH_SINGLETON_CONSTANT_LIST(SINGLETON_CONST_DEF)
+#undef SINGLETON_CONST_DEF
+
+#define PURE_UNOP_DEF(Name)                            \
+  Node* GraphAssembler::Name(Node* input) {            \
+    return graph()->NewNode(machine()->Name(), input); \
+  }
+PURE_ASSEMBLER_MACH_UNOP_LIST(PURE_UNOP_DEF)
+#undef PURE_UNOP_DEF
+
+#define PURE_BINOP_DEF(Name)                                 \
+  Node* GraphAssembler::Name(Node* left, Node* right) {      \
+    return graph()->NewNode(machine()->Name(), left, right); \
+  }
+PURE_ASSEMBLER_MACH_BINOP_LIST(PURE_BINOP_DEF)
+#undef PURE_BINOP_DEF
+
+#define CHECKED_BINOP_DEF(Name)                                                \
+  Node* GraphAssembler::Name(Node* left, Node* right) {                        \
+    return graph()->NewNode(machine()->Name(), left, right, current_control_); \
+  }
+CHECKED_ASSEMBLER_MACH_BINOP_LIST(CHECKED_BINOP_DEF)
+#undef CHECKED_BINOP_DEF
+
+Node* GraphAssembler::Float64RoundDown(Node* value) {
+  if (machine()->Float64RoundDown().IsSupported()) {
+    return graph()->NewNode(machine()->Float64RoundDown().op(), value);
+  }
+  return nullptr;
+}
+
+Node* GraphAssembler::Projection(int index, Node* value) {
+  return graph()->NewNode(common()->Projection(index), value, current_control_);
+}
+
+Node* GraphAssembler::Allocate(PretenureFlag pretenure, Node* size) {
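+  // NOTE: {pretenure} is currently ignored; allocations here are always made
+  // NOT_TENURED.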
+  return current_effect_ =
+             graph()->NewNode(simplified()->Allocate(NOT_TENURED), size,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::LoadField(FieldAccess const& access, Node* object) {
+  return current_effect_ =
+             graph()->NewNode(simplified()->LoadField(access), object,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::LoadElement(ElementAccess const& access, Node* object,
+                                  Node* index) {
+  return current_effect_ =
+             graph()->NewNode(simplified()->LoadElement(access), object, index,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::StoreField(FieldAccess const& access, Node* object,
+                                 Node* value) {
+  return current_effect_ =
+             graph()->NewNode(simplified()->StoreField(access), object, value,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::StoreElement(ElementAccess const& access, Node* object,
+                                   Node* index, Node* value) {
+  return current_effect_ =
+             graph()->NewNode(simplified()->StoreElement(access), object, index,
+                              value, current_effect_, current_control_);
+}
+
+Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
+                            Node* value) {
+  return current_effect_ =
+             graph()->NewNode(machine()->Store(rep), object, offset, value,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::Load(MachineType rep, Node* object, Node* offset) {
+  return current_effect_ =
+             graph()->NewNode(machine()->Load(rep), object, offset,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::Retain(Node* buffer) {
+  return current_effect_ =
+             graph()->NewNode(common()->Retain(), buffer, current_effect_);
+}
+
+Node* GraphAssembler::UnsafePointerAdd(Node* base, Node* external) {
+  return current_effect_ =
+             graph()->NewNode(machine()->UnsafePointerAdd(), base, external,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::ToNumber(Node* value) {
+  return current_effect_ =
+             graph()->NewNode(ToNumberOperator(), ToNumberBuiltinConstant(),
+                              value, NoContextConstant(), current_effect_);
+}
+
+Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason, Node* condition,
+                                   Node* frame_state) {
+  return current_control_ = current_effect_ = graph()->NewNode(
+             common()->DeoptimizeIf(DeoptimizeKind::kEager, reason), condition,
+             frame_state, current_effect_, current_control_);
+}
+
+Node* GraphAssembler::DeoptimizeUnless(DeoptimizeKind kind,
+                                       DeoptimizeReason reason, Node* condition,
+                                       Node* frame_state) {
+  return current_control_ = current_effect_ = graph()->NewNode(
+             common()->DeoptimizeUnless(kind, reason), condition, frame_state,
+             current_effect_, current_control_);
+}
+
+Node* GraphAssembler::DeoptimizeUnless(DeoptimizeReason reason, Node* condition,
+                                       Node* frame_state) {
+  return DeoptimizeUnless(DeoptimizeKind::kEager, reason, condition,
+                          frame_state);
+}
+
+void GraphAssembler::Branch(Node* condition,
+                            GraphAssemblerStaticLabel<1>* if_true,
+                            GraphAssemblerStaticLabel<1>* if_false) {
+  DCHECK_NOT_NULL(current_control_);
+
+  BranchHint hint = BranchHint::kNone;
+  if (if_true->IsDeferred() != if_false->IsDeferred()) {
+    hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
+  }
+
+  Node* branch =
+      graph()->NewNode(common()->Branch(hint), condition, current_control_);
+
+  current_control_ = graph()->NewNode(common()->IfTrue(), branch);
+  MergeState(if_true);
+
+  current_control_ = graph()->NewNode(common()->IfFalse(), branch);
+  MergeState(if_false);
+
+  current_control_ = nullptr;
+  current_effect_ = nullptr;
+}
+
+// Extractors (should only be used when destructing the assembler).
+Node* GraphAssembler::ExtractCurrentControl() {
+  Node* result = current_control_;
+  current_control_ = nullptr;
+  return result;
+}
+
+Node* GraphAssembler::ExtractCurrentEffect() {
+  Node* result = current_effect_;
+  current_effect_ = nullptr;
+  return result;
+}
+
+void GraphAssembler::Reset(Node* effect, Node* control) {
+  current_effect_ = effect;
+  current_control_ = control;
+}
+
+Operator const* GraphAssembler::ToNumberOperator() {
+  if (!to_number_operator_.is_set()) {
+    Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
+    CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+        jsgraph()->isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+        Operator::kEliminatable);
+    to_number_operator_.set(common()->Call(desc));
+  }
+  return to_number_operator_.get();
+}
+
+Node* GraphAssemblerLabel::PhiAt(size_t index) {
+  DCHECK(IsBound());
+  return GetBindingsPtrFor(index)[0];
+}
+
+GraphAssemblerLabel::GraphAssemblerLabel(GraphAssemblerLabelType is_deferred,
+                                         size_t merge_count, size_t var_count,
+                                         MachineRepresentation* representations,
+                                         Zone* zone)
+    : is_deferred_(is_deferred == GraphAssemblerLabelType::kDeferred),
+      max_merge_count_(merge_count),
+      var_count_(var_count) {
+  effects_ = zone->NewArray<Node*>(MaxMergeCount() + 1);
+  for (size_t i = 0; i < MaxMergeCount() + 1; i++) {
+    effects_[i] = nullptr;
+  }
+
+  controls_ = zone->NewArray<Node*>(MaxMergeCount());
+  for (size_t i = 0; i < MaxMergeCount(); i++) {
+    controls_[i] = nullptr;
+  }
+
+  size_t num_bindings = (MaxMergeCount() + 1) * PhiCount() + 1;
+  bindings_ = zone->NewArray<Node*>(num_bindings);
+  for (size_t i = 0; i < num_bindings; i++) {
+    bindings_[i] = nullptr;
+  }
+
+  representations_ = zone->NewArray<MachineRepresentation>(PhiCount() + 1);
+  for (size_t i = 0; i < PhiCount(); i++) {
+    representations_[i] = representations[i];
+  }
+}
+
+GraphAssemblerLabel::~GraphAssemblerLabel() {
+  DCHECK(IsBound() || MergedCount() == 0);
+}
+
+Node** GraphAssemblerLabel::GetBindingsPtrFor(size_t phi_index) {
+  DCHECK_LT(phi_index, PhiCount());
+  return &bindings_[phi_index * (MaxMergeCount() + 1)];
+}
+
+void GraphAssemblerLabel::SetBinding(size_t phi_index, size_t merge_index,
+                                     Node* binding) {
+  DCHECK_LT(phi_index, PhiCount());
+  DCHECK_LT(merge_index, MaxMergeCount());
+  bindings_[phi_index * (MaxMergeCount() + 1) + merge_index] = binding;
+}
+
+MachineRepresentation GraphAssemblerLabel::GetRepresentationFor(
+    size_t phi_index) {
+  DCHECK_LT(phi_index, PhiCount());
+  return representations_[phi_index];
+}
+
+Node** GraphAssemblerLabel::GetControlsPtr() { return controls_; }
+
+Node** GraphAssemblerLabel::GetEffectsPtr() { return effects_; }
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
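A minimal end-to-end sketch of how a client is expected to drive the assembler; AccessBuilder and the deopt reason come from elsewhere in the compiler and are only assumed here:

  GraphAssembler gasm(jsgraph, effect, control, temp_zone);
  // Straight-line operations thread the implicit effect/control chains.
  Node* map = gasm.LoadField(AccessBuilder::ForMap(), object);
  Node* check = gasm.WordEqual(map, gasm.HeapNumberMapConstant());
  gasm.DeoptimizeUnless(DeoptimizeReason::kWrongMap, check, frame_state);
  Node* value = gasm.LoadField(AccessBuilder::ForHeapNumberValue(), object);
  // Hand the updated chains back to the surrounding pass.
  effect = gasm.ExtractCurrentEffect();
  control = gasm.ExtractCurrentControl();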
diff --git a/src/compiler/graph-assembler.h b/src/compiler/graph-assembler.h
new file mode 100644
index 0000000..057e781
--- /dev/null
+++ b/src/compiler/graph-assembler.h
@@ -0,0 +1,451 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_ASSEMBLER_H_
+#define V8_COMPILER_GRAPH_ASSEMBLER_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+class JSGraph;
+class Graph;
+
+namespace compiler {
+
+#define PURE_ASSEMBLER_MACH_UNOP_LIST(V) \
+  V(ChangeInt32ToInt64)                  \
+  V(ChangeInt32ToFloat64)                \
+  V(ChangeUint32ToFloat64)               \
+  V(ChangeUint32ToUint64)                \
+  V(ChangeFloat64ToInt32)                \
+  V(ChangeFloat64ToUint32)               \
+  V(TruncateInt64ToInt32)                \
+  V(RoundFloat64ToInt32)                 \
+  V(TruncateFloat64ToWord32)             \
+  V(Float64ExtractHighWord32)            \
+  V(Float64Abs)                          \
+  V(BitcastWordToTagged)
+
+#define PURE_ASSEMBLER_MACH_BINOP_LIST(V) \
+  V(WordShl)                              \
+  V(WordSar)                              \
+  V(WordAnd)                              \
+  V(Word32Or)                             \
+  V(Word32And)                            \
+  V(Word32Shr)                            \
+  V(Word32Shl)                            \
+  V(IntAdd)                               \
+  V(IntSub)                               \
+  V(UintLessThan)                         \
+  V(Int32Add)                             \
+  V(Int32Sub)                             \
+  V(Int32Mul)                             \
+  V(Int32LessThanOrEqual)                 \
+  V(Uint32LessThanOrEqual)                \
+  V(Uint32LessThan)                       \
+  V(Int32LessThan)                        \
+  V(Float64Add)                           \
+  V(Float64Sub)                           \
+  V(Float64Mod)                           \
+  V(Float64Equal)                         \
+  V(Float64LessThan)                      \
+  V(Float64LessThanOrEqual)               \
+  V(Word32Equal)                          \
+  V(WordEqual)
+
+#define CHECKED_ASSEMBLER_MACH_BINOP_LIST(V) \
+  V(Int32AddWithOverflow)                    \
+  V(Int32SubWithOverflow)                    \
+  V(Int32MulWithOverflow)                    \
+  V(Int32Mod)                                \
+  V(Int32Div)                                \
+  V(Uint32Mod)                               \
+  V(Uint32Div)
+
+#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
+  V(TrueConstant)                          \
+  V(FalseConstant)                         \
+  V(HeapNumberMapConstant)                 \
+  V(NoContextConstant)                     \
+  V(EmptyStringConstant)                   \
+  V(UndefinedConstant)                     \
+  V(TheHoleConstant)                       \
+  V(FixedArrayMapConstant)                 \
+  V(ToNumberBuiltinConstant)               \
+  V(AllocateInNewSpaceStubConstant)        \
+  V(AllocateInOldSpaceStubConstant)
+
+class GraphAssembler;
+
+enum class GraphAssemblerLabelType { kDeferred, kNonDeferred };
+
+// Label with statically known count of incoming branches and phis.
+template <size_t MergeCount, size_t VarCount = 0u>
+class GraphAssemblerStaticLabel {
+ public:
+  Node* PhiAt(size_t index);
+
+  template <typename... Reps>
+  explicit GraphAssemblerStaticLabel(GraphAssemblerLabelType is_deferred,
+                                     Reps... reps)
+      : is_deferred_(is_deferred == GraphAssemblerLabelType::kDeferred) {
+    STATIC_ASSERT(VarCount == sizeof...(reps));
+    MachineRepresentation reps_array[] = {MachineRepresentation::kNone,
+                                          reps...};
+    for (size_t i = 0; i < VarCount; i++) {
+      representations_[i] = reps_array[i + 1];
+    }
+  }
+
+  ~GraphAssemblerStaticLabel() { DCHECK(IsBound() || MergedCount() == 0); }
+
+ private:
+  friend class GraphAssembler;
+
+  void SetBound() {
+    DCHECK(!IsBound());
+    DCHECK_EQ(merged_count_, MergeCount);
+    is_bound_ = true;
+  }
+  bool IsBound() const { return is_bound_; }
+
+  size_t PhiCount() const { return VarCount; }
+  size_t MaxMergeCount() const { return MergeCount; }
+  size_t MergedCount() const { return merged_count_; }
+  bool IsDeferred() const { return is_deferred_; }
+
+  // For each phi, the buffer must have at least MaxMergeCount() + 1
+  // node entries.
+  Node** GetBindingsPtrFor(size_t phi_index) {
+    DCHECK_LT(phi_index, PhiCount());
+    return &bindings_[phi_index * (MergeCount + 1)];
+  }
+  void SetBinding(size_t phi_index, size_t merge_index, Node* binding) {
+    DCHECK_LT(phi_index, PhiCount());
+    DCHECK_LT(merge_index, MergeCount);
+    bindings_[phi_index * (MergeCount + 1) + merge_index] = binding;
+  }
+  MachineRepresentation GetRepresentationFor(size_t phi_index) {
+    DCHECK_LT(phi_index, PhiCount());
+    return representations_[phi_index];
+  }
+  // The controls buffer must have at least MaxMergeCount() entries.
+  Node** GetControlsPtr() { return controls_; }
+  // The effects buffer must have at least MaxMergeCount() + 1 entries.
+  Node** GetEffectsPtr() { return effects_; }
+  void IncrementMergedCount() { merged_count_++; }
+
+  bool is_bound_ = false;
+  bool is_deferred_;
+  size_t merged_count_ = 0;
+  Node* effects_[MergeCount + 1];  // Extra element for control edge,
+                                   // so that we can use the array to
+                                   // construct EffectPhi.
+  Node* controls_[MergeCount];
+  Node* bindings_[(MergeCount + 1) * VarCount + 1];
+  MachineRepresentation representations_[VarCount + 1];
+};
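+
+// Usage sketch for the static label above, assuming an assembler instance
+// "gasm"; the value names are illustrative:
+//
+//   auto done = GraphAssembler::MakeLabel<2>(MachineRepresentation::kTagged);
+//   gasm->GotoIf(condition, &done, vtrue);
+//   gasm->Goto(&done, vfalse);
+//   gasm->Bind(&done);
+//   Node* result = done.PhiAt(0);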
+
+// General label (with zone-allocated buffers for incoming branches and phi
+// inputs).
+class GraphAssemblerLabel {
+ public:
+  Node* PhiAt(size_t index);
+
+  GraphAssemblerLabel(GraphAssemblerLabelType is_deferred, size_t merge_count,
+                      size_t var_count, MachineRepresentation* representations,
+                      Zone* zone);
+
+  ~GraphAssemblerLabel();
+
+ private:
+  friend class GraphAssembler;
+
+  void SetBound() {
+    DCHECK(!is_bound_);
+    is_bound_ = true;
+  }
+  bool IsBound() const { return is_bound_; }
+  size_t PhiCount() const { return var_count_; }
+  size_t MaxMergeCount() const { return max_merge_count_; }
+  size_t MergedCount() const { return merged_count_; }
+  bool IsDeferred() const { return is_deferred_; }
+
+  // For each phi, the buffer must have at least MaxMergeCount() + 1
+  // node entries.
+  Node** GetBindingsPtrFor(size_t phi_index);
+  void SetBinding(size_t phi_index, size_t merge_index, Node* binding);
+  MachineRepresentation GetRepresentationFor(size_t phi_index);
+  // The controls buffer must have at least MaxMergeCount() entries.
+  Node** GetControlsPtr();
+  // The effects buffer must have at least MaxMergeCount() + 1 entries.
+  Node** GetEffectsPtr();
+  void IncrementMergedCount() { merged_count_++; }
+
+  bool is_bound_ = false;
+  bool is_deferred_;
+  size_t merged_count_ = 0;
+  size_t max_merge_count_;
+  size_t var_count_;
+  Node** effects_ = nullptr;
+  Node** controls_ = nullptr;
+  Node** bindings_ = nullptr;
+  MachineRepresentation* representations_ = nullptr;
+};
+
+class GraphAssembler {
+ public:
+  GraphAssembler(JSGraph* jsgraph, Node* effect, Node* control, Zone* zone);
+
+  void Reset(Node* effect, Node* control);
+
+  // Create non-deferred label with statically known number of incoming
+  // gotos/branches.
+  template <size_t MergeCount, typename... Reps>
+  static GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)> MakeLabel(
+      Reps... reps) {
+    return GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)>(
+        GraphAssemblerLabelType::kNonDeferred, reps...);
+  }
+
+  // Create deferred label with statically known number of incoming
+  // gotos/branches.
+  template <size_t MergeCount, typename... Reps>
+  static GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)>
+  MakeDeferredLabel(Reps... reps) {
+    return GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)>(
+        GraphAssemblerLabelType::kDeferred, reps...);
+  }
+
+  // Create label with number of incoming branches supplied at runtime.
+  template <typename... Reps>
+  GraphAssemblerLabel MakeLabelFor(GraphAssemblerLabelType is_deferred,
+                                   size_t merge_count, Reps... reps) {
+    MachineRepresentation reps_array[] = {MachineRepresentation::kNone,
+                                          reps...};
+    return GraphAssemblerLabel(is_deferred, merge_count, sizeof...(reps),
+                               &(reps_array[1]), temp_zone());
+  }
+
+  // Value creation.
+  Node* IntPtrConstant(intptr_t value);
+  Node* Uint32Constant(int32_t value);
+  Node* Int32Constant(int32_t value);
+  Node* UniqueInt32Constant(int32_t value);
+  Node* SmiConstant(int32_t value);
+  Node* Float64Constant(double value);
+  Node* Projection(int index, Node* value);
+  Node* HeapConstant(Handle<HeapObject> object);
+  Node* CEntryStubConstant(int result_size);
+  Node* ExternalConstant(ExternalReference ref);
+
+#define SINGLETON_CONST_DECL(Name) Node* Name();
+  JSGRAPH_SINGLETON_CONSTANT_LIST(SINGLETON_CONST_DECL)
+#undef SINGLETON_CONST_DECL
+
+#define PURE_UNOP_DECL(Name) Node* Name(Node* input);
+  PURE_ASSEMBLER_MACH_UNOP_LIST(PURE_UNOP_DECL)
+#undef PURE_UNOP_DECL
+
+#define BINOP_DECL(Name) Node* Name(Node* left, Node* right);
+  PURE_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
+  CHECKED_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
+#undef BINOP_DECL
+
+  Node* Float64RoundDown(Node* value);
+
+  Node* ToNumber(Node* value);
+  Node* Allocate(PretenureFlag pretenure, Node* size);
+  Node* LoadField(FieldAccess const&, Node* object);
+  Node* LoadElement(ElementAccess const&, Node* object, Node* index);
+  Node* StoreField(FieldAccess const&, Node* object, Node* value);
+  Node* StoreElement(ElementAccess const&, Node* object, Node* index,
+                     Node* value);
+
+  Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value);
+  Node* Load(MachineType rep, Node* object, Node* offset);
+
+  Node* Retain(Node* buffer);
+  Node* UnsafePointerAdd(Node* base, Node* external);
+
+  Node* DeoptimizeIf(DeoptimizeReason reason, Node* condition,
+                     Node* frame_state);
+  Node* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
+                         Node* condition, Node* frame_state);
+  Node* DeoptimizeUnless(DeoptimizeReason reason, Node* condition,
+                         Node* frame_state);
+  template <typename... Args>
+  Node* Call(const CallDescriptor* desc, Args... args);
+  template <typename... Args>
+  Node* Call(const Operator* op, Args... args);
+
+  // Basic control operations.
+  template <class LabelType>
+  void Bind(LabelType* label);
+
+  template <class LabelType, typename... Vars>
+  void Goto(LabelType* label, Vars...);
+
+  void Branch(Node* condition, GraphAssemblerStaticLabel<1>* if_true,
+              GraphAssemblerStaticLabel<1>* if_false);
+
+  // Control helpers.
+  // {GotoIf(c, l)} is equivalent to {Branch(c, l, templ); Bind(templ)}.
+  template <class LabelType, typename... Vars>
+  void GotoIf(Node* condition, LabelType* label, Vars...);
+
+  // {GotoUnless(c, l)} is equivalent to {Branch(c, templ, l); Bind(templ)}.
+  template <class LabelType, typename... Vars>
+  void GotoUnless(Node* condition, LabelType* label, Vars...);
+
+  // Extractors (only use these when destroying or resetting the assembler).
+  Node* ExtractCurrentControl();
+  Node* ExtractCurrentEffect();
+
+ private:
+  template <class LabelType, typename... Vars>
+  void MergeState(LabelType label, Vars... vars);
+
+  Operator const* ToNumberOperator();
+
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Graph* graph() const { return jsgraph_->graph(); }
+  Zone* temp_zone() const { return temp_zone_; }
+  CommonOperatorBuilder* common() const { return jsgraph()->common(); }
+  MachineOperatorBuilder* machine() const { return jsgraph()->machine(); }
+  SimplifiedOperatorBuilder* simplified() const {
+    return jsgraph()->simplified();
+  }
+
+  SetOncePointer<Operator const> to_number_operator_;
+  Zone* temp_zone_;
+  JSGraph* jsgraph_;
+  Node* current_effect_;
+  Node* current_control_;
+};
+
+template <size_t MergeCount, size_t VarCount>
+Node* GraphAssemblerStaticLabel<MergeCount, VarCount>::PhiAt(size_t index) {
+  DCHECK(IsBound());
+  return GetBindingsPtrFor(index)[0];
+}
+
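+// Note that {LabelType} is deduced as a pointer type here, so {label} below
+// is a pointer to the label being merged into.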
+template <class LabelType, typename... Vars>
+void GraphAssembler::MergeState(LabelType label, Vars... vars) {
+  DCHECK(!label->IsBound());
+  size_t merged_count = label->MergedCount();
+  DCHECK_LT(merged_count, label->MaxMergeCount());
+  DCHECK_EQ(label->PhiCount(), sizeof...(vars));
+  label->GetEffectsPtr()[merged_count] = current_effect_;
+  label->GetControlsPtr()[merged_count] = current_control_;
+  // We need to start with nullptr to avoid 0-length arrays.
+  Node* var_array[] = {nullptr, vars...};
+  for (size_t i = 0; i < sizeof...(vars); i++) {
+    label->SetBinding(i, merged_count, var_array[i + 1]);
+  }
+  label->IncrementMergedCount();
+}
+
+template <class LabelType>
+void GraphAssembler::Bind(LabelType* label) {
+  DCHECK(current_control_ == nullptr);
+  DCHECK(current_effect_ == nullptr);
+  DCHECK(label->MaxMergeCount() > 0);
+  DCHECK_EQ(label->MaxMergeCount(), label->MergedCount());
+
+  int merge_count = static_cast<int>(label->MaxMergeCount());
+  if (merge_count == 1) {
+    current_control_ = label->GetControlsPtr()[0];
+    current_effect_ = label->GetEffectsPtr()[0];
+    label->SetBound();
+    return;
+  }
+
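+  // Multiple predecessors: merge the incoming controls, then introduce
+  // effect and value phis as needed.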
+  current_control_ = graph()->NewNode(common()->Merge(merge_count), merge_count,
+                                      label->GetControlsPtr());
+
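+  // An EffectPhi is only needed if the incoming effects differ; its final
+  // input slot (see GetEffectsPtr) receives the merge control.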
+  Node** effects = label->GetEffectsPtr();
+  current_effect_ = effects[0];
+  for (size_t i = 1; i < label->MaxMergeCount(); i++) {
+    if (current_effect_ != effects[i]) {
+      effects[label->MaxMergeCount()] = current_control_;
+      current_effect_ = graph()->NewNode(common()->EffectPhi(merge_count),
+                                         merge_count + 1, effects);
+      break;
+    }
+  }
+
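+  // Each bindings array has one extra slot for the control input; slot 0 is
+  // then overwritten with the resulting Phi, which is what PhiAt() returns.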
+  for (size_t var = 0; var < label->PhiCount(); var++) {
+    Node** bindings = label->GetBindingsPtrFor(var);
+    bindings[label->MaxMergeCount()] = current_control_;
+    bindings[0] = graph()->NewNode(
+        common()->Phi(label->GetRepresentationFor(var), merge_count),
+        merge_count + 1, bindings);
+  }
+
+  label->SetBound();
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::Goto(LabelType* label, Vars... vars) {
+  DCHECK_NOT_NULL(current_control_);
+  DCHECK_NOT_NULL(current_effect_);
+  MergeState(label, vars...);
+  current_control_ = nullptr;
+  current_effect_ = nullptr;
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::GotoIf(Node* condition, LabelType* label, Vars... vars) {
+  BranchHint hint =
+      label->IsDeferred() ? BranchHint::kFalse : BranchHint::kNone;
+  Node* branch =
+      graph()->NewNode(common()->Branch(hint), condition, current_control_);
+
+  current_control_ = graph()->NewNode(common()->IfTrue(), branch);
+  MergeState(label, vars...);
+
+  current_control_ = graph()->NewNode(common()->IfFalse(), branch);
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::GotoUnless(Node* condition, LabelType* label,
+                                Vars... vars) {
+  BranchHint hint = label->IsDeferred() ? BranchHint::kTrue : BranchHint::kNone;
+  Node* branch =
+      graph()->NewNode(common()->Branch(hint), condition, current_control_);
+
+  current_control_ = graph()->NewNode(common()->IfFalse(), branch);
+  MergeState(label, vars...);
+
+  current_control_ = graph()->NewNode(common()->IfTrue(), branch);
+}
+
+template <typename... Args>
+Node* GraphAssembler::Call(const CallDescriptor* desc, Args... args) {
+  const Operator* op = common()->Call(desc);
+  return Call(op, args...);
+}
+
+template <typename... Args>
+Node* GraphAssembler::Call(const Operator* op, Args... args) {
+  DCHECK_EQ(IrOpcode::kCall, op->opcode());
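+  // Value inputs come first, followed by the current effect and control, as
+  // expected by the kCall operator.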
+  Node* args_array[] = {args..., current_effect_, current_control_};
+  int size = static_cast<int>(sizeof...(args)) + op->EffectInputCount() +
+             op->ControlInputCount();
+  Node* call = graph()->NewNode(op, size, args_array);
+  DCHECK_EQ(0, op->ControlOutputCount());
+  current_effect_ = call;
+  return call;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GRAPH_ASSEMBLER_H_
diff --git a/src/compiler/graph-reducer.cc b/src/compiler/graph-reducer.cc
index b13b954..117e569 100644
--- a/src/compiler/graph-reducer.cc
+++ b/src/compiler/graph-reducer.cc
@@ -25,15 +25,17 @@
 
 void Reducer::Finalize() {}
 
-
 GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead)
     : graph_(graph),
       dead_(dead),
       state_(graph, 4),
       reducers_(zone),
       revisit_(zone),
-      stack_(zone) {}
-
+      stack_(zone) {
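+  // Give {dead} a type up front, since reductions may substitute it for
+  // typed nodes.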
+  if (dead != nullptr) {
+    NodeProperties::SetType(dead_, Type::None());
+  }
+}
 
 GraphReducer::~GraphReducer() {}
 
@@ -113,17 +115,23 @@
 
   if (node->IsDead()) return Pop();  // Node was killed while on stack.
 
+  Node::Inputs node_inputs = node->inputs();
+
   // Recurse on an input if necessary.
-  int start = entry.input_index < node->InputCount() ? entry.input_index : 0;
-  for (int i = start; i < node->InputCount(); i++) {
-    Node* input = node->InputAt(i);
-    entry.input_index = i + 1;
-    if (input != node && Recurse(input)) return;
+  int start = entry.input_index < node_inputs.count() ? entry.input_index : 0;
+  for (int i = start; i < node_inputs.count(); ++i) {
+    Node* input = node_inputs[i];
+    if (input != node && Recurse(input)) {
+      entry.input_index = i + 1;
+      return;
+    }
   }
-  for (int i = 0; i < start; i++) {
-    Node* input = node->InputAt(i);
-    entry.input_index = i + 1;
-    if (input != node && Recurse(input)) return;
+  for (int i = 0; i < start; ++i) {
+    Node* input = node_inputs[i];
+    if (input != node && Recurse(input)) {
+      entry.input_index = i + 1;
+      return;
+    }
   }
 
   // Remember the max node id before reduction.
@@ -139,10 +147,13 @@
   Node* const replacement = reduction.replacement();
   if (replacement == node) {
     // In-place update of {node}, may need to recurse on an input.
-    for (int i = 0; i < node->InputCount(); ++i) {
-      Node* input = node->InputAt(i);
-      entry.input_index = i + 1;
-      if (input != node && Recurse(input)) return;
+    Node::Inputs node_inputs = node->inputs();
+    for (int i = 0; i < node_inputs.count(); ++i) {
+      Node* input = node_inputs[i];
+      if (input != node && Recurse(input)) {
+        entry.input_index = i + 1;
+        return;
+      }
     }
   }
 
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index ab20f8f..2cd10a7 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -22,6 +22,7 @@
 #include "src/compiler/schedule.h"
 #include "src/compiler/scheduler.h"
 #include "src/interpreter/bytecodes.h"
+#include "src/objects-inl.h"
 #include "src/ostreams.h"
 
 namespace v8 {
@@ -34,9 +35,15 @@
   EmbeddedVector<char, 256> filename(0);
   std::unique_ptr<char[]> debug_name = info->GetDebugName();
   if (strlen(debug_name.get()) > 0) {
-    SNPrintF(filename, "turbo-%s", debug_name.get());
+    if (info->has_shared_info()) {
+      int attempt = info->shared_info()->opt_count();
+      SNPrintF(filename, "turbo-%s-%i", debug_name.get(), attempt);
+    } else {
+      SNPrintF(filename, "turbo-%s", debug_name.get());
+    }
   } else if (info->has_shared_info()) {
-    SNPrintF(filename, "turbo-%p", static_cast<void*>(info));
+    int attempt = info->shared_info()->opt_count();
+    SNPrintF(filename, "turbo-%p-%i", static_cast<void*>(info), attempt);
   } else {
     SNPrintF(filename, "turbo-none-%s", phase);
   }
@@ -497,7 +504,11 @@
         if (positions != nullptr) {
           SourcePosition position = positions->GetSourcePosition(node);
           if (position.IsKnown()) {
-            os_ << " pos:" << position.ScriptOffset();
+            os_ << " pos:";
+            if (position.isInlined()) {
+              os_ << "inlining(" << position.InliningId() << "),";
+            }
+            os_ << position.ScriptOffset();
           }
         }
         os_ << " <|@\n";
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
index 1e861c7..6fb7cfa 100644
--- a/src/compiler/graph.h
+++ b/src/compiler/graph.h
@@ -104,6 +104,59 @@
     Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9};
     return NewNode(op, arraysize(nodes), nodes);
   }
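+  // Convenience overloads for up to 17 fixed inputs; all of them forward to
+  // the array-based NewNode above.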
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11, Node* n12) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11, Node* n12, Node* n13) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11, Node* n12, Node* n13, Node* n14) {
+    Node* nodes[] = {n1, n2, n3,  n4,  n5,  n6,  n7,
+                     n8, n9, n10, n11, n12, n13, n14};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11, Node* n12, Node* n13, Node* n14, Node* n15) {
+    Node* nodes[] = {n1, n2,  n3,  n4,  n5,  n6,  n7, n8,
+                     n9, n10, n11, n12, n13, n14, n15};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11, Node* n12, Node* n13, Node* n14, Node* n15,
+                Node* n16) {
+    Node* nodes[] = {n1, n2,  n3,  n4,  n5,  n6,  n7,  n8,
+                     n9, n10, n11, n12, n13, n14, n15, n16};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11, Node* n12, Node* n13, Node* n14, Node* n15,
+                Node* n16, Node* n17) {
+    Node* nodes[] = {n1,  n2,  n3,  n4,  n5,  n6,  n7,  n8, n9,
+                     n10, n11, n12, n13, n14, n15, n16, n17};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
 
   // Clone the {node}, and assign a new node id to the copy.
   Node* CloneNode(const Node* node);
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index 20afdc1..3696990 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -66,9 +66,7 @@
   Immediate ToImmediate(InstructionOperand* operand) {
     Constant constant = ToConstant(operand);
     if (constant.type() == Constant::kInt32 &&
-        (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-         constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
-         constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+        RelocInfo::IsWasmReference(constant.rmode())) {
       return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
                        constant.rmode());
     }
@@ -185,10 +183,9 @@
   return instr->InputAt(index)->IsImmediate();
 }
 
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
+class OutOfLineLoadZero final : public OutOfLineCode {
  public:
-  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+  OutOfLineLoadZero(CodeGenerator* gen, Register result)
       : OutOfLineCode(gen), result_(result) {}
 
   void Generate() final { __ xor_(result_, result_); }
@@ -286,68 +283,423 @@
 
 }  // namespace
 
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN)      \
-  do {                                                                \
-    auto result = i.OutputDoubleRegister();                           \
-    auto offset = i.InputRegister(0);                                 \
-    if (instr->InputAt(1)->IsRegister()) {                            \
-      __ cmp(offset, i.InputRegister(1));                             \
-    } else {                                                          \
-      __ cmp(offset, i.InputImmediate(1));                            \
-    }                                                                 \
-    OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
-    __ j(above_equal, ool->entry());                                  \
-    __ asm_instr(result, i.MemoryOperand(2));                         \
-    __ bind(ool->exit());                                             \
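+// The checked-load macros below handle two cases: a register offset that is
+// compared against the length directly, and (for wasm) a constant index part
+// folded into the bounds check, with the full check redone on an out-of-line
+// slow path.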
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN,              \
+                                    SingleOrDouble)                           \
+  do {                                                                        \
+    auto result = i.OutputDoubleRegister();                                   \
+    if (instr->InputAt(0)->IsRegister()) {                                    \
+      auto offset = i.InputRegister(0);                                       \
+      if (instr->InputAt(1)->IsRegister()) {                                  \
+        __ cmp(offset, i.InputRegister(1));                                   \
+      } else {                                                                \
+        __ cmp(offset, i.InputImmediate(1));                                  \
+      }                                                                       \
+      OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result);       \
+      __ j(above_equal, ool->entry());                                        \
+      __ asm_instr(result, i.MemoryOperand(2));                               \
+      __ bind(ool->exit());                                                   \
+    } else {                                                                  \
+      auto index2 = i.InputInt32(0);                                          \
+      auto length = i.InputInt32(1);                                          \
+      auto index1 = i.InputRegister(2);                                       \
+      RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
+      RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(3)).rmode(); \
+      DCHECK_LE(index2, length);                                              \
+      __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2),    \
+                               rmode_length));                                \
+      class OutOfLineLoadFloat final : public OutOfLineCode {                 \
+       public:                                                                \
+        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,            \
+                           Register buffer, Register index1, int32_t index2,  \
+                           int32_t length, RelocInfo::Mode rmode_length,      \
+                           RelocInfo::Mode rmode_buffer)                      \
+            : OutOfLineCode(gen),                                             \
+              result_(result),                                                \
+              buffer_reg_(buffer),                                            \
+              buffer_int_(0),                                                 \
+              index1_(index1),                                                \
+              index2_(index2),                                                \
+              length_(length),                                                \
+              rmode_length_(rmode_length),                                    \
+              rmode_buffer_(rmode_buffer) {}                                  \
+                                                                              \
+        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,            \
+                           int32_t buffer, Register index1, int32_t index2,   \
+                           int32_t length, RelocInfo::Mode rmode_length,      \
+                           RelocInfo::Mode rmode_buffer)                      \
+            : OutOfLineCode(gen),                                             \
+              result_(result),                                                \
+              buffer_reg_({-1}),                                              \
+              buffer_int_(buffer),                                            \
+              index1_(index1),                                                \
+              index2_(index2),                                                \
+              length_(length),                                                \
+              rmode_length_(rmode_length),                                    \
+              rmode_buffer_(rmode_buffer) {}                                  \
+                                                                              \
+        void Generate() final {                                               \
+          Label oob;                                                          \
+          __ push(index1_);                                                   \
+          __ lea(index1_, Operand(index1_, index2_));                         \
+          __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_),       \
+                                    rmode_length_));                          \
+          __ j(above_equal, &oob, Label::kNear);                              \
+          if (buffer_reg_.is_valid()) {                                       \
+            __ asm_instr(result_, Operand(buffer_reg_, index1_, times_1, 0)); \
+          } else {                                                            \
+            __ asm_instr(result_,                                             \
+                         Operand(index1_, buffer_int_, rmode_buffer_));       \
+          }                                                                   \
+          __ pop(index1_);                                                    \
+          __ jmp(exit());                                                     \
+          __ bind(&oob);                                                      \
+          __ pop(index1_);                                                    \
+          __ xorp##SingleOrDouble(result_, result_);                          \
+          __ divs##SingleOrDouble(result_, result_);                          \
+        }                                                                     \
+                                                                              \
+       private:                                                               \
+        XMMRegister const result_;                                            \
+        Register const buffer_reg_;                                           \
+        int32_t const buffer_int_;                                            \
+        Register const index1_;                                               \
+        int32_t const index2_;                                                \
+        int32_t const length_;                                                \
+        RelocInfo::Mode rmode_length_;                                        \
+        RelocInfo::Mode rmode_buffer_;                                        \
+      };                                                                      \
+      if (instr->InputAt(3)->IsRegister()) {                                  \
+        auto buffer = i.InputRegister(3);                                     \
+        OutOfLineCode* ool = new (zone())                                     \
+            OutOfLineLoadFloat(this, result, buffer, index1, index2, length,  \
+                               rmode_length, rmode_buffer);                   \
+        __ j(above_equal, ool->entry());                                      \
+        __ asm_instr(result, Operand(buffer, index1, times_1, index2));       \
+        __ bind(ool->exit());                                                 \
+      } else {                                                                \
+        auto buffer = i.InputInt32(3);                                        \
+        OutOfLineCode* ool = new (zone())                                     \
+            OutOfLineLoadFloat(this, result, buffer, index1, index2, length,  \
+                               rmode_length, rmode_buffer);                   \
+        __ j(above_equal, ool->entry());                                      \
+        __ asm_instr(result, Operand(index1, buffer + index2, rmode_buffer)); \
+        __ bind(ool->exit());                                                 \
+      }                                                                       \
+    }                                                                         \
   } while (false)
 
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                          \
-  do {                                                                    \
-    auto result = i.OutputRegister();                                     \
-    auto offset = i.InputRegister(0);                                     \
-    if (instr->InputAt(1)->IsRegister()) {                                \
-      __ cmp(offset, i.InputRegister(1));                                 \
-    } else {                                                              \
-      __ cmp(offset, i.InputImmediate(1));                                \
-    }                                                                     \
-    OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
-    __ j(above_equal, ool->entry());                                      \
-    __ asm_instr(result, i.MemoryOperand(2));                             \
-    __ bind(ool->exit());                                                 \
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
+  do {                                                                         \
+    auto result = i.OutputRegister();                                          \
+    if (instr->InputAt(0)->IsRegister()) {                                     \
+      auto offset = i.InputRegister(0);                                        \
+      if (instr->InputAt(1)->IsRegister()) {                                   \
+        __ cmp(offset, i.InputRegister(1));                                    \
+      } else {                                                                 \
+        __ cmp(offset, i.InputImmediate(1));                                   \
+      }                                                                        \
+      OutOfLineCode* ool = new (zone()) OutOfLineLoadZero(this, result);       \
+      __ j(above_equal, ool->entry());                                         \
+      __ asm_instr(result, i.MemoryOperand(2));                                \
+      __ bind(ool->exit());                                                    \
+    } else {                                                                   \
+      auto index2 = i.InputInt32(0);                                           \
+      auto length = i.InputInt32(1);                                           \
+      auto index1 = i.InputRegister(2);                                        \
+      RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode();  \
+      RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(3)).rmode();  \
+      DCHECK_LE(index2, length);                                               \
+      __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2),     \
+                               rmode_length));                                 \
+      class OutOfLineLoadInteger final : public OutOfLineCode {                \
+       public:                                                                 \
+        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
+                             Register buffer, Register index1, int32_t index2, \
+                             int32_t length, RelocInfo::Mode rmode_length,     \
+                             RelocInfo::Mode rmode_buffer)                     \
+            : OutOfLineCode(gen),                                              \
+              result_(result),                                                 \
+              buffer_reg_(buffer),                                             \
+              buffer_int_(0),                                                  \
+              index1_(index1),                                                 \
+              index2_(index2),                                                 \
+              length_(length),                                                 \
+              rmode_length_(rmode_length),                                     \
+              rmode_buffer_(rmode_buffer) {}                                   \
+                                                                               \
+        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
+                             int32_t buffer, Register index1, int32_t index2,  \
+                             int32_t length, RelocInfo::Mode rmode_length,     \
+                             RelocInfo::Mode rmode_buffer)                     \
+            : OutOfLineCode(gen),                                              \
+              result_(result),                                                 \
+              buffer_reg_({-1}),                                               \
+              buffer_int_(buffer),                                             \
+              index1_(index1),                                                 \
+              index2_(index2),                                                 \
+              length_(length),                                                 \
+              rmode_length_(rmode_length),                                     \
+              rmode_buffer_(rmode_buffer) {}                                   \
+                                                                               \
+        void Generate() final {                                                \
+          Label oob;                                                           \
+          bool need_cache = !result_.is(index1_);                              \
+          if (need_cache) __ push(index1_);                                    \
+          __ lea(index1_, Operand(index1_, index2_));                          \
+          __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_),        \
+                                    rmode_length_));                           \
+          __ j(above_equal, &oob, Label::kNear);                               \
+          if (buffer_reg_.is_valid()) {                                        \
+            __ asm_instr(result_, Operand(buffer_reg_, index1_, times_1, 0));  \
+          } else {                                                             \
+            __ asm_instr(result_,                                              \
+                         Operand(index1_, buffer_int_, rmode_buffer_));        \
+          }                                                                    \
+          if (need_cache) __ pop(index1_);                                     \
+          __ jmp(exit());                                                      \
+          __ bind(&oob);                                                       \
+          if (need_cache) __ pop(index1_);                                     \
+          __ xor_(result_, result_);                                           \
+        }                                                                      \
+                                                                               \
+       private:                                                                \
+        Register const result_;                                                \
+        Register const buffer_reg_;                                            \
+        int32_t const buffer_int_;                                             \
+        Register const index1_;                                                \
+        int32_t const index2_;                                                 \
+        int32_t const length_;                                                 \
+        RelocInfo::Mode rmode_length_;                                         \
+        RelocInfo::Mode rmode_buffer_;                                         \
+      };                                                                       \
+      if (instr->InputAt(3)->IsRegister()) {                                   \
+        auto buffer = i.InputRegister(3);                                      \
+        OutOfLineCode* ool = new (zone())                                      \
+            OutOfLineLoadInteger(this, result, buffer, index1, index2, length, \
+                                 rmode_length, rmode_buffer);                  \
+        __ j(above_equal, ool->entry());                                       \
+        __ asm_instr(result, Operand(buffer, index1, times_1, index2));        \
+        __ bind(ool->exit());                                                  \
+      } else {                                                                 \
+        auto buffer = i.InputInt32(3);                                         \
+        OutOfLineCode* ool = new (zone())                                      \
+            OutOfLineLoadInteger(this, result, buffer, index1, index2, length, \
+                                 rmode_length, rmode_buffer);                  \
+        __ j(above_equal, ool->entry());                                       \
+        __ asm_instr(result, Operand(index1, buffer + index2, rmode_buffer));  \
+        __ bind(ool->exit());                                                  \
+      }                                                                        \
+    }                                                                          \
   } while (false)
 
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                 \
-  do {                                                          \
-    auto offset = i.InputRegister(0);                           \
-    if (instr->InputAt(1)->IsRegister()) {                      \
-      __ cmp(offset, i.InputRegister(1));                       \
-    } else {                                                    \
-      __ cmp(offset, i.InputImmediate(1));                      \
-    }                                                           \
-    Label done;                                                 \
-    __ j(above_equal, &done, Label::kNear);                     \
-    __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
-    __ bind(&done);                                             \
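+// Checked stores mirror the checked loads above, except that an out-of-bounds
+// store is simply skipped instead of producing a NaN or zero result.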
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                               \
+  do {                                                                        \
+    auto value = i.InputDoubleRegister(2);                                    \
+    if (instr->InputAt(0)->IsRegister()) {                                    \
+      auto offset = i.InputRegister(0);                                       \
+      if (instr->InputAt(1)->IsRegister()) {                                  \
+        __ cmp(offset, i.InputRegister(1));                                   \
+      } else {                                                                \
+        __ cmp(offset, i.InputImmediate(1));                                  \
+      }                                                                       \
+      Label done;                                                             \
+      __ j(above_equal, &done, Label::kNear);                                 \
+      __ asm_instr(i.MemoryOperand(3), value);                                \
+      __ bind(&done);                                                         \
+    } else {                                                                  \
+      auto index2 = i.InputInt32(0);                                          \
+      auto length = i.InputInt32(1);                                          \
+      auto index1 = i.InputRegister(3);                                       \
+      RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
+      RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(4)).rmode(); \
+      DCHECK_LE(index2, length);                                              \
+      __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2),    \
+                               rmode_length));                                \
+      class OutOfLineStoreFloat final : public OutOfLineCode {                \
+       public:                                                                \
+        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,              \
+                            Register index1, int32_t index2, int32_t length,  \
+                            XMMRegister value, RelocInfo::Mode rmode_length,  \
+                            RelocInfo::Mode rmode_buffer)                     \
+            : OutOfLineCode(gen),                                             \
+              buffer_reg_(buffer),                                            \
+              buffer_int_(0),                                                 \
+              index1_(index1),                                                \
+              index2_(index2),                                                \
+              length_(length),                                                \
+              value_(value),                                                  \
+              rmode_length_(rmode_length),                                    \
+              rmode_buffer_(rmode_buffer) {}                                  \
+                                                                              \
+        OutOfLineStoreFloat(CodeGenerator* gen, int32_t buffer,               \
+                            Register index1, int32_t index2, int32_t length,  \
+                            XMMRegister value, RelocInfo::Mode rmode_length,  \
+                            RelocInfo::Mode rmode_buffer)                     \
+            : OutOfLineCode(gen),                                             \
+              buffer_reg_({-1}),                                              \
+              buffer_int_(buffer),                                            \
+              index1_(index1),                                                \
+              index2_(index2),                                                \
+              length_(length),                                                \
+              value_(value),                                                  \
+              rmode_length_(rmode_length),                                    \
+              rmode_buffer_(rmode_buffer) {}                                  \
+                                                                              \
+        void Generate() final {                                               \
+          Label oob;                                                          \
+          __ push(index1_);                                                   \
+          __ lea(index1_, Operand(index1_, index2_));                         \
+          __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_),       \
+                                    rmode_length_));                          \
+          __ j(above_equal, &oob, Label::kNear);                              \
+          if (buffer_reg_.is_valid()) {                                       \
+            __ asm_instr(Operand(buffer_reg_, index1_, times_1, 0), value_);  \
+          } else {                                                            \
+            __ asm_instr(Operand(index1_, buffer_int_, rmode_buffer_),        \
+                         value_);                                             \
+          }                                                                   \
+          __ bind(&oob);                                                      \
+          __ pop(index1_);                                                    \
+        }                                                                     \
+                                                                              \
+       private:                                                               \
+        Register const buffer_reg_;                                           \
+        int32_t const buffer_int_;                                            \
+        Register const index1_;                                               \
+        int32_t const index2_;                                                \
+        int32_t const length_;                                                \
+        XMMRegister const value_;                                             \
+        RelocInfo::Mode rmode_length_;                                        \
+        RelocInfo::Mode rmode_buffer_;                                        \
+      };                                                                      \
+      if (instr->InputAt(4)->IsRegister()) {                                  \
+        auto buffer = i.InputRegister(4);                                     \
+        OutOfLineCode* ool = new (zone())                                     \
+            OutOfLineStoreFloat(this, buffer, index1, index2, length, value,  \
+                                rmode_length, rmode_buffer);                  \
+        __ j(above_equal, ool->entry());                                      \
+        __ asm_instr(Operand(buffer, index1, times_1, index2), value);        \
+        __ bind(ool->exit());                                                 \
+      } else {                                                                \
+        auto buffer = i.InputInt32(4);                                        \
+        OutOfLineCode* ool = new (zone())                                     \
+            OutOfLineStoreFloat(this, buffer, index1, index2, length, value,  \
+                                rmode_length, rmode_buffer);                  \
+        __ j(above_equal, ool->entry());                                      \
+        __ asm_instr(Operand(index1, buffer + index2, rmode_buffer), value);  \
+        __ bind(ool->exit());                                                 \
+      }                                                                       \
+    }                                                                         \
   } while (false)
 
+#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
+  do {                                                                         \
+    if (instr->InputAt(0)->IsRegister()) {                                     \
+      auto offset = i.InputRegister(0);                                        \
+      if (instr->InputAt(1)->IsRegister()) {                                   \
+        __ cmp(offset, i.InputRegister(1));                                    \
+      } else {                                                                 \
+        __ cmp(offset, i.InputImmediate(1));                                   \
+      }                                                                        \
+      Label done;                                                              \
+      __ j(above_equal, &done, Label::kNear);                                  \
+      __ asm_instr(i.MemoryOperand(3), value);                                 \
+      __ bind(&done);                                                          \
+    } else {                                                                   \
+      auto index2 = i.InputInt32(0);                                           \
+      auto length = i.InputInt32(1);                                           \
+      auto index1 = i.InputRegister(3);                                        \
+      RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode();  \
+      RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(4)).rmode();  \
+      DCHECK_LE(index2, length);                                               \
+      __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2),     \
+                               rmode_length));                                 \
+      class OutOfLineStoreInteger final : public OutOfLineCode {               \
+       public:                                                                 \
+        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
+                              Register index1, int32_t index2, int32_t length, \
+                              Value value, RelocInfo::Mode rmode_length,       \
+                              RelocInfo::Mode rmode_buffer)                    \
+            : OutOfLineCode(gen),                                              \
+              buffer_reg_(buffer),                                             \
+              buffer_int_(0),                                                  \
+              index1_(index1),                                                 \
+              index2_(index2),                                                 \
+              length_(length),                                                 \
+              value_(value),                                                   \
+              rmode_length_(rmode_length),                                     \
+              rmode_buffer_(rmode_buffer) {}                                   \
+                                                                               \
+        OutOfLineStoreInteger(CodeGenerator* gen, int32_t buffer,              \
+                              Register index1, int32_t index2, int32_t length, \
+                              Value value, RelocInfo::Mode rmode_length,       \
+                              RelocInfo::Mode rmode_buffer)                    \
+            : OutOfLineCode(gen),                                              \
+              buffer_reg_({-1}),                                               \
+              buffer_int_(buffer),                                             \
+              index1_(index1),                                                 \
+              index2_(index2),                                                 \
+              length_(length),                                                 \
+              value_(value),                                                   \
+              rmode_length_(rmode_length),                                     \
+              rmode_buffer_(rmode_buffer) {}                                   \
+                                                                               \
+        void Generate() final {                                                \
+          Label oob;                                                           \
+          __ push(index1_);                                                    \
+          __ lea(index1_, Operand(index1_, index2_));                          \
+          __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_),        \
+                                    rmode_length_));                           \
+          __ j(above_equal, &oob, Label::kNear);                               \
+          if (buffer_reg_.is_valid()) {                                        \
+            __ asm_instr(Operand(buffer_reg_, index1_, times_1, 0), value_);   \
+          } else {                                                             \
+            __ asm_instr(Operand(index1_, buffer_int_, rmode_buffer_),         \
+                         value_);                                              \
+          }                                                                    \
+          __ bind(&oob);                                                       \
+          __ pop(index1_);                                                     \
+        }                                                                      \
+                                                                               \
+       private:                                                                \
+        Register const buffer_reg_;                                            \
+        int32_t const buffer_int_;                                             \
+        Register const index1_;                                                \
+        int32_t const index2_;                                                 \
+        int32_t const length_;                                                 \
+        Value const value_;                                                    \
+        RelocInfo::Mode rmode_length_;                                         \
+        RelocInfo::Mode rmode_buffer_;                                         \
+      };                                                                       \
+      if (instr->InputAt(4)->IsRegister()) {                                   \
+        auto buffer = i.InputRegister(4);                                      \
+        OutOfLineCode* ool = new (zone())                                      \
+            OutOfLineStoreInteger(this, buffer, index1, index2, length, value, \
+                                  rmode_length, rmode_buffer);                 \
+        __ j(above_equal, ool->entry());                                       \
+        __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
+        __ bind(ool->exit());                                                  \
+      } else {                                                                 \
+        auto buffer = i.InputInt32(4);                                         \
+        OutOfLineCode* ool = new (zone())                                      \
+            OutOfLineStoreInteger(this, buffer, index1, index2, length, value, \
+                                  rmode_length, rmode_buffer);                 \
+        __ j(above_equal, ool->entry());                                       \
+        __ asm_instr(Operand(index1, buffer + index2, rmode_buffer), value);   \
+        __ bind(ool->exit());                                                  \
+      }                                                                        \
+    }                                                                          \
+  } while (false)
 
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)            \
-  do {                                                       \
-    auto offset = i.InputRegister(0);                        \
-    if (instr->InputAt(1)->IsRegister()) {                   \
-      __ cmp(offset, i.InputRegister(1));                    \
-    } else {                                                 \
-      __ cmp(offset, i.InputImmediate(1));                   \
-    }                                                        \
-    Label done;                                              \
-    __ j(above_equal, &done, Label::kNear);                  \
-    if (instr->InputAt(2)->IsRegister()) {                   \
-      __ asm_instr(i.MemoryOperand(3), i.InputRegister(2));  \
-    } else {                                                 \
-      __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
-    }                                                        \
-    __ bind(&done);                                          \
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
+  do {                                                           \
+    if (instr->InputAt(2)->IsRegister()) {                       \
+      Register value = i.InputRegister(2);                       \
+      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
+    } else {                                                     \
+      Immediate value = i.InputImmediate(2);                     \
+      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
+    }                                                            \
   } while (false)
 
 #define ASSEMBLE_COMPARE(asm_instr)                                   \
@@ -434,7 +786,7 @@
 
   // Check if current frame is an arguments adaptor frame.
   __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+         Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &done, Label::kNear);
 
   __ push(scratch1);
@@ -641,10 +993,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -896,10 +1246,10 @@
       } else {
         __ add(i.OutputRegister(0), i.InputRegister(2));
       }
-      __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
       if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
         __ Move(i.OutputRegister(1), i.InputRegister(1));
       }
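+      // Only add the carry once the output register holds the high word.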
+      __ adc(i.OutputRegister(1), Operand(i.InputRegister(3)));
       if (use_temp) {
         __ Move(i.OutputRegister(0), i.TempRegister(0));
       }
@@ -921,10 +1271,10 @@
       } else {
         __ sub(i.OutputRegister(0), i.InputRegister(2));
       }
-      __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
       if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
         __ Move(i.OutputRegister(1), i.InputRegister(1));
       }
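+      // Likewise, subtract the borrow only after the move to the output.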
+      __ sbb(i.OutputRegister(1), Operand(i.InputRegister(3)));
       if (use_temp) {
         __ Move(i.OutputRegister(0), i.TempRegister(0));
       }
@@ -1512,7 +1862,12 @@
       }
       break;
     case kIA32Push:
-      if (instr->InputAt(0)->IsFPRegister()) {
+      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
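+        // Memory operands can be pushed directly, without a scratch register.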
+        size_t index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ push(operand);
+        frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
+      } else if (instr->InputAt(0)->IsFPRegister()) {
         __ sub(esp, Immediate(kFloatSize));
         __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
         frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
@@ -1567,10 +1922,10 @@
       ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
       break;
     case kCheckedLoadFloat32:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN);
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN, s);
       break;
     case kCheckedLoadFloat64:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN);
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN, d);
       break;
     case kCheckedStoreWord8:
       ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
@@ -1611,61 +1966,66 @@
   return kSuccess;
 }  // NOLINT(readability/fn_size)
 
+static Condition FlagsConditionToCondition(FlagsCondition condition) {
+  switch (condition) {
+    case kUnorderedEqual:
+    case kEqual:
+      return equal;
+    case kUnorderedNotEqual:
+    case kNotEqual:
+      return not_equal;
+    case kSignedLessThan:
+      return less;
+    case kSignedGreaterThanOrEqual:
+      return greater_equal;
+    case kSignedLessThanOrEqual:
+      return less_equal;
+    case kSignedGreaterThan:
+      return greater;
+    case kUnsignedLessThan:
+      return below;
+    case kUnsignedGreaterThanOrEqual:
+      return above_equal;
+    case kUnsignedLessThanOrEqual:
+      return below_equal;
+    case kUnsignedGreaterThan:
+      return above;
+    case kOverflow:
+      return overflow;
+    case kNotOverflow:
+      return no_overflow;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
 
 // Assembles a branch after an instruction.
 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
-  IA32OperandConverter i(this, instr);
   Label::Distance flabel_distance =
       branch->fallthru ? Label::kNear : Label::kFar;
   Label* tlabel = branch->true_label;
   Label* flabel = branch->false_label;
-  switch (branch->condition) {
-    case kUnorderedEqual:
-      __ j(parity_even, flabel, flabel_distance);
-    // Fall through.
-    case kEqual:
-      __ j(equal, tlabel);
-      break;
-    case kUnorderedNotEqual:
-      __ j(parity_even, tlabel);
-    // Fall through.
-    case kNotEqual:
-      __ j(not_equal, tlabel);
-      break;
-    case kSignedLessThan:
-      __ j(less, tlabel);
-      break;
-    case kSignedGreaterThanOrEqual:
-      __ j(greater_equal, tlabel);
-      break;
-    case kSignedLessThanOrEqual:
-      __ j(less_equal, tlabel);
-      break;
-    case kSignedGreaterThan:
-      __ j(greater, tlabel);
-      break;
-    case kUnsignedLessThan:
-      __ j(below, tlabel);
-      break;
-    case kUnsignedGreaterThanOrEqual:
-      __ j(above_equal, tlabel);
-      break;
-    case kUnsignedLessThanOrEqual:
-      __ j(below_equal, tlabel);
-      break;
-    case kUnsignedGreaterThan:
-      __ j(above, tlabel);
-      break;
-    case kOverflow:
-      __ j(overflow, tlabel);
-      break;
-    case kNotOverflow:
-      __ j(no_overflow, tlabel);
-      break;
-    default:
-      UNREACHABLE();
-      break;
+  if (branch->condition == kUnorderedEqual) {
+    __ j(parity_even, flabel, flabel_distance);
+  } else if (branch->condition == kUnorderedNotEqual) {
+    __ j(parity_even, tlabel);
   }
+  __ j(FlagsConditionToCondition(branch->condition), tlabel);
+
   // Add a jump if not falling through to the next block.
   if (!branch->fallthru) __ jmp(flabel);
 }
@@ -1675,6 +2035,73 @@
   if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      IA32OperandConverter i(gen_, instr_);
+
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        __ PrepareCallCFunction(0, esi);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ ud2();
+        }
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Label end;
+  if (condition == kUnorderedEqual) {
+    __ j(parity_even, &end);
+  } else if (condition == kUnorderedNotEqual) {
+    __ j(parity_even, tlabel);
+  }
+  __ j(FlagsConditionToCondition(condition), tlabel);
+  __ bind(&end);
+}
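
AssembleArchTrap uses the OutOfLineCode idiom: the fast path stays a single
conditional jump, and the trap stub is emitted after the function body. Below
is a toy, self-contained model of that idiom; the Assembler type and its
string "instructions" are invented for illustration only.

#include <cstdio>
#include <functional>
#include <string>
#include <utility>
#include <vector>

struct Assembler {
  std::vector<std::string> code;
  std::vector<std::function<void()>> out_of_line;
  void Emit(std::string insn) { code.push_back(std::move(insn)); }
  void Defer(std::function<void()> gen) { out_of_line.push_back(std::move(gen)); }
  void Finish() {
    for (auto& gen : out_of_line) gen();  // cold stubs land after the body
  }
};

int main() {
  Assembler masm;
  masm.Emit("cmp eax, ebx");
  masm.Defer([&masm] { masm.Emit("trap_stub: call RuntimeTrap"); });
  masm.Emit("jo trap_stub");  // hot path: one branch to the deferred stub
  masm.Finish();
  for (const auto& insn : masm.code) std::printf("%s\n", insn.c_str());
}
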
 
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -1687,58 +2114,17 @@
   Label check;
   DCHECK_NE(0u, instr->OutputCount());
   Register reg = i.OutputRegister(instr->OutputCount() - 1);
-  Condition cc = no_condition;
-  switch (condition) {
-    case kUnorderedEqual:
-      __ j(parity_odd, &check, Label::kNear);
-      __ Move(reg, Immediate(0));
-      __ jmp(&done, Label::kNear);
-    // Fall through.
-    case kEqual:
-      cc = equal;
-      break;
-    case kUnorderedNotEqual:
-      __ j(parity_odd, &check, Label::kNear);
-      __ mov(reg, Immediate(1));
-      __ jmp(&done, Label::kNear);
-    // Fall through.
-    case kNotEqual:
-      cc = not_equal;
-      break;
-    case kSignedLessThan:
-      cc = less;
-      break;
-    case kSignedGreaterThanOrEqual:
-      cc = greater_equal;
-      break;
-    case kSignedLessThanOrEqual:
-      cc = less_equal;
-      break;
-    case kSignedGreaterThan:
-      cc = greater;
-      break;
-    case kUnsignedLessThan:
-      cc = below;
-      break;
-    case kUnsignedGreaterThanOrEqual:
-      cc = above_equal;
-      break;
-    case kUnsignedLessThanOrEqual:
-      cc = below_equal;
-      break;
-    case kUnsignedGreaterThan:
-      cc = above;
-      break;
-    case kOverflow:
-      cc = overflow;
-      break;
-    case kNotOverflow:
-      cc = no_overflow;
-      break;
-    default:
-      UNREACHABLE();
-      break;
+  if (condition == kUnorderedEqual) {
+    __ j(parity_odd, &check, Label::kNear);
+    __ Move(reg, Immediate(0));
+    __ jmp(&done, Label::kNear);
+  } else if (condition == kUnorderedNotEqual) {
+    __ j(parity_odd, &check, Label::kNear);
+    __ mov(reg, Immediate(1));
+    __ jmp(&done, Label::kNear);
   }
+  Condition cc = FlagsConditionToCondition(condition);
+
   __ bind(&check);
   if (reg.is_byte_register()) {
     // setcc for byte registers (al, bl, cl, dl).
@@ -1783,13 +2169,16 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
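
For reference, a stand-alone restatement (illustrative enum and function
names, not V8 declarations) of the mapping the rewritten
AssembleDeoptimizerCall now derives from the deoptimization id instead of
receiving as a parameter:

enum class DeoptimizeKind { kEager, kSoft };
enum class BailoutType { EAGER, SOFT };

inline BailoutType BailoutTypeFor(DeoptimizeKind kind) {
  // Soft deopts bail out as SOFT; every other kind maps to EAGER.
  return kind == DeoptimizeKind::kSoft ? BailoutType::SOFT : BailoutType::EAGER;
}
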
@@ -2082,7 +2471,7 @@
       __ Move(dst, g.ToImmediate(source));
     } else if (src_constant.type() == Constant::kFloat32) {
       // TODO(turbofan): Can we do better here?
-      uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
+      uint32_t src = src_constant.ToFloat32AsInt();
       if (destination->IsFPRegister()) {
         XMMRegister dst = g.ToDoubleRegister(destination);
         __ Move(dst, src);
@@ -2093,7 +2482,7 @@
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src_constant.type());
-      uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+      uint64_t src = src_constant.ToFloat64AsInt();
       uint32_t lower = static_cast<uint32_t>(src);
       uint32_t upper = static_cast<uint32_t>(src >> 32);
       if (destination->IsFPRegister()) {
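
The Float64 path above splits the 64-bit bit pattern into two 32-bit words,
since IA32 general-purpose moves cannot carry a 64-bit immediate. A minimal
stand-alone sketch of that word split (memcpy stands in for V8's bit_cast):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double value = 1.5;  // 0x3FF8000000000000 in IEEE-754
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  uint32_t lower = static_cast<uint32_t>(bits);
  uint32_t upper = static_cast<uint32_t>(bits >> 32);
  std::printf("upper=%08x lower=%08x\n", upper, lower);  // 3ff80000 00000000
}
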
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index c827c68..a5f72c7 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -234,6 +234,9 @@
       break;
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -324,6 +327,9 @@
         break;
       case MachineRepresentation::kWord64:   // Fall through.
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -351,6 +357,11 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk): Implement protected stores on IA32.
+  UNIMPLEMENTED();
+}
+
 // Architecture supports unaligned access, therefore VisitLoad is used instead
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 
@@ -386,10 +397,37 @@
     case MachineRepresentation::kTagged:         // Fall through.
     case MachineRepresentation::kWord64:         // Fall through.
     case MachineRepresentation::kSimd128:        // Fall through.
+    case MachineRepresentation::kSimd1x4:        // Fall through.
+    case MachineRepresentation::kSimd1x8:        // Fall through.
+    case MachineRepresentation::kSimd1x16:       // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
   }
+  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+    Int32BinopMatcher moffset(offset);
+    InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
+                                            ? g.UseImmediate(buffer)
+                                            : g.UseRegister(buffer);
+    Int32Matcher mlength(length);
+    if (mlength.HasValue() && moffset.right().HasValue() &&
+        moffset.right().Value() >= 0 &&
+        mlength.Value() >= moffset.right().Value()) {
+      Emit(opcode, g.DefineAsRegister(node),
+           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
+           g.UseRegister(moffset.left().node()), buffer_operand);
+      return;
+    }
+    IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
+    if (mmlength.HasValue() && moffset.right().HasValue() &&
+        moffset.right().Value() >= 0 &&
+        mmlength.Value() >= moffset.right().Value()) {
+      Emit(opcode, g.DefineAsRegister(node),
+           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
+           g.UseRegister(moffset.left().node()), buffer_operand);
+      return;
+    }
+  }
   InstructionOperand offset_operand = g.UseRegister(offset);
   InstructionOperand length_operand =
       g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
@@ -435,6 +473,9 @@
     case MachineRepresentation::kTagged:         // Fall through.
     case MachineRepresentation::kWord64:         // Fall through.
     case MachineRepresentation::kSimd128:        // Fall through.
+    case MachineRepresentation::kSimd1x4:        // Fall through.
+    case MachineRepresentation::kSimd1x8:        // Fall through.
+    case MachineRepresentation::kSimd1x16:       // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -445,6 +486,30 @@
                                   rep == MachineRepresentation::kBit)
                                      ? g.UseByteRegister(value)
                                      : g.UseRegister(value));
+  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+    Int32BinopMatcher moffset(offset);
+    InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
+                                            ? g.UseImmediate(buffer)
+                                            : g.UseRegister(buffer);
+    Int32Matcher mlength(length);
+    if (mlength.HasValue() && moffset.right().HasValue() &&
+        moffset.right().Value() >= 0 &&
+        mlength.Value() >= moffset.right().Value()) {
+      Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
+           g.UseImmediate(length), value_operand,
+           g.UseRegister(moffset.left().node()), buffer_operand);
+      return;
+    }
+    IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
+    if (mmlength.HasValue() && moffset.right().HasValue() &&
+        moffset.right().Value() >= 0 &&
+        mmlength.Value() >= moffset.right().Value()) {
+      Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
+           g.UseImmediate(length), value_operand,
+           g.UseRegister(moffset.left().node()), buffer_operand);
+      return;
+    }
+  }
   InstructionOperand offset_operand = g.UseRegister(offset);
   InstructionOperand length_operand =
       g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
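
The matcher blocks above (and the matching checked-load path) fold a constant
index offset K into the checked access when a constant length L satisfies
0 <= K <= L. The arithmetic that makes this safe, restated as plain C++
(InBounds is our illustrative name, not a V8 helper):

#include <cstdint>

bool InBounds(uint32_t base, int32_t k, int32_t length) {
  if (k < 0 || length < k) return false;  // immediate form not applicable
  // With 0 <= k <= length, "base + k < length" is equivalent to
  // "base < length - k", and length - k cannot underflow.
  return base < static_cast<uint32_t>(length - k);
}
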
@@ -515,7 +580,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -766,18 +831,83 @@
   VisitShift(this, node, kIA32Ror);
 }
 
+#define RO_OP_LIST(V)                                     \
+  V(Word32Clz, kIA32Lzcnt)                                \
+  V(Word32Ctz, kIA32Tzcnt)                                \
+  V(Word32Popcnt, kIA32Popcnt)                            \
+  V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64)         \
+  V(RoundInt32ToFloat32, kSSEInt32ToFloat32)              \
+  V(ChangeInt32ToFloat64, kSSEInt32ToFloat64)             \
+  V(ChangeUint32ToFloat64, kSSEUint32ToFloat64)           \
+  V(TruncateFloat32ToInt32, kSSEFloat32ToInt32)           \
+  V(TruncateFloat32ToUint32, kSSEFloat32ToUint32)         \
+  V(ChangeFloat64ToInt32, kSSEFloat64ToInt32)             \
+  V(ChangeFloat64ToUint32, kSSEFloat64ToUint32)           \
+  V(TruncateFloat64ToUint32, kSSEFloat64ToUint32)         \
+  V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32)       \
+  V(RoundFloat64ToInt32, kSSEFloat64ToInt32)              \
+  V(BitcastFloat32ToInt32, kIA32BitcastFI)                \
+  V(BitcastInt32ToFloat32, kIA32BitcastIF)                \
+  V(Float32Sqrt, kSSEFloat32Sqrt)                         \
+  V(Float64Sqrt, kSSEFloat64Sqrt)                         \
+  V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
+  V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
 
-void InstructionSelector::VisitWord32Clz(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kIA32Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
+#define RR_OP_LIST(V)                                                         \
+  V(TruncateFloat64ToWord32, kArchTruncateDoubleToI)                          \
+  V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown))       \
+  V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown))       \
+  V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp))           \
+  V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp))           \
+  V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
+  V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
+  V(Float32RoundTiesEven,                                                     \
+    kSSEFloat32Round | MiscField::encode(kRoundToNearest))                    \
+  V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
 
+#define RRO_FLOAT_OP_LIST(V)                    \
+  V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \
+  V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \
+  V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \
+  V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \
+  V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
+  V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
+  V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
+  V(Float64Div, kAVXFloat64Div, kSSEFloat64Div)
 
-void InstructionSelector::VisitWord32Ctz(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kIA32Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
+#define FLOAT_UNOP_LIST(V)                      \
+  V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
+  V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
+  V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \
+  V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg)
 
+#define RO_VISITOR(Name, opcode)                      \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRO(this, node, opcode);                      \
+  }
+RO_OP_LIST(RO_VISITOR)
+#undef RO_VISITOR
+
+#define RR_VISITOR(Name, opcode)                      \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRR(this, node, opcode);                      \
+  }
+RR_OP_LIST(RR_VISITOR)
+#undef RR_VISITOR
+
+#define RRO_FLOAT_VISITOR(Name, avx, sse)             \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRROFloat(this, node, avx, sse);              \
+  }
+RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
+#undef RRO_FLOAT_VISITOR
+
+#define FLOAT_UNOP_VISITOR(Name, avx, sse)                  \
+  void InstructionSelector::Visit##Name(Node* node) {       \
+    VisitFloatUnop(this, node, node->InputAt(0), avx, sse); \
+  }
+FLOAT_UNOP_LIST(FLOAT_UNOP_VISITOR)
+#undef FLOAT_UNOP_VISITOR
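
The RO/RR/RRO_FLOAT/FLOAT_UNOP lists above are X-macros: one list of
(name, opcode) entries is expanded by several short visitor-generating
macros. A self-contained illustration of the technique (the color list and
printers are made up for the example):

#include <cstdio>

#define COLOR_LIST(V) \
  V(Red, 0xff0000)    \
  V(Green, 0x00ff00)  \
  V(Blue, 0x0000ff)

#define DECLARE_PRINTER(Name, rgb) \
  void Print##Name() { std::printf("%s: %06x\n", #Name, static_cast<unsigned>(rgb)); }
COLOR_LIST(DECLARE_PRINTER)
#undef DECLARE_PRINTER

int main() {
  PrintRed();    // Red: ff0000
  PrintGreen();  // Green: 00ff00
  PrintBlue();   // Blue: 0000ff
}
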
 
 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 
@@ -785,12 +915,6 @@
 
 void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
-void InstructionSelector::VisitWord32Popcnt(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitInt32Add(Node* node) {
   IA32OperandGenerator g(this);
 
@@ -885,16 +1009,6 @@
 }
 
 
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
-  VisitRO(this, node, kSSEFloat32ToFloat64);
-}
-
-
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
-  VisitRO(this, node, kSSEInt32ToFloat32);
-}
-
-
 void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
   IA32OperandGenerator g(this);
   InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
@@ -902,103 +1016,6 @@
        arraysize(temps), temps);
 }
 
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
-  VisitRO(this, node, kSSEInt32ToFloat64);
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
-  VisitRO(this, node, kSSEUint32ToFloat64);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
-  VisitRO(this, node, kSSEFloat32ToInt32);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
-  VisitRO(this, node, kSSEFloat32ToUint32);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToInt32);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToUint32);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToUint32);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToFloat32);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
-  VisitRR(this, node, kArchTruncateDoubleToI);
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToInt32);
-}
-
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kIA32BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kIA32BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat32Add(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat32Add, kSSEFloat32Add);
-}
-
-
-void InstructionSelector::VisitFloat64Add(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat64Add, kSSEFloat64Add);
-}
-
-
-void InstructionSelector::VisitFloat32Sub(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
-}
-
-void InstructionSelector::VisitFloat64Sub(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
-}
-
-void InstructionSelector::VisitFloat32Mul(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
-}
-
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
-}
-
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat32Div, kSSEFloat32Div);
-}
-
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat64Div, kSSEFloat64Div);
-}
-
-
 void InstructionSelector::VisitFloat64Mod(Node* node) {
   IA32OperandGenerator g(this);
   InstructionOperand temps[] = {g.TempRegister(eax)};
@@ -1039,80 +1056,10 @@
        arraysize(temps), temps);
 }
 
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
-  IA32OperandGenerator g(this);
-  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
-}
-
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
-  IA32OperandGenerator g(this);
-  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
-  VisitRO(this, node, kSSEFloat32Sqrt);
-}
-
-
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
-  VisitRO(this, node, kSSEFloat64Sqrt);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
-}
-
-
 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
   UNREACHABLE();
 }
 
-
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
-}
-
-void InstructionSelector::VisitFloat32Neg(Node* node) {
-  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
-  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
-}
-
 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                    InstructionCode opcode) {
   IA32OperandGenerator g(this);
@@ -1154,22 +1101,35 @@
     }
   } else {
     // Push any stack arguments.
+    int effect_level = GetEffectLevel(node);
     for (PushParameter input : base::Reversed(*arguments)) {
       // Skip any alignment holes in pushed nodes.
+      Node* input_node = input.node();
       if (input.node() == nullptr) continue;
-      InstructionOperand value =
-          g.CanBeImmediate(input.node())
-              ? g.UseImmediate(input.node())
-              : IsSupported(ATOM) ||
-                        sequence()->IsFP(GetVirtualRegister(input.node()))
-                    ? g.UseRegister(input.node())
-                    : g.Use(input.node());
-      if (input.type() == MachineType::Float32()) {
-        Emit(kIA32PushFloat32, g.NoOutput(), value);
-      } else if (input.type() == MachineType::Float64()) {
-        Emit(kIA32PushFloat64, g.NoOutput(), value);
+      if (g.CanBeMemoryOperand(kIA32Push, node, input_node, effect_level)) {
+        InstructionOperand outputs[1];
+        InstructionOperand inputs[4];
+        size_t input_count = 0;
+        InstructionCode opcode = kIA32Push;
+        AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+            input_node, inputs, &input_count);
+        opcode |= AddressingModeField::encode(mode);
+        Emit(opcode, 0, outputs, input_count, inputs);
       } else {
-        Emit(kIA32Push, g.NoOutput(), value);
+        InstructionOperand value =
+            g.CanBeImmediate(input.node())
+                ? g.UseImmediate(input.node())
+                : IsSupported(ATOM) ||
+                          sequence()->IsFP(GetVirtualRegister(input.node()))
+                      ? g.UseRegister(input.node())
+                      : g.Use(input.node());
+        if (input.type() == MachineType::Float32()) {
+          Emit(kIA32PushFloat32, g.NoOutput(), value);
+        } else if (input.type() == MachineType::Float64()) {
+          Emit(kIA32PushFloat64, g.NoOutput(), value);
+        } else {
+          Emit(kIA32Push, g.NoOutput(), value);
+        }
       }
     }
   }
@@ -1202,11 +1162,14 @@
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
-                             cont->reason(), cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     InstructionOperand output = g.DefineAsRegister(cont->result());
     selector->Emit(opcode, 1, &output, input_count, inputs);
+  } else {
+    DCHECK(cont->IsTrap());
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, 0, nullptr, input_count, inputs);
   }
 }
 
@@ -1220,11 +1183,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1240,21 +1206,54 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
+MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
+  if (hint_node->opcode() == IrOpcode::kLoad) {
+    MachineType hint = LoadRepresentationOf(hint_node->op());
+    if (node->opcode() == IrOpcode::kInt32Constant ||
+        node->opcode() == IrOpcode::kInt64Constant) {
+      int64_t constant = node->opcode() == IrOpcode::kInt32Constant
+                             ? OpParameter<int32_t>(node)
+                             : OpParameter<int64_t>(node);
+      if (hint == MachineType::Int8()) {
+        if (constant >= std::numeric_limits<int8_t>::min() &&
+            constant <= std::numeric_limits<int8_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Uint8()) {
+        if (constant >= std::numeric_limits<uint8_t>::min() &&
+            constant <= std::numeric_limits<uint8_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Int16()) {
+        if (constant >= std::numeric_limits<int16_t>::min() &&
+            constant <= std::numeric_limits<int16_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Uint16()) {
+        if (constant >= std::numeric_limits<uint16_t>::min() &&
+            constant <= std::numeric_limits<uint16_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Int32()) {
+        return hint;
+      } else if (hint == MachineType::Uint32()) {
+        if (constant >= 0) return hint;
+      }
+    }
+  }
+  return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
+                                           : MachineType::None();
+}
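
MachineTypeForNarrow's range tests amount to asking whether the constant fits
in the hinted type. A compact generic restatement (FitsIn is our name for
illustration, not a V8 helper):

#include <cstdint>
#include <limits>

template <typename T>
constexpr bool FitsIn(int64_t constant) {
  return constant >= std::numeric_limits<T>::min() &&
         constant <= std::numeric_limits<T>::max();
}

static_assert(FitsIn<int8_t>(127), "fits");
static_assert(!FitsIn<int8_t>(128), "out of range");
static_assert(FitsIn<uint16_t>(65535), "fits");
static_assert(!FitsIn<uint16_t>(-1), "negative never fits an unsigned hint");
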
+
 // Tries to match the size of the given opcode to that of the operands, if
 // possible.
 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
                                     Node* right, FlagsContinuation* cont) {
-  // Currently, if one of the two operands is not a Load, we don't know what its
-  // machine representation is, so we bail out.
-  // TODO(epertoso): we can probably get some size information out of immediates
-  // and phi nodes.
-  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
-    return opcode;
-  }
+  // TODO(epertoso): we can probably get some size information out of phi nodes.
   // If the load representations don't match, both operands will be
   // zero/sign-extended to 32bit.
-  MachineType left_type = LoadRepresentationOf(left->op());
-  MachineType right_type = LoadRepresentationOf(right->op());
+  MachineType left_type = MachineTypeForNarrow(left, right);
+  MachineType right_type = MachineTypeForNarrow(right, left);
   if (left_type == right_type) {
     switch (left_type.representation()) {
       case MachineRepresentation::kBit:
@@ -1332,10 +1331,8 @@
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
-      // TODO(epertoso): we should use `narrowed_opcode' here once we match
-      // immediates too.
-      return VisitCompareWithMemoryOperand(selector, opcode, left,
+    if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
+      return VisitCompareWithMemoryOperand(selector, narrowed_opcode, left,
                                            g.UseImmediate(right), cont);
     }
     return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
@@ -1352,11 +1349,6 @@
         cont);
   }
 
-  if (g.CanBeBetterLeftOperand(right)) {
-    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-    std::swap(left, right);
-  }
-
   return VisitCompare(selector, opcode, left, right, cont,
                       node->op()->HasProperty(Operator::kCommutative));
 }
@@ -1377,8 +1369,8 @@
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
       } else if (cont->IsDeoptimize()) {
-        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
-                                 cont->frame_state());
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
+                                 cont->reason(), cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
         selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1490,14 +1482,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -1633,19 +1640,6 @@
 }
 
 
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
-       g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
-       g.Use(node->InputAt(0)));
-}
-
 
 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   IA32OperandGenerator g(this);
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index 6242e98..360069c 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -152,7 +152,8 @@
   kFlags_none = 0,
   kFlags_branch = 1,
   kFlags_deoptimize = 2,
-  kFlags_set = 3
+  kFlags_set = 3,
+  kFlags_trap = 4
 };
 
 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
@@ -205,11 +206,11 @@
 // for code generation. We encode the instruction, addressing mode, and flags
 // continuation into a single InstructionCode which is stored as part of
 // the instruction.
-typedef BitField<ArchOpcode, 0, 8> ArchOpcodeField;
-typedef BitField<AddressingMode, 8, 5> AddressingModeField;
-typedef BitField<FlagsMode, 13, 2> FlagsModeField;
-typedef BitField<FlagsCondition, 15, 5> FlagsConditionField;
-typedef BitField<int, 20, 12> MiscField;
+typedef BitField<ArchOpcode, 0, 9> ArchOpcodeField;
+typedef BitField<AddressingMode, 9, 5> AddressingModeField;
+typedef BitField<FlagsMode, 14, 3> FlagsModeField;
+typedef BitField<FlagsCondition, 17, 5> FlagsConditionField;
+typedef BitField<int, 22, 10> MiscField;
 
 }  // namespace compiler
 }  // namespace internal
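
The widened layout still fills exactly one 32-bit word: 9 + 5 + 3 + 5 + 10 =
32 bits. A minimal stand-in for the BitField template (simplified from the
real one) showing how such fields encode and decode:

#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

using ArchOpcodeField = BitField<int, 0, 9>;  // now 9 bits: room for > 256 opcodes
using MiscField = BitField<int, 22, 10>;      // shrunk to make room

int main() {
  uint32_t code = ArchOpcodeField::encode(300) | MiscField::encode(7);
  return (ArchOpcodeField::decode(code) == 300 &&
          MiscField::decode(code) == 7) ? 0 : 1;  // exits 0 on success
}
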
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index 6cb87ea..ecda453 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -5,8 +5,8 @@
 #ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
 #define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
 
-#include "src/compiler/instruction.h"
 #include "src/compiler/instruction-selector.h"
+#include "src/compiler/instruction.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/schedule.h"
 #include "src/macro-assembler.h"
@@ -182,6 +182,21 @@
                               sequence()->NextVirtualRegister());
   }
 
+  int AllocateVirtualRegister() { return sequence()->NextVirtualRegister(); }
+
+  InstructionOperand DefineSameAsFirstForVreg(int vreg) {
+    return UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT, vreg);
+  }
+
+  InstructionOperand DefineAsRegisterForVreg(int vreg) {
+    return UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg);
+  }
+
+  InstructionOperand UseRegisterForVreg(int vreg) {
+    return UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                              UnallocatedOperand::USED_AT_START, vreg);
+  }
+
   InstructionOperand TempDoubleRegister() {
     UnallocatedOperand op = UnallocatedOperand(
         UnallocatedOperand::MUST_HAVE_REGISTER,
@@ -335,9 +350,10 @@
 
   // Creates a new flags continuation for an eager deoptimization exit.
   static FlagsContinuation ForDeoptimize(FlagsCondition condition,
+                                         DeoptimizeKind kind,
                                          DeoptimizeReason reason,
                                          Node* frame_state) {
-    return FlagsContinuation(condition, reason, frame_state);
+    return FlagsContinuation(condition, kind, reason, frame_state);
   }
 
   // Creates a new flags continuation for a boolean value.
@@ -345,14 +361,25 @@
     return FlagsContinuation(condition, result);
   }
 
+  // Creates a new flags continuation for a wasm trap.
+  static FlagsContinuation ForTrap(FlagsCondition condition,
+                                   Runtime::FunctionId trap_id, Node* result) {
+    return FlagsContinuation(condition, trap_id, result);
+  }
+
   bool IsNone() const { return mode_ == kFlags_none; }
   bool IsBranch() const { return mode_ == kFlags_branch; }
   bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
   bool IsSet() const { return mode_ == kFlags_set; }
+  bool IsTrap() const { return mode_ == kFlags_trap; }
   FlagsCondition condition() const {
     DCHECK(!IsNone());
     return condition_;
   }
+  DeoptimizeKind kind() const {
+    DCHECK(IsDeoptimize());
+    return kind_;
+  }
   DeoptimizeReason reason() const {
     DCHECK(IsDeoptimize());
     return reason_;
@@ -365,6 +392,10 @@
     DCHECK(IsSet());
     return frame_state_or_result_;
   }
+  Runtime::FunctionId trap_id() const {
+    DCHECK(IsTrap());
+    return trap_id_;
+  }
   BasicBlock* true_block() const {
     DCHECK(IsBranch());
     return true_block_;
@@ -422,10 +453,11 @@
   }
 
  private:
-  FlagsContinuation(FlagsCondition condition, DeoptimizeReason reason,
-                    Node* frame_state)
+  FlagsContinuation(FlagsCondition condition, DeoptimizeKind kind,
+                    DeoptimizeReason reason, Node* frame_state)
       : mode_(kFlags_deoptimize),
         condition_(condition),
+        kind_(kind),
         reason_(reason),
         frame_state_or_result_(frame_state) {
     DCHECK_NOT_NULL(frame_state);
@@ -437,13 +469,24 @@
     DCHECK_NOT_NULL(result);
   }
 
+  FlagsContinuation(FlagsCondition condition, Runtime::FunctionId trap_id,
+                    Node* result)
+      : mode_(kFlags_trap),
+        condition_(condition),
+        frame_state_or_result_(result),
+        trap_id_(trap_id) {
+    DCHECK_NOT_NULL(result);
+  }
+
   FlagsMode const mode_;
   FlagsCondition condition_;
-  DeoptimizeReason reason_;      // Only value if mode_ == kFlags_deoptimize
+  DeoptimizeKind kind_;          // Only valid if mode_ == kFlags_deoptimize
+  DeoptimizeReason reason_;      // Only valid if mode_ == kFlags_deoptimize
   Node* frame_state_or_result_;  // Only valid if mode_ == kFlags_deoptimize
                                  // or mode_ == kFlags_set.
   BasicBlock* true_block_;       // Only valid if mode_ == kFlags_branch.
   BasicBlock* false_block_;      // Only valid if mode_ == kFlags_branch.
+  Runtime::FunctionId trap_id_;  // Only valid if mode_ == kFlags_trap.
 };
 
 }  // namespace compiler
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 8f899f3..57b6028 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -14,6 +14,7 @@
 #include "src/compiler/schedule.h"
 #include "src/compiler/state-values-utils.h"
 #include "src/deoptimizer.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -127,7 +128,6 @@
   }
 }
 
-
 Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                        InstructionOperand output,
                                        size_t temp_count,
@@ -414,13 +414,10 @@
   sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
 }
 
-
 namespace {
 
-enum class FrameStateInputKind { kAny, kStackSlot };
-
-InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
-                                   FrameStateInputKind kind,
+InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
+                                   Node* input, FrameStateInputKind kind,
                                    MachineRepresentation rep) {
   if (rep == MachineRepresentation::kNone) {
     return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
@@ -432,8 +429,31 @@
     case IrOpcode::kNumberConstant:
     case IrOpcode::kFloat32Constant:
     case IrOpcode::kFloat64Constant:
-    case IrOpcode::kHeapConstant:
       return g->UseImmediate(input);
+    case IrOpcode::kHeapConstant: {
+      if (!CanBeTaggedPointer(rep)) {
+        // If we have inconsistent static and dynamic types, e.g. if we
+        // smi-check a string, we can get here with a heap object that
+        // says it is a smi. In that case, we return an invalid instruction
+        // operand, which will be interpreted as an optimized-out value.
+
+        // TODO(jarin) Ideally, we should turn the current instruction
+        // into an abort (we should never execute it).
+        return InstructionOperand();
+      }
+
+      Handle<HeapObject> constant = OpParameter<Handle<HeapObject>>(input);
+      Heap::RootListIndex root_index;
+      if (isolate->heap()->IsRootHandle(constant, &root_index) &&
+          root_index == Heap::kOptimizedOutRootIndex) {
+        // For an optimized-out object we return an invalid instruction
+        // operand, so that we take the fast path for optimized-out values.
+        return InstructionOperand();
+      }
+
+      return g->UseImmediate(input);
+    }
+    case IrOpcode::kArgumentsObjectState:
     case IrOpcode::kObjectState:
     case IrOpcode::kTypedObjectState:
       UNREACHABLE();
@@ -452,6 +472,7 @@
   return InstructionOperand();
 }
 
+}  // namespace
 
 class StateObjectDeduplicator {
  public:
@@ -477,15 +498,21 @@
   ZoneVector<Node*> objects_;
 };
 
-
 // Returns the number of instruction operands added to inputs.
-size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
-                                        InstructionOperandVector* inputs,
-                                        OperandGenerator* g,
-                                        StateObjectDeduplicator* deduplicator,
-                                        Node* input, MachineType type,
-                                        FrameStateInputKind kind, Zone* zone) {
+size_t InstructionSelector::AddOperandToStateValueDescriptor(
+    StateValueList* values, InstructionOperandVector* inputs,
+    OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* input,
+    MachineType type, FrameStateInputKind kind, Zone* zone) {
+  if (input == nullptr) {
+    values->PushOptimizedOut();
+    return 0;
+  }
+
   switch (input->opcode()) {
+    case IrOpcode::kArgumentsObjectState: {
+      values->PushArguments();
+      return 0;
+    }
     case IrOpcode::kObjectState: {
       UNREACHABLE();
       return 0;
@@ -495,41 +522,45 @@
       if (id == StateObjectDeduplicator::kNotDuplicated) {
         size_t entries = 0;
         id = deduplicator->InsertObject(input);
-        descriptor->fields().push_back(
-            StateValueDescriptor::Recursive(zone, id));
-        StateValueDescriptor* new_desc = &descriptor->fields().back();
+        StateValueList* nested = values->PushRecursiveField(zone, id);
         int const input_count = input->op()->ValueInputCount();
         ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
         for (int i = 0; i < input_count; ++i) {
           entries += AddOperandToStateValueDescriptor(
-              new_desc, inputs, g, deduplicator, input->InputAt(i),
-              types->at(i), kind, zone);
+              nested, inputs, g, deduplicator, input->InputAt(i), types->at(i),
+              kind, zone);
         }
         return entries;
       } else {
         // Crankshaft counts duplicate objects for the running id, so we have
         // to push the input again.
         deduplicator->InsertObject(input);
-        descriptor->fields().push_back(
-            StateValueDescriptor::Duplicate(zone, id));
+        values->PushDuplicate(id);
         return 0;
       }
     }
     default: {
-      inputs->push_back(OperandForDeopt(g, input, kind, type.representation()));
-      descriptor->fields().push_back(StateValueDescriptor::Plain(zone, type));
-      return 1;
+      InstructionOperand op =
+          OperandForDeopt(isolate(), g, input, kind, type.representation());
+      if (op.kind() == InstructionOperand::INVALID) {
+        // Invalid operand means the value is impossible or optimized-out.
+        values->PushOptimizedOut();
+        return 0;
+      } else {
+        inputs->push_back(op);
+        values->PushPlain(type);
+        return 1;
+      }
     }
   }
 }
 
 
 // Returns the number of instruction operands added to inputs.
-size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
-                                       Node* state, OperandGenerator* g,
-                                       StateObjectDeduplicator* deduplicator,
-                                       InstructionOperandVector* inputs,
-                                       FrameStateInputKind kind, Zone* zone) {
+size_t InstructionSelector::AddInputsToFrameStateDescriptor(
+    FrameStateDescriptor* descriptor, Node* state, OperandGenerator* g,
+    StateObjectDeduplicator* deduplicator, InstructionOperandVector* inputs,
+    FrameStateInputKind kind, Zone* zone) {
   DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
 
   size_t entries = 0;
@@ -553,8 +584,12 @@
   DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
   DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
 
-  StateValueDescriptor* values_descriptor =
-      descriptor->GetStateValueDescriptor();
+  StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();
+
+  DCHECK_EQ(values_descriptor->size(), 0u);
+  values_descriptor->ReserveSize(
+      descriptor->GetSize(OutputFrameStateCombine::Ignore()));
+
   entries += AddOperandToStateValueDescriptor(
       values_descriptor, inputs, g, deduplicator, function,
       MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
@@ -583,8 +618,6 @@
   return entries;
 }
 
-}  // namespace
-
 
 // An internal helper class for generating the operands to calls.
 // TODO(bmeurer): Get rid of the CallBuffer business and make
@@ -733,7 +766,8 @@
     }
 
     int const state_id = sequence()->AddDeoptimizationEntry(
-        buffer->frame_state_descriptor, DeoptimizeReason::kNoReason);
+        buffer->frame_state_descriptor, DeoptimizeKind::kEager,
+        DeoptimizeReason::kNoReason);
     buffer->instruction_args.push_back(g.TempImmediate(state_id));
 
     StateObjectDeduplicator deduplicator(instruction_zone());
@@ -796,20 +830,33 @@
   }
 }
 
+bool InstructionSelector::IsSourcePositionUsed(Node* node) {
+  return (source_position_mode_ == kAllSourcePositions ||
+          node->opcode() == IrOpcode::kCall ||
+          node->opcode() == IrOpcode::kTrapIf ||
+          node->opcode() == IrOpcode::kTrapUnless);
+}
+
 void InstructionSelector::VisitBlock(BasicBlock* block) {
   DCHECK(!current_block_);
   current_block_ = block;
-  int current_block_end = static_cast<int>(instructions_.size());
+  auto current_num_instructions = [&] {
+    DCHECK_GE(kMaxInt, instructions_.size());
+    return static_cast<int>(instructions_.size());
+  };
+  int current_block_end = current_num_instructions();
 
   int effect_level = 0;
   for (Node* const node : *block) {
+    SetEffectLevel(node, effect_level);
     if (node->opcode() == IrOpcode::kStore ||
         node->opcode() == IrOpcode::kUnalignedStore ||
         node->opcode() == IrOpcode::kCheckedStore ||
-        node->opcode() == IrOpcode::kCall) {
+        node->opcode() == IrOpcode::kCall ||
+        node->opcode() == IrOpcode::kProtectedLoad ||
+        node->opcode() == IrOpcode::kProtectedStore) {
       ++effect_level;
     }
-    SetEffectLevel(node, effect_level);
   }
 
   // We visit the control first, then the nodes in the block, so the block's
@@ -818,10 +865,25 @@
     SetEffectLevel(block->control_input(), effect_level);
   }
 
+  auto FinishEmittedInstructions = [&](Node* node, int instruction_start) {
+    if (instruction_selection_failed()) return false;
+    if (current_num_instructions() == instruction_start) return true;
+    std::reverse(instructions_.begin() + instruction_start,
+                 instructions_.end());
+    if (!node) return true;
+    SourcePosition source_position = source_positions_->GetSourcePosition(node);
+    if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
+      sequence()->SetSourcePosition(instructions_[instruction_start],
+                                    source_position);
+    }
+    return true;
+  };
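
Selection emits each node's instructions "top down" and then reverses the
block-local range so they execute "bottom up"; the new lambda only adds the
failure check and source-position bookkeeping around that reversal. The core
trick outside V8 (strings stand in for instructions): nodes are visited in
reverse control-flow order, so a value's use is emitted before its definition,
and the per-range reversal restores execution order.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  std::vector<const char*> instructions = {"prologue"};
  size_t start = instructions.size();
  // Reverse-order visiting emits the consumer before the producer...
  instructions.push_back("use t0");
  instructions.push_back("t0 = load [esp]");
  // ...and reversing the emitted range puts the load first again.
  std::reverse(instructions.begin() + start, instructions.end());
  for (const char* insn : instructions) std::printf("%s\n", insn);
}
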
+
   // Generate code for the block control "top down", but schedule the code
   // "bottom up".
   VisitControl(block);
-  std::reverse(instructions_.begin() + current_block_end, instructions_.end());
+  if (!FinishEmittedInstructions(block->control_input(), current_block_end))
+    return;
 
   // Visit code in reverse control flow order, because architecture-specific
   // matching may cover more than one node at a time.
@@ -830,19 +892,9 @@
     if (!IsUsed(node) || IsDefined(node)) continue;
     // Generate code for this node "top down", but schedule the code "bottom
     // up".
-    size_t current_node_end = instructions_.size();
+    int current_node_end = current_num_instructions();
     VisitNode(node);
-    if (instruction_selection_failed()) return;
-    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
-    if (instructions_.size() == current_node_end) continue;
-    // Mark source position on first instruction emitted.
-    SourcePosition source_position = source_positions_->GetSourcePosition(node);
-    if (source_position.IsKnown() &&
-        (source_position_mode_ == kAllSourcePositions ||
-         node->opcode() == IrOpcode::kCall)) {
-      sequence()->SetSourcePosition(instructions_[current_node_end],
-                                    source_position);
-    }
+    if (!FinishEmittedInstructions(node, current_node_end)) return;
   }
 
   // We're done with the block.
@@ -862,6 +914,8 @@
   if (block->SuccessorCount() > 1) {
     for (BasicBlock* const successor : block->successors()) {
       for (Node* const node : *successor) {
+        // If this CHECK fails, you might have specified merged variables
+        // for a label with only one predecessor.
         CHECK(!IrOpcode::IsPhiOpcode(node->opcode()));
       }
     }
@@ -1013,6 +1067,12 @@
       return VisitDeoptimizeIf(node);
     case IrOpcode::kDeoptimizeUnless:
       return VisitDeoptimizeUnless(node);
+    case IrOpcode::kTrapIf:
+      return VisitTrapIf(node, static_cast<Runtime::FunctionId>(
+                                   OpParameter<int32_t>(node->op())));
+    case IrOpcode::kTrapUnless:
+      return VisitTrapUnless(node, static_cast<Runtime::FunctionId>(
+                                       OpParameter<int32_t>(node->op())));
     case IrOpcode::kFrameState:
     case IrOpcode::kStateValues:
     case IrOpcode::kObjectState:
@@ -1033,6 +1093,8 @@
     }
     case IrOpcode::kStore:
       return VisitStore(node);
+    case IrOpcode::kProtectedStore:
+      return VisitProtectedStore(node);
     case IrOpcode::kWord32And:
       return MarkAsWord32(node), VisitWord32And(node);
     case IrOpcode::kWord32Or:
@@ -1387,15 +1449,190 @@
     }
     case IrOpcode::kAtomicStore:
       return VisitAtomicStore(node);
-    case IrOpcode::kProtectedLoad:
+    case IrOpcode::kProtectedLoad: {
+      LoadRepresentation type = LoadRepresentationOf(node->op());
+      MarkAsRepresentation(type.representation(), node);
       return VisitProtectedLoad(node);
+    }
     case IrOpcode::kUnsafePointerAdd:
       MarkAsRepresentation(MachineType::PointerRepresentation(), node);
       return VisitUnsafePointerAdd(node);
+    case IrOpcode::kCreateFloat32x4:
+      return MarkAsSimd128(node), VisitCreateFloat32x4(node);
+    case IrOpcode::kFloat32x4ExtractLane:
+      return MarkAsFloat32(node), VisitFloat32x4ExtractLane(node);
+    case IrOpcode::kFloat32x4ReplaceLane:
+      return MarkAsSimd128(node), VisitFloat32x4ReplaceLane(node);
+    case IrOpcode::kFloat32x4FromInt32x4:
+      return MarkAsSimd128(node), VisitFloat32x4FromInt32x4(node);
+    case IrOpcode::kFloat32x4FromUint32x4:
+      return MarkAsSimd128(node), VisitFloat32x4FromUint32x4(node);
+    case IrOpcode::kFloat32x4Abs:
+      return MarkAsSimd128(node), VisitFloat32x4Abs(node);
+    case IrOpcode::kFloat32x4Neg:
+      return MarkAsSimd128(node), VisitFloat32x4Neg(node);
+    case IrOpcode::kFloat32x4Add:
+      return MarkAsSimd128(node), VisitFloat32x4Add(node);
+    case IrOpcode::kFloat32x4Sub:
+      return MarkAsSimd128(node), VisitFloat32x4Sub(node);
+    case IrOpcode::kFloat32x4Equal:
+      return MarkAsSimd1x4(node), VisitFloat32x4Equal(node);
+    case IrOpcode::kFloat32x4NotEqual:
+      return MarkAsSimd1x4(node), VisitFloat32x4NotEqual(node);
     case IrOpcode::kCreateInt32x4:
       return MarkAsSimd128(node), VisitCreateInt32x4(node);
     case IrOpcode::kInt32x4ExtractLane:
       return MarkAsWord32(node), VisitInt32x4ExtractLane(node);
+    case IrOpcode::kInt32x4ReplaceLane:
+      return MarkAsSimd128(node), VisitInt32x4ReplaceLane(node);
+    case IrOpcode::kInt32x4FromFloat32x4:
+      return MarkAsSimd128(node), VisitInt32x4FromFloat32x4(node);
+    case IrOpcode::kUint32x4FromFloat32x4:
+      return MarkAsSimd128(node), VisitUint32x4FromFloat32x4(node);
+    case IrOpcode::kInt32x4Neg:
+      return MarkAsSimd128(node), VisitInt32x4Neg(node);
+    case IrOpcode::kInt32x4ShiftLeftByScalar:
+      return MarkAsSimd128(node), VisitInt32x4ShiftLeftByScalar(node);
+    case IrOpcode::kInt32x4ShiftRightByScalar:
+      return MarkAsSimd128(node), VisitInt32x4ShiftRightByScalar(node);
+    case IrOpcode::kInt32x4Add:
+      return MarkAsSimd128(node), VisitInt32x4Add(node);
+    case IrOpcode::kInt32x4Sub:
+      return MarkAsSimd128(node), VisitInt32x4Sub(node);
+    case IrOpcode::kInt32x4Mul:
+      return MarkAsSimd128(node), VisitInt32x4Mul(node);
+    case IrOpcode::kInt32x4Min:
+      return MarkAsSimd128(node), VisitInt32x4Min(node);
+    case IrOpcode::kInt32x4Max:
+      return MarkAsSimd128(node), VisitInt32x4Max(node);
+    case IrOpcode::kInt32x4Equal:
+      return MarkAsSimd1x4(node), VisitInt32x4Equal(node);
+    case IrOpcode::kInt32x4NotEqual:
+      return MarkAsSimd1x4(node), VisitInt32x4NotEqual(node);
+    case IrOpcode::kInt32x4GreaterThan:
+      return MarkAsSimd1x4(node), VisitInt32x4GreaterThan(node);
+    case IrOpcode::kInt32x4GreaterThanOrEqual:
+      return MarkAsSimd1x4(node), VisitInt32x4GreaterThanOrEqual(node);
+    case IrOpcode::kUint32x4ShiftRightByScalar:
+      return MarkAsSimd128(node), VisitUint32x4ShiftRightByScalar(node);
+    case IrOpcode::kUint32x4Min:
+      return MarkAsSimd128(node), VisitUint32x4Min(node);
+    case IrOpcode::kUint32x4Max:
+      return MarkAsSimd128(node), VisitUint32x4Max(node);
+    case IrOpcode::kUint32x4GreaterThan:
+      return MarkAsSimd1x4(node), VisitUint32x4GreaterThan(node);
+    case IrOpcode::kUint32x4GreaterThanOrEqual:
+      return MarkAsSimd1x4(node), VisitUint32x4GreaterThanOrEqual(node);
+    case IrOpcode::kCreateInt16x8:
+      return MarkAsSimd128(node), VisitCreateInt16x8(node);
+    case IrOpcode::kInt16x8ExtractLane:
+      return MarkAsWord32(node), VisitInt16x8ExtractLane(node);
+    case IrOpcode::kInt16x8ReplaceLane:
+      return MarkAsSimd128(node), VisitInt16x8ReplaceLane(node);
+    case IrOpcode::kInt16x8Neg:
+      return MarkAsSimd128(node), VisitInt16x8Neg(node);
+    case IrOpcode::kInt16x8ShiftLeftByScalar:
+      return MarkAsSimd128(node), VisitInt16x8ShiftLeftByScalar(node);
+    case IrOpcode::kInt16x8ShiftRightByScalar:
+      return MarkAsSimd128(node), VisitInt16x8ShiftRightByScalar(node);
+    case IrOpcode::kInt16x8Add:
+      return MarkAsSimd128(node), VisitInt16x8Add(node);
+    case IrOpcode::kInt16x8AddSaturate:
+      return MarkAsSimd128(node), VisitInt16x8AddSaturate(node);
+    case IrOpcode::kInt16x8Sub:
+      return MarkAsSimd128(node), VisitInt16x8Sub(node);
+    case IrOpcode::kInt16x8SubSaturate:
+      return MarkAsSimd128(node), VisitInt16x8SubSaturate(node);
+    case IrOpcode::kInt16x8Mul:
+      return MarkAsSimd128(node), VisitInt16x8Mul(node);
+    case IrOpcode::kInt16x8Min:
+      return MarkAsSimd128(node), VisitInt16x8Min(node);
+    case IrOpcode::kInt16x8Max:
+      return MarkAsSimd128(node), VisitInt16x8Max(node);
+    case IrOpcode::kInt16x8Equal:
+      return MarkAsSimd1x8(node), VisitInt16x8Equal(node);
+    case IrOpcode::kInt16x8NotEqual:
+      return MarkAsSimd1x8(node), VisitInt16x8NotEqual(node);
+    case IrOpcode::kInt16x8GreaterThan:
+      return MarkAsSimd1x8(node), VisitInt16x8GreaterThan(node);
+    case IrOpcode::kInt16x8GreaterThanOrEqual:
+      return MarkAsSimd1x8(node), VisitInt16x8GreaterThanOrEqual(node);
+    case IrOpcode::kUint16x8ShiftRightByScalar:
+      return MarkAsSimd128(node), VisitUint16x8ShiftRightByScalar(node);
+    case IrOpcode::kUint16x8AddSaturate:
+      return MarkAsSimd128(node), VisitUint16x8AddSaturate(node);
+    case IrOpcode::kUint16x8SubSaturate:
+      return MarkAsSimd128(node), VisitUint16x8SubSaturate(node);
+    case IrOpcode::kUint16x8Min:
+      return MarkAsSimd128(node), VisitUint16x8Min(node);
+    case IrOpcode::kUint16x8Max:
+      return MarkAsSimd128(node), VisitUint16x8Max(node);
+    case IrOpcode::kUint16x8GreaterThan:
+      return MarkAsSimd1x8(node), VisitUint16x8GreaterThan(node);
+    case IrOpcode::kUint16x8GreaterThanOrEqual:
+      return MarkAsSimd1x8(node), VisitUint16x8GreaterThanOrEqual(node);
+    case IrOpcode::kCreateInt8x16:
+      return MarkAsSimd128(node), VisitCreateInt8x16(node);
+    case IrOpcode::kInt8x16ExtractLane:
+      return MarkAsWord32(node), VisitInt8x16ExtractLane(node);
+    case IrOpcode::kInt8x16ReplaceLane:
+      return MarkAsSimd128(node), VisitInt8x16ReplaceLane(node);
+    case IrOpcode::kInt8x16Neg:
+      return MarkAsSimd128(node), VisitInt8x16Neg(node);
+    case IrOpcode::kInt8x16ShiftLeftByScalar:
+      return MarkAsSimd128(node), VisitInt8x16ShiftLeftByScalar(node);
+    case IrOpcode::kInt8x16ShiftRightByScalar:
+      return MarkAsSimd128(node), VisitInt8x16ShiftRightByScalar(node);
+    case IrOpcode::kInt8x16Add:
+      return MarkAsSimd128(node), VisitInt8x16Add(node);
+    case IrOpcode::kInt8x16AddSaturate:
+      return MarkAsSimd128(node), VisitInt8x16AddSaturate(node);
+    case IrOpcode::kInt8x16Sub:
+      return MarkAsSimd128(node), VisitInt8x16Sub(node);
+    case IrOpcode::kInt8x16SubSaturate:
+      return MarkAsSimd128(node), VisitInt8x16SubSaturate(node);
+    case IrOpcode::kInt8x16Mul:
+      return MarkAsSimd128(node), VisitInt8x16Mul(node);
+    case IrOpcode::kInt8x16Min:
+      return MarkAsSimd128(node), VisitInt8x16Min(node);
+    case IrOpcode::kInt8x16Max:
+      return MarkAsSimd128(node), VisitInt8x16Max(node);
+    case IrOpcode::kInt8x16Equal:
+      return MarkAsSimd1x16(node), VisitInt8x16Equal(node);
+    case IrOpcode::kInt8x16NotEqual:
+      return MarkAsSimd1x16(node), VisitInt8x16NotEqual(node);
+    case IrOpcode::kInt8x16GreaterThan:
+      return MarkAsSimd1x16(node), VisitInt8x16GreaterThan(node);
+    case IrOpcode::kInt8x16GreaterThanOrEqual:
+      return MarkAsSimd1x16(node), VisitInt8x16GreaterThanOrEqual(node);
+    case IrOpcode::kUint8x16ShiftRightByScalar:
+      return MarkAsSimd128(node), VisitUint8x16ShiftRightByScalar(node);
+    case IrOpcode::kUint8x16AddSaturate:
+      return MarkAsSimd128(node), VisitUint8x16AddSaturate(node);
+    case IrOpcode::kUint8x16SubSaturate:
+      return MarkAsSimd128(node), VisitUint8x16SubSaturate(node);
+    case IrOpcode::kUint8x16Min:
+      return MarkAsSimd128(node), VisitUint8x16Min(node);
+    case IrOpcode::kUint8x16Max:
+      return MarkAsSimd128(node), VisitUint8x16Max(node);
+    case IrOpcode::kUint8x16GreaterThan:
+      return MarkAsSimd1x16(node), VisitUint8x16GreaterThan(node);
+    case IrOpcode::kUint8x16GreaterThanOrEqual:
+      return MarkAsSimd1x16(node), VisitUint8x16GreaterThanOrEqual(node);
+    case IrOpcode::kSimd128And:
+      return MarkAsSimd128(node), VisitSimd128And(node);
+    case IrOpcode::kSimd128Or:
+      return MarkAsSimd128(node), VisitSimd128Or(node);
+    case IrOpcode::kSimd128Xor:
+      return MarkAsSimd128(node), VisitSimd128Xor(node);
+    case IrOpcode::kSimd128Not:
+      return MarkAsSimd128(node), VisitSimd128Not(node);
+    case IrOpcode::kSimd32x4Select:
+      return MarkAsSimd128(node), VisitSimd32x4Select(node);
+    case IrOpcode::kSimd16x8Select:
+      return MarkAsSimd128(node), VisitSimd16x8Select(node);
+    case IrOpcode::kSimd8x16Select:
+      return MarkAsSimd128(node), VisitSimd8x16Select(node);
     default:
       V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
                node->opcode(), node->op()->mnemonic(), node->id());
@@ -1538,7 +1775,7 @@
 }
 
 void InstructionSelector::VisitStackSlot(Node* node) {
-  int size = 1 << ElementSizeLog2Of(StackSlotRepresentationOf(node->op()));
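+  // StackSlotSizeOf returns the slot size in bytes stored in the operator.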
+  int size = StackSlotSizeOf(node->op());
   int slot = frame_->AllocateSpillSlot(size);
   OperandGenerator g(this);
 
@@ -1547,8 +1784,7 @@
 }
 
 void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
-  OperandGenerator g(this);
-  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
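+  // A tagged-to-word bitcast is a no-op at the machine level; just alias the
+  // output to the input operand.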
+  EmitIdentity(node);
 }
 
 void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
@@ -1697,7 +1933,6 @@
   UNIMPLEMENTED();
 }
 
-
 void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
   UNIMPLEMENTED();
 }
@@ -1723,13 +1958,288 @@
 void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
 #endif  // V8_TARGET_ARCH_64_BIT
 
-#if !V8_TARGET_ARCH_X64
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
 void InstructionSelector::VisitCreateInt32x4(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
   UNIMPLEMENTED();
 }
-#endif  // !V8_TARGET_ARCH_X64
+
+void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+#endif  // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitCreateFloat32x4(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4ExtractLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4ReplaceLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4FromInt32x4(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4FromUint32x4(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4Abs(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4NotEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4FromFloat32x4(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4ShiftLeftByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4NotEqual(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4LessThan(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4LessThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4GreaterThan(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4GreaterThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint32x4Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint32x4GreaterThan(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4GreaterThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitCreateInt16x8(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8ExtractLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8ReplaceLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8ShiftLeftByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8AddSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8SubSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8NotEqual(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8LessThan(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8LessThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8GreaterThan(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8GreaterThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8AddSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8SubSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint16x8Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint16x8GreaterThan(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8GreaterThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitCreateInt8x16(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16ExtractLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16ReplaceLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16ShiftLeftByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16AddSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16SubSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16NotEqual(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16LessThan(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16LessThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16GreaterThan(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16GreaterThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16AddSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16SubSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint8x16Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint8x16GreaterThan(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16GreaterThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitSimd128And(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd128Or(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd128Xor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd128Not(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd32x4Select(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd16x8Select(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd8x16Select(Node* node) { UNIMPLEMENTED(); }
+#endif  // !V8_TARGET_ARCH_ARM
 
 void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
 
@@ -1970,7 +2480,8 @@
   DCHECK_GE(input_count, 1);
   auto value_locations = zone()->NewArray<InstructionOperand>(input_count);
   Node* pop_count = ret->InputAt(0);
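+  // Constant pop counts (both 32- and 64-bit) can be encoded as immediates;
+  // anything else must be placed in a register.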
-  value_locations[0] = pop_count->opcode() == IrOpcode::kInt32Constant
+  value_locations[0] = (pop_count->opcode() == IrOpcode::kInt32Constant ||
+                        pop_count->opcode() == IrOpcode::kInt64Constant)
                            ? g.UseImmediate(pop_count)
                            : g.UseRegister(pop_count);
   for (int i = 1; i < input_count; ++i) {
@@ -1980,32 +2491,31 @@
   Emit(kArchRet, 0, nullptr, input_count, value_locations);
 }
 
-Instruction* InstructionSelector::EmitDeoptimize(InstructionCode opcode,
-                                                 InstructionOperand output,
-                                                 InstructionOperand a,
-                                                 DeoptimizeReason reason,
-                                                 Node* frame_state) {
+Instruction* InstructionSelector::EmitDeoptimize(
+    InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+    DeoptimizeKind kind, DeoptimizeReason reason, Node* frame_state) {
   size_t output_count = output.IsInvalid() ? 0 : 1;
   InstructionOperand inputs[] = {a};
   size_t input_count = arraysize(inputs);
   return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
-                        reason, frame_state);
+                        kind, reason, frame_state);
 }
 
 Instruction* InstructionSelector::EmitDeoptimize(
     InstructionCode opcode, InstructionOperand output, InstructionOperand a,
-    InstructionOperand b, DeoptimizeReason reason, Node* frame_state) {
+    InstructionOperand b, DeoptimizeKind kind, DeoptimizeReason reason,
+    Node* frame_state) {
   size_t output_count = output.IsInvalid() ? 0 : 1;
   InstructionOperand inputs[] = {a, b};
   size_t input_count = arraysize(inputs);
   return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
-                        reason, frame_state);
+                        kind, reason, frame_state);
 }
 
 Instruction* InstructionSelector::EmitDeoptimize(
     InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
-    size_t input_count, InstructionOperand* inputs, DeoptimizeReason reason,
-    Node* frame_state) {
+    size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
+    DeoptimizeReason reason, Node* frame_state) {
   OperandGenerator g(this);
   FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
   InstructionOperandVector args(instruction_zone());
@@ -2014,7 +2524,8 @@
     args.push_back(inputs[i]);
   }
   opcode |= MiscField::encode(static_cast<int>(input_count));
-  int const state_id = sequence()->AddDeoptimizationEntry(descriptor, reason);
+  int const state_id =
+      sequence()->AddDeoptimizationEntry(descriptor, kind, reason);
   args.push_back(g.TempImmediate(state_id));
   StateObjectDeduplicator deduplicator(instruction_zone());
   AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
@@ -2033,16 +2544,7 @@
 void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
                                           DeoptimizeReason reason,
                                           Node* value) {
-  InstructionCode opcode = kArchDeoptimize;
-  switch (kind) {
-    case DeoptimizeKind::kEager:
-      opcode |= MiscField::encode(Deoptimizer::EAGER);
-      break;
-    case DeoptimizeKind::kSoft:
-      opcode |= MiscField::encode(Deoptimizer::SOFT);
-      break;
-  }
-  EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, reason, value);
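+  // The deoptimization kind is carried by the DeoptimizationEntry, so nothing
+  // needs to be encoded into the instruction's MiscField here.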
+  EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason, value);
 }
 
 
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index 65ba8f7..d811aa4 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -26,6 +26,7 @@
 class Linkage;
 class OperandGenerator;
 struct SwitchInfo;
+class StateObjectDeduplicator;
 
 // This struct connects nodes of parameters which are going to be pushed on the
 // call stack with their parameter index in the call descriptor of the callee.
@@ -42,6 +43,8 @@
   MachineType type_;
 };
 
+enum class FrameStateInputKind { kAny, kStackSlot };
+
 // Instruction selection generates an InstructionSequence for a given Schedule.
 class V8_EXPORT_PRIVATE InstructionSelector final {
  public:
@@ -111,14 +114,15 @@
   // ===========================================================================
 
   Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
-                              InstructionOperand a, DeoptimizeReason reason,
-                              Node* frame_state);
+                              InstructionOperand a, DeoptimizeKind kind,
+                              DeoptimizeReason reason, Node* frame_state);
   Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
                               InstructionOperand a, InstructionOperand b,
-                              DeoptimizeReason reason, Node* frame_state);
+                              DeoptimizeKind kind, DeoptimizeReason reason,
+                              Node* frame_state);
   Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
                               InstructionOperand* outputs, size_t input_count,
-                              InstructionOperand* inputs,
+                              InstructionOperand* inputs, DeoptimizeKind kind,
                               DeoptimizeReason reason, Node* frame_state);
 
   // ===========================================================================
@@ -259,6 +263,27 @@
   void MarkAsSimd128(Node* node) {
     MarkAsRepresentation(MachineRepresentation::kSimd128, node);
   }
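+  // Boolean SIMD vectors live in dedicated mask registers where the target
+  // provides them; otherwise they occupy full 128-bit registers.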
+  void MarkAsSimd1x4(Node* node) {
+    if (kSimdMaskRegisters) {
+      MarkAsRepresentation(MachineRepresentation::kSimd1x4, node);
+    } else {
+      MarkAsSimd128(node);
+    }
+  }
+  void MarkAsSimd1x8(Node* node) {
+    if (kSimdMaskRegisters) {
+      MarkAsRepresentation(MachineRepresentation::kSimd1x8, node);
+    } else {
+      MarkAsSimd128(node);
+    }
+  }
+  void MarkAsSimd1x16(Node* node) {
+    if (kSimdMaskRegisters) {
+      MarkAsRepresentation(MachineRepresentation::kSimd1x16, node);
+    } else {
+      MarkAsSimd128(node);
+    }
+  }
   void MarkAsReference(Node* node) {
     MarkAsRepresentation(MachineRepresentation::kTagged, node);
   }
@@ -286,6 +311,17 @@
   int GetTempsCountForTailCallFromJSFunction();
 
   FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
+  size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
+                                         Node* state, OperandGenerator* g,
+                                         StateObjectDeduplicator* deduplicator,
+                                         InstructionOperandVector* inputs,
+                                         FrameStateInputKind kind, Zone* zone);
+  size_t AddOperandToStateValueDescriptor(StateValueList* values,
+                                          InstructionOperandVector* inputs,
+                                          OperandGenerator* g,
+                                          StateObjectDeduplicator* deduplicator,
+                                          Node* input, MachineType type,
+                                          FrameStateInputKind kind, Zone* zone);
 
   // ===========================================================================
   // ============= Architecture-specific graph covering methods. ===============
@@ -307,8 +343,7 @@
 
 #define DECLARE_GENERATOR(x) void Visit##x(Node* node);
   MACHINE_OP_LIST(DECLARE_GENERATOR)
-  MACHINE_SIMD_RETURN_NUM_OP_LIST(DECLARE_GENERATOR)
-  MACHINE_SIMD_RETURN_SIMD_OP_LIST(DECLARE_GENERATOR)
+  MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
 #undef DECLARE_GENERATOR
 
   void VisitFinishRegion(Node* node);
@@ -321,6 +356,8 @@
   void VisitCall(Node* call, BasicBlock* handler = nullptr);
   void VisitDeoptimizeIf(Node* node);
   void VisitDeoptimizeUnless(Node* node);
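+  // Conditional traps used by WebAssembly; |func_id| selects the runtime
+  // function that materializes the trap.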
+  void VisitTrapIf(Node* node, Runtime::FunctionId func_id);
+  void VisitTrapUnless(Node* node, Runtime::FunctionId func_id);
   void VisitTailCall(Node* call);
   void VisitGoto(BasicBlock* target);
   void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
@@ -351,6 +388,7 @@
   bool instruction_selection_failed() { return instruction_selection_failed_; }
 
   void MarkPairProjectionsAsWord32(Node* node);
+  bool IsSourcePositionUsed(Node* node);
 
   // ===========================================================================
 
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index 3b2311a..1067d20 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -2,11 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/instruction.h"
+
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
-#include "src/compiler/instruction.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/state-values-utils.h"
+#include "src/source-position.h"
 
 namespace v8 {
 namespace internal {
@@ -208,6 +210,15 @@
         case MachineRepresentation::kSimd128:
           os << "|s128";
           break;
+        case MachineRepresentation::kSimd1x4:
+          os << "|s1x4";
+          break;
+        case MachineRepresentation::kSimd1x8:
+          os << "|s1x8";
+          break;
+        case MachineRepresentation::kSimd1x16:
+          os << "|s1x16";
+          break;
         case MachineRepresentation::kTaggedSigned:
           os << "|ts";
           break;
@@ -433,6 +444,8 @@
       return os << "deoptimize";
     case kFlags_set:
       return os << "set";
+    case kFlags_trap:
+      return os << "trap";
   }
   UNREACHABLE();
   return os;
@@ -886,6 +899,9 @@
     case MachineRepresentation::kFloat32:
     case MachineRepresentation::kFloat64:
     case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kTaggedSigned:
     case MachineRepresentation::kTaggedPointer:
     case MachineRepresentation::kTagged:
@@ -924,9 +940,11 @@
 }
 
 int InstructionSequence::AddDeoptimizationEntry(
-    FrameStateDescriptor* descriptor, DeoptimizeReason reason) {
+    FrameStateDescriptor* descriptor, DeoptimizeKind kind,
+    DeoptimizeReason reason) {
   int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
-  deoptimization_entries_.push_back(DeoptimizationEntry(descriptor, reason));
+  deoptimization_entries_.push_back(
+      DeoptimizationEntry(descriptor, kind, reason));
   return deoptimization_id;
 }
 
@@ -985,8 +1003,18 @@
 }
 
 const RegisterConfiguration*
-InstructionSequence::GetRegisterConfigurationForTesting() {
-  return GetRegConfig();
+    InstructionSequence::registerConfigurationForTesting_ = nullptr;
+
+const RegisterConfiguration*
+InstructionSequence::RegisterConfigurationForTesting() {
+  DCHECK(registerConfigurationForTesting_ != nullptr);
+  return registerConfigurationForTesting_;
+}
+
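+// Installs a fixed register configuration for tests and redirects
+// GetRegConfig to return it.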
+void InstructionSequence::SetRegisterConfigurationForTesting(
+    const RegisterConfiguration* regConfig) {
+  registerConfigurationForTesting_ = regConfig;
+  GetRegConfig = InstructionSequence::RegisterConfigurationForTesting;
 }
 
 FrameStateDescriptor::FrameStateDescriptor(
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 327c8c1..ee7865d 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -484,6 +484,9 @@
       case MachineRepresentation::kFloat32:
       case MachineRepresentation::kFloat64:
       case MachineRepresentation::kSimd128:
+      case MachineRepresentation::kSimd1x4:
+      case MachineRepresentation::kSimd1x8:
+      case MachineRepresentation::kSimd1x16:
       case MachineRepresentation::kTaggedSigned:
       case MachineRepresentation::kTaggedPointer:
       case MachineRepresentation::kTagged:
@@ -1065,16 +1068,33 @@
   }
 
   float ToFloat32() const {
+    // TODO(ahaas): We should remove this function. If value_ has the bit
+    // representation of a signalling NaN, then returning it as float can cause
+    // the signalling bit to flip, and value_ is returned as a quiet NaN.
     DCHECK_EQ(kFloat32, type());
     return bit_cast<float>(static_cast<int32_t>(value_));
   }
 
+  uint32_t ToFloat32AsInt() const {
+    DCHECK_EQ(kFloat32, type());
+    return bit_cast<uint32_t>(static_cast<int32_t>(value_));
+  }
+
   double ToFloat64() const {
+    // TODO(ahaas): We should remove this function. If value_ has the bit
+    // representation of a signalling NaN, then returning it as double can
+    // cause the signalling bit to flip, and value_ is returned as a quiet NaN.
     if (type() == kInt32) return ToInt32();
     DCHECK_EQ(kFloat64, type());
     return bit_cast<double>(value_);
   }
 
+  uint64_t ToFloat64AsInt() const {
+    if (type() == kInt32) return ToInt32();
+    DCHECK_EQ(kFloat64, type());
+    return bit_cast<uint64_t>(value_);
+  }
+
   ExternalReference ToExternalReference() const {
     DCHECK_EQ(kExternalReference, type());
     return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
@@ -1104,52 +1124,132 @@
 // Forward declarations.
 class FrameStateDescriptor;
 
-
-enum class StateValueKind { kPlain, kNested, kDuplicate };
-
+enum class StateValueKind : uint8_t {
+  kArguments,
+  kPlain,
+  kOptimizedOut,
+  kNested,
+  kDuplicate
+};
 
 class StateValueDescriptor {
  public:
-  explicit StateValueDescriptor(Zone* zone)
+  StateValueDescriptor()
       : kind_(StateValueKind::kPlain),
         type_(MachineType::AnyTagged()),
-        id_(0),
-        fields_(zone) {}
+        id_(0) {}
 
-  static StateValueDescriptor Plain(Zone* zone, MachineType type) {
-    return StateValueDescriptor(StateValueKind::kPlain, zone, type, 0);
+  static StateValueDescriptor Arguments() {
+    return StateValueDescriptor(StateValueKind::kArguments,
+                                MachineType::AnyTagged(), 0);
   }
-  static StateValueDescriptor Recursive(Zone* zone, size_t id) {
-    return StateValueDescriptor(StateValueKind::kNested, zone,
+  static StateValueDescriptor Plain(MachineType type) {
+    return StateValueDescriptor(StateValueKind::kPlain, type, 0);
+  }
+  static StateValueDescriptor OptimizedOut() {
+    return StateValueDescriptor(StateValueKind::kOptimizedOut,
+                                MachineType::AnyTagged(), 0);
+  }
+  static StateValueDescriptor Recursive(size_t id) {
+    return StateValueDescriptor(StateValueKind::kNested,
                                 MachineType::AnyTagged(), id);
   }
-  static StateValueDescriptor Duplicate(Zone* zone, size_t id) {
-    return StateValueDescriptor(StateValueKind::kDuplicate, zone,
+  static StateValueDescriptor Duplicate(size_t id) {
+    return StateValueDescriptor(StateValueKind::kDuplicate,
                                 MachineType::AnyTagged(), id);
   }
 
-  size_t size() { return fields_.size(); }
-  ZoneVector<StateValueDescriptor>& fields() { return fields_; }
-  int IsPlain() { return kind_ == StateValueKind::kPlain; }
-  int IsNested() { return kind_ == StateValueKind::kNested; }
-  int IsDuplicate() { return kind_ == StateValueKind::kDuplicate; }
+  bool IsArguments() const { return kind_ == StateValueKind::kArguments; }
+  bool IsPlain() const { return kind_ == StateValueKind::kPlain; }
+  bool IsOptimizedOut() const { return kind_ == StateValueKind::kOptimizedOut; }
+  bool IsNested() const { return kind_ == StateValueKind::kNested; }
+  bool IsDuplicate() const { return kind_ == StateValueKind::kDuplicate; }
   MachineType type() const { return type_; }
-  MachineType GetOperandType(size_t index) const {
-    return fields_[index].type_;
-  }
   size_t id() const { return id_; }
 
  private:
-  StateValueDescriptor(StateValueKind kind, Zone* zone, MachineType type,
-                       size_t id)
-      : kind_(kind), type_(type), id_(id), fields_(zone) {}
+  StateValueDescriptor(StateValueKind kind, MachineType type, size_t id)
+      : kind_(kind), type_(type), id_(id) {}
 
   StateValueKind kind_;
   MachineType type_;
   size_t id_;
-  ZoneVector<StateValueDescriptor> fields_;
 };
 
+class StateValueList {
+ public:
+  explicit StateValueList(Zone* zone) : fields_(zone), nested_(zone) {}
+
+  size_t size() { return fields_.size(); }
+
+  struct Value {
+    StateValueDescriptor* desc;
+    StateValueList* nested;
+
+    Value(StateValueDescriptor* desc, StateValueList* nested)
+        : desc(desc), nested(nested) {}
+  };
+
+  class iterator {
+   public:
+    // Bare minimum of operators needed for range iteration.
+    bool operator!=(const iterator& other) const {
+      return field_iterator != other.field_iterator;
+    }
+    bool operator==(const iterator& other) const {
+      return field_iterator == other.field_iterator;
+    }
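+    // The nested-list cursor advances only when the current field actually
+    // carries a nested list.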
+    iterator& operator++() {
+      if (field_iterator->IsNested()) {
+        nested_iterator++;
+      }
+      ++field_iterator;
+      return *this;
+    }
+    Value operator*() {
+      StateValueDescriptor* desc = &(*field_iterator);
+      StateValueList* nested = desc->IsNested() ? *nested_iterator : nullptr;
+      return Value(desc, nested);
+    }
+
+   private:
+    friend class StateValueList;
+
+    iterator(ZoneVector<StateValueDescriptor>::iterator it,
+             ZoneVector<StateValueList*>::iterator nested)
+        : field_iterator(it), nested_iterator(nested) {}
+
+    ZoneVector<StateValueDescriptor>::iterator field_iterator;
+    ZoneVector<StateValueList*>::iterator nested_iterator;
+  };
+
+  void ReserveSize(size_t size) { fields_.reserve(size); }
+
+  StateValueList* PushRecursiveField(Zone* zone, size_t id) {
+    fields_.push_back(StateValueDescriptor::Recursive(id));
+    StateValueList* nested =
+        new (zone->New(sizeof(StateValueList))) StateValueList(zone);
+    nested_.push_back(nested);
+    return nested;
+  }
+  void PushArguments() { fields_.push_back(StateValueDescriptor::Arguments()); }
+  void PushDuplicate(size_t id) {
+    fields_.push_back(StateValueDescriptor::Duplicate(id));
+  }
+  void PushPlain(MachineType type) {
+    fields_.push_back(StateValueDescriptor::Plain(type));
+  }
+  void PushOptimizedOut() {
+    fields_.push_back(StateValueDescriptor::OptimizedOut());
+  }
+
+  iterator begin() { return iterator(fields_.begin(), nested_.begin()); }
+  iterator end() { return iterator(fields_.end(), nested_.end()); }
+
+ private:
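+  // fields_ and nested_ stay in sync: each kNested descriptor in fields_ owns
+  // the next element of nested_, consumed in order by the iterator above.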
+  ZoneVector<StateValueDescriptor> fields_;
+  ZoneVector<StateValueList*> nested_;
+};
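+
+// A sketch of how a StateValueList is typically walked (assuming a
+// FrameStateDescriptor* named |desc|); nested lists are visited recursively
+// through Value::nested:
+//
+//   for (StateValueList::Value value : *desc->GetStateValueDescriptors()) {
+//     if (value.desc->IsNested()) {
+//       // ... recurse into *value.nested ...
+//     }
+//   }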
 
 class FrameStateDescriptor : public ZoneObject {
  public:
@@ -1178,10 +1278,7 @@
   size_t GetFrameCount() const;
   size_t GetJSFrameCount() const;
 
-  MachineType GetType(size_t index) const {
-    return values_.GetOperandType(index);
-  }
-  StateValueDescriptor* GetStateValueDescriptor() { return &values_; }
+  StateValueList* GetStateValueDescriptors() { return &values_; }
 
   static const int kImpossibleValue = 0xdead;
 
@@ -1192,7 +1289,7 @@
   size_t parameters_count_;
   size_t locals_count_;
   size_t stack_count_;
-  StateValueDescriptor values_;
+  StateValueList values_;
   MaybeHandle<SharedFunctionInfo> const shared_info_;
   FrameStateDescriptor* outer_state_;
 };
@@ -1202,14 +1299,17 @@
 class DeoptimizationEntry final {
  public:
   DeoptimizationEntry() {}
-  DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeReason reason)
-      : descriptor_(descriptor), reason_(reason) {}
+  DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind,
+                      DeoptimizeReason reason)
+      : descriptor_(descriptor), kind_(kind), reason_(reason) {}
 
   FrameStateDescriptor* descriptor() const { return descriptor_; }
+  DeoptimizeKind kind() const { return kind_; }
   DeoptimizeReason reason() const { return reason_; }
 
  private:
   FrameStateDescriptor* descriptor_ = nullptr;
+  DeoptimizeKind kind_ = DeoptimizeKind::kEager;
   DeoptimizeReason reason_ = DeoptimizeReason::kNoReason;
 };
 
@@ -1469,7 +1569,7 @@
   }
 
   int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
-                             DeoptimizeReason reason);
+                             DeoptimizeKind kind, DeoptimizeReason reason);
   DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
   int GetDeoptimizationEntryCount() const {
     return static_cast<int>(deoptimization_entries_.size());
@@ -1500,7 +1600,9 @@
   void ValidateDeferredBlockEntryPaths() const;
   void ValidateSSA() const;
 
-  const RegisterConfiguration* GetRegisterConfigurationForTesting();
+  static void SetRegisterConfigurationForTesting(
+      const RegisterConfiguration* regConfig);
+  static void ClearRegisterConfigurationForTesting();
 
  private:
   friend V8_EXPORT_PRIVATE std::ostream& operator<<(
@@ -1508,6 +1610,9 @@
 
   typedef ZoneMap<const Instruction*, SourcePosition> SourcePositionMap;
 
+  static const RegisterConfiguration* RegisterConfigurationForTesting();
+  static const RegisterConfiguration* registerConfigurationForTesting_;
+
   Isolate* isolate_;
   Zone* const zone_;
   InstructionBlocks* const instruction_blocks_;
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
index 62523ca..06c9272 100644
--- a/src/compiler/int64-lowering.cc
+++ b/src/compiler/int64-lowering.cc
@@ -12,6 +12,7 @@
 #include "src/compiler/node-properties.h"
 
 #include "src/compiler/node.h"
+#include "src/objects-inl.h"
 #include "src/wasm/wasm-module.h"
 #include "src/zone/zone.h"
 
@@ -61,7 +62,8 @@
           // that they are processed after all other nodes.
           PreparePhiReplacement(input);
           stack_.push_front({input, 0});
-        } else if (input->opcode() == IrOpcode::kEffectPhi) {
+        } else if (input->opcode() == IrOpcode::kEffectPhi ||
+                   input->opcode() == IrOpcode::kLoop) {
           stack_.push_front({input, 0});
         } else {
           stack_.push_back({input, 0});
@@ -104,6 +106,9 @@
 
 void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
                                   Node*& index_high) {
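+  // The index may itself have been lowered to a (low, high) word pair; only
+  // the low word participates in address computation.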
+  if (HasReplacementLow(index)) {
+    index = GetReplacementLow(index);
+  }
 #if defined(V8_TARGET_LITTLE_ENDIAN)
   index_low = index;
   index_high = graph()->NewNode(machine()->Int32Add(), index,
@@ -233,9 +238,7 @@
         NodeProperties::ChangeOp(node, store_op);
         ReplaceNode(node, node, high_node);
       } else {
-        if (HasReplacementLow(node->InputAt(2))) {
-          node->ReplaceInput(2, GetReplacementLow(node->InputAt(2)));
-        }
+        DefaultLowering(node, true);
       }
       break;
     }
@@ -824,7 +827,7 @@
   ReplaceNode(node, replacement, nullptr);
 }
 
-bool Int64Lowering::DefaultLowering(Node* node) {
+bool Int64Lowering::DefaultLowering(Node* node, bool low_word_only) {
   bool something_changed = false;
   for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
     Node* input = node->InputAt(i);
@@ -832,7 +835,7 @@
       something_changed = true;
       node->ReplaceInput(i, GetReplacementLow(input));
     }
-    if (HasReplacementHigh(input)) {
+    if (!low_word_only && HasReplacementHigh(input)) {
       something_changed = true;
       node->InsertInput(zone(), i + 1, GetReplacementHigh(input));
     }
diff --git a/src/compiler/int64-lowering.h b/src/compiler/int64-lowering.h
index 66a54e9..811c2b2 100644
--- a/src/compiler/int64-lowering.h
+++ b/src/compiler/int64-lowering.h
@@ -47,7 +47,7 @@
   void PrepareReplacements(Node* node);
   void PushNode(Node* node);
   void LowerNode(Node* node);
-  bool DefaultLowering(Node* node);
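+  // When |low_word_only| is true, only low-word replacements are substituted
+  // and high words are left untouched.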
+  bool DefaultLowering(Node* node, bool low_word_only = false);
   void LowerComparison(Node* node, const Operator* signed_op,
                        const Operator* unsigned_op);
   void PrepareProjectionReplacements(Node* node);
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 2962e24..24eb5ce 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -4,9 +4,12 @@
 
 #include "src/compiler/js-builtin-reducer.h"
 
+#include "src/base/bits.h"
+#include "src/code-factory.h"
 #include "src/compilation-dependencies.h"
 #include "src/compiler/access-builder.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
@@ -18,17 +21,16 @@
 namespace internal {
 namespace compiler {
 
-
-// Helper class to access JSCallFunction nodes that are potential candidates
+// Helper class to access JSCall nodes that are potential candidates
 // for reduction when they have a BuiltinFunctionId associated with them.
 class JSCallReduction {
  public:
   explicit JSCallReduction(Node* node) : node_(node) {}
 
-  // Determines whether the node is a JSCallFunction operation that targets a
+  // Determines whether the node is a JSCall operation that targets a
   // constant callee being a well-known builtin with a BuiltinFunctionId.
   bool HasBuiltinFunctionId() {
-    if (node_->opcode() != IrOpcode::kJSCallFunction) return false;
+    if (node_->opcode() != IrOpcode::kJSCall) return false;
     HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
     if (!m.HasValue() || !m.Value()->IsJSFunction()) return false;
     Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
@@ -37,7 +39,7 @@
 
   // Retrieves the BuiltinFunctionId as described above.
   BuiltinFunctionId GetBuiltinFunctionId() {
-    DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+    DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
     HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
     Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
     return function->shared()->builtin_function_id();
@@ -78,13 +80,13 @@
   Node* right() { return GetJSCallInput(1); }
 
   int GetJSCallArity() {
-    DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+    DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
     // Skip first (i.e. callee) and second (i.e. receiver) operand.
     return node_->op()->ValueInputCount() - 2;
   }
 
   Node* GetJSCallInput(int index) {
-    DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+    DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
     DCHECK_LT(index, GetJSCallArity());
     // Skip first (i.e. callee) and second (i.e. receiver) operand.
     return NodeProperties::GetValueInput(node_, index + 2);
@@ -107,39 +109,14 @@
 
 namespace {
 
-// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
-// alias analyzer?
-bool IsSame(Node* a, Node* b) {
-  if (a == b) {
-    return true;
-  } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
-    return IsSame(a->InputAt(0), b);
-  } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
-    return IsSame(a, b->InputAt(0));
-  }
-  return false;
-}
-
 MaybeHandle<Map> GetMapWitness(Node* node) {
+  ZoneHandleSet<Map> maps;
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
-  // Check if the {node} is dominated by a CheckMaps with a single map
-  // for the {receiver}, and if so use that map for the lowering below.
-  for (Node* dominator = effect;;) {
-    if (dominator->opcode() == IrOpcode::kCheckMaps &&
-        IsSame(dominator->InputAt(0), receiver)) {
-      if (dominator->op()->ValueInputCount() == 2) {
-        HeapObjectMatcher m(dominator->InputAt(1));
-        if (m.HasValue()) return Handle<Map>::cast(m.Value());
-      }
-      return MaybeHandle<Map>();
-    }
-    if (dominator->op()->EffectInputCount() != 1) {
-      // Didn't find any appropriate CheckMaps node.
-      return MaybeHandle<Map>();
-    }
-    dominator = NodeProperties::GetEffectInput(dominator);
+  if (NodeProperties::InferReceiverMaps(receiver, effect, &maps)) {
+    if (maps.size() == 1) return MaybeHandle<Map>(maps[0]);
   }
+  return MaybeHandle<Map>();
 }
 
 // TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
@@ -235,17 +212,27 @@
   Node* control = NodeProperties::GetControlInput(node);
 
   if (iter_kind == ArrayIteratorKind::kTypedArray) {
-    // For JSTypedArray iterator methods, deopt if the buffer is neutered. This
-    // is potentially a deopt loop, but should be extremely unlikely.
-    DCHECK_EQ(JS_TYPED_ARRAY_TYPE, receiver_map->instance_type());
-    Node* buffer = effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
-        receiver, effect, control);
+    // See if we can skip the neutering check.
+    if (isolate()->IsArrayBufferNeuteringIntact()) {
+      // Add a code dependency so we are deoptimized in case an ArrayBuffer
+      // gets neutered.
+      dependencies()->AssumePropertyCell(
+          factory()->array_buffer_neutering_protector());
+    } else {
+      // For JSTypedArray iterator methods, deopt if the buffer is neutered.
+      // This is potentially a deopt loop, but should be extremely unlikely.
+      DCHECK_EQ(JS_TYPED_ARRAY_TYPE, receiver_map->instance_type());
+      Node* buffer = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+          receiver, effect, control);
 
-    Node* check = effect = graph()->NewNode(
-        simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
-    check = graph()->NewNode(simplified()->BooleanNot(), check);
-    effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+      // Deoptimize if the {buffer} has been neutered.
+      Node* check = effect = graph()->NewNode(
+          simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+      check = graph()->NewNode(simplified()->BooleanNot(), check);
+      effect =
+          graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+    }
   }
 
   int map_index = -1;
@@ -310,6 +297,7 @@
   Node* value = effect = graph()->NewNode(
       simplified()->Allocate(NOT_TENURED),
       jsgraph()->Constant(JSArrayIterator::kSize), effect, control);
+  NodeProperties::SetType(value, Type::OtherObject());
   effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
                             value, jsgraph()->Constant(map), effect, control);
   effect = graph()->NewNode(
@@ -403,12 +391,17 @@
       } else {
         // For value/entry iteration, first step is a mapcheck to ensure
         // inlining is still valid.
+        Node* array_map = etrue1 =
+            graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                             array, etrue1, if_true1);
         Node* orig_map = etrue1 =
             graph()->NewNode(simplified()->LoadField(
                                  AccessBuilder::ForJSArrayIteratorObjectMap()),
                              iterator, etrue1, if_true1);
-        etrue1 = graph()->NewNode(simplified()->CheckMaps(1), array, orig_map,
-                                  etrue1, if_true1);
+        Node* check_map = graph()->NewNode(simplified()->ReferenceEqual(),
+                                           array_map, orig_map);
+        etrue1 = graph()->NewNode(simplified()->CheckIf(), check_map, etrue1,
+                                  if_true1);
       }
 
       if (kind != IterationKind::kKeys) {
@@ -536,11 +529,20 @@
         simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
         array, efalse0, if_false0);
 
-    Node* check1 = efalse0 = graph()->NewNode(
-        simplified()->ArrayBufferWasNeutered(), buffer, efalse0, if_false0);
-    check1 = graph()->NewNode(simplified()->BooleanNot(), check1);
-    efalse0 =
-        graph()->NewNode(simplified()->CheckIf(), check1, efalse0, if_false0);
+    // See if we can skip the neutering check.
+    if (isolate()->IsArrayBufferNeuteringIntact()) {
+      // Add a code dependency so we are deoptimized in case an ArrayBuffer
+      // gets neutered.
+      dependencies()->AssumePropertyCell(
+          factory()->array_buffer_neutering_protector());
+    } else {
+      // Deoptimize if the array buffer was neutered.
+      Node* check1 = efalse0 = graph()->NewNode(
+          simplified()->ArrayBufferWasNeutered(), buffer, efalse0, if_false0);
+      check1 = graph()->NewNode(simplified()->BooleanNot(), check1);
+      efalse0 =
+          graph()->NewNode(simplified()->CheckIf(), check1, efalse0, if_false0);
+    }
 
     Node* length = efalse0 = graph()->NewNode(
         simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()), array,
@@ -813,20 +815,42 @@
 
 // ES6 section 22.1.3.18 Array.prototype.push ( )
 Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
-  Handle<Map> receiver_map;
   // We need exactly target, receiver and value parameters.
   if (node->op()->ValueInputCount() != 3) return NoChange();
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
   Node* value = NodeProperties::GetValueInput(node, 2);
-  if (GetMapWitness(node).ToHandle(&receiver_map) &&
-      CanInlineArrayResizeOperation(receiver_map)) {
+  ZoneHandleSet<Map> receiver_maps;
+  NodeProperties::InferReceiverMapsResult result =
+      NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+  if (receiver_maps.size() != 1) return NoChange();
+  DCHECK_NE(NodeProperties::kNoReceiverMaps, result);
+
+  // TODO(turbofan): Relax this to deal with multiple {receiver} maps.
+  Handle<Map> receiver_map = receiver_maps[0];
+  if (CanInlineArrayResizeOperation(receiver_map)) {
     // Install code dependencies on the {receiver} prototype maps and the
     // global array protector cell.
     dependencies()->AssumePropertyCell(factory()->array_protector());
     dependencies()->AssumePrototypeMapsStable(receiver_map);
 
+    // If the {receiver_maps} information is not reliable, we need
+    // to check that the {receiver} still has one of these maps.
+    if (result == NodeProperties::kUnreliableReceiverMaps) {
+      if (receiver_map->is_stable()) {
+        dependencies()->AssumeMapStable(receiver_map);
+      } else {
+        // TODO(turbofan): This is a potential - yet unlikely - deoptimization
+        // loop, since we might not learn from this deoptimization in baseline
+        // code. We need a way to learn from deoptimizations in optimized code
+        // to address these problems.
+        effect = graph()->NewNode(
+            simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps),
+            receiver, effect, control);
+      }
+    }
+
     // TODO(turbofan): Perform type checks on the {value}. We are not guaranteed
     // to learn from these checks in case they fail, as the witness (i.e. the
     // map check from the LoadIC for a.push) might not be executed in baseline
@@ -890,39 +914,24 @@
                             InstanceType instance_type) {
   for (Node* dominator = effect;;) {
     if (dominator->opcode() == IrOpcode::kCheckMaps &&
-        IsSame(dominator->InputAt(0), receiver)) {
+        NodeProperties::IsSame(dominator->InputAt(0), receiver)) {
+      ZoneHandleSet<Map> const& maps =
+          CheckMapsParametersOf(dominator->op()).maps();
       // Check if all maps have the given {instance_type}.
-      for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
-        Node* const map = NodeProperties::GetValueInput(dominator, i);
-        Type* const map_type = NodeProperties::GetType(map);
-        if (!map_type->IsHeapConstant()) return false;
-        Handle<Map> const map_value =
-            Handle<Map>::cast(map_type->AsHeapConstant()->Value());
-        if (map_value->instance_type() != instance_type) return false;
+      for (size_t i = 0; i < maps.size(); ++i) {
+        if (maps[i]->instance_type() != instance_type) return false;
       }
       return true;
     }
-    switch (dominator->opcode()) {
-      case IrOpcode::kStoreField: {
-        FieldAccess const& access = FieldAccessOf(dominator->op());
-        if (access.base_is_tagged == kTaggedBase &&
-            access.offset == HeapObject::kMapOffset) {
-          return false;
-        }
-        break;
-      }
-      case IrOpcode::kStoreElement:
-      case IrOpcode::kStoreTypedElement:
-        break;
-      default: {
-        DCHECK_EQ(1, dominator->op()->EffectOutputCount());
-        if (dominator->op()->EffectInputCount() != 1 ||
-            !dominator->op()->HasProperty(Operator::kNoWrite)) {
-          // Didn't find any appropriate CheckMaps node.
-          return false;
-        }
-        break;
-      }
+    // The instance type doesn't change for JSReceiver values, so we
+    // don't need to pay attention to potentially side-effecting nodes
+    // here. Strings and internal structures like FixedArray and
+    // FixedDoubleArray behave differently, but this function is never used
+    // for those types.
+    DCHECK_LE(FIRST_JS_RECEIVER_TYPE, instance_type);
+    DCHECK_EQ(1, dominator->op()->EffectOutputCount());
+    if (dominator->op()->EffectInputCount() != 1) {
+      // Didn't find any appropriate CheckMaps node.
+      return false;
     }
     dominator = NodeProperties::GetEffectInput(dominator);
   }
@@ -930,6 +939,14 @@
 
 }  // namespace
 
+// ES6 section 20.3.3.1 Date.now ( )
+Reduction JSBuiltinReducer::ReduceDateNow(Node* node) {
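+  // Date.now() ignores any arguments, so strip them and morph the call into
+  // a direct call to the DateCurrentTime runtime function.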
+  NodeProperties::RemoveValueInputs(node);
+  NodeProperties::ChangeOp(
+      node, javascript()->CallRuntime(Runtime::kDateCurrentTime));
+  return Changed(node);
+}
+
 // ES6 section 20.3.4.10 Date.prototype.getTime ( )
 Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
   Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -945,34 +962,6 @@
   return NoChange();
 }
 
-// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
-Reduction JSBuiltinReducer::ReduceFunctionHasInstance(Node* node) {
-  Node* receiver = NodeProperties::GetValueInput(node, 1);
-  Node* object = (node->op()->ValueInputCount() >= 3)
-                     ? NodeProperties::GetValueInput(node, 2)
-                     : jsgraph()->UndefinedConstant();
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // TODO(turbofan): If JSOrdinaryToInstance raises an exception, the
-  // stack trace doesn't contain the @@hasInstance call; we have the
-  // corresponding bug in the baseline case. Some massaging of the frame
-  // state would be necessary here.
-
-  // Morph this {node} into a JSOrdinaryHasInstance node.
-  node->ReplaceInput(0, receiver);
-  node->ReplaceInput(1, object);
-  node->ReplaceInput(2, context);
-  node->ReplaceInput(3, frame_state);
-  node->ReplaceInput(4, effect);
-  node->ReplaceInput(5, control);
-  node->TrimInputCount(6);
-  NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
-  return Changed(node);
-}
-
 // ES6 section 18.2.2 isFinite ( number )
 Reduction JSBuiltinReducer::ReduceGlobalIsFinite(Node* node) {
   JSCallReduction r(node);
@@ -1485,6 +1474,117 @@
   return NoChange();
 }
 
+// ES6 section #sec-object.create Object.create(proto, properties)
+Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
+  // We need exactly the target, receiver and prototype parameters; the
+  // optional properties argument is not handled here.
+  int arg_count = node->op()->ValueInputCount();
+  if (arg_count != 3) return NoChange();
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* prototype = NodeProperties::GetValueInput(node, 2);
+  Type* prototype_type = NodeProperties::GetType(prototype);
+  Handle<Map> instance_map;
+  if (!prototype_type->IsHeapConstant()) return NoChange();
+  Handle<HeapObject> prototype_const =
+      prototype_type->AsHeapConstant()->Value();
+  if (!prototype_const->IsNull(isolate()) && !prototype_const->IsJSReceiver()) {
+    return NoChange();
+  }
+  instance_map = Map::GetObjectCreateMap(prototype_const);
+  Node* properties = jsgraph()->EmptyFixedArrayConstant();
+  if (instance_map->is_dictionary_map()) {
+    // Allocate an empty NameDictionary as backing store for the properties.
+    Handle<Map> map(isolate()->heap()->hash_table_map(), isolate());
+    int capacity =
+        NameDictionary::ComputeCapacity(NameDictionary::kInitialCapacity);
+    DCHECK(base::bits::IsPowerOfTwo32(capacity));
+    int length = NameDictionary::EntryToIndex(capacity);
+    int size = NameDictionary::SizeFor(length);
+
+    effect = graph()->NewNode(
+        common()->BeginRegion(RegionObservability::kNotObservable), effect);
+
+    Node* value = effect =
+        graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+                         jsgraph()->Constant(size), effect, control);
+    effect =
+        graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+                         value, jsgraph()->HeapConstant(map), effect, control);
+
+    // Initialize FixedArray fields.
+    effect = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForFixedArrayLength()), value,
+        jsgraph()->SmiConstant(length), effect, control);
+    // Initialize HashTable fields.
+    effect =
+        graph()->NewNode(simplified()->StoreField(
+                             AccessBuilder::ForHashTableBaseNumberOfElements()),
+                         value, jsgraph()->SmiConstant(0), effect, control);
+    effect = graph()->NewNode(
+        simplified()->StoreField(
+            AccessBuilder::ForHashTableBaseNumberOfDeletedElement()),
+        value, jsgraph()->SmiConstant(0), effect, control);
+    effect = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForHashTableBaseCapacity()),
+        value, jsgraph()->SmiConstant(capacity), effect, control);
+    // Initialize Dictionary fields.
+    Node* undefined = jsgraph()->UndefinedConstant();
+    effect = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForDictionaryMaxNumberKey()),
+        value, undefined, effect, control);
+    effect = graph()->NewNode(
+        simplified()->StoreField(
+            AccessBuilder::ForDictionaryNextEnumerationIndex()),
+        value, jsgraph()->SmiConstant(PropertyDetails::kInitialIndex), effect,
+        control);
+    // Initialize the Properties fields.
+    for (int index = NameDictionary::kNextEnumerationIndexIndex + 1;
+         index < length; index++) {
+      effect = graph()->NewNode(
+          simplified()->StoreField(
+              AccessBuilder::ForFixedArraySlot(index, kNoWriteBarrier)),
+          value, undefined, effect, control);
+    }
+    properties = effect =
+        graph()->NewNode(common()->FinishRegion(), value, effect);
+  }
+
+  int const instance_size = instance_map->instance_size();
+  if (instance_size > kMaxRegularHeapObjectSize) return NoChange();
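+  // Add a code dependency so we deoptimize should {instance_map} change.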
+  dependencies()->AssumeInitialMapCantChange(instance_map);
+
+  // Emit code to allocate the JSObject instance for the given
+  // {instance_map}.
+  effect = graph()->NewNode(
+      common()->BeginRegion(RegionObservability::kNotObservable), effect);
+  Node* value = effect =
+      graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+                       jsgraph()->Constant(instance_size), effect, control);
+  effect =
+      graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()), value,
+                       jsgraph()->HeapConstant(instance_map), effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForJSObjectProperties()), value,
+      properties, effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
+      jsgraph()->EmptyFixedArrayConstant(), effect, control);
+  // Initialize Object fields.
+  Node* undefined = jsgraph()->UndefinedConstant();
+  for (int offset = JSObject::kHeaderSize; offset < instance_size;
+       offset += kPointerSize) {
+    effect = graph()->NewNode(
+        simplified()->StoreField(
+            AccessBuilder::ForJSObjectOffset(offset, kNoWriteBarrier)),
+        value, undefined, effect, control);
+  }
+  value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+
+  // Replace the {node} with the freshly allocated and initialized object.
+  ReplaceWithValue(node, value, effect, control);
+  return Replace(value);
+}
+
 // ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
 Reduction JSBuiltinReducer::ReduceStringFromCharCode(Node* node) {
   JSCallReduction r(node);
@@ -1509,7 +1609,7 @@
   // the lowering below.
   for (Node* dominator = effect;;) {
     if (dominator->opcode() == IrOpcode::kCheckString &&
-        IsSame(dominator->InputAt(0), receiver)) {
+        NodeProperties::IsSame(dominator->InputAt(0), receiver)) {
       return dominator;
     }
     if (dominator->op()->EffectInputCount() != 1) {
@@ -1531,8 +1631,17 @@
     Node* effect = NodeProperties::GetEffectInput(node);
     Node* control = NodeProperties::GetControlInput(node);
 
-    if (index_type->Is(Type::Unsigned32())) {
+    if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
       if (Node* receiver = GetStringWitness(node)) {
+        if (!index_type->Is(Type::Unsigned32())) {
+          // Map -0 and NaN to 0 (as per ToInteger), and the values in
+          // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+          // be considered out-of-bounds as well, because of the maximal
+          // String length limit in V8.
+          STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+          index = graph()->NewNode(simplified()->NumberToUint32(), index);
+        }
+
         // Determine the {receiver} length.
         Node* receiver_length = effect = graph()->NewNode(
             simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
@@ -1544,16 +1653,10 @@
         Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
                                         check, control);
 
+        // Return the character from the {receiver} as single character string.
         Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-        Node* vtrue;
-        {
-          // Load the character from the {receiver}.
-          vtrue = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
-                                   index, if_true);
-
-          // Return it as single character string.
-          vtrue = graph()->NewNode(simplified()->StringFromCharCode(), vtrue);
-        }
+        Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
+                                       index, if_true);
 
         // Return the empty string otherwise.
         Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -1582,8 +1685,17 @@
     Node* effect = NodeProperties::GetEffectInput(node);
     Node* control = NodeProperties::GetControlInput(node);
 
-    if (index_type->Is(Type::Unsigned32())) {
+    if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
       if (Node* receiver = GetStringWitness(node)) {
+        if (!index_type->Is(Type::Unsigned32())) {
+          // Map -0 and NaN to 0 (as per ToInteger), and the values in
+          // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+          // be considered out-of-bounds as well, because of the maximal
+          // String length limit in V8.
+          STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+          index = graph()->NewNode(simplified()->NumberToUint32(), index);
+        }
+
         // Determine the {receiver} length.
         Node* receiver_length = effect = graph()->NewNode(
             simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
@@ -1618,6 +1730,34 @@
   return NoChange();
 }
 
+// ES6 String.prototype.indexOf(searchString [, position])
+// #sec-string.prototype.indexof
+Reduction JSBuiltinReducer::ReduceStringIndexOf(Node* node) {
+  // We need at least target, receiver and search_string parameters.
+  if (node->op()->ValueInputCount() >= 3) {
+    Node* search_string = NodeProperties::GetValueInput(node, 2);
+    Type* search_string_type = NodeProperties::GetType(search_string);
+    Node* position = (node->op()->ValueInputCount() >= 4)
+                         ? NodeProperties::GetValueInput(node, 3)
+                         : jsgraph()->ZeroConstant();
+    Type* position_type = NodeProperties::GetType(position);
+
+    if (search_string_type->Is(Type::String()) &&
+        position_type->Is(Type::SignedSmall())) {
+      if (Node* receiver = GetStringWitness(node)) {
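+        // Replace the builtin call with the simplified StringIndexOf operator.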
+        RelaxEffectsAndControls(node);
+        node->ReplaceInput(0, receiver);
+        node->ReplaceInput(1, search_string);
+        node->ReplaceInput(2, position);
+        node->TrimInputCount(3);
+        NodeProperties::ChangeOp(node, simplified()->StringIndexOf());
+        return Changed(node);
+      }
+    }
+  }
+  return NoChange();
+}
+
 Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
   if (Node* receiver = GetStringWitness(node)) {
     Node* effect = NodeProperties::GetEffectInput(node);
@@ -1632,6 +1772,7 @@
     Node* value = effect = graph()->NewNode(
         simplified()->Allocate(NOT_TENURED),
         jsgraph()->Constant(JSStringIterator::kSize), effect, control);
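+    // Assign a type to the new allocation for the benefit of later phases.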
+    NodeProperties::SetType(value, Type::OtherObject());
     effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
                               value, map, effect, control);
     effect = graph()->NewNode(
@@ -1805,21 +1946,29 @@
   Node* control = NodeProperties::GetControlInput(node);
   if (HasInstanceTypeWitness(receiver, effect, instance_type)) {
     // Load the {receiver}s field.
-    Node* receiver_value = effect = graph()->NewNode(
-        simplified()->LoadField(access), receiver, effect, control);
+    Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
+                                            receiver, effect, control);
 
-    // Check if the {receiver}s buffer was neutered.
-    Node* receiver_buffer = effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
-        receiver, effect, control);
-    Node* check = effect =
-        graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
-                         receiver_buffer, effect, control);
+    // See if we can skip the neutering check.
+    if (isolate()->IsArrayBufferNeuteringIntact()) {
+      // Add a code dependency so we are deoptimized in case an ArrayBuffer
+      // gets neutered.
+      dependencies()->AssumePropertyCell(
+          factory()->array_buffer_neutering_protector());
+    } else {
+      // Check if the {receiver}s buffer was neutered.
+      Node* receiver_buffer = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+          receiver, effect, control);
+      Node* check = effect =
+          graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
+                           receiver_buffer, effect, control);
 
-    // Default to zero if the {receiver}s buffer was neutered.
-    Node* value = graph()->NewNode(
-        common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
-        check, jsgraph()->ZeroConstant(), receiver_value);
+      // Default to zero if the {receiver}s buffer was neutered.
+      value = graph()->NewNode(
+          common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+          check, jsgraph()->ZeroConstant(), value);
+    }
 
     ReplaceWithValue(node, value, effect, control);
     return Replace(value);
@@ -1846,11 +1995,10 @@
       return ReduceArrayPop(node);
     case kArrayPush:
       return ReduceArrayPush(node);
+    case kDateNow:
+      return ReduceDateNow(node);
     case kDateGetTime:
       return ReduceDateGetTime(node);
-    case kFunctionHasInstance:
-      return ReduceFunctionHasInstance(node);
-      break;
     case kGlobalIsFinite:
       reduction = ReduceGlobalIsFinite(node);
       break;
@@ -1971,6 +2119,9 @@
     case kNumberParseInt:
       reduction = ReduceNumberParseInt(node);
       break;
+    case kObjectCreate:
+      reduction = ReduceObjectCreate(node);
+      break;
     case kStringFromCharCode:
       reduction = ReduceStringFromCharCode(node);
       break;
@@ -1978,6 +2129,8 @@
       return ReduceStringCharAt(node);
     case kStringCharCodeAt:
       return ReduceStringCharCodeAt(node);
+    case kStringIndexOf:
+      return ReduceStringIndexOf(node);
     case kStringIterator:
       return ReduceStringIterator(node);
     case kStringIteratorNext:
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index 4af3084..6ff06e3 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -57,8 +57,8 @@
                                          IterationKind kind);
   Reduction ReduceArrayPop(Node* node);
   Reduction ReduceArrayPush(Node* node);
+  Reduction ReduceDateNow(Node* node);
   Reduction ReduceDateGetTime(Node* node);
-  Reduction ReduceFunctionHasInstance(Node* node);
   Reduction ReduceGlobalIsFinite(Node* node);
   Reduction ReduceGlobalIsNaN(Node* node);
   Reduction ReduceMathAbs(Node* node);
@@ -99,9 +99,11 @@
   Reduction ReduceNumberIsNaN(Node* node);
   Reduction ReduceNumberIsSafeInteger(Node* node);
   Reduction ReduceNumberParseInt(Node* node);
+  Reduction ReduceObjectCreate(Node* node);
   Reduction ReduceStringCharAt(Node* node);
   Reduction ReduceStringCharCodeAt(Node* node);
   Reduction ReduceStringFromCharCode(Node* node);
+  Reduction ReduceStringIndexOf(Node* node);
   Reduction ReduceStringIterator(Node* node);
   Reduction ReduceStringIteratorNext(Node* node);
   Reduction ReduceArrayBufferViewAccessor(Node* node,
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index e48fce9..c0deb91 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -4,11 +4,15 @@
 
 #include "src/compiler/js-call-reducer.h"
 
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/compilation-dependencies.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/feedback-vector-inl.h"
 #include "src/objects-inl.h"
-#include "src/type-feedback-vector-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -16,10 +20,14 @@
 
 Reduction JSCallReducer::Reduce(Node* node) {
   switch (node->opcode()) {
-    case IrOpcode::kJSCallConstruct:
-      return ReduceJSCallConstruct(node);
-    case IrOpcode::kJSCallFunction:
-      return ReduceJSCallFunction(node);
+    case IrOpcode::kJSConstruct:
+      return ReduceJSConstruct(node);
+    case IrOpcode::kJSConstructWithSpread:
+      return ReduceJSConstructWithSpread(node);
+    case IrOpcode::kJSCall:
+      return ReduceJSCall(node);
+    case IrOpcode::kJSCallWithSpread:
+      return ReduceJSCallWithSpread(node);
     default:
       break;
   }
@@ -29,9 +37,9 @@
 
 // ES6 section 22.1.1 The Array Constructor
 Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* target = NodeProperties::GetValueInput(node, 0);
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+  CallParameters const& p = CallParametersOf(node->op());
 
   // Check if we have an allocation site from the CallIC.
   Handle<AllocationSite> site;
@@ -58,8 +66,8 @@
 
 // ES6 section 20.1.1 The Number Constructor
 Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+  CallParameters const& p = CallParametersOf(node->op());
 
   // Turn the {node} into a {JSToNumber} call.
   DCHECK_LE(2u, p.arity());
@@ -73,9 +81,13 @@
 
 // ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
 Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* target = NodeProperties::GetValueInput(node, 0);
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+  CallParameters const& p = CallParametersOf(node->op());
+  // Tail calls to Function.prototype.apply are not properly supported
+  // down the pipeline, so we disable this optimization completely for
+  // tail calls (for now).
+  if (p.tail_call_mode() == TailCallMode::kAllow) return NoChange();
   Handle<JSFunction> apply =
       Handle<JSFunction>::cast(HeapObjectMatcher(target).Value());
   size_t arity = p.arity();
@@ -101,35 +113,65 @@
       if (edge.from() == node) continue;
       return NoChange();
     }
+    // Check if the arguments can be handled in the fast case (i.e. we don't
+    // have aliased sloppy arguments), and compute the {start_index} for
+    // rest parameters.
+    CreateArgumentsType const type = CreateArgumentsTypeOf(arg_array->op());
+    Node* frame_state = NodeProperties::GetFrameStateInput(arg_array);
+    FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+    int formal_parameter_count;
+    int start_index = 0;
+    {
+      Handle<SharedFunctionInfo> shared;
+      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+      formal_parameter_count = shared->internal_formal_parameter_count();
+    }
+    if (type == CreateArgumentsType::kMappedArguments) {
+      // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
+      if (formal_parameter_count != 0) return NoChange();
+    } else if (type == CreateArgumentsType::kRestParameter) {
+      start_index = formal_parameter_count;
+    }
+    // Check if we are applying to inlined arguments or to the arguments of
+    // the outermost function.
+    Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+    if (outer_state->opcode() != IrOpcode::kFrameState) {
+      // TODO(jarin,bmeurer): Support the NewUnmappedArgumentsElement and
+      // NewRestParameterElements in the EscapeAnalysis and Deoptimizer
+      // instead, then we don't need this hack.
+      // This only works with zero formal parameters because of missing
+      // deoptimizer support.
+      if (type != CreateArgumentsType::kRestParameter &&
+          formal_parameter_count == 0) {
+        // There are no other uses of the {arg_array} except in StateValues,
+        // so we just replace {arg_array} with a marker for the Deoptimizer
+        // that this refers to the arguments object.
+        Node* arguments = graph()->NewNode(common()->ArgumentsObjectState());
+        ReplaceWithValue(arg_array, arguments);
+      }
+
+      // Reduce {node} to a JSCallForwardVarargs operation, which just
+      // re-pushes the incoming arguments and calls the {target}.
+      node->RemoveInput(0);  // Function.prototype.apply
+      node->RemoveInput(2);  // arguments
+      NodeProperties::ChangeOp(node, javascript()->CallForwardVarargs(
+                                         start_index, p.tail_call_mode()));
+      return Changed(node);
+    }
     // Get to the actual frame state from which to extract the arguments;
     // we can only optimize this in case the {node} was already inlined into
     // some other function (and same for the {arg_array}).
-    CreateArgumentsType type = CreateArgumentsTypeOf(arg_array->op());
-    Node* frame_state = NodeProperties::GetFrameStateInput(arg_array);
-    Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
-    if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
     FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
     if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
       // Need to take the parameters from the arguments adaptor.
       frame_state = outer_state;
     }
-    FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-    int start_index = 0;
-    if (type == CreateArgumentsType::kMappedArguments) {
-      // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
-      Handle<SharedFunctionInfo> shared;
-      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
-      if (shared->internal_formal_parameter_count() != 0) return NoChange();
-    } else if (type == CreateArgumentsType::kRestParameter) {
-      Handle<SharedFunctionInfo> shared;
-      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
-      start_index = shared->internal_formal_parameter_count();
-    }
     // Remove the argArray input from the {node}.
     node->RemoveInput(static_cast<int>(--arity));
-    // Add the actual parameters to the {node}, skipping the receiver.
+    // Add the actual parameters to the {node}, skipping the receiver,
+    // starting from {start_index}.
     Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
-    for (int i = start_index + 1; i < state_info.parameter_count(); ++i) {
+    for (int i = start_index + 1; i < parameters->InputCount(); ++i) {
       node->InsertInput(graph()->zone(), static_cast<int>(arity),
                         parameters->InputAt(i));
       ++arity;
@@ -140,24 +182,25 @@
   } else {
     return NoChange();
   }
-  // Change {node} to the new {JSCallFunction} operator.
+  // Change {node} to the new {JSCall} operator.
   NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, p.frequency(), VectorSlotPair(),
-                                       convert_mode, p.tail_call_mode()));
+      node,
+      javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode,
+                         p.tail_call_mode()));
   // Change context of {node} to the Function.prototype.apply context,
   // to ensure any exception is thrown in the correct context.
   NodeProperties::ReplaceContextInput(
       node, jsgraph()->HeapConstant(handle(apply->context(), isolate())));
-  // Try to further reduce the JSCallFunction {node}.
-  Reduction const reduction = ReduceJSCallFunction(node);
+  // Try to further reduce the JSCall {node}.
+  Reduction const reduction = ReduceJSCall(node);
   return reduction.Changed() ? reduction : Changed(node);
 }
 
 
 // ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
 Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+  CallParameters const& p = CallParametersOf(node->op());
   Handle<JSFunction> call = Handle<JSFunction>::cast(
       HeapObjectMatcher(NodeProperties::GetValueInput(node, 0)).Value());
   // Change context of {node} to the Function.prototype.call context,
@@ -182,83 +225,276 @@
     --arity;
   }
   NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, p.frequency(), VectorSlotPair(),
-                                       convert_mode, p.tail_call_mode()));
-  // Try to further reduce the JSCallFunction {node}.
-  Reduction const reduction = ReduceJSCallFunction(node);
+      node,
+      javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode,
+                         p.tail_call_mode()));
+  // Try to further reduce the JSCall {node}.
+  Reduction const reduction = ReduceJSCall(node);
   return reduction.Changed() ? reduction : Changed(node);
 }
 
+// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] (V)
+Reduction JSCallReducer::ReduceFunctionPrototypeHasInstance(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* object = (node->op()->ValueInputCount() >= 3)
+                     ? NodeProperties::GetValueInput(node, 2)
+                     : jsgraph()->UndefinedConstant();
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // TODO(turbofan): If JSOrdinaryToInstance raises an exception, the
+  // stack trace doesn't contain the @@hasInstance call; we have the
+  // corresponding bug in the baseline case. Some massaging of the frame
+  // state would be necessary here.
+
+  // Morph this {node} into a JSOrdinaryHasInstance node.
+  node->ReplaceInput(0, receiver);
+  node->ReplaceInput(1, object);
+  node->ReplaceInput(2, context);
+  node->ReplaceInput(3, frame_state);
+  node->ReplaceInput(4, effect);
+  node->ReplaceInput(5, control);
+  node->TrimInputCount(6);
+  NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
+  return Changed(node);
+}
+
 namespace {
 
-// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
-// alias analyzer?
-bool IsSame(Node* a, Node* b) {
-  if (a == b) {
-    return true;
-  } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
-    return IsSame(a->InputAt(0), b);
-  } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
-    return IsSame(a, b->InputAt(0));
+bool CanInlineApiCall(Isolate* isolate, Node* node,
+                      Handle<FunctionTemplateInfo> function_template_info) {
+  DCHECK(node->opcode() == IrOpcode::kJSCall);
+  if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
+  if (function_template_info->call_code()->IsUndefined(isolate)) {
+    return false;
   }
-  return false;
-}
-
-// TODO(turbofan): Share with similar functionality in JSInliningHeuristic
-// and JSNativeContextSpecialization, i.e. move to NodeProperties helper?!
-MaybeHandle<Map> InferReceiverMap(Node* node) {
-  Node* receiver = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  // Check if the {node} is dominated by a CheckMaps with a single map
-  // for the {receiver}, and if so use that map for the lowering below.
-  for (Node* dominator = effect;;) {
-    if (dominator->opcode() == IrOpcode::kCheckMaps &&
-        IsSame(dominator->InputAt(0), receiver)) {
-      if (dominator->op()->ValueInputCount() == 2) {
-        HeapObjectMatcher m(dominator->InputAt(1));
-        if (m.HasValue()) return Handle<Map>::cast(m.Value());
-      }
-      return MaybeHandle<Map>();
-    }
-    if (dominator->op()->EffectInputCount() != 1) {
-      // Didn't find any appropriate CheckMaps node.
-      return MaybeHandle<Map>();
-    }
-    dominator = NodeProperties::GetEffectInput(dominator);
+  CallParameters const& params = CallParametersOf(node->op());
+  // CallApiCallbackStub expects the target in a register and treats the
+  // receiver as an implicit argument, so we exclude both from the argument
+  // count.
+  int const argc = static_cast<int>(params.arity()) - 2;
+  if (argc > CallApiCallbackStub::kArgMax || !params.feedback().IsValid()) {
+    return false;
   }
+  HeapObjectMatcher receiver(NodeProperties::GetValueInput(node, 1));
+  if (!receiver.HasValue()) {
+    return false;
+  }
+  return receiver.Value()->IsUndefined(isolate) ||
+         (receiver.Value()->map()->IsJSObjectMap() &&
+          !receiver.Value()->map()->is_access_check_needed());
 }
 
 }  // namespace
 
+JSCallReducer::HolderLookup JSCallReducer::LookupHolder(
+    Handle<JSObject> object,
+    Handle<FunctionTemplateInfo> function_template_info,
+    Handle<JSObject>* holder) {
+  DCHECK(object->map()->IsJSObjectMap());
+  Handle<Map> object_map(object->map());
+  Handle<FunctionTemplateInfo> expected_receiver_type;
+  if (!function_template_info->signature()->IsUndefined(isolate())) {
+    expected_receiver_type =
+        handle(FunctionTemplateInfo::cast(function_template_info->signature()));
+  }
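+  // Without a signature, or when the signature matches the receiver map,
+  // the receiver itself is the holder.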
+  if (expected_receiver_type.is_null() ||
+      expected_receiver_type->IsTemplateFor(*object_map)) {
+    *holder = Handle<JSObject>::null();
+    return kHolderIsReceiver;
+  }
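+  // Otherwise search the hidden prototype chain for a matching holder.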
+  while (object_map->has_hidden_prototype()) {
+    Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
+    object_map = handle(prototype->map());
+    if (expected_receiver_type->IsTemplateFor(*object_map)) {
+      *holder = prototype;
+      return kHolderFound;
+    }
+  }
+  return kHolderNotFound;
+}
+
 // ES6 section B.2.2.1.1 get Object.prototype.__proto__
 Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
 
   // Try to determine the {receiver} map.
-  Handle<Map> receiver_map;
-  if (InferReceiverMap(node).ToHandle(&receiver_map)) {
-    // Check if we can constant-fold the {receiver} map.
-    if (!receiver_map->IsJSProxyMap() &&
-        !receiver_map->has_hidden_prototype() &&
-        !receiver_map->is_access_check_needed()) {
-      Handle<Object> receiver_prototype(receiver_map->prototype(), isolate());
-      Node* value = jsgraph()->Constant(receiver_prototype);
-      ReplaceWithValue(node, value);
-      return Replace(value);
+  ZoneHandleSet<Map> receiver_maps;
+  NodeProperties::InferReceiverMapsResult result =
+      NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+  if (result == NodeProperties::kReliableReceiverMaps) {
+    Handle<Map> candidate_map(
+        receiver_maps[0]->GetPrototypeChainRootMap(isolate()));
+    Handle<Object> candidate_prototype(candidate_map->prototype(), isolate());
+
+    // Check if we can constant-fold the {candidate_prototype}.
+    for (size_t i = 0; i < receiver_maps.size(); ++i) {
+      Handle<Map> const receiver_map(
+          receiver_maps[i]->GetPrototypeChainRootMap(isolate()));
+      if (receiver_map->IsJSProxyMap() ||
+          receiver_map->has_hidden_prototype() ||
+          receiver_map->is_access_check_needed() ||
+          receiver_map->prototype() != *candidate_prototype) {
+        return NoChange();
+      }
     }
+    Node* value = jsgraph()->Constant(candidate_prototype);
+    ReplaceWithValue(node, value);
+    return Replace(value);
   }
 
   return NoChange();
 }
 
-Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+Reduction JSCallReducer::ReduceCallApiFunction(
+    Node* node, Node* target,
+    Handle<FunctionTemplateInfo> function_template_info) {
+  Isolate* isolate = this->isolate();
+  CHECK(!isolate->serializer_enabled());
+  HeapObjectMatcher m(target);
+  DCHECK(m.HasValue() && m.Value()->IsJSFunction());
+  if (!CanInlineApiCall(isolate, node, function_template_info)) {
+    return NoChange();
+  }
+  Handle<CallHandlerInfo> call_handler_info(
+      handle(CallHandlerInfo::cast(function_template_info->call_code())));
+  Handle<Object> data(call_handler_info->data(), isolate);
+
+  Node* receiver_node = NodeProperties::GetValueInput(node, 1);
+  CallParameters const& params = CallParametersOf(node->op());
+
+  Handle<HeapObject> receiver = HeapObjectMatcher(receiver_node).Value();
+  bool const receiver_is_undefined = receiver->IsUndefined(isolate);
+  if (receiver_is_undefined) {
+    receiver = handle(Handle<JSFunction>::cast(m.Value())->global_proxy());
+  } else {
+    DCHECK(receiver->map()->IsJSObjectMap() &&
+           !receiver->map()->is_access_check_needed());
+  }
+
+  Handle<JSObject> holder;
+  HolderLookup lookup = LookupHolder(Handle<JSObject>::cast(receiver),
+                                     function_template_info, &holder);
+  if (lookup == kHolderNotFound) return NoChange();
+  if (receiver_is_undefined) {
+    receiver_node = jsgraph()->HeapConstant(receiver);
+    NodeProperties::ReplaceValueInput(node, receiver_node, 1);
+  }
+  Node* holder_node =
+      lookup == kHolderFound ? jsgraph()->HeapConstant(holder) : receiver_node;
+
+  Zone* zone = graph()->zone();
+  // Same as CanInlineApiCall: exclude the target (which goes in a register) and
+  // the receiver (which is implicitly counted by CallApiCallbackStub) from the
+  // argument count.
+  int const argc = static_cast<int>(params.arity() - 2);
+  CallApiCallbackStub stub(isolate, argc, data->IsUndefined(isolate), false);
+  CallInterfaceDescriptor cid = stub.GetCallInterfaceDescriptor();
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate, zone, cid,
+      cid.GetStackParameterCount() + argc + 1 /* implicit receiver */,
+      CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
+      MachineType::AnyTagged(), 1);
+  ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
+  ExternalReference function_reference(
+      &api_function, ExternalReference::DIRECT_API_CALL, isolate);
+
+  // CallApiCallbackStub's register arguments: code, target, call data, holder,
+  // function address.
+  node->InsertInput(zone, 0, jsgraph()->HeapConstant(stub.GetCode()));
+  node->InsertInput(zone, 2, jsgraph()->Constant(data));
+  node->InsertInput(zone, 3, holder_node);
+  node->InsertInput(zone, 4, jsgraph()->ExternalConstant(function_reference));
+  NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+  return Changed(node);
+}
+
+Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
+  DCHECK(node->opcode() == IrOpcode::kJSCallWithSpread ||
+         node->opcode() == IrOpcode::kJSConstructWithSpread);
+
+  // Check upfront that we can actually avoid iterating the spread.
+  if (!isolate()->initial_array_iterator_prototype_map()->is_stable()) {
+    return NoChange();
+  }
+
+  Node* spread = NodeProperties::GetValueInput(node, arity);
+
+  // Check if spread is an arguments object, and {node} is the only value user
+  // of spread (except for value uses in frame states).
+  if (spread->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
+  for (Edge edge : spread->use_edges()) {
+    if (edge.from()->opcode() == IrOpcode::kStateValues) continue;
+    if (!NodeProperties::IsValueEdge(edge)) continue;
+    if (edge.from() == node) continue;
+    return NoChange();
+  }
+
+  // Get to the actual frame state from which to extract the arguments;
+  // we can only optimize this in case the {node} was already inlined into
+  // some other function (and same for the {spread}).
+  CreateArgumentsType type = CreateArgumentsTypeOf(spread->op());
+  Node* frame_state = NodeProperties::GetFrameStateInput(spread);
+  Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+  if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
+  FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
+  if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
+    // Need to take the parameters from the arguments adaptor.
+    frame_state = outer_state;
+  }
+  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+  int start_index = 0;
+  if (type == CreateArgumentsType::kMappedArguments) {
+    // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
+    Handle<SharedFunctionInfo> shared;
+    if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+    if (shared->internal_formal_parameter_count() != 0) return NoChange();
+  } else if (type == CreateArgumentsType::kRestParameter) {
+    Handle<SharedFunctionInfo> shared;
+    if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+    start_index = shared->internal_formal_parameter_count();
+
+    // Only check the array iterator protector when we have a rest object.
+    if (!isolate()->IsArrayIteratorLookupChainIntact()) return NoChange();
+    // Add a code dependency on the array iterator protector.
+    dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
+  }
+
+  dependencies()->AssumeMapStable(
+      isolate()->initial_array_iterator_prototype_map());
+
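+  // Drop the {spread} input; the unpacked arguments are added below from
+  // the frame state.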
+  node->RemoveInput(arity--);
+
+  // Add the actual parameters to the {node}, skipping the receiver.
+  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+  for (int i = start_index + 1; i < state_info.parameter_count(); ++i) {
+    node->InsertInput(graph()->zone(), static_cast<int>(++arity),
+                      parameters->InputAt(i));
+  }
+
+  if (node->opcode() == IrOpcode::kJSCallWithSpread) {
+    NodeProperties::ChangeOp(
+        node, javascript()->Call(arity + 1, 7, VectorSlotPair()));
+  } else {
+    NodeProperties::ChangeOp(
+        node, javascript()->Construct(arity + 2, 7, VectorSlotPair()));
+  }
+  return Changed(node);
+}
+
+Reduction JSCallReducer::ReduceJSCall(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+  CallParameters const& p = CallParametersOf(node->op());
   Node* target = NodeProperties::GetValueInput(node, 0);
   Node* control = NodeProperties::GetControlInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
 
-  // Try to specialize JSCallFunction {node}s with constant {target}s.
+  // Try to specialize JSCall {node}s with constant {target}s.
   HeapObjectMatcher m(target);
   if (m.HasValue()) {
     if (m.Value()->IsJSFunction()) {
@@ -274,12 +510,17 @@
         return Changed(node);
       }
 
+      // Don't inline cross native context.
+      if (function->native_context() != *native_context()) return NoChange();
+
       // Check for known builtin functions.
       switch (shared->code()->builtin_index()) {
         case Builtins::kFunctionPrototypeApply:
           return ReduceFunctionPrototypeApply(node);
         case Builtins::kFunctionPrototypeCall:
           return ReduceFunctionPrototypeCall(node);
+        case Builtins::kFunctionPrototypeHasInstance:
+          return ReduceFunctionPrototypeHasInstance(node);
         case Builtins::kNumberConstructor:
           return ReduceNumberConstructor(node);
         case Builtins::kObjectPrototypeGetProto:
@@ -292,6 +533,12 @@
       if (*function == function->native_context()->array_function()) {
         return ReduceArrayConstructor(node);
       }
+
+      if (shared->IsApiFunction()) {
+        return ReduceCallApiFunction(
+            node, target,
+            handle(FunctionTemplateInfo::cast(shared->function_data())));
+      }
     } else if (m.Value()->IsJSBoundFunction()) {
       Handle<JSBoundFunction> function =
           Handle<JSBoundFunction>::cast(m.Value());
@@ -300,9 +547,9 @@
       Handle<Object> bound_this(function->bound_this(), isolate());
       Handle<FixedArray> bound_arguments(function->bound_arguments(),
                                          isolate());
-      CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+      CallParameters const& p = CallParametersOf(node->op());
       ConvertReceiverMode const convert_mode =
-          (bound_this->IsNull(isolate()) || bound_this->IsUndefined(isolate()))
+          (bound_this->IsNullOrUndefined(isolate()))
               ? ConvertReceiverMode::kNullOrUndefined
               : ConvertReceiverMode::kNotNullOrUndefined;
       size_t arity = p.arity();
@@ -319,11 +566,12 @@
             jsgraph()->Constant(handle(bound_arguments->get(i), isolate())));
         arity++;
       }
-      NodeProperties::ChangeOp(node, javascript()->CallFunction(
-                                         arity, p.frequency(), VectorSlotPair(),
-                                         convert_mode, p.tail_call_mode()));
-      // Try to further reduce the JSCallFunction {node}.
-      Reduction const reduction = ReduceJSCallFunction(node);
+      NodeProperties::ChangeOp(
+          node,
+          javascript()->Call(arity, p.frequency(), VectorSlotPair(),
+                             convert_mode, p.tail_call_mode()));
+      // Try to further reduce the JSCall {node}.
+      Reduction const reduction = ReduceJSCall(node);
       return reduction.Changed() ? reduction : Changed(node);
     }
 
@@ -332,26 +580,36 @@
     return NoChange();
   }
 
-  // Not much we can do if deoptimization support is disabled.
-  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
   // Extract feedback from the {node} using the CallICNexus.
   if (!p.feedback().IsValid()) return NoChange();
   CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
-  if (nexus.IsUninitialized() && (flags() & kBailoutOnUninitialized)) {
-    Node* frame_state = NodeProperties::FindFrameStateBefore(node);
-    Node* deoptimize = graph()->NewNode(
-        common()->Deoptimize(
-            DeoptimizeKind::kSoft,
-            DeoptimizeReason::kInsufficientTypeFeedbackForCall),
-        frame_state, effect, control);
-    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-    NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-    Revisit(graph()->end());
-    node->TrimInputCount(0);
-    NodeProperties::ChangeOp(node, common()->Dead());
+  if (nexus.IsUninitialized()) {
+    // TODO(turbofan): Tail-calling to a CallIC stub is not supported.
+    if (p.tail_call_mode() == TailCallMode::kAllow) return NoChange();
+
+    // Insert a CallIC here to collect feedback for uninitialized calls.
+    int const arg_count = static_cast<int>(p.arity() - 2);
+    Callable callable = CodeFactory::CallIC(isolate(), p.convert_mode());
+    CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+    CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+        isolate(), graph()->zone(), callable.descriptor(), arg_count + 1,
+        flags);
+    Node* stub_code = jsgraph()->HeapConstant(callable.code());
+    Node* stub_arity = jsgraph()->Constant(arg_count);
+    Node* slot_index =
+        jsgraph()->Constant(FeedbackVector::GetIndex(p.feedback().slot()));
+    Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector());
+    node->InsertInput(graph()->zone(), 0, stub_code);
+    node->InsertInput(graph()->zone(), 2, stub_arity);
+    node->InsertInput(graph()->zone(), 3, slot_index);
+    node->InsertInput(graph()->zone(), 4, feedback_vector);
+    NodeProperties::ChangeOp(node, common()->Call(desc));
     return Changed(node);
   }
+
+  // Not much we can do if deoptimization support is disabled.
+  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
   Handle<Object> feedback(nexus.GetFeedback(), isolate());
   if (feedback->IsAllocationSite()) {
     // Retrieve the Array function from the {node}.
@@ -379,22 +637,30 @@
       effect =
           graph()->NewNode(simplified()->CheckIf(), check, effect, control);
 
-      // Specialize the JSCallFunction node to the {target_function}.
+      // Specialize the JSCall node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
       NodeProperties::ReplaceEffectInput(node, effect);
 
-      // Try to further reduce the JSCallFunction {node}.
-      Reduction const reduction = ReduceJSCallFunction(node);
+      // Try to further reduce the JSCall {node}.
+      Reduction const reduction = ReduceJSCall(node);
       return reduction.Changed() ? reduction : Changed(node);
     }
   }
   return NoChange();
 }
 
+Reduction JSCallReducer::ReduceJSCallWithSpread(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCallWithSpread, node->opcode());
+  CallWithSpreadParameters const& p = CallWithSpreadParametersOf(node->op());
+  DCHECK_LE(3u, p.arity());
+  int arity = static_cast<int>(p.arity() - 1);
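+  // For calls the spread is the trailing value input.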
 
-Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
-  CallConstructParameters const& p = CallConstructParametersOf(node->op());
+  return ReduceSpreadCall(node, arity);
+}
+
+Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
+  ConstructParameters const& p = ConstructParametersOf(node->op());
   DCHECK_LE(2u, p.arity());
   int const arity = static_cast<int>(p.arity() - 2);
   Node* target = NodeProperties::GetValueInput(node, 0);
@@ -402,7 +668,7 @@
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
-  // Try to specialize JSCallConstruct {node}s with constant {target}s.
+  // Try to specialize JSConstruct {node}s with constant {target}s.
   HeapObjectMatcher m(target);
   if (m.HasValue()) {
     if (m.Value()->IsJSFunction()) {
@@ -412,10 +678,14 @@
       if (!function->IsConstructor()) {
         NodeProperties::ReplaceValueInputs(node, target);
         NodeProperties::ChangeOp(
-            node, javascript()->CallRuntime(Runtime::kThrowCalledNonCallable));
+            node, javascript()->CallRuntime(
+                      Runtime::kThrowConstructedNonConstructable));
         return Changed(node);
       }
 
+      // Don't inline cross native context.
+      if (function->native_context() != *native_context()) return NoChange();
+
       // Check for the ArrayConstructor.
       if (*function == function->native_context()->array_function()) {
         // Check if we have an allocation site.
@@ -487,15 +757,15 @@
       effect =
           graph()->NewNode(simplified()->CheckIf(), check, effect, control);
 
-      // Specialize the JSCallConstruct node to the {target_function}.
+      // Specialize the JSConstruct node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
       NodeProperties::ReplaceEffectInput(node, effect);
       if (target == new_target) {
         NodeProperties::ReplaceValueInput(node, target_function, arity + 1);
       }
 
-      // Try to further reduce the JSCallConstruct {node}.
-      Reduction const reduction = ReduceJSCallConstruct(node);
+      // Try to further reduce the JSConstruct {node}.
+      Reduction const reduction = ReduceJSConstruct(node);
       return reduction.Changed() ? reduction : Changed(node);
     }
   }
@@ -503,10 +773,22 @@
   return NoChange();
 }
 
+Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSConstructWithSpread, node->opcode());
+  ConstructWithSpreadParameters const& p =
+      ConstructWithSpreadParametersOf(node->op());
+  DCHECK_LE(3u, p.arity());
+  int arity = static_cast<int>(p.arity() - 2);
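+  // For constructs the spread precedes the trailing new_target input.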
+
+  return ReduceSpreadCall(node, arity);
+}
+
 Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
 
 Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
 
+Factory* JSCallReducer::factory() const { return isolate()->factory(); }
+
 CommonOperatorBuilder* JSCallReducer::common() const {
   return jsgraph()->common();
 }
diff --git a/src/compiler/js-call-reducer.h b/src/compiler/js-call-reducer.h
index 81153f9..10b8ee8 100644
--- a/src/compiler/js-call-reducer.h
+++ b/src/compiler/js-call-reducer.h
@@ -10,6 +10,11 @@
 
 namespace v8 {
 namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class Factory;
+
 namespace compiler {
 
 // Forward declarations.
@@ -18,48 +23,65 @@
 class JSOperatorBuilder;
 class SimplifiedOperatorBuilder;
 
-// Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
+// Performs strength reduction on {JSConstruct} and {JSCall} nodes,
 // which might allow inlining or other optimizations to be performed afterwards.
 class JSCallReducer final : public AdvancedReducer {
  public:
   // Flags that control the mode of operation.
   enum Flag {
     kNoFlags = 0u,
-    kBailoutOnUninitialized = 1u << 0,
-    kDeoptimizationEnabled = 1u << 1
+    kDeoptimizationEnabled = 1u << 0,
   };
   typedef base::Flags<Flag> Flags;
 
   JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
-                Handle<Context> native_context)
+                Handle<Context> native_context,
+                CompilationDependencies* dependencies)
       : AdvancedReducer(editor),
         jsgraph_(jsgraph),
         flags_(flags),
-        native_context_(native_context) {}
+        native_context_(native_context),
+        dependencies_(dependencies) {}
 
   Reduction Reduce(Node* node) final;
 
  private:
   Reduction ReduceArrayConstructor(Node* node);
+  Reduction ReduceCallApiFunction(
+      Node* node, Node* target,
+      Handle<FunctionTemplateInfo> function_template_info);
   Reduction ReduceNumberConstructor(Node* node);
   Reduction ReduceFunctionPrototypeApply(Node* node);
   Reduction ReduceFunctionPrototypeCall(Node* node);
+  Reduction ReduceFunctionPrototypeHasInstance(Node* node);
   Reduction ReduceObjectPrototypeGetProto(Node* node);
-  Reduction ReduceJSCallConstruct(Node* node);
-  Reduction ReduceJSCallFunction(Node* node);
+  Reduction ReduceSpreadCall(Node* node, int arity);
+  Reduction ReduceJSConstruct(Node* node);
+  Reduction ReduceJSConstructWithSpread(Node* node);
+  Reduction ReduceJSCall(Node* node);
+  Reduction ReduceJSCallWithSpread(Node* node);
+
+  enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
+
+  HolderLookup LookupHolder(Handle<JSObject> object,
+                            Handle<FunctionTemplateInfo> function_template_info,
+                            Handle<JSObject>* holder);
 
   Graph* graph() const;
   Flags flags() const { return flags_; }
   JSGraph* jsgraph() const { return jsgraph_; }
   Isolate* isolate() const;
+  Factory* factory() const;
   Handle<Context> native_context() const { return native_context_; }
   CommonOperatorBuilder* common() const;
   JSOperatorBuilder* javascript() const;
   SimplifiedOperatorBuilder* simplified() const;
+  CompilationDependencies* dependencies() const { return dependencies_; }
 
   JSGraph* const jsgraph_;
   Flags const flags_;
   Handle<Context> const native_context_;
+  CompilationDependencies* const dependencies_;
 };
 
 DEFINE_OPERATORS_FOR_FLAGS(JSCallReducer::Flags)
diff --git a/src/compiler/js-context-specialization.cc b/src/compiler/js-context-specialization.cc
index e02fc49..9a2edc1 100644
--- a/src/compiler/js-context-specialization.cc
+++ b/src/compiler/js-context-specialization.cc
@@ -28,50 +28,81 @@
   return NoChange();
 }
 
+Reduction JSContextSpecialization::SimplifyJSLoadContext(Node* node,
+                                                         Node* new_context,
+                                                         size_t new_depth) {
+  DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
+  const ContextAccess& access = ContextAccessOf(node->op());
+  DCHECK_LE(new_depth, access.depth());
 
-MaybeHandle<Context> JSContextSpecialization::GetSpecializationContext(
-    Node* node) {
-  DCHECK(node->opcode() == IrOpcode::kJSLoadContext ||
-         node->opcode() == IrOpcode::kJSStoreContext);
-  Node* const object = NodeProperties::GetValueInput(node, 0);
-  return NodeProperties::GetSpecializationContext(object, context());
+  if (new_depth == access.depth() &&
+      new_context == NodeProperties::GetContextInput(node)) {
+    return NoChange();
+  }
+
+  const Operator* op = jsgraph_->javascript()->LoadContext(
+      new_depth, access.index(), access.immutable());
+  NodeProperties::ReplaceContextInput(node, new_context);
+  NodeProperties::ChangeOp(node, op);
+  return Changed(node);
 }
 
+Reduction JSContextSpecialization::SimplifyJSStoreContext(Node* node,
+                                                          Node* new_context,
+                                                          size_t new_depth) {
+  DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
+  const ContextAccess& access = ContextAccessOf(node->op());
+  DCHECK_LE(new_depth, access.depth());
+
+  if (new_depth == access.depth() &&
+      new_context == NodeProperties::GetContextInput(node)) {
+    return NoChange();
+  }
+
+  const Operator* op =
+      jsgraph_->javascript()->StoreContext(new_depth, access.index());
+  NodeProperties::ReplaceContextInput(node, new_context);
+  NodeProperties::ChangeOp(node, op);
+  return Changed(node);
+}
 
 Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
 
-  // Get the specialization context from the node.
-  Handle<Context> context;
-  if (!GetSpecializationContext(node).ToHandle(&context)) return NoChange();
-
-  // Find the right parent context.
   const ContextAccess& access = ContextAccessOf(node->op());
-  for (size_t i = access.depth(); i > 0; --i) {
-    context = handle(context->previous(), isolate());
+  size_t depth = access.depth();
+
+  // First walk up the context chain in the graph as far as possible.
+  Node* outer = NodeProperties::GetOuterContext(node, &depth);
+
+  Handle<Context> concrete;
+  if (!NodeProperties::GetSpecializationContext(outer, context())
+           .ToHandle(&concrete)) {
+    // We do not have a concrete context object, so we can only partially reduce
+    // the load by folding-in the outer context node.
+    return SimplifyJSLoadContext(node, outer, depth);
   }
 
-  // If the access itself is mutable, only fold-in the parent.
-  if (!access.immutable()) {
-    // The access does not have to look up a parent, nothing to fold.
-    if (access.depth() == 0) {
-      return NoChange();
-    }
-    const Operator* op = jsgraph_->javascript()->LoadContext(
-        0, access.index(), access.immutable());
-    node->ReplaceInput(0, jsgraph_->Constant(context));
-    NodeProperties::ChangeOp(node, op);
-    return Changed(node);
+  // Now walk up the concrete context chain for the remaining depth.
+  for (; depth > 0; --depth) {
+    concrete = handle(concrete->previous(), isolate());
   }
-  Handle<Object> value =
-      handle(context->get(static_cast<int>(access.index())), isolate());
+
+  if (!access.immutable()) {
+    // We found the requested context object but since the context slot is
+    // mutable we can only partially reduce the load.
+    return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
+  }
 
   // Even though the context slot is immutable, the context might have escaped
   // before the function to which it belongs has initialized the slot.
-  // We must be conservative and check if the value in the slot is currently the
-  // hole or undefined. If it is neither of these, then it must be initialized.
+  // We must be conservative and check if the value in the slot is currently
+  // the hole or undefined. Only if it is neither of these can we be sure
+  // that it won't change anymore.
+  Handle<Object> value(concrete->get(static_cast<int>(access.index())),
+                       isolate());
   if (value->IsUndefined(isolate()) || value->IsTheHole(isolate())) {
-    return NoChange();
+    return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
   }
 
   // Success. The context load can be replaced with the constant.
@@ -86,24 +117,27 @@
 Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
 
-  // Get the specialization context from the node.
-  Handle<Context> context;
-  if (!GetSpecializationContext(node).ToHandle(&context)) return NoChange();
-
-  // The access does not have to look up a parent, nothing to fold.
   const ContextAccess& access = ContextAccessOf(node->op());
-  if (access.depth() == 0) {
-    return NoChange();
+  size_t depth = access.depth();
+
+  // First walk up the context chain in the graph until we reduce the depth to 0
+  // or hit a node that does not have a CreateXYZContext operator.
+  Node* outer = NodeProperties::GetOuterContext(node, &depth);
+
+  Handle<Context> concrete;
+  if (!NodeProperties::GetSpecializationContext(outer, context())
+           .ToHandle(&concrete)) {
+    // We do not have a concrete context object, so we can only partially reduce
+    // the store by folding-in the outer context node.
+    return SimplifyJSStoreContext(node, outer, depth);
   }
 
-  // Find the right parent context.
-  for (size_t i = access.depth(); i > 0; --i) {
-    context = handle(context->previous(), isolate());
+  // Now walk up the concrete context chain for the remaining depth.
+  for (; depth > 0; --depth) {
+    concrete = handle(concrete->previous(), isolate());
   }
 
-  node->ReplaceInput(0, jsgraph_->Constant(context));
-  NodeProperties::ChangeOp(node, javascript()->StoreContext(0, access.index()));
-  return Changed(node);
+  return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
 }
 
 
diff --git a/src/compiler/js-context-specialization.h b/src/compiler/js-context-specialization.h
index ef784fc..99172af 100644
--- a/src/compiler/js-context-specialization.h
+++ b/src/compiler/js-context-specialization.h
@@ -30,8 +30,10 @@
   Reduction ReduceJSLoadContext(Node* node);
   Reduction ReduceJSStoreContext(Node* node);
 
-  // Returns the {Context} to specialize {node} to (if any).
-  MaybeHandle<Context> GetSpecializationContext(Node* node);
+  Reduction SimplifyJSStoreContext(Node* node, Node* new_context,
+                                   size_t new_depth);
+  Reduction SimplifyJSLoadContext(Node* node, Node* new_context,
+                                  size_t new_depth);
 
   Isolate* isolate() const;
   JSOperatorBuilder* javascript() const;
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
index c54b76b..f3ceb2b 100644
--- a/src/compiler/js-create-lowering.cc
+++ b/src/compiler/js-create-lowering.cc
@@ -12,11 +12,12 @@
 #include "src/compiler/js-graph.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/linkage.h"
-#include "src/compiler/node.h"
 #include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/compiler/state-values-utils.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -38,6 +39,7 @@
   // Primitive allocation of static size.
   void Allocate(int size, PretenureFlag pretenure = NOT_TENURED,
                 Type* type = Type::Any()) {
+    DCHECK_LE(size, kMaxRegularHeapObjectSize);
     effect_ = graph()->NewNode(
         common()->BeginRegion(RegionObservability::kNotObservable), effect_);
     allocation_ =
@@ -161,7 +163,9 @@
           }
         }
       }
-    } else if (!boilerplate->HasFastDoubleElements()) {
+    } else if (boilerplate->HasFastDoubleElements()) {
+      if (elements->Size() > kMaxRegularHeapObjectSize) return false;
+    } else {
       return false;
     }
   }
@@ -176,7 +180,8 @@
   int limit = boilerplate->map()->NumberOfOwnDescriptors();
   for (int i = 0; i < limit; i++) {
     PropertyDetails details = descriptors->GetDetails(i);
-    if (details.type() != DATA) continue;
+    if (details.location() != kField) continue;
+    DCHECK_EQ(kData, details.kind());
     if ((*max_properties)-- == 0) return false;
     FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
     if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
@@ -206,8 +211,6 @@
       return ReduceJSCreateArguments(node);
     case IrOpcode::kJSCreateArray:
       return ReduceJSCreateArray(node);
-    case IrOpcode::kJSCreateClosure:
-      return ReduceJSCreateClosure(node);
     case IrOpcode::kJSCreateIterResultObject:
       return ReduceJSCreateIterResultObject(node);
     case IrOpcode::kJSCreateKeyValueArray:
@@ -236,6 +239,7 @@
   Node* const new_target = NodeProperties::GetValueInput(node, 1);
   Type* const new_target_type = NodeProperties::GetType(new_target);
   Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = NodeProperties::GetControlInput(node);
   // Extract constructor and original constructor function.
   if (target_type->IsHeapConstant() && new_target_type->IsHeapConstant() &&
       new_target_type->AsHeapConstant()->Value()->IsJSFunction()) {
@@ -263,7 +267,7 @@
 
       // Emit code to allocate the JSObject instance for the
       // {original_constructor}.
-      AllocationBuilder a(jsgraph(), effect, graph()->start());
+      AllocationBuilder a(jsgraph(), effect, control);
       a.Allocate(instance_size);
       a.Store(AccessBuilder::ForMap(), initial_map);
       a.Store(AccessBuilder::ForJSObjectProperties(),
@@ -274,6 +278,7 @@
         a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
                 jsgraph()->UndefinedConstant());
       }
+      RelaxControls(node);
       a.FinishAndChange(node);
       return Changed(node);
     }
@@ -294,46 +299,130 @@
   if (outer_state->opcode() != IrOpcode::kFrameState) {
     switch (type) {
       case CreateArgumentsType::kMappedArguments: {
-        // TODO(mstarzinger): Duplicate parameters are not handled yet.
+        // TODO(bmeurer): Make deoptimization mandatory for the various
+        // arguments objects, so that we always have a shared_info here.
         Handle<SharedFunctionInfo> shared_info;
-        if (!state_info.shared_info().ToHandle(&shared_info) ||
-            shared_info->has_duplicate_parameters()) {
-          return NoChange();
+        if (state_info.shared_info().ToHandle(&shared_info)) {
+          // TODO(mstarzinger): Duplicate parameters are not handled yet.
+          if (shared_info->has_duplicate_parameters()) return NoChange();
+          // If there is no aliasing, the arguments object elements are not
+          // special in any way, so we can return an unmapped backing store.
+          if (shared_info->internal_formal_parameter_count() == 0) {
+            Node* const callee = NodeProperties::GetValueInput(node, 0);
+            Node* effect = NodeProperties::GetEffectInput(node);
+            // Allocate the elements backing store.
+            Node* const elements = effect = graph()->NewNode(
+                simplified()->NewUnmappedArgumentsElements(0), effect);
+            Node* const length = effect = graph()->NewNode(
+                simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+                elements, effect, control);
+            // Load the arguments object map.
+            Node* const arguments_map = jsgraph()->HeapConstant(
+                handle(native_context()->sloppy_arguments_map(), isolate()));
+            // Actually allocate and initialize the arguments object.
+            AllocationBuilder a(jsgraph(), effect, control);
+            Node* properties = jsgraph()->EmptyFixedArrayConstant();
+            STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
+            a.Allocate(JSSloppyArgumentsObject::kSize);
+            a.Store(AccessBuilder::ForMap(), arguments_map);
+            a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+            a.Store(AccessBuilder::ForJSObjectElements(), elements);
+            a.Store(AccessBuilder::ForArgumentsLength(), length);
+            a.Store(AccessBuilder::ForArgumentsCallee(), callee);
+            RelaxControls(node);
+            a.FinishAndChange(node);
+          } else {
+            Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+            Operator::Properties properties = node->op()->properties();
+            CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+                isolate(), graph()->zone(), callable.descriptor(), 0,
+                CallDescriptor::kNoFlags, properties);
+            const Operator* new_op = common()->Call(desc);
+            Node* stub_code = jsgraph()->HeapConstant(callable.code());
+            node->InsertInput(graph()->zone(), 0, stub_code);
+            node->RemoveInput(3);  // Remove the frame state.
+            NodeProperties::ChangeOp(node, new_op);
+          }
+          return Changed(node);
         }
-        Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
-        Operator::Properties properties = node->op()->properties();
-        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-            isolate(), graph()->zone(), callable.descriptor(), 0,
-            CallDescriptor::kNoFlags, properties);
-        const Operator* new_op = common()->Call(desc);
-        Node* stub_code = jsgraph()->HeapConstant(callable.code());
-        node->InsertInput(graph()->zone(), 0, stub_code);
-        node->RemoveInput(3);  // Remove the frame state.
-        NodeProperties::ChangeOp(node, new_op);
-        return Changed(node);
+        return NoChange();
       }
       case CreateArgumentsType::kUnmappedArguments: {
-        Callable callable = CodeFactory::FastNewStrictArguments(isolate());
-        Operator::Properties properties = node->op()->properties();
-        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-            isolate(), graph()->zone(), callable.descriptor(), 0,
-            CallDescriptor::kNeedsFrameState, properties);
-        const Operator* new_op = common()->Call(desc);
-        Node* stub_code = jsgraph()->HeapConstant(callable.code());
-        node->InsertInput(graph()->zone(), 0, stub_code);
-        NodeProperties::ChangeOp(node, new_op);
+        Handle<SharedFunctionInfo> shared_info;
+        if (state_info.shared_info().ToHandle(&shared_info)) {
+          Node* effect = NodeProperties::GetEffectInput(node);
+          // Allocate the elements backing store.
+          Node* const elements = effect = graph()->NewNode(
+              simplified()->NewUnmappedArgumentsElements(
+                  shared_info->internal_formal_parameter_count()),
+              effect);
+          Node* const length = effect = graph()->NewNode(
+              simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+              elements, effect, control);
+          // Load the arguments object map.
+          Node* const arguments_map = jsgraph()->HeapConstant(
+              handle(native_context()->strict_arguments_map(), isolate()));
+          // Actually allocate and initialize the arguments object.
+          AllocationBuilder a(jsgraph(), effect, control);
+          Node* properties = jsgraph()->EmptyFixedArrayConstant();
+          STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+          a.Allocate(JSStrictArgumentsObject::kSize);
+          a.Store(AccessBuilder::ForMap(), arguments_map);
+          a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+          a.Store(AccessBuilder::ForJSObjectElements(), elements);
+          a.Store(AccessBuilder::ForArgumentsLength(), length);
+          RelaxControls(node);
+          a.FinishAndChange(node);
+        } else {
+          Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+          Operator::Properties properties = node->op()->properties();
+          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+              isolate(), graph()->zone(), callable.descriptor(), 0,
+              CallDescriptor::kNeedsFrameState, properties);
+          const Operator* new_op = common()->Call(desc);
+          Node* stub_code = jsgraph()->HeapConstant(callable.code());
+          node->InsertInput(graph()->zone(), 0, stub_code);
+          NodeProperties::ChangeOp(node, new_op);
+        }
         return Changed(node);
       }
       case CreateArgumentsType::kRestParameter: {
-        Callable callable = CodeFactory::FastNewRestParameter(isolate());
-        Operator::Properties properties = node->op()->properties();
-        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-            isolate(), graph()->zone(), callable.descriptor(), 0,
-            CallDescriptor::kNeedsFrameState, properties);
-        const Operator* new_op = common()->Call(desc);
-        Node* stub_code = jsgraph()->HeapConstant(callable.code());
-        node->InsertInput(graph()->zone(), 0, stub_code);
-        NodeProperties::ChangeOp(node, new_op);
+        Handle<SharedFunctionInfo> shared_info;
+        if (state_info.shared_info().ToHandle(&shared_info)) {
+          Node* effect = NodeProperties::GetEffectInput(node);
+          // Allocate the elements backing store.
+          Node* const elements = effect = graph()->NewNode(
+              simplified()->NewRestParameterElements(
+                  shared_info->internal_formal_parameter_count()),
+              effect);
+          Node* const length = effect = graph()->NewNode(
+              simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+              elements, effect, control);
+          // Load the JSArray object map.
+          Node* const jsarray_map = jsgraph()->HeapConstant(handle(
+              native_context()->js_array_fast_elements_map_index(), isolate()));
+          // Actually allocate and initialize the jsarray.
+          AllocationBuilder a(jsgraph(), effect, control);
+          Node* properties = jsgraph()->EmptyFixedArrayConstant();
+          STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+          a.Allocate(JSArray::kSize);
+          a.Store(AccessBuilder::ForMap(), jsarray_map);
+          a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+          a.Store(AccessBuilder::ForJSObjectElements(), elements);
+          a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), length);
+          RelaxControls(node);
+          a.FinishAndChange(node);
+        } else {
+          Callable callable = CodeFactory::FastNewRestParameter(isolate());
+          Operator::Properties properties = node->op()->properties();
+          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+              isolate(), graph()->zone(), callable.descriptor(), 0,
+              CallDescriptor::kNeedsFrameState, properties);
+          const Operator* new_op = common()->Call(desc);
+          Node* stub_code = jsgraph()->HeapConstant(callable.code());
+          node->InsertInput(graph()->zone(), 0, stub_code);
+          NodeProperties::ChangeOp(node, new_op);
+        }
         return Changed(node);
       }
     }
@@ -662,43 +751,6 @@
   return ReduceNewArrayToStubCall(node, site);
 }
 
-Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
-  CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
-  Handle<SharedFunctionInfo> shared = p.shared_info();
-
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  Node* context = NodeProperties::GetContextInput(node);
-  int const function_map_index =
-      Context::FunctionMapIndex(shared->language_mode(), shared->kind());
-  Node* function_map = jsgraph()->HeapConstant(
-      handle(Map::cast(native_context()->get(function_map_index)), isolate()));
-  // Note that it is only safe to embed the raw entry point of the compile
-  // lazy stub into the code, because that stub is immortal and immovable.
-  Node* compile_entry = jsgraph()->PointerConstant(
-      jsgraph()->isolate()->builtins()->CompileLazy()->entry());
-  Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
-  Node* empty_literals_array = jsgraph()->EmptyLiteralsArrayConstant();
-  Node* the_hole = jsgraph()->TheHoleConstant();
-  Node* undefined = jsgraph()->UndefinedConstant();
-  AllocationBuilder a(jsgraph(), effect, control);
-  STATIC_ASSERT(JSFunction::kSize == 9 * kPointerSize);
-  a.Allocate(JSFunction::kSize, p.pretenure());
-  a.Store(AccessBuilder::ForMap(), function_map);
-  a.Store(AccessBuilder::ForJSObjectProperties(), empty_fixed_array);
-  a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
-  a.Store(AccessBuilder::ForJSFunctionLiterals(), empty_literals_array);
-  a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(), the_hole);
-  a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
-  a.Store(AccessBuilder::ForJSFunctionContext(), context);
-  a.Store(AccessBuilder::ForJSFunctionCodeEntry(), compile_entry);
-  a.Store(AccessBuilder::ForJSFunctionNextFunctionLink(), undefined);
-  RelaxControls(node);
-  a.FinishAndChange(node);
-  return Changed(node);
-}
-
 Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
   Node* value = NodeProperties::GetValueInput(node, 0);
@@ -760,9 +812,10 @@
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
-  Handle<LiteralsArray> literals_array;
-  if (GetSpecializationLiterals(node).ToHandle(&literals_array)) {
-    Handle<Object> literal(literals_array->literal(p.index()), isolate());
+  Handle<FeedbackVector> feedback_vector;
+  if (GetSpecializationFeedbackVector(node).ToHandle(&feedback_vector)) {
+    FeedbackSlot slot(FeedbackVector::ToSlot(p.index()));
+    Handle<Object> literal(feedback_vector->Get(slot), isolate());
     if (literal->IsAllocationSite()) {
       Handle<AllocationSite> site = Handle<AllocationSite>::cast(literal);
       Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()),
@@ -785,7 +838,10 @@
 
 Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
-  int slot_count = OpParameter<int>(node->op());
+  const CreateFunctionContextParameters& parameters =
+      CreateFunctionContextParametersOf(node->op());
+  int slot_count = parameters.slot_count();
+  ScopeType scope_type = parameters.scope_type();
   Node* const closure = NodeProperties::GetValueInput(node, 0);
 
   // Use inline allocation for function contexts up to a size limit.
@@ -798,7 +854,18 @@
     AllocationBuilder a(jsgraph(), effect, control);
     STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
     int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
-    a.AllocateArray(context_length, factory()->function_context_map());
+    Handle<Map> map;
+    switch (scope_type) {
+      case EVAL_SCOPE:
+        map = factory()->eval_context_map();
+        break;
+      case FUNCTION_SCOPE:
+        map = factory()->function_context_map();
+        break;
+      default:
+        UNREACHABLE();
+    }
+    a.AllocateArray(context_length, map);
     a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
     a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
     a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
@@ -929,6 +996,7 @@
   AllocationBuilder a(jsgraph(), effect, control);
   a.AllocateArray(argument_count, factory()->fixed_array_map());
   for (int i = 0; i < argument_count; ++i, ++parameters_it) {
+    DCHECK_NOT_NULL((*parameters_it).node);
     a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
   }
   return a.Finish();
@@ -958,6 +1026,7 @@
   AllocationBuilder a(jsgraph(), effect, control);
   a.AllocateArray(num_elements, factory()->fixed_array_map());
   for (int i = 0; i < num_elements; ++i, ++parameters_it) {
+    DCHECK_NOT_NULL((*parameters_it).node);
     a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
   }
   return a.Finish();
@@ -987,18 +1056,19 @@
   // Prepare an iterator over argument values recorded in the frame state.
   Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
   StateValuesAccess parameters_access(parameters);
-  auto paratemers_it = ++parameters_access.begin();
+  auto parameters_it = ++parameters_access.begin();
 
   // The unmapped argument values recorded in the frame state are stored yet
   // another indirection away and then linked into the parameter map below,
   // whereas mapped argument values are replaced with a hole instead.
   AllocationBuilder aa(jsgraph(), effect, control);
   aa.AllocateArray(argument_count, factory()->fixed_array_map());
-  for (int i = 0; i < mapped_count; ++i, ++paratemers_it) {
+  for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
     aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
   }
-  for (int i = mapped_count; i < argument_count; ++i, ++paratemers_it) {
-    aa.Store(AccessBuilder::ForFixedArraySlot(i), (*paratemers_it).node);
+  for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
+    DCHECK_NOT_NULL((*parameters_it).node);
+    aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
   }
   Node* arguments = aa.Finish();
 
@@ -1081,13 +1151,15 @@
   for (int i = 0; i < boilerplate_nof; ++i) {
     PropertyDetails const property_details =
         boilerplate_map->instance_descriptors()->GetDetails(i);
-    if (property_details.type() != DATA) continue;
+    if (property_details.location() != kField) continue;
+    DCHECK_EQ(kData, property_details.kind());
     Handle<Name> property_name(
         boilerplate_map->instance_descriptors()->GetKey(i), isolate());
     FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
-    FieldAccess access = {
-        kTaggedBase, index.offset(),           property_name,
-        Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+    FieldAccess access = {kTaggedBase,      index.offset(),
+                          property_name,    MaybeHandle<Map>(),
+                          Type::Any(),      MachineType::AnyTagged(),
+                          kFullWriteBarrier};
     Node* value;
     if (boilerplate->IsUnboxedDoubleField(index)) {
       access.machine_type = MachineType::Float64();
@@ -1104,23 +1176,15 @@
                                              boilerplate_object, site_context);
         site_context->ExitScope(current_site, boilerplate_object);
       } else if (property_details.representation().IsDouble()) {
+        double number = Handle<HeapNumber>::cast(boilerplate_value)->value();
         // Allocate a mutable HeapNumber box and store the value into it.
-        effect = graph()->NewNode(
-            common()->BeginRegion(RegionObservability::kNotObservable), effect);
-        value = effect = graph()->NewNode(
-            simplified()->Allocate(pretenure),
-            jsgraph()->Constant(HeapNumber::kSize), effect, control);
-        effect = graph()->NewNode(
-            simplified()->StoreField(AccessBuilder::ForMap()), value,
-            jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
-            effect, control);
-        effect = graph()->NewNode(
-            simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
-            value, jsgraph()->Constant(
-                       Handle<HeapNumber>::cast(boilerplate_value)->value()),
-            effect, control);
-        value = effect =
-            graph()->NewNode(common()->FinishRegion(), value, effect);
+        AllocationBuilder builder(jsgraph(), effect, control);
+        builder.Allocate(HeapNumber::kSize, pretenure);
+        builder.Store(AccessBuilder::ForMap(),
+                      factory()->mutable_heap_number_map());
+        builder.Store(AccessBuilder::ForHeapNumberValue(),
+                      jsgraph()->Constant(number));
+        value = effect = builder.Finish();
       } else if (property_details.representation().IsSmi()) {
         // Ensure that value is stored as smi.
         value = boilerplate_value->IsUninitialized(isolate())
@@ -1156,7 +1220,7 @@
         AccessBuilder::ForJSArrayLength(boilerplate_array->GetElementsKind()),
         handle(boilerplate_array->length(), isolate()));
   }
-  for (auto const inobject_field : inobject_fields) {
+  for (auto const& inobject_field : inobject_fields) {
     builder.Store(inobject_field.first, inobject_field.second);
   }
   return builder.Finish();
@@ -1242,13 +1306,13 @@
   return builder.Finish();
 }
 
-MaybeHandle<LiteralsArray> JSCreateLowering::GetSpecializationLiterals(
+MaybeHandle<FeedbackVector> JSCreateLowering::GetSpecializationFeedbackVector(
     Node* node) {
   Node* const closure = NodeProperties::GetValueInput(node, 0);
   switch (closure->opcode()) {
     case IrOpcode::kHeapConstant: {
       Handle<HeapObject> object = OpParameter<Handle<HeapObject>>(closure);
-      return handle(Handle<JSFunction>::cast(object)->literals());
+      return handle(Handle<JSFunction>::cast(object)->feedback_vector());
     }
     case IrOpcode::kParameter: {
       int const index = ParameterIndexOf(closure->op());
@@ -1256,14 +1320,14 @@
       // {Parameter} indices start at -1, so value outputs of {Start} look like
       // this: closure, receiver, param0, ..., paramN, context.
       if (index == -1) {
-        return literals_array_;
+        return feedback_vector_;
       }
       break;
     }
     default:
       break;
   }
-  return MaybeHandle<LiteralsArray>();
+  return MaybeHandle<FeedbackVector>();
 }
 
 Factory* JSCreateLowering::factory() const { return isolate()->factory(); }
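All three CreateArguments cases above follow the same shape once the SharedFunctionInfo is known: allocate the elements backing store, then inline-allocate the wrapper object and initialize every field inside a single allocation region, so later passes can treat the object as atomically constructed. A standalone sketch of that builder discipline, with strings standing in for TurboFan nodes (toy types, not the V8 API):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct AllocationBuilder {
      std::vector<std::string> ops;
      void Allocate(int size) {
        ops.push_back("allocate " + std::to_string(size));
      }
      void Store(const std::string& field, const std::string& value) {
        ops.push_back("store " + field + " = " + value);
      }
      void Finish() {
        // The real builder emits BeginRegion/FinishRegion and rewires the
        // effect chain; here we just dump the operations in order.
        for (const auto& op : ops) std::printf("%s\n", op.c_str());
      }
    };

    int main() {
      // Mirrors the JSSloppyArgumentsObject case: map, properties, elements,
      // length, callee: five pointer-sized fields, hence kSize == 5 words.
      AllocationBuilder a;
      a.Allocate(5 * 8);  // assuming 8-byte pointers
      a.Store("map", "sloppy_arguments_map");
      a.Store("properties", "empty_fixed_array");
      a.Store("elements", "unmapped_arguments_elements");
      a.Store("length", "arguments_length");
      a.Store("callee", "closure");
      a.Finish();
    }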
diff --git a/src/compiler/js-create-lowering.h b/src/compiler/js-create-lowering.h
index b5390f1..eea75d3 100644
--- a/src/compiler/js-create-lowering.h
+++ b/src/compiler/js-create-lowering.h
@@ -33,12 +33,13 @@
     : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
-                   JSGraph* jsgraph, MaybeHandle<LiteralsArray> literals_array,
+                   JSGraph* jsgraph,
+                   MaybeHandle<FeedbackVector> feedback_vector,
                    Handle<Context> native_context, Zone* zone)
       : AdvancedReducer(editor),
         dependencies_(dependencies),
         jsgraph_(jsgraph),
-        literals_array_(literals_array),
+        feedback_vector_(feedback_vector),
         native_context_(native_context),
         zone_(zone) {}
   ~JSCreateLowering() final {}
@@ -49,7 +50,6 @@
   Reduction ReduceJSCreate(Node* node);
   Reduction ReduceJSCreateArguments(Node* node);
   Reduction ReduceJSCreateArray(Node* node);
-  Reduction ReduceJSCreateClosure(Node* node);
   Reduction ReduceJSCreateIterResultObject(Node* node);
   Reduction ReduceJSCreateKeyValueArray(Node* node);
   Reduction ReduceJSCreateLiteral(Node* node);
@@ -79,8 +79,8 @@
 
   Reduction ReduceNewArrayToStubCall(Node* node, Handle<AllocationSite> site);
 
-  // Infers the LiteralsArray to use for a given {node}.
-  MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
+  // Infers the FeedbackVector to use for a given {node}.
+  MaybeHandle<FeedbackVector> GetSpecializationFeedbackVector(Node* node);
 
   Factory* factory() const;
   Graph* graph() const;
@@ -96,7 +96,7 @@
 
   CompilationDependencies* const dependencies_;
   JSGraph* const jsgraph_;
-  MaybeHandle<LiteralsArray> const literals_array_;
+  MaybeHandle<FeedbackVector> const feedback_vector_;
   Handle<Context> const native_context_;
   Zone* const zone_;
 };
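GetSpecializationFeedbackVector can only succeed in two situations: the closure is a heap constant, so its feedback vector can be read off directly, or it is the implicit closure parameter of the function being compiled (parameter index -1), in which case the vector the lowering was constructed with applies. A sketch of that dispatch with toy types, std::optional standing in for MaybeHandle:

    #include <optional>
    #include <string>

    enum class Opcode { kHeapConstant, kParameter, kOther };

    struct Closure {
      Opcode opcode;
      int parameter_index;  // only meaningful for kParameter
      std::string vector;   // only meaningful for kHeapConstant
    };

    std::optional<std::string> InferFeedbackVector(
        const Closure& closure,
        const std::optional<std::string>& outer_vector) {
      switch (closure.opcode) {
        case Opcode::kHeapConstant:
          return closure.vector;  // known function: use its own vector
        case Opcode::kParameter:
          // Parameter -1 is the closure of the function being compiled.
          if (closure.parameter_index == -1) return outer_vector;
          return std::nullopt;
        case Opcode::kOther:
          return std::nullopt;  // cannot specialize
      }
      return std::nullopt;
    }

    int main() {
      Closure constant{Opcode::kHeapConstant, 0, "vector_of_f"};
      Closure self{Opcode::kParameter, -1, ""};
      auto a = InferFeedbackVector(constant, std::nullopt);      // vector_of_f
      auto b = InferFeedbackVector(self, std::string("outer"));  // outer
      return (a && b) ? 0 : 1;
    }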
diff --git a/src/compiler/js-frame-specialization.cc b/src/compiler/js-frame-specialization.cc
index 55ec1bf..73e1b7d 100644
--- a/src/compiler/js-frame-specialization.cc
+++ b/src/compiler/js-frame-specialization.cc
@@ -27,6 +27,9 @@
 }
 
 Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
+  // JSFrameSpecialization should never run on interpreted frames, since the
+  // code below assumes standard stack frame layouts.
+  DCHECK(!frame()->is_interpreted());
   DCHECK_EQ(IrOpcode::kOsrValue, node->opcode());
   Handle<Object> value;
   int index = OsrValueIndexOf(node->op());
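The new DCHECK records a real precondition: ReduceOsrValue computes slot offsets assuming the standard stack frame layout, so running it against an interpreter frame would read the wrong slots. A minimal sketch of guarding such a layout assumption, with a toy Frame type rather than V8's frame classes:

    #include <cassert>
    #include <vector>

    struct Frame {
      bool is_interpreted;
      std::vector<int> slots;  // values laid out per the standard layout
    };

    // Only fold OSR values on frames whose layout the reducer understands.
    int ReduceOsrValue(const Frame& frame, int index) {
      assert(!frame.is_interpreted);
      return frame.slots.at(index);
    }

    int main() {
      Frame frame{false, {11, 22, 33}};
      return ReduceOsrValue(frame, 1) == 22 ? 0 : 1;
    }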
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 250a9c2..79a3377 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -5,6 +5,7 @@
 #include "src/compiler/js-generic-lowering.h"
 
 #include "src/ast/ast.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/compiler/common-operator.h"
@@ -13,6 +14,7 @@
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -87,11 +89,12 @@
 
 void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
                                             CallDescriptor::Flags flags,
-                                            Operator::Properties properties) {
+                                            Operator::Properties properties,
+                                            int result_size) {
   const CallInterfaceDescriptor& descriptor = callable.descriptor();
   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
       isolate(), zone(), descriptor, descriptor.GetStackParameterCount(), flags,
-      properties);
+      properties, MachineType::AnyTagged(), result_size);
   Node* stub_code = jsgraph()->HeapConstant(callable.code());
   node->InsertInput(zone(), 0, stub_code);
   NodeProperties::ChangeOp(node, common()->Call(desc));
@@ -142,6 +145,15 @@
                       Operator::kEliminatable);
 }
 
+void JSGenericLowering::LowerJSClassOf(Node* node) {
+  // The %_ClassOf intrinsic doesn't need the current context.
+  NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
+  Callable callable = CodeFactory::ClassOf(isolate());
+  node->AppendInput(zone(), graph()->start());
+  ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
+                      Operator::kEliminatable);
+}
+
 void JSGenericLowering::LowerJSTypeOf(Node* node) {
   // The typeof operator doesn't need the current context.
   NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
@@ -153,75 +165,37 @@
 
 
 void JSGenericLowering::LowerJSLoadProperty(Node* node) {
-  Node* closure = NodeProperties::GetValueInput(node, 2);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   const PropertyAccess& p = PropertyAccessOf(node->op());
   Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
-  // Load the type feedback vector from the closure.
-  Node* literals = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
-      effect, control);
-  Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), literals,
-      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
-                                kHeapObjectTag),
-      effect, control);
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
-  node->ReplaceInput(3, vector);
-  node->ReplaceInput(6, effect);
+  node->InsertInput(zone(), 3, vector);
   ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSLoadNamed(Node* node) {
-  Node* closure = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   NamedAccess const& p = NamedAccessOf(node->op());
   Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
-  // Load the type feedback vector from the closure.
-  Node* literals = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
-      effect, control);
-  Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), literals,
-      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
-                                kHeapObjectTag),
-      effect, control);
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
   node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
-  node->ReplaceInput(3, vector);
-  node->ReplaceInput(6, effect);
+  node->InsertInput(zone(), 3, vector);
   ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
-  Node* closure = NodeProperties::GetValueInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
   Callable callable =
       CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
-  // Load the type feedback vector from the closure.
-  Node* literals = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
-      effect, control);
-  Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), literals,
-      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
-                                kHeapObjectTag),
-      effect, control);
-  node->InsertInput(zone(), 0, jsgraph()->SmiConstant(p.feedback().index()));
-  node->ReplaceInput(1, vector);
-  node->ReplaceInput(4, effect);
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+  node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
+  node->InsertInput(zone(), 2, vector);
   ReplaceWithStubCall(node, callable, flags);
 }
 
@@ -230,33 +204,19 @@
   Node* receiver = NodeProperties::GetValueInput(node, 0);
   Node* key = NodeProperties::GetValueInput(node, 1);
   Node* value = NodeProperties::GetValueInput(node, 2);
-  Node* closure = NodeProperties::GetValueInput(node, 3);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   PropertyAccess const& p = PropertyAccessOf(node->op());
-  LanguageMode language_mode = p.language_mode();
   Callable callable =
-      CodeFactory::KeyedStoreICInOptimizedCode(isolate(), language_mode);
-  // Load the type feedback vector from the closure.
-  Node* literals = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
-      effect, control);
-  Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), literals,
-      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
-                                kHeapObjectTag),
-      effect, control);
+      CodeFactory::KeyedStoreICInOptimizedCode(isolate(), p.language_mode());
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
   typedef StoreWithVectorDescriptor Descriptor;
-  node->InsertInputs(zone(), 0, 1);
+  node->InsertInputs(zone(), 0, 2);
   node->ReplaceInput(Descriptor::kReceiver, receiver);
   node->ReplaceInput(Descriptor::kName, key);
   node->ReplaceInput(Descriptor::kValue, value);
   node->ReplaceInput(Descriptor::kSlot,
                      jsgraph()->SmiConstant(p.feedback().index()));
   node->ReplaceInput(Descriptor::kVector, vector);
-  node->ReplaceInput(7, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
 
@@ -264,39 +224,42 @@
 void JSGenericLowering::LowerJSStoreNamed(Node* node) {
   Node* receiver = NodeProperties::GetValueInput(node, 0);
   Node* value = NodeProperties::GetValueInput(node, 1);
-  Node* closure = NodeProperties::GetValueInput(node, 2);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   NamedAccess const& p = NamedAccessOf(node->op());
   Callable callable =
       CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
-  // Load the type feedback vector from the closure.
-  Node* literals = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
-      effect, control);
-  Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), literals,
-      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
-                                kHeapObjectTag),
-      effect, control);
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
   typedef StoreWithVectorDescriptor Descriptor;
-  node->InsertInputs(zone(), 0, 2);
+  node->InsertInputs(zone(), 0, 3);
   node->ReplaceInput(Descriptor::kReceiver, receiver);
   node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
   node->ReplaceInput(Descriptor::kValue, value);
   node->ReplaceInput(Descriptor::kSlot,
                      jsgraph()->SmiConstant(p.feedback().index()));
   node->ReplaceInput(Descriptor::kVector, vector);
-  node->ReplaceInput(7, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
 
+void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
+  Node* receiver = NodeProperties::GetValueInput(node, 0);
+  Node* value = NodeProperties::GetValueInput(node, 1);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
+  Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+  typedef StoreWithVectorDescriptor Descriptor;
+  node->InsertInputs(zone(), 0, 3);
+  node->ReplaceInput(Descriptor::kReceiver, receiver);
+  node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
+  node->ReplaceInput(Descriptor::kValue, value);
+  node->ReplaceInput(Descriptor::kSlot,
+                     jsgraph()->SmiConstant(p.feedback().index()));
+  node->ReplaceInput(Descriptor::kVector, vector);
+  ReplaceWithStubCall(node, callable, flags);
+}
 
 void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
   Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* closure = NodeProperties::GetValueInput(node, 1);
   Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
@@ -304,16 +267,7 @@
   const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
   Callable callable =
       CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
-  // Load the type feedback vector from the closure.
-  Node* literals = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
-      effect, control);
-  Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), literals,
-      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
-                                kHeapObjectTag),
-      effect, control);
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
   // Load global object from the context.
   Node* native_context = effect =
       graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
@@ -325,7 +279,7 @@
       jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
       effect, control);
   typedef StoreWithVectorDescriptor Descriptor;
-  node->InsertInputs(zone(), 0, 3);
+  node->InsertInputs(zone(), 0, 4);
   node->ReplaceInput(Descriptor::kReceiver, global);
   node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
   node->ReplaceInput(Descriptor::kValue, value);
@@ -336,6 +290,13 @@
   ReplaceWithStubCall(node, callable, flags);
 }
 
+void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
+  DataPropertyParameters const& p = DataPropertyParametersOf(node->op());
+  node->InsertInputs(zone(), 4, 2);
+  node->ReplaceInput(4, jsgraph()->HeapConstant(p.feedback().vector()));
+  node->ReplaceInput(5, jsgraph()->SmiConstant(p.feedback().index()));
+  ReplaceWithRuntimeCall(node, Runtime::kDefineDataPropertyInLiteral);
+}
 
 void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
   LanguageMode language_mode = OpParameter<LanguageMode>(node);
@@ -344,6 +305,11 @@
                                    : Runtime::kDeleteProperty_Sloppy);
 }
 
+void JSGenericLowering::LowerJSGetSuperConstructor(Node* node) {
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  Callable callable = CodeFactory::GetSuperConstructor(isolate());
+  ReplaceWithStubCall(node, callable, flags);
+}
 
 void JSGenericLowering::LowerJSInstanceOf(Node* node) {
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
@@ -358,40 +324,12 @@
 }
 
 void JSGenericLowering::LowerJSLoadContext(Node* node) {
-  const ContextAccess& access = ContextAccessOf(node->op());
-  for (size_t i = 0; i < access.depth(); ++i) {
-    node->ReplaceInput(
-        0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
-                            NodeProperties::GetValueInput(node, 0),
-                            jsgraph()->Int32Constant(
-                                Context::SlotOffset(Context::PREVIOUS_INDEX)),
-                            NodeProperties::GetEffectInput(node),
-                            graph()->start()));
-  }
-  node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
-                            static_cast<int>(access.index()))));
-  node->AppendInput(zone(), graph()->start());
-  NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
+  UNREACHABLE();  // Eliminated in typed lowering.
 }
 
 
 void JSGenericLowering::LowerJSStoreContext(Node* node) {
-  const ContextAccess& access = ContextAccessOf(node->op());
-  for (size_t i = 0; i < access.depth(); ++i) {
-    node->ReplaceInput(
-        0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
-                            NodeProperties::GetValueInput(node, 0),
-                            jsgraph()->Int32Constant(
-                                Context::SlotOffset(Context::PREVIOUS_INDEX)),
-                            NodeProperties::GetEffectInput(node),
-                            graph()->start()));
-  }
-  node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
-  node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
-                            static_cast<int>(access.index()))));
-  NodeProperties::ChangeOp(
-      node, machine()->Store(StoreRepresentation(MachineRepresentation::kTagged,
-                                                 kFullWriteBarrier)));
+  UNREACHABLE();  // Eliminated in typed lowering.
 }
 
 
@@ -438,11 +376,18 @@
   Handle<SharedFunctionInfo> const shared_info = p.shared_info();
   node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
 
-  // Use the FastNewClosureStub only for functions allocated in new space.
+  // Use the FastNewClosure builtin only for functions allocated in new
+  // space.
   if (p.pretenure() == NOT_TENURED) {
     Callable callable = CodeFactory::FastNewClosure(isolate());
+    node->InsertInput(zone(), 1,
+                      jsgraph()->HeapConstant(p.feedback().vector()));
+    node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
     ReplaceWithStubCall(node, callable, flags);
   } else {
+    node->InsertInput(zone(), 1,
+                      jsgraph()->HeapConstant(p.feedback().vector()));
+    node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
     ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
                                      ? Runtime::kNewClosure_Tenured
                                      : Runtime::kNewClosure);
@@ -451,14 +396,20 @@
 
 
 void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
-  int const slot_count = OpParameter<int>(node->op());
+  const CreateFunctionContextParameters& parameters =
+      CreateFunctionContextParametersOf(node->op());
+  int slot_count = parameters.slot_count();
+  ScopeType scope_type = parameters.scope_type();
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
 
-  if (slot_count <= FastNewFunctionContextStub::kMaximumSlots) {
-    Callable callable = CodeFactory::FastNewFunctionContext(isolate());
+  if (slot_count <=
+      ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+    Callable callable =
+        CodeFactory::FastNewFunctionContext(isolate(), scope_type);
     node->InsertInput(zone(), 1, jsgraph()->Int32Constant(slot_count));
     ReplaceWithStubCall(node, callable, flags);
   } else {
+    node->InsertInput(zone(), 1, jsgraph()->SmiConstant(scope_type));
     ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
   }
 }
@@ -478,11 +429,13 @@
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
 
-  // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
-  // initial length limit for arrays with "fast" elements kind.
+  // Use the FastCloneShallowArray builtin only for shallow boilerplates without
+  // properties up to the number of elements that the stubs can handle.
   if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
-      p.length() < JSArray::kInitialMaxFastElementArray) {
-    Callable callable = CodeFactory::FastCloneShallowArray(isolate());
+      p.length() <
+          ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements) {
+    Callable callable = CodeFactory::FastCloneShallowArray(
+        isolate(), DONT_TRACK_ALLOCATION_SITE);
     ReplaceWithStubCall(node, callable, flags);
   } else {
     node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -498,10 +451,11 @@
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
 
-  // Use the FastCloneShallowObjectStub only for shallow boilerplates without
-  // elements up to the number of properties that the stubs can handle.
+  // Use the FastCloneShallowObject builtin only for shallow boilerplates
+  // without elements up to the number of properties that the stubs can handle.
   if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
-      p.length() <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
+      p.length() <=
+          ConstructorBuiltinsAssembler::kMaximumClonedShallowObjectProperties) {
     Callable callable =
         CodeFactory::FastCloneShallowObject(isolate(), p.length());
     ReplaceWithStubCall(node, callable, flags);
@@ -554,9 +508,8 @@
   ReplaceWithRuntimeCall(node, Runtime::kNewScriptContext);
 }
 
-
-void JSGenericLowering::LowerJSCallConstruct(Node* node) {
-  CallConstructParameters const& p = CallConstructParametersOf(node->op());
+void JSGenericLowering::LowerJSConstruct(Node* node) {
+  ConstructParameters const& p = ConstructParametersOf(node->op());
   int const arg_count = static_cast<int>(p.arity() - 2);
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   Callable callable = CodeFactory::Construct(isolate());
@@ -574,9 +527,44 @@
   NodeProperties::ChangeOp(node, common()->Call(desc));
 }
 
+void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
+  ConstructWithSpreadParameters const& p =
+      ConstructWithSpreadParametersOf(node->op());
+  int const arg_count = static_cast<int>(p.arity() - 2);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  Callable callable = CodeFactory::ConstructWithSpread(isolate());
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+  Node* stub_code = jsgraph()->HeapConstant(callable.code());
+  Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+  Node* new_target = node->InputAt(arg_count + 1);
+  Node* receiver = jsgraph()->UndefinedConstant();
+  node->RemoveInput(arg_count + 1);  // Drop new target.
+  node->InsertInput(zone(), 0, stub_code);
+  node->InsertInput(zone(), 2, new_target);
+  node->InsertInput(zone(), 3, stub_arity);
+  node->InsertInput(zone(), 4, receiver);
+  NodeProperties::ChangeOp(node, common()->Call(desc));
+}
 
-void JSGenericLowering::LowerJSCallFunction(Node* node) {
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
+  CallForwardVarargsParameters p = CallForwardVarargsParametersOf(node->op());
+  Callable callable = CodeFactory::CallForwardVarargs(isolate());
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  if (p.tail_call_mode() == TailCallMode::kAllow) {
+    flags |= CallDescriptor::kSupportsTailCalls;
+  }
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), callable.descriptor(), 1, flags);
+  Node* stub_code = jsgraph()->HeapConstant(callable.code());
+  Node* start_index = jsgraph()->Uint32Constant(p.start_index());
+  node->InsertInput(zone(), 0, stub_code);
+  node->InsertInput(zone(), 2, start_index);
+  NodeProperties::ChangeOp(node, common()->Call(desc));
+}
+
+void JSGenericLowering::LowerJSCall(Node* node) {
+  CallParameters const& p = CallParametersOf(node->op());
   int const arg_count = static_cast<int>(p.arity() - 2);
   ConvertReceiverMode const mode = p.convert_mode();
   Callable callable = CodeFactory::Call(isolate(), mode);
@@ -593,6 +581,19 @@
   NodeProperties::ChangeOp(node, common()->Call(desc));
 }
 
+void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
+  CallWithSpreadParameters const& p = CallWithSpreadParametersOf(node->op());
+  int const arg_count = static_cast<int>(p.arity() - 2);
+  Callable callable = CodeFactory::CallWithSpread(isolate());
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+  Node* stub_code = jsgraph()->HeapConstant(callable.code());
+  Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+  node->InsertInput(zone(), 0, stub_code);
+  node->InsertInput(zone(), 2, stub_arity);
+  NodeProperties::ChangeOp(node, common()->Call(desc));
+}
 
 void JSGenericLowering::LowerJSCallRuntime(Node* node) {
   const CallRuntimeParameters& p = CallRuntimeParametersOf(node->op());
@@ -604,33 +605,24 @@
 }
 
 void JSGenericLowering::LowerJSForInNext(Node* node) {
-  ReplaceWithRuntimeCall(node, Runtime::kForInNext);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  Callable callable = CodeFactory::ForInNext(isolate());
+  ReplaceWithStubCall(node, callable, flags);
 }
 
-
 void JSGenericLowering::LowerJSForInPrepare(Node* node) {
-  ReplaceWithRuntimeCall(node, Runtime::kForInPrepare);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  Callable callable = CodeFactory::ForInPrepare(isolate());
+  ReplaceWithStubCall(node, callable, flags, node->op()->properties(), 3);
 }
 
 void JSGenericLowering::LowerJSLoadMessage(Node* node) {
-  ExternalReference message_address =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  node->RemoveInput(NodeProperties::FirstContextIndex(node));
-  node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
-  node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
-  NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
+  UNREACHABLE();  // Eliminated in typed lowering.
 }
 
 
 void JSGenericLowering::LowerJSStoreMessage(Node* node) {
-  ExternalReference message_address =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  node->RemoveInput(NodeProperties::FirstContextIndex(node));
-  node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
-  node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
-  StoreRepresentation representation(MachineRepresentation::kTagged,
-                                     kNoWriteBarrier);
-  NodeProperties::ChangeOp(node, machine()->Store(representation));
+  UNREACHABLE();  // Eliminated in typed lowering.
 }
 
 void JSGenericLowering::LowerJSLoadModule(Node* node) {
@@ -695,6 +687,11 @@
   ReplaceWithRuntimeCall(node, Runtime::kStackGuard);
 }
 
+void JSGenericLowering::LowerJSDebugger(Node* node) {
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  Callable callable = CodeFactory::HandleDebuggerStatement(isolate());
+  ReplaceWithStubCall(node, callable, flags);
+}
 
 Zone* JSGenericLowering::zone() const { return graph()->zone(); }
 
diff --git a/src/compiler/js-generic-lowering.h b/src/compiler/js-generic-lowering.h
index 38ee431..88d0b45 100644
--- a/src/compiler/js-generic-lowering.h
+++ b/src/compiler/js-generic-lowering.h
@@ -38,7 +38,8 @@
   // Helpers to replace existing nodes with a generic call.
   void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
   void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags,
-                           Operator::Properties properties);
+                           Operator::Properties properties,
+                           int result_size = 1);
   void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
 
   Zone* zone() const;
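The extra result_size parameter exists because a few stubs return more than one value (ForInPrepare above produces three), and the call descriptor must carry that count; defaulting it to 1 leaves every existing call site untouched. A toy illustration of the pattern (assumed shapes, not the real Linkage signatures):

    #include <cstdio>

    struct CallDescriptor {
      int result_count;
    };

    // Defaulted parameter keeps single-result call sites unchanged.
    CallDescriptor MakeStubCallDescriptor(int stack_params,
                                          int result_size = 1) {
      (void)stack_params;  // irrelevant to this sketch
      return CallDescriptor{result_size};
    }

    int main() {
      CallDescriptor load = MakeStubCallDescriptor(2);      // one result
      CallDescriptor forin = MakeStubCallDescriptor(1, 3);  // three results
      std::printf("%d %d\n", load.result_count, forin.result_count);
    }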
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
deleted file mode 100644
index e9ff060..0000000
--- a/src/compiler/js-global-object-specialization.cc
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/js-global-object-specialization.h"
-
-#include "src/compilation-dependencies.h"
-#include "src/compiler/access-builder.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/compiler/type-cache.h"
-#include "src/lookup.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct JSGlobalObjectSpecialization::ScriptContextTableLookupResult {
-  Handle<Context> context;
-  bool immutable;
-  int index;
-};
-
-JSGlobalObjectSpecialization::JSGlobalObjectSpecialization(
-    Editor* editor, JSGraph* jsgraph, Handle<JSGlobalObject> global_object,
-    CompilationDependencies* dependencies)
-    : AdvancedReducer(editor),
-      jsgraph_(jsgraph),
-      global_object_(global_object),
-      dependencies_(dependencies),
-      type_cache_(TypeCache::Get()) {}
-
-Reduction JSGlobalObjectSpecialization::Reduce(Node* node) {
-  switch (node->opcode()) {
-    case IrOpcode::kJSLoadGlobal:
-      return ReduceJSLoadGlobal(node);
-    case IrOpcode::kJSStoreGlobal:
-      return ReduceJSStoreGlobal(node);
-    default:
-      break;
-  }
-  return NoChange();
-}
-
-namespace {
-
-FieldAccess ForPropertyCellValue(MachineRepresentation representation,
-                                 Type* type, Handle<Name> name) {
-  WriteBarrierKind kind = kFullWriteBarrier;
-  if (representation == MachineRepresentation::kTaggedSigned) {
-    kind = kNoWriteBarrier;
-  } else if (representation == MachineRepresentation::kTaggedPointer) {
-    kind = kPointerWriteBarrier;
-  }
-  MachineType r = MachineType::TypeForRepresentation(representation);
-  FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, name, type, r,
-                        kind};
-  return access;
-}
-}  // namespace
-
-Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
-  Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // Try to lookup the name on the script context table first (lexical scoping).
-  ScriptContextTableLookupResult result;
-  if (LookupInScriptContextTable(name, &result)) {
-    if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
-    Node* context = jsgraph()->HeapConstant(result.context);
-    Node* value = effect = graph()->NewNode(
-        javascript()->LoadContext(0, result.index, result.immutable), context,
-        context, effect);
-    ReplaceWithValue(node, value, effect);
-    return Replace(value);
-  }
-
-  // Lookup on the global object instead.  We only deal with own data
-  // properties of the global object here (represented as PropertyCell).
-  LookupIterator it(global_object(), name, LookupIterator::OWN);
-  if (it.state() != LookupIterator::DATA) return NoChange();
-  if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
-  Handle<PropertyCell> property_cell = it.GetPropertyCell();
-  PropertyDetails property_details = property_cell->property_details();
-  Handle<Object> property_cell_value(property_cell->value(), isolate());
-
-  // Load from non-configurable, read-only data property on the global
-  // object can be constant-folded, even without deoptimization support.
-  if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
-    Node* value = jsgraph()->Constant(property_cell_value);
-    ReplaceWithValue(node, value);
-    return Replace(value);
-  }
-
-  // Record a code dependency on the cell if we can benefit from the
-  // additional feedback, or the global property is configurable (i.e.
-  // can be deleted or reconfigured to an accessor property).
-  if (property_details.cell_type() != PropertyCellType::kMutable ||
-      property_details.IsConfigurable()) {
-    dependencies()->AssumePropertyCell(property_cell);
-  }
-
-  // Load from constant/undefined global property can be constant-folded.
-  if (property_details.cell_type() == PropertyCellType::kConstant ||
-      property_details.cell_type() == PropertyCellType::kUndefined) {
-    Node* value = jsgraph()->Constant(property_cell_value);
-    ReplaceWithValue(node, value);
-    return Replace(value);
-  }
-
-  // Load from constant type cell can benefit from type feedback.
-  Type* property_cell_value_type = Type::NonInternal();
-  MachineRepresentation representation = MachineRepresentation::kTagged;
-  if (property_details.cell_type() == PropertyCellType::kConstantType) {
-    // Compute proper type based on the current value in the cell.
-    if (property_cell_value->IsSmi()) {
-      property_cell_value_type = Type::SignedSmall();
-      representation = MachineRepresentation::kTaggedSigned;
-    } else if (property_cell_value->IsNumber()) {
-      property_cell_value_type = Type::Number();
-      representation = MachineRepresentation::kTaggedPointer;
-    } else {
-      // TODO(turbofan): Track the property_cell_value_map on the FieldAccess
-      // below and use it in LoadElimination to eliminate map checks.
-      Handle<Map> property_cell_value_map(
-          Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
-      property_cell_value_type = Type::For(property_cell_value_map);
-      representation = MachineRepresentation::kTaggedPointer;
-    }
-  }
-  Node* value = effect =
-      graph()->NewNode(simplified()->LoadField(ForPropertyCellValue(
-                           representation, property_cell_value_type, name)),
-                       jsgraph()->HeapConstant(property_cell), effect, control);
-  ReplaceWithValue(node, value, effect, control);
-  return Replace(value);
-}
-
-
-Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
-  Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // Try to lookup the name on the script context table first (lexical scoping).
-  ScriptContextTableLookupResult result;
-  if (LookupInScriptContextTable(name, &result)) {
-    if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
-    if (result.immutable) return NoChange();
-    Node* context = jsgraph()->HeapConstant(result.context);
-    effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
-                              context, value, context, effect, control);
-    ReplaceWithValue(node, value, effect, control);
-    return Replace(value);
-  }
-
-  // Look up the name on the global object instead. We only deal with own data
-  // properties of the global object here (represented as PropertyCell).
-  LookupIterator it(global_object(), name, LookupIterator::OWN);
-  if (it.state() != LookupIterator::DATA) return NoChange();
-  if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
-  Handle<PropertyCell> property_cell = it.GetPropertyCell();
-  PropertyDetails property_details = property_cell->property_details();
-  Handle<Object> property_cell_value(property_cell->value(), isolate());
-
-  // Don't even bother trying to lower stores to read-only data properties.
-  if (property_details.IsReadOnly()) return NoChange();
-  switch (property_details.cell_type()) {
-    case PropertyCellType::kUndefined: {
-      return NoChange();
-    }
-    case PropertyCellType::kConstant: {
-      // Record a code dependency on the cell, and just deoptimize if the new
-      // value doesn't match the previous value stored inside the cell.
-      dependencies()->AssumePropertyCell(property_cell);
-      Node* check = graph()->NewNode(simplified()->ReferenceEqual(), value,
-                                     jsgraph()->Constant(property_cell_value));
-      effect =
-          graph()->NewNode(simplified()->CheckIf(), check, effect, control);
-      break;
-    }
-    case PropertyCellType::kConstantType: {
-      // Record a code dependency on the cell, and just deoptimize if the new
-      // value's type doesn't match the type of the previous value in the cell.
-      dependencies()->AssumePropertyCell(property_cell);
-      Type* property_cell_value_type;
-      MachineRepresentation representation = MachineRepresentation::kTagged;
-      if (property_cell_value->IsHeapObject()) {
-        // We cannot do anything if the {property_cell_value}s map is no
-        // longer stable.
-        Handle<Map> property_cell_value_map(
-            Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
-        if (!property_cell_value_map->is_stable()) return NoChange();
-        dependencies()->AssumeMapStable(property_cell_value_map);
-
-        // Check that the {value} is a HeapObject.
-        value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
-                                          value, effect, control);
-
-      // Check the {value} map against the {property_cell} map.
-        effect = graph()->NewNode(
-            simplified()->CheckMaps(1), value,
-            jsgraph()->HeapConstant(property_cell_value_map), effect, control);
-        property_cell_value_type = Type::OtherInternal();
-        representation = MachineRepresentation::kTaggedPointer;
-      } else {
-        // Check that the {value} is a Smi.
-        value = effect =
-            graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
-        property_cell_value_type = Type::SignedSmall();
-        representation = MachineRepresentation::kTaggedSigned;
-      }
-      effect = graph()->NewNode(
-          simplified()->StoreField(ForPropertyCellValue(
-              representation, property_cell_value_type, name)),
-          jsgraph()->HeapConstant(property_cell), value, effect, control);
-      break;
-    }
-    case PropertyCellType::kMutable: {
-      // A store to a non-configurable data property on the global can be lowered
-      // to a field store, even without recording a code dependency on the cell,
-      // because the property cannot be deleted or reconfigured to an accessor
-      // or interceptor property.
-      if (property_details.IsConfigurable()) {
-        // Protect lowering by recording a code dependency on the cell.
-        dependencies()->AssumePropertyCell(property_cell);
-      }
-      effect = graph()->NewNode(
-          simplified()->StoreField(ForPropertyCellValue(
-              MachineRepresentation::kTagged, Type::NonInternal(), name)),
-          jsgraph()->HeapConstant(property_cell), value, effect, control);
-      break;
-    }
-  }
-  ReplaceWithValue(node, value, effect, control);
-  return Replace(value);
-}
-
-bool JSGlobalObjectSpecialization::LookupInScriptContextTable(
-    Handle<Name> name, ScriptContextTableLookupResult* result) {
-  if (!name->IsString()) return false;
-  Handle<ScriptContextTable> script_context_table(
-      global_object()->native_context()->script_context_table(), isolate());
-  ScriptContextTable::LookupResult lookup_result;
-  if (!ScriptContextTable::Lookup(script_context_table,
-                                  Handle<String>::cast(name), &lookup_result)) {
-    return false;
-  }
-  Handle<Context> script_context = ScriptContextTable::GetContext(
-      script_context_table, lookup_result.context_index);
-  result->context = script_context;
-  result->immutable = lookup_result.mode == CONST;
-  result->index = lookup_result.slot_index;
-  return true;
-}
-
-Graph* JSGlobalObjectSpecialization::graph() const {
-  return jsgraph()->graph();
-}
-
-Isolate* JSGlobalObjectSpecialization::isolate() const {
-  return jsgraph()->isolate();
-}
-
-CommonOperatorBuilder* JSGlobalObjectSpecialization::common() const {
-  return jsgraph()->common();
-}
-
-JSOperatorBuilder* JSGlobalObjectSpecialization::javascript() const {
-  return jsgraph()->javascript();
-}
-
-SimplifiedOperatorBuilder* JSGlobalObjectSpecialization::simplified() const {
-  return jsgraph()->simplified();
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
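Editor's note (not part of the patch): the deleted ReduceJSStoreGlobal above picks its lowering strategy purely from the PropertyCell's type and its read-only bit. Below is a minimal standalone C++ sketch of that decision table; it is not V8 code, and all identifiers are illustrative.

#include <iostream>

enum class CellType { kUndefined, kConstant, kConstantType, kMutable };

enum class Lowering {
  kNoChange,          // read-only properties and kUndefined cells
  kDeoptIfNotEqual,   // kConstant: depend on the cell, check value identity
  kDeoptIfWrongType,  // kConstantType: depend on the cell, check Smi/map
  kPlainFieldStore    // kMutable: plain store; cell dependency only when
                      // the property is configurable
};

Lowering ChooseStoreLowering(CellType type, bool read_only) {
  // Mirrors ReduceJSStoreGlobal: stores to read-only cells are never lowered.
  if (read_only) return Lowering::kNoChange;
  switch (type) {
    case CellType::kUndefined:    return Lowering::kNoChange;
    case CellType::kConstant:     return Lowering::kDeoptIfNotEqual;
    case CellType::kConstantType: return Lowering::kDeoptIfWrongType;
    case CellType::kMutable:      return Lowering::kPlainFieldStore;
  }
  return Lowering::kNoChange;
}

int main() {
  bool ok = ChooseStoreLowering(CellType::kConstant, false) ==
            Lowering::kDeoptIfNotEqual;
  std::cout << ok << "\n";  // prints 1
}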
diff --git a/src/compiler/js-global-object-specialization.h b/src/compiler/js-global-object-specialization.h
deleted file mode 100644
index 50bdd80..0000000
--- a/src/compiler/js-global-object-specialization.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
-#define V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
-
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class CompilationDependencies;
-
-namespace compiler {
-
-// Forward declarations.
-class CommonOperatorBuilder;
-class JSGraph;
-class JSOperatorBuilder;
-class SimplifiedOperatorBuilder;
-class TypeCache;
-
-// Specializes a given JSGraph to a given global object, potentially constant
-// folding some {JSLoadGlobal} nodes or strength reducing some {JSStoreGlobal}
-// nodes.
-class JSGlobalObjectSpecialization final : public AdvancedReducer {
- public:
-  JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph,
-                               Handle<JSGlobalObject> global_object,
-                               CompilationDependencies* dependencies);
-
-  Reduction Reduce(Node* node) final;
-
- private:
-  Reduction ReduceJSLoadGlobal(Node* node);
-  Reduction ReduceJSStoreGlobal(Node* node);
-
-  struct ScriptContextTableLookupResult;
-  bool LookupInScriptContextTable(Handle<Name> name,
-                                  ScriptContextTableLookupResult* result);
-
-  Graph* graph() const;
-  JSGraph* jsgraph() const { return jsgraph_; }
-  Isolate* isolate() const;
-  CommonOperatorBuilder* common() const;
-  JSOperatorBuilder* javascript() const;
-  SimplifiedOperatorBuilder* simplified() const;
-  Handle<JSGlobalObject> global_object() const { return global_object_; }
-  CompilationDependencies* dependencies() const { return dependencies_; }
-
-  JSGraph* const jsgraph_;
-  Handle<JSGlobalObject> const global_object_;
-  CompilationDependencies* const dependencies_;
-  TypeCache const& type_cache_;
-
-  DISALLOW_COPY_AND_ASSIGN(JSGlobalObjectSpecialization);
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
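Editor's note (not part of the patch): the deleted class follows TurboFan's AdvancedReducer protocol, in which Reduce(node) reports NoChange, Changed, or Replace, and the graph reducer reapplies reducers until a fixed point. A toy standalone model of that driver loop follows; the types and names are stand-ins, not the V8 API.

#include <iostream>

struct Reduction {
  bool changed = false;
  int replacement = 0;  // the toy "node" is just an int value
};

Reduction NoChange() { return {}; }
Reduction Replace(int value) { return {true, value}; }

// Stand-in for a constant-folding reducer: halve the node while it is even.
Reduction ToyReducer(int node) {
  if (node != 0 && node % 2 == 0) return Replace(node / 2);
  return NoChange();
}

// Stand-in for the graph reducer: reapply until no reducer fires.
int ReduceToFixedPoint(int node) {
  for (;;) {
    Reduction r = ToyReducer(node);
    if (!r.changed) return node;
    node = r.replacement;
  }
}

int main() { std::cout << ReduceToFixedPoint(40) << "\n"; }  // prints 5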
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index 8626cd1..b51623a 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -2,10 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/code-stubs.h"
 #include "src/compiler/js-graph.h"
+
+#include "src/code-stubs.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/typer.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -31,11 +33,26 @@
 
 Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
                                   ArgvMode argv_mode, bool builtin_exit_frame) {
-  if (save_doubles == kDontSaveFPRegs && argv_mode == kArgvOnStack &&
-      result_size == 1) {
+  if (save_doubles == kDontSaveFPRegs && argv_mode == kArgvOnStack) {
+    DCHECK(result_size >= 1 && result_size <= 3);
+    if (!builtin_exit_frame) {
+      CachedNode key;
+      if (result_size == 1) {
+        key = kCEntryStub1Constant;
+      } else if (result_size == 2) {
+        key = kCEntryStub2Constant;
+      } else {
+        DCHECK(result_size == 3);
+        key = kCEntryStub3Constant;
+      }
+      return CACHED(
+          key, HeapConstant(CEntryStub(isolate(), result_size, save_doubles,
+                                       argv_mode, builtin_exit_frame)
+                                .GetCode()));
+    }
     CachedNode key = builtin_exit_frame
-                         ? kCEntryStubWithBuiltinExitFrameConstant
-                         : kCEntryStubConstant;
+                         ? kCEntryStub1WithBuiltinExitFrameConstant
+                         : kCEntryStub1Constant;
     return CACHED(key,
                   HeapConstant(CEntryStub(isolate(), result_size, save_doubles,
                                           argv_mode, builtin_exit_frame)
@@ -51,11 +68,6 @@
                 HeapConstant(factory()->empty_fixed_array()));
 }
 
-Node* JSGraph::EmptyLiteralsArrayConstant() {
-  return CACHED(kEmptyLiteralsArrayConstant,
-                HeapConstant(factory()->empty_literals_array()));
-}
-
 Node* JSGraph::EmptyStringConstant() {
   return CACHED(kEmptyStringConstant, HeapConstant(factory()->empty_string()));
 }
@@ -264,7 +276,8 @@
 }
 
 Node* JSGraph::EmptyStateValues() {
-  return CACHED(kEmptyStateValues, graph()->NewNode(common()->StateValues(0)));
+  return CACHED(kEmptyStateValues, graph()->NewNode(common()->StateValues(
+                                       0, SparseInputMask::Dense())));
 }
 
 Node* JSGraph::Dead() {
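Editor's note (not part of the patch): the CEntryStubConstant change above splits one cache slot into one slot per result_size (1 to 3), still built lazily via the CACHED pattern. A standalone sketch of that memoization pattern; only the slot names mirror the patch, everything else is illustrative.

#include <iostream>

enum CachedNode {
  kCEntryStub1Constant,  // slot names mirror the patch
  kCEntryStub2Constant,
  kCEntryStub3Constant,
  kNumCachedNodes
};

struct NodeCache {
  int slots[kNumCachedNodes] = {};  // 0 means "not built yet"

  // Like the CACHED(key, expr) macro: build on first use, then reuse.
  int Cached(CachedNode key, int (*build)(int), int arg) {
    if (slots[key] == 0) slots[key] = build(arg);
    return slots[key];
  }
};

int BuildCEntry(int result_size) { return 1000 + result_size; }  // stand-in

int main() {
  NodeCache cache;
  // result_size selects the slot, as in the patched CEntryStubConstant.
  CachedNode key = kCEntryStub2Constant;
  std::cout << cache.Cached(key, BuildCEntry, 2) << " "
            << cache.Cached(key, BuildCEntry, 2) << "\n";  // built once
}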
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index c2c0c77..8f81555 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -49,7 +49,6 @@
                            ArgvMode argv_mode = kArgvOnStack,
                            bool builtin_exit_frame = false);
   Node* EmptyFixedArrayConstant();
-  Node* EmptyLiteralsArrayConstant();
   Node* EmptyStringConstant();
   Node* FixedArrayMapConstant();
   Node* FixedDoubleArrayMapConstant();
@@ -162,10 +161,11 @@
     kAllocateInNewSpaceStubConstant,
     kAllocateInOldSpaceStubConstant,
     kToNumberBuiltinConstant,
-    kCEntryStubConstant,
-    kCEntryStubWithBuiltinExitFrameConstant,
+    kCEntryStub1Constant,
+    kCEntryStub2Constant,
+    kCEntryStub3Constant,
+    kCEntryStub1WithBuiltinExitFrameConstant,
     kEmptyFixedArrayConstant,
-    kEmptyLiteralsArrayConstant,
     kEmptyStringConstant,
     kFixedArrayMapConstant,
     kFixedDoubleArrayMapConstant,
diff --git a/src/compiler/js-inlining-heuristic.cc b/src/compiler/js-inlining-heuristic.cc
index d6229c2..6f99fbb 100644
--- a/src/compiler/js-inlining-heuristic.cc
+++ b/src/compiler/js-inlining-heuristic.cc
@@ -22,7 +22,7 @@
 namespace {
 
 int CollectFunctions(Node* node, Handle<JSFunction>* functions,
-                     int functions_size) {
+                     int functions_size, Handle<SharedFunctionInfo>& shared) {
   DCHECK_NE(0, functions_size);
   HeapObjectMatcher m(node);
   if (m.HasValue() && m.Value()->IsJSFunction()) {
@@ -39,23 +39,29 @@
     }
     return value_input_count;
   }
+  if (m.IsJSCreateClosure()) {
+    CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
+    functions[0] = Handle<JSFunction>::null();
+    shared = p.shared_info();
+    return 1;
+  }
   return 0;
 }
 
-bool CanInlineFunction(Handle<JSFunction> function) {
+bool CanInlineFunction(Handle<SharedFunctionInfo> shared) {
   // Built-in functions are handled by the JSBuiltinReducer.
-  if (function->shared()->HasBuiltinFunctionId()) return false;
+  if (shared->HasBuiltinFunctionId()) return false;
 
-  // Don't inline builtins.
-  if (function->shared()->IsBuiltin()) return false;
+  // Only choose user code for inlining.
+  if (!shared->IsUserJavaScript()) return false;
 
   // Quick check on the size of the AST to avoid parsing large candidates.
-  if (function->shared()->ast_node_count() > FLAG_max_inlined_nodes) {
+  if (shared->ast_node_count() > FLAG_max_inlined_nodes) {
     return false;
   }
 
   // Avoid inlining across the boundary of asm.js code.
-  if (function->shared()->asm_function()) return false;
+  if (shared->asm_function()) return false;
   return true;
 }
 
@@ -72,8 +78,8 @@
   Node* callee = node->InputAt(0);
   Candidate candidate;
   candidate.node = node;
-  candidate.num_functions =
-      CollectFunctions(callee, candidate.functions, kMaxCallPolymorphism);
+  candidate.num_functions = CollectFunctions(
+      callee, candidate.functions, kMaxCallPolymorphism, candidate.shared_info);
   if (candidate.num_functions == 0) {
     return NoChange();
   } else if (candidate.num_functions > 1 && !FLAG_polymorphic_inlining) {
@@ -87,11 +93,14 @@
   // Functions marked with %SetForceInlineFlag are immediately inlined.
   bool can_inline = false, force_inline = true;
   for (int i = 0; i < candidate.num_functions; ++i) {
-    Handle<JSFunction> function = candidate.functions[i];
-    if (!function->shared()->force_inline()) {
+    Handle<SharedFunctionInfo> shared =
+        candidate.functions[i].is_null()
+            ? candidate.shared_info
+            : handle(candidate.functions[i]->shared());
+    if (!shared->force_inline()) {
       force_inline = false;
     }
-    if (CanInlineFunction(function)) {
+    if (CanInlineFunction(shared)) {
       can_inline = true;
     }
   }
@@ -117,11 +126,11 @@
   }
 
   // Gather feedback on how often this call site has been hit before.
-  if (node->opcode() == IrOpcode::kJSCallFunction) {
-    CallFunctionParameters const p = CallFunctionParametersOf(node->op());
+  if (node->opcode() == IrOpcode::kJSCall) {
+    CallParameters const p = CallParametersOf(node->op());
     candidate.frequency = p.frequency();
   } else {
-    CallConstructParameters const p = CallConstructParametersOf(node->op());
+    ConstructParameters const p = ConstructParametersOf(node->op());
     candidate.frequency = p.frequency();
   }
 
@@ -167,15 +176,18 @@
   int const num_calls = candidate.num_functions;
   Node* const node = candidate.node;
   if (num_calls == 1) {
-    Handle<JSFunction> function = candidate.functions[0];
-    Reduction const reduction = inliner_.ReduceJSCall(node, function);
+    Handle<SharedFunctionInfo> shared =
+        candidate.functions[0].is_null()
+            ? candidate.shared_info
+            : handle(candidate.functions[0]->shared());
+    Reduction const reduction = inliner_.ReduceJSCall(node);
     if (reduction.Changed()) {
-      cumulative_count_ += function->shared()->ast_node_count();
+      cumulative_count_ += shared->ast_node_count();
     }
     return reduction;
   }
 
-  // Expand the JSCallFunction/JSCallConstruct node to a subgraph first if
+  // Expand the JSCall/JSConstruct node to a subgraph first if
   // we have multiple known target functions.
   DCHECK_LT(1, num_calls);
   Node* calls[kMaxCallPolymorphism + 1];
@@ -192,6 +204,8 @@
 
   // Create the appropriate control flow to dispatch to the cloned calls.
   for (int i = 0; i < num_calls; ++i) {
+    // TODO(2206): Base the comparison on the underlying SharedFunctionInfo
+    // instead of the target JSFunction reference directly.
     Node* target = jsgraph()->HeapConstant(candidate.functions[i]);
     if (i != (num_calls - 1)) {
       Node* check =
@@ -255,7 +269,7 @@
   for (int i = 0; i < num_calls; ++i) {
     Handle<JSFunction> function = candidate.functions[i];
     Node* node = calls[i];
-    Reduction const reduction = inliner_.ReduceJSCall(node, function);
+    Reduction const reduction = inliner_.ReduceJSCall(node);
     if (reduction.Changed()) {
       cumulative_count_ += function->shared()->ast_node_count();
     }
@@ -281,9 +295,12 @@
     PrintF("  #%d:%s, frequency:%g\n", candidate.node->id(),
            candidate.node->op()->mnemonic(), candidate.frequency);
     for (int i = 0; i < candidate.num_functions; ++i) {
-      Handle<JSFunction> function = candidate.functions[i];
-      PrintF("  - size:%d, name: %s\n", function->shared()->ast_node_count(),
-             function->shared()->DebugName()->ToCString().get());
+      Handle<SharedFunctionInfo> shared =
+          candidate.functions[i].is_null()
+              ? candidate.shared_info
+              : handle(candidate.functions[i]->shared());
+      PrintF("  - size:%d, name: %s\n", shared->ast_node_count(),
+             shared->DebugName()->ToCString().get());
     }
   }
 }
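Editor's note (not part of the patch): after this change the heuristic resolves a SharedFunctionInfo from either a concrete JSFunction or, when the slot holds a null handle, from the JSCreateClosure parameters (see the TODO(2206) in js-inlining-heuristic.h below). A sketch of that resolution step, with std::optional standing in for a possibly-null Handle<JSFunction>; everything here is illustrative, not V8 code.

#include <iostream>
#include <optional>
#include <string>

struct SharedInfo {
  std::string name;
  int ast_node_count = 0;
};
struct Function {
  SharedInfo shared;
};

struct Candidate {
  // An empty slot plays the role of the null JSFunction handle that marks
  // "target known only via JSCreateClosure".
  std::optional<Function> functions[4];
  SharedInfo shared_info;  // filled in for JSCreateClosure targets
  int num_functions = 0;
};

// Mirrors: functions[i].is_null() ? candidate.shared_info
//                                 : handle(functions[i]->shared())
const SharedInfo& ResolveShared(const Candidate& c, int i) {
  return c.functions[i] ? c.functions[i]->shared : c.shared_info;
}

int main() {
  Candidate c;
  c.num_functions = 1;  // functions[0] left empty: closure-site target
  c.shared_info = {"closure_literal", 12};
  std::cout << ResolveShared(c, 0).name << "\n";  // closure_literal
}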
diff --git a/src/compiler/js-inlining-heuristic.h b/src/compiler/js-inlining-heuristic.h
index aca8011..b834cb0 100644
--- a/src/compiler/js-inlining-heuristic.h
+++ b/src/compiler/js-inlining-heuristic.h
@@ -37,6 +37,11 @@
 
   struct Candidate {
     Handle<JSFunction> functions[kMaxCallPolymorphism];
+    // TODO(2206): For now polymorphic inlining is treated orthogonally to
+    // inlining based on SharedFunctionInfo. This should be unified and the
+    // above array should be switched to SharedFunctionInfo instead. Currently
+    // we use {num_functions == 1 && functions[0].is_null()} as an indicator.
+    Handle<SharedFunctionInfo> shared_info;
     int num_functions;
     Node* node = nullptr;    // The call site at which to inline.
     float frequency = 0.0f;  // Relative frequency of this call site.
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 0e122a6..c87be6c 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -4,25 +4,21 @@
 
 #include "src/compiler/js-inlining.h"
 
-#include "src/ast/ast-numbering.h"
 #include "src/ast/ast.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/compiler/all-nodes.h"
-#include "src/compiler/ast-graph-builder.h"
-#include "src/compiler/ast-loop-assignment-analyzer.h"
 #include "src/compiler/bytecode-graph-builder.h"
 #include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/simplified-operator.h"
-#include "src/compiler/type-hint-analyzer.h"
 #include "src/isolate-inl.h"
 #include "src/parsing/parse-info.h"
-#include "src/parsing/rewriter.h"
 
 namespace v8 {
 namespace internal {
@@ -35,45 +31,45 @@
 
 
 // Provides convenience accessors for the common layout of nodes having either
-// the {JSCallFunction} or the {JSCallConstruct} operator.
+// the {JSCall} or the {JSConstruct} operator.
 class JSCallAccessor {
  public:
   explicit JSCallAccessor(Node* call) : call_(call) {
-    DCHECK(call->opcode() == IrOpcode::kJSCallFunction ||
-           call->opcode() == IrOpcode::kJSCallConstruct);
+    DCHECK(call->opcode() == IrOpcode::kJSCall ||
+           call->opcode() == IrOpcode::kJSConstruct);
   }
 
   Node* target() {
-    // Both, {JSCallFunction} and {JSCallConstruct}, have same layout here.
+    // Both {JSCall} and {JSConstruct} have the same layout here.
     return call_->InputAt(0);
   }
 
   Node* receiver() {
-    DCHECK_EQ(IrOpcode::kJSCallFunction, call_->opcode());
+    DCHECK_EQ(IrOpcode::kJSCall, call_->opcode());
     return call_->InputAt(1);
   }
 
   Node* new_target() {
-    DCHECK_EQ(IrOpcode::kJSCallConstruct, call_->opcode());
+    DCHECK_EQ(IrOpcode::kJSConstruct, call_->opcode());
     return call_->InputAt(formal_arguments() + 1);
   }
 
   Node* frame_state() {
-    // Both, {JSCallFunction} and {JSCallConstruct}, have frame state.
+    // Both {JSCall} and {JSConstruct} have a frame state.
     return NodeProperties::GetFrameStateInput(call_);
   }
 
   int formal_arguments() {
-    // Both, {JSCallFunction} and {JSCallConstruct}, have two extra inputs:
-    //  - JSCallConstruct: Includes target function and new target.
-    //  - JSCallFunction: Includes target function and receiver.
+    // Both {JSCall} and {JSConstruct} have two extra inputs:
+    //  - JSConstruct: Includes target function and new target.
+    //  - JSCall: Includes target function and receiver.
     return call_->op()->ValueInputCount() - 2;
   }
 
   float frequency() const {
-    return (call_->opcode() == IrOpcode::kJSCallFunction)
-               ? CallFunctionParametersOf(call_->op()).frequency()
-               : CallConstructParametersOf(call_->op()).frequency();
+    return (call_->opcode() == IrOpcode::kJSCall)
+               ? CallParametersOf(call_->op()).frequency()
+               : ConstructParametersOf(call_->op()).frequency();
   }
 
  private:
@@ -224,9 +220,9 @@
   }
 }
 
-
 Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
                                             int parameter_count,
+                                            BailoutId bailout_id,
                                             FrameStateType frame_state_type,
                                             Handle<SharedFunctionInfo> shared) {
   const FrameStateFunctionInfo* state_info =
@@ -234,15 +230,15 @@
                                              parameter_count + 1, 0, shared);
 
   const Operator* op = common()->FrameState(
-      BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
-  const Operator* op0 = common()->StateValues(0);
+      bailout_id, OutputFrameStateCombine::Ignore(), state_info);
+  const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
   Node* node0 = graph()->NewNode(op0);
   NodeVector params(local_zone_);
   for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
     params.push_back(node->InputAt(1 + parameter));
   }
-  const Operator* op_param =
-      common()->StateValues(static_cast<int>(params.size()));
+  const Operator* op_param = common()->StateValues(
+      static_cast<int>(params.size()), SparseInputMask::Dense());
   Node* params_node = graph()->NewNode(
       op_param, static_cast<int>(params.size()), &params.front());
   return graph()->NewNode(op, params_node, node0, node0,
@@ -273,7 +269,7 @@
 
   const Operator* op = common()->FrameState(
       BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
-  const Operator* op0 = common()->StateValues(0);
+  const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
   Node* node0 = graph()->NewNode(op0);
   return graph()->NewNode(op, node0, node0, node0,
                           jsgraph()->UndefinedConstant(), function,
@@ -282,19 +278,6 @@
 
 namespace {
 
-// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
-// alias analyzer?
-bool IsSame(Node* a, Node* b) {
-  if (a == b) {
-    return true;
-  } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
-    return IsSame(a->InputAt(0), b);
-  } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
-    return IsSame(a, b->InputAt(0));
-  }
-  return false;
-}
-
 // TODO(bmeurer): Unify this with the witness helper functions in the
 // js-builtin-reducer.cc once we have a better understanding of the
 // map tracking we want to do, and eventually change the CheckMaps
@@ -307,41 +290,39 @@
 // function, which either returns the map set from the CheckMaps or
 // a singleton set from a StoreField.
 bool NeedsConvertReceiver(Node* receiver, Node* effect) {
-  for (Node* dominator = effect;;) {
-    if (dominator->opcode() == IrOpcode::kCheckMaps &&
-        IsSame(dominator->InputAt(0), receiver)) {
-      // Check if all maps have the given {instance_type}.
-      for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
-        HeapObjectMatcher m(NodeProperties::GetValueInput(dominator, i));
-        if (!m.HasValue()) return true;
-        Handle<Map> const map = Handle<Map>::cast(m.Value());
-        if (!map->IsJSReceiverMap()) return true;
-      }
+  // Check if the {receiver} is already a JSReceiver.
+  switch (receiver->opcode()) {
+    case IrOpcode::kJSConstruct:
+    case IrOpcode::kJSConstructWithSpread:
+    case IrOpcode::kJSCreate:
+    case IrOpcode::kJSCreateArguments:
+    case IrOpcode::kJSCreateArray:
+    case IrOpcode::kJSCreateClosure:
+    case IrOpcode::kJSCreateIterResultObject:
+    case IrOpcode::kJSCreateKeyValueArray:
+    case IrOpcode::kJSCreateLiteralArray:
+    case IrOpcode::kJSCreateLiteralObject:
+    case IrOpcode::kJSCreateLiteralRegExp:
+    case IrOpcode::kJSConvertReceiver:
+    case IrOpcode::kJSGetSuperConstructor:
+    case IrOpcode::kJSToObject: {
       return false;
     }
-    switch (dominator->opcode()) {
-      case IrOpcode::kStoreField: {
-        FieldAccess const& access = FieldAccessOf(dominator->op());
-        if (access.base_is_tagged == kTaggedBase &&
-            access.offset == HeapObject::kMapOffset) {
-          return true;
+    default: {
+      // We don't really care about the exact maps here, just the instance
+      // types, which don't change across potential side-effecting operations.
+      ZoneHandleSet<Map> maps;
+      NodeProperties::InferReceiverMapsResult result =
+          NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+      if (result != NodeProperties::kNoReceiverMaps) {
+        // Check if all {maps} are actually JSReceiver maps.
+        for (size_t i = 0; i < maps.size(); ++i) {
+          if (!maps[i]->IsJSReceiverMap()) return true;
         }
-        break;
+        return false;
       }
-      case IrOpcode::kStoreElement:
-      case IrOpcode::kStoreTypedElement:
-        break;
-      default: {
-        DCHECK_EQ(1, dominator->op()->EffectOutputCount());
-        if (dominator->op()->EffectInputCount() != 1 ||
-            !dominator->op()->HasProperty(Operator::kNoWrite)) {
-          // Didn't find any appropriate CheckMaps node.
-          return true;
-        }
-        break;
-      }
+      return true;
     }
-    dominator = NodeProperties::GetEffectInput(dominator);
   }
 }
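Editor's note (not part of the patch): the rewritten NeedsConvertReceiver above replaces the manual effect-chain walk with a two-tier check: first a whitelist of opcodes whose result is always a JSReceiver, then receiver-map inference for everything else, conservatively requesting the conversion when no map information is available. A condensed standalone sketch, with a toy opcode set and an illustrative map query.

#include <iostream>
#include <vector>

enum class Opcode { kJSCreate, kJSToObject, kJSAdd, kParameter };

struct Map {
  bool is_js_receiver = false;
};

// Stand-in for NodeProperties::InferReceiverMaps; an empty result means
// "no reliable map information for this receiver".
std::vector<Map> InferReceiverMaps(Opcode) { return {}; }

bool NeedsConvertReceiver(Opcode receiver_op) {
  switch (receiver_op) {
    // Tier 1: opcodes whose result is always a JSReceiver.
    case Opcode::kJSCreate:
    case Opcode::kJSToObject:
      return false;
    // Tier 2: fall back to map inference.
    default: {
      std::vector<Map> maps = InferReceiverMaps(receiver_op);
      if (!maps.empty()) {
        for (const Map& m : maps) {
          if (!m.is_js_receiver) return true;
        }
        return false;
      }
      return true;  // No map info: conservatively insert the conversion.
    }
  }
}

int main() {
  std::cout << NeedsConvertReceiver(Opcode::kJSToObject) << " "
            << NeedsConvertReceiver(Opcode::kParameter) << "\n";  // 0 1
}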
 
@@ -365,25 +346,124 @@
 
 }  // namespace
 
-
-Reduction JSInliner::Reduce(Node* node) {
-  if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+// Determines whether the call target of the given call {node} is statically
+// known and can be used as an inlining candidate. The {SharedFunctionInfo} of
+// the call target is provided (the exact closure might be unknown).
+bool JSInliner::DetermineCallTarget(
+    Node* node, Handle<SharedFunctionInfo>& shared_info_out) {
+  DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+  HeapObjectMatcher match(node->InputAt(0));
 
   // This reducer can handle both normal function calls as well a constructor
   // calls whenever the target is a constant function object, as follows:
-  //  - JSCallFunction(target:constant, receiver, args...)
-  //  - JSCallConstruct(target:constant, args..., new.target)
-  HeapObjectMatcher match(node->InputAt(0));
-  if (!match.HasValue() || !match.Value()->IsJSFunction()) return NoChange();
-  Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+  //  - JSCall(target:constant, receiver, args...)
+  //  - JSConstruct(target:constant, args..., new.target)
+  if (match.HasValue() && match.Value()->IsJSFunction()) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
 
-  return ReduceJSCall(node, function);
+    // Disallow cross native-context inlining for now. This means that all parts
+    // of the resulting code will operate on the same global object. This also
+    // prevents cross context leaks, where we could inline functions from a
+    // different context and hold on to that context (and closure) from the code
+    // object.
+    // TODO(turbofan): We might want to revisit this restriction later when we
+    // have a need for this, and we know how to model different native contexts
+    // in the same graph in a compositional way.
+    if (function->context()->native_context() !=
+        info_->context()->native_context()) {
+      return false;
+    }
+
+    shared_info_out = handle(function->shared());
+    return true;
+  }
+
+  // This reducer can also handle calls where the target is statically known to
+  // be the result of a closure instantiation operation, as follows:
+  //  - JSCall(JSCreateClosure[shared](context), receiver, args...)
+  //  - JSConstruct(JSCreateClosure[shared](context), args..., new.target)
+  if (match.IsJSCreateClosure()) {
+    CreateClosureParameters const& p = CreateClosureParametersOf(match.op());
+
+    // Disallow inlining in case the instantiation site was never run and hence
+    // the vector cell does not contain a valid feedback vector for the call
+    // target.
+    // TODO(turbofan): We might consider eagerly creating the feedback vector
+    // in such a case (in {DetermineCallContext} below).
+    FeedbackSlot slot = p.feedback().slot();
+    Handle<Cell> cell(Cell::cast(p.feedback().vector()->Get(slot)));
+    if (!cell->value()->IsFeedbackVector()) return false;
+
+    shared_info_out = p.shared_info();
+    return true;
+  }
+
+  return false;
 }
 
-Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
+// Determines statically known information about the call target (assuming that
+// the call target is known according to {DetermineCallTarget} above). The
+// following static information is provided:
+//  - context         : The context (as SSA value) bound by the call target.
+//  - feedback_vector : The target is guaranteed to use this feedback vector.
+void JSInliner::DetermineCallContext(
+    Node* node, Node*& context_out,
+    Handle<FeedbackVector>& feedback_vector_out) {
   DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+  HeapObjectMatcher match(node->InputAt(0));
+
+  if (match.HasValue() && match.Value()->IsJSFunction()) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+
+    // If the target function was never invoked, its literals array might not
+    // contain a feedback vector. We ensure at this point that it is created.
+    JSFunction::EnsureLiterals(function);
+
+    // The inlinee specializes to the context from the JSFunction object.
+    context_out = jsgraph()->Constant(handle(function->context()));
+    feedback_vector_out = handle(function->feedback_vector());
+    return;
+  }
+
+  if (match.IsJSCreateClosure()) {
+    CreateClosureParameters const& p = CreateClosureParametersOf(match.op());
+
+    // Load the feedback vector of the target by looking up its vector cell at
+    // the instantiation site (we only decide to inline if it's populated).
+    FeedbackSlot slot = p.feedback().slot();
+    Handle<Cell> cell(Cell::cast(p.feedback().vector()->Get(slot)));
+    DCHECK(cell->value()->IsFeedbackVector());
+
+    // The inlinee uses the locally provided context at instantiation.
+    context_out = NodeProperties::GetContextInput(match.node());
+    feedback_vector_out = handle(FeedbackVector::cast(cell->value()));
+    return;
+  }
+
+  // Must succeed.
+  UNREACHABLE();
+}
+
+Reduction JSInliner::Reduce(Node* node) {
+  if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+  return ReduceJSCall(node);
+}
+
+Reduction JSInliner::ReduceJSCall(Node* node) {
+  DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+  Handle<SharedFunctionInfo> shared_info;
   JSCallAccessor call(node);
-  Handle<SharedFunctionInfo> shared_info(function->shared());
+
+  // Determine the call target.
+  if (!DetermineCallTarget(node, shared_info)) return NoChange();
+
+  // Inlining is only supported in the bytecode pipeline.
+  if (!info_->is_optimizing_from_bytecode()) {
+    TRACE("Not inlining %s into %s due to use of the deprecated pipeline\n",
+          shared_info->DebugName()->ToCString().get(),
+          info_->shared_info()->DebugName()->ToCString().get());
+    return NoChange();
+  }
 
   // Function must be inlineable.
   if (!shared_info->IsInlineable()) {
@@ -394,7 +474,7 @@
   }
 
   // Constructor must be constructable.
-  if (node->opcode() == IrOpcode::kJSCallConstruct &&
+  if (node->opcode() == IrOpcode::kJSConstruct &&
       IsNonConstructible(shared_info)) {
     TRACE("Not inlining %s into %s because constructor is not constructable.\n",
           shared_info->DebugName()->ToCString().get(),
@@ -402,9 +482,21 @@
     return NoChange();
   }
 
+  // TODO(706642): Don't inline derived class constructors for now, as the
+  // inlining logic doesn't deal properly with derived class constructors
+  // that return a primitive, i.e. it's not in sync with what the Parser
+  // and the JSConstructStub do.
+  if (node->opcode() == IrOpcode::kJSConstruct &&
+      IsDerivedConstructor(shared_info->kind())) {
+    TRACE("Not inlining %s into %s because constructor is derived.\n",
+          shared_info->DebugName()->ToCString().get(),
+          info_->shared_info()->DebugName()->ToCString().get());
+    return NoChange();
+  }
+
   // Class constructors are callable, but [[Call]] will raise an exception.
   // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
-  if (node->opcode() == IrOpcode::kJSCallFunction &&
+  if (node->opcode() == IrOpcode::kJSCall &&
       IsClassConstructor(shared_info->kind())) {
     TRACE("Not inlining %s into %s because callee is a class constructor.\n",
           shared_info->DebugName()->ToCString().get(),
@@ -420,22 +512,6 @@
     return NoChange();
   }
 
-  // Disallow cross native-context inlining for now. This means that all parts
-  // of the resulting code will operate on the same global object.
-  // This also prevents cross context leaks for asm.js code, where we could
-  // inline functions from a different context and hold on to that context (and
-  // closure) from the code object.
-  // TODO(turbofan): We might want to revisit this restriction later when we
-  // have a need for this, and we know how to model different native contexts
-  // in the same graph in a compositional way.
-  if (function->context()->native_context() !=
-      info_->context()->native_context()) {
-    TRACE("Not inlining %s into %s because of different native contexts\n",
-          shared_info->DebugName()->ToCString().get(),
-          info_->shared_info()->DebugName()->ToCString().get());
-    return NoChange();
-  }
-
   // TODO(turbofan): TranslatedState::GetAdaptedArguments() currently relies on
   // not inlining recursive functions. We might want to relax that at some
   // point.
@@ -484,14 +560,13 @@
     }
   }
 
-  Zone zone(info_->isolate()->allocator(), ZONE_NAME);
-  ParseInfo parse_info(&zone, shared_info);
-  CompilationInfo info(&parse_info, function);
+  ParseInfo parse_info(shared_info);
+  CompilationInfo info(parse_info.zone(), &parse_info,
+                       Handle<JSFunction>::null());
   if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
-  if (info_->is_type_feedback_enabled()) info.MarkAsTypeFeedbackEnabled();
-  if (info_->is_optimizing_from_bytecode()) info.MarkAsOptimizeFromBytecode();
+  info.MarkAsOptimizeFromBytecode();
 
-  if (info.is_optimizing_from_bytecode() && !Compiler::EnsureBytecode(&info)) {
+  if (!Compiler::EnsureBytecode(&info)) {
     TRACE("Not inlining %s into %s because bytecode generation failed\n",
           shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
@@ -501,25 +576,6 @@
     return NoChange();
   }
 
-  if (!info.is_optimizing_from_bytecode() &&
-      !Compiler::ParseAndAnalyze(info.parse_info())) {
-    TRACE("Not inlining %s into %s because parsing failed\n",
-          shared_info->DebugName()->ToCString().get(),
-          info_->shared_info()->DebugName()->ToCString().get());
-    if (info_->isolate()->has_pending_exception()) {
-      info_->isolate()->clear_pending_exception();
-    }
-    return NoChange();
-  }
-
-  if (!info.is_optimizing_from_bytecode() &&
-      !Compiler::EnsureDeoptimizationSupport(&info)) {
-    TRACE("Not inlining %s into %s because deoptimization support failed\n",
-          shared_info->DebugName()->ToCString().get(),
-          info_->shared_info()->DebugName()->ToCString().get());
-    return NoChange();
-  }
-
   // Remember that we inlined this function. This needs to be called right
   // after we ensure deoptimization support so that the code flusher
   // does not remove the code with the deoptimization support.
@@ -534,39 +590,20 @@
         shared_info->DebugName()->ToCString().get(),
         info_->shared_info()->DebugName()->ToCString().get());
 
-  // If function was lazily compiled, its literals array may not yet be set up.
-  JSFunction::EnsureLiterals(function);
+  // Determine the target's feedback vector and its context.
+  Node* context;
+  Handle<FeedbackVector> feedback_vector;
+  DetermineCallContext(node, context, feedback_vector);
 
   // Create the subgraph for the inlinee.
   Node* start;
   Node* end;
-  if (info.is_optimizing_from_bytecode()) {
+  {
     // Run the BytecodeGraphBuilder to create the subgraph.
     Graph::SubgraphScope scope(graph());
-    BytecodeGraphBuilder graph_builder(&zone, &info, jsgraph(),
-                                       call.frequency(), source_positions_,
-                                       inlining_id);
-    graph_builder.CreateGraph(false);
-
-    // Extract the inlinee start/end nodes.
-    start = graph()->start();
-    end = graph()->end();
-  } else {
-    // Run the loop assignment analyzer on the inlinee.
-    AstLoopAssignmentAnalyzer loop_assignment_analyzer(&zone, &info);
-    LoopAssignmentAnalysis* loop_assignment =
-        loop_assignment_analyzer.Analyze();
-
-    // Run the type hint analyzer on the inlinee.
-    TypeHintAnalyzer type_hint_analyzer(&zone);
-    TypeHintAnalysis* type_hint_analysis =
-        type_hint_analyzer.Analyze(handle(shared_info->code(), info.isolate()));
-
-    // Run the AstGraphBuilder to create the subgraph.
-    Graph::SubgraphScope scope(graph());
-    AstGraphBuilderWithPositions graph_builder(
-        &zone, &info, jsgraph(), call.frequency(), loop_assignment,
-        type_hint_analysis, source_positions_, inlining_id);
+    BytecodeGraphBuilder graph_builder(
+        parse_info.zone(), shared_info, feedback_vector, BailoutId::None(),
+        jsgraph(), call.frequency(), source_positions_, inlining_id);
     graph_builder.CreateGraph(false);
 
     // Extract the inlinee start/end nodes.
@@ -600,20 +637,38 @@
   Node* frame_state = call.frame_state();
   Node* new_target = jsgraph()->UndefinedConstant();
 
-  // Inline {JSCallConstruct} requires some additional magic.
-  if (node->opcode() == IrOpcode::kJSCallConstruct) {
+  // Inline {JSConstruct} requires some additional magic.
+  if (node->opcode() == IrOpcode::kJSConstruct) {
+    // Swizzle the inputs of the {JSConstruct} node to look like inputs to a
+    // normal {JSCall} node so that the rest of the inlining machinery
+    // behaves as if we were dealing with a regular function invocation.
+    new_target = call.new_target();  // Retrieve new target value input.
+    node->RemoveInput(call.formal_arguments() + 1);  // Drop new target.
+    node->InsertInput(graph()->zone(), 1, new_target);
+
     // Insert nodes around the call that model the behavior required for a
     // constructor dispatch (allocate implicit receiver and check return value).
     // This models the behavior usually accomplished by our {JSConstructStub}.
     // Note that the context has to be the callers context (input to call node).
+    // Also note that by splitting off the {JSCreate} piece of the constructor
+    // call, we create an observable deoptimization point after the receiver
+    // instantiation but before the invocation (i.e. inside {JSConstructStub}
+    // where execution continues at {construct_stub_create_deopt_pc_offset}).
     Node* receiver = jsgraph()->TheHoleConstant();  // Implicit receiver.
     if (NeedsImplicitReceiver(shared_info)) {
-      Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
       Node* effect = NodeProperties::GetEffectInput(node);
+      Node* control = NodeProperties::GetControlInput(node);
       Node* context = NodeProperties::GetContextInput(node);
-      Node* create = graph()->NewNode(javascript()->Create(), call.target(),
-                                      call.new_target(), context,
-                                      frame_state_before, effect);
+      Node* frame_state_inside = CreateArtificialFrameState(
+          node, frame_state, call.formal_arguments(),
+          BailoutId::ConstructStubCreate(), FrameStateType::kConstructStub,
+          info.shared_info());
+      Node* create =
+          graph()->NewNode(javascript()->Create(), call.target(), new_target,
+                           context, frame_state_inside, effect, control);
+      Node* success = graph()->NewNode(common()->IfSuccess(), create);
+      uncaught_subcalls.push_back(create);  // Adds {IfException}.
+      NodeProperties::ReplaceControlInput(node, success);
       NodeProperties::ReplaceEffectInput(node, create);
       // Insert a check of the return value to determine whether the return
       // value or the implicit receiver should be selected as a result of the
@@ -628,42 +683,26 @@
       NodeProperties::ReplaceValueInput(check, node, 0);   // Fix-up input.
       receiver = create;  // The implicit receiver.
     }
-
-    // Swizzle the inputs of the {JSCallConstruct} node to look like inputs to a
-    // normal {JSCallFunction} node so that the rest of the inlining machinery
-    // behaves as if we were dealing with a regular function invocation.
-    new_target = call.new_target();  // Retrieve new target value input.
-    node->RemoveInput(call.formal_arguments() + 1);  // Drop new target.
-    node->InsertInput(graph()->zone(), 1, receiver);
+    node->ReplaceInput(1, receiver);
 
     // Insert a construct stub frame into the chain of frame states. This will
     // reconstruct the proper frame when deoptimizing within the constructor.
     frame_state = CreateArtificialFrameState(
         node, frame_state, call.formal_arguments(),
-        FrameStateType::kConstructStub, info.shared_info());
+        BailoutId::ConstructStubInvoke(), FrameStateType::kConstructStub,
+        info.shared_info());
   }
 
-  // The inlinee specializes to the context from the JSFunction object.
-  // TODO(turbofan): We might want to load the context from the JSFunction at
-  // runtime in case we only know the SharedFunctionInfo once we have dynamic
-  // type feedback in the compiler.
-  Node* context = jsgraph()->Constant(handle(function->context()));
-
   // Insert a JSConvertReceiver node for sloppy callees. Note that the context
-  // passed into this node has to be the callees context (loaded above). Note
-  // that the frame state passed to the JSConvertReceiver must be the frame
-  // state _before_ the call; it is not necessary to fiddle with the receiver
-  // in that frame state tho, as the conversion of the receiver can be repeated
-  // any number of times, it's not observable.
-  if (node->opcode() == IrOpcode::kJSCallFunction &&
+  // passed into this node has to be the callee's context (loaded above).
+  if (node->opcode() == IrOpcode::kJSCall &&
       is_sloppy(shared_info->language_mode()) && !shared_info->native()) {
     Node* effect = NodeProperties::GetEffectInput(node);
     if (NeedsConvertReceiver(call.receiver(), effect)) {
-      const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
-      Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
-      Node* convert = effect = graph()->NewNode(
-          javascript()->ConvertReceiver(p.convert_mode()), call.receiver(),
-          context, frame_state_before, effect, start);
+      const CallParameters& p = CallParametersOf(node->op());
+      Node* convert = effect =
+          graph()->NewNode(javascript()->ConvertReceiver(p.convert_mode()),
+                           call.receiver(), context, effect, start);
       NodeProperties::ReplaceValueInput(node, convert, 1);
       NodeProperties::ReplaceEffectInput(node, effect);
     }
@@ -676,8 +715,8 @@
   // the case when the outermost function inlines a tail call (it should remove
   // a potential arguments adaptor frame that belongs to the outermost function when
   // deopt happens).
-  if (node->opcode() == IrOpcode::kJSCallFunction) {
-    const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+  if (node->opcode() == IrOpcode::kJSCall) {
+    const CallParameters& p = CallParametersOf(node->op());
     if (p.tail_call_mode() == TailCallMode::kAllow) {
       frame_state = CreateTailCallerFrameState(node, frame_state);
     }
@@ -691,7 +730,7 @@
   DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
   if (call.formal_arguments() != parameter_count) {
     frame_state = CreateArtificialFrameState(
-        node, frame_state, call.formal_arguments(),
+        node, frame_state, call.formal_arguments(), BailoutId::None(),
         FrameStateType::kArgumentsAdaptor, shared_info);
   }
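Editor's note (not part of the patch): DetermineCallTarget above accepts exactly two shapes of call target: a constant JSFunction (rejected when it crosses native contexts) and a JSCreateClosure whose feedback-vector cell is already populated. A condensed sketch of that decision; all types here are illustrative stand-ins, not the V8 API.

#include <iostream>
#include <optional>
#include <string>

struct Target {
  enum Kind { kConstantFunction, kCreateClosure, kUnknown };
  Kind kind = kUnknown;
  bool same_native_context = false;    // meaningful for kConstantFunction
  bool vector_cell_populated = false;  // meaningful for kCreateClosure
  std::string shared_name;
};

// Returns the SharedFunctionInfo name if the target qualifies, else nullopt.
std::optional<std::string> DetermineCallTarget(const Target& t) {
  switch (t.kind) {
    case Target::kConstantFunction:
      // Cross native-context inlining is disallowed for now.
      return t.same_native_context
                 ? std::optional<std::string>(t.shared_name)
                 : std::nullopt;
    case Target::kCreateClosure:
      // Bail out unless the instantiation site has a valid feedback vector.
      return t.vector_cell_populated
                 ? std::optional<std::string>(t.shared_name)
                 : std::nullopt;
    case Target::kUnknown:
      return std::nullopt;
  }
  return std::nullopt;
}

int main() {
  Target t;
  t.kind = Target::kCreateClosure;
  t.vector_cell_populated = true;
  t.shared_name = "f";
  std::cout << DetermineCallTarget(t).value_or("<no inline>") << "\n";  // f
}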
 
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
index 9bb8ec4..e40e6a7 100644
--- a/src/compiler/js-inlining.h
+++ b/src/compiler/js-inlining.h
@@ -11,7 +11,7 @@
 namespace v8 {
 namespace internal {
 
-// Forward declarations.
+class BailoutId;
 class CompilationInfo;
 
 namespace compiler {
@@ -36,7 +36,7 @@
 
   // Can be used by inlining heuristics or by testing code directly, without
   // using the above generic reducer interface of the inlining machinery.
-  Reduction ReduceJSCall(Node* node, Handle<JSFunction> function);
+  Reduction ReduceJSCall(Node* node);
 
  private:
   CommonOperatorBuilder* common() const;
@@ -50,8 +50,13 @@
   JSGraph* const jsgraph_;
   SourcePositionTable* const source_positions_;
 
+  bool DetermineCallTarget(Node* node,
+                           Handle<SharedFunctionInfo>& shared_info_out);
+  void DetermineCallContext(Node* node, Node*& context_out,
+                            Handle<FeedbackVector>& feedback_vector_out);
+
   Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
-                                   int parameter_count,
+                                   int parameter_count, BailoutId bailout_id,
                                    FrameStateType frame_state_type,
                                    Handle<SharedFunctionInfo> shared);
 
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index 5290323..8a866ee 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -32,6 +32,8 @@
   switch (f->function_id) {
     case Runtime::kInlineCreateIterResultObject:
       return ReduceCreateIterResultObject(node);
+    case Runtime::kInlineDebugIsActive:
+      return ReduceDebugIsActive(node);
     case Runtime::kInlineDeoptimizeNow:
       return ReduceDeoptimizeNow(node);
     case Runtime::kInlineGeneratorClose:
@@ -40,12 +42,14 @@
       return ReduceGeneratorGetInputOrDebugPos(node);
     case Runtime::kInlineGeneratorGetResumeMode:
       return ReduceGeneratorGetResumeMode(node);
+    case Runtime::kInlineGeneratorGetContext:
+      return ReduceGeneratorGetContext(node);
     case Runtime::kInlineIsArray:
       return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
     case Runtime::kInlineIsTypedArray:
       return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
-    case Runtime::kInlineIsRegExp:
-      return ReduceIsInstanceType(node, JS_REGEXP_TYPE);
+    case Runtime::kInlineIsJSProxy:
+      return ReduceIsInstanceType(node, JS_PROXY_TYPE);
     case Runtime::kInlineIsJSReceiver:
       return ReduceIsJSReceiver(node);
     case Runtime::kInlineIsSmi:
@@ -54,8 +58,6 @@
       return ReduceFixedArrayGet(node);
     case Runtime::kInlineFixedArraySet:
       return ReduceFixedArraySet(node);
-    case Runtime::kInlineRegExpExec:
-      return ReduceRegExpExec(node);
     case Runtime::kInlineSubString:
       return ReduceSubString(node);
     case Runtime::kInlineToInteger:
@@ -70,10 +72,29 @@
       return ReduceToString(node);
     case Runtime::kInlineCall:
       return ReduceCall(node);
-    case Runtime::kInlineNewObject:
-      return ReduceNewObject(node);
     case Runtime::kInlineGetSuperConstructor:
       return ReduceGetSuperConstructor(node);
+    case Runtime::kInlineArrayBufferViewGetByteLength:
+      return ReduceArrayBufferViewField(
+          node, AccessBuilder::ForJSArrayBufferViewByteLength());
+    case Runtime::kInlineArrayBufferViewGetByteOffset:
+      return ReduceArrayBufferViewField(
+          node, AccessBuilder::ForJSArrayBufferViewByteOffset());
+    case Runtime::kInlineMaxSmi:
+      return ReduceMaxSmi(node);
+    case Runtime::kInlineTypedArrayGetLength:
+      return ReduceArrayBufferViewField(node,
+                                        AccessBuilder::ForJSTypedArrayLength());
+    case Runtime::kInlineTypedArrayMaxSizeInHeap:
+      return ReduceTypedArrayMaxSizeInHeap(node);
+    case Runtime::kInlineJSCollectionGetTable:
+      return ReduceJSCollectionGetTable(node);
+    case Runtime::kInlineStringGetRawHashField:
+      return ReduceStringGetRawHashField(node);
+    case Runtime::kInlineTheHole:
+      return ReduceTheHole(node);
+    case Runtime::kInlineClassOf:
+      return ReduceClassOf(node);
     default:
       break;
   }
@@ -90,6 +111,15 @@
                 context, effect);
 }
 
+Reduction JSIntrinsicLowering::ReduceDebugIsActive(Node* node) {
+  Node* const value = jsgraph()->ExternalConstant(
+      ExternalReference::debug_is_active_address(isolate()));
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = NodeProperties::GetControlInput(node);
+  Operator const* const op =
+      simplified()->LoadField(AccessBuilder::ForExternalUint8Value());
+  return Change(node, op, value, effect, control);
+}
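Editor's note (not part of the patch): the new %_DebugIsActive lowering reduces the intrinsic to a single byte load through an isolate-external address. The moral equivalent in plain C++, with an illustrative flag variable standing in for the isolate's debug-is-active address.

#include <cstdint>
#include <iostream>

std::uint8_t debug_is_active = 0;  // stand-in for the isolate's debug flag

// What the lowered graph does: a one-byte load through an external pointer
// (simplified LoadField with AccessBuilder::ForExternalUint8Value()).
std::uint8_t LoadDebugIsActive(const std::uint8_t* external_address) {
  return *external_address;
}

int main() {
  debug_is_active = 1;
  std::cout << int(LoadDebugIsActive(&debug_is_active)) << "\n";  // prints 1
}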
 
 Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
   if (mode() != kDeoptimizationEnabled) return NoChange();
@@ -133,6 +163,16 @@
   return Change(node, op, generator, effect, control);
 }
 
+Reduction JSIntrinsicLowering::ReduceGeneratorGetContext(Node* node) {
+  Node* const generator = NodeProperties::GetValueInput(node, 0);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = NodeProperties::GetControlInput(node);
+  Operator const* const op =
+      simplified()->LoadField(AccessBuilder::ForJSGeneratorObjectContext());
+
+  return Change(node, op, generator, effect, control);
+}
+
 Reduction JSIntrinsicLowering::ReduceGeneratorGetResumeMode(Node* node) {
   Node* const generator = NodeProperties::GetValueInput(node, 0);
   Node* const effect = NodeProperties::GetEffectInput(node);
@@ -228,11 +268,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceRegExpExec(Node* node) {
-  return Change(node, CodeFactory::RegExpExec(isolate()), 4);
-}
-
-
 Reduction JSIntrinsicLowering::ReduceSubString(Node* node) {
   return Change(node, CodeFactory::SubString(isolate()), 3);
 }
@@ -271,16 +306,12 @@
 Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
   size_t const arity = CallRuntimeParametersOf(node->op()).arity();
   NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, 0.0f, VectorSlotPair(),
-                                       ConvertReceiverMode::kAny,
-                                       TailCallMode::kDisallow));
+      node,
+      javascript()->Call(arity, 0.0f, VectorSlotPair(),
+                         ConvertReceiverMode::kAny, TailCallMode::kDisallow));
   return Changed(node);
 }
 
-Reduction JSIntrinsicLowering::ReduceNewObject(Node* node) {
-  return Change(node, CodeFactory::FastNewObject(isolate()), 0);
-}
-
 Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
   Node* active_function = NodeProperties::GetValueInput(node, 0);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -292,6 +323,75 @@
                 active_function_map, effect, control);
 }
 
+Reduction JSIntrinsicLowering::ReduceArrayBufferViewField(
+    Node* node, FieldAccess const& access) {
+  Node* receiver = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Load the {receiver}s field.
+  Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
+                                          receiver, effect, control);
+
+  // Check if the {receiver}s buffer was neutered.
+  Node* receiver_buffer = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+      receiver, effect, control);
+  Node* check = effect = graph()->NewNode(
+      simplified()->ArrayBufferWasNeutered(), receiver_buffer, effect, control);
+
+  // Default to zero if the {receiver}s buffer was neutered.
+  value = graph()->NewNode(
+      common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+      check, jsgraph()->ZeroConstant(), value);
+
+  ReplaceWithValue(node, value, effect, control);
+  return Replace(value);
+}
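Editor's note (not part of the patch): ReduceArrayBufferViewField above lowers to "load the field, then select 0 if the backing buffer was neutered"; the Select with BranchHint::kFalse marks neutering as the unlikely case. The same logic as a standalone C++ sketch, with illustrative stand-ins for the heap objects.

#include <cstdint>
#include <iostream>

struct ArrayBuffer {
  bool was_neutered = false;
};
struct ArrayBufferView {
  const ArrayBuffer* buffer = nullptr;
  std::uint32_t byte_length = 0;
};

std::uint32_t ViewByteLength(const ArrayBufferView& view) {
  std::uint32_t value = view.byte_length;     // LoadField(access)
  bool neutered = view.buffer->was_neutered;  // ArrayBufferWasNeutered
  return neutered ? 0 : value;                // Select, BranchHint::kFalse
}

int main() {
  ArrayBuffer buf{true};
  ArrayBufferView view{&buf, 64};
  std::cout << ViewByteLength(view) << "\n";  // 0: the buffer was neutered
}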
+
+Reduction JSIntrinsicLowering::ReduceMaxSmi(Node* node) {
+  Node* value = jsgraph()->Constant(Smi::kMaxValue);
+  ReplaceWithValue(node, value);
+  return Replace(value);
+}
+
+Reduction JSIntrinsicLowering::ReduceTypedArrayMaxSizeInHeap(Node* node) {
+  Node* value = jsgraph()->Constant(FLAG_typed_array_max_size_in_heap);
+  ReplaceWithValue(node, value);
+  return Replace(value);
+}
+
+Reduction JSIntrinsicLowering::ReduceJSCollectionGetTable(Node* node) {
+  Node* collection = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  return Change(node,
+                simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
+                collection, effect, control);
+}
+
+Reduction JSIntrinsicLowering::ReduceStringGetRawHashField(Node* node) {
+  Node* string = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  return Change(node,
+                simplified()->LoadField(AccessBuilder::ForNameHashField()),
+                string, effect, control);
+}
+
+Reduction JSIntrinsicLowering::ReduceTheHole(Node* node) {
+  Node* value = jsgraph()->TheHoleConstant();
+  ReplaceWithValue(node, value);
+  return Replace(value);
+}
+
+Reduction JSIntrinsicLowering::ReduceClassOf(Node* node) {
+  RelaxEffectsAndControls(node);
+  node->TrimInputCount(2);
+  NodeProperties::ChangeOp(node, javascript()->ClassOf());
+  return Changed(node);
+}
+
 Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
                                       Node* b) {
   RelaxControls(node);
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index 6e984ff..f3e3e2a 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -21,6 +21,7 @@
 
 // Forward declarations.
 class CommonOperatorBuilder;
+struct FieldAccess;
 class JSOperatorBuilder;
 class JSGraph;
 class SimplifiedOperatorBuilder;
@@ -40,8 +41,10 @@
 
  private:
   Reduction ReduceCreateIterResultObject(Node* node);
+  Reduction ReduceDebugIsActive(Node* node);
   Reduction ReduceDeoptimizeNow(Node* node);
   Reduction ReduceGeneratorClose(Node* node);
+  Reduction ReduceGeneratorGetContext(Node* node);
   Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
   Reduction ReduceGeneratorGetResumeMode(Node* node);
   Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
@@ -49,7 +52,6 @@
   Reduction ReduceIsSmi(Node* node);
   Reduction ReduceFixedArrayGet(Node* node);
   Reduction ReduceFixedArraySet(Node* node);
-  Reduction ReduceRegExpExec(Node* node);
   Reduction ReduceSubString(Node* node);
   Reduction ReduceToInteger(Node* node);
   Reduction ReduceToLength(Node* node);
@@ -57,9 +59,24 @@
   Reduction ReduceToObject(Node* node);
   Reduction ReduceToString(Node* node);
   Reduction ReduceCall(Node* node);
-  Reduction ReduceNewObject(Node* node);
   Reduction ReduceGetSuperConstructor(Node* node);
 
+  // TODO(turbofan): typedarray.js support; drop once TypedArrays are
+  // converted to proper CodeStubAssembler based builtins.
+  Reduction ReduceArrayBufferViewField(Node* node, FieldAccess const& access);
+  Reduction ReduceMaxSmi(Node* node);
+  Reduction ReduceTypedArrayMaxSizeInHeap(Node* node);
+
+  // TODO(turbofan): collection.js support; drop once Maps and Sets are
+  // converted to proper CodeStubAssembler based builtins.
+  Reduction ReduceJSCollectionGetTable(Node* node);
+  Reduction ReduceStringGetRawHashField(Node* node);
+  Reduction ReduceTheHole(Node* node);
+
+  // TODO(turbofan): JavaScript builtins support; drop once all uses of
+  // %_ClassOf in JavaScript builtins are eliminated.
+  Reduction ReduceClassOf(Node* node);
+
   Reduction Change(Node* node, const Operator* op);
   Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
   Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index a849fec..c32ee26 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -14,9 +14,9 @@
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/type-cache.h"
+#include "src/feedback-vector.h"
 #include "src/field-index-inl.h"
 #include "src/isolate-inl.h"
-#include "src/type-feedback-vector.h"
 
 namespace v8 {
 namespace internal {
@@ -55,6 +55,12 @@
 
 }  // namespace
 
+struct JSNativeContextSpecialization::ScriptContextTableLookupResult {
+  Handle<Context> context;
+  bool immutable;
+  int index;
+};
+
 JSNativeContextSpecialization::JSNativeContextSpecialization(
     Editor* editor, JSGraph* jsgraph, Flags flags,
     Handle<Context> native_context, CompilationDependencies* dependencies,
@@ -62,6 +68,8 @@
     : AdvancedReducer(editor),
       jsgraph_(jsgraph),
       flags_(flags),
+      global_object_(native_context->global_object()),
+      global_proxy_(JSGlobalProxy::cast(native_context->global_proxy())),
       native_context_(native_context),
       dependencies_(dependencies),
       zone_(zone),
@@ -69,10 +77,20 @@
 
 Reduction JSNativeContextSpecialization::Reduce(Node* node) {
   switch (node->opcode()) {
+    case IrOpcode::kJSAdd:
+      return ReduceJSAdd(node);
+    case IrOpcode::kJSGetSuperConstructor:
+      return ReduceJSGetSuperConstructor(node);
     case IrOpcode::kJSInstanceOf:
       return ReduceJSInstanceOf(node);
+    case IrOpcode::kJSOrdinaryHasInstance:
+      return ReduceJSOrdinaryHasInstance(node);
     case IrOpcode::kJSLoadContext:
       return ReduceJSLoadContext(node);
+    case IrOpcode::kJSLoadGlobal:
+      return ReduceJSLoadGlobal(node);
+    case IrOpcode::kJSStoreGlobal:
+      return ReduceJSStoreGlobal(node);
     case IrOpcode::kJSLoadNamed:
       return ReduceJSLoadNamed(node);
     case IrOpcode::kJSStoreNamed:
@@ -81,12 +99,75 @@
       return ReduceJSLoadProperty(node);
     case IrOpcode::kJSStoreProperty:
       return ReduceJSStoreProperty(node);
+    case IrOpcode::kJSStoreNamedOwn:
+      return ReduceJSStoreNamedOwn(node);
+    case IrOpcode::kJSStoreDataPropertyInLiteral:
+      return ReduceJSStoreDataPropertyInLiteral(node);
     default:
       break;
   }
   return NoChange();
 }
 
+Reduction JSNativeContextSpecialization::ReduceJSAdd(Node* node) {
+  // TODO(turbofan): This has to run together with the inlining and
+  // native context specialization to be able to leverage the string
+  // constant-folding for optimizing property access, but we should
+  // nevertheless find a better home for this at some point.
+  DCHECK_EQ(IrOpcode::kJSAdd, node->opcode());
+
+  // Constant-fold string concatenation.
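+  // For example, ("ab" + "cd") with both operands known constant strings
+  // folds to the single HeapConstant "abcd".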
+  HeapObjectBinopMatcher m(node);
+  if (m.left().HasValue() && m.left().Value()->IsString() &&
+      m.right().HasValue() && m.right().Value()->IsString()) {
+    Handle<String> left = Handle<String>::cast(m.left().Value());
+    Handle<String> right = Handle<String>::cast(m.right().Value());
+    if (left->length() + right->length() <= String::kMaxLength) {
+      Handle<String> result =
+          factory()->NewConsString(left, right).ToHandleChecked();
+      Node* value = jsgraph()->HeapConstant(result);
+      ReplaceWithValue(node, value);
+      return Replace(value);
+    }
+  }
+  return NoChange();
+}
+
+Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
+    Node* node) {
+  DCHECK_EQ(IrOpcode::kJSGetSuperConstructor, node->opcode());
+  Node* constructor = NodeProperties::GetValueInput(node, 0);
+
+  // If deoptimization is disabled, we cannot optimize.
+  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+  // Check if the input is a known JSFunction.
+  HeapObjectMatcher m(constructor);
+  if (!m.HasValue()) return NoChange();
+  Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+  Handle<Map> function_map(function->map(), isolate());
+  Handle<Object> function_prototype(function_map->prototype(), isolate());
+
+  // We can constant-fold the super constructor access if the
+  // {function}s map is stable, i.e. we can use a code dependency
+  // to guard against [[Prototype]] changes of {function}.
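+  // (The super constructor is just the [[Prototype]] of {function}; for
+  // `class B extends A {}` that is the constructor A.)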
+  if (function_map->is_stable()) {
+    Node* value = jsgraph()->Constant(function_prototype);
+    dependencies()->AssumeMapStable(function_map);
+    if (function_prototype->IsConstructor()) {
+      ReplaceWithValue(node, value);
+      return Replace(value);
+    } else {
+      node->InsertInput(graph()->zone(), 0, value);
+      NodeProperties::ChangeOp(
+          node, javascript()->CallRuntime(Runtime::kThrowNotSuperConstructor));
+      return Changed(node);
+    }
+  }
+
+  return NoChange();
+}
+
 Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
   DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
   Node* object = NodeProperties::GetValueInput(node, 0);
@@ -125,15 +206,16 @@
       }
 
       // Monomorphic property access.
-      effect =
-          BuildCheckMaps(constructor, effect, control, MapList{receiver_map});
+      effect = BuildCheckMaps(constructor, effect, control,
+                              access_info.receiver_maps());
 
       // Lower to OrdinaryHasInstance(C, O).
       NodeProperties::ReplaceValueInput(node, constructor, 0);
       NodeProperties::ReplaceValueInput(node, object, 1);
       NodeProperties::ReplaceEffectInput(node, effect);
       NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
-      return Changed(node);
+      Reduction const reduction = ReduceJSOrdinaryHasInstance(node);
+      return reduction.Changed() ? reduction : Changed(node);
     }
   } else if (access_info.IsDataConstant()) {
     DCHECK(access_info.constant()->IsCallable());
@@ -145,8 +227,8 @@
     }
 
     // Monomorphic property access.
-    effect =
-        BuildCheckMaps(constructor, effect, control, MapList{receiver_map});
+    effect = BuildCheckMaps(constructor, effect, control,
+                            access_info.receiver_maps());
 
     // Call the @@hasInstance handler.
     Node* target = jsgraph()->Constant(access_info.constant());
@@ -156,8 +238,8 @@
     node->ReplaceInput(5, effect);
     NodeProperties::ChangeOp(
         node,
-        javascript()->CallFunction(3, 0.0f, VectorSlotPair(),
-                                   ConvertReceiverMode::kNotNullOrUndefined));
+        javascript()->Call(3, 0.0f, VectorSlotPair(),
+                           ConvertReceiverMode::kNotNullOrUndefined));
 
     // Rewire the value uses of {node} to ToBoolean conversion of the result.
     Node* value = graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
@@ -174,6 +256,31 @@
   return NoChange();
 }
 
+Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
+    Node* node) {
+  DCHECK_EQ(IrOpcode::kJSOrdinaryHasInstance, node->opcode());
+  Node* constructor = NodeProperties::GetValueInput(node, 0);
+  Node* object = NodeProperties::GetValueInput(node, 1);
+
+  // Check if the {constructor} is a JSBoundFunction.
+  HeapObjectMatcher m(constructor);
+  if (m.HasValue() && m.Value()->IsJSBoundFunction()) {
+    // OrdinaryHasInstance on bound functions turns into a recursive
+    // invocation of the instanceof operator again.
+    // ES6 section 7.3.19 OrdinaryHasInstance (C, O) step 2.
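+    // For example, `o instanceof f.bind(x)` behaves like `o instanceof f`.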
+    Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(m.Value());
+    Handle<JSReceiver> bound_target_function(function->bound_target_function());
+    NodeProperties::ReplaceValueInput(node, object, 0);
+    NodeProperties::ReplaceValueInput(
+        node, jsgraph()->HeapConstant(bound_target_function), 1);
+    NodeProperties::ChangeOp(node, javascript()->InstanceOf());
+    Reduction const reduction = ReduceJSInstanceOf(node);
+    return reduction.Changed() ? reduction : Changed(node);
+  }
+
+  return NoChange();
+}
+
 Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
   ContextAccess const& access = ContextAccessOf(node->op());
@@ -188,24 +295,292 @@
   return NoChange();
 }
 
+namespace {
+
+FieldAccess ForPropertyCellValue(MachineRepresentation representation,
+                                 Type* type, MaybeHandle<Map> map,
+                                 Handle<Name> name) {
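+  // Stores of Smis never introduce old-to-new pointers and need no write
+  // barrier at all; stores of values known to be HeapObjects can use the
+  // cheaper pointer write barrier.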
+  WriteBarrierKind kind = kFullWriteBarrier;
+  if (representation == MachineRepresentation::kTaggedSigned) {
+    kind = kNoWriteBarrier;
+  } else if (representation == MachineRepresentation::kTaggedPointer) {
+    kind = kPointerWriteBarrier;
+  }
+  MachineType r = MachineType::TypeForRepresentation(representation);
+  FieldAccess access = {
+      kTaggedBase, PropertyCell::kValueOffset, name, map, type, r, kind};
+  return access;
+}
+
+}  // namespace
+
+Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
+    Node* node, Node* receiver, Node* value, Handle<Name> name,
+    AccessMode access_mode, Node* index) {
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Lookup on the global object. We only deal with own data properties
+  // of the global object here (represented as PropertyCell).
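+  // (Script-level `var` and function declarations, for example, create such
+  // own data properties on the global object.)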
+  LookupIterator it(global_object(), name, LookupIterator::OWN);
+  it.TryLookupCachedProperty();
+  if (it.state() != LookupIterator::DATA) return NoChange();
+  if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
+  Handle<PropertyCell> property_cell = it.GetPropertyCell();
+  PropertyDetails property_details = property_cell->property_details();
+  Handle<Object> property_cell_value(property_cell->value(), isolate());
+  PropertyCellType property_cell_type = property_details.cell_type();
+
+  // We have additional constraints for stores.
+  if (access_mode == AccessMode::kStore) {
+    if (property_details.IsReadOnly()) {
+      // Don't even bother trying to lower stores to read-only data properties.
+      return NoChange();
+    } else if (property_cell_type == PropertyCellType::kUndefined) {
+      // There's no fast-path for dealing with undefined property cells.
+      return NoChange();
+    } else if (property_cell_type == PropertyCellType::kConstantType) {
+      // There's also no fast-path for storing to a global cell that was
+      // assumed stable but is no longer stable.
+      if (property_cell_value->IsHeapObject() &&
+          !Handle<HeapObject>::cast(property_cell_value)->map()->is_stable()) {
+        return NoChange();
+      }
+    }
+  }
+
+  // Ensure that {index} matches the specified {name} (if {index} is given).
+  if (index != nullptr) {
+    Node* check = graph()->NewNode(simplified()->ReferenceEqual(), index,
+                                   jsgraph()->HeapConstant(name));
+    effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+  }
+
+  // Check if we have a {receiver} to validate. If so, we need to check that
+  // the {receiver} is actually the JSGlobalProxy for the native context that
+  // we are specializing to.
+  if (receiver != nullptr) {
+    Node* check = graph()->NewNode(simplified()->ReferenceEqual(), receiver,
+                                   jsgraph()->HeapConstant(global_proxy()));
+    effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+  }
+
+  if (access_mode == AccessMode::kLoad) {
+    // Loads from non-configurable, read-only data properties on the global
+    // object can be constant-folded, even without deoptimization support.
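+    // For example, the global properties undefined, NaN and Infinity are
+    // read-only and non-configurable, so they constant-fold here.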
+    if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
+      value = jsgraph()->Constant(property_cell_value);
+    } else {
+      // Record a code dependency on the cell if we can benefit from the
+      // additional feedback, or if the global property is configurable (i.e.
+      // can be deleted or reconfigured to an accessor property).
+      if (property_details.cell_type() != PropertyCellType::kMutable ||
+          property_details.IsConfigurable()) {
+        dependencies()->AssumePropertyCell(property_cell);
+      }
+
+      // Loads from constant/undefined global property cells can be
+      // constant-folded.
+      if (property_details.cell_type() == PropertyCellType::kConstant ||
+          property_details.cell_type() == PropertyCellType::kUndefined) {
+        value = jsgraph()->Constant(property_cell_value);
+      } else {
+        // Loads from constant-type cells can benefit from type feedback.
+        MaybeHandle<Map> map;
+        Type* property_cell_value_type = Type::NonInternal();
+        MachineRepresentation representation = MachineRepresentation::kTagged;
+        if (property_details.cell_type() == PropertyCellType::kConstantType) {
+          // Compute proper type based on the current value in the cell.
+          if (property_cell_value->IsSmi()) {
+            property_cell_value_type = Type::SignedSmall();
+            representation = MachineRepresentation::kTaggedSigned;
+          } else if (property_cell_value->IsNumber()) {
+            property_cell_value_type = Type::Number();
+            representation = MachineRepresentation::kTaggedPointer;
+          } else {
+            Handle<Map> property_cell_value_map(
+                Handle<HeapObject>::cast(property_cell_value)->map(),
+                isolate());
+            property_cell_value_type = Type::For(property_cell_value_map);
+            representation = MachineRepresentation::kTaggedPointer;
+
+            // We can only use the property cell value map for map check
+            // elimination if it's stable, i.e. the HeapObject wasn't
+            // mutated without the cell state being updated.
+            if (property_cell_value_map->is_stable()) {
+              dependencies()->AssumeMapStable(property_cell_value_map);
+              map = property_cell_value_map;
+            }
+          }
+        }
+        value = effect = graph()->NewNode(
+            simplified()->LoadField(ForPropertyCellValue(
+                representation, property_cell_value_type, map, name)),
+            jsgraph()->HeapConstant(property_cell), effect, control);
+      }
+    }
+  } else {
+    DCHECK_EQ(AccessMode::kStore, access_mode);
+    DCHECK(!property_details.IsReadOnly());
+    switch (property_details.cell_type()) {
+      case PropertyCellType::kUndefined: {
+        UNREACHABLE();
+        break;
+      }
+      case PropertyCellType::kConstant: {
+        // Record a code dependency on the cell, and just deoptimize if the new
+        // value doesn't match the previous value stored inside the cell.
+        dependencies()->AssumePropertyCell(property_cell);
+        Node* check =
+            graph()->NewNode(simplified()->ReferenceEqual(), value,
+                             jsgraph()->Constant(property_cell_value));
+        effect =
+            graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+        break;
+      }
+      case PropertyCellType::kConstantType: {
+        // Record a code dependency on the cell, and just deoptimize if the
+        // new value's type doesn't match the type of the previous value in
+        // the cell.
+        dependencies()->AssumePropertyCell(property_cell);
+        Type* property_cell_value_type;
+        MachineRepresentation representation = MachineRepresentation::kTagged;
+        if (property_cell_value->IsHeapObject()) {
+          // We cannot do anything if the {property_cell_value}s map is no
+          // longer stable.
+          Handle<Map> property_cell_value_map(
+              Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
+          DCHECK(property_cell_value_map->is_stable());
+          dependencies()->AssumeMapStable(property_cell_value_map);
+
+          // Check that the {value} is a HeapObject.
+          value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+                                            value, effect, control);
+
+          // Check the {value} map against the {property_cell} map.
+          effect =
+              graph()->NewNode(simplified()->CheckMaps(
+                                   CheckMapsFlag::kNone,
+                                   ZoneHandleSet<Map>(property_cell_value_map)),
+                               value, effect, control);
+          property_cell_value_type = Type::OtherInternal();
+          representation = MachineRepresentation::kTaggedPointer;
+        } else {
+          // Check that the {value} is a Smi.
+          value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
+                                            effect, control);
+          property_cell_value_type = Type::SignedSmall();
+          representation = MachineRepresentation::kTaggedSigned;
+        }
+        effect = graph()->NewNode(simplified()->StoreField(ForPropertyCellValue(
+                                      representation, property_cell_value_type,
+                                      MaybeHandle<Map>(), name)),
+                                  jsgraph()->HeapConstant(property_cell), value,
+                                  effect, control);
+        break;
+      }
+      case PropertyCellType::kMutable: {
+        // Record a code dependency on the cell, and just deoptimize if the
+        // property ever becomes read-only.
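+        // (For example, a later Object.defineProperty call that marks the
+        // property read-only changes the cell details and invalidates the
+        // dependent code.)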
+        dependencies()->AssumePropertyCell(property_cell);
+        effect = graph()->NewNode(
+            simplified()->StoreField(ForPropertyCellValue(
+                MachineRepresentation::kTagged, Type::NonInternal(),
+                MaybeHandle<Map>(), name)),
+            jsgraph()->HeapConstant(property_cell), value, effect, control);
+        break;
+      }
+    }
+  }
+
+  ReplaceWithValue(node, value, effect, control);
+  return Replace(value);
+}
+
+Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
+  Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
+  Node* effect = NodeProperties::GetEffectInput(node);
+
+  // Try to look up the name in the script context table first (lexical scoping).
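+  // (Top-level `let` and `const` declarations live in script contexts
+  // rather than on the global object, so they are handled here.)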
+  ScriptContextTableLookupResult result;
+  if (LookupInScriptContextTable(name, &result)) {
+    if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
+    Node* context = jsgraph()->HeapConstant(result.context);
+    Node* value = effect = graph()->NewNode(
+        javascript()->LoadContext(0, result.index, result.immutable), context,
+        effect);
+    ReplaceWithValue(node, value, effect);
+    return Replace(value);
+  }
+
+  // Not much we can do if deoptimization support is disabled.
+  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+  // Look up the {name} on the global object instead.
+  return ReduceGlobalAccess(node, nullptr, nullptr, name, AccessMode::kLoad);
+}
+
+Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
+  Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
+  Node* value = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Try to look up the name in the script context table first (lexical scoping).
+  ScriptContextTableLookupResult result;
+  if (LookupInScriptContextTable(name, &result)) {
+    if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
+    if (result.immutable) return NoChange();
+    Node* context = jsgraph()->HeapConstant(result.context);
+    effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
+                              value, context, effect, control);
+    ReplaceWithValue(node, value, effect, control);
+    return Replace(value);
+  }
+
+  // Not much we can do if deoptimization support is disabled.
+  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+  // Look up the {name} on the global object instead.
+  return ReduceGlobalAccess(node, nullptr, value, name, AccessMode::kStore);
+}
+
 Reduction JSNativeContextSpecialization::ReduceNamedAccess(
     Node* node, Node* value, MapHandleList const& receiver_maps,
     Handle<Name> name, AccessMode access_mode, LanguageMode language_mode,
-    Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot, Node* index) {
+    Handle<FeedbackVector> vector, FeedbackSlot slot, Node* index) {
   DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
          node->opcode() == IrOpcode::kJSStoreNamed ||
          node->opcode() == IrOpcode::kJSLoadProperty ||
-         node->opcode() == IrOpcode::kJSStoreProperty);
+         node->opcode() == IrOpcode::kJSStoreProperty ||
+         node->opcode() == IrOpcode::kJSStoreNamedOwn);
   Node* receiver = NodeProperties::GetValueInput(node, 0);
   Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state_eager = NodeProperties::FindFrameStateBefore(node);
-  Node* frame_state_lazy = NodeProperties::GetFrameStateInput(node);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
   // Not much we can do if deoptimization support is disabled.
   if (!(flags() & kDeoptimizationEnabled)) return NoChange();
 
+  // Check if we have an access o.x or o.x=v where o is the current
+  // native context's global proxy, and turn that into a direct access
+  // to the current native context's global object instead.
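+  // (In a browser embedding, for example, `window.foo` evaluated in the
+  // same frame is such an access through the global proxy.)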
+  if (receiver_maps.length() == 1) {
+    Handle<Map> receiver_map = receiver_maps.first();
+    if (receiver_map->IsJSGlobalProxyMap()) {
+      Object* maybe_constructor = receiver_map->GetConstructor();
+      // Detached global proxies have |null| as their constructor.
+      if (maybe_constructor->IsJSFunction() &&
+          JSFunction::cast(maybe_constructor)->native_context() ==
+              *native_context()) {
+        return ReduceGlobalAccess(node, receiver, value, name, access_mode,
+                                  index);
+      }
+    }
+  }
+
   // Compute property access infos for the receiver maps.
   AccessInfoFactory access_info_factory(dependencies(), native_context(),
                                         graph()->zone());
@@ -217,7 +592,7 @@
 
   // TODO(turbofan): Add support for inlining into try blocks.
   bool is_exceptional = NodeProperties::IsExceptionalCall(node);
-  for (auto access_info : access_infos) {
+  for (const auto& access_info : access_infos) {
     if (access_info.IsAccessorConstant()) {
       // Accessors in try-blocks are not supported yet.
       if (is_exceptional || !(flags() & kAccessorInliningEnabled)) {
@@ -227,7 +602,7 @@
       // We do not handle generic calls in try blocks.
       if (is_exceptional) return NoChange();
       // We only handle the generic store IC case.
-      if (vector->GetKind(slot) != FeedbackVectorSlotKind::STORE_IC) {
+      if (!vector->IsStoreIC(slot)) {
         return NoChange();
       }
     }
@@ -260,15 +635,14 @@
                                            receiver, effect, control);
     } else {
       // Monomorphic property access.
-      receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
-                                           receiver, effect, control);
+      receiver = BuildCheckHeapObject(receiver, &effect, control);
       effect = BuildCheckMaps(receiver, effect, control,
                               access_info.receiver_maps());
     }
 
     // Generate the actual property access.
     ValueEffectControl continuation = BuildPropertyAccess(
-        receiver, value, context, frame_state_lazy, effect, control, name,
+        receiver, value, context, frame_state, effect, control, name,
         access_info, access_mode, language_mode, vector, slot);
     value = continuation.value();
     effect = continuation.effect();
@@ -299,8 +673,7 @@
       receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
       receiverissmi_effect = effect;
     } else {
-      receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
-                                           receiver, effect, control);
+      receiver = BuildCheckHeapObject(receiver, &effect, control);
     }
 
     // Load the {receiver} map. The resulting effect is the dominating effect
@@ -369,20 +742,14 @@
           this_effect =
               graph()->NewNode(common()->EffectPhi(this_control_count),
                                this_control_count + 1, &this_effects.front());
-
-          // TODO(turbofan): The effect/control linearization will not find a
-          // FrameState after the EffectPhi that is generated above.
-          this_effect =
-              graph()->NewNode(common()->Checkpoint(), frame_state_eager,
-                               this_effect, this_control);
         }
       }
 
       // Generate the actual property access.
-      ValueEffectControl continuation = BuildPropertyAccess(
-          this_receiver, this_value, context, frame_state_lazy, this_effect,
-          this_control, name, access_info, access_mode, language_mode, vector,
-          slot);
+      ValueEffectControl continuation =
+          BuildPropertyAccess(this_receiver, this_value, context, frame_state,
+                              this_effect, this_control, name, access_info,
+                              access_mode, language_mode, vector, slot);
       values.push_back(continuation.value());
       effects.push_back(continuation.effect());
       controls.push_back(continuation.control());
@@ -418,10 +785,20 @@
     Node* node, Node* value, FeedbackNexus const& nexus, Handle<Name> name,
     AccessMode access_mode, LanguageMode language_mode) {
   DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
-         node->opcode() == IrOpcode::kJSStoreNamed);
+         node->opcode() == IrOpcode::kJSStoreNamed ||
+         node->opcode() == IrOpcode::kJSStoreNamedOwn);
   Node* const receiver = NodeProperties::GetValueInput(node, 0);
   Node* const effect = NodeProperties::GetEffectInput(node);
 
+  if (flags() & kDeoptimizationEnabled) {
+    // Check if we are accessing the current native context's global proxy.
+    HeapObjectMatcher m(receiver);
+    if (m.HasValue() && m.Value().is_identical_to(global_proxy())) {
+      // Optimize accesses to the current native context's global proxy.
+      return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
+    }
+  }
+
   // Check if the {nexus} reports type feedback for the IC.
   if (nexus.IsUninitialized()) {
     if ((flags() & kDeoptimizationEnabled) &&
@@ -452,7 +829,6 @@
                            language_mode, nexus.vector_handle(), nexus.slot());
 }
 
-
 Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
   NamedAccess const& p = NamedAccessOf(node->op());
@@ -514,6 +890,19 @@
                                     AccessMode::kStore, p.language_mode());
 }
 
+Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, node->opcode());
+  StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
+  Node* const value = NodeProperties::GetValueInput(node, 1);
+
+  // Extract receiver maps from the IC using the StoreOwnICNexus.
+  if (!p.feedback().IsValid()) return NoChange();
+  StoreOwnICNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+  // Try to lower the creation of a named property based on the {receiver_maps}.
+  return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
+                                    AccessMode::kStoreInLiteral, STRICT);
+}
 
 Reduction JSNativeContextSpecialization::ReduceElementAccess(
     Node* node, Node* index, Node* value, MapHandleList const& receiver_maps,
@@ -547,12 +936,9 @@
     index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
                                       length, effect, control);
 
-    // Load the character from the {receiver}.
-    value = graph()->NewNode(simplified()->StringCharCodeAt(), receiver, index,
+    // Return the character from the {receiver} as a single-character string.
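+    // For example, "abc"[1] yields the one-character string "b".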
+    value = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
                              control);
-
-    // Return it as a single character string.
-    value = graph()->NewNode(simplified()->StringFromCharCode(), value);
   } else {
     // Retrieve the native context from the given {node}.
     // Compute element access infos for the receiver maps.
@@ -609,8 +995,7 @@
     }
 
     // Ensure that {receiver} is a heap object.
-    receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
-                                         receiver, effect, control);
+    receiver = BuildCheckHeapObject(receiver, &effect, control);
 
     // Check for the monomorphic case.
     if (access_infos.size() == 1) {
@@ -621,13 +1006,13 @@
         Handle<Map> const transition_source = transition.first;
         Handle<Map> const transition_target = transition.second;
         effect = graph()->NewNode(
-            simplified()->TransitionElementsKind(
+            simplified()->TransitionElementsKind(ElementsTransition(
                 IsSimpleMapChangeTransition(transition_source->elements_kind(),
                                             transition_target->elements_kind())
                     ? ElementsTransition::kFastTransition
-                    : ElementsTransition::kSlowTransition),
-            receiver, jsgraph()->HeapConstant(transition_source),
-            jsgraph()->HeapConstant(transition_target), effect, control);
+                    : ElementsTransition::kSlowTransition,
+                transition_source, transition_target)),
+            receiver, effect, control);
       }
 
       // TODO(turbofan): The effect/control linearization will not find a
@@ -672,14 +1057,13 @@
           Handle<Map> const transition_target = transition.second;
           this_effect = graph()->NewNode(
               simplified()->TransitionElementsKind(
-                  IsSimpleMapChangeTransition(
-                      transition_source->elements_kind(),
-                      transition_target->elements_kind())
-                      ? ElementsTransition::kFastTransition
-                      : ElementsTransition::kSlowTransition),
-              receiver, jsgraph()->HeapConstant(transition_source),
-              jsgraph()->HeapConstant(transition_target), this_effect,
-              this_control);
+                  ElementsTransition(IsSimpleMapChangeTransition(
+                                         transition_source->elements_kind(),
+                                         transition_target->elements_kind())
+                                         ? ElementsTransition::kFastTransition
+                                         : ElementsTransition::kSlowTransition,
+                                     transition_source, transition_target)),
+              receiver, this_effect, this_control);
         }
 
         // Load the {receiver} map.
@@ -723,11 +1107,6 @@
             this_effect =
                 graph()->NewNode(common()->EffectPhi(this_control_count),
                                  this_control_count + 1, &this_effects.front());
-
-            // TODO(turbofan): The effect/control linearization will not find a
-            // FrameState after the EffectPhi that is generated above.
-            this_effect = graph()->NewNode(common()->Checkpoint(), frame_state,
-                                           this_effect, this_control);
           }
         }
 
@@ -806,12 +1185,9 @@
         index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
                                           length, effect, control);
 
-        // Load the character from the {receiver}.
-        value = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
-                                 index, control);
-
-        // Return it as a single character string.
-        value = graph()->NewNode(simplified()->StringFromCharCode(), value);
+        // Return the character from the {receiver} as a single-character
+        // string.
+        value = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
+                                 control);
         ReplaceWithValue(node, value, effect, control);
         return Replace(value);
       }
@@ -944,10 +1320,11 @@
     Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
     Node* control, Handle<Name> name, PropertyAccessInfo const& access_info,
     AccessMode access_mode, LanguageMode language_mode,
-    Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot) {
+    Handle<FeedbackVector> vector, FeedbackSlot slot) {
   // Determine actual holder and perform prototype chain checks.
   Handle<JSObject> holder;
   if (access_info.holder().ToHandle(&holder)) {
+    DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
     AssumePrototypesStable(access_info.receiver_maps(), holder);
   }
 
@@ -981,16 +1358,16 @@
             common()->FrameState(BailoutId::None(),
                                  OutputFrameStateCombine::Ignore(),
                                  frame_info0),
-            graph()->NewNode(common()->StateValues(1), receiver),
+            graph()->NewNode(common()->StateValues(1, SparseInputMask::Dense()),
+                             receiver),
             jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
             context, target, frame_state);
 
         // Introduce the call to the getter function.
         if (access_info.constant()->IsJSFunction()) {
           value = effect = graph()->NewNode(
-              javascript()->CallFunction(
-                  2, 0.0f, VectorSlotPair(),
-                  ConvertReceiverMode::kNotNullOrUndefined),
+              javascript()->Call(2, 0.0f, VectorSlotPair(),
+                                 ConvertReceiverMode::kNotNullOrUndefined),
               target, receiver, context, frame_state0, effect, control);
           control = graph()->NewNode(common()->IfSuccess(), value);
         } else {
@@ -998,16 +1375,16 @@
           Handle<FunctionTemplateInfo> function_template_info(
               Handle<FunctionTemplateInfo>::cast(access_info.constant()));
           DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
-          ZoneVector<Node*> stack_parameters(graph()->zone());
           ValueEffectControl value_effect_control = InlineApiCall(
-              receiver, context, target, frame_state0, &stack_parameters,
-              effect, control, shared_info, function_template_info);
+              receiver, context, target, frame_state0, nullptr, effect, control,
+              shared_info, function_template_info);
           value = value_effect_control.value();
           effect = value_effect_control.effect();
           control = value_effect_control.control();
         }
         break;
       }
+      case AccessMode::kStoreInLiteral:
       case AccessMode::kStore: {
         // We need a FrameState for the setter stub to restore the correct
         // context and return the appropriate value to fullcodegen.
@@ -1018,16 +1395,16 @@
             common()->FrameState(BailoutId::None(),
                                  OutputFrameStateCombine::Ignore(),
                                  frame_info0),
-            graph()->NewNode(common()->StateValues(2), receiver, value),
+            graph()->NewNode(common()->StateValues(2, SparseInputMask::Dense()),
+                             receiver, value),
             jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
             context, target, frame_state);
 
         // Introduce the call to the setter function.
         if (access_info.constant()->IsJSFunction()) {
           effect = graph()->NewNode(
-              javascript()->CallFunction(
-                  3, 0.0f, VectorSlotPair(),
-                  ConvertReceiverMode::kNotNullOrUndefined),
+              javascript()->Call(3, 0.0f, VectorSlotPair(),
+                                 ConvertReceiverMode::kNotNullOrUndefined),
               target, receiver, value, context, frame_state0, effect, control);
           control = graph()->NewNode(common()->IfSuccess(), effect);
         } else {
@@ -1035,11 +1412,9 @@
           Handle<FunctionTemplateInfo> function_template_info(
               Handle<FunctionTemplateInfo>::cast(access_info.constant()));
           DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
-          ZoneVector<Node*> stack_parameters(graph()->zone());
-          stack_parameters.push_back(value);
           ValueEffectControl value_effect_control = InlineApiCall(
-              receiver, context, target, frame_state0, &stack_parameters,
-              effect, control, shared_info, function_template_info);
+              receiver, context, target, frame_state0, value, effect, control,
+              shared_info, function_template_info);
           value = value_effect_control.value();
           effect = value_effect_control.effect();
           control = value_effect_control.control();
@@ -1047,7 +1422,7 @@
         break;
       }
     }
-  } else if (access_info.IsDataField()) {
+  } else if (access_info.IsDataField() || access_info.IsDataConstantField()) {
     FieldIndex const field_index = access_info.field_index();
     Type* const field_type = access_info.field_type();
     MachineRepresentation const field_representation =
@@ -1059,14 +1434,36 @@
       // Optimize immutable property loads.
       HeapObjectMatcher m(receiver);
       if (m.HasValue() && m.Value()->IsJSObject()) {
+        // TODO(ishell): Use something simpler like
+        //
+        // Handle<Object> value =
+        //     JSObject::FastPropertyAt(Handle<JSObject>::cast(m.Value()),
+        //                              Representation::Tagged(), field_index);
+        //
+        // here, once we have the immutable bit in the access_info.
+
         // TODO(turbofan): Given that we already have the field_index here, we
         // might be smarter in the future and not rely on the LookupIterator,
         // but for now let's just do what Crankshaft does.
         LookupIterator it(m.Value(), name,
                           LookupIterator::OWN_SKIP_INTERCEPTOR);
-        if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
-          Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
-          return ValueEffectControl(value, effect, control);
+        if (it.state() == LookupIterator::DATA) {
+          bool is_readonly_non_configurable =
+              it.IsReadOnly() && !it.IsConfigurable();
+          if (is_readonly_non_configurable ||
+              (FLAG_track_constant_fields &&
+               access_info.IsDataConstantField())) {
+            Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
+            if (!is_readonly_non_configurable) {
+              // It's necessary to add a dependency on the map that
+              // introduced the field.
+              DCHECK(access_info.IsDataConstantField());
+              DCHECK(!it.is_dictionary_holder());
+              Handle<Map> field_owner_map = it.GetFieldOwnerMap();
+              dependencies()->AssumeFieldOwner(field_owner_map);
+            }
+            return ValueEffectControl(value, effect, control);
+          }
         }
       }
     }
@@ -1080,6 +1477,7 @@
         kTaggedBase,
         field_index.offset(),
         name,
+        MaybeHandle<Map>(),
         field_type,
         MachineType::TypeForRepresentation(field_representation),
         kFullWriteBarrier};
@@ -1090,6 +1488,7 @@
           FieldAccess const storage_access = {kTaggedBase,
                                               field_index.offset(),
                                               name,
+                                              MaybeHandle<Map>(),
                                               Type::OtherInternal(),
                                               MachineType::TaggedPointer(),
                                               kPointerWriteBarrier};
@@ -1099,13 +1498,27 @@
           field_access.offset = HeapNumber::kValueOffset;
           field_access.name = MaybeHandle<Name>();
         }
+      } else if (field_representation ==
+                 MachineRepresentation::kTaggedPointer) {
+        // Remember the map of the field value, if its map is stable. This is
+        // used by LoadElimination to eliminate map checks on the result.
+        Handle<Map> field_map;
+        if (access_info.field_map().ToHandle(&field_map)) {
+          if (field_map->is_stable()) {
+            dependencies()->AssumeMapStable(field_map);
+            field_access.map = field_map;
+          }
+        }
       }
-      // TODO(turbofan): Track the field_map (if any) on the {field_access} and
-      // use it in LoadElimination to eliminate map checks.
       value = effect = graph()->NewNode(simplified()->LoadField(field_access),
                                         storage, effect, control);
     } else {
-      DCHECK_EQ(AccessMode::kStore, access_mode);
+      bool store_to_constant_field = FLAG_track_constant_fields &&
+                                     (access_mode == AccessMode::kStore) &&
+                                     access_info.IsDataConstantField();
+
+      DCHECK(access_mode == AccessMode::kStore ||
+             access_mode == AccessMode::kStoreInLiteral);
       switch (field_representation) {
         case MachineRepresentation::kFloat64: {
           value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
@@ -1138,6 +1551,7 @@
               FieldAccess const storage_access = {kTaggedBase,
                                                   field_index.offset(),
                                                   name,
+                                                  MaybeHandle<Map>(),
                                                   Type::OtherInternal(),
                                                   MachineType::TaggedPointer(),
                                                   kPointerWriteBarrier};
@@ -1149,29 +1563,62 @@
               field_access.machine_type = MachineType::Float64();
             }
           }
-          break;
-        }
-        case MachineRepresentation::kTaggedSigned: {
-          value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
-                                            effect, control);
-          field_access.write_barrier_kind = kNoWriteBarrier;
-          break;
-        }
-        case MachineRepresentation::kTaggedPointer: {
-          // Ensure that {value} is a HeapObject.
-          value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
-                                            value, effect, control);
-          Handle<Map> field_map;
-          if (access_info.field_map().ToHandle(&field_map)) {
-            // Emit a map check for the value.
-            effect = graph()->NewNode(simplified()->CheckMaps(1), value,
-                                      jsgraph()->HeapConstant(field_map),
-                                      effect, control);
+          if (store_to_constant_field) {
+            DCHECK(!access_info.HasTransitionMap());
+            // If the field is constant, check that the value we are about
+            // to store matches the current value.
+            Node* current_value = effect =
+                graph()->NewNode(simplified()->LoadField(field_access), storage,
+                                 effect, control);
+
+            Node* check = graph()->NewNode(simplified()->NumberEqual(),
+                                           current_value, value);
+            effect = graph()->NewNode(simplified()->CheckIf(), check, effect,
+                                      control);
+            return ValueEffectControl(value, effect, control);
           }
-          field_access.write_barrier_kind = kPointerWriteBarrier;
           break;
         }
+        case MachineRepresentation::kTaggedSigned:
+        case MachineRepresentation::kTaggedPointer:
         case MachineRepresentation::kTagged:
+          if (store_to_constant_field) {
+            DCHECK(!access_info.HasTransitionMap());
+            // If the field is constant, check that the value we are about
+            // to store matches the current value.
+            Node* current_value = effect =
+                graph()->NewNode(simplified()->LoadField(field_access), storage,
+                                 effect, control);
+
+            Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
+                                           current_value, value);
+            effect = graph()->NewNode(simplified()->CheckIf(), check, effect,
+                                      control);
+            return ValueEffectControl(value, effect, control);
+          }
+
+          if (field_representation == MachineRepresentation::kTaggedSigned) {
+            value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
+                                              effect, control);
+            field_access.write_barrier_kind = kNoWriteBarrier;
+
+          } else if (field_representation ==
+                     MachineRepresentation::kTaggedPointer) {
+            // Ensure that {value} is a HeapObject.
+            value = BuildCheckHeapObject(value, &effect, control);
+            Handle<Map> field_map;
+            if (access_info.field_map().ToHandle(&field_map)) {
+              // Emit a map check for the value.
+              effect = graph()->NewNode(
+                  simplified()->CheckMaps(CheckMapsFlag::kNone,
+                                          ZoneHandleSet<Map>(field_map)),
+                  value, effect, control);
+            }
+            field_access.write_barrier_kind = kPointerWriteBarrier;
+
+          } else {
+            DCHECK_EQ(MachineRepresentation::kTagged, field_representation);
+          }
           break;
         case MachineRepresentation::kNone:
         case MachineRepresentation::kBit:
@@ -1181,6 +1628,9 @@
         case MachineRepresentation::kWord64:
         case MachineRepresentation::kFloat32:
         case MachineRepresentation::kSimd128:
+        case MachineRepresentation::kSimd1x4:
+        case MachineRepresentation::kSimd1x8:
+        case MachineRepresentation::kSimd1x16:
           UNREACHABLE();
           break;
       }
@@ -1202,7 +1652,8 @@
   } else {
     DCHECK(access_info.IsGeneric());
     DCHECK_EQ(AccessMode::kStore, access_mode);
-    DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
+    DCHECK(vector->IsStoreIC(slot));
+    DCHECK_EQ(vector->GetLanguageMode(slot), language_mode);
     Callable callable =
         CodeFactory::StoreICInOptimizedCode(isolate(), language_mode);
     const CallInterfaceDescriptor& descriptor = callable.descriptor();
@@ -1226,6 +1677,85 @@
   return ValueEffectControl(value, effect, control);
 }
 
+Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
+    Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreDataPropertyInLiteral, node->opcode());
+
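+  // These stores originate from object literals, e.g. from computed
+  // property names such as ({ [name]: value }).
+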
+  // If deoptimization is disabled, we cannot optimize.
+  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+  DataPropertyParameters const& p = DataPropertyParametersOf(node->op());
+
+  if (!p.feedback().IsValid()) return NoChange();
+
+  StoreDataPropertyInLiteralICNexus nexus(p.feedback().vector(),
+                                          p.feedback().slot());
+  if (nexus.IsUninitialized()) {
+    return NoChange();
+  }
+
+  if (nexus.ic_state() == MEGAMORPHIC) {
+    return NoChange();
+  }
+
+  DCHECK_EQ(MONOMORPHIC, nexus.ic_state());
+
+  Map* map = nexus.FindFirstMap();
+  if (map == nullptr) {
+    // Maps are weakly held in the type feedback vector, we may not have one.
+    return NoChange();
+  }
+
+  Handle<Map> receiver_map(map, isolate());
+  Handle<Name> cached_name =
+      handle(Name::cast(nexus.GetFeedbackExtra()), isolate());
+
+  PropertyAccessInfo access_info;
+  AccessInfoFactory access_info_factory(dependencies(), native_context(),
+                                        graph()->zone());
+  if (!access_info_factory.ComputePropertyAccessInfo(
+          receiver_map, cached_name, AccessMode::kStoreInLiteral,
+          &access_info)) {
+    return NoChange();
+  }
+
+  if (access_info.IsGeneric()) {
+    return NoChange();
+  }
+
+  Node* receiver = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Monomorphic property access.
+  receiver = BuildCheckHeapObject(receiver, &effect, control);
+
+  effect =
+      BuildCheckMaps(receiver, effect, control, access_info.receiver_maps());
+
+  // Ensure that {name} matches the cached name.
+  Node* name = NodeProperties::GetValueInput(node, 1);
+  Node* check = graph()->NewNode(simplified()->ReferenceEqual(), name,
+                                 jsgraph()->HeapConstant(cached_name));
+  effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+
+  Node* value = NodeProperties::GetValueInput(node, 2);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* frame_state_lazy = NodeProperties::GetFrameStateInput(node);
+
+  // Generate the actual property access.
+  ValueEffectControl continuation = BuildPropertyAccess(
+      receiver, value, context, frame_state_lazy, effect, control, cached_name,
+      access_info, AccessMode::kStoreInLiteral, LanguageMode::SLOPPY,
+      p.feedback().vector(), p.feedback().slot());
+  value = continuation.value();
+  effect = continuation.effect();
+  control = continuation.control();
+
+  ReplaceWithValue(node, value, effect, control);
+  return Replace(value);
+}
+
 namespace {
 
 ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
@@ -1249,42 +1779,79 @@
     Node* receiver, Node* index, Node* value, Node* effect, Node* control,
     ElementAccessInfo const& access_info, AccessMode access_mode,
     KeyedAccessStoreMode store_mode) {
+  DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
+
   // TODO(bmeurer): We currently specialize based on elements kind. We should
   // also be able to properly support strings and other JSObjects here.
   ElementsKind elements_kind = access_info.elements_kind();
   MapList const& receiver_maps = access_info.receiver_maps();
 
-  // Load the elements for the {receiver}.
-  Node* elements = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
-      effect, control);
-
-  // Don't try to store to a copy-on-write backing store.
-  if (access_mode == AccessMode::kStore &&
-      IsFastSmiOrObjectElementsKind(elements_kind) &&
-      store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
-    effect =
-        graph()->NewNode(simplified()->CheckMaps(1), elements,
-                         jsgraph()->FixedArrayMapConstant(), effect, control);
-  }
-
   if (IsFixedTypedArrayElementsKind(elements_kind)) {
-    // Load the {receiver}s length.
-    Node* length = effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()),
-        receiver, effect, control);
+    Node* buffer;
+    Node* length;
+    Node* base_pointer;
+    Node* external_pointer;
 
-    // Check if the {receiver}s buffer was neutered.
-    Node* buffer = effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
-        receiver, effect, control);
-    Node* check = effect = graph()->NewNode(
-        simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+    // Check if we can constant-fold information about the {receiver} (i.e.
+    // for asm.js-like code patterns).
+    HeapObjectMatcher m(receiver);
+    if (m.HasValue() && m.Value()->IsJSTypedArray()) {
+      Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(m.Value());
 
-    // Default to zero if the {receiver}s buffer was neutered.
-    length = graph()->NewNode(
-        common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
-        check, jsgraph()->ZeroConstant(), length);
+      // Determine the {receiver}s (known) length.
+      length = jsgraph()->Constant(typed_array->length_value());
+
+      // Check if the {receiver}s buffer was neutered.
+      buffer = jsgraph()->HeapConstant(typed_array->GetBuffer());
+
+      // Load the (known) base and external pointer for the {receiver}. The
+      // {external_pointer} might be invalid if the {buffer} was neutered, so
+      // we need to make sure that any access is properly guarded.
+      base_pointer = jsgraph()->ZeroConstant();
+      external_pointer = jsgraph()->PointerConstant(
+          FixedTypedArrayBase::cast(typed_array->elements())
+              ->external_pointer());
+    } else {
+      // Load the {receiver}s length.
+      length = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()),
+          receiver, effect, control);
+
+      // Load the buffer for the {receiver}.
+      buffer = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+          receiver, effect, control);
+
+      // Load the elements for the {receiver}.
+      Node* elements = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+          receiver, effect, control);
+
+      // Load the base and external pointer for the {receiver}s {elements}.
+      base_pointer = effect = graph()->NewNode(
+          simplified()->LoadField(
+              AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
+          elements, effect, control);
+      external_pointer = effect = graph()->NewNode(
+          simplified()->LoadField(
+              AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
+          elements, effect, control);
+    }
+
+    // See if we can skip the neutering check.
+    if (isolate()->IsArrayBufferNeuteringIntact()) {
+      // Add a code dependency so we are deoptimized in case an ArrayBuffer
+      // gets neutered.
+      dependencies()->AssumePropertyCell(
+          factory()->array_buffer_neutering_protector());
+    } else {
+      // Default to zero if the {receiver}s buffer was neutered.
+      Node* check = effect = graph()->NewNode(
+          simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+      length = graph()->NewNode(
+          common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+          check, jsgraph()->ZeroConstant(), length);
+    }
 
     if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
       // Check that the {index} is a valid array index, we do the actual
@@ -1295,21 +1862,10 @@
                                         effect, control);
     } else {
       // Check that the {index} is in the valid range for the {receiver}.
-      DCHECK_EQ(STANDARD_STORE, store_mode);
       index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
                                         length, effect, control);
     }
 
-    // Load the base and external pointer for the {receiver}.
-    Node* base_pointer = effect = graph()->NewNode(
-        simplified()->LoadField(
-            AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
-        elements, effect, control);
-    Node* external_pointer = effect = graph()->NewNode(
-        simplified()->LoadField(
-            AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
-        elements, effect, control);
-
     // Access the actual element.
     ExternalArrayType external_array_type =
         GetArrayTypeFromElementsKind(elements_kind);
@@ -1320,6 +1876,9 @@
             base_pointer, external_pointer, index, effect, control);
         break;
       }
+      case AccessMode::kStoreInLiteral:
+        UNREACHABLE();
+        break;
       case AccessMode::kStore: {
         // Ensure that the {value} is actually a Number.
         value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
@@ -1360,7 +1919,6 @@
               graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
         } else {
           // Perform the actual store
-          DCHECK_EQ(STANDARD_STORE, store_mode);
           effect = graph()->NewNode(
               simplified()->StoreTypedElement(external_array_type), buffer,
               base_pointer, external_pointer, index, value, effect, control);
@@ -1369,6 +1927,22 @@
       }
     }
   } else {
+    // Load the elements for the {receiver}.
+    Node* elements = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+        effect, control);
+
+    // Don't try to store to a copy-on-write backing store.
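+    // (Copy-on-write backing stores use the fixed_cow_array_map rather than
+    // the fixed_array_map, so the map check below deoptimizes before we
+    // could write into shared COW elements.)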
+    if (access_mode == AccessMode::kStore &&
+        IsFastSmiOrObjectElementsKind(elements_kind) &&
+        store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+      effect = graph()->NewNode(
+          simplified()->CheckMaps(
+              CheckMapsFlag::kNone,
+              ZoneHandleSet<Map>(factory()->fixed_array_map())),
+          elements, effect, control);
+    }
+
     // Check if the {receiver} is a JSArray.
     bool receiver_is_jsarray = HasOnlyJSArrayMaps(receiver_maps);
 
@@ -1500,25 +2074,25 @@
 
 JSNativeContextSpecialization::ValueEffectControl
 JSNativeContextSpecialization::InlineApiCall(
-    Node* receiver, Node* context, Node* target, Node* frame_state,
-    ZoneVector<Node*>* stack_parameters, Node* effect, Node* control,
-    Handle<SharedFunctionInfo> shared_info,
+    Node* receiver, Node* context, Node* target, Node* frame_state, Node* value,
+    Node* effect, Node* control, Handle<SharedFunctionInfo> shared_info,
     Handle<FunctionTemplateInfo> function_template_info) {
   Handle<CallHandlerInfo> call_handler_info = handle(
       CallHandlerInfo::cast(function_template_info->call_code()), isolate());
   Handle<Object> call_data_object(call_handler_info->data(), isolate());
 
+  // Only setters have a value.
+  int const argc = value == nullptr ? 0 : 1;
   // The stub always expects the receiver as the first param on the stack.
   CallApiCallbackStub stub(
-      isolate(), static_cast<int>(stack_parameters->size()),
-      call_data_object->IsUndefined(isolate()),
-      true /* TODO(epertoso): similar to CallOptimization */);
+      isolate(), argc, call_data_object->IsUndefined(isolate()),
+      true /* FunctionTemplateInfo doesn't have an associated context. */);
   CallInterfaceDescriptor call_interface_descriptor =
       stub.GetCallInterfaceDescriptor();
   CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
       isolate(), graph()->zone(), call_interface_descriptor,
-      call_interface_descriptor.GetStackParameterCount() +
-          static_cast<int>(stack_parameters->size()) + 1,
+      call_interface_descriptor.GetStackParameterCount() + argc +
+          1 /* implicit receiver */,
       CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
       MachineType::AnyTagged(), 1);
 
@@ -1529,42 +2103,62 @@
           &function, ExternalReference::DIRECT_API_CALL, isolate())));
   Node* code = jsgraph()->HeapConstant(stub.GetCode());
 
-  ZoneVector<Node*> inputs(zone());
-  inputs.push_back(code);
-
-  // CallApiCallbackStub's register arguments.
-  inputs.push_back(target);
-  inputs.push_back(data);
-  inputs.push_back(receiver);
-  inputs.push_back(function_reference);
-
-  // Stack parameters: CallApiCallbackStub expects the first one to be the
-  // receiver.
-  inputs.push_back(receiver);
-  for (Node* node : *stack_parameters) {
-    inputs.push_back(node);
+  // Add CallApiCallbackStub's register arguments as well.
+  Node* inputs[11] = {
+      code, target, data, receiver /* holder */, function_reference, receiver};
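+  // Resulting input layout (derived from the assignments below):
+  //   without {value}: [code, target, data, holder, function_reference,
+  //                     receiver, context, frame_state, effect, control]
+  //   with {value}:    same, but {value} is placed at inputs[6] between
+  //                    the receiver and the context (11 nodes in total).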
+  int index = 6 + argc;
+  inputs[index++] = context;
+  inputs[index++] = frame_state;
+  inputs[index++] = effect;
+  inputs[index++] = control;
+  // This needs to stay here because of the edge case described in
+  // http://crbug.com/675648.
+  if (value != nullptr) {
+    inputs[6] = value;
   }
-  inputs.push_back(context);
-  inputs.push_back(frame_state);
-  inputs.push_back(effect);
-  inputs.push_back(control);
 
   Node* effect0;
   Node* value0 = effect0 =
-      graph()->NewNode(common()->Call(call_descriptor),
-                       static_cast<int>(inputs.size()), inputs.data());
+      graph()->NewNode(common()->Call(call_descriptor), index, inputs);
   Node* control0 = graph()->NewNode(common()->IfSuccess(), value0);
   return ValueEffectControl(value0, effect0, control0);
 }
 
+Node* JSNativeContextSpecialization::BuildCheckHeapObject(Node* receiver,
+                                                          Node** effect,
+                                                          Node* control) {
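+  // All of the operators listed below are known to produce a heap object
+  // (never a Smi), so the explicit CheckHeapObject can be elided for them.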
+  switch (receiver->opcode()) {
+    case IrOpcode::kHeapConstant:
+    case IrOpcode::kJSCreate:
+    case IrOpcode::kJSCreateArguments:
+    case IrOpcode::kJSCreateArray:
+    case IrOpcode::kJSCreateClosure:
+    case IrOpcode::kJSCreateIterResultObject:
+    case IrOpcode::kJSCreateLiteralArray:
+    case IrOpcode::kJSCreateLiteralObject:
+    case IrOpcode::kJSCreateLiteralRegExp:
+    case IrOpcode::kJSConvertReceiver:
+    case IrOpcode::kJSToName:
+    case IrOpcode::kJSToString:
+    case IrOpcode::kJSToObject:
+    case IrOpcode::kJSTypeOf: {
+      return receiver;
+    }
+    default: {
+      return *effect = graph()->NewNode(simplified()->CheckHeapObject(),
+                                        receiver, *effect, control);
+    }
+  }
+}
+
 Node* JSNativeContextSpecialization::BuildCheckMaps(
     Node* receiver, Node* effect, Node* control,
-    std::vector<Handle<Map>> const& maps) {
+    std::vector<Handle<Map>> const& receiver_maps) {
   HeapObjectMatcher m(receiver);
   if (m.HasValue()) {
     Handle<Map> receiver_map(m.Value()->map(), isolate());
     if (receiver_map->is_stable()) {
-      for (Handle<Map> map : maps) {
+      for (Handle<Map> map : receiver_maps) {
         if (map.is_identical_to(receiver_map)) {
           dependencies()->AssumeMapStable(receiver_map);
           return effect;
@@ -1572,17 +2166,16 @@
       }
     }
   }
-  int const map_input_count = static_cast<int>(maps.size());
-  int const input_count = 1 + map_input_count + 1 + 1;
-  Node** inputs = zone()->NewArray<Node*>(input_count);
-  inputs[0] = receiver;
-  for (int i = 0; i < map_input_count; ++i) {
-    inputs[1 + i] = jsgraph()->HeapConstant(maps[i]);
+  ZoneHandleSet<Map> maps;
+  CheckMapsFlags flags = CheckMapsFlag::kNone;
+  for (Handle<Map> map : receiver_maps) {
+    maps.insert(map, graph()->zone());
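+    // A migration target means instances with an older, deprecated version
+    // of this map may still exist; CheckMaps will then try to migrate such
+    // instances before deoptimizing.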
+    if (map->is_migration_target()) {
+      flags |= CheckMapsFlag::kTryMigrateInstance;
+    }
   }
-  inputs[input_count - 2] = effect;
-  inputs[input_count - 1] = control;
-  return graph()->NewNode(simplified()->CheckMaps(map_input_count), input_count,
-                          inputs);
+  return graph()->NewNode(simplified()->CheckMaps(flags, maps), receiver,
+                          effect, control);
 }
 
 void JSNativeContextSpecialization::AssumePrototypesStable(
@@ -1640,15 +2233,14 @@
     MapHandleList* receiver_maps) {
   DCHECK_EQ(0, receiver_maps->length());
   // See if we can infer a concrete type for the {receiver}.
-  Handle<Map> receiver_map;
-  if (InferReceiverMap(receiver, effect).ToHandle(&receiver_map)) {
-    // We can assume that the {receiver} still has the infered {receiver_map}.
-    receiver_maps->Add(receiver_map);
+  if (InferReceiverMaps(receiver, effect, receiver_maps)) {
+    // We can assume that the {receiver} still has one of the inferred
+    // {receiver_maps}.
     return true;
   }
   // Try to extract some maps from the {nexus}.
   if (nexus.ExtractMaps(receiver_maps) != 0) {
+    // Try to filter impossible candidates based on inferred root map.
+    Handle<Map> receiver_map;
     if (InferReceiverRootMap(receiver).ToHandle(&receiver_map)) {
       for (int i = receiver_maps->length(); --i >= 0;) {
         if (receiver_maps->at(i)->FindRootMap() != *receiver_map) {
@@ -1661,38 +2253,28 @@
   return false;
 }
 
-MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverMap(Node* receiver,
-                                                                 Node* effect) {
-  HeapObjectMatcher m(receiver);
-  if (m.HasValue()) {
-    Handle<Map> receiver_map(m.Value()->map(), isolate());
-    if (receiver_map->is_stable()) return receiver_map;
-  } else if (m.IsJSCreate()) {
-    HeapObjectMatcher mtarget(m.InputAt(0));
-    HeapObjectMatcher mnewtarget(m.InputAt(1));
-    if (mtarget.HasValue() && mnewtarget.HasValue()) {
-      Handle<JSFunction> constructor =
-          Handle<JSFunction>::cast(mtarget.Value());
-      if (constructor->has_initial_map()) {
-        Handle<Map> initial_map(constructor->initial_map(), isolate());
-        if (initial_map->constructor_or_backpointer() == *mnewtarget.Value()) {
-          // Walk up the {effect} chain to see if the {receiver} is the
-          // dominating effect and there's no other observable write in
-          // between.
-          while (true) {
-            if (receiver == effect) return initial_map;
-            if (!effect->op()->HasProperty(Operator::kNoWrite) ||
-                effect->op()->EffectInputCount() != 1) {
-              break;
-            }
-            effect = NodeProperties::GetEffectInput(effect);
-          }
-        }
-      }
+bool JSNativeContextSpecialization::InferReceiverMaps(
+    Node* receiver, Node* effect, MapHandleList* receiver_maps) {
+  ZoneHandleSet<Map> maps;
+  NodeProperties::InferReceiverMapsResult result =
+      NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+  if (result == NodeProperties::kReliableReceiverMaps) {
+    for (size_t i = 0; i < maps.size(); ++i) {
+      receiver_maps->Add(maps[i]);
     }
+    return true;
+  } else if (result == NodeProperties::kUnreliableReceiverMaps) {
+    // For unreliable receiver maps, we can still use the information
+    // if the maps are stable.
+    for (size_t i = 0; i < maps.size(); ++i) {
+      if (!maps[i]->is_stable()) return false;
+    }
+    for (size_t i = 0; i < maps.size(); ++i) {
+      receiver_maps->Add(maps[i]);
+    }
+    return true;
   }
-  // TODO(turbofan): Go hunting for CheckMaps(receiver) in the effect chain?
-  return MaybeHandle<Map>();
+  return false;
 }
 
 MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
@@ -1718,6 +2300,24 @@
   return MaybeHandle<Map>();
 }
 
+bool JSNativeContextSpecialization::LookupInScriptContextTable(
+    Handle<Name> name, ScriptContextTableLookupResult* result) {
+  if (!name->IsString()) return false;
+  Handle<ScriptContextTable> script_context_table(
+      global_object()->native_context()->script_context_table(), isolate());
+  ScriptContextTable::LookupResult lookup_result;
+  if (!ScriptContextTable::Lookup(script_context_table,
+                                  Handle<String>::cast(name), &lookup_result)) {
+    return false;
+  }
+  Handle<Context> script_context = ScriptContextTable::GetContext(
+      script_context_table, lookup_result.context_index);
+  result->context = script_context;
+  result->immutable = lookup_result.mode == CONST;
+  result->index = lookup_result.slot_index;
+  return true;
+}
+
 Graph* JSNativeContextSpecialization::graph() const {
   return jsgraph()->graph();
 }
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
index 2d07061..249c52d 100644
--- a/src/compiler/js-native-context-specialization.h
+++ b/src/compiler/js-native-context-specialization.h
@@ -8,7 +8,7 @@
 #include "src/base/flags.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/deoptimize-reason.h"
-#include "src/type-feedback-vector.h"
+#include "src/feedback-vector.h"
 
 namespace v8 {
 namespace internal {
@@ -53,12 +53,19 @@
   Reduction Reduce(Node* node) final;
 
  private:
+  Reduction ReduceJSAdd(Node* node);
+  Reduction ReduceJSGetSuperConstructor(Node* node);
   Reduction ReduceJSInstanceOf(Node* node);
+  Reduction ReduceJSOrdinaryHasInstance(Node* node);
   Reduction ReduceJSLoadContext(Node* node);
+  Reduction ReduceJSLoadGlobal(Node* node);
+  Reduction ReduceJSStoreGlobal(Node* node);
   Reduction ReduceJSLoadNamed(Node* node);
   Reduction ReduceJSStoreNamed(Node* node);
   Reduction ReduceJSLoadProperty(Node* node);
   Reduction ReduceJSStoreProperty(Node* node);
+  Reduction ReduceJSStoreNamedOwn(Node* node);
+  Reduction ReduceJSStoreDataPropertyInLiteral(Node* node);
 
   Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
                                 MapHandleList const& receiver_maps,
@@ -79,8 +86,11 @@
                               MapHandleList const& receiver_maps,
                               Handle<Name> name, AccessMode access_mode,
                               LanguageMode language_mode,
-                              Handle<TypeFeedbackVector> vector,
-                              FeedbackVectorSlot slot, Node* index = nullptr);
+                              Handle<FeedbackVector> vector, FeedbackSlot slot,
+                              Node* index = nullptr);
+  Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
+                               Handle<Name> name, AccessMode access_mode,
+                               Node* index = nullptr);
 
   Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
 
@@ -105,8 +115,8 @@
       Node* receiver, Node* value, Node* context, Node* frame_state,
       Node* effect, Node* control, Handle<Name> name,
       PropertyAccessInfo const& access_info, AccessMode access_mode,
-      LanguageMode language_mode, Handle<TypeFeedbackVector> vector,
-      FeedbackVectorSlot slot);
+      LanguageMode language_mode, Handle<FeedbackVector> vector,
+      FeedbackSlot slot);
 
   // Construct the appropriate subgraph for element access.
   ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
@@ -116,6 +126,9 @@
                                         AccessMode access_mode,
                                         KeyedAccessStoreMode store_mode);
 
+  // Construct an appropriate heap object check.
+  Node* BuildCheckHeapObject(Node* receiver, Node** effect, Node* control);
+
   // Construct an appropriate map check.
   Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
                        std::vector<Handle<Map>> const& maps);
@@ -136,20 +149,27 @@
                            FeedbackNexus const& nexus,
                            MapHandleList* receiver_maps);
 
-  // Try to infer a map for the given {receiver} at the current {effect}.
-  // If a map is returned then you can be sure that the {receiver} definitely
-  // has the returned map at this point in the program (identified by {effect}).
-  MaybeHandle<Map> InferReceiverMap(Node* receiver, Node* effect);
+  // Try to infer maps for the given {receiver} at the current {effect}.
+  // If maps are returned then you can be sure that the {receiver} definitely
+  // has one of the returned maps at this point in the program (identified
+  // by {effect}).
+  bool InferReceiverMaps(Node* receiver, Node* effect,
+                         MapHandleList* receiver_maps);
   // Try to infer a root map for the {receiver} independent of the current
   // program location.
   MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
 
   ValueEffectControl InlineApiCall(
       Node* receiver, Node* context, Node* target, Node* frame_state,
-      ZoneVector<Node*>* stack_parameters, Node* effect, Node* control,
+      Node* value, Node* effect, Node* control,
       Handle<SharedFunctionInfo> shared_info,
       Handle<FunctionTemplateInfo> function_template_info);
 
+  // Script context lookup logic.
+  struct ScriptContextTableLookupResult;
+  bool LookupInScriptContextTable(Handle<Name> name,
+                                  ScriptContextTableLookupResult* result);
+
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
   Isolate* isolate() const;
@@ -159,12 +179,16 @@
   SimplifiedOperatorBuilder* simplified() const;
   MachineOperatorBuilder* machine() const;
   Flags flags() const { return flags_; }
+  Handle<JSGlobalObject> global_object() const { return global_object_; }
+  Handle<JSGlobalProxy> global_proxy() const { return global_proxy_; }
   Handle<Context> native_context() const { return native_context_; }
   CompilationDependencies* dependencies() const { return dependencies_; }
   Zone* zone() const { return zone_; }
 
   JSGraph* const jsgraph_;
   Flags const flags_;
+  Handle<JSGlobalObject> global_object_;
+  Handle<JSGlobalProxy> global_proxy_;
   Handle<Context> native_context_;
   CompilationDependencies* const dependencies_;
   Zone* const zone_;
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index f64630c..a8f5692 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -9,8 +9,9 @@
 #include "src/base/lazy-instance.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
+#include "src/feedback-vector.h"
 #include "src/handles-inl.h"
-#include "src/type-feedback-vector.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -20,7 +21,7 @@
 
 
 int VectorSlotPair::index() const {
-  return vector_.is_null() ? -1 : vector_->GetIndex(slot_);
+  return vector_.is_null() ? -1 : FeedbackVector::GetIndex(slot_);
 }
 
 
@@ -51,48 +52,99 @@
   return OpParameter<ToBooleanHints>(op);
 }
 
-
-bool operator==(CallConstructParameters const& lhs,
-                CallConstructParameters const& rhs) {
+bool operator==(ConstructParameters const& lhs,
+                ConstructParameters const& rhs) {
   return lhs.arity() == rhs.arity() && lhs.frequency() == rhs.frequency() &&
          lhs.feedback() == rhs.feedback();
 }
 
-
-bool operator!=(CallConstructParameters const& lhs,
-                CallConstructParameters const& rhs) {
+bool operator!=(ConstructParameters const& lhs,
+                ConstructParameters const& rhs) {
   return !(lhs == rhs);
 }
 
-
-size_t hash_value(CallConstructParameters const& p) {
+size_t hash_value(ConstructParameters const& p) {
   return base::hash_combine(p.arity(), p.frequency(), p.feedback());
 }
 
-
-std::ostream& operator<<(std::ostream& os, CallConstructParameters const& p) {
+std::ostream& operator<<(std::ostream& os, ConstructParameters const& p) {
   return os << p.arity() << ", " << p.frequency();
 }
 
-
-CallConstructParameters const& CallConstructParametersOf(Operator const* op) {
-  DCHECK_EQ(IrOpcode::kJSCallConstruct, op->opcode());
-  return OpParameter<CallConstructParameters>(op);
+ConstructParameters const& ConstructParametersOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kJSConstruct, op->opcode());
+  return OpParameter<ConstructParameters>(op);
 }
 
+bool operator==(ConstructWithSpreadParameters const& lhs,
+                ConstructWithSpreadParameters const& rhs) {
+  return lhs.arity() == rhs.arity();
+}
 
-std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
+bool operator!=(ConstructWithSpreadParameters const& lhs,
+                ConstructWithSpreadParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(ConstructWithSpreadParameters const& p) {
+  return base::hash_combine(p.arity());
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         ConstructWithSpreadParameters const& p) {
+  return os << p.arity();
+}
+
+ConstructWithSpreadParameters const& ConstructWithSpreadParametersOf(
+    Operator const* op) {
+  DCHECK_EQ(IrOpcode::kJSConstructWithSpread, op->opcode());
+  return OpParameter<ConstructWithSpreadParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os, CallParameters const& p) {
   os << p.arity() << ", " << p.frequency() << ", " << p.convert_mode() << ", "
      << p.tail_call_mode();
   return os;
 }
 
-
-const CallFunctionParameters& CallFunctionParametersOf(const Operator* op) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, op->opcode());
-  return OpParameter<CallFunctionParameters>(op);
+const CallParameters& CallParametersOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kJSCall, op->opcode());
+  return OpParameter<CallParameters>(op);
 }
 
+std::ostream& operator<<(std::ostream& os,
+                         CallForwardVarargsParameters const& p) {
+  return os << p.start_index() << ", " << p.tail_call_mode();
+}
+
+CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
+    Operator const* op) {
+  DCHECK_EQ(IrOpcode::kJSCallForwardVarargs, op->opcode());
+  return OpParameter<CallForwardVarargsParameters>(op);
+}
+
+bool operator==(CallWithSpreadParameters const& lhs,
+                CallWithSpreadParameters const& rhs) {
+  return lhs.arity() == rhs.arity();
+}
+
+bool operator!=(CallWithSpreadParameters const& lhs,
+                CallWithSpreadParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(CallWithSpreadParameters const& p) {
+  return base::hash_combine(p.arity());
+}
+
+std::ostream& operator<<(std::ostream& os, CallWithSpreadParameters const& p) {
+  return os << p.arity();
+}
+
+CallWithSpreadParameters const& CallWithSpreadParametersOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kJSCallWithSpread, op->opcode());
+  return OpParameter<CallWithSpreadParameters>(op);
+}
 
 bool operator==(CallRuntimeParameters const& lhs,
                 CallRuntimeParameters const& rhs) {
@@ -191,6 +243,84 @@
   return OpParameter<CreateCatchContextParameters>(op);
 }
 
+CreateFunctionContextParameters::CreateFunctionContextParameters(
+    int slot_count, ScopeType scope_type)
+    : slot_count_(slot_count), scope_type_(scope_type) {}
+
+bool operator==(CreateFunctionContextParameters const& lhs,
+                CreateFunctionContextParameters const& rhs) {
+  return lhs.slot_count() == rhs.slot_count() &&
+         lhs.scope_type() == rhs.scope_type();
+}
+
+bool operator!=(CreateFunctionContextParameters const& lhs,
+                CreateFunctionContextParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(CreateFunctionContextParameters const& parameters) {
+  return base::hash_combine(parameters.slot_count(),
+                            static_cast<int>(parameters.scope_type()));
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         CreateFunctionContextParameters const& parameters) {
+  return os << parameters.slot_count() << ", " << parameters.scope_type();
+}
+
+CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
+    Operator const* op) {
+  DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, op->opcode());
+  return OpParameter<CreateFunctionContextParameters>(op);
+}
+
+bool operator==(StoreNamedOwnParameters const& lhs,
+                StoreNamedOwnParameters const& rhs) {
+  return lhs.name().location() == rhs.name().location() &&
+         lhs.feedback() == rhs.feedback();
+}
+
+bool operator!=(StoreNamedOwnParameters const& lhs,
+                StoreNamedOwnParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(StoreNamedOwnParameters const& p) {
+  return base::hash_combine(p.name().location(), p.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os, StoreNamedOwnParameters const& p) {
+  return os << Brief(*p.name());
+}
+
+StoreNamedOwnParameters const& StoreNamedOwnParametersOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, op->opcode());
+  return OpParameter<StoreNamedOwnParameters>(op);
+}
+
+bool operator==(DataPropertyParameters const& lhs,
+                DataPropertyParameters const& rhs) {
+  return lhs.feedback() == rhs.feedback();
+}
+
+bool operator!=(DataPropertyParameters const& lhs,
+                DataPropertyParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(DataPropertyParameters const& p) {
+  return base::hash_combine(p.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os, DataPropertyParameters const& p) {
+  return os;
+}
+
+DataPropertyParameters const& DataPropertyParametersOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral);
+  return OpParameter<DataPropertyParameters>(op);
+}
+
 bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
   return lhs.name().location() == rhs.name().location() &&
          lhs.language_mode() == rhs.language_mode() &&
@@ -350,6 +480,7 @@
 bool operator==(CreateClosureParameters const& lhs,
                 CreateClosureParameters const& rhs) {
   return lhs.pretenure() == rhs.pretenure() &&
+         lhs.feedback() == rhs.feedback() &&
          lhs.shared_info().location() == rhs.shared_info().location();
 }
 
@@ -361,7 +492,8 @@
 
 
 size_t hash_value(CreateClosureParameters const& p) {
-  return base::hash_combine(p.pretenure(), p.shared_info().location());
+  return base::hash_combine(p.pretenure(), p.shared_info().location(),
+                            p.feedback());
 }
 
 
@@ -410,17 +542,7 @@
 }
 
 BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
-  DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
-         op->opcode() == IrOpcode::kJSBitwiseXor ||
-         op->opcode() == IrOpcode::kJSBitwiseAnd ||
-         op->opcode() == IrOpcode::kJSShiftLeft ||
-         op->opcode() == IrOpcode::kJSShiftRight ||
-         op->opcode() == IrOpcode::kJSShiftRightLogical ||
-         op->opcode() == IrOpcode::kJSAdd ||
-         op->opcode() == IrOpcode::kJSSubtract ||
-         op->opcode() == IrOpcode::kJSMultiply ||
-         op->opcode() == IrOpcode::kJSDivide ||
-         op->opcode() == IrOpcode::kJSModulus);
+  DCHECK_EQ(IrOpcode::kJSAdd, op->opcode());
   return OpParameter<BinaryOperationHint>(op);
 }
 
@@ -436,39 +558,41 @@
   return OpParameter<CompareOperationHint>(op);
 }
 
-#define CACHED_OP_LIST(V)                                   \
-  V(ToInteger, Operator::kNoProperties, 1, 1)               \
-  V(ToLength, Operator::kNoProperties, 1, 1)                \
-  V(ToName, Operator::kNoProperties, 1, 1)                  \
-  V(ToNumber, Operator::kNoProperties, 1, 1)                \
-  V(ToObject, Operator::kFoldable, 1, 1)                    \
-  V(ToString, Operator::kNoProperties, 1, 1)                \
-  V(Create, Operator::kEliminatable, 2, 1)                  \
-  V(CreateIterResultObject, Operator::kEliminatable, 2, 1)  \
-  V(CreateKeyValueArray, Operator::kEliminatable, 2, 1)     \
-  V(HasProperty, Operator::kNoProperties, 2, 1)             \
-  V(TypeOf, Operator::kPure, 1, 1)                          \
-  V(InstanceOf, Operator::kNoProperties, 2, 1)              \
-  V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1)     \
-  V(ForInNext, Operator::kNoProperties, 4, 1)               \
-  V(ForInPrepare, Operator::kNoProperties, 1, 3)            \
-  V(LoadMessage, Operator::kNoThrow, 0, 1)                  \
-  V(StoreMessage, Operator::kNoThrow, 1, 0)                 \
-  V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
-  V(StackCheck, Operator::kNoWrite, 0, 0)
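+// Each entry is V(Name, properties, value_input_count, value_output_count).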
+#define CACHED_OP_LIST(V)                                       \
+  V(BitwiseOr, Operator::kNoProperties, 2, 1)                   \
+  V(BitwiseXor, Operator::kNoProperties, 2, 1)                  \
+  V(BitwiseAnd, Operator::kNoProperties, 2, 1)                  \
+  V(ShiftLeft, Operator::kNoProperties, 2, 1)                   \
+  V(ShiftRight, Operator::kNoProperties, 2, 1)                  \
+  V(ShiftRightLogical, Operator::kNoProperties, 2, 1)           \
+  V(Subtract, Operator::kNoProperties, 2, 1)                    \
+  V(Multiply, Operator::kNoProperties, 2, 1)                    \
+  V(Divide, Operator::kNoProperties, 2, 1)                      \
+  V(Modulus, Operator::kNoProperties, 2, 1)                     \
+  V(ToInteger, Operator::kNoProperties, 1, 1)                   \
+  V(ToLength, Operator::kNoProperties, 1, 1)                    \
+  V(ToName, Operator::kNoProperties, 1, 1)                      \
+  V(ToNumber, Operator::kNoProperties, 1, 1)                    \
+  V(ToObject, Operator::kFoldable, 1, 1)                        \
+  V(ToString, Operator::kNoProperties, 1, 1)                    \
+  V(Create, Operator::kNoProperties, 2, 1)                      \
+  V(CreateIterResultObject, Operator::kEliminatable, 2, 1)      \
+  V(CreateKeyValueArray, Operator::kEliminatable, 2, 1)         \
+  V(HasProperty, Operator::kNoProperties, 2, 1)                 \
+  V(ClassOf, Operator::kPure, 1, 1)                             \
+  V(TypeOf, Operator::kPure, 1, 1)                              \
+  V(InstanceOf, Operator::kNoProperties, 2, 1)                  \
+  V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1)         \
+  V(ForInNext, Operator::kNoProperties, 4, 1)                   \
+  V(ForInPrepare, Operator::kNoProperties, 1, 3)                \
+  V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
+  V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
+  V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1)     \
+  V(StackCheck, Operator::kNoWrite, 0, 0)                       \
+  V(Debugger, Operator::kNoProperties, 0, 0)                    \
+  V(GetSuperConstructor, Operator::kNoWrite, 1, 1)
 
-#define BINARY_OP_LIST(V) \
-  V(BitwiseOr)            \
-  V(BitwiseXor)           \
-  V(BitwiseAnd)           \
-  V(ShiftLeft)            \
-  V(ShiftRight)           \
-  V(ShiftRightLogical)    \
-  V(Add)                  \
-  V(Subtract)             \
-  V(Multiply)             \
-  V(Divide)               \
-  V(Modulus)
+#define BINARY_OP_LIST(V) V(Add)
 
 #define COMPARE_OP_LIST(V)                    \
   V(Equal, Operator::kNoProperties)           \
@@ -513,20 +637,24 @@
   BINARY_OP_LIST(BINARY_OP)
 #undef BINARY_OP
 
-#define COMPARE_OP(Name, properties)                                      \
-  template <CompareOperationHint kHint>                                   \
-  struct Name##Operator final : public Operator1<CompareOperationHint> {  \
-    Name##Operator()                                                      \
-        : Operator1<CompareOperationHint>(                                \
-              IrOpcode::kJS##Name, properties, "JS" #Name, 2, 1, 1, 1, 1, \
-              Operator::ZeroIfNoThrow(properties), kHint) {}              \
-  };                                                                      \
-  Name##Operator<CompareOperationHint::kNone> k##Name##NoneOperator;      \
-  Name##Operator<CompareOperationHint::kSignedSmall>                      \
-      k##Name##SignedSmallOperator;                                       \
-  Name##Operator<CompareOperationHint::kNumber> k##Name##NumberOperator;  \
-  Name##Operator<CompareOperationHint::kNumberOrOddball>                  \
-      k##Name##NumberOrOddballOperator;                                   \
+#define COMPARE_OP(Name, properties)                                         \
+  template <CompareOperationHint kHint>                                      \
+  struct Name##Operator final : public Operator1<CompareOperationHint> {     \
+    Name##Operator()                                                         \
+        : Operator1<CompareOperationHint>(                                   \
+              IrOpcode::kJS##Name, properties, "JS" #Name, 2, 1, 1, 1, 1,    \
+              Operator::ZeroIfNoThrow(properties), kHint) {}                 \
+  };                                                                         \
+  Name##Operator<CompareOperationHint::kNone> k##Name##NoneOperator;         \
+  Name##Operator<CompareOperationHint::kSignedSmall>                         \
+      k##Name##SignedSmallOperator;                                          \
+  Name##Operator<CompareOperationHint::kNumber> k##Name##NumberOperator;     \
+  Name##Operator<CompareOperationHint::kNumberOrOddball>                     \
+      k##Name##NumberOrOddballOperator;                                      \
+  Name##Operator<CompareOperationHint::kInternalizedString>                  \
+      k##Name##InternalizedStringOperator;                                   \
+  Name##Operator<CompareOperationHint::kString> k##Name##StringOperator;     \
+  Name##Operator<CompareOperationHint::kReceiver> k##Name##ReceiverOperator; \
   Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
   COMPARE_OP_LIST(COMPARE_OP)
 #undef COMPARE_OP
@@ -578,6 +706,12 @@
         return &cache_.k##Name##NumberOperator;                        \
       case CompareOperationHint::kNumberOrOddball:                     \
         return &cache_.k##Name##NumberOrOddballOperator;               \
+      case CompareOperationHint::kInternalizedString:                  \
+        return &cache_.k##Name##InternalizedStringOperator;            \
+      case CompareOperationHint::kString:                              \
+        return &cache_.k##Name##StringOperator;                        \
+      case CompareOperationHint::kReceiver:                            \
+        return &cache_.k##Name##ReceiverOperator;                      \
       case CompareOperationHint::kAny:                                 \
         return &cache_.k##Name##AnyOperator;                           \
     }                                                                  \
@@ -587,6 +721,17 @@
 COMPARE_OP_LIST(COMPARE_OP)
 #undef COMPARE_OP
 
+const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
+    const VectorSlotPair& feedback) {
+  DataPropertyParameters parameters(feedback);
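+  // The six "counts" passed to Operator1 below are, in order: value inputs,
+  // effect inputs, control inputs, value outputs, effect outputs and
+  // control outputs.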
+  return new (zone()) Operator1<DataPropertyParameters>(  // --
+      IrOpcode::kJSStoreDataPropertyInLiteral,
+      Operator::kNoThrow,              // opcode
+      "JSStoreDataPropertyInLiteral",  // name
+      4, 1, 1, 0, 1, 0,                // counts
+      parameters);                     // parameter
+}
+
 const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
   return new (zone()) Operator1<ToBooleanHints>(  //--
@@ -596,18 +741,37 @@
       hints);                                     // parameter
 }
 
-const Operator* JSOperatorBuilder::CallFunction(
-    size_t arity, float frequency, VectorSlotPair const& feedback,
-    ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
-  CallFunctionParameters parameters(arity, frequency, feedback, tail_call_mode,
-                                    convert_mode);
-  return new (zone()) Operator1<CallFunctionParameters>(   // --
-      IrOpcode::kJSCallFunction, Operator::kNoProperties,  // opcode
-      "JSCallFunction",                                    // name
-      parameters.arity(), 1, 1, 1, 1, 2,                   // inputs/outputs
-      parameters);                                         // parameter
+const Operator* JSOperatorBuilder::CallForwardVarargs(
+    uint32_t start_index, TailCallMode tail_call_mode) {
+  CallForwardVarargsParameters parameters(start_index, tail_call_mode);
+  return new (zone()) Operator1<CallForwardVarargsParameters>(   // --
+      IrOpcode::kJSCallForwardVarargs, Operator::kNoProperties,  // opcode
+      "JSCallForwardVarargs",                                    // name
+      2, 1, 1, 1, 1, 2,                                          // counts
+      parameters);                                               // parameter
 }
 
+const Operator* JSOperatorBuilder::Call(size_t arity, float frequency,
+                                        VectorSlotPair const& feedback,
+                                        ConvertReceiverMode convert_mode,
+                                        TailCallMode tail_call_mode) {
+  CallParameters parameters(arity, frequency, feedback, tail_call_mode,
+                            convert_mode);
+  return new (zone()) Operator1<CallParameters>(   // --
+      IrOpcode::kJSCall, Operator::kNoProperties,  // opcode
+      "JSCall",                                    // name
+      parameters.arity(), 1, 1, 1, 1, 2,           // inputs/outputs
+      parameters);                                 // parameter
+}
+
+const Operator* JSOperatorBuilder::CallWithSpread(uint32_t arity) {
+  CallWithSpreadParameters parameters(arity);
+  return new (zone()) Operator1<CallWithSpreadParameters>(   // --
+      IrOpcode::kJSCallWithSpread, Operator::kNoProperties,  // opcode
+      "JSCallWithSpread",                                    // name
+      parameters.arity(), 1, 1, 1, 1, 2,                     // counts
+      parameters);                                           // parameter
+}
 
 const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id) {
   const Runtime::Function* f = Runtime::FunctionForId(id);
@@ -633,16 +797,24 @@
       parameters);                                        // parameter
 }
 
-const Operator* JSOperatorBuilder::CallConstruct(
-    uint32_t arity, float frequency, VectorSlotPair const& feedback) {
-  CallConstructParameters parameters(arity, frequency, feedback);
-  return new (zone()) Operator1<CallConstructParameters>(   // --
-      IrOpcode::kJSCallConstruct, Operator::kNoProperties,  // opcode
-      "JSCallConstruct",                                    // name
-      parameters.arity(), 1, 1, 1, 1, 2,                    // counts
-      parameters);                                          // parameter
+const Operator* JSOperatorBuilder::Construct(uint32_t arity, float frequency,
+                                             VectorSlotPair const& feedback) {
+  ConstructParameters parameters(arity, frequency, feedback);
+  return new (zone()) Operator1<ConstructParameters>(   // --
+      IrOpcode::kJSConstruct, Operator::kNoProperties,  // opcode
+      "JSConstruct",                                    // name
+      parameters.arity(), 1, 1, 1, 1, 2,                // counts
+      parameters);                                      // parameter
 }
 
+const Operator* JSOperatorBuilder::ConstructWithSpread(uint32_t arity) {
+  ConstructWithSpreadParameters parameters(arity);
+  return new (zone()) Operator1<ConstructWithSpreadParameters>(   // --
+      IrOpcode::kJSConstructWithSpread, Operator::kNoProperties,  // opcode
+      "JSConstructWithSpread",                                    // name
+      parameters.arity(), 1, 1, 1, 1, 2,                          // counts
+      parameters);                                                // parameter
+}
 
 const Operator* JSOperatorBuilder::ConvertReceiver(
     ConvertReceiverMode convert_mode) {
@@ -659,7 +831,7 @@
   return new (zone()) Operator1<NamedAccess>(           // --
       IrOpcode::kJSLoadNamed, Operator::kNoProperties,  // opcode
       "JSLoadNamed",                                    // name
-      2, 1, 1, 1, 1, 2,                                 // counts
+      1, 1, 1, 1, 1, 2,                                 // counts
       access);                                          // parameter
 }
 
@@ -669,7 +841,7 @@
   return new (zone()) Operator1<PropertyAccess>(           // --
       IrOpcode::kJSLoadProperty, Operator::kNoProperties,  // opcode
       "JSLoadProperty",                                    // name
-      3, 1, 1, 1, 1, 2,                                    // counts
+      2, 1, 1, 1, 1, 2,                                    // counts
       access);                                             // parameter
 }
 
@@ -696,7 +868,7 @@
   return new (zone()) Operator1<NamedAccess>(            // --
       IrOpcode::kJSStoreNamed, Operator::kNoProperties,  // opcode
       "JSStoreNamed",                                    // name
-      3, 1, 1, 0, 1, 2,                                  // counts
+      2, 1, 1, 0, 1, 2,                                  // counts
       access);                                           // parameter
 }
 
@@ -707,10 +879,19 @@
   return new (zone()) Operator1<PropertyAccess>(            // --
       IrOpcode::kJSStoreProperty, Operator::kNoProperties,  // opcode
       "JSStoreProperty",                                    // name
-      4, 1, 1, 0, 1, 2,                                     // counts
+      3, 1, 1, 0, 1, 2,                                     // counts
       access);                                              // parameter
 }
 
+const Operator* JSOperatorBuilder::StoreNamedOwn(
+    Handle<Name> name, VectorSlotPair const& feedback) {
+  StoreNamedOwnParameters parameters(name, feedback);
+  return new (zone()) Operator1<StoreNamedOwnParameters>(   // --
+      IrOpcode::kJSStoreNamedOwn, Operator::kNoProperties,  // opcode
+      "JSStoreNamedOwn",                                    // name
+      2, 1, 1, 0, 1, 2,                                     // counts
+      parameters);                                          // parameter
+}
 
 const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
   return new (zone()) Operator1<LanguageMode>(               // --
@@ -728,7 +909,7 @@
   return new (zone()) Operator1<LoadGlobalParameters>(   // --
       IrOpcode::kJSLoadGlobal, Operator::kNoProperties,  // opcode
       "JSLoadGlobal",                                    // name
-      1, 1, 1, 1, 1, 2,                                  // counts
+      0, 1, 1, 1, 1, 2,                                  // counts
       parameters);                                       // parameter
 }
 
@@ -740,7 +921,7 @@
   return new (zone()) Operator1<StoreGlobalParameters>(   // --
       IrOpcode::kJSStoreGlobal, Operator::kNoProperties,  // opcode
       "JSStoreGlobal",                                    // name
-      2, 1, 1, 0, 1, 2,                                   // counts
+      1, 1, 1, 0, 1, 2,                                   // counts
       parameters);                                        // parameter
 }
 
@@ -752,7 +933,7 @@
       IrOpcode::kJSLoadContext,                  // opcode
       Operator::kNoWrite | Operator::kNoThrow,   // flags
       "JSLoadContext",                           // name
-      1, 1, 0, 1, 1, 0,                          // counts
+      0, 1, 0, 1, 1, 0,                          // counts
       access);                                   // parameter
 }
 
@@ -763,7 +944,7 @@
       IrOpcode::kJSStoreContext,                 // opcode
       Operator::kNoRead | Operator::kNoThrow,    // flags
       "JSStoreContext",                          // name
-      2, 1, 1, 0, 1, 0,                          // counts
+      1, 1, 1, 0, 1, 0,                          // counts
       access);                                   // parameter
 }
 
@@ -806,10 +987,10 @@
       parameters);                                        // parameter
 }
 
-
 const Operator* JSOperatorBuilder::CreateClosure(
-    Handle<SharedFunctionInfo> shared_info, PretenureFlag pretenure) {
-  CreateClosureParameters parameters(shared_info, pretenure);
+    Handle<SharedFunctionInfo> shared_info, VectorSlotPair const& feedback,
+    PretenureFlag pretenure) {
+  CreateClosureParameters parameters(shared_info, feedback, pretenure);
   return new (zone()) Operator1<CreateClosureParameters>(  // --
       IrOpcode::kJSCreateClosure, Operator::kNoThrow,      // opcode
       "JSCreateClosure",                                   // name
@@ -818,8 +999,8 @@
 }
 
 const Operator* JSOperatorBuilder::CreateLiteralArray(
-    Handle<FixedArray> constant_elements, int literal_flags, int literal_index,
-    int number_of_elements) {
+    Handle<ConstantElementsPair> constant_elements, int literal_flags,
+    int literal_index, int number_of_elements) {
   CreateLiteralParameters parameters(constant_elements, number_of_elements,
                                      literal_flags, literal_index);
   return new (zone()) Operator1<CreateLiteralParameters>(        // --
@@ -830,7 +1011,7 @@
 }
 
 const Operator* JSOperatorBuilder::CreateLiteralObject(
-    Handle<FixedArray> constant_properties, int literal_flags,
+    Handle<BoilerplateDescription> constant_properties, int literal_flags,
     int literal_index, int number_of_properties) {
   CreateLiteralParameters parameters(constant_properties, number_of_properties,
                                      literal_flags, literal_index);
@@ -853,13 +1034,14 @@
       parameters);                                                // parameter
 }
 
-
-const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count) {
-  return new (zone()) Operator1<int>(                               // --
+const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count,
+                                                         ScopeType scope_type) {
+  CreateFunctionContextParameters parameters(slot_count, scope_type);
+  return new (zone()) Operator1<CreateFunctionContextParameters>(   // --
       IrOpcode::kJSCreateFunctionContext, Operator::kNoProperties,  // opcode
       "JSCreateFunctionContext",                                    // name
       1, 1, 1, 1, 1, 2,                                             // counts
-      slot_count);                                                  // parameter
+      parameters);                                                  // parameter
 }
 
 const Operator* JSOperatorBuilder::CreateCatchContext(
@@ -882,22 +1064,21 @@
 }
 
 const Operator* JSOperatorBuilder::CreateBlockContext(
-    const Handle<ScopeInfo>& scpope_info) {
+    const Handle<ScopeInfo>& scope_info) {
   return new (zone()) Operator1<Handle<ScopeInfo>>(              // --
       IrOpcode::kJSCreateBlockContext, Operator::kNoProperties,  // opcode
       "JSCreateBlockContext",                                    // name
       1, 1, 1, 1, 1, 2,                                          // counts
-      scpope_info);                                              // parameter
+      scope_info);                                               // parameter
 }
 
-
 const Operator* JSOperatorBuilder::CreateScriptContext(
-    const Handle<ScopeInfo>& scpope_info) {
+    const Handle<ScopeInfo>& scope_info) {
   return new (zone()) Operator1<Handle<ScopeInfo>>(               // --
       IrOpcode::kJSCreateScriptContext, Operator::kNoProperties,  // opcode
       "JSCreateScriptContext",                                    // name
       1, 1, 1, 1, 1, 2,                                           // counts
-      scpope_info);                                               // parameter
+      scope_info);                                                // parameter
 }
 
 }  // namespace compiler
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index 9cdd305..730b4b9 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -7,36 +7,43 @@
 
 #include "src/base/compiler-specific.h"
 #include "src/globals.h"
+#include "src/handles.h"
 #include "src/runtime/runtime.h"
 #include "src/type-hints.h"
 
 namespace v8 {
 namespace internal {
+
+class AllocationSite;
+class BoilerplateDescription;
+class ConstantElementsPair;
+class SharedFunctionInfo;
+class FeedbackVector;
+
 namespace compiler {
 
 // Forward declarations.
 class Operator;
 struct JSOperatorGlobalCache;
 
-
-// Defines a pair of {TypeFeedbackVector} and {TypeFeedbackVectorSlot}, which
+// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
 // is used to access the type feedback for a certain {Node}.
 class V8_EXPORT_PRIVATE VectorSlotPair {
  public:
   VectorSlotPair();
-  VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+  VectorSlotPair(Handle<FeedbackVector> vector, FeedbackSlot slot)
       : vector_(vector), slot_(slot) {}
 
   bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
 
-  Handle<TypeFeedbackVector> vector() const { return vector_; }
-  FeedbackVectorSlot slot() const { return slot_; }
+  Handle<FeedbackVector> vector() const { return vector_; }
+  FeedbackSlot slot() const { return slot_; }
 
   int index() const;
 
  private:
-  const Handle<TypeFeedbackVector> vector_;
-  const FeedbackVectorSlot slot_;
+  const Handle<FeedbackVector> vector_;
+  const FeedbackSlot slot_;
 };
 
 bool operator==(VectorSlotPair const&, VectorSlotPair const&);
@@ -54,11 +61,11 @@
 
 
 // Defines the arity and the feedback for a JavaScript constructor call. This is
-// used as a parameter by JSCallConstruct operators.
-class CallConstructParameters final {
+// used as a parameter by JSConstruct operators.
+class ConstructParameters final {
  public:
-  CallConstructParameters(uint32_t arity, float frequency,
-                          VectorSlotPair const& feedback)
+  ConstructParameters(uint32_t arity, float frequency,
+                      VectorSlotPair const& feedback)
       : arity_(arity), frequency_(frequency), feedback_(feedback) {}
 
   uint32_t arity() const { return arity_; }
@@ -71,24 +78,83 @@
   VectorSlotPair const feedback_;
 };
 
-bool operator==(CallConstructParameters const&, CallConstructParameters const&);
-bool operator!=(CallConstructParameters const&, CallConstructParameters const&);
+bool operator==(ConstructParameters const&, ConstructParameters const&);
+bool operator!=(ConstructParameters const&, ConstructParameters const&);
 
-size_t hash_value(CallConstructParameters const&);
+size_t hash_value(ConstructParameters const&);
 
-std::ostream& operator<<(std::ostream&, CallConstructParameters const&);
+std::ostream& operator<<(std::ostream&, ConstructParameters const&);
 
-CallConstructParameters const& CallConstructParametersOf(Operator const*);
+ConstructParameters const& ConstructParametersOf(Operator const*);
 
+// Defines the arity for a JavaScript constructor call with a spread as the
+// last parameter. This is used as a parameter by JSConstructWithSpread
+// operators.
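+// (For example, 'new C(x, ...args)' is represented by a JSConstructWithSpread
+// node.)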
+class ConstructWithSpreadParameters final {
+ public:
+  explicit ConstructWithSpreadParameters(uint32_t arity) : arity_(arity) {}
+
+  uint32_t arity() const { return arity_; }
+
+ private:
+  uint32_t const arity_;
+};
+
+bool operator==(ConstructWithSpreadParameters const&,
+                ConstructWithSpreadParameters const&);
+bool operator!=(ConstructWithSpreadParameters const&,
+                ConstructWithSpreadParameters const&);
+
+size_t hash_value(ConstructWithSpreadParameters const&);
+
+std::ostream& operator<<(std::ostream&, ConstructWithSpreadParameters const&);
+
+ConstructWithSpreadParameters const& ConstructWithSpreadParametersOf(
+    Operator const*);
+
+// Defines the start index and the tail-call mode for a JavaScript call
+// forwarding the caller's parameters. This is used as a parameter by
+// JSCallForwardVarargs operators.
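+// (This supports, for instance, forwarding rest parameters as in
+// 'function f(...args) { return g(...args); }' without materializing an
+// arguments array; the exact usage is an assumption, not spelled out in
+// this patch.)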
+class CallForwardVarargsParameters final {
+ public:
+  CallForwardVarargsParameters(uint32_t start_index,
+                               TailCallMode tail_call_mode)
+      : bit_field_(StartIndexField::encode(start_index) |
+                   TailCallModeField::encode(tail_call_mode)) {}
+
+  uint32_t start_index() const { return StartIndexField::decode(bit_field_); }
+  TailCallMode tail_call_mode() const {
+    return TailCallModeField::decode(bit_field_);
+  }
+
+  bool operator==(CallForwardVarargsParameters const& that) const {
+    return this->bit_field_ == that.bit_field_;
+  }
+  bool operator!=(CallForwardVarargsParameters const& that) const {
+    return !(*this == that);
+  }
+
+ private:
+  friend size_t hash_value(CallForwardVarargsParameters const& p) {
+    return p.bit_field_;
+  }
+
+  typedef BitField<uint32_t, 0, 30> StartIndexField;
+  typedef BitField<TailCallMode, 31, 1> TailCallModeField;
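+  // Bits 0-29 hold the start index and bit 31 the tail-call mode; bit 30
+  // is unused.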
+
+  uint32_t const bit_field_;
+};
+
+std::ostream& operator<<(std::ostream&, CallForwardVarargsParameters const&);
+
+CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
+    Operator const*) WARN_UNUSED_RESULT;
 
 // Defines the arity and the call flags for a JavaScript function call. This is
-// used as a parameter by JSCallFunction operators.
-class CallFunctionParameters final {
+// used as a parameter by JSCall operators.
+class CallParameters final {
  public:
-  CallFunctionParameters(size_t arity, float frequency,
-                         VectorSlotPair const& feedback,
-                         TailCallMode tail_call_mode,
-                         ConvertReceiverMode convert_mode)
+  CallParameters(size_t arity, float frequency, VectorSlotPair const& feedback,
+                 TailCallMode tail_call_mode, ConvertReceiverMode convert_mode)
       : bit_field_(ArityField::encode(arity) |
                    ConvertReceiverModeField::encode(convert_mode) |
                    TailCallModeField::encode(tail_call_mode)),
@@ -105,17 +171,15 @@
   }
   VectorSlotPair const& feedback() const { return feedback_; }
 
-  bool operator==(CallFunctionParameters const& that) const {
+  bool operator==(CallParameters const& that) const {
     return this->bit_field_ == that.bit_field_ &&
            this->frequency_ == that.frequency_ &&
            this->feedback_ == that.feedback_;
   }
-  bool operator!=(CallFunctionParameters const& that) const {
-    return !(*this == that);
-  }
+  bool operator!=(CallParameters const& that) const { return !(*this == that); }
 
  private:
-  friend size_t hash_value(CallFunctionParameters const& p) {
+  friend size_t hash_value(CallParameters const& p) {
     return base::hash_combine(p.bit_field_, p.frequency_, p.feedback_);
   }
 
@@ -128,12 +192,35 @@
   VectorSlotPair const feedback_;
 };
 
-size_t hash_value(CallFunctionParameters const&);
+size_t hash_value(CallParameters const&);
 
-std::ostream& operator<<(std::ostream&, CallFunctionParameters const&);
+std::ostream& operator<<(std::ostream&, CallParameters const&);
 
-const CallFunctionParameters& CallFunctionParametersOf(const Operator* op);
+const CallParameters& CallParametersOf(const Operator* op);
 
+// Defines the arity for a JavaScript function call with a spread as the last
+// parameter. This is used as a parameter by JSCallWithSpread operators.
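+// (For example, 'f(x, ...args)' is represented by a JSCallWithSpread node.)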
+class CallWithSpreadParameters final {
+ public:
+  explicit CallWithSpreadParameters(uint32_t arity) : arity_(arity) {}
+
+  uint32_t arity() const { return arity_; }
+
+ private:
+  uint32_t const arity_;
+};
+
+bool operator==(CallWithSpreadParameters const&,
+                CallWithSpreadParameters const&);
+bool operator!=(CallWithSpreadParameters const&,
+                CallWithSpreadParameters const&);
+
+size_t hash_value(CallWithSpreadParameters const&);
+
+std::ostream& operator<<(std::ostream&, CallWithSpreadParameters const&);
+
+CallWithSpreadParameters const& CallWithSpreadParametersOf(Operator const*);
 
 // Defines the arity and the ID for a runtime function call. This is used as a
 // parameter by JSCallRuntime operators.
@@ -216,6 +303,79 @@
 CreateCatchContextParameters const& CreateCatchContextParametersOf(
     Operator const*);
 
+// Defines the slot count and ScopeType for a new function or eval context. This
+// is used as a parameter by the JSCreateFunctionContext operator.
+class CreateFunctionContextParameters final {
+ public:
+  CreateFunctionContextParameters(int slot_count, ScopeType scope_type);
+
+  int slot_count() const { return slot_count_; }
+  ScopeType scope_type() const { return scope_type_; }
+
+ private:
+  int const slot_count_;
+  ScopeType const scope_type_;
+};
+
+bool operator==(CreateFunctionContextParameters const& lhs,
+                CreateFunctionContextParameters const& rhs);
+bool operator!=(CreateFunctionContextParameters const& lhs,
+                CreateFunctionContextParameters const& rhs);
+
+size_t hash_value(CreateFunctionContextParameters const& parameters);
+
+std::ostream& operator<<(std::ostream& os,
+                         CreateFunctionContextParameters const& parameters);
+
+CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
+    Operator const*);
+
+// Defines the parameters for the JSStoreNamedOwn operator.
+class StoreNamedOwnParameters final {
+ public:
+  StoreNamedOwnParameters(Handle<Name> name, VectorSlotPair const& feedback)
+      : name_(name), feedback_(feedback) {}
+
+  Handle<Name> name() const { return name_; }
+  VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+  Handle<Name> const name_;
+  VectorSlotPair const feedback_;
+};
+
+bool operator==(StoreNamedOwnParameters const&, StoreNamedOwnParameters const&);
+bool operator!=(StoreNamedOwnParameters const&, StoreNamedOwnParameters const&);
+
+size_t hash_value(StoreNamedOwnParameters const&);
+
+std::ostream& operator<<(std::ostream&, StoreNamedOwnParameters const&);
+
+const StoreNamedOwnParameters& StoreNamedOwnParametersOf(const Operator* op);
+
+// Defines the feedback, i.e., vector and index, for storing a data property
+// in an object literal. This is used as a parameter by the
+// JSStoreDataPropertyInLiteral operator.
+class DataPropertyParameters final {
+ public:
+  explicit DataPropertyParameters(VectorSlotPair const& feedback)
+      : feedback_(feedback) {}
+
+  VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+  VectorSlotPair const feedback_;
+};
+
+bool operator==(DataPropertyParameters const&, DataPropertyParameters const&);
+bool operator!=(DataPropertyParameters const&, DataPropertyParameters const&);
+
+size_t hash_value(DataPropertyParameters const&);
+
+std::ostream& operator<<(std::ostream&, DataPropertyParameters const&);
+
+const DataPropertyParameters& DataPropertyParametersOf(const Operator* op);
+
 // Defines the property of an object for a named access. This is
 // used as a parameter by the JSLoadNamed and JSStoreNamed operators.
 class NamedAccess final {
@@ -361,14 +521,17 @@
 class CreateClosureParameters final {
  public:
   CreateClosureParameters(Handle<SharedFunctionInfo> shared_info,
+                          VectorSlotPair const& feedback,
                           PretenureFlag pretenure)
-      : shared_info_(shared_info), pretenure_(pretenure) {}
+      : shared_info_(shared_info), feedback_(feedback), pretenure_(pretenure) {}
 
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+  VectorSlotPair const& feedback() const { return feedback_; }
   PretenureFlag pretenure() const { return pretenure_; }
 
  private:
   const Handle<SharedFunctionInfo> shared_info_;
+  VectorSlotPair const feedback_;
   const PretenureFlag pretenure_;
 };
 
@@ -432,17 +595,17 @@
   const Operator* LessThanOrEqual(CompareOperationHint hint);
   const Operator* GreaterThanOrEqual(CompareOperationHint hint);
 
-  const Operator* BitwiseOr(BinaryOperationHint hint);
-  const Operator* BitwiseXor(BinaryOperationHint hint);
-  const Operator* BitwiseAnd(BinaryOperationHint hint);
-  const Operator* ShiftLeft(BinaryOperationHint hint);
-  const Operator* ShiftRight(BinaryOperationHint hint);
-  const Operator* ShiftRightLogical(BinaryOperationHint hint);
+  const Operator* BitwiseOr();
+  const Operator* BitwiseXor();
+  const Operator* BitwiseAnd();
+  const Operator* ShiftLeft();
+  const Operator* ShiftRight();
+  const Operator* ShiftRightLogical();
   const Operator* Add(BinaryOperationHint hint);
-  const Operator* Subtract(BinaryOperationHint hint);
-  const Operator* Multiply(BinaryOperationHint hint);
-  const Operator* Divide(BinaryOperationHint hint);
-  const Operator* Modulus(BinaryOperationHint hint);
+  const Operator* Subtract();
+  const Operator* Multiply();
+  const Operator* Divide();
+  const Operator* Modulus();
 
   const Operator* ToBoolean(ToBooleanHints hints);
   const Operator* ToInteger();
@@ -456,28 +619,33 @@
   const Operator* CreateArguments(CreateArgumentsType type);
   const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
   const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
+                                VectorSlotPair const& feedback,
                                 PretenureFlag pretenure);
   const Operator* CreateIterResultObject();
   const Operator* CreateKeyValueArray();
-  const Operator* CreateLiteralArray(Handle<FixedArray> constant_elements,
+  const Operator* CreateLiteralArray(Handle<ConstantElementsPair> constant,
                                      int literal_flags, int literal_index,
                                      int number_of_elements);
-  const Operator* CreateLiteralObject(Handle<FixedArray> constant_properties,
+  const Operator* CreateLiteralObject(Handle<BoilerplateDescription> constant,
                                       int literal_flags, int literal_index,
                                       int number_of_properties);
   const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
                                       int literal_flags, int literal_index);
 
-  const Operator* CallFunction(
+  const Operator* CallForwardVarargs(uint32_t start_index,
+                                     TailCallMode tail_call_mode);
+  const Operator* Call(
       size_t arity, float frequency = 0.0f,
       VectorSlotPair const& feedback = VectorSlotPair(),
       ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
       TailCallMode tail_call_mode = TailCallMode::kDisallow);
+  const Operator* CallWithSpread(uint32_t arity);
   const Operator* CallRuntime(Runtime::FunctionId id);
   const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
   const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
-  const Operator* CallConstruct(uint32_t arity, float frequency,
-                                VectorSlotPair const& feedback);
+  const Operator* Construct(uint32_t arity, float frequency,
+                            VectorSlotPair const& feedback);
+  const Operator* ConstructWithSpread(uint32_t arity);
 
   const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
 
@@ -489,10 +657,16 @@
   const Operator* StoreNamed(LanguageMode language_mode, Handle<Name> name,
                              VectorSlotPair const& feedback);
 
+  const Operator* StoreNamedOwn(Handle<Name> name,
+                                VectorSlotPair const& feedback);
+  const Operator* StoreDataPropertyInLiteral(const VectorSlotPair& feedback);
+
   const Operator* DeleteProperty(LanguageMode language_mode);
 
   const Operator* HasProperty();
 
+  const Operator* GetSuperConstructor();
+
   const Operator* LoadGlobal(const Handle<Name>& name,
                              const VectorSlotPair& feedback,
                              TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
@@ -506,6 +680,7 @@
   const Operator* LoadModule(int32_t cell_index);
   const Operator* StoreModule(int32_t cell_index);
 
+  const Operator* ClassOf();
   const Operator* TypeOf();
   const Operator* InstanceOf();
   const Operator* OrdinaryHasInstance();
@@ -524,8 +699,9 @@
   const Operator* GeneratorRestoreRegister(int index);
 
   const Operator* StackCheck();
+  const Operator* Debugger();
 
-  const Operator* CreateFunctionContext(int slot_count);
+  const Operator* CreateFunctionContext(int slot_count, ScopeType scope_type);
   const Operator* CreateCatchContext(const Handle<String>& name,
                                      const Handle<ScopeInfo>& scope_info);
   const Operator* CreateWithContext(const Handle<ScopeInfo>& scope_info);
diff --git a/src/compiler/js-type-hint-lowering.cc b/src/compiler/js-type-hint-lowering.cc
new file mode 100644
index 0000000..e30e016
--- /dev/null
+++ b/src/compiler/js-type-hint-lowering.cc
@@ -0,0 +1,153 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-type-hint-lowering.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/feedback-vector.h"
+#include "src/type-hints.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSSpeculativeBinopBuilder final {
+ public:
+  JSSpeculativeBinopBuilder(JSTypeHintLowering* lowering, const Operator* op,
+                            Node* left, Node* right, Node* effect,
+                            Node* control, FeedbackSlot slot)
+      : lowering_(lowering),
+        op_(op),
+        left_(left),
+        right_(right),
+        effect_(effect),
+        control_(control),
+        slot_(slot) {}
+
+  BinaryOperationHint GetBinaryOperationHint() {
+    DCHECK_EQ(FeedbackSlotKind::kBinaryOp, feedback_vector()->GetKind(slot_));
+    BinaryOpICNexus nexus(feedback_vector(), slot_);
+    return nexus.GetBinaryOperationFeedback();
+  }
+
+  bool GetBinaryNumberOperationHint(NumberOperationHint* hint) {
+    switch (GetBinaryOperationHint()) {
+      case BinaryOperationHint::kSignedSmall:
+        *hint = NumberOperationHint::kSignedSmall;
+        return true;
+      case BinaryOperationHint::kSigned32:
+        *hint = NumberOperationHint::kSigned32;
+        return true;
+      case BinaryOperationHint::kNumberOrOddball:
+        *hint = NumberOperationHint::kNumberOrOddball;
+        return true;
+      case BinaryOperationHint::kAny:
+      case BinaryOperationHint::kNone:
+      case BinaryOperationHint::kString:
+        break;
+    }
+    return false;
+  }
+
+  const Operator* SpeculativeNumberOp(NumberOperationHint hint) {
+    switch (op_->opcode()) {
+      case IrOpcode::kJSAdd:
+        return simplified()->SpeculativeNumberAdd(hint);
+      case IrOpcode::kJSSubtract:
+        return simplified()->SpeculativeNumberSubtract(hint);
+      case IrOpcode::kJSMultiply:
+        return simplified()->SpeculativeNumberMultiply(hint);
+      case IrOpcode::kJSDivide:
+        return simplified()->SpeculativeNumberDivide(hint);
+      case IrOpcode::kJSModulus:
+        return simplified()->SpeculativeNumberModulus(hint);
+      case IrOpcode::kJSBitwiseAnd:
+        return simplified()->SpeculativeNumberBitwiseAnd(hint);
+      case IrOpcode::kJSBitwiseOr:
+        return simplified()->SpeculativeNumberBitwiseOr(hint);
+      case IrOpcode::kJSBitwiseXor:
+        return simplified()->SpeculativeNumberBitwiseXor(hint);
+      case IrOpcode::kJSShiftLeft:
+        return simplified()->SpeculativeNumberShiftLeft(hint);
+      case IrOpcode::kJSShiftRight:
+        return simplified()->SpeculativeNumberShiftRight(hint);
+      case IrOpcode::kJSShiftRightLogical:
+        return simplified()->SpeculativeNumberShiftRightLogical(hint);
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+  Node* BuildSpeculativeOperator(const Operator* op) {
+    DCHECK_EQ(2, op->ValueInputCount());
+    DCHECK_EQ(1, op->EffectInputCount());
+    DCHECK_EQ(1, op->ControlInputCount());
+    DCHECK_EQ(false, OperatorProperties::HasFrameStateInput(op));
+    DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
+    DCHECK_EQ(1, op->EffectOutputCount());
+    DCHECK_EQ(0, op->ControlOutputCount());
+    return graph()->NewNode(op, left_, right_, effect_, control_);
+  }
+
+  JSGraph* jsgraph() const { return lowering_->jsgraph(); }
+  Graph* graph() const { return jsgraph()->graph(); }
+  JSOperatorBuilder* javascript() { return jsgraph()->javascript(); }
+  SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
+  CommonOperatorBuilder* common() { return jsgraph()->common(); }
+  const Handle<FeedbackVector>& feedback_vector() const {
+    return lowering_->feedback_vector();
+  }
+
+ private:
+  JSTypeHintLowering* lowering_;
+  const Operator* op_;
+  Node* left_;
+  Node* right_;
+  Node* effect_;
+  Node* control_;
+  FeedbackSlot slot_;
+};
+
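+// Editor's illustration (not part of the original change): with
+// BinaryOperationHint::kSignedSmall feedback, the builder above rewrites a
+// generic JSAdd into
+//
+//   SpeculativeNumberAdd[SignedSmall](left, right, effect, control)
+//
+// i.e. a simplified operator that deoptimizes at runtime if either input
+// turns out not to be a small integer.
+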
+JSTypeHintLowering::JSTypeHintLowering(JSGraph* jsgraph,
+                                       Handle<FeedbackVector> feedback_vector)
+    : jsgraph_(jsgraph), feedback_vector_(feedback_vector) {}
+
+Reduction JSTypeHintLowering::ReduceBinaryOperation(const Operator* op,
+                                                    Node* left, Node* right,
+                                                    Node* effect, Node* control,
+                                                    FeedbackSlot slot) {
+  switch (op->opcode()) {
+    case IrOpcode::kJSBitwiseOr:
+    case IrOpcode::kJSBitwiseXor:
+    case IrOpcode::kJSBitwiseAnd:
+    case IrOpcode::kJSShiftLeft:
+    case IrOpcode::kJSShiftRight:
+    case IrOpcode::kJSShiftRightLogical:
+    case IrOpcode::kJSAdd:
+    case IrOpcode::kJSSubtract:
+    case IrOpcode::kJSMultiply:
+    case IrOpcode::kJSDivide:
+    case IrOpcode::kJSModulus: {
+      JSSpeculativeBinopBuilder b(this, op, left, right, effect, control, slot);
+      NumberOperationHint hint;
+      if (b.GetBinaryNumberOperationHint(&hint)) {
+        Node* node = b.BuildSpeculativeOperator(b.SpeculativeNumberOp(hint));
+        return Reduction(node);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return Reduction();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-type-hint-lowering.h b/src/compiler/js-type-hint-lowering.h
new file mode 100644
index 0000000..d1dd1a8
--- /dev/null
+++ b/src/compiler/js-type-hint-lowering.h
@@ -0,0 +1,54 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_TYPE_HINT_LOWERING_H_
+#define V8_COMPILER_JS_TYPE_HINT_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+
+// The type-hint lowering consumes feedback about data operations (i.e. unary
+// and binary operations) to emit nodes using speculative simplified operators
+// in place of the generic JavaScript operators.
+//
+// This lowering is implemented as an early reduction and can be applied before
+// nodes are placed into the initial graph. It provides the ability to shortcut
+// the JavaScript-level operators and directly emit simplified-level operators
+// even during initial graph building. This is the reason this lowering doesn't
+// follow the interface of the reducer framework used after graph construction.
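+//
+// A hypothetical use (editor's sketch; the surrounding names are assumed):
+//
+//   JSTypeHintLowering lowering(jsgraph, feedback_vector);
+//   Reduction r = lowering.ReduceBinaryOperation(
+//       javascript()->Add(hint), left, right, effect, control, slot);
+//   if (r.Changed()) node = r.replacement();  // Speculative node was built.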
+class JSTypeHintLowering {
+ public:
+  JSTypeHintLowering(JSGraph* jsgraph, Handle<FeedbackVector> feedback_vector);
+
+  // Potential reduction of binary (arithmetic, logical and shift) operations.
+  Reduction ReduceBinaryOperation(const Operator* op, Node* left, Node* right,
+                                  Node* effect, Node* control,
+                                  FeedbackSlot slot);
+
+ private:
+  friend class JSSpeculativeBinopBuilder;
+
+  JSGraph* jsgraph() const { return jsgraph_; }
+  const Handle<FeedbackVector>& feedback_vector() const {
+    return feedback_vector_;
+  }
+
+  JSGraph* jsgraph_;
+  Handle<FeedbackVector> feedback_vector_;
+
+  DISALLOW_COPY_AND_ASSIGN(JSTypeHintLowering);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_TYPE_HINT_LOWERING_H_
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index dbbeca6..31accbd 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -16,6 +16,7 @@
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/type-cache.h"
 #include "src/compiler/types.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -30,30 +31,6 @@
   JSBinopReduction(JSTypedLowering* lowering, Node* node)
       : lowering_(lowering), node_(node) {}
 
-  bool GetBinaryNumberOperationHint(NumberOperationHint* hint) {
-    if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
-      DCHECK_NE(0, node_->op()->ControlOutputCount());
-      DCHECK_EQ(1, node_->op()->EffectOutputCount());
-      DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node_->op()));
-      switch (BinaryOperationHintOf(node_->op())) {
-        case BinaryOperationHint::kSignedSmall:
-          *hint = NumberOperationHint::kSignedSmall;
-          return true;
-        case BinaryOperationHint::kSigned32:
-          *hint = NumberOperationHint::kSigned32;
-          return true;
-        case BinaryOperationHint::kNumberOrOddball:
-          *hint = NumberOperationHint::kNumberOrOddball;
-          return true;
-        case BinaryOperationHint::kAny:
-        case BinaryOperationHint::kNone:
-        case BinaryOperationHint::kString:
-          break;
-      }
-    }
-    return false;
-  }
-
   bool GetCompareNumberOperationHint(NumberOperationHint* hint) {
     if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
       DCHECK_EQ(1, node_->op()->EffectOutputCount());
@@ -69,17 +46,51 @@
           return true;
         case CompareOperationHint::kAny:
         case CompareOperationHint::kNone:
+        case CompareOperationHint::kString:
+        case CompareOperationHint::kReceiver:
+        case CompareOperationHint::kInternalizedString:
           break;
       }
     }
     return false;
   }
 
+  bool IsInternalizedStringCompareOperation() {
+    if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+      DCHECK_EQ(1, node_->op()->EffectOutputCount());
+      return (CompareOperationHintOf(node_->op()) ==
+              CompareOperationHint::kInternalizedString) &&
+             BothInputsMaybe(Type::InternalizedString());
+    }
+    return false;
+  }
+
+  bool IsReceiverCompareOperation() {
+    if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+      DCHECK_EQ(1, node_->op()->EffectOutputCount());
+      return (CompareOperationHintOf(node_->op()) ==
+              CompareOperationHint::kReceiver) &&
+             BothInputsMaybe(Type::Receiver());
+    }
+    return false;
+  }
+
+  bool IsStringCompareOperation() {
+    if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+      DCHECK_EQ(1, node_->op()->EffectOutputCount());
+      return (CompareOperationHintOf(node_->op()) ==
+              CompareOperationHint::kString) &&
+             BothInputsMaybe(Type::String());
+    }
+    return false;
+  }
+
   // Check if a string addition will definitely result in creating a ConsString,
   // i.e. if the combined length of the resulting string exceeds the ConsString
   // minimum length.
   bool ShouldCreateConsString() {
     DCHECK_EQ(IrOpcode::kJSAdd, node_->opcode());
+    DCHECK(OneInputIs(Type::String()));
     if (BothInputsAre(Type::String()) ||
         ((lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) &&
          BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString)) {
@@ -103,6 +114,66 @@
     return false;
   }
 
+  // Inserts a CheckReceiver for the left input.
+  void CheckLeftInputToReceiver() {
+    Node* left_input = graph()->NewNode(simplified()->CheckReceiver(), left(),
+                                        effect(), control());
+    node_->ReplaceInput(0, left_input);
+    update_effect(left_input);
+  }
+
+  // Checks that both inputs are Receiver, and if we don't know
+  // statically that one side is already a Receiver, insert a
+  // CheckReceiver node.
+  void CheckInputsToReceiver() {
+    if (!left_type()->Is(Type::Receiver())) {
+      CheckLeftInputToReceiver();
+    }
+    if (!right_type()->Is(Type::Receiver())) {
+      Node* right_input = graph()->NewNode(simplified()->CheckReceiver(),
+                                           right(), effect(), control());
+      node_->ReplaceInput(1, right_input);
+      update_effect(right_input);
+    }
+  }
+
+  // Checks that both inputs are String, and if we don't know
+  // statically that one side is already a String, insert a
+  // CheckString node.
+  void CheckInputsToString() {
+    if (!left_type()->Is(Type::String())) {
+      Node* left_input = graph()->NewNode(simplified()->CheckString(), left(),
+                                          effect(), control());
+      node_->ReplaceInput(0, left_input);
+      update_effect(left_input);
+    }
+    if (!right_type()->Is(Type::String())) {
+      Node* right_input = graph()->NewNode(simplified()->CheckString(), right(),
+                                           effect(), control());
+      node_->ReplaceInput(1, right_input);
+      update_effect(right_input);
+    }
+  }
+
+  // Checks that both inputs are InternalizedString, and if we don't know
+  // statically that one side is already an InternalizedString, insert a
+  // CheckInternalizedString node.
+  void CheckInputsToInternalizedString() {
+    if (!left_type()->Is(Type::UniqueName())) {
+      Node* left_input = graph()->NewNode(
+          simplified()->CheckInternalizedString(), left(), effect(), control());
+      node_->ReplaceInput(0, left_input);
+      update_effect(left_input);
+    }
+    if (!right_type()->Is(Type::UniqueName())) {
+      Node* right_input =
+          graph()->NewNode(simplified()->CheckInternalizedString(), right(),
+                           effect(), control());
+      node_->ReplaceInput(1, right_input);
+      update_effect(right_input);
+    }
+  }
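+  // (Editor's note: internalized strings are deduplicated by the runtime, so
+  // once both inputs pass CheckInternalizedString, equality can be decided
+  // with a plain ReferenceEqual on the string pointers, as done in
+  // ReduceJSEqual and ReduceJSStrictEqual below.)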
+
   void ConvertInputsToNumber() {
     // To convert the inputs to numbers, we have to provide frame states
     // for lazy bailouts in the ToNumber conversions.
@@ -277,30 +348,18 @@
     return nullptr;
   }
 
-  const Operator* SpeculativeNumberOp(NumberOperationHint hint) {
+  const Operator* NumberOpFromSpeculativeNumberOp() {
     switch (node_->opcode()) {
-      case IrOpcode::kJSAdd:
-        return simplified()->SpeculativeNumberAdd(hint);
-      case IrOpcode::kJSSubtract:
-        return simplified()->SpeculativeNumberSubtract(hint);
-      case IrOpcode::kJSMultiply:
-        return simplified()->SpeculativeNumberMultiply(hint);
-      case IrOpcode::kJSDivide:
-        return simplified()->SpeculativeNumberDivide(hint);
-      case IrOpcode::kJSModulus:
-        return simplified()->SpeculativeNumberModulus(hint);
-      case IrOpcode::kJSBitwiseAnd:
-        return simplified()->SpeculativeNumberBitwiseAnd(hint);
-      case IrOpcode::kJSBitwiseOr:
-        return simplified()->SpeculativeNumberBitwiseOr(hint);
-      case IrOpcode::kJSBitwiseXor:
-        return simplified()->SpeculativeNumberBitwiseXor(hint);
-      case IrOpcode::kJSShiftLeft:
-        return simplified()->SpeculativeNumberShiftLeft(hint);
-      case IrOpcode::kJSShiftRight:
-        return simplified()->SpeculativeNumberShiftRight(hint);
-      case IrOpcode::kJSShiftRightLogical:
-        return simplified()->SpeculativeNumberShiftRightLogical(hint);
+      case IrOpcode::kSpeculativeNumberAdd:
+        return simplified()->NumberAdd();
+      case IrOpcode::kSpeculativeNumberSubtract:
+        return simplified()->NumberSubtract();
+      case IrOpcode::kSpeculativeNumberMultiply:
+        return simplified()->NumberMultiply();
+      case IrOpcode::kSpeculativeNumberDivide:
+        return simplified()->NumberDivide();
+      case IrOpcode::kSpeculativeNumberModulus:
+        return simplified()->NumberModulus();
       default:
         break;
     }
@@ -316,6 +375,10 @@
 
   bool BothInputsAre(Type* t) { return LeftInputIs(t) && RightInputIs(t); }
 
+  bool BothInputsMaybe(Type* t) {
+    return left_type()->Maybe(t) && right_type()->Maybe(t);
+  }
+
   bool OneInputCannotBe(Type* t) {
     return !left_type()->Maybe(t) || !right_type()->Maybe(t);
   }
@@ -459,8 +522,13 @@
       dependencies_(dependencies),
       flags_(flags),
       jsgraph_(jsgraph),
-      the_hole_type_(
-          Type::HeapConstant(factory()->the_hole_value(), graph()->zone())),
+      pointer_comparable_type_(Type::Union(
+          Type::Oddball(),
+          Type::Union(
+              Type::SymbolOrReceiver(),
+              Type::HeapConstant(factory()->empty_string(), graph()->zone()),
+              graph()->zone()),
+          graph()->zone())),
       type_cache_(TypeCache::Get()) {
   for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
     double min = kMinInt / (1 << k);
@@ -469,20 +537,22 @@
   }
 }
 
+Reduction JSTypedLowering::ReduceSpeculativeNumberAdd(Node* node) {
+  JSBinopReduction r(this, node);
+  NumberOperationHint hint = NumberOperationHintOf(node->op());
+  if (hint == NumberOperationHint::kNumberOrOddball &&
+      r.BothInputsAre(Type::PlainPrimitive()) &&
+      r.NeitherInputCanBe(Type::StringOrReceiver())) {
+    // SpeculativeNumberAdd(x:-string, y:-string) =>
+    //     NumberAdd(ToNumber(x), ToNumber(y))
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
+  }
+  return NoChange();
+}
+
 Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
   JSBinopReduction r(this, node);
-  NumberOperationHint hint;
-  if (r.GetBinaryNumberOperationHint(&hint)) {
-    if (hint == NumberOperationHint::kNumberOrOddball &&
-        r.BothInputsAre(Type::PlainPrimitive()) &&
-        r.NeitherInputCanBe(Type::StringOrReceiver())) {
-      // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
-      r.ConvertInputsToNumber();
-      return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
-    }
-    return r.ChangeToSpeculativeOperator(
-        simplified()->SpeculativeNumberAdd(hint), Type::Number());
-  }
   if (r.BothInputsAre(Type::Number())) {
     // JSAdd(x:number, y:number) => NumberAdd(x, y)
     r.ConvertInputsToNumber();
@@ -505,13 +575,20 @@
     } else if (!r.RightInputIs(Type::String())) {
       flags = STRING_ADD_CONVERT_RIGHT;
     }
+    Operator::Properties properties = node->op()->properties();
+    if (r.NeitherInputCanBe(Type::Receiver())) {
+      // Both sides are already strings, so we know that the
+      // string addition will not cause any observable side
+      // effects; it can still throw, obviously.
+      properties = Operator::kNoWrite | Operator::kNoDeopt;
+    }
     // JSAdd(x:string, y) => CallStub[StringAdd](x, y)
     // JSAdd(x, y:string) => CallStub[StringAdd](x, y)
     Callable const callable =
         CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
     CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
         isolate(), graph()->zone(), callable.descriptor(), 0,
-        CallDescriptor::kNeedsFrameState, node->op()->properties());
+        CallDescriptor::kNeedsFrameState, properties);
     DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
     node->InsertInput(graph()->zone(), 0,
                       jsgraph()->HeapConstant(callable.code()));
@@ -523,16 +600,6 @@
 
 Reduction JSTypedLowering::ReduceNumberBinop(Node* node) {
   JSBinopReduction r(this, node);
-  NumberOperationHint hint;
-  if (r.GetBinaryNumberOperationHint(&hint)) {
-    if (hint == NumberOperationHint::kNumberOrOddball &&
-        r.BothInputsAre(Type::NumberOrOddball())) {
-      r.ConvertInputsToNumber();
-      return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
-    }
-    return r.ChangeToSpeculativeOperator(r.SpeculativeNumberOp(hint),
-                                         Type::Number());
-  }
   if (r.BothInputsAre(Type::PlainPrimitive()) ||
       !(flags() & kDeoptimizationEnabled)) {
     r.ConvertInputsToNumber();
@@ -541,13 +608,20 @@
   return NoChange();
 }
 
+Reduction JSTypedLowering::ReduceSpeculativeNumberBinop(Node* node) {
+  JSBinopReduction r(this, node);
+  NumberOperationHint hint = NumberOperationHintOf(node->op());
+  if (hint == NumberOperationHint::kNumberOrOddball &&
+      r.BothInputsAre(Type::NumberOrOddball())) {
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp(),
+                                  Type::Number());
+  }
+  return NoChange();
+}
+
 Reduction JSTypedLowering::ReduceInt32Binop(Node* node) {
   JSBinopReduction r(this, node);
-  NumberOperationHint hint;
-  if (r.GetBinaryNumberOperationHint(&hint)) {
-    return r.ChangeToSpeculativeOperator(r.SpeculativeNumberOp(hint),
-                                         Type::Signed32());
-  }
   if (r.BothInputsAre(Type::PlainPrimitive()) ||
       !(flags() & kDeoptimizationEnabled)) {
     r.ConvertInputsToNumber();
@@ -559,12 +633,6 @@
 
 Reduction JSTypedLowering::ReduceUI32Shift(Node* node, Signedness signedness) {
   JSBinopReduction r(this, node);
-  NumberOperationHint hint;
-  if (r.GetBinaryNumberOperationHint(&hint)) {
-    return r.ChangeToSpeculativeOperator(
-        r.SpeculativeNumberOp(hint),
-        signedness == kUnsigned ? Type::Unsigned32() : Type::Signed32());
-  }
   if (r.BothInputsAre(Type::PlainPrimitive()) ||
       !(flags() & kDeoptimizationEnabled)) {
     r.ConvertInputsToNumber();
@@ -746,6 +814,10 @@
     r.ConvertInputsToNumber();
     less_than = simplified()->NumberLessThan();
     less_than_or_equal = simplified()->NumberLessThanOrEqual();
+  } else if (r.IsStringCompareOperation()) {
+    r.CheckInputsToString();
+    less_than = simplified()->StringLessThan();
+    less_than_or_equal = simplified()->StringLessThanOrEqual();
   } else {
     return NoChange();
   }
@@ -787,61 +859,72 @@
     return Replace(jsgraph()->Constant(f->string_string()));
   } else if (type->Is(Type::Symbol())) {
     return Replace(jsgraph()->Constant(f->symbol_string()));
-  } else if (type->Is(Type::Union(Type::Undefined(), Type::OtherUndetectable(),
-                                  graph()->zone()))) {
+  } else if (type->Is(Type::OtherUndetectableOrUndefined())) {
     return Replace(jsgraph()->Constant(f->undefined_string()));
-  } else if (type->Is(Type::Null())) {
+  } else if (type->Is(Type::NonCallableOrNull())) {
     return Replace(jsgraph()->Constant(f->object_string()));
   } else if (type->Is(Type::Function())) {
     return Replace(jsgraph()->Constant(f->function_string()));
   } else if (type->IsHeapConstant()) {
     return Replace(jsgraph()->Constant(
         Object::TypeOf(isolate(), type->AsHeapConstant()->Value())));
-  } else if (type->IsOtherNumberConstant()) {
-    return Replace(jsgraph()->Constant(f->number_string()));
   }
 
   return NoChange();
 }
 
 Reduction JSTypedLowering::ReduceJSEqualTypeOf(Node* node, bool invert) {
+  Node* input;
+  Handle<String> type;
   HeapObjectBinopMatcher m(node);
   if (m.left().IsJSTypeOf() && m.right().HasValue() &&
       m.right().Value()->IsString()) {
-    Node* replacement;
-    Node* input = m.left().InputAt(0);
-    Handle<String> value = Handle<String>::cast(m.right().Value());
-    if (String::Equals(value, factory()->boolean_string())) {
-      replacement =
-          graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
-                           graph()->NewNode(simplified()->ReferenceEqual(),
-                                            input, jsgraph()->TrueConstant()),
-                           jsgraph()->TrueConstant(),
-                           graph()->NewNode(simplified()->ReferenceEqual(),
-                                            input, jsgraph()->FalseConstant()));
-    } else if (String::Equals(value, factory()->function_string())) {
-      replacement = graph()->NewNode(simplified()->ObjectIsCallable(), input);
-    } else if (String::Equals(value, factory()->number_string())) {
-      replacement = graph()->NewNode(simplified()->ObjectIsNumber(), input);
-    } else if (String::Equals(value, factory()->string_string())) {
-      replacement = graph()->NewNode(simplified()->ObjectIsString(), input);
-    } else if (String::Equals(value, factory()->undefined_string())) {
-      replacement = graph()->NewNode(
-          common()->Select(MachineRepresentation::kTagged),
-          graph()->NewNode(simplified()->ReferenceEqual(), input,
-                           jsgraph()->NullConstant()),
-          jsgraph()->FalseConstant(),
-          graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
-    } else {
-      return NoChange();
-    }
-    if (invert) {
-      replacement = graph()->NewNode(simplified()->BooleanNot(), replacement);
-    }
-    ReplaceWithValue(node, replacement);
-    return Replace(replacement);
+    input = m.left().InputAt(0);
+    type = Handle<String>::cast(m.right().Value());
+  } else if (m.right().IsJSTypeOf() && m.left().HasValue() &&
+             m.left().Value()->IsString()) {
+    input = m.right().InputAt(0);
+    type = Handle<String>::cast(m.left().Value());
+  } else {
+    return NoChange();
   }
-  return NoChange();
+  Node* value;
+  if (String::Equals(type, factory()->boolean_string())) {
+    value =
+        graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+                         graph()->NewNode(simplified()->ReferenceEqual(), input,
+                                          jsgraph()->TrueConstant()),
+                         jsgraph()->TrueConstant(),
+                         graph()->NewNode(simplified()->ReferenceEqual(), input,
+                                          jsgraph()->FalseConstant()));
+  } else if (String::Equals(type, factory()->function_string())) {
+    value = graph()->NewNode(simplified()->ObjectIsDetectableCallable(), input);
+  } else if (String::Equals(type, factory()->number_string())) {
+    value = graph()->NewNode(simplified()->ObjectIsNumber(), input);
+  } else if (String::Equals(type, factory()->object_string())) {
+    value = graph()->NewNode(
+        common()->Select(MachineRepresentation::kTagged),
+        graph()->NewNode(simplified()->ObjectIsNonCallable(), input),
+        jsgraph()->TrueConstant(),
+        graph()->NewNode(simplified()->ReferenceEqual(), input,
+                         jsgraph()->NullConstant()));
+  } else if (String::Equals(type, factory()->string_string())) {
+    value = graph()->NewNode(simplified()->ObjectIsString(), input);
+  } else if (String::Equals(type, factory()->undefined_string())) {
+    value = graph()->NewNode(
+        common()->Select(MachineRepresentation::kTagged),
+        graph()->NewNode(simplified()->ReferenceEqual(), input,
+                         jsgraph()->NullConstant()),
+        jsgraph()->FalseConstant(),
+        graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
+  } else {
+    return NoChange();
+  }
+  if (invert) {
+    value = graph()->NewNode(simplified()->BooleanNot(), value);
+  }
+  ReplaceWithValue(node, value);
+  return Replace(value);
 }
 
 Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
@@ -850,6 +933,13 @@
 
   JSBinopReduction r(this, node);
 
+  if (r.BothInputsAre(Type::UniqueName())) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+  }
+  if (r.IsInternalizedStringCompareOperation()) {
+    r.CheckInputsToInternalizedString();
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+  }
   if (r.BothInputsAre(Type::String())) {
     return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
@@ -884,6 +974,12 @@
         simplified()->SpeculativeNumberEqual(hint), invert, Type::Boolean());
   } else if (r.BothInputsAre(Type::Number())) {
     return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  } else if (r.IsReceiverCompareOperation()) {
+    r.CheckInputsToReceiver();
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+  } else if (r.IsStringCompareOperation()) {
+    r.CheckInputsToString();
+    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
   return NoChange();
 }
@@ -898,10 +994,10 @@
       return Replace(replacement);
     }
   }
-  if (r.OneInputCannotBe(Type::NumberOrSimdOrString())) {
+  if (r.OneInputCannotBe(Type::NumberOrString())) {
     // For values with canonical representation (i.e. neither String, nor
-    // Simd128Value nor Number) an empty type intersection means the values
-    // cannot be strictly equal.
+    // Number) an empty type intersection means the values cannot be strictly
+    // equal.
     if (!r.left_type()->Maybe(r.right_type())) {
       Node* replacement = jsgraph()->BooleanConstant(invert);
       ReplaceWithValue(node, replacement);
@@ -912,27 +1008,16 @@
   Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
   if (reduction.Changed()) return reduction;
 
-  if (r.OneInputIs(the_hole_type_)) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
-  }
-  if (r.OneInputIs(Type::Undefined())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
-  }
-  if (r.OneInputIs(Type::Null())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
-  }
-  if (r.OneInputIs(Type::Boolean())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
-  }
-  if (r.OneInputIs(Type::Object())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
-  }
-  if (r.OneInputIs(Type::Receiver())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
-  }
   if (r.BothInputsAre(Type::Unique())) {
     return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
   }
+  if (r.OneInputIs(pointer_comparable_type_)) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+  }
+  if (r.IsInternalizedStringCompareOperation()) {
+    r.CheckInputsToInternalizedString();
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+  }
   if (r.BothInputsAre(Type::String())) {
     return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
@@ -946,6 +1031,15 @@
         simplified()->SpeculativeNumberEqual(hint), invert, Type::Boolean());
   } else if (r.BothInputsAre(Type::Number())) {
     return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  } else if (r.IsReceiverCompareOperation()) {
+    // For strict equality, it's enough to know that one input is a Receiver,
+    // as a strict equality comparison with a Receiver can only yield true if
+    // both sides refer to the same Receiver.
+    r.CheckLeftInputToReceiver();
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+  } else if (r.IsStringCompareOperation()) {
+    r.CheckInputsToString();
+    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
   return NoChange();
 }
@@ -958,7 +1052,6 @@
     return Replace(input);
   } else if (input_type->Is(Type::OrderedNumber())) {
     // JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
-    RelaxEffectsAndControls(node);
     node->ReplaceInput(0, graph()->NewNode(simplified()->NumberEqual(), input,
                                            jsgraph()->ZeroConstant()));
     node->TrimInputCount(1);
@@ -966,10 +1059,33 @@
     return Changed(node);
   } else if (input_type->Is(Type::Number())) {
     // JSToBoolean(x:number) => NumberToBoolean(x)
-    RelaxEffectsAndControls(node);
     node->TrimInputCount(1);
     NodeProperties::ChangeOp(node, simplified()->NumberToBoolean());
     return Changed(node);
+  } else if (input_type->Is(Type::DetectableReceiverOrNull())) {
+    // JSToBoolean(x:detectable receiver \/ null)
+    //   => BooleanNot(ReferenceEqual(x,#null))
+    node->ReplaceInput(0, graph()->NewNode(simplified()->ReferenceEqual(),
+                                           input, jsgraph()->NullConstant()));
+    node->TrimInputCount(1);
+    NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+    return Changed(node);
+  } else if (input_type->Is(Type::ReceiverOrNullOrUndefined())) {
+    // JSToBoolean(x:receiver \/ null \/ undefined)
+    //   => BooleanNot(ObjectIsUndetectable(x))
+    node->ReplaceInput(
+        0, graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
+    node->TrimInputCount(1);
+    NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+    return Changed(node);
+  } else if (input_type->Is(Type::String())) {
+    // JSToBoolean(x:string) => BooleanNot(ReferenceEqual(x,""))
+    node->ReplaceInput(0,
+                       graph()->NewNode(simplified()->ReferenceEqual(), input,
+                                        jsgraph()->EmptyStringConstant()));
+    node->TrimInputCount(1);
+    NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+    return Changed(node);
   }
   return NoChange();
 }
@@ -1239,6 +1355,9 @@
   Node* value = NodeProperties::GetValueInput(node, 2);
   Type* key_type = NodeProperties::GetType(key);
   Type* value_type = NodeProperties::GetType(value);
+
+  if (!value_type->Is(Type::PlainPrimitive())) return NoChange();
+
   HeapObjectMatcher mbase(base);
   if (mbase.HasValue() && mbase.Value()->IsJSTypedArray()) {
     Handle<JSTypedArray> const array =
@@ -1257,7 +1376,6 @@
             Handle<FixedTypedArrayBase>::cast(handle(array->elements()));
         Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
         Node* length = jsgraph()->Constant(byte_length);
-        Node* context = NodeProperties::GetContextInput(node);
         Node* effect = NodeProperties::GetEffectInput(node);
         Node* control = NodeProperties::GetControlInput(node);
         // Convert to a number first.
@@ -1266,12 +1384,8 @@
           if (number_reduction.Changed()) {
             value = number_reduction.replacement();
           } else {
-            Node* frame_state_for_to_number =
-                NodeProperties::FindFrameStateBefore(node);
-            value = effect =
-                graph()->NewNode(javascript()->ToNumber(), value, context,
-                                 frame_state_for_to_number, effect, control);
-            control = graph()->NewNode(common()->IfSuccess(), value);
+            value =
+                graph()->NewNode(simplified()->PlainPrimitiveToNumber(), value);
           }
         }
         // Check if we can avoid the bounds check.
@@ -1316,11 +1430,30 @@
   Node* constructor = NodeProperties::GetValueInput(node, 0);
   Type* constructor_type = NodeProperties::GetType(constructor);
   Node* object = NodeProperties::GetValueInput(node, 1);
+  Type* object_type = NodeProperties::GetType(object);
   Node* context = NodeProperties::GetContextInput(node);
   Node* frame_state = NodeProperties::GetFrameStateInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
+  // Check if the {constructor} cannot be callable.
+  // See ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) step 1.
+  if (!constructor_type->Maybe(Type::Callable())) {
+    Node* value = jsgraph()->FalseConstant();
+    ReplaceWithValue(node, value, effect, control);
+    return Replace(value);
+  }
+
+  // If the {constructor} cannot be a JSBoundFunction and the {object}
+  // cannot be a JSReceiver, then this can be constant-folded to false.
+  // See ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) steps 2 and 3.
+  if (!object_type->Maybe(Type::Receiver()) &&
+      !constructor_type->Maybe(Type::BoundFunction())) {
+    Node* value = jsgraph()->FalseConstant();
+    ReplaceWithValue(node, value, effect, control);
+    return Replace(value);
+  }
+
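+  // (Editor's illustration: together the two folds above turn cases like
+  // "primitive instanceof C", where C is known not to be a bound function,
+  // into the constant false without any runtime call.)
+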
   // Check if the {constructor} is a (known) JSFunction.
   if (!constructor_type->IsHeapConstant() ||
       !constructor_type->AsHeapConstant()->Value()->IsJSFunction()) {
@@ -1473,16 +1606,17 @@
   DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
   ContextAccess const& access = ContextAccessOf(node->op());
   Node* effect = NodeProperties::GetEffectInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
   Node* control = graph()->start();
   for (size_t i = 0; i < access.depth(); ++i) {
-    Node* previous = effect = graph()->NewNode(
+    context = effect = graph()->NewNode(
         simplified()->LoadField(
             AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
-        NodeProperties::GetValueInput(node, 0), effect, control);
-    node->ReplaceInput(0, previous);
+        context, effect, control);
   }
+  node->ReplaceInput(0, context);
   node->ReplaceInput(1, effect);
-  node->ReplaceInput(2, control);
+  node->AppendInput(jsgraph()->zone(), control);
   NodeProperties::ChangeOp(
       node,
       simplified()->LoadField(AccessBuilder::ForContextSlot(access.index())));
@@ -1493,15 +1627,17 @@
   DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
   ContextAccess const& access = ContextAccessOf(node->op());
   Node* effect = NodeProperties::GetEffectInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
   Node* control = graph()->start();
+  Node* value = NodeProperties::GetValueInput(node, 0);
   for (size_t i = 0; i < access.depth(); ++i) {
-    Node* previous = effect = graph()->NewNode(
+    context = effect = graph()->NewNode(
         simplified()->LoadField(
             AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
-        NodeProperties::GetValueInput(node, 0), effect, control);
-    node->ReplaceInput(0, previous);
+        context, effect, control);
   }
-  node->RemoveInput(2);
+  node->ReplaceInput(0, context);
+  node->ReplaceInput(1, value);
   node->ReplaceInput(2, effect);
   NodeProperties::ChangeOp(
       node,
@@ -1591,7 +1727,6 @@
   Type* receiver_type = NodeProperties::GetType(receiver);
   Node* context = NodeProperties::GetContextInput(node);
   Type* context_type = NodeProperties::GetType(context);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
@@ -1614,10 +1749,10 @@
     } else {
       Node* native_context = effect = graph()->NewNode(
           javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
+          context, effect);
       receiver = effect = graph()->NewNode(
           javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
-          native_context, native_context, effect);
+          native_context, effect);
     }
     ReplaceWithValue(node, receiver, effect, control);
     return Replace(receiver);
@@ -1638,14 +1773,15 @@
     Node* efalse = effect;
     Node* rfalse;
     {
-      // Convert {receiver} using the ToObjectStub.
+      // Convert {receiver} using the ToObjectStub. The call does not require a
+      // frame-state in this case, because neither null nor undefined is passed.
       Callable callable = CodeFactory::ToObject(isolate());
       CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
           isolate(), graph()->zone(), callable.descriptor(), 0,
-          CallDescriptor::kNeedsFrameState, node->op()->properties());
+          CallDescriptor::kNoFlags, node->op()->properties());
       rfalse = efalse = graph()->NewNode(
           common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
-          receiver, context, frame_state, efalse);
+          receiver, context, efalse);
     }
 
     control = graph()->NewNode(common()->Merge(2), if_true, if_false);
@@ -1695,14 +1831,15 @@
   Node* econvert = effect;
   Node* rconvert;
   {
-    // Convert {receiver} using the ToObjectStub.
+    // Convert {receiver} using the ToObjectStub. The call does not require a
+    // frame-state in this case, because neither null nor undefined is passed.
     Callable callable = CodeFactory::ToObject(isolate());
     CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
         isolate(), graph()->zone(), callable.descriptor(), 0,
-        CallDescriptor::kNeedsFrameState, node->op()->properties());
+        CallDescriptor::kNoFlags, node->op()->properties());
     rconvert = econvert = graph()->NewNode(
         common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
-        receiver, context, frame_state, econvert);
+        receiver, context, econvert);
   }
 
   // Replace {receiver} with global proxy of {context}.
@@ -1719,10 +1856,10 @@
     } else {
       Node* native_context = eglobal = graph()->NewNode(
           javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, eglobal);
+          context, eglobal);
       rglobal = eglobal = graph()->NewNode(
           javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
-          native_context, native_context, eglobal);
+          native_context, eglobal);
     }
   }
 
@@ -1764,7 +1901,7 @@
   // The logic contained here is mirrored in Builtins::Generate_Adaptor.
   // Keep these in sync.
 
-  const bool is_construct = (node->opcode() == IrOpcode::kJSCallConstruct);
+  const bool is_construct = (node->opcode() == IrOpcode::kJSConstruct);
 
   DCHECK(Builtins::HasCppImplementation(builtin_index));
   DCHECK_EQ(0, flags & CallDescriptor::kSupportsTailCalls);
@@ -1824,9 +1961,9 @@
 
 }  // namespace
 
-Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
-  CallConstructParameters const& p = CallConstructParametersOf(node->op());
+Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
+  ConstructParameters const& p = ConstructParametersOf(node->op());
   DCHECK_LE(2u, p.arity());
   int const arity = static_cast<int>(p.arity() - 2);
   Node* target = NodeProperties::GetValueInput(node, 0);
@@ -1899,10 +2036,38 @@
   return NoChange();
 }
 
+Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCallForwardVarargs, node->opcode());
+  CallForwardVarargsParameters p = CallForwardVarargsParametersOf(node->op());
+  Node* target = NodeProperties::GetValueInput(node, 0);
+  Type* target_type = NodeProperties::GetType(target);
 
-Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+  // Check if {target} is a JSFunction.
+  if (target_type->Is(Type::Function())) {
+    // Compute flags for the call.
+    CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+    if (p.tail_call_mode() == TailCallMode::kAllow) {
+      flags |= CallDescriptor::kSupportsTailCalls;
+    }
+
+    // Patch {node} to an indirect call via CallFunctionForwardVarargs.
+    Callable callable = CodeFactory::CallFunctionForwardVarargs(isolate());
+    node->InsertInput(graph()->zone(), 0,
+                      jsgraph()->HeapConstant(callable.code()));
+    node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(p.start_index()));
+    NodeProperties::ChangeOp(
+        node,
+        common()->Call(Linkage::GetStubCallDescriptor(
+            isolate(), graph()->zone(), callable.descriptor(), 1, flags)));
+    return Changed(node);
+  }
+
+  return NoChange();
+}
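+
+// (Editor's note: JSCallForwardVarargs forwards the actual arguments of the
+// current frame, starting at {start_index}, to the callee; this is the kind
+// of call that patterns like f.apply(this, arguments) reduce to when the
+// arguments object does not otherwise escape.)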
+
+Reduction JSTypedLowering::ReduceJSCall(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+  CallParameters const& p = CallParametersOf(node->op());
   int const arity = static_cast<int>(p.arity() - 2);
   ConvertReceiverMode convert_mode = p.convert_mode();
   Node* target = NodeProperties::GetValueInput(node, 0);
@@ -1911,7 +2076,6 @@
   Type* receiver_type = NodeProperties::GetType(receiver);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
 
   // Try to infer receiver {convert_mode} from {receiver} type.
   if (receiver_type->Is(Type::NullOrUndefined())) {
@@ -1944,7 +2108,7 @@
         !receiver_type->Is(Type::Receiver())) {
       receiver = effect =
           graph()->NewNode(javascript()->ConvertReceiver(convert_mode),
-                           receiver, context, frame_state, effect, control);
+                           receiver, context, effect, control);
       NodeProperties::ReplaceValueInput(node, receiver, 1);
     }
 
@@ -2011,8 +2175,9 @@
   // Maybe we did at least learn something about the {receiver}.
   if (p.convert_mode() != convert_mode) {
     NodeProperties::ChangeOp(
-        node, javascript()->CallFunction(p.arity(), p.frequency(), p.feedback(),
-                                         convert_mode, p.tail_call_mode()));
+        node,
+        javascript()->Call(p.arity(), p.frequency(), p.feedback(), convert_mode,
+                           p.tail_call_mode()));
     return Changed(node);
   }
 
@@ -2031,6 +2196,18 @@
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
+  // We don't support lowering JSForInNext inside try blocks.
+  if (NodeProperties::IsExceptionalCall(node)) return NoChange();
+
+  // We know that the {index} is in Unsigned32 range here, otherwise executing
+  // the JSForInNext wouldn't be valid. Unfortunately due to OSR and generators
+  // this is not always reflected in the types, hence we might need to rename
+  // the {index} here.
+  if (!NodeProperties::GetType(index)->Is(Type::Unsigned32())) {
+    index = graph()->NewNode(common()->TypeGuard(Type::Unsigned32()), index,
+                             control);
+  }
+
   // Load the next {key} from the {cache_array}.
   Node* key = effect = graph()->NewNode(
       simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
@@ -2085,6 +2262,28 @@
   return Changed(node);
 }
 
+Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSLoadMessage, node->opcode());
+  ExternalReference const ref =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
+  NodeProperties::ChangeOp(
+      node, simplified()->LoadField(AccessBuilder::ForExternalTaggedValue()));
+  return Changed(node);
+}
+
+Reduction JSTypedLowering::ReduceJSStoreMessage(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreMessage, node->opcode());
+  ExternalReference const ref =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  Node* value = NodeProperties::GetValueInput(node, 0);
+  node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
+  node->ReplaceInput(1, value);
+  NodeProperties::ChangeOp(
+      node, simplified()->StoreField(AccessBuilder::ForExternalTaggedValue()));
+  return Changed(node);
+}
+
 Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
   DCHECK_EQ(IrOpcode::kJSGeneratorStore, node->opcode());
   Node* generator = NodeProperties::GetValueInput(node, 0);
@@ -2095,7 +2294,7 @@
   Node* control = NodeProperties::GetControlInput(node);
   int register_count = OpParameter<int>(node);
 
-  FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+  FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
   FieldAccess context_field = AccessBuilder::ForJSGeneratorObjectContext();
   FieldAccess continuation_field =
       AccessBuilder::ForJSGeneratorObjectContinuation();
@@ -2149,7 +2348,7 @@
   Node* control = NodeProperties::GetControlInput(node);
   int index = OpParameter<int>(node);
 
-  FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+  FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
   FieldAccess element_field = AccessBuilder::ForFixedArraySlot(index);
 
   Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
@@ -2229,18 +2428,33 @@
       return ReduceJSStoreModule(node);
     case IrOpcode::kJSConvertReceiver:
       return ReduceJSConvertReceiver(node);
-    case IrOpcode::kJSCallConstruct:
-      return ReduceJSCallConstruct(node);
-    case IrOpcode::kJSCallFunction:
-      return ReduceJSCallFunction(node);
+    case IrOpcode::kJSConstruct:
+      return ReduceJSConstruct(node);
+    case IrOpcode::kJSCallForwardVarargs:
+      return ReduceJSCallForwardVarargs(node);
+    case IrOpcode::kJSCall:
+      return ReduceJSCall(node);
     case IrOpcode::kJSForInNext:
       return ReduceJSForInNext(node);
+    case IrOpcode::kJSLoadMessage:
+      return ReduceJSLoadMessage(node);
+    case IrOpcode::kJSStoreMessage:
+      return ReduceJSStoreMessage(node);
     case IrOpcode::kJSGeneratorStore:
       return ReduceJSGeneratorStore(node);
     case IrOpcode::kJSGeneratorRestoreContinuation:
       return ReduceJSGeneratorRestoreContinuation(node);
     case IrOpcode::kJSGeneratorRestoreRegister:
       return ReduceJSGeneratorRestoreRegister(node);
+    // TODO(mstarzinger): Simplified operations hiding in JS-level reducer are
+    // not fooling anyone. Consider moving this into a separate reducer.
+    case IrOpcode::kSpeculativeNumberAdd:
+      return ReduceSpeculativeNumberAdd(node);
+    case IrOpcode::kSpeculativeNumberSubtract:
+    case IrOpcode::kSpeculativeNumberMultiply:
+    case IrOpcode::kSpeculativeNumberDivide:
+    case IrOpcode::kSpeculativeNumberModulus:
+      return ReduceSpeculativeNumberBinop(node);
     default:
       break;
   }
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index 3e71022..35195ec 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -70,9 +70,12 @@
   Reduction ReduceJSToString(Node* node);
   Reduction ReduceJSToObject(Node* node);
   Reduction ReduceJSConvertReceiver(Node* node);
-  Reduction ReduceJSCallConstruct(Node* node);
-  Reduction ReduceJSCallFunction(Node* node);
+  Reduction ReduceJSConstruct(Node* node);
+  Reduction ReduceJSCallForwardVarargs(Node* node);
+  Reduction ReduceJSCall(Node* node);
   Reduction ReduceJSForInNext(Node* node);
+  Reduction ReduceJSLoadMessage(Node* node);
+  Reduction ReduceJSStoreMessage(Node* node);
   Reduction ReduceJSGeneratorStore(Node* node);
   Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
   Reduction ReduceJSGeneratorRestoreRegister(Node* node);
@@ -81,6 +84,8 @@
   Reduction ReduceInt32Binop(Node* node);
   Reduction ReduceUI32Shift(Node* node, Signedness signedness);
   Reduction ReduceCreateConsString(Node* node);
+  Reduction ReduceSpeculativeNumberAdd(Node* node);
+  Reduction ReduceSpeculativeNumberBinop(Node* node);
 
   Factory* factory() const;
   Graph* graph() const;
@@ -96,7 +101,7 @@
   Flags flags_;
   JSGraph* jsgraph_;
   Type* shifted_int32_ranges_[4];
-  Type* const the_hole_type_;
+  Type* pointer_comparable_type_;
   TypeCache const& type_cache_;
 };
 
diff --git a/src/compiler/jump-threading.cc b/src/compiler/jump-threading.cc
index d7d4f91..86d25de 100644
--- a/src/compiler/jump-threading.cc
+++ b/src/compiler/jump-threading.cc
@@ -4,6 +4,7 @@
 
 #include "src/compiler/jump-threading.h"
 #include "src/compiler/code-generator-impl.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 971ea72..06f967a 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -5,7 +5,6 @@
 #include "src/compiler/linkage.h"
 
 #include "src/ast/scopes.h"
-#include "src/builtins/builtins-utils.h"
 #include "src/code-stubs.h"
 #include "src/compilation-info.h"
 #include "src/compiler/common-operator.h"
@@ -13,6 +12,7 @@
 #include "src/compiler/node.h"
 #include "src/compiler/osr.h"
 #include "src/compiler/pipeline.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -53,8 +53,7 @@
 MachineSignature* CallDescriptor::GetMachineSignature(Zone* zone) const {
   size_t param_count = ParameterCount();
   size_t return_count = ReturnCount();
-  MachineType* types = reinterpret_cast<MachineType*>(
-      zone->New(sizeof(MachineType*) * (param_count + return_count)));
+  MachineType* types = zone->NewArray<MachineType>(param_count + return_count);
   int current = 0;
   for (size_t i = 0; i < return_count; ++i) {
     types[current++] = GetReturnType(i);
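The replaced allocation sized the buffer with sizeof(MachineType*) (a pointer) instead of sizeof(MachineType) (the element); zone->NewArray<MachineType>(n) makes the element size explicit. A small sketch of the distinction, with a stand-in MachineType (V8's is likewise a small value type):

#include <cstdint>
#include <iostream>

struct MachineType {  // stand-in, not V8's definition
  uint8_t representation;
  uint8_t semantic;
};

int main() {
  std::cout << "element size: " << sizeof(MachineType) << "\n";   // e.g. 2
  std::cout << "pointer size: " << sizeof(MachineType*) << "\n";  // e.g. 8
  // sizeof(MachineType*) * n over-allocates while elements are smaller than
  // pointers, and would silently under-allocate if the element type ever
  // outgrew a pointer; the typed helper computes sizeof(MachineType) * n.
}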
@@ -143,16 +142,15 @@
 bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
   switch (function) {
     // Most runtime functions need a FrameState. A few chosen ones that we know
-    // not to call into arbitrary JavaScript, not to throw, and not to
-    // deoptimize
-    // are whitelisted here and can be called without a FrameState.
+    // not to call into arbitrary JavaScript, not to throw, and not to lazily
+    // deoptimize are whitelisted here and can be called without a FrameState.
     case Runtime::kAbort:
     case Runtime::kAllocateInTargetSpace:
+    case Runtime::kConvertReceiver:
     case Runtime::kCreateIterResultObject:
     case Runtime::kDefineGetterPropertyUnchecked:  // TODO(jarin): Is it safe?
     case Runtime::kDefineSetterPropertyUnchecked:  // TODO(jarin): Is it safe?
     case Runtime::kGeneratorGetContinuation:
-    case Runtime::kGetSuperConstructor:
     case Runtime::kIsFunction:
     case Runtime::kNewClosure:
     case Runtime::kNewClosure_Tenured:
@@ -173,13 +171,13 @@
       return false;
 
     // Some inline intrinsics are also safe to call without a FrameState.
+    case Runtime::kInlineClassOf:
     case Runtime::kInlineCreateIterResultObject:
     case Runtime::kInlineFixedArrayGet:
     case Runtime::kInlineFixedArraySet:
     case Runtime::kInlineGeneratorClose:
     case Runtime::kInlineGeneratorGetInputOrDebugPos:
     case Runtime::kInlineGeneratorGetResumeMode:
-    case Runtime::kInlineGetSuperConstructor:
     case Runtime::kInlineIsArray:
     case Runtime::kInlineIsJSReceiver:
     case Runtime::kInlineIsRegExp:
diff --git a/src/compiler/load-elimination.cc b/src/compiler/load-elimination.cc
index e50ebe1..10140e1 100644
--- a/src/compiler/load-elimination.cc
+++ b/src/compiler/load-elimination.cc
@@ -8,6 +8,8 @@
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/factory.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -320,6 +322,42 @@
   }
 }
 
+bool LoadElimination::AbstractMaps::Lookup(
+    Node* object, ZoneHandleSet<Map>* object_maps) const {
+  for (auto pair : info_for_node_) {
+    if (MustAlias(object, pair.first)) {
+      *object_maps = pair.second;
+      return true;
+    }
+  }
+  return false;
+}
+
+LoadElimination::AbstractMaps const* LoadElimination::AbstractMaps::Kill(
+    Node* object, Zone* zone) const {
+  for (auto pair : this->info_for_node_) {
+    if (MayAlias(object, pair.first)) {
+      AbstractMaps* that = new (zone) AbstractMaps(zone);
+      for (auto pair : this->info_for_node_) {
+        if (!MayAlias(object, pair.first)) that->info_for_node_.insert(pair);
+      }
+      return that;
+    }
+  }
+  return this;
+}
+
+void LoadElimination::AbstractMaps::Print() const {
+  for (auto pair : info_for_node_) {
+    PrintF("    #%d:%s\n", pair.first->id(), pair.first->op()->mnemonic());
+    OFStream os(stdout);
+    ZoneHandleSet<Map> const& maps = pair.second;
+    for (size_t i = 0; i < maps.size(); ++i) {
+      os << "     - " << Brief(*maps[i]) << "\n";
+    }
+  }
+}
+
 bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
   if (this->checks_) {
     if (!that->checks_ || !that->checks_->Equals(this->checks_)) {
@@ -344,6 +382,13 @@
       return false;
     }
   }
+  if (this->maps_) {
+    if (!that->maps_ || !that->maps_->Equals(this->maps_)) {
+      return false;
+    }
+  } else if (that->maps_) {
+    return false;
+  }
   return true;
 }
 
@@ -372,6 +417,11 @@
       }
     }
   }
+
+  // Merge the information we have about the maps.
+  if (this->maps_) {
+    this->maps_ = that->maps_ ? that->maps_->Merge(this->maps_, zone) : nullptr;
+  }
 }
 
 Node* LoadElimination::AbstractState::LookupCheck(Node* node) const {
@@ -389,6 +439,35 @@
   return that;
 }
 
+bool LoadElimination::AbstractState::LookupMaps(
+    Node* object, ZoneHandleSet<Map>* object_map) const {
+  return this->maps_ && this->maps_->Lookup(object, object_map);
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::AddMaps(
+    Node* object, ZoneHandleSet<Map> maps, Zone* zone) const {
+  AbstractState* that = new (zone) AbstractState(*this);
+  if (that->maps_) {
+    that->maps_ = that->maps_->Extend(object, maps, zone);
+  } else {
+    that->maps_ = new (zone) AbstractMaps(object, maps, zone);
+  }
+  return that;
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::KillMaps(
+    Node* object, Zone* zone) const {
+  if (this->maps_) {
+    AbstractMaps const* that_maps = this->maps_->Kill(object, zone);
+    if (this->maps_ != that_maps) {
+      AbstractState* that = new (zone) AbstractState(*this);
+      that->maps_ = that_maps;
+      return that;
+    }
+  }
+  return this;
+}
+
 Node* LoadElimination::AbstractState::LookupElement(Node* object,
                                                     Node* index) const {
   if (this->elements_) {
@@ -456,7 +535,7 @@
       AbstractField const* that_field = this_field->Kill(object, zone);
       if (that_field != this_field) {
         AbstractState* that = new (zone) AbstractState(*this);
-        that->fields_[i] = this_field;
+        that->fields_[i] = that_field;
         while (++i < arraysize(fields_)) {
           if (this->fields_[i] != nullptr) {
             that->fields_[i] = this->fields_[i]->Kill(object, zone);
@@ -481,6 +560,10 @@
     PrintF("   checks:\n");
     checks_->Print();
   }
+  if (maps_) {
+    PrintF("   maps:\n");
+    maps_->Print();
+  }
   if (elements_) {
     PrintF("   elements:\n");
     elements_->Print();
@@ -520,23 +603,18 @@
 }
 
 Reduction LoadElimination::ReduceCheckMaps(Node* node) {
+  ZoneHandleSet<Map> const maps = CheckMapsParametersOf(node->op()).maps();
   Node* const object = NodeProperties::GetValueInput(node, 0);
   Node* const effect = NodeProperties::GetEffectInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
-  int const map_input_count = node->op()->ValueInputCount() - 1;
-  if (Node* const object_map =
-          state->LookupField(object, FieldIndexOf(HeapObject::kMapOffset))) {
-    for (int i = 0; i < map_input_count; ++i) {
-      Node* map = NodeProperties::GetValueInput(node, 1 + i);
-      if (map == object_map) return Replace(effect);
-    }
+  ZoneHandleSet<Map> object_maps;
+  if (state->LookupMaps(object, &object_maps)) {
+    if (maps.contains(object_maps)) return Replace(effect);
+    state = state->KillMaps(object, zone());
+    // TODO(turbofan): Compute the intersection.
   }
-  if (map_input_count == 1) {
-    Node* const map0 = NodeProperties::GetValueInput(node, 1);
-    state = state->AddField(object, FieldIndexOf(HeapObject::kMapOffset), map0,
-                            zone());
-  }
+  state = state->AddMaps(object, maps, zone());
   return UpdateState(node, state);
 }
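The reduction rule above in one sentence: a CheckMaps is redundant iff every map the object is statically known to have is among the maps being checked; otherwise the stale knowledge is killed and the checked set is recorded. A minimal sketch, with std::set of names standing in for ZoneHandleSet<Map> and an empty set meaning "no knowledge":

#include <algorithm>
#include <cassert>
#include <set>
#include <string>

using MapSet = std::set<std::string>;

// Returns true if the check can be dropped; otherwise updates *known to the
// checked set (kill + add), mirroring ReduceCheckMaps above.
bool ReduceCheckMaps(const MapSet& checked, MapSet* known) {
  if (!known->empty() &&
      std::includes(checked.begin(), checked.end(), known->begin(),
                    known->end())) {
    return true;  // known maps are a subset of the checked maps
  }
  *known = checked;
  return false;
}

int main() {
  MapSet known = {"FastElementsMap"};
  assert(ReduceCheckMaps({"FastElementsMap", "DictionaryMap"}, &known));
  assert(!ReduceCheckMaps({"DictionaryMap"}, &known));  // kill + re-record
  assert(known == MapSet{"DictionaryMap"});
}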
 
@@ -546,18 +624,16 @@
   Node* const effect = NodeProperties::GetEffectInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
-  Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
-  if (Node* const elements_map =
-          state->LookupField(elements, FieldIndexOf(HeapObject::kMapOffset))) {
-    // Check if the {elements} already have the fixed array map.
+  // Check if the {elements} already have the fixed array map.
-    if (elements_map == fixed_array_map) {
-      ReplaceWithValue(node, elements, effect);
-      return Replace(elements);
-    }
+  ZoneHandleSet<Map> elements_maps;
+  ZoneHandleSet<Map> fixed_array_maps(factory()->fixed_array_map());
+  if (state->LookupMaps(elements, &elements_maps) &&
+      fixed_array_maps.contains(elements_maps)) {
+    ReplaceWithValue(node, elements, effect);
+    return Replace(elements);
   }
   // We know that the resulting elements have the fixed array map.
-  state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
-                          fixed_array_map, zone());
+  state = state->AddMaps(node, fixed_array_maps, zone());
   // Kill the previous elements on {object}.
   state =
       state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), zone());
@@ -575,14 +651,12 @@
   if (state == nullptr) return NoChange();
   if (flags & GrowFastElementsFlag::kDoubleElements) {
     // We know that the resulting elements have the fixed double array map.
-    Node* fixed_double_array_map = jsgraph()->FixedDoubleArrayMapConstant();
-    state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
-                            fixed_double_array_map, zone());
+    state = state->AddMaps(
+        node, ZoneHandleSet<Map>(factory()->fixed_double_array_map()), zone());
   } else {
     // We know that the resulting elements have the fixed array map.
-    Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
-    state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
-                            fixed_array_map, zone());
+    state = state->AddMaps(
+        node, ZoneHandleSet<Map>(factory()->fixed_array_map()), zone());
   }
   if (flags & GrowFastElementsFlag::kArrayObject) {
     // Kill the previous Array::length on {object}.
@@ -599,31 +673,30 @@
 }
 
 Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
+  ElementsTransition transition = ElementsTransitionOf(node->op());
   Node* const object = NodeProperties::GetValueInput(node, 0);
-  Node* const source_map = NodeProperties::GetValueInput(node, 1);
-  Node* const target_map = NodeProperties::GetValueInput(node, 2);
+  Handle<Map> source_map(transition.source());
+  Handle<Map> target_map(transition.target());
   Node* const effect = NodeProperties::GetEffectInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
-  if (Node* const object_map =
-          state->LookupField(object, FieldIndexOf(HeapObject::kMapOffset))) {
-    if (target_map == object_map) {
+  ZoneHandleSet<Map> object_maps;
+  if (state->LookupMaps(object, &object_maps)) {
+    if (ZoneHandleSet<Map>(target_map).contains(object_maps)) {
       // The {object} already has the {target_map}, so this TransitionElements
       // {node} is fully redundant (independent of what {source_map} is).
       return Replace(effect);
     }
-    state =
-        state->KillField(object, FieldIndexOf(HeapObject::kMapOffset), zone());
-    if (source_map == object_map) {
-      state = state->AddField(object, FieldIndexOf(HeapObject::kMapOffset),
-                              target_map, zone());
+    if (object_maps.contains(ZoneHandleSet<Map>(source_map))) {
+      object_maps.remove(source_map, zone());
+      object_maps.insert(target_map, zone());
+      state = state->KillMaps(object, zone());
+      state = state->AddMaps(object, object_maps, zone());
     }
   } else {
-    state =
-        state->KillField(object, FieldIndexOf(HeapObject::kMapOffset), zone());
+    state = state->KillMaps(object, zone());
   }
-  ElementsTransition transition = ElementsTransitionOf(node->op());
-  switch (transition) {
+  switch (transition.mode()) {
     case ElementsTransition::kFastTransition:
       break;
     case ElementsTransition::kSlowTransition:
@@ -642,23 +715,40 @@
   Node* const control = NodeProperties::GetControlInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
-  int field_index = FieldIndexOf(access);
-  if (field_index >= 0) {
-    if (Node* replacement = state->LookupField(object, field_index)) {
-      // Make sure we don't resurrect dead {replacement} nodes.
-      if (!replacement->IsDead()) {
-        // We might need to guard the {replacement} if the type of the
-        // {node} is more precise than the type of the {replacement}.
-        Type* const node_type = NodeProperties::GetType(node);
-        if (!NodeProperties::GetType(replacement)->Is(node_type)) {
-          replacement = graph()->NewNode(common()->TypeGuard(node_type),
-                                         replacement, control);
-        }
-        ReplaceWithValue(node, replacement, effect);
-        return Replace(replacement);
-      }
+  if (access.offset == HeapObject::kMapOffset &&
+      access.base_is_tagged == kTaggedBase) {
+    DCHECK(IsAnyTagged(access.machine_type.representation()));
+    ZoneHandleSet<Map> object_maps;
+    if (state->LookupMaps(object, &object_maps) && object_maps.size() == 1) {
+      Node* value = jsgraph()->HeapConstant(object_maps[0]);
+      NodeProperties::SetType(value, Type::OtherInternal());
+      ReplaceWithValue(node, value, effect);
+      return Replace(value);
     }
-    state = state->AddField(object, field_index, node, zone());
+  } else {
+    int field_index = FieldIndexOf(access);
+    if (field_index >= 0) {
+      if (Node* replacement = state->LookupField(object, field_index)) {
+        // Make sure we don't resurrect dead {replacement} nodes.
+        if (!replacement->IsDead()) {
+          // We might need to guard the {replacement} if the type of the
+          // {node} is more precise than the type of the {replacement}.
+          Type* const node_type = NodeProperties::GetType(node);
+          if (!NodeProperties::GetType(replacement)->Is(node_type)) {
+            replacement = graph()->NewNode(common()->TypeGuard(node_type),
+                                           replacement, control);
+            NodeProperties::SetType(replacement, node_type);
+          }
+          ReplaceWithValue(node, replacement, effect);
+          return Replace(replacement);
+        }
+      }
+      state = state->AddField(object, field_index, node, zone());
+    }
+  }
+  Handle<Map> field_map;
+  if (access.map.ToHandle(&field_map)) {
+    state = state->AddMaps(node, ZoneHandleSet<Map>(field_map), zone());
   }
   return UpdateState(node, state);
 }
@@ -670,19 +760,33 @@
   Node* const effect = NodeProperties::GetEffectInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
-  int field_index = FieldIndexOf(access);
-  if (field_index >= 0) {
-    Node* const old_value = state->LookupField(object, field_index);
-    if (old_value == new_value) {
-      // This store is fully redundant.
-      return Replace(effect);
+  if (access.offset == HeapObject::kMapOffset &&
+      access.base_is_tagged == kTaggedBase) {
+    DCHECK(IsAnyTagged(access.machine_type.representation()));
+    // Kill all potential knowledge about the {object}s map.
+    state = state->KillMaps(object, zone());
+    Type* const new_value_type = NodeProperties::GetType(new_value);
+    if (new_value_type->IsHeapConstant()) {
+      // Record the new {object} map information.
+      ZoneHandleSet<Map> object_maps(
+          Handle<Map>::cast(new_value_type->AsHeapConstant()->Value()));
+      state = state->AddMaps(object, object_maps, zone());
     }
-    // Kill all potentially aliasing fields and record the new value.
-    state = state->KillField(object, field_index, zone());
-    state = state->AddField(object, field_index, new_value, zone());
   } else {
-    // Unsupported StoreField operator.
-    state = state->KillFields(object, zone());
+    int field_index = FieldIndexOf(access);
+    if (field_index >= 0) {
+      Node* const old_value = state->LookupField(object, field_index);
+      if (old_value == new_value) {
+        // This store is fully redundant.
+        return Replace(effect);
+      }
+      // Kill all potentially aliasing fields and record the new value.
+      state = state->KillField(object, field_index, zone());
+      state = state->AddField(object, field_index, new_value, zone());
+    } else {
+      // Unsupported StoreField operator.
+      state = state->KillFields(object, zone());
+    }
   }
   return UpdateState(node, state);
 }
@@ -703,6 +807,7 @@
       if (!NodeProperties::GetType(replacement)->Is(node_type)) {
         replacement = graph()->NewNode(common()->TypeGuard(node_type),
                                        replacement, control);
+        NodeProperties::SetType(replacement, node_type);
       }
       ReplaceWithValue(node, replacement, effect);
       return Replace(replacement);
@@ -730,6 +835,9 @@
   // Only record the new value if the store doesn't have an implicit truncation.
   switch (access.machine_type.representation()) {
     case MachineRepresentation::kNone:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kBit:
       UNREACHABLE();
       break;
@@ -865,21 +973,31 @@
             break;
           }
           case IrOpcode::kTransitionElementsKind: {
+            ElementsTransition transition = ElementsTransitionOf(current->op());
             Node* const object = NodeProperties::GetValueInput(current, 0);
-            state = state->KillField(
-                object, FieldIndexOf(HeapObject::kMapOffset), zone());
-            state = state->KillField(
-                object, FieldIndexOf(JSObject::kElementsOffset), zone());
+            ZoneHandleSet<Map> object_maps;
+            if (!state->LookupMaps(object, &object_maps) ||
+                !ZoneHandleSet<Map>(transition.target())
+                     .contains(object_maps)) {
+              state = state->KillMaps(object, zone());
+              state = state->KillField(
+                  object, FieldIndexOf(JSObject::kElementsOffset), zone());
+            }
             break;
           }
           case IrOpcode::kStoreField: {
             FieldAccess const& access = FieldAccessOf(current->op());
             Node* const object = NodeProperties::GetValueInput(current, 0);
-            int field_index = FieldIndexOf(access);
-            if (field_index < 0) {
-              state = state->KillFields(object, zone());
+            if (access.offset == HeapObject::kMapOffset) {
+              // Invalidate what we know about the {object}s map.
+              state = state->KillMaps(object, zone());
             } else {
-              state = state->KillField(object, field_index, zone());
+              int field_index = FieldIndexOf(access);
+              if (field_index < 0) {
+                state = state->KillFields(object, zone());
+              } else {
+                state = state->KillField(object, field_index, zone());
+              }
             }
             break;
           }
@@ -911,7 +1029,8 @@
   DCHECK_EQ(0, offset % kPointerSize);
   int field_index = offset / kPointerSize;
   if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
-  return field_index;
+  DCHECK_LT(0, field_index);
+  return field_index - 1;
 }
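With object maps now tracked by AbstractMaps, offset 0 (the map word) is never a tracked field, so field indices shift down by one and slot 0 becomes the first field after the map. A compilable restatement of the arithmetic, assuming 8-byte pointers:

#include <cassert>

constexpr int kPointerSize = 8;       // assumption for this sketch
constexpr int kMaxTrackedFields = 32;

int FieldIndexOf(int offset) {
  assert(offset % kPointerSize == 0);
  int field_index = offset / kPointerSize;
  if (field_index >= kMaxTrackedFields) return -1;
  assert(field_index > 0);  // offset 0 is the map; AbstractMaps handles it
  return field_index - 1;
}

int main() {
  assert(FieldIndexOf(kPointerSize) == 0);        // first field after the map
  assert(FieldIndexOf(2 * kPointerSize) == 1);
  assert(FieldIndexOf(32 * kPointerSize) == -1);  // outside the tracked window
}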
 
 // static
@@ -921,6 +1040,9 @@
     case MachineRepresentation::kNone:
     case MachineRepresentation::kBit:
     case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
       UNREACHABLE();
       break;
     case MachineRepresentation::kWord32:
@@ -957,6 +1079,8 @@
 
 Graph* LoadElimination::graph() const { return jsgraph()->graph(); }
 
+Factory* LoadElimination::factory() const { return jsgraph()->factory(); }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/load-elimination.h b/src/compiler/load-elimination.h
index 50979e4..cd486a2 100644
--- a/src/compiler/load-elimination.h
+++ b/src/compiler/load-elimination.h
@@ -8,9 +8,14 @@
 #include "src/base/compiler-specific.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/globals.h"
+#include "src/zone/zone-handle-set.h"
 
 namespace v8 {
 namespace internal {
+
+// Forward declarations.
+class Factory;
+
 namespace compiler {
 
-// Foward declarations.
+// Forward declarations.
@@ -152,6 +157,49 @@
 
   static size_t const kMaxTrackedFields = 32;
 
+  // Abstract state to approximate the current map of an object along the
+  // effect paths through the graph.
+  class AbstractMaps final : public ZoneObject {
+   public:
+    explicit AbstractMaps(Zone* zone) : info_for_node_(zone) {}
+    AbstractMaps(Node* object, ZoneHandleSet<Map> maps, Zone* zone)
+        : info_for_node_(zone) {
+      info_for_node_.insert(std::make_pair(object, maps));
+    }
+
+    AbstractMaps const* Extend(Node* object, ZoneHandleSet<Map> maps,
+                               Zone* zone) const {
+      AbstractMaps* that = new (zone) AbstractMaps(zone);
+      that->info_for_node_ = this->info_for_node_;
+      that->info_for_node_.insert(std::make_pair(object, maps));
+      return that;
+    }
+    bool Lookup(Node* object, ZoneHandleSet<Map>* object_maps) const;
+    AbstractMaps const* Kill(Node* object, Zone* zone) const;
+    bool Equals(AbstractMaps const* that) const {
+      return this == that || this->info_for_node_ == that->info_for_node_;
+    }
+    AbstractMaps const* Merge(AbstractMaps const* that, Zone* zone) const {
+      if (this->Equals(that)) return this;
+      AbstractMaps* copy = new (zone) AbstractMaps(zone);
+      for (auto this_it : this->info_for_node_) {
+        Node* this_object = this_it.first;
+        ZoneHandleSet<Map> this_maps = this_it.second;
+        auto that_it = that->info_for_node_.find(this_object);
+        if (that_it != that->info_for_node_.end() &&
+            that_it->second == this_maps) {
+          copy->info_for_node_.insert(this_it);
+        }
+      }
+      return copy;
+    }
+
+    void Print() const;
+
+   private:
+    ZoneMap<Node*, ZoneHandleSet<Map>> info_for_node_;
+  };
+
   class AbstractState final : public ZoneObject {
    public:
     AbstractState() {
@@ -163,6 +211,11 @@
     bool Equals(AbstractState const* that) const;
     void Merge(AbstractState const* that, Zone* zone);
 
+    AbstractState const* AddMaps(Node* object, ZoneHandleSet<Map> maps,
+                                 Zone* zone) const;
+    AbstractState const* KillMaps(Node* object, Zone* zone) const;
+    bool LookupMaps(Node* object, ZoneHandleSet<Map>* object_maps) const;
+
     AbstractState const* AddField(Node* object, size_t index, Node* value,
                                   Zone* zone) const;
     AbstractState const* KillField(Node* object, size_t index,
@@ -185,6 +238,7 @@
     AbstractChecks const* checks_ = nullptr;
     AbstractElements const* elements_ = nullptr;
     AbstractField const* fields_[kMaxTrackedFields];
+    AbstractMaps const* maps_ = nullptr;
   };
 
   class AbstractStateForEffectNodes final : public ZoneObject {
@@ -223,6 +277,7 @@
 
   CommonOperatorBuilder* common() const;
   AbstractState const* empty_state() const { return &empty_state_; }
+  Factory* factory() const;
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
   Zone* zone() const { return node_states_.zone(); }
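AbstractMaps::Merge above implements a simple meet over the per-node map facts: at a control-flow join, only the entries on which both predecessor states agree survive. Sketched with std::map and node ids as stand-ins:

#include <cassert>
#include <map>
#include <set>
#include <string>

using MapSet = std::set<std::string>;
using AbstractMaps = std::map<int, MapSet>;  // node id -> known maps

// Keep only entries present with an identical map set in both states.
AbstractMaps Merge(const AbstractMaps& a, const AbstractMaps& b) {
  AbstractMaps result;
  for (const auto& entry : a) {
    auto it = b.find(entry.first);
    if (it != b.end() && it->second == entry.second) result.insert(entry);
  }
  return result;
}

int main() {
  AbstractMaps left = {{1, {"MapA"}}, {2, {"MapB"}}};
  AbstractMaps right = {{1, {"MapA"}}, {2, {"MapC"}}};
  AbstractMaps merged = Merge(left, right);
  assert(merged.size() == 1 && merged.at(1) == MapSet{"MapA"});
}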
diff --git a/src/compiler/loop-variable-optimizer.cc b/src/compiler/loop-variable-optimizer.cc
index 55cce26..9bade27 100644
--- a/src/compiler/loop-variable-optimizer.cc
+++ b/src/compiler/loop-variable-optimizer.cc
@@ -303,9 +303,11 @@
   Node* initial = phi->InputAt(0);
   Node* arith = phi->InputAt(1);
   InductionVariable::ArithmeticType arithmeticType;
-  if (arith->opcode() == IrOpcode::kJSAdd) {
+  if (arith->opcode() == IrOpcode::kJSAdd ||
+      arith->opcode() == IrOpcode::kSpeculativeNumberAdd) {
     arithmeticType = InductionVariable::ArithmeticType::kAddition;
-  } else if (arith->opcode() == IrOpcode::kJSSubtract) {
+  } else if (arith->opcode() == IrOpcode::kJSSubtract ||
+             arith->opcode() == IrOpcode::kSpeculativeNumberSubtract) {
     arithmeticType = InductionVariable::ArithmeticType::kSubtraction;
   } else {
     return nullptr;
diff --git a/src/compiler/machine-graph-verifier.cc b/src/compiler/machine-graph-verifier.cc
index a8f7a25..2d5fce5 100644
--- a/src/compiler/machine-graph-verifier.cc
+++ b/src/compiler/machine-graph-verifier.cc
@@ -30,6 +30,10 @@
     Run();
   }
 
+  CallDescriptor* call_descriptor() const {
+    return linkage_->GetIncomingDescriptor();
+  }
+
   MachineRepresentation GetRepresentation(Node const* node) const {
     return representation_vector_.at(node->id());
   }
@@ -66,6 +70,18 @@
     }
   }
 
+  MachineRepresentation PromoteRepresentation(MachineRepresentation rep) {
+    switch (rep) {
+      case MachineRepresentation::kWord8:
+      case MachineRepresentation::kWord16:
+      case MachineRepresentation::kWord32:
+        return MachineRepresentation::kWord32;
+      default:
+        break;
+    }
+    return rep;
+  }
+
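The rationale for PromoteRepresentation, reflected throughout the hunks below: a sub-word load or store still moves a full 32-bit register value, so for checking purposes kWord8 and kWord16 collapse into kWord32. A standalone restatement with a stand-in enum:

#include <cassert>

enum class Rep { kNone, kWord8, kWord16, kWord32, kWord64, kTagged };

Rep PromoteRepresentation(Rep rep) {
  switch (rep) {
    case Rep::kWord8:
    case Rep::kWord16:
    case Rep::kWord32:
      return Rep::kWord32;  // sub-word values live in 32-bit registers
    default:
      return rep;
  }
}

int main() {
  assert(PromoteRepresentation(Rep::kWord8) == Rep::kWord32);
  assert(PromoteRepresentation(Rep::kTagged) == Rep::kTagged);
}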
   void Run() {
     auto blocks = schedule_->all_blocks();
     for (BasicBlock* block : *blocks) {
@@ -82,6 +98,11 @@
                 linkage_->GetParameterType(ParameterIndexOf(node->op()))
                     .representation();
             break;
+          case IrOpcode::kReturn: {
+            representation_vector_[node->id()] = PromoteRepresentation(
+                linkage_->GetReturnType().representation());
+            break;
+          }
           case IrOpcode::kProjection: {
             representation_vector_[node->id()] = GetProjectionType(node);
           } break;
@@ -91,12 +112,12 @@
           case IrOpcode::kAtomicLoad:
           case IrOpcode::kLoad:
           case IrOpcode::kProtectedLoad:
-            representation_vector_[node->id()] =
-                LoadRepresentationOf(node->op()).representation();
+            representation_vector_[node->id()] = PromoteRepresentation(
+                LoadRepresentationOf(node->op()).representation());
             break;
           case IrOpcode::kCheckedLoad:
-            representation_vector_[node->id()] =
-                CheckedLoadRepresentationOf(node->op()).representation();
+            representation_vector_[node->id()] = PromoteRepresentation(
+                CheckedLoadRepresentationOf(node->op()).representation());
             break;
           case IrOpcode::kLoadStackPointer:
           case IrOpcode::kLoadFramePointer:
@@ -104,6 +125,10 @@
             representation_vector_[node->id()] =
                 MachineType::PointerRepresentation();
             break;
+          case IrOpcode::kUnalignedLoad:
+            representation_vector_[node->id()] = PromoteRepresentation(
+                UnalignedLoadRepresentationOf(node->op()).representation());
+            break;
           case IrOpcode::kPhi:
             representation_vector_[node->id()] =
                 PhiRepresentationOf(node->op());
@@ -119,9 +144,22 @@
             }
             break;
           }
-          case IrOpcode::kUnalignedLoad:
+          case IrOpcode::kAtomicStore:
             representation_vector_[node->id()] =
-                UnalignedLoadRepresentationOf(node->op()).representation();
+                PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
+            break;
+          case IrOpcode::kStore:
+          case IrOpcode::kProtectedStore:
+            representation_vector_[node->id()] = PromoteRepresentation(
+                StoreRepresentationOf(node->op()).representation());
+            break;
+          case IrOpcode::kCheckedStore:
+            representation_vector_[node->id()] =
+                PromoteRepresentation(CheckedStoreRepresentationOf(node->op()));
+            break;
+          case IrOpcode::kUnalignedStore:
+            representation_vector_[node->id()] = PromoteRepresentation(
+                UnalignedStoreRepresentationOf(node->op()));
             break;
           case IrOpcode::kHeapConstant:
           case IrOpcode::kNumberConstant:
@@ -170,6 +208,8 @@
           case IrOpcode::kTruncateFloat32ToUint32:
           case IrOpcode::kBitcastFloat32ToInt32:
           case IrOpcode::kInt32x4ExtractLane:
+          case IrOpcode::kInt16x8ExtractLane:
+          case IrOpcode::kInt8x16ExtractLane:
           case IrOpcode::kInt32Constant:
           case IrOpcode::kRelocatableInt32Constant:
           case IrOpcode::kTruncateFloat64ToWord32:
@@ -237,8 +277,12 @@
  public:
   MachineRepresentationChecker(
       Schedule const* const schedule,
-      MachineRepresentationInferrer const* const inferrer)
-      : schedule_(schedule), inferrer_(inferrer) {}
+      MachineRepresentationInferrer const* const inferrer, bool is_stub,
+      const char* name)
+      : schedule_(schedule),
+        inferrer_(inferrer),
+        is_stub_(is_stub),
+        name_(name) {}
 
   void Run() {
     BasicBlockVector const* blocks = schedule_->all_blocks();
@@ -290,9 +334,17 @@
             CheckValueInputForFloat64Op(node, 0);
             break;
           case IrOpcode::kWord64Equal:
-            CheckValueInputIsTaggedOrPointer(node, 0);
-            CheckValueInputRepresentationIs(
-                node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
+            if (Is64()) {
+              CheckValueInputIsTaggedOrPointer(node, 0);
+              CheckValueInputIsTaggedOrPointer(node, 1);
+              if (!is_stub_) {
+                CheckValueInputRepresentationIs(
+                    node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
+              }
+            } else {
+              CheckValueInputForInt64Op(node, 0);
+              CheckValueInputForInt64Op(node, 1);
+            }
             break;
           case IrOpcode::kInt64LessThan:
           case IrOpcode::kInt64LessThanOrEqual:
@@ -302,6 +354,8 @@
             CheckValueInputForInt64Op(node, 1);
             break;
           case IrOpcode::kInt32x4ExtractLane:
+          case IrOpcode::kInt16x8ExtractLane:
+          case IrOpcode::kInt8x16ExtractLane:
             CheckValueInputRepresentationIs(node, 0,
                                             MachineRepresentation::kSimd128);
             break;
@@ -317,6 +371,19 @@
             MACHINE_UNOP_32_LIST(LABEL) { CheckValueInputForInt32Op(node, 0); }
             break;
           case IrOpcode::kWord32Equal:
+            if (Is32()) {
+              CheckValueInputIsTaggedOrPointer(node, 0);
+              CheckValueInputIsTaggedOrPointer(node, 1);
+              if (!is_stub_) {
+                CheckValueInputRepresentationIs(
+                    node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
+              }
+            } else {
+              CheckValueInputForInt32Op(node, 0);
+              CheckValueInputForInt32Op(node, 1);
+            }
+            break;
+
           case IrOpcode::kInt32LessThan:
           case IrOpcode::kInt32LessThanOrEqual:
           case IrOpcode::kUint32LessThan:
@@ -374,7 +441,7 @@
             CheckValueInputIsTaggedOrPointer(node, 0);
             CheckValueInputRepresentationIs(
                 node, 1, MachineType::PointerRepresentation());
-            switch (StoreRepresentationOf(node->op()).representation()) {
+            switch (inferrer_->GetRepresentation(node)) {
               case MachineRepresentation::kTagged:
               case MachineRepresentation::kTaggedPointer:
               case MachineRepresentation::kTaggedSigned:
@@ -382,15 +449,14 @@
                 break;
               default:
                 CheckValueInputRepresentationIs(
-                    node, 2,
-                    StoreRepresentationOf(node->op()).representation());
+                    node, 2, inferrer_->GetRepresentation(node));
             }
             break;
           case IrOpcode::kAtomicStore:
             CheckValueInputIsTaggedOrPointer(node, 0);
             CheckValueInputRepresentationIs(
                 node, 1, MachineType::PointerRepresentation());
-            switch (AtomicStoreRepresentationOf(node->op())) {
+            switch (inferrer_->GetRepresentation(node)) {
               case MachineRepresentation::kTagged:
               case MachineRepresentation::kTaggedPointer:
               case MachineRepresentation::kTaggedSigned:
@@ -398,7 +464,7 @@
                 break;
               default:
                 CheckValueInputRepresentationIs(
-                    node, 2, AtomicStoreRepresentationOf(node->op()));
+                    node, 2, inferrer_->GetRepresentation(node));
             }
             break;
           case IrOpcode::kPhi:
@@ -410,6 +476,11 @@
                   CheckValueInputIsTagged(node, i);
                 }
                 break;
+              case MachineRepresentation::kWord32:
+                for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+                  CheckValueInputForInt32Op(node, i);
+                }
+                break;
               default:
                 for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
                   CheckValueInputRepresentationIs(
@@ -422,9 +493,35 @@
           case IrOpcode::kSwitch:
             CheckValueInputForInt32Op(node, 0);
             break;
-          case IrOpcode::kReturn:
-            // TODO(epertoso): use the linkage to determine which tipe we
-            // should have here.
+          case IrOpcode::kReturn: {
+            // TODO(ishell): enable once the pop count parameter type becomes
+            // MachineType::PointerRepresentation(). Currently it's int32 or
+            // word-size.
+            // CheckValueInputRepresentationIs(
+            //     node, 0, MachineType::PointerRepresentation());  // Pop count
+            size_t return_count = inferrer_->call_descriptor()->ReturnCount();
+            for (size_t i = 0; i < return_count; i++) {
+              MachineType type = inferrer_->call_descriptor()->GetReturnType(i);
+              int input_index = static_cast<int>(i + 1);
+              switch (type.representation()) {
+                case MachineRepresentation::kTagged:
+                case MachineRepresentation::kTaggedPointer:
+                case MachineRepresentation::kTaggedSigned:
+                  CheckValueInputIsTagged(node, input_index);
+                  break;
+                case MachineRepresentation::kWord32:
+                  CheckValueInputForInt32Op(node, input_index);
+                  break;
+                default:
+                  CheckValueInputRepresentationIs(
+                      node, input_index,
+                      type.representation());
+              }
+            }
+            break;
+          }
+          case IrOpcode::kThrow:
+            CheckValueInputIsTagged(node, 0);
             break;
           case IrOpcode::kTypedStateValues:
           case IrOpcode::kFrameState:
@@ -434,6 +531,7 @@
               std::stringstream str;
               str << "Node #" << node->id() << ":" << *node->op()
                   << " in the machine graph is not being checked.";
+              PrintDebugHelp(str, node);
               FATAL(str.str().c_str());
             }
             break;
@@ -443,6 +541,15 @@
   }
 
  private:
+  static bool Is32() {
+    return MachineType::PointerRepresentation() ==
+           MachineRepresentation::kWord32;
+  }
+  static bool Is64() {
+    return MachineType::PointerRepresentation() ==
+           MachineRepresentation::kWord64;
+  }
+
   void CheckValueInputRepresentationIs(Node const* node, int index,
                                        MachineRepresentation representation) {
     Node const* input = node->InputAt(index);
@@ -450,10 +557,11 @@
         inferrer_->GetRepresentation(input);
     if (input_representation != representation) {
       std::stringstream str;
-      str << "TypeError: node #" << node->id() << ":" << *node->op() << ":"
-          << MachineReprToString(input_representation) << " uses node #"
-          << input->id() << ":" << *input->op() << " which doesn't have a "
-          << MachineReprToString(representation) << " representation.";
+      str << "TypeError: node #" << node->id() << ":" << *node->op()
+          << " uses node #" << input->id() << ":" << *input->op() << ":"
+          << input_representation << " which doesn't have a " << representation
+          << " representation.";
+      PrintDebugHelp(str, node);
       FATAL(str.str().c_str());
     }
   }
@@ -472,6 +580,7 @@
     str << "TypeError: node #" << node->id() << ":" << *node->op()
         << " uses node #" << input->id() << ":" << *input->op()
         << " which doesn't have a tagged representation.";
+    PrintDebugHelp(str, node);
     FATAL(str.str().c_str());
   }
 
@@ -482,6 +591,19 @@
       case MachineRepresentation::kTaggedPointer:
       case MachineRepresentation::kTaggedSigned:
         return;
+      case MachineRepresentation::kBit:
+      case MachineRepresentation::kWord8:
+      case MachineRepresentation::kWord16:
+      case MachineRepresentation::kWord32:
+        if (Is32()) {
+          return;
+        }
+        break;
+      case MachineRepresentation::kWord64:
+        if (Is64()) {
+          return;
+        }
+        break;
       default:
         break;
     }
@@ -491,6 +613,7 @@
       str << "TypeError: node #" << node->id() << ":" << *node->op()
           << " uses node #" << input->id() << ":" << *input->op()
           << " which doesn't have a tagged or pointer representation.";
+      PrintDebugHelp(str, node);
       FATAL(str.str().c_str());
     }
   }
@@ -507,6 +630,7 @@
         std::ostringstream str;
         str << "TypeError: node #" << input->id() << ":" << *input->op()
             << " is untyped.";
+        PrintDebugHelp(str, node);
         FATAL(str.str().c_str());
         break;
       }
@@ -517,6 +641,7 @@
     str << "TypeError: node #" << node->id() << ":" << *node->op()
         << " uses node #" << input->id() << ":" << *input->op()
         << " which doesn't have an int32-compatible representation.";
+    PrintDebugHelp(str, node);
     FATAL(str.str().c_str());
   }
 
@@ -531,6 +656,7 @@
         std::ostringstream str;
         str << "TypeError: node #" << input->id() << ":" << *input->op()
             << " is untyped.";
+        PrintDebugHelp(str, node);
         FATAL(str.str().c_str());
         break;
       }
@@ -539,9 +665,11 @@
         break;
     }
     std::ostringstream str;
-    str << "TypeError: node #" << node->id() << ":" << *node->op() << ":"
-        << input_representation << " uses node #" << input->id() << ":"
-        << *input->op() << " which doesn't have a kWord64 representation.";
+    str << "TypeError: node #" << node->id() << ":" << *node->op()
+        << " uses node #" << input->id() << ":" << *input->op() << ":"
+        << input_representation
+        << " which doesn't have a kWord64 representation.";
+    PrintDebugHelp(str, node);
     FATAL(str.str().c_str());
   }
 
@@ -555,6 +683,7 @@
     str << "TypeError: node #" << node->id() << ":" << *node->op()
         << " uses node #" << input->id() << ":" << *input->op()
         << " which doesn't have a kFloat32 representation.";
+    PrintDebugHelp(str, node);
     FATAL(str.str().c_str());
   }
 
@@ -568,6 +697,7 @@
     str << "TypeError: node #" << node->id() << ":" << *node->op()
         << " uses node #" << input->id() << ":" << *input->op()
         << " which doesn't have a kFloat64 representation.";
+    PrintDebugHelp(str, node);
     FATAL(str.str().c_str());
   }
 
@@ -590,11 +720,11 @@
           str << std::endl;
         }
         str << " * input " << i << " (" << input->id() << ":" << *input->op()
-            << ") doesn't have a " << MachineReprToString(expected_input_type)
-            << " representation.";
+            << ") doesn't have a " << expected_input_type << " representation.";
       }
     }
     if (should_log_error) {
+      PrintDebugHelp(str, node);
       FATAL(str.str().c_str());
     }
   }
@@ -640,6 +770,9 @@
       case MachineRepresentation::kFloat32:
       case MachineRepresentation::kFloat64:
       case MachineRepresentation::kSimd128:
+      case MachineRepresentation::kSimd1x4:
+      case MachineRepresentation::kSimd1x8:
+      case MachineRepresentation::kSimd1x16:
       case MachineRepresentation::kBit:
       case MachineRepresentation::kWord8:
       case MachineRepresentation::kWord16:
@@ -657,17 +790,28 @@
     return false;
   }
 
+  void PrintDebugHelp(std::ostream& out, Node const* node) {
+    if (DEBUG_BOOL) {
+      out << "\n#\n# Specify option --csa-trap-on-node=" << name_ << ","
+          << node->id() << " for debugging.";
+    }
+  }
+
   Schedule const* const schedule_;
   MachineRepresentationInferrer const* const inferrer_;
+  bool is_stub_;
+  const char* name_;
 };
 
 }  // namespace
 
 void MachineGraphVerifier::Run(Graph* graph, Schedule const* const schedule,
-                               Linkage* linkage, Zone* temp_zone) {
+                               Linkage* linkage, bool is_stub, const char* name,
+                               Zone* temp_zone) {
   MachineRepresentationInferrer representation_inferrer(schedule, graph,
                                                         linkage, temp_zone);
-  MachineRepresentationChecker checker(schedule, &representation_inferrer);
+  MachineRepresentationChecker checker(schedule, &representation_inferrer,
+                                       is_stub, name);
   checker.Run();
 }
 
diff --git a/src/compiler/machine-graph-verifier.h b/src/compiler/machine-graph-verifier.h
index b7d7b61..26e5d77 100644
--- a/src/compiler/machine-graph-verifier.h
+++ b/src/compiler/machine-graph-verifier.h
@@ -21,7 +21,8 @@
 class MachineGraphVerifier {
  public:
   static void Run(Graph* graph, Schedule const* const schedule,
-                  Linkage* linkage, Zone* temp_zone);
+                  Linkage* linkage, bool is_stub, const char* name,
+                  Zone* temp_zone);
 };
 
 }  // namespace compiler
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index 0ad20f0..a50f0dc 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -12,14 +12,15 @@
 #include "src/compiler/graph.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-matchers.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-MachineOperatorReducer::MachineOperatorReducer(JSGraph* jsgraph)
-    : jsgraph_(jsgraph) {}
-
+MachineOperatorReducer::MachineOperatorReducer(JSGraph* jsgraph,
+                                               bool allow_signalling_nan)
+    : jsgraph_(jsgraph), allow_signalling_nan_(allow_signalling_nan) {}
 
 MachineOperatorReducer::~MachineOperatorReducer() {}
 
@@ -50,12 +51,12 @@
 Node* MachineOperatorReducer::Float64PowHalf(Node* value) {
   value =
       graph()->NewNode(machine()->Float64Add(), Float64Constant(0.0), value);
-  return graph()->NewNode(
-      common()->Select(MachineRepresentation::kFloat64, BranchHint::kFalse),
-      graph()->NewNode(machine()->Float64LessThanOrEqual(), value,
-                       Float64Constant(-V8_INFINITY)),
-      Float64Constant(V8_INFINITY),
-      graph()->NewNode(machine()->Float64Sqrt(), value));
+  Diamond d(graph(), common(),
+            graph()->NewNode(machine()->Float64LessThanOrEqual(), value,
+                             Float64Constant(-V8_INFINITY)),
+            BranchHint::kFalse);
+  return d.Phi(MachineRepresentation::kFloat64, Float64Constant(V8_INFINITY),
+               graph()->NewNode(machine()->Float64Sqrt(), value));
 }
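The rewritten Float64PowHalf encodes two IEEE-754 corner cases of pow(x, 0.5) that a bare sqrt would get wrong: pow(-0.0, 0.5) is +0.0 (hence the "+ 0.0" normalization, since -0.0 + 0.0 == +0.0 while sqrt(-0.0) is -0.0), and pow(-inf, 0.5) is +inf while sqrt(-inf) is NaN (hence the diamond on value <= -inf). A runnable check:

#include <cassert>
#include <cmath>

int main() {
  assert(!std::signbit(std::pow(-0.0, 0.5)));   // pow(-0, 0.5) == +0
  assert(std::signbit(std::sqrt(-0.0)));        // sqrt(-0) == -0
  assert(!std::signbit(-0.0 + 0.0));            // the "+ 0.0" normalization
  assert(std::pow(-INFINITY, 0.5) == INFINITY);
  assert(std::isnan(std::sqrt(-INFINITY)));     // why sqrt alone is not enough
}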
 
 Node* MachineOperatorReducer::Word32And(Node* lhs, Node* rhs) {
@@ -316,19 +317,22 @@
     }
     case IrOpcode::kFloat32Sub: {
       Float32BinopMatcher m(node);
-      if (m.right().Is(0) && (copysign(1.0, m.right().Value()) > 0)) {
+      if (allow_signalling_nan_ && m.right().Is(0) &&
+          (copysign(1.0, m.right().Value()) > 0)) {
         return Replace(m.left().node());  // x - 0 => x
       }
       if (m.right().IsNaN()) {  // x - NaN => NaN
-        return Replace(m.right().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat32(m.right().Value() - m.right().Value());
       }
       if (m.left().IsNaN()) {  // NaN - x => NaN
-        return Replace(m.left().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat32(m.left().Value() - m.left().Value());
       }
       if (m.IsFoldable()) {  // L - R => (L - R)
         return ReplaceFloat32(m.left().Value() - m.right().Value());
       }
-      if (m.left().IsMinusZero()) {
+      if (allow_signalling_nan_ && m.left().IsMinusZero()) {
         // -0.0 - round_down(-0.0 - R) => round_up(R)
         if (machine()->Float32RoundUp().IsSupported() &&
             m.right().IsFloat32RoundDown()) {
@@ -350,7 +354,8 @@
     case IrOpcode::kFloat64Add: {
       Float64BinopMatcher m(node);
       if (m.right().IsNaN()) {  // x + NaN => NaN
-        return Replace(m.right().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat64(m.right().Value() - m.right().Value());
       }
       if (m.IsFoldable()) {  // K + K => K
         return ReplaceFloat64(m.left().Value() + m.right().Value());
@@ -359,19 +364,22 @@
     }
     case IrOpcode::kFloat64Sub: {
       Float64BinopMatcher m(node);
-      if (m.right().Is(0) && (Double(m.right().Value()).Sign() > 0)) {
+      if (allow_signalling_nan_ && m.right().Is(0) &&
+          (Double(m.right().Value()).Sign() > 0)) {
         return Replace(m.left().node());  // x - 0 => x
       }
       if (m.right().IsNaN()) {  // x - NaN => NaN
-        return Replace(m.right().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat64(m.right().Value() - m.right().Value());
       }
       if (m.left().IsNaN()) {  // NaN - x => NaN
-        return Replace(m.left().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat64(m.left().Value() - m.left().Value());
       }
       if (m.IsFoldable()) {  // L - R => (L - R)
         return ReplaceFloat64(m.left().Value() - m.right().Value());
       }
-      if (m.left().IsMinusZero()) {
+      if (allow_signalling_nan_ && m.left().IsMinusZero()) {
         // -0.0 - round_down(-0.0 - R) => round_up(R)
         if (machine()->Float64RoundUp().IsSupported() &&
             m.right().IsFloat64RoundDown()) {
@@ -392,15 +400,17 @@
     }
     case IrOpcode::kFloat64Mul: {
       Float64BinopMatcher m(node);
+      if (allow_signalling_nan_ && m.right().Is(1))
+        return Replace(m.left().node());  // x * 1.0 => x
       if (m.right().Is(-1)) {  // x * -1.0 => -0.0 - x
         node->ReplaceInput(0, Float64Constant(-0.0));
         node->ReplaceInput(1, m.left().node());
         NodeProperties::ChangeOp(node, machine()->Float64Sub());
         return Changed(node);
       }
-      if (m.right().Is(1)) return Replace(m.left().node());  // x * 1.0 => x
       if (m.right().IsNaN()) {                               // x * NaN => NaN
-        return Replace(m.right().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat64(m.right().Value() - m.right().Value());
       }
       if (m.IsFoldable()) {  // K * K => K
         return ReplaceFloat64(m.left().Value() * m.right().Value());
@@ -414,17 +424,21 @@
     }
     case IrOpcode::kFloat64Div: {
       Float64BinopMatcher m(node);
-      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1.0 => x
+      if (allow_signalling_nan_ && m.right().Is(1))
+        return Replace(m.left().node());  // x / 1.0 => x
+      // TODO(ahaas): We could do x / 1.0 = x if we knew that x is not an sNaN.
       if (m.right().IsNaN()) {                               // x / NaN => NaN
-        return Replace(m.right().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat64(m.right().Value() - m.right().Value());
       }
       if (m.left().IsNaN()) {  // NaN / x => NaN
-        return Replace(m.left().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat64(m.left().Value() - m.left().Value());
       }
       if (m.IsFoldable()) {  // K / K => K
         return ReplaceFloat64(m.left().Value() / m.right().Value());
       }
-      if (m.right().Is(-1)) {  // x / -1.0 => -x
+      if (allow_signalling_nan_ && m.right().Is(-1)) {  // x / -1.0 => -x
         node->RemoveInput(1);
         NodeProperties::ChangeOp(node, machine()->Float64Neg());
         return Changed(node);
@@ -593,7 +607,13 @@
     }
     case IrOpcode::kChangeFloat32ToFloat64: {
       Float32Matcher m(node->InputAt(0));
-      if (m.HasValue()) return ReplaceFloat64(m.Value());
+      if (m.HasValue()) {
+        if (!allow_signalling_nan_ && std::isnan(m.Value())) {
+          // Do some calculation to guarantee the value is a quiet NaN.
+          return ReplaceFloat64(m.Value() + m.Value());
+        }
+        return ReplaceFloat64(m.Value());
+      }
       break;
     }
     case IrOpcode::kChangeFloat64ToInt32: {
@@ -642,8 +662,15 @@
     }
     case IrOpcode::kTruncateFloat64ToFloat32: {
       Float64Matcher m(node->InputAt(0));
-      if (m.HasValue()) return ReplaceFloat32(DoubleToFloat32(m.Value()));
-      if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
+      if (m.HasValue()) {
+        if (!allow_signalling_nan_ && std::isnan(m.Value())) {
+          // Do some calculation to guarantee the value is a quiet NaN.
+          return ReplaceFloat32(DoubleToFloat32(m.Value() + m.Value()));
+        }
+        return ReplaceFloat32(DoubleToFloat32(m.Value()));
+      }
+      if (allow_signalling_nan_ && m.IsChangeFloat32ToFloat64())
+        return Replace(m.node()->InputAt(0));
       break;
     }
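The recurring "do some calculation" comments rely on an IEEE-754 guarantee: any arithmetic operation on a signalling NaN delivers a quiet NaN, so folding x - x (or x + x) of a NaN constant ensures the reducer never embeds a signalling NaN in the graph when allow_signalling_nan_ is off. A runnable illustration:

#include <cassert>
#include <cmath>
#include <limits>

int main() {
  double snan = std::numeric_limits<double>::signaling_NaN();
  double quiet = snan - snan;  // arithmetic on an sNaN yields a qNaN
  assert(std::isnan(quiet));   // still NaN, but now a quiet one
}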
     case IrOpcode::kRoundFloat64ToInt32: {
@@ -664,6 +691,8 @@
     case IrOpcode::kFloat64LessThan:
     case IrOpcode::kFloat64LessThanOrEqual:
       return ReduceFloat64Compare(node);
+    case IrOpcode::kFloat64RoundDown:
+      return ReduceFloat64RoundDown(node);
     default:
       break;
   }
@@ -841,14 +870,13 @@
     if (base::bits::IsPowerOfTwo32(divisor)) {
       uint32_t const mask = divisor - 1;
       Node* const zero = Int32Constant(0);
-      node->ReplaceInput(
-          0, graph()->NewNode(machine()->Int32LessThan(), dividend, zero));
-      node->ReplaceInput(
-          1, Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)));
-      node->ReplaceInput(2, Word32And(dividend, mask));
-      NodeProperties::ChangeOp(
-          node,
-          common()->Select(MachineRepresentation::kWord32, BranchHint::kFalse));
+      Diamond d(graph(), common(),
+                graph()->NewNode(machine()->Int32LessThan(), dividend, zero),
+                BranchHint::kFalse);
+      return Replace(
+          d.Phi(MachineRepresentation::kWord32,
+                Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)),
+                Word32And(dividend, mask)));
     } else {
       Node* quotient = Int32Div(dividend, divisor);
       DCHECK_EQ(dividend, node->InputAt(0));
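The Diamond above computes truncated modulus by a power of two without a division: for m = 2^k and mask = m - 1, x % m equals x & mask when x >= 0 and -((-x) & mask) when x < 0. A quick verification of the identity (avoiding INT_MIN, whose negation overflows):

#include <cassert>
#include <cstdint>

int32_t ModPowerOfTwo(int32_t x, uint32_t mask) {
  return x < 0 ? -static_cast<int32_t>(static_cast<uint32_t>(-x) & mask)
               : static_cast<int32_t>(static_cast<uint32_t>(x) & mask);
}

int main() {
  const uint32_t mask = 8 - 1;  // divisor 2^3
  for (int32_t x : {13, -13, 8, -8, 7, -1, 0}) {
    assert(ModPowerOfTwo(x, mask) == x % 8);
  }
}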
@@ -1153,8 +1181,9 @@
     if (m.left().IsWord32Shl()) {
       Uint32BinopMatcher mleft(m.left().node());
       if (mleft.right().HasValue() &&
-          mleft.right().Value() >= base::bits::CountTrailingZeros32(mask)) {
-        // (x << L) & (-1 << K) => x << L iff K >= L
+          (mleft.right().Value() & 0x1f) >=
+              base::bits::CountTrailingZeros32(mask)) {
+        // (x << L) & (-1 << K) => x << L iff L >= K
         return Replace(mleft.node());
       }
     } else if (m.left().IsInt32Add()) {
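The corrected comment and the new "& 0x1f" read together: (x << L) & (-1 << K) reduces to x << L iff L >= K, because shifting left by L already zeroes the low L bits, and 32-bit shifts only use the low five bits of the shift count, so L must be masked before the comparison. A quick check of the identity:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t L = 7, K = 4;  // L >= K, so the AND is redundant
  for (uint32_t x : {0x12345678u, 0xffffffffu, 1u, 0u}) {
    assert(((x << L) & (0xffffffffu << K)) == (x << L));
  }
}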
@@ -1392,6 +1421,14 @@
   return NoChange();
 }
 
+Reduction MachineOperatorReducer::ReduceFloat64RoundDown(Node* node) {
+  DCHECK_EQ(IrOpcode::kFloat64RoundDown, node->opcode());
+  Float64Matcher m(node->InputAt(0));
+  if (m.HasValue()) {
+    return ReplaceFloat64(Floor(m.Value()));
+  }
+  return NoChange();
+}
 
 CommonOperatorBuilder* MachineOperatorReducer::common() const {
   return jsgraph()->common();
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index d0845d9..593f7f2 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -24,7 +24,8 @@
 class V8_EXPORT_PRIVATE MachineOperatorReducer final
     : public NON_EXPORTED_BASE(Reducer) {
  public:
-  explicit MachineOperatorReducer(JSGraph* jsgraph);
+  explicit MachineOperatorReducer(JSGraph* jsgraph,
+                                  bool allow_signalling_nan = true);
   ~MachineOperatorReducer();
 
   Reduction Reduce(Node* node) override;
@@ -96,6 +97,7 @@
   Reduction ReduceFloat64InsertLowWord32(Node* node);
   Reduction ReduceFloat64InsertHighWord32(Node* node);
   Reduction ReduceFloat64Compare(Node* node);
+  Reduction ReduceFloat64RoundDown(Node* node);
 
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
@@ -103,6 +105,7 @@
   MachineOperatorBuilder* machine() const;
 
   JSGraph* jsgraph_;
+  bool allow_signalling_nan_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index e36a61e..854c22e 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -43,7 +43,8 @@
 
 
 StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
-  DCHECK_EQ(IrOpcode::kStore, op->opcode());
+  DCHECK(IrOpcode::kStore == op->opcode() ||
+         IrOpcode::kProtectedStore == op->opcode());
   return OpParameter<StoreRepresentation>(op);
 }
 
@@ -69,9 +70,9 @@
   return OpParameter<CheckedStoreRepresentation>(op);
 }
 
-MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
+int StackSlotSizeOf(Operator const* op) {
   DCHECK_EQ(IrOpcode::kStackSlot, op->opcode());
-  return OpParameter<MachineRepresentation>(op);
+  return OpParameter<int>(op);
 }
 
 MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
@@ -129,7 +130,6 @@
   V(Word32Clz, Operator::kNoProperties, 1, 0, 1)                           \
   V(Word64Clz, Operator::kNoProperties, 1, 0, 1)                           \
   V(BitcastTaggedToWord, Operator::kNoProperties, 1, 0, 1)                 \
-  V(BitcastWordToTagged, Operator::kNoProperties, 1, 0, 1)                 \
   V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1)           \
   V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1)             \
   V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)              \
@@ -220,8 +220,6 @@
   V(Word32PairShr, Operator::kNoProperties, 3, 0, 2)                       \
   V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)                       \
   V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1)                     \
-  V(Float32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                \
-  V(Float32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                \
   V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1)                        \
   V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1)                        \
   V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1)                       \
@@ -241,57 +239,36 @@
   V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)            \
   V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                \
   V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)         \
-  V(Float32x4Select, Operator::kNoProperties, 3, 0, 1)                     \
-  V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                    \
-  V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                    \
   V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1)                \
   V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1)               \
   V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1)                       \
-  V(Int32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Int32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                  \
   V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1)                          \
   V(Int32x4Add, Operator::kCommutative, 2, 0, 1)                           \
   V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1)                          \
   V(Int32x4Mul, Operator::kCommutative, 2, 0, 1)                           \
   V(Int32x4Min, Operator::kCommutative, 2, 0, 1)                           \
   V(Int32x4Max, Operator::kCommutative, 2, 0, 1)                           \
-  V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)            \
-  V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)           \
   V(Int32x4Equal, Operator::kCommutative, 2, 0, 1)                         \
   V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1)                      \
   V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1)                     \
   V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
   V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                  \
   V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)           \
-  V(Int32x4Select, Operator::kNoProperties, 3, 0, 1)                       \
-  V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                      \
-  V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                      \
   V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)                \
   V(Uint32x4Min, Operator::kCommutative, 2, 0, 1)                          \
   V(Uint32x4Max, Operator::kCommutative, 2, 0, 1)                          \
-  V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)           \
-  V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)          \
   V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1)                    \
   V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
   V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                 \
   V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)          \
   V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)               \
-  V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1)                      \
-  V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                 \
   V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
   V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1)                         \
   V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1)                     \
   V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1)                     \
-  V(Bool32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                     \
-  V(Bool32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                     \
-  V(Bool32x4Equal, Operator::kCommutative, 2, 0, 1)                        \
-  V(Bool32x4NotEqual, Operator::kCommutative, 2, 0, 1)                     \
   V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1)                       \
-  V(Int16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Int16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                  \
   V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1)                          \
   V(Int16x8Add, Operator::kCommutative, 2, 0, 1)                           \
   V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                   \
@@ -300,43 +277,27 @@
   V(Int16x8Mul, Operator::kCommutative, 2, 0, 1)                           \
   V(Int16x8Min, Operator::kCommutative, 2, 0, 1)                           \
   V(Int16x8Max, Operator::kCommutative, 2, 0, 1)                           \
-  V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)            \
-  V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)           \
   V(Int16x8Equal, Operator::kCommutative, 2, 0, 1)                         \
   V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1)                      \
   V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1)                     \
   V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
   V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                  \
   V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)           \
-  V(Int16x8Select, Operator::kNoProperties, 3, 0, 1)                       \
-  V(Int16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                      \
-  V(Int16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                     \
   V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                  \
   V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1)                 \
   V(Uint16x8Min, Operator::kCommutative, 2, 0, 1)                          \
   V(Uint16x8Max, Operator::kCommutative, 2, 0, 1)                          \
-  V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)           \
-  V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)          \
   V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1)                    \
   V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
   V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                 \
   V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)          \
-  V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1)                      \
-  V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                 \
   V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
   V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1)                         \
   V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1)                     \
   V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1)                     \
-  V(Bool16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                     \
-  V(Bool16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                    \
-  V(Bool16x8Equal, Operator::kCommutative, 2, 0, 1)                        \
-  V(Bool16x8NotEqual, Operator::kCommutative, 2, 0, 1)                     \
   V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1)                      \
-  V(Int8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Int8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                  \
   V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1)                          \
   V(Int8x16Add, Operator::kCommutative, 2, 0, 1)                           \
   V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                   \
@@ -345,40 +306,26 @@
   V(Int8x16Mul, Operator::kCommutative, 2, 0, 1)                           \
   V(Int8x16Min, Operator::kCommutative, 2, 0, 1)                           \
   V(Int8x16Max, Operator::kCommutative, 2, 0, 1)                           \
-  V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)            \
-  V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)           \
   V(Int8x16Equal, Operator::kCommutative, 2, 0, 1)                         \
   V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1)                      \
   V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1)                     \
   V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
   V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                  \
   V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)           \
-  V(Int8x16Select, Operator::kNoProperties, 3, 0, 1)                       \
-  V(Int8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                     \
-  V(Int8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                     \
   V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                  \
   V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1)                 \
   V(Uint8x16Min, Operator::kCommutative, 2, 0, 1)                          \
   V(Uint8x16Max, Operator::kCommutative, 2, 0, 1)                          \
-  V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)           \
-  V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)          \
   V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1)                    \
   V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
   V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                 \
   V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)          \
-  V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1)                     \
-  V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                 \
   V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
   V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1)                         \
   V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1)                     \
   V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1)                     \
-  V(Bool8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                    \
-  V(Bool8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                    \
-  V(Bool8x16Equal, Operator::kCommutative, 2, 0, 1)                        \
-  V(Bool8x16NotEqual, Operator::kCommutative, 2, 0, 1)                     \
   V(Simd128Load, Operator::kNoProperties, 2, 0, 1)                         \
   V(Simd128Load1, Operator::kNoProperties, 2, 0, 1)                        \
   V(Simd128Load2, Operator::kNoProperties, 2, 0, 1)                        \
@@ -390,7 +337,10 @@
   V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
   V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
   V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
-  V(Simd128Not, Operator::kNoProperties, 1, 0, 1)
+  V(Simd128Not, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Simd32x4Select, Operator::kNoProperties, 3, 0, 1)                      \
+  V(Simd16x8Select, Operator::kNoProperties, 3, 0, 1)                      \
+  V(Simd8x16Select, Operator::kNoProperties, 3, 0, 1)
 
 #define PURE_OPTIONAL_OP_LIST(V)                            \
   V(Word32Ctz, Operator::kNoProperties, 1, 0, 1)            \
@@ -460,6 +410,26 @@
   V(kWord16)                          \
   V(kWord32)
 
+#define SIMD_LANE_OP_LIST(V) \
+  V(Float32x4, 4)            \
+  V(Int32x4, 4)              \
+  V(Int16x8, 8)              \
+  V(Int8x16, 16)
+
+#define SIMD_FORMAT_LIST(V) \
+  V(32x4, 32)               \
+  V(16x8, 16)               \
+  V(8x16, 8)
+
+#define STACK_SLOT_CACHED_SIZES_LIST(V) V(4) V(8) V(16)
+
+struct StackSlotOperator : public Operator1<int> {
+  explicit StackSlotOperator(int size)
+      : Operator1<int>(IrOpcode::kStackSlot,
+                       Operator::kNoDeopt | Operator::kNoThrow, "StackSlot", 0,
+                       0, 0, 1, 0, 0, size) {}
+};
+
 struct MachineOperatorGlobalCache {
 #define PURE(Name, properties, value_input_count, control_input_count,         \
              output_count)                                                     \
@@ -485,56 +455,51 @@
   OVERFLOW_OP_LIST(OVERFLOW_OP)
 #undef OVERFLOW_OP
 
-#define LOAD(Type)                                                           \
-  struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
-    Load##Type##Operator()                                                   \
-        : Operator1<LoadRepresentation>(                                     \
-              IrOpcode::kLoad,                                               \
-              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
-              "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}              \
-  };                                                                         \
-  struct UnalignedLoad##Type##Operator final                                 \
-      : public Operator1<UnalignedLoadRepresentation> {                      \
-    UnalignedLoad##Type##Operator()                                          \
-        : Operator1<UnalignedLoadRepresentation>(                            \
-              IrOpcode::kUnalignedLoad,                                      \
-              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
-              "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}     \
-  };                                                                         \
-  struct CheckedLoad##Type##Operator final                                   \
-      : public Operator1<CheckedLoadRepresentation> {                        \
-    CheckedLoad##Type##Operator()                                            \
-        : Operator1<CheckedLoadRepresentation>(                              \
-              IrOpcode::kCheckedLoad,                                        \
-              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
-              "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {}       \
-  };                                                                         \
-  struct ProtectedLoad##Type##Operator final                                 \
-      : public Operator1<ProtectedLoadRepresentation> {                      \
-    ProtectedLoad##Type##Operator()                                          \
-        : Operator1<ProtectedLoadRepresentation>(                            \
-              IrOpcode::kProtectedLoad,                                      \
-              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
-              "ProtectedLoad", 4, 1, 1, 1, 1, 0, MachineType::Type()) {}     \
-  };                                                                         \
-  Load##Type##Operator kLoad##Type;                                          \
-  UnalignedLoad##Type##Operator kUnalignedLoad##Type;                        \
-  CheckedLoad##Type##Operator kCheckedLoad##Type;                            \
+#define LOAD(Type)                                                            \
+  struct Load##Type##Operator final : public Operator1<LoadRepresentation> {  \
+    Load##Type##Operator()                                                    \
+        : Operator1<LoadRepresentation>(                                      \
+              IrOpcode::kLoad,                                                \
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,   \
+              "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}               \
+  };                                                                          \
+  struct UnalignedLoad##Type##Operator final                                  \
+      : public Operator1<UnalignedLoadRepresentation> {                       \
+    UnalignedLoad##Type##Operator()                                           \
+        : Operator1<UnalignedLoadRepresentation>(                             \
+              IrOpcode::kUnalignedLoad,                                       \
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,   \
+              "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}      \
+  };                                                                          \
+  struct CheckedLoad##Type##Operator final                                    \
+      : public Operator1<CheckedLoadRepresentation> {                         \
+    CheckedLoad##Type##Operator()                                             \
+        : Operator1<CheckedLoadRepresentation>(                               \
+              IrOpcode::kCheckedLoad,                                         \
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,   \
+              "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {}        \
+  };                                                                          \
+  struct ProtectedLoad##Type##Operator final                                  \
+      : public Operator1<LoadRepresentation> {                                \
+    ProtectedLoad##Type##Operator()                                           \
+        : Operator1<LoadRepresentation>(                                      \
+              IrOpcode::kProtectedLoad,                                       \
+              Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 3, 1, \
+              1, 1, 1, 0, MachineType::Type()) {}                             \
+  };                                                                          \
+  Load##Type##Operator kLoad##Type;                                           \
+  UnalignedLoad##Type##Operator kUnalignedLoad##Type;                         \
+  CheckedLoad##Type##Operator kCheckedLoad##Type;                             \
   ProtectedLoad##Type##Operator kProtectedLoad##Type;
   MACHINE_TYPE_LIST(LOAD)
 #undef LOAD
 
-#define STACKSLOT(Type)                                                      \
-  struct StackSlot##Type##Operator final                                     \
-      : public Operator1<MachineRepresentation> {                            \
-    StackSlot##Type##Operator()                                              \
-        : Operator1<MachineRepresentation>(                                  \
-              IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow, \
-              "StackSlot", 0, 0, 0, 1, 0, 0,                                 \
-              MachineType::Type().representation()) {}                       \
-  };                                                                         \
-  StackSlot##Type##Operator kStackSlot##Type;
-  MACHINE_TYPE_LIST(STACKSLOT)
+#define STACKSLOT(Size)                                                     \
+  struct StackSlotOfSize##Size##Operator final : public StackSlotOperator { \
+    StackSlotOfSize##Size##Operator() : StackSlotOperator(Size) {}          \
+  };                                                                        \
+  StackSlotOfSize##Size##Operator kStackSlotSize##Size;
+  STACK_SLOT_CACHED_SIZES_LIST(STACKSLOT)
 #undef STACKSLOT
 
 #define STORE(Type)                                                            \
@@ -585,13 +550,24 @@
               "CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
     }                                                                          \
   };                                                                           \
+  struct ProtectedStore##Type##Operator                                        \
+      : public Operator1<StoreRepresentation> {                                \
+    explicit ProtectedStore##Type##Operator()                                  \
+        : Operator1<StoreRepresentation>(                                      \
+              IrOpcode::kProtectedStore,                                       \
+              Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,     \
+              "Store", 4, 1, 1, 0, 1, 0,                                       \
+              StoreRepresentation(MachineRepresentation::Type,                 \
+                                  kNoWriteBarrier)) {}                         \
+  };                                                                           \
   Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier;          \
   Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier;        \
   Store##Type##PointerWriteBarrier##Operator                                   \
       kStore##Type##PointerWriteBarrier;                                       \
   Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier;      \
   UnalignedStore##Type##Operator kUnalignedStore##Type;                        \
-  CheckedStore##Type##Operator kCheckedStore##Type;
+  CheckedStore##Type##Operator kCheckedStore##Type;                            \
+  ProtectedStore##Type##Operator kProtectedStore##Type;
   MACHINE_REPRESENTATION_LIST(STORE)
 #undef STORE
 
@@ -621,6 +597,19 @@
   ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
 #undef STORE
 
+  // The {BitcastWordToTagged} operator must not be marked as pure (especially
+  // not idempotent), because otherwise the splitting logic in the Scheduler
+  // might decide to split these operators, thus potentially creating live
+  // ranges of allocation top across calls or other things that might allocate.
+  // See https://bugs.chromium.org/p/v8/issues/detail?id=6059 for more details.
+  struct BitcastWordToTaggedOperator : public Operator {
+    BitcastWordToTaggedOperator()
+        : Operator(IrOpcode::kBitcastWordToTagged,
+                   Operator::kEliminatable | Operator::kNoWrite,
+                   "BitcastWordToTagged", 1, 0, 0, 1, 0, 0) {}
+  };
+  BitcastWordToTaggedOperator kBitcastWordToTagged;
+
   struct DebugBreakOperator : public Operator {
     DebugBreakOperator()
         : Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0,
@@ -678,6 +667,9 @@
     MACHINE_REPRESENTATION_LIST(STORE)
 #undef STORE
     case MachineRepresentation::kBit:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kNone:
       break;
   }
@@ -726,15 +718,21 @@
   return nullptr;
 }
 
-const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
-#define STACKSLOT(Type)                              \
-  if (rep == MachineType::Type().representation()) { \
-    return &cache_.kStackSlot##Type;                 \
+const Operator* MachineOperatorBuilder::StackSlot(int size) {
+  DCHECK_LE(0, size);
+#define CASE_CACHED_SIZE(Size) \
+  case Size:                   \
+    return &cache_.kStackSlotSize##Size;
+  switch (size) {
+    STACK_SLOT_CACHED_SIZES_LIST(CASE_CACHED_SIZE);
+    default:
+      return new (zone_) StackSlotOperator(size);
   }
-  MACHINE_TYPE_LIST(STACKSLOT)
-#undef STACKSLOT
-  UNREACHABLE();
-  return nullptr;
+#undef CASE_CACHED_SIZE
+}
+
+const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
+  return StackSlot(1 << ElementSizeLog2Of(rep));
 }
 
 const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
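A compact sketch of the caching pattern the new StackSlot(int size) above uses: the common sizes 4, 8, and 16 return preallocated singletons, and any other size falls back to a fresh allocation, as new (zone_) StackSlotOperator(size) does. Types and names are illustrative only:

    #include <memory>
    #include <vector>

    struct StackSlotOp {
      int size;
    };

    class OpBuilder {
     public:
      const StackSlotOp* StackSlot(int size) {
        static const StackSlotOp kSize4{4}, kSize8{8}, kSize16{16};
        switch (size) {
          case 4:  return &kSize4;
          case 8:  return &kSize8;
          case 16: return &kSize16;
          default:
            // Stand-in for `new (zone_) StackSlotOperator(size)`.
            uncached_.push_back(std::make_unique<StackSlotOp>(StackSlotOp{size}));
            return uncached_.back().get();
        }
      }

     private:
      std::vector<std::unique_ptr<StackSlotOp>> uncached_;
    };

The representation-based overload then just forwards 1 << ElementSizeLog2Of(rep).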
@@ -755,6 +753,29 @@
     MACHINE_REPRESENTATION_LIST(STORE)
 #undef STORE
     case MachineRepresentation::kBit:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
+    case MachineRepresentation::kNone:
+      break;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::ProtectedStore(
+    MachineRepresentation rep) {
+  switch (rep) {
+#define STORE(kRep)                       \
+  case MachineRepresentation::kRep:       \
+    return &cache_.kProtectedStore##kRep; \
+    break;
+    MACHINE_REPRESENTATION_LIST(STORE)
+#undef STORE
+    case MachineRepresentation::kBit:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kNone:
       break;
   }
@@ -766,6 +787,10 @@
   return &cache_.kUnsafePointerAdd;
 }
 
+const Operator* MachineOperatorBuilder::BitcastWordToTagged() {
+  return &cache_.kBitcastWordToTagged;
+}
+
 const Operator* MachineOperatorBuilder::DebugBreak() {
   return &cache_.kDebugBreak;
 }
@@ -796,6 +821,9 @@
     MACHINE_REPRESENTATION_LIST(STORE)
 #undef STORE
     case MachineRepresentation::kBit:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kNone:
       break;
   }
@@ -825,6 +853,60 @@
   return nullptr;
 }
 
+#define SIMD_LANE_OPS(Type, lane_count)                                     \
+  const Operator* MachineOperatorBuilder::Type##ExtractLane(                \
+      int32_t lane_index) {                                                 \
+    DCHECK(0 <= lane_index && lane_index < lane_count);                     \
+    return new (zone_)                                                      \
+        Operator1<int32_t>(IrOpcode::k##Type##ExtractLane, Operator::kPure, \
+                           "Extract lane", 1, 0, 0, 1, 0, 0, lane_index);   \
+  }                                                                         \
+  const Operator* MachineOperatorBuilder::Type##ReplaceLane(                \
+      int32_t lane_index) {                                                 \
+    DCHECK(0 <= lane_index && lane_index < lane_count);                     \
+    return new (zone_)                                                      \
+        Operator1<int32_t>(IrOpcode::k##Type##ReplaceLane, Operator::kPure, \
+                           "Replace lane", 2, 0, 0, 1, 0, 0, lane_index);   \
+  }
+SIMD_LANE_OP_LIST(SIMD_LANE_OPS)
+#undef SIMD_LANE_OPS
+
+#define SIMD_SHIFT_OPS(format, bits)                                        \
+  const Operator* MachineOperatorBuilder::Int##format##ShiftLeftByScalar(   \
+      int32_t shift) {                                                      \
+    DCHECK(0 <= shift && shift < bits);                                     \
+    return new (zone_) Operator1<int32_t>(                                  \
+        IrOpcode::kInt##format##ShiftLeftByScalar, Operator::kPure,         \
+        "Shift left", 1, 0, 0, 1, 0, 0, shift);                             \
+  }                                                                         \
+  const Operator* MachineOperatorBuilder::Int##format##ShiftRightByScalar(  \
+      int32_t shift) {                                                      \
+    DCHECK(0 < shift && shift <= bits);                                     \
+    return new (zone_) Operator1<int32_t>(                                  \
+        IrOpcode::kInt##format##ShiftRightByScalar, Operator::kPure,        \
+        "Arithmetic shift right", 1, 0, 0, 1, 0, 0, shift);                 \
+  }                                                                         \
+  const Operator* MachineOperatorBuilder::Uint##format##ShiftRightByScalar( \
+      int32_t shift) {                                                      \
+    DCHECK(0 <= shift && shift < bits);                                     \
+    return new (zone_) Operator1<int32_t>(                                  \
+        IrOpcode::kUint##format##ShiftRightByScalar, Operator::kPure,       \
+        "Shift right", 1, 0, 0, 1, 0, 0, shift);                            \
+  }
+SIMD_FORMAT_LIST(SIMD_SHIFT_OPS)
+#undef SIMD_SHIFT_OPS
+
+// TODO(bbudge) Add Shuffle, DCHECKs based on format.
+#define SIMD_PERMUTE_OPS(format, bits)                                         \
+  const Operator* MachineOperatorBuilder::Simd##format##Swizzle(               \
+      uint32_t swizzle) {                                                      \
+    return new (zone_)                                                         \
+        Operator1<uint32_t>(IrOpcode::kSimd##format##Swizzle, Operator::kPure, \
+                            "Swizzle", 2, 0, 0, 1, 0, 0, swizzle);             \
+  }
+SIMD_FORMAT_LIST(SIMD_PERMUTE_OPS)
+#undef SIMD_PERMUTE_OPS
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
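A standalone sketch of the parameterization introduced above: the lane index (and likewise the shift amount) becomes an operator immediate checked against the format's lane count, rather than a value input. Types are stand-ins:

    #include <cassert>
    #include <cstdint>

    // Stand-in for Operator1<int32_t>: the immediate is part of the operator.
    struct LaneOperator {
      int32_t lane_index;
    };

    template <int kLaneCount>
    LaneOperator ExtractLane(int32_t lane_index) {
      assert(0 <= lane_index && lane_index < kLaneCount);  // mirrors the DCHECK
      return LaneOperator{lane_index};
    }

    int main() {
      LaneOperator op = ExtractLane<4>(2);  // e.g. Float32x4ExtractLane(2)
      return op.lane_index == 2 ? 0 : 1;
    }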
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 1cbec99..0558279 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -43,7 +43,6 @@
 
 // A Load needs a MachineType.
 typedef MachineType LoadRepresentation;
-typedef LoadRepresentation ProtectedLoadRepresentation;
 
 LoadRepresentation LoadRepresentationOf(Operator const*);
 
@@ -94,7 +93,7 @@
 
 CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
 
-MachineRepresentation StackSlotRepresentationOf(Operator const* op);
+int StackSlotSizeOf(Operator const* op);
 
 MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
 
@@ -427,8 +426,8 @@
 
   // SIMD operators.
   const Operator* CreateFloat32x4();
-  const Operator* Float32x4ExtractLane();
-  const Operator* Float32x4ReplaceLane();
+  const Operator* Float32x4ExtractLane(int32_t);
+  const Operator* Float32x4ReplaceLane(int32_t);
   const Operator* Float32x4Abs();
   const Operator* Float32x4Neg();
   const Operator* Float32x4Sqrt();
@@ -448,61 +447,47 @@
   const Operator* Float32x4LessThanOrEqual();
   const Operator* Float32x4GreaterThan();
   const Operator* Float32x4GreaterThanOrEqual();
-  const Operator* Float32x4Select();
-  const Operator* Float32x4Swizzle();
-  const Operator* Float32x4Shuffle();
   const Operator* Float32x4FromInt32x4();
   const Operator* Float32x4FromUint32x4();
 
   const Operator* CreateInt32x4();
-  const Operator* Int32x4ExtractLane();
-  const Operator* Int32x4ReplaceLane();
+  const Operator* Int32x4ExtractLane(int32_t);
+  const Operator* Int32x4ReplaceLane(int32_t);
   const Operator* Int32x4Neg();
   const Operator* Int32x4Add();
   const Operator* Int32x4Sub();
   const Operator* Int32x4Mul();
   const Operator* Int32x4Min();
   const Operator* Int32x4Max();
-  const Operator* Int32x4ShiftLeftByScalar();
-  const Operator* Int32x4ShiftRightByScalar();
+  const Operator* Int32x4ShiftLeftByScalar(int32_t);
+  const Operator* Int32x4ShiftRightByScalar(int32_t);
   const Operator* Int32x4Equal();
   const Operator* Int32x4NotEqual();
   const Operator* Int32x4LessThan();
   const Operator* Int32x4LessThanOrEqual();
   const Operator* Int32x4GreaterThan();
   const Operator* Int32x4GreaterThanOrEqual();
-  const Operator* Int32x4Select();
-  const Operator* Int32x4Swizzle();
-  const Operator* Int32x4Shuffle();
   const Operator* Int32x4FromFloat32x4();
 
   const Operator* Uint32x4Min();
   const Operator* Uint32x4Max();
-  const Operator* Uint32x4ShiftLeftByScalar();
-  const Operator* Uint32x4ShiftRightByScalar();
+  const Operator* Uint32x4ShiftRightByScalar(int32_t);
   const Operator* Uint32x4LessThan();
   const Operator* Uint32x4LessThanOrEqual();
   const Operator* Uint32x4GreaterThan();
   const Operator* Uint32x4GreaterThanOrEqual();
   const Operator* Uint32x4FromFloat32x4();
 
-  const Operator* CreateBool32x4();
-  const Operator* Bool32x4ExtractLane();
-  const Operator* Bool32x4ReplaceLane();
   const Operator* Bool32x4And();
   const Operator* Bool32x4Or();
   const Operator* Bool32x4Xor();
   const Operator* Bool32x4Not();
   const Operator* Bool32x4AnyTrue();
   const Operator* Bool32x4AllTrue();
-  const Operator* Bool32x4Swizzle();
-  const Operator* Bool32x4Shuffle();
-  const Operator* Bool32x4Equal();
-  const Operator* Bool32x4NotEqual();
 
   const Operator* CreateInt16x8();
-  const Operator* Int16x8ExtractLane();
-  const Operator* Int16x8ReplaceLane();
+  const Operator* Int16x8ExtractLane(int32_t);
+  const Operator* Int16x8ReplaceLane(int32_t);
   const Operator* Int16x8Neg();
   const Operator* Int16x8Add();
   const Operator* Int16x8AddSaturate();
@@ -511,46 +496,35 @@
   const Operator* Int16x8Mul();
   const Operator* Int16x8Min();
   const Operator* Int16x8Max();
-  const Operator* Int16x8ShiftLeftByScalar();
-  const Operator* Int16x8ShiftRightByScalar();
+  const Operator* Int16x8ShiftLeftByScalar(int32_t);
+  const Operator* Int16x8ShiftRightByScalar(int32_t);
   const Operator* Int16x8Equal();
   const Operator* Int16x8NotEqual();
   const Operator* Int16x8LessThan();
   const Operator* Int16x8LessThanOrEqual();
   const Operator* Int16x8GreaterThan();
   const Operator* Int16x8GreaterThanOrEqual();
-  const Operator* Int16x8Select();
-  const Operator* Int16x8Swizzle();
-  const Operator* Int16x8Shuffle();
 
   const Operator* Uint16x8AddSaturate();
   const Operator* Uint16x8SubSaturate();
   const Operator* Uint16x8Min();
   const Operator* Uint16x8Max();
-  const Operator* Uint16x8ShiftLeftByScalar();
-  const Operator* Uint16x8ShiftRightByScalar();
+  const Operator* Uint16x8ShiftRightByScalar(int32_t);
   const Operator* Uint16x8LessThan();
   const Operator* Uint16x8LessThanOrEqual();
   const Operator* Uint16x8GreaterThan();
   const Operator* Uint16x8GreaterThanOrEqual();
 
-  const Operator* CreateBool16x8();
-  const Operator* Bool16x8ExtractLane();
-  const Operator* Bool16x8ReplaceLane();
   const Operator* Bool16x8And();
   const Operator* Bool16x8Or();
   const Operator* Bool16x8Xor();
   const Operator* Bool16x8Not();
   const Operator* Bool16x8AnyTrue();
   const Operator* Bool16x8AllTrue();
-  const Operator* Bool16x8Swizzle();
-  const Operator* Bool16x8Shuffle();
-  const Operator* Bool16x8Equal();
-  const Operator* Bool16x8NotEqual();
 
   const Operator* CreateInt8x16();
-  const Operator* Int8x16ExtractLane();
-  const Operator* Int8x16ReplaceLane();
+  const Operator* Int8x16ExtractLane(int32_t);
+  const Operator* Int8x16ReplaceLane(int32_t);
   const Operator* Int8x16Neg();
   const Operator* Int8x16Add();
   const Operator* Int8x16AddSaturate();
@@ -559,42 +533,31 @@
   const Operator* Int8x16Mul();
   const Operator* Int8x16Min();
   const Operator* Int8x16Max();
-  const Operator* Int8x16ShiftLeftByScalar();
-  const Operator* Int8x16ShiftRightByScalar();
+  const Operator* Int8x16ShiftLeftByScalar(int32_t);
+  const Operator* Int8x16ShiftRightByScalar(int32_t);
   const Operator* Int8x16Equal();
   const Operator* Int8x16NotEqual();
   const Operator* Int8x16LessThan();
   const Operator* Int8x16LessThanOrEqual();
   const Operator* Int8x16GreaterThan();
   const Operator* Int8x16GreaterThanOrEqual();
-  const Operator* Int8x16Select();
-  const Operator* Int8x16Swizzle();
-  const Operator* Int8x16Shuffle();
 
   const Operator* Uint8x16AddSaturate();
   const Operator* Uint8x16SubSaturate();
   const Operator* Uint8x16Min();
   const Operator* Uint8x16Max();
-  const Operator* Uint8x16ShiftLeftByScalar();
-  const Operator* Uint8x16ShiftRightByScalar();
+  const Operator* Uint8x16ShiftRightByScalar(int32_t);
   const Operator* Uint8x16LessThan();
   const Operator* Uint8x16LessThanOrEqual();
   const Operator* Uint8x16GreaterThan();
   const Operator* Uint8x16GreaterThanOrEqual();
 
-  const Operator* CreateBool8x16();
-  const Operator* Bool8x16ExtractLane();
-  const Operator* Bool8x16ReplaceLane();
   const Operator* Bool8x16And();
   const Operator* Bool8x16Or();
   const Operator* Bool8x16Xor();
   const Operator* Bool8x16Not();
   const Operator* Bool8x16AnyTrue();
   const Operator* Bool8x16AllTrue();
-  const Operator* Bool8x16Swizzle();
-  const Operator* Bool8x16Shuffle();
-  const Operator* Bool8x16Equal();
-  const Operator* Bool8x16NotEqual();
 
   const Operator* Simd128Load();
   const Operator* Simd128Load1();
@@ -608,6 +571,15 @@
   const Operator* Simd128Or();
   const Operator* Simd128Xor();
   const Operator* Simd128Not();
+  const Operator* Simd32x4Select();
+  const Operator* Simd32x4Swizzle(uint32_t);
+  const Operator* Simd32x4Shuffle();
+  const Operator* Simd16x8Select();
+  const Operator* Simd16x8Swizzle(uint32_t);
+  const Operator* Simd16x8Shuffle();
+  const Operator* Simd8x16Select();
+  const Operator* Simd8x16Swizzle(uint32_t);
+  const Operator* Simd8x16Shuffle();
 
   // load [base + index]
   const Operator* Load(LoadRepresentation rep);
@@ -615,6 +587,7 @@
 
   // store [base + index], value
   const Operator* Store(StoreRepresentation rep);
+  const Operator* ProtectedStore(MachineRepresentation rep);
 
   // unaligned load [base + index]
   const Operator* UnalignedLoad(UnalignedLoadRepresentation rep);
@@ -622,6 +595,7 @@
   // unaligned store [base + index], value
   const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
 
+  const Operator* StackSlot(int size);
   const Operator* StackSlot(MachineRepresentation rep);
 
   // Access to the machine stack.
diff --git a/src/compiler/memory-optimizer.cc b/src/compiler/memory-optimizer.cc
index 66fcbb9..7e9a522 100644
--- a/src/compiler/memory-optimizer.cc
+++ b/src/compiler/memory-optimizer.cc
@@ -20,7 +20,8 @@
       empty_state_(AllocationState::Empty(zone)),
       pending_(zone),
       tokens_(zone),
-      zone_(zone) {}
+      zone_(zone),
+      graph_assembler_(jsgraph, nullptr, nullptr, zone) {}
 
 void MemoryOptimizer::Optimize() {
   EnqueueUses(graph()->start(), empty_state());
@@ -91,7 +92,9 @@
     case IrOpcode::kDeoptimizeUnless:
     case IrOpcode::kIfException:
     case IrOpcode::kLoad:
+    case IrOpcode::kProtectedLoad:
     case IrOpcode::kStore:
+    case IrOpcode::kProtectedStore:
     case IrOpcode::kRetain:
     case IrOpcode::kUnsafePointerAdd:
       return VisitOtherEffect(node, state);
@@ -101,12 +104,17 @@
   DCHECK_EQ(0, node->op()->EffectOutputCount());
 }
 
+#define __ gasm()->
+
 void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kAllocate, node->opcode());
   Node* value;
   Node* size = node->InputAt(0);
   Node* effect = node->InputAt(1);
   Node* control = node->InputAt(2);
+
+  gasm()->Reset(effect, control);
+
   PretenureFlag pretenure = PretenureFlagOf(node->op());
 
   // Propagate tenuring from outer allocations to inner allocations, i.e.
@@ -141,11 +149,11 @@
   }
 
   // Determine the top/limit addresses.
-  Node* top_address = jsgraph()->ExternalConstant(
+  Node* top_address = __ ExternalConstant(
       pretenure == NOT_TENURED
           ? ExternalReference::new_space_allocation_top_address(isolate())
           : ExternalReference::old_space_allocation_top_address(isolate()));
-  Node* limit_address = jsgraph()->ExternalConstant(
+  Node* limit_address = __ ExternalConstant(
       pretenure == NOT_TENURED
           ? ExternalReference::new_space_allocation_limit_address(isolate())
           : ExternalReference::old_space_allocation_limit_address(isolate()));
@@ -171,89 +179,69 @@
 
       // Update the allocation top with the new object allocation.
       // TODO(bmeurer): Defer writing back top as much as possible.
-      Node* top = graph()->NewNode(machine()->IntAdd(), state->top(),
-                                   jsgraph()->IntPtrConstant(object_size));
-      effect = graph()->NewNode(
-          machine()->Store(StoreRepresentation(
-              MachineType::PointerRepresentation(), kNoWriteBarrier)),
-          top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+      Node* top = __ IntAdd(state->top(), __ IntPtrConstant(object_size));
+      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+                                   kNoWriteBarrier),
+               top_address, __ IntPtrConstant(0), top);
 
       // Compute the effective inner allocated address.
-      value = graph()->NewNode(
-          machine()->BitcastWordToTagged(),
-          graph()->NewNode(machine()->IntAdd(), state->top(),
-                           jsgraph()->IntPtrConstant(kHeapObjectTag)));
+      value = __ BitcastWordToTagged(
+          __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
 
       // Extend the allocation {group}.
       group->Add(value);
       state = AllocationState::Open(group, state_size, top, zone());
     } else {
+      auto call_runtime = __ MakeDeferredLabel<1>();
+      auto done = __ MakeLabel<2>(MachineType::PointerRepresentation());
+
       // Set up a mutable reservation size node; it will be patched as we fold
       // additional allocations into this new group.
-      Node* size = graph()->NewNode(common()->Int32Constant(object_size));
+      Node* size = __ UniqueInt32Constant(object_size);
 
       // Load allocation top and limit.
-      Node* top = effect =
-          graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
-                           jsgraph()->IntPtrConstant(0), effect, control);
-      Node* limit = effect = graph()->NewNode(
-          machine()->Load(MachineType::Pointer()), limit_address,
-          jsgraph()->IntPtrConstant(0), effect, control);
+      Node* top =
+          __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
+      Node* limit =
+          __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
 
       // Check if we need to collect garbage before we can start bump pointer
       // allocation (always done for folded allocations).
-      Node* check = graph()->NewNode(
-          machine()->UintLessThan(),
-          graph()->NewNode(
-              machine()->IntAdd(), top,
-              machine()->Is64()
-                  ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
-                  : size),
+      Node* check = __ UintLessThan(
+          __ IntAdd(top,
+                    machine()->Is64() ? __ ChangeInt32ToInt64(size) : size),
           limit);
-      Node* branch =
-          graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
 
-      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-      Node* etrue = effect;
-      Node* vtrue = top;
+      __ GotoUnless(check, &call_runtime);
+      __ Goto(&done, top);
 
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-      Node* efalse = effect;
-      Node* vfalse;
+      __ Bind(&call_runtime);
       {
-        Node* target = pretenure == NOT_TENURED
-                           ? jsgraph()->AllocateInNewSpaceStubConstant()
-                           : jsgraph()->AllocateInOldSpaceStubConstant();
+        Node* target =
+            pretenure == NOT_TENURED ? __ AllocateInNewSpaceStubConstant()
+                                     : __ AllocateInOldSpaceStubConstant();
         if (!allocate_operator_.is_set()) {
           CallDescriptor* descriptor =
               Linkage::GetAllocateCallDescriptor(graph()->zone());
           allocate_operator_.set(common()->Call(descriptor));
         }
-        vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target,
-                                           size, efalse, if_false);
-        vfalse = graph()->NewNode(machine()->IntSub(), vfalse,
-                                  jsgraph()->IntPtrConstant(kHeapObjectTag));
+        Node* vfalse = __ Call(allocate_operator_.get(), target, size);
+        vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
+        __ Goto(&done, vfalse);
       }
 
-      control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-      effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-      value = graph()->NewNode(
-          common()->Phi(MachineType::PointerRepresentation(), 2), vtrue, vfalse,
-          control);
+      __ Bind(&done);
 
       // Compute the new top and write it back.
-      top = graph()->NewNode(machine()->IntAdd(), value,
-                             jsgraph()->IntPtrConstant(object_size));
-      effect = graph()->NewNode(
-          machine()->Store(StoreRepresentation(
-              MachineType::PointerRepresentation(), kNoWriteBarrier)),
-          top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+      top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
+      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+                                   kNoWriteBarrier),
+               top_address, __ IntPtrConstant(0), top);
 
       // Compute the initial object address.
-      value = graph()->NewNode(
-          machine()->BitcastWordToTagged(),
-          graph()->NewNode(machine()->IntAdd(), value,
-                           jsgraph()->IntPtrConstant(kHeapObjectTag)));
+      value = __ BitcastWordToTagged(
+          __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
 
       // Start a new allocation group.
       AllocationGroup* group =
@@ -261,61 +249,42 @@
       state = AllocationState::Open(group, object_size, top, zone());
     }
   } else {
+    auto call_runtime = __ MakeDeferredLabel<1>();
+    auto done = __ MakeLabel<2>(MachineRepresentation::kTaggedPointer);
+
     // Load allocation top and limit.
-    Node* top = effect =
-        graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
-                         jsgraph()->IntPtrConstant(0), effect, control);
-    Node* limit = effect =
-        graph()->NewNode(machine()->Load(MachineType::Pointer()), limit_address,
-                         jsgraph()->IntPtrConstant(0), effect, control);
+    Node* top =
+        __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
+    Node* limit =
+        __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
 
     // Compute the new top.
-    Node* new_top = graph()->NewNode(
-        machine()->IntAdd(), top,
-        machine()->Is64()
-            ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
-            : size);
+    Node* new_top =
+        __ IntAdd(top, machine()->Is64() ? __ ChangeInt32ToInt64(size) : size);
 
     // Check if we can do bump pointer allocation here.
-    Node* check = graph()->NewNode(machine()->UintLessThan(), new_top, limit);
-    Node* branch =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+    Node* check = __ UintLessThan(new_top, limit);
+    __ GotoUnless(check, &call_runtime);
+    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+                                 kNoWriteBarrier),
+             top_address, __ IntPtrConstant(0), new_top);
+    __ Goto(&done, __ BitcastWordToTagged(
+                       __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
 
-    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-    Node* etrue = effect;
-    Node* vtrue;
-    {
-      etrue = graph()->NewNode(
-          machine()->Store(StoreRepresentation(
-              MachineType::PointerRepresentation(), kNoWriteBarrier)),
-          top_address, jsgraph()->IntPtrConstant(0), new_top, etrue, if_true);
-      vtrue = graph()->NewNode(
-          machine()->BitcastWordToTagged(),
-          graph()->NewNode(machine()->IntAdd(), top,
-                           jsgraph()->IntPtrConstant(kHeapObjectTag)));
+    __ Bind(&call_runtime);
+    Node* target =
+        pretenure == NOT_TENURED ? __ AllocateInNewSpaceStubConstant()
+                                 : __ AllocateInOldSpaceStubConstant();
+    if (!allocate_operator_.is_set()) {
+      CallDescriptor* descriptor =
+          Linkage::GetAllocateCallDescriptor(graph()->zone());
+      allocate_operator_.set(common()->Call(descriptor));
     }
+    __ Goto(&done, __ Call(allocate_operator_.get(), target, size));
 
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-    Node* efalse = effect;
-    Node* vfalse;
-    {
-      Node* target = pretenure == NOT_TENURED
-                         ? jsgraph()->AllocateInNewSpaceStubConstant()
-                         : jsgraph()->AllocateInOldSpaceStubConstant();
-      if (!allocate_operator_.is_set()) {
-        CallDescriptor* descriptor =
-            Linkage::GetAllocateCallDescriptor(graph()->zone());
-        allocate_operator_.set(common()->Call(descriptor));
-      }
-      vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, size,
-                                         efalse, if_false);
-    }
-
-    control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-    effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-    value = graph()->NewNode(
-        common()->Phi(MachineRepresentation::kTaggedPointer, 2), vtrue, vfalse,
-        control);
+    __ Bind(&done);
+    value = done.PhiAt(0);
 
     // Create an unfoldable allocation group.
     AllocationGroup* group =
@@ -323,6 +292,10 @@
     state = AllocationState::Closed(group, zone());
   }
 
+  effect = __ ExtractCurrentEffect();
+  control = __ ExtractCurrentControl();
+  USE(control);  // Floating control, dropped on the floor.
+
   // Replace all effect uses of {node} with the {effect}, enqueue the
   // effect uses for further processing, and replace all value uses of
   // {node} with the {value}.
@@ -340,6 +313,8 @@
   node->Kill();
 }
 
+#undef __
+
 void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kCall, node->opcode());
   // If the call can allocate, we start with a fresh state.
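The GraphAssembler rewrite above keeps the allocation logic itself unchanged. As a plain C++ sketch, with assumed names rather than V8's heap API, the fast path and the deferred runtime call amount to:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    struct Space {
      uintptr_t top;
      uintptr_t limit;
    };

    void* RuntimeAllocate(std::size_t size) { return std::malloc(size); }  // stand-in

    void* Allocate(Space* space, std::size_t size) {
      uintptr_t new_top = space->top + size;
      if (new_top < space->limit) {  // mirrors __ UintLessThan(new_top, limit)
        void* result = reinterpret_cast<void*>(space->top);
        space->top = new_top;        // mirrors the Store of new_top
        return result;
      }
      return RuntimeAllocate(size);  // mirrors the deferred call_runtime label
    }

    int main() {
      alignas(8) static char arena[1024];
      Space space{reinterpret_cast<uintptr_t>(arena),
                  reinterpret_cast<uintptr_t>(arena) + sizeof(arena)};
      void* fast = Allocate(&space, 64);    // bump-pointer fast path
      void* slow = Allocate(&space, 4096);  // exceeds limit: runtime fallback
      std::free(slow);
      return fast != nullptr ? 0 : 1;
    }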
diff --git a/src/compiler/memory-optimizer.h b/src/compiler/memory-optimizer.h
index ba1d6dd..1541d22 100644
--- a/src/compiler/memory-optimizer.h
+++ b/src/compiler/memory-optimizer.h
@@ -5,6 +5,7 @@
 #ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
 #define V8_COMPILER_MEMORY_OPTIMIZER_H_
 
+#include "src/compiler/graph-assembler.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -131,6 +132,7 @@
   CommonOperatorBuilder* common() const;
   MachineOperatorBuilder* machine() const;
   Zone* zone() const { return zone_; }
+  GraphAssembler* gasm() { return &graph_assembler_; }
 
   SetOncePointer<const Operator> allocate_operator_;
   JSGraph* const jsgraph_;
@@ -138,6 +140,7 @@
   ZoneMap<NodeId, AllocationStates> pending_;
   ZoneQueue<Token> tokens_;
   Zone* const zone_;
+  GraphAssembler graph_assembler_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
 };
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index 0a62b52..db4b529 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -270,6 +270,26 @@
   bool must_save_lr_;
 };
 
+#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T)                 \
+  class ool_name final : public OutOfLineCode {                      \
+   public:                                                           \
+    ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
+        : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
+                                                                     \
+    void Generate() final { __ masm_ool_name(dst_, src1_, src2_); }  \
+                                                                     \
+   private:                                                          \
+    T const dst_;                                                    \
+    T const src1_;                                                   \
+    T const src2_;                                                   \
+  }
+
+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, DoubleRegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, DoubleRegister);
+
+#undef CREATE_OOL_CLASS
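For readability, the first instantiation expands to roughly the following; this is the macro's output, not standalone code, and __ is this file's MacroAssembler shorthand:

    class OutOfLineFloat32Max final : public OutOfLineCode {
     public:
      OutOfLineFloat32Max(CodeGenerator* gen, FPURegister dst, FPURegister src1,
                          FPURegister src2)
          : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {}

      void Generate() final { __ Float32MaxOutOfLine(dst_, src1_, src2_); }

     private:
      FPURegister const dst_;
      FPURegister const src1_;
      FPURegister const src2_;
    };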
 
 Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
   switch (condition) {
@@ -542,7 +562,7 @@
   // Check if current frame is an arguments adaptor frame.
   __ lw(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
   __ Branch(&done, ne, scratch1,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // Load arguments count from current arguments adaptor frame (note, it
   // does not include receiver).
@@ -712,10 +732,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1132,36 +1150,24 @@
                i.InputDoubleRegister(1));
       break;
     case kMipsMaddS:
-      __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
-                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2),
+                kScratchDoubleReg);
       break;
     case kMipsMaddD:
-      __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
-      break;
-    case kMipsMaddfS:
-      __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
-                 i.InputFloatRegister(2));
-      break;
-    case kMipsMaddfD:
-      __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(2));
+      __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+                kScratchDoubleReg);
       break;
     case kMipsMsubS:
-      __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
-                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2),
+                kScratchDoubleReg);
       break;
     case kMipsMsubD:
-      __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
-      break;
-    case kMipsMsubfS:
-      __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
-                 i.InputFloatRegister(2));
-      break;
-    case kMipsMsubfD:
-      __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(2));
+      __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+                kScratchDoubleReg);
       break;
     case kMipsMulD:
       // TODO(plind): add special case: right op is -1.0, see arm port.
@@ -1239,47 +1245,39 @@
       break;
     }
     case kMipsFloat32Max: {
-      Label compare_nan, done_compare;
-      __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
-                       i.InputSingleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputSingleRegister(),
-              std::numeric_limits<float>::quiet_NaN());
-      __ bind(&done_compare);
+      FPURegister dst = i.OutputSingleRegister();
+      FPURegister src1 = i.InputSingleRegister(0);
+      FPURegister src2 = i.InputSingleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
+      __ Float32Max(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMipsFloat64Max: {
-      Label compare_nan, done_compare;
-      __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                       i.InputDoubleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputDoubleRegister(),
-              std::numeric_limits<double>::quiet_NaN());
-      __ bind(&done_compare);
+      DoubleRegister dst = i.OutputDoubleRegister();
+      DoubleRegister src1 = i.InputDoubleRegister(0);
+      DoubleRegister src2 = i.InputDoubleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
+      __ Float64Max(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMipsFloat32Min: {
-      Label compare_nan, done_compare;
-      __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
-                       i.InputSingleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputSingleRegister(),
-              std::numeric_limits<float>::quiet_NaN());
-      __ bind(&done_compare);
+      FPURegister dst = i.OutputSingleRegister();
+      FPURegister src1 = i.InputSingleRegister(0);
+      FPURegister src2 = i.InputSingleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
+      __ Float32Min(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMipsFloat64Min: {
-      Label compare_nan, done_compare;
-      __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                       i.InputDoubleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputDoubleRegister(),
-              std::numeric_limits<double>::quiet_NaN());
-      __ bind(&done_compare);
+      DoubleRegister dst = i.OutputDoubleRegister();
+      DoubleRegister src1 = i.InputDoubleRegister(0);
+      DoubleRegister src2 = i.InputDoubleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
+      __ Float64Min(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMipsCvtSD: {
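
For orientation, the four min/max cases above share one shape: the inline Float32Max/Float64Max/Float32Min/Float64Min macro instructions handle the ordinary compare-and-select fast path and branch to the out-of-line entry only for inputs a plain compare gets wrong; the OutOfLineFloat* class then emits the slow path and execution falls through to exit(). A reference model of the intended semantics, as plain C++ rather than the emitted code (Float32MaxReference is a hypothetical name):

#include <cmath>
#include <limits>
// NaN operands and (depending on the helper) signed zeros go out of line.
float Float32MaxReference(float a, float b) {
  if (std::isnan(a) || std::isnan(b))
    return std::numeric_limits<float>::quiet_NaN();
  if (a == 0.0f && b == 0.0f) return std::signbit(a) ? b : a;  // +0 over -0
  return a > b ? a : b;
}
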
@@ -1628,12 +1626,12 @@
   return false;
 }
 
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
+                            Instruction* instr, FlagsCondition condition,
+                            Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ masm->
 
-// Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
-  MipsOperandConverter i(this, instr);
-  Label* tlabel = branch->true_label;
-  Label* flabel = branch->false_label;
   Condition cc = kNoCondition;
   // MIPS does not have condition code flags, so compare and branch are
   // implemented differently from other architectures. The compare operations
@@ -1642,12 +1640,13 @@
   // registers to compare pseudo-op are not modified before this branch op, as
   // they are tested here.
 
+  MipsOperandConverter i(gen, instr);
   if (instr->arch_opcode() == kMipsTst) {
-    cc = FlagsConditionToConditionTst(branch->condition);
+    cc = FlagsConditionToConditionTst(condition);
     __ And(at, i.InputRegister(0), i.InputOperand(1));
     __ Branch(tlabel, cc, at, Operand(zero_reg));
   } else if (instr->arch_opcode() == kMipsAddOvf) {
-    switch (branch->condition) {
+    switch (condition) {
       case kOverflow:
         __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel);
@@ -1657,11 +1656,11 @@
                         i.InputOperand(1), flabel, tlabel);
         break;
       default:
-        UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
+        UNSUPPORTED_COND(kMipsAddOvf, condition);
         break;
     }
   } else if (instr->arch_opcode() == kMipsSubOvf) {
-    switch (branch->condition) {
+    switch (condition) {
       case kOverflow:
         __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel);
@@ -1671,11 +1670,11 @@
                         i.InputOperand(1), flabel, tlabel);
         break;
       default:
-        UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
+        UNSUPPORTED_COND(kMipsSubOvf, condition);
         break;
     }
   } else if (instr->arch_opcode() == kMipsMulOvf) {
-    switch (branch->condition) {
+    switch (condition) {
       case kOverflow:
         __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel);
@@ -1685,15 +1684,15 @@
                         i.InputOperand(1), flabel, tlabel);
         break;
       default:
-        UNSUPPORTED_COND(kMipsMulOvf, branch->condition);
+        UNSUPPORTED_COND(kMipsMulOvf, condition);
         break;
     }
   } else if (instr->arch_opcode() == kMipsCmp) {
-    cc = FlagsConditionToConditionCmp(branch->condition);
+    cc = FlagsConditionToConditionCmp(condition);
     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
   } else if (instr->arch_opcode() == kMipsCmpS) {
-    if (!convertCondition(branch->condition, cc)) {
-      UNSUPPORTED_COND(kMips64CmpS, branch->condition);
+    if (!convertCondition(condition, cc)) {
+      UNSUPPORTED_COND(kMipsCmpS, condition);
     }
     FPURegister left = i.InputOrZeroSingleRegister(0);
     FPURegister right = i.InputOrZeroSingleRegister(1);
@@ -1703,8 +1702,8 @@
     }
     __ BranchF32(tlabel, nullptr, cc, left, right);
   } else if (instr->arch_opcode() == kMipsCmpD) {
-    if (!convertCondition(branch->condition, cc)) {
-      UNSUPPORTED_COND(kMips64CmpD, branch->condition);
+    if (!convertCondition(condition, cc)) {
+      UNSUPPORTED_COND(kMipsCmpD, condition);
     }
     FPURegister left = i.InputOrZeroDoubleRegister(0);
     FPURegister right = i.InputOrZeroDoubleRegister(1);
@@ -1718,7 +1717,17 @@
            instr->arch_opcode());
     UNIMPLEMENTED();
   }
-  if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
+  if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
+#undef __
+#define __ masm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
+                         branch->fallthru);
 }
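
AssembleBranchToLabels is a free function, so it cannot use the member masm(); it rebinds the file's __ shorthand to its MacroAssembler* parameter on entry and restores the member form before returning. The trick in miniature (a sketch; EmitNopVia is a made-up name):

#define __ masm()->  // the file-wide binding
void EmitNopVia(MacroAssembler* masm) {
#undef __
#define __ masm->    // rebind for the free function
  __ nop();
#undef __
#define __ masm()->  // restore the member form for the rest of the file
}
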
 
 
@@ -1726,6 +1735,68 @@
   if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      MipsOperandConverter i(gen_, instr_);
+
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        // We use the context register as the scratch register, because we do
+        // not have a context here.
+        __ PrepareCallCFunction(0, 0, cp);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+        }
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
+}
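
Reusing AssembleBranchToLabels with fallthru == true and a null false label means a trap costs one conditional branch on the fast path. The emitted shape, sketched as commented pseudo-assembly (illustrative only):

//   <compare inputs>                  ; per AssembleBranchToLabels above
//   b<cond>  ool_entry                ; taken only when the trap fires
//   ...                               ; fall through: no trap
// ool_entry:
//   EnterFrame(WASM_COMPILED)         ; only if the frame was elided
//   Call(builtin(trap_id))            ; or a C callback under cctest
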
 
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -1915,13 +1986,16 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
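
With the BailoutType parameter removed from the signature, the kind-to-type mapping lives inside the function. Isolated as a sketch (BailoutTypeFor is a hypothetical helper; the assumption, matching the code above, is that only soft and eager deopts reach this path on MIPS):

Deoptimizer::BailoutType BailoutTypeFor(DeoptimizeKind kind) {
  // Soft deopts get their own entry table; everything else is eager here.
  return kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
                                       : Deoptimizer::EAGER;
}
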
@@ -2080,9 +2154,7 @@
           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
       switch (src.type()) {
         case Constant::kInt32:
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmReference(src.rmode())) {
             __ li(dst, Operand(src.ToInt32(), src.rmode()));
           } else {
             __ li(dst, Operand(src.ToInt32()));
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index 45ed041..edff56f 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -71,12 +71,8 @@
   V(MipsMulPair)                   \
   V(MipsMaddS)                     \
   V(MipsMaddD)                     \
-  V(MipsMaddfS)                    \
-  V(MipsMaddfD)                    \
   V(MipsMsubS)                     \
   V(MipsMsubD)                     \
-  V(MipsMsubfS)                    \
-  V(MipsMsubfD)                    \
   V(MipsFloat32RoundDown)          \
   V(MipsFloat32RoundTruncate)      \
   V(MipsFloat32RoundUp)            \
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index 1e4b996..d0ceac1 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -173,10 +173,9 @@
                         &inputs[1])) {
     inputs[0] = g.UseRegister(m.left().node());
     input_count++;
-  }
-  if (has_reverse_opcode &&
-      TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
-                        &input_count, &inputs[1])) {
+  } else if (has_reverse_opcode &&
+             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+                               &input_count, &inputs[1])) {
     inputs[0] = g.UseRegister(m.right().node());
     opcode = reverse_opcode;
     input_count++;
@@ -188,6 +187,8 @@
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.TempImmediate(cont->trap_id());
   }
 
   if (cont->IsDeoptimize()) {
@@ -210,7 +211,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
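
The change from two independent ifs to if/else if makes the immediate-matching arms mutually exclusive: when both operands were encodable immediates, the old code could match twice and advance input_count twice. A reduced, self-contained model of the ordering (hypothetical names):

enum BinopArm { kRightAsImmediate, kLeftAsImmediateCommuted, kBothRegisters };
BinopArm SelectBinopArm(bool right_is_imm, bool left_is_imm,
                        bool has_reverse_opcode) {
  if (right_is_imm) return kRightAsImmediate;          // reg OP imm
  if (has_reverse_opcode && left_is_imm)
    return kLeftAsImmediateCommuted;                   // swapped, reverse op
  return kBothRegisters;                               // reg OP reg/operand
}
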
@@ -263,6 +264,9 @@
       break;
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -348,6 +352,9 @@
         break;
       case MachineRepresentation::kWord64:   // Fall through.
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -368,6 +375,10 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
 
 void InstructionSelector::VisitWord32And(Node* node) {
   MipsOperandGenerator g(this);
@@ -394,9 +405,13 @@
         // zeros.
         if (lsb + mask_width > 32) mask_width = 32 - lsb;
 
-        Emit(kMipsExt, g.DefineAsRegister(node),
-             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
-             g.TempImmediate(mask_width));
+        if (lsb == 0 && mask_width == 32) {
+          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+        } else {
+          Emit(kMipsExt, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+               g.TempImmediate(mask_width));
+        }
         return;
       }
       // Other cases fall through to the normal And operation.
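
The new guard avoids emitting an Ext that extracts the whole register: with lsb == 0 and mask_width == 32 the mask is 0xFFFFFFFF, so the And is an identity and kArchNop with DefineSameAsFirst simply forwards the input. The degenerate case, spelled out (hypothetical helper):

#include <cstdint>
// (x >> 0) & 0xFFFFFFFF == x for any 32-bit x, so no instruction is needed.
bool ExtIsIdentity(uint32_t lsb, uint32_t mask_width) {
  return lsb == 0 && mask_width == 32;
}
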
@@ -652,7 +667,7 @@
   if (m.right().opcode() == IrOpcode::kWord32Shl &&
       CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
     Int32BinopMatcher mright(m.right().node());
-    if (mright.right().HasValue()) {
+    if (mright.right().HasValue() && !m.left().HasValue()) {
       int32_t shift_value = static_cast<int32_t>(mright.right().Value());
       Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
@@ -664,7 +679,7 @@
   if (m.left().opcode() == IrOpcode::kWord32Shl &&
       CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
     Int32BinopMatcher mleft(m.left().node());
-    if (mleft.right().HasValue()) {
+    if (mleft.right().HasValue() && !m.right().HasValue()) {
       int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
       Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.right().node()),
            g.UseRegister(mleft.left().node()), g.TempImmediate(shift_value));
@@ -900,35 +915,23 @@
 
 void InstructionSelector::VisitFloat32Add(Node* node) {
   MipsOperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
-    // For Add.S(Mul.S(x, y), z):
-    Float32BinopMatcher mleft(m.left().node());
-    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
+    Float32BinopMatcher m(node);
+    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+      // For Add.S(Mul.S(x, y), z):
+      Float32BinopMatcher mleft(m.left().node());
       Emit(kMipsMaddS, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
-    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.S(z, x, y).
-      Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
     }
-  }
-  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    // For Add.S(x, Mul.S(y, z)):
-    Float32BinopMatcher mright(m.right().node());
-    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(x, y, z).
+    if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+      // For Add.S(x, Mul.S(y, z)):
+      Float32BinopMatcher mright(m.right().node());
       Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()));
       return;
-    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.S(x, y, z).
-      Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
     }
   }
   VisitRRR(this, kMipsAddS, node);
@@ -937,35 +940,23 @@
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
   MipsOperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    // For Add.D(Mul.D(x, y), z):
-    Float64BinopMatcher mleft(m.left().node());
-    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(z, x, y).
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(z, x, y).
+    Float64BinopMatcher m(node);
+    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+      // For Add.D(Mul.D(x, y), z):
+      Float64BinopMatcher mleft(m.left().node());
       Emit(kMipsMaddD, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
-    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.D(z, x, y).
-      Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
     }
-  }
-  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    // For Add.D(x, Mul.D(y, z)):
-    Float64BinopMatcher mright(m.right().node());
-    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(x, y, z).
+    if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+      // For Add.D(x, Mul.D(y, z)):
+      Float64BinopMatcher mright(m.right().node());
       Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()));
       return;
-    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.D(x, y, z).
-      Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
     }
   }
   VisitRRR(this, kMipsAddD, node);
@@ -974,9 +965,9 @@
 
 void InstructionSelector::VisitFloat32Sub(Node* node) {
   MipsOperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
-    if (IsMipsArchVariant(kMips32r2)) {
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.S(z, x, y).
+    Float32BinopMatcher m(node);
+    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
       // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
       Float32BinopMatcher mleft(m.left().node());
       Emit(kMipsMsubS, g.DefineAsRegister(node),
@@ -984,24 +975,15 @@
            g.UseRegister(mleft.right().node()));
       return;
     }
-  } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    if (IsMipsArchVariant(kMips32r6)) {
-      // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
-      Float32BinopMatcher mright(m.right().node());
-      Emit(kMipsMsubfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
-    }
   }
   VisitRRR(this, kMipsSubS, node);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   MipsOperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    if (IsMipsArchVariant(kMips32r2)) {
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.D(z, x, y).
+    Float64BinopMatcher m(node);
+    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
       // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
       Float64BinopMatcher mleft(m.left().node());
       Emit(kMipsMsubD, g.DefineAsRegister(node),
@@ -1009,15 +991,6 @@
            g.UseRegister(mleft.right().node()));
       return;
     }
-  } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    if (IsMipsArchVariant(kMips32r6)) {
-      // For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
-      Float64BinopMatcher mright(m.right().node());
-      Emit(kMipsMsubfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
-    }
   }
   VisitRRR(this, kMipsSubD, node);
 }
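
All four visitors drop the r6 maddf/msubf selections and hoist the r2 check so the matchers run only when a fused-multiply candidate can actually be used. A plausible reading, not stated in the patch itself: r6 maddf.s/maddf.d are fused (single rounding), which does not match the two-rounding result of a separate Float32Mul feeding a Float32Add, while the r2 Madd_s/Madd_d macro instructions appear to expand to an explicit multiply into kScratchDoubleReg followed by an add, preserving the unfused semantics:

// What the r2 macro path computes, conceptually (two roundings):
//   scratch = x * y;        // rounded
//   dst     = z + scratch;  // rounded again
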
@@ -1231,6 +1204,9 @@
       break;
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1281,6 +1257,9 @@
       break;
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1329,6 +1308,9 @@
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1404,11 +1386,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.TempImmediate(cont->trap_id()));
   }
 }
 
@@ -1614,12 +1599,15 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
-                             g.TempImmediate(0), cont->reason(),
+                             g.TempImmediate(0), cont->kind(), cont->reason(),
                              cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    g.TempImmediate(0));
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+                   g.TempImmediate(cont->trap_id()));
   }
 }
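
VisitWordCompareZero (and the compare helper above it) now covers all four continuation kinds. As a summary of the code above, not new behavior:

// IsBranch():     append { true_block, false_block } as label inputs
// IsDeoptimize(): EmitDeoptimize(..., kind(), reason(), frame_state())
// IsSet():        define cont->result() as the boolean output register
// IsTrap():       append TempImmediate(cont->trap_id()) as the final input
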
 
@@ -1632,14 +1620,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index a3bf433..3ab85e0 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -270,6 +270,26 @@
   bool must_save_lr_;
 };
 
+#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T)                 \
+  class ool_name final : public OutOfLineCode {                      \
+   public:                                                           \
+    ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
+        : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
+                                                                     \
+    void Generate() final { __ masm_ool_name(dst_, src1_, src2_); }  \
+                                                                     \
+   private:                                                          \
+    T const dst_;                                                    \
+    T const src1_;                                                   \
+    T const src2_;                                                   \
+  }
+
+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);
+
+#undef CREATE_OOL_CLASS
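
For reference, CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister) expands to approximately the following class:

class OutOfLineFloat32Max final : public OutOfLineCode {
 public:
  OutOfLineFloat32Max(CodeGenerator* gen, FPURegister dst, FPURegister src1,
                      FPURegister src2)
      : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {}

  void Generate() final { __ Float32MaxOutOfLine(dst_, src1_, src2_); }

 private:
  FPURegister const dst_;
  FPURegister const src1_;
  FPURegister const src2_;
};
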
 
 Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
   switch (condition) {
@@ -366,85 +386,108 @@
 }
 
 }  // namespace
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
+#define ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, length, out_of_bounds)         \
   do {                                                                        \
-    auto result = i.Output##width##Register();                                \
-    auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
-    if (instr->InputAt(0)->IsRegister()) {                                    \
-      auto offset = i.InputRegister(0);                                       \
-      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
-      __ And(kScratchReg, offset, Operand(0xffffffff));                       \
-      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg);                 \
-      __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
+    if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
+      __ And(kScratchReg, offset, Operand(~(length.immediate() - 1)));        \
+      __ Branch(USE_DELAY_SLOT, out_of_bounds, ne, kScratchReg,               \
+                Operand(zero_reg));                                           \
     } else {                                                                  \
-      int offset = static_cast<int>(i.InputOperand(0).immediate());           \
-      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
-      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
+      __ Branch(USE_DELAY_SLOT, out_of_bounds, hs, offset, length);           \
     }                                                                         \
-    __ bind(ool->exit());                                                     \
   } while (0)
 
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
+#define ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, length, out_of_bounds)        \
   do {                                                                        \
-    auto result = i.OutputRegister();                                         \
-    auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
-    if (instr->InputAt(0)->IsRegister()) {                                    \
-      auto offset = i.InputRegister(0);                                       \
-      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
-      __ And(kScratchReg, offset, Operand(0xffffffff));                       \
-      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg);                 \
-      __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
+    if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
+      __ Or(kScratchReg, zero_reg, Operand(offset));                          \
+      __ And(kScratchReg, kScratchReg, Operand(~(length.immediate() - 1)));   \
+      __ Branch(out_of_bounds, ne, kScratchReg, Operand(zero_reg));           \
     } else {                                                                  \
-      int offset = static_cast<int>(i.InputOperand(0).immediate());           \
-      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
-      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
+      __ Branch(out_of_bounds, ls, length.rm(), Operand(offset));             \
     }                                                                         \
-    __ bind(ool->exit());                                                     \
   } while (0)
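
Both bounds-check macros use the same power-of-two shortcut: for unsigned offset and a power-of-two length L, offset >= L exactly when offset has a bit set at or above log2(L), that is, (offset & ~(L - 1)) != 0. This folds the check into an And plus a branch-on-nonzero (with a delay slot in the register variant) instead of a compare against a second register. The identity as a self-contained check:

#include <cstdint>
// Assumes length is a power of two, mirroring the IsPowerOfTwo64 guard above.
inline bool OutOfBounds(uint64_t offset, uint64_t length) {
  return (offset & ~(length - 1)) != 0;  // same result as offset >= length
}
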
 
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
-  do {                                                                 \
-    Label done;                                                        \
-    if (instr->InputAt(0)->IsRegister()) {                             \
-      auto offset = i.InputRegister(0);                                \
-      auto value = i.InputOrZero##width##Register(2);                  \
-      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {      \
-        __ Move(kDoubleRegZero, 0.0);                                  \
-      }                                                                \
-      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
-      __ And(kScratchReg, offset, Operand(0xffffffff));                \
-      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);          \
-      __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
-    } else {                                                           \
-      int offset = static_cast<int>(i.InputOperand(0).immediate());    \
-      auto value = i.InputOrZero##width##Register(2);                  \
-      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {      \
-        __ Move(kDoubleRegZero, 0.0);                                  \
-      }                                                                \
-      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
-      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
-    }                                                                  \
-    __ bind(&done);                                                    \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                          \
+  do {                                                                         \
+    auto result = i.Output##width##Register();                                 \
+    auto ool = new (zone()) OutOfLineLoad##width(this, result);                \
+    if (instr->InputAt(0)->IsRegister()) {                                     \
+      auto offset = i.InputRegister(0);                                        \
+      ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
+      __ And(kScratchReg, offset, Operand(0xffffffff));                        \
+      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg);                  \
+      __ asm_instr(result, MemOperand(kScratchReg, 0));                        \
+    } else {                                                                   \
+      int offset = static_cast<int>(i.InputOperand(0).immediate());            \
+      ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1),               \
+                                      ool->entry());                           \
+      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));            \
+    }                                                                          \
+    __ bind(ool->exit());                                                      \
   } while (0)
 
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
-  do {                                                                 \
-    Label done;                                                        \
-    if (instr->InputAt(0)->IsRegister()) {                             \
-      auto offset = i.InputRegister(0);                                \
-      auto value = i.InputOrZeroRegister(2);                           \
-      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
-      __ And(kScratchReg, offset, Operand(0xffffffff));                \
-      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);          \
-      __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
-    } else {                                                           \
-      int offset = static_cast<int>(i.InputOperand(0).immediate());    \
-      auto value = i.InputOrZeroRegister(2);                           \
-      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
-      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
-    }                                                                  \
-    __ bind(&done);                                                    \
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
+  do {                                                                         \
+    auto result = i.OutputRegister();                                          \
+    auto ool = new (zone()) OutOfLineLoadInteger(this, result);                \
+    if (instr->InputAt(0)->IsRegister()) {                                     \
+      auto offset = i.InputRegister(0);                                        \
+      ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
+      __ And(kScratchReg, offset, Operand(0xffffffff));                        \
+      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg);                  \
+      __ asm_instr(result, MemOperand(kScratchReg, 0));                        \
+    } else {                                                                   \
+      int offset = static_cast<int>(i.InputOperand(0).immediate());            \
+      ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1),               \
+                                      ool->entry());                           \
+      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));            \
+    }                                                                          \
+    __ bind(ool->exit());                                                      \
+  } while (0)
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                   \
+  do {                                                                   \
+    Label done;                                                          \
+    if (instr->InputAt(0)->IsRegister()) {                               \
+      auto offset = i.InputRegister(0);                                  \
+      auto value = i.InputOrZero##width##Register(2);                    \
+      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {        \
+        __ Move(kDoubleRegZero, 0.0);                                    \
+      }                                                                  \
+      ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done);  \
+      __ And(kScratchReg, offset, Operand(0xffffffff));                  \
+      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);            \
+      __ asm_instr(value, MemOperand(kScratchReg, 0));                   \
+    } else {                                                             \
+      int offset = static_cast<int>(i.InputOperand(0).immediate());      \
+      auto value = i.InputOrZero##width##Register(2);                    \
+      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {        \
+        __ Move(kDoubleRegZero, 0.0);                                    \
+      }                                                                  \
+      ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
+      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));       \
+    }                                                                    \
+    __ bind(&done);                                                      \
+  } while (0)
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                        \
+  do {                                                                   \
+    Label done;                                                          \
+    if (instr->InputAt(0)->IsRegister()) {                               \
+      auto offset = i.InputRegister(0);                                  \
+      auto value = i.InputOrZeroRegister(2);                             \
+      ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done);  \
+      __ And(kScratchReg, offset, Operand(0xffffffff));                  \
+      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);            \
+      __ asm_instr(value, MemOperand(kScratchReg, 0));                   \
+    } else {                                                             \
+      int offset = static_cast<int>(i.InputOperand(0).immediate());      \
+      auto value = i.InputOrZeroRegister(2);                             \
+      ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
+      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));       \
+    }                                                                    \
+    __ bind(&done);                                                      \
   } while (0)
 
 #define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode)                                  \
@@ -556,7 +599,7 @@
   // Check if current frame is an arguments adaptor frame.
   __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
   __ Branch(&done, ne, scratch3,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // Load arguments count from current arguments adaptor frame (note, it
   // does not include receiver).
@@ -725,10 +768,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1326,36 +1367,24 @@
                i.InputDoubleRegister(1));
       break;
     case kMips64MaddS:
-      __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
-                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2),
+                kScratchDoubleReg);
       break;
     case kMips64MaddD:
-      __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
-      break;
-    case kMips64MaddfS:
-      __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
-                 i.InputFloatRegister(2));
-      break;
-    case kMips64MaddfD:
-      __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(2));
+      __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+                kScratchDoubleReg);
       break;
     case kMips64MsubS:
-      __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
-                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2),
+                kScratchDoubleReg);
       break;
     case kMips64MsubD:
-      __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
-      break;
-    case kMips64MsubfS:
-      __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
-                 i.InputFloatRegister(2));
-      break;
-    case kMips64MsubfD:
-      __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(2));
+      __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+                kScratchDoubleReg);
       break;
     case kMips64MulD:
       // TODO(plind): add special case: right op is -1.0, see arm port.
@@ -1430,47 +1459,39 @@
       break;
     }
     case kMips64Float32Max: {
-      Label compare_nan, done_compare;
-      __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
-                       i.InputSingleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputSingleRegister(),
-              std::numeric_limits<float>::quiet_NaN());
-      __ bind(&done_compare);
+      FPURegister dst = i.OutputSingleRegister();
+      FPURegister src1 = i.InputSingleRegister(0);
+      FPURegister src2 = i.InputSingleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
+      __ Float32Max(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMips64Float64Max: {
-      Label compare_nan, done_compare;
-      __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                       i.InputDoubleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputDoubleRegister(),
-              std::numeric_limits<double>::quiet_NaN());
-      __ bind(&done_compare);
+      FPURegister dst = i.OutputDoubleRegister();
+      FPURegister src1 = i.InputDoubleRegister(0);
+      FPURegister src2 = i.InputDoubleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
+      __ Float64Max(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMips64Float32Min: {
-      Label compare_nan, done_compare;
-      __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
-                       i.InputSingleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputSingleRegister(),
-              std::numeric_limits<float>::quiet_NaN());
-      __ bind(&done_compare);
+      FPURegister dst = i.OutputSingleRegister();
+      FPURegister src1 = i.InputSingleRegister(0);
+      FPURegister src2 = i.InputSingleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
+      __ Float32Min(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMips64Float64Min: {
-      Label compare_nan, done_compare;
-      __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                       i.InputDoubleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputDoubleRegister(),
-              std::numeric_limits<double>::quiet_NaN());
-      __ bind(&done_compare);
+      FPURegister dst = i.OutputDoubleRegister();
+      FPURegister src1 = i.InputDoubleRegister(0);
+      FPURegister src2 = i.InputDoubleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
+      __ Float64Min(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMips64Float64SilenceNaN:
@@ -1935,12 +1956,13 @@
   return false;
 }
 
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
+                            Instruction* instr, FlagsCondition condition,
+                            Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ masm->
+  MipsOperandConverter i(gen, instr);
 
-// Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
-  MipsOperandConverter i(this, instr);
-  Label* tlabel = branch->true_label;
-  Label* flabel = branch->false_label;
   Condition cc = kNoCondition;
   // MIPS does not have condition code flags, so compare and branch are
   // implemented differently from other architectures. The compare operations
@@ -1950,17 +1972,17 @@
   // they are tested here.
 
   if (instr->arch_opcode() == kMips64Tst) {
-    cc = FlagsConditionToConditionTst(branch->condition);
+    cc = FlagsConditionToConditionTst(condition);
     __ And(at, i.InputRegister(0), i.InputOperand(1));
     __ Branch(tlabel, cc, at, Operand(zero_reg));
   } else if (instr->arch_opcode() == kMips64Dadd ||
              instr->arch_opcode() == kMips64Dsub) {
-    cc = FlagsConditionToConditionOvf(branch->condition);
+    cc = FlagsConditionToConditionOvf(condition);
     __ dsra32(kScratchReg, i.OutputRegister(), 0);
     __ sra(at, i.OutputRegister(), 31);
     __ Branch(tlabel, cc, at, Operand(kScratchReg));
   } else if (instr->arch_opcode() == kMips64DaddOvf) {
-    switch (branch->condition) {
+    switch (condition) {
       case kOverflow:
         __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                          i.InputOperand(1), tlabel, flabel);
@@ -1970,11 +1992,11 @@
                          i.InputOperand(1), flabel, tlabel);
         break;
       default:
-        UNSUPPORTED_COND(kMips64DaddOvf, branch->condition);
+        UNSUPPORTED_COND(kMips64DaddOvf, condition);
         break;
     }
   } else if (instr->arch_opcode() == kMips64DsubOvf) {
-    switch (branch->condition) {
+    switch (condition) {
       case kOverflow:
         __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                          i.InputOperand(1), tlabel, flabel);
@@ -1984,11 +2006,11 @@
                          i.InputOperand(1), flabel, tlabel);
         break;
       default:
-        UNSUPPORTED_COND(kMips64DsubOvf, branch->condition);
+        UNSUPPORTED_COND(kMips64DsubOvf, condition);
         break;
     }
   } else if (instr->arch_opcode() == kMips64MulOvf) {
-    switch (branch->condition) {
+    switch (condition) {
       case kOverflow: {
         __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel, kScratchReg);
@@ -1998,15 +2020,15 @@
                         i.InputOperand(1), flabel, tlabel, kScratchReg);
       } break;
       default:
-        UNSUPPORTED_COND(kMips64MulOvf, branch->condition);
+        UNSUPPORTED_COND(kMips64MulOvf, condition);
         break;
     }
   } else if (instr->arch_opcode() == kMips64Cmp) {
-    cc = FlagsConditionToConditionCmp(branch->condition);
+    cc = FlagsConditionToConditionCmp(condition);
     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
   } else if (instr->arch_opcode() == kMips64CmpS) {
-    if (!convertCondition(branch->condition, cc)) {
-      UNSUPPORTED_COND(kMips64CmpS, branch->condition);
+    if (!convertCondition(condition, cc)) {
+      UNSUPPORTED_COND(kMips64CmpS, condition);
     }
     FPURegister left = i.InputOrZeroSingleRegister(0);
     FPURegister right = i.InputOrZeroSingleRegister(1);
@@ -2016,8 +2038,8 @@
     }
     __ BranchF32(tlabel, nullptr, cc, left, right);
   } else if (instr->arch_opcode() == kMips64CmpD) {
-    if (!convertCondition(branch->condition, cc)) {
-      UNSUPPORTED_COND(kMips64CmpD, branch->condition);
+    if (!convertCondition(condition, cc)) {
+      UNSUPPORTED_COND(kMips64CmpD, condition);
     }
     FPURegister left = i.InputOrZeroDoubleRegister(0);
     FPURegister right = i.InputOrZeroDoubleRegister(1);
@@ -2031,7 +2053,18 @@
            instr->arch_opcode());
     UNIMPLEMENTED();
   }
-  if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
+  if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
+#undef __
+#define __ masm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+
+  AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
+                         branch->fallthru);
 }
 
 
@@ -2039,6 +2072,65 @@
   if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+    void Generate() final {
+      MipsOperandConverter i(gen_, instr_);
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        // We use the context register as the scratch register, because we do
+        // not have a context here.
+        __ PrepareCallCFunction(0, 0, cp);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+        }
+      }
+    }
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
+}
 
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2239,13 +2331,16 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -2401,7 +2496,7 @@
           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
       switch (src.type()) {
         case Constant::kInt32:
-          if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmSizeReference(src.rmode())) {
             __ li(dst, Operand(src.ToInt32(), src.rmode()));
           } else {
             __ li(dst, Operand(src.ToInt32()));
@@ -2411,11 +2506,10 @@
           __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
           break;
         case Constant::kInt64:
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
             __ li(dst, Operand(src.ToInt64(), src.rmode()));
           } else {
-            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+            DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
             __ li(dst, Operand(src.ToInt64()));
           }
           break;
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
index 8f68ced..0c0e1aa 100644
--- a/src/compiler/mips64/instruction-codes-mips64.h
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -87,12 +87,8 @@
   V(Mips64MinD)                     \
   V(Mips64MaddS)                    \
   V(Mips64MaddD)                    \
-  V(Mips64MaddfS)                   \
-  V(Mips64MaddfD)                   \
   V(Mips64MsubS)                    \
   V(Mips64MsubD)                    \
-  V(Mips64MsubfS)                   \
-  V(Mips64MsubfD)                   \
   V(Mips64Float64RoundDown)         \
   V(Mips64Float64RoundTruncate)     \
   V(Mips64Float64RoundUp)           \
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index fbf09d6..4f19a17 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -92,9 +92,35 @@
       case kMips64Tst:
       case kMips64Xor:
         return is_uint16(value);
+      case kMips64Lb:
+      case kMips64Lbu:
+      case kMips64Sb:
+      case kMips64Lh:
+      case kMips64Lhu:
+      case kMips64Sh:
+      case kMips64Lw:
+      case kMips64Sw:
+      case kMips64Ld:
+      case kMips64Sd:
+      case kMips64Lwc1:
+      case kMips64Swc1:
       case kMips64Ldc1:
       case kMips64Sdc1:
-        return is_int16(value + kIntSize);
+      case kCheckedLoadInt8:
+      case kCheckedLoadUint8:
+      case kCheckedLoadInt16:
+      case kCheckedLoadUint16:
+      case kCheckedLoadWord32:
+      case kCheckedLoadWord64:
+      case kCheckedStoreWord8:
+      case kCheckedStoreWord16:
+      case kCheckedStoreWord32:
+      case kCheckedStoreWord64:
+      case kCheckedLoadFloat32:
+      case kCheckedLoadFloat64:
+      case kCheckedStoreFloat32:
+      case kCheckedStoreFloat64:
+        return is_int32(value);
       default:
         return is_int16(value);
     }
@@ -169,6 +195,16 @@
     DCHECK(m.IsWord64Sar());
     if (m.left().IsLoad() && m.right().Is(32) &&
         selector_->CanCover(m.node(), m.left().node())) {
+      MachineRepresentation rep =
+          LoadRepresentationOf(m.left().node()->op()).representation();
+      DCHECK(ElementSizeLog2Of(rep) == 3);
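+      // The matched load is always 64 bits wide, but only integer and tagged
+      // loads can be replaced by a 32-bit load of the upper word; leave other
+      // representations (e.g. Float64) alone.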
+      if (rep != MachineRepresentation::kTaggedSigned &&
+          rep != MachineRepresentation::kTaggedPointer &&
+          rep != MachineRepresentation::kTagged &&
+          rep != MachineRepresentation::kWord64) {
+        return;
+      }
+
       Mips64OperandGenerator g(selector_);
       Node* load = m.left().node();
       Node* offset = load->InputAt(1);
@@ -186,7 +222,8 @@
   }
 };
 
-bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
+                          Node* output_node) {
   ExtendingLoadMatcher m(node, selector);
   Mips64OperandGenerator g(selector);
   if (m.Matches()) {
@@ -196,7 +233,7 @@
         m.opcode() | AddressingModeField::encode(kMode_MRI);
     DCHECK(is_int32(m.immediate()));
     inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
-    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+    InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
     selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
                    inputs);
     return true;
@@ -232,10 +269,9 @@
                         &inputs[1])) {
     inputs[0] = g.UseRegister(m.left().node());
     input_count++;
-  }
-  if (has_reverse_opcode &&
-      TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
-                        &input_count, &inputs[1])) {
+  } else if (has_reverse_opcode &&
+             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+                               &input_count, &inputs[1])) {
     inputs[0] = g.UseRegister(m.right().node());
     opcode = reverse_opcode;
     input_count++;
@@ -247,6 +283,8 @@
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.TempImmediate(cont->trap_id());
   }
 
   if (cont->IsDeoptimize()) {
@@ -269,7 +307,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -341,6 +379,9 @@
       opcode = kMips64Ld;
       break;
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -418,6 +459,9 @@
         opcode = kMips64Sd;
         break;
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -438,6 +482,10 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk): Implement protected stores for MIPS64.
+  UNIMPLEMENTED();
+}
 
 void InstructionSelector::VisitWord32And(Node* node) {
   Mips64OperandGenerator g(this);
@@ -514,9 +562,13 @@
         // zeros.
         if (lsb + mask_width > 64) mask_width = 64 - lsb;
 
-        Emit(kMips64Dext, g.DefineAsRegister(node),
-             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
-             g.TempImmediate(static_cast<int32_t>(mask_width)));
+        if (lsb == 0 && mask_width == 64) {
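+          // Extracting all 64 bits starting at bit 0 is just a register
+          // move, so emit a no-op move instead of a Dext.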
+          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+        } else {
+          Emit(kMips64Dext, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+               g.TempImmediate(static_cast<int32_t>(mask_width)));
+        }
         return;
       }
       // Other cases fall through to the normal And operation.
@@ -748,7 +800,7 @@
 
 
 void InstructionSelector::VisitWord64Sar(Node* node) {
-  if (TryEmitExtendingLoad(this, node)) return;
+  if (TryEmitExtendingLoad(this, node, node)) return;
   VisitRRO(this, kMips64Dsar, node);
 }
 
@@ -824,7 +876,7 @@
   if (m.right().opcode() == IrOpcode::kWord32Shl &&
       CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
     Int32BinopMatcher mright(m.right().node());
-    if (mright.right().HasValue()) {
+    if (mright.right().HasValue() && !m.left().HasValue()) {
       int32_t shift_value = static_cast<int32_t>(mright.right().Value());
       Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
@@ -836,7 +888,7 @@
   if (m.left().opcode() == IrOpcode::kWord32Shl &&
       CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
     Int32BinopMatcher mleft(m.left().node());
-    if (mleft.right().HasValue()) {
+    if (mleft.right().HasValue() && !m.right().HasValue()) {
       int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
       Emit(kMips64Lsa, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
@@ -856,7 +908,7 @@
   if (m.right().opcode() == IrOpcode::kWord64Shl &&
       CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
     Int64BinopMatcher mright(m.right().node());
-    if (mright.right().HasValue()) {
+    if (mright.right().HasValue() && !m.left().HasValue()) {
       int32_t shift_value = static_cast<int32_t>(mright.right().Value());
       Emit(kMips64Dlsa, g.DefineAsRegister(node),
            g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
@@ -869,7 +921,7 @@
   if (m.left().opcode() == IrOpcode::kWord64Shl &&
       CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
     Int64BinopMatcher mleft(m.left().node());
-    if (mleft.right().HasValue()) {
+    if (mleft.right().HasValue() && !m.right().HasValue()) {
       int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
       Emit(kMips64Dlsa, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
@@ -1318,13 +1370,17 @@
   if (CanCover(node, value)) {
     switch (value->opcode()) {
       case IrOpcode::kWord64Sar: {
-        Int64BinopMatcher m(value);
-        if (m.right().IsInRange(32, 63)) {
-          // After smi untagging no need for truncate. Combine sequence.
-          Emit(kMips64Dsar, g.DefineSameAsFirst(node),
-               g.UseRegister(m.left().node()),
-               g.UseImmediate(m.right().node()));
+        if (TryEmitExtendingLoad(this, value, node)) {
           return;
+        } else {
+          Int64BinopMatcher m(value);
+          if (m.right().IsInRange(32, 63)) {
+            // After smi untagging no need for truncate. Combine sequence.
+            Emit(kMips64Dsar, g.DefineSameAsFirst(node),
+                 g.UseRegister(m.left().node()),
+                 g.UseImmediate(m.right().node()));
+            return;
+          }
         }
         break;
       }
@@ -1404,35 +1460,23 @@
 
 void InstructionSelector::VisitFloat32Add(Node* node) {
   Mips64OperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
-    // For Add.S(Mul.S(x, y), z):
-    Float32BinopMatcher mleft(m.left().node());
-    if (kArchVariant == kMips64r2) {  // Select Madd.S(z, x, y).
+  if (kArchVariant == kMips64r2) {  // Select Madd.S(z, x, y).
+    Float32BinopMatcher m(node);
+    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+      // For Add.S(Mul.S(x, y), z):
+      Float32BinopMatcher mleft(m.left().node());
       Emit(kMips64MaddS, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
-    } else if (kArchVariant == kMips64r6) {  // Select Maddf.S(z, x, y).
-      Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
     }
-  }
-  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    // For Add.S(x, Mul.S(y, z)):
-    Float32BinopMatcher mright(m.right().node());
-    if (kArchVariant == kMips64r2) {  // Select Madd.S(x, y, z).
+    if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+      // For Add.S(x, Mul.S(y, z)):
+      Float32BinopMatcher mright(m.right().node());
       Emit(kMips64MaddS, g.DefineAsRegister(node),
            g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()));
       return;
-    } else if (kArchVariant == kMips64r6) {  // Select Maddf.S(x, y, z).
-      Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
     }
   }
   VisitRRR(this, kMips64AddS, node);
@@ -1441,35 +1485,23 @@
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
   Mips64OperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    // For Add.D(Mul.D(x, y), z):
-    Float64BinopMatcher mleft(m.left().node());
-    if (kArchVariant == kMips64r2) {  // Select Madd.D(z, x, y).
+  if (kArchVariant == kMips64r2) {  // Select Madd.D(z, x, y).
+    Float64BinopMatcher m(node);
+    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+      // For Add.D(Mul.D(x, y), z):
+      Float64BinopMatcher mleft(m.left().node());
       Emit(kMips64MaddD, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
-    } else if (kArchVariant == kMips64r6) {  // Select Maddf.D(z, x, y).
-      Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
     }
-  }
-  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    // For Add.D(x, Mul.D(y, z)):
-    Float64BinopMatcher mright(m.right().node());
-    if (kArchVariant == kMips64r2) {  // Select Madd.D(x, y, z).
+    if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+      // For Add.D(x, Mul.D(y, z)):
+      Float64BinopMatcher mright(m.right().node());
       Emit(kMips64MaddD, g.DefineAsRegister(node),
            g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()));
       return;
-    } else if (kArchVariant == kMips64r6) {  // Select Maddf.D(x, y, z).
-      Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
     }
   }
   VisitRRR(this, kMips64AddD, node);
@@ -1478,9 +1510,9 @@
 
 void InstructionSelector::VisitFloat32Sub(Node* node) {
   Mips64OperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
-    if (kArchVariant == kMips64r2) {
+  if (kArchVariant == kMips64r2) {  // Select Msub.S(z, x, y).
+    Float32BinopMatcher m(node);
+    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
       // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
       Float32BinopMatcher mleft(m.left().node());
       Emit(kMips64MsubS, g.DefineAsRegister(node),
@@ -1488,24 +1520,15 @@
            g.UseRegister(mleft.right().node()));
       return;
     }
-  } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    if (kArchVariant == kMips64r6) {
-      // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
-      Float32BinopMatcher mright(m.right().node());
-      Emit(kMips64MsubfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
-    }
   }
   VisitRRR(this, kMips64SubS, node);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   Mips64OperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    if (kArchVariant == kMips64r2) {
+  if (kArchVariant == kMips64r2) {  // Select Msub.D(z, x, y).
+    Float64BinopMatcher m(node);
+    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
       // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
       Float64BinopMatcher mleft(m.left().node());
       Emit(kMips64MsubD, g.DefineAsRegister(node),
@@ -1513,15 +1536,6 @@
            g.UseRegister(mleft.right().node()));
       return;
     }
-  } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    if (kArchVariant == kMips64r6) {
-      // For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
-      Float64BinopMatcher mright(m.right().node());
-      Emit(kMips64MsubfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
-    }
   }
   VisitRRR(this, kMips64SubD, node);
 }
@@ -1735,6 +1749,9 @@
       opcode = kMips64Uld;
       break;
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1785,6 +1802,9 @@
       opcode = kMips64Usd;
       break;
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1835,6 +1855,9 @@
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:
     case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kSimd1x4:   // Fall through.
+    case MachineRepresentation::kSimd1x8:   // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1849,6 +1872,15 @@
                                                 : g.UseRegister(length)
                                           : g.UseRegister(length);
 
+  if (length->opcode() == IrOpcode::kInt32Constant) {
+    Int32Matcher m(length);
+    if (m.IsPowerOf2()) {
+      Emit(opcode, g.DefineAsRegister(node), offset_operand,
+           g.UseImmediate(length), g.UseRegister(buffer));
+      return;
+    }
+  }
+
   Emit(opcode | AddressingModeField::encode(kMode_MRI),
        g.DefineAsRegister(node), offset_operand, length_operand,
        g.UseRegister(buffer));
@@ -1887,6 +1919,9 @@
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:
     case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kSimd1x4:   // Fall through.
+    case MachineRepresentation::kSimd1x8:   // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1901,6 +1936,15 @@
                                                 : g.UseRegister(length)
                                           : g.UseRegister(length);
 
+  if (length->opcode() == IrOpcode::kInt32Constant) {
+    Int32Matcher m(length);
+    if (m.IsPowerOf2()) {
+      Emit(opcode, g.NoOutput(), offset_operand, g.UseImmediate(length),
+           g.UseRegisterOrImmediateZero(value), g.UseRegister(buffer));
+      return;
+    }
+  }
+
   Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
        offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
        g.UseRegister(buffer));
@@ -1919,11 +1963,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.TempImmediate(cont->trap_id()));
   }
 }
 
@@ -2133,8 +2180,11 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
-                             g.TempImmediate(0), cont->reason(),
+                             g.TempImmediate(0), cont->kind(), cont->reason(),
                              cont->frame_state());
+  } else if (cont->IsTrap()) {
+    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+                   g.TempImmediate(cont->trap_id()));
   } else {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    g.TempImmediate(0));
@@ -2269,14 +2319,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
diff --git a/src/compiler/node-marker.h b/src/compiler/node-marker.h
index 84666d5..e38105d 100644
--- a/src/compiler/node-marker.h
+++ b/src/compiler/node-marker.h
@@ -20,11 +20,10 @@
  public:
   NodeMarkerBase(Graph* graph, uint32_t num_states);
 
-  V8_INLINE Mark Get(Node* node) {
+  V8_INLINE Mark Get(const Node* node) {
     Mark mark = node->mark();
     if (mark < mark_min_) {
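+      // A mark left over from an older marker generation simply means
+      // "initial state"; return 0 directly instead of writing the mark back,
+      // which also lets Get() accept a const Node*.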
-      mark = mark_min_;
-      node->set_mark(mark_min_);
+      return 0;
     }
     DCHECK_LT(mark, mark_max_);
     return mark - mark_min_;
@@ -52,9 +51,9 @@
 // set to State(0) in constant time.
 //
 // In its current implementation, in debug mode NodeMarker will try to
-// (efficiently) detect invalid use of an older NodeMarker. Namely, if you get
-// or set a node with a NodeMarker, and then get or set that node
-// with an older NodeMarker you will get a crash.
+// (efficiently) detect invalid use of an older NodeMarker. Namely, if you set a
+// node with a NodeMarker, and then get or set that node with an older
+// NodeMarker you will get a crash.
 //
 // GraphReducer uses a NodeMarker, so individual Reducers cannot use a
 // NodeMarker.
@@ -64,7 +63,7 @@
   V8_INLINE NodeMarker(Graph* graph, uint32_t num_states)
       : NodeMarkerBase(graph, num_states) {}
 
-  V8_INLINE State Get(Node* node) {
+  V8_INLINE State Get(const Node* node) {
     return static_cast<State>(NodeMarkerBase::Get(node));
   }
 
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index c317fdd..d2bdb8b 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -489,13 +489,14 @@
     bool power_of_two_plus_one = false;
     DisplacementMode displacement_mode = kPositiveDisplacement;
     int scale = 0;
-    if (m.HasIndexInput() && left->OwnedBy(node)) {
+    if (m.HasIndexInput() && left->OwnedByAddressingOperand()) {
       index = m.IndexInput();
       scale = m.scale();
       scale_expression = left;
       power_of_two_plus_one = m.power_of_two_plus_one();
       bool match_found = false;
-      if (right->opcode() == AddMatcher::kSubOpcode && right->OwnedBy(node)) {
+      if (right->opcode() == AddMatcher::kSubOpcode &&
+          right->OwnedByAddressingOperand()) {
         AddMatcher right_matcher(right);
         if (right_matcher.right().HasValue()) {
           // (S + (B - D))
@@ -506,7 +507,8 @@
         }
       }
       if (!match_found) {
-        if (right->opcode() == AddMatcher::kAddOpcode && right->OwnedBy(node)) {
+        if (right->opcode() == AddMatcher::kAddOpcode &&
+            right->OwnedByAddressingOperand()) {
           AddMatcher right_matcher(right);
           if (right_matcher.right().HasValue()) {
             // (S + (B + D))
@@ -526,7 +528,8 @@
       }
     } else {
       bool match_found = false;
-      if (left->opcode() == AddMatcher::kSubOpcode && left->OwnedBy(node)) {
+      if (left->opcode() == AddMatcher::kSubOpcode &&
+          left->OwnedByAddressingOperand()) {
         AddMatcher left_matcher(left);
         Node* left_left = left_matcher.left().node();
         Node* left_right = left_matcher.right().node();
@@ -551,7 +554,8 @@
         }
       }
       if (!match_found) {
-        if (left->opcode() == AddMatcher::kAddOpcode && left->OwnedBy(node)) {
+        if (left->opcode() == AddMatcher::kAddOpcode &&
+            left->OwnedByAddressingOperand()) {
           AddMatcher left_matcher(left);
           Node* left_left = left_matcher.left().node();
           Node* left_right = left_matcher.right().node();
@@ -565,13 +569,19 @@
               displacement = left_right;
               base = right;
             } else if (m.right().HasValue()) {
-              // ((S + B) + D)
-              index = left_matcher.IndexInput();
-              scale = left_matcher.scale();
-              scale_expression = left_left;
-              power_of_two_plus_one = left_matcher.power_of_two_plus_one();
-              base = left_right;
-              displacement = right;
+              if (left->OwnedBy(node)) {
+                // ((S + B) + D)
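+  // With overflow ruled out, the corner combinations of the operand ranges
+  // bound the result, e.g. lhs in [3, 5] and rhs in [1, 2] yields
+  // Range(3 << 1, 5 << 2) = Range(6, 20).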
+                index = left_matcher.IndexInput();
+                scale = left_matcher.scale();
+                scale_expression = left_left;
+                power_of_two_plus_one = left_matcher.power_of_two_plus_one();
+                base = left_right;
+                displacement = right;
+              } else {
+                // (B + D)
+                base = left;
+                displacement = right;
+              }
             } else {
               // (B + B)
               index = left;
@@ -584,10 +594,16 @@
               displacement = left_right;
               base = right;
             } else if (m.right().HasValue()) {
-              // ((B + B) + D)
-              index = left_left;
-              base = left_right;
-              displacement = right;
+              if (left->OwnedBy(node)) {
+                // ((B + B) + D)
+                index = left_left;
+                base = left_right;
+                displacement = right;
+              } else {
+                // (B + D)
+                base = left;
+                displacement = right;
+              }
             } else {
               // (B + B)
               index = left;
diff --git a/src/compiler/node-properties.cc b/src/compiler/node-properties.cc
index 646dbc2..9243a08 100644
--- a/src/compiler/node-properties.cc
+++ b/src/compiler/node-properties.cc
@@ -2,14 +2,17 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/node-properties.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/linkage.h"
-#include "src/compiler/node-properties.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/compiler/verifier.h"
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -311,6 +314,111 @@
 #endif
 }
 
+// static
+bool NodeProperties::IsSame(Node* a, Node* b) {
+  for (;;) {
+    if (a->opcode() == IrOpcode::kCheckHeapObject) {
+      a = GetValueInput(a, 0);
+      continue;
+    }
+    if (b->opcode() == IrOpcode::kCheckHeapObject) {
+      b = GetValueInput(b, 0);
+      continue;
+    }
+    return a == b;
+  }
+}
+
+// static
+NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
+    Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return) {
+  HeapObjectMatcher m(receiver);
+  if (m.HasValue()) {
+    Handle<Map> receiver_map(m.Value()->map());
+    if (receiver_map->is_stable()) {
+      // The {receiver_map} is only reliable when we install a stability
+      // code dependency.
+      *maps_return = ZoneHandleSet<Map>(receiver_map);
+      return kUnreliableReceiverMaps;
+    }
+  }
+  InferReceiverMapsResult result = kReliableReceiverMaps;
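+  // Walk the effect chain backwards until a node is found that determines
+  // the map of {receiver}; any intervening write that cannot be ruled out
+  // downgrades the result to kUnreliableReceiverMaps.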
+  while (true) {
+    switch (effect->opcode()) {
+      case IrOpcode::kCheckMaps: {
+        Node* const object = GetValueInput(effect, 0);
+        if (IsSame(receiver, object)) {
+          *maps_return = CheckMapsParametersOf(effect->op()).maps();
+          return result;
+        }
+        break;
+      }
+      case IrOpcode::kJSCreate: {
+        if (IsSame(receiver, effect)) {
+          HeapObjectMatcher mtarget(GetValueInput(effect, 0));
+          HeapObjectMatcher mnewtarget(GetValueInput(effect, 1));
+          if (mtarget.HasValue() && mnewtarget.HasValue()) {
+            Handle<JSFunction> original_constructor =
+                Handle<JSFunction>::cast(mnewtarget.Value());
+            if (original_constructor->has_initial_map()) {
+              Handle<Map> initial_map(original_constructor->initial_map());
+              if (initial_map->constructor_or_backpointer() ==
+                  *mtarget.Value()) {
+                *maps_return = ZoneHandleSet<Map>(initial_map);
+                return result;
+              }
+            }
+          }
+          // We reached the allocation of the {receiver}.
+          return kNoReceiverMaps;
+        }
+        break;
+      }
+      case IrOpcode::kStoreField: {
+        // We only care about StoreField of maps.
+        Node* const object = GetValueInput(effect, 0);
+        FieldAccess const& access = FieldAccessOf(effect->op());
+        if (access.base_is_tagged == kTaggedBase &&
+            access.offset == HeapObject::kMapOffset) {
+          if (IsSame(receiver, object)) {
+            Node* const value = GetValueInput(effect, 1);
+            HeapObjectMatcher m(value);
+            if (m.HasValue()) {
+              *maps_return = ZoneHandleSet<Map>(Handle<Map>::cast(m.Value()));
+              return result;
+            }
+          }
+          // Without alias analysis we cannot tell whether this
+          // StoreField[map] affects {receiver} or not.
+          result = kUnreliableReceiverMaps;
+        }
+        break;
+      }
+      case IrOpcode::kJSStoreMessage:
+      case IrOpcode::kJSStoreModule:
+      case IrOpcode::kStoreElement:
+      case IrOpcode::kStoreTypedElement: {
+        // These never change the map of objects.
+        break;
+      }
+      default: {
+        DCHECK_EQ(1, effect->op()->EffectOutputCount());
+        if (effect->op()->EffectInputCount() != 1) {
+          // Didn't find any appropriate CheckMaps node.
+          return kNoReceiverMaps;
+        }
+        if (!effect->op()->HasProperty(Operator::kNoWrite)) {
+          // Without alias/escape analysis we cannot tell whether this
+          // {effect} affects {receiver} or not.
+          result = kUnreliableReceiverMaps;
+        }
+        break;
+      }
+    }
+    DCHECK_EQ(1, effect->op()->EffectInputCount());
+    effect = NodeProperties::GetEffectInput(effect);
+  }
+}
 
 // static
 MaybeHandle<Context> NodeProperties::GetSpecializationContext(
@@ -338,6 +446,17 @@
 
 
 // static
+Node* NodeProperties::GetOuterContext(Node* node, size_t* depth) {
+  Node* context = NodeProperties::GetContextInput(node);
+  while (*depth > 0 &&
+         IrOpcode::IsContextChainExtendingOpcode(context->opcode())) {
+    context = NodeProperties::GetContextInput(context);
+    (*depth)--;
+  }
+  return context;
+}
+
+// static
 Type* NodeProperties::GetTypeOrAny(Node* node) {
   return IsTyped(node) ? node->type() : Type::Any();
 }
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
index 2325323..5ed8540 100644
--- a/src/compiler/node-properties.h
+++ b/src/compiler/node-properties.h
@@ -8,6 +8,7 @@
 #include "src/compiler/node.h"
 #include "src/compiler/types.h"
 #include "src/globals.h"
+#include "src/zone/zone-handle-set.h"
 
 namespace v8 {
 namespace internal {
@@ -123,6 +124,20 @@
   //  - Switch: [ IfValue, ..., IfDefault ]
   static void CollectControlProjections(Node* node, Node** proj, size_t count);
 
+  // Checks if two nodes are the same, looking past {CheckHeapObject}.
+  static bool IsSame(Node* a, Node* b);
+
+  // Walks up the {effect} chain to find a witness that provides map
+  // information about the {receiver}. Can look through potentially
+  // side effecting nodes.
+  enum InferReceiverMapsResult {
+    kNoReceiverMaps,         // No receiver maps inferred.
+    kReliableReceiverMaps,   // Receiver maps can be trusted.
+    kUnreliableReceiverMaps  // Receiver maps might have changed (side-effect).
+  };
+  static InferReceiverMapsResult InferReceiverMaps(
+      Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return);
+
   // ---------------------------------------------------------------------------
   // Context.
 
@@ -132,6 +147,11 @@
   static MaybeHandle<Context> GetSpecializationContext(
       Node* node, MaybeHandle<Context> context = MaybeHandle<Context>());
 
+  // Walk up the context chain from the given {node} until we reduce the {depth}
+  // to 0 or hit a node that does not extend the context chain ({depth} will be
+  // updated accordingly).
+  static Node* GetOuterContext(Node* node, size_t* depth);
+
   // ---------------------------------------------------------------------------
   // Type.
 
diff --git a/src/compiler/node.cc b/src/compiler/node.cc
index f4e7b17..16dc2db 100644
--- a/src/compiler/node.cc
+++ b/src/compiler/node.cc
@@ -296,12 +296,44 @@
   return mask == 3;
 }
 
+bool Node::OwnedByAddressingOperand() const {
+  for (Use* use = first_use_; use; use = use->next) {
+    Node* from = use->from();
+    if (from->opcode() != IrOpcode::kLoad &&
+        // If {from} is store, make sure it does not use {this} as value
+        (from->opcode() != IrOpcode::kStore || from->InputAt(2) == this) &&
+        from->opcode() != IrOpcode::kInt32Add &&
+        from->opcode() != IrOpcode::kInt64Add) {
+      return false;
+    }
+  }
+  return true;
+}
 
 void Node::Print() const {
   OFStream os(stdout);
   os << *this << std::endl;
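+  // Also print each immediate input, so a single-node dump shows the
+  // operands it consumes.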
+  for (Node* input : this->inputs()) {
+    os << "  " << *input << std::endl;
+  }
 }
 
+std::ostream& operator<<(std::ostream& os, const Node& n) {
+  os << n.id() << ": " << *n.op();
+  if (n.InputCount() > 0) {
+    os << "(";
+    for (int i = 0; i < n.InputCount(); ++i) {
+      if (i != 0) os << ", ";
+      if (n.InputAt(i)) {
+        os << n.InputAt(i)->id();
+      } else {
+        os << "null";
+      }
+    }
+    os << ")";
+  }
+  return os;
+}
 
 Node::Node(NodeId id, const Operator* op, int inline_count, int inline_capacity)
     : op_(op),
@@ -378,25 +410,6 @@
 }
 #endif
 
-
-std::ostream& operator<<(std::ostream& os, const Node& n) {
-  os << n.id() << ": " << *n.op();
-  if (n.InputCount() > 0) {
-    os << "(";
-    for (int i = 0; i < n.InputCount(); ++i) {
-      if (i != 0) os << ", ";
-      if (n.InputAt(i)) {
-        os << n.InputAt(i)->id();
-      } else {
-        os << "null";
-      }
-    }
-    os << ")";
-  }
-  return os;
-}
-
-
 Node::InputEdges::iterator Node::InputEdges::iterator::operator++(int n) {
   iterator result(*this);
   ++(*this);
@@ -404,9 +417,6 @@
 }
 
 
-bool Node::InputEdges::empty() const { return begin() == end(); }
-
-
 Node::Inputs::const_iterator Node::Inputs::const_iterator::operator++(int n) {
   const_iterator result(*this);
   ++(*this);
@@ -414,9 +424,6 @@
 }
 
 
-bool Node::Inputs::empty() const { return begin() == end(); }
-
-
 Node::UseEdges::iterator Node::UseEdges::iterator::operator++(int n) {
   iterator result(*this);
   ++(*this);
diff --git a/src/compiler/node.h b/src/compiler/node.h
index dc6c5dc..b291af2 100644
--- a/src/compiler/node.h
+++ b/src/compiler/node.h
@@ -46,7 +46,7 @@
                    Node* const* inputs, bool has_extensible_inputs);
   static Node* Clone(Zone* zone, NodeId id, const Node* node);
 
-  bool IsDead() const { return InputCount() > 0 && !InputAt(0); }
+  inline bool IsDead() const;
   void Kill();
 
   const Operator* op() const { return op_; }
@@ -109,41 +109,11 @@
   int UseCount() const;
   void ReplaceUses(Node* replace_to);
 
-  class InputEdges final {
-   public:
-    typedef Edge value_type;
+  class InputEdges;
+  inline InputEdges input_edges();
 
-    class iterator;
-    inline iterator begin() const;
-    inline iterator end() const;
-
-    bool empty() const;
-
-    explicit InputEdges(Node* node) : node_(node) {}
-
-   private:
-    Node* node_;
-  };
-
-  InputEdges input_edges() { return InputEdges(this); }
-
-  class V8_EXPORT_PRIVATE Inputs final {
-   public:
-    typedef Node* value_type;
-
-    class const_iterator;
-    inline const_iterator begin() const;
-    inline const_iterator end() const;
-
-    bool empty() const;
-
-    explicit Inputs(Node* node) : node_(node) {}
-
-   private:
-    Node* node_;
-  };
-
-  Inputs inputs() { return Inputs(this); }
+  class Inputs;
+  inline Inputs inputs() const;
 
   class UseEdges final {
    public:
@@ -188,6 +158,10 @@
 
   // Returns true if {owner1} and {owner2} are the only users of {this} node.
   bool OwnedBy(Node const* owner1, Node const* owner2) const;
+
+  // Returns true if addressing-related operations (such as load, store, lea)
+  // are the only users of {this} node.
+  bool OwnedByAddressingOperand() const;
   void Print() const;
 
  private:
@@ -294,7 +268,7 @@
   void set_type(Type* type) { type_ = type; }
 
   // Only NodeMarkers should manipulate the marks on nodes.
-  Mark mark() { return mark_; }
+  Mark mark() const { return mark_; }
   void set_mark(Mark mark) { mark_ = mark; }
 
   inline bool has_inline_inputs() const {
@@ -345,6 +319,48 @@
   return OpParameter<T>(node->op());
 }
 
+class Node::InputEdges final {
+ public:
+  typedef Edge value_type;
+
+  class iterator;
+  inline iterator begin() const;
+  inline iterator end() const;
+
+  bool empty() const { return count_ == 0; }
+  int count() const { return count_; }
+
+  inline value_type operator[](int index) const;
+
+  InputEdges(Node** input_root, Use* use_root, int count)
+      : input_root_(input_root), use_root_(use_root), count_(count) {}
+
+ private:
+  Node** input_root_;
+  Use* use_root_;
+  int count_;
+};
+
+class V8_EXPORT_PRIVATE Node::Inputs final {
+ public:
+  typedef Node* value_type;
+
+  class const_iterator;
+  inline const_iterator begin() const;
+  inline const_iterator end() const;
+
+  bool empty() const { return count_ == 0; }
+  int count() const { return count_; }
+
+  inline value_type operator[](int index) const;
+
+  explicit Inputs(Node* const* input_root, int count)
+      : input_root_(input_root), count_(count) {}
+
+ private:
+  Node* const* input_root_;
+  int count_;
+};
 
 // An encapsulation for information associated with a single use of node as a
 // input from another node, allowing access to both the defining node and
@@ -373,6 +389,7 @@
 
  private:
   friend class Node::UseEdges::iterator;
+  friend class Node::InputEdges;
   friend class Node::InputEdges::iterator;
 
   Edge(Node::Use* use, Node** input_ptr) : use_(use), input_ptr_(input_ptr) {
@@ -385,12 +402,37 @@
   Node** input_ptr_;
 };
 
+bool Node::IsDead() const {
+  Node::Inputs inputs = this->inputs();
+  return inputs.count() > 0 && inputs[0] == nullptr;
+}
+
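+// Use records are laid out in reverse order directly before the Node (or its
+// out-of-line input array), so the Use for input {i} lives at
+// reinterpret_cast<Use*>(this) - 1 - i while the input pointers grow upwards.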
+Node::InputEdges Node::input_edges() {
+  int inline_count = InlineCountField::decode(bit_field_);
+  if (inline_count != kOutlineMarker) {
+    return InputEdges(inputs_.inline_, reinterpret_cast<Use*>(this) - 1,
+                      inline_count);
+  } else {
+    return InputEdges(inputs_.outline_->inputs_,
+                      reinterpret_cast<Use*>(inputs_.outline_) - 1,
+                      inputs_.outline_->count_);
+  }
+}
+
+Node::Inputs Node::inputs() const {
+  int inline_count = InlineCountField::decode(bit_field_);
+  if (inline_count != kOutlineMarker) {
+    return Inputs(inputs_.inline_, inline_count);
+  } else {
+    return Inputs(inputs_.outline_->inputs_, inputs_.outline_->count_);
+  }
+}
 
 // A forward iterator to visit the edges for the input dependencies of a node.
 class Node::InputEdges::iterator final {
  public:
   typedef std::forward_iterator_tag iterator_category;
-  typedef int difference_type;
+  typedef std::ptrdiff_t difference_type;
   typedef Edge value_type;
   typedef Edge* pointer;
   typedef Edge& reference;
@@ -410,12 +452,23 @@
     return *this;
   }
   iterator operator++(int);
+  iterator& operator+=(difference_type offset) {
+    input_ptr_ += offset;
+    use_ -= offset;
+    return *this;
+  }
+  iterator operator+(difference_type offset) const {
+    return iterator(use_ - offset, input_ptr_ + offset);
+  }
+  difference_type operator-(const iterator& other) const {
+    return input_ptr_ - other.input_ptr_;
+  }
 
  private:
   friend class Node;
 
-  explicit iterator(Node* from, int index = 0)
-      : use_(from->GetUsePtr(index)), input_ptr_(from->GetInputPtr(index)) {}
+  explicit iterator(Use* use, Node** input_ptr)
+      : use_(use), input_ptr_(input_ptr) {}
 
   Use* use_;
   Node** input_ptr_;
@@ -423,57 +476,71 @@
 
 
 Node::InputEdges::iterator Node::InputEdges::begin() const {
-  return Node::InputEdges::iterator(this->node_, 0);
+  return Node::InputEdges::iterator(use_root_, input_root_);
 }
 
 
 Node::InputEdges::iterator Node::InputEdges::end() const {
-  return Node::InputEdges::iterator(this->node_, this->node_->InputCount());
+  return Node::InputEdges::iterator(use_root_ - count_, input_root_ + count_);
 }
 
+Edge Node::InputEdges::operator[](int index) const {
+  return Edge(use_root_ + index, input_root_ + index);
+}
 
 // A forward iterator to visit the inputs of a node.
 class Node::Inputs::const_iterator final {
  public:
   typedef std::forward_iterator_tag iterator_category;
-  typedef int difference_type;
+  typedef std::ptrdiff_t difference_type;
   typedef Node* value_type;
-  typedef Node** pointer;
-  typedef Node*& reference;
+  typedef const value_type* pointer;
+  typedef value_type& reference;
 
-  const_iterator(const const_iterator& other) : iter_(other.iter_) {}
+  const_iterator(const const_iterator& other) : input_ptr_(other.input_ptr_) {}
 
-  Node* operator*() const { return (*iter_).to(); }
+  Node* operator*() const { return *input_ptr_; }
   bool operator==(const const_iterator& other) const {
-    return iter_ == other.iter_;
+    return input_ptr_ == other.input_ptr_;
   }
   bool operator!=(const const_iterator& other) const {
     return !(*this == other);
   }
   const_iterator& operator++() {
-    ++iter_;
+    ++input_ptr_;
     return *this;
   }
   const_iterator operator++(int);
+  const_iterator& operator+=(difference_type offset) {
+    input_ptr_ += offset;
+    return *this;
+  }
+  const_iterator operator+(difference_type offset) const {
+    return const_iterator(input_ptr_ + offset);
+  }
+  difference_type operator-(const const_iterator& other) const {
+    return input_ptr_ - other.input_ptr_;
+  }
 
  private:
   friend class Node::Inputs;
 
-  const_iterator(Node* node, int index) : iter_(node, index) {}
+  explicit const_iterator(Node* const* input_ptr) : input_ptr_(input_ptr) {}
 
-  Node::InputEdges::iterator iter_;
+  Node* const* input_ptr_;
 };
 
 
 Node::Inputs::const_iterator Node::Inputs::begin() const {
-  return const_iterator(this->node_, 0);
+  return const_iterator(input_root_);
 }
 
 
 Node::Inputs::const_iterator Node::Inputs::end() const {
-  return const_iterator(this->node_, this->node_->InputCount());
+  return const_iterator(input_root_ + count_);
 }
 
+Node* Node::Inputs::operator[](int index) const { return input_root_[index]; }
 
 // A forward iterator to visit the uses edges of a node.
 class Node::UseEdges::iterator final {
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index fdbe001..b50754c 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -25,6 +25,8 @@
   V(Deoptimize)            \
   V(DeoptimizeIf)          \
   V(DeoptimizeUnless)      \
+  V(TrapIf)                \
+  V(TrapUnless)            \
   V(Return)                \
   V(TailCall)              \
   V(Terminate)             \
@@ -57,6 +59,7 @@
   V(FrameState)           \
   V(StateValues)          \
   V(TypedStateValues)     \
+  V(ArgumentsObjectState) \
   V(ObjectState)          \
   V(TypedObjectState)     \
   V(Call)                 \
@@ -104,7 +107,9 @@
 #define JS_SIMPLE_BINOP_LIST(V) \
   JS_COMPARE_BINOP_LIST(V)      \
   JS_BITWISE_BINOP_LIST(V)      \
-  JS_ARITH_BINOP_LIST(V)
+  JS_ARITH_BINOP_LIST(V)        \
+  V(JSInstanceOf)               \
+  V(JSOrdinaryHasInstance)
 
 #define JS_CONVERSION_UNOP_LIST(V) \
   V(JSToBoolean)                   \
@@ -116,32 +121,34 @@
   V(JSToString)
 
 #define JS_OTHER_UNOP_LIST(V) \
+  V(JSClassOf)                \
   V(JSTypeOf)
 
 #define JS_SIMPLE_UNOP_LIST(V) \
   JS_CONVERSION_UNOP_LIST(V)   \
   JS_OTHER_UNOP_LIST(V)
 
-#define JS_OBJECT_OP_LIST(V)  \
-  V(JSCreate)                 \
-  V(JSCreateArguments)        \
-  V(JSCreateArray)            \
-  V(JSCreateClosure)          \
-  V(JSCreateIterResultObject) \
-  V(JSCreateKeyValueArray)    \
-  V(JSCreateLiteralArray)     \
-  V(JSCreateLiteralObject)    \
-  V(JSCreateLiteralRegExp)    \
-  V(JSLoadProperty)           \
-  V(JSLoadNamed)              \
-  V(JSLoadGlobal)             \
-  V(JSStoreProperty)          \
-  V(JSStoreNamed)             \
-  V(JSStoreGlobal)            \
-  V(JSDeleteProperty)         \
-  V(JSHasProperty)            \
-  V(JSInstanceOf)             \
-  V(JSOrdinaryHasInstance)
+#define JS_OBJECT_OP_LIST(V)      \
+  V(JSCreate)                     \
+  V(JSCreateArguments)            \
+  V(JSCreateArray)                \
+  V(JSCreateClosure)              \
+  V(JSCreateIterResultObject)     \
+  V(JSCreateKeyValueArray)        \
+  V(JSCreateLiteralArray)         \
+  V(JSCreateLiteralObject)        \
+  V(JSCreateLiteralRegExp)        \
+  V(JSLoadProperty)               \
+  V(JSLoadNamed)                  \
+  V(JSLoadGlobal)                 \
+  V(JSStoreProperty)              \
+  V(JSStoreNamed)                 \
+  V(JSStoreNamedOwn)              \
+  V(JSStoreGlobal)                \
+  V(JSStoreDataPropertyInLiteral) \
+  V(JSDeleteProperty)             \
+  V(JSHasProperty)                \
+  V(JSGetSuperConstructor)
 
 #define JS_CONTEXT_OP_LIST(V) \
   V(JSLoadContext)            \
@@ -153,8 +160,11 @@
   V(JSCreateScriptContext)
 
 #define JS_OTHER_OP_LIST(V)         \
-  V(JSCallConstruct)                \
-  V(JSCallFunction)                 \
+  V(JSConstruct)                    \
+  V(JSConstructWithSpread)          \
+  V(JSCallForwardVarargs)           \
+  V(JSCall)                         \
+  V(JSCallWithSpread)               \
   V(JSCallRuntime)                  \
   V(JSConvertReceiver)              \
   V(JSForInNext)                    \
@@ -166,7 +176,8 @@
   V(JSGeneratorStore)               \
   V(JSGeneratorRestoreContinuation) \
   V(JSGeneratorRestoreRegister)     \
-  V(JSStackCheck)
+  V(JSStackCheck)                   \
+  V(JSDebugger)
 
 #define JS_OP_LIST(V)     \
   JS_SIMPLE_BINOP_LIST(V) \
@@ -181,6 +192,7 @@
   V(ChangeTaggedToInt32)             \
   V(ChangeTaggedToUint32)            \
   V(ChangeTaggedToFloat64)           \
+  V(ChangeTaggedToTaggedSigned)      \
   V(ChangeInt31ToTaggedSigned)       \
   V(ChangeInt32ToTagged)             \
   V(ChangeUint32ToTagged)            \
@@ -294,13 +306,17 @@
   V(PlainPrimitiveToWord32)         \
   V(PlainPrimitiveToFloat64)        \
   V(BooleanNot)                     \
+  V(StringCharAt)                   \
   V(StringCharCodeAt)               \
   V(StringFromCharCode)             \
   V(StringFromCodePoint)            \
+  V(StringIndexOf)                  \
   V(CheckBounds)                    \
   V(CheckIf)                        \
   V(CheckMaps)                      \
   V(CheckNumber)                    \
+  V(CheckInternalizedString)        \
+  V(CheckReceiver)                  \
   V(CheckString)                    \
   V(CheckSmi)                       \
   V(CheckHeapObject)                \
@@ -316,12 +332,15 @@
   V(StoreBuffer)                    \
   V(StoreElement)                   \
   V(StoreTypedElement)              \
-  V(ObjectIsCallable)               \
+  V(ObjectIsDetectableCallable)     \
+  V(ObjectIsNonCallable)            \
   V(ObjectIsNumber)                 \
   V(ObjectIsReceiver)               \
   V(ObjectIsSmi)                    \
   V(ObjectIsString)                 \
   V(ObjectIsUndetectable)           \
+  V(NewRestParameterElements)       \
+  V(NewUnmappedArgumentsElements)   \
   V(ArrayBufferWasNeutered)         \
   V(EnsureWritableFastElements)     \
   V(MaybeGrowFastElements)          \
@@ -527,6 +546,7 @@
   V(Word32PairShr)              \
   V(Word32PairSar)              \
   V(ProtectedLoad)              \
+  V(ProtectedStore)             \
   V(AtomicLoad)                 \
   V(AtomicStore)                \
   V(UnsafePointerAdd)
@@ -553,9 +573,6 @@
   V(Float32x4LessThanOrEqual)               \
   V(Float32x4GreaterThan)                   \
   V(Float32x4GreaterThanOrEqual)            \
-  V(Float32x4Select)                        \
-  V(Float32x4Swizzle)                       \
-  V(Float32x4Shuffle)                       \
   V(Float32x4FromInt32x4)                   \
   V(Float32x4FromUint32x4)                  \
   V(CreateInt32x4)                          \
@@ -574,9 +591,6 @@
   V(Int32x4LessThanOrEqual)                 \
   V(Int32x4GreaterThan)                     \
   V(Int32x4GreaterThanOrEqual)              \
-  V(Int32x4Select)                          \
-  V(Int32x4Swizzle)                         \
-  V(Int32x4Shuffle)                         \
   V(Int32x4FromFloat32x4)                   \
   V(Uint32x4Min)                            \
   V(Uint32x4Max)                            \
@@ -587,16 +601,10 @@
   V(Uint32x4GreaterThan)                    \
   V(Uint32x4GreaterThanOrEqual)             \
   V(Uint32x4FromFloat32x4)                  \
-  V(CreateBool32x4)                         \
-  V(Bool32x4ReplaceLane)                    \
   V(Bool32x4And)                            \
   V(Bool32x4Or)                             \
   V(Bool32x4Xor)                            \
   V(Bool32x4Not)                            \
-  V(Bool32x4Swizzle)                        \
-  V(Bool32x4Shuffle)                        \
-  V(Bool32x4Equal)                          \
-  V(Bool32x4NotEqual)                       \
   V(CreateInt16x8)                          \
   V(Int16x8ReplaceLane)                     \
   V(Int16x8Neg)                             \
@@ -615,9 +623,6 @@
   V(Int16x8LessThanOrEqual)                 \
   V(Int16x8GreaterThan)                     \
   V(Int16x8GreaterThanOrEqual)              \
-  V(Int16x8Select)                          \
-  V(Int16x8Swizzle)                         \
-  V(Int16x8Shuffle)                         \
   V(Uint16x8AddSaturate)                    \
   V(Uint16x8SubSaturate)                    \
   V(Uint16x8Min)                            \
@@ -628,16 +633,10 @@
   V(Uint16x8LessThanOrEqual)                \
   V(Uint16x8GreaterThan)                    \
   V(Uint16x8GreaterThanOrEqual)             \
-  V(CreateBool16x8)                         \
-  V(Bool16x8ReplaceLane)                    \
   V(Bool16x8And)                            \
   V(Bool16x8Or)                             \
   V(Bool16x8Xor)                            \
   V(Bool16x8Not)                            \
-  V(Bool16x8Swizzle)                        \
-  V(Bool16x8Shuffle)                        \
-  V(Bool16x8Equal)                          \
-  V(Bool16x8NotEqual)                       \
   V(CreateInt8x16)                          \
   V(Int8x16ReplaceLane)                     \
   V(Int8x16Neg)                             \
@@ -656,9 +655,6 @@
   V(Int8x16LessThanOrEqual)                 \
   V(Int8x16GreaterThan)                     \
   V(Int8x16GreaterThanOrEqual)              \
-  V(Int8x16Select)                          \
-  V(Int8x16Swizzle)                         \
-  V(Int8x16Shuffle)                         \
   V(Uint8x16AddSaturate)                    \
   V(Uint8x16SubSaturate)                    \
   V(Uint8x16Min)                            \
@@ -669,16 +665,23 @@
   V(Uint8x16LessThanOrEqual)                \
   V(Uint8x16GreaterThan)                    \
   V(Uint8x16GreaterThanOrEqual)             \
-  V(CreateBool8x16)                         \
-  V(Bool8x16ReplaceLane)                    \
   V(Bool8x16And)                            \
   V(Bool8x16Or)                             \
   V(Bool8x16Xor)                            \
   V(Bool8x16Not)                            \
-  V(Bool8x16Swizzle)                        \
-  V(Bool8x16Shuffle)                        \
-  V(Bool8x16Equal)                          \
-  V(Bool8x16NotEqual)
+  V(Simd128And)                             \
+  V(Simd128Or)                              \
+  V(Simd128Xor)                             \
+  V(Simd128Not)                             \
+  V(Simd32x4Select)                         \
+  V(Simd32x4Swizzle)                        \
+  V(Simd32x4Shuffle)                        \
+  V(Simd16x8Select)                         \
+  V(Simd16x8Swizzle)                        \
+  V(Simd16x8Shuffle)                        \
+  V(Simd8x16Select)                         \
+  V(Simd8x16Swizzle)                        \
+  V(Simd8x16Shuffle)
 
 #define MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
   V(Float32x4ExtractLane)                  \
@@ -687,13 +690,10 @@
   V(Int8x16ExtractLane)
 
 #define MACHINE_SIMD_RETURN_BOOL_OP_LIST(V) \
-  V(Bool32x4ExtractLane)                    \
   V(Bool32x4AnyTrue)                        \
   V(Bool32x4AllTrue)                        \
-  V(Bool16x8ExtractLane)                    \
   V(Bool16x8AnyTrue)                        \
   V(Bool16x8AllTrue)                        \
-  V(Bool8x16ExtractLane)                    \
   V(Bool8x16AnyTrue)                        \
   V(Bool8x16AllTrue)
 
@@ -705,11 +705,7 @@
   V(Simd128Store)                       \
   V(Simd128Store1)                      \
   V(Simd128Store2)                      \
-  V(Simd128Store3)                      \
-  V(Simd128And)                         \
-  V(Simd128Or)                          \
-  V(Simd128Xor)                         \
-  V(Simd128Not)
+  V(Simd128Store3)
 
 #define MACHINE_SIMD_OP_LIST(V)       \
   MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
@@ -762,7 +758,7 @@
 
   // Returns true if the opcode is for a JavaScript operator.
   static bool IsJsOpcode(Value value) {
-    return kJSEqual <= value && value <= kJSStackCheck;
+    return kJSEqual <= value && value <= kJSDebugger;
   }
 
   // Returns true if the opcode is for a constant operator.
@@ -784,7 +780,7 @@
 
   // Returns true if opcode can be inlined.
   static bool IsInlineeOpcode(Value value) {
-    return value == kJSCallConstruct || value == kJSCallFunction;
+    return value == kJSConstruct || value == kJSCall;
   }
 
   // Returns true if the opcode is for a comparison operator.
@@ -793,6 +789,10 @@
            (kNumberEqual <= value && value <= kStringLessThanOrEqual) ||
            (kWord32Equal <= value && value <= kFloat64LessThanOrEqual);
   }
+
+  static bool IsContextChainExtendingOpcode(Value value) {
+    return kJSCreateFunctionContext <= value && value <= kJSCreateScriptContext;
+  }
 };
 
 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IrOpcode::Value);
diff --git a/src/compiler/operation-typer.cc b/src/compiler/operation-typer.cc
index 9198f4b..dfd4c4b 100644
--- a/src/compiler/operation-typer.cc
+++ b/src/compiler/operation-typer.cc
@@ -366,8 +366,9 @@
 Type* OperationTyper::NumberFloor(Type* type) {
   DCHECK(type->Is(Type::Number()));
   if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
-  // TODO(bmeurer): We could infer a more precise type here.
-  return cache_.kIntegerOrMinusZeroOrNaN;
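+  // Floor only produces integers, -0, or NaN, so preserve the -0/NaN part of
+  // the input type and widen everything else to the integers.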
+  type = Type::Intersect(type, Type::MinusZeroOrNaN(), zone());
+  type = Type::Union(type, cache_.kInteger, zone());
+  return type;
 }
 
 Type* OperationTyper::NumberFround(Type* type) {
@@ -624,12 +625,19 @@
   }
 
   if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
-  // Division is tricky, so all we do is try ruling out nan.
+  // Division is tricky, so all we do is try ruling out -0 and NaN.
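+  // A -0 result needs a negative sign on at least one input, so if both
+  // inputs are known to be non-negative (integers or NaN), -0 is impossible.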
+  bool maybe_minuszero = !lhs->Is(cache_.kPositiveIntegerOrNaN) ||
+                         !rhs->Is(cache_.kPositiveIntegerOrNaN);
   bool maybe_nan =
       lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
       ((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
        (rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
-  return maybe_nan ? Type::Number() : Type::OrderedNumber();
+
+  // Take into account the -0 and NaN information computed earlier.
+  Type* type = Type::PlainNumber();
+  if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+  if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+  return type;
 }
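
For context (not part of the patch): the maybe_minuszero test works because a finite quotient is -0 only when the operands' signs differ or a signed zero is involved, so two operands that are each a positive integer (or NaN) can never produce -0. A small illustration:

#include <cstdio>
#include <limits>

int main() {
  const double inf = std::numeric_limits<double>::infinity();
  std::printf("%g\n", 1.0 / -inf);  // -0: signs differ
  std::printf("%g\n", -0.0 / 5.0);  // -0: signed-zero numerator
  std::printf("%g\n", 3.0 / 2.0);   // 1.5: positive / positive is never -0
}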
 
 Type* OperationTyper::NumberModulus(Type* lhs, Type* rhs) {
@@ -796,8 +804,35 @@
   DCHECK(lhs->Is(Type::Number()));
   DCHECK(rhs->Is(Type::Number()));
 
-  // TODO(turbofan): Infer a better type here.
-  return Type::Signed32();
+  if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
+
+  lhs = NumberToInt32(lhs);
+  rhs = NumberToUint32(rhs);
+
+  int32_t min_lhs = lhs->Min();
+  int32_t max_lhs = lhs->Max();
+  uint32_t min_rhs = rhs->Min();
+  uint32_t max_rhs = rhs->Max();
+  if (max_rhs > 31) {
+    // rhs may exceed the five-bit shift mask, so any count in [0, 31] is possible

+    max_rhs = 31;
+    min_rhs = 0;
+  }
+
+  if (max_lhs > (kMaxInt >> max_rhs) || min_lhs < (kMinInt >> max_rhs)) {
+    // overflow possible
+    return Type::Signed32();
+  }
+
+  double min =
+      std::min(static_cast<int32_t>(static_cast<uint32_t>(min_lhs) << min_rhs),
+               static_cast<int32_t>(static_cast<uint32_t>(min_lhs) << max_rhs));
+  double max =
+      std::max(static_cast<int32_t>(static_cast<uint32_t>(max_lhs) << min_rhs),
+               static_cast<int32_t>(static_cast<uint32_t>(max_lhs) << max_rhs));
+
+  if (max == kMaxInt && min == kMinInt) return Type::Signed32();
+  return Type::Range(min, max, zone());
 }
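
For context (not part of the patch): JavaScript's << coerces lhs with ToInt32 and masks rhs to five bits; within the guarded range the result is monotone in lhs, but once a significant bit crosses bit 31 the sign can flip, which is why the overflow check above bails out to Signed32. A sketch in plain C++ (the wrap-to-int32 cast is well defined since C++20; before that it is the usual implementation-defined idiom):

#include <cstdint>
#include <cstdio>

// JS "lhs << rhs" is ToInt32(lhs) << (ToUint32(rhs) & 31), wrapped to int32.
int32_t JsShl(int32_t lhs, uint32_t rhs) {
  return static_cast<int32_t>(static_cast<uint32_t>(lhs) << (rhs & 31));
}

int main() {
  std::printf("%d\n", JsShl(3, 4));        // 48: no bit reaches bit 31
  std::printf("%d\n", JsShl(1 << 30, 1));  // -2147483648: sign flips, range lost
}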
 
 Type* OperationTyper::NumberShiftRight(Type* lhs, Type* rhs) {
@@ -809,33 +844,18 @@
   lhs = NumberToInt32(lhs);
   rhs = NumberToUint32(rhs);
 
-  double min = kMinInt;
-  double max = kMaxInt;
-  if (lhs->Min() >= 0) {
-    // Right-shifting a non-negative value cannot make it negative, nor larger.
-    min = std::max(min, 0.0);
-    max = std::min(max, lhs->Max());
-    if (rhs->Min() > 0 && rhs->Max() <= 31) {
-      max = static_cast<int>(max) >> static_cast<int>(rhs->Min());
-    }
+  int32_t min_lhs = lhs->Min();
+  int32_t max_lhs = lhs->Max();
+  uint32_t min_rhs = rhs->Min();
+  uint32_t max_rhs = rhs->Max();
+  if (max_rhs > 31) {
+    // rhs may exceed the five-bit shift mask, so any count in [0, 31] is possible
+    max_rhs = 31;
+    min_rhs = 0;
   }
-  if (lhs->Max() < 0) {
-    // Right-shifting a negative value cannot make it non-negative, nor smaller.
-    min = std::max(min, lhs->Min());
-    max = std::min(max, -1.0);
-    if (rhs->Min() > 0 && rhs->Max() <= 31) {
-      min = static_cast<int>(min) >> static_cast<int>(rhs->Min());
-    }
-  }
-  if (rhs->Min() > 0 && rhs->Max() <= 31) {
-    // Right-shifting by a positive value yields a small integer value.
-    double shift_min = kMinInt >> static_cast<int>(rhs->Min());
-    double shift_max = kMaxInt >> static_cast<int>(rhs->Min());
-    min = std::max(min, shift_min);
-    max = std::min(max, shift_max);
-  }
-  // TODO(jarin) Ideally, the following micro-optimization should be performed
-  // by the type constructor.
+  double min = std::min(min_lhs >> min_rhs, min_lhs >> max_rhs);
+  double max = std::max(max_lhs >> min_rhs, max_lhs >> max_rhs);
+
   if (max == kMaxInt && min == kMinInt) return Type::Signed32();
   return Type::Range(min, max, zone());
 }
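
For context (not part of the patch): the rewritten bounds evaluate each endpoint at both shift extremes because signed >> is not monotone in the shift count: a larger shift moves negative values up toward -1 but positive values down toward 0. For example (arithmetic right shift, guaranteed since C++20):

#include <cstdio>

int main() {
  // Negative lhs: more shift means a larger result (toward -1).
  std::printf("%d %d\n", -8 >> 1, -8 >> 3);  // -4 -1
  // Positive lhs: more shift means a smaller result (toward 0).
  std::printf("%d %d\n", 8 >> 1, 8 >> 3);    // 4 1
}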
@@ -844,12 +864,29 @@
   DCHECK(lhs->Is(Type::Number()));
   DCHECK(rhs->Is(Type::Number()));
 
-  if (!lhs->IsInhabited()) return Type::None();
+  if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
 
   lhs = NumberToUint32(lhs);
+  rhs = NumberToUint32(rhs);
 
-  // Logical right-shifting any value cannot make it larger.
-  return Type::Range(0.0, lhs->Max(), zone());
+  uint32_t min_lhs = lhs->Min();
+  uint32_t max_lhs = lhs->Max();
+  uint32_t min_rhs = rhs->Min();
+  uint32_t max_rhs = rhs->Max();
+  if (max_rhs > 31) {
+    // rhs may exceed the five-bit shift mask, so any count in [0, 31] is possible
+    max_rhs = 31;
+    min_rhs = 0;
+  }
+
+  double min = min_lhs >> max_rhs;
+  double max = max_lhs >> min_rhs;
+  DCHECK_LE(0, min);
+  DCHECK_LE(max, kMaxUInt32);
+
+  if (min == 0 && max == kMaxInt) return Type::Unsigned31();
+  if (min == 0 && max == kMaxUInt32) return Type::Unsigned32();
+  return Type::Range(min, max, zone());
 }
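
For context (not part of the patch): unsigned >>> is monotone (increasing in lhs, decreasing in rhs), so min pairs the smallest lhs with the largest shift and max pairs the largest lhs with the smallest shift; a one-bit shift already brings any value into Unsigned31, while a possible zero shift can leave it in full Unsigned32. For example:

#include <cstdint>
#include <cstdio>

// JS "lhs >>> rhs" is ToUint32(lhs) >> (ToUint32(rhs) & 31).
uint32_t JsShrLogical(uint32_t lhs, uint32_t rhs) { return lhs >> (rhs & 31); }

int main() {
  std::printf("%u\n", JsShrLogical(0xFFFFFFFFu, 1));  // 2147483647: fits Unsigned31
  std::printf("%u\n", JsShrLogical(0xFFFFFFFFu, 0));  // 4294967295: needs Unsigned32
}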
 
 Type* OperationTyper::NumberAtan2(Type* lhs, Type* rhs) {
diff --git a/src/compiler/operator-properties.cc b/src/compiler/operator-properties.cc
index 0a9e644..0d488d8 100644
--- a/src/compiler/operator-properties.cc
+++ b/src/compiler/operator-properties.cc
@@ -78,6 +78,8 @@
     case IrOpcode::kJSStoreProperty:
     case IrOpcode::kJSLoadGlobal:
     case IrOpcode::kJSStoreGlobal:
+    case IrOpcode::kJSStoreNamedOwn:
+    case IrOpcode::kJSStoreDataPropertyInLiteral:
     case IrOpcode::kJSDeleteProperty:
 
     // Context operations
@@ -92,14 +94,18 @@
     case IrOpcode::kJSToString:
 
     // Call operations
-    case IrOpcode::kJSCallConstruct:
-    case IrOpcode::kJSCallFunction:
+    case IrOpcode::kJSConstruct:
+    case IrOpcode::kJSConstructWithSpread:
+    case IrOpcode::kJSCallForwardVarargs:
+    case IrOpcode::kJSCall:
+    case IrOpcode::kJSCallWithSpread:
 
     // Misc operations
-    case IrOpcode::kJSConvertReceiver:
     case IrOpcode::kJSForInNext:
     case IrOpcode::kJSForInPrepare:
     case IrOpcode::kJSStackCheck:
+    case IrOpcode::kJSDebugger:
+    case IrOpcode::kJSGetSuperConstructor:
       return true;
 
     default:
diff --git a/src/compiler/osr.cc b/src/compiler/osr.cc
index a2dc430..687424b 100644
--- a/src/compiler/osr.cc
+++ b/src/compiler/osr.cc
@@ -268,28 +268,7 @@
     }
   }
 
-  OsrGuardType guard_type = OsrGuardType::kAny;
-  // Find the phi that uses the OsrGuard node and get the type from
-  // there. Skip the search if the OsrGuard does not have value use
-  // (i.e., if there is other use beyond the effect use).
-  if (OsrGuardTypeOf(osr_guard->op()) == OsrGuardType::kUninitialized &&
-      osr_guard->UseCount() > 1) {
-    Type* type = nullptr;
-    for (Node* use : osr_guard->uses()) {
-      if (use->opcode() == IrOpcode::kPhi) {
-        if (NodeProperties::GetControlInput(use) != loop) continue;
-        CHECK_NULL(type);
-        type = NodeProperties::GetType(use);
-      }
-    }
-    CHECK_NOT_NULL(type);
-
-    if (type->Is(Type::SignedSmall())) {
-      guard_type = OsrGuardType::kSignedSmall;
-    }
-  }
-
-  NodeProperties::ChangeOp(osr_guard, common->OsrGuard(guard_type));
+  NodeProperties::ChangeOp(osr_guard, common->OsrGuard(OsrGuardType::kAny));
 }
 
 }  // namespace
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 2614155..330b096 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -37,7 +37,6 @@
 #include "src/compiler/js-create-lowering.h"
 #include "src/compiler/js-frame-specialization.h"
 #include "src/compiler/js-generic-lowering.h"
-#include "src/compiler/js-global-object-specialization.h"
 #include "src/compiler/js-inlining-heuristic.h"
 #include "src/compiler/js-intrinsic-lowering.h"
 #include "src/compiler/js-native-context-specialization.h"
@@ -65,7 +64,6 @@
 #include "src/compiler/simplified-operator.h"
 #include "src/compiler/store-store-elimination.h"
 #include "src/compiler/tail-call-optimization.h"
-#include "src/compiler/type-hint-analyzer.h"
 #include "src/compiler/typed-optimization.h"
 #include "src/compiler/typer.h"
 #include "src/compiler/value-numbering-reducer.h"
@@ -75,6 +73,7 @@
 #include "src/ostreams.h"
 #include "src/parsing/parse-info.h"
 #include "src/register-configuration.h"
+#include "src/trap-handler/trap-handler.h"
 #include "src/type-info.h"
 #include "src/utils.h"
 
@@ -111,11 +110,37 @@
     javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
     jsgraph_ = new (graph_zone_)
         JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
+    is_asm_ = info->shared_info()->asm_function();
   }
 
   // For WASM compile entry point.
+  PipelineData(ZoneStats* zone_stats, CompilationInfo* info, JSGraph* jsgraph,
+               SourcePositionTable* source_positions,
+               ZoneVector<trap_handler::ProtectedInstructionData>*
+                   protected_instructions)
+      : isolate_(info->isolate()),
+        info_(info),
+        debug_name_(info_->GetDebugName()),
+        zone_stats_(zone_stats),
+        graph_zone_scope_(zone_stats_, ZONE_NAME),
+        graph_(jsgraph->graph()),
+        source_positions_(source_positions),
+        machine_(jsgraph->machine()),
+        common_(jsgraph->common()),
+        javascript_(jsgraph->javascript()),
+        jsgraph_(jsgraph),
+        instruction_zone_scope_(zone_stats_, ZONE_NAME),
+        instruction_zone_(instruction_zone_scope_.zone()),
+        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
+        register_allocation_zone_(register_allocation_zone_scope_.zone()),
+        protected_instructions_(protected_instructions) {
+    is_asm_ =
+        info->has_shared_info() ? info->shared_info()->asm_function() : false;
+  }
+
+  // For machine graph testing entry point.
   PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
-               SourcePositionTable* source_positions)
+               Schedule* schedule, SourcePositionTable* source_positions)
       : isolate_(info->isolate()),
         info_(info),
         debug_name_(info_->GetDebugName()),
@@ -123,27 +148,13 @@
         graph_zone_scope_(zone_stats_, ZONE_NAME),
         graph_(graph),
         source_positions_(source_positions),
-        instruction_zone_scope_(zone_stats_, ZONE_NAME),
-        instruction_zone_(instruction_zone_scope_.zone()),
-        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
-        register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
-
-  // For machine graph testing entry point.
-  PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
-               Schedule* schedule)
-      : isolate_(info->isolate()),
-        info_(info),
-        debug_name_(info_->GetDebugName()),
-        zone_stats_(zone_stats),
-        graph_zone_scope_(zone_stats_, ZONE_NAME),
-        graph_(graph),
-        source_positions_(new (info->zone()) SourcePositionTable(graph_)),
         schedule_(schedule),
         instruction_zone_scope_(zone_stats_, ZONE_NAME),
         instruction_zone_(instruction_zone_scope_.zone()),
         register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
-        register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
-
+        register_allocation_zone_(register_allocation_zone_scope_.zone()) {
+    is_asm_ = false;
+  }
   // For register allocation testing entry point.
   PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
                InstructionSequence* sequence)
@@ -156,7 +167,10 @@
         instruction_zone_(sequence->zone()),
         sequence_(sequence),
         register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
-        register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
+        register_allocation_zone_(register_allocation_zone_scope_.zone()) {
+    is_asm_ =
+        info->has_shared_info() ? info->shared_info()->asm_function() : false;
+  }
 
   ~PipelineData() {
     DeleteRegisterAllocationZone();
@@ -170,6 +184,11 @@
   PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
   bool compilation_failed() const { return compilation_failed_; }
   void set_compilation_failed() { compilation_failed_ = true; }
+
+  bool is_asm() const { return is_asm_; }
+  bool verify_graph() const { return verify_graph_; }
+  void set_verify_graph(bool value) { verify_graph_ = value; }
+
   Handle<Code> code() { return code_; }
   void set_code(Handle<Code> code) {
     DCHECK(code_.is_null());
@@ -199,12 +218,6 @@
     loop_assignment_ = loop_assignment;
   }
 
-  TypeHintAnalysis* type_hint_analysis() const { return type_hint_analysis_; }
-  void set_type_hint_analysis(TypeHintAnalysis* type_hint_analysis) {
-    DCHECK_NULL(type_hint_analysis_);
-    type_hint_analysis_ = type_hint_analysis;
-  }
-
   Schedule* schedule() const { return schedule_; }
   void set_schedule(Schedule* schedule) {
     DCHECK(!schedule_);
@@ -233,6 +246,11 @@
     source_position_output_ = source_position_output;
   }
 
+  ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions()
+      const {
+    return protected_instructions_;
+  }
+
   void DeleteGraphZone() {
     if (graph_zone_ == nullptr) return;
     graph_zone_scope_.Destroy();
@@ -240,7 +258,6 @@
     graph_ = nullptr;
     source_positions_ = nullptr;
     loop_assignment_ = nullptr;
-    type_hint_analysis_ = nullptr;
     simplified_ = nullptr;
     machine_ = nullptr;
     common_ = nullptr;
@@ -293,7 +310,7 @@
     DCHECK(register_allocation_data_ == nullptr);
     register_allocation_data_ = new (register_allocation_zone())
         RegisterAllocationData(config, register_allocation_zone(), frame(),
-                               sequence(), debug_name_.get());
+                               sequence(), debug_name());
   }
 
   void BeginPhaseKind(const char* phase_kind_name) {
@@ -308,6 +325,8 @@
     }
   }
 
+  const char* debug_name() const { return debug_name_.get(); }
+
  private:
   Isolate* const isolate_;
   CompilationInfo* const info_;
@@ -316,6 +335,8 @@
   ZoneStats* const zone_stats_;
   PipelineStatistics* pipeline_statistics_ = nullptr;
   bool compilation_failed_ = false;
+  bool verify_graph_ = false;
+  bool is_asm_ = false;
   Handle<Code> code_ = Handle<Code>::null();
 
   // All objects in the following group of fields are allocated in graph_zone_.
@@ -325,7 +346,6 @@
   Graph* graph_ = nullptr;
   SourcePositionTable* source_positions_ = nullptr;
   LoopAssignmentAnalysis* loop_assignment_ = nullptr;
-  TypeHintAnalysis* type_hint_analysis_ = nullptr;
   SimplifiedOperatorBuilder* simplified_ = nullptr;
   MachineOperatorBuilder* machine_ = nullptr;
   CommonOperatorBuilder* common_ = nullptr;
@@ -355,6 +375,9 @@
   // Source position output for --trace-turbo.
   std::string source_position_output_;
 
+  ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions_ =
+      nullptr;
+
   DISALLOW_COPY_AND_ASSIGN(PipelineData);
 };
 
@@ -522,14 +545,13 @@
 
 class PipelineCompilationJob final : public CompilationJob {
  public:
-  PipelineCompilationJob(Isolate* isolate, Handle<JSFunction> function)
+  PipelineCompilationJob(ParseInfo* parse_info, Handle<JSFunction> function)
       // Note that the CompilationInfo is not initialized at the time we pass it
       // to the CompilationJob constructor, but it is not dereferenced there.
-      : CompilationJob(isolate, &info_, "TurboFan"),
-        zone_(isolate->allocator(), ZONE_NAME),
-        zone_stats_(isolate->allocator()),
-        parse_info_(&zone_, handle(function->shared())),
-        info_(&parse_info_, function),
+      : CompilationJob(parse_info->isolate(), &info_, "TurboFan"),
+        parse_info_(parse_info),
+        zone_stats_(parse_info->isolate()->allocator()),
+        info_(parse_info_.get()->zone(), parse_info_.get(), function),
         pipeline_statistics_(CreatePipelineStatistics(info(), &zone_stats_)),
         data_(&zone_stats_, info(), pipeline_statistics_.get()),
         pipeline_(&data_),
@@ -541,9 +563,8 @@
   Status FinalizeJobImpl() final;
 
  private:
-  Zone zone_;
+  std::unique_ptr<ParseInfo> parse_info_;
   ZoneStats zone_stats_;
-  ParseInfo parse_info_;
   CompilationInfo info_;
   std::unique_ptr<PipelineStatistics> pipeline_statistics_;
   PipelineData data_;
@@ -555,30 +576,37 @@
 
 PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
   if (info()->shared_info()->asm_function()) {
-    if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
+    if (info()->osr_frame() && !info()->is_optimizing_from_bytecode()) {
+      info()->MarkAsFrameSpecializing();
+    }
     info()->MarkAsFunctionContextSpecializing();
   } else {
     if (!FLAG_always_opt) {
       info()->MarkAsBailoutOnUninitialized();
     }
-    if (FLAG_turbo_inlining) {
-      info()->MarkAsInliningEnabled();
+    if (FLAG_turbo_loop_peeling) {
+      info()->MarkAsLoopPeelingEnabled();
     }
   }
-  if (!info()->shared_info()->asm_function() || FLAG_turbo_asm_deoptimization) {
+  if (info()->is_optimizing_from_bytecode() ||
+      !info()->shared_info()->asm_function()) {
     info()->MarkAsDeoptimizationEnabled();
     if (FLAG_inline_accessors) {
       info()->MarkAsAccessorInliningEnabled();
     }
+    if (info()->closure()->feedback_vector_cell()->map() ==
+        isolate()->heap()->one_closure_cell_map()) {
+      info()->MarkAsFunctionContextSpecializing();
+    }
   }
   if (!info()->is_optimizing_from_bytecode()) {
-    if (info()->is_deoptimization_enabled() && FLAG_turbo_type_feedback) {
-      info()->MarkAsTypeFeedbackEnabled();
-    }
     if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
+  } else if (FLAG_turbo_inlining) {
+    info()->MarkAsInliningEnabled();
   }
 
-  linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
+  linkage_ = new (info()->zone())
+      Linkage(Linkage::ComputeIncoming(info()->zone(), info()));
 
   if (!pipeline_.CreateGraph()) {
     if (isolate()->has_pending_exception()) return FAILED;  // Stack overflowed.
@@ -612,15 +640,18 @@
 
 class PipelineWasmCompilationJob final : public CompilationJob {
  public:
-  explicit PipelineWasmCompilationJob(CompilationInfo* info, Graph* graph,
-                                      CallDescriptor* descriptor,
-                                      SourcePositionTable* source_positions)
+  explicit PipelineWasmCompilationJob(
+      CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
+      SourcePositionTable* source_positions,
+      ZoneVector<trap_handler::ProtectedInstructionData>* protected_insts,
+      bool allow_signalling_nan)
       : CompilationJob(info->isolate(), info, "TurboFan",
                        State::kReadyToExecute),
         zone_stats_(info->isolate()->allocator()),
-        data_(&zone_stats_, info, graph, source_positions),
+        data_(&zone_stats_, info, jsgraph, source_positions, protected_insts),
         pipeline_(&data_),
-        linkage_(descriptor) {}
+        linkage_(descriptor),
+        allow_signalling_nan_(allow_signalling_nan) {}
 
  protected:
   Status PrepareJobImpl() final;
@@ -632,6 +663,7 @@
   PipelineData data_;
   PipelineImpl pipeline_;
   Linkage linkage_;
+  bool allow_signalling_nan_;
 };
 
 PipelineWasmCompilationJob::Status
@@ -649,6 +681,24 @@
   }
 
   pipeline_.RunPrintAndVerify("Machine", true);
+  if (FLAG_wasm_opt) {
+    PipelineData* data = &data_;
+    PipelineRunScope scope(data, "WASM optimization");
+    JSGraphReducer graph_reducer(data->jsgraph(), scope.zone());
+    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+                                              data->common());
+    ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
+    MachineOperatorReducer machine_reducer(data->jsgraph(),
+                                           allow_signalling_nan_);
+    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+                                         data->common(), data->machine());
+    AddReducer(data, &graph_reducer, &dead_code_elimination);
+    AddReducer(data, &graph_reducer, &value_numbering);
+    AddReducer(data, &graph_reducer, &machine_reducer);
+    AddReducer(data, &graph_reducer, &common_reducer);
+    graph_reducer.ReduceGraph();
+    pipeline_.RunPrintAndVerify("Optimized Machine", true);
+  }
 
   if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
   return SUCCEEDED;
@@ -694,20 +744,6 @@
 };
 
 
-struct TypeHintAnalysisPhase {
-  static const char* phase_name() { return "type hint analysis"; }
-
-  void Run(PipelineData* data, Zone* temp_zone) {
-    if (data->info()->is_type_feedback_enabled()) {
-      TypeHintAnalyzer analyzer(data->graph_zone());
-      Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
-      TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
-      data->set_type_hint_analysis(type_hint_analysis);
-    }
-  }
-};
-
-
 struct GraphBuilderPhase {
   static const char* phase_name() { return "graph builder"; }
 
@@ -715,15 +751,18 @@
     bool succeeded = false;
 
     if (data->info()->is_optimizing_from_bytecode()) {
-      BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
-                                         data->jsgraph(), 1.0f,
-                                         data->source_positions());
+      // Bytecode graph builder assumes deoptimization is enabled.
+      DCHECK(data->info()->is_deoptimization_enabled());
+      BytecodeGraphBuilder graph_builder(
+          temp_zone, data->info()->shared_info(),
+          handle(data->info()->closure()->feedback_vector()),
+          data->info()->osr_ast_id(), data->jsgraph(), 1.0f,
+          data->source_positions());
       succeeded = graph_builder.CreateGraph();
     } else {
       AstGraphBuilderWithPositions graph_builder(
           temp_zone, data->info(), data->jsgraph(), 1.0f,
-          data->loop_assignment(), data->type_hint_analysis(),
-          data->source_positions());
+          data->loop_assignment(), data->source_positions());
       succeeded = graph_builder.CreateGraph();
     }
 
@@ -741,17 +780,16 @@
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
+    CheckpointElimination checkpoint_elimination(&graph_reducer);
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
     JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
-    if (data->info()->is_bailout_on_uninitialized()) {
-      call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
-    }
     if (data->info()->is_deoptimization_enabled()) {
       call_reducer_flags |= JSCallReducer::kDeoptimizationEnabled;
     }
     JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
-                               call_reducer_flags, data->native_context());
+                               call_reducer_flags, data->native_context(),
+                               data->info()->dependencies());
     JSContextSpecialization context_specialization(
         &graph_reducer, data->jsgraph(),
         data->info()->is_function_context_specializing()
@@ -759,9 +797,6 @@
             : MaybeHandle<Context>());
     JSFrameSpecialization frame_specialization(
         &graph_reducer, data->info()->osr_frame(), data->jsgraph());
-    JSGlobalObjectSpecialization global_object_specialization(
-        &graph_reducer, data->jsgraph(), data->global_object(),
-        data->info()->dependencies());
     JSNativeContextSpecialization::Flags flags =
         JSNativeContextSpecialization::kNoFlags;
     if (data->info()->is_accessor_inlining_enabled()) {
@@ -787,13 +822,11 @@
             ? JSIntrinsicLowering::kDeoptimizationEnabled
             : JSIntrinsicLowering::kDeoptimizationDisabled);
     AddReducer(data, &graph_reducer, &dead_code_elimination);
+    AddReducer(data, &graph_reducer, &checkpoint_elimination);
     AddReducer(data, &graph_reducer, &common_reducer);
     if (data->info()->is_frame_specializing()) {
       AddReducer(data, &graph_reducer, &frame_specialization);
     }
-    if (data->info()->is_deoptimization_enabled()) {
-      AddReducer(data, &graph_reducer, &global_object_specialization);
-    }
     AddReducer(data, &graph_reducer, &native_context_specialization);
     AddReducer(data, &graph_reducer, &context_specialization);
     AddReducer(data, &graph_reducer, &intrinsic_lowering);
@@ -817,21 +850,6 @@
   }
 };
 
-struct OsrTyperPhase {
-  static const char* phase_name() { return "osr typer"; }
-
-  void Run(PipelineData* data, Zone* temp_zone) {
-    NodeVector roots(temp_zone);
-    data->jsgraph()->GetCachedNodes(&roots);
-    // Dummy induction variable optimizer: at the moment, we do not try
-    // to compute loop variable bounds on OSR.
-    LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
-                                         data->common(), temp_zone);
-    Typer typer(data->isolate(), Typer::kNoFlags, data->graph());
-    typer.Run(roots, &induction_vars);
-  }
-};
-
 struct UntyperPhase {
   static const char* phase_name() { return "untyper"; }
 
@@ -888,10 +906,11 @@
             ? JSBuiltinReducer::kDeoptimizationEnabled
             : JSBuiltinReducer::kNoFlags,
         data->info()->dependencies(), data->native_context());
-    Handle<LiteralsArray> literals_array(data->info()->closure()->literals());
+    Handle<FeedbackVector> feedback_vector(
+        data->info()->closure()->feedback_vector());
     JSCreateLowering create_lowering(
         &graph_reducer, data->info()->dependencies(), data->jsgraph(),
-        literals_array, data->native_context(), temp_zone);
+        feedback_vector, data->native_context(), temp_zone);
     JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
     if (data->info()->is_deoptimization_enabled()) {
       typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
@@ -930,7 +949,7 @@
   void Run(PipelineData* data, Zone* temp_zone) {
     EscapeAnalysis escape_analysis(data->graph(), data->jsgraph()->common(),
                                    temp_zone);
-    escape_analysis.Run();
+    if (!escape_analysis.Run()) return;
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
                                          &escape_analysis, temp_zone);
@@ -944,8 +963,8 @@
   }
 };
 
-struct RepresentationSelectionPhase {
-  static const char* phase_name() { return "representation selection"; }
+struct SimplifiedLoweringPhase {
+  static const char* phase_name() { return "simplified lowering"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
     SimplifiedLowering lowering(data->jsgraph(), temp_zone,
@@ -978,6 +997,23 @@
   }
 };
 
+struct ConcurrentOptimizationPrepPhase {
+  static const char* phase_name() {
+    return "concurrent optimization preparation";
+  }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    // Make sure we cache these code stubs.
+    data->jsgraph()->CEntryStubConstant(1);
+    data->jsgraph()->CEntryStubConstant(2);
+    data->jsgraph()->CEntryStubConstant(3);
+
+    // This is needed for escape analysis.
+    NodeProperties::SetType(data->jsgraph()->FalseConstant(), Type::Boolean());
+    NodeProperties::SetType(data->jsgraph()->TrueConstant(), Type::Boolean());
+  }
+};
+
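
For context (not part of the patch): the prep phase eagerly materializes constants that later, possibly concurrent, phases will need, since those typically must not allocate new handles off the main thread. A minimal sketch of the warm-then-share pattern, with hypothetical names:

#include <cassert>
#include <thread>
#include <unordered_map>

// Hypothetical stand-in for JSGraph's constant cache: entries may only be
// created on the main thread, so everything a background phase needs is
// inserted up front; afterwards the map is read-only and safe to share.
class ConstantCache {
 public:
  void Warm(int argc) { cache_.emplace(argc, argc * 10); }  // main thread only
  int Lookup(int argc) const {
    auto it = cache_.find(argc);
    assert(it != cache_.end() && "must be warmed before concurrent use");
    return it->second;
  }

 private:
  std::unordered_map<int, int> cache_;
};

int main() {
  ConstantCache cache;
  for (int argc : {1, 2, 3}) cache.Warm(argc);  // mirrors CEntryStubConstant(1..3)
  std::thread worker([&] { (void)cache.Lookup(2); });  // read-only from here on
  worker.join();
}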
 struct GenericLoweringPhase {
   static const char* phase_name() { return "generic lowering"; }
 
@@ -1178,21 +1214,6 @@
 };
 
 
-struct StressLoopPeelingPhase {
-  static const char* phase_name() { return "stress loop peeling"; }
-
-  void Run(PipelineData* data, Zone* temp_zone) {
-    // Peel the first outer loop for testing.
-    // TODO(titzer): peel all loops? the N'th loop? Innermost loops?
-    LoopTree* loop_tree = LoopFinder::BuildLoopTree(data->graph(), temp_zone);
-    if (loop_tree != nullptr && loop_tree->outer_loops().size() > 0) {
-      LoopPeeler::Peel(data->graph(), data->common(), loop_tree,
-                       loop_tree->outer_loops()[0], temp_zone);
-    }
-  }
-};
-
-
 struct ComputeSchedulePhase {
   static const char* phase_name() { return "scheduling"; }
 
@@ -1475,8 +1496,6 @@
     Run<LoopAssignmentAnalysisPhase>();
   }
 
-  Run<TypeHintAnalysisPhase>();
-
   Run<GraphBuilderPhase>();
   if (data->compilation_failed()) {
     data->EndPhaseKind();
@@ -1486,8 +1505,6 @@
 
   // Perform OSR deconstruction.
   if (info()->is_osr()) {
-    Run<OsrTyperPhase>();
-
     Run<OsrDeconstructionPhase>();
 
     Run<UntyperPhase>();
@@ -1512,7 +1529,7 @@
     // Determine the Typer operation flags.
     Typer::Flags flags = Typer::kNoFlags;
     if (is_sloppy(info()->shared_info()->language_mode()) &&
-        !info()->shared_info()->IsBuiltin()) {
+        info()->shared_info()->IsUserJavaScript()) {
       // Sloppy mode functions always have an Object for this.
       flags |= Typer::kThisIsReceiver;
     }
@@ -1533,43 +1550,50 @@
     // Lower JSOperators where we can determine types.
     Run<TypedLoweringPhase>();
     RunPrintAndVerify("Lowered typed");
+  }
 
-    if (FLAG_turbo_loop_peeling) {
-      Run<LoopPeelingPhase>();
-      RunPrintAndVerify("Loops peeled", true);
-    } else {
-      Run<LoopExitEliminationPhase>();
-      RunPrintAndVerify("Loop exits eliminated", true);
+  // Do some hacky things to prepare for the optimization phase
+  // (caching handles, etc.).
+  Run<ConcurrentOptimizationPrepPhase>();
+
+  data->EndPhaseKind();
+
+  return true;
+}
+
+bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
+  PipelineData* data = this->data_;
+
+  if (data->info()->is_loop_peeling_enabled()) {
+    Run<LoopPeelingPhase>();
+    RunPrintAndVerify("Loops peeled", true);
+  } else {
+    Run<LoopExitEliminationPhase>();
+    RunPrintAndVerify("Loop exits eliminated", true);
+  }
+
+  if (!data->is_asm()) {
+    if (FLAG_turbo_load_elimination) {
+      Run<LoadEliminationPhase>();
+      RunPrintAndVerify("Load eliminated");
     }
 
-    if (FLAG_turbo_stress_loop_peeling) {
-      Run<StressLoopPeelingPhase>();
-      RunPrintAndVerify("Loop peeled");
-    }
-
-    if (!info()->shared_info()->asm_function()) {
-      if (FLAG_turbo_load_elimination) {
-        Run<LoadEliminationPhase>();
-        RunPrintAndVerify("Load eliminated");
+    if (FLAG_turbo_escape) {
+      Run<EscapeAnalysisPhase>();
+      if (data->compilation_failed()) {
+        info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
+        data->EndPhaseKind();
+        return false;
       }
-
-      if (FLAG_turbo_escape) {
-        Run<EscapeAnalysisPhase>();
-        if (data->compilation_failed()) {
-          info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
-          data->EndPhaseKind();
-          return false;
-        }
-        RunPrintAndVerify("Escape Analysed");
-      }
+      RunPrintAndVerify("Escape Analysed");
     }
   }
 
-  // Select representations. This has to run w/o the Typer decorator, because
-  // we cannot compute meaningful types anyways, and the computed types might
-  // even conflict with the representation/truncation logic.
-  Run<RepresentationSelectionPhase>();
-  RunPrintAndVerify("Representations selected", true);
+  // Perform simplified lowering. This has to run w/o the Typer decorator,
+  // because we cannot compute meaningful types anyways, and the computed types
+  // might even conflict with the representation/truncation logic.
+  Run<SimplifiedLoweringPhase>();
+  RunPrintAndVerify("Simplified lowering", true);
 
 #ifdef DEBUG
   // From now on it is invalid to look at types on the nodes, because:
@@ -1592,14 +1616,6 @@
   Run<GenericLoweringPhase>();
   RunPrintAndVerify("Generic lowering", true);
 
-  data->EndPhaseKind();
-
-  return true;
-}
-
-bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
-  PipelineData* data = this->data_;
-
   data->BeginPhaseKind("block building");
 
   // Run early optimization pass.
@@ -1648,7 +1664,9 @@
 
   // Construct a pipeline for scheduling and code generation.
   ZoneStats zone_stats(isolate->allocator());
-  PipelineData data(&zone_stats, &info, graph, schedule);
+  SourcePositionTable source_positions(graph);
+  PipelineData data(&zone_stats, &info, graph, schedule, &source_positions);
+  data.set_verify_graph(FLAG_verify_csa);
   std::unique_ptr<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
     pipeline_statistics.reset(new PipelineStatistics(&info, &zone_stats));
@@ -1660,6 +1678,12 @@
 
   if (FLAG_trace_turbo) {
     {
+      CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+      OFStream os(tracing_scope.file());
+      os << "---------------------------------------------------\n"
+         << "Begin compiling " << debug_name << " using Turbofan" << std::endl;
+    }
+    {
       TurboJsonFile json_of(&info, std::ios_base::trunc);
       json_of << "{\"function\":\"" << info.GetDebugName().get()
               << "\", \"source\":\"\",\n\"phases\":[";
@@ -1696,13 +1720,16 @@
 }
 
 // static
-Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
-                                              CallDescriptor* call_descriptor,
-                                              Graph* graph,
-                                              Schedule* schedule) {
+Handle<Code> Pipeline::GenerateCodeForTesting(
+    CompilationInfo* info, CallDescriptor* call_descriptor, Graph* graph,
+    Schedule* schedule, SourcePositionTable* source_positions) {
   // Construct a pipeline for scheduling and code generation.
   ZoneStats zone_stats(info->isolate()->allocator());
-  PipelineData data(&zone_stats, info, graph, schedule);
+  // TODO(wasm): Refactor code generation to check for non-existing source
+  // table, then remove this conditional allocation.
+  if (!source_positions)
+    source_positions = new (info->zone()) SourcePositionTable(graph);
+  PipelineData data(&zone_stats, info, graph, schedule, source_positions);
   std::unique_ptr<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
     pipeline_statistics.reset(new PipelineStatistics(info, &zone_stats));
@@ -1723,16 +1750,27 @@
 }
 
 // static
-CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function) {
-  return new PipelineCompilationJob(function->GetIsolate(), function);
+CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function,
+                                            bool has_script) {
+  Handle<SharedFunctionInfo> shared = handle(function->shared());
+  ParseInfo* parse_info;
+  if (!has_script) {
+    parse_info = ParseInfo::AllocateWithoutScript(shared);
+  } else {
+    parse_info = new ParseInfo(shared);
+  }
+  return new PipelineCompilationJob(parse_info, function);
 }
 
 // static
 CompilationJob* Pipeline::NewWasmCompilationJob(
-    CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
-    SourcePositionTable* source_positions) {
-  return new PipelineWasmCompilationJob(info, graph, descriptor,
-                                        source_positions);
+    CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
+    SourcePositionTable* source_positions,
+    ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions,
+    bool allow_signalling_nan) {
+  return new PipelineWasmCompilationJob(
+      info, jsgraph, descriptor, source_positions, protected_instructions,
+      allow_signalling_nan);
 }
 
 bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
@@ -1767,12 +1805,27 @@
         info(), data->graph(), data->schedule()));
   }
 
-  if (FLAG_turbo_verify_machine_graph != nullptr &&
-      (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
-       !strcmp(FLAG_turbo_verify_machine_graph,
-               data->info()->GetDebugName().get()))) {
+  bool verify_stub_graph = data->verify_graph();
+  if (verify_stub_graph ||
+      (FLAG_turbo_verify_machine_graph != nullptr &&
+       (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
+        !strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())))) {
+    if (FLAG_trace_verify_csa) {
+      AllowHandleDereference allow_deref;
+      CompilationInfo* info = data->info();
+      CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+      OFStream os(tracing_scope.file());
+      os << "--------------------------------------------------\n"
+         << "--- Verifying " << data->debug_name() << " generated by TurboFan\n"
+         << "--------------------------------------------------\n"
+         << *data->schedule()
+         << "--------------------------------------------------\n"
+         << "--- End of " << data->debug_name() << " generated by TurboFan\n"
+         << "--------------------------------------------------\n";
+    }
     Zone temp_zone(data->isolate()->allocator(), ZONE_NAME);
     MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
+                              data->info()->IsStub(), data->debug_name(),
                               &temp_zone);
   }
 
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
index 0c0a57b..624ef01 100644
--- a/src/compiler/pipeline.h
+++ b/src/compiler/pipeline.h
@@ -9,6 +9,7 @@
 // Do not include anything from src/compiler here!
 #include "src/globals.h"
 #include "src/objects.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -17,9 +18,14 @@
 class CompilationJob;
 class RegisterConfiguration;
 
+namespace trap_handler {
+struct ProtectedInstructionData;
+}  // namespace trap_handler
+
 namespace compiler {
 
 class CallDescriptor;
+class JSGraph;
 class Graph;
 class InstructionSequence;
 class Schedule;
@@ -28,12 +34,16 @@
 class Pipeline : public AllStatic {
  public:
   // Returns a new compilation job for the given function.
-  static CompilationJob* NewCompilationJob(Handle<JSFunction> function);
+  static CompilationJob* NewCompilationJob(Handle<JSFunction> function,
+                                           bool has_script);
 
   // Returns a new compilation job for the WebAssembly compilation info.
   static CompilationJob* NewWasmCompilationJob(
-      CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
-      SourcePositionTable* source_positions);
+      CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
+      SourcePositionTable* source_positions,
+      ZoneVector<trap_handler::ProtectedInstructionData>*
+          protected_instructions,
+      bool wasm_origin);
 
   // Run the pipeline on a machine graph and generate code. The {schedule} must
   // be valid, hence the given {graph} does not need to be schedulable.
@@ -60,10 +70,10 @@
 
   // Run the pipeline on a machine graph and generate code. If {schedule} is
   // {nullptr}, then compute a new schedule for code generation.
-  static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
-                                             CallDescriptor* call_descriptor,
-                                             Graph* graph,
-                                             Schedule* schedule = nullptr);
+  static Handle<Code> GenerateCodeForTesting(
+      CompilationInfo* info, CallDescriptor* call_descriptor, Graph* graph,
+      Schedule* schedule = nullptr,
+      SourcePositionTable* source_positions = nullptr);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Pipeline);
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index a838ede..455b0ae 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -34,6 +34,7 @@
       case kFlags_branch:
       case kFlags_deoptimize:
       case kFlags_set:
+      case kFlags_trap:
         return SetRC;
       case kFlags_none:
         return LeaveRC;
@@ -263,7 +264,8 @@
       // Overflow checked for add/sub only.
       switch (op) {
 #if V8_TARGET_ARCH_PPC64
-        case kPPC_Add:
+        case kPPC_Add32:
+        case kPPC_Add64:
         case kPPC_Sub:
 #endif
         case kPPC_AddWithOverflow32:
@@ -276,7 +278,8 @@
     case kNotOverflow:
       switch (op) {
 #if V8_TARGET_ARCH_PPC64
-        case kPPC_Add:
+        case kPPC_Add32:
+        case kPPC_Add64:
         case kPPC_Sub:
 #endif
         case kPPC_AddWithOverflow32:
@@ -761,36 +764,33 @@
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
   } while (0)
 
-#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx)   \
-  do {                                                        \
-    Label done;                                               \
-    Register result = i.OutputRegister();                     \
-    AddressingMode mode = kMode_None;                         \
-    MemOperand operand = i.MemoryOperand(&mode);              \
-    __ sync();                                                \
-    if (mode == kMode_MRI) {                                  \
-    __ asm_instr(result, operand);                            \
-    } else {                                                  \
-    __ asm_instrx(result, operand);                           \
-    }                                                         \
-    __ bind(&done);                                           \
-    __ cmp(result, result);                                   \
-    __ bne(&done);                                            \
-    __ isync();                                               \
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
+  do {                                                      \
+    Label done;                                             \
+    Register result = i.OutputRegister();                   \
+    AddressingMode mode = kMode_None;                       \
+    MemOperand operand = i.MemoryOperand(&mode);            \
+    if (mode == kMode_MRI) {                                \
+      __ asm_instr(result, operand);                        \
+    } else {                                                \
+      __ asm_instrx(result, operand);                       \
+    }                                                       \
+    __ lwsync();                                            \
   } while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx)  \
-  do {                                                        \
-    size_t index = 0;                                         \
-    AddressingMode mode = kMode_None;                         \
-    MemOperand operand = i.MemoryOperand(&mode, &index);      \
-    Register value = i.InputRegister(index);                  \
-    __ sync();                                                \
-    if (mode == kMode_MRI) {                                  \
-      __ asm_instr(value, operand);                           \
-    } else {                                                  \
-      __ asm_instrx(value, operand);                          \
-    }                                                         \
-    DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx) \
+  do {                                                       \
+    size_t index = 0;                                        \
+    AddressingMode mode = kMode_None;                        \
+    MemOperand operand = i.MemoryOperand(&mode, &index);     \
+    Register value = i.InputRegister(index);                 \
+    __ lwsync();                                             \
+    if (mode == kMode_MRI) {                                 \
+      __ asm_instr(value, operand);                          \
+    } else {                                                 \
+      __ asm_instrx(value, operand);                         \
+    }                                                        \
+    __ sync();                                               \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                     \
   } while (0)
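
For context (an assumption drawn from the usual C++-to-POWER mappings, not stated in the patch): lwsync is POWER's lighter barrier, ordering every access pair except store->load, which suffices for acquire loads ("load; lwsync") and release stores ("lwsync; store"); the trailing full sync on the store path additionally restores store->load ordering. The replaced sequences used full sync plus a compare/branch/isync idiom. The same contract in portable C++:

#include <atomic>
#include <thread>

std::atomic<int> flag{0};
int payload = 0;

void producer() {
  payload = 42;                              // plain store
  flag.store(1, std::memory_order_release);  // roughly "lwsync; st"
}

int consumer() {
  while (flag.load(std::memory_order_acquire) == 0) {  // roughly "ld; lwsync"
  }
  return payload;  // acquire/release ordering guarantees 42 is visible
}

int main() {
  std::thread t(producer);
  int result = consumer();
  t.join();
  return result == 42 ? 0 : 1;
}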
 
 void CodeGenerator::AssembleDeconstructFrame() {
@@ -813,7 +813,8 @@
 
   // Check if current frame is an arguments adaptor frame.
   __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ cmpi(scratch1,
+          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ bne(&done);
 
   // Load arguments count from current arguments adaptor frame (note, it
@@ -1082,10 +1083,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1322,7 +1321,7 @@
                 63 - i.InputInt32(2), i.OutputRCBit());
       break;
 #endif
-    case kPPC_Add:
+    case kPPC_Add32:
 #if V8_TARGET_ARCH_PPC64
       if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
         ASSEMBLE_ADD_WITH_OVERFLOW();
@@ -1335,10 +1334,26 @@
           __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
           DCHECK_EQ(LeaveRC, i.OutputRCBit());
         }
+        __ extsw(i.OutputRegister(), i.OutputRegister());
 #if V8_TARGET_ARCH_PPC64
       }
 #endif
       break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_Add64:
+      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+        ASSEMBLE_ADD_WITH_OVERFLOW();
+      } else {
+        if (HasRegisterInput(instr, 1)) {
+          __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+                 LeaveOE, i.OutputRCBit());
+        } else {
+          __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+          DCHECK_EQ(LeaveRC, i.OutputRCBit());
+        }
+      }
+      break;
+#endif
     case kPPC_AddWithOverflow32:
       ASSEMBLE_ADD_WITH_OVERFLOW32();
       break;
@@ -1431,19 +1446,35 @@
       ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode()));
       break;
     case kPPC_Mod32:
-      ASSEMBLE_MODULO(divw, mullw);
+      if (CpuFeatures::IsSupported(MODULO)) {
+        __ modsw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        ASSEMBLE_MODULO(divw, mullw);
+      }
       break;
 #if V8_TARGET_ARCH_PPC64
     case kPPC_Mod64:
-      ASSEMBLE_MODULO(divd, mulld);
+      if (CpuFeatures::IsSupported(MODULO)) {
+        __ modsd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        ASSEMBLE_MODULO(divd, mulld);
+      }
       break;
 #endif
     case kPPC_ModU32:
-      ASSEMBLE_MODULO(divwu, mullw);
+      if (CpuFeatures::IsSupported(MODULO)) {
+        __ moduw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        ASSEMBLE_MODULO(divwu, mullw);
+      }
       break;
 #if V8_TARGET_ARCH_PPC64
     case kPPC_ModU64:
-      ASSEMBLE_MODULO(divdu, mulld);
+      if (CpuFeatures::IsSupported(MODULO)) {
+        __ modud(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        ASSEMBLE_MODULO(divdu, mulld);
+      }
       break;
 #endif
     case kPPC_ModDouble:
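
For context (not part of the patch): on CPUs without the MODULO feature, ASSEMBLE_MODULO synthesizes the remainder from a divide, a multiply, and a subtract, while the modsw/moduw/modsd/modud family computes it in one instruction. A hypothetical C rendering of the fallback sequence:

#include <cstdint>
#include <cstdio>

// What ASSEMBLE_MODULO(divw, mullw) expands to, in C terms (assumes b != 0
// and not INT32_MIN with b == -1, which would overflow the divide).
int32_t Mod32Fallback(int32_t a, int32_t b) {
  int32_t q = a / b;  // divw: truncated division
  return a - q * b;   // mullw + sub
}

int main() {
  std::printf("%d\n", Mod32Fallback(7, 3));   // 1
  std::printf("%d\n", Mod32Fallback(-7, 3));  // -1 (truncated, like modsw)
}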
@@ -1984,6 +2015,84 @@
   if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      PPCOperandConverter i(gen_, instr_);
+
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED, true);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        // We use the context register as the scratch register, because we do
+        // not have a context here.
+        __ PrepareCallCFunction(0, 0, cp);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+        }
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Label end;
+
+  ArchOpcode op = instr->arch_opcode();
+  CRegister cr = cr0;
+  Condition cond = FlagsConditionToCondition(condition, op);
+  if (op == kPPC_CmpDouble) {
+    // check for unordered if necessary
+    if (cond == le) {
+      __ bunordered(&end, cr);
+      // Unnecessary for eq/lt since only FU bit will be set.
+    } else if (cond == gt) {
+      __ bunordered(tlabel, cr);
+      // Unnecessary for ne/ge since only FU bit will be set.
+    }
+  }
+  __ b(cond, tlabel, cr);
+  __ bind(&end);
+}
 
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2072,16 +2181,19 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   // TODO(turbofan): We should be able to generate better code by sharing the
   // actual final call site and just bl'ing to it here, similar to what we do
   // in the lithium backend.
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -2257,11 +2369,9 @@
       switch (src.type()) {
         case Constant::kInt32:
 #if V8_TARGET_ARCH_PPC64
-          if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmSizeReference(src.rmode())) {
 #else
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmReference(src.rmode())) {
 #endif
             __ mov(dst, Operand(src.ToInt32(), src.rmode()));
           } else {
@@ -2270,11 +2380,10 @@
           break;
         case Constant::kInt64:
 #if V8_TARGET_ARCH_PPC64
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
             __ mov(dst, Operand(src.ToInt64(), src.rmode()));
           } else {
-            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+            DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
 #endif
             __ mov(dst, Operand(src.ToInt64()));
 #if V8_TARGET_ARCH_PPC64
@@ -2313,8 +2422,23 @@
       DoubleRegister dst = destination->IsFPRegister()
                                ? g.ToDoubleRegister(destination)
                                : kScratchDoubleReg;
-      double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
-                                                        : src.ToFloat64();
+      double value;
+// bit_cast of an sNaN is converted to a qNaN on ia32/x64 hosts.
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+      intptr_t valueInt = (src.type() == Constant::kFloat32)
+                              ? src.ToFloat32AsInt()
+                              : src.ToFloat64AsInt();
+      if (valueInt == ((src.type() == Constant::kFloat32)
+                           ? 0x7fa00000
+                           : 0x7fa0000000000000)) {
+        value = bit_cast<double, int64_t>(0x7ff4000000000000L);
+      } else {
+#endif
+        value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
+                                                   : src.ToFloat64();
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+      }
+#endif
       __ LoadDoubleLiteral(dst, value, kScratchReg);
       if (destination->IsFPStackSlot()) {
         __ StoreDouble(dst, g.ToMemOperand(destination), r0);
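
For context (an assumption based on the bit patterns involved): 0x7fa00000 and 0x7ff4000000000000 are signaling NaNs (quiet bit clear), and pulling such a constant through the host's float path on ia32/x64 sets the quiet bit, so the code above detects the sNaN payload and rebuilds the double via bit_cast instead. A small demonstration of the quieting behavior:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // IEEE 754: mantissa MSB set => quiet NaN, clear => signaling NaN.
  uint32_t snan_bits = 0x7fa00000u;  // signaling NaN as a float
  float f;
  std::memcpy(&f, &snan_bits, sizeof f);
  double d = f;  // on ia32/x64 this conversion typically quiets the NaN
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  std::printf("0x%016llx\n",
              static_cast<unsigned long long>(bits));  // often 0x7ffc000000000000
}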
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
index 9198bcb..f68ab3a 100644
--- a/src/compiler/ppc/instruction-codes-ppc.h
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -33,7 +33,8 @@
   V(PPC_RotLeftAndClear64)         \
   V(PPC_RotLeftAndClearLeft64)     \
   V(PPC_RotLeftAndClearRight64)    \
-  V(PPC_Add)                       \
+  V(PPC_Add32)                     \
+  V(PPC_Add64)                     \
   V(PPC_AddWithOverflow32)         \
   V(PPC_AddPair)                   \
   V(PPC_AddDouble)                 \
@@ -42,7 +43,7 @@
   V(PPC_SubPair)                   \
   V(PPC_SubDouble)                 \
   V(PPC_Mul32)                     \
-  V(PPC_Mul32WithHigh32)          \
+  V(PPC_Mul32WithHigh32)           \
   V(PPC_Mul64)                     \
   V(PPC_MulHigh32)                 \
   V(PPC_MulHighU32)                \
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
index dee8494..640a7e4 100644
--- a/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -35,7 +35,8 @@
     case kPPC_RotLeftAndClear64:
     case kPPC_RotLeftAndClearLeft64:
     case kPPC_RotLeftAndClearRight64:
-    case kPPC_Add:
+    case kPPC_Add32:
+    case kPPC_Add64:
     case kPPC_AddWithOverflow32:
     case kPPC_AddPair:
     case kPPC_AddDouble:
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index 768b188..c2770b3 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -154,7 +154,10 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -213,6 +216,9 @@
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -322,6 +328,9 @@
       case MachineRepresentation::kWord64:  // Fall through.
 #endif
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -339,6 +348,11 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk): Implement.
+  UNIMPLEMENTED();
+}
+
 // Architecture supports unaligned access, therefore VisitLoad is used instead
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 
@@ -381,6 +395,9 @@
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -429,6 +446,9 @@
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -836,7 +856,7 @@
 }
 
 void InstructionSelector::VisitInt32PairAdd(Node* node) {
-  VisitPairBinop(this, kPPC_AddPair, kPPC_Add, node);
+  VisitPairBinop(this, kPPC_AddPair, kPPC_Add32, node);
 }
 
 void InstructionSelector::VisitInt32PairSub(Node* node) {
@@ -1013,13 +1033,13 @@
 void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitInt32Add(Node* node) {
-  VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
+  VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitInt64Add(Node* node) {
-  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
+  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm);
 }
 #endif
 
@@ -1481,11 +1501,11 @@
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
-    return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm,
+    return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm,
                                          &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm, &cont);
+  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm, &cont);
 }
 
 
@@ -1528,11 +1548,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1693,7 +1716,7 @@
 #if V8_TARGET_ARCH_PPC64
               case IrOpcode::kInt64AddWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add,
+                return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add64,
                                                      kInt16Imm, cont);
               case IrOpcode::kInt64SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1771,14 +1794,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
 }
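Editor's note: with VisitTrapIf/VisitTrapUnless added above, the compare helpers in this selector now handle four continuation shapes. A toy model (plain C++, not V8's FlagsContinuation) of the dispatch:

#include <cstdio>

enum class Kind { kBranch, kDeoptimize, kSet, kTrap };

void EmitCompare(Kind kind, int trap_id) {
  switch (kind) {
    case Kind::kBranch:     std::puts("cmp; branch to true/false blocks"); break;
    case Kind::kDeoptimize: std::puts("cmp; deopt(kind, reason, frame state)"); break;
    case Kind::kSet:        std::puts("cmp; materialize boolean result"); break;
    case Kind::kTrap:       std::printf("cmp; trap #%d\n", trap_id); break;  // new
  }
}

int main() { EmitCompare(Kind::kTrap, 7); }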
 
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 14695c1..0e10177 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -4,10 +4,10 @@
 
 #include "src/compiler/raw-machine-assembler.h"
 
-#include "src/code-factory.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/pipeline.h"
 #include "src/compiler/scheduler.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -51,12 +51,12 @@
     os << *schedule_;
   }
   schedule_->EnsureCFGWellFormedness();
+  Scheduler::ComputeSpecialRPO(zone(), schedule_);
   schedule_->PropagateDeferredMark();
   if (FLAG_trace_turbo_scheduler) {
     PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
     os << *schedule_;
   }
-  Scheduler::ComputeSpecialRPO(zone(), schedule_);
   // Invalidate RawMachineAssembler.
   Schedule* schedule = schedule_;
   schedule_ = nullptr;
@@ -166,299 +166,39 @@
 
 void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
 
+void RawMachineAssembler::Unreachable() {
+  Node* values[] = {UndefinedConstant()};  // Unused.
+  Node* ret = MakeNode(common()->Throw(), 1, values);
+  schedule()->AddThrow(CurrentBlock(), ret);
+  current_block_ = nullptr;
+}
+
 void RawMachineAssembler::Comment(const char* msg) {
   AddNode(machine()->Comment(msg));
 }
 
-Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
-                                 Node** args) {
-  int param_count = static_cast<int>(desc->ParameterCount());
-  int input_count = param_count + 1;
-  Node** buffer = zone()->NewArray<Node*>(input_count);
-  int index = 0;
-  buffer[index++] = function;
-  for (int i = 0; i < param_count; i++) {
-    buffer[index++] = args[i];
-  }
-  return AddNode(common()->Call(desc), input_count, buffer);
+Node* RawMachineAssembler::CallN(CallDescriptor* desc, int input_count,
+                                 Node* const* inputs) {
+  DCHECK(!desc->NeedsFrameState());
+  // +1 is for target.
+  DCHECK_EQ(input_count, desc->ParameterCount() + 1);
+  return AddNode(common()->Call(desc), input_count, inputs);
 }
 
-
 Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
-                                               Node* function, Node** args,
-                                               Node* frame_state) {
+                                               int input_count,
+                                               Node* const* inputs) {
   DCHECK(desc->NeedsFrameState());
-  int param_count = static_cast<int>(desc->ParameterCount());
-  int input_count = param_count + 2;
-  Node** buffer = zone()->NewArray<Node*>(input_count);
-  int index = 0;
-  buffer[index++] = function;
-  for (int i = 0; i < param_count; i++) {
-    buffer[index++] = args[i];
-  }
-  buffer[index++] = frame_state;
-  return AddNode(common()->Call(desc), input_count, buffer);
+  // +2 is for target and frame state.
+  DCHECK_EQ(input_count, desc->ParameterCount() + 2);
+  return AddNode(common()->Call(desc), input_count, inputs);
 }
 
-Node* RawMachineAssembler::CallRuntime0(Runtime::FunctionId function,
-                                        Node* context) {
-  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, 0, Operator::kNoProperties, CallDescriptor::kNoFlags);
-  int return_count = static_cast<int>(descriptor->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(0);
-
-  return AddNode(common()->Call(descriptor), centry, ref, arity, context);
-}
-
-Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
-                                        Node* arg1, Node* context) {
-  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, 1, Operator::kNoProperties, CallDescriptor::kNoFlags);
-  int return_count = static_cast<int>(descriptor->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(1);
-
-  return AddNode(common()->Call(descriptor), centry, arg1, ref, arity, context);
-}
-
-
-Node* RawMachineAssembler::CallRuntime2(Runtime::FunctionId function,
-                                        Node* arg1, Node* arg2, Node* context) {
-  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, 2, Operator::kNoProperties, CallDescriptor::kNoFlags);
-  int return_count = static_cast<int>(descriptor->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(2);
-
-  return AddNode(common()->Call(descriptor), centry, arg1, arg2, ref, arity,
-                 context);
-}
-
-Node* RawMachineAssembler::CallRuntime3(Runtime::FunctionId function,
-                                        Node* arg1, Node* arg2, Node* arg3,
-                                        Node* context) {
-  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, 3, Operator::kNoProperties, CallDescriptor::kNoFlags);
-  int return_count = static_cast<int>(descriptor->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(3);
-
-  return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, ref,
-                 arity, context);
-}
-
-Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
-                                        Node* arg1, Node* arg2, Node* arg3,
-                                        Node* arg4, Node* context) {
-  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, 4, Operator::kNoProperties, CallDescriptor::kNoFlags);
-  int return_count = static_cast<int>(descriptor->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(4);
-
-  return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
-                 ref, arity, context);
-}
-
-Node* RawMachineAssembler::CallRuntime5(Runtime::FunctionId function,
-                                        Node* arg1, Node* arg2, Node* arg3,
-                                        Node* arg4, Node* arg5, Node* context) {
-  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, 5, Operator::kNoProperties, CallDescriptor::kNoFlags);
-  int return_count = static_cast<int>(descriptor->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(5);
-
-  return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
-                 arg5, ref, arity, context);
-}
-
-Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
-                                     Node** args) {
-  int param_count = static_cast<int>(desc->ParameterCount());
-  int input_count = param_count + 1;
-  Node** buffer = zone()->NewArray<Node*>(input_count);
-  int index = 0;
-  buffer[index++] = function;
-  for (int i = 0; i < param_count; i++) {
-    buffer[index++] = args[i];
-  }
-  Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime0(Runtime::FunctionId function,
-                                            Node* context) {
-  const int kArity = 0;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, ref, arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
-                                            Node* arg1, Node* context) {
-  const int kArity = 1;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, arg1, ref, arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-
-Node* RawMachineAssembler::TailCallRuntime2(Runtime::FunctionId function,
-                                            Node* arg1, Node* arg2,
-                                            Node* context) {
-  const int kArity = 2;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, arg1, arg2, ref, arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime3(Runtime::FunctionId function,
-                                            Node* arg1, Node* arg2, Node* arg3,
-                                            Node* context) {
-  const int kArity = 3;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, arg1, arg2, arg3, ref, arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime4(Runtime::FunctionId function,
-                                            Node* arg1, Node* arg2, Node* arg3,
-                                            Node* arg4, Node* context) {
-  const int kArity = 4;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, arg1, arg2, arg3, arg4, ref, arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime5(Runtime::FunctionId function,
-                                            Node* arg1, Node* arg2, Node* arg3,
-                                            Node* arg4, Node* arg5,
-                                            Node* context) {
-  const int kArity = 5;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, arg1, arg2, arg3, arg4, arg5, ref, arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime6(Runtime::FunctionId function,
-                                            Node* arg1, Node* arg2, Node* arg3,
-                                            Node* arg4, Node* arg5, Node* arg6,
-                                            Node* context) {
-  const int kArity = 6;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, arg1, arg2, arg3,  arg4,
-                   arg5,   arg6, ref,  arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
+Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, int input_count,
+                                     Node* const* inputs) {
+  // +1 is for target.
+  DCHECK_EQ(input_count, desc->ParameterCount() + 1);
+  Node* tail_call = MakeNode(common()->TailCall(desc), input_count, inputs);
   schedule()->AddTailCall(CurrentBlock(), tail_call);
   current_block_ = nullptr;
   return tail_call;
@@ -502,6 +242,21 @@
   return AddNode(common()->Call(descriptor), function, arg0, arg1);
 }
 
+Node* RawMachineAssembler::CallCFunction3(MachineType return_type,
+                                          MachineType arg0_type,
+                                          MachineType arg1_type,
+                                          MachineType arg2_type, Node* function,
+                                          Node* arg0, Node* arg1, Node* arg2) {
+  MachineSignature::Builder builder(zone(), 1, 3);
+  builder.AddReturn(return_type);
+  builder.AddParam(arg0_type);
+  builder.AddParam(arg1_type);
+  builder.AddParam(arg2_type);
+  const CallDescriptor* descriptor =
+      Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+  return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2);
+}
 
 Node* RawMachineAssembler::CallCFunction8(
     MachineType return_type, MachineType arg0_type, MachineType arg1_type,
@@ -584,7 +339,11 @@
   return graph()->NewNodeUnchecked(op, input_count, inputs);
 }
 
-RawMachineLabel::~RawMachineLabel() { DCHECK(bound_ || !used_); }
+RawMachineLabel::~RawMachineLabel() {
+  // If this DCHECK fails, the label was bound but never used, or used but
+  // never bound. Either way the register allocator would crash.
+  DCHECK_EQ(bound_, used_);
+}
 
 }  // namespace compiler
 }  // namespace internal
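Editor's note: the per-arity CallRuntimeN/TailCallRuntimeN helpers above are replaced by a single flattened-inputs API. A hypothetical call site (target, arg0, arg1, frame_state and the descriptors are invented for illustration; 'm' is a RawMachineAssembler) now looks like this fragment:

Node* inputs[] = {target, arg0, arg1};                   // input_count = 3 (+1 for target)
Node* call = m.CallN(descriptor, 3, inputs);

Node* inputs_fs[] = {target, arg0, arg1, frame_state};   // +2: target and frame state
Node* call_fs = m.CallNWithFrameState(descriptor_fs, 4, inputs_fs);

The counts match the DCHECKs in the new implementations: ParameterCount() + 1 for CallN/TailCallN and ParameterCount() + 2 for CallNWithFrameState.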
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index 6d2accb..d726217 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -534,13 +534,21 @@
 
   // Conversions.
   Node* BitcastTaggedToWord(Node* a) {
+#ifdef ENABLE_VERIFY_CSA
     return AddNode(machine()->BitcastTaggedToWord(), a);
+#else
+    return a;
+#endif
   }
   Node* BitcastWordToTagged(Node* a) {
     return AddNode(machine()->BitcastWordToTagged(), a);
   }
   Node* BitcastWordToTaggedSigned(Node* a) {
+#ifdef ENABLE_VERIFY_CSA
     return AddNode(machine()->BitcastWordToTaggedSigned(), a);
+#else
+    return a;
+#endif
   }
   Node* TruncateFloat64ToWord32(Node* a) {
     return AddNode(machine()->TruncateFloat64ToWord32(), a);
@@ -653,6 +661,12 @@
   Node* Float64RoundTiesEven(Node* a) {
     return AddNode(machine()->Float64RoundTiesEven().op(), a);
   }
+  Node* Word32ReverseBytes(Node* a) {
+    return AddNode(machine()->Word32ReverseBytes().op(), a);
+  }
+  Node* Word64ReverseBytes(Node* a) {
+    return AddNode(machine()->Word64ReverseBytes().op(), a);
+  }
 
   // Float64 bit operations.
   Node* Float64ExtractLowWord32(Node* a) {
@@ -701,26 +715,18 @@
   }
 
   // Call a given call descriptor and the given arguments.
-  Node* CallN(CallDescriptor* desc, Node* function, Node** args);
+  // The call target is passed as part of the {inputs} array.
+  Node* CallN(CallDescriptor* desc, int input_count, Node* const* inputs);
+
   // Call a given call descriptor and the given arguments and frame-state.
-  Node* CallNWithFrameState(CallDescriptor* desc, Node* function, Node** args,
-                            Node* frame_state);
-  // Call to a runtime function with zero arguments.
-  Node* CallRuntime0(Runtime::FunctionId function, Node* context);
-  // Call to a runtime function with one arguments.
-  Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context);
-  // Call to a runtime function with two arguments.
-  Node* CallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                     Node* context);
-  // Call to a runtime function with three arguments.
-  Node* CallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                     Node* arg3, Node* context);
-  // Call to a runtime function with four arguments.
-  Node* CallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                     Node* arg3, Node* arg4, Node* context);
-  // Call to a runtime function with five arguments.
-  Node* CallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                     Node* arg3, Node* arg4, Node* arg5, Node* context);
+  // The call target and frame state are passed as part of the {inputs} array.
+  Node* CallNWithFrameState(CallDescriptor* desc, int input_count,
+                            Node* const* inputs);
+
+  // Tail call a given call descriptor and the given arguments.
+  // The call target is passed as part of the {inputs} array.
+  Node* TailCallN(CallDescriptor* desc, int input_count, Node* const* inputs);
+
   // Call to a C function with zero arguments.
   Node* CallCFunction0(MachineType return_type, Node* function);
   // Call to a C function with one parameter.
@@ -730,6 +736,10 @@
   Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
                        MachineType arg1_type, Node* function, Node* arg0,
                        Node* arg1);
+  // Call to a C function with three arguments.
+  Node* CallCFunction3(MachineType return_type, MachineType arg0_type,
+                       MachineType arg1_type, MachineType arg2_type,
+                       Node* function, Node* arg0, Node* arg1, Node* arg2);
   // Call to a C function with eight arguments.
   Node* CallCFunction8(MachineType return_type, MachineType arg0_type,
                        MachineType arg1_type, MachineType arg2_type,
@@ -739,30 +749,6 @@
                        Node* arg1, Node* arg2, Node* arg3, Node* arg4,
                        Node* arg5, Node* arg6, Node* arg7);
 
-  // Tail call the given call descriptor and the given arguments.
-  Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
-  // Tail call to a runtime function with zero arguments.
-  Node* TailCallRuntime0(Runtime::FunctionId function, Node* context);
-  // Tail call to a runtime function with one argument.
-  Node* TailCallRuntime1(Runtime::FunctionId function, Node* arg0,
-                         Node* context);
-  // Tail call to a runtime function with two arguments.
-  Node* TailCallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                         Node* context);
-  // Tail call to a runtime function with three arguments.
-  Node* TailCallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                         Node* arg3, Node* context);
-  // Tail call to a runtime function with four arguments.
-  Node* TailCallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                         Node* arg3, Node* arg4, Node* context);
-  // Tail call to a runtime function with five arguments.
-  Node* TailCallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                         Node* arg3, Node* arg4, Node* arg5, Node* context);
-  // Tail call to a runtime function with six arguments.
-  Node* TailCallRuntime6(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                         Node* arg3, Node* arg4, Node* arg5, Node* arg6,
-                         Node* context);
-
   // ===========================================================================
   // The following utility methods deal with control flow, hence might switch
   // the current basic block or create new basic blocks for labels.
@@ -783,6 +769,7 @@
   void Bind(RawMachineLabel* label);
   void Deoptimize(Node* state);
   void DebugBreak();
+  void Unreachable();
   void Comment(const char* msg);
 
   // Add success / exception successor blocks and ends the current block ending
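Editor's note: a hypothetical usage fragment for the new three-argument helper declared above, assuming an int32_t f(int32_t, int32_t, int32_t) C function whose address is held in the node 'fn':

Node* result = m.CallCFunction3(
    MachineType::Int32(),                                     // return type
    MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
    fn, arg0, arg1, arg2);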
diff --git a/src/compiler/redundancy-elimination.cc b/src/compiler/redundancy-elimination.cc
index 6dcf2bf..38feb8b 100644
--- a/src/compiler/redundancy-elimination.cc
+++ b/src/compiler/redundancy-elimination.cc
@@ -16,12 +16,15 @@
 RedundancyElimination::~RedundancyElimination() {}
 
 Reduction RedundancyElimination::Reduce(Node* node) {
+  if (node_checks_.Get(node)) return NoChange();
   switch (node->opcode()) {
     case IrOpcode::kCheckBounds:
     case IrOpcode::kCheckFloat64Hole:
     case IrOpcode::kCheckHeapObject:
     case IrOpcode::kCheckIf:
+    case IrOpcode::kCheckInternalizedString:
     case IrOpcode::kCheckNumber:
+    case IrOpcode::kCheckReceiver:
     case IrOpcode::kCheckSmi:
     case IrOpcode::kCheckString:
     case IrOpcode::kCheckTaggedHole:
@@ -36,6 +39,11 @@
     case IrOpcode::kCheckedTaggedToInt32:
     case IrOpcode::kCheckedUint32ToInt32:
       return ReduceCheckNode(node);
+    case IrOpcode::kSpeculativeNumberAdd:
+    case IrOpcode::kSpeculativeNumberSubtract:
+      // For increments and decrements by a constant, try to learn from the last
+      // bounds check.
+      return TryReuseBoundsCheckForFirstInput(node);
     case IrOpcode::kEffectPhi:
       return ReduceEffectPhi(node);
     case IrOpcode::kDead:
@@ -114,7 +122,14 @@
 namespace {
 
 bool IsCompatibleCheck(Node const* a, Node const* b) {
-  if (a->op() != b->op()) return false;
+  if (a->op() != b->op()) {
+    if (a->opcode() == IrOpcode::kCheckInternalizedString &&
+        b->opcode() == IrOpcode::kCheckString) {
+      // CheckInternalizedString(node) implies CheckString(node)
+    } else {
+      return false;
+    }
+  }
   for (int i = a->op()->ValueInputCount(); --i >= 0;) {
     if (a->InputAt(i) != b->InputAt(i)) return false;
   }
@@ -133,6 +148,17 @@
   return nullptr;
 }
 
+Node* RedundancyElimination::EffectPathChecks::LookupBoundsCheckFor(
+    Node* node) const {
+  for (Check const* check = head_; check != nullptr; check = check->next) {
+    if (check->node->opcode() == IrOpcode::kCheckBounds &&
+        check->node->InputAt(0) == node) {
+      return check->node;
+    }
+  }
+  return nullptr;
+}
+
 RedundancyElimination::EffectPathChecks const*
 RedundancyElimination::PathChecksForEffectNodes::Get(Node* node) const {
   size_t const id = node->id();
@@ -158,10 +184,41 @@
     ReplaceWithValue(node, check);
     return Replace(check);
   }
+
   // Learn from this check.
   return UpdateChecks(node, checks->AddCheck(zone(), node));
 }
 
+Reduction RedundancyElimination::TryReuseBoundsCheckForFirstInput(Node* node) {
+  DCHECK(node->opcode() == IrOpcode::kSpeculativeNumberAdd ||
+         node->opcode() == IrOpcode::kSpeculativeNumberSubtract);
+
+  DCHECK_EQ(1, node->op()->EffectInputCount());
+  DCHECK_EQ(1, node->op()->EffectOutputCount());
+
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  EffectPathChecks const* checks = node_checks_.Get(effect);
+
+  // If we do not know anything about the predecessor, do not propagate just yet
+  // because we will have to recompute anyway once we compute the predecessor.
+  if (checks == nullptr) return NoChange();
+
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  // Only use bounds checks for increments/decrements by a constant.
+  if (right->opcode() == IrOpcode::kNumberConstant) {
+    if (Node* bounds_check = checks->LookupBoundsCheckFor(left)) {
+      // Only use the bounds checked type if it is better.
+      if (NodeProperties::GetType(bounds_check)
+              ->Is(NodeProperties::GetType(left))) {
+        node->ReplaceInput(0, bounds_check);
+      }
+    }
+  }
+
+  return UpdateChecks(node, checks);
+}
+
 Reduction RedundancyElimination::ReduceEffectPhi(Node* node) {
   Node* const control = NodeProperties::GetControlInput(node);
   if (control->opcode() == IrOpcode::kLoop) {
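Editor's note: the new TryReuseBoundsCheckForFirstInput rewires constant increments/decrements to a dominating bounds check so they inherit its narrower type. Schematically (not a real IR dump):

  check = CheckBounds(i, length)        ; proves i is in [0, length)
  ...
  add   = SpeculativeNumberAdd(i, #1)

becomes, when check's type is strictly better than i's:

  add   = SpeculativeNumberAdd(check, #1)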
diff --git a/src/compiler/redundancy-elimination.h b/src/compiler/redundancy-elimination.h
index 88f9032..786c960 100644
--- a/src/compiler/redundancy-elimination.h
+++ b/src/compiler/redundancy-elimination.h
@@ -34,6 +34,7 @@
 
     EffectPathChecks const* AddCheck(Zone* zone, Node* node) const;
     Node* LookupCheck(Node* node) const;
+    Node* LookupBoundsCheckFor(Node* node) const;
 
    private:
     EffectPathChecks(Check* head, size_t size) : head_(head), size_(size) {}
@@ -62,6 +63,8 @@
   Reduction TakeChecksFromFirstEffect(Node* node);
   Reduction UpdateChecks(Node* node, EffectPathChecks const* checks);
 
+  Reduction TryReuseBoundsCheckForFirstInput(Node* node);
+
   Zone* zone() const { return zone_; }
 
   PathChecksForEffectNodes node_checks_;
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index cefd04a..d589a9d 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -2,9 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/register-allocator-verifier.h"
+
 #include "src/bit-vector.h"
 #include "src/compiler/instruction.h"
-#include "src/compiler/register-allocator-verifier.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -300,6 +302,27 @@
   }
 }
 
+void BlockAssessments::Print() const {
+  OFStream os(stdout);
+  for (const auto pair : map()) {
+    const InstructionOperand op = pair.first;
+    const Assessment* assessment = pair.second;
+    // Use operator<< so we can write the assessment on the same
+    // line. Since we need a register configuration, just pick
+    // Turbofan for now.
+    PrintableInstructionOperand wrapper = {RegisterConfiguration::Turbofan(),
+                                           op};
+    os << wrapper << " : ";
+    if (assessment->kind() == AssessmentKind::Final) {
+      os << "v" << FinalAssessment::cast(assessment)->virtual_register();
+    } else {
+      os << "P";
+    }
+    os << std::endl;
+  }
+  os << std::endl;
+}
+
 BlockAssessments* RegisterAllocatorVerifier::CreateForBlock(
     const InstructionBlock* block) {
   RpoNumber current_block_id = block->rpo_number();
@@ -352,8 +375,9 @@
   // for the original operand (the one where the assessment was created for
   // first) are also pending. To avoid recursion, we use a work list. To
   // deal with cycles, we keep a set of seen nodes.
-  ZoneQueue<std::pair<const PendingAssessment*, int>> worklist(zone());
-  ZoneSet<RpoNumber> seen(zone());
+  Zone local_zone(zone()->allocator(), ZONE_NAME);
+  ZoneQueue<std::pair<const PendingAssessment*, int>> worklist(&local_zone);
+  ZoneSet<RpoNumber> seen(&local_zone);
   worklist.push(std::make_pair(assessment, virtual_register));
   seen.insert(block_id);
 
@@ -448,7 +472,11 @@
   // is virtual_register.
   const PendingAssessment* old = assessment->original_pending_assessment();
   CHECK_NOT_NULL(old);
-  ValidatePendingAssessment(block_id, op, current_assessments, old,
+  RpoNumber old_block = old->origin()->rpo_number();
+  DCHECK_LE(old_block, block_id);
+  BlockAssessments* old_block_assessments =
+      old_block == block_id ? current_assessments : assessments_[old_block];
+  ValidatePendingAssessment(old_block, op, old_block_assessments, old,
                             virtual_register);
 }
 
diff --git a/src/compiler/register-allocator-verifier.h b/src/compiler/register-allocator-verifier.h
index 9a605d6..989589e 100644
--- a/src/compiler/register-allocator-verifier.h
+++ b/src/compiler/register-allocator-verifier.h
@@ -5,13 +5,14 @@
 #ifndef V8_REGISTER_ALLOCATOR_VERIFIER_H_
 #define V8_REGISTER_ALLOCATOR_VERIFIER_H_
 
+#include "src/compiler/instruction.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-class InstructionOperand;
+class InstructionBlock;
 class InstructionSequence;
 
 // The register allocator validator traverses instructions in the instruction
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 0ed479f..403c344 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -86,6 +86,10 @@
       return kDoubleSize;
     case MachineRepresentation::kSimd128:
       return kSimd128Size;
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
+      return kSimdMaskRegisters ? kPointerSize : kSimd128Size;
     case MachineRepresentation::kNone:
       break;
   }
@@ -2985,7 +2989,7 @@
     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
   DCHECK_GE(positions.length(), num_regs);
 
-  for (int i = 0; i < num_regs; i++) {
+  for (int i = 0; i < num_regs; ++i) {
     positions[i] = LifetimePosition::MaxPosition();
   }
 
@@ -3009,9 +3013,17 @@
 
   for (LiveRange* cur_inactive : inactive_live_ranges()) {
     DCHECK(cur_inactive->End() > range->Start());
+    int cur_reg = cur_inactive->assigned_register();
+    // No need to carry out intersections when this register won't be
+    // interesting to this range anyway.
+    // TODO(mtrofin): extend to aliased ranges, too.
+    if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
+        positions[cur_reg] < range->Start()) {
+      continue;
+    }
+
     LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
     if (!next_intersection.IsValid()) continue;
-    int cur_reg = cur_inactive->assigned_register();
     if (kSimpleFPAliasing || !check_fp_aliasing()) {
       positions[cur_reg] = Min(positions[cur_reg], next_intersection);
       TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
@@ -3111,8 +3123,9 @@
   const int* codes = allocatable_register_codes();
   MachineRepresentation rep = current->representation();
   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
-                             rep == MachineRepresentation::kSimd128))
+                             rep == MachineRepresentation::kSimd128)) {
     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+  }
 
   DCHECK_GE(free_until_pos.length(), num_codes);
 
@@ -3166,6 +3179,9 @@
                              rep == MachineRepresentation::kSimd128))
     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
 
+  // use_pos tracks the next position at which each register/alias is used.
+  // block_pos tracks the position from which each register/alias becomes
+  // blocked.
   LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
   LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
   for (int i = 0; i < num_regs; i++) {
@@ -3181,6 +3197,8 @@
         block_pos[cur_reg] = use_pos[cur_reg] =
             LifetimePosition::GapFromInstructionIndex(0);
       } else {
+        DCHECK_NE(LifetimePosition::GapFromInstructionIndex(0),
+                  block_pos[cur_reg]);
         use_pos[cur_reg] =
             range->NextLifetimePositionRegisterIsBeneficial(current->Start());
       }
@@ -3196,7 +3214,9 @@
               LifetimePosition::GapFromInstructionIndex(0);
         } else {
           use_pos[aliased_reg] =
-              range->NextLifetimePositionRegisterIsBeneficial(current->Start());
+              Min(block_pos[aliased_reg],
+                  range->NextLifetimePositionRegisterIsBeneficial(
+                      current->Start()));
         }
       }
     }
@@ -3204,10 +3224,23 @@
 
   for (LiveRange* range : inactive_live_ranges()) {
     DCHECK(range->End() > current->Start());
-    LifetimePosition next_intersection = range->FirstIntersection(current);
-    if (!next_intersection.IsValid()) continue;
     int cur_reg = range->assigned_register();
     bool is_fixed = range->TopLevel()->IsFixed();
+
+    // Don't perform costly intersections if they are guaranteed not to update
+    // block_pos or use_pos.
+    // TODO(mtrofin): extend to aliased ranges, too.
+    if ((kSimpleFPAliasing || !check_fp_aliasing())) {
+      if (is_fixed) {
+        if (block_pos[cur_reg] < range->Start()) continue;
+      } else {
+        if (use_pos[cur_reg] < range->Start()) continue;
+      }
+    }
+
+    LifetimePosition next_intersection = range->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+
     if (kSimpleFPAliasing || !check_fp_aliasing()) {
       if (is_fixed) {
         block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
@@ -3242,19 +3275,18 @@
     }
   }
 
-  LifetimePosition pos = use_pos[reg];
-
-  if (pos < register_use->pos()) {
+  if (use_pos[reg] < register_use->pos()) {
+    // If there is a gap position before the next register use, we can
+    // spill until there; the gap position can then hold the fill move.
     if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
                                                    register_use->pos())) {
       SpillBetween(current, current->Start(), register_use->pos());
-    } else {
-      SetLiveRangeAssignedRegister(current, reg);
-      SplitAndSpillIntersecting(current);
+      return;
     }
-    return;
   }
 
+  // We couldn't spill until the next register use. Split before the register
+  // is blocked, if applicable.
   if (block_pos[reg] < current->End()) {
     // Register becomes blocked before the current range end. Split before that
     // position.
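Editor's note: the early-continue added above skips FirstIntersection when its result provably cannot lower the recorded position. A standalone illustration of that pattern (invented ranges and register numbers; any intersection with 'current' starts at or after current.start, so values already below it cannot improve):

#include <algorithm>
#include <cstdio>

struct Range { int start, end, reg; };

// First overlap point of two half-open ranges, or -1 if disjoint.
int FirstIntersection(const Range& a, const Range& b) {
  int lo = std::max(a.start, b.start);
  return lo < std::min(a.end, b.end) ? lo : -1;
}

int main() {
  const int kMax = 1000;
  Range current{10, 40, -1};
  Range inactive[] = {{0, 20, 0}, {12, 30, 1}, {35, 60, 0}};
  int positions[2] = {5, kMax};  // free-until position per register
  for (const Range& r : inactive) {
    // Early-out: an intersection cannot lower a value already below
    // current.start, so skip the (potentially costly) computation.
    if (positions[r.reg] < current.start) continue;
    int p = FirstIntersection(current, r);
    if (p >= 0) positions[r.reg] = std::min(positions[r.reg], p);
  }
  std::printf("r0 free until %d, r1 free until %d\n", positions[0], positions[1]);
}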
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index e3e5108..4b4f8c9 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -9,6 +9,8 @@
 #include "src/base/bits.h"
 #include "src/code-factory.h"
 #include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -168,9 +170,10 @@
     case MachineRepresentation::kWord64:
       DCHECK(use_info.type_check() == TypeCheckKind::kNone);
       return GetWord64RepresentationFor(node, output_rep, output_type);
-    case MachineRepresentation::kSimd128:  // Fall through.
-      // TODO(bbudge) Handle conversions between tagged and untagged.
-      break;
+    case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kNone:
       return node;
   }
@@ -270,9 +273,15 @@
       return TypeError(node, output_rep, output_type,
                        MachineRepresentation::kTaggedSigned);
     }
-  } else if (CanBeTaggedPointer(output_rep) &&
-             use_info.type_check() == TypeCheckKind::kSignedSmall) {
-    op = simplified()->CheckedTaggedToTaggedSigned();
+  } else if (CanBeTaggedPointer(output_rep)) {
+    if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+      op = simplified()->CheckedTaggedToTaggedSigned();
+    } else if (output_type->Is(Type::SignedSmall())) {
+      op = simplified()->ChangeTaggedToTaggedSigned();
+    } else {
+      return TypeError(node, output_rep, output_type,
+                       MachineRepresentation::kTaggedSigned);
+    }
   } else if (output_rep == MachineRepresentation::kBit &&
              use_info.type_check() == TypeCheckKind::kSignedSmall) {
     // TODO(turbofan): Consider adding a Bailout operator that just deopts.
@@ -307,7 +316,12 @@
     // We just provide a dummy value here.
     return jsgraph()->TheHoleConstant();
   } else if (output_rep == MachineRepresentation::kBit) {
-    return node;
+    if (output_type->Is(Type::Boolean())) {
+      op = simplified()->ChangeBitToTagged();
+    } else {
+      return TypeError(node, output_rep, output_type,
+                       MachineRepresentation::kTagged);
+    }
   } else if (IsWord(output_rep)) {
     if (output_type->Is(Type::Unsigned32())) {
       // uint32 -> float64 -> tagged
@@ -582,33 +596,33 @@
   } else if (output_rep == MachineRepresentation::kBit) {
     return node;  // Sloppy comparison -> word32
   } else if (output_rep == MachineRepresentation::kFloat64) {
-    if (output_type->Is(Type::Unsigned32())) {
-      op = machine()->ChangeFloat64ToUint32();
-    } else if (output_type->Is(Type::Signed32())) {
+    if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
-    } else if (use_info.truncation().IsUsedAsWord32()) {
-      op = machine()->TruncateFloat64ToWord32();
     } else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
                use_info.type_check() == TypeCheckKind::kSigned32) {
       op = simplified()->CheckedFloat64ToInt32(
           output_type->Maybe(Type::MinusZero())
               ? use_info.minus_zero_check()
               : CheckForMinusZeroMode::kDontCheckForMinusZero);
+    } else if (output_type->Is(Type::Unsigned32())) {
+      op = machine()->ChangeFloat64ToUint32();
+    } else if (use_info.truncation().IsUsedAsWord32()) {
+      op = machine()->TruncateFloat64ToWord32();
     }
   } else if (output_rep == MachineRepresentation::kFloat32) {
     node = InsertChangeFloat32ToFloat64(node);  // float32 -> float64 -> int32
-    if (output_type->Is(Type::Unsigned32())) {
-      op = machine()->ChangeFloat64ToUint32();
-    } else if (output_type->Is(Type::Signed32())) {
+    if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
-    } else if (use_info.truncation().IsUsedAsWord32()) {
-      op = machine()->TruncateFloat64ToWord32();
     } else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
                use_info.type_check() == TypeCheckKind::kSigned32) {
       op = simplified()->CheckedFloat64ToInt32(
           output_type->Maybe(Type::MinusZero())
               ? CheckForMinusZeroMode::kCheckForMinusZero
               : CheckForMinusZeroMode::kDontCheckForMinusZero);
+    } else if (output_type->Is(Type::Unsigned32())) {
+      op = machine()->ChangeFloat64ToUint32();
+    } else if (use_info.truncation().IsUsedAsWord32()) {
+      op = machine()->TruncateFloat64ToWord32();
     }
   } else if (output_rep == MachineRepresentation::kTaggedSigned) {
     if (output_type->Is(Type::Signed32())) {
@@ -622,16 +636,8 @@
     }
   } else if (output_rep == MachineRepresentation::kTagged ||
              output_rep == MachineRepresentation::kTaggedPointer) {
-    if (output_type->Is(Type::Unsigned32())) {
-      op = simplified()->ChangeTaggedToUint32();
-    } else if (output_type->Is(Type::Signed32())) {
+    if (output_type->Is(Type::Signed32())) {
       op = simplified()->ChangeTaggedToInt32();
-    } else if (use_info.truncation().IsUsedAsWord32()) {
-      if (use_info.type_check() != TypeCheckKind::kNone) {
-        op = simplified()->CheckedTruncateTaggedToWord32();
-      } else {
-        op = simplified()->TruncateTaggedToWord32();
-      }
     } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
       op = simplified()->CheckedTaggedSignedToInt32();
     } else if (use_info.type_check() == TypeCheckKind::kSigned32) {
@@ -639,6 +645,14 @@
           output_type->Maybe(Type::MinusZero())
               ? CheckForMinusZeroMode::kCheckForMinusZero
               : CheckForMinusZeroMode::kDontCheckForMinusZero);
+    } else if (output_type->Is(Type::Unsigned32())) {
+      op = simplified()->ChangeTaggedToUint32();
+    } else if (use_info.truncation().IsUsedAsWord32()) {
+      if (output_type->Is(Type::NumberOrOddball())) {
+        op = simplified()->TruncateTaggedToWord32();
+      } else if (use_info.type_check() != TypeCheckKind::kNone) {
+        op = simplified()->CheckedTruncateTaggedToWord32();
+      }
     }
   } else if (output_rep == MachineRepresentation::kWord32) {
     // Only the checked case should get here, the non-checked case is
@@ -689,8 +703,12 @@
   // Eagerly fold representation changes for constants.
   switch (node->opcode()) {
     case IrOpcode::kHeapConstant: {
-      Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
-      return jsgraph()->Int32Constant(value->BooleanValue() ? 1 : 0);
+      HeapObjectMatcher m(node);
+      if (m.Is(factory()->false_value())) {
+        return jsgraph()->Int32Constant(0);
+      } else if (m.Is(factory()->true_value())) {
+        return jsgraph()->Int32Constant(1);
+      }
     }
     default:
       break;
@@ -807,6 +825,24 @@
   }
 }
 
+const Operator* RepresentationChanger::TaggedSignedOperatorFor(
+    IrOpcode::Value opcode) {
+  switch (opcode) {
+    case IrOpcode::kSpeculativeNumberLessThan:
+      return machine()->Is32() ? machine()->Int32LessThan()
+                               : machine()->Int64LessThan();
+    case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+      return machine()->Is32() ? machine()->Int32LessThanOrEqual()
+                               : machine()->Int64LessThanOrEqual();
+    case IrOpcode::kSpeculativeNumberEqual:
+      return machine()->Is32() ? machine()->Word32Equal()
+                               : machine()->Word64Equal();
+    default:
+      UNREACHABLE();
+      return nullptr;
+  }
+}
+
 const Operator* RepresentationChanger::Uint32OperatorFor(
     IrOpcode::Value opcode) {
   switch (opcode) {
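Editor's note: TaggedSignedOperatorFor above lowers speculative number comparisons on Smi inputs to plain word comparisons, which is sound because Smi tagging is a multiplication by a positive constant and so preserves signed order. A standalone check (assuming the standard encodings: payload * 2 on 32-bit, payload * 2^32 on 64-bit):

#include <cassert>
#include <cstdint>
#include <cstdio>

int32_t TagSmi32(int32_t v) { return v * 2; }                 // 31-bit payload
int64_t TagSmi64(int32_t v) { return int64_t{v} * 4294967296LL; }

int main() {
  int32_t samples[] = {-1073741824, -7, 0, 7, 1073741823};  // valid 31-bit Smis
  for (int32_t a : samples) {
    for (int32_t b : samples) {
      assert((a < b) == (TagSmi32(a) < TagSmi32(b)));
      assert((a < b) == (TagSmi64(a) < TagSmi64(b)));
    }
  }
  std::puts("tagging preserves signed order");
}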
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index d7895da..4fa7d91 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -238,6 +238,7 @@
                              UseInfo use_info);
   const Operator* Int32OperatorFor(IrOpcode::Value opcode);
   const Operator* Int32OverflowOperatorFor(IrOpcode::Value opcode);
+  const Operator* TaggedSignedOperatorFor(IrOpcode::Value opcode);
   const Operator* Uint32OperatorFor(IrOpcode::Value opcode);
   const Operator* Uint32OverflowOperatorFor(IrOpcode::Value opcode);
   const Operator* Float64OperatorFor(IrOpcode::Value opcode);
diff --git a/src/compiler/s390/code-generator-s390.cc b/src/compiler/s390/code-generator-s390.cc
index 5dcc82f..8e9db3d 100644
--- a/src/compiler/s390/code-generator-s390.cc
+++ b/src/compiler/s390/code-generator-s390.cc
@@ -119,12 +119,30 @@
     InstructionOperand* op = instr_->InputAt(index);
     return SlotToMemOperand(AllocatedOperand::cast(op)->index());
   }
+
+  MemOperand InputStackSlot32(size_t index) {
+#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
+    // We want to read the 32 bits directly from memory; on big-endian
+    // targets the low word of the 64-bit slot is at offset +4.
+    MemOperand mem = InputStackSlot(index);
+    return MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
+#else
+    return InputStackSlot(index);
+#endif
+  }
 };
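Editor's note: a standalone illustration (not V8 code) of why InputStackSlot32 adds +4 on big-endian S390X: the low 32 bits of a 64-bit slot sit in the second word there, while on little-endian targets they are at offset 0 and no adjustment is needed.

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint64_t slot = 0x1122334455667788ULL;
  uint32_t word;
  std::memcpy(&word, reinterpret_cast<const char*>(&slot), 4);
  std::printf("offset 0: %08x\n", word);  // big-endian: 11223344 (high word)
  std::memcpy(&word, reinterpret_cast<const char*>(&slot) + 4, 4);
  std::printf("offset 4: %08x\n", word);  // big-endian: 55667788 (the low word)
}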
 
+static inline bool HasRegisterOutput(Instruction* instr, int index = 0) {
+  return instr->OutputCount() > 0 && instr->OutputAt(index)->IsRegister();
+}
+
 static inline bool HasRegisterInput(Instruction* instr, int index) {
   return instr->InputAt(index)->IsRegister();
 }
 
+static inline bool HasFPRegisterInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsFPRegister();
+}
+
 static inline bool HasImmediateInput(Instruction* instr, size_t index) {
   return instr->InputAt(index)->IsImmediate();
 }
@@ -133,6 +151,10 @@
   return instr->InputAt(index)->IsStackSlot();
 }
 
+static inline bool HasFPStackSlotInput(Instruction* instr, size_t index) {
+  return instr->InputAt(index)->IsFPStackSlot();
+}
+
 namespace {
 
 class OutOfLineLoadNAN32 final : public OutOfLineCode {
@@ -250,17 +272,33 @@
       return eq;
     case kNotEqual:
       return ne;
-    case kSignedLessThan:
     case kUnsignedLessThan:
+      // an unsigned number is never less than 0
+      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
+        return CC_NOP;
+    // fall through
+    case kSignedLessThan:
       return lt;
-    case kSignedGreaterThanOrEqual:
     case kUnsignedGreaterThanOrEqual:
+      // an unsigned number is always greater than or equal to 0
+      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
+        return CC_ALWAYS;
+    // fall through
+    case kSignedGreaterThanOrEqual:
       return ge;
-    case kSignedLessThanOrEqual:
     case kUnsignedLessThanOrEqual:
+      // an unsigned number is never less than 0
+      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
+        return CC_EQ;
+    // fall through
+    case kSignedLessThanOrEqual:
       return le;
-    case kSignedGreaterThan:
     case kUnsignedGreaterThan:
+      // an unsigned number is always greater than or equal to 0
+      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
+        return ne;
+    // fall through
+    case kSignedGreaterThan:
       return gt;
     case kOverflow:
       // Overflow checked for AddP/SubP only.
@@ -292,8 +330,176 @@
   return kNoCondition;
 }
 
+typedef void (MacroAssembler::*RRTypeInstr)(Register, Register);
+typedef void (MacroAssembler::*RMTypeInstr)(Register, const MemOperand&);
+typedef void (MacroAssembler::*RITypeInstr)(Register, const Operand&);
+typedef void (MacroAssembler::*RRRTypeInstr)(Register, Register, Register);
+typedef void (MacroAssembler::*RRMTypeInstr)(Register, Register,
+                                             const MemOperand&);
+typedef void (MacroAssembler::*RRITypeInstr)(Register, Register,
+                                             const Operand&);
+
+#define CHECK_AND_ZERO_EXT_OUTPUT(num)                                   \
+  {                                                                      \
+    CHECK(HasImmediateInput(instr, (num)));                              \
+    int doZeroExt = i.InputInt32(num);                                   \
+    if (doZeroExt) masm->LoadlW(i.OutputRegister(), i.OutputRegister()); \
+  }
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+                   Instruction* instr, RRTypeInstr rr_instr,
+                   RMTypeInstr rm_instr, RITypeInstr ri_instr) {
+  CHECK(i.OutputRegister().is(i.InputRegister(0)));
+  AddressingMode mode = AddressingModeField::decode(instr->opcode());
+  int zeroExtIndex = 2;
+  if (mode != kMode_None) {
+    size_t first_index = 1;
+    MemOperand operand = i.MemoryOperand(&mode, &first_index);
+    zeroExtIndex = first_index;
+    CHECK(rm_instr != NULL);
+    (masm->*rm_instr)(i.OutputRegister(), operand);
+  } else if (HasRegisterInput(instr, 1)) {
+    (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
+  } else if (HasImmediateInput(instr, 1)) {
+    (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
+  } else if (HasStackSlotInput(instr, 1)) {
+    (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
+  } else {
+    UNREACHABLE();
+  }
+  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+                   Instruction* instr, RRRTypeInstr rrr_instr,
+                   RMTypeInstr rm_instr, RITypeInstr ri_instr) {
+  AddressingMode mode = AddressingModeField::decode(instr->opcode());
+  int zeroExtIndex = 2;
+  if (mode != kMode_None) {
+    CHECK(i.OutputRegister().is(i.InputRegister(0)));
+    size_t first_index = 1;
+    MemOperand operand = i.MemoryOperand(&mode, &first_index);
+    zeroExtIndex = first_index;
+    CHECK(rm_instr != NULL);
+    (masm->*rm_instr)(i.OutputRegister(), operand);
+  } else if (HasRegisterInput(instr, 1)) {
+    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputRegister(1));
+  } else if (HasImmediateInput(instr, 1)) {
+    CHECK(i.OutputRegister().is(i.InputRegister(0)));
+    (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
+  } else if (HasStackSlotInput(instr, 1)) {
+    CHECK(i.OutputRegister().is(i.InputRegister(0)));
+    (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
+  } else {
+    UNREACHABLE();
+  }
+  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+                   Instruction* instr, RRRTypeInstr rrr_instr,
+                   RMTypeInstr rm_instr, RRITypeInstr rri_instr) {
+  AddressingMode mode = AddressingModeField::decode(instr->opcode());
+  int zeroExtIndex = 2;
+  if (mode != kMode_None) {
+    CHECK(i.OutputRegister().is(i.InputRegister(0)));
+    size_t first_index = 1;
+    MemOperand operand = i.MemoryOperand(&mode, &first_index);
+    zeroExtIndex = first_index;
+    CHECK(rm_instr != NULL);
+    (masm->*rm_instr)(i.OutputRegister(), operand);
+  } else if (HasRegisterInput(instr, 1)) {
+    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputRegister(1));
+  } else if (HasImmediateInput(instr, 1)) {
+    (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputImmediate(1));
+  } else if (HasStackSlotInput(instr, 1)) {
+    CHECK(i.OutputRegister().is(i.InputRegister(0)));
+    (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
+  } else {
+    UNREACHABLE();
+  }
+  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+                   Instruction* instr, RRRTypeInstr rrr_instr,
+                   RRMTypeInstr rrm_instr, RRITypeInstr rri_instr) {
+  AddressingMode mode = AddressingModeField::decode(instr->opcode());
+  int zeroExtIndex = 2;
+  if (mode != kMode_None) {
+    size_t first_index = 1;
+    MemOperand operand = i.MemoryOperand(&mode, &first_index);
+    zeroExtIndex = first_index;
+    CHECK(rrm_instr != NULL);
+    (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0), operand);
+  } else if (HasRegisterInput(instr, 1)) {
+    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputRegister(1));
+  } else if (HasImmediateInput(instr, 1)) {
+    (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputImmediate(1));
+  } else if (HasStackSlotInput(instr, 1)) {
+    (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputStackSlot32(1));
+  } else {
+    UNREACHABLE();
+  }
+  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
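+// Register/immediate-only variant (RRR/RRI); the selector never encodes an
+// addressing mode for these opcodes.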
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+                   Instruction* instr, RRRTypeInstr rrr_instr,
+                   RRITypeInstr rri_instr) {
+  AddressingMode mode = AddressingModeField::decode(instr->opcode());
+  CHECK(mode == kMode_None);
+  int zeroExtIndex = 2;
+  if (HasRegisterInput(instr, 1)) {
+    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputRegister(1));
+  } else if (HasImmediateInput(instr, 1)) {
+    (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputImmediate(1));
+  } else {
+    UNREACHABLE();
+  }
+  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
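+// Two-operand register/immediate-only variant (RR/RI); the output must
+// alias input 0 and no addressing mode may be encoded.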
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+                   Instruction* instr, RRTypeInstr rr_instr,
+                   RITypeInstr ri_instr) {
+  AddressingMode mode = AddressingModeField::decode(instr->opcode());
+  CHECK(mode == kMode_None);
+  CHECK(i.OutputRegister().is(i.InputRegister(0)));
+  int zeroExtIndex = 2;
+  if (HasRegisterInput(instr, 1)) {
+    (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
+  } else if (HasImmediateInput(instr, 1)) {
+    (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
+  } else {
+    UNREACHABLE();
+  }
+  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
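+// Binds three MacroAssembler members for the current opcode and dispatches
+// to whichever AssembleBinOp overload matches their signatures.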
+#define ASSEMBLE_BIN_OP(instr1, instr2, instr3)            \
+  AssembleBinOp(i, masm(), instr, &MacroAssembler::instr1, \
+                &MacroAssembler::instr2, &MacroAssembler::instr3)
+
+#undef CHECK_AND_ZERO_EXT_OUTPUT
+
 }  // namespace
 
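+// Same zero-extension check as inside the anonymous namespace, restated in
+// terms of the '__' masm() shorthand used by the code below.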
+#define CHECK_AND_ZERO_EXT_OUTPUT(num)                                \
+  {                                                                   \
+    CHECK(HasImmediateInput(instr, (num)));                           \
+    int doZeroExt = i.InputInt32(num);                                \
+    if (doZeroExt) __ LoadlW(i.OutputRegister(), i.OutputRegister()); \
+  }
+
 #define ASSEMBLE_FLOAT_UNOP(asm_instr)                                \
   do {                                                                \
     __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
@@ -318,26 +524,92 @@
     }                                                      \
   } while (0)
 
-#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                 \
-  do {                                                          \
-    if (HasRegisterInput(instr, 1)) {                           \
-      if (i.CompareLogical()) {                                 \
-        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));  \
-      } else {                                                  \
-        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));   \
-      }                                                         \
-    } else {                                                    \
-      if (i.CompareLogical()) {                                 \
-        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
-      } else {                                                  \
-        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));  \
-      }                                                         \
-    }                                                           \
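+// Compare helper extended to fold loads (memory operands) and spilled stack
+// slots directly into the compare, in addition to registers and immediates.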
+#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                         \
+  do {                                                                  \
+    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+    if (mode != kMode_None) {                                           \
+      size_t first_index = 1;                                           \
+      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), operand);                     \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), operand);                      \
+      }                                                                 \
+    } else if (HasRegisterInput(instr, 1)) {                            \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));          \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));           \
+      }                                                                 \
+    } else if (HasImmediateInput(instr, 1)) {                           \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1));         \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));          \
+      }                                                                 \
+    } else {                                                            \
+      DCHECK(HasStackSlotInput(instr, 1));                              \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot(1));         \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), i.InputStackSlot(1));          \
+      }                                                                 \
+    }                                                                   \
   } while (0)
 
-#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                            \
-  do {                                                               \
-    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1); \
+#define ASSEMBLE_COMPARE32(cmp_instr, cmpl_instr)                       \
+  do {                                                                  \
+    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+    if (mode != kMode_None) {                                           \
+      size_t first_index = 1;                                           \
+      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), operand);                     \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), operand);                      \
+      }                                                                 \
+    } else if (HasRegisterInput(instr, 1)) {                            \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));          \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));           \
+      }                                                                 \
+    } else if (HasImmediateInput(instr, 1)) {                           \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1));         \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));          \
+      }                                                                 \
+    } else {                                                            \
+      DCHECK(HasStackSlotInput(instr, 1));                              \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot32(1));       \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), i.InputStackSlot32(1));        \
+      }                                                                 \
+    }                                                                   \
+  } while (0)
+
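+// The RM-form float compares only encode small unsigned displacements, so a
+// spill slot at a negative offset is first loaded into the scratch FP
+// register and compared register-to-register.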
+#define ASSEMBLE_FLOAT_COMPARE(cmp_rr_instr, cmp_rm_instr, load_instr)     \
+  do {                                                                     \
+    AddressingMode mode = AddressingModeField::decode(instr->opcode());    \
+    if (mode != kMode_None) {                                              \
+      size_t first_index = 1;                                              \
+      MemOperand operand = i.MemoryOperand(&mode, &first_index);           \
+      __ cmp_rm_instr(i.InputDoubleRegister(0), operand);                  \
+    } else if (HasFPRegisterInput(instr, 1)) {                             \
+      __ cmp_rr_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+    } else {                                                               \
+      USE(HasFPStackSlotInput);                                            \
+      DCHECK(HasFPStackSlotInput(instr, 1));                               \
+      MemOperand operand = i.InputStackSlot(1);                            \
+      if (operand.offset() >= 0) {                                         \
+        __ cmp_rm_instr(i.InputDoubleRegister(0), operand);                \
+      } else {                                                             \
+        __ load_instr(kScratchDoubleReg, operand);                         \
+        __ cmp_rr_instr(i.InputDoubleRegister(0), kScratchDoubleReg);      \
+      }                                                                    \
+    }                                                                      \
   } while (0)
 
 // Divide instruction dr will implicitly use register pair
@@ -349,7 +621,7 @@
     __ LoadRR(r0, i.InputRegister(0));          \
     __ shift_instr(r0, Operand(32));            \
     __ div_instr(r0, i.InputRegister(1));       \
-    __ ltr(i.OutputRegister(), r0);             \
+    __ LoadlW(i.OutputRegister(), r0);          \
   } while (0)
 
 #define ASSEMBLE_FLOAT_MODULO()                                               \
@@ -569,6 +841,7 @@
     }                                                                  \
     __ bind(&done);                                                    \
   } while (0)
 // Only the MRI addressing mode is available for these instructions
 #define ASSEMBLE_LOAD_FLOAT(asm_instr)                \
   do {                                                \
@@ -586,6 +859,38 @@
     __ asm_instr(result, operand);               \
   } while (0)
 
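+// Load-and-test: sets the condition code from a register, memory, or
+// stack-slot input; if no output register was allocated, the value is
+// materialized into r0 purely for its flags.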
+#define ASSEMBLE_LOADANDTEST64(asm_instr_rr, asm_instr_rm)              \
+  {                                                                     \
+    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0;  \
+    if (mode != kMode_None) {                                           \
+      size_t first_index = 0;                                           \
+      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
+      __ asm_instr_rm(dst, operand);                                    \
+    } else if (HasRegisterInput(instr, 0)) {                            \
+      __ asm_instr_rr(dst, i.InputRegister(0));                         \
+    } else {                                                            \
+      DCHECK(HasStackSlotInput(instr, 0));                              \
+      __ asm_instr_rm(dst, i.InputStackSlot(0));                        \
+    }                                                                   \
+  }
+
+#define ASSEMBLE_LOADANDTEST32(asm_instr_rr, asm_instr_rm)              \
+  {                                                                     \
+    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0;  \
+    if (mode != kMode_None) {                                           \
+      size_t first_index = 0;                                           \
+      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
+      __ asm_instr_rm(dst, operand);                                    \
+    } else if (HasRegisterInput(instr, 0)) {                            \
+      __ asm_instr_rr(dst, i.InputRegister(0));                         \
+    } else {                                                            \
+      DCHECK(HasStackSlotInput(instr, 0));                              \
+      __ asm_instr_rm(dst, i.InputStackSlot32(0));                      \
+    }                                                                   \
+  }
+
 #define ASSEMBLE_STORE_FLOAT32()                         \
   do {                                                   \
     size_t index = 0;                                    \
@@ -729,7 +1034,8 @@
 
   // Check if current frame is an arguments adaptor frame.
   __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ CmpP(scratch1,
+          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ bne(&done);
 
   // Load arguments count from current arguments adaptor frame (note, it
@@ -984,10 +1290,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1048,35 +1352,43 @@
       break;
     }
     case kS390_And32:
-      ASSEMBLE_BINOP(And);
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        ASSEMBLE_BIN_OP(nrk, And, nilf);
+      } else {
+        ASSEMBLE_BIN_OP(nr, And, nilf);
+      }
       break;
     case kS390_And64:
       ASSEMBLE_BINOP(AndP);
       break;
     case kS390_Or32:
-      ASSEMBLE_BINOP(Or);
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        ASSEMBLE_BIN_OP(ork, Or, oilf);
+      } else {
+        ASSEMBLE_BIN_OP(or_z, Or, oilf);
+      }
+      break;
     case kS390_Or64:
       ASSEMBLE_BINOP(OrP);
       break;
     case kS390_Xor32:
-      ASSEMBLE_BINOP(Xor);
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        ASSEMBLE_BIN_OP(xrk, Xor, xilf);
+      } else {
+        ASSEMBLE_BIN_OP(xr, Xor, xilf);
+      }
       break;
     case kS390_Xor64:
       ASSEMBLE_BINOP(XorP);
       break;
     case kS390_ShiftLeft32:
-      if (HasRegisterInput(instr, 1)) {
-        if (i.OutputRegister().is(i.InputRegister(1)) &&
-            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
-          __ LoadRR(kScratchReg, i.InputRegister(1));
-          __ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
-        } else {
-          ASSEMBLE_BINOP(ShiftLeft);
-        }
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        AssembleBinOp(i, masm(), instr, &MacroAssembler::ShiftLeft,
+                      &MacroAssembler::ShiftLeft);
       } else {
-        ASSEMBLE_BINOP(ShiftLeft);
+        AssembleBinOp(i, masm(), instr, &MacroAssembler::sll,
+                      &MacroAssembler::sll);
       }
-      __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_ShiftLeft64:
@@ -1084,18 +1396,13 @@
       break;
 #endif
     case kS390_ShiftRight32:
-      if (HasRegisterInput(instr, 1)) {
-        if (i.OutputRegister().is(i.InputRegister(1)) &&
-            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
-          __ LoadRR(kScratchReg, i.InputRegister(1));
-          __ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
-        } else {
-          ASSEMBLE_BINOP(ShiftRight);
-        }
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        AssembleBinOp(i, masm(), instr, &MacroAssembler::srlk,
+                      &MacroAssembler::srlk);
       } else {
-        ASSEMBLE_BINOP(ShiftRight);
+        AssembleBinOp(i, masm(), instr, &MacroAssembler::srl,
+                      &MacroAssembler::srl);
       }
-      __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_ShiftRight64:
@@ -1103,19 +1410,13 @@
       break;
 #endif
     case kS390_ShiftRightArith32:
-      if (HasRegisterInput(instr, 1)) {
-        if (i.OutputRegister().is(i.InputRegister(1)) &&
-            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
-          __ LoadRR(kScratchReg, i.InputRegister(1));
-          __ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
-                             kScratchReg);
-        } else {
-          ASSEMBLE_BINOP(ShiftRightArith);
-        }
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        AssembleBinOp(i, masm(), instr, &MacroAssembler::srak,
+                      &MacroAssembler::srak);
       } else {
-        ASSEMBLE_BINOP(ShiftRightArith);
+        AssembleBinOp(i, masm(), instr, &MacroAssembler::sra,
+                      &MacroAssembler::sra);
       }
-      __ LoadlW(i.OutputRegister(), i.OutputRegister());
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_ShiftRightArith64:
@@ -1197,7 +1498,7 @@
       break;
     }
 #endif
-    case kS390_RotRight32:
+    case kS390_RotRight32: {
       if (HasRegisterInput(instr, 1)) {
         __ LoadComplementRR(kScratchReg, i.InputRegister(1));
         __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
@@ -1205,7 +1506,9 @@
         __ rll(i.OutputRegister(), i.InputRegister(0),
                Operand(32 - i.InputInt32(1)));
       }
+      CHECK_AND_ZERO_EXT_OUTPUT(2);
       break;
+    }
 #if V8_TARGET_ARCH_S390X
     case kS390_RotRight64:
       if (HasRegisterInput(instr, 1)) {
@@ -1216,33 +1519,6 @@
                 Operand(64 - i.InputInt32(1)));
       }
       break;
-#endif
-    case kS390_Not32:
-      __ Not32(i.OutputRegister(), i.InputRegister(0));
-      break;
-    case kS390_Not64:
-      __ Not64(i.OutputRegister(), i.InputRegister(0));
-      break;
-    case kS390_RotLeftAndMask32:
-      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
-        int shiftAmount = i.InputInt32(1);
-        int endBit = 63 - i.InputInt32(3);
-        int startBit = 63 - i.InputInt32(2);
-        __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
-        __ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
-                 Operand(endBit), Operand::Zero(), true);
-      } else {
-        int shiftAmount = i.InputInt32(1);
-        int clearBitLeft = 63 - i.InputInt32(2);
-        int clearBitRight = i.InputInt32(3);
-        __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
-        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitLeft));
-        __ srlg(i.OutputRegister(), i.OutputRegister(),
-                Operand((clearBitLeft + clearBitRight)));
-        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitRight));
-      }
-      break;
-#if V8_TARGET_ARCH_S390X
     case kS390_RotLeftAndClear64:
       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
         int shiftAmount = i.InputInt32(1);
@@ -1291,10 +1567,14 @@
       }
       break;
 #endif
-    case kS390_Add32:
-      ASSEMBLE_BINOP(Add32);
-      __ LoadW(i.OutputRegister(), i.OutputRegister());
+    case kS390_Add32: {
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        ASSEMBLE_BIN_OP(ark, Add32, Add32_RRI);
+      } else {
+        ASSEMBLE_BIN_OP(ar, Add32, Add32_RI);
+      }
       break;
+    }
     case kS390_Add64:
       ASSEMBLE_BINOP(AddP);
       break;
@@ -1319,8 +1599,11 @@
       }
       break;
     case kS390_Sub32:
-      ASSEMBLE_BINOP(Sub32);
-      __ LoadW(i.OutputRegister(), i.OutputRegister());
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        ASSEMBLE_BIN_OP(srk, Sub32, Sub32_RRI);
+      } else {
+        ASSEMBLE_BIN_OP(sr, Sub32, Sub32_RI);
+      }
       break;
     case kS390_Sub64:
       ASSEMBLE_BINOP(SubP);
@@ -1352,26 +1635,15 @@
       }
       break;
     case kS390_Mul32:
-      if (HasRegisterInput(instr, 1)) {
-        __ Mul32(i.InputRegister(0), i.InputRegister(1));
-      } else if (HasImmediateInput(instr, 1)) {
-        __ Mul32(i.InputRegister(0), i.InputImmediate(1));
-      } else if (HasStackSlotInput(instr, 1)) {
-#ifdef V8_TARGET_ARCH_S390X
-        // Avoid endian-issue here:
-        // stg r1, 0(fp)
-        // ...
-        // msy r2, 0(fp) <-- This will read the upper 32 bits
-        __ lg(kScratchReg, i.InputStackSlot(1));
-        __ Mul32(i.InputRegister(0), kScratchReg);
-#else
-        __ Mul32(i.InputRegister(0), i.InputStackSlot(1));
-#endif
-      } else {
-        UNIMPLEMENTED();
-      }
+      ASSEMBLE_BIN_OP(Mul32, Mul32, Mul32);
+      break;
+    case kS390_Mul32WithOverflow:
+      ASSEMBLE_BIN_OP(Mul32WithOverflowIfCCUnequal,
+                      Mul32WithOverflowIfCCUnequal,
+                      Mul32WithOverflowIfCCUnequal);
       break;
     case kS390_Mul64:
+      CHECK(i.OutputRegister().is(i.InputRegister(0)));
       if (HasRegisterInput(instr, 1)) {
         __ Mul64(i.InputRegister(0), i.InputRegister(1));
       } else if (HasImmediateInput(instr, 1)) {
@@ -1383,50 +1655,10 @@
       }
       break;
     case kS390_MulHigh32:
-      __ LoadRR(r1, i.InputRegister(0));
-      if (HasRegisterInput(instr, 1)) {
-        __ mr_z(r0, i.InputRegister(1));
-      } else if (HasStackSlotInput(instr, 1)) {
-#ifdef V8_TARGET_ARCH_S390X
-        // Avoid endian-issue here:
-        // stg r1, 0(fp)
-        // ...
-        // mfy r2, 0(fp) <-- This will read the upper 32 bits
-        __ lg(kScratchReg, i.InputStackSlot(1));
-        __ mr_z(r0, kScratchReg);
-#else
-        __ mfy(r0, i.InputStackSlot(1));
-#endif
-      } else {
-        UNIMPLEMENTED();
-      }
-      __ LoadW(i.OutputRegister(), r0);
-      break;
-    case kS390_Mul32WithHigh32:
-      __ LoadRR(r1, i.InputRegister(0));
-      __ mr_z(r0, i.InputRegister(1));
-      __ LoadW(i.OutputRegister(0), r1);  // low
-      __ LoadW(i.OutputRegister(1), r0);  // high
+      ASSEMBLE_BIN_OP(MulHigh32, MulHigh32, MulHigh32);
       break;
     case kS390_MulHighU32:
-      __ LoadRR(r1, i.InputRegister(0));
-      if (HasRegisterInput(instr, 1)) {
-        __ mlr(r0, i.InputRegister(1));
-      } else if (HasStackSlotInput(instr, 1)) {
-#ifdef V8_TARGET_ARCH_S390X
-        // Avoid endian-issue here:
-        // stg r1, 0(fp)
-        // ...
-        // mfy r2, 0(fp) <-- This will read the upper 32 bits
-        __ lg(kScratchReg, i.InputStackSlot(1));
-        __ mlr(r0, kScratchReg);
-#else
-        __ ml(r0, i.InputStackSlot(1));
-#endif
-      } else {
-        UNIMPLEMENTED();
-      }
-      __ LoadlW(i.OutputRegister(), r0);
+      ASSEMBLE_BIN_OP(MulHighU32, MulHighU32, MulHighU32);
       break;
     case kS390_MulFloat:
       // Ensure we don't clobber right
@@ -1455,13 +1687,10 @@
       __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
       break;
 #endif
-    case kS390_Div32:
-      __ LoadRR(r0, i.InputRegister(0));
-      __ srda(r0, Operand(32));
-      __ dr(r0, i.InputRegister(1));
-      __ LoadAndTestP_ExtendSrc(i.OutputRegister(),
-                                r1);  // Copy R1: Quotient to output
+    case kS390_Div32: {
+      ASSEMBLE_BIN_OP(Div32, Div32, Div32);
       break;
+    }
 #if V8_TARGET_ARCH_S390X
     case kS390_DivU64:
       __ LoadRR(r1, i.InputRegister(0));
@@ -1470,14 +1699,10 @@
       __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
       break;
 #endif
-    case kS390_DivU32:
-      __ LoadRR(r0, i.InputRegister(0));
-      __ srdl(r0, Operand(32));
-      __ dlr(r0, i.InputRegister(1));  // R0:R1: Dividend
-      __ LoadlW(i.OutputRegister(), r1);  // Copy R1: Quotient to output
-      __ LoadAndTestP_ExtendSrc(r1, r1);
+    case kS390_DivU32: {
+      ASSEMBLE_BIN_OP(DivU32, DivU32, DivU32);
       break;
-
+    }
     case kS390_DivFloat:
       // InputDoubleRegister(1)=InputDoubleRegister(0)/InputDoubleRegister(1)
       if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
@@ -1503,10 +1728,10 @@
       }
       break;
     case kS390_Mod32:
-      ASSEMBLE_MODULO(dr, srda);
+      ASSEMBLE_BIN_OP(Mod32, Mod32, Mod32);
       break;
     case kS390_ModU32:
-      ASSEMBLE_MODULO(dlr, srdl);
+      ASSEMBLE_BIN_OP(ModU32, ModU32, ModU32);
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_Mod64:
@@ -1611,7 +1836,7 @@
     }
     case kS390_Neg32:
       __ lcr(i.OutputRegister(), i.InputRegister(0));
-      __ LoadW(i.OutputRegister(), i.OutputRegister());
+      CHECK_AND_ZERO_EXT_OUTPUT(1);
       break;
     case kS390_Neg64:
       __ lcgr(i.OutputRegister(), i.InputRegister(0));
@@ -1659,14 +1884,16 @@
     case kS390_Cntlz32: {
       __ llgfr(i.OutputRegister(), i.InputRegister(0));
       __ flogr(r0, i.OutputRegister());
-      __ LoadRR(i.OutputRegister(), r0);
-      __ SubP(i.OutputRegister(), Operand(32));
-    } break;
+      __ Add32(i.OutputRegister(), r0, Operand(-32));
+      // No need to zero-extend: the llgfr above already cleared the upper
+      // 32 bits.
+      break;
+    }
 #if V8_TARGET_ARCH_S390X
     case kS390_Cntlz64: {
       __ flogr(r0, i.InputRegister(0));
       __ LoadRR(i.OutputRegister(), r0);
-    } break;
+      break;
+    }
 #endif
     case kS390_Popcnt32:
       __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
@@ -1677,7 +1904,7 @@
       break;
 #endif
     case kS390_Cmp32:
-      ASSEMBLE_COMPARE(Cmp32, CmpLogical32);
+      ASSEMBLE_COMPARE32(Cmp32, CmpLogical32);
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_Cmp64:
@@ -1685,28 +1912,38 @@
       break;
 #endif
     case kS390_CmpFloat:
-      __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      ASSEMBLE_FLOAT_COMPARE(cebr, ceb, ley);
       break;
     case kS390_CmpDouble:
-      __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      ASSEMBLE_FLOAT_COMPARE(cdbr, cdb, ldy);
       break;
     case kS390_Tst32:
       if (HasRegisterInput(instr, 1)) {
-        __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
+        __ And(r0, i.InputRegister(0), i.InputRegister(1));
       } else {
-        __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+        Operand opnd = i.InputImmediate(1);
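+        // A mask that fits in 16 bits can use test-under-mask directly;
+        // wider masks fall back to an AND into r0.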
+        if (is_uint16(opnd.immediate())) {
+          __ tmll(i.InputRegister(0), opnd);
+        } else {
+          __ lr(r0, i.InputRegister(0));
+          __ nilf(r0, opnd);
+        }
       }
-      __ LoadAndTestP_ExtendSrc(r0, r0);
       break;
-#if V8_TARGET_ARCH_S390X
     case kS390_Tst64:
       if (HasRegisterInput(instr, 1)) {
         __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
       } else {
-        __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+        Operand opnd = i.InputImmediate(1);
+        if (is_uint16(opnd.immediate())) {
+          __ tmll(i.InputRegister(0), opnd);
+        } else {
+          __ AndP(r0, i.InputRegister(0), opnd);
+        }
       }
       break;
-#endif
     case kS390_Float64SilenceNaN: {
       DoubleRegister value = i.InputDoubleRegister(0);
       DoubleRegister result = i.OutputDoubleRegister();
@@ -1758,18 +1995,12 @@
       break;
     }
     case kS390_ExtendSignWord8:
-#if V8_TARGET_ARCH_S390X
-      __ lgbr(i.OutputRegister(), i.InputRegister(0));
-#else
       __ lbr(i.OutputRegister(), i.InputRegister(0));
-#endif
+      CHECK_AND_ZERO_EXT_OUTPUT(1);
       break;
     case kS390_ExtendSignWord16:
-#if V8_TARGET_ARCH_S390X
-      __ lghr(i.OutputRegister(), i.InputRegister(0));
-#else
       __ lhr(i.OutputRegister(), i.InputRegister(0));
-#endif
+      CHECK_AND_ZERO_EXT_OUTPUT(1);
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_ExtendSignWord32:
@@ -2005,6 +2236,14 @@
       ASSEMBLE_LOAD_INTEGER(lg);
       break;
 #endif
+    case kS390_LoadAndTestWord32: {
+      ASSEMBLE_LOADANDTEST32(ltr, lt_z);
+      break;
+    }
+    case kS390_LoadAndTestWord64: {
+      ASSEMBLE_LOADANDTEST64(ltgr, ltg);
+      break;
+    }
     case kS390_LoadFloat32:
       ASSEMBLE_LOAD_FLOAT(LoadFloat32);
       break;
@@ -2040,6 +2279,9 @@
     case kS390_StoreDouble:
       ASSEMBLE_STORE_DOUBLE();
       break;
+    case kS390_Lay:
+      __ lay(i.OutputRegister(), i.MemoryOperand());
+      break;
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
 #if V8_TARGET_ARCH_S390X
@@ -2152,6 +2394,84 @@
   if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
 }
 
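+// Branches to an out-of-line WebAssembly trap when the condition holds; the
+// trap call sequence is kept out of line so the fast path costs only a
+// conditional branch.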
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      S390OperandConverter i(gen_, instr_);
+
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        // We use the context register as the scratch register, because we do
+        // not have a context here.
+        __ PrepareCallCFunction(0, 0, cp);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+        }
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Label end;
+
+  ArchOpcode op = instr->arch_opcode();
+  Condition cond = FlagsConditionToCondition(condition, op);
+  if (op == kS390_CmpDouble) {
+    // Check for an unordered (NaN) result where the condition requires it.
+    if (cond == le) {
+      __ bunordered(&end);
+      // Unnecessary for eq/lt since only FU bit will be set.
+    } else if (cond == gt) {
+      __ bunordered(tlabel);
+      // Unnecessary for ne/ge since only FU bit will be set.
+    }
+  }
+  __ b(cond, tlabel);
+  __ bind(&end);
+}
+
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                         FlagsCondition condition) {
@@ -2210,16 +2530,19 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   // TODO(turbofan): We should be able to generate better code by sharing the
   // actual final call site and just bl'ing to it here, similar to what we do
   // in the lithium backend.
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -2377,25 +2700,22 @@
       switch (src.type()) {
         case Constant::kInt32:
 #if V8_TARGET_ARCH_S390X
-          if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmSizeReference(src.rmode())) {
 #else
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmReference(src.rmode())) {
 #endif
             __ mov(dst, Operand(src.ToInt32(), src.rmode()));
           } else {
-            __ mov(dst, Operand(src.ToInt32()));
+            __ Load(dst, Operand(src.ToInt32()));
           }
           break;
         case Constant::kInt64:
 #if V8_TARGET_ARCH_S390X
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
             __ mov(dst, Operand(src.ToInt64(), src.rmode()));
           } else {
-            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
-            __ mov(dst, Operand(src.ToInt64()));
+            DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
+            __ Load(dst, Operand(src.ToInt64()));
           }
 #else
           __ mov(dst, Operand(src.ToInt64()));
diff --git a/src/compiler/s390/instruction-codes-s390.h b/src/compiler/s390/instruction-codes-s390.h
index 80e1532..b99e79f 100644
--- a/src/compiler/s390/instruction-codes-s390.h
+++ b/src/compiler/s390/instruction-codes-s390.h
@@ -31,10 +31,10 @@
   V(S390_RotRight64)               \
   V(S390_Not32)                    \
   V(S390_Not64)                    \
-  V(S390_RotLeftAndMask32)         \
   V(S390_RotLeftAndClear64)        \
   V(S390_RotLeftAndClearLeft64)    \
   V(S390_RotLeftAndClearRight64)   \
+  V(S390_Lay)                      \
   V(S390_Add32)                    \
   V(S390_Add64)                    \
   V(S390_AddPair)                  \
@@ -47,7 +47,7 @@
   V(S390_SubPair)                  \
   V(S390_MulPair)                  \
   V(S390_Mul32)                    \
-  V(S390_Mul32WithHigh32)          \
+  V(S390_Mul32WithOverflow)        \
   V(S390_Mul64)                    \
   V(S390_MulHigh32)                \
   V(S390_MulHighU32)               \
@@ -135,6 +135,10 @@
   V(S390_LoadWordU16)              \
   V(S390_LoadWordS32)              \
   V(S390_LoadWordU32)              \
+  V(S390_LoadAndTestWord32)        \
+  V(S390_LoadAndTestWord64)        \
+  V(S390_LoadAndTestFloat32)       \
+  V(S390_LoadAndTestFloat64)       \
   V(S390_LoadReverse16RR)          \
   V(S390_LoadReverse32RR)          \
   V(S390_LoadReverse64RR)          \
diff --git a/src/compiler/s390/instruction-scheduler-s390.cc b/src/compiler/s390/instruction-scheduler-s390.cc
index 5ebe489..d6ec3de 100644
--- a/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/src/compiler/s390/instruction-scheduler-s390.cc
@@ -32,10 +32,10 @@
     case kS390_RotRight64:
     case kS390_Not32:
     case kS390_Not64:
-    case kS390_RotLeftAndMask32:
     case kS390_RotLeftAndClear64:
     case kS390_RotLeftAndClearLeft64:
     case kS390_RotLeftAndClearRight64:
+    case kS390_Lay:
     case kS390_Add32:
     case kS390_Add64:
     case kS390_AddPair:
@@ -48,7 +48,7 @@
     case kS390_SubFloat:
     case kS390_SubDouble:
     case kS390_Mul32:
-    case kS390_Mul32WithHigh32:
+    case kS390_Mul32WithOverflow:
     case kS390_Mul64:
     case kS390_MulHigh32:
     case kS390_MulHighU32:
@@ -130,6 +130,10 @@
     case kS390_LoadReverse16RR:
     case kS390_LoadReverse32RR:
     case kS390_LoadReverse64RR:
+    case kS390_LoadAndTestWord32:
+    case kS390_LoadAndTestWord64:
+    case kS390_LoadAndTestFloat32:
+    case kS390_LoadAndTestFloat64:
       return kNoOpcodeFlags;
 
     case kS390_LoadWordS8:
diff --git a/src/compiler/s390/instruction-selector-s390.cc b/src/compiler/s390/instruction-selector-s390.cc
index eed08a9..e591d3c 100644
--- a/src/compiler/s390/instruction-selector-s390.cc
+++ b/src/compiler/s390/instruction-selector-s390.cc
@@ -12,29 +12,85 @@
 namespace internal {
 namespace compiler {
 
-enum ImmediateMode {
-  kShift32Imm,
-  kShift64Imm,
-  kInt32Imm,
-  kInt32Imm_Negate,
-  kUint32Imm,
-  kInt20Imm,
-  kNoImmediate
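+// Bit set describing which immediate ranges and instruction formats an
+// opcode accepts; replaces the old single-valued ImmediateMode.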
+enum class OperandMode : uint32_t {
+  kNone = 0u,
+  // Immediate mode
+  kShift32Imm = 1u << 0,
+  kShift64Imm = 1u << 1,
+  kInt32Imm = 1u << 2,
+  kInt32Imm_Negate = 1u << 3,
+  kUint32Imm = 1u << 4,
+  kInt20Imm = 1u << 5,
+  kUint12Imm = 1u << 6,
+  // Instr format
+  kAllowRRR = 1u << 7,
+  kAllowRM = 1u << 8,
+  kAllowRI = 1u << 9,
+  kAllowRRI = 1u << 10,
+  kAllowRRM = 1u << 11,
+  // Useful combination
+  kAllowImmediate = kAllowRI | kAllowRRI,
+  kAllowMemoryOperand = kAllowRM | kAllowRRM,
+  kAllowDistinctOps = kAllowRRR | kAllowRRI | kAllowRRM,
+  kBitWiseCommonMode = kAllowRI,
+  kArithmeticCommonMode = kAllowRM | kAllowRI
 };
 
+typedef base::Flags<OperandMode, uint32_t> OperandModes;
+DEFINE_OPERATORS_FOR_FLAGS(OperandModes);
+OperandModes immediateModeMask =
+    OperandMode::kShift32Imm | OperandMode::kShift64Imm |
+    OperandMode::kInt32Imm | OperandMode::kInt32Imm_Negate |
+    OperandMode::kUint32Imm | OperandMode::kInt20Imm;
+
+#define AndOperandMode                                              \
+  ((OperandMode::kBitWiseCommonMode | OperandMode::kUint32Imm |     \
+    OperandMode::kAllowRM | (CpuFeatures::IsSupported(DISTINCT_OPS) \
+                                 ? OperandMode::kAllowRRR           \
+                                 : OperandMode::kBitWiseCommonMode)))
+
+#define OrOperandMode AndOperandMode
+#define XorOperandMode AndOperandMode
+
+#define ShiftOperandMode                                         \
+  ((OperandMode::kBitWiseCommonMode | OperandMode::kShift64Imm | \
+    (CpuFeatures::IsSupported(DISTINCT_OPS)                      \
+         ? OperandMode::kAllowRRR                                \
+         : OperandMode::kBitWiseCommonMode)))
+
+#define AddOperandMode                                            \
+  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm | \
+    (CpuFeatures::IsSupported(DISTINCT_OPS)                       \
+         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI)      \
+         : OperandMode::kArithmeticCommonMode)))
+#define SubOperandMode                                                   \
+  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm_Negate | \
+    (CpuFeatures::IsSupported(DISTINCT_OPS)                              \
+         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI)             \
+         : OperandMode::kArithmeticCommonMode)))
+#define MulOperandMode \
+  (OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm)
+
 // Adds S390-specific methods for generating operands.
 class S390OperandGenerator final : public OperandGenerator {
  public:
   explicit S390OperandGenerator(InstructionSelector* selector)
       : OperandGenerator(selector) {}
 
-  InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
+  InstructionOperand UseOperand(Node* node, OperandModes mode) {
     if (CanBeImmediate(node, mode)) {
       return UseImmediate(node);
     }
     return UseRegister(node);
   }
 
+  InstructionOperand UseAnyExceptImmediate(Node* node) {
+    if (NodeProperties::IsConstant(node))
+      return UseRegister(node);
+    else
+      return Use(node);
+  }
+
   int64_t GetImmediate(Node* node) {
     if (node->opcode() == IrOpcode::kInt32Constant)
       return OpParameter<int32_t>(node);
@@ -45,7 +101,7 @@
     return 0L;
   }
 
-  bool CanBeImmediate(Node* node, ImmediateMode mode) {
+  bool CanBeImmediate(Node* node, OperandModes mode) {
     int64_t value;
     if (node->opcode() == IrOpcode::kInt32Constant)
       value = OpParameter<int32_t>(node);
@@ -56,22 +112,47 @@
     return CanBeImmediate(value, mode);
   }
 
-  bool CanBeImmediate(int64_t value, ImmediateMode mode) {
-    switch (mode) {
-      case kShift32Imm:
-        return 0 <= value && value < 32;
-      case kShift64Imm:
-        return 0 <= value && value < 64;
-      case kInt32Imm:
-        return is_int32(value);
-      case kInt32Imm_Negate:
-        return is_int32(-value);
-      case kUint32Imm:
-        return is_uint32(value);
-      case kInt20Imm:
-        return is_int20(value);
-      case kNoImmediate:
-        return false;
+  bool CanBeImmediate(int64_t value, OperandModes mode) {
+    if (mode & OperandMode::kShift32Imm)
+      return 0 <= value && value < 32;
+    else if (mode & OperandMode::kShift64Imm)
+      return 0 <= value && value < 64;
+    else if (mode & OperandMode::kInt32Imm)
+      return is_int32(value);
+    else if (mode & OperandMode::kInt32Imm_Negate)
+      return is_int32(-value);
+    else if (mode & OperandMode::kUint32Imm)
+      return is_uint32(value);
+    else if (mode & OperandMode::kInt20Imm)
+      return is_int20(value);
+    else if (mode & OperandMode::kUint12Imm)
+      return is_uint12(value);
+    else
+      return false;
+  }
+
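+  // A load can be folded into 'user' as a memory operand only if the
+  // selector can cover it at the same effect level and its representation
+  // matches the width the compare/test opcode expects.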
+  bool CanBeMemoryOperand(InstructionCode opcode, Node* user, Node* input,
+                          int effect_level) {
+    if (input->opcode() != IrOpcode::kLoad ||
+        !selector()->CanCover(user, input)) {
+      return false;
+    }
+
+    if (effect_level != selector()->GetEffectLevel(input)) {
+      return false;
+    }
+
+    MachineRepresentation rep =
+        LoadRepresentationOf(input->op()).representation();
+    switch (opcode) {
+      case kS390_Cmp64:
+      case kS390_LoadAndTestWord64:
+        return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
+      case kS390_LoadAndTestWord32:
+      case kS390_Cmp32:
+        return rep == MachineRepresentation::kWord32;
+      default:
+        break;
     }
     return false;
   }
@@ -119,9 +200,9 @@
     return mode;
   }
 
-  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
-                                                  InstructionOperand inputs[],
-                                                  size_t* input_count) {
+  AddressingMode GetEffectiveAddressMemoryOperand(
+      Node* operand, InstructionOperand inputs[], size_t* input_count,
+      OperandModes immediate_mode = OperandMode::kInt20Imm) {
 #if V8_TARGET_ARCH_S390X
     BaseWithIndexAndDisplacement64Matcher m(operand,
                                             AddressOption::kAllowInputSwap);
@@ -131,7 +212,7 @@
 #endif
     DCHECK(m.matches());
     if ((m.displacement() == nullptr ||
-         CanBeImmediate(m.displacement(), kInt20Imm))) {
+         CanBeImmediate(m.displacement(), immediate_mode))) {
       DCHECK(m.scale() == 0);
       return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
                                          m.displacement_mode(), inputs,
@@ -158,6 +239,153 @@
 
 namespace {
 
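+// Opcodes whose memory forms lack a long-displacement encoding and are
+// therefore limited to unsigned 12-bit displacements.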
+bool S390OpcodeOnlySupport12BitDisp(ArchOpcode opcode) {
+  switch (opcode) {
+    case kS390_CmpFloat:
+    case kS390_CmpDouble:
+      return true;
+    default:
+      return false;
+  }
+}
+
+bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
+  ArchOpcode opcode = ArchOpcodeField::decode(op);
+  return S390OpcodeOnlySupport12BitDisp(opcode);
+}
+
+#define OpcodeImmMode(op)                                       \
+  (S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
+                                      : OperandMode::kInt20Imm)
+
+ArchOpcode SelectLoadOpcode(Node* node) {
+  NodeMatcher m(node);
+  DCHECK(m.IsLoad());
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kFloat32:
+      opcode = kS390_LoadFloat32;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kS390_LoadDouble;
+      break;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
+      break;
+#if !V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+#endif
+    case MachineRepresentation::kWord32:
+      opcode = kS390_LoadWordU32;
+      break;
+#if V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord64:
+      opcode = kS390_LoadWord64;
+      break;
+#else
+    case MachineRepresentation::kWord64:  // Fall through.
+#endif
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
+    case MachineRepresentation::kNone:
+    default:
+      UNREACHABLE();
+  }
+  return opcode;
+}
+
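+// True when assembling this opcode already zero-extends the 32-bit result
+// as a side effect, so no explicit extension needs to be requested.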
+bool AutoZeroExtendsWord32ToWord64(Node* node) {
+#if !V8_TARGET_ARCH_S390X
+  return true;
+#else
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kUint32MulHigh:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kUint32Mod:
+    case IrOpcode::kWord32Clz:
+    case IrOpcode::kWord32Popcnt:
+      return true;
+    default:
+      return false;
+  }
+  return false;
+#endif
+}
+
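+// True when the node's 32-bit result is known to arrive with its upper
+// 32 bits cleared, making a requested zero-extension redundant.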
+bool ZeroExtendsWord32ToWord64(Node* node) {
+#if !V8_TARGET_ARCH_S390X
+  return true;
+#else
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Add:
+    case IrOpcode::kInt32Sub:
+    case IrOpcode::kWord32And:
+    case IrOpcode::kWord32Or:
+    case IrOpcode::kWord32Xor:
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord32Shr:
+    case IrOpcode::kWord32Sar:
+    case IrOpcode::kInt32Mul:
+    case IrOpcode::kWord32Ror:
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kUint32Mod:
+    case IrOpcode::kWord32Popcnt:
+      return true;
+    // TODO(john.yan): Consider treating the following cases as valid.
+    // case IrOpcode::kWord32Equal:
+    // case IrOpcode::kInt32LessThan:
+    // case IrOpcode::kInt32LessThanOrEqual:
+    // case IrOpcode::kUint32LessThan:
+    // case IrOpcode::kUint32LessThanOrEqual:
+    // case IrOpcode::kUint32MulHigh:
+    //   // These 32-bit operations implicitly zero-extend to 64-bit on x64,
+    //   // so the zero-extension is a no-op.
+    //   return true;
+    // case IrOpcode::kProjection: {
+    //   Node* const value = node->InputAt(0);
+    //   switch (value->opcode()) {
+    //     case IrOpcode::kInt32AddWithOverflow:
+    //     case IrOpcode::kInt32SubWithOverflow:
+    //     case IrOpcode::kInt32MulWithOverflow:
+    //       return true;
+    //     default:
+    //       return false;
+    //   }
+    // }
+    case IrOpcode::kLoad: {
+      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+      switch (load_rep.representation()) {
+        case MachineRepresentation::kWord32:
+          return true;
+        default:
+          return false;
+      }
+    }
+    default:
+      return false;
+  }
+#endif
+}
+
 void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
   S390OperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
@@ -171,15 +399,15 @@
                  g.UseRegister(node->InputAt(1)));
 }
 
+#if V8_TARGET_ARCH_S390X
 void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
-              ImmediateMode operand_mode) {
+              OperandModes operand_mode) {
   S390OperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)),
                  g.UseOperand(node->InputAt(1), operand_mode));
 }
 
-#if V8_TARGET_ARCH_S390X
 void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
                             Node* node) {
   S390OperandGenerator g(selector);
@@ -200,7 +428,7 @@
 // Shared routine for multiple binary operations.
 template <typename Matcher>
 void VisitBinop(InstructionSelector* selector, Node* node,
-                InstructionCode opcode, ImmediateMode operand_mode,
+                InstructionCode opcode, OperandModes operand_mode,
                 FlagsContinuation* cont) {
   S390OperandGenerator g(selector);
   Matcher m(node);
@@ -260,7 +488,10 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -269,54 +500,152 @@
 // Shared routine for multiple binary operations.
 template <typename Matcher>
 void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
-                ImmediateMode operand_mode) {
+                OperandModes operand_mode) {
   FlagsContinuation cont;
   VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
 }
 
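+// Shared emitter for 32-bit binops: narrows operand_mode to the instruction
+// formats the chosen operands allow, appends the zero-extension flag
+// operand, and wires up branch/deopt/trap/set continuations.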
+void VisitBin32op(InstructionSelector* selector, Node* node,
+                  InstructionCode opcode, OperandModes operand_mode,
+                  FlagsContinuation* cont) {
+  S390OperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
+  InstructionOperand inputs[8];
+  size_t input_count = 0;
+  InstructionOperand outputs[2];
+  size_t output_count = 0;
+
+  // If the left input is a covered TruncateInt64ToInt32, operate on its
+  // input directly.
+  if (m.left().IsTruncateInt64ToInt32() && selector->CanCover(node, left)) {
+    left = left->InputAt(0);
+  }
+  // Likewise for the right input.
+  if (m.right().IsTruncateInt64ToInt32() && selector->CanCover(node, right)) {
+    right = right->InputAt(0);
+  }
+
+#if V8_TARGET_ARCH_S390X
+  if ((ZeroExtendsWord32ToWord64(right) || g.CanBeBetterLeftOperand(right)) &&
+      node->op()->HasProperty(Operator::kCommutative) &&
+      !g.CanBeImmediate(right, operand_mode)) {
+    std::swap(left, right);
+  }
+#else
+  if (node->op()->HasProperty(Operator::kCommutative) &&
+      !g.CanBeImmediate(right, operand_mode) &&
+      (g.CanBeBetterLeftOperand(right))) {
+    std::swap(left, right);
+  }
+#endif
+
+  // The left operand always goes in a register.
+  InstructionOperand const left_input = g.UseRegister(left);
+  inputs[input_count++] = left_input;
+
+  // TODO(turbofan): match complex addressing modes.
+  if (left == right) {
+    // If both inputs refer to the same operand, enforce allocating a register
+    // for both of them to ensure that we don't end up generating code like
+    // this:
+    //
+    //   mov rax, [rbp-0x10]
+    //   add rax, [rbp-0x10]
+    //   jo label
+    inputs[input_count++] = left_input;
+    // Can only be RR or RRR
+    operand_mode &= OperandMode::kAllowRRR;
+  } else if ((operand_mode & OperandMode::kAllowImmediate) &&
+             g.CanBeImmediate(right, operand_mode)) {
+    inputs[input_count++] = g.UseImmediate(right);
+    // Can only be RI or RRI
+    operand_mode &= OperandMode::kAllowImmediate;
+  } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
+    NodeMatcher mright(right);
+    if (mright.IsLoad() && selector->CanCover(node, right) &&
+        SelectLoadOpcode(right) == kS390_LoadWordU32) {
+      AddressingMode mode =
+          g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
+      opcode |= AddressingModeField::encode(mode);
+      operand_mode &= ~OperandMode::kAllowImmediate;
+      if (operand_mode & OperandMode::kAllowRM)
+        operand_mode &= ~OperandMode::kAllowDistinctOps;
+    } else if (operand_mode & OperandMode::kAllowRM) {
+      DCHECK(!(operand_mode & OperandMode::kAllowRRM));
+      inputs[input_count++] = g.Use(right);
+      // Cannot be an immediate.
+      operand_mode &=
+          ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
+    } else if (operand_mode & OperandMode::kAllowRRM) {
+      DCHECK(!(operand_mode & OperandMode::kAllowRM));
+      inputs[input_count++] = g.Use(right);
+      // Cannot be an immediate.
+      operand_mode &= ~OperandMode::kAllowImmediate;
+    } else {
+      UNREACHABLE();
+    }
+  } else {
+    inputs[input_count++] = g.UseRegister(right);
+    // Can only be RR or RRR
+    operand_mode &= OperandMode::kAllowRRR;
+  }
+
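+  // Request an explicit zero-extension only when neither the opcode's own
+  // codegen nor the producer of the left operand already guarantees a
+  // cleared upper half.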
+  bool doZeroExt =
+      AutoZeroExtendsWord32ToWord64(node) || !ZeroExtendsWord32ToWord64(left);
+
+  inputs[input_count++] =
+      g.TempImmediate(doZeroExt && (!AutoZeroExtendsWord32ToWord64(node)));
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  if (doZeroExt && (operand_mode & OperandMode::kAllowDistinctOps) &&
+      // If we can deoptimize as a result of the binop, we need to make sure
+      // that the deopt inputs are not overwritten by the binop result. One
+      // way to achieve that is to declare the output register as
+      // same-as-first.
+      !cont->IsDeoptimize()) {
+    outputs[output_count++] = g.DefineAsRegister(node);
+  } else {
+    outputs[output_count++] = g.DefineSameAsFirst(node);
+  }
+
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0u, input_count);
+  DCHECK_NE(0u, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  opcode = cont->Encode(opcode);
+
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
+}
+
+void VisitBin32op(InstructionSelector* selector, Node* node, ArchOpcode opcode,
+                  OperandModes operand_mode) {
+  FlagsContinuation cont;
+  VisitBin32op(selector, node, opcode, operand_mode, &cont);
+}
+
 }  // namespace
 
 void InstructionSelector::VisitLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   S390OperandGenerator g(this);
-  ArchOpcode opcode = kArchNop;
-  switch (load_rep.representation()) {
-    case MachineRepresentation::kFloat32:
-      opcode = kS390_LoadFloat32;
-      break;
-    case MachineRepresentation::kFloat64:
-      opcode = kS390_LoadDouble;
-      break;
-    case MachineRepresentation::kBit:  // Fall through.
-    case MachineRepresentation::kWord8:
-      opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
-      break;
-    case MachineRepresentation::kWord16:
-      opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
-      break;
-#if !V8_TARGET_ARCH_S390X
-    case MachineRepresentation::kTaggedSigned:   // Fall through.
-    case MachineRepresentation::kTaggedPointer:  // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-#endif
-    case MachineRepresentation::kWord32:
-      opcode = kS390_LoadWordU32;
-      break;
-#if V8_TARGET_ARCH_S390X
-    case MachineRepresentation::kTaggedSigned:   // Fall through.
-    case MachineRepresentation::kTaggedPointer:  // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:
-      opcode = kS390_LoadWord64;
-      break;
-#else
-    case MachineRepresentation::kWord64:    // Fall through.
-#endif
-    case MachineRepresentation::kSimd128:  // Fall through.
-    case MachineRepresentation::kNone:
-      UNREACHABLE();
-      return;
-  }
+  ArchOpcode opcode = SelectLoadOpcode(node);
   InstructionOperand outputs[1];
   outputs[0] = g.DefineAsRegister(node);
   InstructionOperand inputs[3];
@@ -350,7 +679,7 @@
     inputs[input_count++] = g.UseUniqueRegister(base);
     // OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
     // for the store itself, so we must check compatibility with both.
-    if (g.CanBeImmediate(offset, kInt20Imm)) {
+    if (g.CanBeImmediate(offset, OperandMode::kInt20Imm)) {
       inputs[input_count++] = g.UseImmediate(offset);
       addressing_mode = kMode_MRI;
     } else {
@@ -423,6 +752,9 @@
       case MachineRepresentation::kWord64:  // Fall through.
 #endif
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -440,6 +772,11 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
+
 // Architecture supports unaligned access, therefore VisitLoad is used instead
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 
@@ -482,6 +819,9 @@
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -489,7 +829,7 @@
   AddressingMode addressingMode = kMode_MRR;
   Emit(opcode | AddressingModeField::encode(addressingMode),
        g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
-       g.UseOperand(length, kUint32Imm));
+       g.UseOperand(length, OperandMode::kUint32Imm));
 }
 
 void InstructionSelector::VisitCheckedStore(Node* node) {
@@ -529,6 +869,9 @@
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -536,9 +879,10 @@
   AddressingMode addressingMode = kMode_MRR;
   Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
        g.UseRegister(base), g.UseRegister(offset),
-       g.UseOperand(length, kUint32Imm), g.UseRegister(value));
+       g.UseOperand(length, OperandMode::kUint32Imm), g.UseRegister(value));
 }
 
+#if 0
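+// Unused at the moment: VisitWord32And below no longer forms
+// kS390_RotLeftAndMask32, which was the only user of this helper.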
 static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
   int mask_width = base::bits::CountPopulation32(value);
   int mask_msb = base::bits::CountLeadingZeros32(value);
@@ -549,6 +893,7 @@
   *me = mask_lsb;
   return true;
 }
+#endif
 
 #if V8_TARGET_ARCH_S390X
 static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
@@ -564,37 +909,7 @@
 #endif
 
 void InstructionSelector::VisitWord32And(Node* node) {
-  S390OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  int mb = 0;
-  int me = 0;
-  if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
-    int sh = 0;
-    Node* left = m.left().node();
-    if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
-        CanCover(node, left)) {
-      Int32BinopMatcher mleft(m.left().node());
-      if (mleft.right().IsInRange(0, 31)) {
-        left = mleft.left().node();
-        sh = mleft.right().Value();
-        if (m.left().IsWord32Shr()) {
-          // Adjust the mask such that it doesn't include any rotated bits.
-          if (mb > 31 - sh) mb = 31 - sh;
-          sh = (32 - sh) & 0x1f;
-        } else {
-          // Adjust the mask such that it doesn't include any rotated bits.
-          if (me < sh) me = sh;
-        }
-      }
-    }
-    if (mb >= me) {
-      Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
-           g.UseRegister(left), g.TempImmediate(sh), g.TempImmediate(mb),
-           g.TempImmediate(me));
-      return;
-    }
-  }
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_And32, kUint32Imm);
+  VisitBin32op(this, node, kS390_And32, AndOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -646,65 +961,36 @@
       }
     }
   }
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_And64, kUint32Imm);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_And64,
+                                OperandMode::kUint32Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Or(Node* node) {
-  Int32BinopMatcher m(node);
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_Or32, kUint32Imm);
+  VisitBin32op(this, node, kS390_Or32, OrOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitWord64Or(Node* node) {
   Int64BinopMatcher m(node);
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64, kUint32Imm);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64,
+                                OperandMode::kUint32Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Xor(Node* node) {
-  S390OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  if (m.right().Is(-1)) {
-    Emit(kS390_Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
-  } else {
-    VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor32, kUint32Imm);
-  }
+  VisitBin32op(this, node, kS390_Xor32, XorOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitWord64Xor(Node* node) {
-  S390OperandGenerator g(this);
-  Int64BinopMatcher m(node);
-  if (m.right().Is(-1)) {
-    Emit(kS390_Not64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
-  } else {
-    VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64, kUint32Imm);
-  }
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64,
+                                OperandMode::kUint32Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Shl(Node* node) {
-  S390OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
-    Int32BinopMatcher mleft(m.left().node());
-    int sh = m.right().Value();
-    int mb;
-    int me;
-    if (mleft.right().HasValue() &&
-        IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
-      // Adjust the mask such that it doesn't include any rotated bits.
-      if (me < sh) me = sh;
-      if (mb >= me) {
-        Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
-             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
-             g.TempImmediate(mb), g.TempImmediate(me));
-        return;
-      }
-    }
-  }
-  VisitRRO(this, kS390_ShiftLeft32, node, kShift32Imm);
+  VisitBin32op(this, node, kS390_ShiftLeft32, ShiftOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -747,32 +1033,12 @@
       }
     }
   }
-  VisitRRO(this, kS390_ShiftLeft64, node, kShift64Imm);
+  VisitRRO(this, kS390_ShiftLeft64, node, OperandMode::kShift64Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Shr(Node* node) {
-  S390OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
-    Int32BinopMatcher mleft(m.left().node());
-    int sh = m.right().Value();
-    int mb;
-    int me;
-    if (mleft.right().HasValue() &&
-        IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
-      // Adjust the mask such that it doesn't include any rotated bits.
-      if (mb > 31 - sh) mb = 31 - sh;
-      sh = (32 - sh) & 0x1f;
-      if (mb >= me) {
-        Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
-             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
-             g.TempImmediate(mb), g.TempImmediate(me));
-        return;
-      }
-    }
-  }
-  VisitRRO(this, kS390_ShiftRight32, node, kShift32Imm);
+  VisitBin32op(this, node, kS390_ShiftRight32, ShiftOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -811,7 +1077,7 @@
       }
     }
   }
-  VisitRRO(this, kS390_ShiftRight64, node, kShift64Imm);
+  VisitRRO(this, kS390_ShiftRight64, node, OperandMode::kShift64Imm);
 }
 #endif
 
@@ -822,16 +1088,20 @@
   if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
     Int32BinopMatcher mleft(m.left().node());
     if (mleft.right().Is(16) && m.right().Is(16)) {
-      Emit(kS390_ExtendSignWord16, g.DefineAsRegister(node),
-           g.UseRegister(mleft.left().node()));
+      bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
+      Emit(kS390_ExtendSignWord16,
+           doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
+           g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
       return;
     } else if (mleft.right().Is(24) && m.right().Is(24)) {
-      Emit(kS390_ExtendSignWord8, g.DefineAsRegister(node),
-           g.UseRegister(mleft.left().node()));
+      bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
+      Emit(kS390_ExtendSignWord8,
+           doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
+           g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
       return;
     }
   }
-  VisitRRO(this, kS390_ShiftRightArith32, node, kShift32Imm);
+  VisitBin32op(this, node, kS390_ShiftRightArith32, ShiftOperandMode);
 }
 
 #if !V8_TARGET_ARCH_S390X
@@ -857,7 +1127,7 @@
     // instruction.
     selector->Emit(opcode2, g.DefineSameAsFirst(node),
                    g.UseRegister(node->InputAt(0)),
-                   g.UseRegister(node->InputAt(2)));
+                   g.UseRegister(node->InputAt(2)), g.TempImmediate(0));
   }
 }
 
@@ -887,7 +1157,8 @@
     // The high word of the result is not used, so we emit the standard 32 bit
     // instruction.
     Emit(kS390_Mul32, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(2)),
+         g.TempImmediate(0));
   }
 }
 
@@ -943,24 +1214,25 @@
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitWord64Sar(Node* node) {
-  VisitRRO(this, kS390_ShiftRightArith64, node, kShift64Imm);
+  VisitRRO(this, kS390_ShiftRightArith64, node, OperandMode::kShift64Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Ror(Node* node) {
-  VisitRRO(this, kS390_RotRight32, node, kShift32Imm);
+  // TODO(john): match dst = ror(src1, src2 + imm)
+  VisitBin32op(this, node, kS390_RotRight32,
+               OperandMode::kAllowRI | OperandMode::kAllowRRR |
+                   OperandMode::kAllowRRI | OperandMode::kShift32Imm);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitWord64Ror(Node* node) {
-  VisitRRO(this, kS390_RotRight64, node, kShift64Imm);
+  VisitRRO(this, kS390_RotRight64, node, OperandMode::kShift64Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Clz(Node* node) {
-  S390OperandGenerator g(this);
-  Emit(kS390_Cntlz32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  VisitRR(this, kS390_Cntlz32, node);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -973,8 +1245,8 @@
 
 void InstructionSelector::VisitWord32Popcnt(Node* node) {
   S390OperandGenerator g(this);
-  Emit(kS390_Popcnt32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  Node* value = node->InputAt(0);
+  Emit(kS390_Popcnt32, g.DefineAsRegister(node), g.UseRegister(value));
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -1023,12 +1295,13 @@
 }
 
 void InstructionSelector::VisitInt32Add(Node* node) {
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm);
+  VisitBin32op(this, node, kS390_Add32, AddOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitInt64Add(Node* node) {
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
+                                OperandMode::kInt32Imm);
 }
 #endif
 
@@ -1036,10 +1309,12 @@
   S390OperandGenerator g(this);
   Int32BinopMatcher m(node);
   if (m.left().Is(0)) {
-    Emit(kS390_Neg32, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
+    Node* right = m.right().node();
+    bool doZeroExt = ZeroExtendsWord32ToWord64(right);
+    Emit(kS390_Neg32, g.DefineAsRegister(node), g.UseRegister(right),
+         g.TempImmediate(doZeroExt));
   } else {
-    VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate);
+    VisitBin32op(this, node, kS390_Sub32, SubOperandMode);
   }
 }
 
@@ -1051,7 +1326,8 @@
     Emit(kS390_Neg64, g.DefineAsRegister(node),
          g.UseRegister(m.right().node()));
   } else {
-    VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate);
+    VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
+                                  OperandMode::kInt32Imm_Negate);
   }
 }
 #endif
@@ -1061,35 +1337,14 @@
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
                   FlagsContinuation* cont);
-void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
-                              FlagsContinuation* cont) {
-  S390OperandGenerator g(selector);
-  Int32BinopMatcher m(node);
-  InstructionOperand result_operand = g.DefineAsRegister(node);
-  InstructionOperand high32_operand = g.TempRegister();
-  InstructionOperand temp_operand = g.TempRegister();
-  {
-    InstructionOperand outputs[] = {result_operand, high32_operand};
-    InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
-                                   g.UseRegister(m.right().node())};
-    selector->Emit(kS390_Mul32WithHigh32, 2, outputs, 2, inputs);
-  }
-  {
-    InstructionOperand shift_31 = g.UseImmediate(31);
-    InstructionOperand outputs[] = {temp_operand};
-    InstructionOperand inputs[] = {result_operand, shift_31};
-    selector->Emit(kS390_ShiftRightArith32, 1, outputs, 2, inputs);
-  }
 
-  VisitCompare(selector, kS390_Cmp32, high32_operand, temp_operand, cont);
-}
-
+#if V8_TARGET_ARCH_S390X
 void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   S390OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
   Node* right = m.right().node();
-  if (g.CanBeImmediate(right, kInt32Imm)) {
+  if (g.CanBeImmediate(right, OperandMode::kInt32Imm)) {
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
@@ -1100,17 +1355,18 @@
                    g.Use(right));
   }
 }
+#endif
 
 }  // namespace
 
 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
-    return EmitInt32MulWithOverflow(this, node, &cont);
+    return VisitBin32op(this, node, kS390_Mul32WithOverflow,
+                        OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
+                        &cont);
   }
-  VisitMul(this, node, kS390_Mul32);
-  // FlagsContinuation cont;
-  // EmitInt32MulWithOverflow(this, node, &cont);
+  VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
 }
 
 void InstructionSelector::VisitInt32Mul(Node* node) {
@@ -1118,14 +1374,20 @@
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
   Node* right = m.right().node();
-  if (g.CanBeImmediate(right, kInt32Imm) &&
+  if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
       base::bits::IsPowerOfTwo32(g.GetImmediate(right))) {
     int power = 31 - base::bits::CountLeadingZeros32(g.GetImmediate(right));
-    Emit(kS390_ShiftLeft32, g.DefineSameAsFirst(node), g.UseRegister(left),
-         g.UseImmediate(power));
+    bool doZeroExt = !ZeroExtendsWord32ToWord64(left);
+    InstructionOperand dst =
+        (doZeroExt && CpuFeatures::IsSupported(DISTINCT_OPS))
+            ? g.DefineAsRegister(node)
+            : g.DefineSameAsFirst(node);
+
+    Emit(kS390_ShiftLeft32, dst, g.UseRegister(left), g.UseImmediate(power),
+         g.TempImmediate(doZeroExt));
     return;
   }
-  VisitMul(this, node, kS390_Mul32);
+  VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -1134,7 +1396,7 @@
   Int64BinopMatcher m(node);
   Node* left = m.left().node();
   Node* right = m.right().node();
-  if (g.CanBeImmediate(right, kInt32Imm) &&
+  if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
       base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
     int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
     Emit(kS390_ShiftLeft64, g.DefineSameAsFirst(node), g.UseRegister(left),
@@ -1146,31 +1408,18 @@
 #endif
 
 void InstructionSelector::VisitInt32MulHigh(Node* node) {
-  S390OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  Node* left = m.left().node();
-  Node* right = m.right().node();
-  if (g.CanBeBetterLeftOperand(right)) {
-    std::swap(left, right);
-  }
-  Emit(kS390_MulHigh32, g.DefineAsRegister(node), g.UseRegister(left),
-       g.Use(right));
+  VisitBin32op(this, node, kS390_MulHigh32,
+               OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps);
 }
 
 void InstructionSelector::VisitUint32MulHigh(Node* node) {
-  S390OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  Node* left = m.left().node();
-  Node* right = m.right().node();
-  if (g.CanBeBetterLeftOperand(right)) {
-    std::swap(left, right);
-  }
-  Emit(kS390_MulHighU32, g.DefineAsRegister(node), g.UseRegister(left),
-       g.Use(right));
+  VisitBin32op(this, node, kS390_MulHighU32,
+               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
 }
 
 void InstructionSelector::VisitInt32Div(Node* node) {
-  VisitRRR(this, kS390_Div32, node);
+  VisitBin32op(this, node, kS390_Div32,
+               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -1180,7 +1429,8 @@
 #endif
 
 void InstructionSelector::VisitUint32Div(Node* node) {
-  VisitRRR(this, kS390_DivU32, node);
+  VisitBin32op(this, node, kS390_DivU32,
+               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -1190,7 +1440,8 @@
 #endif
 
 void InstructionSelector::VisitInt32Mod(Node* node) {
-  VisitRRR(this, kS390_Mod32, node);
+  VisitBin32op(this, node, kS390_Mod32,
+               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -1200,7 +1451,8 @@
 #endif
 
 void InstructionSelector::VisitUint32Mod(Node* node) {
-  VisitRRR(this, kS390_ModU32, node);
+  VisitBin32op(this, node, kS390_ModU32,
+               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -1264,7 +1516,13 @@
 }
 
 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
-  // TODO(mbrandy): inspect input to see if nop is appropriate.
+  S390OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  if (ZeroExtendsWord32ToWord64(value)) {
+    // The producing instruction is known to zero-extend its 32-bit result to
+    // 64 bits, so the explicit zero-extension is a no-op.
+    return EmitIdentity(node);
+  }
   VisitRR(this, kS390_Uint32ToUint64, node);
 }
 #endif
@@ -1470,46 +1728,46 @@
 }
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  OperandModes mode = AddOperandMode;
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
-    return VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm,
-                                         &cont);
+    return VisitBin32op(this, node, kS390_Add32, mode, &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm, &cont);
+  VisitBin32op(this, node, kS390_Add32, mode, &cont);
 }
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  OperandModes mode = SubOperandMode;
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
-    return VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32,
-                                         kInt32Imm_Negate, &cont);
+    return VisitBin32op(this, node, kS390_Sub32, mode, &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate,
-                                &cont);
+  VisitBin32op(this, node, kS390_Sub32, mode, &cont);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
-    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm,
-                                         &cont);
+    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
+                                         OperandMode::kInt32Imm, &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm, &cont);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, OperandMode::kInt32Imm,
+                                &cont);
 }
 
 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
-                                         kInt32Imm_Negate, &cont);
+                                         OperandMode::kInt32Imm_Negate, &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate,
-                                &cont);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
+                                OperandMode::kInt32Imm_Negate, &cont);
 }
 #endif
 
@@ -1539,68 +1797,204 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, InstructionCode opcode,
+                          FlagsContinuation* cont);
+
+void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
+                      Node* node, Node* value, FlagsContinuation* cont,
+                      bool discard_output = false);
+
 // Shared routine for multiple word compare operations.
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont,
-                      bool commutative, ImmediateMode immediate_mode) {
+                      OperandModes immediate_mode) {
   S390OperandGenerator g(selector);
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  // Match immediates on left or right side of comparison.
-  if (g.CanBeImmediate(right, immediate_mode)) {
-    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
-                 cont);
-  } else if (g.CanBeImmediate(left, immediate_mode)) {
-    if (!commutative) cont->Commute();
-    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
-                 cont);
+  DCHECK(IrOpcode::IsComparisonOpcode(node->opcode()) ||
+         node->opcode() == IrOpcode::kInt32Sub ||
+         node->opcode() == IrOpcode::kInt64Sub);
+
+  InstructionOperand inputs[8];
+  InstructionOperand outputs[1];
+  size_t input_count = 0;
+  size_t output_count = 0;
+
+  // If one of the two inputs is an immediate, make sure it's on the right, or
+  // if one of the two inputs is a memory operand, make sure it's on the left.
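+  // A load can be folded into the compare only if no other effectful
+  // operation intervenes; for branches the flags are consumed at the branch,
+  // so take the effect level from there.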
+  int effect_level = selector->GetEffectLevel(node);
+  if (cont->IsBranch()) {
+    effect_level = selector->GetEffectLevel(
+        cont->true_block()->PredecessorAt(0)->control_input());
+  }
+
+  if ((!g.CanBeImmediate(right, immediate_mode) &&
+       g.CanBeImmediate(left, immediate_mode)) ||
+      (!g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
+       g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
+    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+    std::swap(left, right);
+  }
+
+  // A comparison against zero can use a load-and-test instead.
+  if (g.CanBeImmediate(right, immediate_mode) && g.GetImmediate(right) == 0) {
+    DCHECK(opcode == kS390_Cmp32 || opcode == kS390_Cmp64);
+    ArchOpcode load_and_test = (opcode == kS390_Cmp32)
+                                   ? kS390_LoadAndTestWord32
+                                   : kS390_LoadAndTestWord64;
+    return VisitLoadAndTest(selector, load_and_test, node, left, cont, true);
+  }
+
+  inputs[input_count++] = g.UseRegister(left);
+  if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
+    // Fold the load on the right into the compare as a memory operand.
+    AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
+        right, inputs, &input_count, OpcodeImmMode(opcode));
+    opcode |= AddressingModeField::encode(addressing_mode);
+  } else if (g.CanBeImmediate(right, immediate_mode)) {
+    inputs[input_count++] = g.UseImmediate(right);
   } else {
-    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
-                 cont);
+    inputs[input_count++] = g.UseAnyExceptImmediate(right);
+  }
+
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  } else if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+  } else {
+    DCHECK(cont->IsDeoptimize());
+    // nothing to do
+  }
+
+  DCHECK(input_count <= 8 && output_count <= 1);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
 }
 
 void VisitWord32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
-  ImmediateMode mode = (CompareLogical(cont) ? kUint32Imm : kInt32Imm);
-  VisitWordCompare(selector, node, kS390_Cmp32, cont, false, mode);
+  OperandModes mode =
+      (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
+  VisitWordCompare(selector, node, kS390_Cmp32, cont, mode);
 }
 
 #if V8_TARGET_ARCH_S390X
 void VisitWord64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
-  ImmediateMode mode = (CompareLogical(cont) ? kUint32Imm : kUint32Imm);
-  VisitWordCompare(selector, node, kS390_Cmp64, cont, false, mode);
+  OperandModes mode =
+      (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
+  VisitWordCompare(selector, node, kS390_Cmp64, cont, mode);
 }
 #endif
 
 // Shared routine for multiple float32 compare operations.
 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                          FlagsContinuation* cont) {
-  S390OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-  VisitCompare(selector, kS390_CmpFloat, g.UseRegister(left),
-               g.UseRegister(right), cont);
+  VisitWordCompare(selector, node, kS390_CmpFloat, cont, OperandMode::kNone);
 }
 
 // Shared routine for multiple float64 compare operations.
 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                          FlagsContinuation* cont) {
+  VisitWordCompare(selector, node, kS390_CmpDouble, cont, OperandMode::kNone);
+}
+
+void VisitTestUnderMask(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  DCHECK(node->opcode() == IrOpcode::kWord32And ||
+         node->opcode() == IrOpcode::kWord64And);
+  ArchOpcode opcode =
+      (node->opcode() == IrOpcode::kWord32And) ? kS390_Tst32 : kS390_Tst64;
   S390OperandGenerator g(selector);
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
-  VisitCompare(selector, kS390_CmpDouble, g.UseRegister(left),
-               g.UseRegister(right), cont);
+  if (!g.CanBeImmediate(right, OperandMode::kUint32Imm) &&
+      g.CanBeImmediate(left, OperandMode::kUint32Imm)) {
+    std::swap(left, right);
+  }
+  VisitCompare(selector, opcode, g.UseRegister(left),
+               g.UseOperand(right, OperandMode::kUint32Imm), cont);
+}
+
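+// Emits a load-and-test of |value|, folding a memory operand when possible.
+// When |discard_output| is set only the condition code is needed, so no
+// output register is defined for the value.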
+void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
+                      Node* node, Node* value, FlagsContinuation* cont,
+                      bool discard_output) {
+  static_assert(kS390_LoadAndTestFloat64 - kS390_LoadAndTestWord32 == 3,
+                "LoadAndTest opcodes must be consecutive.");
+
+  // TODO(john.yan): Add support for Float32/Float64.
+  DCHECK(opcode >= kS390_LoadAndTestWord32 &&
+         opcode <= kS390_LoadAndTestWord64);
+
+  S390OperandGenerator g(selector);
+  InstructionOperand inputs[8];
+  InstructionOperand outputs[2];
+  size_t input_count = 0;
+  size_t output_count = 0;
+  bool use_value = false;
+
+  int effect_level = selector->GetEffectLevel(node);
+  if (cont->IsBranch()) {
+    effect_level = selector->GetEffectLevel(
+        cont->true_block()->PredecessorAt(0)->control_input());
+  }
+
+  if (g.CanBeMemoryOperand(opcode, node, value, effect_level)) {
+    // Fold the load into the test as a memory operand.
+    AddressingMode addressing_mode =
+        g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
+    opcode |= AddressingModeField::encode(addressing_mode);
+  } else {
+    inputs[input_count++] = g.UseAnyExceptImmediate(value);
+    use_value = true;
+  }
+
+  if (!discard_output && !use_value) {
+    outputs[output_count++] = g.DefineAsRegister(value);
+  }
+
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  } else if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+  } else {
+    DCHECK(cont->IsDeoptimize());
+    // nothing to do
+  }
+
+  DCHECK(input_count <= 8 && output_count <= 2);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 // Shared routine for word comparisons against zero.
@@ -1618,11 +2012,29 @@
     cont->Negate();
   }
 
+  FlagsCondition fc = cont->condition();
   if (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal:
+      case IrOpcode::kWord32Equal: {
         cont->OverwriteAndNegateIfEqual(kEqual);
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          // Try to combine the branch with a comparison.
+          Node* const user = m.node();
+          Node* const value = m.left().node();
+          if (selector->CanCover(user, value)) {
+            switch (value->opcode()) {
+              case IrOpcode::kInt32Sub:
+                return VisitWord32Compare(selector, value, cont);
+              case IrOpcode::kWord32And:
+                return VisitTestUnderMask(selector, value, cont);
+              default:
+                break;
+            }
+          }
+        }
         return VisitWord32Compare(selector, value, cont);
+      }
       case IrOpcode::kInt32LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWord32Compare(selector, value, cont);
@@ -1636,9 +2048,26 @@
         cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
         return VisitWord32Compare(selector, value, cont);
 #if V8_TARGET_ARCH_S390X
-      case IrOpcode::kWord64Equal:
+      case IrOpcode::kWord64Equal: {
         cont->OverwriteAndNegateIfEqual(kEqual);
+        Int64BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          // Try to combine the branch with a comparison.
+          Node* const user = m.node();
+          Node* const value = m.left().node();
+          if (selector->CanCover(user, value)) {
+            switch (value->opcode()) {
+              case IrOpcode::kInt64Sub:
+                return VisitWord64Compare(selector, value, cont);
+              case IrOpcode::kWord64And:
+                return VisitTestUnderMask(selector, value, cont);
+              default:
+                break;
+            }
+          }
+        }
         return VisitWord64Compare(selector, value, cont);
+      }
       case IrOpcode::kInt64LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWord64Compare(selector, value, cont);
@@ -1685,24 +2114,28 @@
             switch (node->opcode()) {
               case IrOpcode::kInt32AddWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int32BinopMatcher>(
-                    selector, node, kS390_Add32, kInt32Imm, cont);
+                return VisitBin32op(selector, node, kS390_Add32, AddOperandMode,
+                                    cont);
               case IrOpcode::kInt32SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int32BinopMatcher>(
-                    selector, node, kS390_Sub32, kInt32Imm_Negate, cont);
+                return VisitBin32op(selector, node, kS390_Sub32, SubOperandMode,
+                                    cont);
               case IrOpcode::kInt32MulWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kNotEqual);
-                return EmitInt32MulWithOverflow(selector, node, cont);
+                return VisitBin32op(
+                    selector, node, kS390_Mul32WithOverflow,
+                    OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
+                    cont);
 #if V8_TARGET_ARCH_S390X
               case IrOpcode::kInt64AddWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop<Int64BinopMatcher>(
-                    selector, node, kS390_Add64, kInt32Imm, cont);
+                    selector, node, kS390_Add64, OperandMode::kInt32Imm, cont);
               case IrOpcode::kInt64SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop<Int64BinopMatcher>(
-                    selector, node, kS390_Sub64, kInt32Imm_Negate, cont);
+                    selector, node, kS390_Sub64, OperandMode::kInt32Imm_Negate,
+                    cont);
 #endif
               default:
                 break;
@@ -1711,53 +2144,77 @@
         }
         break;
       case IrOpcode::kInt32Sub:
-        return VisitWord32Compare(selector, value, cont);
+        if (fc == kNotEqual || fc == kEqual)
+          return VisitWord32Compare(selector, value, cont);
+        break;
       case IrOpcode::kWord32And:
-        return VisitWordCompare(selector, value, kS390_Tst32, cont, true,
-                                kUint32Imm);
-// TODO(mbrandy): Handle?
-// case IrOpcode::kInt32Add:
-// case IrOpcode::kWord32Or:
-// case IrOpcode::kWord32Xor:
-// case IrOpcode::kWord32Sar:
-// case IrOpcode::kWord32Shl:
-// case IrOpcode::kWord32Shr:
-// case IrOpcode::kWord32Ror:
+        return VisitTestUnderMask(selector, value, cont);
+      case IrOpcode::kLoad: {
+        LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+        switch (load_rep.representation()) {
+          case MachineRepresentation::kWord32:
+            if (opcode == kS390_LoadAndTestWord32) {
+              return VisitLoadAndTest(selector, opcode, user, value, cont);
+            }
+            break;
+          default:
+            break;
+        }
+        break;
+      }
+      case IrOpcode::kInt32Add:
+        // Can't handle the overflow case.
+        break;
+      case IrOpcode::kWord32Or:
+        return VisitBin32op(selector, value, kS390_Or32, OrOperandMode, cont);
+      case IrOpcode::kWord32Xor:
+        return VisitBin32op(selector, value, kS390_Xor32, XorOperandMode, cont);
+      case IrOpcode::kWord32Sar:
+      case IrOpcode::kWord32Shl:
+      case IrOpcode::kWord32Shr:
+      case IrOpcode::kWord32Ror:
+        // These do not set the condition code, so there is nothing to combine.
+        break;
 #if V8_TARGET_ARCH_S390X
       case IrOpcode::kInt64Sub:
-        return VisitWord64Compare(selector, value, cont);
+        if (fc == kNotEqual || fc == kEqual)
+          return VisitWord64Compare(selector, value, cont);
+        break;
       case IrOpcode::kWord64And:
-        return VisitWordCompare(selector, value, kS390_Tst64, cont, true,
-                                kUint32Imm);
-// TODO(mbrandy): Handle?
-// case IrOpcode::kInt64Add:
-// case IrOpcode::kWord64Or:
-// case IrOpcode::kWord64Xor:
-// case IrOpcode::kWord64Sar:
-// case IrOpcode::kWord64Shl:
-// case IrOpcode::kWord64Shr:
-// case IrOpcode::kWord64Ror:
+        return VisitTestUnderMask(selector, value, cont);
+      case IrOpcode::kInt64Add:
+        // Can't handle the overflow case.
+        break;
+      case IrOpcode::kWord64Or:
+        // TODO(john.yan): need to handle
+        break;
+      case IrOpcode::kWord64Xor:
+        // TODO(john.yan): need to handle
+        break;
+      case IrOpcode::kWord64Sar:
+      case IrOpcode::kWord64Shl:
+      case IrOpcode::kWord64Shr:
+      case IrOpcode::kWord64Ror:
+        // These do not set the condition code, so there is nothing to combine.
+        break;
 #endif
       default:
         break;
     }
   }
 
-  // Branch could not be combined with a compare, emit compare against 0.
-  S390OperandGenerator g(selector);
-  VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
-               cont);
+  // Branch could not be combined with a compare, emit LoadAndTest
+  VisitLoadAndTest(selector, opcode, user, value, cont, true);
 }
 
 void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
                             Node* value, FlagsContinuation* cont) {
-  VisitWordCompareZero(selector, user, value, kS390_Cmp32, cont);
+  VisitWordCompareZero(selector, user, value, kS390_LoadAndTestWord32, cont);
 }
 
 #if V8_TARGET_ARCH_S390X
 void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
                             Node* value, FlagsContinuation* cont) {
-  VisitWordCompareZero(selector, user, value, kS390_Cmp64, cont);
+  VisitWordCompareZero(selector, user, value, kS390_LoadAndTestWord64, cont);
 }
 #endif
 
@@ -1770,14 +2227,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -1797,9 +2269,14 @@
     InstructionOperand index_operand = value_operand;
     if (sw.min_value) {
       index_operand = g.TempRegister();
-      Emit(kS390_Sub32, index_operand, value_operand,
-           g.TempImmediate(sw.min_value));
+      Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
+           value_operand, g.TempImmediate(-sw.min_value));
     }
+#if V8_TARGET_ARCH_S390X
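+    // The table lookup performs 64-bit address arithmetic, so zero-extend
+    // the 32-bit index first.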
+    InstructionOperand index_operand_zero_ext = g.TempRegister();
+    Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
+    index_operand = index_operand_zero_ext;
+#endif
     // Generate a table lookup.
     return EmitTableSwitch(sw, index_operand);
   }
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
index eb3dda8..dcc84b3 100644
--- a/src/compiler/schedule.cc
+++ b/src/compiler/schedule.cc
@@ -407,7 +407,7 @@
       if (!block->deferred()) {
         bool deferred = block->PredecessorCount() > 0;
         for (auto pred : block->predecessors()) {
-          if (!pred->deferred()) {
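+          // Only consider predecessors that come earlier in the RPO order;
+          // this ignores loop backedges.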
+          if (!pred->deferred() && (pred->rpo_number() < block->rpo_number())) {
             deferred = false;
           }
         }
diff --git a/src/compiler/simd-scalar-lowering.cc b/src/compiler/simd-scalar-lowering.cc
index c5a94b4..19ffe93 100644
--- a/src/compiler/simd-scalar-lowering.cc
+++ b/src/compiler/simd-scalar-lowering.cc
@@ -9,6 +9,7 @@
 #include "src/compiler/node-properties.h"
 
 #include "src/compiler/node.h"
+#include "src/objects-inl.h"
 #include "src/wasm/wasm-module.h"
 
 namespace v8 {
@@ -58,6 +59,9 @@
           // that they are processed after all other nodes.
           PreparePhiReplacement(input);
           stack_.push_front({input, 0});
+        } else if (input->opcode() == IrOpcode::kEffectPhi ||
+                   input->opcode() == IrOpcode::kLoop) {
+          stack_.push_front({input, 0});
         } else {
           stack_.push_back({input, 0});
         }
@@ -70,12 +74,14 @@
 #define FOREACH_INT32X4_OPCODE(V) \
   V(Int32x4Add)                   \
   V(Int32x4ExtractLane)           \
-  V(CreateInt32x4)
+  V(CreateInt32x4)                \
+  V(Int32x4ReplaceLane)
 
 #define FOREACH_FLOAT32X4_OPCODE(V) \
   V(Float32x4Add)                   \
   V(Float32x4ExtractLane)           \
-  V(CreateFloat32x4)
+  V(CreateFloat32x4)                \
+  V(Float32x4ReplaceLane)
 
 void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
   switch (node->opcode()) {
@@ -102,7 +108,7 @@
   // In function calls, the simd128 types are passed as 4 Int32 types. The
   // parameters are typecast to the types as needed for various operations.
   int result = old_index;
-  for (int i = 0; i < old_index; i++) {
+  for (int i = 0; i < old_index; ++i) {
     if (signature->GetParam(i) == MachineRepresentation::kSimd128) {
       result += 3;
     }
@@ -123,7 +129,7 @@
 static int GetReturnCountAfterLowering(
     Signature<MachineRepresentation>* signature) {
   int result = static_cast<int>(signature->return_count());
-  for (int i = 0; i < static_cast<int>(signature->return_count()); i++) {
+  for (int i = 0; i < static_cast<int>(signature->return_count()); ++i) {
     if (signature->GetReturn(i) == MachineRepresentation::kSimd128) {
       result += 3;
     }
@@ -131,6 +137,100 @@
   return result;
 }
 
+void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices) {
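+  // Lane i is accessed at byte offset i * kLaneWidth from the original
+  // index.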
+  new_indices[0] = index;
+  for (size_t i = 1; i < kMaxLanes; ++i) {
+    new_indices[i] = graph()->NewNode(machine()->Int32Add(), index,
+                                      graph()->NewNode(common()->Int32Constant(
+                                          static_cast<int>(i) * kLaneWidth)));
+  }
+}
+
+void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
+                                     const Operator* load_op) {
+  if (rep == MachineRepresentation::kSimd128) {
+    Node* base = node->InputAt(0);
+    Node* index = node->InputAt(1);
+    Node* indices[kMaxLanes];
+    GetIndexNodes(index, indices);
+    Node* rep_nodes[kMaxLanes];
+    rep_nodes[0] = node;
+    NodeProperties::ChangeOp(rep_nodes[0], load_op);
+    if (node->InputCount() > 2) {
+      DCHECK(node->InputCount() > 3);
+      Node* effect_input = node->InputAt(2);
+      Node* control_input = node->InputAt(3);
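+      // Wire the loads into the effect chain from the last lane to the
+      // first, so the original node (lane 0) remains the effect seen by the
+      // rest of the graph.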
+      rep_nodes[3] = graph()->NewNode(load_op, base, indices[3], effect_input,
+                                      control_input);
+      rep_nodes[2] = graph()->NewNode(load_op, base, indices[2], rep_nodes[3],
+                                      control_input);
+      rep_nodes[1] = graph()->NewNode(load_op, base, indices[1], rep_nodes[2],
+                                      control_input);
+      rep_nodes[0]->ReplaceInput(2, rep_nodes[1]);
+    } else {
+      for (size_t i = 1; i < kMaxLanes; ++i) {
+        rep_nodes[i] = graph()->NewNode(load_op, base, indices[i]);
+      }
+    }
+    ReplaceNode(node, rep_nodes);
+  } else {
+    DefaultLowering(node);
+  }
+}
+
+void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
+                                      const Operator* store_op,
+                                      SimdType rep_type) {
+  if (rep == MachineRepresentation::kSimd128) {
+    Node* base = node->InputAt(0);
+    Node* index = node->InputAt(1);
+    Node* indices[kMaxLanes];
+    GetIndexNodes(index, indices);
+    DCHECK(node->InputCount() > 2);
+    Node* value = node->InputAt(2);
+    DCHECK(HasReplacement(1, value));
+    Node* rep_nodes[kMaxLanes];
+    rep_nodes[0] = node;
+    Node** rep_inputs = GetReplacementsWithType(value, rep_type);
+    rep_nodes[0]->ReplaceInput(2, rep_inputs[0]);
+    NodeProperties::ChangeOp(node, store_op);
+    if (node->InputCount() > 3) {
+      DCHECK(node->InputCount() > 4);
+      Node* effect_input = node->InputAt(3);
+      Node* control_input = node->InputAt(4);
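+      // As with loads, wire the stores into the effect chain from the last
+      // lane to the first.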
+      rep_nodes[3] = graph()->NewNode(store_op, base, indices[3], rep_inputs[3],
+                                      effect_input, control_input);
+      rep_nodes[2] = graph()->NewNode(store_op, base, indices[2], rep_inputs[2],
+                                      rep_nodes[3], control_input);
+      rep_nodes[1] = graph()->NewNode(store_op, base, indices[1], rep_inputs[1],
+                                      rep_nodes[2], control_input);
+      rep_nodes[0]->ReplaceInput(3, rep_nodes[1]);
+
+    } else {
+      for (size_t i = 1; i < kMaxLanes; ++i) {
+        rep_nodes[i] =
+            graph()->NewNode(store_op, base, indices[i], rep_inputs[i]);
+      }
+    }
+
+    ReplaceNode(node, rep_nodes);
+  } else {
+    DefaultLowering(node);
+  }
+}
+
+void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType rep_type,
+                                       const Operator* op) {
+  DCHECK(node->InputCount() == 2);
+  Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
+  Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
+  Node* rep_node[kMaxLanes];
+  for (int i = 0; i < kMaxLanes; ++i) {
+    rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+  }
+  ReplaceNode(node, rep_node);
+}
+
 void SimdScalarLowering::LowerNode(Node* node) {
   SimdType rep_type = ReplacementType(node);
   switch (node->opcode()) {
@@ -159,13 +259,13 @@
           NodeProperties::ChangeOp(node, common()->Parameter(new_index));
 
           Node* new_node[kMaxLanes];
-          for (int i = 0; i < kMaxLanes; i++) {
+          for (int i = 0; i < kMaxLanes; ++i) {
             new_node[i] = nullptr;
           }
           new_node[0] = node;
           if (signature()->GetParam(old_index) ==
               MachineRepresentation::kSimd128) {
-            for (int i = 1; i < kMaxLanes; i++) {
+            for (int i = 1; i < kMaxLanes; ++i) {
               new_node[i] = graph()->NewNode(common()->Parameter(new_index + i),
                                              graph()->start());
             }
@@ -175,6 +275,57 @@
       }
       break;
     }
+    case IrOpcode::kLoad: {
+      MachineRepresentation rep =
+          LoadRepresentationOf(node->op()).representation();
+      const Operator* load_op;
+      if (rep_type == SimdType::kInt32) {
+        load_op = machine()->Load(MachineType::Int32());
+      } else {  // rep_type == SimdType::kFloat32, so load_op is always set.
+        load_op = machine()->Load(MachineType::Float32());
+      }
+      LowerLoadOp(rep, node, load_op);
+      break;
+    }
+    case IrOpcode::kUnalignedLoad: {
+      MachineRepresentation rep =
+          UnalignedLoadRepresentationOf(node->op()).representation();
+      const Operator* load_op;
+      if (rep_type == SimdType::kInt32) {
+        load_op = machine()->UnalignedLoad(MachineType::Int32());
+      } else {  // rep_type == SimdType::kFloat32, so load_op is always set.
+        load_op = machine()->UnalignedLoad(MachineType::Float32());
+      }
+      LowerLoadOp(rep, node, load_op);
+      break;
+    }
+    case IrOpcode::kStore: {
+      MachineRepresentation rep =
+          StoreRepresentationOf(node->op()).representation();
+      WriteBarrierKind write_barrier_kind =
+          StoreRepresentationOf(node->op()).write_barrier_kind();
+      const Operator* store_op;
+      if (rep_type == SimdType::kInt32) {
+        store_op = machine()->Store(StoreRepresentation(
+            MachineRepresentation::kWord32, write_barrier_kind));
+      } else {
+        store_op = machine()->Store(StoreRepresentation(
+            MachineRepresentation::kFloat32, write_barrier_kind));
+      }
+      LowerStoreOp(rep, node, store_op, rep_type);
+      break;
+    }
+    case IrOpcode::kUnalignedStore: {
+      MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
+      const Operator* store_op;
+      if (rep_type == SimdType::kInt32) {
+        store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
+      } else {
+        store_op = machine()->UnalignedStore(MachineRepresentation::kFloat32);
+      }
+      LowerStoreOp(rep, node, store_op, rep_type);
+      break;
+    }
     case IrOpcode::kReturn: {
       DefaultLowering(node);
       int new_return_count = GetReturnCountAfterLowering(signature());
@@ -200,7 +351,7 @@
           descriptor->GetReturnType(0) == MachineType::Simd128()) {
         // We access the additional return values through projections.
         Node* rep_node[kMaxLanes];
-        for (int i = 0; i < kMaxLanes; i++) {
+        for (int i = 0; i < kMaxLanes; ++i) {
           rep_node[i] =
               graph()->NewNode(common()->Projection(i), node, graph()->start());
         }
@@ -214,7 +365,7 @@
         // The replacement nodes have already been created, we only have to
         // replace placeholder nodes.
         Node** rep_node = GetReplacements(node);
-        for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+        for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
           Node** rep_input =
               GetReplacementsWithType(node->InputAt(i), rep_type);
           for (int j = 0; j < kMaxLanes; j++) {
@@ -226,75 +377,51 @@
       }
       break;
     }
-
     case IrOpcode::kInt32x4Add: {
-      DCHECK(node->InputCount() == 2);
-      Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
-      Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
-      Node* rep_node[kMaxLanes];
-      for (int i = 0; i < kMaxLanes; i++) {
-        rep_node[i] =
-            graph()->NewNode(machine()->Int32Add(), rep_left[i], rep_right[i]);
-      }
-      ReplaceNode(node, rep_node);
+      LowerBinaryOp(node, rep_type, machine()->Int32Add());
       break;
     }
-
-    case IrOpcode::kCreateInt32x4: {
-      Node* rep_node[kMaxLanes];
-      for (int i = 0; i < kMaxLanes; i++) {
-        DCHECK(!HasReplacement(1, node->InputAt(i)));
-        rep_node[i] = node->InputAt(i);
-      }
-      ReplaceNode(node, rep_node);
-      break;
-    }
-
-    case IrOpcode::kInt32x4ExtractLane: {
-      Node* laneNode = node->InputAt(1);
-      DCHECK_EQ(laneNode->opcode(), IrOpcode::kInt32Constant);
-      int32_t lane = OpParameter<int32_t>(laneNode);
-      Node* rep_node[kMaxLanes] = {
-          GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
-          nullptr, nullptr};
-      ReplaceNode(node, rep_node);
-      break;
-    }
-
     case IrOpcode::kFloat32x4Add: {
-      DCHECK(node->InputCount() == 2);
-      Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
-      Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
-      Node* rep_node[kMaxLanes];
-      for (int i = 0; i < kMaxLanes; i++) {
-        rep_node[i] = graph()->NewNode(machine()->Float32Add(), rep_left[i],
-                                       rep_right[i]);
-      }
-      ReplaceNode(node, rep_node);
+      LowerBinaryOp(node, rep_type, machine()->Float32Add());
       break;
     }
-
+    case IrOpcode::kCreateInt32x4:
     case IrOpcode::kCreateFloat32x4: {
       Node* rep_node[kMaxLanes];
-      for (int i = 0; i < kMaxLanes; i++) {
-        DCHECK(!HasReplacement(1, node->InputAt(i)));
-        rep_node[i] = node->InputAt(i);
+      for (int i = 0; i < kMaxLanes; ++i) {
+        if (HasReplacement(0, node->InputAt(i))) {
+          rep_node[i] = GetReplacements(node->InputAt(i))[0];
+        } else {
+          rep_node[i] = node->InputAt(i);
+        }
       }
       ReplaceNode(node, rep_node);
       break;
     }
-
+    case IrOpcode::kInt32x4ExtractLane:
     case IrOpcode::kFloat32x4ExtractLane: {
-      Node* laneNode = node->InputAt(1);
-      DCHECK_EQ(laneNode->opcode(), IrOpcode::kInt32Constant);
-      int32_t lane = OpParameter<int32_t>(laneNode);
+      int32_t lane = OpParameter<int32_t>(node);
       Node* rep_node[kMaxLanes] = {
           GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
           nullptr, nullptr};
       ReplaceNode(node, rep_node);
       break;
     }
-
+    case IrOpcode::kInt32x4ReplaceLane:
+    case IrOpcode::kFloat32x4ReplaceLane: {
+      DCHECK_EQ(2, node->InputCount());
+      Node* repNode = node->InputAt(1);
+      int32_t lane = OpParameter<int32_t>(node);
+      DCHECK(lane >= 0 && lane <= 3);
+      Node** rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
+      if (HasReplacement(0, repNode)) {
+        rep_node[lane] = GetReplacements(repNode)[0];
+      } else {
+        rep_node[lane] = repNode;
+      }
+      ReplaceNode(node, rep_node);
+      break;
+    }
     default: { DefaultLowering(node); }
   }
 }
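
// A minimal sketch of the LowerBinaryOp helper that the kInt32x4Add and
// kFloat32x4Add cases above now call, reconstructed from the per-lane loops
// deleted in this hunk; the actual helper in simd-scalar-lowering.cc may
// differ in detail.
void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType rep_type,
                                       const Operator* op) {
  DCHECK_EQ(2, node->InputCount());
  Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
  Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
  Node* rep_node[kMaxLanes];
  for (int i = 0; i < kMaxLanes; ++i) {
    // One scalar operation per lane, e.g. machine()->Int32Add() for
    // kInt32x4Add and machine()->Float32Add() for kFloat32x4Add.
    rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
  }
  ReplaceNode(node, rep_node);
}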
@@ -322,7 +449,7 @@
   DCHECK(new_node[0] != nullptr ||
          (new_node[1] == nullptr && new_node[2] == nullptr &&
           new_node[3] == nullptr));
-  for (int i = 0; i < kMaxLanes; i++) {
+  for (int i = 0; i < kMaxLanes; ++i) {
     replacements_[old->id()].node[i] = new_node[i];
   }
 }
@@ -348,7 +475,7 @@
   }
   Node** result = zone()->NewArray<Node*>(kMaxLanes);
   if (ReplacementType(node) == SimdType::kInt32 && type == SimdType::kFloat32) {
-    for (int i = 0; i < kMaxLanes; i++) {
+    for (int i = 0; i < kMaxLanes; ++i) {
       if (replacements[i] != nullptr) {
         result[i] = graph()->NewNode(machine()->BitcastInt32ToFloat32(),
                                      replacements[i]);
@@ -357,7 +484,7 @@
       }
     }
   } else {
-    for (int i = 0; i < kMaxLanes; i++) {
+    for (int i = 0; i < kMaxLanes; ++i) {
       if (replacements[i] != nullptr) {
         result[i] = graph()->NewNode(machine()->BitcastFloat32ToInt32(),
                                      replacements[i]);
@@ -379,17 +506,17 @@
     int value_count = phi->op()->ValueInputCount();
     SimdType type = ReplacementType(phi);
     Node** inputs_rep[kMaxLanes];
-    for (int i = 0; i < kMaxLanes; i++) {
+    for (int i = 0; i < kMaxLanes; ++i) {
       inputs_rep[i] = zone()->NewArray<Node*>(value_count + 1);
       inputs_rep[i][value_count] = NodeProperties::GetControlInput(phi, 0);
     }
-    for (int i = 0; i < value_count; i++) {
+    for (int i = 0; i < value_count; ++i) {
       for (int j = 0; j < kMaxLanes; j++) {
         inputs_rep[j][i] = placeholder_;
       }
     }
     Node* rep_nodes[kMaxLanes];
-    for (int i = 0; i < kMaxLanes; i++) {
+    for (int i = 0; i < kMaxLanes; ++i) {
       if (type == SimdType::kInt32) {
         rep_nodes[i] = graph()->NewNode(
             common()->Phi(MachineRepresentation::kWord32, value_count),
diff --git a/src/compiler/simd-scalar-lowering.h b/src/compiler/simd-scalar-lowering.h
index 39449f4..c795c6b 100644
--- a/src/compiler/simd-scalar-lowering.h
+++ b/src/compiler/simd-scalar-lowering.h
@@ -31,6 +31,7 @@
   enum class SimdType : uint8_t { kInt32, kFloat32 };
 
   static const int kMaxLanes = 4;
+  static const int kLaneWidth = 16 / kMaxLanes;
 
   struct Replacement {
     Node* node[kMaxLanes];
@@ -53,6 +54,12 @@
   SimdType ReplacementType(Node* node);
   void PreparePhiReplacement(Node* phi);
   void SetLoweredType(Node* node, Node* output);
+  void GetIndexNodes(Node* index, Node** new_indices);
+  void LowerLoadOp(MachineRepresentation rep, Node* node,
+                   const Operator* load_op);
+  void LowerStoreOp(MachineRepresentation rep, Node* node,
+                    const Operator* store_op, SimdType rep_type);
+  void LowerBinaryOp(Node* node, SimdType rep_type, const Operator* op);
 
   struct NodeState {
     Node* node;
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index c90d743..4acc77f 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -138,7 +138,10 @@
       return UseInfo::TruncatingWord32();
     case MachineRepresentation::kBit:
       return UseInfo::Bool();
-    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kNone:
       break;
   }
@@ -170,6 +173,7 @@
 }
 
 void ChangeToPureOp(Node* node, const Operator* new_op) {
+  DCHECK(new_op->HasProperty(Operator::kPure));
   if (node->op()->EffectInputCount() > 0) {
     DCHECK_LT(0, node->op()->ControlInputCount());
     // Disconnect the node from effect and control chains.
@@ -209,8 +213,30 @@
 
 #endif  // DEBUG
 
-}  // namespace
+bool CanOverflowSigned32(const Operator* op, Type* left, Type* right,
+                         Zone* type_zone) {
+  // We assume the inputs are checked Signed32 (or known statically
+  // to be Signed32). Technically, the inputs could also be minus zero, but
+  // that cannot cause overflow.
+  left = Type::Intersect(left, Type::Signed32(), type_zone);
+  right = Type::Intersect(right, Type::Signed32(), type_zone);
+  if (!left->IsInhabited() || !right->IsInhabited()) return false;
+  switch (op->opcode()) {
+    case IrOpcode::kSpeculativeNumberAdd:
+      return (left->Max() + right->Max() > kMaxInt) ||
+             (left->Min() + right->Min() < kMinInt);
 
+    case IrOpcode::kSpeculativeNumberSubtract:
+      return (left->Max() - right->Min() > kMaxInt) ||
+             (left->Min() - right->Max() < kMinInt);
+
+    default:
+      UNREACHABLE();
+  }
+  return true;
+}
+
+}  // namespace
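
// A standalone illustration of the interval arithmetic in CanOverflowSigned32
// above, assuming kMinInt/kMaxInt are the int32_t limits; it mirrors the
// SpeculativeNumberAdd case.
bool AddCanOverflowSigned32(double left_min, double left_max,
                            double right_min, double right_max) {
  const double kMaxInt32 = 2147483647.0;   // INT32_MAX
  const double kMinInt32 = -2147483648.0;  // INT32_MIN
  // The sum of two Signed32 ranges escapes Signed32 iff one of the two
  // extreme sums does, e.g. [0, 2^30] + [0, 2^30] reaches 2^31 > kMaxInt32,
  // so such an add must keep its overflow check, while two [0, 2^29] ranges
  // cannot overflow and the add may be lowered to a pure Int32Add.
  return (left_max + right_max > kMaxInt32) ||
         (left_min + right_min < kMinInt32);
}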
 
 class RepresentationSelector {
  public:
@@ -675,6 +701,11 @@
            GetUpperBound(node->InputAt(1))->Is(type);
   }
 
+  bool IsNodeRepresentationTagged(Node* node) {
+    MachineRepresentation representation = GetInfo(node)->representation();
+    return IsAnyTagged(representation);
+  }
+
   bool OneInputCannotBe(Node* node, Type* type) {
     DCHECK_EQ(2, node->op()->ValueInputCount());
     return !GetUpperBound(node->InputAt(0))->Maybe(type) ||
@@ -867,6 +898,7 @@
   // Helper for handling selects.
   void VisitSelect(Node* node, Truncation truncation,
                    SimplifiedLowering* lowering) {
+    DCHECK(TypeOf(node->InputAt(0))->Is(Type::Boolean()));
     ProcessInput(node, 0, UseInfo::Bool());
 
     MachineRepresentation output =
@@ -953,7 +985,7 @@
     }
   }
 
-  MachineSemantic DeoptValueSemanticOf(Type* type) {
+  static MachineSemantic DeoptValueSemanticOf(Type* type) {
     // We only need signedness to do deopt correctly.
     if (type->Is(Type::Signed32())) {
       return MachineSemantic::kInt32;
@@ -964,6 +996,29 @@
     }
   }
 
+  static MachineType DeoptMachineTypeOf(MachineRepresentation rep, Type* type) {
+    if (!type->IsInhabited()) {
+      return MachineType::None();
+    }
+    // TODO(turbofan): Special treatment for ExternalPointer here,
+    // to avoid incompatible truncations. We really need a story
+    // for the JSFunction::entry field.
+    if (type->Is(Type::ExternalPointer())) {
+      return MachineType::Pointer();
+    }
+    // Do not distinguish between various Tagged variations.
+    if (IsAnyTagged(rep)) {
+      return MachineType::AnyTagged();
+    }
+    MachineType machine_type(rep, DeoptValueSemanticOf(type));
+    DCHECK(machine_type.representation() != MachineRepresentation::kWord32 ||
+           machine_type.semantic() == MachineSemantic::kInt32 ||
+           machine_type.semantic() == MachineSemantic::kUint32);
+    DCHECK(machine_type.representation() != MachineRepresentation::kBit ||
+           type->Is(Type::Boolean()));
+    return machine_type;
+  }
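+  // Illustrative mappings implied by the branches above: an uninhabited type
+  // yields MachineType::None(); Type::ExternalPointer() yields
+  // MachineType::Pointer(); any tagged representation collapses to
+  // MachineType::AnyTagged(); and (kWord32, Type::Signed32()) combines into
+  // MachineType::Int32() via DeoptValueSemanticOf.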
+
   void VisitStateValues(Node* node) {
     if (propagate()) {
       for (int i = 0; i < node->InputCount(); i++) {
@@ -976,20 +1031,12 @@
               ZoneVector<MachineType>(node->InputCount(), zone);
       for (int i = 0; i < node->InputCount(); i++) {
         Node* input = node->InputAt(i);
-        NodeInfo* input_info = GetInfo(input);
-        Type* input_type = TypeOf(input);
-        MachineRepresentation rep = input_type->IsInhabited()
-                                        ? input_info->representation()
-                                        : MachineRepresentation::kNone;
-        MachineType machine_type(rep, DeoptValueSemanticOf(input_type));
-        DCHECK(machine_type.representation() !=
-                   MachineRepresentation::kWord32 ||
-               machine_type.semantic() == MachineSemantic::kInt32 ||
-               machine_type.semantic() == MachineSemantic::kUint32);
-        (*types)[i] = machine_type;
+        (*types)[i] =
+            DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
       }
-      NodeProperties::ChangeOp(node,
-                               jsgraph_->common()->TypedStateValues(types));
+      SparseInputMask mask = SparseInputMaskOf(node->op());
+      NodeProperties::ChangeOp(
+          node, jsgraph_->common()->TypedStateValues(types, mask));
     }
     SetOutput(node, MachineRepresentation::kTagged);
   }
@@ -1002,9 +1049,14 @@
         // TODO(turbofan): Special treatment for ExternalPointer here,
         // to avoid incompatible truncations. We really need a story
         // for the JSFunction::entry field.
-        UseInfo use_info = input_type->Is(Type::ExternalPointer())
-                               ? UseInfo::PointerInt()
-                               : UseInfo::Any();
+        UseInfo use_info = UseInfo::None();
+        if (input_type->IsInhabited()) {
+          if (input_type->Is(Type::ExternalPointer())) {
+            use_info = UseInfo::PointerInt();
+          } else {
+            use_info = UseInfo::Any();
+          }
+        }
         EnqueueInput(node, i, use_info);
       }
     } else if (lower()) {
@@ -1014,26 +1066,8 @@
               ZoneVector<MachineType>(node->InputCount(), zone);
       for (int i = 0; i < node->InputCount(); i++) {
         Node* input = node->InputAt(i);
-        NodeInfo* input_info = GetInfo(input);
-        Type* input_type = TypeOf(input);
-        // TODO(turbofan): Special treatment for ExternalPointer here,
-        // to avoid incompatible truncations. We really need a story
-        // for the JSFunction::entry field.
-        if (input_type->Is(Type::ExternalPointer())) {
-          (*types)[i] = MachineType::Pointer();
-        } else {
-          MachineRepresentation rep = input_type->IsInhabited()
-                                          ? input_info->representation()
-                                          : MachineRepresentation::kNone;
-          MachineType machine_type(rep, DeoptValueSemanticOf(input_type));
-          DCHECK(machine_type.representation() !=
-                     MachineRepresentation::kWord32 ||
-                 machine_type.semantic() == MachineSemantic::kInt32 ||
-                 machine_type.semantic() == MachineSemantic::kUint32);
-          DCHECK(machine_type.representation() != MachineRepresentation::kBit ||
-                 input_type->Is(Type::Boolean()));
-          (*types)[i] = machine_type;
-        }
+        (*types)[i] =
+            DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
       }
       NodeProperties::ChangeOp(node,
                                jsgraph_->common()->TypedObjectState(types));
@@ -1080,17 +1114,14 @@
         return kNoWriteBarrier;
       }
       if (value_type->IsHeapConstant()) {
-        Handle<HeapObject> value_object = value_type->AsHeapConstant()->Value();
-        RootIndexMap root_index_map(jsgraph_->isolate());
-        int root_index = root_index_map.Lookup(*value_object);
-        if (root_index != RootIndexMap::kInvalidRootIndex &&
-            jsgraph_->isolate()->heap()->RootIsImmortalImmovable(root_index)) {
-          // Write barriers are unnecessary for immortal immovable roots.
-          return kNoWriteBarrier;
-        }
-        if (value_object->IsMap()) {
-          // Write barriers for storing maps are cheaper.
-          return kMapWriteBarrier;
+        Heap::RootListIndex root_index;
+        Heap* heap = jsgraph_->isolate()->heap();
+        if (heap->IsRootHandle(value_type->AsHeapConstant()->Value(),
+                               &root_index)) {
+          if (heap->RootIsImmortalImmovable(root_index)) {
+            // Write barriers are unnecessary for immortal immovable roots.
+            return kNoWriteBarrier;
+          }
         }
       }
       if (field_representation == MachineRepresentation::kTaggedPointer ||
@@ -1160,10 +1191,14 @@
     // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we can
     // only eliminate an unused speculative number operation if we know that
     // the inputs are PlainPrimitive, which excludes everything that might
-    // have side effects or throws during a ToNumber conversion.
-    if (BothInputsAre(node, Type::PlainPrimitive())) {
+    // have side effects or throws during a ToNumber conversion. We are only
+    // allowed to perform a number addition if neither input is a String, even
+    // if the value is never used, so we further limit to NumberOrOddball in
+    // order to explicitly exclude String inputs.
+    if (BothInputsAre(node, Type::NumberOrOddball())) {
       if (truncation.IsUnused()) return VisitUnused(node);
     }
+
     if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
         (GetUpperBound(node)->Is(Type::Signed32()) ||
          GetUpperBound(node)->Is(Type::Unsigned32()) ||
@@ -1177,33 +1212,38 @@
     // Try to use type feedback.
     NumberOperationHint hint = NumberOperationHintOf(node->op());
 
-    // Handle the case when no int32 checks on inputs are necessary
-    // (but an overflow check is needed on the output).
-    if (BothInputsAre(node, Type::Signed32()) ||
-        (BothInputsAre(node, Type::Signed32OrMinusZero()) &&
-         NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger))) {
-      // If both the inputs the feedback are int32, use the overflow op.
-      if (hint == NumberOperationHint::kSignedSmall ||
-          hint == NumberOperationHint::kSigned32) {
-        VisitBinop(node, UseInfo::TruncatingWord32(),
-                   MachineRepresentation::kWord32, Type::Signed32());
-        if (lower()) ChangeToInt32OverflowOp(node);
-        return;
-      }
-    }
-
     if (hint == NumberOperationHint::kSignedSmall ||
         hint == NumberOperationHint::kSigned32) {
-      UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint);
-      // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
-      // a minus zero check for the right hand side, since we already
-      // know that the left hand side is a proper Signed32 value,
-      // potentially guarded by a check.
-      UseInfo right_use = CheckedUseInfoAsWord32FromHint(
-          hint, CheckForMinusZeroMode::kDontCheckForMinusZero);
-      VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
-                 Type::Signed32());
-      if (lower()) ChangeToInt32OverflowOp(node);
+      Type* left_feedback_type = TypeOf(node->InputAt(0));
+      Type* right_feedback_type = TypeOf(node->InputAt(1));
+      // Handle the case when no int32 checks on inputs are necessary (but
+      // an overflow check is needed on the output).
+      // TODO(jarin) We should not look at the upper bound because the typer
+      // could have already baked in some feedback into the upper bound.
+      if (BothInputsAre(node, Type::Signed32()) ||
+          (BothInputsAre(node, Type::Signed32OrMinusZero()) &&
+           GetUpperBound(node)->Is(type_cache_.kSafeInteger))) {
+        VisitBinop(node, UseInfo::TruncatingWord32(),
+                   MachineRepresentation::kWord32, Type::Signed32());
+      } else {
+        UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint);
+        // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
+        // a minus zero check for the right hand side, since we already
+        // know that the left hand side is a proper Signed32 value,
+        // potentially guarded by a check.
+        UseInfo right_use = CheckedUseInfoAsWord32FromHint(
+            hint, CheckForMinusZeroMode::kDontCheckForMinusZero);
+        VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
+                   Type::Signed32());
+      }
+      if (lower()) {
+        if (CanOverflowSigned32(node->op(), left_feedback_type,
+                                right_feedback_type, graph_zone())) {
+          ChangeToInt32OverflowOp(node);
+        } else {
+          ChangeToPureOp(node, Int32Op(node));
+        }
+      }
       return;
     }
 
@@ -1392,10 +1432,12 @@
         return;
       }
 
-      case IrOpcode::kBranch:
+      case IrOpcode::kBranch: {
+        DCHECK(TypeOf(node->InputAt(0))->Is(Type::Boolean()));
         ProcessInput(node, 0, UseInfo::Bool());
         EnqueueInput(node, NodeProperties::FirstControlIndex(node));
         return;
+      }
       case IrOpcode::kSwitch:
         ProcessInput(node, 0, UseInfo::TruncatingWord32());
         EnqueueInput(node, NodeProperties::FirstControlIndex(node));
@@ -1550,13 +1592,38 @@
         NumberOperationHint hint = NumberOperationHintOf(node->op());
         switch (hint) {
           case NumberOperationHint::kSignedSmall:
-          case NumberOperationHint::kSigned32:
-            VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
-                       MachineRepresentation::kBit);
-            if (lower()) ChangeToPureOp(node, Int32Op(node));
+          case NumberOperationHint::kSigned32: {
+            if (propagate()) {
+              VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                         MachineRepresentation::kBit);
+            } else if (retype()) {
+              SetOutput(node, MachineRepresentation::kBit, Type::Any());
+            } else {
+              DCHECK(lower());
+              Node* lhs = node->InputAt(0);
+              Node* rhs = node->InputAt(1);
+              if (IsNodeRepresentationTagged(lhs) &&
+                  IsNodeRepresentationTagged(rhs)) {
+                VisitBinop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
+                           MachineRepresentation::kBit);
+                ChangeToPureOp(
+                    node, changer_->TaggedSignedOperatorFor(node->opcode()));
+
+              } else {
+                VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                           MachineRepresentation::kBit);
+                ChangeToPureOp(node, Int32Op(node));
+              }
+            }
             return;
-          case NumberOperationHint::kNumber:
+          }
           case NumberOperationHint::kNumberOrOddball:
+            // Abstract and strict equality don't perform ToNumber conversions
+            // on Oddballs, so make sure we don't accidentally sneak in a
+            // hint with Oddball feedback here.
+            DCHECK_NE(IrOpcode::kSpeculativeNumberEqual, node->opcode());
+          // Fallthrough
+          case NumberOperationHint::kNumber:
             VisitBinop(node, CheckedUseInfoAsFloat64FromHint(hint),
                        MachineRepresentation::kBit);
             if (lower()) ChangeToPureOp(node, Float64Op(node));
@@ -1919,8 +1986,26 @@
         if (BothInputsAre(node, Type::PlainPrimitive())) {
           if (truncation.IsUnused()) return VisitUnused(node);
         }
+        NumberOperationHint hint = NumberOperationHintOf(node->op());
+        Type* rhs_type = GetUpperBound(node->InputAt(1));
+        if (rhs_type->Is(type_cache_.kZeroish) &&
+            (hint == NumberOperationHint::kSignedSmall ||
+             hint == NumberOperationHint::kSigned32) &&
+            !truncation.IsUsedAsWord32()) {
+          // The SignedSmall or Signed32 feedback means that the results that we
+          // have seen so far were of type Unsigned31.  We speculate that this
+          // will continue to hold.  Moreover, since the RHS is 0, the result
+          // will just be the (converted) LHS.
+          VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                     MachineRepresentation::kWord32, Type::Unsigned31());
+          if (lower()) {
+            node->RemoveInput(1);
+            NodeProperties::ChangeOp(node,
+                                     simplified()->CheckedUint32ToInt32());
+          }
+          return;
+        }
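// A sketch of the checked conversion this lowering installs, assuming the
// usual semantics of the CheckedUint32ToInt32 operator: for `x >>> 0` with
// SignedSmall/Signed32 feedback the shift is a no-op, so the node degenerates
// into a uint32 -> int32 conversion that deopts when the value is not
// representable as Signed32.
int32_t CheckedUint32ToInt32Sketch(uint32_t value, bool* deopt) {
  if (value > 0x7fffffffu) {  // not representable as int32 -> deoptimize
    *deopt = true;
    return 0;
  }
  return static_cast<int32_t>(value);
}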
         if (BothInputsAre(node, Type::NumberOrOddball())) {
-          Type* rhs_type = GetUpperBound(node->InputAt(1));
           VisitBinop(node, UseInfo::TruncatingWord32(),
                      UseInfo::TruncatingWord32(),
                      MachineRepresentation::kWord32);
@@ -1929,8 +2014,6 @@
           }
           return;
         }
-        NumberOperationHint hint = NumberOperationHintOf(node->op());
-        Type* rhs_type = GetUpperBound(node->InputAt(1));
         VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
                    MachineRepresentation::kWord32, Type::Unsigned32());
         if (lower()) {
@@ -2156,9 +2239,15 @@
         return VisitBinop(node, UseInfo::AnyTagged(),
                           MachineRepresentation::kTaggedPointer);
       }
-      case IrOpcode::kStringCharCodeAt: {
+      case IrOpcode::kStringCharAt: {
         VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
-                   MachineRepresentation::kWord32);
+                   MachineRepresentation::kTaggedPointer);
+        return;
+      }
+      case IrOpcode::kStringCharCodeAt: {
+        // TODO(turbofan): Allow builtins to return untagged values.
+        VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+                   MachineRepresentation::kTaggedSigned);
         return;
       }
       case IrOpcode::kStringFromCharCode: {
@@ -2171,6 +2260,13 @@
                   MachineRepresentation::kTaggedPointer);
         return;
       }
+      case IrOpcode::kStringIndexOf: {
+        ProcessInput(node, 0, UseInfo::AnyTagged());
+        ProcessInput(node, 1, UseInfo::AnyTagged());
+        ProcessInput(node, 2, UseInfo::TaggedSigned());
+        SetOutput(node, MachineRepresentation::kTaggedSigned);
+        return;
+      }
 
       case IrOpcode::kCheckBounds: {
         Type* index_type = TypeOf(node->InputAt(0));
@@ -2207,6 +2303,17 @@
         SetOutput(node, MachineRepresentation::kNone);
         return;
       }
+      case IrOpcode::kCheckInternalizedString: {
+        if (InputIs(node, Type::InternalizedString())) {
+          VisitUnop(node, UseInfo::AnyTagged(),
+                    MachineRepresentation::kTaggedPointer);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
+                    MachineRepresentation::kTaggedPointer);
+        }
+        return;
+      }
       case IrOpcode::kCheckNumber: {
         if (InputIs(node, Type::Number())) {
           if (truncation.IsUsedAsWord32()) {
@@ -2226,6 +2333,17 @@
         }
         return;
       }
+      case IrOpcode::kCheckReceiver: {
+        if (InputIs(node, Type::Receiver())) {
+          VisitUnop(node, UseInfo::AnyTagged(),
+                    MachineRepresentation::kTaggedPointer);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
+                    MachineRepresentation::kTaggedPointer);
+        }
+        return;
+      }
       case IrOpcode::kCheckSmi: {
         if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
           VisitUnop(node, UseInfo::CheckedSignedSmallAsWord32(),
@@ -2243,7 +2361,7 @@
                     MachineRepresentation::kTaggedPointer);
           if (lower()) DeferReplacement(node, node->InputAt(0));
         } else {
-          VisitUnop(node, UseInfo::AnyTagged(),
+          VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
                     MachineRepresentation::kTaggedPointer);
         }
         return;
@@ -2423,9 +2541,12 @@
         }
         return;
       }
-      case IrOpcode::kObjectIsCallable: {
-        // TODO(turbofan): Add Type::Callable to optimize this?
-        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+      case IrOpcode::kObjectIsDetectableCallable: {
+        VisitObjectIs(node, Type::DetectableCallable(), lowering);
+        return;
+      }
+      case IrOpcode::kObjectIsNonCallable: {
+        VisitObjectIs(node, Type::NonCallable(), lowering);
         return;
       }
       case IrOpcode::kObjectIsNumber: {
@@ -2449,12 +2570,17 @@
         VisitObjectIs(node, Type::Undetectable(), lowering);
         return;
       }
+      case IrOpcode::kNewRestParameterElements:
+      case IrOpcode::kNewUnmappedArgumentsElements: {
+        ProcessRemainingInputs(node, 0);
+        SetOutput(node, MachineRepresentation::kTaggedPointer);
+        return;
+      }
       case IrOpcode::kArrayBufferWasNeutered: {
         VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
         return;
       }
       case IrOpcode::kCheckFloat64Hole: {
-        if (truncation.IsUnused()) return VisitUnused(node);
         CheckFloat64HoleMode mode = CheckFloat64HoleModeOf(node->op());
         ProcessInput(node, 0, UseInfo::TruncatingFloat64());
         ProcessRemainingInputs(node, 1);
@@ -2466,8 +2592,7 @@
         return;
       }
       case IrOpcode::kCheckTaggedHole: {
-        VisitUnop(node, UseInfo::AnyTagged(),
-                  MachineRepresentation::kTaggedPointer);
+        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         return;
       }
       case IrOpcode::kConvertTaggedHoleToUndefined: {
@@ -2562,6 +2687,7 @@
       case IrOpcode::kBeginRegion:
       case IrOpcode::kProjection:
       case IrOpcode::kOsrValue:
+      case IrOpcode::kArgumentsObjectState:
 // All JavaScript operators except JSToNumber have uniform handling.
 #define OPCODE_CASE(name) case IrOpcode::k##name:
         JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
@@ -3276,12 +3402,11 @@
 
 void SimplifiedLowering::DoShift(Node* node, Operator const* op,
                                  Type* rhs_type) {
-  Node* const rhs = NodeProperties::GetValueInput(node, 1);
   if (!rhs_type->Is(type_cache_.kZeroToThirtyOne)) {
+    Node* const rhs = NodeProperties::GetValueInput(node, 1);
     node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
                                            jsgraph()->Int32Constant(0x1f)));
   }
-  DCHECK(op->HasProperty(Operator::kPure));
   ChangeToPureOp(node, op);
 }
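
// Worked example of the rhs masking above: JavaScript takes shift counts
// modulo 32, so a count that is not statically in [0, 31] is And-ed with
// 0x1f first; a shift by 33, for instance, becomes a shift by 33 & 0x1f == 1.
uint32_t ShiftLeftJSSketch(uint32_t lhs, uint32_t rhs) {
  return lhs << (rhs & 0x1f);  // mirrors the inserted Word32And(rhs, 0x1f)
}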
 
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index b8a486d..dcfb485 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -129,6 +129,15 @@
       }
       break;
     }
+    case IrOpcode::kCheckedFloat64ToInt32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue() && IsInt32Double(m.Value())) {
+        Node* value = jsgraph()->Int32Constant(static_cast<int32_t>(m.Value()));
+        ReplaceWithValue(node, value);
+        return Replace(value);
+      }
+      break;
+    }
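// A plausible sketch of the IsInt32Double predicate used above (the real
// helper lives elsewhere in V8): a double folds to an Int32Constant when it
// round-trips through int32_t exactly and is not minus zero. Needs <cmath>
// for std::signbit.
bool IsInt32DoubleSketch(double value) {
  return value >= -2147483648.0 && value <= 2147483647.0 &&
         value == static_cast<double>(static_cast<int32_t>(value)) &&
         !(value == 0.0 && std::signbit(value));
}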
     case IrOpcode::kCheckedTaggedToInt32:
     case IrOpcode::kCheckedTaggedSignedToInt32: {
       NodeMatcher m(node->InputAt(0));
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index 345a2c5..90a4e34 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -92,6 +92,7 @@
   // really only relevant for eliminating loads and they don't care about the
   // write barrier mode.
   return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
+         lhs.map.address() == rhs.map.address() &&
          lhs.machine_type == rhs.machine_type;
 }
 
@@ -118,6 +119,10 @@
     name->Print(os);
     os << ", ";
   }
+  Handle<Map> map;
+  if (access.map.ToHandle(&map)) {
+    os << Brief(*map) << ", ";
+  }
 #endif
   access.type->PrintTo(os);
   os << ", " << access.machine_type << ", " << access.write_barrier_kind << "]";
@@ -229,6 +234,44 @@
   return os;
 }
 
+std::ostream& operator<<(std::ostream& os, CheckMapsFlags flags) {
+  bool empty = true;
+  if (flags & CheckMapsFlag::kTryMigrateInstance) {
+    os << "TryMigrateInstance";
+    empty = false;
+  }
+  if (empty) os << "None";
+  return os;
+}
+
+bool operator==(CheckMapsParameters const& lhs,
+                CheckMapsParameters const& rhs) {
+  return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps();
+}
+
+bool operator!=(CheckMapsParameters const& lhs,
+                CheckMapsParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(CheckMapsParameters const& p) {
+  return base::hash_combine(p.flags(), p.maps());
+}
+
+std::ostream& operator<<(std::ostream& os, CheckMapsParameters const& p) {
+  ZoneHandleSet<Map> const& maps = p.maps();
+  os << p.flags();
+  for (size_t i = 0; i < maps.size(); ++i) {
+    os << ", " << Brief(*maps[i]);
+  }
+  return os;
+}
+
+CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kCheckMaps, op->opcode());
+  return OpParameter<CheckMapsParameters>(op);
+}
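
// A hedged usage sketch of the new parameterized operator; the builder call
// matches the CheckMaps declaration added to simplified-operator.h in this
// patch, while `simplified` and `stable_map` are assumed to be in scope.
const Operator* MakeCheckMapsExample(SimplifiedOperatorBuilder* simplified,
                                     Handle<Map> stable_map) {
  ZoneHandleSet<Map> maps(stable_map);  // set holding the single expected map
  // The object to check is now the operator's only value input; the maps
  // travel in the parameter instead of as 1 + n value inputs.
  return simplified->CheckMaps(CheckMapsFlag::kTryMigrateInstance, maps);
}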
+
 size_t hash_value(CheckTaggedInputMode mode) {
   return static_cast<size_t>(mode);
 }
@@ -274,22 +317,36 @@
   return OpParameter<GrowFastElementsFlags>(op);
 }
 
+bool operator==(ElementsTransition const& lhs, ElementsTransition const& rhs) {
+  return lhs.mode() == rhs.mode() &&
+         lhs.source().address() == rhs.source().address() &&
+         lhs.target().address() == rhs.target().address();
+}
+
+bool operator!=(ElementsTransition const& lhs, ElementsTransition const& rhs) {
+  return !(lhs == rhs);
+}
+
 size_t hash_value(ElementsTransition transition) {
-  return static_cast<uint8_t>(transition);
+  return base::hash_combine(static_cast<uint8_t>(transition.mode()),
+                            transition.source().address(),
+                            transition.target().address());
 }
 
 std::ostream& operator<<(std::ostream& os, ElementsTransition transition) {
-  switch (transition) {
+  switch (transition.mode()) {
     case ElementsTransition::kFastTransition:
-      return os << "fast-transition";
+      return os << "fast-transition from " << Brief(*transition.source())
+                << " to " << Brief(*transition.target());
     case ElementsTransition::kSlowTransition:
-      return os << "slow-transition";
+      return os << "slow-transition from " << Brief(*transition.source())
+                << " to " << Brief(*transition.target());
   }
   UNREACHABLE();
   return os;
 }
 
-ElementsTransition ElementsTransitionOf(const Operator* op) {
+ElementsTransition const& ElementsTransitionOf(const Operator* op) {
   DCHECK_EQ(IrOpcode::kTransitionElementsKind, op->opcode());
   return OpParameter<ElementsTransition>(op);
 }
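
// An illustrative construction of the richer ElementsTransition parameter,
// assuming `source_map` and `target_map` handles are at hand; the printer
// above then reports "fast-transition from <source> to <target>".
const Operator* MakeTransitionExample(SimplifiedOperatorBuilder* simplified,
                                      Handle<Map> source_map,
                                      Handle<Map> target_map) {
  ElementsTransition transition(ElementsTransition::kFastTransition,
                                source_map, target_map);
  return simplified->TransitionElementsKind(transition);
}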
@@ -331,6 +388,12 @@
   return OpParameter<NumberOperationHint>(op);
 }
 
+int ParameterCountOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kNewUnmappedArgumentsElements ||
+         op->opcode() == IrOpcode::kNewRestParameterElements);
+  return OpParameter<int>(op);
+}
+
 PretenureFlag PretenureFlagOf(const Operator* op) {
   DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
   return OpParameter<PretenureFlag>(op);
@@ -395,8 +458,10 @@
   V(NumberToUint32, Operator::kNoProperties, 1, 0)               \
   V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0)         \
   V(NumberSilenceNaN, Operator::kNoProperties, 1, 0)             \
+  V(StringCharAt, Operator::kNoProperties, 2, 1)                 \
   V(StringCharCodeAt, Operator::kNoProperties, 2, 1)             \
   V(StringFromCharCode, Operator::kNoProperties, 1, 0)           \
+  V(StringIndexOf, Operator::kNoProperties, 3, 0)                \
   V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0)       \
   V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0)       \
   V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0)      \
@@ -404,6 +469,7 @@
   V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0)          \
   V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0)         \
   V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0)        \
+  V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0)   \
   V(ChangeFloat64ToTagged, Operator::kNoProperties, 1, 0)        \
   V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
   V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0)    \
@@ -414,7 +480,8 @@
   V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0)          \
   V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0)       \
   V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0)      \
-  V(ObjectIsCallable, Operator::kNoProperties, 1, 0)             \
+  V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0)   \
+  V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0)          \
   V(ObjectIsNumber, Operator::kNoProperties, 1, 0)               \
   V(ObjectIsReceiver, Operator::kNoProperties, 1, 0)             \
   V(ObjectIsSmi, Operator::kNoProperties, 1, 0)                  \
@@ -436,7 +503,9 @@
   V(CheckBounds, 2, 1)                  \
   V(CheckHeapObject, 1, 1)              \
   V(CheckIf, 1, 0)                      \
+  V(CheckInternalizedString, 1, 1)      \
   V(CheckNumber, 1, 1)                  \
+  V(CheckReceiver, 1, 1)                \
   V(CheckSmi, 1, 1)                     \
   V(CheckString, 1, 1)                  \
   V(CheckTaggedHole, 1, 1)              \
@@ -689,16 +758,15 @@
   return nullptr;
 }
 
-const Operator* SimplifiedOperatorBuilder::CheckMaps(int map_input_count) {
-  // TODO(bmeurer): Cache the most important versions of this operator.
-  DCHECK_LT(0, map_input_count);
-  int const value_input_count = 1 + map_input_count;
-  return new (zone()) Operator1<int>(           // --
-      IrOpcode::kCheckMaps,                     // opcode
-      Operator::kNoThrow | Operator::kNoWrite,  // flags
-      "CheckMaps",                              // name
-      value_input_count, 1, 1, 0, 1, 0,         // counts
-      map_input_count);                         // parameter
+const Operator* SimplifiedOperatorBuilder::CheckMaps(CheckMapsFlags flags,
+                                                     ZoneHandleSet<Map> maps) {
+  CheckMapsParameters const parameters(flags, maps);
+  return new (zone()) Operator1<CheckMapsParameters>(  // --
+      IrOpcode::kCheckMaps,                            // opcode
+      Operator::kNoThrow | Operator::kNoWrite,         // flags
+      "CheckMaps",                                     // name
+      1, 1, 1, 0, 1, 0,                                // counts
+      parameters);                                     // parameter
 }
 
 const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
@@ -733,10 +801,30 @@
       IrOpcode::kTransitionElementsKind,              // opcode
       Operator::kNoDeopt | Operator::kNoThrow,        // flags
       "TransitionElementsKind",                       // name
-      3, 1, 1, 0, 1, 0,                               // counts
+      1, 1, 1, 0, 1, 0,                               // counts
       transition);                                    // parameter
 }
 
+const Operator* SimplifiedOperatorBuilder::NewUnmappedArgumentsElements(
+    int parameter_count) {
+  return new (zone()) Operator1<int>(           // --
+      IrOpcode::kNewUnmappedArgumentsElements,  // opcode
+      Operator::kEliminatable,                  // flags
+      "NewUnmappedArgumentsElements",           // name
+      0, 1, 0, 1, 1, 0,                         // counts
+      parameter_count);                         // parameter
+}
+
+const Operator* SimplifiedOperatorBuilder::NewRestParameterElements(
+    int parameter_count) {
+  return new (zone()) Operator1<int>(       // --
+      IrOpcode::kNewRestParameterElements,  // opcode
+      Operator::kEliminatable,              // flags
+      "NewRestParameterElements",           // name
+      0, 1, 0, 1, 1, 0,                     // counts
+      parameter_count);                     // parameter
+}
+
 const Operator* SimplifiedOperatorBuilder::Allocate(PretenureFlag pretenure) {
   switch (pretenure) {
     case NOT_TENURED:
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index 833a055..ff3f60a 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -14,6 +14,7 @@
 #include "src/handles.h"
 #include "src/machine-type.h"
 #include "src/objects.h"
+#include "src/zone/zone-handle-set.h"
 
 namespace v8 {
 namespace internal {
@@ -64,6 +65,7 @@
   BaseTaggedness base_is_tagged;  // specifies if the base pointer is tagged.
   int offset;                     // offset of the field, without tag.
   MaybeHandle<Name> name;         // debugging only.
+  MaybeHandle<Map> map;           // map of the field value (if known).
   Type* type;                     // type of the field.
   MachineType machine_type;       // machine type of the field.
   WriteBarrierKind write_barrier_kind;  // write barrier hint.
@@ -143,6 +145,41 @@
 
 CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) WARN_UNUSED_RESULT;
 
+// Flags for map checks.
+enum class CheckMapsFlag : uint8_t {
+  kNone = 0u,
+  kTryMigrateInstance = 1u << 0,  // Try instance migration.
+};
+typedef base::Flags<CheckMapsFlag> CheckMapsFlags;
+
+DEFINE_OPERATORS_FOR_FLAGS(CheckMapsFlags)
+
+std::ostream& operator<<(std::ostream&, CheckMapsFlags);
+
+// A descriptor for map checks.
+class CheckMapsParameters final {
+ public:
+  CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps)
+      : flags_(flags), maps_(maps) {}
+
+  CheckMapsFlags flags() const { return flags_; }
+  ZoneHandleSet<Map> const& maps() const { return maps_; }
+
+ private:
+  CheckMapsFlags const flags_;
+  ZoneHandleSet<Map> const maps_;
+};
+
+bool operator==(CheckMapsParameters const&, CheckMapsParameters const&);
+bool operator!=(CheckMapsParameters const&, CheckMapsParameters const&);
+
+size_t hash_value(CheckMapsParameters const&);
+
+std::ostream& operator<<(std::ostream&, CheckMapsParameters const&);
+
+CheckMapsParameters const& CheckMapsParametersOf(Operator const*)
+    WARN_UNUSED_RESULT;
+
 // A descriptor for growing elements backing stores.
 enum class GrowFastElementsFlag : uint8_t {
   kNone = 0u,
@@ -160,16 +197,35 @@
     WARN_UNUSED_RESULT;
 
 // A descriptor for elements kind transitions.
-enum class ElementsTransition : uint8_t {
-  kFastTransition,  // simple transition, just updating the map.
-  kSlowTransition   // full transition, round-trip to the runtime.
+class ElementsTransition final {
+ public:
+  enum Mode : uint8_t {
+    kFastTransition,  // simple transition, just updating the map.
+    kSlowTransition   // full transition, round-trip to the runtime.
+  };
+
+  ElementsTransition(Mode mode, Handle<Map> source, Handle<Map> target)
+      : mode_(mode), source_(source), target_(target) {}
+
+  Mode mode() const { return mode_; }
+  Handle<Map> source() const { return source_; }
+  Handle<Map> target() const { return target_; }
+
+ private:
+  Mode const mode_;
+  Handle<Map> const source_;
+  Handle<Map> const target_;
 };
 
+bool operator==(ElementsTransition const&, ElementsTransition const&);
+bool operator!=(ElementsTransition const&, ElementsTransition const&);
+
 size_t hash_value(ElementsTransition);
 
 std::ostream& operator<<(std::ostream&, ElementsTransition);
 
-ElementsTransition ElementsTransitionOf(const Operator* op) WARN_UNUSED_RESULT;
+ElementsTransition const& ElementsTransitionOf(const Operator* op)
+    WARN_UNUSED_RESULT;
 
 // A hint for speculative number operations.
 enum class NumberOperationHint : uint8_t {
@@ -186,6 +242,8 @@
 NumberOperationHint NumberOperationHintOf(const Operator* op)
     WARN_UNUSED_RESULT;
 
+int ParameterCountOf(const Operator* op) WARN_UNUSED_RESULT;
+
 PretenureFlag PretenureFlagOf(const Operator* op) WARN_UNUSED_RESULT;
 
 UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
@@ -294,9 +352,11 @@
   const Operator* StringEqual();
   const Operator* StringLessThan();
   const Operator* StringLessThanOrEqual();
+  const Operator* StringCharAt();
   const Operator* StringCharCodeAt();
   const Operator* StringFromCharCode();
   const Operator* StringFromCodePoint(UnicodeEncoding encoding);
+  const Operator* StringIndexOf();
 
   const Operator* PlainPrimitiveToNumber();
   const Operator* PlainPrimitiveToWord32();
@@ -306,6 +366,7 @@
   const Operator* ChangeTaggedToInt32();
   const Operator* ChangeTaggedToUint32();
   const Operator* ChangeTaggedToFloat64();
+  const Operator* ChangeTaggedToTaggedSigned();
   const Operator* ChangeInt31ToTaggedSigned();
   const Operator* ChangeInt32ToTagged();
   const Operator* ChangeUint32ToTagged();
@@ -319,12 +380,14 @@
 
   const Operator* CheckIf();
   const Operator* CheckBounds();
-  const Operator* CheckMaps(int map_input_count);
+  const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>);
 
   const Operator* CheckHeapObject();
+  const Operator* CheckInternalizedString();
   const Operator* CheckNumber();
   const Operator* CheckSmi();
   const Operator* CheckString();
+  const Operator* CheckReceiver();
 
   const Operator* CheckedInt32Add();
   const Operator* CheckedInt32Sub();
@@ -348,13 +411,20 @@
   const Operator* CheckTaggedHole();
   const Operator* ConvertTaggedHoleToUndefined();
 
-  const Operator* ObjectIsCallable();
+  const Operator* ObjectIsDetectableCallable();
+  const Operator* ObjectIsNonCallable();
   const Operator* ObjectIsNumber();
   const Operator* ObjectIsReceiver();
   const Operator* ObjectIsSmi();
   const Operator* ObjectIsString();
   const Operator* ObjectIsUndetectable();
 
+  // new-rest-parameter-elements
+  const Operator* NewRestParameterElements(int parameter_count);
+
+  // new-unmapped-arguments-elements
+  const Operator* NewUnmappedArgumentsElements(int parameter_count);
+
   // array-buffer-was-neutered buffer
   const Operator* ArrayBufferWasNeutered();
 
diff --git a/src/compiler/state-values-utils.cc b/src/compiler/state-values-utils.cc
index e8310d7..899c91a 100644
--- a/src/compiler/state-values-utils.cc
+++ b/src/compiler/state-values-utils.cc
@@ -4,6 +4,8 @@
 
 #include "src/compiler/state-values-utils.h"
 
+#include "src/bit-vector.h"
+
 namespace v8 {
 namespace internal {
 namespace compiler {
@@ -47,6 +49,16 @@
   if (key->count != static_cast<size_t>(node->InputCount())) {
     return false;
   }
+
+  DCHECK(node->opcode() == IrOpcode::kStateValues);
+  SparseInputMask node_mask = SparseInputMaskOf(node->op());
+
+  if (node_mask != key->mask) {
+    return false;
+  }
+
+  // Comparing real inputs rather than sparse inputs, since we already know the
+  // sparse input masks are the same.
   for (size_t i = 0; i < key->count; i++) {
     if (key->values[i] != node->InputAt(static_cast<int>(i))) {
       return false;
@@ -62,6 +74,9 @@
   if (key1->count != key2->count) {
     return false;
   }
+  if (key1->mask != key2->mask) {
+    return false;
+  }
   for (size_t i = 0; i < key1->count; i++) {
     if (key1->values[i] != key2->values[i]) {
       return false;
@@ -73,19 +88,18 @@
 
 Node* StateValuesCache::GetEmptyStateValues() {
   if (empty_state_values_ == nullptr) {
-    empty_state_values_ = graph()->NewNode(common()->StateValues(0));
+    empty_state_values_ =
+        graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
   }
   return empty_state_values_;
 }
 
-
-NodeVector* StateValuesCache::GetWorkingSpace(size_t level) {
-  while (working_space_.size() <= level) {
-    void* space = zone()->New(sizeof(NodeVector));
-    working_space_.push_back(new (space)
-                                 NodeVector(kMaxInputCount, nullptr, zone()));
+StateValuesCache::WorkingBuffer* StateValuesCache::GetWorkingSpace(
+    size_t level) {
+  if (working_space_.size() <= level) {
+    working_space_.resize(level + 1);
   }
-  return working_space_[level];
+  return &working_space_[level];
 }
 
 namespace {
@@ -93,24 +107,24 @@
 int StateValuesHashKey(Node** nodes, size_t count) {
   size_t hash = count;
   for (size_t i = 0; i < count; i++) {
-    hash = hash * 23 + nodes[i]->id();
+    hash = hash * 23 + (nodes[i] == nullptr ? 0 : nodes[i]->id());
   }
   return static_cast<int>(hash & 0x7fffffff);
 }
 
 }  // namespace
 
-
-Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count) {
-  StateValuesKey key(count, nodes);
+Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count,
+                                               SparseInputMask mask) {
+  StateValuesKey key(count, mask, nodes);
   int hash = StateValuesHashKey(nodes, count);
   ZoneHashMap::Entry* lookup =
       hash_map_.LookupOrInsert(&key, hash, ZoneAllocationPolicy(zone()));
   DCHECK_NOT_NULL(lookup);
   Node* node;
   if (lookup->value == nullptr) {
-    int input_count = static_cast<int>(count);
-    node = graph()->NewNode(common()->StateValues(input_count), input_count,
+    int node_count = static_cast<int>(count);
+    node = graph()->NewNode(common()->StateValues(node_count, mask), node_count,
                             nodes);
     NodeKey* new_key = new (zone()->New(sizeof(NodeKey))) NodeKey(node);
     lookup->key = new_key;
@@ -121,106 +135,192 @@
   return node;
 }
 
+SparseInputMask::BitMaskType StateValuesCache::FillBufferWithValues(
+    WorkingBuffer* node_buffer, size_t* node_count, size_t* values_idx,
+    Node** values, size_t count, const BitVector* liveness,
+    int liveness_offset) {
+  SparseInputMask::BitMaskType input_mask = 0;
 
-class StateValuesCache::ValueArrayIterator {
- public:
-  ValueArrayIterator(Node** values, size_t count)
-      : values_(values), count_(count), current_(0) {}
+  // Virtual nodes are the live nodes plus the implicitly optimized-out nodes,
+  // which are implied by the liveness mask.
+  size_t virtual_node_count = *node_count;
 
-  void Advance() {
-    if (!done()) {
-      current_++;
+  while (*values_idx < count && *node_count < kMaxInputCount &&
+         virtual_node_count < SparseInputMask::kMaxSparseInputs) {
+    DCHECK_LE(*values_idx, static_cast<size_t>(INT_MAX));
+
+    if (liveness == nullptr ||
+        liveness->Contains(liveness_offset + static_cast<int>(*values_idx))) {
+      input_mask |= 1 << (virtual_node_count);
+      (*node_buffer)[(*node_count)++] = values[*values_idx];
+    }
+    virtual_node_count++;
+
+    (*values_idx)++;
+  }
+
+  DCHECK(*node_count <= StateValuesCache::kMaxInputCount);
+  DCHECK(virtual_node_count <= SparseInputMask::kMaxSparseInputs);
+
+  // Add the end marker at the end of the mask.
+  input_mask |= SparseInputMask::kEndMarker << virtual_node_count;
+
+  return input_mask;
+}
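
// A standalone sketch of the mask construction above for a flat liveness
// array, without the buffer/offset plumbing; `end_marker` stands in for
// SparseInputMask::kEndMarker and count is assumed to be < 32. For
// live = {1, 0, 1, 0, 1} this yields 0b10101 | (end_marker << 5): three real
// inputs decoding back to five virtual slots.
uint32_t BuildSparseMaskSketch(const bool* live, size_t count,
                               uint32_t end_marker) {
  uint32_t mask = 0;
  for (size_t i = 0; i < count; ++i) {
    if (live[i]) mask |= 1u << i;  // bit i set <=> virtual input i is live
  }
  return mask | (end_marker << count);  // end marker one past the last slot
}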
+
+Node* StateValuesCache::BuildTree(size_t* values_idx, Node** values,
+                                  size_t count, const BitVector* liveness,
+                                  int liveness_offset, size_t level) {
+  WorkingBuffer* node_buffer = GetWorkingSpace(level);
+  size_t node_count = 0;
+  SparseInputMask::BitMaskType input_mask = SparseInputMask::kDenseBitMask;
+
+  if (level == 0) {
+    input_mask = FillBufferWithValues(node_buffer, &node_count, values_idx,
+                                      values, count, liveness, liveness_offset);
+    // Make sure we returned a sparse input mask.
+    DCHECK_NE(input_mask, SparseInputMask::kDenseBitMask);
+  } else {
+    while (*values_idx < count && node_count < kMaxInputCount) {
+      if (count - *values_idx < kMaxInputCount - node_count) {
+        // If we have fewer values remaining than inputs remaining, dump the
+        // remaining values into this node.
+        // TODO(leszeks): We could optimise this further by only counting
+        // remaining live nodes.
+
+        size_t previous_input_count = node_count;
+        input_mask =
+            FillBufferWithValues(node_buffer, &node_count, values_idx, values,
+                                 count, liveness, liveness_offset);
+        // Make sure we have exhausted our values.
+        DCHECK_EQ(*values_idx, count);
+        // Make sure we returned a sparse input mask.
+        DCHECK_NE(input_mask, SparseInputMask::kDenseBitMask);
+
+        // Make sure we haven't touched inputs below previous_input_count in the
+        // mask.
+        DCHECK_EQ(input_mask & ((1 << previous_input_count) - 1), 0u);
+        // Mark all previous inputs as live.
+        input_mask |= ((1 << previous_input_count) - 1);
+
+        break;
+
+      } else {
+        // Otherwise, add the values to a subtree and add that as an input.
+        Node* subtree = BuildTree(values_idx, values, count, liveness,
+                                  liveness_offset, level - 1);
+        (*node_buffer)[node_count++] = subtree;
+        // Don't touch the bitmask, so that it stays dense.
+      }
     }
   }
 
-  bool done() { return current_ >= count_; }
-
-  Node* node() {
-    DCHECK(!done());
-    return values_[current_];
-  }
-
- private:
-  Node** values_;
-  size_t count_;
-  size_t current_;
-};
-
-
-Node* StateValuesCache::BuildTree(ValueArrayIterator* it, size_t max_height) {
-  if (max_height == 0) {
-    Node* node = it->node();
-    it->Advance();
-    return node;
-  }
-  DCHECK(!it->done());
-
-  NodeVector* buffer = GetWorkingSpace(max_height);
-  size_t count = 0;
-  for (; count < kMaxInputCount; count++) {
-    if (it->done()) break;
-    (*buffer)[count] = BuildTree(it, max_height - 1);
-  }
-  if (count == 1) {
-    return (*buffer)[0];
+  if (node_count == 1 && input_mask == SparseInputMask::kDenseBitMask) {
+    // Elide the StateValue node if there is only one, dense input. This will
+    // only happen if we built a single subtree (as nodes with values are always
+    // sparse), and so we can replace ourselves with it.
+    DCHECK_EQ((*node_buffer)[0]->opcode(), IrOpcode::kStateValues);
+    return (*node_buffer)[0];
   } else {
-    return GetValuesNodeFromCache(&(buffer->front()), count);
+    return GetValuesNodeFromCache(node_buffer->data(), node_count,
+                                  SparseInputMask(input_mask));
   }
 }
 
-
-Node* StateValuesCache::GetNodeForValues(Node** values, size_t count) {
 #if DEBUG
+namespace {
+
+void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
+                             const BitVector* liveness, int liveness_offset) {
+  CHECK_EQ(count, StateValuesAccess(tree).size());
+
+  int i;
+  auto access = StateValuesAccess(tree);
+  auto it = access.begin();
+  auto itend = access.end();
+  for (i = 0; it != itend; ++it, ++i) {
+    if (liveness == nullptr || liveness->Contains(liveness_offset + i)) {
+      CHECK((*it).node == values[i]);
+    } else {
+      CHECK((*it).node == nullptr);
+    }
+  }
+  CHECK_EQ(static_cast<size_t>(i), count);
+}
+
+}  // namespace
+#endif
+
+Node* StateValuesCache::GetNodeForValues(Node** values, size_t count,
+                                         const BitVector* liveness,
+                                         int liveness_offset) {
+#if DEBUG
+  // Check that the values represent actual values, and not a tree of values.
   for (size_t i = 0; i < count; i++) {
-    DCHECK_NE(values[i]->opcode(), IrOpcode::kStateValues);
-    DCHECK_NE(values[i]->opcode(), IrOpcode::kTypedStateValues);
+    if (values[i] != nullptr) {
+      DCHECK_NE(values[i]->opcode(), IrOpcode::kStateValues);
+      DCHECK_NE(values[i]->opcode(), IrOpcode::kTypedStateValues);
+    }
+  }
+  if (liveness != nullptr) {
+    DCHECK_LE(liveness_offset + count, static_cast<size_t>(liveness->length()));
+
+    for (size_t i = 0; i < count; i++) {
+      if (liveness->Contains(liveness_offset + static_cast<int>(i))) {
+        DCHECK_NOT_NULL(values[i]);
+      }
+    }
   }
 #endif
+
   if (count == 0) {
     return GetEmptyStateValues();
   }
+
+  // This is a worst-case tree height estimate, assuming that all values are
+  // live. We could get a better estimate by counting zeroes in the liveness
+  // vector, but there's no point -- any excess height in the tree will be
+  // collapsed by the single-input elision at the end of BuildTree.
   size_t height = 0;
-  size_t max_nodes = 1;
-  while (count > max_nodes) {
+  size_t max_inputs = kMaxInputCount;
+  while (count > max_inputs) {
     height++;
-    max_nodes *= kMaxInputCount;
+    max_inputs *= kMaxInputCount;
   }
 
-  ValueArrayIterator it(values, count);
+  size_t values_idx = 0;
+  Node* tree =
+      BuildTree(&values_idx, values, count, liveness, liveness_offset, height);
+  // The values should be exhausted by the end of BuildTree.
+  DCHECK_EQ(values_idx, count);
 
-  Node* tree = BuildTree(&it, height);
+  // The 'tree' must be rooted with a state value node.
+  DCHECK_EQ(tree->opcode(), IrOpcode::kStateValues);
 
-  // If the 'tree' is a single node, equip it with a StateValues wrapper.
-  if (tree->opcode() != IrOpcode::kStateValues &&
-      tree->opcode() != IrOpcode::kTypedStateValues) {
-    tree = GetValuesNodeFromCache(&tree, 1);
-  }
+#if DEBUG
+  CheckTreeContainsValues(tree, values, count, liveness, liveness_offset);
+#endif
 
   return tree;
 }
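
// Worked example of the height estimate above, with kMaxInputCount == 8:
// count <= 8 gives height 0, count <= 64 gives height 1, and count == 100
// gives height 2, since 8^2 == 64 < 100 <= 8^3 == 512.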
 
-
 StateValuesAccess::iterator::iterator(Node* node) : current_depth_(0) {
-  // A hacky way initialize - just set the index before the node we want
-  // to process and then advance to it.
-  stack_[current_depth_].node = node;
-  stack_[current_depth_].index = -1;
-  Advance();
+  stack_[current_depth_] =
+      SparseInputMaskOf(node->op()).IterateOverInputs(node);
+  EnsureValid();
 }
 
-
-StateValuesAccess::iterator::StatePos* StateValuesAccess::iterator::Top() {
+SparseInputMask::InputIterator* StateValuesAccess::iterator::Top() {
   DCHECK(current_depth_ >= 0);
   DCHECK(current_depth_ < kMaxInlineDepth);
   return &(stack_[current_depth_]);
 }
 
-
 void StateValuesAccess::iterator::Push(Node* node) {
   current_depth_++;
   CHECK(current_depth_ < kMaxInlineDepth);
-  stack_[current_depth_].node = node;
-  stack_[current_depth_].index = 0;
+  stack_[current_depth_] =
+      SparseInputMaskOf(node->op()).IterateOverInputs(node);
 }
 
 
@@ -234,48 +334,61 @@
 
 
 void StateValuesAccess::iterator::Advance() {
-  // Advance the current index.
-  Top()->index++;
+  Top()->Advance();
+  EnsureValid();
+}
 
-  // Fix up the position to point to a valid node.
+void StateValuesAccess::iterator::EnsureValid() {
   while (true) {
-    // TODO(jarin): Factor to a separate method.
-    Node* node = Top()->node;
-    int index = Top()->index;
+    SparseInputMask::InputIterator* top = Top();
 
-    if (index >= node->InputCount()) {
-      // Pop stack and move to the next sibling.
+    if (top->IsEmpty()) {
+      // We are on a valid (albeit optimized out) node.
+      return;
+    }
+
+    if (top->IsEnd()) {
+      // We have hit the end of this iterator. Pop the stack and move to the
+      // next sibling iterator.
       Pop();
       if (done()) {
         // Stack is exhausted, we have reached the end.
         return;
       }
-      Top()->index++;
-    } else if (node->InputAt(index)->opcode() == IrOpcode::kStateValues ||
-               node->InputAt(index)->opcode() == IrOpcode::kTypedStateValues) {
-      // Nested state, we need to push to the stack.
-      Push(node->InputAt(index));
-    } else {
-      // We are on a valid node, we can stop the iteration.
-      return;
+      Top()->Advance();
+      continue;
     }
+
+    // At this point the value is known to be live and within our input nodes.
+    Node* value_node = top->GetReal();
+
+    if (value_node->opcode() == IrOpcode::kStateValues ||
+        value_node->opcode() == IrOpcode::kTypedStateValues) {
+      // Nested state, we need to push to the stack.
+      Push(value_node);
+      continue;
+    }
+
+    // We are on a valid node, we can stop the iteration.
+    return;
   }
 }
 
-
-Node* StateValuesAccess::iterator::node() {
-  return Top()->node->InputAt(Top()->index);
-}
-
+Node* StateValuesAccess::iterator::node() { return Top()->Get(nullptr); }
 
 MachineType StateValuesAccess::iterator::type() {
-  Node* state = Top()->node;
-  if (state->opcode() == IrOpcode::kStateValues) {
+  Node* parent = Top()->parent();
+  if (parent->opcode() == IrOpcode::kStateValues) {
     return MachineType::AnyTagged();
   } else {
-    DCHECK_EQ(IrOpcode::kTypedStateValues, state->opcode());
-    ZoneVector<MachineType> const* types = MachineTypesOf(state->op());
-    return (*types)[Top()->index];
+    DCHECK_EQ(IrOpcode::kTypedStateValues, parent->opcode());
+
+    if (Top()->IsEmpty()) {
+      return MachineType::None();
+    } else {
+      ZoneVector<MachineType> const* types = MachineTypesOf(parent->op());
+      return (*types)[Top()->real_index()];
+    }
   }
 }
 
@@ -300,14 +413,24 @@
 
 size_t StateValuesAccess::size() {
   size_t count = 0;
-  for (int i = 0; i < node_->InputCount(); i++) {
-    if (node_->InputAt(i)->opcode() == IrOpcode::kStateValues ||
-        node_->InputAt(i)->opcode() == IrOpcode::kTypedStateValues) {
-      count += StateValuesAccess(node_->InputAt(i)).size();
-    } else {
+  SparseInputMask mask = SparseInputMaskOf(node_->op());
+
+  SparseInputMask::InputIterator iterator = mask.IterateOverInputs(node_);
+
+  for (; !iterator.IsEnd(); iterator.Advance()) {
+    if (iterator.IsEmpty()) {
       count++;
+    } else {
+      Node* value = iterator.GetReal();
+      if (value->opcode() == IrOpcode::kStateValues ||
+          value->opcode() == IrOpcode::kTypedStateValues) {
+        count += StateValuesAccess(value).size();
+      } else {
+        count++;
+      }
     }
   }
+
   return count;
 }
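+
+// Illustrative example: for a TypedStateValues node whose sparse mask
+// encodes {live a, empty, live b},
+//
+//   StateValuesAccess(node).size();  // == 3, empty slots still count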
 
diff --git a/src/compiler/state-values-utils.h b/src/compiler/state-values-utils.h
index 14b1b9e..e1fd7d2 100644
--- a/src/compiler/state-values-utils.h
+++ b/src/compiler/state-values-utils.h
@@ -5,12 +5,16 @@
 #ifndef V8_COMPILER_STATE_VALUES_UTILS_H_
 #define V8_COMPILER_STATE_VALUES_UTILS_H_
 
+#include <array>
+#include "src/compiler/common-operator.h"
 #include "src/compiler/js-graph.h"
 #include "src/globals.h"
 
 namespace v8 {
 namespace internal {
 
+class BitVector;
+
 namespace compiler {
 
 class Graph;
@@ -19,10 +23,13 @@
  public:
   explicit StateValuesCache(JSGraph* js_graph);
 
-  Node* GetNodeForValues(Node** values, size_t count);
+  Node* GetNodeForValues(Node** values, size_t count,
+                         const BitVector* liveness = nullptr,
+                         int liveness_offset = 0);
 
  private:
   static const size_t kMaxInputCount = 8;
+  typedef std::array<Node*, kMaxInputCount> WorkingBuffer;
 
   struct NodeKey {
     Node* node;
@@ -33,22 +40,35 @@
   struct StateValuesKey : public NodeKey {
     // ValueArray - array of nodes ({node} has to be nullptr).
     size_t count;
+    SparseInputMask mask;
     Node** values;
 
-    StateValuesKey(size_t count, Node** values)
-        : NodeKey(nullptr), count(count), values(values) {}
+    StateValuesKey(size_t count, SparseInputMask mask, Node** values)
+        : NodeKey(nullptr), count(count), mask(mask), values(values) {}
   };
 
-  class ValueArrayIterator;
-
   static bool AreKeysEqual(void* key1, void* key2);
   static bool IsKeysEqualToNode(StateValuesKey* key, Node* node);
   static bool AreValueKeysEqual(StateValuesKey* key1, StateValuesKey* key2);
 
-  Node* BuildTree(ValueArrayIterator* it, size_t max_height);
-  NodeVector* GetWorkingSpace(size_t level);
+  // Fills {node_buffer}, starting from {node_count}, with {values}, starting
+  // at {values_idx}, sparsely encoding according to {liveness}. {node_count} is
+  // updated with the new number of inputs in {node_buffer}, and a bitmask of
+  // the sparse encoding is returned.
+  SparseInputMask::BitMaskType FillBufferWithValues(WorkingBuffer* node_buffer,
+                                                    size_t* node_count,
+                                                    size_t* values_idx,
+                                                    Node** values, size_t count,
+                                                    const BitVector* liveness,
+                                                    int liveness_offset);
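+  // Illustrative example for the declaration above (the exact bit layout is
+  // owned by SparseInputMask): with {values} = {a, b, c} and {liveness} bits
+  // {1, 0, 1}, the buffer receives {a, c} and the returned bitmask marks the
+  // middle slot as empty, so iteration later reports it as optimized out.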
+
+  Node* BuildTree(size_t* values_idx, Node** values, size_t count,
+                  const BitVector* liveness, int liveness_offset, size_t level);
+
+  WorkingBuffer* GetWorkingSpace(size_t level);
   Node* GetEmptyStateValues();
-  Node* GetValuesNodeFromCache(Node** nodes, size_t count);
+  Node* GetValuesNodeFromCache(Node** nodes, size_t count,
+                               SparseInputMask mask);
 
   Graph* graph() { return js_graph_->graph(); }
   CommonOperatorBuilder* common() { return js_graph_->common(); }
@@ -57,7 +77,7 @@
 
   JSGraph* js_graph_;
   CustomMatcherZoneHashMap hash_map_;
-  ZoneVector<NodeVector*> working_space_;  // One working space per level.
+  ZoneVector<WorkingBuffer> working_space_;  // One working space per level.
   Node* empty_state_values_;
 };
 
@@ -86,21 +106,14 @@
     MachineType type();
     bool done();
     void Advance();
+    void EnsureValid();
 
-    struct StatePos {
-      Node* node;
-      int index;
-
-      explicit StatePos(Node* node) : node(node), index(0) {}
-      StatePos() {}
-    };
-
-    StatePos* Top();
+    SparseInputMask::InputIterator* Top();
     void Push(Node* node);
     void Pop();
 
     static const int kMaxInlineDepth = 8;
-    StatePos stack_[kMaxInlineDepth];
+    SparseInputMask::InputIterator stack_[kMaxInlineDepth];
     int current_depth_;
   };
 
diff --git a/src/compiler/type-cache.h b/src/compiler/type-cache.h
index 69eaf11..3d9801b 100644
--- a/src/compiler/type-cache.h
+++ b/src/compiler/type-cache.h
@@ -64,6 +64,8 @@
   Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
   Type* const kPositiveIntegerOrMinusZero =
       Type::Union(kPositiveInteger, Type::MinusZero(), zone());
+  Type* const kPositiveIntegerOrNaN =
+      Type::Union(kPositiveInteger, Type::NaN(), zone());
   Type* const kPositiveIntegerOrMinusZeroOrNaN =
       Type::Union(kPositiveIntegerOrMinusZero, Type::NaN(), zone());
 
@@ -97,6 +99,11 @@
   // [0, String::kMaxLength].
   Type* const kStringLengthType = CreateRange(0.0, String::kMaxLength);
 
+  // A time value always contains a tagged number in the range
+  // [-kMaxTimeInMs, kMaxTimeInMs].
+  Type* const kTimeValueType =
+      CreateRange(-DateCache::kMaxTimeInMs, DateCache::kMaxTimeInMs);
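+  // (Illustrative: the typer returns kTimeValueType for Date.now() -- see
+  // the kDateNow case added in typer.cc.)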
+
   // The JSDate::day property always contains a tagged number in the range
   // [1, 31] or NaN.
   Type* const kJSDateDayType =
@@ -123,9 +130,8 @@
 
   // The JSDate::value property always contains a tagged number in the range
   // [-kMaxTimeInMs, kMaxTimeInMs] or NaN.
-  Type* const kJSDateValueType = Type::Union(
-      CreateRange(-DateCache::kMaxTimeInMs, DateCache::kMaxTimeInMs),
-      Type::NaN(), zone());
+  Type* const kJSDateValueType =
+      Type::Union(kTimeValueType, Type::NaN(), zone());
 
   // The JSDate::weekday property always contains a tagged number in the range
   // [0, 6] or NaN.
@@ -137,6 +143,10 @@
   Type* const kJSDateYearType =
       Type::Union(Type::SignedSmall(), Type::NaN(), zone());
 
+  // The valid number of arguments for JavaScript functions.
+  Type* const kArgumentsLengthType =
+      Type::Range(0.0, Code::kMaxArguments, zone());
+
  private:
   template <typename T>
   Type* CreateRange() {
diff --git a/src/compiler/type-hint-analyzer.cc b/src/compiler/type-hint-analyzer.cc
deleted file mode 100644
index da77a0c..0000000
--- a/src/compiler/type-hint-analyzer.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/type-hint-analyzer.h"
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/ic/ic-state.h"
-#include "src/type-hints.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-BinaryOperationHint ToBinaryOperationHint(Token::Value op,
-                                          BinaryOpICState::Kind kind) {
-  switch (kind) {
-    case BinaryOpICState::NONE:
-      return BinaryOperationHint::kNone;
-    case BinaryOpICState::SMI:
-      return BinaryOperationHint::kSignedSmall;
-    case BinaryOpICState::INT32:
-      return (Token::IsTruncatingBinaryOp(op) && SmiValuesAre31Bits())
-                 ? BinaryOperationHint::kNumberOrOddball
-                 : BinaryOperationHint::kSigned32;
-    case BinaryOpICState::NUMBER:
-      return BinaryOperationHint::kNumberOrOddball;
-    case BinaryOpICState::STRING:
-      return BinaryOperationHint::kString;
-    case BinaryOpICState::GENERIC:
-      return BinaryOperationHint::kAny;
-  }
-  UNREACHABLE();
-  return BinaryOperationHint::kNone;
-}
-
-CompareOperationHint ToCompareOperationHint(Token::Value op,
-                                            CompareICState::State state) {
-  switch (state) {
-    case CompareICState::UNINITIALIZED:
-      return CompareOperationHint::kNone;
-    case CompareICState::SMI:
-      return CompareOperationHint::kSignedSmall;
-    case CompareICState::NUMBER:
-      return Token::IsOrderedRelationalCompareOp(op)
-                 ? CompareOperationHint::kNumberOrOddball
-                 : CompareOperationHint::kNumber;
-    case CompareICState::STRING:
-    case CompareICState::INTERNALIZED_STRING:
-    case CompareICState::UNIQUE_NAME:
-    case CompareICState::RECEIVER:
-    case CompareICState::KNOWN_RECEIVER:
-    case CompareICState::BOOLEAN:
-    case CompareICState::GENERIC:
-      return CompareOperationHint::kAny;
-  }
-  UNREACHABLE();
-  return CompareOperationHint::kNone;
-}
-
-}  // namespace
-
-bool TypeHintAnalysis::GetBinaryOperationHint(TypeFeedbackId id,
-                                              BinaryOperationHint* hint) const {
-  auto i = infos_.find(id);
-  if (i == infos_.end()) return false;
-  Handle<Code> code = i->second;
-  DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
-  BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
-  *hint = ToBinaryOperationHint(state.op(), state.kind());
-  return true;
-}
-
-bool TypeHintAnalysis::GetCompareOperationHint(
-    TypeFeedbackId id, CompareOperationHint* hint) const {
-  auto i = infos_.find(id);
-  if (i == infos_.end()) return false;
-  Handle<Code> code = i->second;
-  DCHECK_EQ(Code::COMPARE_IC, code->kind());
-  CompareICStub stub(code->stub_key(), code->GetIsolate());
-  *hint = ToCompareOperationHint(stub.op(), stub.state());
-  return true;
-}
-
-bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
-                                         ToBooleanHints* hints) const {
-  auto i = infos_.find(id);
-  if (i == infos_.end()) return false;
-  Handle<Code> code = i->second;
-  DCHECK_EQ(Code::TO_BOOLEAN_IC, code->kind());
-  ToBooleanICStub stub(code->GetIsolate(), code->extra_ic_state());
-  *hints = stub.hints();
-  return true;
-}
-
-TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
-  DisallowHeapAllocation no_gc;
-  TypeHintAnalysis::Infos infos(zone());
-  Isolate* const isolate = code->GetIsolate();
-  int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
-  for (RelocIterator it(*code, mask); !it.done(); it.next()) {
-    RelocInfo* rinfo = it.rinfo();
-    Address target_address = rinfo->target_address();
-    Code* target = Code::GetCodeFromTargetAddress(target_address);
-    switch (target->kind()) {
-      case Code::BINARY_OP_IC:
-      case Code::COMPARE_IC:
-      case Code::TO_BOOLEAN_IC: {
-        // Add this feedback to the {infos}.
-        TypeFeedbackId id(static_cast<unsigned>(rinfo->data()));
-        infos.insert(std::make_pair(id, handle(target, isolate)));
-        break;
-      }
-      default:
-        // Ignore the remaining code objects.
-        break;
-    }
-  }
-  return new (zone()) TypeHintAnalysis(infos, zone());
-}
-
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/type-hint-analyzer.h b/src/compiler/type-hint-analyzer.h
deleted file mode 100644
index 354f894..0000000
--- a/src/compiler/type-hint-analyzer.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_TYPE_HINT_ANALYZER_H_
-#define V8_COMPILER_TYPE_HINT_ANALYZER_H_
-
-#include "src/handles.h"
-#include "src/type-hints.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// The result of analyzing type hints.
-class TypeHintAnalysis final : public ZoneObject {
- public:
-  typedef ZoneMap<TypeFeedbackId, Handle<Code>> Infos;
-
-  explicit TypeHintAnalysis(Infos const& infos, Zone* zone)
-      : infos_(infos), zone_(zone) {}
-
-  bool GetBinaryOperationHint(TypeFeedbackId id,
-                              BinaryOperationHint* hint) const;
-  bool GetCompareOperationHint(TypeFeedbackId id,
-                               CompareOperationHint* hint) const;
-  bool GetToBooleanHints(TypeFeedbackId id, ToBooleanHints* hints) const;
-
- private:
-  Zone* zone() const { return zone_; }
-
-  Infos const infos_;
-  Zone* zone_;
-};
-
-
-// The class that performs type hint analysis on the fullcodegen code object.
-class TypeHintAnalyzer final {
- public:
-  explicit TypeHintAnalyzer(Zone* zone) : zone_(zone) {}
-
-  TypeHintAnalysis* Analyze(Handle<Code> code);
-
- private:
-  Zone* zone() const { return zone_; }
-
-  Zone* const zone_;
-
-  DISALLOW_COPY_AND_ASSIGN(TypeHintAnalyzer);
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_TYPE_HINT_ANALYZER_H_
diff --git a/src/compiler/typed-optimization.cc b/src/compiler/typed-optimization.cc
index 5ebc390..e130a10 100644
--- a/src/compiler/typed-optimization.cc
+++ b/src/compiler/typed-optimization.cc
@@ -83,14 +83,17 @@
     case IrOpcode::kLoadField:
       return ReduceLoadField(node);
     case IrOpcode::kNumberCeil:
-    case IrOpcode::kNumberFloor:
     case IrOpcode::kNumberRound:
     case IrOpcode::kNumberTrunc:
       return ReduceNumberRoundop(node);
+    case IrOpcode::kNumberFloor:
+      return ReduceNumberFloor(node);
     case IrOpcode::kNumberToUint8Clamped:
       return ReduceNumberToUint8Clamped(node);
     case IrOpcode::kPhi:
       return ReducePhi(node);
+    case IrOpcode::kReferenceEqual:
+      return ReduceReferenceEqual(node);
     case IrOpcode::kSelect:
       return ReduceSelect(node);
     default:
@@ -185,6 +188,40 @@
   return NoChange();
 }
 
+Reduction TypedOptimization::ReduceNumberFloor(Node* node) {
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetType(input);
+  if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+    return Replace(input);
+  }
+  if (input_type->Is(Type::PlainNumber()) &&
+      input->opcode() == IrOpcode::kNumberDivide) {
+    Node* const lhs = NodeProperties::GetValueInput(input, 0);
+    Type* const lhs_type = NodeProperties::GetType(lhs);
+    Node* const rhs = NodeProperties::GetValueInput(input, 1);
+    Type* const rhs_type = NodeProperties::GetType(rhs);
+    if (lhs_type->Is(Type::Unsigned32()) && rhs_type->Is(Type::Unsigned32())) {
+      // We can replace
+      //
+      //   NumberFloor(NumberDivide(lhs: unsigned32,
+      //                            rhs: unsigned32)): plain-number
+      //
+      // with
+      //
+      //   NumberToUint32(NumberDivide(lhs, rhs))
+      //
+      // and just smash the type of the {lhs} on the {node},
+      // as the truncated result must be in the same range as
+      // {lhs} since {rhs} cannot be less than 1 (due to the
+      // plain-number type constraint on the {node}).
+      NodeProperties::ChangeOp(node, simplified()->NumberToUint32());
+      NodeProperties::SetType(node, lhs_type);
+      return Changed(node);
+    }
+  }
+  return NoChange();
+}
+
 Reduction TypedOptimization::ReduceNumberRoundop(Node* node) {
   Node* const input = NodeProperties::GetValueInput(node, 0);
   Type* const input_type = NodeProperties::GetType(input);
@@ -223,6 +260,18 @@
   return NoChange();
 }
 
+Reduction TypedOptimization::ReduceReferenceEqual(Node* node) {
+  DCHECK_EQ(IrOpcode::kReferenceEqual, node->opcode());
+  Node* const lhs = NodeProperties::GetValueInput(node, 0);
+  Node* const rhs = NodeProperties::GetValueInput(node, 1);
+  Type* const lhs_type = NodeProperties::GetType(lhs);
+  Type* const rhs_type = NodeProperties::GetType(rhs);
+  if (!lhs_type->Maybe(rhs_type)) {
+    return Replace(jsgraph()->FalseConstant());
+  }
+  return NoChange();
+}
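+
+// Illustrative example for ReduceReferenceEqual above: if {lhs} is typed
+// Type::Null() and {rhs} is typed Type::String(), lhs_type->Maybe(rhs_type)
+// is false, so the comparison can never hold and the node folds to the
+// false constant.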
+
 Reduction TypedOptimization::ReduceSelect(Node* node) {
   DCHECK_EQ(IrOpcode::kSelect, node->opcode());
   Node* const condition = NodeProperties::GetValueInput(node, 0);
diff --git a/src/compiler/typed-optimization.h b/src/compiler/typed-optimization.h
index fb2db72..93de680 100644
--- a/src/compiler/typed-optimization.h
+++ b/src/compiler/typed-optimization.h
@@ -46,9 +46,11 @@
   Reduction ReduceCheckMaps(Node* node);
   Reduction ReduceCheckString(Node* node);
   Reduction ReduceLoadField(Node* node);
+  Reduction ReduceNumberFloor(Node* node);
   Reduction ReduceNumberRoundop(Node* node);
   Reduction ReduceNumberToUint8Clamped(Node* node);
   Reduction ReducePhi(Node* node);
+  Reduction ReduceReferenceEqual(Node* node);
   Reduction ReduceSelect(Node* node);
 
   CompilationDependencies* dependencies() const { return dependencies_; }
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index 2642a10..ed1a04a 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -43,13 +43,14 @@
   Zone* zone = this->zone();
   Factory* const factory = isolate->factory();
 
-  singleton_false_ = Type::HeapConstant(factory->false_value(), zone);
-  singleton_true_ = Type::HeapConstant(factory->true_value(), zone);
-  singleton_the_hole_ = Type::HeapConstant(factory->the_hole_value(), zone);
+  singleton_empty_string_ = Type::HeapConstant(factory->empty_string(), zone);
+  singleton_false_ = operation_typer_.singleton_false();
+  singleton_true_ = operation_typer_.singleton_true();
   falsish_ = Type::Union(
       Type::Undetectable(),
       Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
-                  singleton_the_hole_, zone),
+                  Type::Union(singleton_empty_string_, Type::Hole(), zone),
+                  zone),
       zone);
   truish_ = Type::Union(
       singleton_true_,
@@ -122,6 +123,8 @@
       DECLARE_CASE(Deoptimize)
       DECLARE_CASE(DeoptimizeIf)
       DECLARE_CASE(DeoptimizeUnless)
+      DECLARE_CASE(TrapIf)
+      DECLARE_CASE(TrapUnless)
       DECLARE_CASE(Return)
       DECLARE_CASE(TailCall)
       DECLARE_CASE(Terminate)
@@ -185,6 +188,8 @@
       DECLARE_CASE(Deoptimize)
       DECLARE_CASE(DeoptimizeIf)
       DECLARE_CASE(DeoptimizeUnless)
+      DECLARE_CASE(TrapIf)
+      DECLARE_CASE(TrapUnless)
       DECLARE_CASE(Return)
       DECLARE_CASE(TailCall)
       DECLARE_CASE(Terminate)
@@ -279,7 +284,8 @@
   SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
 #undef DECLARE_METHOD
 
-  static Type* ObjectIsCallable(Type*, Typer*);
+  static Type* ObjectIsDetectableCallable(Type*, Typer*);
+  static Type* ObjectIsNonCallable(Type*, Typer*);
   static Type* ObjectIsNumber(Type*, Typer*);
   static Type* ObjectIsReceiver(Type*, Typer*);
   static Type* ObjectIsSmi(Type*, Typer*);
@@ -292,7 +298,7 @@
   JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
 #undef DECLARE_METHOD
 
-  static Type* JSCallFunctionTyper(Type*, Typer*);
+  static Type* JSCallTyper(Type*, Typer*);
 
   static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
   static Type* StringFromCharCodeTyper(Type*, Typer*);
@@ -497,9 +503,15 @@
 
 // Type checks.
 
-Type* Typer::Visitor::ObjectIsCallable(Type* type, Typer* t) {
-  if (type->Is(Type::Function())) return t->singleton_true_;
-  if (type->Is(Type::Primitive())) return t->singleton_false_;
+Type* Typer::Visitor::ObjectIsDetectableCallable(Type* type, Typer* t) {
+  if (type->Is(Type::DetectableCallable())) return t->singleton_true_;
+  if (!type->Maybe(Type::DetectableCallable())) return t->singleton_false_;
+  return Type::Boolean();
+}
+
+Type* Typer::Visitor::ObjectIsNonCallable(Type* type, Typer* t) {
+  if (type->Is(Type::NonCallable())) return t->singleton_true_;
+  if (!type->Maybe(Type::NonCallable())) return t->singleton_false_;
   return Type::Boolean();
 }
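+
+// Illustrative example: an input typed Type::Function() satisfies
+// Is(Type::DetectableCallable()), so ObjectIsDetectableCallable folds to
+// singleton_true_, while Function cannot overlap Type::NonCallable(), so
+// ObjectIsNonCallable folds to singleton_false_.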
 
@@ -822,6 +834,10 @@
   return Type::Internal();
 }
 
+Type* Typer::Visitor::TypeArgumentsObjectState(Node* node) {
+  return Type::Internal();
+}
+
 Type* Typer::Visitor::TypeObjectState(Node* node) { return Type::Internal(); }
 
 Type* Typer::Visitor::TypeTypedObjectState(Node* node) {
@@ -893,8 +909,7 @@
       (lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
     return t->singleton_false_;
   }
-  if ((lhs->Is(t->singleton_the_hole_) || rhs->Is(t->singleton_the_hole_)) &&
-      !lhs->Maybe(rhs)) {
+  if ((lhs->Is(Type::Hole()) || rhs->Is(Type::Hole())) && !lhs->Maybe(rhs)) {
     return t->singleton_false_;
   }
   if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
@@ -1041,6 +1056,9 @@
 
 // JS unary operators.
 
+Type* Typer::Visitor::TypeJSClassOf(Node* node) {
+  return Type::InternalizedStringOrNull();
+}
 
 Type* Typer::Visitor::TypeJSTypeOf(Node* node) {
   return Type::InternalizedString();
@@ -1233,6 +1251,15 @@
   return nullptr;
 }
 
+Type* Typer::Visitor::TypeJSStoreNamedOwn(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
+
+Type* Typer::Visitor::TypeJSStoreDataPropertyInLiteral(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
 
 Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
   return Type::Boolean();
@@ -1240,12 +1267,21 @@
 
 Type* Typer::Visitor::TypeJSHasProperty(Node* node) { return Type::Boolean(); }
 
-Type* Typer::Visitor::TypeJSInstanceOf(Node* node) { return Type::Boolean(); }
+// JS instanceof operator.
 
-Type* Typer::Visitor::TypeJSOrdinaryHasInstance(Node* node) {
+Type* Typer::Visitor::JSInstanceOfTyper(Type* lhs, Type* rhs, Typer* t) {
   return Type::Boolean();
 }
 
+Type* Typer::Visitor::JSOrdinaryHasInstanceTyper(Type* lhs, Type* rhs,
+                                                 Typer* t) {
+  return Type::Boolean();
+}
+
+Type* Typer::Visitor::TypeJSGetSuperConstructor(Node* node) {
+  return Type::Callable();
+}
+
 // JS context operators.
 
 
@@ -1291,12 +1327,13 @@
 
 // JS other operators.
 
+Type* Typer::Visitor::TypeJSConstruct(Node* node) { return Type::Receiver(); }
 
-Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
+Type* Typer::Visitor::TypeJSConstructWithSpread(Node* node) {
   return Type::Receiver();
 }
 
-Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
+Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
   if (fun->IsHeapConstant() && fun->AsHeapConstant()->Value()->IsJSFunction()) {
     Handle<JSFunction> function =
         Handle<JSFunction>::cast(fun->AsHeapConstant()->Value());
@@ -1344,6 +1381,8 @@
         case kMathClz32:
           return t->cache_.kZeroToThirtyTwo;
         // Date functions.
+        case kDateNow:
+          return t->cache_.kTimeValueType;
         case kDateGetDate:
           return t->cache_.kJSDateDayType;
         case kDateGetDay:
@@ -1363,6 +1402,7 @@
           return t->cache_.kJSDateSecondType;
         case kDateGetTime:
           return t->cache_.kJSDateValueType;
+
         // Number functions.
         case kNumberIsFinite:
         case kNumberIsInteger:
@@ -1375,16 +1415,41 @@
           return t->cache_.kIntegerOrMinusZeroOrNaN;
         case kNumberToString:
           return Type::String();
+
         // String functions.
         case kStringCharCodeAt:
           return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
                              t->zone());
         case kStringCharAt:
+          return Type::String();
+        case kStringCodePointAt:
+          return Type::Union(Type::Range(0.0, String::kMaxCodePoint, t->zone()),
+                             Type::Undefined(), t->zone());
         case kStringConcat:
         case kStringFromCharCode:
+        case kStringFromCodePoint:
+          return Type::String();
+        case kStringIndexOf:
+        case kStringLastIndexOf:
+          return Type::Range(-1.0, String::kMaxLength - 1.0, t->zone());
+        case kStringEndsWith:
+        case kStringIncludes:
+          return Type::Boolean();
+        case kStringRaw:
+        case kStringRepeat:
+        case kStringSlice:
+          return Type::String();
+        case kStringStartsWith:
+          return Type::Boolean();
         case kStringSubstr:
+        case kStringSubstring:
         case kStringToLowerCase:
+        case kStringToString:
         case kStringToUpperCase:
+        case kStringTrim:
+        case kStringTrimLeft:
+        case kStringTrimRight:
+        case kStringValueOf:
           return Type::String();
 
         case kStringIterator:
@@ -1401,15 +1466,59 @@
           return Type::OtherObject();
 
         // Array functions.
+        case kArrayIsArray:
+          return Type::Boolean();
+        case kArrayConcat:
+          return Type::Receiver();
+        case kArrayEvery:
+          return Type::Boolean();
+        case kArrayFill:
+        case kArrayFilter:
+          return Type::Receiver();
+        case kArrayFindIndex:
+          return Type::Range(-1, kMaxSafeInteger, t->zone());
+        case kArrayForEach:
+          return Type::Undefined();
+        case kArrayIncludes:
+          return Type::Boolean();
         case kArrayIndexOf:
+          return Type::Range(-1, kMaxSafeInteger, t->zone());
+        case kArrayJoin:
+          return Type::String();
         case kArrayLastIndexOf:
           return Type::Range(-1, kMaxSafeInteger, t->zone());
+        case kArrayMap:
+          return Type::Receiver();
         case kArrayPush:
           return t->cache_.kPositiveSafeInteger;
+        case kArrayReverse:
+        case kArraySlice:
+          return Type::Receiver();
+        case kArraySome:
+          return Type::Boolean();
+        case kArraySplice:
+          return Type::Receiver();
+        case kArrayUnshift:
+          return t->cache_.kPositiveSafeInteger;
 
         // Object functions.
+        case kObjectAssign:
+        case kObjectCreate:
+          return Type::OtherObject();
         case kObjectHasOwnProperty:
           return Type::Boolean();
+        case kObjectToString:
+          return Type::String();
+
+        // RegExp functions.
+        case kRegExpCompile:
+          return Type::OtherObject();
+        case kRegExpExec:
+          return Type::Union(Type::OtherObject(), Type::Null(), t->zone());
+        case kRegExpTest:
+          return Type::Boolean();
+        case kRegExpToString:
+          return Type::String();
 
         // Function functions.
         case kFunctionHasInstance:
@@ -1426,6 +1535,46 @@
         case kGlobalIsFinite:
         case kGlobalIsNaN:
           return Type::Boolean();
+
+        // Map functions.
+        case kMapClear:
+        case kMapForEach:
+          return Type::Undefined();
+        case kMapDelete:
+        case kMapHas:
+          return Type::Boolean();
+        case kMapEntries:
+        case kMapKeys:
+        case kMapSet:
+        case kMapValues:
+          return Type::OtherObject();
+
+        // Set functions.
+        case kSetAdd:
+        case kSetEntries:
+        case kSetKeys:
+        case kSetValues:
+          return Type::OtherObject();
+        case kSetClear:
+        case kSetForEach:
+          return Type::Undefined();
+        case kSetDelete:
+        case kSetHas:
+          return Type::Boolean();
+
+        // WeakMap functions.
+        case kWeakMapDelete:
+        case kWeakMapHas:
+          return Type::Boolean();
+        case kWeakMapSet:
+          return Type::OtherObject();
+
+        // WeakSet functions.
+        case kWeakSetAdd:
+          return Type::OtherObject();
+        case kWeakSetDelete:
+        case kWeakSetHas:
+          return Type::Boolean();
         default:
           break;
       }
@@ -1434,13 +1583,19 @@
   return Type::NonInternal();
 }
 
-
-Type* Typer::Visitor::TypeJSCallFunction(Node* node) {
-  // TODO(bmeurer): We could infer better types if we wouldn't ignore the
-  // argument types for the JSCallFunctionTyper above.
-  return TypeUnaryOp(node, JSCallFunctionTyper);
+Type* Typer::Visitor::TypeJSCallForwardVarargs(Node* node) {
+  return TypeUnaryOp(node, JSCallTyper);
 }
 
+Type* Typer::Visitor::TypeJSCall(Node* node) {
+  // TODO(bmeurer): We could infer better types if we didn't ignore the
+  // argument types for the JSCallTyper above.
+  return TypeUnaryOp(node, JSCallTyper);
+}
+
+Type* Typer::Visitor::TypeJSCallWithSpread(Node* node) {
+  return TypeUnaryOp(node, JSCallTyper);
+}
 
 Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
   switch (CallRuntimeParametersOf(node->op()).id()) {
@@ -1468,6 +1623,8 @@
       return TypeUnaryOp(node, ToObject);
     case Runtime::kInlineToString:
       return TypeUnaryOp(node, ToString);
+    case Runtime::kInlineClassOf:
+      return Type::InternalizedStringOrNull();
     case Runtime::kHasInPrototypeChain:
       return Type::Boolean();
     default:
@@ -1486,7 +1643,7 @@
 
 
 Type* Typer::Visitor::TypeJSForInNext(Node* node) {
-  return Type::Union(Type::Name(), Type::Undefined(), zone());
+  return Type::Union(Type::String(), Type::Undefined(), zone());
 }
 
 
@@ -1530,6 +1687,8 @@
 
 Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
 
+Type* Typer::Visitor::TypeJSDebugger(Node* node) { return Type::Any(); }
+
 // Simplified operators.
 
 Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
@@ -1595,6 +1754,8 @@
   return Type::String();
 }
 
+Type* Typer::Visitor::TypeStringCharAt(Node* node) { return Type::String(); }
+
 Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
   return typer_->cache_.kUint16;
 }
@@ -1607,6 +1768,10 @@
   return TypeUnaryOp(node, StringFromCodePointTyper);
 }
 
+Type* Typer::Visitor::TypeStringIndexOf(Node* node) {
+  return Type::Range(-1.0, String::kMaxLength - 1.0, zone());
+}
+
 Type* Typer::Visitor::TypeCheckBounds(Node* node) {
   Type* index = Operand(node, 0);
   Type* length = Operand(node, 1);
@@ -1628,6 +1793,11 @@
   return nullptr;
 }
 
+Type* Typer::Visitor::TypeCheckInternalizedString(Node* node) {
+  Type* arg = Operand(node, 0);
+  return Type::Intersect(arg, Type::InternalizedString(), zone());
+}
+
 Type* Typer::Visitor::TypeCheckMaps(Node* node) {
   UNREACHABLE();
   return nullptr;
@@ -1638,6 +1808,11 @@
   return Type::Intersect(arg, Type::Number(), zone());
 }
 
+Type* Typer::Visitor::TypeCheckReceiver(Node* node) {
+  Type* arg = Operand(node, 0);
+  return Type::Intersect(arg, Type::Receiver(), zone());
+}
+
 Type* Typer::Visitor::TypeCheckSmi(Node* node) {
   Type* arg = Operand(node, 0);
   return Type::Intersect(arg, Type::SignedSmall(), zone());
@@ -1726,8 +1901,12 @@
   return nullptr;
 }
 
-Type* Typer::Visitor::TypeObjectIsCallable(Node* node) {
-  return TypeUnaryOp(node, ObjectIsCallable);
+Type* Typer::Visitor::TypeObjectIsDetectableCallable(Node* node) {
+  return TypeUnaryOp(node, ObjectIsDetectableCallable);
+}
+
+Type* Typer::Visitor::TypeObjectIsNonCallable(Node* node) {
+  return TypeUnaryOp(node, ObjectIsNonCallable);
 }
 
 Type* Typer::Visitor::TypeObjectIsNumber(Node* node) {
@@ -1752,6 +1931,14 @@
   return TypeUnaryOp(node, ObjectIsUndetectable);
 }
 
+Type* Typer::Visitor::TypeNewUnmappedArgumentsElements(Node* node) {
+  return Type::OtherInternal();
+}
+
+Type* Typer::Visitor::TypeNewRestParameterElements(Node* node) {
+  return Type::OtherInternal();
+}
+
 Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
   return Type::Boolean();
 }
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
index 7f6f90a..09b0b4d 100644
--- a/src/compiler/typer.h
+++ b/src/compiler/typer.h
@@ -50,9 +50,9 @@
   TypeCache const& cache_;
   OperationTyper operation_typer_;
 
+  Type* singleton_empty_string_;
   Type* singleton_false_;
   Type* singleton_true_;
-  Type* singleton_the_hole_;
   Type* falsish_;
   Type* truish_;
 
diff --git a/src/compiler/types.cc b/src/compiler/types.cc
index 806bd8f..f28a56a 100644
--- a/src/compiler/types.cc
+++ b/src/compiler/types.cc
@@ -7,6 +7,7 @@
 #include "src/compiler/types.h"
 
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 #include "src/ostreams.h"
 
 namespace v8 {
@@ -151,6 +152,8 @@
     case ONE_BYTE_STRING_TYPE:
     case CONS_STRING_TYPE:
     case CONS_ONE_BYTE_STRING_TYPE:
+    case THIN_STRING_TYPE:
+    case THIN_ONE_BYTE_STRING_TYPE:
     case SLICED_STRING_TYPE:
     case SLICED_ONE_BYTE_STRING_TYPE:
     case EXTERNAL_STRING_TYPE:
@@ -187,8 +190,6 @@
     }
     case HEAP_NUMBER_TYPE:
       return kNumber;
-    case SIMD128_VALUE_TYPE:
-      return kSimd;
     case JS_OBJECT_TYPE:
     case JS_ARGUMENTS_TYPE:
     case JS_ERROR_TYPE:
@@ -196,7 +197,17 @@
     case JS_GLOBAL_PROXY_TYPE:
     case JS_API_OBJECT_TYPE:
     case JS_SPECIAL_API_OBJECT_TYPE:
-      if (map->is_undetectable()) return kOtherUndetectable;
+      if (map->is_undetectable()) {
+        // Currently we assume that every undetectable receiver is also
+        // callable, which is what we need to support document.all.  We
+        // could add another Type bit to support other use cases in the
+        // future if necessary.
+        DCHECK(map->is_callable());
+        return kOtherUndetectable;
+      }
+      if (map->is_callable()) {
+        return kOtherCallable;
+      }
       return kOtherObject;
     case JS_VALUE_TYPE:
     case JS_MESSAGE_OBJECT_TYPE:
@@ -204,7 +215,6 @@
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
     case JS_MODULE_NAMESPACE_TYPE:
-    case JS_FIXED_ARRAY_ITERATOR_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
     case JS_ARRAY_TYPE:
     case JS_REGEXP_TYPE:  // TODO(rossberg): there should be a RegExp type.
@@ -215,6 +225,7 @@
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
     case JS_STRING_ITERATOR_TYPE:
+    case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
 
     case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
     case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
@@ -254,16 +265,21 @@
 
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
+    case JS_PROMISE_CAPABILITY_TYPE:
     case JS_PROMISE_TYPE:
-    case JS_BOUND_FUNCTION_TYPE:
+      DCHECK(!map->is_callable());
       DCHECK(!map->is_undetectable());
       return kOtherObject;
+    case JS_BOUND_FUNCTION_TYPE:
+      DCHECK(!map->is_undetectable());
+      return kBoundFunction;
     case JS_FUNCTION_TYPE:
       DCHECK(!map->is_undetectable());
       return kFunction;
     case JS_PROXY_TYPE:
       DCHECK(!map->is_undetectable());
-      return kProxy;
+      if (map->is_callable()) return kCallableProxy;
+      return kOtherProxy;
     case MAP_TYPE:
     case ALLOCATION_SITE_TYPE:
     case ACCESSOR_INFO_TYPE:
@@ -297,12 +313,9 @@
     case INTERCEPTOR_INFO_TYPE:
     case CALL_HANDLER_INFO_TYPE:
     case OBJECT_TEMPLATE_INFO_TYPE:
-    case SIGNATURE_INFO_TYPE:
-    case TYPE_SWITCH_INFO_TYPE:
     case ALLOCATION_MEMENTO_TYPE:
     case TYPE_FEEDBACK_INFO_TYPE:
     case ALIASED_ARGUMENTS_ENTRY_TYPE:
-    case BOX_TYPE:
     case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
     case PROMISE_REACTION_JOB_INFO_TYPE:
     case DEBUG_INFO_TYPE:
@@ -310,8 +323,10 @@
     case CELL_TYPE:
     case WEAK_CELL_TYPE:
     case PROTOTYPE_INFO_TYPE:
+    case TUPLE2_TYPE:
     case TUPLE3_TYPE:
     case CONTEXT_EXTENSION_TYPE:
+    case CONSTANT_ELEMENTS_PAIR_TYPE:
       UNREACHABLE();
       return kNone;
   }
@@ -447,7 +462,7 @@
                                    i::Handle<i::HeapObject> object)
     : TypeBase(kHeapConstant), bitset_(bitset), object_(object) {
   DCHECK(!object->IsHeapNumber());
-  DCHECK(!object->IsString());
+  DCHECK_IMPLIES(object->IsString(), object->IsInternalizedString());
 }
 
 // -----------------------------------------------------------------------------
@@ -823,17 +838,8 @@
     return Range(v, v, zone);
   } else if (value->IsHeapNumber()) {
     return NewConstant(value->Number(), zone);
-  } else if (value->IsString()) {
-    bitset b = BitsetType::Lub(*value);
-    DCHECK(b == BitsetType::kInternalizedString ||
-           b == BitsetType::kOtherString);
-    if (b == BitsetType::kInternalizedString) {
-      return Type::InternalizedString();
-    } else if (b == BitsetType::kOtherString) {
-      return Type::OtherString();
-    } else {
-      UNREACHABLE();
-    }
+  } else if (value->IsString() && !value->IsInternalizedString()) {
+    return Type::OtherString();
   }
   return HeapConstant(i::Handle<i::HeapObject>::cast(value), zone);
 }
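+
+// Illustrative example: NewConstant on a non-internalized string yields the
+// bitset type OtherString, while an internalized string now falls through to
+// HeapConstant(), which keeps the exact constant as its type.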
diff --git a/src/compiler/types.h b/src/compiler/types.h
index e783570..9e55a0b 100644
--- a/src/compiler/types.h
+++ b/src/compiler/types.h
@@ -116,56 +116,75 @@
   V(Symbol,              1u << 12)  \
   V(InternalizedString,  1u << 13)  \
   V(OtherString,         1u << 14)  \
-  V(Simd,                1u << 15)  \
-  V(OtherObject,         1u << 17)  \
-  V(OtherUndetectable,   1u << 16)  \
-  V(Proxy,               1u << 18)  \
-  V(Function,            1u << 19)  \
-  V(Hole,                1u << 20)  \
-  V(OtherInternal,       1u << 21)  \
-  V(ExternalPointer,     1u << 22)  \
+  V(OtherCallable,       1u << 15)  \
+  V(OtherObject,         1u << 16)  \
+  V(OtherUndetectable,   1u << 17)  \
+  V(CallableProxy,       1u << 18)  \
+  V(OtherProxy,          1u << 19)  \
+  V(Function,            1u << 20)  \
+  V(BoundFunction,       1u << 21)  \
+  V(Hole,                1u << 22)  \
+  V(OtherInternal,       1u << 23)  \
+  V(ExternalPointer,     1u << 24)  \
   \
-  V(Signed31,                   kUnsigned30 | kNegative31) \
-  V(Signed32,                   kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
-  V(Signed32OrMinusZero,        kSigned32 | kMinusZero) \
-  V(Signed32OrMinusZeroOrNaN,   kSigned32 | kMinusZero | kNaN) \
-  V(Negative32,                 kNegative31 | kOtherSigned32) \
-  V(Unsigned31,                 kUnsigned30 | kOtherUnsigned31) \
-  V(Unsigned32,                 kUnsigned30 | kOtherUnsigned31 | \
-                                kOtherUnsigned32) \
-  V(Unsigned32OrMinusZero,      kUnsigned32 | kMinusZero) \
-  V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
-  V(Integral32,                 kSigned32 | kUnsigned32) \
-  V(PlainNumber,                kIntegral32 | kOtherNumber) \
-  V(OrderedNumber,              kPlainNumber | kMinusZero) \
-  V(MinusZeroOrNaN,             kMinusZero | kNaN) \
-  V(Number,                     kOrderedNumber | kNaN) \
-  V(String,                     kInternalizedString | kOtherString) \
-  V(UniqueName,                 kSymbol | kInternalizedString) \
-  V(Name,                       kSymbol | kString) \
-  V(BooleanOrNumber,            kBoolean | kNumber) \
-  V(BooleanOrNullOrNumber,      kBooleanOrNumber | kNull) \
-  V(BooleanOrNullOrUndefined,   kBoolean | kNull | kUndefined) \
-  V(NullOrNumber,               kNull | kNumber) \
-  V(NullOrUndefined,            kNull | kUndefined) \
-  V(Undetectable,               kNullOrUndefined | kOtherUndetectable) \
-  V(NumberOrOddball,            kNumber | kNullOrUndefined | kBoolean | kHole) \
-  V(NumberOrSimdOrString,       kNumber | kSimd | kString) \
-  V(NumberOrString,             kNumber | kString) \
-  V(NumberOrUndefined,          kNumber | kUndefined) \
-  V(PlainPrimitive,             kNumberOrString | kBoolean | kNullOrUndefined) \
-  V(Primitive,                  kSymbol | kSimd | kPlainPrimitive) \
-  V(DetectableReceiver,         kFunction | kOtherObject | kProxy) \
-  V(Object,                     kFunction | kOtherObject | kOtherUndetectable) \
-  V(Receiver,                   kObject | kProxy) \
-  V(ReceiverOrUndefined,        kReceiver | kUndefined) \
-  V(StringOrReceiver,           kString | kReceiver) \
-  V(Unique,                     kBoolean | kUniqueName | kNull | kUndefined | \
-                                kReceiver) \
-  V(Internal,                   kHole | kExternalPointer | kOtherInternal) \
-  V(NonInternal,                kPrimitive | kReceiver) \
-  V(NonNumber,                  kUnique | kString | kInternal) \
-  V(Any,                        0xfffffffeu)
+  V(Signed31,                     kUnsigned30 | kNegative31) \
+  V(Signed32,                     kSigned31 | kOtherUnsigned31 | \
+                                  kOtherSigned32) \
+  V(Signed32OrMinusZero,          kSigned32 | kMinusZero) \
+  V(Signed32OrMinusZeroOrNaN,     kSigned32 | kMinusZero | kNaN) \
+  V(Negative32,                   kNegative31 | kOtherSigned32) \
+  V(Unsigned31,                   kUnsigned30 | kOtherUnsigned31) \
+  V(Unsigned32,                   kUnsigned30 | kOtherUnsigned31 | \
+                                  kOtherUnsigned32) \
+  V(Unsigned32OrMinusZero,        kUnsigned32 | kMinusZero) \
+  V(Unsigned32OrMinusZeroOrNaN,   kUnsigned32 | kMinusZero | kNaN) \
+  V(Integral32,                   kSigned32 | kUnsigned32) \
+  V(Integral32OrMinusZeroOrNaN,   kIntegral32 | kMinusZero | kNaN) \
+  V(PlainNumber,                  kIntegral32 | kOtherNumber) \
+  V(OrderedNumber,                kPlainNumber | kMinusZero) \
+  V(MinusZeroOrNaN,               kMinusZero | kNaN) \
+  V(Number,                       kOrderedNumber | kNaN) \
+  V(String,                       kInternalizedString | kOtherString) \
+  V(UniqueName,                   kSymbol | kInternalizedString) \
+  V(Name,                         kSymbol | kString) \
+  V(InternalizedStringOrNull,     kInternalizedString | kNull) \
+  V(BooleanOrNumber,              kBoolean | kNumber) \
+  V(BooleanOrNullOrNumber,        kBooleanOrNumber | kNull) \
+  V(BooleanOrNullOrUndefined,     kBoolean | kNull | kUndefined) \
+  V(Oddball,                      kBooleanOrNullOrUndefined | kHole) \
+  V(NullOrNumber,                 kNull | kNumber) \
+  V(NullOrUndefined,              kNull | kUndefined) \
+  V(Undetectable,                 kNullOrUndefined | kOtherUndetectable) \
+  V(NumberOrOddball,              kNumber | kNullOrUndefined | kBoolean | \
+                                  kHole) \
+  V(NumberOrString,               kNumber | kString) \
+  V(NumberOrUndefined,            kNumber | kUndefined) \
+  V(PlainPrimitive,               kNumberOrString | kBoolean | \
+                                  kNullOrUndefined) \
+  V(Primitive,                    kSymbol | kPlainPrimitive) \
+  V(OtherUndetectableOrUndefined, kOtherUndetectable | kUndefined) \
+  V(Proxy,                        kCallableProxy | kOtherProxy) \
+  V(DetectableCallable,           kFunction | kBoundFunction | \
+                                  kOtherCallable | kCallableProxy) \
+  V(Callable,                     kDetectableCallable | kOtherUndetectable) \
+  V(NonCallable,                  kOtherObject | kOtherProxy) \
+  V(NonCallableOrNull,            kNonCallable | kNull) \
+  V(DetectableObject,             kFunction | kBoundFunction | \
+                                  kOtherCallable | kOtherObject) \
+  V(DetectableReceiver,           kDetectableObject | kProxy) \
+  V(DetectableReceiverOrNull,     kDetectableReceiver | kNull) \
+  V(Object,                       kDetectableObject | kOtherUndetectable) \
+  V(Receiver,                     kObject | kProxy) \
+  V(ReceiverOrUndefined,          kReceiver | kUndefined) \
+  V(ReceiverOrNullOrUndefined,    kReceiver | kNull | kUndefined) \
+  V(SymbolOrReceiver,             kSymbol | kReceiver) \
+  V(StringOrReceiver,             kString | kReceiver) \
+  V(Unique,                       kBoolean | kUniqueName | kNull | \
+                                  kUndefined | kReceiver) \
+  V(Internal,                     kHole | kExternalPointer | kOtherInternal) \
+  V(NonInternal,                  kPrimitive | kReceiver) \
+  V(NonNumber,                    kUnique | kString | kInternal) \
+  V(Any,                          0xfffffffeu)
 
 // clang-format on
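+
+// Illustrative reading of the new callable partition: a JSFunction map is
+// typed kFunction, a callable JSProxy kCallableProxy, and both satisfy
+// Is(DetectableCallable); a plain JSObject map is typed kOtherObject and
+// satisfies Is(NonCallable).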
 
diff --git a/src/compiler/value-numbering-reducer.cc b/src/compiler/value-numbering-reducer.cc
index 30473f2..38e1f0c 100644
--- a/src/compiler/value-numbering-reducer.cc
+++ b/src/compiler/value-numbering-reducer.cc
@@ -18,8 +18,8 @@
 
 size_t HashCode(Node* node) {
   size_t h = base::hash_combine(node->op()->HashCode(), node->InputCount());
-  for (int j = 0; j < node->InputCount(); ++j) {
-    h = base::hash_combine(h, node->InputAt(j)->id());
+  for (Node* input : node->inputs()) {
+    h = base::hash_combine(h, input->id());
   }
   return h;
 }
@@ -32,10 +32,17 @@
   DCHECK_NOT_NULL(b->op());
   if (!a->op()->Equals(b->op())) return false;
   if (a->InputCount() != b->InputCount()) return false;
-  for (int j = 0; j < a->InputCount(); ++j) {
-    DCHECK_NOT_NULL(a->InputAt(j));
-    DCHECK_NOT_NULL(b->InputAt(j));
-    if (a->InputAt(j)->id() != b->InputAt(j)->id()) return false;
+  Node::Inputs aInputs = a->inputs();
+  Node::Inputs bInputs = b->inputs();
+
+  auto aIt = aInputs.begin();
+  auto bIt = bInputs.begin();
+  auto aEnd = aInputs.end();
+
+  for (; aIt != aEnd; ++aIt, ++bIt) {
+    DCHECK_NOT_NULL(*aIt);
+    DCHECK_NOT_NULL(*bIt);
+    if ((*aIt)->id() != (*bIt)->id()) return false;
   }
   return true;
 }
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index 872305b..7f63ceb 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -14,11 +14,12 @@
 #include "src/compiler/all-nodes.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
-#include "src/compiler/node.h"
+#include "src/compiler/js-operator.h"
 #include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
 #include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
 #include "src/compiler/operator-properties.h"
+#include "src/compiler/operator.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/ostreams.h"
@@ -150,7 +151,7 @@
                   "control");
     }
 
-    // Verify that no-no-throw nodes only have IfSuccess/IfException control
+    // Verify that nodes that can throw only have IfSuccess/IfException control
     // uses.
     if (!node->op()->HasProperty(Operator::kNoThrow)) {
       int count_success = 0, count_exception = 0;
@@ -206,6 +207,8 @@
       }
       CHECK_EQ(1, count_true);
       CHECK_EQ(1, count_false);
+      // The condition must be a Boolean.
+      CheckValueInputIs(node, 0, Type::Boolean());
       // Type is empty.
       CheckNotTyped(node);
       break;
@@ -283,6 +286,11 @@
       // Type is empty.
       CheckNotTyped(node);
       break;
+    case IrOpcode::kTrapIf:
+    case IrOpcode::kTrapUnless:
+      // Type is empty.
+      CheckNotTyped(node);
+      break;
     case IrOpcode::kDeoptimize:
     case IrOpcode::kReturn:
     case IrOpcode::kThrow:
@@ -402,6 +410,10 @@
       CHECK_EQ(0, effect_count);
       CHECK_EQ(0, control_count);
       CHECK_EQ(3, value_count);
+      // The condition must be a Boolean.
+      CheckValueInputIs(node, 0, Type::Boolean());
+      // Type can be anything.
+      CheckTypeIs(node, Type::Any());
       break;
     }
     case IrOpcode::kPhi: {
@@ -484,6 +496,7 @@
     }
     case IrOpcode::kStateValues:
     case IrOpcode::kTypedStateValues:
+    case IrOpcode::kArgumentsObjectState:
     case IrOpcode::kObjectState:
     case IrOpcode::kTypedObjectState:
       // TODO(jarin): what are the constraints on these?
@@ -590,16 +603,43 @@
       CheckTypeIs(node, Type::OtherObject());
       break;
     case IrOpcode::kJSLoadProperty:
+      // Type can be anything.
+      CheckTypeIs(node, Type::Any());
+      CHECK(PropertyAccessOf(node->op()).feedback().IsValid());
+      break;
     case IrOpcode::kJSLoadNamed:
+      // Type can be anything.
+      CheckTypeIs(node, Type::Any());
+      CHECK(NamedAccessOf(node->op()).feedback().IsValid());
+      break;
     case IrOpcode::kJSLoadGlobal:
       // Type can be anything.
       CheckTypeIs(node, Type::Any());
+      CHECK(LoadGlobalParametersOf(node->op()).feedback().IsValid());
       break;
     case IrOpcode::kJSStoreProperty:
+      // Type is empty.
+      CheckNotTyped(node);
+      CHECK(PropertyAccessOf(node->op()).feedback().IsValid());
+      break;
     case IrOpcode::kJSStoreNamed:
+      // Type is empty.
+      CheckNotTyped(node);
+      CHECK(NamedAccessOf(node->op()).feedback().IsValid());
+      break;
     case IrOpcode::kJSStoreGlobal:
       // Type is empty.
       CheckNotTyped(node);
+      CHECK(StoreGlobalParametersOf(node->op()).feedback().IsValid());
+      break;
+    case IrOpcode::kJSStoreNamedOwn:
+      // Type is empty.
+      CheckNotTyped(node);
+      CHECK(StoreNamedOwnParametersOf(node->op()).feedback().IsValid());
+      break;
+    case IrOpcode::kJSStoreDataPropertyInLiteral:
+      // Type is empty.
+      CheckNotTyped(node);
       break;
     case IrOpcode::kJSDeleteProperty:
     case IrOpcode::kJSHasProperty:
@@ -608,9 +648,20 @@
       // Type is Boolean.
       CheckTypeIs(node, Type::Boolean());
       break;
+    case IrOpcode::kJSClassOf:
+      // Type is InternalizedString \/ Null.
+      CheckTypeIs(node, Type::InternalizedStringOrNull());
+      break;
     case IrOpcode::kJSTypeOf:
-      // Type is String.
-      CheckTypeIs(node, Type::String());
+      // Type is InternalizedString.
+      CheckTypeIs(node, Type::InternalizedString());
+      break;
+    case IrOpcode::kJSGetSuperConstructor:
+      // We don't check the input for Type::Function because
+      // this_function can be context-allocated.
+      // Any -> Callable.
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckTypeIs(node, Type::Callable());
       break;
 
     case IrOpcode::kJSLoadContext:
@@ -635,12 +686,15 @@
       break;
     }
 
-    case IrOpcode::kJSCallConstruct:
+    case IrOpcode::kJSConstruct:
+    case IrOpcode::kJSConstructWithSpread:
     case IrOpcode::kJSConvertReceiver:
       // Type is Receiver.
       CheckTypeIs(node, Type::Receiver());
       break;
-    case IrOpcode::kJSCallFunction:
+    case IrOpcode::kJSCallForwardVarargs:
+    case IrOpcode::kJSCall:
+    case IrOpcode::kJSCallWithSpread:
     case IrOpcode::kJSCallRuntime:
       // Type can be anything.
       CheckTypeIs(node, Type::Any());
@@ -680,6 +734,7 @@
       break;
 
     case IrOpcode::kJSStackCheck:
+    case IrOpcode::kJSDebugger:
       // Type is empty.
       CheckNotTyped(node);
       break;
@@ -861,6 +916,12 @@
       CheckValueInputIs(node, 1, Type::String());
       CheckTypeIs(node, Type::Boolean());
       break;
+    case IrOpcode::kStringCharAt:
+      // (String, Unsigned32) -> String
+      CheckValueInputIs(node, 0, Type::String());
+      CheckValueInputIs(node, 1, Type::Unsigned32());
+      CheckTypeIs(node, Type::String());
+      break;
     case IrOpcode::kStringCharCodeAt:
       // (String, Unsigned32) -> UnsignedSmall
       CheckValueInputIs(node, 0, Type::String());
@@ -877,13 +938,22 @@
       CheckValueInputIs(node, 0, Type::Number());
       CheckTypeIs(node, Type::String());
       break;
-    case IrOpcode::kReferenceEqual: {
+    case IrOpcode::kStringIndexOf:
+      // (String, String, SignedSmall) -> SignedSmall
+      CheckValueInputIs(node, 0, Type::String());
+      CheckValueInputIs(node, 1, Type::String());
+      CheckValueInputIs(node, 2, Type::SignedSmall());
+      CheckTypeIs(node, Type::SignedSmall());
+      break;
+
+    case IrOpcode::kReferenceEqual:
       // (Unique, Any) -> Boolean  and
       // (Any, Unique) -> Boolean
       CheckTypeIs(node, Type::Boolean());
       break;
-    }
-    case IrOpcode::kObjectIsCallable:
+
+    case IrOpcode::kObjectIsDetectableCallable:
+    case IrOpcode::kObjectIsNonCallable:
     case IrOpcode::kObjectIsNumber:
     case IrOpcode::kObjectIsReceiver:
     case IrOpcode::kObjectIsSmi:
@@ -893,6 +963,10 @@
       CheckValueInputIs(node, 0, Type::Any());
       CheckTypeIs(node, Type::Boolean());
       break;
+    case IrOpcode::kNewRestParameterElements:
+    case IrOpcode::kNewUnmappedArgumentsElements:
+      CheckTypeIs(node, Type::OtherInternal());
+      break;
     case IrOpcode::kAllocate:
       CheckValueInputIs(node, 0, Type::PlainNumber());
       break;
@@ -910,8 +984,6 @@
       break;
     case IrOpcode::kTransitionElementsKind:
       CheckValueInputIs(node, 0, Type::Any());
-      CheckValueInputIs(node, 1, Type::Internal());
-      CheckValueInputIs(node, 2, Type::Internal());
       CheckNotTyped(node);
       break;
 
@@ -951,6 +1023,8 @@
       // CheckTypeIs(node, to));
       break;
     }
+    case IrOpcode::kChangeTaggedToTaggedSigned:
+      break;
     case IrOpcode::kTruncateTaggedToFloat64: {
       // NumberOrUndefined /\ Tagged -> Number /\ UntaggedFloat64
       // TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1041,6 +1115,10 @@
       CheckValueInputIs(node, 0, Type::Boolean());
       CheckNotTyped(node);
       break;
+    case IrOpcode::kCheckInternalizedString:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckTypeIs(node, Type::InternalizedString());
+      break;
     case IrOpcode::kCheckMaps:
       // (Any, Internal, ..., Internal) -> Any
       CheckValueInputIs(node, 0, Type::Any());
@@ -1053,6 +1131,10 @@
       CheckValueInputIs(node, 0, Type::Any());
       CheckTypeIs(node, Type::Number());
       break;
+    case IrOpcode::kCheckReceiver:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckTypeIs(node, Type::Receiver());
+      break;
     case IrOpcode::kCheckSmi:
       CheckValueInputIs(node, 0, Type::Any());
       break;
@@ -1140,6 +1222,7 @@
     // -----------------------
     case IrOpcode::kLoad:
     case IrOpcode::kProtectedLoad:
+    case IrOpcode::kProtectedStore:
     case IrOpcode::kStore:
     case IrOpcode::kStackSlot:
     case IrOpcode::kWord32And:
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 1b61c15..168178e 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -6,11 +6,12 @@
 
 #include <memory>
 
-#include "src/isolate-inl.h"
-
+#include "src/assembler-inl.h"
 #include "src/base/platform/elapsed-timer.h"
 #include "src/base/platform/platform.h"
-
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
 #include "src/compiler/access-builder.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/compiler-source-position-table.h"
@@ -27,15 +28,15 @@
 #include "src/compiler/pipeline.h"
 #include "src/compiler/simd-scalar-lowering.h"
 #include "src/compiler/zone-stats.h"
-
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
 #include "src/factory.h"
+#include "src/isolate-inl.h"
 #include "src/log-inl.h"
-
-#include "src/wasm/ast-decoder.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-limits.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 #include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-text.h"
 
 // TODO(titzer): pull WASM_64 up to a common header.
 #if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
@@ -64,13 +65,12 @@
   }
 }
 
-Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
-                         Handle<Context> context, Node** parameters,
-                         int parameter_count, Node** effect_ptr,
-                         Node* control) {
-  // At the moment we only allow 2 parameters. If more parameters are needed,
-  // then the size of {inputs} below has to be increased accordingly.
-  DCHECK(parameter_count <= 2);
+// Only call this function for code which is not reused across instantiations,
+// as we do not patch the embedded context.
+Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
+                                    Node* context, Node** parameters,
+                                    int parameter_count, Node** effect_ptr,
+                                    Node* control) {
   const Runtime::Function* fun = Runtime::FunctionForId(f);
   CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
       jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
@@ -78,7 +78,11 @@
   // CEntryStubConstant nodes have to be created and cached in the main
   // thread. At the moment this is only done for CEntryStubConstant(1).
   DCHECK_EQ(1, fun->result_size);
-  Node* inputs[8];
+  // At the moment we only allow 3 parameters. If more parameters are needed,
+  // increase this constant accordingly.
+  static const int kMaxParams = 3;
+  DCHECK_GE(kMaxParams, parameter_count);
+  Node* inputs[kMaxParams + 6];
   int count = 0;
   inputs[count++] = jsgraph->CEntryStubConstant(fun->result_size);
   for (int i = 0; i < parameter_count; i++) {
@@ -87,7 +91,7 @@
   inputs[count++] = jsgraph->ExternalConstant(
       ExternalReference(f, jsgraph->isolate()));         // ref
   inputs[count++] = jsgraph->Int32Constant(fun->nargs);  // arity
-  inputs[count++] = jsgraph->HeapConstant(context);      // context
+  inputs[count++] = context;                             // context
   inputs[count++] = *effect_ptr;
   inputs[count++] = control;
 
@@ -97,8 +101,23 @@
   return node;
 }
 
+Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
+                         Node** parameters, int parameter_count,
+                         Node** effect_ptr, Node* control) {
+  return BuildCallToRuntimeWithContext(f, jsgraph, jsgraph->NoContextConstant(),
+                                       parameters, parameter_count, effect_ptr,
+                                       control);
+}
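
For orientation, the call node assembled by BuildCallToRuntimeWithContext always consists of the CEntry stub, the parameters, and a fixed five-node tail, which is why kMaxParams + 6 slots suffice. A minimal counting sketch in plain C++ (no V8 types; the two-parameter example mirrors Runtime::kThrowWasmError above):

    #include <cstdio>

    int main() {
      const int parameter_count = 2;  // e.g. Runtime::kThrowWasmError above
      int count = 0;
      count += 1;                // CEntryStubConstant
      count += parameter_count;  // runtime call parameters
      count += 1;                // external reference (ref)
      count += 1;                // arity
      count += 1;                // context (NoContextConstant in the wrapper)
      count += 1;                // effect
      count += 1;                // control
      std::printf("inputs: %d of at most %d\n", count, 3 + 6);  // 8 of 9
    }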
+
 }  // namespace
 
+// TODO(eholk): Support trap handlers on other platforms.
+#if V8_TARGET_ARCH_X64 && V8_OS_LINUX
+const bool kTrapHandlerSupported = true;
+#else
+const bool kTrapHandlerSupported = false;
+#endif
+
 // A helper that handles building graph fragments for trapping.
 // To avoid generating a ton of redundant code that just calls the runtime
 // to trap, we generate a per-trap-reason block of code that all trap sites
@@ -159,21 +178,70 @@
     return TrapIfEq64(reason, node, 0, position);
   }
 
+  Builtins::Name GetBuiltinIdForTrap(wasm::TrapReason reason) {
+    if (builder_->module_ && !builder_->module_->instance->context.is_null()) {
+      switch (reason) {
+#define TRAPREASON_TO_MESSAGE(name) \
+  case wasm::k##name:               \
+    return Builtins::kThrowWasm##name;
+        FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
+#undef TRAPREASON_TO_MESSAGE
+        default:
+          UNREACHABLE();
+          return Builtins::builtin_count;
+      }
+    } else {
+      // We use Builtins::builtin_count as a marker to tell the code generator
+      // to generate a call to a testing C-function instead of a runtime
+      // function. This code should only be called from a cctest.
+      return Builtins::builtin_count;
+    }
+  }
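
For one entry of FOREACH_WASM_TRAPREASON, TRAPREASON_TO_MESSAGE expands to a case that maps the trap reason to its throwing builtin. A standalone mock with stand-in enums (hypothetical; the real types are wasm::TrapReason and Builtins::Name), assuming a reason named TrapDivByZero:

    #include <cstdio>

    // Stand-in enums (hypothetical; the real ones live in V8 headers).
    enum TrapReason { kTrapDivByZero };
    enum Name { kThrowWasmTrapDivByZero };

    #define TRAPREASON_TO_MESSAGE(name) \
      case k##name:                     \
        return kThrowWasm##name;

    Name GetBuiltinIdForTrap(TrapReason reason) {
      switch (reason) {
        TRAPREASON_TO_MESSAGE(TrapDivByZero)  // one expanded case per reason
      }
      return kThrowWasmTrapDivByZero;  // unreachable for valid reasons
    }

    int main() { std::printf("%d\n", GetBuiltinIdForTrap(kTrapDivByZero)); }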
+
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM ||      \
+    V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || \
+    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 ||    \
+    V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_X87
+#define WASM_TRAP_IF_SUPPORTED
+#endif
+
   // Add a trap if {cond} is true.
   void AddTrapIfTrue(wasm::TrapReason reason, Node* cond,
                      wasm::WasmCodePosition position) {
-    AddTrapIf(reason, cond, true, position);
+#ifdef WASM_TRAP_IF_SUPPORTED
+    if (FLAG_wasm_trap_if) {
+      int32_t trap_id = GetBuiltinIdForTrap(reason);
+      Node* node = graph()->NewNode(common()->TrapIf(trap_id), cond,
+                                    builder_->Effect(), builder_->Control());
+      *builder_->control_ = node;
+      builder_->SetSourcePosition(node, position);
+      return;
+    }
+#endif  // WASM_TRAP_IF_SUPPORTED
+    BuildTrapIf(reason, cond, true, position);
   }
 
   // Add a trap if {cond} is false.
   void AddTrapIfFalse(wasm::TrapReason reason, Node* cond,
                       wasm::WasmCodePosition position) {
-    AddTrapIf(reason, cond, false, position);
+#ifdef WASM_TRAP_IF_SUPPORTED
+    if (FLAG_wasm_trap_if) {
+      int32_t trap_id = GetBuiltinIdForTrap(reason);
+
+      Node* node = graph()->NewNode(common()->TrapUnless(trap_id), cond,
+                                    builder_->Effect(), builder_->Control());
+      *builder_->control_ = node;
+      builder_->SetSourcePosition(node, position);
+      return;
+    }
+#endif  // WASM_TRAP_IF_SUPPORTED
+
+    BuildTrapIf(reason, cond, false, position);
   }
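
The two entry points differ only in polarity: TrapIf fires when the condition is true, TrapUnless when it is false. A trivial standalone model of that polarity:

    #include <cassert>

    // TrapIf fires on a true condition, TrapUnless on a false one.
    bool TrapIfFires(bool cond) { return cond; }
    bool TrapUnlessFires(bool cond) { return !cond; }

    int main() {
      assert(TrapIfFires(true));       // AddTrapIfTrue path
      assert(TrapUnlessFires(false));  // AddTrapIfFalse path
    }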
 
   // Add a trap if {cond} is true or false according to {iftrue}.
-  void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue,
-                 wasm::WasmCodePosition position) {
+  void BuildTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue,
+                   wasm::WasmCodePosition position) {
     Node** effect_ptr = builder_->effect_;
     Node** control_ptr = builder_->control_;
     Node* before = *effect_ptr;
@@ -196,18 +264,18 @@
     }
   }
 
-  Node* GetTrapValue(wasm::LocalType type) {
+  Node* GetTrapValue(wasm::ValueType type) {
     switch (type) {
-      case wasm::kAstI32:
+      case wasm::kWasmI32:
         return jsgraph()->Int32Constant(0xdeadbeef);
-      case wasm::kAstI64:
+      case wasm::kWasmI64:
         return jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
-      case wasm::kAstF32:
+      case wasm::kWasmF32:
         return jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
-      case wasm::kAstF64:
+      case wasm::kWasmF64:
         return jsgraph()->Float64Constant(bit_cast<double>(0xdeadbeefdeadbeef));
         break;
-      case wasm::kAstS128:
+      case wasm::kWasmS128:
         return builder_->CreateS128Value(0xdeadbeef);
         break;
       default:
@@ -246,7 +314,6 @@
   }
 
   void BuildTrapCode(Node* reason_node, Node* position_node) {
-    Node* end;
     Node** control_ptr = builder_->control_;
     Node** effect_ptr = builder_->effect_;
     wasm::ModuleEnv* module = builder_->module_;
@@ -268,8 +335,7 @@
     if (module && !module->instance->context.is_null()) {
       Node* parameters[] = {trap_reason_smi,     // message id
                             trap_position_smi};  // byte position
-      BuildCallToRuntime(Runtime::kThrowWasmError, jsgraph(),
-                         module->instance->context, parameters,
+      BuildCallToRuntime(Runtime::kThrowWasmError, jsgraph(), parameters,
                          arraysize(parameters), effect_ptr, *control_ptr);
     }
     if (false) {
@@ -277,36 +343,36 @@
       Node* thrw =
           graph()->NewNode(common()->Throw(), jsgraph()->ZeroConstant(),
                            *effect_ptr, *control_ptr);
-      end = thrw;
+      MergeControlToEnd(jsgraph(), thrw);
     } else {
       // End the control flow by returning 0xdeadbeef
       Node* ret_value = GetTrapValue(builder_->GetFunctionSignature());
-      end = graph()->NewNode(jsgraph()->common()->Return(),
-                             jsgraph()->Int32Constant(0), ret_value,
-                             *effect_ptr, *control_ptr);
+      builder_->Return(ret_value);
     }
-
-    MergeControlToEnd(jsgraph(), end);
   }
 };
 
 WasmGraphBuilder::WasmGraphBuilder(
-    Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* function_signature,
+    wasm::ModuleEnv* module_env, Zone* zone, JSGraph* jsgraph,
+    wasm::FunctionSig* sig,
     compiler::SourcePositionTable* source_position_table)
     : zone_(zone),
       jsgraph_(jsgraph),
-      module_(nullptr),
-      mem_buffer_(nullptr),
-      mem_size_(nullptr),
+      module_(module_env),
+      signature_tables_(zone),
       function_tables_(zone),
       function_table_sizes_(zone),
-      control_(nullptr),
-      effect_(nullptr),
       cur_buffer_(def_buffer_),
       cur_bufsize_(kDefaultBufferSize),
       trap_(new (zone) WasmTrapHelper(this)),
-      function_signature_(function_signature),
+      sig_(sig),
       source_position_table_(source_position_table) {
+  for (size_t i = 0; i < sig->parameter_count(); i++) {
+    if (sig->GetParam(i) == wasm::kWasmS128) has_simd_ = true;
+  }
+  for (size_t i = 0; i < sig->return_count(); i++) {
+    if (sig->GetReturn(i) == wasm::kWasmS128) has_simd_ = true;
+  }
   DCHECK_NOT_NULL(jsgraph_);
 }
 
@@ -318,7 +384,7 @@
   return start;
 }
 
-Node* WasmGraphBuilder::Param(unsigned index, wasm::LocalType type) {
+Node* WasmGraphBuilder::Param(unsigned index) {
   return graph()->NewNode(jsgraph()->common()->Parameter(index),
                           graph()->start());
 }
@@ -376,7 +442,7 @@
   return graph()->NewNode(jsgraph()->common()->Merge(count), count, controls);
 }
 
-Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
+Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count, Node** vals,
                             Node* control) {
   DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
   Node** buf = Realloc(vals, count, count + 1);
@@ -412,43 +478,45 @@
 
 void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
                                   Node** effect, Node** control) {
-  if (effect == nullptr) {
-    effect = effect_;
-  }
-  if (control == nullptr) {
-    control = control_;
-  }
+  if (FLAG_wasm_no_stack_checks) return;
   // We do not generate stack checks for cctests.
-  if (module_ && !module_->instance->context.is_null()) {
-    Node* limit = graph()->NewNode(
-        jsgraph()->machine()->Load(MachineType::Pointer()),
-        jsgraph()->ExternalConstant(
-            ExternalReference::address_of_stack_limit(jsgraph()->isolate())),
-        jsgraph()->IntPtrConstant(0), *effect, *control);
-    Node* pointer = graph()->NewNode(jsgraph()->machine()->LoadStackPointer());
+  if (!module_ || module_->instance->context.is_null()) return;
+  if (effect == nullptr) effect = effect_;
+  if (control == nullptr) control = control_;
 
-    Node* check =
-        graph()->NewNode(jsgraph()->machine()->UintLessThan(), limit, pointer);
+  Node* limit = graph()->NewNode(
+      jsgraph()->machine()->Load(MachineType::Pointer()),
+      jsgraph()->ExternalConstant(
+          ExternalReference::address_of_stack_limit(jsgraph()->isolate())),
+      jsgraph()->IntPtrConstant(0), *effect, *control);
+  Node* pointer = graph()->NewNode(jsgraph()->machine()->LoadStackPointer());
 
-    Diamond stack_check(graph(), jsgraph()->common(), check, BranchHint::kTrue);
-    stack_check.Chain(*control);
-    Node* effect_true = *effect;
+  Node* check =
+      graph()->NewNode(jsgraph()->machine()->UintLessThan(), limit, pointer);
 
-    Node* effect_false;
-    // Generate a call to the runtime if there is a stack check failure.
-    {
-      Node* node = BuildCallToRuntime(Runtime::kStackGuard, jsgraph(),
-                                      module_->instance->context, nullptr, 0,
-                                      effect, stack_check.if_false);
-      effect_false = node;
-    }
+  Diamond stack_check(graph(), jsgraph()->common(), check, BranchHint::kTrue);
+  stack_check.Chain(*control);
+  Node* effect_true = *effect;
 
-    Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2),
-                                  effect_true, effect_false, stack_check.merge);
+  Handle<Code> code = jsgraph()->isolate()->builtins()->WasmStackGuard();
+  CallInterfaceDescriptor idesc =
+      WasmRuntimeCallDescriptor(jsgraph()->isolate());
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      jsgraph()->isolate(), jsgraph()->zone(), idesc, 0,
+      CallDescriptor::kNoFlags, Operator::kNoProperties);
+  Node* stub_code = jsgraph()->HeapConstant(code);
 
-    *control = stack_check.merge;
-    *effect = ephi;
-  }
+  Node* context = jsgraph()->NoContextConstant();
+  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
+                                context, *effect, stack_check.if_false);
+
+  SetSourcePosition(call, position);
+
+  Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2), effect_true,
+                                call, stack_check.merge);
+
+  *control = stack_check.merge;
+  *effect = ephi;
 }
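
In scalar terms, the diamond above performs a single unsigned comparison and calls the WasmStackGuard builtin only on the unlikely path. A minimal sketch of the check, with a made-up limit value:

    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-in for the isolate's stack limit (loaded above
    // through an ExternalReference).
    const uintptr_t kStackLimit = 0x1000;

    bool StackCheckOk(uintptr_t stack_pointer) {
      // Mirrors UintLessThan(limit, pointer): the hinted-true path is taken
      // while the stack pointer is still above the limit.
      return kStackLimit < stack_pointer;
    }

    int main() {
      std::cout << StackCheckOk(0x2000) << "\n";  // 1: fast path, no call
      std::cout << StackCheckOk(0x0800) << "\n";  // 0: calls WasmStackGuard
    }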
 
 Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
@@ -1042,9 +1110,18 @@
   DCHECK_NOT_NULL(*control_);
   DCHECK_NOT_NULL(*effect_);
 
-  Node** buf = Realloc(vals, count, count + 3);
-  memmove(buf + 1, buf, sizeof(void*) * count);
+  static const int kStackAllocatedNodeBufferSize = 8;
+  Node* stack_buffer[kStackAllocatedNodeBufferSize];
+  std::vector<Node*> heap_buffer;
+
+  Node** buf = stack_buffer;
+  if (count + 3 > kStackAllocatedNodeBufferSize) {
+    heap_buffer.resize(count + 3);
+    buf = heap_buffer.data();
+  }
+
   buf[0] = jsgraph()->Int32Constant(0);
+  memcpy(buf + 1, vals, sizeof(void*) * count);
   buf[count + 1] = *effect_;
   buf[count + 2] = *control_;
   Node* ret =
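
The buffer filled above feeds a return node whose inputs are laid out as [pop_count | val_0 .. val_{count-1} | effect | control], i.e. count + 3 slots. A small standalone sketch of the stack-versus-heap buffer choice (hypothetical count):

    #include <cstdio>
    #include <vector>

    int main() {
      const int count = 6;  // hypothetical number of return values
      const int kStackAllocatedNodeBufferSize = 8;
      void* stack_buffer[kStackAllocatedNodeBufferSize];
      std::vector<void*> heap_buffer;

      void** buf = stack_buffer;
      if (count + 3 > kStackAllocatedNodeBufferSize) {  // 9 > 8: spill
        heap_buffer.resize(count + 3);
        buf = heap_buffer.data();
      }
      // Layout: [pop_count | values... | effect | control].
      std::printf("inputs: %d (%s)\n", count + 3,
                  buf == stack_buffer ? "stack" : "heap");  // inputs: 9 (heap)
    }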
@@ -1107,7 +1184,7 @@
 }
 
 Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
-                                              wasm::LocalType wasmtype) {
+                                              wasm::ValueType wasmtype) {
   Node* result;
   Node* value = node;
   MachineOperatorBuilder* m = jsgraph()->machine();
@@ -1223,7 +1300,7 @@
       // Perform sign extension using following trick
       // result = (x << machine_width - type_width) >> (machine_width -
       // type_width)
-      if (wasmtype == wasm::kAstI64) {
+      if (wasmtype == wasm::kWasmI64) {
         shiftBitCount = jsgraph()->Int32Constant(64 - valueSizeInBits);
         result = graph()->NewNode(
             m->Word64Sar(),
@@ -1231,7 +1308,7 @@
                              graph()->NewNode(m->ChangeInt32ToInt64(), result),
                              shiftBitCount),
             shiftBitCount);
-      } else if (wasmtype == wasm::kAstI32) {
+      } else if (wasmtype == wasm::kWasmI32) {
         shiftBitCount = jsgraph()->Int32Constant(32 - valueSizeInBits);
         result = graph()->NewNode(
             m->Word32Sar(),
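
The shift trick in the comment above is easy to verify in scalar form. A standalone sketch for an 8-bit payload in a 32-bit word (the cast back to int32_t is two's-complement on all targets V8 supports):

    #include <cstdint>
    #include <iostream>

    // Sign extension via shifts: move the payload's sign bit into the word's
    // sign bit, then arithmetic-shift it back down.
    int32_t SignExtend8(int32_t x) {
      const int shift = 32 - 8;  // machine_width - type_width
      return static_cast<int32_t>(static_cast<uint32_t>(x) << shift) >> shift;
    }

    int main() {
      std::cout << SignExtend8(0xFF) << "\n";  // -1
      std::cout << SignExtend8(0x7F) << "\n";  // 127
    }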
@@ -1714,37 +1791,24 @@
 Node* WasmGraphBuilder::GrowMemory(Node* input) {
   Diamond check_input_range(
       graph(), jsgraph()->common(),
-      graph()->NewNode(
-          jsgraph()->machine()->Uint32LessThanOrEqual(), input,
-          jsgraph()->Uint32Constant(wasm::WasmModule::kV8MaxPages)),
+      graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(), input,
+                       jsgraph()->Uint32Constant(FLAG_wasm_max_mem_pages)),
       BranchHint::kTrue);
 
   check_input_range.Chain(*control_);
 
-  Runtime::FunctionId function_id = Runtime::kWasmGrowMemory;
-  const Runtime::Function* function = Runtime::FunctionForId(function_id);
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      jsgraph()->zone(), function_id, function->nargs, Operator::kNoThrow,
-      CallDescriptor::kNoFlags);
-  wasm::ModuleEnv* module = module_;
-  input = BuildChangeUint32ToSmi(input);
-  Node* inputs[] = {
-      jsgraph()->CEntryStubConstant(function->result_size), input,  // C entry
-      jsgraph()->ExternalConstant(
-          ExternalReference(function_id, jsgraph()->isolate())),  // ref
-      jsgraph()->Int32Constant(function->nargs),                  // arity
-      jsgraph()->HeapConstant(module->instance->context),         // context
-      *effect_,
-      check_input_range.if_true};
-  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc),
-                                static_cast<int>(arraysize(inputs)), inputs);
+  Node* parameters[] = {BuildChangeUint32ToSmi(input)};
+  Node* old_effect = *effect_;
+  Node* call = BuildCallToRuntime(Runtime::kWasmGrowMemory, jsgraph(),
+                                  parameters, arraysize(parameters), effect_,
+                                  check_input_range.if_true);
 
   Node* result = BuildChangeSmiToInt32(call);
 
   result = check_input_range.Phi(MachineRepresentation::kWord32, result,
                                  jsgraph()->Int32Constant(-1));
-  *effect_ = graph()->NewNode(jsgraph()->common()->EffectPhi(2), call, *effect_,
-                              check_input_range.merge);
+  *effect_ = graph()->NewNode(jsgraph()->common()->EffectPhi(2), call,
+                              old_effect, check_input_range.merge);
   *control_ = check_input_range.merge;
   return result;
 }
@@ -1767,8 +1831,7 @@
       graph()->NewNode(machine->Word32And(), input, Int32Constant(0xFFFFu)));
 
   Node* parameters[] = {lower, upper};  // thrown value
-  return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(),
-                            module_->instance->context, parameters,
+  return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(), parameters,
                             arraysize(parameters), effect_, *control_);
 }
 
@@ -1778,8 +1841,7 @@
   Node* parameters[] = {input};  // caught value
   Node* value =
       BuildCallToRuntime(Runtime::kWasmGetCaughtExceptionValue, jsgraph(),
-                         module_->instance->context, parameters,
-                         arraysize(parameters), effect_, *control_);
+                         parameters, arraysize(parameters), effect_, *control_);
 
   Node* is_smi;
   Node* is_heap;
@@ -1911,36 +1973,101 @@
 }
 
 Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
+  CommonOperatorBuilder* c = jsgraph()->common();
   MachineOperatorBuilder* m = jsgraph()->machine();
+  Node* const zero = jsgraph()->Int32Constant(0);
 
   Int32Matcher mr(right);
   if (mr.HasValue()) {
-    if (mr.Value() == 0) {
-      return jsgraph()->Int32Constant(0);
-    } else if (mr.Value() == -1) {
-      return jsgraph()->Int32Constant(0);
+    if (mr.Value() == 0 || mr.Value() == -1) {
+      return zero;
     }
     return graph()->NewNode(m->Int32Mod(), left, right, *control_);
   }
 
-  // asm.js semantics return 0 on divide or mod by zero.
-  // Explicit check for x % 0.
-  Diamond z(
-      graph(), jsgraph()->common(),
-      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
-      BranchHint::kFalse);
+  // General case for signed integer modulus, with optimization for (unknown)
+  // power of 2 right hand side.
+  //
+  //   if 0 < right then
+  //     msk = right - 1
+  //     if right & msk != 0 then
+  //       left % right
+  //     else
+  //       if left < 0 then
+  //         -(-left & msk)
+  //       else
+  //         left & msk
+  //   else
+  //     if right < -1 then
+  //       left % right
+  //     else
+  //       zero
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
+  Node* const minus_one = jsgraph()->Int32Constant(-1);
 
-  // Explicit check for x % -1.
-  Diamond d(
-      graph(), jsgraph()->common(),
-      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
-      BranchHint::kFalse);
-  d.Chain(z.if_false);
+  const Operator* const merge_op = c->Merge(2);
+  const Operator* const phi_op = c->Phi(MachineRepresentation::kWord32, 2);
 
-  return z.Phi(
-      MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
-      d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
-            graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
+  Node* check0 = graph()->NewNode(m->Int32LessThan(), zero, right);
+  Node* branch0 =
+      graph()->NewNode(c->Branch(BranchHint::kTrue), check0, graph()->start());
+
+  Node* if_true0 = graph()->NewNode(c->IfTrue(), branch0);
+  Node* true0;
+  {
+    Node* msk = graph()->NewNode(m->Int32Add(), right, minus_one);
+
+    Node* check1 = graph()->NewNode(m->Word32And(), right, msk);
+    Node* branch1 = graph()->NewNode(c->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(c->IfTrue(), branch1);
+    Node* true1 = graph()->NewNode(m->Int32Mod(), left, right, if_true1);
+
+    Node* if_false1 = graph()->NewNode(c->IfFalse(), branch1);
+    Node* false1;
+    {
+      Node* check2 = graph()->NewNode(m->Int32LessThan(), left, zero);
+      Node* branch2 =
+          graph()->NewNode(c->Branch(BranchHint::kFalse), check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(c->IfTrue(), branch2);
+      Node* true2 = graph()->NewNode(
+          m->Int32Sub(), zero,
+          graph()->NewNode(m->Word32And(),
+                           graph()->NewNode(m->Int32Sub(), zero, left), msk));
+
+      Node* if_false2 = graph()->NewNode(c->IfFalse(), branch2);
+      Node* false2 = graph()->NewNode(m->Word32And(), left, msk);
+
+      if_false1 = graph()->NewNode(merge_op, if_true2, if_false2);
+      false1 = graph()->NewNode(phi_op, true2, false2, if_false1);
+    }
+
+    if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
+    true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(c->IfFalse(), branch0);
+  Node* false0;
+  {
+    Node* check1 = graph()->NewNode(m->Int32LessThan(), right, minus_one);
+    Node* branch1 =
+        graph()->NewNode(c->Branch(BranchHint::kTrue), check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(c->IfTrue(), branch1);
+    Node* true1 = graph()->NewNode(m->Int32Mod(), left, right, if_true1);
+
+    Node* if_false1 = graph()->NewNode(c->IfFalse(), branch1);
+    Node* false1 = zero;
+
+    if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
+    false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
+  return graph()->NewNode(phi_op, true0, false0, merge0);
 }
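
The nested diamonds implement exactly the pseudocode in the comment above. A scalar reference version, runnable standalone, makes the asm.js semantics easy to test:

    #include <cstdint>
    #include <iostream>

    // Scalar reference for the graph above: asm.js signed i32 remainder,
    // which yields 0 for right in {0, -1} and masks for powers of two.
    int32_t I32AsmjsRemS(int32_t left, int32_t right) {
      if (0 < right) {
        int32_t msk = right - 1;
        if ((right & msk) != 0) return left % right;  // not a power of two
        if (left < 0) {
          // Wrapping negation, matching the Int32Sub(0, left) node above.
          uint32_t neg = 0u - static_cast<uint32_t>(left);
          return -static_cast<int32_t>(neg & static_cast<uint32_t>(msk));
        }
        return left & msk;
      }
      if (right < -1) return left % right;
      return 0;  // right == 0 or right == -1: no trap, just 0
    }

    int main() {
      std::cout << I32AsmjsRemS(-7, 4) << "\n";          // -3
      std::cout << I32AsmjsRemS(7, 0) << "\n";           // 0
      std::cout << I32AsmjsRemS(INT32_MIN, -1) << "\n";  // 0, no overflow
    }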
 
 Node* WasmGraphBuilder::BuildI32AsmjsDivU(Node* left, Node* right) {
@@ -2016,6 +2143,8 @@
             graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
                              jsgraph()->Int64Constant(-1)));
 
+  d.Chain(*control_);
+
   Node* rem = graph()->NewNode(jsgraph()->machine()->Int64Mod(), left, right,
                                d.if_false);
 
@@ -2179,6 +2308,7 @@
   Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
   trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
   Node* table = function_tables_[table_index];
+  Node* signatures = signature_tables_[table_index];
 
   // Load signature from the table and check.
   // The table is a FixedArray; signatures are encoded as SMIs.
@@ -2187,7 +2317,7 @@
   const int fixed_offset = access.header_size - access.tag();
   {
     Node* load_sig = graph()->NewNode(
-        machine->Load(MachineType::AnyTagged()), table,
+        machine->Load(MachineType::AnyTagged()), signatures,
         graph()->NewNode(machine->Int32Add(),
                          graph()->NewNode(machine->Word32Shl(), key,
                                           Int32Constant(kPointerSizeLog2)),
@@ -2202,14 +2332,12 @@
   }
 
   // Load code object from the table.
-  uint32_t table_size = module_->module->function_tables[table_index].min_size;
-  uint32_t offset = fixed_offset + kPointerSize * table_size;
   Node* load_code = graph()->NewNode(
       machine->Load(MachineType::AnyTagged()), table,
       graph()->NewNode(machine->Int32Add(),
                        graph()->NewNode(machine->Word32Shl(), key,
                                         Int32Constant(kPointerSizeLog2)),
-                       Uint32Constant(offset)),
+                       Uint32Constant(fixed_offset)),
       *effect_, *control_);
 
   args[0] = load_code;
@@ -2342,24 +2470,20 @@
   return value;
 }
 
-Node* WasmGraphBuilder::ToJS(Node* node, wasm::LocalType type) {
+Node* WasmGraphBuilder::ToJS(Node* node, wasm::ValueType type) {
   switch (type) {
-    case wasm::kAstI32:
+    case wasm::kWasmI32:
       return BuildChangeInt32ToTagged(node);
-    case wasm::kAstS128:
-    case wasm::kAstI64:
-      // Throw a TypeError. The native context is good enough here because we
-      // only throw a TypeError.
-      return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
-                                jsgraph()->isolate()->native_context(), nullptr,
-                                0, effect_, *control_);
-    case wasm::kAstF32:
+    case wasm::kWasmS128:
+    case wasm::kWasmI64:
+      UNREACHABLE();
+    case wasm::kWasmF32:
       node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
                               node);
       return BuildChangeFloat64ToTagged(node);
-    case wasm::kAstF64:
+    case wasm::kWasmF64:
       return BuildChangeFloat64ToTagged(node);
-    case wasm::kAstStmt:
+    case wasm::kWasmStmt:
       return jsgraph()->UndefinedConstant();
     default:
       UNREACHABLE();
@@ -2367,8 +2491,7 @@
   }
 }
 
-Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context,
-                                                Node* effect, Node* control) {
+Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context) {
   Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
       jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
@@ -2376,7 +2499,9 @@
   Node* stub_code = jsgraph()->HeapConstant(callable.code());
 
   Node* result = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
-                                  node, context, effect, control);
+                                  node, context, *effect_, *control_);
+
+  SetSourcePosition(result, 1);
 
   *effect_ = result;
 
@@ -2495,35 +2620,30 @@
 }
 
 Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
-                               wasm::LocalType type) {
+                               wasm::ValueType type) {
+  DCHECK_NE(wasm::kWasmStmt, type);
+
   // Do a JavaScript ToNumber.
-  Node* num = BuildJavaScriptToNumber(node, context, *effect_, *control_);
+  Node* num = BuildJavaScriptToNumber(node, context);
 
   // Change representation.
   SimplifiedOperatorBuilder simplified(jsgraph()->zone());
   num = BuildChangeTaggedToFloat64(num);
 
   switch (type) {
-    case wasm::kAstI32: {
+    case wasm::kWasmI32: {
       num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToWord32(),
                              num);
       break;
     }
-    case wasm::kAstS128:
-    case wasm::kAstI64:
-      // Throw a TypeError. The native context is good enough here because we
-      // only throw a TypeError.
-      return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
-                                jsgraph()->isolate()->native_context(), nullptr,
-                                0, effect_, *control_);
-    case wasm::kAstF32:
+    case wasm::kWasmS128:
+    case wasm::kWasmI64:
+      UNREACHABLE();
+    case wasm::kWasmF32:
       num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
                              num);
       break;
-    case wasm::kAstF64:
-      break;
-    case wasm::kAstStmt:
-      num = jsgraph()->Int32Constant(0);
+    case wasm::kWasmF64:
       break;
     default:
       UNREACHABLE();
@@ -2613,42 +2733,73 @@
   return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
 }
 
+bool IsJSCompatible(wasm::ValueType type) {
+  return (type != wasm::kWasmI64) && (type != wasm::kWasmS128);
+}
+
+bool HasJSCompatibleSignature(wasm::FunctionSig* sig) {
+  for (size_t i = 0; i < sig->parameter_count(); i++) {
+    if (!IsJSCompatible(sig->GetParam(i))) {
+      return false;
+    }
+  }
+  for (size_t i = 0; i < sig->return_count(); i++) {
+    if (!IsJSCompatible(sig->GetReturn(i))) {
+      return false;
+    }
+  }
+  return true;
+}
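
In short, a signature is JS-compatible iff neither its parameters nor its returns mention i64 or s128. A standalone sketch with a stand-in enum (hypothetical; the real wasm::ValueType is defined elsewhere in V8):

    #include <iostream>
    #include <vector>

    // Stand-in enum (hypothetical; see src/wasm for the real ValueType).
    enum ValueType { kWasmI32, kWasmI64, kWasmF32, kWasmF64, kWasmS128 };

    bool IsJSCompatible(ValueType type) {
      return type != kWasmI64 && type != kWasmS128;
    }

    bool HasJSCompatibleSignature(const std::vector<ValueType>& params,
                                  const std::vector<ValueType>& returns) {
      for (ValueType t : params)
        if (!IsJSCompatible(t)) return false;
      for (ValueType t : returns)
        if (!IsJSCompatible(t)) return false;
      return true;
    }

    int main() {
      std::cout << HasJSCompatibleSignature({kWasmI32, kWasmF64}, {kWasmF32})
                << "\n";  // 1: the wrapper converts arguments normally
      std::cout << HasJSCompatibleSignature({kWasmI64}, {}) << "\n";
      // 0: the wrapper throws a TypeError instead
    }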
+
 void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
                                             wasm::FunctionSig* sig) {
   int wasm_count = static_cast<int>(sig->parameter_count());
-  int param_count;
-  if (jsgraph()->machine()->Is64()) {
-    param_count = static_cast<int>(sig->parameter_count());
-  } else {
-    param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
-  }
-  int count = param_count + 3;
+  int count = wasm_count + 3;
   Node** args = Buffer(count);
 
   // Build the start and the JS parameter nodes.
-  Node* start = Start(param_count + 5);
+  Node* start = Start(wasm_count + 5);
   *control_ = start;
   *effect_ = start;
+
   // Create the context parameter
   Node* context = graph()->NewNode(
       jsgraph()->common()->Parameter(
           Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
       graph()->start());
 
+  if (!HasJSCompatibleSignature(sig_)) {
+    // Throw a TypeError. Use the context of the calling JavaScript function
+    // (passed as a parameter) so that the generated code is
+    // context-independent.
+    BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
+                                  context, nullptr, 0, effect_, *control_);
+
+    // Add a dummy call to the wasm function so that the generated wrapper
+    // contains a reference to the wrapped wasm function. Without this reference
+    // the wasm function could not be re-imported into another wasm module.
+    int pos = 0;
+    args[pos++] = HeapConstant(wasm_code);
+    args[pos++] = *effect_;
+    args[pos++] = *control_;
+
+    // We only need a dummy call descriptor.
+    wasm::FunctionSig::Builder dummy_sig_builder(jsgraph()->zone(), 0, 0);
+    CallDescriptor* desc = wasm::ModuleEnv::GetWasmCallDescriptor(
+        jsgraph()->zone(), dummy_sig_builder.Build());
+    *effect_ = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+    Return(jsgraph()->UndefinedConstant());
+    return;
+  }
+
   int pos = 0;
   args[pos++] = HeapConstant(wasm_code);
 
   // Convert JS parameters to WASM numbers.
   for (int i = 0; i < wasm_count; ++i) {
-    Node* param =
-        graph()->NewNode(jsgraph()->common()->Parameter(i + 1), start);
+    Node* param = Param(i + 1);
     Node* wasm_param = FromJS(param, context, sig->GetParam(i));
     args[pos++] = wasm_param;
-    if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
-      // We make up the high word with SAR to get the proper sign extension.
-      args[pos++] = graph()->NewNode(jsgraph()->machine()->Word32Sar(),
-                                     wasm_param, jsgraph()->Int32Constant(31));
-    }
   }
 
   args[pos++] = *effect_;
@@ -2657,23 +2808,13 @@
   // Call the WASM code.
   CallDescriptor* desc =
       wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
-  if (jsgraph()->machine()->Is32()) {
-    desc = wasm::ModuleEnv::GetI32WasmCallDescriptor(jsgraph()->zone(), desc);
-  }
-  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
-  Node* retval = call;
-  if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
-      sig->GetReturn(0) == wasm::kAstI64) {
-    // The return values comes as two values, we pick the low word.
-    retval = graph()->NewNode(jsgraph()->common()->Projection(0), retval,
-                              graph()->start());
-  }
-  Node* jsval = ToJS(
-      retval, sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
-  Node* ret = graph()->NewNode(jsgraph()->common()->Return(),
-                               jsgraph()->Int32Constant(0), jsval, call, start);
 
-  MergeControlToEnd(jsgraph(), ret);
+  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
+  *effect_ = call;
+  Node* retval = call;
+  Node* jsval = ToJS(
+      retval, sig->return_count() == 0 ? wasm::kWasmStmt : sig->GetReturn());
+  Return(jsval);
 }
 
 int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
@@ -2681,14 +2822,8 @@
   // Convert WASM numbers to JS values.
   int param_index = 0;
   for (int i = 0; i < param_count; ++i) {
-    Node* param = graph()->NewNode(
-        jsgraph()->common()->Parameter(param_index++), graph()->start());
+    Node* param = Param(param_index++);
     args[pos++] = ToJS(param, sig->GetParam(i));
-    if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
-      // On 32 bit platforms we have to skip the high word of int64
-      // parameters.
-      param_index++;
-    }
   }
   return pos;
 }
@@ -2698,19 +2833,25 @@
   DCHECK(target->IsCallable());
 
   int wasm_count = static_cast<int>(sig->parameter_count());
-  int param_count;
-  if (jsgraph()->machine()->Is64()) {
-    param_count = wasm_count;
-  } else {
-    param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
-  }
 
   // Build the start and the parameter nodes.
   Isolate* isolate = jsgraph()->isolate();
   CallDescriptor* desc;
-  Node* start = Start(param_count + 3);
+  Node* start = Start(wasm_count + 3);
   *effect_ = start;
   *control_ = start;
+
+  if (!HasJSCompatibleSignature(sig_)) {
+    // Throw a TypeError. Embedding the context is ok here, since this code is
+    // regenerated at instantiation time.
+    Node* context =
+        jsgraph()->HeapConstant(jsgraph()->isolate()->native_context());
+    Return(BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError,
+                                         jsgraph(), context, nullptr, 0,
+                                         effect_, *control_));
+    return;
+  }
+
   Node** args = Buffer(wasm_count + 7);
 
   Node* call;
@@ -2777,24 +2918,123 @@
     call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
   }
 
+  *effect_ = call;
+  SetSourcePosition(call, 0);
+
   // Convert the return value back.
-  Node* ret;
-  Node* val =
-      FromJS(call, HeapConstant(isolate->native_context()),
-             sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
-  Node* pop_size = jsgraph()->Int32Constant(0);
-  if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
-      sig->GetReturn() == wasm::kAstI64) {
-    ret = graph()->NewNode(jsgraph()->common()->Return(), pop_size, val,
-                           graph()->NewNode(jsgraph()->machine()->Word32Sar(),
-                                            val, jsgraph()->Int32Constant(31)),
-                           call, start);
-  } else {
-    ret = graph()->NewNode(jsgraph()->common()->Return(), pop_size, val, call,
-                           start);
+  Node* i32_zero = jsgraph()->Int32Constant(0);
+  Node* val = sig->return_count() == 0
+                  ? i32_zero
+                  : FromJS(call, HeapConstant(isolate->native_context()),
+                           sig->GetReturn());
+  Return(val);
+}
+
+void WasmGraphBuilder::BuildWasmInterpreterEntry(
+    uint32_t function_index, wasm::FunctionSig* sig,
+    Handle<WasmInstanceObject> instance) {
+  int wasm_count = static_cast<int>(sig->parameter_count());
+  int param_count = jsgraph()->machine()->Is64()
+                        ? wasm_count
+                        : Int64Lowering::GetParameterCountAfterLowering(sig);
+
+  // Build the start and the parameter nodes.
+  Node* start = Start(param_count + 3);
+  *effect_ = start;
+  *control_ = start;
+
+  // Compute size for the argument buffer.
+  int args_size_bytes = 0;
+  for (int i = 0; i < wasm_count; i++) {
+    args_size_bytes +=
+        RoundUpToMultipleOfPowOf2(1 << ElementSizeLog2Of(sig->GetParam(i)), 8);
   }
 
-  MergeControlToEnd(jsgraph(), ret);
+  // The return value is also passed via this buffer:
+  DCHECK_GE(wasm::kV8MaxWasmFunctionReturns, sig->return_count());
+  // TODO(wasm): Handle multi-value returns.
+  DCHECK_EQ(1, wasm::kV8MaxWasmFunctionReturns);
+  int return_size_bytes =
+      sig->return_count() == 0 ? 0 : 1 << ElementSizeLog2Of(sig->GetReturn(0));
+
+  // Get a stack slot for the arguments.
+  Node* arg_buffer = args_size_bytes == 0 && return_size_bytes == 0
+                         ? jsgraph()->IntPtrConstant(0)
+                         : graph()->NewNode(jsgraph()->machine()->StackSlot(
+                               std::max(args_size_bytes, return_size_bytes)));
+
+  // Now store all our arguments to the buffer.
+  int param_index = 0;
+  int offset = 0;
+  for (int i = 0; i < wasm_count; i++) {
+    Node* param = Param(param_index++);
+    bool is_i64_as_two_params =
+        jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kWasmI64;
+
+    if (is_i64_as_two_params) {
+      StoreRepresentation store_rep(wasm::kWasmI32,
+                                    WriteBarrierKind::kNoWriteBarrier);
+      *effect_ =
+          graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+                           Int32Constant(offset + kInt64LowerHalfMemoryOffset),
+                           param, *effect_, *control_);
+
+      param = Param(param_index++);
+      *effect_ =
+          graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+                           Int32Constant(offset + kInt64UpperHalfMemoryOffset),
+                           param, *effect_, *control_);
+      offset += 8;
+
+    } else {
+      MachineRepresentation param_rep = sig->GetParam(i);
+      StoreRepresentation store_rep(param_rep,
+                                    WriteBarrierKind::kNoWriteBarrier);
+      *effect_ =
+          graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+                           Int32Constant(offset), param, *effect_, *control_);
+      offset += RoundUpToMultipleOfPowOf2(1 << ElementSizeLog2Of(param_rep), 8);
+    }
+
+    DCHECK(IsAligned(offset, 8));
+  }
+  DCHECK_EQ(param_count, param_index);
+  DCHECK_EQ(args_size_bytes, offset);
+
+  // We pass the raw arg_buffer here. To the GC and other parts it looks
+  // like a Smi (lowest bit not set). The runtime function, however, must not
+  // call Smi::value on it but simply cast it to a byte pointer.
+  Node* parameters[] = {
+      jsgraph()->HeapConstant(instance),       // wasm instance
+      jsgraph()->SmiConstant(function_index),  // function index
+      arg_buffer,                              // argument buffer
+  };
+  BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(), parameters,
+                     arraysize(parameters), effect_, *control_);
+
+  // Read back the return value.
+  if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
+      sig->GetReturn() == wasm::kWasmI64) {
+    MachineType load_rep = wasm::WasmOpcodes::MachineTypeFor(wasm::kWasmI32);
+    Node* lower =
+        graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+                         Int32Constant(0), *effect_, *control_);
+    Node* upper =
+        graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+                         Int32Constant(sizeof(int32_t)), *effect_, *control_);
+    Return(upper, lower);
+  } else {
+    Node* val;
+    if (sig->return_count() == 0) {
+      val = Int32Constant(0);
+    } else {
+      MachineType load_rep =
+          wasm::WasmOpcodes::MachineTypeFor(sig->GetReturn());
+      val = graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+                             Int32Constant(0), *effect_, *control_);
+    }
+    Return(val);
+  }
 }
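
Each argument slot in the interpreter-entry buffer is rounded up to 8 bytes, so offsets stay 8-aligned and the return value reuses the same storage. A worked sizing example for a hypothetical signature (i32, i64, f32) -> f64:

    #include <algorithm>
    #include <cstdio>

    // Sketch of the buffer sizing above: every parameter slot is rounded up
    // to a multiple of 8 bytes.
    int SlotSize(int byte_size) { return (byte_size + 7) & ~7; }

    int main() {
      // Hypothetical signature (i32, i64, f32) -> f64.
      int args_size_bytes = SlotSize(4) + SlotSize(8) + SlotSize(4);  // 24
      int return_size_bytes = 8;  // one f64 return
      std::printf("stack slot: %d bytes\n",
                  std::max(args_size_bytes, return_size_bytes));  // 24
    }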
 
 Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
@@ -2853,12 +3093,18 @@
 
 void WasmGraphBuilder::EnsureFunctionTableNodes() {
   if (function_tables_.size() > 0) return;
-  for (size_t i = 0; i < module_->instance->function_tables.size(); ++i) {
-    auto handle = module_->instance->function_tables[i];
-    DCHECK(!handle.is_null());
-    function_tables_.push_back(HeapConstant(handle));
+  size_t tables_size = module_->instance->function_tables.size();
+  DCHECK(tables_size == module_->instance->signature_tables.size());
+  for (size_t i = 0; i < tables_size; ++i) {
+    auto function_handle = module_->instance->function_tables[i];
+    auto signature_handle = module_->instance->signature_tables[i];
+    DCHECK(!function_handle.is_null() && !signature_handle.is_null());
+    function_tables_.push_back(HeapConstant(function_handle));
+    signature_tables_.push_back(HeapConstant(signature_handle));
     uint32_t table_size = module_->module->function_tables[i].min_size;
-    function_table_sizes_.push_back(Uint32Constant(table_size));
+    function_table_sizes_.push_back(jsgraph()->RelocatableInt32Constant(
+        static_cast<uint32_t>(table_size),
+        RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE));
   }
 }
 
@@ -2895,6 +3141,7 @@
                                       uint32_t offset,
                                       wasm::WasmCodePosition position) {
   DCHECK(module_ && module_->instance);
+  if (FLAG_wasm_no_bounds_checks) return;
   uint32_t size = module_->instance->mem_size;
   byte memsize = wasm::WasmOpcodes::MemSize(memtype);
 
@@ -2945,15 +3192,14 @@
   trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
 }
 
-
-Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
+Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
                                 Node* index, uint32_t offset,
                                 uint32_t alignment,
                                 wasm::WasmCodePosition position) {
   Node* load;
 
   // WASM semantics throw on OOB. Introduce explicit bounds check.
-  if (!FLAG_wasm_trap_handler) {
+  if (!FLAG_wasm_trap_handler || !kTrapHandlerSupported) {
     BoundsCheckMem(memtype, index, offset, position);
   }
   bool aligned = static_cast<int>(alignment) >=
@@ -2961,18 +3207,19 @@
 
   if (aligned ||
       jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
-    if (FLAG_wasm_trap_handler) {
-      Node* context = HeapConstant(module_->instance->context);
+    if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
+      DCHECK(FLAG_wasm_guard_pages);
       Node* position_node = jsgraph()->Int32Constant(position);
       load = graph()->NewNode(jsgraph()->machine()->ProtectedLoad(memtype),
-                              MemBuffer(offset), index, context, position_node,
-                              *effect_, *control_);
+                              MemBuffer(offset), index, position_node, *effect_,
+                              *control_);
     } else {
       load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
                               MemBuffer(offset), index, *effect_, *control_);
     }
   } else {
-    DCHECK(!FLAG_wasm_trap_handler);
+    // TODO(eholk): Support unaligned loads with trap handlers.
+    DCHECK(!FLAG_wasm_trap_handler || !kTrapHandlerSupported);
     load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
                             MemBuffer(offset), index, *effect_, *control_);
   }
@@ -2983,7 +3230,7 @@
   load = BuildChangeEndianness(load, memtype, type);
 #endif
 
-  if (type == wasm::kAstI64 &&
+  if (type == wasm::kWasmI64 &&
       ElementSizeLog2Of(memtype.representation()) < 3) {
     // TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes.
     if (memtype.IsSigned()) {
@@ -3006,7 +3253,9 @@
   Node* store;
 
   // WASM semantics throw on OOB. Introduce explicit bounds check.
-  BoundsCheckMem(memtype, index, offset, position);
+  if (!FLAG_wasm_trap_handler || !kTrapHandlerSupported) {
+    BoundsCheckMem(memtype, index, offset, position);
+  }
   StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
 
   bool aligned = static_cast<int>(alignment) >=
@@ -3018,11 +3267,20 @@
 
   if (aligned ||
       jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) {
-    StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
-    store =
-        graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
-                         index, val, *effect_, *control_);
+    if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
+      Node* position_node = jsgraph()->Int32Constant(position);
+      store = graph()->NewNode(
+          jsgraph()->machine()->ProtectedStore(memtype.representation()),
+          MemBuffer(offset), index, val, position_node, *effect_, *control_);
+    } else {
+      StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+      store =
+          graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
+                           index, val, *effect_, *control_);
+    }
   } else {
+    // TODO(eholk): Support unaligned stores with trap handlers.
+    DCHECK(!FLAG_wasm_trap_handler || !kTrapHandlerSupported);
     UnalignedStoreRepresentation rep(memtype.representation());
     store =
         graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
@@ -3070,16 +3328,14 @@
 void WasmGraphBuilder::Int64LoweringForTesting() {
   if (jsgraph()->machine()->Is32()) {
     Int64Lowering r(jsgraph()->graph(), jsgraph()->machine(),
-                    jsgraph()->common(), jsgraph()->zone(),
-                    function_signature_);
+                    jsgraph()->common(), jsgraph()->zone(), sig_);
     r.LowerGraph();
   }
 }
 
 void WasmGraphBuilder::SimdScalarLoweringForTesting() {
   SimdScalarLowering(jsgraph()->graph(), jsgraph()->machine(),
-                     jsgraph()->common(), jsgraph()->zone(),
-                     function_signature_)
+                     jsgraph()->common(), jsgraph()->zone(), sig_)
       .LowerGraph();
 }
 
@@ -3093,6 +3349,7 @@
 Node* WasmGraphBuilder::CreateS128Value(int32_t value) {
   // TODO(gdeepti): Introduce Simd128Constant to common-operator.h and use
   // instead of creating a SIMD Value.
+  has_simd_ = true;
   return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(),
                           Int32Constant(value), Int32Constant(value),
                           Int32Constant(value), Int32Constant(value));
@@ -3100,36 +3357,348 @@
 
 Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
                                const NodeVector& inputs) {
+  has_simd_ = true;
   switch (opcode) {
-    case wasm::kExprI32x4Splat:
-      return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
-                              inputs[0], inputs[0], inputs[0]);
-    case wasm::kExprI32x4Add:
-      return graph()->NewNode(jsgraph()->machine()->Int32x4Add(), inputs[0],
-                              inputs[1]);
-    case wasm::kExprF32x4ExtractLane:
-      return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(),
-                              inputs[0], inputs[1]);
     case wasm::kExprF32x4Splat:
       return graph()->NewNode(jsgraph()->machine()->CreateFloat32x4(),
                               inputs[0], inputs[0], inputs[0], inputs[0]);
+    case wasm::kExprF32x4SConvertI32x4:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4FromInt32x4(),
+                              inputs[0]);
+    case wasm::kExprF32x4UConvertI32x4:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4FromUint32x4(),
+                              inputs[0]);
+    case wasm::kExprF32x4Abs:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4Abs(), inputs[0]);
+    case wasm::kExprF32x4Neg:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4Neg(), inputs[0]);
     case wasm::kExprF32x4Add:
       return graph()->NewNode(jsgraph()->machine()->Float32x4Add(), inputs[0],
                               inputs[1]);
+    case wasm::kExprF32x4Sub:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4Sub(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprF32x4Eq:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4Equal(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprF32x4Ne:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4NotEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4Splat:
+      return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
+                              inputs[0], inputs[0], inputs[0]);
+    case wasm::kExprI32x4SConvertF32x4:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4FromFloat32x4(),
+                              inputs[0]);
+    case wasm::kExprI32x4UConvertF32x4:
+      return graph()->NewNode(jsgraph()->machine()->Uint32x4FromFloat32x4(),
+                              inputs[0]);
+    case wasm::kExprI32x4Neg:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Neg(), inputs[0]);
+    case wasm::kExprI32x4Add:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Add(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4Sub:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Sub(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4Mul:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Mul(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4MinS:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4MaxS:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Max(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4Eq:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Equal(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4Ne:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4NotEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4LtS:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThan(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI32x4LeS:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThanOrEqual(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI32x4GtS:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThan(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4GeS:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThanOrEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4MinU:
+      return graph()->NewNode(jsgraph()->machine()->Uint32x4Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4MaxU:
+      return graph()->NewNode(jsgraph()->machine()->Uint32x4Max(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4LtU:
+      return graph()->NewNode(jsgraph()->machine()->Uint32x4GreaterThan(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI32x4LeU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint32x4GreaterThanOrEqual(), inputs[1],
+          inputs[0]);
+    case wasm::kExprI32x4GtU:
+      return graph()->NewNode(jsgraph()->machine()->Uint32x4GreaterThan(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4GeU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint32x4GreaterThanOrEqual(), inputs[0],
+          inputs[1]);
+    case wasm::kExprI16x8Splat:
+      return graph()->NewNode(jsgraph()->machine()->CreateInt16x8(), inputs[0],
+                              inputs[0], inputs[0], inputs[0], inputs[0],
+                              inputs[0], inputs[0], inputs[0]);
+    case wasm::kExprI16x8Neg:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Neg(), inputs[0]);
+    case wasm::kExprI16x8Add:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Add(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8AddSaturateS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8AddSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8Sub:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Sub(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8SubSaturateS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8SubSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8Mul:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Mul(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8MinS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8MaxS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Max(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8Eq:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Equal(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8Ne:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8NotEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8LtS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThan(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI16x8LeS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThanOrEqual(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI16x8GtS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThan(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8GeS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThanOrEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8AddSaturateU:
+      return graph()->NewNode(jsgraph()->machine()->Uint16x8AddSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8SubSaturateU:
+      return graph()->NewNode(jsgraph()->machine()->Uint16x8SubSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8MinU:
+      return graph()->NewNode(jsgraph()->machine()->Uint16x8Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8MaxU:
+      return graph()->NewNode(jsgraph()->machine()->Uint16x8Max(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8LtU:
+      return graph()->NewNode(jsgraph()->machine()->Uint16x8GreaterThan(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI16x8LeU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint16x8GreaterThanOrEqual(), inputs[1],
+          inputs[0]);
+    case wasm::kExprI16x8GtU:
+      return graph()->NewNode(jsgraph()->machine()->Uint16x8GreaterThan(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8GeU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint16x8GreaterThanOrEqual(), inputs[0],
+          inputs[1]);
+    case wasm::kExprI8x16Splat:
+      return graph()->NewNode(jsgraph()->machine()->CreateInt8x16(), inputs[0],
+                              inputs[0], inputs[0], inputs[0], inputs[0],
+                              inputs[0], inputs[0], inputs[0], inputs[0],
+                              inputs[0], inputs[0], inputs[0], inputs[0],
+                              inputs[0], inputs[0], inputs[0]);
+    case wasm::kExprI8x16Neg:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Neg(), inputs[0]);
+    case wasm::kExprI8x16Add:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Add(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16AddSaturateS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16AddSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16Sub:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Sub(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16SubSaturateS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16SubSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16Mul:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Mul(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16MinS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16MaxS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Max(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16Eq:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Equal(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16Ne:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16NotEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16LtS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThan(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI8x16LeS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThanOrEqual(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI8x16GtS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThan(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16GeS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThanOrEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16AddSaturateU:
+      return graph()->NewNode(jsgraph()->machine()->Uint8x16AddSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16SubSaturateU:
+      return graph()->NewNode(jsgraph()->machine()->Uint8x16SubSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16MinU:
+      return graph()->NewNode(jsgraph()->machine()->Uint8x16Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16MaxU:
+      return graph()->NewNode(jsgraph()->machine()->Uint8x16Max(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16LtU:
+      return graph()->NewNode(jsgraph()->machine()->Uint8x16GreaterThan(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI8x16LeU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint8x16GreaterThanOrEqual(), inputs[1],
+          inputs[0]);
+    case wasm::kExprI8x16GtU:
+      return graph()->NewNode(jsgraph()->machine()->Uint8x16GreaterThan(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16GeU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint8x16GreaterThanOrEqual(), inputs[0],
+          inputs[1]);
+    case wasm::kExprS32x4Select:
+      return graph()->NewNode(jsgraph()->machine()->Simd32x4Select(), inputs[0],
+                              inputs[1], inputs[2]);
+    case wasm::kExprS16x8Select:
+      return graph()->NewNode(jsgraph()->machine()->Simd16x8Select(), inputs[0],
+                              inputs[1], inputs[2]);
+    case wasm::kExprS8x16Select:
+      return graph()->NewNode(jsgraph()->machine()->Simd8x16Select(), inputs[0],
+                              inputs[1], inputs[2]);
+    case wasm::kExprS128And:
+      return graph()->NewNode(jsgraph()->machine()->Simd128And(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprS128Or:
+      return graph()->NewNode(jsgraph()->machine()->Simd128Or(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprS128Xor:
+      return graph()->NewNode(jsgraph()->machine()->Simd128Xor(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprS128Not:
+      return graph()->NewNode(jsgraph()->machine()->Simd128Not(), inputs[0]);
     default:
       return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
   }
 }
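
A pattern worth noting in the switch above: the machine layer only exposes GreaterThan/GreaterThanOrEqual comparisons, so the LtS/LeS opcodes (and their unsigned variants) are lowered by swapping the two inputs. The saturating add/sub opcodes clamp per lane instead of wrapping; a minimal scalar sketch of the signed 8-bit case (standalone C++, illustration only, not V8 code):

    #include <algorithm>
    #include <cstdint>

    // Per-lane semantics of kExprI8x16AddSaturateS: widen, add, clamp to
    // [-128, 127]. Hypothetical helper name.
    int8_t SatAddS8(int8_t a, int8_t b) {
      int32_t sum = int32_t{a} + int32_t{b};  // widen so the add cannot overflow
      sum = std::max<int32_t>(INT8_MIN, std::min<int32_t>(INT8_MAX, sum));
      return static_cast<int8_t>(sum);
    }
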
 
-Node* WasmGraphBuilder::SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane,
-                                        Node* input) {
+Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
+                                   const NodeVector& inputs) {
+  has_simd_ = true;
   switch (opcode) {
-    case wasm::kExprI32x4ExtractLane:
-      return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(), input,
-                              Int32Constant(lane));
     case wasm::kExprF32x4ExtractLane:
-      return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(),
-                              input, Int32Constant(lane));
+      return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(lane),
+                              inputs[0]);
+    case wasm::kExprF32x4ReplaceLane:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4ReplaceLane(lane),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4ExtractLane:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(lane),
+                              inputs[0]);
+    case wasm::kExprI32x4ReplaceLane:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4ReplaceLane(lane),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8ExtractLane:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8ExtractLane(lane),
+                              inputs[0]);
+    case wasm::kExprI16x8ReplaceLane:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8ReplaceLane(lane),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16ExtractLane:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16ExtractLane(lane),
+                              inputs[0]);
+    case wasm::kExprI8x16ReplaceLane:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16ReplaceLane(lane),
+                              inputs[0], inputs[1]);
+    default:
+      return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+  }
+}
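
The rewrite above moves the lane index out of the node inputs and into the machine operator itself (e.g. Float32x4ExtractLane(lane) instead of a separate Int32Constant input), so the lane is guaranteed to be a compile-time constant. A scalar model of the extract/replace semantics (illustrative, not V8 API):

    #include <array>
    #include <cstdint>

    using I32x4 = std::array<int32_t, 4>;

    int32_t ExtractLane(const I32x4& v, uint8_t lane) { return v[lane]; }

    I32x4 ReplaceLane(I32x4 v, uint8_t lane, int32_t value) {
      v[lane] = value;  // every other lane passes through unchanged
      return v;
    }
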
+
+Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
+                                    const NodeVector& inputs) {
+  has_simd_ = true;
+  switch (opcode) {
+    case wasm::kExprI32x4Shl:
+      return graph()->NewNode(
+          jsgraph()->machine()->Int32x4ShiftLeftByScalar(shift), inputs[0]);
+    case wasm::kExprI32x4ShrS:
+      return graph()->NewNode(
+          jsgraph()->machine()->Int32x4ShiftRightByScalar(shift), inputs[0]);
+    case wasm::kExprI32x4ShrU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint32x4ShiftRightByScalar(shift), inputs[0]);
+    case wasm::kExprI16x8Shl:
+      return graph()->NewNode(
+          jsgraph()->machine()->Int16x8ShiftLeftByScalar(shift), inputs[0]);
+    case wasm::kExprI16x8ShrS:
+      return graph()->NewNode(
+          jsgraph()->machine()->Int16x8ShiftRightByScalar(shift), inputs[0]);
+    case wasm::kExprI16x8ShrU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint16x8ShiftRightByScalar(shift), inputs[0]);
+    case wasm::kExprI8x16Shl:
+      return graph()->NewNode(
+          jsgraph()->machine()->Int8x16ShiftLeftByScalar(shift), inputs[0]);
+    case wasm::kExprI8x16ShrS:
+      return graph()->NewNode(
+          jsgraph()->machine()->Int8x16ShiftRightByScalar(shift), inputs[0]);
+    case wasm::kExprI8x16ShrU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint8x16ShiftRightByScalar(shift), inputs[0]);
+    default:
+      return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+  }
+}
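
As with lane indices, the shift amount is baked into the operator as an immediate. The signed/unsigned split only matters for right shifts; a scalar sketch (assuming an arithmetic shift on two's-complement targets; names illustrative):

    #include <cstdint>

    int32_t ShrS(int32_t lane, uint8_t shift) {
      return lane >> shift;  // arithmetic shift: the sign bit is replicated
    }

    uint32_t ShrU(uint32_t lane, uint8_t shift) {
      return lane >> shift;  // logical shift: vacated bits are zero-filled
    }
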
+
+Node* WasmGraphBuilder::SimdSwizzleOp(wasm::WasmOpcode opcode, uint32_t swizzle,
+                                      const NodeVector& inputs) {
+  has_simd_ = true;
+  switch (opcode) {
+    case wasm::kExprS32x4Swizzle:
+      return graph()->NewNode(jsgraph()->machine()->Simd32x4Swizzle(swizzle),
+                              inputs[0]);
+    case wasm::kExprS16x8Swizzle:
+      return graph()->NewNode(jsgraph()->machine()->Simd16x8Swizzle(swizzle),
+                              inputs[0]);
+    case wasm::kExprS8x16Swizzle:
+      return graph()->NewNode(jsgraph()->machine()->Simd8x16Swizzle(swizzle),
+                              inputs[0]);
     default:
       return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
   }
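
The swizzle operators take a packed selector immediate that says, for each output lane, which input lane to read. A scalar model of the 32x4 case (2 bits per output lane here; the actual encoding used by Simd32x4Swizzle may differ):

    #include <array>
    #include <cstdint>

    using I32x4 = std::array<int32_t, 4>;

    I32x4 Swizzle32x4(const I32x4& v, uint32_t selector) {
      I32x4 out;
      for (int lane = 0; lane < 4; ++lane) {
        out[lane] = v[(selector >> (2 * lane)) & 0x3];  // 2-bit source index
      }
      return out;
    }
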
@@ -3156,9 +3725,10 @@
                                    *script_str, 0, 0));
 }
 
-Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+Handle<Code> CompileJSToWasmWrapper(Isolate* isolate,
+                                    const wasm::WasmModule* module,
                                     Handle<Code> wasm_code, uint32_t index) {
-  const wasm::WasmFunction* func = &module->module->functions[index];
+  const wasm::WasmFunction* func = &module->functions[index];
 
   //----------------------------------------------------------------------------
   // Create the Graph
@@ -3172,10 +3742,10 @@
   Node* control = nullptr;
   Node* effect = nullptr;
 
-  WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+  wasm::ModuleEnv module_env(module, nullptr);
+  WasmGraphBuilder builder(&module_env, &zone, &jsgraph, func->sig);
   builder.set_control_ptr(&control);
   builder.set_effect_ptr(&effect);
-  builder.set_module(module);
   builder.BuildJSToWasmWrapper(wasm_code, func->sig);
 
   //----------------------------------------------------------------------------
@@ -3188,8 +3758,8 @@
   }
 
   // Schedule and compile to machine code.
-  int params =
-      static_cast<int>(module->GetFunctionSignature(index)->parameter_count());
+  int params = static_cast<int>(
+      module_env.GetFunctionSignature(index)->parameter_count());
   CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
       &zone, false, params + 1, CallDescriptor::kNoFlags);
   Code::Flags flags = Code::ComputeFlags(Code::JS_TO_WASM_FUNCTION);
@@ -3222,10 +3792,11 @@
   }
 
   if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
-    RecordFunctionCompilation(
-        CodeEventListener::FUNCTION_TAG, isolate, code, "js-to-wasm", index,
-        wasm::WasmName("export"),
-        module->module->GetName(func->name_offset, func->name_length));
+    char func_name[32];
+    SNPrintF(ArrayVector(func_name), "js-to-wasm#%d", func->func_index);
+    RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+                              "js-to-wasm", index, wasm::WasmName("export"),
+                              CStrVector(func_name));
   }
   return code;
 }
@@ -3233,7 +3804,8 @@
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
                                     wasm::FunctionSig* sig, uint32_t index,
                                     Handle<String> module_name,
-                                    MaybeHandle<String> import_name) {
+                                    MaybeHandle<String> import_name,
+                                    wasm::ModuleOrigin origin) {
   //----------------------------------------------------------------------------
   // Create the Graph
   //----------------------------------------------------------------------------
@@ -3246,7 +3818,12 @@
   Node* control = nullptr;
   Node* effect = nullptr;
 
-  WasmGraphBuilder builder(&zone, &jsgraph, sig);
+  SourcePositionTable* source_position_table =
+      origin == wasm::kAsmJsOrigin ? new (&zone) SourcePositionTable(&graph)
+                                   : nullptr;
+
+  WasmGraphBuilder builder(nullptr, &zone, &jsgraph, sig,
+                           source_position_table);
   builder.set_control_ptr(&control);
   builder.set_effect_ptr(&effect);
   builder.BuildWasmToJSWrapper(target, sig);
@@ -3282,7 +3859,8 @@
     }
 
     CompilationInfo info(func_name, isolate, &zone, flags);
-    code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+    code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr,
+                                            source_position_table);
 #ifdef ENABLE_DISASSEMBLER
     if (FLAG_print_opt_code && !code.is_null()) {
       OFStream os(stdout);
@@ -3310,6 +3888,75 @@
   return code;
 }
 
+Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
+                                         wasm::FunctionSig* sig,
+                                         Handle<WasmInstanceObject> instance) {
+  //----------------------------------------------------------------------------
+  // Create the Graph
+  //----------------------------------------------------------------------------
+  Zone zone(isolate->allocator(), ZONE_NAME);
+  Graph graph(&zone);
+  CommonOperatorBuilder common(&zone);
+  MachineOperatorBuilder machine(
+      &zone, MachineType::PointerRepresentation(),
+      InstructionSelector::SupportedMachineOperatorFlags(),
+      InstructionSelector::AlignmentRequirements());
+  JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+
+  Node* control = nullptr;
+  Node* effect = nullptr;
+
+  WasmGraphBuilder builder(nullptr, &zone, &jsgraph, sig);
+  builder.set_control_ptr(&control);
+  builder.set_effect_ptr(&effect);
+  builder.BuildWasmInterpreterEntry(func_index, sig, instance);
+
+  Handle<Code> code = Handle<Code>::null();
+  {
+    if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
+      OFStream os(stdout);
+      os << "-- Wasm to interpreter graph -- " << std::endl;
+      os << AsRPO(graph);
+    }
+
+    // Schedule and compile to machine code.
+    CallDescriptor* incoming =
+        wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
+    if (machine.Is32()) {
+      incoming = wasm::ModuleEnv::GetI32WasmCallDescriptor(&zone, incoming);
+    }
+    Code::Flags flags = Code::ComputeFlags(Code::WASM_INTERPRETER_ENTRY);
+    EmbeddedVector<char, 32> debug_name;
+    int name_len = SNPrintF(debug_name, "wasm-to-interpreter#%d", func_index);
+    DCHECK(name_len > 0 && name_len < debug_name.length());
+    debug_name.Truncate(name_len);
+    DCHECK_EQ('\0', debug_name.start()[debug_name.length()]);
+
+    CompilationInfo info(debug_name, isolate, &zone, flags);
+    code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+#ifdef ENABLE_DISASSEMBLER
+    if (FLAG_print_opt_code && !code.is_null()) {
+      OFStream os(stdout);
+      code->Disassemble(debug_name.start(), os);
+    }
+#endif
+
+    if (isolate->logger()->is_logging_code_events() ||
+        isolate->is_profiling()) {
+      RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+                                "wasm-to-interpreter", func_index,
+                                wasm::WasmName("module"), debug_name);
+    }
+  }
+
+  Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(1, TENURED);
+  Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
+  deopt_data->set(0, *weak_instance);
+  code->set_deoptimization_data(*deopt_data);
+
+  return code;
+}
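
The deopt-data trick at the end is how the entry stub finds its instance at runtime without keeping it alive: the instance sits behind a WeakCell in the code object's deoptimization data, so the stub does not root it. The relationship is roughly that of a std::weak_ptr to its object (analogy only, not V8 API):

    #include <memory>

    struct Instance {};  // stand-in for WasmInstanceObject

    struct InterpreterEntry {
      std::weak_ptr<Instance> instance;  // observed, never kept alive
    };

    bool StillAlive(const InterpreterEntry& e) {
      return !e.instance.expired();  // analogous to checking WeakCell::cleared()
    }
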
+
 SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
     double* decode_ms) {
   base::ElapsedTimer decode_timer;
@@ -3323,12 +3970,12 @@
   MachineOperatorBuilder* machine = jsgraph_->machine();
   SourcePositionTable* source_position_table =
       new (jsgraph_->zone()) SourcePositionTable(graph);
-  WasmGraphBuilder builder(jsgraph_->zone(), jsgraph_, function_->sig,
-                           source_position_table);
-  wasm::FunctionBody body = {
-      module_env_, function_->sig, module_env_->module->module_start,
-      module_env_->module->module_start + function_->code_start_offset,
-      module_env_->module->module_start + function_->code_end_offset};
+  WasmGraphBuilder builder(&module_env_->module_env, jsgraph_->zone(), jsgraph_,
+                           function_->sig, source_position_table);
+  const byte* module_start = module_env_->wire_bytes.start();
+  wasm::FunctionBody body = {function_->sig, module_start,
+                             module_start + function_->code_start_offset,
+                             module_start + function_->code_end_offset};
   graph_construction_result_ =
       wasm::BuildTFGraph(isolate_->allocator(), &builder, body);
 
@@ -3341,18 +3988,26 @@
   }
 
   if (machine->Is32()) {
-    Int64Lowering r(graph, machine, common, jsgraph_->zone(), function_->sig);
-    r.LowerGraph();
+    Int64Lowering(graph, machine, common, jsgraph_->zone(), function_->sig)
+        .LowerGraph();
   }
 
-  SimdScalarLowering(graph, machine, common, jsgraph_->zone(), function_->sig)
-      .LowerGraph();
+  if (builder.has_simd() && !CpuFeatures::SupportsSimd128()) {
+    SimdScalarLowering(graph, machine, common, jsgraph_->zone(), function_->sig)
+        .LowerGraph();
+  }
 
   int index = static_cast<int>(function_->func_index);
 
   if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
     OFStream os(stdout);
-    PrintAst(isolate_->allocator(), body, os, nullptr);
+    PrintRawWasmCode(isolate_->allocator(), body,
+                     module_env_->module_env.module);
+  }
+  if (index >= FLAG_trace_wasm_text_start && index < FLAG_trace_wasm_text_end) {
+    OFStream os(stdout);
+    PrintWasmText(module_env_->module_env.module, module_env_->wire_bytes,
+                  function_->func_index, os, nullptr);
   }
   if (FLAG_trace_wasm_decode_time) {
     *decode_ms = decode_timer.Elapsed().InMillisecondsF();
@@ -3360,15 +4015,22 @@
   return source_position_table;
 }
 
+char* WasmCompilationUnit::GetTaggedFunctionName(
+    const wasm::WasmFunction* function) {
+  snprintf(function_name_, sizeof(function_name_), "wasm#%d",
+           function->func_index);
+  return function_name_;
+}
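
The 16-byte function_name_ buffer is sized exactly for the worst case: "wasm#" (5 chars) plus a full uint32 (up to 10 digits) plus the terminator. A standalone equivalent using plain snprintf:

    #include <cstdint>
    #include <cstdio>

    void TagFunctionName(char (&buf)[16], uint32_t func_index) {
      std::snprintf(buf, sizeof(buf), "wasm#%u", func_index);  // 16 bytes always suffice
    }
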
+
 WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
                                          Isolate* isolate,
-                                         wasm::ModuleEnv* module_env,
+                                         wasm::ModuleBytesEnv* module_env,
                                          const wasm::WasmFunction* function,
                                          uint32_t index)
     : thrower_(thrower),
       isolate_(isolate),
       module_env_(module_env),
-      function_(function),
+      function_(&module_env->module_env.module->functions[index]),
       graph_zone_(new Zone(isolate->allocator(), ZONE_NAME)),
       jsgraph_(new (graph_zone()) JSGraph(
           isolate, new (graph_zone()) Graph(graph_zone()),
@@ -3379,14 +4041,14 @@
                        InstructionSelector::AlignmentRequirements()))),
       compilation_zone_(isolate->allocator(), ZONE_NAME),
       info_(function->name_length != 0
-                ? module_env->module->GetNameOrNull(function->name_offset,
-                                                    function->name_length)
-                : ArrayVector("wasm"),
+                ? module_env->wire_bytes.GetNameOrNull(function)
+                : CStrVector(GetTaggedFunctionName(function)),
             isolate, &compilation_zone_,
             Code::ComputeFlags(Code::WASM_FUNCTION)),
       job_(),
       index_(index),
-      ok_(true) {
+      ok_(true),
+      protected_instructions_(&compilation_zone_) {
   // Create and cache this node in the main thread.
   jsgraph_->CEntryStubConstant(1);
 }
@@ -3398,7 +4060,9 @@
   if (FLAG_trace_wasm_compiler) {
     OFStream os(stdout);
     os << "Compiling WASM function "
-       << wasm::WasmFunctionName(function_, module_env_) << std::endl;
+       << wasm::WasmFunctionName(
+              function_, module_env_->wire_bytes.GetNameOrNull(function_))
+       << std::endl;
     os << std::endl;
   }
 
@@ -3423,11 +4087,12 @@
   CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
       &compilation_zone_, function_->sig);
   if (jsgraph_->machine()->Is32()) {
-    descriptor =
-        module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
+    descriptor = module_env_->module_env.GetI32WasmCallDescriptor(
+        &compilation_zone_, descriptor);
   }
-  job_.reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph_->graph(),
-                                             descriptor, source_positions));
+  job_.reset(Pipeline::NewWasmCompilationJob(
+      &info_, jsgraph_, descriptor, source_positions, &protected_instructions_,
+      module_env_->module_env.module->origin != wasm::kWasmOrigin));
   ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
   // TODO(bradnelson): Improve histogram handling of size_t.
   // TODO(ahaas): The counters are not thread-safe at the moment.
@@ -3438,11 +4103,10 @@
   if (FLAG_trace_wasm_decode_time) {
     double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
     PrintF(
-        "wasm-compilation phase 1 ok: %d bytes, %0.3f ms decode, %zu nodes, "
+        "wasm-compilation phase 1 ok: %u bytes, %0.3f ms decode, %zu nodes, "
         "%0.3f ms pipeline\n",
-        static_cast<int>(function_->code_end_offset -
-                         function_->code_start_offset),
-        decode_ms, node_count, pipeline_ms);
+        function_->code_end_offset - function_->code_start_offset, decode_ms,
+        node_count, pipeline_ms);
   }
 }
 
@@ -3451,8 +4115,7 @@
     if (graph_construction_result_.failed()) {
       // Add the function as another context for the exception
       ScopedVector<char> buffer(128);
-      wasm::WasmName name = module_env_->module->GetName(
-          function_->name_offset, function_->name_length);
+      wasm::WasmName name = module_env_->wire_bytes.GetName(function_);
       SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
                function_->func_index, name.length(), name.start());
       thrower_->CompileFailed(buffer.start(), graph_construction_result_);
@@ -3460,31 +4123,29 @@
 
     return Handle<Code>::null();
   }
+  base::ElapsedTimer codegen_timer;
+  if (FLAG_trace_wasm_decode_time) {
+    codegen_timer.Start();
+  }
   if (job_->FinalizeJob() != CompilationJob::SUCCEEDED) {
     return Handle<Code>::null();
   }
-  base::ElapsedTimer compile_timer;
-  if (FLAG_trace_wasm_decode_time) {
-    compile_timer.Start();
-  }
   Handle<Code> code = info_.code();
   DCHECK(!code.is_null());
 
   if (isolate_->logger()->is_logging_code_events() ||
       isolate_->is_profiling()) {
-    RecordFunctionCompilation(
-        CodeEventListener::FUNCTION_TAG, isolate_, code, "WASM_function",
-        function_->func_index, wasm::WasmName("module"),
-        module_env_->module->GetName(function_->name_offset,
-                                     function_->name_length));
+    RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
+                              "WASM_function", function_->func_index,
+                              wasm::WasmName("module"),
+                              module_env_->wire_bytes.GetName(function_));
   }
 
   if (FLAG_trace_wasm_decode_time) {
-    double compile_ms = compile_timer.Elapsed().InMillisecondsF();
-    PrintF("wasm-code-generation ok: %d bytes, %0.3f ms code generation\n",
-           static_cast<int>(function_->code_end_offset -
-                            function_->code_start_offset),
-           compile_ms);
+    double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
+    PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
+           function_->code_end_offset - function_->code_start_offset,
+           codegen_ms);
   }
 
   return code;
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index b4bc350..706c386 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -11,6 +11,8 @@
 // Do not include anything from src/compiler here!
 #include "src/compilation-info.h"
 #include "src/compiler.h"
+#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-opcodes.h"
 #include "src/wasm/wasm-result.h"
 #include "src/zone/zone.h"
@@ -29,8 +31,10 @@
 
 namespace wasm {
 // Forward declarations for some WASM data structures.
+struct ModuleBytesEnv;
 struct ModuleEnv;
 struct WasmFunction;
+struct WasmModule;
 class ErrorThrower;
 struct DecodeStruct;
 
@@ -43,7 +47,7 @@
 class WasmCompilationUnit final {
  public:
   WasmCompilationUnit(wasm::ErrorThrower* thrower, Isolate* isolate,
-                      wasm::ModuleEnv* module_env,
+                      wasm::ModuleBytesEnv* module_env,
                       const wasm::WasmFunction* function, uint32_t index);
 
   Zone* graph_zone() { return graph_zone_.get(); }
@@ -54,20 +58,24 @@
 
   static Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower,
                                           Isolate* isolate,
-                                          wasm::ModuleEnv* module_env,
+                                          wasm::ModuleBytesEnv* module_env,
                                           const wasm::WasmFunction* function) {
-    WasmCompilationUnit unit(thrower, isolate, module_env, function, 0);
+    WasmCompilationUnit unit(thrower, isolate, module_env, function,
+                             function->func_index);
     unit.ExecuteCompilation();
     return unit.FinishCompilation();
   }
 
  private:
   SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
+  char* GetTaggedFunctionName(const wasm::WasmFunction* function);
 
   wasm::ErrorThrower* thrower_;
   Isolate* isolate_;
-  wasm::ModuleEnv* module_env_;
+  wasm::ModuleBytesEnv* module_env_;
   const wasm::WasmFunction* function_;
+  // Fallback name tagged with the uint32 func_index: "wasm#<func_index>".
+  char function_name_[16];
   // The graph zone is deallocated at the end of ExecuteCompilation.
   std::unique_ptr<Zone> graph_zone_;
   JSGraph* jsgraph_;
@@ -77,6 +85,9 @@
   uint32_t index_;
   wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
   bool ok_;
+  ZoneVector<trap_handler::ProtectedInstructionData>
+      protected_instructions_;  // Instructions that are protected by the signal
+                                // handler.
 
   DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
 };
@@ -85,12 +96,20 @@
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
                                     wasm::FunctionSig* sig, uint32_t index,
                                     Handle<String> module_name,
-                                    MaybeHandle<String> import_name);
+                                    MaybeHandle<String> import_name,
+                                    wasm::ModuleOrigin origin);
 
 // Wraps a given wasm code object, producing a JS-callable code object.
-Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+Handle<Code> CompileJSToWasmWrapper(Isolate* isolate,
+                                    const wasm::WasmModule* module,
                                     Handle<Code> wasm_code, uint32_t index);
 
+// Compiles a stub that redirects a call to a wasm function to the wasm
+// interpreter. It's ABI compatible with the compiled wasm function.
+Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
+                                         wasm::FunctionSig* sig,
+                                         Handle<WasmInstanceObject> instance);
+
 // Abstracts details of building TurboFan graph nodes for WASM to separate
 // the WASM decoder from the internal details of TurboFan.
 class WasmTrapHelper;
@@ -98,7 +117,7 @@
 class WasmGraphBuilder {
  public:
   WasmGraphBuilder(
-      Zone* z, JSGraph* g, wasm::FunctionSig* function_signature,
+      wasm::ModuleEnv* module_env, Zone* z, JSGraph* g, wasm::FunctionSig* sig,
       compiler::SourcePositionTable* source_position_table = nullptr);
 
   Node** Buffer(size_t count) {
@@ -116,11 +135,11 @@
   //-----------------------------------------------------------------------
   Node* Error();
   Node* Start(unsigned params);
-  Node* Param(unsigned index, wasm::LocalType type);
+  Node* Param(unsigned index);
   Node* Loop(Node* entry);
   Node* Terminate(Node* effect, Node* control);
   Node* Merge(unsigned count, Node** controls);
-  Node* Phi(wasm::LocalType type, unsigned count, Node** vals, Node* control);
+  Node* Phi(wasm::ValueType type, unsigned count, Node** vals, Node* control);
   Node* EffectPhi(unsigned count, Node** effects, Node* control);
   Node* NumberConstant(int32_t value);
   Node* Uint32Constant(uint32_t value);
@@ -155,7 +174,12 @@
   Node* Switch(unsigned count, Node* key);
   Node* IfValue(int32_t value, Node* sw);
   Node* IfDefault(Node* sw);
-  Node* Return(unsigned count, Node** vals);
+  Node* Return(unsigned count, Node** nodes);
+  template <typename... Nodes>
+  Node* Return(Node* fst, Nodes*... more) {
+    Node* arr[] = {fst, more...};
+    return Return(arraysize(arr), arr);
+  }
   Node* ReturnVoid();
   Node* Unreachable(wasm::WasmCodePosition position);
 
@@ -166,9 +190,11 @@
 
   void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
   void BuildWasmToJSWrapper(Handle<JSReceiver> target, wasm::FunctionSig* sig);
+  void BuildWasmInterpreterEntry(uint32_t func_index, wasm::FunctionSig* sig,
+                                 Handle<WasmInstanceObject> instance);
 
-  Node* ToJS(Node* node, wasm::LocalType type);
-  Node* FromJS(Node* node, Node* context, wasm::LocalType type);
+  Node* ToJS(Node* node, wasm::ValueType type);
+  Node* FromJS(Node* node, Node* context, wasm::ValueType type);
   Node* Invert(Node* node);
   void EnsureFunctionTableNodes();
 
@@ -178,7 +204,7 @@
   Node* CurrentMemoryPages();
   Node* GetGlobal(uint32_t index);
   Node* SetGlobal(uint32_t index, Node* val);
-  Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
+  Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
                 uint32_t offset, uint32_t alignment,
                 wasm::WasmCodePosition position);
   Node* StoreMem(MachineType type, Node* index, uint32_t offset,
@@ -190,13 +216,11 @@
   Node* Control() { return *control_; }
   Node* Effect() { return *effect_; }
 
-  void set_module(wasm::ModuleEnv* module) { this->module_ = module; }
-
   void set_control_ptr(Node** control) { this->control_ = control; }
 
   void set_effect_ptr(Node** effect) { this->effect_ = effect; }
 
-  wasm::FunctionSig* GetFunctionSignature() { return function_signature_; }
+  wasm::FunctionSig* GetFunctionSignature() { return sig_; }
 
   void Int64LoweringForTesting();
 
@@ -207,7 +231,19 @@
   Node* CreateS128Value(int32_t value);
 
   Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
-  Node* SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane, Node* input);
+
+  Node* SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
+                   const NodeVector& inputs);
+
+  Node* SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
+                    const NodeVector& inputs);
+
+  Node* SimdSwizzleOp(wasm::WasmOpcode opcode, uint32_t swizzle,
+                      const NodeVector& inputs);
+
+  bool has_simd() const { return has_simd_; }
+
+  wasm::ModuleEnv* module_env() const { return module_; }
 
  private:
   static const int kDefaultBufferSize = 16;
@@ -215,19 +251,21 @@
 
   Zone* zone_;
   JSGraph* jsgraph_;
-  wasm::ModuleEnv* module_;
-  Node* mem_buffer_;
-  Node* mem_size_;
+  wasm::ModuleEnv* module_ = nullptr;
+  Node* mem_buffer_ = nullptr;
+  Node* mem_size_ = nullptr;
+  NodeVector signature_tables_;
   NodeVector function_tables_;
   NodeVector function_table_sizes_;
-  Node** control_;
-  Node** effect_;
+  Node** control_ = nullptr;
+  Node** effect_ = nullptr;
   Node** cur_buffer_;
   size_t cur_bufsize_;
   Node* def_buffer_[kDefaultBufferSize];
+  bool has_simd_ = false;
 
   WasmTrapHelper* trap_;
-  wasm::FunctionSig* function_signature_;
+  wasm::FunctionSig* sig_;
   SetOncePointer<const Operator> allocate_heap_number_operator_;
 
   compiler::SourcePositionTable* source_position_table_ = nullptr;
@@ -243,7 +281,7 @@
                       wasm::WasmCodePosition position);
 
   Node* BuildChangeEndianness(Node* node, MachineType type,
-                              wasm::LocalType wasmtype = wasm::kAstStmt);
+                              wasm::ValueType wasmtype = wasm::kWasmStmt);
 
   Node* MaskShiftCount32(Node* node);
   Node* MaskShiftCount64(Node* node);
@@ -314,8 +352,7 @@
                        MachineType result_type, int trap_zero,
                        wasm::WasmCodePosition position);
 
-  Node* BuildJavaScriptToNumber(Node* node, Node* context, Node* effect,
-                                Node* control);
+  Node* BuildJavaScriptToNumber(Node* node, Node* context);
 
   Node* BuildChangeInt32ToTagged(Node* value);
   Node* BuildChangeFloat64ToTagged(Node* value);
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index a41c93c..01c1b86 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -5,6 +5,7 @@
 #include "src/assembler.h"
 #include "src/base/lazy-instance.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 #include "src/register-configuration.h"
 
 #include "src/wasm/wasm-module.h"
@@ -24,17 +25,17 @@
 
 namespace {
 
-MachineType MachineTypeFor(LocalType type) {
+MachineType MachineTypeFor(ValueType type) {
   switch (type) {
-    case kAstI32:
+    case kWasmI32:
       return MachineType::Int32();
-    case kAstI64:
+    case kWasmI64:
       return MachineType::Int64();
-    case kAstF64:
+    case kWasmF64:
       return MachineType::Float64();
-    case kAstF32:
+    case kWasmF32:
       return MachineType::Float32();
-    case kAstS128:
+    case kWasmS128:
       return MachineType::Simd128();
     default:
       UNREACHABLE();
@@ -173,7 +174,7 @@
 
   int stack_offset;
 
-  LinkageLocation Next(LocalType type) {
+  LinkageLocation Next(ValueType type) {
     if (IsFloatingPoint(type)) {
       // Allocate a floating point register/stack location.
       if (fp_offset < fp_count) {
@@ -182,7 +183,7 @@
         // Allocate floats using a double register, but modify the code to
         // reflect how ARM FP registers alias.
         // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
-        if (type == kAstF32) {
+        if (type == kWasmF32) {
           int float_reg_code = reg.code() * 2;
           DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
           return regloc(DoubleRegister::from_code(float_reg_code),
@@ -206,11 +207,11 @@
       }
     }
   }
-  bool IsFloatingPoint(LocalType type) {
-    return type == kAstF32 || type == kAstF64;
+  bool IsFloatingPoint(ValueType type) {
+    return type == kWasmF32 || type == kWasmF64;
   }
-  int Words(LocalType type) {
-    if (kPointerSize < 8 && (type == kAstI64 || type == kAstF64)) {
+  int Words(ValueType type) {
+    if (kPointerSize < 8 && (type == kWasmI64 || type == kWasmF64)) {
       return 2;
     }
     return 1;
@@ -285,7 +286,7 @@
   // Add return location(s).
   const int return_count = static_cast<int>(locations.return_count_);
   for (int i = 0; i < return_count; i++) {
-    LocalType ret = fsig->GetReturn(i);
+    ValueType ret = fsig->GetReturn(i);
     locations.AddReturn(rets.Next(ret));
   }
 
@@ -294,7 +295,7 @@
   // Add register and/or stack parameter(s).
   const int parameter_count = static_cast<int>(fsig->parameter_count());
   for (int i = 0; i < parameter_count; i++) {
-    LocalType param = fsig->GetParam(i);
+    ValueType param = fsig->GetParam(i);
     locations.AddParam(params.Next(param));
   }
 
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index 745ac50..ae33e8c 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -11,6 +11,7 @@
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/osr.h"
+#include "src/heap/heap-inl.h"
 #include "src/wasm/wasm-module.h"
 #include "src/x64/assembler-x64.h"
 #include "src/x64/macro-assembler-x64.h"
@@ -43,9 +44,7 @@
       DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
       return Immediate(0);
     }
-    if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-        constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE ||
-        constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+    if (RelocInfo::IsWasmReference(constant.rmode())) {
       return Immediate(constant.ToInt32(), constant.rmode());
     }
     return Immediate(constant.ToInt32());
@@ -270,38 +269,56 @@
 
 class WasmOutOfLineTrap final : public OutOfLineCode {
  public:
-  WasmOutOfLineTrap(CodeGenerator* gen, Address pc, bool frame_elided,
-                    Register context, int32_t position)
+  WasmOutOfLineTrap(CodeGenerator* gen, int pc, bool frame_elided,
+                    int32_t position, Instruction* instr)
       : OutOfLineCode(gen),
+        gen_(gen),
         pc_(pc),
         frame_elided_(frame_elided),
-        context_(context),
-        position_(position) {}
+        position_(position),
+        instr_(instr) {}
 
+  // TODO(eholk): Refactor this method to take the code generator as a
+  // parameter.
   void Generate() final {
-    // TODO(eholk): record pc_ and the current pc in a table so that
-    // the signal handler can find it.
-    USE(pc_);
+    __ RecordProtectedInstructionLanding(pc_);
 
     if (frame_elided_) {
-      __ EnterFrame(StackFrame::WASM);
+      __ EnterFrame(StackFrame::WASM_COMPILED);
     }
 
     wasm::TrapReason trap_id = wasm::kTrapMemOutOfBounds;
     int trap_reason = wasm::WasmOpcodes::TrapReasonToMessageId(trap_id);
     __ Push(Smi::FromInt(trap_reason));
     __ Push(Smi::FromInt(position_));
-    __ Move(rsi, context_);
+    __ Move(rsi, gen_->isolate()->native_context());
     __ CallRuntime(Runtime::kThrowWasmError);
+
+    if (instr_->reference_map() != nullptr) {
+      gen_->RecordSafepoint(instr_->reference_map(), Safepoint::kSimple, 0,
+                            Safepoint::kNoLazyDeopt);
+    }
   }
 
  private:
-  Address pc_;
+  CodeGenerator* gen_;
+  int pc_;
   bool frame_elided_;
-  Register context_;
   int32_t position_;
+  Instruction* instr_;
 };
 
+void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
+                         InstructionCode opcode, size_t input_count,
+                         X64OperandConverter& i, int pc, Instruction* instr) {
+  const X64MemoryProtection protection =
+      static_cast<X64MemoryProtection>(MiscField::decode(opcode));
+  if (protection == X64MemoryProtection::kProtected) {
+    const bool frame_elided = !codegen->frame_access_state()->has_frame();
+    const int32_t position = i.InputInt32(input_count - 1);
+    new (zone) WasmOutOfLineTrap(codegen, pc, frame_elided, position, instr);
+  }
+}
 }  // namespace
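
EmitOOLTrapIfNeeded is the hook for signal-handler-based bounds checking: every memory access marked kProtected gets an out-of-line trap block, and the pc of the access is recorded (via RecordProtectedInstructionLanding and the protected_instructions_ table) so the handler can map a faulting pc back to its trap code instead of treating it as a crash. A sketch of that lookup (the struct mirrors ProtectedInstructionData in spirit; everything here is illustrative):

    #include <cstdint>
    #include <vector>

    struct ProtectedInstruction {
      uint32_t instr_offset;    // offset of the memory access in the code object
      uint32_t landing_offset;  // offset of its out-of-line trap code
    };

    // Returns the landing pad for a faulting offset, or 0 if the fault is not
    // a protected instruction (i.e., a genuine crash).
    uint32_t FindLandingPad(const std::vector<ProtectedInstruction>& table,
                            uint32_t fault_offset) {
      for (const auto& entry : table) {
        if (entry.instr_offset == fault_offset) return entry.landing_offset;
      }
      return 0;
    }
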
 
 
@@ -708,8 +725,8 @@
   Label done;
 
   // Check if current frame is an arguments adaptor frame.
-  __ Cmp(Operand(rbp, StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
+          Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &done, Label::kNear);
 
   // Load arguments count from current arguments adaptor frame (note, it
@@ -912,10 +929,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1838,21 +1853,31 @@
       __ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
       break;
     case kX64Movsxbl:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movsxbl);
       __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movzxbl:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movzxbl);
       __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movsxbq:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movsxbq);
       break;
     case kX64Movzxbq:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movzxbq);
       __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movb: {
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       size_t index = 0;
       Operand operand = i.MemoryOperand(&index);
       if (HasImmediateInput(instr, index)) {
@@ -1863,21 +1888,31 @@
       break;
     }
     case kX64Movsxwl:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movsxwl);
       __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movzxwl:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movzxwl);
       __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movsxwq:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movsxwq);
       break;
     case kX64Movzxwq:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movzxwq);
       __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movw: {
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       size_t index = 0;
       Operand operand = i.MemoryOperand(&index);
       if (HasImmediateInput(instr, index)) {
@@ -1888,7 +1923,8 @@
       break;
     }
     case kX64Movl:
-    case kX64TrapMovl:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       if (instr->HasOutput()) {
         if (instr->addressing_mode() == kMode_None) {
           if (instr->InputAt(0)->IsRegister()) {
@@ -1897,14 +1933,7 @@
             __ movl(i.OutputRegister(), i.InputOperand(0));
           }
         } else {
-          Address pc = __ pc();
           __ movl(i.OutputRegister(), i.MemoryOperand());
-
-          if (arch_opcode == kX64TrapMovl) {
-            bool frame_elided = !frame_access_state()->has_frame();
-            new (zone()) WasmOutOfLineTrap(this, pc, frame_elided,
-                                           i.InputRegister(2), i.InputInt32(3));
-          }
         }
         __ AssertZeroExtended(i.OutputRegister());
       } else {
@@ -1918,9 +1947,13 @@
       }
       break;
     case kX64Movsxlq:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movsxlq);
       break;
     case kX64Movq:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       if (instr->HasOutput()) {
         __ movq(i.OutputRegister(), i.MemoryOperand());
       } else {
@@ -1934,6 +1967,8 @@
       }
       break;
     case kX64Movss:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       if (instr->HasOutput()) {
         __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
       } else {
@@ -1943,6 +1978,8 @@
       }
       break;
     case kX64Movsd:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       if (instr->HasOutput()) {
         __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
       } else {
@@ -2059,30 +2096,35 @@
       __ incl(i.OutputRegister());
       break;
     case kX64Push:
-      if (HasImmediateInput(instr, 0)) {
+      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
+        size_t index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ pushq(operand);
+        frame_access_state()->IncreaseSPDelta(1);
+        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                         kPointerSize);
+      } else if (HasImmediateInput(instr, 0)) {
         __ pushq(i.InputImmediate(0));
         frame_access_state()->IncreaseSPDelta(1);
         unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
                                                          kPointerSize);
+      } else if (instr->InputAt(0)->IsRegister()) {
+        __ pushq(i.InputRegister(0));
+        frame_access_state()->IncreaseSPDelta(1);
+        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                         kPointerSize);
+      } else if (instr->InputAt(0)->IsFPRegister()) {
+        // TODO(titzer): use another machine instruction?
+        __ subq(rsp, Immediate(kDoubleSize));
+        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                         kDoubleSize);
+        __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
       } else {
-        if (instr->InputAt(0)->IsRegister()) {
-          __ pushq(i.InputRegister(0));
-          frame_access_state()->IncreaseSPDelta(1);
-          unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-                                                           kPointerSize);
-        } else if (instr->InputAt(0)->IsFPRegister()) {
-          // TODO(titzer): use another machine instruction?
-          __ subq(rsp, Immediate(kDoubleSize));
-          frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
-          unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-                                                           kDoubleSize);
-          __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
-        } else {
-          __ pushq(i.InputOperand(0));
-          frame_access_state()->IncreaseSPDelta(1);
-          unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-                                                           kPointerSize);
-        }
+        __ pushq(i.InputOperand(0));
+        frame_access_state()->IncreaseSPDelta(1);
+        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                         kPointerSize);
       }
       break;
     case kX64Poke: {
@@ -2124,6 +2166,26 @@
       __ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
       break;
     }
+    case kX64Int32x4ReplaceLane: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      if (instr->InputAt(2)->IsRegister()) {
+        __ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
+                  i.InputInt8(1));
+      } else {
+        __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+      }
+      break;
+    }
+    case kX64Int32x4Add: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64Int32x4Sub: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
       break;
@@ -2183,61 +2245,58 @@
   return kSuccess;
 }  // NOLINT(readability/fn_size)
 
+namespace {
+
+Condition FlagsConditionToCondition(FlagsCondition condition) {
+  switch (condition) {
+    case kUnorderedEqual:
+    case kEqual:
+      return equal;
+    case kUnorderedNotEqual:
+    case kNotEqual:
+      return not_equal;
+    case kSignedLessThan:
+      return less;
+    case kSignedGreaterThanOrEqual:
+      return greater_equal;
+    case kSignedLessThanOrEqual:
+      return less_equal;
+    case kSignedGreaterThan:
+      return greater;
+    case kUnsignedLessThan:
+      return below;
+    case kUnsignedGreaterThanOrEqual:
+      return above_equal;
+    case kUnsignedLessThanOrEqual:
+      return below_equal;
+    case kUnsignedGreaterThan:
+      return above;
+    case kOverflow:
+      return overflow;
+    case kNotOverflow:
+      return no_overflow;
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return no_condition;
+}
+
+}  // namespace
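
Collapsing the per-condition switches below into this shared helper leaves only the unordered cases special: an x64 floating-point compare reports a NaN operand as "unordered" by setting PF, which the generic condition jump cannot observe, so the parity branch stays at each call site. A scalar model of the kUnorderedEqual result (illustrative helper, not V8 code):

    #include <cmath>

    bool UnorderedEqualResult(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return false;  // the parity_even path
      return a == b;                                     // the plain equal path
    }
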
 
 // Assembles branches after this instruction.
 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
-  X64OperandConverter i(this, instr);
   Label::Distance flabel_distance =
       branch->fallthru ? Label::kNear : Label::kFar;
   Label* tlabel = branch->true_label;
   Label* flabel = branch->false_label;
-  switch (branch->condition) {
-    case kUnorderedEqual:
-      __ j(parity_even, flabel, flabel_distance);
-    // Fall through.
-    case kEqual:
-      __ j(equal, tlabel);
-      break;
-    case kUnorderedNotEqual:
-      __ j(parity_even, tlabel);
-    // Fall through.
-    case kNotEqual:
-      __ j(not_equal, tlabel);
-      break;
-    case kSignedLessThan:
-      __ j(less, tlabel);
-      break;
-    case kSignedGreaterThanOrEqual:
-      __ j(greater_equal, tlabel);
-      break;
-    case kSignedLessThanOrEqual:
-      __ j(less_equal, tlabel);
-      break;
-    case kSignedGreaterThan:
-      __ j(greater, tlabel);
-      break;
-    case kUnsignedLessThan:
-      __ j(below, tlabel);
-      break;
-    case kUnsignedGreaterThanOrEqual:
-      __ j(above_equal, tlabel);
-      break;
-    case kUnsignedLessThanOrEqual:
-      __ j(below_equal, tlabel);
-      break;
-    case kUnsignedGreaterThan:
-      __ j(above, tlabel);
-      break;
-    case kOverflow:
-      __ j(overflow, tlabel);
-      break;
-    case kNotOverflow:
-      __ j(no_overflow, tlabel);
-      break;
-    default:
-      UNREACHABLE();
-      break;
+  if (branch->condition == kUnorderedEqual) {
+    __ j(parity_even, flabel, flabel_distance);
+  } else if (branch->condition == kUnorderedNotEqual) {
+    __ j(parity_even, tlabel);
   }
+  __ j(FlagsConditionToCondition(branch->condition), tlabel);
+
   if (!branch->fallthru) __ jmp(flabel, flabel_distance);
 }
 
@@ -2246,6 +2305,73 @@
   if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      X64OperandConverter i(gen_, instr_);
+
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        __ PrepareCallCFunction(0);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ ud2();
+        }
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Label end;
+  if (condition == kUnorderedEqual) {
+    __ j(parity_even, &end);
+  } else if (condition == kUnorderedNotEqual) {
+    __ j(parity_even, tlabel);
+  }
+  __ j(FlagsConditionToCondition(condition), tlabel);
+  __ bind(&end);
+}
 
 // Assembles boolean materializations after this instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2258,60 +2384,17 @@
   Label check;
   DCHECK_NE(0u, instr->OutputCount());
   Register reg = i.OutputRegister(instr->OutputCount() - 1);
-  Condition cc = no_condition;
-  switch (condition) {
-    case kUnorderedEqual:
-      __ j(parity_odd, &check, Label::kNear);
-      __ movl(reg, Immediate(0));
-      __ jmp(&done, Label::kNear);
-    // Fall through.
-    case kEqual:
-      cc = equal;
-      break;
-    case kUnorderedNotEqual:
-      __ j(parity_odd, &check, Label::kNear);
-      __ movl(reg, Immediate(1));
-      __ jmp(&done, Label::kNear);
-    // Fall through.
-    case kNotEqual:
-      cc = not_equal;
-      break;
-    case kSignedLessThan:
-      cc = less;
-      break;
-    case kSignedGreaterThanOrEqual:
-      cc = greater_equal;
-      break;
-    case kSignedLessThanOrEqual:
-      cc = less_equal;
-      break;
-    case kSignedGreaterThan:
-      cc = greater;
-      break;
-    case kUnsignedLessThan:
-      cc = below;
-      break;
-    case kUnsignedGreaterThanOrEqual:
-      cc = above_equal;
-      break;
-    case kUnsignedLessThanOrEqual:
-      cc = below_equal;
-      break;
-    case kUnsignedGreaterThan:
-      cc = above;
-      break;
-    case kOverflow:
-      cc = overflow;
-      break;
-    case kNotOverflow:
-      cc = no_overflow;
-      break;
-    default:
-      UNREACHABLE();
-      break;
+  if (condition == kUnorderedEqual) {
+    __ j(parity_odd, &check, Label::kNear);
+    __ movl(reg, Immediate(0));
+    __ jmp(&done, Label::kNear);
+  } else if (condition == kUnorderedNotEqual) {
+    __ j(parity_odd, &check, Label::kNear);
+    __ movl(reg, Immediate(1));
+    __ jmp(&done, Label::kNear);
   }
   __ bind(&check);
-  __ setcc(cc, reg);
+  __ setcc(FlagsConditionToCondition(condition), reg);
   __ movzxbl(reg, reg);
   __ bind(&done);
 }
@@ -2344,13 +2427,16 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -2555,8 +2641,7 @@
                                                : kScratchRegister;
       switch (src.type()) {
         case Constant::kInt32: {
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
             __ movq(dst, src.ToInt64(), src.rmode());
           } else {
             // TODO(dcarney): don't need scratch in this case.
@@ -2564,7 +2649,7 @@
             if (value == 0) {
               __ xorl(dst, dst);
             } else {
-              if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+              if (RelocInfo::IsWasmSizeReference(src.rmode())) {
                 __ movl(dst, Immediate(value, src.rmode()));
               } else {
                 __ movl(dst, Immediate(value));
@@ -2574,11 +2659,10 @@
           break;
         }
         case Constant::kInt64:
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
             __ movq(dst, src.ToInt64(), src.rmode());
           } else {
-            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+            DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
             __ Set(dst, src.ToInt64());
           }
           break;
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index 35acec0..aad1727 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -128,7 +128,6 @@
   V(X64Movzxwq)                    \
   V(X64Movw)                       \
   V(X64Movl)                       \
-  V(X64TrapMovl)                   \
   V(X64Movsxlq)                    \
   V(X64Movq)                       \
   V(X64Movsd)                      \
@@ -148,7 +147,10 @@
   V(X64Xchgw)                      \
   V(X64Xchgl)                      \
   V(X64Int32x4Create)              \
-  V(X64Int32x4ExtractLane)
+  V(X64Int32x4ExtractLane)         \
+  V(X64Int32x4ReplaceLane)         \
+  V(X64Int32x4Add)                 \
+  V(X64Int32x4Sub)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
@@ -183,6 +185,8 @@
   V(M8I)  /* [      %r2*8 + K] */      \
   V(Root) /* [%root       + K] */
 
+enum X64MemoryProtection { kUnprotected = 0, kProtected = 1 };
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
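
The new X64MemoryProtection value rides along inside the instruction opcode and is recovered with MiscField::decode(opcode) in EmitOOLTrapIfNeeded above. A self-contained sketch of that encode/decode pattern (the bit position here is made up for illustration):

    #include <cstdint>

    enum X64MemoryProtection { kUnprotected = 0, kProtected = 1 };

    constexpr uint32_t kMiscShift = 22;  // hypothetical field position
    constexpr uint32_t kMiscMask = 0x3u << kMiscShift;

    constexpr uint32_t EncodeProtection(uint32_t opcode, X64MemoryProtection p) {
      return (opcode & ~kMiscMask) | (static_cast<uint32_t>(p) << kMiscShift);
    }

    constexpr X64MemoryProtection DecodeProtection(uint32_t opcode) {
      return static_cast<X64MemoryProtection>((opcode & kMiscMask) >> kMiscShift);
    }
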
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index ef0c3ad..427e580 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -125,6 +125,9 @@
     case kX64Inc32:
     case kX64Int32x4Create:
     case kX64Int32x4ExtractLane:
+    case kX64Int32x4ReplaceLane:
+    case kX64Int32x4Add:
+    case kX64Int32x4Sub:
       return (instr->addressing_mode() == kMode_None)
           ? kNoOpcodeFlags
           : kIsLoadOperation | kHasSideEffect;
@@ -155,7 +158,6 @@
       return kHasSideEffect;
 
     case kX64Movl:
-    case kX64TrapMovl:
       if (instr->HasOutput()) {
         DCHECK(instr->InputCount() >= 1);
         return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 878e778..7abdd90 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -58,6 +58,7 @@
     MachineRepresentation rep =
         LoadRepresentationOf(input->op()).representation();
     switch (opcode) {
+      case kX64Push:
       case kX64Cmp:
       case kX64Test:
         return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
@@ -82,6 +83,15 @@
                                              InstructionOperand inputs[],
                                              size_t* input_count) {
     AddressingMode mode = kMode_MRI;
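+    // A base that is a constant zero adds nothing to the address, so drop it
+    // and pick the addressing mode from index and displacement alone.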
+    if (base != nullptr && (index != nullptr || displacement != nullptr)) {
+      if (base->opcode() == IrOpcode::kInt32Constant &&
+          OpParameter<int32_t>(base) == 0) {
+        base = nullptr;
+      } else if (base->opcode() == IrOpcode::kInt64Constant &&
+                 OpParameter<int64_t>(base) == 0) {
+        base = nullptr;
+      }
+    }
     if (base != nullptr) {
       inputs[(*input_count)++] = UseRegister(base);
       if (index != nullptr) {
@@ -110,17 +120,22 @@
         }
       }
     } else {
-      DCHECK_NOT_NULL(index);
       DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
-      inputs[(*input_count)++] = UseRegister(index);
       if (displacement != nullptr) {
-        inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
-                                       ? UseNegatedImmediate(displacement)
-                                       : UseImmediate(displacement);
-        static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
-                                                    kMode_M4I, kMode_M8I};
-        mode = kMnI_modes[scale_exponent];
+        if (index == nullptr) {
+          inputs[(*input_count)++] = UseRegister(displacement);
+          mode = kMode_MR;
+        } else {
+          inputs[(*input_count)++] = UseRegister(index);
+          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+                                         ? UseNegatedImmediate(displacement)
+                                         : UseImmediate(displacement);
+          static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
+                                                      kMode_M4I, kMode_M8I};
+          mode = kMnI_modes[scale_exponent];
+        }
       } else {
+        inputs[(*input_count)++] = UseRegister(index);
         static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
                                                    kMode_M4, kMode_M8};
         mode = kMn_modes[scale_exponent];
@@ -154,10 +169,18 @@
     }
     BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
     DCHECK(m.matches());
-    if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
+    if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
       return GenerateMemoryOperandInputs(
           m.index(), m.scale(), m.base(), m.displacement(),
           m.displacement_mode(), inputs, input_count);
+    } else if (m.base() == nullptr &&
+               m.displacement_mode() == kPositiveDisplacement) {
+      // The displacement cannot be an immediate, but we can use the
+      // displacement as base instead and still benefit from addressing
+      // modes for the scale.
+      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.displacement(),
+                                         nullptr, m.displacement_mode(), inputs,
+                                         input_count);
     } else {
       inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
       inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
@@ -171,7 +194,6 @@
 };
 
 namespace {
-
 ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
   ArchOpcode opcode = kArchNop;
   switch (load_rep.representation()) {
@@ -198,6 +220,9 @@
       opcode = kX64Movq;
       break;
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       break;
@@ -205,6 +230,42 @@
   return opcode;
 }
 
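+// Maps a store representation to the matching x64 move opcode.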
+ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
+  switch (store_rep.representation()) {
+    case MachineRepresentation::kFloat32:
+      return kX64Movss;
+    case MachineRepresentation::kFloat64:
+      return kX64Movsd;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      return kX64Movb;
+    case MachineRepresentation::kWord16:
+      return kX64Movw;
+    case MachineRepresentation::kWord32:
+      return kX64Movl;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord64:
+      return kX64Movq;
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return kArchNop;
+  }
+  UNREACHABLE();
+  return kArchNop;
+}
+
 }  // namespace
 
 void InstructionSelector::VisitLoad(Node* node) {
@@ -214,33 +275,21 @@
   ArchOpcode opcode = GetLoadOpcode(load_rep);
   InstructionOperand outputs[1];
   outputs[0] = g.DefineAsRegister(node);
-  InstructionOperand inputs[3];
-  size_t input_count = 0;
-  AddressingMode mode =
-      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
-  InstructionCode code = opcode | AddressingModeField::encode(mode);
-  Emit(code, 1, outputs, input_count, inputs);
-}
-
-void InstructionSelector::VisitProtectedLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  X64OperandGenerator g(this);
-
-  ArchOpcode opcode = GetLoadOpcode(load_rep);
-  InstructionOperand outputs[1];
-  outputs[0] = g.DefineAsRegister(node);
   InstructionOperand inputs[4];
   size_t input_count = 0;
   AddressingMode mode =
       g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
-  // Add the context parameter as an input.
-  inputs[input_count++] = g.UseUniqueRegister(node->InputAt(2));
-  // Add the source position as an input
-  inputs[input_count++] = g.UseImmediate(node->InputAt(3));
   InstructionCode code = opcode | AddressingModeField::encode(mode);
+  if (node->opcode() == IrOpcode::kProtectedLoad) {
+    code |= MiscField::encode(X64MemoryProtection::kProtected);
+    // Add the source position as an input.
+    inputs[input_count++] = g.UseImmediate(node->InputAt(2));
+  }
   Emit(code, 1, outputs, input_count, inputs);
 }
 
+void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
+
 void InstructionSelector::VisitStore(Node* node) {
   X64OperandGenerator g(this);
   Node* base = node->InputAt(0);
@@ -249,10 +298,9 @@
 
   StoreRepresentation store_rep = StoreRepresentationOf(node->op());
   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
-  MachineRepresentation rep = store_rep.representation();
 
   if (write_barrier_kind != kNoWriteBarrier) {
-    DCHECK(CanBeTaggedPointer(rep));
+    DCHECK(CanBeTaggedPointer(store_rep.representation()));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
@@ -287,41 +335,18 @@
     code |= MiscField::encode(static_cast<int>(record_write_mode));
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
-    ArchOpcode opcode = kArchNop;
-    switch (rep) {
-      case MachineRepresentation::kFloat32:
-        opcode = kX64Movss;
-        break;
-      case MachineRepresentation::kFloat64:
-        opcode = kX64Movsd;
-        break;
-      case MachineRepresentation::kBit:  // Fall through.
-      case MachineRepresentation::kWord8:
-        opcode = kX64Movb;
-        break;
-      case MachineRepresentation::kWord16:
-        opcode = kX64Movw;
-        break;
-      case MachineRepresentation::kWord32:
-        opcode = kX64Movl;
-        break;
-      case MachineRepresentation::kTaggedSigned:   // Fall through.
-      case MachineRepresentation::kTaggedPointer:  // Fall through.
-      case MachineRepresentation::kTagged:  // Fall through.
-      case MachineRepresentation::kWord64:
-        opcode = kX64Movq;
-        break;
-      case MachineRepresentation::kSimd128:  // Fall through.
-      case MachineRepresentation::kNone:
-        UNREACHABLE();
-        return;
-    }
+    ArchOpcode opcode = GetStoreOpcode(store_rep);
     InstructionOperand inputs[4];
     size_t input_count = 0;
     AddressingMode addressing_mode =
         g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
     InstructionCode code =
         opcode | AddressingModeField::encode(addressing_mode);
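+    // Stores narrower than the 64-bit pointer width only use the low bits of
+    // the value, so a covered TruncateInt64ToInt32 on the value is redundant
+    // and can be elided.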
+    if ((ElementSizeLog2Of(store_rep.representation()) < kPointerSizeLog2) &&
+        (value->opcode() == IrOpcode::kTruncateInt64ToInt32) &&
+        CanCover(node, value)) {
+      value = value->InputAt(0);
+    }
     InstructionOperand value_operand =
         g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
     inputs[input_count++] = value_operand;
@@ -330,6 +355,27 @@
   }
 }
 
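+// Like an ordinary store without a write barrier, but tags the instruction
+// as protected and appends the source position input used when the access
+// traps.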
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  X64OperandGenerator g(this);
+  Node* value = node->InputAt(2);
+  Node* position = node->InputAt(3);
+
+  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+
+  ArchOpcode opcode = GetStoreOpcode(store_rep);
+  InstructionOperand inputs[5];
+  size_t input_count = 0;
+  AddressingMode addressing_mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+                         MiscField::encode(X64MemoryProtection::kProtected);
+  InstructionOperand value_operand =
+      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+  inputs[input_count++] = value_operand;
+  inputs[input_count++] = g.UseImmediate(position);
+  Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
+}
+
 // Architecture supports unaligned access, therefore VisitLoad is used instead
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 
@@ -364,6 +410,9 @@
       break;
     case MachineRepresentation::kBit:      // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:       // Fall through.
     case MachineRepresentation::kTaggedSigned:   // Fall through.
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
@@ -419,6 +468,9 @@
       break;
     case MachineRepresentation::kBit:      // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:       // Fall through.
     case MachineRepresentation::kTaggedSigned:   // Fall through.
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
@@ -502,7 +554,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -796,31 +848,6 @@
   VisitWord64Shift(this, node, kX64Ror);
 }
 
-
-void InstructionSelector::VisitWord64Clz(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord32Clz(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Lzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord64Ctz(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord32Ctz(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Tzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 
 
@@ -830,18 +857,6 @@
 
 void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
-void InstructionSelector::VisitWord32Popcnt(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Popcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord64Popcnt(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitInt32Add(Node* node) {
   X64OperandGenerator g(this);
 
@@ -1064,55 +1079,6 @@
   VisitMulHigh(this, node, kX64UmulHigh32);
 }
 
-
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat64ToUint32 | MiscField::encode(1), g.DefineAsRegister(node),
-       g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat64ToUint32 | MiscField::encode(0), g.DefineAsRegister(node),
-       g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
   X64OperandGenerator g(this);
   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
@@ -1330,16 +1296,65 @@
 
 }  // namespace
 
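+// Single-input operations that lower uniformly; each entry is expanded by
+// RO_VISITOR below into a Visit* method that calls VisitRO.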
+#define RO_OP_LIST(V)                                                    \
+  V(Word64Clz, kX64Lzcnt)                                                \
+  V(Word32Clz, kX64Lzcnt32)                                              \
+  V(Word64Ctz, kX64Tzcnt)                                                \
+  V(Word32Ctz, kX64Tzcnt32)                                              \
+  V(Word64Popcnt, kX64Popcnt)                                            \
+  V(Word32Popcnt, kX64Popcnt32)                                          \
+  V(Float64Sqrt, kSSEFloat64Sqrt)                                        \
+  V(Float32Sqrt, kSSEFloat32Sqrt)                                        \
+  V(ChangeFloat64ToInt32, kSSEFloat64ToInt32)                            \
+  V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1))   \
+  V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \
+  V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32)                      \
+  V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64)                        \
+  V(TruncateFloat32ToInt32, kSSEFloat32ToInt32)                          \
+  V(TruncateFloat32ToUint32, kSSEFloat32ToUint32)                        \
+  V(ChangeInt32ToFloat64, kSSEInt32ToFloat64)                            \
+  V(ChangeUint32ToFloat64, kSSEUint32ToFloat64)                          \
+  V(RoundFloat64ToInt32, kSSEFloat64ToInt32)                             \
+  V(RoundInt32ToFloat32, kSSEInt32ToFloat32)                             \
+  V(RoundInt64ToFloat32, kSSEInt64ToFloat32)                             \
+  V(RoundInt64ToFloat64, kSSEInt64ToFloat64)                             \
+  V(RoundUint32ToFloat32, kSSEUint32ToFloat32)                           \
+  V(BitcastFloat32ToInt32, kX64BitcastFI)                                \
+  V(BitcastFloat64ToInt64, kX64BitcastDL)                                \
+  V(BitcastInt32ToFloat32, kX64BitcastIF)                                \
+  V(BitcastInt64ToFloat64, kX64BitcastLD)                                \
+  V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32)                \
+  V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
 
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToFloat32);
-}
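+// As above, but expanded by RR_VISITOR into VisitRR calls.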
+#define RR_OP_LIST(V)                                                         \
+  V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown))       \
+  V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown))       \
+  V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp))           \
+  V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp))           \
+  V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
+  V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
+  V(Float32RoundTiesEven,                                                     \
+    kSSEFloat32Round | MiscField::encode(kRoundToNearest))                    \
+  V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
+
+#define RO_VISITOR(Name, opcode)                      \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRO(this, node, opcode);                      \
+  }
+RO_OP_LIST(RO_VISITOR)
+#undef RO_VISITOR
+
+#define RR_VISITOR(Name, opcode)                      \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRR(this, node, opcode);                      \
+  }
+RR_OP_LIST(RR_VISITOR)
+#undef RR_VISITOR
 
 void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
   VisitRR(this, node, kArchTruncateDoubleToI);
 }
 
-
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   X64OperandGenerator g(this);
   Node* value = node->InputAt(0);
@@ -1365,34 +1380,6 @@
   Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
 
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToInt32);
-}
-
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEInt32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEInt64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEInt64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
   X64OperandGenerator g(this);
   InstructionOperand temps[] = {g.TempRegister()};
@@ -1408,31 +1395,6 @@
        arraysize(temps), temps);
 }
 
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64BitcastDL, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64BitcastLD, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitFloat32Add(Node* node) {
   VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
 }
@@ -1457,10 +1419,6 @@
 }
 
 
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
-  VisitRO(this, node, kSSEFloat32Sqrt);
-}
-
 void InstructionSelector::VisitFloat32Max(Node* node) {
   VisitRRO(this, node, kSSEFloat32Max);
 }
@@ -1511,55 +1469,12 @@
   VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
 }
 
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
-  VisitRO(this, node, kSSEFloat64Sqrt);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
-}
-
 
 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
   UNREACHABLE();
 }
 
 
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
-}
-
 void InstructionSelector::VisitFloat32Neg(Node* node) {
   VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
 }
@@ -1607,17 +1522,29 @@
     }
   } else {
     // Push any stack arguments.
+    int effect_level = GetEffectLevel(node);
     for (PushParameter input : base::Reversed(*arguments)) {
-      // TODO(titzer): X64Push cannot handle stack->stack double moves
-      // because there is no way to encode fixed double slots.
-      InstructionOperand value =
-          g.CanBeImmediate(input.node())
-              ? g.UseImmediate(input.node())
-              : IsSupported(ATOM) ||
-                        sequence()->IsFP(GetVirtualRegister(input.node()))
-                    ? g.UseRegister(input.node())
-                    : g.Use(input.node());
-      Emit(kX64Push, g.NoOutput(), value);
+      Node* input_node = input.node();
+      if (g.CanBeImmediate(input_node)) {
+        Emit(kX64Push, g.NoOutput(), g.UseImmediate(input_node));
+      } else if (IsSupported(ATOM) ||
+                 sequence()->IsFP(GetVirtualRegister(input_node))) {
+        // TODO(titzer): X64Push cannot handle stack->stack double moves
+        // because there is no way to encode fixed double slots.
+        Emit(kX64Push, g.NoOutput(), g.UseRegister(input_node));
+      } else if (g.CanBeMemoryOperand(kX64Push, node, input_node,
+                                      effect_level)) {
+        InstructionOperand outputs[1];
+        InstructionOperand inputs[4];
+        size_t input_count = 0;
+        InstructionCode opcode = kX64Push;
+        AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+            input_node, inputs, &input_count);
+        opcode |= AddressingModeField::encode(mode);
+        Emit(opcode, 0, outputs, input_count, inputs);
+      } else {
+        Emit(kX64Push, g.NoOutput(), g.Use(input_node));
+      }
     }
   }
 }
@@ -1649,11 +1576,14 @@
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
-                             cont->reason(), cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     InstructionOperand output = g.DefineAsRegister(cont->result());
     selector->Emit(opcode, 1, &output, input_count, inputs);
+  } else {
+    DCHECK(cont->IsTrap());
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, 0, nullptr, input_count, inputs);
   }
 }
 
@@ -1667,11 +1597,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1687,21 +1620,54 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
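+// Returns the machine type to assume for |node| when narrowing a comparison:
+// if the other operand is a load and |node| is a constant that fits in the
+// loaded representation, the load's type is used; otherwise fall back to
+// |node|'s own load representation, or None when nothing is known.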
+MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
+  if (hint_node->opcode() == IrOpcode::kLoad) {
+    MachineType hint = LoadRepresentationOf(hint_node->op());
+    if (node->opcode() == IrOpcode::kInt32Constant ||
+        node->opcode() == IrOpcode::kInt64Constant) {
+      int64_t constant = node->opcode() == IrOpcode::kInt32Constant
+                             ? OpParameter<int32_t>(node)
+                             : OpParameter<int64_t>(node);
+      if (hint == MachineType::Int8()) {
+        if (constant >= std::numeric_limits<int8_t>::min() &&
+            constant <= std::numeric_limits<int8_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Uint8()) {
+        if (constant >= std::numeric_limits<uint8_t>::min() &&
+            constant <= std::numeric_limits<uint8_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Int16()) {
+        if (constant >= std::numeric_limits<int16_t>::min() &&
+            constant <= std::numeric_limits<int16_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Uint16()) {
+        if (constant >= std::numeric_limits<uint16_t>::min() &&
+            constant <= std::numeric_limits<uint16_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Int32()) {
+        return hint;
+      } else if (hint == MachineType::Uint32()) {
+        if (constant >= 0) return hint;
+      }
+    }
+  }
+  return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
+                                           : MachineType::None();
+}
+
 // Tries to match the size of the given opcode to that of the operands, if
 // possible.
 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
                                     Node* right, FlagsContinuation* cont) {
-  // Currently, if one of the two operands is not a Load, we don't know what its
-  // machine representation is, so we bail out.
-  // TODO(epertoso): we can probably get some size information out of immediates
-  // and phi nodes.
-  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
-    return opcode;
-  }
+  // TODO(epertoso): we can probably get some size information out of phi nodes.
   // If the load representations don't match, both operands will be
   // zero/sign-extended to 32bit.
-  MachineType left_type = LoadRepresentationOf(left->op());
-  MachineType right_type = LoadRepresentationOf(right->op());
+  MachineType left_type = MachineTypeForNarrow(left, right);
+  MachineType right_type = MachineTypeForNarrow(right, left);
   if (left_type == right_type) {
     switch (left_type.representation()) {
       case MachineRepresentation::kBit:
@@ -1775,11 +1741,6 @@
                                          g.UseRegister(right), cont);
   }
 
-  if (g.CanBeBetterLeftOperand(right)) {
-    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-    std::swap(left, right);
-  }
-
   return VisitCompare(selector, opcode, left, right, cont,
                       node->op()->HasProperty(Operator::kCommutative));
 }
@@ -1824,11 +1785,13 @@
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
       } else if (cont->IsDeoptimize()) {
-        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
-                                 cont->frame_state());
-      } else {
-        DCHECK(cont->IsSet());
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
+                                 cont->reason(), cont->frame_state());
+      } else if (cont->IsSet()) {
         selector->Emit(opcode, g.DefineAsRegister(cont->result()));
+      } else {
+        DCHECK(cont->IsTrap());
+        selector->Emit(opcode, g.NoOutput(), g.UseImmediate(cont->trap_id()));
       }
       return;
     }
@@ -2001,12 +1964,8 @@
         break;
       case IrOpcode::kInt32Sub:
         return VisitWordCompare(selector, value, kX64Cmp32, cont);
-      case IrOpcode::kInt64Sub:
-        return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kWord32And:
         return VisitWordCompare(selector, value, kX64Test32, cont);
-      case IrOpcode::kWord64And:
-        return VisitWordCompare(selector, value, kX64Test, cont);
       default:
         break;
     }
@@ -2025,14 +1984,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
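+// Trap visitors reuse the compare-against-zero machinery; the trap id
+// travels on the FlagsContinuation and is emitted as an extra immediate
+// input.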
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -2072,32 +2046,7 @@
   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(user);
   if (m.right().Is(0)) {
-    Node* value = m.left().node();
-
-    // Try to combine with comparisons against 0 by simply inverting the branch.
-    while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
-      Int32BinopMatcher m(value);
-      if (m.right().Is(0)) {
-        user = value;
-        value = m.left().node();
-        cont.Negate();
-      } else {
-        break;
-      }
-    }
-
-    // Try to combine the branch with a comparison.
-    if (CanCover(user, value)) {
-      switch (value->opcode()) {
-        case IrOpcode::kInt32Sub:
-          return VisitWordCompare(this, value, kX64Cmp32, &cont);
-        case IrOpcode::kWord32And:
-          return VisitWordCompare(this, value, kX64Test32, &cont);
-        default:
-          break;
-      }
-    }
-    return VisitCompareZero(this, value, kX64Cmp32, &cont);
+    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
   }
   VisitWordCompare(this, node, kX64Cmp32, &cont);
 }
@@ -2250,21 +2199,6 @@
   VisitFloat64Compare(this, node, &cont);
 }
 
-
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
-       g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
-       g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   X64OperandGenerator g(this);
   Node* left = node->InputAt(0);
@@ -2347,8 +2281,29 @@
 
 void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
   X64OperandGenerator g(this);
+  int32_t lane = OpParameter<int32_t>(node);
   Emit(kX64Int32x4ExtractLane, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)));
+       g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
+}
+
+void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
+  X64OperandGenerator g(this);
+  int32_t lane = OpParameter<int32_t>(node);
+  Emit(kX64Int32x4ReplaceLane, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
+       g.Use(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4Add(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64Int32x4Add, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4Sub(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64Int32x4Sub, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
 }
 
 // static
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index d2f64e8..fc5992a 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -60,9 +60,7 @@
   Immediate ToImmediate(InstructionOperand* operand) {
     Constant constant = ToConstant(operand);
     if (constant.type() == Constant::kInt32 &&
-        (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-         constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
-         constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+        RelocInfo::IsWasmReference(constant.rmode())) {
       return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
                        constant.rmode());
     }
@@ -738,10 +736,8 @@
       __ fild_s(MemOperand(esp, 0));
       __ lea(esp, Operand(esp, kPointerSize));
 
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -996,10 +992,10 @@
       } else {
         __ add(i.OutputRegister(0), i.InputRegister(2));
       }
-      __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
       if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
         __ Move(i.OutputRegister(1), i.InputRegister(1));
       }
+      __ adc(i.OutputRegister(1), Operand(i.InputRegister(3)));
       if (use_temp) {
         __ Move(i.OutputRegister(0), i.TempRegister(0));
       }
@@ -1021,10 +1017,10 @@
       } else {
         __ sub(i.OutputRegister(0), i.InputRegister(2));
       }
-      __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
       if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
         __ Move(i.OutputRegister(1), i.InputRegister(1));
       }
+      __ sbb(i.OutputRegister(1), Operand(i.InputRegister(3)));
       if (use_temp) {
         __ Move(i.OutputRegister(0), i.TempRegister(0));
       }
@@ -2030,10 +2026,55 @@
   return kSuccess;
 }  // NOLINT(readability/fn_size)
 
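+// Maps a FlagsCondition to the assembler's Condition. For the unordered
+// variants, the callers below emit the required parity check themselves.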
+static Condition FlagsConditionToCondition(FlagsCondition condition) {
+  switch (condition) {
+    case kUnorderedEqual:
+    case kEqual:
+      return equal;
+    case kUnorderedNotEqual:
+    case kNotEqual:
+      return not_equal;
+    case kSignedLessThan:
+      return less;
+    case kSignedGreaterThanOrEqual:
+      return greater_equal;
+    case kSignedLessThanOrEqual:
+      return less_equal;
+    case kSignedGreaterThan:
+      return greater;
+    case kUnsignedLessThan:
+      return below;
+    case kUnsignedGreaterThanOrEqual:
+      return above_equal;
+    case kUnsignedLessThanOrEqual:
+      return below_equal;
+    case kUnsignedGreaterThan:
+      return above;
+    case kOverflow:
+      return overflow;
+    case kNotOverflow:
+      return no_overflow;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
 
 // Assembles a branch after an instruction.
 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
-  X87OperandConverter i(this, instr);
   Label::Distance flabel_distance =
       branch->fallthru ? Label::kNear : Label::kFar;
 
@@ -2046,53 +2087,13 @@
   Label* tlabel_dst = branch->true_label;
   Label* flabel_dst = branch->false_label;
 
-  switch (branch->condition) {
-    case kUnorderedEqual:
-      __ j(parity_even, flabel, flabel_distance);
-    // Fall through.
-    case kEqual:
-      __ j(equal, tlabel);
-      break;
-    case kUnorderedNotEqual:
-      __ j(parity_even, tlabel);
-    // Fall through.
-    case kNotEqual:
-      __ j(not_equal, tlabel);
-      break;
-    case kSignedLessThan:
-      __ j(less, tlabel);
-      break;
-    case kSignedGreaterThanOrEqual:
-      __ j(greater_equal, tlabel);
-      break;
-    case kSignedLessThanOrEqual:
-      __ j(less_equal, tlabel);
-      break;
-    case kSignedGreaterThan:
-      __ j(greater, tlabel);
-      break;
-    case kUnsignedLessThan:
-      __ j(below, tlabel);
-      break;
-    case kUnsignedGreaterThanOrEqual:
-      __ j(above_equal, tlabel);
-      break;
-    case kUnsignedLessThanOrEqual:
-      __ j(below_equal, tlabel);
-      break;
-    case kUnsignedGreaterThan:
-      __ j(above, tlabel);
-      break;
-    case kOverflow:
-      __ j(overflow, tlabel);
-      break;
-    case kNotOverflow:
-      __ j(no_overflow, tlabel);
-      break;
-    default:
-      UNREACHABLE();
-      break;
+  if (branch->condition == kUnorderedEqual) {
+    __ j(parity_even, flabel, flabel_distance);
+  } else if (branch->condition == kUnorderedNotEqual) {
+    __ j(parity_even, tlabel);
   }
+  __ j(FlagsConditionToCondition(branch->condition), tlabel);
+
   // Add a jump if not falling through to the next block.
   if (!branch->fallthru) __ jmp(flabel);
 
@@ -2130,6 +2131,71 @@
   if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      X87OperandConverter i(gen_, instr_);
+
+      Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+          i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        __ set_has_frame(old_has_frame);
+      }
+      if (FLAG_debug_code) {
+        __ ud2();
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+      if (trap_id == Runtime::kNumFunctions) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        __ PrepareCallCFunction(0, esi);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+      } else {
+        __ Move(esi, isolate()->native_context());
+        gen_->AssembleSourcePosition(instr_);
+        __ CallRuntime(trap_id);
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Label end;
+  if (condition == kUnorderedEqual) {
+    __ j(parity_even, &end);
+  } else if (condition == kUnorderedNotEqual) {
+    __ j(parity_even, tlabel);
+  }
+  __ j(FlagsConditionToCondition(condition), tlabel);
+  __ bind(&end);
+}
 
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2142,58 +2208,17 @@
   Label check;
   DCHECK_NE(0u, instr->OutputCount());
   Register reg = i.OutputRegister(instr->OutputCount() - 1);
-  Condition cc = no_condition;
-  switch (condition) {
-    case kUnorderedEqual:
-      __ j(parity_odd, &check, Label::kNear);
-      __ Move(reg, Immediate(0));
-      __ jmp(&done, Label::kNear);
-    // Fall through.
-    case kEqual:
-      cc = equal;
-      break;
-    case kUnorderedNotEqual:
-      __ j(parity_odd, &check, Label::kNear);
-      __ mov(reg, Immediate(1));
-      __ jmp(&done, Label::kNear);
-    // Fall through.
-    case kNotEqual:
-      cc = not_equal;
-      break;
-    case kSignedLessThan:
-      cc = less;
-      break;
-    case kSignedGreaterThanOrEqual:
-      cc = greater_equal;
-      break;
-    case kSignedLessThanOrEqual:
-      cc = less_equal;
-      break;
-    case kSignedGreaterThan:
-      cc = greater;
-      break;
-    case kUnsignedLessThan:
-      cc = below;
-      break;
-    case kUnsignedGreaterThanOrEqual:
-      cc = above_equal;
-      break;
-    case kUnsignedLessThanOrEqual:
-      cc = below_equal;
-      break;
-    case kUnsignedGreaterThan:
-      cc = above;
-      break;
-    case kOverflow:
-      cc = overflow;
-      break;
-    case kNotOverflow:
-      cc = no_overflow;
-      break;
-    default:
-      UNREACHABLE();
-      break;
+  if (condition == kUnorderedEqual) {
+    __ j(parity_odd, &check, Label::kNear);
+    __ Move(reg, Immediate(0));
+    __ jmp(&done, Label::kNear);
+  } else if (condition == kUnorderedNotEqual) {
+    __ j(parity_odd, &check, Label::kNear);
+    __ mov(reg, Immediate(1));
+    __ jmp(&done, Label::kNear);
   }
+  Condition cc = FlagsConditionToCondition(condition);
+
   __ bind(&check);
   if (reg.is_byte_register()) {
     // setcc for byte registers (al, bl, cl, dl).
@@ -2238,13 +2263,16 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -2560,7 +2588,7 @@
       __ Move(dst, g.ToImmediate(source));
     } else if (src_constant.type() == Constant::kFloat32) {
       // TODO(turbofan): Can we do better here?
-      uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
+      uint32_t src = src_constant.ToFloat32AsInt();
       if (destination->IsFPRegister()) {
         __ sub(esp, Immediate(kInt32Size));
         __ mov(MemOperand(esp, 0), Immediate(src));
@@ -2575,7 +2603,7 @@
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src_constant.type());
-      uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+      uint64_t src = src_constant.ToFloat64AsInt();
       uint32_t lower = static_cast<uint32_t>(src);
       uint32_t upper = static_cast<uint32_t>(src >> 32);
       if (destination->IsFPRegister()) {
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index a737d1e..ede0d45 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -195,6 +195,9 @@
       break;
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -285,6 +288,9 @@
         break;
       case MachineRepresentation::kWord64:   // Fall through.
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -312,6 +318,11 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk): implement protected stores for x87.
+  UNIMPLEMENTED();
+}
+
 // Architecture supports unaligned access, therefore VisitLoad is used instead
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 
@@ -347,6 +358,9 @@
     case MachineRepresentation::kTagged:         // Fall through.
     case MachineRepresentation::kWord64:         // Fall through.
     case MachineRepresentation::kSimd128:        // Fall through.
+    case MachineRepresentation::kSimd1x4:        // Fall through.
+    case MachineRepresentation::kSimd1x8:        // Fall through.
+    case MachineRepresentation::kSimd1x16:       // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -396,6 +410,9 @@
     case MachineRepresentation::kTagged:         // Fall through.
     case MachineRepresentation::kWord64:         // Fall through.
     case MachineRepresentation::kSimd128:        // Fall through.
+    case MachineRepresentation::kSimd1x4:        // Fall through.
+    case MachineRepresentation::kSimd1x8:        // Fall through.
+    case MachineRepresentation::kSimd1x16:       // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -476,7 +493,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -1218,11 +1235,14 @@
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
-                             cont->reason(), cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     InstructionOperand output = g.DefineAsRegister(cont->result());
     selector->Emit(opcode, 1, &output, input_count, inputs);
+  } else {
+    DCHECK(cont->IsTrap());
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, 0, nullptr, input_count, inputs);
   }
 }
 
@@ -1236,11 +1256,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1256,21 +1279,54 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
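+// Same constant-versus-load narrowing helper as in the x64 instruction
+// selector.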
+MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
+  if (hint_node->opcode() == IrOpcode::kLoad) {
+    MachineType hint = LoadRepresentationOf(hint_node->op());
+    if (node->opcode() == IrOpcode::kInt32Constant ||
+        node->opcode() == IrOpcode::kInt64Constant) {
+      int64_t constant = node->opcode() == IrOpcode::kInt32Constant
+                             ? OpParameter<int32_t>(node)
+                             : OpParameter<int64_t>(node);
+      if (hint == MachineType::Int8()) {
+        if (constant >= std::numeric_limits<int8_t>::min() &&
+            constant <= std::numeric_limits<int8_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Uint8()) {
+        if (constant >= std::numeric_limits<uint8_t>::min() &&
+            constant <= std::numeric_limits<uint8_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Int16()) {
+        if (constant >= std::numeric_limits<int16_t>::min() &&
+            constant <= std::numeric_limits<int16_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Uint16()) {
+        if (constant >= std::numeric_limits<uint16_t>::min() &&
+            constant <= std::numeric_limits<uint16_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Int32()) {
+        return hint;
+      } else if (hint == MachineType::Uint32()) {
+        if (constant >= 0) return hint;
+      }
+    }
+  }
+  return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
+                                           : MachineType::None();
+}
+
 // Tries to match the size of the given opcode to that of the operands, if
 // possible.
 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
                                     Node* right, FlagsContinuation* cont) {
-  // Currently, if one of the two operands is not a Load, we don't know what its
-  // machine representation is, so we bail out.
-  // TODO(epertoso): we can probably get some size information out of immediates
-  // and phi nodes.
-  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
-    return opcode;
-  }
+  // TODO(epertoso): we can probably get some size information out of phi nodes.
   // If the load representations don't match, both operands will be
   // zero/sign-extended to 32bit.
-  MachineType left_type = LoadRepresentationOf(left->op());
-  MachineType right_type = LoadRepresentationOf(right->op());
+  MachineType left_type = MachineTypeForNarrow(left, right);
+  MachineType right_type = MachineTypeForNarrow(right, left);
   if (left_type == right_type) {
     switch (left_type.representation()) {
       case MachineRepresentation::kBit:
@@ -1316,11 +1372,14 @@
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(cont->Encode(kX87Float32Cmp), g.NoOutput(),
                              g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
-                             cont->reason(), cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(cont->Encode(kX87Float32Cmp),
                    g.DefineAsByteRegister(cont->result()));
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1337,11 +1396,14 @@
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(cont->Encode(kX87Float64Cmp), g.NoOutput(),
                              g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
-                             cont->reason(), cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(cont->Encode(kX87Float64Cmp),
                    g.DefineAsByteRegister(cont->result()));
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1372,10 +1434,8 @@
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
-      // TODO(epertoso): we should use `narrowed_opcode' here once we match
-      // immediates too.
-      return VisitCompareWithMemoryOperand(selector, opcode, left,
+    if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
+      return VisitCompareWithMemoryOperand(selector, narrowed_opcode, left,
                                            g.UseImmediate(right), cont);
     }
     return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
@@ -1417,8 +1477,8 @@
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
       } else if (cont->IsDeoptimize()) {
-        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
-                                 cont->frame_state());
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
+                                 cont->reason(), cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
         selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1531,14 +1591,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
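
The hunks above extend TryNarrowOpcodeSize to consult MachineTypeForNarrow for both operands (covering immediates as well as loads) and route trap continuations through the x87 comparison emitters. A minimal stand-alone sketch (not V8 code; CompareWide and CompareNarrow are hypothetical stand-ins) of why matching narrow representations make the narrowed comparison safe:

#include <cassert>
#include <cstdint>

// What the unnarrowed 32-bit compare sees: both bytes zero-extended.
bool CompareWide(uint8_t a, uint8_t b) {
  return static_cast<uint32_t>(a) < static_cast<uint32_t>(b);
}

// What the narrowed compare can do instead: compare the bytes directly.
bool CompareNarrow(uint8_t a, uint8_t b) { return a < b; }

int main() {
  for (int a = 0; a < 256; ++a) {
    for (int b = 0; b < 256; ++b) {
      assert(CompareWide(static_cast<uint8_t>(a), static_cast<uint8_t>(b)) ==
             CompareNarrow(static_cast<uint8_t>(a), static_cast<uint8_t>(b)));
    }
  }
  return 0;
}

The analogous equivalence holds for sign-extended operands under a signed narrow compare, which is why the narrowing requires the two operands' representations to match.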
 
diff --git a/src/contexts-inl.h b/src/contexts-inl.h
index ce2c97b..1c7392f 100644
--- a/src/contexts-inl.h
+++ b/src/contexts-inl.h
@@ -7,6 +7,7 @@
 
 #include "src/contexts.h"
 #include "src/objects-inl.h"
+#include "src/objects/regexp-match-info.h"
 
 namespace v8 {
 namespace internal {
@@ -118,12 +119,19 @@
   return map == map->GetHeap()->module_context_map();
 }
 
+bool Context::IsEvalContext() {
+  Map* map = this->map();
+  return map == map->GetHeap()->eval_context_map();
+}
 
 bool Context::IsScriptContext() {
   Map* map = this->map();
   return map == map->GetHeap()->script_context_map();
 }
 
+bool Context::OptimizedCodeMapIsCleared() {
+  return osr_code_table() == GetHeap()->empty_fixed_array();
+}
 
 bool Context::HasSameSecurityTokenAs(Context* that) {
   return this->native_context()->security_token() ==
diff --git a/src/contexts.cc b/src/contexts.cc
index 012944e..e622807 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -61,6 +61,7 @@
       IsModuleContext()) {
     return true;
   }
+  if (IsEvalContext()) return closure()->shared()->language_mode() == STRICT;
   if (!IsBlockContext()) return false;
   Object* ext = extension();
   // If we have the special extension, we immediately know it must be a
@@ -74,7 +75,6 @@
   Context* current = this;
   while (!current->is_declaration_context()) {
     current = current->previous();
-    DCHECK(current->closure() == closure());
   }
   return current;
 }
@@ -82,7 +82,8 @@
 Context* Context::closure_context() {
   Context* current = this;
   while (!current->IsFunctionContext() && !current->IsScriptContext() &&
-         !current->IsModuleContext() && !current->IsNativeContext()) {
+         !current->IsModuleContext() && !current->IsNativeContext() &&
+         !current->IsEvalContext()) {
     current = current->previous();
     DCHECK(current->closure() == closure());
   }
@@ -90,7 +91,8 @@
 }
 
 JSObject* Context::extension_object() {
-  DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext());
+  DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext() ||
+         IsEvalContext());
   HeapObject* object = extension();
   if (object->IsTheHole(GetIsolate())) return nullptr;
   if (IsBlockContext()) {
@@ -103,7 +105,7 @@
 }
 
 JSReceiver* Context::extension_receiver() {
-  DCHECK(IsNativeContext() || IsWithContext() ||
+  DCHECK(IsNativeContext() || IsWithContext() || IsEvalContext() ||
          IsFunctionContext() || IsBlockContext());
   return IsWithContext() ? JSReceiver::cast(
                                ContextExtension::cast(extension())->extension())
@@ -112,7 +114,7 @@
 
 ScopeInfo* Context::scope_info() {
   DCHECK(!IsNativeContext());
-  if (IsFunctionContext() || IsModuleContext()) {
+  if (IsFunctionContext() || IsModuleContext() || IsEvalContext()) {
     return closure()->shared()->scope_info();
   }
   HeapObject* object = extension();
@@ -223,6 +225,8 @@
     }
 
     // 1. Check global objects, subjects of with, and extension objects.
+    DCHECK_IMPLIES(context->IsEvalContext(),
+                   context->extension()->IsTheHole(isolate));
     if ((context->IsNativeContext() ||
          (context->IsWithContext() && ((flags & SKIP_WITH_CONTEXT) == 0)) ||
          context->IsFunctionContext() || context->IsBlockContext()) &&
@@ -301,12 +305,10 @@
 
     // 2. Check the context proper if it has slots.
     if (context->IsFunctionContext() || context->IsBlockContext() ||
-        context->IsScriptContext()) {
+        context->IsScriptContext() || context->IsEvalContext()) {
       // Use serialized scope information of functions and blocks to search
       // for the context index.
-      Handle<ScopeInfo> scope_info(context->IsFunctionContext()
-          ? context->closure()->shared()->scope_info()
-          : context->scope_info());
+      Handle<ScopeInfo> scope_info(context->scope_info());
       VariableMode mode;
       InitializationFlag flag;
       MaybeAssignedFlag maybe_assigned_flag;
@@ -408,6 +410,162 @@
   return Handle<Object>::null();
 }
 
+static const int kSharedOffset = 0;
+static const int kCachedCodeOffset = 1;
+static const int kOsrAstIdOffset = 2;
+static const int kEntryLength = 3;
+static const int kInitialLength = kEntryLength;
+
+int Context::SearchOptimizedCodeMapEntry(SharedFunctionInfo* shared,
+                                         BailoutId osr_ast_id) {
+  DisallowHeapAllocation no_gc;
+  DCHECK(this->IsNativeContext());
+  if (!OptimizedCodeMapIsCleared()) {
+    FixedArray* optimized_code_map = this->osr_code_table();
+    int length = optimized_code_map->length();
+    Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
+    for (int i = 0; i < length; i += kEntryLength) {
+      if (WeakCell::cast(optimized_code_map->get(i + kSharedOffset))->value() ==
+              shared &&
+          optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
+        return i;
+      }
+    }
+  }
+  return -1;
+}
+
+Code* Context::SearchOptimizedCodeMap(SharedFunctionInfo* shared,
+                                      BailoutId osr_ast_id) {
+  DCHECK(this->IsNativeContext());
+  int entry = SearchOptimizedCodeMapEntry(shared, osr_ast_id);
+  if (entry != -1) {
+    FixedArray* code_map = osr_code_table();
+    DCHECK_LE(entry + kEntryLength, code_map->length());
+    WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
+    return cell->cleared() ? nullptr : Code::cast(cell->value());
+  }
+  return nullptr;
+}
+
+void Context::AddToOptimizedCodeMap(Handle<Context> native_context,
+                                    Handle<SharedFunctionInfo> shared,
+                                    Handle<Code> code,
+                                    BailoutId osr_ast_id) {
+  DCHECK(native_context->IsNativeContext());
+  Isolate* isolate = native_context->GetIsolate();
+  if (isolate->serializer_enabled()) return;
+
+  STATIC_ASSERT(kEntryLength == 3);
+  Handle<FixedArray> new_code_map;
+  int entry;
+
+  if (native_context->OptimizedCodeMapIsCleared()) {
+    new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
+    entry = 0;
+  } else {
+    Handle<FixedArray> old_code_map(native_context->osr_code_table(), isolate);
+    entry = native_context->SearchOptimizedCodeMapEntry(*shared, osr_ast_id);
+    if (entry >= 0) {
+      // Just set the code of the entry.
+      Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
+      old_code_map->set(entry + kCachedCodeOffset, *code_cell);
+      return;
+    }
+
+    // Can we reuse an entry?
+    DCHECK(entry < 0);
+    int length = old_code_map->length();
+    for (int i = 0; i < length; i += kEntryLength) {
+      if (WeakCell::cast(old_code_map->get(i + kSharedOffset))->cleared()) {
+        new_code_map = old_code_map;
+        entry = i;
+        break;
+      }
+    }
+
+    if (entry < 0) {
+      // Copy old optimized code map and append one new entry.
+      new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
+          old_code_map, kEntryLength, TENURED);
+      entry = old_code_map->length();
+    }
+  }
+
+  Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
+  Handle<WeakCell> shared_cell = isolate->factory()->NewWeakCell(shared);
+
+  new_code_map->set(entry + kSharedOffset, *shared_cell);
+  new_code_map->set(entry + kCachedCodeOffset, *code_cell);
+  new_code_map->set(entry + kOsrAstIdOffset, Smi::FromInt(osr_ast_id.ToInt()));
+
+#ifdef DEBUG
+  for (int i = 0; i < new_code_map->length(); i += kEntryLength) {
+    WeakCell* cell = WeakCell::cast(new_code_map->get(i + kSharedOffset));
+    DCHECK(cell->cleared() || cell->value()->IsSharedFunctionInfo());
+    cell = WeakCell::cast(new_code_map->get(i + kCachedCodeOffset));
+    DCHECK(cell->cleared() ||
+           (cell->value()->IsCode() &&
+            Code::cast(cell->value())->kind() == Code::OPTIMIZED_FUNCTION));
+    DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
+  }
+#endif
+
+  FixedArray* old_code_map = native_context->osr_code_table();
+  if (old_code_map != *new_code_map) {
+    native_context->set_osr_code_table(*new_code_map);
+  }
+}
+
+void Context::EvictFromOptimizedCodeMap(Code* optimized_code,
+                                        const char* reason) {
+  DCHECK(IsNativeContext());
+  DisallowHeapAllocation no_gc;
+  if (OptimizedCodeMapIsCleared()) return;
+
+  Heap* heap = GetHeap();
+  FixedArray* code_map = osr_code_table();
+  int dst = 0;
+  int length = code_map->length();
+  for (int src = 0; src < length; src += kEntryLength) {
+    if (WeakCell::cast(code_map->get(src + kCachedCodeOffset))->value() ==
+        optimized_code) {
+      BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
+      if (FLAG_trace_opt) {
+        PrintF(
+            "[evicting entry from native context optimizing code map (%s) for ",
+            reason);
+        ShortPrint();
+        DCHECK(!osr.IsNone());
+        PrintF(" (osr ast id %d)]\n", osr.ToInt());
+      }
+      // Evict the src entry by not copying it to the dst entry.
+      continue;
+    }
+    // Keep the src entry by copying it to the dst entry.
+    if (dst != src) {
+      code_map->set(dst + kSharedOffset, code_map->get(src + kSharedOffset));
+      code_map->set(dst + kCachedCodeOffset,
+                    code_map->get(src + kCachedCodeOffset));
+      code_map->set(dst + kOsrAstIdOffset,
+                    code_map->get(src + kOsrAstIdOffset));
+    }
+    dst += kEntryLength;
+  }
+  if (dst != length) {
+    // Always trim, even when the array is cleared, to satisfy the heap
+    // verifier.
+    heap->RightTrimFixedArray(code_map, length - dst);
+    if (code_map->length() == 0) {
+      ClearOptimizedCodeMap();
+    }
+  }
+}
+
+void Context::ClearOptimizedCodeMap() {
+  DCHECK(IsNativeContext());
+  FixedArray* empty_fixed_array = GetHeap()->empty_fixed_array();
+  set_osr_code_table(empty_fixed_array);
+}
 
 void Context::AddOptimizedFunction(JSFunction* function) {
   DCHECK(IsNativeContext());
@@ -582,6 +740,10 @@
 
 #endif
 
+void Context::ResetErrorsThrown() {
+  DCHECK(IsNativeContext());
+  set_errors_thrown(Smi::FromInt(0));
+}
 
 void Context::IncrementErrorsThrown() {
   DCHECK(IsNativeContext());
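
The OSR code map added above stores its entries as flat (SharedFunctionInfo, Code, osr_ast_id) triples in a single FixedArray, searched linearly and evicted by compaction. A stand-alone sketch (not V8 code; plain ints stand in for the weak cells and BailoutIds, and resize stands in for right-trimming):

#include <cassert>
#include <vector>

constexpr int kSharedOffset = 0;
constexpr int kCachedCodeOffset = 1;
constexpr int kOsrAstIdOffset = 2;
constexpr int kEntryLength = 3;

using Slot = int;  // Stand-in for the weak cell / Smi slots.

int SearchEntry(const std::vector<Slot>& table, Slot shared, Slot osr_ast_id) {
  for (int i = 0; i + kEntryLength <= static_cast<int>(table.size());
       i += kEntryLength) {
    if (table[i + kSharedOffset] == shared &&
        table[i + kOsrAstIdOffset] == osr_ast_id) {
      return i;
    }
  }
  return -1;
}

void Evict(std::vector<Slot>* table, Slot code) {
  int dst = 0;
  for (int src = 0; src < static_cast<int>(table->size());
       src += kEntryLength) {
    if ((*table)[src + kCachedCodeOffset] == code) continue;  // Drop entry.
    for (int k = 0; k < kEntryLength; ++k) {
      (*table)[dst + k] = (*table)[src + k];
    }
    dst += kEntryLength;
  }
  table->resize(dst);  // The real code right-trims the FixedArray instead.
}

int main() {
  std::vector<Slot> table = {/*shared*/ 1, /*code*/ 10, /*osr ast id*/ 7,
                             /*shared*/ 2, /*code*/ 20, /*osr ast id*/ 3};
  assert(SearchEntry(table, 2, 3) == 3);
  Evict(&table, 10);
  assert(table.size() == 3 && SearchEntry(table, 2, 3) == 0);
  return 0;
}

The real map additionally reuses an entry whose weak shared-info cell has been cleared before growing the array by one entry, as AddToOptimizedCodeMap does above.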
diff --git a/src/contexts.h b/src/contexts.h
index b0b7195..7f9646b 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -11,6 +11,7 @@
 namespace v8 {
 namespace internal {
 
+class RegExpMatchInfo;
 
 enum ContextLookupFlags {
   FOLLOW_CONTEXT_CHAIN = 1 << 0,
@@ -35,6 +36,14 @@
 // Factory::NewContext.
 
 #define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V)                           \
+  V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction,                      \
+    async_function_await_caught)                                        \
+  V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction,                    \
+    async_function_await_uncaught)                                      \
+  V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction,                    \
+    async_function_promise_create)                                      \
+  V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction,                   \
+    async_function_promise_release)                                     \
   V(IS_ARRAYLIKE, JSFunction, is_arraylike)                             \
   V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal)       \
   V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site)   \
@@ -43,6 +52,7 @@
   V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error)             \
   V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error)                 \
   V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error)                   \
+  V(OBJECT_CREATE, JSFunction, object_create)                           \
   V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties)     \
   V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property)         \
   V(OBJECT_FREEZE, JSFunction, object_freeze)                           \
@@ -60,60 +70,54 @@
   V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable)                 \
   V(MATH_FLOOR_INDEX, JSFunction, math_floor)                           \
   V(MATH_POW_INDEX, JSFunction, math_pow)                               \
-  V(CREATE_RESOLVING_FUNCTION_INDEX, JSFunction, create_resolving_functions)
+  V(NEW_PROMISE_CAPABILITY_INDEX, JSFunction, new_promise_capability)   \
+  V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction,                     \
+    promise_internal_constructor)                                       \
+  V(PROMISE_INTERNAL_REJECT_INDEX, JSFunction, promise_internal_reject) \
+  V(IS_PROMISE_INDEX, JSFunction, is_promise)                           \
+  V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve)                 \
+  V(PROMISE_THEN_INDEX, JSFunction, promise_then)                       \
+  V(PROMISE_HANDLE_INDEX, JSFunction, promise_handle)                   \
+  V(PROMISE_HANDLE_REJECT_INDEX, JSFunction, promise_handle_reject)
 
-#define NATIVE_CONTEXT_IMPORTED_FIELDS(V)                                 \
-  V(ARRAY_CONCAT_INDEX, JSFunction, array_concat)                         \
-  V(ARRAY_POP_INDEX, JSFunction, array_pop)                               \
-  V(ARRAY_PUSH_INDEX, JSFunction, array_push)                             \
-  V(ARRAY_SHIFT_INDEX, JSFunction, array_shift)                           \
-  V(ARRAY_SPLICE_INDEX, JSFunction, array_splice)                         \
-  V(ARRAY_SLICE_INDEX, JSFunction, array_slice)                           \
-  V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift)                       \
-  V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)       \
-  V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction,                        \
-    async_function_await_caught)                                          \
-  V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction,                      \
-    async_function_await_uncaught)                                        \
-  V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction,                      \
-    async_function_promise_create)                                        \
-  V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction,                     \
-    async_function_promise_release)                                       \
-  V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap)                 \
-  V(ERROR_FUNCTION_INDEX, JSFunction, error_function)                     \
-  V(ERROR_TO_STRING, JSFunction, error_to_string)                         \
-  V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function)           \
-  V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun)                   \
-  V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete)                      \
-  V(MAP_GET_METHOD_INDEX, JSFunction, map_get)                            \
-  V(MAP_HAS_METHOD_INDEX, JSFunction, map_has)                            \
-  V(MAP_SET_METHOD_INDEX, JSFunction, map_set)                            \
-  V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance)       \
-  V(OBJECT_VALUE_OF, JSFunction, object_value_of)                         \
-  V(OBJECT_TO_STRING, JSFunction, object_to_string)                       \
-  V(PROMISE_CATCH_INDEX, JSFunction, promise_catch)                       \
-  V(PROMISE_CREATE_INDEX, JSFunction, promise_create)                     \
-  V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function)                 \
-  V(PROMISE_HANDLE_INDEX, JSFunction, promise_handle)                     \
-  V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction,            \
-    promise_has_user_defined_reject_handler)                              \
-  V(PROMISE_DEBUG_GET_INFO_INDEX, JSFunction, promise_debug_get_info)     \
-  V(PROMISE_REJECT_INDEX, JSFunction, promise_reject)                     \
-  V(PROMISE_INTERNAL_REJECT_INDEX, JSFunction, promise_internal_reject)   \
-  V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve)                   \
-  V(PROMISE_THEN_INDEX, JSFunction, promise_then)                         \
-  V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function)         \
-  V(REJECT_PROMISE_NO_DEBUG_EVENT_INDEX, JSFunction,                      \
-    reject_promise_no_debug_event)                                        \
-  V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
-  V(SET_ADD_METHOD_INDEX, JSFunction, set_add)                            \
-  V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete)                      \
-  V(SET_HAS_METHOD_INDEX, JSFunction, set_has)                            \
-  V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function)       \
-  V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function)           \
-  V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function)             \
-  V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction,                        \
-    wasm_compile_error_function)                                          \
+#define NATIVE_CONTEXT_IMPORTED_FIELDS(V)                                     \
+  V(ARRAY_CONCAT_INDEX, JSFunction, array_concat)                             \
+  V(ARRAY_POP_INDEX, JSFunction, array_pop)                                   \
+  V(ARRAY_PUSH_INDEX, JSFunction, array_push)                                 \
+  V(ARRAY_SHIFT_INDEX, JSFunction, array_shift)                               \
+  V(ARRAY_SPLICE_INDEX, JSFunction, array_splice)                             \
+  V(ARRAY_SLICE_INDEX, JSFunction, array_slice)                               \
+  V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift)                           \
+  V(ARRAY_ENTRIES_ITERATOR_INDEX, JSFunction, array_entries_iterator)         \
+  V(ARRAY_FOR_EACH_ITERATOR_INDEX, JSFunction, array_for_each_iterator)       \
+  V(ARRAY_KEYS_ITERATOR_INDEX, JSFunction, array_keys_iterator)               \
+  V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)           \
+  V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap)                     \
+  V(ERROR_FUNCTION_INDEX, JSFunction, error_function)                         \
+  V(ERROR_TO_STRING, JSFunction, error_to_string)                             \
+  V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function)               \
+  V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun)                       \
+  V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function)           \
+  V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete)                          \
+  V(MAP_GET_METHOD_INDEX, JSFunction, map_get)                                \
+  V(MAP_HAS_METHOD_INDEX, JSFunction, map_has)                                \
+  V(MAP_SET_METHOD_INDEX, JSFunction, map_set)                                \
+  V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance)           \
+  V(OBJECT_VALUE_OF, JSFunction, object_value_of)                             \
+  V(OBJECT_TO_STRING, JSFunction, object_to_string)                           \
+  V(PROMISE_CATCH_INDEX, JSFunction, promise_catch)                           \
+  V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function)                     \
+  V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function)             \
+  V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function)     \
+  V(SET_ADD_METHOD_INDEX, JSFunction, set_add)                                \
+  V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete)                          \
+  V(SET_HAS_METHOD_INDEX, JSFunction, set_has)                                \
+  V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function)           \
+  V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function)               \
+  V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function)                 \
+  V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction,                            \
+    wasm_compile_error_function)                                              \
+  V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function)     \
   V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, wasm_runtime_error_function)
 
 #define NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V)                               \
@@ -193,10 +197,14 @@
   V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun)                      \
   V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map)                             \
   V(ARRAY_FUNCTION_INDEX, JSFunction, array_function)                          \
+  V(ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX, Map, async_from_sync_iterator_map)     \
+  V(ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, SharedFunctionInfo,                \
+    async_function_await_reject_shared_fun)                                    \
+  V(ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN, SharedFunctionInfo,               \
+    async_function_await_resolve_shared_fun)                                   \
   V(ASYNC_FUNCTION_FUNCTION_INDEX, JSFunction, async_function_constructor)     \
-  V(BOOL16X8_FUNCTION_INDEX, JSFunction, bool16x8_function)                    \
-  V(BOOL32X4_FUNCTION_INDEX, JSFunction, bool32x4_function)                    \
-  V(BOOL8X16_FUNCTION_INDEX, JSFunction, bool8x16_function)                    \
+  V(ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN, SharedFunctionInfo,                \
+    async_iterator_value_unwrap_shared_fun)                                    \
   V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function)                      \
   V(BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX, Map,                            \
     bound_function_with_constructor_map)                                       \
@@ -207,6 +215,7 @@
   V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate)    \
   V(CALLSITE_FUNCTION_INDEX, JSFunction, callsite_function)                    \
   V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function)  \
+  V(CURRENT_MODULE_INDEX, Module, current_module)                              \
   V(DATA_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, data_property_descriptor_map)     \
   V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun)                            \
   V(DATE_FUNCTION_INDEX, JSFunction, date_function)                            \
@@ -214,15 +223,12 @@
     error_message_for_code_gen_from_strings)                                   \
   V(ERRORS_THROWN_INDEX, Smi, errors_thrown)                                   \
   V(EXTRAS_EXPORTS_OBJECT_INDEX, JSObject, extras_binding_object)              \
-  V(EXTRAS_UTILS_OBJECT_INDEX, JSObject, extras_utils_object)                  \
+  V(EXTRAS_UTILS_OBJECT_INDEX, Object, extras_utils_object)                    \
   V(FAST_ALIASED_ARGUMENTS_MAP_INDEX, Map, fast_aliased_arguments_map)         \
-  V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun)                    \
-  V(FLOAT32X4_FUNCTION_INDEX, JSFunction, float32x4_function)                  \
-  V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun)                    \
   V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, FixedArray,                      \
     fast_template_instantiations_cache)                                        \
-  V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, UnseededNumberDictionary,        \
-    slow_template_instantiations_cache)                                        \
+  V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun)                    \
+  V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun)                    \
   V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function)                    \
   V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction,                             \
     generator_function_function)                                               \
@@ -236,13 +242,17 @@
   V(INITIAL_ITERATOR_PROTOTYPE_INDEX, JSObject, initial_iterator_prototype)    \
   V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype)        \
   V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun)                        \
-  V(INT16X8_FUNCTION_INDEX, JSFunction, int16x8_function)                      \
   V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun)                        \
-  V(INT32X4_FUNCTION_INDEX, JSFunction, int32x4_function)                      \
   V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun)                          \
-  V(INT8X16_FUNCTION_INDEX, JSFunction, int8x16_function)                      \
   V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function)        \
   V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map)                       \
+  V(INTL_DATE_TIME_FORMAT_FUNCTION_INDEX, JSFunction,                          \
+    intl_date_time_format_function)                                            \
+  V(INTL_NUMBER_FORMAT_FUNCTION_INDEX, JSFunction,                             \
+    intl_number_format_function)                                               \
+  V(INTL_COLLATOR_FUNCTION_INDEX, JSFunction, intl_collator_function)          \
+  V(INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX, JSFunction,                         \
+    intl_v8_break_iterator_function)                                           \
   V(JS_ARRAY_FAST_SMI_ELEMENTS_MAP_INDEX, Map,                                 \
     js_array_fast_smi_elements_map_index)                                      \
   V(JS_ARRAY_FAST_HOLEY_SMI_ELEMENTS_MAP_INDEX, Map,                           \
@@ -263,7 +273,6 @@
   V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun)                        \
   V(MAP_CACHE_INDEX, Object, map_cache)                                        \
   V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map)                             \
-  V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map)                       \
   V(MATH_RANDOM_INDEX_INDEX, Smi, math_random_index)                           \
   V(MATH_RANDOM_CACHE_INDEX, Object, math_random_cache)                        \
   V(MESSAGE_LISTENERS_INDEX, TemplateList, message_listeners)                  \
@@ -271,18 +280,28 @@
   V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache)                  \
   V(NUMBER_FUNCTION_INDEX, JSFunction, number_function)                        \
   V(OBJECT_FUNCTION_INDEX, JSFunction, object_function)                        \
-  V(SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP, Map,                                  \
-    slow_object_with_null_prototype_map)                                       \
   V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map)   \
   V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function)    \
+  V(OSR_CODE_TABLE_INDEX, FixedArray, osr_code_table)                          \
   V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map)                         \
   V(PROXY_CONSTRUCTOR_MAP_INDEX, Map, proxy_constructor_map)                   \
   V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function)                          \
   V(PROXY_FUNCTION_MAP_INDEX, Map, proxy_function_map)                         \
   V(PROXY_MAP_INDEX, Map, proxy_map)                                           \
+  V(PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN, SharedFunctionInfo,          \
+    promise_get_capabilities_executor_shared_fun)                              \
   V(PROMISE_RESOLVE_SHARED_FUN, SharedFunctionInfo,                            \
     promise_resolve_shared_fun)                                                \
   V(PROMISE_REJECT_SHARED_FUN, SharedFunctionInfo, promise_reject_shared_fun)  \
+  V(PROMISE_THEN_FINALLY_SHARED_FUN, SharedFunctionInfo,                       \
+    promise_then_finally_shared_fun)                                           \
+  V(PROMISE_CATCH_FINALLY_SHARED_FUN, SharedFunctionInfo,                      \
+    promise_catch_finally_shared_fun)                                          \
+  V(PROMISE_VALUE_THUNK_FINALLY_SHARED_FUN, SharedFunctionInfo,                \
+    promise_value_thunk_finally_shared_fun)                                    \
+  V(PROMISE_THROWER_FINALLY_SHARED_FUN, SharedFunctionInfo,                    \
+    promise_thrower_finally_shared_fun)                                        \
+  V(PROMISE_PROTOTYPE_MAP_INDEX, Map, promise_prototype_map)                   \
   V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function)              \
   V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function)                        \
   V(REGEXP_LAST_MATCH_INFO_INDEX, RegExpMatchInfo, regexp_last_match_info)     \
@@ -295,7 +314,6 @@
   V(SECURITY_TOKEN_INDEX, Object, security_token)                              \
   V(SELF_WEAK_CELL_INDEX, WeakCell, self_weak_cell)                            \
   V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map)                             \
-  V(FIXED_ARRAY_ITERATOR_MAP_INDEX, Map, fixed_array_iterator_map)             \
   V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun)        \
   V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map)                     \
   V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map)                       \
@@ -303,37 +321,39 @@
     sloppy_function_without_prototype_map)                                     \
   V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map,                    \
     sloppy_function_with_readonly_prototype_map)                               \
-  V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map)                           \
-  V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor)        \
-  V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor)    \
-  V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor)          \
-  V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor)        \
-  V(WASM_MODULE_SYM_INDEX, Symbol, wasm_module_sym)                            \
-  V(WASM_TABLE_SYM_INDEX, Symbol, wasm_table_sym)                              \
-  V(WASM_MEMORY_SYM_INDEX, Symbol, wasm_memory_sym)                            \
-  V(WASM_INSTANCE_SYM_INDEX, Symbol, wasm_instance_sym)                        \
-  V(SLOPPY_ASYNC_FUNCTION_MAP_INDEX, Map, sloppy_async_function_map)           \
-  V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map)   \
   V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map)         \
-  V(STRICT_ASYNC_FUNCTION_MAP_INDEX, Map, strict_async_function_map)           \
+  V(SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP, Map,                                  \
+    slow_object_with_null_prototype_map)                                       \
+  V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, UnseededNumberDictionary,        \
+    slow_template_instantiations_cache)                                        \
   V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map)                     \
+  V(ASYNC_FUNCTION_MAP_INDEX, Map, async_function_map)                         \
   V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map)                       \
   V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map,                          \
     strict_function_without_prototype_map)                                     \
-  V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map)   \
+  V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map)                 \
+  V(CLASS_FUNCTION_MAP_INDEX, Map, class_function_map)                         \
   V(STRING_FUNCTION_INDEX, JSFunction, string_function)                        \
   V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map)   \
+  V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map)                       \
   V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function)                        \
+  V(NATIVE_FUNCTION_MAP_INDEX, Map, native_function_map)                       \
+  V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map)                           \
+  V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor)    \
+  V(WASM_INSTANCE_SYM_INDEX, Symbol, wasm_instance_sym)                        \
+  V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor)        \
+  V(WASM_MEMORY_SYM_INDEX, Symbol, wasm_memory_sym)                            \
+  V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor)        \
+  V(WASM_MODULE_SYM_INDEX, Symbol, wasm_module_sym)                            \
+  V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor)          \
+  V(WASM_TABLE_SYM_INDEX, Symbol, wasm_table_sym)                              \
   V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function)                   \
   V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype)              \
   V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun)                      \
-  V(UINT16X8_FUNCTION_INDEX, JSFunction, uint16x8_function)                    \
   V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun)                      \
-  V(UINT32X4_FUNCTION_INDEX, JSFunction, uint32x4_function)                    \
   V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun)                        \
   V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun)        \
-  V(UINT8X16_FUNCTION_INDEX, JSFunction, uint8x16_function)                    \
-  V(CURRENT_MODULE_INDEX, Module, current_module)                              \
+  V(EXPORTS_CONTAINER, Object, exports_container)                              \
   NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V)                                        \
   NATIVE_CONTEXT_IMPORTED_FIELDS(V)                                            \
   NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V)
@@ -486,6 +506,7 @@
     WHITE_LIST_INDEX = MIN_CONTEXT_SLOTS + 1
   };
 
+  void ResetErrorsThrown();
   void IncrementErrorsThrown();
   int GetErrorsThrown();
 
@@ -542,10 +563,31 @@
   inline bool IsDebugEvaluateContext();
   inline bool IsBlockContext();
   inline bool IsModuleContext();
+  inline bool IsEvalContext();
   inline bool IsScriptContext();
 
   inline bool HasSameSecurityTokenAs(Context* that);
 
+  // Removes a specific optimized code object from the optimized code map.
+  // In case of non-OSR the code reference is cleared from the cache entry but
+  // the entry itself is left in the map in order to proceed sharing literals.
+  void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
+
+  // Clear optimized code map.
+  void ClearOptimizedCodeMap();
+
+  // A native context keeps track of all OSR'd optimized functions.
+  inline bool OptimizedCodeMapIsCleared();
+  Code* SearchOptimizedCodeMap(SharedFunctionInfo* shared,
+                               BailoutId osr_ast_id);
+  int SearchOptimizedCodeMapEntry(SharedFunctionInfo* shared,
+                                  BailoutId osr_ast_id);
+
+  static void AddToOptimizedCodeMap(Handle<Context> native_context,
+                                    Handle<SharedFunctionInfo> shared,
+                                    Handle<Code> code,
+                                    BailoutId osr_ast_id);
+
   // A native context holds a list of all functions with optimized code.
   void AddOptimizedFunction(JSFunction* function);
   void RemoveOptimizedFunction(JSFunction* function);
@@ -601,20 +643,20 @@
   }
 
   static int FunctionMapIndex(LanguageMode language_mode, FunctionKind kind) {
-    // Note: Must be kept in sync with FastNewClosureStub::Generate.
+    // Note: Must be kept in sync with the FastNewClosure builtin.
     if (IsGeneratorFunction(kind)) {
-      return is_strict(language_mode) ? STRICT_GENERATOR_FUNCTION_MAP_INDEX
-                                      : SLOPPY_GENERATOR_FUNCTION_MAP_INDEX;
+      return GENERATOR_FUNCTION_MAP_INDEX;
     }
 
     if (IsAsyncFunction(kind)) {
-      return is_strict(language_mode) ? STRICT_ASYNC_FUNCTION_MAP_INDEX
-                                      : SLOPPY_ASYNC_FUNCTION_MAP_INDEX;
+      return ASYNC_FUNCTION_MAP_INDEX;
     }
 
     if (IsClassConstructor(kind)) {
-      // Use strict function map (no own "caller" / "arguments")
-      return STRICT_FUNCTION_MAP_INDEX;
+      // Like the strict function map, but with no 'name' accessor. 'name'
+      // needs to be the last property and it is added during instantiation,
+      // in case a static property with the same name exists.
+      return CLASS_FUNCTION_MAP_INDEX;
     }
 
     if (IsArrowFunction(kind) || IsConciseMethod(kind) ||
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 427a67d..c4753eb 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -57,7 +57,7 @@
 #ifndef V8_TARGET_BIG_ENDIAN
     Address mantissa_ptr = reinterpret_cast<Address>(&x);
 #else
-    Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
+    Address mantissa_ptr = reinterpret_cast<Address>(&x) + kInt32Size;
 #endif
     // Copy least significant 32 bits of mantissa.
     memcpy(&result, mantissa_ptr, sizeof(result));
@@ -122,18 +122,61 @@
          value == FastUI2D(FastD2UI(value));
 }
 
+bool DoubleToUint32IfEqualToSelf(double value, uint32_t* uint32_value) {
+  const double k2Pow52 = 4503599627370496.0;
+  const uint32_t kValidTopBits = 0x43300000;
+  const uint64_t kBottomBitMask = V8_2PART_UINT64_C(0x00000000, FFFFFFFF);
+
+  // Add 2^52 to the double to place valid uint32 values in the 32 least
+  // significant bits of the significand, fixing the exponent at 52 by
+  // effectively setting the (implicit) top bit of the significand. Note that
+  // this addition also normalises 0.0 and -0.0.
+  double shifted_value = value + k2Pow52;
+
+  // At this point, a valid uint32 valued double will be represented as:
+  //
+  // sign = 0
+  // exponent = 52
+  // significand = 1. 00...00 <value>
+  //       implicit^          ^^^^^^^ 32 bits
+  //                  ^^^^^^^^^^^^^^^ 52 bits
+  //
+  // Therefore, we can first check the top 32 bits to make sure that the sign,
+  // exponent and remaining significand bits are valid, and only then check the
+  // value in the bottom 32 bits.
+
+  uint64_t result = bit_cast<uint64_t>(shifted_value);
+  if ((result >> 32) == kValidTopBits) {
+    *uint32_value = result & kBottomBitMask;
+    return FastUI2D(result & kBottomBitMask) == value;
+  }
+  return false;
+}
 
 int32_t NumberToInt32(Object* number) {
   if (number->IsSmi()) return Smi::cast(number)->value();
   return DoubleToInt32(number->Number());
 }
 
-
 uint32_t NumberToUint32(Object* number) {
   if (number->IsSmi()) return Smi::cast(number)->value();
   return DoubleToUint32(number->Number());
 }
 
+uint32_t PositiveNumberToUint32(Object* number) {
+  if (number->IsSmi()) {
+    int value = Smi::cast(number)->value();
+    if (value <= 0) return 0;
+    return value;
+  }
+  DCHECK(number->IsHeapNumber());
+  double value = number->Number();
+  // Catch all values smaller than 1; the negated comparison is also true
+  // for NaNs.
+  if (!(value >= 1)) return 0;
+  uint32_t max = std::numeric_limits<uint32_t>::max();
+  if (value < max) return static_cast<uint32_t>(value);
+  return max;
+}
+
 int64_t NumberToInt64(Object* number) {
   if (number->IsSmi()) return Smi::cast(number)->value();
   return static_cast<int64_t>(number->Number());
@@ -154,7 +197,12 @@
   } else {
     DCHECK(number->IsHeapNumber());
     double value = HeapNumber::cast(number)->value();
-    if (value >= 0 && value <= std::numeric_limits<size_t>::max()) {
+    // If value were compared directly to the limit, the limit would be
+    // cast to a double and could round up to limit + 1, because a double
+    // does not have enough mantissa bits to represent it exactly.
+    // So we cast the limit first and use < instead of <=.
+    double maxSize = static_cast<double>(std::numeric_limits<size_t>::max());
+    if (value >= 0 && value < maxSize) {
       *result = static_cast<size_t>(value);
       return true;
     } else {
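
Two stand-alone checks (not V8 code) for the conversion changes above: the first reproduces the 2^52 trick from DoubleToUint32IfEqualToSelf, with memcpy in place of bit_cast; the second shows why the size_t limit is cast to double first and compared with <, assuming a 64-bit size_t:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

int main() {
  // 7.0 + 2^52 has sign 0, exponent 52 and the value 7 in the low 32 bits of
  // the significand, so the top 32 bits of the bit pattern are kValidTopBits.
  double shifted = 7.0 + 4503599627370496.0;
  uint64_t bits;
  static_assert(sizeof(bits) == sizeof(shifted), "double must be 64 bits");
  std::memcpy(&bits, &shifted, sizeof(bits));
  assert((bits >> 32) == 0x43300000u);
  assert((bits & 0xFFFFFFFFu) == 7u);

  // 2^64 - 1 is not representable as a double; the cast rounds it up to
  // exactly 2^64, so a <= comparison against the cast limit would wrongly
  // accept 2^64, which overflows size_t. Hence the < comparison above.
  double max_size = static_cast<double>(std::numeric_limits<uint64_t>::max());
  assert(max_size == 18446744073709551616.0);  // Exactly 2^64, i.e. max + 1.
  return 0;
}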
diff --git a/src/conversions.cc b/src/conversions.cc
index 7867719..d26274c 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -8,12 +8,14 @@
 #include <stdarg.h>
 #include <cmath>
 
+#include "src/allocation.h"
 #include "src/assert-scope.h"
 #include "src/char-predicates-inl.h"
 #include "src/codegen.h"
 #include "src/conversions-inl.h"
 #include "src/dtoa.h"
 #include "src/factory.h"
+#include "src/handles.h"
 #include "src/list-inl.h"
 #include "src/strtod.h"
 #include "src/utils.h"
@@ -168,7 +170,7 @@
         if (exponent < 0) exponent = -exponent;
         builder.AddDecimalInteger(exponent);
       }
-    return builder.Finalize();
+      return builder.Finalize();
     }
   }
 }
@@ -411,76 +413,91 @@
   return result;
 }
 
-
 char* DoubleToRadixCString(double value, int radix) {
   DCHECK(radix >= 2 && radix <= 36);
-
+  DCHECK(std::isfinite(value));
+  DCHECK_NE(0.0, value);
   // Character array used for conversion.
   static const char chars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
 
-  // Buffer for the integer part of the result. 1024 chars is enough
-  // for max integer value in radix 2.  We need room for a sign too.
-  static const int kBufferSize = 1100;
-  char integer_buffer[kBufferSize];
-  integer_buffer[kBufferSize - 1] = '\0';
+  // Temporary buffer for the result. We start with the decimal point in the
+  // middle and write to the left for the integer part and to the right for the
+  // fractional part. 1024 characters for the exponent and 52 for the mantissa
+  // either way, plus additional space for the sign, decimal point and string
+  // termination, should be sufficient.
+  static const int kBufferSize = 2200;
+  char buffer[kBufferSize];
+  int integer_cursor = kBufferSize / 2;
+  int fraction_cursor = integer_cursor;
 
-  // Buffer for the decimal part of the result.  We only generate up
-  // to kBufferSize - 1 chars for the decimal part.
-  char decimal_buffer[kBufferSize];
-  decimal_buffer[kBufferSize - 1] = '\0';
+  bool negative = value < 0;
+  if (negative) value = -value;
 
-  // Make sure the value is positive.
-  bool is_negative = value < 0.0;
-  if (is_negative) value = -value;
-
-  // Get the integer part and the decimal part.
-  double integer_part = std::floor(value);
-  double decimal_part = value - integer_part;
-
-  // Convert the integer part starting from the back.  Always generate
-  // at least one digit.
-  int integer_pos = kBufferSize - 2;
-  do {
-    double remainder = modulo(integer_part, radix);
-    integer_buffer[integer_pos--] = chars[static_cast<int>(remainder)];
-    integer_part -= remainder;
-    integer_part /= radix;
-  } while (integer_part >= 1.0);
-  // Sanity check.
-  DCHECK(integer_pos > 0);
-  // Add sign if needed.
-  if (is_negative) integer_buffer[integer_pos--] = '-';
-
-  // Convert the decimal part.  Repeatedly multiply by the radix to
-  // generate the next char.  Never generate more than kBufferSize - 1
-  // chars.
-  //
-  // TODO(1093998): We will often generate a full decimal_buffer of
-  // chars because hitting zero will often not happen.  The right
-  // solution would be to continue until the string representation can
-  // be read back and yield the original value.  To implement this
-  // efficiently, we probably have to modify dtoa.
-  int decimal_pos = 0;
-  while ((decimal_part > 0.0) && (decimal_pos < kBufferSize - 1)) {
-    decimal_part *= radix;
-    decimal_buffer[decimal_pos++] =
-        chars[static_cast<int>(std::floor(decimal_part))];
-    decimal_part -= std::floor(decimal_part);
+  // Split the value into an integer part and a fractional part.
+  double integer = std::floor(value);
+  double fraction = value - integer;
+  // We only compute fractional digits up to the input double's precision.
+  double delta = 0.5 * (Double(value).NextDouble() - value);
+  delta = std::max(Double(0.0).NextDouble(), delta);
+  DCHECK_GT(delta, 0.0);
+  if (fraction > delta) {
+    // Insert decimal point.
+    buffer[fraction_cursor++] = '.';
+    do {
+      // Shift up by one digit.
+      fraction *= radix;
+      delta *= radix;
+      // Write digit.
+      int digit = static_cast<int>(fraction);
+      buffer[fraction_cursor++] = chars[digit];
+      // Calculate remainder.
+      fraction -= digit;
+      // Round to even.
+      if (fraction > 0.5 || (fraction == 0.5 && (digit & 1))) {
+        if (fraction + delta > 1) {
+          // We need to backtrack over already written digits on carry-over.
+          while (true) {
+            fraction_cursor--;
+            if (fraction_cursor == kBufferSize / 2) {
+              CHECK_EQ('.', buffer[fraction_cursor]);
+              // Carry over to the integer part.
+              integer += 1;
+              break;
+            }
+            char c = buffer[fraction_cursor];
+            // Reconstruct digit.
+            int digit = c > '9' ? (c - 'a' + 10) : (c - '0');
+            if (digit + 1 < radix) {
+              buffer[fraction_cursor++] = chars[digit + 1];
+              break;
+            }
+          }
+          break;
+        }
+      }
+    } while (fraction > delta);
   }
-  decimal_buffer[decimal_pos] = '\0';
 
-  // Compute the result size.
-  int integer_part_size = kBufferSize - 2 - integer_pos;
-  // Make room for zero termination.
-  unsigned result_size = integer_part_size + decimal_pos;
-  // If the number has a decimal part, leave room for the period.
-  if (decimal_pos > 0) result_size++;
-  // Allocate result and fill in the parts.
-  SimpleStringBuilder builder(result_size + 1);
-  builder.AddSubstring(integer_buffer + integer_pos + 1, integer_part_size);
-  if (decimal_pos > 0) builder.AddCharacter('.');
-  builder.AddSubstring(decimal_buffer, decimal_pos);
-  return builder.Finalize();
+  // Compute integer digits. Fill unrepresented digits with zero.
+  while (Double(integer / radix).Exponent() > 0) {
+    integer /= radix;
+    buffer[--integer_cursor] = '0';
+  }
+  do {
+    double remainder = modulo(integer, radix);
+    buffer[--integer_cursor] = chars[static_cast<int>(remainder)];
+    integer = (integer - remainder) / radix;
+  } while (integer > 0);
+
+  // Add sign and terminate string.
+  if (negative) buffer[--integer_cursor] = '-';
+  buffer[fraction_cursor++] = '\0';
+  DCHECK_LT(fraction_cursor, kBufferSize);
+  DCHECK_LE(0, integer_cursor);
+  // Allocate new string as return value.
+  char* result = NewArray<char>(fraction_cursor - integer_cursor);
+  memcpy(result, buffer + integer_cursor, fraction_cursor - integer_cursor);
+  return result;
 }
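
The rewritten DoubleToRadixCString stops emitting fractional digits once the remaining fraction drops below half the distance to the input's neighbouring double. A minimal sketch of that termination rule for radix 2 (not V8 code; std::nextafter replaces Double::NextDouble, and the round-to-even carry handling is omitted):

#include <cmath>
#include <cstdio>
#include <limits>
#include <string>

std::string FractionDigitsBase2(double value) {
  double fraction = value - std::floor(value);
  // Half the distance to the next representable double above |value|.
  double delta =
      0.5 * (std::nextafter(value, std::numeric_limits<double>::infinity()) -
             value);
  std::string digits;
  while (fraction > delta) {
    // Shift up by one binary digit; delta grows at the same rate, so the
    // loop ends once the remaining fraction is below the input's precision.
    fraction *= 2;
    delta *= 2;
    int digit = static_cast<int>(fraction);
    digits.push_back(static_cast<char>('0' + digit));
    fraction -= digit;
  }
  return digits;
}

int main() {
  // 0.3 has no finite binary expansion, but the digit loop still terminates
  // after at most ~55 digits instead of filling a whole buffer.
  std::string digits = FractionDigitsBase2(0.3);
  std::printf("0.%s (%zu digits)\n", digits.c_str(), digits.size());
  return 0;
}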
 
 
diff --git a/src/conversions.h b/src/conversions.h
index 2dd91d9..4a54e70 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -8,12 +8,13 @@
 #include <limits>
 
 #include "src/base/logging.h"
-#include "src/handles.h"
 #include "src/utils.h"
 
 namespace v8 {
 namespace internal {
 
+template <typename T>
+class Handle;
 class UnicodeCache;
 
 // Maximum number of significant digits in decimal representation.
@@ -167,7 +168,15 @@
 // We also have to check for negative 0 as it is not a UInteger32.
 inline bool IsUint32Double(double value);
 
+// Tries to convert |value| to a uint32, setting the result in |uint32_value|.
+// If the output does not compare equal to the input, returns false and the
+// value in |uint32_value| is left unspecified.
+// Used for conversions such as in ECMA-262 15.4.2.2, which check "ToUint32(len)
+// is equal to len".
+inline bool DoubleToUint32IfEqualToSelf(double value, uint32_t* uint32_value);
+
 // Convert from Number object to C integer.
+inline uint32_t PositiveNumberToUint32(Object* number);
 inline int32_t NumberToInt32(Object* number);
 inline uint32_t NumberToUint32(Object* number);
 inline int64_t NumberToInt64(Object* number);
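
A stand-alone sketch (not V8 code; PositiveDoubleToUint32 is a hypothetical name) of the clamping contract implemented by the heap-number path of the new PositiveNumberToUint32:

#include <cassert>
#include <cstdint>
#include <limits>

uint32_t PositiveDoubleToUint32(double value) {
  if (!(value >= 1)) return 0;  // Also true for NaN: NaN >= 1 is false.
  uint32_t max = std::numeric_limits<uint32_t>::max();
  if (value < max) return static_cast<uint32_t>(value);
  return max;  // Saturate values beyond the uint32 range.
}

int main() {
  assert(PositiveDoubleToUint32(-3.5) == 0);
  assert(PositiveDoubleToUint32(0.99) == 0);
  assert(PositiveDoubleToUint32(std::numeric_limits<double>::quiet_NaN()) == 0);
  assert(PositiveDoubleToUint32(42.9) == 42);
  assert(PositiveDoubleToUint32(1e12) == std::numeric_limits<uint32_t>::max());
  return 0;
}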
diff --git a/src/counters-inl.h b/src/counters-inl.h
index 7219ef7..ce77806 100644
--- a/src/counters-inl.h
+++ b/src/counters-inl.h
@@ -10,6 +10,57 @@
 namespace v8 {
 namespace internal {
 
+void RuntimeCallTimer::Start(RuntimeCallCounter* counter,
+                             RuntimeCallTimer* parent) {
+  DCHECK(!IsStarted());
+  counter_ = counter;
+  parent_.SetValue(parent);
+  if (FLAG_runtime_stats ==
+      v8::tracing::TracingCategoryObserver::ENABLED_BY_SAMPLING) {
+    return;
+  }
+  base::TimeTicks now = Now();
+  if (parent) parent->Pause(now);
+  Resume(now);
+  DCHECK(IsStarted());
+}
+
+void RuntimeCallTimer::Pause(base::TimeTicks now) {
+  DCHECK(IsStarted());
+  elapsed_ += (now - start_ticks_);
+  start_ticks_ = base::TimeTicks();
+}
+
+void RuntimeCallTimer::Resume(base::TimeTicks now) {
+  DCHECK(!IsStarted());
+  start_ticks_ = now;
+}
+
+RuntimeCallTimer* RuntimeCallTimer::Stop() {
+  if (!IsStarted()) return parent();
+  base::TimeTicks now = Now();
+  Pause(now);
+  counter_->Increment();
+  CommitTimeToCounter();
+
+  RuntimeCallTimer* parent_timer = parent();
+  if (parent_timer) {
+    parent_timer->Resume(now);
+  }
+  return parent_timer;
+}
+
+void RuntimeCallTimer::CommitTimeToCounter() {
+  counter_->Add(elapsed_);
+  elapsed_ = base::TimeDelta();
+}
+
+bool RuntimeCallTimer::IsStarted() { return start_ticks_ != base::TimeTicks(); }
+
+base::TimeTicks RuntimeCallTimer::Now() {
+  return base::TimeTicks::HighResolutionNow();
+}
+
 RuntimeCallTimerScope::RuntimeCallTimerScope(
     Isolate* isolate, RuntimeCallStats::CounterId counter_id) {
   if (V8_UNLIKELY(FLAG_runtime_stats)) {
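
The new RuntimeCallTimer replaces the old ElapsedTimer-plus-subtraction scheme: starting a nested timer now pauses its parent, so each counter accumulates its own time directly. A stand-alone sketch (not V8 code; MiniTimer is a hypothetical reduction that takes explicit time points instead of calling Now()):

#include <chrono>
#include <cstdio>

using Clock = std::chrono::steady_clock;

struct MiniTimer {
  const char* name;
  MiniTimer* parent = nullptr;
  Clock::duration elapsed{0};       // Mirrors elapsed_.
  Clock::time_point start_ticks{};  // Mirrors start_ticks_.

  void Start(MiniTimer* p, Clock::time_point now) {
    parent = p;
    if (parent) parent->Pause(now);  // Parent stops accruing own time.
    Resume(now);
  }
  MiniTimer* Stop(Clock::time_point now) {
    Pause(now);
    if (parent) parent->Resume(now);
    return parent;
  }
  void Pause(Clock::time_point now) { elapsed += now - start_ticks; }
  void Resume(Clock::time_point now) { start_ticks = now; }
};

int main() {
  MiniTimer outer{"outer"}, inner{"inner"};
  Clock::time_point t = Clock::now();
  outer.Start(nullptr, t);
  inner.Start(&outer, t + std::chrono::milliseconds(10));
  inner.Stop(t + std::chrono::milliseconds(25));  // inner own time: 15ms.
  outer.Stop(t + std::chrono::milliseconds(40));  // outer own time: 25ms.
  std::printf("outer=%lldms inner=%lldms\n",
              (long long)std::chrono::duration_cast<
                  std::chrono::milliseconds>(outer.elapsed).count(),
              (long long)std::chrono::duration_cast<
                  std::chrono::milliseconds>(inner.elapsed).count());
  return 0;
}

Because own time is accumulated directly, Snapshot() in counters.cc only has to pause the topmost timer, commit each timer's elapsed time up the parent chain, and resume.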
diff --git a/src/counters.cc b/src/counters.cc
index 5089eb2..66e4def 100644
--- a/src/counters.cc
+++ b/src/counters.cc
@@ -216,10 +216,11 @@
   // binary size increase: std::vector::push_back expands to a large amount of
   // instructions, and this function is invoked repeatedly by macros.
   V8_NOINLINE void Add(RuntimeCallCounter* counter) {
-    if (counter->count == 0) return;
-    entries.push_back(Entry(counter->name, counter->time, counter->count));
-    total_time += counter->time;
-    total_call_count += counter->count;
+    if (counter->count() == 0) return;
+    entries.push_back(
+        Entry(counter->name(), counter->time(), counter->count()));
+    total_time += counter->time();
+    total_call_count += counter->count();
   }
 
  private:
@@ -273,20 +274,33 @@
 };
 
 void RuntimeCallCounter::Reset() {
-  count = 0;
-  time = base::TimeDelta();
+  count_ = 0;
+  time_ = base::TimeDelta();
 }
 
 void RuntimeCallCounter::Dump(v8::tracing::TracedValue* value) {
-  value->BeginArray(name);
-  value->AppendLongInteger(count);
-  value->AppendLongInteger(time.InMicroseconds());
+  value->BeginArray(name_);
+  value->AppendDouble(count_);
+  value->AppendDouble(time_.InMicroseconds());
   value->EndArray();
 }
 
 void RuntimeCallCounter::Add(RuntimeCallCounter* other) {
-  count += other->count;
-  time += other->time;
+  count_ += other->count();
+  time_ += other->time();
+}
+
+void RuntimeCallTimer::Snapshot() {
+  base::TimeTicks now = Now();
+  // Pause only the topmost timer in the timer stack.
+  Pause(now);
+  // Commit all the timer's elapsed time to the counters.
+  RuntimeCallTimer* timer = this;
+  while (timer != nullptr) {
+    timer->CommitTimeToCounter();
+    timer = timer->parent();
+  }
+  Resume(now);
 }
 
 // static
@@ -310,10 +324,14 @@
 };
 
 // static
+const int RuntimeCallStats::counters_count =
+    arraysize(RuntimeCallStats::counters);
+
+// static
 void RuntimeCallStats::Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
                              CounterId counter_id) {
   RuntimeCallCounter* counter = &(stats->*counter_id);
-  DCHECK(counter->name != nullptr);
+  DCHECK(counter->name() != nullptr);
   timer->Start(counter, stats->current_timer_.Value());
   stats->current_timer_.SetValue(timer);
 }
@@ -329,7 +347,7 @@
     RuntimeCallTimer* next = stats->current_timer_.Value();
     while (next && next->parent() != timer) next = next->parent();
     if (next == nullptr) return;
-    next->parent_.SetValue(timer->Stop());
+    next->set_parent(timer->Stop());
   }
 }
 
@@ -348,13 +366,13 @@
   RuntimeCallTimer* timer = stats->current_timer_.Value();
   // When RCS are enabled dynamically there might be no current timer set up.
   if (timer == nullptr) return;
-  timer->counter_ = &(stats->*counter_id);
+  timer->set_counter(&(stats->*counter_id));
 }
 
 void RuntimeCallStats::Print(std::ostream& os) {
   RuntimeCallStatEntries entries;
   if (current_timer_.Value() != nullptr) {
-    current_timer_.Value()->Elapsed();
+    current_timer_.Value()->Snapshot();
   }
   for (const RuntimeCallStats::CounterId counter_id :
        RuntimeCallStats::counters) {
@@ -388,7 +406,7 @@
   for (const RuntimeCallStats::CounterId counter_id :
        RuntimeCallStats::counters) {
     RuntimeCallCounter* counter = &(this->*counter_id);
-    if (counter->count > 0) counter->Dump(value);
+    if (counter->count() > 0) counter->Dump(value);
   }
 
   in_use_ = false;
diff --git a/src/counters.h b/src/counters.h
index 4415250..06a680c 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -484,65 +484,51 @@
          value * ((current_ms - last_ms_) / interval_ms);
 }
 
-struct RuntimeCallCounter {
-  explicit RuntimeCallCounter(const char* name) : name(name) {}
+class RuntimeCallCounter final {
+ public:
+  explicit RuntimeCallCounter(const char* name) : name_(name) {}
   V8_NOINLINE void Reset();
   V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
   void Add(RuntimeCallCounter* other);
 
-  const char* name;
-  int64_t count = 0;
-  base::TimeDelta time;
+  const char* name() const { return name_; }
+  int64_t count() const { return count_; }
+  base::TimeDelta time() const { return time_; }
+  void Increment() { count_++; }
+  void Add(base::TimeDelta delta) { time_ += delta; }
+
+ private:
+  const char* name_;
+  int64_t count_ = 0;
+  base::TimeDelta time_;
 };
 
 // RuntimeCallTimer is used to keep track of the stack of currently active
 // timers used for properly measuring the own time of a RuntimeCallCounter.
-class RuntimeCallTimer {
+class RuntimeCallTimer final {
  public:
   RuntimeCallCounter* counter() { return counter_; }
-  base::ElapsedTimer timer() { return timer_; }
+  void set_counter(RuntimeCallCounter* counter) { counter_ = counter; }
   RuntimeCallTimer* parent() const { return parent_.Value(); }
+  void set_parent(RuntimeCallTimer* timer) { parent_.SetValue(timer); }
+  const char* name() const { return counter_->name(); }
+
+  inline bool IsStarted();
+
+  inline void Start(RuntimeCallCounter* counter, RuntimeCallTimer* parent);
+  void Snapshot();
+  inline RuntimeCallTimer* Stop();
 
  private:
-  friend class RuntimeCallStats;
-
-  inline void Start(RuntimeCallCounter* counter, RuntimeCallTimer* parent) {
-    counter_ = counter;
-    parent_.SetValue(parent);
-    if (FLAG_runtime_stats !=
-        v8::tracing::TracingCategoryObserver::ENABLED_BY_SAMPLING) {
-      timer_.Start();
-    }
-  }
-
-  inline RuntimeCallTimer* Stop() {
-    if (!timer_.IsStarted()) return parent();
-    base::TimeDelta delta = timer_.Elapsed();
-    timer_.Stop();
-    counter_->count++;
-    counter_->time += delta;
-    if (parent()) {
-      // Adjust parent timer so that it does not include sub timer's time.
-      parent()->counter_->time -= delta;
-    }
-    return parent();
-  }
-
-  inline void Elapsed() {
-    base::TimeDelta delta = timer_.Elapsed();
-    counter_->time += delta;
-    if (parent()) {
-      parent()->counter_->time -= delta;
-      parent()->Elapsed();
-    }
-    timer_.Restart();
-  }
-
-  const char* name() { return counter_->name; }
+  inline void Pause(base::TimeTicks now);
+  inline void Resume(base::TimeTicks now);
+  inline void CommitTimeToCounter();
+  inline base::TimeTicks Now();
 
   RuntimeCallCounter* counter_ = nullptr;
   base::AtomicValue<RuntimeCallTimer*> parent_;
-  base::ElapsedTimer timer_;
+  base::TimeTicks start_ticks_;
+  base::TimeDelta elapsed_;
 };
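
For orientation: the reworked timer drops the embedded base::ElapsedTimer in favor of an explicit start_ticks_/elapsed_ pair. A child timer pauses its parent, so each counter accrues only its own time, and Snapshot() (which replaces Elapsed() in the .cc hunk above) can flush the whole stack mid-run. Below is a minimal self-contained sketch of that bookkeeping using std::chrono in place of base::TimeTicks; it is an approximation under those assumptions, not the V8 implementation.

    #include <chrono>
    #include <cstdint>

    // Stand-ins for base::TimeTicks / base::TimeDelta; names are illustrative.
    using Ticks = std::chrono::steady_clock::time_point;
    using Delta = std::chrono::steady_clock::duration;
    static Ticks Now() { return std::chrono::steady_clock::now(); }

    struct Counter {
      int64_t count = 0;
      Delta time{};
    };

    class Timer {
     public:
      void Start(Counter* counter, Timer* parent) {
        counter_ = counter;
        parent_ = parent;
        Ticks now = Now();
        if (parent_) parent_->Pause(now);  // parent stops accruing "own time"
        Resume(now);
      }
      Timer* Stop() {
        Ticks now = Now();
        Pause(now);
        counter_->count++;
        CommitTimeToCounter();
        if (parent_) parent_->Resume(now);
        return parent_;
      }
      // Flush accumulated time into the counters without stopping the stack.
      void Snapshot() {
        Ticks now = Now();
        Pause(now);  // only the innermost timer is running
        for (Timer* t = this; t != nullptr; t = t->parent_) {
          t->CommitTimeToCounter();
        }
        Resume(now);
      }
     private:
      void Pause(Ticks now) { elapsed_ += now - start_ticks_; }
      void Resume(Ticks now) { start_ticks_ = now; }
      void CommitTimeToCounter() {
        counter_->time += elapsed_;
        elapsed_ = Delta{};
      }
      Counter* counter_ = nullptr;
      Timer* parent_ = nullptr;
      Ticks start_ticks_{};
      Delta elapsed_{};
    };
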
 
 #define FOR_EACH_API_COUNTER(V)                            \
@@ -560,7 +546,6 @@
   V(Date_New)                                              \
   V(Date_NumberValue)                                      \
   V(Debug_Call)                                            \
-  V(Debug_GetMirror)                                       \
   V(Error_New)                                             \
   V(External_New)                                          \
   V(Float32Array_New)                                      \
@@ -571,6 +556,7 @@
   V(FunctionTemplate_GetFunction)                          \
   V(FunctionTemplate_New)                                  \
   V(FunctionTemplate_NewRemoteInstance)                    \
+  V(FunctionTemplate_NewWithCache)                         \
   V(FunctionTemplate_NewWithFastHandler)                   \
   V(Int16Array_New)                                        \
   V(Int32Array_New)                                        \
@@ -641,6 +627,8 @@
   V(Promise_HasRejectHandler)                              \
   V(Promise_Resolver_New)                                  \
   V(Promise_Resolver_Resolve)                              \
+  V(Promise_Result)                                        \
+  V(Promise_Status)                                        \
   V(Promise_Then)                                          \
   V(Proxy_New)                                             \
   V(RangeError_New)                                        \
@@ -696,23 +684,36 @@
   V(AccessorNameGetterCallback_FunctionPrototype)   \
   V(AccessorNameGetterCallback_StringLength)        \
   V(AccessorNameSetterCallback)                     \
-  V(Compile)                                        \
-  V(CompileCode)                                    \
   V(CompileCodeLazy)                                \
   V(CompileDeserialize)                             \
   V(CompileEval)                                    \
   V(CompileFullCode)                                \
+  V(CompileAnalyse)                                 \
+  V(CompileBackgroundIgnition)                      \
+  V(CompileFunction)                                \
+  V(CompileGetFromOptimizedCodeMap)                 \
+  V(CompileGetUnoptimizedCode)                      \
   V(CompileIgnition)                                \
-  V(CompilerDispatcher)                             \
+  V(CompileIgnitionFinalization)                    \
+  V(CompileInnerFunction)                           \
+  V(CompileRenumber)                                \
+  V(CompileRewriteReturnResult)                     \
+  V(CompileScopeAnalysis)                           \
+  V(CompileScript)                                  \
   V(CompileSerialize)                               \
+  V(CompileWaitForDispatcher)                       \
   V(DeoptimizeCode)                                 \
   V(FunctionCallback)                               \
   V(GC)                                             \
+  V(GC_AllAvailableGarbage)                         \
+  V(GCEpilogueCallback)                             \
+  V(GCPrologueCallback)                             \
   V(GenericNamedPropertyDefinerCallback)            \
   V(GenericNamedPropertyDeleterCallback)            \
   V(GenericNamedPropertyDescriptorCallback)         \
   V(GenericNamedPropertyQueryCallback)              \
   V(GenericNamedPropertySetterCallback)             \
+  V(GetMoreDataCallback)                            \
   V(IndexedPropertyDefinerCallback)                 \
   V(IndexedPropertyDeleterCallback)                 \
   V(IndexedPropertyDescriptorCallback)              \
@@ -728,11 +729,16 @@
   V(Object_DeleteProperty)                          \
   V(OptimizeCode)                                   \
   V(ParseArrowFunctionLiteral)                      \
+  V(ParseBackgroundArrowFunctionLiteral)            \
+  V(ParseBackgroundFunctionLiteral)                 \
   V(ParseEval)                                      \
   V(ParseFunction)                                  \
   V(ParseFunctionLiteral)                           \
   V(ParseProgram)                                   \
   V(PreParseArrowFunctionLiteral)                   \
+  V(PreParseBackgroundArrowFunctionLiteral)         \
+  V(PreParseBackgroundNoVariableResolution)         \
+  V(PreParseBackgroundWithVariableResolution)       \
   V(PreParseNoVariableResolution)                   \
   V(PreParseWithVariableResolution)                 \
   V(PropertyCallback)                               \
@@ -741,6 +747,9 @@
   V(PrototypeObject_DeleteProperty)                 \
   V(RecompileConcurrent)                            \
   V(RecompileSynchronous)                           \
+  V(TestCounter1)                                   \
+  V(TestCounter2)                                   \
+  V(TestCounter3)                                   \
   /* Dummy counter for the unexpected stub miss. */ \
   V(UnexpectedStubMiss)
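
These V(...) tables are classic X-macro lists: the same list is expanded several times with different definitions of V to declare counter members, build the static counters array, and so on. A toy illustration of the idiom (entries invented, not the V8 set):

    #include <cstdio>

    // Toy X-macro list.
    #define FOR_EACH_TOY_COUNTER(V) \
      V(Parse)                      \
      V(Compile)                    \
      V(Execute)

    struct ToyStats {
      // Expansion 1: one int member per list entry.
    #define DECLARE_COUNTER(name) int name##_count = 0;
      FOR_EACH_TOY_COUNTER(DECLARE_COUNTER)
    #undef DECLARE_COUNTER

      void Dump() {
        // Expansion 2: iterate the same list to print every member.
    #define PRINT_COUNTER(name) std::printf(#name ": %d\n", name##_count);
        FOR_EACH_TOY_COUNTER(PRINT_COUNTER)
    #undef PRINT_COUNTER
      }
    };

    int main() {
      ToyStats stats;
      stats.Parse_count = 2;
      stats.Dump();
    }
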
 
@@ -750,8 +759,6 @@
   V(KeyedLoadIC_LoadIndexedInterceptorStub)      \
   V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub)    \
   V(KeyedLoadIC_LoadElementDH)                   \
-  V(KeyedLoadIC_LoadFastElementStub)             \
-  V(KeyedLoadIC_LoadDictionaryElementStub)       \
   V(KeyedLoadIC_SlowStub)                        \
   V(KeyedStoreIC_ElementsTransitionAndStoreStub) \
   V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub)  \
@@ -778,7 +785,6 @@
   V(LoadIC_LoadFieldDH)                          \
   V(LoadIC_LoadFieldFromPrototypeDH)             \
   V(LoadIC_LoadField)                            \
-  V(LoadIC_LoadFieldStub)                        \
   V(LoadIC_LoadGlobal)                           \
   V(LoadIC_LoadInterceptor)                      \
   V(LoadIC_LoadNonexistentDH)                    \
@@ -786,6 +792,7 @@
   V(LoadIC_LoadNormal)                           \
   V(LoadIC_LoadScriptContextFieldStub)           \
   V(LoadIC_LoadViaGetter)                        \
+  V(LoadIC_NonReceiver)                          \
   V(LoadIC_Premonomorphic)                       \
   V(LoadIC_SlowStub)                             \
   V(LoadIC_StringLengthStub)                     \
@@ -797,6 +804,7 @@
   V(StoreIC_HandlerCacheHit_Accessor)            \
   V(StoreIC_HandlerCacheHit_Data)                \
   V(StoreIC_HandlerCacheHit_Transition)          \
+  V(StoreIC_NonReceiver)                         \
   V(StoreIC_Premonomorphic)                      \
   V(StoreIC_SlowStub)                            \
   V(StoreIC_StoreCallback)                       \
@@ -812,7 +820,7 @@
   V(StoreIC_StoreTransitionDH)                   \
   V(StoreIC_StoreViaSetter)
 
-class RuntimeCallStats : public ZoneObject {
+class RuntimeCallStats final : public ZoneObject {
  public:
   typedef RuntimeCallCounter RuntimeCallStats::*CounterId;
 
@@ -838,26 +846,29 @@
 #undef CALL_BUILTIN_COUNTER
 
   static const CounterId counters[];
+  static const int counters_count;
 
   // Start measuring the time for a function. This will establish the
   // connection to the parent counter for properly calculating the own times.
-  static void Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
-                    CounterId counter_id);
+  V8_EXPORT_PRIVATE static void Enter(RuntimeCallStats* stats,
+                                      RuntimeCallTimer* timer,
+                                      CounterId counter_id);
 
   // Leave a scope for a measured runtime function. This will properly add
   // the time delta to the current_counter and subtract the delta from its
   // parent.
-  static void Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer);
+  V8_EXPORT_PRIVATE static void Leave(RuntimeCallStats* stats,
+                                      RuntimeCallTimer* timer);
 
   // Set counter id for the innermost measurement. It can be used to refine
   // event kind when a runtime entry counter is too generic.
-  static void CorrectCurrentCounterId(RuntimeCallStats* stats,
-                                      CounterId counter_id);
+  V8_EXPORT_PRIVATE static void CorrectCurrentCounterId(RuntimeCallStats* stats,
+                                                        CounterId counter_id);
 
-  void Reset();
+  V8_EXPORT_PRIVATE void Reset();
   // Add all entries from another stats object.
   void Add(RuntimeCallStats* other);
-  void Print(std::ostream& os);
+  V8_EXPORT_PRIVATE void Print(std::ostream& os);
   V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
 
   RuntimeCallStats() {
@@ -887,6 +898,36 @@
   CHANGE_CURRENT_RUNTIME_COUNTER(isolate->counters()->runtime_call_stats(), \
                                  Handler_##counter_name)
 
+// A RuntimeCallTimerScope wraps around a RuntimeCallTimer to measure the
+// time of a C++ scope.
+class RuntimeCallTimerScope {
+ public:
+  inline RuntimeCallTimerScope(Isolate* isolate,
+                               RuntimeCallStats::CounterId counter_id);
+  // This constructor is here just to avoid calling GetIsolate() when the
+  // stats are disabled and the isolate is not directly available.
+  inline RuntimeCallTimerScope(HeapObject* heap_object,
+                               RuntimeCallStats::CounterId counter_id);
+  inline RuntimeCallTimerScope(RuntimeCallStats* stats,
+                               RuntimeCallStats::CounterId counter_id);
+
+  inline ~RuntimeCallTimerScope() {
+    if (V8_UNLIKELY(stats_ != nullptr)) {
+      RuntimeCallStats::Leave(stats_, &timer_);
+    }
+  }
+
+ private:
+  V8_INLINE void Initialize(RuntimeCallStats* stats,
+                            RuntimeCallStats::CounterId counter_id) {
+    stats_ = stats;
+    RuntimeCallStats::Enter(stats_, &timer_, counter_id);
+  }
+
+  RuntimeCallStats* stats_ = nullptr;
+  RuntimeCallTimer timer_;
+};
+
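
The scope moved above is a plain RAII guard: Enter in the constructor, Leave in the destructor, so every early return still closes the measurement. A stand-alone illustration of the same shape (not V8 code; names invented):

    #include <cstdio>

    struct Stats {
      void Enter(const char* what) { std::printf("enter %s\n", what); }
      void Leave(const char* what) { std::printf("leave %s\n", what); }
    };

    class TimerScope {
     public:
      TimerScope(Stats* stats, const char* what) : stats_(stats), what_(what) {
        if (stats_) stats_->Enter(what_);
      }
      ~TimerScope() {
        if (stats_) stats_->Leave(what_);  // mirrors the V8_UNLIKELY guard above
      }

     private:
      Stats* stats_;
      const char* what_;
    };

    int main() {
      Stats stats;
      TimerScope scope(&stats, "Runtime_Example");  // counter name illustrative
      std::printf("timed work\n");
    }
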
 #define HISTOGRAM_RANGE_LIST(HR)                                              \
   /* Generic range histograms */                                              \
   HR(detached_context_age_in_gc, V8.DetachedContextAgeInGC, 0, 20, 21)        \
@@ -900,6 +941,7 @@
   HR(incremental_marking_reason, V8.GCIncrementalMarkingReason, 0, 21, 22)    \
   HR(mark_compact_reason, V8.GCMarkCompactReason, 0, 21, 22)                  \
   HR(scavenge_reason, V8.GCScavengeReason, 0, 21, 22)                         \
+  HR(young_generation_handling, V8.GCYoungGenerationHandling, 0, 2, 3)        \
   /* Asm/Wasm. */                                                             \
   HR(wasm_functions_per_module, V8.WasmFunctionsPerModule, 1, 10000, 51)
 
@@ -941,6 +983,8 @@
   HT(wasm_compile_module_time, V8.WasmCompileModuleMicroSeconds, 1000000,      \
      MICROSECOND)                                                              \
   HT(wasm_compile_function_time, V8.WasmCompileFunctionMicroSeconds, 1000000,  \
+     MICROSECOND)                                                              \
+  HT(asm_wasm_translation_time, V8.AsmWasmTranslationMicroSeconds, 1000000,    \
      MICROSECOND)
 
 #define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
@@ -1050,8 +1094,6 @@
   SC(ic_compare_miss, V8.ICCompareMiss)                                        \
   SC(ic_call_miss, V8.ICCallMiss)                                              \
   SC(ic_keyed_call_miss, V8.ICKeyedCallMiss)                                   \
-  SC(ic_load_miss, V8.ICLoadMiss)                                              \
-  SC(ic_keyed_load_miss, V8.ICKeyedLoadMiss)                                   \
   SC(ic_store_miss, V8.ICStoreMiss)                                            \
   SC(ic_keyed_store_miss, V8.ICKeyedStoreMiss)                                 \
   SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime)                   \
@@ -1106,10 +1148,6 @@
   SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable)                 \
   SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted)                 \
   SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)                           \
-  SC(turbo_escape_allocs_replaced, V8.TurboEscapeAllocsReplaced)               \
-  SC(crankshaft_escape_allocs_replaced, V8.CrankshaftEscapeAllocsReplaced)     \
-  SC(turbo_escape_loads_replaced, V8.TurboEscapeLoadsReplaced)                 \
-  SC(crankshaft_escape_loads_replaced, V8.CrankshaftEscapeLoadsReplaced)       \
   /* Total code size (including metadata) of baseline code or bytecode. */     \
   SC(total_baseline_code_size, V8.TotalBaselineCodeSize)                       \
   /* Total count of functions compiled using the baseline compiler. */         \
@@ -1298,36 +1336,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
 };
 
-// A RuntimeCallTimerScopes wraps around a RuntimeCallTimer to measure the
-// the time of C++ scope.
-class RuntimeCallTimerScope {
- public:
-  inline RuntimeCallTimerScope(Isolate* isolate,
-                               RuntimeCallStats::CounterId counter_id);
-  // This constructor is here just to avoid calling GetIsolate() when the
-  // stats are disabled and the isolate is not directly available.
-  inline RuntimeCallTimerScope(HeapObject* heap_object,
-                               RuntimeCallStats::CounterId counter_id);
-  inline RuntimeCallTimerScope(RuntimeCallStats* stats,
-                               RuntimeCallStats::CounterId counter_id);
-
-  inline ~RuntimeCallTimerScope() {
-    if (V8_UNLIKELY(stats_ != nullptr)) {
-      RuntimeCallStats::Leave(stats_, &timer_);
-    }
-  }
-
- private:
-  V8_INLINE void Initialize(RuntimeCallStats* stats,
-                            RuntimeCallStats::CounterId counter_id) {
-    stats_ = stats;
-    RuntimeCallStats::Enter(stats_, &timer_, counter_id);
-  }
-
-  RuntimeCallStats* stats_ = nullptr;
-  RuntimeCallTimer timer_;
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/crankshaft/arm/lithium-codegen-arm.cc b/src/crankshaft/arm/lithium-codegen-arm.cc
index e092a9e..11c70fb 100644
--- a/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -5,6 +5,7 @@
 #include "src/crankshaft/arm/lithium-codegen-arm.h"
 
 #include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
@@ -164,15 +165,18 @@
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info()->scope()->scope_type());
         __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
                Operand(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ push(r1);
+        __ Push(Smi::FromInt(info()->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
     }
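
In this hunk the stub call is replaced by the CSA-based FastNewFunctionContext builtin, which is parameterized by scope type; the runtime fallback now receives the scope type as an extra Smi argument. A sketch of the size-based dispatch being emitted, with the slot limit and helper names as placeholders rather than the real assembler constants:

    #include <cstdio>

    enum ScopeType { FUNCTION_SCOPE, EVAL_SCOPE };

    void CallFastBuiltin(int slots, ScopeType type) {
      std::printf("builtin: %d slots, scope %d\n", slots, type);
    }
    void CallRuntimeFallback(ScopeType type) {
      std::printf("runtime fallback, scope %d\n", type);
    }

    void AllocateFunctionContext(int slots, ScopeType type) {
      const int kMaxFastSlots = 55;  // stand-in for MaximumFunctionContextSlots()
      if (slots <= kMaxFastSlots) {
        CallFastBuiltin(slots, type);  // result is in new space: no write barrier
      } else {
        CallRuntimeFallback(type);  // scope type is now pushed as an argument
      }
    }

    int main() {
      AllocateFunctionContext(8, FUNCTION_SCOPE);
      AllocateFunctionContext(1000, EVAL_SCOPE);
    }
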
@@ -266,7 +270,7 @@
         DCHECK(!frame_is_built_);
         DCHECK(info()->IsStub());
         frame_is_built_ = true;
-        __ Move(scratch0(), Smi::FromInt(StackFrame::STUB));
+        __ mov(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
         __ PushCommonFrame(scratch0());
         Comment(";;; Deferred code");
       }
@@ -340,7 +344,7 @@
       // This variant of deopt can only be used with stubs. Since we don't
       // have a function pointer to install in the stack frame that we're
       // building, install a special marker there instead.
-      __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
+      __ mov(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
       __ push(ip);
       DCHECK(info()->IsStub());
     }
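
The repeated Smi::FromInt(StackFrame::...) to StackFrame::TypeToMarker(...) rewrites in these hunks point at one motivation: on 64-bit targets a Smi carries its payload in the upper 32 bits, making frame markers pointer-sized. A 32-bit type-to-marker encoding along the lines below keeps markers compact; the exact V8 encoding may differ, so treat this as an assumption:

    #include <cstdint>
    #include <cstdio>

    enum StackFrameType { STUB = 3, ARGUMENTS_ADAPTOR = 8 };  // values illustrative

    int32_t TypeToMarker(StackFrameType type) {
      // Smi-style encoding in 32 bits: payload shifted left, tag bit clear.
      return static_cast<int32_t>(type) << 1;
    }

    int main() {
      std::printf("STUB marker: %d\n", TypeToMarker(STUB));
    }
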
@@ -2126,12 +2130,6 @@
         __ b(eq, instr->TrueLabel(chunk_));
       }
 
-      if (expected & ToBooleanHint::kSimdValue) {
-        // SIMD value -> true.
-        __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
-        __ b(eq, instr->TrueLabel(chunk_));
-      }
-
       if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         DwVfpRegister dbl_scratch = double_scratch0();
@@ -2873,7 +2871,7 @@
       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
-      __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+      __ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
       __ cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid)));
       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
     }
@@ -2937,7 +2935,8 @@
     __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
     __ ldr(result, MemOperand(scratch,
                               CommonFrameConstants::kContextOrFrameTypeOffset));
-    __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    __ cmp(result,
+           Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
     // Result is the frame pointer for the frame if not adapted and for the real
     // frame below the adaptor frame if adapted.
@@ -3125,7 +3124,7 @@
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
-  __ Move(scratch0(), instr->hydrogen()->pairs());
+  __ Move(scratch0(), instr->hydrogen()->declarations());
   __ push(scratch0());
   __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   __ push(scratch0());
@@ -3501,7 +3500,8 @@
   __ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ ldr(scratch3,
          MemOperand(scratch2, StandardFrameConstants::kContextOffset));
-  __ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(scratch3,
+         Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(ne, &no_arguments_adaptor);
 
   // Drop current frame and load arguments count from arguments adaptor frame.
@@ -4041,13 +4041,17 @@
       if (Smi::IsValid(int_key)) {
         __ mov(r3, Operand(Smi::FromInt(int_key)));
       } else {
-        // We should never get here at runtime because there is a smi check on
-        // the key before this point.
-        __ stop("expected smi");
+        Abort(kArrayIndexConstantValueTooBig);
       }
     } else {
-      __ Move(r3, ToRegister(key));
-      __ SmiTag(r3);
+      Label is_smi;
+      __ SmiTag(r3, ToRegister(key), SetCC);
+      // Deopt if the key is outside the Smi range. The stub expects a Smi and
+      // would bump the elements into dictionary mode (triggering a deopt) anyway.
+      __ b(vc, &is_smi);
+      __ PopSafepointRegisters();
+      DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow);
+      __ bind(&is_smi);
     }
 
     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
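
SmiTag with SetCC makes the overflow flag reflect whether the key fits in a Smi, so the old unconditional __ stop becomes a proper deopt above. A portable sketch of the same fits-in-a-Smi test (31-bit payload, as on 32-bit targets):

    #include <cstdint>
    #include <cstdio>

    // Tagging shifts the value left by one, which overflows for values
    // outside the 31-bit Smi payload range.
    bool TrySmiTag(int32_t value, int32_t* tagged) {
      if (value > INT32_MAX / 2 || value < INT32_MIN / 2) return false;  // overflow
      *tagged = value * 2;  // Smi tag bit (0) ends up in the LSB
      return true;
    }

    int main() {
      int32_t tagged = 0;
      std::printf("fits: %d\n", TrySmiTag(1 << 29, &tagged));
      std::printf("fits: %d\n", TrySmiTag(INT32_MAX, &tagged));
    }
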
@@ -4755,6 +4759,13 @@
 
 
 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  Label deopt, done;
+  // If the map is not deprecated, the migration attempt does not make sense.
+  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  __ ldr(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
+  __ tst(scratch0(), Operand(Map::Deprecated::kMask));
+  __ b(eq, &deopt);
+
   {
     PushSafepointRegistersScope scope(this);
     __ push(object);
@@ -4765,7 +4776,12 @@
     __ StoreToSafepointRegisterSlot(r0, scratch0());
   }
   __ tst(scratch0(), Operand(kSmiTagMask));
-  DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed);
+  __ b(ne, &done);
+
+  __ bind(&deopt);
+  DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed);
+
+  __ bind(&done);
 }
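
The added fast-fail matters because instance migration can only succeed for deprecated maps; checking Map::Deprecated up front deopts immediately instead of making a doomed runtime call. A plain-C++ distillation of that control flow (names illustrative):

    #include <cstdio>

    struct Map { bool deprecated; };
    struct HeapObj { const Map* map; };

    bool TryMigrateInstance(HeapObj*) { return false; }  // stand-in runtime call

    const char* DeferredInstanceMigration(HeapObj* obj) {
      if (!obj->map->deprecated) return "deopt: InstanceMigrationFailed";
      if (!TryMigrateInstance(obj)) return "deopt: InstanceMigrationFailed";
      return "migrated";
    }

    int main() {
      Map fresh{false};
      HeapObj obj{&fresh};
      std::printf("%s\n", DeferredInstanceMigration(&obj));
    }
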
 
 
@@ -5116,17 +5132,6 @@
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     final_branch_condition = eq;
 
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
-  } else if (String::Equals(type_name, factory->type##_string())) {  \
-    __ JumpIfSmi(input, false_label);                                \
-    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
-    __ CompareRoot(scratch, Heap::k##Type##MapRootIndex);            \
-    final_branch_condition = eq;
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
-
   } else {
     __ b(false_label);
   }
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.cc b/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 4d8e661..8152924 100644
--- a/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -6,6 +6,7 @@
 
 #include "src/arm64/frames-arm64.h"
 #include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
@@ -618,14 +619,17 @@
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info()->scope()->scope_type());
         __ Mov(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ Push(x1);
+        __ Push(Smi::FromInt(info()->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
     }
@@ -720,7 +724,7 @@
         DCHECK(info()->IsStub());
         frame_is_built_ = true;
         __ Push(lr, fp);
-        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
+        __ Mov(fp, StackFrame::TypeToMarker(StackFrame::STUB));
         __ Push(fp);
         __ Add(fp, __ StackPointer(),
                TypedFrameConstants::kFixedFrameSizeFromFp);
@@ -799,7 +803,7 @@
       UseScratchRegisterScope temps(masm());
       Register stub_marker = temps.AcquireX();
       __ Bind(&needs_frame);
-      __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
+      __ Mov(stub_marker, StackFrame::TypeToMarker(StackFrame::STUB));
       __ Push(cp, stub_marker);
       __ Add(fp, __ StackPointer(), 2 * kPointerSize);
     }
@@ -1614,7 +1618,7 @@
            MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
     __ Ldr(result, MemOperand(previous_fp,
                               CommonFrameConstants::kContextOrFrameTypeOffset));
-    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+    __ Cmp(result, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
     __ Csel(result, fp, previous_fp, ne);
   } else {
     __ Mov(result, fp);
@@ -1861,12 +1865,6 @@
         __ B(eq, true_label);
       }
 
-      if (expected & ToBooleanHint::kSimdValue) {
-        // SIMD value -> true.
-        __ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
-        __ B(eq, true_label);
-      }
-
       if (expected & ToBooleanHint::kHeapNumber) {
         Label not_heap_number;
         __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
@@ -2020,6 +2018,13 @@
 
 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   Register temp = ToRegister(instr->temp());
+  Label deopt, done;
+  // If the map is not deprecated, the migration attempt does not make sense.
+  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ Ldr(temp, FieldMemOperand(temp, Map::kBitField3Offset));
+  __ Tst(temp, Operand(Map::Deprecated::kMask));
+  __ B(eq, &deopt);
+
   {
     PushSafepointRegistersScope scope(this);
     __ Push(object);
@@ -2029,7 +2034,13 @@
         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(x0, temp);
   }
-  DeoptimizeIfSmi(temp, instr, DeoptimizeReason::kInstanceMigrationFailed);
+  __ Tst(temp, Operand(kSmiTagMask));
+  __ B(ne, &done);
+
+  __ bind(&deopt);
+  Deoptimize(instr, DeoptimizeReason::kInstanceMigrationFailed);
+
+  __ bind(&done);
 }
 
 
@@ -2829,7 +2840,8 @@
   __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ Ldr(scratch3,
          MemOperand(scratch2, StandardFrameConstants::kContextOffset));
-  __ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ Cmp(scratch3,
+         Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ B(ne, &no_arguments_adaptor);
 
   // Drop current frame and load arguments count from arguments adaptor frame.
@@ -3243,7 +3255,7 @@
       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
-      __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+      __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
       __ Cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid)));
       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
     }
@@ -4595,7 +4607,7 @@
 
   // TODO(all): if Mov could handle an object in new space, then it could be
   // used here.
-  __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
+  __ LoadHeapObject(scratch1, instr->hydrogen()->declarations());
   __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
   __ Push(scratch1, scratch2);
   __ LoadHeapObject(scratch1, instr->hydrogen()->feedback_vector());
@@ -5435,20 +5447,6 @@
     EmitTestAndBranch(instr, eq, scratch,
                       (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
 
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)       \
-  } else if (String::Equals(type_name, factory->type##_string())) { \
-    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));   \
-    Register map = ToRegister(instr->temp1());                      \
-                                                                    \
-    __ JumpIfSmi(value, false_label);                               \
-    __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));    \
-    __ CompareRoot(map, Heap::k##Type##MapRootIndex);               \
-    EmitBranch(instr, eq);
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
-
   } else {
     __ B(false_label);
   }
diff --git a/src/crankshaft/compilation-phase.cc b/src/crankshaft/compilation-phase.cc
index 4be0b1a..1130070 100644
--- a/src/crankshaft/compilation-phase.cc
+++ b/src/crankshaft/compilation-phase.cc
@@ -6,6 +6,7 @@
 
 #include "src/crankshaft/hydrogen.h"
 #include "src/isolate.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-bce.cc b/src/crankshaft/hydrogen-bce.cc
index 7910c5b..333fafb 100644
--- a/src/crankshaft/hydrogen-bce.cc
+++ b/src/crankshaft/hydrogen-bce.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/crankshaft/hydrogen-bce.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-canonicalize.cc b/src/crankshaft/hydrogen-canonicalize.cc
index 4a07357..20e7717 100644
--- a/src/crankshaft/hydrogen-canonicalize.cc
+++ b/src/crankshaft/hydrogen-canonicalize.cc
@@ -4,7 +4,9 @@
 
 #include "src/crankshaft/hydrogen-canonicalize.h"
 
+#include "src/counters.h"
 #include "src/crankshaft/hydrogen-redundant-phi.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-check-elimination.cc b/src/crankshaft/hydrogen-check-elimination.cc
index 548e4cd..951628e 100644
--- a/src/crankshaft/hydrogen-check-elimination.cc
+++ b/src/crankshaft/hydrogen-check-elimination.cc
@@ -6,6 +6,7 @@
 
 #include "src/crankshaft/hydrogen-alias-analysis.h"
 #include "src/crankshaft/hydrogen-flow-engine.h"
+#include "src/objects-inl.h"
 
 #define GLOBAL 1
 
diff --git a/src/crankshaft/hydrogen-dce.cc b/src/crankshaft/hydrogen-dce.cc
index 3cb9cf4..60b41cd 100644
--- a/src/crankshaft/hydrogen-dce.cc
+++ b/src/crankshaft/hydrogen-dce.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/crankshaft/hydrogen-dce.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-dehoist.cc b/src/crankshaft/hydrogen-dehoist.cc
index 34de94a..0fccecc 100644
--- a/src/crankshaft/hydrogen-dehoist.cc
+++ b/src/crankshaft/hydrogen-dehoist.cc
@@ -5,6 +5,7 @@
 #include "src/crankshaft/hydrogen-dehoist.h"
 
 #include "src/base/safe_math.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-environment-liveness.cc b/src/crankshaft/hydrogen-environment-liveness.cc
index 7965a94..e1eb116 100644
--- a/src/crankshaft/hydrogen-environment-liveness.cc
+++ b/src/crankshaft/hydrogen-environment-liveness.cc
@@ -4,7 +4,7 @@
 
 
 #include "src/crankshaft/hydrogen-environment-liveness.h"
-
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-escape-analysis.cc b/src/crankshaft/hydrogen-escape-analysis.cc
index ab3bff2..91b4ff2 100644
--- a/src/crankshaft/hydrogen-escape-analysis.cc
+++ b/src/crankshaft/hydrogen-escape-analysis.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/crankshaft/hydrogen-escape-analysis.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -142,7 +143,6 @@
 // necessary.
 HValue* HEscapeAnalysisPhase::NewLoadReplacement(
     HLoadNamedField* load, HValue* load_value) {
-  isolate()->counters()->crankshaft_escape_loads_replaced()->Increment();
   HValue* replacement = load_value;
   Representation representation = load->representation();
   if (representation.IsSmiOrInteger32() || representation.IsDouble()) {
@@ -320,8 +320,6 @@
   for (int i = 0; i < max_fixpoint_iteration_count; i++) {
     CollectCapturedValues();
     if (captured_.is_empty()) break;
-    isolate()->counters()->crankshaft_escape_allocs_replaced()->Increment(
-        captured_.length());
     PerformScalarReplacement();
     captured_.Rewind(0);
   }
diff --git a/src/crankshaft/hydrogen-gvn.cc b/src/crankshaft/hydrogen-gvn.cc
index e6ddd75..7032005 100644
--- a/src/crankshaft/hydrogen-gvn.cc
+++ b/src/crankshaft/hydrogen-gvn.cc
@@ -5,6 +5,7 @@
 #include "src/crankshaft/hydrogen-gvn.h"
 
 #include "src/crankshaft/hydrogen.h"
+#include "src/objects-inl.h"
 #include "src/v8.h"
 
 namespace v8 {
diff --git a/src/crankshaft/hydrogen-infer-representation.cc b/src/crankshaft/hydrogen-infer-representation.cc
index 74f264e..bbff24e 100644
--- a/src/crankshaft/hydrogen-infer-representation.cc
+++ b/src/crankshaft/hydrogen-infer-representation.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/crankshaft/hydrogen-infer-representation.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-infer-types.cc b/src/crankshaft/hydrogen-infer-types.cc
index bfd3dd2..a2fd72e 100644
--- a/src/crankshaft/hydrogen-infer-types.cc
+++ b/src/crankshaft/hydrogen-infer-types.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/crankshaft/hydrogen-infer-types.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-instructions.cc b/src/crankshaft/hydrogen-instructions.cc
index be1ac9a..8cf4920 100644
--- a/src/crankshaft/hydrogen-instructions.cc
+++ b/src/crankshaft/hydrogen-instructions.cc
@@ -12,6 +12,7 @@
 #include "src/double.h"
 #include "src/elements.h"
 #include "src/factory.h"
+#include "src/objects-inl.h"
 
 #if V8_TARGET_ARCH_IA32
 #include "src/crankshaft/ia32/lithium-ia32.h"  // NOLINT
@@ -1072,9 +1073,9 @@
 
 
 Representation HBranch::observed_input_representation(int index) {
-  if (expected_input_types_ & (ToBooleanHint::kNull | ToBooleanHint::kReceiver |
-                               ToBooleanHint::kString | ToBooleanHint::kSymbol |
-                               ToBooleanHint::kSimdValue)) {
+  if (expected_input_types_ &
+      (ToBooleanHint::kNull | ToBooleanHint::kReceiver |
+       ToBooleanHint::kString | ToBooleanHint::kSymbol)) {
     return Representation::Tagged();
   }
   if (expected_input_types_ & ToBooleanHint::kUndefined) {
@@ -1244,17 +1245,6 @@
     }
     case SYMBOL_TYPE:
       return heap->symbol_string();
-    case SIMD128_VALUE_TYPE: {
-      Unique<Map> map = constant->ObjectMap();
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
-  if (map.IsKnownGlobal(heap->type##_map())) {                \
-    return heap->type##_string();                             \
-  }
-      SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-      UNREACHABLE();
-      return nullptr;
-    }
     default:
       if (constant->IsUndetectable()) return heap->undefined_string();
       if (constant->IsCallable()) return heap->function_string();
@@ -2177,6 +2167,11 @@
                  InstanceTypeField::encode(kUnknownInstanceType)),
       int32_value_(0) {
   DCHECK_EQ(kHoleNaN, special);
+  // Manipulating the signaling NaN used for the hole in C++, e.g. with
+  // bit_cast, will change its value on ia32 (the x87 stack is used to return
+  // values, and stores to the stack silently clear the signaling bit).
+  // Therefore we have to use memcpy for initializing |double_value_| with
+  // kHoleNanInt64 here.
   std::memcpy(&double_value_, &kHoleNanInt64, sizeof(double_value_));
   Initialize(Representation::Double());
 }
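
The comment is worth a concrete illustration: std::memcpy of the raw bits is the portable way to materialize a specific (here signaling) NaN payload, because moving the value through floating-point registers may quiet it. A stand-alone sketch with an illustrative bit pattern, not V8's actual kHoleNanInt64:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // Illustrative signaling-NaN bit pattern.
      const uint64_t kHoleBits = 0x7FF4000000000000ULL;

      double hole;
      std::memcpy(&hole, &kHoleBits, sizeof hole);  // copies the exact bits

      uint64_t round_trip;
      std::memcpy(&round_trip, &hole, sizeof round_trip);
      std::printf("bits preserved: %s\n", round_trip == kHoleBits ? "yes" : "no");
    }
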
diff --git a/src/crankshaft/hydrogen-instructions.h b/src/crankshaft/hydrogen-instructions.h
index 9b9e674..7059425 100644
--- a/src/crankshaft/hydrogen-instructions.h
+++ b/src/crankshaft/hydrogen-instructions.h
@@ -1944,14 +1944,12 @@
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HDeclareGlobals,
                                               Handle<FixedArray>, int,
-                                              Handle<TypeFeedbackVector>);
+                                              Handle<FeedbackVector>);
 
   HValue* context() { return OperandAt(0); }
-  Handle<FixedArray> pairs() const { return pairs_; }
+  Handle<FixedArray> declarations() const { return declarations_; }
   int flags() const { return flags_; }
-  Handle<TypeFeedbackVector> feedback_vector() const {
-    return feedback_vector_;
-  }
+  Handle<FeedbackVector> feedback_vector() const { return feedback_vector_; }
 
   DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
 
@@ -1960,18 +1958,18 @@
   }
 
  private:
-  HDeclareGlobals(HValue* context, Handle<FixedArray> pairs, int flags,
-                  Handle<TypeFeedbackVector> feedback_vector)
+  HDeclareGlobals(HValue* context, Handle<FixedArray> declarations, int flags,
+                  Handle<FeedbackVector> feedback_vector)
       : HUnaryOperation(context),
-        pairs_(pairs),
+        declarations_(declarations),
         feedback_vector_(feedback_vector),
         flags_(flags) {
     set_representation(Representation::Tagged());
     SetAllSideEffects();
   }
 
-  Handle<FixedArray> pairs_;
-  Handle<TypeFeedbackVector> feedback_vector_;
+  Handle<FixedArray> declarations_;
+  Handle<FeedbackVector> feedback_vector_;
   int flags_;
 };
 
@@ -3088,11 +3086,8 @@
     return double_value_;
   }
   uint64_t DoubleValueAsBits() const {
-    uint64_t bits;
     DCHECK(HasDoubleValue());
-    STATIC_ASSERT(sizeof(bits) == sizeof(double_value_));
-    std::memcpy(&bits, &double_value_, sizeof(bits));
-    return bits;
+    return bit_cast<uint64_t>(double_value_);
   }
   bool IsTheHole() const {
     if (HasDoubleValue() && DoubleValueAsBits() == kHoleNanInt64) {
@@ -5127,10 +5122,6 @@
     return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
   }
 
-  static HObjectAccess ForLiteralsPointer() {
-    return HObjectAccess(kInobject, JSFunction::kLiteralsOffset);
-  }
-
   static HObjectAccess ForNextFunctionLinkPointer() {
     return HObjectAccess(kInobject, JSFunction::kNextFunctionLinkOffset);
   }
diff --git a/src/crankshaft/hydrogen-load-elimination.cc b/src/crankshaft/hydrogen-load-elimination.cc
index 88963fc..99f4947 100644
--- a/src/crankshaft/hydrogen-load-elimination.cc
+++ b/src/crankshaft/hydrogen-load-elimination.cc
@@ -7,6 +7,7 @@
 #include "src/crankshaft/hydrogen-alias-analysis.h"
 #include "src/crankshaft/hydrogen-flow-engine.h"
 #include "src/crankshaft/hydrogen-instructions.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-mark-unreachable.cc b/src/crankshaft/hydrogen-mark-unreachable.cc
index 4e1dd68..2393b5a 100644
--- a/src/crankshaft/hydrogen-mark-unreachable.cc
+++ b/src/crankshaft/hydrogen-mark-unreachable.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/crankshaft/hydrogen-mark-unreachable.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-osr.cc b/src/crankshaft/hydrogen-osr.cc
index 607bfbd..093f94b 100644
--- a/src/crankshaft/hydrogen-osr.cc
+++ b/src/crankshaft/hydrogen-osr.cc
@@ -5,6 +5,7 @@
 #include "src/crankshaft/hydrogen-osr.h"
 
 #include "src/crankshaft/hydrogen.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-range-analysis.cc b/src/crankshaft/hydrogen-range-analysis.cc
index a489e01..50592d3 100644
--- a/src/crankshaft/hydrogen-range-analysis.cc
+++ b/src/crankshaft/hydrogen-range-analysis.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/crankshaft/hydrogen-range-analysis.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-redundant-phi.cc b/src/crankshaft/hydrogen-redundant-phi.cc
index ef8b291..08644c8 100644
--- a/src/crankshaft/hydrogen-redundant-phi.cc
+++ b/src/crankshaft/hydrogen-redundant-phi.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/crankshaft/hydrogen-redundant-phi.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-removable-simulates.cc b/src/crankshaft/hydrogen-removable-simulates.cc
index ceef743..e68168c 100644
--- a/src/crankshaft/hydrogen-removable-simulates.cc
+++ b/src/crankshaft/hydrogen-removable-simulates.cc
@@ -6,6 +6,7 @@
 
 #include "src/crankshaft/hydrogen-flow-engine.h"
 #include "src/crankshaft/hydrogen-instructions.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-representation-changes.cc b/src/crankshaft/hydrogen-representation-changes.cc
index 4d74df4..5fd7261 100644
--- a/src/crankshaft/hydrogen-representation-changes.cc
+++ b/src/crankshaft/hydrogen-representation-changes.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/crankshaft/hydrogen-representation-changes.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-sce.cc b/src/crankshaft/hydrogen-sce.cc
index 91e91d2..a08190d 100644
--- a/src/crankshaft/hydrogen-sce.cc
+++ b/src/crankshaft/hydrogen-sce.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/crankshaft/hydrogen-sce.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-store-elimination.cc b/src/crankshaft/hydrogen-store-elimination.cc
index 57c7880..b081c21 100644
--- a/src/crankshaft/hydrogen-store-elimination.cc
+++ b/src/crankshaft/hydrogen-store-elimination.cc
@@ -5,6 +5,7 @@
 #include "src/crankshaft/hydrogen-store-elimination.h"
 
 #include "src/crankshaft/hydrogen-instructions.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen-types.cc b/src/crankshaft/hydrogen-types.cc
index 684e6ad..ad2d461 100644
--- a/src/crankshaft/hydrogen-types.cc
+++ b/src/crankshaft/hydrogen-types.cc
@@ -6,6 +6,7 @@
 
 #include "src/field-type.h"
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 #include "src/ostreams.h"
 
 namespace v8 {
diff --git a/src/crankshaft/hydrogen-uint32-analysis.cc b/src/crankshaft/hydrogen-uint32-analysis.cc
index ac4a63f..de31a61 100644
--- a/src/crankshaft/hydrogen-uint32-analysis.cc
+++ b/src/crankshaft/hydrogen-uint32-analysis.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/crankshaft/hydrogen-uint32-analysis.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/hydrogen.cc b/src/crankshaft/hydrogen.cc
index 754da77..d55bb37 100644
--- a/src/crankshaft/hydrogen.cc
+++ b/src/crankshaft/hydrogen.cc
@@ -118,7 +118,7 @@
 
 HCompilationJob::Status HCompilationJob::PrepareJobImpl() {
   if (!isolate()->use_crankshaft() ||
-      info()->shared_info()->dont_crankshaft()) {
+      info()->shared_info()->must_use_ignition_turbo()) {
     // Crankshaft is entirely disabled.
     return FAILED;
   }
@@ -142,7 +142,6 @@
     }
   }
   DCHECK(info()->shared_info()->has_deoptimization_support());
-  DCHECK(!info()->shared_info()->never_compiled());
 
   // Check the whitelist for Crankshaft.
   if (!info()->shared_info()->PassesFilter(FLAG_hydrogen_filter)) {
@@ -1363,10 +1362,6 @@
   DCHECK(!FLAG_minimal);
   graph_ = new (zone()) HGraph(info_, descriptor_);
   if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
-  if (!info_->IsStub() && is_tracking_positions()) {
-    TraceInlinedFunction(info_->shared_info(), SourcePosition::Unknown(),
-                         SourcePosition::kNotInlined);
-  }
   CompilationPhase phase("H_Block building", info_);
   set_current_block(graph()->entry_block());
   if (!BuildGraph()) return NULL;
@@ -1374,49 +1369,6 @@
   return graph_;
 }
 
-void HGraphBuilder::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
-                                         SourcePosition position,
-                                         int inlining_id) {
-  DCHECK(is_tracking_positions());
-
-  if (!shared->script()->IsUndefined(isolate())) {
-    Handle<Script> script(Script::cast(shared->script()), isolate());
-
-    if (FLAG_hydrogen_track_positions &&
-        !script->source()->IsUndefined(isolate())) {
-      CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
-      Object* source_name = script->name();
-      OFStream os(tracing_scope.file());
-      os << "--- FUNCTION SOURCE (";
-      if (source_name->IsString()) {
-        os << String::cast(source_name)->ToCString().get() << ":";
-      }
-      os << shared->DebugName()->ToCString().get() << ") id{";
-      os << info_->optimization_id() << "," << inlining_id << "} ---\n";
-      {
-        DisallowHeapAllocation no_allocation;
-        int start = shared->start_position();
-        int len = shared->end_position() - start;
-        String::SubStringRange source(String::cast(script->source()), start,
-                                      len);
-        for (const auto& c : source) {
-          os << AsReversiblyEscapedUC16(c);
-        }
-      }
-
-      os << "\n--- END ---\n";
-    }
-  }
-
-  if (FLAG_hydrogen_track_positions &&
-      inlining_id != SourcePosition::kNotInlined) {
-    CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
-    OFStream os(tracing_scope.file());
-    os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
-       << info_->optimization_id() << "," << inlining_id << "} AS "
-       << inlining_id << " AT " << position.ScriptOffset() << std::endl;
-  }
-}
 
 HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
   DCHECK(current_block() != NULL);
@@ -1764,12 +1716,12 @@
     details_index->ClearFlag(HValue::kCanOverflow);
     HValue* details = Add<HLoadKeyed>(elements, details_index, nullptr, nullptr,
                                       FAST_ELEMENTS);
-    int details_mask = PropertyDetails::TypeField::kMask;
+    int details_mask = PropertyDetails::KindField::kMask;
     details = AddUncasted<HBitwise>(Token::BIT_AND, details,
                                     Add<HConstant>(details_mask));
     IfBuilder details_compare(this);
-    details_compare.If<HCompareNumericAndBranch>(
-        details, graph()->GetConstant0(), Token::EQ);
+    details_compare.If<HCompareNumericAndBranch>(details, New<HConstant>(kData),
+                                                 Token::EQ);
     details_compare.Then();
     HValue* result_index =
         AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 1));
@@ -2289,6 +2241,9 @@
   IfBuilder if_createcons(this);
   if_createcons.If<HCompareNumericAndBranch>(
       length, Add<HConstant>(ConsString::kMinLength), Token::GTE);
+  if_createcons.And();
+  if_createcons.If<HCompareNumericAndBranch>(
+      length, Add<HConstant>(ConsString::kMaxLength), Token::LTE);
   if_createcons.Then();
   {
     // Create a cons string.
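
The new conjunct bounds cons-string creation from above as well as below; lengths outside [ConsString::kMinLength, ConsString::kMaxLength] take the non-cons path. A sketch of the resulting guard, with placeholder constants rather than the real ConsString values:

    #include <cstdio>

    const int kConsMinLength = 13;             // placeholder
    const int kConsMaxLength = (1 << 28) - 16; // placeholder

    bool ShouldCreateConsString(int length) {
      return length >= kConsMinLength && length <= kConsMaxLength;
    }

    int main() {
      std::printf("length 100 -> %s\n",
                  ShouldCreateConsString(100) ? "cons string" : "flat copy");
    }
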
@@ -3994,7 +3949,7 @@
 
 
 bool HOptimizedGraphBuilder::BuildGraph() {
-  if (IsSubclassConstructor(current_info()->literal()->kind())) {
+  if (IsDerivedConstructor(current_info()->literal()->kind())) {
     Bailout(kSuperReference);
     return false;
   }
@@ -5099,18 +5054,23 @@
   // space for nested functions that don't need pretenuring.
   HConstant* shared_info_value = Add<HConstant>(shared_info);
   HInstruction* instr;
+  Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
+  HValue* vector_value = Add<HConstant>(vector);
+  int index = FeedbackVector::GetIndex(expr->LiteralFeedbackSlot());
+  HValue* index_value = Add<HConstant>(index);
   if (!expr->pretenure()) {
-    FastNewClosureStub stub(isolate());
-    FastNewClosureDescriptor descriptor(isolate());
-    HValue* values[] = {shared_info_value};
-    HConstant* stub_value = Add<HConstant>(stub.GetCode());
-    instr = New<HCallWithDescriptor>(stub_value, 0, descriptor,
+    Callable callable = CodeFactory::FastNewClosure(isolate());
+    HValue* values[] = {shared_info_value, vector_value, index_value};
+    HConstant* stub_value = Add<HConstant>(callable.code());
+    instr = New<HCallWithDescriptor>(stub_value, 0, callable.descriptor(),
                                      ArrayVector(values));
   } else {
     Add<HPushArguments>(shared_info_value);
+    Add<HPushArguments>(vector_value);
+    Add<HPushArguments>(index_value);
     Runtime::FunctionId function_id =
         expr->pretenure() ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure;
-    instr = New<HCallRuntime>(Runtime::FunctionForId(function_id), 1);
+    instr = New<HCallRuntime>(Runtime::FunctionForId(function_id), 3);
   }
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
@@ -5326,15 +5286,17 @@
         InlineGlobalPropertyLoad(&it, expr->id());
         return;
       } else {
-        Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+        Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
+        FeedbackSlot slot = expr->VariableFeedbackSlot();
+        DCHECK(vector->IsLoadGlobalIC(slot));
 
         HValue* vector_value = Add<HConstant>(vector);
-        HValue* slot_value =
-            Add<HConstant>(vector->GetIndex(expr->VariableFeedbackSlot()));
+        HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
         Callable callable = CodeFactory::LoadGlobalICInOptimizedCode(
             isolate(), ast_context()->typeof_mode());
         HValue* stub = Add<HConstant>(callable.code());
-        HValue* values[] = {slot_value, vector_value};
+        HValue* name = Add<HConstant>(variable->name());
+        HValue* values[] = {name, slot_value, vector_value};
         HCallWithDescriptor* instr = New<HCallWithDescriptor>(
             Code::LOAD_GLOBAL_IC, stub, 0, callable.descriptor(),
             ArrayVector(values));
@@ -5393,7 +5355,8 @@
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
   Callable callable = CodeFactory::FastCloneRegExp(isolate());
-  HValue* values[] = {AddThisFunction(), Add<HConstant>(expr->literal_index()),
+  int index = FeedbackVector::GetIndex(expr->literal_slot());
+  HValue* values[] = {AddThisFunction(), Add<HConstant>(index),
                       Add<HConstant>(expr->pattern()),
                       Add<HConstant>(expr->flags())};
   HConstant* stub_value = Add<HConstant>(callable.code());
@@ -5446,7 +5409,9 @@
           }
         }
       }
-    } else if (!boilerplate->HasFastDoubleElements()) {
+    } else if (boilerplate->HasFastDoubleElements()) {
+      if (elements->Size() > kMaxRegularHeapObjectSize) return false;
+    } else {
       return false;
     }
   }
@@ -5460,7 +5425,8 @@
     int limit = boilerplate->map()->NumberOfOwnDescriptors();
     for (int i = 0; i < limit; i++) {
       PropertyDetails details = descriptors->GetDetails(i);
-      if (details.type() != DATA) continue;
+      if (details.location() != kField) continue;
+      DCHECK_EQ(kData, details.kind());
       if ((*max_properties)-- == 0) return false;
       FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
       if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
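
The details.type() != DATA test becomes a two-axis check because PropertyDetails now separates what a property is (kind: data vs. accessor) from where its value lives (location: field vs. descriptor). A toy sketch of the split; the bit layout here is invented:

    #include <cstdio>

    enum Kind { kData = 0, kAccessor = 1 };         // what the property is
    enum Location { kField = 0, kDescriptor = 1 };  // where its value lives

    struct Details {
      unsigned bits;
      Kind kind() const { return static_cast<Kind>(bits & 1); }
      Location location() const { return static_cast<Location>((bits >> 1) & 1); }
    };

    int main() {
      Details d{0};  // a data property stored in a field
      std::printf("fast-literal copyable: %d\n",
                  d.kind() == kData && d.location() == kField);
    }
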
@@ -5491,7 +5457,7 @@
   // Check whether to use fast or slow deep-copying for boilerplate.
   int max_properties = kMaxFastLiteralProperties;
   Handle<Object> literals_cell(
-      closure->literals()->literal(expr->literal_index()), isolate());
+      closure->feedback_vector()->Get(expr->literal_slot()), isolate());
   Handle<AllocationSite> site;
   Handle<JSObject> boilerplate;
   if (!literals_cell->IsUndefined(isolate())) {
@@ -5509,8 +5475,9 @@
     site_context.ExitScope(site, boilerplate);
   } else {
     NoObservableSideEffectsScope no_effects(this);
-    Handle<FixedArray> constant_properties = expr->constant_properties();
-    int literal_index = expr->literal_index();
+    Handle<BoilerplateDescription> constant_properties =
+        expr->GetOrBuildConstantProperties(isolate());
+    int literal_index = FeedbackVector::GetIndex(expr->literal_slot());
     int flags = expr->ComputeFlags(true);
 
     Add<HPushArguments>(AddThisFunction(), Add<HConstant>(literal_index),
@@ -5548,7 +5515,7 @@
             Handle<Map> map = property->GetReceiverType();
             Handle<String> name = key->AsPropertyName();
             HValue* store;
-            FeedbackVectorSlot slot = property->GetSlot();
+            FeedbackSlot slot = property->GetSlot();
             if (map.is_null()) {
               // If we don't know the monomorphic type, do a generic store.
               CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot, literal,
@@ -5558,9 +5525,11 @@
               if (info.CanAccessMonomorphic()) {
                 HValue* checked_literal = Add<HCheckMaps>(literal, map);
                 DCHECK(!info.IsAccessorConstant());
+                info.MarkAsInitializingStore();
                 store = BuildMonomorphicAccess(
                     &info, literal, checked_literal, value,
                     BailoutId::None(), BailoutId::None());
+                DCHECK_NOT_NULL(store);
               } else {
                 CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot,
                                                       literal, name, value));
@@ -5608,10 +5577,9 @@
   HInstruction* literal;
 
   Handle<AllocationSite> site;
-  Handle<LiteralsArray> literals(environment()->closure()->literals(),
-                                 isolate());
-  Handle<Object> literals_cell(literals->literal(expr->literal_index()),
-                               isolate());
+  Handle<FeedbackVector> vector(environment()->closure()->feedback_vector(),
+                                isolate());
+  Handle<Object> literals_cell(vector->Get(expr->literal_slot()), isolate());
   Handle<JSObject> boilerplate_object;
   if (!literals_cell->IsUndefined(isolate())) {
     DCHECK(literals_cell->IsAllocationSite());
@@ -5632,8 +5600,9 @@
     site_context.ExitScope(site, boilerplate_object);
   } else {
     NoObservableSideEffectsScope no_effects(this);
-    Handle<FixedArray> constants = expr->constant_elements();
-    int literal_index = expr->literal_index();
+    Handle<ConstantElementsPair> constants =
+        expr->GetOrBuildConstantElements(isolate());
+    int literal_index = FeedbackVector::GetIndex(expr->literal_slot());
     int flags = expr->ComputeFlags(true);
 
     Add<HPushArguments>(AddThisFunction(), Add<HConstant>(literal_index),
@@ -5711,7 +5680,7 @@
 HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
     PropertyAccessInfo* info,
     HValue* checked_object) {
-  // See if this is a load for an immutable property
+  // Check if this is a load of an immutable or constant property.
   if (checked_object->ActualValue()->IsConstant()) {
     Handle<Object> object(
         HConstant::cast(checked_object->ActualValue())->handle(isolate()));
@@ -5719,9 +5688,20 @@
     if (object->IsJSObject()) {
       LookupIterator it(object, info->name(),
                         LookupIterator::OWN_SKIP_INTERCEPTOR);
-      Handle<Object> value = JSReceiver::GetDataProperty(&it);
-      if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
-        return New<HConstant>(value);
+      if (it.IsFound()) {
+        bool is_readonly_non_configurable =
+            it.IsReadOnly() && !it.IsConfigurable();
+        if (is_readonly_non_configurable ||
+            (FLAG_track_constant_fields && info->IsDataConstantField())) {
+          Handle<Object> value = JSReceiver::GetDataProperty(&it);
+          if (!is_readonly_non_configurable) {
+            DCHECK(!it.is_dictionary_holder());
+            // Add dependency on the map that introduced the field.
+            Handle<Map> field_owner_map = it.GetFieldOwnerMap();
+            top_info()->dependencies()->AssumeFieldOwner(field_owner_map);
+          }
+          return New<HConstant>(value);
+        }
       }
     }
   }
@@ -5750,15 +5730,17 @@
       checked_object, checked_object, access, maps, info->field_type());
 }
 
-
-HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
-    PropertyAccessInfo* info,
-    HValue* checked_object,
-    HValue* value) {
+HValue* HOptimizedGraphBuilder::BuildStoreNamedField(PropertyAccessInfo* info,
+                                                     HValue* checked_object,
+                                                     HValue* value) {
   bool transition_to_field = info->IsTransition();
   // TODO(verwaest): Move this logic into PropertyAccessInfo.
   HObjectAccess field_access = info->access();
 
+  bool store_to_constant_field = FLAG_track_constant_fields &&
+                                 info->StoreMode() != INITIALIZING_STORE &&
+                                 info->IsDataConstantField();
+
   HStoreNamedField *instr;
   if (field_access.representation().IsDouble() &&
       (!FLAG_unbox_double_fields || !field_access.IsInobject())) {
@@ -5784,24 +5766,57 @@
       // Already holds a HeapNumber; load the box and write its value field.
       HInstruction* heap_number =
           Add<HLoadNamedField>(checked_object, nullptr, heap_number_access);
-      instr = New<HStoreNamedField>(heap_number,
-                                    HObjectAccess::ForHeapNumberValue(),
-                                    value, STORE_TO_INITIALIZED_ENTRY);
+
+      if (store_to_constant_field) {
+        // If the field is constant, check that the value we are going to
+        // store matches the current value.
+        HInstruction* current_value = Add<HLoadNamedField>(
+            heap_number, nullptr, HObjectAccess::ForHeapNumberValue());
+        IfBuilder value_checker(this);
+        value_checker.IfNot<HCompareNumericAndBranch>(current_value, value,
+                                                      Token::EQ);
+        value_checker.ThenDeopt(DeoptimizeReason::kValueMismatch);
+        value_checker.End();
+        return nullptr;
+
+      } else {
+        instr = New<HStoreNamedField>(heap_number,
+                                      HObjectAccess::ForHeapNumberValue(),
+                                      value, STORE_TO_INITIALIZED_ENTRY);
+      }
     }
   } else {
-    if (field_access.representation().IsHeapObject()) {
-      BuildCheckHeapObject(value);
-    }
+    if (store_to_constant_field) {
+      // If the field is constant, check that the value we are going to store
+      // matches the current value.
+      HInstruction* current_value = Add<HLoadNamedField>(
+          checked_object->ActualValue(), checked_object, field_access);
 
-    if (!info->field_maps()->is_empty()) {
-      DCHECK(field_access.representation().IsHeapObject());
-      value = Add<HCheckMaps>(value, info->field_maps());
-    }
+      IfBuilder value_checker(this);
+      if (field_access.representation().IsDouble()) {
+        value_checker.IfNot<HCompareNumericAndBranch>(current_value, value,
+                                                      Token::EQ);
+      } else {
+        value_checker.IfNot<HCompareObjectEqAndBranch>(current_value, value);
+      }
+      value_checker.ThenDeopt(DeoptimizeReason::kValueMismatch);
+      value_checker.End();
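+      // The value matches the constant field; no store is needed.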
+      return nullptr;
 
-    // This is a normal store.
-    instr = New<HStoreNamedField>(
-        checked_object->ActualValue(), field_access, value,
-        transition_to_field ? INITIALIZING_STORE : STORE_TO_INITIALIZED_ENTRY);
+    } else {
+      if (field_access.representation().IsHeapObject()) {
+        BuildCheckHeapObject(value);
+      }
+
+      if (!info->field_maps()->is_empty()) {
+        DCHECK(field_access.representation().IsHeapObject());
+        value = Add<HCheckMaps>(value, info->field_maps());
+      }
+
+      // This is a normal store.
+      instr = New<HStoreNamedField>(checked_object->ActualValue(), field_access,
+                                    value, info->StoreMode());
+    }
   }
 
   if (transition_to_field) {
@@ -6185,9 +6200,8 @@
   }
 }
 
-
 void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
-    PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
+    PropertyAccessType access_type, Expression* expr, FeedbackSlot slot,
     BailoutId ast_id, BailoutId return_id, HValue* object, HValue* value,
     SmallMapList* maps, Handle<Name> name) {
   // Something did not match; must use a polymorphic load.
@@ -6385,8 +6399,8 @@
 }
 
 void HOptimizedGraphBuilder::BuildStore(Expression* expr, Property* prop,
-                                        FeedbackVectorSlot slot,
-                                        BailoutId ast_id, BailoutId return_id,
+                                        FeedbackSlot slot, BailoutId ast_id,
+                                        BailoutId return_id,
                                         bool is_uninitialized) {
   if (!prop->key()->IsPropertyName()) {
     // Keyed store.
@@ -6505,8 +6519,10 @@
 // Because not every expression has a position and there is no common
 // superclass of Assignment and CountOperation, we cannot just pass the
 // owning expression instead of position and ast_id separately.
-void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
-    Variable* var, HValue* value, FeedbackVectorSlot slot, BailoutId ast_id) {
+void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
+                                                            HValue* value,
+                                                            FeedbackSlot slot,
+                                                            BailoutId ast_id) {
   Handle<JSGlobalObject> global(current_info()->global_object());
 
   // Lookup in script contexts.
@@ -6552,11 +6568,12 @@
     HValue* global_object = Add<HLoadNamedField>(
         BuildGetNativeContext(), nullptr,
         HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
-    Handle<TypeFeedbackVector> vector =
+    Handle<FeedbackVector> vector =
         handle(current_feedback_vector(), isolate());
     HValue* name = Add<HConstant>(var->name());
     HValue* vector_value = Add<HConstant>(vector);
     HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
+    DCHECK_EQ(vector->GetLanguageMode(slot), function_language_mode());
     Callable callable = CodeFactory::StoreICInOptimizedCode(
         isolate(), function_language_mode());
     HValue* stub = Add<HConstant>(callable.code());
@@ -6852,16 +6869,15 @@
                               HObjectAccess::ForStringLength());
 }
 
-
 HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
-    PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
+    PropertyAccessType access_type, Expression* expr, FeedbackSlot slot,
     HValue* object, Handle<Name> name, HValue* value, bool is_uninitialized) {
   if (is_uninitialized) {
     Add<HDeoptimize>(
         DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess,
         Deoptimizer::SOFT);
   }
-  Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+  Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
 
   HValue* key = Add<HConstant>(name);
   HValue* vector_value = Add<HConstant>(vector);
@@ -6870,6 +6886,7 @@
   if (access_type == LOAD) {
     HValue* values[] = {object, key, slot_value, vector_value};
     if (!expr->AsProperty()->key()->IsPropertyName()) {
+      DCHECK(vector->IsKeyedLoadIC(slot));
       // It's possible that a keyed load of a constant string was converted
       // to a named load. Here, at the last minute, we need to make sure to
       // use a generic Keyed Load if we are using the type vector, because
@@ -6881,6 +6898,7 @@
                                    callable.descriptor(), ArrayVector(values));
       return result;
     }
+    DCHECK(vector->IsLoadIC(slot));
     Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
     HValue* stub = Add<HConstant>(callable.code());
     HCallWithDescriptor* result = New<HCallWithDescriptor>(
@@ -6889,11 +6907,12 @@
 
   } else {
     HValue* values[] = {object, key, value, slot_value, vector_value};
-    if (vector->GetKind(slot) == FeedbackVectorSlotKind::KEYED_STORE_IC) {
+    if (vector->IsKeyedStoreIC(slot)) {
       // It's possible that a keyed store of a constant string was converted
       // to a named store. Here, at the last minute, we need to make sure to
       // use a generic Keyed Store if we are using the type vector, because
       // it has to share information with full code.
+      DCHECK_EQ(vector->GetLanguageMode(slot), function_language_mode());
       Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
           isolate(), function_language_mode());
       HValue* stub = Add<HConstant>(callable.code());
@@ -6902,20 +6921,29 @@
                                    callable.descriptor(), ArrayVector(values));
       return result;
     }
-    Callable callable = CodeFactory::StoreICInOptimizedCode(
-        isolate(), function_language_mode());
-    HValue* stub = Add<HConstant>(callable.code());
-    HCallWithDescriptor* result = New<HCallWithDescriptor>(
-        Code::STORE_IC, stub, 0, callable.descriptor(), ArrayVector(values));
+    HCallWithDescriptor* result;
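+    // Named stores dispatch to either the StoreOwnIC (used when defining own
+    // properties, e.g. in object literals) or the regular StoreIC, depending
+    // on the feedback slot kind.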
+    if (vector->IsStoreOwnIC(slot)) {
+      Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
+      HValue* stub = Add<HConstant>(callable.code());
+      result = New<HCallWithDescriptor>(
+          Code::STORE_IC, stub, 0, callable.descriptor(), ArrayVector(values));
+    } else {
+      DCHECK(vector->IsStoreIC(slot));
+      DCHECK_EQ(vector->GetLanguageMode(slot), function_language_mode());
+      Callable callable = CodeFactory::StoreICInOptimizedCode(
+          isolate(), function_language_mode());
+      HValue* stub = Add<HConstant>(callable.code());
+      result = New<HCallWithDescriptor>(
+          Code::STORE_IC, stub, 0, callable.descriptor(), ArrayVector(values));
+    }
     return result;
   }
 }
 
-
 HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
-    PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
+    PropertyAccessType access_type, Expression* expr, FeedbackSlot slot,
     HValue* object, HValue* key, HValue* value) {
-  Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+  Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
   HValue* vector_value = Add<HConstant>(vector);
   HValue* slot_value = Add<HConstant>(vector->GetIndex(slot));
 
@@ -7108,9 +7136,8 @@
   return instr;
 }
 
-
 HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
-    Expression* expr, FeedbackVectorSlot slot, HValue* object, HValue* key,
+    Expression* expr, FeedbackSlot slot, HValue* object, HValue* key,
     HValue* val, SmallMapList* maps, PropertyAccessType access_type,
     KeyedAccessStoreMode store_mode, bool* has_side_effects) {
   *has_side_effects = false;
@@ -7245,12 +7272,11 @@
 }
 
 HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
-    HValue* obj, HValue* key, HValue* val, Expression* expr,
-    FeedbackVectorSlot slot, BailoutId ast_id, BailoutId return_id,
-    PropertyAccessType access_type, bool* has_side_effects) {
+    HValue* obj, HValue* key, HValue* val, Expression* expr, FeedbackSlot slot,
+    BailoutId ast_id, BailoutId return_id, PropertyAccessType access_type,
+    bool* has_side_effects) {
   // A keyed name access with type feedback may contain the name.
-  Handle<TypeFeedbackVector> vector =
-      handle(current_feedback_vector(), isolate());
+  Handle<FeedbackVector> vector = handle(current_feedback_vector(), isolate());
   HValue* expected_key = key;
   if (!key->ActualValue()->IsConstant()) {
     Name* name = nullptr;
@@ -7474,8 +7500,8 @@
 
 HValue* HOptimizedGraphBuilder::BuildNamedAccess(
     PropertyAccessType access, BailoutId ast_id, BailoutId return_id,
-    Expression* expr, FeedbackVectorSlot slot, HValue* object,
-    Handle<Name> name, HValue* value, bool is_uninitialized) {
+    Expression* expr, FeedbackSlot slot, HValue* object, Handle<Name> name,
+    HValue* value, bool is_uninitialized) {
   SmallMapList* maps;
   ComputeReceiverTypes(expr, object, &maps, this);
   DCHECK(maps != NULL);
@@ -7553,6 +7579,12 @@
     HValue* string = Pop();
     HInstruction* char_code = BuildStringCharCodeAt(string, index);
     AddInstruction(char_code);
+    if (char_code->IsConstant()) {
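+      // A constant NaN char code means the index is statically known to be
+      // out of bounds; deoptimize eagerly instead of materializing a string
+      // from it.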
+      HConstant* c_code = HConstant::cast(char_code);
+      if (c_code->HasNumberValue() && std::isnan(c_code->DoubleValue())) {
+        Add<HDeoptimize>(DeoptimizeReason::kOutOfBounds, Deoptimizer::EAGER);
+      }
+    }
     instr = NewUncasted<HStringCharFromCode>(char_code);
 
   } else if (expr->key()->IsPropertyName()) {
@@ -7606,27 +7638,38 @@
   BuildLoad(expr, expr->id());
 }
 
-
-HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant) {
+HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant,
+                                                   bool ensure_no_elements) {
   HCheckMaps* check = Add<HCheckMaps>(
       Add<HConstant>(constant), handle(constant->map()));
   check->ClearDependsOnFlag(kElementsKind);
+  if (ensure_no_elements) {
+    // TODO(ishell): remove this once we support NO_ELEMENTS elements kind.
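+    // Deoptimize unless the elements backing store is the canonical empty
+    // fixed array, i.e. the object has no elements of its own.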
+    HValue* elements = AddLoadElements(check, nullptr);
+    HValue* empty_elements =
+        Add<HConstant>(isolate()->factory()->empty_fixed_array());
+    IfBuilder if_empty(this);
+    if_empty.IfNot<HCompareObjectEqAndBranch>(elements, empty_elements);
+    if_empty.ThenDeopt(DeoptimizeReason::kWrongMap);
+    if_empty.End();
+  }
   return check;
 }
 
-
 HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
-                                                     Handle<JSObject> holder) {
+                                                     Handle<JSObject> holder,
+                                                     bool ensure_no_elements) {
   PrototypeIterator iter(isolate(), prototype, kStartAtReceiver);
   while (holder.is_null() ||
          !PrototypeIterator::GetCurrent(iter).is_identical_to(holder)) {
-    BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter));
+    BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter),
+                          ensure_no_elements);
     iter.Advance();
     if (iter.IsAtEnd()) {
       return NULL;
     }
   }
-  return BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter));
+  return BuildConstantMapCheck(holder);
 }
 
 
@@ -7685,21 +7728,21 @@
 HInstruction* HOptimizedGraphBuilder::NewCallFunctionViaIC(
     HValue* function, int argument_count, TailCallMode syntactic_tail_call_mode,
     ConvertReceiverMode convert_mode, TailCallMode tail_call_mode,
-    FeedbackVectorSlot slot) {
+    FeedbackSlot slot) {
   if (syntactic_tail_call_mode == TailCallMode::kAllow) {
     BuildEnsureCallable(function);
   } else {
     DCHECK_EQ(TailCallMode::kDisallow, tail_call_mode);
   }
   int arity = argument_count - 1;
-  Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+  Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
   HValue* arity_val = Add<HConstant>(arity);
   HValue* index_val = Add<HConstant>(vector->GetIndex(slot));
   HValue* vector_val = Add<HConstant>(vector);
 
   HValue* op_vals[] = {function, arity_val, index_val, vector_val};
-  Callable callable = CodeFactory::CallICInOptimizedCode(
-      isolate(), convert_mode, tail_call_mode);
+  Callable callable =
+      CodeFactory::CallIC(isolate(), convert_mode, tail_call_mode);
   HConstant* stub = Add<HConstant>(callable.code());
 
   return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
@@ -7965,7 +8008,7 @@
   if (target_shared->force_inline()) {
     return 0;
   }
-  if (target->shared()->IsBuiltin()) {
+  if (!target->shared()->IsUserJavaScript()) {
     return kNotInlinable;
   }
 
@@ -8052,12 +8095,12 @@
   // Use the same AstValueFactory for creating strings in the sub-compilation
   // step, but don't transfer ownership to target_info.
   Handle<SharedFunctionInfo> target_shared(target->shared());
-  ParseInfo parse_info(zone(), target_shared);
+  ParseInfo parse_info(target_shared, top_info()->parse_info()->zone_shared());
   parse_info.set_ast_value_factory(
       top_info()->parse_info()->ast_value_factory());
   parse_info.set_ast_value_factory_owned(false);
 
-  CompilationInfo target_info(&parse_info, target);
+  CompilationInfo target_info(parse_info.zone(), &parse_info, target);
 
   if (inlining_kind != CONSTRUCT_CALL_RETURN &&
       IsClassConstructor(target_shared->kind())) {
@@ -8078,7 +8121,7 @@
     TraceInline(target, caller, "parse failure");
     return false;
   }
-  if (target_shared->dont_crankshaft()) {
+  if (target_shared->must_use_ignition_turbo()) {
     TraceInline(target, caller, "ParseAndAnalyze found incompatibility");
     return false;
   }
@@ -8161,10 +8204,6 @@
            &bounds_)
       .Run();
 
-  if (is_tracking_positions()) {
-    TraceInlinedFunction(target_shared, source_position(), inlining_id);
-  }
-
   // Save the pending call context. Set up new one for the inlined function.
   // The function state is new-allocated because we need to delete it
   // in two different places.
@@ -8491,6 +8530,23 @@
   }
 }
 
+// static
+bool HOptimizedGraphBuilder::NoElementsInPrototypeChain(
+    Handle<Map> receiver_map) {
+  // TODO(ishell): remove this once we support NO_ELEMENTS elements kind.
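+  // Returns true only if every object on the receiver map's prototype chain
+  // uses the empty fixed array as its elements backing store.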
+  PrototypeIterator iter(receiver_map);
+  Handle<Object> empty_fixed_array =
+      iter.isolate()->factory()->empty_fixed_array();
+  while (true) {
+    Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
+    if (current->elements() != *empty_fixed_array) return false;
+    iter.Advance();
+    if (iter.IsAtEnd()) {
+      return true;
+    }
+  }
+}
+
 bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
     Handle<JSFunction> function, Handle<Map> receiver_map, BailoutId ast_id,
     int args_count_no_receiver) {
@@ -8745,6 +8801,7 @@
     }
     case kArrayShift: {
       if (!CanInlineArrayResizeOperation(receiver_map)) return false;
+      if (!NoElementsInPrototypeChain(receiver_map)) return false;
       ElementsKind kind = receiver_map->elements_kind();
 
       // If there may be elements accessors in the prototype chain, the fast
@@ -8758,7 +8815,7 @@
       // in a map change.
       BuildCheckPrototypeMaps(
           handle(JSObject::cast(receiver_map->prototype()), isolate()),
-          Handle<JSObject>::null());
+          Handle<JSObject>::null(), true);
 
       // Threshold for fast inlined Array.shift().
       HConstant* inline_threshold = Add<HConstant>(static_cast<int32_t>(16));
@@ -9686,7 +9743,7 @@
 // Checks whether allocation using the given constructor can be inlined.
 static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
   return constructor->has_initial_map() &&
-         !IsSubclassConstructor(constructor->shared()->kind()) &&
+         !IsDerivedConstructor(constructor->shared()->kind()) &&
          constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
          constructor->initial_map()->instance_size() <
              HAllocate::kMaxInlineSize;
@@ -10404,7 +10461,7 @@
 }
 
 void HOptimizedGraphBuilder::BuildStoreForEffect(
-    Expression* expr, Property* prop, FeedbackVectorSlot slot, BailoutId ast_id,
+    Expression* expr, Property* prop, FeedbackSlot slot, BailoutId ast_id,
     BailoutId return_id, HValue* object, HValue* key, HValue* value) {
   EffectContext for_effect(this);
   Push(object);
@@ -10973,15 +11030,12 @@
   Literal* literal = expr->right()->AsLiteral();
   if (literal == NULL) return false;
   if (!literal->value()->IsString()) return false;
-  if (!call->is_jsruntime() &&
-      call->function()->function_id != Runtime::kInlineClassOf) {
-    return false;
-  }
-  DCHECK(call->arguments()->length() == 1);
+  if (call->is_jsruntime()) return false;
+  if (call->function()->function_id != Runtime::kInlineClassOf) return false;
+  DCHECK_EQ(call->arguments()->length(), 1);
   return true;
 }
 
-
 void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
   DCHECK(!HasStackOverflow());
   DCHECK(current_block() != NULL);
@@ -11140,11 +11194,9 @@
   return op == Token::EQ_STRICT &&
          ((left->IsConstant() &&
            !HConstant::cast(left)->handle(isolate)->IsNumber() &&
-           !HConstant::cast(left)->handle(isolate)->IsSimd128Value() &&
            !HConstant::cast(left)->handle(isolate)->IsString()) ||
           (right->IsConstant() &&
            !HConstant::cast(right)->handle(isolate)->IsNumber() &&
-           !HConstant::cast(right)->handle(isolate)->IsSimd128Value() &&
            !HConstant::cast(right)->handle(isolate)->IsString()));
 }
 
@@ -11208,8 +11260,9 @@
           Handle<JSFunction>::cast(HConstant::cast(right)->handle(isolate()));
       // Make sure that the {function} already has a meaningful initial map
       // (i.e. we constructed at least one instance using the constructor
-      // {function}).
-      if (function->has_initial_map()) {
+      // {function}), and has an instance as .prototype.
+      if (function->has_initial_map() &&
+          !function->map()->has_non_instance_prototype()) {
         // Lookup @@hasInstance on the {function}.
         Handle<Map> function_map(function->map(), isolate());
         PropertyAccessInfo has_instance(
@@ -11502,6 +11555,9 @@
   UNREACHABLE();
 }
 
+void HOptimizedGraphBuilder::VisitGetIterator(GetIterator* expr) {
+  UNREACHABLE();
+}
 
 HValue* HOptimizedGraphBuilder::AddThisFunction() {
   return AddInstruction(BuildThisFunction());
@@ -11653,7 +11709,8 @@
   int copied_fields = 0;
   for (int i = 0; i < limit; i++) {
     PropertyDetails details = descriptors->GetDetails(i);
-    if (details.type() != DATA) continue;
+    if (details.location() != kField) continue;
+    DCHECK_EQ(kData, details.kind());
     copied_fields++;
     FieldIndex field_index = FieldIndex::ForDescriptor(*boilerplate_map, i);
 
@@ -11833,7 +11890,7 @@
        isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
     for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
     int flags = current_info()->GetDeclareGlobalsFlags();
-    Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+    Handle<FeedbackVector> vector(current_feedback_vector(), isolate());
     Add<HDeclareGlobals>(array, flags, vector);
     globals_.Rewind(0);
   }
@@ -11847,10 +11904,12 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
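+      // Each global declaration contributes four entries to the declarations
+      // array: the name, the feedback slot, the literal slot (unused for
+      // plain variables, hence undefined) and the initial value (undefined).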
+      globals_.Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
       globals_.Add(isolate()->factory()->undefined_value(), zone());
+      globals_.Add(isolate()->factory()->undefined_value(), zone());
       return;
     }
     case VariableLocation::PARAMETER:
@@ -11885,9 +11944,16 @@
   Variable* variable = proxy->var();
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_.Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+      // We need the slot where the literals array lives, too.
+      slot = declaration->fun()->LiteralFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
       Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
           declaration->fun(), current_info()->script(), top_info());
       // Check for stack-overflow exception.
@@ -11969,16 +12035,6 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
-  DCHECK(call->arguments()->length() == 1);
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* value = Pop();
-  HHasInstanceTypeAndBranch* result =
-      New<HHasInstanceTypeAndBranch>(value, JS_REGEXP_TYPE);
-  return ast_context()->ReturnControl(result, call->id());
-}
-
-
 void HOptimizedGraphBuilder::GenerateToInteger(CallRuntime* call) {
   DCHECK_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12127,45 +12183,6 @@
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
-// Support for direct creation of new objects.
-void HOptimizedGraphBuilder::GenerateNewObject(CallRuntime* call) {
-  DCHECK_EQ(2, call->arguments()->length());
-  CHECK_ALIVE(VisitExpressions(call->arguments()));
-  FastNewObjectStub stub(isolate());
-  FastNewObjectDescriptor descriptor(isolate());
-  HValue* values[] = {Pop(), Pop()};
-  HConstant* stub_value = Add<HConstant>(stub.GetCode());
-  HInstruction* result =
-      New<HCallWithDescriptor>(stub_value, 0, descriptor, ArrayVector(values));
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-// Support for direct calls from JavaScript to native RegExp code.
-void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
-  DCHECK_EQ(4, call->arguments()->length());
-  CHECK_ALIVE(VisitExpressions(call->arguments()));
-  Callable callable = CodeFactory::RegExpExec(isolate());
-  HValue* last_match_info = Pop();
-  HValue* index = Pop();
-  HValue* subject = Pop();
-  HValue* regexp_object = Pop();
-  HValue* stub = Add<HConstant>(callable.code());
-  HValue* values[] = {regexp_object, subject, index, last_match_info};
-  HInstruction* result = New<HCallWithDescriptor>(
-      stub, 0, callable.descriptor(), ArrayVector(values));
-  return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-// Fast support for number to string.
-void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
-  DCHECK_EQ(1, call->arguments()->length());
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
-  HValue* number = Pop();
-  HValue* result = BuildNumberToString(number, AstType::Any());
-  return ast_context()->ReturnValue(result);
-}
-
 
 // Fast support for calls.
 void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
diff --git a/src/crankshaft/hydrogen.h b/src/crankshaft/hydrogen.h
index 9f2508a..2ce6454 100644
--- a/src/crankshaft/hydrogen.h
+++ b/src/crankshaft/hydrogen.h
@@ -12,10 +12,13 @@
 #include "src/bailout-reason.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
+#include "src/counters.h"
 #include "src/crankshaft/compilation-phase.h"
 #include "src/crankshaft/hydrogen-instructions.h"
 #include "src/globals.h"
 #include "src/parsing/parse-info.h"
+#include "src/string-stream.h"
+#include "src/transitions.h"
 #include "src/zone/zone.h"
 
 namespace v8 {
@@ -37,9 +40,8 @@
  public:
   explicit HCompilationJob(Handle<JSFunction> function)
       : CompilationJob(function->GetIsolate(), &info_, "Crankshaft"),
-        zone_(function->GetIsolate()->allocator(), ZONE_NAME),
-        parse_info_(&zone_, handle(function->shared())),
-        info_(&parse_info_, function),
+        parse_info_(handle(function->shared())),
+        info_(parse_info_.zone(), &parse_info_, function),
         graph_(nullptr),
         chunk_(nullptr) {}
 
@@ -49,7 +51,6 @@
   virtual Status FinalizeJobImpl();
 
  private:
-  Zone zone_;
   ParseInfo parse_info_;
   CompilationInfo info_;
   HGraph* graph_;
@@ -463,12 +464,6 @@
   void DecrementInNoSideEffectsScope() { no_side_effects_scope_count_--; }
   bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
 
-  // If we are tracking source positions then this function assigns a unique
-  // identifier to each inlining and dumps function source if it was inlined
-  // for the first time during the current optimization.
-  int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
-                           SourcePosition position);
-
  private:
   HConstant* ReinsertConstantIfNecessary(HConstant* constant);
   HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
@@ -1807,9 +1802,11 @@
                                     HValue* previous_object_size,
                                     HValue* payload);
 
-  HInstruction* BuildConstantMapCheck(Handle<JSObject> constant);
+  HInstruction* BuildConstantMapCheck(Handle<JSObject> constant,
+                                      bool ensure_no_elements = false);
   HInstruction* BuildCheckPrototypeMaps(Handle<JSObject> prototype,
-                                        Handle<JSObject> holder);
+                                        Handle<JSObject> holder,
+                                        bool ensure_no_elements = false);
 
   HInstruction* BuildGetNativeContext(HValue* closure);
   HInstruction* BuildGetNativeContext();
@@ -1853,9 +1850,6 @@
 
   bool is_tracking_positions() { return track_positions_; }
 
-  void TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
-                            SourcePosition position, int inlining_id);
-
   HValue* BuildAllocateEmptyArrayBuffer(HValue* byte_length);
   template <typename ViewClass>
   void BuildArrayBufferViewInitialization(HValue* obj,
@@ -2149,7 +2143,7 @@
   Handle<SharedFunctionInfo> current_shared_info() const {
     return current_info()->shared_info();
   }
-  TypeFeedbackVector* current_feedback_vector() const {
+  FeedbackVector* current_feedback_vector() const {
     return current_closure()->feedback_vector();
   }
   void ClearInlinedTestContext() {
@@ -2163,10 +2157,8 @@
   F(IsSmi)                             \
   F(IsArray)                           \
   F(IsTypedArray)                      \
-  F(IsRegExp)                          \
   F(IsJSProxy)                         \
   F(Call)                              \
-  F(NewObject)                         \
   F(ToInteger)                         \
   F(ToObject)                          \
   F(ToString)                          \
@@ -2176,8 +2168,6 @@
   F(DebugBreakInOptimizedCode)         \
   F(StringCharCodeAt)                  \
   F(SubString)                         \
-  F(RegExpExec)                        \
-  F(NumberToString)                    \
   F(DebugIsActive)                     \
   /* Typed Arrays */                   \
   F(TypedArrayInitialize)              \
@@ -2386,6 +2376,7 @@
                         TailCallMode syntactic_tail_call_mode);
   static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
   static bool CanInlineArrayResizeOperation(Handle<Map> receiver_map);
+  static bool NoElementsInPrototypeChain(Handle<Map> receiver_map);
 
   // If --trace-inlining, print a line of the inlining trace.  Inlining
   // succeeded if the reason string is NULL and failed if there is a
@@ -2395,15 +2386,16 @@
                    TailCallMode tail_call_mode = TailCallMode::kDisallow);
 
   void HandleGlobalVariableAssignment(Variable* var, HValue* value,
-                                      FeedbackVectorSlot slot,
-                                      BailoutId ast_id);
+                                      FeedbackSlot slot, BailoutId ast_id);
 
   void HandlePropertyAssignment(Assignment* expr);
   void HandleCompoundAssignment(Assignment* expr);
-  void HandlePolymorphicNamedFieldAccess(
-      PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
-      BailoutId ast_id, BailoutId return_id, HValue* object, HValue* value,
-      SmallMapList* types, Handle<Name> name);
+  void HandlePolymorphicNamedFieldAccess(PropertyAccessType access_type,
+                                         Expression* expr, FeedbackSlot slot,
+                                         BailoutId ast_id, BailoutId return_id,
+                                         HValue* object, HValue* value,
+                                         SmallMapList* types,
+                                         Handle<Name> name);
 
   HValue* BuildAllocateExternalElements(
       ExternalArrayType array_type,
@@ -2464,7 +2456,19 @@
           field_type_(HType::Tagged()),
           access_(HObjectAccess::ForMap()),
           lookup_type_(NOT_FOUND),
-          details_(NONE, DATA, Representation::None()) {}
+          details_(PropertyDetails::Empty()),
+          store_mode_(STORE_TO_INITIALIZED_ENTRY) {}
+
+    // Ensure the full store is performed.
+    void MarkAsInitializingStore() {
+      DCHECK_EQ(STORE, access_type_);
+      store_mode_ = INITIALIZING_STORE;
+    }
+
+    StoreFieldOrKeyedMode StoreMode() {
+      DCHECK_EQ(STORE, access_type_);
+      return store_mode_;
+    }
 
     // Checks whether this PropertyAccessInfo can be handled as a monomorphic
     // named load. It additionally fills in the fields necessary to generate the
@@ -2521,15 +2525,23 @@
     bool IsFound() const { return lookup_type_ != NOT_FOUND; }
     bool IsProperty() const { return IsFound() && !IsTransition(); }
     bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
+    // TODO(ishell): rename to IsDataConstant() once constant field tracking
+    // is done.
+    bool IsDataConstantField() const {
+      return lookup_type_ == DESCRIPTOR_TYPE && details_.kind() == kData &&
+             details_.location() == kField && details_.constness() == kConst;
+    }
     bool IsData() const {
-      return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == DATA;
+      return lookup_type_ == DESCRIPTOR_TYPE && details_.kind() == kData &&
+             details_.location() == kField;
     }
     bool IsDataConstant() const {
-      return lookup_type_ == DESCRIPTOR_TYPE &&
-             details_.type() == DATA_CONSTANT;
+      return lookup_type_ == DESCRIPTOR_TYPE && details_.kind() == kData &&
+             details_.location() == kDescriptor;
     }
     bool IsAccessorConstant() const {
-      return !IsTransition() && details_.type() == ACCESSOR_CONSTANT;
+      return !IsTransition() && details_.kind() == kAccessor &&
+             details_.location() == kDescriptor;
     }
     bool IsConfigurable() const { return details_.IsConfigurable(); }
     bool IsReadOnly() const { return details_.IsReadOnly(); }
@@ -2578,6 +2590,7 @@
       transition_ = handle(target);
       number_ = transition_->LastAdded();
       details_ = transition_->instance_descriptors()->GetDetails(number_);
+      MarkAsInitializingStore();
     }
     void NotFound() {
       lookup_type_ = NOT_FOUND;
@@ -2588,7 +2601,8 @@
       return details_.representation();
     }
     bool IsTransitionToData() const {
-      return IsTransition() && details_.type() == DATA;
+      return IsTransition() && details_.kind() == kData &&
+             details_.location() == kField;
     }
 
     Zone* zone() { return builder_->zone(); }
@@ -2623,6 +2637,7 @@
     Handle<Map> transition_;
     int number_;
     PropertyDetails details_;
+    StoreFieldOrKeyedMode store_mode_;
   };
 
   HValue* BuildMonomorphicAccess(PropertyAccessInfo* info, HValue* object,
@@ -2632,9 +2647,8 @@
 
   HValue* BuildNamedAccess(PropertyAccessType access, BailoutId ast_id,
                            BailoutId return_id, Expression* expr,
-                           FeedbackVectorSlot slot, HValue* object,
-                           Handle<Name> name, HValue* value,
-                           bool is_uninitialized = false);
+                           FeedbackSlot slot, HValue* object, Handle<Name> name,
+                           HValue* value, bool is_uninitialized = false);
 
   void HandlePolymorphicCallNamed(Call* expr,
                                   HValue* receiver,
@@ -2668,7 +2682,7 @@
       PushBeforeSimulateBehavior push_sim_result);
   HInstruction* BuildIncrement(CountOperation* expr);
   HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
-                                  Expression* expr, FeedbackVectorSlot slot,
+                                  Expression* expr, FeedbackSlot slot,
                                   HValue* object, HValue* key, HValue* value);
 
   HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
@@ -2686,19 +2700,21 @@
                                               PropertyAccessType access_type,
                                               KeyedAccessStoreMode store_mode);
 
-  HValue* HandlePolymorphicElementAccess(
-      Expression* expr, FeedbackVectorSlot slot, HValue* object, HValue* key,
-      HValue* val, SmallMapList* maps, PropertyAccessType access_type,
-      KeyedAccessStoreMode store_mode, bool* has_side_effects);
+  HValue* HandlePolymorphicElementAccess(Expression* expr, FeedbackSlot slot,
+                                         HValue* object, HValue* key,
+                                         HValue* val, SmallMapList* maps,
+                                         PropertyAccessType access_type,
+                                         KeyedAccessStoreMode store_mode,
+                                         bool* has_side_effects);
 
   HValue* HandleKeyedElementAccess(HValue* obj, HValue* key, HValue* val,
-                                   Expression* expr, FeedbackVectorSlot slot,
+                                   Expression* expr, FeedbackSlot slot,
                                    BailoutId ast_id, BailoutId return_id,
                                    PropertyAccessType access_type,
                                    bool* has_side_effects);
 
   HInstruction* BuildNamedGeneric(PropertyAccessType access, Expression* expr,
-                                  FeedbackVectorSlot slot, HValue* object,
+                                  FeedbackSlot slot, HValue* object,
                                   Handle<Name> name, HValue* value,
                                   bool is_uninitialized = false);
 
@@ -2711,19 +2727,18 @@
                 HValue* key);
 
   void BuildStoreForEffect(Expression* expression, Property* prop,
-                           FeedbackVectorSlot slot, BailoutId ast_id,
+                           FeedbackSlot slot, BailoutId ast_id,
                            BailoutId return_id, HValue* object, HValue* key,
                            HValue* value);
 
-  void BuildStore(Expression* expression, Property* prop,
-                  FeedbackVectorSlot slot, BailoutId ast_id,
-                  BailoutId return_id, bool is_uninitialized = false);
+  void BuildStore(Expression* expression, Property* prop, FeedbackSlot slot,
+                  BailoutId ast_id, BailoutId return_id,
+                  bool is_uninitialized = false);
 
   HInstruction* BuildLoadNamedField(PropertyAccessInfo* info,
                                     HValue* checked_object);
-  HInstruction* BuildStoreNamedField(PropertyAccessInfo* info,
-                                     HValue* checked_object,
-                                     HValue* value);
+  HValue* BuildStoreNamedField(PropertyAccessInfo* info, HValue* checked_object,
+                               HValue* value);
 
   HValue* BuildContextChainWalk(Variable* var);
 
@@ -2769,7 +2784,7 @@
                                      TailCallMode syntactic_tail_call_mode,
                                      ConvertReceiverMode convert_mode,
                                      TailCallMode tail_call_mode,
-                                     FeedbackVectorSlot slot);
+                                     FeedbackSlot slot);
 
   HInstruction* NewCallConstantFunction(Handle<JSFunction> target,
                                         int argument_count,
@@ -2804,7 +2819,6 @@
 
   friend class FunctionState;  // Pushes and pops the state stack.
   friend class AstContext;  // Pushes and pops the AST context stack.
-  friend class KeyedLoadFastElementStub;
   friend class HOsrBuilder;
 
   DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.cc b/src/crankshaft/ia32/lithium-codegen-ia32.cc
index d9044ca..d5b8749 100644
--- a/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -7,6 +7,7 @@
 #include "src/crankshaft/ia32/lithium-codegen-ia32.h"
 
 #include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
@@ -176,15 +177,18 @@
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info()->scope()->scope_type());
         __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
                Immediate(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
-        __ push(edi);
+        __ Push(edi);
+        __ Push(Smi::FromInt(info()->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
     }
@@ -305,7 +309,7 @@
     // building, install a special marker there instead.
     DCHECK(info()->IsStub());
     __ mov(MemOperand(esp, 2 * kPointerSize),
-           Immediate(Smi::FromInt(StackFrame::STUB)));
+           Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
 
     /* stack layout
        3: old ebp
@@ -342,7 +346,7 @@
         frame_is_built_ = true;
         // Build the frame in such a way that esi isn't trashed.
         __ push(ebp);  // Caller's frame pointer.
-        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+        __ push(Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
         __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
         Comment(";;; Deferred code");
       }
@@ -1923,12 +1927,6 @@
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected & ToBooleanHint::kSimdValue) {
-        // SIMD value -> true.
-        __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
-        __ j(equal, instr->TrueLabel(chunk_));
-      }
-
       if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
@@ -2692,7 +2690,7 @@
     __ mov(result,
            Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
     __ cmp(Operand(result),
-           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+           Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
     __ j(equal, &adapted, Label::kNear);
 
     // No arguments adaptor frame.
@@ -2762,9 +2760,9 @@
 
   // Normal function. Replace undefined or null with global receiver.
   __ cmp(receiver, factory()->null_value());
-  __ j(equal, &global_object, Label::kNear);
+  __ j(equal, &global_object, dist);
   __ cmp(receiver, factory()->undefined_value());
-  __ j(equal, &global_object, Label::kNear);
+  __ j(equal, &global_object, dist);
 
   // The receiver should be a JS object.
   __ test(receiver, Immediate(kSmiTagMask));
@@ -2772,7 +2770,7 @@
   __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
   DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
 
-  __ jmp(&receiver_ok, Label::kNear);
+  __ jmp(&receiver_ok, dist);
   __ bind(&global_object);
   __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
   __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
@@ -2869,7 +2867,7 @@
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   DCHECK(ToRegister(instr->context()).is(esi));
-  __ push(Immediate(instr->hydrogen()->pairs()));
+  __ push(Immediate(instr->hydrogen()->declarations()));
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
   __ push(Immediate(instr->hydrogen()->feedback_vector()));
   CallRuntime(Runtime::kDeclareGlobals, instr);
@@ -3394,7 +3392,7 @@
   Label no_arguments_adaptor, formal_parameter_count_loaded;
   __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+         Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &no_arguments_adaptor, Label::kNear);
 
   // Drop current frame and load arguments count from arguments adaptor frame.
@@ -3855,13 +3853,18 @@
       if (Smi::IsValid(int_key)) {
         __ mov(ebx, Immediate(Smi::FromInt(int_key)));
       } else {
-        // We should never get here at runtime because there is a smi check on
-        // the key before this point.
-        __ int3();
+        Abort(kArrayIndexConstantValueTooBig);
       }
     } else {
+      Label is_smi;
       __ Move(ebx, ToRegister(key));
       __ SmiTag(ebx);
+      // Deopt if the key is outside Smi range. The stub expects Smi and would
+      // bump the elements into dictionary mode (and trigger a deopt) anyway.
+      __ j(no_overflow, &is_smi);
+      __ PopSafepointRegisters();
+      DeoptimizeIf(no_condition, instr, DeoptimizeReason::kOverflow);
+      __ bind(&is_smi);
     }
 
     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
@@ -4539,6 +4542,15 @@
 
 
 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  Label deopt, done;
+  // If the map is not deprecated, the migration attempt does not make sense.
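+  // Map::Deprecated is a bit in the map's bit_field_3; if it is clear, the
+  // map is current and we jump straight to the deopt below.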
+  __ push(object);
+  __ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+  __ test(FieldOperand(object, Map::kBitField3Offset),
+          Immediate(Map::Deprecated::kMask));
+  __ pop(object);
+  __ j(zero, &deopt);
+
   {
     PushSafepointRegistersScope scope(this);
     __ push(object);
@@ -4549,7 +4561,12 @@
 
     __ test(eax, Immediate(kSmiTagMask));
   }
-  DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
+  __ j(not_zero, &done);
+
+  __ bind(&deopt);
+  DeoptimizeIf(no_condition, instr, DeoptimizeReason::kInstanceMigrationFailed);
+
+  __ bind(&done);
 }
 
 
@@ -4890,18 +4907,6 @@
     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
               Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     final_branch_condition = zero;
-
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)         \
-  } else if (String::Equals(type_name, factory()->type##_string())) { \
-    __ JumpIfSmi(input, false_label, false_distance);                 \
-    __ cmp(FieldOperand(input, HeapObject::kMapOffset),               \
-           factory()->type##_map());                                  \
-    final_branch_condition = equal;
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
-
   } else {
     __ jmp(false_label, false_distance);
   }
diff --git a/src/crankshaft/lithium-allocator.cc b/src/crankshaft/lithium-allocator.cc
index aa4459b..201c606 100644
--- a/src/crankshaft/lithium-allocator.cc
+++ b/src/crankshaft/lithium-allocator.cc
@@ -5,8 +5,9 @@
 #include "src/crankshaft/lithium-allocator.h"
 
 #include "src/crankshaft/hydrogen.h"
-#include "src/crankshaft/lithium-inl.h"
 #include "src/crankshaft/lithium-allocator-inl.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/objects-inl.h"
 #include "src/register-configuration.h"
 #include "src/string-stream.h"
 
diff --git a/src/crankshaft/lithium-codegen.cc b/src/crankshaft/lithium-codegen.cc
index 2d16560..9569660 100644
--- a/src/crankshaft/lithium-codegen.cc
+++ b/src/crankshaft/lithium-codegen.cc
@@ -6,6 +6,8 @@
 
 #include <sstream>
 
+#include "src/objects-inl.h"
+
 #if V8_TARGET_ARCH_IA32
 #include "src/crankshaft/ia32/lithium-ia32.h"  // NOLINT
 #include "src/crankshaft/ia32/lithium-codegen-ia32.h"  // NOLINT
@@ -237,7 +239,8 @@
       int shared_id = DefineDeoptimizationLiteral(
           environment->entry() ? environment->entry()->shared()
                                : info()->shared_info());
-      translation->BeginConstructStubFrame(shared_id, translation_size);
+      translation->BeginConstructStubFrame(BailoutId::ConstructStubInvoke(),
+                                           shared_id, translation_size);
       if (info()->closure().is_identical_to(environment->closure())) {
         translation->StoreJSFrameFunction();
       } else {
diff --git a/src/crankshaft/lithium.cc b/src/crankshaft/lithium.cc
index 94d6041..5f0e9e3 100644
--- a/src/crankshaft/lithium.cc
+++ b/src/crankshaft/lithium.cc
@@ -6,6 +6,7 @@
 
 #include "src/ast/scopes.h"
 #include "src/codegen.h"
+#include "src/objects-inl.h"
 
 #if V8_TARGET_ARCH_IA32
 #include "src/crankshaft/ia32/lithium-ia32.h"  // NOLINT
diff --git a/src/crankshaft/mips/lithium-codegen-mips.cc b/src/crankshaft/mips/lithium-codegen-mips.cc
index abbf208..cd6e45a 100644
--- a/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -28,6 +28,7 @@
 #include "src/crankshaft/mips/lithium-codegen-mips.h"
 
 #include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/crankshaft/hydrogen-osr.h"
@@ -202,15 +203,18 @@
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info()->scope()->scope_type());
         __ li(FastNewFunctionContextDescriptor::SlotsRegister(),
               Operand(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ push(a1);
+        __ Push(Smi::FromInt(info()->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
     }
@@ -299,7 +303,7 @@
         DCHECK(!frame_is_built_);
         DCHECK(info()->IsStub());
         frame_is_built_ = true;
-        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ li(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
         __ PushCommonFrame(scratch0());
         Comment(";;; Deferred code");
       }
@@ -358,7 +362,7 @@
       // This variant of deopt can only be used with stubs. Since we don't
       // have a function pointer to install in the stack frame that we're
       // building, install a special marker there instead.
-      __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
+      __ li(at, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
       __ push(at);
       DCHECK(info()->IsStub());
     }
@@ -1764,18 +1768,18 @@
     FPURegister left_reg = ToDoubleRegister(left);
     FPURegister right_reg = ToDoubleRegister(right);
     FPURegister result_reg = ToDoubleRegister(instr->result());
+
     Label nan, done;
     if (operation == HMathMinMax::kMathMax) {
-      __ MaxNaNCheck_d(result_reg, left_reg, right_reg, &nan);
+      __ Float64Max(result_reg, left_reg, right_reg, &nan);
     } else {
       DCHECK(operation == HMathMinMax::kMathMin);
-      __ MinNaNCheck_d(result_reg, left_reg, right_reg, &nan);
+      __ Float64Min(result_reg, left_reg, right_reg, &nan);
     }
     __ Branch(&done);
 
     __ bind(&nan);
-    __ LoadRoot(scratch, Heap::kNanValueRootIndex);
-    __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
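+    // At least one operand is NaN; adding the operands propagates a NaN into
+    // the result register.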
+    __ add_d(result_reg, left_reg, right_reg);
 
     __ bind(&done);
   }
@@ -2021,14 +2025,6 @@
         __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
       }
 
-      if (expected & ToBooleanHint::kSimdValue) {
-        // SIMD value -> true.
-        const Register scratch = scratch1();
-        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
-        __ Branch(instr->TrueLabel(chunk_), eq, scratch,
-                  Operand(SIMD128_VALUE_TYPE));
-      }
-
       if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         DoubleRegister dbl_scratch = double_scratch0();
@@ -2799,7 +2795,7 @@
       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
-      __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
+      __ lw(result, FieldMemOperand(result, PropertyCell::kValueOffset));
       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
                    Operand(Smi::FromInt(Isolate::kProtectorValid)));
     }
@@ -2870,7 +2866,8 @@
     __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
     __ lw(result,
           MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
-    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    __ Xor(temp, result,
+           Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
     // Result is the frame pointer for the frame if not adapted and for the real
     // frame below the adaptor frame if adapted.
@@ -3058,7 +3055,7 @@
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
-  __ li(scratch0(), instr->hydrogen()->pairs());
+  __ li(scratch0(), instr->hydrogen()->declarations());
   __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   __ Push(scratch0(), scratch1());
   __ li(scratch0(), instr->hydrogen()->feedback_vector());
@@ -3482,7 +3479,7 @@
   __ lw(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ lw(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
   __ Branch(&no_arguments_adaptor, ne, scratch3,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // Drop current frame and load arguments count from arguments adaptor frame.
   __ mov(fp, scratch2);
@@ -4016,13 +4013,19 @@
       if (Smi::IsValid(int_key)) {
         __ li(a3, Operand(Smi::FromInt(int_key)));
       } else {
-        // We should never get here at runtime because there is a smi check on
-        // the key before this point.
-        __ stop("expected smi");
+        Abort(kArrayIndexConstantValueTooBig);
       }
     } else {
-      __ mov(a3, ToRegister(key));
-      __ SmiTag(a3);
+      Label is_smi;
+      __ SmiTagCheckOverflow(a3, ToRegister(key), at);
+      // Deopt if the key is outside Smi range. The stub expects Smi and would
+      // bump the elements into dictionary mode (and trigger a deopt) anyway.
+      __ BranchOnNoOverflow(&is_smi, at);
+      RestoreRegistersStateStub stub(isolate());
+      __ push(ra);
+      __ CallStub(&stub);
+      DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow);
+      __ bind(&is_smi);
     }
 
     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
@@ -4758,6 +4761,13 @@
 
 
 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  Label deopt, done;
+  // If the map is not deprecated, the migration attempt does not make sense.
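+  // Map::Deprecated is a bit in the map's bit_field_3; if it is clear, the
+  // map is current and we branch straight to the deopt below.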
+  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  __ lw(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
+  __ And(at, scratch0(), Operand(Map::Deprecated::kMask));
+  __ Branch(&deopt, eq, at, Operand(zero_reg));
+
   {
     PushSafepointRegistersScope scope(this);
     __ push(object);
@@ -4768,8 +4778,15 @@
     __ StoreToSafepointRegisterSlot(v0, scratch0());
   }
   __ SmiTst(scratch0(), at);
-  DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, at,
+  __ Branch(&done, ne, at, Operand(zero_reg));
+
+  __ bind(&deopt);
+  // With the "al" (always) condition the operands are not used, so just pass
+  // zero_reg there.
+  DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, zero_reg,
                Operand(zero_reg));
+
+  __ bind(&done);
 }
 
 
@@ -5146,19 +5163,6 @@
     *cmp2 = Operand(zero_reg);
     final_branch_condition = eq;
 
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
-  } else if (String::Equals(type_name, factory->type##_string())) {  \
-    __ JumpIfSmi(input, false_label);                                \
-    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));    \
-    __ LoadRoot(at, Heap::k##Type##MapRootIndex);                    \
-    *cmp1 = input;                                                   \
-    *cmp2 = Operand(at);                                             \
-    final_branch_condition = eq;
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
-
   } else {
     *cmp1 = at;
     *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.cc b/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 1531996..d32052c 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -4,6 +4,7 @@
 
 #include "src/crankshaft/mips64/lithium-codegen-mips64.h"
 
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/crankshaft/hydrogen-osr.h"
@@ -178,15 +179,18 @@
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info()->scope()->scope_type());
         __ li(FastNewFunctionContextDescriptor::SlotsRegister(),
               Operand(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ push(a1);
+        __ Push(Smi::FromInt(info()->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
     }
@@ -275,7 +279,7 @@
         DCHECK(!frame_is_built_);
         DCHECK(info()->IsStub());
         frame_is_built_ = true;
-        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ li(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
         __ PushCommonFrame(scratch0());
         Comment(";;; Deferred code");
       }
@@ -343,7 +347,7 @@
       // This variant of deopt can only be used with stubs. Since we don't
       // have a function pointer to install in the stack frame that we're
       // building, install a special marker there instead.
-      __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
+      __ li(at, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
       __ push(at);
       DCHECK(info()->IsStub());
     }
@@ -1888,16 +1892,15 @@
     FPURegister result_reg = ToDoubleRegister(instr->result());
     Label nan, done;
     if (operation == HMathMinMax::kMathMax) {
-      __ MaxNaNCheck_d(result_reg, left_reg, right_reg, &nan);
+      __ Float64Max(result_reg, left_reg, right_reg, &nan);
     } else {
       DCHECK(operation == HMathMinMax::kMathMin);
-      __ MinNaNCheck_d(result_reg, left_reg, right_reg, &nan);
+      __ Float64Min(result_reg, left_reg, right_reg, &nan);
     }
     __ Branch(&done);
 
     __ bind(&nan);
-    __ LoadRoot(scratch, Heap::kNanValueRootIndex);
-    __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+    __ add_d(result_reg, left_reg, right_reg);
 
     __ bind(&done);
   }
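The NaN fallback above replaces loading a canonical NaN constant from the heap with add_d(result, left, right). This works because IEEE 754 addition propagates NaN: if either operand is NaN, the sum is NaN, so no extra constant load is needed. The same shape in portable C++ (a sketch, not the V8 helpers):

    #include <cmath>

    double MaxPropagatingNaN(double left, double right) {
      if (std::isnan(left) || std::isnan(right)) {
        return left + right;  // IEEE 754: any NaN operand makes the sum NaN.
      }
      return left > right ? left : right;
    }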
@@ -2143,14 +2146,6 @@
         __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
       }
 
-      if (expected & ToBooleanHint::kSimdValue) {
-        // SIMD value -> true.
-        const Register scratch = scratch1();
-        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
-        __ Branch(instr->TrueLabel(chunk_), eq, scratch,
-                  Operand(SIMD128_VALUE_TYPE));
-      }
-
       if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         DoubleRegister dbl_scratch = double_scratch0();
@@ -2976,7 +2971,7 @@
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
       // The comparison only needs LS bits of value, which is a smi.
-      __ ld(result, FieldMemOperand(result, Cell::kValueOffset));
+      __ ld(result, FieldMemOperand(result, PropertyCell::kValueOffset));
       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
                    Operand(Smi::FromInt(Isolate::kProtectorValid)));
     }
@@ -3053,7 +3048,8 @@
     __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
     __ ld(result,
           MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
-    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    __ Xor(temp, result,
+           Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
     // Result is the frame pointer for the frame if not adapted and for the real
     // frame below the adaptor frame if adapted.
@@ -3245,7 +3241,7 @@
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
-  __ li(scratch0(), instr->hydrogen()->pairs());
+  __ li(scratch0(), instr->hydrogen()->declarations());
   __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   __ Push(scratch0(), scratch1());
   __ li(scratch0(), instr->hydrogen()->feedback_vector());
@@ -3689,7 +3685,7 @@
   __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
   __ Branch(&no_arguments_adaptor, ne, scratch3,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // Drop current frame and load arguments count from arguments adaptor frame.
   __ mov(fp, scratch2);
@@ -4246,15 +4242,7 @@
 
     LOperand* key = instr->key();
     if (key->IsConstantOperand()) {
-      LConstantOperand* constant_key = LConstantOperand::cast(key);
-      int32_t int_key = ToInteger32(constant_key);
-      if (Smi::IsValid(int_key)) {
-        __ li(a3, Operand(Smi::FromInt(int_key)));
-      } else {
-        // We should never get here at runtime because there is a smi check on
-        // the key before this point.
-        __ stop("expected smi");
-      }
+      __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
     } else {
       __ mov(a3, ToRegister(key));
       __ SmiTag(a3);
@@ -4964,6 +4952,13 @@
 
 
 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  Label deopt, done;
+  // If the map is not deprecated, the migration attempt does not make sense.
+  __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  __ lwu(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
+  __ And(at, scratch0(), Operand(Map::Deprecated::kMask));
+  __ Branch(&deopt, eq, at, Operand(zero_reg));
+
   {
     PushSafepointRegistersScope scope(this);
     __ push(object);
@@ -4974,8 +4969,15 @@
     __ StoreToSafepointRegisterSlot(v0, scratch0());
   }
   __ SmiTst(scratch0(), at);
-  DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, at,
+  __ Branch(&done, ne, at, Operand(zero_reg));
+
+  __ bind(&deopt);
+  // With the "al" condition the operands are not used, so just pass zero_reg
+  // there.
+  DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, zero_reg,
                Operand(zero_reg));
+
+  __ bind(&done);
 }
 
 
@@ -5354,20 +5356,6 @@
     *cmp2 = Operand(zero_reg);
     final_branch_condition = eq;
 
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
-  } else if (String::Equals(type_name, factory->type##_string())) {  \
-    __ JumpIfSmi(input, false_label);                                \
-    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));    \
-    __ LoadRoot(at, Heap::k##Type##MapRootIndex);                    \
-    *cmp1 = input;                                                   \
-    *cmp2 = Operand(at);                                             \
-    final_branch_condition = eq;
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
-
-
   } else {
     *cmp1 = at;
     *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.cc b/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 9c65586..f930611 100644
--- a/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -5,6 +5,7 @@
 #include "src/crankshaft/ppc/lithium-codegen-ppc.h"
 
 #include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/crankshaft/hydrogen-osr.h"
@@ -186,15 +187,18 @@
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info()->scope()->scope_type());
         __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
                Operand(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ push(r4);
+        __ Push(Smi::FromInt(info()->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
     }
@@ -283,7 +287,7 @@
         DCHECK(!frame_is_built_);
         DCHECK(info()->IsStub());
         frame_is_built_ = true;
-        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
+        __ mov(scratch0(), Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
         __ PushCommonFrame(scratch0());
         Comment(";;; Deferred code");
       }
@@ -352,7 +356,7 @@
       // This variant of deopt can only be used with stubs. Since we don't
       // have a function pointer to install in the stack frame that we're
       // building, install a special marker there instead.
-      __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
+      __ mov(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
       __ push(ip);
       DCHECK(info()->IsStub());
     }
@@ -1703,12 +1707,15 @@
     } else {
       __ sub(result, left, EmitLoadRegister(right, ip));
     }
-#if V8_TARGET_ARCH_PPC64
     if (can_overflow) {
+#if V8_TARGET_ARCH_PPC64
       __ TestIfInt32(result, r0);
+#else
+      __ TestIfInt32(scratch0(), result, r0);
+#endif
       DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
     }
-#endif
+
   } else {
     if (right->IsConstantOperand()) {
       __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
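On 64-bit PPC, TestIfInt32(result, r0) verifies that a value produced in a 64-bit register still fits in 32 bits; the hunk above extends the overflow check to the 32-bit build instead of compiling it out. The 64-bit test amounts to a sign-extension round trip, sketched here (Int32Overflowed is illustrative, not a V8 routine):

    #include <cstdint>

    // The 32-bit operation overflowed iff truncating the 64-bit result to
    // int32 and sign-extending it back changes the value.
    bool Int32Overflowed(int64_t result) {
      return result != static_cast<int64_t>(static_cast<int32_t>(result));
    }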
@@ -1986,16 +1993,32 @@
   DoubleRegister result = ToDoubleRegister(instr->result());
   switch (instr->op()) {
     case Token::ADD:
-      __ fadd(result, left, right);
+      if (CpuFeatures::IsSupported(VSX)) {
+        __ xsadddp(result, left, right);
+      } else {
+        __ fadd(result, left, right);
+      }
       break;
     case Token::SUB:
-      __ fsub(result, left, right);
+      if (CpuFeatures::IsSupported(VSX)) {
+        __ xssubdp(result, left, right);
+      } else {
+        __ fsub(result, left, right);
+      }
       break;
     case Token::MUL:
-      __ fmul(result, left, right);
+      if (CpuFeatures::IsSupported(VSX)) {
+        __ xsmuldp(result, left, right);
+      } else {
+        __ fmul(result, left, right);
+      }
       break;
     case Token::DIV:
-      __ fdiv(result, left, right);
+      if (CpuFeatures::IsSupported(VSX)) {
+        __ xsdivdp(result, left, right);
+      } else {
+        __ fdiv(result, left, right);
+      }
       break;
     case Token::MOD: {
       __ PrepareCallCFunction(0, 2, scratch0());
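The VSX changes above follow a common code-generator pattern: probe the CPU once and choose between two equivalent encodings when the code is emitted, so the generated code itself carries no feature branch. A hedged sketch of that pattern (the cached probe and the printed opcode names are placeholders, not the V8 interfaces):

    #include <cstdio>

    // Placeholder for a startup-time CPU probe; real detection would query
    // the hardware capability bits once and cache the answer.
    static const bool kHasVsx = false;

    void EmitFloat64Add() {
      // Both encodings compute the same double addition; the branch exists
      // only at code-emission time, not in the emitted code.
      if (kHasVsx) {
        std::puts("emit xsadddp");  // wider VSX encoding
      } else {
        std::puts("emit fadd");  // baseline FPU encoding
      }
    }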
@@ -2183,13 +2206,6 @@
         __ beq(instr->TrueLabel(chunk_));
       }
 
-      if (expected & ToBooleanHint::kSimdValue) {
-        // SIMD value -> true.
-        Label not_simd;
-        __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
-        __ beq(instr->TrueLabel(chunk_));
-      }
-
       if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
@@ -3049,7 +3065,7 @@
       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
-      __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
+      __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
       __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
     }
@@ -3113,7 +3129,8 @@
     __ LoadP(
         result,
         MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
-    __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+    __ cmpi(result,
+            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
     // Result is the frame pointer for the frame if not adapted and for the real
     // frame below the adaptor frame if adapted.
@@ -3307,7 +3324,7 @@
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
-  __ Move(scratch0(), instr->hydrogen()->pairs());
+  __ Move(scratch0(), instr->hydrogen()->declarations());
   __ push(scratch0());
   __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
   __ push(scratch0());
@@ -3751,7 +3768,8 @@
   __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ LoadP(scratch3,
            MemOperand(scratch2, StandardFrameConstants::kContextOffset));
-  __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ cmpi(scratch3,
+          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ bne(&no_arguments_adaptor);
 
   // Drop current frame and load arguments count from arguments adaptor frame.
@@ -4331,12 +4349,21 @@
       if (Smi::IsValid(int_key)) {
         __ LoadSmiLiteral(r6, Smi::FromInt(int_key));
       } else {
-        // We should never get here at runtime because there is a smi check on
-        // the key before this point.
-        __ stop("expected smi");
+        Abort(kArrayIndexConstantValueTooBig);
       }
     } else {
+      Label is_smi;
+#if V8_TARGET_ARCH_PPC64
       __ SmiTag(r6, ToRegister(key));
+#else
+      // Deopt if the key is outside the Smi range. The stub expects a Smi and
+      // would bump the elements into dictionary mode (and deopt) anyway.
+      __ SmiTagCheckOverflow(r6, ToRegister(key), r0);
+      __ BranchOnNoOverflow(&is_smi);
+      __ PopSafepointRegisters();
+      DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow, cr0);
+      __ bind(&is_smi);
+#endif
     }
 
     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
@@ -5035,6 +5062,13 @@
 
 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   Register temp = ToRegister(instr->temp());
+  Label deopt, done;
+  // If the map is not deprecated, the migration attempt does not make sense.
+  __ LoadP(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ lwz(temp, FieldMemOperand(temp, Map::kBitField3Offset));
+  __ TestBitMask(temp, Map::Deprecated::kMask, r0);
+  __ beq(&deopt, cr0);
+
   {
     PushSafepointRegistersScope scope(this);
     __ push(object);
@@ -5045,7 +5079,13 @@
     __ StoreToSafepointRegisterSlot(r3, temp);
   }
   __ TestIfSmi(temp, r0);
-  DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
+  __ bne(&done, cr0);
+
+  __ bind(&deopt);
+  // With the "al" condition the operand is not used, so just pass cr0 there.
+  DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
+
+  __ bind(&done);
 }
 
 
@@ -5397,17 +5437,6 @@
     __ cmpi(r0, Operand::Zero());
     final_branch_condition = eq;
 
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
-  } else if (String::Equals(type_name, factory->type##_string())) {  \
-    __ JumpIfSmi(input, false_label);                                \
-    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
-    __ CompareRoot(scratch, Heap::k##Type##MapRootIndex);            \
-    final_branch_condition = eq;
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
-
   } else {
     __ b(false_label);
   }
diff --git a/src/crankshaft/s390/lithium-codegen-s390.cc b/src/crankshaft/s390/lithium-codegen-s390.cc
index c44df95..02c6b6f 100644
--- a/src/crankshaft/s390/lithium-codegen-s390.cc
+++ b/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -6,6 +6,7 @@
 #include "src/crankshaft/s390/lithium-codegen-s390.h"
 
 #include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/crankshaft/hydrogen-osr.h"
@@ -177,15 +178,18 @@
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info()->scope()->scope_type());
         __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
                Operand(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ push(r3);
+        __ Push(Smi::FromInt(info()->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
     }
@@ -271,7 +275,8 @@
         DCHECK(!frame_is_built_);
         DCHECK(info()->IsStub());
         frame_is_built_ = true;
-        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
+        __ Load(scratch0(),
+                Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
         __ PushCommonFrame(scratch0());
         Comment(";;; Deferred code");
       }
@@ -340,7 +345,7 @@
       // have a function pointer to install in the stack frame that we're
       // building, install a special marker there instead.
       DCHECK(info()->IsStub());
-      __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
+      __ Load(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
       __ push(ip);
       DCHECK(info()->IsStub());
     }
@@ -1283,8 +1288,12 @@
   __ bge(&done, Label::kNear);
 
   // If there is no remainder then we are done.
-  __ lr(scratch, result);
-  __ msr(scratch, divisor);
+  if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+    __ msrkc(scratch, result, divisor);
+  } else {
+    __ lr(scratch, result);
+    __ msr(scratch, divisor);
+  }
   __ Cmp32(dividend, scratch);
   __ beq(&done, Label::kNear);
 
@@ -1415,36 +1424,48 @@
     Register right = ToRegister(right_op);
 
     if (can_overflow) {
+      if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+        // result = left * right.
+        if (instr->hydrogen()->representation().IsSmi()) {
+          __ SmiUntag(scratch, right);
+          __ MulPWithCondition(result, left, scratch);
+        } else {
+          __ msrkc(result, left, right);
+          __ LoadW(result, result);
+        }
+        DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
+      } else {
 #if V8_TARGET_ARCH_S390X
-      // result = left * right.
-      if (instr->hydrogen()->representation().IsSmi()) {
-        __ SmiUntag(result, left);
-        __ SmiUntag(scratch, right);
-        __ msgr(result, scratch);
-      } else {
-        __ LoadRR(result, left);
-        __ msgr(result, right);
-      }
-      __ TestIfInt32(result, r0);
-      DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
-      if (instr->hydrogen()->representation().IsSmi()) {
-        __ SmiTag(result);
-      }
+        // result = left * right.
+        if (instr->hydrogen()->representation().IsSmi()) {
+          __ SmiUntag(result, left);
+          __ SmiUntag(scratch, right);
+          __ msgr(result, scratch);
+        } else {
+          __ LoadRR(result, left);
+          __ msgr(result, right);
+        }
+        __ TestIfInt32(result, r0);
+        DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
+        if (instr->hydrogen()->representation().IsSmi()) {
+          __ SmiTag(result);
+        }
 #else
-      // r0:scratch = scratch * right
-      if (instr->hydrogen()->representation().IsSmi()) {
-        __ SmiUntag(scratch, left);
-        __ mr_z(r0, right);
-        __ LoadRR(result, scratch);
-      } else {
         // r0:scratch = scratch * right
-        __ LoadRR(scratch, left);
-        __ mr_z(r0, right);
-        __ LoadRR(result, scratch);
-      }
-      __ TestIfInt32(r0, result, scratch);
-      DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
+        if (instr->hydrogen()->representation().IsSmi()) {
+          __ SmiUntag(scratch, left);
+          __ mr_z(r0, right);
+          __ LoadRR(result, scratch);
+        } else {
+          // r0:scratch = scratch * right
+          __ LoadRR(scratch, left);
+          __ mr_z(r0, right);
+          __ LoadRR(result, scratch);
+        }
+        __ TestIfInt32(r0, result, scratch);
+        DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
 #endif
+      }
     } else {
       if (instr->hydrogen()->representation().IsSmi()) {
         __ SmiUntag(result, left);
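The restructured multiply shows two overflow strategies side by side: with MISC_INSTR_EXT2 the core reports overflow directly from the multiply (msrkc), while the fallbacks widen the product and check that it still fits in 32 bits. Both reduce to the sketch below, using the GCC/Clang overflow builtin as a stand-in for the condition-code path:

    #include <cstdint>

    bool MulOverflows(int32_t a, int32_t b, int32_t* out) {
    #if defined(__GNUC__) || defined(__clang__)
      // Direct report, analogous to msrkc setting the condition code.
      return __builtin_mul_overflow(a, b, out);
    #else
      // Widening fallback, analogous to the msgr/mr_z paths above.
      int64_t wide = static_cast<int64_t>(a) * static_cast<int64_t>(b);
      *out = static_cast<int32_t>(wide);
      return wide != static_cast<int64_t>(*out);
    #endif
    }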
@@ -1678,10 +1699,17 @@
 #endif
 
   if (right->IsConstantOperand()) {
-    if (!isInteger || !checkOverflow)
+    if (!isInteger || !checkOverflow) {
       __ SubP(ToRegister(result), ToRegister(left), ToOperand(right));
-    else
-      __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
+    } else {
+      // -(MinInt) will overflow
+      if (ToInteger32(LConstantOperand::cast(right)) == kMinInt) {
+        __ Load(scratch0(), ToOperand(right));
+        __ Sub32(ToRegister(result), ToRegister(left), scratch0());
+      } else {
+        __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
+      }
+    }
   } else if (right->IsRegister()) {
     if (!isInteger)
       __ SubP(ToRegister(result), ToRegister(left), ToRegister(right));
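The kMinInt special case above exists because an immediate subtract may be lowered as an add of the negated constant, and -kMinInt is not representable in 32 bits. Loading the constant into a register first keeps the subtraction exact. The hazard in isolation (illustrative only):

    #include <cstdint>
    #include <limits>

    int32_t SubImmediate(int32_t left, int32_t imm) {
      if (imm == std::numeric_limits<int32_t>::min()) {
        // `left - imm` must not be rewritten as `left + (-imm)`: negating
        // kMinInt overflows. Materialize the constant and subtract directly.
        int32_t materialized = imm;
        return left - materialized;
      }
      return left - imm;  // safe to lower either way
    }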
@@ -1721,35 +1749,12 @@
   }
 }
 
-void LCodeGen::DoRSubI(LRSubI* instr) {
-  LOperand* left = instr->left();
-  LOperand* right = instr->right();
-  LOperand* result = instr->result();
-
-  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
-         right->IsConstantOperand());
-
-#if V8_TARGET_ARCH_S390X
-  // The overflow detection needs to be tested on the lower 32-bits.
-  // As a result, on 64-bit, we need to force 32-bit arithmetic operations
-  // to set the CC overflow bit properly.  The result is then sign-extended.
-  bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-#else
-  bool checkOverflow = true;
-#endif
-
-  Operand right_operand = ToOperand(right);
-  __ mov(r0, right_operand);
-
-  if (!checkOverflow) {
-    __ SubP_ExtendSrc(ToRegister(result), r0, ToRegister(left));
-  } else {
-    __ Sub32(ToRegister(result), r0, ToRegister(left));
-  }
-}
-
 void LCodeGen::DoConstantI(LConstantI* instr) {
-  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+  Register dst = ToRegister(instr->result());
+  if (instr->value() == 0)
+    __ XorP(dst, dst);
+  else
+    __ Load(dst, Operand(instr->value()));
 }
 
 void LCodeGen::DoConstantS(LConstantS* instr) {
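DoConstantI now special-cases zero with XorP(dst, dst): XOR-ing a register with itself always yields zero and is typically a shorter encoding than materializing the immediate, a standard zeroing idiom in code generators. As a sketch (illustrative, not the macro assembler):

    #include <cstdint>

    uint32_t MaterializeConstant(uint32_t reg, int32_t value) {
      if (value == 0) {
        return reg ^ reg;  // always 0, whatever the old register contents
      }
      return static_cast<uint32_t>(value);  // general immediate load
    }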
@@ -1992,20 +1997,38 @@
   DoubleRegister left = ToDoubleRegister(instr->left());
   DoubleRegister right = ToDoubleRegister(instr->right());
   DoubleRegister result = ToDoubleRegister(instr->result());
-  // All operations except MOD are computed in-place.
-  DCHECK(instr->op() == Token::MOD || left.is(result));
   switch (instr->op()) {
     case Token::ADD:
-      __ adbr(result, right);
+      if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+        __ vfa(result, left, right);
+      } else {
+        DCHECK(result.is(left));
+        __ adbr(result, right);
+      }
       break;
     case Token::SUB:
-      __ sdbr(result, right);
+      if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+        __ vfs(result, left, right);
+      } else {
+        DCHECK(result.is(left));
+        __ sdbr(result, right);
+      }
       break;
     case Token::MUL:
-      __ mdbr(result, right);
+      if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+        __ vfm(result, left, right);
+      } else {
+        DCHECK(result.is(left));
+        __ mdbr(result, right);
+      }
       break;
     case Token::DIV:
-      __ ddbr(result, right);
+      if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+        __ vfd(result, left, right);
+      } else {
+        DCHECK(result.is(left));
+        __ ddbr(result, right);
+      }
       break;
     case Token::MOD: {
       __ PrepareCallCFunction(0, 2, scratch0());
@@ -2187,13 +2210,6 @@
         __ beq(instr->TrueLabel(chunk_));
       }
 
-      if (expected & ToBooleanHint::kSimdValue) {
-        // SIMD value -> true.
-        Label not_simd;
-        __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
-        __ beq(instr->TrueLabel(chunk_));
-      }
-
       if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
@@ -3012,7 +3028,7 @@
       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
-      __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
+      __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
       __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
     }
@@ -3080,8 +3096,8 @@
     __ LoadP(
         result,
         MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
-    __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-    __ CmpP(result, r0);
+    __ CmpP(result,
+            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
     // Result is the frame pointer for the frame if not adapted and for the real
     // frame below the adaptor frame if adapted.
@@ -3258,7 +3274,7 @@
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   DCHECK(ToRegister(instr->context()).is(cp));
-  __ Move(scratch0(), instr->hydrogen()->pairs());
+  __ Move(scratch0(), instr->hydrogen()->declarations());
   __ push(scratch0());
   __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
   __ push(scratch0());
@@ -3391,31 +3407,17 @@
 void LCodeGen::EmitMathAbs(LMathAbs* instr) {
   Register input = ToRegister(instr->value());
   Register result = ToRegister(instr->result());
-  Label done;
-  __ CmpP(input, Operand::Zero());
-  __ Move(result, input);
-  __ bge(&done, Label::kNear);
-  __ LoadComplementRR(result, result);
+  __ LoadPositiveP(result, input);
   // Deoptimize on overflow.
   DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
-  __ bind(&done);
 }
 
 #if V8_TARGET_ARCH_S390X
 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
   Register input = ToRegister(instr->value());
   Register result = ToRegister(instr->result());
-  Label done;
-  __ Cmp32(input, Operand::Zero());
-  __ Move(result, input);
-  __ bge(&done, Label::kNear);
-
-  // Deoptimize on overflow.
-  __ Cmp32(input, Operand(0x80000000));
-  DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
-
-  __ LoadComplementRR(result, result);
-  __ bind(&done);
+  __ LoadPositive32(result, input);
+  DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
 }
 #endif
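Both abs helpers now lean on the load-positive instruction's overflow flag. The only input it can flag is kMinInt, whose absolute value is not representable in the same width, which is precisely when the code must deoptimize rather than return a wrong result. In portable terms (an illustrative predicate, not a V8 routine):

    #include <cstdint>
    #include <limits>

    // True exactly when |v| cannot be represented as int32_t, i.e. the one
    // case the generated code turns into a deoptimization.
    bool AbsWouldOverflow(int32_t v) {
      return v == std::numeric_limits<int32_t>::min();
    }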
 
@@ -3537,9 +3539,13 @@
 }
 
 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
-  DoubleRegister input = ToDoubleRegister(instr->value());
   DoubleRegister result = ToDoubleRegister(instr->result());
-  __ sqdbr(result, input);
+  LOperand* input = instr->value();
+  if (input->IsDoubleRegister()) {
+    __ Sqrt(result, ToDoubleRegister(instr->value()));
+  } else {
+    __ Sqrt(result, ToMemOperand(input));
+  }
 }
 
 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
@@ -3668,7 +3674,8 @@
   __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   __ LoadP(scratch3,
            MemOperand(scratch2, StandardFrameConstants::kContextOffset));
-  __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ CmpP(scratch3,
+          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ bne(&no_arguments_adaptor);
 
   // Drop current frame and load arguments count from arguments adaptor frame.
@@ -4287,12 +4294,21 @@
       if (Smi::IsValid(int_key)) {
         __ LoadSmiLiteral(r5, Smi::FromInt(int_key));
       } else {
-        // We should never get here at runtime because there is a smi check on
-        // the key before this point.
-        __ stop("expected smi");
+        Abort(kArrayIndexConstantValueTooBig);
       }
     } else {
+      Label is_smi;
+#if V8_TARGET_ARCH_S390X
       __ SmiTag(r5, ToRegister(key));
+#else
+      // Deopt if the key is outside the Smi range. The stub expects a Smi and
+      // would bump the elements into dictionary mode (and deopt) anyway.
+      __ Add32(r5, ToRegister(key), ToRegister(key));
+      __ b(nooverflow, &is_smi);
+      __ PopSafepointRegisters();
+      DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow, cr0);
+      __ bind(&is_smi);
+#endif
     }
 
     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
@@ -4877,14 +4893,42 @@
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->value();
-  __ TestIfSmi(ToRegister(input));
+  if (input->IsRegister()) {
+    __ TestIfSmi(ToRegister(input));
+  } else if (input->IsStackSlot()) {
+    MemOperand value = ToMemOperand(input);
+#if !V8_TARGET_LITTLE_ENDIAN
+#if V8_TARGET_ARCH_S390X
+    __ TestIfSmi(MemOperand(value.rb(), value.offset() + 7));
+#else
+    __ TestIfSmi(MemOperand(value.rb(), value.offset() + 3));
+#endif
+#else
+    __ TestIfSmi(value);
+#endif
+  }
   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
 }
 
 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
     LOperand* input = instr->value();
-    __ TestIfSmi(ToRegister(input));
+    if (input->IsRegister()) {
+      __ TestIfSmi(ToRegister(input));
+    } else if (input->IsStackSlot()) {
+      MemOperand value = ToMemOperand(input);
+#if !V8_TARGET_LITTLE_ENDIAN
+#if V8_TARGET_ARCH_S390X
+      __ TestIfSmi(MemOperand(value.rb(), value.offset() + 7));
+#else
+      __ TestIfSmi(MemOperand(value.rb(), value.offset() + 3));
+#endif
+#else
+      __ TestIfSmi(value);
+#endif
+    } else {
+      UNIMPLEMENTED();
+    }
     DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
   }
 }
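The stack-slot variants of TestIfSmi adjust the operand offset on big-endian targets: the Smi tag is the least significant bit, and on big-endian hardware the least significant byte of a slot sits at the highest address, hence the +7 (64-bit) and +3 (32-bit) adjustments. The same addressing rule at the byte level (an illustrative helper):

    #include <cstddef>
    #include <cstdint>

    // Locate the least significant byte of a slot according to endianness and
    // test the tag bit; a clear bit means the slot holds a Smi.
    bool SlotHoldsSmi(const uint8_t* slot, size_t slot_size, bool big_endian) {
      const uint8_t lsb = big_endian ? slot[slot_size - 1] : slot[0];
      return (lsb & 1) == 0;
    }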
@@ -4963,6 +5007,13 @@
 
 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   Register temp = ToRegister(instr->temp());
+  Label deopt, done;
+  // If the map is not deprecated, the migration attempt does not make sense.
+  __ LoadP(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ LoadlW(temp, FieldMemOperand(temp, Map::kBitField3Offset));
+  __ TestBitMask(temp, Map::Deprecated::kMask, r0);
+  __ beq(&deopt);
+
   {
     PushSafepointRegistersScope scope(this);
     __ push(object);
@@ -4973,7 +5024,13 @@
     __ StoreToSafepointRegisterSlot(r2, temp);
   }
   __ TestIfSmi(temp);
-  DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
+  __ bne(&done);
+
+  __ bind(&deopt);
+  // With the "al" condition the operand is not used, so just pass cr0 there.
+  DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
+
+  __ bind(&done);
 }
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
@@ -5322,17 +5379,6 @@
     __ CmpP(r0, Operand::Zero());
     final_branch_condition = eq;
 
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
-  } else if (String::Equals(type_name, factory->type##_string())) {  \
-    __ JumpIfSmi(input, false_label);                                \
-    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
-    __ CompareRoot(scratch, Heap::k##Type##MapRootIndex);            \
-    final_branch_condition = eq;
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
-
   } else {
     __ b(false_label);
   }
diff --git a/src/crankshaft/s390/lithium-s390.cc b/src/crankshaft/s390/lithium-s390.cc
index 3d14764..79868f5 100644
--- a/src/crankshaft/s390/lithium-s390.cc
+++ b/src/crankshaft/s390/lithium-s390.cc
@@ -619,7 +619,9 @@
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
     LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
-    return DefineSameAsFirst(result);
+    return CpuFeatures::IsSupported(VECTOR_FACILITY)
+               ? DefineAsRegister(result)
+               : DefineSameAsFirst(result);
   }
 }
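The chunk-builder change mirrors the codegen change: the legacy two-operand s390 FP instructions are destructive, so the allocator must pin the result to the first input (DefineSameAsFirst), while the three-operand vector forms leave it free to pick any register (DefineAsRegister). A toy restatement of that constraint choice (types are illustrative):

    enum class ResultConstraint {
      kSameAsFirstInput,  // destructive two-operand form (adbr, sdbr, ...)
      kAnyRegister        // non-destructive three-operand form (vfa, vfs, ...)
    };

    ResultConstraint ForDoubleArithmetic(bool has_vector_facility) {
      return has_vector_facility ? ResultConstraint::kAnyRegister
                                 : ResultConstraint::kSameAsFirstInput;
    }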
 
@@ -1056,7 +1058,7 @@
 }
 
 LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
-  LOperand* input = UseRegisterAtStart(instr->value());
+  LOperand* input = UseAtStart(instr->value());
   LMathSqrt* result = new (zone()) LMathSqrt(input);
   return DefineAsRegister(result);
 }
@@ -1353,12 +1355,6 @@
     DCHECK(instr->left()->representation().Equals(instr->representation()));
     DCHECK(instr->right()->representation().Equals(instr->representation()));
 
-    if (instr->left()->IsConstant() &&
-        !instr->CheckFlag(HValue::kCanOverflow)) {
-      // If lhs is constant, do reverse subtraction instead.
-      return DoRSub(instr);
-    }
-
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     LSubI* sub = new (zone()) LSubI(left, right);
@@ -1374,21 +1370,6 @@
   }
 }
 
-LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
-  DCHECK(instr->representation().IsSmiOrInteger32());
-  DCHECK(instr->left()->representation().Equals(instr->representation()));
-  DCHECK(instr->right()->representation().Equals(instr->representation()));
-  DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
-
-  // Note: The lhs of the subtraction becomes the rhs of the
-  // reverse-subtraction.
-  LOperand* left = UseRegisterAtStart(instr->right());
-  LOperand* right = UseOrConstantAtStart(instr->left());
-  LRSubI* rsb = new (zone()) LRSubI(left, right);
-  LInstruction* result = DefineAsRegister(rsb);
-  return result;
-}
-
 LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
   LOperand* multiplier_op = UseRegister(mul->left());
   LOperand* multiplicand_op = UseRegister(mul->right());
@@ -1697,7 +1678,7 @@
 }
 
 LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* value = UseAtStart(instr->value());
   LInstruction* result = new (zone()) LCheckNonSmi(value);
   if (!instr->value()->type().IsHeapObject()) {
     result = AssignEnvironment(result);
@@ -1706,7 +1687,7 @@
 }
 
 LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
-  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* value = UseAtStart(instr->value());
   return AssignEnvironment(new (zone()) LCheckSmi(value));
 }
 
diff --git a/src/crankshaft/s390/lithium-s390.h b/src/crankshaft/s390/lithium-s390.h
index b946d4f..f9710b1 100644
--- a/src/crankshaft/s390/lithium-s390.h
+++ b/src/crankshaft/s390/lithium-s390.h
@@ -133,7 +133,6 @@
   V(StringCharFromCode)                      \
   V(StringCompareAndBranch)                  \
   V(SubI)                                    \
-  V(RSubI)                                   \
   V(TaggedToI)                               \
   V(ThisFunction)                            \
   V(TransitionElementsKind)                  \
@@ -1090,20 +1089,6 @@
   DECLARE_HYDROGEN_ACCESSOR(Sub)
 };
 
-class LRSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
-  LRSubI(LOperand* left, LOperand* right) {
-    inputs_[0] = left;
-    inputs_[1] = right;
-  }
-
-  LOperand* left() { return inputs_[0]; }
-  LOperand* right() { return inputs_[1]; }
-
-  DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
-  DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
 class LConstantI final : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
@@ -2141,7 +2126,6 @@
 
   LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
   LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
-  LInstruction* DoRSub(HSub* instr);
 
   static bool HasMagicNumberForDivisor(int32_t divisor);
 
diff --git a/src/crankshaft/typing.cc b/src/crankshaft/typing.cc
index f21d235..9713e4f 100644
--- a/src/crankshaft/typing.cc
+++ b/src/crankshaft/typing.cc
@@ -405,7 +405,7 @@
           prop->key()->AsLiteral()->value()->IsInternalizedString() &&
           prop->emit_store()) {
         // Record type feed back for the property.
-        FeedbackVectorSlot slot = prop->GetSlot();
+        FeedbackSlot slot = prop->GetSlot();
         SmallMapList maps;
         oracle()->CollectReceiverTypes(slot, &maps);
         prop->set_receiver_type(maps.length() == 1 ? maps.at(0)
@@ -435,7 +435,7 @@
   // Collect type feedback.
   Property* prop = expr->target()->AsProperty();
   if (prop != NULL) {
-    FeedbackVectorSlot slot = expr->AssignmentSlot();
+    FeedbackSlot slot = expr->AssignmentSlot();
     expr->set_is_uninitialized(oracle()->StoreIsUninitialized(slot));
     if (!expr->IsUninitialized()) {
       SmallMapList* receiver_types = expr->GetReceiverTypes();
@@ -486,7 +486,7 @@
 
 void AstTyper::VisitProperty(Property* expr) {
   // Collect type feedback.
-  FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
+  FeedbackSlot slot = expr->PropertyFeedbackSlot();
   expr->set_inline_cache_state(oracle()->LoadInlineCacheState(slot));
 
   if (!expr->IsUninitialized()) {
@@ -515,7 +515,7 @@
 void AstTyper::VisitCall(Call* expr) {
   // Collect type feedback.
   RECURSE(Visit(expr->expression()));
-  FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
+  FeedbackSlot slot = expr->CallFeedbackICSlot();
   bool is_uninitialized = oracle()->CallIsUninitialized(slot);
   if (!expr->expression()->IsProperty() && oracle()->CallIsMonomorphic(slot)) {
     expr->set_target(oracle()->GetCallTarget(slot));
@@ -541,8 +541,7 @@
 
 void AstTyper::VisitCallNew(CallNew* expr) {
   // Collect type feedback.
-  FeedbackVectorSlot allocation_site_feedback_slot =
-      expr->CallNewFeedbackSlot();
+  FeedbackSlot allocation_site_feedback_slot = expr->CallNewFeedbackSlot();
   expr->set_allocation_site(
       oracle()->GetCallNewAllocationSite(allocation_site_feedback_slot));
   bool monomorphic =
@@ -602,7 +601,7 @@
 
 void AstTyper::VisitCountOperation(CountOperation* expr) {
   // Collect type feedback.
-  FeedbackVectorSlot slot = expr->CountSlot();
+  FeedbackSlot slot = expr->CountSlot();
   KeyedAccessStoreMode store_mode;
   IcCheckType key_type;
   oracle()->GetStoreModeAndKeyType(slot, &store_mode, &key_type);
@@ -756,6 +755,7 @@
   UNREACHABLE();
 }
 
+void AstTyper::VisitGetIterator(GetIterator* expr) { UNREACHABLE(); }
 
 void AstTyper::VisitThisFunction(ThisFunction* expr) {}
 
diff --git a/src/crankshaft/x64/lithium-codegen-x64.cc b/src/crankshaft/x64/lithium-codegen-x64.cc
index 6889040..65816a1 100644
--- a/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -7,11 +7,13 @@
 #include "src/crankshaft/x64/lithium-codegen-x64.h"
 
 #include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/crankshaft/hydrogen-osr.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -179,14 +181,17 @@
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info()->scope()->scope_type());
         __ Set(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
-        __ CallStub(&stub);
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
-        // Result of FastNewFunctionContextStub is always in new space.
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ Push(rdi);
+        __ Push(Smi::FromInt(info()->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
     }
@@ -333,7 +338,8 @@
     // have a function pointer to install in the stack frame that we're
     // building, install a special marker there instead.
     DCHECK(info()->IsStub());
-    __ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));
+    __ movp(MemOperand(rsp, 2 * kPointerSize),
+            Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
 
     /* stack layout
        3: old rbp
@@ -371,7 +377,7 @@
         frame_is_built_ = true;
         // Build the frame in such a way that esi isn't trashed.
         __ pushq(rbp);  // Caller's frame pointer.
-        __ Push(Smi::FromInt(StackFrame::STUB));
+        __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
         __ leap(rbp, Operand(rsp, TypedFrameConstants::kFixedFrameSizeFromFp));
         Comment(";;; Deferred code");
       }
@@ -2061,12 +2067,6 @@
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected & ToBooleanHint::kSimdValue) {
-        // SIMD value -> true.
-        __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
-        __ j(equal, instr->TrueLabel(chunk_));
-      }
-
       if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
@@ -2825,7 +2825,7 @@
       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
       // it needs to bail out.
       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
-      __ Cmp(FieldOperand(result, Cell::kValueOffset),
+      __ Cmp(FieldOperand(result, PropertyCell::kValueOffset),
              Smi::FromInt(Isolate::kProtectorValid));
       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
     }
@@ -2883,8 +2883,8 @@
     // Check for arguments adapter frame.
     Label done, adapted;
     __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-    __ Cmp(Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset),
-           Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+    __ cmpp(Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset),
+            Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
     __ j(equal, &adapted, Label::kNear);
 
     // No arguments adaptor frame.
@@ -2958,9 +2958,9 @@
 
   // Normal function. Replace undefined or null with global receiver.
   __ CompareRoot(receiver, Heap::kNullValueRootIndex);
-  __ j(equal, &global_object, Label::kNear);
+  __ j(equal, &global_object, dist);
   __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &global_object, Label::kNear);
+  __ j(equal, &global_object, dist);
 
   // The receiver should be a JS object.
   Condition is_smi = __ CheckSmi(receiver);
@@ -2968,7 +2968,7 @@
   __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister);
   DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
 
-  __ jmp(&receiver_ok, Label::kNear);
+  __ jmp(&receiver_ok, dist);
   __ bind(&global_object);
   __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
   __ movp(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
@@ -3062,7 +3062,7 @@
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   DCHECK(ToRegister(instr->context()).is(rsi));
-  __ Push(instr->hydrogen()->pairs());
+  __ Push(instr->hydrogen()->declarations());
   __ Push(Smi::FromInt(instr->hydrogen()->flags()));
   __ Push(instr->hydrogen()->feedback_vector());
   CallRuntime(Runtime::kDeclareGlobals, instr);
@@ -3559,8 +3559,8 @@
   Register caller_args_count_reg = scratch1;
   Label no_arguments_adaptor, formal_parameter_count_loaded;
   __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ cmpp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
+          Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &no_arguments_adaptor, Label::kNear);
 
   // Drop current frame and load arguments count from arguments adaptor frame.
@@ -4813,9 +4813,19 @@
 
 
 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  Label deopt, done;
+  // If the map is not deprecated, the migration attempt does not make sense.
+  __ Push(object);
+  __ movp(object, FieldOperand(object, HeapObject::kMapOffset));
+  __ testl(FieldOperand(object, Map::kBitField3Offset),
+           Immediate(Map::Deprecated::kMask));
+  __ Pop(object);
+  __ j(zero, &deopt);
+
   {
     PushSafepointRegistersScope scope(this);
     __ Push(object);
+
     __ Set(rsi, 0);
     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
     RecordSafepointWithRegisters(
@@ -4823,7 +4833,12 @@
 
     __ testp(rax, Immediate(kSmiTagMask));
   }
-  DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
+  __ j(not_zero, &done);
+
+  __ bind(&deopt);
+  DeoptimizeIf(always, instr, DeoptimizeReason::kInstanceMigrationFailed);
+
+  __ bind(&done);
 }
 
 
@@ -5176,17 +5191,6 @@
              Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     final_branch_condition = zero;
 
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)       \
-  } else if (String::Equals(type_name, factory->type##_string())) { \
-    __ JumpIfSmi(input, false_label, false_distance);               \
-    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),     \
-                   Heap::k##Type##MapRootIndex);                    \
-    final_branch_condition = equal;
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
-
   } else {
     __ jmp(false_label, false_distance);
   }
diff --git a/src/crankshaft/x64/lithium-gap-resolver-x64.cc b/src/crankshaft/x64/lithium-gap-resolver-x64.cc
index 94dffb3..38b7d45 100644
--- a/src/crankshaft/x64/lithium-gap-resolver-x64.cc
+++ b/src/crankshaft/x64/lithium-gap-resolver-x64.cc
@@ -7,6 +7,7 @@
 #include "src/crankshaft/x64/lithium-gap-resolver-x64.h"
 
 #include "src/crankshaft/x64/lithium-codegen-x64.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/x64/lithium-x64.cc b/src/crankshaft/x64/lithium-x64.cc
index bc9040b..d0671e9 100644
--- a/src/crankshaft/x64/lithium-x64.cc
+++ b/src/crankshaft/x64/lithium-x64.cc
@@ -11,6 +11,7 @@
 #include "src/crankshaft/hydrogen-osr.h"
 #include "src/crankshaft/lithium-inl.h"
 #include "src/crankshaft/x64/lithium-codegen-x64.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/crankshaft/x87/OWNERS b/src/crankshaft/x87/OWNERS
index dd9998b..61245ae 100644
--- a/src/crankshaft/x87/OWNERS
+++ b/src/crankshaft/x87/OWNERS
@@ -1 +1,2 @@
 weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/src/crankshaft/x87/lithium-codegen-x87.cc b/src/crankshaft/x87/lithium-codegen-x87.cc
index b83d97f..f526a19 100644
--- a/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -7,6 +7,7 @@
 #include "src/crankshaft/x87/lithium-codegen-x87.h"
 
 #include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
@@ -146,15 +147,18 @@
       __ CallRuntime(Runtime::kNewScriptContext);
       deopt_mode = Safepoint::kLazyDeopt;
     } else {
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info()->scope()->scope_type());
         __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
                Immediate(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
-        __ push(edi);
+        __ Push(edi);
+        __ Push(Smi::FromInt(info()->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
     }
@@ -2194,12 +2198,6 @@
         __ j(equal, instr->TrueLabel(chunk_));
       }
 
-      if (expected & ToBooleanHint::kSimdValue) {
-        // SIMD value -> true.
-        __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
-        __ j(equal, instr->TrueLabel(chunk_));
-      }
-
       if (expected & ToBooleanHint::kHeapNumber) {
         // heap number -> false iff +0, -0, or NaN.
         Label not_heap_number;
@@ -3017,7 +3015,16 @@
   // object as a receiver to normal functions. Values have to be
   // passed unchanged to builtins and strict-mode functions.
   Label receiver_ok, global_object;
-  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+  Label::Distance dist;
+
+  // For the x87 debug build the jitted code size exceeds 128 bytes whether or
+  // not FLAG_deopt_every_n_times is set. Always use Label::kFar for the label
+  // distance in debug mode.
+  if (FLAG_debug_code)
+    dist = Label::kFar;
+  else
+    dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+
   Register scratch = ToRegister(instr->temp());
 
   if (!instr->hydrogen()->known_function()) {
@@ -3037,9 +3044,9 @@
 
   // Normal function. Replace undefined or null with global receiver.
   __ cmp(receiver, factory()->null_value());
-  __ j(equal, &global_object, Label::kNear);
+  __ j(equal, &global_object, dist);
   __ cmp(receiver, factory()->undefined_value());
-  __ j(equal, &global_object, Label::kNear);
+  __ j(equal, &global_object, dist);
 
   // The receiver should be a JS object.
   __ test(receiver, Immediate(kSmiTagMask));
@@ -3047,7 +3054,7 @@
   __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
   DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
 
-  __ jmp(&receiver_ok, Label::kNear);
+  __ jmp(&receiver_ok, dist);
   __ bind(&global_object);
   __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
   __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
@@ -3144,7 +3151,7 @@
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   DCHECK(ToRegister(instr->context()).is(esi));
-  __ push(Immediate(instr->hydrogen()->pairs()));
+  __ push(Immediate(instr->hydrogen()->declarations()));
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
   __ push(Immediate(instr->hydrogen()->feedback_vector()));
   CallRuntime(Runtime::kDeclareGlobals, instr);
@@ -4933,6 +4940,15 @@
 
 
 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  Label deopt, done;
+  // If the map is not deprecated, the migration attempt does not make sense.
+  __ push(object);
+  __ mov(object, FieldOperand(object, HeapObject::kMapOffset));
+  __ test(FieldOperand(object, Map::kBitField3Offset),
+          Immediate(Map::Deprecated::kMask));
+  __ pop(object);
+  __ j(zero, &deopt);
+
   {
     PushSafepointRegistersScope scope(this);
     __ push(object);
@@ -4943,7 +4959,12 @@
 
     __ test(eax, Immediate(kSmiTagMask));
   }
-  DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
+  __ j(not_zero, &done);
+
+  __ bind(&deopt);
+  DeoptimizeIf(no_condition, instr, DeoptimizeReason::kInstanceMigrationFailed);
+
+  __ bind(&done);
 }
 
 
@@ -5379,17 +5400,6 @@
               Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     final_branch_condition = zero;
 
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)         \
-  } else if (String::Equals(type_name, factory()->type##_string())) { \
-    __ JumpIfSmi(input, false_label, false_distance);                 \
-    __ cmp(FieldOperand(input, HeapObject::kMapOffset),               \
-           factory()->type##_map());                                  \
-    final_branch_condition = equal;
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
-
   } else {
     __ jmp(false_label, false_distance);
   }
diff --git a/src/d8.cc b/src/d8.cc
index fd9afee..64349f2 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -27,9 +27,14 @@
 #include "src/base/debug/stack_trace.h"
 #include "src/base/logging.h"
 #include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
 #include "src/base/sys-info.h"
 #include "src/basic-block-profiler.h"
+#include "src/debug/debug-interface.h"
 #include "src/interpreter/interpreter.h"
+#include "src/list-inl.h"
+#include "src/msan.h"
+#include "src/objects-inl.h"
 #include "src/snapshot/natives.h"
 #include "src/utils.h"
 #include "src/v8.h"
@@ -61,16 +66,78 @@
 
 const int MB = 1024 * 1024;
 const int kMaxWorkers = 50;
+const int kMaxSerializerMemoryUsage = 1 * MB;  // Arbitrary maximum for testing.
 
+#define USE_VM 1
+#define VM_THRESHOLD 65536
+// TODO(titzer): allocations should fail if >= 2 GB because array
+// buffers store their lengths as a Smi internally.
+#define TWO_GB (2u * 1024u * 1024u * 1024u)
 
 class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
  public:
   virtual void* Allocate(size_t length) {
+#if USE_VM
+    if (RoundToPageSize(&length)) {
+      void* data = VirtualMemoryAllocate(length);
+#if DEBUG
+      if (data) {
+        // In debug mode, check that the memory is zero-initialized.
+        size_t limit = length / sizeof(uint64_t);
+        uint64_t* ptr = reinterpret_cast<uint64_t*>(data);
+        for (size_t i = 0; i < limit; i++) {
+          DCHECK_EQ(0u, ptr[i]);
+        }
+      }
+#endif
+      return data;
+    }
+#endif
     void* data = AllocateUninitialized(length);
     return data == NULL ? data : memset(data, 0, length);
   }
-  virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
-  virtual void Free(void* data, size_t) { free(data); }
+  virtual void* AllocateUninitialized(size_t length) {
+#if USE_VM
+    if (RoundToPageSize(&length)) return VirtualMemoryAllocate(length);
+#endif
+// Workaround for a GCC bug on AIX
+// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
+#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
+    return __linux_malloc(length);
+#else
+    return malloc(length);
+#endif
+  }
+  virtual void Free(void* data, size_t length) {
+#if USE_VM
+    if (RoundToPageSize(&length)) {
+      base::VirtualMemory::ReleaseRegion(data, length);
+      return;
+    }
+#endif
+    free(data);
+  }
+  // If {length} is at least {VM_THRESHOLD}, round it up to the next page
+  // size and return {true}. Otherwise return {false}.
+  bool RoundToPageSize(size_t* length) {
+    const size_t kPageSize = base::OS::CommitPageSize();
+    if (*length >= VM_THRESHOLD && *length < TWO_GB) {
+      *length = ((*length + kPageSize - 1) / kPageSize) * kPageSize;
+      return true;
+    }
+    return false;
+  }
+#if USE_VM
+  void* VirtualMemoryAllocate(size_t length) {
+    void* data = base::VirtualMemory::ReserveRegion(length);
+    if (data && !base::VirtualMemory::CommitRegion(data, length, false)) {
+      base::VirtualMemory::ReleaseRegion(data, length);
+      return nullptr;
+    }
+    MSAN_MEMORY_IS_INITIALIZED(data, length);
+    return data;
+  }
+#endif
 };
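The allocator above serves large ArrayBuffer backing stores straight from virtual memory: any request of at least VM_THRESHOLD bytes (and under 2 GB) is rounded up to a whole number of pages, reserved, and committed, arriving zero-initialized from the OS. The rounding policy in isolation (a standalone restatement, not the d8 class):

    #include <cstddef>

    constexpr size_t kVmThreshold = 65536;  // VM_THRESHOLD above
    constexpr size_t kTwoGB = 2147483648u;  // TWO_GB above

    // Round *length up to a multiple of page_size when the request is large
    // enough to deserve its own mapping; the return value tells the caller
    // whether to take the virtual-memory path.
    bool RoundToPageSize(size_t* length, size_t page_size) {
      if (*length >= kVmThreshold && *length < kTwoGB) {
        *length = ((*length + page_size - 1) / page_size) * page_size;
        return true;
      }
      return false;
    }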
 
 
@@ -160,16 +227,6 @@
 }
 
 
-bool FindInObjectList(Local<Object> object, const Shell::ObjectList& list) {
-  for (int i = 0; i < list.length(); ++i) {
-    if (list[i]->StrictEquals(object)) {
-      return true;
-    }
-  }
-  return false;
-}
-
-
 Worker* GetWorkerFromInternalField(Isolate* isolate, Local<Object> object) {
   if (object->InternalFieldCount() != 1) {
     Throw(isolate, "this is not a Worker");
@@ -352,7 +409,7 @@
 base::LazyMutex Shell::workers_mutex_;
 bool Shell::allow_new_workers_ = true;
 i::List<Worker*> Shell::workers_;
-i::List<SharedArrayBuffer::Contents> Shell::externalized_shared_contents_;
+std::vector<ExternalizedContents> Shell::externalized_contents_;
 
 Global<Context> Shell::evaluation_context_;
 ArrayBuffer::Allocator* Shell::array_buffer_allocator;
@@ -366,12 +423,6 @@
 }
 
 
-// Converts a V8 value to a C string.
-const char* Shell::ToCString(const v8::String::Utf8Value& value) {
-  return *value ? *value : "<string conversion failed>";
-}
-
-
 ScriptCompiler::CachedData* CompileForCachedData(
     Local<String> source, Local<Value> name,
     ScriptCompiler::CompileOptions compile_options) {
@@ -642,7 +693,9 @@
   }
   ScriptOrigin origin(
       String::NewFromUtf8(isolate, file_name.c_str(), NewStringType::kNormal)
-          .ToLocalChecked());
+          .ToLocalChecked(),
+      Local<Integer>(), Local<Integer>(), Local<Boolean>(), Local<Integer>(),
+      Local<Value>(), Local<Boolean>(), Local<Boolean>(), True(isolate));
   ScriptCompiler::Source source(source_text, origin);
   Local<Module> module;
   if (!ScriptCompiler::CompileModule(isolate, &source).ToLocal(&module)) {
@@ -810,20 +863,24 @@
 }
 
 MaybeLocal<Context> Shell::CreateRealm(
-    const v8::FunctionCallbackInfo<v8::Value>& args) {
+    const v8::FunctionCallbackInfo<v8::Value>& args, int index,
+    v8::MaybeLocal<Value> global_object) {
   Isolate* isolate = args.GetIsolate();
   TryCatch try_catch(isolate);
   PerIsolateData* data = PerIsolateData::Get(isolate);
-  Global<Context>* old_realms = data->realms_;
-  int index = data->realm_count_;
-  data->realms_ = new Global<Context>[++data->realm_count_];
-  for (int i = 0; i < index; ++i) {
-    data->realms_[i].Reset(isolate, old_realms[i]);
-    old_realms[i].Reset();
+  if (index < 0) {
+    Global<Context>* old_realms = data->realms_;
+    index = data->realm_count_;
+    data->realms_ = new Global<Context>[++data->realm_count_];
+    for (int i = 0; i < index; ++i) {
+      data->realms_[i].Reset(isolate, old_realms[i]);
+      old_realms[i].Reset();
+    }
+    delete[] old_realms;
   }
-  delete[] old_realms;
   Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
-  Local<Context> context = Context::New(isolate, NULL, global_template);
+  Local<Context> context =
+      Context::New(isolate, NULL, global_template, global_object);
   if (context.IsEmpty()) {
     DCHECK(try_catch.HasCaught());
     try_catch.ReThrow();
@@ -835,10 +892,20 @@
   return context;
 }
 
+void Shell::DisposeRealm(const v8::FunctionCallbackInfo<v8::Value>& args,
+                         int index) {
+  Isolate* isolate = args.GetIsolate();
+  PerIsolateData* data = PerIsolateData::Get(isolate);
+  DisposeModuleEmbedderData(data->realms_[index].Get(isolate));
+  data->realms_[index].Reset();
+  isolate->ContextDisposedNotification();
+  isolate->IdleNotificationDeadline(g_platform->MonotonicallyIncreasingTime());
+}
+
 // Realm.create() creates a new realm with a distinct security token
 // and returns its index.
 void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  CreateRealm(args);
+  CreateRealm(args, -1, v8::MaybeLocal<Value>());
 }
 
 // Realm.createAllowCrossRealmAccess() creates a new realm with the same
@@ -846,12 +913,26 @@
 void Shell::RealmCreateAllowCrossRealmAccess(
     const v8::FunctionCallbackInfo<v8::Value>& args) {
   Local<Context> context;
-  if (CreateRealm(args).ToLocal(&context)) {
+  if (CreateRealm(args, -1, v8::MaybeLocal<Value>()).ToLocal(&context)) {
     context->SetSecurityToken(
         args.GetIsolate()->GetEnteredContext()->GetSecurityToken());
   }
 }
 
+// Realm.navigate(i) creates a new realm with a distinct security token
+// in place of realm i.
+void Shell::RealmNavigate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  Isolate* isolate = args.GetIsolate();
+  PerIsolateData* data = PerIsolateData::Get(isolate);
+  int index = data->RealmIndexOrThrow(args, 0);
+  if (index == -1) return;
+
+  Local<Context> context = Local<Context>::New(isolate, data->realms_[index]);
+  v8::MaybeLocal<Value> global_object = context->Global();
+  DisposeRealm(args, index);
+  CreateRealm(args, index, global_object);
+}
+
 // Realm.dispose(i) disposes of the reference to realm i.
 void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
   Isolate* isolate = args.GetIsolate();
@@ -863,10 +944,7 @@
     Throw(args.GetIsolate(), "Invalid realm index");
     return;
   }
-  DisposeModuleEmbedderData(data->realms_[index].Get(isolate));
-  data->realms_[index].Reset();
-  isolate->ContextDisposedNotification();
-  isolate->IdleNotificationDeadline(g_platform->MonotonicallyIncreasingTime());
+  DisposeRealm(args, index);
 }
 
 
@@ -1096,7 +1174,6 @@
 void Shell::WorkerPostMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
   Isolate* isolate = args.GetIsolate();
   HandleScope handle_scope(isolate);
-  Local<Context> context = isolate->GetCurrentContext();
 
   if (args.Length() < 1) {
     Throw(isolate, "Invalid argument");
@@ -1109,36 +1186,12 @@
   }
 
   Local<Value> message = args[0];
-  ObjectList to_transfer;
-  if (args.Length() >= 2) {
-    if (!args[1]->IsArray()) {
-      Throw(isolate, "Transfer list must be an Array");
-      return;
-    }
-
-    Local<Array> transfer = Local<Array>::Cast(args[1]);
-    uint32_t length = transfer->Length();
-    for (uint32_t i = 0; i < length; ++i) {
-      Local<Value> element;
-      if (transfer->Get(context, i).ToLocal(&element)) {
-        if (!element->IsArrayBuffer() && !element->IsSharedArrayBuffer()) {
-          Throw(isolate,
-                "Transfer array elements must be an ArrayBuffer or "
-                "SharedArrayBuffer.");
-          break;
-        }
-
-        to_transfer.Add(Local<Object>::Cast(element));
-      }
-    }
-  }
-
-  ObjectList seen_objects;
-  SerializationData* data = new SerializationData;
-  if (SerializeValue(isolate, message, to_transfer, &seen_objects, data)) {
-    worker->PostMessage(data);
-  } else {
-    delete data;
+  Local<Value> transfer =
+      args.Length() >= 2 ? args[1] : Local<Value>::Cast(Undefined(isolate));
+  std::unique_ptr<SerializationData> data =
+      Shell::SerializeValue(isolate, message, transfer);
+  if (data) {
+    worker->PostMessage(std::move(data));
   }
 }
 
@@ -1151,14 +1204,12 @@
     return;
   }
 
-  SerializationData* data = worker->GetMessage();
+  std::unique_ptr<SerializationData> data = worker->GetMessage();
   if (data) {
-    int offset = 0;
-    Local<Value> data_value;
-    if (Shell::DeserializeValue(isolate, *data, &offset).ToLocal(&data_value)) {
-      args.GetReturnValue().Set(data_value);
+    Local<Value> value;
+    if (Shell::DeserializeValue(isolate, std::move(data)).ToLocal(&value)) {
+      args.GetReturnValue().Set(value);
     }
-    delete data;
   }
 }
 
@@ -1180,6 +1231,7 @@
                       ->Int32Value(args->GetIsolate()->GetCurrentContext())
                       .FromMaybe(0);
   CleanupWorkers();
+  args->GetIsolate()->Exit();
   OnExit(args->GetIsolate());
   Exit(exit_code);
 }
@@ -1200,12 +1252,17 @@
 
 void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
   HandleScope handle_scope(isolate);
-  Local<Context> context;
-  bool enter_context = !isolate->InContext();
+  Local<Context> context = isolate->GetCurrentContext();
+  bool enter_context = context.IsEmpty();
   if (enter_context) {
     context = Local<Context>::New(isolate, evaluation_context_);
     context->Enter();
   }
+  // Converts a V8 value to a C string.
+  auto ToCString = [](const v8::String::Utf8Value& value) {
+    return *value ? *value : "<string conversion failed>";
+  };
+
   v8::String::Utf8Value exception(try_catch->Exception());
   const char* exception_string = ToCString(exception);
   Local<Message> message = try_catch->Message();
@@ -1213,40 +1270,40 @@
     // V8 didn't provide any extra information about this error; just
     // print the exception.
     printf("%s\n", exception_string);
+  } else if (message->GetScriptOrigin().Options().IsWasm()) {
+    // Print <WASM>[(function index)]+(offset): (message).
+    int function_index = message->GetLineNumber(context).FromJust() - 1;
+    int offset = message->GetStartColumn(context).FromJust();
+    printf("<WASM>[%d]+%d: %s\n", function_index, offset, exception_string);
   } else {
     // Print (filename):(line number): (message).
     v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
     const char* filename_string = ToCString(filename);
-    Maybe<int> maybeline = message->GetLineNumber(isolate->GetCurrentContext());
-    int linenum = maybeline.IsJust() ? maybeline.FromJust() : -1;
+    int linenum = message->GetLineNumber(context).FromMaybe(-1);
     printf("%s:%i: %s\n", filename_string, linenum, exception_string);
     Local<String> sourceline;
-    if (message->GetSourceLine(isolate->GetCurrentContext())
-            .ToLocal(&sourceline)) {
+    if (message->GetSourceLine(context).ToLocal(&sourceline)) {
       // Print line of source code.
       v8::String::Utf8Value sourcelinevalue(sourceline);
       const char* sourceline_string = ToCString(sourcelinevalue);
       printf("%s\n", sourceline_string);
       // Print wavy underline (GetUnderline is deprecated).
-      int start =
-          message->GetStartColumn(isolate->GetCurrentContext()).FromJust();
+      int start = message->GetStartColumn(context).FromJust();
       for (int i = 0; i < start; i++) {
         printf(" ");
       }
-      int end = message->GetEndColumn(isolate->GetCurrentContext()).FromJust();
+      int end = message->GetEndColumn(context).FromJust();
       for (int i = start; i < end; i++) {
         printf("^");
       }
       printf("\n");
     }
-    Local<Value> stack_trace_string;
-    if (try_catch->StackTrace(isolate->GetCurrentContext())
-            .ToLocal(&stack_trace_string) &&
-        stack_trace_string->IsString()) {
-      v8::String::Utf8Value stack_trace(
-          Local<String>::Cast(stack_trace_string));
-      printf("%s\n", ToCString(stack_trace));
-    }
+  }
+  Local<Value> stack_trace_string;
+  if (try_catch->StackTrace(context).ToLocal(&stack_trace_string) &&
+      stack_trace_string->IsString()) {
+    v8::String::Utf8Value stack_trace(Local<String>::Cast(stack_trace_string));
+    printf("%s\n", ToCString(stack_trace));
   }
   printf("\n");
   if (enter_context) context->Exit();
@@ -1455,6 +1512,10 @@
           .ToLocalChecked(),
       FunctionTemplate::New(isolate, RealmCreateAllowCrossRealmAccess));
   realm_template->Set(
+      String::NewFromUtf8(isolate, "navigate", NewStringType::kNormal)
+          .ToLocalChecked(),
+      FunctionTemplate::New(isolate, RealmNavigate));
+  realm_template->Set(
       String::NewFromUtf8(isolate, "dispose", NewStringType::kNormal)
           .ToLocalChecked(),
       FunctionTemplate::New(isolate, RealmDispose));
@@ -1524,9 +1585,43 @@
   return global_template;
 }
 
-static void EmptyMessageCallback(Local<Message> message, Local<Value> error) {
-  // Nothing to be done here, exceptions thrown up to the shell will be reported
+static void PrintNonErrorsMessageCallback(Local<Message> message,
+                                          Local<Value> error) {
+  // Nothing to do here for errors; exceptions thrown up to the shell are
+  // reported separately by {Shell::ReportException} after they are caught.
+  // Do print other kinds of messages.
+  switch (message->ErrorLevel()) {
+    case v8::Isolate::kMessageWarning:
+    case v8::Isolate::kMessageLog:
+    case v8::Isolate::kMessageInfo:
+    case v8::Isolate::kMessageDebug: {
+      break;
+    }
+
+    case v8::Isolate::kMessageError: {
+      // Ignore errors, printed elsewhere.
+      return;
+    }
+
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+  // Converts a V8 value to a C string.
+  auto ToCString = [](const v8::String::Utf8Value& value) {
+    return *value ? *value : "<string conversion failed>";
+  };
+  Isolate* isolate = Isolate::GetCurrent();
+  v8::String::Utf8Value msg(message->Get());
+  const char* msg_string = ToCString(msg);
+  // Print (filename):(line number): (message).
+  v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
+  const char* filename_string = ToCString(filename);
+  Maybe<int> maybeline = message->GetLineNumber(isolate->GetCurrentContext());
+  int linenum = maybeline.IsJust() ? maybeline.FromJust() : -1;
+  printf("%s:%i: %s\n", filename_string, linenum, msg_string);
 }
 
 void Shell::Initialize(Isolate* isolate) {
@@ -1534,7 +1629,11 @@
   if (i::StrLength(i::FLAG_map_counters) != 0)
     MapCounters(isolate, i::FLAG_map_counters);
   // Disable default message reporting.
-  isolate->AddMessageListener(EmptyMessageCallback);
+  isolate->AddMessageListenerWithErrorLevel(
+      PrintNonErrorsMessageCallback,
+      v8::Isolate::kMessageError | v8::Isolate::kMessageWarning |
+          v8::Isolate::kMessageInfo | v8::Isolate::kMessageDebug |
+          v8::Isolate::kMessageLog);
 }
 
 
@@ -1593,9 +1692,69 @@
       JSON::Stringify(context, dispatch_counters).ToLocalChecked());
 }
 
+// Write coverage data in LCOV format. See man page for geninfo(1).
+void Shell::WriteLcovData(v8::Isolate* isolate, const char* file) {
+  if (!file) return;
+  HandleScope handle_scope(isolate);
+  debug::Coverage coverage = debug::Coverage::Collect(isolate, false);
+  std::ofstream sink(file, std::ofstream::app);
+  for (size_t i = 0; i < coverage.ScriptCount(); i++) {
+    debug::Coverage::ScriptData script_data = coverage.GetScriptData(i);
+    Local<debug::Script> script = script_data.GetScript();
+    // Skip unnamed scripts.
+    Local<String> name;
+    if (!script->Name().ToLocal(&name)) continue;
+    std::string file_name = ToSTLString(name);
+    // Skip scripts not backed by a file.
+    if (!std::ifstream(file_name).good()) continue;
+    sink << "SF:";
+    sink << NormalizePath(file_name, GetWorkingDirectory()) << std::endl;
+    std::vector<uint32_t> lines;
+    for (size_t j = 0; j < script_data.FunctionCount(); j++) {
+      debug::Coverage::FunctionData function_data =
+          script_data.GetFunctionData(j);
+      int start_line = function_data.Start().GetLineNumber();
+      int end_line = function_data.End().GetLineNumber();
+      uint32_t count = function_data.Count();
+      // Ensure space in the array.
+      lines.resize(std::max(static_cast<size_t>(end_line + 1), lines.size()),
+                   0);
+      // Boundary lines could be shared between two functions with different
+      // invocation counts. Take the maximum.
+      lines[start_line] = std::max(lines[start_line], count);
+      lines[end_line] = std::max(lines[end_line], count);
+      // Invocation counts for non-boundary lines are overwritten.
+      for (int k = start_line + 1; k < end_line; k++) lines[k] = count;
+      // Write function stats.
+      Local<String> name;
+      std::stringstream name_stream;
+      if (function_data.Name().ToLocal(&name)) {
+        name_stream << ToSTLString(name);
+      } else {
+        name_stream << "<" << start_line + 1 << "-";
+        name_stream << function_data.Start().GetColumnNumber() << ">";
+      }
+      sink << "FN:" << start_line + 1 << "," << name_stream.str() << std::endl;
+      sink << "FNDA:" << count << "," << name_stream.str() << std::endl;
+    }
+    // Write per-line coverage. LCOV uses 1-based line numbers.
+    for (size_t i = 0; i < lines.size(); i++) {
+      sink << "DA:" << (i + 1) << "," << lines[i] << std::endl;
+    }
+    sink << "end_of_record" << std::endl;
+  }
+}
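
The stream written above is plain LCOV tracefile syntax: an SF: line naming the source file, FN:/FNDA: pairs per function, DA: lines per source line, and end_of_record as terminator (output lands in the file passed via the new --lcov=<file> option, parsed further down). A hedged sketch of one such record group, with a hypothetical path, function name, and counts:

    #include <fstream>

    // Emits one illustrative LCOV record group; the path, name and counts
    // are made up for demonstration.
    void WriteExampleLcovRecord(const char* out_path) {
      std::ofstream sink(out_path, std::ofstream::app);
      sink << "SF:/tmp/test.js\n";  // source file
      sink << "FN:1,foo\n";         // function foo starts on line 1
      sink << "FNDA:3,foo\n";       // foo was invoked 3 times
      sink << "DA:1,3\n";           // line 1 executed 3 times
      sink << "DA:2,3\n";
      sink << "end_of_record\n";
    }
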
 
 void Shell::OnExit(v8::Isolate* isolate) {
-  if (i::FLAG_dump_counters) {
+  // Dump basic block profiling data.
+  if (i::BasicBlockProfiler* profiler =
+          reinterpret_cast<i::Isolate*>(isolate)->basic_block_profiler()) {
+    i::OFStream os(stdout);
+    os << *profiler;
+  }
+  isolate->Dispose();
+
+  if (i::FLAG_dump_counters || i::FLAG_dump_counters_nvp) {
     int number_of_counters = 0;
     for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
       number_of_counters++;
@@ -1607,24 +1766,44 @@
       counters[j].key = i.CurrentKey();
     }
     std::sort(counters, counters + number_of_counters);
-    printf("+----------------------------------------------------------------+"
-           "-------------+\n");
-    printf("| Name                                                           |"
-           " Value       |\n");
-    printf("+----------------------------------------------------------------+"
-           "-------------+\n");
-    for (j = 0; j < number_of_counters; j++) {
-      Counter* counter = counters[j].counter;
-      const char* key = counters[j].key;
-      if (counter->is_histogram()) {
-        printf("| c:%-60s | %11i |\n", key, counter->count());
-        printf("| t:%-60s | %11i |\n", key, counter->sample_total());
-      } else {
-        printf("| %-62s | %11i |\n", key, counter->count());
+
+    if (i::FLAG_dump_counters_nvp) {
+      // Dump counters as name-value pairs.
+      for (j = 0; j < number_of_counters; j++) {
+        Counter* counter = counters[j].counter;
+        const char* key = counters[j].key;
+        if (counter->is_histogram()) {
+          printf("\"c:%s\"=%i\n", key, counter->count());
+          printf("\"t:%s\"=%i\n", key, counter->sample_total());
+        } else {
+          printf("\"%s\"=%i\n", key, counter->count());
+        }
       }
+    } else {
+      // Dump counters in formatted boxes.
+      printf(
+          "+----------------------------------------------------------------+"
+          "-------------+\n");
+      printf(
+          "| Name                                                           |"
+          " Value       |\n");
+      printf(
+          "+----------------------------------------------------------------+"
+          "-------------+\n");
+      for (j = 0; j < number_of_counters; j++) {
+        Counter* counter = counters[j].counter;
+        const char* key = counters[j].key;
+        if (counter->is_histogram()) {
+          printf("| c:%-60s | %11i |\n", key, counter->count());
+          printf("| t:%-60s | %11i |\n", key, counter->sample_total());
+        } else {
+          printf("| %-62s | %11i |\n", key, counter->count());
+        }
+      }
+      printf(
+          "+----------------------------------------------------------------+"
+          "-------------+\n");
     }
-    printf("+----------------------------------------------------------------+"
-           "-------------+\n");
     delete [] counters;
   }
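
Given the printf formats above, --dump-counters-nvp emits one quoted name=value pair per line, with each histogram counter split into a c: (count) and a t: (sample total) entry. For hypothetical counters X (a histogram) and Y (a plain counter) the output would look like:

    "c:X"=11
    "t:X"=42
    "Y"=7
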
 
@@ -1633,7 +1812,6 @@
 }
 
 
-
 static FILE* FOpen(const char* path, const char* mode) {
 #if defined(_MSC_VER) && (defined(_WIN32) || defined(_WIN64))
   FILE* result;
@@ -1771,13 +1949,14 @@
   virtual ~InspectorFrontend() = default;
 
  private:
-  void sendProtocolResponse(int callId,
-                            const v8_inspector::StringView& message) override {
-    Send(message);
+  void sendResponse(
+      int callId,
+      std::unique_ptr<v8_inspector::StringBuffer> message) override {
+    Send(message->string());
   }
-  void sendProtocolNotification(
-      const v8_inspector::StringView& message) override {
-    Send(message);
+  void sendNotification(
+      std::unique_ptr<v8_inspector::StringBuffer> message) override {
+    Send(message->string());
   }
   void flushProtocolNotifications() override {}
 
@@ -1806,7 +1985,21 @@
       Local<Value> args[] = {message};
       MaybeLocal<Value> result = Local<Function>::Cast(callback)->Call(
           context, Undefined(isolate_), 1, args);
-      CHECK(!result.IsEmpty());  // Listeners may not throw.
+#ifdef DEBUG
+      if (try_catch.HasCaught()) {
+        Local<Object> exception = Local<Object>::Cast(try_catch.Exception());
+        Local<String> key = v8::String::NewFromUtf8(isolate_, "message",
+                                                    v8::NewStringType::kNormal)
+                                .ToLocalChecked();
+        Local<String> expected =
+            v8::String::NewFromUtf8(isolate_,
+                                    "Maximum call stack size exceeded",
+                                    v8::NewStringType::kNormal)
+                .ToLocalChecked();
+        Local<Value> value = exception->Get(context, key).ToLocalChecked();
+        CHECK(value->StrictEquals(expected));
+      }
+#endif
     }
   }
 
@@ -2016,111 +2209,35 @@
   thread_->Join();
 }
 
-
-SerializationData::~SerializationData() {
-  // Any ArrayBuffer::Contents are owned by this SerializationData object if
-  // ownership hasn't been transferred out via ReadArrayBufferContents.
-  // SharedArrayBuffer::Contents may be used by multiple threads, so must be
-  // cleaned up by the main thread in Shell::CleanupWorkers().
-  for (int i = 0; i < array_buffer_contents_.length(); ++i) {
-    ArrayBuffer::Contents& contents = array_buffer_contents_[i];
-    if (contents.Data()) {
-      Shell::array_buffer_allocator->Free(contents.Data(),
-                                          contents.ByteLength());
-    }
-  }
+ExternalizedContents::~ExternalizedContents() {
+  Shell::array_buffer_allocator->Free(data_, size_);
 }
 
-
-void SerializationData::WriteTag(SerializationTag tag) { data_.Add(tag); }
-
-
-void SerializationData::WriteMemory(const void* p, int length) {
-  if (length > 0) {
-    i::Vector<uint8_t> block = data_.AddBlock(0, length);
-    memcpy(&block[0], p, length);
-  }
-}
-
-
-void SerializationData::WriteArrayBufferContents(
-    const ArrayBuffer::Contents& contents) {
-  array_buffer_contents_.Add(contents);
-  WriteTag(kSerializationTagTransferredArrayBuffer);
-  int index = array_buffer_contents_.length() - 1;
-  Write(index);
-}
-
-
-void SerializationData::WriteSharedArrayBufferContents(
-    const SharedArrayBuffer::Contents& contents) {
-  shared_array_buffer_contents_.Add(contents);
-  WriteTag(kSerializationTagTransferredSharedArrayBuffer);
-  int index = shared_array_buffer_contents_.length() - 1;
-  Write(index);
-}
-
-
-SerializationTag SerializationData::ReadTag(int* offset) const {
-  return static_cast<SerializationTag>(Read<uint8_t>(offset));
-}
-
-
-void SerializationData::ReadMemory(void* p, int length, int* offset) const {
-  if (length > 0) {
-    memcpy(p, &data_[*offset], length);
-    (*offset) += length;
-  }
-}
-
-
-void SerializationData::ReadArrayBufferContents(ArrayBuffer::Contents* contents,
-                                                int* offset) const {
-  int index = Read<int>(offset);
-  DCHECK(index < array_buffer_contents_.length());
-  *contents = array_buffer_contents_[index];
-  // Ownership of this ArrayBuffer::Contents is passed to the caller. Neuter
-  // our copy so it won't be double-free'd when this SerializationData is
-  // destroyed.
-  array_buffer_contents_[index] = ArrayBuffer::Contents();
-}
-
-
-void SerializationData::ReadSharedArrayBufferContents(
-    SharedArrayBuffer::Contents* contents, int* offset) const {
-  int index = Read<int>(offset);
-  DCHECK(index < shared_array_buffer_contents_.length());
-  *contents = shared_array_buffer_contents_[index];
-}
-
-
-void SerializationDataQueue::Enqueue(SerializationData* data) {
+void SerializationDataQueue::Enqueue(std::unique_ptr<SerializationData> data) {
   base::LockGuard<base::Mutex> lock_guard(&mutex_);
-  data_.Add(data);
+  data_.push_back(std::move(data));
 }
 
-
-bool SerializationDataQueue::Dequeue(SerializationData** data) {
+bool SerializationDataQueue::Dequeue(
+    std::unique_ptr<SerializationData>* out_data) {
+  out_data->reset();
   base::LockGuard<base::Mutex> lock_guard(&mutex_);
-  *data = NULL;
-  if (data_.is_empty()) return false;
-  *data = data_.Remove(0);
+  if (data_.empty()) return false;
+  *out_data = std::move(data_[0]);
+  data_.erase(data_.begin());
   return true;
 }
 
 
 bool SerializationDataQueue::IsEmpty() {
   base::LockGuard<base::Mutex> lock_guard(&mutex_);
-  return data_.is_empty();
+  return data_.empty();
 }
 
 
 void SerializationDataQueue::Clear() {
   base::LockGuard<base::Mutex> lock_guard(&mutex_);
-  for (int i = 0; i < data_.length(); ++i) {
-    delete data_[i];
-  }
-  data_.Clear();
+  data_.clear();
 }
 
 
@@ -2149,22 +2266,20 @@
   thread_->Start();
 }
 
-
-void Worker::PostMessage(SerializationData* data) {
-  in_queue_.Enqueue(data);
+void Worker::PostMessage(std::unique_ptr<SerializationData> data) {
+  in_queue_.Enqueue(std::move(data));
   in_semaphore_.Signal();
 }
 
-
-SerializationData* Worker::GetMessage() {
-  SerializationData* data = NULL;
-  while (!out_queue_.Dequeue(&data)) {
+std::unique_ptr<SerializationData> Worker::GetMessage() {
+  std::unique_ptr<SerializationData> result;
+  while (!out_queue_.Dequeue(&result)) {
     // If the worker is no longer running, and there are no messages in the
     // queue, don't expect any more messages from it.
     if (!base::NoBarrier_Load(&running_)) break;
     out_semaphore_.Wait();
   }
-  return data;
+  return result;
 }
 
 
@@ -2228,19 +2343,21 @@
             // Now wait for messages
             while (true) {
               in_semaphore_.Wait();
-              SerializationData* data;
+              std::unique_ptr<SerializationData> data;
               if (!in_queue_.Dequeue(&data)) continue;
-              if (data == NULL) {
+              if (!data) {
                 break;
               }
-              int offset = 0;
-              Local<Value> data_value;
-              if (Shell::DeserializeValue(isolate, *data, &offset)
-                      .ToLocal(&data_value)) {
-                Local<Value> argv[] = {data_value};
+              v8::TryCatch try_catch(isolate);
+              Local<Value> value;
+              if (Shell::DeserializeValue(isolate, std::move(data))
+                      .ToLocal(&value)) {
+                Local<Value> argv[] = {value};
                 (void)onmessage_fun->Call(context, global, 1, argv);
               }
-              delete data;
+              if (try_catch.HasCaught()) {
+                Shell::ReportException(isolate, &try_catch);
+              }
             }
           }
         }
@@ -2267,21 +2384,15 @@
   }
 
   Local<Value> message = args[0];
-
-  // TODO(binji): Allow transferring from worker to main thread?
-  Shell::ObjectList to_transfer;
-
-  Shell::ObjectList seen_objects;
-  SerializationData* data = new SerializationData;
-  if (Shell::SerializeValue(isolate, message, to_transfer, &seen_objects,
-                            data)) {
+  Local<Value> transfer = Undefined(isolate);
+  std::unique_ptr<SerializationData> data =
+      Shell::SerializeValue(isolate, message, transfer);
+  if (data) {
     DCHECK(args.Data()->IsExternal());
     Local<External> this_value = Local<External>::Cast(args.Data());
     Worker* worker = static_cast<Worker*>(this_value->Value());
-    worker->out_queue_.Enqueue(data);
+    worker->out_queue_.Enqueue(std::move(data));
     worker->out_semaphore_.Signal();
-  } else {
-    delete data;
   }
 }
 
@@ -2297,7 +2408,8 @@
     if (strcmp(argv[i], "--stress-opt") == 0) {
       options.stress_opt = true;
       argv[i] = NULL;
-    } else if (strcmp(argv[i], "--nostress-opt") == 0) {
+    } else if (strcmp(argv[i], "--nostress-opt") == 0 ||
+               strcmp(argv[i], "--no-stress-opt") == 0) {
       options.stress_opt = false;
       argv[i] = NULL;
     } else if (strcmp(argv[i], "--stress-deopt") == 0) {
@@ -2306,7 +2418,8 @@
     } else if (strcmp(argv[i], "--mock-arraybuffer-allocator") == 0) {
       options.mock_arraybuffer_allocator = true;
       argv[i] = NULL;
-    } else if (strcmp(argv[i], "--noalways-opt") == 0) {
+    } else if (strcmp(argv[i], "--noalways-opt") == 0 ||
+               strcmp(argv[i], "--no-always-opt") == 0) {
       // No support for stressing if we can't use --always-opt.
       options.stress_opt = false;
       options.stress_deopt = false;
@@ -2380,6 +2493,9 @@
     } else if (strcmp(argv[i], "--enable-inspector") == 0) {
       options.enable_inspector = true;
       argv[i] = NULL;
+    } else if (strncmp(argv[i], "--lcov=", 7) == 0) {
+      options.lcov_file = argv[i] + 7;
+      argv[i] = NULL;
     }
   }
 
@@ -2421,6 +2537,7 @@
     options.isolate_sources[i].StartExecuteInThread();
   }
   {
+    if (options.lcov_file) debug::Coverage::TogglePrecise(isolate, true);
     HandleScope scope(isolate);
     Local<Context> context = CreateEvaluationContext(isolate);
     if (last_run && options.use_interactive_shell()) {
@@ -2434,6 +2551,7 @@
       options.isolate_sources[0].Execute(isolate);
     }
     DisposeModuleEmbedderData(context);
+    WriteLcovData(isolate, options.lcov_file);
   }
   CollectGarbage(isolate);
   for (int i = 1; i < options.num_isolates; ++i) {
@@ -2467,237 +2585,214 @@
 void Shell::EmptyMessageQueues(Isolate* isolate) {
   if (!i::FLAG_verify_predictable) {
     while (v8::platform::PumpMessageLoop(g_platform, isolate)) continue;
+    v8::platform::RunIdleTasks(g_platform, isolate,
+                               50.0 / base::Time::kMillisecondsPerSecond);
   }
 }
 
+class Serializer : public ValueSerializer::Delegate {
+ public:
+  explicit Serializer(Isolate* isolate)
+      : isolate_(isolate),
+        serializer_(isolate, this),
+        current_memory_usage_(0) {}
 
-bool Shell::SerializeValue(Isolate* isolate, Local<Value> value,
-                           const ObjectList& to_transfer,
-                           ObjectList* seen_objects,
-                           SerializationData* out_data) {
-  DCHECK(out_data);
-  Local<Context> context = isolate->GetCurrentContext();
+  Maybe<bool> WriteValue(Local<Context> context, Local<Value> value,
+                         Local<Value> transfer) {
+    bool ok;
+    DCHECK(!data_);
+    data_.reset(new SerializationData);
+    if (!PrepareTransfer(context, transfer).To(&ok)) {
+      return Nothing<bool>();
+    }
+    serializer_.WriteHeader();
 
-  if (value->IsUndefined()) {
-    out_data->WriteTag(kSerializationTagUndefined);
-  } else if (value->IsNull()) {
-    out_data->WriteTag(kSerializationTagNull);
-  } else if (value->IsTrue()) {
-    out_data->WriteTag(kSerializationTagTrue);
-  } else if (value->IsFalse()) {
-    out_data->WriteTag(kSerializationTagFalse);
-  } else if (value->IsNumber()) {
-    Local<Number> num = Local<Number>::Cast(value);
-    double value = num->Value();
-    out_data->WriteTag(kSerializationTagNumber);
-    out_data->Write(value);
-  } else if (value->IsString()) {
-    v8::String::Utf8Value str(value);
-    out_data->WriteTag(kSerializationTagString);
-    out_data->Write(str.length());
-    out_data->WriteMemory(*str, str.length());
-  } else if (value->IsArray()) {
-    Local<Array> array = Local<Array>::Cast(value);
-    if (FindInObjectList(array, *seen_objects)) {
-      Throw(isolate, "Duplicated arrays not supported");
-      return false;
-    }
-    seen_objects->Add(array);
-    out_data->WriteTag(kSerializationTagArray);
-    uint32_t length = array->Length();
-    out_data->Write(length);
-    for (uint32_t i = 0; i < length; ++i) {
-      Local<Value> element_value;
-      if (array->Get(context, i).ToLocal(&element_value)) {
-        if (!SerializeValue(isolate, element_value, to_transfer, seen_objects,
-                            out_data))
-          return false;
-      } else {
-        Throw(isolate, "Failed to serialize array element.");
-        return false;
-      }
-    }
-  } else if (value->IsArrayBuffer()) {
-    Local<ArrayBuffer> array_buffer = Local<ArrayBuffer>::Cast(value);
-    if (FindInObjectList(array_buffer, *seen_objects)) {
-      Throw(isolate, "Duplicated array buffers not supported");
-      return false;
-    }
-    seen_objects->Add(array_buffer);
-    if (FindInObjectList(array_buffer, to_transfer)) {
-      // Transfer ArrayBuffer
-      if (!array_buffer->IsNeuterable()) {
-        Throw(isolate, "Attempting to transfer an un-neuterable ArrayBuffer");
-        return false;
-      }
-
-      ArrayBuffer::Contents contents = array_buffer->IsExternal()
-                                           ? array_buffer->GetContents()
-                                           : array_buffer->Externalize();
-      array_buffer->Neuter();
-      out_data->WriteArrayBufferContents(contents);
-    } else {
-      ArrayBuffer::Contents contents = array_buffer->GetContents();
-      // Clone ArrayBuffer
-      if (contents.ByteLength() > i::kMaxInt) {
-        Throw(isolate, "ArrayBuffer is too big to clone");
-        return false;
-      }
-
-      int32_t byte_length = static_cast<int32_t>(contents.ByteLength());
-      out_data->WriteTag(kSerializationTagArrayBuffer);
-      out_data->Write(byte_length);
-      out_data->WriteMemory(contents.Data(), byte_length);
-    }
-  } else if (value->IsSharedArrayBuffer()) {
-    Local<SharedArrayBuffer> sab = Local<SharedArrayBuffer>::Cast(value);
-    if (FindInObjectList(sab, *seen_objects)) {
-      Throw(isolate, "Duplicated shared array buffers not supported");
-      return false;
-    }
-    seen_objects->Add(sab);
-    if (!FindInObjectList(sab, to_transfer)) {
-      Throw(isolate, "SharedArrayBuffer must be transferred");
-      return false;
+    if (!serializer_.WriteValue(context, value).To(&ok)) {
+      data_.reset();
+      return Nothing<bool>();
     }
 
-    SharedArrayBuffer::Contents contents;
-    if (sab->IsExternal()) {
-      contents = sab->GetContents();
-    } else {
-      contents = sab->Externalize();
-      base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
-      externalized_shared_contents_.Add(contents);
-    }
-    out_data->WriteSharedArrayBufferContents(contents);
-  } else if (value->IsObject()) {
-    Local<Object> object = Local<Object>::Cast(value);
-    if (FindInObjectList(object, *seen_objects)) {
-      Throw(isolate, "Duplicated objects not supported");
-      return false;
-    }
-    seen_objects->Add(object);
-    Local<Array> property_names;
-    if (!object->GetOwnPropertyNames(context).ToLocal(&property_names)) {
-      Throw(isolate, "Unable to get property names");
-      return false;
+    if (!FinalizeTransfer().To(&ok)) {
+      return Nothing<bool>();
     }
 
-    uint32_t length = property_names->Length();
-    out_data->WriteTag(kSerializationTagObject);
-    out_data->Write(length);
-    for (uint32_t i = 0; i < length; ++i) {
-      Local<Value> name;
-      Local<Value> property_value;
-      if (property_names->Get(context, i).ToLocal(&name) &&
-          object->Get(context, name).ToLocal(&property_value)) {
-        if (!SerializeValue(isolate, name, to_transfer, seen_objects, out_data))
-          return false;
-        if (!SerializeValue(isolate, property_value, to_transfer, seen_objects,
-                            out_data))
-          return false;
-      } else {
-        Throw(isolate, "Failed to serialize property.");
-        return false;
-      }
-    }
-  } else {
-    Throw(isolate, "Don't know how to serialize object");
-    return false;
+    std::pair<uint8_t*, size_t> pair = serializer_.Release();
+    data_->data_.reset(pair.first);
+    data_->size_ = pair.second;
+    return Just(true);
   }
 
-  return true;
-}
+  std::unique_ptr<SerializationData> Release() { return std::move(data_); }
 
+ protected:
+  // Implements ValueSerializer::Delegate.
+  void ThrowDataCloneError(Local<String> message) override {
+    isolate_->ThrowException(Exception::Error(message));
+  }
 
-MaybeLocal<Value> Shell::DeserializeValue(Isolate* isolate,
-                                          const SerializationData& data,
-                                          int* offset) {
-  DCHECK(offset);
-  EscapableHandleScope scope(isolate);
-  Local<Value> result;
-  SerializationTag tag = data.ReadTag(offset);
-
-  switch (tag) {
-    case kSerializationTagUndefined:
-      result = Undefined(isolate);
-      break;
-    case kSerializationTagNull:
-      result = Null(isolate);
-      break;
-    case kSerializationTagTrue:
-      result = True(isolate);
-      break;
-    case kSerializationTagFalse:
-      result = False(isolate);
-      break;
-    case kSerializationTagNumber:
-      result = Number::New(isolate, data.Read<double>(offset));
-      break;
-    case kSerializationTagString: {
-      int length = data.Read<int>(offset);
-      CHECK(length >= 0);
-      std::vector<char> buffer(length + 1);  // + 1 so it is never empty.
-      data.ReadMemory(&buffer[0], length, offset);
-      MaybeLocal<String> str =
-          String::NewFromUtf8(isolate, &buffer[0], NewStringType::kNormal,
-                              length).ToLocalChecked();
-      if (!str.IsEmpty()) result = str.ToLocalChecked();
-      break;
+  Maybe<uint32_t> GetSharedArrayBufferId(
+      Isolate* isolate, Local<SharedArrayBuffer> shared_array_buffer) override {
+    DCHECK(data_ != nullptr);
+    for (size_t index = 0; index < shared_array_buffers_.size(); ++index) {
+      if (shared_array_buffers_[index] == shared_array_buffer) {
+        return Just<uint32_t>(static_cast<uint32_t>(index));
+      }
     }
-    case kSerializationTagArray: {
-      uint32_t length = data.Read<uint32_t>(offset);
-      Local<Array> array = Array::New(isolate, length);
+
+    size_t index = shared_array_buffers_.size();
+    shared_array_buffers_.emplace_back(isolate_, shared_array_buffer);
+    return Just<uint32_t>(static_cast<uint32_t>(index));
+  }
+
+  void* ReallocateBufferMemory(void* old_buffer, size_t size,
+                               size_t* actual_size) override {
+    // Not accurate, because we don't take into account reallocated buffers,
+    // but this is fine for testing.
+    current_memory_usage_ += size;
+    if (current_memory_usage_ > kMaxSerializerMemoryUsage) return nullptr;
+
+    void* result = realloc(old_buffer, size);
+    *actual_size = result ? size : 0;
+    return result;
+  }
+
+  void FreeBufferMemory(void* buffer) override { free(buffer); }
+
+ private:
+  Maybe<bool> PrepareTransfer(Local<Context> context, Local<Value> transfer) {
+    if (transfer->IsArray()) {
+      Local<Array> transfer_array = Local<Array>::Cast(transfer);
+      uint32_t length = transfer_array->Length();
       for (uint32_t i = 0; i < length; ++i) {
-        Local<Value> element_value;
-        CHECK(DeserializeValue(isolate, data, offset).ToLocal(&element_value));
-        array->Set(isolate->GetCurrentContext(), i, element_value).FromJust();
+        Local<Value> element;
+        if (transfer_array->Get(context, i).ToLocal(&element)) {
+          if (!element->IsArrayBuffer()) {
+            Throw(isolate_, "Transfer array elements must be an ArrayBuffer");
+            break;
+          }
+
+          Local<ArrayBuffer> array_buffer = Local<ArrayBuffer>::Cast(element);
+          serializer_.TransferArrayBuffer(
+              static_cast<uint32_t>(array_buffers_.size()), array_buffer);
+          array_buffers_.emplace_back(isolate_, array_buffer);
+        } else {
+          return Nothing<bool>();
+        }
       }
-      result = array;
-      break;
+      return Just(true);
+    } else if (transfer->IsUndefined()) {
+      return Just(true);
+    } else {
+      Throw(isolate_, "Transfer list must be an Array or undefined");
+      return Nothing<bool>();
     }
-    case kSerializationTagObject: {
-      int length = data.Read<int>(offset);
-      Local<Object> object = Object::New(isolate);
-      for (int i = 0; i < length; ++i) {
-        Local<Value> property_name;
-        CHECK(DeserializeValue(isolate, data, offset).ToLocal(&property_name));
-        Local<Value> property_value;
-        CHECK(DeserializeValue(isolate, data, offset).ToLocal(&property_value));
-        object->Set(isolate->GetCurrentContext(), property_name, property_value)
-            .FromJust();
-      }
-      result = object;
-      break;
-    }
-    case kSerializationTagArrayBuffer: {
-      int32_t byte_length = data.Read<int32_t>(offset);
-      Local<ArrayBuffer> array_buffer = ArrayBuffer::New(isolate, byte_length);
-      ArrayBuffer::Contents contents = array_buffer->GetContents();
-      DCHECK(static_cast<size_t>(byte_length) == contents.ByteLength());
-      data.ReadMemory(contents.Data(), byte_length, offset);
-      result = array_buffer;
-      break;
-    }
-    case kSerializationTagTransferredArrayBuffer: {
-      ArrayBuffer::Contents contents;
-      data.ReadArrayBufferContents(&contents, offset);
-      result = ArrayBuffer::New(isolate, contents.Data(), contents.ByteLength(),
-                                ArrayBufferCreationMode::kInternalized);
-      break;
-    }
-    case kSerializationTagTransferredSharedArrayBuffer: {
-      SharedArrayBuffer::Contents contents;
-      data.ReadSharedArrayBufferContents(&contents, offset);
-      result = SharedArrayBuffer::New(isolate, contents.Data(),
-                                      contents.ByteLength());
-      break;
-    }
-    default:
-      UNREACHABLE();
   }
 
-  return scope.Escape(result);
+  template <typename T>
+  typename T::Contents MaybeExternalize(Local<T> array_buffer) {
+    if (array_buffer->IsExternal()) {
+      return array_buffer->GetContents();
+    } else {
+      typename T::Contents contents = array_buffer->Externalize();
+      data_->externalized_contents_.emplace_back(contents);
+      return contents;
+    }
+  }
+
+  Maybe<bool> FinalizeTransfer() {
+    for (const auto& global_array_buffer : array_buffers_) {
+      Local<ArrayBuffer> array_buffer =
+          Local<ArrayBuffer>::New(isolate_, global_array_buffer);
+      if (!array_buffer->IsNeuterable()) {
+        Throw(isolate_, "ArrayBuffer could not be transferred");
+        return Nothing<bool>();
+      }
+
+      ArrayBuffer::Contents contents = MaybeExternalize(array_buffer);
+      array_buffer->Neuter();
+      data_->array_buffer_contents_.push_back(contents);
+    }
+
+    for (const auto& global_shared_array_buffer : shared_array_buffers_) {
+      Local<SharedArrayBuffer> shared_array_buffer =
+          Local<SharedArrayBuffer>::New(isolate_, global_shared_array_buffer);
+      data_->shared_array_buffer_contents_.push_back(
+          MaybeExternalize(shared_array_buffer));
+    }
+
+    return Just(true);
+  }
+
+  Isolate* isolate_;
+  ValueSerializer serializer_;
+  std::unique_ptr<SerializationData> data_;
+  std::vector<Global<ArrayBuffer>> array_buffers_;
+  std::vector<Global<SharedArrayBuffer>> shared_array_buffers_;
+  size_t current_memory_usage_;
+
+  DISALLOW_COPY_AND_ASSIGN(Serializer);
+};
+
+class Deserializer : public ValueDeserializer::Delegate {
+ public:
+  Deserializer(Isolate* isolate, std::unique_ptr<SerializationData> data)
+      : isolate_(isolate),
+        deserializer_(isolate, data->data(), data->size(), this),
+        data_(std::move(data)) {
+    deserializer_.SetSupportsLegacyWireFormat(true);
+  }
+
+  MaybeLocal<Value> ReadValue(Local<Context> context) {
+    bool read_header;
+    if (!deserializer_.ReadHeader(context).To(&read_header)) {
+      return MaybeLocal<Value>();
+    }
+
+    uint32_t index = 0;
+    for (const auto& contents : data_->array_buffer_contents()) {
+      Local<ArrayBuffer> array_buffer =
+          ArrayBuffer::New(isolate_, contents.Data(), contents.ByteLength());
+      deserializer_.TransferArrayBuffer(index++, array_buffer);
+    }
+
+    index = 0;
+    for (const auto& contents : data_->shared_array_buffer_contents()) {
+      Local<SharedArrayBuffer> shared_array_buffer = SharedArrayBuffer::New(
+          isolate_, contents.Data(), contents.ByteLength());
+      deserializer_.TransferSharedArrayBuffer(index++, shared_array_buffer);
+    }
+
+    return deserializer_.ReadValue(context);
+  }
+
+ private:
+  Isolate* isolate_;
+  ValueDeserializer deserializer_;
+  std::unique_ptr<SerializationData> data_;
+
+  DISALLOW_COPY_AND_ASSIGN(Deserializer);
+};
+
+std::unique_ptr<SerializationData> Shell::SerializeValue(
+    Isolate* isolate, Local<Value> value, Local<Value> transfer) {
+  bool ok;
+  Local<Context> context = isolate->GetCurrentContext();
+  Serializer serializer(isolate);
+  if (serializer.WriteValue(context, value, transfer).To(&ok)) {
+    std::unique_ptr<SerializationData> data = serializer.Release();
+    base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
+    data->AppendExternalizedContentsTo(&externalized_contents_);
+    return data;
+  }
+  return nullptr;
+}
+
+MaybeLocal<Value> Shell::DeserializeValue(
+    Isolate* isolate, std::unique_ptr<SerializationData> data) {
+  Local<Context> context = isolate->GetCurrentContext();
+  Deserializer deserializer(isolate, std::move(data));
+  return deserializer.ReadValue(context);
 }
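
Both helpers are thin wrappers over V8's public ValueSerializer/ValueDeserializer API. A minimal round trip through that API (no transfer list, no custom delegate) looks roughly like this; a sketch under those assumptions, not the shell's exact code:

    #include <cstdlib>
    #include <utility>
    #include "v8.h"  // adjust the include path to your embedding setup

    // Serialize a value and read it straight back in the same context;
    // any failure collapses to an empty handle.
    v8::MaybeLocal<v8::Value> RoundTrip(v8::Isolate* isolate,
                                        v8::Local<v8::Context> context,
                                        v8::Local<v8::Value> value) {
      v8::ValueSerializer serializer(isolate);
      serializer.WriteHeader();
      if (serializer.WriteValue(context, value).IsNothing()) {
        return v8::MaybeLocal<v8::Value>();
      }
      std::pair<uint8_t*, size_t> buffer = serializer.Release();

      v8::ValueDeserializer deserializer(isolate, buffer.first, buffer.second);
      v8::MaybeLocal<v8::Value> result;
      if (deserializer.ReadHeader(context).FromMaybe(false)) {
        result = deserializer.ReadValue(context);
      }
      free(buffer.first);  // Release() hands buffer ownership to the caller.
      return result;
    }
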
 
 
@@ -2722,13 +2817,7 @@
   // Now that all workers are terminated, we can re-enable Worker creation.
   base::LockGuard<base::Mutex> lock_guard(workers_mutex_.Pointer());
   allow_new_workers_ = true;
-
-  for (int i = 0; i < externalized_shared_contents_.length(); ++i) {
-    const SharedArrayBuffer::Contents& contents =
-        externalized_shared_contents_[i];
-    Shell::array_buffer_allocator->Free(contents.Data(), contents.ByteLength());
-  }
-  externalized_shared_contents_.Clear();
+  externalized_contents_.clear();
 }
 
 
@@ -2854,7 +2943,7 @@
       base::SysInfo::AmountOfVirtualMemory());
 
   Shell::counter_map_ = new CounterMap();
-  if (i::FLAG_dump_counters || i::FLAG_gc_stats) {
+  if (i::FLAG_dump_counters || i::FLAG_dump_counters_nvp || i::FLAG_gc_stats) {
     create_params.counter_lookup_callback = LookupCounter;
     create_params.create_histogram_callback = CreateHistogram;
     create_params.add_histogram_sample_callback = AddHistogramSample;
@@ -2931,13 +3020,6 @@
     CollectGarbage(isolate);
   }
   OnExit(isolate);
-  // Dump basic block profiling data.
-  if (i::BasicBlockProfiler* profiler =
-          reinterpret_cast<i::Isolate*>(isolate)->basic_block_profiler()) {
-    i::OFStream os(stdout);
-    os << *profiler;
-  }
-  isolate->Dispose();
   V8::Dispose();
   V8::ShutdownPlatform();
   delete g_platform;
diff --git a/src/d8.h b/src/d8.h
index 5e7abaf..21e4c4f 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -5,12 +5,16 @@
 #ifndef V8_D8_H_
 #define V8_D8_H_
 
+#include <iterator>
+#include <memory>
 #include <string>
+#include <vector>
 
 #include "src/allocation.h"
 #include "src/base/hashmap.h"
 #include "src/base/platform/time.h"
 #include "src/list.h"
+#include "src/utils.h"
 
 #include "src/base/once.h"
 
@@ -142,68 +146,86 @@
   int end_offset_;
 };
 
-enum SerializationTag {
-  kSerializationTagUndefined,
-  kSerializationTagNull,
-  kSerializationTagTrue,
-  kSerializationTagFalse,
-  kSerializationTagNumber,
-  kSerializationTagString,
-  kSerializationTagArray,
-  kSerializationTagObject,
-  kSerializationTagArrayBuffer,
-  kSerializationTagTransferredArrayBuffer,
-  kSerializationTagTransferredSharedArrayBuffer,
-};
+// The backing store of an ArrayBuffer or SharedArrayBuffer, after
+// Externalize() has been called on it.
+class ExternalizedContents {
+ public:
+  explicit ExternalizedContents(const ArrayBuffer::Contents& contents)
+      : data_(contents.Data()), size_(contents.ByteLength()) {}
+  explicit ExternalizedContents(const SharedArrayBuffer::Contents& contents)
+      : data_(contents.Data()), size_(contents.ByteLength()) {}
+  ExternalizedContents(ExternalizedContents&& other)
+      : data_(other.data_), size_(other.size_) {
+    other.data_ = nullptr;
+    other.size_ = 0;
+  }
+  ExternalizedContents& operator=(ExternalizedContents&& other) {
+    if (this != &other) {
+      data_ = other.data_;
+      size_ = other.size_;
+      other.data_ = nullptr;
+      other.size_ = 0;
+    }
+    return *this;
+  }
+  ~ExternalizedContents();
 
+ private:
+  void* data_;
+  size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(ExternalizedContents);
+};
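
The move operations are what make a std::vector of these safe to grow and hand around: each transfer nulls out the source, so only the final owner's destructor calls Free(). The same idiom in self-contained form, with malloc/free standing in for Shell::array_buffer_allocator (note that the sketch also frees the old buffer on move-assignment, which the class above skips; assigning over a live ExternalizedContents would leak):

    #include <cstdlib>

    // Sketch of the move-only buffer-owner idiom.
    class OwnedBlock {
     public:
      explicit OwnedBlock(size_t size) : data_(malloc(size)), size_(size) {}
      OwnedBlock(OwnedBlock&& other) : data_(other.data_), size_(other.size_) {
        other.data_ = nullptr;
        other.size_ = 0;
      }
      OwnedBlock& operator=(OwnedBlock&& other) {
        if (this != &other) {
          free(data_);  // release the current buffer before adopting the new one
          data_ = other.data_;
          size_ = other.size_;
          other.data_ = nullptr;
          other.size_ = 0;
        }
        return *this;
      }
      ~OwnedBlock() { free(data_); }  // free(nullptr) is a no-op
      OwnedBlock(const OwnedBlock&) = delete;
      OwnedBlock& operator=(const OwnedBlock&) = delete;

     private:
      void* data_;
      size_t size_;
    };
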
 
 class SerializationData {
  public:
-  SerializationData() {}
-  ~SerializationData();
+  SerializationData() : size_(0) {}
 
-  void WriteTag(SerializationTag tag);
-  void WriteMemory(const void* p, int length);
-  void WriteArrayBufferContents(const ArrayBuffer::Contents& contents);
-  void WriteSharedArrayBufferContents(
-      const SharedArrayBuffer::Contents& contents);
-
-  template <typename T>
-  void Write(const T& data) {
-    WriteMemory(&data, sizeof(data));
+  uint8_t* data() { return data_.get(); }
+  size_t size() { return size_; }
+  const std::vector<ArrayBuffer::Contents>& array_buffer_contents() {
+    return array_buffer_contents_;
+  }
+  const std::vector<SharedArrayBuffer::Contents>&
+  shared_array_buffer_contents() {
+    return shared_array_buffer_contents_;
   }
 
-  SerializationTag ReadTag(int* offset) const;
-  void ReadMemory(void* p, int length, int* offset) const;
-  void ReadArrayBufferContents(ArrayBuffer::Contents* contents,
-                               int* offset) const;
-  void ReadSharedArrayBufferContents(SharedArrayBuffer::Contents* contents,
-                                     int* offset) const;
-
-  template <typename T>
-  T Read(int* offset) const {
-    T value;
-    ReadMemory(&value, sizeof(value), offset);
-    return value;
+  void AppendExternalizedContentsTo(std::vector<ExternalizedContents>* to) {
+    to->insert(to->end(),
+               std::make_move_iterator(externalized_contents_.begin()),
+               std::make_move_iterator(externalized_contents_.end()));
+    externalized_contents_.clear();
   }
 
  private:
-  i::List<uint8_t> data_;
-  i::List<ArrayBuffer::Contents> array_buffer_contents_;
-  i::List<SharedArrayBuffer::Contents> shared_array_buffer_contents_;
+  struct DataDeleter {
+    void operator()(uint8_t* p) const { free(p); }
+  };
+
+  std::unique_ptr<uint8_t, DataDeleter> data_;
+  size_t size_;
+  std::vector<ArrayBuffer::Contents> array_buffer_contents_;
+  std::vector<SharedArrayBuffer::Contents> shared_array_buffer_contents_;
+  std::vector<ExternalizedContents> externalized_contents_;
+
+  friend class Serializer;
+
+  DISALLOW_COPY_AND_ASSIGN(SerializationData);
 };
 
 
 class SerializationDataQueue {
  public:
-  void Enqueue(SerializationData* data);
-  bool Dequeue(SerializationData** data);
+  void Enqueue(std::unique_ptr<SerializationData> data);
+  bool Dequeue(std::unique_ptr<SerializationData>* data);
   bool IsEmpty();
   void Clear();
 
  private:
   base::Mutex mutex_;
-  i::List<SerializationData*> data_;
+  std::vector<std::unique_ptr<SerializationData>> data_;
 };
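
The queue's contract (ownership moves in under the lock and moves back out on dequeue, so a payload is never shared between threads) is independent of V8. A hedged equivalent on top of the standard library:

    #include <memory>
    #include <mutex>
    #include <vector>

    // Standard-library analog of SerializationDataQueue.
    template <typename T>
    class OwningQueue {
     public:
      void Enqueue(std::unique_ptr<T> item) {
        std::lock_guard<std::mutex> guard(mutex_);
        items_.push_back(std::move(item));
      }
      // Returns false and leaves *out empty when the queue is empty.
      bool Dequeue(std::unique_ptr<T>* out) {
        out->reset();
        std::lock_guard<std::mutex> guard(mutex_);
        if (items_.empty()) return false;
        *out = std::move(items_.front());
        items_.erase(items_.begin());
        return true;
      }

     private:
      std::mutex mutex_;
      std::vector<std::unique_ptr<T>> items_;
    };
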
 
 
@@ -218,13 +240,13 @@
   // Post a message to the worker's incoming message queue. The worker will
   // take ownership of the SerializationData.
   // This function should only be called by the thread that created the Worker.
-  void PostMessage(SerializationData* data);
+  void PostMessage(std::unique_ptr<SerializationData> data);
   // Synchronously retrieve messages from the worker's outgoing message queue.
   // If there is no message in the queue, block until a message is available.
   // If there are no messages in the queue and the worker is no longer running,
   // return nullptr.
   // This function should only be called by the thread that created the Worker.
-  SerializationData* GetMessage();
+  std::unique_ptr<SerializationData> GetMessage();
   // Terminate the worker's event loop. Messages from the worker that have been
   // queued can still be read via GetMessage().
   // This function can be called by any thread.
@@ -282,7 +304,8 @@
         natives_blob(NULL),
         snapshot_blob(NULL),
         trace_enabled(false),
-        trace_config(NULL) {}
+        trace_config(NULL),
+        lcov_file(NULL) {}
 
   ~ShellOptions() {
     delete[] isolate_sources;
@@ -313,6 +336,7 @@
   const char* snapshot_blob;
   bool trace_enabled;
   const char* trace_config;
+  const char* lcov_file;
 };
 
 class Shell : public i::AllStatic {
@@ -324,7 +348,6 @@
                             Local<Value> name, bool print_result,
                             bool report_exceptions);
   static bool ExecuteModule(Isolate* isolate, const char* file_name);
-  static const char* ToCString(const v8::String::Utf8Value& value);
   static void ReportException(Isolate* isolate, TryCatch* try_catch);
   static Local<String> ReadFile(Isolate* isolate, const char* name);
   static Local<Context> CreateEvaluationContext(Isolate* isolate);
@@ -335,16 +358,10 @@
   static void CollectGarbage(Isolate* isolate);
   static void EmptyMessageQueues(Isolate* isolate);
 
-  // TODO(binji): stupid implementation for now. Is there an easy way to hash an
-  // object for use in base::HashMap? By pointer?
-  typedef i::List<Local<Object>> ObjectList;
-  static bool SerializeValue(Isolate* isolate, Local<Value> value,
-                             const ObjectList& to_transfer,
-                             ObjectList* seen_objects,
-                             SerializationData* out_data);
-  static MaybeLocal<Value> DeserializeValue(Isolate* isolate,
-                                            const SerializationData& data,
-                                            int* offset);
+  static std::unique_ptr<SerializationData> SerializeValue(
+      Isolate* isolate, Local<Value> value, Local<Value> transfer);
+  static MaybeLocal<Value> DeserializeValue(
+      Isolate* isolate, std::unique_ptr<SerializationData> data);
   static void CleanupWorkers();
   static int* LookupCounter(const char* name);
   static void* CreateHistogram(const char* name,
@@ -360,6 +377,7 @@
   static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void RealmNavigate(const v8::FunctionCallbackInfo<v8::Value>& args);
   static void RealmCreateAllowCrossRealmAccess(
       const v8::FunctionCallbackInfo<v8::Value>& args);
   static void RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -446,9 +464,11 @@
   static base::LazyMutex workers_mutex_;
   static bool allow_new_workers_;
   static i::List<Worker*> workers_;
-  static i::List<SharedArrayBuffer::Contents> externalized_shared_contents_;
+  static std::vector<ExternalizedContents> externalized_contents_;
 
   static void WriteIgnitionDispatchCountersFile(v8::Isolate* isolate);
+  // Append LCOV coverage data to file.
+  static void WriteLcovData(v8::Isolate* isolate, const char* file);
   static Counter* GetCounter(const char* name, bool is_histogram);
   static Local<String> Stringify(Isolate* isolate, Local<Value> value);
   static void Initialize(Isolate* isolate);
@@ -456,7 +476,10 @@
   static bool SetOptions(int argc, char* argv[]);
   static Local<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
   static MaybeLocal<Context> CreateRealm(
-      const v8::FunctionCallbackInfo<v8::Value>& args);
+      const v8::FunctionCallbackInfo<v8::Value>& args, int index,
+      v8::MaybeLocal<Value> global_object);
+  static void DisposeRealm(const v8::FunctionCallbackInfo<v8::Value>& args,
+                           int index);
   static MaybeLocal<Module> FetchModuleTree(v8::Local<v8::Context> context,
                                             const std::string& file_name);
 };
diff --git a/src/dateparser-inl.h b/src/dateparser-inl.h
index 47a7c6e..fd4bed2 100644
--- a/src/dateparser-inl.h
+++ b/src/dateparser-inl.h
@@ -343,8 +343,13 @@
     }
     if (!scanner->Peek().IsEndOfInput()) return DateToken::Invalid();
   }
-  // Successfully parsed ES5 Date Time String. Default to UTC if no TZ given.
-  if (tz->IsEmpty()) tz->Set(0);
+  // Successfully parsed ES5 Date Time String.
+  // ES#sec-date-time-string-format Date Time String Format
+  // "When the time zone offset is absent, date-only forms are interpreted
+  //  as a UTC time and date-time forms are interpreted as a local time."
+  if (tz->IsEmpty() && time->IsEmpty()) {
+    tz->Set(0);
+  }
   day->set_iso_date();
   return DateToken::EndOfInput();
 }
diff --git a/src/debug/arm/debug-arm.cc b/src/debug/arm/debug-arm.cc
index d96ec31..4839282 100644
--- a/src/debug/arm/debug-arm.cc
+++ b/src/debug/arm/debug-arm.cc
@@ -75,14 +75,6 @@
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
 
-    // Load padding words on stack.
-    __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
-    for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
-      __ push(ip);
-    }
-    __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
-    __ push(ip);
-
     // Push arguments for DebugBreak call.
     if (mode == SAVE_RESULT_REGISTER) {
       // Break on return.
@@ -109,50 +101,45 @@
         }
       }
     }
-
-    // Don't bother removing padding bytes pushed on the stack
-    // as the frame is going to be restored right away.
-
     // Leave the internal frame.
   }
 
-  // Now that the break point has been handled, resume normal execution by
-  // jumping to the target address intended by the caller and that was
-  // overwritten by the address of DebugBreakXXX.
-  ExternalReference after_break_target =
-      ExternalReference::debug_after_break_target_address(masm->isolate());
-  __ mov(ip, Operand(after_break_target));
-  __ ldr(ip, MemOperand(ip));
-  __ Jump(ip);
+  __ MaybeDropFrames();
+
+  // Return to caller.
+  __ Ret();
 }
 
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+  }
+  __ MaybeDropFrames();
 
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  // Load the function pointer off of our current stack frame.
-  __ ldr(r1, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
+  // Return to caller.
+  __ Ret();
+}
 
-  // Pop return address, frame and constant pool pointer (if
-  // FLAG_enable_embedded_constant_pool).
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+  // Frame is being dropped:
+  // - Drop to the target frame specified by r1.
+  // - Look up current function on the frame.
+  // - Leave the frame.
+  // - Restart the frame by calling the function.
+  __ mov(fp, r1);
+  __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ LeaveFrame(StackFrame::INTERNAL);
 
-  ParameterCount dummy(0);
-  __ FloodFunctionIfStepping(r1, no_reg, dummy, dummy);
+  __ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r0,
+         FieldMemOperand(r0, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ mov(r2, r0);
 
-  { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
-    // Load context from the function.
-    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
-    // Clear new.target as a safety measure.
-    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
-
-    // Get function code.
-    __ ldr(ip, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-    __ ldr(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
-    __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-    // Re-run JSFunction, r1 is function, cp is context.
-    __ Jump(ip);
-  }
+  ParameterCount dummy1(r2);
+  ParameterCount dummy2(r0);
+  __ InvokeFunction(r1, dummy1, dummy2, JUMP_FUNCTION,
+                    CheckDebugStepCallWrapper());
 }
 
 
diff --git a/src/debug/arm64/debug-arm64.cc b/src/debug/arm64/debug-arm64.cc
index e344924..06929c6 100644
--- a/src/debug/arm64/debug-arm64.cc
+++ b/src/debug/arm64/debug-arm64.cc
@@ -88,12 +88,6 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Load padding words on stack.
-    __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingValue));
-    __ PushMultipleTimes(scratch, LiveEdit::kFramePaddingInitialSize);
-    __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
-    __ Push(scratch);
-
     // Push arguments for DebugBreak call.
     if (mode == SAVE_RESULT_REGISTER) {
       // Break on return.
@@ -119,52 +113,48 @@
         }
       }
     }
-
-    // Don't bother removing padding bytes pushed on the stack
-    // as the frame is going to be restored right away.
-
     // Leave the internal frame.
   }
 
-  // Now that the break point has been handled, resume normal execution by
-  // jumping to the target address intended by the caller and that was
-  // overwritten by the address of DebugBreakXXX.
-  ExternalReference after_break_target =
-      ExternalReference::debug_after_break_target_address(masm->isolate());
-  __ Mov(scratch, after_break_target);
-  __ Ldr(scratch, MemOperand(scratch));
-  __ Br(scratch);
+  __ MaybeDropFrames();
+
+  // Return to caller.
+  __ Ret();
 }
 
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+  }
+  __ MaybeDropFrames();
 
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  // We do not know our frame height, but set sp based on fp.
-  __ Add(masm->StackPointer(), fp, FrameDropperFrameConstants::kFunctionOffset);
+  // Return to caller.
+  __ Ret();
+}
+
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+  // Frame is being dropped:
+  // - Drop to the target frame specified by x1.
+  // - Look up current function on the frame.
+  // - Leave the frame.
+  // - Restart the frame by calling the function.
+  __ Mov(fp, x1);
   __ AssertStackConsistency();
+  __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
 
-  __ Pop(x1);  // Function
   __ Mov(masm->StackPointer(), Operand(fp));
   __ Pop(fp, lr);  // Frame, Return address.
 
-  ParameterCount dummy(0);
-  __ FloodFunctionIfStepping(x1, no_reg, dummy, dummy);
+  __ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(x0,
+         FieldMemOperand(x0, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ mov(x2, x0);
 
-  UseScratchRegisterScope temps(masm);
-  Register scratch = temps.AcquireX();
-
-  // Load context from the function.
-  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
-
-  // Clear new.target as a safety measure.
-  __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
-
-  // Get function code.
-  __ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
-  __ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset));
-  __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
-
-  // Re-run JSFunction, x1 is function, cp is context.
-  __ Br(scratch);
+  ParameterCount dummy1(x2);
+  ParameterCount dummy2(x0);
+  __ InvokeFunction(x1, dummy1, dummy2, JUMP_FUNCTION,
+                    CheckDebugStepCallWrapper());
 }
 
 
diff --git a/src/debug/debug-coverage.cc b/src/debug/debug-coverage.cc
new file mode 100644
index 0000000..8a13b6c
--- /dev/null
+++ b/src/debug/debug-coverage.cc
@@ -0,0 +1,169 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/debug/debug-coverage.h"
+
+#include "src/base/hashmap.h"
+#include "src/deoptimizer.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class SharedToCounterMap
+    : public base::TemplateHashMapImpl<SharedFunctionInfo*, uint32_t,
+                                       base::KeyEqualityMatcher<void*>,
+                                       base::DefaultAllocationPolicy> {
+ public:
+  typedef base::TemplateHashMapEntry<SharedFunctionInfo*, uint32_t> Entry;
+  inline void Add(SharedFunctionInfo* key, uint32_t count) {
+    Entry* entry = LookupOrInsert(key, Hash(key), []() { return 0; });
+    uint32_t old_count = entry->value;
+    if (UINT32_MAX - count < old_count) {
+      entry->value = UINT32_MAX;
+    } else {
+      entry->value = old_count + count;
+    }
+  }
+
+  inline uint32_t Get(SharedFunctionInfo* key) {
+    Entry* entry = Lookup(key, Hash(key));
+    if (entry == nullptr) return 0;
+    return entry->value;
+  }
+
+ private:
+  static uint32_t Hash(SharedFunctionInfo* key) {
+    return static_cast<uint32_t>(reinterpret_cast<intptr_t>(key));
+  }
+
+  DisallowHeapAllocation no_gc;
+};
+
+namespace {
+int StartPosition(SharedFunctionInfo* info) {
+  int start = info->function_token_position();
+  if (start == kNoSourcePosition) start = info->start_position();
+  return start;
+}
+
+bool CompareSharedFunctionInfo(SharedFunctionInfo* a, SharedFunctionInfo* b) {
+  int a_start = StartPosition(a);
+  int b_start = StartPosition(b);
+  if (a_start == b_start) return a->end_position() > b->end_position();
+  return a_start < b_start;
+}
+}  // anonymous namespace
+
+Coverage* Coverage::Collect(Isolate* isolate, bool reset_count) {
+  SharedToCounterMap counter_map;
+
+  // Feed invocation count into the counter map.
+  if (isolate->IsCodeCoverageEnabled()) {
+    // Feedback vectors are already listed to prevent losing them to GC.
+    Handle<ArrayList> list =
+        Handle<ArrayList>::cast(isolate->factory()->code_coverage_list());
+    for (int i = 0; i < list->Length(); i++) {
+      FeedbackVector* vector = FeedbackVector::cast(list->Get(i));
+      SharedFunctionInfo* shared = vector->shared_function_info();
+      DCHECK(shared->IsSubjectToDebugging());
+      uint32_t count = static_cast<uint32_t>(vector->invocation_count());
+      if (reset_count) vector->clear_invocation_count();
+      counter_map.Add(shared, count);
+    }
+  } else {
+    // Iterate the heap to find all feedback vectors and accumulate the
+    // invocation counts into the map for each shared function info.
+    HeapIterator heap_iterator(isolate->heap());
+    while (HeapObject* current_obj = heap_iterator.next()) {
+      if (!current_obj->IsFeedbackVector()) continue;
+      FeedbackVector* vector = FeedbackVector::cast(current_obj);
+      SharedFunctionInfo* shared = vector->shared_function_info();
+      if (!shared->IsSubjectToDebugging()) continue;
+      uint32_t count = static_cast<uint32_t>(vector->invocation_count());
+      if (reset_count) vector->clear_invocation_count();
+      counter_map.Add(shared, count);
+    }
+  }
+
+  // Iterate shared function infos of every script and build a mapping
+  // between source ranges and invocation counts.
+  Coverage* result = new Coverage();
+  Script::Iterator scripts(isolate);
+  while (Script* script = scripts.Next()) {
+    // Dismiss non-user scripts.
+    if (script->type() != Script::TYPE_NORMAL) continue;
+
+    // Create and add new script data.
+    Handle<Script> script_handle(script, isolate);
+    result->emplace_back(isolate, script_handle);
+    std::vector<CoverageFunction>* functions = &result->back().functions;
+
+    std::vector<SharedFunctionInfo*> sorted;
+    bool has_toplevel = false;
+
+    {
+      // Sort functions by start position, from outer to inner functions.
+      SharedFunctionInfo::ScriptIterator infos(script_handle);
+      while (SharedFunctionInfo* info = infos.Next()) {
+        has_toplevel |= info->is_toplevel();
+        sorted.push_back(info);
+      }
+      std::sort(sorted.begin(), sorted.end(), CompareSharedFunctionInfo);
+    }
+
+    functions->reserve(sorted.size() + (has_toplevel ? 0 : 1));
+
+    if (!has_toplevel) {
+      // Add a replacement toplevel function if it does not exist.
+      int source_end = String::cast(script->source())->length();
+      functions->emplace_back(0, source_end, 1u,
+                              isolate->factory()->empty_string());
+    }
+
+    // Use sorted list to reconstruct function nesting.
+    for (SharedFunctionInfo* info : sorted) {
+      int start = StartPosition(info);
+      int end = info->end_position();
+      uint32_t count = counter_map.Get(info);
+      Handle<String> name(info->DebugName(), isolate);
+      functions->emplace_back(start, end, count, name);
+    }
+  }
+  return result;
+}
+
+void Coverage::TogglePrecise(Isolate* isolate, bool enable) {
+  if (enable) {
+    HandleScope scope(isolate);
+    // Remove all optimized functions. Optimized and inlined functions do not
+    // increment the invocation count.
+    Deoptimizer::DeoptimizeAll(isolate);
+    // Collect existing feedback vectors.
+    std::vector<Handle<FeedbackVector>> vectors;
+    {
+      HeapIterator heap_iterator(isolate->heap());
+      while (HeapObject* current_obj = heap_iterator.next()) {
+        if (!current_obj->IsFeedbackVector()) continue;
+        FeedbackVector* vector = FeedbackVector::cast(current_obj);
+        SharedFunctionInfo* shared = vector->shared_function_info();
+        if (!shared->IsSubjectToDebugging()) continue;
+        vector->clear_invocation_count();
+        vectors.emplace_back(vector, isolate);
+      }
+    }
+    // Add collected feedback vectors to the root list lest we lose them to GC.
+    Handle<ArrayList> list =
+        ArrayList::New(isolate, static_cast<int>(vectors.size()));
+    for (const auto& vector : vectors) list = ArrayList::Add(list, vector);
+    isolate->SetCodeCoverageList(*list);
+  } else {
+    isolate->SetCodeCoverageList(isolate->heap()->undefined_value());
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
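
SharedToCounterMap::Add above saturates instead of wrapping: once an accumulated count would exceed UINT32_MAX it clamps there, so very hot functions cannot overflow back to small counts. The same behavior as a self-contained sketch over std::unordered_map, whereas V8 itself uses TemplateHashMapImpl keyed on SharedFunctionInfo*:

#include <cstdint>
#include <unordered_map>

template <typename Key>
class SaturatingCounterMap {
 public:
  void Add(Key key, uint32_t count) {
    uint32_t& slot = counts_[key];
    // Detect overflow before it happens: clamp at UINT32_MAX, otherwise
    // accumulate normally. This mirrors SharedToCounterMap::Add.
    slot = (UINT32_MAX - count < slot) ? UINT32_MAX : slot + count;
  }

  uint32_t Get(Key key) const {
    auto it = counts_.find(key);
    return it == counts_.end() ? 0 : it->second;
  }

 private:
  std::unordered_map<Key, uint32_t> counts_;
};
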
diff --git a/src/debug/debug-coverage.h b/src/debug/debug-coverage.h
new file mode 100644
index 0000000..36128bc
--- /dev/null
+++ b/src/debug/debug-coverage.h
@@ -0,0 +1,53 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_DEBUG_COVERAGE_H_
+#define V8_DEBUG_DEBUG_COVERAGE_H_
+
+#include <vector>
+
+#include "src/debug/debug-interface.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declaration.
+class Isolate;
+
+struct CoverageFunction {
+  CoverageFunction(int s, int e, uint32_t c, Handle<String> n)
+      : start(s), end(e), count(c), name(n) {}
+  int start;
+  int end;
+  uint32_t count;
+  Handle<String> name;
+};
+
+struct CoverageScript {
+  // Initialize top-level function in case it has been garbage-collected.
+  CoverageScript(Isolate* isolate, Handle<Script> s) : script(s) {}
+  Handle<Script> script;
+  // Functions are sorted by start position, from outer to inner functions.
+  std::vector<CoverageFunction> functions;
+};
+
+class Coverage : public std::vector<CoverageScript> {
+ public:
+  // Allocate a new Coverage object and populate it with the result.
+  // The ownership is transferred to the caller.
+  static Coverage* Collect(Isolate* isolate, bool reset_count);
+
+  // Enable precise code coverage. This disables optimization and makes sure
+  // invocation counts are not affected by GC.
+  static void TogglePrecise(Isolate* isolate, bool enable);
+
+ private:
+  Coverage() {}
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_DEBUG_DEBUG_COVERAGE_H_
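
Collect() above returns a raw Coverage* and transfers ownership, so a caller would typically capture the result in a smart pointer immediately. A hypothetical caller-side sketch, assuming the V8-internal types declared in this header:

#include <memory>

void ReportCoverage(v8::internal::Isolate* isolate) {
  std::unique_ptr<v8::internal::Coverage> coverage(
      v8::internal::Coverage::Collect(isolate, /* reset_count */ false));
  // Coverage is a vector with one entry per user script; each entry lists
  // functions sorted by start position, from outer to inner.
  for (const v8::internal::CoverageScript& script : *coverage) {
    for (const v8::internal::CoverageFunction& function : script.functions) {
      if (function.count == 0) {
        // The range [function.start, function.end) was never invoked.
      }
    }
  }
}
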
diff --git a/src/debug/debug-evaluate.cc b/src/debug/debug-evaluate.cc
index 8970520..c6fafa5 100644
--- a/src/debug/debug-evaluate.cc
+++ b/src/debug/debug-evaluate.cc
@@ -12,6 +12,8 @@
 #include "src/debug/debug.h"
 #include "src/frames-inl.h"
 #include "src/globals.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecodes.h"
 #include "src/isolate-inl.h"
 
 namespace v8 {
@@ -21,12 +23,10 @@
   return context->native_context() == *isolate->debug()->debug_context();
 }
 
-
-MaybeHandle<Object> DebugEvaluate::Global(
-    Isolate* isolate, Handle<String> source, bool disable_break,
-    Handle<HeapObject> context_extension) {
+MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
+                                          Handle<String> source) {
   // Handle the processing of break.
-  DisableBreak disable_break_scope(isolate->debug(), disable_break);
+  DisableBreak disable_break_scope(isolate->debug());
 
   // Enter the top context from before the debugger was invoked.
   SaveContext save(isolate);
@@ -41,19 +41,16 @@
   Handle<Context> context = isolate->native_context();
   Handle<JSObject> receiver(context->global_proxy());
   Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
-  return Evaluate(isolate, outer_info, context, context_extension, receiver,
-                  source);
+  return Evaluate(isolate, outer_info, context, receiver, source, false);
 }
 
-
 MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
                                          StackFrame::Id frame_id,
                                          int inlined_jsframe_index,
                                          Handle<String> source,
-                                         bool disable_break,
-                                         Handle<HeapObject> context_extension) {
+                                         bool throw_on_side_effect) {
   // Handle the processing of break.
-  DisableBreak disable_break_scope(isolate->debug(), disable_break);
+  DisableBreak disable_break_scope(isolate->debug());
 
   // Get the frame where the debugging is performed.
   StackTraceFrameIterator it(isolate, frame_id);
@@ -79,8 +76,8 @@
   Handle<Context> context = context_builder.evaluation_context();
   Handle<JSObject> receiver(context->global_proxy());
   MaybeHandle<Object> maybe_result =
-      Evaluate(isolate, context_builder.outer_info(), context,
-               context_extension, receiver, source);
+      Evaluate(isolate, context_builder.outer_info(), context, receiver, source,
+               throw_on_side_effect);
   if (!maybe_result.is_null()) context_builder.UpdateValues();
   return maybe_result;
 }
@@ -89,32 +86,23 @@
 // Compile and evaluate source for the given context.
 MaybeHandle<Object> DebugEvaluate::Evaluate(
     Isolate* isolate, Handle<SharedFunctionInfo> outer_info,
-    Handle<Context> context, Handle<HeapObject> context_extension,
-    Handle<Object> receiver, Handle<String> source) {
-  if (context_extension->IsJSObject()) {
-    Handle<JSObject> extension = Handle<JSObject>::cast(context_extension);
-    Handle<JSFunction> closure(context->closure(), isolate);
-    context = isolate->factory()->NewWithContext(
-        closure, context,
-        ScopeInfo::CreateForWithScope(
-            isolate, context->IsNativeContext()
-                         ? Handle<ScopeInfo>::null()
-                         : Handle<ScopeInfo>(context->scope_info())),
-        extension);
-  }
-
+    Handle<Context> context, Handle<Object> receiver, Handle<String> source,
+    bool throw_on_side_effect) {
   Handle<JSFunction> eval_fun;
   ASSIGN_RETURN_ON_EXCEPTION(
       isolate, eval_fun,
       Compiler::GetFunctionFromEval(source, outer_info, context, SLOPPY,
                                     NO_PARSE_RESTRICTION, kNoSourcePosition,
-                                    kNoSourcePosition),
+                                    kNoSourcePosition, kNoSourcePosition),
       Object);
 
   Handle<Object> result;
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate, result, Execution::Call(isolate, eval_fun, receiver, 0, NULL),
-      Object);
+  {
+    NoSideEffectScope no_side_effect(isolate, throw_on_side_effect);
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, result, Execution::Call(isolate, eval_fun, receiver, 0, NULL),
+        Object);
+  }
 
   // Skip the global proxy as it has no properties and always delegates to the
   // real global object.
@@ -158,8 +146,8 @@
   //  - Look up in the original context.
   //  - Check the whitelist to find out whether to skip contexts during lookup.
   const ScopeIterator::Option option = ScopeIterator::COLLECT_NON_LOCALS;
-  for (ScopeIterator it(isolate, &frame_inspector, option);
-       !it.Failed() && !it.Done(); it.Next()) {
+  for (ScopeIterator it(isolate, &frame_inspector, option); !it.Done();
+       it.Next()) {
     ScopeIterator::ScopeType scope_type = it.Type();
     if (scope_type == ScopeIterator::ScopeTypeLocal) {
       DCHECK_EQ(FUNCTION_SCOPE, it.CurrentScopeInfo()->scope_type());
@@ -239,7 +227,7 @@
     Handle<JSObject> target, Handle<JSFunction> function) {
   // Do not materialize the arguments object for eval or top-level code.
   // Skip if "arguments" is already taken.
-  if (!function->shared()->is_function()) return;
+  if (function->shared()->is_toplevel()) return;
   Maybe<bool> maybe = JSReceiver::HasOwnProperty(
       target, isolate_->factory()->arguments_string());
   DCHECK(maybe.IsJust());
@@ -269,5 +257,313 @@
   JSObject::SetOwnPropertyIgnoreAttributes(target, name, recv, NONE).Check();
 }
 
+namespace {
+
+bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
+  switch (id) {
+    // Whitelist for intrinsics and runtime functions.
+    // Conversions.
+    case Runtime::kToInteger:
+    case Runtime::kInlineToInteger:
+    case Runtime::kToObject:
+    case Runtime::kInlineToObject:
+    case Runtime::kToString:
+    case Runtime::kInlineToString:
+    case Runtime::kToLength:
+    case Runtime::kInlineToLength:
+    case Runtime::kToNumber:
+    // Type checks.
+    case Runtime::kIsJSReceiver:
+    case Runtime::kInlineIsJSReceiver:
+    case Runtime::kIsSmi:
+    case Runtime::kInlineIsSmi:
+    case Runtime::kIsArray:
+    case Runtime::kInlineIsArray:
+    case Runtime::kIsFunction:
+    case Runtime::kIsDate:
+    case Runtime::kIsJSProxy:
+    case Runtime::kIsRegExp:
+    case Runtime::kIsTypedArray:
+    // Loads.
+    case Runtime::kLoadLookupSlotForCall:
+    // Arrays.
+    case Runtime::kArraySpeciesConstructor:
+    case Runtime::kNormalizeElements:
+    case Runtime::kGetArrayKeys:
+    case Runtime::kHasComplexElements:
+    case Runtime::kEstimateNumberOfElements:
+    // Errors.
+    case Runtime::kReThrow:
+    case Runtime::kThrowReferenceError:
+    case Runtime::kThrowSymbolIteratorInvalid:
+    case Runtime::kThrowIteratorResultNotAnObject:
+    case Runtime::kNewTypeError:
+    // Strings.
+    case Runtime::kInlineStringCharCodeAt:
+    case Runtime::kStringCharCodeAt:
+    case Runtime::kStringIndexOf:
+    case Runtime::kStringReplaceOneCharWithString:
+    case Runtime::kSubString:
+    case Runtime::kInlineSubString:
+    case Runtime::kRegExpInternalReplace:
+    // Literals.
+    case Runtime::kCreateArrayLiteral:
+    case Runtime::kCreateObjectLiteral:
+    case Runtime::kCreateRegExpLiteral:
+    // Misc.
+    case Runtime::kForInPrepare:
+    case Runtime::kInlineCall:
+    case Runtime::kCall:
+    case Runtime::kInlineMaxSmi:
+    case Runtime::kMaxSmi:
+      return true;
+    default:
+      if (FLAG_trace_side_effect_free_debug_evaluate) {
+        PrintF("[debug-evaluate] intrinsic %s may cause side effect.\n",
+               Runtime::FunctionForId(id)->name);
+      }
+      return false;
+  }
+}
+
+bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
+  typedef interpreter::Bytecode Bytecode;
+  typedef interpreter::Bytecodes Bytecodes;
+  if (Bytecodes::IsWithoutExternalSideEffects(bytecode)) return true;
+  if (Bytecodes::IsCallOrConstruct(bytecode)) return true;
+  if (Bytecodes::WritesBooleanToAccumulator(bytecode)) return true;
+  if (Bytecodes::IsJumpIfToBoolean(bytecode)) return true;
+  if (Bytecodes::IsPrefixScalingBytecode(bytecode)) return true;
+  switch (bytecode) {
+    // Whitelist for bytecodes.
+    // Loads.
+    case Bytecode::kLdaLookupSlot:
+    case Bytecode::kLdaGlobal:
+    case Bytecode::kLdaNamedProperty:
+    case Bytecode::kLdaKeyedProperty:
+    // Arithmetics.
+    case Bytecode::kAdd:
+    case Bytecode::kAddSmi:
+    case Bytecode::kSub:
+    case Bytecode::kSubSmi:
+    case Bytecode::kMul:
+    case Bytecode::kDiv:
+    case Bytecode::kMod:
+    case Bytecode::kBitwiseAnd:
+    case Bytecode::kBitwiseAndSmi:
+    case Bytecode::kBitwiseOr:
+    case Bytecode::kBitwiseOrSmi:
+    case Bytecode::kBitwiseXor:
+    case Bytecode::kShiftLeft:
+    case Bytecode::kShiftLeftSmi:
+    case Bytecode::kShiftRight:
+    case Bytecode::kShiftRightSmi:
+    case Bytecode::kShiftRightLogical:
+    case Bytecode::kInc:
+    case Bytecode::kDec:
+    case Bytecode::kLogicalNot:
+    case Bytecode::kToBooleanLogicalNot:
+    case Bytecode::kTypeOf:
+    // Contexts.
+    case Bytecode::kCreateBlockContext:
+    case Bytecode::kCreateCatchContext:
+    case Bytecode::kCreateFunctionContext:
+    case Bytecode::kCreateEvalContext:
+    case Bytecode::kCreateWithContext:
+    // Literals.
+    case Bytecode::kCreateArrayLiteral:
+    case Bytecode::kCreateObjectLiteral:
+    case Bytecode::kCreateRegExpLiteral:
+    // Allocations.
+    case Bytecode::kCreateClosure:
+    case Bytecode::kCreateUnmappedArguments:
+    // Conversions.
+    case Bytecode::kToObject:
+    case Bytecode::kToNumber:
+    // Misc.
+    case Bytecode::kForInPrepare:
+    case Bytecode::kForInContinue:
+    case Bytecode::kForInNext:
+    case Bytecode::kForInStep:
+    case Bytecode::kThrow:
+    case Bytecode::kReThrow:
+    case Bytecode::kIllegal:
+    case Bytecode::kCallJSRuntime:
+    case Bytecode::kStackCheck:
+    case Bytecode::kReturn:
+    case Bytecode::kSetPendingMessage:
+      return true;
+    default:
+      if (FLAG_trace_side_effect_free_debug_evaluate) {
+        PrintF("[debug-evaluate] bytecode %s may cause side effect.\n",
+               Bytecodes::ToString(bytecode));
+      }
+      return false;
+  }
+}
+
+bool BuiltinHasNoSideEffect(Builtins::Name id) {
+  switch (id) {
+    // Whitelist for builtins.
+    // Array builtins.
+    case Builtins::kArrayCode:
+    case Builtins::kArrayIndexOf:
+    case Builtins::kArrayPrototypeValues:
+    case Builtins::kArrayIncludes:
+    case Builtins::kArrayPrototypeEntries:
+    case Builtins::kArrayPrototypeKeys:
+    case Builtins::kArrayForEach:
+    // Math builtins.
+    case Builtins::kMathAbs:
+    case Builtins::kMathAcos:
+    case Builtins::kMathAcosh:
+    case Builtins::kMathAsin:
+    case Builtins::kMathAsinh:
+    case Builtins::kMathAtan:
+    case Builtins::kMathAtanh:
+    case Builtins::kMathAtan2:
+    case Builtins::kMathCeil:
+    case Builtins::kMathCbrt:
+    case Builtins::kMathExpm1:
+    case Builtins::kMathClz32:
+    case Builtins::kMathCos:
+    case Builtins::kMathCosh:
+    case Builtins::kMathExp:
+    case Builtins::kMathFloor:
+    case Builtins::kMathFround:
+    case Builtins::kMathHypot:
+    case Builtins::kMathImul:
+    case Builtins::kMathLog:
+    case Builtins::kMathLog1p:
+    case Builtins::kMathLog2:
+    case Builtins::kMathLog10:
+    case Builtins::kMathMax:
+    case Builtins::kMathMin:
+    case Builtins::kMathPow:
+    case Builtins::kMathRandom:
+    case Builtins::kMathRound:
+    case Builtins::kMathSign:
+    case Builtins::kMathSin:
+    case Builtins::kMathSinh:
+    case Builtins::kMathSqrt:
+    case Builtins::kMathTan:
+    case Builtins::kMathTanh:
+    case Builtins::kMathTrunc:
+    // Number builtins.
+    case Builtins::kNumberConstructor:
+    case Builtins::kNumberIsFinite:
+    case Builtins::kNumberIsInteger:
+    case Builtins::kNumberIsNaN:
+    case Builtins::kNumberIsSafeInteger:
+    case Builtins::kNumberParseFloat:
+    case Builtins::kNumberParseInt:
+    case Builtins::kNumberPrototypeToExponential:
+    case Builtins::kNumberPrototypeToFixed:
+    case Builtins::kNumberPrototypeToPrecision:
+    case Builtins::kNumberPrototypeToString:
+    case Builtins::kNumberPrototypeValueOf:
+    // String builtins. Strings are immutable.
+    case Builtins::kStringFromCharCode:
+    case Builtins::kStringFromCodePoint:
+    case Builtins::kStringConstructor:
+    case Builtins::kStringPrototypeCharAt:
+    case Builtins::kStringPrototypeCharCodeAt:
+    case Builtins::kStringPrototypeEndsWith:
+    case Builtins::kStringPrototypeIncludes:
+    case Builtins::kStringPrototypeIndexOf:
+    case Builtins::kStringPrototypeLastIndexOf:
+    case Builtins::kStringPrototypeStartsWith:
+    case Builtins::kStringPrototypeSubstr:
+    case Builtins::kStringPrototypeSubstring:
+    case Builtins::kStringPrototypeToString:
+    case Builtins::kStringPrototypeToLowerCase:
+    case Builtins::kStringPrototypeToUpperCase:
+    case Builtins::kStringPrototypeTrim:
+    case Builtins::kStringPrototypeTrimLeft:
+    case Builtins::kStringPrototypeTrimRight:
+    case Builtins::kStringPrototypeValueOf:
+    // JSON builtins.
+    case Builtins::kJsonParse:
+    case Builtins::kJsonStringify:
+    // Error builtins.
+    case Builtins::kMakeError:
+    case Builtins::kMakeTypeError:
+    case Builtins::kMakeSyntaxError:
+    case Builtins::kMakeRangeError:
+    case Builtins::kMakeURIError:
+      return true;
+    default:
+      if (FLAG_trace_side_effect_free_debug_evaluate) {
+        PrintF("[debug-evaluate] built-in %s may cause side effect.\n",
+               Builtins::name(id));
+      }
+      return false;
+  }
+}
+
+static const Address accessors_with_no_side_effect[] = {
+    // Whitelist for accessors.
+    FUNCTION_ADDR(Accessors::StringLengthGetter),
+    FUNCTION_ADDR(Accessors::ArrayLengthGetter)};
+
+}  // anonymous namespace
+
+// static
+bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
+  if (FLAG_trace_side_effect_free_debug_evaluate) {
+    PrintF("[debug-evaluate] Checking function %s for side effect.\n",
+           info->DebugName()->ToCString().get());
+  }
+
+  DCHECK(info->is_compiled());
+
+  if (info->HasBytecodeArray()) {
+    // Check bytecodes against whitelist.
+    Handle<BytecodeArray> bytecode_array(info->bytecode_array());
+    if (FLAG_trace_side_effect_free_debug_evaluate) bytecode_array->Print();
+    for (interpreter::BytecodeArrayIterator it(bytecode_array); !it.done();
+         it.Advance()) {
+      interpreter::Bytecode bytecode = it.current_bytecode();
+
+      if (interpreter::Bytecodes::IsCallRuntime(bytecode)) {
+        Runtime::FunctionId id =
+            (bytecode == interpreter::Bytecode::kInvokeIntrinsic)
+                ? it.GetIntrinsicIdOperand(0)
+                : it.GetRuntimeIdOperand(0);
+        if (IntrinsicHasNoSideEffect(id)) continue;
+        return false;
+      }
+
+      if (BytecodeHasNoSideEffect(bytecode)) continue;
+
+      // Did not match whitelist.
+      return false;
+    }
+    return true;
+  } else {
+    // Check built-ins against whitelist.
+    int builtin_index = info->code()->builtin_index();
+    if (builtin_index >= 0 && builtin_index < Builtins::builtin_count &&
+        BuiltinHasNoSideEffect(static_cast<Builtins::Name>(builtin_index))) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// static
+bool DebugEvaluate::CallbackHasNoSideEffect(Address function_addr) {
+  for (size_t i = 0; i < arraysize(accessors_with_no_side_effect); i++) {
+    if (function_addr == accessors_with_no_side_effect[i]) return true;
+  }
+
+  if (FLAG_trace_side_effect_free_debug_evaluate) {
+    PrintF("[debug-evaluate] API Callback at %p may cause side effect.\n",
+           reinterpret_cast<void*>(function_addr));
+  }
+  return false;
+}
+
 }  // namespace internal
 }  // namespace v8
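
FunctionHasNoSideEffect above is a conservative all-or-nothing walk: a single non-whitelisted bytecode, intrinsic, or builtin rejects the entire function. Its skeleton, reduced to an illustrative opcode enum instead of V8's real bytecode set:

#include <cstdint>
#include <vector>

enum class Op : uint8_t { kLdaGlobal, kAdd, kReturn, kStaGlobal };

bool OpHasNoSideEffect(Op op) {
  switch (op) {
    case Op::kLdaGlobal:  // Read-only load.
    case Op::kAdd:        // Pure arithmetic.
    case Op::kReturn:
      return true;
    default:
      return false;  // E.g. kStaGlobal writes; reject conservatively.
  }
}

bool FunctionHasNoSideEffectSketch(const std::vector<Op>& bytecode) {
  for (Op op : bytecode) {
    if (!OpHasNoSideEffect(op)) return false;  // First miss rejects all.
  }
  return true;
}
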
diff --git a/src/debug/debug-evaluate.h b/src/debug/debug-evaluate.h
index 26f4e41..5f5b51e 100644
--- a/src/debug/debug-evaluate.h
+++ b/src/debug/debug-evaluate.h
@@ -13,9 +13,7 @@
 
 class DebugEvaluate : public AllStatic {
  public:
-  static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source,
-                                    bool disable_break,
-                                    Handle<HeapObject> context_extension);
+  static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source);
 
   // Evaluate a piece of JavaScript in the context of a stack frame for
   // debugging.  Things that need special attention are:
@@ -24,8 +22,11 @@
   // - The arguments object needs to be materialized.
   static MaybeHandle<Object> Local(Isolate* isolate, StackFrame::Id frame_id,
                                    int inlined_jsframe_index,
-                                   Handle<String> source, bool disable_break,
-                                   Handle<HeapObject> context_extension);
+                                   Handle<String> source,
+                                   bool throw_on_side_effect);
+
+  static bool FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info);
+  static bool CallbackHasNoSideEffect(Address function_addr);
 
  private:
   // This class builds a context chain for evaluation of expressions
@@ -85,9 +86,9 @@
   static MaybeHandle<Object> Evaluate(Isolate* isolate,
                                       Handle<SharedFunctionInfo> outer_info,
                                       Handle<Context> context,
-                                      Handle<HeapObject> context_extension,
                                       Handle<Object> receiver,
-                                      Handle<String> source);
+                                      Handle<String> source,
+                                      bool throw_on_side_effect);
 };
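
A hypothetical call site for the new Local() signature, with throw_on_side_effect enabled so that evaluation aborts on the first non-whitelisted operation instead of mutating program state; the wrapper name is illustrative:

#include "src/debug/debug-evaluate.h"

namespace v8 {
namespace internal {

MaybeHandle<Object> EvaluateWithoutSideEffects(Isolate* isolate,
                                               StackFrame::Id frame_id,
                                               Handle<String> source) {
  // Index 0 targets the bottom-most inlined frame within the stack frame.
  return DebugEvaluate::Local(isolate, frame_id,
                              /* inlined_jsframe_index */ 0, source,
                              /* throw_on_side_effect */ true);
}

}  // namespace internal
}  // namespace v8
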
 
 
diff --git a/src/debug/debug-frames.cc b/src/debug/debug-frames.cc
index 5da1656..d489911 100644
--- a/src/debug/debug-frames.cc
+++ b/src/debug/debug-frames.cc
@@ -5,72 +5,78 @@
 #include "src/debug/debug-frames.h"
 
 #include "src/frames-inl.h"
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-objects.h"
 
 namespace v8 {
 namespace internal {
 
-FrameInspector::FrameInspector(StandardFrame* frame, int inlined_jsframe_index,
+FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
                                Isolate* isolate)
-    : frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
+    : frame_(frame),
+      frame_summary_(FrameSummary::Get(frame, inlined_frame_index)),
+      isolate_(isolate) {
   JavaScriptFrame* js_frame =
       frame->is_java_script() ? javascript_frame() : nullptr;
   DCHECK(js_frame || frame->is_wasm());
   has_adapted_arguments_ = js_frame && js_frame->has_adapted_arguments();
-  is_bottommost_ = inlined_jsframe_index == 0;
+  is_bottommost_ = inlined_frame_index == 0;
   is_optimized_ = frame_->is_optimized();
   is_interpreted_ = frame_->is_interpreted();
+
   // Calculate the deoptimized frame.
-  if (frame->is_optimized()) {
+  if (is_optimized_) {
     DCHECK(js_frame != nullptr);
     // TODO(turbofan): Revisit once we support deoptimization.
     if (js_frame->LookupCode()->is_turbofanned() &&
-        js_frame->function()->shared()->asm_function() &&
-        !FLAG_turbo_asm_deoptimization) {
+        js_frame->function()->shared()->asm_function()) {
       is_optimized_ = false;
       return;
     }
 
-    deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
-        js_frame, inlined_jsframe_index, isolate);
+    deoptimized_frame_.reset(Deoptimizer::DebuggerInspectableFrame(
+        js_frame, inlined_frame_index, isolate));
+  } else if (frame_->is_wasm_interpreter_entry()) {
+    wasm_interpreted_frame_ =
+        frame_summary_.AsWasm()
+            .wasm_instance()
+            ->debug_info()
+            ->GetInterpretedFrame(frame_->fp(), inlined_frame_index);
+    DCHECK(wasm_interpreted_frame_);
   }
 }
 
 FrameInspector::~FrameInspector() {
-  // Get rid of the calculated deoptimized frame if any.
-  if (deoptimized_frame_ != nullptr) {
-    delete deoptimized_frame_;
-  }
+  // The destructor needs to be defined in the .cc file because it
+  // instantiates std::unique_ptr destructors for types that are only
+  // forward-declared in the header.
 }
 
 int FrameInspector::GetParametersCount() {
-  return is_optimized_ ? deoptimized_frame_->parameters_count()
-                       : frame_->ComputeParametersCount();
+  if (is_optimized_) return deoptimized_frame_->parameters_count();
+  if (wasm_interpreted_frame_)
+    return wasm_interpreted_frame_->GetParameterCount();
+  return frame_->ComputeParametersCount();
 }
 
 Handle<Script> FrameInspector::GetScript() {
-  Object* script = is_optimized_
-                       ? deoptimized_frame_->GetFunction()->shared()->script()
-                       : frame_->script();
-  return handle(Script::cast(script), isolate_);
+  return Handle<Script>::cast(frame_summary_.script());
 }
 
 Handle<JSFunction> FrameInspector::GetFunction() {
-  DCHECK(!frame_->is_wasm());
-  return is_optimized_ ? deoptimized_frame_->GetFunction()
-                       : handle(javascript_frame()->function(), isolate_);
+  return frame_summary_.AsJavaScript().function();
 }
 
 Handle<Object> FrameInspector::GetParameter(int index) {
-  return is_optimized_ ? deoptimized_frame_->GetParameter(index)
-                       : handle(frame_->GetParameter(index), isolate_);
+  if (is_optimized_) return deoptimized_frame_->GetParameter(index);
+  // TODO(clemensh): Handle wasm_interpreted_frame_.
+  return handle(frame_->GetParameter(index), isolate_);
 }
 
 Handle<Object> FrameInspector::GetExpression(int index) {
   // TODO(turbofan): Revisit once we support deoptimization.
   if (frame_->is_java_script() &&
       javascript_frame()->LookupCode()->is_turbofanned() &&
-      javascript_frame()->function()->shared()->asm_function() &&
-      !FLAG_turbo_asm_deoptimization) {
+      javascript_frame()->function()->shared()->asm_function()) {
     return isolate_->factory()->undefined_value();
   }
   return is_optimized_ ? deoptimized_frame_->GetExpression(index)
@@ -78,22 +84,16 @@
 }
 
 int FrameInspector::GetSourcePosition() {
-  return is_optimized_ ? deoptimized_frame_->GetSourcePosition()
-                       : frame_->position();
+  return frame_summary_.SourcePosition();
 }
 
-bool FrameInspector::IsConstructor() {
-  return is_optimized_ && !is_bottommost_
-             ? deoptimized_frame_->HasConstructStub()
-             : frame_->IsConstructor();
-}
+bool FrameInspector::IsConstructor() { return frame_summary_.is_constructor(); }
 
 Handle<Object> FrameInspector::GetContext() {
   return is_optimized_ ? deoptimized_frame_->GetContext()
                        : handle(frame_->context(), isolate_);
 }
 
-
 // To inspect all the provided arguments the frame might need to be
 // replaced with the arguments frame.
 void FrameInspector::SetArgumentsFrame(StandardFrame* frame) {
@@ -211,15 +211,11 @@
                                                 int index) {
   int count = -1;
   for (; !it->done(); it->Advance()) {
-    if (it->is_wasm()) {
-      if (++count == index) return 0;
-      continue;
-    }
     List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
-    it->javascript_frame()->Summarize(&frames);
+    it->frame()->Summarize(&frames);
     for (int i = frames.length() - 1; i >= 0; i--) {
       // Omit functions from native and extension scripts.
-      if (!frames[i].function()->shared()->IsSubjectToDebugging()) continue;
+      if (!frames[i].is_subject_to_debugging()) continue;
       if (++count == index) return i;
     }
   }
diff --git a/src/debug/debug-frames.h b/src/debug/debug-frames.h
index e8698e7..2c9e43f 100644
--- a/src/debug/debug-frames.h
+++ b/src/debug/debug-frames.h
@@ -13,13 +13,20 @@
 namespace v8 {
 namespace internal {
 
+// Forward declaration:
+namespace wasm {
+class InterpretedFrame;
+}
+
 class FrameInspector {
  public:
-  FrameInspector(StandardFrame* frame, int inlined_jsframe_index,
+  FrameInspector(StandardFrame* frame, int inlined_frame_index,
                  Isolate* isolate);
 
   ~FrameInspector();
 
+  FrameSummary& summary() { return frame_summary_; }
+
   int GetParametersCount();
   Handle<JSFunction> GetFunction();
   Handle<Script> GetScript();
@@ -33,7 +40,6 @@
     return frame_->is_arguments_adaptor() ? ArgumentsAdaptorFrame::cast(frame_)
                                           : JavaScriptFrame::cast(frame_);
   }
-  inline WasmFrame* wasm_frame() { return WasmFrame::cast(frame_); }
 
   JavaScriptFrame* GetArgumentsFrame() { return javascript_frame(); }
   void SetArgumentsFrame(StandardFrame* frame);
@@ -52,7 +58,9 @@
                                          Handle<String> parameter_name);
 
   StandardFrame* frame_;
-  DeoptimizedFrameInfo* deoptimized_frame_;
+  FrameSummary frame_summary_;
+  std::unique_ptr<DeoptimizedFrameInfo> deoptimized_frame_;
+  std::unique_ptr<wasm::InterpretedFrame> wasm_interpreted_frame_;
   Isolate* isolate_;
   bool is_optimized_;
   bool is_interpreted_;
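
The unique_ptr members above depend on a common C++ idiom: wasm::InterpretedFrame is only forward-declared, so the destructor, which instantiates ~unique_ptr and thus needs the complete type, must be defined in the .cc file. The general shape, self-contained:

#include <memory>

class Widget;  // Forward declaration only; size and dtor unknown here.

class Owner {
 public:
  Owner();
  ~Owner();  // Declared here, defined below where Widget is complete.

 private:
  std::unique_ptr<Widget> widget_;
};

// In the .cc file, with the full definition visible:
class Widget {};

Owner::Owner() : widget_(new Widget()) {}
Owner::~Owner() = default;  // ~unique_ptr<Widget> is instantiated here.
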
diff --git a/src/debug/debug-interface.h b/src/debug/debug-interface.h
index 443ed42..be8ed90 100644
--- a/src/debug/debug-interface.h
+++ b/src/debug/debug-interface.h
@@ -5,205 +5,260 @@
 #ifndef V8_DEBUG_DEBUG_INTERFACE_H_
 #define V8_DEBUG_DEBUG_INTERFACE_H_
 
+#include <functional>
+
 #include "include/v8-debug.h"
 #include "include/v8-util.h"
 #include "include/v8.h"
 
+#include "src/debug/interface-types.h"
+#include "src/globals.h"
+
 namespace v8 {
 
-class DebugInterface {
- public:
-  /**
-   * An event details object passed to the debug event listener.
-   */
-  class EventDetails : public v8::Debug::EventDetails {
-   public:
-    /**
-     * Event type.
-     */
-    virtual v8::DebugEvent GetEvent() const = 0;
+namespace internal {
+struct CoverageFunction;
+struct CoverageScript;
+class Coverage;
+class Script;
+}
 
-    /**
-     * Access to execution state and event data of the debug event. Don't store
-     * these cross callbacks as their content becomes invalid.
-     */
-    virtual Local<Object> GetExecutionState() const = 0;
-    virtual Local<Object> GetEventData() const = 0;
+namespace debug {
 
-    /**
-     * Get the context active when the debug event happened. Note this is not
-     * the current active context as the JavaScript part of the debugger is
-     * running in its own context which is entered at this point.
-     */
-    virtual Local<Context> GetEventContext() const = 0;
+/**
+ * The debugger runs in its own context, which is entered while debugger
+ * messages are being dispatched. This is an explicit getter for this
+ * debugger context. Note that the content of the debugger context is subject
+ * to change. The Context exists only when the debugger is active, i.e. at
+ * least one DebugEventListener or MessageHandler is set.
+ */
+Local<Context> GetDebugContext(Isolate* isolate);
 
-    /**
-     * Client data passed with the corresponding callback when it was
-     * registered.
-     */
-    virtual Local<Value> GetCallbackData() const = 0;
+/**
+ * Run a JavaScript function in the debugger.
+ * \param fun the function to call
+ * \param data passed as second argument to the function
+ * With this call the debugger is entered and the function specified is called
+ * with the execution state as the first argument. This makes it possible to
+ * get access to information otherwise not available during normal JavaScript
+ * execution, e.g. details on stack frames. The receiver of the function call
+ * will be the debugger context global object; however, this is subject to
+ * change.
+ * The following example shows a JavaScript function which when passed to
+ * v8::Debug::Call will return the current line of JavaScript execution.
+ *
+ * \code
+ *   function frame_source_line(exec_state) {
+ *     return exec_state.frame(0).sourceLine();
+ *   }
+ * \endcode
+ */
+// TODO(dcarney): data arg should be a MaybeLocal
+MaybeLocal<Value> Call(Local<Context> context, v8::Local<v8::Function> fun,
+                       Local<Value> data = Local<Value>());
 
-    virtual ~EventDetails() {}
-  };
+/**
+ * Enable/disable LiveEdit functionality for the given Isolate
+ * (default Isolate if not provided). V8 will abort if LiveEdit is
+ * unexpectedly used. LiveEdit is enabled by default.
+ */
+void SetLiveEditEnabled(Isolate* isolate, bool enable);
 
-  /**
-   * Debug event callback function.
-   *
-   * \param event_details object providing information about the debug event
-   *
-   * A EventCallback does not take possession of the event data,
-   * and must not rely on the data persisting after the handler returns.
-   */
-  typedef void (*EventCallback)(const EventDetails& event_details);
+// Schedule a debugger break to happen when JavaScript code is run
+// in the given isolate.
+void DebugBreak(Isolate* isolate);
 
-  static bool SetDebugEventListener(Isolate* isolate, EventCallback that,
-                                    Local<Value> data = Local<Value>());
+// Remove scheduled debugger break in given isolate if it has not
+// happened yet.
+void CancelDebugBreak(Isolate* isolate);
 
-  /**
-   * Debugger is running in its own context which is entered while debugger
-   * messages are being dispatched. This is an explicit getter for this
-   * debugger context. Note that the content of the debugger context is subject
-   * to change. The Context exists only when the debugger is active, i.e. at
-   * least one DebugEventListener or MessageHandler is set.
-   */
-  static Local<Context> GetDebugContext(Isolate* isolate);
+/**
+ * Returns an array of internal properties specific to the value type. The
+ * result has the following format: [<name>, <value>, ..., <name>, <value>].
+ * The result array will be allocated in the current context.
+ */
+MaybeLocal<Array> GetInternalProperties(Isolate* isolate, Local<Value> value);
 
-  /**
-   * Run a JavaScript function in the debugger.
-   * \param fun the function to call
-   * \param data passed as second argument to the function
-   * With this call the debugger is entered and the function specified is called
-   * with the execution state as the first argument. This makes it possible to
-   * get access to information otherwise not available during normal JavaScript
-   * execution e.g. details on stack frames. Receiver of the function call will
-   * be the debugger context global object, however this is a subject to change.
-   * The following example shows a JavaScript function which when passed to
-   * v8::Debug::Call will return the current line of JavaScript execution.
-   *
-   * \code
-   *   function frame_source_line(exec_state) {
-   *     return exec_state.frame(0).sourceLine();
-   *   }
-   * \endcode
-   */
-  // TODO(dcarney): data arg should be a MaybeLocal
-  static MaybeLocal<Value> Call(Local<Context> context,
-                                v8::Local<v8::Function> fun,
-                                Local<Value> data = Local<Value>());
-
-  /**
-   * Enable/disable LiveEdit functionality for the given Isolate
-   * (default Isolate if not provided). V8 will abort if LiveEdit is
-   * unexpectedly used. LiveEdit is enabled by default.
-   */
-  static void SetLiveEditEnabled(Isolate* isolate, bool enable);
-
-  // Schedule a debugger break to happen when JavaScript code is run
-  // in the given isolate.
-  static void DebugBreak(Isolate* isolate);
-
-  // Remove scheduled debugger break in given isolate if it has not
-  // happened yet.
-  static void CancelDebugBreak(Isolate* isolate);
-
-  /**
-   * Returns array of internal properties specific to the value type. Result has
-   * the following format: [<name>, <value>,...,<name>, <value>]. Result array
-   * will be allocated in the current context.
-   */
-  static MaybeLocal<Array> GetInternalProperties(Isolate* isolate,
-                                                 Local<Value> value);
-
-  enum ExceptionBreakState {
-    NoBreakOnException = 0,
-    BreakOnUncaughtException = 1,
-    BreakOnAnyException = 2
-  };
-
-  /**
-   * Defines if VM will pause on exceptions or not.
-   * If BreakOnAnyExceptions is set then VM will pause on caught and uncaught
-   * exception, if BreakOnUncaughtException is set then VM will pause only on
-   * uncaught exception, otherwise VM won't stop on any exception.
-   */
-  static void ChangeBreakOnException(Isolate* isolate,
-                                     ExceptionBreakState state);
-
-  enum StepAction {
-    StepOut = 0,   // Step out of the current function.
-    StepNext = 1,  // Step to the next statement in the current function.
-    StepIn = 2,    // Step into new functions invoked or the next statement
-                   // in the current function.
-    StepFrame = 3  // Step into a new frame or return to previous frame.
-  };
-
-  static void PrepareStep(Isolate* isolate, StepAction action);
-  static void ClearStepping(Isolate* isolate);
-
-  /**
-   * Defines location inside script.
-   * Lines and columns are 0-based.
-   */
-  class Location {
-   public:
-    Location(int lineNumber, int columnNumber);
-    /**
-     * Create empty location.
-     */
-    Location();
-
-    int GetLineNumber() const;
-    int GetColumnNumber() const;
-    bool IsEmpty() const;
-
-   private:
-    int lineNumber_;
-    int columnNumber_;
-  };
-
-  /**
-   * Native wrapper around v8::internal::Script object.
-   */
-  class Script {
-   public:
-    v8::Isolate* GetIsolate() const;
-
-    ScriptOriginOptions OriginOptions() const;
-    bool WasCompiled() const;
-    int Id() const;
-    int LineOffset() const;
-    int ColumnOffset() const;
-    std::vector<int> LineEnds() const;
-    MaybeLocal<String> Name() const;
-    MaybeLocal<String> SourceURL() const;
-    MaybeLocal<String> SourceMappingURL() const;
-    MaybeLocal<String> ContextData() const;
-    MaybeLocal<String> Source() const;
-    bool GetPossibleBreakpoints(const Location& start, const Location& end,
-                                std::vector<Location>* locations) const;
-
-    /**
-     * script parameter is a wrapper v8::internal::JSObject for
-     * v8::internal::Script.
-     * This function gets v8::internal::Script from v8::internal::JSObject and
-     * wraps it with DebugInterface::Script.
-     * Returns empty local if not called with a valid wrapper of
-     * v8::internal::Script.
-     */
-    static MaybeLocal<Script> Wrap(Isolate* isolate,
-                                   v8::Local<v8::Object> script);
-
-   private:
-    int GetSourcePosition(const Location& location) const;
-  };
-
-  /**
-   * Return array of compiled scripts.
-   */
-  static void GetLoadedScripts(Isolate* isolate,
-                               PersistentValueVector<Script>& scripts);
+enum ExceptionBreakState {
+  NoBreakOnException = 0,
+  BreakOnUncaughtException = 1,
+  BreakOnAnyException = 2
 };
 
+/**
+ * Defines whether the VM will pause on exceptions or not.
+ * If BreakOnAnyException is set then the VM will pause on caught and uncaught
+ * exceptions, if BreakOnUncaughtException is set then the VM will pause only
+ * on uncaught exceptions, otherwise the VM won't stop on any exception.
+ */
+void ChangeBreakOnException(Isolate* isolate, ExceptionBreakState state);
+
+void SetBreakPointsActive(Isolate* isolate, bool is_active);
+
+enum StepAction {
+  StepOut = 0,   // Step out of the current function.
+  StepNext = 1,  // Step to the next statement in the current function.
+  StepIn = 2     // Step into new functions invoked or the next statement
+                 // in the current function.
+};
+
+void PrepareStep(Isolate* isolate, StepAction action);
+
+bool HasNonBlackboxedFrameOnStack(Isolate* isolate);
+
+/**
+ * Out-of-memory callback function.
+ * The function is invoked when the heap size is close to the hard limit.
+ *
+ * \param data the parameter provided during callback installation.
+ */
+typedef void (*OutOfMemoryCallback)(void* data);
+void SetOutOfMemoryCallback(Isolate* isolate, OutOfMemoryCallback callback,
+                            void* data);
+
+/**
+ * Native wrapper around v8::internal::Script object.
+ */
+class V8_EXPORT_PRIVATE Script {
+ public:
+  v8::Isolate* GetIsolate() const;
+
+  ScriptOriginOptions OriginOptions() const;
+  bool WasCompiled() const;
+  int Id() const;
+  int LineOffset() const;
+  int ColumnOffset() const;
+  std::vector<int> LineEnds() const;
+  MaybeLocal<String> Name() const;
+  MaybeLocal<String> SourceURL() const;
+  MaybeLocal<String> SourceMappingURL() const;
+  MaybeLocal<Value> ContextData() const;
+  MaybeLocal<String> Source() const;
+  bool IsWasm() const;
+  bool IsModule() const;
+  bool GetPossibleBreakpoints(const debug::Location& start,
+                              const debug::Location& end,
+                              std::vector<debug::Location>* locations) const;
+
+ private:
+  int GetSourcePosition(const debug::Location& location) const;
+};
+
+// Specialization for wasm Scripts.
+class WasmScript : public Script {
+ public:
+  static WasmScript* Cast(Script* script);
+
+  int NumFunctions() const;
+  int NumImportedFunctions() const;
+
+  std::pair<int, int> GetFunctionRange(int function_index) const;
+
+  debug::WasmDisassembly DisassembleFunction(int function_index) const;
+};
+
+void GetLoadedScripts(Isolate* isolate, PersistentValueVector<Script>& scripts);
+
+MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* isolate,
+                                                 Local<String> source);
+
+class DebugDelegate {
+ public:
+  virtual ~DebugDelegate() {}
+  virtual void PromiseEventOccurred(debug::PromiseDebugActionType type, int id,
+                                    int parent_id) {}
+  virtual void ScriptCompiled(v8::Local<Script> script,
+                              bool has_compile_error) {}
+  virtual void BreakProgramRequested(v8::Local<v8::Context> paused_context,
+                                     v8::Local<v8::Object> exec_state,
+                                     v8::Local<v8::Value> break_points_hit) {}
+  virtual void ExceptionThrown(v8::Local<v8::Context> paused_context,
+                               v8::Local<v8::Object> exec_state,
+                               v8::Local<v8::Value> exception,
+                               v8::Local<v8::Value> promise, bool is_uncaught) {
+  }
+  virtual bool IsFunctionBlackboxed(v8::Local<debug::Script> script,
+                                    const debug::Location& start,
+                                    const debug::Location& end) {
+    return false;
+  }
+};
+
+void SetDebugDelegate(Isolate* isolate, DebugDelegate* listener);
+
+void ResetBlackboxedStateCache(Isolate* isolate,
+                               v8::Local<debug::Script> script);
+
+int EstimatedValueSize(Isolate* isolate, v8::Local<v8::Value> value);
+
+v8::MaybeLocal<v8::Array> EntriesPreview(Isolate* isolate,
+                                         v8::Local<v8::Value> value,
+                                         bool* is_key_value);
+
+/**
+ * Native wrapper around v8::internal::JSGeneratorObject object.
+ */
+class GeneratorObject {
+ public:
+  v8::MaybeLocal<debug::Script> Script();
+  v8::Local<v8::Function> Function();
+  debug::Location SuspendedLocation();
+  bool IsSuspended();
+
+  static v8::Local<debug::GeneratorObject> Cast(v8::Local<v8::Value> value);
+};
+
+/*
+ * Provides an API layer between the inspector and code coverage.
+ */
+class V8_EXPORT_PRIVATE Coverage {
+ public:
+  class ScriptData;  // Forward declaration.
+
+  class V8_EXPORT_PRIVATE FunctionData {
+   public:
+    // 0-based line and column numbers.
+    Location Start() { return start_; }
+    Location End() { return end_; }
+    uint32_t Count();
+    MaybeLocal<String> Name();
+
+   private:
+    FunctionData(i::CoverageFunction* function, Local<debug::Script> script);
+    i::CoverageFunction* function_;
+    Location start_;
+    Location end_;
+
+    friend class v8::debug::Coverage::ScriptData;
+  };
+
+  class V8_EXPORT_PRIVATE ScriptData {
+   public:
+    Local<debug::Script> GetScript();
+    size_t FunctionCount();
+    FunctionData GetFunctionData(size_t i);
+
+   private:
+    explicit ScriptData(i::CoverageScript* script) : script_(script) {}
+    i::CoverageScript* script_;
+
+    friend class v8::debug::Coverage;
+  };
+
+  static Coverage Collect(Isolate* isolate, bool reset_count);
+
+  static void TogglePrecise(Isolate* isolate, bool enable);
+
+  size_t ScriptCount();
+  ScriptData GetScriptData(size_t i);
+  bool IsEmpty() { return coverage_ == nullptr; }
+
+  ~Coverage();
+
+ private:
+  explicit Coverage(i::Coverage* coverage) : coverage_(coverage) {}
+  i::Coverage* coverage_;
+};
+}  // namespace debug
 }  // namespace v8
 
 #endif  // V8_DEBUG_DEBUG_INTERFACE_H_
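
A hypothetical sketch of driving the debug::Coverage API declared above from embedder-side code; the inspector's real call sites differ, but the shape of the traversal is:

#include "src/debug/debug-interface.h"

void WalkCoverage(v8::Isolate* isolate) {
  v8::debug::Coverage coverage =
      v8::debug::Coverage::Collect(isolate, /* reset_count */ false);
  for (size_t i = 0; i < coverage.ScriptCount(); i++) {
    v8::debug::Coverage::ScriptData script = coverage.GetScriptData(i);
    for (size_t j = 0; j < script.FunctionCount(); j++) {
      v8::debug::Coverage::FunctionData function = script.GetFunctionData(j);
      // Start()/End() are 0-based locations; Count() is the invocation
      // count accumulated from feedback vectors since the last reset.
      v8::debug::Location start = function.Start();
      (void)start;
    }
  }
}
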
diff --git a/src/debug/debug-scopes.cc b/src/debug/debug-scopes.cc
index c84d32a..cf957bc 100644
--- a/src/debug/debug-scopes.cc
+++ b/src/debug/debug-scopes.cc
@@ -6,13 +6,14 @@
 
 #include <memory>
 
+#include "src/ast/ast.h"
 #include "src/ast/scopes.h"
 #include "src/debug/debug.h"
 #include "src/frames-inl.h"
 #include "src/globals.h"
 #include "src/isolate-inl.h"
 #include "src/parsing/parse-info.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/parsing.h"
 #include "src/parsing/rewriter.h"
 
 namespace v8 {
@@ -23,8 +24,7 @@
     : isolate_(isolate),
       frame_inspector_(frame_inspector),
       nested_scope_chain_(4),
-      seen_script_scope_(false),
-      failed_(false) {
+      seen_script_scope_(false) {
   if (!frame_inspector->GetContext()->IsContext()) {
     // Optimized frame, context or function cannot be materialized. Give up.
     return;
@@ -87,12 +87,11 @@
 
   // Reparse the code and analyze the scopes.
   // Check whether we are in global, eval or function code.
-  Zone zone(isolate->allocator(), ZONE_NAME);
   std::unique_ptr<ParseInfo> info;
   if (scope_info->scope_type() != FUNCTION_SCOPE) {
     // Global or eval code.
     Handle<Script> script(Script::cast(shared_info->script()));
-    info.reset(new ParseInfo(&zone, script));
+    info.reset(new ParseInfo(script));
     if (scope_info->scope_type() == EVAL_SCOPE) {
       info->set_eval();
       if (!function->context()->IsNativeContext()) {
@@ -108,9 +107,9 @@
     }
   } else {
     // Inner function.
-    info.reset(new ParseInfo(&zone, shared_info));
+    info.reset(new ParseInfo(shared_info));
   }
-  if (Parser::ParseStatic(info.get()) && Rewriter::Rewrite(info.get())) {
+  if (parsing::ParseAny(info.get()) && Rewriter::Rewrite(info.get())) {
     DeclarationScope* scope = info->literal()->scope();
     if (!ignore_nested_scopes || collect_non_locals) {
       CollectNonLocals(info.get(), scope);
@@ -119,26 +118,26 @@
       DeclarationScope::Analyze(info.get(), AnalyzeMode::kDebugger);
       RetrieveScopeChain(scope);
     }
-  } else if (!ignore_nested_scopes) {
+  } else {
     // A failed reparse indicates that the preparser has diverged from the
     // parser or that the preparse data given to the initial parse has been
     // faulty. We fail in debug mode; in release mode we only provide the
     // information we get from the context chain, but nothing about
     // completely stack-allocated scopes or stack-allocated locals.
     // It could also be due to a stack overflow.
-    DCHECK(isolate_->has_pending_exception());
-    failed_ = true;
+    // Silently fail by presenting an empty context chain.
+    CHECK(isolate_->has_pending_exception());
+    isolate_->clear_pending_exception();
+    context_ = Handle<Context>();
   }
   UnwrapEvaluationContext();
 }
 
-
 ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
     : isolate_(isolate),
       frame_inspector_(NULL),
       context_(function->context()),
-      seen_script_scope_(false),
-      failed_(false) {
+      seen_script_scope_(false) {
   if (!function->shared()->IsSubjectToDebugging()) context_ = Handle<Context>();
   UnwrapEvaluationContext();
 }
@@ -148,8 +147,7 @@
     : isolate_(isolate),
       frame_inspector_(NULL),
       context_(generator->context()),
-      seen_script_scope_(false),
-      failed_(false) {
+      seen_script_scope_(false) {
   if (!generator->function()->shared()->IsSubjectToDebugging()) {
     context_ = Handle<Context>();
   }
@@ -212,7 +210,7 @@
 
 
 void ScopeIterator::Next() {
-  DCHECK(!failed_);
+  DCHECK(!Done());
   ScopeType scope_type = Type();
   if (scope_type == ScopeTypeGlobal) {
     // The global scope is always the last in the chain.
@@ -249,7 +247,7 @@
 
 // Return the type of the current scope.
 ScopeIterator::ScopeType ScopeIterator::Type() {
-  DCHECK(!failed_);
+  DCHECK(!Done());
   if (!nested_scope_chain_.is_empty()) {
     Handle<ScopeInfo> scope_info = nested_scope_chain_.last().scope_info;
     switch (scope_info->scope_type()) {
@@ -272,7 +270,7 @@
         DCHECK(!scope_info->HasContext() || context_->IsBlockContext());
         return ScopeTypeBlock;
       case EVAL_SCOPE:
-        DCHECK(!scope_info->HasContext() || context_->IsFunctionContext());
+        DCHECK(!scope_info->HasContext() || context_->IsEvalContext());
         return ScopeTypeEval;
     }
     UNREACHABLE();
@@ -283,7 +281,7 @@
     // fake it.
     return seen_script_scope_ ? ScopeTypeGlobal : ScopeTypeScript;
   }
-  if (context_->IsFunctionContext()) {
+  if (context_->IsFunctionContext() || context_->IsEvalContext()) {
     return ScopeTypeClosure;
   }
   if (context_->IsCatchContext()) {
@@ -304,7 +302,7 @@
 
 
 MaybeHandle<JSObject> ScopeIterator::ScopeObject() {
-  DCHECK(!failed_);
+  DCHECK(!Done());
   switch (Type()) {
     case ScopeIterator::ScopeTypeGlobal:
       return Handle<JSObject>(CurrentContext()->global_proxy());
@@ -346,7 +344,7 @@
 
 bool ScopeIterator::SetVariableValue(Handle<String> variable_name,
                                      Handle<Object> new_value) {
-  DCHECK(!failed_);
+  DCHECK(!Done());
   switch (Type()) {
     case ScopeIterator::ScopeTypeGlobal:
       break;
@@ -372,20 +370,19 @@
 
 
 Handle<ScopeInfo> ScopeIterator::CurrentScopeInfo() {
-  DCHECK(!failed_);
+  DCHECK(!Done());
   if (!nested_scope_chain_.is_empty()) {
     return nested_scope_chain_.last().scope_info;
-  } else if (context_->IsBlockContext()) {
+  } else if (context_->IsBlockContext() || context_->IsFunctionContext() ||
+             context_->IsEvalContext()) {
     return Handle<ScopeInfo>(context_->scope_info());
-  } else if (context_->IsFunctionContext()) {
-    return Handle<ScopeInfo>(context_->closure()->shared()->scope_info());
   }
   return Handle<ScopeInfo>::null();
 }
 
 
 Handle<Context> ScopeIterator::CurrentContext() {
-  DCHECK(!failed_);
+  DCHECK(!Done());
   if (Type() == ScopeTypeGlobal || Type() == ScopeTypeScript ||
       nested_scope_chain_.is_empty()) {
     return context_;
@@ -402,7 +399,7 @@
 // Debug print of the content of the current scope.
 void ScopeIterator::DebugPrint() {
   OFStream os(stdout);
-  DCHECK(!failed_);
+  DCHECK(!Done());
   switch (Type()) {
     case ScopeIterator::ScopeTypeGlobal:
       os << "Global:\n";
@@ -530,7 +527,7 @@
 // context.
 Handle<JSObject> ScopeIterator::MaterializeClosure() {
   Handle<Context> context = CurrentContext();
-  DCHECK(context->IsFunctionContext());
+  DCHECK(context->IsFunctionContext() || context->IsEvalContext());
 
   Handle<SharedFunctionInfo> shared(context->closure()->shared());
   Handle<ScopeInfo> scope_info(shared->scope_info());
@@ -728,18 +725,21 @@
 // This method copies structure of MaterializeClosure method above.
 bool ScopeIterator::SetClosureVariableValue(Handle<String> variable_name,
                                             Handle<Object> new_value) {
-  DCHECK(CurrentContext()->IsFunctionContext());
+  DCHECK(CurrentContext()->IsFunctionContext() ||
+         CurrentContext()->IsEvalContext());
   return SetContextVariableValue(CurrentScopeInfo(), CurrentContext(),
                                  variable_name, new_value);
 }
 
 bool ScopeIterator::SetScriptVariableValue(Handle<String> variable_name,
                                            Handle<Object> new_value) {
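+  // The script context lookup below requires an internalized name string.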
+  Handle<String> internalized_variable_name =
+      isolate_->factory()->InternalizeString(variable_name);
   Handle<Context> context = CurrentContext();
   Handle<ScriptContextTable> script_contexts(
       context->global_object()->native_context()->script_context_table());
   ScriptContextTable::LookupResult lookup_result;
-  if (ScriptContextTable::Lookup(script_contexts, variable_name,
+  if (ScriptContextTable::Lookup(script_contexts, internalized_variable_name,
                                  &lookup_result)) {
     Handle<Context> script_context = ScriptContextTable::GetContext(
         script_contexts, lookup_result.context_index);
@@ -838,8 +838,12 @@
                                         int position) {
   if (scope->is_function_scope()) {
     // Do not collect scopes of nested inner functions inside the current one.
+    // Nested arrow functions can share the same end position as the current
+    // function, so compare start positions as well.
     Handle<JSFunction> function = frame_inspector_->GetFunction();
-    if (scope->end_position() < function->shared()->end_position()) return;
+    if (scope->start_position() > function->shared()->start_position() &&
+        scope->end_position() <= function->shared()->end_position()) {
+      return;
+    }
   }
   if (scope->is_hidden()) {
     // We need to add this chain element in case the scope has a context
diff --git a/src/debug/debug-scopes.h b/src/debug/debug-scopes.h
index 87c85b8..d187f3e 100644
--- a/src/debug/debug-scopes.h
+++ b/src/debug/debug-scopes.h
@@ -50,12 +50,7 @@
   MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScopeDetails();
 
   // More scopes?
-  bool Done() {
-    DCHECK(!failed_);
-    return context_.is_null();
-  }
-
-  bool Failed() { return failed_; }
+  bool Done() { return context_.is_null(); }
 
   // Move to the next scope.
   void Next();
@@ -103,7 +98,6 @@
   List<ExtendedScopeInfo> nested_scope_chain_;
   Handle<StringSet> non_locals_;
   bool seen_script_scope_;
-  bool failed_;
 
   inline JavaScriptFrame* GetFrame() {
     return frame_inspector_->GetArgumentsFrame();
diff --git a/src/debug/debug.cc b/src/debug/debug.cc
index 960327b..dd1f5cf 100644
--- a/src/debug/debug.cc
+++ b/src/debug/debug.cc
@@ -8,12 +8,14 @@
 
 #include "src/api.h"
 #include "src/arguments.h"
+#include "src/assembler-inl.h"
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compilation-cache.h"
 #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
 #include "src/compiler.h"
+#include "src/debug/debug-evaluate.h"
 #include "src/debug/liveedit.h"
 #include "src/deoptimizer.h"
 #include "src/execution.h"
@@ -28,6 +30,7 @@
 #include "src/messages.h"
 #include "src/snapshot/natives.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 
 #include "include/v8-debug.h"
 
@@ -36,19 +39,15 @@
 
 Debug::Debug(Isolate* isolate)
     : debug_context_(Handle<Context>()),
-      event_listener_(Handle<Object>()),
-      event_listener_data_(Handle<Object>()),
-      message_handler_(NULL),
-      command_received_(0),
-      command_queue_(isolate->logger(), kQueueInitialSize),
       is_active_(false),
+      hook_on_function_call_(false),
       is_suppressed_(false),
       live_edit_enabled_(true),  // TODO(yangguo): set to false by default.
       break_disabled_(false),
       break_points_active_(true),
-      in_debug_event_listener_(false),
       break_on_exception_(false),
       break_on_uncaught_exception_(false),
+      side_effect_check_failed_(false),
       debug_info_list_(NULL),
       feature_tracker_(isolate),
       isolate_(isolate) {
@@ -57,7 +56,7 @@
 
 BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
                                        JavaScriptFrame* frame) {
-  FrameSummary summary = FrameSummary::GetFirst(frame);
+  auto summary = FrameSummary::GetTop(frame).AsJavaScript();
   int offset = summary.code_offset();
   Handle<AbstractCode> abstract_code = summary.abstract_code();
   if (abstract_code->IsCode()) offset = offset - 1;
@@ -69,7 +68,7 @@
 void BreakLocation::AllAtCurrentStatement(Handle<DebugInfo> debug_info,
                                           JavaScriptFrame* frame,
                                           List<BreakLocation>* result_out) {
-  FrameSummary summary = FrameSummary::GetFirst(frame);
+  auto summary = FrameSummary::GetTop(frame).AsJavaScript();
   int offset = summary.code_offset();
   Handle<AbstractCode> abstract_code = summary.abstract_code();
   if (abstract_code->IsCode()) offset = offset - 1;
@@ -115,35 +114,32 @@
   // step to, but not actually a location where we can put a break point.
   if (abstract_code_->IsCode()) {
     DCHECK_EQ(debug_info->DebugCode(), abstract_code_->GetCode());
-    CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+    CodeBreakIterator it(debug_info);
     it.SkipToPosition(position_, BREAK_POSITION_ALIGNED);
     return it.code_offset() == code_offset_;
   } else {
     DCHECK(abstract_code_->IsBytecodeArray());
-    BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+    BytecodeArrayBreakIterator it(debug_info);
     it.SkipToPosition(position_, BREAK_POSITION_ALIGNED);
     return it.code_offset() == code_offset_;
   }
 }
 
 std::unique_ptr<BreakIterator> BreakIterator::GetIterator(
-    Handle<DebugInfo> debug_info, Handle<AbstractCode> abstract_code,
-    BreakLocatorType type) {
+    Handle<DebugInfo> debug_info, Handle<AbstractCode> abstract_code) {
   if (abstract_code->IsBytecodeArray()) {
     DCHECK(debug_info->HasDebugBytecodeArray());
     return std::unique_ptr<BreakIterator>(
-        new BytecodeArrayBreakIterator(debug_info, type));
+        new BytecodeArrayBreakIterator(debug_info));
   } else {
     DCHECK(abstract_code->IsCode());
     DCHECK(debug_info->HasDebugCode());
-    return std::unique_ptr<BreakIterator>(
-        new CodeBreakIterator(debug_info, type));
+    return std::unique_ptr<BreakIterator>(new CodeBreakIterator(debug_info));
   }
 }
 
-BreakIterator::BreakIterator(Handle<DebugInfo> debug_info,
-                             BreakLocatorType type)
-    : debug_info_(debug_info), break_index_(-1), break_locator_type_(type) {
+BreakIterator::BreakIterator(Handle<DebugInfo> debug_info)
+    : debug_info_(debug_info), break_index_(-1) {
   position_ = debug_info->shared()->start_position();
   statement_position_ = position_;
 }
@@ -172,10 +168,9 @@
   return closest_break;
 }
 
-CodeBreakIterator::CodeBreakIterator(Handle<DebugInfo> debug_info,
-                                     BreakLocatorType type)
-    : BreakIterator(debug_info, type),
-      reloc_iterator_(debug_info->DebugCode(), GetModeMask(type)),
+CodeBreakIterator::CodeBreakIterator(Handle<DebugInfo> debug_info)
+    : BreakIterator(debug_info),
+      reloc_iterator_(debug_info->DebugCode(), GetModeMask()),
       source_position_iterator_(
           debug_info->DebugCode()->source_position_table()) {
   // There is at least one break location.
@@ -183,17 +178,12 @@
   Next();
 }
 
-int CodeBreakIterator::GetModeMask(BreakLocatorType type) {
+int CodeBreakIterator::GetModeMask() {
   int mask = 0;
   mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_RETURN);
   mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_CALL);
-  if (isolate()->is_tail_call_elimination_enabled()) {
-    mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL);
-  }
-  if (type == ALL_BREAK_LOCATIONS) {
-    mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
-    mask |= RelocInfo::ModeMask(RelocInfo::DEBUGGER_STATEMENT);
-  }
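+  // Without the BreakLocatorType distinction, all debug break slots are
+  // always included in the mask.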
+  mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_TAIL_CALL);
+  mask |= RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION);
   return mask;
 }
 
@@ -218,8 +208,7 @@
     source_position_iterator_.Advance();
   }
 
-  DCHECK(RelocInfo::IsDebugBreakSlot(rmode()) ||
-         RelocInfo::IsDebuggerStatement(rmode()));
+  DCHECK(RelocInfo::IsDebugBreakSlot(rmode()));
   break_index_++;
 }
 
@@ -232,8 +221,6 @@
     return isolate()->is_tail_call_elimination_enabled()
                ? DEBUG_BREAK_SLOT_AT_TAIL_CALL
                : DEBUG_BREAK_SLOT_AT_CALL;
-  } else if (RelocInfo::IsDebuggerStatement(rmode())) {
-    return DEBUGGER_STATEMENT;
   } else if (RelocInfo::IsDebugBreakSlot(rmode())) {
     return DEBUG_BREAK_SLOT;
   } else {
@@ -243,13 +230,12 @@
 
 void CodeBreakIterator::SkipToPosition(int position,
                                        BreakPositionAlignment alignment) {
-  CodeBreakIterator it(debug_info_, break_locator_type_);
+  CodeBreakIterator it(debug_info_);
   SkipTo(it.BreakIndexFromPosition(position, alignment));
 }
 
 void CodeBreakIterator::SetDebugBreak() {
   DebugBreakType debug_break_type = GetDebugBreakType();
-  if (debug_break_type == DEBUGGER_STATEMENT) return;
   DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
   Builtins* builtins = isolate()->builtins();
   Handle<Code> target = debug_break_type == DEBUG_BREAK_SLOT_AT_RETURN
@@ -259,16 +245,12 @@
 }
 
 void CodeBreakIterator::ClearDebugBreak() {
-  DebugBreakType debug_break_type = GetDebugBreakType();
-  if (debug_break_type == DEBUGGER_STATEMENT) return;
-  DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
+  DCHECK(GetDebugBreakType() >= DEBUG_BREAK_SLOT);
   DebugCodegen::ClearDebugBreakSlot(isolate(), rinfo()->pc());
 }
 
 bool CodeBreakIterator::IsDebugBreak() {
-  DebugBreakType debug_break_type = GetDebugBreakType();
-  if (debug_break_type == DEBUGGER_STATEMENT) return false;
-  DCHECK(debug_break_type >= DEBUG_BREAK_SLOT);
+  DCHECK(GetDebugBreakType() >= DEBUG_BREAK_SLOT);
   return DebugCodegen::DebugBreakSlotIsPatched(rinfo()->pc());
 }
 
@@ -278,8 +260,8 @@
 }
 
 BytecodeArrayBreakIterator::BytecodeArrayBreakIterator(
-    Handle<DebugInfo> debug_info, BreakLocatorType type)
-    : BreakIterator(debug_info, type),
+    Handle<DebugInfo> debug_info)
+    : BreakIterator(debug_info),
       source_position_iterator_(
           debug_info->DebugBytecodeArray()->source_position_table()) {
   // There is at least one break location.
@@ -303,13 +285,7 @@
     DCHECK(statement_position_ >= 0);
 
     DebugBreakType type = GetDebugBreakType();
-    if (type == NOT_DEBUG_BREAK) continue;
-
-    if (break_locator_type_ == ALL_BREAK_LOCATIONS) break;
-
-    DCHECK_EQ(CALLS_AND_RETURNS, break_locator_type_);
-    if (type == DEBUG_BREAK_SLOT_AT_CALL) break;
-    if (type == DEBUG_BREAK_SLOT_AT_RETURN) break;
+    if (type != NOT_DEBUG_BREAK) break;
   }
   break_index_++;
 }
@@ -327,7 +303,7 @@
     return isolate()->is_tail_call_elimination_enabled()
                ? DEBUG_BREAK_SLOT_AT_TAIL_CALL
                : DEBUG_BREAK_SLOT_AT_CALL;
-  } else if (interpreter::Bytecodes::IsCallOrNew(bytecode)) {
+  } else if (interpreter::Bytecodes::IsCallOrConstruct(bytecode)) {
     return DEBUG_BREAK_SLOT_AT_CALL;
   } else if (source_position_iterator_.is_statement()) {
     return DEBUG_BREAK_SLOT;
@@ -338,7 +314,7 @@
 
 void BytecodeArrayBreakIterator::SkipToPosition(
     int position, BreakPositionAlignment alignment) {
-  BytecodeArrayBreakIterator it(debug_info_, break_locator_type_);
+  BytecodeArrayBreakIterator it(debug_info_);
   SkipTo(it.BreakIndexFromPosition(position, alignment));
 }
 
@@ -398,13 +374,15 @@
   thread_local_.break_frame_id_ = StackFrame::NO_ID;
   thread_local_.last_step_action_ = StepNone;
   thread_local_.last_statement_position_ = kNoSourcePosition;
-  thread_local_.last_fp_ = 0;
-  thread_local_.target_fp_ = 0;
-  thread_local_.return_value_ = Handle<Object>();
+  thread_local_.last_frame_count_ = -1;
+  thread_local_.target_frame_count_ = -1;
+  thread_local_.return_value_ = Smi::kZero;
+  thread_local_.async_task_count_ = 0;
   clear_suspended_generator();
-  // TODO(isolates): frames_are_dropped_?
+  thread_local_.restart_fp_ = nullptr;
   base::NoBarrier_Store(&thread_local_.current_debug_scope_,
                         static_cast<base::AtomicWord>(0));
+  UpdateHookOnFunctionCall();
 }
 
 
@@ -424,6 +402,7 @@
 int Debug::ArchiveSpacePerThread() { return 0; }
 
 void Debug::Iterate(ObjectVisitor* v) {
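+  // return_value_ is now stored as a raw Object* rather than a Handle, so
+  // the GC must visit it explicitly here.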
+  v->VisitPointer(&thread_local_.return_value_);
   v->VisitPointer(&thread_local_.suspended_generator_);
 }
 
@@ -453,7 +432,7 @@
 
   // Disable breakpoints and interrupts while compiling and running the
   // debugger scripts including the context creation code.
-  DisableBreak disable(this, true);
+  DisableBreak disable(this);
   PostponeInterruptsScope postpone(isolate_);
 
   // Create the debugger context.
@@ -465,7 +444,8 @@
   static const int kFirstContextSnapshotIndex = 0;
   Handle<Context> context = isolate_->bootstrapper()->CreateEnvironment(
       MaybeHandle<JSGlobalProxy>(), v8::Local<ObjectTemplate>(), &no_extensions,
-      kFirstContextSnapshotIndex, DEBUG_CONTEXT);
+      kFirstContextSnapshotIndex, v8::DeserializeInternalFieldsCallback(),
+      DEBUG_CONTEXT);
 
   // Fail if no context could be created.
   if (context.is_null()) return false;
@@ -482,6 +462,7 @@
 void Debug::Unload() {
   ClearAllBreakPoints();
   ClearStepping();
+  RemoveDebugDelegate();
 
   // Return if the debugger is not loaded.
   if (!is_loaded()) return;
@@ -492,8 +473,6 @@
 }
 
 void Debug::Break(JavaScriptFrame* frame) {
-  HandleScope scope(isolate_);
-
   // Initialize LiveEdit.
   LiveEdit::InitializeThreadLocal(this);
 
@@ -506,62 +485,59 @@
 
   // Postpone interrupt during breakpoint processing.
   PostponeInterruptsScope postpone(isolate_);
+  DisableBreak no_recursive_break(this);
 
-  // Get the debug info (create it if it does not exist).
+  // Return if we fail to retrieve debug info.
   Handle<JSFunction> function(frame->function());
   Handle<SharedFunctionInfo> shared(function->shared());
-  if (!EnsureDebugInfo(shared, function)) {
-    // Return if we failed to retrieve the debug info.
-    return;
-  }
+  if (!EnsureDebugInfo(shared)) return;
   Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
 
   // Find the break location where execution has stopped.
   BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
 
   // Find actual break points, if any, and trigger debug break event.
-  Handle<Object> break_points_hit = CheckBreakPoints(debug_info, &location);
-  if (!break_points_hit->IsUndefined(isolate_)) {
+  MaybeHandle<FixedArray> break_points_hit =
+      CheckBreakPoints(debug_info, &location);
+  if (!break_points_hit.is_null()) {
     // Clear all current stepping setup.
     ClearStepping();
     // Notify the debug event listeners.
-    OnDebugBreak(break_points_hit, false);
+    Handle<JSArray> jsarr = isolate_->factory()->NewJSArrayWithElements(
+        break_points_hit.ToHandleChecked());
+    OnDebugBreak(jsarr);
     return;
   }
 
   // No break point. Check for stepping.
   StepAction step_action = last_step_action();
-  Address current_fp = frame->UnpaddedFP();
-  Address target_fp = thread_local_.target_fp_;
-  Address last_fp = thread_local_.last_fp_;
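+  // Stepping now compares frame counts instead of frame pointers, which
+  // also accounts for inlined frames.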
+  int current_frame_count = CurrentFrameCount();
+  int target_frame_count = thread_local_.target_frame_count_;
+  int last_frame_count = thread_local_.last_frame_count_;
 
   bool step_break = false;
   switch (step_action) {
     case StepNone:
       return;
     case StepOut:
-      // Step out has not reached the target frame yet.
-      if (current_fp < target_fp) return;
+      // Step out should not break in a frame deeper than the target frame.
+      if (current_frame_count > target_frame_count) return;
       step_break = true;
       break;
     case StepNext:
-      // Step next should not break in a deeper frame.
-      if (current_fp < target_fp) return;
+      // Step next should not break in a frame deeper than the target frame.
+      if (current_frame_count > target_frame_count) return;
       // For step-next, a tail call is like a return and should break.
       step_break = location.IsTailCall();
     // Fall through.
     case StepIn: {
-      FrameSummary summary = FrameSummary::GetFirst(frame);
-      int offset = summary.code_offset();
+      FrameSummary summary = FrameSummary::GetTop(frame);
       step_break = step_break || location.IsReturn() ||
-                   (current_fp != last_fp) ||
-                   (thread_local_.last_statement_position_ !=
-                    summary.abstract_code()->SourceStatementPosition(offset));
+                   current_frame_count != last_frame_count ||
+                   thread_local_.last_statement_position_ !=
+                       summary.SourceStatementPosition();
       break;
     }
-    case StepFrame:
-      step_break = current_fp != last_fp;
-      break;
   }
 
   // Clear all current stepping setup.
@@ -569,7 +545,7 @@
 
   if (step_break) {
     // Notify the debug event listeners.
-    OnDebugBreak(isolate_->factory()->undefined_value(), false);
+    OnDebugBreak(isolate_->factory()->undefined_value());
   } else {
     // Re-prepare to continue.
     PrepareStep(step_action);
@@ -578,56 +554,33 @@
 
 
 // Find break point objects for this location, if any, and evaluate them.
-// Return an array of break point objects that evaluated true.
-Handle<Object> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
-                                       BreakLocation* location,
-                                       bool* has_break_points) {
-  Factory* factory = isolate_->factory();
+// Return an array of break point objects that evaluated true, or an empty
+// handle if none evaluated true.
+MaybeHandle<FixedArray> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
+                                                BreakLocation* location,
+                                                bool* has_break_points) {
   bool has_break_points_to_check =
       break_points_active_ && location->HasBreakPoint(debug_info);
   if (has_break_points) *has_break_points = has_break_points_to_check;
-  if (!has_break_points_to_check) return factory->undefined_value();
+  if (!has_break_points_to_check) return {};
 
   Handle<Object> break_point_objects =
       debug_info->GetBreakPointObjects(location->position());
-  // Count the number of break points hit. If there are multiple break points
-  // they are in a FixedArray.
-  Handle<FixedArray> break_points_hit;
-  int break_points_hit_count = 0;
-  DCHECK(!break_point_objects->IsUndefined(isolate_));
-  if (break_point_objects->IsFixedArray()) {
-    Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
-    break_points_hit = factory->NewFixedArray(array->length());
-    for (int i = 0; i < array->length(); i++) {
-      Handle<Object> break_point_object(array->get(i), isolate_);
-      if (CheckBreakPoint(break_point_object)) {
-        break_points_hit->set(break_points_hit_count++, *break_point_object);
-      }
-    }
-  } else {
-    break_points_hit = factory->NewFixedArray(1);
-    if (CheckBreakPoint(break_point_objects)) {
-      break_points_hit->set(break_points_hit_count++, *break_point_objects);
-    }
-  }
-  if (break_points_hit_count == 0) return factory->undefined_value();
-  Handle<JSArray> result = factory->NewJSArrayWithElements(break_points_hit);
-  result->set_length(Smi::FromInt(break_points_hit_count));
-  return result;
+  return Debug::GetHitBreakPointObjects(break_point_objects);
 }
 
 
 bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
+  HandleScope scope(isolate_);
   // A break location is considered muted if break locations on the current
   // statement have at least one break point, and all of these break points
   // evaluate to false. Aside from not triggering a debug break event at the
   // break location, we also do not trigger one for debugger statements, nor
   // do we trigger an exception event for an exception thrown at this location.
-  Object* fun = frame->function();
-  if (!fun->IsJSFunction()) return false;
-  JSFunction* function = JSFunction::cast(fun);
+  FrameSummary summary = FrameSummary::GetTop(frame);
+  DCHECK(!summary.IsWasm());
+  Handle<JSFunction> function = summary.AsJavaScript().function();
   if (!function->shared()->HasDebugInfo()) return false;
-  HandleScope scope(isolate_);
   Handle<DebugInfo> debug_info(function->shared()->GetDebugInfo());
   // Enter the debugger.
   DebugScope debug_scope(this);
@@ -637,10 +590,10 @@
   bool has_break_points_at_all = false;
   for (int i = 0; i < break_locations.length(); i++) {
     bool has_break_points;
-    Handle<Object> check_result =
+    MaybeHandle<FixedArray> check_result =
         CheckBreakPoints(debug_info, &break_locations[i], &has_break_points);
     has_break_points_at_all |= has_break_points;
-    if (has_break_points && !check_result->IsUndefined(isolate_)) return false;
+    if (has_break_points && !check_result.is_null()) return false;
   }
   return has_break_points_at_all;
 }
@@ -655,7 +608,10 @@
   Handle<JSFunction> fun = Handle<JSFunction>::cast(
       JSReceiver::GetProperty(isolate_, holder, name).ToHandleChecked());
   Handle<Object> undefined = isolate_->factory()->undefined_value();
-  return Execution::TryCall(isolate_, fun, undefined, argc, args);
+  MaybeHandle<Object> maybe_exception;
+  return Execution::TryCall(isolate_, fun, undefined, argc, args,
+                            Execution::MessageHandling::kReport,
+                            &maybe_exception);
 }
 
 
@@ -690,11 +646,7 @@
 
   // Make sure the function is compiled and has set up the debug info.
   Handle<SharedFunctionInfo> shared(function->shared());
-  if (!EnsureDebugInfo(shared, function)) {
-    // Return if retrieving debug info failed.
-    return true;
-  }
-
+  if (!EnsureDebugInfo(shared)) return true;
   Handle<DebugInfo> debug_info(shared->GetDebugInfo());
   // Source positions start at zero.
   DCHECK(*source_position >= 0);
@@ -719,9 +671,12 @@
                                    int* source_position,
                                    BreakPositionAlignment alignment) {
   if (script->type() == Script::TYPE_WASM) {
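+    // For wasm scripts, delegate setting the break point to the compiled
+    // module.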
-    // TODO(clemensh): set breakpoint for wasm.
-    return false;
+    Handle<WasmCompiledModule> compiled_module(
+        WasmCompiledModule::cast(script->wasm_compiled_module()), isolate_);
+    return WasmCompiledModule::SetBreakPoint(compiled_module, source_position,
+                                             break_point_object);
   }
+
   HandleScope scope(isolate_);
 
   // Obtain shared function info for the function.
@@ -731,10 +686,7 @@
 
   // Make sure the function has set up the debug info.
   Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(result);
-  if (!EnsureDebugInfo(shared, Handle<JSFunction>::null())) {
-    // Return if retrieving debug info failed.
-    return false;
-  }
+  if (!EnsureDebugInfo(shared)) return false;
 
   // Find position within function. The script position might be before the
   // source position of the first function.
@@ -764,13 +716,13 @@
   int statement_position;
   int position;
   if (debug_info->HasDebugCode()) {
-    CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+    CodeBreakIterator it(debug_info);
     it.SkipToPosition(source_position, alignment);
     statement_position = it.statement_position();
     position = it.position();
   } else {
     DCHECK(debug_info->HasDebugBytecodeArray());
-    BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+    BytecodeArrayBreakIterator it(debug_info);
     it.SkipToPosition(source_position, alignment);
     statement_position = it.statement_position();
     position = it.position();
@@ -787,12 +739,12 @@
     BreakPointInfo* info = BreakPointInfo::cast(break_points->get(i));
     if (info->GetBreakPointCount() == 0) continue;
     if (debug_info->HasDebugCode()) {
-      CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+      CodeBreakIterator it(debug_info);
       it.SkipToPosition(info->source_position(), BREAK_POSITION_ALIGNED);
       it.SetDebugBreak();
     }
     if (debug_info->HasDebugBytecodeArray()) {
-      BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+      BytecodeArrayBreakIterator it(debug_info);
       it.SkipToPosition(info->source_position(), BREAK_POSITION_ALIGNED);
       it.SetDebugBreak();
     }
@@ -802,14 +754,12 @@
 void Debug::ClearBreakPoints(Handle<DebugInfo> debug_info) {
   DisallowHeapAllocation no_gc;
   if (debug_info->HasDebugCode()) {
-    for (CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS); !it.Done();
-         it.Next()) {
+    for (CodeBreakIterator it(debug_info); !it.Done(); it.Next()) {
       it.ClearDebugBreak();
     }
   }
   if (debug_info->HasDebugBytecodeArray()) {
-    for (BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
-         !it.Done(); it.Next()) {
+    for (BytecodeArrayBreakIterator it(debug_info); !it.Done(); it.Next()) {
       it.ClearDebugBreak();
     }
   }
@@ -850,35 +800,19 @@
   }
 }
 
-void Debug::FloodWithOneShot(Handle<JSFunction> function,
-                             BreakLocatorType type) {
-  // Debug utility functions are not subject to debugging.
-  if (function->native_context() == *debug_context()) return;
-
-  if (!function->shared()->IsSubjectToDebugging()) {
-    // Builtin functions are not subject to stepping, but need to be
-    // deoptimized, because optimized code does not check for debug
-    // step in at call sites.
-    Deoptimizer::DeoptimizeFunction(*function);
-    return;
-  }
+void Debug::FloodWithOneShot(Handle<SharedFunctionInfo> shared) {
+  if (!shared->IsSubjectToDebugging() || IsBlackboxed(shared)) return;
   // Make sure the function is compiled and has set up the debug info.
-  Handle<SharedFunctionInfo> shared(function->shared());
-  if (!EnsureDebugInfo(shared, function)) {
-    // Return if we failed to retrieve the debug info.
-    return;
-  }
-
-  // Flood the function with break points.
+  if (!EnsureDebugInfo(shared)) return;
   Handle<DebugInfo> debug_info(shared->GetDebugInfo());
+  // Flood the function with break points.
   if (debug_info->HasDebugCode()) {
-    for (CodeBreakIterator it(debug_info, type); !it.Done(); it.Next()) {
+    for (CodeBreakIterator it(debug_info); !it.Done(); it.Next()) {
       it.SetDebugBreak();
     }
   }
   if (debug_info->HasDebugBytecodeArray()) {
-    for (BytecodeArrayBreakIterator it(debug_info, type); !it.Done();
-         it.Next()) {
+    for (BytecodeArrayBreakIterator it(debug_info); !it.Done(); it.Next()) {
       it.SetDebugBreak();
     }
   }
@@ -901,57 +835,123 @@
   }
 }
 
+MaybeHandle<FixedArray> Debug::GetHitBreakPointObjects(
+    Handle<Object> break_point_objects) {
+  DCHECK(!break_point_objects->IsUndefined(isolate_));
+  if (!break_point_objects->IsFixedArray()) {
+    if (!CheckBreakPoint(break_point_objects)) return {};
+    Handle<FixedArray> break_points_hit = isolate_->factory()->NewFixedArray(1);
+    break_points_hit->set(0, *break_point_objects);
+    return break_points_hit;
+  }
+
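+  // Multiple break points at the same source position are stored in a
+  // FixedArray; collect every one that evaluates to true.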
+  Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
+  int num_objects = array->length();
+  Handle<FixedArray> break_points_hit =
+      isolate_->factory()->NewFixedArray(num_objects);
+  int break_points_hit_count = 0;
+  for (int i = 0; i < num_objects; ++i) {
+    Handle<Object> break_point_object(array->get(i), isolate_);
+    if (CheckBreakPoint(break_point_object)) {
+      break_points_hit->set(break_points_hit_count++, *break_point_object);
+    }
+  }
+  if (break_points_hit_count == 0) return {};
+  break_points_hit->Shrink(break_points_hit_count);
+  return break_points_hit;
+}
 
 void Debug::PrepareStepIn(Handle<JSFunction> function) {
   CHECK(last_step_action() >= StepIn);
-  if (!is_active()) return;
+  if (ignore_events()) return;
   if (in_debug_scope()) return;
-  FloodWithOneShot(function);
+  if (break_disabled()) return;
+  FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared(), isolate_));
 }
 
 void Debug::PrepareStepInSuspendedGenerator() {
   CHECK(has_suspended_generator());
-  if (!is_active()) return;
+  if (ignore_events()) return;
   if (in_debug_scope()) return;
+  if (break_disabled()) return;
   thread_local_.last_step_action_ = StepIn;
+  UpdateHookOnFunctionCall();
   Handle<JSFunction> function(
       JSGeneratorObject::cast(thread_local_.suspended_generator_)->function());
-  FloodWithOneShot(function);
+  FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared(), isolate_));
   clear_suspended_generator();
 }
 
 void Debug::PrepareStepOnThrow() {
-  if (!is_active()) return;
   if (last_step_action() == StepNone) return;
+  if (ignore_events()) return;
   if (in_debug_scope()) return;
+  if (break_disabled()) return;
 
   ClearOneShot();
 
+  int current_frame_count = CurrentFrameCount();
+
   // Iterate through the JavaScript stack looking for handlers.
   JavaScriptFrameIterator it(isolate_);
   while (!it.done()) {
     JavaScriptFrame* frame = it.frame();
     if (frame->LookupExceptionHandlerInTable(nullptr, nullptr) > 0) break;
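+    // This frame does not contain the handler; subtract its functions
+    // (including inlined ones) from the frame count.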
+    List<SharedFunctionInfo*> infos;
+    frame->GetFunctions(&infos);
+    current_frame_count -= infos.length();
     it.Advance();
   }
 
-  if (last_step_action() == StepNext || last_step_action() == StepOut) {
-    while (!it.done()) {
-      Address current_fp = it.frame()->UnpaddedFP();
-      if (current_fp >= thread_local_.target_fp_) break;
-      it.Advance();
+  // No handler found. Nothing to instrument.
+  if (it.done()) return;
+
+  bool found_handler = false;
+  // Iterate frames, including inlined frames. First, find the handler frame.
+  // Then skip to the frame we want to break in, then instrument for stepping.
+  for (; !it.done(); it.Advance()) {
+    JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
+    if (last_step_action() == StepIn) {
+      // Deoptimize frame to ensure calls are checked for step-in.
+      Deoptimizer::DeoptimizeFunction(frame->function());
+    }
+    List<FrameSummary> summaries;
+    frame->Summarize(&summaries);
+    for (int i = summaries.length() - 1; i >= 0; i--, current_frame_count--) {
+      if (!found_handler) {
+        // We have yet to find the handler. If the frame inlines multiple
+        // functions, we have to check each one for the handler.
+        // If it only contains one function, we already found the handler.
+        if (summaries.length() > 1) {
+          Handle<AbstractCode> code =
+              summaries[i].AsJavaScript().abstract_code();
+          CHECK_EQ(AbstractCode::INTERPRETED_FUNCTION, code->kind());
+          BytecodeArray* bytecode = code->GetBytecodeArray();
+          HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
+          int code_offset = summaries[i].code_offset();
+          HandlerTable::CatchPrediction prediction;
+          int index = table->LookupRange(code_offset, nullptr, &prediction);
+          if (index > 0) found_handler = true;
+        } else {
+          found_handler = true;
+        }
+      }
+
+      if (found_handler) {
+        // We found the handler. If we are stepping next or out, we need to
+        // iterate until we find the suitable target frame to break in.
+        if ((last_step_action() == StepNext || last_step_action() == StepOut) &&
+            current_frame_count > thread_local_.target_frame_count_) {
+          continue;
+        }
+        Handle<SharedFunctionInfo> info(
+            summaries[i].AsJavaScript().function()->shared());
+        if (!info->IsSubjectToDebugging() || IsBlackboxed(info)) continue;
+        FloodWithOneShot(info);
+        return;
+      }
     }
   }
-
-  // Find the closest Javascript frame we can flood with one-shots.
-  while (!it.done() &&
-         !it.frame()->function()->shared()->IsSubjectToDebugging()) {
-    it.Advance();
-  }
-
-  if (it.done()) return;  // No suitable Javascript catch handler.
-
-  FloodWithOneShot(Handle<JSFunction>(it.frame()->function()));
 }
 
 
@@ -968,46 +968,47 @@
   // If there is no JavaScript stack don't do anything.
   if (frame_id == StackFrame::NO_ID) return;
 
-  JavaScriptFrameIterator frames_it(isolate_, frame_id);
-  JavaScriptFrame* frame = frames_it.frame();
-
   feature_tracker()->Track(DebugFeatureTracker::kStepping);
 
   thread_local_.last_step_action_ = step_action;
+  UpdateHookOnFunctionCall();
 
-  // If the function on the top frame is unresolved perform step out. This will
-  // be the case when calling unknown function and having the debugger stopped
-  // in an unhandled exception.
-  if (!frame->function()->IsJSFunction()) {
-    // Step out: Find the calling JavaScript frame and flood it with
-    // breakpoints.
-    frames_it.Advance();
-    // Fill the function to return to with one-shot break points.
-    JSFunction* function = frames_it.frame()->function();
-    FloodWithOneShot(Handle<JSFunction>(function));
+  StackTraceFrameIterator frames_it(isolate_, frame_id);
+  StandardFrame* frame = frames_it.frame();
+
+  // Handle stepping in wasm functions via the wasm interpreter.
+  if (frame->is_wasm()) {
+    // If the top frame is compiled, we cannot step.
+    if (frame->is_wasm_compiled()) return;
+    WasmInterpreterEntryFrame* wasm_frame =
+        WasmInterpreterEntryFrame::cast(frame);
+    wasm_frame->wasm_instance()->debug_info()->PrepareStep(step_action);
     return;
   }
 
+  JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
+  DCHECK(js_frame->function()->IsJSFunction());
+
   // Get the debug info (create it if it does not exist).
-  FrameSummary summary = FrameSummary::GetFirst(frame);
+  auto summary = FrameSummary::GetTop(frame).AsJavaScript();
   Handle<JSFunction> function(summary.function());
   Handle<SharedFunctionInfo> shared(function->shared());
-  if (!EnsureDebugInfo(shared, function)) {
-    // Return if ensuring debug info failed.
-    return;
-  }
-
+  if (!EnsureDebugInfo(shared)) return;
   Handle<DebugInfo> debug_info(shared->GetDebugInfo());
-  BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
+
+  BreakLocation location = BreakLocation::FromFrame(debug_info, js_frame);
 
   // Any step at a return is a step-out.
   if (location.IsReturn()) step_action = StepOut;
   // A step-next at a tail call is a step-out.
   if (location.IsTailCall() && step_action == StepNext) step_action = StepOut;
+  // A step-next in blackboxed function is a step-out.
+  if (step_action == StepNext && IsBlackboxed(shared)) step_action = StepOut;
 
   thread_local_.last_statement_position_ =
       summary.abstract_code()->SourceStatementPosition(summary.code_offset());
-  thread_local_.last_fp_ = frame->UnpaddedFP();
+  int current_frame_count = CurrentFrameCount();
+  thread_local_.last_frame_count_ = current_frame_count;
   // No longer perform the current async step.
   clear_suspended_generator();
 
@@ -1015,38 +1016,45 @@
     case StepNone:
       UNREACHABLE();
       break;
-    case StepOut:
-      // Advance to caller frame.
-      frames_it.Advance();
-      // Skip native and extension functions on the stack.
-      while (!frames_it.done() &&
-             !frames_it.frame()->function()->shared()->IsSubjectToDebugging()) {
-        // Builtin functions are not subject to stepping, but need to be
-        // deoptimized to include checks for step-in at call sites.
-        Deoptimizer::DeoptimizeFunction(frames_it.frame()->function());
-        frames_it.Advance();
-      }
-      if (!frames_it.done()) {
-        // Fill the caller function to return to with one-shot break points.
-        Handle<JSFunction> caller_function(frames_it.frame()->function());
-        FloodWithOneShot(caller_function);
-        thread_local_.target_fp_ = frames_it.frame()->UnpaddedFP();
-      }
+    case StepOut: {
       // Clear last position info. For stepping out it does not matter.
       thread_local_.last_statement_position_ = kNoSourcePosition;
-      thread_local_.last_fp_ = 0;
+      thread_local_.last_frame_count_ = -1;
+      // Skip the current frame, find the first frame we want to step out to
+      // and deoptimize every frame along the way.
+      bool in_current_frame = true;
+      for (; !frames_it.done(); frames_it.Advance()) {
+        // TODO(clemensh): Implement stepping out from JS to WASM.
+        if (frames_it.frame()->is_wasm()) continue;
+        JavaScriptFrame* frame = JavaScriptFrame::cast(frames_it.frame());
+        if (last_step_action() == StepIn) {
+          // Deoptimize frame to ensure calls are checked for step-in.
+          Deoptimizer::DeoptimizeFunction(frame->function());
+        }
+        HandleScope scope(isolate_);
+        List<Handle<SharedFunctionInfo>> infos;
+        frame->GetFunctions(&infos);
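+        // Consume the list from the end, i.e. from the topmost (innermost)
+        // function outwards.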
+        for (; !infos.is_empty(); current_frame_count--) {
+          Handle<SharedFunctionInfo> info = infos.RemoveLast();
+          if (in_current_frame) {
+            // We want to skip out, so skip the current frame.
+            in_current_frame = false;
+            continue;
+          }
+          if (!info->IsSubjectToDebugging() || IsBlackboxed(info)) continue;
+          FloodWithOneShot(info);
+          thread_local_.target_frame_count_ = current_frame_count;
+          return;
+        }
+      }
       break;
+    }
     case StepNext:
-      thread_local_.target_fp_ = frame->UnpaddedFP();
-      FloodWithOneShot(function);
-      break;
+      thread_local_.target_frame_count_ = current_frame_count;
+    // Fall through.
     case StepIn:
-      FloodWithOneShot(function);
-      break;
-    case StepFrame:
-      // No point in setting one-shot breaks at places where we are not about
-      // to leave the current frame.
-      FloodWithOneShot(function, CALLS_AND_RETURNS);
+      // TODO(clemensh): Implement stepping from JS into WASM.
+      FloodWithOneShot(shared);
       break;
   }
 }
@@ -1075,13 +1083,13 @@
       Smi* position = NULL;
       if (position_alignment == STATEMENT_ALIGNED) {
         if (debug_info->HasDebugCode()) {
-          CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+          CodeBreakIterator it(debug_info);
           it.SkipToPosition(break_point_info->source_position(),
                             BREAK_POSITION_ALIGNED);
           position = Smi::FromInt(it.statement_position());
         } else {
           DCHECK(debug_info->HasDebugBytecodeArray());
-          BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+          BytecodeArrayBreakIterator it(debug_info);
           it.SkipToPosition(break_point_info->source_position(),
                             BREAK_POSITION_ALIGNED);
           position = Smi::FromInt(it.statement_position());
@@ -1102,8 +1110,9 @@
 
   thread_local_.last_step_action_ = StepNone;
   thread_local_.last_statement_position_ = kNoSourcePosition;
-  thread_local_.last_fp_ = 0;
-  thread_local_.target_fp_ = 0;
+  thread_local_.last_frame_count_ = -1;
+  thread_local_.target_frame_count_ = -1;
+  UpdateHookOnFunctionCall();
 }
 
 
@@ -1176,34 +1185,6 @@
 }
 
 
-// Count the number of continuations at which the current pc offset is at.
-static int ComputeContinuationIndexFromPcOffset(Code* code, int pc_offset) {
-  DCHECK_EQ(code->kind(), Code::FUNCTION);
-  Address pc = code->instruction_start() + pc_offset;
-  int mask = RelocInfo::ModeMask(RelocInfo::GENERATOR_CONTINUATION);
-  int index = 0;
-  for (RelocIterator it(code, mask); !it.done(); it.next()) {
-    index++;
-    RelocInfo* rinfo = it.rinfo();
-    Address current_pc = rinfo->pc();
-    if (current_pc == pc) break;
-    DCHECK(current_pc < pc);
-  }
-  return index;
-}
-
-
-// Find the pc offset for the given continuation index.
-static int ComputePcOffsetFromContinuationIndex(Code* code, int index) {
-  DCHECK_EQ(code->kind(), Code::FUNCTION);
-  DCHECK(code->has_debug_break_slots());
-  int mask = RelocInfo::ModeMask(RelocInfo::GENERATOR_CONTINUATION);
-  RelocIterator it(code, mask);
-  for (int i = 1; i < index; i++) it.next();
-  return static_cast<int>(it.rinfo()->pc() - code->instruction_start());
-}
-
-
 class RedirectActiveFunctions : public ThreadVisitor {
  public:
   explicit RedirectActiveFunctions(SharedFunctionInfo* shared)
@@ -1268,18 +1249,20 @@
   }
 
   List<Handle<JSFunction> > functions;
-  List<Handle<JSGeneratorObject> > suspended_generators;
 
   // Flush all optimized code maps. Note that the below heap iteration does not
   // cover this, because the given function might have been inlined into code
   // for which no JSFunction exists.
   {
-    SharedFunctionInfo::Iterator iterator(isolate_);
+    SharedFunctionInfo::GlobalIterator iterator(isolate_);
     while (SharedFunctionInfo* shared = iterator.Next()) {
       shared->ClearCodeFromOptimizedCodeMap();
     }
   }
 
+  // The native context also has a list of OSR'd optimized code. Clear it.
+  isolate_->ClearOSROptimizedCode();
+
   // Make sure we abort incremental marking.
   isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                                       GarbageCollectionReason::kDebugger);
@@ -1293,9 +1276,6 @@
     // smarter here and avoid the heap walk.
     HeapIterator iterator(isolate_->heap());
     HeapObject* obj;
-    // Continuation from old-style generators need to be recomputed.
-    bool find_resumables =
-        baseline_exists && IsResumableFunction(shared->kind());
 
     while ((obj = iterator.next())) {
       if (obj->IsJSFunction()) {
@@ -1307,25 +1287,12 @@
         if (baseline_exists && function->shared() == *shared) {
           functions.Add(handle(function));
         }
-      } else if (find_resumables && obj->IsJSGeneratorObject()) {
-        // This case handles async functions as well, as they use generator
-        // objects for in-progress async function execution.
-        JSGeneratorObject* generator_obj = JSGeneratorObject::cast(obj);
-        if (!generator_obj->is_suspended()) continue;
-        JSFunction* function = generator_obj->function();
-        if (!function->Inlines(*shared)) continue;
-        int pc_offset = generator_obj->continuation();
-        int index =
-            ComputeContinuationIndexFromPcOffset(function->code(), pc_offset);
-        generator_obj->set_continuation(index);
-        suspended_generators.Add(handle(generator_obj));
       }
     }
   }
 
   // We do not need to replace code to debug bytecode.
   DCHECK(baseline_exists || functions.is_empty());
-  DCHECK(baseline_exists || suspended_generators.is_empty());
 
   // We do not need to recompile to debug bytecode.
   if (baseline_exists && !shared->code()->has_debug_break_slots()) {
@@ -1337,12 +1304,6 @@
     JSFunction::EnsureLiterals(function);
   }
 
-  for (Handle<JSGeneratorObject> const generator_obj : suspended_generators) {
-    int index = generator_obj->continuation();
-    int pc_offset = ComputePcOffsetFromContinuationIndex(shared->code(), index);
-    generator_obj->set_continuation(pc_offset);
-  }
-
   // Update PCs on the stack to point to recompiled code.
   RedirectActiveFunctions redirect_visitor(*shared);
   redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
@@ -1369,12 +1330,12 @@
                             int end_position, BreakPositionAlignment alignment,
                             std::set<int>* positions) {
   if (debug_info->HasDebugCode()) {
-    CodeBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+    CodeBreakIterator it(debug_info);
     GetBreakablePositions(&it, start_position, end_position, alignment,
                           positions);
   } else {
     DCHECK(debug_info->HasDebugBytecodeArray());
-    BytecodeArrayBreakIterator it(debug_info, ALL_BREAK_LOCATIONS);
+    BytecodeArrayBreakIterator it(debug_info);
     GetBreakablePositions(&it, start_position, end_position, alignment,
                           positions);
   }
@@ -1384,24 +1345,18 @@
 bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
                                    int end_position, std::set<int>* positions) {
   while (true) {
-    if (!script->shared_function_infos()->IsWeakFixedArray()) return false;
-
-    WeakFixedArray* infos =
-        WeakFixedArray::cast(script->shared_function_infos());
     HandleScope scope(isolate_);
     List<Handle<SharedFunctionInfo>> candidates;
-    {
-      WeakFixedArray::Iterator iterator(infos);
-      SharedFunctionInfo* info;
-      while ((info = iterator.Next<SharedFunctionInfo>())) {
-        if (info->end_position() < start_position ||
-            info->start_position() >= end_position) {
-          continue;
-        }
-        if (!info->IsSubjectToDebugging()) continue;
-        if (!info->HasDebugCode() && !info->allows_lazy_compilation()) continue;
-        candidates.Add(i::handle(info));
+    SharedFunctionInfo::ScriptIterator iterator(script);
+    for (SharedFunctionInfo* info = iterator.Next(); info != nullptr;
+         info = iterator.Next()) {
+      if (info->end_position() < start_position ||
+          info->start_position() >= end_position) {
+        continue;
       }
+      if (!info->IsSubjectToDebugging()) continue;
+      if (!info->HasDebugCode() && !info->allows_lazy_compilation()) continue;
+      candidates.Add(i::handle(info));
     }
 
     bool was_compiled = false;
@@ -1415,8 +1370,7 @@
           was_compiled = true;
         }
       }
-      if (!EnsureDebugInfo(candidates[i], Handle<JSFunction>::null()))
-        return false;
+      if (!EnsureDebugInfo(candidates[i])) return false;
     }
     if (was_compiled) continue;
 
@@ -1424,7 +1378,7 @@
       CHECK(candidates[i]->HasDebugInfo());
       Handle<DebugInfo> debug_info(candidates[i]->GetDebugInfo());
       FindBreakablePositions(debug_info, start_position, end_position,
-                             STATEMENT_ALIGNED, positions);
+                             BREAK_POSITION_ALIGNED, positions);
     }
     return true;
   }
@@ -1432,9 +1386,14 @@
   return false;
 }
 
-void Debug::RecordAsyncFunction(Handle<JSGeneratorObject> generator_object) {
+void Debug::RecordGenerator(Handle<JSGeneratorObject> generator_object) {
   if (last_step_action() <= StepOut) return;
-  if (!IsAsyncFunction(generator_object->function()->shared()->kind())) return;
+
+  if (last_step_action() == StepNext) {
+    // Only consider this generator a step-next target if not stepping in.
+    if (thread_local_.target_frame_count_ < CurrentFrameCount()) return;
+  }
+
   DCHECK(!has_suspended_generator());
   thread_local_.suspended_generator_ = *generator_object;
   ClearStepping();
@@ -1504,15 +1463,14 @@
     // find the innermost function containing this position.
     // If there is no shared function info for this script at all, there is
     // no point in looking for it by walking the heap.
-    if (!script->shared_function_infos()->IsWeakFixedArray()) break;
 
     SharedFunctionInfo* shared;
     {
       SharedFunctionInfoFinder finder(position);
-      WeakFixedArray::Iterator iterator(script->shared_function_infos());
-      SharedFunctionInfo* candidate;
-      while ((candidate = iterator.Next<SharedFunctionInfo>())) {
-        finder.NewCandidate(candidate);
+      SharedFunctionInfo::ScriptIterator iterator(script);
+      for (SharedFunctionInfo* info = iterator.Next(); info != nullptr;
+           info = iterator.Next()) {
+        finder.NewCandidate(info);
       }
       shared = finder.Result();
       if (shared == NULL) break;
@@ -1541,16 +1499,11 @@
 
 
 // Ensures the debug information is present for shared.
-bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
-                            Handle<JSFunction> function) {
-  if (!shared->IsSubjectToDebugging()) return false;
-
+bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared) {
   // Return if we already have the debug info for shared.
   if (shared->HasDebugInfo()) return true;
-
-  if (function.is_null()) {
-    DCHECK(shared->HasDebugCode());
-  } else if (!Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)) {
+  if (!shared->IsSubjectToDebugging()) return false;
+  if (!shared->is_compiled() && !Compiler::CompileDebugCode(shared)) {
     return false;
   }
 
@@ -1590,8 +1543,8 @@
       } else {
         prev->set_next(current->next());
       }
+      shared->set_debug_info(Smi::FromInt(debug_info->debugger_hints()));
       delete current;
-      shared->set_debug_info(DebugInfo::uninitialized());
       return;
     }
     // Move to next in list.
@@ -1602,14 +1555,6 @@
   UNREACHABLE();
 }
 
-void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
-  after_break_target_ = NULL;
-  if (!LiveEdit::SetAfterBreakTarget(this)) {
-    // Continue just after the slot.
-    after_break_target_ = frame->pc();
-  }
-}
-
 bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
   HandleScope scope(isolate_);
 
@@ -1625,12 +1570,25 @@
   return location.IsReturn() || location.IsTailCall();
 }
 
-void Debug::FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
-                                  LiveEditFrameDropMode mode) {
-  if (mode != LIVE_EDIT_CURRENTLY_SET_MODE) {
-    thread_local_.frame_drop_mode_ = mode;
+void Debug::ScheduleFrameRestart(StackFrame* frame) {
+  // Set a target FP for the FrameDropperTrampoline builtin to drop to once
+  // we return from the debugger.
+  DCHECK(frame->is_java_script());
+  // Only reschedule to a frame further below a frame we already scheduled for.
+  if (frame->fp() <= thread_local_.restart_fp_) return;
+  // If the frame is optimized, trigger a deopt and jump into the
+  // FrameDropperTrampoline in the deoptimizer.
+  thread_local_.restart_fp_ = frame->fp();
+
+  // Reset break frame ID to the frame below the restarted frame.
+  thread_local_.break_frame_id_ = StackFrame::NO_ID;
+  for (StackTraceFrameIterator it(isolate_); !it.done(); it.Advance()) {
+    if (it.frame()->fp() > thread_local_.restart_fp_) {
+      thread_local_.break_frame_id_ = it.frame()->id();
+      return;
+    }
   }
-  thread_local_.break_frame_id_ = new_break_frame_id;
 }
 
 
@@ -1639,13 +1597,6 @@
 }
 
 
-void Debug::ClearMirrorCache() {
-  PostponeInterruptsScope postpone(isolate_);
-  HandleScope scope(isolate_);
-  CallFunction("ClearMirrorCache", 0, NULL);
-}
-
-
 Handle<FixedArray> Debug::GetLoadedScripts() {
   isolate_->heap()->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
                                       GarbageCollectionReason::kDebugger);
@@ -1705,19 +1656,17 @@
   return CallFunction("MakeCompileEvent", arraysize(argv), argv);
 }
 
-MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<String> type,
-                                              Handle<Object> id,
-                                              Handle<String> name) {
-  DCHECK(id->IsNumber());
+MaybeHandle<Object> Debug::MakeAsyncTaskEvent(
+    v8::debug::PromiseDebugActionType type, int id) {
   // Create the async task event object.
-  Handle<Object> argv[] = {type, id, name};
+  Handle<Object> argv[] = {Handle<Smi>(Smi::FromInt(type), isolate_),
+                           Handle<Smi>(Smi::FromInt(id), isolate_)};
   return CallFunction("MakeAsyncTaskEvent", arraysize(argv), argv);
 }
 
 
 void Debug::OnThrow(Handle<Object> exception) {
   if (in_debug_scope() || ignore_events()) return;
-  PrepareStepOnThrow();
   // Temporarily clear any scheduled_exception to allow evaluating
   // JavaScript from the debug event handler.
   HandleScope scope(isolate_);
@@ -1730,6 +1679,7 @@
   if (!scheduled_exception.is_null()) {
     isolate_->thread_local_top()->scheduled_exception_ = *scheduled_exception;
   }
+  PrepareStepOnThrow();
 }
 
 void Debug::OnPromiseReject(Handle<Object> promise, Handle<Object> value) {
@@ -1744,6 +1694,44 @@
   }
 }
 
+namespace {
+v8::Local<v8::Context> GetDebugEventContext(Isolate* isolate) {
+  Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
+  // Isolate::context() may have been NULL when "script collected" event
+  // occurred.
+  if (context.is_null()) return v8::Local<v8::Context>();
+  Handle<Context> native_context(context->native_context());
+  return v8::Utils::ToLocal(native_context);
+}
+}  // anonymous namespace
+
+bool Debug::IsExceptionBlackboxed(bool uncaught) {
+  JavaScriptFrameIterator it(isolate_);
+  if (it.done()) return false;
+  // An uncaught exception is blackboxed if all current frames are
+  // blackboxed; a caught exception is blackboxed if the top frame is.
+  bool is_top_frame_blackboxed = IsFrameBlackboxed(it.frame());
+  if (!uncaught || !is_top_frame_blackboxed) return is_top_frame_blackboxed;
+  it.Advance();
+  while (!it.done()) {
+    if (!IsFrameBlackboxed(it.frame())) return false;
+    it.Advance();
+  }
+  return true;
+}
+
+bool Debug::IsFrameBlackboxed(JavaScriptFrame* frame) {
+  HandleScope scope(isolate_);
+  if (!frame->HasInlinedFrames()) {
+    Handle<SharedFunctionInfo> shared(frame->function()->shared(), isolate_);
+    return IsBlackboxed(shared);
+  }
+  List<Handle<SharedFunctionInfo>> infos;
+  frame->GetFunctions(&infos);
+  for (const auto& info : infos)
+    if (!IsBlackboxed(info)) return false;
+  return true;
+}
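
The blackboxing rule for exceptions is compact enough to model in isolation. A minimal sketch, with a hypothetical vector<bool> (true = blackboxed, innermost frame first) standing in for the JavaScriptFrameIterator:

    #include <vector>

    // Mirrors Debug::IsExceptionBlackboxed: a caught exception only consults
    // the top frame, while an uncaught one is muted only if every frame on
    // the stack is blackboxed.
    bool IsExceptionBlackboxedModel(const std::vector<bool>& frames,
                                    bool uncaught) {
      if (frames.empty()) return false;
      bool top_blackboxed = frames.front();
      if (!uncaught || !top_blackboxed) return top_blackboxed;
      for (size_t i = 1; i < frames.size(); ++i) {
        if (!frames[i]) return false;
      }
      return true;
    }
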
 
 void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
   // We cannot generate debug events when JS execution is disallowed.
@@ -1765,6 +1753,9 @@
     // Check whether the promise reject is considered an uncaught exception.
     uncaught = !isolate_->PromiseHasUserDefinedRejectHandler(jspromise);
   }
+
+  if (!debug_delegate_) return;
+
   // Bail out if exception breaks are not active
   if (uncaught) {
     // Uncaught exceptions are reported by either flags.
@@ -1775,29 +1766,33 @@
   }
 
   {
-    // Check whether the break location is muted.
     JavaScriptFrameIterator it(isolate_);
-    if (!it.done() && IsMutedAtCurrentLocation(it.frame())) return;
+    // Check whether the break location is muted or the exception is
+    // blackboxed.
+    if (!it.done() && (IsMutedAtCurrentLocation(it.frame()) ||
+                       IsExceptionBlackboxed(uncaught))) {
+      return;
+    }
+    if (it.done()) return;  // Do not trigger an event with an empty stack.
   }
 
   DebugScope debug_scope(this);
   if (debug_scope.failed()) return;
+  HandleScope scope(isolate_);
+  PostponeInterruptsScope postpone(isolate_);
+  DisableBreak no_recursive_break(this);
 
-  // Create the event data object.
-  Handle<Object> event_data;
+  // Create the execution state.
+  Handle<Object> exec_state;
   // Bail out and don't call debugger if exception.
-  if (!MakeExceptionEvent(
-          exception, uncaught, promise).ToHandle(&event_data)) {
-    return;
-  }
+  if (!MakeExecutionState().ToHandle(&exec_state)) return;
 
-  // Process debug event.
-  ProcessDebugEvent(v8::Exception, Handle<JSObject>::cast(event_data), false);
-  // Return to continue execution from where the exception was thrown.
+  debug_delegate_->ExceptionThrown(
+      GetDebugEventContext(isolate_),
+      v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
+      v8::Utils::ToLocal(exception), v8::Utils::ToLocal(promise), uncaught);
 }
 
-
-void Debug::OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue) {
+void Debug::OnDebugBreak(Handle<Object> break_points_hit) {
   // The caller provided for DebugScope.
   AssertDebugContext();
   // Bail out if there is no listener for this event
@@ -1807,16 +1802,20 @@
   PrintBreakLocation();
 #endif  // DEBUG
 
+  if (!debug_delegate_) return;
   HandleScope scope(isolate_);
-  // Create the event data object.
-  Handle<Object> event_data;
-  // Bail out and don't call debugger if exception.
-  if (!MakeBreakEvent(break_points_hit).ToHandle(&event_data)) return;
+  PostponeInterruptsScope no_interrupts(isolate_);
+  DisableBreak no_recursive_break(this);
 
-  // Process debug event.
-  ProcessDebugEvent(v8::Break,
-                    Handle<JSObject>::cast(event_data),
-                    auto_continue);
+  // Create the execution state.
+  Handle<Object> exec_state;
+  // Bail out and don't call debugger if exception.
+  if (!MakeExecutionState().ToHandle(&exec_state)) return;
+
+  debug_delegate_->BreakProgramRequested(
+      GetDebugEventContext(isolate_),
+      v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
+      v8::Utils::ToLocal(break_points_hit));
 }
 
 
@@ -1825,139 +1824,182 @@
 }
 
 
-void Debug::OnBeforeCompile(Handle<Script> script) {
-  ProcessCompileEvent(v8::BeforeCompile, script);
-}
-
-
 // Handle debugger actions when a new script is compiled.
 void Debug::OnAfterCompile(Handle<Script> script) {
   ProcessCompileEvent(v8::AfterCompile, script);
 }
 
-void Debug::OnAsyncTaskEvent(Handle<String> type, Handle<Object> id,
-                             Handle<String> name) {
-  DCHECK(id->IsNumber());
+namespace {
+struct CollectedCallbackData {
+  Object** location;
+  int id;
+  Debug* debug;
+  Isolate* isolate;
+
+  CollectedCallbackData(Object** location, int id, Debug* debug,
+                        Isolate* isolate)
+      : location(location), id(id), debug(debug), isolate(isolate) {}
+};
+
+void SendAsyncTaskEventCancel(const v8::WeakCallbackInfo<void>& info) {
+  std::unique_ptr<CollectedCallbackData> data(
+      reinterpret_cast<CollectedCallbackData*>(info.GetParameter()));
+  if (!data->debug->is_active()) return;
+  HandleScope scope(data->isolate);
+  data->debug->OnAsyncTaskEvent(debug::kDebugPromiseCollected, data->id, 0);
+}
+
+void ResetPromiseHandle(const v8::WeakCallbackInfo<void>& info) {
+  CollectedCallbackData* data =
+      reinterpret_cast<CollectedCallbackData*>(info.GetParameter());
+  GlobalHandles::Destroy(data->location);
+  info.SetSecondPassCallback(&SendAsyncTaskEventCancel);
+}
+
+// In an async function, reuse the existing stack related to the outer
+// Promise. Otherwise, e.g. in a direct call to then, save a new stack.
+// Promises with multiple reactions, where one or more of them are async
+// functions, will not get a good stack trace, as async functions require
+// different stacks from direct Promise use, but we save and restore a
+// stack once for all reactions.
+//
+// If the promise is not the one awaited by an async function, a fresh
+// async task id is allocated; otherwise the async stack id recorded on
+// the outer promise is reused.
+//
+// TODO(littledan): Improve this case.
+int GetReferenceAsyncTaskId(Isolate* isolate, Handle<JSPromise> promise) {
+  Handle<Symbol> handled_by_symbol =
+      isolate->factory()->promise_handled_by_symbol();
+  Handle<Object> handled_by_promise =
+      JSObject::GetDataProperty(promise, handled_by_symbol);
+  if (!handled_by_promise->IsJSPromise()) {
+    return isolate->debug()->NextAsyncTaskId(promise);
+  }
+  Handle<JSPromise> handled_by_promise_js =
+      Handle<JSPromise>::cast(handled_by_promise);
+  Handle<Symbol> async_stack_id_symbol =
+      isolate->factory()->promise_async_stack_id_symbol();
+  Handle<Object> async_task_id =
+      JSObject::GetDataProperty(handled_by_promise_js, async_stack_id_symbol);
+  if (!async_task_id->IsSmi()) {
+    return isolate->debug()->NextAsyncTaskId(promise);
+  }
+  return Handle<Smi>::cast(async_task_id)->value();
+}
+}  // namespace
+
+void Debug::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
+                           Handle<Object> parent) {
+  if (!debug_delegate_) return;
+  int id = GetReferenceAsyncTaskId(isolate_, promise);
+  switch (type) {
+    case PromiseHookType::kInit:
+      OnAsyncTaskEvent(debug::kDebugPromiseCreated, id,
+                       parent->IsJSPromise()
+                           ? GetReferenceAsyncTaskId(
+                                 isolate_, Handle<JSPromise>::cast(parent))
+                           : 0);
+      return;
+    case PromiseHookType::kResolve:
+      // We cannot use this hook because it is called before the promise
+      // object has its resolved status set.
+      return;
+    case PromiseHookType::kBefore:
+      OnAsyncTaskEvent(debug::kDebugWillHandle, id, 0);
+      return;
+    case PromiseHookType::kAfter:
+      OnAsyncTaskEvent(debug::kDebugDidHandle, id, 0);
+      return;
+  }
+}
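
The switch above maps each promise hook to an async-task debug event. A sketch of that mapping under hypothetical names (TaskEvent, HookType, and emit are stand-ins; the caller passes parent_id = 0 when the parent is not a promise):

    #include <functional>

    // Hypothetical event kinds mirroring debug::PromiseDebugActionType.
    enum class TaskEvent { kCreated, kWillHandle, kDidHandle };
    enum class HookType { kInit, kResolve, kBefore, kAfter };

    // Mirrors Debug::RunPromiseHook's switch: kInit reports creation, kResolve
    // is ignored because the promise has not yet received its resolved status,
    // and kBefore/kAfter bracket each reaction job.
    void RunPromiseHookModel(
        HookType type, int id, int parent_id,
        const std::function<void(TaskEvent, int, int)>& emit) {
      switch (type) {
        case HookType::kInit:
          emit(TaskEvent::kCreated, id, parent_id);
          return;
        case HookType::kResolve:
          return;  // Too early: the resolved status is not set yet.
        case HookType::kBefore:
          emit(TaskEvent::kWillHandle, id, 0);
          return;
        case HookType::kAfter:
          emit(TaskEvent::kDidHandle, id, 0);
          return;
      }
    }
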
+
+int Debug::NextAsyncTaskId(Handle<JSObject> promise) {
+  LookupIterator it(promise, isolate_->factory()->promise_async_id_symbol());
+  Maybe<bool> maybe = JSReceiver::HasProperty(&it);
+  if (maybe.ToChecked()) {
+    MaybeHandle<Object> result = Object::GetProperty(&it);
+    return Handle<Smi>::cast(result.ToHandleChecked())->value();
+  }
+  Handle<Smi> async_id =
+      handle(Smi::FromInt(++thread_local_.async_task_count_), isolate_);
+  Object::SetProperty(&it, async_id, SLOPPY, Object::MAY_BE_STORE_FROM_KEYED)
+      .ToChecked();
+  Handle<Object> global_handle = isolate_->global_handles()->Create(*promise);
+  // We send an EnqueueRecurring async task event when the promise is
+  // fulfilled or rejected, and WillHandle/DidHandle events for every
+  // microtask scheduled for this promise.
+  // We need to send a cancel event once no further microtasks can be
+  // started for this promise and all current microtasks have finished.
+  // Since the promise is held strongly while at least one microtask is
+  // scheduled (inside PromiseReactionJobInfo), we can send the cancel event
+  // from the weak callback.
+  GlobalHandles::MakeWeak(
+      global_handle.location(),
+      new CollectedCallbackData(global_handle.location(), async_id->value(),
+                                this, isolate_),
+      &ResetPromiseHandle, v8::WeakCallbackType::kParameter);
+  return async_id->value();
+}
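
NextAsyncTaskId is a memoized counter keyed on the promise: the first call stores a fresh id under a private symbol and later calls return it; a weak global handle additionally fires a kDebugPromiseCollected event once the promise dies. A sketch of just the caching part, with an ordinary map in place of the symbol property and the weak-callback cleanup omitted:

    #include <unordered_map>

    // Hypothetical stand-in for a JSPromise identity.
    using PromiseKey = const void*;

    class AsyncTaskIds {
     public:
      // Mirrors Debug::NextAsyncTaskId: return the id already recorded for
      // this promise, or allocate the next one and remember it.
      int NextAsyncTaskId(PromiseKey promise) {
        auto it = ids_.find(promise);
        if (it != ids_.end()) return it->second;
        int id = ++async_task_count_;
        ids_.emplace(promise, id);
        return id;
      }

     private:
      int async_task_count_ = 0;
      std::unordered_map<PromiseKey, int> ids_;
    };
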
+
+namespace {
+debug::Location GetDebugLocation(Handle<Script> script, int source_position) {
+  Script::PositionInfo info;
+  Script::GetPositionInfo(script, source_position, &info, Script::WITH_OFFSET);
+  return debug::Location(info.line, info.column);
+}
+}  // namespace
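
GetDebugLocation resolves a flat source position into a line/column pair. A self-contained model of the conversion over a raw source string; the real code delegates to Script::GetPositionInfo, which consults the script's precomputed line-ends table instead of scanning:

    #include <string>
    #include <utility>

    // Map a character offset into zero-based (line, column), as
    // debug::Location reports them.
    std::pair<int, int> GetLocationModel(const std::string& source,
                                         size_t position) {
      int line = 0, column = 0;
      for (size_t i = 0; i < position && i < source.size(); ++i) {
        if (source[i] == '\n') {
          ++line;
          column = 0;
        } else {
          ++column;
        }
      }
      return {line, column};
    }
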
+
+bool Debug::IsBlackboxed(Handle<SharedFunctionInfo> shared) {
+  if (!debug_delegate_) return false;
+  if (!shared->computed_debug_is_blackboxed()) {
+    bool is_blackboxed = false;
+    if (shared->script()->IsScript()) {
+      SuppressDebug while_processing(this);
+      HandleScope handle_scope(isolate_);
+      PostponeInterruptsScope no_interrupts(isolate_);
+      DisableBreak no_recursive_break(this);
+      Handle<Script> script(Script::cast(shared->script()));
+      if (script->type() == i::Script::TYPE_NORMAL) {
+        debug::Location start =
+            GetDebugLocation(script, shared->start_position());
+        debug::Location end = GetDebugLocation(script, shared->end_position());
+        is_blackboxed = debug_delegate_->IsFunctionBlackboxed(
+            ToApiHandle<debug::Script>(script), start, end);
+      }
+    }
+    shared->set_debug_is_blackboxed(is_blackboxed);
+    shared->set_computed_debug_is_blackboxed(true);
+  }
+  return shared->debug_is_blackboxed();
+}
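
IsBlackboxed consults the delegate at most once per function, caching the verdict on the SharedFunctionInfo behind the computed_debug_is_blackboxed bit. The same memoization pattern in isolation (ask_delegate is a hypothetical callback):

    #include <functional>

    // Memoized predicate in the style of Debug::IsBlackboxed: the (possibly
    // expensive) delegate query runs once, then the cached bit is reused.
    struct FunctionDebugInfo {
      bool computed_debug_is_blackboxed = false;
      bool debug_is_blackboxed = false;
    };

    bool IsBlackboxedModel(FunctionDebugInfo* info,
                           const std::function<bool()>& ask_delegate) {
      if (!info->computed_debug_is_blackboxed) {
        info->debug_is_blackboxed = ask_delegate();
        info->computed_debug_is_blackboxed = true;
      }
      return info->debug_is_blackboxed;
    }
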
+
+void Debug::OnAsyncTaskEvent(debug::PromiseDebugActionType type, int id,
+                             int parent_id) {
   if (in_debug_scope() || ignore_events()) return;
-
-  HandleScope scope(isolate_);
-  DebugScope debug_scope(this);
+  if (!debug_delegate_) return;
+  SuppressDebug while_processing(this);
+  DebugScope debug_scope(isolate_->debug());
   if (debug_scope.failed()) return;
-
-  // Create the script collected state object.
-  Handle<Object> event_data;
-  // Bail out and don't call debugger if exception.
-  if (!MakeAsyncTaskEvent(type, id, name).ToHandle(&event_data)) return;
-
-  // Process debug event.
-  ProcessDebugEvent(v8::AsyncTaskEvent,
-                    Handle<JSObject>::cast(event_data),
-                    true);
-}
-
-
-void Debug::ProcessDebugEvent(v8::DebugEvent event,
-                              Handle<JSObject> event_data,
-                              bool auto_continue) {
   HandleScope scope(isolate_);
-
-  // Create the execution state.
-  Handle<Object> exec_state;
-  // Bail out and don't call debugger if exception.
-  if (!MakeExecutionState().ToHandle(&exec_state)) return;
-
-  // First notify the message handler if any.
-  if (message_handler_ != NULL) {
-    NotifyMessageHandler(event,
-                         Handle<JSObject>::cast(exec_state),
-                         event_data,
-                         auto_continue);
-  }
-  // Notify registered debug event listener. This can be either a C or
-  // a JavaScript function. Don't call event listener for v8::Break
-  // here, if it's only a debug command -- they will be processed later.
-  if ((event != v8::Break || !auto_continue) && !event_listener_.is_null()) {
-    CallEventCallback(event, exec_state, event_data, NULL);
-  }
+  PostponeInterruptsScope no_interrupts(isolate_);
+  DisableBreak no_recursive_break(this);
+  debug_delegate_->PromiseEventOccurred(type, id, parent_id);
 }
 
-
-void Debug::CallEventCallback(v8::DebugEvent event,
-                              Handle<Object> exec_state,
-                              Handle<Object> event_data,
-                              v8::Debug::ClientData* client_data) {
-  // Prevent other interrupts from triggering, for example API callbacks,
-  // while dispatching event listners.
-  PostponeInterruptsScope postpone(isolate_);
-  bool previous = in_debug_event_listener_;
-  in_debug_event_listener_ = true;
-  if (event_listener_->IsForeign()) {
-    // Invoke the C debug event listener.
-    v8::DebugInterface::EventCallback callback =
-        FUNCTION_CAST<v8::DebugInterface::EventCallback>(
-            Handle<Foreign>::cast(event_listener_)->foreign_address());
-    EventDetailsImpl event_details(event,
-                                   Handle<JSObject>::cast(exec_state),
-                                   Handle<JSObject>::cast(event_data),
-                                   event_listener_data_,
-                                   client_data);
-    callback(event_details);
-    CHECK(!isolate_->has_scheduled_exception());
-  } else {
-    // Invoke the JavaScript debug event listener.
-    DCHECK(event_listener_->IsJSFunction());
-    Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event), isolate_),
-                              exec_state,
-                              event_data,
-                              event_listener_data_ };
-    Handle<JSReceiver> global = isolate_->global_proxy();
-    MaybeHandle<Object> result =
-        Execution::Call(isolate_, Handle<JSFunction>::cast(event_listener_),
-                        global, arraysize(argv), argv);
-    CHECK(!result.is_null());  // Listeners must not throw.
-  }
-  in_debug_event_listener_ = previous;
-}
-
-
 void Debug::ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script) {
   if (ignore_events()) return;
+  if (script->type() != i::Script::TYPE_NORMAL &&
+      script->type() != i::Script::TYPE_WASM) {
+    return;
+  }
+  if (!debug_delegate_) return;
   SuppressDebug while_processing(this);
-
-  bool in_nested_debug_scope = in_debug_scope();
-  HandleScope scope(isolate_);
   DebugScope debug_scope(this);
   if (debug_scope.failed()) return;
-
-  if (event == v8::AfterCompile) {
-    // If debugging there might be script break points registered for this
-    // script. Make sure that these break points are set.
-    Handle<Object> argv[] = {Script::GetWrapper(script)};
-    if (CallFunction("UpdateScriptBreakPoints", arraysize(argv), argv)
-            .is_null()) {
-      return;
-    }
-  }
-
-  // Create the compile state object.
-  Handle<Object> event_data;
-  // Bail out and don't call debugger if exception.
-  if (!MakeCompileEvent(script, event).ToHandle(&event_data)) return;
-
-  // Don't call NotifyMessageHandler if already in debug scope to avoid running
-  // nested command loop.
-  if (in_nested_debug_scope) {
-    if (event_listener_.is_null()) return;
-    // Create the execution state.
-    Handle<Object> exec_state;
-    // Bail out and don't call debugger if exception.
-    if (!MakeExecutionState().ToHandle(&exec_state)) return;
-
-    CallEventCallback(event, exec_state, event_data, NULL);
-  } else {
-    // Process debug event.
-    ProcessDebugEvent(event, Handle<JSObject>::cast(event_data), true);
-  }
+  HandleScope scope(isolate_);
+  PostponeInterruptsScope postpone(isolate_);
+  DisableBreak no_recursive_break(this);
+  debug_delegate_->ScriptCompiled(ToApiHandle<debug::Script>(script),
+                                  event != v8::AfterCompile);
 }
 
 
@@ -1969,177 +2011,46 @@
   return handle(*debug_context(), isolate_);
 }
 
-
-void Debug::NotifyMessageHandler(v8::DebugEvent event,
-                                 Handle<JSObject> exec_state,
-                                 Handle<JSObject> event_data,
-                                 bool auto_continue) {
-  // Prevent other interrupts from triggering, for example API callbacks,
-  // while dispatching message handler callbacks.
-  PostponeInterruptsScope no_interrupts(isolate_);
-  DCHECK(is_active_);
-  HandleScope scope(isolate_);
-  // Process the individual events.
-  bool sendEventMessage = false;
-  switch (event) {
-    case v8::Break:
-      sendEventMessage = !auto_continue;
-      break;
-    case v8::NewFunction:
-    case v8::BeforeCompile:
-    case v8::CompileError:
-    case v8::AsyncTaskEvent:
-      break;
-    case v8::Exception:
-    case v8::AfterCompile:
-      sendEventMessage = true;
-      break;
+int Debug::CurrentFrameCount() {
+  StackTraceFrameIterator it(isolate_);
+  if (break_frame_id() != StackFrame::NO_ID) {
+    // Skip to break frame.
+    DCHECK(in_debug_scope());
+    while (!it.done() && it.frame()->id() != break_frame_id()) it.Advance();
   }
-
-  // The debug command interrupt flag might have been set when the command was
-  // added. It should be enough to clear the flag only once while we are in the
-  // debugger.
-  DCHECK(in_debug_scope());
-  isolate_->stack_guard()->ClearDebugCommand();
-
-  // Notify the debugger that a debug event has occurred unless auto continue is
-  // active in which case no event is send.
-  if (sendEventMessage) {
-    MessageImpl message = MessageImpl::NewEvent(
-        event,
-        auto_continue,
-        Handle<JSObject>::cast(exec_state),
-        Handle<JSObject>::cast(event_data));
-    InvokeMessageHandler(message);
-  }
-
-  // If auto continue don't make the event cause a break, but process messages
-  // in the queue if any. For script collected events don't even process
-  // messages in the queue as the execution state might not be what is expected
-  // by the client.
-  if (auto_continue && !has_commands()) return;
-
-  // DebugCommandProcessor goes here.
-  bool running = auto_continue;
-
-  Handle<Object> cmd_processor_ctor =
-      JSReceiver::GetProperty(isolate_, exec_state, "debugCommandProcessor")
-          .ToHandleChecked();
-  Handle<Object> ctor_args[] = { isolate_->factory()->ToBoolean(running) };
-  Handle<JSReceiver> cmd_processor = Handle<JSReceiver>::cast(
-      Execution::Call(isolate_, cmd_processor_ctor, exec_state, 1, ctor_args)
-          .ToHandleChecked());
-  Handle<JSFunction> process_debug_request = Handle<JSFunction>::cast(
-      JSReceiver::GetProperty(isolate_, cmd_processor, "processDebugRequest")
-          .ToHandleChecked());
-  Handle<Object> is_running =
-      JSReceiver::GetProperty(isolate_, cmd_processor, "isRunning")
-          .ToHandleChecked();
-
-  // Process requests from the debugger.
-  do {
-    // Wait for new command in the queue.
-    command_received_.Wait();
-
-    // Get the command from the queue.
-    CommandMessage command = command_queue_.Get();
-    isolate_->logger()->DebugTag(
-        "Got request from command queue, in interactive loop.");
-    if (!is_active()) {
-      // Delete command text and user data.
-      command.Dispose();
-      return;
-    }
-
-    Vector<const uc16> command_text(
-        const_cast<const uc16*>(command.text().start()),
-        command.text().length());
-    Handle<String> request_text = isolate_->factory()->NewStringFromTwoByte(
-        command_text).ToHandleChecked();
-    Handle<Object> request_args[] = { request_text };
-    Handle<Object> answer_value;
-    Handle<String> answer;
-    MaybeHandle<Object> maybe_exception;
-    MaybeHandle<Object> maybe_result =
-        Execution::TryCall(isolate_, process_debug_request, cmd_processor, 1,
-                           request_args, &maybe_exception);
-
-    if (maybe_result.ToHandle(&answer_value)) {
-      if (answer_value->IsUndefined(isolate_)) {
-        answer = isolate_->factory()->empty_string();
-      } else {
-        answer = Handle<String>::cast(answer_value);
-      }
-
-      // Log the JSON request/response.
-      if (FLAG_trace_debug_json) {
-        PrintF("%s\n", request_text->ToCString().get());
-        PrintF("%s\n", answer->ToCString().get());
-      }
-
-      Handle<Object> is_running_args[] = { answer };
-      maybe_result = Execution::Call(
-          isolate_, is_running, cmd_processor, 1, is_running_args);
-      Handle<Object> result;
-      if (!maybe_result.ToHandle(&result)) break;
-      running = result->IsTrue(isolate_);
+  int counter = 0;
+  while (!it.done()) {
+    if (it.frame()->is_optimized()) {
+      List<SharedFunctionInfo*> infos;
+      OptimizedFrame::cast(it.frame())->GetFunctions(&infos);
+      counter += infos.length();
     } else {
-      Handle<Object> exception;
-      if (!maybe_exception.ToHandle(&exception)) break;
-      Handle<Object> result;
-      if (!Object::ToString(isolate_, exception).ToHandle(&result)) break;
-      answer = Handle<String>::cast(result);
+      counter++;
     }
-
-    // Return the result.
-    MessageImpl message = MessageImpl::NewResponse(
-        event, running, exec_state, event_data, answer, command.client_data());
-    InvokeMessageHandler(message);
-    command.Dispose();
-
-    // Return from debug event processing if either the VM is put into the
-    // running state (through a continue command) or auto continue is active
-    // and there are no more commands queued.
-  } while (!running || has_commands());
-  command_queue_.Clear();
+    it.Advance();
+  }
+  return counter;
 }
 
-
-void Debug::SetEventListener(Handle<Object> callback,
-                             Handle<Object> data) {
-  GlobalHandles* global_handles = isolate_->global_handles();
-
-  // Remove existing entry.
-  GlobalHandles::Destroy(event_listener_.location());
-  event_listener_ = Handle<Object>();
-  GlobalHandles::Destroy(event_listener_data_.location());
-  event_listener_data_ = Handle<Object>();
-
-  // Set new entry.
-  if (!callback->IsUndefined(isolate_) && !callback->IsNull(isolate_)) {
-    event_listener_ = global_handles->Create(*callback);
-    if (data.is_null()) data = isolate_->factory()->undefined_value();
-    event_listener_data_ = global_handles->Create(*data);
-  }
-
+void Debug::SetDebugDelegate(debug::DebugDelegate* delegate,
+                             bool pass_ownership) {
+  RemoveDebugDelegate();
+  debug_delegate_ = delegate;
+  owns_debug_delegate_ = pass_ownership;
   UpdateState();
 }
 
-
-void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
-  message_handler_ = handler;
-  UpdateState();
-  if (handler == NULL && in_debug_scope()) {
-    // Send an empty command to the debugger if in a break to make JavaScript
-    // run again if the debugger is closed.
-    EnqueueCommandMessage(Vector<const uint16_t>::empty());
+void Debug::RemoveDebugDelegate() {
+  if (debug_delegate_ == nullptr) return;
+  if (owns_debug_delegate_) {
+    owns_debug_delegate_ = false;
+    delete debug_delegate_;
   }
+  debug_delegate_ = nullptr;
 }
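
The pass_ownership flag decides whether Debug deletes the delegate when it is replaced or removed. The same optional-ownership pattern in miniature; DebugDelegate here is a bare hypothetical interface:

    // Optional-ownership handoff as in Debug::SetDebugDelegate /
    // RemoveDebugDelegate: the holder deletes the previous delegate only if
    // it was handed over with pass_ownership == true.
    struct DebugDelegate {
      virtual ~DebugDelegate() = default;
    };

    class DelegateHolder {
     public:
      ~DelegateHolder() { Remove(); }

      void Set(DebugDelegate* delegate, bool pass_ownership) {
        Remove();
        delegate_ = delegate;
        owns_delegate_ = pass_ownership;
      }

      void Remove() {
        if (delegate_ == nullptr) return;
        if (owns_delegate_) {
          owns_delegate_ = false;
          delete delegate_;
        }
        delegate_ = nullptr;
      }

     private:
      DebugDelegate* delegate_ = nullptr;
      bool owns_delegate_ = false;
    };
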
 
-
-
 void Debug::UpdateState() {
-  bool is_active = message_handler_ != NULL || !event_listener_.is_null();
+  bool is_active = debug_delegate_ != nullptr;
   if (is_active || in_debug_scope()) {
     // Note that the debug context could have already been loaded to
     // bootstrap test cases.
@@ -2150,36 +2061,15 @@
     Unload();
   }
   is_active_ = is_active;
+  isolate_->DebugStateUpdated();
 }
 
-
-// Calls the registered debug message handler. This callback is part of the
-// public API.
-void Debug::InvokeMessageHandler(MessageImpl message) {
-  if (message_handler_ != NULL) message_handler_(message);
+void Debug::UpdateHookOnFunctionCall() {
+  STATIC_ASSERT(LastStepAction == StepIn);
+  hook_on_function_call_ = thread_local_.last_step_action_ == StepIn ||
+                           isolate_->needs_side_effect_check();
 }
 
-
-// Puts a command coming from the public API on the queue.  Creates
-// a copy of the command string managed by the debugger.  Up to this
-// point, the command data was managed by the API client.  Called
-// by the API client thread.
-void Debug::EnqueueCommandMessage(Vector<const uint16_t> command,
-                                  v8::Debug::ClientData* client_data) {
-  // Need to cast away const.
-  CommandMessage message = CommandMessage::New(
-      Vector<uint16_t>(const_cast<uint16_t*>(command.start()),
-                       command.length()),
-      client_data);
-  isolate_->logger()->DebugTag("Put command on command_queue.");
-  command_queue_.Put(message);
-  command_received_.Signal();
-
-  // Set the debug command break flag to have the command processed.
-  if (!in_debug_scope()) isolate_->stack_guard()->RequestDebugCommand();
-}
-
-
 MaybeHandle<Object> Debug::Call(Handle<Object> fun, Handle<Object> data) {
   DebugScope debug_scope(this);
   if (debug_scope.failed()) return isolate_->factory()->undefined_value();
@@ -2201,6 +2091,8 @@
 
 
 void Debug::HandleDebugBreak() {
+  // Initialize LiveEdit.
+  LiveEdit::InitializeThreadLocal(this);
   // Ignore debug break during bootstrapping.
   if (isolate_->bootstrapper()->IsActive()) return;
   // Just continue if breaks are disabled.
@@ -2215,8 +2107,21 @@
     DCHECK(!it.done());
     Object* fun = it.frame()->function();
     if (fun && fun->IsJSFunction()) {
-      // Don't stop in builtin functions.
-      if (!JSFunction::cast(fun)->shared()->IsSubjectToDebugging()) return;
+      HandleScope scope(isolate_);
+      // Don't stop in builtin and blackboxed functions.
+      Handle<SharedFunctionInfo> shared(JSFunction::cast(fun)->shared(),
+                                        isolate_);
+      if (!shared->IsSubjectToDebugging() || IsBlackboxed(shared)) {
+        // The inspector uses pause-on-next-statement for asynchronous
+        // breakpoints. When such a breakpoint fires, we try to break on the
+        // first non-blackboxed statement. To achieve this we deoptimize the
+        // current function and keep the requested DebugBreak set even if the
+        // function is blackboxed, so that we can still break on the next
+        // non-blackboxed function call.
+        // TODO(yangguo): introduce break_on_function_entry since the current
+        // implementation is slow.
+        Deoptimizer::DeoptimizeFunction(JSFunction::cast(fun));
+        return;
+      }
       JSGlobalObject* global =
           JSFunction::cast(fun)->context()->global_object();
       // Don't stop in debugger functions.
@@ -2226,47 +2131,30 @@
     }
   }
 
-  // Collect the break state before clearing the flags.
-  bool debug_command_only = isolate_->stack_guard()->CheckDebugCommand() &&
-                            !isolate_->stack_guard()->CheckDebugBreak();
-
   isolate_->stack_guard()->ClearDebugBreak();
 
   // Clear stepping to avoid duplicate breaks.
   ClearStepping();
 
-  ProcessDebugMessages(debug_command_only);
-}
-
-
-void Debug::ProcessDebugMessages(bool debug_command_only) {
-  isolate_->stack_guard()->ClearDebugCommand();
-
-  StackLimitCheck check(isolate_);
-  if (check.HasOverflowed()) return;
-
   HandleScope scope(isolate_);
   DebugScope debug_scope(this);
   if (debug_scope.failed()) return;
 
-  // Notify the debug event listeners. Indicate auto continue if the break was
-  // a debug command break.
-  OnDebugBreak(isolate_->factory()->undefined_value(), debug_command_only);
+  OnDebugBreak(isolate_->factory()->undefined_value());
 }
 
 #ifdef DEBUG
 void Debug::PrintBreakLocation() {
   if (!FLAG_print_break_location) return;
   HandleScope scope(isolate_);
-  JavaScriptFrameIterator iterator(isolate_);
+  StackTraceFrameIterator iterator(isolate_);
   if (iterator.done()) return;
-  JavaScriptFrame* frame = iterator.frame();
-  FrameSummary summary = FrameSummary::GetFirst(frame);
-  int source_position =
-      summary.abstract_code()->SourcePosition(summary.code_offset());
-  Handle<Object> script_obj(summary.function()->shared()->script(), isolate_);
+  StandardFrame* frame = iterator.frame();
+  FrameSummary summary = FrameSummary::GetTop(frame);
+  int source_position = summary.SourcePosition();
+  Handle<Object> script_obj = summary.script();
   PrintF("[debug] break in function '");
-  summary.function()->PrintName();
+  summary.FunctionName()->PrintOn(stdout);
   PrintF("'.\n");
   if (script_obj->IsScript()) {
     Handle<Script> script = Handle<Script>::cast(script_obj);
@@ -2308,14 +2196,11 @@
   // Store the previous break id, frame id and return value.
   break_id_ = debug_->break_id();
   break_frame_id_ = debug_->break_frame_id();
-  return_value_ = debug_->return_value();
 
   // Create the new break info. If there is no proper frames there is no break
   // frame id.
   StackTraceFrameIterator it(isolate());
   bool has_frames = !it.done();
-  // We don't currently support breaking inside wasm framess.
-  DCHECK(!has_frames || !it.is_wasm());
   debug_->thread_local_.break_frame_id_ =
       has_frames ? it.frame()->id() : StackFrame::NO_ID;
   debug_->SetNextBreakId();
@@ -2329,18 +2214,6 @@
 
 
 DebugScope::~DebugScope() {
-  if (!failed_ && prev_ == NULL) {
-    // Clear mirror cache when leaving the debugger. Skip this if there is a
-    // pending exception as clearing the mirror cache calls back into
-    // JavaScript. This can happen if the v8::Debug::Call is used in which
-    // case the exception should end up in the calling code.
-    if (!isolate()->has_pending_exception()) debug_->ClearMirrorCache();
-
-    // If there are commands in the queue when leaving the debugger request
-    // that these commands are processed.
-    if (debug_->has_commands()) isolate()->stack_guard()->RequestDebugCommand();
-  }
-
   // Leaving this debugger entry.
   base::NoBarrier_Store(&debug_->thread_local_.current_debug_scope_,
                         reinterpret_cast<base::AtomicWord>(prev_));
@@ -2348,276 +2221,197 @@
   // Restore to the previous break state.
   debug_->thread_local_.break_frame_id_ = break_frame_id_;
   debug_->thread_local_.break_id_ = break_id_;
-  debug_->thread_local_.return_value_ = return_value_;
 
   debug_->UpdateState();
 }
 
-
-MessageImpl MessageImpl::NewEvent(DebugEvent event,
-                                  bool running,
-                                  Handle<JSObject> exec_state,
-                                  Handle<JSObject> event_data) {
-  MessageImpl message(true, event, running,
-                      exec_state, event_data, Handle<String>(), NULL);
-  return message;
+ReturnValueScope::ReturnValueScope(Debug* debug) : debug_(debug) {
+  return_value_ = debug_->return_value_handle();
 }
 
-
-MessageImpl MessageImpl::NewResponse(DebugEvent event,
-                                     bool running,
-                                     Handle<JSObject> exec_state,
-                                     Handle<JSObject> event_data,
-                                     Handle<String> response_json,
-                                     v8::Debug::ClientData* client_data) {
-  MessageImpl message(false, event, running,
-                      exec_state, event_data, response_json, client_data);
-  return message;
+ReturnValueScope::~ReturnValueScope() {
+  debug_->set_return_value(*return_value_);
 }
 
-
-MessageImpl::MessageImpl(bool is_event,
-                         DebugEvent event,
-                         bool running,
-                         Handle<JSObject> exec_state,
-                         Handle<JSObject> event_data,
-                         Handle<String> response_json,
-                         v8::Debug::ClientData* client_data)
-    : is_event_(is_event),
-      event_(event),
-      running_(running),
-      exec_state_(exec_state),
-      event_data_(event_data),
-      response_json_(response_json),
-      client_data_(client_data) {}
-
-
-bool MessageImpl::IsEvent() const {
-  return is_event_;
-}
-
-
-bool MessageImpl::IsResponse() const {
-  return !is_event_;
-}
-
-
-DebugEvent MessageImpl::GetEvent() const {
-  return event_;
-}
-
-
-bool MessageImpl::WillStartRunning() const {
-  return running_;
-}
-
-
-v8::Local<v8::Object> MessageImpl::GetExecutionState() const {
-  return v8::Utils::ToLocal(exec_state_);
-}
-
-
-v8::Isolate* MessageImpl::GetIsolate() const {
-  return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
-}
-
-
-v8::Local<v8::Object> MessageImpl::GetEventData() const {
-  return v8::Utils::ToLocal(event_data_);
-}
-
-
-v8::Local<v8::String> MessageImpl::GetJSON() const {
-  Isolate* isolate = event_data_->GetIsolate();
-  v8::EscapableHandleScope scope(reinterpret_cast<v8::Isolate*>(isolate));
-
-  if (IsEvent()) {
-    // Call toJSONProtocol on the debug event object.
-    Handle<Object> fun =
-        JSReceiver::GetProperty(isolate, event_data_, "toJSONProtocol")
-            .ToHandleChecked();
-    if (!fun->IsJSFunction()) {
-      return v8::Local<v8::String>();
+bool Debug::PerformSideEffectCheck(Handle<JSFunction> function) {
+  DCHECK(isolate_->needs_side_effect_check());
+  DisallowJavascriptExecution no_js(isolate_);
+  if (!Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) return false;
+  Deoptimizer::DeoptimizeFunction(*function);
+  if (!function->shared()->HasNoSideEffect()) {
+    if (FLAG_trace_side_effect_free_debug_evaluate) {
+      PrintF("[debug-evaluate] Function %s failed side effect check.\n",
+             function->shared()->DebugName()->ToCString().get());
     }
+    side_effect_check_failed_ = true;
+    // Throw an uncatchable termination exception.
+    isolate_->TerminateExecution();
+    return false;
+  }
+  return true;
+}
 
-    MaybeHandle<Object> maybe_json =
-        Execution::TryCall(isolate, fun, event_data_, 0, NULL);
-    Handle<Object> json;
-    if (!maybe_json.ToHandle(&json) || !json->IsString()) {
-      return v8::Local<v8::String>();
-    }
-    return scope.Escape(v8::Utils::ToLocal(Handle<String>::cast(json)));
-  } else {
-    return v8::Utils::ToLocal(response_json_);
+bool Debug::PerformSideEffectCheckForCallback(Address function) {
+  DCHECK(isolate_->needs_side_effect_check());
+  if (DebugEvaluate::CallbackHasNoSideEffect(function)) return true;
+  side_effect_check_failed_ = true;
+  // Throw an uncatchable termination exception.
+  isolate_->TerminateExecution();
+  isolate_->OptionalRescheduleException(false);
+  return false;
+}
+
+void LegacyDebugDelegate::PromiseEventOccurred(
+    v8::debug::PromiseDebugActionType type, int id, int parent_id) {
+  Handle<Object> event_data;
+  if (isolate_->debug()->MakeAsyncTaskEvent(type, id).ToHandle(&event_data)) {
+    ProcessDebugEvent(v8::AsyncTaskEvent, Handle<JSObject>::cast(event_data));
   }
 }
 
-namespace {
-v8::Local<v8::Context> GetDebugEventContext(Isolate* isolate) {
-  Handle<Context> context = isolate->debug()->debugger_entry()->GetContext();
-  // Isolate::context() may have been NULL when "script collected" event
-  // occured.
-  if (context.is_null()) return v8::Local<v8::Context>();
-  Handle<Context> native_context(context->native_context());
-  return v8::Utils::ToLocal(native_context);
-}
-}  // anonymous namespace
-
-v8::Local<v8::Context> MessageImpl::GetEventContext() const {
-  Isolate* isolate = event_data_->GetIsolate();
-  v8::Local<v8::Context> context = GetDebugEventContext(isolate);
-  // Isolate::context() may be NULL when "script collected" event occurs.
-  DCHECK(!context.IsEmpty());
-  return context;
+void LegacyDebugDelegate::ScriptCompiled(v8::Local<v8::debug::Script> script,
+                                         bool is_compile_error) {
+  Handle<Object> event_data;
+  v8::DebugEvent event = is_compile_error ? v8::CompileError : v8::AfterCompile;
+  if (isolate_->debug()
+          ->MakeCompileEvent(v8::Utils::OpenHandle(*script), event)
+          .ToHandle(&event_data)) {
+    ProcessDebugEvent(event, Handle<JSObject>::cast(event_data));
+  }
 }
 
-
-v8::Debug::ClientData* MessageImpl::GetClientData() const {
-  return client_data_;
+void LegacyDebugDelegate::BreakProgramRequested(
+    v8::Local<v8::Context> paused_context, v8::Local<v8::Object> exec_state,
+    v8::Local<v8::Value> break_points_hit) {
+  Handle<Object> event_data;
+  if (isolate_->debug()
+          ->MakeBreakEvent(v8::Utils::OpenHandle(*break_points_hit))
+          .ToHandle(&event_data)) {
+    ProcessDebugEvent(
+        v8::Break, Handle<JSObject>::cast(event_data),
+        Handle<JSObject>::cast(v8::Utils::OpenHandle(*exec_state)));
+  }
 }
 
+void LegacyDebugDelegate::ExceptionThrown(v8::Local<v8::Context> paused_context,
+                                          v8::Local<v8::Object> exec_state,
+                                          v8::Local<v8::Value> exception,
+                                          v8::Local<v8::Value> promise,
+                                          bool is_uncaught) {
+  Handle<Object> event_data;
+  if (isolate_->debug()
+          ->MakeExceptionEvent(v8::Utils::OpenHandle(*exception), is_uncaught,
+                               v8::Utils::OpenHandle(*promise))
+          .ToHandle(&event_data)) {
+    ProcessDebugEvent(
+        v8::Exception, Handle<JSObject>::cast(event_data),
+        Handle<JSObject>::cast(v8::Utils::OpenHandle(*exec_state)));
+  }
+}
 
-EventDetailsImpl::EventDetailsImpl(DebugEvent event,
-                                   Handle<JSObject> exec_state,
-                                   Handle<JSObject> event_data,
-                                   Handle<Object> callback_data,
-                                   v8::Debug::ClientData* client_data)
+void LegacyDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
+                                            Handle<JSObject> event_data) {
+  Handle<Object> exec_state;
+  if (isolate_->debug()->MakeExecutionState().ToHandle(&exec_state)) {
+    ProcessDebugEvent(event, event_data, Handle<JSObject>::cast(exec_state));
+  }
+}
+
+JavaScriptDebugDelegate::JavaScriptDebugDelegate(Isolate* isolate,
+                                                 Handle<JSFunction> listener,
+                                                 Handle<Object> data)
+    : LegacyDebugDelegate(isolate) {
+  GlobalHandles* global_handles = isolate->global_handles();
+  listener_ = Handle<JSFunction>::cast(global_handles->Create(*listener));
+  data_ = global_handles->Create(*data);
+}
+
+JavaScriptDebugDelegate::~JavaScriptDebugDelegate() {
+  GlobalHandles::Destroy(Handle<Object>::cast(listener_).location());
+  GlobalHandles::Destroy(data_.location());
+}
+
+void JavaScriptDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
+                                                Handle<JSObject> event_data,
+                                                Handle<JSObject> exec_state) {
+  Handle<Object> argv[] = {Handle<Object>(Smi::FromInt(event), isolate_),
+                           exec_state, event_data, data_};
+  Handle<JSReceiver> global = isolate_->global_proxy();
+  // Listener must not throw.
+  Execution::Call(isolate_, listener_, global, arraysize(argv), argv)
+      .ToHandleChecked();
+}
+
+NativeDebugDelegate::NativeDebugDelegate(Isolate* isolate,
+                                         v8::Debug::EventCallback callback,
+                                         Handle<Object> data)
+    : LegacyDebugDelegate(isolate), callback_(callback) {
+  data_ = isolate->global_handles()->Create(*data);
+}
+
+NativeDebugDelegate::~NativeDebugDelegate() {
+  GlobalHandles::Destroy(data_.location());
+}
+
+NativeDebugDelegate::EventDetails::EventDetails(DebugEvent event,
+                                                Handle<JSObject> exec_state,
+                                                Handle<JSObject> event_data,
+                                                Handle<Object> callback_data)
     : event_(event),
       exec_state_(exec_state),
       event_data_(event_data),
-      callback_data_(callback_data),
-      client_data_(client_data) {}
+      callback_data_(callback_data) {}
 
-
-DebugEvent EventDetailsImpl::GetEvent() const {
+DebugEvent NativeDebugDelegate::EventDetails::GetEvent() const {
   return event_;
 }
 
-
-v8::Local<v8::Object> EventDetailsImpl::GetExecutionState() const {
+v8::Local<v8::Object> NativeDebugDelegate::EventDetails::GetExecutionState()
+    const {
   return v8::Utils::ToLocal(exec_state_);
 }
 
-
-v8::Local<v8::Object> EventDetailsImpl::GetEventData() const {
+v8::Local<v8::Object> NativeDebugDelegate::EventDetails::GetEventData() const {
   return v8::Utils::ToLocal(event_data_);
 }
 
-
-v8::Local<v8::Context> EventDetailsImpl::GetEventContext() const {
+v8::Local<v8::Context> NativeDebugDelegate::EventDetails::GetEventContext()
+    const {
   return GetDebugEventContext(exec_state_->GetIsolate());
 }
 
-
-v8::Local<v8::Value> EventDetailsImpl::GetCallbackData() const {
+v8::Local<v8::Value> NativeDebugDelegate::EventDetails::GetCallbackData()
+    const {
   return v8::Utils::ToLocal(callback_data_);
 }
 
-
-v8::Debug::ClientData* EventDetailsImpl::GetClientData() const {
-  return client_data_;
-}
-
-v8::Isolate* EventDetailsImpl::GetIsolate() const {
+v8::Isolate* NativeDebugDelegate::EventDetails::GetIsolate() const {
   return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
 }
 
-CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
-                                   client_data_(NULL) {
+void NativeDebugDelegate::ProcessDebugEvent(v8::DebugEvent event,
+                                            Handle<JSObject> event_data,
+                                            Handle<JSObject> exec_state) {
+  EventDetails event_details(event, exec_state, event_data, data_);
+  Isolate* isolate = isolate_;
+  callback_(event_details);
+  CHECK(!isolate->has_scheduled_exception());
 }
 
-
-CommandMessage::CommandMessage(const Vector<uint16_t>& text,
-                               v8::Debug::ClientData* data)
-    : text_(text),
-      client_data_(data) {
-}
-
-
-void CommandMessage::Dispose() {
-  text_.Dispose();
-  delete client_data_;
-  client_data_ = NULL;
-}
-
-
-CommandMessage CommandMessage::New(const Vector<uint16_t>& command,
-                                   v8::Debug::ClientData* data) {
-  return CommandMessage(command.Clone(), data);
-}
-
-
-CommandMessageQueue::CommandMessageQueue(int size) : start_(0), end_(0),
-                                                     size_(size) {
-  messages_ = NewArray<CommandMessage>(size);
-}
-
-
-CommandMessageQueue::~CommandMessageQueue() {
-  while (!IsEmpty()) Get().Dispose();
-  DeleteArray(messages_);
-}
-
-
-CommandMessage CommandMessageQueue::Get() {
-  DCHECK(!IsEmpty());
-  int result = start_;
-  start_ = (start_ + 1) % size_;
-  return messages_[result];
-}
-
-
-void CommandMessageQueue::Put(const CommandMessage& message) {
-  if ((end_ + 1) % size_ == start_) {
-    Expand();
+NoSideEffectScope::~NoSideEffectScope() {
+  if (isolate_->needs_side_effect_check() &&
+      isolate_->debug()->side_effect_check_failed_) {
+    DCHECK(isolate_->has_pending_exception());
+    DCHECK_EQ(isolate_->heap()->termination_exception(),
+              isolate_->pending_exception());
+    // Convert the termination exception into a regular exception.
+    isolate_->CancelTerminateExecution();
+    isolate_->Throw(*isolate_->factory()->NewEvalError(
+        MessageTemplate::kNoSideEffectDebugEvaluate));
   }
-  messages_[end_] = message;
-  end_ = (end_ + 1) % size_;
-}
-
-
-void CommandMessageQueue::Expand() {
-  CommandMessageQueue new_queue(size_ * 2);
-  while (!IsEmpty()) {
-    new_queue.Put(Get());
-  }
-  CommandMessage* array_to_free = messages_;
-  *this = new_queue;
-  new_queue.messages_ = array_to_free;
-  // Make the new_queue empty so that it doesn't call Dispose on any messages.
-  new_queue.start_ = new_queue.end_;
-  // Automatic destructor called on new_queue, freeing array_to_free.
-}
-
-
-LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
-    : logger_(logger), queue_(size) {}
-
-
-bool LockingCommandMessageQueue::IsEmpty() const {
-  base::LockGuard<base::Mutex> lock_guard(&mutex_);
-  return queue_.IsEmpty();
-}
-
-
-CommandMessage LockingCommandMessageQueue::Get() {
-  base::LockGuard<base::Mutex> lock_guard(&mutex_);
-  CommandMessage result = queue_.Get();
-  logger_->DebugEvent("Get", result.text());
-  return result;
-}
-
-
-void LockingCommandMessageQueue::Put(const CommandMessage& message) {
-  base::LockGuard<base::Mutex> lock_guard(&mutex_);
-  queue_.Put(message);
-  logger_->DebugEvent("Put", message.text());
-}
-
-
-void LockingCommandMessageQueue::Clear() {
-  base::LockGuard<base::Mutex> lock_guard(&mutex_);
-  queue_.Clear();
+  isolate_->set_needs_side_effect_check(old_needs_side_effect_check_);
+  isolate_->debug()->UpdateHookOnFunctionCall();
+  isolate_->debug()->side_effect_check_failed_ = false;
 }
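
The destructor above downgrades the uncatchable termination exception into an ordinary, catchable EvalError once the scope unwinds, then restores the previous check state. A rough RAII model, with plain flags and a pending-error string standing in for the isolate machinery:

    #include <string>

    // Stand-in for the isolate's side-effect-check state.
    struct SideEffectState {
      bool needs_side_effect_check = false;
      bool side_effect_check_failed = false;
      std::string pending_error;  // Empty when no error is pending.
    };

    // RAII model of NoSideEffectScope: set the flag for the scope's lifetime,
    // restore it on exit, and convert a recorded failure into a catchable
    // error.
    class NoSideEffectScopeModel {
     public:
      explicit NoSideEffectScopeModel(SideEffectState* state)
          : state_(state), old_needs_check_(state->needs_side_effect_check) {
        state_->needs_side_effect_check = true;
      }

      ~NoSideEffectScopeModel() {
        if (state_->side_effect_check_failed) {
          state_->pending_error =
              "EvalError: side-effect-free debug-evaluate failed";
          state_->side_effect_check_failed = false;
        }
        state_->needs_side_effect_check = old_needs_check_;
      }

     private:
      SideEffectState* state_;
      bool old_needs_check_;
    };
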
 
 }  // namespace internal
diff --git a/src/debug/debug.h b/src/debug/debug.h
index 6e49db6..43338d7 100644
--- a/src/debug/debug.h
+++ b/src/debug/debug.h
@@ -6,12 +6,12 @@
 #define V8_DEBUG_DEBUG_H_
 
 #include "src/allocation.h"
-#include "src/arguments.h"
 #include "src/assembler.h"
 #include "src/base/atomicops.h"
 #include "src/base/hashmap.h"
 #include "src/base/platform/platform.h"
 #include "src/debug/debug-interface.h"
+#include "src/debug/interface-types.h"
 #include "src/execution.h"
 #include "src/factory.h"
 #include "src/flags.h"
@@ -39,9 +39,7 @@
   StepNext = 1,   // Step to the next statement in the current function.
   StepIn = 2,     // Step into new functions invoked or the next statement
                   // in the current function.
-  StepFrame = 3,  // Step into a new frame or return to previous frame.
-
-  LastStepAction = StepFrame
+  LastStepAction = StepIn
 };
 
 // Type of exception break. NOTE: These values are in macros.py as well.
@@ -51,10 +49,6 @@
 };
 
 
-// Type of exception break.
-enum BreakLocatorType { ALL_BREAK_LOCATIONS, CALLS_AND_RETURNS };
-
-
 // The different types of breakpoint position alignments.
 // Must match Debug.BreakPositionAlignment in debug.js
 enum BreakPositionAlignment {
@@ -123,8 +117,7 @@
 class BreakIterator {
  public:
   static std::unique_ptr<BreakIterator> GetIterator(
-      Handle<DebugInfo> debug_info, Handle<AbstractCode> abstract_code,
-      BreakLocatorType type = ALL_BREAK_LOCATIONS);
+      Handle<DebugInfo> debug_info, Handle<AbstractCode> abstract_code);
 
   virtual ~BreakIterator() {}
 
@@ -146,8 +139,7 @@
   virtual void SetDebugBreak() = 0;
 
  protected:
-  explicit BreakIterator(Handle<DebugInfo> debug_info,
-                         BreakLocatorType break_locator_type);
+  explicit BreakIterator(Handle<DebugInfo> debug_info);
 
   int BreakIndexFromPosition(int position, BreakPositionAlignment alignment);
 
@@ -157,7 +149,6 @@
   int break_index_;
   int position_;
   int statement_position_;
-  BreakLocatorType break_locator_type_;
 
  private:
   DisallowHeapAllocation no_gc_;
@@ -166,7 +157,7 @@
 
 class CodeBreakIterator : public BreakIterator {
  public:
-  CodeBreakIterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
+  explicit CodeBreakIterator(Handle<DebugInfo> debug_info);
   ~CodeBreakIterator() override {}
 
   BreakLocation GetBreakLocation() override;
@@ -185,7 +176,7 @@
   }
 
  private:
-  int GetModeMask(BreakLocatorType type);
+  int GetModeMask();
   DebugBreakType GetDebugBreakType();
 
   RelocInfo::Mode rmode() { return reloc_iterator_.rinfo()->rmode(); }
@@ -198,8 +189,7 @@
 
 class BytecodeArrayBreakIterator : public BreakIterator {
  public:
-  BytecodeArrayBreakIterator(Handle<DebugInfo> debug_info,
-                             BreakLocatorType type);
+  explicit BytecodeArrayBreakIterator(Handle<DebugInfo> debug_info);
   ~BytecodeArrayBreakIterator() override {}
 
   BreakLocation GetBreakLocation() override;
@@ -240,145 +230,6 @@
   DebugInfoListNode* next_;
 };
 
-
-// Message delivered to the message handler callback. This is either a debugger
-// event or the response to a command.
-class MessageImpl: public v8::Debug::Message {
- public:
-  // Create a message object for a debug event.
-  static MessageImpl NewEvent(DebugEvent event,
-                              bool running,
-                              Handle<JSObject> exec_state,
-                              Handle<JSObject> event_data);
-
-  // Create a message object for the response to a debug command.
-  static MessageImpl NewResponse(DebugEvent event,
-                                 bool running,
-                                 Handle<JSObject> exec_state,
-                                 Handle<JSObject> event_data,
-                                 Handle<String> response_json,
-                                 v8::Debug::ClientData* client_data);
-
-  // Implementation of interface v8::Debug::Message.
-  virtual bool IsEvent() const;
-  virtual bool IsResponse() const;
-  virtual DebugEvent GetEvent() const;
-  virtual bool WillStartRunning() const;
-  virtual v8::Local<v8::Object> GetExecutionState() const;
-  virtual v8::Local<v8::Object> GetEventData() const;
-  virtual v8::Local<v8::String> GetJSON() const;
-  virtual v8::Local<v8::Context> GetEventContext() const;
-  virtual v8::Debug::ClientData* GetClientData() const;
-  virtual v8::Isolate* GetIsolate() const;
-
- private:
-  MessageImpl(bool is_event,
-              DebugEvent event,
-              bool running,
-              Handle<JSObject> exec_state,
-              Handle<JSObject> event_data,
-              Handle<String> response_json,
-              v8::Debug::ClientData* client_data);
-
-  bool is_event_;  // Does this message represent a debug event?
-  DebugEvent event_;  // Debug event causing the break.
-  bool running_;  // Will the VM start running after this event?
-  Handle<JSObject> exec_state_;  // Current execution state.
-  Handle<JSObject> event_data_;  // Data associated with the event.
-  Handle<String> response_json_;  // Response JSON if message holds a response.
-  v8::Debug::ClientData* client_data_;  // Client data passed with the request.
-};
-
-
-// Details of the debug event delivered to the debug event listener.
-class EventDetailsImpl : public v8::DebugInterface::EventDetails {
- public:
-  EventDetailsImpl(DebugEvent event,
-                   Handle<JSObject> exec_state,
-                   Handle<JSObject> event_data,
-                   Handle<Object> callback_data,
-                   v8::Debug::ClientData* client_data);
-  virtual DebugEvent GetEvent() const;
-  virtual v8::Local<v8::Object> GetExecutionState() const;
-  virtual v8::Local<v8::Object> GetEventData() const;
-  virtual v8::Local<v8::Context> GetEventContext() const;
-  virtual v8::Local<v8::Value> GetCallbackData() const;
-  virtual v8::Debug::ClientData* GetClientData() const;
-  virtual v8::Isolate* GetIsolate() const;
-
- private:
-  DebugEvent event_;  // Debug event causing the break.
-  Handle<JSObject> exec_state_;         // Current execution state.
-  Handle<JSObject> event_data_;         // Data associated with the event.
-  Handle<Object> callback_data_;        // User data passed with the callback
-                                        // when it was registered.
-  v8::Debug::ClientData* client_data_;  // Data passed to DebugBreakForCommand.
-};
-
-
-// Message send by user to v8 debugger or debugger output message.
-// In addition to command text it may contain a pointer to some user data
-// which are expected to be passed along with the command reponse to message
-// handler.
-class CommandMessage {
- public:
-  static CommandMessage New(const Vector<uint16_t>& command,
-                            v8::Debug::ClientData* data);
-  CommandMessage();
-
-  // Deletes user data and disposes of the text.
-  void Dispose();
-  Vector<uint16_t> text() const { return text_; }
-  v8::Debug::ClientData* client_data() const { return client_data_; }
- private:
-  CommandMessage(const Vector<uint16_t>& text,
-                 v8::Debug::ClientData* data);
-
-  Vector<uint16_t> text_;
-  v8::Debug::ClientData* client_data_;
-};
-
-
-// A Queue of CommandMessage objects.  A thread-safe version is
-// LockingCommandMessageQueue, based on this class.
-class CommandMessageQueue BASE_EMBEDDED {
- public:
-  explicit CommandMessageQueue(int size);
-  ~CommandMessageQueue();
-  bool IsEmpty() const { return start_ == end_; }
-  CommandMessage Get();
-  void Put(const CommandMessage& message);
-  void Clear() { start_ = end_ = 0; }  // Queue is empty after Clear().
- private:
-  // Doubles the size of the message queue, and copies the messages.
-  void Expand();
-
-  CommandMessage* messages_;
-  int start_;
-  int end_;
-  int size_;  // The size of the queue buffer.  Queue can hold size-1 messages.
-};
-
-
-// LockingCommandMessageQueue is a thread-safe circular buffer of CommandMessage
-// messages.  The message data is not managed by LockingCommandMessageQueue.
-// Pointers to the data are passed in and out. Implemented by adding a
-// Mutex to CommandMessageQueue.  Includes logging of all puts and gets.
-class LockingCommandMessageQueue BASE_EMBEDDED {
- public:
-  LockingCommandMessageQueue(Logger* logger, int size);
-  bool IsEmpty() const;
-  CommandMessage Get();
-  void Put(const CommandMessage& message);
-  void Clear();
- private:
-  Logger* logger_;
-  CommandMessageQueue queue_;
-  mutable base::Mutex mutex_;
-  DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
-};
-
-
 class DebugFeatureTracker {
  public:
   enum Feature {
@@ -411,31 +262,23 @@
 class Debug {
  public:
   // Debug event triggers.
-  void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
+  void OnDebugBreak(Handle<Object> break_points_hit);
 
   void OnThrow(Handle<Object> exception);
   void OnPromiseReject(Handle<Object> promise, Handle<Object> value);
   void OnCompileError(Handle<Script> script);
-  void OnBeforeCompile(Handle<Script> script);
   void OnAfterCompile(Handle<Script> script);
-  void OnAsyncTaskEvent(Handle<String> type, Handle<Object> id,
-                        Handle<String> name);
+  void OnAsyncTaskEvent(debug::PromiseDebugActionType type, int id,
+                        int parent_id);
 
-  // API facing.
-  void SetEventListener(Handle<Object> callback, Handle<Object> data);
-  void SetMessageHandler(v8::Debug::MessageHandler handler);
-  void EnqueueCommandMessage(Vector<const uint16_t> command,
-                             v8::Debug::ClientData* client_data = NULL);
   MUST_USE_RESULT MaybeHandle<Object> Call(Handle<Object> fun,
                                            Handle<Object> data);
   Handle<Context> GetDebugContext();
   void HandleDebugBreak();
-  void ProcessDebugMessages(bool debug_command_only);
 
   // Internal logic
   bool Load();
   void Break(JavaScriptFrame* frame);
-  void SetAfterBreakTarget(JavaScriptFrame* frame);
 
   // Scripts handling.
   Handle<FixedArray> GetLoadedScripts();
@@ -452,6 +295,13 @@
   void ChangeBreakOnException(ExceptionBreakType type, bool enable);
   bool IsBreakOnException(ExceptionBreakType type);
 
+  // The parameter is either a BreakPointInfo object, or a FixedArray of
+  // BreakPointInfo objects.
+  // Returns an empty handle if no breakpoint is hit, or a FixedArray with all
+  // hit breakpoints.
+  MaybeHandle<FixedArray> GetHitBreakPointObjects(
+      Handle<Object> break_point_objects);
+
   // Stepping handling.
   void PrepareStep(StepAction step_action);
   void PrepareStepIn(Handle<JSFunction> function);
@@ -464,13 +314,19 @@
   bool GetPossibleBreakpoints(Handle<Script> script, int start_position,
                               int end_position, std::set<int>* positions);
 
-  void RecordAsyncFunction(Handle<JSGeneratorObject> generator_object);
+  void RecordGenerator(Handle<JSGeneratorObject> generator_object);
 
-  // Returns whether the operation succeeded. Compilation can only be triggered
-  // if a valid closure is passed as the second argument, otherwise the shared
-  // function needs to be compiled already.
-  bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
-                       Handle<JSFunction> function);
+  void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
+                      Handle<Object> parent);
+
+  int NextAsyncTaskId(Handle<JSObject> promise);
+
+  bool IsBlackboxed(Handle<SharedFunctionInfo> shared);
+
+  void SetDebugDelegate(debug::DebugDelegate* delegate, bool pass_ownership);
+
+  // Returns whether the operation succeeded.
+  bool EnsureDebugInfo(Handle<SharedFunctionInfo> shared);
   void CreateDebugInfo(Handle<SharedFunctionInfo> shared);
   static Handle<DebugInfo> GetDebugInfo(Handle<SharedFunctionInfo> shared);
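A hedged sketch of the simplified flow now that EnsureDebugInfo takes only the SharedFunctionInfo (illustrative; |isolate| and |shared| are assumed to be in scope):

    if (!isolate->debug()->EnsureDebugInfo(shared)) return;  // Compilation failed.
    Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared);
    // |debug_info| can now be instrumented, e.g. by FloodWithOneShot(shared).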
 
@@ -492,8 +348,9 @@
   bool IsBreakAtReturn(JavaScriptFrame* frame);
 
   // Support for LiveEdit
-  void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
-                             LiveEditFrameDropMode mode);
+  void ScheduleFrameRestart(StackFrame* frame);
+
+  bool IsFrameBlackboxed(JavaScriptFrame* frame);
 
   // Threading support.
   char* ArchiveDebug(char* to);
@@ -510,6 +367,9 @@
     return is_active() && !debug_context().is_null() && break_id() != 0;
   }
 
+  bool PerformSideEffectCheck(Handle<JSFunction> function);
+  bool PerformSideEffectCheckForCallback(Address function);
+
   // Flags and states.
   DebugScope* debugger_entry() {
     return reinterpret_cast<DebugScope*>(
@@ -533,18 +393,19 @@
   StackFrame::Id break_frame_id() { return thread_local_.break_frame_id_; }
   int break_id() { return thread_local_.break_id_; }
 
-  Handle<Object> return_value() { return thread_local_.return_value_; }
-  void set_return_value(Handle<Object> value) {
-    thread_local_.return_value_ = value;
+  Handle<Object> return_value_handle() {
+    return handle(thread_local_.return_value_, isolate_);
   }
+  Object* return_value() { return thread_local_.return_value_; }
+  void set_return_value(Object* value) { thread_local_.return_value_ = value; }
 
   // Support for embedding into generated code.
   Address is_active_address() {
     return reinterpret_cast<Address>(&is_active_);
   }
 
-  Address after_break_target_address() {
-    return reinterpret_cast<Address>(&after_break_target_);
+  Address hook_on_function_call_address() {
+    return reinterpret_cast<Address>(&hook_on_function_call_);
   }
 
   Address last_step_action_address() {
@@ -555,25 +416,33 @@
     return reinterpret_cast<Address>(&thread_local_.suspended_generator_);
   }
 
+  Address restart_fp_address() {
+    return reinterpret_cast<Address>(&thread_local_.restart_fp_);
+  }
+
   StepAction last_step_action() { return thread_local_.last_step_action_; }
 
   DebugFeatureTracker* feature_tracker() { return &feature_tracker_; }
 
  private:
   explicit Debug(Isolate* isolate);
+  ~Debug() { DCHECK_NULL(debug_delegate_); }
 
   void UpdateState();
+  void UpdateHookOnFunctionCall();
+  void RemoveDebugDelegate();
   void Unload();
   void SetNextBreakId() {
     thread_local_.break_id_ = ++thread_local_.break_count_;
   }
 
-  // Check whether there are commands in the command queue.
-  inline bool has_commands() const { return !command_queue_.IsEmpty(); }
-  inline bool ignore_events() const { return is_suppressed_ || !is_active_; }
-  inline bool break_disabled() const {
-    return break_disabled_ || in_debug_event_listener_;
+  // Return the number of virtual frames below debugger entry.
+  int CurrentFrameCount();
+
+  inline bool ignore_events() const {
+    return is_suppressed_ || !is_active_ || isolate_->needs_side_effect_check();
   }
+  inline bool break_disabled() const { return break_disabled_; }
 
   void clear_suspended_generator() {
     thread_local_.suspended_generator_ = Smi::kZero;
@@ -583,6 +452,8 @@
     return thread_local_.suspended_generator_ != Smi::kZero;
   }
 
+  bool IsExceptionBlackboxed(bool uncaught);
+
   void OnException(Handle<Object> exception, Handle<Object> promise);
 
   // Constructors for debug event objects.
@@ -595,26 +466,11 @@
       Handle<Object> promise);
   MUST_USE_RESULT MaybeHandle<Object> MakeCompileEvent(
       Handle<Script> script, v8::DebugEvent type);
-  MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(Handle<String> type,
-                                                         Handle<Object> id,
-                                                         Handle<String> name);
+  MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(
+      v8::debug::PromiseDebugActionType type, int id);
 
-  // Mirror cache handling.
-  void ClearMirrorCache();
-
-  void CallEventCallback(v8::DebugEvent event,
-                         Handle<Object> exec_state,
-                         Handle<Object> event_data,
-                         v8::Debug::ClientData* client_data);
   void ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script);
-  void ProcessDebugEvent(v8::DebugEvent event,
-                         Handle<JSObject> event_data,
-                         bool auto_continue);
-  void NotifyMessageHandler(v8::DebugEvent event,
-                            Handle<JSObject> exec_state,
-                            Handle<JSObject> event_data,
-                            bool auto_continue);
-  void InvokeMessageHandler(MessageImpl message);
+  void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data);
 
   // Find the closest source position for a break point for a given position.
   int FindBreakablePosition(Handle<DebugInfo> debug_info, int source_position,
@@ -626,16 +482,15 @@
   // Clear all code from instrumentation.
   void ClearAllBreakPoints();
   // Instrument a function with one-shots.
-  void FloodWithOneShot(Handle<JSFunction> function,
-                        BreakLocatorType type = ALL_BREAK_LOCATIONS);
+  void FloodWithOneShot(Handle<SharedFunctionInfo> function);
   // Clear all one-shot instrumentations, but restore break points.
   void ClearOneShot();
 
   void ActivateStepOut(StackFrame* frame);
   void RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info);
-  Handle<Object> CheckBreakPoints(Handle<DebugInfo> debug_info,
-                                  BreakLocation* location,
-                                  bool* has_break_points = nullptr);
+  MaybeHandle<FixedArray> CheckBreakPoints(Handle<DebugInfo> debug_info,
+                                           BreakLocation* location,
+                                           bool* has_break_points = nullptr);
   bool IsMutedAtCurrentLocation(JavaScriptFrame* frame);
   bool CheckBreakPoint(Handle<Object> break_point_object);
   MaybeHandle<Object> CallFunction(const char* name, int argc,
@@ -652,30 +507,32 @@
 
   // Global handles.
   Handle<Context> debug_context_;
-  Handle<Object> event_listener_;
-  Handle<Object> event_listener_data_;
 
-  v8::Debug::MessageHandler message_handler_;
+  debug::DebugDelegate* debug_delegate_ = nullptr;
+  bool owns_debug_delegate_ = false;
 
-  static const int kQueueInitialSize = 4;
-  base::Semaphore command_received_;  // Signaled for each command received.
-  LockingCommandMessageQueue command_queue_;
-
+  // Debugger is active, i.e. there is a debug event listener attached.
   bool is_active_;
+  // Debugger needs to be notified on every new function call.
+  // Used for stepping and read-only checks
+  bool hook_on_function_call_;
+  // Suppress debug events.
   bool is_suppressed_;
+  // LiveEdit is enabled.
   bool live_edit_enabled_;
+  // Do not trigger debug break events.
   bool break_disabled_;
+  // Honor break points; when unset, break points are ignored.
   bool break_points_active_;
-  bool in_debug_event_listener_;
+  // Trigger debug break events for all exceptions.
   bool break_on_exception_;
+  // Trigger debug break events for uncaught exceptions.
   bool break_on_uncaught_exception_;
+  // A termination exception was thrown because a side-effect check failed.
+  bool side_effect_check_failed_;
 
-  DebugInfoListNode* debug_info_list_;  // List of active debug info objects.
-
-  // Storage location for jump when exiting debug break calls.
-  // Note that this address is not GC safe.  It should be computed immediately
-  // before returning to the DebugBreakCallHelper.
-  Address after_break_target_;
+  // List of active debug info objects.
+  DebugInfoListNode* debug_info_list_;
 
   // Used to collect histogram data on debugger feature usage.
   DebugFeatureTracker feature_tracker_;
@@ -702,20 +559,21 @@
     int last_statement_position_;
 
-    // Frame pointer from last step next or step frame action.
+    // Frame count from last step next action.
-    Address last_fp_;
+    int last_frame_count_;
 
-    // Frame pointer of the target frame we want to arrive at.
+    // Frame count of the target frame we want to arrive at.
-    Address target_fp_;
+    int target_frame_count_;
 
-    // Stores the way how LiveEdit has patched the stack. It is used when
-    // debugger returns control back to user script.
-    LiveEditFrameDropMode frame_drop_mode_;
+    // Value of the accumulator at the point of entering the debugger.
+    Object* return_value_;
 
-    // Value of accumulator in interpreter frames. In non-interpreter frames
-    // this value will be the hole.
-    Handle<Object> return_value_;
-
+    // The suspended generator object to track when stepping.
     Object* suspended_generator_;
+
+    // The new frame pointer to drop to when restarting a frame.
+    Address restart_fp_;
+
+    int async_task_count_;
   };
 
   // Storage location for registers when handling debug break calls
@@ -728,6 +586,8 @@
   friend class DisableBreak;
   friend class LiveEdit;
   friend class SuppressDebug;
+  friend class NoSideEffectScope;
+  friend class LegacyDebugDelegate;
 
   friend Handle<FixedArray> GetDebuggedFunctions();  // In test-debug.cc
   friend void CheckDebuggerUnloaded(bool check_functions);  // In test-debug.cc
@@ -735,6 +595,84 @@
   DISALLOW_COPY_AND_ASSIGN(Debug);
 };
 
+class LegacyDebugDelegate : public v8::debug::DebugDelegate {
+ public:
+  explicit LegacyDebugDelegate(Isolate* isolate) : isolate_(isolate) {}
+  void PromiseEventOccurred(v8::debug::PromiseDebugActionType type, int id,
+                            int parent_id) override;
+  void ScriptCompiled(v8::Local<v8::debug::Script> script,
+                      bool has_compile_error) override;
+  void BreakProgramRequested(v8::Local<v8::Context> paused_context,
+                             v8::Local<v8::Object> exec_state,
+                             v8::Local<v8::Value> break_points_hit) override;
+  void ExceptionThrown(v8::Local<v8::Context> paused_context,
+                       v8::Local<v8::Object> exec_state,
+                       v8::Local<v8::Value> exception,
+                       v8::Local<v8::Value> promise, bool is_uncaught) override;
+  bool IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
+                            const v8::debug::Location& start,
+                            const v8::debug::Location& end) override {
+    return false;
+  }
+
+ protected:
+  Isolate* isolate_;
+
+ private:
+  void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data);
+  virtual void ProcessDebugEvent(v8::DebugEvent event,
+                                 Handle<JSObject> event_data,
+                                 Handle<JSObject> exec_state) = 0;
+};
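A sketch of a delegate in the new model (illustrative; the class name is hypothetical, and it assumes debug::DebugDelegate supplies default no-op bodies for the methods not overridden here, as the selective overrides above suggest):

    class SimpleDelegate : public v8::debug::DebugDelegate {
     public:
      void BreakProgramRequested(v8::Local<v8::Context> paused_context,
                                 v8::Local<v8::Object> exec_state,
                                 v8::Local<v8::Value> break_points_hit) override {
        // Inspect the paused state here; returning resumes execution.
      }
    };

    // Hypothetical registration; with pass_ownership == false the caller
    // keeps ownership of the delegate:
    //   isolate->debug()->SetDebugDelegate(&delegate, false);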
+
+class JavaScriptDebugDelegate : public LegacyDebugDelegate {
+ public:
+  JavaScriptDebugDelegate(Isolate* isolate, Handle<JSFunction> listener,
+                          Handle<Object> data);
+  virtual ~JavaScriptDebugDelegate();
+
+ private:
+  void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data,
+                         Handle<JSObject> exec_state) override;
+
+  Handle<JSFunction> listener_;
+  Handle<Object> data_;
+};
+
+class NativeDebugDelegate : public LegacyDebugDelegate {
+ public:
+  NativeDebugDelegate(Isolate* isolate, v8::Debug::EventCallback callback,
+                      Handle<Object> data);
+  virtual ~NativeDebugDelegate();
+
+ private:
+  // Details of the debug event delivered to the debug event listener.
+  class EventDetails : public v8::Debug::EventDetails {
+   public:
+    EventDetails(DebugEvent event, Handle<JSObject> exec_state,
+                 Handle<JSObject> event_data, Handle<Object> callback_data);
+    virtual DebugEvent GetEvent() const;
+    virtual v8::Local<v8::Object> GetExecutionState() const;
+    virtual v8::Local<v8::Object> GetEventData() const;
+    virtual v8::Local<v8::Context> GetEventContext() const;
+    virtual v8::Local<v8::Value> GetCallbackData() const;
+    virtual v8::Debug::ClientData* GetClientData() const { return nullptr; }
+    virtual v8::Isolate* GetIsolate() const;
+
+   private:
+    DebugEvent event_;              // Debug event causing the break.
+    Handle<JSObject> exec_state_;   // Current execution state.
+    Handle<JSObject> event_data_;   // Data associated with the event.
+    Handle<Object> callback_data_;  // User data passed with the callback
+                                    // when it was registered.
+  };
+
+  void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data,
+                         Handle<JSObject> exec_state) override;
+
+  v8::Debug::EventCallback callback_;
+  Handle<Object> data_;
+};
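For contrast, a legacy-style event callback of the kind NativeDebugDelegate adapts (illustrative; v8::Break is one of the v8::DebugEvent values):

    static void OnDebugEvent(const v8::Debug::EventDetails& details) {
      if (details.GetEvent() != v8::Break) return;
      // Legacy clients read the paused state from the details object.
      v8::Local<v8::Object> exec_state = details.GetExecutionState();
      (void)exec_state;  // Placeholder for real handling.
    }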
 
 // This scope is used to load and enter the debug context and create a new
 // break state.  Leaving the scope will restore the previous state.
@@ -757,32 +695,39 @@
   DebugScope* prev_;               // Previous scope if entered recursively.
   StackFrame::Id break_frame_id_;  // Previous break frame id.
   int break_id_;                   // Previous break id.
-  Handle<Object> return_value_;    // Previous result.
   bool failed_;                    // Did the debug context fail to load?
   SaveContext save_;               // Saves previous context.
   PostponeInterruptsScope no_termination_exceptons_;
 };
 
+// This scope is used to handle return values in nested debug break points.
+// When debug breaks nest, it restores the return value to its previous
+// state on exit. It is not merged with DebugScope because return_value_
+// is not cleared when a DebugScope is entered.
+class ReturnValueScope {
+ public:
+  explicit ReturnValueScope(Debug* debug);
+  ~ReturnValueScope();
+
+ private:
+  Debug* debug_;
+  Handle<Object> return_value_;  // Previous result.
+};
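Illustrative nesting (not part of the patch; |isolate| is assumed to be in scope): each nested break wraps its handling in a ReturnValueScope so the outer break's return value survives:

    {
      ReturnValueScope restore_return_value(isolate->debug());
      // Handle the nested break; set_return_value() may be called here.
    }  // The destructor restores the previous return value.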
 
 // Stack allocated class for disabling break.
 class DisableBreak BASE_EMBEDDED {
  public:
-  explicit DisableBreak(Debug* debug, bool disable_break)
-      : debug_(debug),
-        previous_break_disabled_(debug->break_disabled_),
-        previous_in_debug_event_listener_(debug->in_debug_event_listener_) {
-    debug_->break_disabled_ = disable_break;
-    debug_->in_debug_event_listener_ = disable_break;
+  explicit DisableBreak(Debug* debug)
+      : debug_(debug), previous_break_disabled_(debug->break_disabled_) {
+    debug_->break_disabled_ = true;
   }
   ~DisableBreak() {
     debug_->break_disabled_ = previous_break_disabled_;
-    debug_->in_debug_event_listener_ = previous_in_debug_event_listener_;
   }
 
  private:
   Debug* debug_;
   bool previous_break_disabled_;
-  bool previous_in_debug_event_listener_;
   DISALLOW_COPY_AND_ASSIGN(DisableBreak);
 };
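Typical RAII use of the simplified scope (illustrative call site):

    {
      DisableBreak no_breaks(isolate->debug());
      // Code in this block runs with debug breaks disabled.
    }  // The previous break_disabled_ state is restored.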
 
@@ -801,6 +746,23 @@
   DISALLOW_COPY_AND_ASSIGN(SuppressDebug);
 };
 
+class NoSideEffectScope {
+ public:
+  NoSideEffectScope(Isolate* isolate, bool disallow_side_effects)
+      : isolate_(isolate),
+        old_needs_side_effect_check_(isolate->needs_side_effect_check()) {
+    isolate->set_needs_side_effect_check(old_needs_side_effect_check_ ||
+                                         disallow_side_effects);
+    isolate->debug()->UpdateHookOnFunctionCall();
+    isolate->debug()->side_effect_check_failed_ = false;
+  }
+  ~NoSideEffectScope();
+
+ private:
+  Isolate* isolate_;
+  bool old_needs_side_effect_check_;
+  DISALLOW_COPY_AND_ASSIGN(NoSideEffectScope);
+};
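A sketch of how the scope gates side-effect-free evaluation (illustrative call site; the comment describes the intended interplay with PerformSideEffectCheck above):

    {
      NoSideEffectScope no_side_effects(isolate, true);
      // While active, every function call is vetted via
      // Debug::PerformSideEffectCheck(); a failing check sets
      // side_effect_check_failed_ and terminates execution.
    }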
 
 // Code generator routines.
 class DebugCodegen : public AllStatic {
@@ -813,15 +775,15 @@
   static void GenerateDebugBreakStub(MacroAssembler* masm,
                                      DebugBreakCallHelperMode mode);
 
-  // FrameDropper is a code replacement for a JavaScript frame with possibly
-  // several frames above.
-  // There are no calling conventions here, because it never actually gets
-  // called; it only gets returned to.
-  static void GenerateFrameDropperLiveEdit(MacroAssembler* masm);
-
-
   static void GenerateSlot(MacroAssembler* masm, RelocInfo::Mode mode);
 
+  // Builtin to drop frames to restart function.
+  static void GenerateFrameDropperTrampoline(MacroAssembler* masm);
+
+  // Builtin to atomically (wrt deopts) handle debugger statement and
+  // drop frames to restart function if necessary.
+  static void GenerateHandleDebuggerStatement(MacroAssembler* masm);
+
   static void PatchDebugBreakSlot(Isolate* isolate, Address pc,
                                   Handle<Code> code);
   static bool DebugBreakSlotIsPatched(Address pc);
diff --git a/src/debug/debug.js b/src/debug/debug.js
index 8031763..6993274 100644
--- a/src/debug/debug.js
+++ b/src/debug/debug.js
@@ -12,21 +12,11 @@
 var GlobalArray = global.Array;
 var GlobalRegExp = global.RegExp;
 var IsNaN = global.isNaN;
-var JSONParse = global.JSON.parse;
-var JSONStringify = global.JSON.stringify;
-var LookupMirror = global.LookupMirror;
 var MakeMirror = global.MakeMirror;
-var MakeMirrorSerializer = global.MakeMirrorSerializer;
 var MathMin = global.Math.min;
 var Mirror = global.Mirror;
-var MirrorType;
-var ParseInt = global.parseInt;
 var ValueMirror = global.ValueMirror;
 
-utils.Import(function(from) {
-  MirrorType = from.MirrorType;
-});
-
 //----------------------------------------------------------------------------
 
 // Default number of frames to include in the response to backtrace request.
@@ -43,11 +33,9 @@
 // from the API include file debug.h.
 Debug.DebugEvent = { Break: 1,
                      Exception: 2,
-                     NewFunction: 3,
-                     BeforeCompile: 4,
-                     AfterCompile: 5,
-                     CompileError: 6,
-                     AsyncTaskEvent: 7 };
+                     AfterCompile: 3,
+                     CompileError: 4,
+                     AsyncTaskEvent: 5 };
 
 // Types of exceptions that can be broken upon.
 Debug.ExceptionBreak = { Caught : 0,
@@ -56,8 +44,7 @@
 // The different types of steps.
 Debug.StepAction = { StepOut: 0,
                      StepNext: 1,
-                     StepIn: 2,
-                     StepFrame: 3 };
+                     StepIn: 2 };
 
 // The different types of scripts matching enum ScriptType in objects.h.
 Debug.ScriptType = { Native: 0,
@@ -256,20 +243,6 @@
 }
 
 
-// Creates a clone of script breakpoint that is linked to another script.
-ScriptBreakPoint.prototype.cloneForOtherScript = function (other_script) {
-  var copy = new ScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
-      other_script.id, this.line_, this.column_, this.groupId_,
-      this.position_alignment_);
-  copy.number_ = next_break_point_number++;
-  script_break_points.push(copy);
-
-  copy.active_ = this.active_;
-  copy.condition_ = this.condition_;
-  return copy;
-};
-
-
 ScriptBreakPoint.prototype.number = function() {
   return this.number_;
 };
@@ -435,31 +408,6 @@
 };
 
 
-// Function called from runtime when a new script is compiled to set any script
-// break points set in this script.
-function UpdateScriptBreakPoints(script) {
-  for (var i = 0; i < script_break_points.length; i++) {
-    var break_point = script_break_points[i];
-    if ((break_point.type() == Debug.ScriptBreakPointType.ScriptName ||
-         break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) &&
-        break_point.matchesScript(script)) {
-      break_point.set(script);
-    }
-  }
-}
-
-
-function GetScriptBreakPoints(script) {
-  var result = [];
-  for (var i = 0; i < script_break_points.length; i++) {
-    if (script_break_points[i].matchesScript(script)) {
-      result.push(script_break_points[i]);
-    }
-  }
-  return result;
-}
-
-
 Debug.setListener = function(listener, opt_data) {
   if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) {
     throw %make_type_error(kDebuggerType);
@@ -476,7 +424,7 @@
 Debug.findScript = function(func_or_script_name) {
   if (IS_FUNCTION(func_or_script_name)) {
     return %FunctionGetScript(func_or_script_name);
-  } else if (IS_REGEXP(func_or_script_name)) {
+  } else if (%IsRegExp(func_or_script_name)) {
     var scripts = this.scripts();
     var last_result = null;
     var result_count = 0;
@@ -872,18 +820,14 @@
 ExecutionState.prototype.prepareStep = function(action) {
   if (action === Debug.StepAction.StepIn ||
       action === Debug.StepAction.StepOut ||
-      action === Debug.StepAction.StepNext ||
-      action === Debug.StepAction.StepFrame) {
+      action === Debug.StepAction.StepNext) {
     return %PrepareStep(this.break_id, action);
   }
   throw %make_type_error(kDebuggerType);
 };
 
-ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
-    opt_additional_context) {
-  return MakeMirror(%DebugEvaluateGlobal(this.break_id, source,
-                                         TO_BOOLEAN(disable_break),
-                                         opt_additional_context));
+ExecutionState.prototype.evaluateGlobal = function(source) {
+  return MakeMirror(%DebugEvaluateGlobal(this.break_id, source));
 };
 
 ExecutionState.prototype.frameCount = function() {
@@ -911,11 +855,6 @@
   return this.selected_frame;
 };
 
-ExecutionState.prototype.debugCommandProcessor = function(opt_is_running) {
-  return new DebugCommandProcessor(this, opt_is_running);
-};
-
-
 function MakeBreakEvent(break_id, break_points_hit) {
   return new BreakEvent(break_id, break_points_hit);
 }
@@ -957,43 +896,6 @@
 };
 
 
-BreakEvent.prototype.toJSONProtocol = function() {
-  var o = { seq: next_response_seq++,
-            type: "event",
-            event: "break",
-            body: { invocationText: this.frame_.invocationText() }
-          };
-
-  // Add script related information to the event if available.
-  var script = this.func().script();
-  if (script) {
-    o.body.sourceLine = this.sourceLine(),
-    o.body.sourceColumn = this.sourceColumn(),
-    o.body.sourceLineText = this.sourceLineText(),
-    o.body.script = MakeScriptObject_(script, false);
-  }
-
-  // Add an Array of break points hit if any.
-  if (this.breakPointsHit()) {
-    o.body.breakpoints = [];
-    for (var i = 0; i < this.breakPointsHit().length; i++) {
-      // Find the break point number. For break points originating from a
-      // script break point supply the script break point number.
-      var breakpoint = this.breakPointsHit()[i];
-      var script_break_point = breakpoint.script_break_point();
-      var number;
-      if (script_break_point) {
-        number = script_break_point.number();
-      } else {
-        number = breakpoint.number();
-      }
-      o.body.breakpoints.push(number);
-    }
-  }
-  return JSONStringify(ObjectToProtocolObject_(o));
-};
-
-
 function MakeExceptionEvent(break_id, exception, uncaught, promise) {
   return new ExceptionEvent(break_id, exception, uncaught, promise);
 }
@@ -1047,32 +949,6 @@
 };
 
 
-ExceptionEvent.prototype.toJSONProtocol = function() {
-  var o = new ProtocolMessage();
-  o.event = "exception";
-  o.body = { uncaught: this.uncaught_,
-             exception: MakeMirror(this.exception_)
-           };
-
-  // Exceptions might happen without any JavaScript frames.
-  if (this.exec_state_.frameCount() > 0) {
-    o.body.sourceLine = this.sourceLine();
-    o.body.sourceColumn = this.sourceColumn();
-    o.body.sourceLineText = this.sourceLineText();
-
-    // Add script information to the event if available.
-    var script = this.func().script();
-    if (script) {
-      o.body.script = MakeScriptObject_(script, false);
-    }
-  } else {
-    o.body.sourceLine = -1;
-  }
-
-  return o.toJSONProtocol();
-};
-
-
 function MakeCompileEvent(script, type) {
   return new CompileEvent(script, type);
 }
@@ -1094,27 +970,6 @@
 };
 
 
-CompileEvent.prototype.toJSONProtocol = function() {
-  var o = new ProtocolMessage();
-  o.running = true;
-  switch (this.type_) {
-    case Debug.DebugEvent.BeforeCompile:
-      o.event = "beforeCompile";
-      break;
-    case Debug.DebugEvent.AfterCompile:
-      o.event = "afterCompile";
-      break;
-    case Debug.DebugEvent.CompileError:
-      o.event = "compileError";
-      break;
-  }
-  o.body = {};
-  o.body.script = this.script_;
-
-  return o.toJSONProtocol();
-};
-
-
 function MakeScriptObject_(script, include_source) {
   var o = { id: script.id(),
             name: script.name(),
@@ -1132,15 +987,14 @@
 }
 
 
-function MakeAsyncTaskEvent(type, id, name) {
-  return new AsyncTaskEvent(type, id, name);
+function MakeAsyncTaskEvent(type, id) {
+  return new AsyncTaskEvent(type, id);
 }
 
 
-function AsyncTaskEvent(type, id, name) {
+function AsyncTaskEvent(type, id) {
   this.type_ = type;
   this.id_ = id;
-  this.name_ = name;
 }
 
 
@@ -1149,1308 +1003,15 @@
 }
 
 
-AsyncTaskEvent.prototype.name = function() {
-  return this.name_;
-}
-
-
 AsyncTaskEvent.prototype.id = function() {
   return this.id_;
 }
 
-
-function DebugCommandProcessor(exec_state, opt_is_running) {
-  this.exec_state_ = exec_state;
-  this.running_ = opt_is_running || false;
-}
-
-
-DebugCommandProcessor.prototype.processDebugRequest = function (request) {
-  return this.processDebugJSONRequest(request);
-};
-
-
-function ProtocolMessage(request) {
-  // Update sequence number.
-  this.seq = next_response_seq++;
-
-  if (request) {
-    // If message is based on a request this is a response. Fill the initial
-    // response from the request.
-    this.type = 'response';
-    this.request_seq = request.seq;
-    this.command = request.command;
-  } else {
-    // If the message is not based on a request it is a debugger-generated event.
-    this.type = 'event';
-  }
-  this.success = true;
-  // Handler may set this field to control debugger state.
-  this.running = UNDEFINED;
-}
-
-
-ProtocolMessage.prototype.setOption = function(name, value) {
-  if (!this.options_) {
-    this.options_ = {};
-  }
-  this.options_[name] = value;
-};
-
-
-ProtocolMessage.prototype.failed = function(message, opt_details) {
-  this.success = false;
-  this.message = message;
-  if (IS_OBJECT(opt_details)) {
-    this.error_details = opt_details;
-  }
-};
-
-
-ProtocolMessage.prototype.toJSONProtocol = function() {
-  // Encode the protocol header.
-  var json = {};
-  json.seq= this.seq;
-  if (this.request_seq) {
-    json.request_seq = this.request_seq;
-  }
-  json.type = this.type;
-  if (this.event) {
-    json.event = this.event;
-  }
-  if (this.command) {
-    json.command = this.command;
-  }
-  if (this.success) {
-    json.success = this.success;
-  } else {
-    json.success = false;
-  }
-  if (this.body) {
-    // Encode the body part.
-    var bodyJson;
-    var serializer = MakeMirrorSerializer(true, this.options_);
-    if (this.body instanceof Mirror) {
-      bodyJson = serializer.serializeValue(this.body);
-    } else if (this.body instanceof GlobalArray) {
-      bodyJson = [];
-      for (var i = 0; i < this.body.length; i++) {
-        if (this.body[i] instanceof Mirror) {
-          bodyJson.push(serializer.serializeValue(this.body[i]));
-        } else {
-          bodyJson.push(ObjectToProtocolObject_(this.body[i], serializer));
-        }
-      }
-    } else {
-      bodyJson = ObjectToProtocolObject_(this.body, serializer);
-    }
-    json.body = bodyJson;
-    json.refs = serializer.serializeReferencedObjects();
-  }
-  if (this.message) {
-    json.message = this.message;
-  }
-  if (this.error_details) {
-    json.error_details = this.error_details;
-  }
-  json.running = this.running;
-  return JSONStringify(json);
-};
-
-
-DebugCommandProcessor.prototype.createResponse = function(request) {
-  return new ProtocolMessage(request);
-};
-
-
-DebugCommandProcessor.prototype.processDebugJSONRequest = function(
-    json_request) {
-  var request;  // Current request.
-  var response;  // Generated response.
-  try {
-    try {
-      // Convert the JSON string to an object.
-      request = JSONParse(json_request);
-
-      // Create an initial response.
-      response = this.createResponse(request);
-
-      if (!request.type) {
-        throw %make_error(kDebugger, 'Type not specified');
-      }
-
-      if (request.type != 'request') {
-        throw %make_error(kDebugger,
-                        "Illegal type '" + request.type + "' in request");
-      }
-
-      if (!request.command) {
-        throw %make_error(kDebugger, 'Command not specified');
-      }
-
-      if (request.arguments) {
-        var args = request.arguments;
-        // TODO(yurys): remove request.arguments.compactFormat check once
-        // ChromeDevTools are switched to 'inlineRefs'
-        if (args.inlineRefs || args.compactFormat) {
-          response.setOption('inlineRefs', true);
-        }
-        if (!IS_UNDEFINED(args.maxStringLength)) {
-          response.setOption('maxStringLength', args.maxStringLength);
-        }
-      }
-
-      var key = request.command.toLowerCase();
-      var handler = DebugCommandProcessor.prototype.dispatch_[key];
-      if (IS_FUNCTION(handler)) {
-        %_Call(handler, this, request, response);
-      } else {
-        throw %make_error(kDebugger,
-                        'Unknown command "' + request.command + '" in request');
-      }
-    } catch (e) {
-      // If there is no response object, create one (without command).
-      if (!response) {
-        response = this.createResponse();
-      }
-      response.success = false;
-      response.message = TO_STRING(e);
-    }
-
-    // Return the response as a JSON encoded string.
-    try {
-      if (!IS_UNDEFINED(response.running)) {
-        // Response controls running state.
-        this.running_ = response.running;
-      }
-      response.running = this.running_;
-      return response.toJSONProtocol();
-    } catch (e) {
-      // Failed to generate response - return generic error.
-      return '{"seq":' + response.seq + ',' +
-              '"request_seq":' + request.seq + ',' +
-              '"type":"response",' +
-              '"success":false,' +
-              '"message":"Internal error: ' + TO_STRING(e) + '"}';
-    }
-  } catch (e) {
-    // Failed in one of the catch blocks above - most generic error.
-    return '{"seq":0,"type":"response","success":false,"message":"Internal error"}';
-  }
-};
-
-
-DebugCommandProcessor.prototype.continueRequest_ = function(request, response) {
-  // Check for arguments for continue.
-  if (request.arguments) {
-    var action = Debug.StepAction.StepIn;
-
-    // Pull out arguments.
-    var stepaction = request.arguments.stepaction;
-
-    // Get the stepaction argument.
-    if (stepaction) {
-      if (stepaction == 'in') {
-        action = Debug.StepAction.StepIn;
-      } else if (stepaction == 'next') {
-        action = Debug.StepAction.StepNext;
-      } else if (stepaction == 'out') {
-        action = Debug.StepAction.StepOut;
-      } else {
-        throw %make_error(kDebugger,
-                        'Invalid stepaction argument "' + stepaction + '".');
-      }
-    }
-
-    // Set up the VM for stepping.
-    this.exec_state_.prepareStep(action);
-  }
-
-  // VM should be running after executing this request.
-  response.running = true;
-};
-
-
-DebugCommandProcessor.prototype.breakRequest_ = function(request, response) {
-  // Ignore as break command does not do anything when broken.
-};
-
-
-DebugCommandProcessor.prototype.setBreakPointRequest_ =
-    function(request, response) {
-  // Check for legal request.
-  if (!request.arguments) {
-    response.failed('Missing arguments');
-    return;
-  }
-
-  // Pull out arguments.
-  var type = request.arguments.type;
-  var target = request.arguments.target;
-  var line = request.arguments.line;
-  var column = request.arguments.column;
-  var enabled = IS_UNDEFINED(request.arguments.enabled) ?
-      true : request.arguments.enabled;
-  var condition = request.arguments.condition;
-  var groupId = request.arguments.groupId;
-
-  // Check for legal arguments.
-  if (!type || IS_UNDEFINED(target)) {
-    response.failed('Missing argument "type" or "target"');
-    return;
-  }
-
-  // Either function or script break point.
-  var break_point_number;
-  if (type == 'function') {
-    // Handle function break point.
-    if (!IS_STRING(target)) {
-      response.failed('Argument "target" is not a string value');
-      return;
-    }
-    var f;
-    try {
-      // Find the function through a global evaluate.
-      f = this.exec_state_.evaluateGlobal(target).value();
-    } catch (e) {
-      response.failed('Error: "' + TO_STRING(e) +
-                      '" evaluating "' + target + '"');
-      return;
-    }
-    if (!IS_FUNCTION(f)) {
-      response.failed('"' + target + '" does not evaluate to a function');
-      return;
-    }
-
-    // Set function break point.
-    break_point_number = Debug.setBreakPoint(f, line, column, condition);
-  } else if (type == 'handle') {
-    // Find the object pointed by the specified handle.
-    var handle = ParseInt(target, 10);
-    var mirror = LookupMirror(handle);
-    if (!mirror) {
-      return response.failed('Object #' + handle + '# not found');
-    }
-    if (!mirror.isFunction()) {
-      return response.failed('Object #' + handle + '# is not a function');
-    }
-
-    // Set function break point.
-    break_point_number = Debug.setBreakPoint(mirror.value(),
-                                             line, column, condition);
-  } else if (type == 'script') {
-    // set script break point.
-    break_point_number =
-        Debug.setScriptBreakPointByName(target, line, column, condition,
-                                        groupId);
-  } else if (type == 'scriptId') {
-    break_point_number =
-        Debug.setScriptBreakPointById(target, line, column, condition, groupId);
-  } else if (type == 'scriptRegExp') {
-    break_point_number =
-        Debug.setScriptBreakPointByRegExp(target, line, column, condition,
-                                          groupId);
-  } else {
-    response.failed('Illegal type "' + type + '"');
-    return;
-  }
-
-  // Set additional break point properties.
-  var break_point = Debug.findBreakPoint(break_point_number);
-  if (!enabled) {
-    Debug.disableBreakPoint(break_point_number);
-  }
-
-  // Add the break point number to the response.
-  response.body = { type: type,
-                    breakpoint: break_point_number };
-
-  // Add break point information to the response.
-  if (break_point instanceof ScriptBreakPoint) {
-    if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
-      response.body.type = 'scriptId';
-      response.body.script_id = break_point.script_id();
-    } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
-      response.body.type = 'scriptName';
-      response.body.script_name = break_point.script_name();
-    } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
-      response.body.type = 'scriptRegExp';
-      response.body.script_regexp = break_point.script_regexp_object().source;
-    } else {
-      throw %make_error(kDebugger,
-                      "Unexpected breakpoint type: " + break_point.type());
-    }
-    response.body.line = break_point.line();
-    response.body.column = break_point.column();
-    response.body.actual_locations = break_point.actual_locations();
-  } else {
-    response.body.type = 'function';
-    response.body.actual_locations = [break_point.actual_location];
-  }
-};
-
-
-DebugCommandProcessor.prototype.changeBreakPointRequest_ = function(
-    request, response) {
-  // Check for legal request.
-  if (!request.arguments) {
-    response.failed('Missing arguments');
-    return;
-  }
-
-  // Pull out arguments.
-  var break_point = TO_NUMBER(request.arguments.breakpoint);
-  var enabled = request.arguments.enabled;
-  var condition = request.arguments.condition;
-
-  // Check for legal arguments.
-  if (!break_point) {
-    response.failed('Missing argument "breakpoint"');
-    return;
-  }
-
-  // Change enabled state if supplied.
-  if (!IS_UNDEFINED(enabled)) {
-    if (enabled) {
-      Debug.enableBreakPoint(break_point);
-    } else {
-      Debug.disableBreakPoint(break_point);
-    }
-  }
-
-  // Change condition if supplied
-  if (!IS_UNDEFINED(condition)) {
-    Debug.changeBreakPointCondition(break_point, condition);
-  }
-};
-
-
-DebugCommandProcessor.prototype.clearBreakPointGroupRequest_ = function(
-    request, response) {
-  // Check for legal request.
-  if (!request.arguments) {
-    response.failed('Missing arguments');
-    return;
-  }
-
-  // Pull out arguments.
-  var group_id = request.arguments.groupId;
-
-  // Check for legal arguments.
-  if (!group_id) {
-    response.failed('Missing argument "groupId"');
-    return;
-  }
-
-  var cleared_break_points = [];
-  var new_script_break_points = [];
-  for (var i = 0; i < script_break_points.length; i++) {
-    var next_break_point = script_break_points[i];
-    if (next_break_point.groupId() == group_id) {
-      cleared_break_points.push(next_break_point.number());
-      next_break_point.clear();
-    } else {
-      new_script_break_points.push(next_break_point);
-    }
-  }
-  script_break_points = new_script_break_points;
-
-  // Add the cleared break point numbers to the response.
-  response.body = { breakpoints: cleared_break_points };
-};
-
-
-DebugCommandProcessor.prototype.clearBreakPointRequest_ = function(
-    request, response) {
-  // Check for legal request.
-  if (!request.arguments) {
-    response.failed('Missing arguments');
-    return;
-  }
-
-  // Pull out arguments.
-  var break_point = TO_NUMBER(request.arguments.breakpoint);
-
-  // Check for legal arguments.
-  if (!break_point) {
-    response.failed('Missing argument "breakpoint"');
-    return;
-  }
-
-  // Clear break point.
-  Debug.clearBreakPoint(break_point);
-
-  // Add the cleared break point number to the response.
-  response.body = { breakpoint: break_point };
-};
-
-
-DebugCommandProcessor.prototype.listBreakpointsRequest_ = function(
-    request, response) {
-  var array = [];
-  for (var i = 0; i < script_break_points.length; i++) {
-    var break_point = script_break_points[i];
-
-    var description = {
-      number: break_point.number(),
-      line: break_point.line(),
-      column: break_point.column(),
-      groupId: break_point.groupId(),
-      active: break_point.active(),
-      condition: break_point.condition(),
-      actual_locations: break_point.actual_locations()
-    };
-
-    if (break_point.type() == Debug.ScriptBreakPointType.ScriptId) {
-      description.type = 'scriptId';
-      description.script_id = break_point.script_id();
-    } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptName) {
-      description.type = 'scriptName';
-      description.script_name = break_point.script_name();
-    } else if (break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) {
-      description.type = 'scriptRegExp';
-      description.script_regexp = break_point.script_regexp_object().source;
-    } else {
-      throw %make_error(kDebugger,
-                      "Unexpected breakpoint type: " + break_point.type());
-    }
-    array.push(description);
-  }
-
-  response.body = {
-    breakpoints: array,
-    breakOnExceptions: Debug.isBreakOnException(),
-    breakOnUncaughtExceptions: Debug.isBreakOnUncaughtException()
-  };
-};
-
-
-DebugCommandProcessor.prototype.disconnectRequest_ =
-    function(request, response) {
-  Debug.disableAllBreakPoints();
-  this.continueRequest_(request, response);
-};
-
-
-DebugCommandProcessor.prototype.setExceptionBreakRequest_ =
-    function(request, response) {
-  // Check for legal request.
-  if (!request.arguments) {
-    response.failed('Missing arguments');
-    return;
-  }
-
-  // Pull out and check the 'type' argument:
-  var type = request.arguments.type;
-  if (!type) {
-    response.failed('Missing argument "type"');
-    return;
-  }
-
-  // Initialize the default value of enable:
-  var enabled;
-  if (type == 'all') {
-    enabled = !Debug.isBreakOnException();
-  } else if (type == 'uncaught') {
-    enabled = !Debug.isBreakOnUncaughtException();
-  }
-
-  // Pull out and check the 'enabled' argument if present:
-  if (!IS_UNDEFINED(request.arguments.enabled)) {
-    enabled = request.arguments.enabled;
-    if ((enabled != true) && (enabled != false)) {
-      response.failed('Illegal value for "enabled":"' + enabled + '"');
-    }
-  }
-
-  // Now set the exception break state:
-  if (type == 'all') {
-    %ChangeBreakOnException(Debug.ExceptionBreak.Caught, enabled);
-  } else if (type == 'uncaught') {
-    %ChangeBreakOnException(Debug.ExceptionBreak.Uncaught, enabled);
-  } else {
-    response.failed('Unknown "type":"' + type + '"');
-  }
-
-  // Add the exception break state to the response.
-  response.body = { 'type': type, 'enabled': enabled };
-};
-
-
-DebugCommandProcessor.prototype.backtraceRequest_ = function(
-    request, response) {
-  // Get the number of frames.
-  var total_frames = this.exec_state_.frameCount();
-
-  // Create simple response if there are no frames.
-  if (total_frames == 0) {
-    response.body = {
-      totalFrames: total_frames
-    };
-    return;
-  }
-
-  // Default frame range to include in backtrace.
-  var from_index = 0;
-  var to_index = kDefaultBacktraceLength;
-
-  // Get the range from the arguments.
-  if (request.arguments) {
-    if (request.arguments.fromFrame) {
-      from_index = request.arguments.fromFrame;
-    }
-    if (request.arguments.toFrame) {
-      to_index = request.arguments.toFrame;
-    }
-    if (request.arguments.bottom) {
-      var tmp_index = total_frames - from_index;
-      from_index = total_frames - to_index;
-      to_index = tmp_index;
-    }
-    if (from_index < 0 || to_index < 0) {
-      return response.failed('Invalid frame number');
-    }
-  }
-
-  // Adjust the index.
-  to_index = MathMin(total_frames, to_index);
-
-  if (to_index <= from_index) {
-    var error = 'Invalid frame range';
-    return response.failed(error);
-  }
-
-  // Create the response body.
-  var frames = [];
-  for (var i = from_index; i < to_index; i++) {
-    frames.push(this.exec_state_.frame(i));
-  }
-  response.body = {
-    fromFrame: from_index,
-    toFrame: to_index,
-    totalFrames: total_frames,
-    frames: frames
-  };
-};
-
-
-DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
-  // No frames no source.
-  if (this.exec_state_.frameCount() == 0) {
-    return response.failed('No frames');
-  }
-
-  // With no arguments just keep the selected frame.
-  if (request.arguments) {
-    var index = request.arguments.number;
-    if (index < 0 || this.exec_state_.frameCount() <= index) {
-      return response.failed('Invalid frame number');
-    }
-
-    this.exec_state_.setSelectedFrame(request.arguments.number);
-  }
-  response.body = this.exec_state_.frame();
-};
-
-
-DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
-    function(scope_description) {
-  // Get the frame for which the scope or scopes are requested.
-  // With no frameNumber argument use the currently selected frame.
-  if (scope_description && !IS_UNDEFINED(scope_description.frameNumber)) {
-    var frame_index = scope_description.frameNumber;
-    if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
-      throw %make_type_error(kDebuggerFrame);
-    }
-    return this.exec_state_.frame(frame_index);
-  } else {
-    return this.exec_state_.frame();
-  }
-};
-
-
-// Gets scope host object from request. It is either a function
-// ('functionHandle' argument must be specified) or a stack frame
-// ('frameNumber' may be specified and the current frame is taken by default).
-DebugCommandProcessor.prototype.resolveScopeHolder_ =
-    function(scope_description) {
-  if (scope_description && "functionHandle" in scope_description) {
-    if (!IS_NUMBER(scope_description.functionHandle)) {
-      throw %make_error(kDebugger, 'Function handle must be a number');
-    }
-    var function_mirror = LookupMirror(scope_description.functionHandle);
-    if (!function_mirror) {
-      throw %make_error(kDebugger, 'Failed to find function object by handle');
-    }
-    if (!function_mirror.isFunction()) {
-      throw %make_error(kDebugger,
-                      'Value of non-function type is found by handle');
-    }
-    return function_mirror;
-  } else {
-    // No frames no scopes.
-    if (this.exec_state_.frameCount() == 0) {
-      throw %make_error(kDebugger, 'No scopes');
-    }
-
-    // Get the frame for which the scopes are requested.
-    var frame = this.resolveFrameFromScopeDescription_(scope_description);
-    return frame;
-  }
-}
-
-
-DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
-  var scope_holder = this.resolveScopeHolder_(request.arguments);
-
-  // Fill all scopes for this frame or function.
-  var total_scopes = scope_holder.scopeCount();
-  var scopes = [];
-  for (var i = 0; i < total_scopes; i++) {
-    scopes.push(scope_holder.scope(i));
-  }
-  response.body = {
-    fromScope: 0,
-    toScope: total_scopes,
-    totalScopes: total_scopes,
-    scopes: scopes
-  };
-};
-
-
-DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
-  // Get the frame or function for which the scope is requested.
-  var scope_holder = this.resolveScopeHolder_(request.arguments);
-
-  // With no scope argument just return top scope.
-  var scope_index = 0;
-  if (request.arguments && !IS_UNDEFINED(request.arguments.number)) {
-    scope_index = TO_NUMBER(request.arguments.number);
-    if (scope_index < 0 || scope_holder.scopeCount() <= scope_index) {
-      return response.failed('Invalid scope number');
-    }
-  }
-
-  response.body = scope_holder.scope(scope_index);
-};
-
-
-// Reads value from protocol description. Description may be in form of type
-// (for singletons), raw value (primitive types supported in JSON),
-// string value description plus type (for primitive values) or handle id.
-// Returns raw value or throws exception.
-DebugCommandProcessor.resolveValue_ = function(value_description) {
-  if ("handle" in value_description) {
-    var value_mirror = LookupMirror(value_description.handle);
-    if (!value_mirror) {
-      throw %make_error(kDebugger, "Failed to resolve value by handle, ' #" +
-                                 value_description.handle + "# not found");
-    }
-    return value_mirror.value();
-  } else if ("stringDescription" in value_description) {
-    if (value_description.type == MirrorType.BOOLEAN_TYPE) {
-      return TO_BOOLEAN(value_description.stringDescription);
-    } else if (value_description.type == MirrorType.NUMBER_TYPE) {
-      return TO_NUMBER(value_description.stringDescription);
-    } else if (value_description.type == MirrorType.STRING_TYPE) {
-      return TO_STRING(value_description.stringDescription);
-    } else {
-      throw %make_error(kDebugger, "Unknown type");
-    }
-  } else if ("value" in value_description) {
-    return value_description.value;
-  } else if (value_description.type == MirrorType.UNDEFINED_TYPE) {
-    return UNDEFINED;
-  } else if (value_description.type == MirrorType.NULL_TYPE) {
-    return null;
-  } else {
-    throw %make_error(kDebugger, "Failed to parse value description");
-  }
-};
-
-
-DebugCommandProcessor.prototype.setVariableValueRequest_ =
-    function(request, response) {
-  if (!request.arguments) {
-    response.failed('Missing arguments');
-    return;
-  }
-
-  if (IS_UNDEFINED(request.arguments.name)) {
-    response.failed('Missing variable name');
-  }
-  var variable_name = request.arguments.name;
-
-  var scope_description = request.arguments.scope;
-
-  // Get the frame or function for which the scope is requested.
-  var scope_holder = this.resolveScopeHolder_(scope_description);
-
-  if (IS_UNDEFINED(scope_description.number)) {
-    response.failed('Missing scope number');
-  }
-  var scope_index = TO_NUMBER(scope_description.number);
-
-  var scope = scope_holder.scope(scope_index);
-
-  var new_value =
-      DebugCommandProcessor.resolveValue_(request.arguments.newValue);
-
-  scope.setVariableValue(variable_name, new_value);
-
-  var new_value_mirror = MakeMirror(new_value);
-
-  response.body = {
-    newValue: new_value_mirror
-  };
-};
-
-
-DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
-  if (!request.arguments) {
-    return response.failed('Missing arguments');
-  }
-
-  // Pull out arguments.
-  var expression = request.arguments.expression;
-  var frame = request.arguments.frame;
-  var global = request.arguments.global;
-  var disable_break = request.arguments.disable_break;
-  var additional_context = request.arguments.additional_context;
-
-  // The expression argument could be an integer so we convert it to a
-  // string.
-  try {
-    expression = TO_STRING(expression);
-  } catch(e) {
-    return response.failed('Failed to convert expression argument to string');
-  }
-
-  // Check for legal arguments.
-  if (!IS_UNDEFINED(frame) && global) {
-    return response.failed('Arguments "frame" and "global" are exclusive');
-  }
-
-  var additional_context_object;
-  if (additional_context) {
-    additional_context_object = {};
-    for (var i = 0; i < additional_context.length; i++) {
-      var mapping = additional_context[i];
-
-      if (!IS_STRING(mapping.name)) {
-        return response.failed("Context element #" + i +
-            " doesn't contain name:string property");
-      }
-
-      var raw_value = DebugCommandProcessor.resolveValue_(mapping);
-      additional_context_object[mapping.name] = raw_value;
-    }
-  }
-
-  // Global evaluate.
-  if (global) {
-    // Evaluate in the native context.
-    response.body = this.exec_state_.evaluateGlobal(
-        expression, TO_BOOLEAN(disable_break), additional_context_object);
-    return;
-  }
-
-  // Default value for disable_break is true.
-  if (IS_UNDEFINED(disable_break)) {
-    disable_break = true;
-  }
-
-  // No frames no evaluate in frame.
-  if (this.exec_state_.frameCount() == 0) {
-    return response.failed('No frames');
-  }
-
-  // Check whether a frame was specified.
-  if (!IS_UNDEFINED(frame)) {
-    var frame_number = TO_NUMBER(frame);
-    if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
-      return response.failed('Invalid frame "' + frame + '"');
-    }
-    // Evaluate in the specified frame.
-    response.body = this.exec_state_.frame(frame_number).evaluate(
-        expression, TO_BOOLEAN(disable_break), additional_context_object);
-    return;
-  } else {
-    // Evaluate in the selected frame.
-    response.body = this.exec_state_.frame().evaluate(
-        expression, TO_BOOLEAN(disable_break), additional_context_object);
-    return;
-  }
-};
-
-
-DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
-  if (!request.arguments) {
-    return response.failed('Missing arguments');
-  }
-
-  // Pull out arguments.
-  var handles = request.arguments.handles;
-
-  // Check for legal arguments.
-  if (IS_UNDEFINED(handles)) {
-    return response.failed('Argument "handles" missing');
-  }
-
-  // Set 'includeSource' option for script lookup.
-  if (!IS_UNDEFINED(request.arguments.includeSource)) {
-    var includeSource = TO_BOOLEAN(request.arguments.includeSource);
-    response.setOption('includeSource', includeSource);
-  }
-
-  // Lookup handles.
-  var mirrors = {};
-  for (var i = 0; i < handles.length; i++) {
-    var handle = handles[i];
-    var mirror = LookupMirror(handle);
-    if (!mirror) {
-      return response.failed('Object #' + handle + '# not found');
-    }
-    mirrors[handle] = mirror;
-  }
-  response.body = mirrors;
-};
-
-
-DebugCommandProcessor.prototype.referencesRequest_ =
-    function(request, response) {
-  if (!request.arguments) {
-    return response.failed('Missing arguments');
-  }
-
-  // Pull out arguments.
-  var type = request.arguments.type;
-  var handle = request.arguments.handle;
-
-  // Check for legal arguments.
-  if (IS_UNDEFINED(type)) {
-    return response.failed('Argument "type" missing');
-  }
-  if (IS_UNDEFINED(handle)) {
-    return response.failed('Argument "handle" missing');
-  }
-  if (type != 'referencedBy' && type != 'constructedBy') {
-    return response.failed('Invalid type "' + type + '"');
-  }
-
-  // Lookup the handle and return the objects that reference it.
-  var mirror = LookupMirror(handle);
-  if (mirror) {
-    if (type == 'referencedBy') {
-      response.body = mirror.referencedBy();
-    } else {
-      response.body = mirror.constructedBy();
-    }
-  } else {
-    return response.failed('Object #' + handle + '# not found');
-  }
-};
-
-
-DebugCommandProcessor.prototype.sourceRequest_ = function(request, response) {
-  // No frames no source.
-  if (this.exec_state_.frameCount() == 0) {
-    return response.failed('No source');
-  }
-
-  var from_line;
-  var to_line;
-  var frame = this.exec_state_.frame();
-  if (request.arguments) {
-    // Pull out arguments.
-    from_line = request.arguments.fromLine;
-    to_line = request.arguments.toLine;
-
-    if (!IS_UNDEFINED(request.arguments.frame)) {
-      var frame_number = TO_NUMBER(request.arguments.frame);
-      if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
-        return response.failed('Invalid frame "' + frame + '"');
-      }
-      frame = this.exec_state_.frame(frame_number);
-    }
-  }
-
-  // Get the script selected.
-  var script = frame.func().script();
-  if (!script) {
-    return response.failed('No source');
-  }
-
-  var raw_script = script.value();
-
-  // Sanitize arguments and remove line offset.
-  var line_offset = raw_script.line_offset;
-  var line_count = %ScriptLineCount(raw_script);
-  from_line = IS_UNDEFINED(from_line) ? 0 : from_line - line_offset;
-  to_line = IS_UNDEFINED(to_line) ? line_count : to_line - line_offset;
-
-  if (from_line < 0) from_line = 0;
-  if (to_line > line_count) to_line = line_count;
-
-  if (from_line >= line_count || to_line < 0 || from_line > to_line) {
-    return response.failed('Invalid line interval');
-  }
-
-  // Fill in the response.
-
-  response.body = {};
-  response.body.fromLine = from_line + line_offset;
-  response.body.toLine = to_line + line_offset;
-  response.body.fromPosition = %ScriptLineStartPosition(raw_script, from_line);
-  response.body.toPosition =
-    (to_line == 0) ? 0 : %ScriptLineEndPosition(raw_script, to_line - 1);
-  response.body.totalLines = %ScriptLineCount(raw_script);
-
-  response.body.source = %_SubString(raw_script.source,
-                                     response.body.fromPosition,
-                                     response.body.toPosition);
-};
-
-
-DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
-  var types = ScriptTypeFlag(Debug.ScriptType.Normal);
-  var includeSource = false;
-  var idsToInclude = null;
-  if (request.arguments) {
-    // Pull out arguments.
-    if (!IS_UNDEFINED(request.arguments.types)) {
-      types = TO_NUMBER(request.arguments.types);
-      if (IsNaN(types) || types < 0) {
-        return response.failed('Invalid types "' +
-                               request.arguments.types + '"');
-      }
-    }
-
-    if (!IS_UNDEFINED(request.arguments.includeSource)) {
-      includeSource = TO_BOOLEAN(request.arguments.includeSource);
-      response.setOption('includeSource', includeSource);
-    }
-
-    if (IS_ARRAY(request.arguments.ids)) {
-      idsToInclude = {};
-      var ids = request.arguments.ids;
-      for (var i = 0; i < ids.length; i++) {
-        idsToInclude[ids[i]] = true;
-      }
-    }
-
-    var filterStr = null;
-    var filterNum = null;
-    if (!IS_UNDEFINED(request.arguments.filter)) {
-      var num = TO_NUMBER(request.arguments.filter);
-      if (!IsNaN(num)) {
-        filterNum = num;
-      }
-      filterStr = request.arguments.filter;
-    }
-  }
-
-  // Collect all scripts in the heap.
-  var scripts = Debug.scripts();
-
-  response.body = [];
-
-  for (var i = 0; i < scripts.length; i++) {
-    if (idsToInclude && !idsToInclude[scripts[i].id]) {
-      continue;
-    }
-    if (filterStr || filterNum) {
-      var script = scripts[i];
-      var found = false;
-      if (filterNum && !found) {
-        if (script.id && script.id === filterNum) {
-          found = true;
-        }
-      }
-      if (filterStr && !found) {
-        if (script.name && script.name.indexOf(filterStr) >= 0) {
-          found = true;
-        }
-      }
-      if (!found) continue;
-    }
-    if (types & ScriptTypeFlag(scripts[i].type)) {
-      response.body.push(MakeMirror(scripts[i]));
-    }
-  }
-};
-
-
-DebugCommandProcessor.prototype.suspendRequest_ = function(request, response) {
-  response.running = false;
-};
-
-
-// TODO(5510): remove this.
-DebugCommandProcessor.prototype.versionRequest_ = function(request, response) {
-  response.body = {
-    V8Version: %GetV8Version()
-  };
-};
-
-
-DebugCommandProcessor.prototype.changeLiveRequest_ = function(
-    request, response) {
-  if (!request.arguments) {
-    return response.failed('Missing arguments');
-  }
-  var script_id = request.arguments.script_id;
-  var preview_only = !!request.arguments.preview_only;
-
-  var the_script = scriptById(script_id);
-  if (!the_script) {
-    response.failed('Script not found');
-    return;
-  }
-
-  var change_log = new GlobalArray();
-
-  if (!IS_STRING(request.arguments.new_source)) {
-    throw "new_source argument expected";
-  }
-
-  var new_source = request.arguments.new_source;
-
-  var result_description;
-  try {
-    result_description = Debug.LiveEdit.SetScriptSource(the_script,
-        new_source, preview_only, change_log);
-  } catch (e) {
-    if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
-      response.failed(e.message, e.details);
-      return;
-    }
-    throw e;
-  }
-  response.body = {change_log: change_log, result: result_description};
-
-  if (!preview_only && !this.running_ && result_description.stack_modified) {
-    response.body.stepin_recommended = true;
-  }
-};
-
-
-DebugCommandProcessor.prototype.restartFrameRequest_ = function(
-    request, response) {
-  if (!request.arguments) {
-    return response.failed('Missing arguments');
-  }
-  var frame = request.arguments.frame;
-
-  // No frames to evaluate in frame.
-  if (this.exec_state_.frameCount() == 0) {
-    return response.failed('No frames');
-  }
-
-  var frame_mirror;
-  // Check whether a frame was specified.
-  if (!IS_UNDEFINED(frame)) {
-    var frame_number = TO_NUMBER(frame);
-    if (frame_number < 0 || frame_number >= this.exec_state_.frameCount()) {
-      return response.failed('Invalid frame "' + frame + '"');
-    }
-    // Restart specified frame.
-    frame_mirror = this.exec_state_.frame(frame_number);
-  } else {
-    // Restart selected frame.
-    frame_mirror = this.exec_state_.frame();
-  }
-
-  var result_description = frame_mirror.restart();
-  response.body = {result: result_description};
-};
-
-
-DebugCommandProcessor.prototype.debuggerFlagsRequest_ = function(request,
-                                                                 response) {
-  // Check for legal request.
-  if (!request.arguments) {
-    response.failed('Missing arguments');
-    return;
-  }
-
-  // Pull out arguments.
-  var flags = request.arguments.flags;
-
-  response.body = { flags: [] };
-  if (!IS_UNDEFINED(flags)) {
-    for (var i = 0; i < flags.length; i++) {
-      var name = flags[i].name;
-      var debugger_flag = debugger_flags[name];
-      if (!debugger_flag) {
-        continue;
-      }
-      if ('value' in flags[i]) {
-        debugger_flag.setValue(flags[i].value);
-      }
-      response.body.flags.push({ name: name, value: debugger_flag.getValue() });
-    }
-  } else {
-    for (var name in debugger_flags) {
-      var value = debugger_flags[name].getValue();
-      response.body.flags.push({ name: name, value: value });
-    }
-  }
-};
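
The handler above implements a small get/set protocol over named flags: each listed flag is optionally written and its current value is always echoed back; with no flag list at all, every flag is reported, and unknown names are skipped rather than rejected. The same semantics as a standalone C++ sketch (all types and flag names are illustrative; an empty request list stands in for the absent argument):

    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    std::map<std::string, bool> debugger_flags = {
        {"breakPointsActive", true}, {"breakOnCaughtException", false}};

    struct FlagRequest {
      std::string name;
      std::optional<bool> value;  // present means: set before reporting
    };

    // Optionally set, then always report, as the handler above does.
    std::vector<std::pair<std::string, bool>> HandleFlags(
        const std::vector<FlagRequest>& requests) {
      std::vector<std::pair<std::string, bool>> out;
      if (requests.empty()) {  // no list given: report every flag
        for (const auto& entry : debugger_flags) out.push_back(entry);
        return out;
      }
      for (const auto& req : requests) {
        auto it = debugger_flags.find(req.name);
        if (it == debugger_flags.end()) continue;  // unknown: skip silently
        if (req.value) it->second = *req.value;
        out.push_back(*it);
      }
      return out;
    }

    int main() {
      auto res = HandleFlags({{"breakOnCaughtException", true}});
      std::cout << res[0].first << " = " << res[0].second << "\n";  // ... = 1
      return 0;
    }
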
-
-
-DebugCommandProcessor.prototype.v8FlagsRequest_ = function(request, response) {
-  var flags = request.arguments.flags;
-  if (!flags) flags = '';
-  %SetFlags(flags);
-};
-
-
-DebugCommandProcessor.prototype.gcRequest_ = function(request, response) {
-  var type = request.arguments.type;
-  if (!type) type = 'all';
-
-  var before = %GetHeapUsage();
-  %CollectGarbage(type);
-  var after = %GetHeapUsage();
-
-  response.body = { "before": before, "after": after };
-};
-
-
-DebugCommandProcessor.prototype.dispatch_ = (function() {
-  var proto = DebugCommandProcessor.prototype;
-  return {
-    "continue":             proto.continueRequest_,
-    "break"   :             proto.breakRequest_,
-    "setbreakpoint" :       proto.setBreakPointRequest_,
-    "changebreakpoint":     proto.changeBreakPointRequest_,
-    "clearbreakpoint":      proto.clearBreakPointRequest_,
-    "clearbreakpointgroup": proto.clearBreakPointGroupRequest_,
-    "disconnect":           proto.disconnectRequest_,
-    "setexceptionbreak":    proto.setExceptionBreakRequest_,
-    "listbreakpoints":      proto.listBreakpointsRequest_,
-    "backtrace":            proto.backtraceRequest_,
-    "frame":                proto.frameRequest_,
-    "scopes":               proto.scopesRequest_,
-    "scope":                proto.scopeRequest_,
-    "setvariablevalue":     proto.setVariableValueRequest_,
-    "evaluate":             proto.evaluateRequest_,
-    "lookup":               proto.lookupRequest_,
-    "references":           proto.referencesRequest_,
-    "source":               proto.sourceRequest_,
-    "scripts":              proto.scriptsRequest_,
-    "suspend":              proto.suspendRequest_,
-    "version":              proto.versionRequest_,
-    "changelive":           proto.changeLiveRequest_,
-    "restartframe":         proto.restartFrameRequest_,
-    "flags":                proto.debuggerFlagsRequest_,
-    "v8flag":               proto.v8FlagsRequest_,
-    "gc":                   proto.gcRequest_,
-  };
-})();
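
This table maps protocol command names to handler methods and is resolved once per request, which keeps the command set declarative. A minimal standalone C++ sketch of the same lookup-and-invoke pattern using member-function pointers (all names illustrative):

    #include <iostream>
    #include <string>
    #include <unordered_map>

    struct Processor {
      void Continue(const std::string& args) { std::cout << "continue\n"; }
      void Version(const std::string& args) { std::cout << "version\n"; }

      // Command name -> member function, analogous to the dispatch_ table.
      using Handler = void (Processor::*)(const std::string&);
      std::unordered_map<std::string, Handler> dispatch{
          {"continue", &Processor::Continue},
          {"version", &Processor::Version},
      };

      // Look up and invoke; an unknown command yields an error (here: false).
      bool Dispatch(const std::string& command, const std::string& args) {
        auto it = dispatch.find(command);
        if (it == dispatch.end()) return false;
        (this->*(it->second))(args);
        return true;
      }
    };

    int main() {
      Processor p;
      p.Dispatch("version", "");        // prints "version"
      p.Dispatch("nosuchcommand", "");  // no handler, returns false
      return 0;
    }
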
-
-
-// Check whether the previously processed command caused the VM to resume
-// running.
-DebugCommandProcessor.prototype.isRunning = function() {
-  return this.running_;
-};
-
-
-DebugCommandProcessor.prototype.systemBreak = function(cmd, args) {
-  return %SystemBreak();
-};
-
-
-/**
- * Convert an Object to its debugger protocol representation. The representation
- * may be serialized to a JSON string using JSON.stringify().
- * This implementation simply runs through all string property names, converts
- * each property value to a protocol value and adds the property to the result
- * object. For type "object" the function will be called recursively. Note that
- * circular structures will cause infinite recursion.
- * @param {Object} object The object to format as protocol object.
- * @param {MirrorSerializer} mirror_serializer The serializer to use if any
- *     mirror objects are encountered.
- * @return {Object} Protocol object value.
- */
-function ObjectToProtocolObject_(object, mirror_serializer) {
-  var content = {};
-  for (var key in object) {
-    // Only consider string keys.
-    if (typeof key == 'string') {
-      // Format the value based on its type.
-      var property_value_json = ValueToProtocolValue_(object[key],
-                                                      mirror_serializer);
-      // Add the property if relevant.
-      if (!IS_UNDEFINED(property_value_json)) {
-        content[key] = property_value_json;
-      }
-    }
-  }
-
-  return content;
-}
-
-
-/**
- * Convert an array to its debugger protocol representation. It will convert
- * each array element to a protocol value.
- * @param {Array} array The array to format as protocol array.
- * @param {MirrorSerializer} mirror_serializer The serializer to use if any
- *     mirror objects are encountered.
- * @return {Array} Protocol array value.
- */
-function ArrayToProtocolArray_(array, mirror_serializer) {
-  var json = [];
-  for (var i = 0; i < array.length; i++) {
-    json.push(ValueToProtocolValue_(array[i], mirror_serializer));
-  }
-  return json;
-}
-
-
-/**
- * Convert a value to its debugger protocol representation.
- * @param {*} value The value to format as protocol value.
- * @param {MirrorSerializer} mirror_serializer The serializer to use if any
- *     mirror objects are encountered.
- * @return {*} Protocol value.
- */
-function ValueToProtocolValue_(value, mirror_serializer) {
-  // Format the value based on its type.
-  var json;
-  switch (typeof value) {
-    case 'object':
-      if (value instanceof Mirror) {
-        json = mirror_serializer.serializeValue(value);
-      } else if (IS_ARRAY(value)){
-        json = ArrayToProtocolArray_(value, mirror_serializer);
-      } else {
-        json = ObjectToProtocolObject_(value, mirror_serializer);
-      }
-      break;
-
-    case 'boolean':
-    case 'string':
-    case 'number':
-      json = value;
-      break;
-
-    default:
-      json = null;
-  }
-  return json;
-}
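
Together, the three helpers above form a plain recursive tree conversion: primitives pass through, arrays and plain objects recurse, mirrors are delegated to the serializer, and, as the comment warns, cycles are not detected. The recursive shape, minus the mirror case, in a self-contained C++ sketch over a toy value type:

    #include <iostream>
    #include <string>
    #include <type_traits>
    #include <utility>
    #include <variant>
    #include <vector>

    // A tiny JSON-like value: null, bool, number, string, array, object. The
    // object is a flat list of key/value pairs to keep the sketch standalone.
    struct Value;
    using Array = std::vector<Value>;
    using Object = std::vector<std::pair<std::string, Value>>;
    struct Value {
      std::variant<std::nullptr_t, bool, double, std::string, Array, Object> v;
    };

    // Mirrors ValueToProtocolValue_: primitives pass through, containers
    // recurse. As in the JS version, cyclic structures are not detected.
    Value ToProtocol(const Value& in) {
      return std::visit(
          [](const auto& x) -> Value {
            using T = std::decay_t<decltype(x)>;
            if constexpr (std::is_same_v<T, Array>) {
              Array out;
              for (const auto& e : x) out.push_back(ToProtocol(e));
              return Value{out};
            } else if constexpr (std::is_same_v<T, Object>) {
              Object out;
              for (const auto& [k, e] : x) out.emplace_back(k, ToProtocol(e));
              return Value{out};
            } else {
              return Value{x};  // null/bool/number/string pass through
            }
          },
          in.v);
    }

    int main() {
      Value v{Object{{"a", Value{1.0}}, {"b", Value{Array{Value{true}}}}}};
      std::cout << std::get<Object>(ToProtocol(v).v).size() << "\n";  // 2
      return 0;
    }
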
-
-
 // -------------------------------------------------------------------
 // Exports
 
 utils.InstallConstants(global, [
   "Debug", Debug,
-  "DebugCommandProcessor", DebugCommandProcessor,
   "BreakEvent", BreakEvent,
   "CompileEvent", CompileEvent,
   "BreakPoint", BreakPoint,
@@ -2464,12 +1025,6 @@
   "MakeCompileEvent", MakeCompileEvent,
   "MakeAsyncTaskEvent", MakeAsyncTaskEvent,
   "IsBreakPointTriggered", IsBreakPointTriggered,
-  "UpdateScriptBreakPoints", UpdateScriptBreakPoints,
 ]);
 
-// Export to liveedit.js
-utils.Export(function(to) {
-  to.GetScriptBreakPoints = GetScriptBreakPoints;
-});
-
 })
diff --git a/src/debug/ia32/debug-ia32.cc b/src/debug/ia32/debug-ia32.cc
index 47ec69e..0ce9874 100644
--- a/src/debug/ia32/debug-ia32.cc
+++ b/src/debug/ia32/debug-ia32.cc
@@ -64,12 +64,6 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Load padding words on stack.
-    for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
-      __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingValue)));
-    }
-    __ push(Immediate(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
-
     // Push arguments for DebugBreak call.
     if (mode == SAVE_RESULT_REGISTER) {
       // Break on return.
@@ -96,54 +90,43 @@
         }
       }
     }
-
-    __ pop(ebx);
-    // We divide the stored value by 2 (untagging) and multiply it by the word size.
-    STATIC_ASSERT(kSmiTagSize == 1 && kSmiShiftSize == 0);
-    __ lea(esp, Operand(esp, ebx, times_half_pointer_size, 0));
-
     // Get rid of the internal frame.
   }
 
-  // This call did not replace a call, so there will be an unwanted
-  // return address left on the stack. Here we get rid of that.
-  __ add(esp, Immediate(kPointerSize));
+  __ MaybeDropFrames();
 
-  // Now that the break point has been handled, resume normal execution by
-  // jumping to the target address intended by the caller and that was
-  // overwritten by the address of DebugBreakXXX.
-  ExternalReference after_break_target =
-      ExternalReference::debug_after_break_target_address(masm->isolate());
-  __ jmp(Operand::StaticVariable(after_break_target));
+  // Return to caller.
+  __ ret(0);
 }
 
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+  }
+  __ MaybeDropFrames();
 
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  // We do not know our frame height, but set esp based on ebp.
-  __ lea(esp, Operand(ebp, FrameDropperFrameConstants::kFunctionOffset));
-  __ pop(edi);  // Function.
-  __ add(esp, Immediate(-FrameDropperFrameConstants::kCodeOffset));  // INTERNAL
-                                                                     // frame
-                                                                     // marker
-                                                                     // and code
-  __ pop(ebp);
+  // Return to caller.
+  __ ret(0);
+}
 
-  ParameterCount dummy(0);
-  __ FloodFunctionIfStepping(edi, no_reg, dummy, dummy);
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+  // Frame is being dropped:
+  // - Drop to the target frame specified by ebx.
+  // - Look up current function on the frame.
+  // - Leave the frame.
+  // - Restart the frame by calling the function.
+  __ mov(ebp, ebx);
+  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+  __ leave();
 
-  // Load context from the function.
-  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
-  // Clear new.target register as a safety measure.
-  __ mov(edx, masm->isolate()->factory()->undefined_value());
-
-  // Get function code.
   __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kCodeOffset));
-  __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
+  __ mov(ebx,
+         FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
 
-  // Re-run JSFunction, edi is function, esi is context.
-  __ jmp(ebx);
+  ParameterCount dummy(ebx);
+  __ InvokeFunction(edi, dummy, dummy, JUMP_FUNCTION,
+                    CheckDebugStepCallWrapper());
 }
 
 
diff --git a/src/debug/interface-types.h b/src/debug/interface-types.h
new file mode 100644
index 0000000..b86986d
--- /dev/null
+++ b/src/debug/interface-types.h
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_INTERFACE_TYPES_H_
+#define V8_DEBUG_INTERFACE_TYPES_H_
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace debug {
+
+/**
+ * Defines a location inside a script.
+ * Lines and columns are 0-based.
+ */
+class V8_EXPORT_PRIVATE Location {
+ public:
+  Location(int line_number, int column_number);
+  /**
+   * Create an empty location.
+   */
+  Location();
+
+  int GetLineNumber() const;
+  int GetColumnNumber() const;
+  bool IsEmpty() const;
+
+ private:
+  int line_number_;
+  int column_number_;
+};
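
The header only declares Location; a plausible implementation (an assumption here, since this patch defines nothing) uses -1/-1 as the "empty" sentinel behind IsEmpty():

    #include <cassert>

    // Standalone sketch of the Location class declared above. The -1
    // sentinel for "empty" is an assumed convention, not taken from the patch.
    class Location {
     public:
      Location(int line_number, int column_number)
          : line_number_(line_number), column_number_(column_number) {}
      Location() : line_number_(-1), column_number_(-1) {}

      int GetLineNumber() const { return line_number_; }
      int GetColumnNumber() const { return column_number_; }
      bool IsEmpty() const { return line_number_ == -1 && column_number_ == -1; }

     private:
      int line_number_;
      int column_number_;
    };

    int main() {
      assert(Location().IsEmpty());
      assert(Location(3, 14).GetColumnNumber() == 14);  // 0-based, per the comment
      return 0;
    }
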
+
+/**
+ * The result of disassembling a wasm function.
+ * Consists of the disassembly string and an offset table mapping wasm byte
+ * offsets to line and column in the disassembly.
+ * The offset table entries are ordered by the byte_offset.
+ * All numbers are 0-based.
+ */
+struct WasmDisassemblyOffsetTableEntry {
+  WasmDisassemblyOffsetTableEntry(uint32_t byte_offset, int line, int column)
+      : byte_offset(byte_offset), line(line), column(column) {}
+
+  uint32_t byte_offset;
+  int line;
+  int column;
+};
+struct WasmDisassembly {
+  using OffsetTable = std::vector<WasmDisassemblyOffsetTableEntry>;
+  WasmDisassembly() {}
+  WasmDisassembly(std::string disassembly, OffsetTable offset_table)
+      : disassembly(std::move(disassembly)),
+        offset_table(std::move(offset_table)) {}
+
+  std::string disassembly;
+  OffsetTable offset_table;
+};
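
Because the offset table is ordered by byte_offset, a consumer can map a wasm byte offset to its disassembly position with a binary search. A sketch of such a lookup (FindPosition is illustrative and not part of this header; Entry stands in for WasmDisassemblyOffsetTableEntry):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Entry {
      uint32_t byte_offset;
      int line;
      int column;
    };

    // Find the entry with the largest byte_offset <= target. The table is
    // sorted by byte_offset, so std::upper_bound finds the first entry past
    // the target and the one before it is the match.
    const Entry* FindPosition(const std::vector<Entry>& table, uint32_t target) {
      auto it = std::upper_bound(
          table.begin(), table.end(), target,
          [](uint32_t t, const Entry& e) { return t < e.byte_offset; });
      if (it == table.begin()) return nullptr;  // target precedes all entries
      return &*(it - 1);
    }

    int main() {
      std::vector<Entry> table = {{0, 1, 0}, {4, 2, 2}, {9, 3, 0}};
      assert(FindPosition(table, 6)->line == 2);  // inside the second entry
      assert(FindPosition(table, 0)->line == 1);
      return 0;
    }
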
+
+enum PromiseDebugActionType {
+  kDebugPromiseCreated,
+  kDebugEnqueueAsyncFunction,
+  kDebugEnqueuePromiseResolve,
+  kDebugEnqueuePromiseReject,
+  kDebugPromiseCollected,
+  kDebugWillHandle,
+  kDebugDidHandle,
+};
+
+}  // namespace debug
+}  // namespace v8
+
+#endif  // V8_DEBUG_INTERFACE_TYPES_H_
diff --git a/src/debug/liveedit.cc b/src/debug/liveedit.cc
index ace8297..fa70e77 100644
--- a/src/debug/liveedit.cc
+++ b/src/debug/liveedit.cc
@@ -4,6 +4,7 @@
 
 #include "src/debug/liveedit.h"
 
+#include "src/assembler-inl.h"
 #include "src/ast/scopes.h"
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
@@ -14,6 +15,7 @@
 #include "src/global-handles.h"
 #include "src/isolate-inl.h"
 #include "src/messages.h"
+#include "src/objects-inl.h"
 #include "src/source-position-table.h"
 #include "src/v8.h"
 #include "src/v8memory.h"
@@ -604,19 +606,18 @@
   return Smi::cast(length)->value();
 }
 
-
 void FunctionInfoWrapper::SetInitialProperties(Handle<String> name,
                                                int start_position,
                                                int end_position, int param_num,
-                                               int literal_count,
-                                               int parent_index) {
+                                               int parent_index,
+                                               int function_literal_id) {
   HandleScope scope(isolate());
   this->SetField(kFunctionNameOffset_, name);
   this->SetSmiValueField(kStartPositionOffset_, start_position);
   this->SetSmiValueField(kEndPositionOffset_, end_position);
   this->SetSmiValueField(kParamNumOffset_, param_num);
-  this->SetSmiValueField(kLiteralNumOffset_, literal_count);
   this->SetSmiValueField(kParentIndexOffset_, parent_index);
+  this->SetSmiValueField(kFunctionLiteralIdOffset_, function_literal_id);
 }
 
 void FunctionInfoWrapper::SetSharedFunctionInfo(
@@ -654,33 +655,7 @@
 
 
 void LiveEdit::InitializeThreadLocal(Debug* debug) {
-  debug->thread_local_.frame_drop_mode_ = LIVE_EDIT_FRAMES_UNTOUCHED;
-}
-
-
-bool LiveEdit::SetAfterBreakTarget(Debug* debug) {
-  Code* code = NULL;
-  Isolate* isolate = debug->isolate_;
-  switch (debug->thread_local_.frame_drop_mode_) {
-    case LIVE_EDIT_FRAMES_UNTOUCHED:
-      return false;
-    case LIVE_EDIT_FRAME_DROPPED_IN_DEBUG_SLOT_CALL:
-      // Debug break slot stub does not return normally, instead it manually
-      // cleans the stack and jumps. We should patch the jump address.
-      code = isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit);
-      break;
-    case LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL:
-      // Nothing to do, after_break_target is not used here.
-      return true;
-    case LIVE_EDIT_FRAME_DROPPED_IN_RETURN_CALL:
-      code = isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit);
-      break;
-    case LIVE_EDIT_CURRENTLY_SET_MODE:
-      UNREACHABLE();
-      break;
-  }
-  debug->after_break_target_ = code->entry();
-  return true;
+  debug->thread_local_.restart_fp_ = 0;
 }
 
 
@@ -747,47 +722,6 @@
   }
 }
 
-
-// Visitor that finds all references to a particular code object,
-// including "CODE_TARGET" references in other code objects and replaces
-// them on the fly.
-class ReplacingVisitor : public ObjectVisitor {
- public:
-  explicit ReplacingVisitor(Code* original, Code* substitution)
-    : original_(original), substitution_(substitution) {
-  }
-
-  void VisitPointers(Object** start, Object** end) override {
-    for (Object** p = start; p < end; p++) {
-      if (*p == original_) {
-        *p = substitution_;
-      }
-    }
-  }
-
-  void VisitCodeEntry(Address entry) override {
-    if (Code::GetObjectFromEntryAddress(entry) == original_) {
-      Address substitution_entry = substitution_->instruction_start();
-      Memory::Address_at(entry) = substitution_entry;
-    }
-  }
-
-  void VisitCodeTarget(RelocInfo* rinfo) override {
-    if (RelocInfo::IsCodeTarget(rinfo->rmode()) &&
-        Code::GetCodeFromTargetAddress(rinfo->target_address()) == original_) {
-      Address substitution_entry = substitution_->instruction_start();
-      rinfo->set_target_address(substitution_entry);
-    }
-  }
-
-  void VisitDebugTarget(RelocInfo* rinfo) override { VisitCodeTarget(rinfo); }
-
- private:
-  Code* original_;
-  Code* substitution_;
-};
-
-
 // Finds all references to original and replaces them with substitution.
 static void ReplaceCodeObject(Handle<Code> original,
                               Handle<Code> substitution) {
@@ -797,62 +731,42 @@
   // to code objects (that are never in new space) without worrying about
   // write barriers.
   Heap* heap = original->GetHeap();
-  HeapIterator iterator(heap);
-
-  DCHECK(!heap->InNewSpace(*substitution));
-
-  ReplacingVisitor visitor(*original, *substitution);
-
-  // Iterate over all roots. Stack frames may have pointers into the original
-  // code, so temporarily replace the pointers with offset numbers
-  // in prologue/epilogue.
-  heap->IterateRoots(&visitor, VISIT_ALL);
-
+  HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
   // Now iterate over all pointers of all objects, including code_target
   // implicit pointers.
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
-    obj->Iterate(&visitor);
+    if (obj->IsJSFunction()) {
+      JSFunction* fun = JSFunction::cast(obj);
+      if (fun->code() == *original) fun->ReplaceCode(*substitution);
+    } else if (obj->IsSharedFunctionInfo()) {
+      SharedFunctionInfo* info = SharedFunctionInfo::cast(obj);
+      if (info->code() == *original) info->set_code(*substitution);
+    }
   }
 }
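
The rewritten body drops the general ReplacingVisitor in favor of a filtered heap walk: only JSFunction and SharedFunctionInfo hold the code pointer that needs patching, so only those two types are inspected. Reduced to a standalone sketch with toy types:

    #include <cassert>
    #include <vector>

    struct Code {};
    struct Fn { Code* code; };

    // Walk all objects and swap any reference to `original` for
    // `substitution`, the shape of the simplified ReplaceCodeObject above.
    void ReplaceCode(std::vector<Fn>& heap, Code* original, Code* substitution) {
      for (Fn& f : heap)
        if (f.code == original) f.code = substitution;
    }

    int main() {
      Code a, b;
      std::vector<Fn> heap = {{&a}, {&b}, {&a}};
      ReplaceCode(heap, &a, &b);
      assert(heap[0].code == &b && heap[2].code == &b);
      return 0;
    }
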
 
-
-// Patch function literals.
-// Name 'literals' is a misnomer. Rather it's a cache for complex object
-// boilerplates and for a native context. We must clean cached values.
-// Additionally we may need to allocate a new array if number of literals
-// changed.
-class LiteralFixer {
+// Patch function feedback vector.
+// The feedback vector is a cache for complex object boilerplates and for a
+// native context. We must clean cached values, or if the structure of the
+// vector itself changes we need to allocate a new one.
+class FeedbackVectorFixer {
  public:
-  static void PatchLiterals(FunctionInfoWrapper* compile_info_wrapper,
-                            Handle<SharedFunctionInfo> shared_info,
-                            bool feedback_metadata_changed, Isolate* isolate) {
-    int new_literal_count = compile_info_wrapper->GetLiteralCount();
-    int old_literal_count = shared_info->num_literals();
+  static void PatchFeedbackVector(FunctionInfoWrapper* compile_info_wrapper,
+                                  Handle<SharedFunctionInfo> shared_info,
+                                  Isolate* isolate) {
+    // When feedback metadata changes, we have to create new array instances.
+    // Since we cannot create instances while iterating the heap, we first
+    // collect all functions and then fix their feedback vectors.
+    Handle<FixedArray> function_instances =
+        CollectJSFunctions(shared_info, isolate);
 
-    if (old_literal_count == new_literal_count && !feedback_metadata_changed) {
-      // If literal count didn't change, simply go over all functions
-      // and clear literal arrays.
-      ClearValuesVisitor visitor;
-      IterateJSFunctions(shared_info, &visitor);
-    } else {
-      // When literal count changes, we have to create new array instances.
-      // Since we cannot create instances when iterating heap, we should first
-      // collect all functions and fix their literal arrays.
-      Handle<FixedArray> function_instances =
-          CollectJSFunctions(shared_info, isolate);
-      Handle<TypeFeedbackMetadata> feedback_metadata(
-          shared_info->feedback_metadata());
-
-      for (int i = 0; i < function_instances->length(); i++) {
-        Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
-        Handle<TypeFeedbackVector> vector =
-            TypeFeedbackVector::New(isolate, feedback_metadata);
-        Handle<LiteralsArray> new_literals =
-            LiteralsArray::New(isolate, vector, new_literal_count);
-        fun->set_literals(*new_literals);
-      }
-
-      shared_info->set_num_literals(new_literal_count);
+    for (int i = 0; i < function_instances->length(); i++) {
+      Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
+      Handle<Cell> new_cell = isolate->factory()->NewManyClosuresCell(
+          isolate->factory()->undefined_value());
+      fun->set_feedback_vector_cell(*new_cell);
+      // Only create feedback vectors if we already have the metadata.
+      if (shared_info->is_compiled()) JSFunction::EnsureLiterals(fun);
     }
   }
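
The comment above encodes a real GC constraint: nothing may be allocated while a heap iteration is in progress, so the fixer first snapshots the affected functions and only then mutates them. The same two-phase, collect-then-mutate pattern as a standalone sketch:

    #include <cassert>
    #include <vector>

    struct Fn { int generation = 0; };

    // Phase 1: while "iterating the heap" (a plain vector stands in for the
    // heap walk, during which mutation would be forbidden), only collect.
    // Phase 2: with the iteration finished, mutate and allocate freely.
    void BumpAll(std::vector<Fn>& heap) {
      std::vector<Fn*> snapshot;
      for (Fn& f : heap) snapshot.push_back(&f);
      for (Fn* f : snapshot) f->generation += 1;
    }

    int main() {
      std::vector<Fn> heap(3);
      BumpAll(heap);
      assert(heap[1].generation == 1);
      return 0;
    }
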
 
@@ -891,17 +805,6 @@
     return result;
   }
 
-  class ClearValuesVisitor {
-   public:
-    void visit(JSFunction* fun) {
-      LiteralsArray* literals = fun->literals();
-      int len = literals->literals_count();
-      for (int j = 0; j < len; j++) {
-        literals->set_literal_undefined(j);
-      }
-    }
-  };
-
   class CountVisitor {
    public:
     void visit(JSFunction* fun) {
@@ -974,7 +877,6 @@
   Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
   Handle<SharedFunctionInfo> new_shared_info =
       compile_info_wrapper.GetSharedFunctionInfo();
-  bool feedback_metadata_changed = false;
 
   if (shared_info->is_compiled()) {
     // Take whatever code we can get from the new shared function info. We
@@ -1019,11 +921,12 @@
     shared_info->set_outer_scope_info(new_shared_info->outer_scope_info());
     shared_info->DisableOptimization(kLiveEdit);
     // Update the type feedback vector, if needed.
-    Handle<TypeFeedbackMetadata> new_feedback_metadata(
+    Handle<FeedbackMetadata> new_feedback_metadata(
         new_shared_info->feedback_metadata());
-    feedback_metadata_changed =
-        new_feedback_metadata->DiffersFrom(shared_info->feedback_metadata());
     shared_info->set_feedback_metadata(*new_feedback_metadata);
+  } else {
+    shared_info->set_feedback_metadata(
+        FeedbackMetadata::cast(isolate->heap()->empty_fixed_array()));
   }
 
   int start_position = compile_info_wrapper.GetStartPosition();
@@ -1031,22 +934,43 @@
   shared_info->set_start_position(start_position);
   shared_info->set_end_position(end_position);
 
-  LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info,
-                              feedback_metadata_changed, isolate);
+  FeedbackVectorFixer::PatchFeedbackVector(&compile_info_wrapper, shared_info,
+                                           isolate);
 
   DeoptimizeDependentFunctions(*shared_info);
   isolate->compilation_cache()->Remove(shared_info);
 }
 
-
-void LiveEdit::FunctionSourceUpdated(Handle<JSArray> shared_info_array) {
+void LiveEdit::FunctionSourceUpdated(Handle<JSArray> shared_info_array,
+                                     int new_function_literal_id) {
   SharedInfoWrapper shared_info_wrapper(shared_info_array);
   Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
 
+  shared_info->set_function_literal_id(new_function_literal_id);
   DeoptimizeDependentFunctions(*shared_info);
   shared_info_array->GetIsolate()->compilation_cache()->Remove(shared_info);
 }
 
+void LiveEdit::FixupScript(Handle<Script> script, int max_function_literal_id) {
+  Isolate* isolate = script->GetIsolate();
+  Handle<FixedArray> old_infos(script->shared_function_infos(), isolate);
+  Handle<FixedArray> new_infos(
+      isolate->factory()->NewFixedArray(max_function_literal_id + 1));
+  script->set_shared_function_infos(*new_infos);
+  SharedFunctionInfo::ScriptIterator iterator(isolate, old_infos);
+  while (SharedFunctionInfo* shared = iterator.Next()) {
+    // We can't use SharedFunctionInfo::SetScript(info, undefined_value()) here,
+    // as we severed the link from the Script to the SharedFunctionInfo above.
+    Handle<SharedFunctionInfo> info(shared, isolate);
+    info->set_script(isolate->heap()->undefined_value());
+    Handle<Object> new_noscript_list = WeakFixedArray::Add(
+        isolate->factory()->noscript_shared_function_infos(), info);
+    isolate->heap()->SetRootNoScriptSharedFunctionInfos(*new_noscript_list);
+
+    // Put the SharedFunctionInfo at its new, correct location.
+    SharedFunctionInfo::SetScript(info, script);
+  }
+}
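
FixupScript rebuilds the script's shared-function-info table so each entry ends up at the slot named by its (possibly changed) function_literal_id. Stripped of V8 handles and root-list bookkeeping, the re-slotting amounts to this sketch (types are illustrative):

    #include <cassert>
    #include <vector>

    struct Info { int function_literal_id; };

    // Size the new table for the largest id, then place every surviving
    // entry at the slot named by its literal id, as FixupScript does.
    std::vector<const Info*> Reslot(const std::vector<Info>& old_infos,
                                    int max_function_literal_id) {
      std::vector<const Info*> new_infos(max_function_literal_id + 1, nullptr);
      for (const Info& info : old_infos)
        new_infos[info.function_literal_id] = &info;
      return new_infos;
    }

    int main() {
      std::vector<Info> old_infos = {{2}, {0}};
      auto table = Reslot(old_infos, 3);
      assert(table[2] == &old_infos[0] && table[1] == nullptr);
      return 0;
    }
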
 
 void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
                                  Handle<Object> script_handle) {
@@ -1173,6 +1097,10 @@
   copy->set_eval_from_shared(original->eval_from_shared());
   copy->set_eval_from_position(original->eval_from_position());
 
+  Handle<FixedArray> infos(isolate->factory()->NewFixedArray(
+      original->shared_function_infos()->length()));
+  copy->set_shared_function_infos(*infos);
+
   // Copy all the flags, but clear compilation state.
   copy->set_flags(original->flags());
   copy->set_compilation_state(Script::COMPILATION_STATE_INITIAL);
@@ -1180,7 +1108,6 @@
   return copy;
 }
 
-
 Handle<Object> LiveEdit::ChangeScriptSource(Handle<Script> original_script,
                                             Handle<String> new_source,
                                             Handle<Object> old_script_name) {
@@ -1255,185 +1182,6 @@
   return false;
 }
 
-
-// Iterates over handler chain and removes all elements that are inside
-// frames being dropped.
-static bool FixTryCatchHandler(StackFrame* top_frame,
-                               StackFrame* bottom_frame) {
-  Address* pointer_address =
-      &Memory::Address_at(top_frame->isolate()->get_address_from_id(
-          Isolate::kHandlerAddress));
-
-  while (*pointer_address < top_frame->sp()) {
-    pointer_address = &Memory::Address_at(*pointer_address);
-  }
-  Address* above_frame_address = pointer_address;
-  while (*pointer_address < bottom_frame->fp()) {
-    pointer_address = &Memory::Address_at(*pointer_address);
-  }
-  bool change = *above_frame_address != *pointer_address;
-  *above_frame_address = *pointer_address;
-  return change;
-}
-
-
-// Initializes an artificial stack frame. The data it contains is used for:
-//  a. the successful operation of the frame dropper code, which eventually
-//     gets control,
-//  b. compatibility with a typed frame structure for the various stack
-//     iterators.
-// Frame structure (conforms to InternalFrame structure):
-//   -- function
-//   -- code
-//   -- SMI marker
-//   -- frame base
-static void SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
-                                   Handle<Code> code) {
-  DCHECK(bottom_js_frame->is_java_script());
-  Address fp = bottom_js_frame->fp();
-  Memory::Object_at(fp + FrameDropperFrameConstants::kFunctionOffset) =
-      Memory::Object_at(fp + StandardFrameConstants::kFunctionOffset);
-  Memory::Object_at(fp + FrameDropperFrameConstants::kFrameTypeOffset) =
-      Smi::FromInt(StackFrame::INTERNAL);
-  Memory::Object_at(fp + FrameDropperFrameConstants::kCodeOffset) = *code;
-}
-
-
-// Removes the specified range of frames from the stack. There may be one or
-// more frames in the range. In any case the bottom frame is restarted rather
-// than dropped, and therefore has to be a JavaScript frame.
-// Returns error message or NULL.
-static const char* DropFrames(Vector<StackFrame*> frames, int top_frame_index,
-                              int bottom_js_frame_index,
-                              LiveEditFrameDropMode* mode) {
-  if (!LiveEdit::kFrameDropperSupported) {
-    return "Stack manipulations are not supported in this architecture.";
-  }
-
-  StackFrame* pre_top_frame = frames[top_frame_index - 1];
-  StackFrame* top_frame = frames[top_frame_index];
-  StackFrame* bottom_js_frame = frames[bottom_js_frame_index];
-
-  DCHECK(bottom_js_frame->is_java_script());
-
-  // Check the nature of the top frame.
-  Isolate* isolate = bottom_js_frame->isolate();
-  Code* pre_top_frame_code = pre_top_frame->LookupCode();
-  bool frame_has_padding = true;
-  if (pre_top_frame_code ==
-      isolate->builtins()->builtin(Builtins::kSlot_DebugBreak)) {
-    // OK, we can drop debug break slot.
-    *mode = LIVE_EDIT_FRAME_DROPPED_IN_DEBUG_SLOT_CALL;
-  } else if (pre_top_frame_code ==
-             isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit)) {
-    // OK, we can drop our own code.
-    pre_top_frame = frames[top_frame_index - 2];
-    top_frame = frames[top_frame_index - 1];
-    *mode = LIVE_EDIT_CURRENTLY_SET_MODE;
-    frame_has_padding = false;
-  } else if (pre_top_frame_code ==
-             isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
-    *mode = LIVE_EDIT_FRAME_DROPPED_IN_RETURN_CALL;
-  } else if (pre_top_frame_code->kind() == Code::STUB &&
-             CodeStub::GetMajorKey(pre_top_frame_code) == CodeStub::CEntry) {
-    // Entry from our unit tests on 'debugger' statement.
-    // It's fine, we support this case.
-    *mode = LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL;
-    // We don't have a padding from 'debugger' statement call.
-    // Here the stub is CEntry, it's not debug-only and can't be padded.
-    // If anyone would complain, a proxy padded stub could be added.
-    frame_has_padding = false;
-  } else if (pre_top_frame->type() == StackFrame::ARGUMENTS_ADAPTOR) {
-    // This must be an adaptor that remains from frame dropping and
-    // is still on the stack. A frame dropper frame must be above it.
-    DCHECK(frames[top_frame_index - 2]->LookupCode() ==
-           isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit));
-    pre_top_frame = frames[top_frame_index - 3];
-    top_frame = frames[top_frame_index - 2];
-    *mode = LIVE_EDIT_CURRENTLY_SET_MODE;
-    frame_has_padding = false;
-  } else if (pre_top_frame_code->kind() == Code::BYTECODE_HANDLER) {
-    // Interpreted bytecode takes up two stack frames, one for the bytecode
-    // handler and one for the interpreter entry trampoline. Therefore we shift
-    // up by one frame.
-    *mode = LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL;
-    pre_top_frame = frames[top_frame_index - 2];
-    top_frame = frames[top_frame_index - 1];
-  } else {
-    return "Unknown structure of stack above changing function";
-  }
-
-  Address unused_stack_top = top_frame->sp();
-  Address unused_stack_bottom =
-      bottom_js_frame->fp() - FrameDropperFrameConstants::kFixedFrameSize +
-      2 * kPointerSize;  // Bigger address end is exclusive.
-
-  Address* top_frame_pc_address = top_frame->pc_address();
-
-  // top_frame may be damaged below this point. Do not use it.
-  DCHECK(!(top_frame = NULL));
-
-  if (unused_stack_top > unused_stack_bottom) {
-    if (frame_has_padding) {
-      int shortage_bytes =
-          static_cast<int>(unused_stack_top - unused_stack_bottom);
-
-      Address padding_start =
-          pre_top_frame->fp() -
-          (FrameDropperFrameConstants::kFixedFrameSize - kPointerSize);
-
-      Address padding_pointer = padding_start;
-      Smi* padding_object = Smi::FromInt(LiveEdit::kFramePaddingValue);
-      while (Memory::Object_at(padding_pointer) == padding_object) {
-        padding_pointer -= kPointerSize;
-      }
-      int padding_counter =
-          Smi::cast(Memory::Object_at(padding_pointer))->value();
-      if (padding_counter * kPointerSize < shortage_bytes) {
-        return "Not enough space for frame dropper frame "
-            "(even with padding frame)";
-      }
-      Memory::Object_at(padding_pointer) =
-          Smi::FromInt(padding_counter - shortage_bytes / kPointerSize);
-
-      StackFrame* pre_pre_frame = frames[top_frame_index - 2];
-
-      MemMove(padding_start + kPointerSize - shortage_bytes,
-              padding_start + kPointerSize,
-              FrameDropperFrameConstants::kFixedFrameSize - kPointerSize);
-
-      pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
-      pre_pre_frame->SetCallerFp(pre_top_frame->fp());
-      unused_stack_top -= shortage_bytes;
-
-      STATIC_ASSERT(sizeof(Address) == kPointerSize);
-      top_frame_pc_address -= shortage_bytes / kPointerSize;
-    } else {
-      return "Not enough space for frame dropper frame";
-    }
-  }
-
-  // Committing now. After this point we should return only NULL value.
-
-  FixTryCatchHandler(pre_top_frame, bottom_js_frame);
-  // Make sure FixTryCatchHandler is idempotent.
-  DCHECK(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
-
-  Handle<Code> code = isolate->builtins()->FrameDropper_LiveEdit();
-  *top_frame_pc_address = code->entry();
-  pre_top_frame->SetCallerFp(bottom_js_frame->fp());
-
-  SetUpFrameDropperFrame(bottom_js_frame, code);
-
-  for (Address a = unused_stack_top;
-      a < unused_stack_bottom;
-      a += kPointerSize) {
-    Memory::Object_at(a) = Smi::kZero;
-  }
-
-  return NULL;
-}
-
-
 // Describes a set of call frames that execute any of the listed functions.
 // Finding no such frames does not indicate an error.
 class MultipleFunctionTarget {
@@ -1521,7 +1269,6 @@
   Zone zone(isolate->allocator(), ZONE_NAME);
   Vector<StackFrame*> frames = CreateStackMap(isolate, &zone);
 
-
   int top_frame_index = -1;
   int frame_index = 0;
   for (; frame_index < frames.length(); frame_index++) {
@@ -1606,24 +1353,11 @@
     return target.GetNotFoundMessage();
   }
 
-  LiveEditFrameDropMode drop_mode = LIVE_EDIT_FRAMES_UNTOUCHED;
-  const char* error_message =
-      DropFrames(frames, top_frame_index, bottom_js_frame_index, &drop_mode);
-
-  if (error_message != NULL) {
-    return error_message;
+  if (!LiveEdit::kFrameDropperSupported) {
+    return "Stack manipulations are not supported in this architecture.";
   }
 
-  // Adjust break_frame after some frames has been dropped.
-  StackFrame::Id new_id = StackFrame::NO_ID;
-  for (int i = bottom_js_frame_index + 1; i < frames.length(); i++) {
-    if (frames[i]->type() == StackFrame::JAVA_SCRIPT ||
-        frames[i]->type() == StackFrame::INTERPRETED) {
-      new_id = frames[i]->id();
-      break;
-    }
-  }
-  debug->FramesHaveBeenDropped(new_id, drop_mode);
+  debug->ScheduleFrameRestart(frames[bottom_js_frame_index]);
   return NULL;
 }
 
@@ -1669,7 +1403,7 @@
   FunctionPatchabilityStatus active = FUNCTION_BLOCKED_ACTIVE_GENERATOR;
 
   Heap* heap = isolate->heap();
-  HeapIterator iterator(heap);
+  HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
   HeapObject* obj = NULL;
   while ((obj = iterator.next()) != NULL) {
     if (!obj->IsJSGeneratorObject()) continue;
@@ -1856,10 +1590,8 @@
   // Recurse using the regular traversal.
   AstTraversalVisitor::VisitFunctionLiteral(node);
   // FunctionDone are called in post-order.
-  // TODO(jgruber): If required, replace the (linear cost)
-  // FindSharedFunctionInfo call with a more efficient implementation.
   Handle<SharedFunctionInfo> info =
-      script_->FindSharedFunctionInfo(node).ToHandleChecked();
+      script_->FindSharedFunctionInfo(isolate_, node).ToHandleChecked();
   FunctionDone(info, node->scope());
 }
 
@@ -1868,8 +1600,7 @@
   FunctionInfoWrapper info = FunctionInfoWrapper::Create(isolate_);
   info.SetInitialProperties(fun->name(), fun->start_position(),
                             fun->end_position(), fun->parameter_count(),
-                            fun->materialized_literal_count(),
-                            current_parent_index_);
+                            current_parent_index_, fun->function_literal_id());
   current_parent_index_ = len_;
   SetElementSloppy(result_, len_, info.GetJSArray());
   len_++;
diff --git a/src/debug/liveedit.h b/src/debug/liveedit.h
index 2034dcb..4ad1bc5 100644
--- a/src/debug/liveedit.h
+++ b/src/debug/liveedit.h
@@ -74,8 +74,6 @@
  public:
   static void InitializeThreadLocal(Debug* debug);
 
-  static bool SetAfterBreakTarget(Debug* debug);
-
   MUST_USE_RESULT static MaybeHandle<JSArray> GatherCompileInfo(
       Handle<Script> script,
       Handle<String> source);
@@ -83,7 +81,10 @@
   static void ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
                                   Handle<JSArray> shared_info_array);
 
-  static void FunctionSourceUpdated(Handle<JSArray> shared_info_array);
+  static void FixupScript(Handle<Script> script, int max_function_literal_id);
+
+  static void FunctionSourceUpdated(Handle<JSArray> shared_info_array,
+                                    int new_function_literal_id);
 
   // Updates script field in FunctionSharedInfo.
   static void SetFunctionScript(Handle<JSValue> function_wrapper,
@@ -143,40 +144,6 @@
 
   // Architecture-specific constant.
   static const bool kFrameDropperSupported;
-
-  /**
-   * Defines layout of a stack frame that supports padding. This is a regular
-   * internal frame that has a flexible stack structure. LiveEdit can shift
-   * its lower part up the stack, taking up the 'padding' space when additional
-   * stack memory is required.
- * Such a frame is expected immediately above the topmost JavaScript frame.
-   *
-   * Stack Layout:
-   *   --- Top
-   *   LiveEdit routine frames
-   *   ---
-   *   C frames of debug handler
-   *   ---
-   *   ...
-   *   ---
-   *      An internal frame that has n padding words:
-   *      - any number of words as needed by code -- upper part of frame
-   *      - padding size: a Smi storing n -- current size of padding
-   *      - padding: n words filled with kPaddingValue in form of Smi
-   *      - 3 context/type words of a regular InternalFrame
-   *      - fp
-   *   ---
-   *      Topmost JavaScript frame
-   *   ---
-   *   ...
-   *   --- Bottom
-   */
-  // A number of words that should be reserved on stack for the LiveEdit use.
-  // Stored on stack in form of Smi.
-  static const int kFramePaddingInitialSize = 1;
-  // A value that padding words are filled with (in form of Smi). Going
-  // bottom-top, the first word not having this value is a counter word.
-  static const int kFramePaddingValue = kFramePaddingInitialSize + 1;
 };
 
 
@@ -277,8 +244,8 @@
   }
 
   void SetInitialProperties(Handle<String> name, int start_position,
-                            int end_position, int param_num, int literal_count,
-                            int parent_index);
+                            int end_position, int param_num, int parent_index,
+                            int function_literal_id);
 
   void SetFunctionScopeInfo(Handle<Object> scope_info_array) {
     this->SetField(kFunctionScopeInfoOffset_, scope_info_array);
@@ -288,10 +255,6 @@
 
   Handle<SharedFunctionInfo> GetSharedFunctionInfo();
 
-  int GetLiteralCount() {
-    return this->GetSmiValueField(kLiteralNumOffset_);
-  }
-
   int GetParentIndex() {
     return this->GetSmiValueField(kParentIndexOffset_);
   }
@@ -310,7 +273,7 @@
   static const int kFunctionScopeInfoOffset_ = 4;
   static const int kParentIndexOffset_ = 5;
   static const int kSharedFunctionInfoOffset_ = 6;
-  static const int kLiteralNumOffset_ = 7;
+  static const int kFunctionLiteralIdOffset_ = 7;
   static const int kSize_ = 8;
 
   friend class JSArrayBasedStruct<FunctionInfoWrapper>;
diff --git a/src/debug/liveedit.js b/src/debug/liveedit.js
index e9ee809..8e20654 100644
--- a/src/debug/liveedit.js
+++ b/src/debug/liveedit.js
@@ -27,15 +27,11 @@
   // Imports
 
   var FindScriptSourcePosition = global.Debug.findScriptSourcePosition;
-  var GetScriptBreakPoints;
   var GlobalArray = global.Array;
   var MathFloor = global.Math.floor;
+  var MathMax = global.Math.max;
   var SyntaxError = global.SyntaxError;
 
-  utils.Import(function(from) {
-    GetScriptBreakPoints = from.GetScriptBreakPoints;
-  });
-
   // -------------------------------------------------------------------
 
   // Forward declaration for minifier.
@@ -80,6 +76,10 @@
       }
       throw failure;
     }
+
+    var max_function_literal_id = new_compile_info.reduce(
+        (max, info) => MathMax(max, info.function_literal_id), 0);
+
     var root_new_node = BuildCodeInfoTree(new_compile_info);
 
     // Link recompiled script data with other data.
@@ -170,10 +170,6 @@
     // command for correct stack state if the stack was modified.
     preview_description.stack_modified = dropped_functions_number != 0;
 
-    // Start with breakpoints. Convert their line/column positions and
-    // temporarily remove them.
-    var break_points_restorer = TemporaryRemoveBreakPoints(script, change_log);
-
     var old_script;
 
     // Create an old script only if there are functions that should be linked
@@ -186,8 +182,7 @@
 
       // Update the script text and create a new script representing an old
       // version of the script.
-      old_script = %LiveEditReplaceScript(script, new_source,
-          old_script_name);
+      old_script = %LiveEditReplaceScript(script, new_source, old_script_name);
 
       var link_to_old_script_report = new GlobalArray();
       change_log.push( { linked_to_old_script: link_to_old_script_report } );
@@ -201,12 +196,6 @@
       preview_description.created_script_name = old_script_name;
     }
 
-    // Link to an actual script all the functions that we are going to use.
-    for (var i = 0; i < link_to_original_script_list.length; i++) {
-      %LiveEditFunctionSetScript(
-          link_to_original_script_list[i].info.shared_function_info, script);
-    }
-
     for (var i = 0; i < replace_code_list.length; i++) {
       PatchFunctionCode(replace_code_list[i], change_log);
     }
@@ -221,14 +210,24 @@
           position_patch_report);
 
       if (update_positions_list[i].live_shared_function_infos) {
-        update_positions_list[i].live_shared_function_infos.
-            forEach(function (info) {
-                %LiveEditFunctionSourceUpdated(info.raw_array);
-              });
+        var new_function_literal_id =
+            update_positions_list[i]
+                .corresponding_node.info.function_literal_id;
+        update_positions_list[i].live_shared_function_infos.forEach(function(
+            info) {
+          %LiveEditFunctionSourceUpdated(
+              info.raw_array, new_function_literal_id);
+        });
       }
     }
 
-    break_points_restorer(pos_translator, old_script);
+    %LiveEditFixupScript(script, max_function_literal_id);
+
+    // Link all the functions we're going to use to an actual script.
+    for (var i = 0; i < link_to_original_script_list.length; i++) {
+      %LiveEditFunctionSetScript(
+          link_to_original_script_list[i].info.shared_function_info, script);
+    }
 
     preview_description.updated = true;
     return preview_description;
@@ -368,79 +367,6 @@
     }
   }
 
-
-  // Returns function that restores breakpoints.
-  function TemporaryRemoveBreakPoints(original_script, change_log) {
-    var script_break_points = GetScriptBreakPoints(original_script);
-
-    var break_points_update_report = [];
-    change_log.push( { break_points_update: break_points_update_report } );
-
-    var break_point_old_positions = [];
-    for (var i = 0; i < script_break_points.length; i++) {
-      var break_point = script_break_points[i];
-
-      break_point.clear();
-
-      // TODO(LiveEdit): be careful with resource offset here.
-      var break_point_position = FindScriptSourcePosition(original_script,
-          break_point.line(), break_point.column());
-
-      var old_position_description = {
-          position: break_point_position,
-          line: break_point.line(),
-          column: break_point.column()
-      };
-      break_point_old_positions.push(old_position_description);
-    }
-
-
-    // Restores breakpoints and creates their copies in the "old" copy of
-    // the script.
-    return function (pos_translator, old_script_copy_opt) {
-      // Update breakpoints (change their positions and restore them in the
-      // old version of the script).
-      for (var i = 0; i < script_break_points.length; i++) {
-        var break_point = script_break_points[i];
-        if (old_script_copy_opt) {
-          var clone = break_point.cloneForOtherScript(old_script_copy_opt);
-          clone.set(old_script_copy_opt);
-
-          break_points_update_report.push( {
-            type: "copied_to_old",
-            id: break_point.number(),
-            new_id: clone.number(),
-            positions: break_point_old_positions[i]
-            } );
-        }
-
-        var updated_position = pos_translator.Translate(
-            break_point_old_positions[i].position,
-            PosTranslator.ShiftWithTopInsideChunkHandler);
-
-        var new_location =
-            original_script.locationFromPosition(updated_position, false);
-
-        break_point.update_positions(new_location.line, new_location.column);
-
-        var new_position_description = {
-            position: updated_position,
-            line: new_location.line,
-            column: new_location.column
-        };
-
-        break_point.set(original_script);
-
-        break_points_update_report.push( { type: "position_changed",
-          id: break_point.number(),
-          old_positions: break_point_old_positions[i],
-          new_positions: new_position_description
-          } );
-      }
-    };
-  }
-
-
   function Assert(condition, message) {
     if (!condition) {
       if (message) {
@@ -742,6 +668,8 @@
                   old_children[old_index].corresponding_node = UNDEFINED;
                   old_node.status = FunctionStatus.CHANGED;
                 }
+              } else {
+                ProcessNode(old_children[old_index], new_children[new_index]);
               }
             } else {
               old_children[old_index].status = FunctionStatus.DAMAGED;
@@ -845,6 +773,7 @@
     this.scope_info = raw_array[4];
     this.outer_index = raw_array[5];
     this.shared_function_info = raw_array[6];
+    this.function_literal_id = raw_array[7];
     this.next_sibling_index = null;
     this.raw_array = raw_array;
   }
diff --git a/src/debug/mips/debug-mips.cc b/src/debug/mips/debug-mips.cc
index 4d8b54f..5b809e6 100644
--- a/src/debug/mips/debug-mips.cc
+++ b/src/debug/mips/debug-mips.cc
@@ -69,16 +69,6 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Load padding words on stack.
-    __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
-    __ Subu(sp, sp,
-            Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize));
-    for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) {
-      __ sw(at, MemOperand(sp, kPointerSize * i));
-    }
-    __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
-    __ push(at);
-
     // Push arguments for DebugBreak call.
     if (mode == SAVE_RESULT_REGISTER) {
       // Break on return.
@@ -104,47 +94,47 @@
         }
       }
     }
-
-    // Don't bother removing padding bytes pushed on the stack
-    // as the frame is going to be restored right away.
-
     // Leave the internal frame.
   }
 
-  // Now that the break point has been handled, resume normal execution by
-  // jumping to the target address intended by the caller and that was
-  // overwritten by the address of DebugBreakXXX.
-  ExternalReference after_break_target =
-      ExternalReference::debug_after_break_target_address(masm->isolate());
-  __ li(t9, Operand(after_break_target));
-  __ lw(t9, MemOperand(t9));
-  __ Jump(t9);
+  __ MaybeDropFrames();
+
+  // Return to caller.
+  __ Ret();
 }
 
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+  }
+  __ MaybeDropFrames();
 
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  // We do not know our frame height, but set sp based on fp.
-  __ lw(a1, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
+  // Return to caller.
+  __ Ret();
+}
+
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+  // Frame is being dropped:
+  // - Drop to the target frame specified by a1.
+  // - Look up current function on the frame.
+  // - Leave the frame.
+  // - Restart the frame by calling the function.
+  __ mov(fp, a1);
+  __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
 
   // Pop return address and frame.
   __ LeaveFrame(StackFrame::INTERNAL);
 
-  ParameterCount dummy(0);
-  __ FloodFunctionIfStepping(a1, no_reg, dummy, dummy);
+  __ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(a0,
+        FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ mov(a2, a0);
 
-  // Load context from the function.
-  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
-  // Clear new.target as a safety measure.
-  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
-
-  // Get function code.
-  __ lw(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
-  __ Addu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  // Re-run JSFunction, a1 is function, cp is context.
-  __ Jump(t9);
+  ParameterCount dummy1(a2);
+  ParameterCount dummy2(a0);
+  __ InvokeFunction(a1, dummy1, dummy2, JUMP_FUNCTION,
+                    CheckDebugStepCallWrapper());
 }
 
 
diff --git a/src/debug/mips64/debug-mips64.cc b/src/debug/mips64/debug-mips64.cc
index 2a6ce7b..b8dbbfb 100644
--- a/src/debug/mips64/debug-mips64.cc
+++ b/src/debug/mips64/debug-mips64.cc
@@ -65,22 +65,23 @@
   return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
 }
 
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+  }
+  __ MaybeDropFrames();
+
+  // Return to caller.
+  __ Ret();
+}
+
 void DebugCodegen::GenerateDebugBreakStub(MacroAssembler* masm,
                                           DebugBreakCallHelperMode mode) {
   __ RecordComment("Debug break");
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Load padding words on stack.
-    __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
-    __ Dsubu(sp, sp,
-            Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize));
-    for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) {
-      __ sd(at, MemOperand(sp, kPointerSize * i));
-    }
-    __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
-    __ push(at);
-
     // Push arguments for DebugBreak call.
     if (mode == SAVE_RESULT_REGISTER) {
       // Break on return.
@@ -107,46 +108,36 @@
       }
     }
 
-    // Don't bother removing padding bytes pushed on the stack
-    // as the frame is going to be restored right away.
-
     // Leave the internal frame.
   }
 
-  // Now that the break point has been handled, resume normal execution by
-  // jumping to the target address intended by the caller and that was
-  // overwritten by the address of DebugBreakXXX.
-  ExternalReference after_break_target =
-      ExternalReference::debug_after_break_target_address(masm->isolate());
-  __ li(t9, Operand(after_break_target));
-  __ ld(t9, MemOperand(t9));
-  __ Jump(t9);
+  __ MaybeDropFrames();
+
+  // Return to caller.
+  __ Ret();
 }
 
-
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  // We do not know our frame height, but set sp based on fp.
-  __ ld(a1, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+  // Frame is being dropped:
+  // - Drop to the target frame specified by a1.
+  // - Look up current function on the frame.
+  // - Leave the frame.
+  // - Restart the frame by calling the function.
+  __ mov(fp, a1);
+  __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
 
   // Pop return address and frame.
   __ LeaveFrame(StackFrame::INTERNAL);
 
-  ParameterCount dummy(0);
-  __ FloodFunctionIfStepping(a1, no_reg, dummy, dummy);
+  __ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(a0,
+        FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ mov(a2, a0);
 
-  // Load context from the function.
-  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
-  // Clear new.target as a safety measure.
-  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
-
-  // Get function code.
-  __ ld(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ ld(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
-  __ Daddu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  // Re-run JSFunction, a1 is function, cp is context.
-  __ Jump(t9);
+  ParameterCount dummy1(a2);
+  ParameterCount dummy2(a0);
+  __ InvokeFunction(a1, dummy1, dummy2, JUMP_FUNCTION,
+                    CheckDebugStepCallWrapper());
 }
 
 
diff --git a/src/debug/mirrors.js b/src/debug/mirrors.js
index 4bc86da..b534fec 100644
--- a/src/debug/mirrors.js
+++ b/src/debug/mirrors.js
@@ -13,8 +13,6 @@
 var JSONStringify = global.JSON.stringify;
 var MapEntries;
 var MapIteratorNext;
-var promiseStateSymbol = utils.ImportNow("promise_state_symbol");
-var promiseResultSymbol = utils.ImportNow("promise_result_symbol");
 var SetIteratorNext;
 var SetValues;
 
@@ -79,65 +77,15 @@
   GENERATOR_TYPE : 'generator',
 }
 
-
-// Handle id counters.
-var next_handle_ = 0;
-var next_transient_handle_ = -1;
-
-// Mirror cache.
-var mirror_cache_ = [];
-var mirror_cache_enabled_ = true;
-
-
-function MirrorCacheIsEmpty() {
-  return next_handle_ == 0 && mirror_cache_.length == 0;
-}
-
-
-function ToggleMirrorCache(value) {
-  mirror_cache_enabled_ = value;
-  ClearMirrorCache();
-}
-
-
-function ClearMirrorCache(value) {
-  next_handle_ = 0;
-  mirror_cache_ = [];
-}
-
-
-function ObjectIsPromise(value) {
-  return IS_RECEIVER(value) &&
-         !IS_UNDEFINED(%DebugGetProperty(value, promiseStateSymbol));
-}
-
-
 /**
  * Returns the mirror for a specified value or object.
  *
  * @param {value or Object} value the value or object to retrieve the mirror for
- * @param {boolean} transient indicate whether this object is transient and
- *    should not be added to the mirror cache. The default is not transient.
  * @returns {Mirror} the mirror that reflects the passed value or object
  */
-function MakeMirror(value, opt_transient) {
+function MakeMirror(value) {
   var mirror;
 
-  // Look for non transient mirrors in the mirror cache.
-  if (!opt_transient && mirror_cache_enabled_) {
-    for (var id in mirror_cache_) {
-      mirror = mirror_cache_[id];
-      if (mirror.value() === value) {
-        return mirror;
-      }
-      // Special check for NaN as NaN == NaN is false.
-      if (mirror.isNumber() && IsNaN(mirror.value()) &&
-          typeof value == 'number' && IsNaN(value)) {
-        return mirror;
-      }
-    }
-  }
-
   if (IS_UNDEFINED(value)) {
     mirror = new UndefinedMirror();
   } else if (IS_NULL(value)) {
@@ -156,7 +104,7 @@
     mirror = new DateMirror(value);
   } else if (IS_FUNCTION(value)) {
     mirror = new FunctionMirror(value);
-  } else if (IS_REGEXP(value)) {
+  } else if (%IsRegExp(value)) {
     mirror = new RegExpMirror(value);
   } else if (IS_ERROR(value)) {
     mirror = new ErrorMirror(value);
@@ -168,35 +116,19 @@
     mirror = new SetMirror(value);
   } else if (IS_MAP_ITERATOR(value) || IS_SET_ITERATOR(value)) {
     mirror = new IteratorMirror(value);
-  } else if (ObjectIsPromise(value)) {
+  } else if (%is_promise(value)) {
     mirror = new PromiseMirror(value);
   } else if (IS_GENERATOR(value)) {
     mirror = new GeneratorMirror(value);
   } else {
-    mirror = new ObjectMirror(value, MirrorType.OBJECT_TYPE, opt_transient);
+    mirror = new ObjectMirror(value, MirrorType.OBJECT_TYPE);
   }
 
-  if (mirror_cache_enabled_) mirror_cache_[mirror.handle()] = mirror;
   return mirror;
 }
 
 
 /**
- * Returns the mirror for a specified mirror handle.
- *
- * @param {number} handle the handle to find the mirror for
- * @returns {Mirror or undefined} the mirror with the requested handle or
- *     undefined if no mirror with the requested handle was found
- */
-function LookupMirror(handle) {
-  if (!mirror_cache_enabled_) {
-    throw %make_error(kDebugger, "Mirror cache is disabled");
-  }
-  return mirror_cache_[handle];
-}
-
-
-/**
  * Returns the mirror for the undefined value.
  *
  * @returns {Mirror} the mirror reflects the undefined value
@@ -231,11 +163,10 @@
 var kMaxProtocolStringLength = 80;
 
 
-// A copy of the PropertyType enum from property-details.h
+// A copy of the PropertyKind enum from property-details.h
 var PropertyType = {};
-PropertyType.Data                        = 0;
-PropertyType.DataConstant                = 2;
-PropertyType.AccessorConstant            = 3;
+PropertyType.Data     = 0;
+PropertyType.Accessor = 1;
 
 
 // Different attributes for a property.
@@ -500,23 +431,6 @@
 };
 
 
-/**
- * Allocate a handle id for this object.
- */
-Mirror.prototype.allocateHandle_ = function() {
-  if (mirror_cache_enabled_) this.handle_ = next_handle_++;
-};
-
-
-/**
- * Allocate a transient handle id for this object. Transient handles are
- * negative.
- */
-Mirror.prototype.allocateTransientHandle_ = function() {
-  this.handle_ = next_transient_handle_--;
-};
-
-
 Mirror.prototype.toText = function() {
   // Simple toText used when there is no specialization in a subclass.
   return "#<" + this.constructor.name + ">";
@@ -527,28 +441,16 @@
  * Base class for all value mirror objects.
  * @param {string} type The type of the mirror
  * @param {value} value The value reflected by this mirror
- * @param {boolean} transient indicate whether this object is transient with a
- *    transient handle
  * @constructor
  * @extends Mirror
  */
-function ValueMirror(type, value, transient) {
+function ValueMirror(type, value) {
   %_Call(Mirror, this, type);
   this.value_ = value;
-  if (!transient) {
-    this.allocateHandle_();
-  } else {
-    this.allocateTransientHandle_();
-  }
 }
 inherits(ValueMirror, Mirror);
 
 
-Mirror.prototype.handle = function() {
-  return this.handle_;
-};
-
-
 /**
  * Check whether this is a primitive value.
  * @return {boolean} True if the mirror reflects a primitive value
@@ -635,7 +537,7 @@
 
 
 NumberMirror.prototype.toText = function() {
-  return %_NumberToString(this.value_);
+  return %NumberToString(this.value_);
 };
 
 
@@ -693,14 +595,12 @@
 /**
  * Mirror object for objects.
  * @param {object} value The object reflected by this mirror
- * @param {boolean} transient indicate whether this object is transient with a
- *    transient handle
  * @constructor
  * @extends ValueMirror
  */
-function ObjectMirror(value, type, transient) {
+function ObjectMirror(value, type) {
   type = type || MirrorType.OBJECT_TYPE;
-  %_Call(ValueMirror, this, type, value, transient);
+  %_Call(ValueMirror, this, type, value);
 }
 inherits(ObjectMirror, ValueMirror);
 
@@ -807,7 +707,7 @@
 
     // Skip properties which are defined through accessors.
     var property = properties[i];
-    if (property.propertyType() != PropertyType.AccessorConstant) {
+    if (property.propertyType() == PropertyType.Data) {
       if (property.value_ === value.value_) {
         return property;
       }
@@ -1273,7 +1173,7 @@
 
 
 function PromiseGetStatus_(value) {
-  var status = %DebugGetProperty(value, promiseStateSymbol);
+  var status = %PromiseStatus(value);
   if (status == 0) return "pending";
   if (status == 1) return "resolved";
   return "rejected";
@@ -1281,7 +1181,7 @@
 
 
 function PromiseGetValue_(value) {
-  return %DebugGetProperty(value, promiseResultSymbol);
+  return %PromiseResult(value);
 }
 
 
@@ -1553,7 +1453,7 @@
 
 
 PropertyMirror.prototype.propertyType = function() {
-  return %DebugPropertyTypeFromDetails(this.details_);
+  return %DebugPropertyKindFromDetails(this.details_);
 };
 
 
@@ -1611,7 +1511,7 @@
  */
 PropertyMirror.prototype.isNative = function() {
   return this.is_interceptor_ ||
-         ((this.propertyType() == PropertyType.AccessorConstant) &&
+         ((this.propertyType() == PropertyType.Accessor) &&
           !this.hasGetter() && !this.hasSetter());
 };
 
@@ -2019,14 +1919,12 @@
 };
 
 
-FrameMirror.prototype.evaluate = function(source, disable_break,
-                                          opt_context_object) {
+FrameMirror.prototype.evaluate = function(source, throw_on_side_effect = false) {
   return MakeMirror(%DebugEvaluate(this.break_id_,
                                    this.details_.frameId(),
                                    this.details_.inlinedFrameIndex(),
                                    source,
-                                   TO_BOOLEAN(disable_break),
-                                   opt_context_object));
+                                   throw_on_side_effect));
 };
 
 
@@ -2325,13 +2223,10 @@
 
 
 ScopeMirror.prototype.scopeObject = function() {
-  // For local, closure and script scopes create a transient mirror
+  // For local, closure and script scopes create a mirror
   // as these objects are created on the fly materializing the local
   // or closure scopes and therefore will not preserve identity.
-  var transient = this.scopeType() == ScopeType.Local ||
-                  this.scopeType() == ScopeType.Closure ||
-                  this.scopeType() == ScopeType.Script;
-  return MakeMirror(this.details_.object(), transient);
+  return MakeMirror(this.details_.object());
 };
 
 
@@ -2350,7 +2245,6 @@
   %_Call(Mirror, this, MirrorType.SCRIPT_TYPE);
   this.script_ = script;
   this.context_ = new ContextMirror(script.context_data);
-  this.allocateHandle_();
 }
 inherits(ScriptMirror, Mirror);
 
@@ -2466,7 +2360,6 @@
 function ContextMirror(data) {
   %_Call(Mirror, this, MirrorType.CONTEXT_TYPE);
   this.data_ = data;
-  this.allocateHandle_();
 }
 inherits(ContextMirror, Mirror);
 
@@ -2475,580 +2368,11 @@
   return this.data_;
 };
 
-
-/**
- * Returns a mirror serializer
- *
- * @param {boolean} details Set to true to include details
- * @param {Object} options Options controlling the serialization
- *     The following options can be set:
- *       includeSource: include the full source of scripts
- * @returns {MirrorSerializer} mirror serializer
- */
-function MakeMirrorSerializer(details, options) {
-  return new JSONProtocolSerializer(details, options);
-}
-
-
-/**
- * Object for serializing a mirror object and its direct references.
- * @param {boolean} details Indicates whether to include details for the mirror
- *     serialized
- * @constructor
- */
-function JSONProtocolSerializer(details, options) {
-  this.details_ = details;
-  this.options_ = options;
-  this.mirrors_ = [ ];
-}
-
-
-/**
- * Returns a serialization of an object reference. The referenced objects are
- * added to the serialization state.
- *
- * @param {Mirror} mirror The mirror to serialize
- * @returns {String} JSON serialization
- */
-JSONProtocolSerializer.prototype.serializeReference = function(mirror) {
-  return this.serialize_(mirror, true, true);
-};
-
-
-/**
- * Returns a serialization of an object value. The referenced objects are
- * added to the serialization state.
- *
- * @param {Mirror} mirror The mirror to serialize
- * @returns {String} JSON serialization
- */
-JSONProtocolSerializer.prototype.serializeValue = function(mirror) {
-  var json = this.serialize_(mirror, false, true);
-  return json;
-};
-
-
-/**
- * Returns a serialization of all the objects referenced.
- *
- * @param {Mirror} mirror The mirror to serialize.
- * @returns {Array.<Object>} Array of the referenced objects converted to
- *     protocol objects.
- */
-JSONProtocolSerializer.prototype.serializeReferencedObjects = function() {
-  // Collect the protocol representation of the referenced objects in an array.
-  var content = [];
-
-  // Get the number of referenced objects.
-  var count = this.mirrors_.length;
-
-  for (var i = 0; i < count; i++) {
-    content.push(this.serialize_(this.mirrors_[i], false, false));
-  }
-
-  return content;
-};
-
-
-JSONProtocolSerializer.prototype.includeSource_ = function() {
-  return this.options_ && this.options_.includeSource;
-};
-
-
-JSONProtocolSerializer.prototype.inlineRefs_ = function() {
-  return this.options_ && this.options_.inlineRefs;
-};
-
-
-JSONProtocolSerializer.prototype.maxStringLength_ = function() {
-  if (IS_UNDEFINED(this.options_) ||
-      IS_UNDEFINED(this.options_.maxStringLength)) {
-    return kMaxProtocolStringLength;
-  }
-  return this.options_.maxStringLength;
-};
-
-
-JSONProtocolSerializer.prototype.add_ = function(mirror) {
-  // If this mirror is already in the list just return.
-  for (var i = 0; i < this.mirrors_.length; i++) {
-    if (this.mirrors_[i] === mirror) {
-      return;
-    }
-  }
-
-  // Add the mirror to the list of mirrors to be serialized.
-  this.mirrors_.push(mirror);
-};
-
-
-/**
- * Formats mirror object to protocol reference object with some data that can
- * be used to display the value in debugger.
- * @param {Mirror} mirror Mirror to serialize.
- * @return {Object} Protocol reference object.
- */
-JSONProtocolSerializer.prototype.serializeReferenceWithDisplayData_ =
-    function(mirror) {
-  var o = {};
-  o.ref = mirror.handle();
-  o.type = mirror.type();
-  switch (mirror.type()) {
-    case MirrorType.UNDEFINED_TYPE:
-    case MirrorType.NULL_TYPE:
-    case MirrorType.BOOLEAN_TYPE:
-    case MirrorType.NUMBER_TYPE:
-      o.value = mirror.value();
-      break;
-    case MirrorType.STRING_TYPE:
-      o.value = mirror.getTruncatedValue(this.maxStringLength_());
-      break;
-    case MirrorType.SYMBOL_TYPE:
-      o.description = mirror.description();
-      break;
-    case MirrorType.FUNCTION_TYPE:
-      o.name = mirror.name();
-      o.inferredName = mirror.inferredName();
-      if (mirror.script()) {
-        o.scriptId = mirror.script().id();
-      }
-      break;
-    case MirrorType.ERROR_TYPE:
-    case MirrorType.REGEXP_TYPE:
-      o.value = mirror.toText();
-      break;
-    case MirrorType.OBJECT_TYPE:
-      o.className = mirror.className();
-      break;
-  }
-  return o;
-};
-
-
-JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
-                                                       details) {
-  // If serializing a reference to a mirror just return the reference and add
-  // the mirror to the referenced mirrors.
-  if (reference &&
-      (mirror.isValue() || mirror.isScript() || mirror.isContext())) {
-    if (this.inlineRefs_() && mirror.isValue()) {
-      return this.serializeReferenceWithDisplayData_(mirror);
-    } else {
-      this.add_(mirror);
-      return {'ref' : mirror.handle()};
-    }
-  }
-
-  // Collect the JSON property/value pairs.
-  var content = {};
-
-  // Add the mirror handle.
-  if (mirror.isValue() || mirror.isScript() || mirror.isContext()) {
-    content.handle = mirror.handle();
-  }
-
-  // Always add the type.
-  content.type = mirror.type();
-
-  switch (mirror.type()) {
-    case MirrorType.UNDEFINED_TYPE:
-    case MirrorType.NULL_TYPE:
-      // Undefined and null are represented just by their type.
-      break;
-
-    case MirrorType.BOOLEAN_TYPE:
-      // Boolean values are simply represented by their value.
-      content.value = mirror.value();
-      break;
-
-    case MirrorType.NUMBER_TYPE:
-      // Number values are simply represented by their value.
-      content.value = NumberToJSON_(mirror.value());
-      break;
-
-    case MirrorType.STRING_TYPE:
-      // String values might have their value cropped to keep down size.
-      if (this.maxStringLength_() != -1 &&
-          mirror.length() > this.maxStringLength_()) {
-        var str = mirror.getTruncatedValue(this.maxStringLength_());
-        content.value = str;
-        content.fromIndex = 0;
-        content.toIndex = this.maxStringLength_();
-      } else {
-        content.value = mirror.value();
-      }
-      content.length = mirror.length();
-      break;
-
-    case MirrorType.SYMBOL_TYPE:
-      content.description = mirror.description();
-      break;
-
-    case MirrorType.OBJECT_TYPE:
-    case MirrorType.FUNCTION_TYPE:
-    case MirrorType.ERROR_TYPE:
-    case MirrorType.REGEXP_TYPE:
-    case MirrorType.PROMISE_TYPE:
-    case MirrorType.GENERATOR_TYPE:
-      // Add object representation.
-      this.serializeObject_(mirror, content, details);
-      break;
-
-    case MirrorType.PROPERTY_TYPE:
-    case MirrorType.INTERNAL_PROPERTY_TYPE:
-      throw %make_error(kDebugger,
-                     'PropertyMirror cannot be serialized independently');
-      break;
-
-    case MirrorType.FRAME_TYPE:
-      // Add object representation.
-      this.serializeFrame_(mirror, content);
-      break;
-
-    case MirrorType.SCOPE_TYPE:
-      // Add object representation.
-      this.serializeScope_(mirror, content);
-      break;
-
-    case MirrorType.SCRIPT_TYPE:
-      // Script is represented by id, name and source attributes.
-      if (mirror.name()) {
-        content.name = mirror.name();
-      }
-      content.id = mirror.id();
-      content.lineOffset = mirror.lineOffset();
-      content.columnOffset = mirror.columnOffset();
-      content.lineCount = mirror.lineCount();
-      if (mirror.data()) {
-        content.data = mirror.data();
-      }
-      if (this.includeSource_()) {
-        content.source = mirror.source();
-      } else {
-        var sourceStart = mirror.source().substring(0, 80);
-        content.sourceStart = sourceStart;
-      }
-      content.sourceLength = mirror.source().length;
-      content.scriptType = mirror.scriptType();
-      content.compilationType = mirror.compilationType();
-      // For compilation type eval emit information on the script from which
-      // eval was called if a script is present.
-      if (mirror.compilationType() == 1 &&
-          mirror.evalFromScript()) {
-        content.evalFromScript =
-            this.serializeReference(mirror.evalFromScript());
-        var evalFromLocation = mirror.evalFromLocation();
-        if (evalFromLocation) {
-          content.evalFromLocation = { line: evalFromLocation.line,
-                                       column: evalFromLocation.column };
-        }
-        if (mirror.evalFromFunctionName()) {
-          content.evalFromFunctionName = mirror.evalFromFunctionName();
-        }
-      }
-      if (mirror.context()) {
-        content.context = this.serializeReference(mirror.context());
-      }
-      break;
-
-    case MirrorType.CONTEXT_TYPE:
-      content.data = mirror.data();
-      break;
-  }
-
-  // Always add the text representation.
-  content.text = mirror.toText();
-
-  // Create and return the JSON string.
-  return content;
-};
-
-
-/**
- * Serialize object information to the following JSON format.
- *
- *   {"className":"<class name>",
- *    "constructorFunction":{"ref":<number>},
- *    "protoObject":{"ref":<number>},
- *    "prototypeObject":{"ref":<number>},
- *    "namedInterceptor":<boolean>,
- *    "indexedInterceptor":<boolean>,
- *    "properties":[<properties>],
- *    "internalProperties":[<internal properties>]}
- */
-JSONProtocolSerializer.prototype.serializeObject_ = function(mirror, content,
-                                                             details) {
-  // Add general object properties.
-  content.className = mirror.className();
-  content.constructorFunction =
-      this.serializeReference(mirror.constructorFunction());
-  content.protoObject = this.serializeReference(mirror.protoObject());
-  content.prototypeObject = this.serializeReference(mirror.prototypeObject());
-
-  // Add flags to indicate whether there are interceptors.
-  if (mirror.hasNamedInterceptor()) {
-    content.namedInterceptor = true;
-  }
-  if (mirror.hasIndexedInterceptor()) {
-    content.indexedInterceptor = true;
-  }
-
-  if (mirror.isFunction()) {
-    // Add function specific properties.
-    content.name = mirror.name();
-    if (!IS_UNDEFINED(mirror.inferredName())) {
-      content.inferredName = mirror.inferredName();
-    }
-    content.resolved = mirror.resolved();
-    if (mirror.resolved()) {
-      content.source = mirror.source();
-    }
-    if (mirror.script()) {
-      content.script = this.serializeReference(mirror.script());
-      content.scriptId = mirror.script().id();
-
-      serializeLocationFields(mirror.sourceLocation(), content);
-    }
-
-    content.scopes = [];
-    for (var i = 0; i < mirror.scopeCount(); i++) {
-      var scope = mirror.scope(i);
-      content.scopes.push({
-        type: scope.scopeType(),
-        index: i
-      });
-    }
-  }
-
-  if (mirror.isGenerator()) {
-    // Add generator specific properties.
-
-    // Either 'running', 'closed', or 'suspended'.
-    content.status = mirror.status();
-
-    content.func = this.serializeReference(mirror.func())
-    content.receiver = this.serializeReference(mirror.receiver())
-
-    // If the generator is suspended, add line/column properties to the content.
-    serializeLocationFields(mirror.sourceLocation(), content);
-
-    // TODO(wingo): Also serialize a reference to the context (scope chain).
-  }
-
-  if (mirror.isDate()) {
-    // Add date specific properties.
-    content.value = mirror.value();
-  }
-
-  if (mirror.isPromise()) {
-    // Add promise specific properties.
-    content.status = mirror.status();
-    content.promiseValue = this.serializeReference(mirror.promiseValue());
-  }
-
-  // Add actual properties - named properties followed by indexed properties.
-  var properties = mirror.propertyNames();
-  for (var i = 0; i < properties.length; i++) {
-    var propertyMirror = mirror.property(properties[i]);
-    properties[i] = this.serializeProperty_(propertyMirror);
-    if (details) {
-      this.add_(propertyMirror.value());
-    }
-  }
-  content.properties = properties;
-
-  var internalProperties = mirror.internalProperties();
-  if (internalProperties.length > 0) {
-    var ip = [];
-    for (var i = 0; i < internalProperties.length; i++) {
-      ip.push(this.serializeInternalProperty_(internalProperties[i]));
-    }
-    content.internalProperties = ip;
-  }
-};
-
-
-/**
- * Serialize location information to the following JSON format:
- *
- *   "position":"<position>",
- *   "line":"<line>",
- *   "column":"<column>",
- *
- * @param {SourceLocation} location The location to serialize, may be undefined.
- */
-function serializeLocationFields (location, content) {
-  if (!location) {
-    return;
-  }
-  content.position = location.position;
-  var line = location.line;
-  if (!IS_UNDEFINED(line)) {
-    content.line = line;
-  }
-  var column = location.column;
-  if (!IS_UNDEFINED(column)) {
-    content.column = column;
-  }
-}
-
-
-/**
- * Serialize property information to the following JSON format for building the
- * array of properties.
- *
- *   {"name":"<property name>",
- *    "attributes":<number>,
- *    "propertyType":<number>,
- *    "ref":<number>}
- *
- * If the attribute for the property is PropertyAttribute.None it is not added.
- * Here are a couple of examples.
- *
- *   {"name":"hello","propertyType":0,"ref":1}
- *   {"name":"length","attributes":7,"propertyType":3,"ref":2}
- *
- * @param {PropertyMirror} propertyMirror The property to serialize.
- * @returns {Object} Protocol object representing the property.
- */
-JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
-  var result = {};
-
-  result.name = propertyMirror.name();
-  var propertyValue = propertyMirror.value();
-  if (this.inlineRefs_() && propertyValue.isValue()) {
-    result.value = this.serializeReferenceWithDisplayData_(propertyValue);
-  } else {
-    if (propertyMirror.attributes() != PropertyAttribute.None) {
-      result.attributes = propertyMirror.attributes();
-    }
-    result.propertyType = propertyMirror.propertyType();
-    result.ref = propertyValue.handle();
-  }
-  return result;
-};
-
-
-/**
- * Serialize internal property information to the following JSON format for
- * building the array of properties.
- *
- *   {"name":"<property name>",
- *    "ref":<number>}
- *
- *   {"name":"[[BoundThis]]","ref":117}
- *
- * @param {InternalPropertyMirror} propertyMirror The property to serialize.
- * @returns {Object} Protocol object representing the property.
- */
-JSONProtocolSerializer.prototype.serializeInternalProperty_ =
-    function(propertyMirror) {
-  var result = {};
-
-  result.name = propertyMirror.name();
-  var propertyValue = propertyMirror.value();
-  if (this.inlineRefs_() && propertyValue.isValue()) {
-    result.value = this.serializeReferenceWithDisplayData_(propertyValue);
-  } else {
-    result.ref = propertyValue.handle();
-  }
-  return result;
-};
-
-
-JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
-  content.index = mirror.index();
-  content.receiver = this.serializeReference(mirror.receiver());
-  var func = mirror.func();
-  content.func = this.serializeReference(func);
-  var script = func.script();
-  if (script) {
-    content.script = this.serializeReference(script);
-  }
-  content.constructCall = mirror.isConstructCall();
-  content.atReturn = mirror.isAtReturn();
-  if (mirror.isAtReturn()) {
-    content.returnValue = this.serializeReference(mirror.returnValue());
-  }
-  content.debuggerFrame = mirror.isDebuggerFrame();
-  var x = new GlobalArray(mirror.argumentCount());
-  for (var i = 0; i < mirror.argumentCount(); i++) {
-    var arg = {};
-    var argument_name = mirror.argumentName(i);
-    if (argument_name) {
-      arg.name = argument_name;
-    }
-    arg.value = this.serializeReference(mirror.argumentValue(i));
-    x[i] = arg;
-  }
-  content.arguments = x;
-  var x = new GlobalArray(mirror.localCount());
-  for (var i = 0; i < mirror.localCount(); i++) {
-    var local = {};
-    local.name = mirror.localName(i);
-    local.value = this.serializeReference(mirror.localValue(i));
-    x[i] = local;
-  }
-  content.locals = x;
-  serializeLocationFields(mirror.sourceLocation(), content);
-  var source_line_text = mirror.sourceLineText();
-  if (!IS_UNDEFINED(source_line_text)) {
-    content.sourceLineText = source_line_text;
-  }
-
-  content.scopes = [];
-  for (var i = 0; i < mirror.scopeCount(); i++) {
-    var scope = mirror.scope(i);
-    content.scopes.push({
-      type: scope.scopeType(),
-      index: i
-    });
-  }
-};
-
-
-JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
-  content.index = mirror.scopeIndex();
-  content.frameIndex = mirror.frameIndex();
-  content.type = mirror.scopeType();
-  content.object = this.inlineRefs_() ?
-                   this.serializeValue(mirror.scopeObject()) :
-                   this.serializeReference(mirror.scopeObject());
-};
-
-
-/**
- * Convert a number to a protocol value. For all finite numbers the number
- * itself is returned. For the non-finite numbers NaN, Infinity and
- * -Infinity the string representation "NaN", "Infinity" or "-Infinity"
- * (not including the quotes) is returned.
- *
- * @param {number} value The number value to convert to a protocol value.
- * @returns {number|string} Protocol value.
- */
-function NumberToJSON_(value) {
-  if (IsNaN(value)) {
-    return 'NaN';
-  }
-  if (!NUMBER_IS_FINITE(value)) {
-    if (value > 0) {
-      return 'Infinity';
-    } else {
-      return '-Infinity';
-    }
-  }
-  return value;
-}
-
 // ----------------------------------------------------------------------------
 // Exports
 
 utils.InstallFunctions(global, DONT_ENUM, [
   "MakeMirror", MakeMirror,
-  "MakeMirrorSerializer", MakeMirrorSerializer,
-  "LookupMirror", LookupMirror,
-  "ToggleMirrorCache", ToggleMirrorCache,
-  "MirrorCacheIsEmpty", MirrorCacheIsEmpty,
 ]);
 
 utils.InstallConstants(global, [
@@ -3083,13 +2407,4 @@
   "FrameDetails", FrameDetails,
 ]);
 
-// Functions needed by the debugger runtime.
-utils.InstallFunctions(utils, DONT_ENUM, [
-  "ClearMirrorCache", ClearMirrorCache
-]);
-
-// Export to debug.js
-utils.Export(function(to) {
-  to.MirrorType = MirrorType;
-});
 })
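
Note: PromiseGetStatus_ above now decodes the integer returned by %PromiseStatus. A trivial C++ restatement of that mapping, with the 0/1/2 encoding taken directly from the JS code in this hunk:

    #include <cstdio>

    // Mirrors PromiseGetStatus_ in mirrors.js:
    // 0 -> pending, 1 -> resolved, anything else -> rejected.
    const char* PromiseStatusName(int status) {
      if (status == 0) return "pending";
      if (status == 1) return "resolved";
      return "rejected";
    }

    int main() { std::printf("%s\n", PromiseStatusName(1)); }  // resolved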
diff --git a/src/debug/ppc/debug-ppc.cc b/src/debug/ppc/debug-ppc.cc
index e57aa3c..42be185 100644
--- a/src/debug/ppc/debug-ppc.cc
+++ b/src/debug/ppc/debug-ppc.cc
@@ -77,14 +77,6 @@
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
 
-    // Load padding words on stack.
-    __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
-    for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
-      __ push(ip);
-    }
-    __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
-    __ push(ip);
-
     // Push arguments for DebugBreak call.
     if (mode == SAVE_RESULT_REGISTER) {
       // Break on return.
@@ -111,50 +103,47 @@
         }
       }
     }
-
-    // Don't bother removing padding bytes pushed on the stack
-    // as the frame is going to be restored right away.
-
     // Leave the internal frame.
   }
 
-  // Now that the break point has been handled, resume normal execution by
-  // jumping to the target address intended by the caller and that was
-  // overwritten by the address of DebugBreakXXX.
-  ExternalReference after_break_target =
-      ExternalReference::debug_after_break_target_address(masm->isolate());
-  __ mov(ip, Operand(after_break_target));
-  __ LoadP(ip, MemOperand(ip));
-  __ JumpToJSEntry(ip);
+  __ MaybeDropFrames();
+
+  // Return to caller.
+  __ Ret();
 }
 
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+  }
+  __ MaybeDropFrames();
 
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  // Load the function pointer off of our current stack frame.
-  __ LoadP(r4, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
+  // Return to caller.
+  __ Ret();
+}
 
-  // Pop return address and frame
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+  // Frame is being dropped:
+  // - Drop to the target frame specified by r4.
+  // - Look up current function on the frame.
+  // - Leave the frame.
+  // - Restart the frame by calling the function.
+
+  __ mr(fp, r4);
+  __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ LeaveFrame(StackFrame::INTERNAL);
+  __ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(
+      r3, FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ mr(r5, r3);
 
-  ParameterCount dummy(0);
-  __ FloodFunctionIfStepping(r4, no_reg, dummy, dummy);
-
-  // Load context from the function.
-  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
-
-  // Clear new.target as a safety measure.
-  __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
-
-  // Get function code.
-  __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
-  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  // Re-run JSFunction, r4 is function, cp is context.
-  __ Jump(ip);
+  ParameterCount dummy1(r5);
+  ParameterCount dummy2(r3);
+  __ InvokeFunction(r4, dummy1, dummy2, JUMP_FUNCTION,
+                    CheckDebugStepCallWrapper());
 }
 
-
 const bool LiveEdit::kFrameDropperSupported = true;
 
 #undef __
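
Note: the DebugBreak stubs on every port now share one epilogue: run Runtime::kDebugBreak inside an internal frame, let MaybeDropFrames divert to the frame-dropper trampoline when the debugger requested a frame restart, and otherwise plain-return to the caller instead of jumping through the old after-break-target cell. A stub C++ sketch of that control flow (the helper names and restart flag are stand-ins for the real external-reference check):

    #include <cstdio>

    static void* restart_fp = nullptr;  // stand-in for the isolate's restart state

    void FrameDropperTrampoline(void* fp) { std::printf("restart frame %p\n", fp); }

    void DebugBreakEpilogue() {
      // { FrameScope scope(INTERNAL); CallRuntime(kDebugBreak); } ran before this.
      if (restart_fp != nullptr) {      // MaybeDropFrames
        FrameDropperTrampoline(restart_fp);
        return;                         // the real trampoline never returns
      }
      // __ Ret(): resume the interrupted caller normally.
    }

    int main() { DebugBreakEpilogue(); }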
diff --git a/src/debug/s390/debug-s390.cc b/src/debug/s390/debug-s390.cc
index b745d5b..5ef6a60 100644
--- a/src/debug/s390/debug-s390.cc
+++ b/src/debug/s390/debug-s390.cc
@@ -82,14 +82,6 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Load padding words on stack.
-    __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
-    for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
-      __ push(ip);
-    }
-    __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
-    __ push(ip);
-
     // Push arguments for DebugBreak call.
     if (mode == SAVE_RESULT_REGISTER) {
       // Break on return.
@@ -116,46 +108,44 @@
         }
       }
     }
-
-    // Don't bother removing padding bytes pushed on the stack
-    // as the frame is going to be restored right away.
-
     // Leave the internal frame.
   }
+  __ MaybeDropFrames();
 
-  // Now that the break point has been handled, resume normal execution by
-  // jumping to the target address intended by the caller and that was
-  // overwritten by the address of DebugBreakXXX.
-  ExternalReference after_break_target =
-      ExternalReference::debug_after_break_target_address(masm->isolate());
-  __ mov(ip, Operand(after_break_target));
-  __ LoadP(ip, MemOperand(ip));
-  __ JumpToJSEntry(ip);
+  // Return to caller.
+  __ Ret();
 }
 
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  // Load the function pointer off of our current stack frame.
-  __ LoadP(r3, MemOperand(fp, FrameDropperFrameConstants::kFunctionOffset));
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+  }
+  __ MaybeDropFrames();
 
-  // Pop return address and frame
+  // Return to caller.
+  __ Ret();
+}
+
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+  // Frame is being dropped:
+  // - Drop to the target frame specified by r3.
+  // - Look up current function on the frame.
+  // - Leave the frame.
+  // - Restart the frame by calling the function.
+
+  __ LoadRR(fp, r3);
+  __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ LeaveFrame(StackFrame::INTERNAL);
+  __ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(
+      r2, FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ LoadRR(r4, r2);
 
-  ParameterCount dummy(0);
-  __ FloodFunctionIfStepping(r3, no_reg, dummy, dummy);
-
-  // Load context from the function.
-  __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
-
-  // Clear new.target as a safety measure.
-  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
-
-  // Get function code.
-  __ LoadP(ip, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
-  __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  // Re-run JSFunction, r3 is function, cp is context.
-  __ Jump(ip);
+  ParameterCount dummy1(r4);
+  ParameterCount dummy2(r2);
+  __ InvokeFunction(r3, dummy1, dummy2, JUMP_FUNCTION,
+                    CheckDebugStepCallWrapper());
 }
 
 const bool LiveEdit::kFrameDropperSupported = true;
diff --git a/src/debug/x64/debug-x64.cc b/src/debug/x64/debug-x64.cc
index 4f80e18..63689de 100644
--- a/src/debug/x64/debug-x64.cc
+++ b/src/debug/x64/debug-x64.cc
@@ -9,6 +9,7 @@
 #include "src/assembler.h"
 #include "src/codegen.h"
 #include "src/debug/liveedit.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -64,12 +65,6 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
-    // Load padding words on stack.
-    for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
-      __ Push(Smi::FromInt(LiveEdit::kFramePaddingValue));
-    }
-    __ Push(Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
-
     // Push arguments for DebugBreak call.
     if (mode == SAVE_RESULT_REGISTER) {
       // Break on return.
@@ -78,12 +73,8 @@
       // Non-return breaks.
       __ Push(masm->isolate()->factory()->the_hole_value());
     }
-    __ Set(rax, 1);
-    __ Move(rbx, ExternalReference(Runtime::FunctionForId(Runtime::kDebugBreak),
-                                   masm->isolate()));
 
-    CEntryStub ceb(masm->isolate(), 1);
-    __ CallStub(&ceb);
+    __ CallRuntime(Runtime::kDebugBreak, 1, kDontSaveFPRegs);
 
     if (FLAG_debug_code) {
       for (int i = 0; i < kNumJSCallerSaved; ++i) {
@@ -95,55 +86,43 @@
         }
       }
     }
-
-    // Read current padding counter and skip corresponding number of words.
-    __ Pop(kScratchRegister);
-    __ SmiToInteger32(kScratchRegister, kScratchRegister);
-    __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
-
     // Get rid of the internal frame.
   }
 
-  // This call did not replace a call, so there will be an unwanted
-  // return address left on the stack. Here we get rid of that.
-  __ addp(rsp, Immediate(kPCOnStackSize));
+  __ MaybeDropFrames();
 
-  // Now that the break point has been handled, resume normal execution by
-  // jumping to the target address intended by the caller and that was
-  // overwritten by the address of DebugBreakXXX.
-  ExternalReference after_break_target =
-      ExternalReference::debug_after_break_target_address(masm->isolate());
-  __ Move(kScratchRegister, after_break_target);
-  __ Jump(Operand(kScratchRegister, 0));
+  // Return to caller.
+  __ ret(0);
 }
 
+void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
+  }
+  __ MaybeDropFrames();
 
-void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  // We do not know our frame height, but set rsp based on rbp.
-  __ leap(rsp, Operand(rbp, FrameDropperFrameConstants::kFunctionOffset));
-  __ Pop(rdi);  // Function.
-  __ addp(rsp,
-          Immediate(-FrameDropperFrameConstants::kCodeOffset));  // INTERNAL
-                                                                 // frame marker
-                                                                 // and code
-  __ popq(rbp);
+  // Return to caller.
+  __ ret(0);
+}
 
-  ParameterCount dummy(0);
-  __ FloodFunctionIfStepping(rdi, no_reg, dummy, dummy);
+void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
+  // Frame is being dropped:
+  // - Drop to the target frame specified by rbx.
+  // - Look up current function on the frame.
+  // - Leave the frame.
+  // - Restart the frame by calling the function.
+  __ movp(rbp, rbx);
+  __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+  __ leave();
 
-  // Load context from the function.
-  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
-  // Clear new.target as a safety measure.
-  __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
-
-  // Get function code.
   __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ movp(rbx, FieldOperand(rbx, SharedFunctionInfo::kCodeOffset));
-  __ leap(rbx, FieldOperand(rbx, Code::kHeaderSize));
+  __ LoadSharedFunctionInfoSpecialField(
+      rbx, rbx, SharedFunctionInfo::kFormalParameterCountOffset);
 
-  // Re-run JSFunction, rdi is function, rsi is context.
-  __ jmp(rbx);
+  ParameterCount dummy(rbx);
+  __ InvokeFunction(rdi, no_reg, dummy, dummy, JUMP_FUNCTION,
+                    CheckDebugStepCallWrapper());
 }
 
 const bool LiveEdit::kFrameDropperSupported = true;
diff --git a/src/debug/x87/OWNERS b/src/debug/x87/OWNERS
index dd9998b..61245ae 100644
--- a/src/debug/x87/OWNERS
+++ b/src/debug/x87/OWNERS
@@ -1 +1,2 @@
 weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/src/debug/x87/debug-x87.cc b/src/debug/x87/debug-x87.cc
index c29eac1..8810f01 100644
--- a/src/debug/x87/debug-x87.cc
+++ b/src/debug/x87/debug-x87.cc
@@ -129,7 +129,7 @@
   __ pop(ebp);
 
   ParameterCount dummy(0);
-  __ FloodFunctionIfStepping(edi, no_reg, dummy, dummy);
+  __ CheckDebugHook(edi, no_reg, dummy, dummy);
 
   // Load context from the function.
   __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index dddf62e..8dfe0e1 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -257,9 +257,8 @@
       SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
       int deopt_index = safepoint.deoptimization_index();
       // Turbofan deopt is checked when we are patching addresses on stack.
-      bool turbofanned = code->is_turbofanned() &&
-                         function->shared()->asm_function() &&
-                         !FLAG_turbo_asm_deoptimization;
+      bool turbofanned =
+          code->is_turbofanned() && function->shared()->asm_function();
       bool safe_to_deopt =
           deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
       bool builtin = code->kind() == Code::BUILTIN;
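
Note: the next hunk widens DeoptimizeFunction to take an explicit Code*, with nullptr meaning "whatever the function currently runs", which preserves the old single-argument behaviour. A stand-alone sketch of that optional-argument pattern with stub types (illustrative only):

    #include <cstdio>

    struct Code { bool optimized; };            // stub
    struct JSFunction {                         // stub
      Code* current;
      Code* code() { return current; }
    };

    void DeoptimizeFunction(JSFunction* function, Code* code = nullptr) {
      if (code == nullptr) code = function->code();  // legacy one-arg path
      if (code->optimized) std::puts("mark code for deoptimization");
    }

    int main() {
      Code opt{true};
      JSFunction f{&opt};
      DeoptimizeFunction(&f);        // deoptimize the function's current code
      DeoptimizeFunction(&f, &opt);  // or a specific Code the caller holds
    }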
@@ -391,14 +390,13 @@
   }
 }
 
-
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+void Deoptimizer::DeoptimizeFunction(JSFunction* function, Code* code) {
   Isolate* isolate = function->GetIsolate();
   RuntimeCallTimerScope runtimeTimer(isolate,
                                      &RuntimeCallStats::DeoptimizeCode);
   TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
   TRACE_EVENT0("v8", "V8.DeoptimizeCode");
-  Code* code = function->code();
+  if (code == nullptr) code = function->code();
   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
     // Mark the code for deoptimization and unlink any functions that also
     // refer to that code. The code cannot be shared across native contexts,
@@ -627,19 +625,15 @@
 int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
   switch (translated_frame->kind()) {
     case TranslatedFrame::kFunction: {
-      BailoutId node_id = translated_frame->node_id();
+#ifdef DEBUG
       JSFunction* function =
           JSFunction::cast(translated_frame->begin()->GetRawValue());
       Code* non_optimized_code = function->shared()->code();
-      FixedArray* raw_data = non_optimized_code->deoptimization_data();
-      DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
-      unsigned pc_and_state =
-          Deoptimizer::GetOutputInfo(data, node_id, function->shared());
-      unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
       HandlerTable* table =
           HandlerTable::cast(non_optimized_code->handler_table());
-      HandlerTable::CatchPrediction prediction;
-      return table->LookupRange(pc_offset, data_out, &prediction);
+      DCHECK_EQ(0, table->NumberOfRangeEntries());
+#endif
+      break;
     }
     case TranslatedFrame::kInterpretedFunction: {
       int bytecode_offset = translated_frame->node_id().ToInt();
@@ -647,8 +641,7 @@
           JSFunction::cast(translated_frame->begin()->GetRawValue());
       BytecodeArray* bytecode = function->shared()->bytecode_array();
       HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
-      HandlerTable::CatchPrediction prediction;
-      return table->LookupRange(bytecode_offset, data_out, &prediction);
+      return table->LookupRange(bytecode_offset, data_out, nullptr);
     }
     default:
       break;
@@ -1403,8 +1396,7 @@
 
   // A marker value is used in place of the context.
   output_offset -= kPointerSize;
-  intptr_t context = reinterpret_cast<intptr_t>(
-      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  intptr_t context = StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
   output_frame->SetFrameSlot(output_offset, context);
   DebugPrintOutputSlot(context, frame_index, output_offset,
                        "context (adaptor sentinel)\n");
@@ -1460,8 +1452,8 @@
   Address adaptor_fp_address =
       Memory::Address_at(fp_address + CommonFrameConstants::kCallerFPOffset);
 
-  if (Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR) !=
-      Memory::Object_at(adaptor_fp_address +
+  if (StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR) !=
+      Memory::intptr_at(adaptor_fp_address +
                         CommonFrameConstants::kContextOrFrameTypeOffset)) {
     return;
   }
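
Note: the marker hunks here and below swap reinterpret_cast<intptr_t>(Smi::FromInt(type)) for StackFrame::TypeToMarker(type), so frame markers become plain platform-independent integers that are still distinguishable from a tagged context pointer. A sketch of the idea; the one-bit tag encoding below is an assumption for illustration, not V8's verified layout:

    #include <cassert>
    #include <cstdint>

    // Assumed Smi-like encoding: shift the frame type left one bit so the
    // low (tag) bit stays 0 and the marker never aliases a heap pointer,
    // which would carry a tag bit of 1.
    constexpr std::intptr_t TypeToMarker(int type) {
      return static_cast<std::intptr_t>(type) << 1;
    }

    int main() {
      constexpr int kArgumentsAdaptor = 8;  // illustrative type id
      static_assert((TypeToMarker(kArgumentsAdaptor) & 1) == 0, "Smi-tagged");
      assert(TypeToMarker(kArgumentsAdaptor) == 16);
      return 0;
    }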
@@ -1515,6 +1507,7 @@
 
   Builtins* builtins = isolate_->builtins();
   Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
+  BailoutId bailout_id = translated_frame->node_id();
   unsigned height = translated_frame->height();
   unsigned height_in_bytes = height * kPointerSize;
 
@@ -1527,12 +1520,15 @@
     height_in_bytes += kPointerSize;
   }
 
-  // Skip function.
+  JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
   value_iterator++;
   input_index++;
   if (trace_scope_ != NULL) {
     PrintF(trace_scope_->file(),
-           "  translating construct stub => height=%d\n", height_in_bytes);
+           "  translating construct stub => bailout_id=%d (%s), height=%d\n",
+           bailout_id.ToInt(),
+           bailout_id == BailoutId::ConstructStubCreate() ? "create" : "invoke",
+           height_in_bytes);
   }
 
   unsigned fixed_frame_size = ConstructFrameConstants::kFixedFrameSize;
@@ -1596,7 +1592,7 @@
 
   // A marker value is used to mark the frame.
   output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
+  value = StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
   output_frame->SetFrameSlot(output_offset, value);
   DebugPrintOutputSlot(value, frame_index, output_offset,
                        "typed frame marker\n");
@@ -1616,13 +1612,21 @@
     PrintF(trace_scope_->file(), "(%d)\n", height - 1);
   }
 
-  // The newly allocated object was passed as receiver in the artificial
-  // constructor stub environment created by HEnvironment::CopyForInlining().
-  output_offset -= kPointerSize;
-  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
-  output_frame->SetFrameSlot(output_offset, value);
-  DebugPrintOutputSlot(value, frame_index, output_offset,
-                       "allocated receiver\n");
+  if (bailout_id == BailoutId::ConstructStubCreate()) {
+    // The function was mentioned explicitly in the CONSTRUCT_STUB_FRAME.
+    output_offset -= kPointerSize;
+    value = reinterpret_cast<intptr_t>(function);
+    WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
+  } else {
+    DCHECK(bailout_id == BailoutId::ConstructStubInvoke());
+    // The newly allocated object was passed as receiver in the artificial
+    // constructor stub environment created by HEnvironment::CopyForInlining().
+    output_offset -= kPointerSize;
+    value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+    output_frame->SetFrameSlot(output_offset, value);
+    DebugPrintOutputSlot(value, frame_index, output_offset,
+                         "allocated receiver\n");
+  }
 
   if (is_topmost) {
     // Ensure the result is restored back when we return to the stub.
@@ -1639,10 +1643,17 @@
 
   CHECK_EQ(0u, output_offset);
 
-  intptr_t pc = reinterpret_cast<intptr_t>(
-      construct_stub->instruction_start() +
-      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
-  output_frame->SetPc(pc);
+  // Compute this frame's PC.
+  DCHECK(bailout_id.IsValidForConstructStub());
+  Address start = construct_stub->instruction_start();
+  int pc_offset =
+      bailout_id == BailoutId::ConstructStubCreate()
+          ? isolate_->heap()->construct_stub_create_deopt_pc_offset()->value()
+          : isolate_->heap()->construct_stub_invoke_deopt_pc_offset()->value();
+  intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
+  output_frame->SetPc(pc_value);
+
+  // Update constant pool.
   if (FLAG_enable_embedded_constant_pool) {
     intptr_t constant_pool_value =
         reinterpret_cast<intptr_t>(construct_stub->constant_pool());
@@ -1768,7 +1779,7 @@
 
   // Set the frame type.
   output_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
+  value = StackFrame::TypeToMarker(StackFrame::INTERNAL);
   output_frame->SetFrameSlot(output_offset, value);
   DebugPrintOutputSlot(value, frame_index, output_offset, "frame type ");
   if (trace_scope_ != nullptr) {
@@ -1827,6 +1838,8 @@
   intptr_t pc = reinterpret_cast<intptr_t>(
       accessor_stub->instruction_start() + offset->value());
   output_frame->SetPc(pc);
+
+  // Update constant pool.
   if (FLAG_enable_embedded_constant_pool) {
     intptr_t constant_pool_value =
         reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
@@ -1963,8 +1976,7 @@
 
   // The marker for the typed stack frame
   output_frame_offset -= kPointerSize;
-  value = reinterpret_cast<intptr_t>(
-      Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
+  value = StackFrame::TypeToMarker(StackFrame::STUB_FAILURE_TRAMPOLINE);
   output_frame->SetFrameSlot(output_frame_offset, value);
   DebugPrintOutputSlot(value, frame_index, output_frame_offset,
                        "function (stub failure sentinel)\n");
@@ -2326,9 +2338,10 @@
   return result;
 }
 
-
-void Translation::BeginConstructStubFrame(int literal_id, unsigned height) {
+void Translation::BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
+                                          unsigned height) {
   buffer_->Add(CONSTRUCT_STUB_FRAME);
+  buffer_->Add(bailout_id.ToInt());
   buffer_->Add(literal_id);
   buffer_->Add(height);
 }
@@ -2357,8 +2370,7 @@
   buffer_->Add(literal_id);
 }
 
-void Translation::BeginJSFrame(BailoutId node_id,
-                               int literal_id,
+void Translation::BeginJSFrame(BailoutId node_id, int literal_id,
                                unsigned height) {
   buffer_->Add(JS_FRAME);
   buffer_->Add(node_id.ToInt());
@@ -2515,10 +2527,10 @@
       return 1;
     case BEGIN:
     case ARGUMENTS_ADAPTOR_FRAME:
-    case CONSTRUCT_STUB_FRAME:
       return 2;
     case JS_FRAME:
     case INTERPRETED_FRAME:
+    case CONSTRUCT_STUB_FRAME:
       return 3;
   }
   FATAL("Unexpected translation type");
@@ -2721,6 +2733,7 @@
 
 
 Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
+  CHECK(code->instruction_start() <= pc && pc <= code->instruction_end());
   SourcePosition last_position = SourcePosition::Unknown();
   DeoptimizeReason last_reason = DeoptimizeReason::kNoReason;
   int last_deopt_id = kNoDeoptimizationId;
@@ -2730,9 +2743,7 @@
              RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID);
   for (RelocIterator it(code, mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
-    if (info->pc() >= pc) {
-      return DeoptInfo(last_position, last_reason, last_deopt_id);
-    }
+    if (info->pc() >= pc) break;
     if (info->rmode() == RelocInfo::DEOPT_SCRIPT_OFFSET) {
       int script_offset = static_cast<int>(info->data());
       it.next();
@@ -2745,7 +2756,7 @@
       last_reason = static_cast<DeoptimizeReason>(info->data());
     }
   }
-  return DeoptInfo(SourcePosition::Unknown(), DeoptimizeReason::kNoReason, -1);
+  return DeoptInfo(last_position, last_reason, last_deopt_id);
 }
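
Note: the GetDeoptInfo hunk above changes the reloc scan from "return Unknown as soon as the iterator passes pc" to "break and return whatever was collected last", and the new CHECK pins pc inside the code object first. A self-contained restatement of that last-entry-before-pc scan:

    #include <cstdio>
    #include <vector>

    struct Entry { int pc; int reason; };

    // Returns the reason of the last entry strictly before pc, or -1
    // (a kNoReason stand-in) when nothing precedes pc.
    int LastReasonBefore(const std::vector<Entry>& entries, int pc) {
      int last = -1;
      for (const Entry& e : entries) {
        if (e.pc >= pc) break;  // walked past pc: keep what we have
        last = e.reason;
      }
      return last;
    }

    int main() {
      std::vector<Entry> table = {{4, 1}, {8, 2}, {16, 3}};
      std::printf("%d\n", LastReasonBefore(table, 12));  // prints 2
    }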
 
 
@@ -2801,7 +2812,7 @@
 
 // static
 TranslatedValue TranslatedValue::NewFloat(TranslatedState* container,
-                                          float value) {
+                                          Float32 value) {
   TranslatedValue slot(container, kFloat);
   slot.float_value_ = value;
   return slot;
@@ -2809,7 +2820,7 @@
 
 // static
 TranslatedValue TranslatedValue::NewDouble(TranslatedState* container,
-                                           double value) {
+                                           Float64 value) {
   TranslatedValue slot(container, kDouble);
   slot.double_value_ = value;
   return slot;
@@ -2878,12 +2889,12 @@
   return uint32_value_;
 }
 
-float TranslatedValue::float_value() const {
+Float32 TranslatedValue::float_value() const {
   DCHECK_EQ(kFloat, kind());
   return float_value_;
 }
 
-double TranslatedValue::double_value() const {
+Float64 TranslatedValue::double_value() const {
   DCHECK_EQ(kDouble, kind());
   return double_value_;
 }
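
Note: carrying translated float/double values as Float32/Float64 bit containers rather than raw C++ float/double matters because routing a NaN through a floating-point variable can canonicalize its payload, and V8's array-hole sentinel is one specific NaN bit pattern that must survive (hence is_hole_nan in the next hunk). A minimal sketch of such a container; the hole bit pattern used here is an assumption for illustration:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    struct Float64 {
      uint64_t bits;
      static Float64 FromBits(uint64_t b) { return Float64{b}; }
      double get_scalar() const {  // reinterpret the bits as a double
        double d;
        std::memcpy(&d, &bits, sizeof d);
        return d;
      }
      // Assumed sentinel; V8's real kHoleNanInt64 value may differ.
      bool is_hole_nan() const { return bits == 0xFFF7FFFFFFF7FFFFull; }
    };

    int main() {
      Float64 hole = Float64::FromBits(0xFFF7FFFFFFF7FFFFull);
      std::printf("hole? %d scalar=%f\n", hole.is_hole_nan(), hole.get_scalar());
    }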
@@ -2993,22 +3004,29 @@
   }
 
   switch (kind()) {
-    case kInt32: {
+    case kInt32:
       value_ = Handle<Object>(isolate()->factory()->NewNumber(int32_value()));
       return;
-    }
 
     case kUInt32:
       value_ = Handle<Object>(isolate()->factory()->NewNumber(uint32_value()));
       return;
 
-    case kFloat:
-      value_ = Handle<Object>(isolate()->factory()->NewNumber(float_value()));
+    case kFloat: {
+      double scalar_value = float_value().get_scalar();
+      value_ = Handle<Object>(isolate()->factory()->NewNumber(scalar_value));
       return;
+    }
 
-    case kDouble:
-      value_ = Handle<Object>(isolate()->factory()->NewNumber(double_value()));
+    case kDouble: {
+      if (double_value().is_hole_nan()) {
+        value_ = isolate()->factory()->hole_nan_value();
+        return;
+      }
+      double scalar_value = double_value().get_scalar();
+      value_ = Handle<Object>(isolate()->factory()->NewNumber(scalar_value));
       return;
+    }
 
     case kCapturedObject:
     case kDuplicatedObject:
@@ -3056,6 +3074,13 @@
 #endif
 }
 
+Float32 TranslatedState::GetFloatSlot(Address fp, int slot_offset) {
+  return Float32::FromBits(GetUInt32Slot(fp, slot_offset));
+}
+
+Float64 TranslatedState::GetDoubleSlot(Address fp, int slot_offset) {
+  return Float64::FromBits(Memory::uint64_at(fp + slot_offset));
+}
 
 void TranslatedValue::Handlify() {
   if (kind() == kTagged) {
@@ -3104,9 +3129,11 @@
 }
 
 TranslatedFrame TranslatedFrame::ConstructStubFrame(
-    SharedFunctionInfo* shared_info, int height) {
-  return TranslatedFrame(kConstructStub, shared_info->GetIsolate(), shared_info,
-                         height);
+    BailoutId bailout_id, SharedFunctionInfo* shared_info, int height) {
+  TranslatedFrame frame(kConstructStub, shared_info->GetIsolate(), shared_info,
+                        height);
+  frame.node_id_ = bailout_id;
+  return frame;
 }
 
 
@@ -3224,15 +3251,18 @@
     }
 
     case Translation::CONSTRUCT_STUB_FRAME: {
+      BailoutId bailout_id = BailoutId(iterator->Next());
       SharedFunctionInfo* shared_info =
           SharedFunctionInfo::cast(literal_array->get(iterator->Next()));
       int height = iterator->Next();
       if (trace_file != nullptr) {
         std::unique_ptr<char[]> name = shared_info->DebugName()->ToCString();
         PrintF(trace_file, "  reading construct stub frame %s", name.get());
-        PrintF(trace_file, " => height=%d; inputs:\n", height);
+        PrintF(trace_file, " => bailout_id=%d, height=%d; inputs:\n",
+               bailout_id.ToInt(), height);
       }
-      return TranslatedFrame::ConstructStubFrame(shared_info, height);
+      return TranslatedFrame::ConstructStubFrame(bailout_id, shared_info,
+                                                 height);
     }
 
     case Translation::GETTER_STUB_FRAME: {
@@ -3411,9 +3441,9 @@
     case Translation::FLOAT_REGISTER: {
       int input_reg = iterator->Next();
       if (registers == nullptr) return TranslatedValue::NewInvalid(this);
-      float value = registers->GetFloatRegister(input_reg);
+      Float32 value = registers->GetFloatRegister(input_reg);
       if (trace_file != nullptr) {
-        PrintF(trace_file, "%e ; %s (float)", value,
+        PrintF(trace_file, "%e ; %s (float)", value.get_scalar(),
                RegisterConfiguration::Crankshaft()->GetFloatRegisterName(
                    input_reg));
       }
@@ -3423,9 +3453,9 @@
     case Translation::DOUBLE_REGISTER: {
       int input_reg = iterator->Next();
       if (registers == nullptr) return TranslatedValue::NewInvalid(this);
-      double value = registers->GetDoubleRegister(input_reg);
+      Float64 value = registers->GetDoubleRegister(input_reg);
       if (trace_file != nullptr) {
-        PrintF(trace_file, "%e ; %s (double)", value,
+        PrintF(trace_file, "%e ; %s (double)", value.get_scalar(),
                RegisterConfiguration::Crankshaft()->GetDoubleRegisterName(
                    input_reg));
       }
@@ -3481,9 +3511,9 @@
     case Translation::FLOAT_STACK_SLOT: {
       int slot_offset =
           OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
-      float value = ReadFloatValue(fp + slot_offset);
+      Float32 value = GetFloatSlot(fp, slot_offset);
       if (trace_file != nullptr) {
-        PrintF(trace_file, "%e ; (float) [fp %c %d] ", value,
+        PrintF(trace_file, "%e ; (float) [fp %c %d] ", value.get_scalar(),
                slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
       }
       return TranslatedValue::NewFloat(this, value);
@@ -3492,9 +3522,9 @@
     case Translation::DOUBLE_STACK_SLOT: {
       int slot_offset =
           OptimizedFrame::StackSlotOffsetRelativeToFp(iterator->Next());
-      double value = ReadDoubleValue(fp + slot_offset);
+      Float64 value = GetDoubleSlot(fp, slot_offset);
       if (trace_file != nullptr) {
-        PrintF(trace_file, "%e ; (double) [fp %c %d] ", value,
+        PrintF(trace_file, "%e ; (double) [fp %c %d] ", value.get_scalar(),
                slot_offset < 0 ? '-' : '+', std::abs(slot_offset));
       }
       return TranslatedValue::NewDouble(this, value);
@@ -3622,11 +3652,371 @@
   UpdateFromPreviouslyMaterializedObjects();
 }
 
+class TranslatedState::CapturedObjectMaterializer {
+ public:
+  CapturedObjectMaterializer(TranslatedState* state, int frame_index,
+                             int field_count)
+      : state_(state), frame_index_(frame_index), field_count_(field_count) {}
+
+  Handle<Object> FieldAt(int* value_index) {
+    CHECK(field_count_ > 0);
+    --field_count_;
+    return state_->MaterializeAt(frame_index_, value_index);
+  }
+
+  ~CapturedObjectMaterializer() { CHECK_EQ(0, field_count_); }
+
+ private:
+  TranslatedState* state_;
+  int frame_index_;
+  int field_count_;
+};
+
+Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
+    TranslatedValue* slot, int frame_index, int* value_index) {
+  int length = slot->GetChildrenCount();
+
+  CapturedObjectMaterializer materializer(this, frame_index, length);
+
+  Handle<Object> result;
+  if (slot->value_.ToHandle(&result)) {
+    // This has been previously materialized, return the previous value.
+    // We still need to skip all the nested objects.
+    for (int i = 0; i < length; i++) {
+      materializer.FieldAt(value_index);
+    }
+
+    return result;
+  }
+
+  Handle<Object> map_object = materializer.FieldAt(value_index);
+  Handle<Map> map = Map::GeneralizeAllFields(Handle<Map>::cast(map_object));
+  switch (map->instance_type()) {
+    case MUTABLE_HEAP_NUMBER_TYPE:
+    case HEAP_NUMBER_TYPE: {
+      // Reuse the HeapNumber value directly as it is already properly
+      // tagged and skip materializing the HeapNumber explicitly.
+      Handle<Object> object = materializer.FieldAt(value_index);
+      slot->value_ = object;
+      // On 32-bit architectures, there is an extra slot there because
+      // the escape analysis calculates the number of slots as
+      // object-size/pointer-size. To account for this, we read out
+      // any extra slots.
+      for (int i = 0; i < length - 2; i++) {
+        materializer.FieldAt(value_index);
+      }
+      return object;
+    }
+    case JS_OBJECT_TYPE:
+    case JS_ERROR_TYPE:
+    case JS_ARGUMENTS_TYPE: {
+      Handle<JSObject> object =
+          isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED);
+      slot->value_ = object;
+      Handle<Object> properties = materializer.FieldAt(value_index);
+      Handle<Object> elements = materializer.FieldAt(value_index);
+      object->set_properties(FixedArray::cast(*properties));
+      object->set_elements(FixedArrayBase::cast(*elements));
+      for (int i = 0; i < length - 3; ++i) {
+        Handle<Object> value = materializer.FieldAt(value_index);
+        FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
+        object->FastPropertyAtPut(index, *value);
+      }
+      return object;
+    }
+    case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+    case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE: {
+      Handle<JSArrayIterator> object = Handle<JSArrayIterator>::cast(
+          isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+      slot->value_ = object;
+      // Initialize the index to zero to make the heap verifier happy.
+      object->set_index(Smi::FromInt(0));
+      Handle<Object> properties = materializer.FieldAt(value_index);
+      Handle<Object> elements = materializer.FieldAt(value_index);
+      Handle<Object> iterated_object = materializer.FieldAt(value_index);
+      Handle<Object> next_index = materializer.FieldAt(value_index);
+      Handle<Object> iterated_object_map = materializer.FieldAt(value_index);
+      object->set_properties(FixedArray::cast(*properties));
+      object->set_elements(FixedArrayBase::cast(*elements));
+      object->set_object(*iterated_object);
+      object->set_index(*next_index);
+      object->set_object_map(*iterated_object_map);
+      return object;
+    }
+    case JS_STRING_ITERATOR_TYPE: {
+      Handle<JSStringIterator> object = Handle<JSStringIterator>::cast(
+          isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+      slot->value_ = object;
+      // Initialize the index to zero to make the heap verifier happy.
+      object->set_index(0);
+      Handle<Object> properties = materializer.FieldAt(value_index);
+      Handle<Object> elements = materializer.FieldAt(value_index);
+      Handle<Object> iterated_string = materializer.FieldAt(value_index);
+      Handle<Object> next_index = materializer.FieldAt(value_index);
+      object->set_properties(FixedArray::cast(*properties));
+      object->set_elements(FixedArrayBase::cast(*elements));
+      CHECK(iterated_string->IsString());
+      object->set_string(String::cast(*iterated_string));
+      CHECK(next_index->IsSmi());
+      object->set_index(Smi::cast(*next_index)->value());
+      return object;
+    }
+    case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE: {
+      Handle<JSAsyncFromSyncIterator> object =
+          Handle<JSAsyncFromSyncIterator>::cast(
+              isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+      slot->value_ = object;
+      Handle<Object> properties = materializer.FieldAt(value_index);
+      Handle<Object> elements = materializer.FieldAt(value_index);
+      Handle<Object> sync_iterator = materializer.FieldAt(value_index);
+      object->set_properties(FixedArray::cast(*properties));
+      object->set_elements(FixedArrayBase::cast(*elements));
+      object->set_sync_iterator(JSReceiver::cast(*sync_iterator));
+      return object;
+    }
+    case JS_ARRAY_TYPE: {
+      Handle<JSArray> object = Handle<JSArray>::cast(
+          isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+      slot->value_ = object;
+      Handle<Object> properties = materializer.FieldAt(value_index);
+      Handle<Object> elements = materializer.FieldAt(value_index);
+      Handle<Object> length = materializer.FieldAt(value_index);
+      object->set_properties(FixedArray::cast(*properties));
+      object->set_elements(FixedArrayBase::cast(*elements));
+      object->set_length(*length);
+      return object;
+    }
+    case JS_FUNCTION_TYPE: {
+      Handle<SharedFunctionInfo> temporary_shared =
+          isolate_->factory()->NewSharedFunctionInfo(
+              isolate_->factory()->empty_string(), MaybeHandle<Code>(), false);
+      Handle<JSFunction> object =
+          isolate_->factory()->NewFunctionFromSharedFunctionInfo(
+              map, temporary_shared, isolate_->factory()->undefined_value(),
+              NOT_TENURED);
+      slot->value_ = object;
+      Handle<Object> properties = materializer.FieldAt(value_index);
+      Handle<Object> elements = materializer.FieldAt(value_index);
+      Handle<Object> prototype = materializer.FieldAt(value_index);
+      Handle<Object> shared = materializer.FieldAt(value_index);
+      Handle<Object> context = materializer.FieldAt(value_index);
+      Handle<Object> vector_cell = materializer.FieldAt(value_index);
+      Handle<Object> entry = materializer.FieldAt(value_index);
+      Handle<Object> next_link = materializer.FieldAt(value_index);
+      object->ReplaceCode(*isolate_->builtins()->CompileLazy());
+      object->set_map(*map);
+      object->set_properties(FixedArray::cast(*properties));
+      object->set_elements(FixedArrayBase::cast(*elements));
+      object->set_prototype_or_initial_map(*prototype);
+      object->set_shared(SharedFunctionInfo::cast(*shared));
+      object->set_context(Context::cast(*context));
+      object->set_feedback_vector_cell(Cell::cast(*vector_cell));
+      CHECK(entry->IsNumber());  // Entry to compile lazy stub.
+      CHECK(next_link->IsUndefined(isolate_));
+      return object;
+    }
+    case CONS_STRING_TYPE: {
+      Handle<ConsString> object = Handle<ConsString>::cast(
+          isolate_->factory()
+              ->NewConsString(isolate_->factory()->undefined_string(),
+                              isolate_->factory()->undefined_string())
+              .ToHandleChecked());
+      slot->value_ = object;
+      Handle<Object> hash = materializer.FieldAt(value_index);
+      Handle<Object> length = materializer.FieldAt(value_index);
+      Handle<Object> first = materializer.FieldAt(value_index);
+      Handle<Object> second = materializer.FieldAt(value_index);
+      object->set_map(*map);
+      object->set_length(Smi::cast(*length)->value());
+      object->set_first(String::cast(*first));
+      object->set_second(String::cast(*second));
+      CHECK(hash->IsNumber());  // The {Name::kEmptyHashField} value.
+      return object;
+    }
+    case CONTEXT_EXTENSION_TYPE: {
+      Handle<ContextExtension> object =
+          isolate_->factory()->NewContextExtension(
+              isolate_->factory()->NewScopeInfo(1),
+              isolate_->factory()->undefined_value());
+      slot->value_ = object;
+      Handle<Object> scope_info = materializer.FieldAt(value_index);
+      Handle<Object> extension = materializer.FieldAt(value_index);
+      object->set_scope_info(ScopeInfo::cast(*scope_info));
+      object->set_extension(*extension);
+      return object;
+    }
+    case FIXED_ARRAY_TYPE: {
+      Handle<Object> lengthObject = materializer.FieldAt(value_index);
+      int32_t length = 0;
+      CHECK(lengthObject->ToInt32(&length));
+      Handle<FixedArray> object = isolate_->factory()->NewFixedArray(length);
+      // We need to set the map, because the fixed array we are
+      // materializing could be a context or an arguments object,
+      // in which case we must retain that information.
+      object->set_map(*map);
+      slot->value_ = object;
+      for (int i = 0; i < length; ++i) {
+        Handle<Object> value = materializer.FieldAt(value_index);
+        object->set(i, *value);
+      }
+      return object;
+    }
+    case FIXED_DOUBLE_ARRAY_TYPE: {
+      DCHECK_EQ(*map, isolate_->heap()->fixed_double_array_map());
+      Handle<Object> lengthObject = materializer.FieldAt(value_index);
+      int32_t length = 0;
+      CHECK(lengthObject->ToInt32(&length));
+      Handle<FixedArrayBase> object =
+          isolate_->factory()->NewFixedDoubleArray(length);
+      slot->value_ = object;
+      if (length > 0) {
+        Handle<FixedDoubleArray> double_array =
+            Handle<FixedDoubleArray>::cast(object);
+        for (int i = 0; i < length; ++i) {
+          Handle<Object> value = materializer.FieldAt(value_index);
+          CHECK(value->IsNumber());
+          if (value.is_identical_to(isolate_->factory()->hole_nan_value())) {
+            double_array->set_the_hole(isolate_, i);
+          } else {
+            double_array->set(i, value->Number());
+          }
+        }
+      }
+      return object;
+    }
+    case STRING_TYPE:
+    case ONE_BYTE_STRING_TYPE:
+    case CONS_ONE_BYTE_STRING_TYPE:
+    case SLICED_STRING_TYPE:
+    case SLICED_ONE_BYTE_STRING_TYPE:
+    case EXTERNAL_STRING_TYPE:
+    case EXTERNAL_ONE_BYTE_STRING_TYPE:
+    case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+    case SHORT_EXTERNAL_STRING_TYPE:
+    case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
+    case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+    case THIN_STRING_TYPE:
+    case THIN_ONE_BYTE_STRING_TYPE:
+    case INTERNALIZED_STRING_TYPE:
+    case ONE_BYTE_INTERNALIZED_STRING_TYPE:
+    case EXTERNAL_INTERNALIZED_STRING_TYPE:
+    case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+    case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+    case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
+    case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+    case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+    case SYMBOL_TYPE:
+    case ODDBALL_TYPE:
+    case JS_GLOBAL_OBJECT_TYPE:
+    case JS_GLOBAL_PROXY_TYPE:
+    case JS_API_OBJECT_TYPE:
+    case JS_SPECIAL_API_OBJECT_TYPE:
+    case JS_VALUE_TYPE:
+    case JS_MESSAGE_OBJECT_TYPE:
+    case JS_DATE_TYPE:
+    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+    case JS_GENERATOR_OBJECT_TYPE:
+    case JS_MODULE_NAMESPACE_TYPE:
+    case JS_ARRAY_BUFFER_TYPE:
+    case JS_REGEXP_TYPE:
+    case JS_TYPED_ARRAY_TYPE:
+    case JS_DATA_VIEW_TYPE:
+    case JS_SET_TYPE:
+    case JS_MAP_TYPE:
+    case JS_SET_ITERATOR_TYPE:
+    case JS_MAP_ITERATOR_TYPE:
+    case JS_WEAK_MAP_TYPE:
+    case JS_WEAK_SET_TYPE:
+    case JS_PROMISE_CAPABILITY_TYPE:
+    case JS_PROMISE_TYPE:
+    case JS_BOUND_FUNCTION_TYPE:
+    case JS_PROXY_TYPE:
+    case MAP_TYPE:
+    case ALLOCATION_SITE_TYPE:
+    case ACCESSOR_INFO_TYPE:
+    case SHARED_FUNCTION_INFO_TYPE:
+    case FUNCTION_TEMPLATE_INFO_TYPE:
+    case ACCESSOR_PAIR_TYPE:
+    case BYTE_ARRAY_TYPE:
+    case BYTECODE_ARRAY_TYPE:
+    case TRANSITION_ARRAY_TYPE:
+    case FOREIGN_TYPE:
+    case SCRIPT_TYPE:
+    case CODE_TYPE:
+    case PROPERTY_CELL_TYPE:
+    case MODULE_TYPE:
+    case MODULE_INFO_ENTRY_TYPE:
+    case FREE_SPACE_TYPE:
+#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case FIXED_##TYPE##_ARRAY_TYPE:
+      TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
+#undef FIXED_TYPED_ARRAY_CASE
+    case FILLER_TYPE:
+    case ACCESS_CHECK_INFO_TYPE:
+    case INTERCEPTOR_INFO_TYPE:
+    case CALL_HANDLER_INFO_TYPE:
+    case OBJECT_TEMPLATE_INFO_TYPE:
+    case ALLOCATION_MEMENTO_TYPE:
+    case TYPE_FEEDBACK_INFO_TYPE:
+    case ALIASED_ARGUMENTS_ENTRY_TYPE:
+    case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
+    case PROMISE_REACTION_JOB_INFO_TYPE:
+    case DEBUG_INFO_TYPE:
+    case BREAK_POINT_INFO_TYPE:
+    case CELL_TYPE:
+    case WEAK_CELL_TYPE:
+    case PROTOTYPE_INFO_TYPE:
+    case TUPLE2_TYPE:
+    case TUPLE3_TYPE:
+    case CONSTANT_ELEMENTS_PAIR_TYPE:
+      OFStream os(stderr);
+      os << "[couldn't handle instance type " << map->instance_type() << "]"
+         << std::endl;
+      UNREACHABLE();
+      break;
+  }
+  UNREACHABLE();
+  return Handle<Object>::null();
+}
 
 Handle<Object> TranslatedState::MaterializeAt(int frame_index,
                                               int* value_index) {
+  CHECK_LT(static_cast<size_t>(frame_index), frames().size());
   TranslatedFrame* frame = &(frames_[frame_index]);
-  CHECK(static_cast<size_t>(*value_index) < frame->values_.size());
+  CHECK_LT(static_cast<size_t>(*value_index), frame->values_.size());
 
   TranslatedValue* slot = &(frame->values_[*value_index]);
   (*value_index)++;
@@ -3670,176 +4060,11 @@
       return arguments;
     }
     case TranslatedValue::kCapturedObject: {
-      int length = slot->GetChildrenCount();
-
       // The map must be a tagged object.
       CHECK(frame->values_[*value_index].kind() == TranslatedValue::kTagged);
-
-      Handle<Object> result;
-      if (slot->value_.ToHandle(&result)) {
-        // This has been previously materialized, return the previous value.
-        // We still need to skip all the nested objects.
-        for (int i = 0; i < length; i++) {
-          MaterializeAt(frame_index, value_index);
-        }
-
-        return result;
-      }
-
-      Handle<Object> map_object = MaterializeAt(frame_index, value_index);
-      Handle<Map> map =
-          Map::GeneralizeAllFieldRepresentations(Handle<Map>::cast(map_object));
-      switch (map->instance_type()) {
-        case MUTABLE_HEAP_NUMBER_TYPE:
-        case HEAP_NUMBER_TYPE: {
-          // Reuse the HeapNumber value directly as it is already properly
-          // tagged and skip materializing the HeapNumber explicitly.
-          Handle<Object> object = MaterializeAt(frame_index, value_index);
-          slot->value_ = object;
-          // On 32-bit architectures, there is an extra slot there because
-          // the escape analysis calculates the number of slots as
-          // object-size/pointer-size. To account for this, we read out
-          // any extra slots.
-          for (int i = 0; i < length - 2; i++) {
-            MaterializeAt(frame_index, value_index);
-          }
-          return object;
-        }
-        case JS_OBJECT_TYPE:
-        case JS_ERROR_TYPE:
-        case JS_ARGUMENTS_TYPE: {
-          Handle<JSObject> object =
-              isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED);
-          slot->value_ = object;
-          Handle<Object> properties = MaterializeAt(frame_index, value_index);
-          Handle<Object> elements = MaterializeAt(frame_index, value_index);
-          object->set_properties(FixedArray::cast(*properties));
-          object->set_elements(FixedArrayBase::cast(*elements));
-          for (int i = 0; i < length - 3; ++i) {
-            Handle<Object> value = MaterializeAt(frame_index, value_index);
-            FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
-            object->FastPropertyAtPut(index, *value);
-          }
-          return object;
-        }
-        case JS_ARRAY_TYPE: {
-          Handle<JSArray> object = Handle<JSArray>::cast(
-              isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
-          slot->value_ = object;
-          Handle<Object> properties = MaterializeAt(frame_index, value_index);
-          Handle<Object> elements = MaterializeAt(frame_index, value_index);
-          Handle<Object> length = MaterializeAt(frame_index, value_index);
-          object->set_properties(FixedArray::cast(*properties));
-          object->set_elements(FixedArrayBase::cast(*elements));
-          object->set_length(*length);
-          return object;
-        }
-        case JS_FUNCTION_TYPE: {
-          Handle<SharedFunctionInfo> temporary_shared =
-              isolate_->factory()->NewSharedFunctionInfo(
-                  isolate_->factory()->empty_string(), MaybeHandle<Code>(),
-                  false);
-          Handle<JSFunction> object =
-              isolate_->factory()->NewFunctionFromSharedFunctionInfo(
-                  map, temporary_shared, isolate_->factory()->undefined_value(),
-                  NOT_TENURED);
-          slot->value_ = object;
-          Handle<Object> properties = MaterializeAt(frame_index, value_index);
-          Handle<Object> elements = MaterializeAt(frame_index, value_index);
-          Handle<Object> prototype = MaterializeAt(frame_index, value_index);
-          Handle<Object> shared = MaterializeAt(frame_index, value_index);
-          Handle<Object> context = MaterializeAt(frame_index, value_index);
-          Handle<Object> literals = MaterializeAt(frame_index, value_index);
-          Handle<Object> entry = MaterializeAt(frame_index, value_index);
-          Handle<Object> next_link = MaterializeAt(frame_index, value_index);
-          object->ReplaceCode(*isolate_->builtins()->CompileLazy());
-          object->set_map(*map);
-          object->set_properties(FixedArray::cast(*properties));
-          object->set_elements(FixedArrayBase::cast(*elements));
-          object->set_prototype_or_initial_map(*prototype);
-          object->set_shared(SharedFunctionInfo::cast(*shared));
-          object->set_context(Context::cast(*context));
-          object->set_literals(LiteralsArray::cast(*literals));
-          CHECK(entry->IsNumber());  // Entry to compile lazy stub.
-          CHECK(next_link->IsUndefined(isolate_));
-          return object;
-        }
-        case CONS_STRING_TYPE: {
-          Handle<ConsString> object = Handle<ConsString>::cast(
-              isolate_->factory()
-                  ->NewConsString(isolate_->factory()->undefined_string(),
-                                  isolate_->factory()->undefined_string())
-                  .ToHandleChecked());
-          slot->value_ = object;
-          Handle<Object> hash = MaterializeAt(frame_index, value_index);
-          Handle<Object> length = MaterializeAt(frame_index, value_index);
-          Handle<Object> first = MaterializeAt(frame_index, value_index);
-          Handle<Object> second = MaterializeAt(frame_index, value_index);
-          object->set_map(*map);
-          object->set_length(Smi::cast(*length)->value());
-          object->set_first(String::cast(*first));
-          object->set_second(String::cast(*second));
-          CHECK(hash->IsNumber());  // The {Name::kEmptyHashField} value.
-          return object;
-        }
-        case CONTEXT_EXTENSION_TYPE: {
-          Handle<ContextExtension> object =
-              isolate_->factory()->NewContextExtension(
-                  isolate_->factory()->NewScopeInfo(1),
-                  isolate_->factory()->undefined_value());
-          slot->value_ = object;
-          Handle<Object> scope_info = MaterializeAt(frame_index, value_index);
-          Handle<Object> extension = MaterializeAt(frame_index, value_index);
-          object->set_scope_info(ScopeInfo::cast(*scope_info));
-          object->set_extension(*extension);
-          return object;
-        }
-        case FIXED_ARRAY_TYPE: {
-          Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
-          int32_t length = 0;
-          CHECK(lengthObject->ToInt32(&length));
-          Handle<FixedArray> object =
-              isolate_->factory()->NewFixedArray(length);
-          // We need to set the map, because the fixed array we are
-          // materializing could be a context or an arguments object,
-          // in which case we must retain that information.
-          object->set_map(*map);
-          slot->value_ = object;
-          for (int i = 0; i < length; ++i) {
-            Handle<Object> value = MaterializeAt(frame_index, value_index);
-            object->set(i, *value);
-          }
-          return object;
-        }
-        case FIXED_DOUBLE_ARRAY_TYPE: {
-          DCHECK_EQ(*map, isolate_->heap()->fixed_double_array_map());
-          Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
-          int32_t length = 0;
-          CHECK(lengthObject->ToInt32(&length));
-          Handle<FixedArrayBase> object =
-              isolate_->factory()->NewFixedDoubleArray(length);
-          slot->value_ = object;
-          if (length > 0) {
-            Handle<FixedDoubleArray> double_array =
-                Handle<FixedDoubleArray>::cast(object);
-            for (int i = 0; i < length; ++i) {
-              Handle<Object> value = MaterializeAt(frame_index, value_index);
-              CHECK(value->IsNumber());
-              double_array->set(i, value->Number());
-            }
-          }
-          return object;
-        }
-        default:
-          PrintF(stderr, "[couldn't handle instance type %d]\n",
-                 map->instance_type());
-          FATAL("unreachable");
-          return Handle<Object>::null();
-      }
-      UNREACHABLE();
-      break;
+      CHECK(frame->values_[*value_index].GetValue()->IsMap());
+      return MaterializeCapturedObjectAt(slot, frame_index, value_index);
     }
-
     case TranslatedValue::kDuplicatedObject: {
       int object_index = slot->object_index();
       TranslatedState::ObjectPosition pos = object_positions_[object_index];
@@ -3869,13 +4094,12 @@
   return Handle<Object>::null();
 }
 
-
 Handle<Object> TranslatedState::MaterializeObjectAt(int object_index) {
+  CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
   TranslatedState::ObjectPosition pos = object_positions_[object_index];
   return MaterializeAt(pos.frame_index_, &(pos.value_index_));
 }
 
-
 bool TranslatedState::GetAdaptedArguments(Handle<JSObject>* result,
                                           int frame_index) {
   if (frame_index == 0) {
@@ -3915,7 +4139,6 @@
   }
 }
 
-
 TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
     int jsframe_index, int* args_count) {
   for (size_t i = 0; i < frames_.size(); i++) {
@@ -3924,7 +4147,8 @@
       if (jsframe_index > 0) {
         jsframe_index--;
       } else {
-        // We have the JS function frame, now check if it has arguments adaptor.
+        // We have the JS function frame; now check whether it has an
+        // arguments adaptor.
         if (i > 0 &&
             frames_[i - 1].kind() == TranslatedFrame::kArgumentsAdaptor) {
           *args_count = frames_[i - 1].height();
@@ -3939,8 +4163,7 @@
   return nullptr;
 }
 
-
-void TranslatedState::StoreMaterializedValuesAndDeopt() {
+void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
   MaterializedObjectStore* materialized_store =
       isolate_->materialized_object_store();
   Handle<FixedArray> previously_materialized_objects =
@@ -3986,12 +4209,11 @@
     CHECK(frames_[0].kind() == TranslatedFrame::kFunction ||
           frames_[0].kind() == TranslatedFrame::kInterpretedFunction ||
           frames_[0].kind() == TranslatedFrame::kTailCallerFunction);
-    Object* const function = frames_[0].front().GetRawValue();
-    Deoptimizer::DeoptimizeFunction(JSFunction::cast(function));
+    CHECK_EQ(frame->function(), frames_[0].front().GetRawValue());
+    Deoptimizer::DeoptimizeFunction(frame->function(), frame->LookupCode());
   }
 }
 
-
 void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
   MaterializedObjectStore* materialized_store =
       isolate_->materialized_object_store();
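
Aside, not part of the patch: the FIXED_DOUBLE_ARRAY case above now re-creates holes by comparing the materialized value against the hole sentinel, which V8 encodes as one reserved NaN bit pattern (see Float64::is_hole_nan() in the header below). Because NaN compares unequal to everything, including itself, the test has to be bitwise. A sketch, with an illustrative constant standing in for the real kHoleNanInt64:

    #include <cstdint>
    #include <cstring>

    // Illustrative sentinel only; the real constant is V8's kHoleNanInt64.
    constexpr uint64_t kHoleNanBits = 0xFFF7FFFFFFF7FFFFULL;

    bool IsHole(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      // A scalar comparison (value == hole) is always false for NaN,
      // so compare the raw bit pattern instead.
      return bits == kHoleNanBits;
    }
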
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 4d84fb7..5501ca6 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -20,6 +20,37 @@
 class TranslatedState;
 class RegisterValues;
 
+// Safety wrapper for a 32-bit floating-point value to make sure we don't lose
+// the exact bit pattern during deoptimization when passing this value. Note
+// that there is intentionally no way to construct it from a {float} value.
+class Float32 {
+ public:
+  Float32() : bit_pattern_(0) {}
+  uint32_t get_bits() const { return bit_pattern_; }
+  float get_scalar() const { return bit_cast<float>(bit_pattern_); }
+  static Float32 FromBits(uint32_t bits) { return Float32(bits); }
+
+ private:
+  explicit Float32(uint32_t bit_pattern) : bit_pattern_(bit_pattern) {}
+  uint32_t bit_pattern_;
+};
+
+// Safety wrapper for a 64-bit floating-point value to make sure we don't lose
+// the exact bit pattern during deoptimization when passing this value. Note
+// that there is intentionally no way to construct it from a {double} value.
+class Float64 {
+ public:
+  Float64() : bit_pattern_(0) {}
+  uint64_t get_bits() const { return bit_pattern_; }
+  double get_scalar() const { return bit_cast<double>(bit_pattern_); }
+  bool is_hole_nan() const { return bit_pattern_ == kHoleNanInt64; }
+  static Float64 FromBits(uint64_t bits) { return Float64(bits); }
+
+ private:
+  explicit Float64(uint64_t bit_pattern) : bit_pattern_(bit_pattern) {}
+  uint64_t bit_pattern_;
+};
+
 class TranslatedValue {
  public:
   // Allocation-less getter of the value.
@@ -64,8 +95,8 @@
   static TranslatedValue NewDeferredObject(TranslatedState* container,
                                            int length, int object_index);
   static TranslatedValue NewDuplicateObject(TranslatedState* container, int id);
-  static TranslatedValue NewFloat(TranslatedState* container, float value);
-  static TranslatedValue NewDouble(TranslatedState* container, double value);
+  static TranslatedValue NewFloat(TranslatedState* container, Float32 value);
+  static TranslatedValue NewDouble(TranslatedState* container, Float64 value);
   static TranslatedValue NewInt32(TranslatedState* container, int32_t value);
   static TranslatedValue NewUInt32(TranslatedState* container, uint32_t value);
   static TranslatedValue NewBool(TranslatedState* container, uint32_t value);
@@ -98,9 +129,9 @@
     // kind is kInt32.
     int32_t int32_value_;
     // kind is kFloat
-    float float_value_;
+    Float32 float_value_;
     // kind is kDouble
-    double double_value_;
+    Float64 double_value_;
     // kind is kDuplicatedObject or kArgumentsObject or kCapturedObject.
     MaterializedObjectInfo materialization_info_;
   };
@@ -109,8 +140,8 @@
   Object* raw_literal() const;
   int32_t int32_value() const;
   uint32_t uint32_value() const;
-  float float_value() const;
-  double double_value() const;
+  Float32 float_value() const;
+  Float64 double_value() const;
   int object_length() const;
   int object_index() const;
 };
@@ -195,7 +226,8 @@
   static TranslatedFrame ArgumentsAdaptorFrame(SharedFunctionInfo* shared_info,
                                                int height);
   static TranslatedFrame TailCallerFrame(SharedFunctionInfo* shared_info);
-  static TranslatedFrame ConstructStubFrame(SharedFunctionInfo* shared_info,
+  static TranslatedFrame ConstructStubFrame(BailoutId bailout_id,
+                                            SharedFunctionInfo* shared_info,
                                             int height);
   static TranslatedFrame CompiledStubFrame(int height, Isolate* isolate) {
     return TranslatedFrame(kCompiledStub, isolate, nullptr, height);
@@ -254,7 +286,7 @@
   void Prepare(bool has_adapted_arguments, Address stack_frame_pointer);
 
   // Store newly materialized values into the isolate.
-  void StoreMaterializedValuesAndDeopt();
+  void StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame);
 
   typedef std::vector<TranslatedFrame>::iterator iterator;
   iterator begin() { return frames_.begin(); }
@@ -292,9 +324,14 @@
   void UpdateFromPreviouslyMaterializedObjects();
   Handle<Object> MaterializeAt(int frame_index, int* value_index);
   Handle<Object> MaterializeObjectAt(int object_index);
+  class CapturedObjectMaterializer;
+  Handle<Object> MaterializeCapturedObjectAt(TranslatedValue* slot,
+                                             int frame_index, int* value_index);
   bool GetAdaptedArguments(Handle<JSObject>* result, int frame_index);
 
   static uint32_t GetUInt32Slot(Address fp, int slot_index);
+  static Float32 GetFloatSlot(Address fp, int slot_index);
+  static Float64 GetDoubleSlot(Address fp, int slot_index);
 
   std::vector<TranslatedFrame> frames_;
   Isolate* isolate_;
@@ -419,8 +456,9 @@
 
   // Deoptimize the function now. Its current optimized code will never be run
   // again and any activations of the optimized code will get deoptimized when
-  // execution returns.
-  static void DeoptimizeFunction(JSFunction* function);
+  // execution returns. If {code} is specified, the given code is targeted
+  // instead of the function's code (e.g. OSR code that is not installed on
+  // the function).
+  static void DeoptimizeFunction(JSFunction* function, Code* code = nullptr);
 
   // Deoptimize all code in the given isolate.
   static void DeoptimizeAll(Isolate* isolate);
@@ -648,12 +686,12 @@
     return registers_[n];
   }
 
-  float GetFloatRegister(unsigned n) const {
+  Float32 GetFloatRegister(unsigned n) const {
     DCHECK(n < arraysize(float_registers_));
     return float_registers_[n];
   }
 
-  double GetDoubleRegister(unsigned n) const {
+  Float64 GetDoubleRegister(unsigned n) const {
     DCHECK(n < arraysize(double_registers_));
     return double_registers_[n];
   }
@@ -663,19 +701,24 @@
     registers_[n] = value;
   }
 
-  void SetFloatRegister(unsigned n, float value) {
+  void SetFloatRegister(unsigned n, Float32 value) {
     DCHECK(n < arraysize(float_registers_));
     float_registers_[n] = value;
   }
 
-  void SetDoubleRegister(unsigned n, double value) {
+  void SetDoubleRegister(unsigned n, Float64 value) {
     DCHECK(n < arraysize(double_registers_));
     double_registers_[n] = value;
   }
 
+  // Generated code writes directly into the arrays below, so make sure their
+  // element sizes match what the machine instructions expect.
+  static_assert(sizeof(Float32) == kFloatSize, "size mismatch");
+  static_assert(sizeof(Float64) == kDoubleSize, "size mismatch");
+
   intptr_t registers_[Register::kNumRegisters];
-  float float_registers_[FloatRegister::kMaxNumRegisters];
-  double double_registers_[DoubleRegister::kMaxNumRegisters];
+  Float32 float_registers_[FloatRegister::kMaxNumRegisters];
+  Float64 double_registers_[DoubleRegister::kMaxNumRegisters];
 };
 
 
@@ -728,7 +771,7 @@
     return register_values_.GetRegister(n);
   }
 
-  double GetDoubleRegister(unsigned n) const {
+  Float64 GetDoubleRegister(unsigned n) const {
     return register_values_.GetDoubleRegister(n);
   }
 
@@ -736,7 +779,7 @@
     register_values_.SetRegister(n, value);
   }
 
-  void SetDoubleRegister(unsigned n, double value) {
+  void SetDoubleRegister(unsigned n, Float64 value) {
     register_values_.SetDoubleRegister(n, value);
   }
 
@@ -932,7 +975,8 @@
   void BeginCompiledStubFrame(int height);
   void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
   void BeginTailCallerFrame(int literal_id);
-  void BeginConstructStubFrame(int literal_id, unsigned height);
+  void BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
+                               unsigned height);
   void BeginGetterStubFrame(int literal_id);
   void BeginSetterStubFrame(int literal_id);
   void BeginArgumentsObject(int args_length);
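
Aside, not part of the patch: the static_asserts added to RegisterValues above exist because generated code stores machine float/double values straight into float_registers_/double_registers_, so the wrappers must be layout-compatible with the raw types. A single-member standard-layout class is, and the asserts pin that down. A compact illustration:

    #include <cstdint>
    #include <type_traits>

    class Float64 {  // same shape as the wrapper in this header
     public:
      uint64_t get_bits() const { return bit_pattern_; }
     private:
      uint64_t bit_pattern_ = 0;
    };

    static_assert(sizeof(Float64) == sizeof(double), "size mismatch");
    static_assert(std::is_standard_layout<Float64>::value,
                  "generated code treats the array as raw 8-byte slots");
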
diff --git a/src/disassembler.cc b/src/disassembler.cc
index 7036e1b..59accc1 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -6,6 +6,7 @@
 
 #include <memory>
 
+#include "src/assembler-inl.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/debug/debug.h"
@@ -13,6 +14,7 @@
 #include "src/disasm.h"
 #include "src/ic/ic.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 #include "src/snapshot/serializer-common.h"
 #include "src/string-stream.h"
 
@@ -201,11 +203,6 @@
         Code* code = Code::GetCodeFromTargetAddress(relocinfo.target_address());
         Code::Kind kind = code->kind();
         if (code->is_inline_cache_stub()) {
-          if (kind == Code::LOAD_GLOBAL_IC &&
-              LoadGlobalICState::GetTypeofMode(code->extra_ic_state()) ==
-                  INSIDE_TYPEOF) {
-            out.AddFormatted(" inside typeof,");
-          }
           out.AddFormatted(" %s", Code::Kind2String(kind));
           if (!IC::ICUseVector(kind)) {
             InlineCacheState ic_state = IC::StateFromCode(code);
diff --git a/src/eh-frame.h b/src/eh-frame.h
index 3da4612..bd064eb 100644
--- a/src/eh-frame.h
+++ b/src/eh-frame.h
@@ -8,6 +8,7 @@
 #include "src/base/compiler-specific.h"
 #include "src/globals.h"
 #include "src/macro-assembler.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/elements-kind.cc b/src/elements-kind.cc
index 7bb75c4..8651b76 100644
--- a/src/elements-kind.cc
+++ b/src/elements-kind.cc
@@ -7,6 +7,7 @@
 #include "src/api.h"
 #include "src/base/lazy-instance.h"
 #include "src/elements.h"
+#include "src/objects-inl.h"
 #include "src/objects.h"
 
 namespace v8 {
diff --git a/src/elements.cc b/src/elements.cc
index ccbdb40..d5acb66 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -185,14 +185,15 @@
   WriteBarrierMode write_barrier_mode = IsFastObjectElementsKind(to_kind)
                                             ? UPDATE_WRITE_BARRIER
                                             : SKIP_WRITE_BARRIER;
+  Isolate* isolate = from->GetIsolate();
   for (int i = 0; i < copy_size; i++) {
-    int entry = from->FindEntry(i + from_start);
+    int entry = from->FindEntry(isolate, i + from_start);
     if (entry != SeededNumberDictionary::kNotFound) {
       Object* value = from->ValueAt(entry);
-      DCHECK(!value->IsTheHole(from->GetIsolate()));
+      DCHECK(!value->IsTheHole(isolate));
       to->set(i + to_start, value, write_barrier_mode);
     } else {
-      to->set_the_hole(i + to_start);
+      to->set_the_hole(isolate, i + to_start);
     }
   }
 }
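
Aside, not part of the patch: this hunk, like several more in this file, hoists the GetIsolate() lookup out of the per-element loop and passes the isolate explicitly to FindEntry/set_the_hole. The shape of the change, restated with hypothetical stand-in types:

    // Before: from->FindEntry(i + from_start) fetched the isolate (or other
    // loop-invariant state) internally on every iteration.
    // After: fetch it once, then thread it through the loop explicitly.
    struct Isolate {};
    struct Dictionary {
      Isolate* GetIsolate() const { return isolate_; }
      int FindEntry(Isolate*, int key) const { return key; }  // stub
      Isolate* isolate_ = nullptr;
    };

    void CopyLoop(Dictionary* from, int from_start, int copy_size) {
      Isolate* isolate = from->GetIsolate();  // hoisted: loop-invariant
      for (int i = 0; i < copy_size; i++) {
        int entry = from->FindEntry(isolate, i + from_start);
        (void)entry;
      }
    }
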
@@ -416,8 +417,9 @@
   if (to_start + copy_size > to_length) {
     copy_size = to_length - to_start;
   }
+  Isolate* isolate = from->GetIsolate();
   for (int i = 0; i < copy_size; i++) {
-    int entry = from->FindEntry(i + from_start);
+    int entry = from->FindEntry(isolate, i + from_start);
     if (entry != SeededNumberDictionary::kNotFound) {
       to->set(i + to_start, from->ValueAt(entry)->Number());
     } else {
@@ -603,7 +605,7 @@
   static bool HasElementImpl(Isolate* isolate, Handle<JSObject> holder,
                              uint32_t index,
                              Handle<FixedArrayBase> backing_store,
-                             PropertyFilter filter) {
+                             PropertyFilter filter = ALL_PROPERTIES) {
     return Subclass::GetEntryForIndexImpl(isolate, *holder, *backing_store,
                                           index, filter) != kMaxUInt32;
   }
@@ -618,15 +620,16 @@
   }
 
   Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) final {
-    return Subclass::GetImpl(holder, entry);
+    return Subclass::GetInternalImpl(holder, entry);
   }
 
-  static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
-    return Subclass::GetImpl(holder->elements(), entry);
+  static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
+                                        uint32_t entry) {
+    return Subclass::GetImpl(holder->GetIsolate(), holder->elements(), entry);
   }
 
-  static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
-    Isolate* isolate = backing_store->GetIsolate();
+  static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
+                                uint32_t entry) {
     uint32_t index = GetIndexForEntryImpl(backing_store, entry);
     return handle(BackingStore::cast(backing_store)->get(index), isolate);
   }
@@ -758,13 +761,10 @@
       }
       if (2 * length <= capacity) {
         // If more than half the elements won't be used, trim the array.
-        isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
-            *backing_store, capacity - length);
+        isolate->heap()->RightTrimFixedArray(*backing_store, capacity - length);
       } else {
         // Otherwise, fill the unused tail with holes.
-        for (uint32_t i = length; i < old_length; i++) {
-          BackingStore::cast(*backing_store)->set_the_hole(i);
-        }
+        BackingStore::cast(*backing_store)->FillWithHoles(length, old_length);
       }
     } else {
       // Check whether the backing store should be expanded.
@@ -1034,7 +1034,7 @@
       PropertyDetails details = Subclass::GetDetailsImpl(*object, entry);
 
       if (details.kind() == kData) {
-        value = Subclass::GetImpl(object, entry);
+        value = Subclass::GetImpl(isolate, object->elements(), entry);
       } else {
         LookupIterator it(isolate, object, index, LookupIterator::OWN);
         ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -1248,17 +1248,28 @@
 
   static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
                                         uint32_t entry) {
-    return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
+    return PropertyDetails(kData, NONE, 0, PropertyCellType::kNoCell);
   }
 
   static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
-    return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
+    return PropertyDetails(kData, NONE, 0, PropertyCellType::kNoCell);
   }
 
   PropertyDetails GetDetails(JSObject* holder, uint32_t entry) final {
     return Subclass::GetDetailsImpl(holder, entry);
   }
 
+  Handle<FixedArray> CreateListFromArray(Isolate* isolate,
+                                         Handle<JSArray> array) final {
+    return Subclass::CreateListFromArrayImpl(isolate, array);
+  }
+
+  static Handle<FixedArray> CreateListFromArrayImpl(Isolate* isolate,
+                                                    Handle<JSArray> array) {
+    UNREACHABLE();
+    return Handle<FixedArray>();
+  }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(ElementsAccessorBase);
 };
@@ -1374,7 +1385,7 @@
       if (!dict->IsKey(isolate, key)) continue;
       DCHECK(!dict->IsDeleted(i));
       PropertyDetails details = dict->DetailsAt(i);
-      if (details.type() == ACCESSOR_CONSTANT) return true;
+      if (details.kind() == kAccessor) return true;
     }
     return false;
   }
@@ -1384,12 +1395,9 @@
     return backing_store->ValueAt(entry);
   }
 
-  static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
-    return GetImpl(holder->elements(), entry);
-  }
-
-  static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
-    return handle(GetRaw(backing_store, entry), backing_store->GetIsolate());
+  static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
+                                uint32_t entry) {
+    return handle(GetRaw(backing_store, entry), isolate);
   }
 
   static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
@@ -1410,7 +1418,7 @@
     if (attributes != NONE) object->RequireSlowElements(dictionary);
     dictionary->ValueAtPut(entry, *value);
     PropertyDetails details = dictionary->DetailsAt(entry);
-    details = PropertyDetails(attributes, DATA, details.dictionary_index(),
+    details = PropertyDetails(kData, attributes, details.dictionary_index(),
                               PropertyCellType::kNoCell);
     dictionary->DetailsAtPut(entry, details);
   }
@@ -1418,15 +1426,14 @@
   static void AddImpl(Handle<JSObject> object, uint32_t index,
                       Handle<Object> value, PropertyAttributes attributes,
                       uint32_t new_capacity) {
-    PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+    PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
     Handle<SeededNumberDictionary> dictionary =
         object->HasFastElements() || object->HasFastStringWrapperElements()
             ? JSObject::NormalizeElements(object)
             : handle(SeededNumberDictionary::cast(object->elements()));
     Handle<SeededNumberDictionary> new_dictionary =
-        SeededNumberDictionary::AddNumberEntry(
-            dictionary, index, value, details,
-            object->map()->is_prototype_map());
+        SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
+                                               details, object);
     if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
     if (dictionary.is_identical_to(new_dictionary)) return;
     object->set_elements(*new_dictionary);
@@ -1588,7 +1595,7 @@
         continue;
       }
 
-      if (dictionary->DetailsAt(i).type() == ACCESSOR_CONSTANT) {
+      if (dictionary->DetailsAt(i).kind() == kAccessor) {
         // Restart from beginning in slow path, otherwise we may observably
         // access getters out of order
         return false;
@@ -1622,7 +1629,7 @@
     // Iterate through entire range, as accessing elements out of order is
     // observable
     for (uint32_t k = start_from; k < length; ++k) {
-      int entry = dictionary->FindEntry(k);
+      int entry = dictionary->FindEntry(isolate, k);
       if (entry == SeededNumberDictionary::kNotFound) {
         if (search_for_hole) return Just(true);
         continue;
@@ -1688,7 +1695,7 @@
     // Iterate through entire range, as accessing elements out of order is
     // observable.
     for (uint32_t k = start_from; k < length; ++k) {
-      int entry = dictionary->FindEntry(k);
+      int entry = dictionary->FindEntry(isolate, k);
       if (entry == SeededNumberDictionary::kNotFound) {
         continue;
       }
@@ -1766,15 +1773,14 @@
         SeededNumberDictionary::New(isolate, capacity);
 
     PropertyDetails details = PropertyDetails::Empty();
-    bool used_as_prototype = object->map()->is_prototype_map();
     int j = 0;
     for (int i = 0; j < capacity; i++) {
       if (IsHoleyElementsKind(kind)) {
         if (BackingStore::cast(*store)->is_the_hole(isolate, i)) continue;
       }
-      Handle<Object> value = Subclass::GetImpl(*store, i);
-      dictionary = SeededNumberDictionary::AddNumberEntry(
-          dictionary, i, value, details, used_as_prototype);
+      Handle<Object> value = Subclass::GetImpl(isolate, *store, i);
+      dictionary = SeededNumberDictionary::AddNumberEntry(dictionary, i, value,
+                                                          details, object);
       j++;
     }
     return dictionary;
@@ -1799,8 +1805,7 @@
       return;
     }
 
-    isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
-        *backing_store, length - entry);
+    isolate->heap()->RightTrimFixedArray(*backing_store, length - entry);
   }
 
   static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
@@ -1816,7 +1821,7 @@
     }
 
     Isolate* isolate = obj->GetIsolate();
-    backing_store->set_the_hole(entry);
+    backing_store->set_the_hole(isolate, entry);
 
     // TODO(verwaest): Move this out of elements.cc.
     // If an old space backing store is larger than a certain size and
@@ -1934,7 +1939,7 @@
     for (uint32_t i = 0; i < length; i++) {
       if (IsFastPackedElementsKind(KindTraits::Kind) ||
           HasEntryImpl(isolate, *elements, i)) {
-        accumulator->AddKey(Subclass::GetImpl(*elements, i), convert);
+        accumulator->AddKey(Subclass::GetImpl(isolate, *elements, i), convert);
       }
     }
   }
@@ -2075,7 +2080,7 @@
     uint32_t length = elements->length();
     for (uint32_t index = 0; index < length; ++index) {
       if (!HasEntryImpl(isolate, *elements, index)) continue;
-      Handle<Object> value = Subclass::GetImpl(*elements, index);
+      Handle<Object> value = Subclass::GetImpl(isolate, *elements, index);
       if (get_entries) {
         value = MakeEntryPair(isolate, index, value);
       }
@@ -2265,6 +2270,24 @@
     }
   }
 
+  static Handle<FixedArray> CreateListFromArrayImpl(Isolate* isolate,
+                                                    Handle<JSArray> array) {
+    uint32_t length = 0;
+    array->length()->ToArrayLength(&length);
+    Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
+    Handle<FixedArrayBase> elements(array->elements(), isolate);
+    for (uint32_t i = 0; i < length; i++) {
+      if (!Subclass::HasElementImpl(isolate, array, i, elements)) continue;
+      Handle<Object> value = Subclass::GetImpl(isolate, *elements, i);
+      if (value->IsName()) {
+        value = isolate->factory()->InternalizeName(Handle<Name>::cast(value));
+      }
+      result->set(i, *value);
+    }
+    return result;
+  }
+
  private:
   // SpliceShrinkStep might modify the backing_store.
   static void SpliceShrinkStep(Isolate* isolate, Handle<JSArray> receiver,
@@ -2323,7 +2346,8 @@
     DCHECK(length > 0);
     int new_length = length - 1;
     int remove_index = remove_position == AT_START ? 0 : new_length;
-    Handle<Object> result = Subclass::GetImpl(*backing_store, remove_index);
+    Handle<Object> result =
+        Subclass::GetImpl(isolate, *backing_store, remove_index);
     if (remove_position == AT_START) {
       Subclass::MoveElements(isolate, receiver, backing_store, 0, 1, new_length,
                              0, 0);
@@ -2544,12 +2568,8 @@
   explicit FastDoubleElementsAccessor(const char* name)
       : FastElementsAccessor<Subclass, KindTraits>(name) {}
 
-  static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
-    return GetImpl(holder->elements(), entry);
-  }
-
-  static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
-    Isolate* isolate = backing_store->GetIsolate();
+  static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
+                                uint32_t entry) {
     return FixedDoubleArray::get(FixedDoubleArray::cast(backing_store), entry,
                                  isolate);
   }
@@ -2618,10 +2638,10 @@
     FixedArrayBase* elements_base = receiver->elements();
     Object* value = *search_value;
 
-    if (start_from >= length) return Just<int64_t>(-1);
-
     length = std::min(static_cast<uint32_t>(elements_base->length()), length);
 
+    if (start_from >= length) return Just<int64_t>(-1);
+
     if (!value->IsNumber()) {
       return Just<int64_t>(-1);
     }
@@ -2696,21 +2716,18 @@
     BackingStore::cast(backing_store)->SetValue(entry, value);
   }
 
-  static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
-    return GetImpl(holder->elements(), entry);
-  }
-
-  static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
+  static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
+                                uint32_t entry) {
     return BackingStore::get(BackingStore::cast(backing_store), entry);
   }
 
   static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
-    return PropertyDetails(DONT_DELETE, DATA, 0, PropertyCellType::kNoCell);
+    return PropertyDetails(kData, DONT_DELETE, 0, PropertyCellType::kNoCell);
   }
 
   static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
                                         uint32_t entry) {
-    return PropertyDetails(DONT_DELETE, DATA, 0, PropertyCellType::kNoCell);
+    return PropertyDetails(kData, DONT_DELETE, 0, PropertyCellType::kNoCell);
   }
 
   static bool HasElementImpl(Isolate* isolate, Handle<JSObject> holder,
@@ -2749,10 +2766,14 @@
                : kMaxUInt32;
   }
 
+  static bool WasNeutered(JSObject* holder) {
+    JSArrayBufferView* view = JSArrayBufferView::cast(holder);
+    return view->WasNeutered();
+  }
+
   static uint32_t GetCapacityImpl(JSObject* holder,
                                   FixedArrayBase* backing_store) {
-    JSArrayBufferView* view = JSArrayBufferView::cast(holder);
-    if (view->WasNeutered()) return 0;
+    if (WasNeutered(holder)) return 0;
     return backing_store->length();
   }
 
@@ -2764,10 +2785,11 @@
   static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
                                               KeyAccumulator* accumulator,
                                               AddKeyConversion convert) {
+    Isolate* isolate = receiver->GetIsolate();
     Handle<FixedArrayBase> elements(receiver->elements());
     uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements);
     for (uint32_t i = 0; i < length; i++) {
-      Handle<Object> value = AccessorClass::GetImpl(*elements, i);
+      Handle<Object> value = AccessorClass::GetImpl(isolate, *elements, i);
       accumulator->AddKey(value, convert);
     }
   }
@@ -2781,7 +2803,8 @@
       Handle<FixedArrayBase> elements(object->elements());
       uint32_t length = AccessorClass::GetCapacityImpl(*object, *elements);
       for (uint32_t index = 0; index < length; ++index) {
-        Handle<Object> value = AccessorClass::GetImpl(*elements, index);
+        Handle<Object> value =
+            AccessorClass::GetImpl(isolate, *elements, index);
         if (get_entries) {
           value = MakeEntryPair(isolate, index, value);
         }
@@ -2799,6 +2822,12 @@
     DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
     DisallowHeapAllocation no_gc;
 
+    // TODO(caitp): return Just(false) here when implementing strict throwing on
+    // neutered views.
+    if (WasNeutered(*receiver)) {
+      return Just(value->IsUndefined(isolate) && length > start_from);
+    }
+
     BackingStore* elements = BackingStore::cast(receiver->elements());
     if (value->IsUndefined(isolate) &&
         length > static_cast<uint32_t>(elements->length())) {
@@ -2848,6 +2877,8 @@
     DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
     DisallowHeapAllocation no_gc;
 
+    if (WasNeutered(*receiver)) return Just<int64_t>(-1);
+
     BackingStore* elements = BackingStore::cast(receiver->elements());
     if (!value->IsNumber()) return Just<int64_t>(-1);
 
@@ -2904,12 +2935,8 @@
     USE(KindTraits::Kind);
   }
 
-  static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
-    return GetImpl(holder->elements(), entry);
-  }
-
-  static Handle<Object> GetImpl(FixedArrayBase* parameters, uint32_t entry) {
-    Isolate* isolate = parameters->GetIsolate();
+  static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* parameters,
+                                uint32_t entry) {
     Handle<FixedArray> parameter_map(FixedArray::cast(parameters), isolate);
     uint32_t length = parameter_map->length() - 2;
     if (entry < length) {
@@ -2922,7 +2949,7 @@
     } else {
       // Object is not mapped, defer to the arguments.
       Handle<Object> result = ArgumentsAccessor::GetImpl(
-          FixedArray::cast(parameter_map->get(1)), entry - length);
+          isolate, FixedArray::cast(parameter_map->get(1)), entry - length);
       // Elements of the arguments object in slow mode might be slow aliases.
       if (result->IsAliasedArgumentsEntry()) {
         DisallowHeapAllocation no_gc;
@@ -3020,7 +3047,7 @@
     uint32_t length = GetCapacityImpl(*receiver, *elements);
     for (uint32_t entry = 0; entry < length; entry++) {
       if (!HasEntryImpl(isolate, *elements, entry)) continue;
-      Handle<Object> value = GetImpl(*elements, entry);
+      Handle<Object> value = GetImpl(isolate, *elements, entry);
       accumulator->AddKey(value, convert);
     }
   }
@@ -3071,7 +3098,7 @@
     FixedArray* parameter_map = FixedArray::cast(holder->elements());
     uint32_t length = parameter_map->length() - 2;
     if (entry < length) {
-      return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
+      return PropertyDetails(kData, NONE, 0, PropertyCellType::kNoCell);
     }
     FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
     return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length);
@@ -3156,7 +3183,8 @@
         continue;
       }
 
-      Handle<Object> element_k = GetImpl(*parameter_map, entry);
+      Handle<Object> element_k =
+          Subclass::GetImpl(isolate, *parameter_map, entry);
 
       if (element_k->IsAccessorPair()) {
         LookupIterator it(isolate, object, k, LookupIterator::OWN);
@@ -3195,7 +3223,8 @@
         continue;
       }
 
-      Handle<Object> element_k = GetImpl(*parameter_map, entry);
+      Handle<Object> element_k =
+          Subclass::GetImpl(isolate, *parameter_map, entry);
 
       if (element_k->IsAccessorPair()) {
         LookupIterator it(isolate, object, k, LookupIterator::OWN);
@@ -3256,11 +3285,10 @@
         old_elements->IsSeededNumberDictionary()
             ? Handle<SeededNumberDictionary>::cast(old_elements)
             : JSObject::NormalizeElements(object);
-    PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+    PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
     Handle<SeededNumberDictionary> new_dictionary =
-        SeededNumberDictionary::AddNumberEntry(
-            dictionary, index, value, details,
-            object->map()->is_prototype_map());
+        SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
+                                               details, object);
     if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
     if (*dictionary != *new_dictionary) {
       FixedArray::cast(object->elements())->set(1, *new_dictionary);
@@ -3283,17 +3311,17 @@
       context->set(context_entry, *value);
 
       // Redefining attributes of an aliased element destroys fast aliasing.
-      parameter_map->set_the_hole(entry + 2);
+      parameter_map->set_the_hole(isolate, entry + 2);
       // For elements that are still writable we re-establish slow aliasing.
       if ((attributes & READ_ONLY) == 0) {
         value = isolate->factory()->NewAliasedArgumentsEntry(context_entry);
       }
 
-      PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+      PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
       Handle<SeededNumberDictionary> arguments(
           SeededNumberDictionary::cast(parameter_map->get(1)), isolate);
       arguments = SeededNumberDictionary::AddNumberEntry(
-          arguments, entry, value, details, object->map()->is_prototype_map());
+          arguments, entry, value, details, object);
       // If the attributes were NONE, we would have called set rather than
       // reconfigure.
       DCHECK_NE(NONE, attributes);
@@ -3340,9 +3368,9 @@
       uint32_t entry = GetEntryForIndexImpl(isolate, *receiver, parameters, i,
                                             ALL_PROPERTIES);
       if (entry != kMaxUInt32 && HasEntryImpl(isolate, parameters, entry)) {
-        elements->set(insertion_index, *GetImpl(parameters, entry));
+        elements->set(insertion_index, *GetImpl(isolate, parameters, entry));
       } else {
-        elements->set_the_hole(insertion_index);
+        elements->set_the_hole(isolate, insertion_index);
       }
       insertion_index++;
     }
@@ -3440,6 +3468,11 @@
     USE(KindTraits::Kind);
   }
 
+  static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
+                                        uint32_t entry) {
+    return GetImpl(holder, entry);
+  }
+
   static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
     Isolate* isolate = holder->GetIsolate();
     Handle<String> string(GetString(*holder), isolate);
@@ -3448,7 +3481,14 @@
       return isolate->factory()->LookupSingleCharacterStringFromCode(
           String::Flatten(string)->Get(entry));
     }
-    return BackingStoreAccessor::GetImpl(holder, entry - length);
+    return BackingStoreAccessor::GetImpl(isolate, holder->elements(),
+                                         entry - length);
+  }
+
+  static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* elements,
+                                uint32_t entry) {
+    UNREACHABLE();
+    return Handle<Object>();
   }
 
   static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
@@ -3456,8 +3496,7 @@
     if (entry < length) {
       PropertyAttributes attributes =
           static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
-      return PropertyDetails(attributes, v8::internal::DATA, 0,
-                             PropertyCellType::kNoCell);
+      return PropertyDetails(kData, attributes, 0, PropertyCellType::kNoCell);
     }
     return BackingStoreAccessor::GetDetailsImpl(holder, entry - length);
   }
@@ -3672,9 +3711,9 @@
     JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
     return array;
 
-  } else if (args->length() == 1 && args->at<Object>(0)->IsNumber()) {
+  } else if (args->length() == 1 && args->at(0)->IsNumber()) {
     uint32_t length;
-    if (!args->at<Object>(0)->ToArrayLength(&length)) {
+    if (!args->at(0)->ToArrayLength(&length)) {
       return ThrowArrayLengthRangeError(array->GetIsolate());
     }
 
diff --git a/src/elements.h b/src/elements.h
index fc2e6a4..28635d5 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -175,6 +175,9 @@
                             ElementsKind source_kind,
                             Handle<FixedArrayBase> destination, int size) = 0;
 
+  virtual Handle<FixedArray> CreateListFromArray(Isolate* isolate,
+                                                 Handle<JSArray> array) = 0;
+
  protected:
   friend class LookupIterator;
 
diff --git a/src/execution.cc b/src/execution.cc
index 59421c7..ee6afb2 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -7,6 +7,7 @@
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
+#include "src/debug/debug.h"
 #include "src/isolate-inl.h"
 #include "src/messages.h"
 #include "src/runtime-profiler.h"
@@ -54,11 +55,10 @@
 
 namespace {
 
-MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
-                                           Handle<Object> target,
-                                           Handle<Object> receiver, int argc,
-                                           Handle<Object> args[],
-                                           Handle<Object> new_target) {
+MUST_USE_RESULT MaybeHandle<Object> Invoke(
+    Isolate* isolate, bool is_construct, Handle<Object> target,
+    Handle<Object> receiver, int argc, Handle<Object> args[],
+    Handle<Object> new_target, Execution::MessageHandling message_handling) {
   DCHECK(!receiver->IsJSGlobalObject());
 
 #ifdef USE_SIMULATOR
@@ -69,7 +69,9 @@
   StackLimitCheck check(isolate);
   if (check.HasOverflowed()) {
     isolate->StackOverflow();
-    isolate->ReportPendingMessages();
+    if (message_handling == Execution::MessageHandling::kReport) {
+      isolate->ReportPendingMessages();
+    }
     return MaybeHandle<Object>();
   }
 #endif
@@ -89,7 +91,9 @@
       bool has_exception = value.is_null();
       DCHECK(has_exception == isolate->has_pending_exception());
       if (has_exception) {
-        isolate->ReportPendingMessages();
+        if (message_handling == Execution::MessageHandling::kReport) {
+          isolate->ReportPendingMessages();
+        }
         return MaybeHandle<Object>();
       } else {
         isolate->clear_pending_message();
@@ -103,7 +107,9 @@
   CHECK(AllowJavascriptExecution::IsAllowed(isolate));
   if (!ThrowOnJavascriptExecution::IsAllowed(isolate)) {
     isolate->ThrowIllegalOperation();
-    isolate->ReportPendingMessages();
+    if (message_handling == Execution::MessageHandling::kReport) {
+      isolate->ReportPendingMessages();
+    }
     return MaybeHandle<Object>();
   }
 
@@ -150,7 +156,9 @@
   bool has_exception = value->IsException(isolate);
   DCHECK(has_exception == isolate->has_pending_exception());
   if (has_exception) {
-    isolate->ReportPendingMessages();
+    if (message_handling == Execution::MessageHandling::kReport) {
+      isolate->ReportPendingMessages();
+    }
     return MaybeHandle<Object>();
   } else {
     isolate->clear_pending_message();
@@ -159,13 +167,10 @@
   return Handle<Object>(value, isolate);
 }
 
-}  // namespace
-
-
-// static
-MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
-                                    Handle<Object> receiver, int argc,
-                                    Handle<Object> argv[]) {
+MaybeHandle<Object> CallInternal(Isolate* isolate, Handle<Object> callable,
+                                 Handle<Object> receiver, int argc,
+                                 Handle<Object> argv[],
+                                 Execution::MessageHandling message_handling) {
   // Convert calls on global objects to be calls on the global
   // receiver instead to avoid having a 'this' pointer which refers
   // directly to a global object.
@@ -174,7 +179,17 @@
         handle(Handle<JSGlobalObject>::cast(receiver)->global_proxy(), isolate);
   }
   return Invoke(isolate, false, callable, receiver, argc, argv,
-                isolate->factory()->undefined_value());
+                isolate->factory()->undefined_value(), message_handling);
+}
+
+}  // namespace
+
+// static
+MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
+                                    Handle<Object> receiver, int argc,
+                                    Handle<Object> argv[]) {
+  return CallInternal(isolate, callable, receiver, argc, argv,
+                      MessageHandling::kReport);
 }
 
 
@@ -190,18 +205,21 @@
                                    Handle<Object> new_target, int argc,
                                    Handle<Object> argv[]) {
   return Invoke(isolate, true, constructor,
-                isolate->factory()->undefined_value(), argc, argv, new_target);
+                isolate->factory()->undefined_value(), argc, argv, new_target,
+                MessageHandling::kReport);
 }
 
-
 MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
                                        Handle<Object> callable,
                                        Handle<Object> receiver, int argc,
                                        Handle<Object> args[],
+                                       MessageHandling message_handling,
                                        MaybeHandle<Object>* exception_out) {
   bool is_termination = false;
   MaybeHandle<Object> maybe_result;
   if (exception_out != NULL) *exception_out = MaybeHandle<Object>();
+  DCHECK_IMPLIES(message_handling == MessageHandling::kKeepPending,
+                 exception_out == nullptr);
   // Enter a try-block while executing the JavaScript code. To avoid
   // duplicate error printing it must be non-verbose.  Also, to avoid
   // creating message objects during stack overflow we shouldn't
@@ -211,24 +229,25 @@
     catcher.SetVerbose(false);
     catcher.SetCaptureMessage(false);
 
-    maybe_result = Call(isolate, callable, receiver, argc, args);
+    maybe_result =
+        CallInternal(isolate, callable, receiver, argc, args, message_handling);
 
     if (maybe_result.is_null()) {
-      DCHECK(catcher.HasCaught());
       DCHECK(isolate->has_pending_exception());
-      DCHECK(isolate->external_caught_exception());
       if (isolate->pending_exception() ==
           isolate->heap()->termination_exception()) {
         is_termination = true;
       } else {
-        if (exception_out != NULL) {
+        if (exception_out != nullptr) {
+          DCHECK(catcher.HasCaught());
+          DCHECK(isolate->external_caught_exception());
           *exception_out = v8::Utils::OpenHandle(*catcher.Exception());
         }
       }
-      isolate->OptionalRescheduleException(true);
+      if (message_handling == MessageHandling::kReport) {
+        isolate->OptionalRescheduleException(true);
+      }
     }
-
-    DCHECK(!isolate->has_pending_exception());
   }
 
   // Re-request terminate execution interrupt to trigger later.
@@ -451,7 +470,7 @@
     isolate_->heap()->HandleGCRequest();
   }
 
-  if (CheckDebugBreak() || CheckDebugCommand()) {
+  if (CheckDebugBreak()) {
     isolate_->debug()->HandleDebugBreak();
   }
 
diff --git a/src/execution.h b/src/execution.h
index 6f4bb33..d5f6371 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -7,14 +7,20 @@
 
 #include "src/allocation.h"
 #include "src/base/atomicops.h"
-#include "src/handles.h"
+#include "src/globals.h"
 #include "src/utils.h"
 
 namespace v8 {
 namespace internal {
 
+template <typename T>
+class Handle;
+
 class Execution final : public AllStatic {
  public:
+  // Whether to report pending messages, or keep them pending on the isolate.
+  enum class MessageHandling { kReport, kKeepPending };
+
   // Call a function, the caller supplies a receiver and an array
   // of arguments.
   //
@@ -36,16 +42,18 @@
                                                  int argc,
                                                  Handle<Object> argv[]);
 
-  // Call a function, just like Call(), but make sure to silently catch
-  // any thrown exceptions. The return value is either the result of
-  // calling the function (if caught exception is false) or the exception
-  // that occurred (if caught exception is true).
-  // In the exception case, exception_out holds the caught exceptions, unless
-  // it is a termination exception.
+  // Call a function, just like Call(), but don't report exceptions
+  // externally.
+  // The return value is either the result of calling the function (if no
+  // exception occurred), or an empty handle.
+  // If message_handling is MessageHandling::kReport, exceptions (except for
+  // termination exceptions) will be stored in exception_out (if it is not
+  // nullptr).
   static MaybeHandle<Object> TryCall(Isolate* isolate, Handle<Object> callable,
                                      Handle<Object> receiver, int argc,
                                      Handle<Object> argv[],
-                                     MaybeHandle<Object>* exception_out = NULL);
+                                     MessageHandling message_handling,
+                                     MaybeHandle<Object>* exception_out);
 };
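
A quick usage sketch for the new TryCall contract (illustrative only, not part of this patch; isolate, callable and receiver are assumed to be in scope). Callers now pass the message-handling mode explicitly, and, per the DCHECK_IMPLIES added in execution.cc, exception_out may only be non-null in kReport mode:

    // Report pending messages and capture a thrown value, if any.
    MaybeHandle<Object> exception;
    MaybeHandle<Object> result =
        Execution::TryCall(isolate, callable, receiver, 0, nullptr,
                           Execution::MessageHandling::kReport, &exception);
    // Keep any exception pending on the isolate; exception_out must be null.
    MaybeHandle<Object> result2 =
        Execution::TryCall(isolate, callable, receiver, 0, nullptr,
                           Execution::MessageHandling::kKeepPending, nullptr);
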
 
 
@@ -56,7 +64,7 @@
 // StackGuard contains the handling of the limits that are used to limit the
 // number of nested invocations of JavaScript and the stack size used in each
 // invocation.
-class StackGuard final {
+class V8_EXPORT_PRIVATE StackGuard final {
  public:
   // Pass the address beyond which the stack should not grow.  The stack
   // is assumed to grow downwards.
@@ -79,14 +87,13 @@
   // it has been set up.
   void ClearThread(const ExecutionAccess& lock);
 
-#define INTERRUPT_LIST(V)                                          \
-  V(DEBUGBREAK, DebugBreak, 0)                                     \
-  V(DEBUGCOMMAND, DebugCommand, 1)                                 \
-  V(TERMINATE_EXECUTION, TerminateExecution, 2)                    \
-  V(GC_REQUEST, GC, 3)                                             \
-  V(INSTALL_CODE, InstallCode, 4)                                  \
-  V(API_INTERRUPT, ApiInterrupt, 5)                                \
-  V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 6)
+#define INTERRUPT_LIST(V)                       \
+  V(DEBUGBREAK, DebugBreak, 0)                  \
+  V(TERMINATE_EXECUTION, TerminateExecution, 1) \
+  V(GC_REQUEST, GC, 2)                          \
+  V(INSTALL_CODE, InstallCode, 3)               \
+  V(API_INTERRUPT, ApiInterrupt, 4)             \
+  V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 5)
 
 #define V(NAME, Name, id)                                          \
   inline bool Check##Name() { return CheckInterrupt(NAME); }  \
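
For readers unfamiliar with the X-macro above: each INTERRUPT_LIST entry (NAME, Name, id) is expanded by V into accessors such as Check##Name, and the id presumably selects the interrupt's flag bit, which is why dropping DEBUGCOMMAND renumbers the remaining entries. A hand-expanded sketch of the first entry, following the V definition shown above:

    // What V(DEBUGBREAK, DebugBreak, 0) expands to for the accessor shown:
    inline bool CheckDebugBreak() { return CheckInterrupt(DEBUGBREAK); }
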
diff --git a/src/extensions/externalize-string-extension.cc b/src/extensions/externalize-string-extension.cc
index 2ed3ad2..b81b782 100644
--- a/src/extensions/externalize-string-extension.cc
+++ b/src/extensions/externalize-string-extension.cc
@@ -7,6 +7,7 @@
 #include "src/api.h"
 #include "src/handles.h"
 #include "src/isolate.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/external-reference-table.cc b/src/external-reference-table.cc
index 2e9fc46..6c72a46 100644
--- a/src/external-reference-table.cc
+++ b/src/external-reference-table.cc
@@ -10,6 +10,7 @@
 #include "src/counters.h"
 #include "src/deoptimizer.h"
 #include "src/ic/stub-cache.h"
+#include "src/objects-inl.h"
 
 #if defined(DEBUG) && defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID)
 #define SYMBOLIZE_FUNCTION
@@ -228,6 +229,10 @@
       "f64_asin_wrapper");
   Add(ExternalReference::f64_mod_wrapper_function(isolate).address(),
       "f64_mod_wrapper");
+  Add(ExternalReference::wasm_call_trap_callback_for_testing(isolate).address(),
+      "wasm::call_trap_callback_for_testing");
+  Add(ExternalReference::libc_memchr_function(isolate).address(),
+      "libc_memchr");
   Add(ExternalReference::log_enter_external_function(isolate).address(),
       "Logger::EnterExternal");
   Add(ExternalReference::log_leave_external_function(isolate).address(),
@@ -249,16 +254,21 @@
       "double_absolute_constant");
   Add(ExternalReference::address_of_double_neg_constant().address(),
       "double_negate_constant");
+  Add(ExternalReference::promise_hook_or_debug_is_active_address(isolate)
+          .address(),
+      "Isolate::promise_hook_or_debug_is_active_address()");
 
   // Debug addresses
-  Add(ExternalReference::debug_after_break_target_address(isolate).address(),
-      "Debug::after_break_target_address()");
   Add(ExternalReference::debug_is_active_address(isolate).address(),
       "Debug::is_active_address()");
+  Add(ExternalReference::debug_hook_on_function_call_address(isolate).address(),
+      "Debug::hook_on_function_call_address()");
   Add(ExternalReference::debug_last_step_action_address(isolate).address(),
       "Debug::step_in_enabled_address()");
   Add(ExternalReference::debug_suspended_generator_address(isolate).address(),
       "Debug::step_suspended_generator_address()");
+  Add(ExternalReference::debug_restart_fp_address(isolate).address(),
+      "Debug::restart_fp_address()");
 
 #ifndef V8_INTERPRETED_REGEXP
   Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
@@ -363,9 +373,8 @@
   };
 
   static const AccessorRefTable getters[] = {
-#define ACCESSOR_INFO_DECLARATION(name)     \
-  { FUNCTION_ADDR(&Accessors::name##Getter), \
-    "Redirect to Accessors::" #name "Getter"},
+#define ACCESSOR_INFO_DECLARATION(name) \
+  {FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter"},
       ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
 #undef ACCESSOR_INFO_DECLARATION
   };
@@ -377,10 +386,7 @@
   };
 
   for (unsigned i = 0; i < arraysize(getters); ++i) {
-    const char* name = getters[i].name + 12;  // Skip "Redirect to " prefix.
-    Add(getters[i].address, name);
-    Add(AccessorInfo::redirect(isolate, getters[i].address, ACCESSOR_GETTER),
-        getters[i].name);
+    Add(getters[i].address, getters[i].name);
   }
 
   for (unsigned i = 0; i < arraysize(setters); ++i) {
@@ -438,6 +444,7 @@
 void ExternalReferenceTable::AddApiReferences(Isolate* isolate) {
   // Add external references provided by the embedder (a null-terminated
   // array).
+  api_refs_start_ = size();
   intptr_t* api_external_references = isolate->api_external_references();
   if (api_external_references != nullptr) {
     while (*api_external_references != 0) {
diff --git a/src/external-reference-table.h b/src/external-reference-table.h
index e1b97f9..40eccbe 100644
--- a/src/external-reference-table.h
+++ b/src/external-reference-table.h
@@ -22,6 +22,7 @@
   uint32_t size() const { return static_cast<uint32_t>(refs_.length()); }
   Address address(uint32_t i) { return refs_[i].address; }
   const char* name(uint32_t i) { return refs_[i].name; }
+  bool is_api_reference(uint32_t i) { return i >= api_refs_start_; }
 
 #ifdef DEBUG
   void increment_count(uint32_t i) { refs_[i].count++; }
@@ -64,6 +65,7 @@
   void AddApiReferences(Isolate* isolate);
 
   List<ExternalReferenceEntry> refs_;
+  uint32_t api_refs_start_;
 
   DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
 };
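
The new api_refs_start_ field records how many internal references exist before the embedder-provided ones are appended, so is_api_reference() reduces to an index comparison. A minimal standalone sketch of the same pattern (hypothetical names, not V8 API):

    #include <string>
    #include <vector>

    class RefTable {
     public:
      // Internal references are added first.
      void AddBuiltin(const std::string& name) { refs_.push_back(name); }
      // Called once, before appending embedder references.
      void BeginApiRefs() { api_refs_start_ = refs_.size(); }
      void AddApiRef(const std::string& name) { refs_.push_back(name); }
      // Everything at or past the recorded boundary came from the embedder.
      bool is_api_reference(size_t i) const { return i >= api_refs_start_; }

     private:
      std::vector<std::string> refs_;
      size_t api_refs_start_ = 0;
    };
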
diff --git a/src/factory.cc b/src/factory.cc
index 3e812d5..79147d6 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -6,12 +6,15 @@
 
 #include "src/accessors.h"
 #include "src/allocation-site-scopes.h"
+#include "src/ast/ast.h"
 #include "src/base/bits.h"
 #include "src/bootstrapper.h"
 #include "src/compiler.h"
 #include "src/conversions.h"
 #include "src/isolate-inl.h"
 #include "src/macro-assembler.h"
+#include "src/objects/module-info.h"
+#include "src/objects/scope-info.h"
 
 namespace v8 {
 namespace internal {
@@ -86,12 +89,6 @@
 }
 
 
-Handle<Box> Factory::NewBox(Handle<Object> value) {
-  Handle<Box> result = Handle<Box>::cast(NewStruct(BOX_TYPE));
-  result->set_value(*value);
-  return result;
-}
-
 Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
   Handle<PrototypeInfo> result =
       Handle<PrototypeInfo>::cast(NewStruct(PROTOTYPE_INFO_TYPE));
@@ -102,6 +99,14 @@
   return result;
 }
 
+Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1,
+                                  Handle<Object> value2) {
+  Handle<Tuple2> result = Handle<Tuple2>::cast(NewStruct(TUPLE2_TYPE));
+  result->set_value1(*value1);
+  result->set_value2(*value2);
+  return result;
+}
+
 Handle<Tuple3> Factory::NewTuple3(Handle<Object> value1, Handle<Object> value2,
                                   Handle<Object> value3) {
   Handle<Tuple3> result = Handle<Tuple3>::cast(NewStruct(TUPLE3_TYPE));
@@ -120,6 +125,15 @@
   return result;
 }
 
+Handle<ConstantElementsPair> Factory::NewConstantElementsPair(
+    ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
+  Handle<ConstantElementsPair> result = Handle<ConstantElementsPair>::cast(
+      NewStruct(CONSTANT_ELEMENTS_PAIR_TYPE));
+  result->set_elements_kind(elements_kind);
+  result->set_constant_values(*constant_values);
+  return result;
+}
+
 Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
                                     Handle<Object> to_number,
                                     const char* type_of, byte kind) {
@@ -158,7 +172,6 @@
       FixedArray);
 }
 
-
 Handle<FixedArray> Factory::NewUninitializedFixedArray(int size) {
   CALL_HEAP_FUNCTION(
       isolate(),
@@ -166,6 +179,34 @@
       FixedArray);
 }
 
+Handle<BoilerplateDescription> Factory::NewBoilerplateDescription(
+    int boilerplate, int all_properties, int index_keys, bool has_seen_proto) {
+  DCHECK_GE(boilerplate, 0);
+  DCHECK_GE(all_properties, index_keys);
+  DCHECK_GE(index_keys, 0);
+
+  int backing_store_size =
+      all_properties - index_keys - (has_seen_proto ? 1 : 0);
+  DCHECK_GE(backing_store_size, 0);
+  bool has_different_size_backing_store = boilerplate != backing_store_size;
+
+  // Space for name and value for every boilerplate property.
+  int size = 2 * boilerplate;
+
+  if (has_different_size_backing_store) {
+    // An extra entry for the backing store size.
+    size++;
+  }
+
+  Handle<BoilerplateDescription> description =
+      Handle<BoilerplateDescription>::cast(NewFixedArray(size, TENURED));
+
+  if (has_different_size_backing_store) {
+    DCHECK((boilerplate != (all_properties - index_keys)) || has_seen_proto);
+    description->set_backing_store_size(isolate(), backing_store_size);
+  }
+  return description;
+}
 
 Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int size,
                                                     PretenureFlag pretenure) {
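
A worked example of the sizing logic in NewBoilerplateDescription above (illustrative numbers): with boilerplate = 2 name-value pairs, all_properties = 5, index_keys = 1 and has_seen_proto = true, backing_store_size = 5 - 1 - 1 = 3; since 3 != 2, one extra slot is reserved and the fixed array gets 2 * 2 + 1 = 5 entries. When backing_store_size equals boilerplate, no size entry is stored and the array is exactly 2 * boilerplate entries.
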
@@ -183,11 +224,7 @@
   DCHECK(0 <= size);
   Handle<FixedArrayBase> array = NewFixedDoubleArray(size, pretenure);
   if (size > 0) {
-    Handle<FixedDoubleArray> double_array =
-        Handle<FixedDoubleArray>::cast(array);
-    for (int i = 0; i < size; ++i) {
-      double_array->set_the_hole(i);
-    }
+    Handle<FixedDoubleArray>::cast(array)->FillWithHoles(0, size);
   }
   return array;
 }
@@ -263,6 +300,7 @@
 MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
                                                   PretenureFlag pretenure) {
   int length = string.length();
+  if (length == 0) return empty_string();
   if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
   Handle<SeqOneByteString> result;
   ASSIGN_RETURN_ON_EXCEPTION(
@@ -356,6 +394,7 @@
 MaybeHandle<String> Factory::NewStringFromTwoByte(const uc16* string,
                                                   int length,
                                                   PretenureFlag pretenure) {
+  if (length == 0) return empty_string();
   if (String::IsOneByte(string, length)) {
     if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
     Handle<SeqOneByteString> result;
@@ -440,38 +479,63 @@
       String);
 }
 
+namespace {
+
+MaybeHandle<Map> GetInternalizedStringMap(Factory* f, Handle<String> string) {
+  switch (string->map()->instance_type()) {
+    case STRING_TYPE:
+      return f->internalized_string_map();
+    case ONE_BYTE_STRING_TYPE:
+      return f->one_byte_internalized_string_map();
+    case EXTERNAL_STRING_TYPE:
+      return f->external_internalized_string_map();
+    case EXTERNAL_ONE_BYTE_STRING_TYPE:
+      return f->external_one_byte_internalized_string_map();
+    case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+      return f->external_internalized_string_with_one_byte_data_map();
+    case SHORT_EXTERNAL_STRING_TYPE:
+      return f->short_external_internalized_string_map();
+    case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
+      return f->short_external_one_byte_internalized_string_map();
+    case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+      return f->short_external_internalized_string_with_one_byte_data_map();
+    default: return MaybeHandle<Map>();  // No match found.
+  }
+}
+
+}  // namespace
 
 MaybeHandle<Map> Factory::InternalizedStringMapForString(
     Handle<String> string) {
   // If the string is in new space it cannot be used as internalized.
   if (isolate()->heap()->InNewSpace(*string)) return MaybeHandle<Map>();
 
-  // Find the corresponding internalized string map for strings.
-  switch (string->map()->instance_type()) {
-    case STRING_TYPE: return internalized_string_map();
-    case ONE_BYTE_STRING_TYPE:
-      return one_byte_internalized_string_map();
-    case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
-    case EXTERNAL_ONE_BYTE_STRING_TYPE:
-      return external_one_byte_internalized_string_map();
-    case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
-      return external_internalized_string_with_one_byte_data_map();
-    case SHORT_EXTERNAL_STRING_TYPE:
-      return short_external_internalized_string_map();
-    case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
-      return short_external_one_byte_internalized_string_map();
-    case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
-      return short_external_internalized_string_with_one_byte_data_map();
-    default: return MaybeHandle<Map>();  // No match found.
-  }
+  return GetInternalizedStringMap(this, string);
 }
 
+template <class StringClass>
+Handle<StringClass> Factory::InternalizeExternalString(Handle<String> string) {
+  Handle<StringClass> cast_string = Handle<StringClass>::cast(string);
+  Handle<Map> map = GetInternalizedStringMap(this, string).ToHandleChecked();
+  Handle<StringClass> external_string = New<StringClass>(map, OLD_SPACE);
+  external_string->set_length(cast_string->length());
+  external_string->set_hash_field(cast_string->hash_field());
+  external_string->set_resource(nullptr);
+  isolate()->heap()->RegisterExternalString(*external_string);
+  return external_string;
+}
+
+template Handle<ExternalOneByteString>
+    Factory::InternalizeExternalString<ExternalOneByteString>(Handle<String>);
+template Handle<ExternalTwoByteString>
+    Factory::InternalizeExternalString<ExternalTwoByteString>(Handle<String>);
 
 MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
     int length, PretenureFlag pretenure) {
   if (length > String::kMaxLength || length < 0) {
     THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqOneByteString);
   }
+  DCHECK(length > 0);  // Use Factory::empty_string() instead.
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateRawOneByteString(length, pretenure),
@@ -484,6 +548,7 @@
   if (length > String::kMaxLength || length < 0) {
     THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqTwoByteString);
   }
+  DCHECK(length > 0);  // Use Factory::empty_string() instead.
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateRawTwoByteString(length, pretenure),
@@ -572,6 +637,12 @@
 
 MaybeHandle<String> Factory::NewConsString(Handle<String> left,
                                            Handle<String> right) {
+  if (left->IsThinString()) {
+    left = handle(Handle<ThinString>::cast(left)->actual(), isolate());
+  }
+  if (right->IsThinString()) {
+    right = handle(Handle<ThinString>::cast(right)->actual(), isolate());
+  }
   int left_length = left->length();
   if (left_length == 0) return right;
   int right_length = right->length();
@@ -718,6 +789,10 @@
     str = Handle<String>(slice->parent(), isolate());
     offset += slice->offset();
   }
+  if (str->IsThinString()) {
+    Handle<ThinString> thin = Handle<ThinString>::cast(str);
+    str = handle(thin->actual(), isolate());
+  }
 
   DCHECK(str->IsSeqString() || str->IsExternalString());
   Handle<Map> map = str->IsOneByteRepresentation()
@@ -739,6 +814,7 @@
   if (length > static_cast<size_t>(String::kMaxLength)) {
     THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
   }
+  if (length == 0) return empty_string();
 
   Handle<Map> map;
   if (resource->IsCompressible()) {
@@ -763,6 +839,7 @@
   if (length > static_cast<size_t>(String::kMaxLength)) {
     THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
   }
+  if (length == 0) return empty_string();
 
   // For small strings we check whether the resource contains only
   // one byte characters.  If yes, we use a different string map.
@@ -885,13 +962,24 @@
   return context;
 }
 
-
 Handle<Context> Factory::NewFunctionContext(int length,
-                                            Handle<JSFunction> function) {
-  DCHECK(function->shared()->scope_info()->scope_type() == FUNCTION_SCOPE);
+                                            Handle<JSFunction> function,
+                                            ScopeType scope_type) {
+  DCHECK(function->shared()->scope_info()->scope_type() == scope_type);
   DCHECK(length >= Context::MIN_CONTEXT_SLOTS);
   Handle<FixedArray> array = NewFixedArray(length);
-  array->set_map_no_write_barrier(*function_context_map());
+  Handle<Map> map;
+  switch (scope_type) {
+    case EVAL_SCOPE:
+      map = eval_context_map();
+      break;
+    case FUNCTION_SCOPE:
+      map = function_context_map();
+      break;
+    default:
+      UNREACHABLE();
+  }
+  array->set_map_no_write_barrier(*map);
   Handle<Context> context = Handle<Context>::cast(array);
   context->set_closure(*function);
   context->set_previous(function->context());
@@ -971,15 +1059,6 @@
   return context;
 }
 
-Handle<Context> Factory::NewPromiseResolvingFunctionContext(int length) {
-  DCHECK_GE(length, Context::MIN_CONTEXT_SLOTS);
-  Handle<FixedArray> array = NewFixedArray(length);
-  array->set_map_no_write_barrier(*function_context_map());
-  Handle<Context> context = Handle<Context>::cast(array);
-  context->set_extension(*the_hole_value());
-  return context;
-}
-
 Handle<Struct> Factory::NewStruct(InstanceType type) {
   CALL_HEAP_FUNCTION(
       isolate(),
@@ -987,39 +1066,6 @@
       Struct);
 }
 
-Handle<PromiseResolveThenableJobInfo> Factory::NewPromiseResolveThenableJobInfo(
-    Handle<JSReceiver> thenable, Handle<JSReceiver> then,
-    Handle<JSFunction> resolve, Handle<JSFunction> reject,
-    Handle<Object> debug_id, Handle<Object> debug_name,
-    Handle<Context> context) {
-  Handle<PromiseResolveThenableJobInfo> result =
-      Handle<PromiseResolveThenableJobInfo>::cast(
-          NewStruct(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE));
-  result->set_thenable(*thenable);
-  result->set_then(*then);
-  result->set_resolve(*resolve);
-  result->set_reject(*reject);
-  result->set_debug_id(*debug_id);
-  result->set_debug_name(*debug_name);
-  result->set_context(*context);
-  return result;
-}
-
-Handle<PromiseReactionJobInfo> Factory::NewPromiseReactionJobInfo(
-    Handle<Object> value, Handle<Object> tasks, Handle<Object> deferred,
-    Handle<Object> debug_id, Handle<Object> debug_name,
-    Handle<Context> context) {
-  Handle<PromiseReactionJobInfo> result = Handle<PromiseReactionJobInfo>::cast(
-      NewStruct(PROMISE_REACTION_JOB_INFO_TYPE));
-  result->set_value(*value);
-  result->set_tasks(*tasks);
-  result->set_deferred(*deferred);
-  result->set_debug_id(*debug_id);
-  result->set_debug_name(*debug_name);
-  result->set_context(*context);
-  return result;
-}
-
 Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
     int aliased_context_slot) {
   Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast(
@@ -1053,7 +1099,7 @@
   script->set_line_ends(heap->undefined_value());
   script->set_eval_from_shared(heap->undefined_value());
   script->set_eval_from_position(0);
-  script->set_shared_function_infos(Smi::kZero);
+  script->set_shared_function_infos(*empty_fixed_array(), SKIP_WRITE_BARRIER);
   script->set_flags(0);
 
   heap->set_script_list(*WeakFixedArray::Add(script_list(), script));
@@ -1113,7 +1159,6 @@
                      FixedTypedArrayBase);
 }
 
-
 Handle<Cell> Factory::NewCell(Handle<Object> value) {
   AllowDeferredHandleDereference convert_to_cell;
   CALL_HEAP_FUNCTION(
@@ -1122,6 +1167,23 @@
       Cell);
 }
 
+Handle<Cell> Factory::NewNoClosuresCell(Handle<Object> value) {
+  Handle<Cell> cell = NewCell(value);
+  cell->set_map_no_write_barrier(*no_closures_cell_map());
+  return cell;
+}
+
+Handle<Cell> Factory::NewOneClosureCell(Handle<Object> value) {
+  Handle<Cell> cell = NewCell(value);
+  cell->set_map_no_write_barrier(*one_closure_cell_map());
+  return cell;
+}
+
+Handle<Cell> Factory::NewManyClosuresCell(Handle<Object> value) {
+  Handle<Cell> cell = NewCell(value);
+  cell->set_map_no_write_barrier(*many_closures_cell_map());
+  return cell;
+}
 
 Handle<PropertyCell> Factory::NewPropertyCell() {
   CALL_HEAP_FUNCTION(
@@ -1265,27 +1327,13 @@
   return NewHeapNumber(FastUI2D(value), IMMUTABLE, pretenure);
 }
 
-
-Handle<HeapNumber> Factory::NewHeapNumber(double value,
-                                          MutableMode mode,
+Handle<HeapNumber> Factory::NewHeapNumber(MutableMode mode,
                                           PretenureFlag pretenure) {
-  CALL_HEAP_FUNCTION(
-      isolate(),
-      isolate()->heap()->AllocateHeapNumber(value, mode, pretenure),
-      HeapNumber);
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->AllocateHeapNumber(mode, pretenure),
+                     HeapNumber);
 }
 
-
-#define SIMD128_NEW_DEF(TYPE, Type, type, lane_count, lane_type)               \
-  Handle<Type> Factory::New##Type(lane_type lanes[lane_count],                 \
-                                  PretenureFlag pretenure) {                   \
-    CALL_HEAP_FUNCTION(                                                        \
-        isolate(), isolate()->heap()->Allocate##Type(lanes, pretenure), Type); \
-  }
-SIMD128_TYPES(SIMD128_NEW_DEF)
-#undef SIMD128_NEW_DEF
-
-
 Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
                                  MessageTemplate::Template template_index,
                                  Handle<Object> arg0, Handle<Object> arg1,
@@ -1356,6 +1404,7 @@
 DEFINE_ERROR(SyntaxError, syntax_error)
 DEFINE_ERROR(TypeError, type_error)
 DEFINE_ERROR(WasmCompileError, wasm_compile_error)
+DEFINE_ERROR(WasmLinkError, wasm_link_error)
 DEFINE_ERROR(WasmRuntimeError, wasm_runtime_error)
 #undef DEFINE_ERROR
 
@@ -1374,7 +1423,7 @@
   function->set_code(info->code());
   function->set_context(*context_or_undefined);
   function->set_prototype_or_initial_map(*the_hole_value());
-  function->set_literals(LiteralsArray::cast(*empty_literals_array()));
+  function->set_feedback_vector_cell(*undefined_cell());
   function->set_next_function_link(*undefined_value(), SKIP_WRITE_BARRIER);
   isolate()->heap()->InitializeJSObjectBody(*function, *map, JSFunction::kSize);
   return function;
@@ -1398,6 +1447,7 @@
       map.is_identical_to(isolate()->strict_function_without_prototype_map()) ||
       // TODO(titzer): wasm_function_map() could be undefined here. ugly.
       (*map == context->get(Context::WASM_FUNCTION_MAP_INDEX)) ||
+      (*map == context->get(Context::NATIVE_FUNCTION_MAP_INDEX)) ||
       map.is_identical_to(isolate()->proxy_function_map()));
   return NewFunction(map, info, context);
 }
@@ -1505,6 +1555,17 @@
 }
 
 Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+    Handle<SharedFunctionInfo> info, Handle<Context> context,
+    Handle<Cell> vector, PretenureFlag pretenure) {
+  int map_index =
+      Context::FunctionMapIndex(info->language_mode(), info->kind());
+  Handle<Map> initial_map(Map::cast(context->native_context()->get(map_index)));
+
+  return NewFunctionFromSharedFunctionInfo(initial_map, info, context, vector,
+                                           pretenure);
+}
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
     Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
     Handle<Object> context_or_undefined, PretenureFlag pretenure) {
   DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
@@ -1523,6 +1584,35 @@
   return result;
 }
 
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+    Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
+    Handle<Object> context_or_undefined, Handle<Cell> vector,
+    PretenureFlag pretenure) {
+  DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
+  Handle<JSFunction> result =
+      NewFunction(initial_map, info, context_or_undefined, pretenure);
+
+  // Bump the closure count that is encoded in the vector cell's map.
+  if (vector->map() == *no_closures_cell_map()) {
+    vector->set_map(*one_closure_cell_map());
+  } else if (vector->map() == *one_closure_cell_map()) {
+    vector->set_map(*many_closures_cell_map());
+  } else {
+    DCHECK_EQ(vector->map(), *many_closures_cell_map());
+  }
+
+  result->set_feedback_vector_cell(*vector);
+  if (info->ic_age() != isolate()->heap()->global_ic_age()) {
+    info->ResetForNewContext(isolate()->heap()->global_ic_age());
+  }
+
+  if (context_or_undefined->IsContext()) {
+    // Give compiler a chance to pre-initialize.
+    Compiler::PostInstantiation(result, pretenure);
+  }
+
+  return result;
+}
 
 Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
   Handle<FixedArray> array = NewFixedArray(length, TENURED);
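
The map bumping in NewFunctionFromSharedFunctionInfo above is a saturating three-state counter encoded in the vector cell's map: no closures -> one closure -> many closures. A plain-enum restatement of the transition (hypothetical, outside V8's map machinery):

    enum class ClosureCount { kNone, kOne, kMany };

    ClosureCount Bump(ClosureCount count) {
      switch (count) {
        case ClosureCount::kNone:
          return ClosureCount::kOne;   // first closure created for this cell
        case ClosureCount::kOne:
          return ClosureCount::kMany;  // second closure: saturate at "many"
        case ClosureCount::kMany:
          return ClosureCount::kMany;  // stays at "many" from here on
      }
      return ClosureCount::kMany;  // not reached; keeps compilers happy
    }
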
@@ -1650,9 +1740,9 @@
       isolate()->heap()->AllocateJSObject(*constructor, pretenure), JSObject);
 }
 
-
-Handle<JSObject> Factory::NewJSObjectWithNullProto() {
-  Handle<JSObject> result = NewJSObject(isolate()->object_function());
+Handle<JSObject> Factory::NewJSObjectWithNullProto(PretenureFlag pretenure) {
+  Handle<JSObject> result =
+      NewJSObject(isolate()->object_function(), pretenure);
   Handle<Map> new_map =
       Map::Copy(Handle<Map>(result->map()), "ObjectWithNullProto");
   Map::SetPrototype(new_map, null_value());
@@ -1692,12 +1782,12 @@
   for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
     PropertyDetails details = descs->GetDetails(i);
     // Only accessors are expected.
-    DCHECK_EQ(ACCESSOR_CONSTANT, details.type());
-    PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
+    DCHECK_EQ(kAccessor, details.kind());
+    PropertyDetails d(kAccessor, details.attributes(), i + 1,
                       PropertyCellType::kMutable);
     Handle<Name> name(descs->GetKey(i));
     Handle<PropertyCell> cell = NewPropertyCell();
-    cell->set_value(descs->GetCallbacksObject(i));
+    cell->set_value(descs->GetValue(i));
     // |dictionary| already contains enough space for all properties.
     USE(GlobalDictionary::Add(dictionary, name, cell, d));
   }
@@ -1806,7 +1896,13 @@
 
 Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
   Handle<Map> map = isolate()->js_module_namespace_map();
-  return Handle<JSModuleNamespace>::cast(NewJSObjectFromMap(map));
+  Handle<JSModuleNamespace> module_namespace(
+      Handle<JSModuleNamespace>::cast(NewJSObjectFromMap(map)));
+  FieldIndex index = FieldIndex::ForDescriptor(
+      *map, JSModuleNamespace::kToStringTagFieldIndex);
+  module_namespace->FastPropertyAtPut(index,
+                                      isolate()->heap()->Module_string());
+  return module_namespace;
 }
 
 Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
@@ -1879,6 +1975,16 @@
   return js_iter_result;
 }
 
+Handle<JSAsyncFromSyncIterator> Factory::NewJSAsyncFromSyncIterator(
+    Handle<JSReceiver> sync_iterator) {
+  Handle<Map> map(isolate()->native_context()->async_from_sync_iterator_map());
+  Handle<JSAsyncFromSyncIterator> iterator =
+      Handle<JSAsyncFromSyncIterator>::cast(NewJSObjectFromMap(map));
+
+  iterator->set_sync_iterator(*sync_iterator);
+  return iterator;
+}
+
 Handle<JSMap> Factory::NewJSMap() {
   Handle<Map> map(isolate()->native_context()->js_map_map());
   Handle<JSMap> js_map = Handle<JSMap>::cast(NewJSObjectFromMap(map));
@@ -2224,6 +2330,7 @@
     map->set_is_prototype_map(true);
   }
   JSObject::NotifyMapChange(old_map, map, isolate());
+  old_map->NotifyLeafMapLayoutChange();
 
   // Check that the already allocated object has the same size and type as
   // objects allocated using the constructor.
@@ -2249,21 +2356,30 @@
 }
 
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
-    Handle<String> name, int number_of_literals, FunctionKind kind,
-    Handle<Code> code, Handle<ScopeInfo> scope_info) {
+    Handle<String> name, FunctionKind kind, Handle<Code> code,
+    Handle<ScopeInfo> scope_info) {
   DCHECK(IsValidFunctionKind(kind));
   Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(
       name, code, IsConstructable(kind, scope_info->language_mode()));
   shared->set_scope_info(*scope_info);
   shared->set_outer_scope_info(*the_hole_value());
   shared->set_kind(kind);
-  shared->set_num_literals(number_of_literals);
   if (IsGeneratorFunction(kind)) {
     shared->set_instance_class_name(isolate()->heap()->Generator_string());
   }
   return shared;
 }
 
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
+    FunctionLiteral* literal, Handle<Script> script) {
+  Handle<Code> code = isolate()->builtins()->CompileLazy();
+  Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate()));
+  Handle<SharedFunctionInfo> result =
+      NewSharedFunctionInfo(literal->name(), literal->kind(), code, scope_info);
+  SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
+  SharedFunctionInfo::SetScript(result, script);
+  return result;
+}
 
 Handle<JSMessageObject> Factory::NewJSMessageObject(
     MessageTemplate::Template message, Handle<Object> argument,
@@ -2280,6 +2396,7 @@
   message_obj->set_end_position(end_position);
   message_obj->set_script(*script);
   message_obj->set_stack_frames(*stack_frames);
+  message_obj->set_error_level(v8::Isolate::kMessageError);
   return message_obj;
 }
 
@@ -2295,6 +2412,7 @@
 
   // Set pointer fields.
   share->set_name(*name);
+  share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
   Handle<Code> code;
   if (!maybe_code.ToHandle(&code)) {
     code = isolate()->builtins()->Illegal();
@@ -2308,14 +2426,14 @@
                      : isolate()->builtins()->ConstructedNonConstructable();
   share->SetConstructStub(*construct_stub);
   share->set_instance_class_name(*Object_string());
-  share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
   share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
-  share->set_debug_info(DebugInfo::uninitialized(), SKIP_WRITE_BARRIER);
+  share->set_debug_info(Smi::kZero, SKIP_WRITE_BARRIER);
   share->set_function_identifier(*undefined_value(), SKIP_WRITE_BARRIER);
   StaticFeedbackVectorSpec empty_spec;
-  Handle<TypeFeedbackMetadata> feedback_metadata =
-      TypeFeedbackMetadata::New(isolate(), &empty_spec);
+  Handle<FeedbackMetadata> feedback_metadata =
+      FeedbackMetadata::New(isolate(), &empty_spec);
   share->set_feedback_metadata(*feedback_metadata, SKIP_WRITE_BARRIER);
+  share->set_function_literal_id(FunctionLiteral::kIdTypeInvalid);
 #if TRACE_MAPS
   share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
 #endif
@@ -2327,7 +2445,6 @@
   share->set_length(0);
   share->set_internal_formal_parameter_count(0);
   share->set_expected_nof_properties(0);
-  share->set_num_literals(0);
   share->set_start_position_and_type(0);
   share->set_end_position(0);
   share->set_function_token_position(0);
@@ -2413,6 +2530,7 @@
 
 
 Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
+  DCHECK(!shared->HasDebugInfo());
   // Allocate initial fixed array for active break points before allocating the
   // debug info object to avoid allocation while setting up the debug info
   // object.
@@ -2432,6 +2550,7 @@
   Handle<DebugInfo> debug_info =
       Handle<DebugInfo>::cast(NewStruct(DEBUG_INFO_TYPE));
   debug_info->set_shared(*shared);
+  debug_info->set_debugger_hints(shared->debugger_hints());
   debug_info->set_debug_bytecode_array(*maybe_debug_bytecode_array);
   debug_info->set_break_points(*break_points);
 
@@ -2441,6 +2560,13 @@
   return debug_info;
 }
 
+Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
+  Handle<BreakPointInfo> new_break_point_info =
+      Handle<BreakPointInfo>::cast(NewStruct(BREAK_POINT_INFO_TYPE));
+  new_break_point_info->set_source_position(source_position);
+  new_break_point_info->set_break_point_objects(*undefined_value());
+  return new_break_point_info;
+}
 
 Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
                                              int length) {
@@ -2616,31 +2742,31 @@
   Handle<AccessorInfo> length =
       Accessors::FunctionLengthInfo(isolate(), roc_attribs);
   {  // Add length.
-    AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
-                                 length, roc_attribs);
+    Descriptor d = Descriptor::AccessorConstant(
+        Handle<Name>(Name::cast(length->name())), length, roc_attribs);
     map->AppendDescriptor(&d);
   }
 
   STATIC_ASSERT(JSFunction::kNameDescriptorIndex == 1);
   Handle<AccessorInfo> name =
-      Accessors::FunctionNameInfo(isolate(), ro_attribs);
+      Accessors::FunctionNameInfo(isolate(), roc_attribs);
   {  // Add name.
-    AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
-                                 roc_attribs);
+    Descriptor d = Descriptor::AccessorConstant(
+        Handle<Name>(Name::cast(name->name())), name, roc_attribs);
     map->AppendDescriptor(&d);
   }
   Handle<AccessorInfo> args =
       Accessors::FunctionArgumentsInfo(isolate(), ro_attribs);
   {  // Add arguments.
-    AccessorConstantDescriptor d(Handle<Name>(Name::cast(args->name())), args,
-                                 ro_attribs);
+    Descriptor d = Descriptor::AccessorConstant(
+        Handle<Name>(Name::cast(args->name())), args, ro_attribs);
     map->AppendDescriptor(&d);
   }
   Handle<AccessorInfo> caller =
       Accessors::FunctionCallerInfo(isolate(), ro_attribs);
   {  // Add caller.
-    AccessorConstantDescriptor d(Handle<Name>(Name::cast(caller->name())),
-                                 caller, ro_attribs);
+    Descriptor d = Descriptor::AccessorConstant(
+        Handle<Name>(Name::cast(caller->name())), caller, ro_attribs);
     map->AppendDescriptor(&d);
   }
   if (IsFunctionModeWithPrototype(function_mode)) {
@@ -2649,8 +2775,8 @@
     }
     Handle<AccessorInfo> prototype =
         Accessors::FunctionPrototypeInfo(isolate(), ro_attribs);
-    AccessorConstantDescriptor d(Handle<Name>(Name::cast(prototype->name())),
-                                 prototype, ro_attribs);
+    Descriptor d = Descriptor::AccessorConstant(
+        Handle<Name>(Name::cast(prototype->name())), prototype, ro_attribs);
     map->AppendDescriptor(&d);
   }
 }
@@ -2684,8 +2810,8 @@
   {  // Add length.
     Handle<AccessorInfo> length =
         Accessors::FunctionLengthInfo(isolate(), roc_attribs);
-    AccessorConstantDescriptor d(handle(Name::cast(length->name())), length,
-                                 roc_attribs);
+    Descriptor d = Descriptor::AccessorConstant(
+        handle(Name::cast(length->name())), length, roc_attribs);
     map->AppendDescriptor(&d);
   }
 
@@ -2693,8 +2819,8 @@
   {  // Add name.
     Handle<AccessorInfo> name =
         Accessors::FunctionNameInfo(isolate(), roc_attribs);
-    AccessorConstantDescriptor d(handle(Name::cast(name->name())), name,
-                                 roc_attribs);
+    Descriptor d = Descriptor::AccessorConstant(
+        handle(Name::cast(name->name())), name, roc_attribs);
     map->AppendDescriptor(&d);
   }
   if (IsFunctionModeWithPrototype(function_mode)) {
@@ -2704,31 +2830,46 @@
                                                            : ro_attribs;
     Handle<AccessorInfo> prototype =
         Accessors::FunctionPrototypeInfo(isolate(), attribs);
-    AccessorConstantDescriptor d(Handle<Name>(Name::cast(prototype->name())),
-                                 prototype, attribs);
+    Descriptor d = Descriptor::AccessorConstant(
+        Handle<Name>(Name::cast(prototype->name())), prototype, attribs);
     map->AppendDescriptor(&d);
   }
 }
 
-Handle<JSFixedArrayIterator> Factory::NewJSFixedArrayIterator(
-    Handle<FixedArray> array) {
-  // Create the "next" function (must be unique per iterator object).
-  Handle<Code> code(
-      isolate()->builtins()->builtin(Builtins::kFixedArrayIteratorNext));
-  // TODO(neis): Don't create a new SharedFunctionInfo each time.
-  Handle<JSFunction> next = isolate()->factory()->NewFunctionWithoutPrototype(
-      isolate()->factory()->next_string(), code, false);
-  next->shared()->set_native(true);
+Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
+  Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+  SetClassFunctionInstanceDescriptor(map);
+  map->set_is_constructor(true);
+  map->set_is_callable();
+  Map::SetPrototype(map, empty_function);
+  return map;
+}
 
-  // Create the iterator.
-  Handle<Map> map(isolate()->native_context()->fixed_array_iterator_map());
-  Handle<JSFixedArrayIterator> iterator =
-      Handle<JSFixedArrayIterator>::cast(NewJSObjectFromMap(map));
-  iterator->set_initial_next(*next);
-  iterator->set_array(*array);
-  iterator->set_index(0);
-  iterator->InObjectPropertyAtPut(JSFixedArrayIterator::kNextIndex, *next);
-  return iterator;
+void Factory::SetClassFunctionInstanceDescriptor(Handle<Map> map) {
+  Map::EnsureDescriptorSlack(map, 2);
+
+  PropertyAttributes rw_attribs =
+      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+  PropertyAttributes roc_attribs =
+      static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+
+  STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
+  {  // Add length.
+    Handle<AccessorInfo> length =
+        Accessors::FunctionLengthInfo(isolate(), roc_attribs);
+    Descriptor d = Descriptor::AccessorConstant(
+        handle(Name::cast(length->name())), length, roc_attribs);
+    map->AppendDescriptor(&d);
+  }
+
+  {
+    // Add prototype.
+    Handle<AccessorInfo> prototype =
+        Accessors::FunctionPrototypeInfo(isolate(), rw_attribs);
+    Descriptor d = Descriptor::AccessorConstant(
+        Handle<Name>(Name::cast(prototype->name())), prototype, rw_attribs);
+    map->AppendDescriptor(&d);
+  }
 }
 
 }  // namespace internal
diff --git a/src/factory.h b/src/factory.h
index d059b10..50d0137 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -5,14 +5,18 @@
 #ifndef V8_FACTORY_H_
 #define V8_FACTORY_H_
 
+#include "src/feedback-vector.h"
 #include "src/globals.h"
 #include "src/isolate.h"
 #include "src/messages.h"
-#include "src/type-feedback-vector.h"
+#include "src/objects/scope-info.h"
 
 namespace v8 {
 namespace internal {
 
+class BoilerplateDescription;
+class ConstantElementsPair;
+
 enum FunctionMode {
   // With prototype.
   FUNCTION_WITH_WRITEABLE_PROTOTYPE,
@@ -48,6 +52,13 @@
   // Allocates an uninitialized fixed array. It must be filled by the caller.
   Handle<FixedArray> NewUninitializedFixedArray(int size);
 
+  // Allocates a fixed array for name-value pairs of boilerplate properties and
+  // calculates the number of properties we need to store in the backing store.
+  Handle<BoilerplateDescription> NewBoilerplateDescription(int boilerplate,
+                                                           int all_properties,
+                                                           int index_keys,
+                                                           bool has_seen_proto);
+
   // Allocate a new uninitialized fixed double array.
   // The function returns a pre-allocated empty fixed array for capacity = 0,
   // so the return type must be the general fixed array class.
@@ -66,25 +77,12 @@
   Handle<OrderedHashSet> NewOrderedHashSet();
   Handle<OrderedHashMap> NewOrderedHashMap();
 
-  // Create a new boxed value.
-  Handle<Box> NewBox(Handle<Object> value);
-
-  // Create a new PromiseReactionJobInfo struct.
-  Handle<PromiseReactionJobInfo> NewPromiseReactionJobInfo(
-      Handle<Object> value, Handle<Object> tasks, Handle<Object> deferred,
-      Handle<Object> debug_id, Handle<Object> debug_name,
-      Handle<Context> context);
-
-  // Create a new PromiseResolveThenableJobInfo struct.
-  Handle<PromiseResolveThenableJobInfo> NewPromiseResolveThenableJobInfo(
-      Handle<JSReceiver> thenable, Handle<JSReceiver> then,
-      Handle<JSFunction> resolve, Handle<JSFunction> reject,
-      Handle<Object> debug_id, Handle<Object> debug_name,
-      Handle<Context> context);
-
   // Create a new PrototypeInfo struct.
   Handle<PrototypeInfo> NewPrototypeInfo();
 
+  // Create a new Tuple2 struct.
+  Handle<Tuple2> NewTuple2(Handle<Object> value1, Handle<Object> value2);
+
   // Create a new Tuple3 struct.
   Handle<Tuple3> NewTuple3(Handle<Object> value1, Handle<Object> value2,
                            Handle<Object> value3);
@@ -93,6 +91,10 @@
   Handle<ContextExtension> NewContextExtension(Handle<ScopeInfo> scope_info,
                                                Handle<Object> extension);
 
+  // Create a new ConstantElementsPair struct.
+  Handle<ConstantElementsPair> NewConstantElementsPair(
+      ElementsKind elements_kind, Handle<FixedArrayBase> constant_values);
+
   // Create a pre-tenured empty AccessorPair.
   Handle<AccessorPair> NewAccessorPair();
 
@@ -227,6 +229,11 @@
   MUST_USE_RESULT MaybeHandle<Map> InternalizedStringMapForString(
       Handle<String> string);
 
+  // Creates an internalized copy of an external string. |string| must be
+  // of type StringClass.
+  template <class StringClass>
+  Handle<StringClass> InternalizeExternalString(Handle<String> string);
+
   // Allocates and partially initializes an one-byte or two-byte String. The
   // characters of the string are uninitialized. Currently used in regexp code
   // only, where they are pretenured.
@@ -293,8 +300,9 @@
                                    Handle<JSFunction> function,
                                    Handle<ScopeInfo> scope_info);
 
-  // Create a function context.
-  Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);
+  // Create a function or eval context.
+  Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function,
+                                     ScopeType scope_type);
 
   // Create a catch context.
   Handle<Context> NewCatchContext(Handle<JSFunction> function,
@@ -319,8 +327,6 @@
   Handle<Context> NewBlockContext(Handle<JSFunction> function,
                                   Handle<Context> previous,
                                   Handle<ScopeInfo> scope_info);
-  // Create a promise context.
-  Handle<Context> NewPromiseResolvingFunctionContext(int length);
 
   // Allocate a new struct.  The struct is pretenured (allocated directly in
   // the old generation).
@@ -333,6 +339,8 @@
 
   Handle<Script> NewScript(Handle<String> source);
 
+  Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
+
   // Foreign objects are pretenured when allocated by the bootstrapper.
   Handle<Foreign> NewForeign(Address addr,
                              PretenureFlag pretenure = NOT_TENURED);
@@ -362,6 +370,10 @@
 
   Handle<WeakCell> NewWeakCell(Handle<HeapObject> value);
 
+  Handle<Cell> NewNoClosuresCell(Handle<Object> value);
+  Handle<Cell> NewOneClosureCell(Handle<Object> value);
+  Handle<Cell> NewManyClosuresCell(Handle<Object> value);
+
   Handle<TransitionArray> NewTransitionArray(int capacity);
 
   // Allocate a tenured AllocationSite. It's payload is null.
@@ -431,15 +443,28 @@
     }
     return NewNumber(static_cast<double>(value), pretenure);
   }
-  Handle<HeapNumber> NewHeapNumber(double value,
-                                   MutableMode mode = IMMUTABLE,
-                                   PretenureFlag pretenure = NOT_TENURED);
+  Handle<HeapNumber> NewHeapNumber(double value, MutableMode mode = IMMUTABLE,
+                                   PretenureFlag pretenure = NOT_TENURED) {
+    Handle<HeapNumber> heap_number = NewHeapNumber(mode, pretenure);
+    heap_number->set_value(value);
+    return heap_number;
+  }
+  Handle<HeapNumber> NewHeapNumberFromBits(
+      uint64_t bits, MutableMode mode = IMMUTABLE,
+      PretenureFlag pretenure = NOT_TENURED) {
+    Handle<HeapNumber> heap_number = NewHeapNumber(mode, pretenure);
+    heap_number->set_value_as_bits(bits);
+    return heap_number;
+  }
+  // Creates a mutable heap number object whose value field is hole NaN.
+  Handle<HeapNumber> NewMutableHeapNumber(
+      PretenureFlag pretenure = NOT_TENURED) {
+    return NewHeapNumberFromBits(kHoleNanInt64, MUTABLE, pretenure);
+  }
 
-#define SIMD128_NEW_DECL(TYPE, Type, type, lane_count, lane_type) \
-  Handle<Type> New##Type(lane_type lanes[lane_count],             \
-                         PretenureFlag pretenure = NOT_TENURED);
-  SIMD128_TYPES(SIMD128_NEW_DECL)
-#undef SIMD128_NEW_DECL
+  // Creates a heap number object whose value field is not yet set.
+  Handle<HeapNumber> NewHeapNumber(MutableMode mode,
+                                   PretenureFlag pretenure = NOT_TENURED);
 
   Handle<JSWeakMap> NewJSWeakMap();
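
A plausible reason for the bits-based constructor above: hole NaN is a NaN with a specific payload, and on some platforms routing such a value through floating-point registers can quiet the NaN or drop the payload, so the factory writes the bit pattern directly. A standalone illustration of bit-exact double/uint64_t conversion (not V8 API):

    #include <cstdint>
    #include <cstring>

    // Bit-exact conversions; memcpy avoids any floating-point operation
    // that could canonicalize a NaN payload.
    uint64_t DoubleToBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return bits;
    }

    double BitsToDouble(uint64_t bits) {
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d;
    }
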
 
@@ -450,7 +475,8 @@
   Handle<JSObject> NewJSObject(Handle<JSFunction> constructor,
                                PretenureFlag pretenure = NOT_TENURED);
   // JSObject without a prototype.
-  Handle<JSObject> NewJSObjectWithNullProto();
+  Handle<JSObject> NewJSObjectWithNullProto(
+      PretenureFlag pretenure = NOT_TENURED);
 
   // Global objects are pretenured and initialized based on a constructor.
   Handle<JSGlobalObject> NewJSGlobalObject(Handle<JSFunction> constructor);
@@ -532,6 +558,8 @@
                                    size_t byte_offset, size_t byte_length);
 
   Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value, bool done);
+  Handle<JSAsyncFromSyncIterator> NewJSAsyncFromSyncIterator(
+      Handle<JSReceiver> sync_iterator);
 
   Handle<JSMap> NewJSMap();
   Handle<JSSet> NewJSSet();
@@ -540,9 +568,6 @@
   Handle<JSMapIterator> NewJSMapIterator();
   Handle<JSSetIterator> NewJSSetIterator();
 
-  Handle<JSFixedArrayIterator> NewJSFixedArrayIterator(
-      Handle<FixedArray> array);
-
   // Allocates a bound function.
   MaybeHandle<JSBoundFunction> NewJSBoundFunction(
       Handle<JSReceiver> target_function, Handle<Object> bound_this,
@@ -575,6 +600,15 @@
 
   Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
       Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
+      Handle<Object> context_or_undefined, Handle<Cell> vector,
+      PretenureFlag pretenure = TENURED);
+
+  Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+      Handle<SharedFunctionInfo> function_info, Handle<Context> context,
+      Handle<Cell> vector, PretenureFlag pretenure = TENURED);
+
+  Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+      Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
       Handle<Object> context_or_undefined, PretenureFlag pretenure = TENURED);
 
   Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
@@ -645,6 +679,7 @@
   DECLARE_ERROR(SyntaxError)
   DECLARE_ERROR(TypeError)
   DECLARE_ERROR(WasmCompileError)
+  DECLARE_ERROR(WasmLinkError)
   DECLARE_ERROR(WasmRuntimeError)
 #undef DECLARE_ERROR
 
@@ -700,12 +735,15 @@
 
   // Allocates a new SharedFunctionInfo object.
   Handle<SharedFunctionInfo> NewSharedFunctionInfo(
-      Handle<String> name, int number_of_literals, FunctionKind kind,
-      Handle<Code> code, Handle<ScopeInfo> scope_info);
+      Handle<String> name, FunctionKind kind, Handle<Code> code,
+      Handle<ScopeInfo> scope_info);
   Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name,
                                                    MaybeHandle<Code> code,
                                                    bool is_constructor);
 
+  Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
+      FunctionLiteral* literal, Handle<Script> script);
+
   static bool IsFunctionModeWithPrototype(FunctionMode function_mode) {
     return (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
             function_mode == FUNCTION_WITH_READONLY_PROTOTYPE);
@@ -716,6 +754,8 @@
   Handle<Map> CreateStrictFunctionMap(FunctionMode function_mode,
                                       Handle<JSFunction> empty_function);
 
+  Handle<Map> CreateClassFunctionMap(Handle<JSFunction> empty_function);
+
   // Allocates a new JSMessageObject object.
   Handle<JSMessageObject> NewJSMessageObject(MessageTemplate::Template message,
                                              Handle<Object> argument,
@@ -797,6 +837,8 @@
 
   void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
                                            FunctionMode function_mode);
+
+  void SetClassFunctionInstanceDescriptor(Handle<Map> map);
 };
 
 }  // namespace internal
diff --git a/src/fast-accessor-assembler.cc b/src/fast-accessor-assembler.cc
index ee9b241..6e7b49e 100644
--- a/src/fast-accessor-assembler.cc
+++ b/src/fast-accessor-assembler.cc
@@ -8,20 +8,23 @@
 #include "src/code-stub-assembler.h"
 #include "src/code-stubs.h"  // For CallApiCallbackStub.
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 #include "src/objects.h"  // For FAA::LoadInternalField impl.
 
-using v8::internal::CodeStubAssembler;
-using v8::internal::compiler::Node;
-
 namespace v8 {
 namespace internal {
 
+using compiler::Node;
+using compiler::CodeAssemblerLabel;
+using compiler::CodeAssemblerVariable;
+
 FastAccessorAssembler::FastAccessorAssembler(Isolate* isolate)
     : zone_(isolate->allocator(), ZONE_NAME),
       isolate_(isolate),
-      assembler_(new CodeStubAssembler(isolate, zone(), 1,
-                                       Code::ComputeFlags(Code::STUB),
-                                       "FastAccessorAssembler")),
+      assembler_state_(new compiler::CodeAssemblerState(
+          isolate, zone(), 1, Code::ComputeFlags(Code::STUB),
+          "FastAccessorAssembler")),
+      assembler_(new CodeStubAssembler(assembler_state_.get())),
       state_(kBuilding) {}
 
 FastAccessorAssembler::~FastAccessorAssembler() { Clear(); }
@@ -40,19 +43,18 @@
 }
 
 FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
-    ValueId value, int field_no) {
+    ValueId value_id, int field_no) {
   CHECK_EQ(kBuilding, state_);
 
-  CodeStubAssembler::Variable result(assembler_.get(),
-                                     MachineRepresentation::kTagged);
+  CodeAssemblerVariable result(assembler_.get(),
+                               MachineRepresentation::kTagged);
   LabelId is_not_jsobject = MakeLabel();
-  CodeStubAssembler::Label merge(assembler_.get(), &result);
+  CodeAssemblerLabel merge(assembler_.get(), &result);
 
-  CheckIsJSObjectOrJump(value, is_not_jsobject);
+  CheckIsJSObjectOrJump(value_id, is_not_jsobject);
 
   Node* internal_field = assembler_->LoadObjectField(
-      FromId(value), JSObject::kHeaderSize + kPointerSize * field_no,
-      MachineType::Pointer());
+      FromId(value_id), JSObject::kHeaderSize + kPointerSize * field_no);
 
   result.Bind(internal_field);
   assembler_->Goto(&merge);
@@ -68,14 +70,15 @@
 }
 
 FastAccessorAssembler::ValueId
-FastAccessorAssembler::LoadInternalFieldUnchecked(ValueId value, int field_no) {
+FastAccessorAssembler::LoadInternalFieldUnchecked(ValueId value_id,
+                                                  int field_no) {
   CHECK_EQ(kBuilding, state_);
 
   // Defensive debug checks.
   if (FLAG_debug_code) {
     LabelId is_jsobject = MakeLabel();
     LabelId is_not_jsobject = MakeLabel();
-    CheckIsJSObjectOrJump(value, is_not_jsobject);
+    CheckIsJSObjectOrJump(value_id, is_not_jsobject);
     assembler_->Goto(FromId(is_jsobject));
 
     SetLabel(is_not_jsobject);
@@ -86,58 +89,56 @@
   }
 
   Node* result = assembler_->LoadObjectField(
-      FromId(value), JSObject::kHeaderSize + kPointerSize * field_no,
-      MachineType::Pointer());
+      FromId(value_id), JSObject::kHeaderSize + kPointerSize * field_no);
 
   return FromRaw(result);
 }
 
-FastAccessorAssembler::ValueId FastAccessorAssembler::LoadValue(ValueId value,
-                                                                int offset) {
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadValue(
+    ValueId value_id, int offset) {
   CHECK_EQ(kBuilding, state_);
-  return FromRaw(assembler_->LoadBufferObject(FromId(value), offset,
+  return FromRaw(assembler_->LoadBufferObject(FromId(value_id), offset,
                                               MachineType::IntPtr()));
 }
 
-FastAccessorAssembler::ValueId FastAccessorAssembler::LoadObject(ValueId value,
-                                                                 int offset) {
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadObject(
+    ValueId value_id, int offset) {
   CHECK_EQ(kBuilding, state_);
   return FromRaw(assembler_->LoadBufferObject(
-      assembler_->LoadBufferObject(FromId(value), offset,
-                                   MachineType::Pointer()),
-      0, MachineType::AnyTagged()));
+      assembler_->LoadBufferObject(FromId(value_id), offset), 0,
+      MachineType::AnyTagged()));
 }
 
-FastAccessorAssembler::ValueId FastAccessorAssembler::ToSmi(ValueId value) {
+FastAccessorAssembler::ValueId FastAccessorAssembler::ToSmi(ValueId value_id) {
   CHECK_EQ(kBuilding, state_);
-  return FromRaw(assembler_->SmiTag(FromId(value)));
+  return FromRaw(assembler_->SmiTag(FromId(value_id)));
 }
 
-void FastAccessorAssembler::ReturnValue(ValueId value) {
+void FastAccessorAssembler::ReturnValue(ValueId value_id) {
   CHECK_EQ(kBuilding, state_);
-  assembler_->Return(FromId(value));
+  assembler_->Return(FromId(value_id));
 }
 
-void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value, int mask) {
+void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value_id,
+                                                     int mask) {
   CHECK_EQ(kBuilding, state_);
-  CodeStubAssembler::Label pass(assembler_.get());
-  CodeStubAssembler::Label fail(assembler_.get());
+  CodeAssemblerLabel pass(assembler_.get());
+  CodeAssemblerLabel fail(assembler_.get());
+  Node* value = FromId(value_id);
   assembler_->Branch(
-      assembler_->Word32Equal(
-          assembler_->Word32And(FromId(value), assembler_->Int32Constant(mask)),
-          assembler_->Int32Constant(0)),
-      &fail, &pass);
+      assembler_->IsSetWord(assembler_->BitcastTaggedToWord(value), mask),
+      &pass, &fail);
   assembler_->Bind(&fail);
   assembler_->Return(assembler_->NullConstant());
   assembler_->Bind(&pass);
 }
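In plain C++ terms the rewritten branch tests the mask directly on the raw
word; a sketch of the equivalent predicate (not the emitted code):

    bool PassesFlagCheck(intptr_t raw_value, int mask) {
      // IsSetWord(BitcastTaggedToWord(value), mask) <=> (raw_value & mask) != 0.
      // When this is false, the generated accessor returns null instead.
      return (raw_value & mask) != 0;
    }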
 
-void FastAccessorAssembler::CheckNotZeroOrReturnNull(ValueId value) {
+void FastAccessorAssembler::CheckNotZeroOrReturnNull(ValueId value_id) {
   CHECK_EQ(kBuilding, state_);
-  CodeStubAssembler::Label is_null(assembler_.get());
-  CodeStubAssembler::Label not_null(assembler_.get());
+  CodeAssemblerLabel is_null(assembler_.get());
+  CodeAssemblerLabel not_null(assembler_.get());
   assembler_->Branch(
-      assembler_->WordEqual(FromId(value), assembler_->IntPtrConstant(0)),
+      assembler_->WordEqual(FromId(value_id), assembler_->SmiConstant(0)),
       &is_null, &not_null);
   assembler_->Bind(&is_null);
   assembler_->Return(assembler_->NullConstant());
@@ -146,7 +147,7 @@
 
 FastAccessorAssembler::LabelId FastAccessorAssembler::MakeLabel() {
   CHECK_EQ(kBuilding, state_);
-  return FromRaw(new CodeStubAssembler::Label(assembler_.get()));
+  return FromRaw(new CodeAssemblerLabel(assembler_.get()));
 }
 
 void FastAccessorAssembler::SetLabel(LabelId label_id) {
@@ -162,9 +163,9 @@
 void FastAccessorAssembler::CheckNotZeroOrJump(ValueId value_id,
                                                LabelId label_id) {
   CHECK_EQ(kBuilding, state_);
-  CodeStubAssembler::Label pass(assembler_.get());
+  CodeAssemblerLabel pass(assembler_.get());
   assembler_->Branch(
-      assembler_->WordEqual(FromId(value_id), assembler_->IntPtrConstant(0)),
+      assembler_->WordEqual(FromId(value_id), assembler_->SmiConstant(0)),
       FromId(label_id), &pass);
   assembler_->Bind(&pass);
 }
@@ -184,31 +185,16 @@
   CallInterfaceDescriptor descriptor = stub.GetCallInterfaceDescriptor();
   DCHECK_EQ(4, descriptor.GetParameterCount());
   DCHECK_EQ(0, descriptor.GetStackParameterCount());
-  // TODO(vogelheim): There is currently no clean way to retrieve the context
-  //     parameter for a stub and the implementation details are hidden in
-  //     compiler/*. The context_paramter is computed as:
-  //       Linkage::GetJSCallContextParamIndex(descriptor->JSParameterCount())
-  const int kContextParameter = 3;
-  Node* context = assembler_->Parameter(kContextParameter);
+  Node* context = assembler_->GetJSContextParameter();
   Node* target = assembler_->HeapConstant(stub.GetCode());
 
-  int param_count = descriptor.GetParameterCount();
-  Node** args = zone()->NewArray<Node*>(param_count + 1 + kJSParameterCount);
-  // Stub/register parameters:
-  args[0] = assembler_->UndefinedConstant();  // callee (there's no JSFunction)
-  args[1] = assembler_->UndefinedConstant();  // call_data (undefined)
-  args[2] = assembler_->Parameter(0);  // receiver (same as holder in this case)
-  args[3] = assembler_->ExternalConstant(callback);  // API callback function
-
-  // JS arguments, on stack:
-  args[4] = FromId(arg);
-
-  // Context.
-  args[5] = context;
-
-  Node* call =
-      assembler_->CallStubN(descriptor, kJSParameterCount, target, args);
-
+  Node* call = assembler_->CallStub(
+      descriptor, target, context,
+      assembler_->UndefinedConstant(),  // callee (there's no JSFunction)
+      assembler_->UndefinedConstant(),  // call_data (undefined)
+      assembler_->Parameter(0),  // receiver (same as holder in this case)
+      assembler_->ExternalConstant(callback),  // API callback function
+      FromId(arg));                            // JS argument, on stack
   return FromRaw(call);
 }
 
@@ -217,28 +203,20 @@
   CHECK_EQ(kBuilding, state_);
 
   // Determine the 'value' object's instance type.
-  Node* object_map = assembler_->LoadObjectField(
-      FromId(value_id), Internals::kHeapObjectMapOffset,
-      MachineType::Pointer());
+  Node* instance_type = assembler_->LoadInstanceType(FromId(value_id));
 
-  Node* instance_type = assembler_->WordAnd(
-      assembler_->LoadObjectField(object_map,
-                                  Internals::kMapInstanceTypeAndBitFieldOffset,
-                                  MachineType::Uint16()),
-      assembler_->IntPtrConstant(0xff));
-
-  CodeStubAssembler::Label is_jsobject(assembler_.get());
+  CodeAssemblerLabel is_jsobject(assembler_.get());
 
   // Check whether we have a proper JSObject.
   assembler_->GotoIf(
-      assembler_->WordEqual(
-          instance_type, assembler_->IntPtrConstant(Internals::kJSObjectType)),
+      assembler_->Word32Equal(
+          instance_type, assembler_->Int32Constant(Internals::kJSObjectType)),
       &is_jsobject);
 
   // JSApiObject?.
-  assembler_->GotoUnless(
-      assembler_->WordEqual(instance_type, assembler_->IntPtrConstant(
-                                               Internals::kJSApiObjectType)),
+  assembler_->GotoIfNot(
+      assembler_->Word32Equal(instance_type, assembler_->Int32Constant(
+                                                 Internals::kJSApiObjectType)),
       FromId(label_id));
 
   // Continue.
@@ -248,7 +226,8 @@
 
 MaybeHandle<Code> FastAccessorAssembler::Build() {
   CHECK_EQ(kBuilding, state_);
-  Handle<Code> code = assembler_->GenerateCode();
+  Handle<Code> code =
+      compiler::CodeAssembler::GenerateCode(assembler_state_.get());
   state_ = !code.is_null() ? kBuilt : kError;
   Clear();
   return code;
@@ -256,12 +235,12 @@
 
 FastAccessorAssembler::ValueId FastAccessorAssembler::FromRaw(Node* node) {
   nodes_.push_back(node);
-  ValueId value = {nodes_.size() - 1};
-  return value;
+  ValueId value_id = {nodes_.size() - 1};
+  return value_id;
 }
 
 FastAccessorAssembler::LabelId FastAccessorAssembler::FromRaw(
-    CodeStubAssembler::Label* label) {
+    CodeAssemblerLabel* label) {
   labels_.push_back(label);
   LabelId label_id = {labels_.size() - 1};
   return label_id;
@@ -273,7 +252,7 @@
   return nodes_.at(value.value_id);
 }
 
-CodeStubAssembler::Label* FastAccessorAssembler::FromId(LabelId label) const {
+CodeAssemblerLabel* FastAccessorAssembler::FromId(LabelId label) const {
   CHECK_LT(label.label_id, labels_.size());
   CHECK_NOT_NULL(labels_.at(label.label_id));
   return labels_.at(label.label_id);
diff --git a/src/fast-accessor-assembler.h b/src/fast-accessor-assembler.h
index 9468d86..f51d5a7 100644
--- a/src/fast-accessor-assembler.h
+++ b/src/fast-accessor-assembler.h
@@ -11,20 +11,22 @@
 
 #include "include/v8-experimental.h"
 #include "src/base/macros.h"
-#include "src/handles.h"
-
-// For CodeStubAssembler::Label. (We cannot forward-declare inner classes.)
-#include "src/code-stub-assembler.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
 
 class Code;
+class CodeStubAssembler;
 class Isolate;
-class Zone;
+template <typename T>
+class MaybeHandle;
 
 namespace compiler {
 class Node;
+class CodeAssemblerLabel;
+class CodeAssemblerState;
+class CodeAssemblerVariable;
 }
 
 // This interface "exports" an aggregated subset of RawMachineAssembler, for
@@ -86,9 +88,9 @@
 
  private:
   ValueId FromRaw(compiler::Node* node);
-  LabelId FromRaw(CodeStubAssembler::Label* label);
+  LabelId FromRaw(compiler::CodeAssemblerLabel* label);
   compiler::Node* FromId(ValueId value) const;
-  CodeStubAssembler::Label* FromId(LabelId value) const;
+  compiler::CodeAssemblerLabel* FromId(LabelId value) const;
 
   void CheckIsJSObjectOrJump(ValueId value, LabelId label_id);
 
@@ -98,13 +100,14 @@
 
   Zone zone_;
   Isolate* isolate_;
+  std::unique_ptr<compiler::CodeAssemblerState> assembler_state_;
   std::unique_ptr<CodeStubAssembler> assembler_;
 
   // To prevent exposing the RMA internals to the outside world, we'll map
   // Node + Label pointers to integers wrapped in ValueId and LabelId
   // instances. These vectors maintain this mapping.
   std::vector<compiler::Node*> nodes_;
-  std::vector<CodeStubAssembler::Label*> labels_;
+  std::vector<compiler::CodeAssemblerLabel*> labels_;
 
   // Remember the current state for easy error checking. (We prefer to be
   // strict as this class will be exposed via the API.)
diff --git a/src/feedback-vector-inl.h b/src/feedback-vector-inl.h
new file mode 100644
index 0000000..45c2cd2
--- /dev/null
+++ b/src/feedback-vector-inl.h
@@ -0,0 +1,313 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FEEDBACK_VECTOR_INL_H_
+#define V8_FEEDBACK_VECTOR_INL_H_
+
+#include "src/factory.h"
+#include "src/feedback-vector.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename Derived>
+FeedbackSlot FeedbackVectorSpecBase<Derived>::AddSlot(FeedbackSlotKind kind) {
+  int slot = This()->slots();
+  int entries_per_slot = FeedbackMetadata::GetSlotSize(kind);
+  This()->append(kind);
+  for (int i = 1; i < entries_per_slot; i++) {
+    This()->append(FeedbackSlotKind::kInvalid);
+  }
+  return FeedbackSlot(slot);
+}
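For illustration, a sketch of how a spec reserves slots through this helper
(FeedbackVectorSpec and the slot kinds come from this patch; the call site and
the Zone* zone are hypothetical):

    FeedbackVectorSpec spec(zone);
    FeedbackSlot call = spec.AddSlot(FeedbackSlotKind::kCall);    // slot 0
    FeedbackSlot lit = spec.AddSlot(FeedbackSlotKind::kLiteral);  // slot 2
    // kCall occupies two entries, so its second entry is padded with kInvalid
    // and the next slot index advances by GetSlotSize(kCall) == 2.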
+
+// static
+FeedbackMetadata* FeedbackMetadata::cast(Object* obj) {
+  DCHECK(obj->IsFeedbackMetadata());
+  return reinterpret_cast<FeedbackMetadata*>(obj);
+}
+
+bool FeedbackMetadata::is_empty() const { return length() == 0; }
+
+int FeedbackMetadata::slot_count() const {
+  if (length() == 0) return 0;
+  DCHECK(length() > kReservedIndexCount);
+  return Smi::cast(get(kSlotsCountIndex))->value();
+}
+
+// static
+FeedbackVector* FeedbackVector::cast(Object* obj) {
+  DCHECK(obj->IsFeedbackVector());
+  return reinterpret_cast<FeedbackVector*>(obj);
+}
+
+int FeedbackMetadata::GetSlotSize(FeedbackSlotKind kind) {
+  switch (kind) {
+    case FeedbackSlotKind::kGeneral:
+    case FeedbackSlotKind::kCompareOp:
+    case FeedbackSlotKind::kBinaryOp:
+    case FeedbackSlotKind::kToBoolean:
+    case FeedbackSlotKind::kLiteral:
+    case FeedbackSlotKind::kCreateClosure:
+      return 1;
+
+    case FeedbackSlotKind::kCall:
+    case FeedbackSlotKind::kLoadProperty:
+    case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+    case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+    case FeedbackSlotKind::kLoadKeyed:
+    case FeedbackSlotKind::kStoreNamedSloppy:
+    case FeedbackSlotKind::kStoreNamedStrict:
+    case FeedbackSlotKind::kStoreOwnNamed:
+    case FeedbackSlotKind::kStoreKeyedSloppy:
+    case FeedbackSlotKind::kStoreKeyedStrict:
+    case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+      return 2;
+
+    case FeedbackSlotKind::kInvalid:
+    case FeedbackSlotKind::kKindsNumber:
+      UNREACHABLE();
+      break;
+  }
+  return 1;
+}
+
+bool FeedbackVector::is_empty() const {
+  return length() == kReservedIndexCount;
+}
+
+int FeedbackVector::slot_count() const {
+  return length() - kReservedIndexCount;
+}
+
+FeedbackMetadata* FeedbackVector::metadata() const {
+  return shared_function_info()->feedback_metadata();
+}
+
+SharedFunctionInfo* FeedbackVector::shared_function_info() const {
+  return SharedFunctionInfo::cast(get(kSharedFunctionInfoIndex));
+}
+
+int FeedbackVector::invocation_count() const {
+  return Smi::cast(get(kInvocationCountIndex))->value();
+}
+
+void FeedbackVector::clear_invocation_count() {
+  set(kInvocationCountIndex, Smi::kZero);
+}
+
+// Conversion from an integer index to either a slot or an IC slot.
+// static
+FeedbackSlot FeedbackVector::ToSlot(int index) {
+  DCHECK_GE(index, kReservedIndexCount);
+  return FeedbackSlot(index - kReservedIndexCount);
+}
+
+Object* FeedbackVector::Get(FeedbackSlot slot) const {
+  return get(GetIndex(slot));
+}
+
+void FeedbackVector::Set(FeedbackSlot slot, Object* value,
+                         WriteBarrierMode mode) {
+  set(GetIndex(slot), value, mode);
+}
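Get and Set address the backing store through GetIndex (declared in
feedback-vector.h), which offsets a slot by the reserved header entries
(kSharedFunctionInfoIndex and kInvocationCountIndex). A worked round trip,
assuming kReservedIndexCount == 2:

    FeedbackSlot slot(3);
    int index = FeedbackVector::GetIndex(slot);  // 3 + 2 == 5
    DCHECK_EQ(slot.ToInt(), FeedbackVector::ToSlot(index).ToInt());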
+
+// Helper function to transform the feedback to BinaryOperationHint.
+BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
+  switch (type_feedback) {
+    case BinaryOperationFeedback::kNone:
+      return BinaryOperationHint::kNone;
+    case BinaryOperationFeedback::kSignedSmall:
+      return BinaryOperationHint::kSignedSmall;
+    case BinaryOperationFeedback::kNumber:
+    case BinaryOperationFeedback::kNumberOrOddball:
+      return BinaryOperationHint::kNumberOrOddball;
+    case BinaryOperationFeedback::kString:
+      return BinaryOperationHint::kString;
+    case BinaryOperationFeedback::kAny:
+    default:
+      return BinaryOperationHint::kAny;
+  }
+  UNREACHABLE();
+  return BinaryOperationHint::kNone;
+}
+
+// Helper function to transform the feedback to CompareOperationHint.
+CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
+  switch (type_feedback) {
+    case CompareOperationFeedback::kNone:
+      return CompareOperationHint::kNone;
+    case CompareOperationFeedback::kSignedSmall:
+      return CompareOperationHint::kSignedSmall;
+    case CompareOperationFeedback::kNumber:
+      return CompareOperationHint::kNumber;
+    case CompareOperationFeedback::kNumberOrOddball:
+      return CompareOperationHint::kNumberOrOddball;
+    case CompareOperationFeedback::kInternalizedString:
+      return CompareOperationHint::kInternalizedString;
+    case CompareOperationFeedback::kString:
+      return CompareOperationHint::kString;
+    case CompareOperationFeedback::kReceiver:
+      return CompareOperationHint::kReceiver;
+    default:
+      return CompareOperationHint::kAny;
+  }
+  UNREACHABLE();
+  return CompareOperationHint::kNone;
+}
+
+void FeedbackVector::ComputeCounts(int* with_type_info, int* generic,
+                                   int* vector_ic_count,
+                                   bool code_is_interpreted) {
+  Object* megamorphic_sentinel =
+      *FeedbackVector::MegamorphicSentinel(GetIsolate());
+  int with = 0;
+  int gen = 0;
+  int total = 0;
+  FeedbackMetadataIterator iter(metadata());
+  while (iter.HasNext()) {
+    FeedbackSlot slot = iter.Next();
+    FeedbackSlotKind kind = iter.kind();
+
+    Object* const obj = Get(slot);
+    switch (kind) {
+      case FeedbackSlotKind::kCall:
+      case FeedbackSlotKind::kLoadProperty:
+      case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+      case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+      case FeedbackSlotKind::kLoadKeyed:
+      case FeedbackSlotKind::kStoreNamedSloppy:
+      case FeedbackSlotKind::kStoreNamedStrict:
+      case FeedbackSlotKind::kStoreOwnNamed:
+      case FeedbackSlotKind::kStoreKeyedSloppy:
+      case FeedbackSlotKind::kStoreKeyedStrict:
+      case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
+        if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
+          with++;
+        } else if (obj == megamorphic_sentinel) {
+          gen++;
+          if (code_is_interpreted) with++;
+        }
+        total++;
+        break;
+      }
+      case FeedbackSlotKind::kBinaryOp:
+        // If we are not running interpreted code, we need to ignore the special
+        // IC slots for binaryop/compare used by the interpreter.
+        // TODO(mvstanton): Remove code_is_interpreted when full code is retired
+        // from service.
+        if (code_is_interpreted) {
+          int const feedback = Smi::cast(obj)->value();
+          BinaryOperationHint hint = BinaryOperationHintFromFeedback(feedback);
+          if (hint == BinaryOperationHint::kAny) {
+            gen++;
+          }
+          if (hint != BinaryOperationHint::kNone) {
+            with++;
+          }
+          total++;
+        }
+        break;
+      case FeedbackSlotKind::kCompareOp: {
+        // If we are not running interpreted code, we need to ignore the special
+        // IC slots for binaryop/compare used by the interpreter.
+        // TODO(mvstanton): Remove code_is_interpreted when full code is retired
+        // from service.
+        if (code_is_interpreted) {
+          int const feedback = Smi::cast(obj)->value();
+          CompareOperationHint hint =
+              CompareOperationHintFromFeedback(feedback);
+          if (hint == CompareOperationHint::kAny) {
+            gen++;
+          }
+          if (hint != CompareOperationHint::kNone) {
+            with++;
+          }
+          total++;
+        }
+        break;
+      }
+      case FeedbackSlotKind::kToBoolean:
+      case FeedbackSlotKind::kCreateClosure:
+      case FeedbackSlotKind::kGeneral:
+      case FeedbackSlotKind::kLiteral:
+        break;
+      case FeedbackSlotKind::kInvalid:
+      case FeedbackSlotKind::kKindsNumber:
+        UNREACHABLE();
+        break;
+    }
+  }
+
+  *with_type_info = with;
+  *generic = gen;
+  *vector_ic_count = total;
+}
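A minimal caller sketch (hypothetical; the profiler-side callers live
elsewhere):

    int with_type_info, generic, vector_ic_count;
    vector->ComputeCounts(&with_type_info, &generic, &vector_ic_count,
                          /* code_is_interpreted */ true);
    // with_type_info <= vector_ic_count always holds; the with/total ratio
    // feeds the type-info-percentage heuristics.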
+
+Handle<Symbol> FeedbackVector::UninitializedSentinel(Isolate* isolate) {
+  return isolate->factory()->uninitialized_symbol();
+}
+
+Handle<Symbol> FeedbackVector::MegamorphicSentinel(Isolate* isolate) {
+  return isolate->factory()->megamorphic_symbol();
+}
+
+Handle<Symbol> FeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
+  return isolate->factory()->premonomorphic_symbol();
+}
+
+Symbol* FeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
+  return isolate->heap()->uninitialized_symbol();
+}
+
+bool FeedbackMetadataIterator::HasNext() const {
+  return next_slot_.ToInt() < metadata()->slot_count();
+}
+
+FeedbackSlot FeedbackMetadataIterator::Next() {
+  DCHECK(HasNext());
+  cur_slot_ = next_slot_;
+  slot_kind_ = metadata()->GetKind(cur_slot_);
+  next_slot_ = FeedbackSlot(next_slot_.ToInt() + entry_size());
+  return cur_slot_;
+}
+
+int FeedbackMetadataIterator::entry_size() const {
+  return FeedbackMetadata::GetSlotSize(kind());
+}
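The iterator yields one FeedbackSlot per logical slot and skips the padding
entries of multi-entry slots; a typical loop (the same shape ComputeCounts
uses above):

    FeedbackMetadataIterator iter(metadata);
    while (iter.HasNext()) {
      FeedbackSlot slot = iter.Next();
      FeedbackSlotKind kind = iter.kind();  // valid only after Next()
      // ... per-slot work; the next Next() advances by entry_size() entries.
    }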
+
+Object* FeedbackNexus::GetFeedback() const { return vector()->Get(slot()); }
+
+Object* FeedbackNexus::GetFeedbackExtra() const {
+#ifdef DEBUG
+  FeedbackSlotKind kind = vector()->GetKind(slot());
+  DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
+#endif
+  int extra_index = vector()->GetIndex(slot()) + 1;
+  return vector()->get(extra_index);
+}
+
+void FeedbackNexus::SetFeedback(Object* feedback, WriteBarrierMode mode) {
+  vector()->Set(slot(), feedback, mode);
+}
+
+void FeedbackNexus::SetFeedbackExtra(Object* feedback_extra,
+                                     WriteBarrierMode mode) {
+#ifdef DEBUG
+  FeedbackSlotKind kind = vector()->GetKind(slot());
+  DCHECK_LT(1, FeedbackMetadata::GetSlotSize(kind));
+#endif
+  int index = vector()->GetIndex(slot()) + 1;
+  vector()->set(index, feedback_extra, mode);
+}
+
+Isolate* FeedbackNexus::GetIsolate() const { return vector()->GetIsolate(); }
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_FEEDBACK_VECTOR_INL_H_
diff --git a/src/type-feedback-vector.cc b/src/feedback-vector.cc
similarity index 64%
rename from src/type-feedback-vector.cc
rename to src/feedback-vector.cc
index 2ba9690..4003068 100644
--- a/src/type-feedback-vector.cc
+++ b/src/feedback-vector.cc
@@ -2,18 +2,16 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/type-feedback-vector.h"
-
+#include "src/feedback-vector.h"
 #include "src/code-stubs.h"
+#include "src/feedback-vector-inl.h"
 #include "src/ic/ic-inl.h"
 #include "src/ic/ic-state.h"
 #include "src/objects.h"
-#include "src/type-feedback-vector-inl.h"
 
 namespace v8 {
 namespace internal {
 
-
 static bool IsPropertyNameFeedback(Object* feedback) {
   if (feedback->IsString()) return true;
   if (!feedback->IsSymbol()) return false;
@@ -24,64 +22,47 @@
          symbol != heap->megamorphic_symbol();
 }
 
-
-std::ostream& operator<<(std::ostream& os, FeedbackVectorSlotKind kind) {
-  return os << TypeFeedbackMetadata::Kind2String(kind);
+std::ostream& operator<<(std::ostream& os, FeedbackSlotKind kind) {
+  return os << FeedbackMetadata::Kind2String(kind);
 }
 
-
-FeedbackVectorSlotKind TypeFeedbackMetadata::GetKind(
-    FeedbackVectorSlot slot) const {
+FeedbackSlotKind FeedbackMetadata::GetKind(FeedbackSlot slot) const {
   int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
   int data = Smi::cast(get(index))->value();
   return VectorICComputer::decode(data, slot.ToInt());
 }
 
-String* TypeFeedbackMetadata::GetName(FeedbackVectorSlot slot) const {
-  DCHECK(SlotRequiresName(GetKind(slot)));
-  UnseededNumberDictionary* names =
-      UnseededNumberDictionary::cast(get(kNamesTableIndex));
-  int entry = names->FindEntry(GetIsolate(), slot.ToInt());
-  CHECK_NE(UnseededNumberDictionary::kNotFound, entry);
-  Object* name = names->ValueAt(entry);
-  DCHECK(name->IsString());
-  return String::cast(name);
-}
-
-void TypeFeedbackMetadata::SetKind(FeedbackVectorSlot slot,
-                                   FeedbackVectorSlotKind kind) {
+void FeedbackMetadata::SetKind(FeedbackSlot slot, FeedbackSlotKind kind) {
   int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
   int data = Smi::cast(get(index))->value();
   int new_data = VectorICComputer::encode(data, slot.ToInt(), kind);
   set(index, Smi::FromInt(new_data));
 }
 
-
-template Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(
+template Handle<FeedbackMetadata> FeedbackMetadata::New(
     Isolate* isolate, const StaticFeedbackVectorSpec* spec);
-template Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(
+template Handle<FeedbackMetadata> FeedbackMetadata::New(
     Isolate* isolate, const FeedbackVectorSpec* spec);
 
-
 // static
 template <typename Spec>
-Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
-                                                       const Spec* spec) {
+Handle<FeedbackMetadata> FeedbackMetadata::New(Isolate* isolate,
+                                               const Spec* spec) {
   Factory* factory = isolate->factory();
 
   const int slot_count = spec->slots();
   const int slot_kinds_length = VectorICComputer::word_count(slot_count);
   const int length = slot_kinds_length + kReservedIndexCount;
   if (length == kReservedIndexCount) {
-    return Handle<TypeFeedbackMetadata>::cast(factory->empty_fixed_array());
+    return Handle<FeedbackMetadata>::cast(factory->empty_fixed_array());
   }
 #ifdef DEBUG
   for (int i = 0; i < slot_count;) {
-    FeedbackVectorSlotKind kind = spec->GetKind(i);
-    int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+    FeedbackSlotKind kind = spec->GetKind(FeedbackSlot(i));
+    int entry_size = FeedbackMetadata::GetSlotSize(kind);
     for (int j = 1; j < entry_size; j++) {
-      FeedbackVectorSlotKind kind = spec->GetKind(i + j);
-      DCHECK_EQ(FeedbackVectorSlotKind::INVALID, kind);
+      FeedbackSlotKind kind = spec->GetKind(FeedbackSlot(i + j));
+      DCHECK_EQ(FeedbackSlotKind::kInvalid, kind);
     }
     i += entry_size;
   }
@@ -94,38 +75,17 @@
     array->set(kReservedIndexCount + i, Smi::kZero);
   }
 
-  Handle<TypeFeedbackMetadata> metadata =
-      Handle<TypeFeedbackMetadata>::cast(array);
+  Handle<FeedbackMetadata> metadata = Handle<FeedbackMetadata>::cast(array);
 
-  // Add names to NamesTable.
-  const int name_count = spec->name_count();
-
-  Handle<UnseededNumberDictionary> names;
-  if (name_count) {
-    names = UnseededNumberDictionary::New(isolate, name_count, TENURED);
-  }
-
-  int name_index = 0;
   for (int i = 0; i < slot_count; i++) {
-    FeedbackVectorSlotKind kind = spec->GetKind(i);
-    metadata->SetKind(FeedbackVectorSlot(i), kind);
-    if (SlotRequiresName(kind)) {
-      Handle<String> name = spec->GetName(name_index);
-      DCHECK(!name.is_null());
-      Handle<UnseededNumberDictionary> new_names =
-          UnseededNumberDictionary::AtNumberPut(names, i, name);
-      DCHECK_EQ(*new_names, *names);
-      names = new_names;
-      name_index++;
-    }
+    FeedbackSlot slot(i);
+    FeedbackSlotKind kind = spec->GetKind(slot);
+    metadata->SetKind(slot, kind);
   }
-  DCHECK_EQ(name_count, name_index);
-  metadata->set(kNamesTableIndex,
-                name_count ? static_cast<Object*>(*names) : Smi::kZero);
 
-  // It's important that the TypeFeedbackMetadata have a COW map, since it's
+  // It's important that the FeedbackMetadata have a COW map, since it's
   // pointed to by both a SharedFunctionInfo and indirectly by closures through
-  // the TypeFeedbackVector. The serializer uses the COW map type to decide
+  // the FeedbackVector. The serializer uses the COW map type to decide that
   // this object belongs in the startup snapshot and not the partial
   // snapshot(s).
   metadata->set_map(isolate->heap()->fixed_cow_array_map());
@@ -133,228 +93,252 @@
   return metadata;
 }
 
-
-bool TypeFeedbackMetadata::SpecDiffersFrom(
+bool FeedbackMetadata::SpecDiffersFrom(
     const FeedbackVectorSpec* other_spec) const {
   if (other_spec->slots() != slot_count()) {
     return true;
   }
 
   int slots = slot_count();
-  int name_index = 0;
   for (int i = 0; i < slots;) {
-    FeedbackVectorSlot slot(i);
-    FeedbackVectorSlotKind kind = GetKind(slot);
-    int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+    FeedbackSlot slot(i);
+    FeedbackSlotKind kind = GetKind(slot);
+    int entry_size = FeedbackMetadata::GetSlotSize(kind);
 
-    if (kind != other_spec->GetKind(i)) {
+    if (kind != other_spec->GetKind(slot)) {
       return true;
     }
-    if (SlotRequiresName(kind)) {
-      String* name = GetName(slot);
-      DCHECK(name != GetHeap()->empty_string());
-      String* other_name = *other_spec->GetName(name_index++);
-      if (name != other_name) {
-        return true;
-      }
-    }
     i += entry_size;
   }
   return false;
 }
 
-bool TypeFeedbackMetadata::DiffersFrom(
-    const TypeFeedbackMetadata* other_metadata) const {
-  if (other_metadata->slot_count() != slot_count()) {
-    return true;
-  }
-
-  int slots = slot_count();
-  for (int i = 0; i < slots;) {
-    FeedbackVectorSlot slot(i);
-    FeedbackVectorSlotKind kind = GetKind(slot);
-    int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
-    if (GetKind(slot) != other_metadata->GetKind(slot)) {
-      return true;
-    }
-    if (SlotRequiresName(kind)) {
-      if (GetName(slot) != other_metadata->GetName(slot)) {
-        return true;
-      }
-    }
-    i += entry_size;
-  }
-  return false;
-}
-
-const char* TypeFeedbackMetadata::Kind2String(FeedbackVectorSlotKind kind) {
+const char* FeedbackMetadata::Kind2String(FeedbackSlotKind kind) {
   switch (kind) {
-    case FeedbackVectorSlotKind::INVALID:
+    case FeedbackSlotKind::kInvalid:
       return "INVALID";
-    case FeedbackVectorSlotKind::CALL_IC:
+    case FeedbackSlotKind::kCall:
       return "CALL_IC";
-    case FeedbackVectorSlotKind::LOAD_IC:
+    case FeedbackSlotKind::kLoadProperty:
       return "LOAD_IC";
-    case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
-      return "LOAD_GLOBAL_IC";
-    case FeedbackVectorSlotKind::KEYED_LOAD_IC:
+    case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+      return "LOAD_GLOBAL_INSIDE_TYPEOF_IC";
+    case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+      return "LOAD_GLOBAL_NOT_INSIDE_TYPEOF_IC";
+    case FeedbackSlotKind::kLoadKeyed:
       return "KEYED_LOAD_IC";
-    case FeedbackVectorSlotKind::STORE_IC:
-      return "STORE_IC";
-    case FeedbackVectorSlotKind::KEYED_STORE_IC:
-      return "KEYED_STORE_IC";
-    case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
+    case FeedbackSlotKind::kStoreNamedSloppy:
+      return "STORE_SLOPPY_IC";
+    case FeedbackSlotKind::kStoreNamedStrict:
+      return "STORE_STRICT_IC";
+    case FeedbackSlotKind::kStoreOwnNamed:
+      return "STORE_OWN_IC";
+    case FeedbackSlotKind::kStoreKeyedSloppy:
+      return "KEYED_STORE_SLOPPY_IC";
+    case FeedbackSlotKind::kStoreKeyedStrict:
+      return "KEYED_STORE_STRICT_IC";
+    case FeedbackSlotKind::kBinaryOp:
       return "INTERPRETER_BINARYOP_IC";
-    case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC:
+    case FeedbackSlotKind::kCompareOp:
       return "INTERPRETER_COMPARE_IC";
-    case FeedbackVectorSlotKind::GENERAL:
+    case FeedbackSlotKind::kToBoolean:
+      return "TO_BOOLEAN_IC";
+    case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+      return "STORE_DATA_PROPERTY_IN_LITERAL_IC";
+    case FeedbackSlotKind::kCreateClosure:
+      return "kCreateClosure";
+    case FeedbackSlotKind::kLiteral:
+      return "LITERAL";
+    case FeedbackSlotKind::kGeneral:
       return "STUB";
-    case FeedbackVectorSlotKind::KINDS_NUMBER:
+    case FeedbackSlotKind::kKindsNumber:
       break;
   }
   UNREACHABLE();
   return "?";
 }
 
-FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
-    FeedbackVectorSlot slot) const {
+FeedbackSlotKind FeedbackVector::GetKind(FeedbackSlot slot) const {
   DCHECK(!is_empty());
   return metadata()->GetKind(slot);
 }
 
-String* TypeFeedbackVector::GetName(FeedbackVectorSlot slot) const {
-  DCHECK(!is_empty());
-  return metadata()->GetName(slot);
-}
-
 // static
-Handle<TypeFeedbackVector> TypeFeedbackVector::New(
-    Isolate* isolate, Handle<TypeFeedbackMetadata> metadata) {
+Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
+                                           Handle<SharedFunctionInfo> shared) {
   Factory* factory = isolate->factory();
 
-  const int slot_count = metadata->slot_count();
+  const int slot_count = shared->feedback_metadata()->slot_count();
   const int length = slot_count + kReservedIndexCount;
-  if (length == kReservedIndexCount) {
-    return Handle<TypeFeedbackVector>::cast(
-        factory->empty_type_feedback_vector());
-  }
 
   Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
-  array->set(kMetadataIndex, *metadata);
+  array->set_map_no_write_barrier(isolate->heap()->feedback_vector_map());
+  array->set(kSharedFunctionInfoIndex, *shared);
   array->set(kInvocationCountIndex, Smi::kZero);
 
-  DisallowHeapAllocation no_gc;
-
   // Ensure we can skip the write barrier
   Handle<Object> uninitialized_sentinel = UninitializedSentinel(isolate);
   DCHECK_EQ(isolate->heap()->uninitialized_symbol(), *uninitialized_sentinel);
+  Handle<Oddball> undefined_value = factory->undefined_value();
   for (int i = 0; i < slot_count;) {
-    FeedbackVectorSlot slot(i);
-    FeedbackVectorSlotKind kind = metadata->GetKind(slot);
-    int index = TypeFeedbackVector::GetIndex(slot);
-    int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+    FeedbackSlot slot(i);
+    FeedbackSlotKind kind = shared->feedback_metadata()->GetKind(slot);
+    int index = FeedbackVector::GetIndex(slot);
+    int entry_size = FeedbackMetadata::GetSlotSize(kind);
 
-    Object* value;
-    if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
-      value = isolate->heap()->empty_weak_cell();
-    } else if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
-               kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC) {
-      value = Smi::kZero;
-    } else {
-      value = *uninitialized_sentinel;
+    Object* extra_value = *uninitialized_sentinel;
+    switch (kind) {
+      case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+      case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+        array->set(index, isolate->heap()->empty_weak_cell(),
+                   SKIP_WRITE_BARRIER);
+        break;
+      case FeedbackSlotKind::kCompareOp:
+      case FeedbackSlotKind::kBinaryOp:
+      case FeedbackSlotKind::kToBoolean:
+        array->set(index, Smi::kZero, SKIP_WRITE_BARRIER);
+        break;
+      case FeedbackSlotKind::kCreateClosure: {
+        Handle<Cell> cell = factory->NewNoClosuresCell(undefined_value);
+        array->set(index, *cell);
+        break;
+      }
+      case FeedbackSlotKind::kLiteral:
+        array->set(index, *undefined_value, SKIP_WRITE_BARRIER);
+        break;
+      case FeedbackSlotKind::kCall:
+        array->set(index, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+        extra_value = Smi::kZero;
+        break;
+      case FeedbackSlotKind::kLoadProperty:
+      case FeedbackSlotKind::kLoadKeyed:
+      case FeedbackSlotKind::kStoreNamedSloppy:
+      case FeedbackSlotKind::kStoreNamedStrict:
+      case FeedbackSlotKind::kStoreOwnNamed:
+      case FeedbackSlotKind::kStoreKeyedSloppy:
+      case FeedbackSlotKind::kStoreKeyedStrict:
+      case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+      case FeedbackSlotKind::kGeneral:
+        array->set(index, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
+        break;
+
+      case FeedbackSlotKind::kInvalid:
+      case FeedbackSlotKind::kKindsNumber:
+        UNREACHABLE();
+        array->set(index, Smi::kZero, SKIP_WRITE_BARRIER);
+        break;
     }
-    array->set(index, value, SKIP_WRITE_BARRIER);
-
-    value = kind == FeedbackVectorSlotKind::CALL_IC ? Smi::kZero
-                                                    : *uninitialized_sentinel;
     for (int j = 1; j < entry_size; j++) {
-      array->set(index + j, value, SKIP_WRITE_BARRIER);
+      array->set(index + j, extra_value, SKIP_WRITE_BARRIER);
     }
     i += entry_size;
   }
-  return Handle<TypeFeedbackVector>::cast(array);
-}
 
-
-// static
-int TypeFeedbackVector::GetIndexFromSpec(const FeedbackVectorSpec* spec,
-                                         FeedbackVectorSlot slot) {
-  return kReservedIndexCount + slot.ToInt();
-}
-
-
-// static
-Handle<TypeFeedbackVector> TypeFeedbackVector::Copy(
-    Isolate* isolate, Handle<TypeFeedbackVector> vector) {
-  Handle<TypeFeedbackVector> result;
-  result = Handle<TypeFeedbackVector>::cast(
-      isolate->factory()->CopyFixedArray(Handle<FixedArray>::cast(vector)));
+  Handle<FeedbackVector> result = Handle<FeedbackVector>::cast(array);
+  if (isolate->IsCodeCoverageEnabled()) AddToCodeCoverageList(isolate, result);
   return result;
 }
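A usage sketch (hypothetical caller): each slot comes up in its kind-specific
cleared state rather than one uniform sentinel:

    Handle<FeedbackVector> vector = FeedbackVector::New(isolate, shared);
    // Binary/compare-op slots hold Smi zero, load-global slots hold the empty
    // weak cell, and kCreateClosure slots get a fresh no-closures Cell.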
 
-
-// This logic is copied from
-// StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget.
-static bool ClearLogic(Isolate* isolate) {
-  return FLAG_cleanup_code_caches_at_gc && isolate->serializer_enabled();
+// static
+Handle<FeedbackVector> FeedbackVector::Copy(Isolate* isolate,
+                                            Handle<FeedbackVector> vector) {
+  Handle<FeedbackVector> result;
+  result = Handle<FeedbackVector>::cast(
+      isolate->factory()->CopyFixedArray(Handle<FixedArray>::cast(vector)));
+  if (isolate->IsCodeCoverageEnabled()) AddToCodeCoverageList(isolate, result);
+  return result;
 }
 
+// static
+void FeedbackVector::AddToCodeCoverageList(Isolate* isolate,
+                                           Handle<FeedbackVector> vector) {
+  DCHECK(isolate->IsCodeCoverageEnabled());
+  if (!vector->shared_function_info()->IsSubjectToDebugging()) return;
+  Handle<ArrayList> list =
+      Handle<ArrayList>::cast(isolate->factory()->code_coverage_list());
+  list = ArrayList::Add(list, vector);
+  isolate->SetCodeCoverageList(*list);
+}
 
-void TypeFeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
-                                        bool force_clear) {
+void FeedbackVector::ClearSlots(JSFunction* host_function) {
   Isolate* isolate = GetIsolate();
 
-  if (!force_clear && !ClearLogic(isolate)) return;
-
   Object* uninitialized_sentinel =
-      TypeFeedbackVector::RawUninitializedSentinel(isolate);
+      FeedbackVector::RawUninitializedSentinel(isolate);
+  Oddball* undefined_value = isolate->heap()->undefined_value();
 
-  TypeFeedbackMetadataIterator iter(metadata());
+  bool feedback_updated = false;
+  FeedbackMetadataIterator iter(metadata());
   while (iter.HasNext()) {
-    FeedbackVectorSlot slot = iter.Next();
-    FeedbackVectorSlotKind kind = iter.kind();
+    FeedbackSlot slot = iter.Next();
+    FeedbackSlotKind kind = iter.kind();
 
     Object* obj = Get(slot);
     if (obj != uninitialized_sentinel) {
       switch (kind) {
-        case FeedbackVectorSlotKind::CALL_IC: {
+        case FeedbackSlotKind::kCall: {
           CallICNexus nexus(this, slot);
-          nexus.Clear(shared->code());
+          if (!nexus.IsCleared()) {
+            nexus.Clear();
+            feedback_updated = true;
+          }
           break;
         }
-        case FeedbackVectorSlotKind::LOAD_IC: {
+        case FeedbackSlotKind::kLoadProperty: {
           LoadICNexus nexus(this, slot);
-          nexus.Clear(shared->code());
+          if (!nexus.IsCleared()) {
+            nexus.Clear();
+            feedback_updated = true;
+          }
           break;
         }
-        case FeedbackVectorSlotKind::LOAD_GLOBAL_IC: {
+        case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+        case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: {
           LoadGlobalICNexus nexus(this, slot);
-          nexus.Clear(shared->code());
+          if (!nexus.IsCleared()) {
+            nexus.Clear();
+            feedback_updated = true;
+          }
           break;
         }
-        case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
+        case FeedbackSlotKind::kLoadKeyed: {
           KeyedLoadICNexus nexus(this, slot);
-          nexus.Clear(shared->code());
+          if (!nexus.IsCleared()) {
+            nexus.Clear();
+            feedback_updated = true;
+          }
           break;
         }
-        case FeedbackVectorSlotKind::STORE_IC: {
+        case FeedbackSlotKind::kStoreNamedSloppy:
+        case FeedbackSlotKind::kStoreNamedStrict:
+        case FeedbackSlotKind::kStoreOwnNamed: {
           StoreICNexus nexus(this, slot);
-          nexus.Clear(shared->code());
+          if (!nexus.IsCleared()) {
+            nexus.Clear();
+            feedback_updated = true;
+          }
           break;
         }
-        case FeedbackVectorSlotKind::KEYED_STORE_IC: {
+        case FeedbackSlotKind::kStoreKeyedSloppy:
+        case FeedbackSlotKind::kStoreKeyedStrict: {
           KeyedStoreICNexus nexus(this, slot);
-          nexus.Clear(shared->code());
+          if (!nexus.IsCleared()) {
+            nexus.Clear();
+            feedback_updated = true;
+          }
           break;
         }
-        case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
-        case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
+        case FeedbackSlotKind::kBinaryOp:
+        case FeedbackSlotKind::kCompareOp: {
           DCHECK(Get(slot)->IsSmi());
           // Don't clear these Smi slots.
           // Set(slot, Smi::kZero);
           break;
         }
-        case FeedbackVectorSlotKind::GENERAL: {
+        case FeedbackSlotKind::kCreateClosure: {
+          break;
+        }
+        case FeedbackSlotKind::kGeneral: {
           if (obj->IsHeapObject()) {
             InstanceType instance_type =
                 HeapObject::cast(obj)->map()->instance_type();
@@ -363,70 +347,37 @@
             // regularly.
             if (instance_type != ALLOCATION_SITE_TYPE) {
               Set(slot, uninitialized_sentinel, SKIP_WRITE_BARRIER);
+              feedback_updated = true;
             }
           }
           break;
         }
-        case FeedbackVectorSlotKind::INVALID:
-        case FeedbackVectorSlotKind::KINDS_NUMBER:
+        case FeedbackSlotKind::kLiteral: {
+          Set(slot, undefined_value, SKIP_WRITE_BARRIER);
+          feedback_updated = true;
+          break;
+        }
+        case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
+          StoreDataPropertyInLiteralICNexus nexus(this, slot);
+          if (!nexus.IsCleared()) {
+            nexus.Clear();
+            feedback_updated = true;
+          }
+          break;
+        }
+        case FeedbackSlotKind::kToBoolean:
+        case FeedbackSlotKind::kInvalid:
+        case FeedbackSlotKind::kKindsNumber:
           UNREACHABLE();
           break;
       }
     }
   }
-}
-
-
-// static
-void TypeFeedbackVector::ClearAllKeyedStoreICs(Isolate* isolate) {
-  SharedFunctionInfo::Iterator iterator(isolate);
-  SharedFunctionInfo* shared;
-  while ((shared = iterator.Next())) {
-    if (!shared->OptimizedCodeMapIsCleared()) {
-      FixedArray* optimized_code_map = shared->optimized_code_map();
-      int length = optimized_code_map->length();
-      for (int i = SharedFunctionInfo::kEntriesStart; i < length;
-           i += SharedFunctionInfo::kEntryLength) {
-        WeakCell* cell = WeakCell::cast(
-            optimized_code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
-        if (cell->value()->IsLiteralsArray()) {
-          TypeFeedbackVector* vector =
-              LiteralsArray::cast(cell->value())->feedback_vector();
-          vector->ClearKeyedStoreICs(shared);
-        }
-      }
-    }
+  if (feedback_updated) {
+    IC::OnFeedbackChanged(isolate, host_function);
   }
 }
 
-
-void TypeFeedbackVector::ClearKeyedStoreICs(SharedFunctionInfo* shared) {
-  Isolate* isolate = GetIsolate();
-
-  Code* host = shared->code();
-  Object* uninitialized_sentinel =
-      TypeFeedbackVector::RawUninitializedSentinel(isolate);
-
-  TypeFeedbackMetadataIterator iter(metadata());
-  while (iter.HasNext()) {
-    FeedbackVectorSlot slot = iter.Next();
-    FeedbackVectorSlotKind kind = iter.kind();
-    if (kind != FeedbackVectorSlotKind::KEYED_STORE_IC) continue;
-    Object* obj = Get(slot);
-    if (obj != uninitialized_sentinel) {
-      KeyedStoreICNexus nexus(this, slot);
-      nexus.Clear(host);
-    }
-  }
-}
-
-
-// static
-Handle<TypeFeedbackVector> TypeFeedbackVector::DummyVector(Isolate* isolate) {
-  return isolate->factory()->dummy_vector();
-}
-
-
 Handle<FixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
   Isolate* isolate = GetIsolate();
   Handle<Object> feedback = handle(GetFeedback(), isolate);
@@ -439,7 +390,6 @@
   return Handle<FixedArray>::cast(feedback);
 }
 
-
 Handle<FixedArray> FeedbackNexus::EnsureExtraArrayOfSize(int length) {
   Isolate* isolate = GetIsolate();
   Handle<Object> feedback_extra = handle(GetFeedbackExtra(), isolate);
@@ -464,38 +414,35 @@
   }
 }
 
-
 void FeedbackNexus::ConfigureUninitialized() {
-  SetFeedback(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
+  SetFeedback(*FeedbackVector::UninitializedSentinel(GetIsolate()),
               SKIP_WRITE_BARRIER);
-  SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
+  SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(GetIsolate()),
                    SKIP_WRITE_BARRIER);
 }
 
-
 void FeedbackNexus::ConfigurePremonomorphic() {
-  SetFeedback(*TypeFeedbackVector::PremonomorphicSentinel(GetIsolate()),
+  SetFeedback(*FeedbackVector::PremonomorphicSentinel(GetIsolate()),
               SKIP_WRITE_BARRIER);
-  SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
+  SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(GetIsolate()),
                    SKIP_WRITE_BARRIER);
 }
 
-
 void FeedbackNexus::ConfigureMegamorphic() {
   // Keyed ICs must use ConfigureMegamorphicKeyed.
-  DCHECK_NE(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector()->GetKind(slot()));
-  DCHECK_NE(FeedbackVectorSlotKind::KEYED_STORE_IC, vector()->GetKind(slot()));
+  DCHECK(!vector()->IsKeyedLoadIC(slot()));
+  DCHECK(!vector()->IsKeyedStoreIC(slot()));
 
   Isolate* isolate = GetIsolate();
-  SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(isolate),
+  SetFeedback(*FeedbackVector::MegamorphicSentinel(isolate),
               SKIP_WRITE_BARRIER);
-  SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(isolate),
+  SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
                    SKIP_WRITE_BARRIER);
 }
 
 void KeyedLoadICNexus::ConfigureMegamorphicKeyed(IcCheckType property_type) {
   Isolate* isolate = GetIsolate();
-  SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(isolate),
+  SetFeedback(*FeedbackVector::MegamorphicSentinel(isolate),
               SKIP_WRITE_BARRIER);
   SetFeedbackExtra(Smi::FromInt(static_cast<int>(property_type)),
                    SKIP_WRITE_BARRIER);
@@ -503,7 +450,7 @@
 
 void KeyedStoreICNexus::ConfigureMegamorphicKeyed(IcCheckType property_type) {
   Isolate* isolate = GetIsolate();
-  SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(isolate),
+  SetFeedback(*FeedbackVector::MegamorphicSentinel(isolate),
               SKIP_WRITE_BARRIER);
   SetFeedbackExtra(Smi::FromInt(static_cast<int>(property_type)),
                    SKIP_WRITE_BARRIER);
@@ -513,11 +460,11 @@
   Isolate* isolate = GetIsolate();
   Object* feedback = GetFeedback();
 
-  if (feedback == *TypeFeedbackVector::UninitializedSentinel(isolate)) {
+  if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
     return UNINITIALIZED;
-  } else if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate)) {
+  } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
     return MEGAMORPHIC;
-  } else if (feedback == *TypeFeedbackVector::PremonomorphicSentinel(isolate)) {
+  } else if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
     return PREMONOMORPHIC;
   } else if (feedback->IsFixedArray()) {
     // Determine state purely by our structure, don't check if the maps are
@@ -537,7 +484,7 @@
 
   Object* extra = GetFeedbackExtra();
   if (!WeakCell::cast(feedback)->cleared() ||
-      extra != *TypeFeedbackVector::UninitializedSentinel(isolate)) {
+      extra != *FeedbackVector::UninitializedSentinel(isolate)) {
     return MONOMORPHIC;
   }
   return UNINITIALIZED;
@@ -547,11 +494,11 @@
   Isolate* isolate = GetIsolate();
   Object* feedback = GetFeedback();
 
-  if (feedback == *TypeFeedbackVector::UninitializedSentinel(isolate)) {
+  if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
     return UNINITIALIZED;
-  } else if (feedback == *TypeFeedbackVector::PremonomorphicSentinel(isolate)) {
+  } else if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
     return PREMONOMORPHIC;
-  } else if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate)) {
+  } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
     return MEGAMORPHIC;
   } else if (feedback->IsFixedArray()) {
     // Determine state purely by our structure, don't check if the maps are
@@ -569,16 +516,15 @@
   return UNINITIALIZED;
 }
 
-
 InlineCacheState StoreICNexus::StateFromFeedback() const {
   Isolate* isolate = GetIsolate();
   Object* feedback = GetFeedback();
 
-  if (feedback == *TypeFeedbackVector::UninitializedSentinel(isolate)) {
+  if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
     return UNINITIALIZED;
-  } else if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate)) {
+  } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
     return MEGAMORPHIC;
-  } else if (feedback == *TypeFeedbackVector::PremonomorphicSentinel(isolate)) {
+  } else if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
     return PREMONOMORPHIC;
   } else if (feedback->IsFixedArray()) {
     // Determine state purely by our structure, don't check if the maps are
@@ -592,16 +538,15 @@
   return UNINITIALIZED;
 }
 
-
 InlineCacheState KeyedStoreICNexus::StateFromFeedback() const {
   Isolate* isolate = GetIsolate();
   Object* feedback = GetFeedback();
 
-  if (feedback == *TypeFeedbackVector::UninitializedSentinel(isolate)) {
+  if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
     return UNINITIALIZED;
-  } else if (feedback == *TypeFeedbackVector::PremonomorphicSentinel(isolate)) {
+  } else if (feedback == *FeedbackVector::PremonomorphicSentinel(isolate)) {
     return PREMONOMORPHIC;
-  } else if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate)) {
+  } else if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
     return MEGAMORPHIC;
   } else if (feedback->IsFixedArray()) {
     // Determine state purely by our structure, don't check if the maps are
@@ -619,25 +564,23 @@
   return UNINITIALIZED;
 }
 
-
 InlineCacheState CallICNexus::StateFromFeedback() const {
   Isolate* isolate = GetIsolate();
   Object* feedback = GetFeedback();
   DCHECK(GetFeedbackExtra() ==
-             *TypeFeedbackVector::UninitializedSentinel(isolate) ||
+             *FeedbackVector::UninitializedSentinel(isolate) ||
          GetFeedbackExtra()->IsSmi());
 
-  if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate)) {
+  if (feedback == *FeedbackVector::MegamorphicSentinel(isolate)) {
     return GENERIC;
   } else if (feedback->IsAllocationSite() || feedback->IsWeakCell()) {
     return MONOMORPHIC;
   }
 
-  CHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate));
+  CHECK(feedback == *FeedbackVector::UninitializedSentinel(isolate));
   return UNINITIALIZED;
 }
 
-
 int CallICNexus::ExtractCallCount() {
   Object* call_count = GetFeedbackExtra();
   CHECK(call_count->IsSmi());
@@ -651,11 +594,9 @@
   return static_cast<float>(call_count / invocation_count);
 }
 
-void CallICNexus::Clear(Code* host) { CallIC::Clear(GetIsolate(), host, this); }
-
 void CallICNexus::ConfigureUninitialized() {
   Isolate* isolate = GetIsolate();
-  SetFeedback(*TypeFeedbackVector::UninitializedSentinel(isolate),
+  SetFeedback(*FeedbackVector::UninitializedSentinel(isolate),
               SKIP_WRITE_BARRIER);
   SetFeedbackExtra(Smi::kZero, SKIP_WRITE_BARRIER);
 }
@@ -670,16 +611,14 @@
   SetFeedbackExtra(Smi::FromInt(1), SKIP_WRITE_BARRIER);
 }
 
-
 void CallICNexus::ConfigureMonomorphic(Handle<JSFunction> function) {
   Handle<WeakCell> new_cell = GetIsolate()->factory()->NewWeakCell(function);
   SetFeedback(*new_cell);
   SetFeedbackExtra(Smi::FromInt(1), SKIP_WRITE_BARRIER);
 }
 
-
 void CallICNexus::ConfigureMegamorphic() {
-  SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(GetIsolate()),
+  SetFeedback(*FeedbackVector::MegamorphicSentinel(GetIsolate()),
               SKIP_WRITE_BARRIER);
   Smi* count = Smi::cast(GetFeedbackExtra());
   int new_count = count->value() + 1;
@@ -687,7 +626,7 @@
 }
 
 void CallICNexus::ConfigureMegamorphic(int call_count) {
-  SetFeedback(*TypeFeedbackVector::MegamorphicSentinel(GetIsolate()),
+  SetFeedback(*FeedbackVector::MegamorphicSentinel(GetIsolate()),
               SKIP_WRITE_BARRIER);
   SetFeedbackExtra(Smi::FromInt(call_count), SKIP_WRITE_BARRIER);
 }
@@ -702,18 +641,18 @@
 void LoadGlobalICNexus::ConfigureUninitialized() {
   Isolate* isolate = GetIsolate();
   SetFeedback(isolate->heap()->empty_weak_cell(), SKIP_WRITE_BARRIER);
-  SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(isolate),
+  SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
                    SKIP_WRITE_BARRIER);
 }
 
 void LoadGlobalICNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
   Isolate* isolate = GetIsolate();
   SetFeedback(*isolate->factory()->NewWeakCell(cell));
-  SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(isolate),
+  SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
                    SKIP_WRITE_BARRIER);
 }
 
-void LoadGlobalICNexus::ConfigureHandlerMode(Handle<Code> handler) {
+void LoadGlobalICNexus::ConfigureHandlerMode(Handle<Object> handler) {
   SetFeedback(GetIsolate()->heap()->empty_weak_cell());
   SetFeedbackExtra(*handler);
 }
@@ -761,7 +700,7 @@
   int receiver_count = maps->length();
   Handle<FixedArray> array = EnsureArrayOfSize(receiver_count * 2);
   InstallHandlers(array, maps, handlers);
-  SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(isolate),
+  SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
                    SKIP_WRITE_BARRIER);
 }
 
@@ -773,7 +712,7 @@
   Handle<FixedArray> array;
   if (name.is_null()) {
     array = EnsureArrayOfSize(receiver_count * 2);
-    SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
+    SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(GetIsolate()),
                      SKIP_WRITE_BARRIER);
   } else {
     array = EnsureExtraArrayOfSize(receiver_count * 2);
@@ -789,7 +728,7 @@
   int receiver_count = maps->length();
   Handle<FixedArray> array = EnsureArrayOfSize(receiver_count * 2);
   InstallHandlers(array, maps, handlers);
-  SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(isolate),
+  SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(isolate),
                    SKIP_WRITE_BARRIER);
 }
 
@@ -801,7 +740,7 @@
   Handle<FixedArray> array;
   if (name.is_null()) {
     array = EnsureArrayOfSize(receiver_count * 2);
-    SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
+    SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(GetIsolate()),
                      SKIP_WRITE_BARRIER);
   } else {
     array = EnsureExtraArrayOfSize(receiver_count * 2);
@@ -811,14 +750,13 @@
   InstallHandlers(array, maps, handlers);
 }
 
-
 void KeyedStoreICNexus::ConfigurePolymorphic(MapHandleList* maps,
                                              MapHandleList* transitioned_maps,
-                                             CodeHandleList* handlers) {
+                                             List<Handle<Object>>* handlers) {
   int receiver_count = maps->length();
   DCHECK(receiver_count > 1);
   Handle<FixedArray> array = EnsureArrayOfSize(receiver_count * 3);
-  SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
+  SetFeedbackExtra(*FeedbackVector::UninitializedSentinel(GetIsolate()),
                    SKIP_WRITE_BARRIER);
 
   Handle<Oddball> undefined_value = GetIsolate()->factory()->undefined_value();
@@ -960,18 +898,6 @@
   return count == length;
 }
 
-
-void LoadICNexus::Clear(Code* host) { LoadIC::Clear(GetIsolate(), host, this); }
-
-void LoadGlobalICNexus::Clear(Code* host) {
-  LoadGlobalIC::Clear(GetIsolate(), host, this);
-}
-
-void KeyedLoadICNexus::Clear(Code* host) {
-  KeyedLoadIC::Clear(GetIsolate(), host, this);
-}
-
-
 Name* KeyedLoadICNexus::FindFirstName() const {
   Object* feedback = GetFeedback();
   if (IsPropertyNameFeedback(feedback)) {
@@ -980,7 +906,6 @@
   return NULL;
 }
 
-
 Name* KeyedStoreICNexus::FindFirstName() const {
   Object* feedback = GetFeedback();
   if (IsPropertyNameFeedback(feedback)) {
@@ -989,17 +914,6 @@
   return NULL;
 }
 
-
-void StoreICNexus::Clear(Code* host) {
-  StoreIC::Clear(GetIsolate(), host, this);
-}
-
-
-void KeyedStoreICNexus::Clear(Code* host) {
-  KeyedStoreIC::Clear(GetIsolate(), host, this);
-}
-
-
 KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
   KeyedAccessStoreMode mode = STANDARD_STORE;
   MapHandleList maps;
@@ -1011,12 +925,19 @@
   FindHandlers(&handlers, maps.length());
   for (int i = 0; i < handlers.length(); i++) {
     // The first handler that isn't the slow handler will have the bits we need.
-    Handle<Code> handler = Handle<Code>::cast(handlers.at(i));
+    Handle<Object> maybe_code_handler = handlers.at(i);
+    Handle<Code> handler;
+    if (maybe_code_handler->IsTuple2()) {
+      Handle<Tuple2> data_handler = Handle<Tuple2>::cast(maybe_code_handler);
+      handler = handle(Code::cast(data_handler->value2()));
+    } else {
+      handler = Handle<Code>::cast(maybe_code_handler);
+    }
     CodeStub::Major major_key = CodeStub::MajorKeyFromKey(handler->stub_key());
     uint32_t minor_key = CodeStub::MinorKeyFromKey(handler->stub_key());
     CHECK(major_key == CodeStub::KeyedStoreSloppyArguments ||
           major_key == CodeStub::StoreFastElement ||
-          major_key == CodeStub::StoreElement ||
+          major_key == CodeStub::StoreSlowElement ||
           major_key == CodeStub::ElementsTransitionAndStore ||
           major_key == CodeStub::NoCache);
     if (major_key != CodeStub::NoCache) {
@@ -1030,7 +951,7 @@
 
 IcCheckType KeyedLoadICNexus::GetKeyType() const {
   Object* feedback = GetFeedback();
-  if (feedback == *TypeFeedbackVector::MegamorphicSentinel(GetIsolate())) {
+  if (feedback == *FeedbackVector::MegamorphicSentinel(GetIsolate())) {
     return static_cast<IcCheckType>(Smi::cast(GetFeedbackExtra())->value());
   }
   return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
@@ -1038,7 +959,7 @@
 
 IcCheckType KeyedStoreICNexus::GetKeyType() const {
   Object* feedback = GetFeedback();
-  if (feedback == *TypeFeedbackVector::MegamorphicSentinel(GetIsolate())) {
+  if (feedback == *FeedbackVector::MegamorphicSentinel(GetIsolate())) {
     return static_cast<IcCheckType>(Smi::cast(GetFeedbackExtra())->value());
   }
   return IsPropertyNameFeedback(feedback) ? PROPERTY : ELEMENT;
@@ -1076,5 +997,27 @@
   return CompareOperationHintFromFeedback(feedback);
 }
 
+InlineCacheState StoreDataPropertyInLiteralICNexus::StateFromFeedback() const {
+  Isolate* isolate = GetIsolate();
+  Object* feedback = GetFeedback();
+
+  if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
+    return UNINITIALIZED;
+  } else if (feedback->IsWeakCell()) {
+    // Don't check if the map is cleared.
+    return MONOMORPHIC;
+  }
+
+  return MEGAMORPHIC;
+}
+
+void StoreDataPropertyInLiteralICNexus::ConfigureMonomorphic(
+    Handle<Name> name, Handle<Map> receiver_map) {
+  Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+
+  SetFeedback(*cell);
+  SetFeedbackExtra(*name);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/feedback-vector.h b/src/feedback-vector.h
new file mode 100644
index 0000000..9ac146d
--- /dev/null
+++ b/src/feedback-vector.h
@@ -0,0 +1,751 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FEEDBACK_VECTOR_H_
+#define V8_FEEDBACK_VECTOR_H_
+
+#include <vector>
+
+#include "src/base/logging.h"
+#include "src/elements-kind.h"
+#include "src/objects.h"
+#include "src/type-hints.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+enum class FeedbackSlotKind {
+  // This kind means that the slot points into the middle of another slot,
+  // one that occupies more than one feedback vector element.
+  // There must be no such slots in the system.
+  kInvalid,
+
+  kCall,
+  kLoadProperty,
+  kLoadGlobalNotInsideTypeof,
+  kLoadGlobalInsideTypeof,
+  kLoadKeyed,
+  kStoreNamedSloppy,
+  kStoreNamedStrict,
+  kStoreOwnNamed,
+  kStoreKeyedSloppy,
+  kStoreKeyedStrict,
+  kBinaryOp,
+  kCompareOp,
+  kToBoolean,
+  kStoreDataPropertyInLiteral,
+  kCreateClosure,
+  kLiteral,
+  // This is a general purpose slot that occupies one feedback vector element.
+  kGeneral,
+
+  kKindsNumber  // Last value indicating number of kinds.
+};
+
+inline bool IsCallICKind(FeedbackSlotKind kind) {
+  return kind == FeedbackSlotKind::kCall;
+}
+
+inline bool IsLoadICKind(FeedbackSlotKind kind) {
+  return kind == FeedbackSlotKind::kLoadProperty;
+}
+
+inline bool IsLoadGlobalICKind(FeedbackSlotKind kind) {
+  return kind == FeedbackSlotKind::kLoadGlobalNotInsideTypeof ||
+         kind == FeedbackSlotKind::kLoadGlobalInsideTypeof;
+}
+
+inline bool IsKeyedLoadICKind(FeedbackSlotKind kind) {
+  return kind == FeedbackSlotKind::kLoadKeyed;
+}
+
+inline bool IsStoreICKind(FeedbackSlotKind kind) {
+  return kind == FeedbackSlotKind::kStoreNamedSloppy ||
+         kind == FeedbackSlotKind::kStoreNamedStrict;
+}
+
+inline bool IsStoreOwnICKind(FeedbackSlotKind kind) {
+  return kind == FeedbackSlotKind::kStoreOwnNamed;
+}
+
+inline bool IsKeyedStoreICKind(FeedbackSlotKind kind) {
+  return kind == FeedbackSlotKind::kStoreKeyedSloppy ||
+         kind == FeedbackSlotKind::kStoreKeyedStrict;
+}
+
+inline TypeofMode GetTypeofModeFromSlotKind(FeedbackSlotKind kind) {
+  DCHECK(IsLoadGlobalICKind(kind));
+  return (kind == FeedbackSlotKind::kLoadGlobalInsideTypeof)
+             ? INSIDE_TYPEOF
+             : NOT_INSIDE_TYPEOF;
+}
+
+inline LanguageMode GetLanguageModeFromSlotKind(FeedbackSlotKind kind) {
+  DCHECK(IsStoreICKind(kind) || IsStoreOwnICKind(kind) ||
+         IsKeyedStoreICKind(kind));
+  return (kind == FeedbackSlotKind::kStoreNamedSloppy ||
+          kind == FeedbackSlotKind::kStoreKeyedSloppy)
+             ? SLOPPY
+             : STRICT;
+}
+
+std::ostream& operator<<(std::ostream& os, FeedbackSlotKind kind);
+
+template <typename Derived>
+class FeedbackVectorSpecBase {
+ public:
+  FeedbackSlot AddCallICSlot() { return AddSlot(FeedbackSlotKind::kCall); }
+
+  FeedbackSlot AddLoadICSlot() {
+    return AddSlot(FeedbackSlotKind::kLoadProperty);
+  }
+
+  FeedbackSlot AddLoadGlobalICSlot(TypeofMode typeof_mode) {
+    return AddSlot(typeof_mode == INSIDE_TYPEOF
+                       ? FeedbackSlotKind::kLoadGlobalInsideTypeof
+                       : FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
+  }
+
+  FeedbackSlot AddCreateClosureSlot() {
+    return AddSlot(FeedbackSlotKind::kCreateClosure);
+  }
+
+  FeedbackSlot AddKeyedLoadICSlot() {
+    return AddSlot(FeedbackSlotKind::kLoadKeyed);
+  }
+
+  FeedbackSlot AddStoreICSlot(LanguageMode language_mode) {
+    STATIC_ASSERT(LANGUAGE_END == 2);
+    return AddSlot(is_strict(language_mode)
+                       ? FeedbackSlotKind::kStoreNamedStrict
+                       : FeedbackSlotKind::kStoreNamedSloppy);
+  }
+
+  FeedbackSlot AddStoreOwnICSlot() {
+    return AddSlot(FeedbackSlotKind::kStoreOwnNamed);
+  }
+
+  FeedbackSlot AddKeyedStoreICSlot(LanguageMode language_mode) {
+    STATIC_ASSERT(LANGUAGE_END == 2);
+    return AddSlot(is_strict(language_mode)
+                       ? FeedbackSlotKind::kStoreKeyedStrict
+                       : FeedbackSlotKind::kStoreKeyedSloppy);
+  }
+
+  FeedbackSlot AddInterpreterBinaryOpICSlot() {
+    return AddSlot(FeedbackSlotKind::kBinaryOp);
+  }
+
+  FeedbackSlot AddInterpreterCompareICSlot() {
+    return AddSlot(FeedbackSlotKind::kCompareOp);
+  }
+
+  FeedbackSlot AddGeneralSlot() { return AddSlot(FeedbackSlotKind::kGeneral); }
+
+  FeedbackSlot AddLiteralSlot() { return AddSlot(FeedbackSlotKind::kLiteral); }
+
+  FeedbackSlot AddStoreDataPropertyInLiteralICSlot() {
+    return AddSlot(FeedbackSlotKind::kStoreDataPropertyInLiteral);
+  }
+
+#ifdef OBJECT_PRINT
+  // For gdb debugging.
+  void Print();
+#endif  // OBJECT_PRINT
+
+  DECLARE_PRINTER(FeedbackVectorSpec)
+
+ private:
+  inline FeedbackSlot AddSlot(FeedbackSlotKind kind);
+
+  Derived* This() { return static_cast<Derived*>(this); }
+};
+
+class StaticFeedbackVectorSpec
+    : public FeedbackVectorSpecBase<StaticFeedbackVectorSpec> {
+ public:
+  StaticFeedbackVectorSpec() : slot_count_(0) {}
+
+  int slots() const { return slot_count_; }
+
+  FeedbackSlotKind GetKind(FeedbackSlot slot) const {
+    DCHECK(slot.ToInt() >= 0 && slot.ToInt() < slot_count_);
+    return kinds_[slot.ToInt()];
+  }
+
+ private:
+  friend class FeedbackVectorSpecBase<StaticFeedbackVectorSpec>;
+
+  void append(FeedbackSlotKind kind) {
+    DCHECK(slot_count_ < kMaxLength);
+    kinds_[slot_count_++] = kind;
+  }
+
+  static const int kMaxLength = 12;
+
+  int slot_count_;
+  FeedbackSlotKind kinds_[kMaxLength];
+};
+
+class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
+ public:
+  explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone) {
+    slot_kinds_.reserve(16);
+  }
+
+  int slots() const { return static_cast<int>(slot_kinds_.size()); }
+
+  FeedbackSlotKind GetKind(FeedbackSlot slot) const {
+    return static_cast<FeedbackSlotKind>(slot_kinds_.at(slot.ToInt()));
+  }
+
+ private:
+  friend class FeedbackVectorSpecBase<FeedbackVectorSpec>;
+
+  void append(FeedbackSlotKind kind) {
+    slot_kinds_.push_back(static_cast<unsigned char>(kind));
+  }
+
+  ZoneVector<unsigned char> slot_kinds_;
+};
+
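+// Illustrative usage, as a hypothetical caller (for orientation only; the
+// names `zone`, `store`, and `call` below are invented):
+//
+//   FeedbackVectorSpec spec(zone);
+//   FeedbackSlot store = spec.AddStoreICSlot(STRICT);
+//   FeedbackSlot call = spec.AddCallICSlot();
+//   DCHECK_EQ(FeedbackSlotKind::kStoreNamedStrict, spec.GetKind(store));
+//   DCHECK_EQ(2, spec.slots());
+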
+// The shape of the FeedbackMetadata is an array with:
+// 0: slot_count
+// 1..N: slot kinds packed into a bit vector
+//
+class FeedbackMetadata : public FixedArray {
+ public:
+  // Casting.
+  static inline FeedbackMetadata* cast(Object* obj);
+
+  static const int kSlotsCountIndex = 0;
+  static const int kReservedIndexCount = 1;
+
+  // Returns number of feedback vector elements used by given slot kind.
+  static inline int GetSlotSize(FeedbackSlotKind kind);
+
+  bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
+
+  inline bool is_empty() const;
+
+  // Returns number of slots in the vector.
+  inline int slot_count() const;
+
+  // Returns slot kind for given slot.
+  FeedbackSlotKind GetKind(FeedbackSlot slot) const;
+
+  template <typename Spec>
+  static Handle<FeedbackMetadata> New(Isolate* isolate, const Spec* spec);
+
+#ifdef OBJECT_PRINT
+  // For gdb debugging.
+  void Print();
+#endif  // OBJECT_PRINT
+
+  DECLARE_PRINTER(FeedbackMetadata)
+
+  static const char* Kind2String(FeedbackSlotKind kind);
+
+ private:
+  static const int kFeedbackSlotKindBits = 5;
+  STATIC_ASSERT(static_cast<int>(FeedbackSlotKind::kKindsNumber) <
+                (1 << kFeedbackSlotKindBits));
+
+  void SetKind(FeedbackSlot slot, FeedbackSlotKind kind);
+
+  typedef BitSetComputer<FeedbackSlotKind, kFeedbackSlotKindBits, kSmiValueSize,
+                         uint32_t>
+      VectorICComputer;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackMetadata);
+};
+
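+// Packing arithmetic, worked through: each slot kind needs
+// kFeedbackSlotKindBits = 5 bits (the STATIC_ASSERT above guarantees
+// kKindsNumber < 2^5 = 32), and VectorICComputer packs the kinds into
+// smi-sized words of kSmiValueSize bits. With a 31- or 32-bit smi payload
+// that is 31 / 5 = 32 / 5 = 6 kinds per word, so N slots occupy
+// ceil(N / 6) words after the slot_count entry.
+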
+// The shape of the FeedbackVector is an array with:
+// 0: shared function info (through which the feedback metadata is reached)
+// 1: invocation count
+// 2: feedback slot #0
+// ...
+// 2 + slot_count - 1: feedback slot #(slot_count-1)
+//
+class FeedbackVector : public FixedArray {
+ public:
+  // Casting.
+  static inline FeedbackVector* cast(Object* obj);
+
+  static const int kSharedFunctionInfoIndex = 0;
+  static const int kInvocationCountIndex = 1;
+  static const int kReservedIndexCount = 2;
+
+  inline void ComputeCounts(int* with_type_info, int* generic,
+                            int* vector_ic_count, bool code_is_interpreted);
+
+  inline bool is_empty() const;
+
+  // Returns number of slots in the vector.
+  inline int slot_count() const;
+
+  inline FeedbackMetadata* metadata() const;
+  inline SharedFunctionInfo* shared_function_info() const;
+  inline int invocation_count() const;
+  inline void clear_invocation_count();
+
+  // Conversion from a slot to an integer index into the underlying array.
+  static int GetIndex(FeedbackSlot slot) {
+    return kReservedIndexCount + slot.ToInt();
+  }
+
+  // Conversion from an integer index into the underlying array back to a slot.
+  static inline FeedbackSlot ToSlot(int index);
+  inline Object* Get(FeedbackSlot slot) const;
+  inline void Set(FeedbackSlot slot, Object* value,
+                  WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+  // Returns slot kind for given slot.
+  FeedbackSlotKind GetKind(FeedbackSlot slot) const;
+
+  static Handle<FeedbackVector> New(Isolate* isolate,
+                                    Handle<SharedFunctionInfo> shared);
+
+  static Handle<FeedbackVector> Copy(Isolate* isolate,
+                                     Handle<FeedbackVector> vector);
+
+#define DEFINE_SLOT_KIND_PREDICATE(Name) \
+  bool Name(FeedbackSlot slot) const { return Name##Kind(GetKind(slot)); }
+
+  DEFINE_SLOT_KIND_PREDICATE(IsCallIC)
+  DEFINE_SLOT_KIND_PREDICATE(IsLoadIC)
+  DEFINE_SLOT_KIND_PREDICATE(IsLoadGlobalIC)
+  DEFINE_SLOT_KIND_PREDICATE(IsKeyedLoadIC)
+  DEFINE_SLOT_KIND_PREDICATE(IsStoreIC)
+  DEFINE_SLOT_KIND_PREDICATE(IsStoreOwnIC)
+  DEFINE_SLOT_KIND_PREDICATE(IsKeyedStoreIC)
+#undef DEFINE_SLOT_KIND_PREDICATE
+
+  // Returns the typeof mode encoded in the kind of the given slot.
+  inline TypeofMode GetTypeofMode(FeedbackSlot slot) const {
+    return GetTypeofModeFromSlotKind(GetKind(slot));
+  }
+
+  // Returns the language mode encoded in the kind of the given slot.
+  inline LanguageMode GetLanguageMode(FeedbackSlot slot) const {
+    return GetLanguageModeFromSlotKind(GetKind(slot));
+  }
+
+#ifdef OBJECT_PRINT
+  // For gdb debugging.
+  void Print();
+#endif  // OBJECT_PRINT
+
+  DECLARE_PRINTER(FeedbackVector)
+
+  // Clears the vector slots.
+  void ClearSlots(JSFunction* host_function);
+
+  // The object that indicates an uninitialized cache.
+  static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate);
+
+  // The object that indicates a megamorphic state.
+  static inline Handle<Symbol> MegamorphicSentinel(Isolate* isolate);
+
+  // The object that indicates a premonomorphic state.
+  static inline Handle<Symbol> PremonomorphicSentinel(Isolate* isolate);
+
+  // A raw version of the uninitialized sentinel that's safe to read during
+  // garbage collection (e.g., for patching the cache).
+  static inline Symbol* RawUninitializedSentinel(Isolate* isolate);
+
+ private:
+  static void AddToCodeCoverageList(Isolate* isolate,
+                                    Handle<FeedbackVector> vector);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FeedbackVector);
+};
+
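+// Index arithmetic, concretely: with kReservedIndexCount == 2, feedback for
+// slot i lives at array index 2 + i, so GetIndex(FeedbackSlot(0)) == 2 and
+// ToSlot(5) is FeedbackSlot(3).
+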
+// The following asserts protect an optimization in feedback vector code
+// that looks into the contents of a slot expecting to find a String,
+// a Symbol, an AllocationSite, a WeakCell, or a FixedArray.
+STATIC_ASSERT(WeakCell::kSize >= 2 * kPointerSize);
+STATIC_ASSERT(WeakCell::kValueOffset == AllocationSite::kTransitionInfoOffset);
+STATIC_ASSERT(WeakCell::kValueOffset == FixedArray::kLengthOffset);
+STATIC_ASSERT(WeakCell::kValueOffset == Name::kHashFieldSlot);
+// Verify that an empty hash field looks like a tagged object, but can't
+// possibly be confused with a pointer.
+STATIC_ASSERT((Name::kEmptyHashField & kHeapObjectTag) == kHeapObjectTag);
+STATIC_ASSERT(Name::kEmptyHashField == 0x3);
+// Verify that a set hash field will not look like a tagged object.
+STATIC_ASSERT(Name::kHashNotComputedMask == kHeapObjectTag);
+
+class FeedbackMetadataIterator {
+ public:
+  explicit FeedbackMetadataIterator(Handle<FeedbackMetadata> metadata)
+      : metadata_handle_(metadata),
+        next_slot_(FeedbackSlot(0)),
+        slot_kind_(FeedbackSlotKind::kInvalid) {}
+
+  explicit FeedbackMetadataIterator(FeedbackMetadata* metadata)
+      : metadata_(metadata),
+        next_slot_(FeedbackSlot(0)),
+        slot_kind_(FeedbackSlotKind::kInvalid) {}
+
+  inline bool HasNext() const;
+
+  inline FeedbackSlot Next();
+
+  // Returns slot kind of the last slot returned by Next().
+  FeedbackSlotKind kind() const {
+    DCHECK_NE(FeedbackSlotKind::kInvalid, slot_kind_);
+    DCHECK_NE(FeedbackSlotKind::kKindsNumber, slot_kind_);
+    return slot_kind_;
+  }
+
+  // Returns entry size of the last slot returned by Next().
+  inline int entry_size() const;
+
+ private:
+  FeedbackMetadata* metadata() const {
+    return !metadata_handle_.is_null() ? *metadata_handle_ : metadata_;
+  }
+
+  // The reason for having both a handle and a raw pointer to the metadata
+  // is to have a single iterator implementation for both "handlified" and
+  // raw pointer use cases.
+  Handle<FeedbackMetadata> metadata_handle_;
+  FeedbackMetadata* metadata_;
+  FeedbackSlot cur_slot_;
+  FeedbackSlot next_slot_;
+  FeedbackSlotKind slot_kind_;
+};
+
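+// Typical iteration pattern over the metadata, as a hypothetical caller:
+//
+//   FeedbackMetadataIterator iter(metadata);
+//   while (iter.HasNext()) {
+//     FeedbackSlot slot = iter.Next();
+//     FeedbackSlotKind kind = iter.kind();  // kind of the slot just returned
+//     int size = iter.entry_size();         // elements this slot occupies
+//     // ... inspect the slot ...
+//   }
+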
+// A FeedbackNexus is the combination of a FeedbackVector and a slot.
+// Derived classes customize the update and retrieval of feedback.
+class FeedbackNexus {
+ public:
+  FeedbackNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+      : vector_handle_(vector), vector_(NULL), slot_(slot) {}
+  FeedbackNexus(FeedbackVector* vector, FeedbackSlot slot)
+      : vector_(vector), slot_(slot) {}
+  virtual ~FeedbackNexus() {}
+
+  Handle<FeedbackVector> vector_handle() const {
+    DCHECK(vector_ == NULL);
+    return vector_handle_;
+  }
+  FeedbackVector* vector() const {
+    return vector_handle_.is_null() ? vector_ : *vector_handle_;
+  }
+  FeedbackSlot slot() const { return slot_; }
+  FeedbackSlotKind kind() const { return vector()->GetKind(slot()); }
+
+  InlineCacheState ic_state() const { return StateFromFeedback(); }
+  bool IsUninitialized() const { return StateFromFeedback() == UNINITIALIZED; }
+  Map* FindFirstMap() const {
+    MapHandleList maps;
+    ExtractMaps(&maps);
+    if (maps.length() > 0) return *maps.at(0);
+    return NULL;
+  }
+
+  // TODO(mvstanton): remove FindAllMaps, it didn't survive a code review.
+  void FindAllMaps(MapHandleList* maps) const { ExtractMaps(maps); }
+
+  virtual InlineCacheState StateFromFeedback() const = 0;
+  virtual int ExtractMaps(MapHandleList* maps) const;
+  virtual MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const;
+  virtual bool FindHandlers(List<Handle<Object>>* code_list,
+                            int length = -1) const;
+  virtual Name* FindFirstName() const { return NULL; }
+
+  bool IsCleared() {
+    InlineCacheState state = StateFromFeedback();
+    return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
+  }
+
+  virtual void Clear() { ConfigureUninitialized(); }
+  virtual void ConfigureUninitialized();
+  virtual void ConfigurePremonomorphic();
+  virtual void ConfigureMegamorphic();
+
+  inline Object* GetFeedback() const;
+  inline Object* GetFeedbackExtra() const;
+
+  inline Isolate* GetIsolate() const;
+
+ protected:
+  inline void SetFeedback(Object* feedback,
+                          WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+  inline void SetFeedbackExtra(Object* feedback_extra,
+                               WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+  Handle<FixedArray> EnsureArrayOfSize(int length);
+  Handle<FixedArray> EnsureExtraArrayOfSize(int length);
+  void InstallHandlers(Handle<FixedArray> array, MapHandleList* maps,
+                       List<Handle<Object>>* handlers);
+
+ private:
+  // The reason for having both a vector handle and a raw pointer is that we
+  // can and should use handles during IC miss, but not during GC when we
+  // clear ICs. If you have a handle to the vector, prefer it: more
+  // operations, such as allocation, are possible with a handle.
+  Handle<FeedbackVector> vector_handle_;
+  FeedbackVector* vector_;
+  FeedbackSlot slot_;
+};
+
+class CallICNexus final : public FeedbackNexus {
+ public:
+  CallICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->IsCallIC(slot));
+  }
+  CallICNexus(FeedbackVector* vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->IsCallIC(slot));
+  }
+
+  void ConfigureUninitialized() override;
+  void ConfigureMonomorphicArray();
+  void ConfigureMonomorphic(Handle<JSFunction> function);
+  void ConfigureMegamorphic() final;
+  void ConfigureMegamorphic(int call_count);
+
+  InlineCacheState StateFromFeedback() const final;
+
+  int ExtractMaps(MapHandleList* maps) const final {
+    // CallICs don't record map feedback.
+    return 0;
+  }
+  MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
+    return MaybeHandle<Code>();
+  }
+  bool FindHandlers(List<Handle<Object>>* code_list,
+                    int length = -1) const final {
+    return length == 0;
+  }
+
+  int ExtractCallCount();
+
+  // Compute the call frequency based on the call count and the invocation
+  // count (taken from the type feedback vector).
+  float ComputeCallFrequency();
+};
+
+class LoadICNexus : public FeedbackNexus {
+ public:
+  LoadICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->IsLoadIC(slot));
+  }
+  LoadICNexus(FeedbackVector* vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->IsLoadIC(slot));
+  }
+
+  void Clear() override { ConfigurePremonomorphic(); }
+
+  void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Object> handler);
+
+  void ConfigurePolymorphic(MapHandleList* maps,
+                            List<Handle<Object>>* handlers);
+
+  InlineCacheState StateFromFeedback() const override;
+};
+
+class LoadGlobalICNexus : public FeedbackNexus {
+ public:
+  LoadGlobalICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->IsLoadGlobalIC(slot));
+  }
+  LoadGlobalICNexus(FeedbackVector* vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->IsLoadGlobalIC(slot));
+  }
+
+  int ExtractMaps(MapHandleList* maps) const final {
+    // LoadGlobalICs don't record map feedback.
+    return 0;
+  }
+  MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
+    return MaybeHandle<Code>();
+  }
+  bool FindHandlers(List<Handle<Object>>* code_list,
+                    int length = -1) const final {
+    return length == 0;
+  }
+
+  void ConfigureMegamorphic() override { UNREACHABLE(); }
+
+  void ConfigureUninitialized() override;
+  void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
+  void ConfigureHandlerMode(Handle<Object> handler);
+
+  InlineCacheState StateFromFeedback() const override;
+};
+
+class KeyedLoadICNexus : public FeedbackNexus {
+ public:
+  KeyedLoadICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->IsKeyedLoadIC(slot));
+  }
+  KeyedLoadICNexus(FeedbackVector* vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->IsKeyedLoadIC(slot));
+  }
+
+  void Clear() override { ConfigurePremonomorphic(); }
+
+  // name can be a null handle for element loads.
+  void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
+                            Handle<Object> handler);
+  // name can be null.
+  void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
+                            List<Handle<Object>>* handlers);
+
+  void ConfigureMegamorphicKeyed(IcCheckType property_type);
+
+  IcCheckType GetKeyType() const;
+  InlineCacheState StateFromFeedback() const override;
+  Name* FindFirstName() const override;
+};
+
+class StoreICNexus : public FeedbackNexus {
+ public:
+  StoreICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot));
+  }
+  StoreICNexus(FeedbackVector* vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->IsStoreIC(slot) || vector->IsStoreOwnIC(slot));
+  }
+
+  void Clear() override { ConfigurePremonomorphic(); }
+
+  void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Object> handler);
+
+  void ConfigurePolymorphic(MapHandleList* maps,
+                            List<Handle<Object>>* handlers);
+
+  InlineCacheState StateFromFeedback() const override;
+};
+
+// TODO(ishell): Currently we use StoreOwnIC only for storing properties that
+// already exist in the boilerplate, so a plain StoreIC suffices.
+typedef StoreICNexus StoreOwnICNexus;
+
+class KeyedStoreICNexus : public FeedbackNexus {
+ public:
+  KeyedStoreICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->IsKeyedStoreIC(slot));
+  }
+  KeyedStoreICNexus(FeedbackVector* vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->IsKeyedStoreIC(slot));
+  }
+
+  void Clear() override { ConfigurePremonomorphic(); }
+
+  // name can be a null handle for element stores.
+  void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
+                            Handle<Object> handler);
+  // name can be null.
+  void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
+                            List<Handle<Object>>* handlers);
+  void ConfigurePolymorphic(MapHandleList* maps,
+                            MapHandleList* transitioned_maps,
+                            List<Handle<Object>>* handlers);
+  void ConfigureMegamorphicKeyed(IcCheckType property_type);
+
+  KeyedAccessStoreMode GetKeyedAccessStoreMode() const;
+  IcCheckType GetKeyType() const;
+
+  InlineCacheState StateFromFeedback() const override;
+  Name* FindFirstName() const override;
+};
+
+class BinaryOpICNexus final : public FeedbackNexus {
+ public:
+  BinaryOpICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK_EQ(FeedbackSlotKind::kBinaryOp, vector->GetKind(slot));
+  }
+  BinaryOpICNexus(FeedbackVector* vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK_EQ(FeedbackSlotKind::kBinaryOp, vector->GetKind(slot));
+  }
+
+  InlineCacheState StateFromFeedback() const final;
+  BinaryOperationHint GetBinaryOperationFeedback() const;
+
+  int ExtractMaps(MapHandleList* maps) const final {
+    // BinaryOpICs don't record map feedback.
+    return 0;
+  }
+  MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
+    return MaybeHandle<Code>();
+  }
+  bool FindHandlers(List<Handle<Object>>* code_list,
+                    int length = -1) const final {
+    return length == 0;
+  }
+};
+
+class CompareICNexus final : public FeedbackNexus {
+ public:
+  CompareICNexus(Handle<FeedbackVector> vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK_EQ(FeedbackSlotKind::kCompareOp, vector->GetKind(slot));
+  }
+  CompareICNexus(FeedbackVector* vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK_EQ(FeedbackSlotKind::kCompareOp, vector->GetKind(slot));
+  }
+
+  InlineCacheState StateFromFeedback() const final;
+  CompareOperationHint GetCompareOperationFeedback() const;
+
+  int ExtractMaps(MapHandleList* maps) const final {
+    // CompareICs don't record map feedback.
+    return 0;
+  }
+  MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
+    return MaybeHandle<Code>();
+  }
+  bool FindHandlers(List<Handle<Object>>* code_list,
+                    int length = -1) const final {
+    return length == 0;
+  }
+};
+
+class StoreDataPropertyInLiteralICNexus : public FeedbackNexus {
+ public:
+  StoreDataPropertyInLiteralICNexus(Handle<FeedbackVector> vector,
+                                    FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK_EQ(FeedbackSlotKind::kStoreDataPropertyInLiteral,
+              vector->GetKind(slot));
+  }
+  StoreDataPropertyInLiteralICNexus(FeedbackVector* vector, FeedbackSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK_EQ(FeedbackSlotKind::kStoreDataPropertyInLiteral,
+              vector->GetKind(slot));
+  }
+
+  void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map);
+
+  InlineCacheState StateFromFeedback() const override;
+};
+
+inline BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
+inline CompareOperationHint CompareOperationHintFromFeedback(int type_feedback);
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_FEEDBACK_VECTOR_H_
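
The StateFromFeedback() implementations in feedback-vector.cc above all follow
the same dispatch pattern: compare the slot's payload against the shared
sentinel symbols first, then fall back to structural checks. A minimal,
self-contained model of that pattern (plain C++, not V8 code; the Feedback and
Classify names are invented for illustration):

#include <cassert>
#include <cstdio>

// Stand-ins for V8's sentinel symbols and heap-object shapes.
enum class Feedback {
  kUninitializedSentinel,
  kPremonomorphicSentinel,
  kMegamorphicSentinel,
  kWeakCell,    // a single (map, handler) entry
  kFixedArray,  // an array of (map, handler) pairs
};
enum class InlineCacheState {
  UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, POLYMORPHIC, MEGAMORPHIC
};

// Mirrors the shape of StoreICNexus::StateFromFeedback(): sentinels first,
// then the state is determined purely by the feedback's structure.
InlineCacheState Classify(Feedback feedback) {
  switch (feedback) {
    case Feedback::kUninitializedSentinel:
      return InlineCacheState::UNINITIALIZED;
    case Feedback::kPremonomorphicSentinel:
      return InlineCacheState::PREMONOMORPHIC;
    case Feedback::kMegamorphicSentinel:
      return InlineCacheState::MEGAMORPHIC;
    case Feedback::kFixedArray:
      return InlineCacheState::POLYMORPHIC;
    case Feedback::kWeakCell:
      return InlineCacheState::MONOMORPHIC;
  }
  return InlineCacheState::UNINITIALIZED;
}

int main() {
  assert(Classify(Feedback::kWeakCell) == InlineCacheState::MONOMORPHIC);
  assert(Classify(Feedback::kMegamorphicSentinel) ==
         InlineCacheState::MEGAMORPHIC);
  std::puts("ok");
  return 0;
}
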
diff --git a/src/ffi/OWNERS b/src/ffi/OWNERS
new file mode 100644
index 0000000..dc9a978
--- /dev/null
+++ b/src/ffi/OWNERS
@@ -0,0 +1,2 @@
+mattloring@google.com
+ofrobots@google.com
diff --git a/src/ffi/ffi-compiler.cc b/src/ffi/ffi-compiler.cc
new file mode 100644
index 0000000..d7fdbb9
--- /dev/null
+++ b/src/ffi/ffi-compiler.cc
@@ -0,0 +1,128 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ffi/ffi-compiler.h"
+#include "src/api.h"
+#include "src/code-factory.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void InstallFFIMap(Isolate* isolate) {
+  Handle<Context> context(isolate->context());
+  DCHECK(!context->get(Context::NATIVE_FUNCTION_MAP_INDEX)->IsMap());
+  Handle<Map> prev_map = Handle<Map>(context->sloppy_function_map(), isolate);
+
+  InstanceType instance_type = prev_map->instance_type();
+  int internal_fields = JSObject::GetInternalFieldCount(*prev_map);
+  CHECK_EQ(0, internal_fields);
+  int pre_allocated =
+      prev_map->GetInObjectProperties() - prev_map->unused_property_fields();
+  int instance_size;
+  int in_object_properties;
+  JSFunction::CalculateInstanceSizeHelper(
+      instance_type, internal_fields, 0, &instance_size, &in_object_properties);
+  int unused_property_fields = in_object_properties - pre_allocated;
+  Handle<Map> map = Map::CopyInitialMap(
+      prev_map, instance_size, in_object_properties, unused_property_fields);
+  context->set_native_function_map(*map);
+}
+
+namespace ffi {
+
+class FFIAssembler : public CodeStubAssembler {
+ public:
+  explicit FFIAssembler(CodeAssemblerState* state) : CodeStubAssembler(state) {}
+
+  Node* ToJS(Node* node, Node* context, FFIType type) {
+    switch (type) {
+      case FFIType::kInt32:
+        return ChangeInt32ToTagged(node);
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+  Node* FromJS(Node* node, Node* context, FFIType type) {
+    switch (type) {
+      case FFIType::kInt32:
+        return TruncateTaggedToWord32(context, node);
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+  MachineType FFIToMachineType(FFIType type) {
+    switch (type) {
+      case FFIType::kInt32:
+        return MachineType::Int32();
+    }
+    UNREACHABLE();
+    return MachineType::None();
+  }
+
+  Signature<MachineType>* FFIToMachineSignature(FFISignature* sig) {
+    Signature<MachineType>::Builder sig_builder(zone(), sig->return_count(),
+                                                sig->parameter_count());
+    for (size_t i = 0; i < sig->return_count(); i++) {
+      sig_builder.AddReturn(FFIToMachineType(sig->GetReturn(i)));
+    }
+    for (size_t j = 0; j < sig->parameter_count(); j++) {
+      sig_builder.AddParam(FFIToMachineType(sig->GetParam(j)));
+    }
+    return sig_builder.Build();
+  }
+
+  void GenerateJSToNativeWrapper(NativeFunction* func) {
+    int params = static_cast<int>(func->sig->parameter_count());
+    int returns = static_cast<int>(func->sig->return_count());
+    ApiFunction api_func(func->start);
+    ExternalReference ref(&api_func, ExternalReference::BUILTIN_CALL,
+                          isolate());
+
+    Node* context_param = GetJSContextParameter();
+
+    Node** inputs = zone()->NewArray<Node*>(params + 1);
+    int input_count = 0;
+    inputs[input_count++] = ExternalConstant(ref);
+    for (int i = 0; i < params; i++) {
+      inputs[input_count++] =
+          FromJS(Parameter(i), context_param, func->sig->GetParam(i));
+    }
+
+    Node* call =
+        CallCFunctionN(FFIToMachineSignature(func->sig), input_count, inputs);
+    Node* return_val = UndefinedConstant();
+    if (returns == 1) {
+      return_val = ToJS(call, context_param, func->sig->GetReturn());
+    }
+    Return(return_val);
+  }
+};
+
+Handle<JSFunction> CompileJSToNativeWrapper(Isolate* isolate,
+                                            Handle<String> name,
+                                            NativeFunction func) {
+  int params = static_cast<int>(func.sig->parameter_count());
+  Zone zone(isolate->allocator(), ZONE_NAME);
+  CodeAssemblerState state(isolate, &zone, params,
+                           Code::ComputeFlags(Code::BUILTIN), "js-to-native");
+  FFIAssembler assembler(&state);
+  assembler.GenerateJSToNativeWrapper(&func);
+  Handle<Code> code = assembler.GenerateCode(&state);
+
+  Handle<SharedFunctionInfo> shared =
+      isolate->factory()->NewSharedFunctionInfo(name, code, false);
+  shared->set_length(params);
+  shared->set_internal_formal_parameter_count(params);
+  Handle<JSFunction> function = isolate->factory()->NewFunction(
+      isolate->native_function_map(), name, code);
+  function->set_shared(*shared);
+  return function;
+}
+
+}  // namespace ffi
+}  // namespace internal
+}  // namespace v8
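
FFIToMachineSignature above is a plain element-wise translation that preserves
the builder's return-then-parameter ordering. The same idea, modeled as
self-contained C++ (simplified stand-ins instead of V8's MachineType and
Signature; all names here are invented):

#include <cstdint>
#include <cstdio>
#include <vector>

enum class FFIType : uint8_t { kInt32 };
enum class MachineType { kInt32 };

// Simplified stand-in for Signature<MachineType>.
struct MachineSig {
  std::vector<MachineType> returns;
  std::vector<MachineType> params;
};

MachineType ToMachineType(FFIType type) {
  switch (type) {
    case FFIType::kInt32:
      return MachineType::kInt32;
  }
  return MachineType::kInt32;  // unreachable: kInt32 is the only member
}

// Mirrors the translation loops: all returns first, then all parameters.
MachineSig Translate(const std::vector<FFIType>& rets,
                     const std::vector<FFIType>& params) {
  MachineSig out;
  for (FFIType r : rets) out.returns.push_back(ToMachineType(r));
  for (FFIType p : params) out.params.push_back(ToMachineType(p));
  return out;
}

int main() {
  MachineSig sig = Translate({FFIType::kInt32},
                             {FFIType::kInt32, FFIType::kInt32});
  std::printf("returns=%zu params=%zu\n", sig.returns.size(),
              sig.params.size());
  return 0;
}
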
diff --git a/src/ffi/ffi-compiler.h b/src/ffi/ffi-compiler.h
new file mode 100644
index 0000000..2825f4f
--- /dev/null
+++ b/src/ffi/ffi-compiler.h
@@ -0,0 +1,37 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef SRC_FFI_FFI_COMPILER_H_
+#define SRC_FFI_FFI_COMPILER_H_
+
+#include "src/code-stub-assembler.h"
+#include "src/machine-type.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+
+void InstallFFIMap(Isolate* isolate);
+
+namespace ffi {
+
+enum class FFIType : uint8_t { kInt32 };
+
+typedef Signature<FFIType> FFISignature;
+
+struct NativeFunction {
+  FFISignature* sig;
+  uint8_t* start;
+};
+
+Handle<JSFunction> CompileJSToNativeWrapper(Isolate* isolate,
+                                            Handle<String> name,
+                                            NativeFunction func);
+}  // namespace ffi
+}  // namespace internal
+}  // namespace v8
+
+#endif  // SRC_FFI_FFI_COMPILER_H_
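
Putting the declared pieces together, a hypothetical caller (not present in
this change, and compilable only inside V8; `zone`, `isolate`, and
`native_add_entry` are assumed to exist) would look roughly like:

  // Describe int32 add(int32, int32): one return, two parameters.
  Signature<FFIType>::Builder sig_builder(zone, 1, 2);
  sig_builder.AddReturn(FFIType::kInt32);
  sig_builder.AddParam(FFIType::kInt32);
  sig_builder.AddParam(FFIType::kInt32);

  NativeFunction func = {sig_builder.Build(),
                         reinterpret_cast<uint8_t*>(&native_add_entry)};
  Handle<String> name = isolate->factory()->InternalizeUtf8String("add");
  Handle<JSFunction> wrapper = CompileJSToNativeWrapper(isolate, name, func);
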
diff --git a/src/field-type.cc b/src/field-type.cc
index 16bccf2..0097a35 100644
--- a/src/field-type.cc
+++ b/src/field-type.cc
@@ -6,6 +6,7 @@
 
 #include "src/ast/ast-types.h"
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 #include "src/ostreams.h"
 
 namespace v8 {
diff --git a/src/field-type.h b/src/field-type.h
index 11e1069..2f8250a 100644
--- a/src/field-type.h
+++ b/src/field-type.h
@@ -6,13 +6,15 @@
 #define V8_FIELD_TYPE_H_
 
 #include "src/ast/ast-types.h"
-#include "src/handles.h"
 #include "src/objects.h"
 #include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
 
+template <typename T>
+class Handle;
+
 class FieldType : public Object {
  public:
   static FieldType* None();
diff --git a/src/find-and-replace-pattern.h b/src/find-and-replace-pattern.h
new file mode 100644
index 0000000..845ee0f
--- /dev/null
+++ b/src/find-and-replace-pattern.h
@@ -0,0 +1,37 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_FIND_AND_REPLACE_PATTERN_H_
+#define V8_FIND_AND_REPLACE_PATTERN_H_
+
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+
+class Map;
+class Object;
+
+class FindAndReplacePattern {
+ public:
+  FindAndReplacePattern() : count_(0) {}
+  void Add(Handle<Map> map_to_find, Handle<Object> obj_to_replace) {
+    DCHECK(count_ < kMaxCount);
+    find_[count_] = map_to_find;
+    replace_[count_] = obj_to_replace;
+    ++count_;
+  }
+
+ private:
+  static const int kMaxCount = 4;
+  int count_;
+  Handle<Map> find_[kMaxCount];
+  Handle<Object> replace_[kMaxCount];
+  friend class Code;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_FIND_AND_REPLACE_PATTERN_H_
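
FindAndReplacePattern is no more than a fixed-capacity list of
(map-to-find, object-to-replace) pairs whose contents only the Code friend
class may read. The capacity bound is the interesting part; a self-contained
analogue (plain pointers in place of Handle<Map>/Handle<Object>, invented
names) makes it concrete:

#include <cassert>

// Simplified analogue of FindAndReplacePattern.
template <typename K, typename V, int kMaxCount = 4>
class BoundedPatternList {
 public:
  BoundedPatternList() : count_(0) {}
  void Add(K* to_find, V* to_replace) {
    assert(count_ < kMaxCount);  // mirrors the DCHECK in Add() above
    find_[count_] = to_find;
    replace_[count_] = to_replace;
    ++count_;
  }
  int count() const { return count_; }

 private:
  int count_;
  K* find_[kMaxCount];
  V* replace_[kMaxCount];
};

int main() {
  int map_a = 0, replacement_b = 0;
  BoundedPatternList<int, int> patterns;
  patterns.Add(&map_a, &replacement_b);
  assert(patterns.count() == 1);
  return 0;
}
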
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index a7efe11..e6e9b7e 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -159,6 +159,7 @@
 #define DEFINE_MAYBE_BOOL(nam, cmt) \
   FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, {false COMMA false}, cmt)
 #define DEFINE_INT(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
+#define DEFINE_UINT(nam, def, cmt) FLAG(UINT, unsigned int, nam, def, cmt)
 #define DEFINE_FLOAT(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
 #define DEFINE_STRING(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
 #define DEFINE_ARGS(nam, cmt) FLAG(ARGS, JSArguments, nam, {0 COMMA NULL}, cmt)
@@ -196,35 +197,40 @@
 #define HARMONY_INPROGRESS(V)                                           \
   V(harmony_array_prototype_values, "harmony Array.prototype.values")   \
   V(harmony_function_sent, "harmony function.sent")                     \
+  V(harmony_tailcalls, "harmony tail calls")                            \
   V(harmony_sharedarraybuffer, "harmony sharedarraybuffer")             \
-  V(harmony_simd, "harmony simd")                                       \
   V(harmony_do_expressions, "harmony do-expressions")                   \
   V(harmony_regexp_named_captures, "harmony regexp named captures")     \
   V(harmony_regexp_property, "harmony unicode regexp property classes") \
-  V(harmony_class_fields, "harmony public fields in class literals")
+  V(harmony_function_tostring, "harmony Function.prototype.toString")   \
+  V(harmony_class_fields, "harmony public fields in class literals")    \
+  V(harmony_async_iteration, "harmony async iteration")                 \
+  V(harmony_dynamic_import, "harmony dynamic import")                   \
+  V(harmony_promise_finally, "harmony Promise.prototype.finally")
 
 // Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED_BASE(V)                              \
-  V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
-  V(harmony_restrictive_generators,                         \
-    "harmony restrictions on generator declarations")       \
-  V(harmony_tailcalls, "harmony tail calls")                \
-  V(harmony_trailing_commas,                                \
-    "harmony trailing commas in function parameter lists")  \
-  V(harmony_string_padding, "harmony String-padding methods")
+#define HARMONY_STAGED(V)                                                \
+  V(harmony_regexp_lookbehind, "harmony regexp lookbehind")              \
+  V(harmony_restrictive_generators,                                      \
+    "harmony restrictions on generator declarations")                    \
+  V(harmony_object_rest_spread, "harmony object rest spread properties") \
+  V(harmony_template_escapes,                                            \
+    "harmony invalid escapes in tagged template literals")
+
+// Features that are shipping (turned on by default, but internal flag remains).
+#define HARMONY_SHIPPING_BASE(V) \
+  V(harmony_trailing_commas,     \
+    "harmony trailing commas in function parameter lists")
 
 #ifdef V8_I18N_SUPPORT
-#define HARMONY_STAGED(V)                                          \
-  HARMONY_STAGED_BASE(V)                                           \
+#define HARMONY_SHIPPING(V)                                        \
+  HARMONY_SHIPPING_BASE(V)                                         \
   V(datetime_format_to_parts, "Intl.DateTimeFormat.formatToParts") \
   V(icu_case_mapping, "case mapping with ICU rather than Unibrow")
 #else
-#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
+#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
 #endif
 
-// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) V(harmony_async_await, "harmony async-await")
-
 // Once a shipping feature has proved stable in the wild, it will be dropped
 // from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
 // and associated tests are moved from the harmony directory to the appropriate
@@ -251,14 +257,27 @@
 DEFINE_BOOL(future, false,
             "Implies all staged features that we want to ship in the "
             "not-too-far future")
-DEFINE_IMPLICATION(future, ignition_staging)
+DEFINE_IMPLICATION(future, turbo)
+
+DEFINE_IMPLICATION(turbo, ignition_staging)
+DEFINE_IMPLICATION(turbo, enable_fast_array_builtins)
+DEFINE_IMPLICATION(turbo, thin_strings)
+
+// TODO(rmcilroy): Remove ignition-staging and set these implications directly
+// with the turbo flag.
+DEFINE_BOOL(ignition_staging, false, "use ignition with all staged features")
+DEFINE_IMPLICATION(ignition_staging, ignition)
+DEFINE_IMPLICATION(ignition_staging, compiler_dispatcher)
 
 // Flags for experimental implementation features.
 DEFINE_BOOL(allocation_site_pretenuring, true,
             "pretenure with allocation sites")
+DEFINE_BOOL(mark_shared_functions_for_tier_up, true,
+            "mark shared functions for tier up")
 DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
 DEFINE_INT(page_promotion_threshold, 70,
            "min percentage of live bytes on a page to enable fast evacuation")
+DEFINE_BOOL(smi_binop, true, "support smi representation in binary operations")
 DEFINE_BOOL(trace_pretenuring, false,
             "trace pretenuring decisions of HAllocate instructions")
 DEFINE_BOOL(trace_pretenuring_statistics, false,
@@ -273,9 +292,7 @@
 DEFINE_BOOL(track_field_types, true, "track field types")
 DEFINE_IMPLICATION(track_field_types, track_fields)
 DEFINE_IMPLICATION(track_field_types, track_heap_object_fields)
-DEFINE_BOOL(smi_binop, true, "support smi representation in binary operations")
-DEFINE_BOOL(mark_shared_functions_for_tier_up, true,
-            "mark shared functions for tier up")
+DEFINE_BOOL(type_profile, false, "collect type information")
 
 // Flags for optimization types.
 DEFINE_BOOL(optimize_for_size, false,
@@ -290,9 +307,6 @@
 
 // Flags for Ignition.
 DEFINE_BOOL(ignition, false, "use ignition interpreter")
-DEFINE_BOOL(ignition_staging, false, "use ignition with all staged features")
-DEFINE_IMPLICATION(ignition_staging, ignition)
-DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
 DEFINE_BOOL(ignition_deadcode, true,
             "use ignition dead code elimination optimizer")
 DEFINE_BOOL(ignition_osr, true, "enable support for OSR from ignition code")
@@ -302,6 +316,8 @@
             "filter expression positions before the bytecode pipeline")
 DEFINE_BOOL(print_bytecode, false,
             "print bytecode generated by ignition interpreter")
+DEFINE_STRING(print_bytecode_filter, "*",
+              "filter for selecting which functions to print bytecode")
 DEFINE_BOOL(trace_ignition, false,
             "trace the bytecodes executed by the ignition interpreter")
 DEFINE_BOOL(trace_ignition_codegen, false,
@@ -399,7 +415,7 @@
 DEFINE_BOOL(inline_arguments, true, "inline functions with arguments object")
 DEFINE_BOOL(inline_accessors, true, "inline JavaScript accessors")
 DEFINE_BOOL(inline_into_try, true, "inline into try blocks")
-DEFINE_INT(escape_analysis_iterations, 2,
+DEFINE_INT(escape_analysis_iterations, 1,
            "maximum number of escape analysis fix-point iterations")
 
 DEFINE_BOOL(concurrent_recompilation, true,
@@ -419,14 +435,10 @@
 
 // Flags for TurboFan.
 DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
-DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
-DEFINE_IMPLICATION(turbo, turbo_loop_peeling)
-DEFINE_IMPLICATION(turbo, turbo_escape)
 DEFINE_BOOL(turbo_sp_frame_access, false,
             "use stack pointer-relative access to frame wherever possible")
 DEFINE_BOOL(turbo_preprocess_ranges, true,
             "run pre-register allocation heuristics")
-DEFINE_BOOL(turbo_loop_stackcheck, true, "enable stack checks in loops")
 DEFINE_STRING(turbo_filter, "~~", "optimization filter for TurboFan compiler")
 DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
 DEFINE_BOOL(trace_turbo_graph, false, "trace generated TurboFan graphs")
@@ -441,17 +453,26 @@
 DEFINE_BOOL(trace_turbo_ceq, false, "trace TurboFan's control equivalence")
 DEFINE_BOOL(trace_turbo_loop, false, "trace TurboFan's loop optimizations")
 DEFINE_BOOL(turbo_asm, true, "enable TurboFan for asm.js code")
-DEFINE_BOOL(turbo_asm_deoptimization, false,
-            "enable deoptimization in TurboFan for asm.js code")
 DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
 DEFINE_STRING(turbo_verify_machine_graph, nullptr,
               "verify TurboFan machine graph before instruction selection")
+#ifdef ENABLE_VERIFY_CSA
+DEFINE_BOOL(verify_csa, DEBUG_BOOL,
+            "verify TurboFan machine graph of code stubs")
+#else
+// Define the flag as read-only-false so that code still compiles even in the
+// non-ENABLE_VERIFY_CSA configuration.
+DEFINE_BOOL_READONLY(verify_csa, false,
+                     "verify TurboFan machine graph of code stubs")
+#endif
+DEFINE_BOOL(trace_verify_csa, false, "trace code stubs verification")
+DEFINE_STRING(csa_trap_on_node, nullptr,
+              "trigger break point when a node with given id is created in "
+              "given stub. The format is: StubName,NodeId")
 DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
 DEFINE_BOOL(turbo_stats_nvp, false,
             "print TurboFan statistics in machine-readable format")
 DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
-DEFINE_BOOL(turbo_type_feedback, true,
-            "use typed feedback for representation inference in Turbofan")
 DEFINE_BOOL(function_context_specialization, false,
             "enable function context specialization in TurboFan")
 DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
@@ -465,19 +486,19 @@
             "verify register allocation in TurboFan")
 DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
 DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
-DEFINE_BOOL(turbo_stress_loop_peeling, false,
-            "stress loop peeling optimization")
-DEFINE_BOOL(turbo_loop_peeling, false, "Turbofan loop peeling")
+DEFINE_BOOL(turbo_loop_peeling, true, "Turbofan loop peeling")
 DEFINE_BOOL(turbo_loop_variable, true, "Turbofan loop variable optimization")
 DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
 DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
-DEFINE_BOOL(turbo_escape, false, "enable escape analysis")
+DEFINE_BOOL(turbo_escape, true, "enable escape analysis")
 DEFINE_BOOL(turbo_instruction_scheduling, false,
             "enable instruction scheduling in TurboFan")
 DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
             "randomly schedule instructions to stress dependency tracking")
 DEFINE_BOOL(turbo_store_elimination, true,
             "enable store-store elimination in TurboFan")
+// TODO(turbofan): Rename --crankshaft to --optimize eventually.
+DEFINE_IMPLICATION(turbo, crankshaft)
 
 // Flags to help platform porters
 DEFINE_BOOL(minimal, false,
@@ -488,9 +509,17 @@
 DEFINE_NEG_IMPLICATION(minimal, use_ic)
 
 // Flags for native WebAssembly.
-DEFINE_BOOL(expose_wasm, false, "expose WASM interface to JavaScript")
+DEFINE_BOOL(expose_wasm, true, "expose WASM interface to JavaScript")
+DEFINE_BOOL(assume_asmjs_origin, false,
+            "force wasm decoder to assume input is internal asm-wasm format")
+DEFINE_BOOL(wasm_disable_structured_cloning, false,
+            "disable WASM structured cloning")
 DEFINE_INT(wasm_num_compilation_tasks, 10,
            "number of parallel compilation tasks for wasm")
+DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kV8MaxWasmMemoryPages,
+            "maximum memory size of a wasm instance")
+DEFINE_UINT(wasm_max_table_size, v8::internal::wasm::kV8MaxWasmTableSize,
+            "maximum table size of a wasm instance")
 DEFINE_BOOL(trace_wasm_encoder, false, "trace encoding of wasm code")
 DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
 DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
@@ -499,6 +528,10 @@
 DEFINE_INT(trace_wasm_ast_start, 0,
            "start function for WASM AST trace (inclusive)")
 DEFINE_INT(trace_wasm_ast_end, 0, "end function for WASM AST trace (exclusive)")
+DEFINE_INT(trace_wasm_text_start, 0,
+           "start function for WASM text generation (inclusive)")
+DEFINE_INT(trace_wasm_text_end, 0,
+           "end function for WASM text generation (exclusive)")
 DEFINE_INT(skip_compiling_wasm_funcs, 0, "start compiling at function N")
 DEFINE_BOOL(wasm_break_on_decoder_error, false,
             "debug break when wasm decoder encounters an error")
@@ -506,6 +539,9 @@
             "perform loop assignment analysis for WASM")
 
 DEFINE_BOOL(validate_asm, false, "validate asm.js modules before compiling")
+DEFINE_BOOL(suppress_asm_messages, false,
+            "don't emit asm.js related messages (for golden file testing)")
+DEFINE_BOOL(trace_asm_time, false, "log asm.js timing info to the console")
 
 DEFINE_BOOL(dump_wasm_module, false, "dump WASM module bytes")
 DEFINE_STRING(dump_wasm_module_path, NULL, "directory to dump wasm modules to")
@@ -522,9 +558,24 @@
 DEFINE_BOOL(wasm_atomics_prototype, false,
             "enable prototype atomic opcodes for wasm")
 
+DEFINE_BOOL(wasm_opt, true, "enable wasm optimization")
+DEFINE_BOOL(wasm_no_bounds_checks, false,
+            "disable bounds checks (performance testing only)")
+DEFINE_BOOL(wasm_no_stack_checks, false,
+            "disable stack checks (performance testing only)")
+
 DEFINE_BOOL(wasm_trap_handler, false,
             "use signal handlers to catch out of bounds memory access in wasm"
-            " (currently Linux x86_64 only)")
+            " (experimental, currently Linux x86_64 only)")
+DEFINE_BOOL(wasm_guard_pages, false,
+            "add guard pages to the end of WebWassembly memory"
+            " (experimental, no effect on 32-bit)")
+DEFINE_IMPLICATION(wasm_trap_handler, wasm_guard_pages)
+DEFINE_BOOL(wasm_trap_if, true,
+            "enable the use of the trap_if operator for traps")
+DEFINE_BOOL(wasm_code_fuzzer_gen_test, false,
+            "Generate a test case when running the wasm-code fuzzer")
+DEFINE_BOOL(print_wasm_code, false, "Print WebAssembly code")
 
 // Profiler flags.
 DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
@@ -540,8 +591,115 @@
 DEFINE_BOOL(trace_opt_verbose, false, "extra verbose compilation tracing")
 DEFINE_IMPLICATION(trace_opt_verbose, trace_opt)
 
+// Garbage collection flags.
+DEFINE_INT(min_semi_space_size, 0,
+           "min size of a semi-space (in MBytes), the new space consists of two"
+           "semi-spaces")
+DEFINE_INT(max_semi_space_size, 0,
+           "max size of a semi-space (in MBytes), the new space consists of two"
+           "semi-spaces")
+DEFINE_INT(semi_space_growth_factor, 2, "factor by which to grow the new space")
+DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
+            "Grow the new space based on the percentage of survivors instead "
+            "of their absolute value.")
+DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)")
+DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)")
+DEFINE_INT(max_executable_size, 0, "max size of executable memory (in Mbytes)")
+DEFINE_BOOL(gc_global, false, "always perform global GCs")
+DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
+DEFINE_INT(retain_maps_for_n_gc, 2,
+           "keeps maps alive for <n> old space garbage collections")
+DEFINE_BOOL(trace_gc, false,
+            "print one trace line following each garbage collection")
+DEFINE_BOOL(trace_gc_nvp, false,
+            "print one detailed trace line in name=value format "
+            "after each garbage collection")
+DEFINE_BOOL(trace_gc_ignore_scavenger, false,
+            "do not print trace line after scavenger collection")
+DEFINE_BOOL(trace_idle_notification, false,
+            "print one trace line following each idle notification")
+DEFINE_BOOL(trace_idle_notification_verbose, false,
+            "prints the heap state used by the idle notification")
+DEFINE_BOOL(trace_gc_verbose, false,
+            "print more details following each garbage collection")
+DEFINE_INT(trace_allocation_stack_interval, -1,
+           "print stack trace after <n> free-list allocations")
+DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old space")
+DEFINE_BOOL(trace_fragmentation_verbose, false,
+            "report fragmentation for old space (detailed)")
+DEFINE_BOOL(trace_evacuation, false, "report evacuation statistics")
+DEFINE_BOOL(trace_mutator_utilization, false,
+            "print mutator utilization, allocation speed, gc speed")
+DEFINE_BOOL(flush_code, true, "flush code that we expect not to use again")
+DEFINE_BOOL(trace_code_flushing, false, "trace code flushing progress")
+DEFINE_BOOL(age_code, true,
+            "track un-executed functions to age code and flush only "
+            "old code (required for code flushing)")
+DEFINE_BOOL(incremental_marking, true, "use incremental marking")
+DEFINE_BOOL(incremental_marking_wrappers, true,
+            "use incremental marking for marking wrappers")
+DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
+           "keep finalizing incremental marking as long as we discover at "
+           "least this many unmarked objects")
+DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
+           "at most try this many times to finalize incremental marking")
+DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
+DEFINE_NEG_IMPLICATION(minor_mc, incremental_marking)
+DEFINE_BOOL(black_allocation, true, "use black allocation")
+DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
+DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
+DEFINE_BOOL(parallel_pointer_update, true,
+            "use parallel pointer update during compaction")
+DEFINE_BOOL(trace_incremental_marking, false,
+            "trace progress of the incremental marking")
+DEFINE_BOOL(track_gc_object_stats, false,
+            "track object counts and memory usage")
+DEFINE_BOOL(trace_gc_object_stats, false,
+            "trace object counts and memory usage")
+DEFINE_INT(gc_stats, 0, "Used by tracing internally to enable gc statistics")
+DEFINE_IMPLICATION(trace_gc_object_stats, track_gc_object_stats)
+DEFINE_VALUE_IMPLICATION(track_gc_object_stats, gc_stats, 1)
+DEFINE_VALUE_IMPLICATION(trace_gc_object_stats, gc_stats, 1)
+DEFINE_NEG_IMPLICATION(trace_gc_object_stats, incremental_marking)
+DEFINE_BOOL(track_detached_contexts, true,
+            "track native contexts that are expected to be garbage collected")
+DEFINE_BOOL(trace_detached_contexts, false,
+            "trace native contexts that are expected to be garbage collected")
+DEFINE_IMPLICATION(trace_detached_contexts, track_detached_contexts)
+#ifdef VERIFY_HEAP
+DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
+#endif
+DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
+DEFINE_BOOL(memory_reducer, true, "use memory reducer")
+DEFINE_INT(heap_growing_percent, 0,
+           "specifies heap growing factor as (1 + heap_growing_percent/100)")
+DEFINE_INT(v8_os_page_size, 0, "override OS page size (in KBytes)")
+DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
+DEFINE_BOOL(never_compact, false,
+            "Never perform compaction on full GC - testing only")
+DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
+DEFINE_BOOL(cleanup_code_caches_at_gc, true,
+            "Flush code caches in maps during mark compact cycle.")
+DEFINE_BOOL(use_marking_progress_bar, true,
+            "Use a progress bar to scan large objects in increments when "
+            "incremental marking is active.")
+DEFINE_BOOL(zap_code_space, DEBUG_BOOL,
+            "Zap free memory in code space with 0xCC while sweeping.")
+DEFINE_BOOL(force_marking_deque_overflows, false,
+            "force overflows of marking deque by reducing it's size "
+            "to 64 words")
+DEFINE_BOOL(stress_compaction, false,
+            "stress the GC compactor to flush out bugs (implies "
+            "--force_marking_deque_overflows)")
+DEFINE_BOOL(manual_evacuation_candidates_selection, false,
+            "Test mode only flag. It allows an unit test to select evacuation "
+            "candidates pages (requires --stress_compaction).")
+DEFINE_BOOL(fast_promotion_new_space, false,
+            "fast promote new space on high survival rates")
+
 // assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
-DEFINE_BOOL(debug_code, false, "generate extra code (assertions) for debugging")
+DEFINE_BOOL(debug_code, DEBUG_BOOL,
+            "generate extra code (assertions) for debugging")
 DEFINE_BOOL(code_comments, false, "emit comments in code disassembly")
 DEFINE_BOOL(enable_sse3, true, "enable use of SSE3 instructions if available")
 DEFINE_BOOL(enable_ssse3, true, "enable use of SSSE3 instructions if available")
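A note on the implication macros used throughout these flag hunks: DEFINE_IMPLICATION(a, b) turns b on whenever a is set, DEFINE_NEG_IMPLICATION(a, b) turns b off, and DEFINE_VALUE_IMPLICATION(a, b, v) assigns v to b. A minimal standalone sketch of that post-parsing pass, assuming a simplified expansion (the real macros are expanded by re-including flag-definitions.h under a different macro mode; the flag names below mirror ones from the hunk above):

// Sketch only: hand-expanded implication rules for a few of the flags above.
#include <cstdio>

static bool FLAG_trace_gc_object_stats = true;  // as if set on the command line
static bool FLAG_track_gc_object_stats = false;
static bool FLAG_incremental_marking = true;
static int FLAG_gc_stats = 0;

static void EnforceImplications() {
  // DEFINE_IMPLICATION(trace_gc_object_stats, track_gc_object_stats)
  if (FLAG_trace_gc_object_stats) FLAG_track_gc_object_stats = true;
  // DEFINE_VALUE_IMPLICATION(track_gc_object_stats, gc_stats, 1)
  if (FLAG_track_gc_object_stats) FLAG_gc_stats = 1;
  // DEFINE_NEG_IMPLICATION(trace_gc_object_stats, incremental_marking)
  if (FLAG_trace_gc_object_stats) FLAG_incremental_marking = false;
}

int main() {
  EnforceImplications();
  std::printf("track=%d gc_stats=%d incremental_marking=%d\n",
              FLAG_track_gc_object_stats, FLAG_gc_stats,
              FLAG_incremental_marking);  // track=1 gc_stats=1 marking=0
}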
@@ -579,10 +737,12 @@
 
 // api.cc
 DEFINE_BOOL(script_streaming, true, "enable parsing on background")
+DEFINE_BOOL(disable_old_api_accessors, false,
+            "Disable old-style API accessors whose setters trigger through the "
+            "prototype chain")
 
 // bootstrapper.cc
 DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
-DEFINE_STRING(expose_debug_as, NULL, "expose debug in global object")
 DEFINE_BOOL(expose_free_buffer, false, "expose freeBuffer extension")
 DEFINE_BOOL(expose_gc, false, "expose gc extension")
 DEFINE_STRING(expose_gc_as, NULL,
@@ -596,6 +756,7 @@
             "show built-in functions in stack traces")
 
 // builtins.cc
+DEFINE_BOOL(enable_fast_array_builtins, false, "use optimized builtins")
 DEFINE_BOOL(allow_unsafe_function_constructor, false,
             "allow invoking the function constructor without security checks")
 
@@ -633,8 +794,6 @@
 #endif  // DEBUG
 
 // compiler.cc
-DEFINE_INT(min_preparse_length, 1024,
-           "minimum length for automatic enable preparsing")
 DEFINE_INT(max_opt_count, 10,
            "maximum number of optimization attempts before giving up.")
 
@@ -643,6 +802,18 @@
 
 DEFINE_BOOL(cache_prototype_transitions, true, "cache prototype transitions")
 
+// compiler-dispatcher.cc
+DEFINE_BOOL(compiler_dispatcher, false, "enable compiler dispatcher")
+DEFINE_BOOL(compiler_dispatcher_eager_inner, false,
+            "enable background compilation of eager inner functions")
+DEFINE_BOOL(trace_compiler_dispatcher, false,
+            "trace compiler dispatcher activity")
+
+// compiler-dispatcher-job.cc
+DEFINE_BOOL(
+    trace_compiler_dispatcher_jobs, false,
+    "trace progress of individual jobs managed by the compiler dispatcher")
+
 // cpu-profiler.cc
 DEFINE_INT(cpu_profiler_sampling_interval, 1000,
            "CPU profiler sampling interval in microseconds")
@@ -660,6 +831,9 @@
 // debugger
 DEFINE_BOOL(trace_debug_json, false, "trace debugging JSON request/response")
 DEFINE_BOOL(enable_liveedit, true, "enable liveedit experimental feature")
+DEFINE_BOOL(
+    trace_side_effect_free_debug_evaluate, false,
+    "print debug messages for side-effect-free debug-evaluate for testing")
 DEFINE_BOOL(hard_abort, true, "abort by crashing")
 
 // execution.cc
@@ -677,99 +851,6 @@
             "emit debug code that verifies the static tracking of the operand "
             "stack depth")
 
-// heap.cc
-DEFINE_INT(min_semi_space_size, 0,
-           "min size of a semi-space (in MBytes), the new space consists of two"
-           "semi-spaces")
-DEFINE_INT(max_semi_space_size, 0,
-           "max size of a semi-space (in MBytes), the new space consists of two"
-           "semi-spaces")
-DEFINE_INT(semi_space_growth_factor, 2, "factor by which to grow the new space")
-DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
-            "Grow the new space based on the percentage of survivors instead "
-            "of their absolute value.")
-DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)")
-DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)")
-DEFINE_INT(max_executable_size, 0, "max size of executable memory (in Mbytes)")
-DEFINE_BOOL(gc_global, false, "always perform global GCs")
-DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
-DEFINE_INT(retain_maps_for_n_gc, 2,
-           "keeps maps alive for <n> old space garbage collections")
-DEFINE_BOOL(trace_gc, false,
-            "print one trace line following each garbage collection")
-DEFINE_BOOL(trace_gc_nvp, false,
-            "print one detailed trace line in name=value format "
-            "after each garbage collection")
-DEFINE_BOOL(trace_gc_ignore_scavenger, false,
-            "do not print trace line after scavenger collection")
-DEFINE_BOOL(trace_idle_notification, false,
-            "print one trace line following each idle notification")
-DEFINE_BOOL(trace_idle_notification_verbose, false,
-            "prints the heap state used by the idle notification")
-DEFINE_BOOL(print_max_heap_committed, false,
-            "print statistics of the maximum memory committed for the heap "
-            "in name=value format on exit")
-DEFINE_BOOL(trace_gc_verbose, false,
-            "print more details following each garbage collection")
-DEFINE_INT(trace_allocation_stack_interval, -1,
-           "print stack trace after <n> free-list allocations")
-DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old space")
-DEFINE_BOOL(trace_fragmentation_verbose, false,
-            "report fragmentation for old space (detailed)")
-DEFINE_BOOL(trace_evacuation, false, "report evacuation statistics")
-DEFINE_BOOL(trace_mutator_utilization, false,
-            "print mutator utilization, allocation speed, gc speed")
-DEFINE_BOOL(weak_embedded_maps_in_optimized_code, true,
-            "make maps embedded in optimized code weak")
-DEFINE_BOOL(weak_embedded_objects_in_optimized_code, true,
-            "make objects embedded in optimized code weak")
-DEFINE_BOOL(flush_code, true, "flush code that we expect not to use again")
-DEFINE_BOOL(trace_code_flushing, false, "trace code flushing progress")
-DEFINE_BOOL(age_code, true,
-            "track un-executed functions to age code and flush only "
-            "old code (required for code flushing)")
-DEFINE_BOOL(incremental_marking, true, "use incremental marking")
-DEFINE_BOOL(incremental_marking_wrappers, true,
-            "use incremental marking for marking wrappers")
-DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
-           "keep finalizing incremental marking as long as we discover at "
-           "least this many unmarked objects")
-DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
-           "at most try this many times to finalize incremental marking")
-DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
-DEFINE_NEG_IMPLICATION(minor_mc, incremental_marking)
-DEFINE_BOOL(black_allocation, false, "use black allocation")
-DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
-DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
-DEFINE_BOOL(parallel_pointer_update, true,
-            "use parallel pointer update during compaction")
-DEFINE_BOOL(trace_incremental_marking, false,
-            "trace progress of the incremental marking")
-DEFINE_BOOL(track_gc_object_stats, false,
-            "track object counts and memory usage")
-DEFINE_BOOL(trace_gc_object_stats, false,
-            "trace object counts and memory usage")
-DEFINE_INT(gc_stats, 0, "Used by tracing internally to enable gc statistics")
-DEFINE_IMPLICATION(trace_gc_object_stats, track_gc_object_stats)
-DEFINE_VALUE_IMPLICATION(track_gc_object_stats, gc_stats, 1)
-DEFINE_VALUE_IMPLICATION(trace_gc_object_stats, gc_stats, 1)
-DEFINE_NEG_IMPLICATION(trace_gc_object_stats, incremental_marking)
-DEFINE_BOOL(track_detached_contexts, true,
-            "track native contexts that are expected to be garbage collected")
-DEFINE_BOOL(trace_detached_contexts, false,
-            "trace native contexts that are expected to be garbage collected")
-DEFINE_IMPLICATION(trace_detached_contexts, track_detached_contexts)
-#ifdef VERIFY_HEAP
-DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
-#endif
-DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
-DEFINE_BOOL(memory_reducer, true, "use memory reducer")
-DEFINE_INT(heap_growing_percent, 0,
-           "specifies heap growing factor as (1 + heap_growing_percent/100)")
-
-// spaces.cc
-DEFINE_INT(v8_os_page_size, 0, "override OS page size (in KBytes)")
-
 // execution.cc, messages.cc
 DEFINE_BOOL(clear_exceptions_on_js_entry, false,
             "clear pending exceptions when entering JavaScript")
@@ -798,31 +879,18 @@
 // ic.cc
 DEFINE_BOOL(use_ic, true, "use inline caching")
 DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
-DEFINE_BOOL_READONLY(tf_load_ic_stub, true, "use TF LoadIC stub")
-DEFINE_BOOL(tf_store_ic_stub, true, "use TF StoreIC stub")
+DEFINE_IMPLICATION(trace_ic, log_code)
+DEFINE_INT(ic_stats, 0, "inline cache state transitions statistics")
+DEFINE_VALUE_IMPLICATION(trace_ic, ic_stats, 1)
+DEFINE_BOOL_READONLY(track_constant_fields, false,
+                     "enable constant field tracking")
 
 // macro-assembler-ia32.cc
 DEFINE_BOOL(native_code_counters, false,
             "generate extra code for manipulating stats counters")
 
-// mark-compact.cc
-DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
-DEFINE_BOOL(never_compact, false,
-            "Never perform compaction on full GC - testing only")
-DEFINE_BOOL(compact_code_space, true, "Compact code space on full collections")
-DEFINE_BOOL(cleanup_code_caches_at_gc, true,
-            "Flush inline caches prior to mark compact collection and "
-            "flush code caches in maps during mark compact cycle.")
-DEFINE_BOOL(use_marking_progress_bar, true,
-            "Use a progress bar to scan large objects in increments when "
-            "incremental marking is active.")
-DEFINE_BOOL(zap_code_space, DEBUG_BOOL,
-            "Zap free memory in code space with 0xCC while sweeping.")
-DEFINE_INT(random_seed, 0,
-           "Default seed for initializing random generator "
-           "(0, the default, means to use system random).")
-
 // objects.cc
+DEFINE_BOOL(thin_strings, false, "Enable ThinString support")
 DEFINE_BOOL(trace_weak_arrays, false, "Trace WeakFixedArray usage")
 DEFINE_BOOL(trace_prototype_users, false,
             "Trace updates to prototype user tracking")
@@ -836,7 +904,13 @@
 DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
 DEFINE_BOOL(trace_parse, false, "trace parsing and preparsing")
 DEFINE_BOOL(trace_preparse, false, "trace preparsing decisions")
-DEFINE_BOOL(lazy_inner_functions, false, "enable lazy parsing inner functions")
+DEFINE_BOOL(lazy_inner_functions, true, "enable lazy parsing inner functions")
+DEFINE_BOOL(aggressive_lazy_inner_functions, false,
+            "even lazier inner function parsing")
+DEFINE_IMPLICATION(aggressive_lazy_inner_functions, lazy_inner_functions)
+DEFINE_BOOL(preparser_scope_analysis, false,
+            "perform scope analysis for preparsed inner functions")
+DEFINE_IMPLICATION(preparser_scope_analysis, lazy_inner_functions)
 
 // simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
 DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
@@ -880,6 +954,9 @@
 DEFINE_INT(hash_seed, 0,
            "Fixed seed to use to hash property keys (0 means random)"
            "(with snapshots this option cannot override the baked-in seed)")
+DEFINE_INT(random_seed, 0,
+           "Default seed for initializing random generator "
+           "(0, the default, means to use system random).")
 DEFINE_BOOL(trace_rail, false, "trace RAIL mode")
 DEFINE_BOOL(print_all_exceptions, false,
             "print exception object and stack trace on each thrown exception")
@@ -917,29 +994,14 @@
 DEFINE_BOOL(profile_hydrogen_code_stub_compilation, false,
             "Print the time it takes to lazily compile hydrogen code stubs.")
 
-// mark-compact.cc
-DEFINE_BOOL(force_marking_deque_overflows, false,
-            "force overflows of marking deque by reducing it's size "
-            "to 64 words")
-
-DEFINE_BOOL(stress_compaction, false,
-            "stress the GC compactor to flush out bugs (implies "
-            "--force_marking_deque_overflows)")
-
-DEFINE_BOOL(manual_evacuation_candidates_selection, false,
-            "Test mode only flag. It allows an unit test to select evacuation "
-            "candidates pages (requires --stress_compaction).")
-
-DEFINE_BOOL(disable_old_api_accessors, false,
-            "Disable old-style API accessors whose setters trigger through the "
-            "prototype chain")
-
 //
 // Dev shell flags
 //
 
 DEFINE_BOOL(help, false, "Print usage message, including flags, on console")
 DEFINE_BOOL(dump_counters, false, "Dump counters on exit")
+DEFINE_BOOL(dump_counters_nvp, false,
+            "Dump counters as name-value pairs on exit")
 
 DEFINE_STRING(map_counters, "", "Map counters to a file")
 DEFINE_ARGS(js_arguments,
@@ -1074,10 +1136,9 @@
 DEFINE_BOOL(perf_prof, false,
             "Enable perf linux profiler (experimental annotate support).")
 DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
-DEFINE_BOOL(perf_prof_debug_info, false,
-            "Enable debug info for perf linux profiler (experimental).")
 DEFINE_BOOL(perf_prof_unwinding_info, false,
             "Enable unwinding info for perf linux profiler (experimental).")
+DEFINE_IMPLICATION(perf_prof, perf_prof_unwinding_info)
 DEFINE_STRING(gc_fake_mmap, "/tmp/__v8_gc__",
               "Specify the name of the file for fake gc mmap used in ll_prof")
 DEFINE_BOOL(log_internal_timer_events, false, "Time internal events.")
@@ -1100,6 +1161,10 @@
 DEFINE_BOOL(hydrogen_track_positions, false,
             "track source code positions when building IR")
 
+DEFINE_BOOL(print_opt_source, false,
+            "print source code of optimized and inlined functions")
+DEFINE_IMPLICATION(hydrogen_track_positions, print_opt_source)
+
 //
 // Disassembler only flags
 //
@@ -1124,6 +1189,9 @@
 DEFINE_BOOL(test_primary_stub_cache, false,
             "test primary stub cache by disabling the secondary one")
 
+DEFINE_BOOL(test_small_max_function_context_stub_size, false,
+            "enable testing the function context size overflow path "
+            "by making the maximum size smaller")
 
 // codegen-ia32.cc / codegen-arm.cc
 DEFINE_BOOL(print_code, false, "print generated code")
@@ -1169,6 +1237,7 @@
 DEFINE_BOOL(predictable, false, "enable predictable mode")
 DEFINE_IMPLICATION(predictable, single_threaded)
 DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
+DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0)
 
 //
 // Threading related flags.
diff --git a/src/flags.cc b/src/flags.cc
index f7ae004..6998d49 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -15,6 +15,7 @@
 #include "src/list-inl.h"
 #include "src/ostreams.h"
 #include "src/utils.h"
+#include "src/wasm/wasm-limits.h"
 
 namespace v8 {
 namespace internal {
@@ -33,8 +34,15 @@
 // to the actual flag, default value, comment, etc.  This is designed to be POD
 // initialized as to avoid requiring static constructors.
 struct Flag {
-  enum FlagType { TYPE_BOOL, TYPE_MAYBE_BOOL, TYPE_INT, TYPE_FLOAT,
-                  TYPE_STRING, TYPE_ARGS };
+  enum FlagType {
+    TYPE_BOOL,
+    TYPE_MAYBE_BOOL,
+    TYPE_INT,
+    TYPE_UINT,
+    TYPE_FLOAT,
+    TYPE_STRING,
+    TYPE_ARGS
+  };
 
   FlagType type_;           // What type of flag, bool, int, or string.
   const char* name_;        // Name of the flag, ex "my_flag".
@@ -64,6 +72,11 @@
     return reinterpret_cast<int*>(valptr_);
   }
 
+  unsigned int* uint_variable() const {
+    DCHECK(type_ == TYPE_UINT);
+    return reinterpret_cast<unsigned int*>(valptr_);
+  }
+
   double* float_variable() const {
     DCHECK(type_ == TYPE_FLOAT);
     return reinterpret_cast<double*>(valptr_);
@@ -97,6 +110,11 @@
     return *reinterpret_cast<const int*>(defptr_);
   }
 
+  unsigned int uint_default() const {
+    DCHECK(type_ == TYPE_UINT);
+    return *reinterpret_cast<const unsigned int*>(defptr_);
+  }
+
   double float_default() const {
     DCHECK(type_ == TYPE_FLOAT);
     return *reinterpret_cast<const double*>(defptr_);
@@ -121,6 +139,8 @@
         return maybe_bool_variable()->has_value == false;
       case TYPE_INT:
         return *int_variable() == int_default();
+      case TYPE_UINT:
+        return *uint_variable() == uint_default();
       case TYPE_FLOAT:
         return *float_variable() == float_default();
       case TYPE_STRING: {
@@ -149,6 +169,9 @@
       case TYPE_INT:
         *int_variable() = int_default();
         break;
+      case TYPE_UINT:
+        *uint_variable() = uint_default();
+        break;
       case TYPE_FLOAT:
         *float_variable() = float_default();
         break;
@@ -177,6 +200,8 @@
     case Flag::TYPE_BOOL: return "bool";
     case Flag::TYPE_MAYBE_BOOL: return "maybe_bool";
     case Flag::TYPE_INT: return "int";
+    case Flag::TYPE_UINT:
+      return "uint";
     case Flag::TYPE_FLOAT: return "float";
     case Flag::TYPE_STRING: return "string";
     case Flag::TYPE_ARGS: return "arguments";
@@ -199,6 +224,9 @@
     case Flag::TYPE_INT:
       os << *flag.int_variable();
       break;
+    case Flag::TYPE_UINT:
+      os << *flag.uint_variable();
+      break;
     case Flag::TYPE_FLOAT:
       os << *flag.float_variable();
       break;
@@ -399,6 +427,24 @@
         case Flag::TYPE_INT:
           *flag->int_variable() = static_cast<int>(strtol(value, &endp, 10));
           break;
+        case Flag::TYPE_UINT: {
+          // We do not use strtoul because it accepts negative numbers.
+          int64_t val = static_cast<int64_t>(strtoll(value, &endp, 10));
+          if (val < 0 || val > std::numeric_limits<unsigned int>::max()) {
+            PrintF(stderr,
+                   "Error: Value for flag %s of type %s is out of bounds "
+                   "[0-%" PRIu64
+                   "]\n"
+                   "Try --help for options\n",
+                   arg, Type2String(flag->type()),
+                   static_cast<uint64_t>(
+                       std::numeric_limits<unsigned int>::max()));
+            return_code = j;
+            break;
+          }
+          *flag->uint_variable() = static_cast<unsigned int>(val);
+          break;
+        }
         case Flag::TYPE_FLOAT:
           *flag->float_variable() = strtod(value, &endp);
           break;
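The TYPE_UINT case above parses through a signed 64-bit value on purpose: strtoul would silently wrap a negative input, while strtoll lets the code reject it with an explicit bounds check. A self-contained sketch of the same check follows (ParseUintFlag is a hypothetical helper for illustration, not part of this patch):

// Sketch only: mirror of the TYPE_UINT bounds check above.
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <limits>

bool ParseUintFlag(const char* value, unsigned int* out) {
  char* endp = nullptr;
  int64_t val = static_cast<int64_t>(std::strtoll(value, &endp, 10));
  if (endp == value) return false;  // no digits consumed
  // Reject anything outside [0, UINT_MAX]; strtoul would have wrapped "-1".
  if (val < 0 || val > std::numeric_limits<unsigned int>::max()) return false;
  *out = static_cast<unsigned int>(val);
  return true;
}

int main() {
  unsigned int v = 0;
  std::printf("%d\n", ParseUintFlag("4294967295", &v));  // 1 (v == UINT_MAX)
  std::printf("%d\n", ParseUintFlag("-1", &v));          // 0 (rejected)
}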
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 61d0dcd..bf1db05 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -166,16 +166,16 @@
 
 
 inline bool StandardFrame::IsArgumentsAdaptorFrame(Address fp) {
-  Object* frame_type =
-      Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
-  return frame_type == Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR);
+  intptr_t frame_type =
+      Memory::intptr_at(fp + TypedFrameConstants::kFrameTypeOffset);
+  return frame_type == StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
 }
 
 
 inline bool StandardFrame::IsConstructFrame(Address fp) {
-  Object* frame_type =
-      Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
-  return frame_type == Smi::FromInt(StackFrame::CONSTRUCT);
+  intptr_t frame_type =
+      Memory::intptr_at(fp + TypedFrameConstants::kFrameTypeOffset);
+  return frame_type == StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
 }
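IsArgumentsAdaptorFrame and IsConstructFrame now compare the raw on-stack word against a type marker instead of a Smi object. The exact encoding lives in src/frames.h, which this patch excerpt does not show; the sketch below assumes Smi-tagged small integers (type << 1, low bit clear), which is consistent with the "marker >> 1" decode used later in frames.cc:

// Sketch only: an assumed marker encoding that round-trips with ">> 1".
#include <cassert>
#include <cstdint>

enum Type { ENTRY, EXIT, ARGUMENTS_ADAPTOR, CONSTRUCT, NUMBER_OF_TYPES };

intptr_t TypeToMarker(Type type) { return static_cast<intptr_t>(type) << 1; }

bool IsTypeMarker(intptr_t word) {
  // Even (Smi-tagged) and small: heap pointers have the low bit set, and a
  // 64-bit Smi keeps its payload in the upper half-word, so neither lands here.
  return (word & 1) == 0 && word < (NUMBER_OF_TYPES << 1);
}

Type MarkerToType(intptr_t marker) {
  assert(IsTypeMarker(marker));
  return static_cast<Type>(marker >> 1);
}

int main() {
  intptr_t m = TypeToMarker(ARGUMENTS_ADAPTOR);
  assert(IsTypeMarker(m) && MarkerToType(m) == ARGUMENTS_ADAPTOR);
  assert(!IsTypeMarker(m | 1));  // a pointer-tagged word is rejected
}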
 
 inline JavaScriptFrame::JavaScriptFrame(StackFrameIteratorBase* iterator)
@@ -252,7 +252,11 @@
 inline BuiltinFrame::BuiltinFrame(StackFrameIteratorBase* iterator)
     : JavaScriptFrame(iterator) {}
 
-inline WasmFrame::WasmFrame(StackFrameIteratorBase* iterator)
+inline WasmCompiledFrame::WasmCompiledFrame(StackFrameIteratorBase* iterator)
+    : StandardFrame(iterator) {}
+
+inline WasmInterpreterEntryFrame::WasmInterpreterEntryFrame(
+    StackFrameIteratorBase* iterator)
     : StandardFrame(iterator) {}
 
 inline WasmToJsFrame::WasmToJsFrame(StackFrameIteratorBase* iterator)
@@ -311,13 +315,7 @@
 bool StackTraceFrameIterator::is_wasm() const { return frame()->is_wasm(); }
 
 JavaScriptFrame* StackTraceFrameIterator::javascript_frame() const {
-  DCHECK(is_javascript());
-  return static_cast<JavaScriptFrame*>(frame());
-}
-
-WasmFrame* StackTraceFrameIterator::wasm_frame() const {
-  DCHECK(is_wasm());
-  return static_cast<WasmFrame*>(frame());
+  return JavaScriptFrame::cast(frame());
 }
 
 inline StackFrame* SafeStackFrameIterator::frame() const {
diff --git a/src/frames.cc b/src/frames.cc
index 3b73027..680a226 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -11,6 +11,7 @@
 #include "src/deoptimizer.h"
 #include "src/frames-inl.h"
 #include "src/full-codegen/full-codegen.h"
+#include "src/ic/ic-stats.h"
 #include "src/register-configuration.h"
 #include "src/safepoint-table.h"
 #include "src/string-stream.h"
@@ -130,16 +131,6 @@
 
 // -------------------------------------------------------------------------
 
-JavaScriptFrameIterator::JavaScriptFrameIterator(Isolate* isolate,
-                                                 StackFrame::Id id)
-    : iterator_(isolate) {
-  while (!done()) {
-    Advance();
-    if (frame()->id() == id) return;
-  }
-}
-
-
 void JavaScriptFrameIterator::Advance() {
   do {
     iterator_.Advance();
@@ -177,10 +168,7 @@
   if (frame->is_java_script()) {
     JavaScriptFrame* jsFrame = static_cast<JavaScriptFrame*>(frame);
     if (!jsFrame->function()->IsJSFunction()) return false;
-    Object* script = jsFrame->function()->shared()->script();
-    // Don't show functions from native scripts to user.
-    return (script->IsScript() &&
-            Script::TYPE_NATIVE != Script::cast(script)->type());
+    return jsFrame->function()->shared()->IsSubjectToDebugging();
   }
   // apart from javascript, only wasm is valid
   return frame->is_wasm();
@@ -194,6 +182,29 @@
 
 // -------------------------------------------------------------------------
 
+namespace {
+
+bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
+  Code* interpreter_entry_trampoline =
+      isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
+  Code* interpreter_bytecode_advance =
+      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeAdvance);
+  Code* interpreter_bytecode_dispatch =
+      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+
+  return (pc >= interpreter_entry_trampoline->instruction_start() &&
+          pc < interpreter_entry_trampoline->instruction_end()) ||
+         (pc >= interpreter_bytecode_advance->instruction_start() &&
+          pc < interpreter_bytecode_advance->instruction_end()) ||
+         (pc >= interpreter_bytecode_dispatch->instruction_start() &&
+          pc < interpreter_bytecode_dispatch->instruction_end());
+}
+
+DISABLE_ASAN Address ReadMemoryAt(Address address) {
+  return Memory::Address_at(address);
+}
+
+}  // namespace
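The three comparisons in IsInterpreterFramePc are instances of one pattern: test whether a pc falls in a code object's half-open [instruction_start, instruction_end) range. A trivial standalone sketch of that membership test (CodeRange is an illustrative type, not V8's):

// Sketch only: half-open interval membership, as used by IsInterpreterFramePc.
#include <cassert>
#include <cstdint>

struct CodeRange { uintptr_t start, end; };  // [start, end)

bool PcWithinAny(uintptr_t pc, const CodeRange* ranges, int n) {
  for (int i = 0; i < n; i++) {
    if (pc >= ranges[i].start && pc < ranges[i].end) return true;
  }
  return false;
}

int main() {
  CodeRange trampolines[] = {{0x1000, 0x1080}, {0x2000, 0x2040}};
  assert(PcWithinAny(0x1010, trampolines, 2));
  assert(!PcWithinAny(0x1080, trampolines, 2));  // end is exclusive
}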
 
 SafeStackFrameIterator::SafeStackFrameIterator(
     Isolate* isolate,
@@ -206,6 +217,7 @@
   StackFrame::State state;
   StackFrame::Type type;
   ThreadLocalTop* top = isolate->thread_local_top();
+  bool advance_frame = true;
   if (IsValidTop(top)) {
     type = ExitFrame::GetStateForFramePointer(Isolate::c_entry_fp(top), &state);
     top_frame_type_ = type;
@@ -215,6 +227,19 @@
     state.sp = sp;
     state.pc_address = StackFrame::ResolveReturnAddressLocation(
         reinterpret_cast<Address*>(StandardFrame::ComputePCAddress(fp)));
+
+    // If the top of stack is a return address to the interpreter trampoline,
+    // then we are likely in a bytecode handler with an elided frame. In that
+    // case, set the PC properly and make sure we do not drop the frame.
+    if (IsValidStackAddress(sp)) {
+      MSAN_MEMORY_IS_INITIALIZED(sp, kPointerSize);
+      Address tos = ReadMemoryAt(reinterpret_cast<Address>(sp));
+      if (IsInterpreterFramePc(isolate, tos)) {
+        state.pc_address = reinterpret_cast<Address*>(sp);
+        advance_frame = false;
+      }
+    }
+
     // StackFrame::ComputeType will read both kContextOffset and kMarkerOffset,
     // we check only that kMarkerOffset is within the stack bounds and do
     // compile time check that kContextOffset slot is pushed on the stack before
@@ -225,6 +250,10 @@
     if (IsValidStackAddress(frame_marker)) {
       type = StackFrame::ComputeType(this, &state);
       top_frame_type_ = type;
+      // We only keep the top frame if we believe it to be an interpreted frame.
+      if (type != StackFrame::INTERPRETED) {
+        advance_frame = true;
+      }
     } else {
       // Mark the frame as JAVA_SCRIPT if we cannot determine its type.
       // The frame anyways will be skipped.
@@ -236,7 +265,7 @@
     return;
   }
   frame_ = SingletonFor(type, &state);
-  if (frame_) Advance();
+  if (advance_frame && frame_) Advance();
 }
 
 
@@ -401,22 +430,6 @@
   return_address_location_resolver_ = resolver;
 }
 
-static bool IsInterpreterFramePc(Isolate* isolate, Address pc) {
-  Code* interpreter_entry_trampoline =
-      isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
-  Code* interpreter_bytecode_advance =
-      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeAdvance);
-  Code* interpreter_bytecode_dispatch =
-      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
-
-  return (pc >= interpreter_entry_trampoline->instruction_start() &&
-          pc < interpreter_entry_trampoline->instruction_end()) ||
-         (pc >= interpreter_bytecode_advance->instruction_start() &&
-          pc < interpreter_bytecode_advance->instruction_end()) ||
-         (pc >= interpreter_bytecode_dispatch->instruction_start() &&
-          pc < interpreter_bytecode_dispatch->instruction_end());
-}
-
 StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
                                          State* state) {
   DCHECK(state->fp != NULL);
@@ -424,7 +437,7 @@
   MSAN_MEMORY_IS_INITIALIZED(
       state->fp + CommonFrameConstants::kContextOrFrameTypeOffset,
       kPointerSize);
-  Object* marker = Memory::Object_at(
+  intptr_t marker = Memory::intptr_at(
       state->fp + CommonFrameConstants::kContextOrFrameTypeOffset);
   if (!iterator->can_access_heap_objects_) {
     // TODO(titzer): "can_access_heap_objects" is kind of bogus. It really
@@ -436,7 +449,7 @@
         state->fp + StandardFrameConstants::kFunctionOffset, kPointerSize);
     Object* maybe_function =
         Memory::Object_at(state->fp + StandardFrameConstants::kFunctionOffset);
-    if (!marker->IsSmi()) {
+    if (!StackFrame::IsTypeMarker(marker)) {
       if (maybe_function->IsSmi()) {
         return NONE;
       } else if (IsInterpreterFramePc(iterator->isolate(),
@@ -453,7 +466,7 @@
     if (code_obj != nullptr) {
       switch (code_obj->kind()) {
         case Code::BUILTIN:
-          if (marker->IsSmi()) break;
+          if (StackFrame::IsTypeMarker(marker)) break;
           if (code_obj->is_interpreter_trampoline_builtin()) {
             return INTERPRETED;
           }
@@ -470,11 +483,13 @@
         case Code::OPTIMIZED_FUNCTION:
           return OPTIMIZED;
         case Code::WASM_FUNCTION:
-          return WASM;
+          return WASM_COMPILED;
         case Code::WASM_TO_JS_FUNCTION:
           return WASM_TO_JS;
         case Code::JS_TO_WASM_FUNCTION:
           return JS_TO_WASM;
+        case Code::WASM_INTERPRETER_ENTRY:
+          return WASM_INTERPRETER_ENTRY;
         default:
           // All other types should have an explicit marker
           break;
@@ -484,9 +499,8 @@
     }
   }
 
-  DCHECK(marker->IsSmi());
-  StackFrame::Type candidate =
-      static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+  DCHECK(StackFrame::IsTypeMarker(marker));
+  StackFrame::Type candidate = StackFrame::MarkerToType(marker);
   switch (candidate) {
     case ENTRY:
     case ENTRY_CONSTRUCT:
@@ -498,7 +512,7 @@
     case CONSTRUCT:
     case ARGUMENTS_ADAPTOR:
     case WASM_TO_JS:
-    case WASM:
+    case WASM_COMPILED:
       return candidate;
     case JS_TO_WASM:
     case JAVA_SCRIPT:
@@ -576,6 +590,7 @@
   state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
   state->pc_address = ResolveReturnAddressLocation(
       reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset));
+  state->callee_pc_address = nullptr;
   if (FLAG_enable_embedded_constant_pool) {
     state->constant_pool_address = reinterpret_cast<Address*>(
         fp() + ExitFrameConstants::kConstantPoolOffset);
@@ -605,7 +620,7 @@
   if (fp == 0) return NONE;
   Address sp = ComputeStackPointer(fp);
   FillState(fp, sp, state);
-  DCHECK(*state->pc_address != NULL);
+  DCHECK_NOT_NULL(*state->pc_address);
 
   return ComputeFrameType(fp);
 }
@@ -620,8 +635,9 @@
     return EXIT;
   }
 
-  StackFrame::Type frame_type =
-      static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+  intptr_t marker_int = bit_cast<intptr_t>(marker);
+
+  StackFrame::Type frame_type = static_cast<StackFrame::Type>(marker_int >> 1);
   if (frame_type == EXIT || frame_type == BUILTIN_EXIT) {
     return frame_type;
   }
@@ -639,11 +655,12 @@
   state->fp = fp;
   state->pc_address = ResolveReturnAddressLocation(
       reinterpret_cast<Address*>(sp - 1 * kPCOnStackSize));
+  state->callee_pc_address = nullptr;
   // The constant pool recorded in the exit frame is not associated
   // with the pc in this state (the return address into a C entry
   // stub).  ComputeCallerState will retrieve the constant pool
   // together with the associated caller pc.
-  state->constant_pool_address = NULL;
+  state->constant_pool_address = nullptr;
 }
 
 JSFunction* BuiltinExitFrame::function() const {
@@ -747,6 +764,7 @@
   state->fp = caller_fp();
   state->pc_address = ResolveReturnAddressLocation(
       reinterpret_cast<Address*>(ComputePCAddress(fp())));
+  state->callee_pc_address = pc_address();
   state->constant_pool_address =
       reinterpret_cast<Address*>(ComputeConstantPoolAddress(fp()));
 }
@@ -759,6 +777,12 @@
 
 bool StandardFrame::IsConstructor() const { return false; }
 
+void StandardFrame::Summarize(List<FrameSummary>* functions,
+                              FrameSummary::Mode mode) const {
+  // This should only be called on frames which override this method.
+  UNREACHABLE();
+}
+
 void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
   // Make sure that we're not doing "safe" stack frame iteration. We cannot
   // possibly find pointers in optimized frames in that state.
@@ -773,11 +797,10 @@
 
   // Determine the fixed header and spill slot area size.
   int frame_header_size = StandardFrameConstants::kFixedFrameSizeFromFp;
-  Object* marker =
-      Memory::Object_at(fp() + CommonFrameConstants::kContextOrFrameTypeOffset);
-  if (marker->IsSmi()) {
-    StackFrame::Type candidate =
-        static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+  intptr_t marker =
+      Memory::intptr_at(fp() + CommonFrameConstants::kContextOrFrameTypeOffset);
+  if (StackFrame::IsTypeMarker(marker)) {
+    StackFrame::Type candidate = StackFrame::MarkerToType(marker);
     switch (candidate) {
       case ENTRY:
       case ENTRY_CONSTRUCT:
@@ -790,7 +813,8 @@
       case CONSTRUCT:
       case JS_TO_WASM:
       case WASM_TO_JS:
-      case WASM:
+      case WASM_COMPILED:
+      case WASM_INTERPRETER_ENTRY:
         frame_header_size = TypedFrameConstants::kFixedFrameSizeFromFp;
         break;
       case JAVA_SCRIPT:
@@ -881,7 +905,7 @@
 
 
 Code* StubFrame::unchecked_code() const {
-  return static_cast<Code*>(isolate()->FindCodeObject(pc()));
+  return isolate()->FindCodeObject(pc());
 }
 
 
@@ -916,7 +940,7 @@
 
 
 bool JavaScriptFrame::HasInlinedFrames() const {
-  List<JSFunction*> functions(1);
+  List<SharedFunctionInfo*> functions(1);
   GetFunctions(&functions);
   return functions.length() > 1;
 }
@@ -949,10 +973,19 @@
   return fp() + StandardFrameConstants::kCallerSPOffset;
 }
 
-
-void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) const {
+void JavaScriptFrame::GetFunctions(List<SharedFunctionInfo*>* functions) const {
   DCHECK(functions->length() == 0);
-  functions->Add(function());
+  functions->Add(function()->shared());
+}
+
+void JavaScriptFrame::GetFunctions(
+    List<Handle<SharedFunctionInfo>>* functions) const {
+  DCHECK(functions->length() == 0);
+  List<SharedFunctionInfo*> raw_functions;
+  GetFunctions(&raw_functions);
+  for (const auto& raw_function : raw_functions) {
+    functions->Add(Handle<SharedFunctionInfo>(raw_function));
+  }
 }
 
 void JavaScriptFrame::Summarize(List<FrameSummary>* functions,
@@ -961,8 +994,9 @@
   Code* code = LookupCode();
   int offset = static_cast<int>(pc() - code->instruction_start());
   AbstractCode* abstract_code = AbstractCode::cast(code);
-  FrameSummary summary(receiver(), function(), abstract_code, offset,
-                       IsConstructor(), mode);
+  FrameSummary::JavaScriptFrameSummary summary(isolate(), receiver(),
+                                               function(), abstract_code,
+                                               offset, IsConstructor(), mode);
   functions->Add(summary);
 }
 
@@ -972,10 +1006,6 @@
 
 Object* JavaScriptFrame::receiver() const { return GetParameter(-1); }
 
-Script* JavaScriptFrame::script() const {
-  return Script::cast(function()->shared()->script());
-}
-
 Object* JavaScriptFrame::context() const {
   const int offset = StandardFrameConstants::kContextOffset;
   Object* maybe_result = Memory::Object_at(fp() + offset);
@@ -983,12 +1013,15 @@
   return maybe_result;
 }
 
+Script* JavaScriptFrame::script() const {
+  return Script::cast(function()->shared()->script());
+}
+
 int JavaScriptFrame::LookupExceptionHandlerInTable(
     int* stack_depth, HandlerTable::CatchPrediction* prediction) {
-  Code* code = LookupCode();
-  DCHECK(!code->is_optimized_code());
-  int pc_offset = static_cast<int>(pc() - code->entry());
-  return code->LookupRangeInHandlerTable(pc_offset, stack_depth, prediction);
+  DCHECK_EQ(0, LookupCode()->handler_table()->length());
+  DCHECK(!LookupCode()->is_optimized_code());
+  return -1;
 }
 
 void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function,
@@ -1020,7 +1053,6 @@
   }
 }
 
-
 void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
                                bool print_line_number) {
   // constructor calls
@@ -1060,12 +1092,48 @@
   }
 }
 
+void JavaScriptFrame::CollectFunctionAndOffsetForICStats(JSFunction* function,
+                                                         AbstractCode* code,
+                                                         int code_offset) {
+  auto ic_stats = ICStats::instance();
+  ICInfo& ic_info = ic_stats->Current();
+  SharedFunctionInfo* shared = function->shared();
 
-void JavaScriptFrame::SaveOperandStack(FixedArray* store) const {
-  int operands_count = store->length();
-  DCHECK_LE(operands_count, ComputeOperandsCount());
-  for (int i = 0; i < operands_count; i++) {
-    store->set(i, GetOperand(i));
+  ic_info.function_name = ic_stats->GetOrCacheFunctionName(function);
+  ic_info.script_offset = code_offset;
+
+  int source_pos = code->SourcePosition(code_offset);
+  Object* maybe_script = shared->script();
+  if (maybe_script->IsScript()) {
+    Script* script = Script::cast(maybe_script);
+    ic_info.line_num = script->GetLineNumber(source_pos) + 1;
+    ic_info.script_name = ic_stats->GetOrCacheScriptName(script);
+  }
+}
+
+void JavaScriptFrame::CollectTopFrameForICStats(Isolate* isolate) {
+  // constructor calls
+  DisallowHeapAllocation no_allocation;
+  JavaScriptFrameIterator it(isolate);
+  ICInfo& ic_info = ICStats::instance()->Current();
+  while (!it.done()) {
+    if (it.frame()->is_java_script()) {
+      JavaScriptFrame* frame = it.frame();
+      if (frame->IsConstructor()) ic_info.is_constructor = true;
+      JSFunction* function = frame->function();
+      int code_offset = 0;
+      if (frame->is_interpreted()) {
+        InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
+        code_offset = iframe->GetBytecodeOffset();
+      } else {
+        Code* code = frame->unchecked_code();
+        code_offset = static_cast<int>(frame->pc() - code->instruction_start());
+      }
+      CollectFunctionAndOffsetForICStats(function, function->abstract_code(),
+                                         code_offset);
+      return;
+    }
+    it.Advance();
   }
 }
 
@@ -1080,18 +1148,19 @@
 namespace {
 
 bool CannotDeoptFromAsmCode(Code* code, JSFunction* function) {
-  return code->is_turbofanned() && function->shared()->asm_function() &&
-         !FLAG_turbo_asm_deoptimization;
+  return code->is_turbofanned() && function->shared()->asm_function();
 }
 
 }  // namespace
 
-FrameSummary::FrameSummary(Object* receiver, JSFunction* function,
-                           AbstractCode* abstract_code, int code_offset,
-                           bool is_constructor, Mode mode)
-    : receiver_(receiver, function->GetIsolate()),
-      function_(function),
-      abstract_code_(abstract_code),
+FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
+    Isolate* isolate, Object* receiver, JSFunction* function,
+    AbstractCode* abstract_code, int code_offset, bool is_constructor,
+    Mode mode)
+    : FrameSummaryBase(isolate, JAVA_SCRIPT),
+      receiver_(receiver, isolate),
+      function_(function, isolate),
+      abstract_code_(abstract_code, isolate),
       code_offset_(code_offset),
       is_constructor_(is_constructor) {
   DCHECK(abstract_code->IsBytecodeArray() ||
@@ -1100,36 +1169,177 @@
          mode == kApproximateSummary);
 }
 
-FrameSummary FrameSummary::GetFirst(JavaScriptFrame* frame) {
+bool FrameSummary::JavaScriptFrameSummary::is_subject_to_debugging() const {
+  return function()->shared()->IsSubjectToDebugging();
+}
+
+int FrameSummary::JavaScriptFrameSummary::SourcePosition() const {
+  return abstract_code()->SourcePosition(code_offset());
+}
+
+int FrameSummary::JavaScriptFrameSummary::SourceStatementPosition() const {
+  return abstract_code()->SourceStatementPosition(code_offset());
+}
+
+Handle<Object> FrameSummary::JavaScriptFrameSummary::script() const {
+  return handle(function_->shared()->script(), isolate());
+}
+
+Handle<String> FrameSummary::JavaScriptFrameSummary::FunctionName() const {
+  return JSFunction::GetDebugName(function_);
+}
+
+Handle<Context> FrameSummary::JavaScriptFrameSummary::native_context() const {
+  return handle(function_->context()->native_context(), isolate());
+}
+
+FrameSummary::WasmFrameSummary::WasmFrameSummary(
+    Isolate* isolate, FrameSummary::Kind kind,
+    Handle<WasmInstanceObject> instance, bool at_to_number_conversion)
+    : FrameSummaryBase(isolate, kind),
+      wasm_instance_(instance),
+      at_to_number_conversion_(at_to_number_conversion) {}
+
+Handle<Object> FrameSummary::WasmFrameSummary::receiver() const {
+  return wasm_instance_->GetIsolate()->global_proxy();
+}
+
+#define WASM_SUMMARY_DISPATCH(type, name)                                      \
+  type FrameSummary::WasmFrameSummary::name() const {                          \
+    DCHECK(kind() == Kind::WASM_COMPILED || kind() == Kind::WASM_INTERPRETED); \
+    return kind() == Kind::WASM_COMPILED                                       \
+               ? static_cast<const WasmCompiledFrameSummary*>(this)->name()    \
+               : static_cast<const WasmInterpretedFrameSummary*>(this)         \
+                     ->name();                                                 \
+  }
+
+WASM_SUMMARY_DISPATCH(uint32_t, function_index)
+WASM_SUMMARY_DISPATCH(int, byte_offset)
+
+#undef WASM_SUMMARY_DISPATCH
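WASM_SUMMARY_DISPATCH stamps out one forwarding accessor per name, downcasting on the kind tag rather than paying for a vtable in a value-like summary type. A reduced sketch of the same pattern (all type names here are illustrative, not the ones in frames.cc):

// Sketch only: kind-tag dispatch via static_cast instead of virtual calls.
#include <cassert>

struct Base {
  enum Kind { COMPILED, INTERPRETED };
  explicit Base(Kind k) : kind_(k) {}
  Kind kind() const { return kind_; }
  inline int index() const;  // defined after the derived types below
 private:
  Kind kind_;
};

struct CompiledImpl : Base {
  CompiledImpl() : Base(COMPILED) {}
  int index() const { return 1; }
};

struct InterpretedImpl : Base {
  InterpretedImpl() : Base(INTERPRETED) {}
  int index() const { return 2; }
};

#define DISPATCH(type, name)                                        \
  inline type Base::name() const {                                  \
    return kind() == COMPILED                                       \
               ? static_cast<const CompiledImpl*>(this)->name()     \
               : static_cast<const InterpretedImpl*>(this)->name(); \
  }
DISPATCH(int, index)
#undef DISPATCH

int main() {
  CompiledImpl c;
  InterpretedImpl i;
  const Base& bc = c;
  const Base& bi = i;
  assert(bc.index() == 1 && bi.index() == 2);
}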
+
+int FrameSummary::WasmFrameSummary::SourcePosition() const {
+  int offset = byte_offset();
+  Handle<WasmCompiledModule> compiled_module(wasm_instance()->compiled_module(),
+                                             isolate());
+  if (compiled_module->is_asm_js()) {
+    offset = WasmCompiledModule::GetAsmJsSourcePosition(
+        compiled_module, function_index(), offset, at_to_number_conversion());
+  } else {
+    offset += compiled_module->GetFunctionOffset(function_index());
+  }
+  return offset;
+}
+
+Handle<Script> FrameSummary::WasmFrameSummary::script() const {
+  return handle(wasm_instance()->compiled_module()->script());
+}
+
+Handle<String> FrameSummary::WasmFrameSummary::FunctionName() const {
+  Handle<WasmCompiledModule> compiled_module(
+      wasm_instance()->compiled_module());
+  return WasmCompiledModule::GetFunctionName(compiled_module->GetIsolate(),
+                                             compiled_module, function_index());
+}
+
+Handle<Context> FrameSummary::WasmFrameSummary::native_context() const {
+  return wasm_instance()->compiled_module()->native_context();
+}
+
+FrameSummary::WasmCompiledFrameSummary::WasmCompiledFrameSummary(
+    Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> code,
+    int code_offset, bool at_to_number_conversion)
+    : WasmFrameSummary(isolate, WASM_COMPILED, instance,
+                       at_to_number_conversion),
+      code_(code),
+      code_offset_(code_offset) {}
+
+uint32_t FrameSummary::WasmCompiledFrameSummary::function_index() const {
+  FixedArray* deopt_data = code()->deoptimization_data();
+  DCHECK_EQ(2, deopt_data->length());
+  DCHECK(deopt_data->get(1)->IsSmi());
+  int val = Smi::cast(deopt_data->get(1))->value();
+  DCHECK_LE(0, val);
+  return static_cast<uint32_t>(val);
+}
+
+int FrameSummary::WasmCompiledFrameSummary::byte_offset() const {
+  return AbstractCode::cast(*code())->SourcePosition(code_offset());
+}
+
+FrameSummary::WasmInterpretedFrameSummary::WasmInterpretedFrameSummary(
+    Isolate* isolate, Handle<WasmInstanceObject> instance,
+    uint32_t function_index, int byte_offset)
+    : WasmFrameSummary(isolate, WASM_INTERPRETED, instance, false),
+      function_index_(function_index),
+      byte_offset_(byte_offset) {}
+
+FrameSummary::~FrameSummary() {
+#define FRAME_SUMMARY_DESTR(kind, type, field, desc) \
+  case kind:                                         \
+    field.~type();                                   \
+    break;
+  switch (base_.kind()) {
+    FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_DESTR)
+    default:
+      UNREACHABLE();
+  }
+#undef FRAME_SUMMARY_DESTR
+}
+
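FrameSummary stores its variants in a union, so the destructor above must run the active member's destructor by hand, switching on the kind tag; FRAME_SUMMARY_VARIANTS contributes one case per variant. A compact sketch of that manual-destruction pattern, with illustrative names:

// Sketch only: tagged union whose holder dispatches explicit destructor calls.
#include <string>
#include <utility>
#include <vector>

struct Holder {
  enum Kind { STRING, VECTOR };

  explicit Holder(std::string s) : kind_(STRING), string_(std::move(s)) {}
  explicit Holder(std::vector<int> v) : kind_(VECTOR), vector_(std::move(v)) {}

  ~Holder() {
    switch (kind_) {
      case STRING: string_.~basic_string(); break;  // explicit destructor call
      case VECTOR: vector_.~vector(); break;
    }
  }

  Kind kind_;
  union {  // members have non-trivial destructors, so nothing runs implicitly
    std::string string_;
    std::vector<int> vector_;
  };
};

int main() {
  Holder a(std::string("wasm"));
  Holder b(std::vector<int>{1, 2, 3});
}  // each ~Holder() destroys exactly the active member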
+FrameSummary FrameSummary::GetTop(const StandardFrame* frame) {
   List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
   frame->Summarize(&frames);
+  DCHECK_LT(0, frames.length());
+  return frames.last();
+}
+
+FrameSummary FrameSummary::GetBottom(const StandardFrame* frame) {
+  return Get(frame, 0);
+}
+
+FrameSummary FrameSummary::GetSingle(const StandardFrame* frame) {
+  List<FrameSummary> frames(1);
+  frame->Summarize(&frames);
+  DCHECK_EQ(1, frames.length());
   return frames.first();
 }
 
-void FrameSummary::Print() {
-  PrintF("receiver: ");
-  receiver_->ShortPrint();
-  PrintF("\nfunction: ");
-  function_->shared()->DebugName()->ShortPrint();
-  PrintF("\ncode: ");
-  abstract_code_->ShortPrint();
-  if (abstract_code_->IsCode()) {
-    Code* code = abstract_code_->GetCode();
-    if (code->kind() == Code::FUNCTION) PrintF(" UNOPT ");
-    if (code->kind() == Code::OPTIMIZED_FUNCTION) {
-      if (function()->shared()->asm_function()) {
-        DCHECK(CannotDeoptFromAsmCode(code, *function()));
-        PrintF(" ASM ");
-      } else {
-        PrintF(" OPT (approximate)");
-      }
-    }
-  } else {
-    PrintF(" BYTECODE ");
-  }
-  PrintF("\npc: %d\n", code_offset_);
+FrameSummary FrameSummary::Get(const StandardFrame* frame, int index) {
+  DCHECK_LE(0, index);
+  List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+  frame->Summarize(&frames);
+  DCHECK_GT(frames.length(), index);
+  return frames[index];
 }
 
+#define FRAME_SUMMARY_DISPATCH(ret, name)        \
+  ret FrameSummary::name() const {               \
+    switch (base_.kind()) {                      \
+      case JAVA_SCRIPT:                          \
+        return java_script_summary_.name();      \
+      case WASM_COMPILED:                        \
+        return wasm_compiled_summary_.name();    \
+      case WASM_INTERPRETED:                     \
+        return wasm_interpreted_summary_.name(); \
+      default:                                   \
+        UNREACHABLE();                           \
+        return ret{};                            \
+    }                                            \
+  }
+
+FRAME_SUMMARY_DISPATCH(Handle<Object>, receiver)
+FRAME_SUMMARY_DISPATCH(int, code_offset)
+FRAME_SUMMARY_DISPATCH(bool, is_constructor)
+FRAME_SUMMARY_DISPATCH(bool, is_subject_to_debugging)
+FRAME_SUMMARY_DISPATCH(Handle<Object>, script)
+FRAME_SUMMARY_DISPATCH(int, SourcePosition)
+FRAME_SUMMARY_DISPATCH(int, SourceStatementPosition)
+FRAME_SUMMARY_DISPATCH(Handle<String>, FunctionName)
+FRAME_SUMMARY_DISPATCH(Handle<Context>, native_context)
+
+#undef FRAME_SUMMARY_DISPATCH
+
 void OptimizedFrame::Summarize(List<FrameSummary>* frames,
                                FrameSummary::Mode mode) const {
   DCHECK(frames->length() == 0);
@@ -1226,8 +1436,9 @@
         code_offset = bailout_id.ToInt();  // Points to current bytecode.
         abstract_code = AbstractCode::cast(shared_info->bytecode_array());
       }
-      FrameSummary summary(receiver, function, abstract_code, code_offset,
-                           is_constructor);
+      FrameSummary::JavaScriptFrameSummary summary(isolate(), receiver,
+                                                   function, abstract_code,
+                                                   code_offset, is_constructor);
       frames->Add(summary);
       is_constructor = false;
     } else if (frame_opcode == Translation::CONSTRUCT_STUB_FRAME) {
@@ -1297,7 +1508,7 @@
   }
 }
 
-void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) const {
+void OptimizedFrame::GetFunctions(List<SharedFunctionInfo*>* functions) const {
   DCHECK(functions->length() == 0);
   DCHECK(is_optimized());
 
@@ -1327,25 +1538,20 @@
   // in the deoptimization translation are ordered bottom-to-top.
   while (jsframe_count != 0) {
     opcode = static_cast<Translation::Opcode>(it.Next());
-    // Skip over operands to advance to the next opcode.
-    it.Skip(Translation::NumberOfOperandsFor(opcode));
     if (opcode == Translation::JS_FRAME ||
         opcode == Translation::INTERPRETED_FRAME) {
+      it.Next();  // Skip bailout id.
       jsframe_count--;
 
-      // The translation commands are ordered and the function is always at the
-      // first position.
-      opcode = static_cast<Translation::Opcode>(it.Next());
+      // The second operand of the frame points to the function.
+      Object* shared = literal_array->get(it.Next());
+      functions->Add(SharedFunctionInfo::cast(shared));
 
-      // Get the correct function in the optimized frame.
-      Object* function;
-      if (opcode == Translation::LITERAL) {
-        function = literal_array->get(it.Next());
-      } else {
-        CHECK_EQ(Translation::STACK_SLOT, opcode);
-        function = StackSlotAt(it.Next());
-      }
-      functions->Add(JSFunction::cast(function));
+      // Skip over remaining operands to advance to the next opcode.
+      it.Skip(Translation::NumberOfOperandsFor(opcode) - 2);
+    } else {
+      // Skip over operands to advance to the next opcode.
+      it.Skip(Translation::NumberOfOperandsFor(opcode));
     }
   }
 }
@@ -1370,8 +1576,8 @@
 int InterpretedFrame::LookupExceptionHandlerInTable(
     int* context_register, HandlerTable::CatchPrediction* prediction) {
   BytecodeArray* bytecode = function()->shared()->bytecode_array();
-  return bytecode->LookupRangeInHandlerTable(GetBytecodeOffset(),
-                                             context_register, prediction);
+  HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
+  return table->LookupRange(GetBytecodeOffset(), context_register, prediction);
 }
 
 int InterpretedFrame::GetBytecodeOffset() const {
@@ -1441,8 +1647,9 @@
   DCHECK(functions->length() == 0);
   AbstractCode* abstract_code =
       AbstractCode::cast(function()->shared()->bytecode_array());
-  FrameSummary summary(receiver(), function(), abstract_code,
-                       GetBytecodeOffset(), IsConstructor());
+  FrameSummary::JavaScriptFrameSummary summary(
+      isolate(), receiver(), function(), abstract_code, GetBytecodeOffset(),
+      IsConstructor());
   functions->Add(summary);
 }
 
@@ -1488,49 +1695,83 @@
   accumulator->Add((mode == OVERVIEW) ? "%5d: " : "[%d]: ", index);
 }
 
-void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
-                      int index) const {
-  accumulator->Add("wasm frame");
+void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
+                              int index) const {
+  PrintIndex(accumulator, mode, index);
+  accumulator->Add("WASM [");
+  Script* script = this->script();
+  accumulator->PrintName(script->name());
+  int pc = static_cast<int>(this->pc() - LookupCode()->instruction_start());
+  Object* instance = this->wasm_instance();
+  Vector<const uint8_t> raw_func_name =
+      WasmInstanceObject::cast(instance)->compiled_module()->GetRawFunctionName(
+          this->function_index());
+  const int kMaxPrintedFunctionName = 64;
+  char func_name[kMaxPrintedFunctionName + 1];
+  int func_name_len = std::min(kMaxPrintedFunctionName, raw_func_name.length());
+  memcpy(func_name, raw_func_name.start(), func_name_len);
+  func_name[func_name_len] = '\0';
+  accumulator->Add("], function #%u ('%s'), pc=%p, pos=%d\n",
+                   this->function_index(), func_name, pc, this->position());
+  if (mode != OVERVIEW) accumulator->Add("\n");
 }
 
-Code* WasmFrame::unchecked_code() const {
-  return static_cast<Code*>(isolate()->FindCodeObject(pc()));
+Code* WasmCompiledFrame::unchecked_code() const {
+  return isolate()->FindCodeObject(pc());
 }
 
-void WasmFrame::Iterate(ObjectVisitor* v) const { IterateCompiledFrame(v); }
+void WasmCompiledFrame::Iterate(ObjectVisitor* v) const {
+  IterateCompiledFrame(v);
+}
 
-Address WasmFrame::GetCallerStackPointer() const {
+Address WasmCompiledFrame::GetCallerStackPointer() const {
   return fp() + ExitFrameConstants::kCallerSPOffset;
 }
 
-Object* WasmFrame::wasm_instance() const {
-  Object* ret = wasm::GetOwningWasmInstance(LookupCode());
-  if (ret == nullptr) ret = isolate()->heap()->undefined_value();
-  return ret;
+WasmInstanceObject* WasmCompiledFrame::wasm_instance() const {
+  WasmInstanceObject* obj = wasm::GetOwningWasmInstance(LookupCode());
+  // This is a live stack frame; it must have a live instance.
+  DCHECK_NOT_NULL(obj);
+  return obj;
 }
 
-uint32_t WasmFrame::function_index() const {
-  FixedArray* deopt_data = LookupCode()->deoptimization_data();
-  DCHECK(deopt_data->length() == 2);
-  return Smi::cast(deopt_data->get(1))->value();
+uint32_t WasmCompiledFrame::function_index() const {
+  return FrameSummary::GetSingle(this).AsWasmCompiled().function_index();
 }
 
-Script* WasmFrame::script() const {
-  Handle<JSObject> instance(JSObject::cast(wasm_instance()), isolate());
-  return *wasm::GetScript(instance);
+Script* WasmCompiledFrame::script() const {
+  return wasm_instance()->compiled_module()->script();
 }
 
-int WasmFrame::position() const {
-  int position = StandardFrame::position();
-  if (wasm::WasmIsAsmJs(wasm_instance(), isolate())) {
-    Handle<JSObject> instance(JSObject::cast(wasm_instance()), isolate());
-    position =
-        wasm::GetAsmWasmSourcePosition(instance, function_index(), position);
-  }
-  return position;
+int WasmCompiledFrame::position() const {
+  return FrameSummary::GetSingle(this).SourcePosition();
 }
 
-int WasmFrame::LookupExceptionHandlerInTable(int* stack_slots) {
+void WasmCompiledFrame::Summarize(List<FrameSummary>* functions,
+                                  FrameSummary::Mode mode) const {
+  DCHECK_EQ(0, functions->length());
+  Handle<Code> code(LookupCode(), isolate());
+  int offset = static_cast<int>(pc() - code->instruction_start());
+  Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
+  FrameSummary::WasmCompiledFrameSummary summary(
+      isolate(), instance, code, offset, at_to_number_conversion());
+  functions->Add(summary);
+}
+
+bool WasmCompiledFrame::at_to_number_conversion() const {
+  // Check whether our callee is a WASM_TO_JS frame, and whether this frame
+  // is at the ToNumber conversion call.
+  Address callee_pc = this->callee_pc();
+  Code* code = callee_pc ? isolate()->FindCodeObject(callee_pc) : nullptr;
+  if (!code || code->kind() != Code::WASM_TO_JS_FUNCTION) return false;
+  int offset = static_cast<int>(callee_pc - code->instruction_start());
+  int pos = AbstractCode::cast(code)->SourcePosition(offset);
+  // The imported call has position 0, the ToNumber conversion position 1.
+  DCHECK(pos == 0 || pos == 1);
+  return pos != 0;
+}
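+// Reading aid: the 0/1 convention checked above comes from the two call
+// sites a WASM_TO_JS wrapper emits; the helper names in this sketch are
+// hypothetical, only the recorded positions are taken from the code above.
+//
+//   RecordSourcePosition(0);           // call to the imported JS function
+//   CallImportedFunction();
+//   RecordSourcePosition(1);           // ToNumber conversion of the result
+//   CallToNumberBuiltin();
+//
+// at_to_number_conversion() recovers which site the callee pc belongs to via
+// AbstractCode::SourcePosition().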
+
+int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
   DCHECK_NOT_NULL(stack_slots);
   Code* code = LookupCode();
   HandlerTable* table = HandlerTable::cast(code->handler_table());
@@ -1539,6 +1780,56 @@
   return table->LookupReturn(pc_offset);
 }
 
+void WasmInterpreterEntryFrame::Iterate(ObjectVisitor* v) const {
+  IterateCompiledFrame(v);
+}
+
+void WasmInterpreterEntryFrame::Print(StringStream* accumulator, PrintMode mode,
+                                      int index) const {
+  PrintIndex(accumulator, mode, index);
+  accumulator->Add("WASM INTERPRETER ENTRY [");
+  Script* script = this->script();
+  accumulator->PrintName(script->name());
+  accumulator->Add("]");
+  if (mode != OVERVIEW) accumulator->Add("\n");
+}
+
+void WasmInterpreterEntryFrame::Summarize(List<FrameSummary>* functions,
+                                          FrameSummary::Mode mode) const {
+  Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
+  std::vector<std::pair<uint32_t, int>> interpreted_stack =
+      instance->debug_info()->GetInterpretedStack(fp());
+
+  for (auto& e : interpreted_stack) {
+    FrameSummary::WasmInterpretedFrameSummary summary(isolate(), instance,
+                                                      e.first, e.second);
+    functions->Add(summary);
+  }
+}
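+// Note: one interpreter entry frame can carry several interpreted
+// activations; each (function index, byte offset) pair becomes one summary,
+// already in the bottom-to-top order that Summarize promises. E.g. with the
+// interpreter executing f() called from g() (hypothetical functions):
+//   GetInterpretedStack -> {(g_index, g_off), (f_index, f_off)}
+//   functions->last()   -> the innermost activation, f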
+
+Code* WasmInterpreterEntryFrame::unchecked_code() const {
+  return isolate()->FindCodeObject(pc());
+}
+
+WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
+  WasmInstanceObject* ret = wasm::GetOwningWasmInstance(LookupCode());
+  // This is a live stack frame; there must be a live wasm instance available.
+  DCHECK_NOT_NULL(ret);
+  return ret;
+}
+
+Script* WasmInterpreterEntryFrame::script() const {
+  return wasm_instance()->compiled_module()->script();
+}
+
+int WasmInterpreterEntryFrame::position() const {
+  return FrameSummary::GetBottom(this).AsWasmInterpreted().SourcePosition();
+}
+
+Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
+  return fp() + ExitFrameConstants::kCallerSPOffset;
+}
+
 namespace {
 
 
diff --git a/src/frames.h b/src/frames.h
index 1daa364..2255b81 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -29,9 +29,10 @@
 
 // Forward declarations.
 class ExternalCallbackScope;
+class Isolate;
 class StackFrameIteratorBase;
 class ThreadLocalTop;
-class Isolate;
+class WasmInstanceObject;
 
 class InnerPointerToCodeCache {
  public:
@@ -66,13 +67,6 @@
 };
 
 
-// Every try-block pushes the context register.
-class TryBlockConstant : public AllStatic {
- public:
-  static const int kElementCount = 1;
-};
-
-
 class StackHandlerConstants : public AllStatic {
  public:
   static const int kNextOffset = 0 * kPointerSize;
@@ -103,9 +97,10 @@
   V(EXIT, ExitFrame)                                     \
   V(JAVA_SCRIPT, JavaScriptFrame)                        \
   V(OPTIMIZED, OptimizedFrame)                           \
-  V(WASM, WasmFrame)                                     \
+  V(WASM_COMPILED, WasmCompiledFrame)                    \
   V(WASM_TO_JS, WasmToJsFrame)                           \
   V(JS_TO_WASM, JsToWasmFrame)                           \
+  V(WASM_INTERPRETER_ENTRY, WasmInterpreterEntryFrame)   \
   V(INTERPRETED, InterpretedFrame)                       \
   V(STUB, StubFrame)                                     \
   V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \
@@ -442,20 +437,59 @@
   };
 
   // Used to mark the outermost JS entry frame.
+  //
+  // The mark is an opaque value that should be pushed onto the stack directly,
+  // carefully crafted to not be interpreted as a tagged pointer.
   enum JsFrameMarker {
-    INNER_JSENTRY_FRAME = 0,
-    OUTERMOST_JSENTRY_FRAME = 1
+    INNER_JSENTRY_FRAME = (0 << kSmiTagSize) | kSmiTag,
+    OUTERMOST_JSENTRY_FRAME = (1 << kSmiTagSize) | kSmiTag
   };
+  STATIC_ASSERT((INNER_JSENTRY_FRAME & kHeapObjectTagMask) != kHeapObjectTag);
+  STATIC_ASSERT((OUTERMOST_JSENTRY_FRAME & kHeapObjectTagMask) !=
+                kHeapObjectTag);
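+  // Worked check, assuming the usual tagging constants (kSmiTag == 0,
+  // kSmiTagSize == 1, kHeapObjectTag == 1, kHeapObjectTagMask == 3):
+  //   INNER_JSENTRY_FRAME     = (0 << 1) | 0 = 0b00; 0b00 & 0b11 = 0
+  //   OUTERMOST_JSENTRY_FRAME = (1 << 1) | 0 = 0b10; 0b10 & 0b11 = 2
+  // Neither result equals kHeapObjectTag, which is exactly what the
+  // STATIC_ASSERTs above verify.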
 
   struct State {
-    State() : sp(NULL), fp(NULL), pc_address(NULL),
-              constant_pool_address(NULL) { }
-    Address sp;
-    Address fp;
-    Address* pc_address;
-    Address* constant_pool_address;
+    Address sp = nullptr;
+    Address fp = nullptr;
+    Address* pc_address = nullptr;
+    Address* callee_pc_address = nullptr;
+    Address* constant_pool_address = nullptr;
   };
 
+  // Convert a stack frame type to a marker that can be stored on the stack.
+  //
+  // The marker is an opaque value, not intended to be interpreted in any way
+  // except being checked by IsTypeMarker or converted by MarkerToType.
+  // It has the same tagging as Smis, so any value in the marker slot that
+  // does not pass IsTypeMarker can instead be interpreted as a tagged pointer.
+  //
+  // Note that the marker is not a Smi: Smis on 64-bit architectures are stored
+  // in the top 32 bits of a 64-bit value, which in turn makes them expensive
+  // (in terms of code/instruction size) to push as immediates onto the stack.
+  static int32_t TypeToMarker(Type type) {
+    DCHECK_GE(type, 0);
+    return (type << kSmiTagSize) | kSmiTag;
+  }
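+  // Concretely, on a typical 64-bit configuration the Smi 2 is encoded as
+  // 0x0000000200000000 (value in the upper 32 bits), while TypeToMarker(2)
+  // is just 0x4 -- small enough for a single-byte push immediate.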
+
+  // Convert a marker back to a stack frame type.
+  //
+  // Unlike the return value of TypeToMarker, this takes an intptr_t, as that is
+  // the type of the value on the stack.
+  static Type MarkerToType(intptr_t marker) {
+    DCHECK(IsTypeMarker(marker));
+    return static_cast<Type>(marker >> kSmiTagSize);
+  }
+
+  // Check if a marker is a stack frame type marker or a tagged pointer.
+  //
+  // Returns true if the given marker is tagged as a stack frame type marker,
+  // and should be converted back to a stack frame type using MarkerToType.
+  // Otherwise, the value is a tagged function pointer.
+  static bool IsTypeMarker(intptr_t function_or_marker) {
+    return (function_or_marker & kSmiTagMask) == kSmiTag;
+  }
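+  // Round-trip sketch of the three helpers above (illustrative only):
+  //
+  //   intptr_t marker = TypeToMarker(WASM_COMPILED);
+  //   DCHECK(IsTypeMarker(marker));            // low bit is kSmiTag
+  //   DCHECK_EQ(WASM_COMPILED, MarkerToType(marker));
+  //
+  // A heap pointer read from the same stack slot has its low bit set
+  // (kHeapObjectTag), so it fails IsTypeMarker and is treated as a tagged
+  // function pointer.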
+
   // Copy constructor; it breaks the connection to host iterator
   // (as an iterator usually lives on stack).
   StackFrame(const StackFrame& original) {
@@ -470,9 +504,12 @@
   bool is_exit() const { return type() == EXIT; }
   bool is_optimized() const { return type() == OPTIMIZED; }
   bool is_interpreted() const { return type() == INTERPRETED; }
-  bool is_wasm() const { return type() == WASM; }
+  bool is_wasm_compiled() const { return type() == WASM_COMPILED; }
   bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
   bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
+  bool is_wasm_interpreter_entry() const {
+    return type() == WASM_INTERPRETER_ENTRY;
+  }
   bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
   bool is_builtin() const { return type() == BUILTIN; }
   bool is_internal() const { return type() == INTERNAL; }
@@ -488,10 +525,17 @@
     return (type == JAVA_SCRIPT) || (type == OPTIMIZED) ||
            (type == INTERPRETED) || (type == BUILTIN);
   }
+  bool is_wasm() const {
+    Type type = this->type();
+    return type == WASM_COMPILED || type == WASM_INTERPRETER_ENTRY;
+  }
 
   // Accessors.
   Address sp() const { return state_.sp; }
   Address fp() const { return state_.fp; }
+  Address callee_pc() const {
+    return state_.callee_pc_address ? *state_.callee_pc_address : nullptr;
+  }
   Address caller_sp() const { return GetCallerStackPointer(); }
 
   // If this frame is optimized and was dynamically aligned return its old
@@ -733,7 +777,7 @@
   friend class StackFrameIteratorBase;
 };
 
-class JavaScriptFrame;
+class StandardFrame;
 
 class FrameSummary BASE_EMBEDDED {
  public:
@@ -744,26 +788,153 @@
   // information, but it might miss frames.
   enum Mode { kExactSummary, kApproximateSummary };
 
-  FrameSummary(Object* receiver, JSFunction* function,
-               AbstractCode* abstract_code, int code_offset,
-               bool is_constructor, Mode mode = kExactSummary);
+// Subclasses for the different summary kinds:
+#define FRAME_SUMMARY_VARIANTS(F)                                             \
+  F(JAVA_SCRIPT, JavaScriptFrameSummary, java_script_summary_, JavaScript)    \
+  F(WASM_COMPILED, WasmCompiledFrameSummary, wasm_compiled_summary_,          \
+    WasmCompiled)                                                             \
+  F(WASM_INTERPRETED, WasmInterpretedFrameSummary, wasm_interpreted_summary_, \
+    WasmInterpreted)
 
-  static FrameSummary GetFirst(JavaScriptFrame* frame);
+#define FRAME_SUMMARY_KIND(kind, type, field, desc) kind,
+  enum Kind { FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_KIND) };
+#undef FRAME_SUMMARY_KIND
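+  // For reference, the X-macro expansion above is equivalent to:
+  //   enum Kind { JAVA_SCRIPT, WASM_COMPILED, WASM_INTERPRETED };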
 
-  Handle<Object> receiver() const { return receiver_; }
-  Handle<JSFunction> function() const { return function_; }
-  Handle<AbstractCode> abstract_code() const { return abstract_code_; }
-  int code_offset() const { return code_offset_; }
-  bool is_constructor() const { return is_constructor_; }
+  class FrameSummaryBase {
+   public:
+    FrameSummaryBase(Isolate* isolate, Kind kind)
+        : isolate_(isolate), kind_(kind) {}
+    Isolate* isolate() const { return isolate_; }
+    Kind kind() const { return kind_; }
 
-  void Print();
+   private:
+    Isolate* isolate_;
+    Kind kind_;
+  };
+
+  class JavaScriptFrameSummary : public FrameSummaryBase {
+   public:
+    JavaScriptFrameSummary(Isolate* isolate, Object* receiver,
+                           JSFunction* function, AbstractCode* abstract_code,
+                           int code_offset, bool is_constructor,
+                           Mode mode = kExactSummary);
+
+    Handle<Object> receiver() const { return receiver_; }
+    Handle<JSFunction> function() const { return function_; }
+    Handle<AbstractCode> abstract_code() const { return abstract_code_; }
+    int code_offset() const { return code_offset_; }
+    bool is_constructor() const { return is_constructor_; }
+    bool is_subject_to_debugging() const;
+    int SourcePosition() const;
+    int SourceStatementPosition() const;
+    Handle<Object> script() const;
+    Handle<String> FunctionName() const;
+    Handle<Context> native_context() const;
+
+   private:
+    Handle<Object> receiver_;
+    Handle<JSFunction> function_;
+    Handle<AbstractCode> abstract_code_;
+    int code_offset_;
+    bool is_constructor_;
+  };
+
+  class WasmFrameSummary : public FrameSummaryBase {
+   protected:
+    WasmFrameSummary(Isolate*, Kind, Handle<WasmInstanceObject>,
+                     bool at_to_number_conversion);
+
+   public:
+    Handle<Object> receiver() const;
+    uint32_t function_index() const;
+    int byte_offset() const;
+    bool is_constructor() const { return false; }
+    bool is_subject_to_debugging() const { return true; }
+    int SourcePosition() const;
+    int SourceStatementPosition() const { return SourcePosition(); }
+    Handle<Script> script() const;
+    Handle<WasmInstanceObject> wasm_instance() const { return wasm_instance_; }
+    Handle<String> FunctionName() const;
+    Handle<Context> native_context() const;
+    bool at_to_number_conversion() const { return at_to_number_conversion_; }
+
+   private:
+    Handle<WasmInstanceObject> wasm_instance_;
+    bool at_to_number_conversion_;
+  };
+
+  class WasmCompiledFrameSummary : public WasmFrameSummary {
+   public:
+    WasmCompiledFrameSummary(Isolate*, Handle<WasmInstanceObject>, Handle<Code>,
+                             int code_offset, bool at_to_number_conversion);
+    uint32_t function_index() const;
+    Handle<Code> code() const { return code_; }
+    int code_offset() const { return code_offset_; }
+    int byte_offset() const;
+
+   private:
+    Handle<Code> code_;
+    int code_offset_;
+  };
+
+  class WasmInterpretedFrameSummary : public WasmFrameSummary {
+   public:
+    WasmInterpretedFrameSummary(Isolate*, Handle<WasmInstanceObject>,
+                                uint32_t function_index, int byte_offset);
+    uint32_t function_index() const { return function_index_; }
+    int code_offset() const { return byte_offset_; }
+    int byte_offset() const { return byte_offset_; }
+
+   private:
+    uint32_t function_index_;
+    int byte_offset_;
+  };
+
+#undef FRAME_SUMMARY_FIELD
+#define FRAME_SUMMARY_CONS(kind, type, field, desc) \
+  FrameSummary(type summ) : field(summ) {}  // NOLINT
+  FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_CONS)
+#undef FRAME_SUMMARY_CONS
+
+  ~FrameSummary();
+
+  static FrameSummary GetTop(const StandardFrame* frame);
+  static FrameSummary GetBottom(const StandardFrame* frame);
+  static FrameSummary GetSingle(const StandardFrame* frame);
+  static FrameSummary Get(const StandardFrame* frame, int index);
+
+  // Dispatched accessors.
+  Handle<Object> receiver() const;
+  int code_offset() const;
+  bool is_constructor() const;
+  bool is_subject_to_debugging() const;
+  Handle<Object> script() const;
+  int SourcePosition() const;
+  int SourceStatementPosition() const;
+  Handle<String> FunctionName() const;
+  Handle<Context> native_context() const;
+
+#define FRAME_SUMMARY_CAST(kind_, type, field, desc)      \
+  bool Is##desc() const { return base_.kind() == kind_; } \
+  const type& As##desc() const {                          \
+    DCHECK_EQ(base_.kind(), kind_);                       \
+    return field;                                         \
+  }
+  FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_CAST)
+#undef FRAME_SUMMARY_CAST
+
+  bool IsWasm() const { return IsWasmCompiled() || IsWasmInterpreted(); }
+  const WasmFrameSummary& AsWasm() const {
+    if (IsWasmCompiled()) return AsWasmCompiled();
+    return AsWasmInterpreted();
+  }
 
  private:
-  Handle<Object> receiver_;
-  Handle<JSFunction> function_;
-  Handle<AbstractCode> abstract_code_;
-  int code_offset_;
-  bool is_constructor_;
+#define FRAME_SUMMARY_FIELD(kind, type, field, desc) type field;
+  union {
+    FrameSummaryBase base_;
+    FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_FIELD)
+  };
 };
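+// Usage sketch for the tagged union above (hypothetical caller; `frame` is
+// some StandardFrame*):
+//
+//   FrameSummary summary = FrameSummary::GetTop(frame);
+//   if (summary.IsWasm()) {
+//     uint32_t func_index = summary.AsWasm().function_index();
+//   } else {
+//     Handle<JSFunction> function = summary.AsJavaScript().function();
+//   }
+//   int pos = summary.SourcePosition();  // dispatched, works for any kind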
 
 class StandardFrame : public StackFrame {
@@ -791,6 +962,13 @@
   // Check if this frame is a constructor frame invoked through 'new'.
   virtual bool IsConstructor() const;
 
+  // Build a list with summaries for this frame including all inlined frames.
+  // The summaries are ordered bottom-to-top (i.e. frames->last() is the
+  // top-most activation; the caller comes before the callee).
+  virtual void Summarize(
+      List<FrameSummary>* frames,
+      FrameSummary::Mode mode = FrameSummary::kExactSummary) const;
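+  // E.g. for an optimized frame in which f() was inlined into g()
+  // (hypothetical functions), this yields [g, f]: frames->last() is the
+  // innermost activation f.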
+
   static StandardFrame* cast(StackFrame* frame) {
     DCHECK(frame->is_standard());
     return static_cast<StandardFrame*>(frame);
@@ -840,10 +1018,9 @@
  public:
   Type type() const override { return JAVA_SCRIPT; }
 
-  // Build a list with summaries for this frame including all inlined frames.
-  virtual void Summarize(
+  void Summarize(
       List<FrameSummary>* frames,
-      FrameSummary::Mode mode = FrameSummary::kExactSummary) const;
+      FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
 
   // Accessors.
   virtual JSFunction* function() const;
@@ -863,9 +1040,6 @@
   inline Object* GetOperand(int index) const;
   inline int ComputeOperandsCount() const;
 
-  // Generator support to preserve operand stack.
-  void SaveOperandStack(FixedArray* store) const;
-
   // Debugger access.
   void SetParameterValue(int index, Object* value) const;
 
@@ -892,12 +1066,13 @@
   // Determine the code for the frame.
   Code* unchecked_code() const override;
 
-  // Return a list with JSFunctions of this frame.
-  virtual void GetFunctions(List<JSFunction*>* functions) const;
+  // Return a list with {SharedFunctionInfo} objects of this frame.
+  virtual void GetFunctions(List<SharedFunctionInfo*>* functions) const;
+
+  void GetFunctions(List<Handle<SharedFunctionInfo>>* functions) const;
 
   // Lookup exception handler for current {pc}, returns -1 if none found. Also
   // returns data associated with the handler site specific to the frame type:
-  //  - JavaScriptFrame : Data is the stack depth at entry of the try-block.
   //  - OptimizedFrame  : Data is the stack slot count of the entire frame.
   //  - InterpretedFrame: Data is the register index holding the context.
   virtual int LookupExceptionHandlerInTable(
@@ -920,6 +1095,11 @@
   static void PrintTop(Isolate* isolate, FILE* file, bool print_args,
                        bool print_line_number);
 
+  static void CollectFunctionAndOffsetForICStats(JSFunction* function,
+                                                 AbstractCode* code,
+                                                 int code_offset);
+  static void CollectTopFrameForICStats(Isolate* isolate);
+
  protected:
   inline explicit JavaScriptFrame(StackFrameIteratorBase* iterator);
 
@@ -968,10 +1148,10 @@
   // GC support.
   void Iterate(ObjectVisitor* v) const override;
 
-  // Return a list with JSFunctions of this frame.
+  // Return a list with {SharedFunctionInfo} objects of this frame.
   // The functions are ordered bottom-to-top (i.e. functions.last()
   // is the top-most activation)
-  void GetFunctions(List<JSFunction*>* functions) const override;
+  void GetFunctions(List<SharedFunctionInfo*>* functions) const override;
 
   void Summarize(
       List<FrameSummary>* frames,
@@ -1094,9 +1274,9 @@
   friend class StackFrameIteratorBase;
 };
 
-class WasmFrame : public StandardFrame {
+class WasmCompiledFrame : public StandardFrame {
  public:
-  Type type() const override { return WASM; }
+  Type type() const override { return WASM_COMPILED; }
 
   // GC support.
   void Iterate(ObjectVisitor* v) const override;
@@ -1113,18 +1293,59 @@
   Code* unchecked_code() const override;
 
   // Accessors.
-  Object* wasm_instance() const;
+  WasmInstanceObject* wasm_instance() const;
   uint32_t function_index() const;
   Script* script() const override;
   int position() const override;
+  bool at_to_number_conversion() const;
 
-  static WasmFrame* cast(StackFrame* frame) {
-    DCHECK(frame->is_wasm());
-    return static_cast<WasmFrame*>(frame);
+  void Summarize(List<FrameSummary>* frames,
+                 FrameSummary::Mode mode) const override;
+
+  static WasmCompiledFrame* cast(StackFrame* frame) {
+    DCHECK(frame->is_wasm_compiled());
+    return static_cast<WasmCompiledFrame*>(frame);
   }
 
  protected:
-  inline explicit WasmFrame(StackFrameIteratorBase* iterator);
+  inline explicit WasmCompiledFrame(StackFrameIteratorBase* iterator);
+
+  Address GetCallerStackPointer() const override;
+
+ private:
+  friend class StackFrameIteratorBase;
+};
+
+class WasmInterpreterEntryFrame : public StandardFrame {
+ public:
+  Type type() const override { return WASM_INTERPRETER_ENTRY; }
+
+  // GC support.
+  void Iterate(ObjectVisitor* v) const override;
+
+  // Printing support.
+  void Print(StringStream* accumulator, PrintMode mode,
+             int index) const override;
+
+  void Summarize(
+      List<FrameSummary>* frames,
+      FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
+
+  // Determine the code for the frame.
+  Code* unchecked_code() const override;
+
+  // Accessors.
+  WasmInstanceObject* wasm_instance() const;
+  Script* script() const override;
+  int position() const override;
+
+  static WasmInterpreterEntryFrame* cast(StackFrame* frame) {
+    DCHECK(frame->is_wasm_interpreter_entry());
+    return static_cast<WasmInterpreterEntryFrame*>(frame);
+  }
+
+ protected:
+  inline explicit WasmInterpreterEntryFrame(StackFrameIteratorBase* iterator);
 
   Address GetCallerStackPointer() const override;
 
@@ -1283,8 +1504,6 @@
  public:
   inline explicit JavaScriptFrameIterator(Isolate* isolate);
   inline JavaScriptFrameIterator(Isolate* isolate, ThreadLocalTop* top);
-  // Skip frames until the frame with the given id is reached.
-  JavaScriptFrameIterator(Isolate* isolate, StackFrame::Id id);
 
   inline JavaScriptFrame* frame() const;
 
@@ -1306,6 +1525,7 @@
 class StackTraceFrameIterator BASE_EMBEDDED {
  public:
   explicit StackTraceFrameIterator(Isolate* isolate);
+  // Skip frames until the frame with the given id is reached.
   StackTraceFrameIterator(Isolate* isolate, StackFrame::Id id);
   bool done() const { return iterator_.done(); }
   void Advance();
@@ -1315,7 +1535,6 @@
   inline bool is_javascript() const;
   inline bool is_wasm() const;
   inline JavaScriptFrame* javascript_frame() const;
-  inline WasmFrame* wasm_frame() const;
 
   // Advance to the frame holding the arguments for the current
   // frame. This only affects the current frame if it is a javascript frame and
diff --git a/src/full-codegen/arm/full-codegen-arm.cc b/src/full-codegen/arm/full-codegen-arm.cc
index 22c991b..aff8942 100644
--- a/src/full-codegen/arm/full-codegen-arm.cc
+++ b/src/full-codegen/arm/full-codegen-arm.cc
@@ -4,15 +4,16 @@
 
 #if V8_TARGET_ARCH_ARM
 
-#include "src/full-codegen/full-codegen.h"
 #include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
 
 #include "src/arm/code-stubs-arm.h"
@@ -131,21 +132,19 @@
   // Increment invocation count for the function.
   {
     Comment cmnt(masm_, "[ Increment invocation count");
-    __ ldr(r2, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
-    __ ldr(r2, FieldMemOperand(r2, LiteralsArray::kFeedbackVectorOffset));
-    __ ldr(r9, FieldMemOperand(r2, TypeFeedbackVector::kInvocationCountIndex *
-                                           kPointerSize +
-                                       TypeFeedbackVector::kHeaderSize));
+    __ ldr(r2, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
+    __ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
+    __ ldr(r9, FieldMemOperand(
+                   r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                           FeedbackVector::kHeaderSize));
     __ add(r9, r9, Operand(Smi::FromInt(1)));
-    __ str(r9, FieldMemOperand(r2, TypeFeedbackVector::kInvocationCountIndex *
-                                           kPointerSize +
-                                       TypeFeedbackVector::kHeaderSize));
+    __ str(r9, FieldMemOperand(
+                   r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                           FeedbackVector::kHeaderSize));
   }
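+  // Rough C++ reading of the loads/store above (accessor names approximate):
+  //   cell   = function->feedback_vector_cell();  // kFeedbackVectorOffset
+  //   vector = cell->value();                     // Cell::kValueOffset
+  //   count  = vector[kInvocationCountIndex];     // stored as a Smi
+  //   vector[kInvocationCountIndex] = count + 1;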
 
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
-    // Generators allocate locals, if any, in context slots.
-    DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
     OperandStackDepthIncrement(locals_count);
     if (locals_count > 0) {
       if (locals_count >= 128) {
@@ -200,15 +199,18 @@
       if (info->scope()->new_target_var() != nullptr) {
         __ push(r3);  // Preserve new target.
       }
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info->scope()->scope_type());
         __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
                Operand(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ push(r1);
+        __ Push(Smi::FromInt(info->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
       if (info->scope()->new_target_var() != nullptr) {
@@ -255,37 +257,10 @@
   PrepareForBailoutForId(BailoutId::FunctionContext(),
                          BailoutState::NO_REGISTERS);
 
-  // Possibly set up a local binding to the this function which is used in
-  // derived constructors with super calls.
-  Variable* this_function_var = info->scope()->this_function_var();
-  if (this_function_var != nullptr) {
-    Comment cmnt(masm_, "[ This function");
-    if (!function_in_register_r1) {
-      __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers register again, keep it marked as such.
-    }
-    SetVar(this_function_var, r1, r0, r2);
-  }
-
-  // Possibly set up a local binding to the new target value.
-  Variable* new_target_var = info->scope()->new_target_var();
-  if (new_target_var != nullptr) {
-    Comment cmnt(masm_, "[ new.target");
-    SetVar(new_target_var, r3, r0, r2);
-  }
-
-  // Possibly allocate RestParameters
-  Variable* rest_param = info->scope()->rest_parameter();
-  if (rest_param != nullptr) {
-    Comment cmnt(masm_, "[ Allocate rest parameter array");
-    if (!function_in_register_r1) {
-      __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-    }
-    FastNewRestParameterStub stub(isolate());
-    __ CallStub(&stub);
-    function_in_register_r1 = false;
-    SetVar(rest_param, r0, r1, r2);
-  }
+  // We don't support new.target, rest parameters, or the this-function
+  // variable here.
+  DCHECK_NULL(info->scope()->new_target_var());
+  DCHECK_NULL(info->scope()->rest_parameter());
+  DCHECK_NULL(info->scope()->this_function_var());
 
   Variable* arguments = info->scope()->arguments();
   if (arguments != NULL) {
@@ -296,14 +271,16 @@
       __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     }
     if (is_strict(language_mode()) || !has_simple_parameters()) {
-      FastNewStrictArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+      __ Call(callable.code(), RelocInfo::CODE_TARGET);
+      RestoreContext();
     } else if (literal()->has_duplicate_parameters()) {
       __ Push(r1);
       __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
     } else {
-      FastNewSloppyArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+      __ Call(callable.code(), RelocInfo::CODE_TARGET);
+      RestoreContext();
     }
 
     SetVar(arguments, r0, r1, r2);
@@ -554,10 +531,8 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
-         !lit->IsUndetectable());
-  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
-      lit->IsFalse(isolate())) {
+  DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+  if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ b(false_label_);
   } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ b(true_label_);
@@ -783,10 +758,12 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
       globals_->Add(isolate()->factory()->undefined_value(), zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
     }
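+    // Layout note (inferred from the Adds here and in
+    // VisitFunctionDeclaration): each unallocated global now occupies four
+    // consecutive globals_ entries:
+    //   [0] variable name
+    //   [1] feedback slot, as a Smi
+    //   [2] literal feedback slot as a Smi (undefined for plain variables)
+    //   [3] initial value (undefined here; the function for function
+    //       declarations)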
     case VariableLocation::PARAMETER:
@@ -809,17 +786,7 @@
       }
       break;
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ VariableDeclaration");
-      DCHECK_EQ(VAR, variable->mode());
-      DCHECK(!variable->binding_needs_init());
-      __ mov(r2, Operand(variable->name()));
-      __ Push(r2);
-      __ CallRuntime(Runtime::kDeclareEvalVar);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -832,9 +799,16 @@
   Variable* variable = proxy->var();
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+      // We need the slot where the literals array lives, too.
+      slot = declaration->fun()->LiteralFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
       Handle<SharedFunctionInfo> function =
           Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
@@ -870,17 +844,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      __ mov(r2, Operand(variable->name()));
-      PushOperand(r2);
-      // Push initial value for function declaration.
-      VisitForStackValue(declaration->fun());
-      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -891,7 +855,7 @@
   // Call the runtime to declare the globals.
   __ mov(r1, Operand(pairs));
   __ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
-  __ EmitLoadTypeFeedbackVector(r2);
+  __ EmitLoadFeedbackVector(r2);
   __ Push(r1, r0, r2);
   __ CallRuntime(Runtime::kDeclareGlobals);
   // Return value is ignored.
@@ -996,7 +960,7 @@
   Comment cmnt(masm_, "[ ForInStatement");
   SetStatementPosition(stmt, SKIP_BREAK);
 
-  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  FeedbackSlot slot = stmt->ForInFeedbackSlot();
 
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1114,8 +1078,8 @@
 
   // We need to filter the key, record slow-path here.
   int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(r3);
-  __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ EmitLoadFeedbackVector(r3);
+  __ mov(r2, Operand(FeedbackVector::MegamorphicSentinel(isolate())));
   __ str(r2, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(vector_index)));
 
   // r0 contains the key. The receiver in r1 is the second argument to the
@@ -1163,9 +1127,8 @@
   decrement_loop_depth();
 }
 
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
-                                          FeedbackVectorSlot slot) {
+                                          FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
   __ ldr(StoreDescriptor::ValueRegister(),
@@ -1173,10 +1136,9 @@
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
 void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
                                                      int offset,
-                                                     FeedbackVectorSlot slot) {
+                                                     FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Move(StoreDescriptor::ReceiverRegister(), r0);
   __ ldr(StoreDescriptor::ValueRegister(),
@@ -1184,92 +1146,6 @@
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
-                                                      TypeofMode typeof_mode,
-                                                      Label* slow) {
-  Register current = cp;
-  Register next = r1;
-  Register temp = r2;
-
-  int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
-  for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (s->calls_sloppy_eval()) {
-      // Check that extension is "the hole".
-      __ ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
-      __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-    }
-    // Load next context in chain.
-    __ ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
-    // Walk the rest of the chain without clobbering cp.
-    current = next;
-    to_check--;
-  }
-
-  // All extension objects were empty and it is safe to use a normal global
-  // load machinery.
-  EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
-                                                                Label* slow) {
-  DCHECK(var->IsContextSlot());
-  Register context = cp;
-  Register next = r3;
-  Register temp = r4;
-
-  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->NeedsContext()) {
-      if (s->calls_sloppy_eval()) {
-        // Check that extension is "the hole".
-        __ ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
-        __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-      }
-      __ ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-      // Walk the rest of the chain without clobbering cp.
-      context = next;
-    }
-  }
-  // Check that last extension is "the hole".
-  __ ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
-  __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-
-  // This function is used only for loads, not stores, so it's safe to
-  // return an cp-based operand (the write barrier cannot be allowed to
-  // destroy the cp register).
-  return ContextMemOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
-                                                  TypeofMode typeof_mode,
-                                                  Label* slow, Label* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  Variable* var = proxy->var();
-  if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
-    __ jmp(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
-    Variable* local = var->local_if_not_shadowed();
-    __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->binding_needs_init()) {
-      __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
-      __ b(ne, done);
-      __ mov(r0, Operand(var->name()));
-      __ push(r0);
-      __ CallRuntime(Runtime::kThrowReferenceError);
-    } else {
-      __ jmp(done);
-    }
-  }
-}
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1277,8 +1153,7 @@
   PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
-  // Three cases: global variables, lookup variables, and all other types of
-  // variables.
+  // Two cases: global variables and all other types of variables.
   switch (var->location()) {
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
@@ -1311,24 +1186,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ Lookup variable");
-      Label done, slow;
-      // Generate code for loading from variables potentially shadowed
-      // by eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
-      __ bind(&slow);
-      __ Push(var->name());
-      Runtime::FunctionId function_id =
-          typeof_mode == NOT_INSIDE_TYPEOF
-              ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotInsideTypeof;
-      __ CallRuntime(function_id);
-      __ bind(&done);
-      context()->Plug(r0);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -1355,9 +1213,10 @@
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
 
-  Handle<FixedArray> constant_properties = expr->constant_properties();
+  Handle<BoilerplateDescription> constant_properties =
+      expr->GetOrBuildConstantProperties(isolate());
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+  __ mov(r2, Operand(SmiFromSlot(expr->literal_slot())));
   __ mov(r1, Operand(constant_properties));
   int flags = expr->ComputeFlags();
   __ mov(r0, Operand(Smi::FromInt(flags)));
@@ -1365,8 +1224,9 @@
     __ Push(r3, r2, r1, r0);
     __ CallRuntime(Runtime::kCreateObjectLiteral);
   } else {
-    FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
-    __ CallStub(&stub);
+    Callable callable = CodeFactory::FastCloneShallowObject(
+        isolate(), expr->properties_count());
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1376,10 +1236,9 @@
   bool result_saved = false;
 
   AccessorTable accessor_table(zone());
-  int property_index = 0;
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-    if (property->is_computed_name()) break;
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    DCHECK(!property->is_computed_name());
     if (property->IsCompileTimeValue()) continue;
 
     Literal* key = property->key()->AsLiteral();
@@ -1389,6 +1248,7 @@
       result_saved = true;
     }
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1403,7 +1263,7 @@
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(r0));
             __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-            CallStoreIC(property->GetSlot(0), key->value());
+            CallStoreIC(property->GetSlot(0), key->value(), true);
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1437,21 +1297,21 @@
         VisitForStackValue(value);
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-        PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+        PrepareForBailoutForId(expr->GetIdForPropertySet(i),
                                BailoutState::NO_REGISTERS);
         break;
 
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->setter = property;
         }
         break;
@@ -1474,73 +1334,6 @@
     PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
-  // Object literals have two parts. The "static" part on the left contains no
-  // computed property names, and so we can compute its map ahead of time; see
-  // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
-  // starts with the first computed property name, and continues with all
-  // properties to its right.  All the code from above initializes the static
-  // component of the object literal, and arranges for the map of the result to
-  // reflect the static order in which the keys appear. For the dynamic
-  // properties, we compile them into a series of "SetOwnProperty" runtime
-  // calls. This will preserve insertion order.
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
-    Expression* value = property->value();
-    if (!result_saved) {
-      PushOperand(r0);  // Save result on the stack
-      result_saved = true;
-    }
-
-    __ ldr(r0, MemOperand(sp));  // Duplicate receiver.
-    PushOperand(r0);
-
-    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
-      DCHECK(!property->is_computed_name());
-      VisitForStackValue(value);
-      DCHECK(property->emit_store());
-      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-      PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             BailoutState::NO_REGISTERS);
-    } else {
-      EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
-      VisitForStackValue(value);
-      if (NeedsHomeObject(value)) {
-        EmitSetHomeObject(value, 2, property->GetSlot());
-      }
-
-      switch (property->kind()) {
-        case ObjectLiteral::Property::CONSTANT:
-        case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        case ObjectLiteral::Property::COMPUTED:
-          if (property->emit_store()) {
-            PushOperand(Smi::FromInt(NONE));
-            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                                   BailoutState::NO_REGISTERS);
-          } else {
-            DropOperands(3);
-          }
-          break;
-
-        case ObjectLiteral::Property::PROTOTYPE:
-          UNREACHABLE();
-          break;
-
-        case ObjectLiteral::Property::GETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-          break;
-
-        case ObjectLiteral::Property::SETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-          break;
-      }
-    }
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1552,29 +1345,20 @@
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
 
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  bool has_fast_elements =
-      IsFastObjectElementsKind(expr->constant_elements_kind());
-  Handle<FixedArrayBase> constant_elements_values(
-      FixedArrayBase::cast(constant_elements->get(1)));
-
-  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
-  if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
-    // If the only customer of allocation sites is transitioning, then
-    // we can turn it off if we don't have anywhere else to transition to.
-    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
-  }
+  Handle<ConstantElementsPair> constant_elements =
+      expr->GetOrBuildConstantElements(isolate());
 
   __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
+  __ mov(r2, Operand(SmiFromSlot(expr->literal_slot())));
   __ mov(r1, Operand(constant_elements));
   if (MustCreateArrayLiteralWithRuntime(expr)) {
     __ mov(r0, Operand(Smi::FromInt(expr->ComputeFlags())));
     __ Push(r3, r2, r1, r0);
     __ CallRuntime(Runtime::kCreateArrayLiteral);
   } else {
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
-    __ CallStub(&stub);
+    Callable callable =
+        CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1637,35 +1421,6 @@
         VisitForStackValue(property->obj());
       }
       break;
-    case NAMED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        const Register scratch = r1;
-        __ ldr(scratch, MemOperand(sp, kPointerSize));
-        PushOperand(scratch);
-        PushOperand(result_register());
-      }
-      break;
-    case KEYED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(property->key());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        const Register scratch = r1;
-        __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
-        PushOperand(scratch);
-        __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
-        PushOperand(scratch);
-        PushOperand(result_register());
-      }
-      break;
     case KEYED_PROPERTY:
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
@@ -1678,6 +1433,10 @@
         VisitForStackValue(property->key());
       }
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 
   // For compound assignments we need another deoptimization point after the
@@ -1694,21 +1453,15 @@
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
-        case NAMED_SUPER_PROPERTY:
-          EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
-        case KEYED_SUPER_PROPERTY:
-          EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
+        case NAMED_SUPER_PROPERTY:
+        case KEYED_SUPER_PROPERTY:
+          UNREACHABLE();
+          break;
       }
     }
 
@@ -1747,72 +1500,20 @@
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
-    case NAMED_SUPER_PROPERTY:
-      EmitNamedSuperPropertyStore(property);
-      context()->Plug(r0);
-      break;
-    case KEYED_SUPER_PROPERTY:
-      EmitKeyedSuperPropertyStore(property);
-      context()->Plug(r0);
-      break;
     case KEYED_PROPERTY:
       EmitKeyedPropertyAssignment(expr);
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
 
 void FullCodeGenerator::VisitYield(Yield* expr) {
-  Comment cmnt(masm_, "[ Yield");
-  SetExpressionPosition(expr);
-
-  // Evaluate yielded value first; the initial iterator definition depends on
-  // this.  It stays on the stack while we update the iterator.
-  VisitForStackValue(expr->expression());
-
-  Label suspend, continuation, post_runtime, resume, exception;
-
-  __ jmp(&suspend);
-  __ bind(&continuation);
-  // When we arrive here, r0 holds the generator object.
-  __ RecordGeneratorContinuation();
-  __ ldr(r1, FieldMemOperand(r0, JSGeneratorObject::kResumeModeOffset));
-  __ ldr(r0, FieldMemOperand(r0, JSGeneratorObject::kInputOrDebugPosOffset));
-  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
-  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
-  __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::kReturn)));
-  __ b(lt, &resume);
-  __ Push(result_register());
-  __ b(gt, &exception);
-  EmitCreateIteratorResult(true);
-  EmitUnwindAndReturn();
-
-  __ bind(&exception);
-  __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
-                                              : Runtime::kThrow);
-
-  __ bind(&suspend);
-  OperandStackDepthIncrement(1);  // Not popped on this path.
-  VisitForAccumulatorValue(expr->generator_object());
-  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-  __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
-  __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
-  __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
-  __ mov(r1, cp);
-  __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
-                      kLRHasBeenSaved, kDontSaveFPRegs);
-  __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
-  __ cmp(sp, r1);
-  __ b(eq, &post_runtime);
-  __ push(r0);  // generator object
-  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  RestoreContext();
-  __ bind(&post_runtime);
-  PopOperand(result_register());
-  EmitReturnSequence();
-
-  __ bind(&resume);
-  context()->Plug(result_register());
+  // Resumable functions are not supported.
+  UNREACHABLE();
 }
 
 void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
@@ -1952,60 +1653,6 @@
   context()->Plug(r0);
 }
 
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  for (int i = 0; i < lit->properties()->length(); i++) {
-    ClassLiteral::Property* property = lit->properties()->at(i);
-    Expression* value = property->value();
-
-    Register scratch = r1;
-    if (property->is_static()) {
-      __ ldr(scratch, MemOperand(sp, kPointerSize));  // constructor
-    } else {
-      __ ldr(scratch, MemOperand(sp, 0));  // prototype
-    }
-    PushOperand(scratch);
-    EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static prototype property is read only. We handle the non computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read only property we special case this so we do
-    // not need to do this for every property.
-    if (property->is_static() && property->is_computed_name()) {
-      __ CallRuntime(Runtime::kThrowIfStaticPrototype);
-      __ push(r0);
-    }
-
-    VisitForStackValue(value);
-    if (NeedsHomeObject(value)) {
-      EmitSetHomeObject(value, 2, property->GetSlot());
-    }
-
-    switch (property->kind()) {
-      case ClassLiteral::Property::METHOD:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-        break;
-
-      case ClassLiteral::Property::GETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::SETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::FIELD:
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
   PopOperand(r1);
   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
@@ -2015,9 +1662,7 @@
   context()->Plug(r0);
 }
 
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
-                                       FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
   DCHECK(expr->IsValidReferenceExpressionOrThis());
 
   Property* prop = expr->AsProperty();
@@ -2039,43 +1684,6 @@
       CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      PushOperand(r0);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      // stack: value, this; r0: home_object
-      Register scratch = r2;
-      Register scratch2 = r3;
-      __ mov(scratch, result_register());              // home_object
-      __ ldr(r0, MemOperand(sp, kPointerSize));        // value
-      __ ldr(scratch2, MemOperand(sp, 0));             // this
-      __ str(scratch2, MemOperand(sp, kPointerSize));  // this
-      __ str(scratch, MemOperand(sp, 0));              // home_object
-      // stack: this, home_object; r0: value
-      EmitNamedSuperPropertyStore(prop);
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      PushOperand(r0);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(prop->key());
-      Register scratch = r2;
-      Register scratch2 = r3;
-      __ ldr(scratch2, MemOperand(sp, 2 * kPointerSize));  // value
-      // stack: value, this, home_object; r0: key, r3: value
-      __ ldr(scratch, MemOperand(sp, kPointerSize));  // this
-      __ str(scratch, MemOperand(sp, 2 * kPointerSize));
-      __ ldr(scratch, MemOperand(sp, 0));  // home_object
-      __ str(scratch, MemOperand(sp, kPointerSize));
-      __ str(r0, MemOperand(sp, 0));
-      __ Move(r0, scratch2);
-      // stack: this, home_object, key; r0: value.
-      EmitKeyedSuperPropertyStore(prop);
-      break;
-    }
     case KEYED_PROPERTY: {
       PushOperand(r0);  // Preserve value.
       VisitForStackValue(prop->obj());
@@ -2086,6 +1694,10 @@
       CallKeyedStoreIC(slot);
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
   context()->Plug(r0);
 }
@@ -2104,7 +1716,7 @@
 }
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot,
+                                               FeedbackSlot slot,
                                                HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
@@ -2147,26 +1759,18 @@
 
   } else {
     DCHECK(var->mode() != CONST || op == Token::INIT);
-    if (var->IsLookupSlot()) {
-      // Assignment to var.
-      __ Push(var->name());
-      __ Push(r0);
-      __ CallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreLookupSlot_Strict
-                         : Runtime::kStoreLookupSlot_Sloppy);
-    } else {
-      // Assignment to var or initializing assignment to let/const in harmony
-      // mode.
-      DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
-      MemOperand location = VarOperand(var, r1);
-      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
-        // Check for an uninitialized let binding.
-        __ ldr(r2, location);
-        __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
-        __ Check(eq, kLetBindingReInitialization);
-      }
-      EmitStoreToStackLocalOrContextSlot(var, location);
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    DCHECK(!var->IsLookupSlot());
+    // Assignment to var or initializing assignment to let/const in harmony
+    // mode.
+    MemOperand location = VarOperand(var, r1);
+    if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+      // Check for an uninitialized let binding.
+      __ ldr(r2, location);
+      __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+      __ Check(eq, kLetBindingReInitialization);
     }
+    EmitStoreToStackLocalOrContextSlot(var, location);
   }
 }
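
// In debug builds the assignment path above re-checks that a let binding
// being initialized still holds the hole sentinel before the store. A tiny
// sketch of that check, assuming a tagged slot where nullptr models
// the-hole; the names here are illustrative, not V8's:

#include <cassert>
#include <cstdio>

struct Slot {
  const void* value;  // nullptr models Heap::kTheHoleValueRootIndex
};

void StoreLetInitialization(Slot* location, const void* value,
                            bool debug_code) {
  if (debug_code) {
    // Check for an uninitialized let binding, as the generated code does.
    assert(location->value == nullptr && "kLetBindingReInitialization");
  }
  location->value = value;  // EmitStoreToStackLocalOrContextSlot
}

int main() {
  Slot slot = {nullptr};
  int forty_two = 42;
  StoreLetInitialization(&slot, &forty_two, true);
  std::printf("stored %d\n", *static_cast<const int*>(slot.value));
  return 0;
}
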
 
@@ -2185,35 +1789,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // r0 : value
-  // stack : receiver ('this'), home_object
-  DCHECK(prop != NULL);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(key != NULL);
-
-  PushOperand(key->value());
-  PushOperand(r0);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreToSuper_Strict
-                              : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to keyed property of super.
-  // r0 : value
-  // stack : receiver ('this'), home_object, key
-  DCHECK(prop != NULL);
-
-  PushOperand(r0);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreKeyedToSuper_Strict
-                              : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
   PopOperands(StoreDescriptor::ReceiverRegister(),
@@ -2261,45 +1836,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-  SetExpressionPosition(prop);
-
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  // Load the function from the receiver.
-  const Register scratch = r1;
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForStackValue(super_ref->home_object());
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperand(r0);
-  PushOperand(r0);
-  __ ldr(scratch, MemOperand(sp, kPointerSize * 2));
-  PushOperand(scratch);
-  PushOperand(key->value());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ str(r0, MemOperand(sp, kPointerSize));
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2325,43 +1861,6 @@
 }
 
 
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  SetExpressionPosition(prop);
-  // Load the function from the receiver.
-  const Register scratch = r1;
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForStackValue(super_ref->home_object());
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperand(r0);
-  PushOperand(r0);
-  __ ldr(scratch, MemOperand(sp, kPointerSize * 2));
-  PushOperand(scratch);
-  VisitForStackValue(prop->key());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ str(r0, MemOperand(sp, kPointerSize));
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2381,8 +1880,9 @@
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
   Handle<Code> code =
-      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
-  __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
+      CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+          .code();
+  __ mov(r3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
   __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ mov(r0, Operand(arg_count));
   CallIC(code);
@@ -2393,116 +1893,6 @@
   context()->DropAndPlug(1, r0);
 }
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
-  int arg_count = expr->arguments()->length();
-  // r4: copy of the first argument or undefined if it doesn't exist.
-  if (arg_count > 0) {
-    __ ldr(r4, MemOperand(sp, arg_count * kPointerSize));
-  } else {
-    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
-  }
-
-  // r3: the receiver of the enclosing function.
-  __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
-  // r2: language mode.
-  __ mov(r2, Operand(Smi::FromInt(language_mode())));
-
-  // r1: the start position of the scope the call resides in.
-  __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
-
-  // r0: the source position of the eval call.
-  __ mov(r0, Operand(Smi::FromInt(expr->position())));
-
-  // Do the runtime call.
-  __ Push(r4, r3, r2, r1, r0);
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
-  VariableProxy* callee = expr->expression()->AsVariableProxy();
-  if (callee->var()->IsLookupSlot()) {
-    Label slow, done;
-    SetExpressionPosition(callee);
-    // Generate code for loading from variables potentially shadowed
-    // by eval-introduced variables.
-    EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
-    __ bind(&slow);
-    // Call the runtime to find the function to call (returned in r0)
-    // and the object holding it (returned in r1).
-    __ Push(callee->name());
-    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
-    PushOperands(r0, r1);  // Function, receiver.
-    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
-    if (done.is_linked()) {
-      Label call;
-      __ b(&call);
-      __ bind(&done);
-      // Push function.
-      __ push(r0);
-      // The receiver is implicitly the global receiver. Indicate this
-      // by passing undefined to the call function stub.
-      __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
-      __ push(r1);
-      __ bind(&call);
-    }
-  } else {
-    VisitForStackValue(callee);
-    // refEnv.WithBaseObject()
-    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-    PushOperand(r2);  // Reserved receiver slot.
-  }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call
-  // Runtime_ResolvePossiblyDirectEval to resolve the function we need
-  // to call.  Then we call the resolved function using the given arguments.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  PushCalleeAndWithBaseObject(expr);
-
-  // Push the arguments.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Push a copy of the function (found below the arguments) and
-  // resolve eval.
-  __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ push(r1);
-  EmitResolvePossiblyDirectEval(expr);
-
-  // Touch up the stack with the resolved function.
-  __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-
-  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
-  // Record source position for debugger.
-  SetCallPosition(expr);
-  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
-                                          expr->tail_call_mode())
-                          .code();
-  __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
-  __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ mov(r0, Operand(arg_count));
-  __ Call(code, RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->DropAndPlug(1, r0);
-}
-
-
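
// The removed eval path above follows a small protocol: push the arguments,
// push a copy of the callee found below them, let the runtime return the
// resolved target, then patch that target back into the callee's stack slot
// before the generic call IC runs. A sketch of the slot arithmetic, with
// ResolveEval as a hypothetical stand-in for
// Runtime::kResolvePossiblyDirectEval:

#include <cstdio>
#include <string>
#include <vector>

static std::string ResolveEval(const std::string& callee) {
  // A direct eval resolves to a different target; anything else comes back
  // unchanged.
  return callee == "eval" ? "%DirectEval" : callee;
}

int main() {
  // Bottom -> top, as the generated code keeps it: callee, receiver, args.
  std::vector<std::string> stack = {"eval", "<receiver>", "arg0", "arg1"};
  const std::size_t arg_count = 2;

  // ldr r1, [sp, (arg_count + 1) * kPointerSize]; push r1
  const std::size_t callee_slot = stack.size() - 1 - (arg_count + 1);
  std::string copy = stack[callee_slot];

  // The runtime consumes the copy plus the bookkeeping values and returns
  // the resolved function, which is stored back over the original callee:
  // str r0, [sp, (arg_count + 1) * kPointerSize]
  stack[callee_slot] = ResolveEval(copy);

  std::printf("calling %s with %zu arguments\n", stack[callee_slot].c_str(),
              arg_count);
  return 0;
}
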
 void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
@@ -2531,7 +1921,7 @@
   __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
 
   // Record call targets in unoptimized code.
-  __ EmitLoadTypeFeedbackVector(r2);
+  __ EmitLoadFeedbackVector(r2);
   __ mov(r3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
 
   CallConstructStub stub(isolate());
@@ -2543,49 +1933,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
-  SuperCallReference* super_call_ref =
-      expr->expression()->AsSuperCallReference();
-  DCHECK_NOT_NULL(super_call_ref);
-
-  // Push the super constructor target on the stack (may be null,
-  // but the Construct builtin can deal with that properly).
-  VisitForAccumulatorValue(super_call_ref->this_function_var());
-  __ AssertFunction(result_register());
-  __ ldr(result_register(),
-         FieldMemOperand(result_register(), HeapObject::kMapOffset));
-  __ ldr(result_register(),
-         FieldMemOperand(result_register(), Map::kPrototypeOffset));
-  PushOperand(result_register());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  SetConstructCallPosition(expr);
-
-  // Load new target into r3.
-  VisitForAccumulatorValue(super_call_ref->new_target_var());
-  __ mov(r3, result_register());
-
-  // Load function and argument count into r1 and r0.
-  __ mov(r0, Operand(arg_count));
-  __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-
-  __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->Plug(r0);
-}
-
-
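
// The removed super-call path above finds the target constructor with two
// loads: the active function's map, then the map's prototype. Modeled with
// plain structs (illustrative names, not V8's object layout):

#include <cstdio>

struct JSFunction;

struct Map {
  const JSFunction* prototype;
};

struct JSFunction {
  const char* name;
  Map map;
};

int main() {
  JSFunction base = {"Base", {nullptr}};
  JSFunction derived = {"Derived", {&base}};
  // ldr result, [result, kMapOffset]; ldr result, [result, kPrototypeOffset]
  const JSFunction* target = derived.map.prototype;
  std::printf("super() invokes %s\n", target->name);
  return 0;
}
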
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2673,28 +2020,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(r0, if_false);
-  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2931,16 +2256,12 @@
           __ Push(r2, r1);
           __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
           context()->Plug(r0);
-        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+        } else {
+          DCHECK(!var->IsLookupSlot());
+          DCHECK(var->IsStackAllocated() || var->IsContextSlot());
           // Result of deleting non-global, non-dynamic variables is false.
           // The subexpression does not have side effects.
           context()->Plug(is_this);
-        } else {
-          // Non-global variable.  Call the runtime to try to delete from the
-          // context where the variable was introduced.
-          __ Push(var->name());
-          __ CallRuntime(Runtime::kDeleteLookupSlot);
-          context()->Plug(r0);
         }
       } else {
         // Result of deleting non-property, non-variable reference is true.
@@ -3046,35 +2367,6 @@
         break;
       }
 
-      case NAMED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForAccumulatorValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        PushOperand(result_register());
-        const Register scratch = r1;
-        __ ldr(scratch, MemOperand(sp, kPointerSize));
-        PushOperand(scratch);
-        PushOperand(result_register());
-        EmitNamedSuperPropertyLoad(prop);
-        break;
-      }
-
-      case KEYED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForStackValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        VisitForAccumulatorValue(prop->key());
-        PushOperand(result_register());
-        const Register scratch = r1;
-        __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
-        PushOperand(scratch);
-        __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
-        PushOperand(scratch);
-        PushOperand(result_register());
-        EmitKeyedSuperPropertyLoad(prop);
-        break;
-      }
-
       case KEYED_PROPERTY: {
         VisitForStackValue(prop->obj());
         VisitForStackValue(prop->key());
@@ -3085,6 +2377,8 @@
         break;
       }
 
+      case NAMED_SUPER_PROPERTY:
+      case KEYED_SUPER_PROPERTY:
       case VARIABLE:
         UNREACHABLE();
     }
@@ -3120,14 +2414,12 @@
           case NAMED_PROPERTY:
             __ str(r0, MemOperand(sp, kPointerSize));
             break;
-          case NAMED_SUPER_PROPERTY:
-            __ str(r0, MemOperand(sp, 2 * kPointerSize));
-            break;
           case KEYED_PROPERTY:
             __ str(r0, MemOperand(sp, 2 * kPointerSize));
             break;
+          case NAMED_SUPER_PROPERTY:
           case KEYED_SUPER_PROPERTY:
-            __ str(r0, MemOperand(sp, 3 * kPointerSize));
+            UNREACHABLE();
             break;
         }
       }
@@ -3159,14 +2451,12 @@
         case NAMED_PROPERTY:
           __ str(r0, MemOperand(sp, kPointerSize));
           break;
-        case NAMED_SUPER_PROPERTY:
-          __ str(r0, MemOperand(sp, 2 * kPointerSize));
-          break;
         case KEYED_PROPERTY:
           __ str(r0, MemOperand(sp, 2 * kPointerSize));
           break;
+        case NAMED_SUPER_PROPERTY:
         case KEYED_SUPER_PROPERTY:
-          __ str(r0, MemOperand(sp, 3 * kPointerSize));
+          UNREACHABLE();
           break;
       }
     }
@@ -3223,30 +2513,6 @@
       }
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      EmitNamedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(r0);
-      }
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      EmitKeyedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(r0);
-      }
-      break;
-    }
     case KEYED_PROPERTY: {
       PopOperands(StoreDescriptor::ReceiverRegister(),
                   StoreDescriptor::NameRegister());
@@ -3261,6 +2527,10 @@
       }
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
@@ -3329,16 +2599,6 @@
     __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
     __ tst(r1, Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     Split(eq, if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)   \
-  } else if (String::Equals(check, factory->type##_string())) { \
-    __ JumpIfSmi(r0, if_false);                                 \
-    __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));    \
-    __ CompareRoot(r0, Heap::k##Type##MapRootIndex);            \
-    Split(eq, if_true, if_false, fall_through);
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
@@ -3379,6 +2639,7 @@
       SetExpressionPosition(expr);
       PopOperand(r1);
       __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+      RestoreContext();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(r0, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
@@ -3493,70 +2754,6 @@
 }
 
 
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
-  DCHECK(!result_register().is(r1));
-  // Store pending message while executing finally block.
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(ip, Operand(pending_message_obj));
-  __ ldr(r1, MemOperand(ip));
-  PushOperand(r1);
-
-  ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
-  DCHECK(!result_register().is(r1));
-  // Restore pending message from stack.
-  PopOperand(r1);
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(ip, Operand(pending_message_obj));
-  __ str(r1, MemOperand(ip));
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
-  DCHECK(!result_register().is(r1));
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
-  __ mov(ip, Operand(pending_message_obj));
-  __ str(r1, MemOperand(ip));
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
-  DCHECK(!result_register().is(r1));
-  __ Pop(result_register());  // Restore the accumulator.
-  __ Pop(r1);                 // Get the token.
-  for (DeferredCommand cmd : commands_) {
-    Label skip;
-    __ cmp(r1, Operand(Smi::FromInt(cmd.token)));
-    __ b(ne, &skip);
-    switch (cmd.command) {
-      case kReturn:
-        codegen_->EmitUnwindAndReturn();
-        break;
-      case kThrow:
-        __ Push(result_register());
-        __ CallRuntime(Runtime::kReThrow);
-        break;
-      case kContinue:
-        codegen_->EmitContinue(cmd.target);
-        break;
-      case kBreak:
-        codegen_->EmitBreak(cmd.target);
-        break;
-    }
-    __ bind(&skip);
-  }
-}
-
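
// The removed EmitCommands generated a compare-and-branch chain: the finally
// block leaves a token on the operand stack, and each deferred command runs
// only when its token matches. A sketch of that dispatch, assuming an enum
// and a switch in place of the emitted branches:

#include <cstdio>

enum Command { kReturn, kThrow, kContinue, kBreak };

struct DeferredCommand {
  Command command;
  int token;
};

void Dispatch(int token, const DeferredCommand* commands, int count) {
  for (int i = 0; i < count; i++) {
    if (commands[i].token != token) continue;  // cmp token; b ne, &skip
    switch (commands[i].command) {
      case kReturn:
        std::puts("unwind and return");
        break;
      case kThrow:
        std::puts("re-throw the pending exception");
        break;
      case kContinue:
        std::puts("jump to the loop's continue target");
        break;
      case kBreak:
        std::puts("jump to the break target");
        break;
    }
  }
}

int main() {
  DeferredCommand commands[] = {{kReturn, 0}, {kBreak, 1}};
  Dispatch(1, commands, 2);  // only the break command fires
  return 0;
}
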
 #undef __
 
 
diff --git a/src/full-codegen/arm64/full-codegen-arm64.cc b/src/full-codegen/arm64/full-codegen-arm64.cc
index 51b3009..f6b9c2f 100644
--- a/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -4,15 +4,16 @@
 
 #if V8_TARGET_ARCH_ARM64
 
-#include "src/full-codegen/full-codegen.h"
 #include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
 
 #include "src/arm64/code-stubs-arm64.h"
@@ -135,22 +136,20 @@
   // Increment invocation count for the function.
   {
     Comment cmnt(masm_, "[ Increment invocation count");
-    __ Ldr(x11, FieldMemOperand(x1, JSFunction::kLiteralsOffset));
-    __ Ldr(x11, FieldMemOperand(x11, LiteralsArray::kFeedbackVectorOffset));
-    __ Ldr(x10, FieldMemOperand(x11, TypeFeedbackVector::kInvocationCountIndex *
-                                             kPointerSize +
-                                         TypeFeedbackVector::kHeaderSize));
+    __ Ldr(x11, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
+    __ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
+    __ Ldr(x10, FieldMemOperand(
+                    x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                             FeedbackVector::kHeaderSize));
     __ Add(x10, x10, Operand(Smi::FromInt(1)));
-    __ Str(x10, FieldMemOperand(x11, TypeFeedbackVector::kInvocationCountIndex *
-                                             kPointerSize +
-                                         TypeFeedbackVector::kHeaderSize));
+    __ Str(x10, FieldMemOperand(
+                    x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                             FeedbackVector::kHeaderSize));
   }
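
// The load/store pair above addresses the invocation count as an element of
// the feedback vector: a fixed header followed by pointer-sized slots (the
// real FieldMemOperand additionally compensates for the heap-object tag). A
// sketch of that offset arithmetic; the constants are illustrative
// assumptions, not V8's actual values:

#include <cstdio>

int main() {
  const int kPointerSize = 8;                // 64-bit target
  const int kHeaderSize = 2 * kPointerSize;  // assumed: map + length words
  const int kInvocationCountIndex = 1;       // assumed slot index

  const int offset = kInvocationCountIndex * kPointerSize + kHeaderSize;
  std::printf("invocation count at [vector + %d]\n", offset);
  return 0;
}
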
 
   // Reserve space on the stack for locals.
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
-    // Generators allocate locals, if any, in context slots.
-    DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
     OperandStackDepthIncrement(locals_count);
     if (locals_count > 0) {
       if (locals_count >= 128) {
@@ -203,14 +202,17 @@
       if (info->scope()->new_target_var() != nullptr) {
         __ Push(x3);  // Preserve new target.
       }
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info->scope()->scope_type());
         __ Mov(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ Push(x1);
+        __ Push(Smi::FromInt(info->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
       if (info->scope()->new_target_var() != nullptr) {
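
// The allocation path above is a size-based dispatch: small contexts go to
// the fast builtin with the slot count in a fixed register, large ones fall
// back to the runtime, which now also receives the scope type. A sketch of
// that shape (the threshold value is an assumption):

#include <cstdio>

enum ScopeType { FUNCTION_SCOPE, EVAL_SCOPE };

void NewFunctionContext(int slots, ScopeType scope_type) {
  const int kMaximumFunctionContextSlots = 256;  // assumed limit
  if (slots <= kMaximumFunctionContextSlots) {
    std::printf("fast builtin: %d slots, scope type %d\n", slots, scope_type);
  } else {
    std::printf("Runtime::kNewFunctionContext: %d slots, scope type %d\n",
                slots, scope_type);
  }
}

int main() {
  NewFunctionContext(8, FUNCTION_SCOPE);
  NewFunctionContext(100000, EVAL_SCOPE);
  return 0;
}
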
@@ -257,37 +259,10 @@
   PrepareForBailoutForId(BailoutId::FunctionContext(),
                          BailoutState::NO_REGISTERS);
 
-  // Possibly set up a local binding to the this function which is used in
-  // derived constructors with super calls.
-  Variable* this_function_var = info->scope()->this_function_var();
-  if (this_function_var != nullptr) {
-    Comment cmnt(masm_, "[ This function");
-    if (!function_in_register_x1) {
-      __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers register again, keep it marked as such.
-    }
-    SetVar(this_function_var, x1, x0, x2);
-  }
-
-  // Possibly set up a local binding to the new target value.
-  Variable* new_target_var = info->scope()->new_target_var();
-  if (new_target_var != nullptr) {
-    Comment cmnt(masm_, "[ new.target");
-    SetVar(new_target_var, x3, x0, x2);
-  }
-
-  // Possibly allocate RestParameters
-  Variable* rest_param = info->scope()->rest_parameter();
-  if (rest_param != nullptr) {
-    Comment cmnt(masm_, "[ Allocate rest parameter array");
-    if (!function_in_register_x1) {
-      __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-    }
-    FastNewRestParameterStub stub(isolate());
-    __ CallStub(&stub);
-    function_in_register_x1 = false;
-    SetVar(rest_param, x0, x1, x2);
-  }
+  // We don't support new.target, rest parameters, or this function here.
+  DCHECK_NULL(info->scope()->new_target_var());
+  DCHECK_NULL(info->scope()->rest_parameter());
+  DCHECK_NULL(info->scope()->this_function_var());
 
   Variable* arguments = info->scope()->arguments();
   if (arguments != NULL) {
@@ -298,14 +273,16 @@
       __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     }
     if (is_strict(language_mode()) || !has_simple_parameters()) {
-      FastNewStrictArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+      __ Call(callable.code(), RelocInfo::CODE_TARGET);
+      RestoreContext();
     } else if (literal()->has_duplicate_parameters()) {
       __ Push(x1);
       __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
     } else {
-      FastNewSloppyArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+      __ Call(callable.code(), RelocInfo::CODE_TARGET);
+      RestoreContext();
     }
 
     SetVar(arguments, x0, x1, x2);
@@ -544,10 +521,8 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
-         !lit->IsUndetectable());
-  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
-      lit->IsFalse(isolate())) {
+  DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+  if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ B(false_label_);
   } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ B(true_label_);
@@ -778,10 +753,12 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
       globals_->Add(isolate()->factory()->undefined_value(), zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
     }
     case VariableLocation::PARAMETER:
@@ -804,17 +781,7 @@
       }
       break;
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ VariableDeclaration");
-      DCHECK_EQ(VAR, variable->mode());
-      DCHECK(!variable->binding_needs_init());
-      __ Mov(x2, Operand(variable->name()));
-      __ Push(x2);
-      __ CallRuntime(Runtime::kDeclareEvalVar);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -827,9 +794,16 @@
   Variable* variable = proxy->var();
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+      // We need the slot where the literals array lives, too.
+      slot = declaration->fun()->LiteralFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
       Handle<SharedFunctionInfo> function =
           Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack overflow exception.
@@ -865,17 +839,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ Function Declaration");
-      __ Mov(x2, Operand(variable->name()));
-      PushOperand(x2);
-      // Push initial value for function declaration.
-      VisitForStackValue(declaration->fun());
-      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
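
// With the extra entries added above, every global declaration now
// contributes a fixed-width record to the flat globals_ list: name, variable
// feedback slot, literal feedback slot (undefined for plain variables), and
// the value. A sketch of consuming such records; the stride and sample
// contents are assumptions for illustration:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  const std::size_t kRecordSize = 4;  // name, slot, literal slot, value
  std::vector<std::string> globals = {
      "f", "slot:2", "literal-slot:3", "<SharedFunctionInfo f>",
      "x", "slot:4", "undefined",      "undefined",
  };
  for (std::size_t i = 0; i < globals.size(); i += kRecordSize) {
    std::printf("declare %s -> %s\n", globals[i].c_str(),
                globals[i + 3].c_str());
  }
  return 0;
}
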
@@ -890,7 +854,7 @@
   __ Mov(x11, Operand(pairs));
   Register flags = x10;
   __ Mov(flags, Smi::FromInt(DeclareGlobalsFlags()));
-  __ EmitLoadTypeFeedbackVector(x12);
+  __ EmitLoadFeedbackVector(x12);
   __ Push(x11, flags, x12);
   __ CallRuntime(Runtime::kDeclareGlobals);
   // Return value is ignored.
@@ -992,7 +956,7 @@
   Comment cmnt(masm_, "[ ForInStatement");
   SetStatementPosition(stmt, SKIP_BREAK);
 
-  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  FeedbackSlot slot = stmt->ForInFeedbackSlot();
 
   // TODO(all): This visitor probably needs better comments and a revisit.
 
@@ -1103,8 +1067,8 @@
 
   // We need to filter the key, record slow-path here.
   int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(x3);
-  __ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ EmitLoadFeedbackVector(x3);
+  __ Mov(x10, Operand(FeedbackVector::MegamorphicSentinel(isolate())));
   __ Str(x10, FieldMemOperand(x3, FixedArray::OffsetOfElementAt(vector_index)));
 
   // x0 contains the key. The receiver in x1 is the second argument to the
@@ -1152,110 +1116,23 @@
   decrement_loop_depth();
 }
 
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
-                                          FeedbackVectorSlot slot) {
+                                          FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Peek(StoreDescriptor::ReceiverRegister(), 0);
   __ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
 void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
                                                      int offset,
-                                                     FeedbackVectorSlot slot) {
+                                                     FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Move(StoreDescriptor::ReceiverRegister(), x0);
   __ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
-                                                      TypeofMode typeof_mode,
-                                                      Label* slow) {
-  Register current = cp;
-  Register next = x10;
-  Register temp = x11;
-
-  int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
-  for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (s->calls_sloppy_eval()) {
-      // Check that extension is "the hole".
-      __ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
-      __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-    }
-    // Load next context in chain.
-    __ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
-    // Walk the rest of the chain without clobbering cp.
-    current = next;
-    to_check--;
-  }
-
-  // All extension objects were empty and it is safe to use the normal
-  // global load machinery.
-  EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
-                                                                Label* slow) {
-  DCHECK(var->IsContextSlot());
-  Register context = cp;
-  Register next = x10;
-  Register temp = x11;
-
-  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->NeedsContext()) {
-      if (s->calls_sloppy_eval()) {
-        // Check that extension is "the hole".
-        __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
-        __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-      }
-      __ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-      // Walk the rest of the chain without clobbering cp.
-      context = next;
-    }
-  }
-  // Check that last extension is "the hole".
-  __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
-  __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-
-  // This function is used only for loads, not stores, so it's safe to
-  // return a cp-based operand (the write barrier cannot be allowed to
-  // destroy the cp register).
-  return ContextMemOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
-                                                  TypeofMode typeof_mode,
-                                                  Label* slow, Label* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  Variable* var = proxy->var();
-  if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
-    __ B(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
-    Variable* local = var->local_if_not_shadowed();
-    __ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->binding_needs_init()) {
-      __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done);
-      __ Mov(x0, Operand(var->name()));
-      __ Push(x0);
-      __ CallRuntime(Runtime::kThrowReferenceError);
-    } else {
-      __ B(done);
-    }
-  }
-}
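
// The removed lookup helpers above all walk the context chain and bail to a
// slow path if any intervening sloppy-eval scope has installed an extension
// object (anything other than the hole). A minimal sketch of that walk,
// assuming a singly linked Context where nullptr models the hole:

#include <cstdio>

struct Context {
  Context* previous;
  bool calls_sloppy_eval;
  const void* extension;  // nullptr models Heap::kTheHoleValueRootIndex
};

// Returns the context holding the slot, or nullptr to signal the slow path.
Context* WalkForLoad(Context* current, Context* target) {
  for (Context* c = current; c != target; c = c->previous) {
    if (c->calls_sloppy_eval && c->extension != nullptr) return nullptr;
  }
  // Check that the last extension is also "the hole".
  if (target->extension != nullptr) return nullptr;
  return target;
}

int main() {
  Context outer = {nullptr, false, nullptr};
  Context inner = {&outer, true, nullptr};
  std::printf("%s\n", WalkForLoad(&inner, &outer) ? "fast path" : "slow path");
  return 0;
}
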
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1263,8 +1140,7 @@
   PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
-  // Three cases: global variables, lookup variables, and all other types of
-  // variables.
+  // Two cases: global variables and all other types of variables.
   switch (var->location()) {
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "Global variable");
@@ -1297,24 +1173,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Label done, slow;
-      // Generate code for loading from variables potentially shadowed by
-      // eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
-      __ Bind(&slow);
-      Comment cmnt(masm_, "Lookup variable");
-      __ Push(var->name());
-      Runtime::FunctionId function_id =
-          typeof_mode == NOT_INSIDE_TYPEOF
-              ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotInsideTypeof;
-      __ CallRuntime(function_id);
-      __ Bind(&done);
-      context()->Plug(x0);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -1341,9 +1200,10 @@
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
 
-  Handle<FixedArray> constant_properties = expr->constant_properties();
+  Handle<BoilerplateDescription> constant_properties =
+      expr->GetOrBuildConstantProperties(isolate());
   __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ Mov(x2, Smi::FromInt(expr->literal_index()));
+  __ Mov(x2, SmiFromSlot(expr->literal_slot()));
   __ Mov(x1, Operand(constant_properties));
   int flags = expr->ComputeFlags();
   __ Mov(x0, Smi::FromInt(flags));
@@ -1351,8 +1211,9 @@
     __ Push(x3, x2, x1, x0);
     __ CallRuntime(Runtime::kCreateObjectLiteral);
   } else {
-    FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
-    __ CallStub(&stub);
+    Callable callable = CodeFactory::FastCloneShallowObject(
+        isolate(), expr->properties_count());
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1362,10 +1223,9 @@
   bool result_saved = false;
 
   AccessorTable accessor_table(zone());
-  int property_index = 0;
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-    if (property->is_computed_name()) break;
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    DCHECK(!property->is_computed_name());
     if (property->IsCompileTimeValue()) continue;
 
     Literal* key = property->key()->AsLiteral();
@@ -1375,6 +1235,7 @@
       result_saved = true;
     }
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1389,7 +1250,7 @@
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(x0));
             __ Peek(StoreDescriptor::ReceiverRegister(), 0);
-            CallStoreIC(property->GetSlot(0), key->value());
+            CallStoreIC(property->GetSlot(0), key->value(), true);
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1422,20 +1283,20 @@
         PushOperand(x0);
         VisitForStackValue(value);
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-        PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+        PrepareForBailoutForId(expr->GetIdForPropertySet(i),
                                BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->setter = property;
         }
         break;
@@ -1458,73 +1319,6 @@
     PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
-  // Object literals have two parts. The "static" part on the left contains no
-  // computed property names, and so we can compute its map ahead of time; see
-  // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
-  // starts with the first computed property name, and continues with all
-  // properties to its right.  All the code from above initializes the static
-  // component of the object literal, and arranges for the map of the result to
-  // reflect the static order in which the keys appear. For the dynamic
-  // properties, we compile them into a series of "SetOwnProperty" runtime
-  // calls. This will preserve insertion order.
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
-    Expression* value = property->value();
-    if (!result_saved) {
-      PushOperand(x0);  // Save result on stack
-      result_saved = true;
-    }
-
-    __ Peek(x10, 0);  // Duplicate receiver.
-    PushOperand(x10);
-
-    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
-      DCHECK(!property->is_computed_name());
-      VisitForStackValue(value);
-      DCHECK(property->emit_store());
-      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-      PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             BailoutState::NO_REGISTERS);
-    } else {
-      EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
-      VisitForStackValue(value);
-      if (NeedsHomeObject(value)) {
-        EmitSetHomeObject(value, 2, property->GetSlot());
-      }
-
-      switch (property->kind()) {
-        case ObjectLiteral::Property::CONSTANT:
-        case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        case ObjectLiteral::Property::COMPUTED:
-          if (property->emit_store()) {
-            PushOperand(Smi::FromInt(NONE));
-            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                                   BailoutState::NO_REGISTERS);
-          } else {
-            DropOperands(3);
-          }
-          break;
-
-        case ObjectLiteral::Property::PROTOTYPE:
-          UNREACHABLE();
-          break;
-
-        case ObjectLiteral::Property::GETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-          break;
-
-        case ObjectLiteral::Property::SETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-          break;
-      }
-    }
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1536,27 +1330,20 @@
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
 
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  bool has_fast_elements =
-      IsFastObjectElementsKind(expr->constant_elements_kind());
-
-  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
-  if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
-    // If the only customer of allocation sites is transitioning, then
-    // we can turn it off if we don't have anywhere else to transition to.
-    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
-  }
+  Handle<ConstantElementsPair> constant_elements =
+      expr->GetOrBuildConstantElements(isolate());
 
   __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ Mov(x2, Smi::FromInt(expr->literal_index()));
+  __ Mov(x2, SmiFromSlot(expr->literal_slot()));
   __ Mov(x1, Operand(constant_elements));
   if (MustCreateArrayLiteralWithRuntime(expr)) {
     __ Mov(x0, Smi::FromInt(expr->ComputeFlags()));
     __ Push(x3, x2, x1, x0);
     __ CallRuntime(Runtime::kCreateArrayLiteral);
   } else {
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
-    __ CallStub(&stub);
+    Callable callable =
+        CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1619,33 +1406,6 @@
         VisitForStackValue(property->obj());
       }
       break;
-    case NAMED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        const Register scratch = x10;
-        __ Peek(scratch, kPointerSize);
-        PushOperands(scratch, result_register());
-      }
-      break;
-    case KEYED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(property->key());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        const Register scratch1 = x10;
-        const Register scratch2 = x11;
-        __ Peek(scratch1, 2 * kPointerSize);
-        __ Peek(scratch2, kPointerSize);
-        PushOperands(scratch1, scratch2, result_register());
-      }
-      break;
     case KEYED_PROPERTY:
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
@@ -1657,6 +1417,10 @@
         VisitForStackValue(property->key());
       }
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 
   // For compound assignments we need another deoptimization point after the
@@ -1673,21 +1437,15 @@
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
-        case NAMED_SUPER_PROPERTY:
-          EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
-        case KEYED_SUPER_PROPERTY:
-          EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
+        case NAMED_SUPER_PROPERTY:
+        case KEYED_SUPER_PROPERTY:
+          UNREACHABLE();
+          break;
       }
     }
 
@@ -1726,17 +1484,13 @@
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
-    case NAMED_SUPER_PROPERTY:
-      EmitNamedSuperPropertyStore(property);
-      context()->Plug(x0);
-      break;
-    case KEYED_SUPER_PROPERTY:
-      EmitKeyedSuperPropertyStore(property);
-      context()->Plug(x0);
-      break;
     case KEYED_PROPERTY:
       EmitKeyedPropertyAssignment(expr);
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
@@ -1852,62 +1606,7 @@
   context()->Plug(x0);
 }
 
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  for (int i = 0; i < lit->properties()->length(); i++) {
-    ClassLiteral::Property* property = lit->properties()->at(i);
-    Expression* value = property->value();
-
-    Register scratch = x1;
-    if (property->is_static()) {
-      __ Peek(scratch, kPointerSize);  // constructor
-    } else {
-      __ Peek(scratch, 0);  // prototype
-    }
-    PushOperand(scratch);
-    EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static prototype property is read only. We handle the non computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read only property we special case this so we do
-    // not need to do this for every property.
-    if (property->is_static() && property->is_computed_name()) {
-      __ CallRuntime(Runtime::kThrowIfStaticPrototype);
-      __ Push(x0);
-    }
-
-    VisitForStackValue(value);
-    if (NeedsHomeObject(value)) {
-      EmitSetHomeObject(value, 2, property->GetSlot());
-    }
-
-    switch (property->kind()) {
-      case ClassLiteral::Property::METHOD:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-        break;
-
-      case ClassLiteral::Property::GETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::SETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::FIELD:
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
-                                       FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
   DCHECK(expr->IsValidReferenceExpressionOrThis());
 
   Property* prop = expr->AsProperty();
@@ -1931,43 +1630,6 @@
       CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      PushOperand(x0);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      // stack: value, this; x0: home_object
-      Register scratch = x10;
-      Register scratch2 = x11;
-      __ mov(scratch, result_register());  // home_object
-      __ Peek(x0, kPointerSize);           // value
-      __ Peek(scratch2, 0);                // this
-      __ Poke(scratch2, kPointerSize);     // this
-      __ Poke(scratch, 0);                 // home_object
-      // stack: this, home_object; x0: value
-      EmitNamedSuperPropertyStore(prop);
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      PushOperand(x0);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(prop->key());
-      Register scratch = x10;
-      Register scratch2 = x11;
-      __ Peek(scratch2, 2 * kPointerSize);  // value
-      // stack: value, this, home_object; x0: key, x11: value
-      __ Peek(scratch, kPointerSize);  // this
-      __ Poke(scratch, 2 * kPointerSize);
-      __ Peek(scratch, 0);  // home_object
-      __ Poke(scratch, kPointerSize);
-      __ Poke(x0, 0);
-      __ Move(x0, scratch2);
-      // stack: this, home_object, key; x0: value.
-      EmitKeyedSuperPropertyStore(prop);
-      break;
-    }
     case KEYED_PROPERTY: {
       PushOperand(x0);  // Preserve value.
       VisitForStackValue(prop->obj());
@@ -1978,6 +1640,10 @@
       CallKeyedStoreIC(slot);
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
   context()->Plug(x0);
 }
@@ -1996,7 +1662,7 @@
 }
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot,
+                                               FeedbackSlot slot,
                                                HoleCheckMode hole_check_mode) {
   ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
   if (var->IsUnallocated()) {
@@ -2038,25 +1704,17 @@
 
   } else {
     DCHECK(var->mode() != CONST || op == Token::INIT);
-    if (var->IsLookupSlot()) {
-      // Assignment to var.
-      __ Push(var->name());
-      __ Push(x0);
-      __ CallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreLookupSlot_Strict
-                         : Runtime::kStoreLookupSlot_Sloppy);
-    } else {
-      // Assignment to var or initializing assignment to let/const in harmony
-      // mode.
-      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-      MemOperand location = VarOperand(var, x1);
-      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
-        __ Ldr(x10, location);
-        __ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
-        __ Check(eq, kLetBindingReInitialization);
-      }
-      EmitStoreToStackLocalOrContextSlot(var, location);
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    DCHECK(!var->IsLookupSlot());
+    // Assignment to var or initializing assignment to let/const in harmony
+    // mode.
+    MemOperand location = VarOperand(var, x1);
+    if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+      __ Ldr(x10, location);
+      __ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
+      __ Check(eq, kLetBindingReInitialization);
     }
+    EmitStoreToStackLocalOrContextSlot(var, location);
   }
 }
 
@@ -2076,35 +1734,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // x0 : value
-  // stack : receiver ('this'), home_object
-  DCHECK(prop != NULL);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(key != NULL);
-
-  PushOperand(key->value());
-  PushOperand(x0);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreToSuper_Strict
-                              : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to keyed property of super.
-  // x0 : value
-  // stack : receiver ('this'), home_object, key
-  DCHECK(prop != NULL);
-
-  PushOperand(x0);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreKeyedToSuper_Strict
-                              : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
   // Assignment to a property, using a keyed store IC.
@@ -2159,47 +1788,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
-  ASM_LOCATION("FullCodeGenerator::EmitSuperCallWithLoadIC");
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-  SetExpressionPosition(prop);
-
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-
-  // Load the function from the receiver.
-  const Register scratch = x10;
-  SuperPropertyReference* super_ref =
-      callee->AsProperty()->obj()->AsSuperPropertyReference();
-  VisitForStackValue(super_ref->home_object());
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperand(x0);
-  __ Peek(scratch, kPointerSize);
-  PushOperands(x0, scratch);
-  PushOperand(key->value());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ Poke(x0, kPointerSize);
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2225,44 +1813,6 @@
 }
 
 
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
-  ASM_LOCATION("FullCodeGenerator::EmitKeyedSuperCallWithLoadIC");
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-  SetExpressionPosition(prop);
-
-  // Load the function from the receiver.
-  const Register scratch = x10;
-  SuperPropertyReference* super_ref =
-      callee->AsProperty()->obj()->AsSuperPropertyReference();
-  VisitForStackValue(super_ref->home_object());
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperand(x0);
-  __ Peek(scratch, kPointerSize);
-  PushOperands(x0, scratch);
-  VisitForStackValue(prop->key());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ Poke(x0, kPointerSize);
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
   ASM_LOCATION("FullCodeGenerator::EmitCall");
   // Load the arguments.
@@ -2283,8 +1833,9 @@
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
   Handle<Code> code =
-      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
-  __ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
+      CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+          .code();
+  __ Mov(x3, IntFromSlot(expr->CallFeedbackICSlot()));
   __ Peek(x1, (arg_count + 1) * kXRegSize);
   __ Mov(x0, arg_count);
   CallIC(code);
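
The hunk above switches the call IC from CodeFactory::CallIC, which took the feedback slot as a Smi, to CodeFactory::CallICTrampoline, which takes the untagged slot index in x3 (the trampoline variant is expected to locate the feedback vector itself). A minimal sketch of the new index computation, using only names this patch introduces:

    // Untagged slot index, as produced by the new IntFromSlot() helper
    // added to full-codegen.h below in this patch:
    int32_t index = FeedbackVector::GetIndex(expr->CallFeedbackICSlot());
    __ Mov(x3, index);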
@@ -2295,119 +1846,6 @@
   context()->DropAndPlug(1, x0);
 }
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
-  int arg_count = expr->arguments()->length();
-  ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval");
-  // Prepare to push a copy of the first argument or undefined if it doesn't
-  // exist.
-  if (arg_count > 0) {
-    __ Peek(x9, arg_count * kXRegSize);
-  } else {
-    __ LoadRoot(x9, Heap::kUndefinedValueRootIndex);
-  }
-
-  __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
-  // Prepare to push the language mode.
-  __ Mov(x11, Smi::FromInt(language_mode()));
-  // Prepare to push the start position of the scope the call resides in.
-  __ Mov(x12, Smi::FromInt(scope()->start_position()));
-  // Prepare to push the source position of the eval call.
-  __ Mov(x13, Smi::FromInt(expr->position()));
-
-  // Push.
-  __ Push(x9, x10, x11, x12, x13);
-
-  // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
-  VariableProxy* callee = expr->expression()->AsVariableProxy();
-  if (callee->var()->IsLookupSlot()) {
-    Label slow, done;
-    SetExpressionPosition(callee);
-    // Generate code for loading from variables potentially shadowed
-    // by eval-introduced variables.
-    EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
-    __ Bind(&slow);
-    // Call the runtime to find the function to call (returned in x0)
-    // and the object holding it (returned in x1).
-    __ Push(callee->name());
-    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
-    PushOperands(x0, x1);  // Receiver, function.
-    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
-    if (done.is_linked()) {
-      Label call;
-      __ B(&call);
-      __ Bind(&done);
-      // Push function.
-      // The receiver is implicitly the global receiver. Indicate this
-      // by passing the undefined to the call function stub.
-      __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
-      __ Push(x0, x1);
-      __ Bind(&call);
-    }
-  } else {
-    VisitForStackValue(callee);
-    // refEnv.WithBaseObject()
-    __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
-    PushOperand(x10);  // Reserved receiver slot.
-  }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  ASM_LOCATION("FullCodeGenerator::EmitPossiblyEvalCall");
-  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
-  // to resolve the function we need to call.  Then we call the resolved
-  // function using the given arguments.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  PushCalleeAndWithBaseObject(expr);
-
-  // Push the arguments.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Push a copy of the function (found below the arguments) and
-  // resolve eval.
-  __ Peek(x10, (arg_count + 1) * kPointerSize);
-  __ Push(x10);
-  EmitResolvePossiblyDirectEval(expr);
-
-  // Touch up the stack with the resolved function.
-  __ Poke(x0, (arg_count + 1) * kPointerSize);
-
-  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
-  // Record source position for debugger.
-  SetCallPosition(expr);
-
-  // Call the evaluated function.
-  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
-                                          expr->tail_call_mode())
-                          .code();
-  __ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
-  __ Peek(x1, (arg_count + 1) * kXRegSize);
-  __ Mov(x0, arg_count);
-  __ Call(code, RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->DropAndPlug(1, x0);
-}
-
-
 void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
@@ -2436,7 +1874,7 @@
   __ Peek(x1, arg_count * kXRegSize);
 
   // Record call targets in unoptimized code.
-  __ EmitLoadTypeFeedbackVector(x2);
+  __ EmitLoadFeedbackVector(x2);
   __ Mov(x3, SmiFromSlot(expr->CallNewFeedbackSlot()));
 
   CallConstructStub stub(isolate());
@@ -2448,50 +1886,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
-  ASM_LOCATION("FullCodeGenerator::EmitSuperConstructorCall");
-  SuperCallReference* super_call_ref =
-      expr->expression()->AsSuperCallReference();
-  DCHECK_NOT_NULL(super_call_ref);
-
-  // Push the super constructor target on the stack (may be null,
-  // but the Construct builtin can deal with that properly).
-  VisitForAccumulatorValue(super_call_ref->this_function_var());
-  __ AssertFunction(result_register());
-  __ Ldr(result_register(),
-         FieldMemOperand(result_register(), HeapObject::kMapOffset));
-  __ Ldr(result_register(),
-         FieldMemOperand(result_register(), Map::kPrototypeOffset));
-  PushOperand(result_register());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  SetConstructCallPosition(expr);
-
-  // Load new target into x3.
-  VisitForAccumulatorValue(super_call_ref->new_target_var());
-  __ Mov(x3, result_register());
-
-  // Load function and argument count into x1 and x0.
-  __ Mov(x0, arg_count);
-  __ Peek(x1, arg_count * kXRegSize);
-
-  __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->Plug(x0);
-}
-
-
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2578,28 +1972,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(x0, if_false);
-  __ CompareObjectType(x0, x10, x11, JS_REGEXP_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2851,16 +2223,12 @@
           __ Push(x12, x11);
           __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
           context()->Plug(x0);
-        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+        } else {
+          DCHECK(!var->IsLookupSlot());
+          DCHECK(var->IsStackAllocated() || var->IsContextSlot());
           // Result of deleting non-global, non-dynamic variables is false.
           // The subexpression does not have side effects.
           context()->Plug(is_this);
-        } else {
-          // Non-global variable.  Call the runtime to try to delete from the
-          // context where the variable was introduced.
-          __ Push(var->name());
-          __ CallRuntime(Runtime::kDeleteLookupSlot);
-          context()->Plug(x0);
         }
       } else {
         // Result of deleting non-property, non-variable reference is true.
@@ -2965,33 +2333,6 @@
         break;
       }
 
-      case NAMED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForAccumulatorValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        PushOperand(result_register());
-        const Register scratch = x10;
-        __ Peek(scratch, kPointerSize);
-        PushOperands(scratch, result_register());
-        EmitNamedSuperPropertyLoad(prop);
-        break;
-      }
-
-      case KEYED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForStackValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        VisitForAccumulatorValue(prop->key());
-        PushOperand(result_register());
-        const Register scratch1 = x10;
-        const Register scratch2 = x11;
-        __ Peek(scratch1, 2 * kPointerSize);
-        __ Peek(scratch2, kPointerSize);
-        PushOperands(scratch1, scratch2, result_register());
-        EmitKeyedSuperPropertyLoad(prop);
-        break;
-      }
-
       case KEYED_PROPERTY: {
         VisitForStackValue(prop->obj());
         VisitForStackValue(prop->key());
@@ -3001,6 +2342,8 @@
         break;
       }
 
+      case NAMED_SUPER_PROPERTY:
+      case KEYED_SUPER_PROPERTY:
       case VARIABLE:
         UNREACHABLE();
     }
@@ -3036,14 +2379,12 @@
           case NAMED_PROPERTY:
             __ Poke(x0, kPointerSize);
             break;
-          case NAMED_SUPER_PROPERTY:
-            __ Poke(x0, kPointerSize * 2);
-            break;
           case KEYED_PROPERTY:
             __ Poke(x0, kPointerSize * 2);
             break;
+          case NAMED_SUPER_PROPERTY:
           case KEYED_SUPER_PROPERTY:
-            __ Poke(x0, kPointerSize * 3);
+            UNREACHABLE();
             break;
         }
       }
@@ -3075,14 +2416,12 @@
         case NAMED_PROPERTY:
           __ Poke(x0, kXRegSize);
           break;
-        case NAMED_SUPER_PROPERTY:
-          __ Poke(x0, 2 * kXRegSize);
-          break;
         case KEYED_PROPERTY:
           __ Poke(x0, 2 * kXRegSize);
           break;
+        case NAMED_SUPER_PROPERTY:
         case KEYED_SUPER_PROPERTY:
-          __ Poke(x0, 3 * kXRegSize);
+          UNREACHABLE();
           break;
       }
     }
@@ -3141,30 +2480,6 @@
       }
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      EmitNamedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(x0);
-      }
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      EmitKeyedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(x0);
-      }
-      break;
-    }
     case KEYED_PROPERTY: {
       PopOperand(StoreDescriptor::NameRegister());
       PopOperand(StoreDescriptor::ReceiverRegister());
@@ -3179,6 +2494,10 @@
       }
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
@@ -3250,18 +2569,6 @@
     __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
     __ TestAndSplit(x10, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable),
                     if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)   \
-  } else if (String::Equals(check, factory->type##_string())) { \
-    ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof " \
-                 #type "_string");                              \
-    __ JumpIfSmi(x0, if_true);                                  \
-    __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));    \
-    __ CompareRoot(x0, Heap::k##Type##MapRootIndex);            \
-    Split(eq, if_true, if_false, fall_through);
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
   } else {
     ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof other");
     if (if_false != fall_through) __ B(if_false);
@@ -3306,6 +2613,7 @@
       SetExpressionPosition(expr);
       PopOperand(x1);
       __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+      RestoreContext();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(x0, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
@@ -3376,63 +2684,8 @@
 
 
 void FullCodeGenerator::VisitYield(Yield* expr) {
-  Comment cmnt(masm_, "[ Yield");
-  SetExpressionPosition(expr);
-
-  // Evaluate yielded value first; the initial iterator definition depends on
-  // this. It stays on the stack while we update the iterator.
-  VisitForStackValue(expr->expression());
-
-  // TODO(jbramley): Tidy this up once the merge is done, using named registers
-  // and suchlike. The implementation changes a little by bleeding_edge so I
-  // don't want to spend too much time on it now.
-
-  Label suspend, continuation, post_runtime, resume, exception;
-
-  __ B(&suspend);
-  // TODO(jbramley): This label is bound here because the following code
-  // looks at its pos(). Is it possible to do something more efficient here,
-  // perhaps using Adr?
-  __ Bind(&continuation);
-  // When we arrive here, x0 holds the generator object.
-  __ RecordGeneratorContinuation();
-  __ Ldr(x1, FieldMemOperand(x0, JSGeneratorObject::kResumeModeOffset));
-  __ Ldr(x0, FieldMemOperand(x0, JSGeneratorObject::kInputOrDebugPosOffset));
-  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
-  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
-  __ Cmp(x1, Operand(Smi::FromInt(JSGeneratorObject::kReturn)));
-  __ B(lt, &resume);
-  __ Push(result_register());
-  __ B(gt, &exception);
-  EmitCreateIteratorResult(true);
-  EmitUnwindAndReturn();
-
-  __ Bind(&exception);
-  __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
-                                              : Runtime::kThrow);
-
-  __ Bind(&suspend);
-  OperandStackDepthIncrement(1);  // Not popped on this path.
-  VisitForAccumulatorValue(expr->generator_object());
-  DCHECK((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
-  __ Mov(x1, Smi::FromInt(continuation.pos()));
-  __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
-  __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
-  __ Mov(x1, cp);
-  __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
-                      kLRHasBeenSaved, kDontSaveFPRegs);
-  __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
-  __ Cmp(__ StackPointer(), x1);
-  __ B(eq, &post_runtime);
-  __ Push(x0);  // generator object
-  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  RestoreContext();
-  __ Bind(&post_runtime);
-  PopOperand(result_register());
-  EmitReturnSequence();
-
-  __ Bind(&resume);
-  context()->Plug(result_register());
+  // Resumable functions are not supported.
+  UNREACHABLE();
 }
 
 void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
@@ -3556,68 +2809,6 @@
 }
 
 
-void FullCodeGenerator::EnterFinallyBlock() {
-  ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
-  DCHECK(!result_register().is(x10));
-  // Store pending message while executing finally block.
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ Mov(x10, pending_message_obj);
-  __ Ldr(x10, MemOperand(x10));
-  PushOperand(x10);
-
-  ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
-  ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock");
-  DCHECK(!result_register().is(x10));
-
-  // Restore pending message from stack.
-  PopOperand(x10);
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ Mov(x13, pending_message_obj);
-  __ Str(x10, MemOperand(x13));
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
-  DCHECK(!result_register().is(x10));
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
-  __ Mov(x13, pending_message_obj);
-  __ Str(x10, MemOperand(x13));
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
-  __ Pop(result_register(), x1);  // Restore the accumulator and get the token.
-  for (DeferredCommand cmd : commands_) {
-    Label skip;
-    __ Cmp(x1, Operand(Smi::FromInt(cmd.token)));
-    __ B(ne, &skip);
-    switch (cmd.command) {
-      case kReturn:
-        codegen_->EmitUnwindAndReturn();
-        break;
-      case kThrow:
-        __ Push(result_register());
-        __ CallRuntime(Runtime::kReThrow);
-        break;
-      case kContinue:
-        codegen_->EmitContinue(cmd.target);
-        break;
-      case kBreak:
-        codegen_->EmitBreak(cmd.target);
-        break;
-    }
-    __ bind(&skip);
-  }
-}
-
 #undef __
 
 
diff --git a/src/full-codegen/full-codegen.cc b/src/full-codegen/full-codegen.cc
index ee5e888..58872d0 100644
--- a/src/full-codegen/full-codegen.cc
+++ b/src/full-codegen/full-codegen.cc
@@ -42,6 +42,9 @@
   }
 
   CompilationJob::Status FinalizeJobImpl() final { return SUCCEEDED; }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(FullCodegenCompilationJob);
 };
 
 FullCodeGenerator::FullCodeGenerator(MacroAssembler* masm,
@@ -62,7 +65,6 @@
                            : 0,
                        info->zone()),
       back_edges_(2, info->zone()),
-      handler_table_(info->zone()),
       source_position_table_builder_(info->zone(),
                                      info->SourcePositionRecordingMode()),
       ic_total_count_(0) {
@@ -84,6 +86,7 @@
 bool FullCodeGenerator::MakeCode(CompilationInfo* info, uintptr_t stack_limit) {
   Isolate* isolate = info->isolate();
 
+  DCHECK(!info->shared_info()->must_use_ignition_turbo());
   DCHECK(!FLAG_minimal);
   RuntimeCallTimerScope runtimeTimer(isolate,
                                      &RuntimeCallStats::CompileFullCode);
@@ -114,7 +117,6 @@
       CodeGenerator::MakeCodeEpilogue(&masm, nullptr, info, masm.CodeObject());
   cgen.PopulateDeoptimizationData(code);
   cgen.PopulateTypeFeedbackInfo(code);
-  cgen.PopulateHandlerTable(code);
   code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
   code->set_has_reloc_info_for_serialization(info->will_serialize());
   code->set_allow_osr_at_loop_nesting_level(0);
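
The DCHECK on must_use_ignition_turbo added above is the guard the rest of this patch leans on: functions containing constructs whose full-codegen support is deleted here (try/catch, try/finally, with, for-of, yield, class literals, super and eval calls) are flagged during analysis and compiled with Ignition and TurboFan instead, so the new UNREACHABLE() visitors can never fire. A hypothetical helper summarizing the gate (illustrative only, not V8 API):

    bool CanUseFullCodegen(CompilationInfo* info) {
      // Flagged functions are routed to Ignition + TurboFan instead.
      return !info->shared_info()->must_use_ignition_turbo() && !FLAG_minimal;
    }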
@@ -175,41 +177,15 @@
 }
 
 
-void FullCodeGenerator::PopulateHandlerTable(Handle<Code> code) {
-  int handler_table_size = static_cast<int>(handler_table_.size());
-  Handle<HandlerTable> table =
-      Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
-          HandlerTable::LengthForRange(handler_table_size), TENURED));
-  for (int i = 0; i < handler_table_size; ++i) {
-    table->SetRangeStart(i, handler_table_[i].range_start);
-    table->SetRangeEnd(i, handler_table_[i].range_end);
-    table->SetRangeHandler(i, handler_table_[i].handler_offset,
-                           handler_table_[i].catch_prediction);
-    table->SetRangeData(i, handler_table_[i].stack_depth);
-  }
-  code->set_handler_table(*table);
-}
-
-
-int FullCodeGenerator::NewHandlerTableEntry() {
-  int index = static_cast<int>(handler_table_.size());
-  HandlerTableEntry entry = {0, 0, 0, 0, HandlerTable::UNCAUGHT};
-  handler_table_.push_back(entry);
-  return index;
-}
-
-
 bool FullCodeGenerator::MustCreateObjectLiteralWithRuntime(
     ObjectLiteral* expr) const {
-  return masm()->serializer_enabled() ||
-         !FastCloneShallowObjectStub::IsSupported(expr);
+  return masm()->serializer_enabled() || !expr->IsFastCloningSupported();
 }
 
 
 bool FullCodeGenerator::MustCreateArrayLiteralWithRuntime(
     ArrayLiteral* expr) const {
-  return expr->depth() > 1 ||
-         expr->values()->length() > JSArray::kInitialMaxFastElementArray;
+  return !expr->IsFastCloningSupported();
 }
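
Both predicates now defer to the literal's own IsFastCloningSupported(). For array literals, the removed condition shows what the new check must cover; a sketch mirroring it (illustrative helper, not the actual AST method):

    bool ArrayLiteralSupportsFastCloning(ArrayLiteral* expr) {
      // Shallow literals with a bounded element count can be cloned by stub.
      return expr->depth() <= 1 &&
             expr->values()->length() <= JSArray::kInitialMaxFastElementArray;
    }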
 
 void FullCodeGenerator::Initialize(uintptr_t stack_limit) {
@@ -228,8 +204,7 @@
   __ Call(code, RelocInfo::CODE_TARGET, ast_id);
 }
 
-void FullCodeGenerator::CallLoadIC(FeedbackVectorSlot slot,
-                                   Handle<Object> name) {
+void FullCodeGenerator::CallLoadIC(FeedbackSlot slot, Handle<Object> name) {
   DCHECK(name->IsName());
   __ Move(LoadDescriptor::NameRegister(), name);
 
@@ -237,11 +212,11 @@
 
   Handle<Code> code = CodeFactory::LoadIC(isolate()).code();
   __ Call(code, RelocInfo::CODE_TARGET);
-  if (FLAG_tf_load_ic_stub) RestoreContext();
+  RestoreContext();
 }
 
-void FullCodeGenerator::CallStoreIC(FeedbackVectorSlot slot,
-                                    Handle<Object> name) {
+void FullCodeGenerator::CallStoreIC(FeedbackSlot slot, Handle<Object> name,
+                                    bool store_own_property) {
   DCHECK(name->IsName());
   __ Move(StoreDescriptor::NameRegister(), name);
 
@@ -254,12 +229,23 @@
     EmitLoadSlot(StoreDescriptor::SlotRegister(), slot);
   }
 
-  Handle<Code> code = CodeFactory::StoreIC(isolate(), language_mode()).code();
+  Handle<Code> code;
+  if (store_own_property) {
+    DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
+              feedback_vector_spec()->GetKind(slot));
+    code = CodeFactory::StoreOwnIC(isolate()).code();
+  } else {
+    // Ensure that language mode is in sync with the IC slot kind.
+    DCHECK_EQ(
+        GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+        language_mode());
+    code = CodeFactory::StoreIC(isolate(), language_mode()).code();
+  }
   __ Call(code, RelocInfo::CODE_TARGET);
   RestoreContext();
 }
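
The new DCHECKs encode an invariant introduced by this patch: a store IC slot's kind, fixed in the feedback vector spec at parse time, must agree with the emit site, with own-property stores using kStoreOwnNamed slots and ordinary stores using slots whose kind bakes in the language mode. A standalone sketch of that check (names from this patch; the helper itself is illustrative):

    void CheckStoreSlotKind(const FeedbackVectorSpec* spec, FeedbackSlot slot,
                            LanguageMode mode, bool store_own_property) {
      FeedbackSlotKind kind = spec->GetKind(slot);
      if (store_own_property) {
        DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed, kind);
      } else {
        DCHECK_EQ(GetLanguageModeFromSlotKind(kind), mode);
      }
    }

The same pattern recurs below for keyed stores and, via GetTypeofModeFromSlotKind, for global loads.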
 
-void FullCodeGenerator::CallKeyedStoreIC(FeedbackVectorSlot slot) {
+void FullCodeGenerator::CallKeyedStoreIC(FeedbackSlot slot) {
   STATIC_ASSERT(!StoreDescriptor::kPassLastArgsOnStack ||
                 StoreDescriptor::kStackArgumentsCount == 2);
   if (StoreDescriptor::kPassLastArgsOnStack) {
@@ -269,6 +255,9 @@
     EmitLoadSlot(StoreDescriptor::SlotRegister(), slot);
   }
 
+  // Ensure that language mode is in sync with the IC slot kind.
+  DCHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+            language_mode());
   Handle<Code> code =
       CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
   __ Call(code, RelocInfo::CODE_TARGET);
@@ -499,15 +488,19 @@
 
 void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
                                                TypeofMode typeof_mode) {
-#ifdef DEBUG
   Variable* var = proxy->var();
-  DCHECK(var->IsUnallocated() ||
-         (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
-  EmitLoadSlot(LoadGlobalDescriptor::SlotRegister(),
-               proxy->VariableFeedbackSlot());
+  DCHECK(var->IsUnallocated());
+  __ Move(LoadDescriptor::NameRegister(), var->name());
+
+  FeedbackSlot slot = proxy->VariableFeedbackSlot();
+  // Ensure that typeof mode is in sync with the IC slot kind.
+  DCHECK_EQ(GetTypeofModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+            typeof_mode);
+
+  EmitLoadSlot(LoadGlobalDescriptor::SlotRegister(), slot);
   Handle<Code> code = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
   __ Call(code, RelocInfo::CODE_TARGET);
+  RestoreContext();
 }
 
 void FullCodeGenerator::VisitSloppyBlockFunctionStatement(
@@ -577,21 +570,6 @@
 }
 
 
-void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
-  // Load the arguments on the stack and call the stub.
-  RegExpExecStub stub(isolate());
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 4);
-  VisitForStackValue(args->at(0));
-  VisitForStackValue(args->at(1));
-  VisitForStackValue(args->at(2));
-  VisitForStackValue(args->at(3));
-  __ CallStub(&stub);
-  OperandStackDepthDecrement(4);
-  context()->Plug(result_register());
-}
-
-
 void FullCodeGenerator::EmitIntrinsicAsStubCall(CallRuntime* expr,
                                                 const Callable& callable) {
   ZoneList<Expression*>* args = expr->arguments();
@@ -623,14 +601,6 @@
   context()->Plug(result_register());
 }
 
-void FullCodeGenerator::EmitNewObject(CallRuntime* expr) {
-  EmitIntrinsicAsStubCall(expr, CodeFactory::FastNewObject(isolate()));
-}
-
-void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
-  EmitIntrinsicAsStubCall(expr, CodeFactory::NumberToString(isolate()));
-}
-
 
 void FullCodeGenerator::EmitToString(CallRuntime* expr) {
   EmitIntrinsicAsStubCall(expr, CodeFactory::ToString(isolate()));
@@ -878,30 +848,17 @@
   Expression* key = expr->key();
 
   if (key->IsPropertyName()) {
-    if (!expr->IsSuperAccess()) {
-      VisitForAccumulatorValue(expr->obj());
-      __ Move(LoadDescriptor::ReceiverRegister(), result_register());
-      EmitNamedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      EmitNamedSuperPropertyLoad(expr);
-    }
+    DCHECK(!expr->IsSuperAccess());
+    VisitForAccumulatorValue(expr->obj());
+    __ Move(LoadDescriptor::ReceiverRegister(), result_register());
+    EmitNamedPropertyLoad(expr);
   } else {
-    if (!expr->IsSuperAccess()) {
-      VisitForStackValue(expr->obj());
-      VisitForAccumulatorValue(expr->key());
-      __ Move(LoadDescriptor::NameRegister(), result_register());
-      PopOperand(LoadDescriptor::ReceiverRegister());
-      EmitKeyedPropertyLoad(expr);
-    } else {
-      VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          expr->obj()->AsSuperPropertyReference()->home_object());
-      VisitForStackValue(expr->key());
-      EmitKeyedSuperPropertyLoad(expr);
-    }
+    DCHECK(!expr->IsSuperAccess());
+    VisitForStackValue(expr->obj());
+    VisitForAccumulatorValue(expr->key());
+    __ Move(LoadDescriptor::NameRegister(), result_register());
+    PopOperand(LoadDescriptor::ReceiverRegister());
+    EmitKeyedPropertyLoad(expr);
   }
   PrepareForBailoutForId(expr->LoadId(), BailoutState::TOS_REGISTER);
   context()->Plug(result_register());
@@ -912,8 +869,7 @@
   DCHECK(!context()->IsEffect());
   DCHECK(!context()->IsTest());
 
-  if (proxy != NULL &&
-      (proxy->var()->IsUnallocated() || proxy->var()->IsLookupSlot())) {
+  if (proxy != NULL && proxy->var()->IsUnallocated()) {
     EmitVariableLoad(proxy, INSIDE_TYPEOF);
     PrepareForBailout(proxy, BailoutState::TOS_REGISTER);
   } else {
@@ -987,19 +943,10 @@
   NestedStatement* current = nesting_stack_;
   int context_length = 0;
   // When continuing, we clobber the unpredictable value in the accumulator
-  // with one that's safe for GC.  If we hit an exit from the try block of
-  // try...finally on our way out, we will unconditionally preserve the
-  // accumulator on the stack.
+  // with one that's safe for GC.
   ClearAccumulator();
   while (!current->IsContinueTarget(target)) {
     if (HasStackOverflow()) return;
-    if (current->IsTryFinally()) {
-      Comment cmnt(masm(), "[ Deferred continue through finally");
-      current->Exit(&context_length);
-      DCHECK_EQ(-1, context_length);
-      current->AsTryFinally()->deferred_commands()->RecordContinue(target);
-      return;
-    }
     current = current->Exit(&context_length);
   }
   int stack_depth = current->GetStackDepthAtTarget();
@@ -1028,19 +975,10 @@
   NestedStatement* current = nesting_stack_;
   int context_length = 0;
   // When breaking, we clobber the unpredictable value in the accumulator
-  // with one that's safe for GC.  If we hit an exit from the try block of
-  // try...finally on our way out, we will unconditionally preserve the
-  // accumulator on the stack.
+  // with one that's safe for GC.
   ClearAccumulator();
   while (!current->IsBreakTarget(target)) {
     if (HasStackOverflow()) return;
-    if (current->IsTryFinally()) {
-      Comment cmnt(masm(), "[ Deferred break through finally");
-      current->Exit(&context_length);
-      DCHECK_EQ(-1, context_length);
-      current->AsTryFinally()->deferred_commands()->RecordBreak(target);
-      return;
-    }
     current = current->Exit(&context_length);
   }
   int stack_depth = current->GetStackDepthAtTarget();
@@ -1070,31 +1008,32 @@
   int context_length = 0;
   while (current != NULL) {
     if (HasStackOverflow()) return;
-    if (current->IsTryFinally()) {
-      Comment cmnt(masm(), "[ Deferred return through finally");
-      current->Exit(&context_length);
-      DCHECK_EQ(-1, context_length);
-      current->AsTryFinally()->deferred_commands()->RecordReturn();
-      return;
-    }
     current = current->Exit(&context_length);
   }
   EmitReturnSequence();
 }
 
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
-                                       bool pretenure) {
+                                       FeedbackSlot slot, bool pretenure) {
+  // If the slot is invalid, this is a native function literal; in that
+  // case an empty feedback array could be passed instead.
+
   // If we're running with the --always-opt or the --prepare-always-opt
   // flag, we need to use the runtime function so that the new function
   // we are creating here gets a chance to have its code optimized and
   // doesn't just get a copy of the existing unoptimized code.
   if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
       scope()->is_function_scope()) {
-    FastNewClosureStub stub(isolate());
-    __ Move(stub.GetCallInterfaceDescriptor().GetRegisterParameter(0), info);
-    __ CallStub(&stub);
+    Callable callable = CodeFactory::FastNewClosure(isolate());
+    __ Move(callable.descriptor().GetRegisterParameter(0), info);
+    __ EmitLoadFeedbackVector(callable.descriptor().GetRegisterParameter(1));
+    __ Move(callable.descriptor().GetRegisterParameter(2), SmiFromSlot(slot));
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
   } else {
     __ Push(info);
+    __ EmitLoadFeedbackVector(result_register());
+    __ Push(result_register());
+    __ Push(SmiFromSlot(slot));
     __ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
                              : Runtime::kNewClosure);
   }
@@ -1110,17 +1049,6 @@
   CallLoadIC(prop->PropertyFeedbackSlot(), key->value());
 }
 
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object
-  SetExpressionPosition(prop);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  DCHECK(prop->IsSuperAccess());
-
-  PushOperand(key->value());
-  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
-}
-
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetExpressionPosition(prop);
 
@@ -1131,27 +1059,12 @@
   RestoreContext();
 }
 
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
-  // Stack: receiver, home_object, key.
-  SetExpressionPosition(prop);
-  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
-}
-
-void FullCodeGenerator::EmitPropertyKey(LiteralProperty* property,
-                                        BailoutId bailout_id) {
-  VisitForStackValue(property->key());
-  CallRuntimeWithOperands(Runtime::kToName);
-  PrepareForBailoutForId(bailout_id, BailoutState::TOS_REGISTER);
-  PushOperand(result_register());
-}
-
-void FullCodeGenerator::EmitLoadSlot(Register destination,
-                                     FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitLoadSlot(Register destination, FeedbackSlot slot) {
   DCHECK(!slot.IsInvalid());
   __ Move(destination, SmiFromSlot(slot));
 }
 
-void FullCodeGenerator::EmitPushSlot(FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitPushSlot(FeedbackSlot slot) {
   __ Push(SmiFromSlot(slot));
 }
 
@@ -1165,33 +1078,8 @@
 
 
 void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
-  Comment cmnt(masm_, "[ WithStatement");
-  SetStatementPosition(stmt);
-
-  VisitForAccumulatorValue(stmt->expression());
-  Callable callable = CodeFactory::ToObject(isolate());
-  __ Move(callable.descriptor().GetRegisterParameter(0), result_register());
-  __ Call(callable.code(), RelocInfo::CODE_TARGET);
-  RestoreContext();
-  PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
-  PushOperand(result_register());
-  PushOperand(stmt->scope()->scope_info());
-  PushFunctionArgumentForContextAllocation();
-  CallRuntimeWithOperands(Runtime::kPushWithContext);
-  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
-  PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
-
-  Scope* saved_scope = scope();
-  scope_ = stmt->scope();
-  { WithOrCatch body(this);
-    Visit(stmt->statement());
-  }
-  scope_ = saved_scope;
-
-  // Pop context.
-  LoadContextField(context_register(), Context::PREVIOUS_INDEX);
-  // Update local stack frame context field.
-  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+  // Dynamic scoping is not supported.
+  UNREACHABLE();
 }
 
 
@@ -1312,43 +1200,8 @@
 
 
 void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
-  Comment cmnt(masm_, "[ ForOfStatement");
-
-  Iteration loop_statement(this, stmt);
-  increment_loop_depth();
-
-  // var iterator = iterable[Symbol.iterator]();
-  SetExpressionAsStatementPosition(stmt->assign_iterator());
-  VisitForEffect(stmt->assign_iterator());
-
-  // Loop entry.
-  __ bind(loop_statement.continue_label());
-
-  // result = iterator.next()
-  SetExpressionAsStatementPosition(stmt->next_result());
-  VisitForEffect(stmt->next_result());
-
-  // if (result.done) break;
-  Label result_not_done;
-  VisitForControl(stmt->result_done(), loop_statement.break_label(),
-                  &result_not_done, &result_not_done);
-  __ bind(&result_not_done);
-
-  // each = result.value
-  VisitForEffect(stmt->assign_each());
-
-  // Generate code for the body of the loop.
-  Visit(stmt->body());
-
-  // Check stack before looping.
-  PrepareForBailoutForId(stmt->BackEdgeId(), BailoutState::NO_REGISTERS);
-  EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
-  __ jmp(loop_statement.continue_label());
-
-  // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
-  __ bind(loop_statement.break_label());
-  decrement_loop_depth();
+  // Iterator looping is not supported.
+  UNREACHABLE();
 }
 
 void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
@@ -1358,138 +1211,20 @@
 }
 
 void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  Comment cmnt(masm_, "[ TryCatchStatement");
-  SetStatementPosition(stmt, SKIP_BREAK);
-
-  // The try block adds a handler to the exception handler chain before
-  // entering, and removes it again when exiting normally.  If an exception
-  // is thrown during execution of the try block, the handler is consumed
-  // and control is passed to the catch block with the exception in the
-  // result register.
-
-  Label try_entry, handler_entry, exit;
-  __ jmp(&try_entry);
-  __ bind(&handler_entry);
-  if (stmt->clear_pending_message()) ClearPendingMessage();
-
-  // Exception handler code, the exception is in the result register.
-  // Extend the context before executing the catch block.
-  { Comment cmnt(masm_, "[ Extend catch context");
-    PushOperand(stmt->variable()->name());
-    PushOperand(result_register());
-    PushOperand(stmt->scope()->scope_info());
-    PushFunctionArgumentForContextAllocation();
-    CallRuntimeWithOperands(Runtime::kPushCatchContext);
-    StoreToFrameField(StandardFrameConstants::kContextOffset,
-                      context_register());
-  }
-
-  Scope* saved_scope = scope();
-  scope_ = stmt->scope();
-  DCHECK(scope_->declarations()->is_empty());
-  { WithOrCatch catch_body(this);
-    Visit(stmt->catch_block());
-  }
-  // Restore the context.
-  LoadContextField(context_register(), Context::PREVIOUS_INDEX);
-  StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
-  scope_ = saved_scope;
-  __ jmp(&exit);
-
-  // Try block code. Sets up the exception handler chain.
-  __ bind(&try_entry);
-
-  int handler_index = NewHandlerTableEntry();
-  EnterTryBlock(handler_index, &handler_entry, stmt->catch_prediction());
-  {
-    Comment cmnt_try(masm(), "[ Try block");
-    Visit(stmt->try_block());
-  }
-  ExitTryBlock(handler_index);
-  __ bind(&exit);
+  // Exception handling is not supported.
+  UNREACHABLE();
 }
 
 
 void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  Comment cmnt(masm_, "[ TryFinallyStatement");
-  SetStatementPosition(stmt, SKIP_BREAK);
-
-  // Try finally is compiled by setting up a try-handler on the stack while
-  // executing the try body, and removing it again afterwards.
-  //
-  // The try-finally construct can enter the finally block in three ways:
-  // 1. By exiting the try-block normally. This exits the try block,
-  //    pushes the continuation token and falls through to the finally
-  //    block.
-  // 2. By exiting the try-block with a function-local control flow transfer
-  //    (break/continue/return). The site of the, e.g., break exits the
-  //    try block, pushes the continuation token and jumps to the
-  //    finally block. After the finally block executes, the execution
-  //    continues based on the continuation token to a block that
-  //    continues with the control flow transfer.
-  // 3. By exiting the try-block with a thrown exception. In the handler,
-  //    we push the exception and continuation token and jump to the
-  //    finally block (which will again dispatch based on the token once
-  //    it is finished).
-
-  Label try_entry, handler_entry, finally_entry;
-  DeferredCommands deferred(this, &finally_entry);
-
-  // Jump to try-handler setup and try-block code.
-  __ jmp(&try_entry);
-  __ bind(&handler_entry);
-
-  // Exception handler code.  This code is only executed when an exception
-  // is thrown.  Record the continuation and jump to the finally block.
-  {
-    Comment cmnt_handler(masm(), "[ Finally handler");
-    deferred.RecordThrow();
-  }
-
-  // Set up try handler.
-  __ bind(&try_entry);
-  int handler_index = NewHandlerTableEntry();
-  EnterTryBlock(handler_index, &handler_entry, stmt->catch_prediction());
-  {
-    Comment cmnt_try(masm(), "[ Try block");
-    TryFinally try_body(this, &deferred);
-    Visit(stmt->try_block());
-  }
-  ExitTryBlock(handler_index);
-  // Execute the finally block on the way out.  Clobber the unpredictable
-  // value in the result register with one that's safe for GC because the
-  // finally block will unconditionally preserve the result register on the
-  // stack.
-  ClearAccumulator();
-  deferred.EmitFallThrough();
-  // Fall through to the finally block.
-
-  // Finally block implementation.
-  __ bind(&finally_entry);
-  {
-    Comment cmnt_finally(masm(), "[ Finally block");
-    OperandStackDepthIncrement(2);  // Token and accumulator are on stack.
-    EnterFinallyBlock();
-    Visit(stmt->finally_block());
-    ExitFinallyBlock();
-    OperandStackDepthDecrement(2);  // Token and accumulator were on stack.
-  }
-
-  {
-    Comment cmnt_deferred(masm(), "[ Post-finally dispatch");
-    deferred.EmitCommands();  // Return to the calling code.
-  }
+  // Exception handling is not supported.
+  UNREACHABLE();
 }
 
 
 void FullCodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
-  Comment cmnt(masm_, "[ DebuggerStatement");
-  SetStatementPosition(stmt);
-
-  __ DebugBreak();
-  // Ignore the return value.
-
-  PrepareForBailoutForId(stmt->DebugBreakId(), BailoutState::NO_REGISTERS);
+  // The debugger statement is not supported.
+  UNREACHABLE();
 }
 
 
@@ -1546,46 +1281,13 @@
     SetStackOverflow();
     return;
   }
-  EmitNewClosure(function_info, expr->pretenure());
+  EmitNewClosure(function_info, expr->LiteralFeedbackSlot(), expr->pretenure());
 }
 
 
 void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
-  Comment cmnt(masm_, "[ ClassLiteral");
-
-  if (lit->extends() != NULL) {
-    VisitForStackValue(lit->extends());
-  } else {
-    PushOperand(isolate()->factory()->the_hole_value());
-  }
-
-  VisitForStackValue(lit->constructor());
-
-  PushOperand(Smi::FromInt(lit->start_position()));
-  PushOperand(Smi::FromInt(lit->end_position()));
-
-  CallRuntimeWithOperands(Runtime::kDefineClass);
-  PrepareForBailoutForId(lit->CreateLiteralId(), BailoutState::TOS_REGISTER);
-  PushOperand(result_register());
-
-  // Load the "prototype" from the constructor.
-  __ Move(LoadDescriptor::ReceiverRegister(), result_register());
-  CallLoadIC(lit->PrototypeSlot(), isolate()->factory()->prototype_string());
-  PrepareForBailoutForId(lit->PrototypeId(), BailoutState::TOS_REGISTER);
-  PushOperand(result_register());
-
-  EmitClassDefineProperties(lit);
-  DropOperands(1);
-
-  // Set the constructor to have fast properties.
-  CallRuntimeWithOperands(Runtime::kToFastProperties);
-
-  if (lit->class_variable_proxy() != nullptr) {
-    EmitVariableAssignment(lit->class_variable_proxy()->var(), Token::INIT,
-                           lit->ProxySlot(), HoleCheckMode::kElided);
-  }
-
-  context()->Plug(result_register());
+  // Class literals are not supported.
+  UNREACHABLE();
 }
 
 void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
@@ -1595,7 +1297,7 @@
   LoadFromFrameField(JavaScriptFrameConstants::kFunctionOffset,
                      descriptor.GetRegisterParameter(0));
   __ Move(descriptor.GetRegisterParameter(1),
-          Smi::FromInt(expr->literal_index()));
+          SmiFromSlot(expr->literal_slot()));
   __ Move(descriptor.GetRegisterParameter(2), expr->pattern());
   __ Move(descriptor.GetRegisterParameter(3), Smi::FromInt(expr->flags()));
   __ Call(callable.code(), RelocInfo::CODE_TARGET);
@@ -1612,7 +1314,7 @@
   Comment cmnt(masm_, "[ NativeFunctionLiteral");
   Handle<SharedFunctionInfo> shared =
       Compiler::GetSharedFunctionInfoForNative(expr->extension(), expr->name());
-  EmitNewClosure(shared, false);
+  EmitNewClosure(shared, expr->LiteralFeedbackSlot(), false);
 }
 
 
@@ -1628,32 +1330,6 @@
   if (context()->IsStackValue()) OperandStackDepthIncrement(1);
 }
 
-void FullCodeGenerator::EnterTryBlock(
-    int handler_index, Label* handler,
-    HandlerTable::CatchPrediction catch_prediction) {
-  HandlerTableEntry* entry = &handler_table_[handler_index];
-  entry->range_start = masm()->pc_offset();
-  entry->handler_offset = handler->pos();
-  entry->stack_depth = operand_stack_depth_;
-  entry->catch_prediction = catch_prediction;
-
-  // We are using the operand stack depth, check for accuracy.
-  EmitOperandStackDepthCheck();
-
-  // Push context onto operand stack.
-  STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
-  PushOperand(context_register());
-}
-
-
-void FullCodeGenerator::ExitTryBlock(int handler_index) {
-  HandlerTableEntry* entry = &handler_table_[handler_index];
-  entry->range_end = masm()->pc_offset();
-
-  // Drop context from operand stack.
-  DropOperands(TryBlockConstant::kElementCount);
-}
-
 
 void FullCodeGenerator::VisitCall(Call* expr) {
 #ifdef DEBUG
@@ -1668,48 +1344,38 @@
   Expression* callee = expr->expression();
   Call::CallType call_type = expr->GetCallType();
 
-  if (expr->is_possibly_eval()) {
-    EmitPossiblyEvalCall(expr);
-  } else {
-    switch (call_type) {
-      case Call::GLOBAL_CALL:
-        EmitCallWithLoadIC(expr);
-        break;
-      case Call::WITH_CALL:
-        // Call to a lookup slot looked up through a with scope.
-        PushCalleeAndWithBaseObject(expr);
-        EmitCall(expr);
-        break;
-      case Call::NAMED_PROPERTY_CALL: {
-        Property* property = callee->AsProperty();
-        VisitForStackValue(property->obj());
-        EmitCallWithLoadIC(expr);
-        break;
-      }
-      case Call::KEYED_PROPERTY_CALL: {
-        Property* property = callee->AsProperty();
-        VisitForStackValue(property->obj());
-        EmitKeyedCallWithLoadIC(expr, property->key());
-        break;
-      }
-      case Call::NAMED_SUPER_PROPERTY_CALL:
-        EmitSuperCallWithLoadIC(expr);
-        break;
-      case Call::KEYED_SUPER_PROPERTY_CALL:
-        EmitKeyedSuperCallWithLoadIC(expr);
-        break;
-      case Call::SUPER_CALL:
-        EmitSuperConstructorCall(expr);
-        break;
-      case Call::OTHER_CALL:
-        // Call to an arbitrary expression not handled specially above.
-        VisitForStackValue(callee);
-        OperandStackDepthIncrement(1);
-        __ PushRoot(Heap::kUndefinedValueRootIndex);
-        // Emit function call.
-        EmitCall(expr);
-        break;
+  // Eval is unsupported.
+  CHECK(!expr->is_possibly_eval());
+
+  switch (call_type) {
+    case Call::GLOBAL_CALL:
+      EmitCallWithLoadIC(expr);
+      break;
+    case Call::NAMED_PROPERTY_CALL: {
+      Property* property = callee->AsProperty();
+      VisitForStackValue(property->obj());
+      EmitCallWithLoadIC(expr);
+      break;
     }
+    case Call::KEYED_PROPERTY_CALL: {
+      Property* property = callee->AsProperty();
+      VisitForStackValue(property->obj());
+      EmitKeyedCallWithLoadIC(expr, property->key());
+      break;
+    }
+    case Call::OTHER_CALL:
+      // Call to an arbitrary expression not handled specially above.
+      VisitForStackValue(callee);
+      OperandStackDepthIncrement(1);
+      __ PushRoot(Heap::kUndefinedValueRootIndex);
+      // Emit function call.
+      EmitCall(expr);
+      break;
+    case Call::NAMED_SUPER_PROPERTY_CALL:
+    case Call::KEYED_SUPER_PROPERTY_CALL:
+    case Call::SUPER_CALL:
+    case Call::WITH_CALL:
+      UNREACHABLE();
   }
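
With eval, with-scope, and all super call kinds removed, the switch above is the complete set of calls full-codegen still compiles. A hypothetical predicate capturing the new contract (illustrative only, not V8 API):

    bool FullCodegenSupportsCall(Call::CallType type, bool possibly_eval) {
      if (possibly_eval) return false;  // rejected by the CHECK above
      switch (type) {
        case Call::GLOBAL_CALL:
        case Call::NAMED_PROPERTY_CALL:
        case Call::KEYED_PROPERTY_CALL:
        case Call::OTHER_CALL:
          return true;
        default:  // super/with calls must never reach full-codegen
          return false;
      }
    }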
 
 #ifdef DEBUG
@@ -1769,78 +1435,12 @@
   UNREACHABLE();
 }
 
+void FullCodeGenerator::VisitGetIterator(GetIterator* expr) { UNREACHABLE(); }
 
 void FullCodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
   Visit(expr->expression());
 }
 
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
-    int* context_length) {
-  // The macros used here must preserve the result register.
-
-  // Calculate how many operands to drop to get down to handler block.
-  int stack_drop = codegen_->operand_stack_depth_ - GetStackDepthAtTarget();
-  DCHECK_GE(stack_drop, 0);
-
-  // Because the handler block contains the context of the finally
-  // code, we can restore it directly from there for the finally code
-  // rather than iteratively unwinding contexts via their previous
-  // links.
-  if (*context_length > 0) {
-    __ Drop(stack_drop);  // Down to the handler block.
-    // Restore the context to its dedicated register and the stack.
-    STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
-    __ Pop(codegen_->context_register());
-    codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
-                                codegen_->context_register());
-  } else {
-    // Down to the handler block and also drop context.
-    __ Drop(stack_drop + TryBlockConstant::kElementCount);
-  }
-
-  // The caller will ignore outputs.
-  *context_length = -1;
-  return previous_;
-}
-
-void FullCodeGenerator::DeferredCommands::RecordBreak(Statement* target) {
-  TokenId token = dispenser_.GetBreakContinueToken();
-  commands_.push_back({kBreak, token, target});
-  EmitJumpToFinally(token);
-}
-
-void FullCodeGenerator::DeferredCommands::RecordContinue(Statement* target) {
-  TokenId token = dispenser_.GetBreakContinueToken();
-  commands_.push_back({kContinue, token, target});
-  EmitJumpToFinally(token);
-}
-
-void FullCodeGenerator::DeferredCommands::RecordReturn() {
-  if (return_token_ == TokenDispenserForFinally::kInvalidToken) {
-    return_token_ = TokenDispenserForFinally::kReturnToken;
-    commands_.push_back({kReturn, return_token_, nullptr});
-  }
-  EmitJumpToFinally(return_token_);
-}
-
-void FullCodeGenerator::DeferredCommands::RecordThrow() {
-  if (throw_token_ == TokenDispenserForFinally::kInvalidToken) {
-    throw_token_ = TokenDispenserForFinally::kThrowToken;
-    commands_.push_back({kThrow, throw_token_, nullptr});
-  }
-  EmitJumpToFinally(throw_token_);
-}
-
-void FullCodeGenerator::DeferredCommands::EmitFallThrough() {
-  __ Push(Smi::FromInt(TokenDispenserForFinally::kFallThroughToken));
-  __ Push(result_register());
-}
-
-void FullCodeGenerator::DeferredCommands::EmitJumpToFinally(TokenId token) {
-  __ Push(Smi::FromInt(token));
-  __ Push(result_register());
-  __ jmp(finally_entry_);
-}
 
 bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
   Expression* sub_expr;
@@ -1995,6 +1595,10 @@
 
 FunctionLiteral* FullCodeGenerator::literal() const { return info_->literal(); }
 
+const FeedbackVectorSpec* FullCodeGenerator::feedback_vector_spec() const {
+  return literal()->feedback_vector_spec();
+}
+
 #undef __
 
 
diff --git a/src/full-codegen/full-codegen.h b/src/full-codegen/full-codegen.h
index 558dae1..58a9b9a 100644
--- a/src/full-codegen/full-codegen.h
+++ b/src/full-codegen/full-codegen.h
@@ -16,6 +16,7 @@
 #include "src/deoptimizer.h"
 #include "src/globals.h"
 #include "src/objects.h"
+#include "src/source-position-table.h"
 
 namespace v8 {
 namespace internal {
@@ -82,7 +83,6 @@
 
   class Breakable;
   class Iteration;
-  class TryFinally;
 
   class TestContext;
 
@@ -103,11 +103,9 @@
 
     virtual Breakable* AsBreakable() { return nullptr; }
     virtual Iteration* AsIteration() { return nullptr; }
-    virtual TryFinally* AsTryFinally() { return nullptr; }
 
     virtual bool IsContinueTarget(Statement* target) { return false; }
     virtual bool IsBreakTarget(Statement* target) { return false; }
-    virtual bool IsTryFinally() { return false; }
 
     // Notify the statement that we are exiting it via break, continue, or
     // return and give it a chance to generate cleanup code.  Return the
@@ -185,73 +183,6 @@
     }
   };
 
-  class DeferredCommands {
-   public:
-    enum Command { kReturn, kThrow, kBreak, kContinue };
-    typedef int TokenId;
-    struct DeferredCommand {
-      Command command;
-      TokenId token;
-      Statement* target;
-    };
-
-    DeferredCommands(FullCodeGenerator* codegen, Label* finally_entry)
-        : codegen_(codegen),
-          commands_(codegen->zone()),
-          return_token_(TokenDispenserForFinally::kInvalidToken),
-          throw_token_(TokenDispenserForFinally::kInvalidToken),
-          finally_entry_(finally_entry) {}
-
-    void EmitCommands();
-
-    void RecordBreak(Statement* target);
-    void RecordContinue(Statement* target);
-    void RecordReturn();
-    void RecordThrow();
-    void EmitFallThrough();
-
-   private:
-    MacroAssembler* masm() { return codegen_->masm(); }
-    void EmitJumpToFinally(TokenId token);
-
-    FullCodeGenerator* codegen_;
-    ZoneVector<DeferredCommand> commands_;
-    TokenDispenserForFinally dispenser_;
-    TokenId return_token_;
-    TokenId throw_token_;
-    Label* finally_entry_;
-  };
-
-  // The try block of a try/finally statement.
-  class TryFinally : public NestedStatement {
-   public:
-    TryFinally(FullCodeGenerator* codegen, DeferredCommands* commands)
-        : NestedStatement(codegen), deferred_commands_(commands) {}
-
-    NestedStatement* Exit(int* context_length) override;
-
-    bool IsTryFinally() override { return true; }
-    TryFinally* AsTryFinally() override { return this; }
-
-    DeferredCommands* deferred_commands() { return deferred_commands_; }
-
-   private:
-    DeferredCommands* deferred_commands_;
-  };
-
-  // The body of a with or catch.
-  class WithOrCatch : public NestedStatement {
-   public:
-    explicit WithOrCatch(FullCodeGenerator* codegen)
-        : NestedStatement(codegen) {
-    }
-
-    NestedStatement* Exit(int* context_length) override {
-      ++(*context_length);
-      return previous_;
-    }
-  };
-
   // A platform-specific utility to overwrite the accumulator register
   // with a GC-safe value.
   void ClearAccumulator();
@@ -411,11 +342,16 @@
   void PrepareForBailout(Expression* node, Deoptimizer::BailoutState state);
   void PrepareForBailoutForId(BailoutId id, Deoptimizer::BailoutState state);
 
+  // Returns an int32 for the index into the FixedArray that backs the feedback
+  // vector
+  int32_t IntFromSlot(FeedbackSlot slot) const {
+    return FeedbackVector::GetIndex(slot);
+  }
+
   // Returns a smi for the index into the FixedArray that backs the feedback
   // vector
-  Smi* SmiFromSlot(FeedbackVectorSlot slot) const {
-    return Smi::FromInt(TypeFeedbackVector::GetIndexFromSpec(
-        literal()->feedback_vector_spec(), slot));
+  Smi* SmiFromSlot(FeedbackSlot slot) const {
+    return Smi::FromInt(IntFromSlot(slot));
   }
 
   // Record a call's return site offset, used to rebuild the frame if the
@@ -462,30 +398,22 @@
 
   // Platform-specific code sequences for calls
   void EmitCall(Call* expr, ConvertReceiverMode = ConvertReceiverMode::kAny);
-  void EmitSuperConstructorCall(Call* expr);
   void EmitCallWithLoadIC(Call* expr);
-  void EmitSuperCallWithLoadIC(Call* expr);
   void EmitKeyedCallWithLoadIC(Call* expr, Expression* key);
-  void EmitKeyedSuperCallWithLoadIC(Call* expr);
-  void EmitPossiblyEvalCall(Call* expr);
 
 #define FOR_EACH_FULL_CODE_INTRINSIC(F) \
   F(IsSmi)                              \
   F(IsArray)                            \
   F(IsTypedArray)                       \
-  F(IsRegExp)                           \
   F(IsJSProxy)                          \
   F(Call)                               \
-  F(NewObject)                          \
   F(IsJSReceiver)                       \
   F(GetSuperConstructor)                \
   F(DebugBreakInOptimizedCode)          \
   F(ClassOf)                            \
   F(StringCharCodeAt)                   \
   F(SubString)                          \
-  F(RegExpExec)                         \
   F(ToInteger)                          \
-  F(NumberToString)                     \
   F(ToString)                           \
   F(ToLength)                           \
   F(ToNumber)                           \
@@ -506,23 +434,16 @@
   void RestoreContext();
 
   // Platform-specific code for loading variables.
-  void EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
-                                     TypeofMode typeof_mode, Label* slow);
-  MemOperand ContextSlotOperandCheckExtensions(Variable* var, Label* slow);
-  void EmitDynamicLookupFastCase(VariableProxy* proxy, TypeofMode typeof_mode,
-                                 Label* slow, Label* done);
   void EmitGlobalVariableLoad(VariableProxy* proxy, TypeofMode typeof_mode);
   void EmitVariableLoad(VariableProxy* proxy,
                         TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
 
   void EmitAccessor(ObjectLiteralProperty* property);
 
-  // Expects the arguments and the function already pushed.
-  void EmitResolvePossiblyDirectEval(Call* expr);
-
   // Platform-specific support for allocating a new closure based on
   // the given function info.
-  void EmitNewClosure(Handle<SharedFunctionInfo> info, bool pretenure);
+  void EmitNewClosure(Handle<SharedFunctionInfo> info, FeedbackSlot slot,
+                      bool pretenure);
 
   // Re-usable portions of CallRuntime
   void EmitLoadJSRuntimeFunction(CallRuntime* expr);
@@ -532,26 +453,10 @@
   // The receiver is left on the stack by the IC.
   void EmitNamedPropertyLoad(Property* expr);
 
-  // Load a value from super.named property.
-  // Expect receiver ('this' value) and home_object on the stack.
-  void EmitNamedSuperPropertyLoad(Property* expr);
-
-  // Load a value from super[keyed] property.
-  // Expect receiver ('this' value), home_object and key on the stack.
-  void EmitKeyedSuperPropertyLoad(Property* expr);
-
   // Load a value from a keyed property.
   // The receiver and the key is left on the stack by the IC.
   void EmitKeyedPropertyLoad(Property* expr);
 
-  // Adds the properties to the class (function) object and to its prototype.
-  // Expects the class (function) in the accumulator. The class (function) is
-  // in the accumulator after installing all the properties.
-  void EmitClassDefineProperties(ClassLiteral* lit);
-
-  // Pushes the property key as a Name on the stack.
-  void EmitPropertyKey(LiteralProperty* property, BailoutId bailout_id);
-
   // Apply the compound assignment operator. Expects the left operand on top
   // of the stack and the right one in the accumulator.
   void EmitBinaryOp(BinaryOperation* expr, Token::Value op);
@@ -566,12 +471,11 @@
   // Assign to the given expression as if via '='. The right-hand-side value
   // is expected in the accumulator. slot is only used if FLAG_vector_stores
   // is true.
-  void EmitAssignment(Expression* expr, FeedbackVectorSlot slot);
+  void EmitAssignment(Expression* expr, FeedbackSlot slot);
 
   // Complete a variable assignment.  The right-hand-side value is expected
   // in the accumulator.
-  void EmitVariableAssignment(Variable* var, Token::Value op,
-                              FeedbackVectorSlot slot,
+  void EmitVariableAssignment(Variable* var, Token::Value op, FeedbackSlot slot,
                               HoleCheckMode hole_check_mode);
 
   // Helper functions to EmitVariableAssignment
@@ -582,14 +486,6 @@
   // of the stack and the right-hand-side value in the accumulator.
   void EmitNamedPropertyAssignment(Assignment* expr);
 
-  // Complete a super named property assignment. The right-hand-side value
-  // is expected in accumulator.
-  void EmitNamedSuperPropertyStore(Property* prop);
-
-  // Complete a super named property assignment. The right-hand-side value
-  // is expected in accumulator.
-  void EmitKeyedSuperPropertyStore(Property* prop);
-
   // Complete a keyed property assignment.  The receiver and key are
   // expected on top of the stack and the right-hand-side value in the
   // accumulator.
@@ -603,22 +499,23 @@
   // The value of the initializer is expected to be at the top of the stack.
   // |offset| is the offset in the stack where the home object can be found.
   void EmitSetHomeObject(Expression* initializer, int offset,
-                         FeedbackVectorSlot slot);
+                         FeedbackSlot slot);
 
   void EmitSetHomeObjectAccumulator(Expression* initializer, int offset,
-                                    FeedbackVectorSlot slot);
+                                    FeedbackSlot slot);
 
   // Platform-specific code for loading a slot to a register.
-  void EmitLoadSlot(Register destination, FeedbackVectorSlot slot);
+  void EmitLoadSlot(Register destination, FeedbackSlot slot);
   // Platform-specific code for pushing a slot to the stack.
-  void EmitPushSlot(FeedbackVectorSlot slot);
+  void EmitPushSlot(FeedbackSlot slot);
 
   void CallIC(Handle<Code> code,
               TypeFeedbackId id = TypeFeedbackId::None());
 
-  void CallLoadIC(FeedbackVectorSlot slot, Handle<Object> name);
-  void CallStoreIC(FeedbackVectorSlot slot, Handle<Object> name);
-  void CallKeyedStoreIC(FeedbackVectorSlot slot);
+  void CallLoadIC(FeedbackSlot slot, Handle<Object> name);
+  void CallStoreIC(FeedbackSlot slot, Handle<Object> name,
+                   bool store_own_property = false);
+  void CallKeyedStoreIC(FeedbackSlot slot);
 
   void SetFunctionPosition(FunctionLiteral* fun);
   void SetReturnPosition(FunctionLiteral* fun);
@@ -648,14 +545,7 @@
   void RecordStatementPosition(int pos);
   void RecordPosition(int pos);
 
-  // Non-local control flow support.
-  void EnterTryBlock(int handler_index, Label* handler,
-                     HandlerTable::CatchPrediction catch_prediction);
-  void ExitTryBlock(int handler_index);
-  void EnterFinallyBlock();
-  void ExitFinallyBlock();
-  void ClearPendingMessage();
-
+  // Local control flow support.
   void EmitContinue(Statement* target);
   void EmitBreak(Statement* target);
 
@@ -679,6 +569,7 @@
   LanguageMode language_mode();
   bool has_simple_parameters();
   FunctionLiteral* literal() const;
+  const FeedbackVectorSpec* feedback_vector_spec() const;
   Scope* scope() { return scope_; }
 
   static Register context_register();
@@ -698,8 +589,6 @@
   // and PushCatchContext.
   void PushFunctionArgumentForContextAllocation();
 
-  void PushCalleeAndWithBaseObject(Call* expr);
-
   // AST node visit functions.
 #define DECLARE_VISIT(type) void Visit##type(type* node);
   AST_NODE_LIST(DECLARE_VISIT)
@@ -714,13 +603,10 @@
   void Generate();
   void PopulateDeoptimizationData(Handle<Code> code);
   void PopulateTypeFeedbackInfo(Handle<Code> code);
-  void PopulateHandlerTable(Handle<Code> code);
 
   bool MustCreateObjectLiteralWithRuntime(ObjectLiteral* expr) const;
   bool MustCreateArrayLiteralWithRuntime(ArrayLiteral* expr) const;
 
-  int NewHandlerTableEntry();
-
   struct BailoutEntry {
     BailoutId id;
     unsigned pc_and_state;
@@ -732,14 +618,6 @@
     uint32_t loop_depth;
   };
 
-  struct HandlerTableEntry {
-    unsigned range_start;
-    unsigned range_end;
-    unsigned handler_offset;
-    int stack_depth;
-    HandlerTable::CatchPrediction catch_prediction;
-  };
-
   class ExpressionContext BASE_EMBEDDED {
    public:
     explicit ExpressionContext(FullCodeGenerator* codegen)
@@ -937,7 +815,6 @@
   const ExpressionContext* context_;
   ZoneList<BailoutEntry> bailout_entries_;
   ZoneList<BackEdgeEntry> back_edges_;
-  ZoneVector<HandlerTableEntry> handler_table_;
   SourcePositionTableBuilder source_position_table_builder_;
   int ic_total_count_;
   Handle<Cell> profiling_counter_;
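
The interface hunks above thread a FeedbackSlot value through every load/store helper, relying only on IsInvalid() and ToInt(). For readers unfamiliar with the idiom, here is a minimal, self-contained sketch of such a strong index type — a hypothetical stand-in for illustration, not V8's actual class:

#include <cassert>
#include <iostream>

// Minimal stand-in for a feedback-slot index type. An invalid slot is
// encoded as -1, mirroring the IsInvalid()/ToInt() usage in the hunks above.
class Slot {
 public:
  Slot() : id_(-1) {}                 // default-constructed: invalid
  explicit Slot(int id) : id_(id) {}  // explicit: no silent int conversion
  bool IsInvalid() const { return id_ == -1; }
  int ToInt() const {
    assert(!IsInvalid());  // catch accidental use of an unbound slot
    return id_;
  }

 private:
  int id_;
};

int main() {
  Slot unbound;
  Slot bound(7);
  assert(unbound.IsInvalid());
  assert(!bound.IsInvalid());
  std::cout << "slot index: " << bound.ToInt() << "\n";  // prints 7
}

The payoff of the dedicated type over a raw int is the explicit constructor and the IsInvalid() check, which is exactly what the DCHECK(!slot.IsInvalid()) sites in the hunks below depend on.
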
diff --git a/src/full-codegen/ia32/full-codegen-ia32.cc b/src/full-codegen/ia32/full-codegen-ia32.cc
index 5e80dd3..87db6f1 100644
--- a/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -4,15 +4,16 @@
 
 #if V8_TARGET_ARCH_IA32
 
-#include "src/full-codegen/full-codegen.h"
 #include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
 #include "src/ia32/frames-ia32.h"
 #include "src/ic/ic.h"
 
@@ -120,18 +121,16 @@
   // Increment invocation count for the function.
   {
     Comment cmnt(masm_, "[ Increment invocation count");
-    __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
-    __ mov(ecx, FieldOperand(ecx, LiteralsArray::kFeedbackVectorOffset));
-    __ add(FieldOperand(
-               ecx, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                        TypeFeedbackVector::kHeaderSize),
-           Immediate(Smi::FromInt(1)));
+    __ mov(ecx, FieldOperand(edi, JSFunction::kFeedbackVectorOffset));
+    __ mov(ecx, FieldOperand(ecx, Cell::kValueOffset));
+    __ add(
+        FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                              FeedbackVector::kHeaderSize),
+        Immediate(Smi::FromInt(1)));
   }
 
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
-    // Generators allocate locals, if any, in context slots.
-    DCHECK(!IsGeneratorFunction(literal()->kind()) || locals_count == 0);
     OperandStackDepthIncrement(locals_count);
     if (locals_count == 1) {
       __ push(Immediate(isolate()->factory()->undefined_value()));
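
The rewritten invocation-count sequence above bumps the counter by adding Smi::FromInt(1) directly to the tagged field. That works because 32-bit Smis carry the value shifted left by one with a zero tag bit, so adding two tagged words yields the tagged sum. A standalone sketch of that arithmetic (an illustrative model, not V8 code):

#include <cassert>
#include <cstdint>

// 32-bit Smi model: the value lives in the upper 31 bits, the low tag
// bit is 0.
static inline int32_t SmiFromInt(int32_t v) { return v << 1; }
static inline int32_t SmiToInt(int32_t smi) { return smi >> 1; }

int main() {
  int32_t count = SmiFromInt(41);
  // Incrementing by a *tagged* 1 keeps the word a valid Smi:
  // (a << 1) + (1 << 1) == (a + 1) << 1.
  count += SmiFromInt(1);
  assert(SmiToInt(count) == 42);
}

This is why the generated code can use a plain add on the field without untagging and retagging the count.
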
@@ -189,15 +188,18 @@
       if (info->scope()->new_target_var() != nullptr) {
         __ push(edx);  // Preserve new target.
       }
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info->scope()->scope_type());
         __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
                Immediate(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // The FastNewFunctionContext builtin always allocates in new space.
         need_write_barrier = false;
       } else {
         __ push(edi);
+        __ Push(Smi::FromInt(info->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
       if (info->scope()->new_target_var() != nullptr) {
@@ -247,37 +249,10 @@
   PrepareForBailoutForId(BailoutId::FunctionContext(),
                          BailoutState::NO_REGISTERS);
 
-  // Possibly set up a local binding to the this function which is used in
-  // derived constructors with super calls.
-  Variable* this_function_var = info->scope()->this_function_var();
-  if (this_function_var != nullptr) {
-    Comment cmnt(masm_, "[ This function");
-    if (!function_in_register) {
-      __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers register again, keep it marked as such.
-    }
-    SetVar(this_function_var, edi, ebx, ecx);
-  }
-
-  // Possibly set up a local binding to the new target value.
-  Variable* new_target_var = info->scope()->new_target_var();
-  if (new_target_var != nullptr) {
-    Comment cmnt(masm_, "[ new.target");
-    SetVar(new_target_var, edx, ebx, ecx);
-  }
-
-  // Possibly allocate RestParameters
-  Variable* rest_param = info->scope()->rest_parameter();
-  if (rest_param != nullptr) {
-    Comment cmnt(masm_, "[ Allocate rest parameter array");
-    if (!function_in_register) {
-      __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-    }
-    FastNewRestParameterStub stub(isolate());
-    __ CallStub(&stub);
-    function_in_register = false;
-    SetVar(rest_param, eax, ebx, edx);
-  }
+  // new.target, rest parameters, and this-function are not supported here.
+  DCHECK_NULL(info->scope()->new_target_var());
+  DCHECK_NULL(info->scope()->rest_parameter());
+  DCHECK_NULL(info->scope()->this_function_var());
 
   Variable* arguments = info->scope()->arguments();
   if (arguments != NULL) {
@@ -288,14 +263,16 @@
       __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     }
     if (is_strict(language_mode()) || !has_simple_parameters()) {
-      FastNewStrictArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      __ call(isolate()->builtins()->FastNewStrictArguments(),
+              RelocInfo::CODE_TARGET);
+      RestoreContext();
     } else if (literal()->has_duplicate_parameters()) {
       __ Push(edi);
       __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
     } else {
-      FastNewSloppyArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      __ call(isolate()->builtins()->FastNewSloppyArguments(),
+              RelocInfo::CODE_TARGET);
+      RestoreContext();
     }
 
     SetVar(arguments, eax, ebx, edx);
@@ -505,10 +482,8 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
-         !lit->IsUndetectable());
-  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
-      lit->IsFalse(isolate())) {
+  DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+  if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ jmp(true_label_);
@@ -731,10 +706,12 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
       globals_->Add(isolate()->factory()->undefined_value(), zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
     }
     case VariableLocation::PARAMETER:
@@ -757,16 +734,7 @@
       }
       break;
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ VariableDeclaration");
-      DCHECK_EQ(VAR, variable->mode());
-      DCHECK(!variable->binding_needs_init());
-      __ push(Immediate(variable->name()));
-      __ CallRuntime(Runtime::kDeclareEvalVar);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -779,9 +747,16 @@
   Variable* variable = proxy->var();
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+      // We need the slot where the literals array lives, too.
+      slot = declaration->fun()->LiteralFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
       Handle<SharedFunctionInfo> function =
           Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
@@ -815,15 +790,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      PushOperand(variable->name());
-      VisitForStackValue(declaration->fun());
-      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
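
With this change both declaration visitors append a fixed four-entry record per global to globals_: the name, the variable's feedback slot, the function literal's feedback slot (undefined for plain variables), and the initial value (undefined, or the SharedFunctionInfo for function declarations). A hypothetical model of that record layout, with -1 standing in for the undefined slot:

#include <cassert>
#include <string>
#include <vector>

// Sketch of the per-declaration record the two visitors above emit.
// Field names are illustrative; the real encoding is a flat array with
// four consecutive entries per declared global.
struct GlobalEntry {
  std::string name;
  int feedback_slot;           // VariableFeedbackSlot()
  int literal_slot;            // LiteralFeedbackSlot(); -1 for plain vars
  std::string initial_value;   // "undefined" or a function reference
};

int main() {
  std::vector<GlobalEntry> globals;
  globals.push_back({"x", 3, -1, "undefined"});        // var x;
  globals.push_back({"f", 5, 6, "<shared fn info>"});  // function f() {}
  assert(globals[0].literal_slot == -1);
  assert(globals[1].literal_slot == 6);
}

Keeping the record width fixed is what lets the runtime side walk the array with a constant stride, which is why the variable case pushes undefined twice rather than omitting the unused slot.
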
@@ -834,7 +801,7 @@
   // Call the runtime to declare the globals.
   __ Push(pairs);
   __ Push(Smi::FromInt(DeclareGlobalsFlags()));
-  __ EmitLoadTypeFeedbackVector(eax);
+  __ EmitLoadFeedbackVector(eax);
   __ Push(eax);
   __ CallRuntime(Runtime::kDeclareGlobals);
   // Return value is ignored.
@@ -938,7 +905,7 @@
   Comment cmnt(masm_, "[ ForInStatement");
   SetStatementPosition(stmt, SKIP_BREAK);
 
-  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  FeedbackSlot slot = stmt->ForInFeedbackSlot();
 
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1044,9 +1011,9 @@
 
   // We need to filter the key, so record the slow path here.
   int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(edx);
+  __ EmitLoadFeedbackVector(edx);
   __ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
-         Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+         Immediate(FeedbackVector::MegamorphicSentinel(isolate())));
 
   // eax contains the key.  The receiver in ebx is the second argument to the
   // ForInFilter.  ForInFilter returns undefined if the receiver doesn't
@@ -1090,116 +1057,30 @@
   decrement_loop_depth();
 }
 
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
-                                          FeedbackVectorSlot slot) {
+                                          FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
   __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
 void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
                                                      int offset,
-                                                     FeedbackVectorSlot slot) {
+                                                     FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ mov(StoreDescriptor::ReceiverRegister(), eax);
   __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
-                                                      TypeofMode typeof_mode,
-                                                      Label* slow) {
-  Register context = esi;
-  Register temp = edx;
-
-  int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
-  for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (s->calls_sloppy_eval()) {
-      // Check that extension is "the hole".
-      __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
-                       Heap::kTheHoleValueRootIndex, slow);
-    }
-    // Load next context in chain.
-    __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
-    // Walk the rest of the chain without clobbering esi.
-    context = temp;
-    to_check--;
-  }
-
-  // All extension objects were empty and it is safe to use a normal global
-  // load machinery.
-  EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
-                                                                Label* slow) {
-  DCHECK(var->IsContextSlot());
-  Register context = esi;
-  Register temp = ebx;
-
-  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->NeedsContext()) {
-      if (s->calls_sloppy_eval()) {
-        // Check that extension is "the hole".
-        __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
-                         Heap::kTheHoleValueRootIndex, slow);
-      }
-      __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
-      // Walk the rest of the chain without clobbering esi.
-      context = temp;
-    }
-  }
-  // Check that last extension is "the hole".
-  __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
-                   Heap::kTheHoleValueRootIndex, slow);
-
-  // This function is used only for loads, not stores, so it's safe to
-  // return an esi-based operand (the write barrier cannot be allowed to
-  // destroy the esi register).
-  return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
-                                                  TypeofMode typeof_mode,
-                                                  Label* slow, Label* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  Variable* var = proxy->var();
-  if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
-    __ jmp(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
-    Variable* local = var->local_if_not_shadowed();
-    __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->binding_needs_init()) {
-      __ cmp(eax, isolate()->factory()->the_hole_value());
-      __ j(not_equal, done);
-      __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kThrowReferenceError);
-    } else {
-      __ jmp(done);
-    }
-  }
-}
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   SetExpressionPosition(proxy);
   PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
-  // Three cases: global variables, lookup variables, and all other types of
-  // variables.
+  // Two cases: global variables and all other types of variables.
   switch (var->location()) {
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
@@ -1232,24 +1113,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ Lookup variable");
-      Label done, slow;
-      // Generate code for loading from variables potentially shadowed
-      // by eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
-      __ bind(&slow);
-      __ push(Immediate(var->name()));
-      Runtime::FunctionId function_id =
-          typeof_mode == NOT_INSIDE_TYPEOF
-              ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotInsideTypeof;
-      __ CallRuntime(function_id);
-      __ bind(&done);
-      context()->Plug(eax);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -1275,23 +1139,25 @@
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
 
-  Handle<FixedArray> constant_properties = expr->constant_properties();
+  Handle<BoilerplateDescription> constant_properties =
+      expr->GetOrBuildConstantProperties(isolate());
   int flags = expr->ComputeFlags();
   // If any of the keys would store to the elements array, then we shouldn't
   // allow it.
   if (MustCreateObjectLiteralWithRuntime(expr)) {
     __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-    __ push(Immediate(Smi::FromInt(expr->literal_index())));
+    __ push(Immediate(SmiFromSlot(expr->literal_slot())));
     __ push(Immediate(constant_properties));
     __ push(Immediate(Smi::FromInt(flags)));
     __ CallRuntime(Runtime::kCreateObjectLiteral);
   } else {
     __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-    __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+    __ mov(ebx, Immediate(SmiFromSlot(expr->literal_slot())));
     __ mov(ecx, Immediate(constant_properties));
     __ mov(edx, Immediate(Smi::FromInt(flags)));
-    FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
-    __ CallStub(&stub);
+    Callable callable = CodeFactory::FastCloneShallowObject(
+        isolate(), expr->properties_count());
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1301,10 +1167,9 @@
   bool result_saved = false;
 
   AccessorTable accessor_table(zone());
-  int property_index = 0;
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-    if (property->is_computed_name()) break;
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    DCHECK(!property->is_computed_name());
     if (property->IsCompileTimeValue()) continue;
 
     Literal* key = property->key()->AsLiteral();
@@ -1314,6 +1179,7 @@
       result_saved = true;
     }
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1328,7 +1194,7 @@
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(eax));
             __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
-            CallStoreIC(property->GetSlot(0), key->value());
+            CallStoreIC(property->GetSlot(0), key->value(), true);
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1356,20 +1222,20 @@
         VisitForStackValue(value);
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-        PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+        PrepareForBailoutForId(expr->GetIdForPropertySet(i),
                                BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->setter = property;
         }
         break;
@@ -1392,72 +1258,6 @@
     PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
-  // Object literals have two parts. The "static" part on the left contains no
-  // computed property names, and so we can compute its map ahead of time; see
-  // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
-  // starts with the first computed property name, and continues with all
-  // properties to its right.  All the code from above initializes the static
-  // component of the object literal, and arranges for the map of the result to
-  // reflect the static order in which the keys appear. For the dynamic
-  // properties, we compile them into a series of "SetOwnProperty" runtime
-  // calls. This will preserve insertion order.
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
-    Expression* value = property->value();
-    if (!result_saved) {
-      PushOperand(eax);  // Save result on the stack
-      result_saved = true;
-    }
-
-    PushOperand(Operand(esp, 0));  // Duplicate receiver.
-
-    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
-      DCHECK(!property->is_computed_name());
-      VisitForStackValue(value);
-      DCHECK(property->emit_store());
-      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-      PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             BailoutState::NO_REGISTERS);
-    } else {
-      EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
-      VisitForStackValue(value);
-      if (NeedsHomeObject(value)) {
-        EmitSetHomeObject(value, 2, property->GetSlot());
-      }
-
-      switch (property->kind()) {
-        case ObjectLiteral::Property::CONSTANT:
-        case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        case ObjectLiteral::Property::COMPUTED:
-          if (property->emit_store()) {
-            PushOperand(Smi::FromInt(NONE));
-            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                                   BailoutState::NO_REGISTERS);
-          } else {
-            DropOperands(3);
-          }
-          break;
-
-        case ObjectLiteral::Property::PROTOTYPE:
-          UNREACHABLE();
-          break;
-
-        case ObjectLiteral::Property::GETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-          break;
-
-        case ObjectLiteral::Property::SETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-          break;
-      }
-    }
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1469,29 +1269,22 @@
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
 
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  bool has_constant_fast_elements =
-      IsFastObjectElementsKind(expr->constant_elements_kind());
-
-  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
-  if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
-    // If the only customer of allocation sites is transitioning, then
-    // we can turn it off if we don't have anywhere else to transition to.
-    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
-  }
+  Handle<ConstantElementsPair> constant_elements =
+      expr->GetOrBuildConstantElements(isolate());
 
   if (MustCreateArrayLiteralWithRuntime(expr)) {
     __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-    __ push(Immediate(Smi::FromInt(expr->literal_index())));
+    __ push(Immediate(SmiFromSlot(expr->literal_slot())));
     __ push(Immediate(constant_elements));
     __ push(Immediate(Smi::FromInt(expr->ComputeFlags())));
     __ CallRuntime(Runtime::kCreateArrayLiteral);
   } else {
     __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-    __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+    __ mov(ebx, Immediate(SmiFromSlot(expr->literal_slot())));
     __ mov(ecx, Immediate(constant_elements));
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
-    __ CallStub(&stub);
+    Callable callable =
+        CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1545,17 +1338,6 @@
     case VARIABLE:
       // Nothing to do here.
       break;
-    case NAMED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        PushOperand(MemOperand(esp, kPointerSize));
-        PushOperand(result_register());
-      }
-      break;
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
         // We need the receiver both on the stack and in the register.
@@ -1565,19 +1347,6 @@
         VisitForStackValue(property->obj());
       }
       break;
-    case KEYED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(property->key());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        PushOperand(MemOperand(esp, 2 * kPointerSize));
-        PushOperand(MemOperand(esp, 2 * kPointerSize));
-        PushOperand(result_register());
-      }
-      break;
     case KEYED_PROPERTY: {
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
@@ -1590,6 +1359,10 @@
       }
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 
   // For compound assignments we need another deoptimization point after the
@@ -1602,26 +1375,20 @@
           EmitVariableLoad(expr->target()->AsVariableProxy());
           PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
           break;
-        case NAMED_SUPER_PROPERTY:
-          EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
-        case KEYED_SUPER_PROPERTY:
-          EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
+        case NAMED_SUPER_PROPERTY:
+        case KEYED_SUPER_PROPERTY:
+          UNREACHABLE();
+          break;
       }
     }
 
@@ -1659,72 +1426,20 @@
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
-    case NAMED_SUPER_PROPERTY:
-      EmitNamedSuperPropertyStore(property);
-      context()->Plug(result_register());
-      break;
-    case KEYED_SUPER_PROPERTY:
-      EmitKeyedSuperPropertyStore(property);
-      context()->Plug(result_register());
-      break;
     case KEYED_PROPERTY:
       EmitKeyedPropertyAssignment(expr);
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
 
 void FullCodeGenerator::VisitYield(Yield* expr) {
-  Comment cmnt(masm_, "[ Yield");
-  SetExpressionPosition(expr);
-
-  // Evaluate yielded value first; the initial iterator definition depends on
-  // this.  It stays on the stack while we update the iterator.
-  VisitForStackValue(expr->expression());
-
-  Label suspend, continuation, post_runtime, resume, exception;
-
-  __ jmp(&suspend);
-  __ bind(&continuation);
-  // When we arrive here, eax holds the generator object.
-  __ RecordGeneratorContinuation();
-  __ mov(ebx, FieldOperand(eax, JSGeneratorObject::kResumeModeOffset));
-  __ mov(eax, FieldOperand(eax, JSGeneratorObject::kInputOrDebugPosOffset));
-  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
-  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
-  __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::kReturn)));
-  __ j(less, &resume);
-  __ Push(result_register());
-  __ j(greater, &exception);
-  EmitCreateIteratorResult(true);
-  EmitUnwindAndReturn();
-
-  __ bind(&exception);
-  __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
-                                              : Runtime::kThrow);
-
-  __ bind(&suspend);
-  OperandStackDepthIncrement(1);  // Not popped on this path.
-  VisitForAccumulatorValue(expr->generator_object());
-  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-  __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
-         Immediate(Smi::FromInt(continuation.pos())));
-  __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
-  __ mov(ecx, esi);
-  __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
-                      kDontSaveFPRegs);
-  __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
-  __ cmp(esp, ebx);
-  __ j(equal, &post_runtime);
-  __ push(eax);  // generator object
-  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  RestoreContext();
-  __ bind(&post_runtime);
-  PopOperand(result_register());
-  EmitReturnSequence();
-
-  __ bind(&resume);
-  context()->Plug(result_register());
+  // Resumable functions are not supported.
+  UNREACHABLE();
 }
 
 void FullCodeGenerator::PushOperand(MemOperand operand) {
@@ -1863,58 +1578,6 @@
   context()->Plug(eax);
 }
 
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  for (int i = 0; i < lit->properties()->length(); i++) {
-    ClassLiteral::Property* property = lit->properties()->at(i);
-    Expression* value = property->value();
-
-    if (property->is_static()) {
-      PushOperand(Operand(esp, kPointerSize));  // constructor
-    } else {
-      PushOperand(Operand(esp, 0));  // prototype
-    }
-    EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static prototype property is read only. We handle the non computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read only property we special case this so we do
-    // not need to do this for every property.
-    if (property->is_static() && property->is_computed_name()) {
-      __ CallRuntime(Runtime::kThrowIfStaticPrototype);
-      __ push(eax);
-    }
-
-    VisitForStackValue(value);
-    if (NeedsHomeObject(value)) {
-      EmitSetHomeObject(value, 2, property->GetSlot());
-    }
-
-    switch (property->kind()) {
-      case ClassLiteral::Property::METHOD:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-        break;
-
-      case ClassLiteral::Property::GETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::SETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::FIELD:
-        UNREACHABLE();
-        break;
-    }
-  }
-}
-
-
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
   PopOperand(edx);
   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
@@ -1924,9 +1587,7 @@
   context()->Plug(eax);
 }
 
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
-                                       FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
   DCHECK(expr->IsValidReferenceExpressionOrThis());
 
   Property* prop = expr->AsProperty();
@@ -1948,43 +1609,6 @@
       CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      PushOperand(eax);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      // stack: value, this; eax: home_object
-      Register scratch = ecx;
-      Register scratch2 = edx;
-      __ mov(scratch, result_register());               // home_object
-      __ mov(eax, MemOperand(esp, kPointerSize));       // value
-      __ mov(scratch2, MemOperand(esp, 0));             // this
-      __ mov(MemOperand(esp, kPointerSize), scratch2);  // this
-      __ mov(MemOperand(esp, 0), scratch);              // home_object
-      // stack: this, home_object. eax: value
-      EmitNamedSuperPropertyStore(prop);
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      PushOperand(eax);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(prop->key());
-      Register scratch = ecx;
-      Register scratch2 = edx;
-      __ mov(scratch2, MemOperand(esp, 2 * kPointerSize));  // value
-      // stack: value, this, home_object; eax: key, edx: value
-      __ mov(scratch, MemOperand(esp, kPointerSize));  // this
-      __ mov(MemOperand(esp, 2 * kPointerSize), scratch);
-      __ mov(scratch, MemOperand(esp, 0));  // home_object
-      __ mov(MemOperand(esp, kPointerSize), scratch);
-      __ mov(MemOperand(esp, 0), eax);
-      __ mov(eax, scratch2);
-      // stack: this, home_object, key; eax: value.
-      EmitKeyedSuperPropertyStore(prop);
-      break;
-    }
     case KEYED_PROPERTY: {
       PushOperand(eax);  // Preserve value.
       VisitForStackValue(prop->obj());
@@ -1995,6 +1619,10 @@
       CallKeyedStoreIC(slot);
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
   context()->Plug(eax);
 }
@@ -2011,7 +1639,7 @@
 }
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot,
+                                               FeedbackSlot slot,
                                                HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
@@ -2055,26 +1683,18 @@
 
   } else {
     DCHECK(var->mode() != CONST || op == Token::INIT);
-    if (var->IsLookupSlot()) {
-      // Assignment to var.
-      __ Push(Immediate(var->name()));
-      __ Push(eax);
-      __ CallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreLookupSlot_Strict
-                         : Runtime::kStoreLookupSlot_Sloppy);
-    } else {
-      // Assignment to var or initializing assignment to let/const in harmony
-      // mode.
-      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-      MemOperand location = VarOperand(var, ecx);
-      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
-        // Check for an uninitialized let binding.
-        __ mov(edx, location);
-        __ cmp(edx, isolate()->factory()->the_hole_value());
-        __ Check(equal, kLetBindingReInitialization);
-      }
-      EmitStoreToStackLocalOrContextSlot(var, location);
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    DCHECK(!var->IsLookupSlot());
+    // Assignment to var or initializing assignment to let/const in harmony
+    // mode.
+    MemOperand location = VarOperand(var, ecx);
+    if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+      // Check for an uninitialized let binding.
+      __ mov(edx, location);
+      __ cmp(edx, isolate()->factory()->the_hole_value());
+      __ Check(equal, kLetBindingReInitialization);
     }
+    EmitStoreToStackLocalOrContextSlot(var, location);
   }
 }
 
@@ -2094,34 +1714,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // eax : value
-  // stack : receiver ('this'), home_object
-  DCHECK(prop != NULL);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(key != NULL);
-
-  PushOperand(key->value());
-  PushOperand(eax);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreToSuper_Strict
-                              : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // eax : value
-  // stack : receiver ('this'), home_object, key
-
-  PushOperand(eax);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreKeyedToSuper_Strict
-                              : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
   // eax               : value
@@ -2169,42 +1761,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
-  SetExpressionPosition(expr);
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  // Load the function from the receiver.
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForStackValue(super_ref->home_object());
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperand(eax);
-  PushOperand(eax);
-  PushOperand(Operand(esp, kPointerSize * 2));
-  PushOperand(key->value());
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ mov(Operand(esp, kPointerSize), eax);
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2229,40 +1785,6 @@
 }
 
 
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  SetExpressionPosition(prop);
-  // Load the function from the receiver.
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForStackValue(super_ref->home_object());
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperand(eax);
-  PushOperand(eax);
-  PushOperand(Operand(esp, kPointerSize * 2));
-  VisitForStackValue(prop->key());
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ mov(Operand(esp, kPointerSize), eax);
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2282,8 +1804,9 @@
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
   Handle<Code> code =
-      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
-  __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
+      CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+          .code();
+  __ Move(edx, Immediate(IntFromSlot(expr->CallFeedbackICSlot())));
   __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
   __ Move(eax, Immediate(arg_count));
   CallIC(code);
@@ -2294,111 +1817,6 @@
   context()->DropAndPlug(1, eax);
 }
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
-  int arg_count = expr->arguments()->length();
-  // Push copy of the first argument or undefined if it doesn't exist.
-  if (arg_count > 0) {
-    __ push(Operand(esp, arg_count * kPointerSize));
-  } else {
-    __ push(Immediate(isolate()->factory()->undefined_value()));
-  }
-
-  // Push the enclosing function.
-  __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-
-  // Push the language mode.
-  __ push(Immediate(Smi::FromInt(language_mode())));
-
-  // Push the start position of the scope the calls resides in.
-  __ push(Immediate(Smi::FromInt(scope()->start_position())));
-
-  // Push the source position of the eval call.
-  __ push(Immediate(Smi::FromInt(expr->position())));
-
-  // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
-  VariableProxy* callee = expr->expression()->AsVariableProxy();
-  if (callee->var()->IsLookupSlot()) {
-    Label slow, done;
-    SetExpressionPosition(callee);
-    // Generate code for loading from variables potentially shadowed by
-    // eval-introduced variables.
-    EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
-    __ bind(&slow);
-    // Call the runtime to find the function to call (returned in eax) and
-    // the object holding it (returned in edx).
-    __ Push(callee->name());
-    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
-    PushOperand(eax);  // Function.
-    PushOperand(edx);  // Receiver.
-    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
-    // If fast case code has been generated, emit code to push the function
-    // and receiver and have the slow path jump around this code.
-    if (done.is_linked()) {
-      Label call;
-      __ jmp(&call, Label::kNear);
-      __ bind(&done);
-      // Push function.
-      __ push(eax);
-      // The receiver is implicitly the global receiver. Indicate this by
-      // passing the hole to the call function stub.
-      __ push(Immediate(isolate()->factory()->undefined_value()));
-      __ bind(&call);
-    }
-  } else {
-    VisitForStackValue(callee);
-    // refEnv.WithBaseObject()
-    PushOperand(isolate()->factory()->undefined_value());
-  }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
-  // to resolve the function we need to call.  Then we call the resolved
-  // function using the given arguments.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  PushCalleeAndWithBaseObject(expr);
-
-  // Push the arguments.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Push a copy of the function (found below the arguments) and
-  // resolve eval.
-  __ push(Operand(esp, (arg_count + 1) * kPointerSize));
-  EmitResolvePossiblyDirectEval(expr);
-
-  // Touch up the stack with the resolved function.
-  __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
-
-  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
-  SetCallPosition(expr);
-  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
-                                          expr->tail_call_mode())
-                          .code();
-  __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
-  __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
-  __ Move(eax, Immediate(arg_count));
-  __ call(code, RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->DropAndPlug(1, eax);
-}
-
-
 void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
@@ -2427,7 +1845,7 @@
   __ mov(edi, Operand(esp, arg_count * kPointerSize));
 
   // Record call targets in unoptimized code.
-  __ EmitLoadTypeFeedbackVector(ebx);
+  __ EmitLoadFeedbackVector(ebx);
   __ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
 
   CallConstructStub stub(isolate());
@@ -2439,47 +1857,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
-  SuperCallReference* super_call_ref =
-      expr->expression()->AsSuperCallReference();
-  DCHECK_NOT_NULL(super_call_ref);
-
-  // Push the super constructor target on the stack (may be null,
-  // but the Construct builtin can deal with that properly).
-  VisitForAccumulatorValue(super_call_ref->this_function_var());
-  __ AssertFunction(result_register());
-  __ mov(result_register(),
-         FieldOperand(result_register(), HeapObject::kMapOffset));
-  PushOperand(FieldOperand(result_register(), Map::kPrototypeOffset));
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  SetConstructCallPosition(expr);
-
-  // Load new target into edx.
-  VisitForAccumulatorValue(super_call_ref->new_target_var());
-  __ mov(edx, result_register());
-
-  // Load function and argument count into edi and eax.
-  __ Move(eax, Immediate(arg_count));
-  __ mov(edi, Operand(esp, arg_count * kPointerSize));
-
-  __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2567,28 +1944,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2822,17 +2177,13 @@
           __ push(Immediate(var->name()));
           __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
           context()->Plug(eax);
-        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+        } else {
+          DCHECK(!var->IsLookupSlot());
+          DCHECK(var->IsStackAllocated() || var->IsContextSlot());
           // Result of deleting non-global variables is false.  'this' is
           // not really a variable, though we implement it as one.  The
           // subexpression does not have side effects.
           context()->Plug(is_this);
-        } else {
-          // Non-global variable.  Call the runtime to try to delete from the
-          // context where the variable was introduced.
-          __ Push(var->name());
-          __ CallRuntime(Runtime::kDeleteLookupSlot);
-          context()->Plug(eax);
         }
       } else {
         // Result of deleting non-property, non-variable reference is true.
@@ -2943,30 +2294,6 @@
         break;
       }
 
-      case NAMED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForAccumulatorValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        PushOperand(result_register());
-        PushOperand(MemOperand(esp, kPointerSize));
-        PushOperand(result_register());
-        EmitNamedSuperPropertyLoad(prop);
-        break;
-      }
-
-      case KEYED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForStackValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        VisitForAccumulatorValue(prop->key());
-        PushOperand(result_register());
-        PushOperand(MemOperand(esp, 2 * kPointerSize));
-        PushOperand(MemOperand(esp, 2 * kPointerSize));
-        PushOperand(result_register());
-        EmitKeyedSuperPropertyLoad(prop);
-        break;
-      }
-
       case KEYED_PROPERTY: {
         VisitForStackValue(prop->obj());
         VisitForStackValue(prop->key());
@@ -2977,6 +2304,8 @@
         break;
       }
 
+      case NAMED_SUPER_PROPERTY:
+      case KEYED_SUPER_PROPERTY:
       case VARIABLE:
         UNREACHABLE();
     }
@@ -3010,14 +2339,12 @@
           case NAMED_PROPERTY:
             __ mov(Operand(esp, kPointerSize), eax);
             break;
-          case NAMED_SUPER_PROPERTY:
-            __ mov(Operand(esp, 2 * kPointerSize), eax);
-            break;
           case KEYED_PROPERTY:
             __ mov(Operand(esp, 2 * kPointerSize), eax);
             break;
+          case NAMED_SUPER_PROPERTY:
           case KEYED_SUPER_PROPERTY:
-            __ mov(Operand(esp, 3 * kPointerSize), eax);
+            UNREACHABLE();
             break;
         }
       }
@@ -3057,14 +2384,12 @@
         case NAMED_PROPERTY:
           __ mov(Operand(esp, kPointerSize), eax);
           break;
-        case NAMED_SUPER_PROPERTY:
-          __ mov(Operand(esp, 2 * kPointerSize), eax);
-          break;
         case KEYED_PROPERTY:
           __ mov(Operand(esp, 2 * kPointerSize), eax);
           break;
+        case NAMED_SUPER_PROPERTY:
         case KEYED_SUPER_PROPERTY:
-          __ mov(Operand(esp, 3 * kPointerSize), eax);
+          UNREACHABLE();
           break;
       }
     }
@@ -3123,30 +2448,6 @@
       }
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      EmitNamedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(eax);
-      }
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      EmitKeyedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(eax);
-      }
-      break;
-    }
     case KEYED_PROPERTY: {
       PopOperand(StoreDescriptor::NameRegister());
       PopOperand(StoreDescriptor::ReceiverRegister());
@@ -3162,6 +2463,10 @@
       }
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
@@ -3228,16 +2533,6 @@
     __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
               Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     Split(zero, if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)   \
-  } else if (String::Equals(check, factory->type##_string())) { \
-    __ JumpIfSmi(eax, if_false);                                \
-    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),           \
-           isolate()->factory()->type##_map());                 \
-    Split(equal, if_true, if_false, fall_through);
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
@@ -3278,6 +2573,7 @@
       SetExpressionPosition(expr);
       PopOperand(edx);
       __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+      RestoreContext();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
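
Illustrative sketch, not part of the patch: the RestoreContext() added after the InstanceOf call reflects an invariant this patch enforces in several places — a call to a code object may clobber the context register, so the caller reloads it from its frame slot afterwards. Types below are simplified stand-ins, not V8's:

struct Frame { void* context_slot; };              // saved context in the frame
struct Machine { void* context_reg; Frame* fp; };

// What RestoreContext() effectively emits: reload the context register from
// the frame, e.g. mov esi, [ebp + StandardFrameConstants::kContextOffset].
inline void RestoreContextModel(Machine* m) {
  m->context_reg = m->fp->context_slot;
}
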
@@ -3394,66 +2690,6 @@
 }
 
 
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
-  // Store pending message while executing finally block.
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(edx, Operand::StaticVariable(pending_message_obj));
-  PushOperand(edx);
-
-  ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
-  DCHECK(!result_register().is(edx));
-  // Restore pending message from stack.
-  PopOperand(edx);
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(Operand::StaticVariable(pending_message_obj), edx);
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
-  DCHECK(!result_register().is(edx));
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
-  __ mov(Operand::StaticVariable(pending_message_obj), edx);
-}
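
Illustrative sketch, not part of the patch: the deleted Enter/Exit/ClearPendingMessage trio spills the isolate's pending message across a finally body. A rough, self-contained C++ model of that protocol, with plain stand-ins for the external reference and the operand stack:

#include <stack>
#include <string>

static std::string pending_message;           // stand-in for the isolate slot
static const std::string kTheHole = "<hole>";

void EnterFinallyBlockModel(std::stack<std::string>& operands) {
  operands.push(pending_message);  // save while the finally body runs
  pending_message = kTheHole;      // ClearPendingMessage()
}

void ExitFinallyBlockModel(std::stack<std::string>& operands) {
  pending_message = operands.top();  // restore on the way out
  operands.pop();
}
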
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
-  DCHECK(!result_register().is(edx));
-  __ Pop(result_register());  // Restore the accumulator.
-  __ Pop(edx);                // Get the token.
-  for (DeferredCommand cmd : commands_) {
-    Label skip;
-    __ cmp(edx, Immediate(Smi::FromInt(cmd.token)));
-    __ j(not_equal, &skip);
-    switch (cmd.command) {
-      case kReturn:
-        codegen_->EmitUnwindAndReturn();
-        break;
-      case kThrow:
-        __ Push(result_register());
-        __ CallRuntime(Runtime::kReThrow);
-        break;
-      case kContinue:
-        codegen_->EmitContinue(cmd.target);
-        break;
-      case kBreak:
-        codegen_->EmitBreak(cmd.target);
-        break;
-    }
-    __ bind(&skip);
-  }
-}
-
 #undef __
 
 
diff --git a/src/full-codegen/mips/full-codegen-mips.cc b/src/full-codegen/mips/full-codegen-mips.cc
index 10cdb54..cfc9952 100644
--- a/src/full-codegen/mips/full-codegen-mips.cc
+++ b/src/full-codegen/mips/full-codegen-mips.cc
@@ -12,15 +12,16 @@
 // places where we have to move a previous result in v0 to a0 for the
 // next call: mov(a0, v0). This is not needed on the other architectures.
 
-#include "src/full-codegen/full-codegen.h"
 #include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
 
 #include "src/mips/code-stubs-mips.h"
@@ -140,21 +141,19 @@
   // Increment invocation count for the function.
   {
     Comment cmnt(masm_, "[ Increment invocation count");
-    __ lw(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
-    __ lw(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+    __ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+    __ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
     __ lw(t0, FieldMemOperand(
-                  a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                          TypeFeedbackVector::kHeaderSize));
+                  a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                          FeedbackVector::kHeaderSize));
     __ Addu(t0, t0, Operand(Smi::FromInt(1)));
     __ sw(t0, FieldMemOperand(
-                  a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                          TypeFeedbackVector::kHeaderSize));
+                  a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                          FeedbackVector::kHeaderSize));
   }
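
Illustrative sketch, not part of the patch: the hunk above swaps the old JSFunction -> LiteralsArray -> TypeFeedbackVector chain for JSFunction -> Cell -> FeedbackVector. A self-contained model of the two dependent loads and the invocation-count bump (all types and the slot layout are simplified assumptions):

#include <cstdint>
#include <iostream>

struct FeedbackVector {
  static constexpr int kInvocationCountIndex = 0;  // assumed slot layout
  int32_t slots[4] = {0, 0, 0, 0};
};
struct Cell { FeedbackVector* value; };            // the new indirection
struct JSFunction { Cell* feedback_vector_cell; };

// Mirrors the generated code: load the cell, load its value, bump the count.
void IncrementInvocationCount(JSFunction* f) {
  FeedbackVector* vector = f->feedback_vector_cell->value;
  vector->slots[FeedbackVector::kInvocationCountIndex] += 1;
}

int main() {
  FeedbackVector v;
  Cell c{&v};
  JSFunction f{&c};
  IncrementInvocationCount(&f);
  std::cout << v.slots[0] << "\n";  // prints 1
}
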
 
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
-    // Generators allocate locals, if any, in context slots.
-    DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
     OperandStackDepthIncrement(locals_count);
     if (locals_count > 0) {
       if (locals_count >= 128) {
@@ -210,15 +209,18 @@
       if (info->scope()->new_target_var() != nullptr) {
         __ push(a3);  // Preserve new target.
       }
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info->scope()->scope_type());
         __ li(FastNewFunctionContextDescriptor::SlotsRegister(),
               Operand(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ push(a1);
+        __ Push(Smi::FromInt(info->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
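
Illustrative sketch, not part of the patch: the stub becomes a builtin obtained through CodeFactory, and the runtime fallback now also receives the scope type. The dispatch shape, modeled with stand-in allocators (the threshold value is made up; the real bound comes from ConstructorBuiltinsAssembler):

#include <cstddef>
#include <cstdlib>

constexpr std::size_t kMaxFastContextSlots = 55;  // illustrative bound only

void* AllocateFastContext(std::size_t slots, int /*scope_type*/) {
  return std::calloc(slots, sizeof(void*));       // builtin fast-path stand-in
}
void* AllocateContextRuntime(std::size_t slots, int /*scope_type*/) {
  return std::calloc(slots, sizeof(void*));       // runtime slow-path stand-in
}

void* NewFunctionContext(std::size_t slots, int scope_type) {
  return slots <= kMaxFastContextSlots
             ? AllocateFastContext(slots, scope_type)
             : AllocateContextRuntime(slots, scope_type);
}
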
       if (info->scope()->new_target_var() != nullptr) {
@@ -265,37 +267,10 @@
   PrepareForBailoutForId(BailoutId::FunctionContext(),
                          BailoutState::NO_REGISTERS);
 
-  // Possibly set up a local binding to the this function which is used in
-  // derived constructors with super calls.
-  Variable* this_function_var = info->scope()->this_function_var();
-  if (this_function_var != nullptr) {
-    Comment cmnt(masm_, "[ This function");
-    if (!function_in_register_a1) {
-      __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers register again, keep it marked as such.
-    }
-    SetVar(this_function_var, a1, a0, a2);
-  }
-
-  // Possibly set up a local binding to the new target value.
-  Variable* new_target_var = info->scope()->new_target_var();
-  if (new_target_var != nullptr) {
-    Comment cmnt(masm_, "[ new.target");
-    SetVar(new_target_var, a3, a0, a2);
-  }
-
-  // Possibly allocate RestParameters
-  Variable* rest_param = info->scope()->rest_parameter();
-  if (rest_param != nullptr) {
-    Comment cmnt(masm_, "[ Allocate rest parameter array");
-    if (!function_in_register_a1) {
-      __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-    }
-    FastNewRestParameterStub stub(isolate());
-    __ CallStub(&stub);
-    function_in_register_a1 = false;
-    SetVar(rest_param, v0, a1, a2);
-  }
+  // We don't support new.target, rest parameters, or this-function here.

+  DCHECK_NULL(info->scope()->new_target_var());
+  DCHECK_NULL(info->scope()->rest_parameter());
+  DCHECK_NULL(info->scope()->this_function_var());
 
   Variable* arguments = info->scope()->arguments();
   if (arguments != NULL) {
@@ -306,14 +281,16 @@
       __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     }
     if (is_strict(language_mode()) || !has_simple_parameters()) {
-      FastNewStrictArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+      __ Call(callable.code(), RelocInfo::CODE_TARGET);
+      RestoreContext();
     } else if (literal()->has_duplicate_parameters()) {
       __ Push(a1);
       __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
     } else {
-      FastNewSloppyArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+      __ Call(callable.code(), RelocInfo::CODE_TARGET);
+      RestoreContext();
     }
 
     SetVar(arguments, v0, a1, a2);
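
Illustrative sketch, not part of the patch: the three-way choice above is unchanged; what changed is that the two fast paths are now CodeFactory callables followed by RestoreContext(). The decision itself, as a tiny model:

enum class ArgumentsPath { kStrict, kSloppyGeneric, kSloppyFast };

ArgumentsPath ChooseArgumentsPath(bool strict_or_complex_params,
                                  bool has_duplicate_parameters) {
  if (strict_or_complex_params) return ArgumentsPath::kStrict;   // builtin
  if (has_duplicate_parameters) return ArgumentsPath::kSloppyGeneric;  // runtime
  return ArgumentsPath::kSloppyFast;                             // builtin
}
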
@@ -547,10 +524,8 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
-         !lit->IsUndetectable());
-  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
-      lit->IsFalse(isolate())) {
+  DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+  if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ Branch(false_label_);
   } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ Branch(true_label_);
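
Illustrative sketch, not part of the patch: the two separate null/undefined tests fold into one IsNullOrUndefined predicate, and the DCHECK records the invariant that makes this safe. In a compilable stand-in:

#include <cassert>

struct LiteralModel {  // stand-in for the checks Literal/Object expose
  bool is_null, is_undefined, is_undetectable, is_false;
};

bool IsNullOrUndefined(const LiteralModel& lit) {
  return lit.is_null || lit.is_undefined;
}

// Any undetectable literal must be null or undefined; given that, the false
// branch is taken exactly for null, undefined, or false.
bool BranchesToFalseLabel(const LiteralModel& lit) {
  assert(IsNullOrUndefined(lit) || !lit.is_undetectable);
  return IsNullOrUndefined(lit) || lit.is_false;
}
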
@@ -782,10 +757,12 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
       globals_->Add(isolate()->factory()->undefined_value(), zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
     }
     case VariableLocation::PARAMETER:
@@ -808,17 +785,7 @@
       }
       break;
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ VariableDeclaration");
-      DCHECK_EQ(VAR, variable->mode());
-      DCHECK(!variable->binding_needs_init());
-      __ li(a2, Operand(variable->name()));
-      __ Push(a2);
-      __ CallRuntime(Runtime::kDeclareEvalVar);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
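
Illustrative sketch, not part of the patch: a pattern repeated throughout this patch — retired cases (LOOKUP here, the super-property cases elsewhere) stay listed in the switch and fall into UNREACHABLE() rather than disappearing behind a default. Sketched with assert() standing in for UNREACHABLE():

#include <cassert>

enum class VariableLocation {
  UNALLOCATED, PARAMETER, LOCAL, CONTEXT, LOOKUP, MODULE
};

void DeclareVariableModel(VariableLocation loc) {
  switch (loc) {
    case VariableLocation::UNALLOCATED:
      break;  // recorded in the globals table
    case VariableLocation::PARAMETER:
    case VariableLocation::LOCAL:
    case VariableLocation::CONTEXT:
      break;  // slot initialized if the binding needs it
    case VariableLocation::LOOKUP:
    case VariableLocation::MODULE:
      assert(false && "filtered out before this backend is used");
  }
}

Keeping every enumerator listed (no default) preserves the compiler's exhaustiveness warning if a new location kind is ever added.
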
@@ -831,9 +798,16 @@
   Variable* variable = proxy->var();
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+      // We need the slot where the literals array lives, too.
+      slot = declaration->fun()->LiteralFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
       Handle<SharedFunctionInfo> function =
           Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
@@ -869,17 +843,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      __ li(a2, Operand(variable->name()));
-      PushOperand(a2);
-      // Push initial value for function declaration.
-      VisitForStackValue(declaration->fun());
-      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
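
Illustrative sketch, not part of the patch: with the extra globals_->Add() calls above, each global declaration now flattens to four entries instead of three — name, variable feedback slot, function-literal feedback slot, and initial value. A plain-C++ model of that record (field names are mine, not V8's):

#include <string>
#include <vector>

struct GlobalDeclaration {
  std::string name;
  int variable_feedback_slot;
  int literal_feedback_slot;  // unused (undefined) for plain variables
  const void* initial_value;  // undefined, or a SharedFunctionInfo stand-in
};

std::vector<GlobalDeclaration> globals;

void DeclareGlobalVariable(const std::string& name, int slot) {
  globals.push_back({name, slot, /*literal_feedback_slot=*/-1, nullptr});
}

void DeclareGlobalFunction(const std::string& name, int slot,
                           int literal_slot, const void* sfi) {
  globals.push_back({name, slot, literal_slot, sfi});
}
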
@@ -890,7 +854,7 @@
   // Call the runtime to declare the globals.
   __ li(a1, Operand(pairs));
   __ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
-  __ EmitLoadTypeFeedbackVector(a2);
+  __ EmitLoadFeedbackVector(a2);
   __ Push(a1, a0, a2);
   __ CallRuntime(Runtime::kDeclareGlobals);
   // Return value is ignored.
@@ -994,7 +958,7 @@
   Comment cmnt(masm_, "[ ForInStatement");
   SetStatementPosition(stmt, SKIP_BREAK);
 
-  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  FeedbackSlot slot = stmt->ForInFeedbackSlot();
 
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1112,8 +1076,8 @@
 
   // We need to filter the key; record the slow path here.
   int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(a3);
-  __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ EmitLoadFeedbackVector(a3);
+  __ li(a2, Operand(FeedbackVector::MegamorphicSentinel(isolate())));
   __ sw(a2, FieldMemOperand(a3, FixedArray::OffsetOfElementAt(vector_index)));
 
   __ mov(a0, result_register());
@@ -1162,9 +1126,8 @@
   decrement_loop_depth();
 }
 
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
-                                          FeedbackVectorSlot slot) {
+                                          FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
   __ lw(StoreDescriptor::ValueRegister(),
@@ -1172,10 +1135,9 @@
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
 void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
                                                      int offset,
-                                                     FeedbackVectorSlot slot) {
+                                                     FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Move(StoreDescriptor::ReceiverRegister(), v0);
   __ lw(StoreDescriptor::ValueRegister(),
@@ -1183,93 +1145,6 @@
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
-                                                      TypeofMode typeof_mode,
-                                                      Label* slow) {
-  Register current = cp;
-  Register next = a1;
-  Register temp = a2;
-
-  int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
-  for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (s->calls_sloppy_eval()) {
-      // Check that extension is "the hole".
-      __ lw(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
-      __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-    }
-    // Load next context in chain.
-    __ lw(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
-    // Walk the rest of the chain without clobbering cp.
-    current = next;
-    to_check--;
-  }
-
-  // All extension objects were empty and it is safe to use a normal global
-  // load machinery.
-  EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
-                                                                Label* slow) {
-  DCHECK(var->IsContextSlot());
-  Register context = cp;
-  Register next = a3;
-  Register temp = t0;
-
-  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->NeedsContext()) {
-      if (s->calls_sloppy_eval()) {
-        // Check that extension is "the hole".
-        __ lw(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
-        __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-      }
-      __ lw(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-      // Walk the rest of the chain without clobbering cp.
-      context = next;
-    }
-  }
-  // Check that last extension is "the hole".
-  __ lw(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
-  __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-
-  // This function is used only for loads, not stores, so it's safe to
-  // return a cp-based operand (the write barrier cannot be allowed to
-  // destroy the cp register).
-  return ContextMemOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
-                                                  TypeofMode typeof_mode,
-                                                  Label* slow, Label* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  Variable* var = proxy->var();
-  if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
-    __ Branch(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
-    Variable* local = var->local_if_not_shadowed();
-    __ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->binding_needs_init()) {
-      __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-      __ subu(at, v0, at);  // Sub as compare: at == 0 on eq.
-      __ Branch(done, ne, at, Operand(zero_reg));
-      __ li(a0, Operand(var->name()));
-      __ push(a0);
-      __ CallRuntime(Runtime::kThrowReferenceError);
-    } else {
-      __ Branch(done);
-    }
-  }
-}
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1277,8 +1152,7 @@
   PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
-  // Three cases: global variables, lookup variables, and all other types of
-  // variables.
+  // Two cases: global variables and all other types of variables.
   switch (var->location()) {
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
@@ -1312,24 +1186,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ Lookup variable");
-      Label done, slow;
-      // Generate code for loading from variables potentially shadowed
-      // by eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
-      __ bind(&slow);
-      __ Push(var->name());
-      Runtime::FunctionId function_id =
-          typeof_mode == NOT_INSIDE_TYPEOF
-              ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotInsideTypeof;
-      __ CallRuntime(function_id);
-      __ bind(&done);
-      context()->Plug(v0);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -1356,17 +1213,19 @@
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
 
-  Handle<FixedArray> constant_properties = expr->constant_properties();
+  Handle<BoilerplateDescription> constant_properties =
+      expr->GetOrBuildConstantProperties(isolate());
   __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a2, Operand(SmiFromSlot(expr->literal_slot())));
   __ li(a1, Operand(constant_properties));
   __ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
   if (MustCreateObjectLiteralWithRuntime(expr)) {
     __ Push(a3, a2, a1, a0);
     __ CallRuntime(Runtime::kCreateObjectLiteral);
   } else {
-    FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
-    __ CallStub(&stub);
+    Callable callable = CodeFactory::FastCloneShallowObject(
+        isolate(), expr->properties_count());
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
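
Illustrative sketch, not part of the patch: several hunks in this file replace per-site stub objects (FastCloneShallowObjectStub and friends) with Callables handed out by CodeFactory. The shape of that pattern, with std::function standing in for a Handle<Code>:

#include <functional>

struct Callable { std::function<void(int)> code; };  // code + convention

struct CodeFactoryModel {  // assumed, much-simplified mirror of CodeFactory
  static Callable FastCloneShallowObject() {
    return Callable{[](int properties_count) {
      (void)properties_count;  // ... clone the boilerplate's fast properties
    }};
  }
};

void EmitObjectLiteralModel(bool must_use_runtime, int properties_count) {
  if (must_use_runtime) {
    // CallRuntime(Runtime::kCreateObjectLiteral) in the real code.
  } else {
    Callable callable = CodeFactoryModel::FastCloneShallowObject();
    callable.code(properties_count);  // __ Call(callable.code(), CODE_TARGET)
  }
}
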
@@ -1376,10 +1235,9 @@
   bool result_saved = false;
 
   AccessorTable accessor_table(zone());
-  int property_index = 0;
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-    if (property->is_computed_name()) break;
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    DCHECK(!property->is_computed_name());
     if (property->IsCompileTimeValue()) continue;
 
     Literal* key = property->key()->AsLiteral();
@@ -1389,6 +1247,7 @@
       result_saved = true;
     }
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1404,7 +1263,7 @@
             __ mov(StoreDescriptor::ValueRegister(), result_register());
             DCHECK(StoreDescriptor::ValueRegister().is(a0));
             __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-            CallStoreIC(property->GetSlot(0), key->value());
+            CallStoreIC(property->GetSlot(0), key->value(), true);
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1438,20 +1297,20 @@
         VisitForStackValue(value);
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-        PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+        PrepareForBailoutForId(expr->GetIdForPropertySet(i),
                                BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->setter = property;
         }
         break;
@@ -1474,73 +1333,6 @@
     PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
-  // Object literals have two parts. The "static" part on the left contains no
-  // computed property names, and so we can compute its map ahead of time; see
-  // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
-  // starts with the first computed property name, and continues with all
-  // properties to its right.  All the code from above initializes the static
-  // component of the object literal, and arranges for the map of the result to
-  // reflect the static order in which the keys appear. For the dynamic
-  // properties, we compile them into a series of "SetOwnProperty" runtime
-  // calls. This will preserve insertion order.
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
-    Expression* value = property->value();
-    if (!result_saved) {
-      PushOperand(v0);  // Save result on the stack
-      result_saved = true;
-    }
-
-    __ lw(a0, MemOperand(sp));  // Duplicate receiver.
-    PushOperand(a0);
-
-    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
-      DCHECK(!property->is_computed_name());
-      VisitForStackValue(value);
-      DCHECK(property->emit_store());
-      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-      PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             BailoutState::NO_REGISTERS);
-    } else {
-      EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
-      VisitForStackValue(value);
-      if (NeedsHomeObject(value)) {
-        EmitSetHomeObject(value, 2, property->GetSlot());
-      }
-
-      switch (property->kind()) {
-        case ObjectLiteral::Property::CONSTANT:
-        case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        case ObjectLiteral::Property::COMPUTED:
-          if (property->emit_store()) {
-            PushOperand(Smi::FromInt(NONE));
-            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                                   BailoutState::NO_REGISTERS);
-          } else {
-            DropOperands(3);
-          }
-          break;
-
-        case ObjectLiteral::Property::PROTOTYPE:
-          UNREACHABLE();
-          break;
-
-        case ObjectLiteral::Property::GETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-          break;
-
-        case ObjectLiteral::Property::SETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-          break;
-      }
-    }
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1552,28 +1344,21 @@
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
 
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  bool has_fast_elements =
-      IsFastObjectElementsKind(expr->constant_elements_kind());
-
-  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
-  if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
-    // If the only customer of allocation sites is transitioning, then
-    // we can turn it off if we don't have anywhere else to transition to.
-    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
-  }
+  Handle<ConstantElementsPair> constant_elements =
+      expr->GetOrBuildConstantElements(isolate());
 
   __ mov(a0, result_register());
   __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a2, Operand(SmiFromSlot(expr->literal_slot())));
   __ li(a1, Operand(constant_elements));
   if (MustCreateArrayLiteralWithRuntime(expr)) {
     __ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
     __ Push(a3, a2, a1, a0);
     __ CallRuntime(Runtime::kCreateArrayLiteral);
   } else {
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
-    __ CallStub(&stub);
+    Callable callable =
+        CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
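
Illustrative sketch, not part of the patch: literal sites are now addressed by feedback slot (SmiFromSlot(expr->literal_slot())) instead of a per-function literal index. A stand-in for the slot-to-Smi conversion, assuming the 32-bit tagging convention (value shifted left one bit; 64-bit V8 tags differently):

#include <cstdint>

struct FeedbackSlot { int id; int ToInt() const { return id; } };
struct Smi { intptr_t value; };

Smi SmiFromSlot(FeedbackSlot slot) {
  return Smi{static_cast<intptr_t>(slot.ToInt()) << 1};  // tag as a small int
}
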
@@ -1638,34 +1423,6 @@
         VisitForStackValue(property->obj());
       }
       break;
-    case NAMED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        const Register scratch = a1;
-        __ lw(scratch, MemOperand(sp, kPointerSize));
-        PushOperands(scratch, result_register());
-      }
-      break;
-    case KEYED_SUPER_PROPERTY: {
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(property->key());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        const Register scratch1 = t0;
-        const Register scratch2 = a1;
-        __ lw(scratch1, MemOperand(sp, 2 * kPointerSize));
-        __ lw(scratch2, MemOperand(sp, 1 * kPointerSize));
-        PushOperands(scratch1, scratch2, result_register());
-      }
-      break;
-    }
     case KEYED_PROPERTY:
       // We need the key and receiver on both the stack and in v0 and a1.
       if (expr->is_compound()) {
@@ -1679,6 +1436,10 @@
         VisitForStackValue(property->key());
       }
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 
   // For compound assignments we need another deoptimization point after the
@@ -1695,21 +1456,15 @@
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
-        case NAMED_SUPER_PROPERTY:
-          EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
-        case KEYED_SUPER_PROPERTY:
-          EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
+        case NAMED_SUPER_PROPERTY:
+        case KEYED_SUPER_PROPERTY:
+          UNREACHABLE();
+          break;
       }
     }
 
@@ -1748,69 +1503,20 @@
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
-    case NAMED_SUPER_PROPERTY:
-      EmitNamedSuperPropertyStore(property);
-      context()->Plug(v0);
-      break;
-    case KEYED_SUPER_PROPERTY:
-      EmitKeyedSuperPropertyStore(property);
-      context()->Plug(v0);
-      break;
     case KEYED_PROPERTY:
       EmitKeyedPropertyAssignment(expr);
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
 
 void FullCodeGenerator::VisitYield(Yield* expr) {
-  Comment cmnt(masm_, "[ Yield");
-  SetExpressionPosition(expr);
-
-  // Evaluate yielded value first; the initial iterator definition depends on
-  // this.  It stays on the stack while we update the iterator.
-  VisitForStackValue(expr->expression());
-
-  Label suspend, continuation, post_runtime, resume, exception;
-
-  __ jmp(&suspend);
-  __ bind(&continuation);
-  // When we arrive here, v0 holds the generator object.
-  __ RecordGeneratorContinuation();
-  __ lw(a1, FieldMemOperand(v0, JSGeneratorObject::kResumeModeOffset));
-  __ lw(v0, FieldMemOperand(v0, JSGeneratorObject::kInputOrDebugPosOffset));
-  __ Branch(&resume, eq, a1, Operand(Smi::FromInt(JSGeneratorObject::kNext)));
-  __ Push(result_register());
-  __ Branch(&exception, eq, a1,
-            Operand(Smi::FromInt(JSGeneratorObject::kThrow)));
-  EmitCreateIteratorResult(true);
-  EmitUnwindAndReturn();
-
-  __ bind(&exception);
-  __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
-                                              : Runtime::kThrow);
-
-  __ bind(&suspend);
-  OperandStackDepthIncrement(1);  // Not popped on this path.
-  VisitForAccumulatorValue(expr->generator_object());
-  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-  __ li(a1, Operand(Smi::FromInt(continuation.pos())));
-  __ sw(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
-  __ sw(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
-  __ mov(a1, cp);
-  __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
-                      kRAHasBeenSaved, kDontSaveFPRegs);
-  __ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
-  __ Branch(&post_runtime, eq, sp, Operand(a1));
-  __ push(v0);  // generator object
-  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  RestoreContext();
-  __ bind(&post_runtime);
-  PopOperand(result_register());
-  EmitReturnSequence();
-
-  __ bind(&resume);
-  context()->Plug(result_register());
+  // Resumable functions are not supported.
+  UNREACHABLE();
 }
 
 void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
@@ -1959,60 +1665,6 @@
   context()->Plug(v0);
 }
 
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  for (int i = 0; i < lit->properties()->length(); i++) {
-    ClassLiteral::Property* property = lit->properties()->at(i);
-    Expression* value = property->value();
-
-    Register scratch = a1;
-    if (property->is_static()) {
-      __ lw(scratch, MemOperand(sp, kPointerSize));  // constructor
-    } else {
-      __ lw(scratch, MemOperand(sp, 0));  // prototype
-    }
-    PushOperand(scratch);
-    EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static prototype property is read only. We handle the non computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read only property we special case this so we do
-    // not need to do this for every property.
-    if (property->is_static() && property->is_computed_name()) {
-      __ CallRuntime(Runtime::kThrowIfStaticPrototype);
-      __ push(v0);
-    }
-
-    VisitForStackValue(value);
-    if (NeedsHomeObject(value)) {
-      EmitSetHomeObject(value, 2, property->GetSlot());
-    }
-
-    switch (property->kind()) {
-      case ClassLiteral::Property::METHOD:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-        break;
-
-      case ClassLiteral::Property::GETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::SETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::FIELD:
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
   __ mov(a0, result_register());
   PopOperand(a1);
@@ -2023,9 +1675,7 @@
   context()->Plug(v0);
 }
 
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
-                                       FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
   DCHECK(expr->IsValidReferenceExpressionOrThis());
 
   Property* prop = expr->AsProperty();
@@ -2047,43 +1697,6 @@
       CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      PushOperand(v0);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      // stack: value, this; v0: home_object
-      Register scratch = a2;
-      Register scratch2 = a3;
-      __ mov(scratch, result_register());             // home_object
-      __ lw(v0, MemOperand(sp, kPointerSize));        // value
-      __ lw(scratch2, MemOperand(sp, 0));             // this
-      __ sw(scratch2, MemOperand(sp, kPointerSize));  // this
-      __ sw(scratch, MemOperand(sp, 0));              // home_object
-      // stack: this, home_object; v0: value
-      EmitNamedSuperPropertyStore(prop);
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      PushOperand(v0);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(prop->key());
-      Register scratch = a2;
-      Register scratch2 = a3;
-      __ lw(scratch2, MemOperand(sp, 2 * kPointerSize));  // value
-      // stack: value, this, home_object; v0: key, a3: value
-      __ lw(scratch, MemOperand(sp, kPointerSize));  // this
-      __ sw(scratch, MemOperand(sp, 2 * kPointerSize));
-      __ lw(scratch, MemOperand(sp, 0));  // home_object
-      __ sw(scratch, MemOperand(sp, kPointerSize));
-      __ sw(v0, MemOperand(sp, 0));
-      __ Move(v0, scratch2);
-      // stack: this, home_object, key; v0: value.
-      EmitKeyedSuperPropertyStore(prop);
-      break;
-    }
     case KEYED_PROPERTY: {
       PushOperand(result_register());  // Preserve value.
       VisitForStackValue(prop->obj());
@@ -2094,6 +1707,10 @@
       CallKeyedStoreIC(slot);
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
   context()->Plug(v0);
 }
@@ -2112,7 +1729,7 @@
 }
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot,
+                                               FeedbackSlot slot,
                                                HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
@@ -2156,26 +1773,18 @@
 
   } else {
     DCHECK(var->mode() != CONST || op == Token::INIT);
-    if (var->IsLookupSlot()) {
-      // Assignment to var.
-      __ Push(var->name());
-      __ Push(v0);
-      __ CallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreLookupSlot_Strict
-                         : Runtime::kStoreLookupSlot_Sloppy);
-    } else {
-      // Assignment to var or initializing assignment to let/const in harmony
-      // mode.
-      DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
-      MemOperand location = VarOperand(var, a1);
-      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
-        // Check for an uninitialized let binding.
-        __ lw(a2, location);
-        __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
-        __ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
-      }
-      EmitStoreToStackLocalOrContextSlot(var, location);
+    DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+    DCHECK(!var->IsLookupSlot());
+    // Assignment to var or initializing assignment to let/const in harmony
+    // mode.
+    MemOperand location = VarOperand(var, a1);
+    if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+      // Check for an uninitialized let binding.
+      __ lw(a2, location);
+      __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+      __ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
     }
+    EmitStoreToStackLocalOrContextSlot(var, location);
   }
 }
 
@@ -2195,35 +1804,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // v0 : value
-  // stack : receiver ('this'), home_object
-  DCHECK(prop != NULL);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(key != NULL);
-
-  PushOperand(key->value());
-  PushOperand(v0);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreToSuper_Strict
-                              : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to keyed property of super.
-  // v0 : value
-  // stack : receiver ('this'), home_object, key
-  DCHECK(prop != NULL);
-
-  PushOperand(v0);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreKeyedToSuper_Strict
-                              : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
   // Call keyed store IC.
@@ -2277,43 +1857,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
-  SetExpressionPosition(expr);
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  // Load the function from the receiver.
-  const Register scratch = a1;
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForAccumulatorValue(super_ref->home_object());
-  __ mov(scratch, v0);
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperands(scratch, v0, v0, scratch);
-  PushOperand(key->value());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ sw(v0, MemOperand(sp, kPointerSize));
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2339,41 +1882,6 @@
 }
 
 
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  SetExpressionPosition(prop);
-  // Load the function from the receiver.
-  const Register scratch = a1;
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForAccumulatorValue(super_ref->home_object());
-  __ Move(scratch, v0);
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperands(scratch, v0, v0, scratch);
-  VisitForStackValue(prop->key());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ sw(v0, MemOperand(sp, kPointerSize));
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2394,8 +1902,9 @@
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
   Handle<Code> code =
-      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
-  __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
+      CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+          .code();
+  __ li(a3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
   __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ li(a0, Operand(arg_count));
   CallIC(code);
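
Illustrative sketch, not part of the patch: the call IC switches to a trampoline — the call site passes a raw slot index (IntFromSlot) and, as I read it, the trampoline loads the feedback vector itself, where the old CallIC expected the caller to materialize a Smi slot. A rough model; LoadVectorFromFunction is my stand-in for that load:

struct FeedbackSlot { int id; int ToInt() const { return id; } };
struct FeedbackVector { int slots[16] = {}; };

inline int IntFromSlot(FeedbackSlot slot) { return slot.ToInt(); }

FeedbackVector* LoadVectorFromFunction() {  // what the trampoline does first
  static FeedbackVector vector;
  return &vector;
}

void CallICTrampolineModel(int slot_index) {
  FeedbackVector* vector = LoadVectorFromFunction();
  vector->slots[slot_index] += 1;  // e.g. update the call count for this site
}
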
@@ -2406,115 +1915,6 @@
   context()->DropAndPlug(1, v0);
 }
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
-  int arg_count = expr->arguments()->length();
-  // t4: copy of the first argument or undefined if it doesn't exist.
-  if (arg_count > 0) {
-    __ lw(t4, MemOperand(sp, arg_count * kPointerSize));
-  } else {
-    __ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
-  }
-
-  // t3: the receiver of the enclosing function.
-  __ lw(t3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
-  // t2: the language mode.
-  __ li(t2, Operand(Smi::FromInt(language_mode())));
-
-  // t1: the start position of the scope the call resides in.
-  __ li(t1, Operand(Smi::FromInt(scope()->start_position())));
-
-  // t0: the source position of the eval call.
-  __ li(t0, Operand(Smi::FromInt(expr->position())));
-
-  // Do the runtime call.
-  __ Push(t4, t3, t2, t1, t0);
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
-  VariableProxy* callee = expr->expression()->AsVariableProxy();
-  if (callee->var()->IsLookupSlot()) {
-    Label slow, done;
-
-    SetExpressionPosition(callee);
-    // Generate code for loading from variables potentially shadowed by
-    // eval-introduced variables.
-    EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
-    __ bind(&slow);
-    // Call the runtime to find the function to call (returned in v0)
-    // and the object holding it (returned in v1).
-    __ Push(callee->name());
-    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
-    PushOperands(v0, v1);  // Function, receiver.
-    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
-    if (done.is_linked()) {
-      Label call;
-      __ Branch(&call);
-      __ bind(&done);
-      // Push function.
-      __ push(v0);
-      // The receiver is implicitly the global receiver. Indicate this
-      // by passing undefined to the call function stub.
-      __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
-      __ push(a1);
-      __ bind(&call);
-    }
-  } else {
-    VisitForStackValue(callee);
-    // refEnv.WithBaseObject()
-    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-    PushOperand(a2);  // Reserved receiver slot.
-  }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
-  // to resolve the function we need to call.  Then we call the resolved
-  // function using the given arguments.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  PushCalleeAndWithBaseObject(expr);
-
-  // Push the arguments.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Push a copy of the function (found below the arguments) and
-  // resolve eval.
-  __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ push(a1);
-  EmitResolvePossiblyDirectEval(expr);
-
-  // Touch up the stack with the resolved function.
-  __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-
-  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-  // Record source position for debugger.
-  SetCallPosition(expr);
-  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
-                                          expr->tail_call_mode())
-                          .code();
-  __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
-  __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ li(a0, Operand(arg_count));
-  __ Call(code, RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->DropAndPlug(1, v0);
-}
-
-
 void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
@@ -2543,7 +1943,7 @@
   __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
 
   // Record call targets in unoptimized code.
-  __ EmitLoadTypeFeedbackVector(a2);
+  __ EmitLoadFeedbackVector(a2);
   __ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
 
   CallConstructStub stub(isolate());
@@ -2555,49 +1955,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
-  SuperCallReference* super_call_ref =
-      expr->expression()->AsSuperCallReference();
-  DCHECK_NOT_NULL(super_call_ref);
-
-  // Push the super constructor target on the stack (may be null,
-  // but the Construct builtin can deal with that properly).
-  VisitForAccumulatorValue(super_call_ref->this_function_var());
-  __ AssertFunction(result_register());
-  __ lw(result_register(),
-        FieldMemOperand(result_register(), HeapObject::kMapOffset));
-  __ lw(result_register(),
-        FieldMemOperand(result_register(), Map::kPrototypeOffset));
-  PushOperand(result_register());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  SetConstructCallPosition(expr);
-
-  // Load new target into a3.
-  VisitForAccumulatorValue(super_call_ref->new_target_var());
-  __ mov(a3, result_register());
-
-  // Load function and argument count into a1 and a0.
-  __ li(a0, Operand(arg_count));
-  __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
-
-  __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2687,28 +2044,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(v0, if_false);
-  __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2944,16 +2279,12 @@
           __ Push(a2, a1);
           __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
           context()->Plug(v0);
-        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+        } else {
+          DCHECK(!var->IsLookupSlot());
+          DCHECK(var->IsStackAllocated() || var->IsContextSlot());
           // Result of deleting non-global, non-dynamic variables is false.
           // The subexpression does not have side effects.
           context()->Plug(is_this);
-        } else {
-          // Non-global variable.  Call the runtime to try to delete from the
-          // context where the variable was introduced.
-          __ Push(var->name());
-          __ CallRuntime(Runtime::kDeleteLookupSlot);
-          context()->Plug(v0);
         }
       } else {
         // Result of deleting non-property, non-variable reference is true.
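
Illustrative sketch, not part of the patch: the branch above encodes JavaScript's delete semantics for the cases full-codegen still handles — deleting a stack- or context-allocated binding is always false (bindings are not configurable), while `delete this` evaluates to true. As a tiny decision table:

enum class DeleteTarget { kThis, kStackBinding, kContextBinding };

bool DeleteResult(DeleteTarget t) {
  switch (t) {
    case DeleteTarget::kThis:
      return true;   // `delete this` is allowed and yields true
    case DeleteTarget::kStackBinding:
    case DeleteTarget::kContextBinding:
      return false;  // variable bindings cannot be deleted
  }
  return false;
}
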
@@ -3059,31 +2390,6 @@
         break;
       }
 
-      case NAMED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForAccumulatorValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        const Register scratch = a1;
-        __ lw(scratch, MemOperand(sp, 0));  // this
-        PushOperands(result_register(), scratch, result_register());
-        EmitNamedSuperPropertyLoad(prop);
-        break;
-      }
-
-      case KEYED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForStackValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        VisitForAccumulatorValue(prop->key());
-        const Register scratch1 = a1;
-        const Register scratch2 = t0;
-        __ lw(scratch1, MemOperand(sp, 1 * kPointerSize));  // this
-        __ lw(scratch2, MemOperand(sp, 0 * kPointerSize));  // home object
-        PushOperands(result_register(), scratch1, scratch2, result_register());
-        EmitKeyedSuperPropertyLoad(prop);
-        break;
-      }
-
       case KEYED_PROPERTY: {
         VisitForStackValue(prop->obj());
         VisitForStackValue(prop->key());
@@ -3094,6 +2400,8 @@
         break;
       }
 
+      case NAMED_SUPER_PROPERTY:
+      case KEYED_SUPER_PROPERTY:
       case VARIABLE:
         UNREACHABLE();
     }
@@ -3130,14 +2438,12 @@
           case NAMED_PROPERTY:
             __ sw(v0, MemOperand(sp, kPointerSize));
             break;
-          case NAMED_SUPER_PROPERTY:
-            __ sw(v0, MemOperand(sp, 2 * kPointerSize));
-            break;
           case KEYED_PROPERTY:
             __ sw(v0, MemOperand(sp, 2 * kPointerSize));
             break;
+          case NAMED_SUPER_PROPERTY:
           case KEYED_SUPER_PROPERTY:
-            __ sw(v0, MemOperand(sp, 3 * kPointerSize));
+            UNREACHABLE();
             break;
         }
       }
@@ -3170,14 +2476,12 @@
         case NAMED_PROPERTY:
           __ sw(v0, MemOperand(sp, kPointerSize));
           break;
-        case NAMED_SUPER_PROPERTY:
-          __ sw(v0, MemOperand(sp, 2 * kPointerSize));
-          break;
         case KEYED_PROPERTY:
           __ sw(v0, MemOperand(sp, 2 * kPointerSize));
           break;
+        case NAMED_SUPER_PROPERTY:
         case KEYED_SUPER_PROPERTY:
-          __ sw(v0, MemOperand(sp, 3 * kPointerSize));
+          UNREACHABLE();
           break;
       }
     }
@@ -3234,30 +2538,6 @@
       }
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      EmitNamedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(v0);
-      }
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      EmitKeyedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(v0);
-      }
-      break;
-    }
     case KEYED_PROPERTY: {
       __ mov(StoreDescriptor::ValueRegister(), result_register());
       PopOperands(StoreDescriptor::ReceiverRegister(),
@@ -3273,6 +2553,10 @@
       }
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
@@ -3341,16 +2625,6 @@
     __ And(a1, a1,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)    \
-  } else if (String::Equals(check, factory->type##_string())) {  \
-    __ JumpIfSmi(v0, if_false);                                  \
-    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));      \
-    __ LoadRoot(at, Heap::k##Type##MapRootIndex);                \
-    Split(eq, v0, Operand(at), if_true, if_false, fall_through);
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
@@ -3392,6 +2666,7 @@
       __ mov(a0, result_register());
       PopOperand(a1);
       __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+      RestoreContext();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(at, Heap::kTrueValueRootIndex);
       Split(eq, v0, Operand(at), if_true, if_false, fall_through);
@@ -3504,70 +2779,6 @@
 }
 
 
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
-  DCHECK(!result_register().is(a1));
-  // Store pending message while executing finally block.
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ li(at, Operand(pending_message_obj));
-  __ lw(a1, MemOperand(at));
-  PushOperand(a1);
-
-  ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
-  DCHECK(!result_register().is(a1));
-  // Restore pending message from stack.
-  PopOperand(a1);
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ li(at, Operand(pending_message_obj));
-  __ sw(a1, MemOperand(at));
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
-  DCHECK(!result_register().is(a1));
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
-  __ li(at, Operand(pending_message_obj));
-  __ sw(a1, MemOperand(at));
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
-  DCHECK(!result_register().is(a1));
-  __ Pop(result_register());  // Restore the accumulator.
-  __ Pop(a1);                 // Get the token.
-  for (DeferredCommand cmd : commands_) {
-    Label skip;
-    __ li(at, Operand(Smi::FromInt(cmd.token)));
-    __ Branch(&skip, ne, a1, Operand(at));
-    switch (cmd.command) {
-      case kReturn:
-        codegen_->EmitUnwindAndReturn();
-        break;
-      case kThrow:
-        __ Push(result_register());
-        __ CallRuntime(Runtime::kReThrow);
-        break;
-      case kContinue:
-        codegen_->EmitContinue(cmd.target);
-        break;
-      case kBreak:
-        codegen_->EmitBreak(cmd.target);
-        break;
-    }
-    __ bind(&skip);
-  }
-}
-
 #undef __
 
 
diff --git a/src/full-codegen/mips64/full-codegen-mips64.cc b/src/full-codegen/mips64/full-codegen-mips64.cc
index 7640c52..37e2d80 100644
--- a/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -12,15 +12,16 @@
 // places where we have to move a previous result in v0 to a0 for the
 // next call: mov(a0, v0). This is not needed on the other architectures.
 
-#include "src/full-codegen/full-codegen.h"
 #include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
 
 #include "src/mips64/code-stubs-mips64.h"
@@ -139,21 +140,19 @@
   // Increment invocation count for the function.
   {
     Comment cmnt(masm_, "[ Increment invocation count");
-    __ ld(a0, FieldMemOperand(a1, JSFunction::kLiteralsOffset));
-    __ ld(a0, FieldMemOperand(a0, LiteralsArray::kFeedbackVectorOffset));
+    __ ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+    __ ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
     __ ld(a4, FieldMemOperand(
-                  a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                          TypeFeedbackVector::kHeaderSize));
+                  a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                          FeedbackVector::kHeaderSize));
     __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
     __ sd(a4, FieldMemOperand(
-                  a0, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                          TypeFeedbackVector::kHeaderSize));
+                  a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                          FeedbackVector::kHeaderSize));
   }
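
A rough sketch of the new indirection in this hunk (toy structs; the field
names are assumptions, not V8's heap layout): the function now points at a
cell whose value is the feedback vector, so bumping the invocation count
takes two dependent loads.

    #include <cstdint>

    struct FeedbackVector { int64_t invocation_count; };
    struct Cell { FeedbackVector* value; };
    struct JSFunction { Cell* feedback_vector_cell; };

    void BumpInvocationCount(JSFunction* function) {
      // Corresponds to the two ld instructions emitted above.
      FeedbackVector* vector = function->feedback_vector_cell->value;
      vector->invocation_count += 1;  // Daddu + sd in the generated code
    }
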
 
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
-    // Generators allocate locals, if any, in context slots.
-    DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
     OperandStackDepthIncrement(locals_count);
     if (locals_count > 0) {
       if (locals_count >= 128) {
@@ -209,15 +208,18 @@
       if (info->scope()->new_target_var() != nullptr) {
         __ push(a3);  // Preserve new target.
       }
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info->scope()->scope_type());
         __ li(FastNewFunctionContextDescriptor::SlotsRegister(),
               Operand(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ push(a1);
+        __ Push(Smi::FromInt(info->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
       if (info->scope()->new_target_var() != nullptr) {
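
This hunk (and its ppc twin below) replaces a hand-rolled platform stub with
a Callable obtained from a factory. A hedged toy of the call-site pattern
(stand-in types, not the real CodeFactory API):

    #include <functional>

    // Stand-in for a code object plus its calling convention.
    struct Callable {
      std::function<void(int /*slots*/)> code;
    };

    // Stand-in factory: hands out the builtin for a given scope type.
    Callable FastNewFunctionContext(int scope_type) {
      return Callable{[scope_type](int slots) {
        // ... allocate a function context with `slots` slots for this
        // scope type ...
        (void)scope_type;
        (void)slots;
      }};
    }

    void EmitNewContext(int scope_type, int slots) {
      Callable callable = FastNewFunctionContext(scope_type);
      callable.code(slots);  // analogous to __ Call(callable.code(), ...)
    }
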
@@ -264,36 +266,10 @@
   PrepareForBailoutForId(BailoutId::FunctionContext(),
                          BailoutState::NO_REGISTERS);
 
-  // Possibly set up a local binding to the this function which is used in
-  // derived constructors with super calls.
-  Variable* this_function_var = info->scope()->this_function_var();
-  if (this_function_var != nullptr) {
-    Comment cmnt(masm_, "[ This function");
-    if (!function_in_register_a1) {
-      __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers register again, keep it marked as such.
-    }
-    SetVar(this_function_var, a1, a0, a2);
-  }
-
-  Variable* new_target_var = info->scope()->new_target_var();
-  if (new_target_var != nullptr) {
-    Comment cmnt(masm_, "[ new.target");
-    SetVar(new_target_var, a3, a0, a2);
-  }
-
-  // Possibly allocate RestParameters
-  Variable* rest_param = info->scope()->rest_parameter();
-  if (rest_param != nullptr) {
-    Comment cmnt(masm_, "[ Allocate rest parameter array");
-    if (!function_in_register_a1) {
-      __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-    }
-    FastNewRestParameterStub stub(isolate());
-    __ CallStub(&stub);
-    function_in_register_a1 = false;
-    SetVar(rest_param, v0, a1, a2);
-  }
+  // We don't support new.target, rest parameters, or this-function here.
+  DCHECK_NULL(info->scope()->new_target_var());
+  DCHECK_NULL(info->scope()->rest_parameter());
+  DCHECK_NULL(info->scope()->this_function_var());
 
   Variable* arguments = info->scope()->arguments();
   if (arguments != NULL) {
@@ -304,14 +280,16 @@
       __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     }
     if (is_strict(language_mode()) || !has_simple_parameters()) {
-      FastNewStrictArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+      __ Call(callable.code(), RelocInfo::CODE_TARGET);
+      RestoreContext();
     } else if (literal()->has_duplicate_parameters()) {
       __ Push(a1);
       __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
     } else {
-      FastNewSloppyArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+      __ Call(callable.code(), RelocInfo::CODE_TARGET);
+      RestoreContext();
     }
 
     SetVar(arguments, v0, a1, a2);
@@ -546,10 +524,8 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
-         !lit->IsUndetectable());
-  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
-      lit->IsFalse(isolate())) {
+  DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+  if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ Branch(false_label_);
   } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ Branch(true_label_);
@@ -781,10 +757,12 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
       globals_->Add(isolate()->factory()->undefined_value(), zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
     }
     case VariableLocation::PARAMETER:
@@ -807,17 +785,7 @@
       }
       break;
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ VariableDeclaration");
-      DCHECK_EQ(VAR, variable->mode());
-      DCHECK(!variable->binding_needs_init());
-      __ li(a2, Operand(variable->name()));
-      __ Push(a2);
-      __ CallRuntime(Runtime::kDeclareEvalVar);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
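
After this change every global declaration appends a fixed four-entry record
to globals_: the name, the feedback slot, a second slot (the function-literal
slot, or undefined for plain variables), and the initial value. A sketch of
that record shape (assumed, for illustration only):

    #include <string>
    #include <vector>

    struct GlobalDeclaration {
      std::string name;   // variable or function name
      int feedback_slot;  // store-IC slot, Smi-tagged in the real list
      int literal_slot;   // function-literal slot; dummy for plain variables
      bool is_function;   // initial value: compiled function vs. undefined
    };

    void Declare(std::vector<GlobalDeclaration>* globals,
                 GlobalDeclaration decl) {
      globals->push_back(decl);  // four fields per declaration
    }
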
@@ -830,9 +798,16 @@
   Variable* variable = proxy->var();
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+      // We need the slot where the literals array lives, too.
+      slot = declaration->fun()->LiteralFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
       Handle<SharedFunctionInfo> function =
           Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
@@ -868,17 +843,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      __ li(a2, Operand(variable->name()));
-      PushOperand(a2);
-      // Push initial value for function declaration.
-      VisitForStackValue(declaration->fun());
-      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -889,7 +854,7 @@
   // Call the runtime to declare the globals.
   __ li(a1, Operand(pairs));
   __ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
-  __ EmitLoadTypeFeedbackVector(a2);
+  __ EmitLoadFeedbackVector(a2);
   __ Push(a1, a0, a2);
   __ CallRuntime(Runtime::kDeclareGlobals);
   // Return value is ignored.
@@ -993,7 +958,7 @@
   Comment cmnt(masm_, "[ ForInStatement");
   SetStatementPosition(stmt, SKIP_BREAK);
 
-  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  FeedbackSlot slot = stmt->ForInFeedbackSlot();
 
   // Get the object to enumerate over. If the object is null or undefined, skip
   // over the loop.  See ECMA-262 version 5, section 12.6.4.
@@ -1113,8 +1078,8 @@
 
   // We need to filter the key, record slow-path here.
   int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(a3);
-  __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ EmitLoadFeedbackVector(a3);
+  __ li(a2, Operand(FeedbackVector::MegamorphicSentinel(isolate())));
   __ sd(a2, FieldMemOperand(a3, FixedArray::OffsetOfElementAt(vector_index)));
 
   __ mov(a0, result_register());
@@ -1163,9 +1128,8 @@
   decrement_loop_depth();
 }
 
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
-                                          FeedbackVectorSlot slot) {
+                                          FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
   __ ld(StoreDescriptor::ValueRegister(),
@@ -1173,10 +1137,9 @@
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
 void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
                                                      int offset,
-                                                     FeedbackVectorSlot slot) {
+                                                     FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Move(StoreDescriptor::ReceiverRegister(), v0);
   __ ld(StoreDescriptor::ValueRegister(),
@@ -1184,93 +1147,6 @@
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
-                                                      TypeofMode typeof_mode,
-                                                      Label* slow) {
-  Register current = cp;
-  Register next = a1;
-  Register temp = a2;
-
-  int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
-  for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (s->calls_sloppy_eval()) {
-      // Check that extension is "the hole".
-      __ ld(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
-      __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-    }
-    // Load next context in chain.
-    __ ld(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
-    // Walk the rest of the chain without clobbering cp.
-    current = next;
-    to_check--;
-  }
-
-  // All extension objects were empty and it is safe to use a normal global
-  // load machinery.
-  EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
-                                                                Label* slow) {
-  DCHECK(var->IsContextSlot());
-  Register context = cp;
-  Register next = a3;
-  Register temp = a4;
-
-  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->NeedsContext()) {
-      if (s->calls_sloppy_eval()) {
-        // Check that extension is "the hole".
-        __ ld(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
-        __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-      }
-      __ ld(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-      // Walk the rest of the chain without clobbering cp.
-      context = next;
-    }
-  }
-  // Check that last extension is "the hole".
-  __ ld(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
-  __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-
-  // This function is used only for loads, not stores, so it's safe to
-  // return an cp-based operand (the write barrier cannot be allowed to
-  // destroy the cp register).
-  return ContextMemOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
-                                                  TypeofMode typeof_mode,
-                                                  Label* slow, Label* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  Variable* var = proxy->var();
-  if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
-    __ Branch(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
-    Variable* local = var->local_if_not_shadowed();
-    __ ld(v0, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->binding_needs_init()) {
-      __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-      __ dsubu(at, v0, at);  // Sub as compare: at == 0 on eq.
-      __ Branch(done, ne, at, Operand(zero_reg));
-      __ li(a0, Operand(var->name()));
-      __ push(a0);
-      __ CallRuntime(Runtime::kThrowReferenceError);
-    } else {
-      __ Branch(done);
-    }
-  }
-}
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1278,8 +1154,7 @@
   PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
-  // Three cases: global variables, lookup variables, and all other types of
-  // variables.
+  // Two cases: global variables and all other types of variables.
   switch (var->location()) {
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
@@ -1313,24 +1188,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ Lookup variable");
-      Label done, slow;
-      // Generate code for loading from variables potentially shadowed
-      // by eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
-      __ bind(&slow);
-      __ Push(var->name());
-      Runtime::FunctionId function_id =
-          typeof_mode == NOT_INSIDE_TYPEOF
-              ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotInsideTypeof;
-      __ CallRuntime(function_id);
-      __ bind(&done);
-      context()->Plug(v0);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -1357,17 +1215,19 @@
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
 
-  Handle<FixedArray> constant_properties = expr->constant_properties();
+  Handle<BoilerplateDescription> constant_properties =
+      expr->GetOrBuildConstantProperties(isolate());
   __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a2, Operand(SmiFromSlot(expr->literal_slot())));
   __ li(a1, Operand(constant_properties));
   __ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
   if (MustCreateObjectLiteralWithRuntime(expr)) {
     __ Push(a3, a2, a1, a0);
     __ CallRuntime(Runtime::kCreateObjectLiteral);
   } else {
-    FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
-    __ CallStub(&stub);
+    Callable callable = CodeFactory::FastCloneShallowObject(
+        isolate(), expr->properties_count());
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
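
The literal index passed in a2 is now a feedback-vector slot rather than an
index into a per-function literals array. A toy of the Smi tagging applied
before handing it over (one common encoding; the shift width here is an
assumption):

    #include <cstdint>

    struct FeedbackSlot { int id; };

    // Tag an integer as a Smi: payload in the upper bits, tag bit 0 clear.
    intptr_t SmiFromSlot(FeedbackSlot slot) {
      return static_cast<intptr_t>(slot.id) << 1;
    }
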
@@ -1377,10 +1237,9 @@
   bool result_saved = false;
 
   AccessorTable accessor_table(zone());
-  int property_index = 0;
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-    if (property->is_computed_name()) break;
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    DCHECK(!property->is_computed_name());
     if (property->IsCompileTimeValue()) continue;
 
     Literal* key = property->key()->AsLiteral();
@@ -1390,6 +1249,7 @@
       result_saved = true;
     }
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1405,7 +1265,7 @@
             __ mov(StoreDescriptor::ValueRegister(), result_register());
             DCHECK(StoreDescriptor::ValueRegister().is(a0));
             __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-            CallStoreIC(property->GetSlot(0), key->value());
+            CallStoreIC(property->GetSlot(0), key->value(), true);
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1439,20 +1299,20 @@
         VisitForStackValue(value);
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-        PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+        PrepareForBailoutForId(expr->GetIdForPropertySet(i),
                                BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->setter = property;
         }
         break;
@@ -1475,73 +1335,6 @@
     PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
-  // Object literals have two parts. The "static" part on the left contains no
-  // computed property names, and so we can compute its map ahead of time; see
-  // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
-  // starts with the first computed property name, and continues with all
-  // properties to its right.  All the code from above initializes the static
-  // component of the object literal, and arranges for the map of the result to
-  // reflect the static order in which the keys appear. For the dynamic
-  // properties, we compile them into a series of "SetOwnProperty" runtime
-  // calls. This will preserve insertion order.
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
-    Expression* value = property->value();
-    if (!result_saved) {
-      PushOperand(v0);  // Save result on the stack
-      result_saved = true;
-    }
-
-    __ ld(a0, MemOperand(sp));  // Duplicate receiver.
-    PushOperand(a0);
-
-    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
-      DCHECK(!property->is_computed_name());
-      VisitForStackValue(value);
-      DCHECK(property->emit_store());
-      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-      PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             BailoutState::NO_REGISTERS);
-    } else {
-      EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
-      VisitForStackValue(value);
-      if (NeedsHomeObject(value)) {
-        EmitSetHomeObject(value, 2, property->GetSlot());
-      }
-
-      switch (property->kind()) {
-        case ObjectLiteral::Property::CONSTANT:
-        case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        case ObjectLiteral::Property::COMPUTED:
-          if (property->emit_store()) {
-            PushOperand(Smi::FromInt(NONE));
-            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                                   BailoutState::NO_REGISTERS);
-          } else {
-            DropOperands(3);
-          }
-          break;
-
-        case ObjectLiteral::Property::PROTOTYPE:
-          UNREACHABLE();
-          break;
-
-        case ObjectLiteral::Property::GETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-          break;
-
-        case ObjectLiteral::Property::SETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-          break;
-      }
-    }
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1553,28 +1346,21 @@
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
 
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  bool has_fast_elements =
-      IsFastObjectElementsKind(expr->constant_elements_kind());
-
-  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
-  if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
-    // If the only customer of allocation sites is transitioning, then
-    // we can turn it off if we don't have anywhere else to transition to.
-    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
-  }
+  Handle<ConstantElementsPair> constant_elements =
+      expr->GetOrBuildConstantElements(isolate());
 
   __ mov(a0, result_register());
   __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a2, Operand(SmiFromSlot(expr->literal_slot())));
   __ li(a1, Operand(constant_elements));
   if (MustCreateArrayLiteralWithRuntime(expr)) {
     __ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
     __ Push(a3, a2, a1, a0);
     __ CallRuntime(Runtime::kCreateArrayLiteral);
   } else {
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
-    __ CallStub(&stub);
+    Callable callable =
+        CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1639,34 +1425,6 @@
         VisitForStackValue(property->obj());
       }
       break;
-    case NAMED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        const Register scratch = a1;
-        __ ld(scratch, MemOperand(sp, kPointerSize));
-        PushOperands(scratch, result_register());
-      }
-      break;
-    case KEYED_SUPER_PROPERTY: {
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(property->key());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        const Register scratch1 = a4;
-        const Register scratch2 = a1;
-        __ ld(scratch1, MemOperand(sp, 2 * kPointerSize));
-        __ ld(scratch2, MemOperand(sp, 1 * kPointerSize));
-        PushOperands(scratch1, scratch2, result_register());
-      }
-      break;
-    }
     case KEYED_PROPERTY:
       // We need the key and receiver on both the stack and in v0 and a1.
       if (expr->is_compound()) {
@@ -1680,6 +1438,10 @@
         VisitForStackValue(property->key());
       }
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 
   // For compound assignments we need another deoptimization point after the
@@ -1696,21 +1458,15 @@
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
-        case NAMED_SUPER_PROPERTY:
-          EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
-        case KEYED_SUPER_PROPERTY:
-          EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
+        case NAMED_SUPER_PROPERTY:
+        case KEYED_SUPER_PROPERTY:
+          UNREACHABLE();
+          break;
       }
     }
 
@@ -1749,69 +1505,20 @@
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
-    case NAMED_SUPER_PROPERTY:
-      EmitNamedSuperPropertyStore(property);
-      context()->Plug(v0);
-      break;
-    case KEYED_SUPER_PROPERTY:
-      EmitKeyedSuperPropertyStore(property);
-      context()->Plug(v0);
-      break;
     case KEYED_PROPERTY:
       EmitKeyedPropertyAssignment(expr);
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
 
 void FullCodeGenerator::VisitYield(Yield* expr) {
-  Comment cmnt(masm_, "[ Yield");
-  SetExpressionPosition(expr);
-
-  // Evaluate yielded value first; the initial iterator definition depends on
-  // this.  It stays on the stack while we update the iterator.
-  VisitForStackValue(expr->expression());
-
-  Label suspend, continuation, post_runtime, resume, exception;
-
-  __ jmp(&suspend);
-  __ bind(&continuation);
-  // When we arrive here, v0 holds the generator object.
-  __ RecordGeneratorContinuation();
-  __ ld(a1, FieldMemOperand(v0, JSGeneratorObject::kResumeModeOffset));
-  __ ld(v0, FieldMemOperand(v0, JSGeneratorObject::kInputOrDebugPosOffset));
-  __ Branch(&resume, eq, a1, Operand(Smi::FromInt(JSGeneratorObject::kNext)));
-  __ Push(result_register());
-  __ Branch(&exception, eq, a1,
-            Operand(Smi::FromInt(JSGeneratorObject::kThrow)));
-  EmitCreateIteratorResult(true);
-  EmitUnwindAndReturn();
-
-  __ bind(&exception);
-  __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
-                                              : Runtime::kThrow);
-
-  __ bind(&suspend);
-  OperandStackDepthIncrement(1);  // Not popped on this path.
-  VisitForAccumulatorValue(expr->generator_object());
-  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-  __ li(a1, Operand(Smi::FromInt(continuation.pos())));
-  __ sd(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
-  __ sd(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
-  __ mov(a1, cp);
-  __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
-                      kRAHasBeenSaved, kDontSaveFPRegs);
-  __ Daddu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
-  __ Branch(&post_runtime, eq, sp, Operand(a1));
-  __ push(v0);  // generator object
-  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  RestoreContext();
-  __ bind(&post_runtime);
-  PopOperand(result_register());
-  EmitReturnSequence();
-
-  __ bind(&resume);
-  context()->Plug(result_register());
+  // Resumable functions are not supported.
+  UNREACHABLE();
 }
 
 void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
@@ -1959,60 +1666,6 @@
   context()->Plug(v0);
 }
 
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  for (int i = 0; i < lit->properties()->length(); i++) {
-    ClassLiteral::Property* property = lit->properties()->at(i);
-    Expression* value = property->value();
-
-    Register scratch = a1;
-    if (property->is_static()) {
-      __ ld(scratch, MemOperand(sp, kPointerSize));  // constructor
-    } else {
-      __ ld(scratch, MemOperand(sp, 0));  // prototype
-    }
-    PushOperand(scratch);
-    EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static prototype property is read only. We handle the non computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read only property we special case this so we do
-    // not need to do this for every property.
-    if (property->is_static() && property->is_computed_name()) {
-      __ CallRuntime(Runtime::kThrowIfStaticPrototype);
-      __ push(v0);
-    }
-
-    VisitForStackValue(value);
-    if (NeedsHomeObject(value)) {
-      EmitSetHomeObject(value, 2, property->GetSlot());
-    }
-
-    switch (property->kind()) {
-      case ClassLiteral::Property::METHOD:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-        break;
-
-      case ClassLiteral::Property::GETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::SETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::FIELD:
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
   __ mov(a0, result_register());
   PopOperand(a1);
@@ -2023,9 +1676,7 @@
   context()->Plug(v0);
 }
 
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
-                                       FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
   DCHECK(expr->IsValidReferenceExpressionOrThis());
 
   Property* prop = expr->AsProperty();
@@ -2047,43 +1698,6 @@
       CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      PushOperand(v0);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      // stack: value, this; v0: home_object
-      Register scratch = a2;
-      Register scratch2 = a3;
-      __ mov(scratch, result_register());             // home_object
-      __ ld(v0, MemOperand(sp, kPointerSize));        // value
-      __ ld(scratch2, MemOperand(sp, 0));             // this
-      __ sd(scratch2, MemOperand(sp, kPointerSize));  // this
-      __ sd(scratch, MemOperand(sp, 0));              // home_object
-      // stack: this, home_object; v0: value
-      EmitNamedSuperPropertyStore(prop);
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      PushOperand(v0);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(prop->key());
-      Register scratch = a2;
-      Register scratch2 = a3;
-      __ ld(scratch2, MemOperand(sp, 2 * kPointerSize));  // value
-      // stack: value, this, home_object; v0: key, a3: value
-      __ ld(scratch, MemOperand(sp, kPointerSize));  // this
-      __ sd(scratch, MemOperand(sp, 2 * kPointerSize));
-      __ ld(scratch, MemOperand(sp, 0));  // home_object
-      __ sd(scratch, MemOperand(sp, kPointerSize));
-      __ sd(v0, MemOperand(sp, 0));
-      __ Move(v0, scratch2);
-      // stack: this, home_object, key; v0: value.
-      EmitKeyedSuperPropertyStore(prop);
-      break;
-    }
     case KEYED_PROPERTY: {
       PushOperand(result_register());  // Preserve value.
       VisitForStackValue(prop->obj());
@@ -2094,6 +1708,10 @@
       CallKeyedStoreIC(slot);
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
   context()->Plug(v0);
 }
@@ -2112,7 +1730,7 @@
 }
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot,
+                                               FeedbackSlot slot,
                                                HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
@@ -2156,25 +1774,18 @@
 
   } else {
     DCHECK(var->mode() != CONST || op == Token::INIT);
-    if (var->IsLookupSlot()) {
-      __ Push(var->name());
-      __ Push(v0);
-      __ CallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreLookupSlot_Strict
-                         : Runtime::kStoreLookupSlot_Sloppy);
-    } else {
-      // Assignment to var or initializing assignment to let/const in harmony
-      // mode.
-      DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
-      MemOperand location = VarOperand(var, a1);
-      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
-        // Check for an uninitialized let binding.
-        __ ld(a2, location);
-        __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
-        __ Check(eq, kLetBindingReInitialization, a2, Operand(a4));
-      }
-      EmitStoreToStackLocalOrContextSlot(var, location);
+    DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+    DCHECK(!var->IsLookupSlot());
+    // Assignment to var or initializing assignment to let/const in harmony
+    // mode.
+    MemOperand location = VarOperand(var, a1);
+    if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+      // Check for an uninitialized let binding.
+      __ ld(a2, location);
+      __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+      __ Check(eq, kLetBindingReInitialization, a2, Operand(a4));
     }
+    EmitStoreToStackLocalOrContextSlot(var, location);
   }
 }
 
@@ -2194,35 +1805,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // v0 : value
-  // stack : receiver ('this'), home_object
-  DCHECK(prop != NULL);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(key != NULL);
-
-  PushOperand(key->value());
-  PushOperand(v0);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreToSuper_Strict
-                              : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // v0 : value
-  // stack : receiver ('this'), home_object, key
-  DCHECK(prop != NULL);
-
-  PushOperand(v0);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreKeyedToSuper_Strict
-                              : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
   // Call keyed store IC.
@@ -2276,43 +1858,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
-  SetExpressionPosition(expr);
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  // Load the function from the receiver.
-  const Register scratch = a1;
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForAccumulatorValue(super_ref->home_object());
-  __ mov(scratch, v0);
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperands(scratch, v0, v0, scratch);
-  PushOperand(key->value());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ sd(v0, MemOperand(sp, kPointerSize));
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2338,41 +1883,6 @@
 }
 
 
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  SetExpressionPosition(prop);
-  // Load the function from the receiver.
-  const Register scratch = a1;
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForAccumulatorValue(super_ref->home_object());
-  __ Move(scratch, v0);
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperands(scratch, v0, v0, scratch);
-  VisitForStackValue(prop->key());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ sd(v0, MemOperand(sp, kPointerSize));
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2393,8 +1903,9 @@
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
   Handle<Code> code =
-      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
-  __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
+      CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+          .code();
+  __ li(a3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
   __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
   __ li(a0, Operand(arg_count));
   CallIC(code);
@@ -2405,115 +1916,6 @@
   context()->DropAndPlug(1, v0);
 }
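
Two things change at this call site: the IC is entered through its
trampoline, and a3 now carries the raw integer slot (IntFromSlot) instead of
a Smi-tagged one. A toy model of why the trampoline wants a raw index
(stand-in code, not the real IC):

    struct FeedbackVector {
      int call_counts[16];
    };

    // The trampoline locates the feedback vector itself, so callers pass
    // only the untagged slot index.
    void CallICTrampoline(FeedbackVector* vector, int raw_slot) {
      vector->call_counts[raw_slot]++;  // record feedback for this call site
      // ... then dispatch the actual call ...
    }
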
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
-  int arg_count = expr->arguments()->length();
-  // a6: copy of the first argument or undefined if it doesn't exist.
-  if (arg_count > 0) {
-    __ ld(a6, MemOperand(sp, arg_count * kPointerSize));
-  } else {
-    __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
-  }
-
-  // a5: the receiver of the enclosing function.
-  __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
-  // a4: the language mode.
-  __ li(a4, Operand(Smi::FromInt(language_mode())));
-
-  // a1: the start position of the scope the calls resides in.
-  __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
-
-  // a0: the source position of the eval call.
-  __ li(a0, Operand(Smi::FromInt(expr->position())));
-
-  // Do the runtime call.
-  __ Push(a6, a5, a4, a1, a0);
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
-  VariableProxy* callee = expr->expression()->AsVariableProxy();
-  if (callee->var()->IsLookupSlot()) {
-    Label slow, done;
-
-    SetExpressionPosition(callee);
-    // Generate code for loading from variables potentially shadowed by
-    // eval-introduced variables.
-    EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
-    __ bind(&slow);
-    // Call the runtime to find the function to call (returned in v0)
-    // and the object holding it (returned in v1).
-    __ Push(callee->name());
-    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
-    PushOperands(v0, v1);  // Function, receiver.
-    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
-    // If fast case code has been generated, emit code to push the
-    // function and receiver and have the slow path jump around this
-    // code.
-    if (done.is_linked()) {
-      Label call;
-      __ Branch(&call);
-      __ bind(&done);
-      // Push function.
-      __ push(v0);
-      // The receiver is implicitly the global receiver. Indicate this
-      // by passing the hole to the call function stub.
-      __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
-      __ push(a1);
-      __ bind(&call);
-    }
-  } else {
-    VisitForStackValue(callee);
-    // refEnv.WithBaseObject()
-    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-    PushOperand(a2);  // Reserved receiver slot.
-  }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
-  // to resolve the function we need to call.  Then we call the resolved
-  // function using the given arguments.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  PushCalleeAndWithBaseObject(expr);
-
-  // Push the arguments.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Push a copy of the function (found below the arguments) and
-  // resolve eval.
-  __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ push(a1);
-  EmitResolvePossiblyDirectEval(expr);
-
-  // Touch up the stack with the resolved function.
-  __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-
-  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-  // Record source position for debugger.
-  SetCallPosition(expr);
-  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
-                                          expr->tail_call_mode())
-                          .code();
-  __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
-  __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
-  __ li(a0, Operand(arg_count));
-  __ Call(code, RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->DropAndPlug(1, v0);
-}
-
-
 void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
@@ -2542,7 +1944,7 @@
   __ ld(a1, MemOperand(sp, arg_count * kPointerSize));
 
   // Record call targets in unoptimized code.
-  __ EmitLoadTypeFeedbackVector(a2);
+  __ EmitLoadFeedbackVector(a2);
   __ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
 
   CallConstructStub stub(isolate());
@@ -2554,49 +1956,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
-  SuperCallReference* super_call_ref =
-      expr->expression()->AsSuperCallReference();
-  DCHECK_NOT_NULL(super_call_ref);
-
-  // Push the super constructor target on the stack (may be null,
-  // but the Construct builtin can deal with that properly).
-  VisitForAccumulatorValue(super_call_ref->this_function_var());
-  __ AssertFunction(result_register());
-  __ ld(result_register(),
-        FieldMemOperand(result_register(), HeapObject::kMapOffset));
-  __ ld(result_register(),
-        FieldMemOperand(result_register(), Map::kPrototypeOffset));
-  PushOperand(result_register());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  SetConstructCallPosition(expr);
-
-  // Load new target into a3.
-  VisitForAccumulatorValue(super_call_ref->new_target_var());
-  __ mov(a3, result_register());
-
-  // Load function and argument count into a1 and a0.
-  __ li(a0, Operand(arg_count));
-  __ ld(a1, MemOperand(sp, arg_count * kPointerSize));
-
-  __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->Plug(v0);
-}
-
-
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2686,28 +2045,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(v0, if_false);
-  __ GetObjectType(v0, a1, a1);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2943,17 +2280,12 @@
           __ Push(a2, a1);
           __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
           context()->Plug(v0);
-        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+        } else {
+          DCHECK(!var->IsLookupSlot());
+          DCHECK(var->IsStackAllocated() || var->IsContextSlot());
           // Result of deleting non-global, non-dynamic variables is false.
           // The subexpression does not have side effects.
           context()->Plug(is_this);
-        } else {
-          // Non-global variable.  Call the runtime to try to delete from the
-          // context where the variable was introduced.
-          DCHECK(!context_register().is(a2));
-          __ Push(var->name());
-          __ CallRuntime(Runtime::kDeleteLookupSlot);
-          context()->Plug(v0);
         }
       } else {
         // Result of deleting non-property, non-variable reference is true.
@@ -3059,31 +2391,6 @@
         break;
       }
 
-      case NAMED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForAccumulatorValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        const Register scratch = a1;
-        __ ld(scratch, MemOperand(sp, 0));  // this
-        PushOperands(result_register(), scratch, result_register());
-        EmitNamedSuperPropertyLoad(prop);
-        break;
-      }
-
-      case KEYED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForStackValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        VisitForAccumulatorValue(prop->key());
-        const Register scratch1 = a1;
-        const Register scratch2 = a4;
-        __ ld(scratch1, MemOperand(sp, 1 * kPointerSize));  // this
-        __ ld(scratch2, MemOperand(sp, 0 * kPointerSize));  // home object
-        PushOperands(result_register(), scratch1, scratch2, result_register());
-        EmitKeyedSuperPropertyLoad(prop);
-        break;
-      }
-
       case KEYED_PROPERTY: {
         VisitForStackValue(prop->obj());
         VisitForStackValue(prop->key());
@@ -3094,6 +2401,8 @@
         break;
       }
 
+      case NAMED_SUPER_PROPERTY:
+      case KEYED_SUPER_PROPERTY:
       case VARIABLE:
         UNREACHABLE();
     }
@@ -3130,14 +2439,12 @@
           case NAMED_PROPERTY:
             __ sd(v0, MemOperand(sp, kPointerSize));
             break;
-          case NAMED_SUPER_PROPERTY:
-            __ sd(v0, MemOperand(sp, 2 * kPointerSize));
-            break;
           case KEYED_PROPERTY:
             __ sd(v0, MemOperand(sp, 2 * kPointerSize));
             break;
+          case NAMED_SUPER_PROPERTY:
           case KEYED_SUPER_PROPERTY:
-            __ sd(v0, MemOperand(sp, 3 * kPointerSize));
+            UNREACHABLE();
             break;
         }
       }
@@ -3170,14 +2477,12 @@
         case NAMED_PROPERTY:
           __ sd(v0, MemOperand(sp, kPointerSize));
           break;
-        case NAMED_SUPER_PROPERTY:
-          __ sd(v0, MemOperand(sp, 2 * kPointerSize));
-          break;
         case KEYED_PROPERTY:
           __ sd(v0, MemOperand(sp, 2 * kPointerSize));
           break;
+        case NAMED_SUPER_PROPERTY:
         case KEYED_SUPER_PROPERTY:
-          __ sd(v0, MemOperand(sp, 3 * kPointerSize));
+          UNREACHABLE();
           break;
       }
     }
@@ -3234,30 +2539,6 @@
       }
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      EmitNamedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(v0);
-      }
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      EmitKeyedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(v0);
-      }
-      break;
-    }
     case KEYED_PROPERTY: {
       __ mov(StoreDescriptor::ValueRegister(), result_register());
       PopOperands(StoreDescriptor::ReceiverRegister(),
@@ -3273,6 +2554,10 @@
       }
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
@@ -3341,16 +2626,6 @@
     __ And(a1, a1,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)    \
-  } else if (String::Equals(check, factory->type##_string())) {  \
-    __ JumpIfSmi(v0, if_false);                                  \
-    __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));      \
-    __ LoadRoot(at, Heap::k##Type##MapRootIndex);                \
-    Split(eq, v0, Operand(at), if_true, if_false, fall_through);
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
@@ -3392,6 +2667,7 @@
       __ mov(a0, result_register());
       PopOperand(a1);
       __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+      RestoreContext();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ LoadRoot(a4, Heap::kTrueValueRootIndex);
       Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
@@ -3508,69 +2784,6 @@
 }
 
 
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
-  DCHECK(!result_register().is(a1));
-  // Store pending message while executing finally block.
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ li(at, Operand(pending_message_obj));
-  __ ld(a1, MemOperand(at));
-  PushOperand(a1);
-
-  ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
-  DCHECK(!result_register().is(a1));
-  // Restore pending message from stack.
-  PopOperand(a1);
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ li(at, Operand(pending_message_obj));
-  __ sd(a1, MemOperand(at));
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
-  DCHECK(!result_register().is(a1));
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
-  __ li(at, Operand(pending_message_obj));
-  __ sd(a1, MemOperand(at));
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
-  __ Pop(result_register());  // Restore the accumulator.
-  __ Pop(a1);                 // Get the token.
-  for (DeferredCommand cmd : commands_) {
-    Label skip;
-    __ li(at, Operand(Smi::FromInt(cmd.token)));
-    __ Branch(&skip, ne, a1, Operand(at));
-    switch (cmd.command) {
-      case kReturn:
-        codegen_->EmitUnwindAndReturn();
-        break;
-      case kThrow:
-        __ Push(result_register());
-        __ CallRuntime(Runtime::kReThrow);
-        break;
-      case kContinue:
-        codegen_->EmitContinue(cmd.target);
-        break;
-      case kBreak:
-        codegen_->EmitBreak(cmd.target);
-        break;
-    }
-    __ bind(&skip);
-  }
-}
-
 #undef __
 
 
diff --git a/src/full-codegen/ppc/full-codegen-ppc.cc b/src/full-codegen/ppc/full-codegen-ppc.cc
index 85d198d..bd69582 100644
--- a/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -4,15 +4,16 @@
 
 #if V8_TARGET_ARCH_PPC
 
-#include "src/full-codegen/full-codegen.h"
 #include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
 
 #include "src/ppc/code-stubs-ppc.h"
@@ -136,24 +137,21 @@
   // Increment invocation count for the function.
   {
     Comment cmnt(masm_, "[ Increment invocation count");
-    __ LoadP(r7, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
-    __ LoadP(r7, FieldMemOperand(r7, LiteralsArray::kFeedbackVectorOffset));
-    __ LoadP(r8, FieldMemOperand(r7, TypeFeedbackVector::kInvocationCountIndex *
-                                             kPointerSize +
-                                         TypeFeedbackVector::kHeaderSize));
+    __ LoadP(r7, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
+    __ LoadP(r7, FieldMemOperand(r7, Cell::kValueOffset));
+    __ LoadP(r8, FieldMemOperand(
+                     r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                             FeedbackVector::kHeaderSize));
     __ AddSmiLiteral(r8, r8, Smi::FromInt(1), r0);
-    __ StoreP(r8,
-              FieldMemOperand(
-                  r7, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                          TypeFeedbackVector::kHeaderSize),
+    __ StoreP(r8, FieldMemOperand(
+                      r7, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                              FeedbackVector::kHeaderSize),
               r0);
   }
 
   {
     Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
-    // Generators allocate locals, if any, in context slots.
-    DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
     OperandStackDepthIncrement(locals_count);
     if (locals_count > 0) {
       if (locals_count >= 128) {
@@ -208,15 +206,18 @@
       if (info->scope()->new_target_var() != nullptr) {
         __ push(r6);  // Preserve new target.
       }
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info->scope()->scope_type());
         __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
                Operand(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ push(r4);
+        __ Push(Smi::FromInt(info->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
       if (info->scope()->new_target_var() != nullptr) {
@@ -263,37 +264,10 @@
   PrepareForBailoutForId(BailoutId::FunctionContext(),
                          BailoutState::NO_REGISTERS);
 
-  // Possibly set up a local binding to the this-function, which is used
-  // in derived constructors with super calls.
-  Variable* this_function_var = info->scope()->this_function_var();
-  if (this_function_var != nullptr) {
-    Comment cmnt(masm_, "[ This function");
-    if (!function_in_register_r4) {
-      __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers the register again; keep it marked as such.
-    }
-    SetVar(this_function_var, r4, r3, r5);
-  }
-
-  // Possibly set up a local binding to the new target value.
-  Variable* new_target_var = info->scope()->new_target_var();
-  if (new_target_var != nullptr) {
-    Comment cmnt(masm_, "[ new.target");
-    SetVar(new_target_var, r6, r3, r5);
-  }
-
-  // Possibly allocate RestParameters
-  Variable* rest_param = info->scope()->rest_parameter();
-  if (rest_param != nullptr) {
-    Comment cmnt(masm_, "[ Allocate rest parameter array");
-    if (!function_in_register_r4) {
-      __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-    }
-    FastNewRestParameterStub stub(isolate());
-    __ CallStub(&stub);
-    function_in_register_r4 = false;
-    SetVar(rest_param, r3, r4, r5);
-  }
+  // We don't support new.target, rest parameters or this-function here.
+  DCHECK_NULL(info->scope()->new_target_var());
+  DCHECK_NULL(info->scope()->rest_parameter());
+  DCHECK_NULL(info->scope()->this_function_var());
 
   Variable* arguments = info->scope()->arguments();
   if (arguments != NULL) {
@@ -304,14 +278,16 @@
       __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     }
     if (is_strict(language_mode()) || !has_simple_parameters()) {
-      FastNewStrictArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+      __ Call(callable.code(), RelocInfo::CODE_TARGET);
+      RestoreContext();
     } else if (literal()->has_duplicate_parameters()) {
       __ Push(r4);
       __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
     } else {
-      FastNewSloppyArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+      __ Call(callable.code(), RelocInfo::CODE_TARGET);
+      RestoreContext();
     }
 
     SetVar(arguments, r3, r4, r5);
@@ -536,10 +512,8 @@
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
   codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
-         !lit->IsUndetectable());
-  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
-      lit->IsFalse(isolate())) {
+  DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+  if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ b(false_label_);
   } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ b(true_label_);
@@ -751,10 +725,12 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
       globals_->Add(isolate()->factory()->undefined_value(), zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
     }
     case VariableLocation::PARAMETER:
@@ -777,17 +753,7 @@
       }
       break;
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ VariableDeclaration");
-      DCHECK_EQ(VAR, variable->mode());
-      DCHECK(!variable->binding_needs_init());
-      __ mov(r5, Operand(variable->name()));
-      __ Push(r5);
-      __ CallRuntime(Runtime::kDeclareEvalVar);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -800,9 +766,16 @@
   Variable* variable = proxy->var();
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+      // We need the slot where the literals array lives, too.
+      slot = declaration->fun()->LiteralFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
       Handle<SharedFunctionInfo> function =
           Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
@@ -834,17 +807,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      __ mov(r5, Operand(variable->name()));
-      PushOperand(r5);
-      // Push initial value for function declaration.
-      VisitForStackValue(declaration->fun());
-      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -855,7 +818,7 @@
   // Call the runtime to declare the globals.
   __ mov(r4, Operand(pairs));
   __ LoadSmiLiteral(r3, Smi::FromInt(DeclareGlobalsFlags()));
-  __ EmitLoadTypeFeedbackVector(r5);
+  __ EmitLoadFeedbackVector(r5);
   __ Push(r4, r3, r5);
   __ CallRuntime(Runtime::kDeclareGlobals);
   // Return value is ignored.
@@ -960,7 +923,7 @@
   Comment cmnt(masm_, "[ ForInStatement");
   SetStatementPosition(stmt, SKIP_BREAK);
 
-  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  FeedbackSlot slot = stmt->ForInFeedbackSlot();
 
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1081,8 +1044,8 @@
 
   // We need to filter the key; record the slow path here.
   int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(r3);
-  __ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ EmitLoadFeedbackVector(r3);
+  __ mov(r5, Operand(FeedbackVector::MegamorphicSentinel(isolate())));
   __ StoreP(
       r5, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(vector_index)), r0);
 
@@ -1134,9 +1097,8 @@
   decrement_loop_depth();
 }
 
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
-                                          FeedbackVectorSlot slot) {
+                                          FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
   __ LoadP(StoreDescriptor::ValueRegister(),
@@ -1144,10 +1106,9 @@
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
 void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
                                                      int offset,
-                                                     FeedbackVectorSlot slot) {
+                                                     FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Move(StoreDescriptor::ReceiverRegister(), r3);
   __ LoadP(StoreDescriptor::ValueRegister(),
@@ -1155,92 +1116,6 @@
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
-                                                      TypeofMode typeof_mode,
-                                                      Label* slow) {
-  Register current = cp;
-  Register next = r4;
-  Register temp = r5;
-
-  int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
-  for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (s->calls_sloppy_eval()) {
-      // Check that extension is "the hole".
-      __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
-      __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-    }
-    // Load next context in chain.
-    __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
-    // Walk the rest of the chain without clobbering cp.
-    current = next;
-    to_check--;
-  }
-
-  // All extension objects were empty, so it is safe to use the normal
-  // global load machinery.
-  EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
-                                                                Label* slow) {
-  DCHECK(var->IsContextSlot());
-  Register context = cp;
-  Register next = r6;
-  Register temp = r7;
-
-  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->NeedsContext()) {
-      if (s->calls_sloppy_eval()) {
-        // Check that extension is "the hole".
-        __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
-        __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-      }
-      __ LoadP(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-      // Walk the rest of the chain without clobbering cp.
-      context = next;
-    }
-  }
-  // Check that last extension is "the hole".
-  __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
-  __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-
-  // This function is used only for loads, not stores, so it's safe to
-  // return a cp-based operand (the write barrier cannot be allowed to
-  // destroy the cp register).
-  return ContextMemOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
-                                                  TypeofMode typeof_mode,
-                                                  Label* slow, Label* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables. Eval is frequently used without actually
-  // introducing new variables; in those cases we do not want to perform
-  // a runtime call for every variable in the scope containing the eval.
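-  // Illustrative example: in `function f() { eval(s); return x; }` the
-  // load of `x` must first verify that the eval did not introduce a
-  // shadowing `x`, hence the extension checks on the context chain.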
-  Variable* var = proxy->var();
-  if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
-    __ b(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
-    Variable* local = var->local_if_not_shadowed();
-    __ LoadP(r3, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->binding_needs_init()) {
-      __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
-      __ bne(done);
-      __ mov(r3, Operand(var->name()));
-      __ push(r3);
-      __ CallRuntime(Runtime::kThrowReferenceError);
-    } else {
-      __ b(done);
-    }
-  }
-}
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1248,8 +1123,7 @@
   PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
-  // Three cases: global variables, lookup variables, and all other types of
-  // variables.
+  // Two cases: global variables and all other types of variables.
   switch (var->location()) {
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
@@ -1282,24 +1156,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ Lookup variable");
-      Label done, slow;
-      // Generate code for loading from variables potentially shadowed
-      // by eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
-      __ bind(&slow);
-      __ Push(var->name());
-      Runtime::FunctionId function_id =
-          typeof_mode == NOT_INSIDE_TYPEOF
-              ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotInsideTypeof;
-      __ CallRuntime(function_id);
-      __ bind(&done);
-      context()->Plug(r3);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -1326,9 +1183,10 @@
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
 
-  Handle<FixedArray> constant_properties = expr->constant_properties();
+  Handle<BoilerplateDescription> constant_properties =
+      expr->GetOrBuildConstantProperties(isolate());
   __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
+  __ LoadSmiLiteral(r5, SmiFromSlot(expr->literal_slot()));
   __ mov(r4, Operand(constant_properties));
   int flags = expr->ComputeFlags();
   __ LoadSmiLiteral(r3, Smi::FromInt(flags));
@@ -1336,8 +1194,9 @@
     __ Push(r6, r5, r4, r3);
     __ CallRuntime(Runtime::kCreateObjectLiteral);
   } else {
-    FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
-    __ CallStub(&stub);
+    Callable callable = CodeFactory::FastCloneShallowObject(
+        isolate(), expr->properties_count());
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1347,10 +1206,9 @@
   bool result_saved = false;
 
   AccessorTable accessor_table(zone());
-  int property_index = 0;
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-    if (property->is_computed_name()) break;
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    DCHECK(!property->is_computed_name());
     if (property->IsCompileTimeValue()) continue;
 
     Literal* key = property->key()->AsLiteral();
@@ -1360,6 +1218,7 @@
       result_saved = true;
     }
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1374,7 +1233,7 @@
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(r3));
             __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-            CallStoreIC(property->GetSlot(0), key->value());
+            CallStoreIC(property->GetSlot(0), key->value(), true);
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1408,20 +1267,20 @@
         VisitForStackValue(value);
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-        PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+        PrepareForBailoutForId(expr->GetIdForPropertySet(i),
                                BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->setter = property;
         }
         break;
@@ -1443,73 +1302,6 @@
     PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
-  // Object literals have two parts. The "static" part on the left contains no
-  // computed property names, and so we can compute its map ahead of time; see
-  // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
-  // starts with the first computed property name, and continues with all
-  // properties to its right.  All the code above initializes the static
-  // component of the object literal, and arranges for the map of the result to
-  // reflect the static order in which the keys appear. For the dynamic
-  // properties, we compile them into a series of "SetOwnProperty" runtime
-  // calls. This will preserve insertion order.
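-  // Illustrative example: in { a: 0, b: 1, [k]: 2, c: 3 } the static part
-  // is { a: 0, b: 1 }, while [k]: 2 and c: 3 form the dynamic part that
-  // this loop defines one by one via runtime calls.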
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
-    Expression* value = property->value();
-    if (!result_saved) {
-      PushOperand(r3);  // Save result on the stack
-      result_saved = true;
-    }
-
-    __ LoadP(r3, MemOperand(sp));  // Duplicate receiver.
-    PushOperand(r3);
-
-    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
-      DCHECK(!property->is_computed_name());
-      VisitForStackValue(value);
-      DCHECK(property->emit_store());
-      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-      PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             BailoutState::NO_REGISTERS);
-    } else {
-      EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
-      VisitForStackValue(value);
-      if (NeedsHomeObject(value)) {
-        EmitSetHomeObject(value, 2, property->GetSlot());
-      }
-
-      switch (property->kind()) {
-        case ObjectLiteral::Property::CONSTANT:
-        case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        case ObjectLiteral::Property::COMPUTED:
-          if (property->emit_store()) {
-            PushOperand(Smi::FromInt(NONE));
-            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                                   BailoutState::NO_REGISTERS);
-          } else {
-            DropOperands(3);
-          }
-          break;
-
-        case ObjectLiteral::Property::PROTOTYPE:
-          UNREACHABLE();
-          break;
-
-        case ObjectLiteral::Property::GETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-          break;
-
-        case ObjectLiteral::Property::SETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-          break;
-      }
-    }
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1521,29 +1313,20 @@
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
 
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  bool has_fast_elements =
-      IsFastObjectElementsKind(expr->constant_elements_kind());
-  Handle<FixedArrayBase> constant_elements_values(
-      FixedArrayBase::cast(constant_elements->get(1)));
-
-  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
-  if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
-    // If the only customer of allocation sites is transitioning, then
-    // we can turn it off if we don't have anywhere else to transition to.
-    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
-  }
+  Handle<ConstantElementsPair> constant_elements =
+      expr->GetOrBuildConstantElements(isolate());
 
   __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
+  __ LoadSmiLiteral(r5, SmiFromSlot(expr->literal_slot()));
   __ mov(r4, Operand(constant_elements));
   if (MustCreateArrayLiteralWithRuntime(expr)) {
     __ LoadSmiLiteral(r3, Smi::FromInt(expr->ComputeFlags()));
     __ Push(r6, r5, r4, r3);
     __ CallRuntime(Runtime::kCreateArrayLiteral);
   } else {
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
-    __ CallStub(&stub);
+    Callable callable =
+        CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1606,34 +1389,6 @@
         VisitForStackValue(property->obj());
       }
       break;
-    case NAMED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        const Register scratch = r4;
-        __ LoadP(scratch, MemOperand(sp, kPointerSize));
-        PushOperands(scratch, result_register());
-      }
-      break;
-    case KEYED_SUPER_PROPERTY: {
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(property->key());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        const Register scratch1 = r5;
-        const Register scratch2 = r4;
-        __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
-        __ LoadP(scratch2, MemOperand(sp, 1 * kPointerSize));
-        PushOperands(scratch1, scratch2, result_register());
-      }
-      break;
-    }
     case KEYED_PROPERTY:
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
@@ -1646,6 +1401,10 @@
         VisitForStackValue(property->key());
       }
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 
   // For compound assignments we need another deoptimization point after the
@@ -1663,21 +1422,15 @@
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
-        case NAMED_SUPER_PROPERTY:
-          EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
-        case KEYED_SUPER_PROPERTY:
-          EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
+        case NAMED_SUPER_PROPERTY:
+        case KEYED_SUPER_PROPERTY:
+          UNREACHABLE();
+          break;
       }
     }
 
@@ -1714,73 +1467,20 @@
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
-    case NAMED_SUPER_PROPERTY:
-      EmitNamedSuperPropertyStore(property);
-      context()->Plug(r3);
-      break;
-    case KEYED_SUPER_PROPERTY:
-      EmitKeyedSuperPropertyStore(property);
-      context()->Plug(r3);
-      break;
     case KEYED_PROPERTY:
       EmitKeyedPropertyAssignment(expr);
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
 
 void FullCodeGenerator::VisitYield(Yield* expr) {
-  Comment cmnt(masm_, "[ Yield");
-  SetExpressionPosition(expr);
-
-  // Evaluate the yielded value first; the initial iterator definition depends on
-  // this.  It stays on the stack while we update the iterator.
-  VisitForStackValue(expr->expression());
-
-  Label suspend, continuation, post_runtime, resume, exception;
-
-  __ b(&suspend);
-  __ bind(&continuation);
-  // When we arrive here, r3 holds the generator object.
-  __ RecordGeneratorContinuation();
-  __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
-  __ LoadP(r3, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset));
-  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
-  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
-  __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kReturn), r0);
-  __ blt(&resume);
-  __ Push(result_register());
-  __ bgt(&exception);
-  EmitCreateIteratorResult(true);
-  EmitUnwindAndReturn();
-
-  __ bind(&exception);
-  __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
-                                              : Runtime::kThrow);
-
-  __ bind(&suspend);
-  OperandStackDepthIncrement(1);  // Not popped on this path.
-  VisitForAccumulatorValue(expr->generator_object());
-  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-  __ LoadSmiLiteral(r4, Smi::FromInt(continuation.pos()));
-  __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
-            r0);
-  __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
-  __ mr(r4, cp);
-  __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
-                      kLRHasBeenSaved, kDontSaveFPRegs);
-  __ addi(r4, fp, Operand(StandardFrameConstants::kExpressionsOffset));
-  __ cmp(sp, r4);
-  __ beq(&post_runtime);
-  __ push(r3);  // generator object
-  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  RestoreContext();
-  __ bind(&post_runtime);
-  PopOperand(result_register());
-  EmitReturnSequence();
-
-  __ bind(&resume);
-  context()->Plug(result_register());
+  // Resumable functions are not supported.
+  UNREACHABLE();
 }
 
 void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
@@ -1965,60 +1665,6 @@
   context()->Plug(r3);
 }
 
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  for (int i = 0; i < lit->properties()->length(); i++) {
-    ClassLiteral::Property* property = lit->properties()->at(i);
-    Expression* value = property->value();
-
-    Register scratch = r4;
-    if (property->is_static()) {
-      __ LoadP(scratch, MemOperand(sp, kPointerSize));  // constructor
-    } else {
-      __ LoadP(scratch, MemOperand(sp, 0));  // prototype
-    }
-    PushOperand(scratch);
-    EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static "prototype" property is read-only. The non-computed
-    // property name case is handled in the parser. Since this is the only
-    // case where we need to check for an own read-only property, we
-    // special-case it so the check is not performed for every property.
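-    // Illustrative example: `class C { static ["prototype"]() {} }` must
-    // throw here at runtime, since only a computed name can get past the
-    // parser's check.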
-    if (property->is_static() && property->is_computed_name()) {
-      __ CallRuntime(Runtime::kThrowIfStaticPrototype);
-      __ push(r3);
-    }
-
-    VisitForStackValue(value);
-    if (NeedsHomeObject(value)) {
-      EmitSetHomeObject(value, 2, property->GetSlot());
-    }
-
-    switch (property->kind()) {
-      case ClassLiteral::Property::METHOD:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-        break;
-
-      case ClassLiteral::Property::GETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::SETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::FIELD:
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
   PopOperand(r4);
   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
@@ -2028,9 +1674,7 @@
   context()->Plug(r3);
 }
 
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
-                                       FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
   DCHECK(expr->IsValidReferenceExpressionOrThis());
 
   Property* prop = expr->AsProperty();
@@ -2052,43 +1696,6 @@
       CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      PushOperand(r3);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      // stack: value, this; r3: home_object
-      Register scratch = r5;
-      Register scratch2 = r6;
-      __ mr(scratch, result_register());                  // home_object
-      __ LoadP(r3, MemOperand(sp, kPointerSize));         // value
-      __ LoadP(scratch2, MemOperand(sp, 0));              // this
-      __ StoreP(scratch2, MemOperand(sp, kPointerSize));  // this
-      __ StoreP(scratch, MemOperand(sp, 0));              // home_object
-      // stack: this, home_object; r3: value
-      EmitNamedSuperPropertyStore(prop);
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      PushOperand(r3);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(prop->key());
-      Register scratch = r5;
-      Register scratch2 = r6;
-      __ LoadP(scratch2, MemOperand(sp, 2 * kPointerSize));  // value
-      // stack: value, this, home_object; r3: key, r6: value
-      __ LoadP(scratch, MemOperand(sp, kPointerSize));  // this
-      __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
-      __ LoadP(scratch, MemOperand(sp, 0));  // home_object
-      __ StoreP(scratch, MemOperand(sp, kPointerSize));
-      __ StoreP(r3, MemOperand(sp, 0));
-      __ Move(r3, scratch2);
-      // stack: this, home_object, key; r3: value.
-      EmitKeyedSuperPropertyStore(prop);
-      break;
-    }
     case KEYED_PROPERTY: {
       PushOperand(r3);  // Preserve value.
       VisitForStackValue(prop->obj());
@@ -2099,6 +1706,10 @@
       CallKeyedStoreIC(slot);
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
   context()->Plug(r3);
 }
@@ -2117,7 +1728,7 @@
 }
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot,
+                                               FeedbackSlot slot,
                                                HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
@@ -2160,26 +1771,18 @@
 
   } else {
     DCHECK(var->mode() != CONST || op == Token::INIT);
-    if (var->IsLookupSlot()) {
-      // Assignment to var.
-      __ Push(var->name());
-      __ Push(r3);
-      __ CallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreLookupSlot_Strict
-                         : Runtime::kStoreLookupSlot_Sloppy);
-    } else {
-      // Assignment to var or initializing assignment to let/const in harmony
-      // mode.
-      DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
-      MemOperand location = VarOperand(var, r4);
-      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
-        // Check for an uninitialized let binding.
-        __ LoadP(r5, location);
-        __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
-        __ Check(eq, kLetBindingReInitialization);
-      }
-      EmitStoreToStackLocalOrContextSlot(var, location);
+    DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+    DCHECK(!var->IsLookupSlot());
+    // Assignment to var or initializing assignment to let/const in harmony
+    // mode.
+    MemOperand location = VarOperand(var, r4);
+    if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+      // Check for an uninitialized let binding.
+      __ LoadP(r5, location);
+      __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+      __ Check(eq, kLetBindingReInitialization);
     }
+    EmitStoreToStackLocalOrContextSlot(var, location);
   }
 }
 
@@ -2198,35 +1801,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // r3 : value
-  // stack : receiver ('this'), home_object
-  DCHECK(prop != NULL);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(key != NULL);
-
-  PushOperand(key->value());
-  PushOperand(r3);
-  CallRuntimeWithOperands((is_strict(language_mode())
-                               ? Runtime::kStoreToSuper_Strict
-                               : Runtime::kStoreToSuper_Sloppy));
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to keyed property of super.
-  // r3 : value
-  // stack : receiver ('this'), home_object, key
-  DCHECK(prop != NULL);
-
-  PushOperand(r3);
-  CallRuntimeWithOperands((is_strict(language_mode())
-                               ? Runtime::kStoreKeyedToSuper_Strict
-                               : Runtime::kStoreKeyedToSuper_Sloppy));
-}
-
-
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
   PopOperands(StoreDescriptor::ReceiverRegister(),
@@ -2275,43 +1849,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-  SetExpressionPosition(prop);
-
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  // Load the function from the receiver.
-  const Register scratch = r4;
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForAccumulatorValue(super_ref->home_object());
-  __ mr(scratch, r3);
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperands(scratch, r3, r3, scratch);
-  PushOperand(key->value());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ StoreP(r3, MemOperand(sp, kPointerSize));
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
   // Load the key.
@@ -2336,41 +1873,6 @@
 }
 
 
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  SetExpressionPosition(prop);
-  // Load the function from the receiver.
-  const Register scratch = r4;
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForAccumulatorValue(super_ref->home_object());
-  __ mr(scratch, r3);
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperands(scratch, r3, r3, scratch);
-  VisitForStackValue(prop->key());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ StoreP(r3, MemOperand(sp, kPointerSize));
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2390,8 +1892,9 @@
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
   Handle<Code> code =
-      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
-  __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
+      CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+          .code();
+  __ mov(r6, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
   __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
   __ mov(r3, Operand(arg_count));
   CallIC(code);
@@ -2402,117 +1905,6 @@
   context()->DropAndPlug(1, r3);
 }
 
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
-  int arg_count = expr->arguments()->length();
-  // r7: copy of the first argument or undefined if it doesn't exist.
-  if (arg_count > 0) {
-    __ LoadP(r7, MemOperand(sp, arg_count * kPointerSize), r0);
-  } else {
-    __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
-  }
-
-  // r6: the receiver of the enclosing function.
-  __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
-  // r5: language mode.
-  __ LoadSmiLiteral(r5, Smi::FromInt(language_mode()));
-
-  // r4: the start position of the scope the call resides in.
-  __ LoadSmiLiteral(r4, Smi::FromInt(scope()->start_position()));
-
-  // r3: the source position of the eval call.
-  __ LoadSmiLiteral(r3, Smi::FromInt(expr->position()));
-
-  // Do the runtime call.
-  __ Push(r7, r6, r5, r4, r3);
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
-  VariableProxy* callee = expr->expression()->AsVariableProxy();
-  if (callee->var()->IsLookupSlot()) {
-    Label slow, done;
-    SetExpressionPosition(callee);
-    // Generate code for loading from variables potentially shadowed by
-    // eval-introduced variables.
-    EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
-    __ bind(&slow);
-    // Call the runtime to find the function to call (returned in r3) and
-    // the object holding it (returned in r4).
-    __ Push(callee->name());
-    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
-    PushOperands(r3, r4);  // Function, receiver.
-    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
-    // If fast case code has been generated, emit code to push the function
-    // and receiver and have the slow path jump around this code.
-    if (done.is_linked()) {
-      Label call;
-      __ b(&call);
-      __ bind(&done);
-      // Push function.
-      __ push(r3);
-      // Pass undefined as the receiver, which is the WithBaseObject of a
-      // non-object environment record.  If the callee is sloppy, it will patch
-      // it up to be the global receiver.
-      __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
-      __ push(r4);
-      __ bind(&call);
-    }
-  } else {
-    VisitForStackValue(callee);
-    // refEnv.WithBaseObject()
-    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
-    PushOperand(r5);  // Reserved receiver slot.
-  }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call
-  // Runtime_ResolvePossiblyDirectEval to resolve the function we need
-  // to call.  Then we call the resolved function using the given arguments.
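-  // Illustrative example: for a direct call `eval(src)` the runtime decides
-  // whether the callee really is the global eval and, if so, returns a
-  // function compiled from `src`; otherwise the original callee is returned.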
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  PushCalleeAndWithBaseObject(expr);
-
-  // Push the arguments.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Push a copy of the function (found below the arguments) and
-  // resolve eval.
-  __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
-  __ push(r4);
-  EmitResolvePossiblyDirectEval(expr);
-
-  // Touch up the stack with the resolved function.
-  __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
-
-  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
-  // Record source position for debugger.
-  SetCallPosition(expr);
-  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
-                                          expr->tail_call_mode())
-                          .code();
-  __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
-  __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
-  __ mov(r3, Operand(arg_count));
-  __ Call(code, RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->DropAndPlug(1, r3);
-}
-
-
 void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
@@ -2541,7 +1933,7 @@
   __ LoadP(r4, MemOperand(sp, arg_count * kPointerSize), r0);
 
   // Record call targets in unoptimized code.
-  __ EmitLoadTypeFeedbackVector(r5);
+  __ EmitLoadFeedbackVector(r5);
   __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallNewFeedbackSlot()));
 
   CallConstructStub stub(isolate());
@@ -2553,49 +1945,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
-  SuperCallReference* super_call_ref =
-      expr->expression()->AsSuperCallReference();
-  DCHECK_NOT_NULL(super_call_ref);
-
-  // Push the super constructor target on the stack (may be null,
-  // but the Construct builtin can deal with that properly).
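-  // Illustrative example: in `class B extends A { constructor() { super(); } }`
-  // the target pushed here is A, found as the prototype of the current (B)
-  // constructor loaded just below.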
-  VisitForAccumulatorValue(super_call_ref->this_function_var());
-  __ AssertFunction(result_register());
-  __ LoadP(result_register(),
-           FieldMemOperand(result_register(), HeapObject::kMapOffset));
-  __ LoadP(result_register(),
-           FieldMemOperand(result_register(), Map::kPrototypeOffset));
-  PushOperand(result_register());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  SetConstructCallPosition(expr);
-
-  // Load new target into r6.
-  VisitForAccumulatorValue(super_call_ref->new_target_var());
-  __ mr(r6, result_register());
-
-  // Load the function and argument count into r4 and r3.
-  __ mov(r3, Operand(arg_count));
-  __ LoadP(r4, MemOperand(sp, arg_count * kPointerSize));
-
-  __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->Plug(r3);
-}
-
-
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2683,28 +2032,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(r3, if_false);
-  __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2941,16 +2268,12 @@
           __ Push(r5, r4);
           __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
           context()->Plug(r3);
-        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+        } else {
+          DCHECK(!var->IsLookupSlot());
+          DCHECK(var->IsStackAllocated() || var->IsContextSlot());
           // Result of deleting non-global, non-dynamic variables is false.
           // The subexpression does not have side effects.
           context()->Plug(is_this);
-        } else {
-          // Non-global variable.  Call the runtime to try to delete from the
-          // context where the variable was introduced.
-          __ Push(var->name());
-          __ CallRuntime(Runtime::kDeleteLookupSlot);
-          context()->Plug(r3);
         }
       } else {
         // Result of deleting non-property, non-variable reference is true.
@@ -3052,31 +2375,6 @@
         break;
       }
 
-      case NAMED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForAccumulatorValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        const Register scratch = r4;
-        __ LoadP(scratch, MemOperand(sp, 0));  // this
-        PushOperands(result_register(), scratch, result_register());
-        EmitNamedSuperPropertyLoad(prop);
-        break;
-      }
-
-      case KEYED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForStackValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        VisitForAccumulatorValue(prop->key());
-        const Register scratch1 = r4;
-        const Register scratch2 = r5;
-        __ LoadP(scratch1, MemOperand(sp, 1 * kPointerSize));  // this
-        __ LoadP(scratch2, MemOperand(sp, 0 * kPointerSize));  // home object
-        PushOperands(result_register(), scratch1, scratch2, result_register());
-        EmitKeyedSuperPropertyLoad(prop);
-        break;
-      }
-
       case KEYED_PROPERTY: {
         VisitForStackValue(prop->obj());
         VisitForStackValue(prop->key());
@@ -3087,6 +2385,8 @@
         break;
       }
 
+      case NAMED_SUPER_PROPERTY:
+      case KEYED_SUPER_PROPERTY:
       case VARIABLE:
         UNREACHABLE();
     }
@@ -3122,14 +2422,12 @@
           case NAMED_PROPERTY:
             __ StoreP(r3, MemOperand(sp, kPointerSize));
             break;
-          case NAMED_SUPER_PROPERTY:
-            __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
-            break;
           case KEYED_PROPERTY:
             __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
             break;
+          case NAMED_SUPER_PROPERTY:
           case KEYED_SUPER_PROPERTY:
-            __ StoreP(r3, MemOperand(sp, 3 * kPointerSize));
+            UNREACHABLE();
             break;
         }
       }
@@ -3164,14 +2462,12 @@
         case NAMED_PROPERTY:
           __ StoreP(r3, MemOperand(sp, kPointerSize));
           break;
-        case NAMED_SUPER_PROPERTY:
-          __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
-          break;
         case KEYED_PROPERTY:
           __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
           break;
+        case NAMED_SUPER_PROPERTY:
         case KEYED_SUPER_PROPERTY:
-          __ StoreP(r3, MemOperand(sp, 3 * kPointerSize));
+          UNREACHABLE();
           break;
       }
     }
@@ -3228,30 +2524,6 @@
       }
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      EmitNamedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(r3);
-      }
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      EmitKeyedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(r3);
-      }
-      break;
-    }
     case KEYED_PROPERTY: {
       PopOperands(StoreDescriptor::ReceiverRegister(),
                   StoreDescriptor::NameRegister());
@@ -3266,6 +2538,10 @@
       }
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
@@ -3336,16 +2612,6 @@
     __ andi(r0, r4,
             Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     Split(eq, if_true, if_false, fall_through, cr0);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)   \
-  } else if (String::Equals(check, factory->type##_string())) { \
-    __ JumpIfSmi(r3, if_false);                                 \
-    __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));    \
-    __ CompareRoot(r3, Heap::k##Type##MapRootIndex);            \
-    Split(eq, if_true, if_false, fall_through);
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
   } else {
     if (if_false != fall_through) __ b(if_false);
   }
@@ -3386,6 +2652,7 @@
       SetExpressionPosition(expr);
       PopOperand(r4);
       __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+      RestoreContext();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(r3, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
@@ -3496,70 +2763,6 @@
 }
 
 
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
-  DCHECK(!result_register().is(r4));
-  // Store pending message while executing finally block.
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(ip, Operand(pending_message_obj));
-  __ LoadP(r4, MemOperand(ip));
-  PushOperand(r4);
-
-  ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
-  DCHECK(!result_register().is(r4));
-  // Restore pending message from stack.
-  PopOperand(r4);
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(ip, Operand(pending_message_obj));
-  __ StoreP(r4, MemOperand(ip));
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
-  DCHECK(!result_register().is(r4));
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
-  __ mov(ip, Operand(pending_message_obj));
-  __ StoreP(r4, MemOperand(ip));
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
-  DCHECK(!result_register().is(r4));
-  // Restore the accumulator (r3) and token (r4).
-  __ Pop(r4, result_register());
-  for (DeferredCommand cmd : commands_) {
-    Label skip;
-    __ CmpSmiLiteral(r4, Smi::FromInt(cmd.token), r0);
-    __ bne(&skip);
-    switch (cmd.command) {
-      case kReturn:
-        codegen_->EmitUnwindAndReturn();
-        break;
-      case kThrow:
-        __ Push(result_register());
-        __ CallRuntime(Runtime::kReThrow);
-        break;
-      case kContinue:
-        codegen_->EmitContinue(cmd.target);
-        break;
-      case kBreak:
-        codegen_->EmitBreak(cmd.target);
-        break;
-    }
-    __ bind(&skip);
-  }
-}
-
 #undef __
 
 
diff --git a/src/full-codegen/s390/full-codegen-s390.cc b/src/full-codegen/s390/full-codegen-s390.cc
index 91fa86d..340082a 100644
--- a/src/full-codegen/s390/full-codegen-s390.cc
+++ b/src/full-codegen/s390/full-codegen-s390.cc
@@ -4,15 +4,16 @@
 
 #if V8_TARGET_ARCH_S390
 
-#include "src/full-codegen/full-codegen.h"
 #include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
 
 #include "src/s390/code-stubs-s390.h"
@@ -136,23 +137,20 @@
   // Increment invocation count for the function.
   {
     Comment cmnt(masm_, "[ Increment invocation count");
-    __ LoadP(r6, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
-    __ LoadP(r6, FieldMemOperand(r6, LiteralsArray::kFeedbackVectorOffset));
-    __ LoadP(r1, FieldMemOperand(r6, TypeFeedbackVector::kInvocationCountIndex *
-                                             kPointerSize +
-                                         TypeFeedbackVector::kHeaderSize));
+    __ LoadP(r6, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
+    __ LoadP(r6, FieldMemOperand(r6, Cell::kValueOffset));
+    __ LoadP(r1, FieldMemOperand(
+                     r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                             FeedbackVector::kHeaderSize));
     __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
-    __ StoreP(r1,
-              FieldMemOperand(
-                  r6, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                          TypeFeedbackVector::kHeaderSize));
+    __ StoreP(r1, FieldMemOperand(
+                      r6, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                              FeedbackVector::kHeaderSize));
   }
 
   {
     Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
-    // Generators allocate locals, if any, in context slots.
-    DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
     OperandStackDepthIncrement(locals_count);
     if (locals_count > 0) {
       if (locals_count >= 128) {
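The reworked invocation-count load at the top of this hunk reflects the feedback vector moving off the LiteralsArray: a JSFunction now holds a Cell whose value is the FeedbackVector, so the counter is reached with two dependent loads plus a fixed in-object offset (the x64 prologue below makes the same change). A rough sketch with minimal stand-in structs (illustrative only; real V8 reads raw tagged fields at the named offsets):

  // Stand-ins for the heap objects involved, not the real object layouts.
  struct FeedbackVector { int invocation_count; };    // kInvocationCountIndex slot
  struct Cell { FeedbackVector* value; };             // Cell::kValueOffset
  struct JSFunction { Cell* feedback_vector_cell; };  // JSFunction::kFeedbackVectorOffset

  // The two-load chain the prologue now emits before bumping the counter.
  void IncrementInvocationCount(JSFunction* fn) {
    FeedbackVector* vector = fn->feedback_vector_cell->value;
    ++vector->invocation_count;
  }
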
@@ -212,15 +210,18 @@
       if (info->scope()->new_target_var() != nullptr) {
         __ push(r5);  // Preserve new target.
       }
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info->scope()->scope_type());
         __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
                Operand(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ push(r3);
+        __ Push(Smi::FromInt(info->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
       if (info->scope()->new_target_var() != nullptr) {
@@ -267,39 +268,10 @@
   PrepareForBailoutForId(BailoutId::FunctionContext(),
                          BailoutState::NO_REGISTERS);
 
-  // Possibly set up a local binding to the this function which is used in
-  // derived constructors with super calls.
-  Variable* this_function_var = info->scope()->this_function_var();
-  if (this_function_var != nullptr) {
-    Comment cmnt(masm_, "[ This function");
-    if (!function_in_register_r3) {
-      __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers the register again; keep it marked as such.
-    }
-    SetVar(this_function_var, r3, r2, r4);
-  }
-
-  // Possibly set up a local binding to the new target value.
-  Variable* new_target_var = info->scope()->new_target_var();
-  if (new_target_var != nullptr) {
-    Comment cmnt(masm_, "[ new.target");
-    SetVar(new_target_var, r5, r2, r4);
-  }
-
-  // Possibly allocate RestParameters
-  Variable* rest_param = info->scope()->rest_parameter();
-  if (rest_param != nullptr) {
-    Comment cmnt(masm_, "[ Allocate rest parameter array");
-
-    if (!function_in_register_r3) {
-      __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-    }
-    FastNewRestParameterStub stub(isolate());
-    __ CallStub(&stub);
-
-    function_in_register_r3 = false;
-    SetVar(rest_param, r2, r3, r4);
-  }
+  // We don't support new.target, rest parameters, or this-function here.
+  DCHECK_NULL(info->scope()->new_target_var());
+  DCHECK_NULL(info->scope()->rest_parameter());
+  DCHECK_NULL(info->scope()->this_function_var());
 
   Variable* arguments = info->scope()->arguments();
   if (arguments != NULL) {
@@ -310,14 +282,16 @@
       __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     }
     if (is_strict(language_mode()) || !has_simple_parameters()) {
-      FastNewStrictArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+      __ Call(callable.code(), RelocInfo::CODE_TARGET);
+      RestoreContext();
     } else if (literal()->has_duplicate_parameters()) {
       __ Push(r3);
       __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
     } else {
-      FastNewSloppyArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+      __ Call(callable.code(), RelocInfo::CODE_TARGET);
+      RestoreContext();
     }
 
     SetVar(arguments, r2, r3, r4);
@@ -529,10 +503,8 @@
 void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
   codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
-         !lit->IsUndetectable());
-  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
-      lit->IsFalse(isolate())) {
+  DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+  if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ b(false_label_);
   } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ b(true_label_);
@@ -726,10 +698,12 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
       globals_->Add(isolate()->factory()->undefined_value(), zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
     }
     case VariableLocation::PARAMETER:
@@ -752,17 +726,7 @@
       }
       break;
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ VariableDeclaration");
-      DCHECK_EQ(VAR, variable->mode());
-      DCHECK(!variable->binding_needs_init());
-      __ mov(r4, Operand(variable->name()));
-      __ Push(r4);
-      __ CallRuntime(Runtime::kDeclareEvalVar);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -774,9 +738,16 @@
   Variable* variable = proxy->var();
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+      // We need the slot where the literals array lives, too.
+      slot = declaration->fun()->LiteralFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
       Handle<SharedFunctionInfo> function =
           Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
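With the extra Add calls above, each global declaration now contributes a fixed group of entries to the flat globals_ array handed to Runtime::kDeclareGlobals: the variable name, its feedback slot, a second slot (the function literal's feedback slot, or undefined for plain variables), and the initial value. A hypothetical record making that grouping explicit (field order inferred from the Adds above; not an actual V8 struct):

  // Hypothetical view of one entry group in the globals_ array.
  struct GlobalDeclarationEntry {
    const void* name;           // variable->name()
    int variable_slot;          // proxy->VariableFeedbackSlot().ToInt()
    int literal_slot_or_undef;  // fun()->LiteralFeedbackSlot(), else undefined
    const void* initial_value;  // SharedFunctionInfo, or undefined
  };
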
@@ -807,17 +778,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      __ mov(r4, Operand(variable->name()));
-      PushOperand(r4);
-      // Push initial value for function declaration.
-      VisitForStackValue(declaration->fun());
-      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -827,7 +788,7 @@
   // Call the runtime to declare the globals.
   __ mov(r3, Operand(pairs));
   __ LoadSmiLiteral(r2, Smi::FromInt(DeclareGlobalsFlags()));
-  __ EmitLoadTypeFeedbackVector(r4);
+  __ EmitLoadFeedbackVector(r4);
   __ Push(r3, r2, r4);
   __ CallRuntime(Runtime::kDeclareGlobals);
   // Return value is ignored.
@@ -930,7 +891,7 @@
   Comment cmnt(masm_, "[ ForInStatement");
   SetStatementPosition(stmt, SKIP_BREAK);
 
-  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  FeedbackSlot slot = stmt->ForInFeedbackSlot();
 
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1050,8 +1011,8 @@
 
   // We need to filter the key, record slow-path here.
   int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(r2);
-  __ mov(r4, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ EmitLoadFeedbackVector(r2);
+  __ mov(r4, Operand(FeedbackVector::MegamorphicSentinel(isolate())));
   __ StoreP(
       r4, FieldMemOperand(r2, FixedArray::OffsetOfElementAt(vector_index)), r0);
 
@@ -1104,7 +1065,7 @@
 }
 
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
-                                          FeedbackVectorSlot slot) {
+                                          FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
   __ LoadP(StoreDescriptor::ValueRegister(),
@@ -1114,7 +1075,7 @@
 
 void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
                                                      int offset,
-                                                     FeedbackVectorSlot slot) {
+                                                     FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ Move(StoreDescriptor::ReceiverRegister(), r2);
   __ LoadP(StoreDescriptor::ValueRegister(),
@@ -1122,89 +1083,6 @@
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
-                                                      TypeofMode typeof_mode,
-                                                      Label* slow) {
-  Register current = cp;
-  Register next = r3;
-  Register temp = r4;
-
-  int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
-  for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (s->calls_sloppy_eval()) {
-      // Check that extension is "the hole".
-      __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
-      __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-    }
-    // Load next context in chain.
-    __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
-    // Walk the rest of the chain without clobbering cp.
-    current = next;
-    to_check--;
-  }
-
-  // All extension objects were empty and it is safe to use the normal global
-  // load machinery.
-  EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
-                                                                Label* slow) {
-  DCHECK(var->IsContextSlot());
-  Register context = cp;
-  Register next = r5;
-  Register temp = r6;
-
-  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->NeedsContext()) {
-      if (s->calls_sloppy_eval()) {
-        // Check that extension is "the hole".
-        __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
-        __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-      }
-      __ LoadP(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-      // Walk the rest of the chain without clobbering cp.
-      context = next;
-    }
-  }
-  // Check that last extension is "the hole".
-  __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
-  __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-
-  // This function is used only for loads, not stores, so it's safe to
-  // return a cp-based operand (the write barrier cannot be allowed to
-  // destroy the cp register).
-  return ContextMemOperand(context, var->index());
-}
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
-                                                  TypeofMode typeof_mode,
-                                                  Label* slow, Label* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  Variable* var = proxy->var();
-  if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
-    __ b(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
-    Variable* local = var->local_if_not_shadowed();
-    __ LoadP(r2, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->binding_needs_init()) {
-      __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
-      __ bne(done);
-      __ mov(r2, Operand(var->name()));
-      __ push(r2);
-      __ CallRuntime(Runtime::kThrowReferenceError);
-    } else {
-      __ b(done);
-    }
-  }
-}
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1212,8 +1090,7 @@
   PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
-  // Three cases: global variables, lookup variables, and all other types of
-  // variables.
+  // Two cases: global variables and all other types of variables.
   switch (var->location()) {
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
@@ -1246,24 +1123,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ Lookup variable");
-      Label done, slow;
-      // Generate code for loading from variables potentially shadowed
-      // by eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
-      __ bind(&slow);
-      __ Push(var->name());
-      Runtime::FunctionId function_id =
-          typeof_mode == NOT_INSIDE_TYPEOF
-              ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotInsideTypeof;
-      __ CallRuntime(function_id);
-      __ bind(&done);
-      context()->Plug(r2);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -1288,9 +1148,10 @@
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
 
-  Handle<FixedArray> constant_properties = expr->constant_properties();
+  Handle<BoilerplateDescription> constant_properties =
+      expr->GetOrBuildConstantProperties(isolate());
   __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+  __ LoadSmiLiteral(r4, SmiFromSlot(expr->literal_slot()));
   __ mov(r3, Operand(constant_properties));
   int flags = expr->ComputeFlags();
   __ LoadSmiLiteral(r2, Smi::FromInt(flags));
@@ -1298,8 +1159,9 @@
     __ Push(r5, r4, r3, r2);
     __ CallRuntime(Runtime::kCreateObjectLiteral);
   } else {
-    FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
-    __ CallStub(&stub);
+    Callable callable = CodeFactory::FastCloneShallowObject(
+        isolate(), expr->properties_count());
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1309,10 +1171,9 @@
   bool result_saved = false;
 
   AccessorTable accessor_table(zone());
-  int property_index = 0;
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-    if (property->is_computed_name()) break;
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    DCHECK(!property->is_computed_name());
     if (property->IsCompileTimeValue()) continue;
 
     Literal* key = property->key()->AsLiteral();
@@ -1322,6 +1183,7 @@
       result_saved = true;
     }
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1336,7 +1198,7 @@
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(r2));
             __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
-            CallStoreIC(property->GetSlot(0), key->value());
+            CallStoreIC(property->GetSlot(0), key->value(), true);
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1370,20 +1232,20 @@
         VisitForStackValue(value);
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-        PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+        PrepareForBailoutForId(expr->GetIdForPropertySet(i),
                                BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->setter = property;
         }
         break;
@@ -1405,73 +1267,6 @@
     PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
-  // Object literals have two parts. The "static" part on the left contains no
-  // computed property names, and so we can compute its map ahead of time; see
-  // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
-  // starts with the first computed property name, and continues with all
-  // properties to its right.  All the code from above initializes the static
-  // component of the object literal, and arranges for the map of the result to
-  // reflect the static order in which the keys appear. For the dynamic
-  // properties, we compile them into a series of "SetOwnProperty" runtime
-  // calls. This will preserve insertion order.
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
-    Expression* value = property->value();
-    if (!result_saved) {
-      PushOperand(r2);  // Save result on the stack
-      result_saved = true;
-    }
-
-    __ LoadP(r2, MemOperand(sp));  // Duplicate receiver.
-    PushOperand(r2);
-
-    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
-      DCHECK(!property->is_computed_name());
-      VisitForStackValue(value);
-      DCHECK(property->emit_store());
-      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-      PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             BailoutState::NO_REGISTERS);
-    } else {
-      EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
-      VisitForStackValue(value);
-      if (NeedsHomeObject(value)) {
-        EmitSetHomeObject(value, 2, property->GetSlot());
-      }
-
-      switch (property->kind()) {
-        case ObjectLiteral::Property::CONSTANT:
-        case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        case ObjectLiteral::Property::COMPUTED:
-          if (property->emit_store()) {
-            PushOperand(Smi::FromInt(NONE));
-            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                                   BailoutState::NO_REGISTERS);
-          } else {
-            DropOperands(3);
-          }
-          break;
-
-        case ObjectLiteral::Property::PROTOTYPE:
-          UNREACHABLE();
-          break;
-
-        case ObjectLiteral::Property::GETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-          break;
-
-        case ObjectLiteral::Property::SETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-          break;
-      }
-    }
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1482,29 +1277,20 @@
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
 
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  bool has_fast_elements =
-      IsFastObjectElementsKind(expr->constant_elements_kind());
-  Handle<FixedArrayBase> constant_elements_values(
-      FixedArrayBase::cast(constant_elements->get(1)));
-
-  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
-  if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
-    // If the only customer of allocation sites is transitioning, then
-    // we can turn it off if we don't have anywhere else to transition to.
-    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
-  }
+  Handle<ConstantElementsPair> constant_elements =
+      expr->GetOrBuildConstantElements(isolate());
 
   __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+  __ LoadSmiLiteral(r4, SmiFromSlot(expr->literal_slot()));
   __ mov(r3, Operand(constant_elements));
   if (MustCreateArrayLiteralWithRuntime(expr)) {
     __ LoadSmiLiteral(r2, Smi::FromInt(expr->ComputeFlags()));
     __ Push(r5, r4, r3, r2);
     __ CallRuntime(Runtime::kCreateArrayLiteral);
   } else {
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
-    __ CallStub(&stub);
+    Callable callable =
+        CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1566,34 +1352,6 @@
         VisitForStackValue(property->obj());
       }
       break;
-    case NAMED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        const Register scratch = r3;
-        __ LoadP(scratch, MemOperand(sp, kPointerSize));
-        PushOperands(scratch, result_register());
-      }
-      break;
-    case KEYED_SUPER_PROPERTY: {
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(property->key());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        const Register scratch1 = r4;
-        const Register scratch2 = r3;
-        __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
-        __ LoadP(scratch2, MemOperand(sp, 1 * kPointerSize));
-        PushOperands(scratch1, scratch2, result_register());
-      }
-      break;
-    }
     case KEYED_PROPERTY:
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
@@ -1606,6 +1364,10 @@
         VisitForStackValue(property->key());
       }
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 
   // For compound assignments we need another deoptimization point after the
@@ -1623,21 +1385,15 @@
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
-        case NAMED_SUPER_PROPERTY:
-          EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
-        case KEYED_SUPER_PROPERTY:
-          EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
+        case NAMED_SUPER_PROPERTY:
+        case KEYED_SUPER_PROPERTY:
+          UNREACHABLE();
+          break;
       }
     }
 
@@ -1674,72 +1430,19 @@
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
-    case NAMED_SUPER_PROPERTY:
-      EmitNamedSuperPropertyStore(property);
-      context()->Plug(r2);
-      break;
-    case KEYED_SUPER_PROPERTY:
-      EmitKeyedSuperPropertyStore(property);
-      context()->Plug(r2);
-      break;
     case KEYED_PROPERTY:
       EmitKeyedPropertyAssignment(expr);
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
 void FullCodeGenerator::VisitYield(Yield* expr) {
-  Comment cmnt(masm_, "[ Yield");
-  SetExpressionPosition(expr);
-
-  // Evaluate yielded value first; the initial iterator definition depends on
-  // this.  It stays on the stack while we update the iterator.
-  VisitForStackValue(expr->expression());
-
-  Label suspend, continuation, post_runtime, resume, exception;
-
-  __ b(&suspend);
-  __ bind(&continuation);
-  // When we arrive here, r2 holds the generator object.
-  __ RecordGeneratorContinuation();
-  __ LoadP(r3, FieldMemOperand(r2, JSGeneratorObject::kResumeModeOffset));
-  __ LoadP(r2, FieldMemOperand(r2, JSGeneratorObject::kInputOrDebugPosOffset));
-  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
-  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
-  __ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::kReturn), r0);
-  __ blt(&resume);
-  __ Push(result_register());
-  __ bgt(&exception);
-  EmitCreateIteratorResult(true);
-  EmitUnwindAndReturn();
-
-  __ bind(&exception);
-  __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
-                                              : Runtime::kThrow);
-
-  __ bind(&suspend);
-  OperandStackDepthIncrement(1);  // Not popped on this path.
-  VisitForAccumulatorValue(expr->generator_object());
-  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-  __ LoadSmiLiteral(r3, Smi::FromInt(continuation.pos()));
-  __ StoreP(r3, FieldMemOperand(r2, JSGeneratorObject::kContinuationOffset),
-            r0);
-  __ StoreP(cp, FieldMemOperand(r2, JSGeneratorObject::kContextOffset), r0);
-  __ LoadRR(r3, cp);
-  __ RecordWriteField(r2, JSGeneratorObject::kContextOffset, r3, r4,
-                      kLRHasBeenSaved, kDontSaveFPRegs);
-  __ AddP(r3, fp, Operand(StandardFrameConstants::kExpressionsOffset));
-  __ CmpP(sp, r3);
-  __ beq(&post_runtime);
-  __ push(r2);  // generator object
-  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  RestoreContext();
-  __ bind(&post_runtime);
-  PopOperand(result_register());
-  EmitReturnSequence();
-
-  __ bind(&resume);
-  context()->Plug(result_register());
+  // Resumable functions are not supported.
+  UNREACHABLE();
 }
 
 void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
@@ -1870,34 +1573,42 @@
     }
     case Token::MUL: {
       Label mul_zero;
+      if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+        __ SmiUntag(ip, right);
+        __ MulPWithCondition(scratch2, ip, left);
+        __ b(overflow, &stub_call);
+        __ beq(&mul_zero, Label::kNear);
+        __ LoadRR(right, scratch2);
+      } else {
 #if V8_TARGET_ARCH_S390X
-      // Remove tag from both operands.
-      __ SmiUntag(ip, right);
-      __ SmiUntag(scratch2, left);
-      __ mr_z(scratch1, ip);
-      // Check for overflowing the smi range - no overflow if higher 33 bits of
-      // the result are identical.
-      __ lr(ip, scratch2);  // 32 bit load
-      __ sra(ip, Operand(31));
-      __ cr_z(ip, scratch1);  // 32 bit compare
-      __ bne(&stub_call);
+        // Remove tag from both operands.
+        __ SmiUntag(ip, right);
+        __ SmiUntag(scratch2, left);
+        __ mr_z(scratch1, ip);
+        // Check for overflowing the smi range - no overflow if higher 33 bits
+        // of the result are identical.
+        __ lr(ip, scratch2);  // 32 bit load
+        __ sra(ip, Operand(31));
+        __ cr_z(ip, scratch1);  // 32 bit compare
+        __ bne(&stub_call);
 #else
-      __ SmiUntag(ip, right);
-      __ LoadRR(scratch2, left);  // load into low order of reg pair
-      __ mr_z(scratch1, ip);      // R4:R5 = R5 * ip
-      // Check for overflowing the smi range - no overflow if higher 33 bits of
-      // the result are identical.
-      __ TestIfInt32(scratch1, scratch2, ip);
-      __ bne(&stub_call);
+        __ SmiUntag(ip, right);
+        __ LoadRR(scratch2, left);  // load into low order of reg pair
+        __ mr_z(scratch1, ip);      // R4:R5 = R5 * ip
+        // Check for overflowing the smi range - no overflow if higher 33 bits
+        // of the result are identical.
+        __ TestIfInt32(scratch1, scratch2, ip);
+        __ bne(&stub_call);
 #endif
-      // Go slow on zero result to handle -0.
-      __ chi(scratch2, Operand::Zero());
-      __ beq(&mul_zero, Label::kNear);
+        // Go slow on zero result to handle -0.
+        __ chi(scratch2, Operand::Zero());
+        __ beq(&mul_zero, Label::kNear);
 #if V8_TARGET_ARCH_S390X
-      __ SmiTag(right, scratch2);
+        __ SmiTag(right, scratch2);
 #else
-      __ LoadRR(right, scratch2);
+        __ LoadRR(right, scratch2);
 #endif
+      }
       __ b(&done);
       // We need -0 if we were multiplying a negative number by 0 to get 0.
       // We know one of them was zero.
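The multiply paths above reject products that overflow the smi range by checking that the high 33 bits of the 64-bit product are identical, i.e. that the product survives truncation to its low 32 bits followed by sign extension. A standalone statement of that predicate (the name is illustrative):

  #include <cstdint>

  // True iff a 64-bit product of two untagged smis fits back into a 32-bit
  // payload: bits 32..63 must all equal bit 31 ("higher 33 bits identical").
  bool ProductFitsSmiRange(int64_t product) {
    return product == static_cast<int64_t>(static_cast<int32_t>(product));
  }

For example, ProductFitsSmiRange(0x40000000LL * 2) is false, which is the case that sends the generated code to the stub call.
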
@@ -1925,58 +1636,6 @@
   context()->Plug(r2);
 }
 
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  for (int i = 0; i < lit->properties()->length(); i++) {
-    ClassLiteral::Property* property = lit->properties()->at(i);
-    Expression* value = property->value();
-
-    Register scratch = r3;
-    if (property->is_static()) {
-      __ LoadP(scratch, MemOperand(sp, kPointerSize));  // constructor
-    } else {
-      __ LoadP(scratch, MemOperand(sp, 0));  // prototype
-    }
-    PushOperand(scratch);
-    EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static prototype property is read only. We handle the non computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read only property we special case this so we do
-    // not need to do this for every property.
-    if (property->is_static() && property->is_computed_name()) {
-      __ CallRuntime(Runtime::kThrowIfStaticPrototype);
-      __ push(r2);
-    }
-
-    VisitForStackValue(value);
-    if (NeedsHomeObject(value)) {
-      EmitSetHomeObject(value, 2, property->GetSlot());
-    }
-
-    switch (property->kind()) {
-      case ClassLiteral::Property::METHOD:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-        break;
-
-      case ClassLiteral::Property::GETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::SETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::FIELD:
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
   PopOperand(r3);
   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
@@ -1986,8 +1645,7 @@
   context()->Plug(r2);
 }
 
-void FullCodeGenerator::EmitAssignment(Expression* expr,
-                                       FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
   DCHECK(expr->IsValidReferenceExpressionOrThis());
 
   Property* prop = expr->AsProperty();
@@ -2009,43 +1667,6 @@
       CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      PushOperand(r2);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      // stack: value, this; r2: home_object
-      Register scratch = r4;
-      Register scratch2 = r5;
-      __ LoadRR(scratch, result_register());              // home_object
-      __ LoadP(r2, MemOperand(sp, kPointerSize));         // value
-      __ LoadP(scratch2, MemOperand(sp, 0));              // this
-      __ StoreP(scratch2, MemOperand(sp, kPointerSize));  // this
-      __ StoreP(scratch, MemOperand(sp, 0));              // home_object
-      // stack: this, home_object; r2: value
-      EmitNamedSuperPropertyStore(prop);
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      PushOperand(r2);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(prop->key());
-      Register scratch = r4;
-      Register scratch2 = r5;
-      __ LoadP(scratch2, MemOperand(sp, 2 * kPointerSize));  // value
-      // stack: value, this, home_object; r2: key, r5: value
-      __ LoadP(scratch, MemOperand(sp, kPointerSize));  // this
-      __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
-      __ LoadP(scratch, MemOperand(sp, 0));  // home_object
-      __ StoreP(scratch, MemOperand(sp, kPointerSize));
-      __ StoreP(r2, MemOperand(sp, 0));
-      __ Move(r2, scratch2);
-      // stack: this, home_object, key; r2: value.
-      EmitKeyedSuperPropertyStore(prop);
-      break;
-    }
     case KEYED_PROPERTY: {
       PushOperand(r2);  // Preserve value.
       VisitForStackValue(prop->obj());
@@ -2056,6 +1677,10 @@
       CallKeyedStoreIC(slot);
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
   context()->Plug(r2);
 }
@@ -2073,7 +1698,7 @@
 }
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot,
+                                               FeedbackSlot slot,
                                                HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
@@ -2116,26 +1741,18 @@
     EmitStoreToStackLocalOrContextSlot(var, location);
   } else {
     DCHECK(var->mode() != CONST || op == Token::INIT);
-    if (var->IsLookupSlot()) {
-      // Assignment to var.
-      __ Push(var->name());
-      __ Push(r2);
-      __ CallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreLookupSlot_Strict
-                         : Runtime::kStoreLookupSlot_Sloppy);
-    } else {
-      // Assignment to var or initializing assignment to let/const in harmony
-      // mode.
-      DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
-      MemOperand location = VarOperand(var, r3);
-      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
-        // Check for an uninitialized let binding.
-        __ LoadP(r4, location);
-        __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
-        __ Check(eq, kLetBindingReInitialization);
-      }
-      EmitStoreToStackLocalOrContextSlot(var, location);
+    DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+    DCHECK(!var->IsLookupSlot());
+    // Assignment to var or initializing assignment to let/const in harmony
+    // mode.
+    MemOperand location = VarOperand(var, r3);
+    if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+      // Check for an uninitialized let binding.
+      __ LoadP(r4, location);
+      __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+      __ Check(eq, kLetBindingReInitialization);
     }
+    EmitStoreToStackLocalOrContextSlot(var, location);
   }
 }
 
@@ -2152,33 +1769,6 @@
   context()->Plug(r2);
 }
 
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // r2 : value
-  // stack : receiver ('this'), home_object
-  DCHECK(prop != NULL);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(key != NULL);
-
-  PushOperand(key->value());
-  PushOperand(r2);
-  CallRuntimeWithOperands((is_strict(language_mode())
-                               ? Runtime::kStoreToSuper_Strict
-                               : Runtime::kStoreToSuper_Sloppy));
-}
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to keyed property of super.
-  // r2 : value
-  // stack : receiver ('this'), home_object, key
-  DCHECK(prop != NULL);
-
-  PushOperand(r2);
-  CallRuntimeWithOperands((is_strict(language_mode())
-                               ? Runtime::kStoreKeyedToSuper_Strict
-                               : Runtime::kStoreKeyedToSuper_Sloppy));
-}
-
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
   PopOperands(StoreDescriptor::ReceiverRegister(),
@@ -2226,42 +1816,6 @@
   EmitCall(expr, convert_mode);
 }
 
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-  SetExpressionPosition(prop);
-
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  // Load the function from the receiver.
-  const Register scratch = r3;
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForAccumulatorValue(super_ref->home_object());
-  __ LoadRR(scratch, r2);
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperands(scratch, r2, r2, scratch);
-  PushOperand(key->value());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ StoreP(r2, MemOperand(sp, kPointerSize));
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
   // Load the key.
@@ -2285,40 +1839,6 @@
   EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
 }
 
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  SetExpressionPosition(prop);
-  // Load the function from the receiver.
-  const Register scratch = r3;
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForAccumulatorValue(super_ref->home_object());
-  __ LoadRR(scratch, r2);
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperands(scratch, r2, r2, scratch);
-  VisitForStackValue(prop->key());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ StoreP(r2, MemOperand(sp, kPointerSize));
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
 void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2338,8 +1858,9 @@
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
   Handle<Code> code =
-      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
-  __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallFeedbackICSlot()));
+      CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+          .code();
+  __ Load(r5, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
   __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
   __ mov(r2, Operand(arg_count));
   CallIC(code);
@@ -2350,113 +1871,6 @@
   context()->DropAndPlug(1, r2);
 }
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
-  int arg_count = expr->arguments()->length();
-  // r6: copy of the first argument or undefined if it doesn't exist.
-  if (arg_count > 0) {
-    __ LoadP(r6, MemOperand(sp, arg_count * kPointerSize), r0);
-  } else {
-    __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
-  }
-
-  // r5: the receiver of the enclosing function.
-  __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
-  // r4: language mode.
-  __ LoadSmiLiteral(r4, Smi::FromInt(language_mode()));
-
-  // r3: the start position of the scope the call resides in.
-  __ LoadSmiLiteral(r3, Smi::FromInt(scope()->start_position()));
-
-  // r2: the source position of the eval call.
-  __ LoadSmiLiteral(r2, Smi::FromInt(expr->position()));
-
-  // Do the runtime call.
-  __ Push(r6, r5, r4, r3, r2);
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
-  VariableProxy* callee = expr->expression()->AsVariableProxy();
-  if (callee->var()->IsLookupSlot()) {
-    Label slow, done;
-    SetExpressionPosition(callee);
-    // Generate code for loading from variables potentially shadowed by
-    // eval-introduced variables.
-    EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
-    __ bind(&slow);
-    // Call the runtime to find the function to call (returned in r2) and
-    // the object holding it (returned in r3).
-    __ Push(callee->name());
-    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
-    PushOperands(r2, r3);  // Function, receiver.
-    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
-    // If fast case code has been generated, emit code to push the function
-    // and receiver and have the slow path jump around this code.
-    if (done.is_linked()) {
-      Label call;
-      __ b(&call);
-      __ bind(&done);
-      // Push function.
-      __ push(r2);
-      // Pass undefined as the receiver, which is the WithBaseObject of a
-      // non-object environment record.  If the callee is sloppy, it will patch
-      // it up to be the global receiver.
-      __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
-      __ push(r3);
-      __ bind(&call);
-    }
-  } else {
-    VisitForStackValue(callee);
-    // refEnv.WithBaseObject()
-    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
-    PushOperand(r4);  // Reserved receiver slot.
-  }
-}
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call
-  // Runtime_ResolvePossiblyDirectEval to resolve the function we need
-  // to call.  Then we call the resolved function using the given arguments.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  PushCalleeAndWithBaseObject(expr);
-
-  // Push the arguments.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Push a copy of the function (found below the arguments) and
-  // resolve eval.
-  __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
-  __ push(r3);
-  EmitResolvePossiblyDirectEval(expr);
-
-  // Touch up the stack with the resolved function.
-  __ StoreP(r2, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
-
-  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
-  // Record source position for debugger.
-  SetCallPosition(expr);
-  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
-                                          expr->tail_call_mode())
-                          .code();
-  __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallFeedbackICSlot()));
-  __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
-  __ mov(r2, Operand(arg_count));
-  __ Call(code, RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->DropAndPlug(1, r2);
-}
-
 void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
@@ -2485,7 +1899,7 @@
   __ LoadP(r3, MemOperand(sp, arg_count * kPointerSize), r0);
 
   // Record call targets in unoptimized code.
-  __ EmitLoadTypeFeedbackVector(r4);
+  __ EmitLoadFeedbackVector(r4);
   __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallNewFeedbackSlot()));
 
   CallConstructStub stub(isolate());
@@ -2496,48 +1910,6 @@
   context()->Plug(r2);
 }
 
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
-  SuperCallReference* super_call_ref =
-      expr->expression()->AsSuperCallReference();
-  DCHECK_NOT_NULL(super_call_ref);
-
-  // Push the super constructor target on the stack (may be null,
-  // but the Construct builtin can deal with that properly).
-  VisitForAccumulatorValue(super_call_ref->this_function_var());
-  __ AssertFunction(result_register());
-  __ LoadP(result_register(),
-           FieldMemOperand(result_register(), HeapObject::kMapOffset));
-  __ LoadP(result_register(),
-           FieldMemOperand(result_register(), Map::kPrototypeOffset));
-  PushOperand(result_register());
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  SetConstructCallPosition(expr);
-
-  // Load new target into r5.
-  VisitForAccumulatorValue(super_call_ref->new_target_var());
-  __ LoadRR(r5, result_register());
-
-  // Load function and argument count into r1 and r0.
-  __ mov(r2, Operand(arg_count));
-  __ LoadP(r3, MemOperand(sp, arg_count * kPointerSize));
-
-  __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->Plug(r2);
-}
-
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2621,27 +1993,6 @@
   context()->Plug(if_true, if_false);
 }
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
-                         &if_false, &fall_through);
-
-  __ JumpIfSmi(r2, if_false);
-  __ CompareObjectType(r2, r3, r3, JS_REGEXP_TYPE);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(eq, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
 void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2871,16 +2222,12 @@
           __ Push(r4, r3);
           __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
           context()->Plug(r2);
-        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+        } else {
+          DCHECK(!var->IsLookupSlot());
+          DCHECK(var->IsStackAllocated() || var->IsContextSlot());
           // Result of deleting non-global, non-dynamic variables is false.
           // The subexpression does not have side effects.
           context()->Plug(is_this);
-        } else {
-          // Non-global variable.  Call the runtime to try to delete from the
-          // context where the variable was introduced.
-          __ Push(var->name());
-          __ CallRuntime(Runtime::kDeleteLookupSlot);
-          context()->Plug(r2);
         }
       } else {
         // Result of deleting non-property, non-variable reference is true.
@@ -2981,31 +2328,6 @@
         break;
       }
 
-      case NAMED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForAccumulatorValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        const Register scratch = r3;
-        __ LoadP(scratch, MemOperand(sp, 0));  // this
-        PushOperands(result_register(), scratch, result_register());
-        EmitNamedSuperPropertyLoad(prop);
-        break;
-      }
-
-      case KEYED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForStackValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        VisitForAccumulatorValue(prop->key());
-        const Register scratch1 = r3;
-        const Register scratch2 = r4;
-        __ LoadP(scratch1, MemOperand(sp, 1 * kPointerSize));  // this
-        __ LoadP(scratch2, MemOperand(sp, 0 * kPointerSize));  // home object
-        PushOperands(result_register(), scratch1, scratch2, result_register());
-        EmitKeyedSuperPropertyLoad(prop);
-        break;
-      }
-
       case KEYED_PROPERTY: {
         VisitForStackValue(prop->obj());
         VisitForStackValue(prop->key());
@@ -3016,6 +2338,8 @@
         break;
       }
 
+      case NAMED_SUPER_PROPERTY:
+      case KEYED_SUPER_PROPERTY:
       case VARIABLE:
         UNREACHABLE();
     }
@@ -3051,14 +2375,12 @@
           case NAMED_PROPERTY:
             __ StoreP(r2, MemOperand(sp, kPointerSize));
             break;
-          case NAMED_SUPER_PROPERTY:
-            __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
-            break;
           case KEYED_PROPERTY:
             __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
             break;
+          case NAMED_SUPER_PROPERTY:
           case KEYED_SUPER_PROPERTY:
-            __ StoreP(r2, MemOperand(sp, 3 * kPointerSize));
+            UNREACHABLE();
             break;
         }
       }
@@ -3093,14 +2415,12 @@
         case NAMED_PROPERTY:
           __ StoreP(r2, MemOperand(sp, kPointerSize));
           break;
-        case NAMED_SUPER_PROPERTY:
-          __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
-          break;
         case KEYED_PROPERTY:
           __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
           break;
+        case NAMED_SUPER_PROPERTY:
         case KEYED_SUPER_PROPERTY:
-          __ StoreP(r2, MemOperand(sp, 3 * kPointerSize));
+          UNREACHABLE();
           break;
       }
     }
@@ -3157,30 +2477,6 @@
       }
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      EmitNamedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(r2);
-      }
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      EmitKeyedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(r2);
-      }
-      break;
-    }
     case KEYED_PROPERTY: {
       PopOperands(StoreDescriptor::ReceiverRegister(),
                   StoreDescriptor::NameRegister());
@@ -3195,6 +2491,10 @@
       }
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
@@ -3261,16 +2561,6 @@
     __ tm(FieldMemOperand(r2, Map::kBitFieldOffset),
           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     Split(eq, if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)   \
-  } else if (String::Equals(check, factory->type##_string())) { \
-    __ JumpIfSmi(r2, if_false);                                 \
-    __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));  \
-    __ CompareRoot(r2, Heap::k##Type##MapRootIndex);            \
-    Split(eq, if_true, if_false, fall_through);
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
   } else {
     if (if_false != fall_through) __ b(if_false);
   }
@@ -3310,6 +2600,7 @@
       SetExpressionPosition(expr);
       PopOperand(r3);
       __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+      RestoreContext();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(r2, Heap::kTrueValueRootIndex);
       Split(eq, if_true, if_false, fall_through);
@@ -3412,77 +2703,16 @@
   PushOperand(ip);
 }
 
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
-  DCHECK(!result_register().is(r3));
-  // Store pending message while executing finally block.
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(ip, Operand(pending_message_obj));
-  __ LoadP(r3, MemOperand(ip));
-  PushOperand(r3);
-
-  ClearPendingMessage();
-}
-
-void FullCodeGenerator::ExitFinallyBlock() {
-  DCHECK(!result_register().is(r3));
-  // Restore pending message from stack.
-  PopOperand(r3);
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(ip, Operand(pending_message_obj));
-  __ StoreP(r3, MemOperand(ip));
-}
-
-void FullCodeGenerator::ClearPendingMessage() {
-  DCHECK(!result_register().is(r3));
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
-  __ mov(ip, Operand(pending_message_obj));
-  __ StoreP(r3, MemOperand(ip));
-}
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
-  DCHECK(!result_register().is(r3));
-  // Restore the accumulator (r2) and token (r3).
-  __ Pop(r3, result_register());
-  for (DeferredCommand cmd : commands_) {
-    Label skip;
-    __ CmpSmiLiteral(r3, Smi::FromInt(cmd.token), r0);
-    __ bne(&skip);
-    switch (cmd.command) {
-      case kReturn:
-        codegen_->EmitUnwindAndReturn();
-        break;
-      case kThrow:
-        __ Push(result_register());
-        __ CallRuntime(Runtime::kReThrow);
-        break;
-      case kContinue:
-        codegen_->EmitContinue(cmd.target);
-        break;
-      case kBreak:
-        codegen_->EmitBreak(cmd.target);
-        break;
-    }
-    __ bind(&skip);
-  }
-}
-
 #undef __
 
 #if V8_TARGET_ARCH_S390X
 static const FourByteInstr kInterruptBranchInstruction = 0xA7A40011;
 static const FourByteInstr kOSRBranchInstruction = 0xA7040011;
-static const int16_t kBackEdgeBranchOffset = 0x11 * 2;
+static const int16_t kBackEdgeBranchOffsetInHalfWords = 0x11;
 #else
 static const FourByteInstr kInterruptBranchInstruction = 0xA7A4000D;
 static const FourByteInstr kOSRBranchInstruction = 0xA704000D;
-static const int16_t kBackEdgeBranchOffset = 0xD * 2;
+static const int16_t kBackEdgeBranchOffsetInHalfWords = 0xD;
 #endif
 
 void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
@@ -3500,7 +2730,7 @@
       //         brasrl    r14, <interrupt stub address>
       //  <reset profiling counter>
       //  ok-label
-      patcher.masm()->brc(ge, Operand(kBackEdgeBranchOffset));
+      patcher.masm()->brc(ge, Operand(kBackEdgeBranchOffsetInHalfWords));
       break;
     }
     case ON_STACK_REPLACEMENT:
@@ -3509,7 +2739,7 @@
       //         brasrl    r14, <interrupt stub address>
       //  <reset profiling counter>
       //  ok-label ----- pc_after points here
-      patcher.masm()->brc(CC_NOP, Operand(kBackEdgeBranchOffset));
+      patcher.masm()->brc(CC_NOP, Operand(kBackEdgeBranchOffsetInHalfWords));
       break;
   }
 
@@ -3550,7 +2780,6 @@
          isolate->builtins()->OnStackReplacement()->entry());
   return ON_STACK_REPLACEMENT;
 }
-
 }  // namespace internal
 }  // namespace v8
 #endif  // V8_TARGET_ARCH_S390
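As on the other ports touched by this patch, the s390 file drops the non-local control flow helpers: EnterFinallyBlock/ExitFinallyBlock saved and restored the isolate's pending message around a finally body, and DeferredCommands::EmitCommands re-dispatched on a Smi token afterwards, in this condensed shape:

    // Condensed shape of the removed EmitCommands loop:
    //   pop <token, result>
    //   for each deferred command:
    //     if (token == cmd.token)   // compiled as a compare-and-branch chain
    //       return / rethrow / continue / break, as recorded

The removal is consistent with the DCHECK/UNREACHABLE changes elsewhere in the patch: full-codegen now assumes functions that need this machinery never reach it.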
diff --git a/src/full-codegen/x64/full-codegen-x64.cc b/src/full-codegen/x64/full-codegen-x64.cc
index 0720c3d..d4d78ed 100644
--- a/src/full-codegen/x64/full-codegen-x64.cc
+++ b/src/full-codegen/x64/full-codegen-x64.cc
@@ -4,16 +4,20 @@
 
 #if V8_TARGET_ARCH_X64
 
-#include "src/full-codegen/full-codegen.h"
+#include "src/assembler-inl.h"
 #include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/heap/heap-inl.h"
 #include "src/ic/ic.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -120,19 +124,16 @@
   // Increment invocation count for the function.
   {
     Comment cmnt(masm_, "[ Increment invocation count");
-    __ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
-    __ movp(rcx, FieldOperand(rcx, LiteralsArray::kFeedbackVectorOffset));
+    __ movp(rcx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
+    __ movp(rcx, FieldOperand(rcx, Cell::kValueOffset));
     __ SmiAddConstant(
-        FieldOperand(rcx,
-                     TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                         TypeFeedbackVector::kHeaderSize),
+        FieldOperand(rcx, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                              FeedbackVector::kHeaderSize),
         Smi::FromInt(1));
   }
 
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
-    // Generators allocate locals, if any, in context slots.
-    DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
     OperandStackDepthIncrement(locals_count);
     if (locals_count == 1) {
       __ PushRoot(Heap::kUndefinedValueRootIndex);
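The invocation-count preamble in the hunk above now reaches the feedback vector through a cell hanging off the JSFunction itself instead of going through the literals array. Read as a load chain:

    // rcx = function[JSFunction::kFeedbackVectorOffset] -> Cell holding the vector
    // rcx = rcx[Cell::kValueOffset]                     -> the FeedbackVector
    // rcx[kInvocationCountIndex] += Smi::FromInt(1)     -> bump the profiling counter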
@@ -189,14 +190,17 @@
       if (info->scope()->new_target_var() != nullptr) {
         __ Push(rdx);  // Preserve new target.
       }
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info->scope()->scope_type());
         __ Set(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ Push(rdi);
+        __ Push(Smi::FromInt(info->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
       if (info->scope()->new_target_var() != nullptr) {
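Two details in the context-allocation hunk above: the fast path became a CSA builtin reached through CodeFactory, and the slow runtime path now takes the ScopeType as an extra Smi argument. Clearing need_write_barrier is sound because the builtin allocates in new space, and stores into a new-space object can never create an old-to-new pointer the GC would have to track. Condensed (a hypothetical condensation, not literal patch code):

    int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    if (slots <= ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
      // Inline-allocating builtin: result is in new space, so later stores
      // into the context may skip the write barrier.
    } else {
      // Generic path: Runtime::kNewFunctionContext(closure, scope_type).
    }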
@@ -243,37 +247,10 @@
   PrepareForBailoutForId(BailoutId::FunctionContext(),
                          BailoutState::NO_REGISTERS);
 
-  // Possibly set up a local binding to the this function which is used in
-  // derived constructors with super calls.
-  Variable* this_function_var = info->scope()->this_function_var();
-  if (this_function_var != nullptr) {
-    Comment cmnt(masm_, "[ This function");
-    if (!function_in_register) {
-      __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers register again, keep it marked as such.
-    }
-    SetVar(this_function_var, rdi, rbx, rcx);
-  }
-
-  // Possibly set up a local binding to the new target value.
-  Variable* new_target_var = info->scope()->new_target_var();
-  if (new_target_var != nullptr) {
-    Comment cmnt(masm_, "[ new.target");
-    SetVar(new_target_var, rdx, rbx, rcx);
-  }
-
-  // Possibly allocate RestParameters
-  Variable* rest_param = info->scope()->rest_parameter();
-  if (rest_param != nullptr) {
-    Comment cmnt(masm_, "[ Allocate rest parameter array");
-    if (!function_in_register) {
-      __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-    }
-    FastNewRestParameterStub stub(isolate());
-    __ CallStub(&stub);
-    function_in_register = false;
-    SetVar(rest_param, rax, rbx, rdx);
-  }
+  // We don't support new.target and rest parameters here.
+  DCHECK_NULL(info->scope()->new_target_var());
+  DCHECK_NULL(info->scope()->rest_parameter());
+  DCHECK_NULL(info->scope()->this_function_var());
 
   // Possibly allocate an arguments object.
   DCHECK_EQ(scope(), info->scope());
@@ -286,14 +263,16 @@
       __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
     }
     if (is_strict(language_mode()) || !has_simple_parameters()) {
-      FastNewStrictArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      __ call(isolate()->builtins()->FastNewStrictArguments(),
+              RelocInfo::CODE_TARGET);
+      RestoreContext();
     } else if (literal()->has_duplicate_parameters()) {
       __ Push(rdi);
       __ CallRuntime(Runtime::kNewSloppyArguments_Generic);
     } else {
-      FastNewSloppyArgumentsStub stub(isolate());
-      __ CallStub(&stub);
+      __ call(isolate()->builtins()->FastNewSloppyArguments(),
+              RelocInfo::CODE_TARGET);
+      RestoreContext();
     }
 
     SetVar(arguments, rax, rbx, rdx);
@@ -521,10 +500,8 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
-         !lit->IsUndetectable());
-  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
-      lit->IsFalse(isolate())) {
+  DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+  if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ jmp(true_label_);
@@ -745,10 +722,12 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
       globals_->Add(isolate()->factory()->undefined_value(), zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
     }
     case VariableLocation::PARAMETER:
@@ -771,16 +750,7 @@
       }
       break;
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ VariableDeclaration");
-      DCHECK_EQ(VAR, variable->mode());
-      DCHECK(!variable->binding_needs_init());
-      __ Push(variable->name());
-      __ CallRuntime(Runtime::kDeclareEvalVar);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -793,9 +763,16 @@
   Variable* variable = proxy->var();
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+      // We need the slot where the literals array lives, too.
+      slot = declaration->fun()->LiteralFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
       Handle<SharedFunctionInfo> function =
           Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
@@ -830,15 +807,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      PushOperand(variable->name());
-      VisitForStackValue(declaration->fun());
-      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -849,7 +818,7 @@
   // Call the runtime to declare the globals.
   __ Push(pairs);
   __ Push(Smi::FromInt(DeclareGlobalsFlags()));
-  __ EmitLoadTypeFeedbackVector(rax);
+  __ EmitLoadFeedbackVector(rax);
   __ Push(rax);
   __ CallRuntime(Runtime::kDeclareGlobals);
   // Return value is ignored.
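Taken together with the two declaration visitors above, the flat globals_ array handed to Runtime::kDeclareGlobals grows from two-entry to four-entry records, laid out as:

    //   [0] variable name
    //   [1] variable feedback slot index (Smi)
    //   [2] function-literal feedback slot (Smi) for functions,
    //       undefined for plain vars
    //   [3] initial value: the SharedFunctionInfo for functions,
    //       undefined for plain vars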
@@ -954,7 +923,7 @@
   Comment cmnt(masm_, "[ ForInStatement");
   SetStatementPosition(stmt, SKIP_BREAK);
 
-  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  FeedbackSlot slot = stmt->ForInFeedbackSlot();
 
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1071,9 +1040,9 @@
 
   // We need to filter the key, record slow-path here.
   int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(rdx);
+  __ EmitLoadFeedbackVector(rdx);
   __ Move(FieldOperand(rdx, FixedArray::OffsetOfElementAt(vector_index)),
-          TypeFeedbackVector::MegamorphicSentinel(isolate()));
+          FeedbackVector::MegamorphicSentinel(isolate()));
 
   // rax contains the key. The receiver in rbx is the second argument to
   // ForInFilter. ForInFilter returns undefined if the receiver doesn't
@@ -1117,9 +1086,8 @@
   decrement_loop_depth();
 }
 
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
-                                          FeedbackVectorSlot slot) {
+                                          FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
   __ movp(StoreDescriptor::ValueRegister(),
@@ -1127,10 +1095,9 @@
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
 void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
                                                      int offset,
-                                                     FeedbackVectorSlot slot) {
+                                                     FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ movp(StoreDescriptor::ReceiverRegister(), rax);
   __ movp(StoreDescriptor::ValueRegister(),
@@ -1138,89 +1105,6 @@
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
-                                                      TypeofMode typeof_mode,
-                                                      Label* slow) {
-  Register context = rsi;
-  Register temp = rdx;
-
-  int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
-  for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (s->calls_sloppy_eval()) {
-      // Check that extension is "the hole".
-      __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
-                       Heap::kTheHoleValueRootIndex, slow);
-    }
-    // Load next context in chain.
-    __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
-    // Walk the rest of the chain without clobbering rsi.
-    context = temp;
-    to_check--;
-  }
-
-  // All extension objects were empty and it is safe to use a normal global
-  // load machinery.
-  EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
-                                                                Label* slow) {
-  DCHECK(var->IsContextSlot());
-  Register context = rsi;
-  Register temp = rbx;
-
-  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->NeedsContext()) {
-      if (s->calls_sloppy_eval()) {
-        // Check that extension is "the hole".
-        __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
-                         Heap::kTheHoleValueRootIndex, slow);
-      }
-      __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
-      // Walk the rest of the chain without clobbering rsi.
-      context = temp;
-    }
-  }
-  // Check that last extension is "the hole".
-  __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
-                   Heap::kTheHoleValueRootIndex, slow);
-
-  // This function is used only for loads, not stores, so it's safe to
-  // return an rsi-based operand (the write barrier cannot be allowed to
-  // destroy the rsi register).
-  return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
-                                                  TypeofMode typeof_mode,
-                                                  Label* slow, Label* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  Variable* var = proxy->var();
-  if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
-    __ jmp(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
-    Variable* local = var->local_if_not_shadowed();
-    __ movp(rax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->binding_needs_init()) {
-      __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
-      __ j(not_equal, done);
-      __ Push(var->name());
-      __ CallRuntime(Runtime::kThrowReferenceError);
-    } else {
-      __ jmp(done);
-    }
-  }
-}
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   // Record position before possible IC call.
@@ -1228,8 +1112,7 @@
   PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
-  // Three cases: global variables, lookup variables, and all other types of
-  // variables.
+  // Two cases: global variable, and all other types of variables.
   switch (var->location()) {
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
@@ -1262,24 +1145,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ Lookup slot");
-      Label done, slow;
-      // Generate code for loading from variables potentially shadowed
-      // by eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
-      __ bind(&slow);
-      __ Push(var->name());
-      Runtime::FunctionId function_id =
-          typeof_mode == NOT_INSIDE_TYPEOF
-              ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotInsideTypeof;
-      __ CallRuntime(function_id);
-      __ bind(&done);
-      context()->Plug(rax);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
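With EmitLoadGlobalCheckExtensions, ContextSlotOperandCheckExtensions and EmitDynamicLookupFastCase deleted, every VariableLocation::LOOKUP arm in this file collapses to UNREACHABLE. The removed fast path served variables potentially shadowed by a sloppy-mode eval: it walked the context chain and fell back to the runtime whenever an intervening scope had acquired an extension object. In outline:

    // Removed fast path, in outline:
    //   for (ctx = current; ctx != target scope; ctx = ctx->previous())
    //     if (ctx's EXTENSION_INDEX slot != the_hole)  // eval added bindings
    //       goto slow;
    //   load the variable straight from its known slot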
@@ -1306,21 +1172,23 @@
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
 
-  Handle<FixedArray> constant_properties = expr->constant_properties();
+  Handle<BoilerplateDescription> constant_properties =
+      expr->GetOrBuildConstantProperties(isolate());
   int flags = expr->ComputeFlags();
   if (MustCreateObjectLiteralWithRuntime(expr)) {
     __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-    __ Push(Smi::FromInt(expr->literal_index()));
+    __ Push(SmiFromSlot(expr->literal_slot()));
     __ Push(constant_properties);
     __ Push(Smi::FromInt(flags));
     __ CallRuntime(Runtime::kCreateObjectLiteral);
   } else {
     __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-    __ Move(rbx, Smi::FromInt(expr->literal_index()));
+    __ Move(rbx, SmiFromSlot(expr->literal_slot()));
     __ Move(rcx, constant_properties);
     __ Move(rdx, Smi::FromInt(flags));
-    FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
-    __ CallStub(&stub);
+    Callable callable = CodeFactory::FastCloneShallowObject(
+        isolate(), expr->properties_count());
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
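Object literals are now keyed by a feedback slot (SmiFromSlot(expr->literal_slot())) rather than a plain literal index, matching the move of literal boilerplates into the feedback vector, and the fast clone goes through the FastCloneShallowObject builtin via CodeFactory instead of a code stub. The constant-properties argument likewise comes from GetOrBuildConstantProperties(isolate()), which builds the BoilerplateDescription on demand.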
@@ -1330,10 +1198,9 @@
   bool result_saved = false;
 
   AccessorTable accessor_table(zone());
-  int property_index = 0;
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-    if (property->is_computed_name()) break;
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    DCHECK(!property->is_computed_name());
     if (property->IsCompileTimeValue()) continue;
 
     Literal* key = property->key()->AsLiteral();
@@ -1343,6 +1210,7 @@
       result_saved = true;
     }
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1357,7 +1225,7 @@
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(rax));
             __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
-            CallStoreIC(property->GetSlot(0), key->value());
+            CallStoreIC(property->GetSlot(0), key->value(), true);
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
 
             if (NeedsHomeObject(value)) {
@@ -1386,20 +1254,20 @@
         VisitForStackValue(value);
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-        PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+        PrepareForBailoutForId(expr->GetIdForPropertySet(i),
                                BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->setter = property;
         }
         break;
@@ -1420,72 +1288,6 @@
     PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
-  // Object literals have two parts. The "static" part on the left contains no
-  // computed property names, and so we can compute its map ahead of time; see
-  // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
-  // starts with the first computed property name, and continues with all
-  // properties to its right.  All the code from above initializes the static
-  // component of the object literal, and arranges for the map of the result to
-  // reflect the static order in which the keys appear. For the dynamic
-  // properties, we compile them into a series of "SetOwnProperty" runtime
-  // calls. This will preserve insertion order.
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
-    Expression* value = property->value();
-    if (!result_saved) {
-      PushOperand(rax);  // Save result on the stack
-      result_saved = true;
-    }
-
-    PushOperand(Operand(rsp, 0));  // Duplicate receiver.
-
-    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
-      DCHECK(!property->is_computed_name());
-      VisitForStackValue(value);
-      DCHECK(property->emit_store());
-      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-      PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             BailoutState::NO_REGISTERS);
-    } else {
-      EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
-      VisitForStackValue(value);
-      if (NeedsHomeObject(value)) {
-        EmitSetHomeObject(value, 2, property->GetSlot());
-      }
-
-      switch (property->kind()) {
-        case ObjectLiteral::Property::CONSTANT:
-        case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        case ObjectLiteral::Property::COMPUTED:
-          if (property->emit_store()) {
-            PushOperand(Smi::FromInt(NONE));
-            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                                   BailoutState::NO_REGISTERS);
-          } else {
-            DropOperands(3);
-          }
-          break;
-
-        case ObjectLiteral::Property::PROTOTYPE:
-          UNREACHABLE();
-          break;
-
-        case ObjectLiteral::Property::GETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-          break;
-
-        case ObjectLiteral::Property::SETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-          break;
-      }
-    }
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1497,29 +1299,22 @@
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
 
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  bool has_constant_fast_elements =
-      IsFastObjectElementsKind(expr->constant_elements_kind());
-
-  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
-  if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
-    // If the only customer of allocation sites is transitioning, then
-    // we can turn it off if we don't have anywhere else to transition to.
-    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
-  }
+  Handle<ConstantElementsPair> constant_elements =
+      expr->GetOrBuildConstantElements(isolate());
 
   if (MustCreateArrayLiteralWithRuntime(expr)) {
     __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-    __ Push(Smi::FromInt(expr->literal_index()));
+    __ Push(SmiFromSlot(expr->literal_slot()));
     __ Push(constant_elements);
     __ Push(Smi::FromInt(expr->ComputeFlags()));
     __ CallRuntime(Runtime::kCreateArrayLiteral);
   } else {
     __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-    __ Move(rbx, Smi::FromInt(expr->literal_index()));
+    __ Move(rbx, SmiFromSlot(expr->literal_slot()));
     __ Move(rcx, constant_elements);
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
-    __ CallStub(&stub);
+    Callable callable =
+        CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
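The array-literal path loses its DONT_TRACK_ALLOCATION_SITE heuristic: FastCloneShallowArray is now always entered with TRACK_ALLOCATION_SITE, leaving any skip-tracking decision to the builtin, and the constant elements arrive as a ConstantElementsPair (the elements kind paired with the constant backing store) built on demand by GetOrBuildConstantElements(isolate()).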
@@ -1582,30 +1377,6 @@
         VisitForStackValue(property->obj());
       }
       break;
-    case NAMED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        PushOperand(MemOperand(rsp, kPointerSize));
-        PushOperand(result_register());
-      }
-      break;
-    case KEYED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(property->key());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        PushOperand(MemOperand(rsp, 2 * kPointerSize));
-        PushOperand(MemOperand(rsp, 2 * kPointerSize));
-        PushOperand(result_register());
-      }
-      break;
     case KEYED_PROPERTY: {
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
@@ -1618,6 +1389,10 @@
       }
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 
   // For compound assignments we need another deoptimization point after the
@@ -1634,21 +1409,15 @@
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
-        case NAMED_SUPER_PROPERTY:
-          EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
-        case KEYED_SUPER_PROPERTY:
-          EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
+        case NAMED_SUPER_PROPERTY:
+        case KEYED_SUPER_PROPERTY:
+          UNREACHABLE();
+          break;
       }
     }
 
@@ -1686,73 +1455,20 @@
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
-    case NAMED_SUPER_PROPERTY:
-      EmitNamedSuperPropertyStore(property);
-      context()->Plug(rax);
-      break;
-    case KEYED_SUPER_PROPERTY:
-      EmitKeyedSuperPropertyStore(property);
-      context()->Plug(rax);
-      break;
     case KEYED_PROPERTY:
       EmitKeyedPropertyAssignment(expr);
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
 
 void FullCodeGenerator::VisitYield(Yield* expr) {
-  Comment cmnt(masm_, "[ Yield");
-  SetExpressionPosition(expr);
-
-  // Evaluate yielded value first; the initial iterator definition depends on
-  // this.  It stays on the stack while we update the iterator.
-  VisitForStackValue(expr->expression());
-
-  Label suspend, continuation, post_runtime, resume, exception;
-
-  __ jmp(&suspend);
-  __ bind(&continuation);
-  // When we arrive here, rax holds the generator object.
-  __ RecordGeneratorContinuation();
-  __ movp(rbx, FieldOperand(rax, JSGeneratorObject::kResumeModeOffset));
-  __ movp(rax, FieldOperand(rax, JSGeneratorObject::kInputOrDebugPosOffset));
-  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
-  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
-  __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::kReturn));
-  __ j(less, &resume);
-  __ Push(result_register());
-  __ j(greater, &exception);
-  EmitCreateIteratorResult(true);
-  EmitUnwindAndReturn();
-
-  __ bind(&exception);
-  __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
-                                              : Runtime::kThrow);
-
-  __ bind(&suspend);
-  OperandStackDepthIncrement(1);  // Not popped on this path.
-  VisitForAccumulatorValue(expr->generator_object());
-  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-  __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
-          Smi::FromInt(continuation.pos()));
-  __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
-  __ movp(rcx, rsi);
-  __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
-                      kDontSaveFPRegs);
-  __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
-  __ cmpp(rsp, rbx);
-  __ j(equal, &post_runtime);
-  __ Push(rax);  // generator object
-  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  RestoreContext();
-  __ bind(&post_runtime);
-
-  PopOperand(result_register());
-  EmitReturnSequence();
-
-  __ bind(&resume);
-  context()->Plug(result_register());
+  // Resumable functions are not supported.
+  UNREACHABLE();
 }
 
 void FullCodeGenerator::PushOperand(MemOperand operand) {
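VisitYield turning into a bare UNREACHABLE() follows the same pattern as the DCHECKs above: full-codegen no longer claims resumable functions (generators and async functions), which are evidently compiled by another tier, so a Yield node reaching this visitor would be a pipeline bug rather than a missing feature. The entire suspend/resume protocol that lived here, recording the continuation position, stashing the context in the generator object, and calling Runtime::kSuspendJSGeneratorObject, leaves this tier with it.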
@@ -1856,57 +1572,6 @@
 }
 
 
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  for (int i = 0; i < lit->properties()->length(); i++) {
-    ClassLiteral::Property* property = lit->properties()->at(i);
-    Expression* value = property->value();
-
-    if (property->is_static()) {
-      PushOperand(Operand(rsp, kPointerSize));  // constructor
-    } else {
-      PushOperand(Operand(rsp, 0));  // prototype
-    }
-    EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static prototype property is read only. We handle the non computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read only property we special case this so we do
-    // not need to do this for every property.
-    if (property->is_static() && property->is_computed_name()) {
-      __ CallRuntime(Runtime::kThrowIfStaticPrototype);
-      __ Push(rax);
-    }
-
-    VisitForStackValue(value);
-    if (NeedsHomeObject(value)) {
-      EmitSetHomeObject(value, 2, property->GetSlot());
-    }
-
-    switch (property->kind()) {
-      case ClassLiteral::Property::METHOD:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-        break;
-
-      case ClassLiteral::Property::GETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::SETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::FIELD:
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
   PopOperand(rdx);
   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
@@ -1916,9 +1581,7 @@
   context()->Plug(rax);
 }
 
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
-                                       FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
   DCHECK(expr->IsValidReferenceExpressionOrThis());
 
   Property* prop = expr->AsProperty();
@@ -1940,43 +1603,6 @@
       CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      PushOperand(rax);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      // stack: value, this; rax: home_object
-      Register scratch = rcx;
-      Register scratch2 = rdx;
-      __ Move(scratch, result_register());               // home_object
-      __ movp(rax, MemOperand(rsp, kPointerSize));       // value
-      __ movp(scratch2, MemOperand(rsp, 0));             // this
-      __ movp(MemOperand(rsp, kPointerSize), scratch2);  // this
-      __ movp(MemOperand(rsp, 0), scratch);              // home_object
-      // stack: this, home_object; rax: value
-      EmitNamedSuperPropertyStore(prop);
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      PushOperand(rax);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(prop->key());
-      Register scratch = rcx;
-      Register scratch2 = rdx;
-      __ movp(scratch2, MemOperand(rsp, 2 * kPointerSize));  // value
-      // stack: value, this, home_object; rax: key, rdx: value
-      __ movp(scratch, MemOperand(rsp, kPointerSize));  // this
-      __ movp(MemOperand(rsp, 2 * kPointerSize), scratch);
-      __ movp(scratch, MemOperand(rsp, 0));  // home_object
-      __ movp(MemOperand(rsp, kPointerSize), scratch);
-      __ movp(MemOperand(rsp, 0), rax);
-      __ Move(rax, scratch2);
-      // stack: this, home_object, key; rax: value.
-      EmitKeyedSuperPropertyStore(prop);
-      break;
-    }
     case KEYED_PROPERTY: {
       PushOperand(rax);  // Preserve value.
       VisitForStackValue(prop->obj());
@@ -1987,6 +1613,10 @@
       CallKeyedStoreIC(slot);
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
   context()->Plug(rax);
 }
@@ -2003,7 +1633,7 @@
 }
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot,
+                                               FeedbackSlot slot,
                                                HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
@@ -2045,26 +1675,18 @@
 
   } else {
     DCHECK(var->mode() != CONST || op == Token::INIT);
-    if (var->IsLookupSlot()) {
-      // Assignment to var.
-      __ Push(var->name());
-      __ Push(rax);
-      __ CallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreLookupSlot_Strict
-                         : Runtime::kStoreLookupSlot_Sloppy);
-    } else {
-      // Assignment to var or initializing assignment to let/const in harmony
-      // mode.
-      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-      MemOperand location = VarOperand(var, rcx);
-      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
-        // Check for an uninitialized let binding.
-        __ movp(rdx, location);
-        __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
-        __ Check(equal, kLetBindingReInitialization);
-      }
-      EmitStoreToStackLocalOrContextSlot(var, location);
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    DCHECK(!var->IsLookupSlot());
+    // Assignment to var or initializing assignment to let/const in harmony
+    // mode.
+    MemOperand location = VarOperand(var, rcx);
+    if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+      // Check for an uninitialized let binding.
+      __ movp(rdx, location);
+      __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+      __ Check(equal, kLetBindingReInitialization);
     }
+    EmitStoreToStackLocalOrContextSlot(var, location);
   }
 }
 
@@ -2083,35 +1705,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // rax : value
-  // stack : receiver ('this'), home_object
-  DCHECK(prop != NULL);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(key != NULL);
-
-  PushOperand(key->value());
-  PushOperand(rax);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreToSuper_Strict
-                              : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // rax : value
-  // stack : receiver ('this'), home_object, key
-  DCHECK(prop != NULL);
-
-  PushOperand(rax);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreKeyedToSuper_Strict
-                              : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
   PopOperand(StoreDescriptor::NameRegister());  // Key.
@@ -2156,43 +1749,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-  SetExpressionPosition(prop);
-
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  // Load the function from the receiver.
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForStackValue(super_ref->home_object());
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperand(rax);
-  PushOperand(rax);
-  PushOperand(Operand(rsp, kPointerSize * 2));
-  PushOperand(key->value());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ movp(Operand(rsp, kPointerSize), rax);
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 // Common code for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2217,41 +1773,6 @@
 }
 
 
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  SetExpressionPosition(prop);
-  // Load the function from the receiver.
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForStackValue(super_ref->home_object());
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperand(rax);
-  PushOperand(rax);
-  PushOperand(Operand(rsp, kPointerSize * 2));
-  VisitForStackValue(prop->key());
-
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ movp(Operand(rsp, kPointerSize), rax);
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2271,8 +1792,9 @@
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
   Handle<Code> code =
-      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
-  __ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
+      CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+          .code();
+  __ Set(rdx, IntFromSlot(expr->CallFeedbackICSlot()));
   __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
   __ Set(rax, arg_count);
   CallIC(code);
@@ -2284,111 +1806,6 @@
   context()->DropAndPlug(1, rax);
 }
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
-  int arg_count = expr->arguments()->length();
-  // Push copy of the first argument or undefined if it doesn't exist.
-  if (arg_count > 0) {
-    __ Push(Operand(rsp, arg_count * kPointerSize));
-  } else {
-    __ PushRoot(Heap::kUndefinedValueRootIndex);
-  }
-
-  // Push the enclosing function.
-  __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-
-  // Push the language mode.
-  __ Push(Smi::FromInt(language_mode()));
-
-  // Push the start position of the scope the calls resides in.
-  __ Push(Smi::FromInt(scope()->start_position()));
-
-  // Push the source position of the eval call.
-  __ Push(Smi::FromInt(expr->position()));
-
-  // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
-  VariableProxy* callee = expr->expression()->AsVariableProxy();
-  if (callee->var()->IsLookupSlot()) {
-    Label slow, done;
-    SetExpressionPosition(callee);
-    // Generate code for loading from variables potentially shadowed by
-    // eval-introduced variables.
-    EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-    __ bind(&slow);
-    // Call the runtime to find the function to call (returned in rax) and
-    // the object holding it (returned in rdx).
-    __ Push(callee->name());
-    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
-    PushOperand(rax);  // Function.
-    PushOperand(rdx);  // Receiver.
-    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
-    // If fast case code has been generated, emit code to push the function
-    // and receiver and have the slow path jump around this code.
-    if (done.is_linked()) {
-      Label call;
-      __ jmp(&call, Label::kNear);
-      __ bind(&done);
-      // Push function.
-      __ Push(rax);
-      // Pass undefined as the receiver, which is the WithBaseObject of a
-      // non-object environment record.  If the callee is sloppy, it will patch
-      // it up to be the global receiver.
-      __ PushRoot(Heap::kUndefinedValueRootIndex);
-      __ bind(&call);
-    }
-  } else {
-    VisitForStackValue(callee);
-    // refEnv.WithBaseObject()
-    OperandStackDepthIncrement(1);
-    __ PushRoot(Heap::kUndefinedValueRootIndex);
-  }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
-  // to resolve the function we need to call.  Then we call the resolved
-  // function using the given arguments.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  PushCalleeAndWithBaseObject(expr);
-
-  // Push the arguments.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Push a copy of the function (found below the arguments) and resolve
-  // eval.
-  __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
-  EmitResolvePossiblyDirectEval(expr);
-
-  // Touch up the callee.
-  __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
-
-  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
-  SetCallPosition(expr);
-  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
-                                          expr->tail_call_mode())
-                          .code();
-  __ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
-  __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
-  __ Set(rax, arg_count);
-  __ call(code, RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->DropAndPlug(1, rax);
-}
-
-
 void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
@@ -2417,7 +1834,7 @@
   __ movp(rdi, Operand(rsp, arg_count * kPointerSize));
 
   // Record call targets in unoptimized code, but not in the snapshot.
-  __ EmitLoadTypeFeedbackVector(rbx);
+  __ EmitLoadFeedbackVector(rbx);
   __ Move(rdx, SmiFromSlot(expr->CallNewFeedbackSlot()));
 
   CallConstructStub stub(isolate());
@@ -2428,48 +1845,6 @@
   context()->Plug(rax);
 }
 
-
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
-  SuperCallReference* super_call_ref =
-      expr->expression()->AsSuperCallReference();
-  DCHECK_NOT_NULL(super_call_ref);
-
-  // Push the super constructor target on the stack (may be null,
-  // but the Construct builtin can deal with that properly).
-  VisitForAccumulatorValue(super_call_ref->this_function_var());
-  __ AssertFunction(result_register());
-  __ movp(result_register(),
-          FieldOperand(result_register(), HeapObject::kMapOffset));
-  PushOperand(FieldOperand(result_register(), Map::kPrototypeOffset));
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  SetConstructCallPosition(expr);
-
-  // Load new target into rdx.
-  VisitForAccumulatorValue(super_call_ref->new_target_var());
-  __ movp(rdx, result_register());
-
-  // Load function and argument count into rdi and rax.
-  __ Set(rax, arg_count);
-  __ movp(rdi, Operand(rsp, arg_count * kPointerSize));
-
-  __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->Plug(rax);
-}
-
-
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2557,28 +1932,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(rax, if_false);
-  __ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2813,17 +2166,13 @@
           __ Push(var->name());
           __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
           context()->Plug(rax);
-        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+        } else {
+          DCHECK(!var->IsLookupSlot());
+          DCHECK(var->IsStackAllocated() || var->IsContextSlot());
           // Result of deleting non-global variables is false.  'this' is
           // not really a variable, though we implement it as one.  The
           // subexpression does not have side effects.
           context()->Plug(is_this);
-        } else {
-          // Non-global variable.  Call the runtime to try to delete from the
-          // context where the variable was introduced.
-          __ Push(var->name());
-          __ CallRuntime(Runtime::kDeleteLookupSlot);
-          context()->Plug(rax);
         }
       } else {
         // Result of deleting non-property, non-variable reference is true.
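The deleted branch was the last user of Runtime::kDeleteLookupSlot in this file: once eval-introduced bindings can no longer reach full-codegen, a delete of a non-global variable is statically known to be false (stack- and context-allocated bindings are non-configurable), so the constant result is plugged directly, with 'this', which only looks like a variable, handled via is_this.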
@@ -2933,30 +2282,6 @@
         break;
       }
 
-      case NAMED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForAccumulatorValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        PushOperand(result_register());
-        PushOperand(MemOperand(rsp, kPointerSize));
-        PushOperand(result_register());
-        EmitNamedSuperPropertyLoad(prop);
-        break;
-      }
-
-      case KEYED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForStackValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        VisitForAccumulatorValue(prop->key());
-        PushOperand(result_register());
-        PushOperand(MemOperand(rsp, 2 * kPointerSize));
-        PushOperand(MemOperand(rsp, 2 * kPointerSize));
-        PushOperand(result_register());
-        EmitKeyedSuperPropertyLoad(prop);
-        break;
-      }
-
       case KEYED_PROPERTY: {
         VisitForStackValue(prop->obj());
         VisitForStackValue(prop->key());
@@ -2968,6 +2293,8 @@
         break;
       }
 
+      case NAMED_SUPER_PROPERTY:
+      case KEYED_SUPER_PROPERTY:
       case VARIABLE:
         UNREACHABLE();
     }
@@ -3001,14 +2328,12 @@
           case NAMED_PROPERTY:
             __ movp(Operand(rsp, kPointerSize), rax);
             break;
-          case NAMED_SUPER_PROPERTY:
-            __ movp(Operand(rsp, 2 * kPointerSize), rax);
-            break;
           case KEYED_PROPERTY:
             __ movp(Operand(rsp, 2 * kPointerSize), rax);
             break;
+          case NAMED_SUPER_PROPERTY:
           case KEYED_SUPER_PROPERTY:
-            __ movp(Operand(rsp, 3 * kPointerSize), rax);
+            UNREACHABLE();
             break;
         }
       }
@@ -3046,14 +2371,12 @@
         case NAMED_PROPERTY:
           __ movp(Operand(rsp, kPointerSize), rax);
           break;
-        case NAMED_SUPER_PROPERTY:
-          __ movp(Operand(rsp, 2 * kPointerSize), rax);
-          break;
         case KEYED_PROPERTY:
           __ movp(Operand(rsp, 2 * kPointerSize), rax);
           break;
+        case NAMED_SUPER_PROPERTY:
         case KEYED_SUPER_PROPERTY:
-          __ movp(Operand(rsp, 3 * kPointerSize), rax);
+          UNREACHABLE();
           break;
       }
     }
@@ -3112,30 +2435,6 @@
       }
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      EmitNamedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(rax);
-      }
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      EmitKeyedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(rax);
-      }
-      break;
-    }
     case KEYED_PROPERTY: {
       PopOperand(StoreDescriptor::NameRegister());
       PopOperand(StoreDescriptor::ReceiverRegister());
@@ -3150,6 +2449,10 @@
       }
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
@@ -3217,16 +2520,6 @@
     __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
              Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     Split(zero, if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)   \
-  } else if (String::Equals(check, factory->type##_string())) { \
-    __ JumpIfSmi(rax, if_false);                                \
-    __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));    \
-    __ CompareRoot(rax, Heap::k##Type##MapRootIndex);           \
-    Split(equal, if_true, if_false, fall_through);
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
@@ -3267,6 +2560,7 @@
       SetExpressionPosition(expr);
       PopOperand(rdx);
       __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+      RestoreContext();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ CompareRoot(rax, Heap::kTrueValueRootIndex);
       Split(equal, if_true, if_false, fall_through);
@@ -3382,68 +2676,6 @@
 }
 
 
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-
-void FullCodeGenerator::EnterFinallyBlock() {
-  DCHECK(!result_register().is(rdx));
-
-  // Store pending message while executing finally block.
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ Load(rdx, pending_message_obj);
-  PushOperand(rdx);
-
-  ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
-  DCHECK(!result_register().is(rdx));
-  // Restore pending message from stack.
-  PopOperand(rdx);
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ Store(pending_message_obj, rdx);
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
-  DCHECK(!result_register().is(rdx));
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
-  __ Store(pending_message_obj, rdx);
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
-  __ Pop(result_register());  // Restore the accumulator.
-  __ Pop(rdx);                // Get the token.
-  for (DeferredCommand cmd : commands_) {
-    Label skip;
-    __ SmiCompare(rdx, Smi::FromInt(cmd.token));
-    __ j(not_equal, &skip);
-    switch (cmd.command) {
-      case kReturn:
-        codegen_->EmitUnwindAndReturn();
-        break;
-      case kThrow:
-        __ Push(result_register());
-        __ CallRuntime(Runtime::kReThrow);
-        break;
-      case kContinue:
-        codegen_->EmitContinue(cmd.target);
-        break;
-      case kBreak:
-        codegen_->EmitBreak(cmd.target);
-        break;
-    }
-    __ bind(&skip);
-  }
-}
-
 #undef __
 
 
diff --git a/src/full-codegen/x87/OWNERS b/src/full-codegen/x87/OWNERS
index dd9998b..61245ae 100644
--- a/src/full-codegen/x87/OWNERS
+++ b/src/full-codegen/x87/OWNERS
@@ -1 +1,2 @@
 weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/src/full-codegen/x87/full-codegen-x87.cc b/src/full-codegen/x87/full-codegen-x87.cc
index 7cc7e2b..25d3f21 100644
--- a/src/full-codegen/x87/full-codegen-x87.cc
+++ b/src/full-codegen/x87/full-codegen-x87.cc
@@ -4,15 +4,16 @@
 
 #if V8_TARGET_ARCH_X87
 
-#include "src/full-codegen/full-codegen.h"
 #include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
 #include "src/ic/ic.h"
 #include "src/x87/frames-x87.h"
 
@@ -120,18 +121,16 @@
   // Increment invocation count for the function.
   {
     Comment cmnt(masm_, "[ Increment invocation count");
-    __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
-    __ mov(ecx, FieldOperand(ecx, LiteralsArray::kFeedbackVectorOffset));
-    __ add(FieldOperand(
-               ecx, TypeFeedbackVector::kInvocationCountIndex * kPointerSize +
-                        TypeFeedbackVector::kHeaderSize),
-           Immediate(Smi::FromInt(1)));
+    __ mov(ecx, FieldOperand(edi, JSFunction::kFeedbackVectorOffset));
+    __ mov(ecx, FieldOperand(ecx, Cell::kValueOffset));
+    __ add(
+        FieldOperand(ecx, FeedbackVector::kInvocationCountIndex * kPointerSize +
+                              FeedbackVector::kHeaderSize),
+        Immediate(Smi::FromInt(1)));
   }
 
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
-    // Generators allocate locals, if any, in context slots.
-    DCHECK(!IsGeneratorFunction(literal()->kind()) || locals_count == 0);
     OperandStackDepthIncrement(locals_count);
     if (locals_count == 1) {
       __ push(Immediate(isolate()->factory()->undefined_value()));
@@ -189,15 +188,18 @@
       if (info->scope()->new_target_var() != nullptr) {
         __ push(edx);  // Preserve new target.
       }
-      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
-        FastNewFunctionContextStub stub(isolate());
+      if (slots <=
+          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+        Callable callable = CodeFactory::FastNewFunctionContext(
+            isolate(), info->scope()->scope_type());
         __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
                Immediate(slots));
-        __ CallStub(&stub);
-        // Result of FastNewFunctionContextStub is always in new space.
+        __ Call(callable.code(), RelocInfo::CODE_TARGET);
+        // Result of the FastNewFunctionContext builtin is always in new space.
         need_write_barrier = false;
       } else {
         __ push(edi);
+        __ Push(Smi::FromInt(info->scope()->scope_type()));
         __ CallRuntime(Runtime::kNewFunctionContext);
       }
       if (info->scope()->new_target_var() != nullptr) {
@@ -244,37 +246,10 @@
   PrepareForBailoutForId(BailoutId::FunctionContext(),
                          BailoutState::NO_REGISTERS);
 
-  // Possibly set up a local binding to the this function which is used in
-  // derived constructors with super calls.
-  Variable* this_function_var = info->scope()->this_function_var();
-  if (this_function_var != nullptr) {
-    Comment cmnt(masm_, "[ This function");
-    if (!function_in_register) {
-      __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers register again, keep it marked as such.
-    }
-    SetVar(this_function_var, edi, ebx, ecx);
-  }
-
-  // Possibly set up a local binding to the new target value.
-  Variable* new_target_var = info->scope()->new_target_var();
-  if (new_target_var != nullptr) {
-    Comment cmnt(masm_, "[ new.target");
-    SetVar(new_target_var, edx, ebx, ecx);
-  }
-
-  // Possibly allocate RestParameters
-  Variable* rest_param = info->scope()->rest_parameter();
-  if (rest_param != nullptr) {
-    Comment cmnt(masm_, "[ Allocate rest parameter array");
-    if (!function_in_register) {
-      __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-    }
-    FastNewRestParameterStub stub(isolate());
-    __ CallStub(&stub);
-    function_in_register = false;
-    SetVar(rest_param, eax, ebx, edx);
-  }
+  // We don't support new.target, rest parameters or this function here.
+  DCHECK_NULL(info->scope()->new_target_var());
+  DCHECK_NULL(info->scope()->rest_parameter());
+  DCHECK_NULL(info->scope()->this_function_var());
 
   Variable* arguments = info->scope()->arguments();
   if (arguments != NULL) {
@@ -502,10 +477,8 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
-         !lit->IsUndetectable());
-  if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
-      lit->IsFalse(isolate())) {
+  DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+  if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
     if (true_label_ != fall_through_) __ jmp(true_label_);
@@ -728,10 +701,12 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
       globals_->Add(isolate()->factory()->undefined_value(), zone());
+      globals_->Add(isolate()->factory()->undefined_value(), zone());
       break;
     }
     case VariableLocation::PARAMETER:
@@ -754,16 +729,7 @@
       }
       break;
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ VariableDeclaration");
-      DCHECK_EQ(VAR, variable->mode());
-      DCHECK(!variable->binding_needs_init());
-      __ push(Immediate(variable->name()));
-      __ CallRuntime(Runtime::kDeclareEvalVar);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -775,9 +741,16 @@
   Variable* variable = proxy->var();
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
-      FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
+      globals_->Add(variable->name(), zone());
+      FeedbackSlot slot = proxy->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
+      // We need the slot where the literals array lives, too.
+      slot = declaration->fun()->LiteralFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
+
       Handle<SharedFunctionInfo> function =
           Compiler::GetSharedFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
@@ -807,15 +780,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ FunctionDeclaration");
-      PushOperand(variable->name());
-      VisitForStackValue(declaration->fun());
-      CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
-      PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -826,7 +791,7 @@
   // Call the runtime to declare the globals.
   __ Push(pairs);
   __ Push(Smi::FromInt(DeclareGlobalsFlags()));
-  __ EmitLoadTypeFeedbackVector(eax);
+  __ EmitLoadFeedbackVector(eax);
   __ Push(eax);
   __ CallRuntime(Runtime::kDeclareGlobals);
   // Return value is ignored.
@@ -930,7 +895,7 @@
   Comment cmnt(masm_, "[ ForInStatement");
   SetStatementPosition(stmt, SKIP_BREAK);
 
-  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  FeedbackSlot slot = stmt->ForInFeedbackSlot();
 
   // Get the object to enumerate over.
   SetExpressionAsStatementPosition(stmt->enumerable());
@@ -1036,9 +1001,9 @@
 
   // We need to filter the key, record slow-path here.
   int const vector_index = SmiFromSlot(slot)->value();
-  __ EmitLoadTypeFeedbackVector(edx);
+  __ EmitLoadFeedbackVector(edx);
   __ mov(FieldOperand(edx, FixedArray::OffsetOfElementAt(vector_index)),
-         Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+         Immediate(FeedbackVector::MegamorphicSentinel(isolate())));
 
   // eax contains the key.  The receiver in ebx is the second argument to the
   // ForInFilter.  ForInFilter returns undefined if the receiver doesn't
@@ -1082,116 +1047,30 @@
   decrement_loop_depth();
 }
 
-
 void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
-                                          FeedbackVectorSlot slot) {
+                                          FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
   __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
 void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
                                                      int offset,
-                                                     FeedbackVectorSlot slot) {
+                                                     FeedbackSlot slot) {
   DCHECK(NeedsHomeObject(initializer));
   __ mov(StoreDescriptor::ReceiverRegister(), eax);
   __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
   CallStoreIC(slot, isolate()->factory()->home_object_symbol());
 }
 
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
-                                                      TypeofMode typeof_mode,
-                                                      Label* slow) {
-  Register context = esi;
-  Register temp = edx;
-
-  int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
-  for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (s->calls_sloppy_eval()) {
-      // Check that extension is "the hole".
-      __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
-                       Heap::kTheHoleValueRootIndex, slow);
-    }
-    // Load next context in chain.
-    __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
-    // Walk the rest of the chain without clobbering esi.
-    context = temp;
-    to_check--;
-  }
-
-  // All extension objects were empty and it is safe to use a normal global
-  // load machinery.
-  EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
-                                                                Label* slow) {
-  DCHECK(var->IsContextSlot());
-  Register context = esi;
-  Register temp = ebx;
-
-  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
-    if (s->NeedsContext()) {
-      if (s->calls_sloppy_eval()) {
-        // Check that extension is "the hole".
-        __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
-                         Heap::kTheHoleValueRootIndex, slow);
-      }
-      __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
-      // Walk the rest of the chain without clobbering esi.
-      context = temp;
-    }
-  }
-  // Check that last extension is "the hole".
-  __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
-                   Heap::kTheHoleValueRootIndex, slow);
-
-  // This function is used only for loads, not stores, so it's safe to
-  // return an esi-based operand (the write barrier cannot be allowed to
-  // destroy the esi register).
-  return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
-                                                  TypeofMode typeof_mode,
-                                                  Label* slow, Label* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables.  Eval is used a lot without
-  // introducing variables.  In those cases, we do not want to
-  // perform a runtime call for all variables in the scope
-  // containing the eval.
-  Variable* var = proxy->var();
-  if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
-    __ jmp(done);
-  } else if (var->mode() == DYNAMIC_LOCAL) {
-    Variable* local = var->local_if_not_shadowed();
-    __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
-    if (local->binding_needs_init()) {
-      __ cmp(eax, isolate()->factory()->the_hole_value());
-      __ j(not_equal, done);
-      __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kThrowReferenceError);
-    } else {
-      __ jmp(done);
-    }
-  }
-}
-
 void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
                                          TypeofMode typeof_mode) {
   SetExpressionPosition(proxy);
   PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
   Variable* var = proxy->var();
 
-  // Three cases: global variables, lookup variables, and all other types of
-  // variables.
+  // Two cases: global variables and all other types of variables.
   switch (var->location()) {
     case VariableLocation::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
@@ -1224,24 +1103,7 @@
       break;
     }
 
-    case VariableLocation::LOOKUP: {
-      Comment cmnt(masm_, "[ Lookup variable");
-      Label done, slow;
-      // Generate code for loading from variables potentially shadowed
-      // by eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
-      __ bind(&slow);
-      __ push(Immediate(var->name()));
-      Runtime::FunctionId function_id =
-          typeof_mode == NOT_INSIDE_TYPEOF
-              ? Runtime::kLoadLookupSlot
-              : Runtime::kLoadLookupSlotInsideTypeof;
-      __ CallRuntime(function_id);
-      __ bind(&done);
-      context()->Plug(eax);
-      break;
-    }
-
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -1267,7 +1129,8 @@
 void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   Comment cmnt(masm_, "[ ObjectLiteral");
 
-  Handle<FixedArray> constant_properties = expr->constant_properties();
+  Handle<BoilerplateDescription> constant_properties =
+      expr->GetOrBuildConstantProperties(isolate());
   int flags = expr->ComputeFlags();
   // If any of the keys would store to the elements array, then we shouldn't
   // allow it.
@@ -1282,8 +1145,9 @@
     __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
     __ mov(ecx, Immediate(constant_properties));
     __ mov(edx, Immediate(Smi::FromInt(flags)));
-    FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
-    __ CallStub(&stub);
+    Callable callable = CodeFactory::FastCloneShallowObject(
+        isolate(), expr->properties_count());
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1293,10 +1157,9 @@
   bool result_saved = false;
 
   AccessorTable accessor_table(zone());
-  int property_index = 0;
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-    if (property->is_computed_name()) break;
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    DCHECK(!property->is_computed_name());
     if (property->IsCompileTimeValue()) continue;
 
     Literal* key = property->key()->AsLiteral();
@@ -1306,6 +1169,7 @@
       result_saved = true;
     }
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1320,7 +1184,7 @@
             VisitForAccumulatorValue(value);
             DCHECK(StoreDescriptor::ValueRegister().is(eax));
             __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
-            CallStoreIC(property->GetSlot(0), key->value());
+            CallStoreIC(property->GetSlot(0), key->value(), true);
             PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
             if (NeedsHomeObject(value)) {
               EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
@@ -1348,20 +1212,20 @@
         VisitForStackValue(value);
         DCHECK(property->emit_store());
         CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-        PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+        PrepareForBailoutForId(expr->GetIdForPropertySet(i),
                                BailoutState::NO_REGISTERS);
         break;
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->setter = property;
         }
         break;
@@ -1384,72 +1248,6 @@
     PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
   }
 
-  // Object literals have two parts. The "static" part on the left contains no
-  // computed property names, and so we can compute its map ahead of time; see
-  // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
-  // starts with the first computed property name, and continues with all
-  // properties to its right.  All the code from above initializes the static
-  // component of the object literal, and arranges for the map of the result to
-  // reflect the static order in which the keys appear. For the dynamic
-  // properties, we compile them into a series of "SetOwnProperty" runtime
-  // calls. This will preserve insertion order.
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
-    Expression* value = property->value();
-    if (!result_saved) {
-      PushOperand(eax);  // Save result on the stack
-      result_saved = true;
-    }
-
-    PushOperand(Operand(esp, 0));  // Duplicate receiver.
-
-    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
-      DCHECK(!property->is_computed_name());
-      VisitForStackValue(value);
-      DCHECK(property->emit_store());
-      CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
-      PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                             BailoutState::NO_REGISTERS);
-    } else {
-      EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
-      VisitForStackValue(value);
-      if (NeedsHomeObject(value)) {
-        EmitSetHomeObject(value, 2, property->GetSlot());
-      }
-
-      switch (property->kind()) {
-        case ObjectLiteral::Property::CONSTANT:
-        case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        case ObjectLiteral::Property::COMPUTED:
-          if (property->emit_store()) {
-            PushOperand(Smi::FromInt(NONE));
-            PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-            CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-            PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
-                                   BailoutState::NO_REGISTERS);
-          } else {
-            DropOperands(3);
-          }
-          break;
-
-        case ObjectLiteral::Property::PROTOTYPE:
-          UNREACHABLE();
-          break;
-
-        case ObjectLiteral::Property::GETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-          break;
-
-        case ObjectLiteral::Property::SETTER:
-          PushOperand(Smi::FromInt(NONE));
-          CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-          break;
-      }
-    }
-  }
-
   if (result_saved) {
     context()->PlugTOS();
   } else {
@@ -1461,16 +1259,8 @@
 void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   Comment cmnt(masm_, "[ ArrayLiteral");
 
-  Handle<FixedArray> constant_elements = expr->constant_elements();
-  bool has_constant_fast_elements =
-      IsFastObjectElementsKind(expr->constant_elements_kind());
-
-  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
-  if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
-    // If the only customer of allocation sites is transitioning, then
-    // we can turn it off if we don't have anywhere else to transition to.
-    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
-  }
+  Handle<ConstantElementsPair> constant_elements =
+      expr->GetOrBuildConstantElements(isolate());
 
   if (MustCreateArrayLiteralWithRuntime(expr)) {
     __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1482,8 +1272,9 @@
     __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
     __ mov(ecx, Immediate(constant_elements));
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
-    __ CallStub(&stub);
+    Callable callable =
+        CodeFactory::FastCloneShallowArray(isolate(), TRACK_ALLOCATION_SITE);
+    __ Call(callable.code(), RelocInfo::CODE_TARGET);
     RestoreContext();
   }
   PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1537,17 +1328,6 @@
     case VARIABLE:
       // Nothing to do here.
       break;
-    case NAMED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        PushOperand(MemOperand(esp, kPointerSize));
-        PushOperand(result_register());
-      }
-      break;
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
         // We need the receiver both on the stack and in the register.
@@ -1557,19 +1337,6 @@
         VisitForStackValue(property->obj());
       }
       break;
-    case KEYED_SUPER_PROPERTY:
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(property->key());
-      PushOperand(result_register());
-      if (expr->is_compound()) {
-        PushOperand(MemOperand(esp, 2 * kPointerSize));
-        PushOperand(MemOperand(esp, 2 * kPointerSize));
-        PushOperand(result_register());
-      }
-      break;
     case KEYED_PROPERTY: {
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
@@ -1582,6 +1349,10 @@
       }
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 
   // For compound assignments we need another deoptimization point after the
@@ -1594,26 +1365,20 @@
           EmitVariableLoad(expr->target()->AsVariableProxy());
           PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
           break;
-        case NAMED_SUPER_PROPERTY:
-          EmitNamedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
         case NAMED_PROPERTY:
           EmitNamedPropertyLoad(property);
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
-        case KEYED_SUPER_PROPERTY:
-          EmitKeyedSuperPropertyLoad(property);
-          PrepareForBailoutForId(property->LoadId(),
-                                 BailoutState::TOS_REGISTER);
-          break;
         case KEYED_PROPERTY:
           EmitKeyedPropertyLoad(property);
           PrepareForBailoutForId(property->LoadId(),
                                  BailoutState::TOS_REGISTER);
           break;
+        case NAMED_SUPER_PROPERTY:
+        case KEYED_SUPER_PROPERTY:
+          UNREACHABLE();
+          break;
       }
     }
 
@@ -1651,72 +1416,20 @@
     case NAMED_PROPERTY:
       EmitNamedPropertyAssignment(expr);
       break;
-    case NAMED_SUPER_PROPERTY:
-      EmitNamedSuperPropertyStore(property);
-      context()->Plug(result_register());
-      break;
-    case KEYED_SUPER_PROPERTY:
-      EmitKeyedSuperPropertyStore(property);
-      context()->Plug(result_register());
-      break;
     case KEYED_PROPERTY:
       EmitKeyedPropertyAssignment(expr);
       break;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
 
 void FullCodeGenerator::VisitYield(Yield* expr) {
-  Comment cmnt(masm_, "[ Yield");
-  SetExpressionPosition(expr);
-
-  // Evaluate yielded value first; the initial iterator definition depends on
-  // this.  It stays on the stack while we update the iterator.
-  VisitForStackValue(expr->expression());
-
-  Label suspend, continuation, post_runtime, resume, exception;
-
-  __ jmp(&suspend);
-  __ bind(&continuation);
-  // When we arrive here, eax holds the generator object.
-  __ RecordGeneratorContinuation();
-  __ mov(ebx, FieldOperand(eax, JSGeneratorObject::kResumeModeOffset));
-  __ mov(eax, FieldOperand(eax, JSGeneratorObject::kInputOrDebugPosOffset));
-  STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
-  STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
-  __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::kReturn)));
-  __ j(less, &resume);
-  __ Push(result_register());
-  __ j(greater, &exception);
-  EmitCreateIteratorResult(true);
-  EmitUnwindAndReturn();
-
-  __ bind(&exception);
-  __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
-                                              : Runtime::kThrow);
-
-  __ bind(&suspend);
-  OperandStackDepthIncrement(1);  // Not popped on this path.
-  VisitForAccumulatorValue(expr->generator_object());
-  DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
-  __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
-         Immediate(Smi::FromInt(continuation.pos())));
-  __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
-  __ mov(ecx, esi);
-  __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
-                      kDontSaveFPRegs);
-  __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
-  __ cmp(esp, ebx);
-  __ j(equal, &post_runtime);
-  __ push(eax);  // generator object
-  __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
-  RestoreContext();
-  __ bind(&post_runtime);
-  PopOperand(result_register());
-  EmitReturnSequence();
-
-  __ bind(&resume);
-  context()->Plug(result_register());
+  // Resumable functions are not supported.
+  UNREACHABLE();
 }
 
 void FullCodeGenerator::PushOperand(MemOperand operand) {
@@ -1855,58 +1568,6 @@
   context()->Plug(eax);
 }
 
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
-  for (int i = 0; i < lit->properties()->length(); i++) {
-    ClassLiteral::Property* property = lit->properties()->at(i);
-    Expression* value = property->value();
-
-    if (property->is_static()) {
-      PushOperand(Operand(esp, kPointerSize));  // constructor
-    } else {
-      PushOperand(Operand(esp, 0));  // prototype
-    }
-    EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static prototype property is read only. We handle the non computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read only property we special case this so we do
-    // not need to do this for every property.
-    if (property->is_static() && property->is_computed_name()) {
-      __ CallRuntime(Runtime::kThrowIfStaticPrototype);
-      __ push(eax);
-    }
-
-    VisitForStackValue(value);
-    if (NeedsHomeObject(value)) {
-      EmitSetHomeObject(value, 2, property->GetSlot());
-    }
-
-    switch (property->kind()) {
-      case ClassLiteral::Property::METHOD:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
-        CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
-        break;
-
-      case ClassLiteral::Property::GETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::SETTER:
-        PushOperand(Smi::FromInt(DONT_ENUM));
-        CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
-        break;
-
-      case ClassLiteral::Property::FIELD:
-        UNREACHABLE();
-        break;
-    }
-  }
-}
-
-
 void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
   PopOperand(edx);
   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
@@ -1916,9 +1577,7 @@
   context()->Plug(eax);
 }
 
-
-void FullCodeGenerator::EmitAssignment(Expression* expr,
-                                       FeedbackVectorSlot slot) {
+void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
   DCHECK(expr->IsValidReferenceExpressionOrThis());
 
   Property* prop = expr->AsProperty();
@@ -1940,43 +1599,6 @@
       CallStoreIC(slot, prop->key()->AsLiteral()->value());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      PushOperand(eax);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForAccumulatorValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      // stack: value, this; eax: home_object
-      Register scratch = ecx;
-      Register scratch2 = edx;
-      __ mov(scratch, result_register());               // home_object
-      __ mov(eax, MemOperand(esp, kPointerSize));       // value
-      __ mov(scratch2, MemOperand(esp, 0));             // this
-      __ mov(MemOperand(esp, kPointerSize), scratch2);  // this
-      __ mov(MemOperand(esp, 0), scratch);              // home_object
-      // stack: this, home_object. eax: value
-      EmitNamedSuperPropertyStore(prop);
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      PushOperand(eax);
-      VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-      VisitForStackValue(
-          prop->obj()->AsSuperPropertyReference()->home_object());
-      VisitForAccumulatorValue(prop->key());
-      Register scratch = ecx;
-      Register scratch2 = edx;
-      __ mov(scratch2, MemOperand(esp, 2 * kPointerSize));  // value
-      // stack: value, this, home_object; eax: key, edx: value
-      __ mov(scratch, MemOperand(esp, kPointerSize));  // this
-      __ mov(MemOperand(esp, 2 * kPointerSize), scratch);
-      __ mov(scratch, MemOperand(esp, 0));  // home_object
-      __ mov(MemOperand(esp, kPointerSize), scratch);
-      __ mov(MemOperand(esp, 0), eax);
-      __ mov(eax, scratch2);
-      // stack: this, home_object, key; eax: value.
-      EmitKeyedSuperPropertyStore(prop);
-      break;
-    }
     case KEYED_PROPERTY: {
       PushOperand(eax);  // Preserve value.
       VisitForStackValue(prop->obj());
@@ -1987,6 +1609,10 @@
       CallKeyedStoreIC(slot);
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
   context()->Plug(eax);
 }
@@ -2003,7 +1629,7 @@
 }
 
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
-                                               FeedbackVectorSlot slot,
+                                               FeedbackSlot slot,
                                                HoleCheckMode hole_check_mode) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
@@ -2047,26 +1673,18 @@
 
   } else {
     DCHECK(var->mode() != CONST || op == Token::INIT);
-    if (var->IsLookupSlot()) {
-      // Assignment to var.
-      __ Push(Immediate(var->name()));
-      __ Push(eax);
-      __ CallRuntime(is_strict(language_mode())
-                         ? Runtime::kStoreLookupSlot_Strict
-                         : Runtime::kStoreLookupSlot_Sloppy);
-    } else {
-      // Assignment to var or initializing assignment to let/const in harmony
-      // mode.
-      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
-      MemOperand location = VarOperand(var, ecx);
-      if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
-        // Check for an uninitialized let binding.
-        __ mov(edx, location);
-        __ cmp(edx, isolate()->factory()->the_hole_value());
-        __ Check(equal, kLetBindingReInitialization);
-      }
-      EmitStoreToStackLocalOrContextSlot(var, location);
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    DCHECK(!var->IsLookupSlot());
+    // Assignment to var or initializing assignment to let/const in harmony
+    // mode.
+    MemOperand location = VarOperand(var, ecx);
+    if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+      // Check for an uninitialized let binding.
+      __ mov(edx, location);
+      __ cmp(edx, isolate()->factory()->the_hole_value());
+      __ Check(equal, kLetBindingReInitialization);
     }
+    EmitStoreToStackLocalOrContextSlot(var, location);
   }
 }
 
@@ -2086,34 +1704,6 @@
 }
 
 
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // eax : value
-  // stack : receiver ('this'), home_object
-  DCHECK(prop != NULL);
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(key != NULL);
-
-  PushOperand(key->value());
-  PushOperand(eax);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreToSuper_Strict
-                              : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to named property of super.
-  // eax : value
-  // stack : receiver ('this'), home_object, key
-
-  PushOperand(eax);
-  CallRuntimeWithOperands(is_strict(language_mode())
-                              ? Runtime::kStoreKeyedToSuper_Strict
-                              : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
   // eax               : value
@@ -2161,42 +1751,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
-  SetExpressionPosition(expr);
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  Literal* key = prop->key()->AsLiteral();
-  DCHECK(!key->value()->IsSmi());
-  // Load the function from the receiver.
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForStackValue(super_ref->home_object());
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperand(eax);
-  PushOperand(eax);
-  PushOperand(Operand(esp, kPointerSize * 2));
-  PushOperand(key->value());
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ mov(Operand(esp, kPointerSize), eax);
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2221,40 +1775,6 @@
 }
 
 
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
-  Expression* callee = expr->expression();
-  DCHECK(callee->IsProperty());
-  Property* prop = callee->AsProperty();
-  DCHECK(prop->IsSuperAccess());
-
-  SetExpressionPosition(prop);
-  // Load the function from the receiver.
-  SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
-  VisitForStackValue(super_ref->home_object());
-  VisitForAccumulatorValue(super_ref->this_var());
-  PushOperand(eax);
-  PushOperand(eax);
-  PushOperand(Operand(esp, kPointerSize * 2));
-  VisitForStackValue(prop->key());
-  // Stack here:
-  //  - home_object
-  //  - this (receiver)
-  //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
-  //  - home_object
-  //  - key
-  CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
-  PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
-  // Replace home_object with target function.
-  __ mov(Operand(esp, kPointerSize), eax);
-
-  // Stack here:
-  // - target function
-  // - this (receiver)
-  EmitCall(expr);
-}
-
-
 void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
@@ -2274,7 +1794,8 @@
     EmitProfilingCounterHandlingForReturnSequence(true);
   }
   Handle<Code> code =
-      CodeFactory::CallIC(isolate(), mode, expr->tail_call_mode()).code();
+      CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
+          .code();
   __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
   __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
   __ Move(eax, Immediate(arg_count));
@@ -2286,111 +1807,6 @@
   context()->DropAndPlug(1, eax);
 }
 
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
-  int arg_count = expr->arguments()->length();
-  // Push copy of the first argument or undefined if it doesn't exist.
-  if (arg_count > 0) {
-    __ push(Operand(esp, arg_count * kPointerSize));
-  } else {
-    __ push(Immediate(isolate()->factory()->undefined_value()));
-  }
-
-  // Push the enclosing function.
-  __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-
-  // Push the language mode.
-  __ push(Immediate(Smi::FromInt(language_mode())));
-
-  // Push the start position of the scope the calls resides in.
-  __ push(Immediate(Smi::FromInt(scope()->start_position())));
-
-  // Push the source position of the eval call.
-  __ push(Immediate(Smi::FromInt(expr->position())));
-
-  // Do the runtime call.
-  __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
-  VariableProxy* callee = expr->expression()->AsVariableProxy();
-  if (callee->var()->IsLookupSlot()) {
-    Label slow, done;
-    SetExpressionPosition(callee);
-    // Generate code for loading from variables potentially shadowed by
-    // eval-introduced variables.
-    EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
-    __ bind(&slow);
-    // Call the runtime to find the function to call (returned in eax) and
-    // the object holding it (returned in edx).
-    __ Push(callee->name());
-    __ CallRuntime(Runtime::kLoadLookupSlotForCall);
-    PushOperand(eax);  // Function.
-    PushOperand(edx);  // Receiver.
-    PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
-    // If fast case code has been generated, emit code to push the function
-    // and receiver and have the slow path jump around this code.
-    if (done.is_linked()) {
-      Label call;
-      __ jmp(&call, Label::kNear);
-      __ bind(&done);
-      // Push function.
-      __ push(eax);
-      // The receiver is implicitly the global receiver. Indicate this by
-      // passing the hole to the call function stub.
-      __ push(Immediate(isolate()->factory()->undefined_value()));
-      __ bind(&call);
-    }
-  } else {
-    VisitForStackValue(callee);
-    // refEnv.WithBaseObject()
-    PushOperand(isolate()->factory()->undefined_value());
-  }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
-  // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
-  // to resolve the function we need to call.  Then we call the resolved
-  // function using the given arguments.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-
-  PushCalleeAndWithBaseObject(expr);
-
-  // Push the arguments.
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Push a copy of the function (found below the arguments) and
-  // resolve eval.
-  __ push(Operand(esp, (arg_count + 1) * kPointerSize));
-  EmitResolvePossiblyDirectEval(expr);
-
-  // Touch up the stack with the resolved function.
-  __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
-
-  PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
-  SetCallPosition(expr);
-  Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
-                                          expr->tail_call_mode())
-                          .code();
-  __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
-  __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
-  __ Move(eax, Immediate(arg_count));
-  __ call(code, RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->DropAndPlug(1, eax);
-}
-
-
 void FullCodeGenerator::VisitCallNew(CallNew* expr) {
   Comment cmnt(masm_, "[ CallNew");
   // According to ECMA-262, section 11.2.2, page 44, the function
@@ -2419,7 +1835,7 @@
   __ mov(edi, Operand(esp, arg_count * kPointerSize));
 
   // Record call targets in unoptimized code.
-  __ EmitLoadTypeFeedbackVector(ebx);
+  __ EmitLoadFeedbackVector(ebx);
   __ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
 
   CallConstructStub stub(isolate());
@@ -2431,47 +1847,6 @@
 }
 
 
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
-  SuperCallReference* super_call_ref =
-      expr->expression()->AsSuperCallReference();
-  DCHECK_NOT_NULL(super_call_ref);
-
-  // Push the super constructor target on the stack (may be null,
-  // but the Construct builtin can deal with that properly).
-  VisitForAccumulatorValue(super_call_ref->this_function_var());
-  __ AssertFunction(result_register());
-  __ mov(result_register(),
-         FieldOperand(result_register(), HeapObject::kMapOffset));
-  PushOperand(FieldOperand(result_register(), Map::kPrototypeOffset));
-
-  // Push the arguments ("left-to-right") on the stack.
-  ZoneList<Expression*>* args = expr->arguments();
-  int arg_count = args->length();
-  for (int i = 0; i < arg_count; i++) {
-    VisitForStackValue(args->at(i));
-  }
-
-  // Call the construct call builtin that handles allocation and
-  // constructor invocation.
-  SetConstructCallPosition(expr);
-
-  // Load new target into edx.
-  VisitForAccumulatorValue(super_call_ref->new_target_var());
-  __ mov(edx, result_register());
-
-  // Load function and argument count into edi and eax.
-  __ Move(eax, Immediate(arg_count));
-  __ mov(edi, Operand(esp, arg_count * kPointerSize));
-
-  __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
-  OperandStackDepthDecrement(arg_count + 1);
-
-  RecordJSReturnSite(expr);
-  RestoreContext();
-  context()->Plug(eax);
-}
-
-
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2559,28 +1934,6 @@
 }
 
 
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
-  ZoneList<Expression*>* args = expr->arguments();
-  DCHECK(args->length() == 1);
-
-  VisitForAccumulatorValue(args->at(0));
-
-  Label materialize_true, materialize_false;
-  Label* if_true = NULL;
-  Label* if_false = NULL;
-  Label* fall_through = NULL;
-  context()->PrepareTest(&materialize_true, &materialize_false,
-                         &if_true, &if_false, &fall_through);
-
-  __ JumpIfSmi(eax, if_false);
-  __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
-  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  Split(equal, if_true, if_false, fall_through);
-
-  context()->Plug(if_true, if_false);
-}
-
-
 void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
   DCHECK(args->length() == 1);
@@ -2814,17 +2167,13 @@
           __ push(Immediate(var->name()));
           __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
           context()->Plug(eax);
-        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+        } else {
+          DCHECK(!var->IsLookupSlot());
+          DCHECK(var->IsStackAllocated() || var->IsContextSlot());
           // Result of deleting non-global variables is false.  'this' is
           // not really a variable, though we implement it as one.  The
           // subexpression does not have side effects.
           context()->Plug(is_this);
-        } else {
-          // Non-global variable.  Call the runtime to try to delete from the
-          // context where the variable was introduced.
-          __ Push(var->name());
-          __ CallRuntime(Runtime::kDeleteLookupSlot);
-          context()->Plug(eax);
         }
       } else {
         // Result of deleting non-property, non-variable reference is true.
@@ -2935,30 +2284,6 @@
         break;
       }
 
-      case NAMED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForAccumulatorValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        PushOperand(result_register());
-        PushOperand(MemOperand(esp, kPointerSize));
-        PushOperand(result_register());
-        EmitNamedSuperPropertyLoad(prop);
-        break;
-      }
-
-      case KEYED_SUPER_PROPERTY: {
-        VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
-        VisitForStackValue(
-            prop->obj()->AsSuperPropertyReference()->home_object());
-        VisitForAccumulatorValue(prop->key());
-        PushOperand(result_register());
-        PushOperand(MemOperand(esp, 2 * kPointerSize));
-        PushOperand(MemOperand(esp, 2 * kPointerSize));
-        PushOperand(result_register());
-        EmitKeyedSuperPropertyLoad(prop);
-        break;
-      }
-
       case KEYED_PROPERTY: {
         VisitForStackValue(prop->obj());
         VisitForStackValue(prop->key());
@@ -2969,6 +2294,8 @@
         break;
       }
 
+      case NAMED_SUPER_PROPERTY:
+      case KEYED_SUPER_PROPERTY:
       case VARIABLE:
         UNREACHABLE();
     }
@@ -3002,14 +2329,12 @@
           case NAMED_PROPERTY:
             __ mov(Operand(esp, kPointerSize), eax);
             break;
-          case NAMED_SUPER_PROPERTY:
-            __ mov(Operand(esp, 2 * kPointerSize), eax);
-            break;
           case KEYED_PROPERTY:
             __ mov(Operand(esp, 2 * kPointerSize), eax);
             break;
+          case NAMED_SUPER_PROPERTY:
           case KEYED_SUPER_PROPERTY:
-            __ mov(Operand(esp, 3 * kPointerSize), eax);
+            UNREACHABLE();
             break;
         }
       }
@@ -3049,14 +2374,12 @@
         case NAMED_PROPERTY:
           __ mov(Operand(esp, kPointerSize), eax);
           break;
-        case NAMED_SUPER_PROPERTY:
-          __ mov(Operand(esp, 2 * kPointerSize), eax);
-          break;
         case KEYED_PROPERTY:
           __ mov(Operand(esp, 2 * kPointerSize), eax);
           break;
+        case NAMED_SUPER_PROPERTY:
         case KEYED_SUPER_PROPERTY:
-          __ mov(Operand(esp, 3 * kPointerSize), eax);
+          UNREACHABLE();
           break;
       }
     }
@@ -3115,30 +2438,6 @@
       }
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      EmitNamedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(eax);
-      }
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      EmitKeyedSuperPropertyStore(prop);
-      PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
-      if (expr->is_postfix()) {
-        if (!context()->IsEffect()) {
-          context()->PlugTOS();
-        }
-      } else {
-        context()->Plug(eax);
-      }
-      break;
-    }
     case KEYED_PROPERTY: {
       PopOperand(StoreDescriptor::NameRegister());
       PopOperand(StoreDescriptor::ReceiverRegister());
@@ -3154,6 +2453,10 @@
       }
       break;
     }
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
+      break;
   }
 }
 
@@ -3220,16 +2523,6 @@
     __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
               Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
     Split(zero, if_true, if_false, fall_through);
-// clang-format off
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)   \
-  } else if (String::Equals(check, factory->type##_string())) { \
-    __ JumpIfSmi(eax, if_false);                                \
-    __ cmp(FieldOperand(eax, HeapObject::kMapOffset),           \
-           isolate()->factory()->type##_map());                 \
-    Split(equal, if_true, if_false, fall_through);
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-    // clang-format on
   } else {
     if (if_false != fall_through) __ jmp(if_false);
   }
@@ -3270,6 +2563,7 @@
       SetExpressionPosition(expr);
       PopOperand(edx);
       __ Call(isolate()->builtins()->InstanceOf(), RelocInfo::CODE_TARGET);
+      RestoreContext();
       PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
       __ cmp(eax, isolate()->factory()->true_value());
       Split(equal, if_true, if_false, fall_through);
@@ -3386,66 +2680,6 @@
 }
 
 
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
-  // Store pending message while executing finally block.
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(edx, Operand::StaticVariable(pending_message_obj));
-  PushOperand(edx);
-
-  ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
-  DCHECK(!result_register().is(edx));
-  // Restore pending message from stack.
-  PopOperand(edx);
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(Operand::StaticVariable(pending_message_obj), edx);
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
-  DCHECK(!result_register().is(edx));
-  ExternalReference pending_message_obj =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
-  __ mov(Operand::StaticVariable(pending_message_obj), edx);
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
-  DCHECK(!result_register().is(edx));
-  __ Pop(result_register());  // Restore the accumulator.
-  __ Pop(edx);                // Get the token.
-  for (DeferredCommand cmd : commands_) {
-    Label skip;
-    __ cmp(edx, Immediate(Smi::FromInt(cmd.token)));
-    __ j(not_equal, &skip);
-    switch (cmd.command) {
-      case kReturn:
-        codegen_->EmitUnwindAndReturn();
-        break;
-      case kThrow:
-        __ Push(result_register());
-        __ CallRuntime(Runtime::kReThrow);
-        break;
-      case kContinue:
-        codegen_->EmitContinue(cmd.target);
-        break;
-      case kBreak:
-        codegen_->EmitBreak(cmd.target);
-        break;
-    }
-    __ bind(&skip);
-  }
-}
-
 #undef __
 
 
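A pattern worth noting across the full-codegen-x87.cc hunks above: platform code stubs (FastNewFunctionContextStub, FastCloneShallowObjectStub, FastCloneShallowArrayStub) are replaced by builtins reached through CodeFactory, so each call site now builds a Callable and calls its code object. Condensed from the function-context hunk (a sketch, not compilable on its own; slots is in scope at that call site):

    // Stub-to-builtin call shape used throughout this file.
    Callable callable = CodeFactory::FastNewFunctionContext(
        isolate(), info->scope()->scope_type());
    __ mov(FastNewFunctionContextDescriptor::SlotsRegister(), Immediate(slots));
    __ Call(callable.code(), RelocInfo::CODE_TARGET);
    // The builtin's result is always in new space, so no write barrier.
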
diff --git a/src/futex-emulation.cc b/src/futex-emulation.cc
index 2d18488..63ad213 100644
--- a/src/futex-emulation.cc
+++ b/src/futex-emulation.cc
@@ -12,6 +12,7 @@
 #include "src/handles-inl.h"
 #include "src/isolate.h"
 #include "src/list-inl.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -187,10 +188,9 @@
   return result;
 }
 
-
 Object* FutexEmulation::Wake(Isolate* isolate,
                              Handle<JSArrayBuffer> array_buffer, size_t addr,
-                             int num_waiters_to_wake) {
+                             uint32_t num_waiters_to_wake) {
   DCHECK(addr < NumberToSize(array_buffer->byte_length()));
 
   int waiters_woken = 0;
@@ -202,7 +202,9 @@
     if (backing_store == node->backing_store_ && addr == node->wait_addr_) {
       node->waiting_ = false;
       node->cond_.NotifyOne();
-      --num_waiters_to_wake;
+      if (num_waiters_to_wake != kWakeAll) {
+        --num_waiters_to_wake;
+      }
       waiters_woken++;
     }
 
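The guard added in this hunk is the point of the signature change in the header below: kWakeAll is a sentinel, so it is deliberately never decremented, and a wake-all request keeps firing for the entire walk of the wait list. In sketch form, using the names from this patch:

    // Only finite wake requests consume their budget (sketch of the loop body).
    if (num_waiters_to_wake != FutexEmulation::kWakeAll) {
      --num_waiters_to_wake;
    }
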
diff --git a/src/futex-emulation.h b/src/futex-emulation.h
index a0e2b18..801198f 100644
--- a/src/futex-emulation.h
+++ b/src/futex-emulation.h
@@ -13,7 +13,6 @@
 #include "src/base/macros.h"
 #include "src/base/platform/condition-variable.h"
 #include "src/base/platform/mutex.h"
-#include "src/handles.h"
 
 // Support for emulating futexes, a low-level synchronization primitive. They
 // are natively supported by Linux, but must be emulated for other platforms.
@@ -31,6 +30,8 @@
 
 namespace internal {
 
+template <typename T>
+class Handle;
 class Isolate;
 class JSArrayBuffer;
 
@@ -81,6 +82,9 @@
 
 class FutexEmulation : public AllStatic {
  public:
+  // Pass to Wake() to wake all waiters.
+  static const uint32_t kWakeAll = UINT32_MAX;
+
   // Check that array_buffer[addr] == value, and return "not-equal" if not. If
   // they are equal, block execution on |isolate|'s thread until woken via
   // |Wake|, or when the time given in |rel_timeout_ms| elapses. Note that
@@ -91,10 +95,11 @@
                       size_t addr, int32_t value, double rel_timeout_ms);
 
   // Wake |num_waiters_to_wake| threads that are waiting on the given |addr|.
-  // The rest of the waiters will continue to wait. The return value is the
-  // number of woken waiters.
+  // |num_waiters_to_wake| can be kWakeAll, in which case all waiters are
+  // woken. The rest of the waiters will continue to wait. The return value is
+  // the number of woken waiters.
   static Object* Wake(Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
-                      size_t addr, int num_waiters_to_wake);
+                      size_t addr, uint32_t num_waiters_to_wake);
 
   // Return the number of threads waiting on |addr|. Should only be used for
   // testing.
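Taken together with the futex-emulation.cc hunk, a caller that wants to wake every waiter now spells the sentinel explicitly. A hypothetical call site (sketch only; isolate, array_buffer and addr are assumed to be in scope):

    // Wake all threads parked on array_buffer[addr]; returns the woken count.
    Object* woken = FutexEmulation::Wake(isolate, array_buffer, addr,
                                         FutexEmulation::kWakeAll);
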
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 9ff16af..ff7f132 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -5,6 +5,8 @@
 #include "src/global-handles.h"
 
 #include "src/api.h"
+#include "src/cancelable-task.h"
+#include "src/objects-inl.h"
 #include "src/v8.h"
 #include "src/vm-state-inl.h"
 
@@ -719,7 +721,7 @@
   }
 }
 
-
+template <GlobalHandles::IterationMode mode>
 void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v) {
   for (int i = 0; i < new_space_nodes_.length(); ++i) {
     Node* node = new_space_nodes_[i];
@@ -728,18 +730,35 @@
         node->IsWeakRetainer()) {
       // Pending weak phantom handles die immediately. Everything else survives.
       if (node->IsPendingPhantomResetHandle()) {
-        node->ResetPhantomHandle();
-        ++number_of_phantom_handle_resets_;
+        if (mode == IterationMode::HANDLE_PHANTOM_NODES ||
+            mode == IterationMode::HANDLE_PHANTOM_NODES_VISIT_OTHERS) {
+          node->ResetPhantomHandle();
+          ++number_of_phantom_handle_resets_;
+        }
       } else if (node->IsPendingPhantomCallback()) {
-        node->CollectPhantomCallbackData(isolate(),
-                                         &pending_phantom_callbacks_);
+        if (mode == IterationMode::HANDLE_PHANTOM_NODES ||
+            mode == IterationMode::HANDLE_PHANTOM_NODES_VISIT_OTHERS) {
+          node->CollectPhantomCallbackData(isolate(),
+                                           &pending_phantom_callbacks_);
+        }
       } else {
-        v->VisitPointer(node->location());
+        if (mode == IterationMode::VISIT_OTHERS ||
+            mode == IterationMode::HANDLE_PHANTOM_NODES_VISIT_OTHERS) {
+          v->VisitPointer(node->location());
+        }
       }
     }
   }
 }
 
+template void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots<
+    GlobalHandles::HANDLE_PHANTOM_NODES>(ObjectVisitor* v);
+
+template void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots<
+    GlobalHandles::VISIT_OTHERS>(ObjectVisitor* v);
+
+template void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots<
+    GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(ObjectVisitor* v);
 
 DISABLE_CFI_PERF
 bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
diff --git a/src/global-handles.h b/src/global-handles.h
index 50e5ed6..9c4ffb4 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -113,6 +113,12 @@
 
 class GlobalHandles {
  public:
+  enum IterationMode {
+    HANDLE_PHANTOM_NODES_VISIT_OTHERS,
+    VISIT_OTHERS,
+    HANDLE_PHANTOM_NODES
+  };
+
   ~GlobalHandles();
 
   // Creates a new global handle that is alive until Destroy is called.
@@ -227,6 +233,7 @@
 
   // Iterates over weak independent or unmodified handles.
   // See the note above.
+  template <IterationMode mode>
   void IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v);
 
   // Identify unmodified objects that are in weak state and marks them
@@ -290,7 +297,7 @@
 #ifdef DEBUG
   void PrintStats();
   void Print();
-#endif
+#endif  // DEBUG
 
  private:
   explicit GlobalHandles(Isolate* isolate);
@@ -389,8 +396,6 @@
 class EternalHandles {
  public:
   enum SingletonHandle {
-    I18N_TEMPLATE_ONE,
-    I18N_TEMPLATE_TWO,
     DATE_CACHE_VERSION,
 
     NUMBER_OF_SINGLETON_HANDLES
diff --git a/src/globals.h b/src/globals.h
index f689c66..a90e624 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -11,6 +11,7 @@
 #include <ostream>
 
 #include "src/base/build_config.h"
+#include "src/base/flags.h"
 #include "src/base/logging.h"
 #include "src/base/macros.h"
 
@@ -314,32 +315,46 @@
   return os;
 }
 
-
 inline bool is_sloppy(LanguageMode language_mode) {
   return language_mode == SLOPPY;
 }
 
-
 inline bool is_strict(LanguageMode language_mode) {
   return language_mode != SLOPPY;
 }
 
-
 inline bool is_valid_language_mode(int language_mode) {
   return language_mode == SLOPPY || language_mode == STRICT;
 }
 
-
 inline LanguageMode construct_language_mode(bool strict_bit) {
   return static_cast<LanguageMode>(strict_bit);
 }
 
+enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
 // This constant is used as an undefined value when passing source positions.
 const int kNoSourcePosition = -1;
 
 // This constant is used to indicate missing deoptimization information.
 const int kNoDeoptimizationId = -1;
 
+// Deoptimize bailout kind.
+enum class DeoptimizeKind : uint8_t { kEager, kSoft };
+inline size_t hash_value(DeoptimizeKind kind) {
+  return static_cast<size_t>(kind);
+}
+inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
+  switch (kind) {
+    case DeoptimizeKind::kEager:
+      return os << "Eager";
+    case DeoptimizeKind::kSoft:
+      return os << "Soft";
+  }
+  UNREACHABLE();
+  return os;
+}
+
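Pairing hash_value() with operator<< on a small enum class is a recurring V8 idiom: the former lets the type participate in hash-based containers and hash combinators, the latter makes it loggable. A hypothetical standalone usage sketch (the enum is re-declared locally so the example compiles on its own):

#include <cstdint>
#include <iostream>
#include <unordered_map>

enum class DeoptimizeKind : uint8_t { kEager, kSoft };
inline size_t hash_value(DeoptimizeKind kind) {
  return static_cast<size_t>(kind);
}
inline std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
  return os << (kind == DeoptimizeKind::kEager ? "Eager" : "Soft");
}

// Adapter so the free hash_value() function can serve as a hasher.
struct DeoptKindHash {
  size_t operator()(DeoptimizeKind kind) const { return hash_value(kind); }
};

int main() {
  std::unordered_map<DeoptimizeKind, int, DeoptKindHash> counts;
  ++counts[DeoptimizeKind::kEager];
  for (const auto& entry : counts) {
    std::cout << entry.first << ": " << entry.second << "\n";
  }
  return 0;
}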
 // Mask for the sign bit in a smi.
 const intptr_t kSmiSignMask = kIntptrSignBit;
 
@@ -355,10 +370,6 @@
 const intptr_t kDoubleAlignment = 8;
 const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
 
-// Desired alignment for 128 bit SIMD values.
-const intptr_t kSimd128Alignment = 16;
-const intptr_t kSimd128AlignmentMask = kSimd128Alignment - 1;
-
 // Desired alignment for generated code is 32 bytes (to improve cache line
 // utilization).
 const int kCodeAlignmentBits = 5;
@@ -470,7 +481,7 @@
 class Symbol;
 class Name;
 class Struct;
-class TypeFeedbackVector;
+class FeedbackVector;
 class Variable;
 class RelocInfo;
 class Deserializer;
@@ -501,12 +512,7 @@
 const int kSpaceTagSize = 3;
 const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
 
-enum AllocationAlignment {
-  kWordAligned,
-  kDoubleAligned,
-  kDoubleUnaligned,
-  kSimd128Unaligned
-};
+enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };
 
 // Possible outcomes for decisions.
 enum class Decision : uint8_t { kUnknown, kTrue, kFalse };
@@ -591,7 +597,12 @@
 };
 
 // Flag indicating whether code is built into the VM (one of the natives files).
-enum NativesFlag { NOT_NATIVES_CODE, EXTENSION_CODE, NATIVES_CODE };
+enum NativesFlag {
+  NOT_NATIVES_CODE,
+  EXTENSION_CODE,
+  NATIVES_CODE,
+  INSPECTOR_CODE
+};
 
 // JavaScript defines two kinds of 'nil'.
 enum NilValue { kNullValue, kUndefinedValue };
@@ -603,14 +614,6 @@
   ONLY_SINGLE_FUNCTION_LITERAL  // Only a single FunctionLiteral expression.
 };
 
-// TODO(gsathya): Move this to JSPromise once we create it.
-// This should be in sync with the constants in promise.js
-enum PromiseStatus {
-  kPromisePending,
-  kPromiseFulfilled,
-  kPromiseRejected,
-};
-
 // A CodeDesc describes a buffer holding instructions and relocation
 // information. The instructions start at the beginning of the buffer
 // and grow forward, the relocation information starts at the end of
@@ -788,10 +791,14 @@
   FPR_GPR_MOV,
   LWSYNC,
   ISELECT,
+  VSX,
+  MODULO,
   // S390
   DISTINCT_OPS,
   GENERAL_INSTR_EXT,
   FLOATING_POINT_EXT,
+  VECTOR_FACILITY,
+  MISC_INSTR_EXT2,
 
   NUMBER_OF_CPU_FEATURES,
 
@@ -891,6 +898,14 @@
   WITH_SCOPE       // The scope introduced by with.
 };
 
+// AllocationSiteMode controls whether allocations are tracked by an allocation
+// site.
+enum AllocationSiteMode {
+  DONT_TRACK_ALLOCATION_SITE,
+  TRACK_ALLOCATION_SITE,
+  LAST_ALLOCATION_SITE_MODE = TRACK_ALLOCATION_SITE
+};
+
 // The mips architecture prior to revision 5 has inverted encoding for sNaN.
 // The x87 FPU converts the sNaN to qNaN automatically when loading sNaN from
 // memory.
@@ -1080,7 +1095,7 @@
   kConciseMethod = 1 << 2,
   kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
   kDefaultConstructor = 1 << 3,
-  kSubclassConstructor = 1 << 4,
+  kDerivedConstructor = 1 << 4,
   kBaseConstructor = 1 << 5,
   kGetterFunction = 1 << 6,
   kSetterFunction = 1 << 7,
@@ -1088,9 +1103,9 @@
   kModule = 1 << 9,
   kAccessorFunction = kGetterFunction | kSetterFunction,
   kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
-  kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
+  kDefaultDerivedConstructor = kDefaultConstructor | kDerivedConstructor,
   kClassConstructor =
-      kBaseConstructor | kSubclassConstructor | kDefaultConstructor,
+      kBaseConstructor | kDerivedConstructor | kDefaultConstructor,
   kAsyncArrowFunction = kArrowFunction | kAsyncFunction,
   kAsyncConciseMethod = kAsyncFunction | kConciseMethod
 };
@@ -1106,9 +1121,9 @@
          kind == FunctionKind::kSetterFunction ||
          kind == FunctionKind::kAccessorFunction ||
          kind == FunctionKind::kDefaultBaseConstructor ||
-         kind == FunctionKind::kDefaultSubclassConstructor ||
+         kind == FunctionKind::kDefaultDerivedConstructor ||
          kind == FunctionKind::kBaseConstructor ||
-         kind == FunctionKind::kSubclassConstructor ||
+         kind == FunctionKind::kDerivedConstructor ||
          kind == FunctionKind::kAsyncFunction ||
          kind == FunctionKind::kAsyncArrowFunction ||
          kind == FunctionKind::kAsyncConciseMethod;
@@ -1172,10 +1187,9 @@
   return kind & FunctionKind::kBaseConstructor;
 }
 
-
-inline bool IsSubclassConstructor(FunctionKind kind) {
+inline bool IsDerivedConstructor(FunctionKind kind) {
   DCHECK(IsValidFunctionKind(kind));
-  return kind & FunctionKind::kSubclassConstructor;
+  return kind & FunctionKind::kDerivedConstructor;
 }
 
 
@@ -1194,16 +1208,25 @@
   return true;
 }
 
-enum class CallableType : unsigned { kJSFunction, kAny };
+enum class InterpreterPushArgsMode : unsigned {
+  kJSFunction,
+  kWithFinalSpread,
+  kOther
+};
 
-inline size_t hash_value(CallableType type) { return bit_cast<unsigned>(type); }
+inline size_t hash_value(InterpreterPushArgsMode mode) {
+  return bit_cast<unsigned>(mode);
+}
 
-inline std::ostream& operator<<(std::ostream& os, CallableType function_type) {
-  switch (function_type) {
-    case CallableType::kJSFunction:
+inline std::ostream& operator<<(std::ostream& os,
+                                InterpreterPushArgsMode mode) {
+  switch (mode) {
+    case InterpreterPushArgsMode::kJSFunction:
       return os << "JSFunction";
-    case CallableType::kAny:
-      return os << "Any";
+    case InterpreterPushArgsMode::kWithFinalSpread:
+      return os << "WithFinalSpread";
+    case InterpreterPushArgsMode::kOther:
+      return os << "Other";
   }
   UNREACHABLE();
   return os;
@@ -1238,24 +1261,25 @@
   };
 };
 
+// Type feedback is encoded in such a way that we can combine the feedback
+// at different points by performing an 'OR' operation. Type feedback moves
+// to a more generic type when we combine feedback.
+// kSignedSmall        -> kNumber   -> kAny
+// kInternalizedString -> kString   -> kAny
+//                        kReceiver -> kAny
 // TODO(epertoso): consider unifying this with BinaryOperationFeedback.
 class CompareOperationFeedback {
  public:
-  enum { kNone = 0x00, kSignedSmall = 0x01, kNumber = 0x3, kAny = 0x7 };
-};
-
-// Describes how exactly a frame has been dropped from stack.
-enum LiveEditFrameDropMode {
-  // No frame has been dropped.
-  LIVE_EDIT_FRAMES_UNTOUCHED,
-  // The top JS frame had been calling debug break slot stub. Patch the
-  // address this stub jumps to in the end.
-  LIVE_EDIT_FRAME_DROPPED_IN_DEBUG_SLOT_CALL,
-  // The top JS frame had been calling some C++ function. The return address
-  // gets patched automatically.
-  LIVE_EDIT_FRAME_DROPPED_IN_DIRECT_CALL,
-  LIVE_EDIT_FRAME_DROPPED_IN_RETURN_CALL,
-  LIVE_EDIT_CURRENTLY_SET_MODE
+  enum {
+    kNone = 0x00,
+    kSignedSmall = 0x01,
+    kNumber = 0x3,
+    kNumberOrOddball = 0x7,
+    kInternalizedString = 0x8,
+    kString = 0x18,
+    kReceiver = 0x20,
+    kAny = 0x7F
+  };
 };
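
A small self-contained check of the lattice encoding described in the comment above: each generic state's bit pattern is a superset of the states it generalizes, so combining feedback is a plain bitwise OR, and subset tests answer "is this site still numeric-only / string-only?". The values mirror the enum above; the helper names are illustrative:

#include <cassert>

enum CompareFeedback {
  kNone = 0x00,
  kSignedSmall = 0x01,
  kNumber = 0x03,
  kNumberOrOddball = 0x07,
  kInternalizedString = 0x08,
  kString = 0x18,
  kReceiver = 0x20,
  kAny = 0x7F
};

// Combining feedback from two observations is a plain bitwise OR.
inline int Combine(int a, int b) { return a | b; }

// A site "is at most kNumber" iff no bits outside kNumber are set.
inline bool IsNumberOnly(int feedback) { return (feedback & ~kNumber) == 0; }

int main() {
  assert(Combine(kSignedSmall, kNumber) == kNumber);         // stays numeric
  assert(Combine(kInternalizedString, kString) == kString);  // stays string
  assert(!IsNumberOnly(Combine(kSignedSmall, kString)));     // mixed -> generic
  return 0;
}
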
 
 enum class UnicodeEncoding : uint8_t {
@@ -1294,6 +1318,29 @@
   return os;
 }
 
+// Flags for the runtime function kDefineDataPropertyInLiteral. A property can
+// be enumerable or not, and, in the case of functions, the function name
+// can be set or not.
+enum class DataPropertyInLiteralFlag {
+  kNoFlags = 0,
+  kDontEnum = 1 << 0,
+  kSetFunctionName = 1 << 1
+};
+typedef base::Flags<DataPropertyInLiteralFlag> DataPropertyInLiteralFlags;
+DEFINE_OPERATORS_FOR_FLAGS(DataPropertyInLiteralFlags)
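DEFINE_OPERATORS_FOR_FLAGS wires up type-safe bitwise operators for the enum class via base::Flags, so flag sets cannot be mixed with unrelated integers. As a rough stand-in for what that buys (a simplified sketch, not the real base::Flags implementation from src/base/flags.h):

#include <cstdio>

enum class DataPropertyFlag {
  kNoFlags = 0,
  kDontEnum = 1 << 0,
  kSetFunctionName = 1 << 1
};

class DataPropertyFlags {
 public:
  DataPropertyFlags(DataPropertyFlag flag)  // NOLINT(runtime/explicit)
      : bits_(static_cast<int>(flag)) {}
  DataPropertyFlags operator|(DataPropertyFlag flag) const {
    DataPropertyFlags result(*this);
    result.bits_ |= static_cast<int>(flag);
    return result;
  }
  bool operator&(DataPropertyFlag flag) const {
    return (bits_ & static_cast<int>(flag)) != 0;
  }

 private:
  int bits_;
};

int main() {
  DataPropertyFlags flags = DataPropertyFlags(DataPropertyFlag::kDontEnum) |
                            DataPropertyFlag::kSetFunctionName;
  if (flags & DataPropertyFlag::kSetFunctionName) {
    std::printf("will set the function name\n");
  }
  return 0;
}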
+
+enum ExternalArrayType {
+  kExternalInt8Array = 1,
+  kExternalUint8Array,
+  kExternalInt16Array,
+  kExternalUint16Array,
+  kExternalInt32Array,
+  kExternalUint32Array,
+  kExternalFloat32Array,
+  kExternalFloat64Array,
+  kExternalUint8ClampedArray,
+};
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/handles-inl.h b/src/handles-inl.h
index cfaf4fb..aefdc0e 100644
--- a/src/handles-inl.h
+++ b/src/handles-inl.h
@@ -94,7 +94,6 @@
   return result;
 }
 
-
 Object** HandleScope::CreateHandle(Isolate* isolate, Object* value) {
   DCHECK(AllowHandleAllocation::IsAllowed());
   HandleScopeData* data = isolate->handle_scope_data();
diff --git a/src/handles.cc b/src/handles.cc
index 3b1902e..26e11b3 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -124,7 +124,8 @@
   prev_canonical_scope_ = handle_scope_data->canonical_scope;
   handle_scope_data->canonical_scope = this;
   root_index_map_ = new RootIndexMap(isolate);
-  identity_map_ = new IdentityMap<Object**>(isolate->heap(), &zone_);
+  identity_map_ = new IdentityMap<Object**, ZoneAllocationPolicy>(
+      isolate->heap(), ZoneAllocationPolicy(&zone_));
   canonical_level_ = handle_scope_data->level;
 }
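
The IdentityMap change above generalizes the container over an allocation policy, so a zone-backed instance delegates all allocation to its Zone; a zone policy's Delete is then typically a no-op, because the zone releases its memory in bulk. A minimal sketch of the pattern, with illustrative names only:

#include <cstddef>
#include <cstdlib>

struct MallocAllocationPolicy {
  void* New(size_t size) { return std::malloc(size); }
  static void Delete(void* p) { std::free(p); }
  // A ZoneAllocationPolicy analogue would carry a Zone* and make Delete a
  // no-op, since the zone frees everything at once.
};

template <typename V, class AllocationPolicy>
class IdentityMapSketch {
 public:
  explicit IdentityMapSketch(AllocationPolicy allocator = AllocationPolicy())
      : allocator_(allocator), values_(nullptr) {}
  ~IdentityMapSketch() { AllocationPolicy::Delete(values_); }

  void Reserve(size_t n) {
    AllocationPolicy::Delete(values_);
    values_ = static_cast<V*>(allocator_.New(sizeof(V) * n));
  }

 private:
  AllocationPolicy allocator_;
  V* values_;
};

int main() {
  IdentityMapSketch<int, MallocAllocationPolicy> map;
  map.Reserve(16);
  return 0;
}
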
 
diff --git a/src/handles.h b/src/handles.h
index 2c98209..416200b 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -5,6 +5,8 @@
 #ifndef V8_HANDLES_H_
 #define V8_HANDLES_H_
 
+#include <type_traits>
+
 #include "include/v8.h"
 #include "src/base/functional.h"
 #include "src/base/macros.h"
@@ -91,11 +93,10 @@
  public:
   V8_INLINE explicit Handle(T** location = nullptr)
       : HandleBase(reinterpret_cast<Object**>(location)) {
-    Object* a = nullptr;
-    T* b = nullptr;
-    a = b;  // Fake assignment to enforce type checks.
-    USE(a);
+    // Type check:
+    static_assert(std::is_base_of<Object, T>::value, "static type violation");
   }
+
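The replaced constructor body is a tidy before/after: the old code smuggled the constraint through a dead T* to Object* assignment, whereas static_assert states it directly and fails with a readable message. A standalone illustration with stand-in types (not V8's):

#include <type_traits>

struct Object {};
struct HeapObject : Object {};
struct NotAnObject {};

template <typename T>
struct HandleSketch {
  HandleSketch() {
    static_assert(std::is_base_of<Object, T>::value, "static type violation");
  }
};

int main() {
  HandleSketch<HeapObject> ok;  // compiles
  // HandleSketch<NotAnObject> bad;  // error: static type violation
  (void)ok;
  return 0;
}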
   V8_INLINE explicit Handle(T* object) : Handle(object, object->GetIsolate()) {}
   V8_INLINE Handle(T* object, Isolate* isolate) : HandleBase(object, isolate) {}
 
@@ -330,7 +331,7 @@
 
 
 // Forward declarations for CanonicalHandleScope.
-template <typename V>
+template <typename V, class AllocationPolicy>
 class IdentityMap;
 class RootIndexMap;
 
@@ -351,7 +352,7 @@
   Isolate* isolate_;
   Zone zone_;
   RootIndexMap* root_index_map_;
-  IdentityMap<Object**>* identity_map_;
+  IdentityMap<Object**, ZoneAllocationPolicy>* identity_map_;
   // Ordinary nested handle scopes within the current one are not canonical.
   int canonical_level_;
   // We may have nested canonical scopes. Handles are canonical within each one.
@@ -360,8 +361,7 @@
   friend class HandleScope;
 };
 
-
-class DeferredHandleScope final {
+class V8_EXPORT_PRIVATE DeferredHandleScope final {
  public:
   explicit DeferredHandleScope(Isolate* isolate);
   // The DeferredHandles object returned stores the Handles created
diff --git a/src/heap-symbols.h b/src/heap-symbols.h
index cee9000..49285ee 100644
--- a/src/heap-symbols.h
+++ b/src/heap-symbols.h
@@ -6,6 +6,7 @@
 #define V8_HEAP_SYMBOLS_H_
 
 #define INTERNALIZED_STRING_LIST(V)                                \
+  V(anonymous_function_string, "(anonymous function)")             \
   V(anonymous_string, "anonymous")                                 \
   V(apply_string, "apply")                                         \
   V(arguments_string, "arguments")                                 \
@@ -14,6 +15,8 @@
   V(Array_string, "Array")                                         \
   V(ArrayIterator_string, "Array Iterator")                        \
   V(assign_string, "assign")                                       \
+  V(async_string, "async")                                         \
+  V(await_string, "await")                                         \
   V(array_to_string, "[object Array]")                             \
   V(boolean_to_string, "[object Boolean]")                         \
   V(date_to_string, "[object Date]")                               \
@@ -24,12 +27,6 @@
   V(regexp_to_string, "[object RegExp]")                           \
   V(string_to_string, "[object String]")                           \
   V(bind_string, "bind")                                           \
-  V(bool16x8_string, "bool16x8")                                   \
-  V(Bool16x8_string, "Bool16x8")                                   \
-  V(bool32x4_string, "bool32x4")                                   \
-  V(Bool32x4_string, "Bool32x4")                                   \
-  V(bool8x16_string, "bool8x16")                                   \
-  V(Bool8x16_string, "Bool8x16")                                   \
   V(boolean_string, "boolean")                                     \
   V(Boolean_string, "Boolean")                                     \
   V(bound__string, "bound ")                                       \
@@ -57,7 +54,12 @@
   V(did_handle_string, "didHandle")                                \
   V(display_name_string, "displayName")                            \
   V(done_string, "done")                                           \
+  V(dot_catch_string, ".catch")                                    \
+  V(dot_for_string, ".for")                                        \
+  V(dot_generator_object_string, ".generator_object")              \
+  V(dot_iterator_string, ".iterator")                              \
   V(dot_result_string, ".result")                                  \
+  V(dot_switch_tag_string, ".switch_tag")                          \
   V(dot_string, ".")                                               \
   V(exec_string, "exec")                                           \
   V(entries_string, "entries")                                     \
@@ -69,10 +71,6 @@
   V(EvalError_string, "EvalError")                                 \
   V(false_string, "false")                                         \
   V(flags_string, "flags")                                         \
-  V(float32x4_string, "float32x4")                                 \
-  V(Float32x4_string, "Float32x4")                                 \
-  V(for_api_string, "for_api")                                     \
-  V(for_string, "for")                                             \
   V(function_string, "function")                                   \
   V(Function_string, "Function")                                   \
   V(Generator_string, "Generator")                                 \
@@ -80,7 +78,9 @@
   V(getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
   V(getPrototypeOf_string, "getPrototypeOf")                       \
   V(get_string, "get")                                             \
+  V(get_space_string, "get ")                                      \
   V(global_string, "global")                                       \
+  V(group_string, "group")                                         \
   V(has_string, "has")                                             \
   V(hour_string, "hour")                                           \
   V(ignoreCase_string, "ignoreCase")                               \
@@ -89,12 +89,6 @@
   V(index_string, "index")                                         \
   V(infinity_string, "Infinity")                                   \
   V(input_string, "input")                                         \
-  V(int16x8_string, "int16x8")                                     \
-  V(Int16x8_string, "Int16x8")                                     \
-  V(int32x4_string, "int32x4")                                     \
-  V(Int32x4_string, "Int32x4")                                     \
-  V(int8x16_string, "int8x16")                                     \
-  V(Int8x16_string, "Int8x16")                                     \
   V(isExtensible_string, "isExtensible")                           \
   V(isView_string, "isView")                                       \
   V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic")           \
@@ -102,6 +96,7 @@
   V(keys_string, "keys")                                           \
   V(lastIndex_string, "lastIndex")                                 \
   V(length_string, "length")                                       \
+  V(let_string, "let")                                             \
   V(line_string, "line")                                           \
   V(literal_string, "literal")                                     \
   V(Map_string, "Map")                                             \
@@ -109,10 +104,13 @@
   V(minus_infinity_string, "-Infinity")                            \
   V(minus_zero_string, "-0")                                       \
   V(minute_string, "minute")                                       \
+  V(Module_string, "Module")                                       \
   V(month_string, "month")                                         \
   V(multiline_string, "multiline")                                 \
   V(name_string, "name")                                           \
+  V(native_string, "native")                                       \
   V(nan_string, "NaN")                                             \
+  V(new_target_string, ".new.target")                              \
   V(next_string, "next")                                           \
   V(not_equal, "not-equal")                                        \
   V(null_string, "null")                                           \
@@ -125,9 +123,9 @@
   V(ownKeys_string, "ownKeys")                                     \
   V(position_string, "position")                                   \
   V(preventExtensions_string, "preventExtensions")                 \
-  V(private_api_string, "private_api")                             \
   V(Promise_string, "Promise")                                     \
   V(PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
+  V(promise_string, "promise")                                     \
   V(proto_string, "__proto__")                                     \
   V(prototype_string, "prototype")                                 \
   V(Proxy_string, "Proxy")                                         \
@@ -135,9 +133,13 @@
   V(RangeError_string, "RangeError")                               \
   V(ReferenceError_string, "ReferenceError")                       \
   V(RegExp_string, "RegExp")                                       \
+  V(reject_string, "reject")                                       \
+  V(resolve_string, "resolve")                                     \
+  V(return_string, "return")                                       \
   V(script_string, "script")                                       \
   V(second_string, "second")                                       \
   V(setPrototypeOf_string, "setPrototypeOf")                       \
+  V(set_space_string, "set ")                                      \
   V(set_string, "set")                                             \
   V(Set_string, "Set")                                             \
   V(source_mapping_url_string, "source_mapping_url")               \
@@ -146,13 +148,17 @@
   V(source_url_string, "source_url")                               \
   V(stack_string, "stack")                                         \
   V(stackTraceLimit_string, "stackTraceLimit")                     \
+  V(star_default_star_string, "*default*")                         \
   V(sticky_string, "sticky")                                       \
   V(strict_compare_ic_string, "===")                               \
   V(string_string, "string")                                       \
   V(String_string, "String")                                       \
   V(symbol_string, "symbol")                                       \
   V(Symbol_string, "Symbol")                                       \
+  V(symbol_species_string, "[Symbol.species]")                     \
   V(SyntaxError_string, "SyntaxError")                             \
+  V(then_string, "then")                                           \
+  V(this_function_string, ".this_function")                        \
   V(this_string, "this")                                           \
   V(throw_string, "throw")                                         \
   V(timed_out, "timed-out")                                        \
@@ -163,16 +169,13 @@
   V(TypeError_string, "TypeError")                                 \
   V(type_string, "type")                                           \
   V(CompileError_string, "CompileError")                           \
+  V(LinkError_string, "LinkError")                                 \
   V(RuntimeError_string, "RuntimeError")                           \
-  V(uint16x8_string, "uint16x8")                                   \
-  V(Uint16x8_string, "Uint16x8")                                   \
-  V(uint32x4_string, "uint32x4")                                   \
-  V(Uint32x4_string, "Uint32x4")                                   \
-  V(uint8x16_string, "uint8x16")                                   \
-  V(Uint8x16_string, "Uint8x16")                                   \
   V(undefined_string, "undefined")                                 \
   V(undefined_to_string, "[object Undefined]")                     \
   V(unicode_string, "unicode")                                     \
+  V(use_asm_string, "use asm")                                     \
+  V(use_strict_string, "use strict")                               \
   V(URIError_string, "URIError")                                   \
   V(valueOf_string, "valueOf")                                     \
   V(values_string, "values")                                       \
@@ -184,58 +187,54 @@
   V(writable_string, "writable")                                   \
   V(year_string, "year")
 
-#define PRIVATE_SYMBOL_LIST(V)         \
-  V(array_iteration_kind_symbol)       \
-  V(array_iterator_next_symbol)        \
-  V(array_iterator_object_symbol)      \
-  V(call_site_frame_array_symbol)      \
-  V(call_site_frame_index_symbol)      \
-  V(class_end_position_symbol)         \
-  V(class_start_position_symbol)       \
-  V(detailed_stack_trace_symbol)       \
-  V(elements_transition_symbol)        \
-  V(error_end_pos_symbol)              \
-  V(error_script_symbol)               \
-  V(error_start_pos_symbol)            \
-  V(frozen_symbol)                     \
-  V(hash_code_symbol)                  \
-  V(home_object_symbol)                \
-  V(intl_impl_object_symbol)           \
-  V(intl_initialized_marker_symbol)    \
-  V(intl_pattern_symbol)               \
-  V(intl_resolved_symbol)              \
-  V(megamorphic_symbol)                \
-  V(native_context_index_symbol)       \
-  V(nonexistent_symbol)                \
-  V(nonextensible_symbol)              \
-  V(normal_ic_symbol)                  \
-  V(not_mapped_symbol)                 \
-  V(premonomorphic_symbol)             \
-  V(promise_async_stack_id_symbol)     \
-  V(promise_debug_marker_symbol)       \
-  V(promise_deferred_reaction_symbol)  \
-  V(promise_forwarding_handler_symbol) \
-  V(promise_fulfill_reactions_symbol)  \
-  V(promise_handled_by_symbol)         \
-  V(promise_handled_hint_symbol)       \
-  V(promise_has_handler_symbol)        \
-  V(promise_raw_symbol)                \
-  V(promise_reject_reactions_symbol)   \
-  V(promise_result_symbol)             \
-  V(promise_state_symbol)              \
-  V(sealed_symbol)                     \
-  V(stack_trace_symbol)                \
-  V(strict_function_transition_symbol) \
+#define PRIVATE_SYMBOL_LIST(V)              \
+  V(array_iteration_kind_symbol)            \
+  V(array_iterator_next_symbol)             \
+  V(array_iterator_object_symbol)           \
+  V(call_site_frame_array_symbol)           \
+  V(call_site_frame_index_symbol)           \
+  V(class_end_position_symbol)              \
+  V(class_start_position_symbol)            \
+  V(detailed_stack_trace_symbol)            \
+  V(elements_transition_symbol)             \
+  V(error_end_pos_symbol)                   \
+  V(error_script_symbol)                    \
+  V(error_start_pos_symbol)                 \
+  V(frozen_symbol)                          \
+  V(hash_code_symbol)                       \
+  V(home_object_symbol)                     \
+  V(intl_initialized_marker_symbol)         \
+  V(intl_pattern_symbol)                    \
+  V(intl_resolved_symbol)                   \
+  V(megamorphic_symbol)                     \
+  V(native_context_index_symbol)            \
+  V(nonexistent_symbol)                     \
+  V(nonextensible_symbol)                   \
+  V(normal_ic_symbol)                       \
+  V(not_mapped_symbol)                      \
+  V(premonomorphic_symbol)                  \
+  V(promise_async_stack_id_symbol)          \
+  V(promise_debug_marker_symbol)            \
+  V(promise_forwarding_handler_symbol)      \
+  V(promise_handled_by_symbol)              \
+  V(promise_async_id_symbol)                \
+  V(promise_default_resolve_handler_symbol) \
+  V(promise_default_reject_handler_symbol)  \
+  V(sealed_symbol)                          \
+  V(stack_trace_symbol)                     \
+  V(strict_function_transition_symbol)      \
   V(uninitialized_symbol)
 
-#define PUBLIC_SYMBOL_LIST(V)                \
-  V(iterator_symbol, Symbol.iterator)        \
-  V(match_symbol, Symbol.match)              \
-  V(replace_symbol, Symbol.replace)          \
-  V(search_symbol, Symbol.search)            \
-  V(species_symbol, Symbol.species)          \
-  V(split_symbol, Symbol.split)              \
-  V(to_primitive_symbol, Symbol.toPrimitive) \
+#define PUBLIC_SYMBOL_LIST(V)                    \
+  V(async_iterator_symbol, Symbol.asyncIterator) \
+  V(iterator_symbol, Symbol.iterator)            \
+  V(intl_fallback_symbol, IntlFallback)          \
+  V(match_symbol, Symbol.match)                  \
+  V(replace_symbol, Symbol.replace)              \
+  V(search_symbol, Symbol.search)                \
+  V(species_symbol, Symbol.species)              \
+  V(split_symbol, Symbol.split)                  \
+  V(to_primitive_symbol, Symbol.toPrimitive)     \
   V(unscopables_symbol, Symbol.unscopables)
 
 // Well-Known Symbols are "Public" symbols, which have a bit set which causes
diff --git a/src/heap/array-buffer-tracker.cc b/src/heap/array-buffer-tracker.cc
index 62b848e..d7bbb94 100644
--- a/src/heap/array-buffer-tracker.cc
+++ b/src/heap/array-buffer-tracker.cc
@@ -19,8 +19,7 @@
   for (TrackingData::iterator it = array_buffers_.begin();
        it != array_buffers_.end();) {
     JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
-    if ((free_mode == kFreeAll) ||
-        Marking::IsWhite(ObjectMarking::MarkBitFrom(buffer))) {
+    if ((free_mode == kFreeAll) || ObjectMarking::IsWhite(buffer)) {
       const size_t len = it->second;
       heap_->isolate()->array_buffer_allocator()->Free(buffer->backing_store(),
                                                        len);
@@ -78,8 +77,8 @@
 
 void ArrayBufferTracker::FreeDeadInNewSpace(Heap* heap) {
   DCHECK_EQ(heap->gc_state(), Heap::HeapState::SCAVENGE);
-  for (Page* page : NewSpacePageRange(heap->new_space()->FromSpaceStart(),
-                                      heap->new_space()->FromSpaceEnd())) {
+  for (Page* page : PageRange(heap->new_space()->FromSpaceStart(),
+                              heap->new_space()->FromSpaceEnd())) {
     bool empty = ProcessBuffers(page, kUpdateForwardedRemoveOthers);
     CHECK(empty);
   }
diff --git a/src/heap/embedder-tracing.cc b/src/heap/embedder-tracing.cc
new file mode 100644
index 0000000..2d11724
--- /dev/null
+++ b/src/heap/embedder-tracing.cc
@@ -0,0 +1,72 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/embedder-tracing.h"
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+void LocalEmbedderHeapTracer::TracePrologue() {
+  if (!InUse()) return;
+
+  CHECK(cached_wrappers_to_trace_.empty());
+  num_v8_marking_deque_was_empty_ = 0;
+  remote_tracer_->TracePrologue();
+}
+
+void LocalEmbedderHeapTracer::TraceEpilogue() {
+  if (!InUse()) return;
+
+  CHECK(cached_wrappers_to_trace_.empty());
+  remote_tracer_->TraceEpilogue();
+}
+
+void LocalEmbedderHeapTracer::AbortTracing() {
+  if (!InUse()) return;
+
+  cached_wrappers_to_trace_.clear();
+  remote_tracer_->AbortTracing();
+}
+
+void LocalEmbedderHeapTracer::EnterFinalPause() {
+  if (!InUse()) return;
+
+  remote_tracer_->EnterFinalPause();
+}
+
+bool LocalEmbedderHeapTracer::Trace(
+    double deadline, EmbedderHeapTracer::AdvanceTracingActions actions) {
+  if (!InUse()) return false;
+
+  DCHECK_EQ(0, NumberOfCachedWrappersToTrace());
+  return remote_tracer_->AdvanceTracing(deadline, actions);
+}
+
+size_t LocalEmbedderHeapTracer::NumberOfWrappersToTrace() {
+  return (InUse())
+             ? cached_wrappers_to_trace_.size() +
+                   remote_tracer_->NumberOfWrappersToTrace()
+             : 0;
+}
+
+void LocalEmbedderHeapTracer::RegisterWrappersWithRemoteTracer() {
+  if (!InUse()) return;
+
+  if (cached_wrappers_to_trace_.empty()) {
+    return;
+  }
+
+  remote_tracer_->RegisterV8References(cached_wrappers_to_trace_);
+  cached_wrappers_to_trace_.clear();
+}
+
+bool LocalEmbedderHeapTracer::RequiresImmediateWrapperProcessing() {
+  const size_t kTooManyWrappers = 16000;
+  return cached_wrappers_to_trace_.size() > kTooManyWrappers;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/embedder-tracing.h b/src/heap/embedder-tracing.h
new file mode 100644
index 0000000..5e10d6e
--- /dev/null
+++ b/src/heap/embedder-tracing.h
@@ -0,0 +1,67 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_EMBEDDER_TRACING_H_
+#define V8_HEAP_EMBEDDER_TRACING_H_
+
+#include "include/v8.h"
+#include "src/flags.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
+ public:
+  typedef std::pair<void*, void*> WrapperInfo;
+
+  LocalEmbedderHeapTracer()
+      : remote_tracer_(nullptr), num_v8_marking_deque_was_empty_(0) {}
+
+  void SetRemoteTracer(EmbedderHeapTracer* tracer) { remote_tracer_ = tracer; }
+  bool InUse() { return remote_tracer_ != nullptr; }
+
+  void TracePrologue();
+  void TraceEpilogue();
+  void AbortTracing();
+  void EnterFinalPause();
+  bool Trace(double deadline,
+             EmbedderHeapTracer::AdvanceTracingActions actions);
+
+  size_t NumberOfWrappersToTrace();
+  size_t NumberOfCachedWrappersToTrace() {
+    return cached_wrappers_to_trace_.size();
+  }
+  void AddWrapperToTrace(WrapperInfo entry) {
+    cached_wrappers_to_trace_.push_back(entry);
+  }
+  void ClearCachedWrappersToTrace() { cached_wrappers_to_trace_.clear(); }
+  void RegisterWrappersWithRemoteTracer();
+
+  // In order to avoid running out of memory, we force tracing of wrappers if
+  // there are too many of them.
+  bool RequiresImmediateWrapperProcessing();
+
+  void NotifyV8MarkingDequeWasEmpty() { num_v8_marking_deque_was_empty_++; }
+  bool ShouldFinalizeIncrementalMarking() {
+    static const size_t kMaxIncrementalFixpointRounds = 3;
+    return !FLAG_incremental_marking_wrappers || !InUse() ||
+           NumberOfWrappersToTrace() == 0 ||
+           num_v8_marking_deque_was_empty_ > kMaxIncrementalFixpointRounds;
+  }
+
+ private:
+  typedef std::vector<WrapperInfo> WrapperCache;
+
+  EmbedderHeapTracer* remote_tracer_;
+  WrapperCache cached_wrappers_to_trace_;
+  size_t num_v8_marking_deque_was_empty_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_EMBEDDER_TRACING_H_
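LocalEmbedderHeapTracer is essentially a null-guarding proxy plus a local batch: every forwarded call first checks InUse(), so GC code never has to null-check the embedder's tracer, and discovered wrappers are cached locally until handed over in a single RegisterV8References() call. A self-contained sketch of that shape, using a stub interface rather than the real v8::EmbedderHeapTracer:

#include <cstddef>
#include <utility>
#include <vector>

class RemoteTracerStub {
 public:
  virtual ~RemoteTracerStub() = default;
  virtual void TracePrologue() = 0;
  virtual void RegisterV8References(
      const std::vector<std::pair<void*, void*>>& wrappers) = 0;
};

class LocalTracerSketch {
 public:
  void SetRemoteTracer(RemoteTracerStub* tracer) { remote_ = tracer; }
  bool InUse() const { return remote_ != nullptr; }

  void TracePrologue() {
    if (!InUse()) return;  // No embedder registered: tracing is a no-op.
    remote_->TracePrologue();
  }

  void AddWrapperToTrace(std::pair<void*, void*> entry) {
    cached_.push_back(entry);
  }

  // Wrappers are batched locally and handed over in one call.
  void RegisterWrappersWithRemoteTracer() {
    if (!InUse() || cached_.empty()) return;
    remote_->RegisterV8References(cached_);
    cached_.clear();
  }

 private:
  RemoteTracerStub* remote_ = nullptr;
  std::vector<std::pair<void*, void*>> cached_;
};

class CountingTracer : public RemoteTracerStub {
 public:
  void TracePrologue() override {}
  void RegisterV8References(
      const std::vector<std::pair<void*, void*>>& wrappers) override {
    count_ += wrappers.size();
  }
  size_t count_ = 0;
};

int main() {
  LocalTracerSketch local;
  local.TracePrologue();  // Safe no-op: no remote tracer registered yet.
  CountingTracer remote;
  local.SetRemoteTracer(&remote);
  local.AddWrapperToTrace({nullptr, nullptr});
  local.RegisterWrappersWithRemoteTracer();
  return 0;
}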
diff --git a/src/heap/gc-idle-time-handler.cc b/src/heap/gc-idle-time-handler.cc
index 0c411f7..905514c 100644
--- a/src/heap/gc-idle-time-handler.cc
+++ b/src/heap/gc-idle-time-handler.cc
@@ -146,6 +146,7 @@
   return GCIdleTimeAction::IncrementalStep();
 }
 
+bool GCIdleTimeHandler::Enabled() { return FLAG_incremental_marking; }
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/gc-idle-time-handler.h b/src/heap/gc-idle-time-handler.h
index 7ce0c1a..b730a7b 100644
--- a/src/heap/gc-idle-time-handler.h
+++ b/src/heap/gc-idle-time-handler.h
@@ -125,6 +125,8 @@
   GCIdleTimeAction Compute(double idle_time_in_ms,
                            GCIdleTimeHeapState heap_state);
 
+  bool Enabled();
+
   void ResetNoProgressCounter() { idle_times_which_made_no_progress_ = 0; }
 
   static size_t EstimateMarkingStepSize(double idle_time_in_ms,
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
index dcd319f..2c1024f 100644
--- a/src/heap/gc-tracer.cc
+++ b/src/heap/gc-tracer.cc
@@ -140,6 +140,13 @@
   start_counter_ = 0;
 }
 
+void GCTracer::NotifyYoungGenerationHandling(
+    YoungGenerationHandling young_generation_handling) {
+  DCHECK(current_.type == Event::SCAVENGER || start_counter_ > 1);
+  heap_->isolate()->counters()->young_generation_handling()->AddSample(
+      static_cast<int>(young_generation_handling));
+}
+
 void GCTracer::Start(GarbageCollector collector,
                      GarbageCollectionReason gc_reason,
                      const char* collector_reason) {
@@ -174,8 +181,7 @@
   current_.start_object_size = heap_->SizeOfObjects();
   current_.start_memory_size = heap_->memory_allocator()->Size();
   current_.start_holes_size = CountTotalHolesSize(heap_);
-  current_.new_space_object_size =
-      heap_->new_space()->top() - heap_->new_space()->bottom();
+  current_.new_space_object_size = heap_->new_space()->Size();
 
   current_.incremental_marking_bytes = 0;
   current_.incremental_marking_duration = 0;
@@ -446,6 +452,7 @@
           "gc=%s "
           "reduce_memory=%d "
           "scavenge=%.2f "
+          "evacuate=%.2f "
           "old_new=%.2f "
           "weak=%.2f "
           "roots=%.2f "
@@ -482,6 +489,7 @@
           "context_disposal_rate=%.1f\n",
           duration, spent_in_mutator, current_.TypeName(true),
           current_.reduce_memory, current_.scopes[Scope::SCAVENGER_SCAVENGE],
+          current_.scopes[Scope::SCAVENGER_EVACUATE],
           current_.scopes[Scope::SCAVENGER_OLD_TO_NEW_POINTERS],
           current_.scopes[Scope::SCAVENGER_WEAK],
           current_.scopes[Scope::SCAVENGER_ROOTS],
@@ -510,9 +518,14 @@
           "pause=%.1f "
           "mutator=%.1f "
           "gc=%s "
-          "reduce_memory=%d\n",
-          duration, spent_in_mutator, current_.TypeName(true),
-          current_.reduce_memory);
+          "reduce_memory=%d "
+          "mark=%.2f "
+          "mark.roots=%.2f "
+          "mark.old_to_new=%.2f\n",
+          duration, spent_in_mutator, "mmc", current_.reduce_memory,
+          current_.scopes[Scope::MINOR_MC_MARK],
+          current_.scopes[Scope::MINOR_MC_MARK_ROOTS],
+          current_.scopes[Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS]);
       break;
     case Event::MARK_COMPACTOR:
     case Event::INCREMENTAL_MARK_COMPACTOR:
@@ -537,6 +550,9 @@
           "evacuate.candidates=%.1f "
           "evacuate.clean_up=%.1f "
           "evacuate.copy=%.1f "
+          "evacuate.prologue=%.1f "
+          "evacuate.epilogue=%.1f "
+          "evacuate.rebalance=%.1f "
           "evacuate.update_pointers=%.1f "
           "evacuate.update_pointers.to_evacuated=%.1f "
           "evacuate.update_pointers.to_new=%.1f "
@@ -620,6 +636,9 @@
           current_.scopes[Scope::MC_EVACUATE_CANDIDATES],
           current_.scopes[Scope::MC_EVACUATE_CLEAN_UP],
           current_.scopes[Scope::MC_EVACUATE_COPY],
+          current_.scopes[Scope::MC_EVACUATE_PROLOGUE],
+          current_.scopes[Scope::MC_EVACUATE_EPILOGUE],
+          current_.scopes[Scope::MC_EVACUATE_REBALANCE],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED],
           current_.scopes[Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
index ed62dee..b206286 100644
--- a/src/heap/gc-tracer.h
+++ b/src/heap/gc-tracer.h
@@ -56,6 +56,9 @@
   F(MC_EVACUATE_CANDIDATES)                   \
   F(MC_EVACUATE_CLEAN_UP)                     \
   F(MC_EVACUATE_COPY)                         \
+  F(MC_EVACUATE_EPILOGUE)                     \
+  F(MC_EVACUATE_PROLOGUE)                     \
+  F(MC_EVACUATE_REBALANCE)                    \
   F(MC_EVACUATE_UPDATE_POINTERS)              \
   F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
   F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW)       \
@@ -79,7 +82,14 @@
   F(MC_SWEEP_CODE)                            \
   F(MC_SWEEP_MAP)                             \
   F(MC_SWEEP_OLD)                             \
+  F(MINOR_MC_MARK)                            \
+  F(MINOR_MC_MARK_CODE_FLUSH_CANDIDATES)      \
+  F(MINOR_MC_MARK_GLOBAL_HANDLES)             \
+  F(MINOR_MC_MARK_OLD_TO_NEW_POINTERS)        \
+  F(MINOR_MC_MARK_ROOTS)                      \
+  F(MINOR_MC_MARK_WEAK)                       \
   F(SCAVENGER_CODE_FLUSH_CANDIDATES)          \
+  F(SCAVENGER_EVACUATE)                       \
   F(SCAVENGER_OLD_TO_NEW_POINTERS)            \
   F(SCAVENGER_ROOTS)                          \
   F(SCAVENGER_SCAVENGE)                       \
@@ -228,6 +238,9 @@
   // Stop collecting data and print results.
   void Stop(GarbageCollector collector);
 
+  void NotifyYoungGenerationHandling(
+      YoungGenerationHandling young_generation_handling);
+
   // Sample and accumulate bytes allocated since the last GC.
   void SampleAllocation(double current_ms, size_t new_space_counter_bytes,
                         size_t old_generation_counter_bytes);
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index 7d0d241..9cf0475 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -9,6 +9,7 @@
 
 #include "src/base/platform/platform.h"
 #include "src/counters-inl.h"
+#include "src/feedback-vector-inl.h"
 #include "src/heap/heap.h"
 #include "src/heap/incremental-marking-inl.h"
 #include "src/heap/mark-compact.h"
@@ -21,7 +22,7 @@
 #include "src/log.h"
 #include "src/msan.h"
 #include "src/objects-inl.h"
-#include "src/type-feedback-vector-inl.h"
+#include "src/objects/scope-info.h"
 
 namespace v8 {
 namespace internal {
@@ -224,6 +225,8 @@
 AllocationResult Heap::AllocateOneByteInternalizedString(
     Vector<const uint8_t> str, uint32_t hash_field) {
   CHECK_GE(String::kMaxLength, str.length());
+  // The canonical empty_string is the only zero-length string we allow.
+  DCHECK_IMPLIES(str.length() == 0, roots_[kempty_stringRootIndex] == nullptr);
   // Compute map and object size.
   Map* map = one_byte_internalized_string_map();
   int size = SeqOneByteString::SizeFor(str.length());
@@ -255,6 +258,7 @@
 AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
                                                          uint32_t hash_field) {
   CHECK_GE(String::kMaxLength, str.length());
+  DCHECK_NE(0, str.length());  // Use Heap::empty_string() instead.
   // Compute map and object size.
   Map* map = internalized_string_map();
   int size = SeqTwoByteString::SizeFor(str.length());
@@ -688,6 +692,10 @@
       reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
 }
 
+void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
+  old_space_strings_.AddAll(new_space_strings_);
+  new_space_strings_.Clear();
+}
 
 void Heap::ExternalStringTable::AddString(String* string) {
   DCHECK(string->IsExternalString());
@@ -698,12 +706,15 @@
   }
 }
 
-
-void Heap::ExternalStringTable::Iterate(ObjectVisitor* v) {
+void Heap::ExternalStringTable::IterateNewSpaceStrings(ObjectVisitor* v) {
   if (!new_space_strings_.is_empty()) {
     Object** start = &new_space_strings_[0];
     v->VisitPointers(start, start + new_space_strings_.length());
   }
+}
+
+void Heap::ExternalStringTable::IterateAll(ObjectVisitor* v) {
+  IterateNewSpaceStrings(v);
   if (!old_space_strings_.is_empty()) {
     Object** start = &old_space_strings_[0];
     v->VisitPointers(start, start + old_space_strings_.length());
@@ -781,9 +792,14 @@
   set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
 
-void Heap::SetConstructStubDeoptPCOffset(int pc_offset) {
-  DCHECK(construct_stub_deopt_pc_offset() == Smi::kZero);
-  set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
+  DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero);
+  set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
+}
+
+void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
+  DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero);
+  set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
 }
 
 void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
@@ -809,9 +825,16 @@
 
 void Heap::SetSerializedTemplates(FixedArray* templates) {
   DCHECK_EQ(empty_fixed_array(), serialized_templates());
+  DCHECK(isolate()->serializer_enabled());
   set_serialized_templates(templates);
 }
 
+void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
+  DCHECK_EQ(empty_fixed_array(), serialized_global_proxy_sizes());
+  DCHECK(isolate()->serializer_enabled());
+  set_serialized_global_proxy_sizes(sizes);
+}
+
 void Heap::CreateObjectStats() {
   if (V8_LIKELY(FLAG_gc_stats == 0)) return;
   if (!live_object_stats_) {
@@ -839,6 +862,8 @@
       HeapObject* object = HeapObject::cast(*current);
       CHECK(object->GetIsolate()->heap()->Contains(object));
       CHECK(object->map()->IsMap());
+    } else {
+      CHECK((*current)->IsSmi());
     }
   }
 }
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 2059dae..1524a78 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -6,6 +6,7 @@
 
 #include "src/accessors.h"
 #include "src/api.h"
+#include "src/assembler-inl.h"
 #include "src/ast/context-slot-cache.h"
 #include "src/base/bits.h"
 #include "src/base/once.h"
@@ -17,9 +18,11 @@
 #include "src/conversions.h"
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
+#include "src/feedback-vector.h"
 #include "src/global-handles.h"
 #include "src/heap/array-buffer-tracker-inl.h"
 #include "src/heap/code-stats.h"
+#include "src/heap/embedder-tracing.h"
 #include "src/heap/gc-idle-time-handler.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/incremental-marking.h"
@@ -40,7 +43,6 @@
 #include "src/snapshot/serializer-common.h"
 #include "src/snapshot/snapshot.h"
 #include "src/tracing/trace-event.h"
-#include "src/type-feedback-vector.h"
 #include "src/utils.h"
 #include "src/v8.h"
 #include "src/v8threads.h"
@@ -80,6 +82,7 @@
       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
       initial_semispace_size_(MB),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
+      initial_max_old_generation_size_(max_old_generation_size_),
       initial_old_generation_size_(max_old_generation_size_ /
                                    kInitalOldGenerationLimitFactor),
       old_generation_size_configured_(false),
@@ -93,6 +96,8 @@
       survived_last_scavenge_(0),
       always_allocate_scope_count_(0),
       memory_pressure_level_(MemoryPressureLevel::kNone),
+      out_of_memory_callback_(nullptr),
+      out_of_memory_callback_data_(nullptr),
       contexts_disposed_(0),
       number_of_disposed_maps_(0),
       global_ic_age_(0),
@@ -113,7 +118,6 @@
 #endif  // DEBUG
       old_generation_allocation_limit_(initial_old_generation_size_),
       inline_allocation_disabled_(false),
-      total_regexp_code_generated_(0),
       tracer_(nullptr),
       promoted_objects_size_(0),
       promotion_ratio_(0),
@@ -137,8 +141,6 @@
       dead_object_stats_(nullptr),
       scavenge_job_(nullptr),
       idle_scavenge_observer_(nullptr),
-      full_codegen_bytes_generated_(0),
-      crankshaft_codegen_bytes_generated_(0),
       new_space_allocation_counter_(0),
       old_generation_allocation_counter_at_last_gc_(0),
       old_generation_size_at_last_gc_(0),
@@ -155,9 +157,11 @@
       deserialization_complete_(false),
       strong_roots_list_(NULL),
       heap_iterator_depth_(0),
-      embedder_heap_tracer_(nullptr),
+      local_embedder_heap_tracer_(nullptr),
+      fast_promotion_mode_(false),
       force_oom_(false),
-      delay_sweeper_tasks_for_testing_(false) {
+      delay_sweeper_tasks_for_testing_(false),
+      pending_layout_change_object_(nullptr) {
 // Allow build-time customization of the max semispace size. Building
 // V8 with snapshots and a non-default max semispace size is much
 // easier if you can define it as part of the build environment.
@@ -292,6 +296,9 @@
   return YoungGenerationCollector();
 }
 
+void Heap::SetGCState(HeapState state) {
+  gc_state_ = state;
+}
 
 // TODO(1238405): Combine the infrastructure for --heap-stats and
 // --log-gc to avoid the complicated preprocessor and flag testing.
@@ -442,7 +449,6 @@
   }
   CheckNewSpaceExpansionCriteria();
   UpdateNewSpaceAllocationCounter();
-  store_buffer()->MoveAllEntriesToRememberedSet();
 }
 
 size_t Heap::SizeOfObjects() {
@@ -510,6 +516,22 @@
   }
 }
 
+class Heap::SkipStoreBufferScope {
+ public:
+  explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer) {
+    store_buffer_->MoveAllEntriesToRememberedSet();
+    store_buffer_->SetMode(StoreBuffer::IN_GC);
+  }
+
+  ~SkipStoreBufferScope() {
+    DCHECK(store_buffer_->Empty());
+    store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
+  }
+
+ private:
+  StoreBuffer* store_buffer_;
+};
 
 class Heap::PretenuringScope {
  public:
@@ -861,6 +883,10 @@
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callbacks invocations.
   // Therefore stop recollecting after several attempts.
+  if (gc_reason == GarbageCollectionReason::kLastResort) {
+    InvokeOutOfMemoryCallback();
+  }
+  RuntimeCallTimerScope runtime_timer(isolate(), &RuntimeCallStats::GC_AllAvailableGarbage);
   if (isolate()->concurrent_recompilation_enabled()) {
     // The optimizing compiler may be unnecessarily holding on to memory.
     DisallowHeapAllocation no_recursive_gc;
@@ -943,6 +969,7 @@
                           const v8::GCCallbackFlags gc_callback_flags) {
   // The VM is in the GC state until exiting this function.
   VMState<GC> state(isolate_);
+  RuntimeCallTimerScope runtime_timer(isolate(), &RuntimeCallStats::GC);
 
 #ifdef DEBUG
   // Reset the allocation timeout to the GC interval, but make sure to
@@ -963,24 +990,6 @@
     }
   }
 
-  if (collector == MARK_COMPACTOR && FLAG_incremental_marking &&
-      !ShouldFinalizeIncrementalMarking() && !ShouldAbortIncrementalMarking() &&
-      !incremental_marking()->IsStopped() &&
-      !incremental_marking()->should_hurry() &&
-      !incremental_marking()->NeedsFinalization() &&
-      !IsCloseToOutOfMemory(new_space_->Capacity())) {
-    if (!incremental_marking()->IsComplete() &&
-        !mark_compact_collector()->marking_deque()->IsEmpty() &&
-        !FLAG_gc_global) {
-      if (FLAG_trace_incremental_marking) {
-        isolate()->PrintWithTimestamp(
-            "[IncrementalMarking] Delaying MarkSweep.\n");
-      }
-      collector = YoungGenerationCollector();
-      collector_reason = "incremental marking delaying mark-sweep";
-    }
-  }
-
   bool next_gc_likely_to_collect_more = false;
   size_t committed_memory_before = 0;
 
@@ -1022,6 +1031,7 @@
           (committed_memory_before > committed_memory_after + MB) ||
           HasHighFragmentation(used_memory_after, committed_memory_after) ||
           (detached_contexts()->length() > 0);
+      event.committed_memory = committed_memory_after;
       if (deserialization_complete_) {
         memory_reducer_->NotifyMarkCompact(event);
       }
@@ -1164,7 +1174,7 @@
             // deserializing.
             Address free_space_address = free_space->address();
             CreateFillerObjectAt(free_space_address, Map::kSize,
-                                 ClearRecordedSlots::kNo, ClearBlackArea::kNo);
+                                 ClearRecordedSlots::kNo);
             maps->Add(free_space_address);
           } else {
             perform_gc = true;
@@ -1195,7 +1205,7 @@
             // deserializing.
             Address free_space_address = free_space->address();
             CreateFillerObjectAt(free_space_address, size,
-                                 ClearRecordedSlots::kNo, ClearBlackArea::kNo);
+                                 ClearRecordedSlots::kNo);
             DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
             chunk.start = free_space_address;
             chunk.end = free_space_address + size;
@@ -1313,6 +1323,7 @@
 
   {
     Heap::PretenuringScope pretenuring_scope(this);
+    Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
 
     switch (collector) {
       case MARK_COMPACTOR:
@@ -1331,7 +1342,17 @@
         MinorMarkCompact();
         break;
       case SCAVENGER:
-        Scavenge();
+        if (fast_promotion_mode_ &&
+            CanExpandOldGeneration(new_space()->Size())) {
+          tracer()->NotifyYoungGenerationHandling(
+              YoungGenerationHandling::kFastPromotionDuringScavenge);
+          EvacuateYoungGeneration();
+        } else {
+          tracer()->NotifyYoungGenerationHandling(
+              YoungGenerationHandling::kRegularScavenge);
+
+          Scavenge();
+        }
         break;
     }
 
@@ -1341,6 +1362,10 @@
   UpdateSurvivalStatistics(start_new_space_size);
   ConfigureInitialOldGenerationSize();
 
+  if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
+    ComputeFastPromotionMode(promotion_ratio_ + semi_space_copied_rate_);
+  }
+
   isolate_->counters()->objs_since_last_young()->Set(0);
 
   gc_post_processing_depth_++;
@@ -1394,6 +1419,7 @@
 
 
 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
+  RuntimeCallTimerScope runtime_timer(isolate(), &RuntimeCallStats::GCPrologueCallback);
   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
       if (!gc_prologue_callbacks_[i].pass_isolate) {
@@ -1415,6 +1441,7 @@
 
 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
                                    GCCallbackFlags gc_callback_flags) {
+  RuntimeCallTimerScope runtime_timer(isolate(), &RuntimeCallStats::GCEpilogueCallback);
   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
       if (!gc_epilogue_callbacks_[i].pass_isolate) {
@@ -1433,7 +1460,8 @@
 void Heap::MarkCompact() {
   PauseAllocationObserversScope pause_observers(this);
 
-  gc_state_ = MARK_COMPACT;
+  SetGCState(MARK_COMPACT);
+
   LOG(isolate_, ResourceEvent("markcompact", "begin"));
 
   uint64_t size_of_objects_before_gc = SizeOfObjects();
@@ -1459,7 +1487,7 @@
 
 void Heap::MarkCompactEpilogue() {
   TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
-  gc_state_ = NOT_IN_GC;
+  SetGCState(NOT_IN_GC);
 
   isolate_->counters()->objs_since_last_full()->Set(0);
 
@@ -1512,21 +1540,6 @@
          !HeapObject::cast(*p)->map_word().IsForwardingAddress();
 }
 
-
-static bool IsUnmodifiedHeapObject(Object** p) {
-  Object* object = *p;
-  if (object->IsSmi()) return false;
-  HeapObject* heap_object = HeapObject::cast(object);
-  if (!object->IsJSObject()) return false;
-  JSObject* js_object = JSObject::cast(object);
-  if (!js_object->WasConstructedFromApiFunction()) return false;
-  JSFunction* constructor =
-      JSFunction::cast(js_object->map()->GetConstructor());
-
-  return constructor->initial_map() == heap_object->map();
-}
-
-
 void PromotionQueue::Initialize() {
  // The last to-space page may be used for the promotion queue. On promotion
   // conflict, we use the emergency stack.
@@ -1590,6 +1603,44 @@
   Heap* heap_;
 };
 
+void Heap::EvacuateYoungGeneration() {
+  TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_EVACUATE);
+  DCHECK(fast_promotion_mode_);
+  DCHECK(CanExpandOldGeneration(new_space()->Size()));
+
+  mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+
+  SetGCState(SCAVENGE);
+  LOG(isolate_, ResourceEvent("scavenge", "begin"));
+
+  // Move pages from new->old generation.
+  PageRange range(new_space()->bottom(), new_space()->top());
+  for (auto it = range.begin(); it != range.end();) {
+    Page* p = (*++it)->prev_page();
+    p->Unlink();
+    Page::ConvertNewToOld(p);
+    if (incremental_marking()->IsMarking())
+      mark_compact_collector()->RecordLiveSlotsOnPage(p);
+  }
+
+  // Reset new space.
+  if (!new_space()->Rebalance()) {
+    FatalProcessOutOfMemory("NewSpace::Rebalance");
+  }
+  new_space()->ResetAllocationInfo();
+  new_space()->set_age_mark(new_space()->top());
+
+  // Fix up special trackers.
+  external_string_table_.PromoteAllNewSpaceStrings();
+  // GlobalHandles are updated in PostGarbageCollectionProcessing.
+
+  IncrementYoungSurvivorsCounter(new_space()->Size());
+  IncrementPromotedObjectsSize(new_space()->Size());
+  IncrementSemiSpaceCopiedObjectSize(0);
+
+  LOG(isolate_, ResourceEvent("scavenge", "end"));
+  SetGCState(NOT_IN_GC);
+}
 
 void Heap::Scavenge() {
   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
@@ -1605,7 +1656,7 @@
 
   mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
 
-  gc_state_ = SCAVENGE;
+  SetGCState(SCAVENGE);
 
   // Implements Cheney's copying algorithm
   LOG(isolate_, ResourceEvent("scavenge", "begin"));
@@ -1615,13 +1666,6 @@
 
   scavenge_collector_->SelectScavengingVisitorsTable();
 
-  if (UsingEmbedderHeapTracer()) {
-    // Register found wrappers with embedder so it can add them to its marking
-    // deque and correctly manage the case when v8 scavenger collects the
-    // wrappers by either keeping wrappables alive, or cleaning marking deque.
-    RegisterWrappersWithEmbedderHeapTracer();
-  }
-
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
   new_space_->Flip();
@@ -1701,8 +1745,10 @@
   isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
       &IsUnscavengedHeapObject);
 
-  isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
-      &scavenge_visitor);
+  isolate()
+      ->global_handles()
+      ->IterateNewSpaceWeakUnmodifiedRoots<
+          GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&scavenge_visitor);
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
 
   UpdateNewSpaceReferencesInExternalStringTable(
@@ -1727,11 +1773,28 @@
   IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
                                  new_space_->Size() - survived_watermark);
 
+  // The scavenger may find new wrappers while iterating objects promoted onto
+  // a black page.
+  local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+
   LOG(isolate_, ResourceEvent("scavenge", "end"));
 
-  gc_state_ = NOT_IN_GC;
+  SetGCState(NOT_IN_GC);
 }
 
+void Heap::ComputeFastPromotionMode(double survival_rate) {
+  const size_t survived_in_new_space =
+      survived_last_scavenge_ * 100 / new_space_->Capacity();
+  fast_promotion_mode_ =
+      !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
+      !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
+      survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
+  if (FLAG_trace_gc_verbose) {
+    PrintIsolate(
+        isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
+        fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
+  }
+}
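
To make the heuristic concrete, a worked sketch: fast promotion only kicks in when new space is at maximum capacity and nearly everything survived the last scavenge, in which case copying survivors is wasted work and EvacuateYoungGeneration() promotes whole pages instead. The threshold constant below is a hypothetical stand-in; V8 defines the real kMinPromotedPercentForFastPromotionMode elsewhere, and its value may differ:

#include <cstddef>
#include <cstdio>

// Hypothetical threshold for illustration only.
const size_t kMinPromotedPercentForFastPromotionMode = 90;

bool ComputeFastPromotionMode(size_t survived_bytes, size_t capacity_bytes,
                              bool optimize_for_size, bool at_max_capacity) {
  const size_t survived_percent = survived_bytes * 100 / capacity_bytes;
  return !optimize_for_size && at_max_capacity &&
         survived_percent >= kMinPromotedPercentForFastPromotionMode;
}

int main() {
  const size_t MB = 1024 * 1024;
  // 15 of 16 MB survived: 93% >= 90%, so the next scavenge fast-promotes.
  std::printf("fast promotion: %s\n",
              ComputeFastPromotionMode(15 * MB, 16 * MB, false, true) ? "yes"
                                                                      : "no");
  return 0;
}
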
 
 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                 Object** p) {
@@ -1739,12 +1802,21 @@
 
   if (!first_word.IsForwardingAddress()) {
     // Unreachable external string can be finalized.
-    heap->FinalizeExternalString(String::cast(*p));
+    String* string = String::cast(*p);
+    if (!string->IsExternalString()) {
+      // Original external string has been internalized.
+      DCHECK(string->IsThinString());
+      return NULL;
+    }
+    heap->FinalizeExternalString(string);
     return NULL;
   }
 
   // String is still reachable.
-  return String::cast(first_word.ToForwardingAddress());
+  String* string = String::cast(first_word.ToForwardingAddress());
+  if (string->IsThinString()) string = ThinString::cast(string)->actual();
+  // Internalization can replace external strings with non-external strings.
+  return string->IsExternalString() ? string : nullptr;
 }
 
 
@@ -1882,7 +1954,7 @@
     v8::ExternalResourceVisitor* visitor_;
   } external_string_table_visitor(visitor);
 
-  external_string_table_.Iterate(&external_string_table_visitor);
+  external_string_table_.IterateAll(&external_string_table_visitor);
 }
 
 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
@@ -1948,8 +2020,6 @@
     case kDoubleAligned:
     case kDoubleUnaligned:
       return kDoubleSize - kPointerSize;
-    case kSimd128Unaligned:
-      return kSimd128Size - kPointerSize;
     default:
       UNREACHABLE();
   }
@@ -1963,10 +2033,6 @@
     return kPointerSize;
   if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
     return kDoubleSize - kPointerSize;  // No fill if double is always aligned.
-  if (alignment == kSimd128Unaligned) {
-    return (kSimd128Size - (static_cast<int>(offset) + kPointerSize)) &
-           kSimd128AlignmentMask;
-  }
   return 0;
 }
 
@@ -2008,7 +2074,6 @@
   ArrayBufferTracker::Unregister(this, buffer);
 }
 
-
 void Heap::ConfigureInitialOldGenerationSize() {
   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
     old_generation_allocation_limit_ =
@@ -2019,7 +2084,6 @@
   }
 }
 
-
 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
                                           int instance_size) {
   Object* result = nullptr;
@@ -2107,8 +2171,7 @@
   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
   DCHECK(chunk->owner()->identity() == space);
 #endif
-  CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo,
-                       ClearBlackArea::kNo);
+  CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
   return obj;
 }
 
@@ -2256,17 +2319,13 @@
 
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, feedback_vector)
     ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
                            Context::NUMBER_FUNCTION_INDEX)
     ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
                  mutable_heap_number)
     ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
                            Context::SYMBOL_FUNCTION_INDEX)
-#define ALLOCATE_SIMD128_MAP(TYPE, Type, type, lane_count, lane_type) \
-  ALLOCATE_PRIMITIVE_MAP(SIMD128_VALUE_TYPE, Type::kSize, type,       \
-                         Context::TYPE##_FUNCTION_INDEX)
-    SIMD128_TYPES(ALLOCATE_SIMD128_MAP)
-#undef ALLOCATE_SIMD128_MAP
     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
 
     ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
@@ -2279,6 +2338,9 @@
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
 
+    ALLOCATE_MAP(JS_PROMISE_CAPABILITY_TYPE, JSPromiseCapability::kSize,
+                 js_promise_capability);
+
     for (unsigned i = 0; i < arraysize(string_type_table); i++) {
       const StringTypeTable& entry = string_type_table[i];
       {
@@ -2322,6 +2384,9 @@
     ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
     ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
     ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
+    ALLOCATE_MAP(CELL_TYPE, Cell::kSize, no_closures_cell)
+    ALLOCATE_MAP(CELL_TYPE, Cell::kSize, one_closure_cell)
+    ALLOCATE_MAP(CELL_TYPE, Cell::kSize, many_closures_cell)
     ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
     ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
 
@@ -2344,6 +2409,7 @@
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, eval_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
 
@@ -2405,8 +2471,7 @@
   return true;
 }
 
-
-AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
+AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
                                           PretenureFlag pretenure) {
   // Statically ensure that it is safe to allocate heap numbers in paged
   // spaces.
@@ -2423,36 +2488,9 @@
 
   Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
   HeapObject::cast(result)->set_map_no_write_barrier(map);
-  HeapNumber::cast(result)->set_value(value);
   return result;
 }
 
-#define SIMD_ALLOCATE_DEFINITION(TYPE, Type, type, lane_count, lane_type) \
-  AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count],      \
-                                        PretenureFlag pretenure) {        \
-    int size = Type::kSize;                                               \
-    STATIC_ASSERT(Type::kSize <= kMaxRegularHeapObjectSize);              \
-                                                                          \
-    AllocationSpace space = SelectSpace(pretenure);                       \
-                                                                          \
-    HeapObject* result = nullptr;                                         \
-    {                                                                     \
-      AllocationResult allocation =                                       \
-          AllocateRaw(size, space, kSimd128Unaligned);                    \
-      if (!allocation.To(&result)) return allocation;                     \
-    }                                                                     \
-                                                                          \
-    result->set_map_no_write_barrier(type##_map());                       \
-    Type* instance = Type::cast(result);                                  \
-    for (int i = 0; i < lane_count; i++) {                                \
-      instance->set_lane(i, lanes[i]);                                    \
-    }                                                                     \
-    return result;                                                        \
-  }
-SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)
-#undef SIMD_ALLOCATE_DEFINITION
-
-
 AllocationResult Heap::AllocateCell(Object* value) {
   int size = Cell::kSize;
   STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
@@ -2523,10 +2561,18 @@
   return array;
 }
 
-
-void Heap::CreateApiObjects() {
+bool Heap::CreateApiObjects() {
   HandleScope scope(isolate());
   set_message_listeners(*TemplateList::New(isolate(), 2));
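+  // Pre-allocate the canonical no-op InterceptorInfo and report allocation
+  // failure to the caller.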
+  HeapObject* obj = nullptr;
+  {
+    AllocationResult allocation = AllocateStruct(INTERCEPTOR_INFO_TYPE);
+    if (!allocation.To(&obj)) return false;
+  }
+  InterceptorInfo* info = InterceptorInfo::cast(obj);
+  info->set_flags(0);
+  set_noop_interceptor_info(info);
+  return true;
 }
 
 
@@ -2582,8 +2628,8 @@
 
   set_nan_value(*factory->NewHeapNumber(
       std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
-  set_hole_nan_value(*factory->NewHeapNumber(bit_cast<double>(kHoleNanInt64),
-                                             IMMUTABLE, TENURED));
+  set_hole_nan_value(
+      *factory->NewHeapNumberFromBits(kHoleNanInt64, IMMUTABLE, TENURED));
   set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
   set_minus_infinity_value(
       *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
@@ -2697,10 +2743,14 @@
   }
 
   Handle<NameDictionary> empty_properties_dictionary =
-      NameDictionary::New(isolate(), 0, TENURED);
+      NameDictionary::NewEmpty(isolate(), TENURED);
   empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
   set_empty_properties_dictionary(*empty_properties_dictionary);
 
+  set_public_symbol_table(*empty_properties_dictionary);
+  set_api_symbol_table(*empty_properties_dictionary);
+  set_api_private_symbol_table(*empty_properties_dictionary);
+
   set_number_string_cache(
       *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
 
@@ -2729,68 +2779,11 @@
 
   set_undefined_cell(*factory->NewCell(factory->undefined_value()));
 
-  // The symbol registry is initialized lazily.
-  set_symbol_registry(Smi::kZero);
-
   // Microtask queue uses the empty fixed array as a sentinel for "empty".
   // Number of queued microtasks stored in Isolate::pending_microtask_count().
   set_microtask_queue(empty_fixed_array());
 
   {
-    StaticFeedbackVectorSpec spec;
-    FeedbackVectorSlot slot = spec.AddLoadICSlot();
-    DCHECK_EQ(slot, FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
-
-    slot = spec.AddKeyedLoadICSlot();
-    DCHECK_EQ(slot,
-              FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
-
-    slot = spec.AddStoreICSlot();
-    DCHECK_EQ(slot, FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
-
-    slot = spec.AddKeyedStoreICSlot();
-    DCHECK_EQ(slot,
-              FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-
-    Handle<TypeFeedbackMetadata> dummy_metadata =
-        TypeFeedbackMetadata::New(isolate(), &spec);
-    Handle<TypeFeedbackVector> dummy_vector =
-        TypeFeedbackVector::New(isolate(), dummy_metadata);
-
-    set_dummy_vector(*dummy_vector);
-
-    // Now initialize dummy vector's entries.
-    LoadICNexus(isolate()).ConfigureMegamorphic();
-    StoreICNexus(isolate()).ConfigureMegamorphic();
-    KeyedLoadICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
-    KeyedStoreICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
-  }
-
-  {
-    // Create a canonical empty TypeFeedbackVector, which is shared by all
-    // functions that don't need actual type feedback slots. Note however
-    // that all these functions will share the same invocation count, but
-    // that shouldn't matter since we only use the invocation count to
-    // relativize the absolute call counts, but we can only have call counts
-    // if we have actual feedback slots.
-    Handle<FixedArray> empty_type_feedback_vector = factory->NewFixedArray(
-        TypeFeedbackVector::kReservedIndexCount, TENURED);
-    empty_type_feedback_vector->set(TypeFeedbackVector::kMetadataIndex,
-                                    empty_fixed_array());
-    empty_type_feedback_vector->set(TypeFeedbackVector::kInvocationCountIndex,
-                                    Smi::kZero);
-    set_empty_type_feedback_vector(*empty_type_feedback_vector);
-
-    // We use a canonical empty LiteralsArray for all functions that neither
-    // have literals nor need a TypeFeedbackVector (besides the invocation
-    // count special slot).
-    Handle<FixedArray> empty_literals_array =
-        factory->NewFixedArray(1, TENURED);
-    empty_literals_array->set(0, *empty_type_feedback_vector);
-    set_empty_literals_array(*empty_literals_array);
-  }
-
-  {
     Handle<FixedArray> empty_sloppy_arguments_elements =
         factory->NewFixedArray(2, TENURED);
     empty_sloppy_arguments_elements->set_map(sloppy_arguments_elements_map());
@@ -2814,10 +2807,12 @@
       ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
   weak_new_space_object_to_code_list()->SetLength(0);
 
+  set_code_coverage_list(undefined_value());
+
   set_script_list(Smi::kZero);
 
   Handle<SeededNumberDictionary> slow_element_dictionary =
-      SeededNumberDictionary::New(isolate(), 0, TENURED);
+      SeededNumberDictionary::NewEmpty(isolate(), TENURED);
   slow_element_dictionary->set_requires_slow_elements();
   set_empty_slow_element_dictionary(*slow_element_dictionary);
 
@@ -2842,7 +2837,7 @@
 
   cell = factory->NewPropertyCell();
   cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
-  set_has_instance_protector(*cell);
+  set_array_iterator_protector(*cell);
 
   Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
       handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
@@ -2860,11 +2855,12 @@
       handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
   set_fast_array_iteration_protector(*fast_array_iteration_cell);
 
-  Handle<Cell> array_iterator_cell = factory->NewCell(
-      handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
-  set_array_iterator_protector(*array_iterator_cell);
+  cell = factory->NewPropertyCell();
+  cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+  set_array_buffer_neutering_protector(*cell);
 
   set_serialized_templates(empty_fixed_array());
+  set_serialized_global_proxy_sizes(empty_fixed_array());
 
   set_weak_stack_trace_list(Smi::kZero);
 
@@ -2878,6 +2874,42 @@
 
   // Initialize compilation cache.
   isolate_->compilation_cache()->Clear();
+
+  // Finish creating the JSPromiseCapability map.
+  {
+    // TODO(caitp): This initialization can be removed once PromiseCapability
+    // object is no longer used by builtins implemented in JavaScript.
+    Handle<Map> map = factory->js_promise_capability_map();
+    map->set_inobject_properties_or_constructor_function_index(3);
+
+    Map::EnsureDescriptorSlack(map, 3);
+
+    PropertyAttributes attrs =
+        static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+    {  // promise
+      Descriptor d = Descriptor::DataField(factory->promise_string(),
+                                           JSPromiseCapability::kPromiseIndex,
+                                           attrs, Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+
+    {  // resolve
+      Descriptor d = Descriptor::DataField(factory->resolve_string(),
+                                           JSPromiseCapability::kResolveIndex,
+                                           attrs, Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+
+    {  // reject
+      Descriptor d = Descriptor::DataField(factory->reject_string(),
+                                           JSPromiseCapability::kRejectIndex,
+                                           attrs, Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+
+    map->set_is_extensible(false);
+    set_js_promise_capability_map(*map);
+  }
 }
 
 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
@@ -2888,7 +2920,6 @@
     case kInstanceofCacheAnswerRootIndex:
     case kCodeStubsRootIndex:
     case kEmptyScriptRootIndex:
-    case kSymbolRegistryRootIndex:
     case kScriptListRootIndex:
     case kMaterializedObjectsRootIndex:
     case kMicrotaskQueueRootIndex:
@@ -2896,9 +2927,14 @@
     case kWeakObjectToCodeTableRootIndex:
     case kWeakNewSpaceObjectToCodeListRootIndex:
     case kRetainedMapsRootIndex:
+    case kCodeCoverageListRootIndex:
     case kNoScriptSharedFunctionInfosRootIndex:
     case kWeakStackTraceListRootIndex:
     case kSerializedTemplatesRootIndex:
+    case kSerializedGlobalProxySizesRootIndex:
+    case kPublicSymbolTableRootIndex:
+    case kApiSymbolTableRootIndex:
+    case kApiPrivateSymbolTableRootIndex:
 // Smi values
 #define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
       SMI_ROOT_LIST(SMI_ENTRY)
@@ -2912,12 +2948,23 @@
   }
 }
 
-
 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
   return !RootCanBeWrittenAfterInitialization(root_index) &&
          !InNewSpace(root(root_index));
 }
 
+bool Heap::IsUnmodifiedHeapObject(Object** p) {
+  Object* object = *p;
+  if (object->IsSmi()) return false;
+  HeapObject* heap_object = HeapObject::cast(object);
+  if (!object->IsJSObject()) return false;
+  JSObject* js_object = JSObject::cast(object);
+  if (!js_object->WasConstructedFromApiFunction()) return false;
+  JSFunction* constructor =
+      JSFunction::cast(js_object->map()->GetConstructor());
+
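+  // The object counts as unmodified if it still uses the initial map of its
+  // API-function constructor.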
+  return constructor->initial_map() == heap_object->map();
+}
 
 int Heap::FullSizeNumberStringCacheLength() {
   // Compute the size of the number string cache based on the max newspace size.
@@ -3042,6 +3089,7 @@
   instance->set_parameter_count(parameter_count);
   instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
   instance->set_osr_loop_nesting_level(0);
+  instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
   instance->set_constant_pool(constant_pool);
   instance->set_handler_table(empty_fixed_array());
   instance->set_source_position_table(empty_byte_array());
@@ -3050,9 +3098,9 @@
   return result;
 }
 
-void Heap::CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode,
-                                ClearBlackArea black_area_mode) {
-  if (size == 0) return;
+HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
+                                       ClearRecordedSlots mode) {
+  if (size == 0) return nullptr;
   HeapObject* filler = HeapObject::FromAddress(addr);
   if (size == kPointerSize) {
     filler->set_map_no_write_barrier(
@@ -3070,20 +3118,11 @@
     ClearRecordedSlotRange(addr, addr + size);
   }
 
-  // If the location where the filler is created is within a black area we have
-  // to clear the mark bits of the filler space.
-  if (black_area_mode == ClearBlackArea::kYes &&
-      incremental_marking()->black_allocation() &&
-      Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(addr))) {
-    Page* page = Page::FromAddress(addr);
-    page->markbits()->ClearRange(page->AddressToMarkbitIndex(addr),
-                                 page->AddressToMarkbitIndex(addr + size));
-  }
-
   // At this point, we may be deserializing the heap from a snapshot, and
   // none of the maps have been created yet and are NULL.
   DCHECK((filler->map() == NULL && !deserialization_complete_) ||
          filler->map()->IsMap());
+  return filler;
 }
 
 
@@ -3101,8 +3140,12 @@
   return Page::FromAddress(address)->SweepingDone();
 }
 
+bool Heap::IsImmovable(HeapObject* object) {
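+  // Objects on never-evacuated pages and in large object space do not move.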
+  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+  return chunk->NeverEvacuate() || chunk->owner()->identity() == LO_SPACE;
+}
 
-void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
+void Heap::AdjustLiveBytes(HeapObject* object, int by) {
   // As long as the inspected object is black and we are currently not iterating
   // the heap using HeapIterator, we can update the live byte count. We cannot
   // update while using HeapIterator because the iterator is temporarily
@@ -3111,12 +3154,9 @@
     lo_space()->AdjustLiveBytes(by);
   } else if (!in_heap_iterator() &&
              !mark_compact_collector()->sweeping_in_progress() &&
-             Marking::IsBlack(ObjectMarking::MarkBitFrom(object->address()))) {
-    if (mode == SEQUENTIAL_TO_SWEEPER) {
-      MemoryChunk::IncrementLiveBytesFromGC(object, by);
-    } else {
-      MemoryChunk::IncrementLiveBytesFromMutator(object, by);
-    }
+             ObjectMarking::IsBlack(object)) {
+    DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone());
+    MemoryChunk::IncrementLiveBytes(object, by);
   }
 }
 
@@ -3124,6 +3164,7 @@
 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
                                          int elements_to_trim) {
   CHECK_NOT_NULL(object);
+  DCHECK(CanMoveObjectStart(object));
   DCHECK(!object->IsFixedTypedArrayBase());
   DCHECK(!object->IsByteArray());
   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
@@ -3150,18 +3191,30 @@
   // Transfer the mark bits to their new location if the object is not within
   // a black area.
   if (!incremental_marking()->black_allocation() ||
-      !Marking::IsBlack(ObjectMarking::MarkBitFrom(new_start))) {
-    IncrementalMarking::TransferMark(this, old_start, new_start);
+      !Marking::IsBlack(
+          ObjectMarking::MarkBitFrom(HeapObject::FromAddress(new_start)))) {
+    IncrementalMarking::TransferMark(this, object,
+                                     HeapObject::FromAddress(new_start));
   }
 
   // Technically in new space this write might be omitted (except for
   // debug mode, which iterates through the heap), but to be safe
   // we still do it.
   CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
+
+  // Clear the mark bits of the black area that now belongs to the filler.
+  // This is an optimization. The sweeper will release black fillers anyway.
+  if (incremental_marking()->black_allocation() &&
+      Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object))) {
+    Page* page = Page::FromAddress(old_start);
+    page->markbits()->ClearRange(
+        page->AddressToMarkbitIndex(old_start),
+        page->AddressToMarkbitIndex(old_start + bytes_to_trim));
+  }
+
   // Initialize header of the trimmed array. Since left trimming is only
   // performed on pages which are not concurrently swept, creating a filler
   // object does not require synchronization.
-  DCHECK(CanMoveObjectStart(object));
   Object** former_start = HeapObject::RawField(object, 0);
   int new_start_index = elements_to_trim * (element_size / kPointerSize);
   former_start[new_start_index] = map;
@@ -3171,7 +3224,7 @@
       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
 
   // Maintain consistency of live bytes during incremental marking
-  AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
+  AdjustLiveBytes(new_object, -bytes_to_trim);
 
   // Remove recorded slots for the new map and length offset.
   ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
@@ -3183,15 +3236,6 @@
   return new_object;
 }
 
-
-// Force instantiation of templatized method.
-template void Heap::RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
-    FixedArrayBase*, int);
-template void Heap::RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
-    FixedArrayBase*, int);
-
-
-template<Heap::InvocationMode mode>
 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
   const int len = object->length();
   DCHECK_LE(elements_to_trim, len);
@@ -3235,7 +3279,18 @@
   // TODO(hpayer): We should shrink the large object page if the size
   // of the object changed significantly.
   if (!lo_space()->Contains(object)) {
-    CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
+    HeapObject* filler =
+        CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
+    DCHECK_NOT_NULL(filler);
+    // Clear the mark bits of the black area that now belongs to the filler.
+    // This is an optimization. The sweeper will release black fillers anyway.
+    if (incremental_marking()->black_allocation() &&
+        ObjectMarking::IsBlackOrGrey(filler)) {
+      Page* page = Page::FromAddress(new_end);
+      page->markbits()->ClearRange(
+          page->AddressToMarkbitIndex(new_end),
+          page->AddressToMarkbitIndex(new_end + bytes_to_trim));
+    }
   }
 
   // Initialize header of the trimmed array. We are storing the new length
@@ -3244,7 +3299,7 @@
   object->synchronized_set_length(len - elements_to_trim);
 
   // Maintain consistency of live bytes during incremental marking
-  AdjustLiveBytes(object, -bytes_to_trim, mode);
+  AdjustLiveBytes(object, -bytes_to_trim);
 
   // Notify the heap profiler of change in object layout. The array may not be
   // moved during GC, and size has to be adjusted nevertheless.
@@ -3331,18 +3386,24 @@
   if (!allocation.To(&result)) return allocation;
   if (immovable) {
     Address address = result->address();
+    MemoryChunk* chunk = MemoryChunk::FromAddress(address);
     // Code objects which should stay at a fixed address are allocated either
     // in the first page of code space (objects on the first page of each space
-    // are never moved) or in large object space.
-    if (!code_space_->FirstPage()->Contains(address) &&
-        MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
-      // Discard the first code allocation, which was on a page where it could
-      // be moved.
-      CreateFillerObjectAt(result->address(), object_size,
-                           ClearRecordedSlots::kNo);
-      allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
-      if (!allocation.To(&result)) return allocation;
-      OnAllocationEvent(result, object_size);
+    // are never moved), in large object space, or (during snapshot creation)
+    // the containing page is marked as immovable.
+    if (!Heap::IsImmovable(result) &&
+        !code_space_->FirstPage()->Contains(address)) {
+      if (isolate()->serializer_enabled()) {
+        chunk->MarkNeverEvacuate();
+      } else {
+        // Discard the first code allocation, which was on a page where it could
+        // be moved.
+        CreateFillerObjectAt(result->address(), object_size,
+                             ClearRecordedSlots::kNo);
+        allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
+        if (!allocation.To(&result)) return allocation;
+        OnAllocationEvent(result, object_size);
+      }
     }
   }
 
@@ -3405,6 +3466,7 @@
   copy->set_source_position_table(bytecode_array->source_position_table());
   copy->set_interrupt_budget(bytecode_array->interrupt_budget());
   copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
+  copy->set_bytecode_age(bytecode_array->bytecode_age());
   bytecode_array->CopyBytecodesTo(copy);
   return copy;
 }
@@ -4032,23 +4094,8 @@
 }
 
 
-bool Heap::IsHeapIterable() {
-  // TODO(hpayer): This function is not correct. Allocation folding in old
-  // space breaks the iterability.
-  return new_space_top_after_last_gc_ == new_space()->top();
-}
-
-
 void Heap::MakeHeapIterable() {
-  DCHECK(AllowHeapAllocation::IsAllowed());
-  if (!IsHeapIterable()) {
-    CollectAllGarbage(kMakeHeapIterableMask,
-                      GarbageCollectionReason::kMakeHeapIterable);
-  }
-  if (mark_compact_collector()->sweeping_in_progress()) {
-    mark_compact_collector()->EnsureSweepingCompleted();
-  }
-  DCHECK(IsHeapIterable());
+  mark_compact_collector()->EnsureSweepingCompleted();
 }
 
 
@@ -4169,21 +4216,18 @@
   }
 }
 
-bool Heap::MarkingDequesAreEmpty() {
-  return mark_compact_collector()->marking_deque()->IsEmpty() &&
-         (!UsingEmbedderHeapTracer() ||
-          (wrappers_to_trace() == 0 &&
-           embedder_heap_tracer()->NumberOfWrappersToTrace() == 0));
-}
-
 void Heap::FinalizeIncrementalMarkingIfComplete(
     GarbageCollectionReason gc_reason) {
   if (incremental_marking()->IsMarking() &&
       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
        (!incremental_marking()->finalize_marking_completed() &&
-        MarkingDequesAreEmpty()))) {
+        mark_compact_collector()->marking_deque()->IsEmpty() &&
+        local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
     FinalizeIncrementalMarking(gc_reason);
-  } else if (incremental_marking()->IsComplete() || MarkingDequesAreEmpty()) {
+  } else if (incremental_marking()->IsComplete() ||
+             (mark_compact_collector()->marking_deque()->IsEmpty() &&
+              local_embedder_heap_tracer()
+                  ->ShouldFinalizeIncrementalMarking())) {
     CollectAllGarbage(current_gc_flags_, gc_reason);
   }
 }
@@ -4195,13 +4239,16 @@
       tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
   if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
       (!incremental_marking()->finalize_marking_completed() &&
-       MarkingDequesAreEmpty() &&
+       mark_compact_collector()->marking_deque()->IsEmpty() &&
+       local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking() &&
        gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
            idle_time_in_ms))) {
     FinalizeIncrementalMarking(gc_reason);
     return true;
   } else if (incremental_marking()->IsComplete() ||
-             (MarkingDequesAreEmpty() &&
+             (mark_compact_collector()->marking_deque()->IsEmpty() &&
+              local_embedder_heap_tracer()
+                  ->ShouldFinalizeIncrementalMarking() &&
               gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
                   idle_time_in_ms, size_of_objects,
                   final_incremental_mark_compact_speed_in_bytes_per_ms))) {
@@ -4216,28 +4263,21 @@
   // for marking. We just have to execute the special visiting side effect
   // code that adds objects to global data structures, e.g. for array buffers.
 
-  // Code space, map space, and large object space do not use black pages.
-  // Hence we have to color all objects of the reservation first black to avoid
-  // unnecessary marking deque load.
   if (incremental_marking()->black_allocation()) {
+    // Iterate black objects in old space, code space, map space, and large
+    // object space for side effects.
     for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
       const Heap::Reservation& res = reservations[i];
       for (auto& chunk : res) {
         Address addr = chunk.start;
         while (addr < chunk.end) {
           HeapObject* obj = HeapObject::FromAddress(addr);
-          Marking::MarkBlack(ObjectMarking::MarkBitFrom(obj));
-          addr += obj->Size();
-        }
-      }
-    }
-    for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
-      const Heap::Reservation& res = reservations[i];
-      for (auto& chunk : res) {
-        Address addr = chunk.start;
-        while (addr < chunk.end) {
-          HeapObject* obj = HeapObject::FromAddress(addr);
-          incremental_marking()->IterateBlackObject(obj);
+          // There might be grey objects due to black to grey transitions in
+          // incremental marking. E.g. see VisitNativeContextIncremental.
+          DCHECK(ObjectMarking::IsBlackOrGrey(obj));
+          if (ObjectMarking::IsBlack(obj)) {
+            incremental_marking()->IterateBlackObject(obj);
+          }
           addr += obj->Size();
         }
       }
@@ -4245,6 +4285,29 @@
   }
 }
 
+void Heap::NotifyObjectLayoutChange(HeapObject* object,
+                                    const DisallowHeapAllocation&) {
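+  // Keep the incremental marker in sync: mark the object grey so it is
+  // revisited after its layout changes.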
+  if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
+    incremental_marking()->MarkGrey(this, object);
+  }
+#ifdef VERIFY_HEAP
+  DCHECK(pending_layout_change_object_ == nullptr);
+  pending_layout_change_object_ = object;
+#endif
+}
+
+#ifdef VERIFY_HEAP
+void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
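+  // Either the change was announced via NotifyObjectLayoutChange, or the map
+  // transition must not require synchronization with the GC.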
+  if (pending_layout_change_object_ == nullptr) {
+    DCHECK(!object->IsJSObject() ||
+           !object->map()->TransitionRequiresSynchronizationWithGC(new_map));
+  } else {
+    DCHECK_EQ(pending_layout_change_object_, object);
+    pending_layout_change_object_ = nullptr;
+  }
+}
+#endif
+
 GCIdleTimeHeapState Heap::ComputeHeapState() {
   GCIdleTimeHeapState heap_state;
   heap_state.contexts_disposed = contexts_disposed_;
@@ -4484,6 +4547,18 @@
   }
 }
 
+void Heap::SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
+                                  void* data) {
+  out_of_memory_callback_ = callback;
+  out_of_memory_callback_data_ = data;
+}
+
+void Heap::InvokeOutOfMemoryCallback() {
+  if (out_of_memory_callback_) {
+    out_of_memory_callback_(out_of_memory_callback_data_);
+  }
+}
+
 void Heap::CollectCodeStatistics() {
   CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
   // We do not look for code in new space, or map space.  If code
@@ -4698,10 +4773,8 @@
   CHECK(HasBeenSetUp());
   HandleScope scope(isolate());
 
-  if (mark_compact_collector()->sweeping_in_progress()) {
-    // We have to wait here for the sweeper threads to have an iterable heap.
-    mark_compact_collector()->EnsureSweepingCompleted();
-  }
+  // We have to wait here for the sweeper threads to have an iterable heap.
+  mark_compact_collector()->EnsureSweepingCompleted();
 
   VerifyPointersVisitor visitor;
   IterateRoots(&visitor, VISIT_ONLY_STRONG);
@@ -4729,8 +4802,8 @@
 
 void Heap::ZapFromSpace() {
   if (!new_space_->IsFromSpaceCommitted()) return;
-  for (Page* page : NewSpacePageRange(new_space_->FromSpaceStart(),
-                                      new_space_->FromSpaceEnd())) {
+  for (Page* page :
+       PageRange(new_space_->FromSpaceStart(), new_space_->FromSpaceEnd())) {
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -4800,8 +4873,7 @@
   // it would be a violation of the invariant to record its slots.
   bool record_slots = false;
   if (incremental_marking()->IsCompacting()) {
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(target);
-    record_slots = Marking::IsBlack(mark_bit);
+    record_slots = ObjectMarking::IsBlack(target);
   }
 
   IterateAndScavengePromotedObjectsVisitor visitor(this, target, record_slots);
@@ -4838,7 +4910,7 @@
   v->Synchronize(VisitorSynchronization::kStringTable);
   if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     // Scavenge collections have special processing for this.
-    external_string_table_.Iterate(v);
+    external_string_table_.IterateAll(v);
   }
   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
 }
@@ -4937,8 +5009,9 @@
     case VISIT_ONLY_STRONG_ROOT_LIST:
       UNREACHABLE();
       break;
-    case VISIT_ONLY_STRONG:
     case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
+      break;
+    case VISIT_ONLY_STRONG:
       isolate_->global_handles()->IterateStrongRoots(v);
       break;
     case VISIT_ALL_IN_SCAVENGE:
@@ -5052,7 +5125,7 @@
 
   // The old generation is paged and needs at least one page for each space.
   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
-  max_old_generation_size_ =
+  initial_max_old_generation_size_ = max_old_generation_size_ =
       Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
           max_old_generation_size_);
 
@@ -5171,7 +5244,6 @@
 const double Heap::kConservativeHeapGrowingFactor = 1.3;
 const double Heap::kTargetMutatorUtilization = 0.97;
 
-
 // Given GC speed in bytes per ms, the allocation throughput in bytes per ms
 // (mutator speed), this function returns the heap growing factor that will
 // achieve the kTargetMutatorUtilisation if the GC speed and the mutator speed
@@ -5307,6 +5379,13 @@
   }
 }
 
+bool Heap::ShouldOptimizeForLoadTime() {
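+  // Defer GC work while the isolate is loading (RAIL PERFORMANCE_LOAD mode),
+  // unless the allocation limit is overshot by a large margin or loading has
+  // been running longer than kMaxLoadTimeMs.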
+  return isolate()->rail_mode() == PERFORMANCE_LOAD &&
+         !AllocationLimitOvershotByLargeMargin() &&
+         MonotonicallyIncreasingTimeInMs() <
+             isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
+}
+
 // This predicate is called when an old generation space cannot allocate from
 // the free list and is about to add a new page. Returning false will cause a
 // major GC. It happens when the old generation allocation limit is reached and
@@ -5318,6 +5397,8 @@
 
   if (ShouldOptimizeForMemoryUsage()) return false;
 
+  if (ShouldOptimizeForLoadTime()) return true;
+
   if (incremental_marking()->NeedsFinalization()) {
     return !AllocationLimitOvershotByLargeMargin();
   }
@@ -5352,9 +5433,13 @@
   if (old_generation_space_available > new_space_->Capacity()) {
     return IncrementalMarkingLimit::kNoLimit;
   }
-  // We are close to the allocation limit.
-  // Choose between the hard and the soft limits.
-  if (old_generation_space_available == 0 || ShouldOptimizeForMemoryUsage()) {
+  if (ShouldOptimizeForMemoryUsage()) {
+    return IncrementalMarkingLimit::kHardLimit;
+  }
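+  // While optimizing for load time, postpone incremental marking entirely.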
+  if (ShouldOptimizeForLoadTime()) {
+    return IncrementalMarkingLimit::kNoLimit;
+  }
+  if (old_generation_space_available == 0) {
     return IncrementalMarkingLimit::kHardLimit;
   }
   return IncrementalMarkingLimit::kSoftLimit;
@@ -5477,6 +5562,7 @@
     dead_object_stats_ = new ObjectStats(this);
   }
   scavenge_job_ = new ScavengeJob();
+  local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer();
 
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -5496,7 +5582,7 @@
 bool Heap::CreateHeapObjects() {
   // Create initial maps.
   if (!CreateInitialMaps()) return false;
-  CreateApiObjects();
+  if (!CreateApiObjects()) return false;
 
   // Create initial objects
   CreateInitialObjects();
@@ -5552,16 +5638,7 @@
 
 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
   DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
-  embedder_heap_tracer_ = tracer;
-}
-
-void Heap::RegisterWrappersWithEmbedderHeapTracer() {
-  DCHECK(UsingEmbedderHeapTracer());
-  if (wrappers_to_trace_.empty()) {
-    return;
-  }
-  embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
-  wrappers_to_trace_.clear();
+  local_embedder_heap_tracer()->SetRemoteTracer(tracer);
 }
 
 void Heap::TracePossibleWrapper(JSObject* js_object) {
@@ -5571,17 +5648,12 @@
       js_object->GetInternalField(0) != undefined_value() &&
       js_object->GetInternalField(1) != undefined_value()) {
     DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
-    wrappers_to_trace_.push_back(std::pair<void*, void*>(
+    local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
         reinterpret_cast<void*>(js_object->GetInternalField(0)),
         reinterpret_cast<void*>(js_object->GetInternalField(1))));
   }
 }
 
-bool Heap::RequiresImmediateWrapperProcessing() {
-  const size_t kTooManyWrappers = 16000;
-  return wrappers_to_trace_.size() > kTooManyWrappers;
-}
-
 void Heap::RegisterExternallyReferencedObject(Object** object) {
   HeapObject* heap_object = HeapObject::cast(*object);
   DCHECK(Contains(heap_object));
@@ -5589,8 +5661,7 @@
     IncrementalMarking::MarkGrey(this, heap_object);
   } else {
     DCHECK(mark_compact_collector()->in_use());
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
-    mark_compact_collector()->MarkObject(heap_object, mark_bit);
+    mark_compact_collector()->MarkObject(heap_object);
   }
 }
 
@@ -5603,22 +5674,6 @@
 
   UpdateMaximumCommitted();
 
-  if (FLAG_print_max_heap_committed) {
-    PrintF("\n");
-    PrintF("maximum_committed_by_heap=%" PRIuS " ", MaximumCommittedMemory());
-    PrintF("maximum_committed_by_new_space=%" PRIuS " ",
-           new_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_old_space=%" PRIuS " ",
-           old_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_code_space=%" PRIuS " ",
-           code_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_map_space=%" PRIuS " ",
-           map_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_lo_space=%" PRIuS " ",
-           lo_space_->MaximumCommittedMemory());
-    PrintF("\n\n");
-  }
-
   if (FLAG_verify_predictable) {
     PrintAlloctionsHash();
   }
@@ -5658,6 +5713,9 @@
     dead_object_stats_ = nullptr;
   }
 
+  delete local_embedder_heap_tracer_;
+  local_embedder_heap_tracer_ = nullptr;
+
   delete scavenge_job_;
   scavenge_job_ = nullptr;
 
@@ -5803,8 +5861,6 @@
         WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
         array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
       }
-    } else if (o->IsScript()) {
-      CompactWeakFixedArray(Script::cast(o)->shared_function_infos());
     }
   }
   CompactWeakFixedArray(noscript_shared_function_infos());
@@ -5909,6 +5965,18 @@
   }
 }
 
+bool Heap::HasRecordedSlot(HeapObject* object, Object** slot) {
+  if (InNewSpace(object)) {
+    return false;
+  }
+  Address slot_addr = reinterpret_cast<Address>(slot);
+  Page* page = Page::FromAddress(slot_addr);
+  DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
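+  // Flush the store buffer so the remembered sets queried below are complete.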
+  store_buffer()->MoveAllEntriesToRememberedSet();
+  return RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr) ||
+         RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr);
+}
+
 void Heap::ClearRecordedSlotRange(Address start, Address end) {
   Page* page = Page::FromAddress(start);
   if (!page->InNewSpace()) {
@@ -6025,8 +6093,7 @@
 
   bool SkipObject(HeapObject* object) {
     if (object->IsFiller()) return true;
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
-    return Marking::IsWhite(mark_bit);
+    return ObjectMarking::IsWhite(object);
   }
 
  private:
@@ -6038,6 +6105,8 @@
       for (Object** p = start; p < end; p++) {
         if (!(*p)->IsHeapObject()) continue;
         HeapObject* obj = HeapObject::cast(*p);
+        // Use Marking instead of ObjectMarking to avoid adjusting the live
+        // bytes counter.
         MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
         if (Marking::IsWhite(mark_bit)) {
           Marking::WhiteToBlack(mark_bit);
@@ -6067,16 +6136,15 @@
   DisallowHeapAllocation no_allocation_;
 };
 
-
 HeapIterator::HeapIterator(Heap* heap,
                            HeapIterator::HeapObjectsFiltering filtering)
-    : make_heap_iterable_helper_(heap),
-      no_heap_allocation_(),
+    : no_heap_allocation_(),
       heap_(heap),
       filtering_(filtering),
       filter_(nullptr),
       space_iterator_(nullptr),
       object_iterator_(nullptr) {
+  heap_->MakeHeapIterable();
   heap_->heap_iterator_start();
   // Start the iteration.
   space_iterator_ = new SpaceIterator(heap_);
@@ -6136,225 +6204,51 @@
 }
 
 
-#ifdef DEBUG
-
-Object* const PathTracer::kAnyGlobalObject = NULL;
-
-class PathTracer::MarkVisitor : public ObjectVisitor {
- public:
-  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
-
-  void VisitPointers(Object** start, Object** end) override {
-    // Scan all HeapObject pointers in [start, end)
-    for (Object** p = start; !tracer_->found() && (p < end); p++) {
-      if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
-    }
-  }
-
- private:
-  PathTracer* tracer_;
-};
-
-
-class PathTracer::UnmarkVisitor : public ObjectVisitor {
- public:
-  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
-
-  void VisitPointers(Object** start, Object** end) override {
-    // Scan all HeapObject pointers in [start, end)
-    for (Object** p = start; p < end; p++) {
-      if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
-    }
-  }
-
- private:
-  PathTracer* tracer_;
-};
-
-
-void PathTracer::VisitPointers(Object** start, Object** end) {
-  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
-  // Visit all HeapObject pointers in [start, end)
-  for (Object** p = start; !done && (p < end); p++) {
-    if ((*p)->IsHeapObject()) {
-      TracePathFrom(p);
-      done = ((what_to_find_ == FIND_FIRST) && found_target_);
-    }
-  }
-}
-
-
-void PathTracer::Reset() {
-  found_target_ = false;
-  object_stack_.Clear();
-}
-
-
-void PathTracer::TracePathFrom(Object** root) {
-  DCHECK((search_target_ == kAnyGlobalObject) ||
-         search_target_->IsHeapObject());
-  found_target_in_trace_ = false;
-  Reset();
-
-  MarkVisitor mark_visitor(this);
-  MarkRecursively(root, &mark_visitor);
-
-  UnmarkVisitor unmark_visitor(this);
-  UnmarkRecursively(root, &unmark_visitor);
-
-  ProcessResults();
-}
-
-
-static bool SafeIsNativeContext(HeapObject* obj) {
-  return obj->map() == obj->GetHeap()->root(Heap::kNativeContextMapRootIndex);
-}
-
-
-void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
-  if (!(*p)->IsHeapObject()) return;
-
-  HeapObject* obj = HeapObject::cast(*p);
-
-  MapWord map_word = obj->map_word();
-  if (!map_word.ToMap()->IsHeapObject()) return;  // visited before
-
-  if (found_target_in_trace_) return;  // stop if target found
-  object_stack_.Add(obj);
-  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
-      (obj == search_target_)) {
-    found_target_in_trace_ = true;
-    found_target_ = true;
-    return;
-  }
-
-  bool is_native_context = SafeIsNativeContext(obj);
-
-  // not visited yet
-  Map* map = Map::cast(map_word.ToMap());
-
-  MapWord marked_map_word =
-      MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
-  obj->set_map_word(marked_map_word);
-
-  // Scan the object body.
-  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
-    // This is specialized to scan Context's properly.
-    Object** start =
-        reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
-    Object** end =
-        reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
-                                   Context::FIRST_WEAK_SLOT * kPointerSize);
-    mark_visitor->VisitPointers(start, end);
-  } else {
-    obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
-  }
-
-  // Scan the map after the body because the body is a lot more interesting
-  // when doing leak detection.
-  MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
-
-  if (!found_target_in_trace_) {  // don't pop if found the target
-    object_stack_.RemoveLast();
-  }
-}
-
-
-void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
-  if (!(*p)->IsHeapObject()) return;
-
-  HeapObject* obj = HeapObject::cast(*p);
-
-  MapWord map_word = obj->map_word();
-  if (map_word.ToMap()->IsHeapObject()) return;  // unmarked already
-
-  MapWord unmarked_map_word =
-      MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
-  obj->set_map_word(unmarked_map_word);
-
-  Map* map = Map::cast(unmarked_map_word.ToMap());
-
-  UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
-
-  obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
-}
-
-
-void PathTracer::ProcessResults() {
-  if (found_target_) {
-    OFStream os(stdout);
-    os << "=====================================\n"
-       << "====        Path to object       ====\n"
-       << "=====================================\n\n";
-
-    DCHECK(!object_stack_.is_empty());
-    for (int i = 0; i < object_stack_.length(); i++) {
-      if (i > 0) os << "\n     |\n     |\n     V\n\n";
-      object_stack_[i]->Print(os);
-    }
-    os << "=====================================\n";
-  }
-}
-
-
-// Triggers a depth-first traversal of reachable objects from one
-// given root object and finds a path to a specific heap object and
-// prints it.
-void Heap::TracePathToObjectFrom(Object* target, Object* root) {
-  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
-  tracer.VisitPointer(&root);
-}
-
-
-// Triggers a depth-first traversal of reachable objects from roots
-// and finds a path to a specific heap object and prints it.
-void Heap::TracePathToObject(Object* target) {
-  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
-  IterateRoots(&tracer, VISIT_ONLY_STRONG);
-}
-
-
-// Triggers a depth-first traversal of reachable objects from roots
-// and finds a path to any global object and prints it. Useful for
-// determining the source for leaks of global objects.
-void Heap::TracePathToGlobal() {
-  PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
-                    VISIT_ALL);
-  IterateRoots(&tracer, VISIT_ONLY_STRONG);
-}
-#endif
-
 void Heap::UpdateTotalGCTime(double duration) {
   if (FLAG_trace_gc_verbose) {
     total_gc_time_ms_ += duration;
   }
 }
 
-void Heap::ExternalStringTable::CleanUp() {
+void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
   int last = 0;
   Isolate* isolate = heap_->isolate();
   for (int i = 0; i < new_space_strings_.length(); ++i) {
-    if (new_space_strings_[i]->IsTheHole(isolate)) {
+    Object* o = new_space_strings_[i];
+    if (o->IsTheHole(isolate)) {
       continue;
     }
-    DCHECK(new_space_strings_[i]->IsExternalString());
-    if (heap_->InNewSpace(new_space_strings_[i])) {
-      new_space_strings_[last++] = new_space_strings_[i];
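+    // The string may have been internalized and replaced by a ThinString;
+    // follow it to the actual string.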
+    if (o->IsThinString()) {
+      o = ThinString::cast(o)->actual();
+      if (!o->IsExternalString()) continue;
+    }
+    DCHECK(o->IsExternalString());
+    if (heap_->InNewSpace(o)) {
+      new_space_strings_[last++] = o;
     } else {
-      old_space_strings_.Add(new_space_strings_[i]);
+      old_space_strings_.Add(o);
     }
   }
   new_space_strings_.Rewind(last);
   new_space_strings_.Trim();
+}
 
-  last = 0;
+void Heap::ExternalStringTable::CleanUpAll() {
+  CleanUpNewSpaceStrings();
+  int last = 0;
+  Isolate* isolate = heap_->isolate();
   for (int i = 0; i < old_space_strings_.length(); ++i) {
-    if (old_space_strings_[i]->IsTheHole(isolate)) {
+    Object* o = old_space_strings_[i];
+    if (o->IsTheHole(isolate)) {
       continue;
     }
-    DCHECK(old_space_strings_[i]->IsExternalString());
-    DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
-    old_space_strings_[last++] = old_space_strings_[i];
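+    // As above, follow ThinStrings left behind by internalization.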
+    if (o->IsThinString()) {
+      o = ThinString::cast(o)->actual();
+      if (!o->IsExternalString()) continue;
+    }
+    DCHECK(o->IsExternalString());
+    DCHECK(!heap_->InNewSpace(o));
+    old_space_strings_[last++] = o;
   }
   old_space_strings_.Rewind(last);
   old_space_strings_.Trim();
@@ -6367,11 +6261,21 @@
 
 void Heap::ExternalStringTable::TearDown() {
   for (int i = 0; i < new_space_strings_.length(); ++i) {
-    heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
+    Object* o = new_space_strings_[i];
+    if (o->IsThinString()) {
+      o = ThinString::cast(o)->actual();
+      if (!o->IsExternalString()) continue;
+    }
+    heap_->FinalizeExternalString(ExternalString::cast(o));
   }
   new_space_strings_.Free();
   for (int i = 0; i < old_space_strings_.length(); ++i) {
-    heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
+    Object* o = old_space_strings_[i];
+    if (o->IsThinString()) {
+      o = ThinString::cast(o)->actual();
+      if (!o->IsExternalString()) continue;
+    }
+    heap_->FinalizeExternalString(ExternalString::cast(o));
   }
   old_space_strings_.Free();
 }
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 013cd9a..ad26239 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -14,6 +14,7 @@
 #include "src/allocation.h"
 #include "src/assert-scope.h"
 #include "src/base/atomic-utils.h"
+#include "src/debug/debug-interface.h"
 #include "src/globals.h"
 #include "src/heap-symbols.h"
 #include "src/list.h"
@@ -57,16 +58,14 @@
   V(Map, foreign_map, ForeignMap)                                              \
   V(Map, heap_number_map, HeapNumberMap)                                       \
   V(Map, transition_array_map, TransitionArrayMap)                             \
-  V(FixedArray, empty_literals_array, EmptyLiteralsArray)                      \
-  V(FixedArray, empty_type_feedback_vector, EmptyTypeFeedbackVector)           \
+  V(Map, feedback_vector_map, FeedbackVectorMap)                               \
+  V(ScopeInfo, empty_scope_info, EmptyScopeInfo)                               \
   V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
   V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
   /* Entries beyond the first 32                                            */ \
   /* The roots above this line should be boring from a GC point of view.    */ \
   /* This means they are never in new space and never on a page that is     */ \
   /* being compacted.                                                       */ \
-  /* Empty scope info */                                                       \
-  V(ScopeInfo, empty_scope_info, EmptyScopeInfo)                               \
   /* Oddballs */                                                               \
   V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)      \
   V(Oddball, arguments_marker, ArgumentsMarker)                                \
@@ -77,6 +76,7 @@
   /* Context maps */                                                           \
   V(Map, native_context_map, NativeContextMap)                                 \
   V(Map, module_context_map, ModuleContextMap)                                 \
+  V(Map, eval_context_map, EvalContextMap)                                     \
   V(Map, script_context_map, ScriptContextMap)                                 \
   V(Map, block_context_map, BlockContextMap)                                   \
   V(Map, catch_context_map, CatchContextMap)                                   \
@@ -93,11 +93,16 @@
   V(Map, external_map, ExternalMap)                                            \
   V(Map, bytecode_array_map, BytecodeArrayMap)                                 \
   V(Map, module_info_map, ModuleInfoMap)                                       \
+  V(Map, no_closures_cell_map, NoClosuresCellMap)                              \
+  V(Map, one_closure_cell_map, OneClosureCellMap)                              \
+  V(Map, many_closures_cell_map, ManyClosuresCellMap)                          \
   /* String maps */                                                            \
   V(Map, native_source_string_map, NativeSourceStringMap)                      \
   V(Map, string_map, StringMap)                                                \
   V(Map, cons_one_byte_string_map, ConsOneByteStringMap)                       \
   V(Map, cons_string_map, ConsStringMap)                                       \
+  V(Map, thin_one_byte_string_map, ThinOneByteStringMap)                       \
+  V(Map, thin_string_map, ThinStringMap)                                       \
   V(Map, sliced_string_map, SlicedStringMap)                                   \
   V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap)                   \
   V(Map, external_string_map, ExternalStringMap)                               \
@@ -130,16 +135,6 @@
   V(Map, fixed_float32_array_map, FixedFloat32ArrayMap)                        \
   V(Map, fixed_float64_array_map, FixedFloat64ArrayMap)                        \
   V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap)             \
-  V(Map, float32x4_map, Float32x4Map)                                          \
-  V(Map, int32x4_map, Int32x4Map)                                              \
-  V(Map, uint32x4_map, Uint32x4Map)                                            \
-  V(Map, bool32x4_map, Bool32x4Map)                                            \
-  V(Map, int16x8_map, Int16x8Map)                                              \
-  V(Map, uint16x8_map, Uint16x8Map)                                            \
-  V(Map, bool16x8_map, Bool16x8Map)                                            \
-  V(Map, int8x16_map, Int8x16Map)                                              \
-  V(Map, uint8x16_map, Uint8x16Map)                                            \
-  V(Map, bool8x16_map, Bool8x16Map)                                            \
   /* Canonical empty values */                                                 \
   V(ByteArray, empty_byte_array, EmptyByteArray)                               \
   V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array)        \
@@ -157,17 +152,17 @@
   V(FixedArray, empty_sloppy_arguments_elements, EmptySloppyArgumentsElements) \
   V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
     EmptySlowElementDictionary)                                                \
-  V(TypeFeedbackVector, dummy_vector, DummyVector)                             \
   V(PropertyCell, empty_property_cell, EmptyPropertyCell)                      \
   V(WeakCell, empty_weak_cell, EmptyWeakCell)                                  \
   /* Protectors */                                                             \
   V(PropertyCell, array_protector, ArrayProtector)                             \
   V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector)         \
-  V(PropertyCell, has_instance_protector, HasInstanceProtector)                \
   V(Cell, species_protector, SpeciesProtector)                                 \
   V(PropertyCell, string_length_protector, StringLengthProtector)              \
   V(Cell, fast_array_iteration_protector, FastArrayIterationProtector)         \
-  V(Cell, array_iterator_protector, ArrayIteratorProtector)                    \
+  V(PropertyCell, array_iterator_protector, ArrayIteratorProtector)            \
+  V(PropertyCell, array_buffer_neutering_protector,                            \
+    ArrayBufferNeuteringProtector)                                             \
   /* Special numbers */                                                        \
   V(HeapNumber, nan_value, NanValue)                                           \
   V(HeapNumber, hole_nan_value, HoleNanValue)                                  \
@@ -190,7 +185,9 @@
     ExperimentalExtraNativesSourceCache)                                       \
   /* Lists and dictionaries */                                                 \
   V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary)    \
-  V(Object, symbol_registry, SymbolRegistry)                                   \
+  V(NameDictionary, public_symbol_table, PublicSymbolTable)                    \
+  V(NameDictionary, api_symbol_table, ApiSymbolTable)                          \
+  V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable)           \
   V(Object, script_list, ScriptList)                                           \
   V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
   V(FixedArray, materialized_objects, MaterializedObjects)                     \
@@ -203,11 +200,15 @@
   /* slots refer to the code with the reference to the weak object. */         \
   V(ArrayList, weak_new_space_object_to_code_list,                             \
     WeakNewSpaceObjectToCodeList)                                              \
+  /* List to hold onto feedback vectors that we need for code coverage */      \
+  V(Object, code_coverage_list, CodeCoverageList)                              \
   V(Object, weak_stack_trace_list, WeakStackTraceList)                         \
   V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos)       \
   V(FixedArray, serialized_templates, SerializedTemplates)                     \
+  V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes)     \
   /* Configured values */                                                      \
   V(TemplateList, message_listeners, MessageListeners)                         \
+  V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo)               \
   V(Code, js_entry_code, JsEntryCode)                                          \
   V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
   /* Oddball maps */                                                           \
@@ -221,7 +222,10 @@
   V(Map, exception_map, ExceptionMap)                                          \
   V(Map, termination_exception_map, TerminationExceptionMap)                   \
   V(Map, optimized_out_map, OptimizedOutMap)                                   \
-  V(Map, stale_register_map, StaleRegisterMap)
+  V(Map, stale_register_map, StaleRegisterMap)                                 \
+  /* per-Isolate map for JSPromiseCapability. */                               \
+  /* TODO(caitp): Make this a Struct */                                        \
+  V(Map, js_promise_capability_map, JSPromiseCapabilityMap)
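// A minimal sketch of how this V-list is consumed elsewhere in heap.h: each
// V(type, name, CamelName) entry is expanded into a typed accessor, so the
// entry added above yields Heap::js_promise_capability_map(). Illustrative
// expansion (macro body abbreviated):
//
//   #define ROOT_ACCESSOR(type, name, CamelName) \
//     type* name() { return type::cast(roots_[k##CamelName##RootIndex]); }
//   STRONG_ROOT_LIST(ROOT_ACCESSOR)
//   #undef ROOT_ACCESSOR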
 
 // Entries in this list are limited to Smis and are not visited during GC.
 #define SMI_ROOT_LIST(V)                                                       \
@@ -233,7 +237,10 @@
   /* function cache of the native context. */                                  \
   V(Smi, next_template_serial_number, NextTemplateSerialNumber)                \
   V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)     \
-  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)           \
+  V(Smi, construct_stub_create_deopt_pc_offset,                                \
+    ConstructStubCreateDeoptPCOffset)                                          \
+  V(Smi, construct_stub_invoke_deopt_pc_offset,                                \
+    ConstructStubInvokeDeoptPCOffset)                                          \
   V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)                 \
   V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)                 \
   V(Smi, interpreter_entry_return_pc_offset, InterpreterEntryReturnPCOffset)
@@ -264,16 +271,6 @@
   V(MetaMap)                            \
   V(HeapNumberMap)                      \
   V(MutableHeapNumberMap)               \
-  V(Float32x4Map)                       \
-  V(Int32x4Map)                         \
-  V(Uint32x4Map)                        \
-  V(Bool32x4Map)                        \
-  V(Int16x8Map)                         \
-  V(Uint16x8Map)                        \
-  V(Bool16x8Map)                        \
-  V(Int8x16Map)                         \
-  V(Uint8x16Map)                        \
-  V(Bool8x16Map)                        \
   V(NativeContextMap)                   \
   V(FixedArrayMap)                      \
   V(CodeMap)                            \
@@ -297,6 +294,7 @@
   V(WithContextMap)                     \
   V(BlockContextMap)                    \
   V(ModuleContextMap)                   \
+  V(EvalContextMap)                     \
   V(ScriptContextMap)                   \
   V(UndefinedMap)                       \
   V(TheHoleMap)                         \
@@ -306,6 +304,9 @@
   V(ArgumentsMarkerMap)                 \
   V(JSMessageObjectMap)                 \
   V(ForeignMap)                         \
+  V(NoClosuresCellMap)                  \
+  V(OneClosureCellMap)                  \
+  V(ManyClosuresCellMap)                \
   V(NanValue)                           \
   V(InfinityValue)                      \
   V(MinusZeroValue)                     \
@@ -325,6 +326,7 @@
 class HeapStats;
 class HistogramTimer;
 class Isolate;
+class LocalEmbedderHeapTracer;
 class MemoryAllocator;
 class MemoryReducer;
 class ObjectIterator;
@@ -347,8 +349,6 @@
 
 enum class ClearRecordedSlots { kYes, kNo };
 
-enum class ClearBlackArea { kYes, kNo };
-
 enum class GarbageCollectionReason {
   kUnknown = 0,
   kAllocationFailure = 1,
@@ -377,6 +377,17 @@
   // Also update src/tools/metrics/histograms/histograms.xml in chromium.
 };
 
+enum class YoungGenerationHandling {
+  kRegularScavenge = 0,
+  kFastPromotionDuringScavenge = 1,
+  // Histogram::InspectConstructionArguments in chromium requires us to have at
+  // least three buckets.
+  kUnusedBucket = 2,
+  // If you add new items here, then update the young_generation_handling in
+  // counters.h.
+  // Also update src/tools/metrics/histograms/histograms.xml in chromium.
+};
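// A hedged sketch of how such an enum feeds the histogram mentioned above
// (the counter accessor name is assumed, mirroring young_generation_handling
// in counters.h):
//
//   isolate->counters()->young_generation_handling()->AddSample(
//       static_cast<int>(YoungGenerationHandling::kRegularScavenge));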
+
 // A queue of objects promoted during scavenge. Each object is accompanied by
 // its size to avoid dereferencing a map pointer for scanning. The last page in
 // to-space is used for the promotion queue. On conflict during scavenge, the
@@ -554,12 +565,6 @@
 
   enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
 
-  // Indicates whether live bytes adjustment is triggered
-  // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER),
-  // - or from within GC (CONCURRENT_TO_SWEEPER),
-  // - or mutator code (CONCURRENT_TO_SWEEPER).
-  enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
-
   enum UpdateAllocationSiteMode { kGlobal, kCached };
 
   // Taking this lock prevents the GC from entering a phase that relocates
@@ -607,7 +612,7 @@
   static const int kMaxOldSpaceSizeMediumMemoryDevice =
       256 * kPointerMultiplier;
   static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
-  static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;
+  static const int kMaxOldSpaceSizeHugeMemoryDevice = 1024 * kPointerMultiplier;
 
   // The executable size has to be a multiple of Page::kPageSize.
   // Sizes are in MB.
@@ -643,6 +648,8 @@
   // The minimum size of a HeapObject on the heap.
   static const int kMinObjectSizeInWords = 2;
 
+  static const int kMinPromotedPercentForFastPromotionMode = 90;
+
   STATIC_ASSERT(kUndefinedValueRootIndex ==
                 Internals::kUndefinedValueRootIndex);
   STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
@@ -673,6 +680,8 @@
   // they are in new space.
   static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
 
+  static bool IsUnmodifiedHeapObject(Object** p);
+
   // Zapping is needed for verify heap, and always done in debug builds.
   static inline bool ShouldZapGarbage() {
 #ifdef DEBUG
@@ -739,32 +748,27 @@
   // Initialize a filler object to keep the ability to iterate over the heap
   // when introducing gaps within pages. If slots could have been recorded in
   // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
-  // pass ClearRecordedSlots::kNo. If the filler was created in a black area
-  // we may want to clear the corresponding mark bits with ClearBlackArea::kYes,
-  // which is the default. ClearBlackArea::kNo does not clear the mark bits.
-  void CreateFillerObjectAt(
-      Address addr, int size, ClearRecordedSlots mode,
-      ClearBlackArea black_area_mode = ClearBlackArea::kYes);
+  // pass ClearRecordedSlots::kNo.
+  HeapObject* CreateFillerObjectAt(Address addr, int size,
+                                   ClearRecordedSlots mode);
 
   bool CanMoveObjectStart(HeapObject* object);
 
+  static bool IsImmovable(HeapObject* object);
+
   // Maintain consistency of live bytes during incremental marking.
-  void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode);
+  void AdjustLiveBytes(HeapObject* object, int by);
 
   // Trim the given array from the left. Note that this relocates the object
   // start and hence is only valid if there is only a single reference to it.
   FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
 
   // Trim the given array from the right.
-  template<Heap::InvocationMode mode>
   void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
 
   // Converts the given boolean condition to JavaScript boolean value.
   inline Oddball* ToBoolean(bool condition);
 
-  // Check whether the heap is currently iterable.
-  bool IsHeapIterable();
-
   // Notify the heap that a context has been disposed.
   int NotifyContextDisposed(bool dependant_context);
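// Typical trimming sequence built on the helpers above (a sketch; the exact
// internals are not part of this header): the freed tail of a right-trimmed
// array becomes a filler so heap iteration stays linear:
//
//   heap->RightTrimFixedArray(array, elements_to_trim);
//   // which, roughly, ends up doing:
//   heap->CreateFillerObjectAt(new_end, bytes_freed, mode);  // kYes or kNo
//   heap->AdjustLiveBytes(array, -bytes_freed);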
 
@@ -787,6 +791,9 @@
   Object* encountered_weak_collections() const {
     return encountered_weak_collections_;
   }
+  void VisitEncounteredWeakCollections(ObjectVisitor* visitor) {
+    visitor->VisitPointer(&encountered_weak_collections_);
+  }
 
   void set_encountered_weak_cells(Object* weak_cell) {
     encountered_weak_cells_ = weak_cell;
@@ -816,6 +823,7 @@
   void PrintShortHeapStatistics();
 
   inline HeapState gc_state() { return gc_state_; }
+  void SetGCState(HeapState state);
 
   inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
 
@@ -831,7 +839,7 @@
   // Support for the API.
   //
 
-  void CreateApiObjects();
+  bool CreateApiObjects();
 
   // Implements the corresponding V8 API function.
   bool IdleNotification(double deadline_in_seconds);
@@ -841,6 +849,9 @@
                                   bool is_isolate_locked);
   void CheckMemoryPressure();
 
+  void SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
+                              void* data);
+
   double MonotonicallyIncreasingTimeInMs();
 
   void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -867,13 +878,15 @@
   inline int NextScriptId();
 
   inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
-  inline void SetConstructStubDeoptPCOffset(int pc_offset);
+  inline void SetConstructStubCreateDeoptPCOffset(int pc_offset);
+  inline void SetConstructStubInvokeDeoptPCOffset(int pc_offset);
   inline void SetGetterStubDeoptPCOffset(int pc_offset);
   inline void SetSetterStubDeoptPCOffset(int pc_offset);
   inline void SetInterpreterEntryReturnPCOffset(int pc_offset);
   inline int GetNextTemplateSerialNumber();
 
   inline void SetSerializedTemplates(FixedArray* templates);
+  inline void SetSerializedGlobalProxySizes(FixedArray* sizes);
 
   // For post mortem debugging.
   void RememberUnmappedPage(Address page, bool compacted);
@@ -948,6 +961,30 @@
     return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
   }
 
+  size_t HeapLimitForDebugging() {
+    const size_t kDebugHeapSizeFactor = 4;
+    size_t max_limit = std::numeric_limits<size_t>::max() / 4;
+    return Min(max_limit,
+               initial_max_old_generation_size_ * kDebugHeapSizeFactor);
+  }
+
+  void IncreaseHeapLimitForDebugging() {
+    max_old_generation_size_ =
+        Max(max_old_generation_size_, HeapLimitForDebugging());
+  }
+
+  void RestoreOriginalHeapLimit() {
+    // Do not set the limit lower than the live size + some slack.
+    size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
+    max_old_generation_size_ =
+        Min(max_old_generation_size_,
+            Max(initial_max_old_generation_size_, min_limit));
+  }
+
+  bool IsHeapLimitIncreasedForDebugging() {
+    return max_old_generation_size_ == HeapLimitForDebugging();
+  }
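// Worked example with illustrative numbers: if
// initial_max_old_generation_size_ is 256 MB, HeapLimitForDebugging() returns
// Min(SIZE_MAX / 4, 4 * 256 MB) = 1 GB, so attaching a debugger can grow the
// old-generation budget fourfold. RestoreOriginalHeapLimit() later shrinks it
// again, but never below live size plus 25% slack.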
+
   // ===========================================================================
   // Initialization. ===========================================================
   // ===========================================================================
@@ -1172,6 +1209,8 @@
   void ClearRecordedSlot(HeapObject* object, Object** slot);
   void ClearRecordedSlotRange(Address start, Address end);
 
+  bool HasRecordedSlot(HeapObject* object, Object** slot);
+
   // ===========================================================================
   // Incremental marking API. ==================================================
   // ===========================================================================
@@ -1199,28 +1238,31 @@
 
   IncrementalMarking* incremental_marking() { return incremental_marking_; }
 
+  // The runtime uses this function to notify the GC of potentially unsafe
+  // object layout changes that require special synchronization with the
+  // concurrent marker. A layout change is unsafe if
+  // - it removes a tagged in-object field, or
+  // - it replaces a tagged in-object field with an untagged in-object field.
+  void NotifyObjectLayoutChange(HeapObject* object,
+                                const DisallowHeapAllocation&);
+#ifdef VERIFY_HEAP
+  // This function checks that either
+  // - the map transition is safe, or
+  // - it was communicated to the GC using NotifyObjectLayoutChange.
+  void VerifyObjectLayoutChange(HeapObject* object, Map* new_map);
+#endif
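// Expected call pattern (a sketch; the caller-side names are illustrative and
// not part of this header):
//
//   DisallowHeapAllocation no_gc;
//   heap->NotifyObjectLayoutChange(object, no_gc);
//   object->synchronized_set_map(new_map);  // layout may now shrink fields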
+
   // ===========================================================================
   // Embedder heap tracer support. =============================================
   // ===========================================================================
 
+  LocalEmbedderHeapTracer* local_embedder_heap_tracer() {
+    return local_embedder_heap_tracer_;
+  }
   void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
-
-  bool UsingEmbedderHeapTracer() { return embedder_heap_tracer() != nullptr; }
-
   void TracePossibleWrapper(JSObject* js_object);
-
   void RegisterExternallyReferencedObject(Object** object);
 
-  void RegisterWrappersWithEmbedderHeapTracer();
-
-  // In order to avoid running out of memory we force tracing wrappers if there
-  // are too many of them.
-  bool RequiresImmediateWrapperProcessing();
-
-  EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
-
-  size_t wrappers_to_trace() { return wrappers_to_trace_.size(); }
-
   // ===========================================================================
   // External string table API. ================================================
   // ===========================================================================
@@ -1398,19 +1440,6 @@
   // Returns the size of objects residing in non new spaces.
   size_t PromotedSpaceSizeOfObjects();
 
-  double total_regexp_code_generated() { return total_regexp_code_generated_; }
-  void IncreaseTotalRegexpCodeGenerated(int size) {
-    total_regexp_code_generated_ += size;
-  }
-
-  void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
-    if (is_crankshafted) {
-      crankshaft_codegen_bytes_generated_ += size;
-    } else {
-      full_codegen_bytes_generated_ += size;
-    }
-  }
-
   // ===========================================================================
   // Prologue/epilogue callback methods.========================================
   // ===========================================================================
@@ -1485,10 +1514,6 @@
 #ifdef DEBUG
   void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
 
-  void TracePathToObjectFrom(Object* target, Object* root);
-  void TracePathToObject(Object* target);
-  void TracePathToGlobal();
-
   void Print();
   void PrintHandles();
 
@@ -1501,6 +1526,7 @@
       GarbageCollectionReason gc_reason);
 
  private:
+  class SkipStoreBufferScope;
   class PretenuringScope;
 
   // External strings table is a place where all external strings are
@@ -1511,11 +1537,14 @@
     // Registers an external string.
     inline void AddString(String* string);
 
-    inline void Iterate(ObjectVisitor* v);
+    inline void IterateAll(ObjectVisitor* v);
+    inline void IterateNewSpaceStrings(ObjectVisitor* v);
+    inline void PromoteAllNewSpaceStrings();
 
-    // Restores internal invariant and gets rid of collected strings.
-    // Must be called after each Iterate() that modified the strings.
-    void CleanUp();
+    // Restores internal invariant and gets rid of collected strings. Must be
+    // called after each Iterate*() that modified the strings.
+    void CleanUpAll();
+    void CleanUpNewSpaceStrings();
 
     // Destroys all allocated memory.
     void TearDown();
@@ -1632,10 +1661,6 @@
     return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
   }
 
-  // Checks whether both, the internal marking deque, and the embedder provided
-  // one are empty. Avoid in fast path as it potentially calls through the API.
-  bool MarkingDequesAreEmpty();
-
   void PreprocessStackTraces();
 
   // Checks whether a global GC is necessary
@@ -1747,6 +1772,10 @@
 
   void CollectGarbageOnMemoryPressure();
 
+  void InvokeOutOfMemoryCallback();
+
+  void ComputeFastPromotionMode(double survival_rate);
+
   // Attempt to over-approximate the weak closure by marking object groups and
   // implicit references from global handles, but don't atomically complete
   // marking. If we continue to mark incrementally, we might have marked
@@ -1790,6 +1819,7 @@
 
   // Performs a minor collection in new generation.
   void Scavenge();
+  void EvacuateYoungGeneration();
 
   Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
 
@@ -1840,6 +1870,14 @@
   // Growing strategy. =========================================================
   // ===========================================================================
 
+  // For some webpages, RAIL mode does not switch from PERFORMANCE_LOAD.
+  // This constant limits the effect of load RAIL mode on GC.
+  // The value is arbitrary; it was chosen as the largest load time observed
+  // in V8 browsing benchmarks.
+  static const int kMaxLoadTimeMs = 7000;
+
+  bool ShouldOptimizeForLoadTime();
+
   // Decrease the allocation limit if the new limit based on the given
   // parameters is lower than the current limit.
   void DampenOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
@@ -1862,7 +1900,7 @@
 
   bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
 
-  bool CanExpandOldGeneration(int size) {
+  bool CanExpandOldGeneration(size_t size) {
     if (force_oom_) return false;
     return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
   }
@@ -1924,16 +1962,8 @@
                           AllocationSite* allocation_site = NULL);
 
   // Allocates a HeapNumber from value.
-  MUST_USE_RESULT AllocationResult
-  AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
-                     PretenureFlag pretenure = NOT_TENURED);
-
-// Allocates SIMD values from the given lane values.
-#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \
-  AllocationResult Allocate##Type(lane_type lanes[lane_count],             \
-                                  PretenureFlag pretenure = NOT_TENURED);
-  SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION)
-#undef SIMD_ALLOCATE_DECLARATION
+  MUST_USE_RESULT AllocationResult AllocateHeapNumber(
+      MutableMode mode = IMMUTABLE, PretenureFlag pretenure = NOT_TENURED);
 
   // Allocates a byte array of the specified length
   MUST_USE_RESULT AllocationResult
@@ -2098,10 +2128,6 @@
   MUST_USE_RESULT AllocationResult
       AllocateCode(int object_size, bool immovable);
 
-  MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);
-
-  MUST_USE_RESULT AllocationResult InternalizeString(String* str);
-
   // ===========================================================================
 
   void set_force_oom(bool value) { force_oom_ = value; }
@@ -2128,6 +2154,7 @@
   size_t max_semi_space_size_;
   size_t initial_semispace_size_;
   size_t max_old_generation_size_;
+  size_t initial_max_old_generation_size_;
   size_t initial_old_generation_size_;
   bool old_generation_size_configured_;
   size_t max_executable_size_;
@@ -2148,6 +2175,9 @@
   // and reset by a mark-compact garbage collection.
   base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
 
+  v8::debug::OutOfMemoryCallback out_of_memory_callback_;
+  void* out_of_memory_callback_data_;
+
   // For keeping track of context disposals.
   int contexts_disposed_;
 
@@ -2222,9 +2252,6 @@
   List<GCCallbackPair> gc_epilogue_callbacks_;
   List<GCCallbackPair> gc_prologue_callbacks_;
 
-  // Total RegExp code ever generated
-  double total_regexp_code_generated_;
-
   int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
 
   GCTracer* tracer_;
@@ -2275,10 +2302,6 @@
 
   AllocationObserver* idle_scavenge_observer_;
 
-  // These two counters are monotomically increasing and never reset.
-  size_t full_codegen_bytes_generated_;
-  size_t crankshaft_codegen_bytes_generated_;
-
   // This counter is increased before each GC and never reset.
   // To account for the bytes allocated since the last GC, use the
   // NewSpaceAllocationCounter() function.
@@ -2338,13 +2361,16 @@
   // The depth of HeapIterator nestings.
   int heap_iterator_depth_;
 
-  EmbedderHeapTracer* embedder_heap_tracer_;
-  std::vector<std::pair<void*, void*>> wrappers_to_trace_;
+  LocalEmbedderHeapTracer* local_embedder_heap_tracer_;
+
+  bool fast_promotion_mode_;
 
   // Used for testing purposes.
   bool force_oom_;
   bool delay_sweeper_tasks_for_testing_;
 
+  HeapObject* pending_layout_change_object_;
+
   // Classes in "heap" can be friends.
   friend class AlwaysAllocateScope;
   friend class GCCallbacksScope;
@@ -2515,17 +2541,8 @@
   HeapObject* next();
 
  private:
-  struct MakeHeapIterableHelper {
-    explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
-  };
-
   HeapObject* NextObject();
 
-  // The following two fields need to be declared in this order. Initialization
-  // order guarantees that we first make the heap iterable (which may involve
-  // allocations) and only then lock it down by not allowing further
-  // allocations.
-  MakeHeapIterableHelper make_heap_iterable_helper_;
   DisallowHeapAllocation no_heap_allocation_;
 
   Heap* heap_;
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index 4b1d771..9e8fdc7 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -9,6 +9,7 @@
 #include "src/conversions.h"
 #include "src/heap/gc-idle-time-handler.h"
 #include "src/heap/gc-tracer.h"
+#include "src/heap/heap-inl.h"
 #include "src/heap/mark-compact-inl.h"
 #include "src/heap/object-stats.h"
 #include "src/heap/objects-visiting-inl.h"
@@ -32,21 +33,19 @@
       was_activated_(false),
       black_allocation_(false),
       finalize_marking_completed_(false),
+      trace_wrappers_toggle_(false),
       request_type_(NONE),
       new_generation_observer_(*this, kAllocatedThreshold),
       old_generation_observer_(*this, kAllocatedThreshold) {}
 
 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
   HeapObject* value_heap_obj = HeapObject::cast(value);
-  MarkBit value_bit = ObjectMarking::MarkBitFrom(value_heap_obj);
-  DCHECK(!Marking::IsImpossible(value_bit));
+  DCHECK(!ObjectMarking::IsImpossible(value_heap_obj));
+  DCHECK(!ObjectMarking::IsImpossible(obj));
+  const bool is_black = ObjectMarking::IsBlack(obj);
 
-  MarkBit obj_bit = ObjectMarking::MarkBitFrom(obj);
-  DCHECK(!Marking::IsImpossible(obj_bit));
-  bool is_black = Marking::IsBlack(obj_bit);
-
-  if (is_black && Marking::IsWhite(value_bit)) {
-    WhiteToGreyAndPush(value_heap_obj, value_bit);
+  if (is_black && ObjectMarking::IsWhite(value_heap_obj)) {
+    WhiteToGreyAndPush(value_heap_obj);
     RestartIfNotMarking();
   }
   return is_compacting_ && is_black;
@@ -117,9 +116,8 @@
   }
 }
 
-
-void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
-  Marking::WhiteToGrey(mark_bit);
+void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
+  ObjectMarking::WhiteToGrey(obj);
   heap_->mark_compact_collector()->marking_deque()->Push(obj);
 }
 
@@ -127,29 +125,26 @@
 static void MarkObjectGreyDoNotEnqueue(Object* obj) {
   if (obj->IsHeapObject()) {
     HeapObject* heap_obj = HeapObject::cast(obj);
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(HeapObject::cast(obj));
-    if (Marking::IsBlack(mark_bit)) {
-      MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
-    }
-    Marking::AnyToGrey(mark_bit);
+    ObjectMarking::AnyToGrey(heap_obj);
   }
 }
 
-void IncrementalMarking::TransferMark(Heap* heap, Address old_start,
-                                      Address new_start) {
+void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
+                                      HeapObject* to) {
+  DCHECK(MemoryChunk::FromAddress(from->address())->SweepingDone());
   // This is only used when resizing an object.
-  DCHECK(MemoryChunk::FromAddress(old_start) ==
-         MemoryChunk::FromAddress(new_start));
+  DCHECK(MemoryChunk::FromAddress(from->address()) ==
+         MemoryChunk::FromAddress(to->address()));
 
   if (!heap->incremental_marking()->IsMarking()) return;
 
   // If the mark doesn't move, we don't check the color of the object.
   // It doesn't matter whether the object is black, since it hasn't changed
   // size, so the adjustment to the live data count will be zero anyway.
-  if (old_start == new_start) return;
+  if (from == to) return;
 
-  MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(new_start);
-  MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(old_start);
+  MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(to);
+  MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(from);
 
 #ifdef DEBUG
   Marking::ObjectColor old_color = Marking::Color(old_mark_bit);
@@ -157,12 +152,12 @@
 
   if (Marking::IsBlack(old_mark_bit)) {
     Marking::BlackToWhite(old_mark_bit);
-    Marking::MarkBlack(new_mark_bit);
+    Marking::WhiteToBlack(new_mark_bit);
     return;
   } else if (Marking::IsGrey(old_mark_bit)) {
     Marking::GreyToWhite(old_mark_bit);
-    heap->incremental_marking()->WhiteToGreyAndPush(
-        HeapObject::FromAddress(new_start), new_mark_bit);
+    Marking::WhiteToGrey(new_mark_bit);
+    heap->mark_compact_collector()->marking_deque()->Push(to);
     heap->incremental_marking()->RestartIfNotMarking();
   }
 
@@ -210,10 +205,10 @@
       } while (scan_until_end && start_offset < object_size);
       chunk->set_progress_bar(start_offset);
       if (start_offset < object_size) {
-        if (Marking::IsGrey(ObjectMarking::MarkBitFrom(object))) {
+        if (ObjectMarking::IsGrey(object)) {
           heap->mark_compact_collector()->marking_deque()->Unshift(object);
         } else {
-          DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+          DCHECK(ObjectMarking::IsBlack(object));
           heap->mark_compact_collector()->UnshiftBlack(object);
         }
         heap->incremental_marking()->NotifyIncompleteScanOfObject(
@@ -265,10 +260,8 @@
   // Returns true if object needed marking and false otherwise.
   INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
     HeapObject* heap_object = HeapObject::cast(obj);
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
-    if (Marking::IsWhite(mark_bit)) {
-      Marking::MarkBlack(mark_bit);
-      MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
+    if (ObjectMarking::IsWhite(heap_object)) {
+      ObjectMarking::WhiteToBlack(heap_object);
       return true;
     }
     return false;
@@ -276,7 +269,7 @@
 };
 
 void IncrementalMarking::IterateBlackObject(HeapObject* object) {
-  if (IsMarking() && Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
+  if (IsMarking() && ObjectMarking::IsBlack(object)) {
     Page* page = Page::FromAddress(object->address());
     if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
       // IterateBlackObject requires us to visit the whole object.
@@ -524,10 +517,10 @@
 
   state_ = MARKING;
 
-  if (heap_->UsingEmbedderHeapTracer()) {
+  {
     TRACE_GC(heap()->tracer(),
              GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
-    heap_->embedder_heap_tracer()->TracePrologue();
+    heap_->local_embedder_heap_tracer()->TracePrologue();
   }
 
   RecordWriteStub::Mode mode = is_compacting_
@@ -603,7 +596,7 @@
   TRACE_GC(heap_->tracer(),
            GCTracer::Scope::MC_INCREMENTAL_FINALIZE_OBJECT_GROUPING);
 
-  DCHECK(!heap_->UsingEmbedderHeapTracer());
+  DCHECK(!heap_->local_embedder_heap_tracer()->InUse());
   DCHECK(!finalize_marking_completed_);
   DCHECK(IsMarking());
 
@@ -631,7 +624,7 @@
     HeapObject* value = HeapObject::cast(weak_cell->value());
     // Remove weak cells with live objects from the list, they do not need
     // clearing.
-    if (MarkCompactCollector::IsMarked(value)) {
+    if (ObjectMarking::IsBlackOrGrey(value)) {
       // Record slot, if value is pointing to an evacuation candidate.
       Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
       heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
@@ -661,8 +654,7 @@
   }
   Object* constructor = map->GetConstructor();
   if (!constructor->IsHeapObject() ||
-      Marking::IsWhite(
-          ObjectMarking::MarkBitFrom(HeapObject::cast(constructor)))) {
+      ObjectMarking::IsWhite(HeapObject::cast(constructor))) {
     // The constructor is dead, no new objects with this map can
     // be created. Do not retain this map.
     return false;
@@ -691,16 +683,14 @@
     int age = Smi::cast(retained_maps->Get(i + 1))->value();
     int new_age;
     Map* map = Map::cast(cell->value());
-    MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
     if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
-        Marking::IsWhite(map_mark)) {
+        ObjectMarking::IsWhite(map)) {
       if (ShouldRetainMap(map, age)) {
         MarkGrey(heap(), map);
       }
       Object* prototype = map->prototype();
       if (age > 0 && prototype->IsHeapObject() &&
-          Marking::IsWhite(
-              ObjectMarking::MarkBitFrom(HeapObject::cast(prototype)))) {
+          ObjectMarking::IsWhite(HeapObject::cast(prototype))) {
         // The prototype is not marked, age the map.
         new_age = age - 1;
       } else {
@@ -736,7 +726,7 @@
   // 4) Remove weak cell with live values from the list of weak cells, they
   // do not need processing during GC.
   MarkRoots();
-  if (!heap_->UsingEmbedderHeapTracer()) {
+  if (!heap_->local_embedder_heap_tracer()->InUse()) {
     MarkObjectGroups();
   }
   if (incremental_marking_finalization_rounds_ == 0) {
@@ -750,7 +740,8 @@
       abs(old_marking_deque_top -
           heap_->mark_compact_collector()->marking_deque()->top());
 
-  marking_progress += static_cast<int>(heap_->wrappers_to_trace());
+  marking_progress += static_cast<int>(
+      heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
 
   double end = heap_->MonotonicallyIncreasingTimeInMs();
   double delta = end - start;
@@ -806,16 +797,12 @@
       // them.
       if (map_word.IsForwardingAddress()) {
         HeapObject* dest = map_word.ToForwardingAddress();
-        if (Marking::IsBlack(ObjectMarking::MarkBitFrom(dest->address())))
-          continue;
+        if (ObjectMarking::IsBlack(dest)) continue;
         array[new_top] = dest;
         new_top = ((new_top + 1) & mask);
         DCHECK(new_top != marking_deque->bottom());
-#ifdef DEBUG
-        MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
-        DCHECK(Marking::IsGrey(mark_bit) ||
-               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
-#endif
+        DCHECK(ObjectMarking::IsGrey(obj) ||
+               (obj->IsFiller() && ObjectMarking::IsWhite(obj)));
       }
     } else if (obj->map() != filler_map) {
       // Skip one word filler objects that appear on the
@@ -823,14 +810,11 @@
       array[new_top] = obj;
       new_top = ((new_top + 1) & mask);
       DCHECK(new_top != marking_deque->bottom());
-#ifdef DEBUG
-      MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
-      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-      DCHECK(Marking::IsGrey(mark_bit) ||
-             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
-             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
-              Marking::IsBlack(mark_bit)));
-#endif
+      DCHECK(ObjectMarking::IsGrey(obj) ||
+             (obj->IsFiller() && ObjectMarking::IsWhite(obj)) ||
+             (MemoryChunk::FromAddress(obj->address())
+                  ->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+              ObjectMarking::IsBlack(obj)));
     }
   }
   marking_deque->set_top(new_top);
@@ -854,17 +838,14 @@
 }
 
 void IncrementalMarking::MarkGrey(Heap* heap, HeapObject* object) {
-  MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
-  if (Marking::IsWhite(mark_bit)) {
-    heap->incremental_marking()->WhiteToGreyAndPush(object, mark_bit);
+  if (ObjectMarking::IsWhite(object)) {
+    heap->incremental_marking()->WhiteToGreyAndPush(object);
   }
 }
 
 void IncrementalMarking::MarkBlack(HeapObject* obj, int size) {
-  MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
-  if (Marking::IsBlack(mark_bit)) return;
-  Marking::GreyToBlack(mark_bit);
-  MemoryChunk::IncrementLiveBytesFromGC(obj, size);
+  if (ObjectMarking::IsBlack(obj)) return;
+  ObjectMarking::GreyToBlack(obj);
 }
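// Taken together, the helpers above implement the classic tri-color
// transitions (a summary sketch; ObjectMarking wraps the per-page mark
// bitmaps):
//
//   white -> grey  : WhiteToGreyAndPush(obj)  // discovered, queued for scan
//   grey  -> black : GreyToBlack(obj)         // body fully scanned
//   white -> black : WhiteToBlack(obj)        // scanned without queueing
//
// BaseRecordWrite above preserves the invariant that a black object never
// points to a white one while incremental marking is active.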
 
 intptr_t IncrementalMarking::ProcessMarkingDeque(
@@ -879,8 +860,7 @@
     // Left trimming may result in white filler objects on the marking deque.
     // Ignore these objects.
     if (obj->IsFiller()) {
-      DCHECK(Marking::IsImpossible(ObjectMarking::MarkBitFrom(obj)) ||
-             Marking::IsWhite(ObjectMarking::MarkBitFrom(obj)));
+      DCHECK(ObjectMarking::IsImpossible(obj) || ObjectMarking::IsWhite(obj));
       continue;
     }
 
@@ -890,6 +870,11 @@
     VisitObject(map, obj, size);
     bytes_processed += size - unscanned_bytes_of_large_object_;
   }
+  // Report all found wrappers to the embedder. This is necessary as the
+  // embedder could invalidate wrappers as soon as V8 is done with its
+  // incremental marking processing. Any cached wrappers could result in
+  // broken pointers at this point.
+  heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
   return bytes_processed;
 }
 
@@ -930,10 +915,8 @@
     HeapObject* cache = HeapObject::cast(
         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
     if (!cache->IsUndefined(heap_->isolate())) {
-      MarkBit mark_bit = ObjectMarking::MarkBitFrom(cache);
-      if (Marking::IsGrey(mark_bit)) {
-        Marking::GreyToBlack(mark_bit);
-        MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
+      if (ObjectMarking::IsGrey(cache)) {
+        ObjectMarking::GreyToBlack(cache);
       }
     }
     context = Context::cast(context)->next_context_link();
@@ -1026,15 +1009,40 @@
 double IncrementalMarking::AdvanceIncrementalMarking(
     double deadline_in_ms, CompletionAction completion_action,
     ForceCompletionAction force_completion, StepOrigin step_origin) {
+  HistogramTimerScope incremental_marking_scope(
+      heap_->isolate()->counters()->gc_incremental_marking());
+  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
   DCHECK(!IsStopped());
+  DCHECK_EQ(
+      0, heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
 
   double remaining_time_in_ms = 0.0;
   intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
       kStepSizeInMs,
       heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
 
+  const bool incremental_wrapper_tracing =
+      state_ == MARKING && FLAG_incremental_marking_wrappers &&
+      heap_->local_embedder_heap_tracer()->InUse();
   do {
-    Step(step_size_in_bytes, completion_action, force_completion, step_origin);
+    if (incremental_wrapper_tracing && trace_wrappers_toggle_) {
+      TRACE_GC(heap()->tracer(),
+               GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
+      const double wrapper_deadline =
+          heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
+      if (!heap_->local_embedder_heap_tracer()
+               ->ShouldFinalizeIncrementalMarking()) {
+        heap_->local_embedder_heap_tracer()->Trace(
+            wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
+                                  EmbedderHeapTracer::ForceCompletionAction::
+                                      DO_NOT_FORCE_COMPLETION));
+      }
+    } else {
+      Step(step_size_in_bytes, completion_action, force_completion,
+           step_origin);
+    }
+    trace_wrappers_toggle_ = !trace_wrappers_toggle_;
     remaining_time_in_ms =
         deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
   } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
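// The loop above now interleaves embedder and V8 work on alternating
// iterations (a condensed sketch of the control flow, names generic):
//
//   while (time remains && !complete) {
//     if (wrapper_tracing && toggle) tracer->Trace(deadline, ...);
//     else                           Step(step_size, ...);
//     toggle = !toggle;
//   }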
@@ -1109,6 +1117,10 @@
       bytes_marked_ahead_of_schedule_ -= bytes_to_process;
       bytes_processed = bytes_to_process;
     } else {
+      HistogramTimerScope incremental_marking_scope(
+          heap_->isolate()->counters()->gc_incremental_marking());
+      TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+      TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
       bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
                              FORCE_COMPLETION, StepOrigin::kV8);
     }
@@ -1120,10 +1132,6 @@
                                 CompletionAction action,
                                 ForceCompletionAction completion,
                                 StepOrigin step_origin) {
-  HistogramTimerScope incremental_marking_scope(
-      heap_->isolate()->counters()->gc_incremental_marking());
-  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
-  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
   double start = heap_->MonotonicallyIncreasingTimeInMs();
 
   if (state_ == SWEEPING) {
@@ -1133,41 +1141,26 @@
 
   size_t bytes_processed = 0;
   if (state_ == MARKING) {
-    const bool incremental_wrapper_tracing =
-        FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer();
-    const bool process_wrappers =
-        incremental_wrapper_tracing &&
-        (heap_->RequiresImmediateWrapperProcessing() ||
-         heap_->mark_compact_collector()->marking_deque()->IsEmpty());
-    bool wrapper_work_left = incremental_wrapper_tracing;
-    if (!process_wrappers) {
-      bytes_processed = ProcessMarkingDeque(bytes_to_process);
-      if (step_origin == StepOrigin::kTask) {
-        bytes_marked_ahead_of_schedule_ += bytes_processed;
-      }
-    } else {
-      const double wrapper_deadline =
-          heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
-      TRACE_GC(heap()->tracer(),
-               GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
-      heap_->RegisterWrappersWithEmbedderHeapTracer();
-      wrapper_work_left = heap_->embedder_heap_tracer()->AdvanceTracing(
-          wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
-                                EmbedderHeapTracer::ForceCompletionAction::
-                                    DO_NOT_FORCE_COMPLETION));
+    bytes_processed = ProcessMarkingDeque(bytes_to_process);
+    if (step_origin == StepOrigin::kTask) {
+      bytes_marked_ahead_of_schedule_ += bytes_processed;
     }
 
-    if (heap_->mark_compact_collector()->marking_deque()->IsEmpty() &&
-        !wrapper_work_left) {
-      if (completion == FORCE_COMPLETION ||
-          IsIdleMarkingDelayCounterLimitReached()) {
-        if (!finalize_marking_completed_) {
-          FinalizeMarking(action);
+    if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
+      if (heap_->local_embedder_heap_tracer()
+              ->ShouldFinalizeIncrementalMarking()) {
+        if (completion == FORCE_COMPLETION ||
+            IsIdleMarkingDelayCounterLimitReached()) {
+          if (!finalize_marking_completed_) {
+            FinalizeMarking(action);
+          } else {
+            MarkingComplete(action);
+          }
         } else {
-          MarkingComplete(action);
+          IncrementIdleMarkingDelayCounter();
         }
       } else {
-        IncrementIdleMarkingDelayCounter();
+        heap_->local_embedder_heap_tracer()->NotifyV8MarkingDequeWasEmpty();
       }
     }
   }
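// Net effect of this restructuring (a sketch of the new decision tree): once
// V8's marking deque drains, finalization only proceeds if the embedder also
// reports completion; otherwise the embedder is notified so wrapper tracing
// can continue:
//
//   if (deque empty) {
//     if (embedder done) FinalizeMarking / MarkingComplete;
//     else               NotifyV8MarkingDequeWasEmpty();
//   }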
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index 7ce0ae2..37f1e5c 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -22,7 +22,7 @@
 
 enum class StepOrigin { kV8, kTask };
 
-class IncrementalMarking {
+class V8_EXPORT_PRIVATE IncrementalMarking {
  public:
   enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
 
@@ -151,14 +151,13 @@
   INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
                                      Code* value));
 
-  V8_EXPORT_PRIVATE void RecordWriteSlow(HeapObject* obj, Object** slot,
-                                         Object* value);
+  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
   void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
   void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
   void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
   void RecordCodeTargetPatch(Address pc, HeapObject* value);
 
-  void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+  void WhiteToGreyAndPush(HeapObject* obj);
 
   inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
     SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
@@ -184,7 +183,7 @@
 
   static void MarkBlack(HeapObject* object, int size);
 
-  static void TransferMark(Heap* heap, Address old_start, Address new_start);
+  static void TransferMark(Heap* heap, HeapObject* from, HeapObject* to);
 
   // Returns true if the color transfer requires live bytes updating.
   INLINE(static bool TransferColor(HeapObject* from, HeapObject* to,
@@ -298,6 +297,7 @@
   bool was_activated_;
   bool black_allocation_;
   bool finalize_marking_completed_;
+  bool trace_wrappers_toggle_;
 
   GCRequestType request_type_;
 
diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h
index 784a76f..3104ea2 100644
--- a/src/heap/mark-compact-inl.h
+++ b/src/heap/mark-compact-inl.h
@@ -13,58 +13,34 @@
 namespace internal {
 
 void MarkCompactCollector::PushBlack(HeapObject* obj) {
-  DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
-  if (marking_deque()->Push(obj)) {
-    MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
-  } else {
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
-    Marking::BlackToGrey(mark_bit);
+  DCHECK(ObjectMarking::IsBlack(obj));
+  if (!marking_deque()->Push(obj)) {
+    ObjectMarking::BlackToGrey(obj);
   }
 }
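// If Push() fails, the deque has overflowed and the object is downgraded to
// grey; mark-compact later recovers by rescanning for grey objects (a sketch
// of that recovery loop; method names assumed from the surrounding
// collector):
//
//   while (marking_deque()->overflowed()) {
//     RefillMarkingDeque();  // re-push grey objects found via mark bits
//     EmptyMarkingDeque();
//   }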
 
 
 void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
-  DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
+  DCHECK(ObjectMarking::IsBlack(obj));
   if (!marking_deque()->Unshift(obj)) {
-    MemoryChunk::IncrementLiveBytesFromGC(obj, -obj->Size());
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
-    Marking::BlackToGrey(mark_bit);
+    ObjectMarking::BlackToGrey(obj);
   }
 }
 
-
-void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
-  DCHECK(ObjectMarking::MarkBitFrom(obj) == mark_bit);
-  if (Marking::IsWhite(mark_bit)) {
-    Marking::WhiteToBlack(mark_bit);
-    DCHECK(obj->GetIsolate()->heap()->Contains(obj));
+void MarkCompactCollector::MarkObject(HeapObject* obj) {
+  if (ObjectMarking::IsWhite(obj)) {
+    ObjectMarking::WhiteToBlack(obj);
     PushBlack(obj);
   }
 }
 
-
-void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
-  DCHECK(Marking::IsWhite(mark_bit));
-  DCHECK(ObjectMarking::MarkBitFrom(obj) == mark_bit);
-  Marking::WhiteToBlack(mark_bit);
-  MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
-}
-
-
-bool MarkCompactCollector::IsMarked(Object* obj) {
-  DCHECK(obj->IsHeapObject());
-  HeapObject* heap_object = HeapObject::cast(obj);
-  return Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(heap_object));
-}
-
-
 void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
                                       Object* target) {
   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
   Page* source_page = Page::FromAddress(reinterpret_cast<Address>(object));
   if (target_page->IsEvacuationCandidate() &&
       !ShouldSkipEvacuationSlotRecording(object)) {
-    DCHECK(Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object)));
+    DCHECK(ObjectMarking::IsBlackOrGrey(object));
     RememberedSet<OLD_TO_OLD>::Insert(source_page,
                                       reinterpret_cast<Address>(slot));
   }
@@ -195,12 +171,13 @@
           object = black_object;
         }
       } else if ((T == kGreyObjects || T == kAllLiveObjects)) {
+        map = base::NoBarrierAtomicValue<Map*>::FromAddress(addr)->Value();
         object = HeapObject::FromAddress(addr);
       }
 
       // We found a live object.
       if (object != nullptr) {
-        if (map != nullptr && map == heap()->one_pointer_filler_map()) {
+        if (map == heap()->one_pointer_filler_map()) {
           // Black areas together with slack tracking may result in black one
           // word filler objects. We filter these objects out in the iterator.
           object = nullptr;
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 88e6983..338d954 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -28,6 +28,7 @@
 #include "src/tracing/tracing-category-observer.h"
 #include "src/utils-inl.h"
 #include "src/v8.h"
+#include "src/v8threads.h"
 
 namespace v8 {
 namespace internal {
@@ -53,7 +54,6 @@
 #ifdef DEBUG
       state_(IDLE),
 #endif
-      marking_parity_(ODD_MARKING_PARITY),
       was_marked_incrementally_(false),
       evacuation_(false),
       compacting_(false),
@@ -67,13 +67,11 @@
 #ifdef VERIFY_HEAP
 class VerifyMarkingVisitor : public ObjectVisitor {
  public:
-  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
-
   void VisitPointers(Object** start, Object** end) override {
     for (Object** current = start; current < end; current++) {
       if ((*current)->IsHeapObject()) {
         HeapObject* object = HeapObject::cast(*current);
-        CHECK(heap_->mark_compact_collector()->IsMarked(object));
+        CHECK(ObjectMarking::IsBlackOrGrey(object));
       }
     }
   }
@@ -93,20 +91,19 @@
       ObjectVisitor::VisitCell(rinfo);
     }
   }
-
- private:
-  Heap* heap_;
 };
 
 
 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
-  VerifyMarkingVisitor visitor(heap);
+  VerifyMarkingVisitor visitor;
   HeapObject* object;
   Address next_object_must_be_here_or_later = bottom;
   for (Address current = bottom; current < top;) {
     object = HeapObject::FromAddress(current);
-    if (MarkCompactCollector::IsMarked(object)) {
-      CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+    // One-word fillers at the end of a black area can be grey.
+    if (ObjectMarking::IsBlackOrGrey(object) &&
+        object->map() != heap->one_pointer_filler_map()) {
+      CHECK(ObjectMarking::IsBlack(object));
       CHECK(current >= next_object_must_be_here_or_later);
       object->Iterate(&visitor);
       next_object_must_be_here_or_later = current + object->Size();
@@ -133,7 +130,7 @@
   // page->area_start() as start of range on all pages.
   CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
 
-  NewSpacePageRange range(space->bottom(), end);
+  PageRange range(space->bottom(), end);
   for (auto it = range.begin(); it != range.end();) {
     Page* page = *(it++);
     Address limit = it != range.end() ? page->area_end() : end;
@@ -156,11 +153,11 @@
   VerifyMarking(heap->map_space());
   VerifyMarking(heap->new_space());
 
-  VerifyMarkingVisitor visitor(heap);
+  VerifyMarkingVisitor visitor;
 
   LargeObjectIterator it(heap->lo_space());
   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    if (MarkCompactCollector::IsMarked(obj)) {
+    if (ObjectMarking::IsBlackOrGrey(obj)) {
       obj->Iterate(&visitor);
     }
   }
@@ -197,7 +194,7 @@
 
 static void VerifyEvacuation(NewSpace* space) {
   VerifyEvacuationVisitor visitor;
-  NewSpacePageRange range(space->bottom(), space->top());
+  PageRange range(space->bottom(), space->top());
   for (auto it = range.begin(); it != range.end();) {
     Page* page = *(it++);
     Address current = page->area_start();
@@ -322,7 +319,6 @@
   Finish();
 }
 
-
 #ifdef VERIFY_HEAP
 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
   for (Page* p : *space) {
@@ -333,7 +329,7 @@
 
 
 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
-  for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
+  for (Page* p : PageRange(space->bottom(), space->top())) {
     CHECK(p->markbits()->IsClean());
     CHECK_EQ(0, p->LiveBytes());
   }
@@ -348,13 +344,11 @@
 
   LargeObjectIterator it(heap_->lo_space());
   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
-    CHECK(Marking::IsWhite(mark_bit));
+    CHECK(ObjectMarking::IsWhite(obj));
     CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
   }
 }
 
-
 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
   HeapObjectIterator code_iterator(heap()->code_space());
   for (HeapObject* obj = code_iterator.Next(); obj != NULL;
@@ -399,7 +393,7 @@
 
   LargeObjectIterator it(heap_->lo_space());
   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    Marking::MarkWhite(ObjectMarking::MarkBitFrom(obj));
+    ObjectMarking::ClearMarkBit(obj);
     MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
     chunk->ResetProgressBar();
     chunk->ResetLiveBytes();
@@ -779,10 +773,8 @@
 
   DCHECK(!FLAG_never_compact || !FLAG_always_compact);
 
-  if (sweeping_in_progress()) {
-    // Instead of waiting we could also abort the sweeper threads here.
-    EnsureSweepingCompleted();
-  }
+  // Instead of waiting we could also abort the sweeper threads here.
+  EnsureSweepingCompleted();
 
   if (heap()->incremental_marking()->IsSweeping()) {
     heap()->incremental_marking()->Stop();
@@ -801,22 +793,14 @@
     AbortWeakCells();
     AbortTransitionArrays();
     AbortCompaction();
-    if (heap_->UsingEmbedderHeapTracer()) {
-      heap_->embedder_heap_tracer()->AbortTracing();
-    }
+    heap_->local_embedder_heap_tracer()->AbortTracing();
     marking_deque()->Clear();
     was_marked_incrementally_ = false;
   }
 
   if (!was_marked_incrementally_) {
-    if (heap_->UsingEmbedderHeapTracer()) {
-      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
-      heap_->embedder_heap_tracer()->TracePrologue();
-    }
-  }
-
-  if (heap_->UsingEmbedderHeapTracer()) {
-    heap_->embedder_heap_tracer()->EnterFinalPause();
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
+    heap_->local_embedder_heap_tracer()->TracePrologue();
   }
 
   // Don't start compaction if we are in the middle of incremental
@@ -874,13 +858,6 @@
   }
 
   heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
-
-  if (marking_parity_ == EVEN_MARKING_PARITY) {
-    marking_parity_ = ODD_MARKING_PARITY;
-  } else {
-    DCHECK(marking_parity_ == ODD_MARKING_PARITY);
-    marking_parity_ = EVEN_MARKING_PARITY;
-  }
 }
 
 
@@ -914,6 +891,8 @@
 
 void CodeFlusher::ProcessJSFunctionCandidates() {
   Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
+  Code* interpreter_entry_trampoline =
+      isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
   Object* undefined = isolate_->heap()->undefined_value();
 
   JSFunction* candidate = jsfunction_candidates_head_;
@@ -925,8 +904,7 @@
     SharedFunctionInfo* shared = candidate->shared();
 
     Code* code = shared->code();
-    MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
-    if (Marking::IsWhite(code_mark)) {
+    if (ObjectMarking::IsWhite(code)) {
       if (FLAG_trace_code_flushing && shared->is_compiled()) {
         PrintF("[code-flushing clears: ");
         shared->ShortPrint();
@@ -936,10 +914,15 @@
       if (!shared->OptimizedCodeMapIsCleared()) {
         shared->ClearOptimizedCodeMap();
       }
-      shared->set_code(lazy_compile);
-      candidate->set_code(lazy_compile);
+      if (shared->HasBytecodeArray()) {
+        shared->set_code(interpreter_entry_trampoline);
+        candidate->set_code(interpreter_entry_trampoline);
+      } else {
+        shared->set_code(lazy_compile);
+        candidate->set_code(lazy_compile);
+      }
     } else {
-      DCHECK(Marking::IsBlack(code_mark));
+      DCHECK(ObjectMarking::IsBlack(code));
       candidate->set_code(code);
     }
 
@@ -964,7 +947,8 @@
 
 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
   Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
-
+  Code* interpreter_entry_trampoline =
+      isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
   SharedFunctionInfo* next_candidate;
   while (candidate != NULL) {
@@ -972,8 +956,7 @@
     ClearNextCandidate(candidate);
 
     Code* code = candidate->code();
-    MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
-    if (Marking::IsWhite(code_mark)) {
+    if (ObjectMarking::IsWhite(code)) {
       if (FLAG_trace_code_flushing && candidate->is_compiled()) {
         PrintF("[code-flushing clears: ");
         candidate->ShortPrint();
@@ -983,7 +966,11 @@
       if (!candidate->OptimizedCodeMapIsCleared()) {
         candidate->ClearOptimizedCodeMap();
       }
-      candidate->set_code(lazy_compile);
+      if (candidate->HasBytecodeArray()) {
+        candidate->set_code(interpreter_entry_trampoline);
+      } else {
+        candidate->set_code(lazy_compile);
+      }
     }
 
     Object** code_slot =
@@ -1083,6 +1070,32 @@
   }
 }
 
+class StaticYoungGenerationMarkingVisitor
+    : public StaticNewSpaceVisitor<StaticYoungGenerationMarkingVisitor> {
+ public:
+  static void Initialize(Heap* heap) {
+    StaticNewSpaceVisitor<StaticYoungGenerationMarkingVisitor>::Initialize();
+  }
+
+  inline static void VisitPointer(Heap* heap, HeapObject* object, Object** p) {
+    Object* target = *p;
+    if (heap->InNewSpace(target)) {
+      if (MarkRecursively(heap, HeapObject::cast(target))) return;
+      heap->mark_compact_collector()->MarkObject(HeapObject::cast(target));
+    }
+  }
+
+ protected:
+  inline static bool MarkRecursively(Heap* heap, HeapObject* object) {
+    StackLimitCheck check(heap->isolate());
+    if (check.HasOverflowed()) return false;
+
+    if (ObjectMarking::IsBlackOrGrey(object)) return true;
+    ObjectMarking::WhiteToBlack(object);
+    IterateBody(object->map(), object);
+    return true;
+  }
+};
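// The pattern above bounds native stack usage: MarkRecursively() returns
// false only when StackLimitCheck trips, and the caller then falls back to
// the iterative deque-based path (sketch of the fallback in VisitPointer):
//
//   if (!MarkRecursively(heap, target))                   // stack nearly full
//     heap->mark_compact_collector()->MarkObject(target); // queue instead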
 
 class MarkCompactMarkingVisitor
     : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
@@ -1109,16 +1122,14 @@
 
   // Marks the object black and pushes it on the marking stack.
   INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
-    MarkBit mark = ObjectMarking::MarkBitFrom(object);
-    heap->mark_compact_collector()->MarkObject(object, mark);
+    heap->mark_compact_collector()->MarkObject(object);
   }
 
   // Marks the object black without pushing it on the marking stack.
   // Returns true if the object needed marking and false otherwise.
   INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
-    if (Marking::IsWhite(mark_bit)) {
-      heap->mark_compact_collector()->SetMark(object, mark_bit);
+    if (ObjectMarking::IsWhite(object)) {
+      ObjectMarking::WhiteToBlack(object);
       return true;
     }
     return false;
@@ -1130,8 +1141,7 @@
     if (!(*p)->IsHeapObject()) return;
     HeapObject* target_object = HeapObject::cast(*p);
     collector->RecordSlot(object, p, target_object);
-    MarkBit mark = ObjectMarking::MarkBitFrom(target_object);
-    collector->MarkObject(target_object, mark);
+    collector->MarkObject(target_object);
   }
 
 
@@ -1140,15 +1150,13 @@
                                          HeapObject* obj)) {
 #ifdef DEBUG
     DCHECK(collector->heap()->Contains(obj));
-    DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
+    DCHECK(ObjectMarking::IsWhite(obj));
 #endif
     Map* map = obj->map();
     Heap* heap = obj->GetHeap();
-    MarkBit mark = ObjectMarking::MarkBitFrom(obj);
-    heap->mark_compact_collector()->SetMark(obj, mark);
+    ObjectMarking::WhiteToBlack(obj);
     // Mark the map pointer and the body.
-    MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
-    heap->mark_compact_collector()->MarkObject(map, map_mark);
+    heap->mark_compact_collector()->MarkObject(map);
     IterateBody(map, obj);
   }
 
@@ -1167,8 +1175,7 @@
       if (!o->IsHeapObject()) continue;
       collector->RecordSlot(object, p, o);
       HeapObject* obj = HeapObject::cast(o);
-      MarkBit mark = ObjectMarking::MarkBitFrom(obj);
-      if (Marking::IsBlackOrGrey(mark)) continue;
+      if (ObjectMarking::IsBlackOrGrey(obj)) continue;
       VisitUnmarkedObject(collector, obj);
     }
     return true;
@@ -1201,7 +1208,7 @@
      // was marked through the compilation cache before the marker reached the
      // JSRegExp object.
       FixedArray* data = FixedArray::cast(re->data());
-      if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(data))) {
+      if (ObjectMarking::IsBlackOrGrey(data)) {
         Object** slot =
             data->data_start() + JSRegExp::saved_code_index(is_one_byte);
         heap->mark_compact_collector()->RecordSlot(data, slot, code);
@@ -1285,10 +1292,8 @@
     Object* obj = *slot;
     if (obj->IsSharedFunctionInfo()) {
       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
-      MarkBit shared_mark = ObjectMarking::MarkBitFrom(shared);
-      MarkBit code_mark = ObjectMarking::MarkBitFrom(shared->code());
-      collector_->MarkObject(shared->code(), code_mark);
-      collector_->MarkObject(shared, shared_mark);
+      collector_->MarkObject(shared->code());
+      collector_->MarkObject(shared);
     }
   }
 
@@ -1306,12 +1311,10 @@
     // actual optimized code object.
     StackFrame* frame = it.frame();
     Code* code = frame->unchecked_code();
-    MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
-    MarkObject(code, code_mark);
+    MarkObject(code);
     if (frame->is_optimized()) {
       Code* optimized_code = frame->LookupCode();
-      MarkBit optimized_code_mark = ObjectMarking::MarkBitFrom(optimized_code);
-      MarkObject(optimized_code, optimized_code_mark);
+      MarkObject(optimized_code);
     }
   }
 }
@@ -1336,11 +1339,12 @@
   heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
   heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
 
-  ProcessMarkingDeque();
+  ProcessMarkingDeque<MarkCompactMode::FULL>();
 }
 
 
 // Visitor class for marking heap roots.
+template <MarkCompactMode mode>
 class RootMarkingVisitor : public ObjectVisitor {
  public:
   explicit RootMarkingVisitor(Heap* heap)
@@ -1362,21 +1366,30 @@
 
     HeapObject* object = HeapObject::cast(*p);
 
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
-    if (Marking::IsBlackOrGrey(mark_bit)) return;
+    if (mode == MarkCompactMode::YOUNG_GENERATION &&
+        !collector_->heap()->InNewSpace(object))
+      return;
+
+    if (ObjectMarking::IsBlackOrGrey(object)) return;
 
     Map* map = object->map();
     // Mark the object.
-    collector_->SetMark(object, mark_bit);
+    ObjectMarking::WhiteToBlack(object);
 
-    // Mark the map pointer and body, and push them on the marking stack.
-    MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
-    collector_->MarkObject(map, map_mark);
-    MarkCompactMarkingVisitor::IterateBody(map, object);
+    switch (mode) {
+      case MarkCompactMode::FULL: {
+        // Mark the map pointer and body, and push them on the marking stack.
+        collector_->MarkObject(map);
+        MarkCompactMarkingVisitor::IterateBody(map, object);
+      } break;
+      case MarkCompactMode::YOUNG_GENERATION:
+        StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
+        break;
+    }
 
     // Mark all the objects reachable from the map and body.  May leave
     // overflowed objects in the heap.
-    collector_->EmptyMarkingDeque();
+    collector_->EmptyMarkingDeque<mode>();
   }
 
   MarkCompactCollector* collector_;
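
The hunks above turn RootMarkingVisitor into a template over MarkCompactMode so
the same root-visiting logic serves both the full collector and the new
young-generation collector: the minor mode skips old-space targets and uses its
own body visitor. Since the mode is a compile-time constant, the switch folds
away. A minimal sketch of the technique (illustrative, not V8's classes):

#include <cstdio>

enum class MarkCompactMode { FULL, YOUNG_GENERATION };

template <MarkCompactMode mode>
void VisitRoot(bool target_in_new_space) {
  // Young-generation marking ignores roots that point into old space.
  if (mode == MarkCompactMode::YOUNG_GENERATION && !target_in_new_space)
    return;
  switch (mode) {
    case MarkCompactMode::FULL:
      std::puts("full GC: mark map and body, may push work on the deque");
      break;
    case MarkCompactMode::YOUNG_GENERATION:
      std::puts("minor GC: young-generation body visitor only");
      break;
  }
}

int main() {
  VisitRoot<MarkCompactMode::FULL>(false);              // visited
  VisitRoot<MarkCompactMode::YOUNG_GENERATION>(false);  // skipped
  return 0;
}
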
@@ -1398,10 +1411,14 @@
     for (Object** p = start; p < end; p++) {
       Object* o = *p;
       if (o->IsHeapObject()) {
-        if (Marking::IsWhite(ObjectMarking::MarkBitFrom(HeapObject::cast(o)))) {
+        if (ObjectMarking::IsWhite(HeapObject::cast(o))) {
           if (finalize_external_strings) {
-            DCHECK(o->IsExternalString());
-            heap_->FinalizeExternalString(String::cast(*p));
+            if (o->IsExternalString()) {
+              heap_->FinalizeExternalString(String::cast(*p));
+            } else {
+              // The original external string may have been internalized.
+              DCHECK(o->IsThinString());
+            }
           } else {
             pointers_removed_++;
           }
@@ -1435,9 +1452,8 @@
 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(HeapObject::cast(object));
-    DCHECK(!Marking::IsGrey(mark_bit));
-    if (Marking::IsBlack(mark_bit)) {
+    DCHECK(!ObjectMarking::IsGrey(HeapObject::cast(object)));
+    if (ObjectMarking::IsBlack(HeapObject::cast(object))) {
       return object;
     } else if (object->IsAllocationSite() &&
                !(AllocationSite::cast(object)->IsZombie())) {
@@ -1445,7 +1461,7 @@
       // space. These sites get a one-time reprieve.
       AllocationSite* site = AllocationSite::cast(object);
       site->MarkZombie();
-      site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
+      ObjectMarking::WhiteToBlack(site);
       return object;
     } else {
       return NULL;
@@ -1465,9 +1481,8 @@
 
   Map* filler_map = heap()->one_pointer_filler_map();
   for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
-    MarkBit markbit = ObjectMarking::MarkBitFrom(object);
-    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
-      Marking::GreyToBlack(markbit);
+    if ((object->map() != filler_map) && ObjectMarking::IsGrey(object)) {
+      ObjectMarking::GreyToBlack(object);
       PushBlack(object);
       if (marking_deque()->IsFull()) return;
     }
@@ -1479,9 +1494,8 @@
   LiveObjectIterator<kGreyObjects> it(p);
   HeapObject* object = NULL;
   while ((object = it.Next()) != NULL) {
-    MarkBit markbit = ObjectMarking::MarkBitFrom(object);
-    DCHECK(Marking::IsGrey(markbit));
-    Marking::GreyToBlack(markbit);
+    DCHECK(ObjectMarking::IsGrey(object));
+    ObjectMarking::GreyToBlack(object);
     PushBlack(object);
     if (marking_deque()->IsFull()) return;
   }
@@ -1921,7 +1935,7 @@
 
 void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
   NewSpace* space = heap()->new_space();
-  for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
+  for (Page* page : PageRange(space->bottom(), space->top())) {
     DiscoverGreyObjectsOnPage(page);
     if (marking_deque()->IsFull()) return;
   }
@@ -1931,9 +1945,7 @@
 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
   Object* o = *p;
   if (!o->IsHeapObject()) return false;
-  HeapObject* heap_object = HeapObject::cast(o);
-  MarkBit mark = ObjectMarking::MarkBitFrom(heap_object);
-  return Marking::IsWhite(mark);
+  return ObjectMarking::IsWhite(HeapObject::cast(o));
 }
 
 
@@ -1941,33 +1953,24 @@
                                                         Object** p) {
   Object* o = *p;
   DCHECK(o->IsHeapObject());
-  HeapObject* heap_object = HeapObject::cast(o);
-  MarkBit mark = ObjectMarking::MarkBitFrom(heap_object);
-  return Marking::IsWhite(mark);
+  return ObjectMarking::IsWhite(HeapObject::cast(o));
 }
 
-
-void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
+void MarkCompactCollector::MarkStringTable(
+    RootMarkingVisitor<MarkCompactMode::FULL>* visitor) {
   StringTable* string_table = heap()->string_table();
   // Mark the string table itself.
-  MarkBit string_table_mark = ObjectMarking::MarkBitFrom(string_table);
-  if (Marking::IsWhite(string_table_mark)) {
+  if (ObjectMarking::IsWhite(string_table)) {
     // The string table could have already been marked by visiting the handles
     // list.
-    SetMark(string_table, string_table_mark);
+    ObjectMarking::WhiteToBlack(string_table);
   }
   // Explicitly mark the prefix.
   string_table->IteratePrefix(visitor);
-  ProcessMarkingDeque();
+  ProcessMarkingDeque<MarkCompactMode::FULL>();
 }
 
-
-void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
-  MarkBit mark_bit = ObjectMarking::MarkBitFrom(site);
-  SetMark(site, mark_bit);
-}
-
-
-void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
+void MarkCompactCollector::MarkRoots(
+    RootMarkingVisitor<MarkCompactMode::FULL>* visitor) {
   // Mark the heap roots including global variables, stack variables,
   // etc., and all objects reachable from them.
   heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
@@ -1977,8 +1980,8 @@
 
   // There may be overflowed objects in the heap.  Visit them now.
   while (marking_deque()->overflowed()) {
-    RefillMarkingDeque();
-    EmptyMarkingDeque();
+    RefillMarkingDeque<MarkCompactMode::FULL>();
+    EmptyMarkingDeque<MarkCompactMode::FULL>();
   }
 }
 
@@ -1993,7 +1996,7 @@
     ImplicitRefGroup* entry = ref_groups->at(i);
     DCHECK(entry != NULL);
 
-    if (!IsMarked(*entry->parent)) {
+    if (ObjectMarking::IsWhite(*entry->parent)) {
       (*ref_groups)[last++] = entry;
       continue;
     }
@@ -2018,6 +2021,7 @@
 // Before: the marking stack contains zero or more heap object pointers.
 // After: the marking stack is empty, and all objects reachable from the
 // marking stack have been marked, or are overflowed in the heap.
+template <MarkCompactMode mode>
 void MarkCompactCollector::EmptyMarkingDeque() {
   while (!marking_deque()->IsEmpty()) {
     HeapObject* object = marking_deque()->Pop();
@@ -2025,13 +2029,19 @@
     DCHECK(!object->IsFiller());
     DCHECK(object->IsHeapObject());
     DCHECK(heap()->Contains(object));
-    DCHECK(!Marking::IsWhite(ObjectMarking::MarkBitFrom(object)));
+    DCHECK(!ObjectMarking::IsWhite(object));
 
     Map* map = object->map();
-    MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
-    MarkObject(map, map_mark);
-
-    MarkCompactMarkingVisitor::IterateBody(map, object);
+    switch (mode) {
+      case MarkCompactMode::FULL: {
+        MarkObject(map);
+        MarkCompactMarkingVisitor::IterateBody(map, object);
+      } break;
+      case MarkCompactMode::YOUNG_GENERATION: {
+        DCHECK(ObjectMarking::IsBlack(object));
+        StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
+      } break;
+    }
   }
 }
 
@@ -2041,6 +2051,7 @@
 // before sweeping completes.  If sweeping completes, there are no remaining
 // overflowed objects in the heap so the overflow flag on the marking stack
 // is cleared.
+template <MarkCompactMode mode>
 void MarkCompactCollector::RefillMarkingDeque() {
   isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
   DCHECK(marking_deque()->overflowed());
@@ -2048,18 +2059,17 @@
   DiscoverGreyObjectsInNewSpace();
   if (marking_deque()->IsFull()) return;
 
-  DiscoverGreyObjectsInSpace(heap()->old_space());
-  if (marking_deque()->IsFull()) return;
-
-  DiscoverGreyObjectsInSpace(heap()->code_space());
-  if (marking_deque()->IsFull()) return;
-
-  DiscoverGreyObjectsInSpace(heap()->map_space());
-  if (marking_deque()->IsFull()) return;
-
-  LargeObjectIterator lo_it(heap()->lo_space());
-  DiscoverGreyObjectsWithIterator(&lo_it);
-  if (marking_deque()->IsFull()) return;
+  if (mode == MarkCompactMode::FULL) {
+    DiscoverGreyObjectsInSpace(heap()->old_space());
+    if (marking_deque()->IsFull()) return;
+    DiscoverGreyObjectsInSpace(heap()->code_space());
+    if (marking_deque()->IsFull()) return;
+    DiscoverGreyObjectsInSpace(heap()->map_space());
+    if (marking_deque()->IsFull()) return;
+    LargeObjectIterator lo_it(heap()->lo_space());
+    DiscoverGreyObjectsWithIterator(&lo_it);
+    if (marking_deque()->IsFull()) return;
+  }
 
   marking_deque()->ClearOverflowed();
 }
@@ -2069,12 +2079,14 @@
 // stack.  Before: the marking stack contains zero or more heap object
 // pointers.  After: the marking stack is empty and there are no overflowed
 // objects in the heap.
+template <MarkCompactMode mode>
 void MarkCompactCollector::ProcessMarkingDeque() {
-  EmptyMarkingDeque();
+  EmptyMarkingDeque<mode>();
   while (marking_deque()->overflowed()) {
-    RefillMarkingDeque();
-    EmptyMarkingDeque();
+    RefillMarkingDeque<mode>();
+    EmptyMarkingDeque<mode>();
   }
+  DCHECK(marking_deque()->IsEmpty());
 }
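
ProcessMarkingDeque above now also asserts the fixed point it establishes:
drain the deque, and while draining left overflowed work behind in the heap,
rediscover it and drain again, until a pass ends with the overflow flag clear.
A tiny standalone model of that loop (illustrative, not V8's data structures):

#include <deque>
#include <vector>

// Illustrative bounded worklist with an overflow flag, like MarkingDeque.
struct Worklist {
  std::deque<int> items;
  bool overflowed = false;
  std::vector<int> spilled;  // overflowed work left "in the heap".
};

void Process(Worklist* wl) {
  for (;;) {
    // EmptyMarkingDeque: perform the queued work.
    while (!wl->items.empty()) wl->items.pop_front();
    if (!wl->overflowed) break;  // fixed point: nothing left anywhere.
    // RefillMarkingDeque: rediscover spilled work, clear the flag.
    wl->overflowed = false;
    wl->items.assign(wl->spilled.begin(), wl->spilled.end());
    wl->spilled.clear();
  }
  // Here the deque is empty and nothing overflowed -- the new DCHECK.
}
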
 
 // Mark all objects reachable (transitively) from objects on the marking
@@ -2084,23 +2096,33 @@
   DCHECK(marking_deque()->IsEmpty() && !marking_deque()->overflowed());
   bool work_to_do = true;
   while (work_to_do) {
-    if (heap_->UsingEmbedderHeapTracer()) {
-      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
-      heap_->RegisterWrappersWithEmbedderHeapTracer();
-      heap_->embedder_heap_tracer()->AdvanceTracing(
-          0, EmbedderHeapTracer::AdvanceTracingActions(
-                 EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
-    }
     if (!only_process_harmony_weak_collections) {
-      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_OBJECT_GROUPING);
-      isolate()->global_handles()->IterateObjectGroups(
-          visitor, &IsUnmarkedHeapObjectWithHeap);
-      MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
+      if (heap_->local_embedder_heap_tracer()->InUse()) {
+        TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
+        heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+        heap_->local_embedder_heap_tracer()->Trace(
+            0,
+            EmbedderHeapTracer::AdvanceTracingActions(
+                EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+      } else {
+        TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_OBJECT_GROUPING);
+        isolate()->global_handles()->IterateObjectGroups(
+            visitor, &IsUnmarkedHeapObjectWithHeap);
+        MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
+      }
+    } else {
+      // TODO(mlippautz): We currently do not trace through blink when
+      // discovering new objects reachable from weak roots (that have been made
+      // strong). This is a limitation of not having a separate handle type
+      // that doesn't require zapping before this phase. See crbug.com/668060.
+      heap_->local_embedder_heap_tracer()->ClearCachedWrappersToTrace();
     }
     ProcessWeakCollections();
     work_to_do = !marking_deque()->IsEmpty();
-    ProcessMarkingDeque();
+    ProcessMarkingDeque<MarkCompactMode::FULL>();
   }
+  CHECK(marking_deque()->IsEmpty());
+  CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());
 }
 
 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
@@ -2114,7 +2136,7 @@
       if (!code->CanDeoptAt(it.frame()->pc())) {
         Code::BodyDescriptor::IterateBody(code, visitor);
       }
-      ProcessMarkingDeque();
+      ProcessMarkingDeque<MarkCompactMode::FULL>();
       return;
     }
   }
@@ -2154,6 +2176,7 @@
 
 void MarkingDeque::StopUsing() {
   base::LockGuard<base::Mutex> guard(&mutex_);
+  if (!in_use_) return;
   DCHECK(IsEmpty());
   DCHECK(!overflowed_);
   top_ = bottom_ = mask_ = 0;
@@ -2216,10 +2239,10 @@
   }
 
   bool Visit(HeapObject* obj) override {
-    if (Marking::IsBlack(ObjectMarking::MarkBitFrom(obj))) {
+    if (ObjectMarking::IsBlack(obj)) {
       live_collector_.CollectStatistics(obj);
     } else {
-      DCHECK(!Marking::IsGrey(ObjectMarking::MarkBitFrom(obj)));
+      DCHECK(!ObjectMarking::IsGrey(obj));
       dead_collector_.CollectStatistics(obj);
     }
     return true;
@@ -2267,6 +2290,93 @@
   }
 }
 
+SlotCallbackResult MarkCompactCollector::CheckAndMarkObject(
+    Heap* heap, Address slot_address) {
+  Object* object = *reinterpret_cast<Object**>(slot_address);
+  if (heap->InNewSpace(object)) {
+    // Marking happens before flipping the young generation, so the object
+    // has to be in ToSpace.
+    DCHECK(heap->InToSpace(object));
+    HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+    if (ObjectMarking::IsBlackOrGrey(heap_object)) {
+      return KEEP_SLOT;
+    }
+    ObjectMarking::WhiteToBlack(heap_object);
+    StaticYoungGenerationMarkingVisitor::IterateBody(heap_object->map(),
+                                                     heap_object);
+    return KEEP_SLOT;
+  }
+  return REMOVE_SLOT;
+}
+
+static bool IsUnmarkedObject(Heap* heap, Object** p) {
+  DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
+  return heap->InNewSpace(*p) && !ObjectMarking::IsBlack(HeapObject::cast(*p));
+}
+
+void MarkCompactCollector::MarkLiveObjectsInYoungGeneration() {
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
+
+  PostponeInterruptsScope postpone(isolate());
+
+  StaticYoungGenerationMarkingVisitor::Initialize(heap());
+  RootMarkingVisitor<MarkCompactMode::YOUNG_GENERATION> root_visitor(heap());
+
+  marking_deque()->StartUsing();
+
+  isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+      &Heap::IsUnmodifiedHeapObject);
+
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
+    heap()->IterateRoots(&root_visitor, VISIT_ALL_IN_SCAVENGE);
+    ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+  }
+
+  {
+    TRACE_GC(heap()->tracer(),
+             GCTracer::Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS);
+    RememberedSet<OLD_TO_NEW>::Iterate(heap(), [this](Address addr) {
+      return CheckAndMarkObject(heap(), addr);
+    });
+    RememberedSet<OLD_TO_NEW>::IterateTyped(
+        heap(), [this](SlotType type, Address host_addr, Address addr) {
+          return UpdateTypedSlotHelper::UpdateTypedSlot(
+              isolate(), type, addr, [this](Object** addr) {
+                return CheckAndMarkObject(heap(),
+                                          reinterpret_cast<Address>(addr));
+              });
+        });
+    ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+  }
+
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
+    heap()->VisitEncounteredWeakCollections(&root_visitor);
+    ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+  }
+
+  if (is_code_flushing_enabled()) {
+    TRACE_GC(heap()->tracer(),
+             GCTracer::Scope::MINOR_MC_MARK_CODE_FLUSH_CANDIDATES);
+    code_flusher()->IteratePointersToFromSpace(&root_visitor);
+    ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+  }
+
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
+    isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+        &IsUnmarkedObject);
+    isolate()
+        ->global_handles()
+        ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>(
+            &root_visitor);
+    ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+  }
+
+  marking_deque()->StopUsing();
+}
+
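
In MarkLiveObjectsInYoungGeneration above, old-to-new remembered-set slots act
as extra roots: CheckAndMarkObject keeps a slot only while it still points into
new space (marking its target), and removes it otherwise. A standalone model of
that keep/remove filter (illustrative types, not V8's RememberedSet):

#include <vector>

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Illustrative slot: a pointer-sized field in an old-space object.
struct Slot {
  bool points_into_new_space;
  bool* target_mark_bit;  // marking state of the referenced object.
};

SlotCallbackResult CheckAndMark(const Slot& slot) {
  if (!slot.points_into_new_space) return REMOVE_SLOT;
  *slot.target_mark_bit = true;  // white -> black (body visit elided).
  return KEEP_SLOT;  // the slot stays interesting for the next minor GC.
}

// Remembered-set iteration drops the slots the callback rejects.
void IterateOldToNew(std::vector<Slot>* set) {
  std::vector<Slot> kept;
  for (const Slot& s : *set) {
    if (CheckAndMark(s) == KEEP_SLOT) kept.push_back(s);
  }
  set->swap(kept);
}
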
 void MarkCompactCollector::MarkLiveObjects() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
   // The recursive GC marker detects when it is nearing stack overflow,
@@ -2291,12 +2401,14 @@
 
   marking_deque()->StartUsing();
 
+  heap_->local_embedder_heap_tracer()->EnterFinalPause();
+
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
     PrepareForCodeFlushing();
   }
 
-  RootMarkingVisitor root_visitor(heap());
+  RootMarkingVisitor<MarkCompactMode::FULL> root_visitor(heap());
 
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
@@ -2328,7 +2440,7 @@
                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
       heap()->isolate()->global_handles()->IdentifyWeakHandles(
           &IsUnmarkedHeapObject);
-      ProcessMarkingDeque();
+      ProcessMarkingDeque<MarkCompactMode::FULL>();
     }
     // Then we mark the objects.
 
@@ -2336,7 +2448,7 @@
       TRACE_GC(heap()->tracer(),
                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
       heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
-      ProcessMarkingDeque();
+      ProcessMarkingDeque<MarkCompactMode::FULL>();
     }
 
     // Repeat Harmony weak maps marking to mark unmarked objects reachable from
@@ -2347,9 +2459,9 @@
     {
       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
       ProcessEphemeralMarking(&root_visitor, true);
-      if (heap_->UsingEmbedderHeapTracer()) {
+      {
         TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
-        heap()->embedder_heap_tracer()->TraceEpilogue();
+        heap()->local_embedder_heap_tracer()->TraceEpilogue();
       }
     }
   }
@@ -2371,8 +2483,8 @@
     string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
 
     ExternalStringTableCleaner external_visitor(heap(), nullptr);
-    heap()->external_string_table_.Iterate(&external_visitor);
-    heap()->external_string_table_.CleanUp();
+    heap()->external_string_table_.IterateAll(&external_visitor);
+    heap()->external_string_table_.CleanUpAll();
   }
 
   {
@@ -2481,11 +2593,11 @@
   while (weak_cell_obj != Smi::kZero) {
     WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
     Map* map = Map::cast(weak_cell->value());
-    DCHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(map)));
+    DCHECK(ObjectMarking::IsWhite(map));
     Object* potential_parent = map->constructor_or_backpointer();
     if (potential_parent->IsMap()) {
       Map* parent = Map::cast(potential_parent);
-      if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(parent)) &&
+      if (ObjectMarking::IsBlackOrGrey(parent) &&
           parent->raw_transitions() == weak_cell) {
         ClearSimpleMapTransition(parent, map);
       }
@@ -2524,8 +2636,7 @@
     if (num_transitions > 0) {
       Map* map = array->GetTarget(0);
       Map* parent = Map::cast(map->constructor_or_backpointer());
-      bool parent_is_alive =
-          Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(parent));
+      bool parent_is_alive = ObjectMarking::IsBlackOrGrey(parent);
       DescriptorArray* descriptors =
           parent_is_alive ? parent->instance_descriptors() : nullptr;
       bool descriptors_owner_died =
@@ -2550,7 +2661,7 @@
   for (int i = 0; i < num_transitions; ++i) {
     Map* target = transitions->GetTarget(i);
     DCHECK_EQ(target->constructor_or_backpointer(), map);
-    if (Marking::IsWhite(ObjectMarking::MarkBitFrom(target))) {
+    if (ObjectMarking::IsWhite(target)) {
       if (descriptors != nullptr &&
           target->instance_descriptors() == descriptors) {
         descriptors_owner_died = true;
@@ -2578,8 +2689,8 @@
   // array disappeared during GC.
   int trim = TransitionArray::Capacity(transitions) - transition_index;
   if (trim > 0) {
-    heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
-        transitions, trim * TransitionArray::kTransitionSize);
+    heap_->RightTrimFixedArray(transitions,
+                               trim * TransitionArray::kTransitionSize);
     transitions->SetNumberOfTransitions(transition_index);
   }
   return descriptors_owner_died;
@@ -2597,8 +2708,8 @@
   int number_of_descriptors = descriptors->number_of_descriptors_storage();
   int to_trim = number_of_descriptors - number_of_own_descriptors;
   if (to_trim > 0) {
-    heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
-        descriptors, to_trim * DescriptorArray::kDescriptorSize);
+    heap_->RightTrimFixedArray(descriptors,
+                               to_trim * DescriptorArray::kEntrySize);
     descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
 
     if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
@@ -2629,13 +2740,11 @@
 
   int to_trim = enum_cache->length() - live_enum;
   if (to_trim <= 0) return;
-  heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
-      descriptors->GetEnumCache(), to_trim);
+  heap_->RightTrimFixedArray(descriptors->GetEnumCache(), to_trim);
 
   if (!descriptors->HasEnumIndicesCache()) return;
   FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
-  heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
-                                                          to_trim);
+  heap_->RightTrimFixedArray(enum_indices_cache, to_trim);
 }
 
 
@@ -2644,11 +2753,11 @@
   while (weak_collection_obj != Smi::kZero) {
     JSWeakCollection* weak_collection =
         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
-    DCHECK(MarkCompactCollector::IsMarked(weak_collection));
+    DCHECK(ObjectMarking::IsBlackOrGrey(weak_collection));
     if (weak_collection->table()->IsHashTable()) {
       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
       for (int i = 0; i < table->Capacity(); i++) {
-        if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
+        if (ObjectMarking::IsBlackOrGrey(HeapObject::cast(table->KeyAt(i)))) {
           Object** key_slot =
               table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
           RecordSlot(table, key_slot, *key_slot);
@@ -2670,12 +2779,12 @@
   while (weak_collection_obj != Smi::kZero) {
     JSWeakCollection* weak_collection =
         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
-    DCHECK(MarkCompactCollector::IsMarked(weak_collection));
+    DCHECK(ObjectMarking::IsBlackOrGrey(weak_collection));
     if (weak_collection->table()->IsHashTable()) {
       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
       for (int i = 0; i < table->Capacity(); i++) {
         HeapObject* key = HeapObject::cast(table->KeyAt(i));
-        if (!MarkCompactCollector::IsMarked(key)) {
+        if (!ObjectMarking::IsBlackOrGrey(key)) {
           table->RemoveEntry(i);
         }
       }
@@ -2716,7 +2825,7 @@
     // We do not insert cleared weak cells into the list, so the value
     // cannot be a Smi here.
     HeapObject* value = HeapObject::cast(weak_cell->value());
-    if (!MarkCompactCollector::IsMarked(value)) {
+    if (!ObjectMarking::IsBlackOrGrey(value)) {
       // Cells for new-space objects embedded in optimized code are wrapped in
       // WeakCell and put into Heap::weak_object_to_code_table.
       // Such cells do not have any strong references but we want to keep them
@@ -2725,10 +2834,9 @@
       if (value->IsCell()) {
         Object* cell_value = Cell::cast(value)->value();
         if (cell_value->IsHeapObject() &&
-            MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
+            ObjectMarking::IsBlackOrGrey(HeapObject::cast(cell_value))) {
           // Resurrect the cell.
-          MarkBit mark = ObjectMarking::MarkBitFrom(value);
-          SetMark(value, mark);
+          ObjectMarking::WhiteToBlack(value);
           Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
           RecordSlot(value, slot, *slot);
           slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
@@ -2887,14 +2995,26 @@
   return String::cast(*p);
 }
 
-void MarkCompactCollector::EvacuateNewSpacePrologue() {
+void MarkCompactCollector::EvacuatePrologue() {
+  // New space.
   NewSpace* new_space = heap()->new_space();
   // Append the list of new space pages to be processed.
-  for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) {
-    newspace_evacuation_candidates_.Add(p);
+  for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
+    new_space_evacuation_pages_.Add(p);
   }
   new_space->Flip();
   new_space->ResetAllocationInfo();
+
+  // Old space.
+  CHECK(old_space_evacuation_pages_.is_empty());
+  old_space_evacuation_pages_.Swap(&evacuation_candidates_);
+}
+
+void MarkCompactCollector::EvacuateEpilogue() {
+  // New space.
+  heap()->new_space()->set_age_mark(heap()->new_space()->top());
+  // Old space. Deallocate evacuated candidate pages.
+  ReleaseEvacuationCandidates();
 }
 
 class MarkCompactCollector::Evacuator : public Malloced {
@@ -3148,18 +3268,19 @@
 
   int abandoned_pages = 0;
   intptr_t live_bytes = 0;
-  for (Page* page : evacuation_candidates_) {
+  for (Page* page : old_space_evacuation_pages_) {
     live_bytes += page->LiveBytes();
     job.AddPage(page, &abandoned_pages);
   }
 
   const bool reduce_memory = heap()->ShouldReduceMemory();
   const Address age_mark = heap()->new_space()->age_mark();
-  for (Page* page : newspace_evacuation_candidates_) {
+  for (Page* page : new_space_evacuation_pages_) {
     live_bytes += page->LiveBytes();
     if (!reduce_memory && !page->NeverEvacuate() &&
         (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
-        !page->Contains(age_mark)) {
+        !page->Contains(age_mark) &&
+        heap()->CanExpandOldGeneration(page->LiveBytes())) {
       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
       } else {
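
The promotion decision in the hunk above gains one clause: a mostly-live
new-space page is only promoted in place when the old generation can actually
absorb its live bytes. A sketch of the full predicate (illustrative parameters,
not V8's types):

#include <cstddef>

struct PageInfo {
  size_t live_bytes;
  bool never_evacuate;
  bool contains_age_mark;
};

bool ShouldMovePageToOldSpace(const PageInfo& p, bool reduce_memory,
                              size_t evacuation_threshold,
                              bool old_gen_can_expand_by_live_bytes) {
  return !reduce_memory && !p.never_evacuate &&
         p.live_bytes > evacuation_threshold && !p.contains_age_mark &&
         old_gen_can_expand_by_live_bytes;  // the clause this patch adds.
}
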
@@ -3270,7 +3391,7 @@
   HeapObject* object = NULL;
 
   while ((object = it.Next()) != NULL) {
-    DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+    DCHECK(ObjectMarking::IsBlack(object));
     Address free_end = object->address();
     if (free_end != free_start) {
       CHECK_GT(free_end, free_start);
@@ -3360,8 +3481,7 @@
     DCHECK(compacting_);
 
     // If the object is white then no slots were recorded on it yet.
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(code);
-    if (Marking::IsWhite(mark_bit)) return;
+    if (ObjectMarking::IsWhite(code)) return;
 
     // Ignore all slots that might have been recorded in the body of the
     // deoptimized code object. Assumption: no slots will be recorded for
@@ -3382,11 +3502,16 @@
   LiveObjectIterator<kAllLiveObjects> it(page);
   HeapObject* object = NULL;
   while ((object = it.Next()) != NULL) {
-    CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+    CHECK(ObjectMarking::IsBlack(object));
   }
 }
 #endif  // VERIFY_HEAP
 
+void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
+  EvacuateRecordOnlyVisitor visitor(heap());
+  VisitLiveObjects(page, &visitor, kKeepMarking);
+}
+
 template <class Visitor>
 bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
                                             IterationMode mode) {
@@ -3397,7 +3522,7 @@
   LiveObjectIterator<kBlackObjects> it(page);
   HeapObject* object = nullptr;
   while ((object = it.Next()) != nullptr) {
-    DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+    DCHECK(ObjectMarking::IsBlack(object));
     if (!visitor->Visit(object)) {
       if (mode == kClearMarkbits) {
         page->markbits()->ClearRange(
@@ -3445,18 +3570,23 @@
   Heap::RelocationLock relocation_lock(heap());
 
   {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
+    EvacuatePrologue();
+  }
+
+  {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
     EvacuationScope evacuation_scope(this);
-
-    EvacuateNewSpacePrologue();
     EvacuatePagesInParallel();
-    heap()->new_space()->set_age_mark(heap()->new_space()->top());
   }
 
   UpdatePointersAfterEvacuation();
 
-  if (!heap()->new_space()->Rebalance()) {
-    FatalProcessOutOfMemory("NewSpace::Rebalance");
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
+    if (!heap()->new_space()->Rebalance()) {
+      FatalProcessOutOfMemory("NewSpace::Rebalance");
+    }
   }
 
   // Give pages that are queued to be freed back to the OS. Note that filtering
@@ -3468,7 +3598,7 @@
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
 
-    for (Page* p : newspace_evacuation_candidates_) {
+    for (Page* p : new_space_evacuation_pages_) {
       if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
         p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
         sweeper().AddPage(p->owner()->identity(), p);
@@ -3479,9 +3609,9 @@
         sweeper().AddPage(p->owner()->identity(), p);
       }
     }
-    newspace_evacuation_candidates_.Rewind(0);
+    new_space_evacuation_pages_.Rewind(0);
 
-    for (Page* p : evacuation_candidates_) {
+    for (Page* p : old_space_evacuation_pages_) {
       // Important: the skip list should be cleared only after roots were
       // updated because root iteration traverses the stack and might have to
       // find code objects from a non-updated pc pointing into an evacuation
       // candidate.
@@ -3492,9 +3622,11 @@
         p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
       }
     }
+  }
 
-    // Deallocate evacuated candidate pages.
-    ReleaseEvacuationCandidates();
+  {
+    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
+    EvacuateEpilogue();
   }
 
 #ifdef VERIFY_HEAP
@@ -3523,7 +3655,7 @@
  private:
   static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
     if (direction == OLD_TO_NEW) {
-      RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap, chunk](Address slot) {
+      RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
         return CheckAndUpdateOldToNewSlot(heap, slot);
       });
     } else {
@@ -3594,8 +3726,7 @@
       // slot has been recorded multiple times in the remembered set. Since
       // there is no forwarding information present we need to check the
       // markbits to determine liveness.
-      if (Marking::IsBlack(ObjectMarking::MarkBitFrom(
-              reinterpret_cast<HeapObject*>(slot_reference))))
+      if (ObjectMarking::IsBlack(reinterpret_cast<HeapObject*>(slot_reference)))
         return KEEP_SLOT;
     } else {
       DCHECK(!heap->InNewSpace(slot_reference));
@@ -3676,7 +3807,7 @@
       heap, heap->isolate()->cancelable_task_manager(), semaphore);
   Address space_start = heap->new_space()->bottom();
   Address space_end = heap->new_space()->top();
-  for (Page* page : NewSpacePageRange(space_start, space_end)) {
+  for (Page* page : PageRange(space_start, space_end)) {
     Address start =
         page->Contains(space_start) ? space_start : page->area_start();
     Address end = page->Contains(space_end) ? space_end : page->area_end();
@@ -3722,14 +3853,14 @@
 
 
 void MarkCompactCollector::ReleaseEvacuationCandidates() {
-  for (Page* p : evacuation_candidates_) {
+  for (Page* p : old_space_evacuation_pages_) {
     if (!p->IsEvacuationCandidate()) continue;
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
     p->ResetLiveBytes();
     CHECK(p->SweepingDone());
     space->ReleasePage(p);
   }
-  evacuation_candidates_.Rewind(0);
+  old_space_evacuation_pages_.Rewind(0);
   compacting_ = false;
   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
 }
@@ -3929,8 +4060,7 @@
     Code* host =
         isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
             pc);
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(host);
-    if (Marking::IsBlack(mark_bit)) {
+    if (ObjectMarking::IsBlack(host)) {
       RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
       // The target is always in old space, we don't have to record the slot in
       // the old-to-new remembered set.
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index de18207..86d0b96 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -17,6 +17,8 @@
 namespace v8 {
 namespace internal {
 
+enum class MarkCompactMode { FULL, YOUNG_GENERATION };
+
 // Callback function, returns whether an object is alive. The heap size
 // of the object is returned in size. It optionally updates the offset
 // to the first live object in the page (only used for old and map objects).
@@ -29,23 +31,91 @@
 class CodeFlusher;
 class MarkCompactCollector;
 class MarkingVisitor;
+template <MarkCompactMode mode>
 class RootMarkingVisitor;
 
 class ObjectMarking : public AllStatic {
  public:
-  INLINE(static MarkBit MarkBitFrom(Address addr)) {
-    MemoryChunk* p = MemoryChunk::FromAddress(addr);
-    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
-  }
-
-  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
-    return MarkBitFrom(reinterpret_cast<Address>(obj));
+  V8_INLINE static MarkBit MarkBitFrom(HeapObject* obj) {
+    const Address address = obj->address();
+    MemoryChunk* p = MemoryChunk::FromAddress(address);
+    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(address));
   }
 
   static Marking::ObjectColor Color(HeapObject* obj) {
     return Marking::Color(ObjectMarking::MarkBitFrom(obj));
   }
 
+  V8_INLINE static bool IsImpossible(HeapObject* obj) {
+    return Marking::IsImpossible(MarkBitFrom(obj));
+  }
+
+  V8_INLINE static bool IsBlack(HeapObject* obj) {
+    return Marking::IsBlack(MarkBitFrom(obj));
+  }
+
+  V8_INLINE static bool IsWhite(HeapObject* obj) {
+    return Marking::IsWhite(MarkBitFrom(obj));
+  }
+
+  V8_INLINE static bool IsGrey(HeapObject* obj) {
+    return Marking::IsGrey(MarkBitFrom(obj));
+  }
+
+  V8_INLINE static bool IsBlackOrGrey(HeapObject* obj) {
+    return Marking::IsBlackOrGrey(MarkBitFrom(obj));
+  }
+
+  V8_INLINE static void ClearMarkBit(HeapObject* obj) {
+    Marking::MarkWhite(MarkBitFrom(obj));
+  }
+
+  V8_INLINE static void BlackToWhite(HeapObject* obj) {
+    DCHECK(IsBlack(obj));
+    MarkBit markbit = MarkBitFrom(obj);
+    Marking::BlackToWhite(markbit);
+    MemoryChunk::IncrementLiveBytes(obj, -obj->Size());
+  }
+
+  V8_INLINE static void GreyToWhite(HeapObject* obj) {
+    DCHECK(IsGrey(obj));
+    Marking::GreyToWhite(MarkBitFrom(obj));
+  }
+
+  V8_INLINE static void BlackToGrey(HeapObject* obj) {
+    DCHECK(IsBlack(obj));
+    MarkBit markbit = MarkBitFrom(obj);
+    Marking::BlackToGrey(markbit);
+    MemoryChunk::IncrementLiveBytes(obj, -obj->Size());
+  }
+
+  V8_INLINE static void WhiteToGrey(HeapObject* obj) {
+    DCHECK(IsWhite(obj));
+    Marking::WhiteToGrey(MarkBitFrom(obj));
+  }
+
+  V8_INLINE static void WhiteToBlack(HeapObject* obj) {
+    DCHECK(IsWhite(obj));
+    MarkBit markbit = MarkBitFrom(obj);
+    Marking::WhiteToBlack(markbit);
+    MemoryChunk::IncrementLiveBytes(obj, obj->Size());
+  }
+
+  V8_INLINE static void GreyToBlack(HeapObject* obj) {
+    DCHECK(IsGrey(obj));
+    MarkBit markbit = MarkBitFrom(obj);
+    Marking::GreyToBlack(markbit);
+    MemoryChunk::IncrementLiveBytes(obj, obj->Size());
+  }
+
+  V8_INLINE static void AnyToGrey(HeapObject* obj) {
+    MarkBit markbit = MarkBitFrom(obj);
+    if (Marking::IsBlack(markbit)) {
+      MemoryChunk::IncrementLiveBytes(obj, -obj->Size());
+    }
+    Marking::AnyToGrey(markbit);
+  }
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
 };
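
The new ObjectMarking transitions above pair every mark-bit flip with the
matching live-byte update, so a caller can no longer turn an object black and
forget the accounting. A standalone model of the tri-color transitions and
their bookkeeping (illustrative, not V8's MarkBit machinery):

#include <cassert>
#include <cstddef>

enum class Color { White, Grey, Black };

struct Obj {
  Color color = Color::White;
  size_t size = 0;
};

struct Chunk {
  long live_bytes = 0;  // only black objects count as live, as above.

  void WhiteToGrey(Obj* o) {
    assert(o->color == Color::White);
    o->color = Color::Grey;  // grey carries no live bytes yet.
  }
  void WhiteToBlack(Obj* o) {
    assert(o->color == Color::White);
    o->color = Color::Black;
    live_bytes += static_cast<long>(o->size);
  }
  void GreyToBlack(Obj* o) {
    assert(o->color == Color::Grey);
    o->color = Color::Black;
    live_bytes += static_cast<long>(o->size);
  }
  void BlackToGrey(Obj* o) {
    assert(o->color == Color::Black);
    o->color = Color::Grey;
    live_bytes -= static_cast<long>(o->size);  // un-count, as above.
  }
};
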
@@ -416,6 +486,9 @@
 
   static void Initialize();
 
+  static SlotCallbackResult CheckAndMarkObject(Heap* heap,
+                                               Address slot_address);
+
   void SetUp();
 
   void TearDown();
@@ -435,12 +508,6 @@
 
   void AbortCompaction();
 
-#ifdef DEBUG
-  // Checks whether performing mark-compact collection.
-  bool in_use() { return state_ > PREPARE_GC; }
-  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
-#endif
-
   // Determine type of object and emit deletion log event.
   static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
 
@@ -449,7 +516,6 @@
   static const uint32_t kSingleFreeEncoding = 0;
   static const uint32_t kMultiFreeEncoding = 1;
 
-  static inline bool IsMarked(Object* obj);
   static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
 
   inline Heap* heap() const { return heap_; }
@@ -458,15 +524,6 @@
   CodeFlusher* code_flusher() { return code_flusher_; }
   inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
 
-#ifdef VERIFY_HEAP
-  void VerifyValidStoreAndSlotsBufferEntries();
-  void VerifyMarkbitsAreClean();
-  static void VerifyMarkbitsAreClean(PagedSpace* space);
-  static void VerifyMarkbitsAreClean(NewSpace* space);
-  void VerifyWeakEmbeddedObjectsInCode();
-  void VerifyOmittedMapChecks();
-#endif
-
   INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
     return Page::FromAddress(reinterpret_cast<Address>(host))
         ->ShouldSkipEvacuationSlotRecording();
@@ -483,6 +540,7 @@
   INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
   INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
                               Object* target));
+  void RecordLiveSlotsOnPage(Page* page);
 
   void UpdateSlots(SlotsBuffer* buffer);
   void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
@@ -493,8 +551,6 @@
 
   bool is_compacting() const { return compacting_; }
 
-  MarkingParity marking_parity() { return marking_parity_; }
-
   // Ensures that sweeping is finished.
   //
   // Note: Can only be called safely from main thread.
@@ -513,10 +569,6 @@
 
   bool evacuation() const { return evacuation_; }
 
-  // Special case for processing weak references in a full collection. We need
-  // to artificially keep AllocationSites alive for a time.
-  void MarkAllocationSite(AllocationSite* site);
-
   // Mark objects in implicit references groups if their parent object
   // is marked.
   void MarkImplicitRefGroups(MarkObjectFunction mark_object);
@@ -525,6 +577,21 @@
 
   Sweeper& sweeper() { return sweeper_; }
 
+#ifdef DEBUG
+  // Checks whether we are performing a mark-compact collection.
+  bool in_use() { return state_ > PREPARE_GC; }
+  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
+#endif
+
+#ifdef VERIFY_HEAP
+  void VerifyValidStoreAndSlotsBufferEntries();
+  void VerifyMarkbitsAreClean();
+  static void VerifyMarkbitsAreClean(PagedSpace* space);
+  static void VerifyMarkbitsAreClean(NewSpace* space);
+  void VerifyWeakEmbeddedObjectsInCode();
+  void VerifyOmittedMapChecks();
+#endif
+
  private:
   template <PageEvacuationMode mode>
   class EvacuateNewSpacePageVisitor;
@@ -564,8 +631,10 @@
   friend class MarkCompactMarkingVisitor;
   friend class MarkingVisitor;
   friend class RecordMigratedSlotVisitor;
+  template <MarkCompactMode mode>
   friend class RootMarkingVisitor;
   friend class SharedFunctionInfoMarkingVisitor;
+  friend class StaticYoungGenerationMarkingVisitor;
 
   // Mark code objects that are active on the stack to prevent them
   // from being flushed.
@@ -575,6 +644,8 @@
 
   // Marking operations for objects reachable from roots.
   void MarkLiveObjects();
+  // Mark the young generation.
+  void MarkLiveObjectsInYoungGeneration();
 
   // Pushes a black object onto the marking stack and accounts for live bytes.
   // Note that this assumes live bytes have not yet been counted.
@@ -586,21 +657,18 @@
 
   // Marks the object black and pushes it on the marking stack.
   // This is for non-incremental marking only.
-  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
-
-  // Marks the object black assuming that it is not yet marked.
-  // This is for non-incremental marking only.
-  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
+  INLINE(void MarkObject(HeapObject* obj));
 
   // Mark the heap roots and all objects reachable from them.
-  void MarkRoots(RootMarkingVisitor* visitor);
+  void MarkRoots(RootMarkingVisitor<MarkCompactMode::FULL>* visitor);
 
   // Mark the string table specially.  References to internalized strings from
   // the string table are weak.
-  void MarkStringTable(RootMarkingVisitor* visitor);
+  void MarkStringTable(RootMarkingVisitor<MarkCompactMode::FULL>* visitor);
 
   // Mark objects reachable (transitively) from objects in the marking stack
   // or overflowed in the heap.
+  template <MarkCompactMode mode>
   void ProcessMarkingDeque();
 
   // Mark objects reachable (transitively) from objects in the marking stack
@@ -624,11 +692,13 @@
   // stack.  This function empties the marking stack, but may leave
   // overflowed objects in the heap, in which case the marking stack's
   // overflow flag will be set.
+  template <MarkCompactMode mode>
   void EmptyMarkingDeque();
 
   // Refill the marking stack with overflowed objects from the heap.  This
   // function either leaves the marking stack full or clears the overflow
   // flag on the marking stack.
+  template <MarkCompactMode mode>
   void RefillMarkingDeque();
 
   // Helper methods for refilling the marking stack by discovering grey objects
@@ -684,8 +754,8 @@
   void StartSweepSpaces();
   void StartSweepSpace(PagedSpace* space);
 
-  void EvacuateNewSpacePrologue();
-
+  void EvacuatePrologue();
+  void EvacuateEpilogue();
   void EvacuatePagesInParallel();
 
   // The number of parallel compaction tasks, including the main thread.
@@ -733,8 +803,6 @@
   CollectorState state_;
 #endif
 
-  MarkingParity marking_parity_;
-
   bool was_marked_incrementally_;
 
   bool evacuation_;
@@ -751,8 +819,11 @@
 
   CodeFlusher* code_flusher_;
 
+  // Candidates for pages that should be evacuated.
   List<Page*> evacuation_candidates_;
-  List<Page*> newspace_evacuation_candidates_;
+  // Pages that are actually processed during evacuation.
+  List<Page*> old_space_evacuation_pages_;
+  List<Page*> new_space_evacuation_pages_;
 
   Sweeper sweeper_;
 
diff --git a/src/heap/memory-reducer.cc b/src/heap/memory-reducer.cc
index 2aed4c7..3645547 100644
--- a/src/heap/memory-reducer.cc
+++ b/src/heap/memory-reducer.cc
@@ -17,6 +17,8 @@
 const int MemoryReducer::kShortDelayMs = 500;
 const int MemoryReducer::kWatchdogDelayMs = 100000;
 const int MemoryReducer::kMaxNumberOfGCs = 3;
+const double MemoryReducer::kCommittedMemoryFactor = 1.1;
+const size_t MemoryReducer::kCommittedMemoryDelta = 10 * MB;
 
 MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
     : CancelableTask(memory_reducer->heap()->isolate()),
@@ -47,6 +49,7 @@
   event.can_start_incremental_gc =
       heap->incremental_marking()->IsStopped() &&
       (heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
+  event.committed_memory = heap->CommittedOldGenerationMemory();
   memory_reducer_->NotifyTimer(event);
 }
 
@@ -128,17 +131,30 @@
 MemoryReducer::State MemoryReducer::Step(const State& state,
                                          const Event& event) {
   if (!FLAG_incremental_marking || !FLAG_memory_reducer) {
-    return State(kDone, 0, 0, state.last_gc_time_ms);
+    return State(kDone, 0, 0, state.last_gc_time_ms, 0);
   }
   switch (state.action) {
     case kDone:
       if (event.type == kTimer) {
         return state;
+      } else if (event.type == kMarkCompact) {
+        if (event.committed_memory <
+            Max(static_cast<size_t>(state.committed_memory_at_last_run *
+                                    kCommittedMemoryFactor),
+                state.committed_memory_at_last_run + kCommittedMemoryDelta)) {
+          return state;
+        } else {
+          return State(kWait, 0, event.time_ms + kLongDelayMs,
+                       event.type == kMarkCompact ? event.time_ms
+                                                  : state.last_gc_time_ms,
+                       0);
+        }
       } else {
-        DCHECK(event.type == kPossibleGarbage || event.type == kMarkCompact);
+        DCHECK_EQ(kPossibleGarbage, event.type);
         return State(
             kWait, 0, event.time_ms + kLongDelayMs,
-            event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms);
+            event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms,
+            0);
       }
     case kWait:
       switch (event.type) {
@@ -146,23 +162,24 @@
           return state;
         case kTimer:
           if (state.started_gcs >= kMaxNumberOfGCs) {
-            return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms);
+            return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms,
+                         event.committed_memory);
           } else if (event.can_start_incremental_gc &&
                      (event.should_start_incremental_gc ||
                       WatchdogGC(state, event))) {
             if (state.next_gc_start_ms <= event.time_ms) {
               return State(kRun, state.started_gcs + 1, 0.0,
-                           state.last_gc_time_ms);
+                           state.last_gc_time_ms, 0);
             } else {
               return state;
             }
           } else {
             return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
-                         state.last_gc_time_ms);
+                         state.last_gc_time_ms, 0);
           }
         case kMarkCompact:
           return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
-                       event.time_ms);
+                       event.time_ms, 0);
       }
     case kRun:
       if (event.type != kMarkCompact) {
@@ -171,14 +188,15 @@
         if (state.started_gcs < kMaxNumberOfGCs &&
             (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
           return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
-                       event.time_ms);
+                       event.time_ms, 0);
         } else {
-          return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms);
+          return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms,
+                       event.committed_memory);
         }
       }
   }
   UNREACHABLE();
-  return State(kDone, 0, 0, 0.0);  // Make the compiler happy.
+  return State(kDone, 0, 0, 0.0, 0);  // Make the compiler happy.
 }
 
 
@@ -192,7 +210,7 @@
       isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
 }
 
-void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
+void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0, 0); }
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/memory-reducer.h b/src/heap/memory-reducer.h
index 0421987..0f0ad6e 100644
--- a/src/heap/memory-reducer.h
+++ b/src/heap/memory-reducer.h
@@ -86,15 +86,17 @@
 
   struct State {
     State(Action action, int started_gcs, double next_gc_start_ms,
-          double last_gc_time_ms)
+          double last_gc_time_ms, size_t committed_memory_at_last_run)
         : action(action),
           started_gcs(started_gcs),
           next_gc_start_ms(next_gc_start_ms),
-          last_gc_time_ms(last_gc_time_ms) {}
+          last_gc_time_ms(last_gc_time_ms),
+          committed_memory_at_last_run(committed_memory_at_last_run) {}
     Action action;
     int started_gcs;
     double next_gc_start_ms;
     double last_gc_time_ms;
+    size_t committed_memory_at_last_run;
   };
 
   enum EventType { kTimer, kMarkCompact, kPossibleGarbage };
@@ -102,6 +104,7 @@
   struct Event {
     EventType type;
     double time_ms;
+    size_t committed_memory;
     bool next_gc_likely_to_collect_more;
     bool should_start_incremental_gc;
     bool can_start_incremental_gc;
@@ -109,7 +112,7 @@
 
   explicit MemoryReducer(Heap* heap)
       : heap_(heap),
-        state_(kDone, 0, 0.0, 0.0),
+        state_(kDone, 0, 0.0, 0.0, 0),
         js_calls_counter_(0),
         js_calls_sample_time_ms_(0.0) {}
   // Callbacks.
@@ -126,6 +129,12 @@
   static const int kShortDelayMs;
   static const int kWatchdogDelayMs;
   static const int kMaxNumberOfGCs;
+  // The committed memory has to increase by at least this factor since the
+  // last run in order to trigger a new run after mark-compact.
+  static const double kCommittedMemoryFactor;
+  // The committed memory has to increase by at least this amount since the
+  // last run in order to trigger a new run after mark-compact.
+  static const size_t kCommittedMemoryDelta;
 
   Heap* heap() { return heap_; }
 
diff --git a/src/heap/object-stats.cc b/src/heap/object-stats.cc
index ef5f657..50d6fcc 100644
--- a/src/heap/object-stats.cc
+++ b/src/heap/object-stats.cc
@@ -4,6 +4,7 @@
 
 #include "src/heap/object-stats.h"
 
+#include "src/assembler-inl.h"
 #include "src/compilation-cache.h"
 #include "src/counters.h"
 #include "src/heap/heap-inl.h"
@@ -330,7 +331,6 @@
          array->map() != heap->fixed_double_array_map() &&
          array != heap->empty_fixed_array() &&
          array != heap->empty_byte_array() &&
-         array != heap->empty_literals_array() &&
          array != heap->empty_sloppy_arguments_elements() &&
          array != heap->empty_slow_element_dictionary() &&
          array != heap->empty_descriptor_array() &&
@@ -441,10 +441,8 @@
 }
 
 void ObjectStatsCollector::RecordScriptDetails(Script* obj) {
-  Object* infos = WeakFixedArray::cast(obj->shared_function_infos());
-  if (infos->IsWeakFixedArray())
-    RecordFixedArrayHelper(obj, WeakFixedArray::cast(infos),
-                           SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
+  FixedArray* infos = FixedArray::cast(obj->shared_function_infos());
+  RecordFixedArrayHelper(obj, infos, SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
 }
 
 void ObjectStatsCollector::RecordMapDetails(Map* map_obj) {
@@ -542,17 +540,10 @@
     SharedFunctionInfo* sfi) {
   FixedArray* scope_info = sfi->scope_info();
   RecordFixedArrayHelper(sfi, scope_info, SCOPE_INFO_SUB_TYPE, 0);
-  TypeFeedbackMetadata* feedback_metadata = sfi->feedback_metadata();
+  FeedbackMetadata* feedback_metadata = sfi->feedback_metadata();
   if (!feedback_metadata->is_empty()) {
-    RecordFixedArrayHelper(sfi, feedback_metadata,
-                           TYPE_FEEDBACK_METADATA_SUB_TYPE, 0);
-    Object* names =
-        feedback_metadata->get(TypeFeedbackMetadata::kNamesTableIndex);
-    if (!names->IsSmi()) {
-      UnseededNumberDictionary* names = UnseededNumberDictionary::cast(
-          feedback_metadata->get(TypeFeedbackMetadata::kNamesTableIndex));
-      RecordHashTableHelper(sfi, names, TYPE_FEEDBACK_METADATA_SUB_TYPE);
-    }
+    RecordFixedArrayHelper(sfi, feedback_metadata, FEEDBACK_METADATA_SUB_TYPE,
+                           0);
   }
 
   if (!sfi->OptimizedCodeMapIsCleared()) {
@@ -560,34 +551,15 @@
     RecordFixedArrayHelper(sfi, optimized_code_map, OPTIMIZED_CODE_MAP_SUB_TYPE,
                            0);
     // Optimized code map should be small, so skip accounting.
-    int len = optimized_code_map->length();
-    for (int i = SharedFunctionInfo::kEntriesStart; i < len;
-         i += SharedFunctionInfo::kEntryLength) {
-      Object* slot =
-          optimized_code_map->get(i + SharedFunctionInfo::kLiteralsOffset);
-      LiteralsArray* literals = nullptr;
-      if (slot->IsWeakCell()) {
-        WeakCell* cell = WeakCell::cast(slot);
-        if (!cell->cleared()) {
-          literals = LiteralsArray::cast(cell->value());
-        }
-      } else {
-        literals = LiteralsArray::cast(slot);
-      }
-      if (literals != nullptr) {
-        RecordFixedArrayHelper(sfi, literals, LITERALS_ARRAY_SUB_TYPE, 0);
-        RecordFixedArrayHelper(sfi, literals->feedback_vector(),
-                               TYPE_FEEDBACK_VECTOR_SUB_TYPE, 0);
-      }
-    }
   }
 }
 
 void ObjectStatsCollector::RecordJSFunctionDetails(JSFunction* function) {
-  LiteralsArray* literals = function->literals();
-  RecordFixedArrayHelper(function, literals, LITERALS_ARRAY_SUB_TYPE, 0);
-  RecordFixedArrayHelper(function, literals->feedback_vector(),
-                         TYPE_FEEDBACK_VECTOR_SUB_TYPE, 0);
+  if (function->feedback_vector_cell()->value()->IsFeedbackVector()) {
+    FeedbackVector* feedback_vector = function->feedback_vector();
+    RecordFixedArrayHelper(function, feedback_vector, FEEDBACK_VECTOR_SUB_TYPE,
+                           0);
+  }
 }
 
 void ObjectStatsCollector::RecordFixedArrayDetails(FixedArray* array) {
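
Note on the RecordJSFunctionDetails change above: a feedback vector is now recorded only once the function's feedback_vector_cell actually holds one; before lazy allocation the cell still contains a placeholder. A minimal standalone sketch of that guard, with stand-in types rather than the real V8 classes:

    #include <cassert>

    struct FeedbackVector { int length = 0; };

    // The cell's value is either a placeholder (nullptr here, standing in
    // for V8's undefined) or an allocated FeedbackVector.
    struct Cell { FeedbackVector* value = nullptr; };
    struct JSFunctionSketch { Cell feedback_vector_cell; };

    // Record stats only once the cell actually holds a vector.
    bool RecordFeedbackVector(const JSFunctionSketch& fn, int* recorded) {
      if (fn.feedback_vector_cell.value == nullptr) return false;
      ++*recorded;
      return true;
    }

    int main() {
      JSFunctionSketch fn;
      int recorded = 0;
      assert(!RecordFeedbackVector(fn, &recorded));  // not yet allocated
      FeedbackVector vector;
      fn.feedback_vector_cell.value = &vector;
      assert(RecordFeedbackVector(fn, &recorded) && recorded == 1);
      return 0;
    }
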
diff --git a/src/heap/objects-visiting-inl.h b/src/heap/objects-visiting-inl.h
index f350256..493dce7 100644
--- a/src/heap/objects-visiting-inl.h
+++ b/src/heap/objects-visiting-inl.h
@@ -6,6 +6,7 @@
 #define V8_OBJECTS_VISITING_INL_H_
 
 #include "src/heap/array-buffer-tracker.h"
+#include "src/heap/mark-compact.h"
 #include "src/heap/objects-visiting.h"
 #include "src/ic/ic-state.h"
 #include "src/macro-assembler.h"
@@ -31,6 +32,10 @@
       kVisitConsString,
       &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, int>::Visit);
 
+  table_.Register(
+      kVisitThinString,
+      &FixedBodyVisitor<StaticVisitor, ThinString::BodyDescriptor, int>::Visit);
+
   table_.Register(kVisitSlicedString,
                   &FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor,
                                     int>::Visit);
@@ -60,7 +65,6 @@
                         int>::Visit);
 
   table_.Register(kVisitByteArray, &VisitByteArray);
-  table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
 
   table_.Register(
       kVisitSharedFunctionInfo,
@@ -103,19 +107,11 @@
 
   table_.template RegisterSpecializations<StructVisitor, kVisitStruct,
                                           kVisitStructGeneric>();
-}
 
-template <typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitBytecodeArray(
-    Map* map, HeapObject* object) {
-  VisitPointers(
-      map->GetHeap(), object,
-      HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
-      HeapObject::RawField(object, BytecodeArray::kFrameSizeOffset));
-  return reinterpret_cast<BytecodeArray*>(object)->BytecodeArraySize();
+  table_.Register(kVisitBytecodeArray, &UnreachableVisitor);
+  table_.Register(kVisitSharedFunctionInfo, &UnreachableVisitor);
 }
 
-
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::Initialize() {
   table_.Register(kVisitShortcutCandidate,
@@ -126,6 +122,10 @@
                   &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor,
                                     void>::Visit);
 
+  table_.Register(kVisitThinString,
+                  &FixedBodyVisitor<StaticVisitor, ThinString::BodyDescriptor,
+                                    void>::Visit);
+
   table_.Register(kVisitSlicedString,
                   &FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor,
                                     void>::Visit);
@@ -157,10 +157,7 @@
 
   table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
 
-  table_.Register(
-      kVisitBytecodeArray,
-      &FixedBodyVisitor<StaticVisitor, BytecodeArray::MarkingBodyDescriptor,
-                        void>::Visit);
+  table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
 
   table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
 
@@ -271,22 +268,11 @@
                                                           RelocInfo* rinfo) {
   DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
   Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-  // Monomorphic ICs are preserved when possible, but need to be flushed
-  // when they might be keeping a Context alive, or when the heap is about
-  // to be serialized.
-  if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() &&
-      (heap->isolate()->serializer_enabled() ||
-       target->ic_age() != heap->global_ic_age())) {
-    ICUtility::Clear(heap->isolate(), rinfo->pc(),
-                     rinfo->host()->constant_pool());
-    target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-  }
   Code* host = rinfo->host();
   heap->mark_compact_collector()->RecordRelocSlot(host, rinfo, target);
   StaticVisitor::MarkObject(heap, target);
 }
 
-
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
     Heap* heap, RelocInfo* rinfo) {
@@ -298,6 +284,13 @@
   StaticVisitor::MarkObject(heap, target);
 }
 
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
+    Map* map, HeapObject* object) {
+  FixedBodyVisitor<StaticVisitor, BytecodeArray::MarkingBodyDescriptor,
+                   void>::Visit(map, object);
+  BytecodeArray::cast(object)->MakeOlder();
+}
 
 template <typename StaticVisitor>
 void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
@@ -340,7 +333,7 @@
   // contain smi zero.
   if (weak_cell->next_cleared() && !weak_cell->cleared()) {
     HeapObject* value = HeapObject::cast(weak_cell->value());
-    if (MarkCompactCollector::IsMarked(value)) {
+    if (ObjectMarking::IsBlackOrGrey(value)) {
       // Weak cells with live values are directly processed here to reduce
       // the processing time of weak cells during the main GC pause.
       Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
@@ -421,7 +414,7 @@
   Heap* heap = map->GetHeap();
   Code* code = Code::cast(object);
   if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
-    code->MakeOlder(heap->mark_compact_collector()->marking_parity());
+    code->MakeOlder();
   }
   CodeBodyVisitor::Visit(map, object);
 }
@@ -435,12 +428,6 @@
   if (shared->ic_age() != heap->global_ic_age()) {
     shared->ResetForNewContext(heap->global_ic_age());
   }
-  if (FLAG_flush_optimized_code_cache) {
-    if (!shared->OptimizedCodeMapIsCleared()) {
-      // Always flush the optimized code map if requested by flag.
-      shared->ClearOptimizedCodeMap();
-    }
-  }
   MarkCompactCollector* collector = heap->mark_compact_collector();
   if (collector->is_code_flushing_enabled()) {
     if (IsFlushable(heap, shared)) {
@@ -465,9 +452,6 @@
                                                           HeapObject* object) {
   Heap* heap = map->GetHeap();
   JSFunction* function = JSFunction::cast(object);
-  if (FLAG_cleanup_code_caches_at_gc) {
-    function->ClearTypeFeedbackInfoAtGCTime();
-  }
   MarkCompactCollector* collector = heap->mark_compact_collector();
   if (collector->is_code_flushing_enabled()) {
     if (IsFlushable(heap, function)) {
@@ -538,8 +522,7 @@
 
   // Code is either on stack, in compilation cache or referenced
   // by optimized version of function.
-  MarkBit code_mark = ObjectMarking::MarkBitFrom(function->code());
-  if (Marking::IsBlackOrGrey(code_mark)) {
+  if (ObjectMarking::IsBlackOrGrey(function->code())) {
     return false;
   }
 
@@ -562,8 +545,7 @@
     Heap* heap, SharedFunctionInfo* shared_info) {
   // Code is either on stack, in compilation cache or referenced
   // by optimized version of function.
-  MarkBit code_mark = ObjectMarking::MarkBitFrom(shared_info->code());
-  if (Marking::IsBlackOrGrey(code_mark)) {
+  if (ObjectMarking::IsBlackOrGrey(shared_info->code())) {
     return false;
   }
 
@@ -600,8 +582,8 @@
     return false;
   }
 
-  // The function must not be a builtin.
-  if (shared_info->IsBuiltin()) {
+  // The function must be user code.
+  if (!shared_info->IsUserJavaScript()) {
     return false;
   }
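
The new StaticMarkingVisitor::VisitBytecodeArray above marks the array's body and then calls MakeOlder(), aging the bytecode on every marking pass. A toy sketch of such an aging scheme; the cap, the reset-on-execution, and the IsOld predicate are illustrative assumptions, not the exact V8 mechanism:

    #include <cassert>

    struct BytecodeArraySketch {
      static constexpr int kMaxAge = 6;  // illustrative cap
      int age = 0;

      void MakeOlder() { if (age < kMaxAge) age++; }  // bumped per GC pass
      void MarkUsed() { age = 0; }                    // reset when executed
      bool IsOld() const { return age == kMaxAge; }   // flushing candidate
    };

    int main() {
      BytecodeArraySketch b;
      for (int gc = 0; gc < 10; gc++) b.MakeOlder();
      assert(b.IsOld());
      b.MarkUsed();
      assert(!b.IsOld());
      return 0;
    }
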
 
diff --git a/src/heap/objects-visiting.cc b/src/heap/objects-visiting.cc
index d4aa8b2..a0df1f5 100644
--- a/src/heap/objects-visiting.cc
+++ b/src/heap/objects-visiting.cc
@@ -4,6 +4,7 @@
 
 #include "src/heap/objects-visiting.h"
 
+#include "src/heap/heap-inl.h"
 #include "src/heap/mark-compact-inl.h"
 #include "src/heap/objects-visiting-inl.h"
 
@@ -41,6 +42,9 @@
       case kExternalStringTag:
         return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
                                    instance_size, has_unboxed_fields);
+
+      case kThinStringTag:
+        return kVisitThinString;
     }
     UNREACHABLE();
   }
@@ -105,9 +109,9 @@
     case JS_OBJECT_TYPE:
     case JS_ERROR_TYPE:
     case JS_ARGUMENTS_TYPE:
+    case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
-    case JS_FIXED_ARRAY_ITERATOR_TYPE:
     case JS_MODULE_NAMESPACE_TYPE:
     case JS_VALUE_TYPE:
     case JS_DATE_TYPE:
@@ -159,6 +163,7 @@
     case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
     case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
 
+    case JS_PROMISE_CAPABILITY_TYPE:
     case JS_PROMISE_TYPE:
     case JS_BOUND_FUNCTION_TYPE:
       return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
@@ -177,7 +182,6 @@
     case FOREIGN_TYPE:
     case HEAP_NUMBER_TYPE:
     case MUTABLE_HEAP_NUMBER_TYPE:
-    case SIMD128_VALUE_TYPE:
       return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
                                  instance_size, has_unboxed_fields);
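
GetVisitorId above folds an instance type down to a small visitor id (kVisitThinString being the new case), and visitation is then a single indexed call through the registered table. A toy sketch of that table-driven dispatch with illustrative ids:

    #include <cassert>

    enum VisitorId { kVisitThinStr, kVisitDataObj, kNumVisitorIds };

    struct Obj { VisitorId id; int size; };

    using Visitor = int (*)(Obj*);
    int VisitThin(Obj* o) { return o->size; }
    int VisitData(Obj* o) { return o->size; }

    int main() {
      // The id is computed once from the map; visits are an indexed call.
      Visitor table[kNumVisitorIds] = {&VisitThin, &VisitData};
      Obj thin{kVisitThinStr, 16};
      assert(table[thin.id](&thin) == 16);
      return 0;
    }
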
 
diff --git a/src/heap/objects-visiting.h b/src/heap/objects-visiting.h
index 633c277..f10f370 100644
--- a/src/heap/objects-visiting.h
+++ b/src/heap/objects-visiting.h
@@ -6,6 +6,7 @@
 #define V8_OBJECTS_VISITING_H_
 
 #include "src/allocation.h"
+#include "src/heap/embedder-tracing.h"
 #include "src/heap/heap.h"
 #include "src/heap/spaces.h"
 #include "src/layout-descriptor.h"
@@ -78,6 +79,7 @@
   V(StructGeneric)         \
   V(ConsString)            \
   V(SlicedString)          \
+  V(ThinString)            \
   V(Symbol)                \
   V(Oddball)               \
   V(Code)                  \
@@ -267,12 +269,17 @@
   // Although we are using the JSFunction body descriptor which does not
   // visit the code entry, the compiler wants it to be accessible.
   // See JSFunction::BodyDescriptorImpl.
-  INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
-                                    Address entry_address)) {
+  inline static void VisitCodeEntry(Heap* heap, HeapObject* object,
+                                    Address entry_address) {
     UNREACHABLE();
   }
 
  private:
+  inline static int UnreachableVisitor(Map* map, HeapObject* object) {
+    UNREACHABLE();
+    return 0;
+  }
+
   INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
     return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
   }
@@ -300,8 +307,6 @@
     return FreeSpace::cast(object)->size();
   }
 
-  INLINE(static int VisitBytecodeArray(Map* map, HeapObject* object));
-
   class DataObjectVisitor {
    public:
     template <int object_size>
@@ -372,6 +377,7 @@
  protected:
   INLINE(static void VisitMap(Map* map, HeapObject* object));
   INLINE(static void VisitCode(Map* map, HeapObject* object));
+  INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
   INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
   INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
   INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
@@ -420,7 +426,7 @@
 
    private:
     INLINE(static void TracePossibleWrapper(HeapObject* object)) {
-      if (object->GetHeap()->UsingEmbedderHeapTracer()) {
+      if (object->GetHeap()->local_embedder_heap_tracer()->InUse()) {
         DCHECK(object->IsJSObject());
         object->GetHeap()->TracePossibleWrapper(JSObject::cast(object));
       }
diff --git a/src/heap/remembered-set.h b/src/heap/remembered-set.h
index a625b13..cf17a46 100644
--- a/src/heap/remembered-set.h
+++ b/src/heap/remembered-set.h
@@ -17,7 +17,7 @@
 
 // TODO(ulan): Investigate performance of de-templatizing this class.
 template <PointerDirection direction>
-class RememberedSet {
+class RememberedSet : public AllStatic {
  public:
   // Given a page and a slot in that page, this function adds the slot to the
   // remembered set.
@@ -31,6 +31,19 @@
     slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
   }
 
+  // Given a page and a slot in that page, this function returns true if
+  // the remembered set contains the slot.
+  static bool Contains(MemoryChunk* chunk, Address slot_addr) {
+    DCHECK(chunk->Contains(slot_addr));
+    SlotSet* slot_set = GetSlotSet(chunk);
+    if (slot_set == nullptr) {
+      return false;
+    }
+    uintptr_t offset = slot_addr - chunk->address();
+    return slot_set[offset / Page::kPageSize].Contains(offset %
+                                                       Page::kPageSize);
+  }
+
   // Given a page and a slot in that page, this function removes the slot from
   // the remembered set.
   // If the slot was never added, then the function does nothing.
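
Contains mirrors Insert: a chunk spanning several pages carries one SlotSet per covered page, so the byte offset from the chunk start first selects the per-page set and the remainder addresses the slot inside it. A minimal sketch of that split, assuming an illustrative page size:

    #include <cassert>
    #include <cstddef>

    constexpr size_t kPageSize = size_t{1} << 19;  // illustrative

    void SplitOffset(size_t offset, size_t* page_index, size_t* in_page) {
      *page_index = offset / kPageSize;  // selects the per-page SlotSet
      *in_page = offset % kPageSize;     // slot offset within that page
    }

    int main() {
      size_t page, off;
      SplitOffset(3 * kPageSize + 64, &page, &off);
      assert(page == 3 && off == 64);
      return 0;
    }
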
diff --git a/src/heap/scavenger.cc b/src/heap/scavenger.cc
index cad0e8a..c4c3e8b 100644
--- a/src/heap/scavenger.cc
+++ b/src/heap/scavenger.cc
@@ -5,11 +5,13 @@
 #include "src/heap/scavenger.h"
 
 #include "src/contexts.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/incremental-marking.h"
 #include "src/heap/objects-visiting-inl.h"
 #include "src/heap/scavenger-inl.h"
 #include "src/isolate.h"
 #include "src/log.h"
+#include "src/profiler/heap-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -30,6 +32,7 @@
     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
+    table_.Register(kVisitThinString, &EvacuateThinString);
     table_.Register(kVisitByteArray, &EvacuateByteArray);
     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
@@ -89,6 +92,12 @@
     return &table_;
   }
 
+  static void EvacuateThinStringNoShortcut(Map* map, HeapObject** slot,
+                                           HeapObject* object) {
+    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+                                                 ThinString::kSize);
+  }
+
  private:
   enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
 
@@ -139,7 +148,7 @@
 
     if (marks_handling == TRANSFER_MARKS) {
       if (IncrementalMarking::TransferColor(source, target, size)) {
-        MemoryChunk::IncrementLiveBytesFromGC(target, size);
+        MemoryChunk::IncrementLiveBytes(target, size);
       }
     }
   }
@@ -193,9 +202,8 @@
                                    reinterpret_cast<base::AtomicWord>(target));
 
       if (object_contents == POINTER_OBJECT) {
-        heap->promotion_queue()->insert(
-            target, object_size,
-            Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+        heap->promotion_queue()->insert(target, object_size,
+                                        ObjectMarking::IsBlack(object));
       }
       heap->IncrementPromotedObjectsSize(object_size);
       return true;
@@ -239,8 +247,7 @@
     DCHECK(map_word.IsForwardingAddress());
     HeapObject* target = map_word.ToForwardingAddress();
 
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(target);
-    if (Marking::IsBlack(mark_bit)) {
+    if (ObjectMarking::IsBlack(target)) {
       // This object is black and it might not be rescanned by marker.
       // We should explicitly record code entry slot for compaction because
       // promotion queue processing (IteratePromotedObjectPointers) will
@@ -339,6 +346,22 @@
                                                  object_size);
   }
 
+  static inline void EvacuateThinString(Map* map, HeapObject** slot,
+                                        HeapObject* object) {
+    if (marks_handling == IGNORE_MARKS) {
+      HeapObject* actual = ThinString::cast(object)->actual();
+      *slot = actual;
+      // ThinStrings always refer to internalized strings, which are
+      // always in old space.
+      DCHECK(!map->GetHeap()->InNewSpace(actual));
+      object->set_map_word(MapWord::FromForwardingAddress(actual));
+      return;
+    }
+
+    EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+                                                 ThinString::kSize);
+  }
+
   template <ObjectContents object_contents>
   class ObjectEvacuationStrategy {
    public:
@@ -423,6 +446,10 @@
           StaticVisitorBase::kVisitShortcutCandidate,
           scavenging_visitors_table_.GetVisitorById(
               StaticVisitorBase::kVisitConsString));
+      scavenging_visitors_table_.Register(
+          StaticVisitorBase::kVisitThinString,
+          &ScavengingVisitor<TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::
+              EvacuateThinStringNoShortcut);
     }
   }
 }
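
When marks are ignored, EvacuateThinString above does not copy the one-field wrapper at all: the referring slot is repointed at the underlying internalized string and a forwarding address is installed so any other slot reaching the wrapper resolves to the same target. A standalone sketch with stand-in types, not the real V8 object model:

    #include <cassert>

    struct Str { const char* chars; };
    struct ThinStr {
      Str* actual;       // always an internalized string, always in old space
      void* forwarding;  // plays the role of the forwarding map word
    };

    void EvacuateThinShortcut(ThinStr* object, void** slot) {
      Str* actual = object->actual;
      *slot = actual;                // short-circuit the indirection
      object->forwarding = actual;   // stands in for set_map_word(...)
    }

    int main() {
      Str internalized{"foo"};
      ThinStr thin{&internalized, nullptr};
      void* slot = &thin;  // the heap slot that referenced the wrapper
      EvacuateThinShortcut(&thin, &slot);
      assert(slot == &internalized);
      assert(thin.forwarding == &internalized);
      return 0;
    }
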
diff --git a/src/heap/slot-set.h b/src/heap/slot-set.h
index da61052..7612199 100644
--- a/src/heap/slot-set.h
+++ b/src/heap/slot-set.h
@@ -66,6 +66,18 @@
   }
 
   // The slot offset specifies a slot at address page_start_ + slot_offset.
+  // Returns true if the set contains the slot.
+  bool Contains(int slot_offset) {
+    int bucket_index, cell_index, bit_index;
+    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+    base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
+    if (current_bucket == nullptr) {
+      return false;
+    }
+    return (current_bucket[cell_index].Value() & (1u << bit_index)) != 0;
+  }
+
+  // The slot offset specifies a slot at address page_start_ + slot_offset.
   void Remove(int slot_offset) {
     int bucket_index, cell_index, bit_index;
     SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
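
Contains reduces a byte offset to a (bucket, cell, bit) triple the same way Insert and Remove do. A minimal sketch of that index arithmetic, assuming 8-byte slots and illustrative bucket and cell sizes rather than the exact SlotSet constants:

    #include <cassert>

    constexpr int kPointerSizeLog2 = 3;   // 8-byte slots
    constexpr int kBitsPerCell = 32;      // one uint32_t cell
    constexpr int kCellsPerBucket = 32;   // illustrative
    constexpr int kBitsPerBucket = kBitsPerCell * kCellsPerBucket;

    void SlotToIndices(int slot_offset, int* bucket, int* cell, int* bit) {
      int slot = slot_offset >> kPointerSizeLog2;  // byte offset -> slot index
      *bucket = slot / kBitsPerBucket;
      *cell = (slot % kBitsPerBucket) / kBitsPerCell;
      *bit = slot % kBitsPerCell;
    }

    int main() {
      int bucket, cell, bit;
      SlotToIndices(8 * 1025, &bucket, &cell, &bit);  // slot index 1025
      assert(bucket == 1 && cell == 0 && bit == 1);
      return 0;
    }
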
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index f3f9215..62d1b62 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -28,10 +28,14 @@
   return tmp;
 }
 
-NewSpacePageRange::NewSpacePageRange(Address start, Address limit)
-    : range_(Page::FromAddress(start),
-             Page::FromAllocationAreaAddress(limit)->next_page()) {
-  SemiSpace::AssertValidRange(start, limit);
+PageRange::PageRange(Address start, Address limit)
+    : begin_(Page::FromAddress(start)),
+      end_(Page::FromAllocationAreaAddress(limit)->next_page()) {
+#ifdef DEBUG
+  if (begin_->InNewSpace()) {
+    SemiSpace::AssertValidRange(start, limit);
+  }
+#endif  // DEBUG
 }
 
 // -----------------------------------------------------------------------------
@@ -204,8 +208,9 @@
 }
 
 Page* Page::ConvertNewToOld(Page* old_page) {
-  OldSpace* old_space = old_page->heap()->old_space();
+  DCHECK(!old_page->is_anchor());
   DCHECK(old_page->InNewSpace());
+  OldSpace* old_space = old_page->heap()->old_space();
   old_page->set_owner(old_space);
   old_page->SetFlags(0, ~0);
   old_space->AccountCommitted(old_page->size());
@@ -221,7 +226,7 @@
   }
 }
 
-void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
+void MemoryChunk::IncrementLiveBytes(HeapObject* object, int by) {
   MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
 }
 
@@ -244,18 +249,8 @@
   DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
 }
 
-void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
-  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
-  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
-    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
-  }
-  chunk->IncrementLiveBytes(by);
-}
-
 bool PagedSpace::Contains(Address addr) {
-  Page* p = Page::FromAddress(addr);
-  if (!Page::IsValid(p)) return false;
-  return p->owner() == this;
+  return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
 }
 
 bool PagedSpace::Contains(Object* o) {
@@ -288,7 +283,7 @@
   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
   uintptr_t offset = addr - chunk->address();
   if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
-    chunk = heap->lo_space()->FindPage(addr);
+    chunk = heap->lo_space()->FindPageThreadSafe(addr);
   }
   return chunk;
 }
@@ -436,11 +431,10 @@
     if (object == NULL) {
       object = SlowAllocateRaw(size_in_bytes);
     }
-    if (object != NULL) {
-      if (heap()->incremental_marking()->black_allocation()) {
-        Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
-        MemoryChunk::IncrementLiveBytesFromGC(object, size_in_bytes);
-      }
+    if (object != NULL && heap()->incremental_marking()->black_allocation()) {
+      Address start = object->address();
+      Address end = object->address() + size_in_bytes;
+      Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
     }
   }
 
@@ -479,12 +473,19 @@
     if (object == NULL) {
       object = SlowAllocateRaw(allocation_size);
     }
-    if (object != NULL && filler_size != 0) {
-      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
-                                       alignment);
-      // Filler objects are initialized, so mark only the aligned object memory
-      // as uninitialized.
-      allocation_size = size_in_bytes;
+    if (object != NULL) {
+      if (heap()->incremental_marking()->black_allocation()) {
+        Address start = object->address();
+        Address end = object->address() + allocation_size;
+        Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
+      }
+      if (filler_size != 0) {
+        object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
+                                         alignment);
+        // Filler objects are initialized, so mark only the aligned object
+        // memory as uninitialized.
+        allocation_size = size_in_bytes;
+      }
     }
   }
 
@@ -596,6 +597,17 @@
     FATAL("Code page is too large.");
   }
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
+
+  // Initialize the owner field for each contained page (except the first, which
+  // is initialized by MemoryChunk::Initialize).
+  for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
+       addr < chunk->area_end(); addr += Page::kPageSize) {
+    // Clear out kPageHeaderTag.
+    Memory::Address_at(addr) = 0;
+  }
+
   return static_cast<LargePage*>(chunk);
 }
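
The loop added above zeroes the owner word of every interior page of a large chunk, so that lookups through MemoryChunk::FromAnyPointerAddress no longer see a stale kPageHeaderTag there and instead fall through to the large-object chunk map. The underlying FromAddress is plain alignment arithmetic, sketched here with an illustrative alignment:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kAlignment = uintptr_t{1} << 19;  // illustrative

    // Chunks are kAlignment-aligned, so masking the low bits of any
    // interior pointer yields the start of the covering chunk.
    uintptr_t ChunkFromAddress(uintptr_t addr) {
      return addr & ~(kAlignment - 1);
    }

    int main() {
      uintptr_t chunk = 7 * kAlignment;
      assert(ChunkFromAddress(chunk + 12345) == chunk);
      assert(ChunkFromAddress(chunk) == chunk);
      return 0;
    }
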
 
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index e0e6d12..c2a51e4 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -9,11 +9,15 @@
 #include "src/base/bits.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/semaphore.h"
+#include "src/counters.h"
 #include "src/full-codegen/full-codegen.h"
 #include "src/heap/array-buffer-tracker.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
 #include "src/heap/slot-set.h"
 #include "src/macro-assembler.h"
 #include "src/msan.h"
+#include "src/objects-inl.h"
 #include "src/snapshot/snapshot.h"
 #include "src/v8.h"
 
@@ -335,7 +339,7 @@
  private:
   // v8::Task overrides.
   void Run() override {
-    unmapper_->PerformFreeMemoryOnQueuedChunks();
+    unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
     unmapper_->pending_unmapping_tasks_semaphore_.Signal();
   }
 
@@ -350,7 +354,7 @@
         new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
     concurrent_unmapping_tasks_active_++;
   } else {
-    PerformFreeMemoryOnQueuedChunks();
+    PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
   }
 }
 
@@ -364,6 +368,7 @@
   return waited;
 }
 
+template <MemoryAllocator::Unmapper::FreeMode mode>
 void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
   MemoryChunk* chunk = nullptr;
   // Regular chunks.
@@ -372,6 +377,14 @@
     allocator_->PerformFreeMemory(chunk);
     if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
   }
+  if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
+    // The previous loop uncommitted any pages marked as pooled and added them
+    // to the pooled list. In case of kReleasePooled we need to free them
+    // though.
+    while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
+      allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+    }
+  }
   // Non-regular chunks.
   while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
     allocator_->PerformFreeMemory(chunk);
@@ -382,7 +395,10 @@
   WaitUntilCompleted();
   ReconsiderDelayedChunks();
   CHECK(delayed_regular_chunks_.empty());
-  PerformFreeMemoryOnQueuedChunks();
+  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+  for (int i = 0; i < kNumberOfChunkQueues; i++) {
+    DCHECK(chunks_[i].empty());
+  }
 }
 
 void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
@@ -780,7 +796,7 @@
 
 size_t Page::AvailableInFreeList() {
   size_t sum = 0;
-  ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
+  ForAllFreeListCategories([&sum](FreeListCategory* category) {
     sum += category->available();
   });
   return sum;
@@ -833,6 +849,16 @@
   return unused;
 }
 
+void Page::CreateBlackArea(Address start, Address end) {
+  DCHECK(heap()->incremental_marking()->black_allocation());
+  DCHECK_EQ(Page::FromAddress(start), this);
+  DCHECK_NE(start, end);
+  DCHECK_EQ(Page::FromAddress(end - 1), this);
+  markbits()->SetRange(AddressToMarkbitIndex(start),
+                       AddressToMarkbitIndex(end));
+  IncrementLiveBytes(static_cast<int>(end - start));
+}
+
 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
                                         Address start_free) {
   // We do not allow partial shrink for code.
@@ -899,6 +925,11 @@
       PreFreeMemory(chunk);
       PerformFreeMemory(chunk);
       break;
+    case kAlreadyPooled:
+      // Pooled pages cannot be touched anymore as their memory is uncommitted.
+      FreeMemory(chunk->address(), static_cast<size_t>(MemoryChunk::kPageSize),
+                 Executability::NOT_EXECUTABLE);
+      break;
     case kPooledAndQueue:
       DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
       DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
@@ -909,13 +940,14 @@
       // The chunks added to this queue will be freed by a concurrent thread.
       unmapper()->AddMemoryChunkSafe(chunk);
       break;
-    default:
-      UNREACHABLE();
   }
 }
 
 template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
 
+template void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(
+    MemoryChunk* chunk);
+
 template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
     MemoryChunk* chunk);
 
@@ -1287,25 +1319,6 @@
   return false;
 }
 
-
-Object* PagedSpace::FindObject(Address addr) {
-  // Note: this function can only be called on iterable spaces.
-  DCHECK(!heap()->mark_compact_collector()->in_use());
-
-  if (!Contains(addr)) return Smi::kZero;  // Signaling not found.
-
-  Page* p = Page::FromAddress(addr);
-  HeapObjectIterator it(p);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    Address cur = obj->address();
-    Address next = cur + obj->Size();
-    if ((cur <= addr) && (addr < next)) return obj;
-  }
-
-  UNREACHABLE();
-  return Smi::kZero;
-}
-
 void PagedSpace::ShrinkImmortalImmovablePages() {
   DCHECK(!heap()->deserialization_complete());
   MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -1361,10 +1374,7 @@
   SetTopAndLimit(top, limit);
   if (top != nullptr && top != limit &&
       heap()->incremental_marking()->black_allocation()) {
-    Page* page = Page::FromAllocationAreaAddress(top);
-    page->markbits()->SetRange(page->AddressToMarkbitIndex(top),
-                               page->AddressToMarkbitIndex(limit));
-    page->IncrementLiveBytes(static_cast<int>(limit - top));
+    Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
   }
 }
 
@@ -1373,10 +1383,8 @@
   Address current_top = top();
   Address current_limit = limit();
   if (current_top != nullptr && current_top != current_limit) {
-    Page* page = Page::FromAllocationAreaAddress(current_top);
-    page->markbits()->SetRange(page->AddressToMarkbitIndex(current_top),
-                               page->AddressToMarkbitIndex(current_limit));
-    page->IncrementLiveBytes(static_cast<int>(current_limit - current_top));
+    Page::FromAllocationAreaAddress(current_top)
+        ->CreateBlackArea(current_top, current_limit);
   }
 }
 
@@ -1473,7 +1481,7 @@
       // All the interior pointers should be contained in the heap.
       int size = object->Size();
       object->IterateBody(map->instance_type(), size, visitor);
-      if (Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
+      if (ObjectMarking::IsBlack(object)) {
         black_size += size;
       }
 
@@ -1598,6 +1606,9 @@
         // Make sure we don't overtake the actual top pointer.
         CHECK_NE(to_remove, current_page_);
         to_remove->Unlink();
+        // Clear new space flags to avoid this page being treated as a new
+        // space page that is potentially being swept.
+        to_remove->SetFlags(0, Page::kIsInNewSpaceMask);
         heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
             to_remove);
       }
@@ -2095,7 +2106,7 @@
   DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
   age_mark_ = mark;
   // Mark all pages up to the one containing mark.
-  for (Page* p : NewSpacePageRange(space_start(), mark)) {
+  for (Page* p : PageRange(space_start(), mark)) {
     p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
   }
 }
@@ -2616,7 +2627,7 @@
 
   // Memory in the linear allocation area is counted as allocated.  We may free
   // a little of this again immediately - see below.
-  owner_->Allocate(static_cast<int>(new_node_size));
+  owner_->AccountAllocatedBytes(new_node_size);
 
   if (owner_->heap()->inline_allocation_disabled()) {
     // Keep the linear allocation area empty if requested to do so, just
@@ -2650,7 +2661,7 @@
 size_t FreeList::EvictFreeListItems(Page* page) {
   size_t sum = 0;
   page->ForAllFreeListCategories(
-      [this, &sum, page](FreeListCategory* category) {
+      [this, &sum](FreeListCategory* category) {
         DCHECK_EQ(this, category->owner());
         sum += category->available();
         RemoveCategory(category);
@@ -2806,7 +2817,6 @@
   }
 }
 
-
 HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   if (collector->sweeping_in_progress()) {
@@ -2820,7 +2830,6 @@
   return nullptr;
 }
 
-
 HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
   if (collector->sweeping_in_progress()) {
@@ -2877,9 +2886,7 @@
          ", available: %" V8PRIdPTR ", %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
-  if (heap()->mark_compact_collector()->sweeping_in_progress()) {
-    heap()->mark_compact_collector()->EnsureSweepingCompleted();
-  }
+  heap()->mark_compact_collector()->EnsureSweepingCompleted();
   ClearHistograms(heap()->isolate());
   HeapObjectIterator obj_it(this);
   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
@@ -2994,7 +3001,6 @@
   InsertChunkMapEntries(page);
 
   HeapObject* object = page->GetObject();
-  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
 
   if (Heap::ShouldZapGarbage()) {
     // Make the object consistent so the heap can be verified in OldSpaceStep.
@@ -3009,8 +3015,9 @@
   AllocationStep(object->address(), object_size);
 
   if (heap()->incremental_marking()->black_allocation()) {
-    Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
-    MemoryChunk::IncrementLiveBytesFromGC(object, object_size);
+    // We cannot use ObjectMarking here as the object still lacks a size.
+    Marking::WhiteToBlack(ObjectMarking::MarkBitFrom(object));
+    MemoryChunk::IncrementLiveBytes(object, object_size);
   }
   return object;
 }
@@ -3033,6 +3040,10 @@
   return Smi::kZero;  // Signaling not found.
 }
 
+LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) {
+  base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
+  return FindPage(a);
+}
 
 LargePage* LargeObjectSpace::FindPage(Address a) {
   uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
@@ -3054,9 +3065,8 @@
   LargePage* current = first_page_;
   while (current != NULL) {
     HeapObject* object = current->GetObject();
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
-    DCHECK(Marking::IsBlack(mark_bit));
-    Marking::BlackToWhite(mark_bit);
+    DCHECK(ObjectMarking::IsBlack(object));
+    ObjectMarking::ClearMarkBit(object);
     Page::FromAddress(object->address())->ResetProgressBar();
     Page::FromAddress(object->address())->ResetLiveBytes();
     current = current->next_page();
@@ -3069,6 +3079,9 @@
   uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
   uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
                     MemoryChunk::kAlignment;
+  // There may be concurrent access on the chunk map. We have to take the lock
+  // here.
+  base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
   for (uintptr_t key = start; key <= limit; key++) {
     base::HashMap::Entry* entry = chunk_map_.InsertNew(
         reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
@@ -3098,9 +3111,8 @@
   LargePage* current = first_page_;
   while (current != NULL) {
     HeapObject* object = current->GetObject();
-    MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
-    DCHECK(!Marking::IsGrey(mark_bit));
-    if (Marking::IsBlack(mark_bit)) {
+    DCHECK(!ObjectMarking::IsGrey(object));
+    if (ObjectMarking::IsBlack(object)) {
       Address free_start;
       if ((free_start = current->GetAddressToShrink()) != 0) {
         // TODO(hpayer): Perform partial free concurrently.
@@ -3168,11 +3180,13 @@
 
     // We have only code, sequential strings, external strings
     // (sequential strings that have been morphed into external
-    // strings), fixed arrays, byte arrays, and constant pool arrays in the
-    // large object space.
+    // strings), thin strings (sequential strings that have been
+    // morphed into thin strings), fixed arrays, byte arrays, and
+    // constant pool arrays in the large object space.
     CHECK(object->IsAbstractCode() || object->IsSeqString() ||
-          object->IsExternalString() || object->IsFixedArray() ||
-          object->IsFixedDoubleArray() || object->IsByteArray());
+          object->IsExternalString() || object->IsThinString() ||
+          object->IsFixedArray() || object->IsFixedDoubleArray() ||
+          object->IsByteArray());
 
     // The object itself should look OK.
     object->ObjectVerify();
@@ -3233,7 +3247,7 @@
   unsigned mark_size = 0;
   for (HeapObject* object = objects.Next(); object != NULL;
        object = objects.Next()) {
-    bool is_marked = Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object));
+    bool is_marked = ObjectMarking::IsBlackOrGrey(object);
     PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
     if (is_marked) {
       mark_size += object->Size();
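
Page::CreateBlackArea, now shared by the allocation paths above, pre-marks a freshly allocated [start, end) range black and accounts it as live, so the incremental marker treats black-allocated memory as already visited. A simplified sketch with illustrative sizes and a flat bitmap standing in for V8's mark bitmap:

    #include <bitset>
    #include <cassert>
    #include <cstddef>

    constexpr size_t kPageSize = 1 << 12;  // illustrative, not V8's page size
    constexpr size_t kPointerSize = 8;

    struct PageSketch {
      std::bitset<kPageSize / kPointerSize> markbits;  // one bit per slot
      size_t live_bytes = 0;

      void CreateBlackArea(size_t start, size_t end) {  // page-relative offsets
        assert(start < end && end <= kPageSize);
        for (size_t off = start; off < end; off += kPointerSize) {
          markbits.set(off / kPointerSize);
        }
        live_bytes += end - start;
      }
    };

    int main() {
      PageSketch page;
      page.CreateBlackArea(64, 128);
      assert(page.live_bytes == 64 && page.markbits.count() == 8);
      return 0;
    }
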
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index f5701ad..a71c636 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -317,8 +317,11 @@
   static const intptr_t kAlignmentMask = kAlignment - 1;
 
   static const intptr_t kSizeOffset = 0;
-
-  static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
+  static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
+  static const intptr_t kAreaStartOffset = kFlagsOffset + kIntptrSize;
+  static const intptr_t kAreaEndOffset = kAreaStartOffset + kPointerSize;
+  static const intptr_t kReservationOffset = kAreaEndOffset + kPointerSize;
+  static const intptr_t kOwnerOffset = kReservationOffset + 2 * kPointerSize;
 
   static const size_t kMinHeaderSize =
       kSizeOffset + kSizetSize  // size_t size
@@ -367,8 +370,7 @@
 
   static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
 
-  static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
-  static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);
+  static inline void IncrementLiveBytes(HeapObject* object, int by);
 
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
@@ -553,10 +555,11 @@
   void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
 
   Space* owner() const {
-    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
-        kPageHeaderTag) {
-      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
-                                      kPageHeaderTag);
+    intptr_t owner_value = base::NoBarrierAtomicValue<intptr_t>::FromAddress(
+                               const_cast<Address*>(&owner_))
+                               ->Value();
+    if ((owner_value & kPageHeaderTagMask) == kPageHeaderTag) {
+      return reinterpret_cast<Space*>(owner_value - kPageHeaderTag);
     } else {
       return nullptr;
     }
@@ -769,6 +772,8 @@
 
   size_t ShrinkToHighWaterMark();
 
+  void CreateBlackArea(Address start, Address end);
+
 #ifdef DEBUG
   void Print();
 #endif  // DEBUG
@@ -1092,7 +1097,7 @@
 // A space acquires chunks of memory from the operating system. The memory
 // allocator allocates and deallocates pages for the paged heap spaces and large
 // pages for large object space.
-class MemoryAllocator {
+class V8_EXPORT_PRIVATE MemoryAllocator {
  public:
   // Unmapper takes care of concurrently unmapping and uncommitting memory
   // chunks.
@@ -1103,7 +1108,10 @@
     explicit Unmapper(MemoryAllocator* allocator)
         : allocator_(allocator),
           pending_unmapping_tasks_semaphore_(0),
-          concurrent_unmapping_tasks_active_(0) {}
+          concurrent_unmapping_tasks_active_(0) {
+      chunks_[kRegular].reserve(kReservedQueueingSlots);
+      chunks_[kPooled].reserve(kReservedQueueingSlots);
+    }
 
     void AddMemoryChunkSafe(MemoryChunk* chunk) {
       if ((chunk->size() == Page::kPageSize) &&
@@ -1136,6 +1144,8 @@
     void TearDown();
 
    private:
+    static const int kReservedQueueingSlots = 64;
+
     enum ChunkQueueType {
       kRegular,     // Pages of kPageSize that do not live in a CodeRange and
                     // can thus be used for stealing.
@@ -1144,6 +1154,11 @@
       kNumberOfChunkQueues,
     };
 
+    enum class FreeMode {
+      kUncommitPooled,
+      kReleasePooled,
+    };
+
     template <ChunkQueueType type>
     void AddMemoryChunkSafe(MemoryChunk* chunk) {
       base::LockGuard<base::Mutex> guard(&mutex_);
@@ -1159,17 +1174,18 @@
     MemoryChunk* GetMemoryChunkSafe() {
       base::LockGuard<base::Mutex> guard(&mutex_);
       if (chunks_[type].empty()) return nullptr;
-      MemoryChunk* chunk = chunks_[type].front();
-      chunks_[type].pop_front();
+      MemoryChunk* chunk = chunks_[type].back();
+      chunks_[type].pop_back();
       return chunk;
     }
 
     void ReconsiderDelayedChunks();
+    template <FreeMode mode>
     void PerformFreeMemoryOnQueuedChunks();
 
     base::Mutex mutex_;
     MemoryAllocator* allocator_;
-    std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
+    std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
     // Delayed chunks cannot be processed in the current unmapping cycle because
     // of dependencies such as an active sweeper.
     // See MemoryAllocator::CanFreeMemoryChunk.
@@ -1187,6 +1203,7 @@
 
   enum FreeMode {
     kFull,
+    kAlreadyPooled,
     kPreFreeAndQueue,
     kPooledAndQueue,
   };
@@ -1376,6 +1393,15 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };
 
+extern template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+    size_t size, PagedSpace* owner, Executability executable);
+extern template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+    size_t size, SemiSpace* owner, Executability executable);
+extern template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+    size_t size, SemiSpace* owner, Executability executable);
 
 // -----------------------------------------------------------------------------
 // Interface for heap object iterator to be implemented by all object space
@@ -1419,6 +1445,8 @@
   typedef PageIterator iterator;
   PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
   explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
+  inline PageRange(Address start, Address limit);
+
   iterator begin() { return iterator(begin_); }
   iterator end() { return iterator(end_); }
 
@@ -1641,7 +1669,7 @@
 //   words in size.
 // At least 16384 words (huge): This list is for objects of 2048 words or
 //   larger. Empty pages are also added to this list.
-class FreeList {
+class V8_EXPORT_PRIVATE FreeList {
  public:
   // This method returns how much memory can be allocated after freeing
   // maximum_freed memory.
@@ -1878,18 +1906,7 @@
   AllocationInfo allocation_info_;
 };
 
-class NewSpacePageRange {
- public:
-  typedef PageRange::iterator iterator;
-  inline NewSpacePageRange(Address start, Address limit);
-  iterator begin() { return range_.begin(); }
-  iterator end() { return range_.end(); }
-
- private:
-  PageRange range_;
-};
-
-class PagedSpace : public Space {
+class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
  public:
   typedef PageIterator iterator;
 
@@ -1915,12 +1932,6 @@
   inline bool Contains(Object* o);
   bool ContainsSlow(Address addr);
 
-  // Given an address occupied by a live object, return that object if it is
-  // in this space, or a Smi if it is not.  The implementation iterates over
-  // objects in the page containing the address, the cost is linear in the
-  // number of objects in the page.  It may be slow.
-  Object* FindObject(Address addr);
-
   // During boot the free_space_map is created, and afterwards we may need
   // to write it into the free list nodes that were already created.
   void RepairFreeListsAfterDeserialization();
@@ -2034,7 +2045,9 @@
 
   void MarkAllocationInfoBlack();
 
-  void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
+  void AccountAllocatedBytes(size_t bytes) {
+    accounting_stats_.AllocateBytes(bytes);
+  }
 
   void IncreaseCapacity(size_t bytes);
 
@@ -2448,34 +2461,24 @@
   }
 
   size_t AllocatedSinceLastGC() {
-    bool seen_age_mark = false;
-    Address age_mark = to_space_.age_mark();
-    Page* current_page = to_space_.first_page();
-    Page* age_mark_page = Page::FromAddress(age_mark);
-    Page* last_page = Page::FromAddress(top() - kPointerSize);
-    if (age_mark_page == last_page) {
-      if (top() - age_mark >= 0) {
-        return top() - age_mark;
-      }
-      // Top was reset at some point, invalidating this metric.
-      return 0;
-    }
-    while (current_page != last_page) {
-      if (current_page == age_mark_page) {
-        seen_age_mark = true;
-        break;
-      }
+    const Address age_mark = to_space_.age_mark();
+    DCHECK_NOT_NULL(age_mark);
+    DCHECK_NOT_NULL(top());
+    Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
+    Page* const last_page = Page::FromAllocationAreaAddress(top());
+    Page* current_page = age_mark_page;
+    size_t allocated = 0;
+    if (current_page != last_page) {
+      DCHECK_EQ(current_page, age_mark_page);
+      DCHECK_GE(age_mark_page->area_end(), age_mark);
+      allocated += age_mark_page->area_end() - age_mark;
       current_page = current_page->next_page();
+    } else {
+      DCHECK_GE(top(), age_mark);
+      return top() - age_mark;
     }
-    if (!seen_age_mark) {
-      // Top was reset at some point, invalidating this metric.
-      return 0;
-    }
-    DCHECK_GE(age_mark_page->area_end(), age_mark);
-    size_t allocated = age_mark_page->area_end() - age_mark;
-    DCHECK_EQ(current_page, age_mark_page);
-    current_page = age_mark_page->next_page();
     while (current_page != last_page) {
+      DCHECK_NE(current_page, age_mark_page);
       allocated += Page::kAllocatableMemory;
       current_page = current_page->next_page();
     }
@@ -2820,6 +2823,9 @@
   // The function iterates through all objects in this space, may be slow.
   Object* FindObject(Address a);
 
+  // Takes the chunk_map_mutex_ and then calls FindPage.
+  LargePage* FindPageThreadSafe(Address a);
+
   // Finds a large object page containing the given address, returns NULL
   // if such a page doesn't exist.
   LargePage* FindPage(Address a);
@@ -2870,6 +2876,9 @@
   size_t size_;            // allocated bytes
   int page_count_;         // number of chunks
   size_t objects_size_;    // size of objects
+  // The chunk_map_mutex_ has to be used when the chunk map is accessed
+  // concurrently.
+  base::Mutex chunk_map_mutex_;
   // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
   base::HashMap chunk_map_;
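
The reworked AllocatedSinceLastGC sums the tail of the page holding the age mark, every full page in between, and the used prefix of the page holding top(). A sketch of that arithmetic over an idealized model in which pages are contiguous indices (in V8 they form a linked list):

    #include <cassert>
    #include <cstddef>

    constexpr size_t kPageArea = size_t{1} << 19;  // illustrative

    size_t AllocatedSinceLastGC(size_t age_mark_page, size_t age_mark_offset,
                                size_t top_page, size_t top_offset) {
      if (age_mark_page == top_page) return top_offset - age_mark_offset;
      size_t allocated = kPageArea - age_mark_offset;  // tail of age-mark page
      allocated += (top_page - age_mark_page - 1) * kPageArea;  // full pages
      return allocated + top_offset;                   // prefix of top page
    }

    int main() {
      assert(AllocatedSinceLastGC(0, 100, 0, 300) == 200);
      assert(AllocatedSinceLastGC(0, kPageArea - 16, 2, 32) ==
             16 + kPageArea + 32);
      return 0;
    }
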
 
diff --git a/src/heap/store-buffer.cc b/src/heap/store-buffer.cc
index 974b85e..8eb943f 100644
--- a/src/heap/store-buffer.cc
+++ b/src/heap/store-buffer.cc
@@ -16,13 +16,19 @@
 namespace internal {
 
 StoreBuffer::StoreBuffer(Heap* heap)
-    : heap_(heap), top_(nullptr), current_(0), virtual_memory_(nullptr) {
+    : heap_(heap),
+      top_(nullptr),
+      current_(0),
+      mode_(NOT_IN_GC),
+      virtual_memory_(nullptr) {
   for (int i = 0; i < kStoreBuffers; i++) {
     start_[i] = nullptr;
     limit_[i] = nullptr;
     lazy_top_[i] = nullptr;
   }
   task_running_ = false;
+  insertion_callback = &InsertDuringRuntime;
+  deletion_callback = &DeleteDuringRuntime;
 }
 
 void StoreBuffer::SetUp() {
@@ -85,7 +91,7 @@
   current_ = other;
   top_ = start_[current_];
 
-  if (!task_running_) {
+  if (!task_running_ && FLAG_concurrent_sweeping) {
     task_running_ = true;
     Task* task = new Task(heap_->isolate(), this);
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
@@ -99,7 +105,6 @@
   DCHECK_LT(index, kStoreBuffers);
   for (Address* current = start_[index]; current < lazy_top_[index];
        current++) {
-    DCHECK(!heap_->code_space()->Contains(*current));
     Address addr = *current;
     Page* page = Page::FromAnyPointerAddress(heap_, addr);
     if (IsDeletionAddress(addr)) {
@@ -137,29 +142,5 @@
   task_running_ = false;
 }
 
-void StoreBuffer::DeleteEntry(Address start, Address end) {
-  // Deletions coming from the GC are directly deleted from the remembered
-  // set. Deletions coming from the runtime are added to the store buffer
-  // to allow concurrent processing.
-  if (heap_->gc_state() == Heap::NOT_IN_GC) {
-    if (top_ + sizeof(Address) * 2 > limit_[current_]) {
-      StoreBufferOverflow(heap_->isolate());
-    }
-    *top_ = MarkDeletionAddress(start);
-    top_++;
-    *top_ = end;
-    top_++;
-  } else {
-    // In GC the store buffer has to be empty at any time.
-    DCHECK(Empty());
-    Page* page = Page::FromAddress(start);
-    if (end) {
-      RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
-                                             SlotSet::PREFREE_EMPTY_BUCKETS);
-    } else {
-      RememberedSet<OLD_TO_NEW>::Remove(page, start);
-    }
-  }
-}
 }  // namespace internal
 }  // namespace v8
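
The deletion entries processed above are encoded with kDeletionTag: a deletion occupies two consecutive store buffer entries, the start address with its low bit set followed by the end address (null for a single slot). A sketch of the tagging, assuming addresses are at least 2-byte aligned so the low bit is free:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kDeletionTag = 1;

    uintptr_t MarkDeletionAddress(uintptr_t addr) { return addr | kDeletionTag; }
    bool IsDeletionAddress(uintptr_t addr) { return (addr & kDeletionTag) != 0; }
    uintptr_t UnmarkDeletionAddress(uintptr_t addr) {
      return addr & ~kDeletionTag;
    }

    int main() {
      uintptr_t start = 0x1000;
      uintptr_t tagged = MarkDeletionAddress(start);
      assert(IsDeletionAddress(tagged) && !IsDeletionAddress(start));
      assert(UnmarkDeletionAddress(tagged) == start);
      return 0;
    }
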
diff --git a/src/heap/store-buffer.h b/src/heap/store-buffer.h
index 09faf4d..0ade9e0 100644
--- a/src/heap/store-buffer.h
+++ b/src/heap/store-buffer.h
@@ -24,7 +24,9 @@
 // slots are moved to the remembered set.
 class StoreBuffer {
  public:
-  static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2);
+  enum StoreBufferMode { IN_GC, NOT_IN_GC };
+
+  static const int kStoreBufferSize = 1 << (11 + kPointerSizeLog2);
   static const int kStoreBufferMask = kStoreBufferSize - 1;
   static const int kStoreBuffers = 2;
   static const intptr_t kDeletionTag = 1;
@@ -63,22 +65,77 @@
   // If we only want to delete a single slot, end should be set to null which
   // will be written into the second field. When processing the store buffer
   // the more efficient Remove method will be called in this case.
-  void DeleteEntry(Address start, Address end = nullptr);
+  void DeleteEntry(Address start, Address end = nullptr) {
+    // Deletions coming from the GC are directly deleted from the remembered
+    // set. Deletions coming from the runtime are added to the store buffer
+    // to allow concurrent processing.
+    deletion_callback(this, start, end);
+  }
+
+  static void DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
+                                            Address start, Address end) {
+    // In GC the store buffer has to be empty at any time.
+    DCHECK(store_buffer->Empty());
+    DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
+    Page* page = Page::FromAddress(start);
+    if (end) {
+      RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+                                             SlotSet::PREFREE_EMPTY_BUCKETS);
+    } else {
+      RememberedSet<OLD_TO_NEW>::Remove(page, start);
+    }
+  }
+
+  static void DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
+                                  Address end) {
+    DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
+    store_buffer->InsertDeletionIntoStoreBuffer(start, end);
+  }
+
+  void InsertDeletionIntoStoreBuffer(Address start, Address end) {
+    if (top_ + sizeof(Address) * 2 > limit_[current_]) {
+      StoreBufferOverflow(heap_->isolate());
+    }
+    *top_ = MarkDeletionAddress(start);
+    top_++;
+    *top_ = end;
+    top_++;
+  }
+
+  static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
+                                            Address slot) {
+    DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
+    RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+  }
+
+  static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
+    DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
+    store_buffer->InsertIntoStoreBuffer(slot);
+  }
+
+  void InsertIntoStoreBuffer(Address slot) {
+    if (top_ + sizeof(Address) > limit_[current_]) {
+      StoreBufferOverflow(heap_->isolate());
+    }
+    *top_ = slot;
+    top_++;
+  }
 
   void InsertEntry(Address slot) {
     // Insertions coming from the GC are directly inserted into the remembered
     // set. Insertions coming from the runtime are added to the store buffer to
     // allow concurrent processing.
-    if (heap_->gc_state() == Heap::NOT_IN_GC) {
-      if (top_ + sizeof(Address) > limit_[current_]) {
-        StoreBufferOverflow(heap_->isolate());
-      }
-      *top_ = slot;
-      top_++;
+    insertion_callback(this, slot);
+  }
+
+  void SetMode(StoreBufferMode mode) {
+    mode_ = mode;
+    if (mode == NOT_IN_GC) {
+      insertion_callback = &InsertDuringRuntime;
+      deletion_callback = &DeleteDuringRuntime;
     } else {
-      // In GC the store buffer has to be empty at any time.
-      DCHECK(Empty());
-      RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+      insertion_callback = &InsertDuringGarbageCollection;
+      deletion_callback = &DeleteDuringGarbageCollection;
     }
   }
 
@@ -95,6 +152,8 @@
     return top_ == start_[current_];
   }
 
+  Heap* heap() { return heap_; }
+
  private:
   // There are two store buffers. If one store buffer fills up, the main thread
   // publishes the top pointer of the store buffer that needs processing in its
@@ -119,6 +178,8 @@
     DISALLOW_COPY_AND_ASSIGN(Task);
   };
 
+  StoreBufferMode mode() const { return mode_; }
+
   void FlipStoreBuffers();
 
   Heap* heap_;
@@ -142,7 +203,17 @@
   // Points to the current buffer in use.
   int current_;
 
+  // During GC, entries are directly added to the remembered set without
+  // going through the store buffer. This is signaled by a special
+  // IN_GC mode.
+  StoreBufferMode mode_;
+
   base::VirtualMemory* virtual_memory_;
+
+  // Callbacks are more efficient than reading out the gc state for every
+  // store buffer operation.
+  void (*insertion_callback)(StoreBuffer*, Address);
+  void (*deletion_callback)(StoreBuffer*, Address, Address);
 };
 
 }  // namespace internal
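
SetMode above trades the per-operation gc_state() branch for function pointers installed once per GC transition, so the hot InsertEntry and DeleteEntry paths become a single indirect call. A minimal standalone sketch of the pattern, with simplified stand-ins for the StoreBuffer members:

    #include <cassert>

    struct Buffer;
    using InsertFn = void (*)(Buffer*, void*);

    struct Buffer {
      InsertFn insertion_callback = nullptr;
      int buffered = 0;    // entries parked in the store buffer (runtime mode)
      int remembered = 0;  // entries inserted directly into the remembered set
    };

    void InsertDuringRuntime(Buffer* b, void*) { b->buffered++; }
    void InsertDuringGC(Buffer* b, void*) { b->remembered++; }

    // Installed once per GC transition instead of branching on every insert.
    void SetMode(Buffer* b, bool in_gc) {
      b->insertion_callback = in_gc ? &InsertDuringGC : &InsertDuringRuntime;
    }

    int main() {
      Buffer b;
      SetMode(&b, false);
      b.insertion_callback(&b, nullptr);  // runtime path: buffered
      SetMode(&b, true);
      b.insertion_callback(&b, nullptr);  // GC path: straight to remembered set
      assert(b.buffered == 1 && b.remembered == 1);
      return 0;
    }
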
diff --git a/src/i18n.cc b/src/i18n.cc
index 58b8a8d..7c22871 100644
--- a/src/i18n.cc
+++ b/src/i18n.cc
@@ -10,6 +10,7 @@
 #include "src/api.h"
 #include "src/factory.h"
 #include "src/isolate.h"
+#include "src/objects-inl.h"
 #include "unicode/brkiter.h"
 #include "unicode/calendar.h"
 #include "unicode/coll.h"
@@ -29,8 +30,13 @@
 #include "unicode/ucol.h"
 #include "unicode/ucurr.h"
 #include "unicode/unum.h"
+#include "unicode/uvernum.h"
 #include "unicode/uversion.h"
 
+#if U_ICU_VERSION_MAJOR_NUM >= 59
+#include "unicode/char16ptr.h"
+#endif
+
 namespace v8 {
 namespace internal {
 
@@ -224,23 +230,6 @@
 }
 
 
-template<int internal_fields, EternalHandles::SingletonHandle field>
-Handle<ObjectTemplateInfo> GetEternal(Isolate* isolate) {
-  if (isolate->eternal_handles()->Exists(field)) {
-    return Handle<ObjectTemplateInfo>::cast(
-        isolate->eternal_handles()->GetSingleton(field));
-  }
-  v8::Local<v8::ObjectTemplate> raw_template =
-      v8::ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate));
-  raw_template->SetInternalFieldCount(internal_fields);
-  return Handle<ObjectTemplateInfo>::cast(
-      isolate->eternal_handles()->CreateSingleton(
-        isolate,
-        *v8::Utils::OpenHandle(*raw_template),
-        field));
-}
-
-
 icu::DecimalFormat* CreateICUNumberFormat(
     Isolate* isolate,
     const icu::Locale& icu_locale,
@@ -286,8 +275,13 @@
       }
 
       UErrorCode status_digits = U_ZERO_ERROR;
+#if U_ICU_VERSION_MAJOR_NUM >= 59
       uint32_t fraction_digits = ucurr_getDefaultFractionDigits(
-        currency.getTerminatedBuffer(), &status_digits);
+          icu::toUCharPtr(currency.getTerminatedBuffer()), &status_digits);
+#else
+      uint32_t fraction_digits = ucurr_getDefaultFractionDigits(
+          currency.getTerminatedBuffer(), &status_digits);
+#endif
       if (U_SUCCESS(status_digits)) {
         number_format->setMinimumFractionDigits(fraction_digits);
         number_format->setMaximumFractionDigits(fraction_digits);
@@ -702,18 +696,6 @@
 
 
 // static
-Handle<ObjectTemplateInfo> I18N::GetTemplate(Isolate* isolate) {
-  return GetEternal<1, i::EternalHandles::I18N_TEMPLATE_ONE>(isolate);
-}
-
-
-// static
-Handle<ObjectTemplateInfo> I18N::GetTemplate2(Isolate* isolate) {
-  return GetEternal<2, i::EternalHandles::I18N_TEMPLATE_TWO>(isolate);
-}
-
-
-// static
 icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
     Isolate* isolate,
     Handle<String> locale,
@@ -759,16 +741,7 @@
 icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
     Isolate* isolate,
     Handle<JSObject> obj) {
-  Handle<String> key =
-      isolate->factory()->NewStringFromStaticChars("dateFormat");
-  Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
-  CHECK(maybe.IsJust());
-  if (maybe.FromJust()) {
-    return reinterpret_cast<icu::SimpleDateFormat*>(
-        obj->GetInternalField(0));
-  }
-
-  return NULL;
+  return reinterpret_cast<icu::SimpleDateFormat*>(obj->GetInternalField(0));
 }
 
 void DateFormat::DeleteDateFormat(const v8::WeakCallbackInfo<void>& data) {
@@ -823,15 +796,7 @@
 icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
     Isolate* isolate,
     Handle<JSObject> obj) {
-  Handle<String> key =
-      isolate->factory()->NewStringFromStaticChars("numberFormat");
-  Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
-  CHECK(maybe.IsJust());
-  if (maybe.FromJust()) {
-    return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
-  }
-
-  return NULL;
+  return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
 }
 
 void NumberFormat::DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data) {
@@ -883,14 +848,7 @@
 
 icu::Collator* Collator::UnpackCollator(Isolate* isolate,
                                         Handle<JSObject> obj) {
-  Handle<String> key = isolate->factory()->NewStringFromStaticChars("collator");
-  Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
-  CHECK(maybe.IsJust());
-  if (maybe.FromJust()) {
-    return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
-  }
-
-  return NULL;
+  return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
 }
 
 void Collator::DeleteCollator(const v8::WeakCallbackInfo<void>& data) {
@@ -898,11 +856,8 @@
   GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
 }
 
-
-icu::BreakIterator* BreakIterator::InitializeBreakIterator(
-    Isolate* isolate,
-    Handle<String> locale,
-    Handle<JSObject> options,
+icu::BreakIterator* V8BreakIterator::InitializeBreakIterator(
+    Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
     Handle<JSObject> resolved) {
   // Convert BCP47 into ICU locale format.
   UErrorCode status = U_ZERO_ERROR;
@@ -942,21 +897,12 @@
   return break_iterator;
 }
 
-
-icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
-                                                       Handle<JSObject> obj) {
-  Handle<String> key =
-      isolate->factory()->NewStringFromStaticChars("breakIterator");
-  Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
-  CHECK(maybe.IsJust());
-  if (maybe.FromJust()) {
-    return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
-  }
-
-  return NULL;
+icu::BreakIterator* V8BreakIterator::UnpackBreakIterator(Isolate* isolate,
+                                                         Handle<JSObject> obj) {
+  return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
 }
 
-void BreakIterator::DeleteBreakIterator(
+void V8BreakIterator::DeleteBreakIterator(
     const v8::WeakCallbackInfo<void>& data) {
   delete reinterpret_cast<icu::BreakIterator*>(data.GetInternalField(0));
   delete reinterpret_cast<icu::UnicodeString*>(data.GetInternalField(1));
diff --git a/src/i18n.h b/src/i18n.h
index 2a4c208..f89d005 100644
--- a/src/i18n.h
+++ b/src/i18n.h
@@ -6,7 +6,7 @@
 #ifndef V8_I18N_H_
 #define V8_I18N_H_
 
-#include "src/handles.h"
+#include "src/objects.h"
 #include "unicode/uversion.h"
 
 namespace U_ICU_NAMESPACE {
@@ -19,21 +19,8 @@
 namespace v8 {
 namespace internal {
 
-// Forward declarations.
-class ObjectTemplateInfo;
-
-class I18N {
- public:
-  // Creates an ObjectTemplate with one internal field.
-  static Handle<ObjectTemplateInfo> GetTemplate(Isolate* isolate);
-
-  // Creates an ObjectTemplate with two internal fields.
-  static Handle<ObjectTemplateInfo> GetTemplate2(Isolate* isolate);
-
- private:
-  I18N();
-};
-
+template <typename T>
+class Handle;
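
A note on the forward declaration just added: the class bodies in this header only name Handle<T> in static member declarations, so a forward declaration is enough, while the include swap from handles.h to objects.h supplies JSObject::kHeaderSize and kPointerSize for the new layout constants below. Files that actually create or dereference handles still include handles.h themselves. (Our reading of the include swap, not text from the patch.)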
 
 class DateFormat {
  public:
@@ -53,6 +40,10 @@
   // holds the pointer gets garbage collected.
   static void DeleteDateFormat(const v8::WeakCallbackInfo<void>& data);
 
+  // Layout description.
+  static const int kSimpleDateFormat = JSObject::kHeaderSize;
+  static const int kSize = kSimpleDateFormat + kPointerSize;
+
  private:
   DateFormat();
 };
@@ -76,6 +67,10 @@
   // holds the pointer gets garbage collected.
   static void DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data);
 
+  // Layout description.
+  static const int kDecimalFormat = JSObject::kHeaderSize;
+  static const int kSize = kDecimalFormat + kPointerSize;
+
  private:
   NumberFormat();
 };
@@ -98,11 +93,15 @@
   // the pointer gets garbage collected.
   static void DeleteCollator(const v8::WeakCallbackInfo<void>& data);
 
+  // Layout description.
+  static const int kCollator = JSObject::kHeaderSize;
+  static const int kSize = kCollator + kPointerSize;
+
  private:
   Collator();
 };
 
-class BreakIterator {
+class V8BreakIterator {
  public:
   // Create a BreakIterator for the specified locale and options. Returns the
CORRECTION_MARKER
   // resolved settings for the locale / options.
@@ -120,8 +119,13 @@
   // holds the pointer gets garbage collected.
   static void DeleteBreakIterator(const v8::WeakCallbackInfo<void>& data);
 
+  // Layout description.
+  static const int kBreakIterator = JSObject::kHeaderSize;
+  static const int kUnicodeString = kBreakIterator + kPointerSize;
+  static const int kSize = kUnicodeString + kPointerSize;
+
  private:
-  BreakIterator();
+  V8BreakIterator();
 };
 
 }  // namespace internal
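
The "Layout description" blocks added to each wrapper class pair with the simplified Unpack* functions in the i18n.cc hunks above: the ICU object now lives at a fixed internal-field offset instead of behind a named own property. A sketch of the resulting access pattern (assuming, as the constants imply, that internal field 0 sits at JSObject::kHeaderSize and holds the ICU pointer; the function name is ours):

// Inside namespace v8::internal, mirroring DateFormat::UnpackDateFormat:
icu::SimpleDateFormat* UnpackDateFormatSketch(Handle<JSObject> obj) {
  // kSimpleDateFormat == JSObject::kHeaderSize: field 0 is the ICU object;
  // kSize reserves exactly one pointer for it.
  return reinterpret_cast<icu::SimpleDateFormat*>(obj->GetInternalField(0));
}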
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 281c3ef..de5fc6b 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -41,6 +41,7 @@
 
 #include "src/assembler.h"
 #include "src/debug/debug.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -446,6 +447,17 @@
   }
 }
 
+Address Assembler::target_address_at(Address pc, Code* code) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+                                      Address target,
+                                      ICacheFlushMode icache_flush_mode) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  set_target_address_at(isolate, pc, constant_pool, target);
+}
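
The two bodies above are verbatim moves of the inline definitions deleted from assembler-ia32.h later in this patch: they call code->constant_pool(), which requires the full Code definition, and that is exactly what the newly added objects-inl.h include at the top of this header provides. Keeping only declarations in assembler-ia32.h avoids pulling object definitions into the main assembler header. (Our reading of the diff, not text from the patch.)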
 
 Address Assembler::target_address_from_return_address(Address pc) {
   return pc - kCallTargetAddressOffset;
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index d4de79e..9bbf6f4 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -201,13 +201,18 @@
   return Memory::uint32_at(pc_);
 }
 
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+  return Memory::uint32_at(pc_);
+}
+
 void RelocInfo::unchecked_update_wasm_memory_reference(
     Address address, ICacheFlushMode flush_mode) {
   Memory::Address_at(pc_) = address;
 }
 
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
-                                                  ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+                                           ICacheFlushMode flush_mode) {
   Memory::uint32_at(pc_) = size;
 }
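
wasm_function_table_size_reference() and the renamed unchecked_update_wasm_size() form a read/patch pair over the same 4-byte slot at pc_. A sketch of a patcher using them (the wrapper function is ours; the RelocInfo calls and the SKIP_ICACHE_FLUSH mode are the ones visible in this patch):

void PatchFunctionTableSize(RelocInfo* rinfo, uint32_t new_size) {
  DCHECK(RelocInfo::IsWasmFunctionTableSizeReference(rinfo->rmode()));
  // Read the currently embedded size and rewrite it in place if stale.
  if (rinfo->wasm_function_table_size_reference() != new_size) {
    rinfo->unchecked_update_wasm_size(new_size, SKIP_ICACHE_FLUSH);
  }
}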
 
@@ -830,7 +835,7 @@
 
 
 void Assembler::cmpw(const Operand& op, Immediate imm16) {
-  DCHECK(imm16.is_int16());
+  DCHECK(imm16.is_int16() || imm16.is_uint16());
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x81);
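
The relaxed DCHECK reflects that the 16-bit immediate field holds raw bits: values 0x8000..0xFFFF are valid uint16 patterns yet fail is_int16(), so for example cmpw(op, Immediate(0x8000)) was previously rejected despite being encodable. The boundary (illustrative values):

// is_int16(0x7FFF)  -> true    32767 fits a signed 16-bit field
// is_int16(0x8000)  -> false   32768 does not fit signed 16 bits
// is_uint16(0x8000) -> true    same 16 emitted bits, unsigned reading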
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 79f4125..a4bc98d 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -148,6 +148,7 @@
 const Register no_reg = {Register::kCode_no_reg};
 
 static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
 
 struct XMMRegister {
   enum Code {
@@ -498,16 +499,10 @@
   inline static void set_target_address_at(
       Isolate* isolate, Address pc, Address constant_pool, Address target,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
-  static inline Address target_address_at(Address pc, Code* code) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    return target_address_at(pc, constant_pool);
-  }
+  static inline Address target_address_at(Address pc, Code* code);
   static inline void set_target_address_at(
       Isolate* isolate, Address pc, Code* code, Address target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    set_target_address_at(isolate, pc, constant_pool, target);
-  }
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
 
   // Return the code target address at a call site from the return address
   // of that call in the instruction stream.
@@ -1434,9 +1429,6 @@
     return pc_offset() - label->pos();
   }
 
-  // Mark generator continuation.
-  void RecordGeneratorContinuation();
-
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot(RelocInfo::Mode mode);
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 9b2c51e..1320d90 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -34,17 +34,6 @@
   __ TailCallRuntime(Runtime::kNewArray);
 }
 
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
-  descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
-  descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                                ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
@@ -473,57 +462,6 @@
   __ ret(0);
 }
 
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  // With careful management, we won't have to save slot and vector on
-  // the stack. Simply handle the possibly missing case first.
-  // TODO(mvstanton): this code can be more efficient.
-  __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
-         Immediate(isolate()->factory()->the_hole_value()));
-  __ j(equal, &miss);
-  __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
-  __ ret(0);
-
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Label miss;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register index = LoadDescriptor::NameRegister();
-  Register scratch = edi;
-  DCHECK(!scratch.is(receiver) && !scratch.is(index));
-  Register result = eax;
-  DCHECK(!result.is(scratch));
-  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
-         result.is(LoadDescriptor::SlotRegister()));
-
-  // StringCharAtGenerator doesn't use the result register until it's passed
-  // the different miss possibilities. If it did, we would have a conflict
-  // when FLAG_vector_ics is true.
-  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          RECEIVER_IS_STRING);
-  char_at_generator.GenerateFast(masm);
-  __ ret(0);
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -615,7 +553,7 @@
   // (8) Is the external string one byte?  If yes, go to (5).
   // (9) Two byte sequential.  Load regexp code for two byte. Go to (E).
   // (10) Short external string or not a string?  If yes, bail out to runtime.
-  // (11) Sliced string.  Replace subject with parent. Go to (1).
+  // (11) Sliced or thin string.  Replace subject with parent. Go to (1).
 
   Label seq_one_byte_string /* 5 */, seq_two_byte_string /* 9 */,
       external_string /* 7 */, check_underlying /* 1 */,
@@ -645,6 +583,7 @@
   // have already been covered.
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kThinStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmp(ebx, Immediate(kExternalStringTag));
@@ -923,11 +862,18 @@
   __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
   __ j(not_zero, &runtime);
 
-  // (11) Sliced string.  Replace subject with parent.  Go to (1).
+  // (11) Sliced or thin string.  Replace subject with parent.  Go to (1).
+  Label thin_string;
+  __ cmp(ebx, Immediate(kThinStringTag));
+  __ j(equal, &thin_string, Label::kNear);
   // Load offset into edi and replace subject string with parent.
   __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
   __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
   __ jmp(&check_underlying);  // Go to (1).
+
+  __ bind(&thin_string);
+  __ mov(eax, FieldOperand(eax, ThinString::kActualOffset));
+  __ jmp(&check_underlying);  // Go to (1).
 #endif  // V8_INTERPRETED_REGEXP
 }
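
Background for the new thin_string branch: a ThinString is a forwarding wrapper left behind when a string is internalized in place, and ThinString::kActualOffset points at the underlying direct string. The regexp fast path therefore unwraps it like a sliced string's parent, just without an offset to track, and loops back to (1) to re-classify the unwrapped subject. (Our gloss; only ThinString::kActualOffset appears in the patch itself.)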
 
@@ -1027,9 +973,6 @@
       // Call runtime on identical symbols since we need to throw a TypeError.
       __ cmpb(ecx, Immediate(SYMBOL_TYPE));
       __ j(equal, &runtime_call, Label::kFar);
-      // Call runtime on identical SIMD values since we must throw a TypeError.
-      __ cmpb(ecx, Immediate(SIMD128_VALUE_TYPE));
-      __ j(equal, &runtime_call, Label::kFar);
     }
     __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
     __ ret(0);
@@ -1225,9 +1168,11 @@
   if (cc == equal) {
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(edx);
-      __ Push(eax);
-      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+      __ Push(esi);
+      __ Call(strict() ? isolate()->builtins()->StrictEqual()
+                       : isolate()->builtins()->Equal(),
+              RelocInfo::CODE_TARGET);
+      __ Pop(esi);
     }
     // Turn true into 0 and false into some non-zero value.
     STATIC_ASSERT(EQUAL == 0);
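
Two changes ride together in the hunk above: equality now tail-calls the StrictEqual/Equal builtins rather than the kStrictEqual/kEqual runtime entries, and the save/restore switches from the edx/eax operands (which the builtins consume in place) to esi, the context register that must survive the Call. (Our reading of the register usage; the builtin names are the ones in the diff.)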
@@ -1297,8 +1242,7 @@
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
   // We don't know if ecx is a WeakCell or a Symbol, but it's harmless to read
-  // at this position in a symbol (see static asserts in
-  // type-feedback-vector.h).
+  // at this position in a symbol (see static asserts in feedback-vector.h).
   Label check_allocation_site;
   __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
   __ j(equal, &done, Label::kFar);
@@ -1337,7 +1281,7 @@
   __ bind(&megamorphic);
   __ mov(
       FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
-      Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
+      Immediate(FeedbackVector::MegamorphicSentinel(isolate)));
   __ jmp(&done, Label::kFar);
 
   // An uninitialized cache is patched with the function or sentinel to
@@ -1409,206 +1353,6 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
-                               Register slot) {
-  __ add(FieldOperand(feedback_vector, slot, times_half_pointer_size,
-                      FixedArray::kHeaderSize + kPointerSize),
-         Immediate(Smi::FromInt(1)));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
-  // eax - number of arguments
-  // edi - function
-  // edx - slot id
-  // ebx - vector
-  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
-  __ cmp(edi, ecx);
-  __ j(not_equal, miss);
-
-  // Reload ecx.
-  __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
-                           FixedArray::kHeaderSize));
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, ebx, edx);
-
-  __ mov(ebx, ecx);
-  __ mov(edx, edi);
-  ArrayConstructorStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-
-  // Unreachable.
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
-  // eax - number of arguments
-  // edi - function
-  // edx - slot id
-  // ebx - vector
-  Isolate* isolate = masm->isolate();
-  Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
-  // The checks. First, does edi match the recorded monomorphic target?
-  __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
-                           FixedArray::kHeaderSize));
-
-  // We don't know that we have a weak cell. We might have a private symbol
-  // or an AllocationSite, but the memory is safe to examine.
-  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
-  // FixedArray.
-  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
-  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
-  // computed, meaning that it can't appear to be a pointer. If the low bit is
-  // 0, then hash is computed, but the 0 bit prevents the field from appearing
-  // to be a pointer.
-  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
-  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
-                    WeakCell::kValueOffset &&
-                WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
-  __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
-  __ j(not_equal, &extra_checks_or_miss);
-
-  // The compare above could have been a SMI/SMI comparison. Guard against this
-  // convincing us that we have a monomorphic JSFunction.
-  __ JumpIfSmi(edi, &extra_checks_or_miss);
-
-  __ bind(&call_function);
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, ebx, edx);
-
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
-                                                    tail_call_mode()),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&extra_checks_or_miss);
-  Label uninitialized, miss, not_allocation_site;
-
-  __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
-  __ j(equal, &call);
-
-  // Check if we have an allocation site.
-  __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
-                 Heap::kAllocationSiteMapRootIndex);
-  __ j(not_equal, &not_allocation_site);
-
-  // We have an allocation site.
-  HandleArrayCase(masm, &miss);
-
-  __ bind(&not_allocation_site);
-
-  // The following cases attempt to handle MISS cases without going to the
-  // runtime.
-  if (FLAG_trace_ic) {
-    __ jmp(&miss);
-  }
-
-  __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
-  __ j(equal, &uninitialized);
-
-  // We are going megamorphic. If the feedback is a JSFunction, it is fine
-  // to handle it here. More complex cases are dealt with in the runtime.
-  __ AssertNotSmi(ecx);
-  __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &miss);
-  __ mov(
-      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
-      Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
-
-  __ bind(&call);
-
-  // Increment the call count for megamorphic function calls.
-  IncrementCallCount(masm, ebx, edx);
-
-  __ bind(&call_count_incremented);
-
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&uninitialized);
-
-  // We are going monomorphic, provided we actually have a JSFunction.
-  __ JumpIfSmi(edi, &miss);
-
-  // Goto miss case if we do not have a function.
-  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-  __ j(not_equal, &miss);
-
-  // Make sure the function is not the Array() function, which requires special
-  // behavior on MISS.
-  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
-  __ cmp(edi, ecx);
-  __ j(equal, &miss);
-
-  // Make sure the function belongs to the same native context.
-  __ mov(ecx, FieldOperand(edi, JSFunction::kContextOffset));
-  __ mov(ecx, ContextOperand(ecx, Context::NATIVE_CONTEXT_INDEX));
-  __ cmp(ecx, NativeContextOperand());
-  __ j(not_equal, &miss);
-
-  // Store the function. Use a stub since we need a frame for allocation.
-  // eax - number of arguments
-  // ebx - vector
-  // edx - slot
-  // edi - function
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    CreateWeakCellStub create_stub(isolate);
-    __ SmiTag(eax);
-    __ push(eax);
-    __ push(ebx);
-    __ push(edx);
-    __ push(edi);
-    __ push(esi);
-    __ CallStub(&create_stub);
-    __ pop(esi);
-    __ pop(edi);
-    __ pop(edx);
-    __ pop(ebx);
-    __ pop(eax);
-    __ SmiUntag(eax);
-  }
-
-  __ jmp(&call_function);
-
-  // We are here because tracing is on or we encountered a MISS case we can't
-  // handle here.
-  __ bind(&miss);
-  GenerateMiss(masm);
-
-  __ jmp(&call_count_incremented);
-
-  // Unreachable
-  __ int3();
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
-  FrameScope scope(masm, StackFrame::INTERNAL);
-
-  // Preserve the number of arguments.
-  __ SmiTag(eax);
-  __ push(eax);
-
-  // Push the function and feedback info.
-  __ push(edi);
-  __ push(ebx);
-  __ push(edx);
-
-  // Call the entry.
-  __ CallRuntime(Runtime::kCallIC_Miss);
-
-  // Move result to edi and exit the internal frame.
-  __ mov(edi, eax);
-
-  // Restore number of arguments.
-  __ pop(eax);
-  __ SmiUntag(eax);
-}
-
-
 bool CEntryStub::NeedsImmovableCode() {
   return false;
 }
@@ -1631,7 +1375,6 @@
 void CodeStub::GenerateFPStubs(Isolate* isolate) {
   // Generate if not already in cache.
   CEntryStub(isolate, 1, kSaveFPRegs).GetCode();
-  isolate->set_fp_stubs_generated(true);
 }
 
 
@@ -1803,8 +1546,8 @@
   __ mov(ebp, esp);
 
   // Push marker in two places.
-  int marker = type();
-  __ push(Immediate(Smi::FromInt(marker)));  // marker
+  StackFrame::Type marker = type();
+  __ push(Immediate(StackFrame::TypeToMarker(marker)));  // marker
   ExternalReference context_address(Isolate::kContextAddress, isolate());
   __ push(Operand::StaticVariable(context_address));  // context
   // Save callee-saved registers (C calling conventions).
@@ -1821,10 +1564,10 @@
   __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
   __ j(not_equal, &not_outermost_js, Label::kNear);
   __ mov(Operand::StaticVariable(js_entry_sp), ebp);
-  __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
   __ jmp(&invoke, Label::kNear);
   __ bind(&not_outermost_js);
-  __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+  __ push(Immediate(StackFrame::INNER_JSENTRY_FRAME));
 
   // Jump to a faked try block that does the invoke, with a faked catch
   // block that sets the pending exception.
@@ -1868,7 +1611,7 @@
   __ bind(&exit);
   // Check if the current stack frame is marked as the outermost JS frame.
   __ pop(ebx);
-  __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ cmp(ebx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
   __ j(not_equal, &not_outermost_js_2);
   __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
   __ bind(&not_outermost_js_2);
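
These hunks show the frame-marker change in miniature: markers used to be pushed and compared as Smis (Smi::FromInt(StackFrame::...)) and are now plain integer markers produced by StackFrame::TypeToMarker() and compared raw, so no tagging or untagging happens on the JS entry path. The encoding itself lives in frames.h and is not part of this excerpt.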
@@ -1984,86 +1727,6 @@
   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
 }
 
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
-  // Fast case of Heap::LookupSingleCharacterStringFromCode.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiShiftSize == 0);
-  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
-  __ test(code_, Immediate(kSmiTagMask |
-                           ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
-  __ j(not_zero, &slow_case_);
-
-  Factory* factory = masm->isolate()->factory();
-  __ Move(result_, Immediate(factory->single_character_string_cache()));
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kSmiShiftSize == 0);
-  // At this point code register contains smi tagged one byte char code.
-  __ mov(result_, FieldOperand(result_,
-                               code_, times_half_pointer_size,
-                               FixedArray::kHeaderSize));
-  __ cmp(result_, factory->undefined_value());
-  __ j(equal, &slow_case_);
-  __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
-  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
-  __ bind(&slow_case_);
-  call_helper.BeforeCall(masm);
-  __ push(code_);
-  __ CallRuntime(Runtime::kStringCharFromCode);
-  if (!result_.is(eax)) {
-    __ mov(result_, eax);
-  }
-  call_helper.AfterCall(masm);
-  __ jmp(&exit_);
-
-  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
-                                          Register dest,
-                                          Register src,
-                                          Register count,
-                                          Register scratch,
-                                          String::Encoding encoding) {
-  DCHECK(!scratch.is(dest));
-  DCHECK(!scratch.is(src));
-  DCHECK(!scratch.is(count));
-
-  // Nothing to do for zero characters.
-  Label done;
-  __ test(count, count);
-  __ j(zero, &done);
-
-  // Make count the number of bytes to copy.
-  if (encoding == String::TWO_BYTE_ENCODING) {
-    __ shl(count, 1);
-  }
-
-  Label loop;
-  __ bind(&loop);
-  __ mov_b(scratch, Operand(src, 0));
-  __ mov_b(Operand(dest, 0), scratch);
-  __ inc(src);
-  __ inc(dest);
-  __ dec(count);
-  __ j(not_zero, &loop);
-
-  __ bind(&done);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -2679,67 +2342,6 @@
   __ jmp(done);
 }
 
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found, leaving the
-// index into the dictionary in |r0|. Jump to the |miss| label
-// otherwise.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
-                                                      Label* miss,
-                                                      Label* done,
-                                                      Register elements,
-                                                      Register name,
-                                                      Register r0,
-                                                      Register r1) {
-  DCHECK(!elements.is(r0));
-  DCHECK(!elements.is(r1));
-  DCHECK(!name.is(r0));
-  DCHECK(!name.is(r1));
-
-  __ AssertName(name);
-
-  __ mov(r1, FieldOperand(elements, kCapacityOffset));
-  __ shr(r1, kSmiTagSize);  // convert smi to int
-  __ dec(r1);
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
-    __ shr(r0, Name::kHashShift);
-    if (i > 0) {
-      __ add(r0, Immediate(NameDictionary::GetProbeOffset(i)));
-    }
-    __ and_(r0, r1);
-
-    // Scale the index by multiplying by the entry size.
-    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
-    __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3
-
-    // Check if the key is identical to the name.
-    __ cmp(name, Operand(elements,
-                         r0,
-                         times_4,
-                         kElementsStartOffset - kHeapObjectTag));
-    __ j(equal, done);
-  }
-
-  NameDictionaryLookupStub stub(masm->isolate(), elements, r1, r0,
-                                POSITIVE_LOOKUP);
-  __ push(name);
-  __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
-  __ shr(r0, Name::kHashShift);
-  __ push(r0);
-  __ CallStub(&stub);
-
-  __ test(r1, r1);
-  __ j(zero, miss);
-  __ jmp(done);
-}
-
-
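
For reference, the probe sequence that the removed GeneratePositiveLookup unrolled: each attempt adds a growing quadratic offset to the name's hash and masks by capacity - 1. A sketch, assuming NameDictionary::GetProbeOffset(i) is the triangular number (i + i*i) / 2 that the "(hash + i + i * i) & mask" comment alludes to:

uint32_t ProbeIndex(uint32_t hash, uint32_t mask, int attempt) {
  uint32_t offset = (attempt + attempt * attempt) >> 1;  // 0, 1, 3, 6, ...
  return (hash + offset) & mask;  // capacity is a power of two
}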
 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   // we cannot call anything that could cause a GC from this stub.
@@ -3016,336 +2618,6 @@
   __ jmp(ecx);  // Return to IC Miss stub, continuation still on stack.
 }
 
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  KeyedStoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
-// value is on the stack already.
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
-                                       Register key, Register vector,
-                                       Register slot, Register feedback,
-                                       bool is_polymorphic, Label* miss) {
-  // feedback initially contains the feedback array
-  Label next, next_loop, prepare_next;
-  Label load_smi_map, compare_map;
-  Label start_polymorphic;
-  Label pop_and_miss;
-
-  __ push(receiver);
-  // Value, vector and slot are passed on the stack, so no need to save/restore
-  // them.
-
-  Register receiver_map = receiver;
-  Register cached_map = vector;
-
-  // Receiver might not be a heap object.
-  __ JumpIfSmi(receiver, &load_smi_map);
-  __ mov(receiver_map, FieldOperand(receiver, 0));
-  __ bind(&compare_map);
-  __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-
-  // A named keyed store might have a 2-element array; all other cases can count
-  // on an array with at least 2 {map, handler} pairs, so they can go right
-  // into polymorphic array handling.
-  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, &start_polymorphic);
-
-  // found, now call handler.
-  Register handler = feedback;
-  DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister()));
-  __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ pop(receiver);
-  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-
-  // Polymorphic; we have to loop from 2 to N
-  __ bind(&start_polymorphic);
-  __ push(key);
-  Register counter = key;
-  __ mov(counter, Immediate(Smi::FromInt(2)));
-
-  if (!is_polymorphic) {
-    // If is_polymorphic is false, we may only have a two element array.
-    // Check against length now in that case.
-    __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
-    __ j(greater_equal, &pop_and_miss);
-  }
-
-  __ bind(&next_loop);
-  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
-                                  FixedArray::kHeaderSize));
-  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, &prepare_next);
-  __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
-                               FixedArray::kHeaderSize + kPointerSize));
-  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ pop(key);
-  __ pop(receiver);
-  __ jmp(handler);
-
-  __ bind(&prepare_next);
-  __ add(counter, Immediate(Smi::FromInt(2)));
-  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
-  __ j(less, &next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ bind(&pop_and_miss);
-  __ pop(key);
-  __ pop(receiver);
-  __ jmp(miss);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-
-static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
-                                       Register key, Register vector,
-                                       Register slot, Register weak_cell,
-                                       Label* miss) {
-  // The store ic value is on the stack.
-  DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister()));
-
-  // feedback initially contains the feedback array
-  Label compare_smi_map;
-
-  // Move the weak map into the weak_cell register.
-  Register ic_map = weak_cell;
-  __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
-
-  // Receiver might not be a heap object.
-  __ JumpIfSmi(receiver, &compare_smi_map);
-  __ cmp(ic_map, FieldOperand(receiver, 0));
-  __ j(not_equal, miss);
-  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
-                                 FixedArray::kHeaderSize + kPointerSize));
-  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
-  // jump to the handler.
-  __ jmp(weak_cell);
-
-  // In microbenchmarks, it made sense to unroll this code so that the call to
-  // the handler is duplicated for a HeapObject receiver and a Smi receiver.
-  __ bind(&compare_smi_map);
-  __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, miss);
-  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
-                                 FixedArray::kHeaderSize + kPointerSize));
-  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
-  // jump to the handler.
-  __ jmp(weak_cell);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
-                                            Register receiver, Register key,
-                                            Register vector, Register slot,
-                                            Register feedback, Label* miss) {
-  // feedback initially contains the feedback array
-  Label next, next_loop, prepare_next;
-  Label load_smi_map, compare_map;
-  Label transition_call;
-  Label pop_and_miss;
-
-  __ push(receiver);
-  // Value, vector and slot are passed on the stack, so no need to save/restore
-  // them.
-
-  Register receiver_map = receiver;
-  Register cached_map = vector;
-
-  // Receiver might not be a heap object.
-  __ JumpIfSmi(receiver, &load_smi_map);
-  __ mov(receiver_map, FieldOperand(receiver, 0));
-  __ bind(&compare_map);
-
-  // Polymorphic; we have to loop from 0 to N - 1
-  __ push(key);
-  // Current stack layout:
-  // - esp[0]    -- key
-  // - esp[4]    -- receiver
-  // - esp[8]    -- return address
-  // - esp[12]   -- vector
-  // - esp[16]   -- slot
-  // - esp[20]   -- value
-  //
-  // Required stack layout for handler call (see StoreWithVectorDescriptor):
-  // - esp[0]    -- return address
-  // - esp[4]    -- vector
-  // - esp[8]    -- slot
-  // - esp[12]   -- value
-  // - receiver, key, handler in registers.
-  Register counter = key;
-  __ mov(counter, Immediate(Smi::kZero));
-  __ bind(&next_loop);
-  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
-                                  FixedArray::kHeaderSize));
-  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, &prepare_next);
-  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
-                                  FixedArray::kHeaderSize + kPointerSize));
-  __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &transition_call);
-  __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
-                                FixedArray::kHeaderSize + 2 * kPointerSize));
-  __ pop(key);
-  __ pop(receiver);
-  __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
-  __ jmp(feedback);
-
-  __ bind(&transition_call);
-  // Current stack layout:
-  // - esp[0]    -- key
-  // - esp[4]    -- receiver
-  // - esp[8]    -- return address
-  // - esp[12]   -- vector
-  // - esp[16]   -- slot
-  // - esp[20]   -- value
-  //
-  // Required stack layout for handler call (see StoreTransitionDescriptor):
-  // - esp[0]    -- return address
-  // - esp[4]    -- vector
-  // - esp[8]    -- slot
-  // - esp[12]   -- value
-  // - receiver, key, map, handler in registers.
-  __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
-                                FixedArray::kHeaderSize + 2 * kPointerSize));
-  __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
-
-  __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  // The weak cell may have been cleared.
-  __ JumpIfSmi(cached_map, &pop_and_miss);
-  DCHECK(!cached_map.is(StoreTransitionDescriptor::MapRegister()));
-  __ mov(StoreTransitionDescriptor::MapRegister(), cached_map);
-
-  // Call store transition handler using StoreTransitionDescriptor calling
-  // convention.
-  __ pop(key);
-  __ pop(receiver);
-  // Ensure that the transition handler we are going to call has the same
-  // number of stack arguments, which means that we don't have to adapt them
-  // before the call.
-  STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
-  STATIC_ASSERT(StoreTransitionDescriptor::kStackArgumentsCount == 3);
-  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
-                    StoreWithVectorDescriptor::kValue ==
-                StoreTransitionDescriptor::kParameterCount -
-                    StoreTransitionDescriptor::kValue);
-  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
-                    StoreWithVectorDescriptor::kSlot ==
-                StoreTransitionDescriptor::kParameterCount -
-                    StoreTransitionDescriptor::kSlot);
-  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
-                    StoreWithVectorDescriptor::kVector ==
-                StoreTransitionDescriptor::kParameterCount -
-                    StoreTransitionDescriptor::kVector);
-  __ jmp(feedback);
-
-  __ bind(&prepare_next);
-  __ add(counter, Immediate(Smi::FromInt(3)));
-  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
-  __ j(less, &next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ bind(&pop_and_miss);
-  __ pop(key);
-  __ pop(receiver);
-  __ jmp(miss);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // edx
-  Register key = StoreWithVectorDescriptor::NameRegister();           // ecx
-  Register value = StoreWithVectorDescriptor::ValueRegister();        // eax
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // ebx
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // edi
-  Label miss;
-
-  if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
-    // Current stack layout:
-    // - esp[8]    -- value
-    // - esp[4]    -- slot
-    // - esp[0]    -- return address
-    STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
-    STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
-    if (in_frame) {
-      __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
-      // If the vector is not on the stack, then insert the vector beneath
-      // return address in order to prepare for calling handler with
-      // StoreWithVector calling convention.
-      __ push(Operand(esp, 0));
-      __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
-      __ RecordComment("]");
-    } else {
-      __ mov(vector, Operand(esp, 1 * kPointerSize));
-    }
-    __ mov(slot, Operand(esp, 2 * kPointerSize));
-  }
-
-  Register scratch = value;
-  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
-                               FixedArray::kHeaderSize));
-
-  // Is it a weak cell?
-  Label try_array;
-  Label not_array, smi_key, key_okay;
-  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
-  __ j(not_equal, &try_array);
-  HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &not_array);
-  HandlePolymorphicKeyedStoreCase(masm, receiver, key, vector, slot, scratch,
-                                  &miss);
-
-  __ bind(&not_array);
-  Label try_poly_name;
-  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
-  __ j(not_equal, &try_poly_name);
-
-  Handle<Code> megamorphic_stub =
-      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ cmp(key, scratch);
-  __ j(not_equal, &miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
-                               FixedArray::kHeaderSize + kPointerSize));
-  HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false,
-                             &miss);
-
-  __ bind(&miss);
-  KeyedStoreIC::GenerateMiss(masm);
-}
-
-
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(ebx);
-  CallICStub stub(isolate(), state());
-  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
@@ -3692,689 +2964,6 @@
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- edi    : target
-  //  -- edx    : new target
-  //  -- esi    : context
-  //  -- esp[0] : return address
-  // -----------------------------------
-  __ AssertFunction(edi);
-  __ AssertReceiver(edx);
-
-  // Verify that the new target is a JSFunction.
-  Label new_object;
-  __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
-  __ j(not_equal, &new_object);
-
-  // Load the initial map and verify that it's in fact a map.
-  __ mov(ecx, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
-  __ JumpIfSmi(ecx, &new_object);
-  __ CmpObjectType(ecx, MAP_TYPE, ebx);
-  __ j(not_equal, &new_object);
-
-  // Fall back to runtime if the target differs from the new target's
-  // initial map constructor.
-  __ cmp(edi, FieldOperand(ecx, Map::kConstructorOrBackPointerOffset));
-  __ j(not_equal, &new_object);
-
-  // Allocate the JSObject on the heap.
-  Label allocate, done_allocate;
-  __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
-  __ lea(ebx, Operand(ebx, times_pointer_size, 0));
-  __ Allocate(ebx, eax, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
-  __ bind(&done_allocate);
-
-  // Initialize the JSObject fields.
-  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
-  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ lea(ebx, FieldOperand(eax, JSObject::kHeaderSize));
-
-  // ----------- S t a t e -------------
-  //  -- eax    : result (tagged)
-  //  -- ebx    : result fields (untagged)
-  //  -- edi    : result end (untagged)
-  //  -- ecx    : initial map
-  //  -- esi    : context
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  // Perform in-object slack tracking if requested.
-  Label slack_tracking;
-  STATIC_ASSERT(Map::kNoSlackTracking == 0);
-  __ test(FieldOperand(ecx, Map::kBitField3Offset),
-          Immediate(Map::ConstructionCounter::kMask));
-  __ j(not_zero, &slack_tracking, Label::kNear);
-  {
-    // Initialize all in-object fields with undefined.
-    __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
-    __ InitializeFieldsWithFiller(ebx, edi, edx);
-    __ Ret();
-  }
-  __ bind(&slack_tracking);
-  {
-    // Decrease generous allocation count.
-    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
-    __ sub(FieldOperand(ecx, Map::kBitField3Offset),
-           Immediate(1 << Map::ConstructionCounter::kShift));
-
-    // Initialize the in-object fields with undefined.
-    __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
-    __ neg(edx);
-    __ lea(edx, Operand(edi, edx, times_pointer_size, 0));
-    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
-    __ InitializeFieldsWithFiller(ebx, edx, edi);
-
-    // Initialize the remaining (reserved) fields with one pointer filler map.
-    __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
-    __ lea(edx, Operand(ebx, edx, times_pointer_size, 0));
-    __ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
-    __ InitializeFieldsWithFiller(ebx, edx, edi);
-
-    // Check if we can finalize the instance size.
-    Label finalize;
-    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
-    __ test(FieldOperand(ecx, Map::kBitField3Offset),
-            Immediate(Map::ConstructionCounter::kMask));
-    __ j(zero, &finalize, Label::kNear);
-    __ Ret();
-
-    // Finalize the instance size.
-    __ bind(&finalize);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(eax);
-      __ Push(ecx);
-      __ CallRuntime(Runtime::kFinalizeInstanceSize);
-      __ Pop(eax);
-    }
-    __ Ret();
-  }
-
-  // Fall back to %AllocateInNewSpace.
-  __ bind(&allocate);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(ebx);
-    __ Push(ecx);
-    __ Push(ebx);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ Pop(ecx);
-  }
-  __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
-  __ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ dec(edi);
-  __ jmp(&done_allocate);
-
-  // Fall back to %NewObject.
-  __ bind(&new_object);
-  __ PopReturnAddressTo(ecx);
-  __ Push(edi);
-  __ Push(edx);
-  __ PushReturnAddressFrom(ecx);
-  __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- edi    : function
-  //  -- esi    : context
-  //  -- ebp    : frame pointer
-  //  -- esp[0] : return address
-  // -----------------------------------
-  __ AssertFunction(edi);
-
-  // Make edx point to the JavaScript frame.
-  __ mov(edx, ebp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
-    __ j(equal, &ok);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have rest parameters (only possible if we have an
-  // arguments adaptor frame below the function frame).
-  Label no_rest_parameters;
-  __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(not_equal, &no_rest_parameters, Label::kNear);
-
-  // Check if the arguments adaptor frame contains more arguments than
-  // specified by the function's internal formal parameter count.
-  Label rest_parameters;
-  __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ sub(eax,
-         FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ j(greater, &rest_parameters);
-
-  // Return an empty rest parameter array.
-  __ bind(&no_rest_parameters);
-  {
-    // ----------- S t a t e -------------
-    //  -- esi    : context
-    //  -- esp[0] : return address
-    // -----------------------------------
-
-    // Allocate an empty rest parameter array.
-    Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, eax, edx, ecx, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Setup the rest parameter array in eax.
-    __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
-    __ mov(FieldOperand(eax, JSArray::kMapOffset), ecx);
-    __ mov(ecx, isolate()->factory()->empty_fixed_array());
-    __ mov(FieldOperand(eax, JSArray::kPropertiesOffset), ecx);
-    __ mov(FieldOperand(eax, JSArray::kElementsOffset), ecx);
-    __ mov(FieldOperand(eax, JSArray::kLengthOffset), Immediate(Smi::kZero));
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ Ret();
-
-    // Fall back to %AllocateInNewSpace.
-    __ bind(&allocate);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(Smi::FromInt(JSArray::kSize));
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-    }
-    __ jmp(&done_allocate);
-  }
-
-  __ bind(&rest_parameters);
-  {
-    // Compute the pointer to the first rest parameter (skipping the receiver).
-    __ lea(ebx,
-           Operand(ebx, eax, times_half_pointer_size,
-                   StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
-
-    // ----------- S t a t e -------------
-    //  -- esi    : context
-    //  -- eax    : number of rest parameters (tagged)
-    //  -- ebx    : pointer to first rest parameters
-    //  -- esp[0] : return address
-    // -----------------------------------
-
-    // Allocate space for the rest parameter array plus the backing store.
-    Label allocate, done_allocate;
-    __ lea(ecx, Operand(eax, times_half_pointer_size,
-                        JSArray::kSize + FixedArray::kHeaderSize));
-    __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Setup the elements array in edx.
-    __ mov(FieldOperand(edx, FixedArray::kMapOffset),
-           isolate()->factory()->fixed_array_map());
-    __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
-    {
-      Label loop, done_loop;
-      __ Move(ecx, Smi::kZero);
-      __ bind(&loop);
-      __ cmp(ecx, eax);
-      __ j(equal, &done_loop, Label::kNear);
-      __ mov(edi, Operand(ebx, 0 * kPointerSize));
-      __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
-                          FixedArray::kHeaderSize),
-             edi);
-      __ sub(ebx, Immediate(1 * kPointerSize));
-      __ add(ecx, Immediate(Smi::FromInt(1)));
-      __ jmp(&loop);
-      __ bind(&done_loop);
-    }
-
-    // Setup the rest parameter array in edi.
-    __ lea(edi,
-           Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
-    __ LoadGlobalFunction(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, ecx);
-    __ mov(FieldOperand(edi, JSArray::kMapOffset), ecx);
-    __ mov(FieldOperand(edi, JSArray::kPropertiesOffset),
-           isolate()->factory()->empty_fixed_array());
-    __ mov(FieldOperand(edi, JSArray::kElementsOffset), edx);
-    __ mov(FieldOperand(edi, JSArray::kLengthOffset), eax);
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ mov(eax, edi);
-    __ Ret();
-
-    // Fall back to %AllocateInNewSpace (if not too big).
-    Label too_big_for_new_space;
-    __ bind(&allocate);
-    __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
-    __ j(greater, &too_big_for_new_space);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(ecx);
-      __ Push(eax);
-      __ Push(ebx);
-      __ Push(ecx);
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-      __ mov(edx, eax);
-      __ Pop(ebx);
-      __ Pop(eax);
-    }
-    __ jmp(&done_allocate);
-
-    // Fall back to %NewRestParameter.
-    __ bind(&too_big_for_new_space);
-    __ PopReturnAddressTo(ecx);
-    // We reload the function from the caller frame due to register pressure
-    // within this stub. This is the slow path, hence reloading is preferable.
-    if (skip_stub_frame()) {
-      // For Ignition we need to skip the handler/stub frame to reach the
-      // JavaScript frame for the function.
-      __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-      __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
-    } else {
-      __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
-    }
-    __ PushReturnAddressFrom(ecx);
-    __ TailCallRuntime(Runtime::kNewRestParameter);
-  }
-}
-
-
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- edi    : function
-  //  -- esi    : context
-  //  -- ebp    : frame pointer
-  //  -- esp[0] : return address
-  // -----------------------------------
-  __ AssertFunction(edi);
-
-  // Make ecx point to the JavaScript frame.
-  __ mov(ecx, ebp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ mov(ecx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ cmp(edi, Operand(ecx, StandardFrameConstants::kFunctionOffset));
-    __ j(equal, &ok);
-    __ Abort(kInvalidFrameForFastNewSloppyArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
-  __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  __ mov(ebx,
-         FieldOperand(ebx, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ lea(edx, Operand(ecx, ebx, times_half_pointer_size,
-                      StandardFrameConstants::kCallerSPOffset));
-
-  // ebx : number of parameters (tagged)
-  // edx : parameters pointer
-  // edi : function
-  // ecx : JavaScript frame pointer.
-  // esp[0] : return address
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ mov(eax, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
-  __ mov(eax, Operand(eax, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &adaptor_frame, Label::kNear);
-
-  // No adaptor, parameter count = argument count.
-  __ mov(ecx, ebx);
-  __ push(ebx);
-  __ jmp(&try_allocate, Label::kNear);
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ push(ebx);
-  __ mov(edx, Operand(ecx, StandardFrameConstants::kCallerFPOffset));
-  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ lea(edx, Operand(edx, ecx, times_2,
-                      StandardFrameConstants::kCallerSPOffset));
-
-  // ebx = parameter count (tagged)
-  // ecx = argument count (smi-tagged)
-  // Compute the mapped parameter count = min(ebx, ecx) in ebx.
-  __ cmp(ebx, ecx);
-  __ j(less_equal, &try_allocate, Label::kNear);
-  __ mov(ebx, ecx);
-
-  // Save mapped parameter count and function.
-  __ bind(&try_allocate);
-  __ push(edi);
-  __ push(ebx);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. The parameter map has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  Label no_parameter_map;
-  __ test(ebx, ebx);
-  __ j(zero, &no_parameter_map, Label::kNear);
-  __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
-  __ bind(&no_parameter_map);
-
-  // 2. Backing store.
-  __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ add(ebx, Immediate(JSSloppyArgumentsObject::kSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(ebx, eax, edi, no_reg, &runtime, NO_ALLOCATION_FLAGS);
-
-  // eax = address of new object(s) (tagged)
-  // ecx = argument count (smi-tagged)
-  // esp[0] = mapped parameter count (tagged)
-  // esp[4] = function
-  // esp[8] = parameter count (tagged)
-  // Get the arguments map from the current native context into edi.
-  Label has_mapped_parameters, instantiate;
-  __ mov(edi, NativeContextOperand());
-  __ mov(ebx, Operand(esp, 0 * kPointerSize));
-  __ test(ebx, ebx);
-  __ j(not_zero, &has_mapped_parameters, Label::kNear);
-  __ mov(
-      edi,
-      Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
-  __ jmp(&instantiate, Label::kNear);
-
-  __ bind(&has_mapped_parameters);
-  __ mov(edi, Operand(edi, Context::SlotOffset(
-                               Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)));
-  __ bind(&instantiate);
-
-  // eax = address of new object (tagged)
-  // ebx = mapped parameter count (tagged)
-  // ecx = argument count (smi-tagged)
-  // edi = address of arguments map (tagged)
-  // esp[0] = mapped parameter count (tagged)
-  // esp[4] = function
-  // esp[8] = parameter count (tagged)
-  // Copy the JS object part.
-  __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
-  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-
-  // Set up the callee in-object property.
-  STATIC_ASSERT(JSSloppyArgumentsObject::kCalleeIndex == 1);
-  __ mov(edi, Operand(esp, 1 * kPointerSize));
-  __ AssertNotSmi(edi);
-  __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kCalleeOffset), edi);
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  __ AssertSmi(ecx);
-  __ mov(FieldOperand(eax, JSSloppyArgumentsObject::kLengthOffset), ecx);
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, edi will point there, otherwise to the
-  // backing store.
-  __ lea(edi, Operand(eax, JSSloppyArgumentsObject::kSize));
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
-
-  // eax = address of new object (tagged)
-  // ebx = mapped parameter count (tagged)
-  // ecx = argument count (tagged)
-  // edx = address of receiver argument
-  // edi = address of parameter map or backing store (tagged)
-  // esp[0] = mapped parameter count (tagged)
-  // esp[4] = function
-  // esp[8] = parameter count (tagged)
-  // Free two registers.
-  __ push(edx);
-  __ push(eax);
-
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  __ test(ebx, ebx);
-  __ j(zero, &skip_parameter_map);
-
-  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
-         Immediate(isolate()->factory()->sloppy_arguments_elements_map()));
-  __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
-  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
-  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
-  __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
-  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameters thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop, parameters_test;
-  __ push(ecx);
-  __ mov(eax, Operand(esp, 3 * kPointerSize));
-  __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
-  __ add(ebx, Operand(esp, 5 * kPointerSize));
-  __ sub(ebx, eax);
-  __ mov(ecx, isolate()->factory()->the_hole_value());
-  __ mov(edx, edi);
-  __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
-  // eax = loop variable (tagged)
-  // ebx = mapping index (tagged)
-  // ecx = the hole value
-  // edx = address of parameter map (tagged)
-  // edi = address of backing store (tagged)
-  // esp[0] = argument count (tagged)
-  // esp[4] = address of new object (tagged)
-  // esp[8] = address of receiver argument
-  // esp[12] = mapped parameter count (tagged)
-  // esp[16] = function
-  // esp[20] = parameter count (tagged)
-  __ jmp(&parameters_test, Label::kNear);
-
-  __ bind(&parameters_loop);
-  __ sub(eax, Immediate(Smi::FromInt(1)));
-  __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
-  __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
-  __ add(ebx, Immediate(Smi::FromInt(1)));
-  __ bind(&parameters_test);
-  __ test(eax, eax);
-  __ j(not_zero, &parameters_loop, Label::kNear);
-  __ pop(ecx);
-
-  __ bind(&skip_parameter_map);
-
-  // ecx = argument count (tagged)
-  // edi = address of backing store (tagged)
-  // esp[0] = address of new object (tagged)
-  // esp[4] = address of receiver argument
-  // esp[8] = mapped parameter count (tagged)
-  // esp[12] = function
-  // esp[16] = parameter count (tagged)
-  // Copy arguments header and remaining slots (if there are any).
-  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
-         Immediate(isolate()->factory()->fixed_array_map()));
-  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
-
-  Label arguments_loop, arguments_test;
-  __ mov(ebx, Operand(esp, 2 * kPointerSize));
-  __ mov(edx, Operand(esp, 1 * kPointerSize));
-  __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
-  __ sub(edx, ebx);
-  __ jmp(&arguments_test, Label::kNear);
-
-  __ bind(&arguments_loop);
-  __ sub(edx, Immediate(kPointerSize));
-  __ mov(eax, Operand(edx, 0));
-  __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
-  __ add(ebx, Immediate(Smi::FromInt(1)));
-
-  __ bind(&arguments_test);
-  __ cmp(ebx, ecx);
-  __ j(less, &arguments_loop, Label::kNear);
-
-  // Restore.
-  __ pop(eax);  // Address of arguments object.
-  __ Drop(4);
-
-  // Return.
-  __ ret(0);
-
-  // Do the runtime call to allocate the arguments object.
-  __ bind(&runtime);
-  __ pop(eax);   // Remove saved mapped parameter count.
-  __ pop(edi);   // Pop saved function.
-  __ pop(eax);   // Remove saved parameter count.
-  __ pop(eax);   // Pop return address.
-  __ push(edi);  // Push function.
-  __ push(edx);  // Push parameters pointer.
-  __ push(ecx);  // Push parameter count.
-  __ push(eax);  // Push return address.
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
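The deleted sloppy-arguments stub above performs one allocation that covers the parameter map, the backing store, and the JSSloppyArgumentsObject itself, then maps the first min(parameter_count, argument_count) slots onto context slots. A minimal standalone sketch of that size computation and slot-to-context mapping; the constants below are illustrative stand-ins, not the real V8 layout values:

#include <algorithm>
#include <cstdio>

// Illustrative stand-ins for the layout constants the stub uses.
constexpr int kPointerSize = 4;  // ia32
constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;
// Parameter map has 2 extra words containing context and backing store.
constexpr int kParameterMapHeaderSize = kFixedArrayHeaderSize + 2 * kPointerSize;
constexpr int kJSSloppyArgumentsObjectSize = 5 * kPointerSize;
constexpr int kMinContextSlots = 4;

int SloppyArgumentsAllocationSize(int parameter_count, int argument_count) {
  int mapped_count = std::min(parameter_count, argument_count);
  int size = 0;
  // 1. Parameter map (only if there are mapped parameters).
  if (mapped_count > 0) {
    size += kParameterMapHeaderSize + mapped_count * kPointerSize;
  }
  // 2. Backing store for every argument.
  size += kFixedArrayHeaderSize + argument_count * kPointerSize;
  // 3. The arguments object itself.
  size += kJSSloppyArgumentsObjectSize;
  return size;
}

int main() {
  int parameter_count = 3, argument_count = 5;
  // Mapped slot i aliases context slot MIN_CONTEXT_SLOTS + parameter_count - 1 - i,
  // which is why the stub's copy loop runs from right to left.
  for (int i = 0; i < std::min(parameter_count, argument_count); i++) {
    std::printf("mapped slot %d -> context slot %d\n", i,
                kMinContextSlots + parameter_count - 1 - i);
  }
  std::printf("allocation: %d bytes\n",
              SloppyArgumentsAllocationSize(parameter_count, argument_count));
}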
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- edi    : function
-  //  -- esi    : context
-  //  -- ebp    : frame pointer
-  //  -- esp[0] : return address
-  // -----------------------------------
-  __ AssertFunction(edi);
-
-  // Make edx point to the JavaScript frame.
-  __ mov(edx, ebp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ mov(edx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ cmp(edi, Operand(edx, StandardFrameConstants::kFunctionOffset));
-    __ j(equal, &ok);
-    __ Abort(kInvalidFrameForFastNewStrictArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have an arguments adaptor frame below the function frame.
-  Label arguments_adaptor, arguments_done;
-  __ mov(ebx, Operand(edx, StandardFrameConstants::kCallerFPOffset));
-  __ cmp(Operand(ebx, CommonFrameConstants::kContextOrFrameTypeOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ j(equal, &arguments_adaptor, Label::kNear);
-  {
-    __ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-    __ mov(eax,
-           FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
-    __ lea(ebx,
-           Operand(edx, eax, times_half_pointer_size,
-                   StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
-  }
-  __ jmp(&arguments_done, Label::kNear);
-  __ bind(&arguments_adaptor);
-  {
-    __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ lea(ebx,
-           Operand(ebx, eax, times_half_pointer_size,
-                   StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
-  }
-  __ bind(&arguments_done);
-
-  // ----------- S t a t e -------------
-  //  -- eax    : number of arguments (tagged)
-  //  -- ebx    : pointer to the first argument
-  //  -- esi    : context
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  // Allocate space for the strict arguments object plus the backing store.
-  Label allocate, done_allocate;
-  __ lea(ecx,
-         Operand(eax, times_half_pointer_size,
-                 JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ Allocate(ecx, edx, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
-  __ bind(&done_allocate);
-
-  // Set up the elements array in edx.
-  __ mov(FieldOperand(edx, FixedArray::kMapOffset),
-         isolate()->factory()->fixed_array_map());
-  __ mov(FieldOperand(edx, FixedArray::kLengthOffset), eax);
-  {
-    Label loop, done_loop;
-    __ Move(ecx, Smi::kZero);
-    __ bind(&loop);
-    __ cmp(ecx, eax);
-    __ j(equal, &done_loop, Label::kNear);
-    __ mov(edi, Operand(ebx, 0 * kPointerSize));
-    __ mov(FieldOperand(edx, ecx, times_half_pointer_size,
-                        FixedArray::kHeaderSize),
-           edi);
-    __ sub(ebx, Immediate(1 * kPointerSize));
-    __ add(ecx, Immediate(Smi::FromInt(1)));
-    __ jmp(&loop);
-    __ bind(&done_loop);
-  }
-
-  // Set up the strict arguments object in edi.
-  __ lea(edi,
-         Operand(edx, eax, times_half_pointer_size, FixedArray::kHeaderSize));
-  __ LoadGlobalFunction(Context::STRICT_ARGUMENTS_MAP_INDEX, ecx);
-  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kMapOffset), ecx);
-  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kPropertiesOffset),
-         isolate()->factory()->empty_fixed_array());
-  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kElementsOffset), edx);
-  __ mov(FieldOperand(edi, JSStrictArgumentsObject::kLengthOffset), eax);
-  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
-  __ mov(eax, edi);
-  __ Ret();
-
-  // Fall back to %AllocateInNewSpace (if not too big).
-  Label too_big_for_new_space;
-  __ bind(&allocate);
-  __ cmp(ecx, Immediate(kMaxRegularHeapObjectSize));
-  __ j(greater, &too_big_for_new_space);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(ecx);
-    __ Push(eax);
-    __ Push(ebx);
-    __ Push(ecx);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ mov(edx, eax);
-    __ Pop(ebx);
-    __ Pop(eax);
-  }
-  __ jmp(&done_allocate);
-
-  // Fall back to %NewStrictArguments.
-  __ bind(&too_big_for_new_space);
-  __ PopReturnAddressTo(ecx);
-  // We reload the function from the caller frame due to register pressure
-  // within this stub. This is the slow path, hence reloading is preferable.
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-    __ Push(Operand(edx, StandardFrameConstants::kFunctionOffset));
-  } else {
-    __ Push(Operand(ebp, StandardFrameConstants::kFunctionOffset));
-  }
-  __ PushReturnAddressFrom(ecx);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
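The strict-arguments stub deleted above had a three-tier allocation ladder: a fast inline bump allocation, then the %AllocateInNewSpace runtime call while the object still fits in a regular heap chunk, and the generic %NewStrictArguments runtime call beyond that. A compact sketch of the decision, with a placeholder size limit standing in for kMaxRegularHeapObjectSize:

#include <cstdio>

constexpr int kPointerSize = 4;
constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;
constexpr int kJSStrictArgumentsObjectSize = 4 * kPointerSize;
constexpr int kMaxRegularHeapObjectSize = 500 * 1024;  // placeholder limit

enum class AllocPath { kInline, kNewSpaceRuntime, kFullRuntime };

// Mirrors the stub's fallback ladder for an arguments object with argc slots.
AllocPath ChooseAllocationPath(int argc, bool inline_allocation_succeeded) {
  int size = kJSStrictArgumentsObjectSize + kFixedArrayHeaderSize +
             argc * kPointerSize;
  if (inline_allocation_succeeded) return AllocPath::kInline;
  if (size <= kMaxRegularHeapObjectSize) return AllocPath::kNewSpaceRuntime;
  return AllocPath::kFullRuntime;
}

int main() {
  std::printf("path: %d\n", static_cast<int>(ChooseAllocationPath(8, false)));
}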
 // Generates an Operand for saving parameters after PrepareCallApiFunction.
 static Operand ApiParameterOperand(int index) {
   return Operand(esp, index * kPointerSize);
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index c1878f0..649e2cc 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -16,16 +16,6 @@
 
 class StringHelper : public AllStatic {
  public:
-  // Generate code for copying characters using the rep movs instruction.
-  // Copies ecx characters from esi to edi. Copying of overlapping regions is
-  // not supported.
-  static void GenerateCopyCharacters(MacroAssembler* masm,
-                                     Register dest,
-                                     Register src,
-                                     Register count,
-                                     Register scratch,
-                                     String::Encoding encoding);
-
   // Compares two flat one byte strings and returns result in eax.
   static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
                                                 Register left, Register right,
@@ -68,14 +58,6 @@
                                      Handle<Name> name,
                                      Register r0);
 
-  static void GeneratePositiveLookup(MacroAssembler* masm,
-                                     Label* miss,
-                                     Label* done,
-                                     Register elements,
-                                     Register name,
-                                     Register r0,
-                                     Register r1);
-
   bool SometimesSetsUpAFrame() override { return false; }
 
  private:
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 18e5364..fd7b9ca 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -485,315 +485,15 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* allocation_memento_found) {
-  Register scratch = edi;
-  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    DCHECK(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(
-        receiver, scratch, allocation_memento_found);
-  }
-
-  // Set transitioned map.
-  __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      scratch,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  // Return address is on the stack.
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-  DCHECK(value.is(eax));
-  DCHECK(target_map.is(ebx));
-
-  Label loop, entry, convert_hole, gc_required, only_change_map;
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
-  __ j(equal, &only_change_map);
-
-  __ push(eax);
-  __ push(ebx);
-  __ push(esi);
-
-  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
-
-  // Allocate new FixedDoubleArray.
-  // edx: receiver
-  // edi: length of source FixedArray (smi-tagged)
-  AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
-  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
-              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
-
-  // eax: destination FixedDoubleArray
-  // edi: number of elements
-  // edx: receiver
-  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
-         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
-  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
-  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
-  // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
-  __ mov(ebx, eax);
-  __ RecordWriteField(edx,
-                      JSObject::kElementsOffset,
-                      ebx,
-                      edi,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
-
-  // Prepare for conversion loop.
-  ExternalReference canonical_the_hole_nan_reference =
-      ExternalReference::address_of_the_hole_nan();
-  XMMRegister the_hole_nan = xmm1;
-  __ movsd(the_hole_nan,
-           Operand::StaticVariable(canonical_the_hole_nan_reference));
-  __ jmp(&entry);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-
-  // Restore registers before jumping into runtime.
-  __ pop(esi);
-  __ pop(ebx);
-  __ pop(eax);
-  __ jmp(fail);
-
-  // Convert and copy elements
-  // esi: source FixedArray
-  __ bind(&loop);
-  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
-  // ebx: current element from source
-  // edi: index of current element
-  __ JumpIfNotSmi(ebx, &convert_hole);
-
-  // Normal smi, convert it to double and store.
-  __ SmiUntag(ebx);
-  __ Cvtsi2sd(xmm0, ebx);
-  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
-           xmm0);
-  __ jmp(&entry);
-
-  // Found hole, store hole_nan_as_double instead.
-  __ bind(&convert_hole);
-
-  if (FLAG_debug_code) {
-    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
-    __ Assert(equal, kObjectFoundInSmiOnlyArray);
-  }
-
-  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
-           the_hole_nan);
-
-  __ bind(&entry);
-  __ sub(edi, Immediate(Smi::FromInt(1)));
-  __ j(not_sign, &loop);
-
-  // Restore registers.
-  __ pop(esi);
-  __ pop(ebx);
-  __ pop(eax);
-
-  __ bind(&only_change_map);
-  // eax: value
-  // ebx: target map
-  // Set transitioned map.
-  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
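GenerateSmiToDouble above converts each element by untagging the smi and re-encoding it as a double (Cvtsi2sd), and writes a canonical hole NaN where the source held the hole. The per-element conversion in plain C++; the 1-bit smi tag matches 32-bit tagging, while the hole NaN bit pattern is an illustrative assumption:

#include <cstdint>
#include <cstring>
#include <cstdio>

constexpr uint32_t kSmiTagMask = 1;  // low bit clear == smi on ia32
constexpr uint64_t kHoleNanBits = 0xFFF7FFFFFFF7FFFFull;  // illustrative

double ConvertElement(uint32_t tagged, bool is_hole) {
  if (is_hole) {
    // Store the canonical hole NaN instead of a tagged pointer.
    double d;
    std::memcpy(&d, &kHoleNanBits, sizeof(d));
    return d;
  }
  // SmiUntag: arithmetic shift drops the tag bit, then int -> double.
  int32_t untagged = static_cast<int32_t>(tagged) >> 1;
  return static_cast<double>(untagged);
}

int main() {
  uint32_t smi_42 = 42u << 1;  // tag 42 as a smi
  std::printf("%.1f (smi check: %d)\n", ConvertElement(smi_42, false),
              (smi_42 & kSmiTagMask) == 0);
}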
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  // Return address is on the stack.
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-  DCHECK(value.is(eax));
-  DCHECK(target_map.is(ebx));
-
-  Label loop, entry, convert_hole, gc_required, only_change_map, success;
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
-  __ j(equal, &only_change_map);
-
-  __ push(esi);
-  __ push(eax);
-  __ push(edx);
-  __ push(ebx);
-
-  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
-
-  // Allocate new FixedArray.
-  // ebx: length of source FixedDoubleArray (smi-tagged)
-  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
-  __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);
-
-  // eax: destination FixedArray
-  // ebx: number of elements
-  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
-         Immediate(masm->isolate()->factory()->fixed_array_map()));
-  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-
-  // Allocating heap numbers in the loop below can fail and cause a jump to
-  // gc_required. We can't leave a partly initialized FixedArray behind,
-  // so pessimistically fill it with holes now.
-  Label initialization_loop, initialization_loop_entry;
-  __ jmp(&initialization_loop_entry, Label::kNear);
-  __ bind(&initialization_loop);
-  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
-         masm->isolate()->factory()->the_hole_value());
-  __ bind(&initialization_loop_entry);
-  __ sub(ebx, Immediate(Smi::FromInt(1)));
-  __ j(not_sign, &initialization_loop);
-
-  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
-  __ jmp(&entry);
-
-  // ebx: target map
-  // edx: receiver
-  // Set transitioned map.
-  __ bind(&only_change_map);
-  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ jmp(&success);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ pop(ebx);
-  __ pop(edx);
-  __ pop(eax);
-  __ pop(esi);
-  __ jmp(fail);
-
-  // Box doubles into heap numbers.
-  // edi: source FixedDoubleArray
-  // eax: destination FixedArray
-  __ bind(&loop);
-  // ebx: index of current element (smi-tagged)
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
-  __ j(equal, &convert_hole);
-
-  // Non-hole double, copy value into a heap number.
-  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
-  // edx: new heap number
-  __ movsd(xmm0,
-           FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
-  __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
-  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
-  __ mov(esi, ebx);
-  __ RecordWriteArray(eax,
-                      edx,
-                      esi,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ jmp(&entry, Label::kNear);
-
-  // Replace the-hole NaN with the-hole pointer.
-  __ bind(&convert_hole);
-  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
-         masm->isolate()->factory()->the_hole_value());
-
-  __ bind(&entry);
-  __ sub(ebx, Immediate(Smi::FromInt(1)));
-  __ j(not_sign, &loop);
-
-  __ pop(ebx);
-  __ pop(edx);
-  // ebx: target map
-  // edx: receiver
-  // Set transitioned map.
-  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
-  __ RecordWriteField(edx,
-                      JSObject::kElementsOffset,
-                      eax,
-                      edi,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  // Restore registers.
-  __ pop(eax);
-  __ pop(esi);
-
-  __ bind(&success);
-}
-
-
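The double-to-object loop above recognizes holes by comparing only the upper 32 bits of each element against kHoleNanUpper32; that half-word alone identifies the canonical hole NaN, so a single 32-bit cmp suffices. A sketch of the check, with the same illustrative bit pattern as before:

#include <cstdint>
#include <cstring>
#include <cstdio>

constexpr uint64_t kHoleNanBits = 0xFFF7FFFFFFF7FFFFull;  // illustrative
constexpr uint32_t kHoleNanUpper32 = static_cast<uint32_t>(kHoleNanBits >> 32);

bool IsHole(double element) {
  uint64_t bits;
  std::memcpy(&bits, &element, sizeof(bits));
  // Only the upper word is inspected, like the deleted 32-bit cmp against
  // the upper half of each FixedDoubleArray element.
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
}

int main() {
  double hole;
  std::memcpy(&hole, &kHoleNanBits, sizeof(hole));
  std::printf("hole: %d, 1.5: %d\n", IsHole(hole), IsHole(1.5));
}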
 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                        Factory* factory,
                                        Register string,
                                        Register index,
                                        Register result,
                                        Label* call_runtime) {
+  Label indirect_string_loaded;
+  __ bind(&indirect_string_loaded);
+
   // Fetch the instance type of the receiver into result register.
   __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
   __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
@@ -804,17 +504,24 @@
   __ j(zero, &check_sequential, Label::kNear);
 
   // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ test(result, Immediate(kSlicedNotConsMask));
-  __ j(zero, &cons_string, Label::kNear);
+  Label cons_string, thin_string;
+  __ and_(result, Immediate(kStringRepresentationMask));
+  __ cmp(result, Immediate(kConsStringTag));
+  __ j(equal, &cons_string, Label::kNear);
+  __ cmp(result, Immediate(kThinStringTag));
+  __ j(equal, &thin_string, Label::kNear);
 
   // Handle slices.
-  Label indirect_string_loaded;
   __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
   __ SmiUntag(result);
   __ add(index, result);
   __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded, Label::kNear);
+  __ jmp(&indirect_string_loaded);
+
+  // Handle thin strings.
+  __ bind(&thin_string);
+  __ mov(string, FieldOperand(string, ThinString::kActualOffset));
+  __ jmp(&indirect_string_loaded);
 
   // Handle cons strings.
   // Check whether the right hand side is the empty string (i.e. if
@@ -826,10 +533,7 @@
          Immediate(factory->empty_string()));
   __ j(not_equal, call_runtime);
   __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+  __ jmp(&indirect_string_loaded);
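The rewritten StringCharLoadGenerator now loops back to indirect_string_loaded after each unwrapping step, and adds ThinString (a forwarding wrapper to an internalized copy) alongside cons and sliced strings. The same unwrap loop over a toy string hierarchy; the types and fields below are stand-ins for the real heap layouts:

#include <cstdio>

// Toy stand-ins for the string shapes the generator dispatches on.
struct Str {
  enum Kind { kSeq, kCons, kSliced, kThin } kind;
  Str* target = nullptr;  // cons first child / slice parent / thin actual
  int offset = 0;         // slice offset into the parent
};

// Keep unwrapping until a direct (sequential) string is reached, folding
// slice offsets into the index along the way.
Str* UnwrapForCharLoad(Str* s, int& index) {
  for (;;) {
    switch (s->kind) {
      case Str::kSliced: index += s->offset; s = s->target; break;
      case Str::kThin:   s = s->target; break;  // follow ThinString::actual
      case Str::kCons:   s = s->target; break;  // assumes flat cons (empty rhs)
      case Str::kSeq:    return s;
    }
  }
}

int main() {
  Str seq{Str::kSeq};
  Str slice{Str::kSliced, &seq, 3};
  Str thin{Str::kThin, &slice};
  int index = 2;
  UnwrapForCharLoad(&thin, index);
  std::printf("adjusted index: %d\n", index);  // 5
}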
 
   // Distinguish sequential and external strings. Only these two string
   // representations can reach here (slices and flat cons strings have been
@@ -919,32 +623,24 @@
   return result;
 }
 
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
 
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
-                               MarkingParity* parity) {
-  if (IsYoungSequence(isolate, sequence)) {
-    *age = kNoAgeCodeAge;
-    *parity = NO_MARKING_PARITY;
-  } else {
-    sequence++;  // Skip the kCallOpcode byte
-    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
-        Assembler::kCallTargetAddressOffset;
-    Code* stub = GetCodeFromTargetAddress(target_address);
-    GetCodeAgeAndParity(stub, age, parity);
-  }
+  sequence++;  // Skip the kCallOpcode byte
+  Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+                           Assembler::kCallTargetAddressOffset;
+  Code* stub = GetCodeFromTargetAddress(target_address);
+  return GetAgeOfCodeAgeStub(stub);
 }
 
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
-                                byte* sequence,
-                                Code::Age age,
-                                MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+                                Code::Age age) {
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
     Assembler::FlushICache(isolate, sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age);
     CodePatcher patcher(isolate, sequence, young_length);
     patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
   }
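GetCodeAge above recovers the age by decoding the rel32 call that PatchPlatformCodeAge plants at the start of aged code: skip the call opcode byte, read the 32-bit displacement, and add the offset of the next instruction. The displacement arithmetic, exercised on a throwaway buffer:

#include <cstdint>
#include <cstring>
#include <cstdio>

// Decode the absolute target of an ia32 `call rel32` at `sequence`. The
// displacement is relative to the end of the 5-byte instruction, which is
// what Assembler::kCallTargetAddressOffset accounts for.
uint8_t* DecodeCallTarget(uint8_t* sequence) {
  sequence++;  // skip the kCallOpcode byte (0xE8)
  int32_t rel;
  std::memcpy(&rel, sequence, sizeof(rel));
  return sequence + rel + sizeof(rel);
}

int main() {
  uint8_t buf[16] = {0};
  buf[0] = 0xE8;
  int32_t rel = 7;  // 7 bytes past the end of the call instruction
  std::memcpy(buf + 1, &rel, sizeof(rel));
  std::printf("target offset: %td\n", DecodeCallTarget(buf) - buf);  // 12
}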
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 390f3a7..da4d2e8 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -164,8 +164,7 @@
   // Right trim the relocation info to free up remaining space.
   const int delta = reloc_info->length() - new_reloc_length;
   if (delta > 0) {
-    isolate->heap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
-        reloc_info, delta);
+    isolate->heap()->RightTrimFixedArray(reloc_info, delta);
   }
 }
 
@@ -182,7 +181,7 @@
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
   for (int i = 0; i < XMMRegister::kMaxNumRegisters; ++i) {
-    double double_value = input_->GetDoubleRegister(i);
+    Float64 double_value = input_->GetDoubleRegister(i);
     output_frame->SetDoubleRegister(i, double_value);
   }
 }
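The deoptimizer now moves XMM registers as Float64 rather than double, so register contents travel as raw bits; copying through a C++ double can canonicalize NaN payloads on some paths. A minimal bit-preserving wrapper in the same spirit (this Float64 is a local stand-in, not V8's class):

#include <cstdint>
#include <cstring>
#include <cstdio>

// Carries the raw 64 bits instead of a double value.
struct Float64 {
  uint64_t bits;
  static Float64 FromDouble(double d) {
    Float64 f;
    std::memcpy(&f.bits, &d, sizeof(d));
    return f;
  }
};

int main() {
  uint64_t snan_bits = 0x7FF0000000000001ull;  // a signalling-NaN pattern
  double snan;
  std::memcpy(&snan, &snan_bits, sizeof(snan));
  Float64 f = Float64::FromDouble(snan);
  std::printf("bits preserved: %d\n", f.bits == snan_bits);
}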
diff --git a/src/ia32/interface-descriptors-ia32.cc b/src/ia32/interface-descriptors-ia32.cc
index 8ce7872..0264af9 100644
--- a/src/ia32/interface-descriptors-ia32.cc
+++ b/src/ia32/interface-descriptors-ia32.cc
@@ -64,37 +64,11 @@
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {ebx};
+  // SharedFunctionInfo, vector, slot index.
+  Register registers[] = {ebx, ecx, edx};
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void FastNewObjectDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {edi, edx};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {edi};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {edi};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {edi};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 // static
 const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
 
@@ -146,15 +120,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {edi, edx};
+  Register registers[] = {edi, eax, edx};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {edi, eax, edx, ebx};
   data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -183,6 +155,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // ecx : start index (to support rest parameters)
+  // edi : the target to call
+  Register registers[] = {edi, ecx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void ConstructStubDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -218,13 +197,12 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
-  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
-      CallInterfaceDescriptorData* data) {                          \
-    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
-  }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+  Register registers[] = {edi, edx, eax, ebx};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
 
 void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -417,6 +395,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      ebx,  // loaded new FP
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
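Each descriptor in this file just pins every named parameter to a fixed register; arraysize is a compile-time element count. The idiom with a stand-in Register type, using the CallForwardVarargs assignment above as the example:

#include <cstddef>
#include <cstdio>

struct Register { const char* name; };

template <typename T, size_t N>
constexpr size_t arraysize(T (&)[N]) { return N; }

int main() {
  Register edi{"edi"}, ecx{"ecx"};
  // CallForwardVarargs: edi = target to call, ecx = start index.
  Register registers[] = {edi, ecx};
  for (size_t i = 0; i < arraysize(registers); i++) {
    std::printf("parameter %zu -> %s\n", i, registers[i].name);
  }
}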
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 2fa9d0e..906c369 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -673,12 +673,14 @@
   bind(&done);
 }
 
-void MacroAssembler::DebugBreak() {
-  Move(eax, Immediate(0));
-  mov(ebx, Immediate(ExternalReference(Runtime::kHandleDebuggerStatement,
-                                       isolate())));
-  CEntryStub ces(isolate(), 1);
-  call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+  // Check whether we need to drop frames to restart a function on the stack.
+  ExternalReference restart_fp =
+      ExternalReference::debug_restart_fp_address(isolate());
+  mov(ebx, Operand::StaticVariable(restart_fp));
+  test(ebx, ebx);
+  j(not_zero, isolate()->builtins()->FrameDropperTrampoline(),
+    RelocInfo::CODE_TARGET);
 }
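MaybeDropFrames above replaces the unconditional DebugBreak runtime call: it loads the per-isolate restart_fp cell and jumps to the FrameDropperTrampoline only when the debugger has armed a restart. The same check as plain control flow, with the external cell modeled as a global:

#include <cstdint>
#include <cstdio>

// Stand-in for ExternalReference::debug_restart_fp_address: non-zero means
// the debugger wants frames dropped down to this frame pointer.
static uintptr_t g_restart_fp = 0;

void FrameDropperTrampoline(uintptr_t fp) {
  std::printf("dropping frames down to fp=%#zx\n", static_cast<size_t>(fp));
}

void MaybeDropFrames() {
  uintptr_t restart_fp = g_restart_fp;  // mov ebx, [restart_fp]
  if (restart_fp != 0) {                // test ebx, ebx; j(not_zero, ...)
    FrameDropperTrampoline(restart_fp);
  }
}

int main() {
  MaybeDropFrames();        // debugger inactive: no-op
  g_restart_fp = 0xCAFE0;
  MaybeDropFrames();        // armed: trampoline taken
}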
 
 void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
@@ -810,67 +812,6 @@
   cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
 }
 
-void MacroAssembler::CheckFastObjectElements(Register map,
-                                             Label* fail,
-                                             Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
-  j(below_equal, fail, distance);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
-  j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
-                                          Label* fail,
-                                          Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
-  j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
-    Register maybe_number,
-    Register elements,
-    Register key,
-    Register scratch1,
-    XMMRegister scratch2,
-    Label* fail,
-    int elements_offset) {
-  Label smi_value, done;
-  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
-  CheckMap(maybe_number,
-           isolate()->factory()->heap_number_map(),
-           fail,
-           DONT_DO_SMI_CHECK);
-
-  // Double value, turn potential sNaN into qNaN.
-  Move(scratch2, 1.0);
-  mulsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
-  jmp(&done, Label::kNear);
-
-  bind(&smi_value);
-  // Value is a smi. Convert to a double and store.
-  // Preserve original value.
-  mov(scratch1, maybe_number);
-  SmiUntag(scratch1);
-  Cvtsi2sd(scratch2, scratch1);
-  bind(&done);
-  movsd(FieldOperand(elements, key, times_4,
-                     FixedDoubleArray::kHeaderSize - elements_offset),
-        scratch2);
-}
-
-
 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
   cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
 }
@@ -1058,7 +999,7 @@
 void MacroAssembler::StubPrologue(StackFrame::Type type) {
   push(ebp);  // Caller's frame pointer.
   mov(ebp, esp);
-  push(Immediate(Smi::FromInt(type)));
+  push(Immediate(StackFrame::TypeToMarker(type)));
 }
 
 void MacroAssembler::Prologue(bool code_pre_aging) {
@@ -1077,11 +1018,10 @@
   }
 }
 
-
-void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
   mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  mov(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
-  mov(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+  mov(vector, FieldOperand(vector, JSFunction::kFeedbackVectorOffset));
+  mov(vector, FieldOperand(vector, Cell::kValueOffset));
 }
 
 
@@ -1095,7 +1035,7 @@
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
   mov(ebp, esp);
-  push(Immediate(Smi::FromInt(type)));
+  push(Immediate(StackFrame::TypeToMarker(type)));
   if (type == StackFrame::INTERNAL) {
     push(Immediate(CodeObject()));
   }
@@ -1109,7 +1049,7 @@
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   if (emit_debug_code()) {
     cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
-        Immediate(Smi::FromInt(type)));
+        Immediate(StackFrame::TypeToMarker(type)));
     Check(equal, kStackFrameTypesMustMatch);
   }
   leave();
@@ -1144,7 +1084,7 @@
   mov(ebp, esp);
 
   // Reserve room for entry stack pointer and push the code object.
-  push(Immediate(Smi::FromInt(frame_type)));
+  push(Immediate(StackFrame::TypeToMarker(frame_type)));
   DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
   push(Immediate(0));  // Saved entry sp, patched before call.
   DCHECK_EQ(-3 * kPointerSize, ExitFrameConstants::kCodeOffset);
@@ -1654,139 +1594,6 @@
   mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
 }
 
-
-void MacroAssembler::AllocateTwoByteString(Register result,
-                                           Register length,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  DCHECK(kShortSize == 2);
-  // scratch1 = length * 2 + kObjectAlignmentMask.
-  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
-  and_(scratch1, Immediate(~kObjectAlignmentMask));
-
-  // Allocate two byte string in new space.
-  Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1,
-           REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->string_map()));
-  mov(scratch1, length);
-  SmiTag(scratch1);
-  mov(FieldOperand(result, String::kLengthOffset), scratch1);
-  mov(FieldOperand(result, String::kHashFieldOffset),
-      Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
-                                           Register scratch1, Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  mov(scratch1, length);
-  DCHECK(kCharSize == 1);
-  add(scratch1, Immediate(kObjectAlignmentMask));
-  and_(scratch1, Immediate(~kObjectAlignmentMask));
-
-  // Allocate one-byte string in new space.
-  Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1,
-           REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->one_byte_string_map()));
-  mov(scratch1, length);
-  SmiTag(scratch1);
-  mov(FieldOperand(result, String::kLengthOffset), scratch1);
-  mov(FieldOperand(result, String::kHashFieldOffset),
-      Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, int length,
-                                           Register scratch1, Register scratch2,
-                                           Label* gc_required) {
-  DCHECK(length > 0);
-
-  // Allocate one-byte string in new space.
-  Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
-           gc_required, NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->one_byte_string_map()));
-  mov(FieldOperand(result, String::kLengthOffset),
-      Immediate(Smi::FromInt(length)));
-  mov(FieldOperand(result, String::kHashFieldOffset),
-      Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Label* gc_required) {
-  // Allocate cons string in new space.
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map. The other fields are left uninitialized.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->cons_string_map()));
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map. The other fields are left uninitialized.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->cons_one_byte_string_map()));
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
-                                          Register scratch1,
-                                          Register scratch2,
-                                          Label* gc_required) {
-  // Allocate sliced string in new space.
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map. The other fields are left uninitialized.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->sliced_string_map()));
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  // Allocate sliced string in new space.
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map. The other fields are left uninitialized.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->sliced_one_byte_string_map()));
-}
-
-
 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
                                      Register value, Register scratch,
                                      Label* gc_required) {
@@ -1875,32 +1682,6 @@
   bind(&done);
 }
 
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
-                                             Register scratch, Label* miss) {
-  // Get the prototype or initial map from the function.
-  mov(result,
-      FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // If the prototype or initial map is the hole, don't return it and
-  // simply miss the cache instead. This will allow us to allocate a
-  // prototype object on-demand in the runtime system.
-  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
-  j(equal, miss);
-
-  // If the function does not have an initial map, we're done.
-  Label done;
-  CmpObjectType(result, MAP_TYPE, scratch);
-  j(not_equal, &done, Label::kNear);
-
-  // Get the prototype from the initial map.
-  mov(result, FieldOperand(result, Map::kPrototypeOffset));
-
-  // All done.
-  bind(&done);
-}
-
-
 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
   DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
   call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
@@ -2110,6 +1891,7 @@
       DCHECK(actual.reg().is(eax));
       DCHECK(expected.reg().is(ebx));
     } else {
+      definitely_matches = true;
       Move(eax, actual.reg());
     }
   }
@@ -2131,16 +1913,14 @@
   }
 }
 
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
-                                             const ParameterCount& expected,
-                                             const ParameterCount& actual) {
-  Label skip_flooding;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
-  j(less, &skip_flooding);
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual) {
+  Label skip_hook;
+  ExternalReference debug_hook_active =
+      ExternalReference::debug_hook_on_function_call_address(isolate());
+  cmpb(Operand::StaticVariable(debug_hook_active), Immediate(0));
+  j(equal, &skip_hook);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2157,7 +1937,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    CallRuntime(Runtime::kDebugOnFunctionCall);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -2171,7 +1951,7 @@
       SmiUntag(expected.reg());
     }
   }
-  bind(&skip_flooding);
+  bind(&skip_hook);
 }
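CheckDebugHook above trades the old step-flooding heuristic for a single byte flag: when debug_hook_on_function_call is set, the argument registers are saved and Runtime::kDebugOnFunctionCall runs before the invocation. Sketched as ordinary control flow; the flag and runtime entry are stand-ins:

#include <cstdio>

static unsigned char g_debug_hook_on_function_call = 0;  // stand-in flag

void RuntimeDebugOnFunctionCall(const char* fun) {
  std::printf("debugger notified before calling %s\n", fun);
}

void CheckDebugHook(const char* fun) {
  // cmpb [debug_hook_active], 0; j(equal, &skip_hook)
  if (g_debug_hook_on_function_call == 0) return;
  // The real code pushes and pops the argument registers around this call.
  RuntimeDebugOnFunctionCall(fun);
}

int main() {
  CheckDebugHook("f");                 // hook off: nothing happens
  g_debug_hook_on_function_call = 1;
  CheckDebugHook("f");                 // hook on: runtime is called
}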
 
 
@@ -2185,8 +1965,8 @@
   DCHECK(function.is(edi));
   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
 
-  if (call_wrapper.NeedsDebugStepCheck()) {
-    FloodFunctionIfStepping(function, new_target, expected, actual);
+  if (call_wrapper.NeedsDebugHookCheck()) {
+    CheckDebugHook(function, new_target, expected, actual);
   }
 
   // Clear the new.target register if not given.
@@ -2291,28 +2071,6 @@
   mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
 }
 
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind,
-    ElementsKind transitioned_kind,
-    Register map_in_out,
-    Register scratch,
-    Label* no_map_match) {
-  DCHECK(IsFastElementsKind(expected_kind));
-  DCHECK(IsFastElementsKind(transitioned_kind));
-
-  // Check that the function's map is the same as the expected cached map.
-  mov(scratch, NativeContextOperand());
-  cmp(map_in_out,
-      ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
-  j(not_equal, no_map_match);
-
-  // Use the transitioned cached map.
-  mov(map_in_out,
-      ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
   // Load the native context from the current context.
   mov(function, NativeContextOperand());
@@ -2759,19 +2517,6 @@
   psllq(dst, HeapNumber::kMantissaBits);
 }
 
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
-    Register instance_type, Register scratch, Label* failure) {
-  if (!scratch.is(instance_type)) {
-    mov(scratch, instance_type);
-  }
-  and_(scratch,
-       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
-  cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
-  j(not_equal, failure);
-}
-
-
 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
                                                            Register object2,
                                                            Register scratch1,
@@ -2795,11 +2540,13 @@
   const int kFlatOneByteStringTag =
       kStringTag | kOneByteStringTag | kSeqStringTag;
   // Interleave bits from both instance types and compare them in one check.
-  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
+  const int kShift = 8;
+  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << kShift));
   and_(scratch1, kFlatOneByteStringMask);
   and_(scratch2, kFlatOneByteStringMask);
-  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
-  cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
+  shl(scratch2, kShift);
+  or_(scratch1, scratch2);
+  cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << kShift));
   j(not_equal, failure);
 }
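The rewritten check above masks both instance types, shifts the second copy up by 8 bits, ORs them together, and compares once against the doubled tag; the wider shift keeps the two masked copies from overlapping. The combined comparison with illustrative mask and tag values:

#include <cstdint>
#include <cstdio>

// Illustrative values; the real masks and tags live in the V8 headers.
constexpr uint32_t kFlatOneByteStringMask = 0xFF;
constexpr uint32_t kFlatOneByteStringTag = 0x0C;  // string|seq|one-byte stand-in

bool BothFlatOneByte(uint32_t type1, uint32_t type2) {
  const int kShift = 8;
  static_assert((kFlatOneByteStringMask & (kFlatOneByteStringMask << 8)) == 0,
                "masked copies must not overlap");
  uint32_t combined = (type1 & kFlatOneByteStringMask) |
                      ((type2 & kFlatOneByteStringMask) << kShift);
  // One compare decides for both strings at once.
  return combined == (kFlatOneByteStringTag | (kFlatOneByteStringTag << kShift));
}

int main() {
  std::printf("%d %d\n", BothFlatOneByte(0x0C, 0x0C), BothFlatOneByte(0x0C, 0x0D));
}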
 
@@ -3162,43 +2909,6 @@
   cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
 }
 
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
-    Register object,
-    Register scratch0,
-    Register scratch1,
-    Label* found) {
-  DCHECK(!scratch1.is(scratch0));
-  Factory* factory = isolate()->factory();
-  Register current = scratch0;
-  Label loop_again, end;
-
-  // current (scratch0) walks the prototype chain starting from {object}.
-  mov(current, object);
-  mov(current, FieldOperand(current, HeapObject::kMapOffset));
-  mov(current, FieldOperand(current, Map::kPrototypeOffset));
-  cmp(current, Immediate(factory->null_value()));
-  j(equal, &end);
-
-  // Loop based on the map going up the prototype chain.
-  bind(&loop_again);
-  mov(current, FieldOperand(current, HeapObject::kMapOffset));
-  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  CmpInstanceType(current, JS_OBJECT_TYPE);
-  j(below, found);
-  mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
-  DecodeField<Map::ElementsKindBits>(scratch1);
-  cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
-  j(equal, found);
-  mov(current, FieldOperand(current, Map::kPrototypeOffset));
-  cmp(current, Immediate(factory->null_value()));
-  j(not_equal, &loop_again);
-
-  bind(&end);
-}
-
-
 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
   DCHECK(!dividend.is(eax));
   DCHECK(!dividend.is(edx));
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index e8ff59d..8aa7d38 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -228,10 +228,8 @@
   void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
                          Register scratch2, SaveFPRegsMode save_fp);
 
-  // ---------------------------------------------------------------------------
-  // Debugger Support
-
-  void DebugBreak();
+  // Frame restart support
+  void MaybeDropFrames();
 
   // Generates function and stub prologue code.
   void StubPrologue(StackFrame::Type type);
@@ -260,16 +258,6 @@
   // Load the global proxy from the current context.
   void LoadGlobalProxy(Register dst);
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the native context if the map in register
-  // map_in_out is the cached Array map in the native context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
-                                           ElementsKind transitioned_kind,
-                                           Register map_in_out,
-                                           Register scratch,
-                                           Label* no_map_match);
-
   // Load the global function with the given index.
   void LoadGlobalFunction(int index, Register function);
 
@@ -344,9 +332,10 @@
                           const ParameterCount& actual, InvokeFlag flag,
                           const CallWrapper& call_wrapper);
 
-  void FloodFunctionIfStepping(Register fun, Register new_target,
-                               const ParameterCount& expected,
-                               const ParameterCount& actual);
+  // On function call, call into the debugger if necessary.
+  void CheckDebugHook(Register fun, Register new_target,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
@@ -391,24 +380,6 @@
   // Compare instance type for map.
   void CmpInstanceType(Register map, InstanceType type);
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map, Label* fail,
-                               Label::Distance distance = Label::kFar);
-
-  // Check if a map for a JSObject indicates that the object has fast smi only
-  // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map, Label* fail,
-                            Label::Distance distance = Label::kFar);
-
-  // Check to see if maybe_number can be stored as a double in
-  // FastDoubleElements. If it can, store it at the index specified by key in
-  // the FastDoubleElements array elements, otherwise jump to fail.
-  void StoreNumberToDoubleElements(Register maybe_number, Register elements,
-                                   Register key, Register scratch1,
-                                   XMMRegister scratch2, Label* fail,
-                                   int offset = 0);
-
   // Compare an object's map with the specified map.
   void CompareMap(Register obj, Handle<Map> map);
 
@@ -503,7 +474,12 @@
     test(value, Immediate(kSmiTagMask));
     j(not_zero, not_smi_label, distance);
   }
-
+  // Jump if the operand is not a smi.
+  inline void JumpIfNotSmi(Operand value, Label* not_smi_label,
+                           Label::Distance distance = Label::kFar) {
+    test(value, Immediate(kSmiTagMask));
+    j(not_zero, not_smi_label, distance);
+  }
   // Jump if the value cannot be represented by a smi.
   inline void JumpIfNotValidSmiValue(Register value, Register scratch,
                                      Label* on_invalid,
@@ -640,31 +616,6 @@
   void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
                           Label* gc_required, MutableMode mode = IMMUTABLE);
 
-  // Allocate a sequential string. All the header fields of the string object
-  // are initialized.
-  void AllocateTwoByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-  void AllocateOneByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-  void AllocateOneByteString(Register result, int length, Register scratch1,
-                             Register scratch2, Label* gc_required);
-
-  // Allocate a raw cons string object. Only the map field of the result is
-  // initialized.
-  void AllocateTwoByteConsString(Register result, Register scratch1,
-                                 Register scratch2, Label* gc_required);
-  void AllocateOneByteConsString(Register result, Register scratch1,
-                                 Register scratch2, Label* gc_required);
-
-  // Allocate a raw sliced string object. Only the map field of the result is
-  // initialized.
-  void AllocateTwoByteSlicedString(Register result, Register scratch1,
-                                   Register scratch2, Label* gc_required);
-  void AllocateOneByteSlicedString(Register result, Register scratch1,
-                                   Register scratch2, Label* gc_required);
-
   // Allocate and initialize a JSValue wrapper with the specified {constructor}
   // and {value}.
   void AllocateJSValue(Register result, Register constructor, Register value,
@@ -694,14 +645,6 @@
   // |temp| holds |result|'s map when done.
   void GetMapConstructor(Register result, Register map, Register temp);
 
-  // Try to get function prototype of a function and puts the value in
-  // the result register. Checks that the function really is a
-  // function and jumps to the miss label if the fast checks fail. The
-  // function register will be untouched; the other registers may be
-  // clobbered.
-  void TryGetFunctionPrototype(Register function, Register result,
-                               Register scratch, Label* miss);
-
   // ---------------------------------------------------------------------------
   // Runtime calls
 
@@ -889,13 +832,6 @@
   // ---------------------------------------------------------------------------
   // String utilities.
 
-  // Check whether the instance type represents a flat one-byte string. Jump to
-  // the label if not. If the instance type can be scratched specify same
-  // register for both instance type and scratch.
-  void JumpIfInstanceTypeIsNotSequentialOneByte(
-      Register instance_type, Register scratch,
-      Label* on_not_flat_one_byte_string);
-
   // Checks if both objects are sequential one-byte strings, and jumps to label
   // if either is not.
   void JumpIfNotBothSequentialOneByteStrings(
@@ -919,7 +855,7 @@
   }
 
   // Load the type feedback vector from a JavaScript frame.
-  void EmitLoadTypeFeedbackVector(Register vector);
+  void EmitLoadFeedbackVector(Register vector);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -943,20 +879,6 @@
                                        Register scratch_reg,
                                        Label* no_memento_found);
 
-  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
-                                         Register scratch_reg,
-                                         Label* memento_found) {
-    Label no_memento_found;
-    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
-                                    &no_memento_found);
-    j(equal, memento_found);
-    bind(&no_memento_found);
-  }
-
-  // Jumps to found label if a prototype map has dictionary elements.
-  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
-                                        Register scratch1, Label* found);
-
  private:
   bool generating_stub_;
   bool has_frame_;
diff --git a/src/ic/access-compiler.cc b/src/ic/access-compiler.cc
index d92f9c0..d210ea8 100644
--- a/src/ic/access-compiler.cc
+++ b/src/ic/access-compiler.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/ic/access-compiler.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ic/accessor-assembler.cc b/src/ic/accessor-assembler.cc
new file mode 100644
index 0000000..d3379ab
--- /dev/null
+++ b/src/ic/accessor-assembler.cc
@@ -0,0 +1,2024 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ic/accessor-assembler.h"
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/counters.h"
+#include "src/ic/handler-configuration.h"
+#include "src/ic/stub-cache.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::CodeAssemblerState;
+using compiler::Node;
+
+//////////////////// Private helpers.
+
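+// Note on the feedback layout assumed here (and made explicit by the loads
+// below): a monomorphic IC keeps a WeakCell of the receiver map in its
+// feedback slot and the handler in the slot after it, while a polymorphic
+// IC keeps a FixedArray of (map, handler) entries instead.
+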
+Node* AccessorAssembler::TryMonomorphicCase(Node* slot, Node* vector,
+                                            Node* receiver_map,
+                                            Label* if_handler,
+                                            Variable* var_handler,
+                                            Label* if_miss) {
+  Comment("TryMonomorphicCase");
+  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+
+  // TODO(ishell): add helper class that hides offset computations for a series
+  // of loads.
+  int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
+  // Adding |header_size| with a separate IntPtrAdd rather than passing it
+  // into ElementOffsetFromIndex() allows it to be folded into a single
+  // [base, index, offset] indirect memory access on x64.
+  Node* offset =
+      ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS);
+  Node* feedback = Load(MachineType::AnyTagged(), vector,
+                        IntPtrAdd(offset, IntPtrConstant(header_size)));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  GotoIf(WordNotEqual(receiver_map, LoadWeakCellValueUnchecked(feedback)),
+         if_miss);
+
+  Node* handler =
+      Load(MachineType::AnyTagged(), vector,
+           IntPtrAdd(offset, IntPtrConstant(header_size + kPointerSize)));
+
+  var_handler->Bind(handler);
+  Goto(if_handler);
+  return feedback;
+}
+
+void AccessorAssembler::HandlePolymorphicCase(Node* receiver_map,
+                                              Node* feedback, Label* if_handler,
+                                              Variable* var_handler,
+                                              Label* if_miss,
+                                              int unroll_count) {
+  Comment("HandlePolymorphicCase");
+  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+
+  // Iterate {feedback} array.
+  const int kEntrySize = 2;
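+  // Each entry is a (WeakCell(map), handler) pair: the first |unroll_count|
+  // entries are checked with straight-line code, the rest with a loop.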
+
+  for (int i = 0; i < unroll_count; i++) {
+    Label next_entry(this);
+    Node* cached_map =
+        LoadWeakCellValue(LoadFixedArrayElement(feedback, i * kEntrySize));
+    GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+    // Found, now call handler.
+    Node* handler = LoadFixedArrayElement(feedback, i * kEntrySize + 1);
+    var_handler->Bind(handler);
+    Goto(if_handler);
+
+    Bind(&next_entry);
+  }
+
+  // Loop from {unroll_count}*kEntrySize to {length}.
+  Node* init = IntPtrConstant(unroll_count * kEntrySize);
+  Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+  BuildFastLoop(
+      init, length,
+      [this, receiver_map, feedback, if_handler, var_handler](Node* index) {
+        Node* cached_map =
+            LoadWeakCellValue(LoadFixedArrayElement(feedback, index));
+
+        Label next_entry(this);
+        GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+        // Found, now call handler.
+        Node* handler = LoadFixedArrayElement(feedback, index, kPointerSize);
+        var_handler->Bind(handler);
+        Goto(if_handler);
+
+        Bind(&next_entry);
+      },
+      kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+  // The loop falls through if no handler was found.
+  Goto(if_miss);
+}
+
+void AccessorAssembler::HandleKeyedStorePolymorphicCase(
+    Node* receiver_map, Node* feedback, Label* if_handler,
+    Variable* var_handler, Label* if_transition_handler,
+    Variable* var_transition_map_cell, Label* if_miss) {
+  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+  DCHECK_EQ(MachineRepresentation::kTagged, var_transition_map_cell->rep());
+
+  const int kEntrySize = 3;
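+  // Each entry is a (WeakCell(map), transition map cell or undefined,
+  // handler) triple; see the loads in the loop below.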
+
+  Node* init = IntPtrConstant(0);
+  Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+  BuildFastLoop(init, length,
+                [this, receiver_map, feedback, if_handler, var_handler,
+                 if_transition_handler, var_transition_map_cell](Node* index) {
+                  Node* cached_map =
+                      LoadWeakCellValue(LoadFixedArrayElement(feedback, index));
+                  Label next_entry(this);
+                  GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+                  Node* maybe_transition_map_cell =
+                      LoadFixedArrayElement(feedback, index, kPointerSize);
+
+                  var_handler->Bind(
+                      LoadFixedArrayElement(feedback, index, 2 * kPointerSize));
+                  GotoIf(WordEqual(maybe_transition_map_cell,
+                                   LoadRoot(Heap::kUndefinedValueRootIndex)),
+                         if_handler);
+                  var_transition_map_cell->Bind(maybe_transition_map_cell);
+                  Goto(if_transition_handler);
+
+                  Bind(&next_entry);
+                },
+                kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+  // The loop falls through if no handler was found.
+  Goto(if_miss);
+}
+
+void AccessorAssembler::HandleLoadICHandlerCase(
+    const LoadICParameters* p, Node* handler, Label* miss,
+    ElementSupport support_elements) {
+  Comment("have_handler");
+  ExitPoint direct_exit(this);
+
+  Variable var_holder(this, MachineRepresentation::kTagged);
+  var_holder.Bind(p->receiver);
+  Variable var_smi_handler(this, MachineRepresentation::kTagged);
+  var_smi_handler.Bind(handler);
+
+  Variable* vars[] = {&var_holder, &var_smi_handler};
+  Label if_smi_handler(this, 2, vars);
+  Label try_proto_handler(this), call_handler(this);
+
+  Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
+
+  // |handler| is a Smi, encoding what to do. See SmiHandler methods
+  // for the encoding format.
+  Bind(&if_smi_handler);
+  {
+    HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
+                               miss, &direct_exit, support_elements);
+  }
+
+  Bind(&try_proto_handler);
+  {
+    GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+    HandleLoadICProtoHandlerCase(p, handler, &var_holder, &var_smi_handler,
+                                 &if_smi_handler, miss, &direct_exit, false);
+  }
+
+  Bind(&call_handler);
+  {
+    typedef LoadWithVectorDescriptor Descriptor;
+    TailCallStub(Descriptor(isolate()), handler, p->context, p->receiver,
+                 p->name, p->slot, p->vector);
+  }
+}
+
+void AccessorAssembler::HandleLoadICSmiHandlerCase(
+    const LoadICParameters* p, Node* holder, Node* smi_handler, Label* miss,
+    ExitPoint* exit_point, ElementSupport support_elements) {
+  Variable var_double_value(this, MachineRepresentation::kFloat64);
+  Label rebox_double(this, &var_double_value);
+
+  Node* handler_word = SmiUntag(smi_handler);
+  Node* handler_kind = DecodeWord<LoadHandler::KindBits>(handler_word);
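+  // The untagged handler word packs the handler description into bit
+  // fields: LoadHandler::KindBits selects the case, and kind-specific
+  // fields (FieldOffsetBits, IsInobjectBits, IsDoubleBits, ...) are
+  // decoded below.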
+  if (support_elements == kSupportElements) {
+    Label property(this);
+    GotoIfNot(
+        WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForElements)),
+        &property);
+
+    Comment("element_load");
+    Node* intptr_index = TryToIntptr(p->name, miss);
+    Node* elements = LoadElements(holder);
+    Node* is_jsarray_condition =
+        IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
+    Node* elements_kind =
+        DecodeWord32FromWord<LoadHandler::ElementsKindBits>(handler_word);
+    Label if_hole(this), unimplemented_elements_kind(this);
+    Label* out_of_bounds = miss;
+    EmitElementLoad(holder, elements, elements_kind, intptr_index,
+                    is_jsarray_condition, &if_hole, &rebox_double,
+                    &var_double_value, &unimplemented_elements_kind,
+                    out_of_bounds, miss, exit_point);
+
+    Bind(&unimplemented_elements_kind);
+    {
+      // Smi handlers should only be installed for supported elements kinds.
+      // Crash if we get here.
+      DebugBreak();
+      Goto(miss);
+    }
+
+    Bind(&if_hole);
+    {
+      Comment("convert hole");
+      GotoIfNot(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
+      Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+      DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+      GotoIfNot(
+          WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+                    SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+          miss);
+      exit_point->Return(UndefinedConstant());
+    }
+
+    Bind(&property);
+    Comment("property_load");
+  }
+
+  Label constant(this), field(this);
+  Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForFields)),
+         &field, &constant);
+
+  Bind(&field);
+  {
+    Comment("field_load");
+    Node* offset = DecodeWord<LoadHandler::FieldOffsetBits>(handler_word);
+
+    Label inobject(this), out_of_object(this);
+    Branch(IsSetWord<LoadHandler::IsInobjectBits>(handler_word), &inobject,
+           &out_of_object);
+
+    Bind(&inobject);
+    {
+      Label is_double(this);
+      GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
+      exit_point->Return(LoadObjectField(holder, offset));
+
+      Bind(&is_double);
+      if (FLAG_unbox_double_fields) {
+        var_double_value.Bind(
+            LoadObjectField(holder, offset, MachineType::Float64()));
+      } else {
+        Node* mutable_heap_number = LoadObjectField(holder, offset);
+        var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+      }
+      Goto(&rebox_double);
+    }
+
+    Bind(&out_of_object);
+    {
+      Label is_double(this);
+      Node* properties = LoadProperties(holder);
+      Node* value = LoadObjectField(properties, offset);
+      GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
+      exit_point->Return(value);
+
+      Bind(&is_double);
+      var_double_value.Bind(LoadHeapNumberValue(value));
+      Goto(&rebox_double);
+    }
+
+    Bind(&rebox_double);
+    exit_point->Return(AllocateHeapNumberWithValue(var_double_value.value()));
+  }
+
+  Bind(&constant);
+  {
+    Comment("constant_load");
+    Node* descriptors = LoadMapDescriptors(LoadMap(holder));
+    Node* descriptor =
+        DecodeWord<LoadHandler::DescriptorValueIndexBits>(handler_word);
+    CSA_ASSERT(this,
+               UintPtrLessThan(descriptor,
+                               LoadAndUntagFixedArrayBaseLength(descriptors)));
+    Node* value = LoadFixedArrayElement(descriptors, descriptor);
+
+    Label if_accessor_info(this);
+    GotoIf(IsSetWord<LoadHandler::IsAccessorInfoBits>(handler_word),
+           &if_accessor_info);
+    exit_point->Return(value);
+
+    Bind(&if_accessor_info);
+    Callable callable = CodeFactory::ApiGetter(isolate());
+    exit_point->ReturnCallStub(callable, p->context, p->receiver, holder,
+                               value);
+  }
+}
+
+void AccessorAssembler::HandleLoadICProtoHandlerCase(
+    const LoadICParameters* p, Node* handler, Variable* var_holder,
+    Variable* var_smi_handler, Label* if_smi_handler, Label* miss,
+    ExitPoint* exit_point, bool throw_reference_error_if_nonexistent) {
+  DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
+  DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
+
+  // IC dispatchers rely on these assumptions holding.
+  STATIC_ASSERT(FixedArray::kLengthOffset == LoadHandler::kHolderCellOffset);
+  DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kSmiHandlerIndex),
+            LoadHandler::kSmiHandlerOffset);
+  DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kValidityCellIndex),
+            LoadHandler::kValidityCellOffset);
+
+  // Both FixedArray and Tuple3 handlers store the validity cell at the
+  // same offset.
+  Label validity_cell_check_done(this);
+  Node* validity_cell =
+      LoadObjectField(handler, LoadHandler::kValidityCellOffset);
+  GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
+         &validity_cell_check_done);
+  Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+  GotoIf(WordNotEqual(cell_value,
+                      SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+         miss);
+  Goto(&validity_cell_check_done);
+
+  Bind(&validity_cell_check_done);
+  Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
+  CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+  Node* handler_flags = SmiUntag(smi_handler);
+
+  Label check_prototypes(this);
+  GotoIfNot(
+      IsSetWord<LoadHandler::DoNegativeLookupOnReceiverBits>(handler_flags),
+      &check_prototypes);
+  {
+    CSA_ASSERT(this, Word32BinaryNot(
+                         HasInstanceType(p->receiver, JS_GLOBAL_OBJECT_TYPE)));
+    // We have a dictionary receiver, do a negative lookup check.
+    NameDictionaryNegativeLookup(p->receiver, p->name, miss);
+    Goto(&check_prototypes);
+  }
+
+  Bind(&check_prototypes);
+  Node* maybe_holder_cell =
+      LoadObjectField(handler, LoadHandler::kHolderCellOffset);
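+  // A Smi here means the handler is a FixedArray: FixedArray::kLengthOffset
+  // aliases LoadHandler::kHolderCellOffset (see the STATIC_ASSERT above),
+  // so the load above fetched the array's length.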
+  Label array_handler(this), tuple_handler(this);
+  Branch(TaggedIsSmi(maybe_holder_cell), &array_handler, &tuple_handler);
+
+  Bind(&tuple_handler);
+  {
+    Label load_existent(this);
+    GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
+    // This is a handler for a load of a non-existent value.
+    if (throw_reference_error_if_nonexistent) {
+      exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context,
+                                    p->name);
+    } else {
+      exit_point->Return(UndefinedConstant());
+    }
+
+    Bind(&load_existent);
+    Node* holder = LoadWeakCellValue(maybe_holder_cell);
+    // The |holder| is guaranteed to be alive at this point since we passed
+    // both the receiver map check and the validity cell check.
+    CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
+
+    var_holder->Bind(holder);
+    var_smi_handler->Bind(smi_handler);
+    Goto(if_smi_handler);
+  }
+
+  Bind(&array_handler);
+  {
+    exit_point->ReturnCallStub(
+        CodeFactory::LoadICProtoArray(isolate(),
+                                      throw_reference_error_if_nonexistent),
+        p->context, p->receiver, p->name, p->slot, p->vector, handler);
+  }
+}
+
+Node* AccessorAssembler::EmitLoadICProtoArrayCheck(
+    const LoadICParameters* p, Node* handler, Node* handler_length,
+    Node* handler_flags, Label* miss,
+    bool throw_reference_error_if_nonexistent) {
+  Variable start_index(this, MachineType::PointerRepresentation());
+  start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
+
+  Label can_access(this);
+  GotoIfNot(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
+            &can_access);
+  {
+    // Skip the handler's first prototype entry; it holds the expected
+    // native context, which is checked below.
+    start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
+
+    int offset =
+        FixedArray::OffsetOfElementAt(LoadHandler::kFirstPrototypeIndex);
+    Node* expected_native_context =
+        LoadWeakCellValue(LoadObjectField(handler, offset), miss);
+    CSA_ASSERT(this, IsNativeContext(expected_native_context));
+
+    Node* native_context = LoadNativeContext(p->context);
+    GotoIf(WordEqual(expected_native_context, native_context), &can_access);
+    // If the receiver is not a JSGlobalProxy then we miss.
+    GotoIfNot(IsJSGlobalProxy(p->receiver), miss);
+    // For a JSGlobalProxy receiver, compare the security tokens of the
+    // current and expected native contexts.
+    Node* expected_token = LoadContextElement(expected_native_context,
+                                              Context::SECURITY_TOKEN_INDEX);
+    Node* current_token =
+        LoadContextElement(native_context, Context::SECURITY_TOKEN_INDEX);
+    Branch(WordEqual(expected_token, current_token), &can_access, miss);
+  }
+  Bind(&can_access);
+
+  BuildFastLoop(start_index.value(), handler_length,
+                [this, p, handler, miss](Node* current) {
+                  Node* prototype_cell =
+                      LoadFixedArrayElement(handler, current);
+                  CheckPrototype(prototype_cell, p->name, miss);
+                },
+                1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+
+  Node* maybe_holder_cell =
+      LoadFixedArrayElement(handler, LoadHandler::kHolderCellIndex);
+  Label load_existent(this);
+  GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
+  // This is a handler for a load of a non-existent value.
+  if (throw_reference_error_if_nonexistent) {
+    TailCallRuntime(Runtime::kThrowReferenceError, p->context, p->name);
+  } else {
+    Return(UndefinedConstant());
+  }
+
+  Bind(&load_existent);
+  Node* holder = LoadWeakCellValue(maybe_holder_cell);
+  // The |holder| is guaranteed to be alive at this point since we passed
+  // the receiver map check, the validity cell check and the prototype chain
+  // check.
+  CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
+  return holder;
+}
+
+void AccessorAssembler::HandleLoadGlobalICHandlerCase(
+    const LoadICParameters* pp, Node* handler, Label* miss,
+    ExitPoint* exit_point, bool throw_reference_error_if_nonexistent) {
+  LoadICParameters p = *pp;
+  DCHECK_NULL(p.receiver);
+  Node* native_context = LoadNativeContext(p.context);
+  p.receiver = LoadContextElement(native_context, Context::EXTENSION_INDEX);
+
+  Variable var_holder(this, MachineRepresentation::kTagged);
+  Variable var_smi_handler(this, MachineRepresentation::kTagged);
+  Label if_smi_handler(this);
+  HandleLoadICProtoHandlerCase(&p, handler, &var_holder, &var_smi_handler,
+                               &if_smi_handler, miss, exit_point,
+                               throw_reference_error_if_nonexistent);
+  Bind(&if_smi_handler);
+  HandleLoadICSmiHandlerCase(&p, var_holder.value(), var_smi_handler.value(),
+                             miss, exit_point, kOnlyProperties);
+}
+
+void AccessorAssembler::HandleStoreICHandlerCase(
+    const StoreICParameters* p, Node* handler, Label* miss,
+    ElementSupport support_elements) {
+  Label if_smi_handler(this), if_nonsmi_handler(this);
+  Label if_proto_handler(this), if_element_handler(this), call_handler(this);
+
+  Branch(TaggedIsSmi(handler), &if_smi_handler, &if_nonsmi_handler);
+
+  // |handler| is a Smi, encoding what to do. See SmiHandler methods
+  // for the encoding format.
+  Bind(&if_smi_handler);
+  {
+    Node* holder = p->receiver;
+    Node* handler_word = SmiUntag(handler);
+
+    // Handle non-transitioning field stores.
+    HandleStoreICSmiHandlerCase(handler_word, holder, p->value, nullptr, miss);
+  }
+
+  Bind(&if_nonsmi_handler);
+  {
+    Node* handler_map = LoadMap(handler);
+    if (support_elements == kSupportElements) {
+      GotoIf(IsTuple2Map(handler_map), &if_element_handler);
+    }
+    Branch(IsCodeMap(handler_map), &call_handler, &if_proto_handler);
+  }
+
+  if (support_elements == kSupportElements) {
+    Bind(&if_element_handler);
+    { HandleStoreICElementHandlerCase(p, handler, miss); }
+  }
+
+  Bind(&if_proto_handler);
+  {
+    HandleStoreICProtoHandler(p, handler, miss);
+  }
+
+  // |handler| is a heap object. It must be code; call it.
+  Bind(&call_handler);
+  {
+    StoreWithVectorDescriptor descriptor(isolate());
+    TailCallStub(descriptor, handler, p->context, p->receiver, p->name,
+                 p->value, p->slot, p->vector);
+  }
+}
+
+void AccessorAssembler::HandleStoreICElementHandlerCase(
+    const StoreICParameters* p, Node* handler, Label* miss) {
+  Comment("HandleStoreICElementHandlerCase");
+  Node* validity_cell = LoadObjectField(handler, Tuple2::kValue1Offset);
+  Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+  GotoIf(WordNotEqual(cell_value,
+                      SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+         miss);
+
+  Node* code_handler = LoadObjectField(handler, Tuple2::kValue2Offset);
+  CSA_ASSERT(this, IsCodeMap(LoadMap(code_handler)));
+
+  StoreWithVectorDescriptor descriptor(isolate());
+  TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
+               p->value, p->slot, p->vector);
+}
+
+void AccessorAssembler::HandleStoreICProtoHandler(const StoreICParameters* p,
+                                                  Node* handler, Label* miss) {
+  // IC dispatchers rely on these assumptions holding.
+  STATIC_ASSERT(FixedArray::kLengthOffset ==
+                StoreHandler::kTransitionCellOffset);
+  DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kSmiHandlerIndex),
+            StoreHandler::kSmiHandlerOffset);
+  DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kValidityCellIndex),
+            StoreHandler::kValidityCellOffset);
+
+  // Both FixedArray and Tuple3 handlers store the validity cell at the
+  // same offset.
+  Label validity_cell_check_done(this);
+  Node* validity_cell =
+      LoadObjectField(handler, StoreHandler::kValidityCellOffset);
+  GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
+         &validity_cell_check_done);
+  Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+  GotoIf(WordNotEqual(cell_value,
+                      SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+         miss);
+  Goto(&validity_cell_check_done);
+
+  Bind(&validity_cell_check_done);
+  Node* smi_handler = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
+  CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+
+  Node* maybe_transition_cell =
+      LoadObjectField(handler, StoreHandler::kTransitionCellOffset);
+  Label array_handler(this), tuple_handler(this);
+  Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &tuple_handler);
+
+  Variable var_transition(this, MachineRepresentation::kTagged);
+  Label if_transition(this), if_transition_to_constant(this);
+  Bind(&tuple_handler);
+  {
+    Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
+    var_transition.Bind(transition);
+    Goto(&if_transition);
+  }
+
+  Bind(&array_handler);
+  {
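+    // The Smi loaded from the transition-cell slot is really the handler
+    // array's length: FixedArray::kLengthOffset aliases
+    // StoreHandler::kTransitionCellOffset (see the STATIC_ASSERT above).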
+    Node* length = SmiUntag(maybe_transition_cell);
+    BuildFastLoop(IntPtrConstant(StoreHandler::kFirstPrototypeIndex), length,
+                  [this, p, handler, miss](Node* current) {
+                    Node* prototype_cell =
+                        LoadFixedArrayElement(handler, current);
+                    CheckPrototype(prototype_cell, p->name, miss);
+                  },
+                  1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+
+    Node* maybe_transition_cell =
+        LoadFixedArrayElement(handler, StoreHandler::kTransitionCellIndex);
+    Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
+    var_transition.Bind(transition);
+    Goto(&if_transition);
+  }
+
+  Bind(&if_transition);
+  {
+    Node* holder = p->receiver;
+    Node* transition = var_transition.value();
+    Node* handler_word = SmiUntag(smi_handler);
+
+    GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(transition)), miss);
+
+    Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+    GotoIf(WordEqual(handler_kind,
+                     IntPtrConstant(StoreHandler::kTransitionToConstant)),
+           &if_transition_to_constant);
+
+    // Handle transitioning field stores.
+    HandleStoreICSmiHandlerCase(handler_word, holder, p->value, transition,
+                                miss);
+
+    Bind(&if_transition_to_constant);
+    {
+      // Check that constant matches value.
+      Node* value_index_in_descriptor =
+          DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
+      Node* descriptors = LoadMapDescriptors(transition);
+      Node* constant =
+          LoadFixedArrayElement(descriptors, value_index_in_descriptor);
+      GotoIf(WordNotEqual(p->value, constant), miss);
+
+      StoreMap(p->receiver, transition);
+      Return(p->value);
+    }
+  }
+}
+
+void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
+                                                    Node* holder, Node* value,
+                                                    Node* transition,
+                                                    Label* miss) {
+  Comment(transition ? "transitioning field store" : "field store");
+
+#ifdef DEBUG
+  Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+  if (transition) {
+    CSA_ASSERT(
+        this,
+        Word32Or(
+            WordEqual(handler_kind,
+                      IntPtrConstant(StoreHandler::kTransitionToField)),
+            WordEqual(handler_kind,
+                      IntPtrConstant(StoreHandler::kTransitionToConstant))));
+  } else {
+    if (FLAG_track_constant_fields) {
+      CSA_ASSERT(
+          this,
+          Word32Or(WordEqual(handler_kind,
+                             IntPtrConstant(StoreHandler::kStoreField)),
+                   WordEqual(handler_kind,
+                             IntPtrConstant(StoreHandler::kStoreConstField))));
+    } else {
+      CSA_ASSERT(this, WordEqual(handler_kind,
+                                 IntPtrConstant(StoreHandler::kStoreField)));
+    }
+  }
+#endif
+
+  Node* field_representation =
+      DecodeWord<StoreHandler::FieldRepresentationBits>(handler_word);
+
+  Label if_smi_field(this), if_double_field(this), if_heap_object_field(this),
+      if_tagged_field(this);
+
+  GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kTagged)),
+         &if_tagged_field);
+  GotoIf(WordEqual(field_representation,
+                   IntPtrConstant(StoreHandler::kHeapObject)),
+         &if_heap_object_field);
+  GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kDouble)),
+         &if_double_field);
+  CSA_ASSERT(this, WordEqual(field_representation,
+                             IntPtrConstant(StoreHandler::kSmi)));
+  Goto(&if_smi_field);
+
+  Bind(&if_tagged_field);
+  {
+    Comment("store tagged field");
+    HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
+                              value, transition, miss);
+  }
+
+  Bind(&if_double_field);
+  {
+    Comment("store double field");
+    HandleStoreFieldAndReturn(handler_word, holder, Representation::Double(),
+                              value, transition, miss);
+  }
+
+  Bind(&if_heap_object_field);
+  {
+    Comment("store heap object field");
+    HandleStoreFieldAndReturn(handler_word, holder,
+                              Representation::HeapObject(), value, transition,
+                              miss);
+  }
+
+  Bind(&if_smi_field);
+  {
+    Comment("store smi field");
+    HandleStoreFieldAndReturn(handler_word, holder, Representation::Smi(),
+                              value, transition, miss);
+  }
+}
+
+void AccessorAssembler::HandleStoreFieldAndReturn(Node* handler_word,
+                                                  Node* holder,
+                                                  Representation representation,
+                                                  Node* value, Node* transition,
+                                                  Label* miss) {
+  bool transition_to_field = transition != nullptr;
+  Node* prepared_value = PrepareValueForStore(
+      handler_word, holder, representation, transition, value, miss);
+
+  Label if_inobject(this), if_out_of_object(this);
+  Branch(IsSetWord<StoreHandler::IsInobjectBits>(handler_word), &if_inobject,
+         &if_out_of_object);
+
+  Bind(&if_inobject);
+  {
+    StoreNamedField(handler_word, holder, true, representation, prepared_value,
+                    transition_to_field, miss);
+    if (transition_to_field) {
+      StoreMap(holder, transition);
+    }
+    Return(value);
+  }
+
+  Bind(&if_out_of_object);
+  {
+    if (transition_to_field) {
+      Label storage_extended(this);
+      GotoIfNot(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word),
+                &storage_extended);
+      Comment("[ Extend storage");
+      ExtendPropertiesBackingStore(holder);
+      Comment("] Extend storage");
+      Goto(&storage_extended);
+
+      Bind(&storage_extended);
+    }
+
+    StoreNamedField(handler_word, holder, false, representation, prepared_value,
+                    transition_to_field, miss);
+    if (transition_to_field) {
+      StoreMap(holder, transition);
+    }
+    Return(value);
+  }
+}
+
+Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
+                                              Representation representation,
+                                              Node* transition, Node* value,
+                                              Label* bailout) {
+  if (representation.IsDouble()) {
+    value = TryTaggedToFloat64(value, bailout);
+
+  } else if (representation.IsHeapObject()) {
+    GotoIf(TaggedIsSmi(value), bailout);
+
+    Label done(this);
+    if (FLAG_track_constant_fields && !transition) {
+      // Skip field type check in favor of constant value check when storing
+      // to constant field.
+      GotoIf(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
+                       IntPtrConstant(StoreHandler::kStoreConstField)),
+             &done);
+    }
+    Node* value_index_in_descriptor =
+        DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
+    Node* descriptors =
+        LoadMapDescriptors(transition ? transition : LoadMap(holder));
+    Node* maybe_field_type =
+        LoadFixedArrayElement(descriptors, value_index_in_descriptor);
+
+    GotoIf(TaggedIsSmi(maybe_field_type), &done);
+    // Check that value type matches the field type.
+    {
+      Node* field_type = LoadWeakCellValue(maybe_field_type, bailout);
+      Branch(WordEqual(LoadMap(value), field_type), &done, bailout);
+    }
+    Bind(&done);
+
+  } else if (representation.IsSmi()) {
+    GotoIfNot(TaggedIsSmi(value), bailout);
+
+  } else {
+    DCHECK(representation.IsTagged());
+  }
+  return value;
+}
+
+void AccessorAssembler::ExtendPropertiesBackingStore(Node* object) {
+  Node* properties = LoadProperties(object);
+  Node* length = LoadFixedArrayBaseLength(properties);
+
+  ParameterMode mode = OptimalParameterMode();
+  length = TaggedToParameter(length, mode);
+
+  Node* delta = IntPtrOrSmiConstant(JSObject::kFieldsAdded, mode);
+  Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
+
+  // Grow properties array.
+  ElementsKind kind = FAST_ELEMENTS;
+  DCHECK(kMaxNumberOfDescriptors + JSObject::kFieldsAdded <
+         FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind));
+  // The size of a new properties backing store is guaranteed to be small
+  // enough that the new backing store will be allocated in new space.
+  CSA_ASSERT(this,
+             UintPtrOrSmiLessThan(
+                 new_capacity,
+                 IntPtrOrSmiConstant(
+                     kMaxNumberOfDescriptors + JSObject::kFieldsAdded, mode),
+                 mode));
+
+  Node* new_properties = AllocateFixedArray(kind, new_capacity, mode);
+
+  FillFixedArrayWithValue(kind, new_properties, length, new_capacity,
+                          Heap::kUndefinedValueRootIndex, mode);
+
+  // |new_properties| is guaranteed to be in new space, so we can skip
+  // the write barrier.
+  CopyFixedArrayElements(kind, properties, new_properties, length,
+                         SKIP_WRITE_BARRIER, mode);
+
+  StoreObjectField(object, JSObject::kPropertiesOffset, new_properties);
+}
+
+void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
+                                        bool is_inobject,
+                                        Representation representation,
+                                        Node* value, bool transition_to_field,
+                                        Label* bailout) {
+  bool store_value_as_double = representation.IsDouble();
+  Node* property_storage = object;
+  if (!is_inobject) {
+    property_storage = LoadProperties(object);
+  }
+
+  Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
+  if (representation.IsDouble()) {
+    if (!FLAG_unbox_double_fields || !is_inobject) {
+      if (transition_to_field) {
+        Node* heap_number = AllocateHeapNumberWithValue(value, MUTABLE);
+        // Store the new mutable heap number into the object.
+        value = heap_number;
+        store_value_as_double = false;
+      } else {
+        // Load the heap number.
+        property_storage = LoadObjectField(property_storage, offset);
+        // Store the double value into it.
+        offset = IntPtrConstant(HeapNumber::kValueOffset);
+      }
+    }
+  }
+
+  // Do constant value check if necessary.
+  if (FLAG_track_constant_fields && !transition_to_field) {
+    Label done(this);
+    GotoIfNot(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
+                        IntPtrConstant(StoreHandler::kStoreConstField)),
+              &done);
+    {
+      if (store_value_as_double) {
+        Node* current_value =
+            LoadObjectField(property_storage, offset, MachineType::Float64());
+        GotoIfNot(Float64Equal(current_value, value), bailout);
+      } else {
+        Node* current_value = LoadObjectField(property_storage, offset);
+        GotoIfNot(WordEqual(current_value, value), bailout);
+      }
+      Goto(&done);
+    }
+    Bind(&done);
+  }
+
+  // Do the store.
+  if (store_value_as_double) {
+    StoreObjectFieldNoWriteBarrier(property_storage, offset, value,
+                                   MachineRepresentation::kFloat64);
+  } else if (representation.IsSmi()) {
+    StoreObjectFieldNoWriteBarrier(property_storage, offset, value);
+  } else {
+    StoreObjectField(property_storage, offset, value);
+  }
+}
+
+void AccessorAssembler::EmitFastElementsBoundsCheck(Node* object,
+                                                    Node* elements,
+                                                    Node* intptr_index,
+                                                    Node* is_jsarray_condition,
+                                                    Label* miss) {
+  Variable var_length(this, MachineType::PointerRepresentation());
+  Comment("Fast elements bounds check");
+  Label if_array(this), length_loaded(this, &var_length);
+  GotoIf(is_jsarray_condition, &if_array);
+  {
+    var_length.Bind(SmiUntag(LoadFixedArrayBaseLength(elements)));
+    Goto(&length_loaded);
+  }
+  Bind(&if_array);
+  {
+    var_length.Bind(SmiUntag(LoadJSArrayLength(object)));
+    Goto(&length_loaded);
+  }
+  Bind(&length_loaded);
+  GotoIfNot(UintPtrLessThan(intptr_index, var_length.value()), miss);
+}
+
+void AccessorAssembler::EmitElementLoad(
+    Node* object, Node* elements, Node* elements_kind, Node* intptr_index,
+    Node* is_jsarray_condition, Label* if_hole, Label* rebox_double,
+    Variable* var_double_value, Label* unimplemented_elements_kind,
+    Label* out_of_bounds, Label* miss, ExitPoint* exit_point) {
+  Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
+      if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
+      if_dictionary(this);
+  GotoIf(
+      Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+      &if_nonfast);
+
+  EmitFastElementsBoundsCheck(object, elements, intptr_index,
+                              is_jsarray_condition, out_of_bounds);
+  int32_t kinds[] = {// Handled by if_fast_packed.
+                     FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+                     // Handled by if_fast_holey.
+                     FAST_HOLEY_SMI_ELEMENTS, FAST_HOLEY_ELEMENTS,
+                     // Handled by if_fast_double.
+                     FAST_DOUBLE_ELEMENTS,
+                     // Handled by if_fast_holey_double.
+                     FAST_HOLEY_DOUBLE_ELEMENTS};
+  Label* labels[] = {// FAST_{SMI,}_ELEMENTS
+                     &if_fast_packed, &if_fast_packed,
+                     // FAST_HOLEY_{SMI,}_ELEMENTS
+                     &if_fast_holey, &if_fast_holey,
+                     // FAST_DOUBLE_ELEMENTS
+                     &if_fast_double,
+                     // FAST_HOLEY_DOUBLE_ELEMENTS
+                     &if_fast_holey_double};
+  Switch(elements_kind, unimplemented_elements_kind, kinds, labels,
+         arraysize(kinds));
+
+  Bind(&if_fast_packed);
+  {
+    Comment("fast packed elements");
+    exit_point->Return(LoadFixedArrayElement(elements, intptr_index));
+  }
+
+  Bind(&if_fast_holey);
+  {
+    Comment("fast holey elements");
+    Node* element = LoadFixedArrayElement(elements, intptr_index);
+    GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
+    exit_point->Return(element);
+  }
+
+  Bind(&if_fast_double);
+  {
+    Comment("packed double elements");
+    var_double_value->Bind(LoadFixedDoubleArrayElement(elements, intptr_index,
+                                                       MachineType::Float64()));
+    Goto(rebox_double);
+  }
+
+  Bind(&if_fast_holey_double);
+  {
+    Comment("holey double elements");
+    Node* value = LoadFixedDoubleArrayElement(elements, intptr_index,
+                                              MachineType::Float64(), 0,
+                                              INTPTR_PARAMETERS, if_hole);
+    var_double_value->Bind(value);
+    Goto(rebox_double);
+  }
+
+  Bind(&if_nonfast);
+  {
+    STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+    GotoIf(Int32GreaterThanOrEqual(
+               elements_kind,
+               Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
+           &if_typed_array);
+    GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
+           &if_dictionary);
+    Goto(unimplemented_elements_kind);
+  }
+
+  Bind(&if_dictionary);
+  {
+    Comment("dictionary elements");
+    GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), out_of_bounds);
+    Variable var_entry(this, MachineType::PointerRepresentation());
+    Label if_found(this);
+    NumberDictionaryLookup<SeededNumberDictionary>(
+        elements, intptr_index, &if_found, &var_entry, if_hole);
+    Bind(&if_found);
+    // Check that the value is a data property.
+    Node* index = EntryToIndex<SeededNumberDictionary>(var_entry.value());
+    Node* details =
+        LoadDetailsByKeyIndex<SeededNumberDictionary>(elements, index);
+    Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
+    // TODO(jkummerow): Support accessors without missing?
+    GotoIfNot(Word32Equal(kind, Int32Constant(kData)), miss);
+    // Finally, load the value.
+    exit_point->Return(
+        LoadValueByKeyIndex<SeededNumberDictionary>(elements, index));
+  }
+
+  Bind(&if_typed_array);
+  {
+    Comment("typed elements");
+    // Check if buffer has been neutered.
+    Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+    GotoIf(IsDetachedBuffer(buffer), miss);
+
+    // Bounds check.
+    Node* length =
+        SmiUntag(LoadObjectField(object, JSTypedArray::kLengthOffset));
+    GotoIfNot(UintPtrLessThan(intptr_index, length), out_of_bounds);
+
+    // Backing store = external_pointer + base_pointer.
+    Node* external_pointer =
+        LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
+                        MachineType::Pointer());
+    Node* base_pointer =
+        LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
+    Node* backing_store =
+        IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer));
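+    // The loads below scale the index by the element size:
+    // index << 1 for 16-bit, << 2 for 32-bit, and << 3 for 64-bit elements.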
+
+    Label uint8_elements(this), int8_elements(this), uint16_elements(this),
+        int16_elements(this), uint32_elements(this), int32_elements(this),
+        float32_elements(this), float64_elements(this);
+    Label* elements_kind_labels[] = {
+        &uint8_elements,  &uint8_elements,   &int8_elements,
+        &uint16_elements, &int16_elements,   &uint32_elements,
+        &int32_elements,  &float32_elements, &float64_elements};
+    int32_t elements_kinds[] = {
+        UINT8_ELEMENTS,  UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
+        UINT16_ELEMENTS, INT16_ELEMENTS,         UINT32_ELEMENTS,
+        INT32_ELEMENTS,  FLOAT32_ELEMENTS,       FLOAT64_ELEMENTS};
+    const size_t kTypedElementsKindCount =
+        LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+        FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
+    DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
+    DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
+    Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
+           kTypedElementsKindCount);
+    Bind(&uint8_elements);
+    {
+      Comment("UINT8_ELEMENTS");  // Handles UINT8_CLAMPED_ELEMENTS too.
+      Node* element = Load(MachineType::Uint8(), backing_store, intptr_index);
+      exit_point->Return(SmiFromWord32(element));
+    }
+    Bind(&int8_elements);
+    {
+      Comment("INT8_ELEMENTS");
+      Node* element = Load(MachineType::Int8(), backing_store, intptr_index);
+      exit_point->Return(SmiFromWord32(element));
+    }
+    Bind(&uint16_elements);
+    {
+      Comment("UINT16_ELEMENTS");
+      Node* index = WordShl(intptr_index, IntPtrConstant(1));
+      Node* element = Load(MachineType::Uint16(), backing_store, index);
+      exit_point->Return(SmiFromWord32(element));
+    }
+    Bind(&int16_elements);
+    {
+      Comment("INT16_ELEMENTS");
+      Node* index = WordShl(intptr_index, IntPtrConstant(1));
+      Node* element = Load(MachineType::Int16(), backing_store, index);
+      exit_point->Return(SmiFromWord32(element));
+    }
+    Bind(&uint32_elements);
+    {
+      Comment("UINT32_ELEMENTS");
+      Node* index = WordShl(intptr_index, IntPtrConstant(2));
+      Node* element = Load(MachineType::Uint32(), backing_store, index);
+      exit_point->Return(ChangeUint32ToTagged(element));
+    }
+    Bind(&int32_elements);
+    {
+      Comment("INT32_ELEMENTS");
+      Node* index = WordShl(intptr_index, IntPtrConstant(2));
+      Node* element = Load(MachineType::Int32(), backing_store, index);
+      exit_point->Return(ChangeInt32ToTagged(element));
+    }
+    Bind(&float32_elements);
+    {
+      Comment("FLOAT32_ELEMENTS");
+      Node* index = WordShl(intptr_index, IntPtrConstant(2));
+      Node* element = Load(MachineType::Float32(), backing_store, index);
+      var_double_value->Bind(ChangeFloat32ToFloat64(element));
+      Goto(rebox_double);
+    }
+    Bind(&float64_elements);
+    {
+      Comment("FLOAT64_ELEMENTS");
+      Node* index = WordShl(intptr_index, IntPtrConstant(3));
+      Node* element = Load(MachineType::Float64(), backing_store, index);
+      var_double_value->Bind(element);
+      Goto(rebox_double);
+    }
+  }
+}
+
+void AccessorAssembler::CheckPrototype(Node* prototype_cell, Node* name,
+                                       Label* miss) {
+  Node* maybe_prototype = LoadWeakCellValue(prototype_cell, miss);
+
+  Label done(this);
+  Label if_property_cell(this), if_dictionary_object(this);
+
+  // |maybe_prototype| is either a PropertyCell or a slow-mode prototype.
+  Branch(WordEqual(LoadMap(maybe_prototype),
+                   LoadRoot(Heap::kGlobalPropertyCellMapRootIndex)),
+         &if_property_cell, &if_dictionary_object);
+
+  Bind(&if_dictionary_object);
+  {
+    CSA_ASSERT(this, IsDictionaryMap(LoadMap(maybe_prototype)));
+    NameDictionaryNegativeLookup(maybe_prototype, name, miss);
+    Goto(&done);
+  }
+
+  Bind(&if_property_cell);
+  {
+    // Ensure the property cell still contains the hole.
+    Node* value = LoadObjectField(maybe_prototype, PropertyCell::kValueOffset);
+    GotoIf(WordNotEqual(value, LoadRoot(Heap::kTheHoleValueRootIndex)), miss);
+    Goto(&done);
+  }
+
+  Bind(&done);
+}
+
+void AccessorAssembler::NameDictionaryNegativeLookup(Node* object, Node* name,
+                                                     Label* miss) {
+  CSA_ASSERT(this, IsDictionaryMap(LoadMap(object)));
+  Node* properties = LoadProperties(object);
+  // Ensure the property does not exist in a dictionary-mode object.
+  Variable var_name_index(this, MachineType::PointerRepresentation());
+  Label done(this);
+  NameDictionaryLookup<NameDictionary>(properties, name, miss, &var_name_index,
+                                       &done);
+  Bind(&done);
+}
+
+void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
+                                           Node* instance_type, Node* index,
+                                           Label* slow) {
+  Comment("integer index");
+
+  ExitPoint direct_exit(this);
+
+  Label if_element_hole(this), if_oob(this);
+  // Receivers requiring non-standard element accesses (interceptors, access
+  // checks, strings and string wrappers, proxies) are handled in the runtime.
+  GotoIf(Int32LessThanOrEqual(instance_type,
+                              Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+         slow);
+  Node* elements = LoadElements(receiver);
+  Node* elements_kind = LoadMapElementsKind(receiver_map);
+  Node* is_jsarray_condition =
+      Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE));
+  Variable var_double_value(this, MachineRepresentation::kFloat64);
+  Label rebox_double(this, &var_double_value);
+
+  // Unimplemented elements kinds fall back to a runtime call.
+  Label* unimplemented_elements_kind = slow;
+  IncrementCounter(isolate()->counters()->ic_keyed_load_generic_smi(), 1);
+  EmitElementLoad(receiver, elements, elements_kind, index,
+                  is_jsarray_condition, &if_element_hole, &rebox_double,
+                  &var_double_value, unimplemented_elements_kind, &if_oob, slow,
+                  &direct_exit);
+
+  Bind(&rebox_double);
+  Return(AllocateHeapNumberWithValue(var_double_value.value()));
+
+  Bind(&if_oob);
+  {
+    Comment("out of bounds");
+    // Negative keys can't take the fast OOB path.
+    GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), slow);
+    // Positive OOB indices are effectively the same as hole loads.
+    Goto(&if_element_hole);
+  }
+
+  Bind(&if_element_hole);
+  {
+    Comment("found the hole");
+    Label return_undefined(this);
+    BranchIfPrototypesHaveNoElements(receiver_map, &return_undefined, slow);
+
+    Bind(&return_undefined);
+    Return(UndefinedConstant());
+  }
+}
+
+void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
+                                            Node* instance_type, Node* key,
+                                            const LoadICParameters* p,
+                                            Label* slow) {
+  Comment("key is unique name");
+  Label if_found_on_receiver(this), if_property_dictionary(this),
+      lookup_prototype_chain(this);
+  Variable var_details(this, MachineRepresentation::kWord32);
+  Variable var_value(this, MachineRepresentation::kTagged);
+
+  // Receivers requiring non-standard accesses (interceptors, access
+  // checks, strings and string wrappers, proxies) are handled in the runtime.
+  GotoIf(Int32LessThanOrEqual(instance_type,
+                              Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
+         slow);
+
+  // Check if the receiver has fast or slow properties.
+  Node* properties = LoadProperties(receiver);
+  Node* properties_map = LoadMap(properties);
+  GotoIf(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
+         &if_property_dictionary);
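+  // Fast-mode receivers are searched via their map's descriptor array
+  // (below); dictionary-mode receivers, recognized here by the hash table
+  // map on their properties, take the NameDictionary path instead.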
+
+  // Try looking up the property on the receiver; if unsuccessful, look
+  // for a handler in the stub cache.
+  Node* bitfield3 = LoadMapBitField3(receiver_map);
+  Node* descriptors = LoadMapDescriptors(receiver_map);
+
+  Label if_descriptor_found(this), stub_cache(this);
+  Variable var_name_index(this, MachineType::PointerRepresentation());
+  DescriptorLookup(key, descriptors, bitfield3, &if_descriptor_found,
+                   &var_name_index, &stub_cache);
+
+  Bind(&if_descriptor_found);
+  {
+    LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
+                               var_name_index.value(), &var_details,
+                               &var_value);
+    Goto(&if_found_on_receiver);
+  }
+
+  Bind(&stub_cache);
+  {
+    Comment("stub cache probe for fast property load");
+    Variable var_handler(this, MachineRepresentation::kTagged);
+    Label found_handler(this, &var_handler), stub_cache_miss(this);
+    TryProbeStubCache(isolate()->load_stub_cache(), receiver, key,
+                      &found_handler, &var_handler, &stub_cache_miss);
+    Bind(&found_handler);
+    { HandleLoadICHandlerCase(p, var_handler.value(), slow); }
+
+    Bind(&stub_cache_miss);
+    {
+      // TODO(jkummerow): Check if the property exists on the prototype
+      // chain. If it doesn't, then there's no point in missing.
+      Comment("KeyedLoadGeneric_miss");
+      TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
+                      p->name, p->slot, p->vector);
+    }
+  }
+
+  Bind(&if_property_dictionary);
+  {
+    Comment("dictionary property load");
+    // We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
+    // seeing global objects here (which would need special handling).
+
+    Variable var_name_index(this, MachineType::PointerRepresentation());
+    Label dictionary_found(this, &var_name_index);
+    NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
+                                         &var_name_index,
+                                         &lookup_prototype_chain);
+    Bind(&dictionary_found);
+    {
+      LoadPropertyFromNameDictionary(properties, var_name_index.value(),
+                                     &var_details, &var_value);
+      Goto(&if_found_on_receiver);
+    }
+  }
+
+  Bind(&if_found_on_receiver);
+  {
+    Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
+                                       p->context, receiver, slow);
+    IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
+    Return(value);
+  }
+
+  Bind(&lookup_prototype_chain);
+  {
+    Variable var_holder_map(this, MachineRepresentation::kTagged);
+    Variable var_holder_instance_type(this, MachineRepresentation::kWord32);
+    Label return_undefined(this);
+    Variable* merged_variables[] = {&var_holder_map, &var_holder_instance_type};
+    Label loop(this, arraysize(merged_variables), merged_variables);
+
+    var_holder_map.Bind(receiver_map);
+    var_holder_instance_type.Bind(instance_type);
+    // Private symbols must not be looked up on the prototype chain.
+    GotoIf(IsPrivateSymbol(key), &return_undefined);
+    Goto(&loop);
+    Bind(&loop);
+    {
+      // Bail out if this could be an integer-indexed exotic object.
+      GotoIf(Word32Equal(var_holder_instance_type.value(),
+                         Int32Constant(JS_TYPED_ARRAY_TYPE)),
+             slow);
+      Node* proto = LoadMapPrototype(var_holder_map.value());
+      GotoIf(WordEqual(proto, NullConstant()), &return_undefined);
+      Node* proto_map = LoadMap(proto);
+      Node* proto_instance_type = LoadMapInstanceType(proto_map);
+      var_holder_map.Bind(proto_map);
+      var_holder_instance_type.Bind(proto_instance_type);
+      Label next_proto(this), return_value(this, &var_value), goto_slow(this);
+      TryGetOwnProperty(p->context, receiver, proto, proto_map,
+                        proto_instance_type, key, &return_value, &var_value,
+                        &next_proto, &goto_slow);
+
+      // This trampoline and the next are required to appease Turbofan's
+      // variable merging.
+      Bind(&next_proto);
+      Goto(&loop);
+
+      Bind(&goto_slow);
+      Goto(slow);
+
+      Bind(&return_value);
+      Return(var_value.value());
+    }
+
+    Bind(&return_undefined);
+    Return(UndefinedConstant());
+  }
+}
+
+//////////////////// Stub cache access helpers.
+
+enum AccessorAssembler::StubCacheTable : int {
+  kPrimary = static_cast<int>(StubCache::kPrimary),
+  kSecondary = static_cast<int>(StubCache::kSecondary)
+};
+
+Node* AccessorAssembler::StubCachePrimaryOffset(Node* name, Node* map) {
+  // See v8::internal::StubCache::PrimaryOffset().
+  STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
+  // Compute the hash of the name (use entire hash field).
+  Node* hash_field = LoadNameHashField(name);
+  CSA_ASSERT(this,
+             Word32Equal(Word32And(hash_field,
+                                   Int32Constant(Name::kHashNotComputedMask)),
+                         Int32Constant(0)));
+
+  // Using only the low bits in 64-bit mode is unlikely to increase the
+  // risk of collision even if the heap is spread over an area larger than
+  // 4 GB (and not at all if it isn't).
+  Node* map32 = TruncateWordToWord32(BitcastTaggedToWord(map));
+  Node* hash = Int32Add(hash_field, map32);
+  // Base the offset on a simple combination of name and map.
+  hash = Word32Xor(hash, Int32Constant(StubCache::kPrimaryMagic));
+  uint32_t mask = (StubCache::kPrimaryTableSize - 1)
+                  << StubCache::kCacheIndexShift;
+  return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
+}
+
+Node* AccessorAssembler::StubCacheSecondaryOffset(Node* name, Node* seed) {
+  // See v8::internal::StubCache::SecondaryOffset().
+
+  // Use the seed from the primary cache in the secondary cache.
+  Node* name32 = TruncateWordToWord32(BitcastTaggedToWord(name));
+  Node* hash = Int32Sub(TruncateWordToWord32(seed), name32);
+  hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic));
+  int32_t mask = (StubCache::kSecondaryTableSize - 1)
+                 << StubCache::kCacheIndexShift;
+  return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
+}
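+
+// A scalar sketch of the two offset computations above (illustrative only;
+// the authoritative versions are StubCache::PrimaryOffset() and
+// StubCache::SecondaryOffset()):
+//   primary   = ((hash_field + map_low32) ^ kPrimaryMagic) & primary_mask
+//   secondary = ((primary - name_low32) + kSecondaryMagic) & secondary_mask
+// where each mask is (table_size - 1) << kCacheIndexShift.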
+
+void AccessorAssembler::TryProbeStubCacheTable(StubCache* stub_cache,
+                                               StubCacheTable table_id,
+                                               Node* entry_offset, Node* name,
+                                               Node* map, Label* if_handler,
+                                               Variable* var_handler,
+                                               Label* if_miss) {
+  StubCache::Table table = static_cast<StubCache::Table>(table_id);
+#ifdef DEBUG
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    Goto(if_miss);
+    return;
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    Goto(if_miss);
+    return;
+  }
+#endif
+  // The {entry_offset} holds the entry offset times four (due to masking
+  // and shifting optimizations).
+  const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
+  entry_offset = IntPtrMul(entry_offset, IntPtrConstant(kMultiplier));
+
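+  // A stub cache entry is a {key, value, map} pointer triple; the DCHECKs
+  // below verify the value and map offsets relative to the key reference.
+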
+  // Check that the key in the entry matches the name.
+  Node* key_base =
+      ExternalConstant(ExternalReference(stub_cache->key_reference(table)));
+  Node* entry_key = Load(MachineType::Pointer(), key_base, entry_offset);
+  GotoIf(WordNotEqual(name, entry_key), if_miss);
+
+  // Get the map entry from the cache.
+  DCHECK_EQ(kPointerSize * 2, stub_cache->map_reference(table).address() -
+                                  stub_cache->key_reference(table).address());
+  Node* entry_map =
+      Load(MachineType::Pointer(), key_base,
+           IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize * 2)));
+  GotoIf(WordNotEqual(map, entry_map), if_miss);
+
+  DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
+                              stub_cache->key_reference(table).address());
+  Node* handler = Load(MachineType::TaggedPointer(), key_base,
+                       IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
+
+  // We found the handler.
+  var_handler->Bind(handler);
+  Goto(if_handler);
+}
+
+void AccessorAssembler::TryProbeStubCache(StubCache* stub_cache, Node* receiver,
+                                          Node* name, Label* if_handler,
+                                          Variable* var_handler,
+                                          Label* if_miss) {
+  Label try_secondary(this), miss(this);
+
+  Counters* counters = isolate()->counters();
+  IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+  // Check that the {receiver} isn't a smi.
+  GotoIf(TaggedIsSmi(receiver), &miss);
+
+  Node* receiver_map = LoadMap(receiver);
+
+  // Probe the primary table.
+  Node* primary_offset = StubCachePrimaryOffset(name, receiver_map);
+  TryProbeStubCacheTable(stub_cache, kPrimary, primary_offset, name,
+                         receiver_map, if_handler, var_handler, &try_secondary);
+
+  Bind(&try_secondary);
+  {
+    // Probe the secondary table.
+    Node* secondary_offset = StubCacheSecondaryOffset(name, primary_offset);
+    TryProbeStubCacheTable(stub_cache, kSecondary, secondary_offset, name,
+                           receiver_map, if_handler, var_handler, &miss);
+  }
+
+  Bind(&miss);
+  {
+    IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+    Goto(if_miss);
+  }
+}
+
+//////////////////// Entry points into private implementation (one per stub).
+
+void AccessorAssembler::LoadIC(const LoadICParameters* p) {
+  Variable var_handler(this, MachineRepresentation::kTagged);
+  // TODO(ishell): defer blocks when it works.
+  Label if_handler(this, &var_handler), try_polymorphic(this),
+      try_megamorphic(this /*, Label::kDeferred*/),
+      miss(this /*, Label::kDeferred*/);
+
+  Node* receiver_map = LoadReceiverMap(p->receiver);
+  GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(receiver_map)), &miss);
+
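+  // Feedback shapes dispatched below (a sketch inferred from the checks):
+  // a WeakCell of the receiver map (monomorphic, handler in the next slot),
+  // a FixedArray of (map, handler) pairs (polymorphic), or the megamorphic
+  // sentinel symbol.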
+  // Check monomorphic case.
+  Node* feedback =
+      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+                         &var_handler, &try_polymorphic);
+  Bind(&if_handler);
+  { HandleLoadICHandlerCase(p, var_handler.value(), &miss); }
+
+  Bind(&try_polymorphic);
+  {
+    // Check polymorphic case.
+    Comment("LoadIC_try_polymorphic");
+    GotoIfNot(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
+              &try_megamorphic);
+    HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
+                          &miss, 2);
+  }
+
+  Bind(&try_megamorphic);
+  {
+    // Check megamorphic case.
+    GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+              &miss);
+
+    TryProbeStubCache(isolate()->load_stub_cache(), p->receiver, p->name,
+                      &if_handler, &var_handler, &miss);
+  }
+  Bind(&miss);
+  {
+    TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
+                    p->slot, p->vector);
+  }
+}
+
+void AccessorAssembler::LoadICProtoArray(
+    const LoadICParameters* p, Node* handler,
+    bool throw_reference_error_if_nonexistent) {
+  Label miss(this);
+  CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
+  CSA_ASSERT(this, IsFixedArrayMap(LoadMap(handler)));
+
+  ExitPoint direct_exit(this);
+
+  Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
+  Node* handler_flags = SmiUntag(smi_handler);
+
+  Node* handler_length = LoadAndUntagFixedArrayBaseLength(handler);
+
+  Node* holder =
+      EmitLoadICProtoArrayCheck(p, handler, handler_length, handler_flags,
+                                &miss, throw_reference_error_if_nonexistent);
+
+  HandleLoadICSmiHandlerCase(p, holder, smi_handler, &miss, &direct_exit,
+                             kOnlyProperties);
+
+  Bind(&miss);
+  {
+    TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
+                    p->slot, p->vector);
+  }
+}
+
+void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
+    Node* vector, Node* slot, ExitPoint* exit_point, Label* try_handler,
+    Label* miss, ParameterMode slot_mode) {
+  Comment("LoadGlobalIC_TryPropertyCellCase");
+
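+  // Expected feedback shape (sketch): vector[slot] holds a WeakCell pointing
+  // at the global's PropertyCell; a cleared cell defers to {try_handler} and
+  // a hole value defers to {miss}.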
+  Node* weak_cell = LoadFixedArrayElement(vector, slot, 0, slot_mode);
+  CSA_ASSERT(this, HasInstanceType(weak_cell, WEAK_CELL_TYPE));
+
+  // Load the value, falling back to the handler case if the {weak_cell}
+  // is cleared.
+  Node* property_cell = LoadWeakCellValue(weak_cell, try_handler);
+  CSA_ASSERT(this, HasInstanceType(property_cell, PROPERTY_CELL_TYPE));
+
+  Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
+  GotoIf(WordEqual(value, TheHoleConstant()), miss);
+  exit_point->Return(value);
+}
+
+void AccessorAssembler::LoadGlobalIC_TryHandlerCase(const LoadICParameters* p,
+                                                    TypeofMode typeof_mode,
+                                                    ExitPoint* exit_point,
+                                                    Label* miss) {
+  Comment("LoadGlobalIC_TryHandlerCase");
+
+  Label call_handler(this);
+
+  Node* handler =
+      LoadFixedArrayElement(p->vector, p->slot, kPointerSize, SMI_PARAMETERS);
+  CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
+  GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+         miss);
+  GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+
+  bool throw_reference_error_if_nonexistent = typeof_mode == NOT_INSIDE_TYPEOF;
+  HandleLoadGlobalICHandlerCase(p, handler, miss, exit_point,
+                                throw_reference_error_if_nonexistent);
+
+  Bind(&call_handler);
+  {
+    LoadWithVectorDescriptor descriptor(isolate());
+    Node* native_context = LoadNativeContext(p->context);
+    Node* receiver =
+        LoadContextElement(native_context, Context::EXTENSION_INDEX);
+    exit_point->ReturnCallStub(descriptor, handler, p->context, receiver,
+                               p->name, p->slot, p->vector);
+  }
+}
+
+void AccessorAssembler::LoadGlobalIC_MissCase(const LoadICParameters* p,
+                                              ExitPoint* exit_point) {
+  Comment("LoadGlobalIC_MissCase");
+
+  exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Miss, p->context,
+                                p->name, p->slot, p->vector);
+}
+
+void AccessorAssembler::LoadGlobalIC(const LoadICParameters* p,
+                                     TypeofMode typeof_mode) {
+  ExitPoint direct_exit(this);
+
+  Label try_handler(this), miss(this);
+  LoadGlobalIC_TryPropertyCellCase(p->vector, p->slot, &direct_exit,
+                                   &try_handler, &miss);
+
+  Bind(&try_handler);
+  LoadGlobalIC_TryHandlerCase(p, typeof_mode, &direct_exit, &miss);
+
+  Bind(&miss);
+  LoadGlobalIC_MissCase(p, &direct_exit);
+}
+
+void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
+  Variable var_handler(this, MachineRepresentation::kTagged);
+  // TODO(ishell): defer blocks when it works.
+  Label if_handler(this, &var_handler), try_polymorphic(this),
+      try_megamorphic(this /*, Label::kDeferred*/),
+      try_polymorphic_name(this /*, Label::kDeferred*/),
+      miss(this /*, Label::kDeferred*/);
+
+  Node* receiver_map = LoadReceiverMap(p->receiver);
+  GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(receiver_map)), &miss);
+
+  // Check monomorphic case.
+  Node* feedback =
+      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+                         &var_handler, &try_polymorphic);
+  Bind(&if_handler);
+  { HandleLoadICHandlerCase(p, var_handler.value(), &miss, kSupportElements); }
+
+  Bind(&try_polymorphic);
+  {
+    // Check polymorphic case.
+    Comment("KeyedLoadIC_try_polymorphic");
+    GotoIfNot(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
+              &try_megamorphic);
+    HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
+                          &miss, 2);
+  }
+
+  Bind(&try_megamorphic);
+  {
+    // Check megamorphic case.
+    Comment("KeyedLoadIC_try_megamorphic");
+    GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+              &try_polymorphic_name);
+    // TODO(jkummerow): Inline this? Or some of it?
+    TailCallStub(CodeFactory::KeyedLoadIC_Megamorphic(isolate()), p->context,
+                 p->receiver, p->name, p->slot, p->vector);
+  }
+  Bind(&try_polymorphic_name);
+  {
+    // We might have a name in feedback, and a fixed array in the next slot.
+    Comment("KeyedLoadIC_try_polymorphic_name");
+    GotoIfNot(WordEqual(feedback, p->name), &miss);
+    // If the name comparison succeeded, we know we have a fixed array with
+    // at least one map/handler pair.
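+    // The extra kPointerSize in the offset below skips the name stored at
+    // {slot}, so the array is loaded from {slot + 1}.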
+    Node* offset = ElementOffsetFromIndex(
+        p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
+        FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
+    Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
+    HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
+                          1);
+  }
+  Bind(&miss);
+  {
+    Comment("KeyedLoadIC_miss");
+    TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
+                    p->name, p->slot, p->vector);
+  }
+}
+
+void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
+  Variable var_index(this, MachineType::PointerRepresentation());
+  Variable var_unique(this, MachineRepresentation::kTagged);
+  var_unique.Bind(p->name);  // Dummy initialization.
+  Label if_index(this), if_unique_name(this), slow(this);
+
+  Node* receiver = p->receiver;
+  GotoIf(TaggedIsSmi(receiver), &slow);
+  Node* receiver_map = LoadMap(receiver);
+  Node* instance_type = LoadMapInstanceType(receiver_map);
+
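+  // Dispatch on the key: integer-like keys take the element path, unique
+  // names take the property path, and anything else goes to {slow}.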
+  TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
+            &slow);
+
+  Bind(&if_index);
+  {
+    GenericElementLoad(receiver, receiver_map, instance_type, var_index.value(),
+                       &slow);
+  }
+
+  Bind(&if_unique_name);
+  {
+    GenericPropertyLoad(receiver, receiver_map, instance_type,
+                        var_unique.value(), p, &slow);
+  }
+
+  Bind(&slow);
+  {
+    Comment("KeyedLoadGeneric_slow");
+    IncrementCounter(isolate()->counters()->ic_keyed_load_generic_slow(), 1);
+    // TODO(jkummerow): Should we use the GetProperty TF stub instead?
+    TailCallRuntime(Runtime::kKeyedGetProperty, p->context, p->receiver,
+                    p->name);
+  }
+}
+
+void AccessorAssembler::StoreIC(const StoreICParameters* p) {
+  Variable var_handler(this, MachineRepresentation::kTagged);
+  // TODO(ishell): defer blocks when it works.
+  Label if_handler(this, &var_handler), try_polymorphic(this),
+      try_megamorphic(this /*, Label::kDeferred*/),
+      miss(this /*, Label::kDeferred*/);
+
+  Node* receiver_map = LoadReceiverMap(p->receiver);
+  GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(receiver_map)), &miss);
+
+  // Check monomorphic case.
+  Node* feedback =
+      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+                         &var_handler, &try_polymorphic);
+  Bind(&if_handler);
+  {
+    Comment("StoreIC_if_handler");
+    HandleStoreICHandlerCase(p, var_handler.value(), &miss);
+  }
+
+  Bind(&try_polymorphic);
+  {
+    // Check polymorphic case.
+    Comment("StoreIC_try_polymorphic");
+    GotoIfNot(
+        WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+        &try_megamorphic);
+    HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
+                          &miss, 2);
+  }
+
+  Bind(&try_megamorphic);
+  {
+    // Check megamorphic case.
+    GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+              &miss);
+
+    TryProbeStubCache(isolate()->store_stub_cache(), p->receiver, p->name,
+                      &if_handler, &var_handler, &miss);
+  }
+  Bind(&miss);
+  {
+    TailCallRuntime(Runtime::kStoreIC_Miss, p->context, p->value, p->slot,
+                    p->vector, p->receiver, p->name);
+  }
+}
+
+void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p,
+                                     LanguageMode language_mode) {
+  // TODO(ishell): defer blocks when it works.
+  Label miss(this /*, Label::kDeferred*/);
+  {
+    Variable var_handler(this, MachineRepresentation::kTagged);
+
+    // TODO(ishell): defer blocks when it works.
+    Label if_handler(this, &var_handler), try_polymorphic(this),
+        try_megamorphic(this /*, Label::kDeferred*/),
+        try_polymorphic_name(this /*, Label::kDeferred*/);
+
+    Node* receiver_map = LoadReceiverMap(p->receiver);
+    GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(receiver_map)), &miss);
+
+    // Check monomorphic case.
+    Node* feedback =
+        TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+                           &var_handler, &try_polymorphic);
+    Bind(&if_handler);
+    {
+      Comment("KeyedStoreIC_if_handler");
+      HandleStoreICHandlerCase(p, var_handler.value(), &miss, kSupportElements);
+    }
+
+    Bind(&try_polymorphic);
+    {
+      // Check polymorphic case.
+      Comment("KeyedStoreIC_try_polymorphic");
+      GotoIfNot(
+          WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+          &try_megamorphic);
+      Label if_transition_handler(this);
+      Variable var_transition_map_cell(this, MachineRepresentation::kTagged);
+      HandleKeyedStorePolymorphicCase(receiver_map, feedback, &if_handler,
+                                      &var_handler, &if_transition_handler,
+                                      &var_transition_map_cell, &miss);
+      Bind(&if_transition_handler);
+      Comment("KeyedStoreIC_polymorphic_transition");
+      {
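+        // Transition handlers are either a Code object or a Tuple2 of
+        // (validity cell, code handler); the tuple form additionally checks
+        // that the prototype chain is still valid.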
+        Node* handler = var_handler.value();
+
+        Label call_handler(this);
+        Variable var_code_handler(this, MachineRepresentation::kTagged);
+        var_code_handler.Bind(handler);
+        GotoIfNot(IsTuple2Map(LoadMap(handler)), &call_handler);
+        {
+          CSA_ASSERT(this, IsTuple2Map(LoadMap(handler)));
+
+          // Check validity cell.
+          Node* validity_cell = LoadObjectField(handler, Tuple2::kValue1Offset);
+          Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+          GotoIf(
+              WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
+              &miss);
+
+          var_code_handler.Bind(
+              LoadObjectField(handler, Tuple2::kValue2Offset));
+          Goto(&call_handler);
+        }
+
+        Bind(&call_handler);
+        {
+          Node* code_handler = var_code_handler.value();
+          CSA_ASSERT(this, IsCodeMap(LoadMap(code_handler)));
+
+          Node* transition_map =
+              LoadWeakCellValue(var_transition_map_cell.value(), &miss);
+          StoreTransitionDescriptor descriptor(isolate());
+          TailCallStub(descriptor, code_handler, p->context, p->receiver,
+                       p->name, transition_map, p->value, p->slot, p->vector);
+        }
+      }
+    }
+
+    Bind(&try_megamorphic);
+    {
+      // Check megamorphic case.
+      Comment("KeyedStoreIC_try_megamorphic");
+      GotoIfNot(
+          WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+          &try_polymorphic_name);
+      TailCallStub(
+          CodeFactory::KeyedStoreIC_Megamorphic(isolate(), language_mode),
+          p->context, p->receiver, p->name, p->value, p->slot, p->vector);
+    }
+
+    Bind(&try_polymorphic_name);
+    {
+      // We might have a name in feedback, and a fixed array in the next slot.
+      Comment("KeyedStoreIC_try_polymorphic_name");
+      GotoIfNot(WordEqual(feedback, p->name), &miss);
+      // If the name comparison succeeded, we know we have a FixedArray with
+      // at least one map/handler pair.
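+      // As in KeyedLoadIC, the extra kPointerSize skips the name stored at
+      // {slot}; the array is loaded from {slot + 1}.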
+      Node* offset = ElementOffsetFromIndex(
+          p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
+          FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
+      Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
+      HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler,
+                            &miss, 1);
+    }
+  }
+  Bind(&miss);
+  {
+    Comment("KeyedStoreIC_miss");
+    TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
+                    p->vector, p->receiver, p->name);
+  }
+}
+
+//////////////////// Public methods.
+
+void AccessorAssembler::GenerateLoadIC() {
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  LoadICParameters p(context, receiver, name, slot, vector);
+  LoadIC(&p);
+}
+
+void AccessorAssembler::GenerateLoadICTrampoline() {
+  typedef LoadDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* context = Parameter(Descriptor::kContext);
+  Node* vector = LoadFeedbackVectorForStub();
+
+  LoadICParameters p(context, receiver, name, slot, vector);
+  LoadIC(&p);
+}
+
+void AccessorAssembler::GenerateLoadICProtoArray(
+    bool throw_reference_error_if_nonexistent) {
+  typedef LoadICProtoArrayDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* handler = Parameter(Descriptor::kHandler);
+  Node* context = Parameter(Descriptor::kContext);
+
+  LoadICParameters p(context, receiver, name, slot, vector);
+  LoadICProtoArray(&p, handler, throw_reference_error_if_nonexistent);
+}
+
+void AccessorAssembler::GenerateLoadField() {
+  typedef LoadFieldDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = nullptr;
+  Node* slot = nullptr;
+  Node* vector = nullptr;
+  Node* context = Parameter(Descriptor::kContext);
+  LoadICParameters p(context, receiver, name, slot, vector);
+
+  ExitPoint direct_exit(this);
+
+  HandleLoadICSmiHandlerCase(&p, receiver, Parameter(Descriptor::kSmiHandler),
+                             nullptr, &direct_exit, kOnlyProperties);
+}
+
+void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
+  typedef LoadGlobalWithVectorDescriptor Descriptor;
+
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  LoadICParameters p(context, nullptr, name, slot, vector);
+  LoadGlobalIC(&p, typeof_mode);
+}
+
+void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
+  typedef LoadGlobalDescriptor Descriptor;
+
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* context = Parameter(Descriptor::kContext);
+  Node* vector = LoadFeedbackVectorForStub();
+
+  LoadICParameters p(context, nullptr, name, slot, vector);
+  LoadGlobalIC(&p, typeof_mode);
+}
+
+void AccessorAssembler::GenerateKeyedLoadIC() {
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  LoadICParameters p(context, receiver, name, slot, vector);
+  KeyedLoadIC(&p);
+}
+
+void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
+  typedef LoadDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* context = Parameter(Descriptor::kContext);
+  Node* vector = LoadFeedbackVectorForStub();
+
+  LoadICParameters p(context, receiver, name, slot, vector);
+  KeyedLoadIC(&p);
+}
+
+void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  LoadICParameters p(context, receiver, name, slot, vector);
+  KeyedLoadICGeneric(&p);
+}
+
+void AccessorAssembler::GenerateStoreIC() {
+  typedef StoreWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  StoreICParameters p(context, receiver, name, value, slot, vector);
+  StoreIC(&p);
+}
+
+void AccessorAssembler::GenerateStoreICTrampoline() {
+  typedef StoreDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* context = Parameter(Descriptor::kContext);
+  Node* vector = LoadFeedbackVectorForStub();
+
+  StoreICParameters p(context, receiver, name, value, slot, vector);
+  StoreIC(&p);
+}
+
+void AccessorAssembler::GenerateKeyedStoreIC(LanguageMode language_mode) {
+  typedef StoreWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  StoreICParameters p(context, receiver, name, value, slot, vector);
+  KeyedStoreIC(&p, language_mode);
+}
+
+void AccessorAssembler::GenerateKeyedStoreICTrampoline(
+    LanguageMode language_mode) {
+  typedef StoreDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* context = Parameter(Descriptor::kContext);
+  Node* vector = LoadFeedbackVectorForStub();
+
+  StoreICParameters p(context, receiver, name, value, slot, vector);
+  KeyedStoreIC(&p, language_mode);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/ic/accessor-assembler.h b/src/ic/accessor-assembler.h
new file mode 100644
index 0000000..9bc2873
--- /dev/null
+++ b/src/ic/accessor-assembler.h
@@ -0,0 +1,284 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
+#define V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class CodeAssemblerState;
+}
+
+class ExitPoint;
+
+class AccessorAssembler : public CodeStubAssembler {
+ public:
+  typedef compiler::Node Node;
+
+  explicit AccessorAssembler(compiler::CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
+
+  void GenerateLoadIC();
+  void GenerateLoadField();
+  void GenerateLoadICTrampoline();
+  void GenerateKeyedLoadIC();
+  void GenerateKeyedLoadICTrampoline();
+  void GenerateKeyedLoadIC_Megamorphic();
+  void GenerateStoreIC();
+  void GenerateStoreICTrampoline();
+
+  void GenerateLoadICProtoArray(bool throw_reference_error_if_nonexistent);
+
+  void GenerateLoadGlobalIC(TypeofMode typeof_mode);
+  void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
+
+  void GenerateKeyedStoreIC(LanguageMode language_mode);
+  void GenerateKeyedStoreICTrampoline(LanguageMode language_mode);
+
+  void TryProbeStubCache(StubCache* stub_cache, Node* receiver, Node* name,
+                         Label* if_handler, Variable* var_handler,
+                         Label* if_miss);
+
+  Node* StubCachePrimaryOffsetForTesting(Node* name, Node* map) {
+    return StubCachePrimaryOffset(name, map);
+  }
+  Node* StubCacheSecondaryOffsetForTesting(Node* name, Node* map) {
+    return StubCacheSecondaryOffset(name, map);
+  }
+
+  struct LoadICParameters {
+    LoadICParameters(Node* context, Node* receiver, Node* name, Node* slot,
+                     Node* vector)
+        : context(context),
+          receiver(receiver),
+          name(name),
+          slot(slot),
+          vector(vector) {}
+
+    Node* context;
+    Node* receiver;
+    Node* name;
+    Node* slot;
+    Node* vector;
+  };
+
+  void LoadGlobalIC_TryPropertyCellCase(
+      Node* vector, Node* slot, ExitPoint* exit_point, Label* try_handler,
+      Label* miss, ParameterMode slot_mode = SMI_PARAMETERS);
+  void LoadGlobalIC_TryHandlerCase(const LoadICParameters* p,
+                                   TypeofMode typeof_mode,
+                                   ExitPoint* exit_point, Label* miss);
+  void LoadGlobalIC_MissCase(const LoadICParameters* p, ExitPoint* exit_point);
+
+ protected:
+  struct StoreICParameters : public LoadICParameters {
+    StoreICParameters(Node* context, Node* receiver, Node* name, Node* value,
+                      Node* slot, Node* vector)
+        : LoadICParameters(context, receiver, name, slot, vector),
+          value(value) {}
+    Node* value;
+  };
+
+  enum ElementSupport { kOnlyProperties, kSupportElements };
+  void HandleStoreICHandlerCase(
+      const StoreICParameters* p, Node* handler, Label* miss,
+      ElementSupport support_elements = kOnlyProperties);
+
+ private:
+  // Stub generation entry points.
+
+  void LoadIC(const LoadICParameters* p);
+  void LoadICProtoArray(const LoadICParameters* p, Node* handler,
+                        bool throw_reference_error_if_nonexistent);
+  void LoadGlobalIC(const LoadICParameters* p, TypeofMode typeof_mode);
+  void KeyedLoadIC(const LoadICParameters* p);
+  void KeyedLoadICGeneric(const LoadICParameters* p);
+  void StoreIC(const StoreICParameters* p);
+  void KeyedStoreIC(const StoreICParameters* p, LanguageMode language_mode);
+
+  // IC dispatcher behavior.
+
+  // Checks the monomorphic case. Returns the {feedback} entry of the vector.
+  Node* TryMonomorphicCase(Node* slot, Node* vector, Node* receiver_map,
+                           Label* if_handler, Variable* var_handler,
+                           Label* if_miss);
+  void HandlePolymorphicCase(Node* receiver_map, Node* feedback,
+                             Label* if_handler, Variable* var_handler,
+                             Label* if_miss, int unroll_count);
+  void HandleKeyedStorePolymorphicCase(Node* receiver_map, Node* feedback,
+                                       Label* if_handler, Variable* var_handler,
+                                       Label* if_transition_handler,
+                                       Variable* var_transition_map_cell,
+                                       Label* if_miss);
+
+  // LoadIC implementation.
+
+  void HandleLoadICHandlerCase(
+      const LoadICParameters* p, Node* handler, Label* miss,
+      ElementSupport support_elements = kOnlyProperties);
+
+  void HandleLoadICSmiHandlerCase(const LoadICParameters* p, Node* holder,
+                                  Node* smi_handler, Label* miss,
+                                  ExitPoint* exit_point,
+                                  ElementSupport support_elements);
+
+  void HandleLoadICProtoHandlerCase(const LoadICParameters* p, Node* handler,
+                                    Variable* var_holder,
+                                    Variable* var_smi_handler,
+                                    Label* if_smi_handler, Label* miss,
+                                    ExitPoint* exit_point,
+                                    bool throw_reference_error_if_nonexistent);
+
+  Node* EmitLoadICProtoArrayCheck(const LoadICParameters* p, Node* handler,
+                                  Node* handler_length, Node* handler_flags,
+                                  Label* miss,
+                                  bool throw_reference_error_if_nonexistent);
+
+  // LoadGlobalIC implementation.
+
+  void HandleLoadGlobalICHandlerCase(const LoadICParameters* p, Node* handler,
+                                     Label* miss, ExitPoint* exit_point,
+                                     bool throw_reference_error_if_nonexistent);
+
+  // StoreIC implementation.
+
+  void HandleStoreICElementHandlerCase(const StoreICParameters* p,
+                                       Node* handler, Label* miss);
+
+  void HandleStoreICProtoHandler(const StoreICParameters* p, Node* handler,
+                                 Label* miss);
+  // If |transition| is nullptr, a normal field store is generated;
+  // otherwise, a transitioning store.
+  void HandleStoreICSmiHandlerCase(Node* handler_word, Node* holder,
+                                   Node* value, Node* transition, Label* miss);
+  // If |transition| is nullptr, a normal field store is generated;
+  // otherwise, a transitioning store.
+  void HandleStoreFieldAndReturn(Node* handler_word, Node* holder,
+                                 Representation representation, Node* value,
+                                 Node* transition, Label* miss);
+
+  // KeyedLoadIC_Generic implementation.
+
+  void GenericElementLoad(Node* receiver, Node* receiver_map,
+                          Node* instance_type, Node* index, Label* slow);
+
+  void GenericPropertyLoad(Node* receiver, Node* receiver_map,
+                           Node* instance_type, Node* key,
+                           const LoadICParameters* p, Label* slow);
+
+  // Low-level helpers.
+
+  Node* PrepareValueForStore(Node* handler_word, Node* holder,
+                             Representation representation, Node* transition,
+                             Node* value, Label* bailout);
+
+  // Extends properties backing store by JSObject::kFieldsAdded elements.
+  void ExtendPropertiesBackingStore(Node* object);
+
+  void StoreNamedField(Node* handler_word, Node* object, bool is_inobject,
+                       Representation representation, Node* value,
+                       bool transition_to_field, Label* bailout);
+
+  void EmitFastElementsBoundsCheck(Node* object, Node* elements,
+                                   Node* intptr_index,
+                                   Node* is_jsarray_condition, Label* miss);
+  void EmitElementLoad(Node* object, Node* elements, Node* elements_kind,
+                       Node* key, Node* is_jsarray_condition, Label* if_hole,
+                       Label* rebox_double, Variable* var_double_value,
+                       Label* unimplemented_elements_kind, Label* out_of_bounds,
+                       Label* miss, ExitPoint* exit_point);
+  void CheckPrototype(Node* prototype_cell, Node* name, Label* miss);
+  void NameDictionaryNegativeLookup(Node* object, Node* name, Label* miss);
+
+  // Stub cache access helpers.
+
+  // This enum is used here as a replacement for StubCache::Table to avoid
+  // including the stub cache header.
+  enum StubCacheTable : int;
+
+  Node* StubCachePrimaryOffset(Node* name, Node* map);
+  Node* StubCacheSecondaryOffset(Node* name, Node* seed);
+
+  void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
+                              Node* entry_offset, Node* name, Node* map,
+                              Label* if_handler, Variable* var_handler,
+                              Label* if_miss);
+};
+
+// Abstraction over direct and indirect exit points. Direct exits correspond to
+// tailcalls and Return, while indirect exits store the result in a variable
+// and then jump to an exit label.
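+//
+// A minimal usage sketch (illustrative; names are not from this patch):
+//   CodeAssemblerLabel done(assembler);
+//   CodeAssemblerVariable var_result(assembler,
+//                                    MachineRepresentation::kTagged);
+//   ExitPoint exit_point(assembler, &done, &var_result);
+//   exit_point.Return(result);  // Binds var_result and jumps to done.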
+class ExitPoint {
+ private:
+  typedef compiler::Node Node;
+  typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
+  typedef compiler::CodeAssemblerVariable CodeAssemblerVariable;
+
+ public:
+  explicit ExitPoint(CodeStubAssembler* assembler)
+      : ExitPoint(assembler, nullptr, nullptr) {}
+  ExitPoint(CodeStubAssembler* assembler, CodeAssemblerLabel* out,
+            CodeAssemblerVariable* var_result)
+      : out_(out), var_result_(var_result), asm_(assembler) {
+    DCHECK_EQ(out != nullptr, var_result != nullptr);
+  }
+
+  template <class... TArgs>
+  void ReturnCallRuntime(Runtime::FunctionId function, Node* context,
+                         TArgs... args) {
+    if (IsDirect()) {
+      asm_->TailCallRuntime(function, context, args...);
+    } else {
+      IndirectReturn(asm_->CallRuntime(function, context, args...));
+    }
+  }
+
+  template <class... TArgs>
+  void ReturnCallStub(Callable const& callable, Node* context, TArgs... args) {
+    if (IsDirect()) {
+      asm_->TailCallStub(callable, context, args...);
+    } else {
+      IndirectReturn(asm_->CallStub(callable, context, args...));
+    }
+  }
+
+  template <class... TArgs>
+  void ReturnCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                      Node* context, TArgs... args) {
+    if (IsDirect()) {
+      asm_->TailCallStub(descriptor, target, context, args...);
+    } else {
+      IndirectReturn(asm_->CallStub(descriptor, target, context, args...));
+    }
+  }
+
+  void Return(Node* const result) {
+    if (IsDirect()) {
+      asm_->Return(result);
+    } else {
+      IndirectReturn(result);
+    }
+  }
+
+  bool IsDirect() const { return out_ == nullptr; }
+
+ private:
+  void IndirectReturn(Node* const result) {
+    var_result_->Bind(result);
+    asm_->Goto(out_);
+  }
+
+  CodeAssemblerLabel* const out_;
+  CodeAssemblerVariable* const var_result_;
+  CodeStubAssembler* const asm_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
diff --git a/src/ic/arm/handler-compiler-arm.cc b/src/ic/arm/handler-compiler-arm.cc
index 6145d43..ebef63c 100644
--- a/src/ic/arm/handler-compiler-arm.cc
+++ b/src/ic/arm/handler-compiler-arm.cc
@@ -135,14 +135,6 @@
   __ add(sp, sp, Operand(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -189,27 +181,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ ldr(result,
-         FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ mov(r0, scratch1);
-  __ Ret();
-}
-
-
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
@@ -228,10 +199,12 @@
   __ b(ne, miss);
 }
 
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+         Runtime::FunctionForId(id)->nargs);
 
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
@@ -239,15 +212,7 @@
   __ push(name);
   __ push(receiver);
   __ push(holder);
-}
 
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm, Register receiver, Register holder, Register name,
-    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
-  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
-         Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
   __ CallRuntime(id);
 }
 
@@ -355,58 +320,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ mov(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ ldr(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
-    __ tst(scratch, Operand(Map::Deprecated::kMask));
-    __ b(ne, miss);
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ ldr(scratch,
-         FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ cmp(value_reg, scratch);
-  __ b(ne, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ ldr(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ b(ne, miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -538,13 +451,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ Move(r0, value);
-  __ Ret();
-}
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -609,8 +515,18 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name(), receiver(), holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot(), vector());
+  } else {
+    __ Push(scratch3(), scratch2());  // slot, vector
+  }
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc
index babf497..b749027 100644
--- a/src/ic/arm/ic-arm.cc
+++ b/src/ic/arm/ic-arm.cc
@@ -6,530 +6,12 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as elements or name clobbering
-//           one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
-                                   Register elements, Register name,
-                                   Register result, Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry check that the value is a normal
-  // property.
-  __ bind(&done);  // scratch2 == elements + 4 * index
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  __ b(ne, miss);
-
-  // Get the value at the masked, scaled index and return.
-  __ ldr(result,
-         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// value:    The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
-                                    Register elements, Register name,
-                                    Register value, Register scratch1,
-                                    Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ bind(&done);  // scratch2 == elements + 4 * index
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY))
-      << kSmiTagSize;
-  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
-  __ b(ne, miss);
-
-  // Store the value at the masked, scaled index and return.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
-  __ str(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = r0;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
-                                     JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), r0, r3, r4);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return r3; }
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-
-  __ Push(receiver, name, slot, vector);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r4, r5);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-
-  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r4, r5);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Perform tail call to the entry.
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreWithVectorDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister(),
-          StoreWithVectorDescriptor::ReceiverRegister(),
-          StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  Register scratch = r4;
-  Register address = r5;
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     scratch, address));
-
-  if (check_map == kCheckMap) {
-    __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ cmp(elements_map,
-           Operand(masm->isolate()->factory()->fixed_array_map()));
-    __ b(ne, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element
-  Label holecheck_passed1;
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ldr(scratch, MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
-  __ cmp(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
-  __ b(ne, &holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(scratch, key, Operand(Smi::FromInt(1)));
-    __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
-  __ Ret();
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(scratch, key, Operand(Smi::FromInt(1)));
-    __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
-  __ str(value, MemOperand(address));
-  // Update write barrier for the elements array address.
-  __ mov(scratch, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-    __ b(ne, slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so
-  // go to the runtime.
-  __ add(address, elements,
-         Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
-                 kHeapObjectTag));
-  __ ldr(scratch, MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
-  __ cmp(scratch, Operand(kHoleNanUpper32));
-  __ b(ne, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(scratch, key, Operand(Smi::FromInt(1)));
-    __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret();
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
-  __ b(ne, &non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // ---------- S t a t e --------------
-  //  -- r0     : value
-  //  -- r1     : key
-  //  -- r2     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-
-  // Register usage.
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(receiver.is(r1));
-  DCHECK(key.is(r2));
-  DCHECK(value.is(r0));
-  Register receiver_map = r3;
-  Register elements_map = r6;
-  Register elements = r9;  // Elements array of the receiver.
-  // r4 and r5 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ b(ne, &slow);
-  // Check if the object is a JS array or not.
-  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ cmp(r4, Operand(JS_ARRAY_TYPE));
-  __ b(eq, &array);
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  __ cmp(r4, Operand(JS_OBJECT_TYPE));
-  __ b(lo, &slow);
-
-  // Object case: Check key against length in the elements array.
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(ip));
-  __ b(lo, &fast_object);
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // r0: value.
-  // r1: key.
-  // r2: receiver.
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ ldr(r4, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(r4, &slow);
-
-  // We use register r8, because otherwise probing the megamorphic stub cache
-  // would require pushing temporaries on the stack.
-  // TODO(mvstanton): quit using register r8 when
-  // FLAG_enable_embedded_constant_pool is turned on.
-  DCHECK(!FLAG_enable_embedded_constant_pool);
-  Register temporary2 = r8;
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-
-  DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ mov(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r5,
-                                                     temporary2, r6, r9);
-  // Cache miss.
-  __ b(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
-  __ b(ne, &slow);  // Only support writing to array[array.length].
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(ip));
-  __ b(hs, &slow);
-  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ b(ne, &check_if_double_array);
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ b(ne, &slow);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
-  __ bind(&array);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ cmp(key, Operand(ip));
-  __ b(hs, &extra);
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
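
Read as a whole, GenerateMegamorphic is a three-way dispatch on the key: smi keys take the fast in-place or grow paths, unique-name keys probe the store stub cache with a dummy vector and slot, and everything else (plus access-checked receivers and JSValue wrappers) lands in the runtime. A rough control-flow model, illustrative only and not the emitted code:

    enum class StorePath { kFastElements, kStubCacheProbe, kRuntimeSlow };

    StorePath ClassifyKeyedStore(bool key_is_smi, bool key_is_unique_name) {
      if (key_is_smi) return StorePath::kFastElements;  // element store
      if (key_is_unique_name) return StorePath::kStubCacheProbe;  // named
      return StorePath::kRuntimeSlow;  // runtime SetProperty, never returns
    }
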
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = r5;
-  DCHECK(receiver.is(r1));
-  DCHECK(name.is(r2));
-  DCHECK(value.is(r0));
-  DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r3));
-  DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r4));
-
-  __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r6, r9);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r6, r9);
-  __ Ret();
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r6, r9);
-  GenerateMiss(masm);
-}
-
-
-#undef __
-
-
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
     case Token::EQ_STRICT:
@@ -585,9 +67,7 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(cmp_instruction_address), delta);
+    LOG(isolate, PatchIC(address, cmp_instruction_address, delta));
   }
 
   Address patch_address =
diff --git a/src/ic/arm/ic-compiler-arm.cc b/src/ic/arm/ic-compiler-arm.cc
deleted file mode 100644
index 3185231..0000000
--- a/src/ic/arm/ic-compiler-arm.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister());
-
-  __ mov(r0, Operand(Smi::FromInt(language_mode)));
-  __ Push(r0);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/ic/arm/stub-cache-arm.cc b/src/ic/arm/stub-cache-arm.cc
deleted file mode 100644
index b0f93e3..0000000
--- a/src/ic/arm/stub-cache-arm.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits.
-                       Register offset, Register scratch, Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
-  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
-  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  DCHECK(value_off_addr > key_off_addr);
-  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
-  DCHECK(map_off_addr > key_off_addr);
-  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ add(offset_scratch, offset, Operand(offset, LSL, 1));
-
-  // Calculate the base address of the entry.
-  __ add(base_addr, offset_scratch, Operand(key_offset));
-
-  // Check that the key in the entry matches the name.
-  __ ldr(ip, MemOperand(base_addr, 0));
-  __ cmp(name, ip);
-  __ b(ne, &miss);
-
-  // Check the map matches.
-  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ cmp(ip, scratch2);
-  __ b(ne, &miss);
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ jmp(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ jmp(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  DCHECK(sizeof(Entry) == 12);
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Check scratch, extra and extra2 registers are valid.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
-  // extra3 don't conflict with the vector and slot registers, which need
-  // to be preserved for a handler call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    Register vector, slot;
-    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
-      vector = StoreWithVectorDescriptor::VectorRegister();
-      slot = StoreWithVectorDescriptor::SlotRegister();
-    } else {
-      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
-      vector = LoadWithVectorDescriptor::VectorRegister();
-      slot = LoadWithVectorDescriptor::SlotRegister();
-    }
-    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
-                      extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ add(scratch, scratch, Operand(ip));
-  __ eor(scratch, scratch, Operand(kPrimaryMagic));
-  __ mov(ip, Operand(kPrimaryTableSize - 1));
-  __ and_(scratch, scratch, Operand(ip, LSL, kCacheIndexShift));
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ sub(scratch, scratch, Operand(name));
-  __ add(scratch, scratch, Operand(kSecondaryMagic));
-  __ mov(ip, Operand(kSecondaryTableSize - 1));
-  __ and_(scratch, scratch, Operand(ip, LSL, kCacheIndexShift));
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
-                      extra3);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_ARM
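
The probe hashing in the deleted file matches the portable StubCache scheme: the primary index mixes the name's hash with the receiver's map bits, xors in a magic constant, and masks to the table size; the secondary index is derived from the primary by subtracting the name bits and adding a second constant. A scalar model with the constants passed in, since the real values live in src/ic/stub-cache.h:

    #include <stdint.h>

    uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_bits,
                           uint32_t primary_magic, uint32_t table_size,
                           uint32_t index_shift) {
      uint32_t h = (name_hash + map_bits) ^ primary_magic;
      return h & ((table_size - 1) << index_shift);
    }

    uint32_t SecondaryOffset(uint32_t primary, uint32_t name_bits,
                             uint32_t secondary_magic, uint32_t table_size,
                             uint32_t index_shift) {
      uint32_t h = (primary - name_bits) + secondary_magic;
      return h & ((table_size - 1) << index_shift);
    }
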
diff --git a/src/ic/arm64/handler-compiler-arm64.cc b/src/ic/arm64/handler-compiler-arm64.cc
index 58d0bb7..b7dc589 100644
--- a/src/ic/arm64/handler-compiler-arm64.cc
+++ b/src/ic/arm64/handler-compiler-arm64.cc
@@ -44,14 +44,6 @@
   __ Drop(2);
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -91,31 +83,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ Ldr(result,
-         FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  // TryGetFunctionPrototype can't put the result directly in x0 because the
-  // 3 input registers can't alias and we call this function from
-  // LoadIC::GenerateFunctionPrototype, where receiver is x0. So we explicitly
-  // move the result into x0.
-  __ Mov(x0, scratch1);
-  __ Ret();
-}
-
-
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
@@ -132,25 +99,18 @@
   __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
 }
 
-
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
-
-  __ Push(name, receiver, holder);
-}
-
-
 static void CompileCallLoadPropertyWithInterceptor(
     MacroAssembler* masm, Register receiver, Register holder, Register name,
     Handle<JSObject> holder_obj, Runtime::FunctionId id) {
   DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
          Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name, receiver, holder);
+
   __ CallRuntime(id);
 }
 
@@ -386,57 +346,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ Mov(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ Ldrsw(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
-    __ TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, miss);
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ Ldr(scratch,
-         FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ Cmp(value_reg, scratch);
-  __ B(ne, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ Ldr(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ B(ne, miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -572,13 +481,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ LoadObject(x0, value);
-  __ Ret();
-}
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(!AreAliased(receiver(), this->name(), scratch1(), scratch2(),
@@ -644,8 +546,18 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name(), receiver(), holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot(), vector());
+  } else {
+    __ Push(scratch3(), scratch2());  // slot, vector
+  }
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc
index 0ced207..8c7d4f2 100644
--- a/src/ic/arm64/ic-arm64.cc
+++ b/src/ic/arm64/ic-arm64.cc
@@ -6,489 +6,12 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
-#define __ ACCESS_MASM(masm)
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done.
-// The scratch registers need to be different from elements, name and result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
-                                   Register elements, Register name,
-                                   Register result, Register scratch1,
-                                   Register scratch2) {
-  DCHECK(!AreAliased(elements, name, scratch1, scratch2));
-  DCHECK(!AreAliased(result, scratch1, scratch2));
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry check that the value is a normal property.
-  __ Bind(&done);
-
-  static const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
-  __ B(ne, miss);
-
-  // Get the value at the masked, scaled index and return.
-  __ Ldr(result,
-         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// value:    The value to store (never clobbered).
-//
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
-                                    Register elements, Register name,
-                                    Register value, Register scratch1,
-                                    Register scratch2) {
-  DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ Bind(&done);
-
-  static const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  static const int kTypeAndReadOnlyMask =
-      PropertyDetails::TypeField::kMask |
-      PropertyDetails::AttributesField::encode(READ_ONLY);
-  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
-  __ Tst(scratch1, kTypeAndReadOnlyMask);
-  __ B(ne, miss);
-
-  // Store the value at the masked, scaled index and return.
-  static const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
-  __ Str(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ Mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs);
-}
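
Both dictionary helpers above walk the same NameDictionary layout: entries start at kElementsStartOffset and are triples of (key, value, details), so the value lives one pointer past the entry start and the smi-encoded PropertyDetails two pointers past it. A hypothetical struct mirroring those offsets, for orientation only:

    struct NameDictionaryEntrySketch {
      void* key;      // property name, matched by GeneratePositiveLookup
      void* value;    // kElementsStartOffset + 1 * kPointerSize
      void* details;  // smi PropertyDetails, kDetailsOffset = +2 words
    };
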
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = x0;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-  Label slow;
-
-  __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
-                                     JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), x0, x3, x4);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ Bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-  ASM_LOCATION("LoadIC::GenerateMiss");
-
-  DCHECK(!AreAliased(x4, x5, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, x4, x5);
-
-  // Perform tail call to the entry.
-  __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
-          LoadWithVectorDescriptor::NameRegister(),
-          LoadWithVectorDescriptor::SlotRegister(),
-          LoadWithVectorDescriptor::VectorRegister());
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, x10, x11);
-
-  __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
-          LoadWithVectorDescriptor::NameRegister(),
-          LoadWithVectorDescriptor::SlotRegister(),
-          LoadWithVectorDescriptor::VectorRegister());
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreWithVectorDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister(),
-          StoreWithVectorDescriptor::ReceiverRegister(),
-          StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  ASM_LOCATION("KeyedStoreIC::GenerateMiss");
-  StoreIC_PushArgs(masm);
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  ASM_LOCATION("KeyedStoreIC::GenerateSlow");
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     x10, x11));
-
-  Label transition_smi_elements;
-  Label transition_double_elements;
-  Label fast_double_without_map_check;
-  Label non_double_value;
-  Label finish_store;
-
-  __ Bind(fast_object);
-  if (check_map == kCheckMap) {
-    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ Cmp(elements_map,
-           Operand(masm->isolate()->factory()->fixed_array_map()));
-    __ B(ne, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because there
-  // may be a callback on the element.
-  Label holecheck_passed;
-  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
-  __ Ldr(x11, MemOperand(x10));
-  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
-  __ bind(&holecheck_passed);
-
-  // Smi stores don't require further checks.
-  __ JumpIfSmi(value, &finish_store);
-
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
-
-  __ Bind(&finish_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Add(x10, key, Smi::FromInt(1));
-    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-
-  Register address = x11;
-  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
-  __ Str(value, MemOperand(address));
-
-  Label dont_record_write;
-  __ JumpIfSmi(value, &dont_record_write);
-
-  // Update write barrier for the elements array address.
-  __ Mov(x10, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-  __ Bind(&dont_record_write);
-  __ Ret();
-
-
-  __ Bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so, go to
-  // the runtime.
-  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
-  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
-  __ Ldr(x11, MemOperand(x10));
-  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
-
-  __ Bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Add(x10, key, Smi::FromInt(1));
-    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret();
-
-
-  __ Bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ B(&fast_double_without_map_check);
-
-  __ Bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, x10, x11, slow);
-
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ B(&finish_store);
-
-  __ Bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
-  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, x10, x11, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ B(&finish_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  ASM_LOCATION("KeyedStoreIC::GenerateMegamorphic");
-  Label slow;
-  Label array;
-  Label fast_object;
-  Label extra;
-  Label fast_object_grow;
-  Label fast_double_grow;
-  Label fast_double;
-  Label maybe_name_key;
-  Label miss;
-
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(receiver.is(x1));
-  DCHECK(key.is(x2));
-  DCHECK(value.is(x0));
-
-  Register receiver_map = x3;
-  Register elements = x4;
-  Register elements_map = x5;
-
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  __ JumpIfSmi(receiver, &slow);
-  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ TestAndBranchIfAnySet(x10, (1 << Map::kIsAccessCheckNeeded), &slow);
-
-  // Check if the object is a JS array or not.
-  Register instance_type = x10;
-  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
-  __ B(eq, &array);
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  __ Cmp(instance_type, JS_OBJECT_TYPE);
-  __ B(lo, &slow);
-
-  // Object case: Check key against length in the elements array.
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Cmp(x10, Operand::UntagSmi(key));
-  __ B(hi, &fast_object);
-
-
-  __ Bind(&slow);
-  // Slow case, handle jump to runtime.
-  // Live values:
-  //  x0: value
-  //  x1: key
-  //  x2: receiver
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ Ldr(x10, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(x10, &slow);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ Mov(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, x5,
-                                                     x6, x7, x8);
-  // Cache miss.
-  __ B(&miss);
-
-  __ Bind(&extra);
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Cmp(x10, Operand::UntagSmi(key));
-  __ B(ls, &slow);
-
-  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ B(eq, &fast_object_grow);
-  __ Cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ B(eq, &fast_double_grow);
-  __ B(&slow);
-
-
-  __ Bind(&array);
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
-
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Cmp(x10, Operand::UntagSmi(key));
-  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
-  __ B(lo, &slow);
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // Tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Label miss;
-  Register value = StoreDescriptor::ValueRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register dictionary = x5;
-  DCHECK(!AreAliased(value, receiver, name,
-                     StoreWithVectorDescriptor::SlotRegister(),
-                     StoreWithVectorDescriptor::VectorRegister(), x5, x6, x7));
-
-  __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x6, x7);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1, x6, x7);
-  __ Ret();
-
-  // Cache miss: Jump to runtime.
-  __ Bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1, x6, x7);
-  GenerateMiss(masm);
-}
-
-
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
     case Token::EQ_STRICT:
@@ -536,9 +59,7 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  Patching ic at %p, marker=%p, SMI check=%p\n",
-           static_cast<void*>(address), static_cast<void*>(info_address),
-           static_cast<void*>(info.SmiCheck()));
+    LOG(isolate, PatchIC(address, info_address, info.SmiCheckDelta()));
   }
 
   // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
diff --git a/src/ic/arm64/ic-compiler-arm64.cc b/src/ic/arm64/ic-compiler-arm64.cc
deleted file mode 100644
index c99c637..0000000
--- a/src/ic/arm64/ic-compiler-arm64.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  ASM_LOCATION("PropertyICCompiler::GenerateRuntimeSetProperty");
-
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister());
-
-  __ Mov(x10, Smi::FromInt(language_mode));
-  __ Push(x10);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/ic/arm64/stub-cache-arm64.cc b/src/ic/arm64/stub-cache-arm64.cc
deleted file mode 100644
index 81c8207..0000000
--- a/src/ic/arm64/stub-cache-arm64.cc
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-// Probe primary or secondary table.
-// If the entry is found in the cache, the generated code jumps to the first
-// instruction of the stub in the cache.
-// If there is a miss, the code falls through.
-//
-// 'receiver', 'name' and 'offset' registers are preserved on miss.
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits.
-                       Register offset, Register scratch, Register scratch2,
-                       Register scratch3) {
-  // Some code below relies on the fact that the Entry struct contains
-  // 3 pointers (name, code, map).
-  STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
-
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
-  uintptr_t value_off_addr =
-      reinterpret_cast<uintptr_t>(value_offset.address());
-  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
-  Label miss;
-
-  DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));
-
-  // Multiply by 3 because there are 3 fields per entry.
-  __ Add(scratch3, offset, Operand(offset, LSL, 1));
-
-  // Calculate the base address of the entry.
-  __ Mov(scratch, key_offset);
-  __ Add(
-      scratch, scratch,
-      Operand(scratch3, LSL, kPointerSizeLog2 - StubCache::kCacheIndexShift));
-
-  // Check that the key in the entry matches the name.
-  __ Ldr(scratch2, MemOperand(scratch));
-  __ Cmp(name, scratch2);
-  __ B(ne, &miss);
-
-  // Check the map matches.
-  __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
-  __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Cmp(scratch2, scratch3);
-  __ B(ne, &miss);
-
-  // Get the code entry from the cache.
-  __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ B(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ B(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
-  __ Br(scratch);
-
-  // Miss: fall through.
-  __ Bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Make sure extra and extra2 registers are valid.
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
-  // extra3 don't conflict with the vector and slot registers, which need
-  // to be preserved for a handler call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    Register vector, slot;
-    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
-      vector = StoreWithVectorDescriptor::VectorRegister();
-      slot = StoreWithVectorDescriptor::SlotRegister();
-    } else {
-      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
-      vector = LoadWithVectorDescriptor::VectorRegister();
-      slot = LoadWithVectorDescriptor::SlotRegister();
-    }
-    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
-                      extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Compute the hash for primary table.
-  __ Ldr(scratch.W(), FieldMemOperand(name, Name::kHashFieldOffset));
-  __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Add(scratch, scratch, extra);
-  __ Eor(scratch, scratch, kPrimaryMagic);
-  __ And(scratch, scratch,
-         Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary table.
-  __ Sub(scratch, scratch, Operand(name));
-  __ Add(scratch, scratch, Operand(kSecondaryMagic));
-  __ And(scratch, scratch,
-         Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ Bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
-                      extra3);
-}
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_ARM64
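
Since each stub-cache entry holds three pointers (name, code, map), the deleted arm64 probe converts a masked offset into an entry address by multiplying by three with an add-plus-shift and then scaling up to pointer size. A standalone scalar model of that address computation, with the shift constants passed in:

    #include <stdint.h>

    uintptr_t EntryAddress(uintptr_t key_table_base, uint32_t masked_offset,
                           int pointer_size_log2, int cache_index_shift) {
      uint32_t times_three = masked_offset + (masked_offset << 1);
      return key_table_base +
             (static_cast<uintptr_t>(times_three)
              << (pointer_size_log2 - cache_index_shift));
    }
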
diff --git a/src/ic/call-optimization.cc b/src/ic/call-optimization.cc
index f7a1f69..6780ac4 100644
--- a/src/ic/call-optimization.cc
+++ b/src/ic/call-optimization.cc
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/ic/call-optimization.h"
-
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc
index 05e9031..6a9734d 100644
--- a/src/ic/handler-compiler.cc
+++ b/src/ic/handler-compiler.cc
@@ -24,60 +24,6 @@
   return handle(code);
 }
 
-
-Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
-    Handle<Name> name, Handle<Map> receiver_map) {
-  Isolate* isolate = name->GetIsolate();
-  if (receiver_map->prototype()->IsNull(isolate)) {
-    // TODO(jkummerow/verwaest): If there is no prototype and the property
-    // is nonexistent, introduce a builtin to handle this (fast properties
-    // -> return undefined, dictionary properties -> do negative lookup).
-    return Handle<Code>();
-  }
-  CacheHolderFlag flag;
-  Handle<Map> stub_holder_map =
-      IC::GetHandlerCacheHolder(receiver_map, false, isolate, &flag);
-
-  // If no dictionary mode objects are present in the prototype chain, the load
-  // nonexistent IC stub can be shared for all names for a given map and we use
-  // the empty string for the map cache in that case. If there are dictionary
-  // mode objects involved, we need to do negative lookups in the stub and
-  // therefore the stub will be specific to the name.
-  Handle<Name> cache_name =
-      receiver_map->is_dictionary_map()
-          ? name
-          : Handle<Name>::cast(isolate->factory()->nonexistent_symbol());
-  Handle<Map> current_map = stub_holder_map;
-  Handle<JSObject> last(JSObject::cast(receiver_map->prototype()));
-  while (true) {
-    if (current_map->is_dictionary_map()) cache_name = name;
-    if (current_map->prototype()->IsNull(isolate)) break;
-    if (name->IsPrivate()) {
-      // TODO(verwaest): Use nonexistent_private_symbol.
-      cache_name = name;
-      if (!current_map->has_hidden_prototype()) break;
-    }
-
-    last = handle(JSObject::cast(current_map->prototype()));
-    current_map = handle(last->map());
-  }
-  // Compile the stub that is either shared for all names or
-  // name specific if there are global objects involved.
-  Handle<Code> handler = PropertyHandlerCompiler::Find(
-      cache_name, stub_holder_map, Code::LOAD_IC, flag);
-  if (!handler.is_null()) {
-    TRACE_HANDLER_STATS(isolate, LoadIC_HandlerCacheHit_NonExistent);
-    return handler;
-  }
-
-  TRACE_HANDLER_STATS(isolate, LoadIC_LoadNonexistent);
-  NamedLoadHandlerCompiler compiler(isolate, receiver_map, last, flag);
-  handler = compiler.CompileLoadNonexistent(cache_name);
-  Map::UpdateCodeCache(stub_holder_map, cache_name, handler);
-  return handler;
-}
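
The subtle part of the deleted ComputeLoadNonexistent is the cache key: one nonexistent-load handler can be shared for all names on a given map, cached under a sentinel symbol, unless a dictionary-mode object on the prototype chain, or a private name, forces a name-specific stub with negative lookups. A hedged sketch of just that choice, with std::string standing in for V8's Name handles:

    #include <string>

    std::string NonexistentCacheKey(const std::string& name,
                                    bool saw_dictionary_map,
                                    bool is_private) {
      // Dictionary-mode prototypes need per-name negative lookups; private
      // names never share (no nonexistent_private_symbol exists yet).
      if (saw_dictionary_map || is_private) return name;
      return "<nonexistent_symbol>";  // shared key: one stub per map
    }
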
-
-
 Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
                                               Handle<Name> name) {
   Code::Flags flags = Code::ComputeHandlerFlags(kind, cache_holder());
@@ -149,87 +95,6 @@
   return reg;
 }
 
-
-void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,
-                                                        Label* miss,
-                                                        Register scratch1,
-                                                        Register scratch2) {
-  Register holder_reg;
-  Handle<Map> last_map;
-  if (holder().is_null()) {
-    holder_reg = receiver();
-    last_map = map();
-    // If |type| has null as its prototype, |holder()| is
-    // Handle<JSObject>::null().
-    DCHECK(last_map->prototype() == isolate()->heap()->null_value());
-  } else {
-    last_map = handle(holder()->map());
-    // This condition matches the branches below.
-    bool need_holder =
-        last_map->is_dictionary_map() && !last_map->IsJSGlobalObjectMap();
-    holder_reg =
-        FrontendHeader(receiver(), name, miss,
-                       need_holder ? RETURN_HOLDER : DONT_RETURN_ANYTHING);
-  }
-
-  if (last_map->is_dictionary_map()) {
-    if (last_map->IsJSGlobalObjectMap()) {
-      Handle<JSGlobalObject> global =
-          holder().is_null()
-              ? Handle<JSGlobalObject>::cast(isolate()->global_object())
-              : Handle<JSGlobalObject>::cast(holder());
-      GenerateCheckPropertyCell(masm(), global, name, scratch1, miss);
-    } else {
-      if (!name->IsUniqueName()) {
-        DCHECK(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
-      DCHECK(holder().is_null() ||
-             holder()->property_dictionary()->FindEntry(name) ==
-                 NameDictionary::kNotFound);
-      GenerateDictionaryNegativeLookup(masm(), miss, holder_reg, name, scratch1,
-                                       scratch2);
-    }
-  }
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
-                                                        FieldIndex field) {
-  Register reg = Frontend(name);
-  __ Move(receiver(), reg);
-  LoadFieldStub stub(isolate(), field);
-  GenerateTailCall(masm(), stub.GetCode());
-  return GetCode(kind(), name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
-                                                           int constant_index) {
-  Register reg = Frontend(name);
-  __ Move(receiver(), reg);
-  LoadConstantStub stub(isolate(), constant_index);
-  GenerateTailCall(masm(), stub.GetCode());
-  return GetCode(kind(), name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
-    Handle<Name> name) {
-  Label miss;
-  if (IC::ShouldPushPopSlotAndVector(kind())) {
-    DCHECK(kind() == Code::LOAD_IC);
-    PushVectorAndSlot();
-  }
-  NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
-  if (IC::ShouldPushPopSlotAndVector(kind())) {
-    DiscardVectorAndSlot();
-  }
-  GenerateLoadConstant(isolate()->factory()->undefined_value());
-  FrontendFooter(name, &miss);
-  return GetCode(kind(), name);
-}
-
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
     Handle<Name> name, Handle<AccessorInfo> callback, Handle<Code> slow_stub) {
   if (V8_UNLIKELY(FLAG_runtime_stats)) {
@@ -298,10 +163,13 @@
     case LookupIterator::NOT_FOUND:
     case LookupIterator::INTEGER_INDEXED_EXOTIC:
       break;
-    case LookupIterator::DATA:
-      inline_followup =
-          it->property_details().type() == DATA && !it->is_dictionary_holder();
+    case LookupIterator::DATA: {
+      PropertyDetails details = it->property_details();
+      inline_followup = details.kind() == kData &&
+                        details.location() == kField &&
+                        !it->is_dictionary_holder();
       break;
+    }
     case LookupIterator::ACCESSOR: {
       Handle<Object> accessors = it->GetAccessors();
       if (accessors->IsAccessorInfo()) {
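
This hunk tracks the split of PropertyDetails::type() into an orthogonal kind and location: the old DATA type corresponds to the pair (kData, kField), and the inlined-followup test now asks for exactly that pair. A sketch of the new predicate, assuming kAccessor and kDescriptor as the usual counterparts of the two enum values visible in this patch:

    enum PropertyKind { kData, kAccessor };
    enum PropertyLocation { kField, kDescriptor };

    // Old form: details.type() == DATA && !dictionary_holder
    bool CanInlineFollowup(PropertyKind kind, PropertyLocation location,
                           bool dictionary_holder) {
      return kind == kData && location == kField && !dictionary_holder;
    }
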
@@ -409,10 +277,13 @@
     case LookupIterator::TRANSITION:
       UNREACHABLE();
     case LookupIterator::DATA: {
-      DCHECK_EQ(DATA, it->property_details().type());
-      __ Move(receiver(), reg);
-      LoadFieldStub stub(isolate(), it->GetFieldIndex());
-      GenerateTailCall(masm(), stub.GetCode());
+      DCHECK_EQ(kData, it->property_details().kind());
+      DCHECK_EQ(kField, it->property_details().location());
+      __ Move(LoadFieldDescriptor::ReceiverRegister(), reg);
+      Handle<Object> smi_handler =
+          LoadIC::SimpleFieldLoad(isolate(), it->GetFieldIndex());
+      __ Move(LoadFieldDescriptor::SmiHandlerRegister(), smi_handler);
+      GenerateTailCall(masm(), isolate()->builtins()->LoadField());
       break;
     }
     case LookupIterator::ACCESSOR:
@@ -440,150 +311,6 @@
   return GetCode(kind(), name);
 }
 
-
-// TODO(verwaest): Cleanup. holder() is actually the receiver.
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
-    Handle<Map> transition, Handle<Name> name) {
-  Label miss;
-
-  // Ensure that the StoreTransitionStub we are going to call has the same
-  // number of stack arguments. This means that we don't have to adapt them
-  // if we decide to call the transition or miss stub.
-  STATIC_ASSERT(Descriptor::kStackArgumentsCount ==
-                StoreTransitionDescriptor::kStackArgumentsCount);
-  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 0 ||
-                Descriptor::kStackArgumentsCount == 3);
-  STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kValue ==
-                StoreTransitionDescriptor::kParameterCount -
-                    StoreTransitionDescriptor::kValue);
-  STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kSlot ==
-                StoreTransitionDescriptor::kParameterCount -
-                    StoreTransitionDescriptor::kSlot);
-  STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kVector ==
-                StoreTransitionDescriptor::kParameterCount -
-                    StoreTransitionDescriptor::kVector);
-
-  if (Descriptor::kPassLastArgsOnStack) {
-    __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
-  }
-
-  bool need_save_restore = IC::ShouldPushPopSlotAndVector(kind());
-  if (need_save_restore) {
-    PushVectorAndSlot();
-  }
-
-  // Check that we are allowed to write this.
-  bool is_nonexistent = holder()->map() == transition->GetBackPointer();
-  if (is_nonexistent) {
-    // Find the top object.
-    Handle<JSObject> last;
-    PrototypeIterator::WhereToEnd end =
-        name->IsPrivate() ? PrototypeIterator::END_AT_NON_HIDDEN
-                          : PrototypeIterator::END_AT_NULL;
-    PrototypeIterator iter(isolate(), holder(), kStartAtPrototype, end);
-    while (!iter.IsAtEnd()) {
-      last = PrototypeIterator::GetCurrent<JSObject>(iter);
-      iter.Advance();
-    }
-    if (!last.is_null()) set_holder(last);
-    NonexistentFrontendHeader(name, &miss, scratch1(), scratch2());
-  } else {
-    FrontendHeader(receiver(), name, &miss, DONT_RETURN_ANYTHING);
-    DCHECK(holder()->HasFastProperties());
-  }
-
-  int descriptor = transition->LastAdded();
-  Handle<DescriptorArray> descriptors(transition->instance_descriptors());
-  PropertyDetails details = descriptors->GetDetails(descriptor);
-  Representation representation = details.representation();
-  DCHECK(!representation.IsNone());
-
-  // Stub is never generated for objects that require access checks.
-  DCHECK(!transition->is_access_check_needed());
-
-  // Call to respective StoreTransitionStub.
-  Register map_reg = StoreTransitionDescriptor::MapRegister();
-
-  if (details.type() == DATA_CONSTANT) {
-    DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
-    GenerateRestoreMap(transition, map_reg, scratch1(), &miss);
-    GenerateConstantCheck(map_reg, descriptor, value(), scratch1(), &miss);
-    if (need_save_restore) {
-      PopVectorAndSlot();
-    }
-    GenerateRestoreName(name);
-    StoreMapStub stub(isolate());
-    GenerateTailCall(masm(), stub.GetCode());
-
-  } else {
-    if (representation.IsHeapObject()) {
-      GenerateFieldTypeChecks(descriptors->GetFieldType(descriptor), value(),
-                              &miss);
-    }
-    StoreTransitionStub::StoreMode store_mode =
-        Map::cast(transition->GetBackPointer())->unused_property_fields() == 0
-            ? StoreTransitionStub::ExtendStorageAndStoreMapAndValue
-            : StoreTransitionStub::StoreMapAndValue;
-    GenerateRestoreMap(transition, map_reg, scratch1(), &miss);
-    if (need_save_restore) {
-      PopVectorAndSlot();
-    }
-    // We need to pass name on the stack.
-    PopReturnAddress(this->name());
-    __ Push(name);
-    PushReturnAddress(this->name());
-
-    FieldIndex index = FieldIndex::ForDescriptor(*transition, descriptor);
-    __ Move(StoreNamedTransitionDescriptor::FieldOffsetRegister(),
-            Smi::FromInt(index.index() << kPointerSizeLog2));
-
-    StoreTransitionStub stub(isolate(), index.is_inobject(), representation,
-                             store_mode);
-    GenerateTailCall(masm(), stub.GetCode());
-  }
-
-  __ bind(&miss);
-  if (need_save_restore) {
-    PopVectorAndSlot();
-  }
-  GenerateRestoreName(name);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  return GetCode(kind(), name);
-}
-
-bool NamedStoreHandlerCompiler::RequiresFieldTypeChecks(
-    FieldType* field_type) const {
-  return field_type->IsClass();
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
-  Label miss;
-  DCHECK(it->representation().IsHeapObject());
-
-  FieldType* field_type = *it->GetFieldType();
-  bool need_save_restore = false;
-  if (RequiresFieldTypeChecks(field_type)) {
-    need_save_restore = IC::ShouldPushPopSlotAndVector(kind());
-    if (Descriptor::kPassLastArgsOnStack) {
-      __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
-    }
-    if (need_save_restore) PushVectorAndSlot();
-    GenerateFieldTypeChecks(field_type, value(), &miss);
-    if (need_save_restore) PopVectorAndSlot();
-  }
-
-  StoreFieldStub stub(isolate(), it->GetFieldIndex(), it->representation());
-  GenerateTailCall(masm(), stub.GetCode());
-
-  __ bind(&miss);
-  if (need_save_restore) PopVectorAndSlot();
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-  return GetCode(kind(), it->name());
-}
-
-
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
     Handle<JSObject> object, Handle<Name> name, int accessor_index,
     int expected_arguments) {
@@ -625,7 +352,7 @@
   }
   if (receiver_map->IsStringMap()) {
     TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadIndexedStringStub);
-    return LoadIndexedStringStub(isolate).GetCode();
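+    // The indexed-string handler is now a shared builtin rather than a
+    // per-isolate LoadIndexedStringStub code object.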
+    return isolate->builtins()->KeyedLoadIC_IndexedString();
   }
   InstanceType instance_type = receiver_map->instance_type();
   if (instance_type < FIRST_JS_RECEIVER_TYPE) {
@@ -640,13 +367,8 @@
   }
   bool is_js_array = instance_type == JS_ARRAY_TYPE;
   if (elements_kind == DICTIONARY_ELEMENTS) {
-    if (FLAG_tf_load_ic_stub) {
-      TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
-      return LoadHandler::LoadElement(isolate, elements_kind, false,
-                                      is_js_array);
-    }
-    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadDictionaryElementStub);
-    return LoadDictionaryElementStub(isolate).GetCode();
+    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
+    return LoadHandler::LoadElement(isolate, elements_kind, false, is_js_array);
   }
   DCHECK(IsFastElementsKind(elements_kind) ||
          IsFixedTypedArrayElementsKind(elements_kind));
@@ -654,16 +376,9 @@
   bool convert_hole_to_undefined =
       is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
       *receiver_map == isolate->get_initial_js_array_map(elements_kind);
-  if (FLAG_tf_load_ic_stub) {
-    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
-    return LoadHandler::LoadElement(isolate, elements_kind,
-                                    convert_hole_to_undefined, is_js_array);
-  } else {
-    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadFastElementStub);
-    return LoadFastElementStub(isolate, is_js_array, elements_kind,
-                               convert_hole_to_undefined)
-        .GetCode();
-  }
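+  // Element loads are now handled uniformly by data-driven Smi handlers;
+  // the convert_hole_to_undefined bit is only set for holey fast JSArrays
+  // that still use the initial array map (computed above).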
+  TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
+  return LoadHandler::LoadElement(isolate, elements_kind,
+                                  convert_hole_to_undefined, is_js_array);
 }
 
 void ElementHandlerCompiler::CompileElementHandlers(
diff --git a/src/ic/handler-compiler.h b/src/ic/handler-compiler.h
index 0dec36a..a37375a 100644
--- a/src/ic/handler-compiler.h
+++ b/src/ic/handler-compiler.h
@@ -40,8 +40,6 @@
   // Frontend loads from receiver(), returns holder register which may be
   // different.
   Register Frontend(Handle<Name> name);
-  void NonexistentFrontendHeader(Handle<Name> name, Label* miss,
-                                 Register scratch1, Register scratch2);
 
   // When FLAG_vector_ics is true, handlers that have the possibility of missing
   // will need to save and pass these to miss handlers.
@@ -52,9 +50,6 @@
 
   void DiscardVectorAndSlot();
 
-  void PushReturnAddress(Register tmp);
-  void PopReturnAddress(Register tmp);
-
   // TODO(verwaest): Make non-static.
   static void GenerateApiAccessorCall(MacroAssembler* masm,
                                       const CallOptimization& optimization,
@@ -134,8 +129,6 @@
 
   virtual ~NamedLoadHandlerCompiler() {}
 
-  Handle<Code> CompileLoadField(Handle<Name> name, FieldIndex index);
-
   Handle<Code> CompileLoadCallback(Handle<Name> name,
                                    Handle<AccessorInfo> callback,
                                    Handle<Code> slow_stub);
@@ -144,8 +137,6 @@
                                    const CallOptimization& call_optimization,
                                    int accessor_index, Handle<Code> slow_stub);
 
-  Handle<Code> CompileLoadConstant(Handle<Name> name, int constant_index);
-
   // The LookupIterator is used to perform a lookup behind the interceptor. If
   // the iterator points to a LookupIterator::PROPERTY, its access will be
   // inlined.
@@ -157,10 +148,6 @@
   Handle<Code> CompileLoadGlobal(Handle<PropertyCell> cell, Handle<Name> name,
                                  bool is_configurable);
 
-  // Static interface
-  static Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
-                                             Handle<Map> map);
-
   static void GenerateLoadViaGetter(MacroAssembler* masm, Handle<Map> map,
                                     Register receiver, Register holder,
                                     int accessor_index, int expected_arguments,
@@ -171,12 +158,6 @@
                           no_reg);
   }
 
-  static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
-                                            Register receiver,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Label* miss_label);
-
   // These constants describe the structure of the interceptor arguments on the
   // stack. The arguments are pushed by the (platform-specific)
   // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
@@ -193,11 +174,7 @@
   virtual void FrontendFooter(Handle<Name> name, Label* miss);
 
  private:
-  Handle<Code> CompileLoadNonexistent(Handle<Name> name);
-  void GenerateLoadConstant(Handle<Object> value);
   void GenerateLoadCallback(Register reg, Handle<AccessorInfo> callback);
-  void GenerateLoadCallback(const CallOptimization& call_optimization,
-                            Handle<Map> receiver_map);
 
   // Helper emits no code if vector-ics are disabled.
   void InterceptorVectorSlotPush(Register holder_reg);
@@ -209,17 +186,6 @@
                                            Register holder_reg);
   void GenerateLoadPostInterceptor(LookupIterator* it, Register reg);
 
-  // Generates prototype loading code that uses the objects from the
-  // context we were in when this function was called. If the context
-  // has changed, a jump to miss is performed. This ties the generated
-  // code to a particular context and so must not be used in cases
-  // where the generated code is not allowed to have references to
-  // objects from a context.
-  static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
-                                                        int index,
-                                                        Register prototype,
-                                                        Label* miss);
-
   Register scratch3() { return registers_[4]; }
 };
 
@@ -244,9 +210,6 @@
 
   void ZapStackArgumentsRegisterAliases();
 
-  Handle<Code> CompileStoreTransition(Handle<Map> transition,
-                                      Handle<Name> name);
-  Handle<Code> CompileStoreField(LookupIterator* it);
   Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
                                     Handle<AccessorInfo> callback,
                                     LanguageMode language_mode);
@@ -275,18 +238,6 @@
   void GenerateRestoreName(Label* label, Handle<Name> name);
 
  private:
-  void GenerateRestoreName(Handle<Name> name);
-  void GenerateRestoreMap(Handle<Map> transition, Register map_reg,
-                          Register scratch, Label* miss);
-
-  void GenerateConstantCheck(Register map_reg, int descriptor,
-                             Register value_reg, Register scratch,
-                             Label* miss_label);
-
-  bool RequiresFieldTypeChecks(FieldType* field_type) const;
-  void GenerateFieldTypeChecks(FieldType* field_type, Register value_reg,
-                               Label* miss_label);
-
   static Register value();
 };
 
diff --git a/src/ic/handler-configuration-inl.h b/src/ic/handler-configuration-inl.h
index 505d67c..437c528 100644
--- a/src/ic/handler-configuration-inl.h
+++ b/src/ic/handler-configuration-inl.h
@@ -103,8 +103,10 @@
   }
   int value_index = DescriptorArray::ToValueIndex(descriptor);
 
-  DCHECK(kind == kStoreField || kind == kTransitionToField);
-  DCHECK_IMPLIES(kind == kStoreField, !extend_storage);
+  DCHECK(kind == kStoreField || kind == kTransitionToField ||
+         (kind == kStoreConstField && FLAG_track_constant_fields));
+  DCHECK_IMPLIES(extend_storage, kind == kTransitionToField);
+  DCHECK_IMPLIES(field_index.is_inobject(), !extend_storage);
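+  // Storage extension only happens when a transition adds an out-of-object
+  // field to a full backing store, hence the two implications above.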
 
   int config = StoreHandler::KindBits::encode(kind) |
                StoreHandler::ExtendStorageBits::encode(extend_storage) |
@@ -117,9 +119,12 @@
 
 Handle<Object> StoreHandler::StoreField(Isolate* isolate, int descriptor,
                                         FieldIndex field_index,
+                                        PropertyConstness constness,
                                         Representation representation) {
-  return StoreField(isolate, kStoreField, descriptor, field_index,
-                    representation, false);
+  DCHECK_IMPLIES(!FLAG_track_constant_fields, constness == kMutable);
+  Kind kind = constness == kMutable ? kStoreField : kStoreConstField;
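+  // Illustrative call (assumed): with --track-constant-fields enabled, a
+  // store to a still-constant field would request
+  //   StoreHandler::StoreField(isolate, descriptor, index, kConst, rep)
+  // and receive the kStoreConstField encoding here.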
+  return StoreField(isolate, kind, descriptor, field_index, representation,
+                    false);
 }
 
 Handle<Object> StoreHandler::TransitionToField(Isolate* isolate, int descriptor,
@@ -132,6 +137,7 @@
 
 Handle<Object> StoreHandler::TransitionToConstant(Isolate* isolate,
                                                   int descriptor) {
+  DCHECK(!FLAG_track_constant_fields);
   int value_index = DescriptorArray::ToValueIndex(descriptor);
   int config =
       StoreHandler::KindBits::encode(StoreHandler::kTransitionToConstant) |
diff --git a/src/ic/handler-configuration.h b/src/ic/handler-configuration.h
index a529173..539d448 100644
--- a/src/ic/handler-configuration.h
+++ b/src/ic/handler-configuration.h
@@ -121,8 +121,10 @@
   enum Kind {
     kStoreElement,
     kStoreField,
+    kStoreConstField,
     kTransitionToField,
-    kTransitionToConstant
+    // TODO(ishell): remove once constant field tracking is done.
+    kTransitionToConstant = kStoreConstField
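+    // The alias keeps the enum within KindBits' two bits: kStoreElement,
+    // kStoreField, kStoreConstField and kTransitionToField are the four
+    // distinct encodings.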
   };
   class KindBits : public BitField<Kind, 0, 2> {};
 
@@ -175,6 +177,7 @@
   // Creates a Smi-handler for storing a field to fast object.
   static inline Handle<Object> StoreField(Isolate* isolate, int descriptor,
                                           FieldIndex field_index,
+                                          PropertyConstness constness,
                                           Representation representation);
 
   // Creates a Smi-handler for transitioning store to a field.
diff --git a/src/ic/ia32/handler-compiler-ia32.cc b/src/ic/ia32/handler-compiler-ia32.cc
index 68fd1b9..f0f8fad 100644
--- a/src/ic/ia32/handler-compiler-ia32.cc
+++ b/src/ic/ia32/handler-compiler-ia32.cc
@@ -83,16 +83,6 @@
   __ add(esp, Immediate(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  MacroAssembler* masm = this->masm();
-  __ push(tmp);
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  MacroAssembler* masm = this->masm();
-  __ pop(tmp);
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -132,27 +122,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadGlobalFunction(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ mov(result,
-         FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  // TODO(mvstanton): This isn't used on ia32. Move all the other
-  // platform implementations into a code stub so this method can be removed.
-  UNREACHABLE();
-}
-
-
 // Generate call to api function.
 // This function uses push() to generate smaller, faster code than
 // the version above. It is an optimization that should be removed
@@ -324,10 +293,12 @@
   }
 }
 
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+         Runtime::FunctionForId(id)->nargs);
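+  // The former PushInterceptorArguments helper is inlined here (and in
+  // GenerateLoadInterceptor below), so the runtime call can follow the
+  // pushes directly.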
 
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
@@ -335,15 +306,7 @@
   __ push(name);
   __ push(receiver);
   __ push(holder);
-}
 
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm, Register receiver, Register holder, Register name,
-    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
-  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
-         Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
   __ CallRuntime(id);
 }
 
@@ -359,58 +322,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ mov(this->name(), Immediate(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ mov(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
-    __ and_(scratch, Immediate(Map::Deprecated::kMask));
-    __ j(not_zero, miss);
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ mov(scratch,
-         FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ cmp(value_reg, scratch);
-  __ j(not_equal, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ mov(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ j(not_equal, miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -540,14 +451,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ LoadObject(eax, value);
-  __ ret(0);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -620,10 +523,26 @@
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   // Call the runtime system to load the interceptor.
-  __ pop(scratch2());  // save old return address
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
-  __ push(scratch2());  // restore old return address
+
+  // Stack:
+  //   return address
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ push(receiver());
+  __ push(holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ push(slot());
+    __ push(vector());
+  } else {
+    __ push(scratch3());  // slot
+    __ push(scratch2());  // vector
+  }
+  __ push(Operand(esp, 4 * kPointerSize));  // return address
+  __ mov(Operand(esp, 5 * kPointerSize), name());
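+
+  // Stack after the pushes above:
+  //   esp[0]  : return address
+  //   esp[4]  : vector
+  //   esp[8]  : slot
+  //   esp[12] : holder
+  //   esp[16] : receiver
+  //   esp[20] : name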
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/ia32/ic-compiler-ia32.cc b/src/ic/ia32/ic-compiler-ia32.cc
deleted file mode 100644
index a52f046..0000000
--- a/src/ic/ia32/ic-compiler-ia32.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  typedef StoreWithVectorDescriptor Descriptor;
-  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
-  // ----------- S t a t e -------------
-  //  -- esp[12] : value
-  //  -- esp[8]  : slot
-  //  -- esp[4]  : vector
-  //  -- esp[0]  : return address
-  // -----------------------------------
-  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
-                                        Descriptor::kValue);
-
-  __ mov(Operand(esp, 12), Descriptor::ReceiverRegister());
-  __ mov(Operand(esp, 8), Descriptor::NameRegister());
-  __ mov(Operand(esp, 4), Descriptor::ValueRegister());
-  __ pop(ebx);
-  __ push(Immediate(Smi::FromInt(language_mode)));
-  __ push(ebx);  // return address
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc
index 44a5b9f..c4b4cdc 100644
--- a/src/ic/ia32/ic-ia32.cc
+++ b/src/ic/ia32/ic-ia32.cc
@@ -6,532 +6,11 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used to load a property from a dictionary backing
-// storage. This function may fail to load a property even though it is
-// in the dictionary, so code at miss_label must always call a backup
-// property load that is complete. This function is safe to call if
-// name is not internalized, and will jump to the miss_label in that
-// case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
-                                   Register elements, Register name,
-                                   Register r0, Register r1, Register result) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is unchanged.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // Scratch registers:
-  //
-  // r0   - used for the index into the property dictionary
-  //
-  // r1   - used to hold the capacity of the property dictionary.
-  //
-  // result - holds the result on exit.
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
-                                                   elements, name, r0, r1);
-
-  // If probing finds an entry in the dictionary, r0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  __ j(not_zero, miss_label);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not internalized, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
-                                    Register elements, Register name,
-                                    Register value, Register r0, Register r1) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is clobbered.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // value - holds the value to store and is unchanged.
-  //
-  // r0 - used for index into the property dictionary and is clobbered.
-  //
-  // r1 - used to hold the capacity of the property dictionary and is clobbered.
-  Label done;
-
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
-                                                   elements, name, r0, r1);
-
-  // If probing finds an entry in the dictionary, r0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property that is not read only.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY))
-      << kSmiTagSize;
-  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(kTypeAndReadOnlyMask));
-  __ j(not_zero, miss_label);
-
-  // Store the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-  __ mov(Operand(r0, 0), value);
-
-  // Update write barrier. Make sure not to clobber the value.
-  __ mov(r1, value);
-  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-  DCHECK(value.is(eax));
-  // key is a smi.
-  // ebx: FixedArray receiver->elements
-  // edi: receiver map
-  // Fast case: Do the store; the value could be either an Object or a double.
-  __ bind(fast_object);
-  if (check_map == kCheckMap) {
-    __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-    __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-    __ j(not_equal, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element
-  Label holecheck_passed1;
-  __ cmp(FixedArrayElementOperand(ebx, key),
-         masm->isolate()->factory()->the_hole_value());
-  __ j(not_equal, &holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ mov(FixedArrayElementOperand(ebx, key), value);
-  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ CheckFastObjectElements(edi, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  __ mov(FixedArrayElementOperand(ebx, key), value);
-  // Update write barrier for the elements array address.
-  __ mov(edx, value);  // Preserve the value which is returned.
-  __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-    __ j(not_equal, slow);
-    // If the value is a number, store it as a double in the FastDoubleElements
-    // array.
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so,
-  // go to the runtime.
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
-  __ j(not_equal, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, ebx, key, edi, xmm0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(&transition_smi_elements);
-  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  // Transition the array appropriately depending on the value type.
-  __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
-              &non_double_value, DONT_DO_SMI_CHECK);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
-  // and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   ebx, mode, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
-                                         edi, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, ebx, mode, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
-  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         ebx, edi, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
-                                                      value, ebx, mode, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  typedef StoreWithVectorDescriptor Descriptor;
-  // Return address is on the stack.
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-  Register receiver = Descriptor::ReceiverRegister();
-  Register key = Descriptor::NameRegister();
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map from the receiver.
-  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
-            Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_zero, &slow);
-
-  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
-                                        Descriptor::kValue);
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
-  __ j(equal, &array);
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  __ CmpInstanceType(edi, JS_OBJECT_TYPE);
-  __ j(below, &slow);
-
-  // Object case: Check key against length in the elements array.
-  // Key is a smi.
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(below, &fast_object);
-
-  // Slow case: call runtime.
-  __ bind(&slow);
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ mov(ebx, FieldOperand(key, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(ebx, &slow);
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
-                                                     no_reg);
-
-  // Cache miss.
-  __ jmp(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // receiver is a JSArray.
-  // key is a smi.
-  // ebx: receiver->elements, a FixedArray
-  // edi: receiver map
-  // flags: compare (key, receiver.length())
-  // do not leave holes in the array:
-  __ j(not_equal, &slow);
-  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(above_equal, &slow);
-  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-  __ j(not_equal, &check_if_double_array);
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-  __ j(not_equal, &slow);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
-  __ bind(&array);
-  // receiver is a JSArray.
-  // key is a smi.
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array and fall through to the
-  // common store code.
-  __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset));  // Compare smis.
-  __ j(above_equal, &extra);
-
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
-                                      kCheckMap, kDontIncrementLength);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = eax;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ mov(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
-                                  JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), edi, ebx, eax);
-  __ ret(0);
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
-         !edi.is(vector));
-
-  __ pop(edi);
-  __ push(receiver);
-  __ push(name);
-  __ push(slot);
-  __ push(vector);
-  __ push(edi);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  __ IncrementCounter(masm->isolate()->counters()->ic_load_miss(), 1);
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
-  __ pop(ebx);
-  __ push(receiver);
-  __ push(name);
-  __ push(ebx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_miss(), 1);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
-  __ pop(ebx);
-  __ push(receiver);
-  __ push(name);
-  __ push(ebx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
-  Register name = StoreWithVectorDescriptor::NameRegister();
-
-  STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
-  // Current stack layout:
-  // - esp[12]   -- value
-  // - esp[8]    -- slot
-  // - esp[4]    -- vector
-  // - esp[0]    -- return address
-
-  Register return_address = StoreWithVectorDescriptor::SlotRegister();
-  __ pop(return_address);
-  __ push(receiver);
-  __ push(name);
-  __ push(return_address);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  typedef StoreWithVectorDescriptor Descriptor;
-  Label restore_miss;
-  Register receiver = Descriptor::ReceiverRegister();
-  Register name = Descriptor::NameRegister();
-  Register value = Descriptor::ValueRegister();
-  // Since the slot and vector values are passed on the stack we can use
-  // respective registers as scratch registers.
-  Register scratch1 = Descriptor::VectorRegister();
-  Register scratch2 = Descriptor::SlotRegister();
-
-  __ LoadParameterFromStack<Descriptor>(value, Descriptor::kValue);
-
-  // A lot of registers are needed for storing to slow case objects.
-  // Push and restore receiver but rely on GenerateDictionaryStore preserving
-  // the value and name.
-  __ push(receiver);
-
-  Register dictionary = receiver;
-  __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
-                          scratch1, scratch2);
-  __ Drop(1);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1);
-  __ ret(Descriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(&restore_miss);
-  __ pop(receiver);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
 
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
@@ -582,9 +61,7 @@
   // condition code uses at the patched jump.
   uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(test_instruction_address), delta);
+    LOG(isolate, PatchIC(address, test_instruction_address, delta));
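+    // Patching is now reported through the logger, so it lands in the v8
+    // log instead of going straight to stdout.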
   }
 
   // Patch with a short conditional jump. Enabling means switching from a short
diff --git a/src/ic/ia32/stub-cache-ia32.cc b/src/ic/ia32/stub-cache-ia32.cc
deleted file mode 100644
index 82700d3..0000000
--- a/src/ic/ia32/stub-cache-ia32.cc
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register name, Register receiver,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register extra) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  Label miss;
-  Code::Kind ic_kind = stub_cache->ic_kind();
-  bool is_vector_store =
-      IC::ICUseVector(ic_kind) &&
-      (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ lea(offset, Operand(offset, offset, times_2, 0));
-
-  if (extra.is_valid()) {
-    // Get the code entry from the cache.
-    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-    if (is_vector_store) {
-      // The value, vector and slot were passed to the IC on the stack and
-      // they are still there. So we can just jump to the handler.
-      DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
-      __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ jmp(extra);
-    } else {
-      // The vector and slot were pushed onto the stack before starting the
-      // probe, and need to be dropped before calling the handler.
-      __ pop(LoadWithVectorDescriptor::VectorRegister());
-      __ pop(LoadDescriptor::SlotRegister());
-      __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ jmp(extra);
-    }
-
-    __ bind(&miss);
-  } else {
-    DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
-
-    // Save the offset on the stack.
-    __ push(offset);
-
-    // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-    // Restore offset register.
-    __ mov(offset, Operand(esp, 0));
-
-    // Get the code entry from the cache.
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-    // Restore offset and re-load code entry from cache.
-    __ pop(offset);
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Jump to the first instruction in the code stub.
-    if (is_vector_store) {
-      DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
-    }
-    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(offset);
-
-    // Pop at miss.
-    __ bind(&miss);
-    __ pop(offset);
-  }
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-  // Assert that code is valid.  The multiplying code relies on the entry size
-  // being 12.
-  DCHECK(sizeof(Entry) == 12);
-
-  // Assert that there are no register conflicts.
-  DCHECK(!scratch.is(receiver));
-  DCHECK(!scratch.is(name));
-  DCHECK(!extra.is(receiver));
-  DCHECK(!extra.is(name));
-  DCHECK(!extra.is(scratch));
-
-  // Assert scratch and extra registers are valid, and extra2/3 are unused.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(extra2.is(no_reg));
-  DCHECK(extra3.is(no_reg));
-
-  Register offset = scratch;
-  scratch = no_reg;
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, kPrimaryMagic);
-  // We mask out the last two bits because they are not part of the hash and
-  // they are always 01 for maps. The same masking is applied in the two
-  // 'and' instructions below.
-  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
-  // ProbeTable expects the offset to be pointer scaled, which it is, because
-  // the heap object tag size is 2 and the pointer size log 2 is also 2.
-  DCHECK(kCacheIndexShift == kPointerSizeLog2);
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, name, receiver, offset, extra);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, kPrimaryMagic);
-  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
-  __ sub(offset, name);
-  __ add(offset, Immediate(kSecondaryMagic));
-  __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, name, receiver, offset, extra);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ic/ic-compiler.cc b/src/ic/ic-compiler.cc
deleted file mode 100644
index 750c88d..0000000
--- a/src/ic/ic-compiler.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ic/ic-compiler.h"
-
-#include "src/ic/handler-compiler.h"
-#include "src/ic/ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
-    Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
-  Isolate* isolate = receiver_map->GetIsolate();
-
-  DCHECK(store_mode == STANDARD_STORE ||
-         store_mode == STORE_AND_GROW_NO_TRANSITION ||
-         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
-         store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-
-  PropertyICCompiler compiler(isolate);
-  Handle<Code> code =
-      compiler.CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
-  return code;
-}
-
-void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
-    MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
-    CodeHandleList* handlers, KeyedAccessStoreMode store_mode) {
-  Isolate* isolate = receiver_maps->at(0)->GetIsolate();
-  DCHECK(store_mode == STANDARD_STORE ||
-         store_mode == STORE_AND_GROW_NO_TRANSITION ||
-         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
-         store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-  PropertyICCompiler compiler(isolate);
-  compiler.CompileKeyedStorePolymorphicHandlers(
-      receiver_maps, transitioned_maps, handlers, store_mode);
-}
-
-
-void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
-    MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
-    CodeHandleList* handlers, KeyedAccessStoreMode store_mode) {
-  for (int i = 0; i < receiver_maps->length(); ++i) {
-    Handle<Map> receiver_map(receiver_maps->at(i));
-    Handle<Code> cached_stub;
-    Handle<Map> transitioned_map;
-    {
-      Map* tmap = receiver_map->FindElementsKindTransitionedMap(receiver_maps);
-      if (tmap != nullptr) transitioned_map = handle(tmap);
-    }
-
-    // TODO(mvstanton): The code below is doing pessimistic elements
-    // transitions. I would like to stop doing that and rely on Allocation Site
-    // Tracking to do a better job of ensuring the data types are what they need
-    // to be. Not all the elements are in place yet, so pessimistic elements
-    // transitions are still important for performance.
-    if (!transitioned_map.is_null()) {
-      bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-      ElementsKind elements_kind = receiver_map->elements_kind();
-      TRACE_HANDLER_STATS(isolate(),
-                          KeyedStoreIC_ElementsTransitionAndStoreStub);
-      cached_stub =
-          ElementsTransitionAndStoreStub(isolate(), elements_kind,
-                                         transitioned_map->elements_kind(),
-                                         is_js_array, store_mode).GetCode();
-    } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
-      // TODO(mvstanton): Consider embedding store_mode in the state of the slow
-      // keyed store ic for uniformity.
-      TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
-      cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
-    } else {
-      cached_stub =
-          CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
-    }
-    DCHECK(!cached_stub.is_null());
-    handlers->Add(cached_stub);
-    transitioned_maps->Add(transitioned_map);
-  }
-}
-
-
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphicHandler(
-    Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
-  ElementsKind elements_kind = receiver_map->elements_kind();
-  bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  Handle<Code> stub;
-  if (receiver_map->has_sloppy_arguments_elements()) {
-    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_KeyedStoreSloppyArgumentsStub);
-    stub = KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
-  } else if (receiver_map->has_fast_elements() ||
-             receiver_map->has_fixed_typed_array_elements()) {
-    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreFastElementStub);
-    stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
-                                store_mode).GetCode();
-  } else {
-    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
-    stub = StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
-  }
-  return stub;
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
diff --git a/src/ic/ic-compiler.h b/src/ic/ic-compiler.h
deleted file mode 100644
index fa3ba15..0000000
--- a/src/ic/ic-compiler.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IC_IC_COMPILER_H_
-#define V8_IC_IC_COMPILER_H_
-
-#include "src/ic/access-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-
-class PropertyICCompiler : public PropertyAccessCompiler {
- public:
-  // Keyed
-  static Handle<Code> ComputeKeyedStoreMonomorphicHandler(
-      Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
-  static void ComputeKeyedStorePolymorphicHandlers(
-      MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
-      CodeHandleList* handlers, KeyedAccessStoreMode store_mode);
-
-  // Helpers
-  // TODO(verwaest): Move all uses of these helpers to the PropertyICCompiler
-  // and make the helpers private.
-  static void GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                         LanguageMode language_mode);
-
-
- private:
-  explicit PropertyICCompiler(Isolate* isolate)
-      : PropertyAccessCompiler(isolate, Code::KEYED_STORE_IC,
-                               kCacheOnReceiver) {}
-
-  Handle<Code> CompileKeyedStoreMonomorphicHandler(
-      Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
-  void CompileKeyedStorePolymorphicHandlers(MapHandleList* receiver_maps,
-                                            MapHandleList* transitioned_maps,
-                                            CodeHandleList* handlers,
-                                            KeyedAccessStoreMode store_mode);
-};
-
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_IC_IC_COMPILER_H_
diff --git a/src/ic/ic-inl.h b/src/ic/ic-inl.h
index 1b5d063..aacb690 100644
--- a/src/ic/ic-inl.h
+++ b/src/ic/ic-inl.h
@@ -7,6 +7,7 @@
 
 #include "src/ic/ic.h"
 
+#include "src/assembler-inl.h"
 #include "src/debug/debug.h"
 #include "src/macro-assembler.h"
 #include "src/prototype.h"
@@ -45,7 +46,10 @@
   // Convert target address to the code object. Code::GetCodeFromTargetAddress
   // is safe for use during GC where the map might be marked.
   Code* result = Code::GetCodeFromTargetAddress(target);
-  DCHECK(result->is_inline_cache_stub());
+  // The result can be an IC dispatcher (for vector-based ICs), an IC handler
+  // (for old-style patching ICs), or a CEntryStub (for IC dispatchers inlined
+  // into bytecode handlers).
+  DCHECK(result->is_inline_cache_stub() || result->is_stub());
   return result;
 }
 
@@ -54,25 +58,13 @@
                             Address constant_pool) {
   if (AddressIsDeoptimizedCode(target->GetIsolate(), address)) return;
 
-  DCHECK(target->is_inline_cache_stub() || target->is_compare_ic_stub());
-
-  DCHECK(!target->is_inline_cache_stub() ||
-         (target->kind() != Code::LOAD_IC &&
-          target->kind() != Code::KEYED_LOAD_IC &&
-          target->kind() != Code::CALL_IC && target->kind() != Code::STORE_IC &&
-          target->kind() != Code::KEYED_STORE_IC));
+  // Only these three old-style ICs still do code patching.
+  DCHECK(target->is_binary_op_stub() || target->is_compare_ic_stub() ||
+         target->is_to_boolean_ic_stub());
 
   Heap* heap = target->GetHeap();
   Code* old_target = GetTargetAtAddress(address, constant_pool);
-#ifdef DEBUG
-  // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
-  // ICs as language mode. The language mode of the IC must be preserved.
-  if (old_target->kind() == Code::STORE_IC ||
-      old_target->kind() == Code::KEYED_STORE_IC) {
-    DCHECK(StoreICState::GetLanguageMode(old_target->extra_ic_state()) ==
-           StoreICState::GetLanguageMode(target->extra_ic_state()));
-  }
-#endif
+
   Assembler::set_target_address_at(heap->isolate(), address, constant_pool,
                                    target->instruction_start());
   if (heap->gc_state() == Heap::MARK_COMPACT) {
@@ -93,8 +85,8 @@
 }
 
 bool IC::IsHandler(Object* object) {
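+  // A sketch of the accepted shapes: non-code handlers are either Smi-encoded
+  // "smi handlers" or small structs (Tuple2/Tuple3/FixedArray) that combine
+  // validation cells with a smi handler; code handlers remain for
+  // custom-compiled stubs.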
-  return (object->IsSmi() && (object != nullptr)) || object->IsTuple3() ||
-         object->IsFixedArray() ||
+  return (object->IsSmi() && (object != nullptr)) || object->IsTuple2() ||
+         object->IsTuple3() || object->IsFixedArray() ||
          (object->IsCode() && Code::cast(object)->is_handler());
 }
 
@@ -132,14 +124,6 @@
 }
 
 
-Code* IC::get_host() {
-  return isolate()
-      ->inner_pointer_to_code_cache()
-      ->GetCacheEntry(address())
-      ->code;
-}
-
-
 bool IC::AddressIsDeoptimizedCode() const {
   return AddressIsDeoptimizedCode(isolate(), address());
 }
diff --git a/src/ic/ic-state.cc b/src/ic/ic-state.cc
index f948036..a217b11 100644
--- a/src/ic/ic-state.cc
+++ b/src/ic/ic-state.cc
@@ -4,7 +4,10 @@
 
 #include "src/ic/ic-state.h"
 
+#include "src/ast/ast-types.h"
+#include "src/feedback-vector.h"
 #include "src/ic/ic.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -16,11 +19,6 @@
 }
 
 
-std::ostream& operator<<(std::ostream& os, const CallICState& s) {
-  return os << "(" << s.convert_mode() << ", " << s.tail_call_mode() << ")";
-}
-
-
 // static
 STATIC_CONST_MEMBER_DEFINITION const int BinaryOpICState::FIRST_TOKEN;
 
@@ -61,6 +59,23 @@
   return extra_ic_state;
 }
 
+std::string BinaryOpICState::ToString() const {
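+  // Render the recorded operand kinds compactly, e.g. "(ADD:Smi*Smi->Smi)",
+  // for logging and --ic-stats output.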
+  std::string ret = "(";
+  ret += Token::Name(op_);
+  if (CouldCreateAllocationMementos()) ret += "_CreateAllocationMementos";
+  ret += ":";
+  ret += BinaryOpICState::KindToString(left_kind_);
+  ret += "*";
+  if (fixed_right_arg_.IsJust()) {
+    // Append the decimal digits; += on the raw int would append a single
+    // char with that code point instead.
+    ret += std::to_string(fixed_right_arg_.FromJust());
+  } else {
+    ret += BinaryOpICState::KindToString(right_kind_);
+  }
+  ret += "->";
+  ret += BinaryOpICState::KindToString(result_kind_);
+  ret += ")";
+  return ret;
+}
 
 // static
 void BinaryOpICState::GenerateAheadOfTime(
diff --git a/src/ic/ic-state.h b/src/ic/ic-state.h
index 1ba37b9..16651c5 100644
--- a/src/ic/ic-state.h
+++ b/src/ic/ic-state.h
@@ -11,6 +11,7 @@
 namespace v8 {
 namespace internal {
 
+class AstType;
 
 const int kMaxKeyedPolymorphism = 4;
 
@@ -22,38 +23,6 @@
 };
 
 
-class CallICState final BASE_EMBEDDED {
- public:
-  explicit CallICState(ExtraICState extra_ic_state)
-      : bit_field_(extra_ic_state) {}
-  CallICState(ConvertReceiverMode convert_mode, TailCallMode tail_call_mode)
-      : bit_field_(ConvertModeBits::encode(convert_mode) |
-                   TailCallModeBits::encode(tail_call_mode)) {}
-
-  ExtraICState GetExtraICState() const { return bit_field_; }
-
-  static void GenerateAheadOfTime(Isolate*,
-                                  void (*Generate)(Isolate*,
-                                                   const CallICState&));
-
-  ConvertReceiverMode convert_mode() const {
-    return ConvertModeBits::decode(bit_field_);
-  }
-  TailCallMode tail_call_mode() const {
-    return TailCallModeBits::decode(bit_field_);
-  }
-
- private:
-  typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
-  typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
-
-  int const bit_field_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const CallICState& s);
-
-
 class BinaryOpICState final BASE_EMBEDDED {
  public:
   BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state);
@@ -82,6 +51,7 @@
   }
 
   ExtraICState GetExtraICState() const;
+  std::string ToString() const;
 
   static void GenerateAheadOfTime(Isolate*,
                                   void (*Generate)(Isolate*,
@@ -212,60 +182,6 @@
                            Handle<Object> y);
 };
 
-class LoadGlobalICState final BASE_EMBEDDED {
- private:
-  class TypeofModeBits : public BitField<TypeofMode, 0, 1> {};
-  STATIC_ASSERT(static_cast<int>(INSIDE_TYPEOF) == 0);
-  const ExtraICState state_;
-
- public:
-  static const uint32_t kNextBitFieldOffset = TypeofModeBits::kNext;
-
-  explicit LoadGlobalICState(ExtraICState extra_ic_state)
-      : state_(extra_ic_state) {}
-
-  explicit LoadGlobalICState(TypeofMode typeof_mode)
-      : state_(TypeofModeBits::encode(typeof_mode)) {}
-
-  ExtraICState GetExtraICState() const { return state_; }
-
-  TypeofMode typeof_mode() const { return TypeofModeBits::decode(state_); }
-
-  static TypeofMode GetTypeofMode(ExtraICState state) {
-    return LoadGlobalICState(state).typeof_mode();
-  }
-};
-
-
-class StoreICState final BASE_EMBEDDED {
- public:
-  explicit StoreICState(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
-
-  explicit StoreICState(LanguageMode mode)
-      : state_(LanguageModeState::encode(mode)) {}
-
-  ExtraICState GetExtraICState() const { return state_; }
-
-  LanguageMode language_mode() const {
-    return LanguageModeState::decode(state_);
-  }
-
-  static LanguageMode GetLanguageMode(ExtraICState state) {
-    return StoreICState(state).language_mode();
-  }
-
-  class LanguageModeState : public BitField<LanguageMode, 1, 1> {};
-  STATIC_ASSERT(i::LANGUAGE_END == 2);
-
-  // For convenience, a statically declared encoding of strict mode extra
-  // IC state.
-  static const ExtraICState kStrictModeState = STRICT
-                                               << LanguageModeState::kShift;
-
- private:
-  const ExtraICState state_;
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/ic/ic-stats.cc b/src/ic/ic-stats.cc
new file mode 100644
index 0000000..de2529f
--- /dev/null
+++ b/src/ic/ic-stats.cc
@@ -0,0 +1,144 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ic/ic-stats.h"
+
+#include "src/flags.h"
+#include "src/objects-inl.h"
+#include "src/tracing/trace-event.h"
+#include "src/tracing/traced-value.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+base::LazyInstance<ICStats>::type ICStats::instance_ =
+    LAZY_INSTANCE_INITIALIZER;
+
+ICStats::ICStats() : ic_infos_(MAX_IC_INFO), pos_(0) {
+  base::NoBarrier_Store(&enabled_, 0);
+}
+
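+// Begin()/End() bracket the collection of one ICInfo entry. Begin() is a
+// no-op unless --ic-stats is enabled; End() advances the buffer position and
+// flushes everything via Dump() once MAX_IC_INFO entries have accumulated.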
+void ICStats::Begin() {
+  if (V8_LIKELY(!FLAG_ic_stats)) return;
+  base::NoBarrier_Store(&enabled_, 1);
+}
+
+void ICStats::End() {
+  if (base::NoBarrier_Load(&enabled_) != 1) return;
+  ++pos_;
+  if (pos_ == MAX_IC_INFO) {
+    Dump();
+  }
+  base::NoBarrier_Store(&enabled_, 0);
+}
+
+void ICStats::Reset() {
+  // Iterate by reference so that each buffered entry is actually cleared.
+  for (auto& ic_info : ic_infos_) {
+    ic_info.Reset();
+  }
+  pos_ = 0;
+}
+
+void ICStats::Dump() {
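+  // Serialize all buffered entries into a single instant trace event on the
+  // disabled-by-default "v8.ic_stats" tracing category, then reset the
+  // buffer.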
+  auto value = v8::tracing::TracedValue::Create();
+  value->BeginArray("data");
+  for (int i = 0; i < pos_; ++i) {
+    ic_infos_[i].AppendToTracedValue(value.get());
+  }
+  value->EndArray();
+
+  TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats"), "V8.ICStats",
+                       TRACE_EVENT_SCOPE_THREAD, "ic-stats", std::move(value));
+  Reset();
+}
+
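+// Script names are converted to C strings once and cached, since the same
+// script typically triggers many IC events; the map owns the allocations
+// released by ToCString().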
+const char* ICStats::GetOrCacheScriptName(Script* script) {
+  if (script_name_map_.find(script) != script_name_map_.end()) {
+    return script_name_map_[script].get();
+  }
+  Object* script_name_raw = script->name();
+  if (script_name_raw->IsString()) {
+    String* script_name = String::cast(script_name_raw);
+    char* c_script_name =
+        script_name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
+            .release();
+    script_name_map_.insert(
+        std::make_pair(script, std::unique_ptr<char[]>(c_script_name)));
+    return c_script_name;
+  } else {
+    script_name_map_.insert(
+        std::make_pair(script, std::unique_ptr<char[]>(nullptr)));
+    return nullptr;
+  }
+}
+
+const char* ICStats::GetOrCacheFunctionName(JSFunction* function) {
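+  // As a side effect, record on the current ICInfo entry whether the host
+  // function is optimized, since the JSFunction is readily available here.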
+  if (function_name_map_.find(function) != function_name_map_.end()) {
+    return function_name_map_[function].get();
+  }
+  SharedFunctionInfo* shared = function->shared();
+  ic_infos_[pos_].is_optimized = function->IsOptimized();
+  char* function_name = shared->DebugName()->ToCString().release();
+  function_name_map_.insert(
+      std::make_pair(function, std::unique_ptr<char[]>(function_name)));
+  return function_name;
+}
+
+ICInfo::ICInfo()
+    : function_name(nullptr),
+      script_offset(0),
+      script_name(nullptr),
+      line_num(-1),
+      is_constructor(false),
+      is_optimized(false),
+      map(nullptr),
+      is_dictionary_map(false),
+      number_of_own_descriptors(0) {}
+
+void ICInfo::Reset() {
+  type.clear();
+  function_name = nullptr;
+  script_offset = 0;
+  script_name = nullptr;
+  line_num = -1;
+  is_constructor = false;
+  is_optimized = false;
+  state.clear();
+  map = nullptr;
+  is_dictionary_map = false;
+  number_of_own_descriptors = 0;
+  instance_type.clear();
+}
+
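+// Only fields that were actually populated are emitted, to keep the trace
+// event small.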
+void ICInfo::AppendToTracedValue(v8::tracing::TracedValue* value) const {
+  value->BeginDictionary();
+  value->SetString("type", type);
+  if (function_name) {
+    value->SetString("functionName", function_name);
+    if (is_optimized) {
+      value->SetInteger("optimized", is_optimized);
+    }
+  }
+  if (script_offset) value->SetInteger("offset", script_offset);
+  if (script_name) value->SetString("scriptName", script_name);
+  if (line_num != -1) value->SetInteger("lineNum", line_num);
+  if (is_constructor) value->SetInteger("constructor", is_constructor);
+  if (!state.empty()) value->SetString("state", state);
+  if (map) {
+    // JavaScript (and therefore JSON) cannot precisely represent integers
+    // above 2^53 - 1, so the map address is emitted as a string rather than
+    // as an integer.
+    std::stringstream ss;
+    ss << map;
+    value->SetString("map", ss.str());
+    value->SetInteger("dict", is_dictionary_map);
+    value->SetInteger("own", number_of_own_descriptors);
+  }
+  if (!instance_type.empty()) value->SetString("instanceType", instance_type);
+  value->EndDictionary();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/ic/ic-stats.h b/src/ic/ic-stats.h
new file mode 100644
index 0000000..a3015d0
--- /dev/null
+++ b/src/ic/ic-stats.h
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_IC_STATS_H_
+#define V8_IC_IC_STATS_H_
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "src/base/atomicops.h"
+#include "src/base/lazy-instance.h"
+
+namespace v8 {
+
+namespace tracing {
+class TracedValue;
+}
+
+namespace internal {
+
+class JSFunction;
+class Script;
+
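+// One collected IC event. Populated by IC::TraceIC and serialized into the
+// "disabled-by-default-v8.ic_stats" tracing category via AppendToTracedValue.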
+struct ICInfo {
+  ICInfo();
+  void Reset();
+  void AppendToTracedValue(v8::tracing::TracedValue* value) const;
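+  // IC kind, e.g. "LoadIC"; keyed ICs carry a "Keyed" prefix.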
+  std::string type;
+  const char* function_name;
+  int script_offset;
+  const char* script_name;
+  int line_num;
+  bool is_constructor;
+  bool is_optimized;
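+  // Transition string of the form "(<old-state>-><new-state><modifier>)".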
+  std::string state;
+  // Address of the map.
+  void* map;
+  // Whether map is a dictionary map.
+  bool is_dictionary_map;
+  // Number of own descriptors.
+  unsigned number_of_own_descriptors;
+  std::string instance_type;
+};
+
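+// Process-wide, lazily created collector for IC events. Entries accumulate
+// in a fixed-size buffer and are flushed to the tracing system when full.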
+class ICStats {
+ public:
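+  // Maximum number of buffered entries before End() triggers a Dump().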
+  const int MAX_IC_INFO = 4096;
+
+  ICStats();
+  void Dump();
+  void Begin();
+  void End();
+  void Reset();
+  V8_INLINE ICInfo& Current() {
+    DCHECK(pos_ >= 0 && pos_ < MAX_IC_INFO);
+    return ic_infos_[pos_];
+  }
+  const char* GetOrCacheScriptName(Script* script);
+  const char* GetOrCacheFunctionName(JSFunction* function);
+  V8_INLINE static ICStats* instance() { return instance_.Pointer(); }
+
+ private:
+  static base::LazyInstance<ICStats>::type instance_;
+  base::Atomic32 enabled_;
+  std::vector<ICInfo> ic_infos_;
+  std::unordered_map<Script*, std::unique_ptr<char[]>> script_name_map_;
+  std::unordered_map<JSFunction*, std::unique_ptr<char[]>> function_name_map_;
+  int pos_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_IC_IC_STATS_H_
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
index 7e0cefd..f11f94a 100644
--- a/src/ic/ic.cc
+++ b/src/ic/ic.cc
@@ -19,8 +19,8 @@
 #include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/handler-configuration-inl.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/ic-inl.h"
+#include "src/ic/ic-stats.h"
 #include "src/ic/stub-cache.h"
 #include "src/isolate-inl.h"
 #include "src/macro-assembler.h"
@@ -29,6 +29,7 @@
 #include "src/runtime/runtime-utils.h"
 #include "src/runtime/runtime.h"
 #include "src/tracing/trace-event.h"
+#include "src/tracing/tracing-category-observer.h"
 
 namespace v8 {
 namespace internal {
@@ -64,33 +65,10 @@
   return "";
 }
 
-
-#ifdef DEBUG
-
-#define TRACE_GENERIC_IC(isolate, type, reason)                \
-  do {                                                         \
-    if (FLAG_trace_ic) {                                       \
-      PrintF("[%s patching generic stub in ", type);           \
-      JavaScriptFrame::PrintTop(isolate, stdout, false, true); \
-      PrintF(" (%s)]\n", reason);                              \
-    }                                                          \
-  } while (false)
-
-#else
-
-#define TRACE_GENERIC_IC(isolate, type, reason)      \
-  do {                                               \
-    if (FLAG_trace_ic) {                             \
-      PrintF("[%s patching generic stub in ", type); \
-      PrintF("(see below) (%s)]\n", reason);         \
-    }                                                \
-  } while (false)
-
-#endif  // DEBUG
-
+#define TRACE_GENERIC_IC(reason) set_slow_stub_reason(reason)
 
 void IC::TraceIC(const char* type, Handle<Object> name) {
-  if (FLAG_trace_ic) {
+  if (FLAG_ic_stats) {
     if (AddressIsDeoptimizedCode()) return;
     DCHECK(UseVector());
     State new_state = nexus()->StateFromFeedback();
@@ -98,64 +76,110 @@
   }
 }
 
+Address IC::GetAbstractPC(int* line, int* column) const {
+  JavaScriptFrameIterator it(isolate());
+
+  JavaScriptFrame* frame = it.frame();
+  DCHECK(!frame->is_builtin());
+  int position = frame->position();
+
+  Object* maybe_script = frame->function()->shared()->script();
+  if (maybe_script->IsScript()) {
+    Handle<Script> script(Script::cast(maybe_script), isolate());
+    Script::PositionInfo info;
+    Script::GetPositionInfo(script, position, &info, Script::WITH_OFFSET);
+    *line = info.line + 1;
+    *column = info.column + 1;
+  } else {
+    *line = position;
+    *column = -1;
+  }
+
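+  // For interpreted frames, compute the untagged address of the current
+  // bytecode inside the BytecodeArray (header start plus bytecode offset)
+  // instead of returning the machine pc of the bytecode handler.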
+  if (frame->is_interpreted()) {
+    InterpretedFrame* iframe = static_cast<InterpretedFrame*>(frame);
+    Address bytecode_start =
+        reinterpret_cast<Address>(iframe->GetBytecodeArray()) - kHeapObjectTag +
+        BytecodeArray::kHeaderSize;
+    return bytecode_start + iframe->GetBytecodeOffset();
+  }
+
+  return frame->pc();
+}
 
 void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
                  State new_state) {
-  if (!FLAG_trace_ic) return;
-  PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
+  if (V8_LIKELY(!FLAG_ic_stats)) return;
 
-  // TODO(jkummerow): Add support for "apply". The logic is roughly:
-  // marker = [fp_ + kMarkerOffset];
-  // if marker is smi and marker.value == INTERNAL and
-  //     the frame's code == builtin(Builtins::kFunctionApply):
-  // then print "apply from" and advance one frame
-
-  Object* maybe_function =
-      Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
-  if (maybe_function->IsJSFunction()) {
-    JSFunction* function = JSFunction::cast(maybe_function);
-    int code_offset = 0;
-    if (function->IsInterpreted()) {
-      code_offset = InterpretedFrame::GetBytecodeOffset(fp());
-    } else {
-      code_offset =
-          static_cast<int>(pc() - function->code()->instruction_start());
-    }
-    JavaScriptFrame::PrintFunctionAndOffset(function, function->abstract_code(),
-                                            code_offset, stdout, true);
-  }
-
-  const char* modifier = "";
-  if (kind() == Code::KEYED_STORE_IC) {
-    KeyedAccessStoreMode mode =
-        casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
-    modifier = GetTransitionMarkModifier(mode);
-  }
   Map* map = nullptr;
   if (!receiver_map().is_null()) {
     map = *receiver_map();
   }
-  PrintF(" (%c->%c%s) map=(%p", TransitionMarkFromState(old_state),
-         TransitionMarkFromState(new_state), modifier,
-         reinterpret_cast<void*>(map));
-  if (map != nullptr) {
-    PrintF(" dict=%u own=%u type=", map->is_dictionary_map(),
-           map->NumberOfOwnDescriptors());
-    std::cout << map->instance_type();
+
+  const char* modifier = "";
+  if (IsKeyedStoreIC()) {
+    KeyedAccessStoreMode mode =
+        casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
+    modifier = GetTransitionMarkModifier(mode);
   }
-  PrintF(") ");
-  name->ShortPrint(stdout);
-  PrintF("]\n");
+
+  if (!(FLAG_ic_stats &
+        v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+    int line;
+    int column;
+    Address pc = GetAbstractPC(&line, &column);
+    LOG(isolate(), ICEvent(type, is_keyed(), pc, line, column, map, *name,
+                           TransitionMarkFromState(old_state),
+                           TransitionMarkFromState(new_state), modifier,
+                           slow_stub_reason_));
+    return;
+  }
+
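+  // --ic-stats was enabled via the tracing category: record a structured
+  // ICInfo entry instead of writing an ICEvent to the log.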
+  ICStats::instance()->Begin();
+  ICInfo& ic_info = ICStats::instance()->Current();
+  ic_info.type = is_keyed() ? "Keyed" : "";
+  ic_info.type += type;
+
+  Object* maybe_function =
+      Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
+  DCHECK(maybe_function->IsJSFunction());
+  JSFunction* function = JSFunction::cast(maybe_function);
+  int code_offset = 0;
+  if (function->IsInterpreted()) {
+    code_offset = InterpretedFrame::GetBytecodeOffset(fp());
+  } else {
+    code_offset =
+        static_cast<int>(pc() - function->code()->instruction_start());
+  }
+  JavaScriptFrame::CollectFunctionAndOffsetForICStats(
+      function, function->abstract_code(), code_offset);
+
+  // Reserve enough space for the IC transition state string; the longest
+  // possible state is 17 characters.
+  ic_info.state.reserve(17);
+  ic_info.state = "(";
+  ic_info.state += TransitionMarkFromState(old_state);
+  ic_info.state += "->";
+  ic_info.state += TransitionMarkFromState(new_state);
+  ic_info.state += modifier;
+  ic_info.state += ")";
+  ic_info.map = reinterpret_cast<void*>(map);
+  if (map != nullptr) {
+    ic_info.is_dictionary_map = map->is_dictionary_map();
+    ic_info.number_of_own_descriptors = map->NumberOfOwnDescriptors();
+    ic_info.instance_type = std::to_string(map->instance_type());
+  }
+  // TODO(lpy) Add name as key field in ICStats.
+  ICStats::instance()->End();
 }
 
 
 #define TRACE_IC(type, name) TraceIC(type, name)
 
-
 IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
     : isolate_(isolate),
       vector_set_(false),
+      kind_(FeedbackSlotKind::kInvalid),
       target_maps_set_(false),
+      slow_stub_reason_(nullptr),
       nexus_(nexus) {
   // To improve the performance of the (much used) IC code, we unfold a few
   // levels of the stack frame iteration code. This yields a ~35% speedup when
@@ -192,9 +216,9 @@
   // function's frame. Check if there is an additional frame, and if there
   // is, skip this frame. However, the pc should not be updated; calls to
   // ICs happen from bytecode handlers.
-  Object* frame_type =
-      Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
-  if (frame_type == Smi::FromInt(StackFrame::STUB)) {
+  intptr_t frame_marker =
+      Memory::intptr_at(fp + TypedFrameConstants::kFrameTypeOffset);
+  if (frame_marker == StackFrame::TypeToMarker(StackFrame::STUB)) {
     fp = Memory::Address_at(fp + TypedFrameConstants::kCallerFPOffset);
   }
   fp_ = fp;
@@ -202,18 +226,36 @@
     constant_pool_address_ = constant_pool;
   }
   pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
-  Code* target = this->target();
-  kind_ = target->kind();
-  state_ = UseVector() ? nexus->StateFromFeedback() : StateFromCode(target);
+  if (nexus) {
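+    // Vector-based ICs carry their kind in the feedback nexus. Only the
+    // remaining patching ICs (binary op, compare, to-boolean) below still
+    // derive their kind and state from the code object.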
+    kind_ = nexus->kind();
+    DCHECK(UseVector());
+    state_ = nexus->StateFromFeedback();
+    extra_ic_state_ = kNoExtraICState;
+  } else {
+    Code* target = this->target();
+    Code::Kind kind = target->kind();
+    if (kind == Code::BINARY_OP_IC) {
+      kind_ = FeedbackSlotKind::kBinaryOp;
+    } else if (kind == Code::COMPARE_IC) {
+      kind_ = FeedbackSlotKind::kCompareOp;
+    } else if (kind == Code::TO_BOOLEAN_IC) {
+      kind_ = FeedbackSlotKind::kToBoolean;
+    } else {
+      UNREACHABLE();
+      kind_ = FeedbackSlotKind::kInvalid;
+    }
+    DCHECK(!UseVector());
+    state_ = StateFromCode(target);
+    extra_ic_state_ = target->extra_ic_state();
+  }
   old_state_ = state_;
-  extra_ic_state_ = target->extra_ic_state();
 }
 
 // The ICs that don't pass slot and vector through the stack have to
 // save/restore them in the dispatcher.
 bool IC::ShouldPushPopSlotAndVector(Code::Kind kind) {
   if (kind == Code::LOAD_IC || kind == Code::LOAD_GLOBAL_IC ||
-      kind == Code::KEYED_LOAD_IC || kind == Code::CALL_IC) {
+      kind == Code::KEYED_LOAD_IC) {
     return true;
   }
   if (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC) {
@@ -244,7 +286,7 @@
   }
 }
 
-SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
+JSFunction* IC::GetHostFunction() const {
   // Compute the JavaScript frame for the frame pointer of this IC
   // structure. We need this to be able to find the function
   // corresponding to the frame.
@@ -253,16 +295,7 @@
   JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
   // Find the function on the stack and both the active code for the
   // function and the original code.
-  JSFunction* function = frame->function();
-  return function->shared();
-}
-
-
-Code* IC::GetCode() const {
-  HandleScope scope(isolate());
-  Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
-  Code* code = shared->code();
-  return code;
+  return frame->function();
 }
 
 static void LookupForRead(LookupIterator* it) {
@@ -305,7 +338,7 @@
 
   // This is a contextual access, always just update the handler and stay
   // monomorphic.
-  if (kind() == Code::LOAD_GLOBAL_IC) return true;
+  if (IsLoadGlobalIC()) return true;
 
   // The current map wasn't handled yet. There's no reason to stay monomorphic,
   // *unless* we're moving from a deprecated map to its replacement, or
@@ -342,7 +375,7 @@
   update_receiver_map(receiver);
   if (!name->IsString()) return;
   if (state() != MONOMORPHIC && state() != POLYMORPHIC) return;
-  if (receiver->IsUndefined(isolate()) || receiver->IsNull(isolate())) return;
+  if (receiver->IsNullOrUndefined(isolate())) return;
 
   // Remove the target from the code cache if it became invalid
   // because of changes in the prototype chain to avoid hitting it
@@ -402,12 +435,14 @@
 }
 
 // static
-void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host) {
-  if (host->kind() != Code::FUNCTION) return;
+void IC::OnFeedbackChanged(Isolate* isolate, JSFunction* host_function) {
+  Code* host = host_function->shared()->code();
 
-  TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
-  info->change_own_type_change_checksum();
-  host->set_profiler_ticks(0);
+  if (host->kind() == Code::FUNCTION) {
+    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
+    info->change_own_type_change_checksum();
+    host->set_profiler_ticks(0);
+  }
   isolate->runtime_profiler()->NotifyICChanged();
   // TODO(2029): When an optimized function is patched, it would
   // be nice to propagate the corresponding type information to its
@@ -417,6 +452,7 @@
 void IC::PostPatching(Address address, Code* target, Code* old_target) {
   // Type vector based ICs update these statistics at a different time because
   // they don't always patch on state change.
+  // TODO(ishell): turn this early return into a DCHECK; vector-based ICs
+  // are not expected to reach PostPatching.
   if (ICUseVector(target->kind())) return;
 
   DCHECK(old_target->is_inline_cache_stub());
@@ -462,58 +498,6 @@
   }
 }
 
-
-void KeyedLoadIC::Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus) {
-  if (IsCleared(nexus)) return;
-  // Make sure to also clear the map used in inline fast cases.  If we
-  // do not clear these maps, cached code can keep objects alive
-  // through the embedded maps.
-  nexus->ConfigurePremonomorphic();
-  OnTypeFeedbackChanged(isolate, host);
-}
-
-
-void CallIC::Clear(Isolate* isolate, Code* host, CallICNexus* nexus) {
-  // Determine our state.
-  Object* feedback = nexus->vector()->Get(nexus->slot());
-  State state = nexus->StateFromFeedback();
-
-  if (state != UNINITIALIZED && !feedback->IsAllocationSite()) {
-    nexus->ConfigureUninitialized();
-    // The change in state must be processed.
-    OnTypeFeedbackChanged(isolate, host);
-  }
-}
-
-
-void LoadIC::Clear(Isolate* isolate, Code* host, LoadICNexus* nexus) {
-  if (IsCleared(nexus)) return;
-  nexus->ConfigurePremonomorphic();
-  OnTypeFeedbackChanged(isolate, host);
-}
-
-void LoadGlobalIC::Clear(Isolate* isolate, Code* host,
-                         LoadGlobalICNexus* nexus) {
-  if (IsCleared(nexus)) return;
-  nexus->ConfigureUninitialized();
-  OnTypeFeedbackChanged(isolate, host);
-}
-
-void StoreIC::Clear(Isolate* isolate, Code* host, StoreICNexus* nexus) {
-  if (IsCleared(nexus)) return;
-  nexus->ConfigurePremonomorphic();
-  OnTypeFeedbackChanged(isolate, host);
-}
-
-
-void KeyedStoreIC::Clear(Isolate* isolate, Code* host,
-                         KeyedStoreICNexus* nexus) {
-  if (IsCleared(nexus)) return;
-  nexus->ConfigurePremonomorphic();
-  OnTypeFeedbackChanged(isolate, host);
-}
-
-
 void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
                       Address constant_pool) {
   DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC);
@@ -538,13 +522,13 @@
   if (new_state == PREMONOMORPHIC) {
     nexus()->ConfigurePremonomorphic();
   } else if (new_state == MEGAMORPHIC) {
-    if (kind() == Code::LOAD_IC || kind() == Code::STORE_IC) {
+    if (IsLoadIC() || IsStoreIC() || IsStoreOwnIC()) {
       nexus()->ConfigureMegamorphic();
-    } else if (kind() == Code::KEYED_LOAD_IC) {
+    } else if (IsKeyedLoadIC()) {
       KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
       nexus->ConfigureMegamorphicKeyed(key->IsName() ? PROPERTY : ELEMENT);
     } else {
-      DCHECK(kind() == Code::KEYED_STORE_IC);
+      DCHECK(IsKeyedStoreIC());
       KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
       nexus->ConfigureMegamorphicKeyed(key->IsName() ? PROPERTY : ELEMENT);
     }
@@ -553,74 +537,131 @@
   }
 
   vector_set_ = true;
-  OnTypeFeedbackChanged(isolate(), get_host());
+  OnFeedbackChanged(isolate(), GetHostFunction());
 }
 
 void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
                               Handle<Object> handler) {
   DCHECK(UseVector());
-  if (kind() == Code::LOAD_IC) {
-    LoadICNexus* nexus = casted_nexus<LoadICNexus>();
-    nexus->ConfigureMonomorphic(map, handler);
-  } else if (kind() == Code::LOAD_GLOBAL_IC) {
-    LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
-    nexus->ConfigureHandlerMode(Handle<Code>::cast(handler));
-  } else if (kind() == Code::KEYED_LOAD_IC) {
-    KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
-    nexus->ConfigureMonomorphic(name, map, handler);
-  } else if (kind() == Code::STORE_IC) {
-    StoreICNexus* nexus = casted_nexus<StoreICNexus>();
-    nexus->ConfigureMonomorphic(map, handler);
-  } else {
-    DCHECK(kind() == Code::KEYED_STORE_IC);
-    KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
-    nexus->ConfigureMonomorphic(name, map, handler);
+  switch (kind_) {
+    case FeedbackSlotKind::kLoadProperty: {
+      LoadICNexus* nexus = casted_nexus<LoadICNexus>();
+      nexus->ConfigureMonomorphic(map, handler);
+      break;
+    }
+    case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+    case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
+      LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
+      nexus->ConfigureHandlerMode(handler);
+      break;
+    }
+    case FeedbackSlotKind::kLoadKeyed: {
+      KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+      nexus->ConfigureMonomorphic(name, map, handler);
+      break;
+    }
+    case FeedbackSlotKind::kStoreNamedSloppy:
+    case FeedbackSlotKind::kStoreNamedStrict:
+    case FeedbackSlotKind::kStoreOwnNamed: {
+      StoreICNexus* nexus = casted_nexus<StoreICNexus>();
+      nexus->ConfigureMonomorphic(map, handler);
+      break;
+    }
+    case FeedbackSlotKind::kStoreKeyedSloppy:
+    case FeedbackSlotKind::kStoreKeyedStrict: {
+      KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
+      nexus->ConfigureMonomorphic(name, map, handler);
+      break;
+    }
+    case FeedbackSlotKind::kCall:
+    case FeedbackSlotKind::kBinaryOp:
+    case FeedbackSlotKind::kCompareOp:
+    case FeedbackSlotKind::kToBoolean:
+    case FeedbackSlotKind::kCreateClosure:
+    case FeedbackSlotKind::kLiteral:
+    case FeedbackSlotKind::kGeneral:
+    case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+    case FeedbackSlotKind::kInvalid:
+    case FeedbackSlotKind::kKindsNumber:
+      UNREACHABLE();
+      break;
   }
 
   vector_set_ = true;
-  OnTypeFeedbackChanged(isolate(), get_host());
+  OnFeedbackChanged(isolate(), GetHostFunction());
 }
 
 void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
                               List<Handle<Object>>* handlers) {
   DCHECK(UseVector());
-  if (kind() == Code::LOAD_IC) {
-    LoadICNexus* nexus = casted_nexus<LoadICNexus>();
-    nexus->ConfigurePolymorphic(maps, handlers);
-  } else if (kind() == Code::KEYED_LOAD_IC) {
-    KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
-    nexus->ConfigurePolymorphic(name, maps, handlers);
-  } else if (kind() == Code::STORE_IC) {
-    StoreICNexus* nexus = casted_nexus<StoreICNexus>();
-    nexus->ConfigurePolymorphic(maps, handlers);
-  } else {
-    DCHECK(kind() == Code::KEYED_STORE_IC);
-    KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
-    nexus->ConfigurePolymorphic(name, maps, handlers);
+  switch (kind_) {
+    case FeedbackSlotKind::kLoadProperty: {
+      LoadICNexus* nexus = casted_nexus<LoadICNexus>();
+      nexus->ConfigurePolymorphic(maps, handlers);
+      break;
+    }
+    case FeedbackSlotKind::kLoadKeyed: {
+      KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+      nexus->ConfigurePolymorphic(name, maps, handlers);
+      break;
+    }
+    case FeedbackSlotKind::kStoreNamedSloppy:
+    case FeedbackSlotKind::kStoreNamedStrict:
+    case FeedbackSlotKind::kStoreOwnNamed: {
+      StoreICNexus* nexus = casted_nexus<StoreICNexus>();
+      nexus->ConfigurePolymorphic(maps, handlers);
+      break;
+    }
+    case FeedbackSlotKind::kStoreKeyedSloppy:
+    case FeedbackSlotKind::kStoreKeyedStrict: {
+      KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
+      nexus->ConfigurePolymorphic(name, maps, handlers);
+      break;
+    }
+    case FeedbackSlotKind::kCall:
+    case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+    case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+    case FeedbackSlotKind::kBinaryOp:
+    case FeedbackSlotKind::kCompareOp:
+    case FeedbackSlotKind::kToBoolean:
+    case FeedbackSlotKind::kCreateClosure:
+    case FeedbackSlotKind::kLiteral:
+    case FeedbackSlotKind::kGeneral:
+    case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+    case FeedbackSlotKind::kInvalid:
+    case FeedbackSlotKind::kKindsNumber:
+      UNREACHABLE();
+      break;
   }
 
   vector_set_ = true;
-  OnTypeFeedbackChanged(isolate(), get_host());
+  OnFeedbackChanged(isolate(), GetHostFunction());
 }
 
-
 void IC::ConfigureVectorState(MapHandleList* maps,
                               MapHandleList* transitioned_maps,
-                              CodeHandleList* handlers) {
+                              List<Handle<Object>>* handlers) {
   DCHECK(UseVector());
-  DCHECK(kind() == Code::KEYED_STORE_IC);
+  DCHECK(IsKeyedStoreIC());
   KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
   nexus->ConfigurePolymorphic(maps, transitioned_maps, handlers);
 
   vector_set_ = true;
-  OnTypeFeedbackChanged(isolate(), get_host());
+  OnFeedbackChanged(isolate(), GetHostFunction());
 }
 
 
 MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
   // If the object is undefined or null it's illegal to try to get any
   // of its properties; throw a TypeError in that case.
-  if (object->IsUndefined(isolate()) || object->IsNull(isolate())) {
+  if (object->IsNullOrUndefined(isolate())) {
+    if (FLAG_use_ic && state() != UNINITIALIZED && state() != PREMONOMORPHIC) {
+      // Ensure the IC state progresses.
+      TRACE_HANDLER_STATS(isolate(), LoadIC_NonReceiver);
+      update_receiver_map(object);
+      PatchCache(name, slow_stub());
+      TRACE_IC("LoadIC", name);
+    }
     return TypeError(MessageTemplate::kNonObjectPropertyLoad, object, name);
   }
 
@@ -792,11 +833,8 @@
 
 void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
   DCHECK(IsHandler(*handler));
-  // Currently only LoadIC and KeyedLoadIC support non-code handlers.
-  DCHECK_IMPLIES(!handler->IsCode(), kind() == Code::LOAD_IC ||
-                                         kind() == Code::KEYED_LOAD_IC ||
-                                         kind() == Code::STORE_IC ||
-                                         kind() == Code::KEYED_STORE_IC);
+  // Currently only load and store ICs support non-code handlers.
+  DCHECK_IMPLIES(!handler->IsCode(), IsAnyLoad() || IsAnyStore());
   switch (state()) {
     case UNINITIALIZED:
     case PREMONOMORPHIC:
@@ -804,7 +842,7 @@
       break;
     case RECOMPUTE_HANDLER:
     case MONOMORPHIC:
-      if (kind() == Code::LOAD_GLOBAL_IC) {
+      if (IsLoadGlobalIC()) {
         UpdateMonomorphicIC(handler, name);
         break;
       }
@@ -831,23 +869,9 @@
   }
 }
 
-Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
-                                                 ExtraICState extra_state) {
-  DCHECK(!FLAG_tf_store_ic_stub);
-  LanguageMode mode = StoreICState::GetLanguageMode(extra_state);
-  return is_strict(mode)
-             ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
-             : isolate->builtins()->KeyedStoreIC_Megamorphic();
-}
-
-Handle<Object> LoadIC::SimpleFieldLoad(FieldIndex index) {
-  if (FLAG_tf_load_ic_stub) {
-    TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);
-    return LoadHandler::LoadField(isolate(), index);
-  }
-  TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldStub);
-  LoadFieldStub stub(isolate(), index);
-  return stub.GetCode();
+Handle<Object> LoadIC::SimpleFieldLoad(Isolate* isolate, FieldIndex index) {
+  TRACE_HANDLER_STATS(isolate, LoadIC_LoadFieldDH);
+  return LoadHandler::LoadField(isolate, index);
 }
 
 namespace {
@@ -1044,7 +1068,7 @@
     if (holder->HasFastProperties()) {
       if (getter->IsJSFunction()) {
         Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
-        if (!receiver->IsJSObject() && !function->shared()->IsBuiltin() &&
+        if (!receiver->IsJSObject() && function->shared()->IsUserJavaScript() &&
             is_sloppy(function->shared()->language_mode())) {
           // Calling sloppy non-builtins with a value as the receiver
           // requires boxing.
@@ -1063,7 +1087,7 @@
 
 
 void LoadIC::UpdateCaches(LookupIterator* lookup) {
-  if (state() == UNINITIALIZED && kind() != Code::LOAD_GLOBAL_IC) {
+  if (state() == UNINITIALIZED && !IsLoadGlobalIC()) {
     // This is the first time we execute this inline cache. Set the target to
     // the pre monomorphic stub to delay setting the monomorphic state.
     TRACE_HANDLER_STATS(isolate(), LoadIC_Premonomorphic);
@@ -1077,26 +1101,12 @@
       lookup->state() == LookupIterator::ACCESS_CHECK) {
     code = slow_stub();
   } else if (!lookup->IsFound()) {
-    if (kind() == Code::LOAD_IC) {
-      TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
-      code = LoadNonExistent(receiver_map(), lookup->name());
-    } else if (kind() == Code::LOAD_GLOBAL_IC) {
-      code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
-                                                              receiver_map());
-      // TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
-      if (code.is_null()) code = slow_stub();
-    } else {
-      code = slow_stub();
-    }
+    TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
+    code = LoadNonExistent(receiver_map(), lookup->name());
   } else {
-    if (kind() == Code::LOAD_GLOBAL_IC &&
-        lookup->state() == LookupIterator::DATA &&
-        lookup->GetHolder<Object>()->IsJSGlobalObject()) {
-#if DEBUG
-      Handle<Object> holder = lookup->GetHolder<Object>();
-      Handle<Object> receiver = lookup->GetReceiver();
-      DCHECK_EQ(*receiver, *holder);
-#endif
+    if (IsLoadGlobalIC() && lookup->state() == LookupIterator::DATA &&
+        lookup->GetReceiver().is_identical_to(lookup->GetHolder<Object>())) {
+      DCHECK(lookup->GetReceiver()->IsJSGlobalObject());
       // Now update the cell in the feedback vector.
       LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
       nexus->ConfigurePropertyCellMode(lookup->GetPropertyCell());
@@ -1104,26 +1114,19 @@
       return;
     } else if (lookup->state() == LookupIterator::ACCESSOR) {
       if (!IsCompatibleReceiver(lookup, receiver_map())) {
-        TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
+        TRACE_GENERIC_IC("incompatible receiver type");
         code = slow_stub();
       }
     } else if (lookup->state() == LookupIterator::INTERCEPTOR) {
-      if (kind() == Code::LOAD_GLOBAL_IC) {
-        // The interceptor handler requires name but it is not passed explicitly
-        // to LoadGlobalIC and the LoadGlobalIC dispatcher also does not load
-        // it so we will just use slow stub.
+      // Perform a lookup behind the interceptor. Copy the LookupIterator
+      // since the original iterator will be used to fetch the value.
+      LookupIterator it = *lookup;
+      it.Next();
+      LookupForRead(&it);
+      if (it.state() == LookupIterator::ACCESSOR &&
+          !IsCompatibleReceiver(&it, receiver_map())) {
+        TRACE_GENERIC_IC("incompatible receiver type");
         code = slow_stub();
-      } else {
-        // Perform a lookup behind the interceptor. Copy the LookupIterator
-        // since the original iterator will be used to fetch the value.
-        LookupIterator it = *lookup;
-        it.Next();
-        LookupForRead(&it);
-        if (it.state() == LookupIterator::ACCESSOR &&
-            !IsCompatibleReceiver(&it, receiver_map())) {
-          TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
-          code = slow_stub();
-        }
       }
     }
     if (code.is_null()) code = ComputeHandler(lookup);
@@ -1134,20 +1137,12 @@
 }
 
 StubCache* IC::stub_cache() {
-  switch (kind()) {
-    case Code::LOAD_IC:
-    case Code::KEYED_LOAD_IC:
-      return isolate()->load_stub_cache();
-
-    case Code::STORE_IC:
-    case Code::KEYED_STORE_IC:
-      return isolate()->store_stub_cache();
-
-    default:
-      break;
+  if (IsAnyLoad()) {
+    return isolate()->load_stub_cache();
+  } else {
+    DCHECK(IsAnyStore());
+    return isolate()->store_stub_cache();
   }
-  UNREACHABLE();
-  return nullptr;
 }
 
 void IC::UpdateMegamorphicCache(Map* map, Name* name, Object* handler) {
@@ -1157,8 +1152,7 @@
 void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
   if (!FLAG_runtime_call_stats) return;
 
-  if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC ||
-      kind() == Code::KEYED_LOAD_IC) {
+  if (IsAnyLoad()) {
     switch (lookup->state()) {
       case LookupIterator::ACCESS_CHECK:
         TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_AccessCheck);
@@ -1185,7 +1179,7 @@
         TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Transition);
         break;
     }
-  } else if (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC) {
+  } else if (IsAnyStore()) {
     switch (lookup->state()) {
       case LookupIterator::ACCESS_CHECK:
         TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_AccessCheck);
@@ -1232,19 +1226,18 @@
       lookup->GetReceiver().is_identical_to(lookup->GetHolder<JSObject>());
   CacheHolderFlag flag;
   Handle<Map> stub_holder_map;
-  if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC ||
-      kind() == Code::KEYED_LOAD_IC) {
+  if (IsAnyLoad()) {
     stub_holder_map = IC::GetHandlerCacheHolder(
         receiver_map(), receiver_is_holder, isolate(), &flag);
   } else {
-    DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
+    DCHECK(IsAnyStore());
     // Store handlers cannot be cached on prototypes.
     flag = kCacheOnReceiver;
     stub_holder_map = receiver_map();
   }
 
   Handle<Object> handler = PropertyHandlerCompiler::Find(
-      lookup->name(), stub_holder_map, kind(), flag);
+      lookup->name(), stub_holder_map, handler_kind(), flag);
   // Use the cached value if it exists, and if it is different from the
   // handler that just missed.
   if (!handler.is_null()) {
@@ -1288,7 +1281,7 @@
   if (receiver->IsString() &&
       Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
     FieldIndex index = FieldIndex::ForInObjectOffset(String::kLengthOffset);
-    return SimpleFieldLoad(index);
+    return SimpleFieldLoad(isolate(), index);
   }
 
   if (receiver->IsStringWrapper() &&
@@ -1307,8 +1300,7 @@
            ->has_non_instance_prototype()) {
     Handle<Code> stub;
     TRACE_HANDLER_STATS(isolate(), LoadIC_FunctionPrototypeStub);
-    FunctionPrototypeStub function_prototype_stub(isolate());
-    return function_prototype_stub.GetCode();
+    return isolate()->builtins()->LoadIC_FunctionPrototype();
   }
 
   Handle<Map> map = receiver_map();
@@ -1326,7 +1318,7 @@
       if (Accessors::IsJSObjectFieldAccessor(map, lookup->name(),
                                              &object_offset)) {
         FieldIndex index = FieldIndex::ForInObjectOffset(object_offset, *map);
-        return SimpleFieldLoad(index);
+        return SimpleFieldLoad(isolate(), index);
       }
 
       if (IsCompatibleReceiver(lookup, map)) {
@@ -1337,7 +1329,7 @@
             return slow_stub();
           }
           // When debugging we need to go the slow path to flood the accessor.
-          if (GetSharedFunctionInfo()->HasDebugInfo()) {
+          if (GetHostFunction()->shared()->HasDebugInfo()) {
             TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
             return slow_stub();
           }
@@ -1356,26 +1348,15 @@
             TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
             return slow_stub();
           }
-          if (FLAG_tf_load_ic_stub) {
-            Handle<Object> smi_handler = LoadHandler::LoadApiGetter(
-                isolate(), lookup->GetAccessorIndex());
-            if (receiver_is_holder) {
-              TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterDH);
-              return smi_handler;
-            }
-            if (kind() != Code::LOAD_GLOBAL_IC) {
-              TRACE_HANDLER_STATS(isolate(),
-                                  LoadIC_LoadApiGetterFromPrototypeDH);
-              return LoadFromPrototype(map, holder, lookup->name(),
-                                       smi_handler);
-            }
-          } else {
-            if (receiver_is_holder) {
-              TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterStub);
-              int index = lookup->GetAccessorIndex();
-              LoadApiGetterStub stub(isolate(), true, index);
-              return stub.GetCode();
-            }
+          Handle<Object> smi_handler =
+              LoadHandler::LoadApiGetter(isolate(), lookup->GetAccessorIndex());
+          if (receiver_is_holder) {
+            TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterDH);
+            return smi_handler;
+          }
+          if (!IsLoadGlobalIC()) {
+            TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterFromPrototypeDH);
+            return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
           }
           break;  // Custom-compiled handler.
         }
@@ -1385,8 +1366,9 @@
     }
 
     case LookupIterator::DATA: {
+      DCHECK_EQ(kData, lookup->property_details().kind());
       if (lookup->is_dictionary_holder()) {
-        if (kind() != Code::LOAD_IC && kind() != Code::LOAD_GLOBAL_IC) {
+        if (!IsLoadIC() && !IsLoadGlobalIC()) {  // I.e., IsKeyedLoadIC().
           TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
           return slow_stub();
         }
@@ -1406,40 +1388,26 @@
       }
 
       // -------------- Fields --------------
-      if (lookup->property_details().type() == DATA) {
+      if (lookup->property_details().location() == kField) {
         FieldIndex field = lookup->GetFieldIndex();
-        Handle<Object> smi_handler = SimpleFieldLoad(field);
+        Handle<Object> smi_handler = SimpleFieldLoad(isolate(), field);
         if (receiver_is_holder) {
           return smi_handler;
         }
-        if (FLAG_tf_load_ic_stub && kind() != Code::LOAD_GLOBAL_IC) {
-          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldFromPrototypeDH);
-          return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
-        }
-        break;  // Custom-compiled handler.
+        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldFromPrototypeDH);
+        return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
       }
 
       // -------------- Constant properties --------------
-      DCHECK(lookup->property_details().type() == DATA_CONSTANT);
-      if (FLAG_tf_load_ic_stub) {
-        Handle<Object> smi_handler =
-            LoadHandler::LoadConstant(isolate(), lookup->GetConstantIndex());
-        if (receiver_is_holder) {
-          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantDH);
-          return smi_handler;
-        }
-        if (kind() != Code::LOAD_GLOBAL_IC) {
-          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
-          return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
-        }
-      } else {
-        if (receiver_is_holder) {
-          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantStub);
-          LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
-          return stub.GetCode();
-        }
+      DCHECK_EQ(kDescriptor, lookup->property_details().location());
+      Handle<Object> smi_handler =
+          LoadHandler::LoadConstant(isolate(), lookup->GetConstantIndex());
+      if (receiver_is_holder) {
+        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantDH);
+        return smi_handler;
       }
-      break;  // Custom-compiled handler.
+      TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
+      return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
     }
 
     case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -1507,7 +1475,7 @@
           return ComputeHandler(lookup);
         }
         DCHECK(holder->HasFastProperties());
-        DCHECK(!GetSharedFunctionInfo()->HasDebugInfo());
+        DCHECK(!GetHostFunction()->shared()->HasDebugInfo());
         Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
                               isolate());
         CallOptimization call_optimization(getter);
@@ -1543,33 +1511,15 @@
     }
 
     case LookupIterator::DATA: {
-      if (lookup->is_dictionary_holder()) {
-        DCHECK(kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC);
-        DCHECK(holder->IsJSGlobalObject());
-        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobal);
-        NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-        Handle<PropertyCell> cell = lookup->GetPropertyCell();
-        Handle<Code> code = compiler.CompileLoadGlobal(
-            cell, lookup->name(), lookup->IsConfigurable());
-        return code;
-      }
-
-      // -------------- Fields --------------
-      if (lookup->property_details().type() == DATA) {
-        FieldIndex field = lookup->GetFieldIndex();
-        DCHECK(!receiver_is_holder);
-        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadField);
-        NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-        return compiler.CompileLoadField(lookup->name(), field);
-      }
-
-      // -------------- Constant properties --------------
-      DCHECK(lookup->property_details().type() == DATA_CONSTANT);
-      DCHECK(!receiver_is_holder);
-      TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstant);
+      DCHECK(lookup->is_dictionary_holder());
+      DCHECK(IsLoadIC() || IsLoadGlobalIC());
+      DCHECK(holder->IsJSGlobalObject());
+      TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobal);
       NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-      return compiler.CompileLoadConstant(lookup->name(),
-                                          lookup->GetConstantIndex());
+      Handle<PropertyCell> cell = lookup->GetPropertyCell();
+      Handle<Code> code = compiler.CompileLoadGlobal(cell, lookup->name(),
+                                                     lookup->IsConfigurable());
+      return code;
     }
 
     case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -1599,6 +1549,8 @@
     }
   } else if (key->IsUndefined(isolate)) {
     key = isolate->factory()->undefined_string();
+  } else if (key->IsString()) {
+    key = isolate->factory()->InternalizeString(Handle<String>::cast(key));
   }
   return key;
 }
@@ -1620,11 +1572,11 @@
     Handle<Map> map = target_receiver_maps.at(i);
     if (map.is_null()) continue;
     if (map->instance_type() == JS_VALUE_TYPE) {
-      TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "JSValue");
+      TRACE_GENERIC_IC("JSValue");
       return;
     }
     if (map->instance_type() == JS_PROXY_TYPE) {
-      TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "JSProxy");
+      TRACE_GENERIC_IC("JSProxy");
       return;
     }
   }
@@ -1652,14 +1604,14 @@
   if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
     // If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help, use the generic stub.
-    TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "same map added twice");
+    TRACE_GENERIC_IC("same map added twice");
     return;
   }
 
   // If the maximum number of receiver maps has been exceeded, use the generic
   // version of the IC.
   if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
-    TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "max polymorph exceeded");
+    TRACE_GENERIC_IC("max polymorph exceeded");
     return;
   }
 
@@ -1706,7 +1658,6 @@
 
   if (!is_vector_set()) {
     ConfigureVectorState(MEGAMORPHIC, key);
-    TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
     TRACE_IC("LoadIC", key);
   }
 
@@ -1839,7 +1790,14 @@
 
   // If the object is undefined or null it's illegal to try to set any
   // properties on it; throw a TypeError in that case.
-  if (object->IsUndefined(isolate()) || object->IsNull(isolate())) {
+  if (object->IsNullOrUndefined(isolate())) {
+    if (FLAG_use_ic && state() != UNINITIALIZED && state() != PREMONOMORPHIC) {
+      // Ensure the IC state progresses.
+      TRACE_HANDLER_STATS(isolate(), StoreIC_NonReceiver);
+      update_receiver_map(object);
+      PatchCache(name, slow_stub());
+      TRACE_IC("StoreIC", name);
+    }
     return TypeError(MessageTemplate::kNonObjectPropertyStore, object, name);
   }
 
@@ -1865,12 +1823,13 @@
     return;
   }
 
-  bool use_ic = LookupForWrite(lookup, value, store_mode);
-  if (!use_ic) {
-    TRACE_GENERIC_IC(isolate(), "StoreIC", "LookupForWrite said 'false'");
+  Handle<Object> handler;
+  if (LookupForWrite(lookup, value, store_mode)) {
+    handler = ComputeHandler(lookup, value);
+  } else {
+    TRACE_GENERIC_IC("LookupForWrite said 'false'");
+    handler = slow_stub();
   }
-  Handle<Object> handler = use_ic ? ComputeHandler(lookup, value)
-                                  : Handle<Object>::cast(slow_stub());
 
   PatchCache(lookup->name(), handler);
   TRACE_IC("StoreIC", lookup->name());
@@ -1890,11 +1849,12 @@
   DCHECK(!transition->is_access_check_needed());
 
   Handle<Object> smi_handler;
-  if (details.type() == DATA_CONSTANT) {
+  DCHECK_EQ(kData, details.kind());
+  if (details.location() == kDescriptor) {
     smi_handler = StoreHandler::TransitionToConstant(isolate(), descriptor);
 
   } else {
-    DCHECK_EQ(DATA, details.type());
+    DCHECK_EQ(kField, details.location());
     bool extend_storage =
         Map::cast(transition->GetBackPointer())->unused_property_fields() == 0;
 
@@ -1967,18 +1927,15 @@
       }
       // Currently not handled by CompileStoreTransition.
       if (!holder->HasFastProperties()) {
-        TRACE_GENERIC_IC(isolate(), "StoreIC", "transition from slow");
+        TRACE_GENERIC_IC("transition from slow");
         TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
         return slow_stub();
       }
       DCHECK(lookup->IsCacheableTransition());
-      if (FLAG_tf_store_ic_stub) {
-        Handle<Map> transition = lookup->transition_map();
-        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
-        return StoreTransition(receiver_map(), holder, transition,
-                               lookup->name());
-      }
-      break;  // Custom-compiled handler.
+      Handle<Map> transition = lookup->transition_map();
+      TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
+      return StoreTransition(receiver_map(), holder, transition,
+                             lookup->name());
     }
 
     case LookupIterator::INTERCEPTOR: {
@@ -1990,7 +1947,7 @@
 
     case LookupIterator::ACCESSOR: {
       if (!holder->HasFastProperties()) {
-        TRACE_GENERIC_IC(isolate(), "StoreIC", "accessor on slow map");
+        TRACE_GENERIC_IC("accessor on slow map");
         TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
         return slow_stub();
       }
@@ -1998,20 +1955,19 @@
       if (accessors->IsAccessorInfo()) {
         Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
         if (v8::ToCData<Address>(info->setter()) == nullptr) {
-          TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == nullptr");
+          TRACE_GENERIC_IC("setter == nullptr");
           TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
           return slow_stub();
         }
         if (AccessorInfo::cast(*accessors)->is_special_data_property() &&
             !lookup->HolderIsReceiverOrHiddenPrototype()) {
-          TRACE_GENERIC_IC(isolate(), "StoreIC",
-                           "special data property in prototype chain");
+          TRACE_GENERIC_IC("special data property in prototype chain");
           TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
           return slow_stub();
         }
         if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info,
                                                    receiver_map())) {
-          TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
+          TRACE_GENERIC_IC("incompatible receiver type");
           TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
           return slow_stub();
         }
@@ -2024,7 +1980,7 @@
         Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
                               isolate());
         if (!setter->IsJSFunction() && !setter->IsFunctionTemplateInfo()) {
-          TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function");
+          TRACE_GENERIC_IC("setter not a function");
           TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
           return slow_stub();
         }
@@ -2033,7 +1989,7 @@
           if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
             break;  // Custom-compiled handler.
           }
-          TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver");
+          TRACE_GENERIC_IC("incompatible receiver");
           TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
           return slow_stub();
         }
@@ -2044,6 +2000,7 @@
     }
 
     case LookupIterator::DATA: {
+      DCHECK_EQ(kData, lookup->property_details().kind());
       if (lookup->is_dictionary_holder()) {
         if (holder->IsJSGlobalObject()) {
           break;  // Custom-compiled handler.
@@ -2054,33 +2011,18 @@
       }
 
       // -------------- Fields --------------
-      if (lookup->property_details().type() == DATA) {
-        if (FLAG_tf_store_ic_stub) {
-          TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
-          int descriptor = lookup->GetFieldDescriptorIndex();
-          FieldIndex index = lookup->GetFieldIndex();
-          return StoreHandler::StoreField(isolate(), descriptor, index,
-                                          lookup->representation());
-        } else {
-          bool use_stub = true;
-          if (lookup->representation().IsHeapObject()) {
-            // Only use a generic stub if no types need to be tracked.
-            Handle<FieldType> field_type = lookup->GetFieldType();
-            use_stub = !field_type->IsClass();
-          }
-          if (use_stub) {
-            TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldStub);
-            StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
-                                lookup->representation());
-            return stub.GetCode();
-          }
-        }
-        break;  // Custom-compiled handler.
+      if (lookup->property_details().location() == kField) {
+        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
+        int descriptor = lookup->GetFieldDescriptorIndex();
+        FieldIndex index = lookup->GetFieldIndex();
+        return StoreHandler::StoreField(isolate(), descriptor, index,
+                                        lookup->constness(),
+                                        lookup->representation());
       }
 
       // -------------- Constant properties --------------
-      DCHECK(lookup->property_details().type() == DATA_CONSTANT);
-      TRACE_GENERIC_IC(isolate(), "StoreIC", "constant property");
+      DCHECK_EQ(kDescriptor, lookup->property_details().location());
+      TRACE_GENERIC_IC("constant property");
       TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
       return slow_stub();
     }
@@ -2117,15 +2059,7 @@
         cell->set_value(isolate()->heap()->the_hole_value());
         return code;
       }
-      DCHECK(!FLAG_tf_store_ic_stub);
-      Handle<Map> transition = lookup->transition_map();
-      // Currently not handled by CompileStoreTransition.
-      DCHECK(holder->HasFastProperties());
-
-      DCHECK(lookup->IsCacheableTransition());
-      TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransition);
-      NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
-      return compiler.CompileStoreTransition(transition, lookup->name());
+      UNREACHABLE();
     }
 
     case LookupIterator::INTERCEPTOR:
@@ -2144,6 +2078,11 @@
         DCHECK(!info->is_sloppy() || receiver->IsJSReceiver());
         TRACE_HANDLER_STATS(isolate(), StoreIC_StoreCallback);
         NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
+        // TODO(ishell): don't hard-code the language mode into the handler,
+        // because the handler can be re-used through the megamorphic stub
+        // cache for the wrong language mode.
+        // Better to pass the vector/slot to Runtime::kStoreCallbackProperty
+        // and let it decode the language mode from the IC kind.
         Handle<Code> code = compiler.CompileStoreCallback(
             receiver, lookup->name(), info, language_mode());
         return code;
@@ -2173,40 +2112,18 @@
     }
 
     case LookupIterator::DATA: {
-      if (lookup->is_dictionary_holder()) {
-        DCHECK(holder->IsJSGlobalObject());
-        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobal);
-        DCHECK(holder.is_identical_to(receiver) ||
-               receiver->map()->prototype() == *holder);
-        auto cell = lookup->GetPropertyCell();
-        auto updated_type =
-            PropertyCell::UpdatedType(cell, value, lookup->property_details());
-        auto code = PropertyCellStoreHandler(
-            isolate(), receiver, Handle<JSGlobalObject>::cast(holder),
-            lookup->name(), cell, updated_type);
-        return code;
-      }
-
-      // -------------- Fields --------------
-      if (lookup->property_details().type() == DATA) {
-        DCHECK(!FLAG_tf_store_ic_stub);
-#ifdef DEBUG
-        bool use_stub = true;
-        if (lookup->representation().IsHeapObject()) {
-          // Only use a generic stub if no types need to be tracked.
-          Handle<FieldType> field_type = lookup->GetFieldType();
-          use_stub = !field_type->IsClass();
-        }
-        DCHECK(!use_stub);
-#endif
-        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreField);
-        NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
-        return compiler.CompileStoreField(lookup);
-      }
-
-      // -------------- Constant properties --------------
-      DCHECK(lookup->property_details().type() == DATA_CONSTANT);
-      UNREACHABLE();
+      DCHECK(lookup->is_dictionary_holder());
+      DCHECK(holder->IsJSGlobalObject());
+      TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobal);
+      DCHECK(holder.is_identical_to(receiver) ||
+             receiver->map()->prototype() == *holder);
+      auto cell = lookup->GetPropertyCell();
+      auto updated_type =
+          PropertyCell::UpdatedType(cell, value, lookup->property_details());
+      auto code = PropertyCellStoreHandler(isolate(), receiver,
+                                           Handle<JSGlobalObject>::cast(holder),
+                                           lookup->name(), cell, updated_type);
+      return code;
     }
 
     case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -2227,16 +2144,14 @@
     Handle<Map> monomorphic_map =
         ComputeTransitionedMap(receiver_map, store_mode);
     store_mode = GetNonTransitioningStoreMode(store_mode);
-    Handle<Code> handler =
-        PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(monomorphic_map,
-                                                                store_mode);
+    Handle<Object> handler = StoreElementHandler(monomorphic_map, store_mode);
     return ConfigureVectorState(Handle<Name>(), monomorphic_map, handler);
   }
 
   for (int i = 0; i < target_receiver_maps.length(); i++) {
     if (!target_receiver_maps.at(i).is_null() &&
         target_receiver_maps.at(i)->instance_type() == JS_VALUE_TYPE) {
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "JSValue");
+      TRACE_GENERIC_IC("JSValue");
       return;
     }
   }
@@ -2261,9 +2176,8 @@
       // if they at least come from the same origin for a transitioning store,
       // stay MONOMORPHIC and use the map for the most generic ElementsKind.
       store_mode = GetNonTransitioningStoreMode(store_mode);
-      Handle<Code> handler =
-          PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
-              transitioned_receiver_map, store_mode);
+      Handle<Object> handler =
+          StoreElementHandler(transitioned_receiver_map, store_mode);
       ConfigureVectorState(Handle<Name>(), transitioned_receiver_map, handler);
       return;
     }
@@ -2275,9 +2189,7 @@
       // A "normal" IC that handles stores can switch to a version that can
       // grow at the end of the array, handle OOB accesses or copy COW arrays
       // and still stay MONOMORPHIC.
-      Handle<Code> handler =
-          PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(receiver_map,
-                                                                  store_mode);
+      Handle<Object> handler = StoreElementHandler(receiver_map, store_mode);
       return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
     }
   }
@@ -2297,7 +2209,7 @@
   if (!map_added) {
     // If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help; use the megamorphic stub, which can handle everything.
-    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "same map added twice");
+    TRACE_GENERIC_IC("same map added twice");
     return;
   }
 
@@ -2312,7 +2224,7 @@
     if (store_mode == STANDARD_STORE) {
       store_mode = old_store_mode;
     } else if (store_mode != old_store_mode) {
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "store mode mismatch");
+      TRACE_GENERIC_IC("store mode mismatch");
       return;
     }
   }
@@ -2329,16 +2241,15 @@
     }
     if (external_arrays != 0 &&
         external_arrays != target_receiver_maps.length()) {
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
-                       "unsupported combination of external and normal arrays");
+      TRACE_GENERIC_IC("unsupported combination of external and normal arrays");
       return;
     }
   }
 
   MapHandleList transitioned_maps(target_receiver_maps.length());
-  CodeHandleList handlers(target_receiver_maps.length());
-  PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
-      &target_receiver_maps, &transitioned_maps, &handlers, store_mode);
+  List<Handle<Object>> handlers(target_receiver_maps.length());
+  StoreElementPolymorphicHandlers(&target_receiver_maps, &transitioned_maps,
+                                  &handlers, store_mode);
   ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers);
 }
 
@@ -2372,6 +2283,91 @@
   return MaybeHandle<Map>().ToHandleChecked();
 }
 
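+// Builds a handler for an element store on |receiver_map|: a sloppy-arguments
+// stub, a fast/typed-array element-store stub, or the dictionary slow stub.
+// When the map's prototype chain has a validity cell, the stub is paired with
+// it in a Tuple2 so the handler can be invalidated when the chain changes.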
+Handle<Object> KeyedStoreIC::StoreElementHandler(
+    Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
+  DCHECK(store_mode == STANDARD_STORE ||
+         store_mode == STORE_AND_GROW_NO_TRANSITION ||
+         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+         store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+
+  ElementsKind elements_kind = receiver_map->elements_kind();
+  bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
+  Handle<Code> stub;
+  if (receiver_map->has_sloppy_arguments_elements()) {
+    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_KeyedStoreSloppyArgumentsStub);
+    stub = KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
+  } else if (receiver_map->has_fast_elements() ||
+             receiver_map->has_fixed_typed_array_elements()) {
+    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreFastElementStub);
+    stub =
+        StoreFastElementStub(isolate(), is_jsarray, elements_kind, store_mode)
+            .GetCode();
+  } else {
+    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
+    DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind);
+    stub = StoreSlowElementStub(isolate(), store_mode).GetCode();
+  }
+  Handle<Object> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (validity_cell.is_null()) {
+    return stub;
+  }
+  return isolate()->factory()->NewTuple2(validity_cell, stub);
+}
+
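+// Computes one handler per receiver map for a polymorphic keyed store: maps
+// with a more general elements-kind target in |receiver_maps| get an
+// ElementsTransitionAndStoreStub (paired with a validity cell when one
+// exists), non-JSReceiver maps fall back to the slow builtin, and all other
+// maps reuse StoreElementHandler().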
+void KeyedStoreIC::StoreElementPolymorphicHandlers(
+    MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
+    List<Handle<Object>>* handlers, KeyedAccessStoreMode store_mode) {
+  DCHECK(store_mode == STANDARD_STORE ||
+         store_mode == STORE_AND_GROW_NO_TRANSITION ||
+         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+         store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    Handle<Map> receiver_map(receiver_maps->at(i));
+    Handle<Object> handler;
+    Handle<Map> transitioned_map;
+    {
+      Map* tmap = receiver_map->FindElementsKindTransitionedMap(receiver_maps);
+      if (tmap != nullptr) transitioned_map = handle(tmap);
+    }
+
+    // TODO(mvstanton): The code below is doing pessimistic elements
+    // transitions. I would like to stop doing that and rely on Allocation
+    // Site Tracking to do a better job of ensuring the data types are what
+    // they need to be. Not all the pieces are in place yet; until they are,
+    // pessimistic elements transitions remain important for performance.
+    if (!transitioned_map.is_null()) {
+      bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+      ElementsKind elements_kind = receiver_map->elements_kind();
+      TRACE_HANDLER_STATS(isolate(),
+                          KeyedStoreIC_ElementsTransitionAndStoreStub);
+      Handle<Code> stub =
+          ElementsTransitionAndStoreStub(isolate(), elements_kind,
+                                         transitioned_map->elements_kind(),
+                                         is_js_array, store_mode)
+              .GetCode();
+      Handle<Object> validity_cell =
+          Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+      if (validity_cell.is_null()) {
+        handler = stub;
+      } else {
+        handler = isolate()->factory()->NewTuple2(validity_cell, stub);
+      }
+
+    } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+      // TODO(mvstanton): Consider embedding store_mode in the state of the slow
+      // keyed store ic for uniformity.
+      TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
+      handler = isolate()->builtins()->KeyedStoreIC_Slow();
+    } else {
+      handler = StoreElementHandler(receiver_map, store_mode);
+    }
+    DCHECK(!handler.is_null());
+    handlers->Add(handler);
+    transitioned_maps->Add(transitioned_map);
+  }
+}
 
 bool IsOutOfBoundsAccess(Handle<JSObject> receiver, uint32_t index) {
   uint32_t length = 0;
@@ -2464,8 +2460,7 @@
         Object);
     if (!is_vector_set()) {
       ConfigureVectorState(MEGAMORPHIC, key);
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
-                       "unhandled internalized string key");
+      TRACE_GENERIC_IC("unhandled internalized string key");
       TRACE_IC("StoreIC", key);
     }
     return store_handle;
@@ -2479,23 +2474,20 @@
     // the runtime to enable optimization of element hole access.
     Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
     if (heap_object->map()->IsMapInArrayPrototypeChain()) {
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "map in array prototype");
+      TRACE_GENERIC_IC("map in array prototype");
       use_ic = false;
     }
   }
 
   Handle<Map> old_receiver_map;
-  bool sloppy_arguments_elements = false;
+  bool is_arguments = false;
   bool key_is_valid_index = false;
   KeyedAccessStoreMode store_mode = STANDARD_STORE;
   if (use_ic && object->IsJSObject()) {
     Handle<JSObject> receiver = Handle<JSObject>::cast(object);
     old_receiver_map = handle(receiver->map(), isolate());
-    sloppy_arguments_elements =
-        !is_sloppy(language_mode()) &&
-        receiver->elements()->map() ==
-            isolate()->heap()->sloppy_arguments_elements_map();
-    if (!sloppy_arguments_elements) {
+    is_arguments = receiver->IsJSArgumentsObject();
+    if (!is_arguments) {
       key_is_valid_index = key->IsSmi() && Smi::cast(*key)->value() >= 0;
       if (key_is_valid_index) {
         uint32_t index = static_cast<uint32_t>(Smi::cast(*key)->value());
@@ -2512,8 +2504,8 @@
 
   if (use_ic) {
     if (!old_receiver_map.is_null()) {
-      if (sloppy_arguments_elements) {
-        TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
+      if (is_arguments) {
+        TRACE_GENERIC_IC("arguments receiver");
       } else if (key_is_valid_index) {
         // We should go generic if receiver isn't a dictionary, but our
         // prototype chain does have dictionary elements. This ensures that
@@ -2522,20 +2514,18 @@
         if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly()) {
           UpdateStoreElement(old_receiver_map, store_mode);
         } else {
-          TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
-                           "dictionary or proxy prototype");
+          TRACE_GENERIC_IC("dictionary or proxy prototype");
         }
       } else {
-        TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-smi-like key");
+        TRACE_GENERIC_IC("non-smi-like key");
       }
     } else {
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-JSObject receiver");
+      TRACE_GENERIC_IC("non-JSObject receiver");
     }
   }
 
   if (!is_vector_set()) {
     ConfigureVectorState(MEGAMORPHIC, key);
-    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
   }
   TRACE_IC("StoreIC", key);
 
@@ -2543,48 +2533,6 @@
 }
 
 
-void CallIC::HandleMiss(Handle<Object> function) {
-  Handle<Object> name = isolate()->factory()->empty_string();
-  CallICNexus* nexus = casted_nexus<CallICNexus>();
-  Object* feedback = nexus->GetFeedback();
-
-  // Hand-coded MISS handling is easier if CallIC slots don't contain smis.
-  DCHECK(!feedback->IsSmi());
-
-  if (feedback->IsWeakCell() || !function->IsJSFunction() ||
-      feedback->IsAllocationSite()) {
-    // We are going generic.
-    nexus->ConfigureMegamorphic();
-  } else {
-    DCHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate()));
-    Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
-
-    Handle<JSFunction> array_function =
-        Handle<JSFunction>(isolate()->native_context()->array_function());
-    if (array_function.is_identical_to(js_function)) {
-      // Alter the slot.
-      nexus->ConfigureMonomorphicArray();
-    } else if (js_function->context()->native_context() !=
-               *isolate()->native_context()) {
-      // Don't collect cross-native context feedback for the CallIC.
-      // TODO(bmeurer): We should collect the SharedFunctionInfo as
-      // feedback in this case instead.
-      nexus->ConfigureMegamorphic();
-    } else {
-      nexus->ConfigureMonomorphic(js_function);
-    }
-  }
-
-  if (function->IsJSFunction()) {
-    Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
-    name = handle(js_function->shared()->name(), isolate());
-  }
-
-  OnTypeFeedbackChanged(isolate(), get_host());
-  TRACE_IC("CallIC", name);
-}
-
-
 #undef TRACE_IC
 
 
@@ -2593,55 +2541,36 @@
 //
 
 // Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(3, args.length());
-  // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> function = args.at<Object>(0);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
-  Handle<Smi> slot = args.at<Smi>(2);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
-  CallICNexus nexus(vector, vector_slot);
-  CallIC ic(isolate, &nexus);
-  ic.HandleMiss(function);
-  return *function;
-}
-
-
-// Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
   HandleScope scope(isolate);
   DCHECK_EQ(4, args.length());
   // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> receiver = args.at<Object>(0);
+  Handle<Object> receiver = args.at(0);
+  Handle<Name> key = args.at<Name>(1);
   Handle<Smi> slot = args.at<Smi>(2);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
   // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
   // LoadIC miss handler if the handler misses. Since the vector Nexus is
   // set up outside the IC, handle that here.
-  FeedbackVectorSlotKind kind = vector->GetKind(vector_slot);
-  if (kind == FeedbackVectorSlotKind::LOAD_IC) {
-    Handle<Name> key = args.at<Name>(1);
+  FeedbackSlotKind kind = vector->GetKind(vector_slot);
+  if (IsLoadICKind(kind)) {
     LoadICNexus nexus(vector, vector_slot);
-    LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    LoadIC ic(isolate, &nexus);
     ic.UpdateState(receiver, key);
     RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
 
-  } else if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
-    Handle<Name> key(vector->GetName(vector_slot), isolate);
-    DCHECK_NE(*key, isolate->heap()->empty_string());
+  } else if (IsLoadGlobalICKind(kind)) {
     DCHECK_EQ(*isolate->global_object(), *receiver);
     LoadGlobalICNexus nexus(vector, vector_slot);
-    LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    LoadGlobalIC ic(isolate, &nexus);
     ic.UpdateState(receiver, key);
     RETURN_RESULT_OR_FAILURE(isolate, ic.Load(key));
 
   } else {
-    Handle<Name> key = args.at<Name>(1);
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, kind);
+    DCHECK(IsKeyedLoadICKind(kind));
     KeyedLoadICNexus nexus(vector, vector_slot);
-    KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    KeyedLoadIC ic(isolate, &nexus);
     ic.UpdateState(receiver, key);
     RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
   }
@@ -2650,19 +2579,16 @@
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
   HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
+  DCHECK_EQ(3, args.length());
   // Runtime functions don't follow the IC's calling convention.
   Handle<JSGlobalObject> global = isolate->global_object();
-  Handle<Smi> slot = args.at<Smi>(0);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
-  DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
-            vector->GetKind(vector_slot));
-  Handle<String> name(vector->GetName(vector_slot), isolate);
-  DCHECK_NE(*name, isolate->heap()->empty_string());
+  Handle<String> name = args.at<String>(0);
+  Handle<Smi> slot = args.at<Smi>(1);
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
 
   LoadGlobalICNexus nexus(vector, vector_slot);
-  LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+  LoadGlobalIC ic(isolate, &nexus);
   ic.UpdateState(global, name);
 
   Handle<Object> result;
@@ -2672,20 +2598,12 @@
 
 RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
   HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_SMI_ARG_CHECKED(slot, 0);
-  CONVERT_ARG_HANDLE_CHECKED(TypeFeedbackVector, vector, 1);
+  DCHECK_EQ(3, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
 
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot);
-  DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
-            vector->GetKind(vector_slot));
-  Handle<String> name(vector->GetName(vector_slot), isolate);
-  DCHECK_NE(*name, isolate->heap()->empty_string());
-
-  Handle<JSGlobalObject> global = isolate->global_object();
-
+  Handle<Context> native_context = isolate->native_context();
   Handle<ScriptContextTable> script_contexts(
-      global->native_context()->script_context_table());
+      native_context->script_context_table());
 
   ScriptContextTable::LookupResult lookup_result;
   if (ScriptContextTable::Lookup(script_contexts, name, &lookup_result)) {
@@ -2700,17 +2618,20 @@
     return *result;
   }
 
+  Handle<JSGlobalObject> global(native_context->global_object(), isolate);
   Handle<Object> result;
   bool is_found = false;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
       Runtime::GetObjectProperty(isolate, global, name, &is_found));
   if (!is_found) {
-    LoadICNexus nexus(isolate);
-    LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    Handle<Smi> slot = args.at<Smi>(1);
+    Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+    FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+    FeedbackSlotKind kind = vector->GetKind(vector_slot);
    // It is actually a LoadGlobalIC here, but the predicate handles this
    // case properly.
-    if (ic.ShouldThrowReferenceError()) {
+    if (LoadIC::ShouldThrowReferenceError(kind)) {
       THROW_NEW_ERROR_RETURN_FAILURE(
           isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
     }
@@ -2723,75 +2644,56 @@
   HandleScope scope(isolate);
   DCHECK_EQ(4, args.length());
   // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
+  Handle<Object> receiver = args.at(0);
+  Handle<Object> key = args.at(1);
   Handle<Smi> slot = args.at<Smi>(2);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
   KeyedLoadICNexus nexus(vector, vector_slot);
-  KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+  KeyedLoadIC ic(isolate, &nexus);
   ic.UpdateState(receiver, key);
   RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
 }
 
-
-RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
-  HandleScope scope(isolate);
-  typedef LoadWithVectorDescriptor Descriptor;
-  DCHECK_EQ(Descriptor::kParameterCount, args.length());
-  Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
-  Handle<Object> key = args.at<Object>(Descriptor::kName);
-  Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
-  Handle<TypeFeedbackVector> vector =
-      args.at<TypeFeedbackVector>(Descriptor::kVector);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
-  KeyedLoadICNexus nexus(vector, vector_slot);
-  KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
-  ic.UpdateState(receiver, key);
-  RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
-}
-
-
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
   HandleScope scope(isolate);
   DCHECK_EQ(5, args.length());
   // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> value = args.at(0);
   Handle<Smi> slot = args.at<Smi>(1);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
-  Handle<Object> receiver = args.at<Object>(3);
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+  Handle<Object> receiver = args.at(3);
   Handle<Name> key = args.at<Name>(4);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
-  if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+  FeedbackSlotKind kind = vector->GetKind(vector_slot);
+  if (IsStoreICKind(kind) || IsStoreOwnICKind(kind)) {
     StoreICNexus nexus(vector, vector_slot);
-    StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    StoreIC ic(isolate, &nexus);
     ic.UpdateState(receiver, key);
     RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
   } else {
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
-              vector->GetKind(vector_slot));
+    DCHECK(IsKeyedStoreICKind(kind));
     KeyedStoreICNexus nexus(vector, vector_slot);
-    KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    KeyedStoreIC ic(isolate, &nexus);
     ic.UpdateState(receiver, key);
     RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
   }
 }
 
-
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
   HandleScope scope(isolate);
   DCHECK_EQ(5, args.length());
   // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> value = args.at(0);
   Handle<Smi> slot = args.at<Smi>(1);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
-  Handle<Object> receiver = args.at<Object>(3);
-  Handle<Object> key = args.at<Object>(4);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+  Handle<Object> receiver = args.at(3);
+  Handle<Object> key = args.at(4);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
   KeyedStoreICNexus nexus(vector, vector_slot);
-  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+  KeyedStoreIC ic(isolate, &nexus);
   ic.UpdateState(receiver, key);
   RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
 }
@@ -2801,14 +2703,13 @@
   HandleScope scope(isolate);
   DCHECK_EQ(5, args.length());
   // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> value = args.at<Object>(0);
-  // slot and vector parameters are not used.
-  Handle<Object> object = args.at<Object>(3);
-  Handle<Object> key = args.at<Object>(4);
-  LanguageMode language_mode;
-  KeyedStoreICNexus nexus(isolate);
-  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
-  language_mode = ic.language_mode();
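+  // The language mode is decoded from the feedback vector's slot kind.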
+  Handle<Object> value = args.at(0);
+  Handle<Smi> slot = args.at<Smi>(1);
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+  Handle<Object> object = args.at(3);
+  Handle<Object> key = args.at(4);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+  LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
   RETURN_RESULT_OR_FAILURE(
       isolate,
       Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
@@ -2817,15 +2718,16 @@
 
 RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
   HandleScope scope(isolate);
+  DCHECK_EQ(6, args.length());
   // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> object = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
-  Handle<Object> value = args.at<Object>(2);
+  Handle<Object> object = args.at(0);
+  Handle<Object> key = args.at(1);
+  Handle<Object> value = args.at(2);
   Handle<Map> map = args.at<Map>(3);
-  LanguageMode language_mode;
-  KeyedStoreICNexus nexus(isolate);
-  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
-  language_mode = ic.language_mode();
+  Handle<Smi> slot = args.at<Smi>(4);
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(5);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+  LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
   if (object->IsJSObject()) {
     JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
                                      map->elements_kind());
@@ -2931,15 +2833,25 @@
   }
   set_target(*new_target);
 
-  if (FLAG_trace_ic) {
-    OFStream os(stdout);
-    os << "[BinaryOpIC" << old_state << " => " << state << " @ "
-       << static_cast<void*>(*new_target) << " <- ";
-    JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
-    if (!allocation_site.is_null()) {
-      os << " using allocation site " << static_cast<void*>(*allocation_site);
-    }
-    os << "]" << std::endl;
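+  // Report the state transition either to the ICStats tracing sink (when
+  // --ic-stats was enabled via tracing) or to the V8 log file.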
+  if (FLAG_ic_stats &
+      v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+    auto ic_stats = ICStats::instance();
+    ic_stats->Begin();
+    ICInfo& ic_info = ic_stats->Current();
+    ic_info.type = "BinaryOpIC";
+    ic_info.state = old_state.ToString();
+    ic_info.state += " => ";
+    ic_info.state += state.ToString();
+    JavaScriptFrame::CollectTopFrameForICStats(isolate());
+    ic_stats->End();
+  } else if (FLAG_ic_stats) {
+    int line;
+    int column;
+    Address pc = GetAbstractPC(&line, &column);
+    LOG(isolate(),
+        BinaryOpIC(pc, line, column, *new_target, old_state.ToString().c_str(),
+                   state.ToString().c_str(),
+                   allocation_site.is_null() ? nullptr : *allocation_site));
   }
 
   // Patch the inlined smi code as necessary.
@@ -2957,8 +2869,8 @@
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
   typedef BinaryOpDescriptor Descriptor;
-  Handle<Object> left = args.at<Object>(Descriptor::kLeft);
-  Handle<Object> right = args.at<Object>(Descriptor::kRight);
+  Handle<Object> left = args.at(Descriptor::kLeft);
+  Handle<Object> right = args.at(Descriptor::kRight);
   BinaryOpIC ic(isolate);
   RETURN_RESULT_OR_FAILURE(
       isolate, ic.Transition(Handle<AllocationSite>::null(), left, right));
@@ -2971,8 +2883,8 @@
   typedef BinaryOpWithAllocationSiteDescriptor Descriptor;
   Handle<AllocationSite> allocation_site =
       args.at<AllocationSite>(Descriptor::kAllocationSite);
-  Handle<Object> left = args.at<Object>(Descriptor::kLeft);
-  Handle<Object> right = args.at<Object>(Descriptor::kRight);
+  Handle<Object> left = args.at(Descriptor::kLeft);
+  Handle<Object> right = args.at(Descriptor::kRight);
   BinaryOpIC ic(isolate);
   RETURN_RESULT_OR_FAILURE(isolate,
                            ic.Transition(allocation_site, left, right));
@@ -3005,17 +2917,40 @@
   Handle<Code> new_target = stub.GetCode();
   set_target(*new_target);
 
-  if (FLAG_trace_ic) {
-    PrintF("[CompareIC in ");
-    JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
-    PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
-           CompareICState::GetStateName(old_stub.left()),
-           CompareICState::GetStateName(old_stub.right()),
-           CompareICState::GetStateName(old_stub.state()),
-           CompareICState::GetStateName(new_left),
-           CompareICState::GetStateName(new_right),
-           CompareICState::GetStateName(state), Token::Name(op_),
-           static_cast<void*>(*stub.GetCode()));
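+  // As for BinaryOpIC above, route the transition report either to the
+  // ICStats tracing sink or to the V8 log file.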
+  if (FLAG_ic_stats &
+      v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+    auto ic_stats = ICStats::instance();
+    ic_stats->Begin();
+    ICInfo& ic_info = ic_stats->Current();
+    ic_info.type = "CompareIC";
+    JavaScriptFrame::CollectTopFrameForICStats(isolate());
+    ic_info.state = "((";
+    ic_info.state += CompareICState::GetStateName(old_stub.left());
+    ic_info.state += "+";
+    ic_info.state += CompareICState::GetStateName(old_stub.right());
+    ic_info.state += "=";
+    ic_info.state += CompareICState::GetStateName(old_stub.state());
+    ic_info.state += ")->(";
+    ic_info.state += CompareICState::GetStateName(new_left);
+    ic_info.state += "+";
+    ic_info.state += CompareICState::GetStateName(new_right);
+    ic_info.state += "=";
+    ic_info.state += CompareICState::GetStateName(state);
+    ic_info.state += "))#";
+    ic_info.state += Token::Name(op_);
+    ic_stats->End();
+  } else if (FLAG_ic_stats) {
+    int line;
+    int column;
+    Address pc = GetAbstractPC(&line, &column);
+    LOG(isolate(),
+        CompareIC(pc, line, column, *stub.GetCode(), Token::Name(op_),
+                  CompareICState::GetStateName(old_stub.left()),
+                  CompareICState::GetStateName(old_stub.right()),
+                  CompareICState::GetStateName(old_stub.state()),
+                  CompareICState::GetStateName(new_left),
+                  CompareICState::GetStateName(new_right),
+                  CompareICState::GetStateName(state)));
   }
 
   // Activate inlined smi code.
@@ -3032,7 +2967,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 3);
   CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
-  return ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
+  return ic.UpdateCaches(args.at(0), args.at(1));
 }
 
 
@@ -3045,9 +2980,36 @@
 
 Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
   ToBooleanICStub stub(isolate(), extra_ic_state());
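+  // Capture the hints before and after UpdateStatus so the transition can be
+  // checked and reported below.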
+  ToBooleanHints old_hints = stub.hints();
   bool to_boolean_value = stub.UpdateStatus(object);
+  ToBooleanHints new_hints = stub.hints();
   Handle<Code> code = stub.GetCode();
   set_target(*code);
+
+  // Note: Although a no-op transition is semantically OK, it hints at a bug
+  // somewhere in our state transition machinery.
+  DCHECK_NE(old_hints, new_hints);
+  if (V8_UNLIKELY(FLAG_ic_stats)) {
+    if (FLAG_ic_stats &
+        v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+      auto ic_stats = ICStats::instance();
+      ic_stats->Begin();
+      ICInfo& ic_info = ic_stats->Current();
+      ic_info.type = "ToBooleanIC";
+      ic_info.state = ToString(old_hints);
+      ic_info.state += "=>";
+      ic_info.state += ToString(new_hints);
+      ic_stats->End();
+    } else {
+      int line;
+      int column;
+      Address pc = GetAbstractPC(&line, &column);
+      LOG(isolate(),
+          ToBooleanIC(pc, line, column, *code, ToString(old_hints).c_str(),
+                      ToString(new_hints).c_str()));
+    }
+  }
+
   return isolate()->factory()->ToBoolean(to_boolean_value);
 }
 
@@ -3055,7 +3017,7 @@
 RUNTIME_FUNCTION(Runtime_ToBooleanIC_Miss) {
   DCHECK(args.length() == 1);
   HandleScope scope(isolate);
-  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> object = args.at(0);
   ToBooleanIC ic(isolate);
   return *ic.ToBoolean(object);
 }
@@ -3066,7 +3028,7 @@
   Handle<JSObject> holder = args.at<JSObject>(1);
   Handle<HeapObject> callback_or_cell = args.at<HeapObject>(2);
   Handle<Name> name = args.at<Name>(3);
-  Handle<Object> value = args.at<Object>(4);
+  Handle<Object> value = args.at(4);
   CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 5);
   HandleScope scope(isolate);
 
@@ -3110,7 +3072,7 @@
   Handle<Name> name =
       args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
   Handle<Object> receiver =
-      args.at<Object>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+      args.at(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
   Handle<JSObject> holder =
       args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
   HandleScope scope(isolate);
@@ -3142,11 +3104,11 @@
  */
 RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
+  DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength + 2);
   Handle<Name> name =
       args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
   Handle<Object> receiver =
-      args.at<Object>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+      args.at(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
   Handle<JSObject> holder =
       args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
 
@@ -3181,26 +3143,33 @@
 
   if (it.IsFound()) return *result;
 
-#ifdef DEBUG
-  LoadICNexus nexus(isolate);
-  LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
-  // It could actually be any kind of LoadICs here but the predicate handles
-  // all the cases properly.
-  DCHECK(!ic.ShouldThrowReferenceError());
-#endif
+  Handle<Smi> slot = args.at<Smi>(3);
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(4);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+  FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
+  // It could actually be any kind of load IC slot here, but the predicate
+  // handles all the cases properly.
+  if (!LoadIC::ShouldThrowReferenceError(slot_kind)) {
+    return isolate->heap()->undefined_value();
+  }
 
-  return isolate->heap()->undefined_value();
+  // Throw a reference error.
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewReferenceError(MessageTemplate::kNotDefined, it.name()));
 }
 
 
 RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-  StoreICNexus nexus(isolate);
-  StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
-  Handle<JSObject> receiver = args.at<JSObject>(0);
-  Handle<Name> name = args.at<Name>(1);
-  Handle<Object> value = args.at<Object>(2);
+  DCHECK_EQ(5, args.length());
+  // Runtime functions don't follow the IC's calling convention.
+  Handle<Object> value = args.at(0);
+  Handle<Smi> slot = args.at<Smi>(1);
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+  Handle<JSObject> receiver = args.at<JSObject>(3);
+  Handle<Name> name = args.at<Name>(4);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+  LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
 
   DCHECK(receiver->HasNamedInterceptor());
   InterceptorInfo* interceptor = receiver->GetNamedInterceptor();
@@ -3225,7 +3194,7 @@
   DCHECK_EQ(LookupIterator::INTERCEPTOR, it.state());
   it.Next();
 
-  MAYBE_RETURN(Object::SetProperty(&it, value, ic.language_mode(),
+  MAYBE_RETURN(Object::SetProperty(&it, value, language_mode,
                                    JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED),
                isolate->heap()->exception());
   return *value;
diff --git a/src/ic/ic.h b/src/ic/ic.h
index 9e69cc8..c9818f5 100644
--- a/src/ic/ic.h
+++ b/src/ic/ic.h
@@ -5,6 +5,8 @@
 #ifndef V8_IC_H_
 #define V8_IC_H_
 
+#include "src/factory.h"
+#include "src/feedback-vector.h"
 #include "src/ic/ic-state.h"
 #include "src/macro-assembler.h"
 #include "src/messages.h"
@@ -45,16 +47,12 @@
   // Clear the inline cache to initial state.
   static void Clear(Isolate* isolate, Address address, Address constant_pool);
 
-#ifdef DEBUG
-  bool IsLoadStub() const {
-    return kind_ == Code::LOAD_IC || kind_ == Code::LOAD_GLOBAL_IC ||
-           kind_ == Code::KEYED_LOAD_IC;
+  bool IsAnyLoad() const {
+    return IsLoadIC() || IsLoadGlobalIC() || IsKeyedLoadIC();
   }
-  bool IsStoreStub() const {
-    return kind_ == Code::STORE_IC || kind_ == Code::KEYED_STORE_IC;
+  bool IsAnyStore() const {
+    return IsStoreIC() || IsStoreOwnIC() || IsKeyedStoreIC();
   }
-  bool IsCallStub() const { return kind_ == Code::CALL_IC; }
-#endif
 
   static inline Handle<Map> GetHandlerCacheHolder(Handle<Map> receiver_map,
                                                   bool receiver_is_holder,
@@ -64,15 +62,15 @@
                                              Isolate* isolate,
                                              CacheHolderFlag* flag);
 
-  static bool IsCleared(FeedbackNexus* nexus) {
-    InlineCacheState state = nexus->StateFromFeedback();
-    return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
-  }
-
   static bool ICUseVector(Code::Kind kind) {
     return kind == Code::LOAD_IC || kind == Code::LOAD_GLOBAL_IC ||
-           kind == Code::KEYED_LOAD_IC || kind == Code::CALL_IC ||
-           kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC;
+           kind == Code::KEYED_LOAD_IC || kind == Code::STORE_IC ||
+           kind == Code::KEYED_STORE_IC;
+  }
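+  // Same predicate, keyed on the feedback slot kind instead of the code kind.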
+  static bool ICUseVector(FeedbackSlotKind kind) {
+    return IsLoadICKind(kind) || IsLoadGlobalICKind(kind) ||
+           IsKeyedLoadICKind(kind) || IsStoreICKind(kind) ||
+           IsStoreOwnICKind(kind) || IsKeyedStoreICKind(kind);
   }
 
   // The ICs that don't pass slot and vector through the stack have to
@@ -83,15 +81,20 @@
 
   static inline bool IsHandler(Object* object);
 
+  // Notify the IC system that feedback has changed.
+  static void OnFeedbackChanged(Isolate* isolate, JSFunction* host_function);
+
  protected:
   Address fp() const { return fp_; }
   Address pc() const { return *pc_address_; }
+
+  void set_slow_stub_reason(const char* reason) { slow_stub_reason_ = reason; }
+
+  Address GetAbstractPC(int* line, int* column) const;
   Isolate* isolate() const { return isolate_; }
 
-  // Get the shared function info of the caller.
-  SharedFunctionInfo* GetSharedFunctionInfo() const;
-  // Get the code object of the caller.
-  Code* GetCode() const;
+  // Get the caller function object.
+  JSFunction* GetHostFunction() const;
 
   inline bool AddressIsDeoptimizedCode() const;
   inline static bool AddressIsDeoptimizedCode(Isolate* isolate,
@@ -120,7 +123,7 @@
   // keyed stores).
   void ConfigureVectorState(MapHandleList* maps,
                             MapHandleList* transitioned_maps,
-                            CodeHandleList* handlers);
+                            List<Handle<Object>>* handlers);
 
   char TransitionMarkFromState(IC::State state);
   void TraceIC(const char* type, Handle<Object> name);
@@ -136,8 +139,6 @@
                                          Address constant_pool);
   static inline void SetTargetAtAddress(Address address, Code* target,
                                         Address constant_pool);
-  // As a vector-based IC, type feedback must be updated differently.
-  static void OnTypeFeedbackChanged(Isolate* isolate, Code* host);
   static void PostPatching(Address address, Code* target, Code* old_target);
 
   void TraceHandlerCacheHitStats(LookupIterator* lookup);
@@ -165,15 +166,18 @@
   void CopyICToMegamorphicCache(Handle<Name> name);
   bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
   void PatchCache(Handle<Name> name, Handle<Object> code);
-  Code::Kind kind() const { return kind_; }
-  bool is_keyed() const {
-    return kind_ == Code::KEYED_LOAD_IC || kind_ == Code::KEYED_STORE_IC;
-  }
+  FeedbackSlotKind kind() const { return kind_; }
+  bool IsLoadIC() const { return IsLoadICKind(kind_); }
+  bool IsLoadGlobalIC() const { return IsLoadGlobalICKind(kind_); }
+  bool IsKeyedLoadIC() const { return IsKeyedLoadICKind(kind_); }
+  bool IsStoreIC() const { return IsStoreICKind(kind_); }
+  bool IsStoreOwnIC() const { return IsStoreOwnICKind(kind_); }
+  bool IsKeyedStoreIC() const { return IsKeyedStoreICKind(kind_); }
+  bool is_keyed() const { return IsKeyedLoadIC() || IsKeyedStoreIC(); }
   Code::Kind handler_kind() const {
-    if (kind_ == Code::KEYED_LOAD_IC) return Code::LOAD_IC;
-    DCHECK(kind_ == Code::LOAD_IC || kind_ == Code::STORE_IC ||
-           kind_ == Code::KEYED_STORE_IC);
-    return kind_;
+    if (IsAnyLoad()) return Code::LOAD_IC;
+    DCHECK(IsAnyStore());
+    return Code::STORE_IC;
   }
   bool ShouldRecomputeHandler(Handle<String> name);
 
@@ -200,8 +204,8 @@
     return target_maps_.length() > 0 ? *target_maps_.at(0) : NULL;
   }
 
-  Handle<TypeFeedbackVector> vector() const { return nexus()->vector_handle(); }
-  FeedbackVectorSlot slot() const { return nexus()->slot(); }
+  Handle<FeedbackVector> vector() const { return nexus()->vector_handle(); }
+  FeedbackSlot slot() const { return nexus()->slot(); }
   State saved_state() const {
     return state() == RECOMPUTE_HANDLER ? old_state_ : state();
   }
@@ -212,7 +216,6 @@
   }
   FeedbackNexus* nexus() const { return nexus_; }
 
-  inline Code* get_host();
   inline Code* target() const;
 
  private:
@@ -244,7 +247,7 @@
   bool vector_set_;
   State old_state_;  // For saving if we marked as prototype failure.
   State state_;
-  Code::Kind kind_;
+  FeedbackSlotKind kind_;
   Handle<Map> receiver_map_;
   MaybeHandle<Object> maybe_handler_;
 
@@ -252,6 +255,8 @@
   MapHandleList target_maps_;
   bool target_maps_set_;
 
+  const char* slow_stub_reason_;
+
   FeedbackNexus* nexus_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
@@ -264,38 +269,28 @@
       : IC(EXTRA_CALL_FRAME, isolate, nexus) {
     DCHECK(nexus != NULL);
   }
-
-  void HandleMiss(Handle<Object> function);
-
-  static void Clear(Isolate* isolate, Code* host, CallICNexus* nexus);
 };
 
 
 class LoadIC : public IC {
  public:
-  LoadIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
-      : IC(depth, isolate, nexus) {
+  LoadIC(Isolate* isolate, FeedbackNexus* nexus)
+      : IC(NO_EXTRA_FRAME, isolate, nexus) {
     DCHECK(nexus != NULL);
-    DCHECK(IsLoadStub());
+    DCHECK(IsAnyLoad());
+  }
+
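+  // A global load of a missing name throws a ReferenceError unless it occurs
+  // inside typeof.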
+  static bool ShouldThrowReferenceError(FeedbackSlotKind kind) {
+    return kind == FeedbackSlotKind::kLoadGlobalNotInsideTypeof;
   }
 
   bool ShouldThrowReferenceError() const {
-    return kind() == Code::LOAD_GLOBAL_IC &&
-           LoadGlobalICState::GetTypeofMode(extra_ic_state()) ==
-               NOT_INSIDE_TYPEOF;
+    return ShouldThrowReferenceError(kind());
   }
 
-  // Code generator routines.
-
-  static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-  static void GenerateNormal(MacroAssembler* masm);
-
   MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
                                            Handle<Name> name);
 
-  static void Clear(Isolate* isolate, Code* host, LoadICNexus* nexus);
-
  protected:
   virtual Handle<Code> slow_stub() const {
     return isolate()->builtins()->LoadIC_Slow();
@@ -312,7 +307,7 @@
 
  private:
   // Creates a data handler that represents a load of a field by given index.
-  Handle<Object> SimpleFieldLoad(FieldIndex index);
+  static Handle<Object> SimpleFieldLoad(Isolate* isolate, FieldIndex index);
 
   // Creates a data handler that represents a prototype chain check followed
   // by given Smi-handler that encoded a load from the holder.
@@ -325,17 +320,16 @@
   Handle<Object> LoadNonExistent(Handle<Map> receiver_map, Handle<Name> name);
 
   friend class IC;
+  friend class NamedLoadHandlerCompiler;
 };
 
 class LoadGlobalIC : public LoadIC {
  public:
-  LoadGlobalIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
-      : LoadIC(depth, isolate, nexus) {}
+  LoadGlobalIC(Isolate* isolate, FeedbackNexus* nexus)
+      : LoadIC(isolate, nexus) {}
 
   MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Name> name);
 
-  static void Clear(Isolate* isolate, Code* host, LoadGlobalICNexus* nexus);
-
  protected:
   Handle<Code> slow_stub() const override {
     return isolate()->builtins()->LoadGlobalIC_Slow();
@@ -344,21 +338,14 @@
 
 class KeyedLoadIC : public LoadIC {
  public:
-  KeyedLoadIC(FrameDepth depth, Isolate* isolate,
-              KeyedLoadICNexus* nexus = NULL)
-      : LoadIC(depth, isolate, nexus) {
+  KeyedLoadIC(Isolate* isolate, KeyedLoadICNexus* nexus)
+      : LoadIC(isolate, nexus) {
     DCHECK(nexus != NULL);
   }
 
   MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
                                            Handle<Object> key);
 
-  // Code generator routines.
-  static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-
-  static void Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus);
-
  protected:
   // The receiver is a HeapObject because it could be a String or a JSObject.
   void UpdateLoadElement(Handle<HeapObject> receiver);
@@ -370,20 +357,15 @@
 
 class StoreIC : public IC {
  public:
-  StoreIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
-      : IC(depth, isolate, nexus) {
-    DCHECK(IsStoreStub());
+  StoreIC(Isolate* isolate, FeedbackNexus* nexus)
+      : IC(NO_EXTRA_FRAME, isolate, nexus) {
+    DCHECK(IsAnyStore());
   }
 
   LanguageMode language_mode() const {
-    return StoreICState::GetLanguageMode(extra_ic_state());
+    return nexus()->vector()->GetLanguageMode(nexus()->slot());
   }
 
-  // Code generators for stub routines. Only called once at startup.
-  static void GenerateSlow(MacroAssembler* masm);
-  static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateNormal(MacroAssembler* masm);
-
   MUST_USE_RESULT MaybeHandle<Object> Store(
       Handle<Object> object, Handle<Name> name, Handle<Object> value,
       JSReceiver::StoreFromKeyed store_mode =
@@ -392,20 +374,11 @@
   bool LookupForWrite(LookupIterator* it, Handle<Object> value,
                       JSReceiver::StoreFromKeyed store_mode);
 
-  static void Clear(Isolate* isolate, Code* host, StoreICNexus* nexus);
-
  protected:
   // Stub accessors.
   Handle<Code> slow_stub() const {
-    switch (language_mode()) {
-      case SLOPPY:
-        return isolate()->builtins()->StoreIC_SlowSloppy();
-      case STRICT:
-        return isolate()->builtins()->StoreIC_SlowStrict();
-      default:
-        UNREACHABLE();
-        return Handle<Code>();
-    }
+    // StoreIC and KeyedStoreIC share the same slow stub.
+    return isolate()->builtins()->KeyedStoreIC_Slow();
   }
 
   // Update the inline cache and the global stub cache based on the
@@ -437,25 +410,13 @@
     return casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
   }
 
-  KeyedStoreIC(FrameDepth depth, Isolate* isolate,
-               KeyedStoreICNexus* nexus = NULL)
-      : StoreIC(depth, isolate, nexus) {}
+  KeyedStoreIC(Isolate* isolate, KeyedStoreICNexus* nexus)
+      : StoreIC(isolate, nexus) {}
 
   MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
                                             Handle<Object> name,
                                             Handle<Object> value);
 
-  // Code generators for stub routines.  Only called once at startup.
-  static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateSlow(MacroAssembler* masm);
-  static void GenerateMegamorphic(MacroAssembler* masm,
-                                  LanguageMode language_mode);
-
-  static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
-                                            ExtraICState extra_state);
-
-  static void Clear(Isolate* isolate, Code* host, KeyedStoreICNexus* nexus);
-
  protected:
   void UpdateStoreElement(Handle<Map> receiver_map,
                           KeyedAccessStoreMode store_mode);
@@ -464,6 +425,14 @@
   Handle<Map> ComputeTransitionedMap(Handle<Map> map,
                                      KeyedAccessStoreMode store_mode);
 
+  Handle<Object> StoreElementHandler(Handle<Map> receiver_map,
+                                     KeyedAccessStoreMode store_mode);
+
+  void StoreElementPolymorphicHandlers(MapHandleList* receiver_maps,
+                                       MapHandleList* transitioned_maps,
+                                       List<Handle<Object>>* handlers,
+                                       KeyedAccessStoreMode store_mode);
+
   friend class IC;
 };
 
diff --git a/src/ic/keyed-store-generic.cc b/src/ic/keyed-store-generic.cc
index 30faba8..8962386 100644
--- a/src/ic/keyed-store-generic.cc
+++ b/src/ic/keyed-store-generic.cc
@@ -4,19 +4,25 @@
 
 #include "src/ic/keyed-store-generic.h"
 
-#include "src/compiler/code-assembler.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
 #include "src/contexts.h"
+#include "src/ic/accessor-assembler.h"
+#include "src/interface-descriptors.h"
 #include "src/isolate.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
 using compiler::Node;
 
-class KeyedStoreGenericAssembler : public CodeStubAssembler {
+class KeyedStoreGenericAssembler : public AccessorAssembler {
  public:
-  void KeyedStoreGeneric(const StoreICParameters* p,
-                         LanguageMode language_mode);
+  explicit KeyedStoreGenericAssembler(compiler::CodeAssemblerState* state)
+      : AccessorAssembler(state) {}
+
+  void KeyedStoreGeneric(LanguageMode language_mode);
 
  private:
   enum UpdateLength {
@@ -30,7 +36,8 @@
                                Node* value, Node* context, Label* slow);
 
   void EmitGenericPropertyStore(Node* receiver, Node* receiver_map,
-                                const StoreICParameters* p, Label* slow);
+                                const StoreICParameters* p, Label* slow,
+                                LanguageMode language_mode);
 
   void BranchIfPrototypesHaveNonFastElements(Node* receiver_map,
                                              Label* non_fast_elements,
@@ -60,16 +67,25 @@
                                 ElementsKind packed_kind,
                                 ElementsKind packed_kind_2, Label* bailout);
 
-  // Do not add fields, so that this is safe to reinterpret_cast to CSA.
+  void JumpIfDataProperty(Node* details, Label* writable, Label* readonly);
+  void LookupPropertyOnPrototypeChain(Node* receiver_map, Node* name,
+                                      Label* accessor,
+                                      Variable* var_accessor_pair,
+                                      Variable* var_accessor_holder,
+                                      Label* readonly, Label* bailout);
+
+  void CheckFieldType(Node* descriptors, Node* name_index, Node* representation,
+                      Node* value, Label* bailout);
+  void OverwriteExistingFastProperty(Node* object, Node* object_map,
+                                     Node* properties, Node* descriptors,
+                                     Node* descriptor_name_index, Node* details,
+                                     Node* value, Label* slow);
 };
 
-void KeyedStoreGenericGenerator::Generate(
-    CodeStubAssembler* assembler, const CodeStubAssembler::StoreICParameters* p,
-    LanguageMode language_mode) {
-  STATIC_ASSERT(sizeof(CodeStubAssembler) ==
-                sizeof(KeyedStoreGenericAssembler));
-  auto assm = reinterpret_cast<KeyedStoreGenericAssembler*>(assembler);
-  assm->KeyedStoreGeneric(p, language_mode);
+void KeyedStoreGenericGenerator::Generate(compiler::CodeAssemblerState* state,
+                                          LanguageMode language_mode) {
+  KeyedStoreGenericAssembler assembler(state);
+  assembler.KeyedStoreGeneric(language_mode);
 }
 
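The rewritten Generate() above replaces the old reinterpret_cast trick (which only worked because the subclass added no fields, hence the deleted STATIC_ASSERT) with ordinary construction over a shared CodeAssemblerState. A minimal sketch of the new shape, with all types simplified stand-ins rather than the real V8 classes:

struct CodeAssemblerState { /* graph under construction, labels, etc. */ };

class AccessorAssembler {
 public:
  explicit AccessorAssembler(CodeAssemblerState* state) : state_(state) {}
 protected:
  CodeAssemblerState* state_;  // several assemblers can share one state
};

class KeyedStoreGenericAssembler : public AccessorAssembler {
 public:
  explicit KeyedStoreGenericAssembler(CodeAssemblerState* state)
      : AccessorAssembler(state) {}
  void KeyedStoreGeneric(int language_mode) { (void)language_mode; }
};

void Generate(CodeAssemblerState* state, int language_mode) {
  KeyedStoreGenericAssembler assembler(state);  // no cast, no layout contract
  assembler.KeyedStoreGeneric(language_mode);
}
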
 void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
@@ -94,9 +110,7 @@
            non_fast_elements);
     Node* elements_kind = LoadMapElementsKind(prototype_map);
     STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
-    GotoIf(Int32LessThanOrEqual(elements_kind,
-                                Int32Constant(LAST_FAST_ELEMENTS_KIND)),
-           &loop_body);
+    GotoIf(IsFastElementsKind(elements_kind), &loop_body);
     GotoIf(Word32Equal(elements_kind, Int32Constant(NO_ELEMENTS)), &loop_body);
     Goto(non_fast_elements);
   }
@@ -112,7 +126,7 @@
     TrapAllocationMemento(receiver, bailout);
   }
   Label perform_transition(this), check_holey_map(this);
-  Variable var_target_map(this, MachineType::PointerRepresentation());
+  Variable var_target_map(this, MachineRepresentation::kTagged);
   // Check if the receiver has the default |from_kind| map.
   {
     Node* packed_map =
@@ -143,7 +157,7 @@
       GrowElementsCapacity(receiver, elements, from_kind, to_kind, capacity,
                            capacity, INTPTR_PARAMETERS, bailout);
     }
-    StoreObjectField(receiver, JSObject::kMapOffset, var_target_map.value());
+    StoreMap(receiver, var_target_map.value());
   }
 }
 
@@ -160,7 +174,7 @@
   }
   Node* holey_map =
       LoadContextElement(native_context, Context::ArrayMapIndex(holey_kind));
-  StoreObjectField(receiver, JSObject::kMapOffset, holey_map);
+  StoreMap(receiver, holey_map);
   Goto(done);
 }
 
@@ -219,6 +233,15 @@
   if (update_length != kDontChangeLength) {
     CSA_ASSERT(this, Word32Equal(LoadMapInstanceType(receiver_map),
                                  Int32Constant(JS_ARRAY_TYPE)));
+    // Check whether the length property is writable. This fast check only
+    // covers receivers with fast properties; dictionary-mode maps take the
+    // slow path.
+    GotoIf(IsDictionaryMap(receiver_map), slow);
+    // The length property is non-configurable, so it's guaranteed to always
+    // be the first property.
+    Node* descriptors = LoadMapDescriptors(receiver_map);
+    Node* details =
+        LoadFixedArrayElement(descriptors, DescriptorArray::ToDetailsIndex(0));
+    GotoIf(IsSetSmi(details, PropertyDetails::kAttributesReadOnlyMask), slow);
   }
   STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
   const int kHeaderSize = FixedArray::kHeaderSize - kHeapObjectTag;
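The added length-writability check reads the attribute bits of descriptor 0. A tiny sketch of that test (the mask position is illustrative; V8 actually packs attributes into PropertyDetails):

#include <cstdint>

constexpr uint32_t kAttributesReadOnlyMask = 1u << 0;  // illustrative position

bool IsArrayLengthWritable(uint32_t first_descriptor_details) {
  // "length" is non-configurable, so it is always descriptor 0; if its
  // READ_ONLY bit is set, the store must take the slow path instead.
  return (first_descriptor_details & kAttributesReadOnlyMask) == 0;
}
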
@@ -251,7 +274,7 @@
     // can always be stored.
     {
       Label non_smi_value(this);
-      GotoUnless(TaggedIsSmi(value), &non_smi_value);
+      GotoIfNot(TaggedIsSmi(value), &non_smi_value);
       // If we're about to introduce holes, ensure holey elements.
       if (update_length == kBumpLengthWithGap) {
         TryChangeToHoleyMapMulti(receiver, receiver_map, elements_kind, context,
@@ -276,7 +299,7 @@
         TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
                             FAST_ELEMENTS, slow);
       }
-      Store(MachineRepresentation::kTagged, elements, offset, value);
+      Store(elements, offset, value);
       MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
 
       Bind(&must_transition);
@@ -320,7 +343,7 @@
                            FAST_SMI_ELEMENTS, target_kind, slow);
         // The elements backing store didn't change, no reload necessary.
         CSA_ASSERT(this, WordEqual(elements, LoadElements(receiver)));
-        Store(MachineRepresentation::kTagged, elements, offset, value);
+        Store(elements, offset, value);
         MaybeUpdateLengthAndReturn(receiver, intptr_index, value,
                                    update_length);
       }
@@ -356,8 +379,8 @@
     // Try to store the value as a double.
     {
       Label non_number_value(this);
-      Node* double_value = PrepareValueForWrite(value, Representation::Double(),
-                                                &non_number_value);
+      Node* double_value = TryTaggedToFloat64(value, &non_number_value);
+
       // Make sure we do not store signalling NaNs into double arrays.
       double_value = Float64SilenceNaN(double_value);
       // If we're about to introduce holes, ensure holey elements.
@@ -384,7 +407,7 @@
       Node* fast_elements = LoadElements(receiver);
       Node* fast_offset = ElementOffsetFromIndex(
           intptr_index, FAST_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
-      Store(MachineRepresentation::kTagged, fast_elements, fast_offset, value);
+      Store(fast_elements, fast_offset, value);
       MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
     }
   }
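TryTaggedToFloat64 plus Float64SilenceNaN above keeps signalling NaNs out of double arrays. A self-contained sketch of what NaN silencing amounts to at the bit level (standard IEEE-754 double layout, independent of V8):

#include <cstdint>
#include <cstring>

double SilenceNaN(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  const uint64_t kExponentMask = 0x7FF0000000000000ULL;
  const uint64_t kQuietBit = 0x0008000000000000ULL;  // top mantissa bit
  // NaN = all-ones exponent and non-zero mantissa; setting the quiet bit
  // turns a signalling NaN into a quiet one, preserving the payload.
  if ((bits & kExponentMask) == kExponentMask && (bits << 12) != 0) {
    bits |= kQuietBit;
    std::memcpy(&value, &bits, sizeof value);
  }
  return value;
}
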
@@ -399,14 +422,13 @@
 void KeyedStoreGenericAssembler::EmitGenericElementStore(
     Node* receiver, Node* receiver_map, Node* instance_type, Node* intptr_index,
     Node* value, Node* context, Label* slow) {
-  Label if_in_bounds(this), if_increment_length_by_one(this),
+  Label if_fast(this), if_in_bounds(this), if_increment_length_by_one(this),
       if_bump_length_with_gap(this), if_grow(this), if_nonfast(this),
       if_typed_array(this), if_dictionary(this);
   Node* elements = LoadElements(receiver);
   Node* elements_kind = LoadMapElementsKind(receiver_map);
-  GotoIf(
-      Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
-      &if_nonfast);
+  Branch(IsFastElementsKind(elements_kind), &if_fast, &if_nonfast);
+  Bind(&if_fast);
 
   Label if_array(this);
   GotoIf(Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE)), &if_array);
@@ -447,6 +469,8 @@
 
   // Out-of-capacity accesses (index >= capacity) jump here. Additionally,
   // an ElementsKind transition might be necessary.
+  // The index can also be negative at this point! Jump to the runtime in that
+  // case to convert it to a named property.
   Bind(&if_grow);
   {
     Comment("Grow backing store");
@@ -482,37 +506,419 @@
   }
 }
 
-void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
-    Node* receiver, Node* receiver_map, const StoreICParameters* p,
-    Label* slow) {
-  Comment("stub cache probe");
-  // TODO(jkummerow): Don't rely on the stub cache as much.
-  // - existing properties can be overwritten inline (unless readonly).
-  // - for dictionary mode receivers, we can even add properties inline
-  //   (unless the prototype chain prevents it).
-  Variable var_handler(this, MachineRepresentation::kTagged);
-  Label found_handler(this, &var_handler), stub_cache_miss(this);
-  TryProbeStubCache(isolate()->store_stub_cache(), receiver, p->name,
-                    &found_handler, &var_handler, &stub_cache_miss);
-  Bind(&found_handler);
+void KeyedStoreGenericAssembler::JumpIfDataProperty(Node* details,
+                                                    Label* writable,
+                                                    Label* readonly) {
+  // Accessor properties never have the READ_ONLY attribute set.
+  GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
+         readonly);
+  Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
+  GotoIf(Word32Equal(kind, Int32Constant(kData)), writable);
+  // Fall through if it's an accessor property.
+}
+
+void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
+    Node* receiver_map, Node* name, Label* accessor,
+    Variable* var_accessor_pair, Variable* var_accessor_holder, Label* readonly,
+    Label* bailout) {
+  Label ok_to_write(this);
+  Variable var_holder(this, MachineRepresentation::kTagged);
+  var_holder.Bind(LoadMapPrototype(receiver_map));
+  Variable var_holder_map(this, MachineRepresentation::kTagged);
+  var_holder_map.Bind(LoadMap(var_holder.value()));
+
+  Variable* merged_variables[] = {&var_holder, &var_holder_map};
+  Label loop(this, arraysize(merged_variables), merged_variables);
+  Goto(&loop);
+  Bind(&loop);
   {
-    Comment("KeyedStoreGeneric found handler");
-    HandleStoreICHandlerCase(p, var_handler.value(), slow);
+    Node* holder = var_holder.value();
+    Node* holder_map = var_holder_map.value();
+    Node* instance_type = LoadMapInstanceType(holder_map);
+    Label next_proto(this);
+    {
+      Label found(this), found_fast(this), found_dict(this), found_global(this);
+      Variable var_meta_storage(this, MachineRepresentation::kTagged);
+      Variable var_entry(this, MachineType::PointerRepresentation());
+      TryLookupProperty(holder, holder_map, instance_type, name, &found_fast,
+                        &found_dict, &found_global, &var_meta_storage,
+                        &var_entry, &next_proto, bailout);
+      Bind(&found_fast);
+      {
+        Node* descriptors = var_meta_storage.value();
+        Node* name_index = var_entry.value();
+        Node* details =
+            LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
+        JumpIfDataProperty(details, &ok_to_write, readonly);
+
+        // Accessor case.
+        // TODO(jkummerow): Implement a trimmed-down LoadAccessorFromFastObject.
+        Variable var_details(this, MachineRepresentation::kWord32);
+        LoadPropertyFromFastObject(holder, holder_map, descriptors, name_index,
+                                   &var_details, var_accessor_pair);
+        var_accessor_holder->Bind(holder);
+        Goto(accessor);
+      }
+
+      Bind(&found_dict);
+      {
+        Node* dictionary = var_meta_storage.value();
+        Node* entry = var_entry.value();
+        Node* details =
+            LoadDetailsByKeyIndex<NameDictionary>(dictionary, entry);
+        JumpIfDataProperty(details, &ok_to_write, readonly);
+
+        // Accessor case.
+        var_accessor_pair->Bind(
+            LoadValueByKeyIndex<NameDictionary>(dictionary, entry));
+        var_accessor_holder->Bind(holder);
+        Goto(accessor);
+      }
+
+      Bind(&found_global);
+      {
+        Node* dictionary = var_meta_storage.value();
+        Node* entry = var_entry.value();
+        Node* property_cell =
+            LoadValueByKeyIndex<GlobalDictionary>(dictionary, entry);
+        Node* value =
+            LoadObjectField(property_cell, PropertyCell::kValueOffset);
+        GotoIf(WordEqual(value, TheHoleConstant()), &next_proto);
+        Node* details = LoadAndUntagToWord32ObjectField(
+            property_cell, PropertyCell::kDetailsOffset);
+        JumpIfDataProperty(details, &ok_to_write, readonly);
+
+        // Accessor case.
+        var_accessor_pair->Bind(value);
+        var_accessor_holder->Bind(holder);
+        Goto(accessor);
+      }
+    }
+
+    Bind(&next_proto);
+    // Bail out if this could be an integer-indexed exotic case.
+    GotoIf(Word32Equal(instance_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
+           bailout);
+    Node* proto = LoadMapPrototype(holder_map);
+    GotoIf(WordEqual(proto, NullConstant()), &ok_to_write);
+    var_holder.Bind(proto);
+    var_holder_map.Bind(LoadMap(proto));
+    Goto(&loop);
   }
-  Bind(&stub_cache_miss);
+  Bind(&ok_to_write);
+}
+
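LookupPropertyOnPrototypeChain above walks holder maps until the prototype is null, classifying what it finds. A simplified model of that decision logic (plain C++ stand-ins; the real code additionally bails out for typed arrays, per the integer-indexed exotic comment above):

#include <string>
#include <unordered_map>

struct Property { bool read_only; bool is_accessor; };
struct Object {
  std::unordered_map<std::string, Property> own;
  const Object* prototype = nullptr;
};

enum class StoreDecision { kOkToWrite, kReadOnly, kCallAccessor };

StoreDecision ClassifyStore(const Object* receiver, const std::string& name) {
  for (const Object* o = receiver->prototype; o != nullptr; o = o->prototype) {
    auto it = o->own.find(name);
    if (it == o->own.end()) continue;  // not here, try the next prototype
    // Accessors are modeled as never read-only, matching the V8 comment.
    if (it->second.read_only) return StoreDecision::kReadOnly;
    if (it->second.is_accessor) return StoreDecision::kCallAccessor;
    return StoreDecision::kOkToWrite;  // writable data property: shadow it
  }
  return StoreDecision::kOkToWrite;    // nothing on the chain blocks the add
}
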
+void KeyedStoreGenericAssembler::CheckFieldType(Node* descriptors,
+                                                Node* name_index,
+                                                Node* representation,
+                                                Node* value, Label* bailout) {
+  Label r_smi(this), r_double(this), r_heapobject(this), all_fine(this);
+  // Ignore FLAG_track_fields etc. and always emit code for all checks,
+  // because this builtin is part of the snapshot and therefore should
+  // be flag independent.
+  GotoIf(Word32Equal(representation, Int32Constant(Representation::kSmi)),
+         &r_smi);
+  GotoIf(Word32Equal(representation, Int32Constant(Representation::kDouble)),
+         &r_double);
+  GotoIf(
+      Word32Equal(representation, Int32Constant(Representation::kHeapObject)),
+      &r_heapobject);
+  GotoIf(Word32Equal(representation, Int32Constant(Representation::kNone)),
+         bailout);
+  CSA_ASSERT(this, Word32Equal(representation,
+                               Int32Constant(Representation::kTagged)));
+  Goto(&all_fine);
+
+  Bind(&r_smi);
+  { Branch(TaggedIsSmi(value), &all_fine, bailout); }
+
+  Bind(&r_double);
   {
-    Comment("KeyedStoreGeneric_miss");
-    TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
-                    p->vector, p->receiver, p->name);
+    GotoIf(TaggedIsSmi(value), &all_fine);
+    Node* value_map = LoadMap(value);
+    // While supporting mutable HeapNumbers would be straightforward, such
+    // objects should not end up here anyway.
+    CSA_ASSERT(this,
+               WordNotEqual(value_map,
+                            LoadRoot(Heap::kMutableHeapNumberMapRootIndex)));
+    Branch(IsHeapNumberMap(value_map), &all_fine, bailout);
+  }
+
+  Bind(&r_heapobject);
+  {
+    GotoIf(TaggedIsSmi(value), bailout);
+    Node* field_type =
+        LoadValueByKeyIndex<DescriptorArray>(descriptors, name_index);
+    intptr_t kNoneType = reinterpret_cast<intptr_t>(FieldType::None());
+    intptr_t kAnyType = reinterpret_cast<intptr_t>(FieldType::Any());
+    // FieldType::None can't hold any value.
+    GotoIf(WordEqual(field_type, IntPtrConstant(kNoneType)), bailout);
+    // FieldType::Any can hold any value.
+    GotoIf(WordEqual(field_type, IntPtrConstant(kAnyType)), &all_fine);
+    CSA_ASSERT(this, IsWeakCell(field_type));
+    // Cleared WeakCells count as FieldType::None, which can't hold any value.
+    field_type = LoadWeakCellValue(field_type, bailout);
+    // FieldType::Class(...) performs a map check.
+    CSA_ASSERT(this, IsMap(field_type));
+    Branch(WordEqual(LoadMap(value), field_type), &all_fine, bailout);
+  }
+
+  Bind(&all_fine);
+}
+
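CheckFieldType above gates a field store on the field's representation. A rough model of that gate (the enum and flags are stand-ins; the real kHeapObject case additionally consults the descriptor's FieldType, as the code shows):

enum class Representation { kSmi, kDouble, kHeapObject, kTagged };

struct Value { bool is_smi; bool is_heap_number; };

bool FitsRepresentation(Representation r, const Value& v) {
  switch (r) {
    case Representation::kSmi:        return v.is_smi;
    case Representation::kDouble:     return v.is_smi || v.is_heap_number;
    case Representation::kHeapObject: return !v.is_smi;  // plus FieldType check
    case Representation::kTagged:     return true;       // anything fits
  }
  return false;  // kNone and unknown representations bail out
}
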
+void KeyedStoreGenericAssembler::OverwriteExistingFastProperty(
+    Node* object, Node* object_map, Node* properties, Node* descriptors,
+    Node* descriptor_name_index, Node* details, Node* value, Label* slow) {
+  // Properties in descriptors can't be overwritten without map transition.
+  GotoIf(Word32NotEqual(DecodeWord32<PropertyDetails::LocationField>(details),
+                        Int32Constant(kField)),
+         slow);
+
+  if (FLAG_track_constant_fields) {
+    // TODO(ishell): Taking the slow path is not necessary if new and old
+    // values are identical.
+    GotoIf(Word32Equal(DecodeWord32<PropertyDetails::ConstnessField>(details),
+                       Int32Constant(kConst)),
+           slow);
+  }
+
+  Label done(this);
+  Node* representation =
+      DecodeWord32<PropertyDetails::RepresentationField>(details);
+
+  CheckFieldType(descriptors, descriptor_name_index, representation, value,
+                 slow);
+  Node* field_index =
+      DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
+  Node* inobject_properties = LoadMapInobjectProperties(object_map);
+
+  Label inobject(this), backing_store(this);
+  Branch(UintPtrLessThan(field_index, inobject_properties), &inobject,
+         &backing_store);
+
+  Bind(&inobject);
+  {
+    Node* field_offset =
+        IntPtrMul(IntPtrSub(LoadMapInstanceSize(object_map),
+                            IntPtrSub(inobject_properties, field_index)),
+                  IntPtrConstant(kPointerSize));
+    Label tagged_rep(this), double_rep(this);
+    Branch(Word32Equal(representation, Int32Constant(Representation::kDouble)),
+           &double_rep, &tagged_rep);
+    Bind(&double_rep);
+    {
+      Node* double_value = ChangeNumberToFloat64(value);
+      if (FLAG_unbox_double_fields) {
+        StoreObjectFieldNoWriteBarrier(object, field_offset, double_value,
+                                       MachineRepresentation::kFloat64);
+      } else {
+        Node* mutable_heap_number = LoadObjectField(object, field_offset);
+        StoreHeapNumberValue(mutable_heap_number, double_value);
+      }
+      Goto(&done);
+    }
+
+    Bind(&tagged_rep);
+    {
+      StoreObjectField(object, field_offset, value);
+      Goto(&done);
+    }
+  }
+
+  Bind(&backing_store);
+  {
+    Node* backing_store_index = IntPtrSub(field_index, inobject_properties);
+    Label tagged_rep(this), double_rep(this);
+    Branch(Word32Equal(representation, Int32Constant(Representation::kDouble)),
+           &double_rep, &tagged_rep);
+    Bind(&double_rep);
+    {
+      Node* double_value = ChangeNumberToFloat64(value);
+      Node* mutable_heap_number =
+          LoadFixedArrayElement(properties, backing_store_index);
+      StoreHeapNumberValue(mutable_heap_number, double_value);
+      Goto(&done);
+    }
+    Bind(&tagged_rep);
+    {
+      StoreFixedArrayElement(properties, backing_store_index, value);
+      Goto(&done);
+    }
+  }
+  Bind(&done);
+}
+
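The in-object branch above computes the field's byte offset as (instance_size - (inobject_properties - field_index)) * kPointerSize, i.e. in-object fields occupy the tail of the object. A worked example, assuming (as the trailing multiply by kPointerSize suggests) that both sizes are counted in pointer-sized words:

#include <cstdio>

int main() {
  const int kPointerSize = 4;     // 32-bit layout, for illustration
  int instance_size = 16;         // object size in words, header included
  int inobject_properties = 4;    // the last four words hold in-object fields
  int field_index = 1;            // the second field overall
  int offset =
      (instance_size - (inobject_properties - field_index)) * kPointerSize;
  // (16 - (4 - 1)) * 4 = 52: field 1 occupies word 13 of the object.
  std::printf("byte offset = %d\n", offset);
  return 0;
}
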
+void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
+    Node* receiver, Node* receiver_map, const StoreICParameters* p, Label* slow,
+    LanguageMode language_mode) {
+  Variable var_accessor_pair(this, MachineRepresentation::kTagged);
+  Variable var_accessor_holder(this, MachineRepresentation::kTagged);
+  Label stub_cache(this), fast_properties(this), dictionary_properties(this),
+      accessor(this), readonly(this);
+  Node* properties = LoadProperties(receiver);
+  Node* properties_map = LoadMap(properties);
+  Branch(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
+         &dictionary_properties, &fast_properties);
+
+  Bind(&fast_properties);
+  {
+    Comment("fast property store");
+    Node* bitfield3 = LoadMapBitField3(receiver_map);
+    Node* descriptors = LoadMapDescriptors(receiver_map);
+    Label descriptor_found(this);
+    Variable var_name_index(this, MachineType::PointerRepresentation());
+    // TODO(jkummerow): Maybe look for existing map transitions?
+    Label* notfound = &stub_cache;
+    DescriptorLookup(p->name, descriptors, bitfield3, &descriptor_found,
+                     &var_name_index, notfound);
+
+    Bind(&descriptor_found);
+    {
+      Node* name_index = var_name_index.value();
+      Node* details =
+          LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
+      Label data_property(this);
+      JumpIfDataProperty(details, &data_property, &readonly);
+
+      // Accessor case.
+      // TODO(jkummerow): Implement a trimmed-down LoadAccessorFromFastObject.
+      Variable var_details(this, MachineRepresentation::kWord32);
+      LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
+                                 name_index, &var_details, &var_accessor_pair);
+      var_accessor_holder.Bind(receiver);
+      Goto(&accessor);
+
+      Bind(&data_property);
+      {
+        OverwriteExistingFastProperty(receiver, receiver_map, properties,
+                                      descriptors, name_index, details,
+                                      p->value, slow);
+        Return(p->value);
+      }
+    }
+  }
+
+  Bind(&dictionary_properties);
+  {
+    Comment("dictionary property store");
+    // We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
+    // seeing global objects here (which would need special handling).
+
+    Variable var_name_index(this, MachineType::PointerRepresentation());
+    Label dictionary_found(this, &var_name_index), not_found(this);
+    NameDictionaryLookup<NameDictionary>(properties, p->name, &dictionary_found,
+                                         &var_name_index, &not_found);
+    Bind(&dictionary_found);
+    {
+      Label overwrite(this);
+      Node* details = LoadDetailsByKeyIndex<NameDictionary>(
+          properties, var_name_index.value());
+      JumpIfDataProperty(details, &overwrite, &readonly);
+
+      // Accessor case.
+      var_accessor_pair.Bind(LoadValueByKeyIndex<NameDictionary>(
+          properties, var_name_index.value()));
+      var_accessor_holder.Bind(receiver);
+      Goto(&accessor);
+
+      Bind(&overwrite);
+      {
+        StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
+                                             p->value);
+        Return(p->value);
+      }
+    }
+
+    Bind(&not_found);
+    {
+      LookupPropertyOnPrototypeChain(receiver_map, p->name, &accessor,
+                                     &var_accessor_pair, &var_accessor_holder,
+                                     &readonly, slow);
+      Add<NameDictionary>(properties, p->name, p->value, slow);
+      Return(p->value);
+    }
+  }
+
+  Bind(&accessor);
+  {
+    Label not_callable(this);
+    Node* accessor_pair = var_accessor_pair.value();
+    GotoIf(IsAccessorInfoMap(LoadMap(accessor_pair)), slow);
+    CSA_ASSERT(this, HasInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE));
+    Node* setter = LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
+    Node* setter_map = LoadMap(setter);
+    // FunctionTemplateInfo setters are not supported yet.
+    GotoIf(IsFunctionTemplateInfoMap(setter_map), slow);
+    GotoIfNot(IsCallableMap(setter_map), &not_callable);
+
+    Callable callable = CodeFactory::Call(isolate());
+    CallJS(callable, p->context, setter, receiver, p->value);
+    Return(p->value);
+
+    Bind(&not_callable);
+    {
+      if (language_mode == STRICT) {
+        Node* message =
+            SmiConstant(Smi::FromInt(MessageTemplate::kNoSetterInCallback));
+        TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
+                        var_accessor_holder.value());
+      } else {
+        DCHECK_EQ(SLOPPY, language_mode);
+        Return(p->value);
+      }
+    }
+  }
+
+  Bind(&readonly);
+  {
+    if (language_mode == STRICT) {
+      Node* message =
+          SmiConstant(Smi::FromInt(MessageTemplate::kStrictReadOnlyProperty));
+      Node* type = Typeof(p->receiver, p->context);
+      TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
+                      type, p->receiver);
+    } else {
+      DCHECK_EQ(SLOPPY, language_mode);
+      Return(p->value);
+    }
+  }
+
+  Bind(&stub_cache);
+  {
+    Comment("stub cache probe");
+    Variable var_handler(this, MachineRepresentation::kTagged);
+    Label found_handler(this, &var_handler), stub_cache_miss(this);
+    TryProbeStubCache(isolate()->store_stub_cache(), receiver, p->name,
+                      &found_handler, &var_handler, &stub_cache_miss);
+    Bind(&found_handler);
+    {
+      Comment("KeyedStoreGeneric found handler");
+      HandleStoreICHandlerCase(p, var_handler.value(), &stub_cache_miss);
+    }
+    Bind(&stub_cache_miss);
+    {
+      Comment("KeyedStoreGeneric_miss");
+      TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value,
+                      p->slot, p->vector, p->receiver, p->name);
+    }
   }
 }
 
-void KeyedStoreGenericAssembler::KeyedStoreGeneric(const StoreICParameters* p,
-                                                   LanguageMode language_mode) {
+void KeyedStoreGenericAssembler::KeyedStoreGeneric(LanguageMode language_mode) {
+  typedef StoreWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
   Variable var_index(this, MachineType::PointerRepresentation());
+  Variable var_unique(this, MachineRepresentation::kTagged);
+  var_unique.Bind(name);  // Dummy initialization.
   Label if_index(this), if_unique_name(this), slow(this);
 
-  Node* receiver = p->receiver;
   GotoIf(TaggedIsSmi(receiver), &slow);
   Node* receiver_map = LoadMap(receiver);
   Node* instance_type = LoadMapInstanceType(receiver_map);
@@ -522,26 +928,28 @@
                               Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
          &slow);
 
-  TryToName(p->name, &if_index, &var_index, &if_unique_name, &slow);
+  TryToName(name, &if_index, &var_index, &if_unique_name, &var_unique, &slow);
 
   Bind(&if_index);
   {
     Comment("integer index");
     EmitGenericElementStore(receiver, receiver_map, instance_type,
-                            var_index.value(), p->value, p->context, &slow);
+                            var_index.value(), value, context, &slow);
   }
 
   Bind(&if_unique_name);
   {
     Comment("key is unique name");
-    EmitGenericPropertyStore(receiver, receiver_map, p, &slow);
+    StoreICParameters p(context, receiver, var_unique.value(), value, slot,
+                        vector);
+    EmitGenericPropertyStore(receiver, receiver_map, &p, &slow, language_mode);
   }
 
   Bind(&slow);
   {
     Comment("KeyedStoreGeneric_slow");
-    TailCallRuntime(Runtime::kSetProperty, p->context, p->receiver, p->name,
-                    p->value, SmiConstant(language_mode));
+    TailCallRuntime(Runtime::kSetProperty, context, receiver, name, value,
+                    SmiConstant(language_mode));
   }
 }
 
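KeyedStoreGeneric now loads its own parameters from the descriptor and splits on TryToName before choosing a path. A rough model of that three-way split (ClassifyKey is a hypothetical helper; the real classification lives in TryToName):

enum class KeyPath { kElement, kNamedProperty, kRuntime };

// Non-negative integer keys take the element path, unique names the named
// path; anything else (e.g. a string that still needs internalization) is
// left to the Runtime::kSetProperty fallback.
KeyPath ClassifyKey(bool is_nonnegative_int, bool is_unique_name) {
  if (is_nonnegative_int) return KeyPath::kElement;
  if (is_unique_name) return KeyPath::kNamedProperty;
  return KeyPath::kRuntime;
}
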
diff --git a/src/ic/keyed-store-generic.h b/src/ic/keyed-store-generic.h
index daeb61f..8028736 100644
--- a/src/ic/keyed-store-generic.h
+++ b/src/ic/keyed-store-generic.h
@@ -5,15 +5,18 @@
 #ifndef V8_SRC_IC_KEYED_STORE_GENERIC_H_
 #define V8_SRC_IC_KEYED_STORE_GENERIC_H_
 
-#include "src/code-stub-assembler.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
 
+namespace compiler {
+class CodeAssemblerState;
+}
+
 class KeyedStoreGenericGenerator {
  public:
-  static void Generate(CodeStubAssembler* assembler,
-                       const CodeStubAssembler::StoreICParameters* p,
+  static void Generate(compiler::CodeAssemblerState* state,
                        LanguageMode language_mode);
 };
 
diff --git a/src/ic/mips/handler-compiler-mips.cc b/src/ic/mips/handler-compiler-mips.cc
index b2ddea5..c14652c 100644
--- a/src/ic/mips/handler-compiler-mips.cc
+++ b/src/ic/mips/handler-compiler-mips.cc
@@ -129,14 +129,6 @@
   __ Addu(sp, sp, Operand(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  // No-op. Return address is in ra register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  // No-op. Return address is in ra register.
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -181,27 +173,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ lw(result,
-        FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, scratch1);
-}
-
-
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
@@ -219,24 +190,18 @@
   __ Branch(miss, ne, scratch, Operand(at));
 }
 
-
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
-  __ Push(name, receiver, holder);
-}
-
-
 static void CompileCallLoadPropertyWithInterceptor(
     MacroAssembler* masm, Register receiver, Register holder, Register name,
     Handle<JSObject> holder_obj, Runtime::FunctionId id) {
   DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
          Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name, receiver, holder);
+
   __ CallRuntime(id);
 }
 
@@ -342,57 +307,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ li(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ lw(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
-    __ And(at, scratch, Operand(Map::Deprecated::kMask));
-    __ Branch(miss, ne, at, Operand(zero_reg));
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ lw(scratch,
-        FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ Branch(miss_label, ne, value_reg, Operand(scratch));
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ lw(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    // Compare map directly within the Branch() functions.
-    __ GetWeakValue(scratch, Map::WeakCellForMap(field_type->AsClass()));
-    __ Branch(miss_label, ne, map_reg, Operand(scratch));
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -520,14 +434,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ li(v0, value);
-  __ Ret();
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -591,8 +497,18 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name(), receiver(), holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot(), vector());
+  } else {
+    __ Push(scratch3(), scratch2());  // slot, vector
+  }
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/mips/ic-compiler-mips.cc b/src/ic/mips/ic-compiler-mips.cc
deleted file mode 100644
index 86a602b..0000000
--- a/src/ic/mips/ic-compiler-mips.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister());
-
-  __ li(a0, Operand(Smi::FromInt(language_mode)));
-  __ Push(a0);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc
index 561c9d3..fd39972 100644
--- a/src/ic/mips/ic-mips.cc
+++ b/src/ic/mips/ic-mips.cc
@@ -6,528 +6,12 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as elements or name clobbering
-//           one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The address returned from GenerateStringDictionaryProbes() in scratch2
-// is used.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
-                                   Register elements, Register name,
-                                   Register result, Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry check that the value is a normal
-  // property.
-  __ bind(&done);  // scratch2 == elements + 4 * index.
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ And(at, scratch1,
-         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  __ Branch(miss, ne, at, Operand(zero_reg));
-
-  // Get the value at the masked, scaled index and return.
-  __ lw(result,
-        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// value:    The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The address returned from GenerateStringDictionaryProbes() in scratch2
-// is used.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
-                                    Register elements, Register name,
-                                    Register value, Register scratch1,
-                                    Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ bind(&done);  // scratch2 == elements + 4 * index.
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY))
-      << kSmiTagSize;
-  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
-  __ Branch(miss, ne, at, Operand(zero_reg));
-
-  // Store the value at the masked, scaled index and return.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
-  __ sw(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
-                 kDontSaveFPRegs);
-}
-
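The deleted GenerateDictionaryStore ends by testing kTypeAndReadOnlyMask: the store proceeds only for a plain, writable data property. A condensed model of that check (the bit layout is illustrative, not V8's actual PropertyDetails encoding):

#include <cstdint>

constexpr uint32_t kTypeMask = 0x3;         // illustrative "type" field
constexpr uint32_t kReadOnlyBit = 1u << 3;  // illustrative READ_ONLY flag

bool DictionaryStoreFastPathOk(uint32_t details) {
  // A non-zero type (not a normal data property) or a set READ_ONLY bit
  // means the deleted helper branched to the miss label instead.
  return (details & (kTypeMask | kReadOnlyBit)) == 0;
}
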
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = a0;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ lw(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
-                                    JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), v0, a3, t0);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return a3; }
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-
-  __ Push(receiver, name, slot, vector);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in ra.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, t0, t1);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in ra.
-
-  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in ra.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, t0, t1);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in ra.
-
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  Register scratch = t0;
-  Register scratch2 = t4;
-  Register scratch3 = t5;
-  Register address = t1;
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     scratch, scratch2, scratch3, address));
-
-  if (check_map == kCheckMap) {
-    __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ Branch(fast_double, ne, elements_map,
-              Operand(masm->isolate()->factory()->fixed_array_map()));
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element.
-  Label holecheck_passed1;
-  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(scratch, MemOperand(address));
-  __ Branch(&holecheck_passed1, ne, scratch,
-            Operand(masm->isolate()->factory()->the_hole_value()));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
-  __ sw(value, MemOperand(address));
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
-  __ sw(value, MemOperand(address));
-  // Update write barrier for the elements array address.
-  __ mov(scratch, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
-    __ Branch(slow, ne, elements_map, Operand(at));
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so
-  // go to the runtime.
-  __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
-                                     kHoleNanUpper32Offset - kHeapObjectTag));
-  __ Lsa(address, address, key, kPointerSizeLog2);
-  __ lw(scratch, MemOperand(address));
-  __ Branch(&fast_double_without_map_check, ne, scratch,
-            Operand(kHoleNanUpper32));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
-                                 scratch3, &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&non_double_value, ne, scratch, Operand(at));
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
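The deleted helper's HOLECHECK comments capture a subtle rule: overwriting a present element is safe, but writing over a hole must consider the prototype chain, where an accessor could observe the store. The same rule in miniature (plain C++ stand-ins, not V8 types):

#include <cstddef>
#include <optional>
#include <vector>

struct Prototype {
  bool has_indexed_accessor;  // e.g. a setter installed for element indices
};

bool CanStoreWithoutRuntime(const std::vector<std::optional<int>>& elements,
                            size_t index, const Prototype* proto) {
  if (index >= elements.size()) return false;    // out of bounds: grow path
  if (elements[index].has_value()) return true;  // plain overwrite is fine
  // The slot is a hole: a prototype accessor could intercept this store,
  // which is why the deleted code jumped to the runtime here.
  return proto == nullptr || !proto->has_indexed_accessor;
}
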
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // ---------- S t a t e --------------
-  //  -- a0     : value
-  //  -- a1     : key
-  //  -- a2     : receiver
-  //  -- ra     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-
-  // Register usage.
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(value.is(a0));
-  Register receiver_map = a3;
-  Register elements_map = t2;
-  Register elements = t3;  // Elements array of the receiver.
-  // t0 and t1 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ Branch(&slow, ne, t0, Operand(zero_reg));
-  // Check if the object is a JS array or not.
-  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  __ Branch(&slow, lo, t0, Operand(JS_OBJECT_TYPE));
-
-  // Object case: Check key against length in the elements array.
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&fast_object, lo, key, Operand(t0));
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // a0: value.
-  // a1: key.
-  // a2: receiver.
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ lw(t0, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ lb(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(t0, &slow);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ li(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, t1,
-                                                     t2, t4, t5);
-  // Cache miss.
-  __ Branch(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
-  // Only support writing to array[array.length].
-  __ Branch(&slow, ne, key, Operand(t0));
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&slow, hs, key, Operand(t0));
-  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ Branch(&check_if_double_array, ne, elements_map,
-            Heap::kFixedArrayMapRootIndex);
-
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-  __ bind(&array);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Branch(&extra, hs, key, Operand(t0));
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreWithVectorDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister(),
-          StoreWithVectorDescriptor::ReceiverRegister(),
-          StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = t1;
-  DCHECK(receiver.is(a1));
-  DCHECK(name.is(a2));
-  DCHECK(value.is(a0));
-  DCHECK(StoreWithVectorDescriptor::VectorRegister().is(a3));
-  DCHECK(StoreWithVectorDescriptor::SlotRegister().is(t0));
-
-  __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1, t2, t5);
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1, t2, t5);
-  GenerateMiss(masm);
-}
-
-
-#undef __
-
-
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
     case Token::EQ_STRICT:
@@ -585,9 +69,7 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(andi_instruction_address), delta);
+    LOG(isolate, PatchIC(address, andi_instruction_address, delta));
   }
 
   Address patch_address =
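Note on the hunk above: the inline smi-check patch trace no longer goes through a bare PrintF but through the isolate's logger, so the same event can land in the v8 log as well as on stdout. A minimal stand-alone sketch of the pattern (Logger, PatchIC and the LOG macro here are illustrative stand-ins, not V8's real interfaces):

#include <cstdio>

class Logger {
 public:
  explicit Logger(bool enabled) : enabled_(enabled) {}
  // Emits the same fields the old PrintF printed: patch site, address of
  // the andi instruction, and the branch delta.
  void PatchIC(void* address, void* andi_address, int delta) {
    if (!enabled_) return;
    std::printf("[  patching ic at %p, andi=%p, delta=%d\n",
                address, andi_address, delta);
  }
 private:
  bool enabled_;
};

// LOG-style macro: evaluate the member call only when a logger is
// installed, mirroring how the diff replaces the inline PrintF.
#define LOG(logger, Call)                    \
  do {                                       \
    if ((logger) != nullptr) (logger)->Call; \
  } while (false)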
diff --git a/src/ic/mips/stub-cache-mips.cc b/src/ic/mips/stub-cache-mips.cc
deleted file mode 100644
index d476c1e..0000000
--- a/src/ic/mips/stub-cache-mips.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register scratch, Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
-  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
-  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  DCHECK(value_off_addr > key_off_addr);
-  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
-  DCHECK(map_off_addr > key_off_addr);
-  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ Lsa(offset_scratch, offset, offset, 1);
-
-  // Calculate the base address of the entry.
-  __ li(base_addr, Operand(key_offset));
-  __ Addu(base_addr, base_addr, offset_scratch);
-
-  // Check that the key in the entry matches the name.
-  __ lw(at, MemOperand(base_addr, 0));
-  __ Branch(&miss, ne, name, Operand(at));
-
-  // Check the map matches.
-  __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Branch(&miss, ne, at, Operand(scratch2));
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ jmp(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ jmp(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(at);
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  DCHECK(sizeof(Entry) == 12);
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Check register validity.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
-  // extra3 don't conflict with the vector and slot registers, which need
-  // to be preserved for a handler call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    Register vector, slot;
-    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
-      vector = StoreWithVectorDescriptor::VectorRegister();
-      slot = StoreWithVectorDescriptor::SlotRegister();
-    } else {
-      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
-      vector = LoadWithVectorDescriptor::VectorRegister();
-      slot = LoadWithVectorDescriptor::SlotRegister();
-    }
-    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
-                      extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Addu(scratch, scratch, at);
-  __ Xor(scratch, scratch, Operand(kPrimaryMagic));
-  __ And(scratch, scratch,
-         Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ Subu(scratch, scratch, name);
-  __ Addu(scratch, scratch, Operand(kSecondaryMagic));
-  __ And(scratch, scratch,
-         Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
-                      extra3);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_MIPS
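The file deleted above hand-codes the megamorphic stub-cache probe in MIPS assembly. The two-level hash it implements is easier to see in plain C++; a sketch, under the assumption that the magic constants, table sizes and cache_index_shift are the StubCache values the assembly reads:

#include <cstdint>

// Primary probe: mix the name's hash with the receiver's map bits, xor in
// a magic constant, and mask down to a scaled table index (the Addu/Xor/
// And sequence above).
uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_bits, uint32_t magic,
                       uint32_t table_size, int cache_index_shift) {
  return ((name_hash + map_bits) ^ magic) &
         ((table_size - 1) << cache_index_shift);
}

// Secondary probe: on a primary miss, subtract the name back out and add a
// second magic constant, giving an independent slot in the other table.
uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_bits,
                         uint32_t magic, uint32_t table_size,
                         int cache_index_shift) {
  return (primary_offset - name_bits + magic) &
         ((table_size - 1) << cache_index_shift);
}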
diff --git a/src/ic/mips64/handler-compiler-mips64.cc b/src/ic/mips64/handler-compiler-mips64.cc
index 249f8fe..1a38d32 100644
--- a/src/ic/mips64/handler-compiler-mips64.cc
+++ b/src/ic/mips64/handler-compiler-mips64.cc
@@ -129,14 +129,6 @@
   __ Daddu(sp, sp, Operand(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  // No-op. Return address is in ra register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  // No-op. Return address is in ra register.
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -181,27 +173,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ ld(result,
-        FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, scratch1);
-}
-
-
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
@@ -219,24 +190,18 @@
   __ Branch(miss, ne, scratch, Operand(at));
 }
 
-
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
-  __ Push(name, receiver, holder);
-}
-
-
 static void CompileCallLoadPropertyWithInterceptor(
     MacroAssembler* masm, Register receiver, Register holder, Register name,
     Handle<JSObject> holder_obj, Runtime::FunctionId id) {
   DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
          Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name, receiver, holder);
+
   __ CallRuntime(id);
 }
 
@@ -342,57 +307,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ li(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ lwu(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
-    __ And(at, scratch, Operand(Map::Deprecated::kMask));
-    __ Branch(miss, ne, at, Operand(zero_reg));
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ ld(scratch,
-        FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ Branch(miss_label, ne, value_reg, Operand(scratch));
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ ld(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    // Compare map directly within the Branch() functions.
-    __ GetWeakValue(scratch, Map::WeakCellForMap(field_type->AsClass()));
-    __ Branch(miss_label, ne, map_reg, Operand(scratch));
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -520,14 +434,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ li(v0, value);
-  __ Ret();
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -591,8 +497,18 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name(), receiver(), holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot(), vector());
+  } else {
+    __ Push(scratch3(), scratch2());  // slot, vector
+  }
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
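With PushInterceptorArguments gone, the interceptor paths in this file push their arguments inline and, on the tail call to Runtime::kLoadPropertyWithInterceptor, also push the feedback slot and vector. Illustrative constants for the resulting stack layout (the first four names mirror the STATIC_ASSERTs; the last two are assumptions about where the appended values land):

enum InterceptorStackLayout {
  kInterceptorArgsNameIndex = 0,
  kInterceptorArgsThisIndex = 1,
  kInterceptorArgsHolderIndex = 2,
  kInterceptorArgsLength = 3,
  // Pushed after the three interceptor arguments so they survive the
  // tail call into the runtime.
  kSlotIndex = 3,
  kVectorIndex = 4,
};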
diff --git a/src/ic/mips64/ic-compiler-mips64.cc b/src/ic/mips64/ic-compiler-mips64.cc
deleted file mode 100644
index 276f3af..0000000
--- a/src/ic/mips64/ic-compiler-mips64.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister());
-
-  __ li(a0, Operand(Smi::FromInt(language_mode)));
-  __ Push(a0);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_MIPS64
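The deletion above is enabled by the next file: the megamorphic KeyedStoreIC stub, GenerateRuntimeSetProperty's only caller here, is removed as well. The stub itself reduced to a four-argument runtime call; a stand-alone C++ model of that shape, with stand-in types (not V8's):

#include <cstdint>

using TaggedValue = intptr_t;  // stand-in for a tagged V8 value
enum class LanguageMode : int { kSloppy = 0, kStrict = 1 };

// Models Runtime::kSetProperty as the stub invoked it: receiver, name,
// value, plus the language mode as a Smi. In strict mode a failed store
// throws; in sloppy mode it is silently ignored.
TaggedValue RuntimeSetProperty(TaggedValue receiver, TaggedValue name,
                               TaggedValue value, LanguageMode mode) {
  (void)receiver; (void)name; (void)mode;
  return value;  // a store expression evaluates to the stored value
}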
diff --git a/src/ic/mips64/ic-mips64.cc b/src/ic/mips64/ic-mips64.cc
index 57efa35..0e2032a 100644
--- a/src/ic/mips64/ic-mips64.cc
+++ b/src/ic/mips64/ic-mips64.cc
@@ -6,529 +6,12 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as elements or name clobbering
-//           one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The entry address left in scratch2 by GeneratePositiveLookup() is
-// used.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
-                                   Register elements, Register name,
-                                   Register result, Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry check that the value is a normal
-  // property.
-  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ And(at, scratch1,
-         Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
-  __ Branch(miss, ne, at, Operand(zero_reg));
-
-  // Get the value at the masked, scaled index and return.
-  __ ld(result,
-        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// value:    The value to store.
-// The two scratch registers need to be different from elements, name and
-// value.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The entry address left in scratch2 by GeneratePositiveLookup() is
-// used.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
-                                    Register elements, Register name,
-                                    Register value, Register scratch1,
-                                    Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY));
-  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask)));
-  __ Branch(miss, ne, at, Operand(zero_reg));
-
-  // Store the value at the masked, scaled index and return.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
-  __ sd(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
-                 kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = a0;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-  Label slow;
-
-  __ ld(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
-                                    JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), v0, a3, a4);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return a3; }
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-
-  __ Push(receiver, name, slot, vector);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, a4, a5);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in ra.
-
-  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in ra.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, a4, a5);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in ra.
-
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  Register scratch = a4;
-  Register scratch2 = t0;
-  Register address = a5;
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     scratch, scratch2, address));
-
-  if (check_map == kCheckMap) {
-    __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ Branch(fast_double, ne, elements_map,
-              Operand(masm->isolate()->factory()->fixed_array_map()));
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element.
-  Label holecheck_passed1;
-  __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ SmiScale(at, key, kPointerSizeLog2);
-  __ daddu(address, address, at);
-  __ ld(scratch, MemOperand(address));
-
-  __ Branch(&holecheck_passed1, ne, scratch,
-            Operand(masm->isolate()->factory()->the_hole_value()));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ Daddu(address, elements,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiScale(scratch, key, kPointerSizeLog2);
-  __ Daddu(address, address, scratch);
-  __ sd(value, MemOperand(address));
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Daddu(address, elements,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiScale(scratch, key, kPointerSizeLog2);
-  __ Daddu(address, address, scratch);
-  __ sd(value, MemOperand(address));
-  // Update write barrier for the elements array address.
-  __ mov(scratch, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
-    __ Branch(slow, ne, elements_map, Operand(at));
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so,
-  // go to the runtime.
-  __ Daddu(address, elements,
-           Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
-                   kHeapObjectTag));
-  __ SmiScale(at, key, kPointerSizeLog2);
-  __ daddu(address, address, at);
-  __ lw(scratch, MemOperand(address));
-  __ Branch(&fast_double_without_map_check, ne, scratch,
-            Operand(static_cast<int32_t>(kHoleNanUpper32)));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&non_double_value, ne, scratch, Operand(at));
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
-  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // ---------- S t a t e --------------
-  //  -- a0     : value
-  //  -- a1     : key
-  //  -- a2     : receiver
-  //  -- ra     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-
-  // Register usage.
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(value.is(a0));
-  Register receiver_map = a3;
-  Register elements_map = a6;
-  Register elements = a7;  // Elements array of the receiver.
-  // a4 and a5 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ Branch(&slow, ne, a4, Operand(zero_reg));
-  // Check if the object is a JS array or not.
-  __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
-  // Check that the object is some kind of JSObject.
-  __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));
-
-  // Object case: Check key against length in the elements array.
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&fast_object, lo, key, Operand(a4));
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // a0: value.
-  // a1: key.
-  // a2: receiver.
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // The call above does not return here.
-
-  __ bind(&maybe_name_key);
-  __ ld(a4, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(a4, &slow);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-
-  DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ li(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, a5,
-                                                     a6, a7, t0);
-  // Cache miss.
-  __ Branch(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
-  // Only support writing to array[array.length].
-  __ Branch(&slow, ne, key, Operand(a4));
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&slow, hs, key, Operand(a4));
-  __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ Branch(&check_if_double_array, ne, elements_map,
-            Heap::kFixedArrayMapRootIndex);
-
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
-  __ bind(&array);
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Branch(&extra, hs, key, Operand(a4));
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreWithVectorDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister(),
-          StoreWithVectorDescriptor::ReceiverRegister(),
-          StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = a5;
-  DCHECK(!AreAliased(
-      value, receiver, name, StoreWithVectorDescriptor::VectorRegister(),
-      StoreWithVectorDescriptor::SlotRegister(), dictionary, a6, a7));
-
-  __ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1, a6, a7);
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1, a6, a7);
-  GenerateMiss(masm);
-}
-
-
-#undef __
-
-
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
     case Token::EQ_STRICT:
@@ -586,9 +69,7 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(andi_instruction_address), delta);
+    LOG(isolate, PatchIC(address, andi_instruction_address, delta));
   }
 
   Address patch_address =
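The bulk of the deletion above is KeyedStoreIC::GenerateMegamorphic and its helper. Their dispatch logic, stripped of the assembly, is roughly the following (illustrative C++, ignoring the object-vs-double map checks the real stub also performs):

#include <cstdint>

enum class StorePath { kFast, kGrow, kStubCache, kSlowRuntime };

StorePath ClassifyKeyedStore(bool key_is_smi, bool key_is_unique_name,
                             bool is_js_array, uint32_t key,
                             uint32_t array_length, uint32_t backing_length) {
  if (!key_is_smi) {
    // Unique-name keys are routed through the store stub cache with a
    // dummy vector/slot; everything else falls back to the runtime.
    return key_is_unique_name ? StorePath::kStubCache
                              : StorePath::kSlowRuntime;
  }
  if (is_js_array) {
    if (key < array_length) return StorePath::kFast;
    // Only appending at exactly array.length, with spare backing-store
    // capacity, takes the grow-and-store path.
    if (key == array_length && key < backing_length) return StorePath::kGrow;
    return StorePath::kSlowRuntime;
  }
  // Plain JSObject: fast only when in bounds of the backing store.
  return key < backing_length ? StorePath::kFast : StorePath::kSlowRuntime;
}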
diff --git a/src/ic/mips64/stub-cache-mips64.cc b/src/ic/mips64/stub-cache-mips64.cc
deleted file mode 100644
index 6a87b7b..0000000
--- a/src/ic/mips64/stub-cache-mips64.cc
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register scratch, Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  uint64_t key_off_addr = reinterpret_cast<uint64_t>(key_offset.address());
-  uint64_t value_off_addr = reinterpret_cast<uint64_t>(value_offset.address());
-  uint64_t map_off_addr = reinterpret_cast<uint64_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  DCHECK(value_off_addr > key_off_addr);
-  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
-  DCHECK(map_off_addr > key_off_addr);
-  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ Dlsa(offset_scratch, offset, offset, 1);
-
-  // Calculate the base address of the entry.
-  __ li(base_addr, Operand(key_offset));
-  __ Dlsa(base_addr, base_addr, offset_scratch,
-          kPointerSizeLog2 - StubCache::kCacheIndexShift);
-
-  // Check that the key in the entry matches the name.
-  __ ld(at, MemOperand(base_addr, 0));
-  __ Branch(&miss, ne, name, Operand(at));
-
-  // Check the map matches.
-  __ ld(at, MemOperand(base_addr,
-                       static_cast<int32_t>(map_off_addr - key_off_addr)));
-  __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Branch(&miss, ne, at, Operand(scratch2));
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ ld(code, MemOperand(base_addr,
-                         static_cast<int32_t>(value_off_addr - key_off_addr)));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ jmp(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ jmp(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ Daddu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(at);
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-  // Make sure that the code is valid. The multiplying code relies on
-  // each entry consisting of three pointers: name, code and map
-  // (24 bytes on this 64-bit target).
-  DCHECK(sizeof(Entry) == 3 * kPointerSize);
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Check register validity.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
-  // extra3 don't conflict with the vector and slot registers, which need
-  // to be preserved for a handler call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    Register vector, slot;
-    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
-      vector = StoreWithVectorDescriptor::VectorRegister();
-      slot = StoreWithVectorDescriptor::SlotRegister();
-    } else {
-      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
-      vector = LoadWithVectorDescriptor::VectorRegister();
-      slot = LoadWithVectorDescriptor::SlotRegister();
-    }
-    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
-                      extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ lwu(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Addu(scratch, scratch, at);
-  __ Xor(scratch, scratch, Operand(kPrimaryMagic));
-  __ And(scratch, scratch,
-         Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ Subu(scratch, scratch, name);
-  __ Addu(scratch, scratch, kSecondaryMagic);
-  __ And(scratch, scratch,
-         Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
-                      extra3);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_MIPS64
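The 64-bit probe deleted above differs from the 32-bit one mainly in how the masked hash becomes an entry address: the offset is tripled with one Dlsa (three fields per entry: name, code, map) and rescaled with a second. In plain C++, assuming three-pointer entries:

#include <cstdint>

uintptr_t EntryAddress(uintptr_t key_table_base, uint64_t masked_offset,
                       int pointer_size_log2, int cache_index_shift) {
  // Dlsa(off, off, off, 1): offset + (offset << 1) == offset * 3.
  uint64_t tripled = masked_offset + (masked_offset << 1);
  // Dlsa(base, base, tripled, kPointerSizeLog2 - kCacheIndexShift):
  // rescale from cache-index units (4-byte steps) to pointer-sized
  // fields and add to the table base.
  return key_table_base +
         (tripled << (pointer_size_log2 - cache_index_shift));
}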
diff --git a/src/ic/ppc/handler-compiler-ppc.cc b/src/ic/ppc/handler-compiler-ppc.cc
index e0caaa6..3da558d 100644
--- a/src/ic/ppc/handler-compiler-ppc.cc
+++ b/src/ic/ppc/handler-compiler-ppc.cc
@@ -130,14 +130,6 @@
   __ addi(sp, sp, Operand(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -184,27 +176,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ LoadP(result,
-           FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ mr(r3, scratch1);
-  __ Ret();
-}
-
-
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
@@ -224,25 +195,18 @@
 }
 
 
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
-  __ push(name);
-  __ push(receiver);
-  __ push(holder);
-}
-
-
 static void CompileCallLoadPropertyWithInterceptor(
     MacroAssembler* masm, Register receiver, Register holder, Register name,
     Handle<JSObject> holder_obj, Runtime::FunctionId id) {
   DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
          Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name, receiver, holder);
+
   __ CallRuntime(id);
 }
 
@@ -350,58 +314,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ mov(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ lwz(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
-    __ DecodeField<Map::Deprecated>(r0, scratch, SetRC);
-    __ bne(miss, cr0);
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ LoadP(scratch, FieldMemOperand(
-                        scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ cmp(value_reg, scratch);
-  __ bne(miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ bne(miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -538,14 +450,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ Move(r3, value);
-  __ Ret();
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -610,8 +514,18 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name(), receiver(), holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot(), vector());
+  } else {
+    __ Push(scratch3(), scratch2());  // slot, vector
+  }
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/ppc/ic-compiler-ppc.cc b/src/ic/ppc/ic-compiler-ppc.cc
deleted file mode 100644
index c6b36f2..0000000
--- a/src/ic/ppc/ic-compiler-ppc.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  __ mov(r0, Operand(Smi::FromInt(language_mode)));
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(), r0);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_PPC
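The PPC hunks that follow repeat the same dictionary-backed fast paths already removed from the MIPS ports. The property-details test gating both the load and the store reduces to a mask check; in illustrative C++ (the constants stand in for PropertyDetails bit fields, not V8's actual layout):

#include <cstdint>

constexpr uint32_t kTypeMask = 0x3;         // stand-in for TypeField::kMask
constexpr uint32_t kReadOnlyBit = 1u << 3;  // stand-in for READ_ONLY

// Loads are fast-pathed only for plain data properties.
bool CanLoadInPlace(uint32_t details) {
  return (details & kTypeMask) == 0;
}

// Stores additionally require that the property not be read-only.
bool CanStoreInPlace(uint32_t details) {
  return (details & (kTypeMask | kReadOnlyBit)) == 0;
}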
diff --git a/src/ic/ppc/ic-ppc.cc b/src/ic/ppc/ic-ppc.cc
index 359a6a4..0f25846 100644
--- a/src/ic/ppc/ic-ppc.cc
+++ b/src/ic/ppc/ic-ppc.cc
@@ -6,527 +6,12 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as elements or name clobbering
-//           one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
-                                   Register elements, Register name,
-                                   Register result, Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry check that the value is a normal
-  // property.
-  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ mr(r0, scratch2);
-  __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
-  __ and_(scratch2, scratch1, scratch2, SetRC);
-  __ bne(miss, cr0);
-  __ mr(scratch2, r0);
-
-  // Get the value at the masked, scaled index and return.
-  __ LoadP(result,
-           FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// value:    The value to store.
-// The two scratch registers need to be different from elements, name and
-// value.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
-                                    Register elements, Register name,
-                                    Register value, Register scratch1,
-                                    Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  int kTypeAndReadOnlyMask =
-      PropertyDetails::TypeField::kMask |
-      PropertyDetails::AttributesField::encode(READ_ONLY);
-  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ mr(r0, scratch2);
-  __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
-  __ and_(scratch2, scratch1, scratch2, SetRC);
-  __ bne(miss, cr0);
-  __ mr(scratch2, r0);
-
-  // Store the value at the masked, scaled index and return.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ addi(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
-  __ StoreP(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ mr(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = r3;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
-                                       JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), r3, r6, r7);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return r6; }
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-
-  __ Push(receiver, name, slot, vector);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r7, r8);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-
-  __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r7, r8);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreWithVectorDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister(),
-          StoreWithVectorDescriptor::ReceiverRegister(),
-          StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  Register scratch = r7;
-  Register address = r8;
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     scratch, address));
-
-  if (check_map == kCheckMap) {
-    __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ mov(scratch, Operand(masm->isolate()->factory()->fixed_array_map()));
-    __ cmp(elements_map, scratch);
-    __ bne(fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element.
-  Label holecheck_passed1;
-  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(scratch, key);
-  __ LoadPX(scratch, MemOperand(address, scratch));
-  __ Cmpi(scratch, Operand(masm->isolate()->factory()->the_hole_value()), r0);
-  __ bne(&holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
-    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(scratch, key);
-  __ StorePX(value, MemOperand(address, scratch));
-  __ Ret();
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
-    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
-  }
-  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(scratch, key);
-  __ StorePUX(value, MemOperand(address, scratch));
-  // Update write barrier for the elements array address.
-  __ mr(scratch, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-    __ bne(slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so,
-  // go to the runtime.
-  __ addi(address, elements,
-          Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
-                   kHeapObjectTag)));
-  __ SmiToDoubleArrayOffset(scratch, key);
-  __ lwzx(scratch, MemOperand(address, scratch));
-  __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
-  __ bne(&fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
-    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
-  }
-  __ Ret();
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
-  __ bne(&non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ b(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ b(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
-  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ b(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // ---------- S t a t e --------------
-  //  -- r3     : value
-  //  -- r4     : key
-  //  -- r5     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-
-  // Register usage.
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(receiver.is(r4));
-  DCHECK(key.is(r5));
-  DCHECK(value.is(r3));
-  Register receiver_map = r6;
-  Register elements_map = r9;
-  Register elements = r10;  // Elements array of the receiver.
-  // r7 and r8 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ andi(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ bne(&slow, cr0);
-  // Check if the object is a JS array or not.
-  __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ cmpi(r7, Operand(JS_ARRAY_TYPE));
-  __ beq(&array);
-  // Check that the object is some kind of JSObject.
-  __ cmpi(r7, Operand(FIRST_JS_OBJECT_TYPE));
-  __ blt(&slow);
-
-  // Object case: Check key against length in the elements array.
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmpl(key, ip);
-  __ blt(&fast_object);
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // r3: value.
-  // r4: key.
-  // r5: receiver.
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns here.
-
-  __ bind(&maybe_name_key);
-  __ LoadP(r7, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(r7, &slow);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r8,
-                                                     r9, r10, r11);
-  // Cache miss.
-  __ b(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
-  __ bne(&slow);  // Only support writing to array[array.length].
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmpl(key, ip);
-  __ bge(&slow);
-  __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ mov(ip, Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ cmp(elements_map, ip);  // PPC: ip can be reused here.
-  __ bne(&check_if_double_array);
-  __ b(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ mov(ip, Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ cmp(elements_map, ip);  // PPC: ip reused again.
-  __ bne(&slow);
-  __ b(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
-  __ bind(&array);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ LoadP(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ cmpl(key, ip);
-  __ bge(&extra);
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = r8;
-  DCHECK(receiver.is(r4));
-  DCHECK(name.is(r5));
-  DCHECK(value.is(r3));
-  DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r6));
-  DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r7));
-
-  __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r9, r10);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r9, r10);
-  __ Ret();
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r9, r10);
-  GenerateMiss(masm);
-}
-
-
-#undef __
-
-
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
     case Token::EQ_STRICT:
@@ -585,9 +70,7 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(cmp_instruction_address), delta);
+    LOG(isolate, PatchIC(address, cmp_instruction_address, delta));
   }
 
   Address patch_address =
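
As an aside on the code removed above: GenerateDictionaryStore bakes the NameDictionary entry layout (key, value, details triplets) into its offset constants. The following is a minimal C++ sketch of the check-and-store it emits, not V8 code; the fields array, entry index, and mask are stand-ins for the probed dictionary state, and the mask plays the role of kTypeAndReadOnlyMask.

    #include <cstdint>

    // Sketch of the masked details test and store in the deleted PPC
    // GenerateDictionaryStore. 'fields' is the dictionary backing store
    // as words, starting at the elements area; 'entry' indexes the
    // (key, value, details) triplet located by the probe.
    bool TryDictionaryStore(intptr_t* fields, int entry, intptr_t value,
                            intptr_t details_mask) {
      intptr_t* triplet = fields + 3 * entry;      // key, value, details
      if (triplet[2] & details_mask) return false; // -> miss label
      triplet[1] = value;  // caller then emits the write barrier
      return true;
    }
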
diff --git a/src/ic/ppc/stub-cache-ppc.cc b/src/ic/ppc/stub-cache-ppc.cc
deleted file mode 100644
index 3dad306..0000000
--- a/src/ic/ppc/stub-cache-ppc.cc
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register scratch, Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
-  uintptr_t value_off_addr =
-      reinterpret_cast<uintptr_t>(value_offset.address());
-  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  DCHECK(value_off_addr > key_off_addr);
-  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
-  DCHECK(map_off_addr > key_off_addr);
-  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ ShiftLeftImm(offset_scratch, offset, Operand(1));
-  __ add(offset_scratch, offset, offset_scratch);
-
-  // Calculate the base address of the entry.
-  __ mov(base_addr, Operand(key_offset));
-#if V8_TARGET_ARCH_PPC64
-  DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
-  __ ShiftLeftImm(offset_scratch, offset_scratch,
-                  Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
-#else
-  DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
-#endif
-  __ add(base_addr, base_addr, offset_scratch);
-
-  // Check that the key in the entry matches the name.
-  __ LoadP(ip, MemOperand(base_addr, 0));
-  __ cmp(name, ip);
-  __ bne(&miss);
-
-  // Check the map matches.
-  __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ cmp(ip, scratch2);
-  __ bne(&miss);
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ b(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ b(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ addi(r0, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ mtctr(r0);
-  __ bctr();
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-#if V8_TARGET_ARCH_PPC64
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 24.
-  DCHECK(sizeof(Entry) == 24);
-#else
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  DCHECK(sizeof(Entry) == 12);
-#endif
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Check scratch, extra and extra2 registers are valid.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
-  // extra3 don't conflict with the vector and slot registers, which need
-  // to be preserved for a handler call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    Register vector, slot;
-    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
-      vector = StoreWithVectorDescriptor::VectorRegister();
-      slot = StoreWithVectorDescriptor::SlotRegister();
-    } else {
-      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
-      vector = LoadWithVectorDescriptor::VectorRegister();
-      slot = LoadWithVectorDescriptor::SlotRegister();
-    }
-    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
-                      extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ add(scratch, scratch, ip);
-  __ Xor(scratch, scratch, Operand(kPrimaryMagic));
-  // The mask omits the last two bits because they are not part of the hash.
-  __ andi(scratch, scratch,
-          Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ sub(scratch, scratch, name);
-  __ Add(scratch, scratch, kSecondaryMagic, r0);
-  __ andi(scratch, scratch,
-          Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Cache miss: Fall through and let the caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
-                      extra3);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_PPC
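
The ProbeTable/GenerateProbe pair deleted above is one of several per-architecture copies of the same hashing scheme. Below is a sketch of the two index computations the assembly performs, assuming kPrimaryMagic, kSecondaryMagic, the table sizes, and kCacheIndexShift keep the roles shown in the deleted listing.

    #include <cstdint>

    // Primary probe: mix the name hash with the receiver map bits, as
    // the add/Xor/andi sequence above does. The mask keeps the low
    // kCacheIndexShift bits clear so the offset scales directly into
    // the three-word (name, code, map) entries.
    uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_bits) {
      uint32_t key = (name_hash + map_bits) ^ kPrimaryMagic;
      return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
    }

    // Secondary probe, derived from a primary miss: the sub/Add/andi
    // sequence above.
    uint32_t SecondaryOffset(uint32_t primary, uint32_t name_bits) {
      uint32_t key = primary - name_bits + kSecondaryMagic;
      return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
    }
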
diff --git a/src/ic/s390/handler-compiler-s390.cc b/src/ic/s390/handler-compiler-s390.cc
index 72658ec..9f08797 100644
--- a/src/ic/s390/handler-compiler-s390.cc
+++ b/src/ic/s390/handler-compiler-s390.cc
@@ -125,14 +125,6 @@
   __ la(sp, MemOperand(sp, 2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -177,24 +169,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ LoadP(result,
-           FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ LoadRR(r2, scratch1);
-  __ Ret();
-}
-
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
@@ -212,24 +186,18 @@
   __ bne(miss);
 }
 
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
-  __ Push(name);
-  __ Push(receiver);
-  __ Push(holder);
-}
-
 static void CompileCallLoadPropertyWithInterceptor(
     MacroAssembler* masm, Register receiver, Register holder, Register name,
     Handle<JSObject> holder_obj, Runtime::FunctionId id) {
   DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
          Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name, receiver, holder);
+
   __ CallRuntime(id);
 }
 
@@ -335,54 +303,6 @@
   }
 }
 
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ mov(this->name(), Operand(name));
-}
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ LoadlW(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
-    __ DecodeField<Map::Deprecated>(r0, scratch);
-    __ bne(miss);
-  }
-}
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ CmpP(value_reg, FieldMemOperand(
-                         scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ bne(miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ bne(miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -512,12 +432,6 @@
   }
 }
 
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ Move(r2, value);
-  __ Ret();
-}
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -580,8 +494,18 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name(), receiver(), holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot(), vector());
+  } else {
+    __ Push(scratch3(), scratch2());  // slot, vector
+  }
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/s390/ic-compiler-s390.cc b/src/ic/s390/ic-compiler-s390.cc
deleted file mode 100644
index a7691d8..0000000
--- a/src/ic/s390/ic-compiler-s390.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  __ mov(r0, Operand(Smi::FromInt(language_mode)));
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(), r0);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_S390
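
Before the s390 listing below: both it and the PPC helper above perform the same HOLECHECK on double arrays, loading only the upper 32 bits of the element (at Register::kExponentOffset) and comparing against kHoleNanUpper32. A sketch of the equivalent test in portable C++, with the sentinel passed in rather than hard-coded:

    #include <cstdint>
    #include <cstring>

    // True when 'slot' holds the hole NaN sentinel. Only the upper
    // word is significant, which is why the generated code loads a
    // single 32-bit word instead of the full double.
    bool IsDoubleHole(const double* slot, uint32_t hole_nan_upper32) {
      uint64_t bits;
      std::memcpy(&bits, slot, sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
    }
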
diff --git a/src/ic/s390/ic-s390.cc b/src/ic/s390/ic-s390.cc
index bd83af1..494a4cd 100644
--- a/src/ic/s390/ic-s390.cc
+++ b/src/ic/s390/ic-s390.cc
@@ -6,514 +6,11 @@
 
 #include "src/ic/ic.h"
 #include "src/codegen.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as elements or name, clobbering
-//           one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
-                                   Register elements, Register name,
-                                   Register result, Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry, check that the value is a normal
-  // property.
-  __ bind(&done);  // scratch2 == elements + 4 * index
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ LoadRR(r0, scratch2);
-  __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
-  __ AndP(scratch2, scratch1);
-  __ bne(miss);
-  __ LoadRR(scratch2, r0);
-
-  // Get the value at the masked, scaled index and return.
-  __ LoadP(result,
-           FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// value:    The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
-                                    Register elements, Register name,
-                                    Register value, Register scratch1,
-                                    Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry in the dictionary, check that the value
-  // is a normal property that is not read-only.
-  __ bind(&done);  // scratch2 == elements + 4 * index
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  int kTypeAndReadOnlyMask =
-      PropertyDetails::TypeField::kMask |
-      PropertyDetails::AttributesField::encode(READ_ONLY);
-  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ LoadRR(r0, scratch2);
-  __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
-  __ AndP(scratch2, scratch1);
-  __ bne(miss /*, cr0*/);
-  __ LoadRR(scratch2, r0);
-
-  // Store the value at the masked, scaled index and return.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ AddP(scratch2, Operand(kValueOffset - kHeapObjectTag));
-  __ StoreP(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ LoadRR(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = r2;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
-                                       JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), r2, r5, r6);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-// A register that isn't one of the parameters to the load IC.
-static const Register LoadIC_TempRegister() { return r5; }
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-
-  __ Push(receiver, name, slot, vector);
-}
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r6, r7);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-
-  __ LoadRR(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r6, r7);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreWithVectorDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister(),
-          StoreWithVectorDescriptor::ReceiverRegister(),
-          StoreWithVectorDescriptor::NameRegister());
-}
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  Register scratch = r6;
-  Register address = r7;
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     scratch, address));
-
-  if (check_map == kCheckMap) {
-    __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ CmpP(elements_map,
-            Operand(masm->isolate()->factory()->fixed_array_map()));
-    __ bne(fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element.
-  Label holecheck_passed1;
-  // TODO(joransiu): Fold AddP into memref of LoadP
-  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(scratch, key);
-  __ LoadP(scratch, MemOperand(address, scratch));
-  __ CmpP(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
-  __ bne(&holecheck_passed1, Label::kNear);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
-    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(scratch, key);
-  __ StoreP(value, MemOperand(address, scratch));
-  __ Ret();
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
-    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(scratch, key);
-  __ StoreP(value, MemOperand(address, scratch));
-  __ la(address, MemOperand(address, scratch));
-  // Update write barrier for the elements array address.
-  __ LoadRR(scratch, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-    __ bne(slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so,
-  // go to the runtime.
-  // TODO(joransiu): Fold AddP Operand into LoadlW
-  __ AddP(address, elements,
-          Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
-                   kHeapObjectTag)));
-  __ SmiToDoubleArrayOffset(scratch, key);
-  __ LoadlW(scratch, MemOperand(address, scratch));
-  __ CmpP(scratch, Operand(kHoleNanUpper32));
-  __ bne(&fast_double_without_map_check, Label::kNear);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
-    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret();
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
-  __ bne(&non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ b(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ b(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
-  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ b(&finish_object_store);
-}
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // ---------- S t a t e --------------
-  //  -- r2     : value
-  //  -- r3     : key
-  //  -- r4     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-
-  // Register usage.
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(receiver.is(r3));
-  DCHECK(key.is(r4));
-  DCHECK(value.is(r2));
-  Register receiver_map = r5;
-  Register elements_map = r8;
-  Register elements = r9;  // Elements array of the receiver.
-  // r6 and r7 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ LoadlB(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ AndP(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ bne(&slow, Label::kNear);
-  // Check if the object is a JS array or not.
-  __ LoadlB(r6, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ CmpP(r6, Operand(JS_ARRAY_TYPE));
-  __ beq(&array);
-  // Check that the object is some kind of JSObject.
-  __ CmpP(r6, Operand(FIRST_JS_OBJECT_TYPE));
-  __ blt(&slow, Label::kNear);
-
-  // Object case: Check key against length in the elements array.
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ blt(&fast_object);
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // r2: value.
-  // r3: key.
-  // r4: receiver.
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns here.
-
-  __ bind(&maybe_name_key);
-  __ LoadP(r6, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ LoadlB(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(r6, &slow);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, r7, r8, r9, ip));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r7,
-                                                     r8, r9, ip);
-  // Cache miss.
-  __ b(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
-  __ bne(&slow);  // Only support writing to array[array.length].
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ bge(&slow);
-  __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ CmpP(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ bne(&check_if_double_array, Label::kNear);
-  __ b(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ CmpP(elements_map,
-          Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ bne(&slow);
-  __ b(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
-  __ bind(&array);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ CmpLogicalP(key, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ bge(&extra);
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = r7;
-  DCHECK(receiver.is(r3));
-  DCHECK(name.is(r4));
-  DCHECK(value.is(r2));
-  DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r5));
-  DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r6));
-
-  __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r8, r9);
-  __ Ret();
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r8, r9);
-  GenerateMiss(masm);
-}
-
-#undef __
 
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
@@ -573,9 +70,7 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(cmp_instruction_address), delta);
+    LOG(isolate, PatchIC(address, cmp_instruction_address, delta));
   }
 
   // Expected sequence to enable by changing the following
@@ -624,13 +119,13 @@
     cc = static_cast<Condition>((branch_instr & 0x00f00000) >> 20);
     DCHECK((cc == ne) || (cc == eq));
     cc = (cc == ne) ? eq : ne;
-    patcher.masm()->brc(cc, Operand((branch_instr & 0xffff) << 1));
+    patcher.masm()->brc(cc, Operand(branch_instr & 0xffff));
   } else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
     cc = static_cast<Condition>(
         (branch_instr & (static_cast<uint64_t>(0x00f0) << 32)) >> 36);
     DCHECK((cc == ne) || (cc == eq));
     cc = (cc == ne) ? eq : ne;
-    patcher.masm()->brcl(cc, Operand((branch_instr & 0xffffffff) << 1));
+    patcher.masm()->brcl(cc, Operand(branch_instr & 0xffffffff));
   } else {
     DCHECK(false);
   }
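
The brc/brcl hunk above drops the `<< 1` when re-encoding the branch immediate, which implies (an inference from the change itself, not a documented contract) that the assembler's brc()/brcl() now take the immediate in the instruction's native halfword units. A decoding sketch of the fields the patching code extracts:

    #include <cstdint>

    // Field extraction used by the patching code above: the condition
    // mask sits in bits 20-23 of a BRC instruction, and the RI2
    // immediate, a signed halfword count, in the low 16 bits.
    struct BrcFields {
      uint32_t cc;
      int16_t ri2_halfwords;
    };

    BrcFields DecodeBrc(uint32_t branch_instr) {
      return {(branch_instr & 0x00f00000) >> 20,
              static_cast<int16_t>(branch_instr & 0xffff)};
    }
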
diff --git a/src/ic/s390/stub-cache-s390.cc b/src/ic/s390/stub-cache-s390.cc
deleted file mode 100644
index a0564a3..0000000
--- a/src/ic/s390/stub-cache-s390.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/ic/stub-cache.h"
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register scratch, Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
-  uintptr_t value_off_addr =
-      reinterpret_cast<uintptr_t>(value_offset.address());
-  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  DCHECK(value_off_addr > key_off_addr);
-  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
-  DCHECK(map_off_addr > key_off_addr);
-  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ ShiftLeftP(offset_scratch, offset, Operand(1));
-  __ AddP(offset_scratch, offset, offset_scratch);
-
-  // Calculate the base address of the entry.
-  __ mov(base_addr, Operand(key_offset));
-#if V8_TARGET_ARCH_S390X
-  DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
-  __ ShiftLeftP(offset_scratch, offset_scratch,
-                Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
-#else
-  DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
-#endif
-  __ AddP(base_addr, base_addr, offset_scratch);
-
-  // Check that the key in the entry matches the name.
-  __ CmpP(name, MemOperand(base_addr, 0));
-  __ bne(&miss, Label::kNear);
-
-  // Check the map matches.
-  __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ CmpP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bne(&miss, Label::kNear);
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ b(&miss, Label::kNear);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ b(&miss, Label::kNear);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  // TODO(joransiu): Combine into indirect branch
-  __ la(code, MemOperand(code, Code::kHeaderSize - kHeapObjectTag));
-  __ b(code);
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-#if V8_TARGET_ARCH_S390X
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 24.
-  DCHECK(sizeof(Entry) == 24);
-#else
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  DCHECK(sizeof(Entry) == 12);
-#endif
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Check scratch, extra and extra2 registers are valid.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
-  // extra3 don't conflict with the vector and slot registers, which need
-  // to be preserved for a handler call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    Register vector, slot;
-    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
-      vector = StoreWithVectorDescriptor::VectorRegister();
-      slot = StoreWithVectorDescriptor::SlotRegister();
-    } else {
-      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
-      vector = LoadWithVectorDescriptor::VectorRegister();
-      slot = LoadWithVectorDescriptor::SlotRegister();
-    }
-    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
-                      extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ LoadlW(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ AddP(scratch, scratch, ip);
-  __ XorP(scratch, scratch, Operand(kPrimaryMagic));
-  // The mask omits the last two bits because they are not part of the hash.
-  __ AndP(scratch, scratch,
-          Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
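-  // i.e. scratch = ((hash + map) ^ kPrimaryMagic) &
-  //      ((kPrimaryTableSize - 1) << kCacheIndexShift).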
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ SubP(scratch, scratch, name);
-  __ AddP(scratch, scratch, Operand(kSecondaryMagic));
-  __ AndP(scratch, scratch,
-          Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
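-  // i.e. scratch = (primary_offset - name + kSecondaryMagic) &
-  //      ((kSecondaryTableSize - 1) << kCacheIndexShift).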
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
-                      extra3);
-}
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_S390
diff --git a/src/ic/stub-cache.cc b/src/ic/stub-cache.cc
index 84dbf48..5fc8cc3 100644
--- a/src/ic/stub-cache.cc
+++ b/src/ic/stub-cache.cc
@@ -6,6 +6,8 @@
 
 #include "src/ast/ast.h"
 #include "src/base/bits.h"
+#include "src/counters.h"
+#include "src/heap/heap.h"
 #include "src/ic/ic-inl.h"
 #include "src/type-info.h"
 
@@ -99,12 +101,12 @@
   Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
   for (int i = 0; i < kPrimaryTableSize; i++) {
     primary_[i].key = isolate()->heap()->empty_string();
-    primary_[i].map = NULL;
+    primary_[i].map = nullptr;
     primary_[i].value = empty;
   }
   for (int j = 0; j < kSecondaryTableSize; j++) {
     secondary_[j].key = isolate()->heap()->empty_string();
-    secondary_[j].map = NULL;
+    secondary_[j].map = nullptr;
     secondary_[j].value = empty;
   }
 }
@@ -116,9 +118,9 @@
   for (int i = 0; i < kPrimaryTableSize; i++) {
     if (primary_[i].key == *name) {
       Map* map = primary_[i].map;
-      // Map can be NULL, if the stub is constant function call
+      // Map can be nullptr, if the stub is constant function call
       // with a primitive receiver.
-      if (map == NULL) continue;
+      if (map == nullptr) continue;
 
       int offset = PrimaryOffset(*name, map);
       if (entry(primary_, offset) == &primary_[i] &&
@@ -131,9 +133,9 @@
   for (int i = 0; i < kSecondaryTableSize; i++) {
     if (secondary_[i].key == *name) {
       Map* map = secondary_[i].map;
-      // Map can be NULL, if the stub is constant function call
+      // Map can be nullptr, if the stub is constant function call
       // with a primitive receiver.
-      if (map == NULL) continue;
+      if (map == nullptr) continue;
 
       // Lookup in primary table and skip duplicates.
       int primary_offset = PrimaryOffset(*name, map);
diff --git a/src/ic/stub-cache.h b/src/ic/stub-cache.h
index bdd7f4a..4054b32 100644
--- a/src/ic/stub-cache.h
+++ b/src/ic/stub-cache.h
@@ -48,13 +48,6 @@
   // Collect all maps that match the name.
   void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
                            Handle<Context> native_context, Zone* zone);
-  // Generate code for probing the stub cache table.
-  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
-  // registers. Set to no_reg if not needed.
-  // On a miss the probe falls through so the caller can handle it.
-  void GenerateProbe(MacroAssembler* masm, Register receiver, Register name,
-                     Register scratch, Register extra, Register extra2 = no_reg,
-                     Register extra3 = no_reg);
 
   enum Table { kPrimary, kSecondary };
 
@@ -81,7 +74,7 @@
         return StubCache::secondary_;
     }
     UNREACHABLE();
-    return NULL;
+    return nullptr;
   }
 
   Isolate* isolate() { return isolate_; }
@@ -99,7 +92,7 @@
 
   // Some magic number used in primary and secondary hash computations.
   static const int kPrimaryMagic = 0x3d532433;
-  static const int kSecondaryMagic = 0xb16b00b5;
+  static const int kSecondaryMagic = 0xb16ca6e5;
 
   static int PrimaryOffsetForTesting(Name* name, Map* map) {
     return PrimaryOffset(name, map);
diff --git a/src/ic/x64/access-compiler-x64.cc b/src/ic/x64/access-compiler-x64.cc
index 9e95b95..4bbbba5 100644
--- a/src/ic/x64/access-compiler-x64.cc
+++ b/src/ic/x64/access-compiler-x64.cc
@@ -5,6 +5,7 @@
 #if V8_TARGET_ARCH_X64
 
 #include "src/ic/access-compiler.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ic/x64/handler-compiler-x64.cc b/src/ic/x64/handler-compiler-x64.cc
index 36acccc..425ed47 100644
--- a/src/ic/x64/handler-compiler-x64.cc
+++ b/src/ic/x64/handler-compiler-x64.cc
@@ -44,16 +44,6 @@
   __ addp(rsp, Immediate(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  MacroAssembler* masm = this->masm();
-  __ Push(tmp);
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  MacroAssembler* masm = this->masm();
-  __ Pop(tmp);
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -93,30 +83,12 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
 }
 
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+         Runtime::FunctionForId(id)->nargs);
 
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ movp(result,
-          FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register result, Register scratch,
-    Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, result, miss_label);
-  if (!result.is(rax)) __ movp(rax, result);
-  __ ret(0);
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
@@ -124,15 +96,7 @@
   __ Push(name);
   __ Push(receiver);
   __ Push(holder);
-}
 
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm, Register receiver, Register holder, Register name,
-    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
-  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
-         Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
   __ CallRuntime(id);
 }
 
@@ -348,59 +312,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ Move(this->name(), name);
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ movl(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
-    __ andl(scratch, Immediate(Map::Deprecated::kMask));
-    __ j(not_zero, miss);
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ movp(scratch,
-          FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ cmpp(value_reg, scratch);
-  __ j(not_equal, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    Label do_store;
-    __ movp(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ j(not_equal, miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -533,13 +444,6 @@
   }
 }
 
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ Move(rax, value);
-  __ ret(0);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -606,10 +510,26 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  __ PopReturnAddressTo(scratch2());
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
-  __ PushReturnAddressFrom(scratch2());
+
+  // Stack:
+  //   return address
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(receiver());
+  __ Push(holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot());
+    __ Push(vector());
+  } else {
+    __ Push(scratch3());  // slot
+    __ Push(scratch2());  // vector
+  }
+  __ Push(Operand(rsp, 4 * kPointerSize));  // return address
+  __ movp(Operand(rsp, 5 * kPointerSize), name());
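+  // Stack now, from rsp up: return address, vector, slot, holder, receiver,
+  // name -- matching the kInterceptorArgs* indices asserted above.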
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/x64/ic-compiler-x64.cc b/src/ic/x64/ic-compiler-x64.cc
deleted file mode 100644
index 9d73433..0000000
--- a/src/ic/x64/ic-compiler-x64.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  // Return address is on the stack.
-  DCHECK(!rbx.is(StoreDescriptor::ReceiverRegister()) &&
-         !rbx.is(StoreDescriptor::NameRegister()) &&
-         !rbx.is(StoreDescriptor::ValueRegister()));
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(StoreDescriptor::ReceiverRegister());
-  __ Push(StoreDescriptor::NameRegister());
-  __ Push(StoreDescriptor::ValueRegister());
-  __ Push(Smi::FromInt(language_mode));
-  __ PushReturnAddressFrom(rbx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_X64
diff --git a/src/ic/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc
index a916e22..3b87bc9 100644
--- a/src/ic/x64/ic-x64.cc
+++ b/src/ic/x64/ic-x64.cc
@@ -6,530 +6,12 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if name is not an internalized string,
-// and will jump to the miss_label in that case.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
-                                   Register elements, Register name,
-                                   Register r0, Register r1, Register result) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is unchanged.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // r0   - used to hold the capacity of the property dictionary.
-  //
-  // r1   - used to hold the index into the property dictionary.
-  //
-  // result - holds the result on exit if the load succeeded.
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
-                                                   elements, name, r0, r1);
-
-  // If probing finds an entry in the dictionary, r1 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
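-  // Each dictionary entry is a (key, value, details) triple, so the details
-  // word sits two pointers past the entry start.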
-  __ Test(Operand(elements, r1, times_pointer_size,
-                  kDetailsOffset - kHeapObjectTag),
-          Smi::FromInt(PropertyDetails::TypeField::kMask));
-  __ j(not_zero, miss_label);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ movp(result, Operand(elements, r1, times_pointer_size,
-                          kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not an internalized string, and will jump to the miss_label
-// in that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
-                                    Register elements, Register name,
-                                    Register value, Register scratch0,
-                                    Register scratch1) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is clobbered.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // value - holds the value to store and is unchanged.
-  //
-  // scratch0 - used during the positive dictionary lookup and is clobbered.
-  //
-  // scratch1 - used for index into the property dictionary and is clobbered.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(
-      masm, miss_label, &done, elements, name, scratch0, scratch1);
-
-  // If probing finds an entry in the dictionary, scratch0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property that is not read only.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      PropertyDetails::TypeField::kMask |
-      PropertyDetails::AttributesField::encode(READ_ONLY);
-  __ Test(Operand(elements, scratch1, times_pointer_size,
-                  kDetailsOffset - kHeapObjectTag),
-          Smi::FromInt(kTypeAndReadOnlyMask));
-  __ j(not_zero, miss_label);
-
-  // Store the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ leap(scratch1, Operand(elements, scratch1, times_pointer_size,
-                            kValueOffset - kHeapObjectTag));
-  __ movp(Operand(scratch1, 0), value);
-
-  // Update write barrier. Make sure not to clobber the value.
-  __ movp(scratch0, value);
-  __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  DCHECK(receiver.is(rdx));
-  DCHECK(key.is(rcx));
-  DCHECK(value.is(rax));
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  // rbx: receiver's elements array (a FixedArray)
-  // receiver is a JSArray.
-  // r9: map of receiver
-  if (check_map == kCheckMap) {
-    __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
-    __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
-    __ j(not_equal, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element
-  Label holecheck_passed1;
-  __ movp(kScratchRegister,
-          FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize));
-  __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
-  __ j(not_equal, &holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ leal(rdi, Operand(key, 1));
-    __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
-  }
-  // It's irrelevant whether the array is smi-only or not when writing a smi.
-  __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
-          value);
-  __ ret(0);
-
-  __ bind(&non_smi_value);
-  // Writing a non-smi, check whether array allows non-smi elements.
-  // r9: receiver's map
-  __ CheckFastObjectElements(r9, &transition_smi_elements);
-
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ leal(rdi, Operand(key, 1));
-    __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
-  }
-  __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
-          value);
-  __ movp(rdx, value);  // Preserve the value which is returned.
-  __ RecordWriteArray(rbx, rdx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ ret(0);
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    // rdi: elements array's map
-    __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-    __ j(not_equal, slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so
-  // go to the runtime.
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
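-  // The hole in a double array is a canonical NaN, so comparing just the
-  // upper 32 bits against kHoleNanUpper32 is sufficient.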
-  __ cmpl(FieldOperand(rbx, key, times_8, offset), Immediate(kHoleNanUpper32));
-  __ j(not_equal, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, rbx, key, kScratchDoubleReg,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ leal(rdi, Operand(key, 1));
-    __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
-  }
-  __ ret(0);
-
-  __ bind(&transition_smi_elements);
-  __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  // Transition the array appropriately depending on the value type.
-  __ movp(r9, FieldOperand(value, HeapObject::kMapOffset));
-  __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS, rbx, rdi, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   rbx, mode, slow);
-  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, rbx,
-                                         rdi, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, rbx, mode, slow);
-  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
-  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         rbx, rdi, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
-                                                      value, rbx, mode, slow);
-  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // Return address is on the stack.
-  Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register key = StoreDescriptor::NameRegister();
-  DCHECK(receiver.is(rdx));
-  DCHECK(key.is(rcx));
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow_with_tagged_index);
-  // Get the map from the receiver.
-  __ movp(r9, FieldOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ testb(FieldOperand(r9, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_zero, &slow_with_tagged_index);
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  __ SmiToInteger32(key, key);
-
-  __ CmpInstanceType(r9, JS_ARRAY_TYPE);
-  __ j(equal, &array);
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  __ CmpInstanceType(r9, JS_OBJECT_TYPE);
-  __ j(below, &slow);
-
-  // Object case: Check key against length in the elements array.
-  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds.
-  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
-  // rbx: FixedArray
-  __ j(above, &fast_object);
-
-  // Slow case: call runtime.
-  __ bind(&slow);
-  __ Integer32ToSmi(key, key);
-  __ bind(&slow_with_tagged_index);
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ movp(r9, FieldOperand(key, HeapObject::kMapOffset));
-  __ movzxbp(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index);
-
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ Move(vector, dummy_vector);
-  __ Move(slot, Smi::FromInt(slot_index));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r9,
-                                                     no_reg);
-  // Cache miss.
-  __ jmp(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // receiver is a JSArray.
-  // rbx: receiver's elements array (a FixedArray)
-  // flags: smicompare (receiver.length(), key)
-  __ j(not_equal, &slow);  // do not leave holes in the array
-  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
-  __ j(below_equal, &slow);
-  // Increment index to get new length.
-  __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
-  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &check_if_double_array);
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  // rdi: elements array's map
-  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-  __ j(not_equal, &slow);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-  __ bind(&array);
-  // receiver is a JSArray.
-  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array, compute the
-  // address to store into and fall through to fast case.
-  __ SmiCompareInteger32(FieldOperand(receiver, JSArray::kLengthOffset), key);
-  __ j(below_equal, &extra);
-
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
-                                      kCheckMap, kDontIncrementLength);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = rax;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ movp(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
-                                   JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), rbx, rdi, rax);
-  __ ret(0);
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  LoadIC::GenerateRuntimeGetProperty(masm);
-}
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  DCHECK(!rdi.is(receiver) && !rdi.is(name) && !rdi.is(slot) &&
-         !rdi.is(vector));
-
-  __ PopReturnAddressTo(rdi);
-  __ Push(receiver);
-  __ Push(name);
-  __ Push(slot);
-  __ Push(vector);
-  __ PushReturnAddressFrom(rdi);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is on the stack.
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_load_miss(), 1);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-
-  DCHECK(!rbx.is(receiver) && !rbx.is(name));
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(receiver);
-  __ Push(name);
-  __ PushReturnAddressFrom(rbx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_keyed_load_miss(), 1);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-
-  DCHECK(!rbx.is(receiver) && !rbx.is(name));
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(receiver);
-  __ Push(name);
-  __ PushReturnAddressFrom(rbx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
-  Register name = StoreWithVectorDescriptor::NameRegister();
-  Register value = StoreWithVectorDescriptor::ValueRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register temp = r11;
-  DCHECK(!AreAliased(receiver, name, value, slot, vector, temp));
-
-  __ PopReturnAddressTo(temp);
-  __ Push(value);
-  __ Push(slot);
-  __ Push(vector);
-  __ Push(receiver);
-  __ Push(name);
-  __ PushReturnAddressFrom(temp);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = r11;
-  DCHECK(!AreAliased(dictionary, StoreWithVectorDescriptor::VectorRegister(),
-                     StoreWithVectorDescriptor::SlotRegister()));
-
-  Label miss;
-
-  __ movp(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1);
-  __ ret(0);
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
 
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
@@ -580,9 +62,7 @@
   // condition code uses at the patched jump.
   uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(test_instruction_address), delta);
+    LOG(isolate, PatchIC(address, test_instruction_address, delta));
   }
 
   // Patch with a short conditional jump. Enabling means switching from a short
diff --git a/src/ic/x64/stub-cache-x64.cc b/src/ic/x64/stub-cache-x64.cc
deleted file mode 100644
index 946aee5..0000000
--- a/src/ic/x64/stub-cache-x64.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset) {
-  // We need to scale up the pointer by 2 when the offset is scaled by less
-  // than the pointer size.
-  DCHECK(kPointerSize == kInt64Size
-             ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1
-             : kPointerSizeLog2 == StubCache::kCacheIndexShift);
-  ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
-
-  DCHECK_EQ(3u * kPointerSize, sizeof(StubCache::Entry));
-  // The offset register holds the entry offset times four (due to masking
-  // and shifting optimizations).
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  Label miss;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ leap(offset, Operand(offset, offset, times_2, 0));
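-  // leap computes offset + offset * 2 = offset * 3 without touching flags.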
-
-  __ LoadAddress(kScratchRegister, key_offset);
-
-  // Check that the key in the entry matches the name.
-  __ cmpp(name, Operand(kScratchRegister, offset, scale_factor, 0));
-  __ j(not_equal, &miss);
-
-  // Get the map entry from the cache.
-  // Use key_offset + kPointerSize * 2, rather than loading map_offset.
-  DCHECK(stub_cache->map_reference(table).address() -
-             stub_cache->key_reference(table).address() ==
-         kPointerSize * 2);
-  __ movp(kScratchRegister,
-          Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
-  __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ j(not_equal, &miss);
-
-  // Get the code entry from the cache.
-  __ LoadAddress(kScratchRegister, value_offset);
-  __ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ jmp(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ jmp(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
-  __ jmp(kScratchRegister);
-
-  __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-  USE(extra);   // The register extra is not used on the X64 platform.
-  USE(extra2);  // The register extra2 is not used on the X64 platform.
-  USE(extra3);  // The register extra3 is not used on the X64 platform.
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 3 * kPointerSize.
-  DCHECK(sizeof(Entry) == 3 * kPointerSize);
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!scratch.is(receiver));
-  DCHECK(!scratch.is(name));
-
-  // Check the scratch register is valid; extra2 and extra3 must be unused.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(extra2.is(no_reg));
-  DCHECK(extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch doesn't conflict with
-  // the vector and slot registers, which need to be preserved for a handler
-  // call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    if (ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC) {
-      Register vector = LoadWithVectorDescriptor::VectorRegister();
-      Register slot = LoadDescriptor::SlotRegister();
-      DCHECK(!AreAliased(vector, slot, scratch));
-    } else {
-      DCHECK(ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC);
-      Register vector = StoreWithVectorDescriptor::VectorRegister();
-      Register slot = StoreWithVectorDescriptor::SlotRegister();
-      DCHECK(!AreAliased(vector, slot, scratch));
-    }
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
-  // Use only the low 32 bits of the map pointer.
-  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xorp(scratch, Immediate(kPrimaryMagic));
-  // We mask out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.  Also in the two 'and' instructions below.
-  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
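-  // i.e. scratch = ((hash + map) ^ kPrimaryMagic) &
-  //      ((kPrimaryTableSize - 1) << kCacheIndexShift).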
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
-  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xorp(scratch, Immediate(kPrimaryMagic));
-  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
-  __ subl(scratch, name);
-  __ addl(scratch, Immediate(kSecondaryMagic));
-  __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
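-  // i.e. scratch = (primary_offset - name + kSecondaryMagic) &
-  //      ((kSecondaryTableSize - 1) << kCacheIndexShift).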
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_X64
diff --git a/src/ic/x87/OWNERS b/src/ic/x87/OWNERS
index dd9998b..61245ae 100644
--- a/src/ic/x87/OWNERS
+++ b/src/ic/x87/OWNERS
@@ -1 +1,2 @@
 weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/src/ic/x87/handler-compiler-x87.cc b/src/ic/x87/handler-compiler-x87.cc
index a5c32d3..5a61eee 100644
--- a/src/ic/x87/handler-compiler-x87.cc
+++ b/src/ic/x87/handler-compiler-x87.cc
@@ -83,16 +83,6 @@
   __ add(esp, Immediate(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  MacroAssembler* masm = this->masm();
-  __ push(tmp);
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  MacroAssembler* masm = this->masm();
-  __ pop(tmp);
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -132,27 +122,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadGlobalFunction(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ mov(result,
-         FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  // TODO(mvstanton): This isn't used on x87. Move all the other
-  // platform implementations into a code stub so this method can be removed.
-  UNREACHABLE();
-}
-
-
 // Generate call to api function.
 // This function uses push() to generate smaller, faster code than
 // the version above. It is an optimization that should will be removed
@@ -324,10 +293,12 @@
   }
 }
 
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+         Runtime::FunctionForId(id)->nargs);
 
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
@@ -335,15 +306,7 @@
   __ push(name);
   __ push(receiver);
   __ push(holder);
-}
 
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm, Register receiver, Register holder, Register name,
-    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
-  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
-         Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
   __ CallRuntime(id);
 }
 
@@ -359,58 +322,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ mov(this->name(), Immediate(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ mov(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
-    __ and_(scratch, Immediate(Map::Deprecated::kMask));
-    __ j(not_zero, miss);
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ mov(scratch,
-         FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ cmp(value_reg, scratch);
-  __ j(not_equal, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ mov(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ j(not_equal, miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -540,14 +451,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ LoadObject(eax, value);
-  __ ret(0);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -620,10 +523,26 @@
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   // Call the runtime system to load the interceptor.
-  __ pop(scratch2());  // save old return address
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
-  __ push(scratch2());  // restore old return address
+
+  // Stack:
+  //   return address
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ push(receiver());
+  __ push(holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ push(slot());
+    __ push(vector());
+  } else {
+    __ push(scratch3());  // slot
+    __ push(scratch2());  // vector
+  }
+  __ push(Operand(esp, 4 * kPointerSize));  // return address
+  __ mov(Operand(esp, 5 * kPointerSize), name());
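+  // Stack now, from esp up: return address, vector, slot, holder, receiver,
+  // name -- matching the kInterceptorArgs* indices asserted above.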
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/x87/ic-compiler-x87.cc b/src/ic/x87/ic-compiler-x87.cc
deleted file mode 100644
index 11a8cdc..0000000
--- a/src/ic/x87/ic-compiler-x87.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  typedef StoreWithVectorDescriptor Descriptor;
-  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
-  // ----------- S t a t e -------------
-  //  -- esp[12] : value
-  //  -- esp[8]  : slot
-  //  -- esp[4]  : vector
-  //  -- esp[0]  : return address
-  // -----------------------------------
-  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
-                                        Descriptor::kValue);
-
-  __ mov(Operand(esp, 12), Descriptor::ReceiverRegister());
-  __ mov(Operand(esp, 8), Descriptor::NameRegister());
-  __ mov(Operand(esp, 4), Descriptor::ValueRegister());
-  __ pop(ebx);
-  __ push(Immediate(Smi::FromInt(language_mode)));
-  __ push(ebx);  // return address
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_X87
diff --git a/src/ic/x87/ic-x87.cc b/src/ic/x87/ic-x87.cc
index f96e509..7564c00 100644
--- a/src/ic/x87/ic-x87.cc
+++ b/src/ic/x87/ic-x87.cc
@@ -6,532 +6,11 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used to load a property from a dictionary backing
-// storage. This function may fail to load a property even though it is
-// in the dictionary, so code at miss_label must always call a backup
-// property load that is complete. This function is safe to call if
-// name is not internalized, and will jump to the miss_label in that
-// case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
-                                   Register elements, Register name,
-                                   Register r0, Register r1, Register result) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is unchanged.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // Scratch registers:
-  //
-  // r0   - used for the index into the property dictionary
-  //
-  // r1   - used to hold the capacity of the property dictionary.
-  //
-  // result - holds the result on exit.
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
-                                                   elements, name, r0, r1);
-
-  // If probing finds an entry in the dictionary, r0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  __ j(not_zero, miss_label);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not internalized, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
-                                    Register elements, Register name,
-                                    Register value, Register r0, Register r1) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is clobbered.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // value - holds the value to store and is unchanged.
-  //
-  // r0 - used for index into the property dictionary and is clobbered.
-  //
-  // r1 - used to hold the capacity of the property dictionary and is clobbered.
-  Label done;
-
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
-                                                   elements, name, r0, r1);
-
-  // If probing finds an entry in the dictionary, r0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property that is not read only.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY))
-      << kSmiTagSize;
-  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(kTypeAndReadOnlyMask));
-  __ j(not_zero, miss_label);
-
-  // Store the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-  __ mov(Operand(r0, 0), value);
-
-  // Update write barrier. Make sure not to clobber the value.
-  __ mov(r1, value);
-  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-  DCHECK(value.is(eax));
-  // key is a smi.
-  // ebx: FixedArray receiver->elements
-  // edi: receiver map
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  if (check_map == kCheckMap) {
-    __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-    __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-    __ j(not_equal, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element
-  Label holecheck_passed1;
-  __ cmp(FixedArrayElementOperand(ebx, key),
-         masm->isolate()->factory()->the_hole_value());
-  __ j(not_equal, &holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ mov(FixedArrayElementOperand(ebx, key), value);
-  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ CheckFastObjectElements(edi, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  __ mov(FixedArrayElementOperand(ebx, key), value);
-  // Update write barrier for the elements array address.
-  __ mov(edx, value);  // Preserve the value which is returned.
-  __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-    __ j(not_equal, slow);
-    // If the value is a number, store it as a double in the FastDoubleElements
-    // array.
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so,
-  // go to the runtime.
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
-  __ j(not_equal, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, ebx, key, edi,
-                                 &transition_double_elements, false);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(&transition_smi_elements);
-  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  // Transition the array appropriately depending on the value type.
-  __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
-              &non_double_value, DONT_DO_SMI_CHECK);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
-  // and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   ebx, mode, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
-                                         edi, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, ebx, mode, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS,
-  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         ebx, edi, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
-                                                      value, ebx, mode, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
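
As background for the HOLECHECK paths above, the double hole is a NaN with a
distinguished upper word, so only the upper 32 bits of an element need to be
compared; a hedged sketch (the helper is hypothetical, the constant is the
kHoleNanUpper32 referenced above):

    // Sketch only: true if an 8-byte FixedDoubleArray slot holds the hole.
    // The cmp above reaches the upper word by skipping the header plus
    // sizeof(kHoleNanLower32) bytes.
    static bool IsDoubleHole(const uint32_t* slot) {
      return slot[1] == kHoleNanUpper32;
    }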
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  typedef StoreWithVectorDescriptor Descriptor;
-  // Return address is on the stack.
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-  Register receiver = Descriptor::ReceiverRegister();
-  Register key = Descriptor::NameRegister();
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map from the receiver.
-  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
-            Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_zero, &slow);
-
-  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
-                                        Descriptor::kValue);
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
-  __ j(equal, &array);
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  __ CmpInstanceType(edi, JS_OBJECT_TYPE);
-  __ j(below, &slow);
-
-  // Object case: Check key against length in the elements array.
-  // Key is a smi.
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(below, &fast_object);
-
-  // Slow case: call runtime.
-  __ bind(&slow);
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns here.
-
-  __ bind(&maybe_name_key);
-  __ mov(ebx, FieldOperand(key, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(ebx, &slow);
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
-                                                     no_reg);
-
-  // Cache miss.
-  __ jmp(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // receiver is a JSArray.
-  // key is a smi.
-  // ebx: receiver->elements, a FixedArray
-  // edi: receiver map
-  // flags: compare (key, receiver.length())
-  // Do not leave holes in the array:
-  __ j(not_equal, &slow);
-  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(above_equal, &slow);
-  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-  __ j(not_equal, &check_if_double_array);
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-  __ j(not_equal, &slow);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
-  __ bind(&array);
-  // receiver is a JSArray.
-  // key is a smi.
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array and fall through to the
-  // common store code.
-  __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset));  // Compare smis.
-  __ j(above_equal, &extra);
-
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
-                                      kCheckMap, kDontIncrementLength);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = eax;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ mov(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
-                                  JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), edi, ebx, eax);
-  __ ret(0);
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
-         !edi.is(vector));
-
-  __ pop(edi);
-  __ push(receiver);
-  __ push(name);
-  __ push(slot);
-  __ push(vector);
-  __ push(edi);
-}
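
The pop/push sequence above rotates the return address below the four
arguments; as a sketch of the resulting layout (downward-growing stack,
esp[0] on top):

    // Before:                     After:
    //   esp[0]  return address      esp[0]   return address (re-pushed)
    //                               esp[4]   vector
    //                               esp[8]   slot
    //                               esp[12]  name
    //                               esp[16]  receiver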
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  __ IncrementCounter(masm->isolate()->counters()->ic_load_miss(), 1);
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
-  __ pop(ebx);
-  __ push(receiver);
-  __ push(name);
-  __ push(ebx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_miss(), 1);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
-  __ pop(ebx);
-  __ push(receiver);
-  __ push(name);
-  __ push(ebx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
-  Register name = StoreWithVectorDescriptor::NameRegister();
-
-  STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
-  // Current stack layout:
-  // - esp[12]   -- value
-  // - esp[8]    -- slot
-  // - esp[4]    -- vector
-  // - esp[0]    -- return address
-
-  Register return_address = StoreWithVectorDescriptor::SlotRegister();
-  __ pop(return_address);
-  __ push(receiver);
-  __ push(name);
-  __ push(return_address);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  typedef StoreWithVectorDescriptor Descriptor;
-  Label restore_miss;
-  Register receiver = Descriptor::ReceiverRegister();
-  Register name = Descriptor::NameRegister();
-  Register value = Descriptor::ValueRegister();
-  // Since the slot and vector values are passed on the stack, we can use
-  // their registers as scratch registers.
-  Register scratch1 = Descriptor::VectorRegister();
-  Register scratch2 = Descriptor::SlotRegister();
-
-  __ LoadParameterFromStack<Descriptor>(value, Descriptor::kValue);
-
-  // A lot of registers are needed for storing to slow-case objects.
-  // Push and restore the receiver, but rely on GenerateDictionaryStore
-  // preserving the value and name.
-  __ push(receiver);
-
-  Register dictionary = receiver;
-  __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
-                          scratch1, scratch2);
-  __ Drop(1);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1);
-  __ ret(Descriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(&restore_miss);
-  __ pop(receiver);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
 
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
@@ -582,9 +61,7 @@
   // condition code uses at the patched jump.
   uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(test_instruction_address), delta);
+    LOG(isolate, PatchIC(address, test_instruction_address, delta));
   }
 
   // Patch with a short conditional jump. Enabling means switching from a short
diff --git a/src/ic/x87/stub-cache-x87.cc b/src/ic/x87/stub-cache-x87.cc
deleted file mode 100644
index 68fa615..0000000
--- a/src/ic/x87/stub-cache-x87.cc
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register name, Register receiver,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register extra) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  Label miss;
-  Code::Kind ic_kind = stub_cache->ic_kind();
-  bool is_vector_store =
-      IC::ICUseVector(ic_kind) &&
-      (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ lea(offset, Operand(offset, offset, times_2, 0));
-
-  if (extra.is_valid()) {
-    // Get the code entry from the cache.
-    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-    if (is_vector_store) {
-      // The value, vector and slot were passed to the IC on the stack and
-      // they are still there. So we can just jump to the handler.
-      DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
-      __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ jmp(extra);
-    } else {
-      // The vector and slot were pushed onto the stack before starting the
-      // probe, and need to be dropped before calling the handler.
-      __ pop(LoadWithVectorDescriptor::VectorRegister());
-      __ pop(LoadDescriptor::SlotRegister());
-      __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ jmp(extra);
-    }
-
-    __ bind(&miss);
-  } else {
-    DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
-
-    // Save the offset on the stack.
-    __ push(offset);
-
-    // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-    // Restore offset register.
-    __ mov(offset, Operand(esp, 0));
-
-    // Get the code entry from the cache.
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-    // Restore offset and re-load code entry from cache.
-    __ pop(offset);
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Jump to the first instruction in the code stub.
-    if (is_vector_store) {
-      DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
-    }
-    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(offset);
-
-    // Pop at miss.
-    __ bind(&miss);
-    __ pop(offset);
-  }
-}
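
For orientation, a sketch of the entry layout that the tripling lea at the
top of ProbeTable assumes; the struct is hypothetical, since the real cache
exposes its fields through the key/value/map ExternalReferences, all indexed
by the same scaled offset:

    // Sketch only: on this 32-bit target an entry is three words, so
    // sizeof(Entry) == 12 and an offset in pointer units is tripled
    // (offset + offset * 2) to step from one entry to the next.
    struct Entry {
      Name* key;
      Code* value;
      Map* map;
    };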
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-  // Assert that code is valid.  The multiplying code relies on the entry size
-  // being 12.
-  DCHECK(sizeof(Entry) == 12);
-
-  // Assert that there are no register conflicts.
-  DCHECK(!scratch.is(receiver));
-  DCHECK(!scratch.is(name));
-  DCHECK(!extra.is(receiver));
-  DCHECK(!extra.is(name));
-  DCHECK(!extra.is(scratch));
-
-  // Assert scratch and extra registers are valid, and extra2/3 are unused.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(extra2.is(no_reg));
-  DCHECK(extra3.is(no_reg));
-
-  Register offset = scratch;
-  scratch = no_reg;
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, kPrimaryMagic);
-  // We mask out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.  Also in the two 'and' instructions below.
-  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
-  // ProbeTable expects the offset to be pointer scaled, which it is, because
-  // the heap object tag size is 2 and the pointer size log 2 is also 2.
-  DCHECK(kCacheIndexShift == kPointerSizeLog2);
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, name, receiver, offset, extra);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, kPrimaryMagic);
-  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
-  __ sub(offset, name);
-  __ add(offset, Immediate(kSecondaryMagic));
-  __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, name, receiver, offset, extra);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
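
A plain C++ sketch of the two probe indices computed above, assuming
kCacheIndexShift == kPointerSizeLog2 as the DCHECK asserts; the helper names
are hypothetical and all inputs are raw 32-bit words (hash field, map
pointer, name pointer):

    // Sketch only: mirrors the add/xor/and and sub/add/and sequences above.
    uint32_t PrimaryOffset(uint32_t hash_field, uint32_t map_word) {
      uint32_t offset = (hash_field + map_word) ^ kPrimaryMagic;
      return offset & ((kPrimaryTableSize - 1) << kCacheIndexShift);
    }
    uint32_t SecondaryOffset(uint32_t primary, uint32_t name_word) {
      uint32_t offset = primary - name_word + kSecondaryMagic;
      return offset & ((kSecondaryTableSize - 1) << kCacheIndexShift);
    }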
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_X87
diff --git a/src/identity-map.cc b/src/identity-map.cc
index 58dbf6b..9fee8b9 100644
--- a/src/identity-map.cc
+++ b/src/identity-map.cc
@@ -6,7 +6,6 @@
 
 #include "src/base/functional.h"
 #include "src/heap/heap-inl.h"
-#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -14,42 +13,45 @@
 static const int kInitialIdentityMapSize = 4;
 static const int kResizeFactor = 4;
 
-IdentityMapBase::~IdentityMapBase() { Clear(); }
+IdentityMapBase::~IdentityMapBase() {
+  // Clear must be called by the subclass to avoid calling the virtual
+  // DeleteArray function from the destructor.
+  DCHECK_NULL(keys_);
+}
 
 void IdentityMapBase::Clear() {
   if (keys_) {
+    DCHECK(!is_iterable());
     heap_->UnregisterStrongRoots(keys_);
+    DeleteArray(keys_);
+    DeleteArray(values_);
     keys_ = nullptr;
     values_ = nullptr;
     size_ = 0;
+    capacity_ = 0;
     mask_ = 0;
   }
 }
 
-IdentityMapBase::RawEntry IdentityMapBase::Lookup(Object* key) {
-  int index = LookupIndex(key);
-  return index >= 0 ? &values_[index] : nullptr;
+void IdentityMapBase::EnableIteration() {
+  CHECK(!is_iterable());
+  is_iterable_ = true;
 }
 
+void IdentityMapBase::DisableIteration() {
+  CHECK(is_iterable());
+  is_iterable_ = false;
 
-IdentityMapBase::RawEntry IdentityMapBase::Insert(Object* key) {
-  int index = InsertIndex(key);
-  DCHECK_GE(index, 0);
-  return &values_[index];
+  // We might need to resize due to iterator deletion - do this now.
+  if (size_ * kResizeFactor < capacity_ / kResizeFactor) {
+    Resize(capacity_ / kResizeFactor);
+  }
 }
 
-
-int IdentityMapBase::Hash(Object* address) {
-  CHECK_NE(address, heap_->not_mapped_symbol());
-  uintptr_t raw_address = reinterpret_cast<uintptr_t>(address);
-  return static_cast<int>(hasher_(raw_address));
-}
-
-
-int IdentityMapBase::LookupIndex(Object* address) {
+int IdentityMapBase::ScanKeysFor(Object* address) const {
   int start = Hash(address) & mask_;
   Object* not_mapped = heap_->not_mapped_symbol();
-  for (int index = start; index < size_; index++) {
+  for (int index = start; index < capacity_; index++) {
     if (keys_[index] == address) return index;  // Found.
     if (keys_[index] == not_mapped) return -1;  // Not found.
   }
@@ -60,12 +62,11 @@
   return -1;
 }
 
-
-int IdentityMapBase::InsertIndex(Object* address) {
+int IdentityMapBase::InsertKey(Object* address) {
   Object* not_mapped = heap_->not_mapped_symbol();
   while (true) {
     int start = Hash(address) & mask_;
-    int limit = size_ / 2;
+    int limit = capacity_ / 2;
     // Search up to {limit} entries.
     for (int index = start; --limit > 0; index = (index + 1) & mask_) {
       if (keys_[index] == address) return index;  // Found.
@@ -74,72 +75,162 @@
         return index;
       }
     }
-    Resize();  // Should only have to resize once, since we grow 4x.
+    // Should only have to resize once, since we grow 4x.
+    Resize(capacity_ * kResizeFactor);
   }
   UNREACHABLE();
   return -1;
 }
 
+void* IdentityMapBase::DeleteIndex(int index) {
+  void* ret_value = values_[index];
+  Object* not_mapped = heap_->not_mapped_symbol();
+  DCHECK_NE(keys_[index], not_mapped);
+  keys_[index] = not_mapped;
+  values_[index] = nullptr;
+  size_--;
+  DCHECK_GE(size_, 0);
+
+  if (!is_iterable() && (size_ * kResizeFactor < capacity_ / kResizeFactor)) {
+    Resize(capacity_ / kResizeFactor);
+    return ret_value;  // No need to fix collisions as resize reinserts keys.
+  }
+
+  // Move any collisions to their new correct location.
+  int next_index = index;
+  for (;;) {
+    next_index = (next_index + 1) & mask_;
+    Object* key = keys_[next_index];
+    if (key == not_mapped) break;
+
+    int expected_index = Hash(key) & mask_;
+    if (index < next_index) {
+      if (index < expected_index && expected_index <= next_index) continue;
+    } else {
+      DCHECK_GT(index, next_index);
+      if (index < expected_index || expected_index <= next_index) continue;
+    }
+    DCHECK_EQ(not_mapped, keys_[index]);
+    DCHECK_NULL(values_[index]);
+    std::swap(keys_[index], keys_[next_index]);
+    std::swap(values_[index], values_[next_index]);
+    index = next_index;
+  }
+
+  return ret_value;
+}
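
The collision fixup above is backward-shift deletion for linear probing: each
later key in the probe chain is moved into the hole unless its home bucket
lies strictly inside the half-open wrap-around range (hole, current]. The
predicate on its own, as a hypothetical helper:

    // Sketch only: true if the key at 'current' may stay put after the slot
    // at 'hole' was emptied; 'home' is Hash(key) & mask_.
    static bool StaysPut(int hole, int current, int home) {
      if (hole < current) return hole < home && home <= current;
      return hole < home || home <= current;  // probe range wraps around
    }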
+
+int IdentityMapBase::Lookup(Object* key) const {
+  int index = ScanKeysFor(key);
+  if (index < 0 && gc_counter_ != heap_->gc_count()) {
+    // Miss; rehash if there was a GC, then lookup again.
+    const_cast<IdentityMapBase*>(this)->Rehash();
+    index = ScanKeysFor(key);
+  }
+  return index;
+}
+
+int IdentityMapBase::LookupOrInsert(Object* key) {
+  // Perform an optimistic lookup.
+  int index = ScanKeysFor(key);
+  if (index < 0) {
+    // Miss; rehash if there was a GC, then insert.
+    if (gc_counter_ != heap_->gc_count()) Rehash();
+    index = InsertKey(key);
+    size_++;
+    DCHECK_LE(size_, capacity_);
+  }
+  DCHECK_GE(index, 0);
+  return index;
+}
+
+int IdentityMapBase::Hash(Object* address) const {
+  CHECK_NE(address, heap_->not_mapped_symbol());
+  uintptr_t raw_address = reinterpret_cast<uintptr_t>(address);
+  return static_cast<int>(hasher_(raw_address));
+}
 
 // Searches this map for the given key using the object's address
 // as the identity, returning:
 //    found => a pointer to the storage location for the value
 //    not found => a pointer to a new storage location for the value
 IdentityMapBase::RawEntry IdentityMapBase::GetEntry(Object* key) {
-  RawEntry result;
-  if (size_ == 0) {
+  CHECK(!is_iterable());  // Don't allow insertion while iterable.
+  if (capacity_ == 0) {
     // Allocate the initial storage for keys and values.
-    size_ = kInitialIdentityMapSize;
+    capacity_ = kInitialIdentityMapSize;
     mask_ = kInitialIdentityMapSize - 1;
     gc_counter_ = heap_->gc_count();
 
-    keys_ = zone_->NewArray<Object*>(size_);
+    keys_ = reinterpret_cast<Object**>(NewPointerArray(capacity_));
     Object* not_mapped = heap_->not_mapped_symbol();
-    for (int i = 0; i < size_; i++) keys_[i] = not_mapped;
-    values_ = zone_->NewArray<void*>(size_);
-    memset(values_, 0, sizeof(void*) * size_);
+    for (int i = 0; i < capacity_; i++) keys_[i] = not_mapped;
+    values_ = NewPointerArray(capacity_);
+    memset(values_, 0, sizeof(void*) * capacity_);
 
-    heap_->RegisterStrongRoots(keys_, keys_ + size_);
-    result = Insert(key);
-  } else {
-    // Perform an optimistic lookup.
-    result = Lookup(key);
-    if (result == nullptr) {
-      // Miss; rehash if there was a GC, then insert.
-      if (gc_counter_ != heap_->gc_count()) Rehash();
-      result = Insert(key);
-    }
+    heap_->RegisterStrongRoots(keys_, keys_ + capacity_);
   }
-  return result;
+  int index = LookupOrInsert(key);
+  return &values_[index];
 }
 
-
 // Searches this map for the given key using the object's address
 // as the identity, returning:
 //    found => a pointer to the storage location for the value
 //    not found => {nullptr}
-IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Object* key) {
+IdentityMapBase::RawEntry IdentityMapBase::FindEntry(Object* key) const {
+  // Don't allow find by key while iterable (might rehash).
+  CHECK(!is_iterable());
   if (size_ == 0) return nullptr;
-
-  RawEntry result = Lookup(key);
-  if (result == nullptr && gc_counter_ != heap_->gc_count()) {
-    Rehash();  // Rehash is expensive, so only do it in case of a miss.
-    result = Lookup(key);
-  }
-  return result;
+  // Lookup is logically const but may rehash internally via a const_cast.
+  int index = Lookup(key);
+  return index >= 0 ? &values_[index] : nullptr;
 }
 
+// Deletes the given key from the map using the object's address as the
+// identity, returning:
+//    found => the value
+//    not found => {nullptr}
+void* IdentityMapBase::DeleteEntry(Object* key) {
+  CHECK(!is_iterable());  // Don't allow deletion by key while iterable.
+  if (size_ == 0) return nullptr;
+  int index = Lookup(key);
+  if (index < 0) return nullptr;  // No entry found.
+  return DeleteIndex(index);
+}
+
+IdentityMapBase::RawEntry IdentityMapBase::EntryAtIndex(int index) const {
+  DCHECK_LE(0, index);
+  DCHECK_LT(index, capacity_);
+  DCHECK_NE(keys_[index], heap_->not_mapped_symbol());
+  CHECK(is_iterable());  // Must be iterable to access by index.
+  return &values_[index];
+}
+
+int IdentityMapBase::NextIndex(int index) const {
+  DCHECK_LE(-1, index);
+  DCHECK_LE(index, capacity_);
+  CHECK(is_iterable());  // Must be iterable to access by index.
+  Object* not_mapped = heap_->not_mapped_symbol();
+  for (index++; index < capacity_; index++) {
+    if (keys_[index] != not_mapped) {
+      return index;
+    }
+  }
+  return capacity_;
+}
 
 void IdentityMapBase::Rehash() {
+  CHECK(!is_iterable());  // Can't rehash while iterating.
   // Record the current GC counter.
   gc_counter_ = heap_->gc_count();
   // Assume that most objects won't be moved.
-  ZoneVector<std::pair<Object*, void*>> reinsert(zone_);
+  std::vector<std::pair<Object*, void*>> reinsert;
   // Search the table looking for keys that wouldn't be found with their
   // current hashcode and evacuate them.
   int last_empty = -1;
   Object* not_mapped = heap_->not_mapped_symbol();
-  for (int i = 0; i < size_; i++) {
+  for (int i = 0; i < capacity_; i++) {
     if (keys_[i] == not_mapped) {
       last_empty = i;
     } else {
@@ -155,42 +246,45 @@
   }
   // Reinsert all the key/value pairs that were in the wrong place.
   for (auto pair : reinsert) {
-    int index = InsertIndex(pair.first);
+    int index = InsertKey(pair.first);
     DCHECK_GE(index, 0);
-    DCHECK_NE(heap_->not_mapped_symbol(), values_[index]);
     values_[index] = pair.second;
   }
 }
 
-
-void IdentityMapBase::Resize() {
-  // Grow the internal storage and reinsert all the key/value pairs.
-  int old_size = size_;
+void IdentityMapBase::Resize(int new_capacity) {
+  CHECK(!is_iterable());  // Can't resize while iterating.
+  // Resize the internal storage and reinsert all the key/value pairs.
+  DCHECK_GT(new_capacity, size_);
+  int old_capacity = capacity_;
   Object** old_keys = keys_;
   void** old_values = values_;
 
-  size_ = size_ * kResizeFactor;
-  mask_ = size_ - 1;
+  capacity_ = new_capacity;
+  mask_ = capacity_ - 1;
   gc_counter_ = heap_->gc_count();
 
-  CHECK_LE(size_, (1024 * 1024 * 16));  // that would be extreme...
-
-  keys_ = zone_->NewArray<Object*>(size_);
+  keys_ = reinterpret_cast<Object**>(NewPointerArray(capacity_));
   Object* not_mapped = heap_->not_mapped_symbol();
-  for (int i = 0; i < size_; i++) keys_[i] = not_mapped;
-  values_ = zone_->NewArray<void*>(size_);
-  memset(values_, 0, sizeof(void*) * size_);
+  for (int i = 0; i < capacity_; i++) keys_[i] = not_mapped;
+  values_ = NewPointerArray(capacity_);
+  memset(values_, 0, sizeof(void*) * capacity_);
 
-  for (int i = 0; i < old_size; i++) {
+  for (int i = 0; i < old_capacity; i++) {
     if (old_keys[i] == not_mapped) continue;
-    int index = InsertIndex(old_keys[i]);
+    int index = InsertKey(old_keys[i]);
     DCHECK_GE(index, 0);
     values_[index] = old_values[i];
   }
 
   // Unregister old keys and register new keys.
   heap_->UnregisterStrongRoots(old_keys);
-  heap_->RegisterStrongRoots(keys_, keys_ + size_);
+  heap_->RegisterStrongRoots(keys_, keys_ + capacity_);
+
+  // Delete the old storage.
+  DeleteArray(old_keys);
+  DeleteArray(old_values);
 }
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/identity-map.h b/src/identity-map.h
index ad2a260..5fa223a 100644
--- a/src/identity-map.h
+++ b/src/identity-map.h
@@ -13,11 +13,16 @@
 
 // Forward declarations.
 class Heap;
-class Zone;
 
 // Base class of identity maps contains shared code for all template
 // instantiations.
 class IdentityMapBase {
+ public:
+  bool empty() const { return size_ == 0; }
+  int size() const { return size_; }
+  int capacity() const { return capacity_; }
+  bool is_iterable() const { return is_iterable_; }
+
  protected:
   // Allow Tester to access internals, including changing the address of objects
   // within the {keys_} array in order to simulate a moving GC.
@@ -25,51 +30,68 @@
 
   typedef void** RawEntry;
 
-  IdentityMapBase(Heap* heap, Zone* zone)
+  explicit IdentityMapBase(Heap* heap)
       : heap_(heap),
-        zone_(zone),
         gc_counter_(-1),
         size_(0),
+        capacity_(0),
         mask_(0),
         keys_(nullptr),
-        values_(nullptr) {}
-  ~IdentityMapBase();
+        values_(nullptr),
+        is_iterable_(false) {}
+  virtual ~IdentityMapBase();
 
   RawEntry GetEntry(Object* key);
-  RawEntry FindEntry(Object* key);
+  RawEntry FindEntry(Object* key) const;
+  void* DeleteEntry(Object* key);
+  void* DeleteIndex(int index);
   void Clear();
 
+  V8_EXPORT_PRIVATE RawEntry EntryAtIndex(int index) const;
+  V8_EXPORT_PRIVATE int NextIndex(int index) const;
+
+  void EnableIteration();
+  void DisableIteration();
+
+  virtual void** NewPointerArray(size_t length) = 0;
+  virtual void DeleteArray(void* array) = 0;
+
  private:
   // Internal implementation should not be called directly by subclasses.
-  int LookupIndex(Object* address);
-  int InsertIndex(Object* address);
+  int ScanKeysFor(Object* address) const;
+  int InsertKey(Object* address);
+  int Lookup(Object* key) const;
+  int LookupOrInsert(Object* key);
   void Rehash();
-  void Resize();
-  RawEntry Lookup(Object* key);
-  RawEntry Insert(Object* key);
-  int Hash(Object* address);
+  void Resize(int new_capacity);
+  int Hash(Object* address) const;
 
   base::hash<uintptr_t> hasher_;
   Heap* heap_;
-  Zone* zone_;
   int gc_counter_;
   int size_;
+  int capacity_;
   int mask_;
   Object** keys_;
   void** values_;
+  bool is_iterable_;
+
+  DISALLOW_COPY_AND_ASSIGN(IdentityMapBase);
 };
 
 // Implements an identity map from object addresses to a given value type {V}.
 // The map is robust w.r.t. garbage collection by synchronization with the
 // supplied {heap}.
 //  * Keys are treated as strong roots.
-//  * SMIs are valid keys, except SMI #0.
 //  * The value type {V} must be reinterpret_cast'able to {void*}
 //  * The value type {V} must not be a heap type.
-template <typename V>
+template <typename V, class AllocationPolicy>
 class IdentityMap : public IdentityMapBase {
  public:
-  IdentityMap(Heap* heap, Zone* zone) : IdentityMapBase(heap, zone) {}
+  explicit IdentityMap(Heap* heap,
+                       AllocationPolicy allocator = AllocationPolicy())
+      : IdentityMapBase(heap), allocator_(allocator) {}
+  ~IdentityMap() override { Clear(); }
 
   // Searches this map for the given key using the object's address
   // as the identity, returning:
@@ -82,16 +104,77 @@
   // as the identity, returning:
   //    found => a pointer to the storage location for the value
   //    not found => {nullptr}
-  V* Find(Handle<Object> key) { return Find(*key); }
-  V* Find(Object* key) { return reinterpret_cast<V*>(FindEntry(key)); }
+  V* Find(Handle<Object> key) const { return Find(*key); }
+  V* Find(Object* key) const { return reinterpret_cast<V*>(FindEntry(key)); }
 
   // Set the value for the given key.
   void Set(Handle<Object> key, V v) { Set(*key, v); }
   void Set(Object* key, V v) { *(reinterpret_cast<V*>(GetEntry(key))) = v; }
 
+  V Delete(Handle<Object> key) { return Delete(*key); }
+  V Delete(Object* key) { return reinterpret_cast<V>(DeleteEntry(key)); }
+
   // Removes all elements from the map.
   void Clear() { IdentityMapBase::Clear(); }
+
+  // Iterator over IdentityMap. The IteratableScope used to create this Iterator
+  // must be live for the duration of the iteration.
+  class Iterator {
+   public:
+    Iterator& operator++() {
+      index_ = map_->NextIndex(index_);
+      return *this;
+    }
+
+    Iterator& DeleteAndIncrement() {
+      map_->DeleteIndex(index_);
+      index_ = map_->NextIndex(index_);
+      return *this;
+    }
+
+    V* operator*() { return reinterpret_cast<V*>(map_->EntryAtIndex(index_)); }
+    V* operator->() { return reinterpret_cast<V*>(map_->EntryAtIndex(index_)); }
+    bool operator!=(const Iterator& other) { return index_ != other.index_; }
+
+   private:
+    Iterator(IdentityMap* map, int index) : map_(map), index_(index) {}
+
+    IdentityMap* map_;
+    int index_;
+
+    friend class IdentityMap;
+  };
+
+  class IteratableScope {
+   public:
+    explicit IteratableScope(IdentityMap* map) : map_(map) {
+      CHECK(!map_->is_iterable());
+      map_->EnableIteration();
+    }
+    ~IteratableScope() {
+      CHECK(map_->is_iterable());
+      map_->DisableIteration();
+    }
+
+    Iterator begin() { return Iterator(map_, map_->NextIndex(-1)); }
+    Iterator end() { return Iterator(map_, map_->capacity()); }
+
+   private:
+    IdentityMap* map_;
+    DISALLOW_COPY_AND_ASSIGN(IteratableScope);
+  };
+
+ protected:
+  void** NewPointerArray(size_t length) override {
+    return static_cast<void**>(allocator_.New(sizeof(void*) * length));
+  }
+  void DeleteArray(void* array) override { allocator_.Delete(array); }
+
+ private:
+  AllocationPolicy allocator_;
+  DISALLOW_COPY_AND_ASSIGN(IdentityMap);
 };
+
 }  // namespace internal
 }  // namespace v8
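
A hedged usage sketch of the iteration API declared above; the allocation
policy name is a placeholder for whatever malloc-backed policy the embedder
instantiates:

    // Sketch only: iterate and conditionally delete. GetEntry/FindEntry are
    // forbidden while an IteratableScope is live, and DisableIteration()
    // may shrink the table afterwards if enough entries were deleted.
    IdentityMap<int, SomeAllocationPolicy> map(heap);
    {
      IdentityMap<int, SomeAllocationPolicy>::IteratableScope scope(&map);
      for (auto it = scope.begin(); it != scope.end();) {
        if (*(*it) == 0) {
          it.DeleteAndIncrement();  // remove the entry, then advance
        } else {
          ++it;
        }
      }
    }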
 
diff --git a/src/inspector/BUILD.gn b/src/inspector/BUILD.gn
index 6ebb91c..e6742c0 100644
--- a/src/inspector/BUILD.gn
+++ b/src/inspector/BUILD.gn
@@ -140,7 +140,6 @@
     "inspected-context.h",
     "java-script-call-frame.cc",
     "java-script-call-frame.h",
-    "protocol-platform.h",
     "remote-object-id.cc",
     "remote-object-id.h",
     "script-breakpoint.h",
@@ -150,6 +149,8 @@
     "string-16.h",
     "string-util.cc",
     "string-util.h",
+    "test-interface.cc",
+    "test-interface.h",
     "v8-console-agent-impl.cc",
     "v8-console-agent-impl.h",
     "v8-console-message.cc",
@@ -186,5 +187,7 @@
     "v8-stack-trace-impl.h",
     "v8-value-copier.cc",
     "v8-value-copier.h",
+    "wasm-translation.cc",
+    "wasm-translation.h",
   ]
 }
diff --git a/src/inspector/DEPS b/src/inspector/DEPS
index d49c6a6..2d77fb7 100644
--- a/src/inspector/DEPS
+++ b/src/inspector/DEPS
@@ -1,11 +1,13 @@
 include_rules = [
   "-src",
+  "-include/v8-debug.h",
   "+src/base/atomicops.h",
   "+src/base/macros.h",
   "+src/base/logging.h",
   "+src/base/platform/platform.h",
+  "+src/conversions.h",
   "+src/inspector",
   "+src/tracing",
-  "-include/v8-debug.h",
   "+src/debug/debug-interface.h",
+  "+src/debug/interface-types.h",
 ]
diff --git a/src/inspector/debugger-script.js b/src/inspector/debugger-script.js
index 1614566..d9cb12a 100644
--- a/src/inspector/debugger-script.js
+++ b/src/inspector/debugger-script.js
@@ -33,17 +33,6 @@
 
 var DebuggerScript = {};
 
-/**
- * @param {?CompileEvent} eventData
- */
-DebuggerScript.getAfterCompileScript = function(eventData)
-{
-    var script = eventData.script().value();
-    if (!script.is_debugger_script)
-        return script;
-    return null;
-}
-
 /** @type {!Map<!ScopeType, string>} */
 DebuggerScript._scopeTypeNames = new Map();
 DebuggerScript._scopeTypeNames.set(ScopeType.Global, "global");
@@ -53,6 +42,8 @@
 DebuggerScript._scopeTypeNames.set(ScopeType.Catch, "catch");
 DebuggerScript._scopeTypeNames.set(ScopeType.Block, "block");
 DebuggerScript._scopeTypeNames.set(ScopeType.Script, "script");
+DebuggerScript._scopeTypeNames.set(ScopeType.Eval, "eval");
+DebuggerScript._scopeTypeNames.set(ScopeType.Module, "module");
 
 /**
  * @param {function()} fun
@@ -83,60 +74,31 @@
 }
 
 /**
- * @param {Object} object
- * @return {?RawLocation}
+ * @param {Object} gen
+ * @return {?Array<!Scope>}
  */
-DebuggerScript.getGeneratorObjectLocation = function(object)
+DebuggerScript.getGeneratorScopes = function(gen)
 {
-    var mirror = MakeMirror(object, true /* transient */);
+    var mirror = MakeMirror(gen);
     if (!mirror.isGenerator())
         return null;
     var generatorMirror = /** @type {!GeneratorMirror} */(mirror);
-    var funcMirror = generatorMirror.func();
-    if (!funcMirror.resolved())
+    var count = generatorMirror.scopeCount();
+    if (count == 0)
         return null;
-    var location = generatorMirror.sourceLocation() || funcMirror.sourceLocation();
-    var script = funcMirror.script();
-    if (script && location) {
-        return {
-            scriptId: "" + script.id(),
-            lineNumber: location.line,
-            columnNumber: location.column
-        };
+    var result = [];
+    for (var i = 0; i < count; i++) {
+        var scopeDetails = generatorMirror.scope(i).details();
+        var scopeObject = DebuggerScript._buildScopeObject(scopeDetails.type(), scopeDetails.object());
+        if (!scopeObject)
+            continue;
+        result.push({
+            type: /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeDetails.type())),
+            object: scopeObject,
+            name: scopeDetails.name() || ""
+        });
     }
-    return null;
-}
-
-/**
- * @param {Object} object
- * @return {!Array<!{value: *}>|undefined}
- */
-DebuggerScript.getCollectionEntries = function(object)
-{
-    var mirror = MakeMirror(object, true /* transient */);
-    if (mirror.isMap())
-        return /** @type {!MapMirror} */(mirror).entries();
-    if (mirror.isSet() || mirror.isIterator()) {
-        var result = [];
-        var values = mirror.isSet() ? /** @type {!SetMirror} */(mirror).values() : /** @type {!IteratorMirror} */(mirror).preview();
-        for (var i = 0; i < values.length; ++i)
-            result.push({ value: values[i] });
-        return result;
-    }
-}
-
-/**
- * @param {string|undefined} contextData
- * @return {number}
- */
-DebuggerScript._executionContextId = function(contextData)
-{
-    if (!contextData)
-        return 0;
-    var match = contextData.match(/^[^,]*,([^,]*),.*$/);
-    if (!match)
-        return 0;
-    return parseInt(match[1], 10) || 0;
+    return result;
 }
 
 /**
@@ -146,7 +108,7 @@
  */
 DebuggerScript.setBreakpoint = function(execState, info)
 {
-    var breakId = Debug.setScriptBreakPointById(info.sourceID, info.lineNumber, info.columnNumber, info.condition, undefined, Debug.BreakPositionAlignment.Statement);
+    var breakId = Debug.setScriptBreakPointById(info.sourceID, info.lineNumber, info.columnNumber, info.condition, undefined, Debug.BreakPositionAlignment.BreakPosition);
     var locations = Debug.findBreakPointActualLocations(breakId);
     if (!locations.length)
         return undefined;
@@ -225,20 +187,10 @@
 }
 
 /**
- * @param {!ExecutionState} execState
- * @param {!{enabled: boolean}} info
+ * @param {!Array<!BreakPoint>|undefined} breakpoints
  */
-DebuggerScript.setBreakpointsActivated = function(execState, info)
+DebuggerScript.getBreakpointNumbers = function(breakpoints)
 {
-    Debug.debuggerFlags().breakPointsActive.setValue(info.enabled);
-}
-
-/**
- * @param {!BreakEvent} eventData
- */
-DebuggerScript.getBreakpointNumbers = function(eventData)
-{
-    var breakpoints = eventData.breakPointsHit();
     var numbers = [];
     if (!breakpoints)
         return numbers;
@@ -386,8 +338,8 @@
             details = {
                 "functionName": ensureFuncMirror().debugName(),
                 "location": {
-                    "lineNumber": line(),
-                    "columnNumber": column(),
+                    "lineNumber": ensureLocation().line,
+                    "columnNumber": ensureLocation().column,
                     "scriptId": String(script.id())
                 },
                 "this": thisObject,
@@ -448,50 +400,23 @@
     /**
      * @return {number}
      */
-    function line()
-    {
-        return ensureLocation().line;
-    }
-
-    /**
-     * @return {number}
-     */
-    function column()
-    {
-        return ensureLocation().column;
-    }
-
-    /**
-     * @return {number}
-     */
     function contextId()
     {
         var mirror = ensureFuncMirror();
-        // Old V8 do not have context() function on these objects
-        if (!mirror.context)
-            return DebuggerScript._executionContextId(mirror.script().value().context_data);
         var context = mirror.context();
-        if (context)
-            return DebuggerScript._executionContextId(context.data());
+        if (context && context.data())
+            return Number(context.data());
         return 0;
     }
 
     /**
-     * @return {number}
-     */
-    function sourceID()
-    {
-        var script = ensureScriptMirror();
-        return script.id();
-    }
-
-    /**
      * @param {string} expression
+     * @param {boolean} throwOnSideEffect
      * @return {*}
      */
-    function evaluate(expression)
+    function evaluate(expression, throwOnSideEffect)
     {
-        return frameMirror.evaluate(expression, false).value();
+        return frameMirror.evaluate(expression, throwOnSideEffect).value();
     }
 
     /** @return {undefined} */
@@ -514,9 +439,6 @@
     }
 
     return {
-        "sourceID": sourceID,
-        "line": line,
-        "column": column,
         "contextId": contextId,
         "thisObject": thisObject,
         "evaluate": evaluate,
@@ -541,15 +463,21 @@
     case ScopeType.Catch:
     case ScopeType.Block:
     case ScopeType.Script:
+    case ScopeType.Eval:
+    case ScopeType.Module:
         // For transient objects we create a "persistent" copy that contains
         // the same properties.
         // Reset scope object prototype to null so that the proto properties
         // don't appear in the local scope section.
-        var properties = /** @type {!ObjectMirror} */(MakeMirror(scopeObject, true /* transient */)).properties();
+        var properties = /** @type {!ObjectMirror} */(MakeMirror(scopeObject)).properties();
         // Almost always Script scope will be empty, so just filter out that noise.
-        // Also drop empty Block scopes, should we get any.
-        if (!properties.length && (scopeType === ScopeType.Script || scopeType === ScopeType.Block))
+        // Also drop empty Block, Eval and Module scopes, should we get any.
+        if (!properties.length && (scopeType === ScopeType.Script ||
+                                   scopeType === ScopeType.Block ||
+                                   scopeType === ScopeType.Eval ||
+                                   scopeType === ScopeType.Module)) {
             break;
+        }
         result = { __proto__: null };
         for (var j = 0; j < properties.length; j++) {
             var name = properties[j].name();
@@ -566,8 +494,5 @@
     return result;
 }
 
-// We never resolve Mirror by its handle so to avoid memory leaks caused by Mirrors in the cache we disable it.
-ToggleMirrorCache(false);
-
 return DebuggerScript;
 })();
diff --git a/src/inspector/debugger_script_externs.js b/src/inspector/debugger_script_externs.js
index cc152d5..6f36fb9 100644
--- a/src/inspector/debugger_script_externs.js
+++ b/src/inspector/debugger_script_externs.js
@@ -19,21 +19,6 @@
 var RawLocation;
 
 /** @typedef {{
-        id: number,
-        name: string,
-        sourceURL: (string|undefined),
-        sourceMappingURL: (string|undefined),
-        source: string,
-        startLine: number,
-        endLine: number,
-        startColumn: number,
-        endColumn: number,
-        executionContextId: number,
-        executionContextAuxData: string
-    }} */
-var FormattedScript;
-
-/** @typedef {{
         functionName: string,
         location: !RawLocation,
         this: !Object,
@@ -44,11 +29,9 @@
 var JavaScriptCallFrameDetails;
 
 /** @typedef {{
-        sourceID: function():(number),
-        line: function():number,
-        column: function():number,
+        contextId: function():number,
         thisObject: !Object,
-        evaluate: function(string):*,
+        evaluate: function(string, boolean):*,
         restart: function():undefined,
         setVariableValue: function(number, string, *):undefined,
         isAtReturn: boolean,
@@ -89,10 +72,6 @@
  */
 Debug.findBreakPoint = function(breakId, remove) {}
 
-/** @return {!DebuggerFlags} */
-Debug.debuggerFlags = function() {}
-
-
 /** @enum */
 const BreakPositionAlignment = {
     Statement: 0,
@@ -100,32 +79,6 @@
 };
 Debug.BreakPositionAlignment = BreakPositionAlignment;
 
-/** @enum */
-Debug.StepAction = { StepOut: 0,
-                     StepNext: 1,
-                     StepIn: 2,
-                     StepFrame: 3 };
-
-/** @enum */
-const ScriptCompilationType = { Host: 0,
-                              Eval: 1,
-                              JSON: 2 };
-Debug.ScriptCompilationType = ScriptCompilationType;
-
-
-/** @interface */
-function DebuggerFlag() {}
-
-/** @param {boolean} value */
-DebuggerFlag.prototype.setValue = function(value) {}
-
-
-/** @typedef {{
- *    breakPointsActive: !DebuggerFlag
- *  }}
- */
-var DebuggerFlags;
-
 /** @const */
 var LiveEdit = {}
 
@@ -174,28 +127,12 @@
 
 
 /** @interface */
-function CompileEvent() {}
-
-/** @return {!ScriptMirror} */
-CompileEvent.prototype.script = function() {}
-
-
-/** @interface */
-function BreakEvent() {}
-
-/** @return {!Array<!BreakPoint>|undefined} */
-BreakEvent.prototype.breakPointsHit = function() {}
-
-
-/** @interface */
 function ExecutionState() {}
 
 /**
  * @param {string} source
- * @param {boolean} disableBreak
- * @param {*=} additionalContext
  */
-ExecutionState.prototype.evaluateGlobal = function(source, disableBreak, additionalContext) {}
+ExecutionState.prototype.evaluateGlobal = function(source) {}
 
 /** @return {number} */
 ExecutionState.prototype.frameCount = function() {}
@@ -220,7 +157,9 @@
                   Closure: 3,
                   Catch: 4,
                   Block: 5,
-                  Script: 6 };
+                  Script: 6,
+                  Eval: 7,
+                  Module: 8};
 
 
 /** @typedef {{
@@ -237,14 +176,6 @@
 /** @typedef{{
  *    id: number,
  *    context_data: (string|undefined),
- *    source_url: (string|undefined),
- *    source_mapping_url: (string|undefined),
- *    is_debugger_script: boolean,
- *    source: string,
- *    line_offset: number,
- *    column_offset: number,
- *    nameOrSourceURL: function():string,
- *    compilationType: function():!ScriptCompilationType,
  *    }}
  */
 var Script;
@@ -286,16 +217,11 @@
 /** @return {number} */
 FrameDetails.prototype.scopeCount = function() {}
 
-
-/** @param {boolean} value */
-function ToggleMirrorCache(value) {}
-
 /**
  * @param {*} value
- * @param {boolean=} transient
  * @return {!Mirror}
  */
-function MakeMirror(value, transient) {}
+function MakeMirror(value) {}
 
 
 /** @interface */
@@ -307,16 +233,6 @@
 /** @return {boolean} */
 Mirror.prototype.isGenerator = function() {}
 
-/** @return {boolean} */
-Mirror.prototype.isMap = function() {}
-
-/** @return {boolean} */
-Mirror.prototype.isSet = function() {}
-
-/** @return {boolean} */
-Mirror.prototype.isIterator = function() {}
-
-
 /**
  * @interface
  * @extends {Mirror}
@@ -366,60 +282,20 @@
  */
 function UnresolvedFunctionMirror(value) {}
 
-
-/**
- * @interface
- * @extends {ObjectMirror}
- */
-function MapMirror () {}
-
-/**
- * @param {number=} limit
- * @return {!Array<!{key: *, value: *}>}
- */
-MapMirror.prototype.entries = function(limit) {}
-
-
-/**
- * @interface
- * @extends {ObjectMirror}
- */
-function SetMirror () {}
-
-/**
- * @param {number=} limit
- * @return {!Array<*>}
- */
-SetMirror.prototype.values = function(limit) {}
-
-
-/**
- * @interface
- * @extends {ObjectMirror}
- */
-function IteratorMirror () {}
-
-/**
- * @param {number=} limit
- * @return {!Array<*>}
- */
-IteratorMirror.prototype.preview = function(limit) {}
-
-
 /**
  * @interface
  * @extends {ObjectMirror}
  */
 function GeneratorMirror () {}
 
-/** @return {string} */
-GeneratorMirror.prototype.status = function() {}
+/** @return {number} */
+GeneratorMirror.prototype.scopeCount = function() {}
 
-/** @return {!SourceLocation|undefined} */
-GeneratorMirror.prototype.sourceLocation = function() {}
-
-/** @return {!FunctionMirror} */
-GeneratorMirror.prototype.func = function() {}
+/**
+ * @param {number} index
+ * @return {!ScopeMirror|undefined}
+ */
+GeneratorMirror.prototype.scope = function(index) {}
 
 
 /**
@@ -457,9 +333,9 @@
 
 /**
  * @param {string} source
- * @param {boolean} disableBreak
+ * @param {boolean} throwOnSideEffect
  */
-FrameMirror.prototype.evaluate = function(source, disableBreak) {}
+FrameMirror.prototype.evaluate = function(source, throwOnSideEffect) {}
 
 FrameMirror.prototype.restart = function() {}
 
diff --git a/src/inspector/injected-script-native.cc b/src/inspector/injected-script-native.cc
index fcf2ead..5d0136b 100644
--- a/src/inspector/injected-script-native.cc
+++ b/src/inspector/injected-script-native.cc
@@ -44,8 +44,8 @@
                                const String16& groupName) {
   if (m_lastBoundObjectId <= 0) m_lastBoundObjectId = 1;
   int id = m_lastBoundObjectId++;
-  m_idToWrappedObject[id] =
-      wrapUnique(new v8::Global<v8::Value>(m_isolate, value));
+  m_idToWrappedObject.insert(
+      std::make_pair(id, v8::Global<v8::Value>(m_isolate, value)));
   addObjectToGroup(id, groupName);
   return id;
 }
@@ -57,7 +57,7 @@
 
 v8::Local<v8::Value> InjectedScriptNative::objectForId(int id) {
   auto iter = m_idToWrappedObject.find(id);
-  return iter != m_idToWrappedObject.end() ? iter->second->Get(m_isolate)
+  return iter != m_idToWrappedObject.end() ? iter->second.Get(m_isolate)
                                            : v8::Local<v8::Value>();
 }
 
diff --git a/src/inspector/injected-script-native.h b/src/inspector/injected-script-native.h
index 3bdf247..c0b9301 100644
--- a/src/inspector/injected-script-native.h
+++ b/src/inspector/injected-script-native.h
@@ -34,8 +34,7 @@
 
   int m_lastBoundObjectId;
   v8::Isolate* m_isolate;
-  protocol::HashMap<int, std::unique_ptr<v8::Global<v8::Value>>>
-      m_idToWrappedObject;
+  protocol::HashMap<int, v8::Global<v8::Value>> m_idToWrappedObject;
   typedef protocol::HashMap<int, String16> IdToObjectGroupName;
   IdToObjectGroupName m_idToObjectGroupName;
   typedef protocol::HashMap<String16, std::vector<int>> NameToObjectGroup;
diff --git a/src/inspector/injected-script-source.js b/src/inspector/injected-script-source.js
index f3c8d6b..a828b76 100644
--- a/src/inspector/injected-script-source.js
+++ b/src/inspector/injected-script-source.js
@@ -157,11 +157,11 @@
  * @type {!Object<string, !Object<string, boolean>>}
  * @const
  */
-var domAttributesWithObservableSideEffectOnGet = nullifyObjectProto({});
-domAttributesWithObservableSideEffectOnGet["Request"] = nullifyObjectProto({});
-domAttributesWithObservableSideEffectOnGet["Request"]["body"] = true;
-domAttributesWithObservableSideEffectOnGet["Response"] = nullifyObjectProto({});
-domAttributesWithObservableSideEffectOnGet["Response"]["body"] = true;
+var domAttributesWithObservableSideEffectOnGet = {
+    Request: { body: true, __proto__: null },
+    Response: { body: true, __proto__: null },
+    __proto__: null
+}
 
 /**
  * @param {!Object} object
@@ -186,6 +186,7 @@
 var InjectedScript = function()
 {
 }
+InjectedScriptHost.nullifyPrototype(InjectedScript);
 
 /**
  * @type {!Object.<string, boolean>}
@@ -211,6 +212,8 @@
 InjectedScript.closureTypes["script"] = "Script";
 InjectedScript.closureTypes["with"] = "With Block";
 InjectedScript.closureTypes["global"] = "Global";
+InjectedScript.closureTypes["eval"] = "Eval";
+InjectedScript.closureTypes["module"] = "Module";
 
 InjectedScript.prototype = {
     /**
@@ -617,7 +620,13 @@
         var className = InjectedScriptHost.internalConstructorName(obj);
         if (subtype === "array" || subtype === "typedarray") {
             if (typeof obj.length === "number")
-                className += "[" + obj.length + "]";
+                return className + "(" + obj.length + ")";
+            return className;
+        }
+
+        if (subtype === "map" || subtype === "set") {
+            if (typeof obj.size === "number")
+                return className + "(" + obj.size + ")";
             return className;
         }
 
@@ -929,17 +938,16 @@
             if (!descriptor.isOwn)
                 continue;
 
-            // Ignore computed properties.
-            if (!("value" in descriptor))
+            // Ignore computed properties unless they have getters.
+            if (!("value" in descriptor)) {
+                if (descriptor.get)
+                    this._appendPropertyPreview(preview, { name: name, type: "accessor", __proto__: null }, propertiesThreshold);
                 continue;
+            }
 
             var value = descriptor.value;
             var type = typeof value;
 
-            // Never render functions in object preview.
-            if (type === "function" && (this.subtype !== "array" || !isUInt32(name)))
-                continue;
-
             // Special-case HTMLAll.
             if (type === "undefined" && injectedScript._isHTMLAllCollection(value))
                 type = "object";
diff --git a/src/inspector/injected-script.cc b/src/inspector/injected-script.cc
index d605227..9d9c327 100644
--- a/src/inspector/injected-script.cc
+++ b/src/inspector/injected-script.cc
@@ -105,9 +105,9 @@
   if (inspector->getContext(contextGroupId, contextId) != inspectedContext)
     return nullptr;
   if (!injectedScriptValue->IsObject()) return nullptr;
-  return wrapUnique(new InjectedScript(inspectedContext,
-                                       injectedScriptValue.As<v8::Object>(),
-                                       std::move(injectedScriptNative)));
+  return std::unique_ptr<InjectedScript>(
+      new InjectedScript(inspectedContext, injectedScriptValue.As<v8::Object>(),
+                         std::move(injectedScriptNative)));
 }
 
 InjectedScript::InjectedScript(
@@ -150,7 +150,7 @@
   if (!response.isSuccess()) return response;
   protocol::ErrorSupport errors;
   std::unique_ptr<Array<PropertyDescriptor>> result =
-      Array<PropertyDescriptor>::parse(protocolValue.get(), &errors);
+      Array<PropertyDescriptor>::fromValue(protocolValue.get(), &errors);
   if (errors.hasErrors()) return Response::Error(errors.errors());
   *properties = std::move(result);
   return Response::OK();
@@ -158,7 +158,7 @@
 
 void InjectedScript::releaseObject(const String16& objectId) {
   std::unique_ptr<protocol::Value> parsedObjectId =
-      protocol::parseJSON(objectId);
+      protocol::StringUtil::parseJSON(objectId);
   if (!parsedObjectId) return;
   protocol::DictionaryValue* object =
       protocol::DictionaryValue::cast(parsedObjectId.get());
@@ -184,7 +184,7 @@
   if (!response.isSuccess()) return response;
 
   *result =
-      protocol::Runtime::RemoteObject::parse(protocolValue.get(), &errors);
+      protocol::Runtime::RemoteObject::fromValue(protocolValue.get(), &errors);
   if (!result->get()) return Response::Error(errors.errors());
   return Response::OK();
 }
@@ -260,7 +260,8 @@
   Response response = toProtocolValue(context, r, &protocolValue);
   if (!response.isSuccess()) return nullptr;
   protocol::ErrorSupport errors;
-  return protocol::Runtime::RemoteObject::parse(protocolValue.get(), &errors);
+  return protocol::Runtime::RemoteObject::fromValue(protocolValue.get(),
+                                                    &errors);
 }
 
 Response InjectedScript::findObject(const RemoteObjectId& objectId,
@@ -317,7 +318,7 @@
   if (callArgument->hasValue() || callArgument->hasUnserializableValue()) {
     String16 value =
         callArgument->hasValue()
-            ? callArgument->getValue(nullptr)->toJSONString()
+            ? callArgument->getValue(nullptr)->serialize()
             : "Number(\"" + callArgument->getUnserializableValue("") + "\")";
     if (!m_context->inspector()
              ->compileAndRunInternalScript(
@@ -418,7 +419,7 @@
       m_handleScope(inspector->isolate()),
       m_tryCatch(inspector->isolate()),
       m_ignoreExceptionsAndMuteConsole(false),
-      m_previousPauseOnExceptionsState(v8::DebugInterface::NoBreakOnException),
+      m_previousPauseOnExceptionsState(v8::debug::NoBreakOnException),
       m_userGesture(false) {}
 
 Response InjectedScript::Scope::initialize() {
@@ -448,14 +449,13 @@
   m_inspector->client()->muteMetrics(m_contextGroupId);
   m_inspector->muteExceptions(m_contextGroupId);
   m_previousPauseOnExceptionsState =
-      setPauseOnExceptionsState(v8::DebugInterface::NoBreakOnException);
+      setPauseOnExceptionsState(v8::debug::NoBreakOnException);
 }
 
-v8::DebugInterface::ExceptionBreakState
-InjectedScript::Scope::setPauseOnExceptionsState(
-    v8::DebugInterface::ExceptionBreakState newState) {
+v8::debug::ExceptionBreakState InjectedScript::Scope::setPauseOnExceptionsState(
+    v8::debug::ExceptionBreakState newState) {
   if (!m_inspector->debugger()->enabled()) return newState;
-  v8::DebugInterface::ExceptionBreakState presentState =
+  v8::debug::ExceptionBreakState presentState =
       m_inspector->debugger()->getPauseOnExceptionsState();
   if (presentState != newState)
     m_inspector->debugger()->setPauseOnExceptionsState(newState);
diff --git a/src/inspector/injected-script.h b/src/inspector/injected-script.h
index 6500f4d..9e6680a 100644
--- a/src/inspector/injected-script.h
+++ b/src/inspector/injected-script.h
@@ -120,15 +120,15 @@
 
    private:
     void cleanup();
-    v8::DebugInterface::ExceptionBreakState setPauseOnExceptionsState(
-        v8::DebugInterface::ExceptionBreakState);
+    v8::debug::ExceptionBreakState setPauseOnExceptionsState(
+        v8::debug::ExceptionBreakState);
 
     v8::HandleScope m_handleScope;
     v8::TryCatch m_tryCatch;
     v8::Local<v8::Context> m_context;
     std::unique_ptr<V8Console::CommandLineAPIScope> m_commandLineAPIScope;
     bool m_ignoreExceptionsAndMuteConsole;
-    v8::DebugInterface::ExceptionBreakState m_previousPauseOnExceptionsState;
+    v8::debug::ExceptionBreakState m_previousPauseOnExceptionsState;
     bool m_userGesture;
   };
 
diff --git a/src/inspector/injected_script_externs.js b/src/inspector/injected_script_externs.js
index b6339c6..14b14e6 100644
--- a/src/inspector/injected_script_externs.js
+++ b/src/inspector/injected_script_externs.js
@@ -9,6 +9,11 @@
 
 /**
  * @param {*} obj
+ */
+InjectedScriptHostClass.prototype.nullifyPrototype = function(obj) {}
+
+/**
+ * @param {*} obj
  * @return {string}
  */
 InjectedScriptHostClass.prototype.internalConstructorName = function(obj) {}
diff --git a/src/inspector/inspected-context.cc b/src/inspector/inspected-context.cc
index dab3bba..6d9f51e 100644
--- a/src/inspector/inspected-context.cc
+++ b/src/inspector/inspected-context.cc
@@ -41,10 +41,12 @@
       m_humanReadableName(toString16(info.humanReadableName)),
       m_auxData(toString16(info.auxData)),
       m_reported(false) {
+  v8::Isolate* isolate = m_inspector->isolate();
+  info.context->SetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex),
+                                v8::Int32::New(isolate, contextId));
   m_context.SetWeak(&m_context, &clearContext,
                     v8::WeakCallbackType::kParameter);
 
-  v8::Isolate* isolate = m_inspector->isolate();
   v8::Local<v8::Object> global = info.context->Global();
   v8::Local<v8::Object> console =
       V8Console::createConsole(this, info.hasMemoryOnConsole);
@@ -65,6 +67,14 @@
   }
 }
 
+// static
+int InspectedContext::contextId(v8::Local<v8::Context> context) {
+  v8::Local<v8::Value> data =
+      context->GetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex));
+  if (data.IsEmpty() || !data->IsInt32()) return 0;
+  return static_cast<int>(data.As<v8::Int32>()->Value());
+}
+
 v8::Local<v8::Context> InspectedContext::context() const {
   return m_context.Get(isolate());
 }
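
The two hunks above stamp the inspector's context id into the v8::Context::kDebugIdIndex embedder-data slot at construction time, so the id can later be recovered from a bare v8::Local<v8::Context> through the new static contextId(). A usage sketch (the callback name is illustrative, not from this patch):

    // Any code holding only a context handle can now find out which inspected
    // context it belongs to; contextId() returns 0 for unregistered contexts.
    void OnSomeDebugEvent(v8::Local<v8::Context> context) {
      int contextId = InspectedContext::contextId(context);
      if (contextId == 0) return;  // not an inspector-managed context
      // ... route the event to the session owning |contextId| ...
    }
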
diff --git a/src/inspector/inspected-context.h b/src/inspector/inspected-context.h
index f31eb76..f8d97e9 100644
--- a/src/inspector/inspected-context.h
+++ b/src/inspector/inspected-context.h
@@ -21,6 +21,8 @@
  public:
   ~InspectedContext();
 
+  static int contextId(v8::Local<v8::Context>);
+
   v8::Local<v8::Context> context() const;
   int contextId() const { return m_contextId; }
   int contextGroupId() const { return m_contextGroupId; }
diff --git a/src/inspector/inspector.gyp b/src/inspector/inspector.gyp
index c70722f..91507bd 100644
--- a/src/inspector/inspector.gyp
+++ b/src/inspector/inspector.gyp
@@ -13,6 +13,13 @@
   'targets': [
     { 'target_name': 'inspector_injected_script',
       'type': 'none',
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }]
+      ],
       'actions': [
         {
           'action_name': 'convert_js_to_cpp_char_array',
@@ -37,6 +44,13 @@
     },
     { 'target_name': 'inspector_debugger_script',
       'type': 'none',
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }]
+      ],
       'actions': [
         {
           'action_name': 'convert_js_to_cpp_char_array',
@@ -61,6 +75,13 @@
     },
     { 'target_name': 'protocol_compatibility',
       'type': 'none',
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }]
+      ],
       'actions': [
         {
           'action_name': 'protocol_compatibility',
@@ -83,6 +104,13 @@
     { 'target_name': 'protocol_generated_sources',
       'type': 'none',
       'dependencies': [ 'protocol_compatibility' ],
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }]
+      ],
       'actions': [
         {
           'action_name': 'protocol_generated_sources',
diff --git a/src/inspector/inspector.gypi b/src/inspector/inspector.gypi
index 863c038..8aff49d 100644
--- a/src/inspector/inspector.gypi
+++ b/src/inspector/inspector.gypi
@@ -44,7 +44,6 @@
       'inspector/inspected-context.h',
       'inspector/java-script-call-frame.cc',
       'inspector/java-script-call-frame.h',
-      'inspector/protocol-platform.h',
       'inspector/remote-object-id.cc',
       'inspector/remote-object-id.h',
       'inspector/script-breakpoint.h',
@@ -54,6 +53,8 @@
       'inspector/string-16.h',
       'inspector/string-util.cc',
       'inspector/string-util.h',
+      'inspector/test-interface.cc',
+      'inspector/test-interface.h',
       'inspector/v8-console.cc',
       'inspector/v8-console.h',
       'inspector/v8-console-agent-impl.cc',
@@ -90,6 +91,8 @@
       'inspector/v8-stack-trace-impl.h',
       'inspector/v8-value-copier.cc',
       'inspector/v8-value-copier.h',
+      'inspector/wasm-translation.cc',
+      'inspector/wasm-translation.h',
     ]
   }
 }
diff --git a/src/inspector/inspector_protocol_config.json b/src/inspector/inspector_protocol_config.json
index cb9e669..22e2cf5 100644
--- a/src/inspector/inspector_protocol_config.json
+++ b/src/inspector/inspector_protocol_config.json
@@ -3,7 +3,31 @@
         "path": "js_protocol.json",
         "package": "src/inspector/protocol",
         "output": "protocol",
-        "namespace": ["v8_inspector", "protocol"]
+        "namespace": ["v8_inspector", "protocol"],
+        "options": [
+            {
+                "domain": "Schema",
+                "exported": ["Domain"]
+            },
+            {
+                "domain": "Runtime",
+                "async": ["evaluate", "awaitPromise", "callFunctionOn", "runScript"],
+                "exported": ["StackTrace", "RemoteObject"]
+            },
+            {
+                "domain": "Debugger",
+                "exported": ["SearchMatch", "paused.reason"]
+            },
+            {
+                "domain": "Console"
+            },
+            {
+                "domain": "Profiler"
+            },
+            {
+                "domain": "HeapProfiler"
+            }
+        ]
     },
 
     "exported": {
@@ -19,7 +43,6 @@
     "lib": {
         "package": "src/inspector/protocol",
         "output": "protocol",
-        "string_header": "src/inspector/string-util.h",
-        "platform_header": "src/inspector/protocol-platform.h"
+        "string_header": "src/inspector/string-util.h"
     }
 }
diff --git a/src/inspector/java-script-call-frame.cc b/src/inspector/java-script-call-frame.cc
index 2da4f04..9847944 100644
--- a/src/inspector/java-script-call-frame.cc
+++ b/src/inspector/java-script-call-frame.cc
@@ -61,18 +61,6 @@
   return result.As<v8::Int32>()->Value();
 }
 
-int JavaScriptCallFrame::sourceID() const {
-  return callV8FunctionReturnInt("sourceID");
-}
-
-int JavaScriptCallFrame::line() const {
-  return callV8FunctionReturnInt("line");
-}
-
-int JavaScriptCallFrame::column() const {
-  return callV8FunctionReturnInt("column");
-}
-
 int JavaScriptCallFrame::contextId() const {
   return callV8FunctionReturnInt("contextId");
 }
@@ -91,7 +79,7 @@
   return result.As<v8::Boolean>()->BooleanValue(context).FromMaybe(false);
 }
 
-v8::Local<v8::Object> JavaScriptCallFrame::details() const {
+v8::MaybeLocal<v8::Object> JavaScriptCallFrame::details() const {
   v8::MicrotasksScope microtasks(m_isolate,
                                  v8::MicrotasksScope::kDoNotRunMicrotasks);
   v8::Local<v8::Context> context =
@@ -101,12 +89,16 @@
   v8::Local<v8::Function> func = v8::Local<v8::Function>::Cast(
       callFrame->Get(context, toV8StringInternalized(m_isolate, "details"))
           .ToLocalChecked());
-  return v8::Local<v8::Object>::Cast(
-      func->Call(context, callFrame, 0, nullptr).ToLocalChecked());
+  v8::TryCatch try_catch(m_isolate);
+  v8::Local<v8::Value> details;
+  if (func->Call(context, callFrame, 0, nullptr).ToLocal(&details)) {
+    return v8::Local<v8::Object>::Cast(details);
+  }
+  return v8::MaybeLocal<v8::Object>();
 }
 
 v8::MaybeLocal<v8::Value> JavaScriptCallFrame::evaluate(
-    v8::Local<v8::Value> expression) {
+    v8::Local<v8::Value> expression, bool throwOnSideEffect) {
   v8::MicrotasksScope microtasks(m_isolate,
                                  v8::MicrotasksScope::kRunMicrotasks);
   v8::Local<v8::Context> context =
@@ -116,7 +108,9 @@
   v8::Local<v8::Function> evalFunction = v8::Local<v8::Function>::Cast(
       callFrame->Get(context, toV8StringInternalized(m_isolate, "evaluate"))
           .ToLocalChecked());
-  return evalFunction->Call(context, callFrame, 1, &expression);
+  v8::Local<v8::Value> argv[] = {
+      expression, v8::Boolean::New(m_isolate, throwOnSideEffect)};
+  return evalFunction->Call(context, callFrame, arraysize(argv), argv);
 }
 
 v8::MaybeLocal<v8::Value> JavaScriptCallFrame::restart() {
@@ -129,10 +123,11 @@
   v8::Local<v8::Function> restartFunction = v8::Local<v8::Function>::Cast(
       callFrame->Get(context, toV8StringInternalized(m_isolate, "restart"))
           .ToLocalChecked());
-  v8::DebugInterface::SetLiveEditEnabled(m_isolate, true);
+  v8::TryCatch try_catch(m_isolate);
+  v8::debug::SetLiveEditEnabled(m_isolate, true);
   v8::MaybeLocal<v8::Value> result = restartFunction->Call(
       m_debuggerContext.Get(m_isolate), callFrame, 0, nullptr);
-  v8::DebugInterface::SetLiveEditEnabled(m_isolate, false);
+  v8::debug::SetLiveEditEnabled(m_isolate, false);
   return result;
 }
 
@@ -154,6 +149,7 @@
   v8::Local<v8::Value> argv[] = {
       v8::Local<v8::Value>(v8::Integer::New(m_isolate, scopeNumber)),
       variableName, newValue};
+  v8::TryCatch try_catch(m_isolate);
   return setVariableValueFunction->Call(context, callFrame, arraysize(argv),
                                         argv);
 }
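
details() now returns v8::MaybeLocal and runs the underlying JS call under a v8::TryCatch, so a throwing callee surfaces as an empty handle instead of aborting inside ToLocalChecked(); evaluate() additionally forwards the new throwOnSideEffect flag. A caller-side sketch of the MaybeLocal contract (the function below is illustrative):

    // The MaybeLocal return forces the emptiness check that the old
    // Local-returning signature skipped (and crashed on) when the call threw.
    bool DumpFrameDetails(JavaScriptCallFrame* frame) {
      v8::Local<v8::Object> details;
      if (!frame->details().ToLocal(&details)) return false;  // callee threw
      // ... consume |details| ...
      return true;
    }
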
diff --git a/src/inspector/java-script-call-frame.h b/src/inspector/java-script-call-frame.h
index 5a4ce19..b3930c0 100644
--- a/src/inspector/java-script-call-frame.h
+++ b/src/inspector/java-script-call-frame.h
@@ -31,10 +31,10 @@
 #ifndef V8_INSPECTOR_JAVASCRIPTCALLFRAME_H_
 #define V8_INSPECTOR_JAVASCRIPTCALLFRAME_H_
 
+#include <memory>
 #include <vector>
 
 #include "src/base/macros.h"
-#include "src/inspector/protocol-platform.h"
 
 #include "include/v8.h"
 
@@ -44,19 +44,18 @@
  public:
   static std::unique_ptr<JavaScriptCallFrame> create(
       v8::Local<v8::Context> debuggerContext, v8::Local<v8::Object> callFrame) {
-    return wrapUnique(new JavaScriptCallFrame(debuggerContext, callFrame));
+    return std::unique_ptr<JavaScriptCallFrame>(
+        new JavaScriptCallFrame(debuggerContext, callFrame));
   }
   ~JavaScriptCallFrame();
 
-  int sourceID() const;
-  int line() const;
-  int column() const;
   int contextId() const;
 
   bool isAtReturn() const;
-  v8::Local<v8::Object> details() const;
+  v8::MaybeLocal<v8::Object> details() const;
 
-  v8::MaybeLocal<v8::Value> evaluate(v8::Local<v8::Value> expression);
+  v8::MaybeLocal<v8::Value> evaluate(v8::Local<v8::Value> expression,
+                                     bool throwOnSideEffect);
   v8::MaybeLocal<v8::Value> restart();
   v8::MaybeLocal<v8::Value> setVariableValue(int scopeNumber,
                                              v8::Local<v8::Value> variableName,
diff --git a/src/inspector/js_protocol.json b/src/inspector/js_protocol.json
index c1ac585..ef046a5 100644
--- a/src/inspector/js_protocol.json
+++ b/src/inspector/js_protocol.json
@@ -9,7 +9,6 @@
                 "id": "Domain",
                 "type": "object",
                 "description": "Description of the protocol domain.",
-                "exported": true,
                 "properties": [
                     { "name": "name", "type": "string", "description": "Domain name." },
                     { "name": "version", "type": "string", "description": "Domain version." }
@@ -51,7 +50,6 @@
                 "id": "RemoteObject",
                 "type": "object",
                 "description": "Mirror object referencing original JavaScript object.",
-                "exported": true,
                 "properties": [
                     { "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
                     { "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error", "proxy", "promise", "typedarray"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
@@ -200,18 +198,17 @@
                 "id": "StackTrace",
                 "type": "object",
                 "description": "Call frames for assertions or error messages.",
-                "exported": true,
                 "properties": [
                     { "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
                     { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
-                    { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." }
+                    { "name": "parent", "$ref": "StackTrace", "optional": true, "description": "Asynchronous JavaScript stack trace that preceded this stack, if available." },
+                    { "name": "promiseCreationFrame", "$ref": "CallFrame", "optional": true, "experimental": true, "description": "Creation frame of the Promise which produced the next synchronous trace when resolved, if available." }
                 ]
             }
         ],
         "commands": [
             {
                 "name": "evaluate",
-                "async": true,
                 "parameters": [
                     { "name": "expression", "type": "string", "description": "Expression to evaluate." },
                     { "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
@@ -231,7 +228,6 @@
             },
             {
                 "name": "awaitPromise",
-                "async": true,
                 "parameters": [
                     { "name": "promiseObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the promise." },
                     { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
@@ -245,7 +241,6 @@
             },
             {
                 "name": "callFunctionOn",
-                "async": true,
                 "parameters": [
                     { "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to call function on." },
                     { "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
@@ -333,7 +328,6 @@
             },
             {
                 "name": "runScript",
-                "async": true,
                 "parameters": [
                     { "name": "scriptId", "$ref": "ScriptId", "description": "Id of the script to run." },
                     { "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
@@ -390,7 +384,7 @@
                 "name": "consoleAPICalled",
                 "description": "Issued when console API was called.",
                 "parameters": [
-                    { "name": "type", "type": "string", "enum": ["log", "debug", "info", "error", "warning", "dir", "dirxml", "table", "trace", "clear", "startGroup", "startGroupCollapsed", "endGroup", "assert", "profile", "profileEnd"], "description": "Type of the call." },
+                    { "name": "type", "type": "string", "enum": ["log", "debug", "info", "error", "warning", "dir", "dirxml", "table", "trace", "clear", "startGroup", "startGroupCollapsed", "endGroup", "assert", "profile", "profileEnd", "count", "timeEnd"], "description": "Type of the call." },
                     { "name": "args", "type": "array", "items": { "$ref": "RemoteObject" }, "description": "Call arguments." },
                     { "name": "executionContextId", "$ref": "ExecutionContextId", "description": "Identifier of the context where the call was made." },
                     { "name": "timestamp", "$ref": "Timestamp", "description": "Call timestamp." },
@@ -460,7 +454,7 @@
                 "id": "Scope",
                 "type": "object",
                 "properties": [
-                    { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script"], "description": "Scope type." },
+                    { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script", "eval", "module"], "description": "Scope type." },
                     { "name": "object", "$ref": "Runtime.RemoteObject", "description": "Object representing the scope. For <code>global</code> and <code>with</code> scopes it represents the actual object; for the rest of the scopes, it is artificial transient object enumerating scope variables as its properties." },
                     { "name": "name", "type": "string", "optional": true },
                     { "name": "startLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope starts" },
@@ -472,7 +466,6 @@
                 "id": "SearchMatch",
                 "type": "object",
                 "description": "Search match for resource.",
-                "exported": true,
                 "properties": [
                     { "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
                     { "name": "lineContent", "type": "string", "description": "Line with match content." }
@@ -642,7 +635,8 @@
                     { "name": "includeCommandLineAPI", "type": "boolean", "optional": true, "description": "Specifies whether command line API should be available to the evaluated expression, defaults to false." },
                     { "name": "silent", "type": "boolean", "optional": true, "description": "In silent mode exceptions thrown during evaluation are not reported and do not pause execution. Overrides <code>setPauseOnException</code> state." },
                     { "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
-                    { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." }
+                    { "name": "generatePreview", "type": "boolean", "optional": true, "experimental": true, "description": "Whether preview should be generated for the result." },
+                    { "name": "throwOnSideEffect", "type": "boolean", "optional": true, "experimental": true, "description": "Whether to throw an exception if side effect cannot be ruled out during evaluation." }
                 ],
                 "returns": [
                     { "name": "result", "$ref": "Runtime.RemoteObject", "description": "Object wrapper for the evaluation result." },
@@ -700,7 +694,8 @@
                     { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
                     { "name": "isLiveEdit", "type": "boolean", "optional": true, "description": "True, if this script is generated as a result of the live edit operation.", "experimental": true },
                     { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
-                    { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
+                    { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true },
+                    { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module.", "experimental": true }
                 ],
                 "description": "Fired when virtual machine parses script. This event is also fired for all known and uncollected scripts upon enabling debugger."
             },
@@ -717,7 +712,8 @@
                     { "name": "hash", "type": "string", "description": "Content hash of the script."},
                     { "name": "executionContextAuxData", "type": "object", "optional": true, "description": "Embedder-specific auxiliary data." },
                     { "name": "sourceMapURL", "type": "string", "optional": true, "description": "URL of source map associated with script (if any)." },
-                    { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true }
+                    { "name": "hasSourceURL", "type": "boolean", "optional": true, "description": "True, if this script has sourceURL.", "experimental": true },
+                    { "name": "isModule", "type": "boolean", "optional": true, "description": "True, if this script is ES6 module.", "experimental": true }
                 ],
                 "description": "Fired when virtual machine fails to parse the script."
             },
@@ -733,7 +729,7 @@
                 "name": "paused",
                 "parameters": [
                     { "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
-                    { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "other" ], "description": "Pause reason.", "exported": true },
+                    { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "OOM", "other", "ambiguous" ], "description": "Pause reason." },
                     { "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
                     { "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
                     { "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
@@ -828,6 +824,38 @@
                     { "name": "line", "type": "integer", "description": "Source line number (1-based)." },
                     { "name": "ticks", "type": "integer", "description": "Number of samples attributed to the source line." }
                 ]
+            },
+            {   "id": "CoverageRange",
+                "type": "object",
+                "description": "Coverage data for a source range.",
+                "properties": [
+                    { "name": "startLineNumber", "type": "integer", "description": "JavaScript script line number (0-based) for the range start." },
+                    { "name": "startColumnNumber", "type": "integer", "description": "JavaScript script column number (0-based) for the range start." },
+                    { "name": "endLineNumber", "type": "integer", "description": "JavaScript script line number (0-based) for the range end." },
+                    { "name": "endColumnNumber", "type": "integer", "description": "JavaScript script column number (0-based) for the range end." },
+                    { "name": "count", "type": "integer", "description": "Collected execution count of the source range." }
+                ],
+                "experimental": true
+            },
+            {   "id": "FunctionCoverage",
+                "type": "object",
+                "description": "Coverage data for a JavaScript function.",
+                "properties": [
+                    { "name": "functionName", "type": "string", "description": "JavaScript function name." },
+                    { "name": "ranges", "type": "array", "items": { "$ref": "CoverageRange" }, "description": "Source ranges inside the function with coverage data." }
+                ],
+                "experimental": true
+            },
+            {
+                "id": "ScriptCoverage",
+                "type": "object",
+                "description": "Coverage data for a JavaScript script.",
+                "properties": [
+                    { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." },
+                    { "name": "url", "type": "string", "description": "JavaScript script name or url." },
+                    { "name": "functions", "type": "array", "items": { "$ref": "FunctionCoverage" }, "description": "Functions contained in the script that has coverage data." }
+                ],
+                "experimental": true
             }
         ],
         "commands": [
@@ -852,6 +880,32 @@
                 "returns": [
                     { "name": "profile", "$ref": "Profile", "description": "Recorded profile." }
                 ]
+            },
+            {
+                "name": "startPreciseCoverage",
+                "description": "Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code coverage may be incomplete. Enabling prevents running optimized code and resets execution counters.",
+                "experimental": true
+            },
+            {
+                "name": "stopPreciseCoverage",
+                "description": "Disable precise code coverage. Disabling releases unnecessary execution count records and allows executing optimized code.",
+                "experimental": true
+            },
+            {
+                "name": "takePreciseCoverage",
+                "returns": [
+                    { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
+                ],
+                "description": "Collect coverage data for the current isolate, and resets execution counters. Precise code coverage needs to have started.",
+                "experimental": true
+            },
+            {
+                "name": "getBestEffortCoverage",
+                "returns": [
+                    { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." }
+                ],
+                "description": "Collect coverage data for the current isolate. The coverage data may be incomplete due to garbage collection.",
+                "experimental": true
             }
         ],
         "events": [
diff --git a/src/inspector/protocol-platform.h b/src/inspector/protocol-platform.h
deleted file mode 100644
index c772393..0000000
--- a/src/inspector/protocol-platform.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INSPECTOR_PROTOCOLPLATFORM_H_
-#define V8_INSPECTOR_PROTOCOLPLATFORM_H_
-
-#include <memory>
-
-#include "src/base/logging.h"
-
-namespace v8_inspector {
-
-template <typename T>
-std::unique_ptr<T> wrapUnique(T* ptr) {
-  return std::unique_ptr<T>(ptr);
-}
-
-}  // namespace v8_inspector
-
-#endif  // V8_INSPECTOR_PROTOCOLPLATFORM_H_
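
With this header gone, the wrapUnique() call sites across the patch are spelled out as direct std::unique_ptr constructions, as the earlier hunks show. std::make_unique would be the idiomatic replacement, but it cannot reach private constructors, which is presumably why the explicit form is used; a sketch with a hypothetical Widget class:

    #include <memory>

    class Widget {
     public:
      static std::unique_ptr<Widget> create() {
        // make_unique cannot call the private constructor, so the raw pointer
        // is wrapped explicitly -- the same pattern used throughout this patch.
        return std::unique_ptr<Widget>(new Widget());
      }

     private:
      Widget() = default;
    };
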
diff --git a/src/inspector/remote-object-id.cc b/src/inspector/remote-object-id.cc
index aac6724..2f5f051 100644
--- a/src/inspector/remote-object-id.cc
+++ b/src/inspector/remote-object-id.cc
@@ -13,7 +13,8 @@
 
 std::unique_ptr<protocol::DictionaryValue>
 RemoteObjectIdBase::parseInjectedScriptId(const String16& objectId) {
-  std::unique_ptr<protocol::Value> parsedValue = protocol::parseJSON(objectId);
+  std::unique_ptr<protocol::Value> parsedValue =
+      protocol::StringUtil::parseJSON(objectId);
   if (!parsedValue || parsedValue->type() != protocol::Value::TypeObject)
     return nullptr;
 
diff --git a/src/inspector/script-breakpoint.h b/src/inspector/script-breakpoint.h
index 025233d..a981b16 100644
--- a/src/inspector/script-breakpoint.h
+++ b/src/inspector/script-breakpoint.h
@@ -35,15 +35,18 @@
 namespace v8_inspector {
 
 struct ScriptBreakpoint {
-  ScriptBreakpoint() : ScriptBreakpoint(0, 0, String16()) {}
+  ScriptBreakpoint() {}
 
-  ScriptBreakpoint(int lineNumber, int columnNumber, const String16& condition)
-      : lineNumber(lineNumber),
-        columnNumber(columnNumber),
-        condition(condition) {}
+  ScriptBreakpoint(String16 script_id, int line_number, int column_number,
+                   String16 condition)
+      : script_id(std::move(script_id)),
+        line_number(line_number),
+        column_number(column_number),
+        condition(std::move(condition)) {}
 
-  int lineNumber;
-  int columnNumber;
+  String16 script_id;
+  int line_number = 0;
+  int column_number = 0;
   String16 condition;
 };
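
The rewritten struct carries its script id and relies on C++11 in-class initializers, so the empty default constructor already yields a zeroed breakpoint, and the by-value-plus-std::move parameters cost at most one copy. The same idiom in isolation, with std::string standing in for String16:

    #include <string>
    #include <utility>

    struct Breakpoint {
      Breakpoint() {}  // members below already have their defaults
      Breakpoint(std::string script_id, int line, int column, std::string cond)
          : script_id(std::move(script_id)),
            line_number(line),
            column_number(column),
            condition(std::move(cond)) {}

      std::string script_id;
      int line_number = 0;
      int column_number = 0;
      std::string condition;
    };
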
 
diff --git a/src/inspector/search-util.cc b/src/inspector/search-util.cc
index a6fba06..b05d7a0 100644
--- a/src/inspector/search-util.cc
+++ b/src/inspector/search-util.cc
@@ -132,7 +132,8 @@
                                            const String16& query,
                                            bool caseSensitive, bool isRegex) {
   String16 regexSource = isRegex ? query : createSearchRegexSource(query);
-  return wrapUnique(new V8Regex(inspector, regexSource, caseSensitive));
+  return std::unique_ptr<V8Regex>(
+      new V8Regex(inspector, regexSource, caseSensitive));
 }
 
 }  // namespace
diff --git a/src/inspector/string-16.cc b/src/inspector/string-16.cc
index 09909a9..6544646 100644
--- a/src/inspector/string-16.cc
+++ b/src/inspector/string-16.cc
@@ -8,14 +8,11 @@
 #include <cctype>
 #include <cstdlib>
 #include <cstring>
-#include <iomanip>
 #include <limits>
-#include <locale>
-#include <sstream>
 #include <string>
 
 #include "src/base/platform/platform.h"
-#include "src/inspector/protocol-platform.h"
+#include "src/conversions.h"
 
 namespace v8_inspector {
 
@@ -367,10 +364,9 @@
 
 // static
 String16 String16::fromInteger(int number) {
-  const size_t kBufferSize = 50;
-  char buffer[kBufferSize];
-  v8::base::OS::SNPrintF(buffer, kBufferSize, "%d", number);
-  return String16(buffer);
+  char arr[50];
+  v8::internal::Vector<char> buffer(arr, arraysize(arr));
+  return String16(IntToCString(number, buffer));
 }
 
 // static
@@ -387,19 +383,16 @@
 
 // static
 String16 String16::fromDouble(double number) {
-  std::ostringstream s;
-  s.imbue(std::locale("C"));
-  s << std::fixed << std::setprecision(std::numeric_limits<double>::digits10)
-    << number;
-  return String16(s.str().c_str());
+  char arr[50];
+  v8::internal::Vector<char> buffer(arr, arraysize(arr));
+  return String16(DoubleToCString(number, buffer));
 }
 
 // static
 String16 String16::fromDouble(double number, int precision) {
-  std::ostringstream s;
-  s.imbue(std::locale("C"));
-  s << std::fixed << std::setprecision(precision) << number;
-  return String16(s.str().c_str());
+  std::unique_ptr<char[]> str(
+      v8::internal::DoubleToPrecisionCString(number, precision));
+  return String16(str.get());
 }
 
 int String16::toInteger(bool* ok) const {
diff --git a/src/inspector/string-16.h b/src/inspector/string-16.h
index 360ec93..0270f51 100644
--- a/src/inspector/string-16.h
+++ b/src/inspector/string-16.h
@@ -23,7 +23,7 @@
   String16() {}
   String16(const String16& other)
       : m_impl(other.m_impl), hash_code(other.hash_code) {}
-  String16(const String16&& other)
+  String16(String16&& other)
       : m_impl(std::move(other.m_impl)), hash_code(other.hash_code) {}
   String16(const UChar* characters, size_t size) : m_impl(characters, size) {}
   String16(const UChar* characters)  // NOLINT(runtime/explicit)
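
The one-character fix above matters: a const rvalue reference binds rvalues but forbids stealing from them, because std::move of a const member yields a const rvalue, which overload resolution routes to the copy constructor. The failure mode in isolation, with std::string:

    #include <string>
    #include <utility>

    struct S {
      S() = default;
      // std::move(other.text) is const here, so std::string's *copy*
      // constructor is selected -- the "move" silently copies.
      S(const S&& other) : text(std::move(other.text)) {}
      // The fix, as in the hunk above:
      //   S(S&& other) : text(std::move(other.text)) {}
      std::string text;
    };
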
diff --git a/src/inspector/string-util.cc b/src/inspector/string-util.cc
index e6ad5d0..31b2db5 100644
--- a/src/inspector/string-util.cc
+++ b/src/inspector/string-util.cc
@@ -50,8 +50,7 @@
 }
 
 String16 toProtocolString(v8::Local<v8::String> value) {
-  if (value.IsEmpty() || value->IsNull() || value->IsUndefined())
-    return String16();
+  if (value.IsEmpty() || value->IsNullOrUndefined()) return String16();
   std::unique_ptr<UChar[]> buffer(new UChar[value->Length()]);
   value->Write(reinterpret_cast<uint16_t*>(buffer.get()), 0, value->Length());
   return String16(buffer.get(), value->Length());
@@ -93,19 +92,20 @@
 
 namespace protocol {
 
-std::unique_ptr<protocol::Value> parseJSON(const StringView& string) {
+std::unique_ptr<protocol::Value> StringUtil::parseJSON(
+    const StringView& string) {
   if (!string.length()) return nullptr;
   if (string.is8Bit()) {
-    return protocol::parseJSON(string.characters8(),
+    return parseJSONCharacters(string.characters8(),
                                static_cast<int>(string.length()));
   }
-  return protocol::parseJSON(string.characters16(),
+  return parseJSONCharacters(string.characters16(),
                              static_cast<int>(string.length()));
 }
 
-std::unique_ptr<protocol::Value> parseJSON(const String16& string) {
+std::unique_ptr<protocol::Value> StringUtil::parseJSON(const String16& string) {
   if (!string.length()) return nullptr;
-  return protocol::parseJSON(string.characters16(),
+  return parseJSONCharacters(string.characters16(),
                              static_cast<int>(string.length()));
 }
 
@@ -119,7 +119,7 @@
 
 // static
 std::unique_ptr<StringBufferImpl> StringBufferImpl::adopt(String16& string) {
-  return wrapUnique(new StringBufferImpl(string));
+  return std::unique_ptr<StringBufferImpl>(new StringBufferImpl(string));
 }
 
 StringBufferImpl::StringBufferImpl(String16& string) {
diff --git a/src/inspector/string-util.h b/src/inspector/string-util.h
index e1a69e8..6f0e3d5 100644
--- a/src/inspector/string-util.h
+++ b/src/inspector/string-util.h
@@ -5,6 +5,9 @@
 #ifndef V8_INSPECTOR_STRINGUTIL_H_
 #define V8_INSPECTOR_STRINGUTIL_H_
 
+#include <memory>
+
+#include "src/base/logging.h"
 #include "src/base/macros.h"
 #include "src/inspector/string-16.h"
 
@@ -29,15 +32,32 @@
     return String::fromInteger(number);
   }
   static String fromDouble(double number) { return String::fromDouble(number); }
+  static size_t find(const String& s, const char* needle) {
+    return s.find(needle);
+  }
+  static size_t find(const String& s, const String& needle) {
+    return s.find(needle);
+  }
   static const size_t kNotFound = String::kNotFound;
+  static void builderAppend(StringBuilder& builder, const String& s) {
+    builder.append(s);
+  }
+  static void builderAppend(StringBuilder& builder, UChar c) {
+    builder.append(c);
+  }
+  static void builderAppend(StringBuilder& builder, const char* s, size_t len) {
+    builder.append(s, len);
+  }
   static void builderReserve(StringBuilder& builder, size_t capacity) {
     builder.reserveCapacity(capacity);
   }
+  static String builderToString(StringBuilder& builder) {
+    return builder.toString();
+  }
+  static std::unique_ptr<protocol::Value> parseJSON(const String16& json);
+  static std::unique_ptr<protocol::Value> parseJSON(const StringView& json);
 };
 
-std::unique_ptr<protocol::Value> parseJSON(const StringView& json);
-std::unique_ptr<protocol::Value> parseJSON(const String16& json);
-
 }  // namespace protocol
 
 v8::Local<v8::String> toV8String(v8::Isolate*, const String16&);
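
The new static hooks turn StringUtil into the string adapter that the generated protocol code compiles against, keeping the generator agnostic of the embedder's string type. A hedged sketch of the call shape (assuming, per this header's conventions, that protocol::String and protocol::StringBuilder alias String16 and String16Builder):

    // Generated code goes through the StringUtil statics, never through
    // String16 member functions directly -- that is the adapter contract.
    protocol::String wrapInQuotes(const protocol::String& s) {
      protocol::StringBuilder builder;
      protocol::StringUtil::builderReserve(builder, s.length() + 2);
      protocol::StringUtil::builderAppend(builder, '"');
      protocol::StringUtil::builderAppend(builder, s);
      protocol::StringUtil::builderAppend(builder, '"');
      return protocol::StringUtil::builderToString(builder);
    }
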
diff --git a/src/inspector/test-interface.cc b/src/inspector/test-interface.cc
new file mode 100644
index 0000000..ead1dc3
--- /dev/null
+++ b/src/inspector/test-interface.cc
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/test-interface.h"
+
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+
+namespace v8_inspector {
+
+void SetMaxAsyncTaskStacksForTest(V8Inspector* inspector, int limit) {
+  static_cast<V8InspectorImpl*>(inspector)
+      ->debugger()
+      ->setMaxAsyncTaskStacksForTest(limit);
+}
+
+}  // namespace v8_inspector
diff --git a/src/inspector/test-interface.h b/src/inspector/test-interface.h
new file mode 100644
index 0000000..98bedc2
--- /dev/null
+++ b/src/inspector/test-interface.h
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_TEST_INTERFACE_H_
+#define V8_INSPECTOR_TEST_INTERFACE_H_
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class V8Inspector;
+
+V8_EXPORT void SetMaxAsyncTaskStacksForTest(V8Inspector* inspector, int limit);
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_TEST_INTERFACE_H_
diff --git a/src/inspector/v8-console-message.cc b/src/inspector/v8-console-message.cc
index 63f1d49..73f74e4 100644
--- a/src/inspector/v8-console-message.cc
+++ b/src/inspector/v8-console-message.cc
@@ -4,6 +4,7 @@
 
 #include "src/inspector/v8-console-message.h"
 
+#include "src/debug/debug-interface.h"
 #include "src/inspector/inspected-context.h"
 #include "src/inspector/protocol/Protocol.h"
 #include "src/inspector/string-util.h"
@@ -50,14 +51,15 @@
     case ConsoleAPIType::kAssert:
       return protocol::Runtime::ConsoleAPICalled::TypeEnum::Assert;
     case ConsoleAPIType::kTimeEnd:
-      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Debug;
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::TimeEnd;
     case ConsoleAPIType::kCount:
-      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Debug;
+      return protocol::Runtime::ConsoleAPICalled::TypeEnum::Count;
   }
   return protocol::Runtime::ConsoleAPICalled::TypeEnum::Log;
 }
 
 const unsigned maxConsoleMessageCount = 1000;
+const int maxConsoleMessageV8Size = 10 * 1024 * 1024;
 const unsigned maxArrayItemsLimit = 10000;
 const unsigned maxStackDepthLimit = 32;
 
@@ -361,7 +363,7 @@
   V8InspectorImpl* inspector = inspectedContext->inspector();
   v8::Local<v8::Context> context = inspectedContext->context();
 
-  std::unique_ptr<V8ConsoleMessage> message = wrapUnique(
+  std::unique_ptr<V8ConsoleMessage> message(
       new V8ConsoleMessage(V8MessageOrigin::kConsole, timestamp, String16()));
   if (stackTrace && !stackTrace->isEmpty()) {
     message->m_url = toString16(stackTrace->topSourceURL());
@@ -371,28 +373,34 @@
   message->m_stackTrace = std::move(stackTrace);
   message->m_type = type;
   message->m_contextId = contextId;
-  for (size_t i = 0; i < arguments.size(); ++i)
-    message->m_arguments.push_back(
-        wrapUnique(new v8::Global<v8::Value>(isolate, arguments.at(i))));
+  for (size_t i = 0; i < arguments.size(); ++i) {
+    message->m_arguments.push_back(std::unique_ptr<v8::Global<v8::Value>>(
+        new v8::Global<v8::Value>(isolate, arguments.at(i))));
+    message->m_v8Size +=
+        v8::debug::EstimatedValueSize(isolate, arguments.at(i));
+  }
   if (arguments.size())
     message->m_message = V8ValueStringBuilder::toString(arguments[0], context);
 
-  V8ConsoleAPIType clientType = V8ConsoleAPIType::kLog;
+  v8::Isolate::MessageErrorLevel clientLevel = v8::Isolate::kMessageInfo;
   if (type == ConsoleAPIType::kDebug || type == ConsoleAPIType::kCount ||
-      type == ConsoleAPIType::kTimeEnd)
-    clientType = V8ConsoleAPIType::kDebug;
-  else if (type == ConsoleAPIType::kError || type == ConsoleAPIType::kAssert)
-    clientType = V8ConsoleAPIType::kError;
-  else if (type == ConsoleAPIType::kWarning)
-    clientType = V8ConsoleAPIType::kWarning;
-  else if (type == ConsoleAPIType::kInfo)
-    clientType = V8ConsoleAPIType::kInfo;
-  else if (type == ConsoleAPIType::kClear)
-    clientType = V8ConsoleAPIType::kClear;
-  inspector->client()->consoleAPIMessage(
-      contextGroupId, clientType, toStringView(message->m_message),
-      toStringView(message->m_url), message->m_lineNumber,
-      message->m_columnNumber, message->m_stackTrace.get());
+      type == ConsoleAPIType::kTimeEnd) {
+    clientLevel = v8::Isolate::kMessageDebug;
+  } else if (type == ConsoleAPIType::kError ||
+             type == ConsoleAPIType::kAssert) {
+    clientLevel = v8::Isolate::kMessageError;
+  } else if (type == ConsoleAPIType::kWarning) {
+    clientLevel = v8::Isolate::kMessageWarning;
+  } else if (type == ConsoleAPIType::kInfo || type == ConsoleAPIType::kLog) {
+    clientLevel = v8::Isolate::kMessageInfo;
+  }
+
+  if (type != ConsoleAPIType::kClear) {
+    inspector->client()->consoleAPIMessage(
+        contextGroupId, clientLevel, toStringView(message->m_message),
+        toStringView(message->m_url), message->m_lineNumber,
+        message->m_columnNumber, message->m_stackTrace.get());
+  }
 
   return message;
 }
@@ -404,7 +412,7 @@
     std::unique_ptr<V8StackTraceImpl> stackTrace, int scriptId,
     v8::Isolate* isolate, const String16& message, int contextId,
     v8::Local<v8::Value> exception, unsigned exceptionId) {
-  std::unique_ptr<V8ConsoleMessage> consoleMessage = wrapUnique(
+  std::unique_ptr<V8ConsoleMessage> consoleMessage(
       new V8ConsoleMessage(V8MessageOrigin::kException, timestamp, message));
   consoleMessage->setLocation(url, lineNumber, columnNumber,
                               std::move(stackTrace), scriptId);
@@ -413,7 +421,10 @@
   if (contextId && !exception.IsEmpty()) {
     consoleMessage->m_contextId = contextId;
     consoleMessage->m_arguments.push_back(
-        wrapUnique(new v8::Global<v8::Value>(isolate, exception)));
+        std::unique_ptr<v8::Global<v8::Value>>(
+            new v8::Global<v8::Value>(isolate, exception)));
+    consoleMessage->m_v8Size +=
+        v8::debug::EstimatedValueSize(isolate, exception);
   }
   return consoleMessage;
 }
@@ -422,7 +433,7 @@
 std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForRevokedException(
     double timestamp, const String16& messageText,
     unsigned revokedExceptionId) {
-  std::unique_ptr<V8ConsoleMessage> message = wrapUnique(new V8ConsoleMessage(
+  std::unique_ptr<V8ConsoleMessage> message(new V8ConsoleMessage(
       V8MessageOrigin::kRevokedException, timestamp, messageText));
   message->m_revokedExceptionId = revokedExceptionId;
   return message;
@@ -434,15 +445,14 @@
   if (m_message.isEmpty()) m_message = "<message collected>";
   Arguments empty;
   m_arguments.swap(empty);
+  m_v8Size = 0;
 }
 
 // ------------------------ V8ConsoleMessageStorage ----------------------------
 
 V8ConsoleMessageStorage::V8ConsoleMessageStorage(V8InspectorImpl* inspector,
                                                  int contextGroupId)
-    : m_inspector(inspector),
-      m_contextGroupId(contextGroupId),
-      m_expiredCount(0) {}
+    : m_inspector(inspector), m_contextGroupId(contextGroupId) {}
 
 V8ConsoleMessageStorage::~V8ConsoleMessageStorage() { clear(); }
 
@@ -463,23 +473,33 @@
 
   DCHECK(m_messages.size() <= maxConsoleMessageCount);
   if (m_messages.size() == maxConsoleMessageCount) {
-    ++m_expiredCount;
+    m_estimatedSize -= m_messages.front()->estimatedSize();
     m_messages.pop_front();
   }
+  while (m_estimatedSize + message->estimatedSize() > maxConsoleMessageV8Size &&
+         !m_messages.empty()) {
+    m_estimatedSize -= m_messages.front()->estimatedSize();
+    m_messages.pop_front();
+  }
+
   m_messages.push_back(std::move(message));
+  m_estimatedSize += m_messages.back()->estimatedSize();
 }
 
 void V8ConsoleMessageStorage::clear() {
   m_messages.clear();
-  m_expiredCount = 0;
+  m_estimatedSize = 0;
   if (V8InspectorSessionImpl* session =
           m_inspector->sessionForContextGroup(m_contextGroupId))
     session->releaseObjectGroup("console");
 }
 
 void V8ConsoleMessageStorage::contextDestroyed(int contextId) {
-  for (size_t i = 0; i < m_messages.size(); ++i)
+  m_estimatedSize = 0;
+  for (size_t i = 0; i < m_messages.size(); ++i) {
     m_messages[i]->contextDestroyed(contextId);
+    m_estimatedSize += m_messages[i]->estimatedSize();
+  }
 }
 
 }  // namespace v8_inspector
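
The storage above replaces the old "count expired messages" bookkeeping with a byte budget: each message reports an estimatedSize() (V8 value estimates plus message text), and addMessage() evicts from the front until the newcomer fits under maxConsoleMessageV8Size. The invariant in isolation, with a hypothetical Message type standing in for V8ConsoleMessage:

    #include <deque>
    #include <memory>
    #include <utility>

    struct Message {
      explicit Message(int size) : size(size) {}
      int estimatedSize() const { return size; }
      int size;
    };

    constexpr int kBudgetBytes = 10 * 1024 * 1024;

    // Postcondition (assuming a single message fits on its own): the total
    // estimated size of retained messages never exceeds kBudgetBytes.
    void AddMessage(std::deque<std::unique_ptr<Message>>& messages,
                    int& totalSize, std::unique_ptr<Message> message) {
      while (!messages.empty() &&
             totalSize + message->estimatedSize() > kBudgetBytes) {
        totalSize -= messages.front()->estimatedSize();
        messages.pop_front();
      }
      totalSize += message->estimatedSize();
      messages.push_back(std::move(message));
    }
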
diff --git a/src/inspector/v8-console-message.h b/src/inspector/v8-console-message.h
index a6e9eaf..8ab81f4 100644
--- a/src/inspector/v8-console-message.h
+++ b/src/inspector/v8-console-message.h
@@ -65,6 +65,10 @@
   ConsoleAPIType type() const;
   void contextDestroyed(int contextId);
 
+  int estimatedSize() const {
+    return m_v8Size + static_cast<int>(m_message.length() * sizeof(UChar));
+  }
+
  private:
   V8ConsoleMessage(V8MessageOrigin, double timestamp, const String16& message);
 
@@ -89,6 +93,7 @@
   ConsoleAPIType m_type;
   unsigned m_exceptionId;
   unsigned m_revokedExceptionId;
+  int m_v8Size = 0;
   Arguments m_arguments;
   String16 m_detailedMessage;
 };
@@ -99,7 +104,6 @@
   ~V8ConsoleMessageStorage();
 
   int contextGroupId() { return m_contextGroupId; }
-  int expiredCount() { return m_expiredCount; }
   const std::deque<std::unique_ptr<V8ConsoleMessage>>& messages() const {
     return m_messages;
   }
@@ -111,7 +115,7 @@
  private:
   V8InspectorImpl* m_inspector;
   int m_contextGroupId;
-  int m_expiredCount;
+  int m_estimatedSize = 0;
   std::deque<std::unique_ptr<V8ConsoleMessage>> m_messages;
 };
 
diff --git a/src/inspector/v8-console.cc b/src/inspector/v8-console.cc
index fee6117..cfe7fc1 100644
--- a/src/inspector/v8-console.cc
+++ b/src/inspector/v8-console.cc
@@ -336,8 +336,14 @@
 }
 
 void V8Console::clearCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
-  ConsoleHelper(info).reportCallWithDefaultArgument(ConsoleAPIType::kClear,
-                                                    String16("console.clear"));
+  ConsoleHelper helper(info);
+  InspectedContext* context = helper.ensureInspectedContext();
+  if (!context) return;
+  int contextGroupId = context->contextGroupId();
+  if (V8InspectorClient* client = helper.ensureDebuggerClient())
+    client->consoleClear(contextGroupId);
+  helper.reportCallWithDefaultArgument(ConsoleAPIType::kClear,
+                                       String16("console.clear"));
 }
 
 void V8Console::countCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
@@ -360,8 +366,10 @@
   if (!helper.privateMap("V8Console#countMap").ToLocal(&countMap)) return;
   int32_t count = helper.getIntFromMap(countMap, identifier, 0) + 1;
   helper.setIntOnMap(countMap, identifier, count);
-  helper.reportCallWithArgument(ConsoleAPIType::kCount,
-                                title + ": " + String16::fromInteger(count));
+  String16 countString = String16::fromInteger(count);
+  helper.reportCallWithArgument(
+      ConsoleAPIType::kCount,
+      title.isEmpty() ? countString : (title + ": " + countString));
 }
 
 void V8Console::assertCallback(
@@ -431,7 +439,7 @@
     double elapsed = client->currentTimeMS() -
                      helper.getDoubleFromMap(timeMap, protocolTitle, 0.0);
     String16 message =
-        protocolTitle + ": " + String16::fromDouble(elapsed, 3) + "ms";
+        protocolTitle + ": " + String16::fromDouble(elapsed) + "ms";
     helper.reportCallWithArgument(ConsoleAPIType::kTimeEnd, message);
   }
 }
@@ -714,6 +722,29 @@
   createBoundFunctionProperty(context, console, "timeStamp",
                               V8Console::timeStampCallback);
 
+  const char* jsConsoleAssert =
+      "(function(){\n"
+      "  var originAssert = this.assert;\n"
+      "  originAssert.apply = Function.prototype.apply;\n"
+      "  this.assert = assertWrapper;\n"
+      "  assertWrapper.toString = () => originAssert.toString();\n"
+      "  function assertWrapper(){\n"
+      "    if (!!arguments[0]) return;\n"
+      "    originAssert.apply(null, arguments);\n"
+      "  }\n"
+      "})";
+
+  v8::Local<v8::String> assertSource = toV8String(isolate, jsConsoleAssert);
+  V8InspectorImpl* inspector = inspectedContext->inspector();
+  v8::Local<v8::Value> setupFunction;
+  if (inspector->compileAndRunInternalScript(context, assertSource)
+          .ToLocal(&setupFunction) &&
+      setupFunction->IsFunction()) {
+    inspector->callInternalFunction(
+        v8::Local<v8::Function>::Cast(setupFunction), context, console, 0,
+        nullptr);
+  }
+
   if (hasMemoryAttribute)
     console->SetAccessorProperty(
         toV8StringInternalized(isolate, "memory"),
diff --git a/src/inspector/v8-debugger-agent-impl.cc b/src/inspector/v8-debugger-agent-impl.cc
index 224ae28..7de46a1 100644
--- a/src/inspector/v8-debugger-agent-impl.cc
+++ b/src/inspector/v8-debugger-agent-impl.cc
@@ -54,14 +54,33 @@
 
 }  // namespace DebuggerAgentState
 
-static const int kMaxSkipStepFrameCount = 128;
 static const char kBacktraceObjectGroup[] = "backtrace";
 static const char kDebuggerNotEnabled[] = "Debugger agent is not enabled";
 static const char kDebuggerNotPaused[] =
     "Can only perform operation while paused.";
 
-static String16 breakpointIdSuffix(
-    V8DebuggerAgentImpl::BreakpointSource source) {
+namespace {
+
+void TranslateWasmStackTraceLocations(Array<CallFrame>* stackTrace,
+                                      WasmTranslation* wasmTranslation) {
+  for (size_t i = 0, e = stackTrace->length(); i != e; ++i) {
+    protocol::Debugger::Location* location = stackTrace->get(i)->getLocation();
+    String16 scriptId = location->getScriptId();
+    int lineNumber = location->getLineNumber();
+    int columnNumber = location->getColumnNumber(-1);
+
+    if (!wasmTranslation->TranslateWasmScriptLocationToProtocolLocation(
+            &scriptId, &lineNumber, &columnNumber)) {
+      continue;
+    }
+
+    location->setScriptId(std::move(scriptId));
+    location->setLineNumber(lineNumber);
+    location->setColumnNumber(columnNumber);
+  }
+}
+
+String16 breakpointIdSuffix(V8DebuggerAgentImpl::BreakpointSource source) {
   switch (source) {
     case V8DebuggerAgentImpl::UserBreakpointSource:
       break;
@@ -73,26 +92,25 @@
   return String16();
 }
 
-static String16 generateBreakpointId(
-    const String16& scriptId, int lineNumber, int columnNumber,
-    V8DebuggerAgentImpl::BreakpointSource source) {
+String16 generateBreakpointId(const ScriptBreakpoint& breakpoint,
+                              V8DebuggerAgentImpl::BreakpointSource source) {
   String16Builder builder;
-  builder.append(scriptId);
+  builder.append(breakpoint.script_id);
   builder.append(':');
-  builder.appendNumber(lineNumber);
+  builder.appendNumber(breakpoint.line_number);
   builder.append(':');
-  builder.appendNumber(columnNumber);
+  builder.appendNumber(breakpoint.column_number);
   builder.append(breakpointIdSuffix(source));
   return builder.toString();
 }
 
-static bool positionComparator(const std::pair<int, int>& a,
-                               const std::pair<int, int>& b) {
+bool positionComparator(const std::pair<int, int>& a,
+                        const std::pair<int, int>& b) {
   if (a.first != b.first) return a.first < b.first;
   return a.second < b.second;
 }
 
-static std::unique_ptr<protocol::Debugger::Location> buildProtocolLocation(
+std::unique_ptr<protocol::Debugger::Location> buildProtocolLocation(
     const String16& scriptId, int lineNumber, int columnNumber) {
   return protocol::Debugger::Location::create()
       .setScriptId(scriptId)
@@ -101,6 +119,8 @@
       .build();
 }
 
+}  // namespace
+
 V8DebuggerAgentImpl::V8DebuggerAgentImpl(
     V8InspectorSessionImpl* session, protocol::FrontendChannel* frontendChannel,
     protocol::DictionaryValue* state)
@@ -111,24 +131,14 @@
       m_state(state),
       m_frontend(frontendChannel),
       m_isolate(m_inspector->isolate()),
-      m_breakReason(protocol::Debugger::Paused::ReasonEnum::Other),
       m_scheduledDebuggerStep(NoStep),
-      m_skipNextDebuggerStepOut(false),
       m_javaScriptPauseScheduled(false),
-      m_steppingFromFramework(false),
-      m_pausingOnNativeEvent(false),
-      m_skippedStepFrameCount(0),
-      m_recursionLevelForStepOut(0),
-      m_recursionLevelForStepFrame(0),
-      m_skipAllPauses(false) {
-  clearBreakDetails();
+      m_recursionLevelForStepOut(0) {
 }
 
 V8DebuggerAgentImpl::~V8DebuggerAgentImpl() {}
 
 void V8DebuggerAgentImpl::enableImpl() {
-  // m_inspector->addListener may result in reporting all parsed scripts to
-  // the agent so it should already be in enabled state by then.
   m_enabled = true;
   m_state->setBoolean(DebuggerAgentState::debuggerEnabled, true);
   m_debugger->enable();
@@ -161,29 +171,25 @@
   m_state->setObject(DebuggerAgentState::javaScriptBreakpoints,
                      protocol::DictionaryValue::create());
   m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState,
-                      v8::DebugInterface::NoBreakOnException);
+                      v8::debug::NoBreakOnException);
   m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, 0);
 
-  if (!m_pausedContext.IsEmpty()) m_debugger->continueProgram();
+  if (isPaused()) m_debugger->continueProgram();
   m_debugger->disable();
-  m_pausedContext.Reset();
   JavaScriptCallFrames emptyCallFrames;
   m_pausedCallFrames.swap(emptyCallFrames);
-  m_scripts.clear();
   m_blackboxedPositions.clear();
+  m_blackboxPattern.reset();
+  resetBlackboxedStateCache();
+  m_scripts.clear();
   m_breakpointIdToDebuggerBreakpointIds.clear();
   m_debugger->setAsyncCallStackDepth(this, 0);
   m_continueToLocationBreakpointId = String16();
   clearBreakDetails();
   m_scheduledDebuggerStep = NoStep;
-  m_skipNextDebuggerStepOut = false;
   m_javaScriptPauseScheduled = false;
-  m_steppingFromFramework = false;
-  m_pausingOnNativeEvent = false;
-  m_skippedStepFrameCount = 0;
-  m_recursionLevelForStepFrame = 0;
   m_skipAllPauses = false;
-  m_blackboxPattern = nullptr;
+  m_state->setBoolean(DebuggerAgentState::skipAllPauses, false);
   m_state->remove(DebuggerAgentState::blackboxPattern);
   m_enabled = false;
   m_state->setBoolean(DebuggerAgentState::debuggerEnabled, false);
@@ -199,7 +205,7 @@
 
   enableImpl();
 
-  int pauseState = v8::DebugInterface::NoBreakOnException;
+  int pauseState = v8::debug::NoBreakOnException;
   m_state->getInteger(DebuggerAgentState::pauseOnExceptionsState, &pauseState);
   setPauseOnExceptionsImpl(pauseState);
 
@@ -225,8 +231,8 @@
 }
 
 Response V8DebuggerAgentImpl::setSkipAllPauses(bool skip) {
+  m_state->setBoolean(DebuggerAgentState::skipAllPauses, skip);
   m_skipAllPauses = skip;
-  m_state->setBoolean(DebuggerAgentState::skipAllPauses, m_skipAllPauses);
   return Response::OK();
 }
 
@@ -291,12 +297,13 @@
       breakpointId, buildObjectForBreakpointCookie(
                         url, lineNumber, columnNumber, condition, isRegex));
 
-  ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
+  ScriptBreakpoint breakpoint(String16(), lineNumber, columnNumber, condition);
   for (const auto& script : m_scripts) {
     if (!matches(m_inspector, script.second->sourceURL(), url, isRegex))
       continue;
-    std::unique_ptr<protocol::Debugger::Location> location = resolveBreakpoint(
-        breakpointId, script.first, breakpoint, UserBreakpointSource);
+    breakpoint.script_id = script.first;
+    std::unique_ptr<protocol::Debugger::Location> location =
+        resolveBreakpoint(breakpointId, breakpoint, UserBreakpointSource);
     if (location) (*locations)->addItem(std::move(location));
   }
 
@@ -308,21 +315,18 @@
     std::unique_ptr<protocol::Debugger::Location> location,
     Maybe<String16> optionalCondition, String16* outBreakpointId,
     std::unique_ptr<protocol::Debugger::Location>* actualLocation) {
-  String16 scriptId = location->getScriptId();
-  int lineNumber = location->getLineNumber();
-  int columnNumber = location->getColumnNumber(0);
+  ScriptBreakpoint breakpoint(
+      location->getScriptId(), location->getLineNumber(),
+      location->getColumnNumber(0), optionalCondition.fromMaybe(String16()));
 
-  String16 condition = optionalCondition.fromMaybe("");
-
-  String16 breakpointId = generateBreakpointId(
-      scriptId, lineNumber, columnNumber, UserBreakpointSource);
+  String16 breakpointId =
+      generateBreakpointId(breakpoint, UserBreakpointSource);
   if (m_breakpointIdToDebuggerBreakpointIds.find(breakpointId) !=
       m_breakpointIdToDebuggerBreakpointIds.end()) {
     return Response::Error("Breakpoint at specified location already exists.");
   }
-  ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
-  *actualLocation = resolveBreakpoint(breakpointId, scriptId, breakpoint,
-                                      UserBreakpointSource);
+  *actualLocation =
+      resolveBreakpoint(breakpointId, breakpoint, UserBreakpointSource);
   if (!*actualLocation) return Response::Error("Could not resolve breakpoint");
   *outBreakpointId = breakpointId;
   return Response::OK();
@@ -365,9 +369,9 @@
     return Response::Error(
         "start.lineNumber and start.columnNumber should be >= 0");
 
-  v8::DebugInterface::Location v8Start(start->getLineNumber(),
-                                       start->getColumnNumber(0));
-  v8::DebugInterface::Location v8End;
+  v8::debug::Location v8Start(start->getLineNumber(),
+                              start->getColumnNumber(0));
+  v8::debug::Location v8End;
   if (end.isJust()) {
     if (end.fromJust()->getScriptId() != scriptId)
       return Response::Error("Locations should contain the same scriptId");
@@ -376,12 +380,12 @@
     if (line < 0 || column < 0)
       return Response::Error(
           "end.lineNumber and end.columnNumber should be >= 0");
-    v8End = v8::DebugInterface::Location(line, column);
+    v8End = v8::debug::Location(line, column);
   }
   auto it = m_scripts.find(scriptId);
   if (it == m_scripts.end()) return Response::Error("Script not found");
 
-  std::vector<v8::DebugInterface::Location> v8Locations;
+  std::vector<v8::debug::Location> v8Locations;
   if (!it->second->getPossibleBreakpoints(v8Start, v8End, &v8Locations))
     return Response::InternalError();
 
@@ -405,38 +409,20 @@
     m_continueToLocationBreakpointId = "";
   }
 
-  String16 scriptId = location->getScriptId();
-  int lineNumber = location->getLineNumber();
-  int columnNumber = location->getColumnNumber(0);
+  ScriptBreakpoint breakpoint(location->getScriptId(),
+                              location->getLineNumber(),
+                              location->getColumnNumber(0), String16());
 
-  ScriptBreakpoint breakpoint(lineNumber, columnNumber, "");
   m_continueToLocationBreakpointId = m_debugger->setBreakpoint(
-      scriptId, breakpoint, &lineNumber, &columnNumber);
+      breakpoint, &breakpoint.line_number, &breakpoint.column_number);
+  // TODO(kozyatinskiy): Return actual line and column number.
   return resume();
 }
 
-bool V8DebuggerAgentImpl::isCurrentCallStackEmptyOrBlackboxed() {
-  DCHECK(enabled());
-  JavaScriptCallFrames callFrames = m_debugger->currentCallFrames();
-  for (size_t index = 0; index < callFrames.size(); ++index) {
-    if (!isCallFrameWithUnknownScriptOrBlackboxed(callFrames[index].get()))
-      return false;
-  }
-  return true;
-}
-
-bool V8DebuggerAgentImpl::isTopPausedCallFrameBlackboxed() {
-  DCHECK(enabled());
-  JavaScriptCallFrame* frame =
-      m_pausedCallFrames.size() ? m_pausedCallFrames[0].get() : nullptr;
-  return isCallFrameWithUnknownScriptOrBlackboxed(frame);
-}
-
-bool V8DebuggerAgentImpl::isCallFrameWithUnknownScriptOrBlackboxed(
-    JavaScriptCallFrame* frame) {
-  if (!frame) return true;
-  ScriptsMap::iterator it =
-      m_scripts.find(String16::fromInteger(frame->sourceID()));
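+// A function is blackboxed if its script is unknown, if the script's source
+// URL matches the blackbox pattern, or if the whole [start, end) range falls
+// inside a single blackboxed interval of the script's ranges.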
+bool V8DebuggerAgentImpl::isFunctionBlackboxed(const String16& scriptId,
+                                               const v8::debug::Location& start,
+                                               const v8::debug::Location& end) {
+  ScriptsMap::iterator it = m_scripts.find(scriptId);
   if (it == m_scripts.end()) {
     // Unknown scripts are blackboxed.
     return true;
@@ -447,78 +433,65 @@
         m_blackboxPattern->match(scriptSourceURL) != -1)
       return true;
   }
-  auto itBlackboxedPositions =
-      m_blackboxedPositions.find(String16::fromInteger(frame->sourceID()));
+  auto itBlackboxedPositions = m_blackboxedPositions.find(scriptId);
   if (itBlackboxedPositions == m_blackboxedPositions.end()) return false;
 
   const std::vector<std::pair<int, int>>& ranges =
       itBlackboxedPositions->second;
-  auto itRange = std::lower_bound(
+  auto itStartRange = std::lower_bound(
       ranges.begin(), ranges.end(),
-      std::make_pair(frame->line(), frame->column()), positionComparator);
+      std::make_pair(start.GetLineNumber(), start.GetColumnNumber()),
+      positionComparator);
+  auto itEndRange = std::lower_bound(
+      itStartRange, ranges.end(),
+      std::make_pair(end.GetLineNumber(), end.GetColumnNumber()),
+      positionComparator);
   // The ranges array contains positions in the script where the blackbox
   // state flips: [(0,0) ... ranges[0]) isn't blackboxed, [ranges[0] ...
   // ranges[1]) is blackboxed, and so on.
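   // For example, ranges = {(1,0), (5,0)} blackboxes exactly [(1,0) ... (5,0)):
   // a function spanning (2,0)..(3,0) lies past one boundary (odd count), so it
   // is blackboxed, while one spanning (4,0)..(6,0) crosses a boundary and is
   // not.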
-  return std::distance(ranges.begin(), itRange) % 2;
-}
-
-V8DebuggerAgentImpl::SkipPauseRequest
-V8DebuggerAgentImpl::shouldSkipExceptionPause(
-    JavaScriptCallFrame* topCallFrame) {
-  if (m_steppingFromFramework) return RequestNoSkip;
-  if (isCallFrameWithUnknownScriptOrBlackboxed(topCallFrame))
-    return RequestContinue;
-  return RequestNoSkip;
-}
-
-V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::shouldSkipStepPause(
-    JavaScriptCallFrame* topCallFrame) {
-  if (m_steppingFromFramework) return RequestNoSkip;
-
-  if (m_skipNextDebuggerStepOut) {
-    m_skipNextDebuggerStepOut = false;
-    if (m_scheduledDebuggerStep == StepOut) return RequestStepOut;
-  }
-
-  if (!isCallFrameWithUnknownScriptOrBlackboxed(topCallFrame))
-    return RequestNoSkip;
-
-  if (m_skippedStepFrameCount >= kMaxSkipStepFrameCount) return RequestStepOut;
-
-  if (!m_skippedStepFrameCount) m_recursionLevelForStepFrame = 1;
-
-  ++m_skippedStepFrameCount;
-  return RequestStepFrame;
+  return itStartRange == itEndRange &&
+         std::distance(ranges.begin(), itStartRange) % 2;
 }
 
 std::unique_ptr<protocol::Debugger::Location>
 V8DebuggerAgentImpl::resolveBreakpoint(const String16& breakpointId,
-                                       const String16& scriptId,
                                        const ScriptBreakpoint& breakpoint,
                                        BreakpointSource source) {
+  v8::HandleScope handles(m_isolate);
   DCHECK(enabled());
   // FIXME: remove these checks once crbug.com/520702 is resolved.
   CHECK(!breakpointId.isEmpty());
-  CHECK(!scriptId.isEmpty());
-  ScriptsMap::iterator scriptIterator = m_scripts.find(scriptId);
+  CHECK(!breakpoint.script_id.isEmpty());
+  ScriptsMap::iterator scriptIterator = m_scripts.find(breakpoint.script_id);
   if (scriptIterator == m_scripts.end()) return nullptr;
-  if (breakpoint.lineNumber < scriptIterator->second->startLine() ||
-      scriptIterator->second->endLine() < breakpoint.lineNumber)
+  if (breakpoint.line_number < scriptIterator->second->startLine() ||
+      scriptIterator->second->endLine() < breakpoint.line_number)
     return nullptr;
 
+  // Translate from protocol location to v8 location for the debugger.
+  ScriptBreakpoint translatedBreakpoint = breakpoint;
+  m_debugger->wasmTranslation()->TranslateProtocolLocationToWasmScriptLocation(
+      &translatedBreakpoint.script_id, &translatedBreakpoint.line_number,
+      &translatedBreakpoint.column_number);
+
   int actualLineNumber;
   int actualColumnNumber;
   String16 debuggerBreakpointId = m_debugger->setBreakpoint(
-      scriptId, breakpoint, &actualLineNumber, &actualColumnNumber);
+      translatedBreakpoint, &actualLineNumber, &actualColumnNumber);
   if (debuggerBreakpointId.isEmpty()) return nullptr;
 
+  // Translate back from v8 location to protocol location for the return value.
+  m_debugger->wasmTranslation()->TranslateWasmScriptLocationToProtocolLocation(
+      &translatedBreakpoint.script_id, &actualLineNumber, &actualColumnNumber);
+
   m_serverBreakpoints[debuggerBreakpointId] =
       std::make_pair(breakpointId, source);
   CHECK(!breakpointId.isEmpty());
 
   m_breakpointIdToDebuggerBreakpointIds[breakpointId].push_back(
       debuggerBreakpointId);
-  return buildProtocolLocation(scriptId, actualLineNumber, actualColumnNumber);
+  return buildProtocolLocation(translatedBreakpoint.script_id, actualLineNumber,
+                               actualColumnNumber);
 }
 
 Response V8DebuggerAgentImpl::searchInContent(
@@ -531,9 +504,8 @@
     return Response::Error("No script for id: " + scriptId);
 
   std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> matches =
-      searchInTextByLinesImpl(m_session,
-                              toProtocolString(it->second->source(m_isolate)),
-                              query, optionalCaseSensitive.fromMaybe(false),
+      searchInTextByLinesImpl(m_session, it->second->source(m_isolate), query,
+                              optionalCaseSensitive.fromMaybe(false),
                               optionalIsRegex.fromMaybe(false));
   *results = protocol::Array<protocol::Debugger::SearchMatch>::create();
   for (size_t i = 0; i < matches.size(); ++i)
@@ -548,6 +520,15 @@
     Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) {
   if (!enabled()) return Response::Error(kDebuggerNotEnabled);
 
+  ScriptsMap::iterator it = m_scripts.find(scriptId);
+  if (it == m_scripts.end()) {
+    return Response::Error("No script with given id found");
+  }
+  if (it->second->isModule()) {
+    // TODO(kozyatinskiy): LiveEdit should support ES6 modules.
+    return Response::Error("Editing module's script is not supported.");
+  }
+
   v8::HandleScope handles(m_isolate);
   v8::Local<v8::String> newSource = toV8String(m_isolate, newContent);
   bool compileError = false;
@@ -556,9 +537,7 @@
       &m_pausedCallFrames, stackChanged, &compileError);
   if (!response.isSuccess() || compileError) return response;
 
-  ScriptsMap::iterator it = m_scripts.find(scriptId);
-  if (it != m_scripts.end()) it->second->setSource(newSource);
-
+  it->second->setSource(newSource);
   std::unique_ptr<Array<CallFrame>> callFrames;
   response = currentCallFrames(&callFrames);
   if (!response.isSuccess()) return response;
@@ -571,7 +550,7 @@
     const String16& callFrameId,
     std::unique_ptr<Array<CallFrame>>* newCallFrames,
     Maybe<StackTrace>* asyncStackTrace) {
-  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+  if (!isPaused()) return Response::Error(kDebuggerNotPaused);
   InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
                                        callFrameId);
   Response response = scope.initialize();
@@ -604,93 +583,93 @@
   if (it == m_scripts.end())
     return Response::Error("No script for id: " + scriptId);
   v8::HandleScope handles(m_isolate);
-  *scriptSource = toProtocolString(it->second->source(m_isolate));
+  *scriptSource = it->second->source(m_isolate);
   return Response::OK();
 }
 
+void V8DebuggerAgentImpl::pushBreakDetails(
+    const String16& breakReason,
+    std::unique_ptr<protocol::DictionaryValue> breakAuxData) {
+  m_breakReason.push_back(std::make_pair(breakReason, std::move(breakAuxData)));
+}
+
+void V8DebuggerAgentImpl::popBreakDetails() {
+  if (m_breakReason.empty()) return;
+  m_breakReason.pop_back();
+}
+
+void V8DebuggerAgentImpl::clearBreakDetails() {
+  std::vector<BreakReason> emptyBreakReason;
+  m_breakReason.swap(emptyBreakReason);
+}
+
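+// The helpers above maintain m_breakReason as a stack: every scheduled
+// instrumentation pause pushes a reason, cancelling pops one, and
+// setPauseOnNextStatement is only toggled on the empty <-> non-empty
+// transitions (see schedulePauseOnNextStatement / cancelPauseOnNextStatement).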
 void V8DebuggerAgentImpl::schedulePauseOnNextStatement(
     const String16& breakReason,
     std::unique_ptr<protocol::DictionaryValue> data) {
   if (!enabled() || m_scheduledDebuggerStep == StepInto ||
-      m_javaScriptPauseScheduled || m_debugger->isPaused() ||
+      m_javaScriptPauseScheduled || isPaused() ||
       !m_debugger->breakpointsActivated())
     return;
-  m_breakReason = breakReason;
-  m_breakAuxData = std::move(data);
-  m_pausingOnNativeEvent = true;
-  m_skipNextDebuggerStepOut = false;
-  m_debugger->setPauseOnNextStatement(true);
+  if (m_breakReason.empty()) m_debugger->setPauseOnNextStatement(true);
+  pushBreakDetails(breakReason, std::move(data));
 }
 
 void V8DebuggerAgentImpl::schedulePauseOnNextStatementIfSteppingInto() {
   DCHECK(enabled());
   if (m_scheduledDebuggerStep != StepInto || m_javaScriptPauseScheduled ||
-      m_debugger->isPaused())
+      isPaused())
     return;
-  clearBreakDetails();
-  m_pausingOnNativeEvent = false;
-  m_skippedStepFrameCount = 0;
-  m_recursionLevelForStepFrame = 0;
   m_debugger->setPauseOnNextStatement(true);
 }
 
 void V8DebuggerAgentImpl::cancelPauseOnNextStatement() {
-  if (m_javaScriptPauseScheduled || m_debugger->isPaused()) return;
-  clearBreakDetails();
-  m_pausingOnNativeEvent = false;
-  m_debugger->setPauseOnNextStatement(false);
+  if (m_javaScriptPauseScheduled || isPaused()) return;
+  popBreakDetails();
+  if (m_breakReason.empty()) m_debugger->setPauseOnNextStatement(false);
 }
 
 Response V8DebuggerAgentImpl::pause() {
   if (!enabled()) return Response::Error(kDebuggerNotEnabled);
-  if (m_javaScriptPauseScheduled || m_debugger->isPaused())
-    return Response::OK();
+  if (m_javaScriptPauseScheduled || isPaused()) return Response::OK();
   clearBreakDetails();
   m_javaScriptPauseScheduled = true;
   m_scheduledDebuggerStep = NoStep;
-  m_skippedStepFrameCount = 0;
-  m_steppingFromFramework = false;
   m_debugger->setPauseOnNextStatement(true);
   return Response::OK();
 }
 
 Response V8DebuggerAgentImpl::resume() {
-  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+  if (!isPaused()) return Response::Error(kDebuggerNotPaused);
   m_scheduledDebuggerStep = NoStep;
-  m_steppingFromFramework = false;
   m_session->releaseObjectGroup(kBacktraceObjectGroup);
   m_debugger->continueProgram();
   return Response::OK();
 }
 
 Response V8DebuggerAgentImpl::stepOver() {
-  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+  if (!isPaused()) return Response::Error(kDebuggerNotPaused);
   // StepOver at a function return point should fall back to StepInto.
   JavaScriptCallFrame* frame =
       !m_pausedCallFrames.empty() ? m_pausedCallFrames[0].get() : nullptr;
   if (frame && frame->isAtReturn()) return stepInto();
   m_scheduledDebuggerStep = StepOver;
-  m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
   m_session->releaseObjectGroup(kBacktraceObjectGroup);
   m_debugger->stepOverStatement();
   return Response::OK();
 }
 
 Response V8DebuggerAgentImpl::stepInto() {
-  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+  if (!isPaused()) return Response::Error(kDebuggerNotPaused);
   m_scheduledDebuggerStep = StepInto;
-  m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
   m_session->releaseObjectGroup(kBacktraceObjectGroup);
   m_debugger->stepIntoStatement();
   return Response::OK();
 }
 
 Response V8DebuggerAgentImpl::stepOut() {
-  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+  if (!isPaused()) return Response::Error(kDebuggerNotPaused);
   m_scheduledDebuggerStep = StepOut;
-  m_skipNextDebuggerStepOut = false;
   m_recursionLevelForStepOut = 1;
-  m_steppingFromFramework = isTopPausedCallFrameBlackboxed();
   m_session->releaseObjectGroup(kBacktraceObjectGroup);
   m_debugger->stepOutOfFunction();
   return Response::OK();
@@ -699,13 +678,13 @@
 Response V8DebuggerAgentImpl::setPauseOnExceptions(
     const String16& stringPauseState) {
   if (!enabled()) return Response::Error(kDebuggerNotEnabled);
-  v8::DebugInterface::ExceptionBreakState pauseState;
+  v8::debug::ExceptionBreakState pauseState;
   if (stringPauseState == "none") {
-    pauseState = v8::DebugInterface::NoBreakOnException;
+    pauseState = v8::debug::NoBreakOnException;
   } else if (stringPauseState == "all") {
-    pauseState = v8::DebugInterface::BreakOnAnyException;
+    pauseState = v8::debug::BreakOnAnyException;
   } else if (stringPauseState == "uncaught") {
-    pauseState = v8::DebugInterface::BreakOnUncaughtException;
+    pauseState = v8::debug::BreakOnUncaughtException;
   } else {
     return Response::Error("Unknown pause on exceptions mode: " +
                            stringPauseState);
@@ -716,7 +695,7 @@
 
 void V8DebuggerAgentImpl::setPauseOnExceptionsImpl(int pauseState) {
   m_debugger->setPauseOnExceptionsState(
-      static_cast<v8::DebugInterface::ExceptionBreakState>(pauseState));
+      static_cast<v8::debug::ExceptionBreakState>(pauseState));
   m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState, pauseState);
 }
 
@@ -724,9 +703,9 @@
     const String16& callFrameId, const String16& expression,
     Maybe<String16> objectGroup, Maybe<bool> includeCommandLineAPI,
     Maybe<bool> silent, Maybe<bool> returnByValue, Maybe<bool> generatePreview,
-    std::unique_ptr<RemoteObject>* result,
+    Maybe<bool> throwOnSideEffect, std::unique_ptr<RemoteObject>* result,
     Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
-  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+  if (!isPaused()) return Response::Error(kDebuggerNotPaused);
   InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
                                        callFrameId);
   Response response = scope.initialize();
@@ -739,7 +718,8 @@
 
   v8::MaybeLocal<v8::Value> maybeResultValue =
       m_pausedCallFrames[scope.frameOrdinal()]->evaluate(
-          toV8String(m_isolate, expression));
+          toV8String(m_isolate, expression),
+          throwOnSideEffect.fromMaybe(false));
 
   // Re-initialize after running the client's code, as it could have
   // destroyed the context or session.
@@ -756,7 +736,7 @@
     std::unique_ptr<protocol::Runtime::CallArgument> newValueArgument,
     const String16& callFrameId) {
   if (!enabled()) return Response::Error(kDebuggerNotEnabled);
-  if (m_pausedContext.IsEmpty()) return Response::Error(kDebuggerNotPaused);
+  if (!isPaused()) return Response::Error(kDebuggerNotPaused);
   InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
                                        callFrameId);
   Response response = scope.initialize();
@@ -787,6 +767,7 @@
     std::unique_ptr<protocol::Array<String16>> patterns) {
   if (!patterns->length()) {
     m_blackboxPattern = nullptr;
+    resetBlackboxedStateCache();
     m_state->remove(DebuggerAgentState::blackboxPattern);
     return Response::OK();
   }
@@ -802,6 +783,7 @@
   String16 pattern = patternBuilder.toString();
   Response response = setBlackboxPattern(pattern);
   if (!response.isSuccess()) return response;
+  resetBlackboxedStateCache();
   m_state->setString(DebuggerAgentState::blackboxPattern, pattern);
   return Response::OK();
 }
@@ -815,15 +797,23 @@
   return Response::OK();
 }
 
+void V8DebuggerAgentImpl::resetBlackboxedStateCache() {
+  for (const auto& it : m_scripts) {
+    it.second->resetBlackboxedStateCache();
+  }
+}
+
 Response V8DebuggerAgentImpl::setBlackboxedRanges(
     const String16& scriptId,
     std::unique_ptr<protocol::Array<protocol::Debugger::ScriptPosition>>
         inPositions) {
-  if (m_scripts.find(scriptId) == m_scripts.end())
+  auto it = m_scripts.find(scriptId);
+  if (it == m_scripts.end())
     return Response::Error("No script with passed id.");
 
   if (!inPositions->length()) {
     m_blackboxedPositions.erase(scriptId);
+    it->second->resetBlackboxedStateCache();
     return Response::OK();
   }
 
@@ -849,12 +839,12 @@
   }
 
   m_blackboxedPositions[scriptId] = positions;
+  it->second->resetBlackboxedStateCache();
   return Response::OK();
 }
 
 void V8DebuggerAgentImpl::willExecuteScript(int scriptId) {
   changeJavaScriptRecursionLevel(+1);
-  // Fast return.
   if (m_scheduledDebuggerStep != StepInto) return;
   schedulePauseOnNextStatementIfSteppingInto();
 }
@@ -864,8 +854,7 @@
 }
 
 void V8DebuggerAgentImpl::changeJavaScriptRecursionLevel(int step) {
-  if (m_javaScriptPauseScheduled && !m_skipAllPauses &&
-      !m_debugger->isPaused()) {
+  if (m_javaScriptPauseScheduled && !m_skipAllPauses && !isPaused()) {
     // Do not ever lose the user's pause request until we have actually paused.
     m_debugger->setPauseOnNextStatement(true);
   }
@@ -877,40 +866,19 @@
       // switch stepping to step into the next JS task, as if we exited to a
       // blackboxed framework.
       m_scheduledDebuggerStep = StepInto;
-      m_skipNextDebuggerStepOut = false;
-    }
-  }
-  if (m_recursionLevelForStepFrame) {
-    m_recursionLevelForStepFrame += step;
-    if (!m_recursionLevelForStepFrame) {
-      // We have walked through a blackboxed framework and got back to where we
-      // started.
-      // If there was no stepping scheduled, we should cancel the stepping
-      // explicitly,
-      // since there may be a scheduled StepFrame left.
-      // Otherwise, if we were stepping in/over, the StepFrame will stop at the
-      // right location,
-      // whereas if we were stepping out, we should continue doing so after
-      // debugger pauses
-      // from the old StepFrame.
-      m_skippedStepFrameCount = 0;
-      if (m_scheduledDebuggerStep == NoStep)
-        m_debugger->clearStepping();
-      else if (m_scheduledDebuggerStep == StepOut)
-        m_skipNextDebuggerStepOut = true;
     }
   }
 }
 
 Response V8DebuggerAgentImpl::currentCallFrames(
     std::unique_ptr<Array<CallFrame>>* result) {
-  if (m_pausedContext.IsEmpty() || !m_pausedCallFrames.size()) {
+  if (!isPaused()) {
     *result = Array<CallFrame>::create();
     return Response::OK();
   }
   v8::HandleScope handles(m_isolate);
   v8::Local<v8::Context> debuggerContext =
-      v8::DebugInterface::GetDebugContext(m_isolate);
+      v8::debug::GetDebugContext(m_isolate);
   v8::Context::Scope contextScope(debuggerContext);
 
   v8::Local<v8::Array> objects = v8::Array::New(m_isolate);
@@ -920,8 +888,9 @@
     const std::unique_ptr<JavaScriptCallFrame>& currentCallFrame =
         m_pausedCallFrames[frameOrdinal];
 
-    v8::Local<v8::Object> details = currentCallFrame->details();
-    if (details.IsEmpty()) return Response::InternalError();
+    v8::Local<v8::Object> details;
+    if (!currentCallFrame->details().ToLocal(&details))
+      return Response::InternalError();
 
     int contextId = currentCallFrame->contextId();
 
@@ -1004,54 +973,77 @@
   Response response = toProtocolValue(debuggerContext, objects, &protocolValue);
   if (!response.isSuccess()) return response;
   protocol::ErrorSupport errorSupport;
-  *result = Array<CallFrame>::parse(protocolValue.get(), &errorSupport);
+  *result = Array<CallFrame>::fromValue(protocolValue.get(), &errorSupport);
   if (!*result) return Response::Error(errorSupport.errors());
+  TranslateWasmStackTraceLocations(result->get(),
+                                   m_debugger->wasmTranslation());
   return Response::OK();
 }
 
 std::unique_ptr<StackTrace> V8DebuggerAgentImpl::currentAsyncStackTrace() {
-  if (m_pausedContext.IsEmpty()) return nullptr;
+  if (!isPaused()) return nullptr;
   V8StackTraceImpl* stackTrace = m_debugger->currentAsyncCallChain();
   return stackTrace ? stackTrace->buildInspectorObjectForTail(m_debugger)
                     : nullptr;
 }
 
+bool V8DebuggerAgentImpl::isPaused() const { return m_debugger->isPaused(); }
+
 void V8DebuggerAgentImpl::didParseSource(
     std::unique_ptr<V8DebuggerScript> script, bool success) {
   v8::HandleScope handles(m_isolate);
-  String16 scriptSource = toProtocolString(script->source(m_isolate));
+  String16 scriptSource = script->source(m_isolate);
   if (!success) script->setSourceURL(findSourceURL(scriptSource, false));
   if (!success)
     script->setSourceMappingURL(findSourceMapURL(scriptSource, false));
 
+  int contextId = script->executionContextId();
+  int contextGroupId = m_inspector->contextGroupId(contextId);
+  InspectedContext* inspected =
+      m_inspector->getContext(contextGroupId, contextId);
   std::unique_ptr<protocol::DictionaryValue> executionContextAuxData;
-  if (!script->executionContextAuxData().isEmpty())
+  if (inspected) {
+    // A script reused between different groups/sessions can have a stale
+    // execution context id.
     executionContextAuxData = protocol::DictionaryValue::cast(
-        protocol::parseJSON(script->executionContextAuxData()));
+        protocol::StringUtil::parseJSON(inspected->auxData()));
+  }
   bool isLiveEdit = script->isLiveEdit();
   bool hasSourceURL = script->hasSourceURL();
+  bool isModule = script->isModule();
   String16 scriptId = script->scriptId();
   String16 scriptURL = script->sourceURL();
 
-  Maybe<String16> sourceMapURLParam = script->sourceMappingURL();
+  m_scripts[scriptId] = std::move(script);
+
+  ScriptsMap::iterator scriptIterator = m_scripts.find(scriptId);
+  DCHECK(scriptIterator != m_scripts.end());
+  V8DebuggerScript* scriptRef = scriptIterator->second.get();
+  // V8 can create functions for parsed scripts before reporting them and ask
+  // the inspector about their blackboxed state, so we must reset the cached
+  // state after any change that affects isFunctionBlackboxed output - and
+  // adding a parsed script is such a change.
+  scriptRef->resetBlackboxedStateCache();
+
+  Maybe<String16> sourceMapURLParam = scriptRef->sourceMappingURL();
   Maybe<protocol::DictionaryValue> executionContextAuxDataParam(
       std::move(executionContextAuxData));
   const bool* isLiveEditParam = isLiveEdit ? &isLiveEdit : nullptr;
   const bool* hasSourceURLParam = hasSourceURL ? &hasSourceURL : nullptr;
+  const bool* isModuleParam = isModule ? &isModule : nullptr;
   if (success)
     m_frontend.scriptParsed(
-        scriptId, scriptURL, script->startLine(), script->startColumn(),
-        script->endLine(), script->endColumn(), script->executionContextId(),
-        script->hash(), std::move(executionContextAuxDataParam),
-        isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam);
+        scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
+        scriptRef->endLine(), scriptRef->endColumn(), contextId,
+        scriptRef->hash(m_isolate), std::move(executionContextAuxDataParam),
+        isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam,
+        isModuleParam);
   else
     m_frontend.scriptFailedToParse(
-        scriptId, scriptURL, script->startLine(), script->startColumn(),
-        script->endLine(), script->endColumn(), script->executionContextId(),
-        script->hash(), std::move(executionContextAuxDataParam),
-        std::move(sourceMapURLParam), hasSourceURLParam);
-
-  m_scripts[scriptId] = std::move(script);
+        scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
+        scriptRef->endLine(), scriptRef->endColumn(), contextId,
+        scriptRef->hash(m_isolate), std::move(executionContextAuxDataParam),
+        std::move(sourceMapURLParam), hasSourceURLParam, isModuleParam);
 
   if (scriptURL.isEmpty() || !success) return;
 
@@ -1069,76 +1061,60 @@
     breakpointObject->getString(DebuggerAgentState::url, &url);
     if (!matches(m_inspector, scriptURL, url, isRegex)) continue;
     ScriptBreakpoint breakpoint;
+    breakpoint.script_id = scriptId;
     breakpointObject->getInteger(DebuggerAgentState::lineNumber,
-                                 &breakpoint.lineNumber);
+                                 &breakpoint.line_number);
     breakpointObject->getInteger(DebuggerAgentState::columnNumber,
-                                 &breakpoint.columnNumber);
+                                 &breakpoint.column_number);
     breakpointObject->getString(DebuggerAgentState::condition,
                                 &breakpoint.condition);
-    std::unique_ptr<protocol::Debugger::Location> location = resolveBreakpoint(
-        cookie.first, scriptId, breakpoint, UserBreakpointSource);
+    std::unique_ptr<protocol::Debugger::Location> location =
+        resolveBreakpoint(cookie.first, breakpoint, UserBreakpointSource);
     if (location)
       m_frontend.breakpointResolved(cookie.first, std::move(location));
   }
 }
 
-V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::didPause(
-    v8::Local<v8::Context> context, v8::Local<v8::Value> exception,
-    const std::vector<String16>& hitBreakpoints, bool isPromiseRejection,
-    bool isUncaught) {
-  JavaScriptCallFrames callFrames = m_debugger->currentCallFrames(1);
-  JavaScriptCallFrame* topCallFrame =
-      !callFrames.empty() ? callFrames.begin()->get() : nullptr;
-
-  V8DebuggerAgentImpl::SkipPauseRequest result;
-  if (m_skipAllPauses)
-    result = RequestContinue;
-  else if (!hitBreakpoints.empty())
-    result = RequestNoSkip;  // Don't skip explicit breakpoints even if set in
-                             // frameworks.
-  else if (!exception.IsEmpty())
-    result = shouldSkipExceptionPause(topCallFrame);
-  else if (m_scheduledDebuggerStep != NoStep || m_javaScriptPauseScheduled ||
-           m_pausingOnNativeEvent)
-    result = shouldSkipStepPause(topCallFrame);
-  else
-    result = RequestNoSkip;
-
-  m_skipNextDebuggerStepOut = false;
-  if (result != RequestNoSkip) return result;
-  // Skip pauses inside V8 internal scripts and on syntax errors.
-  if (!topCallFrame) return RequestContinue;
-
-  DCHECK(m_pausedContext.IsEmpty());
+void V8DebuggerAgentImpl::didPause(int contextId,
+                                   v8::Local<v8::Value> exception,
+                                   const std::vector<String16>& hitBreakpoints,
+                                   bool isPromiseRejection, bool isUncaught,
+                                   bool isOOMBreak) {
   JavaScriptCallFrames frames = m_debugger->currentCallFrames();
   m_pausedCallFrames.swap(frames);
-  m_pausedContext.Reset(m_isolate, context);
   v8::HandleScope handles(m_isolate);
 
-  if (!exception.IsEmpty()) {
+  std::vector<BreakReason> hitReasons;
+
+  if (isOOMBreak) {
+    hitReasons.push_back(
+        std::make_pair(protocol::Debugger::Paused::ReasonEnum::OOM, nullptr));
+  } else if (!exception.IsEmpty()) {
     InjectedScript* injectedScript = nullptr;
-    m_session->findInjectedScript(V8Debugger::contextId(context),
-                                  injectedScript);
+    m_session->findInjectedScript(contextId, injectedScript);
     if (injectedScript) {
-      m_breakReason =
+      String16 breakReason =
           isPromiseRejection
               ? protocol::Debugger::Paused::ReasonEnum::PromiseRejection
               : protocol::Debugger::Paused::ReasonEnum::Exception;
       std::unique_ptr<protocol::Runtime::RemoteObject> obj;
       injectedScript->wrapObject(exception, kBacktraceObjectGroup, false, false,
                                  &obj);
+      std::unique_ptr<protocol::DictionaryValue> breakAuxData;
       if (obj) {
-        m_breakAuxData = obj->serialize();
-        m_breakAuxData->setBoolean("uncaught", isUncaught);
+        breakAuxData = obj->toValue();
+        breakAuxData->setBoolean("uncaught", isUncaught);
       } else {
-        m_breakAuxData = nullptr;
+        breakAuxData = nullptr;
       }
-      // m_breakAuxData might be null after this.
+      hitReasons.push_back(
+          std::make_pair(breakReason, std::move(breakAuxData)));
     }
   }
 
   std::unique_ptr<Array<String16>> hitBreakpointIds = Array<String16>::create();
 
+  bool hasDebugCommandBreakpointReason = false;
   for (const auto& point : hitBreakpoints) {
     DebugServerBreakpointToBreakpointIdAndSourceMap::iterator
         breakpointIterator = m_serverBreakpoints.find(point);
@@ -1147,34 +1123,57 @@
       hitBreakpointIds->addItem(localId);
 
       BreakpointSource source = breakpointIterator->second.second;
-      if (m_breakReason == protocol::Debugger::Paused::ReasonEnum::Other &&
-          source == DebugCommandBreakpointSource)
-        m_breakReason = protocol::Debugger::Paused::ReasonEnum::DebugCommand;
+      if (!hasDebugCommandBreakpointReason &&
+          source == DebugCommandBreakpointSource) {
+        hasDebugCommandBreakpointReason = true;
+        hitReasons.push_back(std::make_pair(
+            protocol::Debugger::Paused::ReasonEnum::DebugCommand, nullptr));
+      }
     }
   }
 
+  for (size_t i = 0; i < m_breakReason.size(); ++i) {
+    hitReasons.push_back(std::move(m_breakReason[i]));
+  }
+  clearBreakDetails();
+
+  String16 breakReason = protocol::Debugger::Paused::ReasonEnum::Other;
+  std::unique_ptr<protocol::DictionaryValue> breakAuxData;
+  if (hitReasons.size() == 1) {
+    breakReason = hitReasons[0].first;
+    breakAuxData = std::move(hitReasons[0].second);
+  } else if (hitReasons.size() > 1) {
+    breakReason = protocol::Debugger::Paused::ReasonEnum::Ambiguous;
+    std::unique_ptr<protocol::ListValue> reasons =
+        protocol::ListValue::create();
+    for (size_t i = 0; i < hitReasons.size(); ++i) {
+      std::unique_ptr<protocol::DictionaryValue> reason =
+          protocol::DictionaryValue::create();
+      reason->setString("reason", hitReasons[i].first);
+      if (hitReasons[i].second)
+        reason->setObject("auxData", std::move(hitReasons[i].second));
+      reasons->pushValue(std::move(reason));
+    }
+    breakAuxData = protocol::DictionaryValue::create();
+    breakAuxData->setArray("reasons", std::move(reasons));
+  }
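+  // Illustratively, hitting two reasons at once surfaces to the frontend as
+  // reason "ambiguous" with auxData of the shape
+  //   {"reasons": [{"reason": "exception", "auxData": {...}},
+  //                {"reason": "debugCommand"}]}.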
+
   std::unique_ptr<Array<CallFrame>> protocolCallFrames;
   Response response = currentCallFrames(&protocolCallFrames);
   if (!response.isSuccess()) protocolCallFrames = Array<CallFrame>::create();
-  m_frontend.paused(std::move(protocolCallFrames), m_breakReason,
-                    std::move(m_breakAuxData), std::move(hitBreakpointIds),
+  m_frontend.paused(std::move(protocolCallFrames), breakReason,
+                    std::move(breakAuxData), std::move(hitBreakpointIds),
                     currentAsyncStackTrace());
   m_scheduledDebuggerStep = NoStep;
   m_javaScriptPauseScheduled = false;
-  m_steppingFromFramework = false;
-  m_pausingOnNativeEvent = false;
-  m_skippedStepFrameCount = 0;
-  m_recursionLevelForStepFrame = 0;
 
   if (!m_continueToLocationBreakpointId.isEmpty()) {
     m_debugger->removeBreakpoint(m_continueToLocationBreakpointId);
     m_continueToLocationBreakpointId = "";
   }
-  return result;
 }
 
 void V8DebuggerAgentImpl::didContinue() {
-  m_pausedContext.Reset();
   JavaScriptCallFrames emptyCallFrames;
   m_pausedCallFrames.swap(emptyCallFrames);
   clearBreakDetails();
@@ -1184,55 +1183,48 @@
 void V8DebuggerAgentImpl::breakProgram(
     const String16& breakReason,
     std::unique_ptr<protocol::DictionaryValue> data) {
-  if (!enabled() || m_skipAllPauses || !m_pausedContext.IsEmpty() ||
-      isCurrentCallStackEmptyOrBlackboxed() ||
-      !m_debugger->breakpointsActivated())
-    return;
-  m_breakReason = breakReason;
-  m_breakAuxData = std::move(data);
+  if (!enabled() || !m_debugger->canBreakProgram() || m_skipAllPauses) return;
+  std::vector<BreakReason> currentScheduledReason;
+  currentScheduledReason.swap(m_breakReason);
+  pushBreakDetails(breakReason, std::move(data));
   m_scheduledDebuggerStep = NoStep;
-  m_steppingFromFramework = false;
-  m_pausingOnNativeEvent = false;
   m_debugger->breakProgram();
+  popBreakDetails();
+  m_breakReason.swap(currentScheduledReason);
 }
 
 void V8DebuggerAgentImpl::breakProgramOnException(
     const String16& breakReason,
     std::unique_ptr<protocol::DictionaryValue> data) {
   if (!enabled() ||
-      m_debugger->getPauseOnExceptionsState() ==
-          v8::DebugInterface::NoBreakOnException)
+      m_debugger->getPauseOnExceptionsState() == v8::debug::NoBreakOnException)
     return;
   breakProgram(breakReason, std::move(data));
 }
 
-void V8DebuggerAgentImpl::clearBreakDetails() {
-  m_breakReason = protocol::Debugger::Paused::ReasonEnum::Other;
-  m_breakAuxData = nullptr;
-}
-
 void V8DebuggerAgentImpl::setBreakpointAt(const String16& scriptId,
                                           int lineNumber, int columnNumber,
                                           BreakpointSource source,
                                           const String16& condition) {
-  String16 breakpointId =
-      generateBreakpointId(scriptId, lineNumber, columnNumber, source);
-  ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
-  resolveBreakpoint(breakpointId, scriptId, breakpoint, source);
+  ScriptBreakpoint breakpoint(scriptId, lineNumber, columnNumber, condition);
+  String16 breakpointId = generateBreakpointId(breakpoint, source);
+  resolveBreakpoint(breakpointId, breakpoint, source);
 }
 
 void V8DebuggerAgentImpl::removeBreakpointAt(const String16& scriptId,
                                              int lineNumber, int columnNumber,
                                              BreakpointSource source) {
-  removeBreakpointImpl(
-      generateBreakpointId(scriptId, lineNumber, columnNumber, source));
+  removeBreakpointImpl(generateBreakpointId(
+      ScriptBreakpoint(scriptId, lineNumber, columnNumber, String16()),
+      source));
 }
 
 void V8DebuggerAgentImpl::reset() {
   if (!enabled()) return;
   m_scheduledDebuggerStep = NoStep;
-  m_scripts.clear();
   m_blackboxedPositions.clear();
+  resetBlackboxedStateCache();
+  m_scripts.clear();
   m_breakpointIdToDebuggerBreakpointIds.clear();
 }
 
diff --git a/src/inspector/v8-debugger-agent-impl.h b/src/inspector/v8-debugger-agent-impl.h
index e5285f4..41a18a8 100644
--- a/src/inspector/v8-debugger-agent-impl.h
+++ b/src/inspector/v8-debugger-agent-impl.h
@@ -8,6 +8,7 @@
 #include <vector>
 
 #include "src/base/macros.h"
+#include "src/debug/interface-types.h"
 #include "src/inspector/java-script-call-frame.h"
 #include "src/inspector/protocol/Debugger.h"
 #include "src/inspector/protocol/Forward.h"
@@ -29,14 +30,6 @@
 
 class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
  public:
-  enum SkipPauseRequest {
-    RequestNoSkip,
-    RequestContinue,
-    RequestStepInto,
-    RequestStepOut,
-    RequestStepFrame
-  };
-
   enum BreakpointSource {
     UserBreakpointSource,
     DebugCommandBreakpointSource,
@@ -100,7 +93,7 @@
       const String16& callFrameId, const String16& expression,
       Maybe<String16> objectGroup, Maybe<bool> includeCommandLineAPI,
       Maybe<bool> silent, Maybe<bool> returnByValue,
-      Maybe<bool> generatePreview,
+      Maybe<bool> generatePreview, Maybe<bool> throwOnSideEffect,
       std::unique_ptr<protocol::Runtime::RemoteObject>* result,
       Maybe<protocol::Runtime::ExceptionDetails>*) override;
   Response setVariableValue(
@@ -134,23 +127,25 @@
   void reset();
 
   // Interface for V8InspectorImpl
-  SkipPauseRequest didPause(v8::Local<v8::Context>,
-                            v8::Local<v8::Value> exception,
-                            const std::vector<String16>& hitBreakpoints,
-                            bool isPromiseRejection, bool isUncaught);
+  void didPause(int contextId, v8::Local<v8::Value> exception,
+                const std::vector<String16>& hitBreakpoints,
+                bool isPromiseRejection, bool isUncaught, bool isOOMBreak);
   void didContinue();
   void didParseSource(std::unique_ptr<V8DebuggerScript>, bool success);
   void willExecuteScript(int scriptId);
   void didExecuteScript();
 
+  bool isFunctionBlackboxed(const String16& scriptId,
+                            const v8::debug::Location& start,
+                            const v8::debug::Location& end);
+
+  bool skipAllPauses() const { return m_skipAllPauses; }
+
   v8::Isolate* isolate() { return m_isolate; }
 
  private:
   void enableImpl();
 
-  SkipPauseRequest shouldSkipExceptionPause(JavaScriptCallFrame* topCallFrame);
-  SkipPauseRequest shouldSkipStepPause(JavaScriptCallFrame* topCallFrame);
-
   void schedulePauseOnNextStatementIfSteppingInto();
 
   Response currentCallFrames(
@@ -162,19 +157,17 @@
   void setPauseOnExceptionsImpl(int);
 
   std::unique_ptr<protocol::Debugger::Location> resolveBreakpoint(
-      const String16& breakpointId, const String16& scriptId,
-      const ScriptBreakpoint&, BreakpointSource);
+      const String16& breakpointId, const ScriptBreakpoint&, BreakpointSource);
   void removeBreakpointImpl(const String16& breakpointId);
   void clearBreakDetails();
 
-  bool isCurrentCallStackEmptyOrBlackboxed();
-  bool isTopPausedCallFrameBlackboxed();
-  bool isCallFrameWithUnknownScriptOrBlackboxed(JavaScriptCallFrame*);
-
   void internalSetAsyncCallStackDepth(int);
   void increaseCachedSkipStackGeneration();
 
   Response setBlackboxPattern(const String16& pattern);
+  void resetBlackboxedStateCache();
+
+  bool isPaused() const;
 
   using ScriptsMap =
       protocol::HashMap<String16, std::unique_ptr<V8DebuggerScript>>;
@@ -193,24 +186,26 @@
   protocol::DictionaryValue* m_state;
   protocol::Debugger::Frontend m_frontend;
   v8::Isolate* m_isolate;
-  v8::Global<v8::Context> m_pausedContext;
   JavaScriptCallFrames m_pausedCallFrames;
   ScriptsMap m_scripts;
   BreakpointIdToDebuggerBreakpointIdsMap m_breakpointIdToDebuggerBreakpointIds;
   DebugServerBreakpointToBreakpointIdAndSourceMap m_serverBreakpoints;
   String16 m_continueToLocationBreakpointId;
-  String16 m_breakReason;
-  std::unique_ptr<protocol::DictionaryValue> m_breakAuxData;
-  DebuggerStep m_scheduledDebuggerStep;
-  bool m_skipNextDebuggerStepOut;
-  bool m_javaScriptPauseScheduled;
-  bool m_steppingFromFramework;
-  bool m_pausingOnNativeEvent;
 
-  int m_skippedStepFrameCount;
+  using BreakReason =
+      std::pair<String16, std::unique_ptr<protocol::DictionaryValue>>;
+  std::vector<BreakReason> m_breakReason;
+
+  void pushBreakDetails(
+      const String16& breakReason,
+      std::unique_ptr<protocol::DictionaryValue> breakAuxData);
+  void popBreakDetails();
+
+  DebuggerStep m_scheduledDebuggerStep;
+  bool m_javaScriptPauseScheduled;
+
   int m_recursionLevelForStepOut;
-  int m_recursionLevelForStepFrame;
-  bool m_skipAllPauses;
+  bool m_skipAllPauses = false;
 
   std::unique_ptr<V8Regex> m_blackboxPattern;
   protocol::HashMap<String16, std::vector<std::pair<int, int>>>
diff --git a/src/inspector/v8-debugger-script.cc b/src/inspector/v8-debugger-script.cc
index ed0c0d6..200cdc7 100644
--- a/src/inspector/v8-debugger-script.cc
+++ b/src/inspector/v8-debugger-script.cc
@@ -4,14 +4,17 @@
 
 #include "src/inspector/v8-debugger-script.h"
 
-#include "src/inspector/protocol-platform.h"
+#include "src/inspector/inspected-context.h"
 #include "src/inspector/string-util.h"
+#include "src/inspector/wasm-translation.h"
 
 namespace v8_inspector {
 
-static const char hexDigits[17] = "0123456789ABCDEF";
+namespace {
 
-static void appendUnsignedAsHex(uint64_t number, String16Builder* destination) {
+const char hexDigits[17] = "0123456789ABCDEF";
+
+void appendUnsignedAsHex(uint64_t number, String16Builder* destination) {
   for (size_t i = 0; i < 8; ++i) {
     UChar c = hexDigits[number & 0xF];
     destination->append(c);
@@ -23,7 +26,7 @@
 // Multiplikation in
 // eingeschränkten Branchingprogrammmodellen" by Woelfel.
 // http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
-static String16 calculateHash(const String16& str) {
+String16 calculateHash(const String16& str) {
   static uint64_t prime[] = {0x3FB75161, 0xAB1F4E4F, 0x82675BC5, 0xCD924D35,
                              0x81ABE279};
   static uint64_t random[] = {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476,
@@ -67,98 +70,240 @@
   return hash.toString();
 }
 
-V8DebuggerScript::V8DebuggerScript(v8::Isolate* isolate,
-                                   v8::Local<v8::DebugInterface::Script> script,
-                                   bool isLiveEdit) {
-  m_isolate = script->GetIsolate();
-  m_id = String16::fromInteger(script->Id());
-  v8::Local<v8::String> tmp;
-  if (script->Name().ToLocal(&tmp)) m_url = toProtocolString(tmp);
-  if (script->SourceURL().ToLocal(&tmp)) {
-    m_sourceURL = toProtocolString(tmp);
-    if (m_url.isEmpty()) m_url = toProtocolString(tmp);
-  }
-  if (script->SourceMappingURL().ToLocal(&tmp))
-    m_sourceMappingURL = toProtocolString(tmp);
-  m_startLine = script->LineOffset();
-  m_startColumn = script->ColumnOffset();
-  std::vector<int> lineEnds = script->LineEnds();
-  CHECK(lineEnds.size());
-  int source_length = lineEnds[lineEnds.size() - 1];
-  if (lineEnds.size()) {
-    m_endLine = static_cast<int>(lineEnds.size()) + m_startLine - 1;
-    if (lineEnds.size() > 1) {
-      m_endColumn = source_length - lineEnds[lineEnds.size() - 2] - 1;
-    } else {
-      m_endColumn = source_length + m_startColumn;
-    }
-  } else {
-    m_endLine = m_startLine;
-    m_endColumn = m_startColumn;
-  }
-
-  if (script->ContextData().ToLocal(&tmp)) {
-    String16 contextData = toProtocolString(tmp);
-    size_t firstComma = contextData.find(",", 0);
-    size_t secondComma = firstComma != String16::kNotFound
-                             ? contextData.find(",", firstComma + 1)
-                             : String16::kNotFound;
-    if (secondComma != String16::kNotFound) {
-      String16 executionContextId =
-          contextData.substring(firstComma + 1, secondComma - firstComma - 1);
-      bool isOk = false;
-      m_executionContextId = executionContextId.toInteger(&isOk);
-      if (!isOk) m_executionContextId = 0;
-      m_executionContextAuxData = contextData.substring(secondComma + 1);
-    }
-  }
-
-  m_isLiveEdit = isLiveEdit;
-
-  if (script->Source().ToLocal(&tmp)) {
-    m_source.Reset(m_isolate, tmp);
-    String16 source = toProtocolString(tmp);
-    m_hash = calculateHash(source);
-    // V8 will not count last line if script source ends with \n.
-    if (source.length() > 1 && source[source.length() - 1] == '\n') {
-      m_endLine++;
-      m_endColumn = 0;
-    }
-  }
-
-  m_script.Reset(m_isolate, script);
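+// Helpers for mapping locations between the wasm virtual scripts exposed over
+// the protocol and the backing v8 script; the DCHECKs assert that translation
+// never lands on an unexpected script id.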
+void TranslateProtocolLocationToV8Location(WasmTranslation* wasmTranslation,
+                                           v8::debug::Location* loc,
+                                           const String16& scriptId,
+                                           const String16& expectedV8ScriptId) {
+  if (loc->IsEmpty()) return;
+  int lineNumber = loc->GetLineNumber();
+  int columnNumber = loc->GetColumnNumber();
+  String16 translatedScriptId = scriptId;
+  wasmTranslation->TranslateProtocolLocationToWasmScriptLocation(
+      &translatedScriptId, &lineNumber, &columnNumber);
+  DCHECK_EQ(expectedV8ScriptId.utf8(), translatedScriptId.utf8());
+  *loc = v8::debug::Location(lineNumber, columnNumber);
 }
 
+void TranslateV8LocationToProtocolLocation(
+    WasmTranslation* wasmTranslation, v8::debug::Location* loc,
+    const String16& scriptId, const String16& expectedProtocolScriptId) {
+  int lineNumber = loc->GetLineNumber();
+  int columnNumber = loc->GetColumnNumber();
+  String16 translatedScriptId = scriptId;
+  wasmTranslation->TranslateWasmScriptLocationToProtocolLocation(
+      &translatedScriptId, &lineNumber, &columnNumber);
+  DCHECK_EQ(expectedProtocolScriptId.utf8(), translatedScriptId.utf8());
+  *loc = v8::debug::Location(lineNumber, columnNumber);
+}
+
+class ActualScript : public V8DebuggerScript {
+  friend class V8DebuggerScript;
+
+ public:
+  ActualScript(v8::Isolate* isolate, v8::Local<v8::debug::Script> script,
+               bool isLiveEdit)
+      : V8DebuggerScript(isolate, String16::fromInteger(script->Id()),
+                         GetNameOrSourceUrl(script)),
+        m_isLiveEdit(isLiveEdit) {
+    v8::Local<v8::String> tmp;
+    if (script->SourceURL().ToLocal(&tmp)) m_sourceURL = toProtocolString(tmp);
+    if (script->SourceMappingURL().ToLocal(&tmp))
+      m_sourceMappingURL = toProtocolString(tmp);
+    m_startLine = script->LineOffset();
+    m_startColumn = script->ColumnOffset();
+    std::vector<int> lineEnds = script->LineEnds();
+    CHECK(lineEnds.size());
+    int source_length = lineEnds[lineEnds.size() - 1];
+    if (lineEnds.size()) {
+      m_endLine = static_cast<int>(lineEnds.size()) + m_startLine - 1;
+      if (lineEnds.size() > 1) {
+        m_endColumn = source_length - lineEnds[lineEnds.size() - 2] - 1;
+      } else {
+        m_endColumn = source_length + m_startColumn;
+      }
+    } else {
+      m_endLine = m_startLine;
+      m_endColumn = m_startColumn;
+    }
+
+    v8::Local<v8::Value> contextData;
+    if (script->ContextData().ToLocal(&contextData) && contextData->IsInt32()) {
+      m_executionContextId =
+          static_cast<int>(contextData.As<v8::Int32>()->Value());
+    }
+
+    if (script->Source().ToLocal(&tmp)) {
+      m_sourceObj.Reset(m_isolate, tmp);
+      String16 source = toProtocolString(tmp);
+      // V8 will not count the last line if the script source ends with \n.
+      if (source.length() > 1 && source[source.length() - 1] == '\n') {
+        m_endLine++;
+        m_endColumn = 0;
+      }
+    }
+
+    m_isModule = script->IsModule();
+
+    m_script.Reset(m_isolate, script);
+  }
+
+  bool isLiveEdit() const override { return m_isLiveEdit; }
+  bool isModule() const override { return m_isModule; }
+
+  const String16& sourceMappingURL() const override {
+    return m_sourceMappingURL;
+  }
+
+  String16 source(v8::Isolate* isolate) const override {
+    if (!m_sourceObj.IsEmpty())
+      return toProtocolString(m_sourceObj.Get(isolate));
+    return V8DebuggerScript::source(isolate);
+  }
+
+  void setSourceMappingURL(const String16& sourceMappingURL) override {
+    m_sourceMappingURL = sourceMappingURL;
+  }
+
+  void setSource(v8::Local<v8::String> source) override {
+    m_source = String16();
+    m_sourceObj.Reset(m_isolate, source);
+    m_hash = String16();
+  }
+
+  bool getPossibleBreakpoints(
+      const v8::debug::Location& start, const v8::debug::Location& end,
+      std::vector<v8::debug::Location>* locations) override {
+    v8::HandleScope scope(m_isolate);
+    v8::Local<v8::debug::Script> script = m_script.Get(m_isolate);
+    return script->GetPossibleBreakpoints(start, end, locations);
+  }
+
+  void resetBlackboxedStateCache() override {
+    v8::HandleScope scope(m_isolate);
+    v8::debug::ResetBlackboxedStateCache(m_isolate, m_script.Get(m_isolate));
+  }
+
+ private:
+  String16 GetNameOrSourceUrl(v8::Local<v8::debug::Script> script) {
+    v8::Local<v8::String> name;
+    if (script->Name().ToLocal(&name) || script->SourceURL().ToLocal(&name))
+      return toProtocolString(name);
+    return String16();
+  }
+
+  String16 m_sourceMappingURL;
+  v8::Global<v8::String> m_sourceObj;
+  bool m_isLiveEdit = false;
+  bool m_isModule = false;
+  v8::Global<v8::debug::Script> m_script;
+};
+
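
Note: the end-position bookkeeping in the ActualScript constructor is dense, so here is a minimal stand-alone sketch of the same arithmetic over a plain std::string. ComputeEndPosition is a hypothetical helper for illustration, not V8 API; it reproduces both the lineEnds math and the trailing-'\n' special case handled above.

    #include <string>
    #include <utility>

    // Returns the 0-based (endLine, endColumn) of a script whose source
    // starts at (startLine, startColumn), mirroring ActualScript above.
    std::pair<int, int> ComputeEndPosition(const std::string& source,
                                           int startLine, int startColumn) {
      int endLine = startLine;
      int lastNewline = -1;
      for (int i = 0; i < static_cast<int>(source.size()); ++i) {
        if (source[i] == '\n') {
          ++endLine;
          lastNewline = i;
        }
      }
      int endColumn = static_cast<int>(source.size()) - lastNewline - 1;
      // The start column only shifts a single-line script's end column.
      if (lastNewline == -1) endColumn += startColumn;
      return {endLine, endColumn};
    }

For example, "a\nbc" starting at (0, 0) ends at (1, 2), and "a\n" ends at (1, 0), which is exactly the trailing-newline case the constructor patches up after reading the source.
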
+class WasmVirtualScript : public V8DebuggerScript {
+  friend class V8DebuggerScript;
+
+ public:
+  WasmVirtualScript(v8::Isolate* isolate, WasmTranslation* wasmTranslation,
+                    v8::Local<v8::debug::WasmScript> script, String16 id,
+                    String16 url, String16 source)
+      : V8DebuggerScript(isolate, std::move(id), std::move(url)),
+        m_script(isolate, script),
+        m_wasmTranslation(wasmTranslation) {
+    int num_lines = 0;
+    int last_newline = -1;
+    size_t next_newline = source.find('\n', last_newline + 1);
+    while (next_newline != String16::kNotFound) {
+      last_newline = static_cast<int>(next_newline);
+      next_newline = source.find('\n', last_newline + 1);
+      ++num_lines;
+    }
+    m_endLine = num_lines;
+    m_endColumn = static_cast<int>(source.length()) - last_newline - 1;
+    m_source = std::move(source);
+  }
+
+  const String16& sourceMappingURL() const override { return emptyString(); }
+  bool isLiveEdit() const override { return false; }
+  bool isModule() const override { return false; }
+  void setSourceMappingURL(const String16&) override {}
+
+  bool getPossibleBreakpoints(
+      const v8::debug::Location& start, const v8::debug::Location& end,
+      std::vector<v8::debug::Location>* locations) override {
+    v8::HandleScope scope(m_isolate);
+    v8::Local<v8::debug::Script> script = m_script.Get(m_isolate);
+    String16 v8ScriptId = String16::fromInteger(script->Id());
+
+    v8::debug::Location translatedStart = start;
+    TranslateProtocolLocationToV8Location(m_wasmTranslation, &translatedStart,
+                                          scriptId(), v8ScriptId);
+
+    v8::debug::Location translatedEnd = end;
+    if (translatedEnd.IsEmpty()) {
+      // Stop before the start of the next function.
+      translatedEnd =
+          v8::debug::Location(translatedStart.GetLineNumber() + 1, 0);
+    } else {
+      TranslateProtocolLocationToV8Location(m_wasmTranslation, &translatedEnd,
+                                            scriptId(), v8ScriptId);
+    }
+
+    bool success = script->GetPossibleBreakpoints(translatedStart,
+                                                  translatedEnd, locations);
+    for (v8::debug::Location& loc : *locations) {
+      TranslateV8LocationToProtocolLocation(m_wasmTranslation, &loc, v8ScriptId,
+                                            scriptId());
+    }
+    return success;
+  }
+
+  void resetBlackboxedStateCache() override {}
+
+ private:
+  static const String16& emptyString() {
+    static const String16 singleEmptyString;
+    return singleEmptyString;
+  }
+
+  v8::Global<v8::debug::WasmScript> m_script;
+  WasmTranslation* m_wasmTranslation;
+};
+
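
Note: WasmVirtualScript::getPossibleBreakpoints follows a translate, query, translate-back pattern: the protocol-space request is mapped into V8 script coordinates, the query runs there, and every hit is mapped back. A self-contained sketch of that round trip; Translator and the simple line-offset model are stand-ins for illustration, not the real WasmTranslation API.

    #include <vector>

    struct Location { int line; int column; };

    // Stand-in for WasmTranslation: here the two views differ by a line shift.
    struct Translator {
      int lineOffset = 0;
      Location toV8(Location l) const { return {l.line + lineOffset, l.column}; }
      Location toProtocol(Location l) const {
        return {l.line - lineOffset, l.column};
      }
    };

    // Query: callable (Location, Location) -> std::vector<Location>.
    template <typename Query>
    std::vector<Location> possibleBreakpoints(const Translator& t, Location start,
                                              Location end, Query query) {
      // 1. Translate the protocol-space request into V8 script space.
      std::vector<Location> found = query(t.toV8(start), t.toV8(end));
      // 2. Translate every hit back so the frontend sees protocol coordinates.
      for (Location& loc : found) loc = t.toProtocol(loc);
      return found;
    }
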
+}  // namespace
+
+std::unique_ptr<V8DebuggerScript> V8DebuggerScript::Create(
+    v8::Isolate* isolate, v8::Local<v8::debug::Script> scriptObj,
+    bool isLiveEdit) {
+  return std::unique_ptr<ActualScript>(
+      new ActualScript(isolate, scriptObj, isLiveEdit));
+}
+
+std::unique_ptr<V8DebuggerScript> V8DebuggerScript::CreateWasm(
+    v8::Isolate* isolate, WasmTranslation* wasmTranslation,
+    v8::Local<v8::debug::WasmScript> underlyingScript, String16 id,
+    String16 url, String16 source) {
+  return std::unique_ptr<WasmVirtualScript>(
+      new WasmVirtualScript(isolate, wasmTranslation, underlyingScript,
+                            std::move(id), std::move(url), std::move(source)));
+}
+
+V8DebuggerScript::V8DebuggerScript(v8::Isolate* isolate, String16 id,
+                                   String16 url)
+    : m_id(std::move(id)), m_url(std::move(url)), m_isolate(isolate) {}
+
 V8DebuggerScript::~V8DebuggerScript() {}
 
 const String16& V8DebuggerScript::sourceURL() const {
   return m_sourceURL.isEmpty() ? m_url : m_sourceURL;
 }
 
-v8::Local<v8::String> V8DebuggerScript::source(v8::Isolate* isolate) const {
-  return m_source.Get(isolate);
+const String16& V8DebuggerScript::hash(v8::Isolate* isolate) const {
+  if (m_hash.isEmpty()) m_hash = calculateHash(source(isolate));
+  DCHECK(!m_hash.isEmpty());
+  return m_hash;
 }
 
 void V8DebuggerScript::setSourceURL(const String16& sourceURL) {
   m_sourceURL = sourceURL;
 }
 
-void V8DebuggerScript::setSourceMappingURL(const String16& sourceMappingURL) {
-  m_sourceMappingURL = sourceMappingURL;
-}
-
-void V8DebuggerScript::setSource(v8::Local<v8::String> source) {
-  m_source.Reset(m_isolate, source);
-  m_hash = calculateHash(toProtocolString(source));
-}
-
-bool V8DebuggerScript::getPossibleBreakpoints(
-    const v8::DebugInterface::Location& start,
-    const v8::DebugInterface::Location& end,
-    std::vector<v8::DebugInterface::Location>* locations) {
-  v8::HandleScope scope(m_isolate);
-  v8::Local<v8::DebugInterface::Script> script = m_script.Get(m_isolate);
-  return script->GetPossibleBreakpoints(start, end, locations);
-}
-
 }  // namespace v8_inspector
diff --git a/src/inspector/v8-debugger-script.h b/src/inspector/v8-debugger-script.h
index 97b5ba9..9250c9d 100644
--- a/src/inspector/v8-debugger-script.h
+++ b/src/inspector/v8-debugger-script.h
@@ -32,63 +32,71 @@
 
 #include "src/base/macros.h"
 #include "src/inspector/string-16.h"
+#include "src/inspector/string-util.h"
 
 #include "include/v8.h"
 #include "src/debug/debug-interface.h"
 
 namespace v8_inspector {
 
+// Forward declaration.
+class WasmTranslation;
+
 class V8DebuggerScript {
  public:
-  V8DebuggerScript(v8::Isolate* isolate,
-                   v8::Local<v8::DebugInterface::Script> script,
-                   bool isLiveEdit);
-  ~V8DebuggerScript();
+  static std::unique_ptr<V8DebuggerScript> Create(
+      v8::Isolate* isolate, v8::Local<v8::debug::Script> script,
+      bool isLiveEdit);
+  static std::unique_ptr<V8DebuggerScript> CreateWasm(
+      v8::Isolate* isolate, WasmTranslation* wasmTranslation,
+      v8::Local<v8::debug::WasmScript> underlyingScript, String16 id,
+      String16 url, String16 source);
+
+  virtual ~V8DebuggerScript();
 
   const String16& scriptId() const { return m_id; }
   const String16& url() const { return m_url; }
   bool hasSourceURL() const { return !m_sourceURL.isEmpty(); }
   const String16& sourceURL() const;
-  const String16& sourceMappingURL() const { return m_sourceMappingURL; }
-  v8::Local<v8::String> source(v8::Isolate*) const;
-  const String16& hash() const { return m_hash; }
+  virtual const String16& sourceMappingURL() const = 0;
+  virtual String16 source(v8::Isolate*) const { return m_source; }
+  const String16& hash(v8::Isolate*) const;
   int startLine() const { return m_startLine; }
   int startColumn() const { return m_startColumn; }
   int endLine() const { return m_endLine; }
   int endColumn() const { return m_endColumn; }
   int executionContextId() const { return m_executionContextId; }
-  const String16& executionContextAuxData() const {
-    return m_executionContextAuxData;
-  }
-  bool isLiveEdit() const { return m_isLiveEdit; }
+  virtual bool isLiveEdit() const = 0;
+  virtual bool isModule() const = 0;
 
   void setSourceURL(const String16&);
-  void setSourceMappingURL(const String16&);
-  void setSource(v8::Local<v8::String>);
+  virtual void setSourceMappingURL(const String16&) = 0;
+  virtual void setSource(v8::Local<v8::String> source) {
+    m_source = toProtocolString(source);
+  }
 
-  bool getPossibleBreakpoints(
-      const v8::DebugInterface::Location& start,
-      const v8::DebugInterface::Location& end,
-      std::vector<v8::DebugInterface::Location>* locations);
+  virtual bool getPossibleBreakpoints(
+      const v8::debug::Location& start, const v8::debug::Location& end,
+      std::vector<v8::debug::Location>* locations) = 0;
+  virtual void resetBlackboxedStateCache() = 0;
 
- private:
+ protected:
+  V8DebuggerScript(v8::Isolate*, String16 id, String16 url);
+
   String16 m_id;
   String16 m_url;
   String16 m_sourceURL;
-  String16 m_sourceMappingURL;
-  v8::Global<v8::String> m_source;
-  String16 m_hash;
-  int m_startLine;
-  int m_startColumn;
-  int m_endLine;
-  int m_endColumn;
-  int m_executionContextId;
-  String16 m_executionContextAuxData;
-  bool m_isLiveEdit;
+  String16 m_source;
+  mutable String16 m_hash;
+  int m_startLine = 0;
+  int m_startColumn = 0;
+  int m_endLine = 0;
+  int m_endColumn = 0;
+  int m_executionContextId = 0;
 
   v8::Isolate* m_isolate;
-  v8::Global<v8::DebugInterface::Script> m_script;
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(V8DebuggerScript);
 };
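
Note: the header rewrite above turns V8DebuggerScript from a concrete class into an abstract base whose constructors hide behind static factories, so callers never name the ActualScript/WasmVirtualScript subclasses. The shape of that refactoring, reduced to a self-contained sketch with generic names (none of these are V8's):

    #include <memory>
    #include <string>
    #include <utility>

    class Script {
     public:
      // Callers only ever see the base type; the factories pick the subclass.
      static std::unique_ptr<Script> Create(std::string source, bool isLiveEdit);
      static std::unique_ptr<Script> CreateWasm(std::string disassembly);
      virtual ~Script() = default;
      virtual bool isLiveEdit() const = 0;

     protected:
      Script() = default;
    };

    namespace {
    class JsScript final : public Script {
     public:
      JsScript(std::string source, bool isLiveEdit)
          : m_source(std::move(source)), m_isLiveEdit(isLiveEdit) {}
      bool isLiveEdit() const override { return m_isLiveEdit; }

     private:
      std::string m_source;
      bool m_isLiveEdit;
    };

    class WasmScript final : public Script {
     public:
      explicit WasmScript(std::string text) : m_text(std::move(text)) {}
      bool isLiveEdit() const override { return false; }

     private:
      std::string m_text;
    };
    }  // namespace

    std::unique_ptr<Script> Script::Create(std::string source, bool isLiveEdit) {
      return std::make_unique<JsScript>(std::move(source), isLiveEdit);
    }
    std::unique_ptr<Script> Script::CreateWasm(std::string disassembly) {
      return std::make_unique<WasmScript>(std::move(disassembly));
    }
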
 
diff --git a/src/inspector/v8-debugger.cc b/src/inspector/v8-debugger.cc
index b3657e5..3a2fc89 100644
--- a/src/inspector/v8-debugger.cc
+++ b/src/inspector/v8-debugger.cc
@@ -5,6 +5,7 @@
 #include "src/inspector/v8-debugger.h"
 
 #include "src/inspector/debugger-script.h"
+#include "src/inspector/inspected-context.h"
 #include "src/inspector/protocol/Protocol.h"
 #include "src/inspector/script-breakpoint.h"
 #include "src/inspector/string-util.h"
@@ -19,22 +20,129 @@
 namespace v8_inspector {
 
 namespace {
-static const char v8AsyncTaskEventEnqueue[] = "enqueue";
-static const char v8AsyncTaskEventEnqueueRecurring[] = "enqueueRecurring";
-static const char v8AsyncTaskEventWillHandle[] = "willHandle";
-static const char v8AsyncTaskEventDidHandle[] = "didHandle";
-static const char v8AsyncTaskEventCancel[] = "cancel";
+
+// Based on DevTools frontend measurements, with asyncCallStackDepth = 4 an
+// average async call stack tail requires ~1 KB. Reserving 128 * 1024 stacks
+// caps async stack memory at roughly 128 MB.
+static const int kMaxAsyncTaskStacks = 128 * 1024;
 
 inline v8::Local<v8::Boolean> v8Boolean(bool value, v8::Isolate* isolate) {
   return value ? v8::True(isolate) : v8::False(isolate);
 }
 
+V8DebuggerAgentImpl* agentForScript(V8InspectorImpl* inspector,
+                                    v8::Local<v8::debug::Script> script) {
+  v8::Local<v8::Value> contextData;
+  if (!script->ContextData().ToLocal(&contextData) || !contextData->IsInt32()) {
+    return nullptr;
+  }
+  int contextId = static_cast<int>(contextData.As<v8::Int32>()->Value());
+  int contextGroupId = inspector->contextGroupId(contextId);
+  if (!contextGroupId) return nullptr;
+  return inspector->enabledDebuggerAgentForGroup(contextGroupId);
+}
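
Note: agentForScript captures the new context-id plumbing in one place: the inspector context id now travels as an Int32 in the script's ContextData, replacing the old comma-separated embedder-data string that contextId()/getGroupId() used to parse (both are deleted further down in this diff). The decoding step in isolation, as a hypothetical helper using only the calls that appear above:

    // Hypothetical helper, not part of the V8 API; returns 0 when the
    // script carries no usable context data.
    int contextIdFromScript(v8::Local<v8::debug::Script> script) {
      v8::Local<v8::Value> data;
      if (!script->ContextData().ToLocal(&data) || !data->IsInt32()) return 0;
      return data.As<v8::Int32>()->Value();
    }
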
+
+v8::MaybeLocal<v8::Array> collectionsEntries(v8::Local<v8::Context> context,
+                                             v8::Local<v8::Value> value) {
+  v8::Isolate* isolate = context->GetIsolate();
+  v8::Local<v8::Array> entries;
+  bool isKeyValue = false;
+  if (!v8::debug::EntriesPreview(isolate, value, &isKeyValue).ToLocal(&entries))
+    return v8::MaybeLocal<v8::Array>();
+
+  v8::Local<v8::Array> wrappedEntries = v8::Array::New(isolate);
+  CHECK(!isKeyValue || entries->Length() % 2 == 0);
+  if (!wrappedEntries->SetPrototype(context, v8::Null(isolate))
+           .FromMaybe(false))
+    return v8::MaybeLocal<v8::Array>();
+  for (uint32_t i = 0; i < entries->Length(); i += isKeyValue ? 2 : 1) {
+    v8::Local<v8::Value> item;
+    if (!entries->Get(context, i).ToLocal(&item)) continue;
+    v8::Local<v8::Value> value;
+    if (isKeyValue && !entries->Get(context, i + 1).ToLocal(&value)) continue;
+    v8::Local<v8::Object> wrapper = v8::Object::New(isolate);
+    if (!wrapper->SetPrototype(context, v8::Null(isolate)).FromMaybe(false))
+      continue;
+    createDataProperty(
+        context, wrapper,
+        toV8StringInternalized(isolate, isKeyValue ? "key" : "value"), item);
+    if (isKeyValue) {
+      createDataProperty(context, wrapper,
+                         toV8StringInternalized(isolate, "value"), value);
+    }
+    createDataProperty(context, wrappedEntries, wrappedEntries->Length(),
+                       wrapper);
+  }
+  if (!markArrayEntriesAsInternal(context, wrappedEntries,
+                                  V8InternalValueType::kEntry)) {
+    return v8::MaybeLocal<v8::Array>();
+  }
+  return wrappedEntries;
+}
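
Note: the wrapping loop above flattens an interleaved [k0, v0, k1, v1, ...] preview into one record per entry, with value-only collections (Set and friends) stored under "value" alone. The same transformation over STL types, as an illustrative sketch; Entry and wrapEntries are made-up names:

    #include <string>
    #include <vector>

    struct Entry {
      std::string key;  // empty for value-only collections such as Set
      std::string value;
    };

    std::vector<Entry> wrapEntries(const std::vector<std::string>& raw,
                                   bool isKeyValue) {
      std::vector<Entry> wrapped;
      for (size_t i = 0; i < raw.size(); i += isKeyValue ? 2 : 1) {
        if (isKeyValue) {
          if (i + 1 >= raw.size()) break;  // malformed preview; drop the tail
          wrapped.push_back({raw[i], raw[i + 1]});
        } else {
          wrapped.push_back({std::string(), raw[i]});
        }
      }
      return wrapped;
    }
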
+
+v8::MaybeLocal<v8::Object> buildLocation(v8::Local<v8::Context> context,
+                                         int scriptId, int lineNumber,
+                                         int columnNumber) {
+  if (scriptId == v8::UnboundScript::kNoScriptId)
+    return v8::MaybeLocal<v8::Object>();
+  if (lineNumber == v8::Function::kLineOffsetNotFound ||
+      columnNumber == v8::Function::kLineOffsetNotFound) {
+    return v8::MaybeLocal<v8::Object>();
+  }
+  v8::Isolate* isolate = context->GetIsolate();
+  v8::Local<v8::Object> location = v8::Object::New(isolate);
+  if (!location->SetPrototype(context, v8::Null(isolate)).FromMaybe(false)) {
+    return v8::MaybeLocal<v8::Object>();
+  }
+  if (!createDataProperty(context, location,
+                          toV8StringInternalized(isolate, "scriptId"),
+                          toV8String(isolate, String16::fromInteger(scriptId)))
+           .FromMaybe(false)) {
+    return v8::MaybeLocal<v8::Object>();
+  }
+  if (!createDataProperty(context, location,
+                          toV8StringInternalized(isolate, "lineNumber"),
+                          v8::Integer::New(isolate, lineNumber))
+           .FromMaybe(false)) {
+    return v8::MaybeLocal<v8::Object>();
+  }
+  if (!createDataProperty(context, location,
+                          toV8StringInternalized(isolate, "columnNumber"),
+                          v8::Integer::New(isolate, columnNumber))
+           .FromMaybe(false)) {
+    return v8::MaybeLocal<v8::Object>();
+  }
+  if (!markAsInternal(context, location, V8InternalValueType::kLocation)) {
+    return v8::MaybeLocal<v8::Object>();
+  }
+  return location;
+}
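
Note: buildLocation assembles the null-prototype object that the injected-script layer later surfaces as [[FunctionLocation]] or [[GeneratorLocation]]. Its plain-data shape, spelled out for reference (ProtocolLocation is a made-up name describing the object above, not a V8 type):

    #include <string>

    struct ProtocolLocation {
      std::string scriptId;  // stringified script id, e.g. "42"
      int lineNumber;        // 0-based
      int columnNumber;      // 0-based
    };
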
+
+v8::MaybeLocal<v8::Object> generatorObjectLocation(
+    v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
+  if (!value->IsGeneratorObject()) return v8::MaybeLocal<v8::Object>();
+  v8::Local<v8::debug::GeneratorObject> generatorObject =
+      v8::debug::GeneratorObject::Cast(value);
+  if (!generatorObject->IsSuspended()) {
+    v8::Local<v8::Function> func = generatorObject->Function();
+    return buildLocation(context, func->ScriptId(), func->GetScriptLineNumber(),
+                         func->GetScriptColumnNumber());
+  }
+  v8::Local<v8::debug::Script> script;
+  if (!generatorObject->Script().ToLocal(&script))
+    return v8::MaybeLocal<v8::Object>();
+  v8::debug::Location suspendedLocation = generatorObject->SuspendedLocation();
+  return buildLocation(context, script->Id(), suspendedLocation.GetLineNumber(),
+                       suspendedLocation.GetColumnNumber());
+}
+
 }  // namespace
 
 static bool inLiveEditScope = false;
 
 v8::MaybeLocal<v8::Value> V8Debugger::callDebuggerMethod(
-    const char* functionName, int argc, v8::Local<v8::Value> argv[]) {
+    const char* functionName, int argc, v8::Local<v8::Value> argv[],
+    bool catchExceptions) {
   v8::MicrotasksScope microtasks(m_isolate,
                                  v8::MicrotasksScope::kDoNotRunMicrotasks);
   DCHECK(m_isolate->InContext());
@@ -44,19 +152,25 @@
       debuggerScript
           ->Get(context, toV8StringInternalized(m_isolate, functionName))
           .ToLocalChecked());
+  if (catchExceptions) {
+    v8::TryCatch try_catch(m_isolate);
+    return function->Call(context, debuggerScript, argc, argv);
+  }
   return function->Call(context, debuggerScript, argc, argv);
 }
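
Note: the two Call statements above look identical, but the catchExceptions branch behaves differently: the stack-allocated v8::TryCatch intercepts anything the debugger-script method throws, so the caller simply receives an empty MaybeLocal instead of letting the exception escape. A minimal sketch of that idiom using only public V8 API:

    #include "include/v8.h"

    // Calls `fn` and swallows any thrown exception; on throw the returned
    // MaybeLocal is empty and the exception does not propagate further.
    v8::MaybeLocal<v8::Value> callSwallowingExceptions(
        v8::Local<v8::Context> context, v8::Local<v8::Function> fn,
        v8::Local<v8::Value> recv, int argc, v8::Local<v8::Value> argv[]) {
      v8::TryCatch tryCatch(context->GetIsolate());
      return fn->Call(context, recv, argc, argv);  // empty on exception
    }
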
 
 V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector)
     : m_isolate(isolate),
       m_inspector(inspector),
-      m_lastContextId(0),
       m_enableCount(0),
       m_breakpointsActivated(true),
       m_runningNestedMessageLoop(false),
       m_ignoreScriptParsedEventsCounter(0),
+      m_maxAsyncCallStacks(kMaxAsyncTaskStacks),
+      m_lastTaskId(0),
       m_maxAsyncCallStackDepth(0),
-      m_pauseOnExceptionsState(v8::DebugInterface::NoBreakOnException) {}
+      m_pauseOnExceptionsState(v8::debug::NoBreakOnException),
+      m_wasmTranslation(isolate) {}
 
 V8Debugger::~V8Debugger() {}
 
@@ -64,14 +178,12 @@
   if (m_enableCount++) return;
   DCHECK(!enabled());
   v8::HandleScope scope(m_isolate);
-  v8::DebugInterface::SetDebugEventListener(m_isolate,
-                                            &V8Debugger::v8DebugEventCallback,
-                                            v8::External::New(m_isolate, this));
-  m_debuggerContext.Reset(m_isolate,
-                          v8::DebugInterface::GetDebugContext(m_isolate));
-  v8::DebugInterface::ChangeBreakOnException(
-      m_isolate, v8::DebugInterface::NoBreakOnException);
-  m_pauseOnExceptionsState = v8::DebugInterface::NoBreakOnException;
+  v8::debug::SetDebugDelegate(m_isolate, this);
+  v8::debug::SetOutOfMemoryCallback(m_isolate, &V8Debugger::v8OOMCallback,
+                                    this);
+  m_debuggerContext.Reset(m_isolate, v8::debug::GetDebugContext(m_isolate));
+  v8::debug::ChangeBreakOnException(m_isolate, v8::debug::NoBreakOnException);
+  m_pauseOnExceptionsState = v8::debug::NoBreakOnException;
   compileDebuggerScript();
 }
 
@@ -82,61 +194,33 @@
   m_debuggerScript.Reset();
   m_debuggerContext.Reset();
   allAsyncTasksCanceled();
-  v8::DebugInterface::SetDebugEventListener(m_isolate, nullptr);
+  m_wasmTranslation.Clear();
+  v8::debug::SetDebugDelegate(m_isolate, nullptr);
+  v8::debug::SetOutOfMemoryCallback(m_isolate, nullptr, nullptr);
+  m_isolate->RestoreOriginalHeapLimit();
 }
 
 bool V8Debugger::enabled() const { return !m_debuggerScript.IsEmpty(); }
 
-// static
-int V8Debugger::contextId(v8::Local<v8::Context> context) {
-  v8::Local<v8::Value> data =
-      context->GetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex));
-  if (data.IsEmpty() || !data->IsString()) return 0;
-  String16 dataString = toProtocolString(data.As<v8::String>());
-  if (dataString.isEmpty()) return 0;
-  size_t commaPos = dataString.find(",");
-  if (commaPos == String16::kNotFound) return 0;
-  size_t commaPos2 = dataString.find(",", commaPos + 1);
-  if (commaPos2 == String16::kNotFound) return 0;
-  return dataString.substring(commaPos + 1, commaPos2 - commaPos - 1)
-      .toInteger();
-}
-
-// static
-int V8Debugger::getGroupId(v8::Local<v8::Context> context) {
-  v8::Local<v8::Value> data =
-      context->GetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex));
-  if (data.IsEmpty() || !data->IsString()) return 0;
-  String16 dataString = toProtocolString(data.As<v8::String>());
-  if (dataString.isEmpty()) return 0;
-  size_t commaPos = dataString.find(",");
-  if (commaPos == String16::kNotFound) return 0;
-  return dataString.substring(0, commaPos).toInteger();
-}
-
 void V8Debugger::getCompiledScripts(
     int contextGroupId,
     std::vector<std::unique_ptr<V8DebuggerScript>>& result) {
   v8::HandleScope scope(m_isolate);
-  v8::PersistentValueVector<v8::DebugInterface::Script> scripts(m_isolate);
-  v8::DebugInterface::GetLoadedScripts(m_isolate, scripts);
-  String16 contextPrefix = String16::fromInteger(contextGroupId) + ",";
+  v8::PersistentValueVector<v8::debug::Script> scripts(m_isolate);
+  v8::debug::GetLoadedScripts(m_isolate, scripts);
   for (size_t i = 0; i < scripts.Size(); ++i) {
-    v8::Local<v8::DebugInterface::Script> script = scripts.Get(i);
+    v8::Local<v8::debug::Script> script = scripts.Get(i);
     if (!script->WasCompiled()) continue;
-    v8::ScriptOriginOptions origin = script->OriginOptions();
-    if (origin.IsEmbedderDebugScript()) continue;
-    v8::Local<v8::String> v8ContextData;
-    if (!script->ContextData().ToLocal(&v8ContextData)) continue;
-    String16 contextData = toProtocolString(v8ContextData);
-    if (contextData.find(contextPrefix) != 0) continue;
-    result.push_back(
-        wrapUnique(new V8DebuggerScript(m_isolate, script, false)));
+    v8::Local<v8::Value> contextData;
+    if (!script->ContextData().ToLocal(&contextData) || !contextData->IsInt32())
+      continue;
+    int contextId = static_cast<int>(contextData.As<v8::Int32>()->Value());
+    if (m_inspector->contextGroupId(contextId) != contextGroupId) continue;
+    result.push_back(V8DebuggerScript::Create(m_isolate, script, false));
   }
 }
 
-String16 V8Debugger::setBreakpoint(const String16& sourceID,
-                                   const ScriptBreakpoint& scriptBreakpoint,
+String16 V8Debugger::setBreakpoint(const ScriptBreakpoint& breakpoint,
                                    int* actualLineNumber,
                                    int* actualColumnNumber) {
   v8::HandleScope scope(m_isolate);
@@ -146,29 +230,30 @@
   v8::Local<v8::Object> info = v8::Object::New(m_isolate);
   bool success = false;
   success = info->Set(context, toV8StringInternalized(m_isolate, "sourceID"),
-                      toV8String(m_isolate, sourceID))
+                      toV8String(m_isolate, breakpoint.script_id))
                 .FromMaybe(false);
   DCHECK(success);
   success = info->Set(context, toV8StringInternalized(m_isolate, "lineNumber"),
-                      v8::Integer::New(m_isolate, scriptBreakpoint.lineNumber))
+                      v8::Integer::New(m_isolate, breakpoint.line_number))
                 .FromMaybe(false);
   DCHECK(success);
   success =
       info->Set(context, toV8StringInternalized(m_isolate, "columnNumber"),
-                v8::Integer::New(m_isolate, scriptBreakpoint.columnNumber))
+                v8::Integer::New(m_isolate, breakpoint.column_number))
           .FromMaybe(false);
   DCHECK(success);
   success = info->Set(context, toV8StringInternalized(m_isolate, "condition"),
-                      toV8String(m_isolate, scriptBreakpoint.condition))
+                      toV8String(m_isolate, breakpoint.condition))
                 .FromMaybe(false);
   DCHECK(success);
+  USE(success);
 
   v8::Local<v8::Function> setBreakpointFunction = v8::Local<v8::Function>::Cast(
       m_debuggerScript.Get(m_isolate)
           ->Get(context, toV8StringInternalized(m_isolate, "setBreakpoint"))
           .ToLocalChecked());
   v8::Local<v8::Value> breakpointId =
-      v8::DebugInterface::Call(debuggerContext(), setBreakpointFunction, info)
+      v8::debug::Call(debuggerContext(), setBreakpointFunction, info)
           .ToLocalChecked();
   if (!breakpointId->IsString()) return "";
   *actualLineNumber =
@@ -196,6 +281,7 @@
                 toV8String(m_isolate, breakpointId))
           .FromMaybe(false);
   DCHECK(success);
+  USE(success);
 
   v8::Local<v8::Function> removeBreakpointFunction =
       v8::Local<v8::Function>::Cast(
@@ -203,7 +289,7 @@
               ->Get(context,
                     toV8StringInternalized(m_isolate, "removeBreakpoint"))
               .ToLocalChecked());
-  v8::DebugInterface::Call(debuggerContext(), removeBreakpointFunction, info)
+  v8::debug::Call(debuggerContext(), removeBreakpointFunction, info)
       .ToLocalChecked();
 }
 
@@ -216,8 +302,7 @@
       m_debuggerScript.Get(m_isolate)
           ->Get(context, toV8StringInternalized(m_isolate, "clearBreakpoints"))
           .ToLocalChecked());
-  v8::DebugInterface::Call(debuggerContext(), clearBreakpoints)
-      .ToLocalChecked();
+  v8::debug::Call(debuggerContext(), clearBreakpoints).ToLocalChecked();
 }
 
 void V8Debugger::setBreakpointsActivated(bool activated) {
@@ -225,65 +310,39 @@
     UNREACHABLE();
     return;
   }
-  v8::HandleScope scope(m_isolate);
-  v8::Local<v8::Context> context = debuggerContext();
-  v8::Context::Scope contextScope(context);
-
-  v8::Local<v8::Object> info = v8::Object::New(m_isolate);
-  bool success = false;
-  success = info->Set(context, toV8StringInternalized(m_isolate, "enabled"),
-                      v8::Boolean::New(m_isolate, activated))
-                .FromMaybe(false);
-  DCHECK(success);
-  v8::Local<v8::Function> setBreakpointsActivated =
-      v8::Local<v8::Function>::Cast(
-          m_debuggerScript.Get(m_isolate)
-              ->Get(context, toV8StringInternalized(m_isolate,
-                                                    "setBreakpointsActivated"))
-              .ToLocalChecked());
-  v8::DebugInterface::Call(debuggerContext(), setBreakpointsActivated, info)
-      .ToLocalChecked();
-
+  v8::debug::SetBreakPointsActive(m_isolate, activated);
   m_breakpointsActivated = activated;
 }
 
-v8::DebugInterface::ExceptionBreakState
-V8Debugger::getPauseOnExceptionsState() {
+v8::debug::ExceptionBreakState V8Debugger::getPauseOnExceptionsState() {
   DCHECK(enabled());
   return m_pauseOnExceptionsState;
 }
 
 void V8Debugger::setPauseOnExceptionsState(
-    v8::DebugInterface::ExceptionBreakState pauseOnExceptionsState) {
+    v8::debug::ExceptionBreakState pauseOnExceptionsState) {
   DCHECK(enabled());
   if (m_pauseOnExceptionsState == pauseOnExceptionsState) return;
-  v8::DebugInterface::ChangeBreakOnException(m_isolate, pauseOnExceptionsState);
+  v8::debug::ChangeBreakOnException(m_isolate, pauseOnExceptionsState);
   m_pauseOnExceptionsState = pauseOnExceptionsState;
 }
 
 void V8Debugger::setPauseOnNextStatement(bool pause) {
-  if (m_runningNestedMessageLoop) return;
+  if (isPaused()) return;
   if (pause)
-    v8::DebugInterface::DebugBreak(m_isolate);
+    v8::debug::DebugBreak(m_isolate);
   else
-    v8::DebugInterface::CancelDebugBreak(m_isolate);
+    v8::debug::CancelDebugBreak(m_isolate);
 }
 
 bool V8Debugger::canBreakProgram() {
   if (!m_breakpointsActivated) return false;
-  return m_isolate->InContext();
+  return v8::debug::HasNonBlackboxedFrameOnStack(m_isolate);
 }
 
 void V8Debugger::breakProgram() {
-  if (isPaused()) {
-    DCHECK(!m_runningNestedMessageLoop);
-    v8::Local<v8::Value> exception;
-    v8::Local<v8::Array> hitBreakpoints;
-    handleProgramBreak(m_pausedContext, m_executionState, exception,
-                       hitBreakpoints);
-    return;
-  }
-
+  // Don't allow nested breaks.
+  if (isPaused()) return;
   if (!canBreakProgram()) return;
 
   v8::HandleScope scope(m_isolate);
@@ -294,7 +353,7 @@
                          v8::ConstructorBehavior::kThrow)
            .ToLocal(&breakFunction))
     return;
-  v8::DebugInterface::Call(debuggerContext(), breakFunction).ToLocalChecked();
+  v8::debug::Call(debuggerContext(), breakFunction).ToLocalChecked();
 }
 
 void V8Debugger::continueProgram() {
@@ -306,29 +365,24 @@
 void V8Debugger::stepIntoStatement() {
   DCHECK(isPaused());
   DCHECK(!m_executionState.IsEmpty());
-  v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepIn);
+  v8::debug::PrepareStep(m_isolate, v8::debug::StepIn);
   continueProgram();
 }
 
 void V8Debugger::stepOverStatement() {
   DCHECK(isPaused());
   DCHECK(!m_executionState.IsEmpty());
-  v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepNext);
+  v8::debug::PrepareStep(m_isolate, v8::debug::StepNext);
   continueProgram();
 }
 
 void V8Debugger::stepOutOfFunction() {
   DCHECK(isPaused());
   DCHECK(!m_executionState.IsEmpty());
-  v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepOut);
+  v8::debug::PrepareStep(m_isolate, v8::debug::StepOut);
   continueProgram();
 }
 
-void V8Debugger::clearStepping() {
-  DCHECK(enabled());
-  v8::DebugInterface::ClearStepping(m_isolate);
-}
-
 Response V8Debugger::setScriptSource(
     const String16& sourceID, v8::Local<v8::String> newSource, bool dryRun,
     Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails,
@@ -337,11 +391,11 @@
   class EnableLiveEditScope {
    public:
     explicit EnableLiveEditScope(v8::Isolate* isolate) : m_isolate(isolate) {
-      v8::DebugInterface::SetLiveEditEnabled(m_isolate, true);
+      v8::debug::SetLiveEditEnabled(m_isolate, true);
       inLiveEditScope = true;
     }
     ~EnableLiveEditScope() {
-      v8::DebugInterface::SetLiveEditEnabled(m_isolate, false);
+      v8::debug::SetLiveEditEnabled(m_isolate, false);
       inLiveEditScope = false;
     }
 
@@ -355,7 +409,7 @@
 
   std::unique_ptr<v8::Context::Scope> contextScope;
   if (!isPaused())
-    contextScope = wrapUnique(new v8::Context::Scope(debuggerContext()));
+    contextScope.reset(new v8::Context::Scope(debuggerContext()));
 
   v8::Local<v8::Value> argv[] = {toV8String(m_isolate, sourceID), newSource,
                                  v8Boolean(dryRun, m_isolate)};
@@ -366,7 +420,7 @@
     v8::TryCatch tryCatch(m_isolate);
     tryCatch.SetVerbose(false);
     v8::MaybeLocal<v8::Value> maybeResult =
-        callDebuggerMethod("liveEditScriptSource", 3, argv);
+        callDebuggerMethod("liveEditScriptSource", 3, argv, false);
     if (tryCatch.HasCaught()) {
       v8::Local<v8::Message> message = tryCatch.Message();
       if (!message.IsEmpty())
@@ -427,27 +481,14 @@
 }
 
 JavaScriptCallFrames V8Debugger::currentCallFrames(int limit) {
-  if (!m_isolate->InContext()) return JavaScriptCallFrames();
+  if (!isPaused()) return JavaScriptCallFrames();
   v8::Local<v8::Value> currentCallFramesV8;
-  if (m_executionState.IsEmpty()) {
-    v8::Local<v8::Function> currentCallFramesFunction =
-        v8::Local<v8::Function>::Cast(
-            m_debuggerScript.Get(m_isolate)
-                ->Get(debuggerContext(),
-                      toV8StringInternalized(m_isolate, "currentCallFrames"))
-                .ToLocalChecked());
-    currentCallFramesV8 =
-        v8::DebugInterface::Call(debuggerContext(), currentCallFramesFunction,
-                                 v8::Integer::New(m_isolate, limit))
-            .ToLocalChecked();
-  } else {
-    v8::Local<v8::Value> argv[] = {m_executionState,
-                                   v8::Integer::New(m_isolate, limit)};
-    currentCallFramesV8 =
-        callDebuggerMethod("currentCallFrames", arraysize(argv), argv)
-            .ToLocalChecked();
+  v8::Local<v8::Value> argv[] = {m_executionState,
+                                 v8::Integer::New(m_isolate, limit)};
+  if (!callDebuggerMethod("currentCallFrames", arraysize(argv), argv, true)
+           .ToLocal(&currentCallFramesV8)) {
+    return JavaScriptCallFrames();
   }
-  DCHECK(!currentCallFramesV8.IsEmpty());
   if (!currentCallFramesV8->IsArray()) return JavaScriptCallFrames();
   v8::Local<v8::Array> callFramesArray = currentCallFramesV8.As<v8::Array>();
   JavaScriptCallFrames callFrames;
@@ -488,11 +529,11 @@
                                     v8::Local<v8::Array> hitBreakpointNumbers,
                                     bool isPromiseRejection, bool isUncaught) {
   // Don't allow nested breaks.
-  if (m_runningNestedMessageLoop) return;
+  if (isPaused()) return;
 
-  V8DebuggerAgentImpl* agent =
-      m_inspector->enabledDebuggerAgentForGroup(getGroupId(pausedContext));
-  if (!agent) return;
+  V8DebuggerAgentImpl* agent = m_inspector->enabledDebuggerAgentForGroup(
+      m_inspector->contextGroupId(pausedContext));
+  if (!agent || (agent->skipAllPauses() && !m_scheduledOOMBreak)) return;
 
   std::vector<String16> breakpointIds;
   if (!hitBreakpointNumbers.IsEmpty()) {
@@ -508,147 +549,113 @@
 
   m_pausedContext = pausedContext;
   m_executionState = executionState;
-  V8DebuggerAgentImpl::SkipPauseRequest result = agent->didPause(
-      pausedContext, exception, breakpointIds, isPromiseRejection, isUncaught);
-  if (result == V8DebuggerAgentImpl::RequestNoSkip) {
-    m_runningNestedMessageLoop = true;
-    int groupId = getGroupId(pausedContext);
-    DCHECK(groupId);
+  m_runningNestedMessageLoop = true;
+  agent->didPause(InspectedContext::contextId(pausedContext), exception,
+                  breakpointIds, isPromiseRejection, isUncaught,
+                  m_scheduledOOMBreak);
+  int groupId = m_inspector->contextGroupId(pausedContext);
+  DCHECK(groupId);
+  {
+    v8::Context::Scope scope(pausedContext);
+    v8::Local<v8::Context> context = m_isolate->GetCurrentContext();
+    CHECK(!context.IsEmpty() &&
+          context != v8::debug::GetDebugContext(m_isolate));
     m_inspector->client()->runMessageLoopOnPause(groupId);
-    // The agent may have been removed in the nested loop.
-    agent =
-        m_inspector->enabledDebuggerAgentForGroup(getGroupId(pausedContext));
-    if (agent) agent->didContinue();
     m_runningNestedMessageLoop = false;
   }
+  // The agent may have been removed in the nested loop.
+  agent = m_inspector->enabledDebuggerAgentForGroup(groupId);
+  if (agent) agent->didContinue();
+  if (m_scheduledOOMBreak) m_isolate->RestoreOriginalHeapLimit();
+  m_scheduledOOMBreak = false;
   m_pausedContext.Clear();
   m_executionState.Clear();
+}
 
-  if (result == V8DebuggerAgentImpl::RequestStepFrame) {
-    v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepFrame);
-  } else if (result == V8DebuggerAgentImpl::RequestStepInto) {
-    v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepIn);
-  } else if (result == V8DebuggerAgentImpl::RequestStepOut) {
-    v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepOut);
+void V8Debugger::v8OOMCallback(void* data) {
+  V8Debugger* thisPtr = static_cast<V8Debugger*>(data);
+  thisPtr->m_isolate->IncreaseHeapLimitForDebugging();
+  thisPtr->m_scheduledOOMBreak = true;
+  thisPtr->setPauseOnNextStatement(true);
+}
+
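
Note: v8OOMCallback leans on inspector-internal isolate entry points (IncreaseHeapLimitForDebugging / RestoreOriginalHeapLimit) to buy headroom and schedule a pause instead of crashing. For embedders, later V8 versions expose a similar hook publicly; the sketch below assumes the public NearHeapLimitCallback signature and is illustrative only:

    // Assumed signature: size_t cb(void* data, size_t current_heap_limit,
    // size_t initial_heap_limit), registered via
    // isolate->AddNearHeapLimitCallback(nearHeapLimit, debugger).
    size_t nearHeapLimit(void* data, size_t currentLimit,
                         size_t /*initialLimit*/) {
      V8Debugger* debugger = static_cast<V8Debugger*>(data);
      debugger->setPauseOnNextStatement(true);  // pause instead of crashing
      return currentLimit * 2;  // grant headroom so the pause can be reached
    }
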
+void V8Debugger::ScriptCompiled(v8::Local<v8::debug::Script> script,
+                                bool has_compile_error) {
+  V8DebuggerAgentImpl* agent = agentForScript(m_inspector, script);
+  if (!agent) return;
+  if (script->IsWasm()) {
+    m_wasmTranslation.AddScript(script.As<v8::debug::WasmScript>(), agent);
+  } else if (m_ignoreScriptParsedEventsCounter == 0) {
+    agent->didParseSource(
+        V8DebuggerScript::Create(m_isolate, script, inLiveEditScope),
+        !has_compile_error);
   }
 }
 
-void V8Debugger::v8DebugEventCallback(
-    const v8::DebugInterface::EventDetails& eventDetails) {
-  V8Debugger* thisPtr = toV8Debugger(eventDetails.GetCallbackData());
-  thisPtr->handleV8DebugEvent(eventDetails);
-}
-
-v8::Local<v8::Value> V8Debugger::callInternalGetterFunction(
-    v8::Local<v8::Object> object, const char* functionName) {
-  v8::MicrotasksScope microtasks(m_isolate,
-                                 v8::MicrotasksScope::kDoNotRunMicrotasks);
-  v8::Local<v8::Value> getterValue =
-      object
-          ->Get(m_isolate->GetCurrentContext(),
-                toV8StringInternalized(m_isolate, functionName))
-          .ToLocalChecked();
-  DCHECK(!getterValue.IsEmpty() && getterValue->IsFunction());
-  return v8::Local<v8::Function>::Cast(getterValue)
-      ->Call(m_isolate->GetCurrentContext(), object, 0, nullptr)
-      .ToLocalChecked();
-}
-
-void V8Debugger::handleV8DebugEvent(
-    const v8::DebugInterface::EventDetails& eventDetails) {
-  if (!enabled()) return;
-  v8::DebugEvent event = eventDetails.GetEvent();
-  if (event != v8::AsyncTaskEvent && event != v8::Break &&
-      event != v8::Exception && event != v8::AfterCompile &&
-      event != v8::BeforeCompile && event != v8::CompileError)
-    return;
-
-  v8::Local<v8::Context> eventContext = eventDetails.GetEventContext();
-  DCHECK(!eventContext.IsEmpty());
-
-  if (event == v8::AsyncTaskEvent) {
-    v8::HandleScope scope(m_isolate);
-    handleV8AsyncTaskEvent(eventContext, eventDetails.GetExecutionState(),
-                           eventDetails.GetEventData());
+void V8Debugger::BreakProgramRequested(v8::Local<v8::Context> pausedContext,
+                                       v8::Local<v8::Object> execState,
+                                       v8::Local<v8::Value> breakPointsHit) {
+  v8::Local<v8::Value> argv[] = {breakPointsHit};
+  v8::Local<v8::Value> hitBreakpoints;
+  if (!callDebuggerMethod("getBreakpointNumbers", 1, argv, true)
+           .ToLocal(&hitBreakpoints)) {
     return;
   }
-
-  V8DebuggerAgentImpl* agent =
-      m_inspector->enabledDebuggerAgentForGroup(getGroupId(eventContext));
-  if (agent) {
-    v8::HandleScope scope(m_isolate);
-    if (m_ignoreScriptParsedEventsCounter == 0 &&
-        (event == v8::AfterCompile || event == v8::CompileError)) {
-      v8::Local<v8::Context> context = debuggerContext();
-      v8::Context::Scope contextScope(context);
-      v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
-      v8::Local<v8::Value> value =
-          callDebuggerMethod("getAfterCompileScript", 1, argv).ToLocalChecked();
-      if (value->IsNull()) return;
-      DCHECK(value->IsObject());
-      v8::Local<v8::Object> scriptObject = v8::Local<v8::Object>::Cast(value);
-      v8::Local<v8::DebugInterface::Script> script;
-      if (!v8::DebugInterface::Script::Wrap(m_isolate, scriptObject)
-               .ToLocal(&script))
-        return;
-      agent->didParseSource(
-          wrapUnique(new V8DebuggerScript(m_isolate, script, inLiveEditScope)),
-          event == v8::AfterCompile);
-    } else if (event == v8::Exception) {
-      v8::Local<v8::Context> context = debuggerContext();
-      v8::Local<v8::Object> eventData = eventDetails.GetEventData();
-      v8::Local<v8::Value> exception =
-          callInternalGetterFunction(eventData, "exception");
-      v8::Local<v8::Value> promise =
-          callInternalGetterFunction(eventData, "promise");
-      bool isPromiseRejection = !promise.IsEmpty() && promise->IsObject();
-      v8::Local<v8::Value> uncaught =
-          callInternalGetterFunction(eventData, "uncaught");
-      bool isUncaught = uncaught->BooleanValue(context).FromJust();
-      handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
-                         exception, v8::Local<v8::Array>(), isPromiseRejection,
-                         isUncaught);
-    } else if (event == v8::Break) {
-      v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
-      v8::Local<v8::Value> hitBreakpoints =
-          callDebuggerMethod("getBreakpointNumbers", 1, argv).ToLocalChecked();
-      DCHECK(hitBreakpoints->IsArray());
-      handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
-                         v8::Local<v8::Value>(),
-                         hitBreakpoints.As<v8::Array>());
-    }
-  }
+  DCHECK(hitBreakpoints->IsArray());
+  handleProgramBreak(pausedContext, execState, v8::Local<v8::Value>(),
+                     hitBreakpoints.As<v8::Array>());
 }
 
-void V8Debugger::handleV8AsyncTaskEvent(v8::Local<v8::Context> context,
-                                        v8::Local<v8::Object> executionState,
-                                        v8::Local<v8::Object> eventData) {
+void V8Debugger::ExceptionThrown(v8::Local<v8::Context> pausedContext,
+                                 v8::Local<v8::Object> execState,
+                                 v8::Local<v8::Value> exception,
+                                 v8::Local<v8::Value> promise,
+                                 bool isUncaught) {
+  bool isPromiseRejection = promise->IsPromise();
+  handleProgramBreak(pausedContext, execState, exception,
+                     v8::Local<v8::Array>(), isPromiseRejection, isUncaught);
+}
+
+bool V8Debugger::IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
+                                      const v8::debug::Location& start,
+                                      const v8::debug::Location& end) {
+  V8DebuggerAgentImpl* agent = agentForScript(m_inspector, script);
+  if (!agent) return false;
+  return agent->isFunctionBlackboxed(String16::fromInteger(script->Id()), start,
+                                     end);
+}
+
+void V8Debugger::PromiseEventOccurred(v8::debug::PromiseDebugActionType type,
+                                      int id, int parentId) {
   if (!m_maxAsyncCallStackDepth) return;
-
-  String16 type = toProtocolStringWithTypeCheck(
-      callInternalGetterFunction(eventData, "type"));
-  String16 name = toProtocolStringWithTypeCheck(
-      callInternalGetterFunction(eventData, "name"));
-  int id = static_cast<int>(callInternalGetterFunction(eventData, "id")
-                                ->ToInteger(context)
-                                .ToLocalChecked()
-                                ->Value());
   // Async task events from Promises are given misaligned pointers to prevent
   // them from overlapping with other Blink task identifiers. There is a
   // single namespace of such ids, managed by src/js/promise.js.
   void* ptr = reinterpret_cast<void*>(id * 2 + 1);
-  if (type == v8AsyncTaskEventEnqueue)
-    asyncTaskScheduled(name, ptr, false);
-  else if (type == v8AsyncTaskEventEnqueueRecurring)
-    asyncTaskScheduled(name, ptr, true);
-  else if (type == v8AsyncTaskEventWillHandle)
-    asyncTaskStarted(ptr);
-  else if (type == v8AsyncTaskEventDidHandle)
-    asyncTaskFinished(ptr);
-  else if (type == v8AsyncTaskEventCancel)
-    asyncTaskCanceled(ptr);
-  else
-    UNREACHABLE();
+  switch (type) {
+    case v8::debug::kDebugPromiseCreated:
+      asyncTaskCreated(
+          ptr, parentId ? reinterpret_cast<void*>(parentId * 2 + 1) : nullptr);
+      break;
+    case v8::debug::kDebugEnqueueAsyncFunction:
+      asyncTaskScheduled("async function", ptr, true);
+      break;
+    case v8::debug::kDebugEnqueuePromiseResolve:
+      asyncTaskScheduled("Promise.resolve", ptr, true);
+      break;
+    case v8::debug::kDebugEnqueuePromiseReject:
+      asyncTaskScheduled("Promise.reject", ptr, true);
+      break;
+    case v8::debug::kDebugPromiseCollected:
+      asyncTaskCanceled(ptr);
+      break;
+    case v8::debug::kDebugWillHandle:
+      asyncTaskStarted(ptr);
+      break;
+    case v8::debug::kDebugDidHandle:
+      asyncTaskFinished(ptr);
+      break;
+  }
 }
 
 V8StackTraceImpl* V8Debugger::currentAsyncCallChain() {
@@ -685,15 +692,27 @@
   return m_debuggerContext.Get(m_isolate);
 }
 
-v8::MaybeLocal<v8::Value> V8Debugger::functionScopes(
-    v8::Local<v8::Context> context, v8::Local<v8::Function> function) {
+v8::MaybeLocal<v8::Value> V8Debugger::getTargetScopes(
+    v8::Local<v8::Context> context, v8::Local<v8::Value> value,
+    ScopeTargetKind kind) {
   if (!enabled()) {
     UNREACHABLE();
     return v8::Local<v8::Value>::New(m_isolate, v8::Undefined(m_isolate));
   }
-  v8::Local<v8::Value> argv[] = {function};
+  v8::Local<v8::Value> argv[] = {value};
   v8::Local<v8::Value> scopesValue;
-  if (!callDebuggerMethod("getFunctionScopes", 1, argv).ToLocal(&scopesValue))
+
+  const char* debuggerMethod = nullptr;
+  switch (kind) {
+    case FUNCTION:
+      debuggerMethod = "getFunctionScopes";
+      break;
+    case GENERATOR:
+      debuggerMethod = "getGeneratorScopes";
+      break;
+  }
+
+  if (!callDebuggerMethod(debuggerMethod, 1, argv, true).ToLocal(&scopesValue))
     return v8::MaybeLocal<v8::Value>();
   v8::Local<v8::Value> copied;
   if (!copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
@@ -710,16 +729,28 @@
   return copied;
 }
 
+v8::MaybeLocal<v8::Value> V8Debugger::functionScopes(
+    v8::Local<v8::Context> context, v8::Local<v8::Function> function) {
+  return getTargetScopes(context, function, FUNCTION);
+}
+
+v8::MaybeLocal<v8::Value> V8Debugger::generatorScopes(
+    v8::Local<v8::Context> context, v8::Local<v8::Value> generator) {
+  return getTargetScopes(context, generator, GENERATOR);
+}
+
 v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
     v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
   v8::Local<v8::Array> properties;
-  if (!v8::DebugInterface::GetInternalProperties(m_isolate, value)
-           .ToLocal(&properties))
+  if (!v8::debug::GetInternalProperties(m_isolate, value).ToLocal(&properties))
     return v8::MaybeLocal<v8::Array>();
   if (value->IsFunction()) {
     v8::Local<v8::Function> function = value.As<v8::Function>();
-    v8::Local<v8::Value> location = functionLocation(context, function);
-    if (location->IsObject()) {
+    v8::Local<v8::Object> location;
+    if (buildLocation(context, function->ScriptId(),
+                      function->GetScriptLineNumber(),
+                      function->GetScriptColumnNumber())
+            .ToLocal(&location)) {
       createDataProperty(
           context, properties, properties->Length(),
           toV8StringInternalized(m_isolate, "[[FunctionLocation]]"));
@@ -732,27 +763,29 @@
                          v8::True(m_isolate));
     }
   }
-  if (!enabled()) return properties;
-  if (value->IsMap() || value->IsWeakMap() || value->IsSet() ||
-      value->IsWeakSet() || value->IsSetIterator() || value->IsMapIterator()) {
-    v8::Local<v8::Value> entries =
-        collectionEntries(context, v8::Local<v8::Object>::Cast(value));
-    if (entries->IsArray()) {
-      createDataProperty(context, properties, properties->Length(),
-                         toV8StringInternalized(m_isolate, "[[Entries]]"));
-      createDataProperty(context, properties, properties->Length(), entries);
-    }
+  v8::Local<v8::Array> entries;
+  if (collectionsEntries(context, value).ToLocal(&entries)) {
+    createDataProperty(context, properties, properties->Length(),
+                       toV8StringInternalized(m_isolate, "[[Entries]]"));
+    createDataProperty(context, properties, properties->Length(), entries);
   }
   if (value->IsGeneratorObject()) {
-    v8::Local<v8::Value> location =
-        generatorObjectLocation(context, v8::Local<v8::Object>::Cast(value));
-    if (location->IsObject()) {
+    v8::Local<v8::Object> location;
+    if (generatorObjectLocation(context, value).ToLocal(&location)) {
       createDataProperty(
           context, properties, properties->Length(),
           toV8StringInternalized(m_isolate, "[[GeneratorLocation]]"));
       createDataProperty(context, properties, properties->Length(), location);
     }
+    if (!enabled()) return properties;
+    v8::Local<v8::Value> scopes;
+    if (generatorScopes(context, value).ToLocal(&scopes)) {
+      createDataProperty(context, properties, properties->Length(),
+                         toV8StringInternalized(m_isolate, "[[Scopes]]"));
+      createDataProperty(context, properties, properties->Length(), scopes);
+    }
   }
+  if (!enabled()) return properties;
   if (value->IsFunction()) {
     v8::Local<v8::Function> function = value.As<v8::Function>();
     v8::Local<v8::Value> boundFunction = function->GetBoundFunction();
@@ -767,117 +800,16 @@
   return properties;
 }
 
-v8::Local<v8::Value> V8Debugger::collectionEntries(
-    v8::Local<v8::Context> context, v8::Local<v8::Object> object) {
-  if (!enabled()) {
-    UNREACHABLE();
-    return v8::Undefined(m_isolate);
-  }
-  v8::Local<v8::Value> argv[] = {object};
-  v8::Local<v8::Value> entriesValue =
-      callDebuggerMethod("getCollectionEntries", 1, argv).ToLocalChecked();
-  if (!entriesValue->IsArray()) return v8::Undefined(m_isolate);
-
-  v8::Local<v8::Array> entries = entriesValue.As<v8::Array>();
-  v8::Local<v8::Array> copiedArray =
-      v8::Array::New(m_isolate, entries->Length());
-  if (!copiedArray->SetPrototype(context, v8::Null(m_isolate)).FromMaybe(false))
-    return v8::Undefined(m_isolate);
-  for (uint32_t i = 0; i < entries->Length(); ++i) {
-    v8::Local<v8::Value> item;
-    if (!entries->Get(debuggerContext(), i).ToLocal(&item))
-      return v8::Undefined(m_isolate);
-    v8::Local<v8::Value> copied;
-    if (!copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
-                                      item)
-             .ToLocal(&copied))
-      return v8::Undefined(m_isolate);
-    if (!createDataProperty(context, copiedArray, i, copied).FromMaybe(false))
-      return v8::Undefined(m_isolate);
-  }
-  if (!markArrayEntriesAsInternal(context,
-                                  v8::Local<v8::Array>::Cast(copiedArray),
-                                  V8InternalValueType::kEntry))
-    return v8::Undefined(m_isolate);
-  return copiedArray;
-}
-
-v8::Local<v8::Value> V8Debugger::generatorObjectLocation(
-    v8::Local<v8::Context> context, v8::Local<v8::Object> object) {
-  if (!enabled()) {
-    UNREACHABLE();
-    return v8::Null(m_isolate);
-  }
-  v8::Local<v8::Value> argv[] = {object};
-  v8::Local<v8::Value> location =
-      callDebuggerMethod("getGeneratorObjectLocation", 1, argv)
-          .ToLocalChecked();
-  v8::Local<v8::Value> copied;
-  if (!copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
-                                    location)
-           .ToLocal(&copied) ||
-      !copied->IsObject())
-    return v8::Null(m_isolate);
-  if (!markAsInternal(context, v8::Local<v8::Object>::Cast(copied),
-                      V8InternalValueType::kLocation))
-    return v8::Null(m_isolate);
-  return copied;
-}
-
-v8::Local<v8::Value> V8Debugger::functionLocation(
-    v8::Local<v8::Context> context, v8::Local<v8::Function> function) {
-  int scriptId = function->ScriptId();
-  if (scriptId == v8::UnboundScript::kNoScriptId) return v8::Null(m_isolate);
-  int lineNumber = function->GetScriptLineNumber();
-  int columnNumber = function->GetScriptColumnNumber();
-  if (lineNumber == v8::Function::kLineOffsetNotFound ||
-      columnNumber == v8::Function::kLineOffsetNotFound)
-    return v8::Null(m_isolate);
-  v8::Local<v8::Object> location = v8::Object::New(m_isolate);
-  if (!location->SetPrototype(context, v8::Null(m_isolate)).FromMaybe(false))
-    return v8::Null(m_isolate);
-  if (!createDataProperty(
-           context, location, toV8StringInternalized(m_isolate, "scriptId"),
-           toV8String(m_isolate, String16::fromInteger(scriptId)))
-           .FromMaybe(false))
-    return v8::Null(m_isolate);
-  if (!createDataProperty(context, location,
-                          toV8StringInternalized(m_isolate, "lineNumber"),
-                          v8::Integer::New(m_isolate, lineNumber))
-           .FromMaybe(false))
-    return v8::Null(m_isolate);
-  if (!createDataProperty(context, location,
-                          toV8StringInternalized(m_isolate, "columnNumber"),
-                          v8::Integer::New(m_isolate, columnNumber))
-           .FromMaybe(false))
-    return v8::Null(m_isolate);
-  if (!markAsInternal(context, location, V8InternalValueType::kLocation))
-    return v8::Null(m_isolate);
-  return location;
-}
-
-bool V8Debugger::isPaused() { return !m_pausedContext.IsEmpty(); }
-
 std::unique_ptr<V8StackTraceImpl> V8Debugger::createStackTrace(
     v8::Local<v8::StackTrace> stackTrace) {
   int contextGroupId =
-      m_isolate->InContext() ? getGroupId(m_isolate->GetCurrentContext()) : 0;
+      m_isolate->InContext()
+          ? m_inspector->contextGroupId(m_isolate->GetCurrentContext())
+          : 0;
   return V8StackTraceImpl::create(this, contextGroupId, stackTrace,
                                   V8StackTraceImpl::maxCallStackSizeToCapture);
 }
 
-int V8Debugger::markContext(const V8ContextInfo& info) {
-  DCHECK(info.context->GetIsolate() == m_isolate);
-  int contextId = ++m_lastContextId;
-  String16 debugData = String16::fromInteger(info.contextGroupId) + "," +
-                       String16::fromInteger(contextId) + "," +
-                       toString16(info.auxData);
-  v8::Context::Scope contextScope(info.context);
-  info.context->SetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex),
-                                toV8String(m_isolate, debugData));
-  return contextId;
-}
-
 void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) {
   if (depth <= 0)
     m_maxAsyncCallStackDepthMap.erase(agent);
@@ -895,6 +827,34 @@
   if (!maxAsyncCallStackDepth) allAsyncTasksCanceled();
 }
 
+void V8Debugger::registerAsyncTaskIfNeeded(void* task) {
+  if (m_taskToId.find(task) != m_taskToId.end()) return;
+
+  int id = ++m_lastTaskId;
+  m_taskToId[task] = id;
+  m_idToTask[id] = task;
+  if (static_cast<int>(m_idToTask.size()) > m_maxAsyncCallStacks) {
+    void* taskToRemove = m_idToTask.begin()->second;
+    asyncTaskCanceled(taskToRemove);
+  }
+}
+
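
Note: registerAsyncTaskIfNeeded, together with the map cleanup added to asyncTaskCanceled below, implements a bounded task registry with FIFO eviction: ids grow monotonically, and because m_idToTask is an ordered map, begin() is always the oldest surviving task. The same idea as a self-contained sketch (all names illustrative):

    #include <map>
    #include <unordered_map>

    class TaskRegistry {
     public:
      explicit TaskRegistry(int maxTasks) : m_maxTasks(maxTasks) {}

      void Register(void* task) {
        if (m_taskToId.count(task)) return;  // already tracked
        int id = ++m_lastId;
        m_taskToId[task] = id;
        m_idToTask[id] = task;
        if (static_cast<int>(m_idToTask.size()) > m_maxTasks)
          Remove(m_idToTask.begin()->second);  // smallest id == oldest task
      }

      void Remove(void* task) {
        auto it = m_taskToId.find(task);
        if (it == m_taskToId.end()) return;
        m_idToTask.erase(it->second);
        m_taskToId.erase(it);
      }

     private:
      int m_maxTasks;
      int m_lastId = 0;
      std::unordered_map<void*, int> m_taskToId;
      std::map<int, void*> m_idToTask;  // ordered: begin() is the oldest
    };
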
+void V8Debugger::asyncTaskCreated(void* task, void* parentTask) {
+  if (!m_maxAsyncCallStackDepth) return;
+  if (parentTask) m_parentTask[task] = parentTask;
+  v8::HandleScope scope(m_isolate);
+  // We don't need to pass a context group id here because we get this
+  // callback from V8 for promise events only.
+  // Passing one as maxStackSize forces no async chain for the new stack and
+  // keeps the captured creation stacks from growing exponentially.
+  std::unique_ptr<V8StackTraceImpl> creationStack =
+      V8StackTraceImpl::capture(this, 0, 1, String16());
+  if (creationStack && !creationStack->isEmpty()) {
+    m_asyncTaskCreationStacks[task] = std::move(creationStack);
+    registerAsyncTaskIfNeeded(task);
+  }
+}
+
 void V8Debugger::asyncTaskScheduled(const StringView& taskName, void* task,
                                     bool recurring) {
   if (!m_maxAsyncCallStackDepth) return;
@@ -906,13 +866,16 @@
   if (!m_maxAsyncCallStackDepth) return;
   v8::HandleScope scope(m_isolate);
   int contextGroupId =
-      m_isolate->InContext() ? getGroupId(m_isolate->GetCurrentContext()) : 0;
+      m_isolate->InContext()
+          ? m_inspector->contextGroupId(m_isolate->GetCurrentContext())
+          : 0;
   std::unique_ptr<V8StackTraceImpl> chain = V8StackTraceImpl::capture(
       this, contextGroupId, V8StackTraceImpl::maxCallStackSizeToCapture,
       taskName);
   if (chain) {
     m_asyncTaskStacks[task] = std::move(chain);
     if (recurring) m_recurringTasks.insert(task);
+    registerAsyncTaskIfNeeded(task);
   }
 }
 
@@ -920,12 +883,20 @@
   if (!m_maxAsyncCallStackDepth) return;
   m_asyncTaskStacks.erase(task);
   m_recurringTasks.erase(task);
+  m_parentTask.erase(task);
+  m_asyncTaskCreationStacks.erase(task);
+  auto it = m_taskToId.find(task);
+  if (it == m_taskToId.end()) return;
+  m_idToTask.erase(it->second);
+  m_taskToId.erase(it);
 }
 
 void V8Debugger::asyncTaskStarted(void* task) {
   if (!m_maxAsyncCallStackDepth) return;
   m_currentTasks.push_back(task);
-  AsyncTaskToStackTrace::iterator stackIt = m_asyncTaskStacks.find(task);
+  auto parentIt = m_parentTask.find(task);
+  AsyncTaskToStackTrace::iterator stackIt = m_asyncTaskStacks.find(
+      parentIt == m_parentTask.end() ? task : parentIt->second);
   // Needs to support the following order of events:
   // - asyncTaskScheduled
   //   <-- attached here -->
@@ -936,6 +907,10 @@
   std::unique_ptr<V8StackTraceImpl> stack;
   if (stackIt != m_asyncTaskStacks.end() && stackIt->second)
     stack = stackIt->second->cloneImpl();
+  auto itCreation = m_asyncTaskCreationStacks.find(task);
+  if (stack && itCreation != m_asyncTaskCreationStacks.end()) {
+    stack->setCreation(itCreation->second->cloneImpl());
+  }
   m_currentStacks.push_back(std::move(stack));
 }
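
Note: asyncTaskStarted now resolves the scheduling stack through the parent task recorded by asyncTaskCreated (a promise chained off another promise), falling back to the task's own stack, and then attaches the creation stack captured at creation time. The lookup, distilled over STL maps; Stack and all names here are stand-ins:

    #include <map>
    #include <string>

    using Stack = std::string;  // stand-in for V8StackTraceImpl

    // Prefer the stack recorded for the task's parent; otherwise fall back
    // to the task's own scheduling stack. Returns nullptr if neither exists.
    const Stack* resolveScheduledStack(void* task,
                                       const std::map<void*, void*>& parentOf,
                                       const std::map<void*, Stack>& stacks) {
      auto parent = parentOf.find(task);
      void* key = parent == parentOf.end() ? task : parent->second;
      auto it = stacks.find(key);
      return it == stacks.end() ? nullptr : &it->second;
    }
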
 
@@ -948,8 +923,9 @@
   m_currentTasks.pop_back();
 
   m_currentStacks.pop_back();
-  if (m_recurringTasks.find(task) == m_recurringTasks.end())
-    m_asyncTaskStacks.erase(task);
+  if (m_recurringTasks.find(task) == m_recurringTasks.end()) {
+    asyncTaskCanceled(task);
+  }
 }
 
 void V8Debugger::allAsyncTasksCanceled() {
@@ -957,6 +933,11 @@
   m_recurringTasks.clear();
   m_currentStacks.clear();
   m_currentTasks.clear();
+  m_parentTask.clear();
+  m_asyncTaskCreationStacks.clear();
+  m_idToTask.clear();
+  m_taskToId.clear();
+  m_lastTaskId = 0;
 }
 
 void V8Debugger::muteScriptParsedEvents() {
@@ -973,7 +954,8 @@
   if (!m_isolate->InContext()) return nullptr;
 
   v8::HandleScope handles(m_isolate);
-  int contextGroupId = getGroupId(m_isolate->GetCurrentContext());
+  int contextGroupId =
+      m_inspector->contextGroupId(m_isolate->GetCurrentContext());
   if (!contextGroupId) return nullptr;
 
   size_t stackSize =
diff --git a/src/inspector/v8-debugger.h b/src/inspector/v8-debugger.h
index 4c74778..c45c76f 100644
--- a/src/inspector/v8-debugger.h
+++ b/src/inspector/v8-debugger.h
@@ -13,6 +13,7 @@
 #include "src/inspector/protocol/Forward.h"
 #include "src/inspector/protocol/Runtime.h"
 #include "src/inspector/v8-debugger-script.h"
+#include "src/inspector/wasm-translation.h"
 
 #include "include/v8-inspector.h"
 
@@ -25,25 +26,21 @@
 
 using protocol::Response;
 
-class V8Debugger {
+class V8Debugger : public v8::debug::DebugDelegate {
  public:
   V8Debugger(v8::Isolate*, V8InspectorImpl*);
   ~V8Debugger();
 
-  static int contextId(v8::Local<v8::Context>);
-  static int getGroupId(v8::Local<v8::Context>);
-  int markContext(const V8ContextInfo&);
-
   bool enabled() const;
 
-  String16 setBreakpoint(const String16& sourceID, const ScriptBreakpoint&,
-                         int* actualLineNumber, int* actualColumnNumber);
+  String16 setBreakpoint(const ScriptBreakpoint&, int* actualLineNumber,
+                         int* actualColumnNumber);
   void removeBreakpoint(const String16& breakpointId);
   void setBreakpointsActivated(bool);
   bool breakpointsActivated() const { return m_breakpointsActivated; }
 
-  v8::DebugInterface::ExceptionBreakState getPauseOnExceptionsState();
-  void setPauseOnExceptionsState(v8::DebugInterface::ExceptionBreakState);
+  v8::debug::ExceptionBreakState getPauseOnExceptionsState();
+  void setPauseOnExceptionsState(v8::debug::ExceptionBreakState);
   void setPauseOnNextStatement(bool);
   bool canBreakProgram();
   void breakProgram();
@@ -51,7 +48,6 @@
   void stepIntoStatement();
   void stepOverStatement();
   void stepOutOfFunction();
-  void clearStepping();
 
   Response setScriptSource(
       const String16& sourceID, v8::Local<v8::String> newSource, bool dryRun,
@@ -69,7 +65,7 @@
   void enable();
   void disable();
 
-  bool isPaused();
+  bool isPaused() const { return m_runningNestedMessageLoop; }
   v8::Local<v8::Context> pausedContext() { return m_pausedContext; }
 
   int maxAsyncCallChainDepth() { return m_maxAsyncCallStackDepth; }
@@ -94,14 +90,21 @@
 
   V8InspectorImpl* inspector() { return m_inspector; }
 
+  WasmTranslation* wasmTranslation() { return &m_wasmTranslation; }
+
+  void setMaxAsyncTaskStacksForTest(int limit) { m_maxAsyncCallStacks = limit; }
+
  private:
   void compileDebuggerScript();
   v8::MaybeLocal<v8::Value> callDebuggerMethod(const char* functionName,
                                                int argc,
-                                               v8::Local<v8::Value> argv[]);
+                                               v8::Local<v8::Value> argv[],
+                                               bool catchExceptions);
   v8::Local<v8::Context> debuggerContext() const;
   void clearBreakpoints();
 
+  static void v8OOMCallback(void* data);
+
   static void breakProgramCallback(const v8::FunctionCallbackInfo<v8::Value>&);
   void handleProgramBreak(v8::Local<v8::Context> pausedContext,
                           v8::Local<v8::Object> executionState,
@@ -109,26 +112,41 @@
                           v8::Local<v8::Array> hitBreakpoints,
                           bool isPromiseRejection = false,
                           bool isUncaught = false);
-  static void v8DebugEventCallback(const v8::DebugInterface::EventDetails&);
-  v8::Local<v8::Value> callInternalGetterFunction(v8::Local<v8::Object>,
-                                                  const char* functionName);
-  void handleV8DebugEvent(const v8::DebugInterface::EventDetails&);
-  void handleV8AsyncTaskEvent(v8::Local<v8::Context>,
-                              v8::Local<v8::Object> executionState,
-                              v8::Local<v8::Object> eventData);
 
-  v8::Local<v8::Value> collectionEntries(v8::Local<v8::Context>,
-                                         v8::Local<v8::Object>);
-  v8::Local<v8::Value> generatorObjectLocation(v8::Local<v8::Context>,
-                                               v8::Local<v8::Object>);
-  v8::Local<v8::Value> functionLocation(v8::Local<v8::Context>,
-                                        v8::Local<v8::Function>);
+  enum ScopeTargetKind {
+    FUNCTION,
+    GENERATOR,
+  };
+  v8::MaybeLocal<v8::Value> getTargetScopes(v8::Local<v8::Context>,
+                                            v8::Local<v8::Value>,
+                                            ScopeTargetKind);
+
   v8::MaybeLocal<v8::Value> functionScopes(v8::Local<v8::Context>,
                                            v8::Local<v8::Function>);
+  v8::MaybeLocal<v8::Value> generatorScopes(v8::Local<v8::Context>,
+                                            v8::Local<v8::Value>);
+
+  void asyncTaskCreated(void* task, void* parentTask);
+  void registerAsyncTaskIfNeeded(void* task);
+
+  // v8::debug::DebugDelegate implementation.
+  void PromiseEventOccurred(v8::debug::PromiseDebugActionType type, int id,
+                            int parentId) override;
+  void ScriptCompiled(v8::Local<v8::debug::Script> script,
+                      bool has_compile_error) override;
+  void BreakProgramRequested(v8::Local<v8::Context> paused_context,
+                             v8::Local<v8::Object> exec_state,
+                             v8::Local<v8::Value> break_points_hit) override;
+  void ExceptionThrown(v8::Local<v8::Context> paused_context,
+                       v8::Local<v8::Object> exec_state,
+                       v8::Local<v8::Value> exception,
+                       v8::Local<v8::Value> promise, bool is_uncaught) override;
+  bool IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
+                            const v8::debug::Location& start,
+                            const v8::debug::Location& end) override;
 
   v8::Isolate* m_isolate;
   V8InspectorImpl* m_inspector;
-  int m_lastContextId;
   int m_enableCount;
   bool m_breakpointsActivated;
   v8::Global<v8::Object> m_debuggerScript;
@@ -137,17 +155,26 @@
   v8::Local<v8::Context> m_pausedContext;
   bool m_runningNestedMessageLoop;
   int m_ignoreScriptParsedEventsCounter;
+  bool m_scheduledOOMBreak = false;
 
   using AsyncTaskToStackTrace =
       protocol::HashMap<void*, std::unique_ptr<V8StackTraceImpl>>;
   AsyncTaskToStackTrace m_asyncTaskStacks;
+  AsyncTaskToStackTrace m_asyncTaskCreationStacks;
+  int m_maxAsyncCallStacks;
+  std::map<int, void*> m_idToTask;
+  std::unordered_map<void*, int> m_taskToId;
+  int m_lastTaskId;
   protocol::HashSet<void*> m_recurringTasks;
   int m_maxAsyncCallStackDepth;
   std::vector<void*> m_currentTasks;
   std::vector<std::unique_ptr<V8StackTraceImpl>> m_currentStacks;
   protocol::HashMap<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
+  protocol::HashMap<void*, void*> m_parentTask;
 
-  v8::DebugInterface::ExceptionBreakState m_pauseOnExceptionsState;
+  v8::debug::ExceptionBreakState m_pauseOnExceptionsState;
+
+  WasmTranslation m_wasmTranslation;
 
   DISALLOW_COPY_AND_ASSIGN(V8Debugger);
 };
diff --git a/src/inspector/v8-function-call.cc b/src/inspector/v8-function-call.cc
index 3880e31..b8c86d3 100644
--- a/src/inspector/v8-function-call.cc
+++ b/src/inspector/v8-function-call.cc
@@ -30,6 +30,7 @@
 
 #include "src/inspector/v8-function-call.h"
 
+#include "src/inspector/inspected-context.h"
 #include "src/inspector/string-util.h"
 #include "src/inspector/v8-debugger.h"
 #include "src/inspector/v8-inspector-impl.h"
@@ -89,7 +90,7 @@
     DCHECK(!info[i].IsEmpty());
   }
 
-  int contextGroupId = V8Debugger::getGroupId(m_context);
+  int contextGroupId = m_inspector->contextGroupId(m_context);
   if (contextGroupId) {
     m_inspector->client()->muteMetrics(contextGroupId);
     m_inspector->muteExceptions(contextGroupId);
diff --git a/src/inspector/v8-heap-profiler-agent-impl.cc b/src/inspector/v8-heap-profiler-agent-impl.cc
index 0ff04e7..b3e3d11 100644
--- a/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -5,6 +5,7 @@
 #include "src/inspector/v8-heap-profiler-agent-impl.h"
 
 #include "src/inspector/injected-script.h"
+#include "src/inspector/inspected-context.h"
 #include "src/inspector/protocol/Protocol.h"
 #include "src/inspector/string-util.h"
 #include "src/inspector/v8-debugger.h"
@@ -55,7 +56,7 @@
   const char* GetName(v8::Local<v8::Object> object) override {
     InspectedContext* context = m_session->inspector()->getContext(
         m_session->contextGroupId(),
-        V8Debugger::contextId(object->CreationContext()));
+        InspectedContext::contextId(object->CreationContext()));
     if (!context) return "";
     String16 name = context->origin();
     size_t length = name.length();
@@ -216,7 +217,7 @@
   if (!profiler) return Response::Error("Cannot access v8 heap profiler");
   std::unique_ptr<HeapSnapshotProgress> progress;
   if (reportProgress.fromMaybe(false))
-    progress = wrapUnique(new HeapSnapshotProgress(&m_frontend));
+    progress.reset(new HeapSnapshotProgress(&m_frontend));
 
   GlobalObjectNameResolver resolver(m_session);
   const v8::HeapSnapshot* snapshot =
@@ -244,7 +245,7 @@
 
   *result = m_session->wrapObject(heapObject->CreationContext(), heapObject,
                                   objectGroup.fromMaybe(""), false);
-  if (!result) return Response::Error("Object is not available");
+  if (!*result) return Response::Error("Object is not available");
   return Response::OK();
 }
 
@@ -260,7 +261,8 @@
 
   if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject))
     return Response::Error("Object is not available");
-  m_session->addInspectedObject(wrapUnique(new InspectableHeapObject(id)));
+  m_session->addInspectedObject(
+      std::unique_ptr<InspectableHeapObject>(new InspectableHeapObject(id)));
   return Response::OK();
 }
 
diff --git a/src/inspector/v8-injected-script-host.cc b/src/inspector/v8-injected-script-host.cc
index 3748ec9..b3bd5ef 100644
--- a/src/inspector/v8-injected-script-host.cc
+++ b/src/inspector/v8-injected-script-host.cc
@@ -55,6 +55,9 @@
   USE(success);
   v8::Local<v8::External> debuggerExternal =
       v8::External::New(isolate, inspector);
+  setFunctionProperty(context, injectedScriptHost, "nullifyPrototype",
+                      V8InjectedScriptHost::nullifyPrototypeCallback,
+                      debuggerExternal);
   setFunctionProperty(context, injectedScriptHost, "internalConstructorName",
                       V8InjectedScriptHost::internalConstructorNameCallback,
                       debuggerExternal);
@@ -77,6 +80,16 @@
   return injectedScriptHost;
 }
 
+void V8InjectedScriptHost::nullifyPrototypeCallback(
+    const v8::FunctionCallbackInfo<v8::Value>& info) {
+  CHECK(info.Length() == 1 && info[0]->IsObject());
+  v8::Isolate* isolate = info.GetIsolate();
+  info[0]
+      .As<v8::Object>()
+      ->SetPrototype(isolate->GetCurrentContext(), v8::Null(isolate))
+      .ToChecked();
+}
+
 void V8InjectedScriptHost::internalConstructorNameCallback(
     const v8::FunctionCallbackInfo<v8::Value>& info) {
   if (info.Length() < 1 || !info[0]->IsObject()) return;
diff --git a/src/inspector/v8-injected-script-host.h b/src/inspector/v8-injected-script-host.h
index 7d293af..a64c2f8 100644
--- a/src/inspector/v8-injected-script-host.h
+++ b/src/inspector/v8-injected-script-host.h
@@ -27,6 +27,8 @@
   static v8::Local<v8::Object> create(v8::Local<v8::Context>, V8InspectorImpl*);
 
  private:
+  static void nullifyPrototypeCallback(
+      const v8::FunctionCallbackInfo<v8::Value>&);
   static void internalConstructorNameCallback(
       const v8::FunctionCallbackInfo<v8::Value>&);
   static void formatAccessorsAsProperties(
diff --git a/src/inspector/v8-inspector-impl.cc b/src/inspector/v8-inspector-impl.cc
index bd68548..34e4120 100644
--- a/src/inspector/v8-inspector-impl.cc
+++ b/src/inspector/v8-inspector-impl.cc
@@ -45,7 +45,7 @@
 
 std::unique_ptr<V8Inspector> V8Inspector::create(v8::Isolate* isolate,
                                                  V8InspectorClient* client) {
-  return wrapUnique(new V8InspectorImpl(isolate, client));
+  return std::unique_ptr<V8Inspector>(new V8InspectorImpl(isolate, client));
 }
 
 V8InspectorImpl::V8InspectorImpl(v8::Isolate* isolate,
@@ -54,10 +54,21 @@
       m_client(client),
       m_debugger(new V8Debugger(isolate, this)),
       m_capturingStackTracesCount(0),
-      m_lastExceptionId(0) {}
+      m_lastExceptionId(0),
+      m_lastContextId(0) {}
 
 V8InspectorImpl::~V8InspectorImpl() {}
 
+int V8InspectorImpl::contextGroupId(v8::Local<v8::Context> context) {
+  return contextGroupId(InspectedContext::contextId(context));
+}
+
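+// Maps a context id to its group id; returns 0 for contexts that were never
+// registered or have already been destroyed.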
+int V8InspectorImpl::contextGroupId(int contextId) {
+  protocol::HashMap<int, int>::iterator it =
+      m_contextIdToGroupIdMap.find(contextId);
+  return it != m_contextIdToGroupIdMap.end() ? it->second : 0;
+}
+
 V8DebuggerAgentImpl* V8InspectorImpl::enabledDebuggerAgentForGroup(
     int contextGroupId) {
   V8InspectorSessionImpl* session = sessionForContextGroup(contextGroupId);
@@ -83,7 +94,7 @@
     v8::Local<v8::Context> context, v8::Local<v8::Script> script) {
   v8::MicrotasksScope microtasksScope(m_isolate,
                                       v8::MicrotasksScope::kRunMicrotasks);
-  int groupId = V8Debugger::getGroupId(context);
+  int groupId = contextGroupId(context);
   if (V8DebuggerAgentImpl* agent = enabledDebuggerAgentForGroup(groupId))
     agent->willExecuteScript(script->GetUnboundScript()->GetId());
   v8::MaybeLocal<v8::Value> result = script->Run(context);
@@ -97,9 +108,23 @@
 v8::MaybeLocal<v8::Value> V8InspectorImpl::callFunction(
     v8::Local<v8::Function> function, v8::Local<v8::Context> context,
     v8::Local<v8::Value> receiver, int argc, v8::Local<v8::Value> info[]) {
-  v8::MicrotasksScope microtasksScope(m_isolate,
-                                      v8::MicrotasksScope::kRunMicrotasks);
-  int groupId = V8Debugger::getGroupId(context);
+  return callFunction(function, context, receiver, argc, info,
+                      v8::MicrotasksScope::kRunMicrotasks);
+}
+
+v8::MaybeLocal<v8::Value> V8InspectorImpl::callInternalFunction(
+    v8::Local<v8::Function> function, v8::Local<v8::Context> context,
+    v8::Local<v8::Value> receiver, int argc, v8::Local<v8::Value> info[]) {
+  return callFunction(function, context, receiver, argc, info,
+                      v8::MicrotasksScope::kDoNotRunMicrotasks);
+}
+
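+// Shared implementation behind callFunction() and callInternalFunction();
+// they differ only in whether microtasks may run when the scope exits.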
+v8::MaybeLocal<v8::Value> V8InspectorImpl::callFunction(
+    v8::Local<v8::Function> function, v8::Local<v8::Context> context,
+    v8::Local<v8::Value> receiver, int argc, v8::Local<v8::Value> info[],
+    v8::MicrotasksScope::Type runMicrotasks) {
+  v8::MicrotasksScope microtasksScope(m_isolate, runMicrotasks);
+  int groupId = contextGroupId(context);
   if (V8DebuggerAgentImpl* agent = enabledDebuggerAgentForGroup(groupId))
     agent->willExecuteScript(function->ScriptId());
   v8::MaybeLocal<v8::Value> result =
@@ -113,32 +138,28 @@
 
 v8::MaybeLocal<v8::Value> V8InspectorImpl::compileAndRunInternalScript(
     v8::Local<v8::Context> context, v8::Local<v8::String> source) {
-  v8::Local<v8::Script> script =
-      compileScript(context, source, String16(), true);
-  if (script.IsEmpty()) return v8::MaybeLocal<v8::Value>();
+  v8::Local<v8::UnboundScript> unboundScript;
+  if (!v8::debug::CompileInspectorScript(m_isolate, source)
+           .ToLocal(&unboundScript))
+    return v8::MaybeLocal<v8::Value>();
   v8::MicrotasksScope microtasksScope(m_isolate,
                                       v8::MicrotasksScope::kDoNotRunMicrotasks);
-  return script->Run(context);
+  v8::Context::Scope contextScope(context);
+  return unboundScript->BindToCurrentContext()->Run(context);
 }
 
-v8::Local<v8::Script> V8InspectorImpl::compileScript(
-    v8::Local<v8::Context> context, v8::Local<v8::String> code,
-    const String16& fileName, bool markAsInternal) {
+v8::MaybeLocal<v8::Script> V8InspectorImpl::compileScript(
+    v8::Local<v8::Context> context, const String16& code,
+    const String16& fileName) {
   v8::ScriptOrigin origin(
       toV8String(m_isolate, fileName), v8::Integer::New(m_isolate, 0),
       v8::Integer::New(m_isolate, 0),
-      v8::False(m_isolate),  // sharable
-      v8::Local<v8::Integer>(),
-      v8::Boolean::New(m_isolate, markAsInternal),  // internal
-      toV8String(m_isolate, String16()),            // sourceMap
-      v8::True(m_isolate));                         // opaqueresource
-  v8::ScriptCompiler::Source source(code, origin);
-  v8::Local<v8::Script> script;
-  if (!v8::ScriptCompiler::Compile(context, &source,
-                                   v8::ScriptCompiler::kNoCompileOptions)
-           .ToLocal(&script))
-    return v8::Local<v8::Script>();
-  return script;
+      v8::False(m_isolate),                                         // sharable
+      v8::Local<v8::Integer>(), toV8String(m_isolate, String16()),  // sourceMap
+      v8::True(m_isolate));  // opaque resource
+  v8::ScriptCompiler::Source source(toV8String(m_isolate, code), origin);
+  return v8::ScriptCompiler::Compile(context, &source,
+                                     v8::ScriptCompiler::kNoCompileOptions);
 }
 
 void V8InspectorImpl::enableStackCapturingIfNeeded() {
@@ -167,12 +188,12 @@
   ConsoleStorageMap::iterator storageIt =
       m_consoleStorageMap.find(contextGroupId);
   if (storageIt == m_consoleStorageMap.end())
-    storageIt =
-        m_consoleStorageMap
-            .insert(std::make_pair(
-                contextGroupId,
-                wrapUnique(new V8ConsoleMessageStorage(this, contextGroupId))))
-            .first;
+    storageIt = m_consoleStorageMap
+                    .insert(std::make_pair(
+                        contextGroupId,
+                        std::unique_ptr<V8ConsoleMessageStorage>(
+                            new V8ConsoleMessageStorage(this, contextGroupId))))
+                    .first;
   return storageIt->second.get();
 }
 
@@ -216,42 +237,43 @@
 }
 
 void V8InspectorImpl::contextCreated(const V8ContextInfo& info) {
-  int contextId = m_debugger->markContext(info);
+  int contextId = ++m_lastContextId;
+  InspectedContext* context = new InspectedContext(this, info, contextId);
+  m_contextIdToGroupIdMap[contextId] = info.contextGroupId;
 
   ContextsByGroupMap::iterator contextIt = m_contexts.find(info.contextGroupId);
   if (contextIt == m_contexts.end())
     contextIt = m_contexts
-                    .insert(std::make_pair(info.contextGroupId,
-                                           wrapUnique(new ContextByIdMap())))
+                    .insert(std::make_pair(
+                        info.contextGroupId,
+                        std::unique_ptr<ContextByIdMap>(new ContextByIdMap())))
                     .first;
-
   const auto& contextById = contextIt->second;
 
   DCHECK(contextById->find(contextId) == contextById->cend());
-  InspectedContext* context = new InspectedContext(this, info, contextId);
-  (*contextById)[contextId] = wrapUnique(context);
+  (*contextById)[contextId].reset(context);
   SessionMap::iterator sessionIt = m_sessions.find(info.contextGroupId);
   if (sessionIt != m_sessions.end())
     sessionIt->second->runtimeAgent()->reportExecutionContextCreated(context);
 }
 
 void V8InspectorImpl::contextDestroyed(v8::Local<v8::Context> context) {
-  int contextId = V8Debugger::contextId(context);
-  int contextGroupId = V8Debugger::getGroupId(context);
+  int contextId = InspectedContext::contextId(context);
+  int groupId = contextGroupId(context);
+  m_contextIdToGroupIdMap.erase(contextId);
 
-  ConsoleStorageMap::iterator storageIt =
-      m_consoleStorageMap.find(contextGroupId);
+  ConsoleStorageMap::iterator storageIt = m_consoleStorageMap.find(groupId);
   if (storageIt != m_consoleStorageMap.end())
     storageIt->second->contextDestroyed(contextId);
 
-  InspectedContext* inspectedContext = getContext(contextGroupId, contextId);
+  InspectedContext* inspectedContext = getContext(groupId, contextId);
   if (!inspectedContext) return;
 
-  SessionMap::iterator iter = m_sessions.find(contextGroupId);
+  SessionMap::iterator iter = m_sessions.find(groupId);
   if (iter != m_sessions.end())
     iter->second->runtimeAgent()->reportExecutionContextDestroyed(
         inspectedContext);
-  discardInspectedContext(contextGroupId, contextId);
+  discardInspectedContext(groupId, contextId);
 }
 
 void V8InspectorImpl::resetContextGroup(int contextGroupId) {
@@ -260,19 +282,22 @@
   SessionMap::iterator session = m_sessions.find(contextGroupId);
   if (session != m_sessions.end()) session->second->reset();
   m_contexts.erase(contextGroupId);
+  m_debugger->wasmTranslation()->Clear();
 }
 
 void V8InspectorImpl::willExecuteScript(v8::Local<v8::Context> context,
                                         int scriptId) {
   if (V8DebuggerAgentImpl* agent =
-          enabledDebuggerAgentForGroup(V8Debugger::getGroupId(context)))
+          enabledDebuggerAgentForGroup(contextGroupId(context))) {
     agent->willExecuteScript(scriptId);
+  }
 }
 
 void V8InspectorImpl::didExecuteScript(v8::Local<v8::Context> context) {
   if (V8DebuggerAgentImpl* agent =
-          enabledDebuggerAgentForGroup(V8Debugger::getGroupId(context)))
+          enabledDebuggerAgentForGroup(contextGroupId(context))) {
     agent->didExecuteScript();
+  }
 }
 
 void V8InspectorImpl::idleStarted() {
@@ -292,33 +317,31 @@
     v8::Local<v8::Value> exception, const StringView& detailedMessage,
     const StringView& url, unsigned lineNumber, unsigned columnNumber,
     std::unique_ptr<V8StackTrace> stackTrace, int scriptId) {
-  int contextGroupId = V8Debugger::getGroupId(context);
-  if (!contextGroupId || m_muteExceptionsMap[contextGroupId]) return 0;
-  std::unique_ptr<V8StackTraceImpl> stackTraceImpl =
-      wrapUnique(static_cast<V8StackTraceImpl*>(stackTrace.release()));
+  int groupId = contextGroupId(context);
+  if (!groupId || m_muteExceptionsMap[groupId]) return 0;
+  std::unique_ptr<V8StackTraceImpl> stackTraceImpl(
+      static_cast<V8StackTraceImpl*>(stackTrace.release()));
   unsigned exceptionId = nextExceptionId();
   std::unique_ptr<V8ConsoleMessage> consoleMessage =
       V8ConsoleMessage::createForException(
           m_client->currentTimeMS(), toString16(detailedMessage),
           toString16(url), lineNumber, columnNumber, std::move(stackTraceImpl),
           scriptId, m_isolate, toString16(message),
-          V8Debugger::contextId(context), exception, exceptionId);
-  ensureConsoleMessageStorage(contextGroupId)
-      ->addMessage(std::move(consoleMessage));
+          InspectedContext::contextId(context), exception, exceptionId);
+  ensureConsoleMessageStorage(groupId)->addMessage(std::move(consoleMessage));
   return exceptionId;
 }
 
 void V8InspectorImpl::exceptionRevoked(v8::Local<v8::Context> context,
                                        unsigned exceptionId,
                                        const StringView& message) {
-  int contextGroupId = V8Debugger::getGroupId(context);
-  if (!contextGroupId) return;
+  int groupId = contextGroupId(context);
+  if (!groupId) return;
 
   std::unique_ptr<V8ConsoleMessage> consoleMessage =
       V8ConsoleMessage::createForRevokedException(
           m_client->currentTimeMS(), toString16(message), exceptionId);
-  ensureConsoleMessageStorage(contextGroupId)
-      ->addMessage(std::move(consoleMessage));
+  ensureConsoleMessageStorage(groupId)->addMessage(std::move(consoleMessage));
 }
 
 std::unique_ptr<V8StackTrace> V8InspectorImpl::captureStackTrace(
diff --git a/src/inspector/v8-inspector-impl.h b/src/inspector/v8-inspector-impl.h
index 0ca1a6a..9d6e62c 100644
--- a/src/inspector/v8-inspector-impl.h
+++ b/src/inspector/v8-inspector-impl.h
@@ -36,7 +36,6 @@
 #include "src/base/macros.h"
 #include "src/inspector/protocol/Protocol.h"
 
-#include "include/v8-debug.h"
 #include "include/v8-inspector.h"
 
 namespace v8_inspector {
@@ -58,6 +57,8 @@
   v8::Isolate* isolate() const { return m_isolate; }
   V8InspectorClient* client() { return m_client; }
   V8Debugger* debugger() { return m_debugger.get(); }
+  int contextGroupId(v8::Local<v8::Context>);
+  int contextGroupId(int contextId);
 
   v8::MaybeLocal<v8::Value> runCompiledScript(v8::Local<v8::Context>,
                                               v8::Local<v8::Script>);
@@ -67,10 +68,14 @@
                                          int argc, v8::Local<v8::Value> info[]);
   v8::MaybeLocal<v8::Value> compileAndRunInternalScript(v8::Local<v8::Context>,
                                                         v8::Local<v8::String>);
-  v8::Local<v8::Script> compileScript(v8::Local<v8::Context>,
-                                      v8::Local<v8::String>,
-                                      const String16& fileName,
-                                      bool markAsInternal);
+  v8::MaybeLocal<v8::Value> callInternalFunction(v8::Local<v8::Function>,
+                                                 v8::Local<v8::Context>,
+                                                 v8::Local<v8::Value> receiver,
+                                                 int argc,
+                                                 v8::Local<v8::Value> info[]);
+  v8::MaybeLocal<v8::Script> compileScript(v8::Local<v8::Context>,
+                                           const String16& code,
+                                           const String16& fileName);
   v8::Local<v8::Context> regexContext();
 
   // V8Inspector implementation.
@@ -121,12 +126,18 @@
   V8ProfilerAgentImpl* enabledProfilerAgentForGroup(int contextGroupId);
 
  private:
+  v8::MaybeLocal<v8::Value> callFunction(
+      v8::Local<v8::Function>, v8::Local<v8::Context>,
+      v8::Local<v8::Value> receiver, int argc, v8::Local<v8::Value> info[],
+      v8::MicrotasksScope::Type runMicrotasks);
+
   v8::Isolate* m_isolate;
   V8InspectorClient* m_client;
   std::unique_ptr<V8Debugger> m_debugger;
   v8::Global<v8::Context> m_regexContext;
   int m_capturingStackTracesCount;
   unsigned m_lastExceptionId;
+  int m_lastContextId;
 
   using MuteExceptionsMap = protocol::HashMap<int, int>;
   MuteExceptionsMap m_muteExceptionsMap;
@@ -142,6 +153,8 @@
       protocol::HashMap<int, std::unique_ptr<V8ConsoleMessageStorage>>;
   ConsoleStorageMap m_consoleStorageMap;
 
+  protocol::HashMap<int, int> m_contextIdToGroupIdMap;
+
   DISALLOW_COPY_AND_ASSIGN(V8InspectorImpl);
 };
 
diff --git a/src/inspector/v8-inspector-session-impl.cc b/src/inspector/v8-inspector-session-impl.cc
index e415575..2674fc2 100644
--- a/src/inspector/v8-inspector-session-impl.cc
+++ b/src/inspector/v8-inspector-session-impl.cc
@@ -37,10 +37,15 @@
                               protocol::Schema::Metainfo::commandPrefix);
 }
 
+// static
+int V8ContextInfo::executionContextId(v8::Local<v8::Context> context) {
+  return InspectedContext::contextId(context);
+}
+
 std::unique_ptr<V8InspectorSessionImpl> V8InspectorSessionImpl::create(
     V8InspectorImpl* inspector, int contextGroupId,
     V8Inspector::Channel* channel, const StringView& state) {
-  return wrapUnique(
+  return std::unique_ptr<V8InspectorSessionImpl>(
       new V8InspectorSessionImpl(inspector, contextGroupId, channel, state));
 }
 
@@ -62,35 +67,35 @@
       m_schemaAgent(nullptr) {
   if (savedState.length()) {
     std::unique_ptr<protocol::Value> state =
-        protocol::parseJSON(toString16(savedState));
+        protocol::StringUtil::parseJSON(toString16(savedState));
     if (state) m_state = protocol::DictionaryValue::cast(std::move(state));
     if (!m_state) m_state = protocol::DictionaryValue::create();
   } else {
     m_state = protocol::DictionaryValue::create();
   }
 
-  m_runtimeAgent = wrapUnique(new V8RuntimeAgentImpl(
+  m_runtimeAgent.reset(new V8RuntimeAgentImpl(
       this, this, agentState(protocol::Runtime::Metainfo::domainName)));
   protocol::Runtime::Dispatcher::wire(&m_dispatcher, m_runtimeAgent.get());
 
-  m_debuggerAgent = wrapUnique(new V8DebuggerAgentImpl(
+  m_debuggerAgent.reset(new V8DebuggerAgentImpl(
       this, this, agentState(protocol::Debugger::Metainfo::domainName)));
   protocol::Debugger::Dispatcher::wire(&m_dispatcher, m_debuggerAgent.get());
 
-  m_profilerAgent = wrapUnique(new V8ProfilerAgentImpl(
+  m_profilerAgent.reset(new V8ProfilerAgentImpl(
       this, this, agentState(protocol::Profiler::Metainfo::domainName)));
   protocol::Profiler::Dispatcher::wire(&m_dispatcher, m_profilerAgent.get());
 
-  m_heapProfilerAgent = wrapUnique(new V8HeapProfilerAgentImpl(
+  m_heapProfilerAgent.reset(new V8HeapProfilerAgentImpl(
       this, this, agentState(protocol::HeapProfiler::Metainfo::domainName)));
   protocol::HeapProfiler::Dispatcher::wire(&m_dispatcher,
                                            m_heapProfilerAgent.get());
 
-  m_consoleAgent = wrapUnique(new V8ConsoleAgentImpl(
+  m_consoleAgent.reset(new V8ConsoleAgentImpl(
       this, this, agentState(protocol::Console::Metainfo::domainName)));
   protocol::Console::Dispatcher::wire(&m_dispatcher, m_consoleAgent.get());
 
-  m_schemaAgent = wrapUnique(new V8SchemaAgentImpl(
+  m_schemaAgent.reset(new V8SchemaAgentImpl(
       this, this, agentState(protocol::Schema::Metainfo::domainName)));
   protocol::Schema::Dispatcher::wire(&m_dispatcher, m_schemaAgent.get());
 
@@ -126,13 +131,42 @@
   return state;
 }
 
-void V8InspectorSessionImpl::sendProtocolResponse(int callId,
-                                                  const String16& message) {
-  m_channel->sendProtocolResponse(callId, toStringView(message));
+namespace {
+
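+// Defers serialization of a protocol message until the embedder asks for the
+// string, then caches the serialized form and drops the structured message.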
+class MessageBuffer : public StringBuffer {
+ public:
+  static std::unique_ptr<MessageBuffer> create(
+      std::unique_ptr<protocol::Serializable> message) {
+    return std::unique_ptr<MessageBuffer>(
+        new MessageBuffer(std::move(message)));
+  }
+
+  const StringView& string() override {
+    if (!m_serialized) {
+      m_serialized = StringBuffer::create(toStringView(m_message->serialize()));
+      m_message.reset(nullptr);
+    }
+    return m_serialized->string();
+  }
+
+ private:
+  explicit MessageBuffer(std::unique_ptr<protocol::Serializable> message)
+      : m_message(std::move(message)) {}
+
+  std::unique_ptr<protocol::Serializable> m_message;
+  std::unique_ptr<StringBuffer> m_serialized;
+};
+
+}  // namespace
+
+void V8InspectorSessionImpl::sendProtocolResponse(
+    int callId, std::unique_ptr<protocol::Serializable> message) {
+  m_channel->sendResponse(callId, MessageBuffer::create(std::move(message)));
 }
 
-void V8InspectorSessionImpl::sendProtocolNotification(const String16& message) {
-  m_channel->sendProtocolNotification(toStringView(message));
+void V8InspectorSessionImpl::sendProtocolNotification(
+    std::unique_ptr<protocol::Serializable> message) {
+  m_channel->sendNotification(MessageBuffer::create(std::move(message)));
 }
 
 void V8InspectorSessionImpl::flushProtocolNotifications() {
@@ -266,7 +300,7 @@
                                    const String16& groupName,
                                    bool generatePreview) {
   InjectedScript* injectedScript = nullptr;
-  findInjectedScript(V8Debugger::contextId(context), injectedScript);
+  findInjectedScript(InspectedContext::contextId(context), injectedScript);
   if (!injectedScript) return nullptr;
   std::unique_ptr<protocol::Runtime::RemoteObject> result;
   injectedScript->wrapObject(value, groupName, false, generatePreview, &result);
@@ -278,7 +312,7 @@
                                   v8::Local<v8::Value> table,
                                   v8::Local<v8::Value> columns) {
   InjectedScript* injectedScript = nullptr;
-  findInjectedScript(V8Debugger::contextId(context), injectedScript);
+  findInjectedScript(InspectedContext::contextId(context), injectedScript);
   if (!injectedScript) return nullptr;
   return injectedScript->wrapTable(table, columns);
 }
@@ -305,11 +339,11 @@
 
 void V8InspectorSessionImpl::dispatchProtocolMessage(
     const StringView& message) {
-  m_dispatcher.dispatch(protocol::parseJSON(message));
+  m_dispatcher.dispatch(protocol::StringUtil::parseJSON(message));
 }
 
 std::unique_ptr<StringBuffer> V8InspectorSessionImpl::stateJSON() {
-  String16 json = m_state->toJSONString();
+  String16 json = m_state->serialize();
   return StringBufferImpl::adopt(json);
 }
 
@@ -366,7 +400,8 @@
     const StringView& breakReason, const StringView& breakDetails) {
   m_debuggerAgent->schedulePauseOnNextStatement(
       toString16(breakReason),
-      protocol::DictionaryValue::cast(protocol::parseJSON(breakDetails)));
+      protocol::DictionaryValue::cast(
+          protocol::StringUtil::parseJSON(breakDetails)));
 }
 
 void V8InspectorSessionImpl::cancelPauseOnNextStatement() {
@@ -377,7 +412,8 @@
                                           const StringView& breakDetails) {
   m_debuggerAgent->breakProgram(
       toString16(breakReason),
-      protocol::DictionaryValue::cast(protocol::parseJSON(breakDetails)));
+      protocol::DictionaryValue::cast(
+          protocol::StringUtil::parseJSON(breakDetails)));
 }
 
 void V8InspectorSessionImpl::setSkipAllPauses(bool skip) {
diff --git a/src/inspector/v8-inspector-session-impl.h b/src/inspector/v8-inspector-session-impl.h
index af65aa3..7a59e1c 100644
--- a/src/inspector/v8-inspector-session-impl.h
+++ b/src/inspector/v8-inspector-session-impl.h
@@ -96,8 +96,10 @@
   protocol::DictionaryValue* agentState(const String16& name);
 
   // protocol::FrontendChannel implementation.
-  void sendProtocolResponse(int callId, const String16& message) override;
-  void sendProtocolNotification(const String16& message) override;
+  void sendProtocolResponse(
+      int callId, std::unique_ptr<protocol::Serializable> message) override;
+  void sendProtocolNotification(
+      std::unique_ptr<protocol::Serializable> message) override;
   void flushProtocolNotifications() override;
 
   int m_contextGroupId;
diff --git a/src/inspector/v8-internal-value-type.cc b/src/inspector/v8-internal-value-type.cc
index cde8bc9..46f5dac 100644
--- a/src/inspector/v8-internal-value-type.cc
+++ b/src/inspector/v8-internal-value-type.cc
@@ -4,7 +4,6 @@
 
 #include "src/inspector/v8-internal-value-type.h"
 
-#include "src/inspector/protocol-platform.h"
 #include "src/inspector/string-util.h"
 
 namespace v8_inspector {
diff --git a/src/inspector/v8-profiler-agent-impl.cc b/src/inspector/v8-profiler-agent-impl.cc
index 8b888a0..c7d1cc2 100644
--- a/src/inspector/v8-profiler-agent-impl.cc
+++ b/src/inspector/v8-profiler-agent-impl.cc
@@ -22,6 +22,7 @@
 static const char samplingInterval[] = "samplingInterval";
 static const char userInitiatedProfiling[] = "userInitiatedProfiling";
 static const char profilerEnabled[] = "profilerEnabled";
+static const char preciseCoverageStarted[] = "preciseCoverageStarted";
 }
 
 namespace {
@@ -152,11 +153,8 @@
     protocol::DictionaryValue* state)
     : m_session(session),
       m_isolate(m_session->inspector()->isolate()),
-      m_profiler(nullptr),
       m_state(state),
-      m_frontend(frontendChannel),
-      m_enabled(false),
-      m_recordingCPUProfile(false) {}
+      m_frontend(frontendChannel) {}
 
 V8ProfilerAgentImpl::~V8ProfilerAgentImpl() {
   if (m_profiler) m_profiler->Dispose();
@@ -204,8 +202,6 @@
 Response V8ProfilerAgentImpl::enable() {
   if (m_enabled) return Response::OK();
   m_enabled = true;
-  DCHECK(!m_profiler);
-  m_profiler = v8::CpuProfiler::New(m_isolate);
   m_state->setBoolean(ProfilerAgentState::profilerEnabled, true);
   return Response::OK();
 }
@@ -216,18 +212,18 @@
     stopProfiling(m_startedProfiles[i - 1].m_id, false);
   m_startedProfiles.clear();
   stop(nullptr);
-  m_profiler->Dispose();
-  m_profiler = nullptr;
+  stopPreciseCoverage();
+  DCHECK(!m_profiler);
   m_enabled = false;
   m_state->setBoolean(ProfilerAgentState::profilerEnabled, false);
   return Response::OK();
 }
 
 Response V8ProfilerAgentImpl::setSamplingInterval(int interval) {
-  if (m_recordingCPUProfile)
+  if (m_profiler) {
     return Response::Error("Cannot change sampling interval when profiling.");
+  }
   m_state->setInteger(ProfilerAgentState::samplingInterval, interval);
-  m_profiler->SetSamplingInterval(interval);
   return Response::OK();
 }
 
@@ -237,14 +233,14 @@
     return;
   m_enabled = true;
   DCHECK(!m_profiler);
-  m_profiler = v8::CpuProfiler::New(m_isolate);
-  int interval = 0;
-  m_state->getInteger(ProfilerAgentState::samplingInterval, &interval);
-  if (interval) m_profiler->SetSamplingInterval(interval);
   if (m_state->booleanProperty(ProfilerAgentState::userInitiatedProfiling,
                                false)) {
     start();
   }
+  if (m_state->booleanProperty(ProfilerAgentState::preciseCoverageStarted,
+                               false)) {
+    startPreciseCoverage();
+  }
 }
 
 Response V8ProfilerAgentImpl::start() {
@@ -259,8 +255,9 @@
 
 Response V8ProfilerAgentImpl::stop(
     std::unique_ptr<protocol::Profiler::Profile>* profile) {
-  if (!m_recordingCPUProfile)
+  if (!m_recordingCPUProfile) {
     return Response::Error("No recording profiles found");
+  }
   m_recordingCPUProfile = false;
   std::unique_ptr<protocol::Profiler::Profile> cpuProfile =
       stopProfiling(m_frontendInitiatedProfileId, !!profile);
@@ -273,6 +270,90 @@
   return Response::OK();
 }
 
+Response V8ProfilerAgentImpl::startPreciseCoverage() {
+  if (!m_enabled) return Response::Error("Profiler is not enabled");
+  m_state->setBoolean(ProfilerAgentState::preciseCoverageStarted, true);
+  v8::debug::Coverage::TogglePrecise(m_isolate, true);
+  return Response::OK();
+}
+
+Response V8ProfilerAgentImpl::stopPreciseCoverage() {
+  if (!m_enabled) return Response::Error("Profiler is not enabled");
+  m_state->setBoolean(ProfilerAgentState::preciseCoverageStarted, false);
+  v8::debug::Coverage::TogglePrecise(m_isolate, false);
+  return Response::OK();
+}
+
+namespace {
+Response takeCoverage(
+    v8::Isolate* isolate, bool reset_count,
+    std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
+        out_result) {
+  std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>> result =
+      protocol::Array<protocol::Profiler::ScriptCoverage>::create();
+  v8::HandleScope handle_scope(isolate);
+  v8::debug::Coverage coverage =
+      v8::debug::Coverage::Collect(isolate, reset_count);
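+  // Convert the collected script -> function -> range coverage data into the
+  // corresponding protocol objects.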
+  for (size_t i = 0; i < coverage.ScriptCount(); i++) {
+    v8::debug::Coverage::ScriptData script_data = coverage.GetScriptData(i);
+    v8::Local<v8::debug::Script> script = script_data.GetScript();
+    std::unique_ptr<protocol::Array<protocol::Profiler::FunctionCoverage>>
+        functions =
+            protocol::Array<protocol::Profiler::FunctionCoverage>::create();
+    for (size_t j = 0; j < script_data.FunctionCount(); j++) {
+      v8::debug::Coverage::FunctionData function_data =
+          script_data.GetFunctionData(j);
+      std::unique_ptr<protocol::Array<protocol::Profiler::CoverageRange>>
+          ranges = protocol::Array<protocol::Profiler::CoverageRange>::create();
+      // At this point we only have per-function coverage data, so there is
+      // only one range per function.
+      ranges->addItem(
+          protocol::Profiler::CoverageRange::create()
+              .setStartLineNumber(function_data.Start().GetLineNumber())
+              .setStartColumnNumber(function_data.Start().GetColumnNumber())
+              .setEndLineNumber(function_data.End().GetLineNumber())
+              .setEndColumnNumber(function_data.End().GetColumnNumber())
+              .setCount(function_data.Count())
+              .build());
+      functions->addItem(
+          protocol::Profiler::FunctionCoverage::create()
+              .setFunctionName(toProtocolString(
+                  function_data.Name().FromMaybe(v8::Local<v8::String>())))
+              .setRanges(std::move(ranges))
+              .build());
+    }
+    String16 url;
+    v8::Local<v8::String> name;
+    if (script->Name().ToLocal(&name) || script->SourceURL().ToLocal(&name)) {
+      url = toProtocolString(name);
+    }
+    result->addItem(protocol::Profiler::ScriptCoverage::create()
+                        .setScriptId(String16::fromInteger(script->Id()))
+                        .setUrl(url)
+                        .setFunctions(std::move(functions))
+                        .build());
+  }
+  *out_result = std::move(result);
+  return Response::OK();
+}
+}  // anonymous namespace
+
+Response V8ProfilerAgentImpl::takePreciseCoverage(
+    std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
+        out_result) {
+  if (!m_state->booleanProperty(ProfilerAgentState::preciseCoverageStarted,
+                                false)) {
+    return Response::Error("Precise coverage has not been started.");
+  }
+  return takeCoverage(m_isolate, true, out_result);
+}
+
+Response V8ProfilerAgentImpl::getBestEffortCoverage(
+    std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
+        out_result) {
+  return takeCoverage(m_isolate, false, out_result);
+}
+
 String16 V8ProfilerAgentImpl::nextProfileId() {
   return String16::fromInteger(
       v8::base::NoBarrier_AtomicIncrement(&s_lastProfileId, 1));
@@ -280,6 +361,15 @@
 
 void V8ProfilerAgentImpl::startProfiling(const String16& title) {
   v8::HandleScope handleScope(m_isolate);
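+  // Create the CpuProfiler lazily for the first concurrently started profile
+  // and keep a count, so it can be disposed once the last profile stops.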
+  if (!m_startedProfilesCount) {
+    DCHECK(!m_profiler);
+    m_profiler = v8::CpuProfiler::New(m_isolate);
+    m_profiler->SetIdle(m_idle);
+    int interval =
+        m_state->integerProperty(ProfilerAgentState::samplingInterval, 0);
+    if (interval) m_profiler->SetSamplingInterval(interval);
+  }
+  ++m_startedProfilesCount;
   m_profiler->StartProfiling(toV8String(m_isolate, title), true);
 }
 
@@ -288,29 +378,29 @@
   v8::HandleScope handleScope(m_isolate);
   v8::CpuProfile* profile =
       m_profiler->StopProfiling(toV8String(m_isolate, title));
-  if (!profile) return nullptr;
   std::unique_ptr<protocol::Profiler::Profile> result;
-  if (serialize) result = createCPUProfile(m_isolate, profile);
-  profile->Delete();
+  if (profile) {
+    if (serialize) result = createCPUProfile(m_isolate, profile);
+    profile->Delete();
+  }
+  --m_startedProfilesCount;
+  if (!m_startedProfilesCount) {
+    m_profiler->Dispose();
+    m_profiler = nullptr;
+  }
   return result;
 }
 
-bool V8ProfilerAgentImpl::isRecording() const {
-  return m_recordingCPUProfile || !m_startedProfiles.empty();
-}
-
 bool V8ProfilerAgentImpl::idleStarted() {
-  if (m_profiler) m_profiler->SetIdle(true);
+  m_idle = true;
+  if (m_profiler) m_profiler->SetIdle(m_idle);
   return m_profiler;
 }
 
 bool V8ProfilerAgentImpl::idleFinished() {
-  if (m_profiler) m_profiler->SetIdle(false);
+  m_idle = false;
+  if (m_profiler) m_profiler->SetIdle(m_idle);
   return m_profiler;
 }
 
-void V8ProfilerAgentImpl::collectSample() {
-  if (m_profiler) m_profiler->CollectSample();
-}
-
 }  // namespace v8_inspector
diff --git a/src/inspector/v8-profiler-agent-impl.h b/src/inspector/v8-profiler-agent-impl.h
index a634ff3..c60ff86 100644
--- a/src/inspector/v8-profiler-agent-impl.h
+++ b/src/inspector/v8-profiler-agent-impl.h
@@ -37,14 +37,21 @@
   Response start() override;
   Response stop(std::unique_ptr<protocol::Profiler::Profile>*) override;
 
+  Response startPreciseCoverage() override;
+  Response stopPreciseCoverage() override;
+  Response takePreciseCoverage(
+      std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
+          out_result) override;
+  Response getBestEffortCoverage(
+      std::unique_ptr<protocol::Array<protocol::Profiler::ScriptCoverage>>*
+          out_result) override;
+
   void consoleProfile(const String16& title);
   void consoleProfileEnd(const String16& title);
 
   bool idleStarted();
   bool idleFinished();
 
-  void collectSample();
-
  private:
   String16 nextProfileId();
 
@@ -52,18 +59,18 @@
   std::unique_ptr<protocol::Profiler::Profile> stopProfiling(
       const String16& title, bool serialize);
 
-  bool isRecording() const;
-
   V8InspectorSessionImpl* m_session;
   v8::Isolate* m_isolate;
-  v8::CpuProfiler* m_profiler;
+  v8::CpuProfiler* m_profiler = nullptr;
   protocol::DictionaryValue* m_state;
   protocol::Profiler::Frontend m_frontend;
-  bool m_enabled;
-  bool m_recordingCPUProfile;
+  bool m_enabled = false;
+  bool m_recordingCPUProfile = false;
   class ProfileDescriptor;
   std::vector<ProfileDescriptor> m_startedProfiles;
   String16 m_frontendInitiatedProfileId;
+  bool m_idle = false;
+  int m_startedProfilesCount = 0;
 
   DISALLOW_COPY_AND_ASSIGN(V8ProfilerAgentImpl);
 };
diff --git a/src/inspector/v8-runtime-agent-impl.cc b/src/inspector/v8-runtime-agent-impl.cc
index 4dbe60f..17c8a7b 100644
--- a/src/inspector/v8-runtime-agent-impl.cc
+++ b/src/inspector/v8-runtime-agent-impl.cc
@@ -30,6 +30,7 @@
 
 #include "src/inspector/v8-runtime-agent-impl.h"
 
+#include "src/debug/debug-interface.h"
 #include "src/inspector/injected-script.h"
 #include "src/inspector/inspected-context.h"
 #include "src/inspector/protocol/Protocol.h"
@@ -241,7 +242,7 @@
         inspector->client()->ensureDefaultContextInGroup(contextGroupId);
     if (defaultContext.IsEmpty())
       return Response::Error("Cannot find default execution context");
-    *contextId = V8Debugger::contextId(defaultContext);
+    *contextId = InspectedContext::contextId(defaultContext);
   }
   return Response::OK();
 }
@@ -293,11 +294,11 @@
   if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(true);
 
   v8::MaybeLocal<v8::Value> maybeResultValue;
-  v8::Local<v8::Script> script = m_inspector->compileScript(
-      scope.context(), toV8String(m_inspector->isolate(), expression),
-      String16(), false);
-  if (!script.IsEmpty())
+  v8::Local<v8::Script> script;
+  if (m_inspector->compileScript(scope.context(), expression, String16())
+          .ToLocal(&script)) {
     maybeResultValue = m_inspector->runCompiledScript(scope.context(), script);
+  }
 
   if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(false);
 
@@ -379,10 +380,14 @@
   if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
   if (userGesture.fromMaybe(false)) scope.pretendUserGesture();
 
-  v8::MaybeLocal<v8::Value> maybeFunctionValue =
-      m_inspector->compileAndRunInternalScript(
-          scope.context(),
-          toV8String(m_inspector->isolate(), "(" + expression + ")"));
+  v8::MaybeLocal<v8::Value> maybeFunctionValue;
+  v8::Local<v8::Script> functionScript;
+  if (m_inspector
+          ->compileScript(scope.context(), "(" + expression + ")", String16())
+          .ToLocal(&functionScript)) {
+    maybeFunctionValue =
+        m_inspector->runCompiledScript(scope.context(), functionScript);
+  }
   // Re-initialize after running client's code, as it could have destroyed
   // context or session.
   response = scope.initialize();
@@ -543,11 +548,11 @@
   if (!response.isSuccess()) return response;
 
   if (!persistScript) m_inspector->debugger()->muteScriptParsedEvents();
-  v8::Local<v8::Script> script = m_inspector->compileScript(
-      scope.context(), toV8String(m_inspector->isolate(), expression),
-      sourceURL, false);
+  v8::Local<v8::Script> script;
+  bool isOk = m_inspector->compileScript(scope.context(), expression, sourceURL)
+                  .ToLocal(&script);
   if (!persistScript) m_inspector->debugger()->unmuteScriptParsedEvents();
-  if (script.IsEmpty()) {
+  if (!isOk) {
     if (scope.tryCatch().HasCaught()) {
       response = scope.injectedScript()->createExceptionDetails(
           scope.tryCatch(), String16(), false, exceptionDetails);
@@ -702,7 +707,7 @@
           .build();
   if (!context->auxData().isEmpty())
     description->setAuxData(protocol::DictionaryValue::cast(
-        protocol::parseJSON(context->auxData())));
+        protocol::StringUtil::parseJSON(context->auxData())));
   m_frontend.executionContextCreated(std::move(description));
 }
 
diff --git a/src/inspector/v8-stack-trace-impl.cc b/src/inspector/v8-stack-trace-impl.cc
index 1a38c6d..7d0edef 100644
--- a/src/inspector/v8-stack-trace-impl.cc
+++ b/src/inspector/v8-stack-trace-impl.cc
@@ -5,12 +5,10 @@
 #include "src/inspector/v8-stack-trace-impl.h"
 
 #include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
 #include "src/inspector/v8-debugger.h"
 #include "src/inspector/v8-inspector-impl.h"
-#include "src/inspector/v8-profiler-agent-impl.h"
 
-#include "include/v8-debug.h"
-#include "include/v8-profiler.h"
 #include "include/v8-version.h"
 
 namespace v8_inspector {
@@ -23,7 +21,9 @@
         v8::StackTrace::kScriptId | v8::StackTrace::kScriptNameOrSourceURL |
         v8::StackTrace::kFunctionName);
 
-V8StackTraceImpl::Frame toFrame(v8::Local<v8::StackFrame> frame) {
+V8StackTraceImpl::Frame toFrame(v8::Local<v8::StackFrame> frame,
+                                WasmTranslation* wasmTranslation,
+                                int contextGroupId) {
   String16 scriptId = String16::fromInteger(frame->GetScriptId());
   String16 sourceName;
   v8::Local<v8::String> sourceNameValue(frame->GetScriptNameOrSourceURL());
@@ -35,22 +35,30 @@
   if (!functionNameValue.IsEmpty())
     functionName = toProtocolString(functionNameValue);
 
-  int sourceLineNumber = frame->GetLineNumber();
-  int sourceColumn = frame->GetColumn();
+  int sourceLineNumber = frame->GetLineNumber() - 1;
+  int sourceColumn = frame->GetColumn() - 1;
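+  // Inspector frames are 1-based while the wasm translation works on 0-based
+  // locations, so shift down before translating and back up below.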
+  // TODO(clemensh): Figure out a way to do this translation only right before
+  // sending the stack trace over the wire.
+  if (wasmTranslation)
+    wasmTranslation->TranslateWasmScriptLocationToProtocolLocation(
+        &scriptId, &sourceLineNumber, &sourceColumn);
   return V8StackTraceImpl::Frame(functionName, scriptId, sourceName,
-                                 sourceLineNumber, sourceColumn);
+                                 sourceLineNumber + 1, sourceColumn + 1);
 }
 
 void toFramesVector(v8::Local<v8::StackTrace> stackTrace,
                     std::vector<V8StackTraceImpl::Frame>& frames,
-                    size_t maxStackSize, v8::Isolate* isolate) {
+                    size_t maxStackSize, v8::Isolate* isolate,
+                    V8Debugger* debugger, int contextGroupId) {
   DCHECK(isolate->InContext());
   int frameCount = stackTrace->GetFrameCount();
   if (frameCount > static_cast<int>(maxStackSize))
     frameCount = static_cast<int>(maxStackSize);
+  WasmTranslation* wasmTranslation =
+      debugger ? debugger->wasmTranslation() : nullptr;
   for (int i = 0; i < frameCount; i++) {
     v8::Local<v8::StackFrame> stackFrame = stackTrace->GetFrame(i);
-    frames.push_back(toFrame(stackFrame));
+    frames.push_back(toFrame(stackFrame, wasmTranslation, contextGroupId));
   }
 }
 
@@ -113,7 +121,8 @@
   v8::HandleScope scope(isolate);
   std::vector<V8StackTraceImpl::Frame> frames;
   if (!stackTrace.IsEmpty())
-    toFramesVector(stackTrace, frames, maxStackSize, isolate);
+    toFramesVector(stackTrace, frames, maxStackSize, isolate, debugger,
+                   contextGroupId);
 
   int maxAsyncCallChainDepth = 1;
   V8StackTraceImpl* asyncCallChain = nullptr;
@@ -131,10 +140,13 @@
     maxAsyncCallChainDepth = 1;
   }
 
-  // Only the top stack in the chain may be empty, so ensure that second stack
-  // is non-empty (it's the top of appended chain).
-  if (asyncCallChain && asyncCallChain->isEmpty())
+  // Only the top stack in the chain may be empty and lack a creation stack,
+  // so ensure that the second stack is non-empty (it is the top of the
+  // appended chain).
+  if (asyncCallChain && asyncCallChain->isEmpty() &&
+      !asyncCallChain->m_creation) {
     asyncCallChain = asyncCallChain->m_parent.get();
+  }
 
   if (stackTrace.IsEmpty() && !asyncCallChain) return nullptr;
 
@@ -161,12 +173,6 @@
   v8::HandleScope handleScope(isolate);
   v8::Local<v8::StackTrace> stackTrace;
   if (isolate->InContext()) {
-    if (debugger) {
-      V8InspectorImpl* inspector = debugger->inspector();
-      V8ProfilerAgentImpl* profilerAgent =
-          inspector->enabledProfilerAgentForGroup(contextGroupId);
-      if (profilerAgent) profilerAgent->collectSample();
-    }
     stackTrace = v8::StackTrace::CurrentStackTrace(
         isolate, static_cast<int>(maxStackSize), stackTraceOptions);
   }
@@ -176,16 +182,18 @@
 
 std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::cloneImpl() {
   std::vector<Frame> framesCopy(m_frames);
-  return wrapUnique(
+  std::unique_ptr<V8StackTraceImpl> copy(
       new V8StackTraceImpl(m_contextGroupId, m_description, framesCopy,
                            m_parent ? m_parent->cloneImpl() : nullptr));
+  if (m_creation) copy->setCreation(m_creation->cloneImpl());
+  return copy;
 }
 
 std::unique_ptr<V8StackTrace> V8StackTraceImpl::clone() {
   std::vector<Frame> frames;
   for (size_t i = 0; i < m_frames.size(); i++)
     frames.push_back(m_frames.at(i).clone());
-  return wrapUnique(
+  return std::unique_ptr<V8StackTraceImpl>(
       new V8StackTraceImpl(m_contextGroupId, m_description, frames, nullptr));
 }
 
@@ -201,6 +209,19 @@
 
 V8StackTraceImpl::~V8StackTraceImpl() {}
 
+void V8StackTraceImpl::setCreation(std::unique_ptr<V8StackTraceImpl> creation) {
+  m_creation = std::move(creation);
+  // When this async call chain is empty (it carries no useful schedule
+  // stack), has the same description as its parent, and the parent has no
+  // creation stack of its own, the two chains can be merged,
+  // e.g. for a Promise ThenableJob.
+  if (m_parent && isEmpty() && m_description == m_parent->m_description &&
+      !m_parent->m_creation) {
+    m_frames.swap(m_parent->m_frames);
+    m_parent = std::move(m_parent->m_parent);
+  }
+}
+
 StringView V8StackTraceImpl::topSourceURL() const {
   DCHECK(m_frames.size());
   return toStringView(m_frames[0].m_scriptName);
@@ -239,6 +260,10 @@
           .build();
   if (!m_description.isEmpty()) stackTrace->setDescription(m_description);
   if (m_parent) stackTrace->setParent(m_parent->buildInspectorObjectImpl());
+  if (m_creation && m_creation->m_frames.size()) {
+    stackTrace->setPromiseCreationFrame(
+        m_creation->m_frames[0].buildInspectorObject());
+  }
   return stackTrace;
 }
 
diff --git a/src/inspector/v8-stack-trace-impl.h b/src/inspector/v8-stack-trace-impl.h
index f0a452e..f8b53d0 100644
--- a/src/inspector/v8-stack-trace-impl.h
+++ b/src/inspector/v8-stack-trace-impl.h
@@ -81,6 +81,8 @@
       const override;
   std::unique_ptr<StringBuffer> toString() const override;
 
+  void setCreation(std::unique_ptr<V8StackTraceImpl> creation);
+
  private:
   V8StackTraceImpl(int contextGroupId, const String16& description,
                    std::vector<Frame>& frames,
@@ -90,6 +92,7 @@
   String16 m_description;
   std::vector<Frame> m_frames;
   std::unique_ptr<V8StackTraceImpl> m_parent;
+  std::unique_ptr<V8StackTraceImpl> m_creation;
 
   DISALLOW_COPY_AND_ASSIGN(V8StackTraceImpl);
 };
diff --git a/src/inspector/wasm-translation.cc b/src/inspector/wasm-translation.cc
new file mode 100644
index 0000000..00f1aab
--- /dev/null
+++ b/src/inspector/wasm-translation.cc
@@ -0,0 +1,327 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/wasm-translation.h"
+
+#include <algorithm>
+
+#include "src/debug/debug-interface.h"
+#include "src/inspector/protocol/Debugger.h"
+#include "src/inspector/script-breakpoint.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-debugger-script.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+
+using namespace v8_inspector;
+using namespace v8;
+
+class WasmTranslation::TranslatorImpl {
+ public:
+  struct TransLocation {
+    WasmTranslation* translation;
+    String16 script_id;
+    int line;
+    int column;
+    TransLocation(WasmTranslation* translation, String16 script_id, int line,
+                  int column)
+        : translation(translation),
+          script_id(script_id),
+          line(line),
+          column(column) {}
+  };
+
+  virtual void Init(Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) = 0;
+  virtual void Translate(TransLocation*) = 0;
+  virtual void TranslateBack(TransLocation*) = 0;
+  virtual ~TranslatorImpl() {}
+
+  class RawTranslator;
+  class DisassemblingTranslator;
+};
+
+class WasmTranslation::TranslatorImpl::RawTranslator
+    : public WasmTranslation::TranslatorImpl {
+ public:
+  void Init(Isolate*, WasmTranslation*, V8DebuggerAgentImpl*) {}
+  void Translate(TransLocation*) {}
+  void TranslateBack(TransLocation*) {}
+};
+
+class WasmTranslation::TranslatorImpl::DisassemblingTranslator
+    : public WasmTranslation::TranslatorImpl {
+  using OffsetTable = debug::WasmDisassembly::OffsetTable;
+
+ public:
+  DisassemblingTranslator(Isolate* isolate, Local<debug::WasmScript> script)
+      : script_(isolate, script) {}
+
+  void Init(Isolate* isolate, WasmTranslation* translation,
+            V8DebuggerAgentImpl* agent) override {
+    // Register fake scripts for each function in this wasm module/script.
+    Local<debug::WasmScript> script = script_.Get(isolate);
+    int num_functions = script->NumFunctions();
+    int num_imported_functions = script->NumImportedFunctions();
+    DCHECK_LE(0, num_imported_functions);
+    DCHECK_LE(0, num_functions);
+    DCHECK_GE(num_functions, num_imported_functions);
+    String16 script_id = String16::fromInteger(script->Id());
+    for (int func_idx = num_imported_functions; func_idx < num_functions;
+         ++func_idx) {
+      AddFakeScript(isolate, script_id, func_idx, translation, agent);
+    }
+  }
+
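+  // Maps a V8 wasm location (line = function index, column = byte offset
+  // within the function) to a line/column in the disassembly fake script.
+  // Example (hypothetical offset table {byte 0 -> 1:0, byte 4 -> 2:0}):
+  // byte offset 4 yields line 2, column 0; byte offset 2 has no exact
+  // entry and yields 0:0.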
+  void Translate(TransLocation* loc) override {
+    const OffsetTable& offset_table = GetOffsetTable(loc);
+    DCHECK(!offset_table.empty());
+    uint32_t byte_offset = static_cast<uint32_t>(loc->column);
+
+    // Binary search for the given offset.
+    unsigned left = 0;                                            // inclusive
+    unsigned right = static_cast<unsigned>(offset_table.size());  // exclusive
+    while (right - left > 1) {
+      unsigned mid = (left + right) / 2;
+      if (offset_table[mid].byte_offset <= byte_offset) {
+        left = mid;
+      } else {
+        right = mid;
+      }
+    }
+
+    loc->script_id = GetFakeScriptId(loc);
+    if (offset_table[left].byte_offset == byte_offset) {
+      loc->line = offset_table[left].line;
+      loc->column = offset_table[left].column;
+    } else {
+      loc->line = 0;
+      loc->column = 0;
+    }
+  }
+
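+  // Maps a line/column in the disassembly fake script back to a V8 wasm
+  // location (line = function index, column = byte offset), resolving
+  // inexact columns as described below.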
+  void TranslateBack(TransLocation* loc) override {
+    int func_index = GetFunctionIndexFromFakeScriptId(loc->script_id);
+    const OffsetTable* reverse_table = GetReverseTable(func_index);
+    if (!reverse_table) return;
+    DCHECK(!reverse_table->empty());
+    v8::Isolate* isolate = loc->translation->isolate_;
+
+    // Binary search for the given line and column.
+    unsigned left = 0;                                              // inclusive
+    unsigned right = static_cast<unsigned>(reverse_table->size());  // exclusive
+    while (right - left > 1) {
+      unsigned mid = (left + right) / 2;
+      auto& entry = (*reverse_table)[mid];
+      if (entry.line < loc->line ||
+          (entry.line == loc->line && entry.column <= loc->column)) {
+        left = mid;
+      } else {
+        right = mid;
+      }
+    }
+
+    int found_byte_offset = 0;
+    // If we found an exact match, use it. Otherwise check whether the next
+    // bigger entry is still in the same line. Report that one then.
+    // Otherwise we might have hit the special case of pointing after the last
+    // line, which is translated to the end of the function (one byte after the
+    // last function byte).
+    if ((*reverse_table)[left].line == loc->line &&
+        (*reverse_table)[left].column == loc->column) {
+      found_byte_offset = (*reverse_table)[left].byte_offset;
+    } else if (left + 1 < reverse_table->size() &&
+               (*reverse_table)[left + 1].line == loc->line) {
+      found_byte_offset = (*reverse_table)[left + 1].byte_offset;
+    } else if (left == reverse_table->size() - 1 &&
+               (*reverse_table)[left].line == loc->line - 1 &&
+               loc->column == 0) {
+      std::pair<int, int> func_range =
+          script_.Get(isolate)->GetFunctionRange(func_index);
+      DCHECK_LE(func_range.first, func_range.second);
+      found_byte_offset = func_range.second - func_range.first;
+    }
+
+    loc->script_id = String16::fromInteger(script_.Get(isolate)->Id());
+    loc->line = func_index;
+    loc->column = found_byte_offset;
+  }
+
+ private:
+  String16 GetFakeScriptUrl(v8::Isolate* isolate, int func_index) {
+    Local<debug::WasmScript> script = script_.Get(isolate);
+    String16 script_name = toProtocolString(script->Name().ToLocalChecked());
+    int numFunctions = script->NumFunctions();
+    int numImported = script->NumImportedFunctions();
+    String16Builder builder;
+    builder.appendAll("wasm://wasm/", script_name, '/');
+    if (numFunctions - numImported > 300) {
+      size_t digits = String16::fromInteger(numFunctions - 1).length();
+      String16 thisCategory = String16::fromInteger((func_index / 100) * 100);
+      DCHECK_LE(thisCategory.length(), digits);
+      for (size_t i = thisCategory.length(); i < digits; ++i)
+        builder.append('0');
+      builder.appendAll(thisCategory, '/');
+    }
+    builder.appendAll(script_name, '-');
+    builder.appendNumber(func_index);
+    return builder.toString();
+  }
+
+  String16 GetFakeScriptId(const String16 script_id, int func_index) {
+    return String16::concat(script_id, '-', String16::fromInteger(func_index));
+  }
+  String16 GetFakeScriptId(const TransLocation* loc) {
+    return GetFakeScriptId(loc->script_id, loc->line);
+  }
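+  // Naming example (hypothetical module "mod", script id "42", 1000
+  // functions): function 123 gets fake script id "42-123" and url
+  // "wasm://wasm/mod/100/mod-123"; function 7 gets url
+  // "wasm://wasm/mod/000/mod-7".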
+
+  void AddFakeScript(v8::Isolate* isolate, const String16& underlyingScriptId,
+                     int func_idx, WasmTranslation* translation,
+                     V8DebuggerAgentImpl* agent) {
+    String16 fake_script_id = GetFakeScriptId(underlyingScriptId, func_idx);
+    String16 fake_script_url = GetFakeScriptUrl(isolate, func_idx);
+
+    v8::Local<debug::WasmScript> script = script_.Get(isolate);
+    // TODO(clemensh): Generate disassembly lazily when queried by the frontend.
+    debug::WasmDisassembly disassembly = script->DisassembleFunction(func_idx);
+
+    DCHECK_EQ(0, offset_tables_.count(func_idx));
+    offset_tables_.insert(
+        std::make_pair(func_idx, std::move(disassembly.offset_table)));
+    String16 source(disassembly.disassembly.data(),
+                    disassembly.disassembly.length());
+    std::unique_ptr<V8DebuggerScript> fake_script =
+        V8DebuggerScript::CreateWasm(isolate, translation, script,
+                                     fake_script_id, std::move(fake_script_url),
+                                     source);
+
+    translation->AddFakeScript(fake_script->scriptId(), this);
+    agent->didParseSource(std::move(fake_script), true);
+  }
+
+  int GetFunctionIndexFromFakeScriptId(const String16& fake_script_id) {
+    size_t last_dash_pos = fake_script_id.reverseFind('-');
+    DCHECK_GT(fake_script_id.length(), last_dash_pos);
+    bool ok = true;
+    int func_index = fake_script_id.substring(last_dash_pos + 1).toInteger(&ok);
+    DCHECK(ok);
+    return func_index;
+  }
+
+  const OffsetTable& GetOffsetTable(const TransLocation* loc) {
+    int func_index = loc->line;
+    auto it = offset_tables_.find(func_index);
+    // TODO(clemensh): Once we load disassembly lazily, the offset table
+    // might not be there yet. Load it lazily then.
+    DCHECK(it != offset_tables_.end());
+    return it->second;
+  }
+
+  const OffsetTable* GetReverseTable(int func_index) {
+    auto it = reverse_tables_.find(func_index);
+    if (it != reverse_tables_.end()) return &it->second;
+
+    // Find offset table, copy and sort it to get reverse table.
+    it = offset_tables_.find(func_index);
+    if (it == offset_tables_.end()) return nullptr;
+
+    OffsetTable reverse_table = it->second;
+    // Order by line, column, then byte offset.
+    auto cmp = [](OffsetTable::value_type el1, OffsetTable::value_type el2) {
+      if (el1.line != el2.line) return el1.line < el2.line;
+      if (el1.column != el2.column) return el1.column < el2.column;
+      return el1.byte_offset < el2.byte_offset;
+    };
+    std::sort(reverse_table.begin(), reverse_table.end(), cmp);
+
+    auto inserted = reverse_tables_.insert(
+        std::make_pair(func_index, std::move(reverse_table)));
+    DCHECK(inserted.second);
+    return &inserted.first->second;
+  }
+
+  Global<debug::WasmScript> script_;
+
+  // We expect to disassemble only a subset of the functions, so we store
+  // the offset tables in a map instead of an array.
+  std::unordered_map<int, const OffsetTable> offset_tables_;
+  std::unordered_map<int, const OffsetTable> reverse_tables_;
+};
+
+WasmTranslation::WasmTranslation(v8::Isolate* isolate)
+    : isolate_(isolate), mode_(Disassemble) {}
+
+WasmTranslation::~WasmTranslation() { Clear(); }
+
+void WasmTranslation::AddScript(Local<debug::WasmScript> script,
+                                V8DebuggerAgentImpl* agent) {
+  std::unique_ptr<TranslatorImpl> impl;
+  switch (mode_) {
+    case Raw:
+      impl.reset(new TranslatorImpl::RawTranslator());
+      break;
+    case Disassemble:
+      impl.reset(new TranslatorImpl::DisassemblingTranslator(isolate_, script));
+      break;
+  }
+  DCHECK(impl);
+  auto inserted =
+      wasm_translators_.insert(std::make_pair(script->Id(), std::move(impl)));
+  // Check that no mapping for this script id existed before.
+  DCHECK(inserted.second);
+  // impl has been moved, use the returned iterator to call Init.
+  inserted.first->second->Init(isolate_, this, agent);
+}
+
+void WasmTranslation::Clear() {
+  wasm_translators_.clear();
+  fake_scripts_.clear();
+}
+
+// Translation "forward" (to artificial scripts).
+bool WasmTranslation::TranslateWasmScriptLocationToProtocolLocation(
+    String16* script_id, int* line_number, int* column_number) {
+  DCHECK(script_id && line_number && column_number);
+  bool ok = true;
+  int script_id_int = script_id->toInteger(&ok);
+  if (!ok) return false;
+
+  auto it = wasm_translators_.find(script_id_int);
+  if (it == wasm_translators_.end()) return false;
+  TranslatorImpl* translator = it->second.get();
+
+  TranslatorImpl::TransLocation trans_loc(this, std::move(*script_id),
+                                          *line_number, *column_number);
+  translator->Translate(&trans_loc);
+
+  *script_id = std::move(trans_loc.script_id);
+  *line_number = trans_loc.line;
+  *column_number = trans_loc.column;
+
+  return true;
+}
+
+// Translation "backward" (from artificial to real scripts).
+bool WasmTranslation::TranslateProtocolLocationToWasmScriptLocation(
+    String16* script_id, int* line_number, int* column_number) {
+  auto it = fake_scripts_.find(*script_id);
+  if (it == fake_scripts_.end()) return false;
+  TranslatorImpl* translator = it->second;
+
+  TranslatorImpl::TransLocation trans_loc(this, std::move(*script_id),
+                                          *line_number, *column_number);
+  translator->TranslateBack(&trans_loc);
+
+  *script_id = std::move(trans_loc.script_id);
+  *line_number = trans_loc.line;
+  *column_number = trans_loc.column;
+
+  return true;
+}
+
+void WasmTranslation::AddFakeScript(const String16& scriptId,
+                                    TranslatorImpl* translator) {
+  DCHECK_EQ(0, fake_scripts_.count(scriptId));
+  fake_scripts_.insert(std::make_pair(scriptId, translator));
+}
diff --git a/src/inspector/wasm-translation.h b/src/inspector/wasm-translation.h
new file mode 100644
index 0000000..2162ede
--- /dev/null
+++ b/src/inspector/wasm-translation.h
@@ -0,0 +1,75 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_WASMTRANSLATION_H_
+#define V8_INSPECTOR_WASMTRANSLATION_H_
+
+#include <unordered_map>
+
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/debug/debug-interface.h"
+#include "src/inspector/string-16.h"
+
+namespace v8_inspector {
+
+// Forward declarations.
+class V8DebuggerAgentImpl;
+
+class WasmTranslation {
+ public:
+  enum Mode { Raw, Disassemble };
+
+  explicit WasmTranslation(v8::Isolate* isolate);
+  ~WasmTranslation();
+
+  // Set the translation mode: Raw leaves wasm locations untouched, while
+  // Disassemble maps them onto per-function disassembly scripts.
+  void SetMode(Mode mode) { mode_ = mode; }
+
+  // Make a wasm script known to the translation. This will trigger a number of
+  // didParseSource calls to the given debugger agent.
+  // Only locations referencing a registered script will be translated by the
+  // Translate functions below.
+  void AddScript(v8::Local<v8::debug::WasmScript> script,
+                 V8DebuggerAgentImpl* agent);
+
+  // Clear all registered scripts.
+  void Clear();
+
+  // Translate a location as generated by V8 to a location that should be sent
+  // over protocol.
+  // Does nothing for locations referencing a script which was not registered
+  // before via AddScript.
+  // Line and column are 0-based.
+  // Returns true if the location was translated, false otherwise.
+  bool TranslateWasmScriptLocationToProtocolLocation(String16* script_id,
+                                                     int* line_number,
+                                                     int* column_number);
+
+  // Translate back from protocol locations (potentially referencing artificial
+  // scripts for individual wasm functions) to locations that make sense to V8.
+  // Does nothing if the location was not generated by the translate method
+  // above.
+  // Returns true if the location was translated, false otherwise.
+  bool TranslateProtocolLocationToWasmScriptLocation(String16* script_id,
+                                                     int* line_number,
+                                                     int* column_number);
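+
+  // Example round trip (hypothetical ids): V8 reports script "42",
+  // line 3, column 17 (function 3, byte offset 17); forward translation
+  // rewrites this to fake script "42-3" with the corresponding
+  // disassembly line/column, and backward translation restores it.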
+
+ private:
+  class TranslatorImpl;
+  friend class TranslatorImpl;
+
+  void AddFakeScript(const String16& scriptId, TranslatorImpl* translator);
+
+  v8::Isolate* isolate_;
+  std::unordered_map<int, std::unique_ptr<TranslatorImpl>> wasm_translators_;
+  std::unordered_map<String16, TranslatorImpl*> fake_scripts_;
+  Mode mode_;
+
+  DISALLOW_COPY_AND_ASSIGN(WasmTranslation);
+};
+
+}  // namespace v8_inspector
+
+#endif  // V8_INSPECTOR_WASMTRANSLATION_H_
diff --git a/src/interface-descriptors.cc b/src/interface-descriptors.cc
index d14b1a1..d77b137 100644
--- a/src/interface-descriptors.cc
+++ b/src/interface-descriptors.cc
@@ -74,6 +74,30 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {TargetRegister(), NewTargetRegister()};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+const Register FastNewObjectDescriptor::TargetRegister() {
+  return kJSFunctionRegister;
+}
+
+const Register FastNewObjectDescriptor::NewTargetRegister() {
+  return kJavaScriptCallNewTargetRegister;
+}
+
+void FastNewArgumentsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {TargetRegister()};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+const Register FastNewArgumentsDescriptor::TargetRegister() {
+  return kJSFunctionRegister;
+}
+
 void LoadDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
   // kReceiver, kName, kSlot
@@ -90,24 +114,41 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void LoadFieldDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kReceiver, kSmiHandler
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
+}
+
+void LoadFieldDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {ReceiverRegister(), SmiHandlerRegister()};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 void LoadGlobalDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
-  // kSlot
-  MachineType machine_types[] = {MachineType::TaggedSigned()};
+  // kName, kSlot
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::TaggedSigned()};
   data->InitializePlatformIndependent(arraysize(machine_types), 0,
                                       machine_types);
 }
 
 void LoadGlobalDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {LoadWithVectorDescriptor::SlotRegister()};
+  Register registers[] = {NameRegister(), SlotRegister()};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
 void LoadGlobalWithVectorDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
-  // kSlot, kVector
-  MachineType machine_types[] = {MachineType::TaggedSigned(),
+  // kName, kSlot, kVector
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::TaggedSigned(),
                                  MachineType::AnyTagged()};
   data->InitializePlatformIndependent(arraysize(machine_types), 0,
                                       machine_types);
@@ -115,8 +156,7 @@
 
 void LoadGlobalWithVectorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {LoadWithVectorDescriptor::SlotRegister(),
-                          LoadWithVectorDescriptor::VectorRegister()};
+  Register registers[] = {NameRegister(), SlotRegister(), VectorRegister()};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -183,6 +223,35 @@
   data->InitializePlatformSpecific(len, registers);
 }
 
+void StringCharAtDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kReceiver, kPosition
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::IntPtr()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
+}
+
+void StringCharAtDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
+void StringCharCodeAtDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kReceiver, kPosition
+  // TODO(turbofan): Allow builtins to return untagged values.
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::IntPtr()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
+}
+
+void StringCharCodeAtDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
 void StringCompareDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {LeftRegister(), RightRegister()};
@@ -207,6 +276,19 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+const Register LoadFieldDescriptor::ReceiverRegister() {
+  // Reuse the LoadDescriptor register: given how LoadFieldDescriptor is
+  // used, it doesn't matter exactly which registers pass the parameters.
+  return LoadDescriptor::ReceiverRegister();
+}
+const Register LoadFieldDescriptor::SmiHandlerRegister() {
+  // Reuse the LoadDescriptor register: given how LoadFieldDescriptor is
+  // used, it doesn't matter exactly which registers pass the parameters.
+  return LoadDescriptor::NameRegister();
+}
+
 void LoadWithVectorDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
   // kReceiver, kName, kSlot, kVector
@@ -293,6 +375,18 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void NewArgumentsElementsDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  MachineType const kMachineTypes[] = {MachineType::IntPtr()};
+  data->InitializePlatformIndependent(arraysize(kMachineTypes), 0,
+                                      kMachineTypes);
+}
+
+void NewArgumentsElementsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  DefaultInitializePlatformSpecific(data, 1);
+}
+
 void VarArgFunctionDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
   // kActualArgumentsCount
@@ -349,6 +443,15 @@
                                       machine_types);
 }
 
+void CallForwardVarargsDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kTarget, kStartIndex
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::Int32()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
+}
+
 void ConstructStubDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
   // kFunction, kNewTarget, kActualArgumentsCount, kAllocationSite
@@ -368,36 +471,38 @@
                                       machine_types);
 }
 
-void CallFunctionWithFeedbackDescriptor::InitializePlatformIndependent(
+void CallICDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
-  // kFunction, kSlot
-  MachineType machine_types[] = {MachineType::AnyTagged(),
-                                 MachineType::TaggedSigned()};
+  // kTarget, kActualArgumentsCount, kSlot, kVector
+  MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
+                                 MachineType::Int32(),
+                                 MachineType::AnyTagged()};
   data->InitializePlatformIndependent(arraysize(machine_types), 0,
                                       machine_types);
 }
 
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformIndependent(
+void CallICTrampolineDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
-  // kFunction, kActualArgumentsCount, kSlot, kVector
-  MachineType machine_types[] = {
-      MachineType::TaggedPointer(), MachineType::Int32(),
-      MachineType::TaggedSigned(), MachineType::AnyTagged()};
+  // kTarget, kActualArgumentsCount, kSlot
+  MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
+                                 MachineType::Int32()};
   data->InitializePlatformIndependent(arraysize(machine_types), 0,
                                       machine_types);
 }
 
 void BuiltinDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
-  MachineType machine_types[] = {MachineType::AnyTagged(),
-                                 MachineType::Int32()};
+  // kTarget, kNewTarget, kArgumentsCount
+  MachineType machine_types[] = {
+      MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::Int32()};
   data->InitializePlatformIndependent(arraysize(machine_types), 0,
                                       machine_types);
 }
 
 void BuiltinDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {NewTargetRegister(), ArgumentsCountRegister()};
+  Register registers[] = {TargetRegister(), NewTargetRegister(),
+                          ArgumentsCountRegister()};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -408,6 +513,20 @@
   return kJavaScriptCallNewTargetRegister;
 }
 
+const Register BuiltinDescriptor::TargetRegister() {
+  return kJSFunctionRegister;
+}
+
+void ArrayConstructorDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+  MachineType machine_types[] = {MachineType::AnyTagged(),
+                                 MachineType::AnyTagged(), MachineType::Int32(),
+                                 MachineType::AnyTagged()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
+}
+
 void ArrayNoArgumentConstructorDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
   // kFunction, kAllocationSite, kActualArgumentsCount, kFunctionParameter
@@ -432,9 +551,8 @@
 void ArrayNArgumentsConstructorDescriptor::InitializePlatformIndependent(
     CallInterfaceDescriptorData* data) {
   // kFunction, kAllocationSite, kActualArgumentsCount
-  MachineType machine_types[] = {MachineType::TaggedPointer(),
-                                 MachineType::AnyTagged(),
-                                 MachineType::Int32()};
+  MachineType machine_types[] = {
+      MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::Int32()};
   data->InitializePlatformIndependent(arraysize(machine_types), 0,
                                       machine_types);
 }
@@ -464,7 +582,7 @@
   // kAccumulator, kBytecodeOffset, kBytecodeArray, kDispatchTable
   MachineType machine_types[] = {
       MachineType::AnyTagged(), MachineType::IntPtr(), MachineType::AnyTagged(),
-      MachineType::AnyTagged()};
+      MachineType::IntPtr()};
   data->InitializePlatformIndependent(arraysize(machine_types), 0,
                                       machine_types);
 }
@@ -508,5 +626,13 @@
                                       machine_types);
 }
 
+void FrameDropperTrampolineDescriptor::InitializePlatformIndependent(
+    CallInterfaceDescriptorData* data) {
+  // New FP value.
+  MachineType machine_types[] = {MachineType::Pointer()};
+  data->InitializePlatformIndependent(arraysize(machine_types), 0,
+                                      machine_types);
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interface-descriptors.h b/src/interface-descriptors.h
index 3b49041..4cf5c29 100644
--- a/src/interface-descriptors.h
+++ b/src/interface-descriptors.h
@@ -21,6 +21,7 @@
   V(ContextOnly)                          \
   V(Load)                                 \
   V(LoadWithVector)                       \
+  V(LoadField)                            \
   V(LoadICProtoArray)                     \
   V(LoadGlobal)                           \
   V(LoadGlobalWithVector)                 \
@@ -32,9 +33,7 @@
   V(FastNewClosure)                       \
   V(FastNewFunctionContext)               \
   V(FastNewObject)                        \
-  V(FastNewRestParameter)                 \
-  V(FastNewSloppyArguments)               \
-  V(FastNewStrictArguments)               \
+  V(FastNewArguments)                     \
   V(TypeConversion)                       \
   V(Typeof)                               \
   V(FastCloneRegExp)                      \
@@ -43,27 +42,22 @@
   V(CreateAllocationSite)                 \
   V(CreateWeakCell)                       \
   V(CallFunction)                         \
-  V(CallFunctionWithFeedback)             \
-  V(CallFunctionWithFeedbackAndVector)    \
+  V(CallIC)                               \
+  V(CallICTrampoline)                     \
+  V(CallForwardVarargs)                   \
   V(CallConstruct)                        \
   V(CallTrampoline)                       \
   V(ConstructStub)                        \
   V(ConstructTrampoline)                  \
   V(RegExpExec)                           \
+  V(RegExpReplace)                        \
+  V(RegExpSplit)                          \
   V(CopyFastSmiOrObjectElements)          \
   V(TransitionElementsKind)               \
   V(AllocateHeapNumber)                   \
-  V(AllocateFloat32x4)                    \
-  V(AllocateInt32x4)                      \
-  V(AllocateUint32x4)                     \
-  V(AllocateBool32x4)                     \
-  V(AllocateInt16x8)                      \
-  V(AllocateUint16x8)                     \
-  V(AllocateBool16x8)                     \
-  V(AllocateInt8x16)                      \
-  V(AllocateUint8x16)                     \
-  V(AllocateBool8x16)                     \
   V(Builtin)                              \
+  V(ArrayConstructor)                     \
+  V(ForEach)                              \
   V(ArrayNoArgumentConstructor)           \
   V(ArraySingleArgumentConstructor)       \
   V(ArrayNArgumentsConstructor)           \
@@ -73,12 +67,18 @@
   V(BinaryOpWithVector)                   \
   V(CountOp)                              \
   V(StringAdd)                            \
+  V(StringCharAt)                         \
+  V(StringCharCodeAt)                     \
   V(StringCompare)                        \
+  V(StringIndexOf)                        \
   V(SubString)                            \
   V(Keyed)                                \
   V(Named)                                \
+  V(CreateIterResultObject)               \
   V(HasProperty)                          \
   V(ForInFilter)                          \
+  V(ForInNext)                            \
+  V(ForInPrepare)                         \
   V(GetProperty)                          \
   V(CallHandler)                          \
   V(ArgumentAdaptor)                      \
@@ -87,12 +87,16 @@
   V(MathPowTagged)                        \
   V(MathPowInteger)                       \
   V(GrowArrayElements)                    \
+  V(NewArgumentsElements)                 \
   V(InterpreterDispatch)                  \
   V(InterpreterPushArgsAndCall)           \
   V(InterpreterPushArgsAndConstruct)      \
   V(InterpreterPushArgsAndConstructArray) \
   V(InterpreterCEntry)                    \
-  V(ResumeGenerator)
+  V(ResumeGenerator)                      \
+  V(FrameDropperTrampoline)               \
+  V(PromiseHandleReject)                  \
+  V(WasmRuntimeCall)
 
 class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
  public:
@@ -157,8 +161,7 @@
   };
 };
 
-
-class CallInterfaceDescriptor {
+class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
  public:
   CallInterfaceDescriptor() : data_(NULL) {}
   virtual ~CallInterfaceDescriptor() {}
@@ -283,6 +286,41 @@
     kContext = kParameterCount /* implicit parameter */ \
   };
 
+#define DECLARE_BUILTIN_DESCRIPTOR(name)                                \
+  DECLARE_DESCRIPTOR_WITH_BASE(name, BuiltinDescriptor)                 \
+ protected:                                                             \
+  void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
+      override {                                                        \
+    MachineType machine_types[] = {MachineType::AnyTagged(),            \
+                                   MachineType::AnyTagged(),            \
+                                   MachineType::Int32()};               \
+    int argc = kStackParameterCount + 1 - arraysize(machine_types);     \
+    data->InitializePlatformIndependent(arraysize(machine_types), argc, \
+                                        machine_types);                 \
+  }                                                                     \
+  void InitializePlatformSpecific(CallInterfaceDescriptorData* data)    \
+      override {                                                        \
+    Register registers[] = {TargetRegister(), NewTargetRegister(),      \
+                            ArgumentsCountRegister()};                  \
+    data->InitializePlatformSpecific(arraysize(registers), registers);  \
+  }                                                                     \
+                                                                        \
+ public:
+
+#define DEFINE_BUILTIN_PARAMETERS(...)                             \
+  enum ParameterIndices {                                          \
+    kReceiver,                                                     \
+    kBeforeFirstStackParameter = kReceiver,                        \
+    __VA_ARGS__,                                                   \
+    kAfterLastStackParameter,                                      \
+    kNewTarget = kAfterLastStackParameter,                         \
+    kArgumentsCount,                                               \
+    kContext, /* implicit parameter */                             \
+    kParameterCount = kContext,                                    \
+    kStackParameterCount =                                         \
+        kAfterLastStackParameter - kBeforeFirstStackParameter - 1, \
+  };
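+
+// For instance, ForEachDescriptor's DEFINE_BUILTIN_PARAMETERS(kCallback,
+// kThisArg) expands to kReceiver = 0, kCallback = 1, kThisArg = 2,
+// kNewTarget = 3, kArgumentsCount = 4, kContext = kParameterCount = 5,
+// and kStackParameterCount = 2.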
+
 class VoidDescriptor : public CallInterfaceDescriptor {
  public:
   DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
@@ -305,12 +343,28 @@
   static const Register SlotRegister();
 };
 
+// LoadFieldDescriptor is used by the shared handler that loads a field from an
+// object based on the smi-encoded field description.
+class LoadFieldDescriptor : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kReceiver, kSmiHandler)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadFieldDescriptor,
+                                               CallInterfaceDescriptor)
+
+  static const Register ReceiverRegister();
+  static const Register SmiHandlerRegister();
+};
+
 class LoadGlobalDescriptor : public CallInterfaceDescriptor {
  public:
-  DEFINE_PARAMETERS(kSlot)
+  DEFINE_PARAMETERS(kName, kSlot)
   DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalDescriptor,
                                                CallInterfaceDescriptor)
 
+  static const Register NameRegister() {
+    return LoadDescriptor::NameRegister();
+  }
+
   static const Register SlotRegister() {
     return LoadDescriptor::SlotRegister();
   }
@@ -401,7 +455,7 @@
 
 class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
  public:
-  DEFINE_PARAMETERS(kSlot, kVector)
+  DEFINE_PARAMETERS(kName, kSlot, kVector)
   DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalWithVectorDescriptor,
                                                LoadGlobalDescriptor)
 
@@ -412,6 +466,7 @@
 
 class FastNewClosureDescriptor : public CallInterfaceDescriptor {
  public:
+  DEFINE_PARAMETERS(kSharedFunctionInfo, kVector, kSlot)
   DECLARE_DESCRIPTOR(FastNewClosureDescriptor, CallInterfaceDescriptor)
 };
 
@@ -427,24 +482,17 @@
 
 class FastNewObjectDescriptor : public CallInterfaceDescriptor {
  public:
+  DEFINE_PARAMETERS(kTarget, kNewTarget)
   DECLARE_DESCRIPTOR(FastNewObjectDescriptor, CallInterfaceDescriptor)
+  static const Register TargetRegister();
+  static const Register NewTargetRegister();
 };
 
-class FastNewRestParameterDescriptor : public CallInterfaceDescriptor {
+class FastNewArgumentsDescriptor : public CallInterfaceDescriptor {
  public:
-  DECLARE_DESCRIPTOR(FastNewRestParameterDescriptor, CallInterfaceDescriptor)
-};
-
-class FastNewSloppyArgumentsDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR(FastNewSloppyArgumentsDescriptor,
-                     CallInterfaceDescriptor)
-};
-
-class FastNewStrictArgumentsDescriptor : public CallInterfaceDescriptor {
- public:
-  DECLARE_DESCRIPTOR(FastNewStrictArgumentsDescriptor,
-                     CallInterfaceDescriptor)
+  DEFINE_PARAMETERS(kFunction)
+  DECLARE_DESCRIPTOR(FastNewArgumentsDescriptor, CallInterfaceDescriptor)
+  static const Register TargetRegister();
 };
 
 class TypeConversionDescriptor final : public CallInterfaceDescriptor {
@@ -455,6 +503,13 @@
   static const Register ArgumentRegister();
 };
 
+class CreateIterResultObjectDescriptor final : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kValue, kDone)
+  DECLARE_DEFAULT_DESCRIPTOR(CreateIterResultObjectDescriptor,
+                             CallInterfaceDescriptor, kParameterCount)
+};
+
 class HasPropertyDescriptor final : public CallInterfaceDescriptor {
  public:
   DEFINE_PARAMETERS(kKey, kObject)
@@ -469,6 +524,20 @@
                              kParameterCount)
 };
 
+class ForInNextDescriptor final : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kObject, kCacheArray, kCacheType, kIndex)
+  DECLARE_DEFAULT_DESCRIPTOR(ForInNextDescriptor, CallInterfaceDescriptor,
+                             kParameterCount)
+};
+
+class ForInPrepareDescriptor final : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kObject)
+  DECLARE_DEFAULT_DESCRIPTOR(ForInPrepareDescriptor, CallInterfaceDescriptor,
+                             kParameterCount)
+};
+
 class GetPropertyDescriptor final : public CallInterfaceDescriptor {
  public:
   DEFINE_PARAMETERS(kObject, kKey)
@@ -528,6 +597,12 @@
                                                CallInterfaceDescriptor)
 };
 
+class CallForwardVarargsDescriptor : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kTarget, kStartIndex)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallForwardVarargsDescriptor,
+                                               CallInterfaceDescriptor)
+};
 
 class ConstructStubDescriptor : public CallInterfaceDescriptor {
  public:
@@ -551,24 +626,20 @@
   DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
 };
 
-
-class CallFunctionWithFeedbackDescriptor : public CallInterfaceDescriptor {
+class CallICDescriptor : public CallInterfaceDescriptor {
  public:
-  DEFINE_PARAMETERS(kFunction, kSlot)
-  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
-      CallFunctionWithFeedbackDescriptor, CallInterfaceDescriptor)
+  DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kSlot, kVector)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallICDescriptor,
+                                               CallInterfaceDescriptor)
 };
 
-
-class CallFunctionWithFeedbackAndVectorDescriptor
-    : public CallInterfaceDescriptor {
+class CallICTrampolineDescriptor : public CallInterfaceDescriptor {
  public:
-  DEFINE_PARAMETERS(kFunction, kActualArgumentsCount, kSlot, kVector)
-  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
-      CallFunctionWithFeedbackAndVectorDescriptor, CallInterfaceDescriptor)
+  DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kSlot)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallICTrampolineDescriptor,
+                                               CallInterfaceDescriptor)
 };
 
-
 class CallConstructDescriptor : public CallInterfaceDescriptor {
  public:
   DECLARE_DESCRIPTOR(CallConstructDescriptor, CallInterfaceDescriptor)
@@ -581,6 +652,20 @@
                                      CallInterfaceDescriptor)
 };
 
+class RegExpReplaceDescriptor : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kReceiver, kString, kReplaceValue)
+  DECLARE_DEFAULT_DESCRIPTOR(RegExpReplaceDescriptor, CallInterfaceDescriptor,
+                             kParameterCount)
+};
+
+class RegExpSplitDescriptor : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kReceiver, kString, kLimit)
+  DECLARE_DEFAULT_DESCRIPTOR(RegExpSplitDescriptor, CallInterfaceDescriptor,
+                             kParameterCount)
+};
+
 class CopyFastSmiOrObjectElementsDescriptor : public CallInterfaceDescriptor {
  public:
   DEFINE_PARAMETERS(kObject)
@@ -600,21 +685,28 @@
   DECLARE_DESCRIPTOR(AllocateHeapNumberDescriptor, CallInterfaceDescriptor)
 };
 
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type)         \
-  class Allocate##Type##Descriptor : public CallInterfaceDescriptor {       \
-   public:                                                                  \
-    DECLARE_DESCRIPTOR(Allocate##Type##Descriptor, CallInterfaceDescriptor) \
-  };
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
-
 class BuiltinDescriptor : public CallInterfaceDescriptor {
  public:
+  // TODO(ishell): Where is kFunction??
   DEFINE_PARAMETERS(kNewTarget, kArgumentsCount)
   DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(BuiltinDescriptor,
                                                CallInterfaceDescriptor)
   static const Register ArgumentsCountRegister();
   static const Register NewTargetRegister();
+  static const Register TargetRegister();
+};
+
+class ForEachDescriptor : public BuiltinDescriptor {
+ public:
+  DEFINE_BUILTIN_PARAMETERS(kCallback, kThisArg)
+  DECLARE_BUILTIN_DESCRIPTOR(ForEachDescriptor)
+};
+
+class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ArrayConstructorDescriptor,
+                                               CallInterfaceDescriptor)
 };
 
 class ArrayNoArgumentConstructorDescriptor : public CallInterfaceDescriptor {
@@ -681,6 +773,19 @@
   DECLARE_DESCRIPTOR(StringAddDescriptor, CallInterfaceDescriptor)
 };
 
+class StringCharAtDescriptor final : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kReceiver, kPosition)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringCharAtDescriptor,
+                                               CallInterfaceDescriptor)
+};
+
+class StringCharCodeAtDescriptor final : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kReceiver, kPosition)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringCharCodeAtDescriptor,
+                                               CallInterfaceDescriptor)
+};
 
 class StringCompareDescriptor : public CallInterfaceDescriptor {
  public:
@@ -698,6 +803,13 @@
                                      CallInterfaceDescriptor)
 };
 
+class StringIndexOfDescriptor final : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kReceiver, kSearchString, kPosition)
+  DECLARE_DEFAULT_DESCRIPTOR(StringIndexOfDescriptor, CallInterfaceDescriptor,
+                             kParameterCount)
+};
+
 // TODO(ishell): not used, remove.
 class KeyedDescriptor : public CallInterfaceDescriptor {
  public:
@@ -775,7 +887,15 @@
   static const Register KeyRegister();
 };
 
-class InterpreterDispatchDescriptor : public CallInterfaceDescriptor {
+class NewArgumentsElementsDescriptor final : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kFormalParameterCount)
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(NewArgumentsElementsDescriptor,
+                                               CallInterfaceDescriptor)
+};
+
+class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
+    : public CallInterfaceDescriptor {
  public:
   DEFINE_PARAMETERS(kAccumulator, kBytecodeOffset, kBytecodeArray,
                     kDispatchTable)
@@ -821,6 +941,24 @@
   DECLARE_DESCRIPTOR(ResumeGeneratorDescriptor, CallInterfaceDescriptor)
 };
 
+class FrameDropperTrampolineDescriptor final : public CallInterfaceDescriptor {
+  DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(FrameDropperTrampolineDescriptor,
+                                               CallInterfaceDescriptor)
+};
+
+class PromiseHandleRejectDescriptor final : public CallInterfaceDescriptor {
+ public:
+  DEFINE_PARAMETERS(kPromise, kOnReject, kException)
+  DECLARE_DEFAULT_DESCRIPTOR(PromiseHandleRejectDescriptor,
+                             CallInterfaceDescriptor, kParameterCount)
+};
+
+class WasmRuntimeCallDescriptor final : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DEFAULT_DESCRIPTOR(WasmRuntimeCallDescriptor, CallInterfaceDescriptor,
+                             0)
+};
+
 #undef DECLARE_DESCRIPTOR_WITH_BASE
 #undef DECLARE_DESCRIPTOR
 #undef DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE
diff --git a/src/interpreter/OWNERS b/src/interpreter/OWNERS
index 4e6a721..0f2165c 100644
--- a/src/interpreter/OWNERS
+++ b/src/interpreter/OWNERS
@@ -1,6 +1,7 @@
 set noparent
 
 bmeurer@chromium.org
+leszeks@chromium.org
 mstarzinger@chromium.org
 mythria@chromium.org
 rmcilroy@chromium.org
diff --git a/src/interpreter/bytecode-array-accessor.cc b/src/interpreter/bytecode-array-accessor.cc
new file mode 100644
index 0000000..cc67775
--- /dev/null
+++ b/src/interpreter/bytecode-array-accessor.cc
@@ -0,0 +1,208 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-accessor.h"
+
+#include "src/interpreter/bytecode-decoder.h"
+#include "src/interpreter/interpreter-intrinsics.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeArrayAccessor::BytecodeArrayAccessor(
+    Handle<BytecodeArray> bytecode_array, int initial_offset)
+    : bytecode_array_(bytecode_array),
+      bytecode_offset_(initial_offset),
+      operand_scale_(OperandScale::kSingle),
+      prefix_offset_(0) {
+  UpdateOperandScale();
+}
+
+void BytecodeArrayAccessor::SetOffset(int offset) {
+  bytecode_offset_ = offset;
+  UpdateOperandScale();
+}
+
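+// A Wide or ExtraWide prefix bytecode scales the operands of the bytecode
+// that follows it; in that case the accessor records the scale and skips
+// one prefix byte when reading the actual bytecode and its operands.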
+void BytecodeArrayAccessor::UpdateOperandScale() {
+  if (OffsetInBounds()) {
+    uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
+    Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+    if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
+      operand_scale_ =
+          Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
+      prefix_offset_ = 1;
+    } else {
+      operand_scale_ = OperandScale::kSingle;
+      prefix_offset_ = 0;
+    }
+  }
+}
+
+bool BytecodeArrayAccessor::OffsetInBounds() const {
+  return bytecode_offset_ >= 0 && bytecode_offset_ < bytecode_array()->length();
+}
+
+Bytecode BytecodeArrayAccessor::current_bytecode() const {
+  DCHECK(OffsetInBounds());
+  uint8_t current_byte =
+      bytecode_array()->get(bytecode_offset_ + current_prefix_offset());
+  Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+  DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
+  return current_bytecode;
+}
+
+int BytecodeArrayAccessor::current_bytecode_size() const {
+  return current_prefix_offset() +
+         Bytecodes::Size(current_bytecode(), current_operand_scale());
+}
+
+uint32_t BytecodeArrayAccessor::GetUnsignedOperand(
+    int operand_index, OperandType operand_type) const {
+  DCHECK_GE(operand_index, 0);
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+  DCHECK_EQ(operand_type,
+            Bytecodes::GetOperandType(current_bytecode(), operand_index));
+  DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+  const uint8_t* operand_start =
+      bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+      current_prefix_offset() +
+      Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+                                  current_operand_scale());
+  return BytecodeDecoder::DecodeUnsignedOperand(operand_start, operand_type,
+                                                current_operand_scale());
+}
+
+int32_t BytecodeArrayAccessor::GetSignedOperand(
+    int operand_index, OperandType operand_type) const {
+  DCHECK_GE(operand_index, 0);
+  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+  DCHECK_EQ(operand_type,
+            Bytecodes::GetOperandType(current_bytecode(), operand_index));
+  DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+  const uint8_t* operand_start =
+      bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+      current_prefix_offset() +
+      Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+                                  current_operand_scale());
+  return BytecodeDecoder::DecodeSignedOperand(operand_start, operand_type,
+                                              current_operand_scale());
+}
+
+uint32_t BytecodeArrayAccessor::GetFlagOperand(int operand_index) const {
+  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+            OperandType::kFlag8);
+  return GetUnsignedOperand(operand_index, OperandType::kFlag8);
+}
+
+uint32_t BytecodeArrayAccessor::GetUnsignedImmediateOperand(
+    int operand_index) const {
+  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+            OperandType::kUImm);
+  return GetUnsignedOperand(operand_index, OperandType::kUImm);
+}
+
+int32_t BytecodeArrayAccessor::GetImmediateOperand(int operand_index) const {
+  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+            OperandType::kImm);
+  return GetSignedOperand(operand_index, OperandType::kImm);
+}
+
+uint32_t BytecodeArrayAccessor::GetRegisterCountOperand(
+    int operand_index) const {
+  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+            OperandType::kRegCount);
+  return GetUnsignedOperand(operand_index, OperandType::kRegCount);
+}
+
+uint32_t BytecodeArrayAccessor::GetIndexOperand(int operand_index) const {
+  OperandType operand_type =
+      Bytecodes::GetOperandType(current_bytecode(), operand_index);
+  DCHECK_EQ(operand_type, OperandType::kIdx);
+  return GetUnsignedOperand(operand_index, operand_type);
+}
+
+Register BytecodeArrayAccessor::GetRegisterOperand(int operand_index) const {
+  OperandType operand_type =
+      Bytecodes::GetOperandType(current_bytecode(), operand_index);
+  const uint8_t* operand_start =
+      bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+      current_prefix_offset() +
+      Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+                                  current_operand_scale());
+  return BytecodeDecoder::DecodeRegisterOperand(operand_start, operand_type,
+                                                current_operand_scale());
+}
+
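+// For a kRegList operand the register count is carried by the following
+// kRegCount operand; otherwise the count is implied by the operand type.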
+int BytecodeArrayAccessor::GetRegisterOperandRange(int operand_index) const {
+  DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+  const OperandType* operand_types =
+      Bytecodes::GetOperandTypes(current_bytecode());
+  OperandType operand_type = operand_types[operand_index];
+  DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+  if (operand_type == OperandType::kRegList) {
+    return GetRegisterCountOperand(operand_index + 1);
+  } else {
+    return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
+  }
+}
+
+Runtime::FunctionId BytecodeArrayAccessor::GetRuntimeIdOperand(
+    int operand_index) const {
+  OperandType operand_type =
+      Bytecodes::GetOperandType(current_bytecode(), operand_index);
+  DCHECK(operand_type == OperandType::kRuntimeId);
+  uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+  return static_cast<Runtime::FunctionId>(raw_id);
+}
+
+Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
+    int operand_index) const {
+  OperandType operand_type =
+      Bytecodes::GetOperandType(current_bytecode(), operand_index);
+  DCHECK(operand_type == OperandType::kIntrinsicId);
+  uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+  return IntrinsicsHelper::ToRuntimeId(
+      static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
+}
+
+Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
+    int operand_index) const {
+  return FixedArray::get(bytecode_array()->constant_pool(),
+                         GetIndexOperand(operand_index),
+                         bytecode_array()->GetIsolate());
+}
+
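+// Example: a JumpLoop at offset 20 with unsigned immediate 10 targets
+// offset 10 (backward); any other immediate jump at offset 20 with
+// immediate 10 targets offset 30 (plus any prefix offset).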
+int BytecodeArrayAccessor::GetJumpTargetOffset() const {
+  Bytecode bytecode = current_bytecode();
+  if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
+    int relative_offset = GetUnsignedImmediateOperand(0);
+    if (bytecode == Bytecode::kJumpLoop) {
+      relative_offset = -relative_offset;
+    }
+    return current_offset() + relative_offset + current_prefix_offset();
+  } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
+    Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
+    return current_offset() + smi->value() + current_prefix_offset();
+  } else {
+    UNREACHABLE();
+    return kMinInt;
+  }
+}
+
+bool BytecodeArrayAccessor::OffsetWithinBytecode(int offset) const {
+  return current_offset() <= offset &&
+         offset < current_offset() + current_bytecode_size();
+}
+
+std::ostream& BytecodeArrayAccessor::PrintTo(std::ostream& os) const {
+  return BytecodeDecoder::Decode(
+      os, bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_,
+      bytecode_array()->parameter_count());
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
diff --git a/src/interpreter/bytecode-array-accessor.h b/src/interpreter/bytecode-array-accessor.h
new file mode 100644
index 0000000..e5a24f3
--- /dev/null
+++ b/src/interpreter/bytecode-array-accessor.h
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
+
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/objects.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
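+// Random-access view over a BytecodeArray: position the accessor with
+// SetOffset() and read the bytecode and its operands at that offset.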
+class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
+ public:
+  BytecodeArrayAccessor(Handle<BytecodeArray> bytecode_array,
+                        int initial_offset);
+
+  void SetOffset(int offset);
+
+  Bytecode current_bytecode() const;
+  int current_bytecode_size() const;
+  int current_offset() const { return bytecode_offset_; }
+  OperandScale current_operand_scale() const { return operand_scale_; }
+  int current_prefix_offset() const { return prefix_offset_; }
+  const Handle<BytecodeArray>& bytecode_array() const {
+    return bytecode_array_;
+  }
+
+  uint32_t GetFlagOperand(int operand_index) const;
+  uint32_t GetUnsignedImmediateOperand(int operand_index) const;
+  int32_t GetImmediateOperand(int operand_index) const;
+  uint32_t GetIndexOperand(int operand_index) const;
+  uint32_t GetRegisterCountOperand(int operand_index) const;
+  Register GetRegisterOperand(int operand_index) const;
+  int GetRegisterOperandRange(int operand_index) const;
+  Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
+  Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
+  Handle<Object> GetConstantForIndexOperand(int operand_index) const;
+
+  // Returns the absolute offset of the branch target at the current
+  // bytecode. It is an error to call this method if the bytecode is
+  // not for a jump or conditional jump.
+  int GetJumpTargetOffset() const;
+
+  bool OffsetWithinBytecode(int offset) const;
+
+  std::ostream& PrintTo(std::ostream& os) const;
+
+ private:
+  bool OffsetInBounds() const;
+
+  uint32_t GetUnsignedOperand(int operand_index,
+                              OperandType operand_type) const;
+  int32_t GetSignedOperand(int operand_index, OperandType operand_type) const;
+
+  void UpdateOperandScale();
+
+  Handle<BytecodeArray> bytecode_array_;
+  int bytecode_offset_;
+  OperandScale operand_scale_;
+  int prefix_offset_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeArrayAccessor);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc
index 904a8e0..c327fb7 100644
--- a/src/interpreter/bytecode-array-builder.cc
+++ b/src/interpreter/bytecode-array-builder.cc
@@ -11,6 +11,7 @@
 #include "src/interpreter/bytecode-peephole-optimizer.h"
 #include "src/interpreter/bytecode-register-optimizer.h"
 #include "src/interpreter/interpreter-intrinsics.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -21,8 +22,9 @@
     int locals_count, FunctionLiteral* literal,
     SourcePositionTableBuilder::RecordingMode source_position_mode)
     : zone_(zone),
+      literal_(literal),
       bytecode_generated_(false),
-      constant_array_builder_(zone, isolate->factory()->the_hole_value()),
+      constant_array_builder_(zone),
       handler_table_builder_(zone),
       return_seen_in_block_(false),
       parameter_count_(parameter_count),
@@ -69,6 +71,12 @@
   return Register::FromParameterIndex(parameter_index, parameter_count());
 }
 
+Register BytecodeArrayBuilder::Local(int index) const {
+  // TODO(marja): Make a DCHECK once crbug.com/706234 is fixed.
+  CHECK_LT(index, locals_count());
+  return Register(index);
+}
+
 Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
   DCHECK(return_seen_in_block_);
   DCHECK(!bytecode_generated_);
@@ -143,7 +151,8 @@
   template <>                                      \
   class OperandHelper<OperandType::k##Name>        \
       : public UnsignedOperandHelper<Type> {};
-UNSIGNED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
+UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
+UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
 #undef DEFINE_UNSIGNED_OPERAND_HELPER
 
 template <>
@@ -211,14 +220,15 @@
 
 }  // namespace
 
-template <OperandType... operand_types>
+template <Bytecode bytecode, AccumulatorUse accumulator_use,
+          OperandType... operand_types>
 class BytecodeNodeBuilder {
  public:
   template <typename... Operands>
   INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder,
                                   BytecodeSourceInfo source_info,
-                                  Bytecode bytecode, Operands... operands)) {
-    builder->PrepareToOutputBytecode(bytecode);
+                                  Operands... operands)) {
+    builder->PrepareToOutputBytecode<bytecode, accumulator_use>();
     // The "OperandHelper<operand_types>::Convert(builder, operands)..." will
     // expand both the OperandType... and Operands... parameter packs e.g. for:
     //   BytecodeNodeBuilder<OperandType::kReg, OperandType::kImm>::Make<
@@ -226,30 +236,34 @@
     // the code will expand into:
     //    OperandHelper<OperandType::kReg>::Convert(builder, reg),
     //    OperandHelper<OperandType::kImm>::Convert(builder, immediate),
-    return BytecodeNode(
-        bytecode, OperandHelper<operand_types>::Convert(builder, operands)...,
-        source_info);
+    return BytecodeNode::Create<bytecode, accumulator_use, operand_types...>(
+        source_info,
+        OperandHelper<operand_types>::Convert(builder, operands)...);
   }
 };
 
-#define DEFINE_BYTECODE_OUTPUT(name, accumulator_use, ...)                 \
-  template <typename... Operands>                                          \
-  void BytecodeArrayBuilder::Output##name(Operands... operands) {          \
-    BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
-        this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
-        operands...));                                                     \
-    pipeline()->Write(&node);                                              \
-  }                                                                        \
-                                                                           \
-  template <typename... Operands>                                          \
-  void BytecodeArrayBuilder::Output##name(BytecodeLabel* label,            \
-                                          Operands... operands) {          \
-    DCHECK(Bytecodes::IsJump(Bytecode::k##name));                          \
-    BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
-        this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
-        operands...));                                                     \
-    pipeline()->WriteJump(&node, label);                                   \
-    LeaveBasicBlock();                                                     \
+#define DEFINE_BYTECODE_OUTPUT(name, ...)                                \
+  template <typename... Operands>                                        \
+  void BytecodeArrayBuilder::Output##name(Operands... operands) {        \
+    static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands,        \
+                  "too many operands for bytecode");                     \
+    BytecodeNode node(                                                   \
+        BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make<       \
+            Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
+                         operands...));                                  \
+    pipeline()->Write(&node);                                            \
+  }                                                                      \
+                                                                         \
+  template <typename... Operands>                                        \
+  void BytecodeArrayBuilder::Output##name(BytecodeLabel* label,          \
+                                          Operands... operands) {        \
+    DCHECK(Bytecodes::IsJump(Bytecode::k##name));                        \
+    BytecodeNode node(                                                   \
+        BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make<       \
+            Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
+                         operands...));                                  \
+    pipeline()->WriteJump(&node, label);                                 \
+    LeaveBasicBlock();                                                   \
   }
 BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT)
 #undef DEFINE_BYTECODE_OUTPUT
@@ -318,6 +332,11 @@
   return *this;
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::GetSuperConstructor(Register out) {
+  OutputGetSuperConstructor(out);
+  return *this;
+}
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
     Token::Value op, Register reg, int feedback_slot) {
   switch (op) {
@@ -371,12 +390,54 @@
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
-  size_t entry = GetConstantPoolEntry(object);
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
+    const AstRawString* raw_string) {
+  size_t entry = GetConstantPoolEntry(raw_string);
   OutputLdaConstant(entry);
   return *this;
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(const Scope* scope) {
+  size_t entry = GetConstantPoolEntry(scope);
+  OutputLdaConstant(entry);
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
+    const AstValue* ast_value) {
+  if (ast_value->IsSmi()) {
+    return LoadLiteral(ast_value->AsSmi());
+  } else if (ast_value->IsUndefined()) {
+    return LoadUndefined();
+  } else if (ast_value->IsTrue()) {
+    return LoadTrue();
+  } else if (ast_value->IsFalse()) {
+    return LoadFalse();
+  } else if (ast_value->IsNull()) {
+    return LoadNull();
+  } else if (ast_value->IsTheHole()) {
+    return LoadTheHole();
+  } else if (ast_value->IsString()) {
+    return LoadLiteral(ast_value->AsString());
+  } else if (ast_value->IsHeapNumber()) {
+    size_t entry = GetConstantPoolEntry(ast_value);
+    OutputLdaConstant(entry);
+    return *this;
+  } else {
+    // This should be the only ast value type left.
+    DCHECK(ast_value->IsSymbol());
+    size_t entry;
+    switch (ast_value->AsSymbol()) {
+      case AstSymbol::kHomeObjectSymbol:
+        entry = HomeObjectSymbolConstantPoolEntry();
+        break;
+        // No default case, so that we get a warning if AstSymbol changes.
+    }
+    OutputLdaConstant(entry);
+    return *this;
+  }
+}
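
The AsSymbol() switch above has a single case and no default on purpose: with -Wswitch (implied by -Wall), adding an enumerator to AstSymbol produces a warning here until the new symbol is handled. The idiom in isolation, with a hypothetical enum:

enum class Pet { kCat, kDog };

const char* NameOf(Pet pet) {
  switch (pet) {
    case Pet::kCat:
      return "cat";
    case Pet::kDog:
      return "dog";
      // No default case, so the compiler warns at this switch if Pet grows.
  }
  return nullptr;  // unreachable for the current enumerators
}
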
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadUndefined() {
   OutputLdaUndefined();
   return *this;
@@ -433,19 +494,29 @@
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int feedback_slot,
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(const AstRawString* name,
+                                                       int feedback_slot,
                                                        TypeofMode typeof_mode) {
+  size_t name_index = GetConstantPoolEntry(name);
+  // Ensure that typeof mode is in sync with the IC slot kind if the function
+  // literal is available (not a unit test case).
+  // TODO(ishell): check only in debug mode.
+  if (literal_) {
+    FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
+    CHECK_EQ(GetTypeofModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+             typeof_mode);
+  }
   if (typeof_mode == INSIDE_TYPEOF) {
-    OutputLdaGlobalInsideTypeof(feedback_slot);
+    OutputLdaGlobalInsideTypeof(name_index, feedback_slot);
   } else {
     DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
-    OutputLdaGlobal(feedback_slot);
+    OutputLdaGlobal(name_index, feedback_slot);
   }
   return *this;
 }
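
The new CHECK cross-validates the caller's typeof_mode against the kind of the feedback slot allocated for this load. A sketch of the mapping it relies on, assuming (as in this revision) that global loads get one of two dedicated slot kinds:

enum TypeofMode { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
enum class FeedbackSlotKind {
  kLoadGlobalInsideTypeof,
  kLoadGlobalNotInsideTypeof,
};

// Assumed shape of GetTypeofModeFromSlotKind(): the slot kind chosen when the
// feedback vector spec was built fixes the typeof mode the bytecode must be
// emitted with.
TypeofMode GetTypeofModeFromSlotKind(FeedbackSlotKind kind) {
  return kind == FeedbackSlotKind::kLoadGlobalInsideTypeof ? INSIDE_TYPEOF
                                                           : NOT_INSIDE_TYPEOF;
}
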
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreGlobal(
-    const Handle<String> name, int feedback_slot, LanguageMode language_mode) {
+    const AstRawString* name, int feedback_slot, LanguageMode language_mode) {
   size_t name_index = GetConstantPoolEntry(name);
   if (language_mode == SLOPPY) {
     OutputStaGlobalSloppy(name_index, feedback_slot);
@@ -456,12 +527,20 @@
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(Register context,
-                                                            int slot_index,
-                                                            int depth) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadContextSlot(
+    Register context, int slot_index, int depth,
+    ContextSlotMutability mutability) {
   if (context.is_current_context() && depth == 0) {
-    OutputLdaCurrentContextSlot(slot_index);
+    if (mutability == kImmutableSlot) {
+      OutputLdaImmutableCurrentContextSlot(slot_index);
+    } else {
+      DCHECK_EQ(kMutableSlot, mutability);
+      OutputLdaCurrentContextSlot(slot_index);
+    }
+  } else if (mutability == kImmutableSlot) {
+    OutputLdaImmutableContextSlot(context, slot_index, depth);
   } else {
+    DCHECK_EQ(mutability, kMutableSlot);
     OutputLdaContextSlot(context, slot_index, depth);
   }
   return *this;
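
A hypothetical call site for the new parameter: a binding known never to be reassigned (for example a const after initialization) can be loaded with kImmutableSlot, which marks the load as safe for later constant-style optimization; everything else stays kMutableSlot. The slot index and depth below are made-up values:

builder.LoadContextSlot(Register::current_context(), /* slot_index */ 2,
                        /* depth */ 0, BytecodeArrayBuilder::kImmutableSlot);
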
@@ -479,7 +558,7 @@
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
-    const Handle<String> name, TypeofMode typeof_mode) {
+    const AstRawString* name, TypeofMode typeof_mode) {
   size_t name_index = GetConstantPoolEntry(name);
   if (typeof_mode == INSIDE_TYPEOF) {
     OutputLdaLookupSlotInsideTypeof(name_index);
@@ -491,7 +570,7 @@
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupContextSlot(
-    const Handle<String> name, TypeofMode typeof_mode, int slot_index,
+    const AstRawString* name, TypeofMode typeof_mode, int slot_index,
     int depth) {
   size_t name_index = GetConstantPoolEntry(name);
   if (typeof_mode == INSIDE_TYPEOF) {
@@ -504,7 +583,7 @@
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupGlobalSlot(
-    const Handle<String> name, TypeofMode typeof_mode, int feedback_slot,
+    const AstRawString* name, TypeofMode typeof_mode, int feedback_slot,
     int depth) {
   size_t name_index = GetConstantPoolEntry(name);
   if (typeof_mode == INSIDE_TYPEOF) {
@@ -517,7 +596,7 @@
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreLookupSlot(
-    const Handle<String> name, LanguageMode language_mode) {
+    const AstRawString* name, LanguageMode language_mode) {
   size_t name_index = GetConstantPoolEntry(name);
   if (language_mode == SLOPPY) {
     OutputStaLookupSlotSloppy(name_index);
@@ -529,7 +608,7 @@
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
-    Register object, const Handle<Name> name, int feedback_slot) {
+    Register object, const AstRawString* name, int feedback_slot) {
   size_t name_index = GetConstantPoolEntry(name);
   OutputLdaNamedProperty(object, name_index, feedback_slot);
   return *this;
@@ -541,10 +620,38 @@
   return *this;
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadIteratorProperty(
+    Register object, int feedback_slot) {
+  size_t name_index = IteratorSymbolConstantPoolEntry();
+  OutputLdaNamedProperty(object, name_index, feedback_slot);
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadAsyncIteratorProperty(
+    Register object, int feedback_slot) {
+  size_t name_index = AsyncIteratorSymbolConstantPoolEntry();
+  OutputLdaNamedProperty(object, name_index, feedback_slot);
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreDataPropertyInLiteral(
+    Register object, Register name, DataPropertyInLiteralFlags flags,
+    int feedback_slot) {
+  OutputStaDataPropertyInLiteral(object, name, flags, feedback_slot);
+  return *this;
+}
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
-    Register object, const Handle<Name> name, int feedback_slot,
+    Register object, size_t name_index, int feedback_slot,
     LanguageMode language_mode) {
-  size_t name_index = GetConstantPoolEntry(name);
+  // Ensure that language mode is in sync with the IC slot kind if the function
+  // literal is available (not a unit test case).
+  // TODO(ishell): check only in debug mode.
+  if (literal_) {
+    FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
+    CHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+             language_mode);
+  }
   if (language_mode == SLOPPY) {
     OutputStaNamedPropertySloppy(object, name_index, feedback_slot);
   } else {
@@ -554,9 +661,39 @@
   return *this;
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
+    Register object, const AstRawString* name, int feedback_slot,
+    LanguageMode language_mode) {
+  size_t name_index = GetConstantPoolEntry(name);
+  return StoreNamedProperty(object, name_index, feedback_slot, language_mode);
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedOwnProperty(
+    Register object, const AstRawString* name, int feedback_slot) {
+  size_t name_index = GetConstantPoolEntry(name);
+  // Ensure that the store operation is in sync with the IC slot kind if
+  // the function literal is available (not a unit test case).
+  // TODO(ishell): check only in debug mode.
+  if (literal_) {
+    FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
+    CHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
+             feedback_vector_spec()->GetKind(slot));
+  }
+  OutputStaNamedOwnProperty(object, name_index, feedback_slot);
+  return *this;
+}
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
     Register object, Register key, int feedback_slot,
     LanguageMode language_mode) {
+  // Ensure that language mode is in sync with the IC slot kind if the function
+  // literal is available (not a unit test case).
+  // TODO(ishell): check only in debug mode.
+  if (literal_) {
+    FeedbackSlot slot = FeedbackVector::ToSlot(feedback_slot);
+    CHECK_EQ(GetLanguageModeFromSlotKind(feedback_vector_spec()->GetKind(slot)),
+             language_mode);
+  }
   if (language_mode == SLOPPY) {
     OutputStaKeyedPropertySloppy(object, key, feedback_slot);
   } else {
@@ -566,24 +703,30 @@
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(size_t entry,
-                                                          int flags) {
-  OutputCreateClosure(entry, flags);
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreHomeObjectProperty(
+    Register object, int feedback_slot, LanguageMode language_mode) {
+  size_t name_index = HomeObjectSymbolConstantPoolEntry();
+  return StoreNamedProperty(object, name_index, feedback_slot, language_mode);
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
+    size_t shared_function_info_entry, int slot, int flags) {
+  OutputCreateClosure(shared_function_info_entry, slot, flags);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateBlockContext(
-    Handle<ScopeInfo> scope_info) {
-  size_t entry = GetConstantPoolEntry(scope_info);
+    const Scope* scope) {
+  size_t entry = GetConstantPoolEntry(scope);
   OutputCreateBlockContext(entry);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateCatchContext(
-    Register exception, Handle<String> name, Handle<ScopeInfo> scope_info) {
+    Register exception, const AstRawString* name, const Scope* scope) {
   size_t name_index = GetConstantPoolEntry(name);
-  size_t scope_info_index = GetConstantPoolEntry(scope_info);
-  OutputCreateCatchContext(exception, name_index, scope_info_index);
+  size_t scope_index = GetConstantPoolEntry(scope);
+  OutputCreateCatchContext(exception, name_index, scope_index);
   return *this;
 }
 
@@ -592,10 +735,15 @@
   return *this;
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateEvalContext(int slots) {
+  OutputCreateEvalContext(slots);
+  return *this;
+}
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(
-    Register object, Handle<ScopeInfo> scope_info) {
-  size_t scope_info_index = GetConstantPoolEntry(scope_info);
-  OutputCreateWithContext(object, scope_info_index);
+    Register object, const Scope* scope) {
+  size_t scope_index = GetConstantPoolEntry(scope);
+  OutputCreateWithContext(object, scope_index);
   return *this;
 }
 
@@ -618,23 +766,21 @@
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
-    Handle<String> pattern, int literal_index, int flags) {
+    const AstRawString* pattern, int literal_index, int flags) {
   size_t pattern_entry = GetConstantPoolEntry(pattern);
   OutputCreateRegExpLiteral(pattern_entry, literal_index, flags);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
-    Handle<FixedArray> constant_elements, int literal_index, int flags) {
-  size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
+    size_t constant_elements_entry, int literal_index, int flags) {
   OutputCreateArrayLiteral(constant_elements_entry, literal_index, flags);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
-    Handle<FixedArray> constant_properties, int literal_index, int flags,
+    size_t constant_properties_entry, int literal_index, int flags,
     Register output) {
-  size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
   OutputCreateObjectLiteral(constant_properties_entry, literal_index, flags,
                             output);
   return *this;
@@ -685,6 +831,7 @@
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
+  DCHECK(!label->is_bound());
   OutputJump(label, 0);
   return *this;
 }
@@ -692,34 +839,47 @@
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfTrue(BytecodeLabel* label) {
   // The peephole optimizer attempts to simplify JumpIfToBooleanTrue
   // to JumpIfTrue.
+  DCHECK(!label->is_bound());
   OutputJumpIfToBooleanTrue(label, 0);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
+  DCHECK(!label->is_bound());
   OutputJumpIfToBooleanFalse(label, 0);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNull(BytecodeLabel* label) {
+  DCHECK(!label->is_bound());
   OutputJumpIfNull(label, 0);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfUndefined(
     BytecodeLabel* label) {
+  DCHECK(!label->is_bound());
   OutputJumpIfUndefined(label, 0);
   return *this;
 }
 
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
     BytecodeLabel* label) {
+  DCHECK(!label->is_bound());
   OutputJumpIfNotHole(label, 0);
   return *this;
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfJSReceiver(
+    BytecodeLabel* label) {
+  DCHECK(!label->is_bound());
+  OutputJumpIfJSReceiver(label, 0);
+  return *this;
+}
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label,
                                                      int loop_depth) {
+  DCHECK(label->is_bound());
   OutputJumpLoop(label, 0, loop_depth);
   return *this;
 }
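
The new DCHECKs encode the invariant that JumpLoop is the only backward branch: every other jump must target a label that is not yet bound, while JumpLoop requires an already-bound label. A hypothetical emission sequence:

BytecodeLabel loop_header;
builder.Bind(&loop_header);         // bind first: JumpLoop branches backwards
// ... emit the loop body ...
builder.JumpLoop(&loop_header, 0);  // loop_depth 0 for an outermost loop
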
@@ -742,6 +902,11 @@
   return *this;
 }
 
+BytecodeArrayBuilder& BytecodeArrayBuilder::SetPendingMessage() {
+  OutputSetPendingMessage();
+  return *this;
+}
+
 BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
   OutputThrow();
   return *this;
@@ -858,10 +1023,22 @@
   return *this;
 }
 
-BytecodeArrayBuilder& BytecodeArrayBuilder::New(Register constructor,
-                                                RegisterList args,
-                                                int feedback_slot_id) {
-  OutputNew(constructor, args, args.register_count(), feedback_slot_id);
+BytecodeArrayBuilder& BytecodeArrayBuilder::CallWithSpread(Register callable,
+                                                           RegisterList args) {
+  OutputCallWithSpread(callable, args, args.register_count());
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::Construct(Register constructor,
+                                                      RegisterList args,
+                                                      int feedback_slot_id) {
+  OutputConstruct(constructor, args, args.register_count(), feedback_slot_id);
+  return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::ConstructWithSpread(
+    Register constructor, RegisterList args) {
+  OutputConstructWithSpread(constructor, args, args.register_count());
   return *this;
 }
 
@@ -925,17 +1102,34 @@
   return *this;
 }
 
-size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
-  return constant_array_builder()->Insert(object);
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(
+    const AstRawString* raw_string) {
+  return constant_array_builder()->Insert(raw_string);
 }
 
-size_t BytecodeArrayBuilder::AllocateConstantPoolEntry() {
-  return constant_array_builder()->AllocateEntry();
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(const AstValue* heap_number) {
+  DCHECK(heap_number->IsHeapNumber());
+  return constant_array_builder()->Insert(heap_number);
 }
 
-void BytecodeArrayBuilder::InsertConstantPoolEntryAt(size_t entry,
-                                                     Handle<Object> object) {
-  constant_array_builder()->InsertAllocatedEntry(entry, object);
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(const Scope* scope) {
+  return constant_array_builder()->Insert(scope);
+}
+
+#define ENTRY_GETTER(NAME, ...)                            \
+  size_t BytecodeArrayBuilder::NAME##ConstantPoolEntry() { \
+    return constant_array_builder()->Insert##NAME();       \
+  }
+SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_GETTER)
+#undef ENTRY_GETTER
+
+size_t BytecodeArrayBuilder::AllocateDeferredConstantPoolEntry() {
+  return constant_array_builder()->InsertDeferred();
+}
+
+void BytecodeArrayBuilder::SetDeferredConstantPoolEntry(size_t entry,
+                                                        Handle<Object> object) {
+  constant_array_builder()->SetDeferredAt(entry, object);
 }
 
 void BytecodeArrayBuilder::SetReturnPosition() {
@@ -975,8 +1169,10 @@
   }
 }
 
-void BytecodeArrayBuilder::PrepareToOutputBytecode(Bytecode bytecode) {
-  if (register_optimizer_) register_optimizer_->PrepareForBytecode(bytecode);
+template <Bytecode bytecode, AccumulatorUse accumulator_use>
+void BytecodeArrayBuilder::PrepareToOutputBytecode() {
+  if (register_optimizer_)
+    register_optimizer_->PrepareForBytecode<bytecode, accumulator_use>();
 }
 
 uint32_t BytecodeArrayBuilder::GetInputRegisterOperand(Register reg) {
diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h
index cc5b5e7..0a10c1f 100644
--- a/src/interpreter/bytecode-array-builder.h
+++ b/src/interpreter/bytecode-array-builder.h
@@ -71,12 +71,15 @@
     return register_allocator()->maximum_register_count();
   }
 
+  Register Local(int index) const;
   Register Parameter(int parameter_index) const;
 
   // Constant loads to accumulator.
   BytecodeArrayBuilder& LoadConstantPoolEntry(size_t entry);
   BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
-  BytecodeArrayBuilder& LoadLiteral(Handle<Object> object);
+  BytecodeArrayBuilder& LoadLiteral(const AstRawString* raw_string);
+  BytecodeArrayBuilder& LoadLiteral(const Scope* scope);
+  BytecodeArrayBuilder& LoadLiteral(const AstValue* ast_value);
   BytecodeArrayBuilder& LoadUndefined();
   BytecodeArrayBuilder& LoadNull();
   BytecodeArrayBuilder& LoadTheHole();
@@ -84,15 +87,17 @@
   BytecodeArrayBuilder& LoadFalse();
 
   // Global loads to the accumulator and stores from the accumulator.
-  BytecodeArrayBuilder& LoadGlobal(int feedback_slot, TypeofMode typeof_mode);
-  BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
-                                    int feedback_slot,
+  BytecodeArrayBuilder& LoadGlobal(const AstRawString* name, int feedback_slot,
+                                   TypeofMode typeof_mode);
+  BytecodeArrayBuilder& StoreGlobal(const AstRawString* name, int feedback_slot,
                                     LanguageMode language_mode);
 
   // Load the object at |slot_index| at |depth| in the context chain starting
   // with |context| into the accumulator.
+  enum ContextSlotMutability { kImmutableSlot, kMutableSlot };
   BytecodeArrayBuilder& LoadContextSlot(Register context, int slot_index,
-                                        int depth);
+                                        int depth,
+                                        ContextSlotMutability mutability);
 
   // Stores the object in the accumulator into |slot_index| at |depth| in the
   // context chain starting with |context|.
@@ -116,75 +121,109 @@
 
   // Named load property.
   BytecodeArrayBuilder& LoadNamedProperty(Register object,
-                                          const Handle<Name> name,
+                                          const AstRawString* name,
                                           int feedback_slot);
   // Keyed load property. The key should be in the accumulator.
   BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot);
+  // Named load property of the @@iterator symbol.
+  BytecodeArrayBuilder& LoadIteratorProperty(Register object,
+                                             int feedback_slot);
+  // Named load property of the @@asyncIterator symbol.
+  BytecodeArrayBuilder& LoadAsyncIteratorProperty(Register object,
+                                                  int feedback_slot);
 
-  // Store properties. The value to be stored should be in the accumulator.
+  // Store properties. The flag for NeedsSetFunctionName() should be in the
+  // accumulator.
+  BytecodeArrayBuilder& StoreDataPropertyInLiteral(
+      Register object, Register name, DataPropertyInLiteralFlags flags,
+      int feedback_slot);
+
+  // Store a property named by an AstRawString. The value to be stored should
+  // be in the accumulator.
   BytecodeArrayBuilder& StoreNamedProperty(Register object,
-                                           const Handle<Name> name,
+                                           const AstRawString* name,
                                            int feedback_slot,
                                            LanguageMode language_mode);
+  // Store a property named by a constant from the constant pool. The value to
+  // be stored should be in the accumulator.
+  BytecodeArrayBuilder& StoreNamedProperty(Register object,
+                                           size_t constant_pool_entry,
+                                           int feedback_slot,
+                                           LanguageMode language_mode);
+  // Store an own property named by a constant from the constant pool. The
+  // value to be stored should be in the accumulator.
+  BytecodeArrayBuilder& StoreNamedOwnProperty(Register object,
+                                              const AstRawString* name,
+                                              int feedback_slot);
+  // Store a property keyed by a value in a register. The value to be stored
+  // should be in the accumulator.
   BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
                                            int feedback_slot,
                                            LanguageMode language_mode);
+  // Store the home object property. The value to be stored should be in the
+  // accumulator.
+  BytecodeArrayBuilder& StoreHomeObjectProperty(Register object,
+                                                int feedback_slot,
+                                                LanguageMode language_mode);
 
   // Lookup the variable with |name|.
-  BytecodeArrayBuilder& LoadLookupSlot(const Handle<String> name,
+  BytecodeArrayBuilder& LoadLookupSlot(const AstRawString* name,
                                        TypeofMode typeof_mode);
 
   // Lookup the variable with |name|, which is known to be at |slot_index| at
   // |depth| in the context chain if not shadowed by a context extension
   // somewhere in that context chain.
-  BytecodeArrayBuilder& LoadLookupContextSlot(const Handle<String> name,
+  BytecodeArrayBuilder& LoadLookupContextSlot(const AstRawString* name,
                                               TypeofMode typeof_mode,
                                               int slot_index, int depth);
 
   // Lookup the variable with |name|, which has its feedback in |feedback_slot|
   // and is known to be global if not shadowed by a context extension somewhere
   // up to |depth| in that context chain.
-  BytecodeArrayBuilder& LoadLookupGlobalSlot(const Handle<String> name,
+  BytecodeArrayBuilder& LoadLookupGlobalSlot(const AstRawString* name,
                                              TypeofMode typeof_mode,
                                              int feedback_slot, int depth);
 
   // Store value in the accumulator into the variable with |name|.
-  BytecodeArrayBuilder& StoreLookupSlot(const Handle<String> name,
+  BytecodeArrayBuilder& StoreLookupSlot(const AstRawString* name,
                                         LanguageMode language_mode);
 
   // Create a new closure for a SharedFunctionInfo which will be inserted at
-  // constant pool index |entry|.
-  BytecodeArrayBuilder& CreateClosure(size_t entry, int flags);
+  // constant pool index |shared_function_info_entry|.
+  BytecodeArrayBuilder& CreateClosure(size_t shared_function_info_entry,
+                                      int slot, int flags);
 
-  // Create a new local context for a |scope_info| and a closure which should be
+  // Create a new local context for a |scope| and a closure which should be
   // in the accumulator.
-  BytecodeArrayBuilder& CreateBlockContext(Handle<ScopeInfo> scope_info);
+  BytecodeArrayBuilder& CreateBlockContext(const Scope* scope);
 
   // Create a new context for a catch block with |exception|, |name|,
-  // |scope_info|, and the closure in the accumulator.
+  // |scope|, and the closure in the accumulator.
   BytecodeArrayBuilder& CreateCatchContext(Register exception,
-                                           Handle<String> name,
-                                           Handle<ScopeInfo> scope_info);
+                                           const AstRawString* name,
+                                           const Scope* scope);
 
   // Create a new context with size |slots|.
   BytecodeArrayBuilder& CreateFunctionContext(int slots);
 
-  // Creates a new context with the given |scope_info| for a with-statement
+  // Create a new eval context with size |slots|.
+  BytecodeArrayBuilder& CreateEvalContext(int slots);
+
+  // Creates a new context with the given |scope| for a with-statement
   // with the |object| in a register and the closure in the accumulator.
-  BytecodeArrayBuilder& CreateWithContext(Register object,
-                                          Handle<ScopeInfo> scope_info);
+  BytecodeArrayBuilder& CreateWithContext(Register object, const Scope* scope);
 
   // Create a new arguments object in the accumulator.
   BytecodeArrayBuilder& CreateArguments(CreateArgumentsType type);
 
   // Literals creation.  Constant elements should be in the accumulator.
-  BytecodeArrayBuilder& CreateRegExpLiteral(Handle<String> pattern,
+  BytecodeArrayBuilder& CreateRegExpLiteral(const AstRawString* pattern,
                                             int literal_index, int flags);
-  BytecodeArrayBuilder& CreateArrayLiteral(Handle<FixedArray> constant_elements,
+  BytecodeArrayBuilder& CreateArrayLiteral(size_t constant_elements_entry,
                                            int literal_index, int flags);
-  BytecodeArrayBuilder& CreateObjectLiteral(
-      Handle<FixedArray> constant_properties, int literal_index, int flags,
-      Register output);
+  BytecodeArrayBuilder& CreateObjectLiteral(size_t constant_properties_entry,
+                                            int literal_index, int flags,
+                                            Register output);
 
   // Push the context in the accumulator as the new context, and store it in
   // register |context|.
@@ -202,10 +241,21 @@
       Call::CallType call_type,
       TailCallMode tail_call_mode = TailCallMode::kDisallow);
 
-  // Call the new operator. The accumulator holds the |new_target|.
+  // Call a JS function. The JSFunction or Callable to be called should be in
+  // |callable|, the receiver in |args[0]| and the arguments in |args[1]|
+  // onwards. The final argument must be a spread.
+  BytecodeArrayBuilder& CallWithSpread(Register callable, RegisterList args);
+
+  // Call the Construct operator. The accumulator holds the |new_target|.
   // The |constructor| is in a register and arguments are in |args|.
-  BytecodeArrayBuilder& New(Register constructor, RegisterList args,
-                            int feedback_slot);
+  BytecodeArrayBuilder& Construct(Register constructor, RegisterList args,
+                                  int feedback_slot);
+
+  // Call the Construct operator for use with a spread. The accumulator holds
+  // the |new_target|. The |constructor| is in a register and arguments are in
+  // |args|. The final argument must be a spread.
+  BytecodeArrayBuilder& ConstructWithSpread(Register constructor,
+                                            RegisterList args);
 
   // Call the runtime function with |function_id| and arguments |args|.
   BytecodeArrayBuilder& CallRuntime(Runtime::FunctionId function_id,
@@ -245,6 +295,11 @@
   BytecodeArrayBuilder& LogicalNot();
   BytecodeArrayBuilder& TypeOf();
 
+  // Expects a heap object in the accumulator. Returns its super constructor in
+  // the register |out| if it passes the IsConstructor test. Otherwise, it
+  // throws a TypeError exception.
+  BytecodeArrayBuilder& GetSuperConstructor(Register out);
+
   // Deletes property from an object. This expects that accumulator contains
   // the key to be deleted and the register contains a reference to the object.
   BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
@@ -266,12 +321,17 @@
   BytecodeArrayBuilder& JumpIfTrue(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfFalse(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfNotHole(BytecodeLabel* label);
+  BytecodeArrayBuilder& JumpIfJSReceiver(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
   BytecodeArrayBuilder& JumpLoop(BytecodeLabel* label, int loop_depth);
 
   BytecodeArrayBuilder& StackCheck(int position);
 
+  // Sets the pending message to the value in the accumulator, and returns the
+  // previous pending message in the accumulator.
+  BytecodeArrayBuilder& SetPendingMessage();
+
   BytecodeArrayBuilder& Throw();
   BytecodeArrayBuilder& ReThrow();
   BytecodeArrayBuilder& Return();
@@ -302,10 +362,18 @@
   // entry, so that it can be referenced by above exception handling support.
   int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
 
-  // Allocates a slot in the constant pool which can later be inserted.
-  size_t AllocateConstantPoolEntry();
-  // Inserts a entry into an allocated constant pool entry.
-  void InsertConstantPoolEntryAt(size_t entry, Handle<Object> object);
+  // Gets a constant pool entry.
+  size_t GetConstantPoolEntry(const AstRawString* raw_string);
+  size_t GetConstantPoolEntry(const AstValue* heap_number);
+  size_t GetConstantPoolEntry(const Scope* scope);
+#define ENTRY_GETTER(NAME, ...) size_t NAME##ConstantPoolEntry();
+  SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_GETTER)
+#undef ENTRY_GETTER
+
+  // Allocates a slot in the constant pool which can later be set.
+  size_t AllocateDeferredConstantPoolEntry();
+  // Sets the deferred value into an allocated constant pool entry.
+  void SetDeferredConstantPoolEntry(size_t entry, Handle<Object> object);
 
   void InitializeReturnPosition(FunctionLiteral* literal);
 
@@ -347,9 +415,14 @@
 
  private:
   friend class BytecodeRegisterAllocator;
-  template <OperandType... operand_types>
+  template <Bytecode bytecode, AccumulatorUse accumulator_use,
+            OperandType... operand_types>
   friend class BytecodeNodeBuilder;
 
+  const FeedbackVectorSpec* feedback_vector_spec() const {
+    return literal_->feedback_vector_spec();
+  }
+
   // Returns the current source position for the given |bytecode|.
   INLINE(BytecodeSourceInfo CurrentSourcePosition(Bytecode bytecode));
 
@@ -367,15 +440,13 @@
   // Set position for return.
   void SetReturnPosition();
 
-  // Gets a constant pool entry for the |object|.
-  size_t GetConstantPoolEntry(Handle<Object> object);
-
   // Not implemented, as the illegal bytecode is only used internally
   // to indicate that a bytecode field is not valid or an error has occurred
   // during bytecode generation.
   BytecodeArrayBuilder& Illegal();
 
-  void PrepareToOutputBytecode(Bytecode bytecode);
+  template <Bytecode bytecode, AccumulatorUse accumulator_use>
+  void PrepareToOutputBytecode();
 
   void LeaveBasicBlock() { return_seen_in_block_ = false; }
 
@@ -394,6 +465,7 @@
   }
 
   Zone* zone_;
+  FunctionLiteral* literal_;
   bool bytecode_generated_;
   ConstantArrayBuilder constant_array_builder_;
   HandlerTableBuilder handler_table_builder_;
diff --git a/src/interpreter/bytecode-array-iterator.cc b/src/interpreter/bytecode-array-iterator.cc
index e596b11..0248dfd 100644
--- a/src/interpreter/bytecode-array-iterator.cc
+++ b/src/interpreter/bytecode-array-iterator.cc
@@ -3,9 +3,6 @@
 // found in the LICENSE file.
 
 #include "src/interpreter/bytecode-array-iterator.h"
-
-#include "src/interpreter/bytecode-decoder.h"
-#include "src/interpreter/interpreter-intrinsics.h"
 #include "src/objects-inl.h"
 
 namespace v8 {
@@ -14,180 +11,14 @@
 
 BytecodeArrayIterator::BytecodeArrayIterator(
     Handle<BytecodeArray> bytecode_array)
-    : bytecode_array_(bytecode_array),
-      bytecode_offset_(0),
-      operand_scale_(OperandScale::kSingle),
-      prefix_offset_(0) {
-  UpdateOperandScale();
-}
+    : BytecodeArrayAccessor(bytecode_array, 0) {}
 
 void BytecodeArrayIterator::Advance() {
-  bytecode_offset_ += current_bytecode_size();
-  UpdateOperandScale();
-}
-
-void BytecodeArrayIterator::UpdateOperandScale() {
-  if (!done()) {
-    uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
-    Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
-    if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
-      operand_scale_ =
-          Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
-      prefix_offset_ = 1;
-    } else {
-      operand_scale_ = OperandScale::kSingle;
-      prefix_offset_ = 0;
-    }
-  }
+  SetOffset(current_offset() + current_bytecode_size());
 }
 
 bool BytecodeArrayIterator::done() const {
-  return bytecode_offset_ >= bytecode_array()->length();
-}
-
-Bytecode BytecodeArrayIterator::current_bytecode() const {
-  DCHECK(!done());
-  uint8_t current_byte =
-      bytecode_array()->get(bytecode_offset_ + current_prefix_offset());
-  Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
-  DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
-  return current_bytecode;
-}
-
-int BytecodeArrayIterator::current_bytecode_size() const {
-  return current_prefix_offset() +
-         Bytecodes::Size(current_bytecode(), current_operand_scale());
-}
-
-uint32_t BytecodeArrayIterator::GetUnsignedOperand(
-    int operand_index, OperandType operand_type) const {
-  DCHECK_GE(operand_index, 0);
-  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
-  DCHECK_EQ(operand_type,
-            Bytecodes::GetOperandType(current_bytecode(), operand_index));
-  DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
-  const uint8_t* operand_start =
-      bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
-      current_prefix_offset() +
-      Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
-                                  current_operand_scale());
-  return BytecodeDecoder::DecodeUnsignedOperand(operand_start, operand_type,
-                                                current_operand_scale());
-}
-
-int32_t BytecodeArrayIterator::GetSignedOperand(
-    int operand_index, OperandType operand_type) const {
-  DCHECK_GE(operand_index, 0);
-  DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
-  DCHECK_EQ(operand_type,
-            Bytecodes::GetOperandType(current_bytecode(), operand_index));
-  DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
-  const uint8_t* operand_start =
-      bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
-      current_prefix_offset() +
-      Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
-                                  current_operand_scale());
-  return BytecodeDecoder::DecodeSignedOperand(operand_start, operand_type,
-                                              current_operand_scale());
-}
-
-uint32_t BytecodeArrayIterator::GetFlagOperand(int operand_index) const {
-  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
-            OperandType::kFlag8);
-  return GetUnsignedOperand(operand_index, OperandType::kFlag8);
-}
-
-uint32_t BytecodeArrayIterator::GetUnsignedImmediateOperand(
-    int operand_index) const {
-  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
-            OperandType::kUImm);
-  return GetUnsignedOperand(operand_index, OperandType::kUImm);
-}
-
-int32_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
-  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
-            OperandType::kImm);
-  return GetSignedOperand(operand_index, OperandType::kImm);
-}
-
-uint32_t BytecodeArrayIterator::GetRegisterCountOperand(
-    int operand_index) const {
-  DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
-            OperandType::kRegCount);
-  return GetUnsignedOperand(operand_index, OperandType::kRegCount);
-}
-
-uint32_t BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
-  OperandType operand_type =
-      Bytecodes::GetOperandType(current_bytecode(), operand_index);
-  DCHECK_EQ(operand_type, OperandType::kIdx);
-  return GetUnsignedOperand(operand_index, operand_type);
-}
-
-Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
-  OperandType operand_type =
-      Bytecodes::GetOperandType(current_bytecode(), operand_index);
-  const uint8_t* operand_start =
-      bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
-      current_prefix_offset() +
-      Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
-                                  current_operand_scale());
-  return BytecodeDecoder::DecodeRegisterOperand(operand_start, operand_type,
-                                                current_operand_scale());
-}
-
-int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
-  DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
-  const OperandType* operand_types =
-      Bytecodes::GetOperandTypes(current_bytecode());
-  OperandType operand_type = operand_types[operand_index];
-  DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
-  if (operand_type == OperandType::kRegList) {
-    return GetRegisterCountOperand(operand_index + 1);
-  } else {
-    return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
-  }
-}
-
-Runtime::FunctionId BytecodeArrayIterator::GetRuntimeIdOperand(
-    int operand_index) const {
-  OperandType operand_type =
-      Bytecodes::GetOperandType(current_bytecode(), operand_index);
-  DCHECK(operand_type == OperandType::kRuntimeId);
-  uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
-  return static_cast<Runtime::FunctionId>(raw_id);
-}
-
-Runtime::FunctionId BytecodeArrayIterator::GetIntrinsicIdOperand(
-    int operand_index) const {
-  OperandType operand_type =
-      Bytecodes::GetOperandType(current_bytecode(), operand_index);
-  DCHECK(operand_type == OperandType::kIntrinsicId);
-  uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
-  return IntrinsicsHelper::ToRuntimeId(
-      static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
-}
-
-Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
-    int operand_index) const {
-  return FixedArray::get(bytecode_array()->constant_pool(),
-                         GetIndexOperand(operand_index),
-                         bytecode_array()->GetIsolate());
-}
-
-
-int BytecodeArrayIterator::GetJumpTargetOffset() const {
-  Bytecode bytecode = current_bytecode();
-  if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
-    int relative_offset = GetImmediateOperand(0);
-    return current_offset() + relative_offset + current_prefix_offset();
-  } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
-    Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
-    return current_offset() + smi->value() + current_prefix_offset();
-  } else {
-    UNREACHABLE();
-    return kMinInt;
-  }
+  return current_offset() >= bytecode_array()->length();
 }
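
After this refactoring the iterator is only a forward cursor over the shared BytecodeArrayAccessor state; Advance() moves the offset past the current bytecode, including any scaling prefix folded into current_bytecode_size(). A typical walk, sketched (the handle is assumed valid):

BytecodeArrayIterator iterator(bytecode_array);
while (!iterator.done()) {
  Bytecode bytecode = iterator.current_bytecode();
  // ... inspect operands through the inherited accessor methods ...
  iterator.Advance();
}
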
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-array-iterator.h b/src/interpreter/bytecode-array-iterator.h
index 03279cb..7ec9d12 100644
--- a/src/interpreter/bytecode-array-iterator.h
+++ b/src/interpreter/bytecode-array-iterator.h
@@ -1,64 +1,25 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
 #define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
 
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/interpreter/bytecode-register.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/objects.h"
-#include "src/runtime/runtime.h"
+#include "src/interpreter/bytecode-array-accessor.h"
 
 namespace v8 {
 namespace internal {
 namespace interpreter {
 
-class V8_EXPORT_PRIVATE BytecodeArrayIterator {
+class V8_EXPORT_PRIVATE BytecodeArrayIterator final
+    : public BytecodeArrayAccessor {
  public:
   explicit BytecodeArrayIterator(Handle<BytecodeArray> bytecode_array);
 
   void Advance();
   bool done() const;
-  Bytecode current_bytecode() const;
-  int current_bytecode_size() const;
-  int current_offset() const { return bytecode_offset_; }
-  OperandScale current_operand_scale() const { return operand_scale_; }
-  int current_prefix_offset() const { return prefix_offset_; }
-  const Handle<BytecodeArray>& bytecode_array() const {
-    return bytecode_array_;
-  }
-
-  uint32_t GetFlagOperand(int operand_index) const;
-  uint32_t GetUnsignedImmediateOperand(int operand_index) const;
-  int32_t GetImmediateOperand(int operand_index) const;
-  uint32_t GetIndexOperand(int operand_index) const;
-  uint32_t GetRegisterCountOperand(int operand_index) const;
-  Register GetRegisterOperand(int operand_index) const;
-  int GetRegisterOperandRange(int operand_index) const;
-  Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
-  Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
-  Handle<Object> GetConstantForIndexOperand(int operand_index) const;
-
-  // Returns the absolute offset of the branch target at the current
-  // bytecode. It is an error to call this method if the bytecode is
-  // not for a jump or conditional jump.
-  int GetJumpTargetOffset() const;
 
  private:
-  uint32_t GetUnsignedOperand(int operand_index,
-                              OperandType operand_type) const;
-  int32_t GetSignedOperand(int operand_index, OperandType operand_type) const;
-
-  void UpdateOperandScale();
-
-  Handle<BytecodeArray> bytecode_array_;
-  int bytecode_offset_;
-  OperandScale operand_scale_;
-  int prefix_offset_;
-
   DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
 };
 
@@ -66,4 +27,4 @@
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_INTERPRETER_BYTECODE_GRAPH_ITERATOR_H_
+#endif  // V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
diff --git a/src/interpreter/bytecode-array-random-iterator.cc b/src/interpreter/bytecode-array-random-iterator.cc
new file mode 100644
index 0000000..f499887
--- /dev/null
+++ b/src/interpreter/bytecode-array-random-iterator.cc
@@ -0,0 +1,37 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-random-iterator.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeArrayRandomIterator::BytecodeArrayRandomIterator(
+    Handle<BytecodeArray> bytecode_array, Zone* zone)
+    : BytecodeArrayAccessor(bytecode_array, 0), offsets_(zone) {
+  // Run forwards through the bytecode array to determine the offset of each
+  // bytecode.
+  while (current_offset() < bytecode_array->length()) {
+    offsets_.push_back(current_offset());
+    SetOffset(current_offset() + current_bytecode_size());
+  }
+  GoToStart();
+}
+
+bool BytecodeArrayRandomIterator::IsValid() const {
+  return current_index_ >= 0 &&
+         static_cast<size_t>(current_index_) < offsets_.size();
+}
+
+void BytecodeArrayRandomIterator::UpdateOffsetFromIndex() {
+  if (IsValid()) {
+    SetOffset(offsets_[current_index_]);
+  }
+}
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
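
Because bytecodes are variable-length (and may carry a Wide/ExtraWide prefix), random access first needs the linear pass above to record every bytecode's start offset; after that, moving the iterator is plain index arithmetic over offsets_. A sketch of a reverse walk using the interface declared in the header that follows (the surrounding handle and zone are assumed):

BytecodeArrayRandomIterator iterator(bytecode_array, zone);
iterator.GoToEnd();
while (iterator.IsValid()) {
  Bytecode bytecode = iterator.current_bytecode();
  // ... e.g. a backwards pass over the bytecode ...
  --iterator;  // decrements the index; the offset comes from offsets_
}
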
diff --git a/src/interpreter/bytecode-array-random-iterator.h b/src/interpreter/bytecode-array-random-iterator.h
new file mode 100644
index 0000000..7d559ea
--- /dev/null
+++ b/src/interpreter/bytecode-array-random-iterator.h
@@ -0,0 +1,78 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
+
+#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
+    : public BytecodeArrayAccessor {
+ public:
+  explicit BytecodeArrayRandomIterator(Handle<BytecodeArray> bytecode_array,
+                                       Zone* zone);
+
+  BytecodeArrayRandomIterator& operator++() {
+    ++current_index_;
+    UpdateOffsetFromIndex();
+    return *this;
+  }
+  BytecodeArrayRandomIterator& operator--() {
+    --current_index_;
+    UpdateOffsetFromIndex();
+    return *this;
+  }
+
+  BytecodeArrayRandomIterator& operator+=(int offset) {
+    current_index_ += offset;
+    UpdateOffsetFromIndex();
+    return *this;
+  }
+
+  BytecodeArrayRandomIterator& operator-=(int offset) {
+    current_index_ -= offset;
+    UpdateOffsetFromIndex();
+    return *this;
+  }
+
+  int current_index() const { return current_index_; }
+
+  size_t size() const { return offsets_.size(); }
+
+  void GoToIndex(int index) {
+    current_index_ = index;
+    UpdateOffsetFromIndex();
+  }
+  void GoToStart() {
+    current_index_ = 0;
+    UpdateOffsetFromIndex();
+  }
+  void GoToEnd() {
+    DCHECK_LT(offsets_.size() - 1, static_cast<size_t>(INT_MAX));
+    current_index_ = static_cast<int>(offsets_.size() - 1);
+    UpdateOffsetFromIndex();
+  }
+
+  bool IsValid() const;
+
+ private:
+  ZoneVector<int> offsets_;
+  int current_index_;
+
+  void UpdateOffsetFromIndex();
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeArrayRandomIterator);
+};
+
+}  // namespace interpreter
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
diff --git a/src/interpreter/bytecode-array-writer.cc b/src/interpreter/bytecode-array-writer.cc
index 28f997b..225af0e 100644
--- a/src/interpreter/bytecode-array-writer.cc
+++ b/src/interpreter/bytecode-array-writer.cc
@@ -9,6 +9,7 @@
 #include "src/interpreter/bytecode-register.h"
 #include "src/interpreter/constant-array-builder.h"
 #include "src/log.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -163,6 +164,8 @@
       return Bytecode::kJumpIfNullConstant;
     case Bytecode::kJumpIfUndefined:
       return Bytecode::kJumpIfUndefinedConstant;
+    case Bytecode::kJumpIfJSReceiver:
+      return Bytecode::kJumpIfJSReceiverConstant;
     default:
       UNREACHABLE();
       return Bytecode::kIllegal;
@@ -172,16 +175,19 @@
 void BytecodeArrayWriter::PatchJumpWith8BitOperand(size_t jump_location,
                                                    int delta) {
   Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+  DCHECK(Bytecodes::IsForwardJump(jump_bytecode));
   DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+  DCHECK_EQ(Bytecodes::GetOperandType(jump_bytecode, 0), OperandType::kUImm);
+  DCHECK_GT(delta, 0);
   size_t operand_location = jump_location + 1;
   DCHECK_EQ(bytecodes()->at(operand_location), k8BitJumpPlaceholder);
-  if (Bytecodes::ScaleForSignedOperand(delta) == OperandScale::kSingle) {
-    // The jump fits within the range of an Imm8 operand, so cancel
+  if (Bytecodes::ScaleForUnsignedOperand(delta) == OperandScale::kSingle) {
+    // The jump fits within the range of a UImm8 operand, so cancel
     // the reservation and jump directly.
     constant_array_builder()->DiscardReservedEntry(OperandSize::kByte);
     bytecodes()->at(operand_location) = static_cast<uint8_t>(delta);
   } else {
-    // The jump does not fit within the range of an Imm8 operand, so
+    // The jump does not fit within the range of a UImm8 operand, so
     // commit reservation putting the offset into the constant pool,
     // and update the jump instruction and operand.
     size_t entry = constant_array_builder()->CommitReservedEntry(
@@ -197,10 +203,13 @@
 void BytecodeArrayWriter::PatchJumpWith16BitOperand(size_t jump_location,
                                                     int delta) {
   Bytecode jump_bytecode = Bytecodes::FromByte(bytecodes()->at(jump_location));
+  DCHECK(Bytecodes::IsForwardJump(jump_bytecode));
   DCHECK(Bytecodes::IsJumpImmediate(jump_bytecode));
+  DCHECK_EQ(Bytecodes::GetOperandType(jump_bytecode, 0), OperandType::kUImm);
+  DCHECK_GT(delta, 0);
   size_t operand_location = jump_location + 1;
   uint8_t operand_bytes[2];
-  if (Bytecodes::ScaleForSignedOperand(delta) <= OperandScale::kDouble) {
+  if (Bytecodes::ScaleForUnsignedOperand(delta) <= OperandScale::kDouble) {
     // The jump fits within the range of a UImm16 operand, so cancel
     // the reservation and jump directly.
     constant_array_builder()->DiscardReservedEntry(OperandSize::kShort);
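
Both patching paths now size the operand with ScaleForUnsignedOperand, which is correct because a forward jump's delta is strictly positive. The ladder being assumed is the usual one: a single-scale unsigned operand is one byte, double is two, quadruple is four. Restated as a sketch (not V8's actual implementation):

#include <cstdint>

enum class OperandScale { kSingle = 1, kDouble = 2, kQuadruple = 4 };

OperandScale ScaleForUnsignedOperand(uint32_t value) {
  if (value <= 0xFF) return OperandScale::kSingle;    // fits a UImm8
  if (value <= 0xFFFF) return OperandScale::kDouble;  // fits a UImm16
  return OperandScale::kQuadruple;                    // needs a UImm32
}
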
@@ -279,18 +288,16 @@
 
   if (label->is_bound()) {
     CHECK_GE(current_offset, label->offset());
-    CHECK_LE(current_offset, static_cast<size_t>(kMaxInt));
+    CHECK_LE(current_offset, static_cast<size_t>(kMaxUInt32));
     // Label has been bound already so this is a backwards jump.
-    size_t abs_delta = current_offset - label->offset();
-    int delta = -static_cast<int>(abs_delta);
-    OperandScale operand_scale = Bytecodes::ScaleForSignedOperand(delta);
+    uint32_t delta = static_cast<uint32_t>(current_offset - label->offset());
+    OperandScale operand_scale = Bytecodes::ScaleForUnsignedOperand(delta);
     if (operand_scale > OperandScale::kSingle) {
       // Adjust for scaling byte prefix for wide jump offset.
-      DCHECK_LE(delta, 0);
-      delta -= 1;
+      delta += 1;
     }
     DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode());
-    node->set_bytecode(node->bytecode(), delta, node->operand(1));
+    node->update_operand0(delta);
   } else {
     // The label has not yet been bound so this is a forward reference
     // that will be patched when the label is bound. We create a
@@ -308,13 +315,13 @@
         UNREACHABLE();
         break;
       case OperandSize::kByte:
-        node->set_bytecode(node->bytecode(), k8BitJumpPlaceholder);
+        node->update_operand0(k8BitJumpPlaceholder);
         break;
       case OperandSize::kShort:
-        node->set_bytecode(node->bytecode(), k16BitJumpPlaceholder);
+        node->update_operand0(k16BitJumpPlaceholder);
         break;
       case OperandSize::kQuad:
-        node->set_bytecode(node->bytecode(), k32BitJumpPlaceholder);
+        node->update_operand0(k32BitJumpPlaceholder);
         break;
     }
   }
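
A worked example of the backward-jump branch above, with hypothetical offsets: if the loop header was bound at offset 10 and the JumpLoop is being written at offset 300, the delta is 290, which needs OperandScale::kDouble; the Wide prefix byte emitted before the JumpLoop moves the instruction one byte further from its target, hence the delta adjustment. As a sketch:

#include <cstdint>

uint32_t BackwardJumpOperand(uint32_t jump_offset, uint32_t label_offset) {
  uint32_t delta = jump_offset - label_offset;  // e.g. 300 - 10 = 290
  if (delta > 0xFF) {
    // A scaling prefix byte precedes the jump, adding one byte of distance.
    delta += 1;  // e.g. 291
  }
  return delta;
}
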
diff --git a/src/interpreter/bytecode-decoder.cc b/src/interpreter/bytecode-decoder.cc
index 4975189..f003969 100644
--- a/src/interpreter/bytecode-decoder.cc
+++ b/src/interpreter/bytecode-decoder.cc
@@ -6,7 +6,7 @@
 
 #include <iomanip>
 
-#include "src/utils.h"
+#include "src/interpreter/interpreter-intrinsics.h"
 
 namespace v8 {
 namespace internal {
@@ -67,6 +67,23 @@
   return 0;
 }
 
+namespace {
+const char* NameForRuntimeId(uint32_t idx) {
+  switch (idx) {
+#define CASE(name, nargs, ressize) \
+  case Runtime::k##name:           \
+    return #name;                  \
+  case Runtime::kInline##name:     \
+    return #name;
+    FOR_EACH_INTRINSIC(CASE)
+#undef CASE
+    default:
+      UNREACHABLE();
+      return nullptr;
+  }
+}
+}  // anonymous namespace
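
NameForRuntimeId() uses the FOR_EACH_INTRINSIC X-macro to stamp out one pair of cases per runtime function, so both the plain and the kInline ids print the same name. The generative pattern, reduced to a hypothetical two-entry list:

#include <cstdio>

#define FOR_EACH_DEMO(V) V(Add) V(Mul)

enum DemoId {
#define ID(name) kDemo##name, kDemoInline##name,
  FOR_EACH_DEMO(ID)
#undef ID
};

const char* NameForDemoId(DemoId id) {
  switch (id) {
#define CASE(name)        \
  case kDemo##name:       \
    return #name;         \
  case kDemoInline##name: \
    return #name;
    FOR_EACH_DEMO(CASE)
#undef CASE
  }
  return nullptr;
}

int main() { std::printf("%s\n", NameForDemoId(kDemoInlineMul)); }  // Mul
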
+
 // static
 std::ostream& BytecodeDecoder::Decode(std::ostream& os,
                                       const uint8_t* bytecode_start,
@@ -112,12 +129,21 @@
     switch (op_type) {
       case interpreter::OperandType::kIdx:
       case interpreter::OperandType::kUImm:
-      case interpreter::OperandType::kRuntimeId:
-      case interpreter::OperandType::kIntrinsicId:
         os << "["
            << DecodeUnsignedOperand(operand_start, op_type, operand_scale)
            << "]";
         break;
+      case interpreter::OperandType::kIntrinsicId: {
+        auto id = static_cast<IntrinsicsHelper::IntrinsicId>(
+            DecodeUnsignedOperand(operand_start, op_type, operand_scale));
+        os << "[" << NameForRuntimeId(IntrinsicsHelper::ToRuntimeId(id)) << "]";
+        break;
+      }
+      case interpreter::OperandType::kRuntimeId:
+        os << "[" << NameForRuntimeId(DecodeUnsignedOperand(
+                         operand_start, op_type, operand_scale))
+           << "]";
+        break;
       case interpreter::OperandType::kImm:
         os << "[" << DecodeSignedOperand(operand_start, op_type, operand_scale)
            << "]";
diff --git a/src/interpreter/bytecode-flags.cc b/src/interpreter/bytecode-flags.cc
index 158af13..57277c8 100644
--- a/src/interpreter/bytecode-flags.cc
+++ b/src/interpreter/bytecode-flags.cc
@@ -4,7 +4,9 @@
 
 #include "src/interpreter/bytecode-flags.h"
 
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-stubs.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -25,10 +27,11 @@
   uint8_t result = FlagsBits::encode(runtime_flags);
   if (fast_clone_supported) {
     STATIC_ASSERT(
-        FastCloneShallowObjectStub::kMaximumClonedProperties <=
+        ConstructorBuiltinsAssembler::kMaximumClonedShallowObjectProperties <=
         1 << CreateObjectLiteralFlags::FastClonePropertiesCountBits::kShift);
-    DCHECK_LE(properties_count,
-              FastCloneShallowObjectStub::kMaximumClonedProperties);
+    DCHECK_LE(
+        properties_count,
+        ConstructorBuiltinsAssembler::kMaximumClonedShallowObjectProperties);
     result |= CreateObjectLiteralFlags::FastClonePropertiesCountBits::encode(
         properties_count);
   }
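
The STATIC_ASSERT being updated above guards a bit-field encoding: several small values are packed into one flags byte, and the assert fails the build if the maximum property count could overflow its field. A hand-rolled sketch of the encode-plus-assert pattern (V8 has its own BitField template of the same shape; the widths below are illustrative):

#include <cstdint>

// Simplified bit-field helper in the spirit of V8's BitField template.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

using FlagsBits = BitField<uint8_t, 0, 3>;            // low 3 bits: flags
using PropertiesCountBits = BitField<uint8_t, 3, 5>;  // next 5 bits: count

constexpr int kMaxProperties = 16;
// Fails to compile if the count could overflow its 5-bit field.
static_assert(kMaxProperties <= (1 << 5), "count must fit its bit field");

uint8_t Encode(uint8_t flags, uint8_t properties_count) {
  return static_cast<uint8_t>(FlagsBits::encode(flags) |
                              PropertiesCountBits::encode(properties_count));
}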
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc
index 99e7672..0310509 100644
--- a/src/interpreter/bytecode-generator.cc
+++ b/src/interpreter/bytecode-generator.cc
@@ -6,6 +6,7 @@
 
 #include "src/ast/compile-time-value.h"
 #include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-stubs.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
@@ -13,7 +14,7 @@
 #include "src/interpreter/bytecode-label.h"
 #include "src/interpreter/bytecode-register-allocator.h"
 #include "src/interpreter/control-flow-builders.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
 #include "src/parsing/parse-info.h"
 #include "src/parsing/token.h"
 
@@ -34,6 +35,7 @@
         register_(Register::current_context()),
         depth_(0),
         should_pop_context_(should_pop_context) {
+    DCHECK(scope->NeedsContext() || outer_ == nullptr);
     if (outer_) {
       depth_ = outer_->depth_ + 1;
 
@@ -74,7 +76,6 @@
     return previous;
   }
 
-  Scope* scope() const { return scope_; }
   Register reg() const { return register_; }
   bool ShouldPopContext() { return should_pop_context_; }
 
@@ -105,12 +106,19 @@
   void Break(Statement* stmt) { PerformCommand(CMD_BREAK, stmt); }
   void Continue(Statement* stmt) { PerformCommand(CMD_CONTINUE, stmt); }
   void ReturnAccumulator() { PerformCommand(CMD_RETURN, nullptr); }
+  void AsyncReturnAccumulator() { PerformCommand(CMD_ASYNC_RETURN, nullptr); }
   void ReThrowAccumulator() { PerformCommand(CMD_RETHROW, nullptr); }
 
   class DeferredCommands;
 
  protected:
-  enum Command { CMD_BREAK, CMD_CONTINUE, CMD_RETURN, CMD_RETHROW };
+  enum Command {
+    CMD_BREAK,
+    CMD_CONTINUE,
+    CMD_RETURN,
+    CMD_ASYNC_RETURN,
+    CMD_RETHROW
+  };
   void PerformCommand(Command command, Statement* statement);
   virtual bool Execute(Command command, Statement* statement) = 0;
 
@@ -220,6 +228,9 @@
       case CMD_RETURN:
         generator()->BuildReturn();
         return true;
+      case CMD_ASYNC_RETURN:
+        generator()->BuildAsyncReturn();
+        return true;
       case CMD_RETHROW:
         generator()->BuildReThrow();
         return true;
@@ -248,6 +259,7 @@
         return true;
       case CMD_CONTINUE:
       case CMD_RETURN:
+      case CMD_ASYNC_RETURN:
       case CMD_RETHROW:
         break;
     }
@@ -285,6 +297,7 @@
         loop_builder_->Continue();
         return true;
       case CMD_RETURN:
+      case CMD_ASYNC_RETURN:
       case CMD_RETHROW:
         break;
     }
@@ -310,6 +323,7 @@
       case CMD_BREAK:
       case CMD_CONTINUE:
       case CMD_RETURN:
+      case CMD_ASYNC_RETURN:
         break;
       case CMD_RETHROW:
         generator()->BuildReThrow();
@@ -336,6 +350,7 @@
       case CMD_BREAK:
       case CMD_CONTINUE:
       case CMD_RETURN:
+      case CMD_ASYNC_RETURN:
       case CMD_RETHROW:
         commands_->RecordCommand(command, statement);
         try_finally_builder_->LeaveTry();
@@ -496,24 +511,25 @@
         constant_pool_entry_(0),
         has_constant_pool_entry_(false) {}
 
-  void AddFunctionDeclaration(FeedbackVectorSlot slot, FunctionLiteral* func) {
+  void AddFunctionDeclaration(const AstRawString* name, FeedbackSlot slot,
+                              FeedbackSlot literal_slot,
+                              FunctionLiteral* func) {
     DCHECK(!slot.IsInvalid());
-    declarations_.push_back(std::make_pair(slot, func));
+    declarations_.push_back(Declaration(name, slot, literal_slot, func));
   }
 
-  void AddUndefinedDeclaration(FeedbackVectorSlot slot) {
+  void AddUndefinedDeclaration(const AstRawString* name, FeedbackSlot slot) {
     DCHECK(!slot.IsInvalid());
-    declarations_.push_back(std::make_pair(slot, nullptr));
+    declarations_.push_back(Declaration(name, slot, nullptr));
   }
 
-  Handle<FixedArray> AllocateDeclarationPairs(CompilationInfo* info) {
+  Handle<FixedArray> AllocateDeclarations(CompilationInfo* info) {
     DCHECK(has_constant_pool_entry_);
     int array_index = 0;
-    Handle<FixedArray> pairs = info->isolate()->factory()->NewFixedArray(
-        static_cast<int>(declarations_.size() * 2), TENURED);
-    for (std::pair<FeedbackVectorSlot, FunctionLiteral*> declaration :
-         declarations_) {
-      FunctionLiteral* func = declaration.second;
+    Handle<FixedArray> data = info->isolate()->factory()->NewFixedArray(
+        static_cast<int>(declarations_.size() * 4), TENURED);
+    for (const Declaration& declaration : declarations_) {
+      FunctionLiteral* func = declaration.func;
       Handle<Object> initial_value;
       if (func == nullptr) {
         initial_value = info->isolate()->factory()->undefined_value();
@@ -526,10 +542,19 @@
       // will set stack overflow.
       if (initial_value.is_null()) return Handle<FixedArray>();
 
-      pairs->set(array_index++, Smi::FromInt(declaration.first.ToInt()));
-      pairs->set(array_index++, *initial_value);
+      data->set(array_index++, *declaration.name->string());
+      data->set(array_index++, Smi::FromInt(declaration.slot.ToInt()));
+      Object* undefined_or_literal_slot;
+      if (declaration.literal_slot.IsInvalid()) {
+        undefined_or_literal_slot = info->isolate()->heap()->undefined_value();
+      } else {
+        undefined_or_literal_slot =
+            Smi::FromInt(declaration.literal_slot.ToInt());
+      }
+      data->set(array_index++, undefined_or_literal_slot);
+      data->set(array_index++, *initial_value);
     }
-    return pairs;
+    return data;
   }
 
   size_t constant_pool_entry() {
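
The GlobalDeclarationsBuilder change above moves from (slot, function) pairs to flat records of four entries per declaration: name, feedback slot, literal slot or undefined, and initial value, hence declarations_.size() * 4. A sketch of writing such a flattened record array, with plain C++ types standing in for V8 handles and Smis (illustrative only):

#include <string>
#include <vector>

// Stand-in for one declaration record; V8 packs these into a FixedArray.
struct Declaration {
  std::string name;
  int slot;
  int literal_slot;  // -1 plays the role of the invalid / undefined slot
  std::string initial_value;
};

// Flatten: four consecutive entries per record.
std::vector<std::string> Flatten(const std::vector<Declaration>& decls) {
  std::vector<std::string> data;
  data.reserve(decls.size() * 4);
  for (const Declaration& d : decls) {
    data.push_back(d.name);
    data.push_back(std::to_string(d.slot));
    data.push_back(d.literal_slot < 0 ? "undefined"
                                      : std::to_string(d.literal_slot));
    data.push_back(d.initial_value);
  }
  return data;
}

// Readers walk the array with a stride of 4, mirroring the runtime side.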
@@ -547,11 +572,47 @@
   bool empty() { return declarations_.empty(); }
 
  private:
-  ZoneVector<std::pair<FeedbackVectorSlot, FunctionLiteral*>> declarations_;
+  struct Declaration {
+    Declaration() : slot(FeedbackSlot::Invalid()), func(nullptr) {}
+    Declaration(const AstRawString* name, FeedbackSlot slot,
+                FeedbackSlot literal_slot, FunctionLiteral* func)
+        : name(name), slot(slot), literal_slot(literal_slot), func(func) {}
+    Declaration(const AstRawString* name, FeedbackSlot slot,
+                FunctionLiteral* func)
+        : name(name),
+          slot(slot),
+          literal_slot(FeedbackSlot::Invalid()),
+          func(func) {}
+
+    const AstRawString* name;
+    FeedbackSlot slot;
+    FeedbackSlot literal_slot;
+    FunctionLiteral* func;
+  };
+  ZoneVector<Declaration> declarations_;
   size_t constant_pool_entry_;
   bool has_constant_pool_entry_;
 };
 
+class BytecodeGenerator::CurrentScope final {
+ public:
+  CurrentScope(BytecodeGenerator* generator, Scope* scope)
+      : generator_(generator), outer_scope_(generator->current_scope()) {
+    if (scope != nullptr) {
+      generator_->set_current_scope(scope);
+    }
+  }
+  ~CurrentScope() {
+    if (outer_scope_ != generator_->current_scope()) {
+      generator_->set_current_scope(outer_scope_);
+    }
+  }
+
+ private:
+  BytecodeGenerator* generator_;
+  Scope* outer_scope_;
+};
+
 BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
     : zone_(info->zone()),
       builder_(new (zone()) BytecodeArrayBuilder(
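
CurrentScope, introduced above, is a small RAII guard: the constructor swaps in the new scope and the destructor restores the outer one, so every Visit* path unwinds correctly even on early returns. The same pattern in isolation, with illustrative names:

// RAII scope guard: set on entry, restore on exit, safe against early returns.
class Tracker {
 public:
  void set_current(int scope) { current_ = scope; }
  int current() const { return current_; }

 private:
  int current_ = 0;
};

class ScopedCurrent {
 public:
  ScopedCurrent(Tracker* tracker, int scope)
      : tracker_(tracker), outer_(tracker->current()) {
    tracker_->set_current(scope);
  }
  ~ScopedCurrent() { tracker_->set_current(outer_); }  // always restored

 private:
  Tracker* tracker_;
  int outer_;
};

// Usage: { ScopedCurrent guard(&tracker, 42); /* ... */ }
// tracker.current() is back to the outer value once the block exits.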
@@ -560,39 +621,41 @@
           info->scope()->num_stack_slots(), info->literal(),
           info->SourcePositionRecordingMode())),
       info_(info),
-      scope_(info->scope()),
+      closure_scope_(info->scope()),
+      current_scope_(info->scope()),
       globals_builder_(new (zone()) GlobalDeclarationsBuilder(info->zone())),
       global_declarations_(0, info->zone()),
       function_literals_(0, info->zone()),
       native_function_literals_(0, info->zone()),
+      object_literals_(0, info->zone()),
+      array_literals_(0, info->zone()),
       execution_control_(nullptr),
       execution_context_(nullptr),
       execution_result_(nullptr),
       generator_resume_points_(info->literal()->yield_count(), info->zone()),
       generator_state_(),
       loop_depth_(0),
-      home_object_symbol_(info->isolate()->factory()->home_object_symbol()),
-      empty_fixed_array_(info->isolate()->factory()->empty_fixed_array()) {
-  AstValueFactory* ast_value_factory = info->parse_info()->ast_value_factory();
-  const AstRawString* prototype_string = ast_value_factory->prototype_string();
-  ast_value_factory->Internalize(info->isolate());
-  prototype_string_ = prototype_string->string();
+      prototype_string_(
+          info->isolate()->ast_string_constants()->prototype_string()),
+      undefined_string_(
+          info->isolate()->ast_string_constants()->undefined_string()) {
+  DCHECK_EQ(closure_scope(), closure_scope()->GetClosureScope());
 }
 
 Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
-  AllocateDeferredConstants();
+  AllocateDeferredConstants(isolate);
   if (HasStackOverflow()) return Handle<BytecodeArray>();
   return builder()->ToBytecodeArray(isolate);
 }
 
-void BytecodeGenerator::AllocateDeferredConstants() {
+void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate) {
   // Build global declaration pair arrays.
   for (GlobalDeclarationsBuilder* globals_builder : global_declarations_) {
     Handle<FixedArray> declarations =
-        globals_builder->AllocateDeclarationPairs(info());
+        globals_builder->AllocateDeclarations(info());
     if (declarations.is_null()) return SetStackOverflow();
-    builder()->InsertConstantPoolEntryAt(globals_builder->constant_pool_entry(),
-                                         declarations);
+    builder()->SetDeferredConstantPoolEntry(
+        globals_builder->constant_pool_entry(), declarations);
   }
 
   // Find or build shared function infos.
@@ -601,7 +664,7 @@
     Handle<SharedFunctionInfo> shared_info =
         Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
     if (shared_info.is_null()) return SetStackOverflow();
-    builder()->InsertConstantPoolEntryAt(literal.second, shared_info);
+    builder()->SetDeferredConstantPoolEntry(literal.second, shared_info);
   }
 
   // Find or build shared function infos for the native function templates.
@@ -612,7 +675,29 @@
         Compiler::GetSharedFunctionInfoForNative(expr->extension(),
                                                  expr->name());
     if (shared_info.is_null()) return SetStackOverflow();
-    builder()->InsertConstantPoolEntryAt(literal.second, shared_info);
+    builder()->SetDeferredConstantPoolEntry(literal.second, shared_info);
+  }
+
+  // Build object literal constant properties.
+  for (std::pair<ObjectLiteral*, size_t> literal : object_literals_) {
+    ObjectLiteral* object_literal = literal.first;
+    if (object_literal->properties_count() > 0) {
+      // If the constant properties array is empty, it has already been added
+      // to the constant pool when visiting the object literal.
+      Handle<BoilerplateDescription> constant_properties =
+          object_literal->GetOrBuildConstantProperties(isolate);
+
+      builder()->SetDeferredConstantPoolEntry(literal.second,
+                                              constant_properties);
+    }
+  }
+
+  // Build array literal constant elements.
+  for (std::pair<ArrayLiteral*, size_t> literal : array_literals_) {
+    ArrayLiteral* array_literal = literal.first;
+    Handle<ConstantElementsPair> constant_elements =
+        array_literal->GetOrBuildConstantElements(isolate);
+    builder()->SetDeferredConstantPoolEntry(literal.second, constant_elements);
   }
 }
 
@@ -624,7 +709,7 @@
   InitializeAstVisitor(stack_limit);
 
   // Initialize the incoming context.
-  ContextScope incoming_context(this, scope(), false);
+  ContextScope incoming_context(this, closure_scope(), false);
 
   // Initialize control scope.
   ControlScopeForTopLevel control(this);
@@ -636,10 +721,10 @@
     VisitGeneratorPrologue();
   }
 
-  if (scope()->NeedsContext()) {
+  if (closure_scope()->NeedsContext()) {
     // Push a new inner context scope for the function.
     BuildNewLocalActivationContext();
-    ContextScope local_function_context(this, scope(), false);
+    ContextScope local_function_context(this, closure_scope(), false);
     BuildLocalActivationContextInitialization();
     GenerateBytecodeBody();
   } else {
@@ -665,23 +750,23 @@
 
 void BytecodeGenerator::GenerateBytecodeBody() {
   // Build the arguments object if it is used.
-  VisitArgumentsObject(scope()->arguments());
+  VisitArgumentsObject(closure_scope()->arguments());
 
   // Build rest arguments array if it is used.
-  Variable* rest_parameter = scope()->rest_parameter();
+  Variable* rest_parameter = closure_scope()->rest_parameter();
   VisitRestArgumentsArray(rest_parameter);
 
   // Build assignment to {.this_function} variable if it is used.
-  VisitThisFunctionVariable(scope()->this_function_var());
+  VisitThisFunctionVariable(closure_scope()->this_function_var());
 
   // Build assignment to {new.target} variable if it is used.
-  VisitNewTargetVariable(scope()->new_target_var());
+  VisitNewTargetVariable(closure_scope()->new_target_var());
 
   // Emit tracing call if requested to do so.
   if (FLAG_trace) builder()->CallRuntime(Runtime::kTraceEnter);
 
   // Visit declarations within the function scope.
-  VisitDeclarations(scope()->declarations());
+  VisitDeclarations(closure_scope()->declarations());
 
   // Emit initializing assignments for module namespace imports (if any).
   VisitModuleNamespaceImports();
@@ -711,22 +796,25 @@
                                              LoopBuilder* loop_builder) {
   // Recall that stmt->yield_count() is always zero inside ordinary
   // (i.e. non-generator) functions.
+  if (stmt->yield_count() == 0) {
+    loop_builder->LoopHeader();
+  } else {
+    // Collect all labels for generator resume points within the loop (if any)
+    // so that they can be bound to the loop header below. Also create fresh
+    // labels for these resume points, to be used inside the loop.
+    ZoneVector<BytecodeLabel> resume_points_in_loop(zone());
+    size_t first_yield = stmt->first_yield_id();
+    DCHECK_LE(first_yield + stmt->yield_count(),
+              generator_resume_points_.size());
+    for (size_t id = first_yield; id < first_yield + stmt->yield_count();
+         id++) {
+      auto& label = generator_resume_points_[id];
+      resume_points_in_loop.push_back(label);
+      generator_resume_points_[id] = BytecodeLabel();
+    }
 
-  // Collect all labels for generator resume points within the loop (if any) so
-  // that they can be bound to the loop header below. Also create fresh labels
-  // for these resume points, to be used inside the loop.
-  ZoneVector<BytecodeLabel> resume_points_in_loop(zone());
-  size_t first_yield = stmt->first_yield_id();
-  DCHECK_LE(first_yield + stmt->yield_count(), generator_resume_points_.size());
-  for (size_t id = first_yield; id < first_yield + stmt->yield_count(); id++) {
-    auto& label = generator_resume_points_[id];
-    resume_points_in_loop.push_back(label);
-    generator_resume_points_[id] = BytecodeLabel();
-  }
+    loop_builder->LoopHeader(&resume_points_in_loop);
 
-  loop_builder->LoopHeader(&resume_points_in_loop);
-
-  if (stmt->yield_count() > 0) {
     // If we are not resuming, fall through to loop body.
     // If we are resuming, perform state dispatch.
     BytecodeLabel not_resuming;
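
The restructured loop header above only builds resume-point plumbing when the loop actually contains yields; resume labels inside the loop are rebound to the header so a resuming generator enters at the header and dispatches on its saved state. A reduced C++ analogue of that switch-plus-jump state dispatch, written as a hand-rolled resumable counter (purely illustrative):

#include <cstdio>

// A resumable "generator" built by hand: each call re-enters at the loop
// header and dispatches on the saved state, much like BuildIndexedJump.
struct Counter {
  int state = 0;  // 0 = not started; 1 = suspended at the yield
  int i = 0;

  bool Next(int* out) {
    switch (state) {        // state dispatch at the (loop) header
      case 1: goto resume;  // jump back into the loop body
    }
    for (i = 0; i < 3; i++) {
      *out = i;
      state = 1;
      return true;  // "yield"
    resume:;
    }
    state = 0;
    return false;  // done
  }
};

int main() {
  Counter c;
  int v;
  while (c.Next(&v)) std::printf("%d\n", v);  // prints 0 1 2
}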
@@ -751,10 +839,13 @@
       ->LoadAccumulatorWithRegister(generator_object)
       .JumpIfUndefined(&regular_call);
 
-  // This is a resume call. Restore registers and perform state dispatch.
-  // (The current context has already been restored by the trampoline.)
+  // This is a resume call. Restore the current context and the registers, then
+  // perform state dispatch.
+  Register dummy = register_allocator()->NewRegister();
   builder()
-      ->ResumeGenerator(generator_object)
+      ->CallRuntime(Runtime::kInlineGeneratorGetContext, generator_object)
+      .PushContext(dummy)
+      .ResumeGenerator(generator_object)
       .StoreAccumulatorInRegister(generator_state_);
   BuildIndexedJump(generator_state_, 0, generator_resume_points_.size(),
                    generator_resume_points_);
@@ -770,6 +861,7 @@
 
 void BytecodeGenerator::VisitBlock(Block* stmt) {
   // Visit declarations and statements.
+  CurrentScope current_scope(this, stmt->scope());
   if (stmt->scope() != nullptr && stmt->scope()->NeedsContext()) {
     BuildNewLocalBlockContext(stmt->scope());
     ContextScope scope(this, stmt->scope());
@@ -794,13 +886,13 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
-      globals_builder()->AddUndefinedDeclaration(slot);
+      FeedbackSlot slot = decl->proxy()->VariableFeedbackSlot();
+      globals_builder()->AddUndefinedDeclaration(variable->raw_name(), slot);
       break;
     }
     case VariableLocation::LOCAL:
       if (variable->binding_needs_init()) {
-        Register destination(variable->index());
+        Register destination(builder()->Local(variable->index()));
         builder()->LoadTheHole().StoreAccumulatorInRegister(destination);
       }
       break;
@@ -826,7 +918,7 @@
       Register name = register_allocator()->NewRegister();
 
       builder()
-          ->LoadLiteral(variable->name())
+          ->LoadLiteral(variable->raw_name())
           .StoreAccumulatorInRegister(name)
           .CallRuntime(Runtime::kDeclareEvalVar, name);
       break;
@@ -834,8 +926,7 @@
     case VariableLocation::MODULE:
       if (variable->IsExport() && variable->binding_needs_init()) {
         builder()->LoadTheHole();
-        BuildVariableAssignment(variable, Token::INIT,
-                                FeedbackVectorSlot::Invalid(),
+        BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
                                 HoleCheckMode::kElided);
       }
       // Nothing to do for imports.
@@ -848,15 +939,16 @@
   DCHECK(variable->mode() == LET || variable->mode() == VAR);
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
-      FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
-      globals_builder()->AddFunctionDeclaration(slot, decl->fun());
+      FeedbackSlot slot = decl->proxy()->VariableFeedbackSlot();
+      globals_builder()->AddFunctionDeclaration(
+          variable->raw_name(), slot, decl->fun()->LiteralFeedbackSlot(),
+          decl->fun());
       break;
     }
     case VariableLocation::PARAMETER:
     case VariableLocation::LOCAL: {
       VisitForAccumulatorValue(decl->fun());
-      BuildVariableAssignment(variable, Token::INIT,
-                              FeedbackVectorSlot::Invalid(),
+      BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
                               HoleCheckMode::kElided);
       break;
     }
@@ -870,7 +962,7 @@
     case VariableLocation::LOOKUP: {
       RegisterList args = register_allocator()->NewRegisterList(2);
       builder()
-          ->LoadLiteral(variable->name())
+          ->LoadLiteral(variable->raw_name())
           .StoreAccumulatorInRegister(args[0]);
       VisitForAccumulatorValue(decl->fun());
       builder()->StoreAccumulatorInRegister(args[1]).CallRuntime(
@@ -881,28 +973,27 @@
       DCHECK_EQ(variable->mode(), LET);
       DCHECK(variable->IsExport());
       VisitForAccumulatorValue(decl->fun());
-      BuildVariableAssignment(variable, Token::INIT,
-                              FeedbackVectorSlot::Invalid(),
+      BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
                               HoleCheckMode::kElided);
       break;
   }
 }
 
 void BytecodeGenerator::VisitModuleNamespaceImports() {
-  if (!scope()->is_module_scope()) return;
+  if (!closure_scope()->is_module_scope()) return;
 
   RegisterAllocationScope register_scope(this);
   Register module_request = register_allocator()->NewRegister();
 
-  ModuleDescriptor* descriptor = scope()->AsModuleScope()->module();
+  ModuleDescriptor* descriptor = closure_scope()->AsModuleScope()->module();
   for (auto entry : descriptor->namespace_imports()) {
     builder()
         ->LoadLiteral(Smi::FromInt(entry->module_request))
         .StoreAccumulatorInRegister(module_request)
         .CallRuntime(Runtime::kGetModuleNamespace, module_request);
-    Variable* var = scope()->LookupLocal(entry->local_name);
+    Variable* var = closure_scope()->LookupLocal(entry->local_name);
     DCHECK_NOT_NULL(var);
-    BuildVariableAssignment(var, Token::INIT, FeedbackVectorSlot::Invalid(),
+    BuildVariableAssignment(var, Token::INIT, FeedbackSlot::Invalid(),
                             HoleCheckMode::kElided);
   }
 }
@@ -917,7 +1008,7 @@
   if (globals_builder()->empty()) return;
 
   globals_builder()->set_constant_pool_entry(
-      builder()->AllocateConstantPoolEntry());
+      builder()->AllocateDeferredConstantPoolEntry());
   int encoded_flags = info()->GetDeclareGlobalsFlags();
 
   // Emit code to declare globals.
@@ -1004,7 +1095,12 @@
 void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
   builder()->SetStatementPosition(stmt);
   VisitForAccumulatorValue(stmt->expression());
-  execution_control()->ReturnAccumulator();
+
+  if (stmt->is_async_return()) {
+    execution_control()->AsyncReturnAccumulator();
+  } else {
+    execution_control()->ReturnAccumulator();
+  }
 }
 
 void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
@@ -1146,7 +1242,7 @@
 }
 
 void BytecodeGenerator::VisitForInAssignment(Expression* expr,
-                                             FeedbackVectorSlot slot) {
+                                             FeedbackSlot slot) {
   DCHECK(expr->IsValidReferenceExpression());
 
   // Evaluate assignment starting with the value to be stored in the
@@ -1165,7 +1261,8 @@
       Register value = register_allocator()->NewRegister();
       builder()->StoreAccumulatorInRegister(value);
       Register object = VisitForRegisterValue(property->obj());
-      Handle<String> name = property->key()->AsLiteral()->AsPropertyName();
+      const AstRawString* name =
+          property->key()->AsLiteral()->AsRawPropertyName();
       builder()->LoadAccumulatorWithRegister(value);
       builder()->StoreNamedProperty(object, name, feedback_index(slot),
                                     language_mode());
@@ -1191,7 +1288,7 @@
       VisitForRegisterValue(super_property->this_var(), args[0]);
       VisitForRegisterValue(super_property->home_object(), args[1]);
       builder()
-          ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+          ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
           .StoreAccumulatorInRegister(args[2])
           .CallRuntime(StoreToSuperRuntimeId(), args);
       break;
@@ -1244,7 +1341,7 @@
   builder()->SetExpressionAsStatementPosition(stmt->each());
   builder()->ForInContinue(index, cache_length);
   loop_builder.BreakIfFalse();
-  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  FeedbackSlot slot = stmt->ForInFeedbackSlot();
   builder()->ForInNext(receiver, index, triple.Truncate(2),
                        feedback_index(slot));
   loop_builder.ContinueIfUndefined();
@@ -1300,7 +1397,7 @@
 
   // If requested, clear message object as we enter the catch block.
   if (stmt->clear_pending_message()) {
-    builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage);
+    builder()->LoadTheHole().SetPendingMessage();
   }
 
   // Load the catch context into the accumulator.
@@ -1359,16 +1456,15 @@
   Register message = context;  // Reuse register.
 
   // Clear message object as we enter the finally block.
-  builder()
-      ->CallRuntime(Runtime::kInterpreterClearPendingMessage)
-      .StoreAccumulatorInRegister(message);
+  builder()->LoadTheHole().SetPendingMessage().StoreAccumulatorInRegister(
+      message);
 
   // Evaluate the finally-block.
   Visit(stmt->finally_block());
   try_control_builder.EndFinally();
 
   // Pending message object is restored on exit.
-  builder()->CallRuntime(Runtime::kInterpreterSetPendingMessage, message);
+  builder()->LoadAccumulatorWithRegister(message).SetPendingMessage();
 
   // Dynamic dispatch after the finally-block.
   commands.ApplyDeferredCommands();
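
The try/finally lowering above replaces two runtime calls with the SetPendingMessage bytecode, whose save/restore usage here implies exchange semantics: it installs the accumulator as the new pending message and yields the previous one. Under that assumption, the underlying idiom reduces to the following sketch (names and types are illustrative):

#include <utility>

thread_local const char* pending_message = nullptr;

// Exchange-style accessor: set a new message, get the old one back,
// the same shape the SetPendingMessage bytecode is used in above.
const char* SetPendingMessage(const char* new_message) {
  return std::exchange(pending_message, new_message);
}

void RunFinally(void (*finally_block)()) {
  // Clear and save on entry (LoadTheHole + SetPendingMessage + store).
  const char* saved = SetPendingMessage(nullptr);
  finally_block();
  // Restore on exit (load saved register + SetPendingMessage).
  SetPendingMessage(saved);
}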
@@ -1380,61 +1476,57 @@
 }
 
 void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
-  uint8_t flags = CreateClosureFlags::Encode(expr->pretenure(),
-                                             scope()->is_function_scope());
-  size_t entry = builder()->AllocateConstantPoolEntry();
-  builder()->CreateClosure(entry, flags);
+  uint8_t flags = CreateClosureFlags::Encode(
+      expr->pretenure(), closure_scope()->is_function_scope());
+  size_t entry = builder()->AllocateDeferredConstantPoolEntry();
+  int slot_index = feedback_index(expr->LiteralFeedbackSlot());
+  builder()->CreateClosure(entry, slot_index, flags);
   function_literals_.push_back(std::make_pair(expr, entry));
 }
 
 void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
-  VisitClassLiteralForRuntimeDefinition(expr);
+  Register constructor = VisitForRegisterValue(expr->constructor());
+  {
+    RegisterAllocationScope register_scope(this);
+    RegisterList args = register_allocator()->NewRegisterList(4);
+    VisitForAccumulatorValueOrTheHole(expr->extends());
+    builder()
+        ->StoreAccumulatorInRegister(args[0])
+        .MoveRegister(constructor, args[1])
+        .LoadLiteral(Smi::FromInt(expr->start_position()))
+        .StoreAccumulatorInRegister(args[2])
+        .LoadLiteral(Smi::FromInt(expr->end_position()))
+        .StoreAccumulatorInRegister(args[3])
+        .CallRuntime(Runtime::kDefineClass, args);
+  }
+  Register prototype = register_allocator()->NewRegister();
+  builder()->StoreAccumulatorInRegister(prototype);
 
-  // Load the "prototype" from the constructor.
-  RegisterList args = register_allocator()->NewRegisterList(2);
-  Register literal = args[0];
-  Register prototype = args[1];
-  FeedbackVectorSlot slot = expr->PrototypeSlot();
-  builder()
-      ->StoreAccumulatorInRegister(literal)
-      .LoadNamedProperty(literal, prototype_string(), feedback_index(slot))
-      .StoreAccumulatorInRegister(prototype);
+  if (FunctionLiteral::NeedsHomeObject(expr->constructor())) {
+    // Prototype is already in the accumulator.
+    builder()->StoreHomeObjectProperty(
+        constructor, feedback_index(expr->HomeObjectSlot()), language_mode());
+  }
 
-  VisitClassLiteralProperties(expr, literal, prototype);
-  builder()->CallRuntime(Runtime::kToFastProperties, literal);
+  VisitClassLiteralProperties(expr, constructor, prototype);
+  BuildClassLiteralNameProperty(expr, constructor);
+  builder()->CallRuntime(Runtime::kToFastProperties, constructor);
   // Assign to class variable.
   if (expr->class_variable_proxy() != nullptr) {
     VariableProxy* proxy = expr->class_variable_proxy();
-    FeedbackVectorSlot slot = expr->NeedsProxySlot()
-                                  ? expr->ProxySlot()
-                                  : FeedbackVectorSlot::Invalid();
+    FeedbackSlot slot =
+        expr->NeedsProxySlot() ? expr->ProxySlot() : FeedbackSlot::Invalid();
     BuildVariableAssignment(proxy->var(), Token::INIT, slot,
                             HoleCheckMode::kElided);
   }
 }
 
-void BytecodeGenerator::VisitClassLiteralForRuntimeDefinition(
-    ClassLiteral* expr) {
-  RegisterAllocationScope register_scope(this);
-  RegisterList args = register_allocator()->NewRegisterList(4);
-  VisitForAccumulatorValueOrTheHole(expr->extends());
-  builder()->StoreAccumulatorInRegister(args[0]);
-  VisitForRegisterValue(expr->constructor(), args[1]);
-  builder()
-      ->LoadLiteral(Smi::FromInt(expr->start_position()))
-      .StoreAccumulatorInRegister(args[2])
-      .LoadLiteral(Smi::FromInt(expr->end_position()))
-      .StoreAccumulatorInRegister(args[3])
-      .CallRuntime(Runtime::kDefineClass, args);
-}
-
 void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
-                                                    Register literal,
+                                                    Register constructor,
                                                     Register prototype) {
   RegisterAllocationScope register_scope(this);
-  RegisterList args = register_allocator()->NewRegisterList(5);
-  Register receiver = args[0], key = args[1], value = args[2], attr = args[3],
-           set_function_name = args[4];
+  RegisterList args = register_allocator()->NewRegisterList(4);
+  Register receiver = args[0], key = args[1], value = args[2], attr = args[3];
 
   bool attr_assigned = false;
   Register old_receiver = Register::invalid_value();
@@ -1444,14 +1536,18 @@
     ClassLiteral::Property* property = expr->properties()->at(i);
 
     // Set-up receiver.
-    Register new_receiver = property->is_static() ? literal : prototype;
+    Register new_receiver = property->is_static() ? constructor : prototype;
     if (new_receiver != old_receiver) {
       builder()->MoveRegister(new_receiver, receiver);
       old_receiver = new_receiver;
     }
 
-    VisitForAccumulatorValue(property->key());
-    builder()->ConvertAccumulatorToName(key);
+    if (property->key()->IsStringLiteral()) {
+      VisitForRegisterValue(property->key(), key);
+    } else {
+      VisitForAccumulatorValue(property->key());
+      builder()->ConvertAccumulatorToName(key);
+    }
 
     if (property->is_static() && property->is_computed_name()) {
       // The static prototype property is read only. We handle the non computed
@@ -1479,20 +1575,26 @@
 
     switch (property->kind()) {
       case ClassLiteral::Property::METHOD: {
+        DataPropertyInLiteralFlags flags = DataPropertyInLiteralFlag::kDontEnum;
+        if (property->NeedsSetFunctionName()) {
+          flags |= DataPropertyInLiteralFlag::kSetFunctionName;
+        }
+
+        FeedbackSlot slot = property->GetStoreDataPropertySlot();
+        DCHECK(!slot.IsInvalid());
+
         builder()
-            ->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
-            .StoreAccumulatorInRegister(set_function_name)
-            .CallRuntime(Runtime::kDefineDataPropertyInLiteral, args);
+            ->LoadAccumulatorWithRegister(value)
+            .StoreDataPropertyInLiteral(receiver, key, flags,
+                                        feedback_index(slot));
         break;
       }
       case ClassLiteral::Property::GETTER: {
-        builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
-                               args.Truncate(4));
+        builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked, args);
         break;
       }
       case ClassLiteral::Property::SETTER: {
-        builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
-                               args.Truncate(4));
+        builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked, args);
         break;
       }
       case ClassLiteral::Property::FIELD: {
@@ -1503,10 +1605,23 @@
   }
 }
 
+void BytecodeGenerator::BuildClassLiteralNameProperty(ClassLiteral* expr,
+                                                      Register literal) {
+  if (!expr->has_name_static_property() &&
+      !expr->constructor()->raw_name()->IsEmpty()) {
+    Runtime::FunctionId runtime_id =
+        expr->has_static_computed_names()
+            ? Runtime::kInstallClassNameAccessorWithCheck
+            : Runtime::kInstallClassNameAccessor;
+    builder()->CallRuntime(runtime_id, literal);
+  }
+}
+
 void BytecodeGenerator::VisitNativeFunctionLiteral(
     NativeFunctionLiteral* expr) {
-  size_t entry = builder()->AllocateConstantPoolEntry();
-  builder()->CreateClosure(entry, NOT_TENURED);
+  size_t entry = builder()->AllocateDeferredConstantPoolEntry();
+  int slot_index = feedback_index(expr->LiteralFeedbackSlot());
+  builder()->CreateClosure(entry, slot_index, NOT_TENURED);
   native_function_literals_.push_back(std::make_pair(expr, entry));
 }
 
@@ -1542,43 +1657,35 @@
 void BytecodeGenerator::VisitLiteral(Literal* expr) {
   if (!execution_result()->IsEffect()) {
     const AstValue* raw_value = expr->raw_value();
-    if (raw_value->IsSmi()) {
-      builder()->LoadLiteral(raw_value->AsSmi());
-    } else if (raw_value->IsUndefined()) {
-      builder()->LoadUndefined();
-    } else if (raw_value->IsTrue()) {
-      builder()->LoadTrue();
-    } else if (raw_value->IsFalse()) {
-      builder()->LoadFalse();
-    } else if (raw_value->IsNull()) {
-      builder()->LoadNull();
-    } else if (raw_value->IsTheHole()) {
-      builder()->LoadTheHole();
-    } else {
-      builder()->LoadLiteral(raw_value->value());
-    }
+    builder()->LoadLiteral(raw_value);
   }
 }
 
 void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
   // Materialize a regular expression literal.
-  builder()->CreateRegExpLiteral(expr->pattern(), expr->literal_index(),
-                                 expr->flags());
+  builder()->CreateRegExpLiteral(
+      expr->raw_pattern(), feedback_index(expr->literal_slot()), expr->flags());
 }
 
 void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
-  // Copy the literal boilerplate.
+  // Deep-copy the literal boilerplate.
   uint8_t flags = CreateObjectLiteralFlags::Encode(
-      FastCloneShallowObjectStub::IsSupported(expr),
-      FastCloneShallowObjectStub::PropertiesCount(expr->properties_count()),
+      expr->IsFastCloningSupported(),
+      ConstructorBuiltinsAssembler::FastCloneShallowObjectPropertiesCount(
+          expr->properties_count()),
       expr->ComputeFlags());
-  // If constant properties is an empty fixed array, use our cached
-  // empty_fixed_array to ensure it's only added to the constant pool once.
-  Handle<FixedArray> constant_properties = expr->properties_count() == 0
-                                               ? empty_fixed_array()
-                                               : expr->constant_properties();
+
   Register literal = register_allocator()->NewRegister();
-  builder()->CreateObjectLiteral(constant_properties, expr->literal_index(),
+  size_t entry;
+  // If the constant properties array is empty, use the cached empty fixed
+  // array entry so it is only added to the constant pool once.
+  if (expr->properties_count() == 0) {
+    entry = builder()->EmptyFixedArrayConstantPoolEntry();
+  } else {
+    entry = builder()->AllocateDeferredConstantPoolEntry();
+    object_literals_.push_back(std::make_pair(expr, entry));
+  }
+  builder()->CreateObjectLiteral(entry, feedback_index(expr->literal_slot()),
                                  flags, literal);
 
   // Store computed values into the literal.
@@ -1592,6 +1699,7 @@
     RegisterAllocationScope inner_register_scope(this);
     Literal* key = property->key()->AsLiteral();
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1608,14 +1716,14 @@
               RegisterAllocationScope register_scope(this);
               Register value = register_allocator()->NewRegister();
               builder()->StoreAccumulatorInRegister(value);
-              builder()->StoreNamedProperty(
-                  literal, key->AsPropertyName(),
-                  feedback_index(property->GetSlot(0)), language_mode());
+              builder()->StoreNamedOwnProperty(
+                  literal, key->AsRawPropertyName(),
+                  feedback_index(property->GetSlot(0)));
               VisitSetHomeObject(value, literal, property, 1);
             } else {
-              builder()->StoreNamedProperty(
-                  literal, key->AsPropertyName(),
-                  feedback_index(property->GetSlot(0)), language_mode());
+              builder()->StoreNamedOwnProperty(
+                  literal, key->AsRawPropertyName(),
+                  feedback_index(property->GetSlot(0)));
             }
           } else {
             VisitForEffect(property->value());
@@ -1700,18 +1808,26 @@
       case ObjectLiteral::Property::CONSTANT:
       case ObjectLiteral::Property::COMPUTED:
       case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
-        RegisterList args = register_allocator()->NewRegisterList(5);
-        builder()->MoveRegister(literal, args[0]);
+        Register key = register_allocator()->NewRegister();
         VisitForAccumulatorValue(property->key());
-        builder()->ConvertAccumulatorToName(args[1]);
-        VisitForRegisterValue(property->value(), args[2]);
-        VisitSetHomeObject(args[2], literal, property);
+        builder()->ConvertAccumulatorToName(key);
+
+        Register value = VisitForRegisterValue(property->value());
+        VisitSetHomeObject(value, literal, property);
+
+        DataPropertyInLiteralFlags data_property_flags =
+            DataPropertyInLiteralFlag::kNoFlags;
+        if (property->NeedsSetFunctionName()) {
+          data_property_flags |= DataPropertyInLiteralFlag::kSetFunctionName;
+        }
+
+        FeedbackSlot slot = property->GetStoreDataPropertySlot();
+        DCHECK(!slot.IsInvalid());
+
         builder()
-            ->LoadLiteral(Smi::FromInt(NONE))
-            .StoreAccumulatorInRegister(args[3])
-            .LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
-            .StoreAccumulatorInRegister(args[4]);
-        builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral, args);
+            ->LoadAccumulatorWithRegister(value)
+            .StoreDataPropertyInLiteral(literal, key, data_property_flags,
+                                        feedback_index(slot));
         break;
       }
       case ObjectLiteral::Property::GETTER:
@@ -1732,6 +1848,13 @@
         builder()->CallRuntime(function_id, args);
         break;
       }
+      case ObjectLiteral::Property::SPREAD: {
+        RegisterList args = register_allocator()->NewRegisterList(2);
+        builder()->MoveRegister(literal, args[0]);
+        VisitForRegisterValue(property->value(), args[1]);
+        builder()->CallRuntime(Runtime::kCopyDataProperties, args);
+        break;
+      }
       case ObjectLiteral::Property::PROTOTYPE:
         UNREACHABLE();  // Handled specially above.
         break;
@@ -1743,14 +1866,14 @@
 
 void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   // Deep-copy the literal boilerplate.
-  int runtime_flags = expr->ComputeFlags();
-  bool use_fast_shallow_clone =
-      (runtime_flags & ArrayLiteral::kShallowElements) != 0 &&
-      expr->values()->length() <= JSArray::kInitialMaxFastElementArray;
-  uint8_t flags =
-      CreateArrayLiteralFlags::Encode(use_fast_shallow_clone, runtime_flags);
-  builder()->CreateArrayLiteral(expr->constant_elements(),
-                                expr->literal_index(), flags);
+  uint8_t flags = CreateArrayLiteralFlags::Encode(
+      expr->IsFastCloningSupported(), expr->ComputeFlags());
+
+  size_t entry = builder()->AllocateDeferredConstantPoolEntry();
+  builder()->CreateArrayLiteral(entry, feedback_index(expr->literal_slot()),
+                                flags);
+  array_literals_.push_back(std::make_pair(expr, entry));
+
   Register index, literal;
 
   // Evaluate all the non-constant subexpressions and store them into the
@@ -1769,7 +1892,7 @@
       literal_in_accumulator = false;
     }
 
-    FeedbackVectorSlot slot = expr->LiteralFeedbackSlot();
+    FeedbackSlot slot = expr->LiteralFeedbackSlot();
     builder()
         ->LoadLiteral(Smi::FromInt(array_index))
         .StoreAccumulatorInRegister(index);
@@ -1790,19 +1913,18 @@
                     proxy->hole_check_mode());
 }
 
-void BytecodeGenerator::BuildVariableLoad(Variable* variable,
-                                          FeedbackVectorSlot slot,
+void BytecodeGenerator::BuildVariableLoad(Variable* variable, FeedbackSlot slot,
                                           HoleCheckMode hole_check_mode,
                                           TypeofMode typeof_mode) {
   switch (variable->location()) {
     case VariableLocation::LOCAL: {
-      Register source(Register(variable->index()));
+      Register source(builder()->Local(variable->index()));
       // We need to load the variable into the accumulator, even when in a
       // VisitForRegisterScope, in order to avoid register aliasing if
       // subsequent expressions assign to the same variable.
       builder()->LoadAccumulatorWithRegister(source);
       if (hole_check_mode == HoleCheckMode::kRequired) {
-        BuildThrowIfHole(variable->name());
+        BuildThrowIfHole(variable->raw_name());
       }
       break;
     }
@@ -1815,12 +1937,20 @@
       // subsequent expressions assign to the same variable.
       builder()->LoadAccumulatorWithRegister(source);
       if (hole_check_mode == HoleCheckMode::kRequired) {
-        BuildThrowIfHole(variable->name());
+        BuildThrowIfHole(variable->raw_name());
       }
       break;
     }
     case VariableLocation::UNALLOCATED: {
-      builder()->LoadGlobal(feedback_index(slot), typeof_mode);
+      // The global identifier "undefined" is immutable. Everything
+      // else could be reassigned. For performance, we do a pointer comparison
+      // rather than checking if the raw_name is really "undefined".
+      if (variable->raw_name() == undefined_string()) {
+        builder()->LoadUndefined();
+      } else {
+        builder()->LoadGlobal(variable->raw_name(), feedback_index(slot),
+                              typeof_mode);
+      }
       break;
     }
     case VariableLocation::CONTEXT: {
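
The LoadGlobal fast path above compares variable->raw_name() against the canonical undefined string by pointer, which is sound only because the AST string table interns raw strings, so equal spellings share one object. A tiny sketch of interning making pointer equality equivalent to string equality (illustrative):

#include <set>
#include <string>

// Interner: every distinct spelling gets exactly one stored instance,
// so pointer comparison is equivalent to (and cheaper than) strcmp.
class Interner {
 public:
  const std::string* Intern(const std::string& s) {
    return &*strings_.insert(s).first;  // std::set nodes are stable
  }

 private:
  std::set<std::string> strings_;
};

int main() {
  Interner interner;
  const std::string* a = interner.Intern("undefined");
  const std::string* b = interner.Intern("undefined");
  return a == b ? 0 : 1;  // pointer equality holds: same interned object
}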
@@ -1834,9 +1964,15 @@
         context_reg = execution_context()->reg();
       }
 
-      builder()->LoadContextSlot(context_reg, variable->index(), depth);
+      BytecodeArrayBuilder::ContextSlotMutability immutable =
+          (variable->maybe_assigned() == kNotAssigned)
+              ? BytecodeArrayBuilder::kImmutableSlot
+              : BytecodeArrayBuilder::kMutableSlot;
+
+      builder()->LoadContextSlot(context_reg, variable->index(), depth,
+                                 immutable);
       if (hole_check_mode == HoleCheckMode::kRequired) {
-        BuildThrowIfHole(variable->name());
+        BuildThrowIfHole(variable->raw_name());
       }
       break;
     }
@@ -1846,21 +1982,22 @@
           Variable* local_variable = variable->local_if_not_shadowed();
           int depth =
               execution_context()->ContextChainDepth(local_variable->scope());
-          builder()->LoadLookupContextSlot(variable->name(), typeof_mode,
+          builder()->LoadLookupContextSlot(variable->raw_name(), typeof_mode,
                                            local_variable->index(), depth);
           if (hole_check_mode == HoleCheckMode::kRequired) {
-            BuildThrowIfHole(variable->name());
+            BuildThrowIfHole(variable->raw_name());
           }
           break;
         }
         case DYNAMIC_GLOBAL: {
-          int depth = scope()->ContextChainLengthUntilOutermostSloppyEval();
-          builder()->LoadLookupGlobalSlot(variable->name(), typeof_mode,
+          int depth =
+              closure_scope()->ContextChainLengthUntilOutermostSloppyEval();
+          builder()->LoadLookupGlobalSlot(variable->raw_name(), typeof_mode,
                                           feedback_index(slot), depth);
           break;
         }
         default:
-          builder()->LoadLookupSlot(variable->name(), typeof_mode);
+          builder()->LoadLookupSlot(variable->raw_name(), typeof_mode);
       }
       break;
     }
@@ -1868,7 +2005,7 @@
       int depth = execution_context()->ContextChainDepth(variable->scope());
       builder()->LoadModuleVariable(variable->index(), depth);
       if (hole_check_mode == HoleCheckMode::kRequired) {
-        BuildThrowIfHole(variable->name());
+        BuildThrowIfHole(variable->raw_name());
       }
       break;
     }
@@ -1876,7 +2013,7 @@
 }
 
 void BytecodeGenerator::BuildVariableLoadForAccumulatorValue(
-    Variable* variable, FeedbackVectorSlot slot, HoleCheckMode hole_check_mode,
+    Variable* variable, FeedbackSlot slot, HoleCheckMode hole_check_mode,
     TypeofMode typeof_mode) {
   ValueResultScope accumulator_result(this);
   BuildVariableLoad(variable, slot, hole_check_mode, typeof_mode);
@@ -1893,6 +2030,28 @@
   builder()->Return();
 }
 
+void BytecodeGenerator::BuildAsyncReturn() {
+  DCHECK(IsAsyncFunction(info()->literal()->kind()));
+  RegisterAllocationScope register_scope(this);
+  RegisterList args = register_allocator()->NewRegisterList(3);
+  Register receiver = args[0];
+  Register promise = args[1];
+  Register return_value = args[2];
+  builder()->StoreAccumulatorInRegister(return_value);
+
+  Variable* var_promise = closure_scope()->promise_var();
+  DCHECK_NOT_NULL(var_promise);
+  BuildVariableLoad(var_promise, FeedbackSlot::Invalid(),
+                    HoleCheckMode::kElided);
+  builder()
+      ->StoreAccumulatorInRegister(promise)
+      .LoadUndefined()
+      .StoreAccumulatorInRegister(receiver)
+      .CallJSRuntime(Context::PROMISE_RESOLVE_INDEX, args)
+      .LoadAccumulatorWithRegister(promise);
+  BuildReturn();
+}
+
 void BytecodeGenerator::BuildReThrow() { builder()->ReThrow(); }
 
 void BytecodeGenerator::BuildAbort(BailoutReason bailout_reason) {
@@ -1904,14 +2063,14 @@
       .CallRuntime(Runtime::kAbort, reason);
 }
 
-void BytecodeGenerator::BuildThrowReferenceError(Handle<String> name) {
+void BytecodeGenerator::BuildThrowReferenceError(const AstRawString* name) {
   RegisterAllocationScope register_scope(this);
   Register name_reg = register_allocator()->NewRegister();
   builder()->LoadLiteral(name).StoreAccumulatorInRegister(name_reg).CallRuntime(
       Runtime::kThrowReferenceError, name_reg);
 }
 
-void BytecodeGenerator::BuildThrowIfHole(Handle<String> name) {
+void BytecodeGenerator::BuildThrowIfHole(const AstRawString* name) {
   // TODO(interpreter): Can the parser reduce the number of checks
   // performed? Or should there be a ThrowIfHole bytecode.
   BytecodeLabel no_reference_error;
@@ -1920,36 +2079,30 @@
   builder()->Bind(&no_reference_error);
 }
 
-void BytecodeGenerator::BuildThrowIfNotHole(Handle<String> name) {
-  // TODO(interpreter): Can the parser reduce the number of checks
-  // performed? Or should there be a ThrowIfNotHole bytecode.
-  BytecodeLabel no_reference_error, reference_error;
-  builder()
-      ->JumpIfNotHole(&reference_error)
-      .Jump(&no_reference_error)
-      .Bind(&reference_error);
-  BuildThrowReferenceError(name);
-  builder()->Bind(&no_reference_error);
-}
-
 void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
                                                             Token::Value op) {
   if (variable->is_this() && variable->mode() == CONST && op == Token::INIT) {
     // Perform an initialization check for 'this'. 'this' variable is the
     // only variable able to trigger bind operations outside the TDZ
     // via 'super' calls.
-    BuildThrowIfNotHole(variable->name());
+    BytecodeLabel no_reference_error, reference_error;
+    builder()
+        ->JumpIfNotHole(&reference_error)
+        .Jump(&no_reference_error)
+        .Bind(&reference_error)
+        .CallRuntime(Runtime::kThrowSuperAlreadyCalledError)
+        .Bind(&no_reference_error);
   } else {
     // Perform an initialization check for let/const declared variables.
     // E.g. let x = (x = 20); is not allowed.
     DCHECK(IsLexicalVariableMode(variable->mode()));
-    BuildThrowIfHole(variable->name());
+    BuildThrowIfHole(variable->raw_name());
   }
 }
 
 void BytecodeGenerator::BuildVariableAssignment(Variable* variable,
                                                 Token::Value op,
-                                                FeedbackVectorSlot slot,
+                                                FeedbackSlot slot,
                                                 HoleCheckMode hole_check_mode) {
   VariableMode mode = variable->mode();
   RegisterAllocationScope assignment_register_scope(this);
@@ -1959,9 +2112,9 @@
     case VariableLocation::LOCAL: {
       Register destination;
       if (VariableLocation::PARAMETER == variable->location()) {
-        destination = Register(builder()->Parameter(variable->index() + 1));
+        destination = builder()->Parameter(variable->index() + 1);
       } else {
-        destination = Register(variable->index());
+        destination = builder()->Local(variable->index());
       }
 
       if (hole_check_mode == HoleCheckMode::kRequired) {
@@ -1983,7 +2136,7 @@
       break;
     }
     case VariableLocation::UNALLOCATED: {
-      builder()->StoreGlobal(variable->name(), feedback_index(slot),
+      builder()->StoreGlobal(variable->raw_name(), feedback_index(slot),
                              language_mode());
       break;
     }
@@ -2004,7 +2157,8 @@
         Register value_temp = register_allocator()->NewRegister();
         builder()
             ->StoreAccumulatorInRegister(value_temp)
-            .LoadContextSlot(context_reg, variable->index(), depth);
+            .LoadContextSlot(context_reg, variable->index(), depth,
+                             BytecodeArrayBuilder::kMutableSlot);
 
         BuildHoleCheckForVariableAssignment(variable, op);
         builder()->LoadAccumulatorWithRegister(value_temp);
@@ -2018,7 +2172,7 @@
       break;
     }
     case VariableLocation::LOOKUP: {
-      builder()->StoreLookupSlot(variable->name(), language_mode());
+      builder()->StoreLookupSlot(variable->raw_name(), language_mode());
       break;
     }
     case VariableLocation::MODULE: {
@@ -2053,7 +2207,7 @@
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
   Register object, key;
   RegisterList super_property_args;
-  Handle<String> name;
+  const AstRawString* name;
 
   // Left-hand side can only be a property, a global or a variable slot.
   Property* property = expr->target()->AsProperty();
@@ -2066,7 +2220,7 @@
       break;
     case NAMED_PROPERTY: {
       object = VisitForRegisterValue(property->obj());
-      name = property->key()->AsLiteral()->AsPropertyName();
+      name = property->key()->AsLiteral()->AsRawPropertyName();
       break;
     }
     case KEYED_PROPERTY: {
@@ -2082,7 +2236,7 @@
       VisitForRegisterValue(super_property->home_object(),
                             super_property_args[1]);
       builder()
-          ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+          ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
           .StoreAccumulatorInRegister(super_property_args[2]);
       break;
     }
@@ -2111,7 +2265,7 @@
         break;
       }
       case NAMED_PROPERTY: {
-        FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+        FeedbackSlot slot = property->PropertyFeedbackSlot();
         builder()
             ->LoadNamedProperty(object, name, feedback_index(slot))
             .StoreAccumulatorInRegister(old_value);
@@ -2120,7 +2274,7 @@
       case KEYED_PROPERTY: {
         // Key is already in accumulator at this point due to evaluating the
         // LHS above.
-        FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+        FeedbackSlot slot = property->PropertyFeedbackSlot();
         builder()
             ->LoadKeyedProperty(object, feedback_index(slot))
             .StoreAccumulatorInRegister(old_value);
@@ -2142,8 +2296,7 @@
       }
     }
     VisitForAccumulatorValue(expr->value());
-    FeedbackVectorSlot slot =
-        expr->binary_operation()->BinaryOperationFeedbackSlot();
+    FeedbackSlot slot = expr->binary_operation()->BinaryOperationFeedbackSlot();
     builder()->BinaryOperation(expr->binary_op(), old_value,
                                feedback_index(slot));
   } else {
@@ -2152,7 +2305,7 @@
 
   // Store the value.
   builder()->SetExpressionPosition(expr);
-  FeedbackVectorSlot slot = expr->AssignmentSlot();
+  FeedbackSlot slot = expr->AssignmentSlot();
   switch (assign_type) {
     case VARIABLE: {
       // TODO(oth): The BuildVariableAssignment() call is hard to reason about.
@@ -2268,15 +2421,15 @@
 
 void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
   LhsKind property_kind = Property::GetAssignType(expr);
-  FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
+  FeedbackSlot slot = expr->PropertyFeedbackSlot();
   builder()->SetExpressionPosition(expr);
   switch (property_kind) {
     case VARIABLE:
       UNREACHABLE();
     case NAMED_PROPERTY: {
-      builder()->LoadNamedProperty(obj,
-                                   expr->key()->AsLiteral()->AsPropertyName(),
-                                   feedback_index(slot));
+      builder()->LoadNamedProperty(
+          obj, expr->key()->AsLiteral()->AsRawPropertyName(),
+          feedback_index(slot));
       break;
     }
     case KEYED_PROPERTY: {
@@ -2310,7 +2463,7 @@
   VisitForRegisterValue(super_property->this_var(), args[0]);
   VisitForRegisterValue(super_property->home_object(), args[1]);
   builder()
-      ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+      ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
       .StoreAccumulatorInRegister(args[2])
       .CallRuntime(Runtime::kLoadFromSuper, args);
 
@@ -2362,13 +2515,17 @@
     return VisitCallSuper(expr);
   }
 
-  Register callee = register_allocator()->NewRegister();
   // Grow the args list as we visit receiver / arguments to avoid allocating all
   // the registers up-front. Otherwise these registers are unavailable during
   // receiver / argument visiting and we can end up with memory leaks due to
   // registers keeping objects alive.
+  Register callee = register_allocator()->NewRegister();
   RegisterList args = register_allocator()->NewGrowableRegisterList();
 
+  // TODO(petermarshall): We have a lot of call bytecodes that are very similar,
+  // see if we can reduce the number by adding a separate argument which
+  // specifies the call type (e.g., property, spread, tailcall, etc.).
+
   // Prepare the callee and the receiver to the function call. This depends on
   // the semantics of the underlying call type.
   switch (call_type) {
@@ -2376,7 +2533,7 @@
     case Call::KEYED_PROPERTY_CALL: {
       Property* property = callee_expr->AsProperty();
       VisitAndPushIntoRegisterList(property->obj(), &args);
-      VisitPropertyLoadForRegister(args[0], property, callee);
+      VisitPropertyLoadForRegister(args.last_register(), property, callee);
       break;
     }
     case Call::GLOBAL_CALL: {
@@ -2403,7 +2560,7 @@
         USE(receiver);
         Variable* variable = callee_expr->AsVariableProxy()->var();
         builder()
-            ->LoadLiteral(variable->name())
+            ->LoadLiteral(variable->raw_name())
             .StoreAccumulatorInRegister(name)
             .CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, name,
                                 result_pair);
@@ -2453,8 +2610,7 @@
         .MoveRegister(Register::function_closure(), runtime_call_args[2])
         .LoadLiteral(Smi::FromInt(language_mode()))
         .StoreAccumulatorInRegister(runtime_call_args[3])
-        .LoadLiteral(
-            Smi::FromInt(execution_context()->scope()->start_position()))
+        .LoadLiteral(Smi::FromInt(current_scope()->start_position()))
         .StoreAccumulatorInRegister(runtime_call_args[4])
         .LoadLiteral(Smi::FromInt(expr->position()))
         .StoreAccumulatorInRegister(runtime_call_args[5]);
@@ -2467,9 +2623,16 @@
 
   builder()->SetExpressionPosition(expr);
 
-  int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
-  builder()->Call(callee, args, feedback_slot_index, call_type,
-                  expr->tail_call_mode());
+  // When a call contains a spread, a Call AST node is only created if there is
+  // exactly one spread, and it is the last argument.
+  if (expr->only_last_arg_is_spread()) {
+    DCHECK_EQ(TailCallMode::kDisallow, expr->tail_call_mode());
+    builder()->CallWithSpread(callee, args);
+  } else {
+    int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+    builder()->Call(callee, args, feedback_slot_index, call_type,
+                    expr->tail_call_mode());
+  }
 }
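
A sketch of the lowering the spread branch above produces; the JS source,
register numbers, and layout are illustrative only:

    // JS: f(x, ...rest)
    //   ...                       ; callee evaluated into r0
    //   ...                       ; x -> r1, spread value rest -> r2
    //   CallWithSpread r0, r1-r2  ; no feedback-slot operand, no tail call

VisitSpread (changed below to simply visit its inner expression) is what lets
the spread argument reach CallWithSpread as an ordinary register in the list.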
 
 void BytecodeGenerator::VisitCallSuper(Call* expr) {
@@ -2477,29 +2640,34 @@
   SuperCallReference* super = expr->expression()->AsSuperCallReference();
 
   // Prepare the constructor to the super call.
-  Register this_function = VisitForRegisterValue(super->this_function_var());
-  builder()->CallRuntime(Runtime::kInlineGetSuperConstructor, this_function);
+  VisitForAccumulatorValue(super->this_function_var());
+  Register constructor = register_allocator()->NewRegister();
+  builder()->GetSuperConstructor(constructor);
 
-  Register constructor = this_function;  // Re-use dead this_function register.
-  builder()->StoreAccumulatorInRegister(constructor);
-
-  RegisterList args = register_allocator()->NewGrowableRegisterList();
-  VisitArguments(expr->arguments(), &args);
-
+  ZoneList<Expression*>* args = expr->arguments();
+  RegisterList args_regs = register_allocator()->NewGrowableRegisterList();
+  VisitArguments(args, &args_regs);
   // The new target is loaded into the accumulator from the
   // {new.target} variable.
   VisitForAccumulatorValue(super->new_target_var());
-
-  // Call construct.
   builder()->SetExpressionPosition(expr);
-  // TODO(turbofan): For now we do gather feedback on super constructor
-  // calls, utilizing the existing machinery to inline the actual call
-  // target and the JSCreate for the implicit receiver allocation. This
-  // is not an ideal solution for super constructor calls, but it gets
-  // the job done for now. In the long run we might want to revisit this
-  // and come up with a better way.
-  int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
-  builder()->New(constructor, args, feedback_slot_index);
+
+  // When a super call contains a spread, a CallSuper AST node is only created
+  // if there is exactly one spread, and it is the last argument.
+  if (expr->only_last_arg_is_spread()) {
+    // TODO(petermarshall): Collect type on the feedback slot.
+    builder()->ConstructWithSpread(constructor, args_regs);
+  } else {
+    // Call construct.
+    // TODO(turbofan): For now we do gather feedback on super constructor
+    // calls, utilizing the existing machinery to inline the actual call
+    // target and the JSCreate for the implicit receiver allocation. This
+    // is not an ideal solution for super constructor calls, but it gets
+    // the job done for now. In the long run we might want to revisit this
+    // and come up with a better way.
+    int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+    builder()->Construct(constructor, args_regs, feedback_slot_index);
+  }
 }
 
 void BytecodeGenerator::VisitCallNew(CallNew* expr) {
@@ -2507,12 +2675,18 @@
   RegisterList args = register_allocator()->NewGrowableRegisterList();
   VisitArguments(expr->arguments(), &args);
 
-  builder()->SetExpressionPosition(expr);
   // The accumulator holds new target which is the same as the
   // constructor for CallNew.
-  builder()
-      ->LoadAccumulatorWithRegister(constructor)
-      .New(constructor, args, feedback_index(expr->CallNewFeedbackSlot()));
+  builder()->SetExpressionPosition(expr);
+  builder()->LoadAccumulatorWithRegister(constructor);
+
+  if (expr->only_last_arg_is_spread()) {
+    // TODO(petermarshall): Collect type on the feedback slot.
+    builder()->ConstructWithSpread(constructor, args);
+  } else {
+    builder()->Construct(constructor, args,
+                         feedback_index(expr->CallNewFeedbackSlot()));
+  }
 }
 
 void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) {
@@ -2613,11 +2787,13 @@
         Register global_object = register_allocator()->NewRegister();
         builder()
             ->LoadContextSlot(execution_context()->reg(),
-                              Context::NATIVE_CONTEXT_INDEX, 0)
+                              Context::NATIVE_CONTEXT_INDEX, 0,
+                              BytecodeArrayBuilder::kMutableSlot)
             .StoreAccumulatorInRegister(native_context)
-            .LoadContextSlot(native_context, Context::EXTENSION_INDEX, 0)
+            .LoadContextSlot(native_context, Context::EXTENSION_INDEX, 0,
+                             BytecodeArrayBuilder::kMutableSlot)
             .StoreAccumulatorInRegister(global_object)
-            .LoadLiteral(variable->name())
+            .LoadLiteral(variable->raw_name())
             .Delete(global_object, language_mode());
         break;
       }
@@ -2636,7 +2812,7 @@
       case VariableLocation::LOOKUP: {
         Register name_reg = register_allocator()->NewRegister();
         builder()
-            ->LoadLiteral(variable->name())
+            ->LoadLiteral(variable->raw_name())
             .StoreAccumulatorInRegister(name_reg)
             .CallRuntime(Runtime::kDeleteLookupSlot, name_reg);
         break;
@@ -2663,7 +2839,7 @@
   // Evaluate LHS expression and get old value.
   Register object, key, old_value;
   RegisterList super_property_args;
-  Handle<String> name;
+  const AstRawString* name;
   switch (assign_type) {
     case VARIABLE: {
       VariableProxy* proxy = expr->expression()->AsVariableProxy();
@@ -2673,14 +2849,14 @@
       break;
     }
     case NAMED_PROPERTY: {
-      FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+      FeedbackSlot slot = property->PropertyFeedbackSlot();
       object = VisitForRegisterValue(property->obj());
-      name = property->key()->AsLiteral()->AsPropertyName();
+      name = property->key()->AsLiteral()->AsRawPropertyName();
       builder()->LoadNamedProperty(object, name, feedback_index(slot));
       break;
     }
     case KEYED_PROPERTY: {
-      FeedbackVectorSlot slot = property->PropertyFeedbackSlot();
+      FeedbackSlot slot = property->PropertyFeedbackSlot();
       object = VisitForRegisterValue(property->obj());
       // Use visit for accumulator here since we need the key in the accumulator
       // for the LoadKeyedProperty.
@@ -2698,7 +2874,7 @@
       VisitForRegisterValue(super_property->this_var(), load_super_args[0]);
       VisitForRegisterValue(super_property->home_object(), load_super_args[1]);
       builder()
-          ->LoadLiteral(property->key()->AsLiteral()->AsPropertyName())
+          ->LoadLiteral(property->key()->AsLiteral()->AsRawPropertyName())
           .StoreAccumulatorInRegister(load_super_args[2])
           .CallRuntime(Runtime::kLoadFromSuper, load_super_args);
       break;
@@ -2726,12 +2902,12 @@
   }
 
   // Perform +1/-1 operation.
-  FeedbackVectorSlot slot = expr->CountBinaryOpFeedbackSlot();
+  FeedbackSlot slot = expr->CountBinaryOpFeedbackSlot();
   builder()->CountOperation(expr->binary_op(), feedback_index(slot));
 
   // Store the value.
   builder()->SetExpressionPosition(expr);
-  FeedbackVectorSlot feedback_slot = expr->CountSlot();
+  FeedbackSlot feedback_slot = expr->CountSlot();
   switch (assign_type) {
     case VARIABLE: {
       VariableProxy* proxy = expr->expression()->AsVariableProxy();
@@ -2790,7 +2966,7 @@
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
   builder()->SetExpressionPosition(expr);
-  FeedbackVectorSlot slot = expr->CompareOperationFeedbackSlot();
+  FeedbackSlot slot = expr->CompareOperationFeedbackSlot();
   builder()->CompareOperation(expr->op(), lhs, feedback_index(slot));
 }
 
@@ -2799,16 +2975,87 @@
   // +x and -x by the parser.
   Register lhs = VisitForRegisterValue(expr->left());
   VisitForAccumulatorValue(expr->right());
-  FeedbackVectorSlot slot = expr->BinaryOperationFeedbackSlot();
+  FeedbackSlot slot = expr->BinaryOperationFeedbackSlot();
+  builder()->SetExpressionPosition(expr);
   builder()->BinaryOperation(expr->op(), lhs, feedback_index(slot));
 }
 
-void BytecodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
+void BytecodeGenerator::VisitSpread(Spread* expr) { Visit(expr->expression()); }
 
 void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
   UNREACHABLE();
 }
 
+void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
+  FeedbackSlot load_slot = expr->IteratorPropertyFeedbackSlot();
+  FeedbackSlot call_slot = expr->IteratorCallFeedbackSlot();
+
+  RegisterList args = register_allocator()->NewRegisterList(1);
+  Register method = register_allocator()->NewRegister();
+  Register obj = args[0];
+
+  VisitForAccumulatorValue(expr->iterable());
+
+  if (expr->hint() == IteratorType::kAsync) {
+    FeedbackSlot async_load_slot = expr->AsyncIteratorPropertyFeedbackSlot();
+    FeedbackSlot async_call_slot = expr->AsyncIteratorCallFeedbackSlot();
+
+    // Set method to GetMethod(obj, @@asyncIterator)
+    builder()->StoreAccumulatorInRegister(obj).LoadAsyncIteratorProperty(
+        obj, feedback_index(async_load_slot));
+
+    BytecodeLabel async_iterator_undefined, async_iterator_null, done;
+    // TODO(ignition): Add a single opcode for JumpIfNullOrUndefined
+    builder()->JumpIfUndefined(&async_iterator_undefined);
+    builder()->JumpIfNull(&async_iterator_null);
+
+    // Let iterator be Call(method, obj)
+    builder()->StoreAccumulatorInRegister(method).Call(
+        method, args, feedback_index(async_call_slot),
+        Call::NAMED_PROPERTY_CALL);
+
+    // If Type(iterator) is not Object, throw a TypeError exception.
+    builder()->JumpIfJSReceiver(&done);
+    builder()->CallRuntime(Runtime::kThrowSymbolAsyncIteratorInvalid);
+
+    builder()->Bind(&async_iterator_undefined);
+    builder()->Bind(&async_iterator_null);
+    // If method is undefined,
+    //     Let syncMethod be GetMethod(obj, @@iterator)
+    builder()
+        ->LoadIteratorProperty(obj, feedback_index(load_slot))
+        .StoreAccumulatorInRegister(method);
+
+    //     Let syncIterator be Call(syncMethod, obj)
+    builder()->Call(method, args, feedback_index(call_slot),
+                    Call::NAMED_PROPERTY_CALL);
+
+    // Return CreateAsyncFromSyncIterator(syncIterator)
+    // Alias the `method` register, as it's no longer used.
+    Register sync_iter = method;
+    builder()->StoreAccumulatorInRegister(sync_iter).CallRuntime(
+        Runtime::kInlineCreateAsyncFromSyncIterator, sync_iter);
+
+    builder()->Bind(&done);
+  } else {
+    // Let method be GetMethod(obj, @@iterator).
+    builder()
+        ->StoreAccumulatorInRegister(obj)
+        .LoadIteratorProperty(obj, feedback_index(load_slot))
+        .StoreAccumulatorInRegister(method);
+
+    // Let iterator be Call(method, obj).
+    builder()->Call(method, args, feedback_index(call_slot),
+                    Call::NAMED_PROPERTY_CALL);
+
+    // If Type(iterator) is not Object, throw a TypeError exception.
+    BytecodeLabel no_type_error;
+    builder()->JumpIfJSReceiver(&no_type_error);
+    builder()->CallRuntime(Runtime::kThrowSymbolIteratorInvalid);
+    builder()->Bind(&no_type_error);
+  }
+}
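
The async branch above corresponds to the following spec-style pseudocode; an
illustrative summary, not code from the tree:

    method = obj[Symbol.asyncIterator]
    if (method === undefined || method === null) {
      syncIterator = obj[Symbol.iterator].call(obj)
      result = %CreateAsyncFromSyncIterator(syncIterator)
    } else {
      iterator = method.call(obj)
      if (!IsObject(iterator)) throw TypeError
    }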
+
 void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
   builder()->LoadAccumulatorWithRegister(Register::function_closure());
 }
@@ -2904,7 +3151,7 @@
 
 void BytecodeGenerator::BuildNewLocalActivationContext() {
   ValueResultScope value_execution_result(this);
-  Scope* scope = this->scope();
+  Scope* scope = closure_scope();
 
   // Create the appropriate context.
   if (scope->is_script_scope()) {
@@ -2912,7 +3159,7 @@
     builder()
         ->LoadAccumulatorWithRegister(Register::function_closure())
         .StoreAccumulatorInRegister(args[0])
-        .LoadLiteral(scope->scope_info())
+        .LoadLiteral(scope)
         .StoreAccumulatorInRegister(args[1])
         .CallRuntime(Runtime::kNewScriptContext, args);
   } else if (scope->is_module_scope()) {
@@ -2926,22 +3173,37 @@
         ->MoveRegister(builder()->Parameter(1), args[0])
         .LoadAccumulatorWithRegister(Register::function_closure())
         .StoreAccumulatorInRegister(args[1])
-        .LoadLiteral(scope->scope_info())
+        .LoadLiteral(scope)
         .StoreAccumulatorInRegister(args[2])
         .CallRuntime(Runtime::kPushModuleContext, args);
   } else {
+    DCHECK(scope->is_function_scope() || scope->is_eval_scope());
     int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-    if (slot_count <= FastNewFunctionContextStub::kMaximumSlots) {
-      builder()->CreateFunctionContext(slot_count);
+    if (slot_count <=
+        ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+      switch (scope->scope_type()) {
+        case EVAL_SCOPE:
+          builder()->CreateEvalContext(slot_count);
+          break;
+        case FUNCTION_SCOPE:
+          builder()->CreateFunctionContext(slot_count);
+          break;
+        default:
+          UNREACHABLE();
+      }
     } else {
-      builder()->CallRuntime(Runtime::kNewFunctionContext,
-                             Register::function_closure());
+      RegisterList args = register_allocator()->NewRegisterList(2);
+      builder()
+          ->MoveRegister(Register::function_closure(), args[0])
+          .LoadLiteral(Smi::FromInt(scope->scope_type()))
+          .StoreAccumulatorInRegister(args[1])
+          .CallRuntime(Runtime::kNewFunctionContext, args);
     }
   }
 }
 
 void BytecodeGenerator::BuildLocalActivationContextInitialization() {
-  DeclarationScope* scope = this->scope();
+  DeclarationScope* scope = closure_scope();
 
   if (scope->has_this_declaration() && scope->receiver()->IsContextSlot()) {
     Variable* variable = scope->receiver();
@@ -2973,7 +3235,7 @@
   DCHECK(scope->is_block_scope());
 
   VisitFunctionClosureForContext();
-  builder()->CreateBlockContext(scope->scope_info());
+  builder()->CreateBlockContext(scope);
 }
 
 void BytecodeGenerator::BuildNewLocalWithContext(Scope* scope) {
@@ -2983,7 +3245,7 @@
 
   builder()->ConvertAccumulatorToObject(extension_object);
   VisitFunctionClosureForContext();
-  builder()->CreateWithContext(extension_object, scope->scope_info());
+  builder()->CreateWithContext(extension_object, scope);
 }
 
 void BytecodeGenerator::BuildNewLocalCatchContext(Variable* variable,
@@ -2994,8 +3256,7 @@
   Register exception = register_allocator()->NewRegister();
   builder()->StoreAccumulatorInRegister(exception);
   VisitFunctionClosureForContext();
-  builder()->CreateCatchContext(exception, variable->name(),
-                                scope->scope_info());
+  builder()->CreateCatchContext(exception, variable->raw_name(), scope);
 }
 
 void BytecodeGenerator::VisitObjectLiteralAccessor(
@@ -3013,11 +3274,10 @@
                                            int slot_number) {
   Expression* expr = property->value();
   if (FunctionLiteral::NeedsHomeObject(expr)) {
-    FeedbackVectorSlot slot = property->GetSlot(slot_number);
+    FeedbackSlot slot = property->GetSlot(slot_number);
     builder()
         ->LoadAccumulatorWithRegister(home_object)
-        .StoreNamedProperty(value, home_object_symbol(), feedback_index(slot),
-                            language_mode());
+        .StoreHomeObjectProperty(value, feedback_index(slot), language_mode());
   }
 }
 
@@ -3033,8 +3293,7 @@
           ? CreateArgumentsType::kUnmappedArguments
           : CreateArgumentsType::kMappedArguments;
   builder()->CreateArguments(type);
-  BuildVariableAssignment(variable, Token::ASSIGN,
-                          FeedbackVectorSlot::Invalid(),
+  BuildVariableAssignment(variable, Token::ASSIGN, FeedbackSlot::Invalid(),
                           HoleCheckMode::kElided);
 }
 
@@ -3045,7 +3304,7 @@
   // variable.
   builder()->CreateArguments(CreateArgumentsType::kRestParameter);
   DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
-  BuildVariableAssignment(rest, Token::ASSIGN, FeedbackVectorSlot::Invalid(),
+  BuildVariableAssignment(rest, Token::ASSIGN, FeedbackSlot::Invalid(),
                           HoleCheckMode::kElided);
 }
 
@@ -3054,7 +3313,7 @@
 
   // Store the closure we were called with in the given variable.
   builder()->LoadAccumulatorWithRegister(Register::function_closure());
-  BuildVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid(),
+  BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
                           HoleCheckMode::kElided);
 }
 
@@ -3063,7 +3322,7 @@
 
   // Store the new target we were called with in the given variable.
   builder()->LoadAccumulatorWithRegister(Register::new_target());
-  BuildVariableAssignment(variable, Token::INIT, FeedbackVectorSlot::Invalid(),
+  BuildVariableAssignment(variable, Token::INIT, FeedbackSlot::Invalid(),
                           HoleCheckMode::kElided);
 
   // TODO(mstarzinger): The <new.target> register is not set by the deoptimizer
@@ -3076,26 +3335,27 @@
 
 void BytecodeGenerator::VisitFunctionClosureForContext() {
   ValueResultScope value_execution_result(this);
-  DeclarationScope* closure_scope =
-      execution_context()->scope()->GetClosureScope();
-  if (closure_scope->is_script_scope()) {
+  if (closure_scope()->is_script_scope()) {
     // Contexts nested in the native context have a canonical empty function as
     // their closure, not the anonymous closure containing the global code.
     Register native_context = register_allocator()->NewRegister();
     builder()
         ->LoadContextSlot(execution_context()->reg(),
-                          Context::NATIVE_CONTEXT_INDEX, 0)
+                          Context::NATIVE_CONTEXT_INDEX, 0,
+                          BytecodeArrayBuilder::kMutableSlot)
         .StoreAccumulatorInRegister(native_context)
-        .LoadContextSlot(native_context, Context::CLOSURE_INDEX, 0);
-  } else if (closure_scope->is_eval_scope()) {
+        .LoadContextSlot(native_context, Context::CLOSURE_INDEX, 0,
+                         BytecodeArrayBuilder::kMutableSlot);
+  } else if (closure_scope()->is_eval_scope()) {
     // Contexts created by a call to eval have the same closure as the
     // context calling eval, not the anonymous closure containing the eval
     // code. Fetch it from the context.
     builder()->LoadContextSlot(execution_context()->reg(),
-                               Context::CLOSURE_INDEX, 0);
+                               Context::CLOSURE_INDEX, 0,
+                               BytecodeArrayBuilder::kMutableSlot);
   } else {
-    DCHECK(closure_scope->is_function_scope() ||
-           closure_scope->is_module_scope());
+    DCHECK(closure_scope()->is_function_scope() ||
+           closure_scope()->is_module_scope());
     builder()->LoadAccumulatorWithRegister(Register::function_closure());
   }
 }
@@ -3191,17 +3451,18 @@
 }
 
 void BytecodeGenerator::VisitInScope(Statement* stmt, Scope* scope) {
-  ContextScope context_scope(this, scope);
   DCHECK(scope->declarations()->is_empty());
+  CurrentScope current_scope(this, scope);
+  ContextScope context_scope(this, scope);
   Visit(stmt);
 }
 
 LanguageMode BytecodeGenerator::language_mode() const {
-  return execution_context()->scope()->language_mode();
+  return current_scope()->language_mode();
 }
 
-int BytecodeGenerator::feedback_index(FeedbackVectorSlot slot) const {
-  return TypeFeedbackVector::GetIndex(slot);
+int BytecodeGenerator::feedback_index(FeedbackSlot slot) const {
+  return FeedbackVector::GetIndex(slot);
 }
 
 Runtime::FunctionId BytecodeGenerator::StoreToSuperRuntimeId() {
diff --git a/src/interpreter/bytecode-generator.h b/src/interpreter/bytecode-generator.h
index bcab997..755648e 100644
--- a/src/interpreter/bytecode-generator.h
+++ b/src/interpreter/bytecode-generator.h
@@ -18,6 +18,7 @@
 
 namespace interpreter {
 
+class GlobalDeclarationsBuilder;
 class LoopBuilder;
 
 class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
@@ -43,6 +44,7 @@
   class ControlScopeForTopLevel;
   class ControlScopeForTryCatch;
   class ControlScopeForTryFinally;
+  class CurrentScope;
   class ExpressionResultScope;
   class EffectResultScope;
   class GlobalDeclarationsBuilder;
@@ -53,7 +55,7 @@
   enum class TestFallthrough { kThen, kElse, kNone };
 
   void GenerateBytecodeBody();
-  void AllocateDeferredConstants();
+  void AllocateDeferredConstants(Isolate* isolate);
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 
@@ -94,23 +96,22 @@
   void VisitPropertyLoadForRegister(Register obj, Property* expr,
                                     Register destination);
 
-  void BuildVariableLoad(Variable* variable, FeedbackVectorSlot slot,
+  void BuildVariableLoad(Variable* variable, FeedbackSlot slot,
                          HoleCheckMode hole_check_mode,
                          TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
   void BuildVariableLoadForAccumulatorValue(
-      Variable* variable, FeedbackVectorSlot slot,
-      HoleCheckMode hole_check_mode,
+      Variable* variable, FeedbackSlot slot, HoleCheckMode hole_check_mode,
       TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
   void BuildVariableAssignment(Variable* variable, Token::Value op,
-                               FeedbackVectorSlot slot,
+                               FeedbackSlot slot,
                                HoleCheckMode hole_check_mode);
 
   void BuildReturn();
+  void BuildAsyncReturn();
   void BuildReThrow();
   void BuildAbort(BailoutReason bailout_reason);
-  void BuildThrowIfHole(Handle<String> name);
-  void BuildThrowIfNotHole(Handle<String> name);
-  void BuildThrowReferenceError(Handle<String> name);
+  void BuildThrowIfHole(const AstRawString* name);
+  void BuildThrowReferenceError(const AstRawString* name);
   void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
 
   // Build jump to targets[value], where
@@ -129,9 +130,9 @@
   void VisitArgumentsObject(Variable* variable);
   void VisitRestArgumentsArray(Variable* rest);
   void VisitCallSuper(Call* call);
-  void VisitClassLiteralForRuntimeDefinition(ClassLiteral* expr);
-  void VisitClassLiteralProperties(ClassLiteral* expr, Register literal,
+  void VisitClassLiteralProperties(ClassLiteral* expr, Register constructor,
                                    Register prototype);
+  void BuildClassLiteralNameProperty(ClassLiteral* expr, Register constructor);
   void VisitThisFunctionVariable(Variable* variable);
   void VisitNewTargetVariable(Variable* variable);
   void VisitBlockDeclarationsAndStatements(Block* stmt);
@@ -141,7 +142,7 @@
   void VisitObjectLiteralAccessor(Register home_object,
                                   ObjectLiteralProperty* property,
                                   Register value_out);
-  void VisitForInAssignment(Expression* expr, FeedbackVectorSlot slot);
+  void VisitForInAssignment(Expression* expr, FeedbackSlot slot);
   void VisitModuleNamespaceImports();
 
   // Visit the header/body of a loop iteration.
@@ -172,9 +173,12 @@
 
   inline BytecodeArrayBuilder* builder() const { return builder_; }
   inline Zone* zone() const { return zone_; }
-  inline DeclarationScope* scope() const { return scope_; }
+  inline DeclarationScope* closure_scope() const { return closure_scope_; }
   inline CompilationInfo* info() const { return info_; }
 
+  inline Scope* current_scope() const { return current_scope_; }
+  inline void set_current_scope(Scope* scope) { current_scope_ = scope; }
+
   inline ControlScope* execution_control() const { return execution_control_; }
   inline void set_execution_control(ControlScope* scope) {
     execution_control_ = scope;
@@ -191,24 +195,29 @@
     return builder()->register_allocator();
   }
 
-  GlobalDeclarationsBuilder* globals_builder() { return globals_builder_; }
+  GlobalDeclarationsBuilder* globals_builder() {
+    DCHECK_NOT_NULL(globals_builder_);
+    return globals_builder_;
+  }
   inline LanguageMode language_mode() const;
-  int feedback_index(FeedbackVectorSlot slot) const;
+  int feedback_index(FeedbackSlot slot) const;
 
-  Handle<Name> home_object_symbol() const { return home_object_symbol_; }
-  Handle<Name> prototype_string() const { return prototype_string_; }
-  Handle<FixedArray> empty_fixed_array() const { return empty_fixed_array_; }
+  const AstRawString* prototype_string() const { return prototype_string_; }
+  const AstRawString* undefined_string() const { return undefined_string_; }
 
   Zone* zone_;
   BytecodeArrayBuilder* builder_;
   CompilationInfo* info_;
-  DeclarationScope* scope_;
+  DeclarationScope* closure_scope_;
+  Scope* current_scope_;
 
   GlobalDeclarationsBuilder* globals_builder_;
   ZoneVector<GlobalDeclarationsBuilder*> global_declarations_;
   ZoneVector<std::pair<FunctionLiteral*, size_t>> function_literals_;
   ZoneVector<std::pair<NativeFunctionLiteral*, size_t>>
       native_function_literals_;
+  ZoneVector<std::pair<ObjectLiteral*, size_t>> object_literals_;
+  ZoneVector<std::pair<ArrayLiteral*, size_t>> array_literals_;
 
   ControlScope* execution_control_;
   ContextScope* execution_context_;
@@ -218,9 +227,8 @@
   Register generator_state_;
   int loop_depth_;
 
-  Handle<Name> home_object_symbol_;
-  Handle<Name> prototype_string_;
-  Handle<FixedArray> empty_fixed_array_;
+  const AstRawString* prototype_string_;
+  const AstRawString* undefined_string_;
 };
 
 }  // namespace interpreter
diff --git a/src/interpreter/bytecode-label.cc b/src/interpreter/bytecode-label.cc
index a12e8ab..ef32bdd 100644
--- a/src/interpreter/bytecode-label.cc
+++ b/src/interpreter/bytecode-label.cc
@@ -5,6 +5,7 @@
 #include "src/interpreter/bytecode-label.h"
 
 #include "src/interpreter/bytecode-array-builder.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/interpreter/bytecode-label.h b/src/interpreter/bytecode-label.h
index b5f602d..4ef6265 100644
--- a/src/interpreter/bytecode-label.h
+++ b/src/interpreter/bytecode-label.h
@@ -17,7 +17,7 @@
 // label is bound, it represents a known position in the bytecode
 // array. For labels that are forward references there can be at most
 // one reference whilst it is unbound.
-class BytecodeLabel final {
+class V8_EXPORT_PRIVATE BytecodeLabel final {
  public:
   BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
 
@@ -54,7 +54,7 @@
 };
 
 // Class representing a branch target of multiple jumps.
-class BytecodeLabels {
+class V8_EXPORT_PRIVATE BytecodeLabels {
  public:
   explicit BytecodeLabels(Zone* zone) : labels_(zone) {}
 
diff --git a/src/interpreter/bytecode-operands.h b/src/interpreter/bytecode-operands.h
index 5548502..f649d93 100644
--- a/src/interpreter/bytecode-operands.h
+++ b/src/interpreter/bytecode-operands.h
@@ -23,27 +23,33 @@
   V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
   V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
 
-#define UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V)          \
-  V(Flag8, OperandTypeInfo::kFixedUnsignedByte)       \
-  V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
-  V(Idx, OperandTypeInfo::kScalableUnsignedByte)      \
-  V(UImm, OperandTypeInfo::kScalableUnsignedByte)     \
-  V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
-  V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
-
-#define SIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
+#define SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
   V(Imm, OperandTypeInfo::kScalableSignedByte)
 
+#define UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
+  V(Idx, OperandTypeInfo::kScalableUnsignedByte)      \
+  V(UImm, OperandTypeInfo::kScalableUnsignedByte)     \
+  V(RegCount, OperandTypeInfo::kScalableUnsignedByte)
+
+#define UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(V)    \
+  V(Flag8, OperandTypeInfo::kFixedUnsignedByte)       \
+  V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
+  V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
+
+// Carefully ordered for operand type range checks below.
+#define NON_REGISTER_OPERAND_TYPE_LIST(V)       \
+  INVALID_OPERAND_TYPE_LIST(V)                  \
+  UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(V)    \
+  UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
+  SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V)
+
+// Carefully ordered for operand type range checks below.
 #define REGISTER_OPERAND_TYPE_LIST(V) \
   REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
   REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
 
-#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
-  INVALID_OPERAND_TYPE_LIST(V)            \
-  UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V)    \
-  SIGNED_SCALAR_OPERAND_TYPE_LIST(V)
-
 // The list of operand types used by bytecodes.
+// Carefully ordered for operand type range checks below.
 #define OPERAND_TYPE_LIST(V)        \
   NON_REGISTER_OPERAND_TYPE_LIST(V) \
   REGISTER_OPERAND_TYPE_LIST(V)
@@ -125,6 +131,33 @@
                                            const OperandSize& operand_size);
 std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
 
+class BytecodeOperands {
+ public:
+  // Returns true if |accumulator_use| reads the accumulator.
+  static constexpr bool ReadsAccumulator(AccumulatorUse accumulator_use) {
+    return accumulator_use == AccumulatorUse::kRead ||
+           accumulator_use == AccumulatorUse::kReadWrite;
+  }
+
+  // Returns true if |accumulator_use| writes the accumulator.
+  static constexpr bool WritesAccumulator(AccumulatorUse accumulator_use) {
+    return accumulator_use == AccumulatorUse::kWrite ||
+           accumulator_use == AccumulatorUse::kReadWrite;
+  }
+
+  // Returns true if |operand_type| is a scalable signed byte.
+  static constexpr bool IsScalableSignedByte(OperandType operand_type) {
+    return operand_type >= OperandType::kImm &&
+           operand_type <= OperandType::kRegOutTriple;
+  }
+
+  // Returns true if |operand_type| is a scalable unsigned byte.
+  static constexpr bool IsScalableUnsignedByte(OperandType operand_type) {
+    return operand_type >= OperandType::kIdx &&
+           operand_type <= OperandType::kRegCount;
+  }
+};
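
The two range checks above are the reason the operand-type macro lists are
"carefully ordered": group membership reduces to a pair of comparisons rather
than a switch. A minimal standalone sketch of the pattern (the enumerator set
here is invented for illustration; it is not V8's):

    #include <cstdint>

    enum class OperandType : uint8_t {
      kNone,       // invalid
      kFlag8,      // unsigned, fixed width
      kRuntimeId,  // unsigned, fixed width
      kIdx,        // unsigned, scalable -- range begins here
      kUImm,       // unsigned, scalable
      kRegCount,   // unsigned, scalable -- range ends here
      kImm,        // signed, scalable
    };

    constexpr bool IsScalableUnsignedByte(OperandType t) {
      return t >= OperandType::kIdx && t <= OperandType::kRegCount;
    }

    static_assert(IsScalableUnsignedByte(OperandType::kUImm), "in range");
    static_assert(!IsScalableUnsignedByte(OperandType::kFlag8), "fixed width");

    int main() {}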
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/bytecode-peephole-optimizer.cc b/src/interpreter/bytecode-peephole-optimizer.cc
index 4055294..acfe484 100644
--- a/src/interpreter/bytecode-peephole-optimizer.cc
+++ b/src/interpreter/bytecode-peephole-optimizer.cc
@@ -13,7 +13,8 @@
 
 BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
     BytecodePipelineStage* next_stage)
-    : next_stage_(next_stage), last_(Bytecode::kIllegal, BytecodeSourceInfo()) {
+    : next_stage_(next_stage),
+      last_(BytecodeNode::Illegal(BytecodeSourceInfo())) {
   InvalidateLast();
 }
 
@@ -65,7 +66,7 @@
 }
 
 void BytecodePeepholeOptimizer::InvalidateLast() {
-  last_.set_bytecode(Bytecode::kIllegal);
+  last_ = BytecodeNode::Illegal(BytecodeSourceInfo());
 }
 
 bool BytecodePeepholeOptimizer::LastIsValid() const {
@@ -116,26 +117,42 @@
 
 namespace {
 
-void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
-                                              BytecodeNode* const last,
-                                              BytecodeNode* const current) {
+BytecodeNode TransformLdaSmiBinaryOpToBinaryOpWithSmi(
+    Bytecode new_bytecode, BytecodeNode* const last,
+    BytecodeNode* const current) {
   DCHECK_EQ(last->bytecode(), Bytecode::kLdaSmi);
-  current->set_bytecode(new_bytecode, last->operand(0), current->operand(0),
-                        current->operand(1));
+  BytecodeNode node(new_bytecode, last->operand(0), current->operand(0),
+                    current->operand(1), current->source_info());
   if (last->source_info().is_valid()) {
-    current->set_source_info(last->source_info());
+    node.set_source_info(last->source_info());
   }
+  return node;
 }
 
-void TransformLdaZeroBinaryOpToBinaryOpWithZero(Bytecode new_bytecode,
-                                                BytecodeNode* const last,
-                                                BytecodeNode* const current) {
+BytecodeNode TransformLdaZeroBinaryOpToBinaryOpWithZero(
+    Bytecode new_bytecode, BytecodeNode* const last,
+    BytecodeNode* const current) {
   DCHECK_EQ(last->bytecode(), Bytecode::kLdaZero);
-  current->set_bytecode(new_bytecode, 0, current->operand(0),
-                        current->operand(1));
+  BytecodeNode node(new_bytecode, 0, current->operand(0), current->operand(1),
+                    current->source_info());
   if (last->source_info().is_valid()) {
-    current->set_source_info(last->source_info());
+    node.set_source_info(last->source_info());
   }
+  return node;
+}
+
+BytecodeNode TransformEqualityWithNullOrUndefined(Bytecode new_bytecode,
+                                                  BytecodeNode* const last,
+                                                  BytecodeNode* const current) {
+  DCHECK((last->bytecode() == Bytecode::kLdaNull) ||
+         (last->bytecode() == Bytecode::kLdaUndefined));
+  DCHECK((current->bytecode() == Bytecode::kTestEqual) ||
+         (current->bytecode() == Bytecode::kTestEqualStrict));
+  BytecodeNode node(new_bytecode, current->operand(0), current->source_info());
+  if (last->source_info().is_valid()) {
+    node.set_source_info(last->source_info());
+  }
+  return node;
 }
 
 }  // namespace
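
The effect of the Smi fusion above at the bytecode level, with made-up
registers and feedback slots:

    LdaSmi [1]          ; last
    Add r0, [4]         ; current

fuses into the single node

    AddSmi [1], r0, [4]

keeping whichever of the two source positions is valid (the calling action
only fuses when at most one of them is).
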
@@ -175,8 +192,8 @@
   if (node->source_info().is_valid()) {
     // Preserve the source information by replacing the node bytecode
     // with a no op bytecode.
-    node->set_bytecode(Bytecode::kNop);
-    DefaultAction(node);
+    BytecodeNode new_node(BytecodeNode::Nop(node->source_info()));
+    DefaultAction(&new_node);
   } else {
     // Nothing to do, keep last and wait for next bytecode to pair with it.
   }
@@ -228,9 +245,9 @@
 
   if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
     // Fused last and current into current.
-    TransformLdaSmiBinaryOpToBinaryOpWithSmi(action_data->bytecode, last(),
-                                             node);
-    SetLast(node);
+    BytecodeNode new_node(TransformLdaSmiBinaryOpToBinaryOpWithSmi(
+        action_data->bytecode, last(), node));
+    SetLast(&new_node);
   } else {
     DefaultAction(node);
   }
@@ -243,14 +260,24 @@
   DCHECK(!Bytecodes::IsJump(node->bytecode()));
   if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
     // Fused last and current into current.
-    TransformLdaZeroBinaryOpToBinaryOpWithZero(action_data->bytecode, last(),
-                                               node);
-    SetLast(node);
+    BytecodeNode new_node(TransformLdaZeroBinaryOpToBinaryOpWithZero(
+        action_data->bytecode, last(), node));
+    SetLast(&new_node);
   } else {
     DefaultAction(node);
   }
 }
 
+void BytecodePeepholeOptimizer::TransformEqualityWithNullOrUndefinedAction(
+    BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+  DCHECK(LastIsValid());
+  DCHECK(!Bytecodes::IsJump(node->bytecode()));
+  // Fused last and current into current.
+  BytecodeNode new_node(TransformEqualityWithNullOrUndefined(
+      action_data->bytecode, last(), node));
+  SetLast(&new_node);
+}
+
 void BytecodePeepholeOptimizer::DefaultJumpAction(
     BytecodeNode* const node, const PeepholeActionAndData* action_data) {
   DCHECK(LastIsValid());
@@ -273,7 +300,7 @@
 
   next_stage()->Write(last());
   InvalidateLast();
-  node->set_bytecode(action_data->bytecode, node->operand(0));
+  node->replace_bytecode(action_data->bytecode);
 }
 
 void BytecodePeepholeOptimizer::ElideLastBeforeJumpAction(
diff --git a/src/interpreter/bytecode-peephole-table.h b/src/interpreter/bytecode-peephole-table.h
index 1790f5a..fe46979 100644
--- a/src/interpreter/bytecode-peephole-table.h
+++ b/src/interpreter/bytecode-peephole-table.h
@@ -11,16 +11,17 @@
 namespace internal {
 namespace interpreter {
 
-#define PEEPHOLE_NON_JUMP_ACTION_LIST(V)            \
-  V(DefaultAction)                                  \
-  V(UpdateLastAction)                               \
-  V(UpdateLastIfSourceInfoPresentAction)            \
-  V(ElideCurrentAction)                             \
-  V(ElideCurrentIfOperand0MatchesAction)            \
-  V(ElideLastAction)                                \
-  V(ChangeBytecodeAction)                           \
-  V(TransformLdaSmiBinaryOpToBinaryOpWithSmiAction) \
-  V(TransformLdaZeroBinaryOpToBinaryOpWithZeroAction)
+#define PEEPHOLE_NON_JUMP_ACTION_LIST(V)              \
+  V(DefaultAction)                                    \
+  V(UpdateLastAction)                                 \
+  V(UpdateLastIfSourceInfoPresentAction)              \
+  V(ElideCurrentAction)                               \
+  V(ElideCurrentIfOperand0MatchesAction)              \
+  V(ElideLastAction)                                  \
+  V(ChangeBytecodeAction)                             \
+  V(TransformLdaSmiBinaryOpToBinaryOpWithSmiAction)   \
+  V(TransformLdaZeroBinaryOpToBinaryOpWithZeroAction) \
+  V(TransformEqualityWithNullOrUndefinedAction)
 
 #define PEEPHOLE_JUMP_ACTION_LIST(V) \
   V(DefaultJumpAction)               \
diff --git a/src/interpreter/bytecode-pipeline.h b/src/interpreter/bytecode-pipeline.h
index d508def..03d40f7 100644
--- a/src/interpreter/bytecode-pipeline.h
+++ b/src/interpreter/bytecode-pipeline.h
@@ -191,6 +191,15 @@
     SetOperand(3, operand3);
   }
 
+#define DEFINE_BYTECODE_NODE_CREATOR(Name, ...)                              \
+  template <typename... Operands>                                            \
+  INLINE(static BytecodeNode Name(BytecodeSourceInfo source_info,            \
+                                  Operands... operands)) {                   \
+    return Create<Bytecode::k##Name, __VA_ARGS__>(source_info, operands...); \
+  }
+  BYTECODE_LIST(DEFINE_BYTECODE_NODE_CREATOR)
+#undef DEFINE_BYTECODE_NODE_CREATOR
+
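
For orientation, here is roughly what the creator macro above expands to for a
single BYTECODE_LIST entry -- assuming the entry is
V(Star, AccumulatorUse::kRead, OperandType::kRegOut) and with the INLINE()
wrapper elided:

    template <typename... Operands>
    static BytecodeNode Star(BytecodeSourceInfo source_info,
                             Operands... operands) {
      return Create<Bytecode::kStar, AccumulatorUse::kRead,
                    OperandType::kRegOut>(source_info, operands...);
    }

This is what lets call sites such as BytecodeNode::Star(source_info, operand)
in bytecode-register-optimizer.cc (further down) resolve to the statically
checked Create<> overloads.
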
   // Replace the bytecode of this node with |bytecode| and keep the operands.
   void replace_bytecode(Bytecode bytecode) {
     DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
@@ -198,40 +207,7 @@
     bytecode_ = bytecode;
   }
 
-  void set_bytecode(Bytecode bytecode) {
-    DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
-    bytecode_ = bytecode;
-    operand_count_ = 0;
-    operand_scale_ = OperandScale::kSingle;
-  }
-
-  void set_bytecode(Bytecode bytecode, uint32_t operand0) {
-    DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
-    bytecode_ = bytecode;
-    operand_count_ = 1;
-    operand_scale_ = OperandScale::kSingle;
-    SetOperand(0, operand0);
-  }
-
-  void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
-    DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
-    bytecode_ = bytecode;
-    operand_count_ = 2;
-    operand_scale_ = OperandScale::kSingle;
-    SetOperand(0, operand0);
-    SetOperand(1, operand1);
-  }
-
-  void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
-                    uint32_t operand2) {
-    DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
-    bytecode_ = bytecode;
-    operand_count_ = 3;
-    operand_scale_ = OperandScale::kSingle;
-    SetOperand(0, operand0);
-    SetOperand(1, operand1);
-    SetOperand(2, operand2);
-  }
+  void update_operand0(uint32_t operand0) { SetOperand(0, operand0); }
 
   // Print to stream |os|.
   void Print(std::ostream& os) const;
@@ -277,6 +253,100 @@
   bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
 
  private:
+  template <Bytecode bytecode, AccumulatorUse accumulator_use,
+            OperandType... operand_types>
+  friend class BytecodeNodeBuilder;
+
+  INLINE(BytecodeNode(Bytecode bytecode, int operand_count,
+                      OperandScale operand_scale,
+                      BytecodeSourceInfo source_info, uint32_t operand0 = 0,
+                      uint32_t operand1 = 0, uint32_t operand2 = 0,
+                      uint32_t operand3 = 0))
+      : bytecode_(bytecode),
+        operand_count_(operand_count),
+        operand_scale_(operand_scale),
+        source_info_(source_info) {
+    DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
+    operands_[0] = operand0;
+    operands_[1] = operand1;
+    operands_[2] = operand2;
+    operands_[3] = operand3;
+  }
+
+  template <Bytecode bytecode, AccumulatorUse accum_use>
+  INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info)) {
+    return BytecodeNode(bytecode, 0, OperandScale::kSingle, source_info);
+  }
+
+  template <Bytecode bytecode, AccumulatorUse accum_use,
+            OperandType operand0_type>
+  INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
+                                    uint32_t operand0)) {
+    DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
+    OperandScale scale = OperandScale::kSingle;
+    scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
+    return BytecodeNode(bytecode, 1, scale, source_info, operand0);
+  }
+
+  template <Bytecode bytecode, AccumulatorUse accum_use,
+            OperandType operand0_type, OperandType operand1_type>
+  INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
+                                    uint32_t operand0, uint32_t operand1)) {
+    DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
+    DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
+    OperandScale scale = OperandScale::kSingle;
+    scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
+    scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
+    return BytecodeNode(bytecode, 2, scale, source_info, operand0, operand1);
+  }
+
+  template <Bytecode bytecode, AccumulatorUse accum_use,
+            OperandType operand0_type, OperandType operand1_type,
+            OperandType operand2_type>
+  INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
+                                    uint32_t operand0, uint32_t operand1,
+                                    uint32_t operand2)) {
+    DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
+    DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
+    DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
+    OperandScale scale = OperandScale::kSingle;
+    scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
+    scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
+    scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
+    return BytecodeNode(bytecode, 3, scale, source_info, operand0, operand1,
+                        operand2);
+  }
+
+  template <Bytecode bytecode, AccumulatorUse accum_use,
+            OperandType operand0_type, OperandType operand1_type,
+            OperandType operand2_type, OperandType operand3_type>
+  INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
+                                    uint32_t operand0, uint32_t operand1,
+                                    uint32_t operand2, uint32_t operand3)) {
+    DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
+    DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
+    DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
+    DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 3), operand3_type);
+    OperandScale scale = OperandScale::kSingle;
+    scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
+    scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
+    scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
+    scale = std::max(scale, ScaleForOperand<operand3_type>(operand3));
+    return BytecodeNode(bytecode, 4, scale, source_info, operand0, operand1,
+                        operand2, operand3);
+  }
+
+  template <OperandType operand_type>
+  INLINE(static OperandScale ScaleForOperand(uint32_t operand)) {
+    if (BytecodeOperands::IsScalableUnsignedByte(operand_type)) {
+      return Bytecodes::ScaleForUnsignedOperand(operand);
+    } else if (BytecodeOperands::IsScalableSignedByte(operand_type)) {
+      return Bytecodes::ScaleForSignedOperand(operand);
+    } else {
+      return OperandScale::kSingle;
+    }
+  }
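
Each Create<> overload above folds the per-operand scales into one node scale
via std::max. A standalone sketch of that folding, assuming
ScaleForUnsignedOperand picks the narrowest width that can represent the value
(illustrative, not V8's implementation):

    #include <algorithm>
    #include <cstdint>
    #include <initializer_list>

    enum class OperandScale : uint8_t { kSingle = 1, kDouble = 2, kQuadruple = 4 };

    OperandScale ScaleForUnsignedOperand(uint32_t value) {
      if (value <= UINT8_MAX) return OperandScale::kSingle;
      if (value <= UINT16_MAX) return OperandScale::kDouble;
      return OperandScale::kQuadruple;
    }

    int main() {
      OperandScale scale = OperandScale::kSingle;
      for (uint32_t operand : {3u, 70000u}) {
        scale = std::max(scale, ScaleForUnsignedOperand(operand));
      }
      // scale is now kQuadruple: one wide operand widens the whole node.
      return scale == OperandScale::kQuadruple ? 0 : 1;
    }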
+
   INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
     if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
       operand_scale_ =
diff --git a/src/interpreter/bytecode-register-optimizer.cc b/src/interpreter/bytecode-register-optimizer.cc
index 563956e..e1e38a6 100644
--- a/src/interpreter/bytecode-register-optimizer.cc
+++ b/src/interpreter/bytecode-register-optimizer.cc
@@ -265,16 +265,16 @@
 
   if (input == accumulator_) {
     uint32_t operand = static_cast<uint32_t>(output.ToOperand());
-    BytecodeNode node(Bytecode::kStar, operand, source_info);
+    BytecodeNode node = BytecodeNode::Star(source_info, operand);
     next_stage_->Write(&node);
   } else if (output == accumulator_) {
     uint32_t operand = static_cast<uint32_t>(input.ToOperand());
-    BytecodeNode node(Bytecode::kLdar, operand, source_info);
+    BytecodeNode node = BytecodeNode::Ldar(source_info, operand);
     next_stage_->Write(&node);
   } else {
     uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
     uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
-    BytecodeNode node(Bytecode::kMov, operand0, operand1, source_info);
+    BytecodeNode node = BytecodeNode::Mov(source_info, operand0, operand1);
     next_stage_->Write(&node);
   }
   if (output != accumulator_) {
@@ -365,7 +365,7 @@
 void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
     BytecodeSourceInfo source_info) const {
   DCHECK(source_info.is_valid());
-  BytecodeNode nop(Bytecode::kNop, source_info);
+  BytecodeNode nop = BytecodeNode::Nop(source_info);
   next_stage_->Write(&nop);
 }
 
@@ -416,32 +416,6 @@
   }
 }
 
-void BytecodeRegisterOptimizer::PrepareForBytecode(Bytecode bytecode) {
-  if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
-      bytecode == Bytecode::kSuspendGenerator) {
-    // All state must be flushed before emitting
-    // - a jump bytecode (as the register equivalents at the jump target aren't
-    //   known.
-    // - a call to the debugger (as it can manipulate locals and parameters),
-    // - a generator suspend (as this involves saving all registers).
-    Flush();
-  }
-
-  // Materialize the accumulator if it is read by the bytecode. The
-  // accumulator is special and no other register can be materialized
-  // in it's place.
-  if (Bytecodes::ReadsAccumulator(bytecode) &&
-      !accumulator_info_->materialized()) {
-    Materialize(accumulator_info_);
-  }
-
-  // Materialize an equivalent to the accumulator if it will be
-  // clobbered when the bytecode is dispatched.
-  if (Bytecodes::WritesAccumulator(bytecode)) {
-    PrepareOutputRegister(accumulator_);
-  }
-}
-
 void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
   DCHECK(RegisterIsTemporary(reg));
   size_t index = GetRegisterInfoTableIndex(reg);
diff --git a/src/interpreter/bytecode-register-optimizer.h b/src/interpreter/bytecode-register-optimizer.h
index e2a02cf..80c2f25 100644
--- a/src/interpreter/bytecode-register-optimizer.h
+++ b/src/interpreter/bytecode-register-optimizer.h
@@ -46,7 +46,32 @@
   void Flush();
 
   // Prepares for |bytecode|.
-  void PrepareForBytecode(Bytecode bytecode);
+  template <Bytecode bytecode, AccumulatorUse accumulator_use>
+  INLINE(void PrepareForBytecode()) {
+    if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
+        bytecode == Bytecode::kSuspendGenerator) {
+      // All state must be flushed before emitting
+      // - a jump bytecode (as the register equivalents at the jump target
+      //   aren't known),
+      // - a call to the debugger (as it can manipulate locals and parameters),
+      // - a generator suspend (as this involves saving all registers).
+      Flush();
+    }
+
+    // Materialize the accumulator if it is read by the bytecode. The
+    // accumulator is special and no other register can be materialized
+    // in its place.
+    if (BytecodeOperands::ReadsAccumulator(accumulator_use)) {
+      Materialize(accumulator_info_);
+    }
+
+    // Materialize an equivalent to the accumulator if it will be
+    // clobbered when the bytecode is dispatched.
+    if (BytecodeOperands::WritesAccumulator(accumulator_use)) {
+      PrepareOutputRegister(accumulator_);
+    }
+  }
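
Making PrepareForBytecode a template over <bytecode, accumulator_use> lets the
compiler resolve the checks above per instantiation. A standalone sketch of
the idea (types and names simplified for illustration):

    #include <cstdio>

    enum class AccumulatorUse { kNone, kRead, kWrite, kReadWrite };

    constexpr bool ReadsAccumulator(AccumulatorUse use) {
      return use == AccumulatorUse::kRead || use == AccumulatorUse::kReadWrite;
    }

    template <AccumulatorUse use>
    void PrepareForBytecode() {
      // |use| is a compile-time constant here, so the dead branch folds away.
      if (ReadsAccumulator(use)) {
        std::puts("materialize accumulator");
      }
    }

    int main() {
      PrepareForBytecode<AccumulatorUse::kReadWrite>();  // prints
      PrepareForBytecode<AccumulatorUse::kNone>();       // compiles to a no-op
    }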
 
   // Prepares |reg| for being used as an output operand.
   void PrepareOutputRegister(Register reg);
diff --git a/src/interpreter/bytecodes.cc b/src/interpreter/bytecodes.cc
index 15c4e98..f7fb7ed 100644
--- a/src/interpreter/bytecodes.cc
+++ b/src/interpreter/bytecodes.cc
@@ -166,6 +166,19 @@
   return false;
 }
 
+bool Bytecodes::MakesCallAlongCriticalPath(Bytecode bytecode) {
+  if (IsCallOrConstruct(bytecode) || IsCallRuntime(bytecode)) return true;
+  switch (bytecode) {
+    case Bytecode::kCreateWithContext:
+    case Bytecode::kCreateBlockContext:
+    case Bytecode::kCreateCatchContext:
+    case Bytecode::kCreateRegExpLiteral:
+      return true;
+    default:
+      return false;
+  }
+}
+
 // static
 bool Bytecodes::IsRegisterInputOperandType(OperandType operand_type) {
   switch (operand_type) {
@@ -227,7 +240,8 @@
       case Bytecode::kTypeOf:
       case Bytecode::kCall:
       case Bytecode::kCallProperty:
-      case Bytecode::kNew:
+      case Bytecode::kConstruct:
+      case Bytecode::kConstructWithSpread:
         return true;
       default:
         return false;
diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h
index 23d77f0..f608526 100644
--- a/src/interpreter/bytecodes.h
+++ b/src/interpreter/bytecodes.h
@@ -38,8 +38,9 @@
   V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx)                    \
                                                                                \
   /* Globals */                                                                \
-  V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx)                      \
-  V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx)          \
+  V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx, OperandType::kIdx)   \
+  V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx,          \
+    OperandType::kIdx)                                                         \
   V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx,                 \
     OperandType::kIdx)                                                         \
   V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx,                 \
@@ -50,7 +51,10 @@
   V(PopContext, AccumulatorUse::kNone, OperandType::kReg)                      \
   V(LdaContextSlot, AccumulatorUse::kWrite, OperandType::kReg,                 \
     OperandType::kIdx, OperandType::kUImm)                                     \
+  V(LdaImmutableContextSlot, AccumulatorUse::kWrite, OperandType::kReg,        \
+    OperandType::kIdx, OperandType::kUImm)                                     \
   V(LdaCurrentContextSlot, AccumulatorUse::kWrite, OperandType::kIdx)          \
+  V(LdaImmutableCurrentContextSlot, AccumulatorUse::kWrite, OperandType::kIdx) \
   V(StaContextSlot, AccumulatorUse::kRead, OperandType::kReg,                  \
     OperandType::kIdx, OperandType::kUImm)                                     \
   V(StaCurrentContextSlot, AccumulatorUse::kRead, OperandType::kIdx)           \
@@ -93,10 +97,14 @@
     OperandType::kIdx, OperandType::kIdx)                                      \
   V(StaNamedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg,          \
     OperandType::kIdx, OperandType::kIdx)                                      \
+  V(StaNamedOwnProperty, AccumulatorUse::kRead, OperandType::kReg,             \
+    OperandType::kIdx, OperandType::kIdx)                                      \
   V(StaKeyedPropertySloppy, AccumulatorUse::kRead, OperandType::kReg,          \
     OperandType::kReg, OperandType::kIdx)                                      \
   V(StaKeyedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg,          \
     OperandType::kReg, OperandType::kIdx)                                      \
+  V(StaDataPropertyInLiteral, AccumulatorUse::kRead, OperandType::kReg,        \
+    OperandType::kReg, OperandType::kFlag8, OperandType::kIdx)                 \
                                                                                \
   /* Binary Operators */                                                       \
   V(Add, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx)     \
@@ -140,11 +148,16 @@
   V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg)       \
   V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg)       \
                                                                                \
+  /* GetSuperConstructor operator */                                           \
+  V(GetSuperConstructor, AccumulatorUse::kRead, OperandType::kRegOut)          \
+                                                                               \
   /* Call operations */                                                        \
   V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kRegList,    \
     OperandType::kRegCount, OperandType::kIdx)                                 \
   V(CallProperty, AccumulatorUse::kWrite, OperandType::kReg,                   \
     OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx)          \
+  V(CallWithSpread, AccumulatorUse::kWrite, OperandType::kReg,                 \
+    OperandType::kRegList, OperandType::kRegCount)                             \
   V(TailCall, AccumulatorUse::kWrite, OperandType::kReg,                       \
     OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx)          \
   V(CallRuntime, AccumulatorUse::kWrite, OperandType::kRuntimeId,              \
@@ -158,9 +171,11 @@
   V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kIntrinsicId,        \
     OperandType::kRegList, OperandType::kRegCount)                             \
                                                                                \
-  /* New operator */                                                           \
-  V(New, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kRegList, \
-    OperandType::kRegCount, OperandType::kIdx)                                 \
+  /* Construct operators */                                                    \
+  V(Construct, AccumulatorUse::kReadWrite, OperandType::kReg,                  \
+    OperandType::kRegList, OperandType::kRegCount, OperandType::kIdx)          \
+  V(ConstructWithSpread, AccumulatorUse::kReadWrite, OperandType::kReg,        \
+    OperandType::kRegList, OperandType::kRegCount)                             \
                                                                                \
   /* Test Operators */                                                         \
   V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg,                  \
@@ -180,6 +195,11 @@
   V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg)             \
   V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg)                     \
                                                                                \
+  /* TestEqual with Null or Undefined */                                       \
+  V(TestUndetectable, AccumulatorUse::kWrite, OperandType::kReg)               \
+  V(TestNull, AccumulatorUse::kWrite, OperandType::kReg)                       \
+  V(TestUndefined, AccumulatorUse::kWrite, OperandType::kReg)                  \
+                                                                               \
   /* Cast operators */                                                         \
   V(ToName, AccumulatorUse::kRead, OperandType::kRegOut)                       \
   V(ToNumber, AccumulatorUse::kRead, OperandType::kRegOut)                     \
@@ -195,13 +215,14 @@
                                                                                \
   /* Closure allocation */                                                     \
   V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx,                  \
-    OperandType::kFlag8)                                                       \
+    OperandType::kIdx, OperandType::kFlag8)                                    \
                                                                                \
   /* Context allocation */                                                     \
   V(CreateBlockContext, AccumulatorUse::kReadWrite, OperandType::kIdx)         \
   V(CreateCatchContext, AccumulatorUse::kReadWrite, OperandType::kReg,         \
     OperandType::kIdx, OperandType::kIdx)                                      \
   V(CreateFunctionContext, AccumulatorUse::kWrite, OperandType::kUImm)         \
+  V(CreateEvalContext, AccumulatorUse::kWrite, OperandType::kUImm)             \
   V(CreateWithContext, AccumulatorUse::kReadWrite, OperandType::kReg,          \
     OperandType::kIdx)                                                         \
                                                                                \
@@ -210,24 +231,35 @@
   V(CreateUnmappedArguments, AccumulatorUse::kWrite)                           \
   V(CreateRestParameter, AccumulatorUse::kWrite)                               \
                                                                                \
-  /* Control Flow */                                                           \
-  V(Jump, AccumulatorUse::kNone, OperandType::kImm)                            \
+  /* Control Flow -- carefully ordered for efficient checks */                 \
+  /* - [Unconditional jumps] */                                                \
+  V(JumpLoop, AccumulatorUse::kNone, OperandType::kUImm, OperandType::kImm)    \
+  /* - [Forward jumps] */                                                      \
+  V(Jump, AccumulatorUse::kNone, OperandType::kUImm)                           \
+  /* - [Start constant jumps] */                                               \
   V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx)                    \
-  V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm)                      \
-  V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)              \
-  V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm)                     \
-  V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
-  V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm)             \
-  V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)     \
-  V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm)            \
-  V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)    \
-  V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm)                      \
+  /* - [Conditional jumps] */                                                  \
+  /* - [Conditional constant jumps] */                                         \
   V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx)              \
-  V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm)                 \
   V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx)         \
-  V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm)                   \
+  V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)              \
+  V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)             \
+  V(JumpIfJSReceiverConstant, AccumulatorUse::kRead, OperandType::kIdx)        \
   V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx)           \
-  V(JumpLoop, AccumulatorUse::kNone, OperandType::kImm, OperandType::kImm)     \
+  /* - [Start ToBoolean jumps] */                                              \
+  V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx)     \
+  V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx)    \
+  /* - [End constant jumps] */                                                 \
+  /* - [Conditional immediate jumps] */                                        \
+  V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kUImm)            \
+  V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kUImm)           \
+  /* - [End ToBoolean jumps] */                                                \
+  V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kUImm)                     \
+  V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kUImm)                    \
+  V(JumpIfNull, AccumulatorUse::kRead, OperandType::kUImm)                     \
+  V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kUImm)                \
+  V(JumpIfJSReceiver, AccumulatorUse::kRead, OperandType::kUImm)               \
+  V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kUImm)                  \
                                                                                \
   /* Complex flow control For..in */                                           \
   V(ForInPrepare, AccumulatorUse::kNone, OperandType::kReg,                    \
@@ -241,6 +273,9 @@
   /* Perform a stack guard check */                                            \
   V(StackCheck, AccumulatorUse::kNone)                                         \
                                                                                \
+  /* Update the pending message */                                             \
+  V(SetPendingMessage, AccumulatorUse::kReadWrite)                             \
+                                                                               \
   /* Non-local flow control */                                                 \
   V(Throw, AccumulatorUse::kRead)                                              \
   V(ReThrow, AccumulatorUse::kRead)                                            \
@@ -294,6 +329,69 @@
   DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
   DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
 
+// Lists of jump bytecodes.
+
+#define JUMP_UNCONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+  V(JumpLoop)                                         \
+  V(Jump)
+
+#define JUMP_UNCONDITIONAL_CONSTANT_BYTECODE_LIST(V) V(JumpConstant)
+
+#define JUMP_TOBOOLEAN_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+  V(JumpIfToBooleanTrue)                                      \
+  V(JumpIfToBooleanFalse)
+
+#define JUMP_TOBOOLEAN_CONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
+  V(JumpIfToBooleanTrueConstant)                             \
+  V(JumpIfToBooleanFalseConstant)
+
+#define JUMP_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V)     \
+  JUMP_TOBOOLEAN_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+  V(JumpIfTrue)                                         \
+  V(JumpIfFalse)                                        \
+  V(JumpIfNull)                                         \
+  V(JumpIfUndefined)                                    \
+  V(JumpIfJSReceiver)                                   \
+  V(JumpIfNotHole)
+
+#define JUMP_CONDITIONAL_CONSTANT_BYTECODE_LIST(V)     \
+  JUMP_TOBOOLEAN_CONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
+  V(JumpIfNullConstant)                                \
+  V(JumpIfUndefinedConstant)                           \
+  V(JumpIfTrueConstant)                                \
+  V(JumpIfFalseConstant)                               \
+  V(JumpIfJSReceiverConstant)                          \
+  V(JumpIfNotHoleConstant)
+
+#define JUMP_CONSTANT_BYTECODE_LIST(V)         \
+  JUMP_UNCONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
+  JUMP_CONDITIONAL_CONSTANT_BYTECODE_LIST(V)
+
+#define JUMP_IMMEDIATE_BYTECODE_LIST(V)         \
+  JUMP_UNCONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+  JUMP_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V)
+
+#define JUMP_TO_BOOLEAN_BYTECODE_LIST(V)                \
+  JUMP_TOBOOLEAN_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+  JUMP_TOBOOLEAN_CONDITIONAL_CONSTANT_BYTECODE_LIST(V)
+
+#define JUMP_UNCONDITIONAL_BYTECODE_LIST(V)     \
+  JUMP_UNCONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+  JUMP_UNCONDITIONAL_CONSTANT_BYTECODE_LIST(V)
+
+#define JUMP_CONDITIONAL_BYTECODE_LIST(V)     \
+  JUMP_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+  JUMP_CONDITIONAL_CONSTANT_BYTECODE_LIST(V)
+
+#define JUMP_FORWARD_BYTECODE_LIST(V) \
+  V(Jump)                             \
+  V(JumpConstant)                     \
+  JUMP_CONDITIONAL_BYTECODE_LIST(V)
+
+#define JUMP_BYTECODE_LIST(V)   \
+  JUMP_FORWARD_BYTECODE_LIST(V) \
+  V(JumpLoop)
+
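+// A sketch (illustrative only) of how the contiguous ordering above is
+// consumed: jump-kind predicates reduce to single range comparisons, e.g.
+//
+//   constexpr bool IsJumpBytecode(Bytecode bytecode) {
+//     return bytecode >= Bytecode::kJumpLoop &&
+//            bytecode <= Bytecode::kJumpIfNotHole;
+//   }
+//
+// (cf. Bytecodes::IsJump below). Reordering BYTECODE_LIST therefore requires
+// updating those range endpoints as well.
+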
 // Enumeration of interpreter bytecodes.
 enum class Bytecode : uint8_t {
 #define DECLARE_BYTECODE(Name, ...) k##Name,
@@ -306,14 +404,6 @@
 #undef COUNT_BYTECODE
 };
 
-// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
-// See crbug.com/603131.
-#if V8_CC_MSVC
-#define CONSTEXPR const
-#else
-#define CONSTEXPR constexpr
-#endif
-
 class V8_EXPORT_PRIVATE Bytecodes final {
  public:
   //  The maximum number of operands a bytecode may have.
@@ -381,14 +471,12 @@
 
   // Returns true if |bytecode| reads the accumulator.
   static bool ReadsAccumulator(Bytecode bytecode) {
-    return (GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) ==
-           AccumulatorUse::kRead;
+    return BytecodeOperands::ReadsAccumulator(GetAccumulatorUse(bytecode));
   }
 
   // Returns true if |bytecode| writes the accumulator.
   static bool WritesAccumulator(Bytecode bytecode) {
-    return (GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) ==
-           AccumulatorUse::kWrite;
+    return BytecodeOperands::WritesAccumulator(GetAccumulatorUse(bytecode));
   }
 
   // Return true if |bytecode| writes the accumulator with a boolean value.
@@ -407,7 +495,10 @@
       case Bytecode::kTestGreaterThanOrEqual:
       case Bytecode::kTestInstanceOf:
       case Bytecode::kTestIn:
+      case Bytecode::kTestUndetectable:
       case Bytecode::kForInContinue:
+      case Bytecode::kTestUndefined:
+      case Bytecode::kTestNull:
         return true;
       default:
         return false;
@@ -416,7 +507,7 @@
 
   // Return true if |bytecode| is an accumulator load without effects,
   // e.g. LdaConstant, LdaTrue, Ldar.
-  static CONSTEXPR bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
+  static constexpr bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
     return bytecode == Bytecode::kLdar || bytecode == Bytecode::kLdaZero ||
            bytecode == Bytecode::kLdaSmi || bytecode == Bytecode::kLdaNull ||
            bytecode == Bytecode::kLdaTrue || bytecode == Bytecode::kLdaFalse ||
@@ -424,128 +515,136 @@
            bytecode == Bytecode::kLdaTheHole ||
            bytecode == Bytecode::kLdaConstant ||
            bytecode == Bytecode::kLdaContextSlot ||
-           bytecode == Bytecode::kLdaCurrentContextSlot;
+           bytecode == Bytecode::kLdaCurrentContextSlot ||
+           bytecode == Bytecode::kLdaImmutableContextSlot ||
+           bytecode == Bytecode::kLdaImmutableCurrentContextSlot;
   }
 
   // Return true if |bytecode| is a register load without effects,
   // e.g. Mov, Star.
-  static CONSTEXPR bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
+  static constexpr bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
     return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext ||
            bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar;
   }
 
   // Returns true if the bytecode is a conditional jump taking
   // an immediate byte operand (OperandType::kUImm).
-  static CONSTEXPR bool IsConditionalJumpImmediate(Bytecode bytecode) {
-    return bytecode == Bytecode::kJumpIfTrue ||
-           bytecode == Bytecode::kJumpIfFalse ||
-           bytecode == Bytecode::kJumpIfToBooleanTrue ||
-           bytecode == Bytecode::kJumpIfToBooleanFalse ||
-           bytecode == Bytecode::kJumpIfNotHole ||
-           bytecode == Bytecode::kJumpIfNull ||
-           bytecode == Bytecode::kJumpIfUndefined;
+  static constexpr bool IsConditionalJumpImmediate(Bytecode bytecode) {
+    return bytecode >= Bytecode::kJumpIfToBooleanTrue &&
+           bytecode <= Bytecode::kJumpIfNotHole;
   }
 
   // Returns true if the bytecode is a conditional jump taking
   // a constant pool entry (OperandType::kIdx).
-  static CONSTEXPR bool IsConditionalJumpConstant(Bytecode bytecode) {
-    return bytecode == Bytecode::kJumpIfTrueConstant ||
-           bytecode == Bytecode::kJumpIfFalseConstant ||
-           bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
-           bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
-           bytecode == Bytecode::kJumpIfNotHoleConstant ||
-           bytecode == Bytecode::kJumpIfNullConstant ||
-           bytecode == Bytecode::kJumpIfUndefinedConstant;
+  static constexpr bool IsConditionalJumpConstant(Bytecode bytecode) {
+    return bytecode >= Bytecode::kJumpIfNullConstant &&
+           bytecode <= Bytecode::kJumpIfToBooleanFalseConstant;
   }
 
   // Returns true if the bytecode is a conditional jump taking
   // any kind of operand.
-  static CONSTEXPR bool IsConditionalJump(Bytecode bytecode) {
-    return IsConditionalJumpImmediate(bytecode) ||
-           IsConditionalJumpConstant(bytecode);
+  static constexpr bool IsConditionalJump(Bytecode bytecode) {
+    return bytecode >= Bytecode::kJumpIfNullConstant &&
+           bytecode <= Bytecode::kJumpIfNotHole;
+  }
+
+  // Returns true if the bytecode is an unconditional jump.
+  static constexpr bool IsUnconditionalJump(Bytecode bytecode) {
+    return bytecode >= Bytecode::kJumpLoop &&
+           bytecode <= Bytecode::kJumpConstant;
   }
 
   // Returns true if the bytecode is a jump or a conditional jump taking
   // an immediate byte operand (OperandType::kUImm).
-  static CONSTEXPR bool IsJumpImmediate(Bytecode bytecode) {
+  static constexpr bool IsJumpImmediate(Bytecode bytecode) {
     return bytecode == Bytecode::kJump || bytecode == Bytecode::kJumpLoop ||
            IsConditionalJumpImmediate(bytecode);
   }
 
   // Returns true if the bytecode is a jump or conditional jump taking a
   // constant pool entry (OperandType::kIdx).
-  static CONSTEXPR bool IsJumpConstant(Bytecode bytecode) {
-    return bytecode == Bytecode::kJumpConstant ||
-           IsConditionalJumpConstant(bytecode);
+  static constexpr bool IsJumpConstant(Bytecode bytecode) {
+    return bytecode >= Bytecode::kJumpConstant &&
+           bytecode <= Bytecode::kJumpIfToBooleanFalseConstant;
   }
 
   // Returns true if the bytecode is a jump that internally coerces the
   // accumulator to a boolean.
-  static CONSTEXPR bool IsJumpIfToBoolean(Bytecode bytecode) {
-    return bytecode == Bytecode::kJumpIfToBooleanTrue ||
-           bytecode == Bytecode::kJumpIfToBooleanFalse ||
-           bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
-           bytecode == Bytecode::kJumpIfToBooleanFalseConstant;
+  static constexpr bool IsJumpIfToBoolean(Bytecode bytecode) {
+    return bytecode >= Bytecode::kJumpIfToBooleanTrueConstant &&
+           bytecode <= Bytecode::kJumpIfToBooleanFalse;
   }
 
   // Returns true if the bytecode is a jump or conditional jump taking
   // any kind of operand.
-  static CONSTEXPR bool IsJump(Bytecode bytecode) {
-    return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
+  static constexpr bool IsJump(Bytecode bytecode) {
+    return bytecode >= Bytecode::kJumpLoop &&
+           bytecode <= Bytecode::kJumpIfNotHole;
+  }
+
+  // Returns true if the bytecode is a forward jump or conditional jump taking
+  // any kind of operand.
+  static constexpr bool IsForwardJump(Bytecode bytecode) {
+    return bytecode >= Bytecode::kJump && bytecode <= Bytecode::kJumpIfNotHole;
   }
 
   // Returns true if the bytecode is a conditional jump, a jump, or a return.
-  static CONSTEXPR bool IsJumpOrReturn(Bytecode bytecode) {
+  static constexpr bool IsJumpOrReturn(Bytecode bytecode) {
     return bytecode == Bytecode::kReturn || IsJump(bytecode);
   }
 
   // Return true if |bytecode| is a jump without effects,
   // e.g. any jump excluding those that include type coercion like
   // JumpIfToBooleanTrue.
-  static CONSTEXPR bool IsJumpWithoutEffects(Bytecode bytecode) {
+  static constexpr bool IsJumpWithoutEffects(Bytecode bytecode) {
     return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
   }
 
   // Returns true if |bytecode| has no effects. These bytecodes only manipulate
   // interpreter frame state and will never throw.
-  static CONSTEXPR bool IsWithoutExternalSideEffects(Bytecode bytecode) {
+  static constexpr bool IsWithoutExternalSideEffects(Bytecode bytecode) {
     return (IsAccumulatorLoadWithoutEffects(bytecode) ||
             IsRegisterLoadWithoutEffects(bytecode) ||
             bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode));
   }
 
   // Returns true if the bytecode is Ldar or Star.
-  static CONSTEXPR bool IsLdarOrStar(Bytecode bytecode) {
+  static constexpr bool IsLdarOrStar(Bytecode bytecode) {
     return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
   }
 
   // Returns true if |bytecode| puts a name in the accumulator.
-  static CONSTEXPR bool PutsNameInAccumulator(Bytecode bytecode) {
+  static constexpr bool PutsNameInAccumulator(Bytecode bytecode) {
     return bytecode == Bytecode::kTypeOf;
   }
 
   // Returns true if the bytecode is a call or a constructor call.
-  static CONSTEXPR bool IsCallOrNew(Bytecode bytecode) {
+  static constexpr bool IsCallOrConstruct(Bytecode bytecode) {
     return bytecode == Bytecode::kCall || bytecode == Bytecode::kCallProperty ||
-           bytecode == Bytecode::kTailCall || bytecode == Bytecode::kNew;
+           bytecode == Bytecode::kTailCall ||
+           bytecode == Bytecode::kConstruct ||
+           bytecode == Bytecode::kCallWithSpread ||
+           bytecode == Bytecode::kConstructWithSpread ||
+           bytecode == Bytecode::kInvokeIntrinsic ||
+           bytecode == Bytecode::kCallJSRuntime;
   }
 
   // Returns true if the bytecode is a call to the runtime.
-  static CONSTEXPR bool IsCallRuntime(Bytecode bytecode) {
+  static constexpr bool IsCallRuntime(Bytecode bytecode) {
     return bytecode == Bytecode::kCallRuntime ||
            bytecode == Bytecode::kCallRuntimeForPair ||
            bytecode == Bytecode::kInvokeIntrinsic;
   }
 
   // Returns true if the bytecode is a scaling prefix bytecode.
-  static CONSTEXPR bool IsPrefixScalingBytecode(Bytecode bytecode) {
+  static constexpr bool IsPrefixScalingBytecode(Bytecode bytecode) {
     return bytecode == Bytecode::kExtraWide || bytecode == Bytecode::kWide ||
            bytecode == Bytecode::kDebugBreakExtraWide ||
            bytecode == Bytecode::kDebugBreakWide;
   }
 
   // Returns the number of values which |bytecode| returns.
-  static CONSTEXPR size_t ReturnCount(Bytecode bytecode) {
+  static constexpr size_t ReturnCount(Bytecode bytecode) {
     return bytecode == Bytecode::kReturn ? 1 : 0;
   }
 
@@ -632,6 +731,10 @@
   // Returns the equivalent jump bytecode without the accumulator coercion.
   static Bytecode GetJumpWithoutToBoolean(Bytecode bytecode);
 
+  // Returns true if there is a call in the most-frequently executed path
+  // through the bytecode's handler.
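+  // Handlers for such bytecodes must re-load any values they cache from the
+  // dispatch parameters after the call (see e.g.
+  // InterpreterAssembler::GetInterpretedFramePointer()).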
+  static bool MakesCallAlongCriticalPath(Bytecode bytecode);
+
   // Returns true if the bytecode is a debug break.
   static bool IsDebugBreak(Bytecode bytecode);
 
@@ -730,10 +833,6 @@
   static const OperandSize* const kOperandSizes[][3];
 };
 
-// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
-// See crbug.com/603131.
-#undef CONSTEXPR
-
 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                            const Bytecode& bytecode);
 
diff --git a/src/interpreter/constant-array-builder.cc b/src/interpreter/constant-array-builder.cc
index d2b7995..74d887a 100644
--- a/src/interpreter/constant-array-builder.cc
+++ b/src/interpreter/constant-array-builder.cc
@@ -7,6 +7,10 @@
 #include <functional>
 #include <set>
 
+#include "src/ast/ast-value-factory.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/base/functional.h"
 #include "src/isolate.h"
 #include "src/objects-inl.h"
 
@@ -34,36 +38,48 @@
 }
 
 size_t ConstantArrayBuilder::ConstantArraySlice::Allocate(
-    Handle<Object> object) {
+    ConstantArrayBuilder::Entry entry) {
   DCHECK_GT(available(), 0u);
   size_t index = constants_.size();
   DCHECK_LT(index, capacity());
-  constants_.push_back(object);
+  constants_.push_back(entry);
   return index + start_index();
 }
 
-Handle<Object> ConstantArrayBuilder::ConstantArraySlice::At(
+ConstantArrayBuilder::Entry& ConstantArrayBuilder::ConstantArraySlice::At(
+    size_t index) {
+  DCHECK_GE(index, start_index());
+  DCHECK_LT(index, start_index() + size());
+  return constants_[index - start_index()];
+}
+
+const ConstantArrayBuilder::Entry& ConstantArrayBuilder::ConstantArraySlice::At(
     size_t index) const {
   DCHECK_GE(index, start_index());
   DCHECK_LT(index, start_index() + size());
   return constants_[index - start_index()];
 }
 
-void ConstantArrayBuilder::ConstantArraySlice::InsertAt(size_t index,
-                                                        Handle<Object> object) {
-  DCHECK_GE(index, start_index());
-  DCHECK_LT(index, start_index() + size());
-  constants_[index - start_index()] = object;
-}
-
-bool ConstantArrayBuilder::ConstantArraySlice::AllElementsAreUnique() const {
+#if DEBUG
+void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
+    Isolate* isolate) const {
   std::set<Object*> elements;
-  for (auto constant : constants_) {
-    if (elements.find(*constant) != elements.end()) return false;
-    elements.insert(*constant);
+  for (const Entry& entry : constants_) {
+    Handle<Object> handle = entry.ToHandle(isolate);
+    if (elements.find(*handle) != elements.end()) {
+      std::ostringstream os;
+      os << "Duplicate constant found: " << Brief(*handle) << std::endl;
+      // Print all the entries in the slice to help debug duplicates.
+      size_t i = start_index();
+      for (const Entry& prev_entry : constants_) {
+        os << i++ << ": " << Brief(*prev_entry.ToHandle(isolate)) << std::endl;
+      }
+      FATAL(os.str().c_str());
+    }
+    elements.insert(*handle);
   }
-  return true;
 }
+#endif
 
 STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::k8BitCapacity;
 STATIC_CONST_MEMBER_DEFINITION const size_t
@@ -71,14 +87,15 @@
 STATIC_CONST_MEMBER_DEFINITION const size_t
     ConstantArrayBuilder::k32BitCapacity;
 
-ConstantArrayBuilder::ConstantArrayBuilder(Zone* zone,
-                                           Handle<Object> the_hole_value)
-    : constants_map_(16, base::KeyEqualityMatcher<Address>(),
+ConstantArrayBuilder::ConstantArrayBuilder(Zone* zone)
+    : constants_map_(16, base::KeyEqualityMatcher<intptr_t>(),
                      ZoneAllocationPolicy(zone)),
       smi_map_(zone),
       smi_pairs_(zone),
-      zone_(zone),
-      the_hole_value_(the_hole_value) {
+#define INIT_SINGLETON_ENTRY_FIELD(NAME, LOWER_NAME) LOWER_NAME##_(-1),
+      SINGLETON_CONSTANT_ENTRY_TYPES(INIT_SINGLETON_ENTRY_FIELD)
+#undef INIT_SINGLETON_ENTRY_FIELD
+          zone_(zone) {
   idx_slice_[0] =
       new (zone) ConstantArraySlice(zone, 0, k8BitCapacity, OperandSize::kByte);
   idx_slice_[1] = new (zone) ConstantArraySlice(
@@ -109,65 +126,103 @@
   return nullptr;
 }
 
-Handle<Object> ConstantArrayBuilder::At(size_t index) const {
+MaybeHandle<Object> ConstantArrayBuilder::At(size_t index,
+                                             Isolate* isolate) const {
   const ConstantArraySlice* slice = IndexToSlice(index);
+  DCHECK_LT(index, slice->capacity());
   if (index < slice->start_index() + slice->size()) {
-    return slice->At(index);
-  } else {
-    DCHECK_LT(index, slice->capacity());
-    return the_hole_value();
+    const Entry& entry = slice->At(index);
+    if (!entry.IsDeferred()) return entry.ToHandle(isolate);
   }
+  return MaybeHandle<Object>();
 }
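+
+// A test-side usage sketch (names assumed): the MaybeHandle return forces
+// callers to handle the deferred case explicitly, e.g.
+//
+//   Handle<Object> constant;
+//   if (builder.At(index, isolate).ToHandle(&constant)) {
+//     // The entry has been materialized.
+//   }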
 
 Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
-  // First insert reserved SMI values.
-  for (auto reserved_smi : smi_pairs_) {
-    InsertAllocatedEntry(reserved_smi.second,
-                         handle(reserved_smi.first, isolate));
-  }
-
-  Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(
+  Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArrayWithHoles(
       static_cast<int>(size()), PretenureFlag::TENURED);
   int array_index = 0;
   for (const ConstantArraySlice* slice : idx_slice_) {
-    if (array_index == fixed_array->length()) {
-      break;
-    }
+    DCHECK_EQ(slice->reserved(), 0);
     DCHECK(array_index == 0 ||
            base::bits::IsPowerOfTwo32(static_cast<uint32_t>(array_index)));
+#if DEBUG
     // Different slices might contain the same element due to reservations, but
     // all elements within a slice should be unique. If this check fails, then
     // the AST nodes are not being internalized within a CanonicalHandleScope.
-    DCHECK(slice->AllElementsAreUnique());
+    slice->CheckAllElementsAreUnique(isolate);
+#endif
     // Copy objects from slice into array.
     for (size_t i = 0; i < slice->size(); ++i) {
-      fixed_array->set(array_index++, *slice->At(slice->start_index() + i));
+      fixed_array->set(array_index++,
+                       *slice->At(slice->start_index() + i).ToHandle(isolate));
     }
-    // Insert holes where reservations led to unused slots.
-    size_t padding =
-        std::min(static_cast<size_t>(fixed_array->length() - array_index),
-                 slice->capacity() - slice->size());
-    for (size_t i = 0; i < padding; i++) {
-      fixed_array->set(array_index++, *the_hole_value());
+    // Leave holes where reservations led to unused slots.
+    size_t padding = slice->capacity() - slice->size();
+    if (static_cast<size_t>(fixed_array->length() - array_index) <= padding) {
+      break;
     }
+    array_index += padding;
   }
-  DCHECK_EQ(array_index, fixed_array->length());
+  DCHECK_GE(array_index, fixed_array->length());
   return fixed_array;
 }
 
-size_t ConstantArrayBuilder::Insert(Handle<Object> object) {
+size_t ConstantArrayBuilder::Insert(Smi* smi) {
+  auto entry = smi_map_.find(smi);
+  if (entry == smi_map_.end()) {
+    return AllocateReservedEntry(smi);
+  }
+  return entry->second;
+}
+
+size_t ConstantArrayBuilder::Insert(const AstRawString* raw_string) {
   return constants_map_
-      .LookupOrInsert(object.address(), ObjectHash(object.address()),
-                      [&]() { return AllocateIndex(object); },
+      .LookupOrInsert(reinterpret_cast<intptr_t>(raw_string),
+                      raw_string->hash(),
+                      [&]() { return AllocateIndex(Entry(raw_string)); },
                       ZoneAllocationPolicy(zone_))
       ->value;
 }
 
+size_t ConstantArrayBuilder::Insert(const AstValue* heap_number) {
+  // This method only accepts heap numbers. Other AST value types should
+  // either be passed through as raw values (in the case of strings), be
+  // inserted via the singleton Insert methods (in the case of symbols), or
+  // skip the constant pool entirely and use bytecodes with immediate values
+  // (Smis, booleans, undefined, etc.).
+  DCHECK(heap_number->IsHeapNumber());
+  return constants_map_
+      .LookupOrInsert(reinterpret_cast<intptr_t>(heap_number),
+                      static_cast<uint32_t>(base::hash_value(heap_number)),
+                      [&]() { return AllocateIndex(Entry(heap_number)); },
+                      ZoneAllocationPolicy(zone_))
+      ->value;
+}
+
+size_t ConstantArrayBuilder::Insert(const Scope* scope) {
+  return constants_map_
+      .LookupOrInsert(reinterpret_cast<intptr_t>(scope),
+                      static_cast<uint32_t>(base::hash_value(scope)),
+                      [&]() { return AllocateIndex(Entry(scope)); },
+                      ZoneAllocationPolicy(zone_))
+      ->value;
+}
+
+#define INSERT_ENTRY(NAME, LOWER_NAME)              \
+  size_t ConstantArrayBuilder::Insert##NAME() {     \
+    if (LOWER_NAME##_ < 0) {                        \
+      LOWER_NAME##_ = AllocateIndex(Entry::NAME()); \
+    }                                               \
+    return LOWER_NAME##_;                           \
+  }
+SINGLETON_CONSTANT_ENTRY_TYPES(INSERT_ENTRY)
+#undef INSERT_ENTRY
+
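+// For reference, the INSERT_ENTRY expansion above defines one lazily cached
+// method per singleton; for (IteratorSymbol, iterator_symbol) it is
+// equivalent to:
+//
+//   size_t ConstantArrayBuilder::InsertIteratorSymbol() {
+//     if (iterator_symbol_ < 0) {
+//       iterator_symbol_ = AllocateIndex(Entry::IteratorSymbol());
+//     }
+//     return iterator_symbol_;
+//   }
+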
 ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateIndex(
-    Handle<Object> object) {
+    ConstantArrayBuilder::Entry entry) {
   for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
     if (idx_slice_[i]->available() > 0) {
-      return static_cast<index_t>(idx_slice_[i]->Allocate(object));
+      return static_cast<index_t>(idx_slice_[i]->Allocate(entry));
     }
   }
   UNREACHABLE();
@@ -195,15 +250,13 @@
   return slice;
 }
 
-size_t ConstantArrayBuilder::AllocateEntry() {
-  return AllocateIndex(the_hole_value());
+size_t ConstantArrayBuilder::InsertDeferred() {
+  return AllocateIndex(Entry::Deferred());
 }
 
-void ConstantArrayBuilder::InsertAllocatedEntry(size_t index,
-                                                Handle<Object> object) {
-  DCHECK_EQ(the_hole_value().address(), At(index).address());
+void ConstantArrayBuilder::SetDeferredAt(size_t index, Handle<Object> object) {
   ConstantArraySlice* slice = IndexToSlice(index);
-  slice->InsertAt(index, object);
+  return slice->At(index).SetDeferred(object);
 }
 
 OperandSize ConstantArrayBuilder::CreateReservedEntry() {
@@ -219,9 +272,8 @@
 
 ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateReservedEntry(
     Smi* value) {
-  index_t index = static_cast<index_t>(AllocateEntry());
+  index_t index = static_cast<index_t>(AllocateIndex(Entry(value)));
   smi_map_[value] = index;
-  smi_pairs_.push_back(std::make_pair(value, index));
   return index;
 }
 
@@ -250,6 +302,33 @@
   OperandSizeToSlice(operand_size)->Unreserve();
 }
 
+Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
+  switch (tag_) {
+    case Tag::kDeferred:
+      // We shouldn't have any deferred entries by now.
+      UNREACHABLE();
+      return Handle<Object>::null();
+    case Tag::kHandle:
+      return handle_;
+    case Tag::kSmi:
+      return handle(smi_, isolate);
+    case Tag::kRawString:
+      return raw_string_->string();
+    case Tag::kHeapNumber:
+      DCHECK(heap_number_->IsHeapNumber());
+      return heap_number_->value();
+    case Tag::kScope:
+      return scope_->scope_info();
+#define ENTRY_LOOKUP(Name, name) \
+  case Tag::k##Name:             \
+    return isolate->factory()->name();
+      SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_LOOKUP);
+#undef ENTRY_LOOKUP
+  }
+  UNREACHABLE();
+  return Handle<Object>::null();
+}
+
 }  // namespace interpreter
 }  // namespace internal
 }  // namespace v8
diff --git a/src/interpreter/constant-array-builder.h b/src/interpreter/constant-array-builder.h
index 8e95913..86e7c08 100644
--- a/src/interpreter/constant-array-builder.h
+++ b/src/interpreter/constant-array-builder.h
@@ -14,9 +14,18 @@
 namespace internal {
 
 class Isolate;
+class AstRawString;
+class AstValue;
 
 namespace interpreter {
 
+// Constant array entries that represent singletons.
+#define SINGLETON_CONSTANT_ENTRY_TYPES(V)       \
+  V(IteratorSymbol, iterator_symbol)            \
+  V(AsyncIteratorSymbol, async_iterator_symbol) \
+  V(HomeObjectSymbol, home_object_symbol)       \
+  V(EmptyFixedArray, empty_fixed_array)
+
 // A helper class for constructing constant arrays for the
 // interpreter. Each instance of this class is intended to be used to
 // generate exactly one FixedArray of constants via the ToFixedArray
@@ -33,28 +42,36 @@
   static const size_t k32BitCapacity =
       kMaxUInt32 - k16BitCapacity - k8BitCapacity + 1;
 
-  ConstantArrayBuilder(Zone* zone, Handle<Object> the_hole_value);
+  explicit ConstantArrayBuilder(Zone* zone);
 
-  // Generate a fixed array of constants based on inserted objects.
+  // Generate a fixed array of constant handles based on inserted objects.
   Handle<FixedArray> ToFixedArray(Isolate* isolate);
 
-  // Returns the object in the constant pool array that at index
-  // |index|.
-  Handle<Object> At(size_t index) const;
+  // Returns the object, as a handle in |isolate|, that is in the constant pool
+  // array at index |index|. Returns null if there is no handle at this index.
+  // Only expected to be used in tests.
+  MaybeHandle<Object> At(size_t index, Isolate* isolate) const;
 
   // Returns the number of elements in the array.
   size_t size() const;
 
-  // Insert an object into the constants array if it is not already
-  // present. Returns the array index associated with the object.
-  size_t Insert(Handle<Object> object);
+  // Insert an object into the constants array if it is not already present.
+  // Returns the array index associated with the object.
+  size_t Insert(Smi* smi);
+  size_t Insert(const AstRawString* raw_string);
+  size_t Insert(const AstValue* heap_number);
+  size_t Insert(const Scope* scope);
+#define INSERT_ENTRY(NAME, ...) size_t Insert##NAME();
+  SINGLETON_CONSTANT_ENTRY_TYPES(INSERT_ENTRY)
+#undef INSERT_ENTRY
 
-  // Allocates an empty entry and returns the array index associated with the
-  // reservation. Entry can be inserted by calling InsertReservedEntry().
-  size_t AllocateEntry();
+  // Inserts an empty entry and returns the array index associated with the
+  // reservation. The entry's handle value can be inserted by calling
+  // SetDeferredAt().
+  size_t InsertDeferred();
 
-  // Inserts the given object into an allocated entry.
-  void InsertAllocatedEntry(size_t index, Handle<Object> object);
+  // Sets the deferred value at |index| to |object|.
+  void SetDeferredAt(size_t index, Handle<Object> object);
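+  // Deferred-entry usage sketch (illustrative): reserve a slot with
+  //   size_t index = builder.InsertDeferred();
+  // and, once the heap object exists, fill it with
+  //   builder.SetDeferredAt(index, object);
+  // All deferred entries must be filled before ToFixedArray() runs;
+  // Entry::ToHandle() treats a remaining deferred entry as a bug.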
 
   // Creates a reserved entry in the constant pool and returns
   // the size of the operand that'll be required to hold the entry
@@ -71,7 +88,60 @@
  private:
   typedef uint32_t index_t;
 
-  index_t AllocateIndex(Handle<Object> object);
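+  // Entry is a small tagged union: a one-pointer payload plus a one-byte
+  // tag. Slices can therefore hold entries by value, deferring handle
+  // creation to ToFixedArray()/ToHandle().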
+  class Entry {
+   private:
+    enum class Tag : uint8_t;
+
+   public:
+    explicit Entry(Smi* smi) : smi_(smi), tag_(Tag::kSmi) {}
+    explicit Entry(const AstRawString* raw_string)
+        : raw_string_(raw_string), tag_(Tag::kRawString) {}
+    explicit Entry(const AstValue* heap_number)
+        : heap_number_(heap_number), tag_(Tag::kHeapNumber) {}
+    explicit Entry(const Scope* scope) : scope_(scope), tag_(Tag::kScope) {}
+
+#define CONSTRUCT_ENTRY(NAME, LOWER_NAME) \
+  static Entry NAME() { return Entry(Tag::k##NAME); }
+    SINGLETON_CONSTANT_ENTRY_TYPES(CONSTRUCT_ENTRY)
+#undef CONSTRUCT_ENTRY
+
+    static Entry Deferred() { return Entry(Tag::kDeferred); }
+
+    bool IsDeferred() const { return tag_ == Tag::kDeferred; }
+
+    void SetDeferred(Handle<Object> handle) {
+      DCHECK(tag_ == Tag::kDeferred);
+      tag_ = Tag::kHandle;
+      handle_ = handle;
+    }
+
+    Handle<Object> ToHandle(Isolate* isolate) const;
+
+   private:
+    explicit Entry(Tag tag) : tag_(tag) {}
+
+    union {
+      Handle<Object> handle_;
+      Smi* smi_;
+      const AstRawString* raw_string_;
+      const AstValue* heap_number_;
+      const Scope* scope_;
+    };
+
+    enum class Tag : uint8_t {
+      kDeferred,
+      kHandle,
+      kSmi,
+      kRawString,
+      kHeapNumber,
+      kScope,
+#define ENTRY_TAG(NAME, ...) k##NAME,
+      SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_TAG)
+#undef ENTRY_TAG
+    } tag_;
+  };
+
+  index_t AllocateIndex(Entry constant_entry);
   index_t AllocateReservedEntry(Smi* value);
 
   struct ConstantArraySlice final : public ZoneObject {
@@ -79,10 +149,13 @@
                        OperandSize operand_size);
     void Reserve();
     void Unreserve();
-    size_t Allocate(Handle<Object> object);
-    Handle<Object> At(size_t index) const;
-    void InsertAt(size_t index, Handle<Object> object);
-    bool AllElementsAreUnique() const;
+    size_t Allocate(Entry entry);
+    Entry& At(size_t index);
+    const Entry& At(size_t index) const;
+
+#if DEBUG
+    void CheckAllElementsAreUnique(Isolate* isolate) const;
+#endif
 
     inline size_t available() const { return capacity() - reserved() - size(); }
     inline size_t reserved() const { return reserved_; }
@@ -97,7 +170,7 @@
     const size_t capacity_;
     size_t reserved_;
     OperandSize operand_size_;
-    ZoneVector<Handle<Object>> constants_;
+    ZoneVector<Entry> constants_;
 
     DISALLOW_COPY_AND_ASSIGN(ConstantArraySlice);
   };
@@ -105,16 +178,19 @@
   ConstantArraySlice* IndexToSlice(size_t index) const;
   ConstantArraySlice* OperandSizeToSlice(OperandSize operand_size) const;
 
-  Handle<Object> the_hole_value() const { return the_hole_value_; }
-
   ConstantArraySlice* idx_slice_[3];
-  base::TemplateHashMapImpl<Address, index_t, base::KeyEqualityMatcher<Address>,
+  base::TemplateHashMapImpl<intptr_t, index_t,
+                            base::KeyEqualityMatcher<intptr_t>,
                             ZoneAllocationPolicy>
       constants_map_;
   ZoneMap<Smi*, index_t> smi_map_;
   ZoneVector<std::pair<Smi*, index_t>> smi_pairs_;
+
+#define SINGLETON_ENTRY_FIELD(NAME, LOWER_NAME) int LOWER_NAME##_;
+  SINGLETON_CONSTANT_ENTRY_TYPES(SINGLETON_ENTRY_FIELD)
+#undef SINGLETON_ENTRY_FIELD
+
   Zone* zone_;
-  Handle<Object> the_hole_value_;
 };
 
 }  // namespace interpreter
diff --git a/src/interpreter/control-flow-builders.cc b/src/interpreter/control-flow-builders.cc
index 0e71b96..41d1ad8 100644
--- a/src/interpreter/control-flow-builders.cc
+++ b/src/interpreter/control-flow-builders.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/interpreter/control-flow-builders.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -55,8 +56,10 @@
   // and misplaced between the headers.
   DCHECK(break_labels_.empty() && continue_labels_.empty());
   builder()->Bind(&loop_header_);
-  for (auto& label : *additional_labels) {
-    builder()->Bind(&label);
+  if (additional_labels != nullptr) {
+    for (auto& label : *additional_labels) {
+      builder()->Bind(&label);
+    }
   }
 }
 
diff --git a/src/interpreter/control-flow-builders.h b/src/interpreter/control-flow-builders.h
index 3174db5..68c28c7 100644
--- a/src/interpreter/control-flow-builders.h
+++ b/src/interpreter/control-flow-builders.h
@@ -14,7 +14,7 @@
 namespace internal {
 namespace interpreter {
 
-class ControlFlowBuilder BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE ControlFlowBuilder BASE_EMBEDDED {
  public:
   explicit ControlFlowBuilder(BytecodeArrayBuilder* builder)
       : builder_(builder) {}
@@ -29,7 +29,8 @@
   DISALLOW_COPY_AND_ASSIGN(ControlFlowBuilder);
 };
 
-class BreakableControlFlowBuilder : public ControlFlowBuilder {
+class V8_EXPORT_PRIVATE BreakableControlFlowBuilder
+    : public ControlFlowBuilder {
  public:
   explicit BreakableControlFlowBuilder(BytecodeArrayBuilder* builder)
       : ControlFlowBuilder(builder), break_labels_(builder->zone()) {}
@@ -63,7 +64,8 @@
 
 
 // Class to track control flow for block statements (which can break in JS).
-class BlockBuilder final : public BreakableControlFlowBuilder {
+class V8_EXPORT_PRIVATE BlockBuilder final
+    : public BreakableControlFlowBuilder {
  public:
   explicit BlockBuilder(BytecodeArrayBuilder* builder)
       : BreakableControlFlowBuilder(builder) {}
@@ -77,7 +79,7 @@
 
 // A class to help with co-ordinating break and continue statements with
 // their loop.
-class LoopBuilder final : public BreakableControlFlowBuilder {
+class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
  public:
   explicit LoopBuilder(BytecodeArrayBuilder* builder)
       : BreakableControlFlowBuilder(builder),
@@ -85,7 +87,7 @@
         header_labels_(builder->zone()) {}
   ~LoopBuilder();
 
-  void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels);
+  void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels = nullptr);
   void JumpToHeader(int loop_depth);
   void BindContinueTarget();
   void EndLoop();
@@ -109,7 +111,8 @@
 
 
 // A class to help with co-ordinating break statements with their switch.
-class SwitchBuilder final : public BreakableControlFlowBuilder {
+class V8_EXPORT_PRIVATE SwitchBuilder final
+    : public BreakableControlFlowBuilder {
  public:
   explicit SwitchBuilder(BytecodeArrayBuilder* builder, int number_of_cases)
       : BreakableControlFlowBuilder(builder),
@@ -139,7 +142,7 @@
 
 
 // A class to help with co-ordinating control flow in try-catch statements.
-class TryCatchBuilder final : public ControlFlowBuilder {
+class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder {
  public:
   explicit TryCatchBuilder(BytecodeArrayBuilder* builder,
                            HandlerTable::CatchPrediction catch_prediction)
@@ -160,7 +163,7 @@
 
 
 // A class to help with co-ordinating control flow in try-finally statements.
-class TryFinallyBuilder final : public ControlFlowBuilder {
+class V8_EXPORT_PRIVATE TryFinallyBuilder final : public ControlFlowBuilder {
  public:
   explicit TryFinallyBuilder(BytecodeArrayBuilder* builder,
                              HandlerTable::CatchPrediction catch_prediction)
diff --git a/src/interpreter/handler-table-builder.h b/src/interpreter/handler-table-builder.h
index 25147ca..b9bfc5b 100644
--- a/src/interpreter/handler-table-builder.h
+++ b/src/interpreter/handler-table-builder.h
@@ -5,7 +5,6 @@
 #ifndef V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
 #define V8_INTERPRETER_HANDLER_TABLE_BUILDER_H_
 
-#include "src/handles.h"
 #include "src/interpreter/bytecode-register.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/zone/zone-containers.h"
@@ -13,13 +12,15 @@
 namespace v8 {
 namespace internal {
 
+template <typename T>
+class Handle;
 class HandlerTable;
 class Isolate;
 
 namespace interpreter {
 
 // A helper class for constructing exception handler tables for the interpreter.
-class HandlerTableBuilder final BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE HandlerTableBuilder final BASE_EMBEDDED {
  public:
   explicit HandlerTableBuilder(Zone* zone);
 
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
index c8ce553..557ad77 100644
--- a/src/interpreter/interpreter-assembler.cc
+++ b/src/interpreter/interpreter-assembler.cc
@@ -14,36 +14,46 @@
 #include "src/interpreter/interpreter.h"
 #include "src/machine-type.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 #include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
 namespace interpreter {
 
+using compiler::CodeAssemblerState;
 using compiler::Node;
 
-InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
+InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
                                            Bytecode bytecode,
                                            OperandScale operand_scale)
-    : CodeStubAssembler(isolate, zone, InterpreterDispatchDescriptor(isolate),
-                        Code::ComputeFlags(Code::BYTECODE_HANDLER),
-                        Bytecodes::ToString(bytecode),
-                        Bytecodes::ReturnCount(bytecode)),
+    : CodeStubAssembler(state),
       bytecode_(bytecode),
       operand_scale_(operand_scale),
       bytecode_offset_(this, MachineType::PointerRepresentation()),
       interpreted_frame_pointer_(this, MachineType::PointerRepresentation()),
+      bytecode_array_(this, MachineRepresentation::kTagged),
+      dispatch_table_(this, MachineType::PointerRepresentation()),
       accumulator_(this, MachineRepresentation::kTagged),
       accumulator_use_(AccumulatorUse::kNone),
       made_call_(false),
+      reloaded_frame_ptr_(false),
+      saved_bytecode_offset_(false),
       disable_stack_check_across_call_(false),
       stack_pointer_before_call_(nullptr) {
   accumulator_.Bind(Parameter(InterpreterDispatchDescriptor::kAccumulator));
   bytecode_offset_.Bind(
       Parameter(InterpreterDispatchDescriptor::kBytecodeOffset));
+  bytecode_array_.Bind(
+      Parameter(InterpreterDispatchDescriptor::kBytecodeArray));
+  dispatch_table_.Bind(
+      Parameter(InterpreterDispatchDescriptor::kDispatchTable));
+
   if (FLAG_trace_ignition) {
     TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
   }
+  RegisterCallGenerationCallbacks([this] { CallPrologue(); },
+                                  [this] { CallEpilogue(); });
 }
 
 InterpreterAssembler::~InterpreterAssembler() {
@@ -51,11 +61,16 @@
   // accumulator in the way described in the bytecode definitions in
   // bytecodes.h.
   DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
+  UnregisterCallGenerationCallbacks();
 }
 
 Node* InterpreterAssembler::GetInterpretedFramePointer() {
   if (!interpreted_frame_pointer_.IsBound()) {
     interpreted_frame_pointer_.Bind(LoadParentFramePointer());
+  } else if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
+             !reloaded_frame_ptr_) {
+    interpreted_frame_pointer_.Bind(LoadParentFramePointer());
+    reloaded_frame_ptr_ = true;
   }
   return interpreted_frame_pointer_.value();
 }
@@ -150,21 +165,33 @@
 }
 
 Node* InterpreterAssembler::BytecodeOffset() {
+  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
+      (bytecode_offset_.value() ==
+       Parameter(InterpreterDispatchDescriptor::kBytecodeOffset))) {
+    bytecode_offset_.Bind(LoadAndUntagRegister(Register::bytecode_offset()));
+  }
   return bytecode_offset_.value();
 }
 
 Node* InterpreterAssembler::BytecodeArrayTaggedPointer() {
-  if (made_call_) {
-    // If we have made a call, restore bytecode array from stack frame in case
-    // the debugger has swapped us to the patched debugger bytecode array.
-    return LoadRegister(Register::bytecode_array());
-  } else {
-    return Parameter(InterpreterDispatchDescriptor::kBytecodeArray);
+  // Force a re-load of the bytecode array after every call in case the
+  // debugger has been activated.
+  if (made_call_ &&
+      (bytecode_array_.value() ==
+       Parameter(InterpreterDispatchDescriptor::kBytecodeArray))) {
+    bytecode_array_.Bind(LoadRegister(Register::bytecode_array()));
   }
+  return bytecode_array_.value();
 }
 
 Node* InterpreterAssembler::DispatchTableRawPointer() {
-  return Parameter(InterpreterDispatchDescriptor::kDispatchTable);
+  if (Bytecodes::MakesCallAlongCriticalPath(bytecode_) && made_call_ &&
+      (dispatch_table_.value() ==
+       Parameter(InterpreterDispatchDescriptor::kDispatchTable))) {
+    dispatch_table_.Bind(ExternalConstant(
+        ExternalReference::interpreter_dispatch_table_address(isolate())));
+  }
+  return dispatch_table_.value();
 }
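+// The accessors above share one pattern: cache a dispatch parameter in a
+// CodeAssembler variable and, when this handler makes a call along its
+// critical path, re-load the value from a stable location afterwards, since
+// the callee may invalidate it (e.g. the debugger swapping in a patched
+// bytecode array).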
 
 Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
@@ -186,6 +213,11 @@
               RegisterFrameOffset(reg_index));
 }
 
+Node* InterpreterAssembler::LoadAndUntagRegister(Register reg) {
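+  // The shift converts the register operand into its byte offset from the
+  // interpreted frame pointer, mirroring what RegisterFrameOffset() does for
+  // dynamic register indices.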
+  return LoadAndUntagSmi(GetInterpretedFramePointer(),
+                         reg.ToOperand() << kPointerSizeLog2);
+}
+
 Node* InterpreterAssembler::StoreRegister(Node* value, Register reg) {
   return StoreNoWriteBarrier(
       MachineRepresentation::kTagged, GetInterpretedFramePointer(),
@@ -198,6 +230,12 @@
                              RegisterFrameOffset(reg_index), value);
 }
 
+Node* InterpreterAssembler::StoreAndTagRegister(compiler::Node* value,
+                                                Register reg) {
+  int offset = reg.ToOperand() << kPointerSizeLog2;
+  return StoreAndTagSmi(GetInterpretedFramePointer(), offset, value);
+}
+
 Node* InterpreterAssembler::NextRegister(Node* reg_index) {
   // Register indexes are negative, so the next index is minus one.
   return IntPtrAdd(reg_index, IntPtrConstant(-1));
@@ -222,14 +260,8 @@
   DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
                                     bytecode_, operand_index, operand_scale()));
   Node* operand_offset = OperandOffset(operand_index);
-  Node* load = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
-                    IntPtrAdd(BytecodeOffset(), operand_offset));
-
-  // Ensure that we sign extend to full pointer size
-  if (kPointerSize == 8) {
-    load = ChangeInt32ToInt64(load);
-  }
-  return load;
+  return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+              IntPtrAdd(BytecodeOffset(), operand_offset));
 }
 
 compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
@@ -305,19 +337,12 @@
       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
   int operand_offset =
       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
-  Node* load;
   if (TargetSupportsUnalignedAccess()) {
-    load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
+    return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
                 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
   } else {
-    load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
+    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
   }
-
-  // Ensure that we sign extend to full pointer size
-  if (kPointerSize == 8) {
-    load = ChangeInt32ToInt64(load);
-  }
-  return load;
 }
 
 Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
@@ -340,19 +365,12 @@
                                     bytecode_, operand_index, operand_scale()));
   int operand_offset =
       Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
-  Node* load;
   if (TargetSupportsUnalignedAccess()) {
-    load = Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
+    return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
                 IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
   } else {
-    load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
+    return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
   }
-
-  // Ensure that we sign extend to full pointer size
-  if (kPointerSize == 8) {
-    load = ChangeInt32ToInt64(load);
-  }
-  return load;
 }
 
 Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
@@ -414,6 +432,10 @@
   return BytecodeUnsignedOperand(operand_index, operand_size);
 }
 
+Node* InterpreterAssembler::BytecodeOperandUImmWord(int operand_index) {
+  return ChangeUint32ToWord(BytecodeOperandUImm(operand_index));
+}
+
 Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
   DCHECK_EQ(OperandType::kImm,
             Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -422,12 +444,25 @@
   return BytecodeSignedOperand(operand_index, operand_size);
 }
 
+Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
+  return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
+}
+
+Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
+  return SmiFromWord32(BytecodeOperandImm(operand_index));
+}
+
 Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
   DCHECK(OperandType::kIdx ==
          Bytecodes::GetOperandType(bytecode_, operand_index));
   OperandSize operand_size =
       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
-  return BytecodeUnsignedOperand(operand_index, operand_size);
+  return ChangeUint32ToWord(
+      BytecodeUnsignedOperand(operand_index, operand_size));
+}
+
+Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
+  return SmiTag(BytecodeOperandIdx(operand_index));
 }
 
 Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
@@ -435,7 +470,8 @@
       Bytecodes::GetOperandType(bytecode_, operand_index)));
   OperandSize operand_size =
       Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
-  return BytecodeSignedOperand(operand_index, operand_size);
+  return ChangeInt32ToIntPtr(
+      BytecodeSignedOperand(operand_index, operand_size));
 }
 
 Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
@@ -459,42 +495,34 @@
 Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
   Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
                                         BytecodeArray::kConstantPoolOffset);
-  Node* entry_offset =
-      IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
-                WordShl(index, kPointerSizeLog2));
-  return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
+  return LoadFixedArrayElement(constant_pool, index);
 }
 
 Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
-  Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
-                                        BytecodeArray::kConstantPoolOffset);
-  int offset = FixedArray::kHeaderSize - kHeapObjectTag;
-#if V8_TARGET_LITTLE_ENDIAN
-  if (Is64()) {
-    offset += kPointerSize / 2;
-  }
-#endif
-  Node* entry_offset =
-      IntPtrAdd(IntPtrConstant(offset), WordShl(index, kPointerSizeLog2));
-  if (Is64()) {
-    return ChangeInt32ToInt64(
-        Load(MachineType::Int32(), constant_pool, entry_offset));
-  } else {
-    return SmiUntag(
-        Load(MachineType::AnyTagged(), constant_pool, entry_offset));
-  }
+  return SmiUntag(LoadConstantPoolEntry(index));
 }
 
-Node* InterpreterAssembler::LoadTypeFeedbackVector() {
+Node* InterpreterAssembler::LoadFeedbackVector() {
   Node* function = LoadRegister(Register::function_closure());
-  Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
-  Node* vector =
-      LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
+  Node* cell = LoadObjectField(function, JSFunction::kFeedbackVectorOffset);
+  Node* vector = LoadObjectField(cell, Cell::kValueOffset);
   return vector;
 }
 
+void InterpreterAssembler::SaveBytecodeOffset() {
+  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+  StoreAndTagRegister(BytecodeOffset(), Register::bytecode_offset());
+  saved_bytecode_offset_ = true;
+}
+
 void InterpreterAssembler::CallPrologue() {
-  StoreRegister(SmiTag(BytecodeOffset()), Register::bytecode_offset());
+  if (!saved_bytecode_offset_) {
+    // If there are multiple calls in the bytecode handler, you need to spill
+    // before each of them, unless SaveBytecodeOffset has explicitly been called
+    // in a path that dominates _all_ of those calls. Therefore don't set
+    // saved_bytecode_offset_ to true or call SaveBytecodeOffset.
+    StoreAndTagRegister(BytecodeOffset(), Register::bytecode_offset());
+  }
 
   if (FLAG_debug_code && !disable_stack_check_across_call_) {
     DCHECK(stack_pointer_before_call_ == nullptr);
@@ -513,22 +541,21 @@
   }
 }
 
-Node* InterpreterAssembler::IncrementCallCount(Node* type_feedback_vector,
+Node* InterpreterAssembler::IncrementCallCount(Node* feedback_vector,
                                                Node* slot_id) {
   Comment("increment call count");
   Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
-  Node* call_count =
-      LoadFixedArrayElement(type_feedback_vector, call_count_slot);
-  Node* new_count = SmiAdd(call_count, SmiTag(Int32Constant(1)));
+  Node* call_count = LoadFixedArrayElement(feedback_vector, call_count_slot);
+  Node* new_count = SmiAdd(call_count, SmiConstant(1));
   // Count is Smi, so we don't need a write barrier.
-  return StoreFixedArrayElement(type_feedback_vector, call_count_slot,
-                                new_count, SKIP_WRITE_BARRIER);
+  return StoreFixedArrayElement(feedback_vector, call_count_slot, new_count,
+                                SKIP_WRITE_BARRIER);
 }
 
 Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
                                                Node* first_arg, Node* arg_count,
                                                Node* slot_id,
-                                               Node* type_feedback_vector,
+                                               Node* feedback_vector,
                                                TailCallMode tail_call_mode) {
   // Static checks to assert it is safe to examine the type feedback element.
   // We don't know that we have a weak cell. We might have a private symbol
@@ -540,6 +567,8 @@
   // computed, meaning that it can't appear to be a pointer. If the low bit is
   // 0, then hash is computed, but the 0 bit prevents the field from appearing
   // to be a pointer.
+  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_));
   STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
   STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
                     WeakCell::kValueOffset &&
@@ -550,10 +579,10 @@
       end(this);
 
   // The checks. First, does function match the recorded monomorphic target?
-  Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
+  Node* feedback_element = LoadFixedArrayElement(feedback_vector, slot_id);
   Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
   Node* is_monomorphic = WordEqual(function, feedback_value);
-  GotoUnless(is_monomorphic, &extra_checks);
+  GotoIfNot(is_monomorphic, &extra_checks);
 
   // The compare above could have been a SMI/SMI comparison. Guard against
   // this convincing us that we have a monomorphic JSFunction.
@@ -563,11 +592,11 @@
   Bind(&call_function);
   {
     // Increment the call count.
-    IncrementCallCount(type_feedback_vector, slot_id);
+    IncrementCallCount(feedback_vector, slot_id);
 
     // Call using call function builtin.
     Callable callable = CodeFactory::InterpreterPushArgsAndCall(
-        isolate(), tail_call_mode, CallableType::kJSFunction);
+        isolate(), tail_call_mode, InterpreterPushArgsMode::kJSFunction);
     Node* code_target = HeapConstant(callable.code());
     Node* ret_value = CallStub(callable.descriptor(), code_target, context,
                                arg_count, first_arg, function);
@@ -582,25 +611,23 @@
 
     Comment("check if megamorphic");
     // Check if it is a megamorphic target.
-    Node* is_megamorphic = WordEqual(
-        feedback_element,
-        HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+    Node* is_megamorphic =
+        WordEqual(feedback_element,
+                  HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
     GotoIf(is_megamorphic, &call);
 
     Comment("check if it is an allocation site");
-    Node* is_allocation_site = WordEqual(
-        LoadMap(feedback_element), LoadRoot(Heap::kAllocationSiteMapRootIndex));
-    GotoUnless(is_allocation_site, &check_initialized);
+    GotoIfNot(IsAllocationSiteMap(LoadMap(feedback_element)),
+              &check_initialized);
 
     // If it is not the Array() function, mark megamorphic.
-    Node* context_slot =
-        LoadFixedArrayElement(LoadNativeContext(context),
-                              Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+    Node* context_slot = LoadContextElement(LoadNativeContext(context),
+                                            Context::ARRAY_FUNCTION_INDEX);
     Node* is_array_function = WordEqual(context_slot, function);
-    GotoUnless(is_array_function, &mark_megamorphic);
+    GotoIfNot(is_array_function, &mark_megamorphic);
 
     // It is a monomorphic Array function. Increment the call count.
-    IncrementCallCount(type_feedback_vector, slot_id);
+    IncrementCallCount(feedback_vector, slot_id);
 
     // Call ArrayConstructorStub.
     Callable callable_call =
@@ -618,8 +645,8 @@
       // Check if it is uninitialized target first.
       Node* is_uninitialized = WordEqual(
           feedback_element,
-          HeapConstant(TypeFeedbackVector::UninitializedSentinel(isolate())));
-      GotoUnless(is_uninitialized, &mark_megamorphic);
+          HeapConstant(FeedbackVector::UninitializedSentinel(isolate())));
+      GotoIfNot(is_uninitialized, &mark_megamorphic);
 
       Comment("handle_unitinitialized");
       // If it is not a JSFunction mark it as megamorphic.
@@ -629,13 +656,12 @@
       // Check if function is an object of JSFunction type.
       Node* instance_type = LoadInstanceType(function);
       Node* is_js_function =
-          WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
-      GotoUnless(is_js_function, &mark_megamorphic);
+          Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
+      GotoIfNot(is_js_function, &mark_megamorphic);
 
       // Check if it is the Array() function.
-      Node* context_slot =
-          LoadFixedArrayElement(LoadNativeContext(context),
-                                Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+      Node* context_slot = LoadContextElement(LoadNativeContext(context),
+                                              Context::ARRAY_FUNCTION_INDEX);
       Node* is_array_function = WordEqual(context_slot, function);
       GotoIf(is_array_function, &create_allocation_site);
 
@@ -644,9 +670,9 @@
           LoadObjectField(function, JSFunction::kContextOffset));
       Node* is_same_native_context =
           WordEqual(native_context, LoadNativeContext(context));
-      GotoUnless(is_same_native_context, &mark_megamorphic);
+      GotoIfNot(is_same_native_context, &mark_megamorphic);
 
-      CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
+      CreateWeakCellInFeedbackVector(feedback_vector, SmiTag(slot_id),
                                      function);
 
       // Call using call function builtin.
@@ -655,8 +681,7 @@
 
     Bind(&create_allocation_site);
     {
-      CreateAllocationSiteInFeedbackVector(type_feedback_vector,
-                                           SmiTag(slot_id));
+      CreateAllocationSiteInFeedbackVector(feedback_vector, SmiTag(slot_id));
 
       // Call using CallFunction builtin. CallICs have a PREMONOMORPHIC state.
       // They start collecting feedback only when a call is executed the second
@@ -671,8 +696,8 @@
       // and will not move during a GC. So it is safe to skip write barrier.
       DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
       StoreFixedArrayElement(
-          type_feedback_vector, slot_id,
-          HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
+          feedback_vector, slot_id,
+          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
           SKIP_WRITE_BARRIER);
       Goto(&call);
     }
@@ -682,11 +707,11 @@
   {
     Comment("Increment call count and call using Call builtin");
     // Increment the call count.
-    IncrementCallCount(type_feedback_vector, slot_id);
+    IncrementCallCount(feedback_vector, slot_id);
 
     // Call using call builtin.
     Callable callable_call = CodeFactory::InterpreterPushArgsAndCall(
-        isolate(), tail_call_mode, CallableType::kAny);
+        isolate(), tail_call_mode, InterpreterPushArgsMode::kOther);
     Node* code_target_call = HeapConstant(callable_call.code());
     Node* ret_value = CallStub(callable_call.descriptor(), code_target_call,
                                context, arg_count, first_arg, function);
@@ -701,25 +726,41 @@
 Node* InterpreterAssembler::CallJS(Node* function, Node* context,
                                    Node* first_arg, Node* arg_count,
                                    TailCallMode tail_call_mode) {
+  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+  DCHECK(Bytecodes::IsCallOrConstruct(bytecode_));
   Callable callable = CodeFactory::InterpreterPushArgsAndCall(
-      isolate(), tail_call_mode, CallableType::kAny);
+      isolate(), tail_call_mode, InterpreterPushArgsMode::kOther);
   Node* code_target = HeapConstant(callable.code());
+
   return CallStub(callable.descriptor(), code_target, context, arg_count,
                   first_arg, function);
 }
 
-Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
-                                          Node* new_target, Node* first_arg,
-                                          Node* arg_count, Node* slot_id,
-                                          Node* type_feedback_vector) {
+Node* InterpreterAssembler::CallJSWithSpread(Node* function, Node* context,
+                                             Node* first_arg, Node* arg_count) {
+  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+  Callable callable = CodeFactory::InterpreterPushArgsAndCall(
+      isolate(), TailCallMode::kDisallow,
+      InterpreterPushArgsMode::kWithFinalSpread);
+  Node* code_target = HeapConstant(callable.code());
+
+  return CallStub(callable.descriptor(), code_target, context, arg_count,
+                  first_arg, function);
+}
+
+Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
+                                      Node* new_target, Node* first_arg,
+                                      Node* arg_count, Node* slot_id,
+                                      Node* feedback_vector) {
+  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
   Variable return_value(this, MachineRepresentation::kTagged);
   Variable allocation_feedback(this, MachineRepresentation::kTagged);
   Label call_construct_function(this, &allocation_feedback),
       extra_checks(this, Label::kDeferred), call_construct(this), end(this);
 
   // Slot id of 0 is used to indicate no type feedback is available.
-  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
-  Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
+  STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
+  Node* is_feedback_unavailable = WordEqual(slot_id, IntPtrConstant(0));
   GotoIf(is_feedback_unavailable, &call_construct);
 
   // Check that the constructor is not a smi.
@@ -729,11 +770,11 @@
   // Check that constructor is a JSFunction.
   Node* instance_type = LoadInstanceType(constructor);
   Node* is_js_function =
-      WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
-  GotoUnless(is_js_function, &call_construct);
+      Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
+  GotoIfNot(is_js_function, &call_construct);
 
   // Check if it is a monomorphic constructor.
-  Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id);
+  Node* feedback_element = LoadFixedArrayElement(feedback_vector, slot_id);
   Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element);
   Node* is_monomorphic = WordEqual(constructor, feedback_value);
   allocation_feedback.Bind(UndefinedConstant());
@@ -741,10 +782,10 @@
 
   Bind(&call_construct_function);
   {
-    Comment("call using callConstructFunction");
-    IncrementCallCount(type_feedback_vector, slot_id);
+    Comment("call using ConstructFunction");
+    IncrementCallCount(feedback_vector, slot_id);
     Callable callable_function = CodeFactory::InterpreterPushArgsAndConstruct(
-        isolate(), CallableType::kJSFunction);
+        isolate(), InterpreterPushArgsMode::kJSFunction);
     return_value.Bind(CallStub(callable_function.descriptor(),
                                HeapConstant(callable_function.code()), context,
                                arg_count, new_target, constructor,
@@ -759,15 +800,15 @@
 
     // Check if it is a megamorphic target.
     Comment("check if megamorphic");
-    Node* is_megamorphic = WordEqual(
-        feedback_element,
-        HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+    Node* is_megamorphic =
+        WordEqual(feedback_element,
+                  HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())));
     GotoIf(is_megamorphic, &call_construct_function);
 
     Comment("check if weak cell");
     Node* is_weak_cell = WordEqual(LoadMap(feedback_element),
                                    LoadRoot(Heap::kWeakCellMapRootIndex));
-    GotoUnless(is_weak_cell, &check_allocation_site);
+    GotoIfNot(is_weak_cell, &check_allocation_site);
 
     // If the weak cell is cleared, we have a new chance to become
     // monomorphic.
@@ -781,14 +822,13 @@
       Node* is_allocation_site =
           WordEqual(LoadObjectField(feedback_element, 0),
                     LoadRoot(Heap::kAllocationSiteMapRootIndex));
-      GotoUnless(is_allocation_site, &check_initialized);
+      GotoIfNot(is_allocation_site, &check_initialized);
 
       // Make sure the function is the Array() function.
-      Node* context_slot =
-          LoadFixedArrayElement(LoadNativeContext(context),
-                                Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+      Node* context_slot = LoadContextElement(LoadNativeContext(context),
+                                              Context::ARRAY_FUNCTION_INDEX);
       Node* is_array_function = WordEqual(context_slot, constructor);
-      GotoUnless(is_array_function, &mark_megamorphic);
+      GotoIfNot(is_array_function, &mark_megamorphic);
 
       allocation_feedback.Bind(feedback_element);
       Goto(&call_construct_function);
@@ -809,15 +849,14 @@
       Comment("initialize the feedback element");
       // Create an allocation site if the function is an array function,
       // otherwise create a weak cell.
-      Node* context_slot =
-          LoadFixedArrayElement(LoadNativeContext(context),
-                                Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+      Node* context_slot = LoadContextElement(LoadNativeContext(context),
+                                              Context::ARRAY_FUNCTION_INDEX);
       Node* is_array_function = WordEqual(context_slot, constructor);
       Branch(is_array_function, &create_allocation_site, &create_weak_cell);
 
       Bind(&create_allocation_site);
       {
-        Node* site = CreateAllocationSiteInFeedbackVector(type_feedback_vector,
+        Node* site = CreateAllocationSiteInFeedbackVector(feedback_vector,
                                                           SmiTag(slot_id));
         allocation_feedback.Bind(site);
         Goto(&call_construct_function);
@@ -825,7 +864,7 @@
 
       Bind(&create_weak_cell);
       {
-        CreateWeakCellInFeedbackVector(type_feedback_vector, SmiTag(slot_id),
+        CreateWeakCellInFeedbackVector(feedback_vector, SmiTag(slot_id),
                                        constructor);
         Goto(&call_construct_function);
       }
@@ -838,8 +877,8 @@
       Comment("transition to megamorphic");
       DCHECK(Heap::RootIsImmortalImmovable(Heap::kmegamorphic_symbolRootIndex));
       StoreFixedArrayElement(
-          type_feedback_vector, slot_id,
-          HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())),
+          feedback_vector, slot_id,
+          HeapConstant(FeedbackVector::MegamorphicSentinel(isolate())),
           SKIP_WRITE_BARRIER);
       Goto(&call_construct_function);
     }
@@ -847,9 +886,9 @@
 
   Bind(&call_construct);
   {
-    Comment("call using callConstruct builtin");
+    Comment("call using Construct builtin");
     Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(
-        isolate(), CallableType::kAny);
+        isolate(), InterpreterPushArgsMode::kOther);
     Node* code_target = HeapConstant(callable.code());
     return_value.Bind(CallStub(callable.descriptor(), code_target, context,
                                arg_count, new_target, constructor,
@@ -861,9 +900,28 @@
   return return_value.value();
 }
 
+Node* InterpreterAssembler::ConstructWithSpread(Node* constructor,
+                                                Node* context, Node* new_target,
+                                                Node* first_arg,
+                                                Node* arg_count) {
+  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+  Variable return_value(this, MachineRepresentation::kTagged);
+  Comment("call using ConstructWithSpread");
+  Callable callable = CodeFactory::InterpreterPushArgsAndConstruct(
+      isolate(), InterpreterPushArgsMode::kWithFinalSpread);
+  Node* code_target = HeapConstant(callable.code());
+  return_value.Bind(CallStub(callable.descriptor(), code_target, context,
+                             arg_count, new_target, constructor,
+                             UndefinedConstant(), first_arg));
+
+  return return_value.value();
+}
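// Note that UndefinedConstant() is passed in the allocation-feedback slot of
// the stub's argument list: spread constructs collect no type feedback, so
// the argument order is (sketch)
//
//   CallStub(descriptor, target, context,
//            arg_count, new_target, constructor,
//            /* allocation feedback */ UndefinedConstant(), first_arg);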
+
 Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
                                          Node* first_arg, Node* arg_count,
                                          int result_size) {
+  DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
+  DCHECK(Bytecodes::IsCallRuntime(bytecode_));
   Callable callable = CodeFactory::InterpreterCEntry(isolate(), result_size);
   Node* code_target = HeapConstant(callable.code());
 
@@ -872,19 +930,17 @@
       ExternalReference::runtime_function_table_address(isolate()));
   Node* function_offset =
       Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
-  Node* function = IntPtrAdd(function_table, function_offset);
+  Node* function =
+      IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
   Node* function_entry =
       Load(MachineType::Pointer(), function,
            IntPtrConstant(offsetof(Runtime::Function, entry)));
 
-  return CallStub(callable.descriptor(), code_target, context, arg_count,
-                  first_arg, function_entry, result_size);
+  return CallStubR(callable.descriptor(), result_size, code_target, context,
+                   arg_count, first_arg, function_entry);
 }
 
-void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
-  // TODO(rmcilroy): It might be worthwhile to only update the budget for
-  // backwards branches. Those are distinguishable by the {JumpLoop} bytecode.
-
+void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
   Label ok(this), interrupt_check(this, Label::kDeferred), end(this);
   Node* budget_offset =
       IntPtrConstant(BytecodeArray::kInterruptBudgetOffset - kHeapObjectTag);
@@ -893,7 +949,11 @@
   Variable new_budget(this, MachineRepresentation::kWord32);
   Node* old_budget =
       Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset);
-  new_budget.Bind(Int32Add(old_budget, weight));
+  if (backward) {
+    new_budget.Bind(Int32Sub(old_budget, weight));
+  } else {
+    new_budget.Bind(Int32Add(old_budget, weight));
+  }
   Node* condition =
       Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0));
   Branch(condition, &ok, &interrupt_check);
@@ -921,24 +981,31 @@
   return Advance(IntPtrConstant(delta));
 }
 
-Node* InterpreterAssembler::Advance(Node* delta) {
+Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
   if (FLAG_trace_ignition) {
     TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
   }
-  Node* next_offset = IntPtrAdd(BytecodeOffset(), delta);
+  Node* next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
+                               : IntPtrAdd(BytecodeOffset(), delta);
   bytecode_offset_.Bind(next_offset);
   return next_offset;
 }
 
-Node* InterpreterAssembler::Jump(Node* delta) {
+Node* InterpreterAssembler::Jump(Node* delta, bool backward) {
   DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
 
-  UpdateInterruptBudget(delta);
-  Node* new_bytecode_offset = Advance(delta);
+  UpdateInterruptBudget(TruncateWordToWord32(delta), backward);
+  Node* new_bytecode_offset = Advance(delta, backward);
   Node* target_bytecode = LoadBytecode(new_bytecode_offset);
   return DispatchToBytecode(target_bytecode, new_bytecode_offset);
 }
 
+Node* InterpreterAssembler::Jump(Node* delta) { return Jump(delta, false); }
+
+Node* InterpreterAssembler::JumpBackward(Node* delta) {
+  return Jump(delta, true);
+}
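// With the direction made explicit, a loop back edge (JumpLoop) would lower
// roughly as (sketch):
//
//   UpdateInterruptBudget(delta32, /*backward=*/true);  // budget -= delta
//   offset = IntPtrSub(offset, delta);                  // jump backward
//   Dispatch(LoadBytecode(offset));
//
// Forward jumps add |delta| to the budget instead, so only back edges can
// drive the budget below zero and reach Runtime::kInterrupt.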
+
 void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
   Label match(this), no_match(this);
 
@@ -961,10 +1028,7 @@
 Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
   Node* bytecode =
       Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
-  if (kPointerSize == 8) {
-    bytecode = ChangeUint32ToUint64(bytecode);
-  }
-  return bytecode;
+  return ChangeUint32ToWord(bytecode);
 }
 
 Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
@@ -1007,6 +1071,8 @@
 }
 
 Node* InterpreterAssembler::Dispatch() {
+  Comment("========= Dispatch");
+  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
   Node* target_offset = Advance();
   Node* target_bytecode = LoadBytecode(target_offset);
 
@@ -1031,17 +1097,19 @@
 
 Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
                                                       Node* bytecode_offset) {
+  // TODO(ishell): Add CSA::CodeEntryPoint(code).
   Node* handler_entry =
-      IntPtrAdd(handler, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+      IntPtrAdd(BitcastTaggedToWord(handler),
+                IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
   return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
 }
 
 Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
     Node* handler_entry, Node* bytecode_offset) {
   InterpreterDispatchDescriptor descriptor(isolate());
-  Node* args[] = {GetAccumulatorUnchecked(), bytecode_offset,
-                  BytecodeArrayTaggedPointer(), DispatchTableRawPointer()};
-  return TailCallBytecodeDispatch(descriptor, handler_entry, args);
+  return TailCallBytecodeDispatch(
+      descriptor, handler_entry, GetAccumulatorUnchecked(), bytecode_offset,
+      BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
 }
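// Conceptually, every bytecode handler shares one signature and dispatch is a
// tail call that forwards the four fixed dispatch values (sketch, not the
// real calling-convention declaration):
//
//   Handler(accumulator, bytecode_offset, bytecode_array, dispatch_table);
//
// so machine state flows from handler to handler without a frame per
// bytecode.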
 
 void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -1052,6 +1120,7 @@
   //   Indices 0-255 correspond to bytecodes with operand_scale == 0
   //   Indices 256-511 correspond to bytecodes with operand_scale == 1
   //   Indices 512-767 correspond to bytecodes with operand_scale == 2
+  DCHECK_IMPLIES(Bytecodes::MakesCallAlongCriticalPath(bytecode_), made_call_);
   Node* next_bytecode_offset = Advance(1);
   Node* next_bytecode = LoadBytecode(next_bytecode_offset);
 
@@ -1087,7 +1156,7 @@
   Variable* loop_vars[] = {&var_value, var_type_feedback};
   Label loop(this, 2, loop_vars), done_loop(this, &var_result);
   var_value.Bind(value);
-  var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kNone));
+  var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNone));
   Goto(&loop);
   Bind(&loop);
   {
@@ -1103,8 +1172,8 @@
       // Convert the Smi {value}.
       var_result.Bind(SmiToWord32(value));
       var_type_feedback->Bind(
-          Word32Or(var_type_feedback->value(),
-                   Int32Constant(BinaryOperationFeedback::kSignedSmall)));
+          SmiOr(var_type_feedback->value(),
+                SmiConstant(BinaryOperationFeedback::kSignedSmall)));
       Goto(&done_loop);
     }
 
@@ -1114,16 +1183,16 @@
       Label if_valueisheapnumber(this),
           if_valueisnotheapnumber(this, Label::kDeferred);
       Node* value_map = LoadMap(value);
-      Branch(WordEqual(value_map, HeapNumberMapConstant()),
-             &if_valueisheapnumber, &if_valueisnotheapnumber);
+      Branch(IsHeapNumberMap(value_map), &if_valueisheapnumber,
+             &if_valueisnotheapnumber);
 
       Bind(&if_valueisheapnumber);
       {
         // Truncate the floating point value.
         var_result.Bind(TruncateHeapNumberValueToWord32(value));
         var_type_feedback->Bind(
-            Word32Or(var_type_feedback->value(),
-                     Int32Constant(BinaryOperationFeedback::kNumber)));
+            SmiOr(var_type_feedback->value(),
+                  SmiConstant(BinaryOperationFeedback::kNumber)));
         Goto(&done_loop);
       }
 
@@ -1132,9 +1201,8 @@
         // We do not require an Or with earlier feedback here because once we
         // convert the value to a number, we cannot reach this path. We can
         // only reach this path on the first pass when the feedback is kNone.
-        CSA_ASSERT(this,
-                   Word32Equal(var_type_feedback->value(),
-                               Int32Constant(BinaryOperationFeedback::kNone)));
+        CSA_ASSERT(this, SmiEqual(var_type_feedback->value(),
+                                  SmiConstant(BinaryOperationFeedback::kNone)));
 
         Label if_valueisoddball(this),
             if_valueisnotoddball(this, Label::kDeferred);
@@ -1147,7 +1215,7 @@
           // Convert Oddball to a Number and perform checks again.
           var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
           var_type_feedback->Bind(
-              Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+              SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
           Goto(&loop);
         }
 
@@ -1156,7 +1224,7 @@
           // Convert the {value} to a Number first.
           Callable callable = CodeFactory::NonNumberToNumber(isolate());
           var_value.Bind(CallStub(callable, context, value));
-          var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kAny));
+          var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
           Goto(&loop);
         }
       }
@@ -1174,8 +1242,8 @@
   // function.
   Node* profiling_weight =
       Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
-               BytecodeOffset());
-  UpdateInterruptBudget(profiling_weight);
+               TruncateWordToWord32(BytecodeOffset()));
+  UpdateInterruptBudget(profiling_weight, false);
 }
 
 Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
@@ -1187,9 +1255,9 @@
 }
 
 Node* InterpreterAssembler::LoadOSRNestingLevel() {
-  Node* offset =
-      IntPtrConstant(BytecodeArray::kOSRNestingLevelOffset - kHeapObjectTag);
-  return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(), offset);
+  return LoadObjectField(BytecodeArrayTaggedPointer(),
+                         BytecodeArray::kOSRNestingLevelOffset,
+                         MachineType::Int8());
 }
 
 void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
@@ -1211,6 +1279,26 @@
   Bind(&ok);
 }
 
+void InterpreterAssembler::MaybeDropFrames(Node* context) {
+  Node* restart_fp_address =
+      ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));
+
+  Node* restart_fp = Load(MachineType::Pointer(), restart_fp_address);
+  Node* null = IntPtrConstant(0);
+
+  Label ok(this), drop_frames(this);
+  Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);
+
+  Bind(&drop_frames);
+  // We don't expect this call to return since the frame dropper tears down
+  // the stack and jumps into the function on the target frame to restart it.
+  CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
+  Abort(kUnexpectedReturnFromFrameDropper);
+  Goto(&ok);
+
+  Bind(&ok);
+}
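// Control-flow sketch: debug_restart_fp is non-null only while the debugger
// has requested a frame restart, so the common case is a single load and
// branch:
//
//   if (*debug_restart_fp_address == nullptr) return;    // fast path
//   FrameDropperTrampoline(context, restart_fp);         // never returns
//   Abort(kUnexpectedReturnFromFrameDropper);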
+
 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
   CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
               SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
@@ -1261,19 +1349,21 @@
 Node* InterpreterAssembler::RegisterCount() {
   Node* bytecode_array = LoadRegister(Register::bytecode_array());
   Node* frame_size = LoadObjectField(
-      bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32());
-  return Word32Sar(frame_size, Int32Constant(kPointerSizeLog2));
+      bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Uint32());
+  return WordShr(ChangeUint32ToWord(frame_size),
+                 IntPtrConstant(kPointerSizeLog2));
 }
 
 Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
+  Node* register_count = RegisterCount();
   if (FLAG_debug_code) {
     Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
-    AbortIfWordNotEqual(
-        array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
+    AbortIfWordNotEqual(array_size, register_count,
+                        kInvalidRegisterFileInGenerator);
   }
 
-  Variable var_index(this, MachineRepresentation::kWord32);
-  var_index.Bind(Int32Constant(0));
+  Variable var_index(this, MachineType::PointerRepresentation());
+  var_index.Bind(IntPtrConstant(0));
 
   // Iterate over register file and write values into array.
   // The mapping of register to array index must match that used in
@@ -1283,16 +1373,14 @@
   Bind(&loop);
   {
     Node* index = var_index.value();
-    Node* condition = Int32LessThan(index, RegisterCount());
-    GotoUnless(condition, &done_loop);
+    GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
 
-    Node* reg_index =
-        Int32Sub(Int32Constant(Register(0).ToOperand()), index);
-    Node* value = LoadRegister(ChangeInt32ToIntPtr(reg_index));
+    Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
+    Node* value = LoadRegister(reg_index);
 
     StoreFixedArrayElement(array, index, value);
 
-    var_index.Bind(Int32Add(index, Int32Constant(1)));
+    var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
     Goto(&loop);
   }
   Bind(&done_loop);
@@ -1301,14 +1389,15 @@
 }
 
 Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
+  Node* register_count = RegisterCount();
   if (FLAG_debug_code) {
     Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
-    AbortIfWordNotEqual(
-        array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
+    AbortIfWordNotEqual(array_size, register_count,
+                        kInvalidRegisterFileInGenerator);
   }
 
-  Variable var_index(this, MachineRepresentation::kWord32);
-  var_index.Bind(Int32Constant(0));
+  Variable var_index(this, MachineType::PointerRepresentation());
+  var_index.Bind(IntPtrConstant(0));
 
   // Iterate over array and write values into register file.  Also erase the
   // array contents to not keep them alive artificially.
@@ -1317,18 +1406,16 @@
   Bind(&loop);
   {
     Node* index = var_index.value();
-    Node* condition = Int32LessThan(index, RegisterCount());
-    GotoUnless(condition, &done_loop);
+    GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
 
     Node* value = LoadFixedArrayElement(array, index);
 
-    Node* reg_index =
-        Int32Sub(Int32Constant(Register(0).ToOperand()), index);
-    StoreRegister(value, ChangeInt32ToIntPtr(reg_index));
+    Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
+    StoreRegister(value, reg_index);
 
     StoreFixedArrayElement(array, index, StaleRegisterConstant());
 
-    var_index.Bind(Int32Add(index, Int32Constant(1)));
+    var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
     Goto(&loop);
   }
   Bind(&done_loop);
diff --git a/src/interpreter/interpreter-assembler.h b/src/interpreter/interpreter-assembler.h
index aefd2bc..1317f37 100644
--- a/src/interpreter/interpreter-assembler.h
+++ b/src/interpreter/interpreter-assembler.h
@@ -20,32 +20,44 @@
 
 class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
  public:
-  InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
+  InterpreterAssembler(compiler::CodeAssemblerState* state, Bytecode bytecode,
                        OperandScale operand_scale);
-  virtual ~InterpreterAssembler();
+  ~InterpreterAssembler();
 
-  // Returns the count immediate for bytecode operand |operand_index| in the
-  // current bytecode.
+  // Returns the 32-bit unsigned count immediate for bytecode operand
+  // |operand_index| in the current bytecode.
   compiler::Node* BytecodeOperandCount(int operand_index);
-  // Returns the 8-bit flag for bytecode operand |operand_index| in the
-  // current bytecode.
+  // Returns the 32-bit unsigned flag for bytecode operand |operand_index|
+  // in the current bytecode.
   compiler::Node* BytecodeOperandFlag(int operand_index);
-  // Returns the index immediate for bytecode operand |operand_index| in the
-  // current bytecode.
+  // Returns the 32-bit zero-extended index immediate for bytecode operand
+  // |operand_index| in the current bytecode.
   compiler::Node* BytecodeOperandIdx(int operand_index);
-  // Returns the UImm8 immediate for bytecode operand |operand_index| in the
-  // current bytecode.
+  // Returns the smi index immediate for bytecode operand |operand_index|
+  // in the current bytecode.
+  compiler::Node* BytecodeOperandIdxSmi(int operand_index);
+  // Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
+  // in the current bytecode.
   compiler::Node* BytecodeOperandUImm(int operand_index);
-  // Returns the Imm8 immediate for bytecode operand |operand_index| in the
-  // current bytecode.
+  // Returns the word-size unsigned immediate for bytecode operand
+  // |operand_index| in the current bytecode.
+  compiler::Node* BytecodeOperandUImmWord(int operand_index);
+  // Returns the 32-bit signed immediate for bytecode operand |operand_index|
+  // in the current bytecode.
   compiler::Node* BytecodeOperandImm(int operand_index);
-  // Returns the register index for bytecode operand |operand_index| in the
+  // Returns the word-size signed immediate for bytecode operand |operand_index|
+  // in the current bytecode.
+  compiler::Node* BytecodeOperandImmIntPtr(int operand_index);
+  // Returns the smi immediate for bytecode operand |operand_index| in the
   // current bytecode.
+  compiler::Node* BytecodeOperandImmSmi(int operand_index);
+  // Returns the word-size sign-extended register index for bytecode operand
+  // |operand_index| in the current bytecode.
   compiler::Node* BytecodeOperandReg(int operand_index);
-  // Returns the runtime id immediate for bytecode operand
+  // Returns the 32-bit unsigned runtime id immediate for bytecode operand
   // |operand_index| in the current bytecode.
   compiler::Node* BytecodeOperandRuntimeId(int operand_index);
-  // Returns the intrinsic id immediate for bytecode operand
+  // Returns the 32-bit unsigned intrinsic id immediate for bytecode operand
   // |operand_index| in the current bytecode.
   compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
 
@@ -76,9 +88,11 @@
   // Loads from and stores to the interpreter register file.
   compiler::Node* LoadRegister(Register reg);
   compiler::Node* LoadRegister(compiler::Node* reg_index);
+  compiler::Node* LoadAndUntagRegister(Register reg);
   compiler::Node* StoreRegister(compiler::Node* value, Register reg);
   compiler::Node* StoreRegister(compiler::Node* value,
                                 compiler::Node* reg_index);
+  compiler::Node* StoreAndTagRegister(compiler::Node* value, Register reg);
 
   // Returns the next consecutive register.
   compiler::Node* NextRegister(compiler::Node* reg_index);
@@ -93,12 +107,12 @@
   // Load and untag constant at |index| in the constant pool.
   compiler::Node* LoadAndUntagConstantPoolEntry(compiler::Node* index);
 
-  // Load the TypeFeedbackVector for the current function.
-  compiler::Node* LoadTypeFeedbackVector();
+  // Load the FeedbackVector for the current function.
+  compiler::Node* LoadFeedbackVector();
 
   // Increment the call count for a CALL_IC or construct call.
   // The call count is located at feedback_vector[slot_id + 1].
-  compiler::Node* IncrementCallCount(compiler::Node* type_feedback_vector,
+  compiler::Node* IncrementCallCount(compiler::Node* feedback_vector,
                                      compiler::Node* slot_id);
 
   // Call JSFunction or Callable |function| with |arg_count|
@@ -110,7 +124,7 @@
                                      compiler::Node* first_arg,
                                      compiler::Node* arg_count,
                                      compiler::Node* slot_id,
-                                     compiler::Node* type_feedback_vector,
+                                     compiler::Node* feedback_vector,
                                      TailCallMode tail_call_mode);
 
   // Call JSFunction or Callable |function| with |arg_count|
@@ -120,18 +134,34 @@
                          compiler::Node* first_arg, compiler::Node* arg_count,
                          TailCallMode tail_call_mode);
 
+  // Call JSFunction or Callable |function| with |arg_count| arguments (not
+  // including receiver) and the first argument located at |first_arg|. The
+  // last argument is always a spread.
+  compiler::Node* CallJSWithSpread(compiler::Node* function,
+                                   compiler::Node* context,
+                                   compiler::Node* first_arg,
+                                   compiler::Node* arg_count);
+
   // Call constructor |constructor| with |arg_count| arguments (not
   // including receiver) and the first argument located at
   // |first_arg|. The |new_target| is the same as the
   // |constructor| for the new keyword, but differs for the super
   // keyword.
-  compiler::Node* CallConstruct(compiler::Node* constructor,
-                                compiler::Node* context,
-                                compiler::Node* new_target,
-                                compiler::Node* first_arg,
-                                compiler::Node* arg_count,
-                                compiler::Node* slot_id,
-                                compiler::Node* type_feedback_vector);
+  compiler::Node* Construct(compiler::Node* constructor,
+                            compiler::Node* context, compiler::Node* new_target,
+                            compiler::Node* first_arg,
+                            compiler::Node* arg_count, compiler::Node* slot_id,
+                            compiler::Node* feedback_vector);
+
+  // Call constructor |constructor| with |arg_count| arguments (not including
+  // receiver) and the first argument located at |first_arg|. The last argument
+  // is always a spread. The |new_target| is the same as the |constructor| for
+  // the new keyword, but differs for the super keyword.
+  compiler::Node* ConstructWithSpread(compiler::Node* constructor,
+                                      compiler::Node* context,
+                                      compiler::Node* new_target,
+                                      compiler::Node* first_arg,
+                                      compiler::Node* arg_count);
 
   // Call runtime function with |arg_count| arguments and the first argument
   // located at |first_arg|.
@@ -140,15 +170,18 @@
                                compiler::Node* first_arg,
                                compiler::Node* arg_count, int result_size = 1);
 
-  // Jump relative to the current bytecode by |jump_offset|.
+  // Jump forward relative to the current bytecode by |jump_offset|.
   compiler::Node* Jump(compiler::Node* jump_offset);
 
-  // Jump relative to the current bytecode by |jump_offset| if the
+  // Jump backward relative to the current bytecode by |jump_offset|.
+  compiler::Node* JumpBackward(compiler::Node* jump_offset);
+
+  // Jump forward relative to the current bytecode by |jump_offset| if the
   // word values |lhs| and |rhs| are equal.
   void JumpIfWordEqual(compiler::Node* lhs, compiler::Node* rhs,
                        compiler::Node* jump_offset);
 
-  // Jump relative to the current bytecode by |jump_offset| if the
+  // Jump forward relative to the current bytecode by |jump_offset| if the
   // word values |lhs| and |rhs| are not equal.
   void JumpIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                           compiler::Node* jump_offset);
@@ -184,9 +217,15 @@
   void AbortIfWordNotEqual(compiler::Node* lhs, compiler::Node* rhs,
                            BailoutReason bailout_reason);
 
+  // Dispatch to frame dropper trampoline if necessary.
+  void MaybeDropFrames(compiler::Node* context);
+
   // Returns the offset from the BytecodeArrayPointer of the current bytecode.
   compiler::Node* BytecodeOffset();
 
+  // Save the bytecode offset to the interpreter frame.
+  void SaveBytecodeOffset();
+
  protected:
   Bytecode bytecode() const { return bytecode_; }
   static bool TargetSupportsUnalignedAccess();
@@ -209,8 +248,8 @@
 
   // Saves and restores the interpreter bytecode offset to/from the
   // interpreter stack frame when performing a call.
-  void CallPrologue() override;
-  void CallEpilogue() override;
+  void CallPrologue();
+  void CallEpilogue();
 
   // Increment the dispatch counter for the (current, next) bytecode pair.
   void TraceBytecodeDispatch(compiler::Node* target_index);
@@ -218,9 +257,10 @@
   // Traces the current bytecode by calling |function_id|.
   void TraceBytecode(Runtime::FunctionId function_id);
 
-  // Updates the bytecode array's interrupt budget by |weight| and calls
-  // Runtime::kInterrupt if counter reaches zero.
-  void UpdateInterruptBudget(compiler::Node* weight);
+  // Updates the bytecode array's interrupt budget by a 32-bit unsigned |weight|
+  // and calls Runtime::kInterrupt if counter reaches zero. If |backward|, then
+  // the interrupt budget is decremented, otherwise it is incremented.
+  void UpdateInterruptBudget(compiler::Node* weight, bool backward);
 
   // Returns the offset of register |index| relative to RegisterFilePointer().
   compiler::Node* RegisterFrameOffset(compiler::Node* index);
@@ -236,6 +276,7 @@
   compiler::Node* BytecodeOperandReadUnaligned(int relative_offset,
                                                MachineType result_type);
 
+  // Returns the zero- or sign-extended operand value as a word32.
   compiler::Node* BytecodeOperandUnsignedByte(int operand_index);
   compiler::Node* BytecodeOperandSignedByte(int operand_index);
   compiler::Node* BytecodeOperandUnsignedShort(int operand_index);
@@ -243,12 +284,19 @@
   compiler::Node* BytecodeOperandUnsignedQuad(int operand_index);
   compiler::Node* BytecodeOperandSignedQuad(int operand_index);
 
+  // Returns the zero- or sign-extended operand value of the given size as
+  // a word32.
   compiler::Node* BytecodeSignedOperand(int operand_index,
                                         OperandSize operand_size);
   compiler::Node* BytecodeUnsignedOperand(int operand_index,
                                           OperandSize operand_size);
 
-  // Jump relative to the current bytecode by |jump_offset| if the
+  // Jump relative to the current bytecode by |jump_offset|. If |backward|,
+  // then jump backward (subtract the offset), otherwise jump forward (add the
+  // offset). Helper function for Jump and JumpBackward.
+  compiler::Node* Jump(compiler::Node* jump_offset, bool backward);
+
+  // Jump forward relative to the current bytecode by |jump_offset| if the
   // |condition| is true. Helper function for JumpIfWordEqual and
   // JumpIfWordNotEqual.
   void JumpConditional(compiler::Node* condition, compiler::Node* jump_offset);
@@ -260,7 +308,7 @@
   // Updates and returns BytecodeOffset() advanced by delta bytecodes.
   // Traces the exit of the current bytecode.
   compiler::Node* Advance(int delta);
-  compiler::Node* Advance(compiler::Node* delta);
+  compiler::Node* Advance(compiler::Node* delta, bool backward = false);
 
   // Load the bytecode at |bytecode_offset|.
   compiler::Node* LoadBytecode(compiler::Node* bytecode_offset);
@@ -292,9 +340,13 @@
   OperandScale operand_scale_;
   CodeStubAssembler::Variable bytecode_offset_;
   CodeStubAssembler::Variable interpreted_frame_pointer_;
+  CodeStubAssembler::Variable bytecode_array_;
+  CodeStubAssembler::Variable dispatch_table_;
   CodeStubAssembler::Variable accumulator_;
   AccumulatorUse accumulator_use_;
   bool made_call_;
+  bool reloaded_frame_ptr_;
+  bool saved_bytecode_offset_;
 
   bool disable_stack_check_across_call_;
   compiler::Node* stack_pointer_before_call_;
diff --git a/src/interpreter/interpreter-intrinsics.cc b/src/interpreter/interpreter-intrinsics.cc
index b46ca87..78de42b 100644
--- a/src/interpreter/interpreter-intrinsics.cc
+++ b/src/interpreter/interpreter-intrinsics.cc
@@ -5,6 +5,7 @@
 #include "src/interpreter/interpreter-intrinsics.h"
 
 #include "src/code-factory.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -105,12 +106,8 @@
 
 Node* IntrinsicsHelper::CompareInstanceType(Node* object, int type,
                                             InstanceTypeCompareMode mode) {
-  InterpreterAssembler::Variable return_value(assembler_,
-                                              MachineRepresentation::kTagged);
   Node* instance_type = __ LoadInstanceType(object);
 
-  InterpreterAssembler::Label if_true(assembler_), if_false(assembler_),
-      end(assembler_);
   if (mode == kInstanceTypeEqual) {
     return __ Word32Equal(instance_type, __ Int32Constant(type));
   } else {
@@ -122,6 +119,7 @@
 Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) {
   InterpreterAssembler::Variable return_value(assembler_,
                                               MachineRepresentation::kTagged);
+  // TODO(ishell): Use Select here.
   InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_),
       return_false(assembler_), end(assembler_);
   Node* arg = __ LoadRegister(input);
@@ -148,6 +146,8 @@
 
 Node* IntrinsicsHelper::IsJSReceiver(Node* input, Node* arg_count,
                                      Node* context) {
+  // TODO(ishell): Use Select here.
+  // TODO(ishell): Use CSA::IsJSReceiverInstanceType here.
   InterpreterAssembler::Variable return_value(assembler_,
                                               MachineRepresentation::kTagged);
   InterpreterAssembler::Label return_true(assembler_), return_false(assembler_),
@@ -185,16 +185,13 @@
   return IsInstanceType(input, JS_PROXY_TYPE);
 }
 
-Node* IntrinsicsHelper::IsRegExp(Node* input, Node* arg_count, Node* context) {
-  return IsInstanceType(input, JS_REGEXP_TYPE);
-}
-
 Node* IntrinsicsHelper::IsTypedArray(Node* input, Node* arg_count,
                                      Node* context) {
   return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
 }
 
 Node* IntrinsicsHelper::IsSmi(Node* input, Node* arg_count, Node* context) {
+  // TODO(ishell): Use SelectBooleanConstant here.
   InterpreterAssembler::Variable return_value(assembler_,
                                               MachineRepresentation::kTagged);
   InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
@@ -222,14 +219,22 @@
 Node* IntrinsicsHelper::IntrinsicAsStubCall(Node* args_reg, Node* context,
                                             Callable const& callable) {
   int param_count = callable.descriptor().GetParameterCount();
-  Node** args = zone()->NewArray<Node*>(param_count + 1);  // 1 for context
+  int input_count = param_count + 2;  // +2 for target and context
+  Node** args = zone()->NewArray<Node*>(input_count);
+  int index = 0;
+  args[index++] = __ HeapConstant(callable.code());
   for (int i = 0; i < param_count; i++) {
-    args[i] = __ LoadRegister(args_reg);
+    args[index++] = __ LoadRegister(args_reg);
     args_reg = __ NextRegister(args_reg);
   }
-  args[param_count] = context;
+  args[index++] = context;
+  return __ CallStubN(callable.descriptor(), 1, input_count, args);
+}
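// The args array built above follows the CallStubN convention assumed here:
// index 0 is the code target, then the stub's |param_count| parameters loaded
// from consecutive registers, and finally the context. For a two-parameter
// stub:
//
//   args = { HeapConstant(code), arg0, arg1, context };  // input_count == 4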
 
-  return __ CallStubN(callable, args);
+Node* IntrinsicsHelper::CreateIterResultObject(Node* input, Node* arg_count,
+                                               Node* context) {
+  return IntrinsicAsStubCall(input, context,
+                             CodeFactory::CreateIterResultObject(isolate()));
 }
 
 Node* IntrinsicsHelper::HasProperty(Node* input, Node* arg_count,
@@ -238,23 +243,6 @@
                              CodeFactory::HasProperty(isolate()));
 }
 
-Node* IntrinsicsHelper::NewObject(Node* input, Node* arg_count, Node* context) {
-  return IntrinsicAsStubCall(input, context,
-                             CodeFactory::FastNewObject(isolate()));
-}
-
-Node* IntrinsicsHelper::NumberToString(Node* input, Node* arg_count,
-                                       Node* context) {
-  return IntrinsicAsStubCall(input, context,
-                             CodeFactory::NumberToString(isolate()));
-}
-
-Node* IntrinsicsHelper::RegExpExec(Node* input, Node* arg_count,
-                                   Node* context) {
-  return IntrinsicAsStubCall(input, context,
-                             CodeFactory::RegExpExec(isolate()));
-}
-
 Node* IntrinsicsHelper::SubString(Node* input, Node* arg_count, Node* context) {
   return IntrinsicAsStubCall(input, context, CodeFactory::SubString(isolate()));
 }
@@ -294,7 +282,7 @@
   if (FLAG_debug_code) {
     InterpreterAssembler::Label arg_count_positive(assembler_);
     Node* comparison = __ Int32LessThan(target_args_count, __ Int32Constant(0));
-    __ GotoUnless(comparison, &arg_count_positive);
+    __ GotoIfNot(comparison, &arg_count_positive);
     __ Abort(kWrongArgumentCountForInvokeIntrinsic);
     __ Goto(&arg_count_positive);
     __ Bind(&arg_count_positive);
@@ -305,84 +293,43 @@
   return result;
 }
 
-Node* IntrinsicsHelper::ValueOf(Node* args_reg, Node* arg_count,
-                                Node* context) {
-  InterpreterAssembler::Variable return_value(assembler_,
-                                              MachineRepresentation::kTagged);
-  InterpreterAssembler::Label done(assembler_);
-
-  Node* object = __ LoadRegister(args_reg);
-  return_value.Bind(object);
-
-  // If the object is a smi return the object.
-  __ GotoIf(__ TaggedIsSmi(object), &done);
-
-  // If the object is not a value type, return the object.
-  Node* condition =
-      CompareInstanceType(object, JS_VALUE_TYPE, kInstanceTypeEqual);
-  __ GotoUnless(condition, &done);
-
-  // If the object is a value type, return the value field.
-  return_value.Bind(__ LoadObjectField(object, JSValue::kValueOffset));
-  __ Goto(&done);
-
-  __ Bind(&done);
-  return return_value.value();
-}
-
 Node* IntrinsicsHelper::ClassOf(Node* args_reg, Node* arg_count,
                                 Node* context) {
+  Node* value = __ LoadRegister(args_reg);
+  return __ ClassOf(value);
+}
+
+Node* IntrinsicsHelper::CreateAsyncFromSyncIterator(Node* args_reg,
+                                                    Node* arg_count,
+                                                    Node* context) {
+  InterpreterAssembler::Label not_receiver(
+      assembler_, InterpreterAssembler::Label::kDeferred);
+  InterpreterAssembler::Label done(assembler_);
   InterpreterAssembler::Variable return_value(assembler_,
                                               MachineRepresentation::kTagged);
-  InterpreterAssembler::Label done(assembler_), null(assembler_),
-      function(assembler_), non_function_constructor(assembler_);
 
-  Node* object = __ LoadRegister(args_reg);
+  Node* sync_iterator = __ LoadRegister(args_reg);
 
-  // If the object is not a JSReceiver, we return null.
-  __ GotoIf(__ TaggedIsSmi(object), &null);
-  STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
-  Node* is_js_receiver = CompareInstanceType(object, FIRST_JS_RECEIVER_TYPE,
-                                             kInstanceTypeGreaterThanOrEqual);
-  __ GotoUnless(is_js_receiver, &null);
+  __ GotoIf(__ TaggedIsSmi(sync_iterator), &not_receiver);
+  __ GotoIfNot(__ IsJSReceiver(sync_iterator), &not_receiver);
 
-  // Return 'Function' for JSFunction and JSBoundFunction objects.
-  Node* is_function = CompareInstanceType(object, FIRST_FUNCTION_TYPE,
-                                          kInstanceTypeGreaterThanOrEqual);
-  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
-  __ GotoIf(is_function, &function);
+  Node* const native_context = __ LoadNativeContext(context);
+  Node* const map = __ LoadContextElement(
+      native_context, Context::ASYNC_FROM_SYNC_ITERATOR_MAP_INDEX);
+  Node* const iterator = __ AllocateJSObjectFromMap(map);
 
-  // Check if the constructor in the map is a JS function.
-  Node* constructor = __ LoadMapConstructor(__ LoadMap(object));
-  Node* constructor_is_js_function =
-      CompareInstanceType(constructor, JS_FUNCTION_TYPE, kInstanceTypeEqual);
-  __ GotoUnless(constructor_is_js_function, &non_function_constructor);
+  __ StoreObjectFieldNoWriteBarrier(
+      iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset, sync_iterator);
 
-  // Grab the instance class name from the constructor function.
-  Node* shared =
-      __ LoadObjectField(constructor, JSFunction::kSharedFunctionInfoOffset);
-  return_value.Bind(
-      __ LoadObjectField(shared, SharedFunctionInfo::kInstanceClassNameOffset));
+  return_value.Bind(iterator);
   __ Goto(&done);
 
-  // Non-JS objects have class null.
-  __ Bind(&null);
+  __ Bind(&not_receiver);
   {
-    return_value.Bind(__ LoadRoot(Heap::kNullValueRootIndex));
-    __ Goto(&done);
-  }
+    return_value.Bind(
+        __ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context));
 
-  // Functions have class 'Function'.
-  __ Bind(&function);
-  {
-    return_value.Bind(__ LoadRoot(Heap::kFunction_stringRootIndex));
-    __ Goto(&done);
-  }
-
-  // Objects with a non-function constructor have class 'Object'.
-  __ Bind(&non_function_constructor);
-  {
-    return_value.Bind(__ LoadRoot(Heap::kObject_stringRootIndex));
+    // Unreachable due to the Throw in runtime call.
     __ Goto(&done);
   }
 
diff --git a/src/interpreter/interpreter-intrinsics.h b/src/interpreter/interpreter-intrinsics.h
index 70ff291..502a2f7 100644
--- a/src/interpreter/interpreter-intrinsics.h
+++ b/src/interpreter/interpreter-intrinsics.h
@@ -23,26 +23,23 @@
 
 // List of supported intrinsics, with upper case name, lower case name and
 // expected number of arguments (-1 denoting a variable argument count).
-#define INTRINSICS_LIST(V)                              \
-  V(Call, call, -1)                                     \
-  V(ClassOf, class_of, 1)                               \
-  V(HasProperty, has_property, 2)                       \
-  V(IsArray, is_array, 1)                               \
-  V(IsJSProxy, is_js_proxy, 1)                          \
-  V(IsJSReceiver, is_js_receiver, 1)                    \
-  V(IsRegExp, is_regexp, 1)                             \
-  V(IsSmi, is_smi, 1)                                   \
-  V(IsTypedArray, is_typed_array, 1)                    \
-  V(NewObject, new_object, 2)                           \
-  V(NumberToString, number_to_string, 1)                \
-  V(RegExpExec, reg_exp_exec, 4)                        \
-  V(SubString, sub_string, 3)                           \
-  V(ToString, to_string, 1)                             \
-  V(ToLength, to_length, 1)                             \
-  V(ToInteger, to_integer, 1)                           \
-  V(ToNumber, to_number, 1)                             \
-  V(ToObject, to_object, 1)                             \
-  V(ValueOf, value_of, 1)
+#define INTRINSICS_LIST(V)                                           \
+  V(Call, call, -1)                                                  \
+  V(ClassOf, class_of, 1)                                            \
+  V(CreateIterResultObject, create_iter_result_object, 2)            \
+  V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
+  V(HasProperty, has_property, 2)                                    \
+  V(IsArray, is_array, 1)                                            \
+  V(IsJSProxy, is_js_proxy, 1)                                       \
+  V(IsJSReceiver, is_js_receiver, 1)                                 \
+  V(IsSmi, is_smi, 1)                                                \
+  V(IsTypedArray, is_typed_array, 1)                                 \
+  V(SubString, sub_string, 3)                                        \
+  V(ToString, to_string, 1)                                          \
+  V(ToLength, to_length, 1)                                          \
+  V(ToInteger, to_integer, 1)                                        \
+  V(ToNumber, to_number, 1)                                          \
+  V(ToObject, to_object, 1)
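// INTRINSICS_LIST is an X-macro: each client supplies its own V. For example,
// declaring one helper per intrinsic (sketch; the real uses live in
// IntrinsicsHelper):
//
//   #define DECLARE_INTRINSIC(Upper, lower, count)                        \
//     compiler::Node* Upper(compiler::Node* input,                        \
//                           compiler::Node* arg_count,                    \
//                           compiler::Node* context);
//   INTRINSICS_LIST(DECLARE_INTRINSIC)
//   #undef DECLARE_INTRINSIC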
 
 class IntrinsicsHelper {
  public:
diff --git a/src/interpreter/interpreter.cc b/src/interpreter/interpreter.cc
index 81aecaf..5db69e4 100644
--- a/src/interpreter/interpreter.cc
+++ b/src/interpreter/interpreter.cc
@@ -8,16 +8,23 @@
 #include <memory>
 
 #include "src/ast/prettyprinter.h"
+#include "src/builtins/builtins-arguments.h"
+#include "src/builtins/builtins-constructor.h"
+#include "src/builtins/builtins-object.h"
 #include "src/code-factory.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
+#include "src/counters.h"
+#include "src/debug/debug.h"
 #include "src/factory.h"
+#include "src/ic/accessor-assembler.h"
 #include "src/interpreter/bytecode-flags.h"
 #include "src/interpreter/bytecode-generator.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/interpreter/interpreter-assembler.h"
 #include "src/interpreter/interpreter-intrinsics.h"
 #include "src/log.h"
+#include "src/objects-inl.h"
 #include "src/zone/zone.h"
 
 namespace v8 {
@@ -27,7 +34,6 @@
 using compiler::Node;
 typedef CodeStubAssembler::Label Label;
 typedef CodeStubAssembler::Variable Variable;
-typedef InterpreterAssembler::Arg Arg;
 
 #define __ assembler->
 
@@ -41,9 +47,42 @@
   Status FinalizeJobImpl() final;
 
  private:
+  class TimerScope final {
+   public:
+    TimerScope(RuntimeCallStats* stats, RuntimeCallStats::CounterId counter_id)
+        : stats_(stats) {
+      if (V8_UNLIKELY(FLAG_runtime_stats)) {
+        RuntimeCallStats::Enter(stats_, &timer_, counter_id);
+      }
+    }
+
+    explicit TimerScope(RuntimeCallCounter* counter) : stats_(nullptr) {
+      if (V8_UNLIKELY(FLAG_runtime_stats)) {
+        timer_.Start(counter, nullptr);
+      }
+    }
+
+    ~TimerScope() {
+      if (V8_UNLIKELY(FLAG_runtime_stats)) {
+        if (stats_) {
+          RuntimeCallStats::Leave(stats_, &timer_);
+        } else {
+          timer_.Stop();
+        }
+      }
+    }
+
+   private:
+    RuntimeCallStats* stats_;
+    RuntimeCallTimer timer_;
+  };
+
   BytecodeGenerator* generator() { return &generator_; }
 
   BytecodeGenerator generator_;
+  RuntimeCallStats* runtime_call_stats_;
+  RuntimeCallCounter background_execute_counter_;
+  bool print_bytecode_;
 
   DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
 };
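
The TimerScope helper above is the conditional-RAII timing pattern: start a timer on construction only when stats collection is enabled, and flush on destruction. A standalone approximation using std::chrono (names and output are illustrative, not V8 API):

#include <chrono>
#include <cstdio>

class ScopedTimer {
 public:
  explicit ScopedTimer(bool enabled) : enabled_(enabled) {
    if (enabled_) start_ = std::chrono::steady_clock::now();
  }
  ~ScopedTimer() {
    if (enabled_) {
      auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                    std::chrono::steady_clock::now() - start_)
                    .count();
      std::printf("elapsed: %lld us\n", static_cast<long long>(us));
    }
  }

 private:
  bool enabled_;
  std::chrono::steady_clock::time_point start_;
};
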
@@ -73,24 +112,9 @@
   };
 
   for (OperandScale operand_scale : kOperandScales) {
-#define GENERATE_CODE(Name, ...)                                               \
-  {                                                                            \
-    if (Bytecodes::BytecodeHasHandler(Bytecode::k##Name, operand_scale)) {     \
-      InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name,       \
-                                     operand_scale);                           \
-      Do##Name(&assembler);                                                    \
-      Handle<Code> code = assembler.GenerateCode();                            \
-      size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale);  \
-      dispatch_table_[index] = code->entry();                                  \
-      TraceCodegen(code);                                                      \
-      PROFILE(                                                                 \
-          isolate_,                                                            \
-          CodeCreateEvent(                                                     \
-              CodeEventListener::BYTECODE_HANDLER_TAG,                         \
-              AbstractCode::cast(*code),                                       \
-              Bytecodes::ToString(Bytecode::k##Name, operand_scale).c_str())); \
-    }                                                                          \
-  }
+#define GENERATE_CODE(Name, ...)                                  \
+  InstallBytecodeHandler(&zone, Bytecode::k##Name, operand_scale, \
+                         &Interpreter::Do##Name);
     BYTECODE_LIST(GENERATE_CODE)
 #undef GENERATE_CODE
   }
@@ -108,6 +132,30 @@
   DCHECK(IsDispatchTableInitialized());
 }
 
+void Interpreter::InstallBytecodeHandler(Zone* zone, Bytecode bytecode,
+                                         OperandScale operand_scale,
+                                         BytecodeGeneratorFunc generator) {
+  if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
+
+  InterpreterDispatchDescriptor descriptor(isolate_);
+  compiler::CodeAssemblerState state(
+      isolate_, zone, descriptor, Code::ComputeFlags(Code::BYTECODE_HANDLER),
+      Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode));
+  InterpreterAssembler assembler(&state, bytecode, operand_scale);
+  if (Bytecodes::MakesCallAlongCriticalPath(bytecode)) {
+    assembler.SaveBytecodeOffset();
+  }
+  (this->*generator)(&assembler);
+  Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
+  size_t index = GetDispatchTableIndex(bytecode, operand_scale);
+  dispatch_table_[index] = code->entry();
+  TraceCodegen(code);
+  PROFILE(isolate_, CodeCreateEvent(
+                        CodeEventListener::BYTECODE_HANDLER_TAG,
+                        AbstractCode::cast(*code),
+                        Bytecodes::ToString(bytecode, operand_scale).c_str()));
+}
+
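InstallBytecodeHandler receives each generator as a pointer to member function and invokes it via (this->*generator)(&assembler). A self-contained sketch of that dispatch idiom (class and names hypothetical):

#include <cstdio>

class HandlerTable {
 public:
  using GeneratorFunc = void (HandlerTable::*)(int);

  void Install(GeneratorFunc generator, int operand_scale) {
    (this->*generator)(operand_scale);  // same call shape as above
  }

  void DoLdaZero(int operand_scale) {
    std::printf("generated LdaZero at scale %d\n", operand_scale);
  }
};

// Usage sketch: HandlerTable t; t.Install(&HandlerTable::DoLdaZero, 1);
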
 Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
                                       OperandScale operand_scale) {
   DCHECK(IsDispatchTableInitialized());
@@ -153,11 +201,33 @@
   return FLAG_interrupt_budget * kCodeSizeMultiplier;
 }
 
+namespace {
+
+bool ShouldPrintBytecode(Handle<SharedFunctionInfo> shared) {
+  if (!FLAG_print_bytecode) return false;
+
+  // Checks whether the function passes the filter.
+  if (shared->is_toplevel()) {
+    Vector<const char> filter = CStrVector(FLAG_print_bytecode_filter);
+    return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
+  } else {
+    return shared->PassesFilter(FLAG_print_bytecode_filter);
+  }
+}
+
+}  // namespace
+
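ShouldPrintBytecode treats top-level code specially: it passes only when the filter is empty or the single wildcard "*". Just that check, as a plain C-string sketch (illustrative only):

#include <cstring>

bool TopLevelPassesFilter(const char* filter) {
  size_t len = std::strlen(filter);
  return len == 0 || (len == 1 && filter[0] == '*');
}
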
 InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
-    : CompilationJob(info->isolate(), info, "Ignition"), generator_(info) {}
+    : CompilationJob(info->isolate(), info, "Ignition"),
+      generator_(info),
+      runtime_call_stats_(info->isolate()->counters()->runtime_call_stats()),
+      background_execute_counter_("CompileBackgroundIgnition"),
+      print_bytecode_(ShouldPrintBytecode(info->shared_info())) {}
 
 InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
-  if (FLAG_print_bytecode || FLAG_print_ast) {
+  CodeGenerator::MakeCodePrologue(info(), "interpreter");
+
+  if (print_bytecode_) {
     OFStream os(stdout);
     std::unique_ptr<char[]> name = info()->GetDebugName();
     os << "[generating bytecode for function: " << info()->GetDebugName().get()
@@ -165,25 +235,15 @@
        << std::flush;
   }
 
-#ifdef DEBUG
-  if (info()->parse_info() && FLAG_print_ast) {
-    OFStream os(stdout);
-    os << "--- AST ---" << std::endl
-       << AstPrinter(info()->isolate()).PrintProgram(info()->literal())
-       << std::endl
-       << std::flush;
-  }
-#endif  // DEBUG
-
   return SUCCEEDED;
 }
 
 InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
-  // TODO(5203): These timers aren't thread safe, move to using the CompilerJob
-  // timers.
-  RuntimeCallTimerScope runtimeTimer(info()->isolate(),
-                                     &RuntimeCallStats::CompileIgnition);
-  TimerEventScope<TimerEventCompileIgnition> timer(info()->isolate());
+  TimerScope runtimeTimer =
+      executed_on_background_thread()
+          ? TimerScope(&background_execute_counter_)
+          : TimerScope(runtime_call_stats_, &RuntimeCallStats::CompileIgnition);
+  // TODO(lpy): add support for background compilation RCS trace.
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");
 
   generator()->GenerateBytecode(stack_limit());
@@ -195,14 +255,21 @@
 }
 
 InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
+  // Add background runtime call stats.
+  if (V8_UNLIKELY(FLAG_runtime_stats && executed_on_background_thread())) {
+    runtime_call_stats_->CompileBackgroundIgnition.Add(
+        &background_execute_counter_);
+  }
+
+  RuntimeCallTimerScope runtimeTimer(
+      runtime_call_stats_, &RuntimeCallStats::CompileIgnitionFinalization);
+
   Handle<BytecodeArray> bytecodes = generator()->FinalizeBytecode(isolate());
   if (generator()->HasStackOverflow()) {
     return FAILED;
   }
 
-  CodeGenerator::MakeCodePrologue(info(), "interpreter");
-
-  if (FLAG_print_bytecode) {
+  if (print_bytecode_) {
     OFStream os(stdout);
     bytecodes->Print(os);
     os << std::flush;
@@ -326,8 +393,7 @@
 //
 // Load an integer literal into the accumulator as a Smi.
 void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* smi_int = __ SmiTag(raw_int);
+  Node* smi_int = __ BytecodeOperandImmSmi(0);
   __ SetAccumulator(smi_int);
   __ Dispatch();
 }
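
BytecodeOperandImmSmi folds the old BytecodeOperandImm + SmiTag pair into a single operand load. For intuition, a Smi is an integer carried in a tagged word; a simplified 32-bit-style scheme follows (the shift amount differs by platform, so treat this purely as a sketch):

#include <cassert>
#include <cstdint>

constexpr intptr_t SmiTag(int32_t value) {
  return static_cast<intptr_t>(value) << 1;  // tag bit 0 == 0 marks a Smi
}

constexpr int32_t SmiUntag(intptr_t tagged) {
  return static_cast<int32_t>(tagged >> 1);
}

static_assert(SmiUntag(SmiTag(42)) == 42, "tagging round-trips");
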
@@ -419,54 +485,98 @@
   __ Dispatch();
 }
 
-Node* Interpreter::BuildLoadGlobal(Callable ic, Node* context,
-                                   Node* feedback_slot,
-                                   InterpreterAssembler* assembler) {
-  typedef LoadGlobalWithVectorDescriptor Descriptor;
-
+void Interpreter::BuildLoadGlobal(int slot_operand_index,
+                                  int name_operand_index,
+                                  TypeofMode typeof_mode,
+                                  InterpreterAssembler* assembler) {
   // Load the global via the LoadGlobalIC.
-  Node* code_target = __ HeapConstant(ic.code());
-  Node* smi_slot = __ SmiTag(feedback_slot);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  return __ CallStub(ic.descriptor(), code_target, context,
-                     Arg(Descriptor::kSlot, smi_slot),
-                     Arg(Descriptor::kVector, type_feedback_vector));
+  Node* feedback_vector = __ LoadFeedbackVector();
+  Node* feedback_slot = __ BytecodeOperandIdx(slot_operand_index);
+
+  AccessorAssembler accessor_asm(assembler->state());
+
+  Label try_handler(assembler, Label::kDeferred),
+      miss(assembler, Label::kDeferred);
+
+  // Fast path without frame construction for the data case.
+  {
+    Label done(assembler);
+    Variable var_result(assembler, MachineRepresentation::kTagged);
+    ExitPoint exit_point(assembler, &done, &var_result);
+
+    accessor_asm.LoadGlobalIC_TryPropertyCellCase(
+        feedback_vector, feedback_slot, &exit_point, &try_handler, &miss,
+        CodeStubAssembler::INTPTR_PARAMETERS);
+
+    __ Bind(&done);
+    __ SetAccumulator(var_result.value());
+    __ Dispatch();
+  }
+
+  // Slow path with frame construction.
+  {
+    Label done(assembler);
+    Variable var_result(assembler, MachineRepresentation::kTagged);
+    ExitPoint exit_point(assembler, &done, &var_result);
+
+    __ Bind(&try_handler);
+    {
+      Node* context = __ GetContext();
+      Node* smi_slot = __ SmiTag(feedback_slot);
+      Node* name_index = __ BytecodeOperandIdx(name_operand_index);
+      Node* name = __ LoadConstantPoolEntry(name_index);
+
+      AccessorAssembler::LoadICParameters params(context, nullptr, name,
+                                                 smi_slot, feedback_vector);
+      accessor_asm.LoadGlobalIC_TryHandlerCase(&params, typeof_mode,
+                                               &exit_point, &miss);
+    }
+
+    __ Bind(&miss);
+    {
+      Node* context = __ GetContext();
+      Node* smi_slot = __ SmiTag(feedback_slot);
+      Node* name_index = __ BytecodeOperandIdx(name_operand_index);
+      Node* name = __ LoadConstantPoolEntry(name_index);
+
+      AccessorAssembler::LoadICParameters params(context, nullptr, name,
+                                                 smi_slot, feedback_vector);
+      accessor_asm.LoadGlobalIC_MissCase(&params, &exit_point);
+    }
+
+    __ Bind(&done);
+    {
+      __ SetAccumulator(var_result.value());
+      __ Dispatch();
+    }
+  }
 }
 
-// LdaGlobal <slot>
+// LdaGlobal <name_index> <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
 // accumulator using FeedbackVector slot <slot> outside of a typeof.
 void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF);
+  static const int kNameOperandIndex = 0;
+  static const int kSlotOperandIndex = 1;
 
-  Node* context = __ GetContext();
-
-  Node* raw_slot = __ BytecodeOperandIdx(0);
-  Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, NOT_INSIDE_TYPEOF,
+                  assembler);
 }
 
-// LdaGlobalInsideTypeof <slot>
+// LdaGlobalInsideTypeof <name_index> <slot>
 //
 // Load the global with name in constant pool entry <name_index> into the
 // accumulator using FeedbackVector slot <slot> inside of a typeof.
 void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
-  Callable ic =
-      CodeFactory::LoadGlobalICInOptimizedCode(isolate_, INSIDE_TYPEOF);
+  static const int kNameOperandIndex = 0;
+  static const int kSlotOperandIndex = 1;
 
-  Node* context = __ GetContext();
-
-  Node* raw_slot = __ BytecodeOperandIdx(0);
-  Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
-  __ SetAccumulator(result);
-  __ Dispatch();
+  BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, INSIDE_TYPEOF,
+                  assembler);
 }
 
 void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
-  typedef StoreWithVectorDescriptor Descriptor;
   // Get the global object.
   Node* context = __ GetContext();
   Node* native_context = __ LoadNativeContext(context);
@@ -480,11 +590,9 @@
   Node* value = __ GetAccumulator();
   Node* raw_slot = __ BytecodeOperandIdx(1);
   Node* smi_slot = __ SmiTag(raw_slot);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  __ CallStub(ic.descriptor(), code_target, context,
-              Arg(Descriptor::kReceiver, global), Arg(Descriptor::kName, name),
-              Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
-              Arg(Descriptor::kVector, type_feedback_vector));
+  Node* feedback_vector = __ LoadFeedbackVector();
+  __ CallStub(ic.descriptor(), code_target, context, global, name, value,
+              smi_slot, feedback_vector);
   __ Dispatch();
 }
 
@@ -521,6 +629,15 @@
   __ Dispatch();
 }
 
+// LdaImmutableContextSlot <context> <slot_index> <depth>
+//
+// Load the object in |slot_index| of the context at |depth| in the context
+// chain starting at |context| into the accumulator.
+void Interpreter::DoLdaImmutableContextSlot(InterpreterAssembler* assembler) {
+  // TODO(danno): Share the actual code object rather than creating a
+  // duplicate one.
+  DoLdaContextSlot(assembler);
+}
+
 // LdaCurrentContextSlot <slot_index>
 //
 // Load the object in |slot_index| of the current context into the accumulator.
@@ -532,6 +649,15 @@
   __ Dispatch();
 }
 
+// LdaImmutableCurrentContextSlot <slot_index>
+//
+// Load the object in |slot_index| of the current context into the accumulator.
+void Interpreter::DoLdaImmutableCurrentContextSlot(
+    InterpreterAssembler* assembler) {
+  // TODO(danno): Share the actual code object rather than creating a
+  // duplicate one.
+  DoLdaCurrentContextSlot(assembler);
+}
+
 // StaContextSlot <context> <slot_index> <depth>
 //
 // Stores the object in the accumulator into |slot_index| of the context at
@@ -635,8 +761,6 @@
 void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
                                         InterpreterAssembler* assembler) {
   Node* context = __ GetContext();
-  Node* name_index = __ BytecodeOperandIdx(0);
-  Node* feedback_slot = __ BytecodeOperandIdx(1);
   Node* depth = __ BytecodeOperandUImm(2);
 
   Label slowpath(assembler, Label::kDeferred);
@@ -646,18 +770,21 @@
 
   // Fast path does a normal load global
   {
-    Callable ic = CodeFactory::LoadGlobalICInOptimizedCode(
-        isolate_, function_id == Runtime::kLoadLookupSlotInsideTypeof
-                      ? INSIDE_TYPEOF
-                      : NOT_INSIDE_TYPEOF);
-    Node* result = BuildLoadGlobal(ic, context, feedback_slot, assembler);
-    __ SetAccumulator(result);
-    __ Dispatch();
+    static const int kNameOperandIndex = 0;
+    static const int kSlotOperandIndex = 1;
+
+    TypeofMode typeof_mode = function_id == Runtime::kLoadLookupSlotInsideTypeof
+                                 ? INSIDE_TYPEOF
+                                 : NOT_INSIDE_TYPEOF;
+
+    BuildLoadGlobal(kSlotOperandIndex, kNameOperandIndex, typeof_mode,
+                    assembler);
   }
 
   // Slow path when we have to call out to the runtime
   __ Bind(&slowpath);
   {
+    Node* name_index = __ BytecodeOperandIdx(0);
     Node* name = __ LoadConstantPoolEntry(name_index);
     Node* result = __ CallRuntime(function_id, context, name);
     __ SetAccumulator(result);
@@ -717,7 +844,6 @@
 // Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
 // constant pool entry <name_index>.
 void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
-  typedef LoadWithVectorDescriptor Descriptor;
   Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
   Node* code_target = __ HeapConstant(ic.code());
   Node* register_index = __ BytecodeOperandReg(0);
@@ -726,12 +852,10 @@
   Node* name = __ LoadConstantPoolEntry(constant_index);
   Node* raw_slot = __ BytecodeOperandIdx(2);
   Node* smi_slot = __ SmiTag(raw_slot);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* feedback_vector = __ LoadFeedbackVector();
   Node* context = __ GetContext();
-  Node* result = __ CallStub(
-      ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
-      Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
-      Arg(Descriptor::kVector, type_feedback_vector));
+  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+                             name, smi_slot, feedback_vector);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -741,7 +865,6 @@
 // Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
 // in the accumulator.
 void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
-  typedef LoadWithVectorDescriptor Descriptor;
   Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
   Node* code_target = __ HeapConstant(ic.code());
   Node* reg_index = __ BytecodeOperandReg(0);
@@ -749,18 +872,15 @@
   Node* name = __ GetAccumulator();
   Node* raw_slot = __ BytecodeOperandIdx(1);
   Node* smi_slot = __ SmiTag(raw_slot);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* feedback_vector = __ LoadFeedbackVector();
   Node* context = __ GetContext();
-  Node* result = __ CallStub(
-      ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
-      Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
-      Arg(Descriptor::kVector, type_feedback_vector));
+  Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+                             name, smi_slot, feedback_vector);
   __ SetAccumulator(result);
   __ Dispatch();
 }
 
 void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
-  typedef StoreWithVectorDescriptor Descriptor;
   Node* code_target = __ HeapConstant(ic.code());
   Node* object_reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(object_reg_index);
@@ -769,12 +889,10 @@
   Node* value = __ GetAccumulator();
   Node* raw_slot = __ BytecodeOperandIdx(2);
   Node* smi_slot = __ SmiTag(raw_slot);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* feedback_vector = __ LoadFeedbackVector();
   Node* context = __ GetContext();
-  __ CallStub(ic.descriptor(), code_target, context,
-              Arg(Descriptor::kReceiver, object), Arg(Descriptor::kName, name),
-              Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
-              Arg(Descriptor::kVector, type_feedback_vector));
+  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+              smi_slot, feedback_vector);
   __ Dispatch();
 }
 
@@ -798,8 +916,17 @@
   DoStoreIC(ic, assembler);
 }
 
+// StaNamedOwnProperty <object> <name_index> <slot>
+//
+// Calls the StoreOwnIC at FeedbackVector slot <slot> for <object> and
+// the name in constant pool entry <name_index> with the value in the
+// accumulator.
+void Interpreter::DoStaNamedOwnProperty(InterpreterAssembler* assembler) {
+  Callable ic = CodeFactory::StoreOwnICInOptimizedCode(isolate_);
+  DoStoreIC(ic, assembler);
+}
+
 void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
-  typedef StoreWithVectorDescriptor Descriptor;
   Node* code_target = __ HeapConstant(ic.code());
   Node* object_reg_index = __ BytecodeOperandReg(0);
   Node* object = __ LoadRegister(object_reg_index);
@@ -808,12 +935,10 @@
   Node* value = __ GetAccumulator();
   Node* raw_slot = __ BytecodeOperandIdx(2);
   Node* smi_slot = __ SmiTag(raw_slot);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* feedback_vector = __ LoadFeedbackVector();
   Node* context = __ GetContext();
-  __ CallStub(ic.descriptor(), code_target, context,
-              Arg(Descriptor::kReceiver, object), Arg(Descriptor::kName, name),
-              Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
-              Arg(Descriptor::kVector, type_feedback_vector));
+  __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+              smi_slot, feedback_vector);
   __ Dispatch();
 }
 
@@ -835,13 +960,36 @@
   DoKeyedStoreIC(ic, assembler);
 }
 
+// StaDataPropertyInLiteral <object> <name> <flags> <slot>
+//
+// Define a property <name> with value from the accumulator in <object>.
+// Property attributes, and whether to set the function name, are stored in
+// DataPropertyInLiteralFlags <flags>.
+//
+// This definition is not observable and is used only for definitions
+// in object or class literals.
+void Interpreter::DoStaDataPropertyInLiteral(InterpreterAssembler* assembler) {
+  Node* object = __ LoadRegister(__ BytecodeOperandReg(0));
+  Node* name = __ LoadRegister(__ BytecodeOperandReg(1));
+  Node* value = __ GetAccumulator();
+  Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2));
+  Node* vector_index = __ SmiTag(__ BytecodeOperandIdx(3));
+
+  Node* feedback_vector = __ LoadFeedbackVector();
+  Node* context = __ GetContext();
+
+  __ CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name,
+                 value, flags, feedback_vector, vector_index);
+  __ Dispatch();
+}
+
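The <flags> operand packs the property attributes together with the set-function-name bit into one small integer. A hypothetical packing to make that concrete (V8's actual DataPropertyInLiteralFlags layout may differ):

#include <cstdint>

enum DemoDataPropertyFlag : uint8_t {
  kNoFlags = 0,
  kDontEnum = 1 << 0,         // a property attribute bit
  kSetFunctionName = 1 << 1,  // whether to name an anonymous function value
};

inline bool ShouldSetFunctionName(uint8_t flags) {
  return (flags & kSetFunctionName) != 0;
}
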
 // LdaModuleVariable <cell_index> <depth>
 //
 // Load the contents of a module variable into the accumulator.  The variable is
 // identified by <cell_index>.  <depth> is the depth of the current context
 // relative to the module context.
 void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
-  Node* cell_index = __ BytecodeOperandImm(0);
+  Node* cell_index = __ BytecodeOperandImmIntPtr(0);
   Node* depth = __ BytecodeOperandUImm(1);
 
   Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
@@ -884,7 +1032,7 @@
 // <depth> is the depth of the current context relative to the module context.
 void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
-  Node* cell_index = __ BytecodeOperandImm(0);
+  Node* cell_index = __ BytecodeOperandImmIntPtr(0);
   Node* depth = __ BytecodeOperandUImm(1);
 
   Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
@@ -969,9 +1117,9 @@
   Node* rhs = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* slot_index = __ BytecodeOperandIdx(1);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* feedback_vector = __ LoadFeedbackVector();
   Node* result = Generator::Generate(assembler, lhs, rhs, slot_index,
-                                     type_feedback_vector, context);
+                                     feedback_vector, context);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -983,68 +1131,169 @@
   Node* rhs = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* slot_index = __ BytecodeOperandIdx(1);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* feedback_vector = __ LoadFeedbackVector();
 
   // TODO(interpreter): the only reason this check is here is because we
   // sometimes emit comparisons that shouldn't collect feedback (e.g.
   // try-finally blocks and generators), and we could get rid of this by
   // introducing Smi equality tests.
-  Label skip_feedback_update(assembler);
-  __ GotoIf(__ WordEqual(slot_index, __ IntPtrConstant(0)),
-            &skip_feedback_update);
+  Label gather_type_feedback(assembler), do_compare(assembler);
+  __ Branch(__ WordEqual(slot_index, __ IntPtrConstant(0)), &do_compare,
+            &gather_type_feedback);
 
-  Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
-  Label lhs_is_smi(assembler), lhs_is_not_smi(assembler),
-      gather_rhs_type(assembler), do_compare(assembler);
-  __ Branch(__ TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
-
-  __ Bind(&lhs_is_smi);
-  var_type_feedback.Bind(
-      __ Int32Constant(CompareOperationFeedback::kSignedSmall));
-  __ Goto(&gather_rhs_type);
-
-  __ Bind(&lhs_is_not_smi);
+  __ Bind(&gather_type_feedback);
   {
-    Label lhs_is_number(assembler), lhs_is_not_number(assembler);
-    Node* lhs_map = __ LoadMap(lhs);
-    __ Branch(__ WordEqual(lhs_map, __ HeapNumberMapConstant()), &lhs_is_number,
-              &lhs_is_not_number);
+    Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
+    Label lhs_is_not_smi(assembler), lhs_is_not_number(assembler),
+        lhs_is_not_string(assembler), gather_rhs_type(assembler),
+        update_feedback(assembler);
 
-    __ Bind(&lhs_is_number);
-    var_type_feedback.Bind(__ Int32Constant(CompareOperationFeedback::kNumber));
+    __ GotoIfNot(__ TaggedIsSmi(lhs), &lhs_is_not_smi);
+
+    var_type_feedback.Bind(
+        __ SmiConstant(CompareOperationFeedback::kSignedSmall));
     __ Goto(&gather_rhs_type);
 
-    __ Bind(&lhs_is_not_number);
-    var_type_feedback.Bind(__ Int32Constant(CompareOperationFeedback::kAny));
-    __ Goto(&do_compare);
-  }
+    __ Bind(&lhs_is_not_smi);
+    {
+      Node* lhs_map = __ LoadMap(lhs);
+      __ GotoIfNot(__ IsHeapNumberMap(lhs_map), &lhs_is_not_number);
 
-  __ Bind(&gather_rhs_type);
-  {
-    Label rhs_is_smi(assembler);
-    __ GotoIf(__ TaggedIsSmi(rhs), &rhs_is_smi);
+      var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kNumber));
+      __ Goto(&gather_rhs_type);
 
-    Node* rhs_map = __ LoadMap(rhs);
-    Node* rhs_type =
-        __ Select(__ WordEqual(rhs_map, __ HeapNumberMapConstant()),
-                  __ Int32Constant(CompareOperationFeedback::kNumber),
-                  __ Int32Constant(CompareOperationFeedback::kAny));
-    var_type_feedback.Bind(__ Word32Or(var_type_feedback.value(), rhs_type));
-    __ Goto(&do_compare);
+      __ Bind(&lhs_is_not_number);
+      {
+        Node* lhs_instance_type = __ LoadInstanceType(lhs);
+        if (Token::IsOrderedRelationalCompareOp(compare_op)) {
+          Label lhs_is_not_oddball(assembler);
+          __ GotoIfNot(
+              __ Word32Equal(lhs_instance_type, __ Int32Constant(ODDBALL_TYPE)),
+              &lhs_is_not_oddball);
 
-    __ Bind(&rhs_is_smi);
-    var_type_feedback.Bind(
-        __ Word32Or(var_type_feedback.value(),
-                    __ Int32Constant(CompareOperationFeedback::kSignedSmall)));
-    __ Goto(&do_compare);
+          var_type_feedback.Bind(
+              __ SmiConstant(CompareOperationFeedback::kNumberOrOddball));
+          __ Goto(&gather_rhs_type);
+
+          __ Bind(&lhs_is_not_oddball);
+        }
+
+        Label lhs_is_not_string(assembler);
+        __ GotoIfNot(__ IsStringInstanceType(lhs_instance_type),
+                     &lhs_is_not_string);
+
+        if (Token::IsOrderedRelationalCompareOp(compare_op)) {
+          var_type_feedback.Bind(
+              __ SmiConstant(CompareOperationFeedback::kString));
+        } else {
+          var_type_feedback.Bind(__ SelectSmiConstant(
+              __ Word32Equal(
+                  __ Word32And(lhs_instance_type,
+                               __ Int32Constant(kIsNotInternalizedMask)),
+                  __ Int32Constant(kInternalizedTag)),
+              CompareOperationFeedback::kInternalizedString,
+              CompareOperationFeedback::kString));
+        }
+        __ Goto(&gather_rhs_type);
+
+        __ Bind(&lhs_is_not_string);
+        if (Token::IsEqualityOp(compare_op)) {
+          var_type_feedback.Bind(__ SelectSmiConstant(
+              __ IsJSReceiverInstanceType(lhs_instance_type),
+              CompareOperationFeedback::kReceiver,
+              CompareOperationFeedback::kAny));
+        } else {
+          var_type_feedback.Bind(
+              __ SmiConstant(CompareOperationFeedback::kAny));
+        }
+        __ Goto(&gather_rhs_type);
+      }
+    }
+
+    __ Bind(&gather_rhs_type);
+    {
+      Label rhs_is_not_smi(assembler), rhs_is_not_number(assembler);
+
+      __ GotoIfNot(__ TaggedIsSmi(rhs), &rhs_is_not_smi);
+
+      var_type_feedback.Bind(
+          __ SmiOr(var_type_feedback.value(),
+                   __ SmiConstant(CompareOperationFeedback::kSignedSmall)));
+      __ Goto(&update_feedback);
+
+      __ Bind(&rhs_is_not_smi);
+      {
+        Node* rhs_map = __ LoadMap(rhs);
+        __ GotoIfNot(__ IsHeapNumberMap(rhs_map), &rhs_is_not_number);
+
+        var_type_feedback.Bind(
+            __ SmiOr(var_type_feedback.value(),
+                     __ SmiConstant(CompareOperationFeedback::kNumber)));
+        __ Goto(&update_feedback);
+
+        __ Bind(&rhs_is_not_number);
+        {
+          Node* rhs_instance_type = __ LoadInstanceType(rhs);
+          if (Token::IsOrderedRelationalCompareOp(compare_op)) {
+            Label rhs_is_not_oddball(assembler);
+            __ GotoIfNot(__ Word32Equal(rhs_instance_type,
+                                        __ Int32Constant(ODDBALL_TYPE)),
+                         &rhs_is_not_oddball);
+
+            var_type_feedback.Bind(__ SmiOr(
+                var_type_feedback.value(),
+                __ SmiConstant(CompareOperationFeedback::kNumberOrOddball)));
+            __ Goto(&update_feedback);
+
+            __ Bind(&rhs_is_not_oddball);
+          }
+
+          Label rhs_is_not_string(assembler);
+          __ GotoIfNot(__ IsStringInstanceType(rhs_instance_type),
+                       &rhs_is_not_string);
+
+          if (Token::IsOrderedRelationalCompareOp(compare_op)) {
+            var_type_feedback.Bind(
+                __ SmiOr(var_type_feedback.value(),
+                         __ SmiConstant(CompareOperationFeedback::kString)));
+          } else {
+            var_type_feedback.Bind(__ SmiOr(
+                var_type_feedback.value(),
+                __ SelectSmiConstant(
+                    __ Word32Equal(
+                        __ Word32And(rhs_instance_type,
+                                     __ Int32Constant(kIsNotInternalizedMask)),
+                        __ Int32Constant(kInternalizedTag)),
+                    CompareOperationFeedback::kInternalizedString,
+                    CompareOperationFeedback::kString)));
+          }
+          __ Goto(&update_feedback);
+
+          __ Bind(&rhs_is_not_string);
+          if (Token::IsEqualityOp(compare_op)) {
+            var_type_feedback.Bind(
+                __ SmiOr(var_type_feedback.value(),
+                         __ SelectSmiConstant(
+                             __ IsJSReceiverInstanceType(rhs_instance_type),
+                             CompareOperationFeedback::kReceiver,
+                             CompareOperationFeedback::kAny)));
+          } else {
+            var_type_feedback.Bind(
+                __ SmiConstant(CompareOperationFeedback::kAny));
+          }
+          __ Goto(&update_feedback);
+        }
+      }
+    }
+
+    __ Bind(&update_feedback);
+    {
+      __ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+      __ Goto(&do_compare);
+    }
   }
 
   __ Bind(&do_compare);
-  __ UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
-                    slot_index);
-  __ Goto(&skip_feedback_update);
-
-  __ Bind(&skip_feedback_update);
   Node* result;
   switch (compare_op) {
     case Token::EQ:
@@ -1124,10 +1373,11 @@
   Node* rhs = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* slot_index = __ BytecodeOperandIdx(1);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* feedback_vector = __ LoadFeedbackVector();
 
-  Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32),
-      var_rhs_type_feedback(assembler, MachineRepresentation::kWord32);
+  Variable var_lhs_type_feedback(assembler,
+                                 MachineRepresentation::kTaggedSigned),
+      var_rhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
       context, lhs, &var_lhs_type_feedback);
   Node* rhs_value = __ TruncateTaggedToWord32WithFeedback(
@@ -1166,10 +1416,9 @@
       UNREACHABLE();
   }
 
-  Node* result_type =
-      __ Select(__ TaggedIsSmi(result),
-                __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
-                __ Int32Constant(BinaryOperationFeedback::kNumber));
+  Node* result_type = __ SelectSmiConstant(
+      __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+      BinaryOperationFeedback::kNumber);
 
   if (FLAG_debug_code) {
     Label ok(assembler);
@@ -1182,9 +1431,9 @@
   }
 
   Node* input_feedback =
-      __ Word32Or(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
-  __ UpdateFeedback(__ Word32Or(result_type, input_feedback),
-                    type_feedback_vector, slot_index);
+      __ SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
+  __ UpdateFeedback(__ SmiOr(result_type, input_feedback), feedback_vector,
+                    slot_index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
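
Both the compare and binary-op paths now keep type feedback as tagged Smi bit sets and merge contributions with SmiOr, so recorded feedback only ever widens over time (SignedSmall within Number within Any, with the string and receiver cases on separate branches). A reduced model of that combine step, with illustrative flag values:

#include <cstdint>

enum DemoCompareFeedback : uint32_t {
  kNone = 0,
  kSignedSmall = 1 << 0,
  kNumber = (1 << 1) | kSignedSmall,  // Number subsumes SignedSmall
  kString = 1 << 2,
  kReceiver = 1 << 3,
  kAny = 0xFF,
};

uint32_t Combine(uint32_t lhs_feedback, uint32_t rhs_feedback) {
  return lhs_feedback | rhs_feedback;  // monotone: never narrows
}
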
@@ -1251,10 +1500,9 @@
 
   Node* reg_index = __ BytecodeOperandReg(1);
   Node* left = __ LoadRegister(reg_index);
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* right = __ SmiTag(raw_int);
+  Node* right = __ BytecodeOperandImmSmi(0);
   Node* slot_index = __ BytecodeOperandIdx(2);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* feedback_vector = __ LoadFeedbackVector();
 
   // {right} is known to be a Smi.
   // If {left} is also a Smi, take the fast path.
@@ -1271,8 +1519,8 @@
     __ Branch(overflow, &slowpath, &if_notoverflow);
     __ Bind(&if_notoverflow);
     {
-      __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
-                        type_feedback_vector, slot_index);
+      __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall),
+                        feedback_vector, slot_index);
       var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
       __ Goto(&end);
     }
@@ -1283,8 +1531,9 @@
     AddWithFeedbackStub stub(__ isolate());
     Callable callable =
         Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate()));
-    Node* args[] = {left, right, slot_index, type_feedback_vector, context};
-    var_result.Bind(__ CallStubN(callable, args, 1));
+    var_result.Bind(__ CallStub(callable, context, left, right,
+                                __ TruncateWordToWord32(slot_index),
+                                feedback_vector));
     __ Goto(&end);
   }
   __ Bind(&end);
@@ -1305,10 +1554,9 @@
 
   Node* reg_index = __ BytecodeOperandReg(1);
   Node* left = __ LoadRegister(reg_index);
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* right = __ SmiTag(raw_int);
+  Node* right = __ BytecodeOperandImmSmi(0);
   Node* slot_index = __ BytecodeOperandIdx(2);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* feedback_vector = __ LoadFeedbackVector();
 
   // {right} is known to be a Smi.
   // If {left} is also a Smi, take the fast path.
@@ -1325,8 +1573,8 @@
     __ Branch(overflow, &slowpath, &if_notoverflow);
     __ Bind(&if_notoverflow);
     {
-      __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
-                        type_feedback_vector, slot_index);
+      __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall),
+                        feedback_vector, slot_index);
       var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
       __ Goto(&end);
     }
@@ -1337,8 +1585,9 @@
     SubtractWithFeedbackStub stub(__ isolate());
     Callable callable = Callable(
         stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate()));
-    Node* args[] = {left, right, slot_index, type_feedback_vector, context};
-    var_result.Bind(__ CallStubN(callable, args, 1));
+    var_result.Bind(__ CallStub(callable, context, left, right,
+                                __ TruncateWordToWord32(slot_index),
+                                feedback_vector));
     __ Goto(&end);
   }
   __ Bind(&end);
@@ -1355,23 +1604,22 @@
 void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(1);
   Node* left = __ LoadRegister(reg_index);
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* right = __ SmiTag(raw_int);
+  Node* right = __ BytecodeOperandImmSmi(0);
   Node* context = __ GetContext();
   Node* slot_index = __ BytecodeOperandIdx(2);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+  Node* feedback_vector = __ LoadFeedbackVector();
+  Variable var_lhs_type_feedback(assembler,
+                                 MachineRepresentation::kTaggedSigned);
   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
       context, left, &var_lhs_type_feedback);
   Node* rhs_value = __ SmiToWord32(right);
   Node* value = __ Word32Or(lhs_value, rhs_value);
   Node* result = __ ChangeInt32ToTagged(value);
-  Node* result_type =
-      __ Select(__ TaggedIsSmi(result),
-                __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
-                __ Int32Constant(BinaryOperationFeedback::kNumber));
-  __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
-                    type_feedback_vector, slot_index);
+  Node* result_type = __ SelectSmiConstant(
+      __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+      BinaryOperationFeedback::kNumber);
+  __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
+                    feedback_vector, slot_index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1383,23 +1631,22 @@
 void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(1);
   Node* left = __ LoadRegister(reg_index);
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* right = __ SmiTag(raw_int);
+  Node* right = __ BytecodeOperandImmSmi(0);
   Node* context = __ GetContext();
   Node* slot_index = __ BytecodeOperandIdx(2);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+  Node* feedback_vector = __ LoadFeedbackVector();
+  Variable var_lhs_type_feedback(assembler,
+                                 MachineRepresentation::kTaggedSigned);
   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
       context, left, &var_lhs_type_feedback);
   Node* rhs_value = __ SmiToWord32(right);
   Node* value = __ Word32And(lhs_value, rhs_value);
   Node* result = __ ChangeInt32ToTagged(value);
-  Node* result_type =
-      __ Select(__ TaggedIsSmi(result),
-                __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
-                __ Int32Constant(BinaryOperationFeedback::kNumber));
-  __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
-                    type_feedback_vector, slot_index);
+  Node* result_type = __ SelectSmiConstant(
+      __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+      BinaryOperationFeedback::kNumber);
+  __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
+                    feedback_vector, slot_index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1412,24 +1659,23 @@
 void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(1);
   Node* left = __ LoadRegister(reg_index);
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* right = __ SmiTag(raw_int);
+  Node* right = __ BytecodeOperandImmSmi(0);
   Node* context = __ GetContext();
   Node* slot_index = __ BytecodeOperandIdx(2);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+  Node* feedback_vector = __ LoadFeedbackVector();
+  Variable var_lhs_type_feedback(assembler,
+                                 MachineRepresentation::kTaggedSigned);
   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
       context, left, &var_lhs_type_feedback);
   Node* rhs_value = __ SmiToWord32(right);
   Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
   Node* value = __ Word32Shl(lhs_value, shift_count);
   Node* result = __ ChangeInt32ToTagged(value);
-  Node* result_type =
-      __ Select(__ TaggedIsSmi(result),
-                __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
-                __ Int32Constant(BinaryOperationFeedback::kNumber));
-  __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
-                    type_feedback_vector, slot_index);
+  Node* result_type = __ SelectSmiConstant(
+      __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+      BinaryOperationFeedback::kNumber);
+  __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
+                    feedback_vector, slot_index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1442,24 +1688,23 @@
 void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
   Node* reg_index = __ BytecodeOperandReg(1);
   Node* left = __ LoadRegister(reg_index);
-  Node* raw_int = __ BytecodeOperandImm(0);
-  Node* right = __ SmiTag(raw_int);
+  Node* right = __ BytecodeOperandImmSmi(0);
   Node* context = __ GetContext();
   Node* slot_index = __ BytecodeOperandIdx(2);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+  Node* feedback_vector = __ LoadFeedbackVector();
+  Variable var_lhs_type_feedback(assembler,
+                                 MachineRepresentation::kTaggedSigned);
   Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
       context, left, &var_lhs_type_feedback);
   Node* rhs_value = __ SmiToWord32(right);
   Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
   Node* value = __ Word32Sar(lhs_value, shift_count);
   Node* result = __ ChangeInt32ToTagged(value);
-  Node* result_type =
-      __ Select(__ TaggedIsSmi(result),
-                __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
-                __ Int32Constant(BinaryOperationFeedback::kNumber));
-  __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
-                    type_feedback_vector, slot_index);
+  Node* result_type = __ SelectSmiConstant(
+      __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+      BinaryOperationFeedback::kNumber);
+  __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
+                    feedback_vector, slot_index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1477,9 +1722,9 @@
   Node* value = __ GetAccumulator();
   Node* context = __ GetContext();
   Node* slot_index = __ BytecodeOperandIdx(0);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
-  Node* result = Generator::Generate(assembler, value, context,
-                                     type_feedback_vector, slot_index);
+  Node* feedback_vector = __ LoadFeedbackVector();
+  Node* result = Generator::Generate(assembler, value, context, feedback_vector,
+                                     slot_index);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1519,14 +1764,276 @@
 //
 // Increments value in the accumulator by one.
 void Interpreter::DoInc(InterpreterAssembler* assembler) {
-  DoUnaryOpWithFeedback<IncStub>(assembler);
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* value = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* slot_index = __ BytecodeOperandIdx(0);
+  Node* feedback_vector = __ LoadFeedbackVector();
+
+  // Shared entry for floating point increment.
+  Label do_finc(assembler), end(assembler);
+  Variable var_finc_value(assembler, MachineRepresentation::kFloat64);
+
+  // We might need to try again due to ToNumber conversion.
+  Variable value_var(assembler, MachineRepresentation::kTagged);
+  Variable result_var(assembler, MachineRepresentation::kTagged);
+  Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
+  Variable* loop_vars[] = {&value_var, &var_type_feedback};
+  Label start(assembler, 2, loop_vars);
+  value_var.Bind(value);
+  var_type_feedback.Bind(
+      assembler->SmiConstant(BinaryOperationFeedback::kNone));
+  assembler->Goto(&start);
+  assembler->Bind(&start);
+  {
+    value = value_var.value();
+
+    Label if_issmi(assembler), if_isnotsmi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
+
+    assembler->Bind(&if_issmi);
+    {
+      // Try fast Smi addition first.
+      Node* one = assembler->SmiConstant(Smi::FromInt(1));
+      Node* pair = assembler->IntPtrAddWithOverflow(
+          assembler->BitcastTaggedToWord(value),
+          assembler->BitcastTaggedToWord(one));
+      Node* overflow = assembler->Projection(1, pair);
+
+      // Check if the Smi addition overflowed.
+      Label if_overflow(assembler), if_notoverflow(assembler);
+      assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+      assembler->Bind(&if_notoverflow);
+      var_type_feedback.Bind(assembler->SmiOr(
+          var_type_feedback.value(),
+          assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall)));
+      result_var.Bind(
+          assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
+      assembler->Goto(&end);
+
+      assembler->Bind(&if_overflow);
+      {
+        var_finc_value.Bind(assembler->SmiToFloat64(value));
+        assembler->Goto(&do_finc);
+      }
+    }
+
+    assembler->Bind(&if_isnotsmi);
+    {
+      // Check if the value is a HeapNumber.
+      Label if_valueisnumber(assembler),
+          if_valuenotnumber(assembler, Label::kDeferred);
+      Node* value_map = assembler->LoadMap(value);
+      assembler->Branch(assembler->IsHeapNumberMap(value_map),
+                        &if_valueisnumber, &if_valuenotnumber);
+
+      assembler->Bind(&if_valueisnumber);
+      {
+        // Load the HeapNumber value.
+        var_finc_value.Bind(assembler->LoadHeapNumberValue(value));
+        assembler->Goto(&do_finc);
+      }
+
+      assembler->Bind(&if_valuenotnumber);
+      {
+        // We do not require an Or with earlier feedback here because once we
+        // convert the value to a number, we cannot reach this path. We can
+        // only reach this path on the first pass when the feedback is kNone.
+        CSA_ASSERT(assembler,
+                   assembler->SmiEqual(
+                       var_type_feedback.value(),
+                       assembler->SmiConstant(BinaryOperationFeedback::kNone)));
+
+        Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
+        Node* instance_type = assembler->LoadMapInstanceType(value_map);
+        Node* is_oddball = assembler->Word32Equal(
+            instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+        assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
+
+        assembler->Bind(&if_valueisoddball);
+        {
+          // Convert Oddball to Number and check again.
+          value_var.Bind(
+              assembler->LoadObjectField(value, Oddball::kToNumberOffset));
+          var_type_feedback.Bind(assembler->SmiConstant(
+              BinaryOperationFeedback::kNumberOrOddball));
+          assembler->Goto(&start);
+        }
+
+        assembler->Bind(&if_valuenotoddball);
+        {
+          // Convert to a Number first and try again.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_type_feedback.Bind(
+              assembler->SmiConstant(BinaryOperationFeedback::kAny));
+          value_var.Bind(assembler->CallStub(callable, context, value));
+          assembler->Goto(&start);
+        }
+      }
+    }
+  }
+
+  assembler->Bind(&do_finc);
+  {
+    Node* finc_value = var_finc_value.value();
+    Node* one = assembler->Float64Constant(1.0);
+    Node* finc_result = assembler->Float64Add(finc_value, one);
+    var_type_feedback.Bind(assembler->SmiOr(
+        var_type_feedback.value(),
+        assembler->SmiConstant(BinaryOperationFeedback::kNumber)));
+    result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&end);
+  assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
+                            slot_index);
+
+  __ SetAccumulator(result_var.value());
+  __ Dispatch();
 }
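
DoInc (and DoDec, which mirrors it) opens with a Smi fast path: add one with an overflow-checked machine add, and only fall through to the floating-point path or a ToNumber conversion when that fails. In portable terms, using the GCC/Clang __builtin_add_overflow intrinsic (illustrative only):

#include <cstdint>

// Returns true and writes *out when the increment stays in Smi range;
// returns false when the caller must fall back to a double (HeapNumber).
bool TrySmiIncrement(int32_t value, int32_t* out) {
  return !__builtin_add_overflow(value, 1, out);
}

double FloatIncrement(double value) { return value + 1.0; }
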
 
 // Dec
 //
 // Decrements value in the accumulator by one.
 void Interpreter::DoDec(InterpreterAssembler* assembler) {
-  DoUnaryOpWithFeedback<DecStub>(assembler);
+  typedef CodeStubAssembler::Label Label;
+  typedef compiler::Node Node;
+  typedef CodeStubAssembler::Variable Variable;
+
+  Node* value = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* slot_index = __ BytecodeOperandIdx(0);
+  Node* feedback_vector = __ LoadFeedbackVector();
+
+  // Shared entry for floating point decrement.
+  Label do_fdec(assembler), end(assembler);
+  Variable var_fdec_value(assembler, MachineRepresentation::kFloat64);
+
+  // We might need to try again due to ToNumber conversion.
+  Variable value_var(assembler, MachineRepresentation::kTagged);
+  Variable result_var(assembler, MachineRepresentation::kTagged);
+  Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
+  Variable* loop_vars[] = {&value_var, &var_type_feedback};
+  Label start(assembler, 2, loop_vars);
+  var_type_feedback.Bind(
+      assembler->SmiConstant(BinaryOperationFeedback::kNone));
+  value_var.Bind(value);
+  assembler->Goto(&start);
+  assembler->Bind(&start);
+  {
+    value = value_var.value();
+
+    Label if_issmi(assembler), if_isnotsmi(assembler);
+    assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
+
+    assembler->Bind(&if_issmi);
+    {
+      // Try fast Smi subtraction first.
+      Node* one = assembler->SmiConstant(Smi::FromInt(1));
+      Node* pair = assembler->IntPtrSubWithOverflow(
+          assembler->BitcastTaggedToWord(value),
+          assembler->BitcastTaggedToWord(one));
+      Node* overflow = assembler->Projection(1, pair);
+
+      // Check if the Smi subtraction overflowed.
+      Label if_overflow(assembler), if_notoverflow(assembler);
+      assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+      assembler->Bind(&if_notoverflow);
+      var_type_feedback.Bind(assembler->SmiOr(
+          var_type_feedback.value(),
+          assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall)));
+      result_var.Bind(
+          assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
+      assembler->Goto(&end);
+
+      assembler->Bind(&if_overflow);
+      {
+        var_fdec_value.Bind(assembler->SmiToFloat64(value));
+        assembler->Goto(&do_fdec);
+      }
+    }
+
+    assembler->Bind(&if_isnotsmi);
+    {
+      // Check if the value is a HeapNumber.
+      Label if_valueisnumber(assembler),
+          if_valuenotnumber(assembler, Label::kDeferred);
+      Node* value_map = assembler->LoadMap(value);
+      assembler->Branch(assembler->IsHeapNumberMap(value_map),
+                        &if_valueisnumber, &if_valuenotnumber);
+
+      assembler->Bind(&if_valueisnumber);
+      {
+        // Load the HeapNumber value.
+        var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
+        assembler->Goto(&do_fdec);
+      }
+
+      assembler->Bind(&if_valuenotnumber);
+      {
+        // We do not require an Or with earlier feedback here because once we
+        // convert the value to a number, we cannot reach this path. We can
+        // only reach this path on the first pass when the feedback is kNone.
+        CSA_ASSERT(assembler,
+                   assembler->SmiEqual(
+                       var_type_feedback.value(),
+                       assembler->SmiConstant(BinaryOperationFeedback::kNone)));
+
+        Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
+        Node* instance_type = assembler->LoadMapInstanceType(value_map);
+        Node* is_oddball = assembler->Word32Equal(
+            instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+        assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
+
+        assembler->Bind(&if_valueisoddball);
+        {
+          // Convert Oddball to Number and check again.
+          value_var.Bind(
+              assembler->LoadObjectField(value, Oddball::kToNumberOffset));
+          var_type_feedback.Bind(assembler->SmiConstant(
+              BinaryOperationFeedback::kNumberOrOddball));
+          assembler->Goto(&start);
+        }
+
+        assembler->Bind(&if_valuenotoddball);
+        {
+          // Convert to a Number first and try again.
+          Callable callable =
+              CodeFactory::NonNumberToNumber(assembler->isolate());
+          var_type_feedback.Bind(
+              assembler->SmiConstant(BinaryOperationFeedback::kAny));
+          value_var.Bind(assembler->CallStub(callable, context, value));
+          assembler->Goto(&start);
+        }
+      }
+    }
+  }
+
+  assembler->Bind(&do_fdec);
+  {
+    Node* fdec_value = var_fdec_value.value();
+    Node* one = assembler->Float64Constant(1.0);
+    Node* fdec_result = assembler->Float64Sub(fdec_value, one);
+    var_type_feedback.Bind(assembler->SmiOr(
+        var_type_feedback.value(),
+        assembler->SmiConstant(BinaryOperationFeedback::kNumber)));
+    result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result));
+    assembler->Goto(&end);
+  }
+
+  assembler->Bind(&end);
+  assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
+                            slot_index);
+
+  __ SetAccumulator(result_var.value());
+  __ Dispatch();
 }
 
 // LogicalNot
@@ -1625,6 +2132,19 @@
   DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
 }
 
+// GetSuperConstructor <reg>
+//
+// Get the super constructor from the object referenced by the accumulator.
+// The result is stored in register |reg|.
+void Interpreter::DoGetSuperConstructor(InterpreterAssembler* assembler) {
+  Node* active_function = __ GetAccumulator();
+  Node* context = __ GetContext();
+  Node* result = __ GetSuperConstructor(active_function, context);
+  Node* reg = __ BytecodeOperandReg(0);
+  __ StoreRegister(result, reg);
+  __ Dispatch();
+}
+
 void Interpreter::DoJSCall(InterpreterAssembler* assembler,
                            TailCallMode tail_call_mode) {
   Node* function_reg = __ BytecodeOperandReg(0);
@@ -1635,11 +2155,11 @@
   Node* receiver_count = __ Int32Constant(1);
   Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
   Node* slot_id = __ BytecodeOperandIdx(3);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* feedback_vector = __ LoadFeedbackVector();
   Node* context = __ GetContext();
   Node* result =
       __ CallJSWithFeedback(function, context, receiver_arg, args_count,
-                            slot_id, type_feedback_vector, tail_call_mode);
+                            slot_id, feedback_vector, tail_call_mode);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1756,14 +2276,56 @@
   __ Dispatch();
 }
 
-// New <constructor> <first_arg> <arg_count>
+// CallWithSpread <callable> <first_arg> <arg_count>
 //
-// Call operator new with |constructor| and the first argument in
+// Call a JSFunction or Callable in |callable| with the receiver in
+// |first_arg| and |arg_count - 1| arguments in subsequent registers. The
+// final argument is always a spread.
+//
+void Interpreter::DoCallWithSpread(InterpreterAssembler* assembler) {
+  Node* callable_reg = __ BytecodeOperandReg(0);
+  Node* callable = __ LoadRegister(callable_reg);
+  Node* receiver_reg = __ BytecodeOperandReg(1);
+  Node* receiver_arg = __ RegisterLocation(receiver_reg);
+  Node* receiver_args_count = __ BytecodeOperandCount(2);
+  Node* receiver_count = __ Int32Constant(1);
+  Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
+  Node* context = __ GetContext();
+
+  // Call into the CallWithSpread runtime function, which performs the
+  // entire call, including expanding the final spread argument.
+  Node* result =
+      __ CallJSWithSpread(callable, context, receiver_arg, args_count);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+// ConstructWithSpread <constructor> <first_arg> <arg_count>
+//
+// Call the constructor in |constructor| with the first argument in register
+// |first_arg| and |arg_count| arguments in subsequent registers. The final
+// argument is always a spread. The new.target is in the accumulator.
+//
+void Interpreter::DoConstructWithSpread(InterpreterAssembler* assembler) {
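+  // Per the Construct* bytecode convention, new.target is passed in the
+  // accumulator rather than in a register operand.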
+  Node* new_target = __ GetAccumulator();
+  Node* constructor_reg = __ BytecodeOperandReg(0);
+  Node* constructor = __ LoadRegister(constructor_reg);
+  Node* first_arg_reg = __ BytecodeOperandReg(1);
+  Node* first_arg = __ RegisterLocation(first_arg_reg);
+  Node* args_count = __ BytecodeOperandCount(2);
+  Node* context = __ GetContext();
+  Node* result = __ ConstructWithSpread(constructor, context, new_target,
+                                        first_arg, args_count);
+  __ SetAccumulator(result);
+  __ Dispatch();
+}
+
+// Construct <constructor> <first_arg> <arg_count>
+//
+// Call operator construct with |constructor| and the first argument in
 // register |first_arg| and |arg_count| arguments in subsequent
 // registers. The new.target is in the accumulator.
 //
-void Interpreter::DoNew(InterpreterAssembler* assembler) {
-  Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
+void Interpreter::DoConstruct(InterpreterAssembler* assembler) {
   Node* new_target = __ GetAccumulator();
   Node* constructor_reg = __ BytecodeOperandReg(0);
   Node* constructor = __ LoadRegister(constructor_reg);
@@ -1771,10 +2333,10 @@
   Node* first_arg = __ RegisterLocation(first_arg_reg);
   Node* args_count = __ BytecodeOperandCount(2);
   Node* slot_id = __ BytecodeOperandIdx(3);
-  Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+  Node* feedback_vector = __ LoadFeedbackVector();
   Node* context = __ GetContext();
-  Node* result = __ CallConstruct(constructor, context, new_target, first_arg,
-                                  args_count, slot_id, type_feedback_vector);
+  Node* result = __ Construct(constructor, context, new_target, first_arg,
+                              args_count, slot_id, feedback_vector);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -1846,11 +2408,90 @@
   DoCompareOp(Token::INSTANCEOF, assembler);
 }
 
+// TestUndetectable <src>
+//
+// Test if the value in the <src> register equals null/undefined. This is
+// done by checking the undetectable bit on the map of the object.
+void Interpreter::DoTestUndetectable(InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* object = __ LoadRegister(reg_index);
+
+  Label not_equal(assembler), end(assembler);
+  // If the object is a Smi then return false.
+  __ GotoIf(__ TaggedIsSmi(object), &not_equal);
+
+  // If it is a HeapObject, load the map and check for the undetectable bit.
+  Node* map = __ LoadMap(object);
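+  // The undetectable bit is set on the maps of null, undefined and
+  // document.all-style objects, so this single bit test covers all of them.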
+  Node* map_bitfield = __ LoadMapBitField(map);
+  Node* map_undetectable =
+      __ Word32And(map_bitfield, __ Int32Constant(1 << Map::kIsUndetectable));
+  __ GotoIf(__ Word32Equal(map_undetectable, __ Int32Constant(0)), &not_equal);
+
+  __ SetAccumulator(__ BooleanConstant(true));
+  __ Goto(&end);
+
+  __ Bind(&not_equal);
+  {
+    __ SetAccumulator(__ BooleanConstant(false));
+    __ Goto(&end);
+  }
+
+  __ Bind(&end);
+  __ Dispatch();
+}
+
+// TestNull <src>
+//
+// Test if the value in the <src> register is strictly equal to null.
+void Interpreter::DoTestNull(InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* object = __ LoadRegister(reg_index);
+  Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
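+  // null is a singleton oddball, so strict equality reduces to a tagged
+  // pointer comparison.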
+
+  Label equal(assembler), end(assembler);
+  __ GotoIf(__ WordEqual(object, null_value), &equal);
+  __ SetAccumulator(__ BooleanConstant(false));
+  __ Goto(&end);
+
+  __ Bind(&equal);
+  {
+    __ SetAccumulator(__ BooleanConstant(true));
+    __ Goto(&end);
+  }
+
+  __ Bind(&end);
+  __ Dispatch();
+}
+
+// TestUndefined <src>
+//
+// Test if the value in the <src> register is strictly equal to undefined.
+void Interpreter::DoTestUndefined(InterpreterAssembler* assembler) {
+  Node* reg_index = __ BytecodeOperandReg(0);
+  Node* object = __ LoadRegister(reg_index);
+  Node* undefined_value =
+      __ HeapConstant(isolate_->factory()->undefined_value());
+
+  Label equal(assembler), end(assembler);
+  __ GotoIf(__ WordEqual(object, undefined_value), &equal);
+  __ SetAccumulator(__ BooleanConstant(false));
+  __ Goto(&end);
+
+  __ Bind(&equal);
+  {
+    __ SetAccumulator(__ BooleanConstant(true));
+    __ Goto(&end);
+  }
+
+  __ Bind(&end);
+  __ Dispatch();
+}
+
 // Jump <imm>
 //
 // Jump by number of bytes represented by the immediate operand |imm|.
 void Interpreter::DoJump(InterpreterAssembler* assembler) {
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandUImmWord(0);
   __ Jump(relative_jump);
 }
 
@@ -1866,46 +2507,58 @@
 // JumpIfTrue <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the
-// accumulator contains true.
+// accumulator contains true. This only works for boolean inputs, and
+// will misbehave if passed arbitrary input values.
 void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandUImmWord(0);
   Node* true_value = __ BooleanConstant(true);
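+  // CSA_ASSERT only generates checks in debug builds; release builds simply
+  // assume the accumulator holds a boolean here.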
+  CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
+  CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
 }
 
 // JumpIfTrueConstant <idx>
 //
 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the accumulator contains true.
+// if the accumulator contains true. This only works for boolean inputs, and
+// will misbehave if passed arbitrary input values.
 void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
   Node* true_value = __ BooleanConstant(true);
+  CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
+  CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
   __ JumpIfWordEqual(accumulator, true_value, relative_jump);
 }
 
 // JumpIfFalse <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the
-// accumulator contains false.
+// accumulator contains false. This only works for boolean inputs, and
+// will misbehave if passed arbitrary input values.
 void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandUImmWord(0);
   Node* false_value = __ BooleanConstant(false);
+  CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
+  CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
   __ JumpIfWordEqual(accumulator, false_value, relative_jump);
 }
 
 // JumpIfFalseConstant <idx>
 //
 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the accumulator contains false.
+// if the accumulator contains false. This only works for boolean inputs, and
+// will misbehave if passed arbitrary input values.
 void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* index = __ BytecodeOperandIdx(0);
   Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
   Node* false_value = __ BooleanConstant(false);
+  CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
+  CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
   __ JumpIfWordEqual(accumulator, false_value, relative_jump);
 }
 
@@ -1915,7 +2568,7 @@
 // referenced by the accumulator is true when the object is cast to boolean.
 void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandUImmWord(0);
   Label if_true(assembler), if_false(assembler);
   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
   __ Bind(&if_true);
@@ -1948,7 +2601,7 @@
 // referenced by the accumulator is false when the object is cast to boolean.
 void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
   Node* value = __ GetAccumulator();
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandUImmWord(0);
   Label if_true(assembler), if_false(assembler);
   __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
   __ Bind(&if_true);
@@ -1982,7 +2635,7 @@
 void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandUImmWord(0);
   __ JumpIfWordEqual(accumulator, null_value, relative_jump);
 }
 
@@ -2006,7 +2659,7 @@
   Node* accumulator = __ GetAccumulator();
   Node* undefined_value =
       __ HeapConstant(isolate_->factory()->undefined_value());
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandUImmWord(0);
   __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
 }
 
@@ -2023,6 +2676,49 @@
   __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
 }
 
+// JumpIfJSReceiver <imm>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is a JSReceiver.
+void Interpreter::DoJumpIfJSReceiver(InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* relative_jump = __ BytecodeOperandUImmWord(0);
+
+  Label if_object(assembler), if_notobject(assembler, Label::kDeferred),
+      if_notsmi(assembler);
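+  // Smis are never JSReceivers, so a Smi accumulator falls through to
+  // dispatch without taking the jump.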
+  __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
+
+  __ Bind(&if_notsmi);
+  __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject);
+  __ Bind(&if_object);
+  __ Jump(relative_jump);
+
+  __ Bind(&if_notobject);
+  __ Dispatch();
+}
+
+// JumpIfJSReceiverConstant <idx>
+//
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool if
+// the object referenced by the accumulator is a JSReceiver.
+void Interpreter::DoJumpIfJSReceiverConstant(InterpreterAssembler* assembler) {
+  Node* accumulator = __ GetAccumulator();
+  Node* index = __ BytecodeOperandIdx(0);
+  Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
+
+  Label if_object(assembler), if_notobject(assembler), if_notsmi(assembler);
+  __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
+
+  __ Bind(&if_notsmi);
+  __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject);
+
+  __ Bind(&if_object);
+  __ Jump(relative_jump);
+
+  __ Bind(&if_notobject);
+  __ Dispatch();
+}
+
 // JumpIfNotHole <imm>
 //
 // Jump by number of bytes represented by an immediate operand if the object
@@ -2030,7 +2726,7 @@
 void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
   Node* accumulator = __ GetAccumulator();
   Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandUImmWord(0);
   __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
 }
 
@@ -2052,7 +2748,7 @@
 // performs a loop nesting check and potentially triggers OSR in case the
 // current OSR level matches (or exceeds) the specified |loop_depth|.
 void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
-  Node* relative_jump = __ BytecodeOperandImm(0);
+  Node* relative_jump = __ BytecodeOperandUImmWord(0);
   Node* loop_depth = __ BytecodeOperandImm(1);
   Node* osr_level = __ LoadOSRNestingLevel();
 
@@ -2063,7 +2759,7 @@
   __ Branch(condition, &ok, &osr_armed);
 
   __ Bind(&ok);
-  __ Jump(relative_jump);
+  __ JumpBackward(relative_jump);
 
   __ Bind(&osr_armed);
   {
@@ -2071,7 +2767,7 @@
     Node* target = __ HeapConstant(callable.code());
     Node* context = __ GetContext();
     __ CallStub(callable.descriptor(), target, context);
-    __ Jump(relative_jump);
+    __ JumpBackward(relative_jump);
   }
 }
 
@@ -2082,14 +2778,13 @@
 void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* pattern = __ LoadConstantPoolEntry(index);
-  Node* literal_index_raw = __ BytecodeOperandIdx(1);
-  Node* literal_index = __ SmiTag(literal_index_raw);
-  Node* flags_raw = __ BytecodeOperandFlag(2);
-  Node* flags = __ SmiTag(flags_raw);
+  Node* literal_index = __ BytecodeOperandIdxSmi(1);
+  Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2));
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
-  Node* result = FastCloneRegExpStub::Generate(
-      assembler, closure, literal_index, pattern, flags, context);
+  ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+  Node* result = constructor_assembler.EmitFastCloneRegExp(
+      closure, literal_index, pattern, flags, context);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -2099,35 +2794,31 @@
 // Creates an array literal for literal index <literal_idx> with
 // CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
 void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
-  Node* literal_index_raw = __ BytecodeOperandIdx(1);
-  Node* literal_index = __ SmiTag(literal_index_raw);
+  Node* literal_index = __ BytecodeOperandIdxSmi(1);
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
   Node* bytecode_flags = __ BytecodeOperandFlag(2);
 
   Label fast_shallow_clone(assembler),
       call_runtime(assembler, Label::kDeferred);
-  Node* use_fast_shallow_clone = __ Word32And(
-      bytecode_flags,
-      __ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
-  __ Branch(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
+  __ Branch(__ IsSetWord32<CreateArrayLiteralFlags::FastShallowCloneBit>(
+                bytecode_flags),
+            &fast_shallow_clone, &call_runtime);
 
   __ Bind(&fast_shallow_clone);
   {
-    DCHECK(FLAG_allocation_site_pretenuring);
-    Node* result = FastCloneShallowArrayStub::Generate(
-        assembler, closure, literal_index, context, &call_runtime,
-        TRACK_ALLOCATION_SITE);
+    ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+    Node* result = constructor_assembler.EmitFastCloneShallowArray(
+        closure, literal_index, context, &call_runtime, TRACK_ALLOCATION_SITE);
     __ SetAccumulator(result);
     __ Dispatch();
   }
 
   __ Bind(&call_runtime);
   {
-    STATIC_ASSERT(CreateArrayLiteralFlags::FlagsBits::kShift == 0);
-    Node* flags_raw = __ Word32And(
-        bytecode_flags,
-        __ Int32Constant(CreateArrayLiteralFlags::FlagsBits::kMask));
+    Node* flags_raw =
+        __ DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
+            bytecode_flags);
     Node* flags = __ SmiTag(flags_raw);
     Node* index = __ BytecodeOperandIdx(0);
     Node* constant_elements = __ LoadConstantPoolEntry(index);
@@ -2144,24 +2835,24 @@
 // Creates an object literal for literal index <literal_idx> with
 // CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
 void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
-  Node* literal_index_raw = __ BytecodeOperandIdx(1);
-  Node* literal_index = __ SmiTag(literal_index_raw);
+  Node* literal_index = __ BytecodeOperandIdxSmi(1);
   Node* bytecode_flags = __ BytecodeOperandFlag(2);
   Node* closure = __ LoadRegister(Register::function_closure());
 
   // Check if we can do a fast clone or have to call the runtime.
   Label if_fast_clone(assembler),
       if_not_fast_clone(assembler, Label::kDeferred);
-  Node* fast_clone_properties_count =
-      __ DecodeWord32<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
-          bytecode_flags);
-  __ Branch(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
+  Node* fast_clone_properties_count = __ DecodeWordFromWord32<
+      CreateObjectLiteralFlags::FastClonePropertiesCountBits>(bytecode_flags);
+  __ Branch(__ WordNotEqual(fast_clone_properties_count, __ IntPtrConstant(0)),
+            &if_fast_clone, &if_not_fast_clone);
 
   __ Bind(&if_fast_clone);
   {
     // If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
-    Node* result = FastCloneShallowObjectStub::GenerateFastPath(
-        assembler, &if_not_fast_clone, closure, literal_index,
+    ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+    Node* result = constructor_assembler.EmitFastCloneShallowObject(
+        &if_not_fast_clone, closure, literal_index,
         fast_clone_properties_count);
     __ StoreRegister(result, __ BytecodeOperandReg(3));
     __ Dispatch();
@@ -2174,10 +2865,9 @@
     Node* constant_elements = __ LoadConstantPoolEntry(index);
     Node* context = __ GetContext();
 
-    STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
-    Node* flags_raw = __ Word32And(
-        bytecode_flags,
-        __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
+    Node* flags_raw =
+        __ DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
+            bytecode_flags);
     Node* flags = __ SmiTag(flags_raw);
 
     Node* result =
@@ -2189,31 +2879,38 @@
   }
 }
 
-// CreateClosure <index> <tenured>
+// CreateClosure <index> <slot> <tenured>
 //
 // Creates a new closure for SharedFunctionInfo at position |index| in the
 // constant pool and with the PretenureFlag <tenured>.
 void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
   Node* index = __ BytecodeOperandIdx(0);
   Node* shared = __ LoadConstantPoolEntry(index);
-  Node* flags = __ BytecodeOperandFlag(1);
+  Node* flags = __ BytecodeOperandFlag(2);
   Node* context = __ GetContext();
 
   Label call_runtime(assembler, Label::kDeferred);
-  Node* fast_new_closure = __ Word32And(
-      flags, __ Int32Constant(CreateClosureFlags::FastNewClosureBit::kMask));
-  __ GotoUnless(fast_new_closure, &call_runtime);
-  __ SetAccumulator(FastNewClosureStub::Generate(assembler, shared, context));
+  __ GotoIfNot(__ IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags),
+               &call_runtime);
+  ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
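+  // Operand 1 is the feedback slot; tag it as a Smi before handing it to the
+  // stub together with the feedback vector.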
+  Node* vector_index = __ BytecodeOperandIdx(1);
+  vector_index = __ SmiTag(vector_index);
+  Node* feedback_vector = __ LoadFeedbackVector();
+  __ SetAccumulator(constructor_assembler.EmitFastNewClosure(
+      shared, feedback_vector, vector_index, context));
   __ Dispatch();
 
   __ Bind(&call_runtime);
   {
-    STATIC_ASSERT(CreateClosureFlags::PretenuredBit::kShift == 0);
-    Node* tenured_raw = __ Word32And(
-        flags, __ Int32Constant(CreateClosureFlags::PretenuredBit::kMask));
+    Node* tenured_raw =
+        __ DecodeWordFromWord32<CreateClosureFlags::PretenuredBit>(flags);
     Node* tenured = __ SmiTag(tenured_raw);
-    Node* result = __ CallRuntime(Runtime::kInterpreterNewClosure, context,
-                                  shared, tenured);
+    feedback_vector = __ LoadFeedbackVector();
+    vector_index = __ BytecodeOperandIdx(1);
+    vector_index = __ SmiTag(vector_index);
+    Node* result =
+        __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared,
+                       feedback_vector, vector_index, tenured);
     __ SetAccumulator(result);
     __ Dispatch();
   }
@@ -2259,8 +2956,22 @@
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* slots = __ BytecodeOperandUImm(0);
   Node* context = __ GetContext();
-  __ SetAccumulator(
-      FastNewFunctionContextStub::Generate(assembler, closure, slots, context));
+  ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+  __ SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
+      closure, slots, context, FUNCTION_SCOPE));
+  __ Dispatch();
+}
+
+// CreateEvalContext <slots>
+//
+// Creates a new context with the given number of |slots| for an eval closure.
+void Interpreter::DoCreateEvalContext(InterpreterAssembler* assembler) {
+  Node* closure = __ LoadRegister(Register::function_closure());
+  Node* slots = __ BytecodeOperandUImm(0);
+  Node* context = __ GetContext();
+  ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+  __ SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
+      closure, slots, context, EVAL_SCOPE));
   __ Dispatch();
 }
 
@@ -2306,10 +3017,9 @@
 
   __ Bind(&if_not_duplicate_parameters);
   {
-    // TODO(rmcilroy): Inline FastNewSloppyArguments when it is a TurboFan stub.
-    Callable callable = CodeFactory::FastNewSloppyArguments(isolate_, true);
-    Node* target = __ HeapConstant(callable.code());
-    Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+    ArgumentsBuiltinsAssembler constructor_assembler(assembler->state());
+    Node* result =
+        constructor_assembler.EmitFastNewSloppyArguments(context, closure);
     __ SetAccumulator(result);
     __ Dispatch();
   }
@@ -2327,12 +3037,11 @@
 //
 // Creates a new unmapped arguments object.
 void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
-  // TODO(rmcilroy): Inline FastNewStrictArguments when it is a TurboFan stub.
-  Callable callable = CodeFactory::FastNewStrictArguments(isolate_, true);
-  Node* target = __ HeapConstant(callable.code());
   Node* context = __ GetContext();
   Node* closure = __ LoadRegister(Register::function_closure());
-  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+  ArgumentsBuiltinsAssembler builtins_assembler(assembler->state());
+  Node* result =
+      builtins_assembler.EmitFastNewStrictArguments(context, closure);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -2341,12 +3050,10 @@
 //
 // Creates a new rest parameter array.
 void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
-  // TODO(rmcilroy): Inline FastNewRestArguments when it is a TurboFan stub.
-  Callable callable = CodeFactory::FastNewRestParameter(isolate_, true);
-  Node* target = __ HeapConstant(callable.code());
   Node* closure = __ LoadRegister(Register::function_closure());
   Node* context = __ GetContext();
-  Node* result = __ CallStub(callable.descriptor(), target, context, closure);
+  ArgumentsBuiltinsAssembler builtins_assembler(assembler->state());
+  Node* result = builtins_assembler.EmitFastNewRestParameter(context, closure);
   __ SetAccumulator(result);
   __ Dispatch();
 }
@@ -2371,6 +3078,22 @@
   }
 }
 
+// SetPendingMessage
+//
+// Sets the pending message to the value in the accumulator, and returns the
+// previous pending message in the accumulator.
+void Interpreter::DoSetPendingMessage(InterpreterAssembler* assembler) {
+  Node* pending_message = __ ExternalConstant(
+      ExternalReference::address_of_pending_message_obj(isolate_));
+  Node* previous_message =
+      __ Load(MachineType::TaggedPointer(), pending_message);
+  Node* new_message = __ GetAccumulator();
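+  // The pending-message slot lives off-heap in the isolate, so the store
+  // needs no write barrier.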
+  __ StoreNoWriteBarrier(MachineRepresentation::kTaggedPointer, pending_message,
+                         new_message);
+  __ SetAccumulator(previous_message);
+  __ Dispatch();
+}
+
 // Throw
 //
 // Throws the exception in the accumulator.
@@ -2407,7 +3130,7 @@
 // Call runtime to handle debugger statement.
 void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
   Node* context = __ GetContext();
-  __ CallRuntime(Runtime::kHandleDebuggerStatement, context);
+  __ CallStub(CodeFactory::HandleDebuggerStatement(isolate_), context);
   __ Dispatch();
 }
 
@@ -2420,6 +3143,7 @@
     Node* accumulator = __ GetAccumulator();                                  \
     Node* original_handler =                                                  \
         __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
+    __ MaybeDropFrames(context);                                              \
     __ DispatchToBytecodeHandler(original_handler);                           \
   }
 DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
@@ -2445,68 +3169,42 @@
 // |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
 // and cache_length respectively.
 void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
-  Node* object_reg = __ BytecodeOperandReg(0);
-  Node* receiver = __ LoadRegister(object_reg);
+  Node* object_register = __ BytecodeOperandReg(0);
+  Node* output_register = __ BytecodeOperandReg(1);
+  Node* receiver = __ LoadRegister(object_register);
   Node* context = __ GetContext();
-  Node* const zero_smi = __ SmiConstant(Smi::kZero);
 
-  Label nothing_to_iterate(assembler, Label::kDeferred),
-      use_enum_cache(assembler), use_runtime(assembler, Label::kDeferred);
+  Node* cache_type;
+  Node* cache_array;
+  Node* cache_length;
+  Label call_runtime(assembler, Label::kDeferred),
+      nothing_to_iterate(assembler, Label::kDeferred);
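+  // EmitForInPrepare fills in the (cache_type, cache_array, cache_length)
+  // triple on the fast path, or branches to one of the deferred labels.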
 
-  if (FLAG_debug_code) {
-    Label already_receiver(assembler), abort(assembler);
-    Node* instance_type = __ LoadInstanceType(receiver);
-    __ Branch(__ IsJSReceiverInstanceType(instance_type), &already_receiver,
-              &abort);
-    __ Bind(&abort);
-    {
-      __ Abort(kExpectedJSReceiver);
-      // TODO(klaasb) remove this unreachable Goto once Abort ends the block
-      __ Goto(&already_receiver);
-    }
-    __ Bind(&already_receiver);
-  }
+  ObjectBuiltinsAssembler object_assembler(assembler->state());
+  std::tie(cache_type, cache_array, cache_length) =
+      object_assembler.EmitForInPrepare(receiver, context, &call_runtime,
+                                        &nothing_to_iterate);
 
-  __ CheckEnumCache(receiver, &use_enum_cache, &use_runtime);
+  BuildForInPrepareResult(output_register, cache_type, cache_array,
+                          cache_length, assembler);
+  __ Dispatch();
 
-  __ Bind(&use_enum_cache);
-  {
-    // The enum cache is valid.  Load the map of the object being
-    // iterated over and use the cache for the iteration.
-    Node* cache_type = __ LoadMap(receiver);
-    Node* cache_length = __ EnumLength(cache_type);
-    __ GotoIf(assembler->WordEqual(cache_length, zero_smi),
-              &nothing_to_iterate);
-    Node* descriptors = __ LoadMapDescriptors(cache_type);
-    Node* cache_offset =
-        __ LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset);
-    Node* cache_array = __ LoadObjectField(
-        cache_offset, DescriptorArray::kEnumCacheBridgeCacheOffset);
-    Node* output_register = __ BytecodeOperandReg(1);
-    BuildForInPrepareResult(output_register, cache_type, cache_array,
-                            cache_length, assembler);
-    __ Dispatch();
-  }
-
-  __ Bind(&use_runtime);
+  __ Bind(&call_runtime);
   {
     Node* result_triple =
         __ CallRuntime(Runtime::kForInPrepare, context, receiver);
     Node* cache_type = __ Projection(0, result_triple);
     Node* cache_array = __ Projection(1, result_triple);
     Node* cache_length = __ Projection(2, result_triple);
-    Node* output_register = __ BytecodeOperandReg(1);
     BuildForInPrepareResult(output_register, cache_type, cache_array,
                             cache_length, assembler);
     __ Dispatch();
   }
-
   __ Bind(&nothing_to_iterate);
   {
     // Receiver is null or undefined or descriptors are zero length.
-    Node* output_register = __ BytecodeOperandReg(1);
-    BuildForInPrepareResult(output_register, zero_smi, zero_smi, zero_smi,
-                            assembler);
+    Node* zero = __ SmiConstant(0);
+    BuildForInPrepareResult(output_register, zero, zero, zero, assembler);
     __ Dispatch();
   }
 }
@@ -2530,7 +3228,7 @@
 
   // Check if we can use the for-in fast path potentially using the enum cache.
   Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
-  Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
+  Node* receiver_map = __ LoadMap(receiver);
   __ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
   __ Bind(&if_fast);
   {
@@ -2542,10 +3240,10 @@
   {
     // Record the fact that we hit the for-in slow path.
     Node* vector_index = __ BytecodeOperandIdx(3);
-    Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+    Node* feedback_vector = __ LoadFeedbackVector();
     Node* megamorphic_sentinel =
-        __ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
-    __ StoreFixedArrayElement(type_feedback_vector, vector_index,
+        __ HeapConstant(FeedbackVector::MegamorphicSentinel(isolate_));
+    __ StoreFixedArrayElement(feedback_vector, vector_index,
                               megamorphic_sentinel, SKIP_WRITE_BARRIER);
 
     // Need to filter the {key} for the {receiver}.
@@ -2636,14 +3334,13 @@
       ExternalReference::debug_last_step_action_address(isolate_));
   Node* step_action = __ Load(MachineType::Int8(), step_action_address);
   STATIC_ASSERT(StepIn > StepNext);
-  STATIC_ASSERT(StepFrame > StepNext);
-  STATIC_ASSERT(LastStepAction == StepFrame);
+  STATIC_ASSERT(LastStepAction == StepIn);
   Node* step_next = __ Int32Constant(StepNext);
   __ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
   __ Bind(&ok);
 
   Node* array =
-      __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset);
+      __ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset);
   Node* context = __ GetContext();
   Node* state = __ GetAccumulator();
 
@@ -2660,7 +3357,7 @@
   __ Bind(&if_stepping);
   {
     Node* context = __ GetContext();
-    __ CallRuntime(Runtime::kDebugRecordAsyncFunction, context, generator);
+    __ CallRuntime(Runtime::kDebugRecordGenerator, context, generator);
     __ Goto(&ok);
   }
 }
@@ -2675,7 +3372,7 @@
   Node* generator = __ LoadRegister(generator_reg);
 
   __ ImportRegisterFile(
-      __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset));
+      __ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset));
 
   Node* old_state =
       __ LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
diff --git a/src/interpreter/interpreter.h b/src/interpreter/interpreter.h
index b10ae2e..ac36815 100644
--- a/src/interpreter/interpreter.h
+++ b/src/interpreter/interpreter.h
@@ -76,6 +76,14 @@
   BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
 #undef DECLARE_BYTECODE_HANDLER_GENERATOR
 
+  typedef void (Interpreter::*BytecodeGeneratorFunc)(InterpreterAssembler*);
+
+  // Generates handler for given |bytecode| and |operand_scale| using
+  // |generator| and installs it into the dispatch table.
+  void InstallBytecodeHandler(Zone* zone, Bytecode bytecode,
+                              OperandScale operand_scale,
+                              BytecodeGeneratorFunc generator);
+
   // Generates code to perform the binary operation via |Generator|.
   template <class Generator>
   void DoBinaryOpWithFeedback(InterpreterAssembler* assembler);
@@ -140,9 +148,8 @@
                        InterpreterAssembler* assembler);
 
   // Generates code to load a global.
-  compiler::Node* BuildLoadGlobal(Callable ic, compiler::Node* context,
-                                  compiler::Node* feedback_slot,
-                                  InterpreterAssembler* assembler);
+  void BuildLoadGlobal(int slot_operand_index, int name_operand_index,
+                       TypeofMode typeof_mode, InterpreterAssembler* assembler);
 
   // Generates code to prepare the result for ForInPrepare. Cache data
   // are placed into the consecutive series of registers starting at
diff --git a/src/interpreter/mkpeephole.cc b/src/interpreter/mkpeephole.cc
index 62d3a77..e6c3b76 100644
--- a/src/interpreter/mkpeephole.cc
+++ b/src/interpreter/mkpeephole.cc
@@ -192,6 +192,28 @@
     }
   }
 
+  // Fuse LdaNull/LdaUndefined followed by an equality comparison into
+  // TestUndetectable. Testing undetectability is a simple check on the map,
+  // which is more efficient than the full comparison operation.
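+  // For example, the sequence LdaNull; TestEqual <r> is rewritten to the
+  // single bytecode TestUndetectable <r>.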
+  if (last == Bytecode::kLdaNull || last == Bytecode::kLdaUndefined) {
+    if (current == Bytecode::kTestEqual) {
+      return {PeepholeAction::kTransformEqualityWithNullOrUndefinedAction,
+              Bytecode::kTestUndetectable};
+    }
+  }
+
+  // Fuse LdaNull/LdaUndefined followed by a strict equals with
+  // TestNull/TestUndefined.
+  if (current == Bytecode::kTestEqualStrict) {
+    if (last == Bytecode::kLdaNull) {
+      return {PeepholeAction::kTransformEqualityWithNullOrUndefinedAction,
+              Bytecode::kTestNull};
+    } else if (last == Bytecode::kLdaUndefined) {
+      return {PeepholeAction::kTransformEqualityWithNullOrUndefinedAction,
+              Bytecode::kTestUndefined};
+    }
+  }
+
   // If there is no last bytecode to optimize against, store the incoming
   // bytecode or for jumps emit incoming bytecode immediately.
   if (last == Bytecode::kIllegal) {
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index a148968..02993cf 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -133,23 +133,23 @@
          Smi::cast(species_cell->value())->value() == kProtectorValid;
 }
 
-bool Isolate::IsHasInstanceLookupChainIntact() {
-  PropertyCell* has_instance_cell = heap()->has_instance_protector();
-  return has_instance_cell->value() == Smi::FromInt(kProtectorValid);
-}
-
 bool Isolate::IsStringLengthOverflowIntact() {
-  PropertyCell* has_instance_cell = heap()->string_length_protector();
-  return has_instance_cell->value() == Smi::FromInt(kProtectorValid);
+  PropertyCell* string_length_cell = heap()->string_length_protector();
+  return string_length_cell->value() == Smi::FromInt(kProtectorValid);
 }
 
 bool Isolate::IsFastArrayIterationIntact() {
-  Cell* fast_iteration = heap()->fast_array_iteration_protector();
-  return fast_iteration->value() == Smi::FromInt(kProtectorValid);
+  Cell* fast_iteration_cell = heap()->fast_array_iteration_protector();
+  return fast_iteration_cell->value() == Smi::FromInt(kProtectorValid);
+}
+
+bool Isolate::IsArrayBufferNeuteringIntact() {
+  PropertyCell* buffer_neutering = heap()->array_buffer_neutering_protector();
+  return buffer_neutering->value() == Smi::FromInt(kProtectorValid);
 }
 
 bool Isolate::IsArrayIteratorLookupChainIntact() {
-  Cell* array_iterator_cell = heap()->array_iterator_protector();
+  PropertyCell* array_iterator_cell = heap()->array_iterator_protector();
   return array_iterator_cell->value() == Smi::FromInt(kProtectorValid);
 }
 
diff --git a/src/isolate.cc b/src/isolate.cc
index 0eab398..bac6130 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -9,6 +9,8 @@
 #include <fstream>  // NOLINT(readability/streams)
 #include <sstream>
 
+#include "src/assembler-inl.h"
+#include "src/ast/ast-value-factory.h"
 #include "src/ast/context-slot-cache.h"
 #include "src/base/hashmap.h"
 #include "src/base/platform/platform.h"
@@ -20,7 +22,7 @@
 #include "src/codegen.h"
 #include "src/compilation-cache.h"
 #include "src/compilation-statistics.h"
-#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
+#include "src/compiler-dispatcher/compiler-dispatcher.h"
 #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
 #include "src/crankshaft/hydrogen.h"
 #include "src/debug/debug.h"
@@ -47,6 +49,7 @@
 #include "src/version.h"
 #include "src/vm-state-inl.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 #include "src/zone/accounting-allocator.h"
 
 namespace v8 {
@@ -358,7 +361,7 @@
   // Determines whether the given stack frame should be displayed in a stack
   // trace.
   bool IsVisibleInStackTrace(JSFunction* fun) {
-    return ShouldIncludeFrame(fun) && IsNotInNativeScript(fun) &&
+    return ShouldIncludeFrame(fun) && IsNotHidden(fun) &&
            IsInSameSecurityContext(fun);
   }
 
@@ -386,12 +389,12 @@
     return false;
   }
 
-  bool IsNotInNativeScript(JSFunction* fun) {
-    // Functions defined in native scripts are not visible unless directly
+  bool IsNotHidden(JSFunction* fun) {
+    // Functions not defined in user scripts are not visible unless directly
     // exposed, in which case the native flag is set.
     // The --builtins-in-stack-traces command line flag allows including
     // internal call sites in the stack trace for debugging purposes.
-    if (!FLAG_builtins_in_stack_traces && fun->shared()->IsBuiltin()) {
+    if (!FLAG_builtins_in_stack_traces && !fun->shared()->IsUserJavaScript()) {
       return fun->shared()->native();
     }
     return true;
@@ -460,13 +463,14 @@
         List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
         js_frame->Summarize(&frames);
         for (int i = frames.length() - 1; i >= 0; i--) {
-          Handle<JSFunction> fun = frames[i].function();
+          const auto& summ = frames[i].AsJavaScript();
+          Handle<JSFunction> fun = summ.function();
 
           // Filter out internal frames that we do not want to show.
           if (!helper.IsVisibleInStackTrace(*fun)) continue;
 
           Handle<Object> recv = frames[i].receiver();
-          Handle<AbstractCode> abstract_code = frames[i].abstract_code();
+          Handle<AbstractCode> abstract_code = summ.abstract_code();
           const int offset = frames[i].code_offset();
 
           bool force_constructor = false;
@@ -509,28 +513,34 @@
                                              offset, flags);
       } break;
 
-      case StackFrame::WASM: {
-        WasmFrame* wasm_frame = WasmFrame::cast(frame);
-        Handle<Object> instance(wasm_frame->wasm_instance(), this);
+      case StackFrame::WASM_COMPILED: {
+        WasmCompiledFrame* wasm_frame = WasmCompiledFrame::cast(frame);
+        Handle<WasmInstanceObject> instance(wasm_frame->wasm_instance(), this);
         const int wasm_function_index = wasm_frame->function_index();
         Code* code = wasm_frame->unchecked_code();
         Handle<AbstractCode> abstract_code(AbstractCode::cast(code), this);
         const int offset =
             static_cast<int>(wasm_frame->pc() - code->instruction_start());
 
-        // TODO(wasm): The wasm object returned by the WasmFrame should always
-        //             be a wasm object.
-        DCHECK(wasm::IsWasmInstance(*instance) || instance->IsUndefined(this));
-
-        int flags = wasm::WasmIsAsmJs(*instance, this)
-                        ? FrameArray::kIsAsmJsWasmFrame
-                        : FrameArray::kIsWasmFrame;
+        int flags = 0;
+        if (instance->compiled_module()->is_asm_js()) {
+          flags |= FrameArray::kIsAsmJsWasmFrame;
+          if (wasm_frame->at_to_number_conversion()) {
+            flags |= FrameArray::kAsmJsAtNumberConversion;
+          }
+        } else {
+          flags |= FrameArray::kIsWasmFrame;
+        }
 
         elements =
             FrameArray::AppendWasmFrame(elements, instance, wasm_function_index,
                                         abstract_code, offset, flags);
       } break;
 
+      case StackFrame::WASM_INTERPRETER_ENTRY:
+        // TODO(clemensh): Add frames.
+        break;
+
       default:
         break;
     }
@@ -620,21 +630,22 @@
   }
 
   Handle<JSObject> NewStackFrameObject(FrameSummary& summ) {
-    int position = summ.abstract_code()->SourcePosition(summ.code_offset());
-    return NewStackFrameObject(summ.function(), position,
-                               summ.is_constructor());
+    if (summ.IsJavaScript()) return NewStackFrameObject(summ.AsJavaScript());
+    if (summ.IsWasm()) return NewStackFrameObject(summ.AsWasm());
+    UNREACHABLE();
+    return Handle<JSObject>::null();
   }
 
-  Handle<JSObject> NewStackFrameObject(Handle<JSFunction> fun, int position,
-                                       bool is_constructor) {
+  Handle<JSObject> NewStackFrameObject(
+      const FrameSummary::JavaScriptFrameSummary& summ) {
     Handle<JSObject> stack_frame =
         factory()->NewJSObject(isolate_->object_function());
-    Handle<Script> script(Script::cast(fun->shared()->script()), isolate_);
+    Handle<Script> script = Handle<Script>::cast(summ.script());
 
     if (!line_key_.is_null()) {
       Script::PositionInfo info;
-      bool valid_pos =
-          Script::GetPositionInfo(script, position, &info, Script::WITH_OFFSET);
+      bool valid_pos = Script::GetPositionInfo(script, summ.SourcePosition(),
+                                               &info, Script::WITH_OFFSET);
 
       if (!column_key_.is_null() && valid_pos) {
         JSObject::AddProperty(stack_frame, column_key_,
@@ -657,7 +668,7 @@
     }
 
     if (!script_name_or_source_url_key_.is_null()) {
-      Handle<Object> result = Script::GetNameOrSourceURL(script);
+      Handle<Object> result(script->GetNameOrSourceURL(), isolate_);
       JSObject::AddProperty(stack_frame, script_name_or_source_url_key_, result,
                             NONE);
     }
@@ -669,12 +680,13 @@
     }
 
     if (!function_key_.is_null()) {
-      Handle<Object> fun_name = JSFunction::GetDebugName(fun);
+      Handle<String> fun_name = summ.FunctionName();
       JSObject::AddProperty(stack_frame, function_key_, fun_name, NONE);
     }
 
     if (!constructor_key_.is_null()) {
-      Handle<Object> is_constructor_obj = factory()->ToBoolean(is_constructor);
+      Handle<Object> is_constructor_obj =
+          factory()->ToBoolean(summ.is_constructor());
       JSObject::AddProperty(stack_frame, constructor_key_, is_constructor_obj,
                             NONE);
     }
@@ -696,28 +708,28 @@
     return stack_frame;
   }
 
-  Handle<JSObject> NewStackFrameObject(WasmFrame* frame) {
+  Handle<JSObject> NewStackFrameObject(
+      const FrameSummary::WasmFrameSummary& summ) {
     Handle<JSObject> stack_frame =
         factory()->NewJSObject(isolate_->object_function());
 
     if (!function_key_.is_null()) {
-      Handle<String> name = wasm::GetWasmFunctionName(
-          isolate_, handle(frame->wasm_instance(), isolate_),
-          frame->function_index());
+      Handle<WasmCompiledModule> compiled_module(
+          summ.wasm_instance()->compiled_module(), isolate_);
+      Handle<String> name = WasmCompiledModule::GetFunctionName(
+          isolate_, compiled_module, summ.function_index());
       JSObject::AddProperty(stack_frame, function_key_, name, NONE);
     }
     // Encode the function index as line number (1-based).
     if (!line_key_.is_null()) {
       JSObject::AddProperty(
           stack_frame, line_key_,
-          isolate_->factory()->NewNumberFromInt(frame->function_index() + 1),
+          isolate_->factory()->NewNumberFromInt(summ.function_index() + 1),
           NONE);
     }
     // Encode the byte offset as column (1-based).
     if (!column_key_.is_null()) {
-      Code* code = frame->LookupCode();
-      int offset = static_cast<int>(frame->pc() - code->instruction_start());
-      int position = AbstractCode::cast(code)->SourcePosition(offset);
+      int position = summ.byte_offset();
       // Make position 1-based.
       if (position >= 0) ++position;
       JSObject::AddProperty(stack_frame, column_key_,
@@ -725,7 +737,7 @@
                             NONE);
     }
     if (!script_id_key_.is_null()) {
-      int script_id = frame->script()->id();
+      int script_id = summ.script()->id();
       JSObject::AddProperty(stack_frame, script_id_key_,
                             handle(Smi::FromInt(script_id), isolate_), NONE);
     }
@@ -762,25 +774,16 @@
   for (StackTraceFrameIterator it(this); !it.done() && (frames_seen < limit);
        it.Advance()) {
     StandardFrame* frame = it.frame();
-    if (frame->is_java_script()) {
-      // Set initial size to the maximum inlining level + 1 for the outermost
-      // function.
-      List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
-      JavaScriptFrame::cast(frame)->Summarize(&frames);
-      for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
-        Handle<JSFunction> fun = frames[i].function();
-        // Filter frames from other security contexts.
-        if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
-            !this->context()->HasSameSecurityTokenAs(fun->context()))
-          continue;
-        Handle<JSObject> new_frame_obj = helper.NewStackFrameObject(frames[i]);
-        stack_trace_elems->set(frames_seen, *new_frame_obj);
-        frames_seen++;
-      }
-    } else {
-      DCHECK(frame->is_wasm());
-      WasmFrame* wasm_frame = WasmFrame::cast(frame);
-      Handle<JSObject> new_frame_obj = helper.NewStackFrameObject(wasm_frame);
+    // Set initial size to the maximum inlining level + 1 for the outermost
+    // function.
+    List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+    frame->Summarize(&frames);
+    for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
+      // Filter frames from other security contexts.
+      if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
+          !this->context()->HasSameSecurityTokenAs(*frames[i].native_context()))
+        continue;
+      Handle<JSObject> new_frame_obj = helper.NewStackFrameObject(frames[i]);
       stack_trace_elems->set(frames_seen, *new_frame_obj);
       frames_seen++;
     }
@@ -1076,7 +1079,7 @@
     printf("Exception thrown:\n");
     if (location) {
       Handle<Script> script = location->script();
-      Handle<Object> name = Script::GetNameOrSourceURL(script);
+      Handle<Object> name(script->GetNameOrSourceURL(), this);
       printf("at ");
       if (name->IsString() && String::cast(*name)->length() > 0)
         String::cast(*name)->PrintOn(stdout);
@@ -1216,7 +1219,7 @@
     if (FLAG_wasm_eh_prototype) {
       if (frame->is_wasm() && is_catchable_by_wasm(exception)) {
         int stack_slots = 0;  // Will contain stack slot count of frame.
-        WasmFrame* wasm_frame = static_cast<WasmFrame*>(frame);
+        WasmCompiledFrame* wasm_frame = static_cast<WasmCompiledFrame*>(frame);
         offset = wasm_frame->LookupExceptionHandlerInTable(&stack_slots);
         if (offset >= 0) {
           // Compute the stack pointer from the frame pointer. This ensures that
@@ -1298,29 +1301,16 @@
       }
     }
 
-    // For JavaScript frames we perform a range lookup in the handler table.
+    // For JavaScript frames we are guaranteed not to find a handler.
     if (frame->is_java_script() && catchable_by_js) {
       JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
-      int stack_depth = 0;  // Will contain operand stack depth of handler.
-      offset = js_frame->LookupExceptionHandlerInTable(&stack_depth, nullptr);
-      if (offset >= 0) {
-        // Compute the stack pointer from the frame pointer. This ensures that
-        // operand stack slots are dropped for nested statements. Also restore
-        // correct context for the handler which is pushed within the try-block.
-        Address return_sp = frame->fp() -
-                            StandardFrameConstants::kFixedFrameSizeFromFp -
-                            stack_depth * kPointerSize;
-        STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
-        context = Context::cast(Memory::Object_at(return_sp - kPointerSize));
-
-        // Gather information from the frame.
-        code = frame->LookupCode();
-        handler_sp = return_sp;
-        handler_fp = frame->fp();
-        break;
-      }
+      offset = js_frame->LookupExceptionHandlerInTable(nullptr, nullptr);
+      CHECK_EQ(-1, offset);
     }
 
+    // TODO(clemensh): Handle unwinding interpreted wasm frames (stored in the
+    // WasmInterpreter C++ object).
+
     RemoveMaterializedObjectsOnUnwind(frame);
   }
 
@@ -1350,16 +1340,30 @@
       List<FrameSummary> summaries;
       frame->Summarize(&summaries);
       for (const FrameSummary& summary : summaries) {
-        Handle<AbstractCode> code = summary.abstract_code();
+        Handle<AbstractCode> code = summary.AsJavaScript().abstract_code();
+        if (code->IsCode() && code->kind() == AbstractCode::BUILTIN) {
+          if (code->GetCode()->is_promise_rejection()) {
+            return HandlerTable::PROMISE;
+          }
+
+          // This is the exception thrown in PromiseHandle, which doesn't
+          // cause a promise rejection.
+          if (code->GetCode()->is_exception_caught()) {
+            return HandlerTable::CAUGHT;
+          }
+        }
+
         if (code->kind() == AbstractCode::OPTIMIZED_FUNCTION) {
-          DCHECK(summary.function()->shared()->asm_function());
-          DCHECK(!FLAG_turbo_asm_deoptimization);
+          DCHECK(summary.AsJavaScript().function()->shared()->asm_function());
           // asm code cannot contain try-catch.
           continue;
         }
+        // Must have been constructed from a bytecode array.
+        CHECK_EQ(AbstractCode::INTERPRETED_FUNCTION, code->kind());
         int code_offset = summary.code_offset();
-        int index =
-            code->LookupRangeInHandlerTable(code_offset, nullptr, &prediction);
+        BytecodeArray* bytecode = code->GetBytecodeArray();
+        HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
+        int index = table->LookupRange(code_offset, nullptr, &prediction);
         if (index <= 0) continue;
         if (prediction == HandlerTable::UNCAUGHT) continue;
         return prediction;
@@ -1457,6 +1461,9 @@
     DCHECK(scheduled_exception() != heap()->termination_exception());
     clear_scheduled_exception();
   }
+  if (thread_local_top_.pending_message_obj_ == handler->message_obj_) {
+    clear_pending_message();
+  }
 }
 
 
@@ -1494,23 +1501,29 @@
   StackTraceFrameIterator it(this);
   if (it.done()) return false;
   StandardFrame* frame = it.frame();
-  // TODO(clemensh): handle wasm frames
-  if (!frame->is_java_script()) return false;
-  JSFunction* fun = JavaScriptFrame::cast(frame)->function();
-  Object* script = fun->shared()->script();
-  if (!script->IsScript() ||
-      (Script::cast(script)->source()->IsUndefined(this))) {
-    return false;
-  }
-  Handle<Script> casted_script(Script::cast(script), this);
   // Compute the location from the function and the relocation info of the
   // baseline code. For optimized code this will use the deoptimization
   // information to get canonical location information.
   List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
-  JavaScriptFrame::cast(frame)->Summarize(&frames);
+  frame->Summarize(&frames);
   FrameSummary& summary = frames.last();
-  int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
-  *target = MessageLocation(casted_script, pos, pos + 1, handle(fun, this));
+  int pos = summary.SourcePosition();
+  Handle<SharedFunctionInfo> shared;
+  Handle<Object> script = summary.script();
+  if (!script->IsScript() ||
+      (Script::cast(*script)->source()->IsUndefined(this))) {
+    return false;
+  }
+
+  // TODO(wasm): Remove this once trap-if is always on.
+  // Background: Without trap-if, the information on the stack trace is
+  // incomplete (see bug v8:5007).
+  if (summary.IsWasmCompiled() && !FLAG_wasm_trap_if) return false;
+
+  if (summary.IsJavaScript()) {
+    shared = handle(summary.AsJavaScript().function()->shared());
+  }
+  *target = MessageLocation(Handle<Script>::cast(script), pos, pos + 1, shared);
   return true;
 }
 
@@ -1554,9 +1567,32 @@
 
   const int frame_count = elements->FrameCount();
   for (int i = 0; i < frame_count; i++) {
-    if (elements->IsWasmFrame(i)) {
-      // TODO(clemensh): handle wasm frames
-      return false;
+    if (elements->IsWasmFrame(i) || elements->IsAsmJsWasmFrame(i)) {
+      Handle<WasmCompiledModule> compiled_module(
+          WasmInstanceObject::cast(elements->WasmInstance(i))
+              ->compiled_module());
+      int func_index = elements->WasmFunctionIndex(i)->value();
+      int code_offset = elements->Offset(i)->value();
+      // TODO(wasm): Clean this up (bug 5007).
+      int pos = code_offset < 0
+                    ? (-1 - code_offset)
+                    : elements->Code(i)->SourcePosition(code_offset);
+      if (elements->IsAsmJsWasmFrame(i)) {
+        // For asm.js frames, make an additional translation step to get the
+        // asm.js source position.
+        bool at_to_number_conversion =
+            elements->Flags(i)->value() & FrameArray::kAsmJsAtNumberConversion;
+        pos = WasmCompiledModule::GetAsmJsSourcePosition(
+            compiled_module, func_index, pos, at_to_number_conversion);
+      } else {
+        // For pure wasm, make the function-local position module-relative by
+        // adding the function offset.
+        pos += compiled_module->GetFunctionOffset(func_index);
+      }
+      Handle<Script> script(compiled_module->script());
+
+      *target = MessageLocation(script, pos, pos + 1);
+      return true;
     }
 
     Handle<JSFunction> fun = handle(elements->Function(i), this);
@@ -1662,6 +1698,8 @@
 
 
 void Isolate::ReportPendingMessages() {
+  DCHECK(AllowExceptions::IsAllowed(this));
+
   Object* exception = pending_exception();
 
   // Try to propagate the exception to an external v8::TryCatch handler. If
@@ -1783,23 +1821,92 @@
   global_handles()->Destroy(global_promise.location());
 }
 
-bool Isolate::PromiseHasUserDefinedRejectHandler(Handle<Object> promise) {
-  Handle<JSFunction> fun = promise_has_user_defined_reject_handler();
-  Handle<Object> has_reject_handler;
-  // If we are, e.g., overflowing the stack, don't try to call out to JS
-  if (!AllowJavascriptExecution::IsAllowed(this)) return false;
-  // Call the registered function to check for a handler
-  if (Execution::TryCall(this, fun, promise, 0, NULL)
-          .ToHandle(&has_reject_handler)) {
-    return has_reject_handler->IsTrue(this);
+namespace {
+bool InternalPromiseHasUserDefinedRejectHandler(Isolate* isolate,
+                                                Handle<JSPromise> promise);
+
+bool PromiseHandlerCheck(Isolate* isolate, Handle<JSReceiver> handler,
+                         Handle<JSReceiver> deferred_promise) {
+  // Recurse to the forwarding Promise, if any. This may be due to
+  //  - await reaction forwarding to the throwaway Promise, which has
+  //    a dependency edge to the outer Promise.
+  //  - PromiseIdResolveHandler forwarding to the output of .then
+  //  - Promise.all/Promise.race forwarding to a throwaway Promise, which
+  //    has a dependency edge to the generated outer Promise.
+  // Otherwise, this is a real reject handler for the Promise.
+  Handle<Symbol> key = isolate->factory()->promise_forwarding_handler_symbol();
+  Handle<Object> forwarding_handler = JSReceiver::GetDataProperty(handler, key);
+  if (forwarding_handler->IsUndefined(isolate)) {
+    return true;
   }
-  // If an exception is thrown in the course of execution of this built-in
-  // function, it indicates either a bug, or a synthetic uncatchable
-  // exception in the shutdown path. In either case, it's OK to predict either
-  // way in DevTools.
+
+  if (!deferred_promise->IsJSPromise()) {
+    return true;
+  }
+
+  return InternalPromiseHasUserDefinedRejectHandler(
+      isolate, Handle<JSPromise>::cast(deferred_promise));
+}
+
+bool InternalPromiseHasUserDefinedRejectHandler(Isolate* isolate,
+                                                Handle<JSPromise> promise) {
+  // If this promise was marked as being handled by a catch block
+  // in an async function, then it has a user-defined reject handler.
+  if (promise->handled_hint()) return true;
+
+  // If this Promise is subsumed by another Promise (a Promise resolved
+  // with another Promise, or an intermediate, hidden, throwaway Promise
+  // within async/await), then recurse on the outer Promise.
+  // In this case, the dependency is one possible way that the Promise
+  // could be resolved, so it does not subsume the other following cases.
+  Handle<Symbol> key = isolate->factory()->promise_handled_by_symbol();
+  Handle<Object> outer_promise_obj = JSObject::GetDataProperty(promise, key);
+  if (outer_promise_obj->IsJSPromise() &&
+      InternalPromiseHasUserDefinedRejectHandler(
+          isolate, Handle<JSPromise>::cast(outer_promise_obj))) {
+    return true;
+  }
+
+  Handle<Object> queue(promise->reject_reactions(), isolate);
+  Handle<Object> deferred_promise(promise->deferred_promise(), isolate);
+
+  if (queue->IsUndefined(isolate)) {
+    return false;
+  }
+
+  if (queue->IsCallable()) {
+    return PromiseHandlerCheck(isolate, Handle<JSReceiver>::cast(queue),
+                               Handle<JSReceiver>::cast(deferred_promise));
+  }
+
+  if (queue->IsSymbol()) {
+    return InternalPromiseHasUserDefinedRejectHandler(
+        isolate, Handle<JSPromise>::cast(deferred_promise));
+  }
+
+  Handle<FixedArray> queue_arr = Handle<FixedArray>::cast(queue);
+  Handle<FixedArray> deferred_promise_arr =
+      Handle<FixedArray>::cast(deferred_promise);
+  for (int i = 0; i < deferred_promise_arr->length(); i++) {
+    Handle<JSReceiver> queue_item(JSReceiver::cast(queue_arr->get(i)));
+    Handle<JSReceiver> deferred_promise_item(
+        JSReceiver::cast(deferred_promise_arr->get(i)));
+    if (PromiseHandlerCheck(isolate, queue_item, deferred_promise_item)) {
+      return true;
+    }
+  }
+
   return false;
 }
 
+}  // namespace
+
+bool Isolate::PromiseHasUserDefinedRejectHandler(Handle<Object> promise) {
+  if (!promise->IsJSPromise()) return false;
+  return InternalPromiseHasUserDefinedRejectHandler(
+      this, Handle<JSPromise>::cast(promise));
+}
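
The recursion above is easier to follow outside of V8's handle machinery. Below is a minimal standalone C++ sketch of the same walk (toy types, all names hypothetical, not V8 internals): a promise counts as handled if it carries the handled hint, if a subsuming outer promise is handled, or if any attached reaction is a real handler rather than a forwarding edge into another promise.

// Toy model of the reject-handler walk; not the V8 data structures.
#include <vector>

struct Promise {
  bool handled_hint = false;      // set by catch prediction in async functions
  Promise* outer = nullptr;       // promise that subsumes this one, if any
  struct Reaction {
    bool forwarding = false;      // true for synthetic forwarding handlers
    Promise* deferred = nullptr;  // promise produced by the reaction
  };
  std::vector<Reaction> reactions;
};

bool HasUserDefinedRejectHandler(const Promise* p) {
  if (p->handled_hint) return true;
  // A subsuming outer promise with a handler also covers this one.
  if (p->outer && HasUserDefinedRejectHandler(p->outer)) return true;
  for (const Promise::Reaction& r : p->reactions) {
    // A non-forwarding reaction is a real, user-defined handler.
    if (!r.forwarding) return true;
    // Otherwise recurse into the promise the handler forwards to.
    if (r.deferred && HasUserDefinedRejectHandler(r.deferred)) return true;
  }
  return false;
}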
+
 Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
   Handle<Object> undefined = factory()->undefined_value();
   ThreadLocalTop* tltop = thread_local_top();
@@ -1817,7 +1924,7 @@
         continue;
       case HandlerTable::CAUGHT:
       case HandlerTable::DESUGARING:
-        if (retval->IsJSObject()) {
+        if (retval->IsJSPromise()) {
           // Caught the result of an inner async/await invocation.
           // Mark the inner promise as caught in the "synchronous case" so
           // that Debug::OnException will see. In the synchronous case,
@@ -1825,10 +1932,7 @@
           // await, the function which has this exception event has not yet
           // returned, so the generated Promise has not yet been marked
           // by AsyncFunctionAwaitCaught with promiseHandledHintSymbol.
-          Handle<Symbol> key = factory()->promise_handled_hint_symbol();
-          JSObject::SetProperty(Handle<JSObject>::cast(retval), key,
-                                factory()->true_value(), STRICT)
-              .Assert();
+          Handle<JSPromise>::cast(retval)->set_handled_hint(true);
         }
         return retval;
       case HandlerTable::PROMISE:
@@ -1927,6 +2031,47 @@
   // DCHECK_NULL(list_);
 }
 
+void Isolate::ReleaseManagedObjects() {
+  Isolate::ManagedObjectFinalizer* current =
+      managed_object_finalizers_list_.next_;
+  while (current != nullptr) {
+    Isolate::ManagedObjectFinalizer* next = current->next_;
+    current->Dispose();
+    delete current;
+    current = next;
+  }
+}
+
+Isolate::ManagedObjectFinalizer* Isolate::RegisterForReleaseAtTeardown(
+    void* value, Isolate::ManagedObjectFinalizer::Deleter deleter) {
+  DCHECK_NOT_NULL(value);
+  DCHECK_NOT_NULL(deleter);
+
+  Isolate::ManagedObjectFinalizer* ret = new Isolate::ManagedObjectFinalizer();
+  ret->value_ = value;
+  ret->deleter_ = deleter;
+  // Insert at head. The sentinel head lives for the lifetime of the
+  // Isolate; otherwise there would be no way to reset the head if it
+  // were deleted before the isolate is torn down.
+  Isolate::ManagedObjectFinalizer* next = managed_object_finalizers_list_.next_;
+  managed_object_finalizers_list_.next_ = ret;
+  ret->prev_ = &managed_object_finalizers_list_;
+  ret->next_ = next;
+  if (next != nullptr) next->prev_ = ret;
+  return ret;
+}
+
+void Isolate::UnregisterFromReleaseAtTeardown(
+    Isolate::ManagedObjectFinalizer** finalizer_ptr) {
+  DCHECK_NOT_NULL(finalizer_ptr);
+  Isolate::ManagedObjectFinalizer* finalizer = *finalizer_ptr;
+  DCHECK_NOT_NULL(finalizer->prev_);
+
+  finalizer->prev_->next_ = finalizer->next_;
+  if (finalizer->next_ != nullptr) finalizer->next_->prev_ = finalizer->prev_;
+  delete finalizer;
+  *finalizer_ptr = nullptr;
+}
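
The registration list above is an intrusive doubly-linked list whose sentinel head is embedded in the Isolate, so unlinking is O(1) with no search and the head can never dangle. A minimal standalone sketch of the same pattern, with hypothetical names rather than the V8 classes:

// Intrusive finalizer list with an embedded sentinel head; O(1) unlink.
struct Finalizer {
  using Deleter = void (*)(void*);
  void* value = nullptr;
  Deleter deleter = nullptr;
  Finalizer* prev = nullptr;
  Finalizer* next = nullptr;
};

class Registry {
 public:
  // Mirrors RegisterForReleaseAtTeardown: insert the new node at the head.
  Finalizer* Register(void* value, Finalizer::Deleter deleter) {
    Finalizer* f = new Finalizer{value, deleter, &head_, head_.next};
    if (head_.next != nullptr) head_.next->prev = f;
    head_.next = f;
    return f;
  }

  // Mirrors UnregisterFromReleaseAtTeardown: caller takes ownership back.
  void Unregister(Finalizer** f_ptr) {
    Finalizer* f = *f_ptr;
    f->prev->next = f->next;
    if (f->next != nullptr) f->next->prev = f->prev;
    delete f;
    *f_ptr = nullptr;
  }

  // Mirrors ReleaseManagedObjects: dispose of everything still registered.
  ~Registry() {
    for (Finalizer* f = head_.next; f != nullptr;) {
      Finalizer* next = f->next;
      f->deleter(f->value);
      delete f;
      f = next;
    }
  }

 private:
  Finalizer head_;  // sentinel; lives exactly as long as the registry
};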
 
 Isolate::PerIsolateThreadData::~PerIsolateThreadData() {
 #if defined(USE_SIMULATOR)
@@ -2033,8 +2178,7 @@
         "\"time\": %f, "
         "\"ptr\": \"%p\", "
         "\"name\": \"%s\","
-        "\"nesting\": %zu"
-        "}\n",
+        "\"nesting\": %" PRIuS "}\n",
         reinterpret_cast<void*>(heap_->isolate()), time,
         reinterpret_cast<const void*>(zone), zone->name(),
         nesting_deepth_.Value());
@@ -2051,9 +2195,9 @@
         "\"time\": %f, "
         "\"ptr\": \"%p\", "
         "\"name\": \"%s\", "
-        "\"size\": %zu,"
-        "\"nesting\": %zu"
-        "}\n",
+        "\"size\": %" PRIuS
+        ","
+        "\"nesting\": %" PRIuS "}\n",
         reinterpret_cast<void*>(heap_->isolate()), time,
         reinterpret_cast<const void*>(zone), zone->name(),
         zone->allocation_size(), nesting_deepth_.Value());
@@ -2069,9 +2213,9 @@
         "\"type\": \"zone\", "
         "\"isolate\": \"%p\", "
         "\"time\": %f, "
-        "\"allocated\": %zu,"
-        "\"pooled\": %zu"
-        "}\n",
+        "\"allocated\": %" PRIuS
+        ","
+        "\"pooled\": %" PRIuS "}\n",
         reinterpret_cast<void*>(heap_->isolate()), time, malloced, pooled);
   }
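
The %zu-to-PRIuS changes in these traces swap the C99 size_t conversion for a format macro that is string-concatenated into the literal; PRIuS is V8's portable spelling for printing size_t (older MSVC runtimes did not accept %zu). A self-contained sketch of the idiom, with the macro definition assumed for illustration rather than taken from V8's headers:

#include <cstdio>
#include <cstddef>

// Assumed definition for illustration; V8 defines PRIuS in its own headers.
#if defined(_MSC_VER)
#define PRIuS "Iu"
#else
#define PRIuS "zu"
#endif

int main() {
  size_t allocated = 4096;
  // Adjacent string literals concatenate around the macro, as in the
  // JSON trace lines above.
  std::printf("{\"allocated\": %" PRIuS "}\n", allocated);
  return 0;
}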
 
@@ -2113,7 +2257,6 @@
       global_handles_(NULL),
       eternal_handles_(NULL),
       thread_manager_(NULL),
-      has_installed_extensions_(false),
       regexp_stack_(NULL),
       date_cache_(NULL),
       call_descriptor_data_(NULL),
@@ -2121,6 +2264,9 @@
       // be fixed once the default isolate cleanup is done.
       random_number_generator_(NULL),
       rail_mode_(PERFORMANCE_ANIMATION),
+      promise_hook_or_debug_is_active_(false),
+      promise_hook_(NULL),
+      load_start_time_ms_(0),
       serializer_enabled_(enable_serializer),
       has_fatal_error_(false),
       initialized_from_snapshot_(false),
@@ -2141,7 +2287,8 @@
       use_counter_callback_(NULL),
       basic_block_profiler_(NULL),
       cancelable_task_manager_(new CancelableTaskManager()),
-      abort_on_uncaught_exception_callback_(NULL) {
+      abort_on_uncaught_exception_callback_(NULL),
+      total_regexp_code_generated_(0) {
   {
     base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
     CHECK(thread_data_table_);
@@ -2238,9 +2385,7 @@
     optimizing_compile_dispatcher_ = NULL;
   }
 
-  if (heap_.mark_compact_collector()->sweeping_in_progress()) {
-    heap_.mark_compact_collector()->EnsureSweepingCompleted();
-  }
+  heap_.mark_compact_collector()->EnsureSweepingCompleted();
 
   DumpAndResetCompilationStats();
 
@@ -2272,6 +2417,10 @@
   delete heap_profiler_;
   heap_profiler_ = NULL;
 
+  compiler_dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kBlock);
+  delete compiler_dispatcher_;
+  compiler_dispatcher_ = nullptr;
+
   cancelable_task_manager()->CancelAndWait();
 
   heap_.TearDown();
@@ -2280,8 +2429,8 @@
   delete interpreter_;
   interpreter_ = NULL;
 
-  delete compiler_dispatcher_tracer_;
-  compiler_dispatcher_tracer_ = nullptr;
+  delete ast_string_constants_;
+  ast_string_constants_ = nullptr;
 
   delete cpu_profiler_;
   cpu_profiler_ = NULL;
@@ -2292,6 +2441,7 @@
   root_index_map_ = NULL;
 
   ClearSerializerData();
+  ReleaseManagedObjects();
 }
 
 
@@ -2491,7 +2641,8 @@
   cpu_profiler_ = new CpuProfiler(this);
   heap_profiler_ = new HeapProfiler(heap());
   interpreter_ = new interpreter::Interpreter(this);
-  compiler_dispatcher_tracer_ = new CompilerDispatcherTracer(this);
+  compiler_dispatcher_ =
+      new CompilerDispatcher(this, V8::GetCurrentPlatform(), FLAG_stack_size);
 
   // Enable logging before setting up the heap
   logger_->SetUp(this);
@@ -2544,9 +2695,7 @@
 
   bootstrapper_->Initialize(create_heap_objects);
   builtins_.SetUp(this, create_heap_objects);
-  if (create_heap_objects) {
-    heap_.CreateFixedStubs();
-  }
+  if (create_heap_objects) heap_.CreateFixedStubs();
 
   if (FLAG_log_internal_timer_events) {
     set_event_logger(Logger::DefaultEventLoggerSentinel);
@@ -2605,6 +2754,11 @@
 
   time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
 
+  {
+    HandleScope scope(this);
+    ast_string_constants_ = new AstStringConstants(this, heap()->HashSeed());
+  }
+
   if (!create_heap_objects) {
     // Now that the heap is consistent, it's OK to generate the code for the
     // deopt entry table that might have been referred to by optimized code in
@@ -2795,11 +2949,24 @@
   return nullptr;
 }
 
+bool Isolate::use_crankshaft() {
+  return FLAG_opt && FLAG_crankshaft && !serializer_enabled_ &&
+         CpuFeatures::SupportsCrankshaft() && !IsCodeCoverageEnabled();
+}
 
-bool Isolate::use_crankshaft() const {
-  return FLAG_crankshaft &&
-         !serializer_enabled_ &&
-         CpuFeatures::SupportsCrankshaft();
+bool Isolate::NeedsSourcePositionsForProfiling() const {
+  return FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
+         FLAG_turbo_profiling || FLAG_perf_prof || is_profiling() ||
+         debug_->is_active() || logger_->is_logging();
+}
+
+bool Isolate::IsCodeCoverageEnabled() {
+  return heap()->code_coverage_list()->IsArrayList();
+}
+
+void Isolate::SetCodeCoverageList(Object* value) {
+  DCHECK(value->IsUndefined(this) || value->IsArrayList());
+  heap()->set_code_coverage_list(value);
 }
 
 bool Isolate::IsArrayOrObjectPrototype(Object* object) {
@@ -2815,6 +2982,26 @@
   return false;
 }
 
+void Isolate::ClearOSROptimizedCode() {
+  DisallowHeapAllocation no_gc;
+  Object* context = heap()->native_contexts_list();
+  while (!context->IsUndefined(this)) {
+    Context* current_context = Context::cast(context);
+    current_context->ClearOptimizedCodeMap();
+    context = current_context->next_context_link();
+  }
+}
+
+void Isolate::EvictOSROptimizedCode(Code* code, const char* reason) {
+  DisallowHeapAllocation no_gc;
+  Object* context = heap()->native_contexts_list();
+  while (!context->IsUndefined(this)) {
+    Context* current_context = Context::cast(context);
+    current_context->EvictFromOptimizedCodeMap(code, reason);
+    context = current_context->next_context_link();
+  }
+}
+
 bool Isolate::IsInAnyContext(Object* object, uint32_t index) {
   DisallowHeapAllocation no_gc;
   Object* context = heap()->native_contexts_list();
@@ -2929,15 +3116,6 @@
       handle(Smi::FromInt(kProtectorInvalid), this));
 }
 
-void Isolate::InvalidateHasInstanceProtector() {
-  DCHECK(factory()->has_instance_protector()->value()->IsSmi());
-  DCHECK(IsHasInstanceLookupChainIntact());
-  PropertyCell::SetValueWithInvalidation(
-      factory()->has_instance_protector(),
-      handle(Smi::FromInt(kProtectorInvalid), this));
-  DCHECK(!IsHasInstanceLookupChainIntact());
-}
-
 void Isolate::InvalidateIsConcatSpreadableProtector() {
   DCHECK(factory()->is_concat_spreadable_protector()->value()->IsSmi());
   DCHECK(IsIsConcatSpreadableLookupChainIntact());
@@ -2965,11 +3143,21 @@
 void Isolate::InvalidateArrayIteratorProtector() {
   DCHECK(factory()->array_iterator_protector()->value()->IsSmi());
   DCHECK(IsArrayIteratorLookupChainIntact());
-  factory()->array_iterator_protector()->set_value(
-      Smi::FromInt(kProtectorInvalid));
+  PropertyCell::SetValueWithInvalidation(
+      factory()->array_iterator_protector(),
+      handle(Smi::FromInt(kProtectorInvalid), this));
   DCHECK(!IsArrayIteratorLookupChainIntact());
 }
 
+void Isolate::InvalidateArrayBufferNeuteringProtector() {
+  DCHECK(factory()->array_buffer_neutering_protector()->value()->IsSmi());
+  DCHECK(IsArrayBufferNeuteringIntact());
+  PropertyCell::SetValueWithInvalidation(
+      factory()->array_buffer_neutering_protector(),
+      handle(Smi::FromInt(kProtectorInvalid), this));
+  DCHECK(!IsArrayBufferNeuteringIntact());
+}
+
 bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
   DisallowHeapAllocation no_gc;
   return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
@@ -3003,7 +3191,7 @@
   return hash != 0 ? hash : 1;
 }
 
-Object* Isolate::FindCodeObject(Address a) {
+Code* Isolate::FindCodeObject(Address a) {
   return inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(a);
 }
 
@@ -3016,33 +3204,39 @@
 #undef ISOLATE_FIELD_OFFSET
 #endif
 
-
-Handle<JSObject> Isolate::SetUpSubregistry(Handle<JSObject> registry,
-                                           Handle<Map> map, const char* cname) {
-  Handle<String> name = factory()->InternalizeUtf8String(cname);
-  Handle<JSObject> obj = factory()->NewJSObjectFromMap(map);
-  JSObject::NormalizeProperties(obj, CLEAR_INOBJECT_PROPERTIES, 0,
-                                "SetupSymbolRegistry");
-  JSObject::AddProperty(registry, name, obj, NONE);
-  return obj;
-}
-
-
-Handle<JSObject> Isolate::GetSymbolRegistry() {
-  if (heap()->symbol_registry()->IsSmi()) {
-    Handle<Map> map = factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
-    Handle<JSObject> registry = factory()->NewJSObjectFromMap(map);
-    heap()->set_symbol_registry(*registry);
-
-    SetUpSubregistry(registry, map, "for");
-    SetUpSubregistry(registry, map, "for_api");
-    SetUpSubregistry(registry, map, "keyFor");
-    SetUpSubregistry(registry, map, "private_api");
+Handle<Symbol> Isolate::SymbolFor(Heap::RootListIndex dictionary_index,
+                                  Handle<String> name, bool private_symbol) {
+  Handle<String> key = factory()->InternalizeString(name);
+  Handle<NameDictionary> dictionary =
+      Handle<NameDictionary>::cast(heap()->root_handle(dictionary_index));
+  int entry = dictionary->FindEntry(key);
+  Handle<Symbol> symbol;
+  if (entry == NameDictionary::kNotFound) {
+    symbol =
+        private_symbol ? factory()->NewPrivateSymbol() : factory()->NewSymbol();
+    symbol->set_name(*key);
+    dictionary = NameDictionary::Add(dictionary, key, symbol,
+                                     PropertyDetails::Empty(), &entry);
+    switch (dictionary_index) {
+      case Heap::kPublicSymbolTableRootIndex:
+        symbol->set_is_public(true);
+        heap()->set_public_symbol_table(*dictionary);
+        break;
+      case Heap::kApiSymbolTableRootIndex:
+        heap()->set_api_symbol_table(*dictionary);
+        break;
+      case Heap::kApiPrivateSymbolTableRootIndex:
+        heap()->set_api_private_symbol_table(*dictionary);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    symbol = Handle<Symbol>(Symbol::cast(dictionary->ValueAt(entry)));
   }
-  return Handle<JSObject>::cast(factory()->symbol_registry());
+  return symbol;
 }
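
SymbolFor replaces the old JS-object symbol registry with per-isolate dictionary tables keyed by internalized string, using the usual lookup-or-create registry shape. A standalone sketch with ordinary containers (hypothetical types, not V8's NameDictionary):

#include <memory>
#include <string>
#include <unordered_map>

struct Symbol {
  std::string name;
  bool is_public = false;
};

class SymbolRegistry {
 public:
  // Returns the existing symbol for |name|, creating one on first use,
  // so repeated calls with the same name yield the same symbol.
  Symbol* SymbolFor(const std::string& name, bool is_public) {
    auto it = table_.find(name);
    if (it == table_.end()) {
      auto sym = std::make_unique<Symbol>();
      sym->name = name;
      sym->is_public = is_public;
      it = table_.emplace(name, std::move(sym)).first;
    }
    return it->second.get();
  }

 private:
  std::unordered_map<std::string, std::unique_ptr<Symbol>> table_;
};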
 
-
 void Isolate::AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback) {
   for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
     if (callback == before_call_entered_callbacks_.at(i)) return;
@@ -3100,6 +3294,22 @@
   }
 }
 
+void Isolate::DebugStateUpdated() {
+  promise_hook_or_debug_is_active_ = promise_hook_ || debug()->is_active();
+}
+
+void Isolate::SetPromiseHook(PromiseHook hook) {
+  promise_hook_ = hook;
+  DebugStateUpdated();
+}
+
+void Isolate::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
+                             Handle<Object> parent) {
+  if (debug()->is_active()) debug()->RunPromiseHook(type, promise, parent);
+  if (promise_hook_ == nullptr) return;
+  promise_hook_(type, v8::Utils::PromiseToLocal(promise),
+                v8::Utils::ToLocal(parent));
+}
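
promise_hook_or_debug_is_active_ caches the OR of two inputs and is recomputed whenever either changes, so hot paths (and generated code, via the address accessor added in isolate.h below) test a single flag. A sketch of the cached-flag pattern, with hypothetical names:

// Recompute the combined flag on every state change so that readers
// test one boolean instead of two.
class HookState {
 public:
  void SetHook(void (*hook)()) {
    hook_ = hook;
    Update();
  }
  void SetDebugActive(bool active) {
    debug_active_ = active;
    Update();
  }
  // Exposed so a fast path can test the cached flag directly.
  const bool* flag_address() const { return &hook_or_debug_active_; }

 private:
  void Update() { hook_or_debug_active_ = hook_ != nullptr || debug_active_; }

  void (*hook_)() = nullptr;
  bool debug_active_ = false;
  bool hook_or_debug_active_ = false;
};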
 
 void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
   promise_reject_callback_ = callback;
@@ -3119,99 +3329,66 @@
       v8::Utils::StackTraceToLocal(stack_trace)));
 }
 
-namespace {
-class PromiseDebugEventScope {
- public:
-  PromiseDebugEventScope(Isolate* isolate, Object* id, Object* name)
-      : isolate_(isolate),
-        id_(id, isolate_),
-        name_(name, isolate_),
-        is_debug_active_(isolate_->debug()->is_active() && id_->IsNumber() &&
-                         name_->IsString()) {
-    if (is_debug_active_) {
-      isolate_->debug()->OnAsyncTaskEvent(
-          isolate_->factory()->will_handle_string(), id_,
-          Handle<String>::cast(name_));
-    }
-  }
-
-  ~PromiseDebugEventScope() {
-    if (is_debug_active_) {
-      isolate_->debug()->OnAsyncTaskEvent(
-          isolate_->factory()->did_handle_string(), id_,
-          Handle<String>::cast(name_));
-    }
-  }
-
- private:
-  Isolate* isolate_;
-  Handle<Object> id_;
-  Handle<Object> name_;
-  bool is_debug_active_;
-};
-}  // namespace
-
 void Isolate::PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
                                  MaybeHandle<Object>* result,
                                  MaybeHandle<Object>* maybe_exception) {
-  PromiseDebugEventScope helper(this, info->debug_id(), info->debug_name());
-
   Handle<Object> value(info->value(), this);
   Handle<Object> tasks(info->tasks(), this);
   Handle<JSFunction> promise_handle_fn = promise_handle();
   Handle<Object> undefined = factory()->undefined_value();
+  Handle<Object> deferred_promise(info->deferred_promise(), this);
 
-  // If tasks is an array we have multiple onFulfilled/onRejected callbacks
-  // associated with the promise. The deferred object for each callback
-  // is attached to this array as well.
-  // Otherwise, there is a single callback and the deferred object is attached
-  // directly to PromiseReactionJobInfo.
-  if (tasks->IsJSArray()) {
-    Handle<JSArray> array = Handle<JSArray>::cast(tasks);
-    DCHECK(array->length()->IsSmi());
-    int length = Smi::cast(array->length())->value();
-    ElementsAccessor* accessor = array->GetElementsAccessor();
-    DCHECK(length % 2 == 0);
-    for (int i = 0; i < length; i += 2) {
-      DCHECK(accessor->HasElement(array, i));
-      DCHECK(accessor->HasElement(array, i + 1));
-      Handle<Object> argv[] = {value, accessor->Get(array, i),
-                               accessor->Get(array, i + 1)};
-      *result = Execution::TryCall(this, promise_handle_fn, undefined,
-                                   arraysize(argv), argv, maybe_exception);
+  if (deferred_promise->IsFixedArray()) {
+    DCHECK(tasks->IsFixedArray());
+    Handle<FixedArray> deferred_promise_arr =
+        Handle<FixedArray>::cast(deferred_promise);
+    Handle<FixedArray> deferred_on_resolve_arr(
+        FixedArray::cast(info->deferred_on_resolve()), this);
+    Handle<FixedArray> deferred_on_reject_arr(
+        FixedArray::cast(info->deferred_on_reject()), this);
+    Handle<FixedArray> tasks_arr = Handle<FixedArray>::cast(tasks);
+    for (int i = 0; i < deferred_promise_arr->length(); i++) {
+      Handle<Object> argv[] = {value, handle(tasks_arr->get(i), this),
+                               handle(deferred_promise_arr->get(i), this),
+                               handle(deferred_on_resolve_arr->get(i), this),
+                               handle(deferred_on_reject_arr->get(i), this)};
+      *result = Execution::TryCall(
+          this, promise_handle_fn, undefined, arraysize(argv), argv,
+          Execution::MessageHandling::kReport, maybe_exception);
       // If execution is terminating, just bail out.
       if (result->is_null() && maybe_exception->is_null()) {
         return;
       }
     }
   } else {
-    Handle<Object> deferred(info->deferred(), this);
-    Handle<Object> argv[] = {value, tasks, deferred};
-    *result = Execution::TryCall(this, promise_handle_fn, undefined,
-                                 arraysize(argv), argv, maybe_exception);
+    Handle<Object> argv[] = {value, tasks, deferred_promise,
+                             handle(info->deferred_on_resolve(), this),
+                             handle(info->deferred_on_reject(), this)};
+    *result = Execution::TryCall(
+        this, promise_handle_fn, undefined, arraysize(argv), argv,
+        Execution::MessageHandling::kReport, maybe_exception);
   }
 }
 
 void Isolate::PromiseResolveThenableJob(
     Handle<PromiseResolveThenableJobInfo> info, MaybeHandle<Object>* result,
     MaybeHandle<Object>* maybe_exception) {
-  PromiseDebugEventScope helper(this, info->debug_id(), info->debug_name());
-
   Handle<JSReceiver> thenable(info->thenable(), this);
   Handle<JSFunction> resolve(info->resolve(), this);
   Handle<JSFunction> reject(info->reject(), this);
   Handle<JSReceiver> then(info->then(), this);
   Handle<Object> argv[] = {resolve, reject};
-  *result = Execution::TryCall(this, then, thenable, arraysize(argv), argv,
-                               maybe_exception);
+  *result =
+      Execution::TryCall(this, then, thenable, arraysize(argv), argv,
+                         Execution::MessageHandling::kReport, maybe_exception);
 
   Handle<Object> reason;
   if (maybe_exception->ToHandle(&reason)) {
     DCHECK(result->is_null());
     Handle<Object> reason_arg[] = {reason};
-    *result =
-        Execution::TryCall(this, reject, factory()->undefined_value(),
-                           arraysize(reason_arg), reason_arg, maybe_exception);
+    *result = Execution::TryCall(
+        this, reject, factory()->undefined_value(), arraysize(reason_arg),
+        reason_arg, Execution::MessageHandling::kReport, maybe_exception);
   }
 }
 
@@ -3249,6 +3426,7 @@
 void Isolate::RunMicrotasksInternal() {
   if (!pending_microtask_count()) return;
   TRACE_EVENT0("v8.execute", "RunMicrotasks");
+  TRACE_EVENT_CALL_STATS_SCOPED(this, "v8", "V8.RunMicrotasks");
   while (pending_microtask_count() > 0) {
     HandleScope scope(this);
     int num_tasks = pending_microtask_count();
@@ -3290,9 +3468,9 @@
         if (microtask->IsJSFunction()) {
           Handle<JSFunction> microtask_function =
               Handle<JSFunction>::cast(microtask);
-          result = Execution::TryCall(this, microtask_function,
-                                      factory()->undefined_value(), 0, NULL,
-                                      &maybe_exception);
+          result = Execution::TryCall(
+              this, microtask_function, factory()->undefined_value(), 0,
+              nullptr, Execution::MessageHandling::kReport, &maybe_exception);
         } else if (microtask->IsPromiseResolveThenableJobInfo()) {
           PromiseResolveThenableJob(
               Handle<PromiseResolveThenableJobInfo>::cast(microtask), &result,
@@ -3437,13 +3615,26 @@
   if (new_length == 0) {
     heap()->set_detached_contexts(heap()->empty_fixed_array());
   } else if (new_length < length) {
-    heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
-        *detached_contexts, length - new_length);
+    heap()->RightTrimFixedArray(*detached_contexts, length - new_length);
   }
 }
 
+double Isolate::LoadStartTimeMs() {
+  base::LockGuard<base::Mutex> guard(&rail_mutex_);
+  return load_start_time_ms_;
+}
+
 void Isolate::SetRAILMode(RAILMode rail_mode) {
+  RAILMode old_rail_mode = rail_mode_.Value();
+  if (old_rail_mode != PERFORMANCE_LOAD && rail_mode == PERFORMANCE_LOAD) {
+    base::LockGuard<base::Mutex> guard(&rail_mutex_);
+    load_start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
+  }
   rail_mode_.SetValue(rail_mode);
+  if (old_rail_mode == PERFORMANCE_LOAD && rail_mode != PERFORMANCE_LOAD) {
+    heap()->incremental_marking()->incremental_marking_job()->ScheduleTask(
+        heap());
+  }
   if (FLAG_trace_rail) {
     PrintIsolate(this, "RAIL mode: %s\n", RAILModeName(rail_mode));
   }
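
SetRAILMode acts only on edges: the load start time is recorded under rail_mutex_ when entering PERFORMANCE_LOAD (LoadStartTimeMs may read it from another thread), and an incremental-marking task is scheduled when leaving it. A sketch of the edge-triggered, mutex-guarded pattern, with hypothetical names:

#include <mutex>

class LoadTracker {
 public:
  // Record the timestamp only on the not-loading -> loading edge.
  void SetLoading(bool loading, double now_ms) {
    if (loading && !loading_) {
      std::lock_guard<std::mutex> guard(mutex_);
      load_start_time_ms_ = now_ms;
    }
    loading_ = loading;
  }

  // May be called from another thread, hence the mutex.
  double LoadStartTimeMs() {
    std::lock_guard<std::mutex> guard(mutex_);
    return load_start_time_ms_;
  }

 private:
  bool loading_ = false;
  std::mutex mutex_;
  double load_start_time_ms_ = 0;
};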
diff --git a/src/isolate.h b/src/isolate.h
index 87bc45b..444d99f 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -35,6 +35,7 @@
 
 class AccessCompilerData;
 class AddressToIndexHashMap;
+class AstStringConstants;
 class BasicBlockProfiler;
 class Bootstrapper;
 class CancelableTaskManager;
@@ -46,7 +47,7 @@
 class CodeStubDescriptor;
 class CodeTracer;
 class CompilationCache;
-class CompilerDispatcherTracer;
+class CompilerDispatcher;
 class CompilationStatistics;
 class ContextSlotCache;
 class Counters;
@@ -250,7 +251,7 @@
 
   static int AllocateThreadId();
 
-  static int GetCurrentThreadId();
+  V8_EXPORT_PRIVATE static int GetCurrentThreadId();
 
   base::Atomic32 id_;
 
@@ -383,7 +384,6 @@
   V(int, bad_char_shift_table, kUC16AlphabetSize)                              \
   V(int, good_suffix_shift_table, (kBMMaxShift + 1))                           \
   V(int, suffix_table, (kBMMaxShift + 1))                                      \
-  V(uint32_t, private_random_seed, 2)                                          \
   ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
 
 typedef List<HeapObject*> DebugObjectCache;
@@ -394,6 +394,8 @@
   V(OOMErrorCallback, oom_behavior, nullptr)                                  \
   V(LogEventCallback, event_logger, nullptr)                                  \
   V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
+  V(AllowWasmCompileCallback, allow_wasm_compile_callback, nullptr)           \
+  V(AllowWasmInstantiateCallback, allow_wasm_instantiate_callback, nullptr)   \
   V(ExternalReferenceRedirectorPointer*, external_reference_redirector,       \
     nullptr)                                                                  \
   /* State for Relocatable. */                                                \
@@ -404,15 +406,11 @@
   V(intptr_t*, api_external_references, nullptr)                              \
   V(AddressToIndexHashMap*, external_reference_map, nullptr)                  \
   V(HeapObjectToIndexHashMap*, root_index_map, nullptr)                       \
-  V(v8::DeserializeInternalFieldsCallback,                                    \
-    deserialize_internal_fields_callback, nullptr)                            \
   V(int, pending_microtask_count, 0)                                          \
-  V(int, debug_microtask_count, 0)                                            \
   V(HStatistics*, hstatistics, nullptr)                                       \
   V(CompilationStatistics*, turbo_statistics, nullptr)                        \
   V(HTracer*, htracer, nullptr)                                               \
   V(CodeTracer*, code_tracer, nullptr)                                        \
-  V(bool, fp_stubs_generated, false)                                          \
   V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu)                           \
   V(PromiseRejectCallback, promise_reject_callback, nullptr)                  \
   V(const v8::StartupData*, snapshot_blob, nullptr)                           \
@@ -422,6 +420,8 @@
   V(bool, is_profiling, false)                                                \
   /* true if a trace is being formatted through Error.prepareStackTrace. */   \
   V(bool, formatting_stack_trace, false)                                      \
+  /* Perform side effect checks on function call and API callbacks. */        \
+  V(bool, needs_side_effect_check, false)                                     \
   ISOLATE_INIT_SIMULATOR_LIST(V)
 
 #define THREAD_LOCAL_TOP_ACCESSOR(type, name)                        \
@@ -533,6 +533,8 @@
   // for legacy API reasons.
   void TearDown();
 
+  void ReleaseManagedObjects();
+
   static void GlobalTearDown();
 
   void ClearSerializerData();
@@ -765,7 +767,9 @@
   Object* PromoteScheduledException();
 
   // Attempts to compute the current source location, storing the
-  // result in the target out parameter.
+  // result in the target out parameter. The source location is attached to a
+  // Message object as the location that should be shown to the user. It is
+  // typically the top-most meaningful location on the stack.
   bool ComputeLocation(MessageLocation* target);
   bool ComputeLocationFromException(MessageLocation* target,
                                     Handle<Object> exception);
@@ -905,12 +909,6 @@
 
   Builtins* builtins() { return &builtins_; }
 
-  void NotifyExtensionInstalled() {
-    has_installed_extensions_ = true;
-  }
-
-  bool has_installed_extensions() { return has_installed_extensions_; }
-
   unibrow::Mapping<unibrow::Ecma262Canonicalize>*
       regexp_macro_assembler_canonicalize() {
     return &regexp_macro_assembler_canonicalize_;
@@ -918,6 +916,11 @@
 
   RegExpStack* regexp_stack() { return regexp_stack_; }
 
+  size_t total_regexp_code_generated() { return total_regexp_code_generated_; }
+  void IncreaseTotalRegexpCodeGenerated(int size) {
+    total_regexp_code_generated_ += size;
+  }
+
   List<int>* regexp_indices() { return &regexp_indices_; }
 
   unibrow::Mapping<unibrow::Ecma262Canonicalize>*
@@ -966,10 +969,15 @@
   bool IsDead() { return has_fatal_error_; }
   void SignalFatalError() { has_fatal_error_ = true; }
 
-  bool use_crankshaft() const;
+  bool use_crankshaft();
 
   bool initialized_from_snapshot() { return initialized_from_snapshot_; }
 
+  bool NeedsSourcePositionsForProfiling() const;
+
+  bool IsCodeCoverageEnabled();
+  void SetCodeCoverageList(Object* value);
+
   double time_millis_since_init() {
     return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
   }
@@ -992,7 +1000,6 @@
 
   bool IsFastArrayConstructorPrototypeChainIntact();
   inline bool IsArraySpeciesLookupChainIntact();
-  inline bool IsHasInstanceLookupChainIntact();
   bool IsIsConcatSpreadableLookupChainIntact();
   bool IsIsConcatSpreadableLookupChainIntact(JSReceiver* receiver);
   inline bool IsStringLengthOverflowIntact();
@@ -1001,6 +1008,9 @@
   // Avoid deopt loops if fast Array Iterators migrate to slow Array Iterators.
   inline bool IsFastArrayIterationIntact();
 
+  // Protector for neutered array buffers; while intact, neutering checks can be elided.
+  inline bool IsArrayBufferNeuteringIntact();
+
   // On intent to set an element in object, make sure that appropriate
   // notifications occur if the set is on the elements of the array or
   // object prototype. Also ensure that changes to prototype chain between
@@ -1016,15 +1026,16 @@
     UpdateArrayProtectorOnSetElement(object);
   }
   void InvalidateArraySpeciesProtector();
-  void InvalidateHasInstanceProtector();
   void InvalidateIsConcatSpreadableProtector();
   void InvalidateStringLengthOverflowProtector();
   void InvalidateArrayIteratorProtector();
+  void InvalidateArrayBufferNeuteringProtector();
 
   // Returns true if array is the initial array prototype in any native context.
   bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
 
-  CallInterfaceDescriptorData* call_descriptor_data(int index);
+  V8_EXPORT_PRIVATE CallInterfaceDescriptorData* call_descriptor_data(
+      int index);
 
   AccessCompilerData* access_compiler_data() { return access_compiler_data_; }
 
@@ -1070,7 +1081,7 @@
   int GenerateIdentityHash(uint32_t mask);
 
   // Given an address occupied by a live code object, return that object.
-  Object* FindCodeObject(Address a);
+  Code* FindCodeObject(Address a);
 
   int NextOptimizationId() {
     int id = next_optimization_id_++;
@@ -1080,9 +1091,6 @@
     return id;
   }
 
-  // Get (and lazily initialize) the registry for per-isolate symbols.
-  Handle<JSObject> GetSymbolRegistry();
-
   void AddCallCompletedCallback(CallCompletedCallback callback);
   void RemoveCallCompletedCallback(CallCompletedCallback callback);
   void FireCallCompletedCallback();
@@ -1108,7 +1116,9 @@
   void EnqueueMicrotask(Handle<Object> microtask);
   void RunMicrotasks();
   bool IsRunningMicrotasks() const { return is_running_microtasks_; }
-  int GetNextDebugMicrotaskId() { return debug_microtask_count_++; }
+
+  Handle<Symbol> SymbolFor(Heap::RootListIndex dictionary_index,
+                           Handle<String> name, bool private_symbol);
 
   void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
   void CountUsage(v8::Isolate::UseCounterFeature feature);
@@ -1122,6 +1132,16 @@
   int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
 #endif
 
+  Address promise_hook_or_debug_is_active_address() {
+    return reinterpret_cast<Address>(&promise_hook_or_debug_is_active_);
+  }
+
+  void DebugStateUpdated();
+
+  void SetPromiseHook(PromiseHook hook);
+  void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
+                      Handle<Object> parent);
+
   // Support for dynamically disabling tail call elimination.
   Address is_tail_call_elimination_enabled_address() {
     return reinterpret_cast<Address>(&is_tail_call_elimination_enabled_);
@@ -1149,18 +1169,32 @@
     return cancelable_task_manager_;
   }
 
+  const AstStringConstants* ast_string_constants() const {
+    return ast_string_constants_;
+  }
+
   interpreter::Interpreter* interpreter() const { return interpreter_; }
 
   AccountingAllocator* allocator() { return allocator_; }
 
-  CompilerDispatcherTracer* compiler_dispatcher_tracer() const {
-    return compiler_dispatcher_tracer_;
+  CompilerDispatcher* compiler_dispatcher() const {
+    return compiler_dispatcher_;
   }
 
+  // Clear the optimized code maps stored in all native contexts.
+  void ClearOSROptimizedCode();
+
+  // Evict a particular optimized code object from all native contexts.
+  void EvictOSROptimizedCode(Code* code, const char* reason);
+
   bool IsInAnyContext(Object* object, uint32_t index);
 
   void SetRAILMode(RAILMode rail_mode);
 
+  RAILMode rail_mode() { return rail_mode_.Value(); }
+
+  double LoadStartTimeMs();
+
   void IsolateInForegroundNotification();
 
   void IsolateInBackgroundNotification();
@@ -1173,6 +1207,42 @@
   base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
 #endif
 
+  void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
+  bool allow_atomics_wait() { return allow_atomics_wait_; }
+
+  // Entry in the list of native heap values allocated by the runtime that
+  // must be freed when the isolate is torn down.
+  class ManagedObjectFinalizer final {
+   public:
+    typedef void (*Deleter)(void*);
+    void Dispose() { deleter_(value_); }
+
+   private:
+    friend class Isolate;
+
+    ManagedObjectFinalizer() {
+      DCHECK_EQ(reinterpret_cast<void*>(this),
+                reinterpret_cast<void*>(&value_));
+    }
+
+    // value_ must be the first member
+    void* value_ = nullptr;
+    Deleter deleter_ = nullptr;
+    ManagedObjectFinalizer* prev_ = nullptr;
+    ManagedObjectFinalizer* next_ = nullptr;
+  };
+
+  // Register a native value for destruction at isolate teardown.
+  ManagedObjectFinalizer* RegisterForReleaseAtTeardown(
+      void* value, ManagedObjectFinalizer::Deleter deleter);
+
+  // Unregister a previously registered value from release at
+  // isolate teardown, deleting the ManagedObjectFinalizer.
+  // This transfers responsibility for deleting the previously
+  // managed value back to the caller. Pass by pointer, because
+  // *finalizer_ptr gets reset to nullptr.
+  void UnregisterFromReleaseAtTeardown(ManagedObjectFinalizer** finalizer_ptr);
+
  protected:
   explicit Isolate(bool enable_serializer);
   bool IsArrayOrObjectPrototype(Object* object);
@@ -1180,8 +1250,6 @@
  private:
   friend struct GlobalState;
   friend struct InitializeGlobalState;
-  Handle<JSObject> SetUpSubregistry(Handle<JSObject> registry, Handle<Map> map,
-                                    const char* name);
 
   // These fields are accessed through the API, offsets must be kept in sync
   // with v8::internal::Internals (in include/v8.h) constants. This is also
@@ -1337,7 +1405,6 @@
   ThreadManager* thread_manager_;
   RuntimeState runtime_state_;
   Builtins builtins_;
-  bool has_installed_extensions_;
   unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
   unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
   unibrow::Mapping<unibrow::Ecma262Canonicalize>
@@ -1349,6 +1416,10 @@
   AccessCompilerData* access_compiler_data_;
   base::RandomNumberGenerator* random_number_generator_;
   base::AtomicValue<RAILMode> rail_mode_;
+  bool promise_hook_or_debug_is_active_;
+  PromiseHook promise_hook_;
+  base::Mutex rail_mutex_;
+  double load_start_time_ms_;
 
   // Whether the isolate has been created for snapshotting.
   bool serializer_enabled_;
@@ -1381,9 +1452,11 @@
   std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
   FunctionEntryHook function_entry_hook_;
 
+  const AstStringConstants* ast_string_constants_;
+
   interpreter::Interpreter* interpreter_;
 
-  CompilerDispatcherTracer* compiler_dispatcher_tracer_;
+  CompilerDispatcher* compiler_dispatcher_;
 
   typedef std::pair<InterruptCallback, void*> InterruptEntry;
   std::queue<InterruptEntry> api_interrupts_queue_;
@@ -1449,8 +1522,15 @@
   base::Mutex simulator_i_cache_mutex_;
 #endif
 
+  bool allow_atomics_wait_;
+
+  ManagedObjectFinalizer managed_object_finalizers_list_;
+
+  size_t total_regexp_code_generated_;
+
   friend class ExecutionAccess;
   friend class HandleScopeImplementer;
+  friend class HeapTester;
   friend class OptimizingCompileDispatcher;
   friend class SweeperThread;
   friend class ThreadManager;
@@ -1571,14 +1651,13 @@
   Isolate* isolate_;
 };
 
-#define STACK_CHECK(isolate, result_value)               \
-  do {                                                   \
-    StackLimitCheck stack_check(isolate);                \
-    if (stack_check.HasOverflowed()) {                   \
-      isolate->Throw(*isolate->factory()->NewRangeError( \
-          MessageTemplate::kStackOverflow));             \
-      return result_value;                               \
-    }                                                    \
+#define STACK_CHECK(isolate, result_value) \
+  do {                                     \
+    StackLimitCheck stack_check(isolate);  \
+    if (stack_check.HasOverflowed()) {     \
+      isolate->StackOverflow();            \
+      return result_value;                 \
+    }                                      \
   } while (false)
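
The reformatted STACK_CHECK keeps the do { ... } while (false) wrapper, which makes the multi-statement macro expand to exactly one statement so it nests safely under an unbraced if/else; the body now delegates to isolate->StackOverflow() instead of building the RangeError inline. The idiom in isolation, with hypothetical names:

#include <cstdio>

struct Buffer {
  int used = 0;
  int capacity = 4;
  bool full() const { return used >= capacity; }
  void flag_overflow() { std::puts("overflow"); }
};

// do/while(false) turns the block into a single statement, so
// `if (cond) RETURN_IF_FULL(b, -1); else ...` parses as intended.
#define RETURN_IF_FULL(buffer, result_value) \
  do {                                       \
    if ((buffer).full()) {                   \
      (buffer).flag_overflow();              \
      return result_value;                   \
    }                                        \
  } while (false)

int Push(Buffer& b, int value) {
  RETURN_IF_FULL(b, -1);
  b.used++;
  return value;
}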
 
 // Support for temporarily postponing interrupts. When the outermost
diff --git a/src/js/array.js b/src/js/array.js
index e23810f..88e8cb3 100644
--- a/src/js/array.js
+++ b/src/js/array.js
@@ -21,7 +21,6 @@
 var ObjectHasOwnProperty;
 var ObjectToString = utils.ImportNow("object_to_string");
 var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var speciesSymbol = utils.ImportNow("species_symbol");
 var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
 
 utils.Import(function(from) {
@@ -723,7 +722,7 @@
       else return x < y ? -1 : 1;
     };
   }
-  var InsertionSort = function InsertionSort(a, from, to) {
+  function InsertionSort(a, from, to) {
     for (var i = from + 1; i < to; i++) {
       var element = a[i];
       for (var j = i - 1; j >= from; j--) {
@@ -739,7 +738,7 @@
     }
   };
 
-  var GetThirdIndex = function(a, from, to) {
+  function GetThirdIndex(a, from, to) {
     var t_array = new InternalArray();
     // Use both 'from' and 'to' to determine the pivot candidates.
     var increment = 200 + ((to - from) & 15);
@@ -757,7 +756,7 @@
     return third_index;
   }
 
-  var QuickSort = function QuickSort(a, from, to) {
+  function QuickSort(a, from, to) {
     var third_index = 0;
     while (true) {
       // Insertion sort is faster for short arrays.
@@ -846,7 +845,7 @@
   // Copy elements in the range 0..length from obj's prototype chain
   // to obj itself, if obj has holes. Return one more than the maximal index
   // of a prototype property.
-  var CopyFromPrototype = function CopyFromPrototype(obj, length) {
+  function CopyFromPrototype(obj, length) {
     var max = 0;
     for (var proto = %object_get_prototype_of(obj); proto;
          proto = %object_get_prototype_of(proto)) {
@@ -876,7 +875,7 @@
   // Set a value of "undefined" on all indices in the range from..to
   // where a prototype of obj has an element. I.e., shadow all prototype
   // elements in that range.
-  var ShadowPrototypeElements = function(obj, from, to) {
+  function ShadowPrototypeElements(obj, from, to) {
     for (var proto = %object_get_prototype_of(obj); proto;
          proto = %object_get_prototype_of(proto)) {
       var indices = IS_PROXY(proto) ? to : %GetArrayKeys(proto, to);
@@ -899,7 +898,7 @@
     }
   };
 
-  var SafeRemoveArrayHoles = function SafeRemoveArrayHoles(obj) {
+  function SafeRemoveArrayHoles(obj) {
     // Copy defined elements from the end to fill in all holes and undefineds
     // in the beginning of the array.  Write undefineds and holes at the end
     // after loop is finished.
@@ -1271,7 +1270,14 @@
 }
 
 
-function InnerArrayCopyWithin(target, start, end, array, length) {
+// ES#sec-array.prototype.copywithin
+// Array.prototype.copyWithin ( target, start [ , end ] )
+function ArrayCopyWithin(target, start, end) {
+  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
+
+  var array = TO_OBJECT(this);
+  var length = TO_LENGTH(array.length);
+
   target = TO_INTEGER(target);
   var to;
   if (target < 0) {
@@ -1319,17 +1325,6 @@
 }
 
 
-// ES6 draft 03-17-15, section 22.1.3.3
-function ArrayCopyWithin(target, start, end) {
-  CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
-
-  var array = TO_OBJECT(this);
-  var length = TO_LENGTH(array.length);
-
-  return InnerArrayCopyWithin(target, start, end, array, length);
-}
-
-
 function InnerArrayFind(predicate, thisArg, array, length) {
   if (!IS_CALLABLE(predicate)) {
     throw %make_type_error(kCalledNonCallable, predicate);
@@ -1490,12 +1485,6 @@
   return array;
 }
 
-
-function ArraySpecies() {
-  return this;
-}
-
-
 // -------------------------------------------------------------------
 
 // Set up non-enumerable constructor property on the Array.prototype
@@ -1528,7 +1517,7 @@
 
 var specialFunctions = %SpecialArrayFunctions();
 
-var getFunction = function(name, jsBuiltin, len) {
+function getFunction(name, jsBuiltin, len) {
   var f = jsBuiltin;
   if (specialFunctions.hasOwnProperty(name)) {
     f = specialFunctions[name];
@@ -1539,7 +1528,14 @@
   return f;
 };
 
-var ArrayValues = getFunction("values", null, 0);
+// Array prototype functions that return iterators, plus forEach. They are
+// exposed to the public API via Template::SetIntrinsicDataProperty().
+var IteratorFunctions = {
+    "entries": getFunction("entries", null, 0),
+    "forEach": getFunction("forEach", ArrayForEach, 1),
+    "keys": getFunction("keys", null, 0),
+    "values": getFunction("values", null, 0)
+};
 
 // Set up non-enumerable functions of the Array.prototype object and
 // set their names.
@@ -1558,7 +1554,6 @@
   "splice", getFunction("splice", ArraySplice, 2),
   "sort", getFunction("sort", ArraySort),
   "filter", getFunction("filter", ArrayFilter, 1),
-  "forEach", getFunction("forEach", ArrayForEach, 1),
   "some", getFunction("some", ArraySome, 1),
   "every", getFunction("every", ArrayEvery, 1),
   "map", getFunction("map", ArrayMap, 1),
@@ -1571,14 +1566,18 @@
   "findIndex", getFunction("findIndex", ArrayFindIndex, 1),
   "fill", getFunction("fill", ArrayFill, 1),
   "includes", getFunction("includes", null, 1),
-  "keys", getFunction("keys", null, 0),
-  "entries", getFunction("entries", null, 0),
-  iteratorSymbol, ArrayValues
+  "entries", IteratorFunctions.entries,
+  "forEach", IteratorFunctions.forEach,
+  "keys", IteratorFunctions.keys,
+  iteratorSymbol, IteratorFunctions.values
 ]);
 
-%FunctionSetName(ArrayValues, "values");
+utils.ForEachFunction = GlobalArray.prototype.forEach;
 
-utils.InstallGetter(GlobalArray, speciesSymbol, ArraySpecies);
+%FunctionSetName(IteratorFunctions.entries, "entries");
+%FunctionSetName(IteratorFunctions.forEach, "forEach");
+%FunctionSetName(IteratorFunctions.keys, "keys");
+%FunctionSetName(IteratorFunctions.values, "values");
 
 %FinishArrayPrototypeSetup(GlobalArray.prototype);
 
@@ -1621,8 +1620,7 @@
   to.ArrayJoin = ArrayJoin;
   to.ArrayPush = ArrayPush;
   to.ArrayToString = ArrayToString;
-  to.ArrayValues = ArrayValues;
-  to.InnerArrayCopyWithin = InnerArrayCopyWithin;
+  to.ArrayValues = IteratorFunctions.values;
   to.InnerArrayEvery = InnerArrayEvery;
   to.InnerArrayFill = InnerArrayFill;
   to.InnerArrayFilter = InnerArrayFilter;
@@ -1640,13 +1638,16 @@
 });
 
 %InstallToContext([
+  "array_entries_iterator", IteratorFunctions.entries,
+  "array_for_each_iterator", IteratorFunctions.forEach,
+  "array_keys_iterator", IteratorFunctions.keys,
   "array_pop", ArrayPop,
   "array_push", ArrayPush,
   "array_shift", ArrayShift,
   "array_splice", ArraySplice,
   "array_slice", ArraySlice,
   "array_unshift", ArrayUnshift,
-  "array_values_iterator", ArrayValues,
+  "array_values_iterator", IteratorFunctions.values,
 ]);
 
 });
diff --git a/src/js/arraybuffer.js b/src/js/arraybuffer.js
index a1ff03d..9cb93a6 100644
--- a/src/js/arraybuffer.js
+++ b/src/js/arraybuffer.js
@@ -15,7 +15,6 @@
 var MaxSimple;
 var MinSimple;
 var SpeciesConstructor;
-var speciesSymbol = utils.ImportNow("species_symbol");
 
 utils.Import(function(from) {
   MaxSimple = from.MaxSimple;
@@ -75,13 +74,6 @@
   return result;
 }
 
-
-function ArrayBufferSpecies() {
-  return this;
-}
-
-utils.InstallGetter(GlobalArrayBuffer, speciesSymbol, ArrayBufferSpecies);
-
 utils.InstallFunctions(GlobalArrayBuffer.prototype, DONT_ENUM, [
   "slice", ArrayBufferSlice
 ]);
diff --git a/src/js/async-await.js b/src/js/async-await.js
deleted file mode 100644
index a1cac0d..0000000
--- a/src/js/async-await.js
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils, extrasUtils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var AsyncFunctionNext;
-var AsyncFunctionThrow;
-var GlobalPromise;
-var IsPromise;
-var NewPromiseCapability;
-var PerformPromiseThen;
-var PromiseCreate;
-var PromiseNextMicrotaskID;
-var RejectPromise;
-var ResolvePromise;
-
-utils.Import(function(from) {
-  AsyncFunctionNext = from.AsyncFunctionNext;
-  AsyncFunctionThrow = from.AsyncFunctionThrow;
-  GlobalPromise = from.GlobalPromise;
-  IsPromise = from.IsPromise;
-  NewPromiseCapability = from.NewPromiseCapability;
-  PerformPromiseThen = from.PerformPromiseThen;
-  PromiseCreate = from.PromiseCreate;
-  RejectPromise = from.RejectPromise;
-  ResolvePromise = from.ResolvePromise;
-});
-
-var promiseAsyncStackIDSymbol =
-    utils.ImportNow("promise_async_stack_id_symbol");
-var promiseHandledBySymbol =
-    utils.ImportNow("promise_handled_by_symbol");
-var promiseForwardingHandlerSymbol =
-    utils.ImportNow("promise_forwarding_handler_symbol");
-var promiseHandledHintSymbol =
-    utils.ImportNow("promise_handled_hint_symbol");
-var promiseHasHandlerSymbol =
-    utils.ImportNow("promise_has_handler_symbol");
-
-// -------------------------------------------------------------------
-
-function PromiseCastResolved(value) {
-  if (IsPromise(value)) {
-    return value;
-  } else {
-    var promise = PromiseCreate();
-    ResolvePromise(promise, value);
-    return promise;
-  }
-}
-
-// ES#abstract-ops-async-function-await
-// AsyncFunctionAwait ( value )
-// Shared logic for the core of await. The parser desugars
-//   await awaited
-// into
-//   yield AsyncFunctionAwait{Caught,Uncaught}(.generator, awaited, .promise)
-// The 'awaited' parameter is the value; the generator stands in
-// for the asyncContext, and .promise is the larger promise under
-// construction by the enclosing async function.
-function AsyncFunctionAwait(generator, awaited, outerPromise) {
-  // Promise.resolve(awaited).then(
-  //     value => AsyncFunctionNext(value),
-  //     error => AsyncFunctionThrow(error)
-  // );
-  var promise = PromiseCastResolved(awaited);
-
-  var onFulfilled = sentValue => {
-    %_Call(AsyncFunctionNext, generator, sentValue);
-    // The resulting Promise is a throwaway, so it doesn't matter what it
-    // resolves to. What is important is that we don't end up keeping the
-    // whole chain of intermediate Promises alive by returning the value
-    // of AsyncFunctionNext, as that would create a memory leak.
-    return;
-  };
-  var onRejected = sentError => {
-    %_Call(AsyncFunctionThrow, generator, sentError);
-    // Similarly, returning the huge Promise here would cause a long
-    // resolution chain to find what the exception to throw is, and
-    // create a similar memory leak, and it does not matter what
-    // sort of rejection this intermediate Promise becomes.
-    return;
-  }
-
-  // Just forwarding the exception, so no debugEvent for throwawayCapability
-  var throwawayCapability = NewPromiseCapability(GlobalPromise, false);
-
-  // The Promise will be thrown away and not handled, but it shouldn't trigger
-  // unhandled reject events as its work is done
-  SET_PRIVATE(throwawayCapability.promise, promiseHasHandlerSymbol, true);
-
-  if (DEBUG_IS_ACTIVE) {
-    if (IsPromise(awaited)) {
-      // Mark the reject handler callback to be a forwarding edge, rather
-      // than a meaningful catch handler
-      SET_PRIVATE(onRejected, promiseForwardingHandlerSymbol, true);
-    }
-
-    // Mark the dependency to outerPromise in case the throwaway Promise is
-    // found on the Promise stack
-    SET_PRIVATE(throwawayCapability.promise, promiseHandledBySymbol,
-                outerPromise);
-  }
-
-  PerformPromiseThen(promise, onFulfilled, onRejected, throwawayCapability);
-}
-
-// Called by the parser from the desugaring of 'await' when catch
-// prediction indicates no locally surrounding catch block
-function AsyncFunctionAwaitUncaught(generator, awaited, outerPromise) {
-  AsyncFunctionAwait(generator, awaited, outerPromise);
-}
-
-// Called by the parser from the desugaring of 'await' when catch
-// prediction indicates that there is a locally surrounding catch block
-function AsyncFunctionAwaitCaught(generator, awaited, outerPromise) {
-  if (DEBUG_IS_ACTIVE && IsPromise(awaited)) {
-    SET_PRIVATE(awaited, promiseHandledHintSymbol, true);
-  }
-  AsyncFunctionAwait(generator, awaited, outerPromise);
-}
-
-// How the parser rejects promises from async/await desugaring
-function RejectPromiseNoDebugEvent(promise, reason) {
-  return RejectPromise(promise, reason, false);
-}
-
-function AsyncFunctionPromiseCreate() {
-  var promise = PromiseCreate();
-  if (DEBUG_IS_ACTIVE) {
-    // Push the Promise under construction in an async function on
-    // the catch prediction stack to handle exceptions thrown before
-    // the first await.
-    %DebugPushPromise(promise);
-    // Assign ID and create a recurring task to save stack for future
-    // resumptions from await.
-    var id = %DebugNextMicrotaskId();
-    SET_PRIVATE(promise, promiseAsyncStackIDSymbol, id);
-    %DebugAsyncTaskEvent("enqueueRecurring", id, "async function");
-  }
-  return promise;
-}
-
-function AsyncFunctionPromiseRelease(promise) {
-  if (DEBUG_IS_ACTIVE) {
-    // Cancel
-    var id = GET_PRIVATE(promise, promiseAsyncStackIDSymbol);
-
-    // Don't send invalid events when catch prediction is turned on in
-    // the middle of some async operation.
-    if (!IS_UNDEFINED(id)) {
-      %DebugAsyncTaskEvent("cancel", id, "async function");
-    }
-    // Pop the Promise under construction in an async function on
-    // from catch prediction stack.
-    %DebugPopPromise();
-  }
-}
-
-%InstallToContext([
-  "async_function_await_caught", AsyncFunctionAwaitCaught,
-  "async_function_await_uncaught", AsyncFunctionAwaitUncaught,
-  "reject_promise_no_debug_event", RejectPromiseNoDebugEvent,
-  "async_function_promise_create", AsyncFunctionPromiseCreate,
-  "async_function_promise_release", AsyncFunctionPromiseRelease,
-]);
-
-})
diff --git a/src/js/collection.js b/src/js/collection.js
index a4ae904..adb2688 100644
--- a/src/js/collection.js
+++ b/src/js/collection.js
@@ -17,7 +17,6 @@
 var MathRandom = global.Math.random;
 var MapIterator;
 var SetIterator;
-var speciesSymbol = utils.ImportNow("species_symbol");
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
 utils.Import(function(from) {
@@ -251,12 +250,6 @@
   }
 }
 
-
-function SetSpecies() {
-  return this;
-}
-
-
 // -------------------------------------------------------------------
 
 %SetCode(GlobalSet, SetConstructor);
@@ -268,8 +261,6 @@
 
 %FunctionSetLength(SetForEach, 1);
 
-utils.InstallGetter(GlobalSet, speciesSymbol, SetSpecies);
-
 // Set up the non-enumerable functions on the Set prototype object.
 utils.InstallGetter(GlobalSet.prototype, "size", SetGetSize);
 utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
@@ -439,11 +430,6 @@
   }
 }
 
-
-function MapSpecies() {
-  return this;
-}
-
 // -------------------------------------------------------------------
 
 %SetCode(GlobalMap, MapConstructor);
@@ -455,8 +441,6 @@
 
 %FunctionSetLength(MapForEach, 1);
 
-utils.InstallGetter(GlobalMap, speciesSymbol, MapSpecies);
-
 // Set up the non-enumerable functions on the Map prototype object.
 utils.InstallGetter(GlobalMap.prototype, "size", MapGetSize);
 utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
diff --git a/src/js/datetime-format-to-parts.js b/src/js/datetime-format-to-parts.js
deleted file mode 100644
index 3194f50..0000000
--- a/src/js/datetime-format-to-parts.js
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalIntl = global.Intl;
-var FormatDateToParts = utils.ImportNow("FormatDateToParts");
-
-utils.InstallFunctions(GlobalIntl.DateTimeFormat.prototype,  DONT_ENUM, [
-    'formatToParts', FormatDateToParts
-]);
-})
diff --git a/src/js/harmony-atomics.js b/src/js/harmony-atomics.js
index bfbf0c5..daeba3f 100644
--- a/src/js/harmony-atomics.js
+++ b/src/js/harmony-atomics.js
@@ -13,10 +13,12 @@
 
 var GlobalObject = global.Object;
 var MaxSimple;
+var MinSimple;
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
 utils.Import(function(from) {
   MaxSimple = from.MaxSimple;
+  MinSimple = from.MinSimple;
 });
 
 // -------------------------------------------------------------------
@@ -101,7 +103,7 @@
 }
 
 function AtomicsIsLockFreeJS(size) {
-  return %_AtomicsIsLockFree(size);
+  return %_AtomicsIsLockFree(TO_INTEGER(size));
 }
 
 function AtomicsWaitJS(ia, index, value, timeout) {
@@ -123,7 +125,12 @@
 function AtomicsWakeJS(ia, index, count) {
   CheckSharedInteger32TypedArray(ia);
   index = ValidateIndex(index, %_TypedArrayGetLength(ia));
-  count = MaxSimple(0, TO_INTEGER(count));
+  if (IS_UNDEFINED(count)) {
+    count = kMaxUint32;
+  } else {
+    // Clamp to [0, kMaxUint32].
+    count = MinSimple(MaxSimple(0, TO_INTEGER(count)), kMaxUint32);
+  }
   return %AtomicsWake(ia, index, count);
 }
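
The new wake path treats an undefined count as "wake all waiters" (kMaxUint32) and otherwise truncates and clamps the argument to [0, kMaxUint32]. The same clamp as a standalone sketch (hypothetical helper, not the runtime call):

#include <cmath>
#include <cstdint>

// TO_INTEGER semantics: NaN becomes 0, otherwise truncate toward zero;
// then clamp the result to [0, 2^32 - 1].
uint32_t ClampWakeCount(double count) {
  if (std::isnan(count)) return 0;
  double n = std::trunc(count);
  if (n < 0.0) return 0;
  if (n > 4294967295.0) return UINT32_MAX;  // kMaxUint32
  return static_cast<uint32_t>(n);
}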
 
diff --git a/src/js/harmony-simd.js b/src/js/harmony-simd.js
deleted file mode 100644
index 0880b5b..0000000
--- a/src/js/harmony-simd.js
+++ /dev/null
@@ -1,923 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalSIMD = global.SIMD;
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-
-// -------------------------------------------------------------------
-
-macro SIMD_FLOAT_TYPES(FUNCTION)
-FUNCTION(Float32x4, float32x4, 4)
-endmacro
-
-macro SIMD_INT_TYPES(FUNCTION)
-FUNCTION(Int32x4, int32x4, 4)
-FUNCTION(Int16x8, int16x8, 8)
-FUNCTION(Int8x16, int8x16, 16)
-endmacro
-
-macro SIMD_UINT_TYPES(FUNCTION)
-FUNCTION(Uint32x4, uint32x4, 4)
-FUNCTION(Uint16x8, uint16x8, 8)
-FUNCTION(Uint8x16, uint8x16, 16)
-endmacro
-
-macro SIMD_BOOL_TYPES(FUNCTION)
-FUNCTION(Bool32x4, bool32x4, 4)
-FUNCTION(Bool16x8, bool16x8, 8)
-FUNCTION(Bool8x16, bool8x16, 16)
-endmacro
-
-macro SIMD_ALL_TYPES(FUNCTION)
-SIMD_FLOAT_TYPES(FUNCTION)
-SIMD_INT_TYPES(FUNCTION)
-SIMD_UINT_TYPES(FUNCTION)
-SIMD_BOOL_TYPES(FUNCTION)
-endmacro
-
-macro DECLARE_GLOBALS(NAME, TYPE, LANES)
-var GlobalNAME = GlobalSIMD.NAME;
-endmacro
-
-SIMD_ALL_TYPES(DECLARE_GLOBALS)
-
-macro DECLARE_COMMON_FUNCTIONS(NAME, TYPE, LANES)
-function NAMECheckJS(a) {
-  return %NAMECheck(a);
-}
-
-function NAMEToString() {
-  var value = %ValueOf(this);
-  if (typeof(value) !== 'TYPE') {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        "NAME.prototype.toString", this);
-  }
-  var str = "SIMD.NAME(";
-  str += %NAMEExtractLane(value, 0);
-  for (var i = 1; i < LANES; i++) {
-    str += ", " + %NAMEExtractLane(value, i);
-  }
-  return str + ")";
-}
-
-function NAMEToLocaleString() {
-  var value = %ValueOf(this);
-  if (typeof(value) !== 'TYPE') {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        "NAME.prototype.toLocaleString", this);
-  }
-  var str = "SIMD.NAME(";
-  str += %NAMEExtractLane(value, 0).toLocaleString();
-  for (var i = 1; i < LANES; i++) {
-    str += ", " + %NAMEExtractLane(value, i).toLocaleString();
-  }
-  return str + ")";
-}
-
-function NAMEValueOf() {
-  var value = %ValueOf(this);
-  if (typeof(value) !== 'TYPE') {
-    throw %make_type_error(kIncompatibleMethodReceiver,
-                        "NAME.prototype.valueOf", this);
-  }
-  return value;
-}
-
-function NAMEExtractLaneJS(instance, lane) {
-  return %NAMEExtractLane(instance, lane);
-}
-endmacro
-
-SIMD_ALL_TYPES(DECLARE_COMMON_FUNCTIONS)
-
-macro DECLARE_SHIFT_FUNCTIONS(NAME, TYPE, LANES)
-function NAMEShiftLeftByScalarJS(instance, shift) {
-  return %NAMEShiftLeftByScalar(instance, shift);
-}
-
-function NAMEShiftRightByScalarJS(instance, shift) {
-  return %NAMEShiftRightByScalar(instance, shift);
-}
-endmacro
-
-SIMD_INT_TYPES(DECLARE_SHIFT_FUNCTIONS)
-SIMD_UINT_TYPES(DECLARE_SHIFT_FUNCTIONS)
-
-macro SIMD_SMALL_INT_TYPES(FUNCTION)
-FUNCTION(Int16x8)
-FUNCTION(Int8x16)
-FUNCTION(Uint8x16)
-FUNCTION(Uint16x8)
-endmacro
-
-macro DECLARE_SMALL_INT_FUNCTIONS(NAME)
-function NAMEAddSaturateJS(a, b) {
-  return %NAMEAddSaturate(a, b);
-}
-
-function NAMESubSaturateJS(a, b) {
-  return %NAMESubSaturate(a, b);
-}
-endmacro
-
-SIMD_SMALL_INT_TYPES(DECLARE_SMALL_INT_FUNCTIONS)
-
-macro DECLARE_SIGNED_FUNCTIONS(NAME, TYPE, LANES)
-function NAMENegJS(a) {
-  return %NAMENeg(a);
-}
-endmacro
-
-SIMD_FLOAT_TYPES(DECLARE_SIGNED_FUNCTIONS)
-SIMD_INT_TYPES(DECLARE_SIGNED_FUNCTIONS)
-
-macro DECLARE_BOOL_FUNCTIONS(NAME, TYPE, LANES)
-function NAMEReplaceLaneJS(instance, lane, value) {
-  return %NAMEReplaceLane(instance, lane, value);
-}
-
-function NAMEAnyTrueJS(s) {
-  return %NAMEAnyTrue(s);
-}
-
-function NAMEAllTrueJS(s) {
-  return %NAMEAllTrue(s);
-}
-endmacro
-
-SIMD_BOOL_TYPES(DECLARE_BOOL_FUNCTIONS)
-
-macro SIMD_NUMERIC_TYPES(FUNCTION)
-SIMD_FLOAT_TYPES(FUNCTION)
-SIMD_INT_TYPES(FUNCTION)
-SIMD_UINT_TYPES(FUNCTION)
-endmacro
-
-macro DECLARE_NUMERIC_FUNCTIONS(NAME, TYPE, LANES)
-function NAMEReplaceLaneJS(instance, lane, value) {
-  return %NAMEReplaceLane(instance, lane, TO_NUMBER(value));
-}
-
-function NAMESelectJS(selector, a, b) {
-  return %NAMESelect(selector, a, b);
-}
-
-function NAMEAddJS(a, b) {
-  return %NAMEAdd(a, b);
-}
-
-function NAMESubJS(a, b) {
-  return %NAMESub(a, b);
-}
-
-function NAMEMulJS(a, b) {
-  return %NAMEMul(a, b);
-}
-
-function NAMEMinJS(a, b) {
-  return %NAMEMin(a, b);
-}
-
-function NAMEMaxJS(a, b) {
-  return %NAMEMax(a, b);
-}
-
-function NAMEEqualJS(a, b) {
-  return %NAMEEqual(a, b);
-}
-
-function NAMENotEqualJS(a, b) {
-  return %NAMENotEqual(a, b);
-}
-
-function NAMELessThanJS(a, b) {
-  return %NAMELessThan(a, b);
-}
-
-function NAMELessThanOrEqualJS(a, b) {
-  return %NAMELessThanOrEqual(a, b);
-}
-
-function NAMEGreaterThanJS(a, b) {
-  return %NAMEGreaterThan(a, b);
-}
-
-function NAMEGreaterThanOrEqualJS(a, b) {
-  return %NAMEGreaterThanOrEqual(a, b);
-}
-
-function NAMELoadJS(tarray, index) {
-  return %NAMELoad(tarray, index);
-}
-
-function NAMEStoreJS(tarray, index, a) {
-  return %NAMEStore(tarray, index, a);
-}
-endmacro
-
-SIMD_NUMERIC_TYPES(DECLARE_NUMERIC_FUNCTIONS)
-
-macro SIMD_LOGICAL_TYPES(FUNCTION)
-SIMD_INT_TYPES(FUNCTION)
-SIMD_UINT_TYPES(FUNCTION)
-SIMD_BOOL_TYPES(FUNCTION)
-endmacro
-
-macro DECLARE_LOGICAL_FUNCTIONS(NAME, TYPE, LANES)
-function NAMEAndJS(a, b) {
-  return %NAMEAnd(a, b);
-}
-
-function NAMEOrJS(a, b) {
-  return %NAMEOr(a, b);
-}
-
-function NAMEXorJS(a, b) {
-  return %NAMEXor(a, b);
-}
-
-function NAMENotJS(a) {
-  return %NAMENot(a);
-}
-endmacro
-
-SIMD_LOGICAL_TYPES(DECLARE_LOGICAL_FUNCTIONS)
-
-macro SIMD_FROM_TYPES(FUNCTION)
-FUNCTION(Float32x4, Int32x4)
-FUNCTION(Float32x4, Uint32x4)
-FUNCTION(Int32x4, Float32x4)
-FUNCTION(Int32x4, Uint32x4)
-FUNCTION(Uint32x4, Float32x4)
-FUNCTION(Uint32x4, Int32x4)
-FUNCTION(Int16x8, Uint16x8)
-FUNCTION(Uint16x8, Int16x8)
-FUNCTION(Int8x16, Uint8x16)
-FUNCTION(Uint8x16, Int8x16)
-endmacro
-
-macro DECLARE_FROM_FUNCTIONS(TO, FROM)
-function TOFromFROMJS(a) {
-  return %TOFromFROM(a);
-}
-endmacro
-
-SIMD_FROM_TYPES(DECLARE_FROM_FUNCTIONS)
-
-macro SIMD_FROM_BITS_TYPES(FUNCTION)
-FUNCTION(Float32x4, Int32x4)
-FUNCTION(Float32x4, Uint32x4)
-FUNCTION(Float32x4, Int16x8)
-FUNCTION(Float32x4, Uint16x8)
-FUNCTION(Float32x4, Int8x16)
-FUNCTION(Float32x4, Uint8x16)
-FUNCTION(Int32x4, Float32x4)
-FUNCTION(Int32x4, Uint32x4)
-FUNCTION(Int32x4, Int16x8)
-FUNCTION(Int32x4, Uint16x8)
-FUNCTION(Int32x4, Int8x16)
-FUNCTION(Int32x4, Uint8x16)
-FUNCTION(Uint32x4, Float32x4)
-FUNCTION(Uint32x4, Int32x4)
-FUNCTION(Uint32x4, Int16x8)
-FUNCTION(Uint32x4, Uint16x8)
-FUNCTION(Uint32x4, Int8x16)
-FUNCTION(Uint32x4, Uint8x16)
-FUNCTION(Int16x8, Float32x4)
-FUNCTION(Int16x8, Int32x4)
-FUNCTION(Int16x8, Uint32x4)
-FUNCTION(Int16x8, Uint16x8)
-FUNCTION(Int16x8, Int8x16)
-FUNCTION(Int16x8, Uint8x16)
-FUNCTION(Uint16x8, Float32x4)
-FUNCTION(Uint16x8, Int32x4)
-FUNCTION(Uint16x8, Uint32x4)
-FUNCTION(Uint16x8, Int16x8)
-FUNCTION(Uint16x8, Int8x16)
-FUNCTION(Uint16x8, Uint8x16)
-FUNCTION(Int8x16, Float32x4)
-FUNCTION(Int8x16, Int32x4)
-FUNCTION(Int8x16, Uint32x4)
-FUNCTION(Int8x16, Int16x8)
-FUNCTION(Int8x16, Uint16x8)
-FUNCTION(Int8x16, Uint8x16)
-FUNCTION(Uint8x16, Float32x4)
-FUNCTION(Uint8x16, Int32x4)
-FUNCTION(Uint8x16, Uint32x4)
-FUNCTION(Uint8x16, Int16x8)
-FUNCTION(Uint8x16, Uint16x8)
-FUNCTION(Uint8x16, Int8x16)
-endmacro
-
-macro DECLARE_FROM_BITS_FUNCTIONS(TO, FROM)
-function TOFromFROMBitsJS(a) {
-  return %TOFromFROMBits(a);
-}
-endmacro
-
-SIMD_FROM_BITS_TYPES(DECLARE_FROM_BITS_FUNCTIONS)
-
-
-macro SIMD_LOADN_STOREN_TYPES(FUNCTION)
-FUNCTION(Float32x4, 1)
-FUNCTION(Float32x4, 2)
-FUNCTION(Float32x4, 3)
-FUNCTION(Int32x4, 1)
-FUNCTION(Int32x4, 2)
-FUNCTION(Int32x4, 3)
-FUNCTION(Uint32x4, 1)
-FUNCTION(Uint32x4, 2)
-FUNCTION(Uint32x4, 3)
-endmacro
-
-macro DECLARE_LOADN_STOREN_FUNCTIONS(NAME, COUNT)
-function NAMELoadCOUNTJS(tarray, index) {
-  return %NAMELoadCOUNT(tarray, index);
-}
-
-function NAMEStoreCOUNTJS(tarray, index, a) {
-  return %NAMEStoreCOUNT(tarray, index, a);
-}
-endmacro
-
-SIMD_LOADN_STOREN_TYPES(DECLARE_LOADN_STOREN_FUNCTIONS)
-
-//-------------------------------------------------------------------
-
-macro SIMD_X4_TYPES(FUNCTION)
-FUNCTION(Float32x4)
-FUNCTION(Int32x4)
-FUNCTION(Uint32x4)
-FUNCTION(Bool32x4)
-endmacro
-
-macro DECLARE_X4_FUNCTIONS(NAME)
-function NAMESplat(s) {
-  return %CreateNAME(s, s, s, s);
-}
-
-function NAMESwizzleJS(a, c0, c1, c2, c3) {
-  return %NAMESwizzle(a, c0, c1, c2, c3);
-}
-
-function NAMEShuffleJS(a, b, c0, c1, c2, c3) {
-  return %NAMEShuffle(a, b, c0, c1, c2, c3);
-}
-endmacro
-
-SIMD_X4_TYPES(DECLARE_X4_FUNCTIONS)
-
-macro SIMD_X8_TYPES(FUNCTION)
-FUNCTION(Int16x8)
-FUNCTION(Uint16x8)
-FUNCTION(Bool16x8)
-endmacro
-
-macro DECLARE_X8_FUNCTIONS(NAME)
-function NAMESplat(s) {
-  return %CreateNAME(s, s, s, s, s, s, s, s);
-}
-
-function NAMESwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7) {
-  return %NAMESwizzle(a, c0, c1, c2, c3, c4, c5, c6, c7);
-}
-
-function NAMEShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7) {
-  return %NAMEShuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7);
-}
-endmacro
-
-SIMD_X8_TYPES(DECLARE_X8_FUNCTIONS)
-
-macro SIMD_X16_TYPES(FUNCTION)
-FUNCTION(Int8x16)
-FUNCTION(Uint8x16)
-FUNCTION(Bool8x16)
-endmacro
-
-macro DECLARE_X16_FUNCTIONS(NAME)
-function NAMESplat(s) {
-  return %CreateNAME(s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s);
-}
-
-function NAMESwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
-                          c12, c13, c14, c15) {
-  return %NAMESwizzle(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
-                         c12, c13, c14, c15);
-}
-
-function NAMEShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
-                             c11, c12, c13, c14, c15) {
-  return %NAMEShuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
-                            c11, c12, c13, c14, c15);
-}
-endmacro
-
-SIMD_X16_TYPES(DECLARE_X16_FUNCTIONS)
-
-//-------------------------------------------------------------------
-
-function Float32x4Constructor(c0, c1, c2, c3) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kNotConstructor, "Float32x4");
-  }
-  return %CreateFloat32x4(TO_NUMBER(c0), TO_NUMBER(c1),
-                          TO_NUMBER(c2), TO_NUMBER(c3));
-}
-
-
-function Int32x4Constructor(c0, c1, c2, c3) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kNotConstructor, "Int32x4");
-  }
-  return %CreateInt32x4(TO_NUMBER(c0), TO_NUMBER(c1),
-                        TO_NUMBER(c2), TO_NUMBER(c3));
-}
-
-
-function Uint32x4Constructor(c0, c1, c2, c3) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kNotConstructor, "Uint32x4");
-  }
-  return %CreateUint32x4(TO_NUMBER(c0), TO_NUMBER(c1),
-                         TO_NUMBER(c2), TO_NUMBER(c3));
-}
-
-
-function Bool32x4Constructor(c0, c1, c2, c3) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kNotConstructor, "Bool32x4");
-  }
-  return %CreateBool32x4(c0, c1, c2, c3);
-}
-
-
-function Int16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kNotConstructor, "Int16x8");
-  }
-  return %CreateInt16x8(TO_NUMBER(c0), TO_NUMBER(c1),
-                        TO_NUMBER(c2), TO_NUMBER(c3),
-                        TO_NUMBER(c4), TO_NUMBER(c5),
-                        TO_NUMBER(c6), TO_NUMBER(c7));
-}
-
-
-function Uint16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kNotConstructor, "Uint16x8");
-  }
-  return %CreateUint16x8(TO_NUMBER(c0), TO_NUMBER(c1),
-                         TO_NUMBER(c2), TO_NUMBER(c3),
-                         TO_NUMBER(c4), TO_NUMBER(c5),
-                         TO_NUMBER(c6), TO_NUMBER(c7));
-}
-
-
-function Bool16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kNotConstructor, "Bool16x8");
-  }
-  return %CreateBool16x8(c0, c1, c2, c3, c4, c5, c6, c7);
-}
-
-
-function Int8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
-                            c12, c13, c14, c15) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kNotConstructor, "Int8x16");
-  }
-  return %CreateInt8x16(TO_NUMBER(c0), TO_NUMBER(c1),
-                        TO_NUMBER(c2), TO_NUMBER(c3),
-                        TO_NUMBER(c4), TO_NUMBER(c5),
-                        TO_NUMBER(c6), TO_NUMBER(c7),
-                        TO_NUMBER(c8), TO_NUMBER(c9),
-                        TO_NUMBER(c10), TO_NUMBER(c11),
-                        TO_NUMBER(c12), TO_NUMBER(c13),
-                        TO_NUMBER(c14), TO_NUMBER(c15));
-}
-
-
-function Uint8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
-                             c12, c13, c14, c15) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kNotConstructor, "Uint8x16");
-  }
-  return %CreateUint8x16(TO_NUMBER(c0), TO_NUMBER(c1),
-                         TO_NUMBER(c2), TO_NUMBER(c3),
-                         TO_NUMBER(c4), TO_NUMBER(c5),
-                         TO_NUMBER(c6), TO_NUMBER(c7),
-                         TO_NUMBER(c8), TO_NUMBER(c9),
-                         TO_NUMBER(c10), TO_NUMBER(c11),
-                         TO_NUMBER(c12), TO_NUMBER(c13),
-                         TO_NUMBER(c14), TO_NUMBER(c15));
-}
-
-
-function Bool8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
-                             c12, c13, c14, c15) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kNotConstructor, "Bool8x16");
-  }
-  return %CreateBool8x16(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12,
-                         c13, c14, c15);
-}
-
-
-function Float32x4AbsJS(a) {
-  return %Float32x4Abs(a);
-}
-
-
-function Float32x4SqrtJS(a) {
-  return %Float32x4Sqrt(a);
-}
-
-
-function Float32x4RecipApproxJS(a) {
-  return %Float32x4RecipApprox(a);
-}
-
-
-function Float32x4RecipSqrtApproxJS(a) {
-  return %Float32x4RecipSqrtApprox(a);
-}
-
-
-function Float32x4DivJS(a, b) {
-  return %Float32x4Div(a, b);
-}
-
-
-function Float32x4MinNumJS(a, b) {
-  return %Float32x4MinNum(a, b);
-}
-
-
-function Float32x4MaxNumJS(a, b) {
-  return %Float32x4MaxNum(a, b);
-}
-
-
-%AddNamedProperty(GlobalSIMD, toStringTagSymbol, 'SIMD', READ_ONLY | DONT_ENUM);
-
-macro SETUP_SIMD_TYPE(NAME, TYPE, LANES)
-%SetCode(GlobalNAME, NAMEConstructor);
-%FunctionSetPrototype(GlobalNAME, {});
-%AddNamedProperty(GlobalNAME.prototype, 'constructor', GlobalNAME,
-    DONT_ENUM);
-%AddNamedProperty(GlobalNAME.prototype, toStringTagSymbol, 'NAME',
-    DONT_ENUM | READ_ONLY);
-utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
-  'toLocaleString', NAMEToLocaleString,
-  'toString', NAMEToString,
-  'valueOf', NAMEValueOf,
-]);
-endmacro
-
-SIMD_ALL_TYPES(SETUP_SIMD_TYPE)
-
-//-------------------------------------------------------------------
-
-utils.InstallFunctions(GlobalFloat32x4, DONT_ENUM, [
-  'splat', Float32x4Splat,
-  'check', Float32x4CheckJS,
-  'extractLane', Float32x4ExtractLaneJS,
-  'replaceLane', Float32x4ReplaceLaneJS,
-  'neg', Float32x4NegJS,
-  'abs', Float32x4AbsJS,
-  'sqrt', Float32x4SqrtJS,
-  'reciprocalApproximation', Float32x4RecipApproxJS,
-  'reciprocalSqrtApproximation', Float32x4RecipSqrtApproxJS,
-  'add', Float32x4AddJS,
-  'sub', Float32x4SubJS,
-  'mul', Float32x4MulJS,
-  'div', Float32x4DivJS,
-  'min', Float32x4MinJS,
-  'max', Float32x4MaxJS,
-  'minNum', Float32x4MinNumJS,
-  'maxNum', Float32x4MaxNumJS,
-  'lessThan', Float32x4LessThanJS,
-  'lessThanOrEqual', Float32x4LessThanOrEqualJS,
-  'greaterThan', Float32x4GreaterThanJS,
-  'greaterThanOrEqual', Float32x4GreaterThanOrEqualJS,
-  'equal', Float32x4EqualJS,
-  'notEqual', Float32x4NotEqualJS,
-  'select', Float32x4SelectJS,
-  'swizzle', Float32x4SwizzleJS,
-  'shuffle', Float32x4ShuffleJS,
-  'fromInt32x4', Float32x4FromInt32x4JS,
-  'fromUint32x4', Float32x4FromUint32x4JS,
-  'fromInt32x4Bits', Float32x4FromInt32x4BitsJS,
-  'fromUint32x4Bits', Float32x4FromUint32x4BitsJS,
-  'fromInt16x8Bits', Float32x4FromInt16x8BitsJS,
-  'fromUint16x8Bits', Float32x4FromUint16x8BitsJS,
-  'fromInt8x16Bits', Float32x4FromInt8x16BitsJS,
-  'fromUint8x16Bits', Float32x4FromUint8x16BitsJS,
-  'load', Float32x4LoadJS,
-  'load1', Float32x4Load1JS,
-  'load2', Float32x4Load2JS,
-  'load3', Float32x4Load3JS,
-  'store', Float32x4StoreJS,
-  'store1', Float32x4Store1JS,
-  'store2', Float32x4Store2JS,
-  'store3', Float32x4Store3JS,
-]);
-
-utils.InstallFunctions(GlobalInt32x4, DONT_ENUM, [
-  'splat', Int32x4Splat,
-  'check', Int32x4CheckJS,
-  'extractLane', Int32x4ExtractLaneJS,
-  'replaceLane', Int32x4ReplaceLaneJS,
-  'neg', Int32x4NegJS,
-  'add', Int32x4AddJS,
-  'sub', Int32x4SubJS,
-  'mul', Int32x4MulJS,
-  'min', Int32x4MinJS,
-  'max', Int32x4MaxJS,
-  'and', Int32x4AndJS,
-  'or', Int32x4OrJS,
-  'xor', Int32x4XorJS,
-  'not', Int32x4NotJS,
-  'shiftLeftByScalar', Int32x4ShiftLeftByScalarJS,
-  'shiftRightByScalar', Int32x4ShiftRightByScalarJS,
-  'lessThan', Int32x4LessThanJS,
-  'lessThanOrEqual', Int32x4LessThanOrEqualJS,
-  'greaterThan', Int32x4GreaterThanJS,
-  'greaterThanOrEqual', Int32x4GreaterThanOrEqualJS,
-  'equal', Int32x4EqualJS,
-  'notEqual', Int32x4NotEqualJS,
-  'select', Int32x4SelectJS,
-  'swizzle', Int32x4SwizzleJS,
-  'shuffle', Int32x4ShuffleJS,
-  'fromFloat32x4', Int32x4FromFloat32x4JS,
-  'fromUint32x4', Int32x4FromUint32x4JS,
-  'fromFloat32x4Bits', Int32x4FromFloat32x4BitsJS,
-  'fromUint32x4Bits', Int32x4FromUint32x4BitsJS,
-  'fromInt16x8Bits', Int32x4FromInt16x8BitsJS,
-  'fromUint16x8Bits', Int32x4FromUint16x8BitsJS,
-  'fromInt8x16Bits', Int32x4FromInt8x16BitsJS,
-  'fromUint8x16Bits', Int32x4FromUint8x16BitsJS,
-  'load', Int32x4LoadJS,
-  'load1', Int32x4Load1JS,
-  'load2', Int32x4Load2JS,
-  'load3', Int32x4Load3JS,
-  'store', Int32x4StoreJS,
-  'store1', Int32x4Store1JS,
-  'store2', Int32x4Store2JS,
-  'store3', Int32x4Store3JS,
-]);
-
-utils.InstallFunctions(GlobalUint32x4, DONT_ENUM, [
-  'splat', Uint32x4Splat,
-  'check', Uint32x4CheckJS,
-  'extractLane', Uint32x4ExtractLaneJS,
-  'replaceLane', Uint32x4ReplaceLaneJS,
-  'add', Uint32x4AddJS,
-  'sub', Uint32x4SubJS,
-  'mul', Uint32x4MulJS,
-  'min', Uint32x4MinJS,
-  'max', Uint32x4MaxJS,
-  'and', Uint32x4AndJS,
-  'or', Uint32x4OrJS,
-  'xor', Uint32x4XorJS,
-  'not', Uint32x4NotJS,
-  'shiftLeftByScalar', Uint32x4ShiftLeftByScalarJS,
-  'shiftRightByScalar', Uint32x4ShiftRightByScalarJS,
-  'lessThan', Uint32x4LessThanJS,
-  'lessThanOrEqual', Uint32x4LessThanOrEqualJS,
-  'greaterThan', Uint32x4GreaterThanJS,
-  'greaterThanOrEqual', Uint32x4GreaterThanOrEqualJS,
-  'equal', Uint32x4EqualJS,
-  'notEqual', Uint32x4NotEqualJS,
-  'select', Uint32x4SelectJS,
-  'swizzle', Uint32x4SwizzleJS,
-  'shuffle', Uint32x4ShuffleJS,
-  'fromFloat32x4', Uint32x4FromFloat32x4JS,
-  'fromInt32x4', Uint32x4FromInt32x4JS,
-  'fromFloat32x4Bits', Uint32x4FromFloat32x4BitsJS,
-  'fromInt32x4Bits', Uint32x4FromInt32x4BitsJS,
-  'fromInt16x8Bits', Uint32x4FromInt16x8BitsJS,
-  'fromUint16x8Bits', Uint32x4FromUint16x8BitsJS,
-  'fromInt8x16Bits', Uint32x4FromInt8x16BitsJS,
-  'fromUint8x16Bits', Uint32x4FromUint8x16BitsJS,
-  'load', Uint32x4LoadJS,
-  'load1', Uint32x4Load1JS,
-  'load2', Uint32x4Load2JS,
-  'load3', Uint32x4Load3JS,
-  'store', Uint32x4StoreJS,
-  'store1', Uint32x4Store1JS,
-  'store2', Uint32x4Store2JS,
-  'store3', Uint32x4Store3JS,
-]);
-
-utils.InstallFunctions(GlobalBool32x4, DONT_ENUM, [
-  'splat', Bool32x4Splat,
-  'check', Bool32x4CheckJS,
-  'extractLane', Bool32x4ExtractLaneJS,
-  'replaceLane', Bool32x4ReplaceLaneJS,
-  'and', Bool32x4AndJS,
-  'or', Bool32x4OrJS,
-  'xor', Bool32x4XorJS,
-  'not', Bool32x4NotJS,
-  'anyTrue', Bool32x4AnyTrueJS,
-  'allTrue', Bool32x4AllTrueJS,
-  'swizzle', Bool32x4SwizzleJS,
-  'shuffle', Bool32x4ShuffleJS,
-]);
-
-utils.InstallFunctions(GlobalInt16x8, DONT_ENUM, [
-  'splat', Int16x8Splat,
-  'check', Int16x8CheckJS,
-  'extractLane', Int16x8ExtractLaneJS,
-  'replaceLane', Int16x8ReplaceLaneJS,
-  'neg', Int16x8NegJS,
-  'add', Int16x8AddJS,
-  'sub', Int16x8SubJS,
-  'addSaturate', Int16x8AddSaturateJS,
-  'subSaturate', Int16x8SubSaturateJS,
-  'mul', Int16x8MulJS,
-  'min', Int16x8MinJS,
-  'max', Int16x8MaxJS,
-  'and', Int16x8AndJS,
-  'or', Int16x8OrJS,
-  'xor', Int16x8XorJS,
-  'not', Int16x8NotJS,
-  'shiftLeftByScalar', Int16x8ShiftLeftByScalarJS,
-  'shiftRightByScalar', Int16x8ShiftRightByScalarJS,
-  'lessThan', Int16x8LessThanJS,
-  'lessThanOrEqual', Int16x8LessThanOrEqualJS,
-  'greaterThan', Int16x8GreaterThanJS,
-  'greaterThanOrEqual', Int16x8GreaterThanOrEqualJS,
-  'equal', Int16x8EqualJS,
-  'notEqual', Int16x8NotEqualJS,
-  'select', Int16x8SelectJS,
-  'swizzle', Int16x8SwizzleJS,
-  'shuffle', Int16x8ShuffleJS,
-  'fromUint16x8', Int16x8FromUint16x8JS,
-  'fromFloat32x4Bits', Int16x8FromFloat32x4BitsJS,
-  'fromInt32x4Bits', Int16x8FromInt32x4BitsJS,
-  'fromUint32x4Bits', Int16x8FromUint32x4BitsJS,
-  'fromUint16x8Bits', Int16x8FromUint16x8BitsJS,
-  'fromInt8x16Bits', Int16x8FromInt8x16BitsJS,
-  'fromUint8x16Bits', Int16x8FromUint8x16BitsJS,
-  'load', Int16x8LoadJS,
-  'store', Int16x8StoreJS,
-]);
-
-utils.InstallFunctions(GlobalUint16x8, DONT_ENUM, [
-  'splat', Uint16x8Splat,
-  'check', Uint16x8CheckJS,
-  'extractLane', Uint16x8ExtractLaneJS,
-  'replaceLane', Uint16x8ReplaceLaneJS,
-  'add', Uint16x8AddJS,
-  'sub', Uint16x8SubJS,
-  'addSaturate', Uint16x8AddSaturateJS,
-  'subSaturate', Uint16x8SubSaturateJS,
-  'mul', Uint16x8MulJS,
-  'min', Uint16x8MinJS,
-  'max', Uint16x8MaxJS,
-  'and', Uint16x8AndJS,
-  'or', Uint16x8OrJS,
-  'xor', Uint16x8XorJS,
-  'not', Uint16x8NotJS,
-  'shiftLeftByScalar', Uint16x8ShiftLeftByScalarJS,
-  'shiftRightByScalar', Uint16x8ShiftRightByScalarJS,
-  'lessThan', Uint16x8LessThanJS,
-  'lessThanOrEqual', Uint16x8LessThanOrEqualJS,
-  'greaterThan', Uint16x8GreaterThanJS,
-  'greaterThanOrEqual', Uint16x8GreaterThanOrEqualJS,
-  'equal', Uint16x8EqualJS,
-  'notEqual', Uint16x8NotEqualJS,
-  'select', Uint16x8SelectJS,
-  'swizzle', Uint16x8SwizzleJS,
-  'shuffle', Uint16x8ShuffleJS,
-  'fromInt16x8', Uint16x8FromInt16x8JS,
-  'fromFloat32x4Bits', Uint16x8FromFloat32x4BitsJS,
-  'fromInt32x4Bits', Uint16x8FromInt32x4BitsJS,
-  'fromUint32x4Bits', Uint16x8FromUint32x4BitsJS,
-  'fromInt16x8Bits', Uint16x8FromInt16x8BitsJS,
-  'fromInt8x16Bits', Uint16x8FromInt8x16BitsJS,
-  'fromUint8x16Bits', Uint16x8FromUint8x16BitsJS,
-  'load', Uint16x8LoadJS,
-  'store', Uint16x8StoreJS,
-]);
-
-utils.InstallFunctions(GlobalBool16x8, DONT_ENUM, [
-  'splat', Bool16x8Splat,
-  'check', Bool16x8CheckJS,
-  'extractLane', Bool16x8ExtractLaneJS,
-  'replaceLane', Bool16x8ReplaceLaneJS,
-  'and', Bool16x8AndJS,
-  'or', Bool16x8OrJS,
-  'xor', Bool16x8XorJS,
-  'not', Bool16x8NotJS,
-  'anyTrue', Bool16x8AnyTrueJS,
-  'allTrue', Bool16x8AllTrueJS,
-  'swizzle', Bool16x8SwizzleJS,
-  'shuffle', Bool16x8ShuffleJS,
-]);
-
-utils.InstallFunctions(GlobalInt8x16, DONT_ENUM, [
-  'splat', Int8x16Splat,
-  'check', Int8x16CheckJS,
-  'extractLane', Int8x16ExtractLaneJS,
-  'replaceLane', Int8x16ReplaceLaneJS,
-  'neg', Int8x16NegJS,
-  'add', Int8x16AddJS,
-  'sub', Int8x16SubJS,
-  'addSaturate', Int8x16AddSaturateJS,
-  'subSaturate', Int8x16SubSaturateJS,
-  'mul', Int8x16MulJS,
-  'min', Int8x16MinJS,
-  'max', Int8x16MaxJS,
-  'and', Int8x16AndJS,
-  'or', Int8x16OrJS,
-  'xor', Int8x16XorJS,
-  'not', Int8x16NotJS,
-  'shiftLeftByScalar', Int8x16ShiftLeftByScalarJS,
-  'shiftRightByScalar', Int8x16ShiftRightByScalarJS,
-  'lessThan', Int8x16LessThanJS,
-  'lessThanOrEqual', Int8x16LessThanOrEqualJS,
-  'greaterThan', Int8x16GreaterThanJS,
-  'greaterThanOrEqual', Int8x16GreaterThanOrEqualJS,
-  'equal', Int8x16EqualJS,
-  'notEqual', Int8x16NotEqualJS,
-  'select', Int8x16SelectJS,
-  'swizzle', Int8x16SwizzleJS,
-  'shuffle', Int8x16ShuffleJS,
-  'fromUint8x16', Int8x16FromUint8x16JS,
-  'fromFloat32x4Bits', Int8x16FromFloat32x4BitsJS,
-  'fromInt32x4Bits', Int8x16FromInt32x4BitsJS,
-  'fromUint32x4Bits', Int8x16FromUint32x4BitsJS,
-  'fromInt16x8Bits', Int8x16FromInt16x8BitsJS,
-  'fromUint16x8Bits', Int8x16FromUint16x8BitsJS,
-  'fromUint8x16Bits', Int8x16FromUint8x16BitsJS,
-  'load', Int8x16LoadJS,
-  'store', Int8x16StoreJS,
-]);
-
-utils.InstallFunctions(GlobalUint8x16, DONT_ENUM, [
-  'splat', Uint8x16Splat,
-  'check', Uint8x16CheckJS,
-  'extractLane', Uint8x16ExtractLaneJS,
-  'replaceLane', Uint8x16ReplaceLaneJS,
-  'add', Uint8x16AddJS,
-  'sub', Uint8x16SubJS,
-  'addSaturate', Uint8x16AddSaturateJS,
-  'subSaturate', Uint8x16SubSaturateJS,
-  'mul', Uint8x16MulJS,
-  'min', Uint8x16MinJS,
-  'max', Uint8x16MaxJS,
-  'and', Uint8x16AndJS,
-  'or', Uint8x16OrJS,
-  'xor', Uint8x16XorJS,
-  'not', Uint8x16NotJS,
-  'shiftLeftByScalar', Uint8x16ShiftLeftByScalarJS,
-  'shiftRightByScalar', Uint8x16ShiftRightByScalarJS,
-  'lessThan', Uint8x16LessThanJS,
-  'lessThanOrEqual', Uint8x16LessThanOrEqualJS,
-  'greaterThan', Uint8x16GreaterThanJS,
-  'greaterThanOrEqual', Uint8x16GreaterThanOrEqualJS,
-  'equal', Uint8x16EqualJS,
-  'notEqual', Uint8x16NotEqualJS,
-  'select', Uint8x16SelectJS,
-  'swizzle', Uint8x16SwizzleJS,
-  'shuffle', Uint8x16ShuffleJS,
-  'fromInt8x16', Uint8x16FromInt8x16JS,
-  'fromFloat32x4Bits', Uint8x16FromFloat32x4BitsJS,
-  'fromInt32x4Bits', Uint8x16FromInt32x4BitsJS,
-  'fromUint32x4Bits', Uint8x16FromUint32x4BitsJS,
-  'fromInt16x8Bits', Uint8x16FromInt16x8BitsJS,
-  'fromUint16x8Bits', Uint8x16FromUint16x8BitsJS,
-  'fromInt8x16Bits', Uint8x16FromInt8x16BitsJS,
-  'load', Uint8x16LoadJS,
-  'store', Uint8x16StoreJS,
-]);
-
-utils.InstallFunctions(GlobalBool8x16, DONT_ENUM, [
-  'splat', Bool8x16Splat,
-  'check', Bool8x16CheckJS,
-  'extractLane', Bool8x16ExtractLaneJS,
-  'replaceLane', Bool8x16ReplaceLaneJS,
-  'and', Bool8x16AndJS,
-  'or', Bool8x16OrJS,
-  'xor', Bool8x16XorJS,
-  'not', Bool8x16NotJS,
-  'anyTrue', Bool8x16AnyTrueJS,
-  'allTrue', Bool8x16AllTrueJS,
-  'swizzle', Bool8x16SwizzleJS,
-  'shuffle', Bool8x16ShuffleJS,
-]);
-
-})
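
This deletes the SIMD.js bindings wholesale; the proposal was abandoned in favor of WebAssembly SIMD, so global.SIMD is no longer installed at bootstrap. A hedged sketch of what guarded consumer code observes after the removal (the fallback branch is illustrative):

    if (typeof SIMD !== 'undefined' && SIMD.Float32x4) {
      var v = SIMD.Float32x4(1, 2, 3, 4);  // never reached once the global is gone
    } else {
      var v = [1, 2, 3, 4];                // scalar fallback now always runs
    }
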
diff --git a/src/js/i18n.js b/src/js/i18n.js
index b051b09..e6b8ba5 100644
--- a/src/js/i18n.js
+++ b/src/js/i18n.js
@@ -20,9 +20,15 @@
 var ArrayJoin;
 var ArrayPush;
 var GlobalDate = global.Date;
+var GlobalIntl = global.Intl;
+var GlobalIntlDateTimeFormat = GlobalIntl.DateTimeFormat;
+var GlobalIntlNumberFormat = GlobalIntl.NumberFormat;
+var GlobalIntlCollator = GlobalIntl.Collator;
+var GlobalIntlv8BreakIterator = GlobalIntl.v8BreakIterator;
 var GlobalNumber = global.Number;
 var GlobalRegExp = global.RegExp;
 var GlobalString = global.String;
+var IntlFallbackSymbol = utils.ImportNow("intl_fallback_symbol");
 var InstallFunctions = utils.InstallFunctions;
 var InstallGetter = utils.InstallGetter;
 var InternalArray = utils.InternalArray;
@@ -46,18 +52,11 @@
 }
 
 
-function InstallConstructor(object, name, func) {
-  %CheckIsBootstrapping();
-  SetFunctionName(func, name);
-  %AddNamedProperty(object, name, func, DONT_ENUM);
-  %SetNativeFlag(func);
-  %ToFastProperties(object);
-}
-
 /**
  * Adds bound method to the prototype of the given object.
  */
-function AddBoundMethod(obj, methodName, implementation, length, type) {
+function AddBoundMethod(obj, methodName, implementation, length, typename,
+                        compat) {
   %CheckIsBootstrapping();
   var internalName = %CreatePrivateSymbol(methodName);
   // Making getter an anonymous function will cause
@@ -66,32 +65,30 @@
   // than (as utils.InstallGetter would) on the SharedFunctionInfo
   // associated with all functions returned from AddBoundMethod.
   var getter = ANONYMOUS_FUNCTION(function() {
-    if (!%IsInitializedIntlObjectOfType(this, type)) {
-      throw %make_type_error(kMethodCalledOnWrongObject, methodName);
-    }
-    if (IS_UNDEFINED(this[internalName])) {
+    var receiver = Unwrap(this, typename, obj, methodName, compat);
+    if (IS_UNDEFINED(receiver[internalName])) {
       var boundMethod;
       if (IS_UNDEFINED(length) || length === 2) {
         boundMethod =
-          ANONYMOUS_FUNCTION((fst, snd) => implementation(this, fst, snd));
+          ANONYMOUS_FUNCTION((fst, snd) => implementation(receiver, fst, snd));
       } else if (length === 1) {
-        boundMethod = ANONYMOUS_FUNCTION(fst => implementation(this, fst));
+        boundMethod = ANONYMOUS_FUNCTION(fst => implementation(receiver, fst));
       } else {
         boundMethod = ANONYMOUS_FUNCTION((...args) => {
           // DateTimeFormat.format needs to be 0 arg method, but can still
           // receive an optional dateValue param. If one was provided, pass it
           // along.
           if (args.length > 0) {
-            return implementation(this, args[0]);
+            return implementation(receiver, args[0]);
           } else {
-            return implementation(this);
+            return implementation(receiver);
           }
         });
       }
       %SetNativeFlag(boundMethod);
-      this[internalName] = boundMethod;
+      receiver[internalName] = boundMethod;
     }
-    return this[internalName];
+    return receiver[internalName];
   });
 
   %FunctionRemovePrototype(getter);
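
The rewritten getter routes every access through Unwrap (introduced in the next hunk), so a method read off a legacy-constructed wrapper binds to the hidden fallback instance instead of throwing, while the bound function is still created lazily and cached per receiver under a private symbol. A plain-JS sketch of the same shape, with userland stand-ins for the native pieces (all names here are hypothetical):

    const proto = {};                                 // e.g. a Collator.prototype stand-in
    const unwrap = (o) => o;                          // real version may return a fallback
    const compare = (c, x, y) => x.localeCompare(y);  // real version calls %InternalCompare
    const internal = Symbol('compare');               // stand-in for %CreatePrivateSymbol
    Object.defineProperty(proto, 'compare', {
      get() {
        const receiver = unwrap(this);
        if (receiver[internal] === undefined) {
          // Bound lazily, once per receiver, so repeated reads return a stable identity.
          receiver[internal] = (x, y) => compare(receiver, String(x), String(y));
        }
        return receiver[internal];
      }
    });
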
@@ -99,12 +96,45 @@
   %SetNativeFlag(getter);
 }
 
+function IntlConstruct(receiver, constructor, create, newTarget, args,
+                       compat) {
+  var locales = args[0];
+  var options = args[1];
+
+  if (IS_UNDEFINED(newTarget)) {
+    if (compat && receiver instanceof constructor) {
+      let success = %object_define_property(receiver, IntlFallbackSymbol,
+                           { value: new constructor(locales, options) });
+      if (!success) {
+        throw %make_type_error(kReinitializeIntl, constructor);
+      }
+      return receiver;
+    }
+
+    return new constructor(locales, options);
+  }
+
+  return create(locales, options);
+}
+
+
+
+function Unwrap(receiver, typename, constructor, method, compat) {
+  if (!%IsInitializedIntlObjectOfType(receiver, typename)) {
+    if (compat && receiver instanceof constructor) {
+      let fallback = receiver[IntlFallbackSymbol];
+      if (%IsInitializedIntlObjectOfType(fallback, typename)) {
+        return fallback;
+      }
+    }
+    throw %make_type_error(kIncompatibleMethodReceiver, method, receiver);
+  }
+  return receiver;
+}
+
+
 // -------------------------------------------------------------------
 
-var Intl = {};
-
-%AddNamedProperty(global, "Intl", Intl, DONT_ENUM);
-
 /**
  * Caches available locales for each service.
  */
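
IntlConstruct and Unwrap together implement ECMA-402's legacy constructor compatibility: when a compat constructor is called without new on a receiver that inherits from its prototype, a real instance is stashed under IntlFallbackSymbol and the receiver itself is returned; Unwrap later forwards method calls to that hidden instance. A usage sketch of the pattern this keeps working (standard, if obscure, API behavior):

    var obj = Object.create(Intl.NumberFormat.prototype);
    var nf = Intl.NumberFormat.call(obj);   // no new.target: installs the fallback
    nf === obj;                             // true, the receiver itself comes back
    nf.format(1234.5);                      // Unwrap forwards to the hidden instance
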
@@ -123,6 +153,13 @@
 function GetDefaultICULocaleJS() {
   if (IS_UNDEFINED(DEFAULT_ICU_LOCALE)) {
     DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
+    // Check that this is a valid default, otherwise fall back to "und"
+    for (let service in AVAILABLE_LOCALES) {
+      if (IS_UNDEFINED(getAvailableLocalesOf(service)[DEFAULT_ICU_LOCALE])) {
+        DEFAULT_ICU_LOCALE = "und";
+        break;
+      }
+    }
   }
   return DEFAULT_ICU_LOCALE;
 }
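
The added loop guards against ICU reporting a default locale that the built-in service tables cannot actually serve; the default then degrades to 'und', the BCP 47 undetermined tag, rather than leaking an unusable tag into resolution. The check modeled in isolation (the table shape is assumed):

    // availableBy[service] maps supported tags to truthy entries (assumed shape).
    function pickDefault(icuDefault, availableBy) {
      for (var service in availableBy) {
        if (availableBy[service][icuDefault] === undefined) return 'und';
      }
      return icuDefault;
    }
    pickDefault('en-US', { collator: { 'en-US': true } });   // 'en-US'
    pickDefault('xx-XX', { collator: { 'en-US': true } });   // 'und'
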
@@ -268,19 +305,16 @@
 
   var requestedLocales = initializeLocaleList(locales);
 
-  // Cache these, they don't ever change per service.
-  if (IS_UNDEFINED(AVAILABLE_LOCALES[service])) {
-    AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
-  }
+  var availableLocales = getAvailableLocalesOf(service);
 
   // Use either best fit or lookup algorithm to match locales.
   if (matcher === 'best fit') {
     return initializeLocaleList(bestFitSupportedLocalesOf(
-        requestedLocales, AVAILABLE_LOCALES[service]));
+        requestedLocales, availableLocales));
   }
 
   return initializeLocaleList(lookupSupportedLocalesOf(
-      requestedLocales, AVAILABLE_LOCALES[service]));
+      requestedLocales, availableLocales));
 }
 
 
@@ -407,17 +441,14 @@
     throw %make_error(kWrongServiceType, service);
   }
 
-  // Cache these, they don't ever change per service.
-  if (IS_UNDEFINED(AVAILABLE_LOCALES[service])) {
-    AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
-  }
+  var availableLocales = getAvailableLocalesOf(service);
 
   for (var i = 0; i < requestedLocales.length; ++i) {
     // Remove all extensions.
     var locale = %RegExpInternalReplace(
         GetAnyExtensionRE(), requestedLocales[i], '');
     do {
-      if (!IS_UNDEFINED(AVAILABLE_LOCALES[service][locale])) {
+      if (!IS_UNDEFINED(availableLocales[locale])) {
         // Return the resolved locale and extension.
         var extensionMatch = %regexp_internal_match(
             GetUnicodeExtensionRE(), requestedLocales[i]);
@@ -628,6 +659,11 @@
  * that is supported. This is required by the spec.
  */
 function getAvailableLocalesOf(service) {
+  // Cache these, they don't ever change per service.
+  if (!IS_UNDEFINED(AVAILABLE_LOCALES[service])) {
+    return AVAILABLE_LOCALES[service];
+  }
+
   var available = %AvailableLocalesOf(service);
 
   for (var i in available) {
@@ -642,6 +678,8 @@
     }
   }
 
+  AVAILABLE_LOCALES[service] = available;
+
   return available;
 }
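
These hunks move the per-service cache out of the two call sites and into getAvailableLocalesOf itself, so every caller shares one memoized table. The equivalent shape in plain JS (computeAvailableLocales is a hypothetical stand-in for the %AvailableLocalesOf walk above):

    var cache = Object.create(null);
    function computeAvailableLocales(service) { return { 'en-US': true }; }  // stand-in
    function availableLocalesOf(service) {
      if (cache[service] !== undefined) return cache[service];
      return (cache[service] = computeAvailableLocales(service));
    }
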
 
@@ -693,8 +731,8 @@
  * Returns titlecased word, aMeRricA -> America.
  */
 function toTitleCaseWord(word) {
-  return %StringToUpperCase(%_Call(StringSubstr, word, 0, 1)) +
-         %StringToLowerCase(%_Call(StringSubstr, word, 1));
+  return %StringToUpperCaseI18N(%_Call(StringSubstr, word, 0, 1)) +
+         %StringToLowerCaseI18N(%_Call(StringSubstr, word, 1));
 }
 
 /**
@@ -715,7 +753,7 @@
     var parts = %StringSplit(match[2], separator, kMaxUint32);
     for (var i = 1; i < parts.length; i++) {
       var part = parts[i]
-      var lowercasedPart = %StringToLowerCase(part);
+      var lowercasedPart = %StringToLowerCaseI18N(part);
       result = result + separator +
           ((lowercasedPart !== 'es' &&
             lowercasedPart !== 'of' && lowercasedPart !== 'au') ?
@@ -821,6 +859,8 @@
     return false;
   }
 
+  locale = %StringToLowerCaseI18N(locale);
+
   // Just return if it's a x- form. It's all private.
   if (%StringIndexOf(locale, 'x-', 0) === 0) {
     return true;
@@ -911,11 +951,7 @@
 };
 
 // ECMA 402 section 8.2.1
-InstallFunction(Intl, 'getCanonicalLocales', function(locales) {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
+InstallFunction(GlobalIntl, 'getCanonicalLocales', function(locales) {
     return makeArray(canonicalizeLocaleList(locales));
   }
 );
@@ -924,11 +960,7 @@
  * Initializes the given object so it's a valid Collator instance.
  * Useful for subclassing.
  */
-function initializeCollator(collator, locales, options) {
-  if (%IsInitializedIntlObject(collator)) {
-    throw %make_type_error(kReinitializeIntl, "Collator");
-  }
-
+function CreateCollator(locales, options) {
   if (IS_UNDEFINED(options)) {
     options = {};
   }
@@ -1015,12 +1047,9 @@
     usage: {value: internalOptions.usage, writable: true}
   });
 
-  var internalCollator = %CreateCollator(requestedLocale,
-                                         internalOptions,
-                                         resolved);
+  var collator = %CreateCollator(requestedLocale, internalOptions, resolved);
 
-  // Writable, configurable and enumerable are set to false by default.
-  %MarkAsInitializedIntlObjectOfType(collator, 'collator', internalCollator);
+  %MarkAsInitializedIntlObjectOfType(collator, 'collator');
   collator[resolvedSymbol] = resolved;
 
   return collator;
@@ -1033,33 +1062,19 @@
  *
  * @constructor
  */
-InstallConstructor(Intl, 'Collator', function() {
-    var locales = arguments[0];
-    var options = arguments[1];
-
-    if (!this || this === Intl) {
-      // Constructor is called as a function.
-      return new Intl.Collator(locales, options);
-    }
-
-    return initializeCollator(TO_OBJECT(this), locales, options);
-  }
-);
+function CollatorConstructor() {
+  return IntlConstruct(this, GlobalIntlCollator, CreateCollator, new.target,
+                       arguments);
+}
+%SetCode(GlobalIntlCollator, CollatorConstructor);
 
 
 /**
  * Collator resolvedOptions method.
  */
-InstallFunction(Intl.Collator.prototype, 'resolvedOptions', function() {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
-    if (!%IsInitializedIntlObjectOfType(this, 'collator')) {
-      throw %make_type_error(kResolvedOptionsCalledOnNonObject, "Collator");
-    }
-
-    var coll = this;
+InstallFunction(GlobalIntlCollator.prototype, 'resolvedOptions', function() {
+    var coll = Unwrap(this, 'collator', GlobalIntlCollator, 'resolvedOptions',
+                      false);
     var locale = getOptimalLanguageTag(coll[resolvedSymbol].requestedLocale,
                                        coll[resolvedSymbol].locale);
 
@@ -1082,11 +1097,7 @@
  * order in the returned list as in the input list.
  * Options are optional parameter.
  */
-InstallFunction(Intl.Collator, 'supportedLocalesOf', function(locales) {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
+InstallFunction(GlobalIntlCollator, 'supportedLocalesOf', function(locales) {
     return supportedLocalesOf('collator', locales, arguments[1]);
   }
 );
@@ -1103,12 +1114,11 @@
  * the sort order, or x comes after y in the sort order, respectively.
  */
 function compare(collator, x, y) {
-  return %InternalCompare(%GetImplFromInitializedIntlObject(collator),
-                          TO_STRING(x), TO_STRING(y));
+  return %InternalCompare(collator, TO_STRING(x), TO_STRING(y));
 };
 
 
-AddBoundMethod(Intl.Collator, 'compare', compare, 2, 'collator');
+AddBoundMethod(GlobalIntlCollator, 'compare', compare, 2, 'collator', false);
 
 /**
  * Verifies that the input is a well-formed ISO 4217 currency code.
@@ -1116,7 +1126,7 @@
  * For example \u00DFP (Eszett+P) becomes SSP.
  */
 function isWellFormedCurrencyCode(currency) {
-  return typeof currency == "string" && currency.length == 3 &&
+  return typeof currency === "string" && currency.length === 3 &&
       IS_NULL(%regexp_internal_match(/[^A-Za-z]/, currency));
 }
 
@@ -1152,11 +1162,7 @@
  * Initializes the given object so it's a valid NumberFormat instance.
  * Useful for subclassing.
  */
-function initializeNumberFormat(numberFormat, locales, options) {
-  if (%IsInitializedIntlObject(numberFormat)) {
-    throw %make_type_error(kReinitializeIntl, "NumberFormat");
-  }
-
+function CreateNumberFormat(locales, options) {
   if (IS_UNDEFINED(options)) {
     options = {};
   }
@@ -1181,7 +1187,7 @@
   var currencyDisplay = getOption(
       'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
   if (internalOptions.style === 'currency') {
-    defineWEProperty(internalOptions, 'currency', %StringToUpperCase(currency));
+    defineWEProperty(internalOptions, 'currency', %StringToUpperCaseI18N(currency));
     defineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
   }
 
@@ -1207,7 +1213,7 @@
   var mnsd = options['minimumSignificantDigits'];
   var mxsd = options['maximumSignificantDigits'];
   if (!IS_UNDEFINED(mnsd) || !IS_UNDEFINED(mxsd)) {
-    mnsd = getNumberOption(options, 'minimumSignificantDigits', 1, 21, 0);
+    mnsd = getNumberOption(options, 'minimumSignificantDigits', 1, 21, 1);
     defineWEProperty(internalOptions, 'minimumSignificantDigits', mnsd);
 
     mxsd = getNumberOption(options, 'maximumSignificantDigits', mnsd, 21, 21);
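
The fallback for minimumSignificantDigits changes from 0 to 1, matching the spec's GetNumberOption(options, "minimumSignificantDigits", 1, 21, 1): the valid range starts at 1, so 0 was never a legal resolved value. The observable effect through the standard API:

    var ro = new Intl.NumberFormat('en', { maximumSignificantDigits: 3 }).resolvedOptions();
    ro.minimumSignificantDigits;   // 1 after this fix; the old fallback surfaced 0
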
@@ -1252,16 +1258,15 @@
   if (HAS_OWN_PROPERTY(internalOptions, 'maximumSignificantDigits')) {
     defineWEProperty(resolved, 'maximumSignificantDigits', UNDEFINED);
   }
-  var formatter = %CreateNumberFormat(requestedLocale,
-                                      internalOptions,
-                                      resolved);
+  var numberFormat = %CreateNumberFormat(requestedLocale, internalOptions,
+                                         resolved);
 
   if (internalOptions.style === 'currency') {
     %object_define_property(resolved, 'currencyDisplay',
         {value: currencyDisplay, writable: true});
   }
 
-  %MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat', formatter);
+  %MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat');
   numberFormat[resolvedSymbol] = resolved;
 
   return numberFormat;
@@ -1274,33 +1279,20 @@
  *
  * @constructor
  */
-InstallConstructor(Intl, 'NumberFormat', function() {
-    var locales = arguments[0];
-    var options = arguments[1];
-
-    if (!this || this === Intl) {
-      // Constructor is called as a function.
-      return new Intl.NumberFormat(locales, options);
-    }
-
-    return initializeNumberFormat(TO_OBJECT(this), locales, options);
-  }
-);
+function NumberFormatConstructor() {
+  return IntlConstruct(this, GlobalIntlNumberFormat, CreateNumberFormat,
+                       new.target, arguments, true);
+}
+%SetCode(GlobalIntlNumberFormat, NumberFormatConstructor);
 
 
 /**
  * NumberFormat resolvedOptions method.
  */
-InstallFunction(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
-    if (!%IsInitializedIntlObjectOfType(this, 'numberformat')) {
-      throw %make_type_error(kResolvedOptionsCalledOnNonObject, "NumberFormat");
-    }
-
-    var format = this;
+InstallFunction(GlobalIntlNumberFormat.prototype, 'resolvedOptions',
+  function() {
+    var format = Unwrap(this, 'numberformat', GlobalIntlNumberFormat,
+                        'resolvedOptions', true);
     var locale = getOptimalLanguageTag(format[resolvedSymbol].requestedLocale,
                                        format[resolvedSymbol].locale);
 
@@ -1341,11 +1333,8 @@
  * order in the returned list as in the input list.
  * Options are optional parameter.
  */
-InstallFunction(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
+InstallFunction(GlobalIntlNumberFormat, 'supportedLocalesOf',
+  function(locales) {
     return supportedLocalesOf('numberformat', locales, arguments[1]);
   }
 );
@@ -1360,12 +1349,12 @@
   // Spec treats -0 and +0 as 0.
   var number = TO_NUMBER(value) + 0;
 
-  return %InternalNumberFormat(%GetImplFromInitializedIntlObject(formatter),
-                               number);
+  return %InternalNumberFormat(formatter, number);
 }
 
 
-AddBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1, 'numberformat');
+AddBoundMethod(GlobalIntlNumberFormat, 'format', formatNumber, 1,
+               'numberformat', true);
 
 /**
  * Returns a string that matches LDML representation of the options object.
@@ -1518,6 +1507,8 @@
     options = TO_OBJECT(options);
   }
 
+  options = %object_create(options);
+
   var needsDefault = true;
   if ((required === 'date' || required === 'any') &&
       (!IS_UNDEFINED(options.weekday) || !IS_UNDEFINED(options.year) ||
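
toDateTimeOptions now operates on %object_create(options), a fresh object whose prototype is the caller's options: reads of the date/time fields still fall through to the caller's values, but the defaults written when needsDefault holds land on the derived object, so the caller's object is no longer mutated. The same pattern in plain JS (a simplified sketch, not the full option walk):

    function toDateTimeOptionsSketch(userOptions) {
      var options = Object.create(userOptions);   // reads fall through, writes stay local
      if (options.year === undefined && options.month === undefined &&
          options.day === undefined) {
        options.year = options.month = options.day = 'numeric';
      }
      return options;
    }
    var mine = {};
    toDateTimeOptionsSketch(mine).year;   // 'numeric'
    mine.year;                            // undefined: the caller's object is untouched
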
@@ -1569,12 +1560,7 @@
  * Initializes the given object so it's a valid DateTimeFormat instance.
  * Useful for subclassing.
  */
-function initializeDateTimeFormat(dateFormat, locales, options) {
-
-  if (%IsInitializedIntlObject(dateFormat)) {
-    throw %make_type_error(kReinitializeIntl, "DateTimeFormat");
-  }
-
+function CreateDateTimeFormat(locales, options) {
   if (IS_UNDEFINED(options)) {
     options = {};
   }
@@ -1636,14 +1622,14 @@
     year: {writable: true}
   });
 
-  var formatter = %CreateDateTimeFormat(
+  var dateFormat = %CreateDateTimeFormat(
     requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
 
   if (resolved.timeZone === "Etc/Unknown") {
     throw %make_range_error(kUnsupportedTimeZone, tz);
   }
 
-  %MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat', formatter);
+  %MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat');
   dateFormat[resolvedSymbol] = resolved;
 
   return dateFormat;
@@ -1656,31 +1642,20 @@
  *
  * @constructor
  */
-InstallConstructor(Intl, 'DateTimeFormat', function() {
-    var locales = arguments[0];
-    var options = arguments[1];
-
-    if (!this || this === Intl) {
-      // Constructor is called as a function.
-      return new Intl.DateTimeFormat(locales, options);
-    }
-
-    return initializeDateTimeFormat(TO_OBJECT(this), locales, options);
-  }
-);
+function DateTimeFormatConstructor() {
+  return IntlConstruct(this, GlobalIntlDateTimeFormat, CreateDateTimeFormat,
+                       new.target, arguments, true);
+}
+%SetCode(GlobalIntlDateTimeFormat, DateTimeFormatConstructor);
 
 
 /**
  * DateTimeFormat resolvedOptions method.
  */
-InstallFunction(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
-    if (!%IsInitializedIntlObjectOfType(this, 'dateformat')) {
-      throw %make_type_error(kResolvedOptionsCalledOnNonObject, "DateTimeFormat");
-    }
+InstallFunction(GlobalIntlDateTimeFormat.prototype, 'resolvedOptions',
+  function() {
+    var format = Unwrap(this, 'dateformat', GlobalIntlDateTimeFormat,
+                        'resolvedOptions', true);
 
     /**
      * Maps ICU calendar names to LDML/BCP47 types for key 'ca'.
@@ -1693,7 +1668,6 @@
       'ethiopic-amete-alem': 'ethioaa'
     };
 
-    var format = this;
     var fromPattern = fromLDMLString(format[resolvedSymbol][patternSymbol]);
     var userCalendar = ICU_CALENDAR_MAP[format[resolvedSymbol].calendar];
     if (IS_UNDEFINED(userCalendar)) {
@@ -1733,11 +1707,8 @@
  * order in the returned list as in the input list.
  * Options are optional parameter.
  */
-InstallFunction(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
+InstallFunction(GlobalIntlDateTimeFormat, 'supportedLocalesOf',
+  function(locales) {
     return supportedLocalesOf('dateformat', locales, arguments[1]);
   }
 );
@@ -1758,18 +1729,19 @@
 
   if (!NUMBER_IS_FINITE(dateMs)) throw %make_range_error(kDateRange);
 
-  return %InternalDateFormat(%GetImplFromInitializedIntlObject(formatter),
-                             new GlobalDate(dateMs));
+  return %InternalDateFormat(formatter, new GlobalDate(dateMs));
 }
 
 function FormatDateToParts(dateValue) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-  }
   CHECK_OBJECT_COERCIBLE(this, "Intl.DateTimeFormat.prototype.formatToParts");
   if (!IS_OBJECT(this)) {
     throw %make_type_error(kCalledOnNonObject, this);
   }
+  if (!%IsInitializedIntlObjectOfType(this, 'dateformat')) {
+    throw %make_type_error(kIncompatibleMethodReceiver,
+                          'Intl.DateTimeFormat.prototype.formatToParts',
+                          this);
+  }
   var dateMs;
   if (IS_UNDEFINED(dateValue)) {
     dateMs = %DateCurrentTime();
@@ -1779,15 +1751,15 @@
 
   if (!NUMBER_IS_FINITE(dateMs)) throw %make_range_error(kDateRange);
 
-  return %InternalDateFormatToParts(
-      %GetImplFromInitializedIntlObject(this), new GlobalDate(dateMs));
+  return %InternalDateFormatToParts(this, new GlobalDate(dateMs));
 }
 
 %FunctionSetLength(FormatDateToParts, 0);
 
 
 // 0 because date is optional argument.
-AddBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0, 'dateformat');
+AddBoundMethod(GlobalIntlDateTimeFormat, 'format', formatDate, 0, 'dateformat',
+               true);
 
 
 /**
@@ -1804,7 +1776,7 @@
   tzID = TO_STRING(tzID);
 
   // Special case handling (UTC, GMT).
-  var upperID = %StringToUpperCase(tzID);
+  var upperID = %StringToUpperCaseI18N(tzID);
   if (upperID === 'UTC' || upperID === 'GMT' ||
       upperID === 'ETC/UTC' || upperID === 'ETC/GMT') {
     return 'UTC';
@@ -1835,11 +1807,7 @@
  * Initializes the given object so it's a valid BreakIterator instance.
  * Useful for subclassing.
  */
-function initializeBreakIterator(iterator, locales, options) {
-  if (%IsInitializedIntlObject(iterator)) {
-    throw %make_type_error(kReinitializeIntl, "v8BreakIterator");
-  }
-
+function CreateBreakIterator(locales, options) {
   if (IS_UNDEFINED(options)) {
     options = {};
   }
@@ -1858,12 +1826,9 @@
     locale: {writable: true}
   });
 
-  var internalIterator = %CreateBreakIterator(locale.locale,
-                                              internalOptions,
-                                              resolved);
+  var iterator = %CreateBreakIterator(locale.locale, internalOptions, resolved);
 
-  %MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator',
-                                     internalIterator);
+  %MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator');
   iterator[resolvedSymbol] = resolved;
 
   return iterator;
@@ -1876,34 +1841,25 @@
  *
  * @constructor
  */
-InstallConstructor(Intl, 'v8BreakIterator', function() {
-    var locales = arguments[0];
-    var options = arguments[1];
-
-    if (!this || this === Intl) {
-      // Constructor is called as a function.
-      return new Intl.v8BreakIterator(locales, options);
-    }
-
-    return initializeBreakIterator(TO_OBJECT(this), locales, options);
-  }
-);
+function v8BreakIteratorConstructor() {
+  return IntlConstruct(this, GlobalIntlv8BreakIterator, CreateBreakIterator,
+                       new.target, arguments);
+}
+%SetCode(GlobalIntlv8BreakIterator, v8BreakIteratorConstructor);
 
 
 /**
  * BreakIterator resolvedOptions method.
  */
-InstallFunction(Intl.v8BreakIterator.prototype, 'resolvedOptions',
+InstallFunction(GlobalIntlv8BreakIterator.prototype, 'resolvedOptions',
   function() {
     if (!IS_UNDEFINED(new.target)) {
       throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
     }
 
-    if (!%IsInitializedIntlObjectOfType(this, 'breakiterator')) {
-      throw %make_type_error(kResolvedOptionsCalledOnNonObject, "v8BreakIterator");
-    }
+    var segmenter = Unwrap(this, 'breakiterator', GlobalIntlv8BreakIterator,
+                           'resolvedOptions', false);
 
-    var segmenter = this;
     var locale =
         getOptimalLanguageTag(segmenter[resolvedSymbol].requestedLocale,
                               segmenter[resolvedSymbol].locale);
@@ -1922,7 +1878,7 @@
  * order in the returned list as in the input list.
  * Options are optional parameter.
  */
-InstallFunction(Intl.v8BreakIterator, 'supportedLocalesOf',
+InstallFunction(GlobalIntlv8BreakIterator, 'supportedLocalesOf',
   function(locales) {
     if (!IS_UNDEFINED(new.target)) {
       throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
@@ -1938,8 +1894,7 @@
  * gets discarded.
  */
 function adoptText(iterator, text) {
-  %BreakIteratorAdoptText(%GetImplFromInitializedIntlObject(iterator),
-                          TO_STRING(text));
+  %BreakIteratorAdoptText(iterator, TO_STRING(text));
 }
 
 
@@ -1947,7 +1902,7 @@
  * Returns index of the first break in the string and moves current pointer.
  */
 function first(iterator) {
-  return %BreakIteratorFirst(%GetImplFromInitializedIntlObject(iterator));
+  return %BreakIteratorFirst(iterator);
 }
 
 
@@ -1955,7 +1910,7 @@
  * Returns the index of the next break and moves the pointer.
  */
 function next(iterator) {
-  return %BreakIteratorNext(%GetImplFromInitializedIntlObject(iterator));
+  return %BreakIteratorNext(iterator);
 }
 
 
@@ -1963,7 +1918,7 @@
  * Returns index of the current break.
  */
 function current(iterator) {
-  return %BreakIteratorCurrent(%GetImplFromInitializedIntlObject(iterator));
+  return %BreakIteratorCurrent(iterator);
 }
 
 
@@ -1971,25 +1926,26 @@
  * Returns type of the current break.
  */
 function breakType(iterator) {
-  return %BreakIteratorBreakType(%GetImplFromInitializedIntlObject(iterator));
+  return %BreakIteratorBreakType(iterator);
 }
 
 
-AddBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1,
+AddBoundMethod(GlobalIntlv8BreakIterator, 'adoptText', adoptText, 1,
                'breakiterator');
-AddBoundMethod(Intl.v8BreakIterator, 'first', first, 0, 'breakiterator');
-AddBoundMethod(Intl.v8BreakIterator, 'next', next, 0, 'breakiterator');
-AddBoundMethod(Intl.v8BreakIterator, 'current', current, 0, 'breakiterator');
-AddBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0,
+AddBoundMethod(GlobalIntlv8BreakIterator, 'first', first, 0, 'breakiterator');
+AddBoundMethod(GlobalIntlv8BreakIterator, 'next', next, 0, 'breakiterator');
+AddBoundMethod(GlobalIntlv8BreakIterator, 'current', current, 0,
+               'breakiterator');
+AddBoundMethod(GlobalIntlv8BreakIterator, 'breakType', breakType, 0,
                'breakiterator');
 
 // Save references to Intl objects and methods we use, for added security.
 var savedObjects = {
-  'collator': Intl.Collator,
-  'numberformat': Intl.NumberFormat,
-  'dateformatall': Intl.DateTimeFormat,
-  'dateformatdate': Intl.DateTimeFormat,
-  'dateformattime': Intl.DateTimeFormat
+  'collator': GlobalIntlCollator,
+  'numberformat': GlobalIntlNumberFormat,
+  'dateformatall': GlobalIntlDateTimeFormat,
+  'dateformatdate': GlobalIntlDateTimeFormat,
+  'dateformattime': GlobalIntlDateTimeFormat
 };
 
 
@@ -2054,18 +2010,11 @@
 
   // StringSplit is slower than this.
   var pos = %StringIndexOf(language, '-', 0);
-  if (pos != -1) {
+  if (pos !== -1) {
     language = %_Call(StringSubstring, language, 0, pos);
   }
 
-  var CUSTOM_CASE_LANGUAGES = ['az', 'el', 'lt', 'tr'];
-  var langIndex = %ArrayIndexOf(CUSTOM_CASE_LANGUAGES, language, 0);
-  if (langIndex == -1) {
-    // language-independent case conversion.
-    return isToUpper ? %StringToUpperCaseI18N(s) : %StringToLowerCaseI18N(s);
-  }
-  return %StringLocaleConvertCase(s, isToUpper,
-                                  CUSTOM_CASE_LANGUAGES[langIndex]);
+  return %StringLocaleConvertCase(s, isToUpper, language);
 }
 
 /**
@@ -2073,10 +2022,6 @@
  * Overrides the built-in method.
  */
 OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
     if (IS_NULL_OR_UNDEFINED(this)) {
       throw %make_type_error(kMethodInvokedOnNullOrUndefined);
     }
@@ -2098,10 +2043,6 @@
  */
 
 OverrideFunction(GlobalString.prototype, 'normalize', function() {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
     CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
     var s = TO_STRING(this);
 
@@ -2120,28 +2061,18 @@
   }
 );
 
+// TODO(littledan): Rewrite these two functions as C++ builtins
 function ToLowerCaseI18N() {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-  }
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
-  var s = TO_STRING(this);
-  return %StringToLowerCaseI18N(s);
+  return %StringToLowerCaseI18N(TO_STRING(this));
 }
 
 function ToUpperCaseI18N() {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-  }
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.toUpperCase");
-  var s = TO_STRING(this);
-  return %StringToUpperCaseI18N(s);
+  return %StringToUpperCaseI18N(TO_STRING(this));
 }
 
 function ToLocaleLowerCaseI18N(locales) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-  }
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
   return LocaleConvertCase(TO_STRING(this), locales, false);
 }
@@ -2149,37 +2080,18 @@
 %FunctionSetLength(ToLocaleLowerCaseI18N, 0);
 
 function ToLocaleUpperCaseI18N(locales) {
-  if (!IS_UNDEFINED(new.target)) {
-    throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-  }
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
   return LocaleConvertCase(TO_STRING(this), locales, true);
 }
 
 %FunctionSetLength(ToLocaleUpperCaseI18N, 0);
 
-%FunctionRemovePrototype(ToLowerCaseI18N);
-%FunctionRemovePrototype(ToUpperCaseI18N);
-%FunctionRemovePrototype(ToLocaleLowerCaseI18N);
-%FunctionRemovePrototype(ToLocaleUpperCaseI18N);
-
-utils.Export(function(to) {
-  to.ToLowerCaseI18N = ToLowerCaseI18N;
-  to.ToUpperCaseI18N = ToUpperCaseI18N;
-  to.ToLocaleLowerCaseI18N = ToLocaleLowerCaseI18N;
-  to.ToLocaleUpperCaseI18N = ToLocaleUpperCaseI18N;
-});
-
 
 /**
  * Formats a Number object (this) using locale and options values.
  * If locale or options are omitted, defaults are used.
  */
 OverrideFunction(GlobalNumber.prototype, 'toLocaleString', function() {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
     if (!(this instanceof GlobalNumber) && typeof(this) !== 'number') {
       throw %make_type_error(kMethodInvokedOnWrongType, "Number");
     }
@@ -2218,10 +2130,6 @@
  * present in the output.
  */
 OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
     var locales = arguments[0];
     var options = arguments[1];
     return toLocaleDateTime(
@@ -2236,10 +2144,6 @@
  * in the output.
  */
 OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
     var locales = arguments[0];
     var options = arguments[1];
     return toLocaleDateTime(
@@ -2254,10 +2158,6 @@
  * in the output.
  */
 OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
-    if (!IS_UNDEFINED(new.target)) {
-      throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
-    }
-
     var locales = arguments[0];
     var options = arguments[1];
     return toLocaleDateTime(
@@ -2266,9 +2166,23 @@
 );
 
 %FunctionRemovePrototype(FormatDateToParts);
+%FunctionRemovePrototype(ToLowerCaseI18N);
+%FunctionRemovePrototype(ToUpperCaseI18N);
+%FunctionRemovePrototype(ToLocaleLowerCaseI18N);
+%FunctionRemovePrototype(ToLocaleUpperCaseI18N);
+
+utils.SetFunctionName(FormatDateToParts, "formatToParts");
+utils.SetFunctionName(ToLowerCaseI18N, "toLowerCase");
+utils.SetFunctionName(ToUpperCaseI18N, "toUpperCase");
+utils.SetFunctionName(ToLocaleLowerCaseI18N, "toLocaleLowerCase");
+utils.SetFunctionName(ToLocaleUpperCaseI18N, "toLocaleUpperCase");
 
 utils.Export(function(to) {
   to.FormatDateToParts = FormatDateToParts;
+  to.ToLowerCaseI18N = ToLowerCaseI18N;
+  to.ToUpperCaseI18N = ToUpperCaseI18N;
+  to.ToLocaleLowerCaseI18N = ToLocaleLowerCaseI18N;
+  to.ToLocaleUpperCaseI18N = ToLocaleUpperCaseI18N;
 });
 
 })
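
For reference, the guard deleted throughout the case-mapping and toLocale* hunks above is the same three-line check each time. In plain JavaScript the removed pattern amounts to the sketch below; the function name is illustrative, not from the patch.

function toUpperCaseGuardExample() {
  // The IS_UNDEFINED(new.target) check deleted above: bail out when an
  // ordinary method is invoked as a constructor.
  if (new.target !== undefined) {
    throw new TypeError("Ordinary function called as constructor");
  }
  // ... method body ...
}
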
diff --git a/src/js/icu-case-mapping.js b/src/js/icu-case-mapping.js
deleted file mode 100644
index 9806249..0000000
--- a/src/js/icu-case-mapping.js
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-"use strict";
-
-%CheckIsBootstrapping();
-
-var GlobalString = global.String;
-var OverrideFunction = utils.OverrideFunction;
-var ToLowerCaseI18N = utils.ImportNow("ToLowerCaseI18N");
-var ToUpperCaseI18N = utils.ImportNow("ToUpperCaseI18N");
-var ToLocaleLowerCaseI18N = utils.ImportNow("ToLocaleLowerCaseI18N");
-var ToLocaleUpperCaseI18N = utils.ImportNow("ToLocaleUpperCaseI18N");
-
-OverrideFunction(GlobalString.prototype, 'toLowerCase', ToLowerCaseI18N, true);
-OverrideFunction(GlobalString.prototype, 'toUpperCase', ToUpperCaseI18N, true);
-OverrideFunction(GlobalString.prototype, 'toLocaleLowerCase',
-                 ToLocaleLowerCaseI18N, true);
-OverrideFunction(GlobalString.prototype, 'toLocaleUpperCase',
-                 ToLocaleUpperCaseI18N, true);
-
-})
diff --git a/src/js/macros.py b/src/js/macros.py
index 5ad578a..f10da42 100644
--- a/src/js/macros.py
+++ b/src/js/macros.py
@@ -60,12 +60,10 @@
 macro IS_NUMBER(arg)            = (typeof(arg) === 'number');
 macro IS_OBJECT(arg)            = (typeof(arg) === 'object');
 macro IS_PROXY(arg)             = (%_IsJSProxy(arg));
-macro IS_REGEXP(arg)            = (%_IsRegExp(arg));
 macro IS_SCRIPT(arg)            = (%_ClassOf(arg) === 'Script');
 macro IS_SET(arg)               = (%_ClassOf(arg) === 'Set');
 macro IS_SET_ITERATOR(arg)      = (%_ClassOf(arg) === 'Set Iterator');
 macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
-macro IS_SIMD_VALUE(arg)        = (%IsSimdValue(arg));
 macro IS_STRING(arg)            = (typeof(arg) === 'string');
 macro IS_SYMBOL(arg)            = (typeof(arg) === 'symbol');
 macro IS_TYPEDARRAY(arg)        = (%_IsTypedArray(arg));
@@ -141,11 +139,11 @@
 # TODO(adamk): Find a more robust way to force Smi representation.
 macro FIXED_ARRAY_SET_SMI(array, index, value) = (FIXED_ARRAY_SET(array, index, (value) | 0));
 
-macro ORDERED_HASH_TABLE_BUCKET_COUNT(table) = (FIXED_ARRAY_GET(table, 0));
-macro ORDERED_HASH_TABLE_ELEMENT_COUNT(table) = (FIXED_ARRAY_GET(table, 1));
-macro ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 1, count));
-macro ORDERED_HASH_TABLE_DELETED_COUNT(table) = (FIXED_ARRAY_GET(table, 2));
-macro ORDERED_HASH_TABLE_SET_DELETED_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 2, count));
+macro ORDERED_HASH_TABLE_BUCKET_COUNT(table) = (FIXED_ARRAY_GET(table, 2));
+macro ORDERED_HASH_TABLE_ELEMENT_COUNT(table) = (FIXED_ARRAY_GET(table, 0));
+macro ORDERED_HASH_TABLE_SET_ELEMENT_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 0, count));
+macro ORDERED_HASH_TABLE_DELETED_COUNT(table) = (FIXED_ARRAY_GET(table, 1));
+macro ORDERED_HASH_TABLE_SET_DELETED_COUNT(table, count) = (FIXED_ARRAY_SET_SMI(table, 1, count));
 macro ORDERED_HASH_TABLE_BUCKET_AT(table, bucket) = (FIXED_ARRAY_GET(table, 3 + (bucket)));
 macro ORDERED_HASH_TABLE_SET_BUCKET_AT(table, bucket, entry) = (FIXED_ARRAY_SET(table, 3 + (bucket), entry));
 
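The reshuffled ORDERED_HASH_TABLE_* macros above encode a new header layout for the table's backing fixed array: the element count moves to slot 0, the deleted count to slot 1, and the bucket count to slot 2, while buckets still begin at slot 3. A minimal plain-JavaScript sketch of reading that header, with array indexing standing in for FIXED_ARRAY_GET (helper name illustrative):

function orderedHashTableCounts(table) {
  return {
    elementCount: table[0],  // ORDERED_HASH_TABLE_ELEMENT_COUNT
    deletedCount: table[1],  // ORDERED_HASH_TABLE_DELETED_COUNT
    bucketCount:  table[2],  // ORDERED_HASH_TABLE_BUCKET_COUNT
  };
}
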
diff --git a/src/js/prologue.js b/src/js/prologue.js
index dba77d7..4b40d9a 100644
--- a/src/js/prologue.js
+++ b/src/js/prologue.js
@@ -112,20 +112,6 @@
 }
 
 
-// Helper function to install a getter/setter accessor property.
-function InstallGetterSetter(object, name, getter, setter, attributes) {
-  %CheckIsBootstrapping();
-  if (IS_UNDEFINED(attributes)) attributes = DONT_ENUM;
-  SetFunctionName(getter, name, "get");
-  SetFunctionName(setter, name, "set");
-  %FunctionRemovePrototype(getter);
-  %FunctionRemovePrototype(setter);
-  %DefineAccessorPropertyUnchecked(object, name, getter, setter, attributes);
-  %SetNativeFlag(getter);
-  %SetNativeFlag(setter);
-}
-
-
 function OverrideFunction(object, name, f, afterInitialBootstrap) {
   %CheckIsBootstrapping();
   %object_define_property(object, name, { value: f,
@@ -183,11 +169,9 @@
   var expose_list = [
     "FormatDateToParts",
     "MapEntries",
-    "MapIterator",
     "MapIteratorNext",
     "MaxSimple",
     "MinSimple",
-    "SetIterator",
     "SetIteratorNext",
     "SetValues",
     "ToLocaleLowerCaseI18N",
@@ -217,7 +201,6 @@
 
 function PostExperimentals(utils) {
   %CheckIsBootstrapping();
-  %ExportExperimentalFromRuntime(exports_container);
   for ( ; !IS_UNDEFINED(imports); imports = imports.next) {
     imports(exports_container);
   }
@@ -270,7 +253,6 @@
 utils.InstallConstants = InstallConstants;
 utils.InstallFunctions = InstallFunctions;
 utils.InstallGetter = InstallGetter;
-utils.InstallGetterSetter = InstallGetterSetter;
 utils.OverrideFunction = OverrideFunction;
 utils.SetUpLockedPrototype = SetUpLockedPrototype;
 utils.PostNatives = PostNatives;
@@ -281,7 +263,7 @@
 
 // -----------------------------------------------------------------------
 
-%OptimizeObjectForAddingMultipleProperties(extrasUtils, 5);
+%OptimizeObjectForAddingMultipleProperties(extrasUtils, 7);
 
 extrasUtils.logStackTrace = function logStackTrace() {
   %DebugTrace();
@@ -322,6 +304,15 @@
   };
 };
 
+// We pass true to trigger the debugger's on-exception handler.
+extrasUtils.rejectPromise = function rejectPromise(promise, reason) {
+  %promise_internal_reject(promise, reason, true);
+};
+
+extrasUtils.markPromiseAsHandled = function markPromiseAsHandled(promise) {
+  %PromiseMarkAsHandled(promise);
+};
+
 %ToFastProperties(extrasUtils);
 
 })
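
The count passed to %OptimizeObjectForAddingMultipleProperties grows from 5 to 7 to match the two helpers added to extrasUtils above. A hedged usage sketch from an embedder extra, assuming a createPromise helper is still installed on the same bindings object elsewhere:

function makeHandledRejection(extrasUtils, error) {
  var promise = extrasUtils.createPromise();   // assumed helper
  extrasUtils.rejectPromise(promise, error);   // debugEvent = true, see above
  extrasUtils.markPromiseAsHandled(promise);   // no unhandled-rejection report
  return promise;
}
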
diff --git a/src/js/promise.js b/src/js/promise.js
index 0b37c64..27571da 100644
--- a/src/js/promise.js
+++ b/src/js/promise.js
@@ -12,423 +12,17 @@
 // Imports
 
 var InternalArray = utils.InternalArray;
-var promiseAsyncStackIDSymbol =
-    utils.ImportNow("promise_async_stack_id_symbol");
 var promiseHandledBySymbol =
     utils.ImportNow("promise_handled_by_symbol");
 var promiseForwardingHandlerSymbol =
     utils.ImportNow("promise_forwarding_handler_symbol");
-var promiseHasHandlerSymbol =
-    utils.ImportNow("promise_has_handler_symbol");
-var promiseRejectReactionsSymbol =
-    utils.ImportNow("promise_reject_reactions_symbol");
-var promiseFulfillReactionsSymbol =
-    utils.ImportNow("promise_fulfill_reactions_symbol");
-var promiseDeferredReactionSymbol =
-    utils.ImportNow("promise_deferred_reaction_symbol");
-var promiseHandledHintSymbol =
-    utils.ImportNow("promise_handled_hint_symbol");
-var promiseRawSymbol = utils.ImportNow("promise_raw_symbol");
-var promiseStateSymbol = utils.ImportNow("promise_state_symbol");
-var promiseResultSymbol = utils.ImportNow("promise_result_symbol");
-var SpeciesConstructor;
-var speciesSymbol = utils.ImportNow("species_symbol");
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-var ObjectHasOwnProperty;
-
-utils.Import(function(from) {
-  ObjectHasOwnProperty = from.ObjectHasOwnProperty;
-  SpeciesConstructor = from.SpeciesConstructor;
-});
-
-// -------------------------------------------------------------------
-
-// [[PromiseState]] values:
-// These values should be kept in sync with PromiseStatus in globals.h
-const kPending = 0;
-const kFulfilled = +1;
-const kRejected = +2;
-
-const kResolveCallback = 0;
-const kRejectCallback = 1;
-
-// ES#sec-promise-executor
-// Promise ( executor )
-var GlobalPromise = function Promise(executor) {
-  if (executor === promiseRawSymbol) {
-    return %_NewObject(GlobalPromise, new.target);
-  }
-  if (IS_UNDEFINED(new.target)) throw %make_type_error(kNotAPromise, this);
-  if (!IS_CALLABLE(executor)) {
-    throw %make_type_error(kResolverNotAFunction, executor);
-  }
-
-  var promise = PromiseInit(%_NewObject(GlobalPromise, new.target));
-  // Calling the reject function would be a new exception, so debugEvent = true
-  // TODO(gsathya): Remove container for callbacks when this is moved
-  // to CPP/TF.
-  var callbacks = %create_resolving_functions(promise, true);
-  var debug_is_active = DEBUG_IS_ACTIVE;
-  try {
-    if (debug_is_active) %DebugPushPromise(promise);
-    executor(callbacks[kResolveCallback], callbacks[kRejectCallback]);
-  } %catch (e) {  // Natives syntax to mark this catch block.
-    %_Call(callbacks[kRejectCallback], UNDEFINED, e);
-  } finally {
-    if (debug_is_active) %DebugPopPromise();
-  }
-
-  return promise;
-}
-
-// Core functionality.
-
-function PromiseSet(promise, status, value) {
-  SET_PRIVATE(promise, promiseStateSymbol, status);
-  SET_PRIVATE(promise, promiseResultSymbol, value);
-
-  // There are 3 possible states for the resolve, reject symbols when we add
-  // a new callback --
-  // 1) UNDEFINED -- This is the zero state where there is no callback
-  // registered. When we see this state, we directly attach the callbacks to
-  // the symbol.
-  // 2) !IS_ARRAY -- There is a single callback directly attached to the
-  // symbols. We need to create a new array to store additional callbacks.
-  // 3) IS_ARRAY -- There are multiple callbacks already registered,
-  // therefore we can just push the new callback to the existing array.
-  SET_PRIVATE(promise, promiseFulfillReactionsSymbol, UNDEFINED);
-  SET_PRIVATE(promise, promiseRejectReactionsSymbol, UNDEFINED);
-
-  // This symbol is used only when one deferred needs to be attached. When more
-  // than one deferred need to be attached the promise, we attach them directly
-  // to the promiseFulfillReactionsSymbol and promiseRejectReactionsSymbol and
-  // reset this back to UNDEFINED.
-  SET_PRIVATE(promise, promiseDeferredReactionSymbol, UNDEFINED);
-
-  return promise;
-}
-
-function PromiseCreateAndSet(status, value) {
-  var promise = new GlobalPromise(promiseRawSymbol);
-  // If debug is active, notify about the newly created promise first.
-  if (DEBUG_IS_ACTIVE) PromiseSet(promise, kPending, UNDEFINED);
-  return PromiseSet(promise, status, value);
-}
-
-function PromiseInit(promise) {
-  return PromiseSet(promise, kPending, UNDEFINED);
-}
-
-function PromiseHandle(value, handler, deferred) {
-  var debug_is_active = DEBUG_IS_ACTIVE;
-  try {
-    if (debug_is_active) %DebugPushPromise(deferred.promise);
-    var result = handler(value);
-    if (IS_UNDEFINED(deferred.resolve)) {
-      ResolvePromise(deferred.promise, result);
-    } else {
-      %_Call(deferred.resolve, UNDEFINED, result);
-    }
-  } %catch (exception) {  // Natives syntax to mark this catch block.
-    try {
-      if (IS_UNDEFINED(deferred.reject)) {
-        // Pass false for debugEvent so .then chaining does not trigger
-        // redundant ExceptionEvents.
-        %PromiseReject(deferred.promise, exception, false);
-        PromiseSet(deferred.promise, kRejected, exception);
-      } else {
-        %_Call(deferred.reject, UNDEFINED, exception);
-      }
-    } catch (e) { }
-  } finally {
-    if (debug_is_active) %DebugPopPromise();
-  }
-}
-
-function PromiseDebugGetInfo(deferreds, status) {
-  var id, name, instrumenting = DEBUG_IS_ACTIVE;
-
-  if (instrumenting) {
-    // In an async function, reuse the existing stack related to the outer
-    // Promise. Otherwise, e.g. in a direct call to then, save a new stack.
-    // Promises with multiple reactions with one or more of them being async
-    // functions will not get a good stack trace, as async functions require
-    // different stacks from direct Promise use, but we save and restore a
-    // stack once for all reactions. TODO(littledan): Improve this case.
-    if (!IS_UNDEFINED(deferreds) &&
-        HAS_PRIVATE(deferreds.promise, promiseHandledBySymbol) &&
-        HAS_PRIVATE(GET_PRIVATE(deferreds.promise, promiseHandledBySymbol),
-                    promiseAsyncStackIDSymbol)) {
-      id = GET_PRIVATE(GET_PRIVATE(deferreds.promise, promiseHandledBySymbol),
-                       promiseAsyncStackIDSymbol);
-      name = "async function";
-    } else {
-      id = %DebugNextMicrotaskId();
-      name = status === kFulfilled ? "Promise.resolve" : "Promise.reject";
-      %DebugAsyncTaskEvent("enqueue", id, name);
-    }
-  }
-  return [id, name];
-}
-
-function PromiseAttachCallbacks(promise, deferred, onResolve, onReject) {
-  var maybeResolveCallbacks =
-      GET_PRIVATE(promise, promiseFulfillReactionsSymbol);
-  if (IS_UNDEFINED(maybeResolveCallbacks)) {
-    SET_PRIVATE(promise, promiseFulfillReactionsSymbol, onResolve);
-    SET_PRIVATE(promise, promiseRejectReactionsSymbol, onReject);
-    SET_PRIVATE(promise, promiseDeferredReactionSymbol, deferred);
-  } else if (!IS_ARRAY(maybeResolveCallbacks)) {
-    var resolveCallbacks = new InternalArray();
-    var rejectCallbacks = new InternalArray();
-    var existingDeferred = GET_PRIVATE(promise, promiseDeferredReactionSymbol);
-
-    resolveCallbacks.push(
-        maybeResolveCallbacks, existingDeferred, onResolve, deferred);
-    rejectCallbacks.push(GET_PRIVATE(promise, promiseRejectReactionsSymbol),
-                         existingDeferred,
-                         onReject,
-                         deferred);
-
-    SET_PRIVATE(promise, promiseFulfillReactionsSymbol, resolveCallbacks);
-    SET_PRIVATE(promise, promiseRejectReactionsSymbol, rejectCallbacks);
-    SET_PRIVATE(promise, promiseDeferredReactionSymbol, UNDEFINED);
-  } else {
-    maybeResolveCallbacks.push(onResolve, deferred);
-    GET_PRIVATE(promise, promiseRejectReactionsSymbol).push(onReject, deferred);
-  }
-}
-
-function PromiseIdResolveHandler(x) { return x; }
-function PromiseIdRejectHandler(r) { %_ReThrow(r); }
-SET_PRIVATE(PromiseIdRejectHandler, promiseForwardingHandlerSymbol, true);
+var GlobalPromise = global.Promise;
 
 // -------------------------------------------------------------------
 // Define exported functions.
 
-// For bootstrapper.
-
-// ES#sec-ispromise IsPromise ( x )
-function IsPromise(x) {
-  return IS_RECEIVER(x) && HAS_DEFINED_PRIVATE(x, promiseStateSymbol);
-}
-
-function PromiseCreate() {
-  return PromiseInit(new GlobalPromise(promiseRawSymbol));
-}
-
-// ES#sec-promise-resolve-functions
-// Promise Resolve Functions, steps 6-13
-function ResolvePromise(promise, resolution) {
-  if (resolution === promise) {
-    var exception = %make_type_error(kPromiseCyclic, resolution);
-    %PromiseReject(promise, exception, true);
-    PromiseSet(promise, kRejected, exception);
-    return;
-  }
-  if (IS_RECEIVER(resolution)) {
-    // 25.4.1.3.2 steps 8-12
-    try {
-      var then = resolution.then;
-    } catch (e) {
-      %PromiseReject(promise, e, true);
-      PromiseSet(promise, kRejected, e);
-      return;
-    }
-
-    // Resolution is a native promise and if it's already resolved or
-    // rejected, shortcircuit the resolution procedure by directly
-    // reusing the value from the promise.
-    if (IsPromise(resolution) && then === PromiseThen) {
-      var thenableState = GET_PRIVATE(resolution, promiseStateSymbol);
-      if (thenableState === kFulfilled) {
-        // This goes inside the if-else to save one symbol lookup in
-        // the slow path.
-        var thenableValue = GET_PRIVATE(resolution, promiseResultSymbol);
-        %PromiseFulfill(promise, kFulfilled, thenableValue,
-                       promiseFulfillReactionsSymbol);
-        PromiseSet(promise, kFulfilled, thenableValue);
-        SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
-        return;
-      } else if (thenableState === kRejected) {
-        var thenableValue = GET_PRIVATE(resolution, promiseResultSymbol);
-        if (!HAS_DEFINED_PRIVATE(resolution, promiseHasHandlerSymbol)) {
-          // Promise has already been rejected, but had no handler.
-          // Revoke previously triggered reject event.
-          %PromiseRevokeReject(resolution);
-        }
-        // Don't cause a debug event as this case is forwarding a rejection
-        %PromiseReject(promise, thenableValue, false);
-        PromiseSet(promise, kRejected, thenableValue);
-        SET_PRIVATE(resolution, promiseHasHandlerSymbol, true);
-        return;
-      }
-    }
-
-    if (IS_CALLABLE(then)) {
-      if (DEBUG_IS_ACTIVE && IsPromise(resolution)) {
-          // Mark the dependency of the new promise on the resolution
-        SET_PRIVATE(resolution, promiseHandledBySymbol, promise);
-      }
-      %EnqueuePromiseResolveThenableJob(promise, resolution, then);
-      return;
-    }
-  }
-  %PromiseFulfill(promise, kFulfilled, resolution,
-                  promiseFulfillReactionsSymbol);
-  PromiseSet(promise, kFulfilled, resolution);
-}
-
-// Only used by async-await.js
-function RejectPromise(promise, reason, debugEvent) {
-  %PromiseReject(promise, reason, debugEvent);
-  PromiseSet(promise, kRejected, reason);
-}
-
-// Export to bindings
-function DoRejectPromise(promise, reason) {
-  %PromiseReject(promise, reason, true);
-  PromiseSet(promise, kRejected, reason);
-}
-
-// ES#sec-newpromisecapability
-// NewPromiseCapability ( C )
-function NewPromiseCapability(C, debugEvent) {
-  if (C === GlobalPromise) {
-    // Optimized case, avoid extra closure.
-    var promise = PromiseCreate();
-    // TODO(gsathya): Remove container for callbacks when this is
-    // moved to CPP/TF.
-    var callbacks = %create_resolving_functions(promise, debugEvent);
-    return {
-      promise: promise,
-      resolve: callbacks[kResolveCallback],
-      reject: callbacks[kRejectCallback]
-    };
-  }
-
-  var result = {promise: UNDEFINED, resolve: UNDEFINED, reject: UNDEFINED };
-  result.promise = new C((resolve, reject) => {
-    if (!IS_UNDEFINED(result.resolve) || !IS_UNDEFINED(result.reject))
-        throw %make_type_error(kPromiseExecutorAlreadyInvoked);
-    result.resolve = resolve;
-    result.reject = reject;
-  });
-
-  if (!IS_CALLABLE(result.resolve) || !IS_CALLABLE(result.reject))
-      throw %make_type_error(kPromiseNonCallable);
-
-  return result;
-}
-
-// ES#sec-promise.reject
-// Promise.reject ( x )
-function PromiseReject(r) {
-  if (!IS_RECEIVER(this)) {
-    throw %make_type_error(kCalledOnNonObject, PromiseResolve);
-  }
-  if (this === GlobalPromise) {
-    // Optimized case, avoid extra closure.
-    var promise = PromiseCreateAndSet(kRejected, r);
-    // Trigger debug events if the debugger is on, as Promise.reject is
-    // equivalent to throwing an exception directly.
-    %PromiseRejectEventFromStack(promise, r);
-    return promise;
-  } else {
-    var promiseCapability = NewPromiseCapability(this, true);
-    %_Call(promiseCapability.reject, UNDEFINED, r);
-    return promiseCapability.promise;
-  }
-}
-
-function PerformPromiseThen(promise, onResolve, onReject, resultCapability) {
-  if (!IS_CALLABLE(onResolve)) onResolve = PromiseIdResolveHandler;
-  if (!IS_CALLABLE(onReject)) onReject = PromiseIdRejectHandler;
-
-  var status = GET_PRIVATE(promise, promiseStateSymbol);
-  switch (status) {
-    case kPending:
-      PromiseAttachCallbacks(promise, resultCapability, onResolve, onReject);
-      break;
-    case kFulfilled:
-      %EnqueuePromiseReactionJob(GET_PRIVATE(promise, promiseResultSymbol),
-                                 onResolve, resultCapability, kFulfilled);
-      break;
-    case kRejected:
-      if (!HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
-        // Promise has already been rejected, but had no handler.
-        // Revoke previously triggered reject event.
-        %PromiseRevokeReject(promise);
-      }
-      %EnqueuePromiseReactionJob(GET_PRIVATE(promise, promiseResultSymbol),
-                                 onReject, resultCapability, kRejected);
-      break;
-  }
-
-  // Mark this promise as having handler.
-  SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
-  return resultCapability.promise;
-}
-
-// ES#sec-promise.prototype.then
-// Promise.prototype.then ( onFulfilled, onRejected )
-// Multi-unwrapped chaining with thenable coercion.
-function PromiseThen(onResolve, onReject) {
-  var status = GET_PRIVATE(this, promiseStateSymbol);
-  if (IS_UNDEFINED(status)) {
-    throw %make_type_error(kNotAPromise, this);
-  }
-
-  var constructor = SpeciesConstructor(this, GlobalPromise);
-  var resultCapability;
-
-  // The resultCapability.promise is only ever fulfilled internally,
-  // so we don't need the closures to protect against accidentally
-  // calling them multiple times.
-  if (constructor === GlobalPromise) {
-    // TODO(gsathya): Combine this into NewPromiseCapability.
-    resultCapability = {
-      promise: PromiseCreate(),
-      resolve: UNDEFINED,
-      reject: UNDEFINED
-    };
-  } else {
-    // Pass false for debugEvent so .then chaining does not trigger
-    // redundant ExceptionEvents.
-    resultCapability = NewPromiseCapability(constructor, false);
-  }
-  return PerformPromiseThen(this, onResolve, onReject, resultCapability);
-}
-
-// ES#sec-promise.prototype.catch
-// Promise.prototype.catch ( onRejected )
-function PromiseCatch(onReject) {
-  return this.then(UNDEFINED, onReject);
-}
-
 // Combinators.
 
-// ES#sec-promise.resolve
-// Promise.resolve ( x )
-function PromiseResolve(x) {
-  if (!IS_RECEIVER(this)) {
-    throw %make_type_error(kCalledOnNonObject, PromiseResolve);
-  }
-  if (IsPromise(x) && x.constructor === this) return x;
-
-  // Avoid creating resolving functions.
-  if (this === GlobalPromise) {
-    var promise = PromiseCreate();
-    ResolvePromise(promise, x);
-    return promise;
-  }
-
-  // debugEvent is not so meaningful here as it will be resolved
-  var promiseCapability = NewPromiseCapability(this, true);
-  %_Call(promiseCapability.resolve, UNDEFINED, x);
-  return promiseCapability.promise;
-}
-
 // ES#sec-promise.all
 // Promise.all ( iterable )
 function PromiseAll(iterable) {
@@ -438,7 +32,7 @@
 
   // false debugEvent so that forwarding the rejection through all does not
   // trigger redundant ExceptionEvents
-  var deferred = NewPromiseCapability(this, false);
+  var deferred = %new_promise_capability(this, false);
   var resolutions = new InternalArray();
   var count;
 
@@ -474,7 +68,7 @@
           deferred.reject);
       // For catch prediction, mark that rejections here are semantically
       // handled by the combined Promise.
-      if (instrumenting && IsPromise(throwawayPromise)) {
+      if (instrumenting && %is_promise(throwawayPromise)) {
         SET_PRIVATE(throwawayPromise, promiseHandledBySymbol, deferred.promise);
       }
       ++i;
@@ -502,7 +96,7 @@
 
   // false debugEvent so that forwarding the rejection through race does not
   // trigger redundant ExceptionEvents
-  var deferred = NewPromiseCapability(this, false);
+  var deferred = %new_promise_capability(this, false);
 
   // For catch prediction, don't treat the .then calls as handling it;
   // instead, recurse outwards.
@@ -517,7 +111,7 @@
                                                       deferred.reject);
       // For catch prediction, mark that rejections here are semantically
       // handled by the combined Promise.
-      if (instrumenting && IsPromise(throwawayPromise)) {
+      if (instrumenting && %is_promise(throwawayPromise)) {
         SET_PRIVATE(throwawayPromise, promiseHandledBySymbol, deferred.promise);
       }
     }
@@ -527,129 +121,12 @@
   return deferred.promise;
 }
 
-
-// Utility for debugger
-
-function PromiseHasUserDefinedRejectHandlerCheck(handler, deferred) {
-  // Recurse to the forwarding Promise, if any. This may be due to
-  //  - await reaction forwarding to the throwaway Promise, which has
-  //    a dependency edge to the outer Promise.
-  //  - PromiseIdResolveHandler forwarding to the output of .then
-  //  - Promise.all/Promise.race forwarding to a throwaway Promise, which
-  //    has a dependency edge to the generated outer Promise.
-  if (GET_PRIVATE(handler, promiseForwardingHandlerSymbol)) {
-    return PromiseHasUserDefinedRejectHandlerRecursive(deferred.promise);
-  }
-
-  // Otherwise, this is a real reject handler for the Promise
-  return true;
-}
-
-function PromiseHasUserDefinedRejectHandlerRecursive(promise) {
-  // If this promise was marked as being handled by a catch block
-  // in an async function, then it has a user-defined reject handler.
-  if (GET_PRIVATE(promise, promiseHandledHintSymbol)) return true;
-
-  // If this Promise is subsumed by another Promise (a Promise resolved
-  // with another Promise, or an intermediate, hidden, throwaway Promise
-  // within async/await), then recurse on the outer Promise.
-  // In this case, the dependency is one possible way that the Promise
-  // could be resolved, so it does not subsume the other following cases.
-  var outerPromise = GET_PRIVATE(promise, promiseHandledBySymbol);
-  if (outerPromise &&
-      PromiseHasUserDefinedRejectHandlerRecursive(outerPromise)) {
-    return true;
-  }
-
-  var queue = GET_PRIVATE(promise, promiseRejectReactionsSymbol);
-  var deferred = GET_PRIVATE(promise, promiseDeferredReactionSymbol);
-
-  if (IS_UNDEFINED(queue)) return false;
-
-  if (!IS_ARRAY(queue)) {
-    return PromiseHasUserDefinedRejectHandlerCheck(queue, deferred);
-  }
-
-  for (var i = 0; i < queue.length; i += 2) {
-    if (PromiseHasUserDefinedRejectHandlerCheck(queue[i], queue[i + 1])) {
-      return true;
-    }
-  }
-  return false;
-}
-
-// Return whether the promise will be handled by a user-defined reject
-// handler somewhere down the promise chain. For this, we do a depth-first
-// search for a reject handler that's not the default PromiseIdRejectHandler.
-// This function also traverses dependencies of one Promise on another,
-// set up through async/await and Promises resolved with Promises.
-function PromiseHasUserDefinedRejectHandler() {
-  return PromiseHasUserDefinedRejectHandlerRecursive(this);
-};
-
-function MarkPromiseAsHandled(promise) {
-  SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
-}
-
-
-function PromiseSpecies() {
-  return this;
-}
-
 // -------------------------------------------------------------------
 // Install exported functions.
 
-%AddNamedProperty(global, 'Promise', GlobalPromise, DONT_ENUM);
-%AddNamedProperty(GlobalPromise.prototype, toStringTagSymbol, "Promise",
-                  DONT_ENUM | READ_ONLY);
-
 utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
-  "reject", PromiseReject,
   "all", PromiseAll,
   "race", PromiseRace,
-  "resolve", PromiseResolve
 ]);
 
-utils.InstallGetter(GlobalPromise, speciesSymbol, PromiseSpecies);
-
-utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
-  "then", PromiseThen,
-  "catch", PromiseCatch
-]);
-
-%InstallToContext([
-  "promise_catch", PromiseCatch,
-  "promise_create", PromiseCreate,
-  "promise_has_user_defined_reject_handler", PromiseHasUserDefinedRejectHandler,
-  "promise_reject", DoRejectPromise,
-  // TODO(gsathya): Remove this once we update the promise builtin.
-  "promise_internal_reject", RejectPromise,
-  "promise_resolve", ResolvePromise,
-  "promise_then", PromiseThen,
-  "promise_handle", PromiseHandle,
-  "promise_debug_get_info", PromiseDebugGetInfo
-]);
-
-// This allows extras to create promises quickly without building extra
-// resolve/reject closures, and allows them to later resolve and reject any
-// promise without having to hold on to those closures forever.
-utils.InstallFunctions(extrasUtils, 0, [
-  "createPromise", PromiseCreate,
-  "resolvePromise", ResolvePromise,
-  "rejectPromise", DoRejectPromise,
-  "markPromiseAsHandled", MarkPromiseAsHandled
-]);
-
-utils.Export(function(to) {
-  to.IsPromise = IsPromise;
-  to.PromiseCreate = PromiseCreate;
-  to.PromiseThen = PromiseThen;
-
-  to.GlobalPromise = GlobalPromise;
-  to.NewPromiseCapability = NewPromiseCapability;
-  to.PerformPromiseThen = PerformPromiseThen;
-  to.ResolvePromise = ResolvePromise;
-  to.RejectPromise = RejectPromise;
-});
-
 })
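
Promise.all and Promise.race above now obtain their deferred from the %new_promise_capability runtime call instead of the deleted JS NewPromiseCapability. Stripped of the GlobalPromise fast path and the callable checks, the record both versions produce has the same shape; a plain-JavaScript sketch:

function newPromiseCapabilitySketch(C) {
  // ES NewPromiseCapability(C) in miniature: the promise plus the resolve
  // and reject functions captured from C's executor.
  var capability = { promise: undefined, resolve: undefined, reject: undefined };
  capability.promise = new C(function (resolve, reject) {
    capability.resolve = resolve;
    capability.reject = reject;
  });
  return capability;
}

Calling capability.reject forwards a failure into capability.promise, which is how the combinators above settle their deferred.
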
diff --git a/src/js/string.js b/src/js/string.js
index 3a9254c..5992f80 100644
--- a/src/js/string.js
+++ b/src/js/string.js
@@ -9,21 +9,9 @@
 // -------------------------------------------------------------------
 // Imports
 
-var ArrayJoin;
-var GlobalRegExp = global.RegExp;
 var GlobalString = global.String;
-var MaxSimple;
-var MinSimple;
 var matchSymbol = utils.ImportNow("match_symbol");
-var replaceSymbol = utils.ImportNow("replace_symbol");
 var searchSymbol = utils.ImportNow("search_symbol");
-var splitSymbol = utils.ImportNow("split_symbol");
-
-utils.Import(function(from) {
-  ArrayJoin = from.ArrayJoin;
-  MaxSimple = from.MaxSimple;
-  MinSimple = from.MinSimple;
-});
 
 //-------------------------------------------------------------------
 
@@ -58,154 +46,6 @@
   return regexp[matchSymbol](subject);
 }
 
-// ES#sec-getsubstitution
-// GetSubstitution(matched, str, position, captures, replacement)
-// Expand the $-expressions in the string and return a new string with
-// the result.
-function GetSubstitution(matched, string, position, captures, replacement) {
-  var matchLength = matched.length;
-  var stringLength = string.length;
-  var capturesLength = captures.length;
-  var tailPos = position + matchLength;
-  var result = "";
-  var pos, expansion, peek, next, scaledIndex, advance, newScaledIndex;
-
-  var next = %StringIndexOf(replacement, '$', 0);
-  if (next < 0) {
-    result += replacement;
-    return result;
-  }
-
-  if (next > 0) result += %_SubString(replacement, 0, next);
-
-  while (true) {
-    expansion = '$';
-    pos = next + 1;
-    if (pos < replacement.length) {
-      peek = %_StringCharCodeAt(replacement, pos);
-      if (peek == 36) {         // $$
-        ++pos;
-        result += '$';
-      } else if (peek == 38) {  // $& - match
-        ++pos;
-        result += matched;
-      } else if (peek == 96) {  // $` - prefix
-        ++pos;
-        result += %_SubString(string, 0, position);
-      } else if (peek == 39) {  // $' - suffix
-        ++pos;
-        result += %_SubString(string, tailPos, stringLength);
-      } else if (peek >= 48 && peek <= 57) {
-        // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
-        scaledIndex = (peek - 48);
-        advance = 1;
-        if (pos + 1 < replacement.length) {
-          next = %_StringCharCodeAt(replacement, pos + 1);
-          if (next >= 48 && next <= 57) {
-            newScaledIndex = scaledIndex * 10 + ((next - 48));
-            if (newScaledIndex < capturesLength) {
-              scaledIndex = newScaledIndex;
-              advance = 2;
-            }
-          }
-        }
-        if (scaledIndex != 0 && scaledIndex < capturesLength) {
-          var capture = captures.at(scaledIndex);
-          if (!IS_UNDEFINED(capture)) result += capture;
-          pos += advance;
-        } else {
-          result += '$';
-        }
-      } else {
-        result += '$';
-      }
-    } else {
-      result += '$';
-    }
-
-    // Go to the next $ in the replacement.
-    next = %StringIndexOf(replacement, '$', pos);
-
-    // Return if there are no more $ characters in the replacement. If we
-    // haven't reached the end, we need to append the suffix.
-    if (next < 0) {
-      if (pos < replacement.length) {
-        result += %_SubString(replacement, pos, replacement.length);
-      }
-      return result;
-    }
-
-    // Append substring between the previous and the next $ character.
-    if (next > pos) {
-      result += %_SubString(replacement, pos, next);
-    }
-  }
-  return result;
-}
-
-// ES6, section 21.1.3.14
-function StringReplace(search, replace) {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.replace");
-
-  // Decision tree for dispatch
-  // .. regexp search (in src/js/regexp.js, RegExpReplace)
-  // .... string replace
-  // ...... non-global search
-  // ........ empty string replace
-  // ........ non-empty string replace (with $-expansion)
-  // ...... global search
-  // ........ no need to circumvent last match info override
-  // ........ need to circumvent last match info override
-  // .... function replace
-  // ...... global search
-  // ...... non-global search
-  // .. string search
-  // .... special case that replaces with one single character
-  // ...... function replace
-  // ...... string replace (with $-expansion)
-
-  if (!IS_NULL_OR_UNDEFINED(search)) {
-    var replacer = search[replaceSymbol];
-    if (!IS_UNDEFINED(replacer)) {
-      return %_Call(replacer, search, this, replace);
-    }
-  }
-
-  var subject = TO_STRING(this);
-
-  search = TO_STRING(search);
-
-  if (search.length == 1 &&
-      subject.length > 0xFF &&
-      IS_STRING(replace) &&
-      %StringIndexOf(replace, '$', 0) < 0) {
-    // Searching by traversing a cons string tree and replace with cons of
-    // slices works only when the replaced string is a single character, being
-    // replaced by a simple string and only pays off for long strings.
-    return %StringReplaceOneCharWithString(subject, search, replace);
-  }
-  var start = %StringIndexOf(subject, search, 0);
-  if (start < 0) return subject;
-  var end = start + search.length;
-
-  var result = %_SubString(subject, 0, start);
-
-  // Compute the string to replace with.
-  if (IS_CALLABLE(replace)) {
-    result += replace(search, start, subject);
-  } else {
-    // In this case, we don't have any capture groups and can get away with
-    // faking the captures object by simply setting its length to 1.
-    const captures = { length: 1 };
-    const matched = %_SubString(subject, start, end);
-    result += GetSubstitution(matched, subject, start, captures,
-                              TO_STRING(replace));
-  }
-
-  return result + %_SubString(subject, end, subject.length);
-}
-
-
 // ES6 21.1.3.15.
 function StringSearch(pattern) {
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.search");
@@ -267,73 +107,9 @@
 }
 
 
-// ES6 21.1.3.17.
-function StringSplitJS(separator, limit) {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.split");
-
-  if (!IS_NULL_OR_UNDEFINED(separator)) {
-    var splitter = separator[splitSymbol];
-    if (!IS_UNDEFINED(splitter)) {
-      return %_Call(splitter, separator, this, limit);
-    }
-  }
-
-  var subject = TO_STRING(this);
-  limit = (IS_UNDEFINED(limit)) ? kMaxUint32 : TO_UINT32(limit);
-
-  var length = subject.length;
-  var separator_string = TO_STRING(separator);
-
-  if (limit === 0) return [];
-
-  // ECMA-262 says that if separator is undefined, the result should
-  // be an array of size 1 containing the entire string.
-  if (IS_UNDEFINED(separator)) return [subject];
-
-  var separator_length = separator_string.length;
-
-  // If the separator string is empty then return the elements in the subject.
-  if (separator_length === 0) return %StringToArray(subject, limit);
-
-  return %StringSplit(subject, separator_string, limit);
-}
-
-
-// ECMA-262, 15.5.4.16
-function StringToLowerCaseJS() {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
-
-  return %StringToLowerCase(TO_STRING(this));
-}
-
-
-// ECMA-262, 15.5.4.17
-function StringToLocaleLowerCase() {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
-
-  return %StringToLowerCase(TO_STRING(this));
-}
-
-
-// ECMA-262, 15.5.4.18
-function StringToUpperCaseJS() {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.toUpperCase");
-
-  return %StringToUpperCase(TO_STRING(this));
-}
-
-
-// ECMA-262, 15.5.4.19
-function StringToLocaleUpperCase() {
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
-
-  return %StringToUpperCase(TO_STRING(this));
-}
-
-
 // ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
 function HtmlEscape(str) {
-  return %_Call(StringReplace, TO_STRING(str), /"/g, "&quot;");
+  return %RegExpInternalReplace(/"/g, TO_STRING(str), "&quot;");
 }
 
 
@@ -515,14 +291,8 @@
   "concat", StringConcat,
   "match", StringMatchJS,
   "repeat", StringRepeat,
-  "replace", StringReplace,
   "search", StringSearch,
   "slice", StringSlice,
-  "split", StringSplitJS,
-  "toLowerCase", StringToLowerCaseJS,
-  "toLocaleLowerCase", StringToLocaleLowerCase,
-  "toUpperCase", StringToUpperCaseJS,
-  "toLocaleUpperCase", StringToLocaleUpperCase,
 
   "link", StringLink,
   "anchor", StringAnchor,
@@ -539,14 +309,4 @@
   "sup", StringSup
 ]);
 
-// -------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
-  to.StringMatch = StringMatchJS;
-  to.StringReplace = StringReplace;
-  to.StringSlice = StringSlice;
-  to.StringSplit = StringSplitJS;
-});
-
 })
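
With StringReplace deleted, HtmlEscape above routes through the %RegExpInternalReplace runtime call, presumably so the escape cannot be observed or hijacked via user-patched String.prototype.replace or RegExp methods. Its observable result matches this ordinary JavaScript:

function htmlEscapeSketch(str) {
  // Escape double quotes for the legacy String.prototype.anchor/link helpers.
  return String(str).replace(/"/g, "&quot;");
}
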
diff --git a/src/js/symbol.js b/src/js/symbol.js
deleted file mode 100644
index 4ec31ae..0000000
--- a/src/js/symbol.js
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalSymbol = global.Symbol;
-var hasInstanceSymbol = utils.ImportNow("has_instance_symbol");
-var isConcatSpreadableSymbol =
-    utils.ImportNow("is_concat_spreadable_symbol");
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var matchSymbol = utils.ImportNow("match_symbol");
-var replaceSymbol = utils.ImportNow("replace_symbol");
-var searchSymbol = utils.ImportNow("search_symbol");
-var speciesSymbol = utils.ImportNow("species_symbol");
-var splitSymbol = utils.ImportNow("split_symbol");
-var toPrimitiveSymbol = utils.ImportNow("to_primitive_symbol");
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
-
-// -------------------------------------------------------------------
-
-function SymbolFor(key) {
-  key = TO_STRING(key);
-  var registry = %SymbolRegistry();
-  if (IS_UNDEFINED(registry.for[key])) {
-    var symbol = %CreateSymbol(key);
-    registry.for[key] = symbol;
-    registry.keyFor[symbol] = key;
-  }
-  return registry.for[key];
-}
-
-
-function SymbolKeyFor(symbol) {
-  if (!IS_SYMBOL(symbol)) throw %make_type_error(kSymbolKeyFor, symbol);
-  return %SymbolRegistry().keyFor[symbol];
-}
-
-// -------------------------------------------------------------------
-
-utils.InstallConstants(GlobalSymbol, [
-  "hasInstance", hasInstanceSymbol,
-  "isConcatSpreadable", isConcatSpreadableSymbol,
-  "iterator", iteratorSymbol,
-  "match", matchSymbol,
-  "replace", replaceSymbol,
-  "search", searchSymbol,
-  "species", speciesSymbol,
-  "split", splitSymbol,
-  "toPrimitive", toPrimitiveSymbol,
-  "toStringTag", toStringTagSymbol,
-  "unscopables", unscopablesSymbol,
-]);
-
-utils.InstallFunctions(GlobalSymbol, DONT_ENUM, [
-  "for", SymbolFor,
-  "keyFor", SymbolKeyFor
-]);
-
-})
diff --git a/src/js/typedarray.js b/src/js/typedarray.js
index 7667e18..ef1c1d2 100644
--- a/src/js/typedarray.js
+++ b/src/js/typedarray.js
@@ -20,7 +20,6 @@
 var GlobalArrayBuffer = global.ArrayBuffer;
 var GlobalArrayBufferPrototype = GlobalArrayBuffer.prototype;
 var GlobalObject = global.Object;
-var InnerArrayCopyWithin;
 var InnerArrayEvery;
 var InnerArrayFill;
 var InnerArrayFilter;
@@ -41,7 +40,6 @@
 var ToPositiveInteger;
 var ToIndex;
 var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var speciesSymbol = utils.ImportNow("species_symbol");
 var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
 
 macro TYPED_ARRAYS(FUNCTION)
@@ -69,7 +67,6 @@
   ArrayValues = from.ArrayValues;
   GetIterator = from.GetIterator;
   GetMethod = from.GetMethod;
-  InnerArrayCopyWithin = from.InnerArrayCopyWithin;
   InnerArrayEvery = from.InnerArrayEvery;
   InnerArrayFill = from.InnerArrayFill;
   InnerArrayFilter = from.InnerArrayFilter;
@@ -164,8 +161,7 @@
     }
     newByteLength = bufferByteLength - offset;
     if (newByteLength < 0) {
-      throw %make_range_error(kInvalidTypedArrayAlignment,
-                           "byte length", "NAME", ELEMENT_SIZE);
+      throw %make_range_error(kInvalidOffset, offset);
     }
   } else {
     newByteLength = length * ELEMENT_SIZE;
@@ -260,7 +256,7 @@
       NAMEConstructByTypedArray(this, arg1);
     } else if (IS_RECEIVER(arg1)) {
       var iteratorFn = arg1[iteratorSymbol];
-      if (IS_UNDEFINED(iteratorFn) || iteratorFn === ArrayValues) {
+      if (IS_UNDEFINED(iteratorFn)) {
         NAMEConstructByArrayLike(this, arg1, arg1.length);
       } else {
         NAMEConstructByIterable(this, arg1, iteratorFn);
@@ -439,17 +435,6 @@
 }
 
 
-function TypedArrayCopyWithin(target, start, end) {
-  if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
-
-  var length = %_TypedArrayGetLength(this);
-
-  // TODO(littledan): Replace with a memcpy for better performance
-  return InnerArrayCopyWithin(target, start, end, this, length);
-}
-%FunctionSetLength(TypedArrayCopyWithin, 2);
-
-
 // ES6 draft 05-05-15, section 22.2.3.7
 function TypedArrayEvery(f, receiver) {
   if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
@@ -532,25 +517,6 @@
   return PackedArrayReverse(this, length);
 }
 
-
-function TypedArrayComparefn(x, y) {
-  if (x === 0 && x === y) {
-    x = 1 / x;
-    y = 1 / y;
-  }
-  if (x < y) {
-    return -1;
-  } else if (x > y) {
-    return 1;
-  } else if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) {
-    return NUMBER_IS_NAN(y) ? 0 : 1;
-  } else if (NUMBER_IS_NAN(x)) {
-    return 1;
-  }
-  return 0;
-}
-
-
 // ES6 draft 05-18-15, section 22.2.3.25
 function TypedArraySort(comparefn) {
   if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
@@ -558,7 +524,7 @@
   var length = %_TypedArrayGetLength(this);
 
   if (IS_UNDEFINED(comparefn)) {
-    comparefn = TypedArrayComparefn;
+    return %TypedArraySortFast(this);
   }
 
   return InnerArraySort(this, length, comparefn);
@@ -847,10 +813,6 @@
   throw %make_type_error(kConstructAbstractClass, "TypedArray");
 }
 
-function TypedArraySpecies() {
-  return this;
-}
-
 // -------------------------------------------------------------------
 
 %SetCode(GlobalTypedArray, TypedArrayConstructor);
@@ -858,13 +820,11 @@
   "from", TypedArrayFrom,
   "of", TypedArrayOf
 ]);
-utils.InstallGetter(GlobalTypedArray, speciesSymbol, TypedArraySpecies);
 utils.InstallGetter(GlobalTypedArray.prototype, toStringTagSymbol,
                     TypedArrayGetToStringTag);
 utils.InstallFunctions(GlobalTypedArray.prototype, DONT_ENUM, [
   "subarray", TypedArraySubArray,
   "set", TypedArraySet,
-  "copyWithin", TypedArrayCopyWithin,
   "every", TypedArrayEvery,
   "fill", TypedArrayFill,
   "filter", TypedArrayFilter,
diff --git a/src/json-parser.cc b/src/json-parser.cc
index 5e79b61..2ec79ca 100644
--- a/src/json-parser.cc
+++ b/src/json-parser.cc
@@ -399,8 +399,8 @@
                    ->NowContains(value)) {
             Handle<FieldType> value_type(
                 value->OptimalType(isolate(), expected_representation));
-            Map::GeneralizeFieldType(target, descriptor,
-                                     expected_representation, value_type);
+            Map::GeneralizeField(target, descriptor, details.constness(),
+                                 expected_representation, value_type);
           }
           DCHECK(target->instance_descriptors()
                      ->GetFieldType(descriptor)
@@ -478,11 +478,12 @@
   DCHECK(!json_object->map()->is_dictionary_map());
 
   DisallowHeapAllocation no_gc;
-
+  DescriptorArray* descriptors = json_object->map()->instance_descriptors();
   int length = properties->length();
   for (int i = 0; i < length; i++) {
     Handle<Object> value = (*properties)[i];
-    json_object->WriteToField(i, *value);
+    // Initializing store.
+    json_object->WriteToField(i, descriptors->GetDetails(i), *value);
   }
 }
 
diff --git a/src/json-stringifier.cc b/src/json-stringifier.cc
index 29685c2..a187fb5 100644
--- a/src/json-stringifier.cc
+++ b/src/json-stringifier.cc
@@ -323,7 +323,6 @@
     case JS_VALUE_TYPE:
       if (deferred_string_key) SerializeDeferredKey(comma, key);
       return SerializeJSValue(Handle<JSValue>::cast(object));
-    case SIMD128_VALUE_TYPE:
     case SYMBOL_TYPE:
       return UNCHANGED;
     default:
@@ -534,7 +533,8 @@
       PropertyDetails details = map->instance_descriptors()->GetDetails(i);
       if (details.IsDontEnum()) continue;
       Handle<Object> property;
-      if (details.type() == DATA && *map == js_obj->map()) {
+      if (details.location() == kField && *map == js_obj->map()) {
+        DCHECK_EQ(kData, details.kind());
         FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
         property = JSObject::FastPropertyAt(js_obj, details.representation(),
                                             field_index);
diff --git a/src/keys.cc b/src/keys.cc
index 9b6c8f3..af3d393 100644
--- a/src/keys.cc
+++ b/src/keys.cc
@@ -227,7 +227,7 @@
   map->SetEnumLength(0);
 }
 
-bool CheckAndInitalizeSimpleEnumCache(JSReceiver* object) {
+bool CheckAndInitalizeEmptyEnumCache(JSReceiver* object) {
   if (object->map()->EnumLength() == kInvalidEnumCacheSentinel) {
     TrySettingEmptyEnumCache(object);
   }
@@ -248,7 +248,7 @@
   for (PrototypeIterator iter(isolate_, *receiver_); !iter.IsAtEnd();
        iter.Advance()) {
     JSReceiver* current = iter.GetCurrent<JSReceiver>();
-    bool has_no_properties = CheckAndInitalizeSimpleEnumCache(current);
+    bool has_no_properties = CheckAndInitalizeEmptyEnumCache(current);
     if (has_no_properties) continue;
     last_prototype = current;
     has_empty_prototype_ = false;
@@ -271,6 +271,8 @@
   return isolate->factory()->CopyFixedArrayUpTo(array, length);
 }
 
+// Initializes and directly returns the enum cache. Users of this function
+// have to make sure never to leak the enum cache directly.
 Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
                                            Handle<JSObject> object) {
   Handle<Map> map(object->map());
@@ -328,12 +330,13 @@
     if (key->IsSymbol()) continue;
     storage->set(index, key);
     if (!indices.is_null()) {
-      if (details.type() != DATA) {
-        indices = Handle<FixedArray>();
-      } else {
+      if (details.location() == kField) {
+        DCHECK_EQ(kData, details.kind());
         FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
         int load_by_field_index = field_index.GetLoadByFieldIndex();
         indices->set(index, Smi::FromInt(load_by_field_index));
+      } else {
+        indices = Handle<FixedArray>();
       }
     }
     index++;
@@ -369,25 +372,6 @@
   return result;
 }
 
-MaybeHandle<FixedArray> GetOwnKeysWithUninitializedEnumCache(
-    Isolate* isolate, Handle<JSObject> object) {
-  // Uninitialized enum cache
-  Map* map = object->map();
-  if (object->elements() != isolate->heap()->empty_fixed_array() ||
-      object->elements() != isolate->heap()->empty_slow_element_dictionary()) {
-    // Assume that there are elements.
-    return MaybeHandle<FixedArray>();
-  }
-  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
-  if (number_of_own_descriptors == 0) {
-    map->SetEnumLength(0);
-    return isolate->factory()->empty_fixed_array();
-  }
-  // We have no elements but possibly enumerable property keys, hence we can
-  // directly initialize the enum cache.
-  return GetFastEnumPropertyKeys(isolate, object);
-}
-
 bool OnlyHasSimpleProperties(Map* map) {
   return map->instance_type() > LAST_CUSTOM_ELEMENTS_RECEIVER;
 }
@@ -427,8 +411,7 @@
   if (enum_length == kInvalidEnumCacheSentinel) {
     Handle<FixedArray> keys;
     // Try initializing the enum cache and return own properties.
-    if (GetOwnKeysWithUninitializedEnumCache(isolate_, object)
-            .ToHandle(&keys)) {
+    if (GetOwnKeysWithUninitializedEnumCache().ToHandle(&keys)) {
       if (FLAG_trace_for_in_enumerate) {
         PrintF("| strings=%d symbols=0 elements=0 || prototypes>=1 ||\n",
                keys->length());
@@ -443,6 +426,28 @@
   return GetOwnKeysWithElements<true>(isolate_, object, keys_conversion);
 }
 
+MaybeHandle<FixedArray>
+FastKeyAccumulator::GetOwnKeysWithUninitializedEnumCache() {
+  Handle<JSObject> object = Handle<JSObject>::cast(receiver_);
+  // Uninitialized enum cache
+  Map* map = object->map();
+  if (object->elements()->length() != 0) {
+    // Assume that there are elements.
+    return MaybeHandle<FixedArray>();
+  }
+  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+  if (number_of_own_descriptors == 0) {
+    map->SetEnumLength(0);
+    return isolate_->factory()->empty_fixed_array();
+  }
+  // We have no elements but possibly enumerable property keys, hence we can
+  // directly initialize the enum cache.
+  Handle<FixedArray> keys = GetFastEnumPropertyKeys(isolate_, object);
+  if (is_for_in_) return keys;
+  // Do not leak the enum cache as it might end up as an elements backing store.
+  return isolate_->factory()->CopyFixedArray(keys);
+}
+
 MaybeHandle<FixedArray> FastKeyAccumulator::GetKeysSlow(
     GetKeysConversion keys_conversion) {
   KeyAccumulator accumulator(isolate_, mode_, filter_);
@@ -797,7 +802,8 @@
   Zone set_zone(isolate_->allocator(), ZONE_NAME);
   const int kPresent = 1;
   const int kGone = 0;
-  IdentityMap<int> unchecked_result_keys(isolate_->heap(), &set_zone);
+  IdentityMap<int, ZoneAllocationPolicy> unchecked_result_keys(
+      isolate_->heap(), ZoneAllocationPolicy(&set_zone));
   int unchecked_result_keys_size = 0;
   for (int i = 0; i < trap_result->length(); ++i) {
     DCHECK(trap_result->get(i)->IsUniqueName());
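
The new GetOwnKeysWithUninitializedEnumCache above copies the enum cache unless the keys are being produced for a for-in, because the raw cache is shared and must never become some object's elements backing store. The hazard in miniature, as plain JavaScript:

// A shared cache handed out directly can be mutated by any caller.
var sharedEnumCache = ["a", "b"];
function ownKeysLeaky() { return sharedEnumCache; }        // pre-patch hazard
function ownKeysSafe() { return sharedEnumCache.slice(); } // post-patch copy
ownKeysLeaky().push("oops");
// sharedEnumCache.length is now 3: the cache was corrupted for everyone.
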
diff --git a/src/keys.h b/src/keys.h
index 63b8b26..c5ac93c 100644
--- a/src/keys.h
+++ b/src/keys.h
@@ -53,9 +53,10 @@
       Handle<AccessCheckInfo> access_check_info, Handle<JSReceiver> receiver,
       Handle<JSObject> object);
 
+  // Might directly return the object's enum_cache; copy the result before
+  // using it as an elements backing store for a JSObject.
   static Handle<FixedArray> GetOwnEnumPropertyKeys(Isolate* isolate,
                                                    Handle<JSObject> object);
-
   void AddKey(Object* key, AddKeyConversion convert = DO_NOT_CONVERT);
   void AddKey(Handle<Object> key, AddKeyConversion convert = DO_NOT_CONVERT);
   void AddKeys(Handle<FixedArray> array, AddKeyConversion convert);
@@ -140,6 +141,8 @@
   MaybeHandle<FixedArray> GetKeysFast(GetKeysConversion convert);
   MaybeHandle<FixedArray> GetKeysSlow(GetKeysConversion convert);
 
+  MaybeHandle<FixedArray> GetOwnKeysWithUninitializedEnumCache();
+
   Isolate* isolate_;
   Handle<JSReceiver> receiver_;
   Handle<JSReceiver> last_non_empty_prototype_;
diff --git a/src/label.h b/src/label.h
new file mode 100644
index 0000000..e77a2af
--- /dev/null
+++ b/src/label.h
@@ -0,0 +1,92 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LABEL_H_
+#define V8_LABEL_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Labels represent pc locations; they are typically jump or call targets.
+// After declaration, a label can be freely used to denote a known or (as
+// yet) unknown pc location. Assembler::bind() is used to bind a label to the
+// current pc. A label can be bound only once.
+
+class Label {
+ public:
+  enum Distance { kNear, kFar };
+
+  INLINE(Label()) {
+    Unuse();
+    UnuseNear();
+  }
+
+  INLINE(~Label()) {
+    DCHECK(!is_linked());
+    DCHECK(!is_near_linked());
+  }
+
+  INLINE(void Unuse()) { pos_ = 0; }
+  INLINE(void UnuseNear()) { near_link_pos_ = 0; }
+
+  INLINE(bool is_bound() const) { return pos_ < 0; }
+  INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
+  INLINE(bool is_linked() const) { return pos_ > 0; }
+  INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }
+
+  // Returns the position of bound or linked labels. Cannot be used
+  // for unused labels.
+  int pos() const {
+    if (pos_ < 0) return -pos_ - 1;
+    if (pos_ > 0) return pos_ - 1;
+    UNREACHABLE();
+    return 0;
+  }
+
+  int near_link_pos() const { return near_link_pos_ - 1; }
+
+ private:
+  // pos_ encodes both the binding state (via its sign)
+  // and the binding position (via its value) of a label.
+  //
+  // pos_ <  0  bound label, pos() returns the jump target position
+  // pos_ == 0  unused label
+  // pos_ >  0  linked label, pos() returns the last reference position
+  int pos_;
+
+  // Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
+  int near_link_pos_;
+
+  void bind_to(int pos) {
+    pos_ = -pos - 1;
+    DCHECK(is_bound());
+  }
+  void link_to(int pos, Distance distance = kFar) {
+    if (distance == kNear) {
+      near_link_pos_ = pos + 1;
+      DCHECK(is_near_linked());
+    } else {
+      pos_ = pos + 1;
+      DCHECK(is_linked());
+    }
+  }
+
+  friend class Assembler;
+  friend class Displacement;
+  friend class RegExpMacroAssemblerIrregexp;
+
+#if V8_TARGET_ARCH_ARM64
+  // On ARM64, the Assembler keeps track of pointers to Labels to resolve
+  // branches to distant targets. Copying labels would confuse the Assembler.
+  DISALLOW_COPY_AND_ASSIGN(Label);  // NOLINT
+#endif
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_LABEL_H_
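
The pos_ encoding documented in the new header packs binding state and position into a single int. A worked round trip in plain JavaScript, mirroring bind_to, link_to, and pos above:

function bindTo(pos) { return -pos - 1; }  // bound labels go negative
function linkTo(pos) { return pos + 1; }   // linked labels stay positive
function posOf(pos_) {
  if (pos_ < 0) return -pos_ - 1;  // bound: recover the jump target
  if (pos_ > 0) return pos_ - 1;   // linked: last reference position
  throw new Error("unused label has no position");
}
// posOf(bindTo(42)) === 42, posOf(linkTo(42)) === 42, and 0 stays "unused".
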
diff --git a/src/layout-descriptor-inl.h b/src/layout-descriptor-inl.h
index bade05e..4f193b3 100644
--- a/src/layout-descriptor-inl.h
+++ b/src/layout-descriptor-inl.h
@@ -28,7 +28,7 @@
 
 bool LayoutDescriptor::InobjectUnboxedField(int inobject_properties,
                                             PropertyDetails details) {
-  if (details.type() != DATA || !details.representation().IsDouble()) {
+  if (details.location() != kField || !details.representation().IsDouble()) {
     return false;
   }
   // We care only about in-object properties.
diff --git a/src/layout-descriptor.cc b/src/layout-descriptor.cc
index 11a72e7..001bfe0 100644
--- a/src/layout-descriptor.cc
+++ b/src/layout-descriptor.cc
@@ -8,6 +8,7 @@
 
 #include "src/base/bits.h"
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 
 using v8::base::bits::CountTrailingZeros32;
 
@@ -245,7 +246,7 @@
   if (current_length != array_length) {
     DCHECK_LT(array_length, current_length);
     int delta = current_length - array_length;
-    heap->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(this, delta);
+    heap->RightTrimFixedArray(this, delta);
   }
   memset(DataPtr(), 0, DataSize());
   LayoutDescriptor* layout_descriptor =
diff --git a/src/layout-descriptor.h b/src/layout-descriptor.h
index 5a80e73..b75536a 100644
--- a/src/layout-descriptor.h
+++ b/src/layout-descriptor.h
@@ -83,6 +83,7 @@
   // For our gdb macros, we should perhaps change these in the future.
   void Print();
 
+  void ShortPrint(std::ostream& os);
   void Print(std::ostream& os);  // NOLINT
 #endif
 
diff --git a/src/libplatform/default-platform.cc b/src/libplatform/default-platform.cc
index 866a447..0e2144b 100644
--- a/src/libplatform/default-platform.cc
+++ b/src/libplatform/default-platform.cc
@@ -30,6 +30,12 @@
   return reinterpret_cast<DefaultPlatform*>(platform)->PumpMessageLoop(isolate);
 }
 
+void RunIdleTasks(v8::Platform* platform, v8::Isolate* isolate,
+                  double idle_time_in_seconds) {
+  reinterpret_cast<DefaultPlatform*>(platform)->RunIdleTasks(
+      isolate, idle_time_in_seconds);
+}
+
 void SetTracingController(
     v8::Platform* platform,
     v8::platform::tracing::TracingController* tracing_controller) {
@@ -69,6 +75,12 @@
       i->second.pop();
     }
   }
+  for (auto& i : main_thread_idle_queue_) {
+    while (!i.second.empty()) {
+      delete i.second.front();
+      i.second.pop();
+    }
+  }
 }
 
 
@@ -118,6 +130,15 @@
   return deadline_and_task.second;
 }
 
+IdleTask* DefaultPlatform::PopTaskInMainThreadIdleQueue(v8::Isolate* isolate) {
+  auto it = main_thread_idle_queue_.find(isolate);
+  if (it == main_thread_idle_queue_.end() || it->second.empty()) {
+    return nullptr;
+  }
+  IdleTask* task = it->second.front();
+  it->second.pop();
+  return task;
+}
 
 bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate) {
   Task* task = NULL;
@@ -142,8 +163,25 @@
   return true;
 }
 
+void DefaultPlatform::RunIdleTasks(v8::Isolate* isolate,
+                                   double idle_time_in_seconds) {
+  double deadline_in_seconds =
+      MonotonicallyIncreasingTime() + idle_time_in_seconds;
+  while (deadline_in_seconds > MonotonicallyIncreasingTime()) {
+    {
+      IdleTask* task;
+      {
+        base::LockGuard<base::Mutex> guard(&lock_);
+        task = PopTaskInMainThreadIdleQueue(isolate);
+      }
+      if (task == nullptr) return;
+      task->Run(deadline_in_seconds);
+      delete task;
+    }
+  }
+}
 
-void DefaultPlatform::CallOnBackgroundThread(Task *task,
+void DefaultPlatform::CallOnBackgroundThread(Task* task,
                                              ExpectedRuntime expected_runtime) {
   EnsureInitialized();
   queue_.Append(task);
@@ -164,15 +202,13 @@
   main_thread_delayed_queue_[isolate].push(std::make_pair(deadline, task));
 }
 
-
 void DefaultPlatform::CallIdleOnForegroundThread(Isolate* isolate,
                                                  IdleTask* task) {
-  UNREACHABLE();
+  base::LockGuard<base::Mutex> guard(&lock_);
+  main_thread_idle_queue_[isolate].push(task);
 }
 
-
-bool DefaultPlatform::IdleTasksEnabled(Isolate* isolate) { return false; }
-
+bool DefaultPlatform::IdleTasksEnabled(Isolate* isolate) { return true; }
 
 double DefaultPlatform::MonotonicallyIncreasingTime() {
   return base::TimeTicks::HighResolutionNow().ToInternalValue() /
diff --git a/src/libplatform/default-platform.h b/src/libplatform/default-platform.h
index 4b52c28..0ab8e33 100644
--- a/src/libplatform/default-platform.h
+++ b/src/libplatform/default-platform.h
@@ -41,6 +41,8 @@
 
   bool PumpMessageLoop(v8::Isolate* isolate);
 
+  void RunIdleTasks(v8::Isolate* isolate, double idle_time_in_seconds);
+
   // v8::Platform implementation.
   size_t NumberOfAvailableBackgroundThreads() override;
   void CallOnBackgroundThread(Task* task,
@@ -74,13 +76,15 @@
 
   Task* PopTaskInMainThreadQueue(v8::Isolate* isolate);
   Task* PopTaskInMainThreadDelayedQueue(v8::Isolate* isolate);
+  IdleTask* PopTaskInMainThreadIdleQueue(v8::Isolate* isolate);
 
   base::Mutex lock_;
   bool initialized_;
   int thread_pool_size_;
   std::vector<WorkerThread*> thread_pool_;
   TaskQueue queue_;
-  std::map<v8::Isolate*, std::queue<Task*> > main_thread_queue_;
+  std::map<v8::Isolate*, std::queue<Task*>> main_thread_queue_;
+  std::map<v8::Isolate*, std::queue<IdleTask*>> main_thread_idle_queue_;
 
   typedef std::pair<double, Task*> DelayedEntry;
   std::map<v8::Isolate*,
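
With idle tasks now queued rather than rejected, an embedder on the default
platform can drain them explicitly. A minimal sketch, assuming a platform and
isolate created in the usual way and the v8::platform wrappers that accompany
this patch (PumpMessageLoop and the new RunIdleTasks):

    // Drain the regular foreground task queue first.
    while (v8::platform::PumpMessageLoop(platform, isolate)) {
      // Embedder-specific per-task work would go here.
    }
    // Then give queued idle tasks up to 50ms of wall-clock budget; each task
    // receives the absolute deadline derived from this budget.
    v8::platform::RunIdleTasks(platform, isolate, 0.05);
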
diff --git a/src/libplatform/tracing/trace-config.cc b/src/libplatform/tracing/trace-config.cc
index e77d191..ff90eff 100644
--- a/src/libplatform/tracing/trace-config.cc
+++ b/src/libplatform/tracing/trace-config.cc
@@ -21,8 +21,13 @@
 }
 
 bool TraceConfig::IsCategoryGroupEnabled(const char* category_group) const {
-  for (auto included_category : included_categories_) {
-    if (strcmp(included_category.data(), category_group) == 0) return true;
+  std::stringstream category_stream(category_group);
+  while (category_stream.good()) {
+    std::string category;
+    getline(category_stream, category, ',');
+    for (const auto& included_category : included_categories_) {
+      if (category == included_category) return true;
+    }
   }
   return false;
 }
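
IsCategoryGroupEnabled previously required the whole category-group string to
match one included category exactly; it now splits the group on commas and
matches each member. A sketch, assuming TraceConfig's AddIncludedCategory
setter from the same header:

    v8::platform::tracing::TraceConfig config;
    config.AddIncludedCategory("v8.gc");
    // Before this change only the exact string "v8.gc" would have matched;
    // now any member of a comma-separated group does:
    bool enabled = config.IsCategoryGroupEnabled("v8,v8.gc");  // true
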
diff --git a/src/list-inl.h b/src/list-inl.h
index 9a2d11f..5ef6d6e 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -9,6 +9,7 @@
 
 #include "src/base/macros.h"
 #include "src/base/platform/platform.h"
+#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
@@ -34,7 +35,7 @@
 void List<T, P>::AddAll(const Vector<T>& other, P alloc) {
   int result_length = length_ + other.length();
   if (capacity_ < result_length) Resize(result_length, alloc);
-  if (base::is_fundamental<T>()) {
+  if (std::is_fundamental<T>()) {
     memcpy(data_ + length_, other.start(), sizeof(*data_) * other.length());
   } else {
     for (int i = 0; i < other.length(); i++) data_[length_ + i] = other.at(i);
diff --git a/src/list.h b/src/list.h
index 0492865..b59ece4 100644
--- a/src/list.h
+++ b/src/list.h
@@ -8,7 +8,7 @@
 #include <algorithm>
 
 #include "src/checks.h"
-#include "src/utils.h"
+#include "src/vector.h"
 
 namespace v8 {
 namespace internal {
@@ -64,8 +64,8 @@
   // not safe to use after operations that can change the list's
   // backing store (e.g. Add).
   inline T& operator[](int i) const {
-    DCHECK(0 <= i);
-    SLOW_DCHECK(static_cast<unsigned>(i) < static_cast<unsigned>(length_));
+    DCHECK_LE(0, i);
+    DCHECK_GT(static_cast<unsigned>(length_), static_cast<unsigned>(i));
     return data_[i];
   }
   inline T& at(int i) const { return operator[](i); }
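
The AddAll change in list-inl.h swaps base::is_fundamental for the standard
trait but keeps the same dispatch: fundamental element types are block-copied,
everything else goes through element-wise assignment. A standalone model of
that fast path (illustrative, not V8 code):

    #include <cstring>
    #include <type_traits>

    template <typename T>
    void AppendRaw(T* dst, const T* src, int n) {
      if (std::is_fundamental<T>()) {
        // Fundamental types are trivially copyable; a bitwise copy is safe.
        std::memcpy(dst, src, sizeof(T) * n);
      } else {
        // Other types may have a nontrivial operator=, so assign per element.
        for (int i = 0; i < n; i++) dst[i] = src[i];
      }
    }
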
diff --git a/src/log-utils.h b/src/log-utils.h
index b165b3e..69fa6f9 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -32,7 +32,7 @@
     return FLAG_log || FLAG_log_api || FLAG_log_code || FLAG_log_gc ||
            FLAG_log_handles || FLAG_log_suspect || FLAG_ll_prof ||
            FLAG_perf_basic_prof || FLAG_perf_prof ||
-           FLAG_log_internal_timer_events || FLAG_prof_cpp;
+           FLAG_log_internal_timer_events || FLAG_prof_cpp || FLAG_trace_ic;
   }
 
   // Frees all resources acquired in Initialize and Open... functions.
@@ -110,9 +110,9 @@
 
   // Implementation of writing to a log file.
   int WriteToFile(const char* msg, int length) {
-    DCHECK(output_handle_ != NULL);
+    DCHECK_NOT_NULL(output_handle_);
     size_t rv = fwrite(msg, 1, length, output_handle_);
-    DCHECK(static_cast<size_t>(length) == rv);
+    DCHECK_EQ(length, rv);
     USE(rv);
     fflush(output_handle_);
     return length;
diff --git a/src/log.cc b/src/log.cc
index bc52d05..8994147 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1249,28 +1249,6 @@
 }
 
 
-void Logger::DebugTag(const char* call_site_tag) {
-  if (!log_->IsEnabled() || !FLAG_log) return;
-  Log::MessageBuilder msg(log_);
-  msg.Append("debug-tag,%s", call_site_tag);
-  msg.WriteToLogFile();
-}
-
-
-void Logger::DebugEvent(const char* event_type, Vector<uint16_t> parameter) {
-  if (!log_->IsEnabled() || !FLAG_log) return;
-  StringBuilder s(parameter.length() + 1);
-  for (int i = 0; i < parameter.length(); ++i) {
-    s.AddCharacter(static_cast<char>(parameter[i]));
-  }
-  char* parameter_string = s.Finalize();
-  Log::MessageBuilder msg(log_);
-  msg.Append("debug-queue-event,%s,%15.3f,%s", event_type,
-             base::OS::TimeCurrentMillis(), parameter_string);
-  DeleteArray(parameter_string);
-  msg.WriteToLogFile();
-}
-
 void Logger::RuntimeCallTimerEvent() {
   RuntimeCallStats* stats = isolate_->counters()->runtime_call_stats();
   RuntimeCallTimer* timer = stats->current_timer();
@@ -1279,7 +1257,7 @@
   if (counter == nullptr) return;
   Log::MessageBuilder msg(log_);
   msg.Append("active-runtime-timer,");
-  msg.AppendDoubleQuotedString(counter->name);
+  msg.AppendDoubleQuotedString(counter->name());
   msg.WriteToLogFile();
 }
 
@@ -1312,6 +1290,93 @@
   msg.WriteToLogFile();
 }
 
+void Logger::ICEvent(const char* type, bool keyed, const Address pc, int line,
+                     int column, Map* map, Object* key, char old_state,
+                     char new_state, const char* modifier,
+                     const char* slow_stub_reason) {
+  if (!log_->IsEnabled() || !FLAG_trace_ic) return;
+  Log::MessageBuilder msg(log_);
+  if (keyed) msg.Append("Keyed");
+  msg.Append("%s,", type);
+  msg.AppendAddress(pc);
+  msg.Append(",%d,%d,", line, column);
+  msg.Append(old_state);
+  msg.Append(",");
+  msg.Append(new_state);
+  msg.Append(",");
+  msg.AppendAddress(reinterpret_cast<Address>(map));
+  msg.Append(",");
+  if (key->IsSmi()) {
+    msg.Append("%d", Smi::cast(key)->value());
+  } else if (key->IsNumber()) {
+    msg.Append("%lf", key->Number());
+  } else if (key->IsString()) {
+    msg.AppendDetailed(String::cast(key), false);
+  } else if (key->IsSymbol()) {
+    msg.AppendSymbolName(Symbol::cast(key));
+  }
+  msg.Append(",%s,", modifier);
+  if (slow_stub_reason != nullptr) {
+    msg.AppendDoubleQuotedString(slow_stub_reason);
+  }
+  msg.WriteToLogFile();
+}
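+// Stringing the Append calls above together, a --trace-ic line has the shape
+//   [Keyed]type,pc,line,column,old_state,new_state,map,key,modifier[,"reason"]
+// e.g., with placeholder address and state values:
+//   LoadIC,0x2a08dc13a40,25,8,0,1,0x1b2c0de08aa9,name,,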
+
+void Logger::CompareIC(const Address pc, int line, int column, Code* stub,
+                       const char* op, const char* old_left,
+                       const char* old_right, const char* old_state,
+                       const char* new_left, const char* new_right,
+                       const char* new_state) {
+  if (!log_->IsEnabled() || !FLAG_trace_ic) return;
+  Log::MessageBuilder msg(log_);
+  msg.Append("CompareIC,");
+  msg.AppendAddress(pc);
+  msg.Append(",%d,%d,", line, column);
+  msg.AppendAddress(reinterpret_cast<Address>(stub));
+  msg.Append(",%s,%s,%s,%s,%s,%s,%s", op, old_left, old_right, old_state,
+             new_left, new_right, new_state);
+  msg.WriteToLogFile();
+}
+
+void Logger::BinaryOpIC(const Address pc, int line, int column, Code* stub,
+                        const char* old_state, const char* new_state,
+                        AllocationSite* allocation_site) {
+  if (!log_->IsEnabled() || !FLAG_trace_ic) return;
+  Log::MessageBuilder msg(log_);
+  msg.Append("BinaryOpIC,");
+  msg.AppendAddress(pc);
+  msg.Append(",%d,%d,", line, column);
+  msg.AppendAddress(reinterpret_cast<Address>(stub));
+  msg.Append(",%s,%s,", old_state, new_state);
+  if (allocation_site != nullptr) {
+    msg.AppendAddress(reinterpret_cast<Address>(allocation_site));
+  }
+  msg.WriteToLogFile();
+}
+
+void Logger::ToBooleanIC(const Address pc, int line, int column, Code* stub,
+                         const char* old_state, const char* new_state) {
+  if (!log_->IsEnabled() || !FLAG_trace_ic) return;
+  Log::MessageBuilder msg(log_);
+  msg.Append("ToBooleanIC,");
+  msg.AppendAddress(pc);
+  msg.Append(",%d,%d,", line, column);
+  msg.AppendAddress(reinterpret_cast<Address>(stub));
+  msg.Append(",%s,%s,", old_state, new_state);
+  msg.WriteToLogFile();
+}
+
+void Logger::PatchIC(const Address pc, const Address test, int delta) {
+  if (!log_->IsEnabled() || !FLAG_trace_ic) return;
+  Log::MessageBuilder msg(log_);
+  msg.Append("PatchIC,");
+  msg.AppendAddress(pc);
+  msg.Append(",");
+  msg.AppendAddress(test);
+  msg.Append(",");
+  msg.Append("%d,", delta);
+  msg.WriteToLogFile();
+}
 
 void Logger::StopProfiler() {
   if (!log_->IsEnabled()) return;
@@ -1329,6 +1394,17 @@
   StopProfiler();
 }
 
+static void AddFunctionAndCode(SharedFunctionInfo* sfi,
+                               AbstractCode* code_object,
+                               Handle<SharedFunctionInfo>* sfis,
+                               Handle<AbstractCode>* code_objects, int offset) {
+  if (sfis != NULL) {
+    sfis[offset] = Handle<SharedFunctionInfo>(sfi);
+  }
+  if (code_objects != NULL) {
+    code_objects[offset] = Handle<AbstractCode>(code_object);
+  }
+}
 
 class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
  public:
@@ -1345,14 +1421,11 @@
     Object* maybe_script = sfi->script();
     if (maybe_script->IsScript()
         && !Script::cast(maybe_script)->HasValidSource()) return;
-    if (sfis_ != NULL) {
-      sfis_[*count_] = Handle<SharedFunctionInfo>(sfi);
-    }
-    if (code_objects_ != NULL) {
-      DCHECK(function->abstract_code()->kind() ==
-             AbstractCode::OPTIMIZED_FUNCTION);
-      code_objects_[*count_] = Handle<AbstractCode>(function->abstract_code());
-    }
+
+    DCHECK(function->abstract_code()->kind() ==
+           AbstractCode::OPTIMIZED_FUNCTION);
+    AddFunctionAndCode(sfi, function->abstract_code(), sfis_, code_objects_,
+                       *count_);
     *count_ = *count_ + 1;
   }
 
@@ -1377,14 +1450,19 @@
     if (sfi->is_compiled()
         && (!sfi->script()->IsScript()
             || Script::cast(sfi->script())->HasValidSource())) {
-      if (sfis != NULL) {
-        sfis[compiled_funcs_count] = Handle<SharedFunctionInfo>(sfi);
+      // In some cases, an SFI might have (and be executing!) both bytecode
+      // and baseline code, so check for both and add them both if needed.
+      if (sfi->HasBytecodeArray()) {
+        AddFunctionAndCode(sfi, AbstractCode::cast(sfi->bytecode_array()), sfis,
+                           code_objects, compiled_funcs_count);
+        ++compiled_funcs_count;
       }
-      if (code_objects != NULL) {
-        code_objects[compiled_funcs_count] =
-            Handle<AbstractCode>(sfi->abstract_code());
+
+      if (!sfi->IsInterpreted()) {
+        AddFunctionAndCode(sfi, AbstractCode::cast(sfi->code()), sfis,
+                           code_objects, compiled_funcs_count);
+        ++compiled_funcs_count;
       }
-      ++compiled_funcs_count;
     }
   }
 
@@ -1445,10 +1523,6 @@
       description = "A load global IC from the snapshot";
       tag = Logger::LOAD_GLOBAL_IC_TAG;
       break;
-    case AbstractCode::CALL_IC:
-      description = "A call IC from the snapshot";
-      tag = CodeEventListener::CALL_IC_TAG;
-      break;
     case AbstractCode::STORE_IC:
       description = "A store IC from the snapshot";
       tag = CodeEventListener::STORE_IC_TAG;
@@ -1469,6 +1543,10 @@
       description = "A Wasm to JavaScript adapter";
       tag = CodeEventListener::STUB_TAG;
       break;
+    case AbstractCode::WASM_INTERPRETER_ENTRY:
+      description = "A Wasm to Interpreter adapter";
+      tag = CodeEventListener::STUB_TAG;
+      break;
     case AbstractCode::NUMBER_OF_KINDS:
       UNIMPLEMENTED();
   }
@@ -1512,7 +1590,6 @@
 
 void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
                                  Handle<AbstractCode> code) {
-  Handle<String> func_name(shared->DebugName());
   if (shared->script()->IsScript()) {
     Handle<Script> script(Script::cast(shared->script()));
     int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
@@ -1551,11 +1628,12 @@
 #if USES_FUNCTION_DESCRIPTORS
       entry_point = *FUNCTION_ENTRYPOINT_ADDRESS(entry_point);
 #endif
-      PROFILE(isolate_, CallbackEvent(*func_name, entry_point));
+      PROFILE(isolate_, CallbackEvent(shared->DebugName(), entry_point));
     }
   } else {
-    PROFILE(isolate_, CodeCreateEvent(CodeEventListener::LAZY_COMPILE_TAG,
-                                      *code, *shared, *func_name));
+    PROFILE(isolate_,
+            CodeCreateEvent(CodeEventListener::LAZY_COMPILE_TAG, *code, *shared,
+                            isolate_->heap()->empty_string()));
   }
 }
 
diff --git a/src/log.h b/src/log.h
index b7a5fc6..6fcb257 100644
--- a/src/log.h
+++ b/src/log.h
@@ -136,12 +136,6 @@
   // object.
   void SuspectReadEvent(Name* name, Object* obj);
 
-  // Emits an event when a message is put on or read from a debugging queue.
-  // DebugTag lets us put a call-site specific label on the event.
-  void DebugTag(const char* call_site_tag);
-  void DebugEvent(const char* event_type, Vector<uint16_t> parameter);
-
-
   // ==== Events logged by --log-api. ====
   void ApiSecurityCheck();
   void ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name);
@@ -189,6 +183,21 @@
 
   void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
 
+  void ICEvent(const char* type, bool keyed, const Address pc, int line,
+               int column, Map* map, Object* key, char old_state,
+               char new_state, const char* modifier,
+               const char* slow_stub_reason);
+  void CompareIC(const Address pc, int line, int column, Code* stub,
+                 const char* op, const char* old_left, const char* old_right,
+                 const char* old_state, const char* new_left,
+                 const char* new_right, const char* new_state);
+  void BinaryOpIC(const Address pc, int line, int column, Code* stub,
+                  const char* old_state, const char* new_state,
+                  AllocationSite* allocation_site);
+  void ToBooleanIC(const Address pc, int line, int column, Code* stub,
+                   const char* old_state, const char* new_state);
+  void PatchIC(const Address pc, const Address test, int delta);
+
   // ==== Events logged by --log-gc. ====
   // Heap sampling events: start, end, and individual types.
   void HeapSampleBeginEvent(const char* space, const char* kind);
diff --git a/src/lookup.cc b/src/lookup.cc
index 186823d..6f50b24 100644
--- a/src/lookup.cc
+++ b/src/lookup.cc
@@ -73,7 +73,7 @@
   JSReceiver* holder = *holder_;
   Map* map = holder->map();
 
-  if (map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+  if (map->IsSpecialReceiverMap()) {
     state_ = IsElement() ? LookupInSpecialHolder<true>(map, holder)
                          : LookupInSpecialHolder<false>(map, holder);
     if (IsFound()) return;
@@ -191,9 +191,6 @@
   } else if (*name_ == heap()->is_concat_spreadable_symbol()) {
     if (!isolate_->IsIsConcatSpreadableLookupChainIntact()) return;
     isolate_->InvalidateIsConcatSpreadableProtector();
-  } else if (*name_ == heap()->has_instance_symbol()) {
-    if (!isolate_->IsHasInstanceLookupChainIntact()) return;
-    isolate_->InvalidateHasInstanceProtector();
   } else if (*name_ == heap()->iterator_symbol()) {
     if (!isolate_->IsArrayIteratorLookupChainIntact()) return;
     if (holder_->IsJSArray()) {
@@ -237,9 +234,21 @@
   }
   if (!holder->HasFastProperties()) return;
 
+  PropertyConstness new_constness = kConst;
+  if (FLAG_track_constant_fields) {
+    if (constness() == kConst) {
+      DCHECK_EQ(kData, property_details_.kind());
+      // Check that current value matches new value otherwise we should make
+      // the property mutable.
+      if (!IsConstFieldValueEqualTo(*value)) new_constness = kMutable;
+    }
+  } else {
+    new_constness = kMutable;
+  }
+
   Handle<Map> old_map(holder->map(), isolate_);
-  Handle<Map> new_map =
-      Map::PrepareForDataProperty(old_map, descriptor_number(), value);
+  Handle<Map> new_map = Map::PrepareForDataProperty(
+      old_map, descriptor_number(), new_constness, value);
 
   if (old_map.is_identical_to(new_map)) {
     // Update the property details if the representation was None.
@@ -271,12 +280,14 @@
     Handle<Map> old_map(holder->map(), isolate_);
     Handle<Map> new_map = Map::ReconfigureExistingProperty(
         old_map, descriptor_number(), i::kData, attributes);
-    new_map = Map::PrepareForDataProperty(new_map, descriptor_number(), value);
+    // Force mutable to avoid changing a constant value via a
+    // kData -> kAccessor -> kData reconfiguration.
+    new_map = Map::PrepareForDataProperty(new_map, descriptor_number(),
+                                          kMutable, value);
     JSObject::MigrateToMap(holder, new_map);
     ReloadPropertyInformation<false>();
   } else {
-    PropertyDetails details(attributes, v8::internal::DATA, 0,
-                            PropertyCellType::kMutable);
+    PropertyDetails details(kData, attributes, 0, PropertyCellType::kMutable);
     if (holder->IsJSGlobalObject()) {
       Handle<GlobalDictionary> dictionary(holder->global_dictionary());
 
@@ -297,7 +308,7 @@
     state_ = DATA;
   }
 
-  WriteDataValue(value);
+  WriteDataValue(value, true);
 
 #if VERIFY_HEAP
   if (FLAG_verify_heap) {
@@ -344,7 +355,7 @@
       // SetNextEnumerationIndex.
       int index = dictionary->NextEnumerationIndex();
       dictionary->SetNextEnumerationIndex(index + 1);
-      property_details_ = PropertyDetails(attributes, i::DATA, index,
+      property_details_ = PropertyDetails(kData, attributes, index,
                                           PropertyCellType::kUninitialized);
       PropertyCellType new_type =
           PropertyCell::UpdatedType(cell, value, property_details_);
@@ -355,21 +366,21 @@
     } else {
       // Don't set enumeration index (it will be set during value store).
       property_details_ =
-          PropertyDetails(attributes, i::DATA, 0, PropertyCellType::kNoCell);
+          PropertyDetails(kData, attributes, 0, PropertyCellType::kNoCell);
       transition_ = map;
     }
     return;
   }
 
-  Handle<Map> transition =
-      Map::TransitionToDataProperty(map, name_, value, attributes, store_mode);
+  Handle<Map> transition = Map::TransitionToDataProperty(
+      map, name_, value, attributes, kDefaultFieldConstness, store_mode);
   state_ = TRANSITION;
   transition_ = transition;
 
   if (transition->is_dictionary_map()) {
     // Don't set enumeration index (it will be set during value store).
     property_details_ =
-        PropertyDetails(attributes, i::DATA, 0, PropertyCellType::kNoCell);
+        PropertyDetails(kData, attributes, 0, PropertyCellType::kNoCell);
   } else {
     property_details_ = transition->GetLastDescriptorDetails();
     has_property_ = true;
@@ -518,19 +529,15 @@
   Handle<JSObject> receiver = GetStoreTarget();
   holder_ = receiver;
 
-  PropertyDetails details(attributes, ACCESSOR_CONSTANT, 0,
-                          PropertyCellType::kMutable);
+  PropertyDetails details(kAccessor, attributes, 0, PropertyCellType::kMutable);
 
   if (IsElement()) {
     // TODO(verwaest): Move code into the element accessor.
     Handle<SeededNumberDictionary> dictionary =
         JSObject::NormalizeElements(receiver);
 
-    // We unconditionally pass used_as_prototype=false here because the call
-    // to RequireSlowElements takes care of the required IC clearing and
-    // we don't want to walk the heap twice.
-    dictionary =
-        SeededNumberDictionary::Set(dictionary, index_, pair, details, false);
+    dictionary = SeededNumberDictionary::Set(dictionary, index_, pair, details,
+                                             receiver);
     receiver->RequireSlowElements(*dictionary);
 
     if (receiver->HasSlowArgumentsElements()) {
@@ -596,7 +603,8 @@
     result = PropertyCell::cast(result)->value();
   } else if (!holder_->HasFastProperties()) {
     result = holder_->property_dictionary()->ValueAt(number_);
-  } else if (property_details_.type() == v8::internal::DATA) {
+  } else if (property_details_.location() == kField) {
+    DCHECK_EQ(kData, property_details_.kind());
     Handle<JSObject> holder = GetHolder<JSObject>();
     FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
     return JSObject::FastPropertyAt(holder, property_details_.representation(),
@@ -607,17 +615,52 @@
   return handle(result, isolate_);
 }
 
+bool LookupIterator::IsConstFieldValueEqualTo(Object* value) const {
+  DCHECK(!IsElement());
+  DCHECK(holder_->HasFastProperties());
+  DCHECK_EQ(kField, property_details_.location());
+  DCHECK_EQ(kConst, property_details_.constness());
+  Handle<JSObject> holder = GetHolder<JSObject>();
+  FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
+  if (property_details_.representation().IsDouble()) {
+    if (!value->IsNumber()) return false;
+    uint64_t bits;
+    if (holder->IsUnboxedDoubleField(field_index)) {
+      bits = holder->RawFastDoublePropertyAsBitsAt(field_index);
+    } else {
+      Object* current_value = holder->RawFastPropertyAt(field_index);
+      DCHECK(current_value->IsMutableHeapNumber());
+      bits = HeapNumber::cast(current_value)->value_as_bits();
+    }
+    // Use the bit representation of the double to check for the hole double,
+    // since manipulating the signaling NaN used for the hole in C++, e.g.
+    // with bit_cast or value(), will change its value on ia32 (the x87 stack
+    // is used to return values and stores to the stack silently clear the
+    // signaling bit).
+    if (bits == kHoleNanInt64) {
+      // Uninitialized double field.
+      return true;
+    }
+    return bit_cast<double>(bits) == value->Number();
+  } else {
+    Object* current_value = holder->RawFastPropertyAt(field_index);
+    return current_value->IsUninitialized(isolate()) || current_value == value;
+  }
+}
+
 int LookupIterator::GetFieldDescriptorIndex() const {
   DCHECK(has_property_);
   DCHECK(holder_->HasFastProperties());
-  DCHECK_EQ(v8::internal::DATA, property_details_.type());
+  DCHECK_EQ(kField, property_details_.location());
+  DCHECK_EQ(kData, property_details_.kind());
   return descriptor_number();
 }
 
 int LookupIterator::GetAccessorIndex() const {
   DCHECK(has_property_);
   DCHECK(holder_->HasFastProperties());
-  DCHECK_EQ(v8::internal::ACCESSOR_CONSTANT, property_details_.type());
+  DCHECK_EQ(kDescriptor, property_details_.location());
+  DCHECK_EQ(kAccessor, property_details_.kind());
   return descriptor_number();
 }
 
@@ -625,16 +668,26 @@
 int LookupIterator::GetConstantIndex() const {
   DCHECK(has_property_);
   DCHECK(holder_->HasFastProperties());
-  DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
+  DCHECK_EQ(kDescriptor, property_details_.location());
+  DCHECK_EQ(kData, property_details_.kind());
+  DCHECK(!FLAG_track_constant_fields);
   DCHECK(!IsElement());
   return descriptor_number();
 }
 
+Handle<Map> LookupIterator::GetFieldOwnerMap() const {
+  DCHECK(has_property_);
+  DCHECK(holder_->HasFastProperties());
+  DCHECK_EQ(kField, property_details_.location());
+  DCHECK(!IsElement());
+  Map* holder_map = holder_->map();
+  return handle(holder_map->FindFieldOwner(descriptor_number()), isolate_);
+}
 
 FieldIndex LookupIterator::GetFieldIndex() const {
   DCHECK(has_property_);
   DCHECK(holder_->HasFastProperties());
-  DCHECK_EQ(v8::internal::DATA, property_details_.type());
+  DCHECK_EQ(kField, property_details_.location());
   DCHECK(!IsElement());
   Map* holder_map = holder_->map();
   int index =
@@ -646,7 +699,7 @@
 Handle<FieldType> LookupIterator::GetFieldType() const {
   DCHECK(has_property_);
   DCHECK(holder_->HasFastProperties());
-  DCHECK_EQ(v8::internal::DATA, property_details_.type());
+  DCHECK_EQ(kField, property_details_.location());
   return handle(
       holder_->map()->instance_descriptors()->GetFieldType(descriptor_number()),
       isolate_);
@@ -674,8 +727,8 @@
   return value;
 }
 
-
-void LookupIterator::WriteDataValue(Handle<Object> value) {
+void LookupIterator::WriteDataValue(Handle<Object> value,
+                                    bool initializing_store) {
   DCHECK_EQ(DATA, state_);
   Handle<JSReceiver> holder = GetHolder<JSReceiver>();
   if (IsElement()) {
@@ -683,11 +736,17 @@
     ElementsAccessor* accessor = object->GetElementsAccessor();
     accessor->Set(object, number_, *value);
   } else if (holder->HasFastProperties()) {
-    if (property_details_.type() == v8::internal::DATA) {
+    if (property_details_.location() == kField) {
+      // Check that, in the case of a kConst field, the existing value is
+      // equal to |value|.
+      DCHECK_IMPLIES(
+          !initializing_store && property_details_.constness() == kConst,
+          IsConstFieldValueEqualTo(*value));
       JSObject::cast(*holder)->WriteToField(descriptor_number(),
                                             property_details_, *value);
     } else {
-      DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
+      DCHECK_EQ(kDescriptor, property_details_.location());
+      DCHECK_EQ(kConst, property_details_.constness());
     }
   } else if (holder->IsJSGlobalObject()) {
     GlobalDictionary* dictionary = JSObject::cast(*holder)->global_dictionary();
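
Taken together, the lookup.cc changes above implement per-field constness
tracking: with --track-constant-fields a data field starts out kConst and is
demoted to kMutable the first time a store writes a value that differs from
the stored one (IsConstFieldValueEqualTo). A minimal model of that decision,
separate from the V8 types:

    enum ModelConstness { kConstField, kMutableField };

    ModelConstness AfterStore(ModelConstness current, bool value_unchanged,
                              bool track_constant_fields) {
      // Without the flag every field is mutable, matching the old behavior.
      if (!track_constant_fields) return kMutableField;
      // Re-storing an equal value preserves constness; anything else demotes.
      if (current == kConstField && !value_unchanged) return kMutableField;
      return current;
    }
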
diff --git a/src/lookup.h b/src/lookup.h
index e0b40c4..190a75e 100644
--- a/src/lookup.h
+++ b/src/lookup.h
@@ -236,6 +236,9 @@
   Representation representation() const {
     return property_details().representation();
   }
+  PropertyLocation location() const { return property_details().location(); }
+  PropertyConstness constness() const { return property_details().constness(); }
+  Handle<Map> GetFieldOwnerMap() const;
   FieldIndex GetFieldIndex() const;
   Handle<FieldType> GetFieldType() const;
   int GetFieldDescriptorIndex() const;
@@ -252,7 +255,7 @@
   }
   Handle<InterceptorInfo> GetInterceptorForFailedAccessCheck() const;
   Handle<Object> GetDataValue() const;
-  void WriteDataValue(Handle<Object> value);
+  void WriteDataValue(Handle<Object> value, bool initializing_store);
   inline void UpdateProtector() {
     if (IsElement()) return;
     if (*name_ == heap()->is_concat_spreadable_symbol() ||
@@ -288,7 +291,7 @@
   void NextInternal(Map* map, JSReceiver* holder);
   template <bool is_element>
   inline State LookupInHolder(Map* map, JSReceiver* holder) {
-    return map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE
+    return map->IsSpecialReceiverMap()
                ? LookupInSpecialHolder<is_element>(map, holder)
                : LookupInRegularHolder<is_element>(map, holder);
   }
@@ -303,6 +306,7 @@
   template <bool is_element>
   void RestartInternal(InterceptorState interceptor_state);
   Handle<Object> FetchValue() const;
+  bool IsConstFieldValueEqualTo(Object* value) const;
   template <bool is_element>
   void ReloadPropertyInformation();
 
diff --git a/src/machine-type.cc b/src/machine-type.cc
index 9289673..ba555dd 100644
--- a/src/machine-type.cc
+++ b/src/machine-type.cc
@@ -32,6 +32,12 @@
       return "kRepFloat64";
     case MachineRepresentation::kSimd128:
       return "kRepSimd128";
+    case MachineRepresentation::kSimd1x4:
+      return "kRepSimd1x4";
+    case MachineRepresentation::kSimd1x8:
+      return "kRepSimd1x8";
+    case MachineRepresentation::kSimd1x16:
+      return "kRepSimd1x16";
     case MachineRepresentation::kTaggedSigned:
       return "kRepTaggedSigned";
     case MachineRepresentation::kTaggedPointer:
diff --git a/src/machine-type.h b/src/machine-type.h
index 844c956..1f87cf2 100644
--- a/src/machine-type.h
+++ b/src/machine-type.h
@@ -15,7 +15,7 @@
 namespace v8 {
 namespace internal {
 
-enum class MachineRepresentation : uint8_t {
+enum class MachineRepresentation {
   kNone,
   kBit,
   kWord8,
@@ -29,8 +29,11 @@
   kFloat32,
   kFloat64,
   kSimd128,
+  kSimd1x4,  // SIMD boolean vector types.
+  kSimd1x8,
+  kSimd1x16,
   kFirstFPRepresentation = kFloat32,
-  kLastRepresentation = kSimd128
+  kLastRepresentation = kSimd1x16
 };
 
 static_assert(static_cast<int>(MachineRepresentation::kLastRepresentation) <
@@ -39,7 +42,7 @@
 
 const char* MachineReprToString(MachineRepresentation);
 
-enum class MachineSemantic : uint8_t {
+enum class MachineSemantic {
   kNone,
   kBool,
   kInt32,
@@ -79,28 +82,16 @@
     return semantic() == MachineSemantic::kUint32 ||
            semantic() == MachineSemantic::kUint64;
   }
-
   static MachineRepresentation PointerRepresentation() {
     return (kPointerSize == 4) ? MachineRepresentation::kWord32
                                : MachineRepresentation::kWord64;
   }
-  static MachineType Pointer() {
-    return MachineType(PointerRepresentation(), MachineSemantic::kNone);
+  static MachineType UintPtr() {
+    return (kPointerSize == 4) ? Uint32() : Uint64();
   }
   static MachineType IntPtr() {
     return (kPointerSize == 4) ? Int32() : Int64();
   }
-  static MachineType Float32() {
-    return MachineType(MachineRepresentation::kFloat32,
-                       MachineSemantic::kNumber);
-  }
-  static MachineType Float64() {
-    return MachineType(MachineRepresentation::kFloat64,
-                       MachineSemantic::kNumber);
-  }
-  static MachineType Simd128() {
-    return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
-  }
   static MachineType Int8() {
     return MachineType(MachineRepresentation::kWord8, MachineSemantic::kInt32);
   }
@@ -128,6 +119,30 @@
     return MachineType(MachineRepresentation::kWord64,
                        MachineSemantic::kUint64);
   }
+  static MachineType Float32() {
+    return MachineType(MachineRepresentation::kFloat32,
+                       MachineSemantic::kNumber);
+  }
+  static MachineType Float64() {
+    return MachineType(MachineRepresentation::kFloat64,
+                       MachineSemantic::kNumber);
+  }
+  static MachineType Simd128() {
+    return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
+  }
+  static MachineType Simd1x4() {
+    return MachineType(MachineRepresentation::kSimd1x4, MachineSemantic::kNone);
+  }
+  static MachineType Simd1x8() {
+    return MachineType(MachineRepresentation::kSimd1x8, MachineSemantic::kNone);
+  }
+  static MachineType Simd1x16() {
+    return MachineType(MachineRepresentation::kSimd1x16,
+                       MachineSemantic::kNone);
+  }
+  static MachineType Pointer() {
+    return MachineType(PointerRepresentation(), MachineSemantic::kNone);
+  }
   static MachineType TaggedPointer() {
     return MachineType(MachineRepresentation::kTaggedPointer,
                        MachineSemantic::kAny);
@@ -171,6 +186,16 @@
   static MachineType RepSimd128() {
     return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
   }
+  static MachineType RepSimd1x4() {
+    return MachineType(MachineRepresentation::kSimd1x4, MachineSemantic::kNone);
+  }
+  static MachineType RepSimd1x8() {
+    return MachineType(MachineRepresentation::kSimd1x8, MachineSemantic::kNone);
+  }
+  static MachineType RepSimd1x16() {
+    return MachineType(MachineRepresentation::kSimd1x16,
+                       MachineSemantic::kNone);
+  }
   static MachineType RepTagged() {
     return MachineType(MachineRepresentation::kTagged, MachineSemantic::kNone);
   }
@@ -199,6 +224,12 @@
         return MachineType::Float64();
       case MachineRepresentation::kSimd128:
         return MachineType::Simd128();
+      case MachineRepresentation::kSimd1x4:
+        return MachineType::Simd1x4();
+      case MachineRepresentation::kSimd1x8:
+        return MachineType::Simd1x8();
+      case MachineRepresentation::kSimd1x16:
+        return MachineType::Simd1x16();
       case MachineRepresentation::kTagged:
         return MachineType::AnyTagged();
       case MachineRepresentation::kTaggedSigned:
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index b683045..1c76c69 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -5,8 +5,7 @@
 #ifndef V8_MACRO_ASSEMBLER_H_
 #define V8_MACRO_ASSEMBLER_H_
 
-#include "src/assembler.h"
-
+#include "src/assembler-inl.h"
 
 // Helper types to make boolean flag easier to read at call-site.
 enum InvokeFlag {
@@ -36,47 +35,29 @@
 };
 
 #if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32.h"
-#include "src/ia32/assembler-ia32-inl.h"
 #include "src/ia32/macro-assembler-ia32.h"
 #elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64.h"
-#include "src/x64/assembler-x64-inl.h"
 #include "src/x64/macro-assembler-x64.h"
 #elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64.h"
-#include "src/arm64/assembler-arm64-inl.h"
 #include "src/arm64/constants-arm64.h"
 #include "src/arm64/macro-assembler-arm64.h"
 #include "src/arm64/macro-assembler-arm64-inl.h"
 #elif V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm.h"
-#include "src/arm/assembler-arm-inl.h"
 #include "src/arm/constants-arm.h"
 #include "src/arm/macro-assembler-arm.h"
 #elif V8_TARGET_ARCH_PPC
-#include "src/ppc/assembler-ppc.h"
-#include "src/ppc/assembler-ppc-inl.h"
 #include "src/ppc/constants-ppc.h"
 #include "src/ppc/macro-assembler-ppc.h"
 #elif V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips.h"
-#include "src/mips/assembler-mips-inl.h"
 #include "src/mips/constants-mips.h"
 #include "src/mips/macro-assembler-mips.h"
 #elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/assembler-mips64.h"
-#include "src/mips64/assembler-mips64-inl.h"
 #include "src/mips64/constants-mips64.h"
 #include "src/mips64/macro-assembler-mips64.h"
 #elif V8_TARGET_ARCH_S390
-#include "src/s390/assembler-s390.h"
-#include "src/s390/assembler-s390-inl.h"
 #include "src/s390/constants-s390.h"
 #include "src/s390/macro-assembler-s390.h"
 #elif V8_TARGET_ARCH_X87
-#include "src/x87/assembler-x87.h"
-#include "src/x87/assembler-x87-inl.h"
 #include "src/x87/macro-assembler-x87.h"
 #else
 #error Unsupported target architecture.
diff --git a/src/managed.h b/src/managed.h
new file mode 100644
index 0000000..b738ec5
--- /dev/null
+++ b/src/managed.h
@@ -0,0 +1,81 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MANAGED_H_
+#define V8_MANAGED_H_
+
+#include "src/factory.h"
+#include "src/global-handles.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+
+namespace v8 {
+namespace internal {
+// An object that wraps a pointer to a C++ object and manages its lifetime.
+// The C++ object will be deleted when the managed wrapper object is
+// garbage collected, or, as a last resort, when the isolate is torn down
+// before GC, as part of Isolate::Dispose().
+// Managed<CppType> may be used polymorphically as Foreign, where the held
+// address is typed as CppType**. The double indirection arises because
+// Managed uses Isolate::ManagedObjectFinalizer, which has a CppType* as its
+// first field.
+template <class CppType>
+class Managed : public Foreign {
+ public:
+  V8_INLINE CppType* get() {
+    return *(reinterpret_cast<CppType**>(foreign_address()));
+  }
+
+  static Managed<CppType>* cast(Object* obj) {
+    SLOW_DCHECK(obj->IsForeign());
+    return reinterpret_cast<Managed<CppType>*>(obj);
+  }
+
+  static Handle<Managed<CppType>> New(Isolate* isolate, CppType* ptr) {
+    Isolate::ManagedObjectFinalizer* node =
+        isolate->RegisterForReleaseAtTeardown(ptr,
+                                              Managed<CppType>::NativeDelete);
+    Handle<Managed<CppType>> handle = Handle<Managed<CppType>>::cast(
+        isolate->factory()->NewForeign(reinterpret_cast<Address>(node)));
+    RegisterWeakCallbackForDelete(isolate, handle);
+    return handle;
+  }
+
+ private:
+  static void RegisterWeakCallbackForDelete(Isolate* isolate,
+                                            Handle<Managed<CppType>> handle) {
+    Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
+    GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
+                            &Managed<CppType>::GCDelete,
+                            v8::WeakCallbackType::kFinalizer);
+  }
+
+  static void GCDelete(const v8::WeakCallbackInfo<void>& data) {
+    Managed<CppType>** p =
+        reinterpret_cast<Managed<CppType>**>(data.GetParameter());
+
+    Isolate::ManagedObjectFinalizer* finalizer = (*p)->GetFinalizer();
+
+    Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+    finalizer->Dispose();
+    isolate->UnregisterFromReleaseAtTeardown(&finalizer);
+
+    (*p)->set_foreign_address(static_cast<Address>(nullptr));
+    GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+  }
+
+  static void NativeDelete(void* value) {
+    CppType* typed_value = reinterpret_cast<CppType*>(value);
+    delete typed_value;
+  }
+
+  Isolate::ManagedObjectFinalizer* GetFinalizer() {
+    return reinterpret_cast<Isolate::ManagedObjectFinalizer*>(
+        foreign_address());
+  }
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_MANAGED_H_
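
A usage sketch for the new wrapper, assuming an Isolate* and an open
HandleScope; the wrapped object is freed either by the GC through the weak
callback or, failing that, at isolate teardown:

    // Hand ownership of a heap-allocated C++ object to the managed wrapper.
    std::string* payload = new std::string("payload");
    Handle<Managed<std::string>> wrapper =
        Managed<std::string>::New(isolate, payload);
    // get() follows the double indirection described in the class comment.
    std::string* raw = wrapper->get();
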
diff --git a/src/map-updater.cc b/src/map-updater.cc
new file mode 100644
index 0000000..f82c2cf
--- /dev/null
+++ b/src/map-updater.cc
@@ -0,0 +1,654 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/map-updater.h"
+
+#include "src/field-type.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "src/transitions.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+inline bool EqualImmutableValues(Object* obj1, Object* obj2) {
+  if (obj1 == obj2) return true;  // Valid for both kData and kAccessor kinds.
+  // TODO(ishell): compare AccessorPairs.
+  return false;
+}
+
+}  // namespace
+
+Name* MapUpdater::GetKey(int descriptor) const {
+  return old_descriptors_->GetKey(descriptor);
+}
+
+PropertyDetails MapUpdater::GetDetails(int descriptor) const {
+  DCHECK_LE(0, descriptor);
+  if (descriptor == modified_descriptor_) {
+    return PropertyDetails(new_kind_, new_attributes_, new_location_,
+                           new_constness_, new_representation_);
+  }
+  return old_descriptors_->GetDetails(descriptor);
+}
+
+Object* MapUpdater::GetValue(int descriptor) const {
+  DCHECK_LE(0, descriptor);
+  if (descriptor == modified_descriptor_) {
+    DCHECK_EQ(kDescriptor, new_location_);
+    return *new_value_;
+  }
+  DCHECK_EQ(kDescriptor, GetDetails(descriptor).location());
+  return old_descriptors_->GetValue(descriptor);
+}
+
+FieldType* MapUpdater::GetFieldType(int descriptor) const {
+  DCHECK_LE(0, descriptor);
+  if (descriptor == modified_descriptor_) {
+    DCHECK_EQ(kField, new_location_);
+    return *new_field_type_;
+  }
+  DCHECK_EQ(kField, GetDetails(descriptor).location());
+  return old_descriptors_->GetFieldType(descriptor);
+}
+
+Handle<FieldType> MapUpdater::GetOrComputeFieldType(
+    int descriptor, PropertyLocation location,
+    Representation representation) const {
+  DCHECK_LE(0, descriptor);
+  // |location| is just a pre-fetched GetDetails(descriptor).location().
+  DCHECK_EQ(location, GetDetails(descriptor).location());
+  if (location == kField) {
+    return handle(GetFieldType(descriptor), isolate_);
+  } else {
+    return GetValue(descriptor)->OptimalType(isolate_, representation);
+  }
+}
+
+Handle<FieldType> MapUpdater::GetOrComputeFieldType(
+    Handle<DescriptorArray> descriptors, int descriptor,
+    PropertyLocation location, Representation representation) {
+  // |location| is just a pre-fetched GetDetails(descriptor).location().
+  DCHECK_EQ(descriptors->GetDetails(descriptor).location(), location);
+  if (location == kField) {
+    return handle(descriptors->GetFieldType(descriptor), isolate_);
+  } else {
+    return descriptors->GetValue(descriptor)
+        ->OptimalType(isolate_, representation);
+  }
+}
+
+Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
+                                               PropertyAttributes attributes,
+                                               PropertyConstness constness,
+                                               Representation representation,
+                                               Handle<FieldType> field_type) {
+  DCHECK_EQ(kInitialized, state_);
+  DCHECK_LE(0, descriptor);
+  DCHECK(!old_map_->is_dictionary_map());
+  modified_descriptor_ = descriptor;
+  new_kind_ = kData;
+  new_attributes_ = attributes;
+  new_location_ = kField;
+
+  PropertyDetails old_details =
+      old_descriptors_->GetDetails(modified_descriptor_);
+
+  // If the property kind is not reconfigured, merge the result with the
+  // representation/field type from the old descriptor.
+  if (old_details.kind() == new_kind_) {
+    new_constness_ = GeneralizeConstness(constness, old_details.constness());
+
+    Representation old_representation = old_details.representation();
+    new_representation_ = representation.generalize(old_representation);
+
+    Handle<FieldType> old_field_type =
+        GetOrComputeFieldType(old_descriptors_, modified_descriptor_,
+                              old_details.location(), new_representation_);
+
+    new_field_type_ =
+        Map::GeneralizeFieldType(old_representation, old_field_type,
+                                 new_representation_, field_type, isolate_);
+  } else {
+    // We don't know if this is a first property kind reconfiguration,
+    // and we don't know which value this property previously held,
+    // so we can't treat such a property as constant.
+    new_constness_ = kMutable;
+    new_representation_ = representation;
+    new_field_type_ = field_type;
+  }
+
+  if (TryRecofigureToDataFieldInplace() == kEnd) return result_map_;
+  if (FindRootMap() == kEnd) return result_map_;
+  if (FindTargetMap() == kEnd) return result_map_;
+  ConstructNewMap();
+  DCHECK_EQ(kEnd, state_);
+  return result_map_;
+}
+
+Handle<Map> MapUpdater::ReconfigureElementsKind(ElementsKind elements_kind) {
+  DCHECK_EQ(kInitialized, state_);
+  new_elements_kind_ = elements_kind;
+
+  if (FindRootMap() == kEnd) return result_map_;
+  if (FindTargetMap() == kEnd) return result_map_;
+  ConstructNewMap();
+  DCHECK_EQ(kEnd, state_);
+  return result_map_;
+}
+
+Handle<Map> MapUpdater::Update() {
+  DCHECK_EQ(kInitialized, state_);
+  DCHECK(old_map_->is_deprecated());
+
+  if (FindRootMap() == kEnd) return result_map_;
+  if (FindTargetMap() == kEnd) return result_map_;
+  ConstructNewMap();
+  DCHECK_EQ(kEnd, state_);
+  return result_map_;
+}
+
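+// ReconfigureToDataField(), ReconfigureElementsKind() and Update() all drive
+// the same state machine:
+//   kInitialized --FindRootMap()--> kAtRootMap --FindTargetMap()-->
+//   kAtTargetMap --ConstructNewMap()--> kEnd.
+// Any step may finish early at kEnd, via an in-place generalization
+// (TryRecofigureToDataFieldInplace), an exact-match target map, or the
+// CopyGeneralizeAllFields() bailout.
+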
+void MapUpdater::GeneralizeField(Handle<Map> map, int modify_index,
+                                 PropertyConstness new_constness,
+                                 Representation new_representation,
+                                 Handle<FieldType> new_field_type) {
+  Map::GeneralizeField(map, modify_index, new_constness, new_representation,
+                       new_field_type);
+
+  DCHECK_EQ(*old_descriptors_, old_map_->instance_descriptors());
+}
+
+MapUpdater::State MapUpdater::CopyGeneralizeAllFields(const char* reason) {
+  result_map_ = Map::CopyGeneralizeAllFields(old_map_, new_elements_kind_,
+                                             modified_descriptor_, new_kind_,
+                                             new_attributes_, reason);
+  state_ = kEnd;
+  return state_;  // Done.
+}
+
+MapUpdater::State MapUpdater::TryRecofigureToDataFieldInplace() {
+  // If it's just a representation generalization case (i.e. property kind and
+  // attributes stay unchanged), it's fine to transition from None to anything
+  // but double without any modification to the object, because the default
+  // uninitialized value for representation None can be overwritten by both
+  // smi and tagged values. Doubles, however, would require a box allocation.
+  if (new_representation_.IsNone() || new_representation_.IsDouble()) {
+    return state_;  // Not done yet.
+  }
+
+  PropertyDetails old_details =
+      old_descriptors_->GetDetails(modified_descriptor_);
+  Representation old_representation = old_details.representation();
+  if (!old_representation.IsNone()) {
+    return state_;  // Not done yet.
+  }
+
+  DCHECK_EQ(new_kind_, old_details.kind());
+  DCHECK_EQ(new_attributes_, old_details.attributes());
+  DCHECK_EQ(kField, old_details.location());
+  if (FLAG_trace_generalization) {
+    old_map_->PrintGeneralization(
+        stdout, "uninitialized field", modified_descriptor_, old_nof_, old_nof_,
+        false, old_representation, new_representation_,
+        handle(old_descriptors_->GetFieldType(modified_descriptor_), isolate_),
+        MaybeHandle<Object>(), new_field_type_, MaybeHandle<Object>());
+  }
+  Handle<Map> field_owner(old_map_->FindFieldOwner(modified_descriptor_),
+                          isolate_);
+
+  GeneralizeField(field_owner, modified_descriptor_, new_constness_,
+                  new_representation_, new_field_type_);
+  // Check that the descriptor array was updated.
+  DCHECK(old_descriptors_->GetDetails(modified_descriptor_)
+             .representation()
+             .Equals(new_representation_));
+  DCHECK(old_descriptors_->GetFieldType(modified_descriptor_)
+             ->NowIs(new_field_type_));
+
+  result_map_ = old_map_;
+  state_ = kEnd;
+  return state_;  // Done.
+}
+
+MapUpdater::State MapUpdater::FindRootMap() {
+  DCHECK_EQ(kInitialized, state_);
+  // Check the state of the root map.
+  root_map_ = handle(old_map_->FindRootMap(), isolate_);
+  int root_nof = root_map_->NumberOfOwnDescriptors();
+  if (!old_map_->EquivalentToForTransition(*root_map_)) {
+    return CopyGeneralizeAllFields("GenAll_NotEquivalent");
+  }
+
+  ElementsKind from_kind = root_map_->elements_kind();
+  ElementsKind to_kind = new_elements_kind_;
+  // TODO(ishell): Add a test for SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
+  if (from_kind != to_kind && to_kind != DICTIONARY_ELEMENTS &&
+      to_kind != SLOW_STRING_WRAPPER_ELEMENTS &&
+      to_kind != SLOW_SLOPPY_ARGUMENTS_ELEMENTS &&
+      !(IsTransitionableFastElementsKind(from_kind) &&
+        IsMoreGeneralElementsKindTransition(from_kind, to_kind))) {
+    return CopyGeneralizeAllFields("GenAll_InvalidElementsTransition");
+  }
+
+  if (modified_descriptor_ >= 0 && modified_descriptor_ < root_nof) {
+    PropertyDetails old_details =
+        old_descriptors_->GetDetails(modified_descriptor_);
+    if (old_details.kind() != new_kind_ ||
+        old_details.attributes() != new_attributes_) {
+      return CopyGeneralizeAllFields("GenAll_RootModification1");
+    }
+    if (old_details.location() != kField) {
+      return CopyGeneralizeAllFields("GenAll_RootModification2");
+    }
+    if (new_constness_ != old_details.constness()) {
+      return CopyGeneralizeAllFields("GenAll_RootModification3");
+    }
+    if (!new_representation_.fits_into(old_details.representation())) {
+      return CopyGeneralizeAllFields("GenAll_RootModification4");
+    }
+
+    DCHECK_EQ(kData, old_details.kind());
+    DCHECK_EQ(kData, new_kind_);
+    DCHECK_EQ(kField, new_location_);
+    FieldType* old_field_type =
+        old_descriptors_->GetFieldType(modified_descriptor_);
+    if (!new_field_type_->NowIs(old_field_type)) {
+      return CopyGeneralizeAllFields("GenAll_RootModification5");
+    }
+  }
+
+  // From here on, use the map with correct elements kind as root map.
+  if (from_kind != to_kind) {
+    root_map_ = Map::AsElementsKind(root_map_, to_kind);
+  }
+  state_ = kAtRootMap;
+  return state_;  // Not done yet.
+}
+
+MapUpdater::State MapUpdater::FindTargetMap() {
+  DCHECK_EQ(kAtRootMap, state_);
+  target_map_ = root_map_;
+
+  int root_nof = root_map_->NumberOfOwnDescriptors();
+  for (int i = root_nof; i < old_nof_; ++i) {
+    PropertyDetails old_details = GetDetails(i);
+    Map* transition = TransitionArray::SearchTransition(
+        *target_map_, old_details.kind(), GetKey(i), old_details.attributes());
+    if (transition == NULL) break;
+    Handle<Map> tmp_map(transition, isolate_);
+
+    Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
+                                            isolate_);
+
+    // Check if target map is incompatible.
+    PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
+    DCHECK_EQ(old_details.kind(), tmp_details.kind());
+    DCHECK_EQ(old_details.attributes(), tmp_details.attributes());
+    if (old_details.kind() == kAccessor &&
+        !EqualImmutableValues(GetValue(i), tmp_descriptors->GetValue(i))) {
+      // TODO(ishell): mutable accessors are not implemented yet.
+      return CopyGeneralizeAllFields("GenAll_Incompatible");
+    }
+    PropertyConstness tmp_constness = tmp_details.constness();
+    if (!IsGeneralizableTo(old_details.constness(), tmp_constness)) {
+      break;
+    }
+    if (!IsGeneralizableTo(old_details.location(), tmp_details.location())) {
+      break;
+    }
+    Representation tmp_representation = tmp_details.representation();
+    if (!old_details.representation().fits_into(tmp_representation)) {
+      break;
+    }
+
+    if (tmp_details.location() == kField) {
+      Handle<FieldType> old_field_type =
+          GetOrComputeFieldType(i, old_details.location(), tmp_representation);
+      GeneralizeField(tmp_map, i, tmp_constness, tmp_representation,
+                      old_field_type);
+    } else {
+      // kDescriptor: Check that the value matches.
+      if (!EqualImmutableValues(GetValue(i), tmp_descriptors->GetValue(i))) {
+        break;
+      }
+    }
+    DCHECK(!tmp_map->is_deprecated());
+    target_map_ = tmp_map;
+  }
+
+  // Directly change the map if the target map is more general.
+  int target_nof = target_map_->NumberOfOwnDescriptors();
+  if (target_nof == old_nof_) {
+#ifdef DEBUG
+    if (modified_descriptor_ >= 0) {
+      DescriptorArray* target_descriptors = target_map_->instance_descriptors();
+      PropertyDetails details =
+          target_descriptors->GetDetails(modified_descriptor_);
+      DCHECK_EQ(new_kind_, details.kind());
+      DCHECK_EQ(new_attributes_, details.attributes());
+      DCHECK(IsGeneralizableTo(new_constness_, details.constness()));
+      DCHECK_EQ(new_location_, details.location());
+      DCHECK(new_representation_.fits_into(details.representation()));
+      if (new_location_ == kField) {
+        DCHECK_EQ(kField, details.location());
+        DCHECK(new_field_type_->NowIs(
+            target_descriptors->GetFieldType(modified_descriptor_)));
+      } else {
+        DCHECK(details.location() == kField ||
+               EqualImmutableValues(*new_value_, target_descriptors->GetValue(
+                                                     modified_descriptor_)));
+      }
+    }
+#endif
+    if (*target_map_ != *old_map_) {
+      old_map_->NotifyLeafMapLayoutChange();
+    }
+    result_map_ = target_map_;
+    state_ = kEnd;
+    return state_;  // Done.
+  }
+
+  // Find the last compatible target map in the transition tree.
+  for (int i = target_nof; i < old_nof_; ++i) {
+    PropertyDetails old_details = GetDetails(i);
+    Map* transition = TransitionArray::SearchTransition(
+        *target_map_, old_details.kind(), GetKey(i), old_details.attributes());
+    if (transition == NULL) break;
+    Handle<Map> tmp_map(transition, isolate_);
+    Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
+                                            isolate_);
+#ifdef DEBUG
+    // Check that target map is compatible.
+    PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
+    DCHECK_EQ(old_details.kind(), tmp_details.kind());
+    DCHECK_EQ(old_details.attributes(), tmp_details.attributes());
+#endif
+    if (old_details.kind() == kAccessor &&
+        !EqualImmutableValues(GetValue(i), tmp_descriptors->GetValue(i))) {
+      return CopyGeneralizeAllFields("GenAll_Incompatible");
+    }
+    DCHECK(!tmp_map->is_deprecated());
+    target_map_ = tmp_map;
+  }
+
+  state_ = kAtTargetMap;
+  return state_;  // Not done yet.
+}
+
+Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
+  int target_nof = target_map_->NumberOfOwnDescriptors();
+  Handle<DescriptorArray> target_descriptors(
+      target_map_->instance_descriptors(), isolate_);
+
+  // Allocate a new descriptor array large enough to hold the required
+  // descriptors, and at minimum the same size as the old descriptor
+  // array.
+  int new_slack =
+      Max(old_nof_, old_descriptors_->number_of_descriptors()) - old_nof_;
+  Handle<DescriptorArray> new_descriptors =
+      DescriptorArray::Allocate(isolate_, old_nof_, new_slack);
+  DCHECK(new_descriptors->length() > target_descriptors->length() ||
+         new_descriptors->NumberOfSlackDescriptors() > 0 ||
+         new_descriptors->number_of_descriptors() ==
+             old_descriptors_->number_of_descriptors());
+  DCHECK(new_descriptors->number_of_descriptors() == old_nof_);
+
+  int root_nof = root_map_->NumberOfOwnDescriptors();
+
+  // We passed the root modification check in FindRootMap(), so the root
+  // descriptors are either not modified at all or already more general
+  // than requested. Take |root_nof| entries as is.
+  // 0 -> |root_nof|
+  int current_offset = 0;
+  for (int i = 0; i < root_nof; ++i) {
+    PropertyDetails old_details = old_descriptors_->GetDetails(i);
+    if (old_details.location() == kField) {
+      current_offset += old_details.field_width_in_words();
+    }
+    Descriptor d(handle(GetKey(i), isolate_),
+                 handle(old_descriptors_->GetValue(i), isolate_), old_details);
+    new_descriptors->Set(i, &d);
+  }
+
+  // Merge "updated" old_descriptor entries with target_descriptor entries.
+  // |root_nof| -> |target_nof|
+  for (int i = root_nof; i < target_nof; ++i) {
+    Handle<Name> key(GetKey(i), isolate_);
+    PropertyDetails old_details = GetDetails(i);
+    PropertyDetails target_details = target_descriptors->GetDetails(i);
+
+    PropertyKind next_kind = old_details.kind();
+    PropertyAttributes next_attributes = old_details.attributes();
+    DCHECK_EQ(next_kind, target_details.kind());
+    DCHECK_EQ(next_attributes, target_details.attributes());
+
+    PropertyConstness next_constness = GeneralizeConstness(
+        old_details.constness(), target_details.constness());
+
+    // Note: a failed value-equality check does not invalidate per-object
+    // property constness.
+    PropertyLocation next_location =
+        old_details.location() == kField ||
+                target_details.location() == kField ||
+                !EqualImmutableValues(target_descriptors->GetValue(i),
+                                      GetValue(i))
+            ? kField
+            : kDescriptor;
+
+    if (!FLAG_track_constant_fields && next_location == kField) {
+      next_constness = kMutable;
+    }
+    // Ensure that mutable values are stored in fields.
+    DCHECK_IMPLIES(next_constness == kMutable, next_location == kField);
+
+    Representation next_representation =
+        old_details.representation().generalize(
+            target_details.representation());
+
+    if (next_location == kField) {
+      Handle<FieldType> old_field_type =
+          GetOrComputeFieldType(i, old_details.location(), next_representation);
+
+      Handle<FieldType> target_field_type =
+          GetOrComputeFieldType(target_descriptors, i,
+                                target_details.location(), next_representation);
+
+      Handle<FieldType> next_field_type = Map::GeneralizeFieldType(
+          old_details.representation(), old_field_type, next_representation,
+          target_field_type, isolate_);
+
+      Handle<Object> wrapped_type(Map::WrapFieldType(next_field_type));
+      Descriptor d;
+      if (next_kind == kData) {
+        d = Descriptor::DataField(key, current_offset, next_attributes,
+                                  next_constness, next_representation,
+                                  wrapped_type);
+      } else {
+        // TODO(ishell): mutable accessors are not implemented yet.
+        UNIMPLEMENTED();
+      }
+      current_offset += d.GetDetails().field_width_in_words();
+      new_descriptors->Set(i, &d);
+    } else {
+      DCHECK_EQ(kDescriptor, next_location);
+      DCHECK_EQ(kConst, next_constness);
+
+      Handle<Object> value(GetValue(i), isolate_);
+      Descriptor d;
+      if (next_kind == kData) {
+        DCHECK(!FLAG_track_constant_fields);
+        d = Descriptor::DataConstant(key, value, next_attributes);
+      } else {
+        DCHECK_EQ(kAccessor, next_kind);
+        d = Descriptor::AccessorConstant(key, value, next_attributes);
+      }
+      new_descriptors->Set(i, &d);
+    }
+  }
+
+  // Take "updated" old_descriptor entries.
+  // |target_nof| -> |old_nof|
+  for (int i = target_nof; i < old_nof_; ++i) {
+    PropertyDetails old_details = GetDetails(i);
+    Handle<Name> key(GetKey(i), isolate_);
+
+    PropertyKind next_kind = old_details.kind();
+    PropertyAttributes next_attributes = old_details.attributes();
+    PropertyConstness next_constness = old_details.constness();
+    PropertyLocation next_location = old_details.location();
+    Representation next_representation = old_details.representation();
+
+    Descriptor d;
+    if (next_location == kField) {
+      Handle<FieldType> old_field_type =
+          GetOrComputeFieldType(i, old_details.location(), next_representation);
+
+      Handle<Object> wrapped_type(Map::WrapFieldType(old_field_type));
+      if (next_kind == kData) {
+        DCHECK_IMPLIES(!FLAG_track_constant_fields, next_constness == kMutable);
+        d = Descriptor::DataField(key, current_offset, next_attributes,
+                                  next_constness, next_representation,
+                                  wrapped_type);
+      } else {
+        // TODO(ishell): mutable accessors are not implemented yet.
+        UNIMPLEMENTED();
+      }
+      current_offset += d.GetDetails().field_width_in_words();
+      new_descriptors->Set(i, &d);
+    } else {
+      DCHECK_EQ(kDescriptor, next_location);
+      DCHECK_EQ(kConst, next_constness);
+
+      Handle<Object> value(GetValue(i), isolate_);
+      if (next_kind == kData) {
+        d = Descriptor::DataConstant(key, value, next_attributes);
+      } else {
+        DCHECK_EQ(kAccessor, next_kind);
+        d = Descriptor::AccessorConstant(key, value, next_attributes);
+      }
+      new_descriptors->Set(i, &d);
+    }
+  }
+
+  new_descriptors->Sort();
+  return new_descriptors;
+}
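
The merge loop above generalizes each descriptor pairwise: constness and location each move down a two-point lattice, and representations are widened via Representation::generalize. A minimal standalone sketch of the constness/location rules, using simplified stand-in enums rather than the real types from src/property-details.h:

    #include <cassert>

    // Simplified stand-ins for the lattice above; the real definitions live
    // in src/property-details.h.
    enum Constness { kConst, kMutable };
    enum Location { kDescriptor, kField };

    Constness GeneralizeConstness(Constness a, Constness b) {
      // kConst survives only if both sides agree; kMutable wins otherwise.
      return (a == kMutable || b == kMutable) ? kMutable : kConst;
    }

    Location GeneralizeLocation(Location a, Location b, bool values_equal) {
      // A property stays an in-descriptor constant only if both sides keep
      // kDescriptor location and hold the same immutable value.
      return (a == kField || b == kField || !values_equal) ? kField
                                                           : kDescriptor;
    }

    int main() {
      assert(GeneralizeConstness(kConst, kMutable) == kMutable);
      assert(GeneralizeLocation(kDescriptor, kDescriptor, true) == kDescriptor);
      assert(GeneralizeLocation(kDescriptor, kDescriptor, false) == kField);
      return 0;
    }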
+
+Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
+  DisallowHeapAllocation no_allocation;
+
+  int root_nof = root_map_->NumberOfOwnDescriptors();
+  Map* current = *root_map_;
+  for (int i = root_nof; i < old_nof_; i++) {
+    Name* name = descriptors->GetKey(i);
+    PropertyDetails details = descriptors->GetDetails(i);
+    Map* next = TransitionArray::SearchTransition(current, details.kind(), name,
+                                                  details.attributes());
+    if (next == NULL) break;
+    DescriptorArray* next_descriptors = next->instance_descriptors();
+
+    PropertyDetails next_details = next_descriptors->GetDetails(i);
+    DCHECK_EQ(details.kind(), next_details.kind());
+    DCHECK_EQ(details.attributes(), next_details.attributes());
+    if (details.constness() != next_details.constness()) break;
+    if (details.location() != next_details.location()) break;
+    if (!details.representation().Equals(next_details.representation())) break;
+
+    if (next_details.location() == kField) {
+      FieldType* next_field_type = next_descriptors->GetFieldType(i);
+      if (!descriptors->GetFieldType(i)->NowIs(next_field_type)) {
+        break;
+      }
+    } else {
+      if (!EqualImmutableValues(descriptors->GetValue(i),
+                                next_descriptors->GetValue(i))) {
+        break;
+      }
+    }
+    current = next;
+  }
+  return handle(current, isolate_);
+}
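
FindSplitMap is a longest-compatible-prefix walk: starting at the root it follows existing transitions while the kind, attributes, constness, location, representation and field type of the next map all still match the merged descriptors, and it returns the last map that matched. A reduced sketch of the pattern, with hypothetical simplified types standing in for Map and TransitionArray:

    #include <cstddef>
    #include <map>
    #include <vector>

    // Hypothetical node standing in for Map plus its TransitionArray:
    // children keyed by a property key, plus a per-node compatibility flag.
    struct Node {
      std::map<int, Node*> transitions;
      bool compatible = true;

      Node* FindTransition(int key) {
        auto it = transitions.find(key);
        return it == transitions.end() ? nullptr : it->second;
      }
    };

    // Walk from |root| along |wanted| keys; stop at the first missing or
    // incompatible transition and return the last matching node -- the
    // split map.
    Node* FindSplit(Node* root, const std::vector<int>& wanted, size_t start) {
      Node* current = root;
      for (size_t i = start; i < wanted.size(); ++i) {
        Node* next = current->FindTransition(wanted[i]);
        if (next == nullptr || !next->compatible) break;
        current = next;
      }
      return current;
    }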
+
+MapUpdater::State MapUpdater::ConstructNewMap() {
+  Handle<DescriptorArray> new_descriptors = BuildDescriptorArray();
+
+  Handle<Map> split_map = FindSplitMap(new_descriptors);
+  int split_nof = split_map->NumberOfOwnDescriptors();
+  DCHECK_NE(old_nof_, split_nof);
+
+  PropertyDetails split_details = GetDetails(split_nof);
+
+  // Invalidate a transition target at the key of the split descriptor.
+  Map* maybe_transition = TransitionArray::SearchTransition(
+      *split_map, split_details.kind(), GetKey(split_nof),
+      split_details.attributes());
+  if (maybe_transition != NULL) {
+    maybe_transition->DeprecateTransitionTree();
+  }
+
+  // If |maybe_transition| is not NULL then the transition array already
+  // contains an entry for the given descriptor. This means that the
+  // transition can be inserted regardless of whether the transition array
+  // is full or not.
+  if (maybe_transition == NULL &&
+      !TransitionArray::CanHaveMoreTransitions(split_map)) {
+    return CopyGeneralizeAllFields("GenAll_CantHaveMoreTransitions");
+  }
+
+  old_map_->NotifyLeafMapLayoutChange();
+
+  if (FLAG_trace_generalization && modified_descriptor_ >= 0) {
+    PropertyDetails old_details =
+        old_descriptors_->GetDetails(modified_descriptor_);
+    PropertyDetails new_details =
+        new_descriptors->GetDetails(modified_descriptor_);
+    MaybeHandle<FieldType> old_field_type;
+    MaybeHandle<FieldType> new_field_type;
+    MaybeHandle<Object> old_value;
+    MaybeHandle<Object> new_value;
+    if (old_details.location() == kField) {
+      old_field_type = handle(
+          old_descriptors_->GetFieldType(modified_descriptor_), isolate_);
+    } else {
+      old_value =
+          handle(old_descriptors_->GetValue(modified_descriptor_), isolate_);
+    }
+    if (new_details.location() == kField) {
+      new_field_type =
+          handle(new_descriptors->GetFieldType(modified_descriptor_), isolate_);
+    } else {
+      new_value =
+          handle(new_descriptors->GetValue(modified_descriptor_), isolate_);
+    }
+
+    old_map_->PrintGeneralization(
+        stdout, "", modified_descriptor_, split_nof, old_nof_,
+        old_details.location() == kDescriptor && new_location_ == kField,
+        old_details.representation(), new_details.representation(),
+        old_field_type, old_value, new_field_type, new_value);
+  }
+
+  Handle<LayoutDescriptor> new_layout_descriptor =
+      LayoutDescriptor::New(split_map, new_descriptors, old_nof_);
+
+  Handle<Map> new_map = Map::AddMissingTransitions(split_map, new_descriptors,
+                                                   new_layout_descriptor);
+
+  // The deprecated part of the transition tree is no longer reachable, so
+  // replace the current instance descriptors in the "survived" part of the
+  // tree with the new descriptors to maintain the descriptor-sharing
+  // invariant.
+  split_map->ReplaceDescriptors(*new_descriptors, *new_layout_descriptor);
+
+  result_map_ = new_map;
+  state_ = kEnd;
+  return state_;  // Done.
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/map-updater.h b/src/map-updater.h
new file mode 100644
index 0000000..3389144
--- /dev/null
+++ b/src/map-updater.h
@@ -0,0 +1,180 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAP_UPDATER_H_
+#define V8_MAP_UPDATER_H_
+
+#include "src/elements-kind.h"
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/property-details.h"
+
+namespace v8 {
+namespace internal {
+
+// The |MapUpdater| class implements all sorts of map reconfigurations
+// including changes of elements kind, property attributes, property kind,
+// property location and field representations/type changes. It ensures that
+// the reconfigured map and all the intermediate maps are properly integrated
+// into the existing transition tree.
+//
+// To avoid high degrees of polymorphism and to stabilize quickly, on every
+// rewrite the new type is deduced by merging the current type with any
+// potential new (partial) version of the type in the transition tree.
+// To do this, on each rewrite:
+// - Search the root of the transition tree using FindRootMap.
+// - Find/create a |root_map| with requested |new_elements_kind|.
+// - Find |target_map|, the newest matching version of this map using the
+//   "updated" |old_map|'s descriptor array (i.e. whose entry at |modify_index|
+//   is considered to be of |new_kind| and having |new_attributes|) to walk
+//   the transition tree.
+// - Merge/generalize the "updated" descriptor array of the |old_map| and
+//   descriptor array of the |target_map|.
+// - Generalize the |modify_index| descriptor using |new_representation| and
+//   |new_field_type|.
+// - Walk the tree again starting from the root towards |target_map|. Stop at
+//   |split_map|, the first map whose descriptor array does not match the merged
+//   descriptor array.
+// - If |target_map| == |split_map|, |target_map| is in the expected state.
+//   Return it.
+// - Otherwise, invalidate the outdated transition target from |target_map|, and
+//   replace its transition tree with a new branch for the updated descriptors.
+class MapUpdater {
+ public:
+  MapUpdater(Isolate* isolate, Handle<Map> old_map)
+      : isolate_(isolate),
+        old_map_(old_map),
+        old_descriptors_(old_map->instance_descriptors(), isolate_),
+        old_nof_(old_map_->NumberOfOwnDescriptors()),
+        new_elements_kind_(old_map_->elements_kind()) {}
+
+  // Prepares for reconfiguring the property at |descriptor| to a data field
+  // with the given |attributes| and |representation|/|field_type|, and
+  // performs steps 1-5.
+  Handle<Map> ReconfigureToDataField(int descriptor,
+                                     PropertyAttributes attributes,
+                                     PropertyConstness constness,
+                                     Representation representation,
+                                     Handle<FieldType> field_type);
+
+  // Prepares for reconfiguring the elements kind and performs steps 1-5.
+  Handle<Map> ReconfigureElementsKind(ElementsKind elements_kind);
+
+  // Prepares for updating a deprecated map to the most up-to-date
+  // non-deprecated version and performs steps 1-5.
+  Handle<Map> Update();
+
+ private:
+  enum State { kInitialized, kAtRootMap, kAtTargetMap, kEnd };
+
+  // Try to reconfigure the property in place without rebuilding the
+  // transition tree or creating new maps. See implementation for details.
+  State TryRecofigureToDataFieldInplace();
+
+  // Step 1.
+  // - Search the root of the transition tree using FindRootMap.
+  // - Find/create a |root_map_| with requested |new_elements_kind_|.
+  State FindRootMap();
+
+  // Step 2.
+  // - Find |target_map_|, the newest matching version of this map using the
+  //   "updated" |old_map|'s descriptor array (i.e. whose entry at
+  //   |modified_descriptor_| is considered to be of |new_kind| and having
+  //   |new_attributes|) to walk the transition tree.
+  State FindTargetMap();
+
+  // Step 3.
+  // - Merge/generalize the "updated" descriptor array of the |old_map_| and
+  //   descriptor array of the |target_map_|.
+  // - Generalize the |modified_descriptor_| entry using |new_representation_|
+  //   and |new_field_type_|.
+  Handle<DescriptorArray> BuildDescriptorArray();
+
+  // Step 4.
+  // - Walk the tree again starting from the root towards |target_map|. Stop at
+  //   |split_map|, the first map whose descriptor array does not match the
+  //   merged descriptor array.
+  Handle<Map> FindSplitMap(Handle<DescriptorArray> descriptors);
+
+  // Step 5.
+  // - If |target_map| == |split_map|, |target_map| is in the expected state.
+  //   Return it.
+  // - Otherwise, invalidate the outdated transition target from |target_map|,
+  //   and replace its transition tree with a new branch for the updated
+  //   descriptors.
+  State ConstructNewMap();
+
+  // When a requested reconfiguration cannot be done, the result is a copy
+  // of |old_map_| in which every field has |Tagged| representation and |Any|
+  // field type. This map is disconnected from the transition tree.
+  State CopyGeneralizeAllFields(const char* reason);
+
+  // Returns the name of the |descriptor| property.
+  inline Name* GetKey(int descriptor) const;
+
+  // Returns the property details of |descriptor| in the "updated"
+  // |old_descriptors_| array.
+  inline PropertyDetails GetDetails(int descriptor) const;
+
+  // Returns the value of a |descriptor| with kDescriptor location in the
+  // "updated" |old_descriptors_| array.
+  inline Object* GetValue(int descriptor) const;
+
+  // Returns the field type of a |descriptor| with kField location in the
+  // "updated" |old_descriptors_| array.
+  inline FieldType* GetFieldType(int descriptor) const;
+
+  // If a |descriptor| property in "updated" |old_descriptors_| has kField
+  // location then returns it's field type otherwise computes optimal field
+  // type for the descriptor's value and |representation|. The |location|
+  // value must be a pre-fetched location for |descriptor|.
+  inline Handle<FieldType> GetOrComputeFieldType(
+      int descriptor, PropertyLocation location,
+      Representation representation) const;
+
+  // If the |descriptor| property in the given |descriptors| array has kField
+  // location, returns its field type; otherwise computes the optimal field
+  // type for the descriptor's value and |representation|.
+  // The |location| value must be the pre-fetched location for |descriptor|.
+  inline Handle<FieldType> GetOrComputeFieldType(
+      Handle<DescriptorArray> descriptors, int descriptor,
+      PropertyLocation location, Representation representation);
+
+  void GeneralizeField(Handle<Map> map, int modify_index,
+                       PropertyConstness new_constness,
+                       Representation new_representation,
+                       Handle<FieldType> new_field_type);
+
+  Isolate* isolate_;
+  Handle<Map> old_map_;
+  Handle<DescriptorArray> old_descriptors_;
+  Handle<Map> root_map_;
+  Handle<Map> target_map_;
+  Handle<Map> result_map_;
+  int old_nof_;
+
+  State state_ = kInitialized;
+  ElementsKind new_elements_kind_;
+
+  // If |modified_descriptor_| is not equal to -1 then the fields below form
+  // an "update" of the |old_map_|'s descriptors.
+  int modified_descriptor_ = -1;
+  PropertyKind new_kind_ = kData;
+  PropertyAttributes new_attributes_ = NONE;
+  PropertyConstness new_constness_ = kMutable;
+  PropertyLocation new_location_ = kField;
+  Representation new_representation_ = Representation::None();
+
+  // Data specific to kField location.
+  Handle<FieldType> new_field_type_;
+
+  // Data specific to kDescriptor location.
+  Handle<Object> new_value_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_MAP_UPDATER_H_
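
All three public entry points drive the same five-step pipeline. A minimal caller-side sketch, assuming the V8-internal types declared above (the concrete call sites, e.g. a Map::Update wrapper, are not part of this file):

    // Sketch only: runs steps 1-5 and returns the up-to-date map.
    Handle<Map> UpdateMapIfDeprecated(Isolate* isolate, Handle<Map> map) {
      if (!map->is_deprecated()) return map;
      MapUpdater updater(isolate, map);
      return updater.Update();
    }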
diff --git a/src/messages.cc b/src/messages.cc
index eea77e3..b5150ef 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -21,11 +21,11 @@
                                  int end_pos)
     : script_(script), start_pos_(start_pos), end_pos_(end_pos) {}
 MessageLocation::MessageLocation(Handle<Script> script, int start_pos,
-                                 int end_pos, Handle<JSFunction> function)
+                                 int end_pos, Handle<SharedFunctionInfo> shared)
     : script_(script),
       start_pos_(start_pos),
       end_pos_(end_pos),
-      function_(function) {}
+      shared_(shared) {}
 MessageLocation::MessageLocation() : start_pos_(-1), end_pos_(-1) {}
 
 // If no message listeners have been registered this one is called
@@ -47,10 +47,9 @@
   }
 }
 
-
 Handle<JSMessageObject> MessageHandler::MakeMessageObject(
     Isolate* isolate, MessageTemplate::Template message,
-    MessageLocation* location, Handle<Object> argument,
+    const MessageLocation* location, Handle<Object> argument,
     Handle<JSArray> stack_frames) {
   Factory* factory = isolate->factory();
 
@@ -75,50 +74,63 @@
   return message_obj;
 }
 
-
-void MessageHandler::ReportMessage(Isolate* isolate, MessageLocation* loc,
+void MessageHandler::ReportMessage(Isolate* isolate, const MessageLocation* loc,
                                    Handle<JSMessageObject> message) {
-  // We are calling into embedder's code which can throw exceptions.
-  // Thus we need to save current exception state, reset it to the clean one
-  // and ignore scheduled exceptions callbacks can throw.
-
-  // We pass the exception object into the message handler callback though.
-  Object* exception_object = isolate->heap()->undefined_value();
-  if (isolate->has_pending_exception()) {
-    exception_object = isolate->pending_exception();
-  }
-  Handle<Object> exception(exception_object, isolate);
-
-  Isolate::ExceptionScope exception_scope(isolate);
-  isolate->clear_pending_exception();
-  isolate->set_external_caught_exception(false);
-
-  // Turn the exception on the message into a string if it is an object.
-  if (message->argument()->IsJSObject()) {
-    HandleScope scope(isolate);
-    Handle<Object> argument(message->argument(), isolate);
-
-    MaybeHandle<Object> maybe_stringified;
-    Handle<Object> stringified;
-    // Make sure we don't leak uncaught internally generated Error objects.
-    if (argument->IsJSError()) {
-      maybe_stringified = Object::NoSideEffectsToString(isolate, argument);
-    } else {
-      v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
-      catcher.SetVerbose(false);
-      catcher.SetCaptureMessage(false);
-
-      maybe_stringified = Object::ToString(isolate, argument);
-    }
-
-    if (!maybe_stringified.ToHandle(&stringified)) {
-      stringified = isolate->factory()->NewStringFromAsciiChecked("exception");
-    }
-    message->set_argument(*stringified);
-  }
-
   v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
-  v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception);
+
+  if (api_message_obj->ErrorLevel() == v8::Isolate::kMessageError) {
+    // We are calling into embedder's code which can throw exceptions.
+    // Thus we need to save current exception state, reset it to the clean one
+    // and ignore scheduled exceptions callbacks can throw.
+
+    // We pass the exception object into the message handler callback though.
+    Object* exception_object = isolate->heap()->undefined_value();
+    if (isolate->has_pending_exception()) {
+      exception_object = isolate->pending_exception();
+    }
+    Handle<Object> exception(exception_object, isolate);
+
+    Isolate::ExceptionScope exception_scope(isolate);
+    isolate->clear_pending_exception();
+    isolate->set_external_caught_exception(false);
+
+    // Turn the exception on the message into a string if it is an object.
+    if (message->argument()->IsJSObject()) {
+      HandleScope scope(isolate);
+      Handle<Object> argument(message->argument(), isolate);
+
+      MaybeHandle<Object> maybe_stringified;
+      Handle<Object> stringified;
+      // Make sure we don't leak uncaught internally generated Error objects.
+      if (argument->IsJSError()) {
+        maybe_stringified = Object::NoSideEffectsToString(isolate, argument);
+      } else {
+        v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
+        catcher.SetVerbose(false);
+        catcher.SetCaptureMessage(false);
+
+        maybe_stringified = Object::ToString(isolate, argument);
+      }
+
+      if (!maybe_stringified.ToHandle(&stringified)) {
+        stringified =
+            isolate->factory()->NewStringFromAsciiChecked("exception");
+      }
+      message->set_argument(*stringified);
+    }
+
+    v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception);
+    ReportMessageNoExceptions(isolate, loc, message, api_exception_obj);
+  } else {
+    ReportMessageNoExceptions(isolate, loc, message, v8::Local<v8::Value>());
+  }
+}
+
+void MessageHandler::ReportMessageNoExceptions(
+    Isolate* isolate, const MessageLocation* loc, Handle<Object> message,
+    v8::Local<v8::Value> api_exception_obj) {
+  v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
+  int error_level = api_message_obj->ErrorLevel();
 
   Handle<TemplateList> global_listeners =
       isolate->factory()->message_listeners();
@@ -134,6 +146,11 @@
       if (global_listeners->get(i)->IsUndefined(isolate)) continue;
       FixedArray* listener = FixedArray::cast(global_listeners->get(i));
       Foreign* callback_obj = Foreign::cast(listener->get(0));
+      int32_t message_levels =
+          static_cast<int32_t>(Smi::cast(listener->get(2))->value());
+      if (!(message_levels & error_level)) {
+        continue;
+      }
       v8::MessageCallback callback =
           FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
       Handle<Object> callback_data(listener->get(1), isolate);
@@ -165,139 +182,6 @@
   return GetMessage(isolate, data)->ToCString(DISALLOW_NULLS);
 }
 
-void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
-                                  int frame_ix) {
-  DCHECK(!array->IsWasmFrame(frame_ix));
-  isolate_ = isolate;
-  receiver_ = handle(array->Receiver(frame_ix), isolate);
-  function_ = handle(array->Function(frame_ix), isolate);
-  code_ = handle(array->Code(frame_ix), isolate);
-  offset_ = array->Offset(frame_ix)->value();
-
-  const int flags = array->Flags(frame_ix)->value();
-  force_constructor_ = (flags & FrameArray::kForceConstructor) != 0;
-  is_strict_ = (flags & FrameArray::kIsStrict) != 0;
-}
-
-JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
-                           Handle<JSFunction> function,
-                           Handle<AbstractCode> code, int offset)
-    : isolate_(isolate),
-      receiver_(receiver),
-      function_(function),
-      code_(code),
-      offset_(offset),
-      force_constructor_(false),
-      is_strict_(false) {}
-
-JSStackFrame::JSStackFrame() {}
-
-Handle<Object> JSStackFrame::GetFunction() const {
-  return Handle<Object>::cast(function_);
-}
-
-Handle<Object> JSStackFrame::GetFileName() {
-  if (!HasScript()) return isolate_->factory()->null_value();
-  return handle(GetScript()->name(), isolate_);
-}
-
-Handle<Object> JSStackFrame::GetFunctionName() {
-  Handle<String> result = JSFunction::GetName(function_);
-  if (result->length() != 0) return result;
-
-  if (HasScript() &&
-      GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
-    return isolate_->factory()->eval_string();
-  }
-  return isolate_->factory()->null_value();
-}
-
-namespace {
-
-bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
-                     Handle<JSFunction> fun,
-                     LookupIterator::Configuration config) {
-  LookupIterator iter =
-      LookupIterator::PropertyOrElement(isolate, obj, name, config);
-  if (iter.state() == LookupIterator::DATA) {
-    return iter.GetDataValue().is_identical_to(fun);
-  } else if (iter.state() == LookupIterator::ACCESSOR) {
-    Handle<Object> accessors = iter.GetAccessors();
-    if (accessors->IsAccessorPair()) {
-      Handle<AccessorPair> pair = Handle<AccessorPair>::cast(accessors);
-      return pair->getter() == *fun || pair->setter() == *fun;
-    }
-  }
-  return false;
-}
-
-Handle<Object> ScriptNameOrSourceUrl(Handle<Script> script, Isolate* isolate) {
-  Object* name_or_url = script->source_url();
-  if (!name_or_url->IsString()) name_or_url = script->name();
-  return handle(name_or_url, isolate);
-}
-
-}  // namespace
-
-Handle<Object> JSStackFrame::GetScriptNameOrSourceUrl() {
-  if (!HasScript()) return isolate_->factory()->null_value();
-  return ScriptNameOrSourceUrl(GetScript(), isolate_);
-}
-
-Handle<Object> JSStackFrame::GetMethodName() {
-  if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_)) {
-    return isolate_->factory()->null_value();
-  }
-
-  Handle<JSReceiver> receiver =
-      Object::ToObject(isolate_, receiver_).ToHandleChecked();
-  if (!receiver->IsJSObject()) {
-    return isolate_->factory()->null_value();
-  }
-
-  Handle<JSObject> obj = Handle<JSObject>::cast(receiver);
-  Handle<Object> function_name(function_->shared()->name(), isolate_);
-  if (function_name->IsString()) {
-    Handle<String> name = Handle<String>::cast(function_name);
-    // ES2015 gives getters and setters name prefixes which must
-    // be stripped to find the property name.
-    if (name->IsUtf8EqualTo(CStrVector("get "), true) ||
-        name->IsUtf8EqualTo(CStrVector("set "), true)) {
-      name = isolate_->factory()->NewProperSubString(name, 4, name->length());
-    }
-    if (CheckMethodName(isolate_, obj, name, function_,
-                        LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR)) {
-      return name;
-    }
-  }
-
-  HandleScope outer_scope(isolate_);
-  Handle<Object> result;
-  for (PrototypeIterator iter(isolate_, obj, kStartAtReceiver); !iter.IsAtEnd();
-       iter.Advance()) {
-    Handle<Object> current = PrototypeIterator::GetCurrent(iter);
-    if (!current->IsJSObject()) break;
-    Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
-    if (current_obj->IsAccessCheckNeeded()) break;
-    Handle<FixedArray> keys =
-        KeyAccumulator::GetOwnEnumPropertyKeys(isolate_, current_obj);
-    for (int i = 0; i < keys->length(); i++) {
-      HandleScope inner_scope(isolate_);
-      if (!keys->get(i)->IsName()) continue;
-      Handle<Name> name_key(Name::cast(keys->get(i)), isolate_);
-      if (!CheckMethodName(isolate_, current_obj, name_key, function_,
-                           LookupIterator::OWN_SKIP_INTERCEPTOR))
-        continue;
-      // Return null in case of duplicates to avoid confusion.
-      if (!result.is_null()) return isolate_->factory()->null_value();
-      result = inner_scope.CloseAndEscape(name_key);
-    }
-  }
-
-  if (!result.is_null()) return outer_scope.CloseAndEscape(result);
-  return isolate_->factory()->null_value();
-}
-
 namespace {
 
 Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
@@ -326,7 +210,7 @@
 }
 
 MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
-  Handle<Object> sourceURL = Script::GetNameOrSourceURL(script);
+  Handle<Object> sourceURL(script->GetNameOrSourceURL(), isolate);
   if (!sourceURL->IsUndefined(isolate)) {
     DCHECK(sourceURL->IsString());
     return Handle<String>::cast(sourceURL);
@@ -397,11 +281,154 @@
 
 }  // namespace
 
+Handle<Object> StackFrameBase::GetEvalOrigin() {
+  if (!HasScript()) return isolate_->factory()->undefined_value();
+  return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
+}
+
+bool StackFrameBase::IsEval() {
+  return HasScript() &&
+         GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL;
+}
+
+void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
+                                  int frame_ix) {
+  DCHECK(!array->IsWasmFrame(frame_ix));
+  isolate_ = isolate;
+  receiver_ = handle(array->Receiver(frame_ix), isolate);
+  function_ = handle(array->Function(frame_ix), isolate);
+  code_ = handle(array->Code(frame_ix), isolate);
+  offset_ = array->Offset(frame_ix)->value();
+
+  const int flags = array->Flags(frame_ix)->value();
+  force_constructor_ = (flags & FrameArray::kForceConstructor) != 0;
+  is_strict_ = (flags & FrameArray::kIsStrict) != 0;
+}
+
+JSStackFrame::JSStackFrame() {}
+
+JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
+                           Handle<JSFunction> function,
+                           Handle<AbstractCode> code, int offset)
+    : StackFrameBase(isolate),
+      receiver_(receiver),
+      function_(function),
+      code_(code),
+      offset_(offset),
+      force_constructor_(false),
+      is_strict_(false) {}
+
+Handle<Object> JSStackFrame::GetFunction() const {
+  return Handle<Object>::cast(function_);
+}
+
+Handle<Object> JSStackFrame::GetFileName() {
+  if (!HasScript()) return isolate_->factory()->null_value();
+  return handle(GetScript()->name(), isolate_);
+}
+
+Handle<Object> JSStackFrame::GetFunctionName() {
+  Handle<String> result = JSFunction::GetName(function_);
+  if (result->length() != 0) return result;
+
+  if (HasScript() &&
+      GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
+    return isolate_->factory()->eval_string();
+  }
+  return isolate_->factory()->null_value();
+}
+
+namespace {
+
+bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
+                     Handle<JSFunction> fun,
+                     LookupIterator::Configuration config) {
+  LookupIterator iter =
+      LookupIterator::PropertyOrElement(isolate, obj, name, config);
+  if (iter.state() == LookupIterator::DATA) {
+    return iter.GetDataValue().is_identical_to(fun);
+  } else if (iter.state() == LookupIterator::ACCESSOR) {
+    Handle<Object> accessors = iter.GetAccessors();
+    if (accessors->IsAccessorPair()) {
+      Handle<AccessorPair> pair = Handle<AccessorPair>::cast(accessors);
+      return pair->getter() == *fun || pair->setter() == *fun;
+    }
+  }
+  return false;
+}
+
+Handle<Object> ScriptNameOrSourceUrl(Handle<Script> script, Isolate* isolate) {
+  Object* name_or_url = script->source_url();
+  if (!name_or_url->IsString()) name_or_url = script->name();
+  return handle(name_or_url, isolate);
+}
+
+}  // namespace
+
+Handle<Object> JSStackFrame::GetScriptNameOrSourceUrl() {
+  if (!HasScript()) return isolate_->factory()->null_value();
+  return ScriptNameOrSourceUrl(GetScript(), isolate_);
+}
+
+Handle<Object> JSStackFrame::GetMethodName() {
+  if (receiver_->IsNullOrUndefined(isolate_)) {
+    return isolate_->factory()->null_value();
+  }
+
+  Handle<JSReceiver> receiver =
+      Object::ToObject(isolate_, receiver_).ToHandleChecked();
+  if (!receiver->IsJSObject()) {
+    return isolate_->factory()->null_value();
+  }
+
+  Handle<JSObject> obj = Handle<JSObject>::cast(receiver);
+  Handle<Object> function_name(function_->shared()->name(), isolate_);
+  if (function_name->IsString()) {
+    Handle<String> name = Handle<String>::cast(function_name);
+    // ES2015 gives getters and setters name prefixes which must
+    // be stripped to find the property name.
+    if (name->IsUtf8EqualTo(CStrVector("get "), true) ||
+        name->IsUtf8EqualTo(CStrVector("set "), true)) {
+      name = isolate_->factory()->NewProperSubString(name, 4, name->length());
+    }
+    if (CheckMethodName(isolate_, obj, name, function_,
+                        LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR)) {
+      return name;
+    }
+  }
+
+  HandleScope outer_scope(isolate_);
+  Handle<Object> result;
+  for (PrototypeIterator iter(isolate_, obj, kStartAtReceiver); !iter.IsAtEnd();
+       iter.Advance()) {
+    Handle<Object> current = PrototypeIterator::GetCurrent(iter);
+    if (!current->IsJSObject()) break;
+    Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
+    if (current_obj->IsAccessCheckNeeded()) break;
+    Handle<FixedArray> keys =
+        KeyAccumulator::GetOwnEnumPropertyKeys(isolate_, current_obj);
+    for (int i = 0; i < keys->length(); i++) {
+      HandleScope inner_scope(isolate_);
+      if (!keys->get(i)->IsName()) continue;
+      Handle<Name> name_key(Name::cast(keys->get(i)), isolate_);
+      if (!CheckMethodName(isolate_, current_obj, name_key, function_,
+                           LookupIterator::OWN_SKIP_INTERCEPTOR))
+        continue;
+      // Return null in case of duplicates to avoid confusion.
+      if (!result.is_null()) return isolate_->factory()->null_value();
+      result = inner_scope.CloseAndEscape(name_key);
+    }
+  }
+
+  if (!result.is_null()) return outer_scope.CloseAndEscape(result);
+  return isolate_->factory()->null_value();
+}
+
 Handle<Object> JSStackFrame::GetTypeName() {
   // TODO(jgruber): Check for strict/constructor here as in
   // CallSitePrototypeGetThis.
 
-  if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_))
+  if (receiver_->IsNullOrUndefined(isolate_))
     return isolate_->factory()->null_value();
 
   if (receiver_->IsJSProxy()) return isolate_->factory()->Proxy_string();
@@ -411,11 +438,6 @@
   return JSReceiver::GetConstructorName(receiver_object);
 }
 
-Handle<Object> JSStackFrame::GetEvalOrigin() {
-  if (!HasScript()) return isolate_->factory()->undefined_value();
-  return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
-}
-
 int JSStackFrame::GetLineNumber() {
   DCHECK_LE(0, GetPosition());
   if (HasScript()) return Script::GetLineNumber(GetScript(), GetPosition()) + 1;
@@ -435,13 +457,7 @@
 }
 
 bool JSStackFrame::IsToplevel() {
-  return receiver_->IsJSGlobalProxy() || receiver_->IsNull(isolate_) ||
-         receiver_->IsUndefined(isolate_);
-}
-
-bool JSStackFrame::IsEval() {
-  return HasScript() &&
-         GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL;
+  return receiver_->IsJSGlobalProxy() || receiver_->IsNullOrUndefined(isolate_);
 }
 
 bool JSStackFrame::IsConstructor() {
@@ -619,6 +635,8 @@
   return handle(Script::cast(function_->shared()->script()), isolate_);
 }
 
+WasmStackFrame::WasmStackFrame() {}
+
 void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
                                     int frame_ix) {
   // This function is called for both wasm and asm.js->wasm frames.
@@ -638,9 +656,10 @@
 Handle<Object> WasmStackFrame::GetFunctionName() {
   Handle<Object> name;
   Handle<WasmCompiledModule> compiled_module(
-      Handle<WasmInstanceObject>::cast(wasm_instance_)->get_compiled_module(),
+      Handle<WasmInstanceObject>::cast(wasm_instance_)->compiled_module(),
       isolate_);
-  if (!WasmCompiledModule::GetFunctionName(compiled_module, wasm_func_index_)
+  if (!WasmCompiledModule::GetFunctionNameOrNull(isolate_, compiled_module,
+                                                 wasm_func_index_)
            .ToHandle(&name)) {
     name = isolate_->factory()->null_value();
   }
@@ -673,6 +692,7 @@
 }
 
 int WasmStackFrame::GetPosition() const {
+  // TODO(wasm): Clean this up (bug 5007).
   return (offset_ < 0) ? (-1 - offset_) : code_->SourcePosition(offset_);
 }
 
@@ -680,6 +700,25 @@
   return isolate_->factory()->null_value();
 }
 
+bool WasmStackFrame::HasScript() const { return true; }
+
+Handle<Script> WasmStackFrame::GetScript() const {
+  return handle(
+      WasmInstanceObject::cast(*wasm_instance_)->compiled_module()->script(),
+      isolate_);
+}
+
+AsmJsWasmStackFrame::AsmJsWasmStackFrame() {}
+
+void AsmJsWasmStackFrame::FromFrameArray(Isolate* isolate,
+                                         Handle<FrameArray> array,
+                                         int frame_ix) {
+  DCHECK(array->IsAsmJsWasmFrame(frame_ix));
+  WasmStackFrame::FromFrameArray(isolate, array, frame_ix);
+  is_at_number_conversion_ =
+      array->Flags(frame_ix)->value() & FrameArray::kAsmJsAtNumberConversion;
+}
+
 Handle<Object> AsmJsWasmStackFrame::GetReceiver() const {
   return isolate_->global_proxy();
 }
@@ -706,8 +745,12 @@
 int AsmJsWasmStackFrame::GetPosition() const {
   DCHECK_LE(0, offset_);
   int byte_offset = code_->SourcePosition(offset_);
-  return wasm::GetAsmWasmSourcePosition(Handle<JSObject>::cast(wasm_instance_),
-                                        wasm_func_index_, byte_offset);
+  Handle<WasmCompiledModule> compiled_module(
+      WasmInstanceObject::cast(*wasm_instance_)->compiled_module(), isolate_);
+  DCHECK_LE(0, byte_offset);
+  return WasmCompiledModule::GetAsmJsSourcePosition(
+      compiled_module, wasm_func_index_, static_cast<uint32_t>(byte_offset),
+      is_at_number_conversion_);
 }
 
 int AsmJsWasmStackFrame::GetLineNumber() {
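
The listener loop above now skips callbacks whose registered level mask does not contain the message's ErrorLevel() bit. An embedder-side sketch, assuming the v8::Isolate::AddMessageListenerWithErrorLevel entry point that accompanies this per-level filtering:

    #include "v8.h"

    // Only messages whose ErrorLevel() bit is present in the registered mask
    // reach this callback.
    void OnWarningOrError(v8::Local<v8::Message> message,
                          v8::Local<v8::Value> data) {
      // message->ErrorLevel() is one of the v8::Isolate::kMessage* bits.
    }

    void InstallListener(v8::Isolate* isolate) {
      isolate->AddMessageListenerWithErrorLevel(
          OnWarningOrError,
          v8::Isolate::kMessageError | v8::Isolate::kMessageWarning);
    }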
diff --git a/src/messages.h b/src/messages.h
index 86cc8d0..bb595c2 100644
--- a/src/messages.h
+++ b/src/messages.h
@@ -23,25 +23,26 @@
 class FrameArray;
 class JSMessageObject;
 class LookupIterator;
+class SharedFunctionInfo;
 class SourceInfo;
 
 class MessageLocation {
  public:
   MessageLocation(Handle<Script> script, int start_pos, int end_pos);
   MessageLocation(Handle<Script> script, int start_pos, int end_pos,
-                  Handle<JSFunction> function);
+                  Handle<SharedFunctionInfo> shared);
   MessageLocation();
 
   Handle<Script> script() const { return script_; }
   int start_pos() const { return start_pos_; }
   int end_pos() const { return end_pos_; }
-  Handle<JSFunction> function() const { return function_; }
+  Handle<SharedFunctionInfo> shared() const { return shared_; }
 
  private:
   Handle<Script> script_;
   int start_pos_;
   int end_pos_;
-  Handle<JSFunction> function_;
+  Handle<SharedFunctionInfo> shared_;
 };
 
 class StackFrameBase {
@@ -56,7 +57,7 @@
   virtual Handle<Object> GetScriptNameOrSourceUrl() = 0;
   virtual Handle<Object> GetMethodName() = 0;
   virtual Handle<Object> GetTypeName() = 0;
-  virtual Handle<Object> GetEvalOrigin() = 0;
+  virtual Handle<Object> GetEvalOrigin();
 
   virtual int GetPosition() const = 0;
   // Return 1-based line number, including line offset.
@@ -66,11 +67,20 @@
 
   virtual bool IsNative() = 0;
   virtual bool IsToplevel() = 0;
-  virtual bool IsEval() = 0;
+  virtual bool IsEval();
   virtual bool IsConstructor() = 0;
   virtual bool IsStrict() const = 0;
 
   virtual MaybeHandle<String> ToString() = 0;
+
+ protected:
+  StackFrameBase() {}
+  explicit StackFrameBase(Isolate* isolate) : isolate_(isolate) {}
+  Isolate* isolate_;
+
+ private:
+  virtual bool HasScript() const = 0;
+  virtual Handle<Script> GetScript() const = 0;
 };
 
 class JSStackFrame : public StackFrameBase {
@@ -88,7 +98,6 @@
   Handle<Object> GetScriptNameOrSourceUrl() override;
   Handle<Object> GetMethodName() override;
   Handle<Object> GetTypeName() override;
-  Handle<Object> GetEvalOrigin() override;
 
   int GetPosition() const override;
   int GetLineNumber() override;
@@ -96,7 +105,6 @@
 
   bool IsNative() override;
   bool IsToplevel() override;
-  bool IsEval() override;
   bool IsConstructor() override;
   bool IsStrict() const override { return is_strict_; }
 
@@ -106,10 +114,8 @@
   JSStackFrame();
   void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
 
-  bool HasScript() const;
-  Handle<Script> GetScript() const;
-
-  Isolate* isolate_;
+  bool HasScript() const override;
+  Handle<Script> GetScript() const override;
 
   Handle<Object> receiver_;
   Handle<JSFunction> function_;
@@ -134,7 +140,6 @@
   Handle<Object> GetScriptNameOrSourceUrl() override { return Null(); }
   Handle<Object> GetMethodName() override { return Null(); }
   Handle<Object> GetTypeName() override { return Null(); }
-  Handle<Object> GetEvalOrigin() override { return Null(); }
 
   int GetPosition() const override;
   int GetLineNumber() override { return wasm_func_index_; }
@@ -142,7 +147,6 @@
 
   bool IsNative() override { return false; }
   bool IsToplevel() override { return false; }
-  bool IsEval() override { return false; }
   bool IsConstructor() override { return false; }
   bool IsStrict() const override { return false; }
 
@@ -151,7 +155,8 @@
  protected:
   Handle<Object> Null() const;
 
-  Isolate* isolate_;
+  bool HasScript() const override;
+  Handle<Script> GetScript() const override;
 
   // TODO(wasm): Use proper typing.
   Handle<Object> wasm_instance_;
@@ -160,9 +165,11 @@
   int offset_;
 
  private:
+  WasmStackFrame();
   void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
 
   friend class FrameArrayIterator;
+  friend class AsmJsWasmStackFrame;
 };
 
 class AsmJsWasmStackFrame : public WasmStackFrame {
@@ -180,6 +187,13 @@
   int GetColumnNumber() override;
 
   MaybeHandle<String> ToString() override;
+
+ private:
+  friend class FrameArrayIterator;
+  AsmJsWasmStackFrame();
+  void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
+
+  bool is_at_number_conversion_;
 };
 
 class FrameArrayIterator {
@@ -255,7 +269,7 @@
     "ArrayBuffer subclass returned this from species constructor")             \
   T(ArrayFunctionsOnFrozen, "Cannot modify frozen array elements")             \
   T(ArrayFunctionsOnSealed, "Cannot add/remove sealed array elements")         \
-  T(ArrayNotSubclassable, "Subclassing Arrays is not currently supported.")    \
+  T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context")    \
   T(CalledNonCallable, "% is not a function")                                  \
   T(CalledOnNonObject, "% called on non-object")                               \
   T(CalledOnNullOrUndefined, "% called on null or undefined")                  \
@@ -282,7 +296,7 @@
   T(DebuggerFrame, "Debugger: Invalid frame index.")                           \
   T(DebuggerType, "Debugger: Parameters have wrong types.")                    \
   T(DeclarationMissingInitializer, "Missing initializer in % declaration")     \
-  T(DefineDisallowed, "Cannot define property:%, object is not extensible.")   \
+  T(DefineDisallowed, "Cannot define property %, object is not extensible")    \
   T(DetachedOperation, "Cannot perform % on a detached ArrayBuffer")           \
   T(DuplicateTemplateProperty, "Object template has duplicate property '%'")   \
   T(ExtendsValueNotConstructor,                                                \
@@ -294,6 +308,7 @@
   T(IllegalInvocation, "Illegal invocation")                                   \
   T(ImmutablePrototypeSet,                                                     \
     "Immutable prototype object '%' cannot have their prototype set")          \
+  T(ImportCallNotNewExpression, "Cannot use new with import")                  \
   T(IncompatibleMethodReceiver, "Method % called on incompatible receiver %")  \
   T(InstanceofNonobjectProto,                                                  \
     "Function has non-object prototype '%' in instanceof check")               \
@@ -301,7 +316,6 @@
   T(InvalidInOperatorUse, "Cannot use 'in' operator to search for '%' in %")   \
   T(InvalidRegExpExecResult,                                                   \
     "RegExp exec method returned something other than an Object or null")      \
-  T(InvalidSimdOperation, "% is not a valid type for this SIMD operation.")    \
   T(IteratorResultNotAnObject, "Iterator result % is not an object")           \
   T(IteratorValueNotAnObject, "Iterator value % is not an entry object")       \
   T(LanguageID, "Language ID should be string or object.")                     \
@@ -329,13 +343,15 @@
   T(NotIterable, "% is not iterable")                                          \
   T(NotPropertyName, "% is not a valid property name")                         \
   T(NotTypedArray, "this is not a typed array.")                               \
-  T(NotSharedTypedArray, "% is not a shared typed array.")                     \
+  T(NotSuperConstructor, "Super constructor % of % is not a constructor")      \
+  T(NotSuperConstructorAnonymousClass,                                         \
+    "Super constructor % of anonymous class is not a constructor")             \
   T(NotIntegerSharedTypedArray, "% is not an integer shared typed array.")     \
   T(NotInt32SharedTypedArray, "% is not an int32 shared typed array.")         \
   T(ObjectGetterExpectingFunction,                                             \
     "Object.prototype.__defineGetter__: Expecting function")                   \
   T(ObjectGetterCallable, "Getter must be a function: %")                      \
-  T(ObjectNotExtensible, "Can't add property %, object is not extensible")     \
+  T(ObjectNotExtensible, "Cannot add property %, object is not extensible")    \
   T(ObjectSetterExpectingFunction,                                             \
     "Object.prototype.__defineSetter__: Expecting function")                   \
   T(ObjectSetterCallable, "Setter must be a function: %")                      \
@@ -443,9 +459,6 @@
   T(RegExpNonObject, "% getter called on non-object %")                        \
   T(RegExpNonRegExp, "% getter called on non-RegExp object")                   \
   T(ReinitializeIntl, "Trying to re-initialize % object.")                     \
-  T(ResolvedOptionsCalledOnNonObject,                                          \
-    "resolvedOptions method called on a non-object or on a object that is "    \
-    "not Intl.%.")                                                             \
   T(ResolverNotAFunction, "Promise resolver % is not a function")              \
   T(RestrictedFunctionProperties,                                              \
     "'caller' and 'arguments' are restricted function properties and cannot "  \
@@ -462,10 +475,11 @@
   T(StrictCannotCreateProperty, "Cannot create property '%' on % '%'")         \
   T(SymbolIteratorInvalid,                                                     \
     "Result of the Symbol.iterator method is not an object")                   \
+  T(SymbolAsyncIteratorInvalid,                                                \
+    "Result of the Symbol.asyncIterator method is not an object")              \
   T(SymbolKeyFor, "% is not a symbol")                                         \
   T(SymbolToNumber, "Cannot convert a Symbol value to a number")               \
   T(SymbolToString, "Cannot convert a Symbol value to a string")               \
-  T(SimdToNumber, "Cannot convert a SIMD value to a number")                   \
   T(ThrowMethodMissing, "The iterator does not provide a 'throw' method.")     \
   T(UndefinedOrNullToObject, "Cannot convert undefined or null to object")     \
   T(ValueAndAccessor,                                                          \
@@ -474,8 +488,8 @@
   T(VarRedeclaration, "Identifier '%' has already been declared")              \
   T(WrongArgs, "%: Arguments list has wrong type")                             \
   /* ReferenceError */                                                         \
-  T(NonMethod, "'super' is referenced from non-method")                        \
   T(NotDefined, "% is not defined")                                            \
+  T(SuperAlreadyCalled, "Super constructor may only be called once")           \
   T(UnsupportedSuper, "Unsupported reference to 'super'")                      \
   /* RangeError */                                                             \
   T(DateRange, "Provided date is not in valid range.")                         \
@@ -494,8 +508,7 @@
   T(InvalidDataViewAccessorOffset,                                             \
     "Offset is outside the bounds of the DataView")                            \
   T(InvalidDataViewLength, "Invalid DataView length %")                        \
-  T(InvalidDataViewOffset,                                                     \
-    "Start offset % is outside the bounds of the buffer")                      \
+  T(InvalidOffset, "Start offset % is outside the bounds of the buffer")       \
   T(InvalidHint, "Invalid hint: %")                                            \
   T(InvalidLanguageTag, "Invalid language tag: %")                             \
   T(InvalidWeakMapKey, "Invalid value used as weak map key")                   \
@@ -503,10 +516,8 @@
   T(InvalidStringLength, "Invalid string length")                              \
   T(InvalidTimeValue, "Invalid time value")                                    \
   T(InvalidTypedArrayAlignment, "% of % should be a multiple of %")            \
+  T(InvalidTypedArrayIndex, "Invalid typed array index")                       \
   T(InvalidTypedArrayLength, "Invalid typed array length")                     \
-  T(InvalidTypedArrayOffset, "Start offset is too large:")                     \
-  T(InvalidSimdIndex, "Index out of bounds for SIMD operation")                \
-  T(InvalidSimdLaneValue, "Lane value out of bounds for SIMD operation")       \
   T(LetInLexicalBinding, "let is disallowed as a lexically bound name")        \
   T(LocaleMatcher, "Illegal value for localeMatcher:%")                        \
   T(NormalizationForm, "The normalization form should be one of %.")           \
@@ -577,11 +588,14 @@
   T(PushPastSafeLength,                                                        \
     "Pushing % elements on an array-like of length % "                         \
     "is disallowed, as the total surpasses 2**53-1")                           \
-  T(ElementAfterRest, "Rest element must be last element in array")            \
+  T(ElementAfterRest, "Rest element must be last element")                     \
   T(BadSetterRestParameter,                                                    \
     "Setter function argument must not be a rest parameter")                   \
   T(ParamDupe, "Duplicate parameter name not allowed in this context")         \
   T(ParenthesisInArgString, "Function arg string contains parenthesis")        \
+  T(ArgStringTerminatesParametersEarly,                                        \
+    "Arg string terminates parameters early")                                  \
+  T(UnexpectedEndOfArgString, "Unexpected end of arg string")                  \
   T(RuntimeWrongNumArgs, "Runtime function given wrong number of arguments")   \
   T(SingleFunctionLiteral, "Single function literal required")                 \
   T(SloppyFunction,                                                            \
@@ -595,9 +609,13 @@
     "In strict mode code, functions can only be declared at top level or "     \
     "inside a block.")                                                         \
   T(StrictOctalLiteral, "Octal literals are not allowed in strict mode.")      \
+  T(StrictDecimalWithLeadingZero,                                              \
+    "Decimals with leading zeros are not allowed in strict mode.")             \
+  T(StrictOctalEscape,                                                         \
+    "Octal escape sequences are not allowed in strict mode.")                  \
   T(StrictWith, "Strict mode code may not include a with statement")           \
   T(TemplateOctalLiteral,                                                      \
-    "Octal literals are not allowed in template strings.")                     \
+    "Octal escape sequences are not allowed in template strings.")             \
   T(ThisFormalParameter, "'this' is not a valid formal parameter name")        \
   T(AwaitBindingIdentifier,                                                    \
     "'await' is not a valid identifier name in an async function")             \
@@ -625,6 +643,8 @@
   T(UnexpectedTokenNumber, "Unexpected number")                                \
   T(UnexpectedTokenString, "Unexpected string")                                \
   T(UnexpectedTokenRegExp, "Unexpected regular expression")                    \
+  T(UnexpectedLexicalDeclaration,                                              \
+    "Lexical declaration cannot appear in a single-statement context")         \
   T(UnknownLabel, "Undefined label '%'")                                       \
   T(UnresolvableExport,                                                        \
     "The requested module does not provide an export named '%'")               \
@@ -639,6 +659,7 @@
   T(YieldInParameter, "Yield expression not allowed in formal parameter")      \
   /* EvalError */                                                              \
   T(CodeGenFromStrings, "%")                                                   \
+  T(NoSideEffectDebugEvaluate, "Possible side-effect in debug-evaluate")       \
   /* URIError */                                                               \
   T(URIMalformed, "URI malformed")                                             \
   /* Wasm errors (currently Error) */                                          \
@@ -652,12 +673,17 @@
   T(WasmTrapFuncSigMismatch, "function signature mismatch")                    \
   T(WasmTrapInvalidIndex, "invalid index into function table")                 \
   T(WasmTrapTypeError, "invalid type")                                         \
+  /* Asm.js validation related */                                              \
+  T(AsmJsInvalid, "Invalid asm.js: %")                                         \
+  T(AsmJsCompiled, "Converted asm.js to WebAssembly: %")                       \
+  T(AsmJsInstantiated, "Instantiated asm.js: %")                               \
   /* DataCloneError messages */                                                \
   T(DataCloneError, "% could not be cloned.")                                  \
+  T(DataCloneErrorOutOfMemory, "Data cannot be cloned, out of memory.")        \
   T(DataCloneErrorNeuteredArrayBuffer,                                         \
     "An ArrayBuffer is neutered and could not be cloned.")                     \
-  T(DataCloneErrorSharedArrayBufferNotTransferred,                             \
-    "A SharedArrayBuffer could not be cloned. SharedArrayBuffer must be "      \
+  T(DataCloneErrorSharedArrayBufferTransferred,                                \
+    "A SharedArrayBuffer could not be cloned. SharedArrayBuffer must not be "  \
     "transferred.")                                                            \
   T(DataCloneDeserializationError, "Unable to deserialize cloned data.")       \
   T(DataCloneDeserializationVersionError,                                      \
@@ -692,11 +718,11 @@
   // Returns a message object for the API to use.
   static Handle<JSMessageObject> MakeMessageObject(
       Isolate* isolate, MessageTemplate::Template type,
-      MessageLocation* location, Handle<Object> argument,
+      const MessageLocation* location, Handle<Object> argument,
       Handle<JSArray> stack_frames);
 
   // Report a formatted message (needs JS allocation).
-  static void ReportMessage(Isolate* isolate, MessageLocation* loc,
+  static void ReportMessage(Isolate* isolate, const MessageLocation* loc,
                             Handle<JSMessageObject> message);
 
   static void DefaultMessageReport(Isolate* isolate, const MessageLocation* loc,
@@ -704,6 +730,12 @@
   static Handle<String> GetMessage(Isolate* isolate, Handle<Object> data);
   static std::unique_ptr<char[]> GetLocalizedMessage(Isolate* isolate,
                                                      Handle<Object> data);
+
+ private:
+  static void ReportMessageNoExceptions(Isolate* isolate,
+                                        const MessageLocation* loc,
+                                        Handle<Object> message_obj,
+                                        v8::Local<v8::Value> api_exception_obj);
 };
 
 
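The messages.h changes above hoist IsEval() and GetEvalOrigin() into StackFrameBase, implemented against two new private virtuals (HasScript/GetScript) that every concrete frame kind overrides: the template-method pattern. A reduced sketch with hypothetical simplified types:

    // The base class implements the shared query once, in terms of private
    // virtuals that each concrete frame kind overrides.
    class FrameBase {
     public:
      virtual ~FrameBase() = default;
      bool IsEval() const { return HasScript() && ScriptIsEval(); }

     private:
      virtual bool HasScript() const = 0;
      virtual bool ScriptIsEval() const = 0;
    };

    class JsFrameSketch : public FrameBase {
     private:
      bool HasScript() const override { return true; }
      bool ScriptIsEval() const override { return false; }
    };
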
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 963ed4a..b06ec84 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -41,7 +41,7 @@
 
 #include "src/assembler.h"
 #include "src/debug/debug.h"
-
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -137,6 +137,17 @@
   return Assembler::kSpecialTargetSize;
 }
 
+Address Assembler::target_address_at(Address pc, Code* code) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+                                      Address target,
+                                      ICacheFlushMode icache_flush_mode) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
 
 Address Assembler::target_address_from_return_address(Address pc) {
   return pc - kCallTargetAddressOffset;
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 865e64c..784185a 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -204,13 +204,18 @@
   return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
 }
 
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+  return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+}
+
 void RelocInfo::unchecked_update_wasm_memory_reference(
     Address address, ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
 }
 
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
-                                                  ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+                                           ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_,
                                    reinterpret_cast<Address>(size), flush_mode);
 }
@@ -895,8 +900,7 @@
       } else {
         PrintF("%d\n", instr);
       }
-      next(&l, internal_reference_positions_.find(l.pos()) !=
-                   internal_reference_positions_.end());
+      next(&l, is_internal_reference(&l));
     }
   } else {
     PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
@@ -910,14 +914,15 @@
   bool is_internal = false;
   if (L->is_linked() && !trampoline_emitted_) {
     unbound_labels_count_--;
-    next_buffer_check_ += kTrampolineSlotsSize;
+    if (!is_internal_reference(L)) {
+      next_buffer_check_ += kTrampolineSlotsSize;
+    }
   }
 
   while (L->is_linked()) {
     int32_t fixup_pos = L->pos();
     int32_t dist = pos - fixup_pos;
-    is_internal = internal_reference_positions_.find(fixup_pos) !=
-                  internal_reference_positions_.end();
+    is_internal = is_internal_reference(L);
     next(L, is_internal);  // Call next before overwriting link with target at
                            // fixup_pos.
     Instr instr = instr_at(fixup_pos);
@@ -934,7 +939,6 @@
           CHECK((trampoline_pos - fixup_pos) <= branch_offset);
           target_at_put(fixup_pos, trampoline_pos, false);
           fixup_pos = trampoline_pos;
-          dist = pos - fixup_pos;
         }
         target_at_put(fixup_pos, pos, false);
       } else {
@@ -1779,9 +1783,18 @@
 // Helper for base-reg + offset, when offset is larger than int16.
 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
   DCHECK(!src.rm().is(at));
-  lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
-  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
-  addu(at, at, src.rm());  // Add base register.
+  if (IsMipsArchVariant(kMips32r6)) {
+    int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
+    if (src.offset_ & kNegOffset) {
+      hi += 1;
+    }
+    aui(at, src.rm(), hi);
+    addiu(at, at, src.offset_ & kImm16Mask);
+  } else {
+    lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
+    ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
+    addu(at, at, src.rm());                 // Add base register.
+  }
 }
 
 // Helper for base-reg + upper part of offset, when offset is larger than int16.
@@ -1797,8 +1810,13 @@
   if (src.offset_ & kNegOffset) {
     hi += 1;
   }
-  lui(at, hi);
-  addu(at, at, src.rm());
+
+  if (IsMipsArchVariant(kMips32r6)) {
+    aui(at, src.rm(), hi);
+  } else {
+    lui(at, hi);
+    addu(at, at, src.rm());
+  }
   return (src.offset_ & kImm16Mask);
 }
 
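Editorial sketch: both helpers above bump the high half with hi += 1 whenever the low 16 bits are negative, because addiu sign-extends its immediate while ori zero-extends. A minimal standalone check of that arithmetic (illustrative, not part of the patch):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Recompose a 32-bit offset from a 16-bit high part (aui) plus a
// sign-extended 16-bit low part (addiu), applying the hi += 1 fix-up.
int32_t ComposeSigned(int32_t offset) {
  uint32_t lo = offset & 0xFFFF;
  uint32_t hi = (offset >> 16) & 0xFFFF;
  if (lo & 0x8000) hi += 1;                    // the kNegOffset correction
  int32_t sext_lo = static_cast<int16_t>(lo);  // addiu sign-extends
  return static_cast<int32_t>(hi << 16) + sext_lo;
}

int main() {
  for (int32_t off : {0, 1, 0x7FFF, 0x8000, 0x12348000, -1, -0x8000}) {
    assert(ComposeSigned(off) == off);
  }
  return 0;
}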
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 1df6e3f..9b259bb 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -68,7 +68,7 @@
 
 #define ALLOCATABLE_DOUBLE_REGISTERS(V)                   \
   V(f0)  V(f2)  V(f4)  V(f6)  V(f8)  V(f10) V(f12) V(f14) \
-  V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
+  V(f16) V(f18) V(f20) V(f22) V(f24)
 // clang-format on
 
 // CPU Registers.
@@ -155,6 +155,7 @@
 Register ToRegister(int num);
 
 static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
 
 // Coprocessor register.
 struct FPURegister {
@@ -282,8 +283,7 @@
 #define kLithiumScratchDouble f30
 #define kDoubleRegZero f28
 // Used on mips32r6 for compare operations.
-// We use the last non-callee saved odd register for O32 ABI
-#define kDoubleCompareReg f19
+#define kDoubleCompareReg f26
 
 // FPU (coprocessor 1) control registers.
 // Currently only FCSR (#31) is implemented.
@@ -473,17 +473,10 @@
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
     set_target_address_at(isolate, pc, target, icache_flush_mode);
   }
-  INLINE(static Address target_address_at(Address pc, Code* code)) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    return target_address_at(pc, constant_pool);
-  }
+  INLINE(static Address target_address_at(Address pc, Code* code));
   INLINE(static void set_target_address_at(
       Isolate* isolate, Address pc, Code* code, Address target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    set_target_address_at(isolate, pc, constant_pool, target,
-                          icache_flush_mode);
-  }
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
 
   // Return the code target address at a call site from the return address
   // of that call in the instruction stream.
@@ -552,6 +545,17 @@
   static const int kDebugBreakSlotLength =
       kDebugBreakSlotInstructions * kInstrSize;
 
+  // Max offset for instructions with 16-bit offset field
+  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+
+  // Max offset for compact branch instructions with 26-bit offset field
+  static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
+
+#ifdef _MIPS_ARCH_MIPS32R6
+  static const int kTrampolineSlotsSize = 2 * kInstrSize;
+#else
+  static const int kTrampolineSlotsSize = 4 * kInstrSize;
+#endif
 
   // ---------------------------------------------------------------------------
   // Code generation.
@@ -1029,9 +1033,6 @@
 
   // Debugging.
 
-  // Mark generator continuation.
-  void RecordGeneratorContinuation();
-
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot(RelocInfo::Mode mode);
 
@@ -1169,6 +1170,9 @@
   }
 
   bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+  static bool IsCompactBranchSupported() {
+    return IsMipsArchVariant(kMips32r6);
+  }
 
   inline int UnboundLabelsCount() { return unbound_labels_count_; }
 
@@ -1443,18 +1447,15 @@
   // branch instruction generation, where we use jump instructions rather
   // than regular branch instructions.
   bool trampoline_emitted_;
-#ifdef _MIPS_ARCH_MIPS32R6
-  static const int kTrampolineSlotsSize = 2 * kInstrSize;
-#else
-  static const int kTrampolineSlotsSize = 4 * kInstrSize;
-#endif
-  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
-  static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
   static const int kInvalidSlotPos = -1;
 
   // Internal reference positions, required for unbounded internal reference
   // labels.
   std::set<int> internal_reference_positions_;
+  bool is_internal_reference(Label* L) {
+    return internal_reference_positions_.find(L->pos()) !=
+           internal_reference_positions_.end();
+  }
 
   void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
   void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
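Editorial sketch: the constants hoisted to the public section above encode the architectural branch ranges. A regular branch carries a 16-bit offset counted in 4-byte instructions, i.e. an 18-bit signed byte offset once shifted left by 2; a compact r6 branch carries a 26-bit offset, i.e. 28 signed bits of byte offset. The r6 trampoline slot is half the size, presumably because a compact jump needs neither a delay slot nor a two-instruction address load. Derivation, under those assumptions:

// 16-bit immediate, instruction-counted, signed:
//   max forward reach = (1 << (16 + 2 - 1)) - 1 = (1 << 17) - 1 bytes
// 26-bit immediate, likewise:
//   max forward reach = (1 << (26 + 2 - 1)) - 1 = (1 << 27) - 1 bytes
static_assert((1 << (18 - 1)) - 1 == 131071, "regular branch range");
static_assert((1 << (28 - 1)) - 1 == 134217727, "compact branch range");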
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 966214b..e29da4c 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -33,17 +33,6 @@
   __ TailCallRuntime(Runtime::kNewArray);
 }
 
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
-  descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
-  descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                           Condition cc);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -246,8 +235,6 @@
     __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
     // Call runtime on identical symbols since we need to throw a TypeError.
     __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
-    // Call runtime on identical SIMD values since we must throw a TypeError.
-    __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
   } else {
     __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
     // Comparing JS objects with <=, >= is complicated.
@@ -255,8 +242,6 @@
       __ Branch(slow, greater, t4, Operand(FIRST_JS_RECEIVER_TYPE));
       // Call runtime on identical symbols since we need to throw a TypeError.
       __ Branch(slow, eq, t4, Operand(SYMBOL_TYPE));
-      // Call runtime on identical SIMD values since we must throw a TypeError.
-      __ Branch(slow, eq, t4, Operand(SIMD128_VALUE_TYPE));
       // Normally here we fall through to return_equal, but undefined is
       // special: (undefined == undefined) == true, but
       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
@@ -684,8 +669,11 @@
   if (cc == eq) {
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(lhs, rhs);
-      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+      __ Push(cp);
+      __ Call(strict() ? isolate()->builtins()->StrictEqual()
+                       : isolate()->builtins()->Equal(),
+              RelocInfo::CODE_TARGET);
+      __ Pop(cp);
     }
     // Turn true into 0 and false into some non-zero value.
     STATIC_ASSERT(EQUAL == 0);
@@ -916,7 +904,6 @@
   SaveFPRegsMode mode = kSaveFPRegs;
   CEntryStub(isolate, 1, mode).GetCode();
   StoreBufferOverflowStub(isolate, mode).GetCode();
-  isolate->set_fp_stubs_generated(true);
 }
 
 
@@ -1155,9 +1142,9 @@
 
   // We build an EntryFrame.
   __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
-  int marker = type();
-  __ li(t2, Operand(Smi::FromInt(marker)));
-  __ li(t1, Operand(Smi::FromInt(marker)));
+  StackFrame::Type marker = type();
+  __ li(t2, Operand(StackFrame::TypeToMarker(marker)));
+  __ li(t1, Operand(StackFrame::TypeToMarker(marker)));
   __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                       isolate)));
   __ lw(t0, MemOperand(t0));
@@ -1188,12 +1175,12 @@
   __ lw(t2, MemOperand(t1));
   __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
   __ sw(fp, MemOperand(t1));
-  __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ li(t0, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   Label cont;
   __ b(&cont);
   __ nop();   // Branch delay slot nop.
   __ bind(&non_outermost_js);
-  __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+  __ li(t0, Operand(StackFrame::INNER_JSENTRY_FRAME));
   __ bind(&cont);
   __ push(t0);
 
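Editorial sketch: the marker pushed here is no longer a Smi; StackFrame::TypeToMarker produces a small tagged integer that cannot be mistaken for a frame pointer. A minimal sketch of the idea (assumed shape, not necessarily V8's exact encoding):

#include <cstdint>

// Markers stay odd, so they can never alias a word-aligned fp value.
int32_t TypeToMarker(int32_t type) { return (type << 1) | 1; }

bool IsFrameMarker(int32_t value) { return (value & 1) == 1; }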
@@ -1260,10 +1247,8 @@
   // Check if the current stack frame is marked as the outermost JS frame.
   Label non_outermost_js_2;
   __ pop(t1);
-  __ Branch(&non_outermost_js_2,
-            ne,
-            t1,
-            Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ Branch(&non_outermost_js_2, ne, t1,
+            Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   __ li(t1, Operand(ExternalReference(js_entry_sp)));
   __ sw(zero_reg, MemOperand(t1));
   __ bind(&non_outermost_js_2);
@@ -1286,50 +1271,6 @@
   __ Jump(ra);
 }
 
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
-  // Return address is in ra.
-  Label miss;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register index = LoadDescriptor::NameRegister();
-  Register scratch = t1;
-  Register result = v0;
-  DCHECK(!scratch.is(receiver) && !scratch.is(index));
-  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()));
-
-  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          RECEIVER_IS_STRING);
-  char_at_generator.GenerateFast(masm);
-  __ Ret();
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  // Ensure that the vector and slot registers won't be clobbered before
-  // calling the miss handler.
-  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::VectorRegister(),
-                     LoadWithVectorDescriptor::SlotRegister()));
-
-  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, t0,
-                                                          t1, &miss);
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -1431,7 +1372,7 @@
   // (6) External string.  Make it, offset-wise, look like a sequential string.
   //     Go to (4).
   // (7) Short external string or not a string?  If yes, bail out to runtime.
-  // (8) Sliced string.  Replace subject with parent.  Go to (1).
+  // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
 
   Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
       not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
@@ -1452,6 +1393,7 @@
   // (2) Sequential or cons?  If not, go to (5).
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kThinStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   // Go to (5).
@@ -1478,12 +1420,12 @@
   __ Branch(&runtime, ls, a3, Operand(a1));
   __ sra(a1, a1, kSmiTagSize);  // Untag the Smi.
 
-  STATIC_ASSERT(kStringEncodingMask == 4);
-  STATIC_ASSERT(kOneByteStringTag == 4);
+  STATIC_ASSERT(kStringEncodingMask == 8);
+  STATIC_ASSERT(kOneByteStringTag == 8);
   STATIC_ASSERT(kTwoByteStringTag == 0);
   __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for one-byte.
   __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
-  __ sra(a3, a0, 2);  // a3 is 1 for ASCII, 0 for UC16 (used below).
+  __ sra(a3, a0, 3);  // a3 is 1 for ASCII, 0 for UC16 (used below).
   __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
   __ Movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
 
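Editorial sketch: with kOneByteStringTag/kStringEncodingMask moving from bit 2 (value 4) to bit 3 (value 8), the shift that turns the masked encoding bit into a 0/1 code selector moves from 2 to 3, as updated above:

// 1 selects the one-byte regexp code object, 0 the two-byte one.
int OneByteSelector(int instance_type) {
  return (instance_type & 8) >> 3;  // kStringEncodingMask == 8 after this change
}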
@@ -1728,12 +1670,18 @@
   __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
   __ Branch(&runtime, ne, at, Operand(zero_reg));
 
-  // (8) Sliced string.  Replace subject with parent.  Go to (4).
+  // (8) Sliced or thin string.  Replace subject with parent.  Go to (4).
+  Label thin_string;
+  __ Branch(&thin_string, eq, a1, Operand(kThinStringTag));
   // Load offset into t0 and replace subject string with parent.
   __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   __ sra(t0, t0, kSmiTagSize);
   __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   __ jmp(&check_underlying);  // Go to (4).
+
+  __ bind(&thin_string);
+  __ lw(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+  __ jmp(&check_underlying);  // Go to (4).
 #endif  // V8_INTERPRETED_REGEXP
 }
 
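Editorial sketch: step (8) now unwraps both representations that merely point at another string. A sliced string contributes its offset and forwards to its parent; a thin string forwards directly to its actual payload. Modelled with hypothetical standalone types (not V8's classes):

struct Str {
  enum Kind { kDirect, kThin, kSliced } kind;
  Str* target = nullptr;  // thin: actual string; sliced: parent
  int offset = 0;         // sliced strings only
};

// Strip indirections until a direct string remains, accumulating offsets;
// this is the loop the stub realizes by jumping back to check_underlying.
Str* UnwrapSubject(Str* s, int* offset) {
  while (s->kind != Str::kDirect) {
    if (s->kind == Str::kSliced) *offset += s->offset;
    s = s->target;
  }
  return s;
}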
@@ -1771,9 +1719,9 @@
   // a3 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
 
-  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+  DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
-  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+  DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
   // Load the cache state into t2.
@@ -1783,7 +1731,7 @@
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
   // We don't know if t2 is a WeakCell or a Symbol, but it's harmless to read at
-  // this position in a symbol (see static asserts in type-feedback-vector.h).
+  // this position in a symbol (see static asserts in feedback-vector.h).
   Label check_allocation_site;
   Register feedback_map = t1;
   Register weak_value = t4;
@@ -1895,187 +1843,6 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
-                               Register slot) {
-  __ Lsa(at, feedback_vector, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(slot, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-  __ Addu(slot, slot, Operand(Smi::FromInt(1)));
-  __ sw(slot, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
-  // a0 - number of arguments
-  // a1 - function
-  // a3 - slot id
-  // a2 - vector
-  // t0 - loaded from vector[slot]
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
-  __ Branch(miss, ne, a1, Operand(at));
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, a2, a3);
-
-  __ mov(a2, t0);
-  __ mov(a3, a1);
-  ArrayConstructorStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
-  // a0 - number of arguments
-  // a1 - function
-  // a3 - slot id (Smi)
-  // a2 - vector
-  Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
-  // The checks. First, does a1 match the recorded monomorphic target?
-  __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(t0, FieldMemOperand(t0, FixedArray::kHeaderSize));
-
-  // We don't know that we have a weak cell. We might have a private symbol
-  // or an AllocationSite, but the memory is safe to examine.
-  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
-  // FixedArray.
-  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
-  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
-  // computed, meaning that it can't appear to be a pointer. If the low bit is
-  // 0, then hash is computed, but the 0 bit prevents the field from appearing
-  // to be a pointer.
-  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
-  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
-                    WeakCell::kValueOffset &&
-                WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
-  __ lw(t1, FieldMemOperand(t0, WeakCell::kValueOffset));
-  __ Branch(&extra_checks_or_miss, ne, a1, Operand(t1));
-
-  // The compare above could have been a SMI/SMI comparison. Guard against this
-  // convincing us that we have a monomorphic JSFunction.
-  __ JumpIfSmi(a1, &extra_checks_or_miss);
-
-  __ bind(&call_function);
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, a2, a3);
-
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
-                                                    tail_call_mode()),
-          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
-
-  __ bind(&extra_checks_or_miss);
-  Label uninitialized, miss, not_allocation_site;
-
-  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-  __ Branch(&call, eq, t0, Operand(at));
-
-  // Verify that t0 contains an AllocationSite
-  __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
-  __ Branch(&not_allocation_site, ne, t1, Operand(at));
-
-  HandleArrayCase(masm, &miss);
-
-  __ bind(&not_allocation_site);
-
-  // The following cases attempt to handle MISS cases without going to the
-  // runtime.
-  if (FLAG_trace_ic) {
-    __ Branch(&miss);
-  }
-
-  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
-  __ Branch(&uninitialized, eq, t0, Operand(at));
-
-  // We are going megamorphic. If the feedback is a JSFunction, it is fine
-  // to handle it here. More complex cases are dealt with in the runtime.
-  __ AssertNotSmi(t0);
-  __ GetObjectType(t0, t1, t1);
-  __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
-  __ Lsa(t0, a2, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-  __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
-
-  __ bind(&call);
-  IncrementCallCount(masm, a2, a3);
-
-  __ bind(&call_count_incremented);
-
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
-          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
-
-  __ bind(&uninitialized);
-
-  // We are going monomorphic, provided we actually have a JSFunction.
-  __ JumpIfSmi(a1, &miss);
-
-  // Goto miss case if we do not have a function.
-  __ GetObjectType(a1, t0, t0);
-  __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE));
-
-  // Make sure the function is not the Array() function, which requires special
-  // behavior on MISS.
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, t0);
-  __ Branch(&miss, eq, a1, Operand(t0));
-
-  // Make sure the function belongs to the same native context.
-  __ lw(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
-  __ lw(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
-  __ lw(t1, NativeContextMemOperand());
-  __ Branch(&miss, ne, t0, Operand(t1));
-
-  // Store the function. Use a stub since we need a frame for allocation.
-  // a2 - vector
-  // a3 - slot
-  // a1 - function
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    CreateWeakCellStub create_stub(masm->isolate());
-    __ SmiTag(a0);
-    __ Push(a0);
-    __ Push(a2, a3);
-    __ Push(cp, a1);
-    __ CallStub(&create_stub);
-    __ Pop(cp, a1);
-    __ Pop(a2, a3);
-    __ Pop(a0);
-    __ SmiUntag(a0);
-  }
-
-  __ Branch(&call_function);
-
-  // We are here because tracing is on or we encountered a MISS case we can't
-  // handle here.
-  __ bind(&miss);
-  GenerateMiss(masm);
-
-  __ Branch(&call_count_incremented);
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
-  FrameScope scope(masm, StackFrame::INTERNAL);
-
-  // Preserve the number of arguments as Smi.
-  __ SmiTag(a0);
-  __ Push(a0);
-
-  // Push the receiver and the function and feedback info.
-  __ Push(a1, a2, a3);
-
-  // Call the entry.
-  __ CallRuntime(Runtime::kCallIC_Miss);
-
-  // Move result to a1 and exit the internal frame.
-  __ mov(a1, v0);
-
-  // Restore number of arguments.
-  __ Pop(a0);
-  __ SmiUntag(a0);
-}
-
-
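Editorial sketch: for reference, the removed IncrementCallCount bumped the Smi call count stored one element after the feedback slot. Modelled with plain ints rather than Smis (not V8 code):

#include <vector>

// vector[slot] holds the feedback; vector[slot + 1] holds the call count.
void IncrementCallCount(std::vector<int>& feedback_vector, int slot) {
  feedback_vector[slot + 1] += 1;
}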
 // StringCharCodeAtGenerator.
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   DCHECK(!t0.is(index_));
@@ -2173,96 +1940,6 @@
   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
 }
 
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
-  // Fast case of Heap::LookupSingleCharacterStringFromCode.
-
-  DCHECK(!t0.is(result_));
-  DCHECK(!t0.is(code_));
-
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiShiftSize == 0);
-  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
-  __ And(t0, code_, Operand(kSmiTagMask |
-                            ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
-  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
-
-  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
-  // At this point code register contains smi tagged one-byte char code.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ Lsa(result_, result_, code_, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
-  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
-  __ Branch(&slow_case_, eq, result_, Operand(t0));
-  __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
-  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
-  __ bind(&slow_case_);
-  call_helper.BeforeCall(masm);
-  __ push(code_);
-  __ CallRuntime(Runtime::kStringCharFromCode);
-  __ Move(result_, v0);
-
-  call_helper.AfterCall(masm);
-  __ Branch(&exit_);
-
-  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
-
-enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
-                                          Register dest,
-                                          Register src,
-                                          Register count,
-                                          Register scratch,
-                                          String::Encoding encoding) {
-  if (FLAG_debug_code) {
-    // Check that destination is word aligned.
-    __ And(scratch, dest, Operand(kPointerAlignmentMask));
-    __ Check(eq,
-             kDestinationOfCopyNotAligned,
-             scratch,
-             Operand(zero_reg));
-  }
-
-  // Assumes word reads and writes are little endian.
-  // Nothing to do for zero characters.
-  Label done;
-
-  if (encoding == String::TWO_BYTE_ENCODING) {
-    __ Addu(count, count, count);
-  }
-
-  Register limit = count;  // Read until dest equals this.
-  __ Addu(limit, dest, Operand(count));
-
-  Label loop_entry, loop;
-  // Copy bytes from src to dest until dest hits limit.
-  __ Branch(&loop_entry);
-  __ bind(&loop);
-  __ lbu(scratch, MemOperand(src));
-  __ Addu(src, src, Operand(1));
-  __ sb(scratch, MemOperand(dest));
-  __ Addu(dest, dest, Operand(1));
-  __ bind(&loop_entry);
-  __ Branch(&loop, lt, dest, Operand(limit));
-
-  __ bind(&done);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(
     MacroAssembler* masm, Register left, Register right, Register scratch1,
     Register scratch2, Register scratch3) {
@@ -2889,85 +2566,6 @@
   __ Branch(miss, ne, at, Operand(zero_reg));
 }
 
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
-                                                      Label* miss,
-                                                      Label* done,
-                                                      Register elements,
-                                                      Register name,
-                                                      Register scratch1,
-                                                      Register scratch2) {
-  DCHECK(!elements.is(scratch1));
-  DCHECK(!elements.is(scratch2));
-  DCHECK(!name.is(scratch1));
-  DCHECK(!name.is(scratch2));
-
-  __ AssertName(name);
-
-  // Compute the capacity mask.
-  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
-  __ sra(scratch1, scratch1, kSmiTagSize);  // convert smi to int
-  __ Subu(scratch1, scratch1, Operand(1));
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
-    if (i > 0) {
-      // Add the probe offset (i + i * i) left shifted to avoid right shifting
-      // the hash in a separate instruction. The value hash + i + i * i is right
-      // shifted in the following and instruction.
-      DCHECK(NameDictionary::GetProbeOffset(i) <
-             1 << (32 - Name::kHashFieldOffset));
-      __ Addu(scratch2, scratch2, Operand(
-          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
-    }
-    __ srl(scratch2, scratch2, Name::kHashShift);
-    __ And(scratch2, scratch1, scratch2);
-
-    // Scale the index by multiplying by the element size.
-    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
-    // scratch2 = scratch2 * 3.
-
-    __ Lsa(scratch2, scratch2, scratch2, 1);
-
-    // Check if the key is identical to the name.
-    __ Lsa(scratch2, elements, scratch2, 2);
-    __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
-    __ Branch(done, eq, name, Operand(at));
-  }
-
-  const int spill_mask =
-      (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
-       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
-      ~(scratch1.bit() | scratch2.bit());
-
-  __ MultiPush(spill_mask);
-  if (name.is(a0)) {
-    DCHECK(!elements.is(a1));
-    __ Move(a1, name);
-    __ Move(a0, elements);
-  } else {
-    __ Move(a0, elements);
-    __ Move(a1, name);
-  }
-  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
-  __ CallStub(&stub);
-  __ mov(scratch2, a2);
-  __ mov(at, v0);
-  __ MultiPop(spill_mask);
-
-  __ Branch(done, ne, at, Operand(zero_reg));
-  __ Branch(miss, eq, at, Operand(zero_reg));
-}
-
-
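Editorial sketch: the unrolled probes in the removed lookup follow the dictionary's quadratic schedule, with GetProbeOffset quadratic in the attempt number and the result masked into a power-of-two capacity. A standalone sketch (the exact offset formula is an assumption):

#include <cstdint>

// capacity_mask == capacity - 1 for a power-of-two table. A triangular
// offset sequence visits every slot of such a table before repeating.
uint32_t GetProbeOffset(uint32_t i) { return (i + i * i) / 2; }
uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t capacity_mask) {
  return (hash + GetProbeOffset(i)) & capacity_mask;
}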
 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   // we cannot call anything that could cause a GC from this stub.
@@ -3260,239 +2858,6 @@
   __ Addu(sp, sp, a1);
 }
 
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(a2);
-  CallICStub stub(isolate(), state());
-  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
-                             Register receiver_map, Register scratch1,
-                             Register scratch2, bool is_polymorphic,
-                             Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label start_polymorphic;
-
-  Register cached_map = scratch1;
-
-  __ lw(cached_map,
-        FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-  __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
-  // found, now call handler.
-  Register handler = feedback;
-  __ lw(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t9);
-
-
-  Register length = scratch2;
-  __ bind(&start_polymorphic);
-  __ lw(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-  if (!is_polymorphic) {
-    // If the IC could be monomorphic we have to make sure we don't go past the
-    // end of the feedback array.
-    __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
-  }
-
-  Register too_far = length;
-  Register pointer_reg = feedback;
-
-  // +-----+------+------+-----+-----+ ... ----+
-  // | map | len  | wm0  | h0  | wm1 |      hN |
-  // +-----+------+------+-----+-----+ ... ----+
-  //                 0      1     2        len-1
-  //                              ^              ^
-  //                              |              |
-  //                         pointer_reg      too_far
-  //                         aka feedback     scratch2
-  // also need receiver_map
-  // use cached_map (scratch1) to look in the weak map values.
-  __ Lsa(too_far, feedback, length, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Addu(pointer_reg, feedback,
-          Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
-  __ bind(&next_loop);
-  __ lw(cached_map, MemOperand(pointer_reg));
-  __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
-  __ lw(handler, MemOperand(pointer_reg, kPointerSize));
-  __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t9);
-
-  __ bind(&prepare_next);
-  __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
-  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
-
-  // We exhausted our array of map handler pairs.
-  __ jmp(miss);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
-                                  Register receiver_map, Register feedback,
-                                  Register vector, Register slot,
-                                  Register scratch, Label* compare_map,
-                                  Label* load_smi_map, Label* try_array) {
-  __ JumpIfSmi(receiver, load_smi_map);
-  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bind(compare_map);
-  Register cached_map = scratch;
-  // Move the weak map into the weak_cell register.
-  __ lw(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
-  __ Branch(try_array, ne, cached_map, Operand(receiver_map));
-  Register handler = feedback;
-
-  __ Lsa(handler, vector, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(handler,
-        FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
-  __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t9);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  KeyedStoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
-                                       Register receiver_map, Register scratch1,
-                                       Register scratch2, Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label start_polymorphic;
-  Label transition_call;
-
-  Register cached_map = scratch1;
-  Register too_far = scratch2;
-  Register pointer_reg = feedback;
-  __ lw(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
-  // +-----+------+------+-----+-----+-----+ ... ----+
-  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
-  // +-----+------+------+-----+-----+ ----+ ... ----+
-  //                 0      1     2              len-1
-  //                 ^                                 ^
-  //                 |                                 |
-  //             pointer_reg                        too_far
-  //             aka feedback                       scratch2
-  // also need receiver_map
-  // use cached_map (scratch1) to look in the weak map values.
-  __ Lsa(too_far, feedback, too_far, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Addu(pointer_reg, feedback,
-          Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
-  __ bind(&next_loop);
-  __ lw(cached_map, MemOperand(pointer_reg));
-  __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
-  // Is it a transitioning store?
-  __ lw(too_far, MemOperand(pointer_reg, kPointerSize));
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ Branch(&transition_call, ne, too_far, Operand(at));
-  __ lw(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
-  __ Addu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t9);
-
-  __ bind(&transition_call);
-  __ lw(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
-  __ JumpIfSmi(too_far, miss);
-
-  __ lw(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-
-  // Load the map into the correct register.
-  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
-  __ mov(feedback, too_far);
-
-  __ Addu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t9);
-
-  __ bind(&prepare_next);
-  __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
-  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
-
-  // We exhausted our array of map handler pairs.
-  __ jmp(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // a1
-  Register key = StoreWithVectorDescriptor::NameRegister();           // a2
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // a3
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // t0
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0));          // a0
-  Register feedback = t1;
-  Register receiver_map = t2;
-  Register scratch1 = t5;
-
-  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-  __ Branch(&not_array, ne, scratch1, Operand(at));
-
-  // We have a polymorphic element handler.
-  Label polymorphic, try_poly_name;
-  __ bind(&polymorphic);
-
-  Register scratch2 = t4;
-
-  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
-                             &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-  __ Branch(&try_poly_name, ne, feedback, Operand(at));
-  Handle<Code> megamorphic_stub =
-      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ Branch(&miss, ne, key, Operand(feedback));
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(feedback,
-        FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
-                   &miss);
-
-  __ bind(&miss);
-  KeyedStoreIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ Branch(USE_DELAY_SLOT, &compare_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
-}
-
-
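Editorial sketch: the removed IC handler walks all operate on the feedback array layout drawn in their comments, weak-cell(map)/handler pairs stored back to back after the length. A minimal sketch of that scan (types hypothetical, not V8's):

struct FeedbackEntry {
  const void* weak_map;  // weak cell holding a Map
  const void* handler;   // code or data handler
};

// Linear scan over {map, handler} pairs; nullptr means "miss".
const void* FindHandler(const FeedbackEntry* entries, int pair_count,
                        const void* receiver_map) {
  for (int i = 0; i < pair_count; i++) {
    if (entries[i].weak_map == receiver_map) return entries[i].handler;
  }
  return nullptr;
}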
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
@@ -3847,620 +3212,6 @@
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a1 : target
-  //  -- a3 : new target
-  //  -- cp : context
-  //  -- ra : return address
-  // -----------------------------------
-  __ AssertFunction(a1);
-  __ AssertReceiver(a3);
-
-  // Verify that the new target is a JSFunction.
-  Label new_object;
-  __ GetObjectType(a3, a2, a2);
-  __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
-
-  // Load the initial map and verify that it's in fact a map.
-  __ lw(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
-  __ JumpIfSmi(a2, &new_object);
-  __ GetObjectType(a2, a0, a0);
-  __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
-
-  // Fall back to runtime if the target differs from the new target's
-  // initial map constructor.
-  __ lw(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
-  __ Branch(&new_object, ne, a0, Operand(a1));
-
-  // Allocate the JSObject on the heap.
-  Label allocate, done_allocate;
-  __ lbu(t0, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-  __ Allocate(t0, v0, t1, a0, &allocate, SIZE_IN_WORDS);
-  __ bind(&done_allocate);
-
-  // Initialize the JSObject fields.
-  __ sw(a2, FieldMemOperand(v0, JSObject::kMapOffset));
-  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
-  __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
-  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
-  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ Addu(a1, v0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
-  // ----------- S t a t e -------------
-  //  -- v0 : result (tagged)
-  //  -- a1 : result fields (untagged)
-  //  -- t1 : result end (untagged)
-  //  -- a2 : initial map
-  //  -- cp : context
-  //  -- ra : return address
-  // -----------------------------------
-
-  // Perform in-object slack tracking if requested.
-  Label slack_tracking;
-  STATIC_ASSERT(Map::kNoSlackTracking == 0);
-  __ lw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
-  __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
-  __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(0));
-  __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);  // In delay slot.
-  {
-    // Initialize all in-object fields with undefined.
-    __ InitializeFieldsWithFiller(a1, t1, a0);
-    __ Ret();
-  }
-  __ bind(&slack_tracking);
-  {
-    // Decrease generous allocation count.
-    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
-    __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
-    __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
-
-    // Initialize the in-object fields with undefined.
-    __ lbu(t0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
-    __ sll(t0, t0, kPointerSizeLog2);
-    __ subu(t0, t1, t0);
-    __ InitializeFieldsWithFiller(a1, t0, a0);
-
-    // Initialize the remaining (reserved) fields with one pointer filler map.
-    __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
-    __ InitializeFieldsWithFiller(a1, t1, a0);
-
-    // Check if we can finalize the instance size.
-    Label finalize;
-    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
-    __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
-    __ Branch(&finalize, eq, a3, Operand(zero_reg));
-    __ Ret();
-
-    // Finalize the instance size.
-    __ bind(&finalize);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(v0, a2);
-      __ CallRuntime(Runtime::kFinalizeInstanceSize);
-      __ Pop(v0);
-    }
-    __ Ret();
-  }
-
-  // Fall back to %AllocateInNewSpace.
-  __ bind(&allocate);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    __ sll(t0, t0, kPointerSizeLog2 + kSmiTagSize);
-    __ Push(a2, t0);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ Pop(a2);
-  }
-  __ lbu(t1, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-  __ Lsa(t1, v0, t1, kPointerSizeLog2);
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ Subu(t1, t1, Operand(kHeapObjectTag));
-  __ jmp(&done_allocate);
-
-  // Fall back to %NewObject.
-  __ bind(&new_object);
-  __ Push(a1, a3);
-  __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a1 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- ra : return address
-  // -----------------------------------
-  __ AssertFunction(a1);
-
-  // Make a2 point to the JavaScript frame.
-  __ mov(a2, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
-    __ Branch(&ok, eq, a1, Operand(a3));
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have rest parameters (only possible if we have an
-  // arguments adaptor frame below the function frame).
-  Label no_rest_parameters;
-  __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-  __ lw(a3, MemOperand(a2, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ Branch(&no_rest_parameters, ne, a3,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Check if the arguments adaptor frame contains more arguments than
-  // specified by the function's internal formal parameter count.
-  Label rest_parameters;
-  __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a3,
-        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ Subu(a0, a0, Operand(a3));
-  __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
-
-  // Return an empty rest parameter array.
-  __ bind(&no_rest_parameters);
-  {
-    // ----------- S t a t e -------------
-    //  -- cp : context
-    //  -- ra : return address
-    // -----------------------------------
-
-    // Allocate an empty rest parameter array.
-    Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Setup the rest parameter array in v0.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
-    __ sw(a1, FieldMemOperand(v0, JSArray::kMapOffset));
-    __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
-    __ sw(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
-    __ sw(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
-    __ Move(a1, Smi::kZero);
-    __ Ret(USE_DELAY_SLOT);
-    __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));  // In delay slot
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-
-    // Fall back to %AllocateInNewSpace.
-    __ bind(&allocate);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(Smi::FromInt(JSArray::kSize));
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-    }
-    __ jmp(&done_allocate);
-  }
-
-  __ bind(&rest_parameters);
-  {
-    // Compute the pointer to the first rest parameter (skipping the receiver).
-    __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
-    __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
-                            1 * kPointerSize));
-
-    // ----------- S t a t e -------------
-    //  -- cp : context
-    //  -- a0 : number of rest parameters (tagged)
-    //  -- a1 : function
-    //  -- a2 : pointer to first rest parameters
-    //  -- ra : return address
-    // -----------------------------------
-
-    // Allocate space for the rest parameter array plus the backing store.
-    Label allocate, done_allocate;
-    __ li(t0, Operand(JSArray::kSize + FixedArray::kHeaderSize));
-    __ Lsa(t0, t0, a0, kPointerSizeLog2 - 1);
-    __ Allocate(t0, v0, a3, t1, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Setup the elements array in v0.
-    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-    __ sw(at, FieldMemOperand(v0, FixedArray::kMapOffset));
-    __ sw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset));
-    __ Addu(a3, v0, Operand(FixedArray::kHeaderSize));
-    {
-      Label loop, done_loop;
-      __ sll(at, a0, kPointerSizeLog2 - 1);
-      __ Addu(a1, a3, at);
-      __ bind(&loop);
-      __ Branch(&done_loop, eq, a1, Operand(a3));
-      __ lw(at, MemOperand(a2, 0 * kPointerSize));
-      __ sw(at, FieldMemOperand(a3, 0 * kPointerSize));
-      __ Subu(a2, a2, Operand(1 * kPointerSize));
-      __ Addu(a3, a3, Operand(1 * kPointerSize));
-      __ jmp(&loop);
-      __ bind(&done_loop);
-    }
-
-    // Setup the rest parameter array in a3.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
-    __ sw(at, FieldMemOperand(a3, JSArray::kMapOffset));
-    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-    __ sw(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
-    __ sw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
-    __ sw(a0, FieldMemOperand(a3, JSArray::kLengthOffset));
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ Ret(USE_DELAY_SLOT);
-    __ mov(v0, a3);  // In delay slot
-
-    // Fall back to %AllocateInNewSpace (if not too big).
-    Label too_big_for_new_space;
-    __ bind(&allocate);
-    __ Branch(&too_big_for_new_space, gt, t0,
-              Operand(kMaxRegularHeapObjectSize));
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(t0);
-      __ Push(a0, a2, t0);
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-      __ Pop(a0, a2);
-    }
-    __ jmp(&done_allocate);
-
-    // Fall back to %NewStrictArguments.
-    __ bind(&too_big_for_new_space);
-    __ Push(a1);
-    __ TailCallRuntime(Runtime::kNewStrictArguments);
-  }
-}
-
-
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a1 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- ra : return address
-  // -----------------------------------
-  __ AssertFunction(a1);
-
-  // Make t0 point to the JavaScript frame.
-  __ mov(t0, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ lw(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ lw(a3, MemOperand(t0, StandardFrameConstants::kFunctionOffset));
-    __ Branch(&ok, eq, a1, Operand(a3));
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
-  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a2,
-        FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ Lsa(a3, t0, a2, kPointerSizeLog2 - 1);
-  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // a1 : function
-  // a2 : number of parameters (tagged)
-  // a3 : parameters pointer
-  // t0 : JavaScript frame pointer
-  // Registers used over whole function:
-  //  t1 : arguments count (tagged)
-  //  t2 : mapped parameter count (tagged)
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ lw(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
-  __ lw(a0, MemOperand(t0, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ Branch(&adaptor_frame, eq, a0,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // No adaptor, parameter count = argument count.
-  __ mov(t1, a2);
-  __ Branch(USE_DELAY_SLOT, &try_allocate);
-  __ mov(t2, a2);  // In delay slot.
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ Lsa(t0, t0, t1, 1);
-  __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // t1 = argument count (tagged)
-  // t2 = parameter count (tagged)
-  // Compute the mapped parameter count = min(t2, t1) in t2.
-  __ mov(t2, a2);
-  __ Branch(&try_allocate, le, t2, Operand(t1));
-  __ mov(t2, t1);
-
-  __ bind(&try_allocate);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map, has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  // If there are no mapped parameters, we do not need the parameter_map.
-  Label param_map_size;
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
-  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
-  __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when t2 == 0.
-  __ sll(t5, t2, 1);
-  __ addiu(t5, t5, kParameterMapHeaderSize);
-  __ bind(&param_map_size);
-
-  // 2. Backing store.
-  __ Lsa(t5, t5, t1, 1);
-  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ Addu(t5, t5, Operand(JSSloppyArgumentsObject::kSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(t5, v0, t5, t0, &runtime, NO_ALLOCATION_FLAGS);
-
-  // v0 = address of new object(s) (tagged)
-  // a2 = argument count (smi-tagged)
-  // Get the arguments boilerplate from the current native context into t0.
-  const int kNormalOffset =
-      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
-  const int kAliasedOffset =
-      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
-  __ lw(t0, NativeContextMemOperand());
-  Label skip2_ne, skip2_eq;
-  __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
-  __ lw(t0, MemOperand(t0, kNormalOffset));
-  __ bind(&skip2_ne);
-
-  __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
-  __ lw(t0, MemOperand(t0, kAliasedOffset));
-  __ bind(&skip2_eq);
-
-  // v0 = address of new object (tagged)
-  // a2 = argument count (smi-tagged)
-  // t0 = address of arguments map (tagged)
-  // t2 = mapped parameter count (tagged)
-  __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
-  __ LoadRoot(t5, Heap::kEmptyFixedArrayRootIndex);
-  __ sw(t5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
-  __ sw(t5, FieldMemOperand(v0, JSObject::kElementsOffset));
-
-  // Set up the callee in-object property.
-  __ AssertNotSmi(a1);
-  __ sw(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  __ AssertSmi(t1);
-  __ sw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, t0 will point there, otherwise
-  // it will point to the backing store.
-  __ Addu(t0, v0, Operand(JSSloppyArgumentsObject::kSize));
-  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
-
-  // v0 = address of new object (tagged)
-  // a2 = argument count (tagged)
-  // t0 = address of parameter map or backing store (tagged)
-  // t2 = mapped parameter count (tagged)
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  Label skip3;
-  __ Branch(&skip3, ne, t2, Operand(Smi::kZero));
-  // Move backing store address to a1, because it is
-  // expected there when filling in the unmapped arguments.
-  __ mov(a1, t0);
-  __ bind(&skip3);
-
-  __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::kZero));
-
-  __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
-  __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
-  __ Addu(t1, t2, Operand(Smi::FromInt(2)));
-  __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
-  __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
-  __ Lsa(t1, t0, t2, 1);
-  __ Addu(t1, t1, Operand(kParameterMapHeaderSize));
-  __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameters thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop, parameters_test;
-  __ mov(t1, t2);
-  __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
-  __ Subu(t5, t5, Operand(t2));
-  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
-  __ Lsa(a1, t0, t1, 1);
-  __ Addu(a1, a1, Operand(kParameterMapHeaderSize));
-
-  // a1 = address of backing store (tagged)
-  // t0 = address of parameter map (tagged)
-  // a0 = temporary scratch (a.o., for address calculation)
-  // t1 = loop variable (tagged)
-  // t3 = the hole value
-  __ jmp(&parameters_test);
-
-  __ bind(&parameters_loop);
-  __ Subu(t1, t1, Operand(Smi::FromInt(1)));
-  __ sll(a0, t1, 1);
-  __ Addu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-  __ Addu(t6, t0, a0);
-  __ sw(t5, MemOperand(t6));
-  __ Subu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
-  __ Addu(t6, a1, a0);
-  __ sw(t3, MemOperand(t6));
-  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
-  __ bind(&parameters_test);
-  __ Branch(&parameters_loop, ne, t1, Operand(Smi::kZero));
-
-  // t1 = argument count (tagged).
-  __ lw(t1, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
-
-  __ bind(&skip_parameter_map);
-  // v0 = address of new object (tagged)
-  // a1 = address of backing store (tagged)
-  // t1 = argument count (tagged)
-  // t2 = mapped parameter count (tagged)
-  // t5 = scratch
-  // Copy arguments header and remaining slots (if there are any).
-  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
-  __ sw(t5, FieldMemOperand(a1, FixedArray::kMapOffset));
-  __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset));
-
-  Label arguments_loop, arguments_test;
-  __ sll(t6, t2, 1);
-  __ Subu(a3, a3, Operand(t6));
-  __ jmp(&arguments_test);
-
-  __ bind(&arguments_loop);
-  __ Subu(a3, a3, Operand(kPointerSize));
-  __ lw(t0, MemOperand(a3, 0));
-  __ Lsa(t5, a1, t2, 1);
-  __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize));
-  __ Addu(t2, t2, Operand(Smi::FromInt(1)));
-
-  __ bind(&arguments_test);
-  __ Branch(&arguments_loop, lt, t2, Operand(t1));
-
-  // Return.
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  // t1 = argument count (tagged)
-  __ bind(&runtime);
-  __ Push(a1, a3, t1);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a1 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- ra : return address
-  // -----------------------------------
-  __ AssertFunction(a1);
-
-  // Make a2 point to the JavaScript frame.
-  __ mov(a2, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ lw(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
-    __ Branch(&ok, eq, a1, Operand(a3));
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have an arguments adaptor frame below the function frame.
-  Label arguments_adaptor, arguments_done;
-  __ lw(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-  __ lw(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ Branch(&arguments_adaptor, eq, a0,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  {
-    __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-    __ lw(a0,
-          FieldMemOperand(t0, SharedFunctionInfo::kFormalParameterCountOffset));
-    __ Lsa(a2, a2, a0, kPointerSizeLog2 - 1);
-    __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
-                            1 * kPointerSize));
-  }
-  __ Branch(&arguments_done);
-  __ bind(&arguments_adaptor);
-  {
-    __ lw(a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ Lsa(a2, a3, a0, kPointerSizeLog2 - 1);
-    __ Addu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
-                            1 * kPointerSize));
-  }
-  __ bind(&arguments_done);
-
-  // ----------- S t a t e -------------
-  //  -- cp : context
-  //  -- a0 : number of rest parameters (tagged)
-  //  -- a1 : function
-  //  -- a2 : pointer to first rest parameters
-  //  -- ra : return address
-  // -----------------------------------
-
-  // Allocate space for the strict arguments object plus the backing store.
-  Label allocate, done_allocate;
-  __ li(t0, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ Lsa(t0, t0, a0, kPointerSizeLog2 - 1);
-  __ Allocate(t0, v0, a3, t1, &allocate, NO_ALLOCATION_FLAGS);
-  __ bind(&done_allocate);
-
-  // Setup the elements array in v0.
-  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-  __ sw(at, FieldMemOperand(v0, FixedArray::kMapOffset));
-  __ sw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset));
-  __ Addu(a3, v0, Operand(FixedArray::kHeaderSize));
-  {
-    Label loop, done_loop;
-    __ sll(at, a0, kPointerSizeLog2 - 1);
-    __ Addu(a1, a3, at);
-    __ bind(&loop);
-    __ Branch(&done_loop, eq, a1, Operand(a3));
-    __ lw(at, MemOperand(a2, 0 * kPointerSize));
-    __ sw(at, FieldMemOperand(a3, 0 * kPointerSize));
-    __ Subu(a2, a2, Operand(1 * kPointerSize));
-    __ Addu(a3, a3, Operand(1 * kPointerSize));
-    __ Branch(&loop);
-    __ bind(&done_loop);
-  }
-
-  // Setup the strict arguments object in a3.
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
-  __ sw(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
-  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ sw(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
-  __ sw(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
-  __ sw(a0, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
-  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a3);  // In delay slot
-
-  // Fall back to %AllocateInNewSpace (if not too big).
-  Label too_big_for_new_space;
-  __ bind(&allocate);
-  __ Branch(&too_big_for_new_space, gt, t0, Operand(kMaxRegularHeapObjectSize));
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(t0);
-    __ Push(a0, a2, t0);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ Pop(a0, a2);
-  }
-  __ jmp(&done_allocate);
-
-  // Fall back to %NewStrictArguments.
-  __ bind(&too_big_for_new_space);
-  __ Push(a1);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
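
The removed FastNewStrictArgumentsStub above allocated the JSStrictArgumentsObject together with its FixedArray backing store in a single contiguous block, then pointed the elements field at the interior array. A minimal C++ sketch of that single-allocation layout, using hypothetical stand-in types and sizes:

```cpp
// Hypothetical sketch of the single-allocation layout used by the removed
// stub: one block holds the arguments object followed by its FixedArray
// backing store. Types and sizes are illustrative only.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct FixedArrayHeader { void* map; int32_t length; };
struct StrictArguments  { void* map; void* properties; void* elements; int32_t length; };

int main() {
  const int argc = 3;
  // Allocate object + backing store in one go, as the stub did with Allocate().
  size_t total = sizeof(StrictArguments) + sizeof(FixedArrayHeader) + argc * sizeof(void*);
  char* block = static_cast<char*>(std::malloc(total));
  auto* args  = reinterpret_cast<StrictArguments*>(block);
  auto* store = reinterpret_cast<FixedArrayHeader*>(block + sizeof(StrictArguments));
  args->elements = store;  // elements pointer targets the interior array
  args->length   = argc;
  store->length  = argc;
  std::printf("object at %p, backing store at %p\n", (void*)args, (void*)store);
  std::free(block);
  return 0;
}
```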
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index 751095d..e2dd4a9 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -16,17 +16,6 @@
 
 class StringHelper : public AllStatic {
  public:
-  // Generate code for copying a large number of characters. This function
-  // is allowed to spend extra time setting up conditions to make copying
-  // faster. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharacters(MacroAssembler* masm,
-                                     Register dest,
-                                     Register src,
-                                     Register count,
-                                     Register scratch,
-                                     String::Encoding encoding);
-
   // Compares two flat one-byte strings and returns result in v0.
   static void GenerateCompareFlatOneByteStrings(
       MacroAssembler* masm, Register left, Register right, Register scratch1,
@@ -311,14 +300,6 @@
                                      Handle<Name> name,
                                      Register scratch0);
 
-  static void GeneratePositiveLookup(MacroAssembler* masm,
-                                     Label* miss,
-                                     Label* done,
-                                     Register elements,
-                                     Register name,
-                                     Register r0,
-                                     Register r1);
-
   bool SometimesSetsUpAFrame() override { return false; }
 
  private:
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 8aaeaca..beab163 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -605,356 +605,14 @@
 
 #define __ ACCESS_MASM(masm)
 
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* allocation_memento_found) {
-  Register scratch_elements = t0;
-  DCHECK(!AreAliased(receiver, key, value, target_map,
-                     scratch_elements));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    DCHECK(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(
-        receiver, scratch_elements, allocation_memento_found);
-  }
-
-  // Set transitioned map.
-  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      t5,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  // Register ra contains the return address.
-  Label loop, entry, convert_hole, gc_required, only_change_map, done;
-  Register elements = t0;
-  Register length = t1;
-  Register array = t2;
-  Register array_end = array;
-
-  // target_map parameter can be clobbered.
-  Register scratch1 = target_map;
-  Register scratch2 = t5;
-  Register scratch3 = t3;
-
-  // Verify input registers don't conflict with locals.
-  DCHECK(!AreAliased(receiver, key, value, target_map,
-                     elements, length, array, scratch2));
-
-  Register scratch = t6;
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(elements));
-
-  __ push(ra);
-  __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  // elements: source FixedArray
-  // length: number of elements (smi-tagged)
-
-  // Allocate new FixedDoubleArray.
-  __ sll(scratch, length, 2);
-  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
-  __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
-  // array: destination FixedDoubleArray, tagged as heap object
-
-  // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
-  __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
-  // Update receiver's map.
-  __ sw(scratch2, FieldMemOperand(array, HeapObject::kMapOffset));
-
-  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      scratch2,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ Addu(scratch1, array, Operand(kHeapObjectTag - kHeapObjectTag));
-  __ sw(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ RecordWriteField(receiver,
-                      JSObject::kElementsOffset,
-                      scratch1,
-                      scratch2,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-
-  // Prepare for conversion loop.
-  __ Addu(scratch1, elements,
-      Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Addu(scratch3, array,
-          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  __ Lsa(array_end, scratch3, length, 2);
-
-  // Repurpose registers no longer in use.
-  Register hole_lower = elements;
-  Register hole_upper = length;
-  __ li(hole_lower, Operand(kHoleNanLower32));
-  __ li(hole_upper, Operand(kHoleNanUpper32));
-
-  // scratch1: begin of source FixedArray element fields, not tagged
-  // hole_lower: kHoleNanLower32
-  // hole_upper: kHoleNanUpper32
-  // array_end: end of destination FixedDoubleArray, not tagged
-  // scratch3: begin of FixedDoubleArray element fields, not tagged
-
-  __ Branch(&entry);
-
-  __ bind(&only_change_map);
-  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      scratch2,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ Branch(&done);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ lw(ra, MemOperand(sp, 0));
-  __ Branch(USE_DELAY_SLOT, fail);
-  __ addiu(sp, sp, kPointerSize);  // In delay slot.
-
-  // Convert and copy elements.
-  __ bind(&loop);
-  __ lw(scratch2, MemOperand(scratch1));
-  __ Addu(scratch1, scratch1, kIntSize);
-  // scratch2: current element
-  __ UntagAndJumpIfNotSmi(scratch2, scratch2, &convert_hole);
-
-  // Normal smi, convert to double and store.
-  __ mtc1(scratch2, f0);
-  __ cvt_d_w(f0, f0);
-  __ sdc1(f0, MemOperand(scratch3));
-  __ Branch(USE_DELAY_SLOT, &entry);
-  __ addiu(scratch3, scratch3, kDoubleSize);  // In delay slot.
-
-  // Hole found, store the-hole NaN.
-  __ bind(&convert_hole);
-  if (FLAG_debug_code) {
-    // Restore a "smi-untagged" heap object.
-    __ SmiTag(scratch2);
-    __ Or(scratch2, scratch2, Operand(1));
-    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
-  }
-  // mantissa
-  __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
-  // exponent
-  __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
-  __ addiu(scratch3, scratch3, kDoubleSize);
-
-  __ bind(&entry);
-  __ Branch(&loop, lt, scratch3, Operand(array_end));
-
-  __ bind(&done);
-  __ pop(ra);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  // Register ra contains the return address.
-  Label entry, loop, convert_hole, gc_required, only_change_map;
-  Register elements = t0;
-  Register array = t2;
-  Register length = t1;
-  Register scratch = t5;
-
-  // Verify input registers don't conflict with locals.
-  DCHECK(!AreAliased(receiver, key, value, target_map,
-                     elements, array, length, scratch));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(elements));
-
-  __ MultiPush(
-      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
-
-  __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  // elements: source FixedArray
-  // length: number of elements (smi-tagged)
-
-  // Allocate new FixedArray.
-  // Re-use value and target_map registers, as they have been saved on the
-  // stack.
-  Register array_size = value;
-  Register allocate_scratch = target_map;
-  __ sll(array_size, length, 1);
-  __ Addu(array_size, array_size, FixedDoubleArray::kHeaderSize);
-  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
-              NO_ALLOCATION_FLAGS);
-  // array: destination FixedArray, not tagged as heap object
-  // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
-  __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ sw(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
-
-  // Prepare for conversion loop.
-  Register src_elements = elements;
-  Register dst_elements = target_map;
-  Register dst_end = length;
-  Register heap_number_map = scratch;
-  __ Addu(src_elements, src_elements, Operand(
-        FixedDoubleArray::kHeaderSize - kHeapObjectTag
-        + Register::kExponentOffset));
-  __ Addu(dst_elements, array,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Lsa(dst_end, dst_elements, dst_end, 1);
-
-  // Allocating heap numbers in the loop below can fail and cause a jump to
-  // gc_required. We can't leave a partly initialized FixedArray behind,
-  // so pessimistically fill it with holes now.
-  Label initialization_loop, initialization_loop_entry;
-  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-  __ Branch(&initialization_loop_entry);
-  __ bind(&initialization_loop);
-  __ sw(scratch, MemOperand(dst_elements));
-  __ Addu(dst_elements, dst_elements, Operand(kPointerSize));
-  __ bind(&initialization_loop_entry);
-  __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));
-
-  __ Addu(dst_elements, array,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  // Using offsetted addresses.
-  // dst_elements: begin of destination FixedArray element fields, not tagged
-  // src_elements: begin of source FixedDoubleArray element fields, not tagged,
-  //               points to the exponent
-  // dst_end: end of destination FixedArray, not tagged
-  // array: destination FixedArray
-  // heap_number_map: heap number map
-  __ Branch(&entry);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ MultiPop(
-      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
-
-  __ Branch(fail);
-
-  __ bind(&loop);
-  Register upper_bits = key;
-  __ lw(upper_bits, MemOperand(src_elements));
-  __ Addu(src_elements, src_elements, kDoubleSize);
-  // upper_bits: current element's upper 32 bit
-  // src_elements: address of next element's upper 32 bit
-  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
-
-  // Non-hole double, copy value into a heap number.
-  Register heap_number = receiver;
-  Register scratch2 = value;
-  Register scratch3 = t6;
-  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
-                        &gc_required);
-  // heap_number: new heap number
-  // Load mantissa of current element, src_elements
-  // point to exponent of next element.
-  __ lw(scratch2, MemOperand(src_elements, (Register::kMantissaOffset
-      - Register::kExponentOffset - kDoubleSize)));
-  __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
-  __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
-  __ mov(scratch2, dst_elements);
-  __ sw(heap_number, MemOperand(dst_elements));
-  __ Addu(dst_elements, dst_elements, kIntSize);
-  __ RecordWrite(array,
-                 scratch2,
-                 heap_number,
-                 kRAHasBeenSaved,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-  __ Branch(&entry);
-
-  // Replace the-hole NaN with the-hole pointer.
-  __ bind(&convert_hole);
-  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
-  __ sw(scratch2, MemOperand(dst_elements));
-  __ Addu(dst_elements, dst_elements, kIntSize);
-
-  __ bind(&entry);
-  __ Branch(&loop, lt, dst_elements, Operand(dst_end));
-
-  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ sw(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ RecordWriteField(receiver,
-                      JSObject::kElementsOffset,
-                      array,
-                      scratch,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ pop(ra);
-
-  __ bind(&only_change_map);
-  // Update receiver's map.
-  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      scratch,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
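
The removed elements-transition generators mark holes in a FixedDoubleArray by storing a reserved "hole NaN" bit pattern (kHoleNanUpper32/kHoleNanLower32) that ordinary arithmetic can never produce. A hedged sketch of that encoding; the concrete constants below are assumptions, not necessarily V8's actual values:

```cpp
// Sketch of the "hole NaN" encoding: holes in a double array are a NaN with
// a reserved bit pattern, distinguishable from computed NaNs. The constant
// values here are illustrative stand-ins for kHoleNanUpper32/kHoleNanLower32.
#include <cstdint>
#include <cstdio>

const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;  // assumed value
const uint32_t kHoleNanLower32 = 0xFFF7FFFF;  // assumed value

bool IsHole(uint64_t bits) {
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32 &&
         static_cast<uint32_t>(bits) == kHoleNanLower32;
}

int main() {
  uint64_t hole = (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
  std::printf("hole detected: %d\n", IsHole(hole));  // 1
  return 0;
}
```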
 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                        Register string,
                                        Register index,
                                        Register result,
                                        Label* call_runtime) {
+  Label indirect_string_loaded;
+  __ bind(&indirect_string_loaded);
+
   // Fetch the instance type of the receiver into result register.
   __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
   __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -965,18 +623,23 @@
   __ Branch(&check_sequential, eq, at, Operand(zero_reg));
 
   // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ And(at, result, Operand(kSlicedNotConsMask));
-  __ Branch(&cons_string, eq, at, Operand(zero_reg));
+  Label cons_string, thin_string;
+  __ And(at, result, Operand(kStringRepresentationMask));
+  __ Branch(&cons_string, eq, at, Operand(kConsStringTag));
+  __ Branch(&thin_string, eq, at, Operand(kThinStringTag));
 
   // Handle slices.
-  Label indirect_string_loaded;
   __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
   __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
   __ sra(at, result, kSmiTagSize);
   __ Addu(index, index, at);
   __ jmp(&indirect_string_loaded);
 
+  // Handle thin strings.
+  __ bind(&thin_string);
+  __ lw(string, FieldMemOperand(string, ThinString::kActualOffset));
+  __ jmp(&indirect_string_loaded);
+
   // Handle cons strings.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
@@ -988,10 +651,7 @@
   __ Branch(call_runtime, ne, result, Operand(at));
   // Get the first of the two strings and load its instance type.
   __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+  __ jmp(&indirect_string_loaded);
 
   // Distinguish sequential and external strings. Only these two string
   // representations can reach here (slices and flat cons strings have been
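
The dispatch above now recognizes three indirect string shapes: a slice adds its offset and continues with its parent, a thin string forwards to its actual string, and a flat cons string forwards to its first child, looping back to indirect_string_loaded each time. An illustrative-only C++ model of that loop, with invented stand-in types:

```cpp
// Illustrative model of the indirect-string dispatch loop: follow thin,
// sliced, and flat cons strings until a direct (sequential) string is
// reached. All types here are hypothetical stand-ins for V8's classes.
#include <cstdio>

enum Kind { kSeq, kCons, kSliced, kThin };
struct Str {
  Kind kind;
  const char* chars;        // kSeq
  Str* first;               // kCons (second child assumed empty, i.e. flat)
  Str* parent; int offset;  // kSliced
  Str* actual;              // kThin
};

char CharAt(Str* s, int index) {
  for (;;) {  // mirrors the jump back to indirect_string_loaded
    switch (s->kind) {
      case kSeq:    return s->chars[index];
      case kThin:   s = s->actual; break;
      case kSliced: index += s->offset; s = s->parent; break;
      case kCons:   s = s->first; break;  // only flat cons reaches here
    }
  }
}

int main() {
  Str seq{kSeq, "hello", nullptr, nullptr, 0, nullptr};
  Str thin{kThin, nullptr, nullptr, nullptr, 0, &seq};
  Str slice{kSliced, nullptr, nullptr, &thin, 1, nullptr};
  std::printf("%c\n", CharAt(&slice, 0));  // prints 'e'
  return 0;
}
```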
@@ -1076,37 +736,29 @@
   return result;
 }
 
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
 
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
-                               MarkingParity* parity) {
-  if (IsYoungSequence(isolate, sequence)) {
-    *age = kNoAgeCodeAge;
-    *parity = NO_MARKING_PARITY;
-  } else {
-    Address target_address = Assembler::target_address_at(
-        sequence + Assembler::kInstrSize);
-    Code* stub = GetCodeFromTargetAddress(target_address);
-    GetCodeAgeAndParity(stub, age, parity);
-  }
+  Address target_address =
+      Assembler::target_address_at(sequence + Assembler::kInstrSize);
+  Code* stub = GetCodeFromTargetAddress(target_address);
+  return GetAgeOfCodeAgeStub(stub);
 }
 
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
-                                byte* sequence,
-                                Code::Age age,
-                                MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+                                Code::Age age) {
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
     Assembler::FlushICache(isolate, sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age);
     CodePatcher patcher(isolate, sequence,
                         young_length / Assembler::kInstrSize);
     // Mark this code sequence for FindPlatformCodeAgeSequence().
     patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
     // Load the stub address to t9 and call it,
-    // GetCodeAgeAndParity() extracts the stub address from this instruction.
+    // GetCodeAge() extracts the stub address from this instruction.
     patcher.masm()->li(
         t9,
         Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
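
GetCodeAge() above distinguishes a "young" prologue from a patched one and, for aged code, recovers the age stub's address from the li instruction that PatchPlatformCodeAge() emitted. A rough sketch of that patch-and-read-back pattern; the encoding is invented for illustration (the real sequence is MIPS machine code):

```cpp
// Minimal sketch of the patch-and-read-back pattern used for code aging:
// a marker distinguishes young from patched sequences, and the age stub's
// address is embedded so it can be extracted later.
#include <cstdint>
#include <cstdio>

constexpr uint32_t kYoungMarker = 0x00000000;  // assumed "young" first word
struct Sequence { uint32_t marker; uintptr_t stub_address; };

void PatchAge(Sequence* seq, uintptr_t stub) {
  seq->marker = 0xFFFFFFFF;  // CODE_AGE_MARKER_NOP stand-in
  seq->stub_address = stub;  // what GetCodeAge() reads back
}

uintptr_t GetAgeStub(const Sequence* seq) {
  if (seq->marker == kYoungMarker) return 0;  // young: no stub
  return seq->stub_address;
}

int main() {
  Sequence seq{kYoungMarker, 0};
  std::printf("young stub: %p\n", (void*)GetAgeStub(&seq));
  PatchAge(&seq, 0xDEADBEEF);
  std::printf("aged stub:  %p\n", (void*)GetAgeStub(&seq));
  return 0;
}
```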
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 478b9df..46b8728 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -93,7 +93,7 @@
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
   for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
-    double double_value = input_->GetDoubleRegister(i);
+    Float64 double_value = input_->GetDoubleRegister(i);
     output_frame->SetDoubleRegister(i, double_value);
   }
 }
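
Copying deoptimizer registers as Float64 rather than double plausibly exists to preserve exact bit patterns: a plain C++ double copy can silently quiet a signaling NaN. A sketch of a bit-preserving wrapper under that assumption:

```cpp
// Sketch of why a bit-preserving Float64 wrapper matters: copying raw bits
// keeps a signaling-NaN payload intact, whereas moving the value through a
// C++ double may quiet it. This wrapper is an assumed stand-in for V8's
// Float64 type, not its real definition.
#include <cstdint>
#include <cstring>
#include <cstdio>

struct Float64 {
  uint64_t bits;
  static Float64 FromBits(uint64_t b) { return Float64{b}; }
  double to_double() const { double d; std::memcpy(&d, &bits, sizeof d); return d; }
};

int main() {
  const uint64_t signaling_nan = 0x7FF0000000000001ULL;  // sNaN bit pattern
  Float64 preserved = Float64::FromBits(signaling_nan);
  std::printf("preserved bits: 0x%016llx\n",
              (unsigned long long)preserved.bits);  // payload survives the copy
  return 0;
}
```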
diff --git a/src/mips/interface-descriptors-mips.cc b/src/mips/interface-descriptors-mips.cc
index 486ae68..eb47d1c 100644
--- a/src/mips/interface-descriptors-mips.cc
+++ b/src/mips/interface-descriptors-mips.cc
@@ -64,37 +64,10 @@
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {a2};
+  Register registers[] = {a1, a2, a3};
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void FastNewObjectDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a1, a3};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a1};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a1};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a1};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 // static
 const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
 
@@ -146,15 +119,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {a1, a3};
+  Register registers[] = {a1, a0, a3};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {a1, a0, a3, a2};
   data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -183,6 +154,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // a1: the target to call
+  // a2: start index (to support rest parameters)
+  Register registers[] = {a1, a2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void ConstructStubDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -218,13 +196,12 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
-  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
-      CallInterfaceDescriptorData* data) {                          \
-    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
-  }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+  Register registers[] = {a1, a3, a0, a2};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
 
 void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -414,6 +391,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      a1,  // loaded new FP
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
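
Each descriptor above pins a call's parameters to fixed machine registers; for example, CallForwardVarargs places the target in a1 and the start index in a2. An illustrative model of the registration pattern, with simplified stand-in types:

```cpp
// Illustrative model of descriptor registration: a descriptor records which
// machine registers carry each parameter. Register names and the data type
// are simplified stand-ins for the real CallInterfaceDescriptorData.
#include <cstdio>
#include <string>
#include <vector>

struct CallInterfaceDescriptorData {
  std::vector<std::string> registers;
  void InitializePlatformSpecific(int count, const char* const* regs) {
    registers.assign(regs, regs + count);
  }
};

int main() {
  // CallForwardVarargs: a1 = target, a2 = start index (rest parameters).
  const char* regs[] = {"a1", "a2"};
  CallInterfaceDescriptorData data;
  data.InitializePlatformSpecific(2, regs);
  for (const auto& r : data.registers) std::printf("%s\n", r.c_str());
  return 0;
}
```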
 
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index c3abe4f..a28c04a 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1998,6 +1998,49 @@
   }
 }
 
+void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (IsMipsArchVariant(kMips32r2)) {
+    madd_s(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_s(scratch, fs, ft);
+    add_s(fd, fr, scratch);
+  }
+}
+
+void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (IsMipsArchVariant(kMips32r2)) {
+    madd_d(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_d(scratch, fs, ft);
+    add_d(fd, fr, scratch);
+  }
+}
+
+void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (IsMipsArchVariant(kMips32r2)) {
+    msub_s(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_s(scratch, fs, ft);
+    sub_s(fd, scratch, fr);
+  }
+}
+
+void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (IsMipsArchVariant(kMips32r2)) {
+    msub_d(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_d(scratch, fs, ft);
+    sub_d(fd, scratch, fr);
+  }
+}
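
The Madd_s/Madd_d/Msub_s/Msub_d helpers above emit the hardware multiply-add on kMips32r2 and otherwise fall back to an explicit mul through a scratch register followed by an add or sub. Whether a given MIPS madd actually fuses is revision-dependent, so treat the following only as a general C++ illustration of the single- versus double-rounding distinction that is at stake when one form substitutes for the other:

```cpp
// Single rounding (fused multiply-add) vs. two roundings (mul then add):
// the inputs are chosen so that a*b is not exactly representable, making
// the two evaluation strategies produce visibly different results.
#include <cmath>
#include <cstdio>

int main() {
  double a = 1.0 + 0x1p-30;
  double b = 1.0 + 0x1p-30;
  double c = -(1.0 + 0x1p-29);
  double fused    = std::fma(a, b, c);  // one rounding: 0x1p-60
  double separate = a * b + c;          // two roundings: 0.0
  std::printf("fused=%a separate=%a\n", fused, separate);
  return 0;
}
```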
 
 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
                                    Label* nan, Condition cond, FPURegister cmp1,
@@ -2325,186 +2368,6 @@
   }
 }
 
-#define __ masm->
-
-static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
-                         FPURegister src1, FPURegister src2, Label* equal) {
-  if (src1.is(src2)) {
-    __ Move(dst, src1);
-    return true;
-  }
-
-  Label other, compare_not_equal;
-  FPURegister left, right;
-  if (kind == MaxMinKind::kMin) {
-    left = src1;
-    right = src2;
-  } else {
-    left = src2;
-    right = src1;
-  }
-
-  __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
-  // Left and right hand side are equal, check for -0 vs. +0.
-  __ FmoveHigh(t8, src1);
-  __ Branch(&other, eq, t8, Operand(0x80000000));
-  __ Move_d(dst, right);
-  __ Branch(equal);
-  __ bind(&other);
-  __ Move_d(dst, left);
-  __ Branch(equal);
-  __ bind(&compare_not_equal);
-  return false;
-}
-
-static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
-                         FPURegister src1, FPURegister src2, Label* equal) {
-  if (src1.is(src2)) {
-    __ Move(dst, src1);
-    return true;
-  }
-
-  Label other, compare_not_equal;
-  FPURegister left, right;
-  if (kind == MaxMinKind::kMin) {
-    left = src1;
-    right = src2;
-  } else {
-    left = src2;
-    right = src1;
-  }
-
-  __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
-  // Left and right hand side are equal, check for -0 vs. +0.
-  __ FmoveLow(t8, src1);
-  __ Branch(&other, eq, t8, Operand(0x80000000));
-  __ Move_s(dst, right);
-  __ Branch(equal);
-  __ bind(&other);
-  __ Move_s(dst, left);
-  __ Branch(equal);
-  __ bind(&compare_not_equal);
-  return false;
-}
-
-#undef __
-
-void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
-                                   FPURegister src2, Label* nan) {
-  if (nan) {
-    BranchF64(nullptr, nan, eq, src1, src2);
-  }
-  if (IsMipsArchVariant(kMips32r6)) {
-    min_d(dst, src1, src2);
-  } else {
-    Label skip;
-    if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
-      if (dst.is(src1)) {
-        BranchF64(&skip, nullptr, le, src1, src2);
-        Move_d(dst, src2);
-      } else if (dst.is(src2)) {
-        BranchF64(&skip, nullptr, ge, src1, src2);
-        Move_d(dst, src1);
-      } else {
-        Label right;
-        BranchF64(&right, nullptr, gt, src1, src2);
-        Move_d(dst, src1);
-        Branch(&skip);
-        bind(&right);
-        Move_d(dst, src2);
-      }
-    }
-    bind(&skip);
-  }
-}
-
-void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
-                                   FPURegister src2, Label* nan) {
-  if (nan) {
-    BranchF64(nullptr, nan, eq, src1, src2);
-  }
-  if (IsMipsArchVariant(kMips32r6)) {
-    max_d(dst, src1, src2);
-  } else {
-    Label skip;
-    if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
-      if (dst.is(src1)) {
-        BranchF64(&skip, nullptr, ge, src1, src2);
-        Move_d(dst, src2);
-      } else if (dst.is(src2)) {
-        BranchF64(&skip, nullptr, le, src1, src2);
-        Move_d(dst, src1);
-      } else {
-        Label right;
-        BranchF64(&right, nullptr, lt, src1, src2);
-        Move_d(dst, src1);
-        Branch(&skip);
-        bind(&right);
-        Move_d(dst, src2);
-      }
-    }
-    bind(&skip);
-  }
-}
-
-void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
-                                   FPURegister src2, Label* nan) {
-  if (nan) {
-    BranchF32(nullptr, nan, eq, src1, src2);
-  }
-  if (IsMipsArchVariant(kMips32r6)) {
-    min_s(dst, src1, src2);
-  } else {
-    Label skip;
-    if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
-      if (dst.is(src1)) {
-        BranchF32(&skip, nullptr, le, src1, src2);
-        Move_s(dst, src2);
-      } else if (dst.is(src2)) {
-        BranchF32(&skip, nullptr, ge, src1, src2);
-        Move_s(dst, src1);
-      } else {
-        Label right;
-        BranchF32(&right, nullptr, gt, src1, src2);
-        Move_s(dst, src1);
-        Branch(&skip);
-        bind(&right);
-        Move_s(dst, src2);
-      }
-    }
-    bind(&skip);
-  }
-}
-
-void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
-                                   FPURegister src2, Label* nan) {
-  if (nan) {
-    BranchF32(nullptr, nan, eq, src1, src2);
-  }
-  if (IsMipsArchVariant(kMips32r6)) {
-    max_s(dst, src1, src2);
-  } else {
-    Label skip;
-    if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
-      if (dst.is(src1)) {
-        BranchF32(&skip, nullptr, ge, src1, src2);
-        Move_s(dst, src2);
-      } else if (dst.is(src2)) {
-        BranchF32(&skip, nullptr, le, src1, src2);
-        Move_s(dst, src1);
-      } else {
-        Label right;
-        BranchF32(&right, nullptr, lt, src1, src2);
-        Move_s(dst, src1);
-        Branch(&skip);
-        bind(&right);
-        Move_s(dst, src2);
-      }
-    }
-    bind(&skip);
-  }
-}
-
 void MacroAssembler::Clz(Register rd, Register rs) {
   if (IsMipsArchVariant(kLoongson)) {
     DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
@@ -4022,17 +3885,16 @@
   push(at);
 }
 
-
-void MacroAssembler::DebugBreak() {
-  PrepareCEntryArgs(0);
-  PrepareCEntryFunction(
-      ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
-  CEntryStub ces(isolate(), 1);
-  DCHECK(AllowThisStubCall(&ces));
-  Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+  // Check whether we need to drop frames to restart a function on the stack.
+  ExternalReference restart_fp =
+      ExternalReference::debug_restart_fp_address(isolate());
+  li(a1, Operand(restart_fp));
+  lw(a1, MemOperand(a1));
+  Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+       ne, a1, Operand(zero_reg));
 }
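
MaybeDropFrames() above polls a per-isolate restart-frame-pointer slot and diverts into the FrameDropperTrampoline only when that slot is non-zero. An assumed-shape C++ model of the check:

```cpp
// Assumed-shape sketch of the MaybeDropFrames() check: read a per-isolate
// "restart frame pointer" slot and divert to a trampoline only when it is
// non-zero. The ExternalReference load and the trampoline are modeled as
// plain C++ here.
#include <cstdint>
#include <cstdio>

uintptr_t g_restart_fp = 0;  // debug_restart_fp_address stand-in

void FrameDropperTrampoline() { std::printf("dropping frames\n"); }

void MaybeDropFrames() {
  if (g_restart_fp != 0) FrameDropperTrampoline();  // Jump(..., ne, zero_reg)
}

int main() {
  MaybeDropFrames();      // no-op: slot is zero
  g_restart_fp = 0x1234;  // debugger requested a restart
  MaybeDropFrames();      // diverts to the trampoline
  return 0;
}
```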
 
-
 // ---------------------------------------------------------------------------
 // Exception handling.
 
@@ -4334,110 +4196,6 @@
   Addu(result, result, Operand(kHeapObjectTag));
 }
 
-void MacroAssembler::AllocateTwoByteString(Register result,
-                                           Register length,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  sll(scratch1, length, 1);  // Length in bytes, not chars.
-  addiu(scratch1, scratch1,
-       kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
-  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
-  // Allocate two-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  InitializeNewString(result,
-                      length,
-                      Heap::kStringMapRootIndex,
-                      scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
-                                           Register scratch1, Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string
-  // while observing object alignment.
-  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  DCHECK(kCharSize == 1);
-  addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
-  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
-  // Allocate one-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
-                                               Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-  InitializeNewString(result,
-                      length,
-                      Heap::kConsStringMapRootIndex,
-                      scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
-                                                 Register length,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result,
-                      length,
-                      Heap::kSlicedStringMapRootIndex,
-                      scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
-                                                 Register length,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
                                                      Label* not_unique_name) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
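
The removed string allocators size their payload by rounding characters-plus-header up to the object alignment (the sll/addiu/And sequence above). A worked example of that computation; the alignment and header values below are illustrative assumptions:

```cpp
// Worked example of the size computation in the removed
// AllocateTwoByteString: bytes = 2 * length, plus header, rounded up to the
// object alignment. The alignment mask and header size are assumed values.
#include <cstdio>

int main() {
  const int kObjectAlignmentMask = 7;  // assume 8-byte object alignment
  const int kHeaderSize = 16;          // assumed SeqTwoByteString header
  int length = 5;                      // characters
  int size = (2 * length + kObjectAlignmentMask + kHeaderSize)
             & ~kObjectAlignmentMask;  // the sll; addiu; And in the MIPS code
  std::printf("allocated %d bytes for %d chars\n", size, length);  // 32
  return 0;
}
```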
@@ -4518,77 +4276,6 @@
   Branch(&loop, ult, current_address, Operand(end_address));
 }
 
-void MacroAssembler::CheckFastObjectElements(Register map,
-                                             Register scratch,
-                                             Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  Branch(fail, ls, scratch,
-         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-  Branch(fail, hi, scratch,
-         Operand(Map::kMaximumBitField2FastHoleyElementValue));
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
-                                          Register scratch,
-                                          Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  Branch(fail, hi, scratch,
-         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
-                                                 Register key_reg,
-                                                 Register elements_reg,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Register scratch3,
-                                                 Label* fail,
-                                                 int elements_offset) {
-  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
-                     scratch3));
-  Label smi_value, done;
-
-  // Handle smi values specially.
-  JumpIfSmi(value_reg, &smi_value);
-
-  // Ensure that the object is a heap number
-  CheckMap(value_reg,
-           scratch1,
-           Heap::kHeapNumberMapRootIndex,
-           fail,
-           DONT_DO_SMI_CHECK);
-
-  // Double value, turn potential sNaN into qNan.
-  DoubleRegister double_result = f0;
-  DoubleRegister double_scratch = f2;
-
-  ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-  Branch(USE_DELAY_SLOT, &done);  // Canonicalization is one instruction.
-  FPUCanonicalizeNaN(double_result, double_result);
-
-  bind(&smi_value);
-  Register untagged_value = scratch2;
-  SmiUntag(untagged_value, value_reg);
-  mtc1(untagged_value, double_scratch);
-  cvt_d_w(double_result, double_scratch);
-
-  bind(&done);
-  Addu(scratch1, elements_reg,
-      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
-              elements_offset));
-  Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  // scratch1 is now effective address of the double element
-  sdc1(double_result, MemOperand(scratch1, 0));
-}
-
 void MacroAssembler::CompareMapAndBranch(Register obj,
                                          Register scratch,
                                          Handle<Map> map,
@@ -4870,17 +4557,15 @@
   }
 }
 
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
-                                             const ParameterCount& expected,
-                                             const ParameterCount& actual) {
-  Label skip_flooding;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  li(t0, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual) {
+  Label skip_hook;
+  ExternalReference debug_hook_active =
+      ExternalReference::debug_hook_on_function_call_address(isolate());
+  li(t0, Operand(debug_hook_active));
   lb(t0, MemOperand(t0));
-  Branch(&skip_flooding, lt, t0, Operand(StepIn));
+  Branch(&skip_hook, eq, t0, Operand(zero_reg));
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4897,7 +4582,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    CallRuntime(Runtime::kDebugOnFunctionCall);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -4911,7 +4596,7 @@
       SmiUntag(expected.reg());
     }
   }
-  bind(&skip_flooding);
+  bind(&skip_hook);
 }
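
CheckDebugHook() above replaces the old step-flooding test with a single byte flag: the flag is loaded, compared against zero, and only a non-zero value enters the runtime via Runtime::kDebugOnFunctionCall. A simplified C++ model of that fast path, with stand-in names:

```cpp
// Simplified model of the CheckDebugHook() fast path: the common
// (non-debugging) case costs one load and one branch; only a set flag
// enters the runtime. All names are stand-ins.
#include <cstdio>

unsigned char g_debug_hook_on_function_call = 0;

void RuntimeDebugOnFunctionCall() { std::printf("debug hook fired\n"); }

void CheckDebugHook() {
  if (g_debug_hook_on_function_call == 0) return;  // skip_hook
  RuntimeDebugOnFunctionCall();
}

int main() {
  CheckDebugHook();                   // fast path: nothing happens
  g_debug_hook_on_function_call = 1;  // debugger attached
  CheckDebugHook();
  return 0;
}
```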
 
 
@@ -4925,8 +4610,8 @@
   DCHECK(function.is(a1));
   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
 
-  if (call_wrapper.NeedsDebugStepCheck()) {
-    FloodFunctionIfStepping(function, new_target, expected, actual);
+  if (call_wrapper.NeedsDebugHookCheck()) {
+    CheckDebugHook(function, new_target, expected, actual);
   }
 
   // Clear the new.target register if not given.
@@ -5051,32 +4736,6 @@
   bind(&done);
 }
 
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
-                                             Register scratch, Label* miss) {
-  // Get the prototype or initial map from the function.
-  lw(result,
-     FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // If the prototype or initial map is the hole, don't return it and
-  // simply miss the cache instead. This will allow us to allocate a
-  // prototype object on-demand in the runtime system.
-  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
-  Branch(miss, eq, result, Operand(t8));
-
-  // If the function does not have an initial map, we're done.
-  Label done;
-  GetObjectType(result, scratch, scratch);
-  Branch(&done, ne, scratch, Operand(MAP_TYPE));
-
-  // Get the prototype from the initial map.
-  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
-  // All done.
-  bind(&done);
-}
-
-
 void MacroAssembler::GetObjectType(Register object,
                                    Register map,
                                    Register type_reg) {
@@ -5603,27 +5262,6 @@
   }
 }
 
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind,
-    ElementsKind transitioned_kind,
-    Register map_in_out,
-    Register scratch,
-    Label* no_map_match) {
-  DCHECK(IsFastElementsKind(expected_kind));
-  DCHECK(IsFastElementsKind(transitioned_kind));
-
-  // Check that the function's map is the same as the expected cached map.
-  lw(scratch, NativeContextMemOperand());
-  lw(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
-  Branch(no_map_match, ne, map_in_out, Operand(at));
-
-  // Use the transitioned cached map.
-  lw(map_in_out,
-     ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
   lw(dst, NativeContextMemOperand());
   lw(dst, ContextMemOperand(dst, index));
@@ -5646,7 +5284,7 @@
 }
 
 void MacroAssembler::StubPrologue(StackFrame::Type type) {
-  li(at, Operand(Smi::FromInt(type)));
+  li(at, Operand(StackFrame::TypeToMarker(type)));
   PushCommonFrame(at);
 }
 
@@ -5661,7 +5299,7 @@
     Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
     nop(Assembler::CODE_AGE_MARKER_NOP);
     // Load the stub address to t9 and call it,
-    // GetCodeAgeAndParity() extracts the stub address from this instruction.
+    // GetCodeAge() extracts the stub address from this instruction.
     li(t9,
        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
        CONSTANT_SIZE);
@@ -5675,11 +5313,10 @@
   }
 }
 
-
-void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
   lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  lw(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
-  lw(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+  lw(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+  lw(vector, FieldMemOperand(vector, Cell::kValueOffset));
 }
 
 
@@ -5705,7 +5342,7 @@
   stack_offset -= kPointerSize;
   sw(fp, MemOperand(sp, stack_offset));
   stack_offset -= kPointerSize;
-  li(t9, Operand(Smi::FromInt(type)));
+  li(t9, Operand(StackFrame::TypeToMarker(type)));
   sw(t9, MemOperand(sp, stack_offset));
   if (type == StackFrame::INTERNAL) {
     DCHECK_EQ(stack_offset, kPointerSize);
@@ -5762,7 +5399,7 @@
   addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
   sw(ra, MemOperand(sp, 4 * kPointerSize));
   sw(fp, MemOperand(sp, 3 * kPointerSize));
-  li(at, Operand(Smi::FromInt(frame_type)));
+  li(at, Operand(StackFrame::TypeToMarker(frame_type)));
   sw(at, MemOperand(sp, 2 * kPointerSize));
   // Set up new frame pointer.
   addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
@@ -5862,21 +5499,6 @@
   addiu(sp, sp, 8);
 }
 
-
-void MacroAssembler::InitializeNewString(Register string,
-                                         Register length,
-                                         Heap::RootListIndex map_index,
-                                         Register scratch1,
-                                         Register scratch2) {
-  sll(scratch1, length, kSmiTagSize);
-  LoadRoot(scratch2, map_index);
-  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
-  li(scratch1, Operand(String::kEmptyHashField));
-  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
-  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
 int MacroAssembler::ActivationFrameAlignment() {
 #if V8_HOST_ARCH_MIPS
   // Running on the real platform. Use the alignment as mandated by the local
@@ -5955,14 +5577,6 @@
   SmiUntag(dst, src);
 }
 
-
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
-                                          Register src,
-                                          Label* non_smi_case) {
-  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
-  SmiUntag(dst, src);
-}
-
 void MacroAssembler::JumpIfSmi(Register value,
                                Label* smi_label,
                                Register scratch,
@@ -6157,6 +5771,179 @@
                                                scratch2, failure);
 }
 
+void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
+                                FPURegister src2, Label* out_of_line) {
+  if (src1.is(src2)) {
+    Move_s(dst, src1);
+    return;
+  }
+
+  // Check if one of the operands is NaN.
+  BranchF32(nullptr, out_of_line, eq, src1, src2);
+
+  if (IsMipsArchVariant(kMips32r6)) {
+    max_s(dst, src1, src2);
+  } else {
+    Label return_left, return_right, done;
+
+    BranchF32(&return_right, nullptr, lt, src1, src2);
+    BranchF32(&return_left, nullptr, lt, src2, src1);
+
+    // Operands are equal, but check for +/-0.
+    mfc1(t8, src1);
+    Branch(&return_left, eq, t8, Operand(zero_reg));
+    Branch(&return_right);
+
+    bind(&return_right);
+    if (!src2.is(dst)) {
+      Move_s(dst, src2);
+    }
+    Branch(&done);
+
+    bind(&return_left);
+    if (!src1.is(dst)) {
+      Move_s(dst, src1);
+    }
+
+    bind(&done);
+  }
+}
+
+void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+                                         FPURegister src2) {
+  add_s(dst, src1, src2);
+}
+
+void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
+                                FPURegister src2, Label* out_of_line) {
+  if (src1.is(src2)) {
+    Move_s(dst, src1);
+    return;
+  }
+
+  // Check if one of the operands is NaN.
+  BranchF32(nullptr, out_of_line, eq, src1, src2);
+
+  if (IsMipsArchVariant(kMips32r6)) {
+    min_s(dst, src1, src2);
+  } else {
+    Label return_left, return_right, done;
+
+    BranchF32(&return_left, nullptr, lt, src1, src2);
+    BranchF32(&return_right, nullptr, lt, src2, src1);
+
+    // Left equals right => check for -0.
+    mfc1(t8, src1);
+    Branch(&return_right, eq, t8, Operand(zero_reg));
+    Branch(&return_left);
+
+    bind(&return_right);
+    if (!src2.is(dst)) {
+      Move_s(dst, src2);
+    }
+    Branch(&done);
+
+    bind(&return_left);
+    if (!src1.is(dst)) {
+      Move_s(dst, src1);
+    }
+
+    bind(&done);
+  }
+}
+
+void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+                                         FPURegister src2) {
+  add_s(dst, src1, src2);
+}
+
+void MacroAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
+                                DoubleRegister src2, Label* out_of_line) {
+  if (src1.is(src2)) {
+    Move_d(dst, src1);
+    return;
+  }
+
+  // Check if one of the operands is NaN.
+  BranchF64(nullptr, out_of_line, eq, src1, src2);
+
+  if (IsMipsArchVariant(kMips32r6)) {
+    max_d(dst, src1, src2);
+  } else {
+    Label return_left, return_right, done;
+
+    BranchF64(&return_right, nullptr, lt, src1, src2);
+    BranchF64(&return_left, nullptr, lt, src2, src1);
+
+    // Left equals right => check for -0.
+    Mfhc1(t8, src1);
+    Branch(&return_left, eq, t8, Operand(zero_reg));
+    Branch(&return_right);
+
+    bind(&return_right);
+    if (!src2.is(dst)) {
+      Move_d(dst, src2);
+    }
+    Branch(&done);
+
+    bind(&return_left);
+    if (!src1.is(dst)) {
+      Move_d(dst, src1);
+    }
+
+    bind(&done);
+  }
+}
+
+void MacroAssembler::Float64MaxOutOfLine(DoubleRegister dst,
+                                         DoubleRegister src1,
+                                         DoubleRegister src2) {
+  add_d(dst, src1, src2);
+}
+
+void MacroAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
+                                DoubleRegister src2, Label* out_of_line) {
+  if (src1.is(src2)) {
+    Move_d(dst, src1);
+    return;
+  }
+
+  // Check if one of the operands is NaN.
+  BranchF64(nullptr, out_of_line, eq, src1, src2);
+
+  if (IsMipsArchVariant(kMips32r6)) {
+    min_d(dst, src1, src2);
+  } else {
+    Label return_left, return_right, done;
+
+    BranchF64(&return_left, nullptr, lt, src1, src2);
+    BranchF64(&return_right, nullptr, lt, src2, src1);
+
+    // Operands are equal, but check for +/-0: a zero high word means src1
+    // is +0.0, so for min return src2, which may be -0.0.
+    Mfhc1(t8, src1);
+    Branch(&return_right, eq, t8, Operand(zero_reg));
+    Branch(&return_left);
+
+    bind(&return_right);
+    if (!src2.is(dst)) {
+      Move_d(dst, src2);
+    }
+    Branch(&done);
+
+    bind(&return_left);
+    if (!src1.is(dst)) {
+      Move_d(dst, src1);
+    }
+
+    bind(&done);
+  }
+}
+
+void MacroAssembler::Float64MinOutOfLine(DoubleRegister dst,
+                                         DoubleRegister src1,
+                                         DoubleRegister src2) {
+  add_d(dst, src1, src2);
+}
 
 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
     Register first, Register second, Register scratch1, Register scratch2,
@@ -6172,19 +5959,6 @@
   Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
 }
 
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
-                                                              Register scratch,
-                                                              Label* failure) {
-  const int kFlatOneByteStringMask =
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatOneByteStringTag =
-      kStringTag | kOneByteStringTag | kSeqStringTag;
-  And(scratch, type, Operand(kFlatOneByteStringMask));
-  Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
-}
-
-
 static const int kRegisterPassedArguments = 4;
 
 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -6622,40 +6396,6 @@
   return no_reg;
 }
 
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
-    Register object,
-    Register scratch0,
-    Register scratch1,
-    Label* found) {
-  DCHECK(!scratch1.is(scratch0));
-  Factory* factory = isolate()->factory();
-  Register current = scratch0;
-  Label loop_again, end;
-
-  // Scratch contained elements pointer.
-  Move(current, object);
-  lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
-  lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
-  Branch(&end, eq, current, Operand(factory->null_value()));
-
-  // Loop based on the map going up the prototype chain.
-  bind(&loop_again);
-  lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
-  lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
-  Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
-  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
-  DecodeField<Map::ElementsKindBits>(scratch1);
-  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
-  lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
-  Branch(&loop_again, ne, current, Operand(factory->null_value()));
-
-  bind(&end);
-}
-
-
 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                 Register reg5, Register reg6, Register reg7, Register reg8,
                 Register reg9, Register reg10) {
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 824a3bf..5bffd89 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -208,6 +208,12 @@
               Heap::RootListIndex index,
               BranchDelaySlot bdslot = PROTECT);
 
+// Number of instructions needed to calculate a switch table entry address.
+#ifdef _MIPS_ARCH_MIPS32R6
+  static const int kSwitchTablePrologueSize = 5;
+#else
+  static const int kSwitchTablePrologueSize = 10;
+#endif
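+  // (On r6 the addiupc-based prologue takes 5 instructions; pre-r6 needs a
+  // 10-instruction bal-based sequence. See GenerateSwitchTable.)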
   // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
   // functor/function with a 'Label* func(size_t index)' declaration.
   template <typename Func>
@@ -305,17 +311,6 @@
   void Movt(Register rd, Register rs, uint16_t cc = 0);
   void Movf(Register rd, Register rs, uint16_t cc = 0);
 
-  // Min, Max macros.
-  // On pre-r6 these functions may modify at and t8 registers.
-  void MinNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
-                     Label* nan = nullptr);
-  void MaxNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
-                     Label* nan = nullptr);
-  void MinNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
-                     Label* nan = nullptr);
-  void MaxNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
-                     Label* nan = nullptr);
-
   void Clz(Register rd, Register rs);
 
   // Jump unconditionally to given label.
@@ -560,32 +555,6 @@
   void FastAllocate(Register object_size, Register result, Register result_new,
                     Register scratch, AllocationFlags flags);
 
-  void AllocateTwoByteString(Register result,
-                             Register length,
-                             Register scratch1,
-                             Register scratch2,
-                             Register scratch3,
-                             Label* gc_required);
-  void AllocateOneByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-  void AllocateTwoByteConsString(Register result,
-                                 Register length,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Label* gc_required);
-  void AllocateOneByteConsString(Register result, Register length,
-                                 Register scratch1, Register scratch2,
-                                 Label* gc_required);
-  void AllocateTwoByteSlicedString(Register result,
-                                   Register length,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Label* gc_required);
-  void AllocateOneByteSlicedString(Register result, Register length,
-                                   Register scratch1, Register scratch2,
-                                   Label* gc_required);
-
   // Allocates a heap number or jumps to the gc_required label if the young
   // space is full and a scavenge is needed. All registers are also
   // clobbered when control continues at the gc_required label.
@@ -892,6 +861,15 @@
   // general-purpose register.
   void Mfhc1(Register rt, FPURegister fs);
 
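+  // Multiply-accumulate helpers modelled on the MIPS madd.fmt/msub.fmt
+  // instructions; scratch is used as a temporary on variants that lack a
+  // native madd/msub.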
+  void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+  void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+  void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+  void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+
   // Wrapper functions for the different cmp/branch types.
   inline void BranchF32(Label* target, Label* nan, Condition cc,
                         FPURegister cmp1, FPURegister cmp2,
@@ -1037,17 +1015,6 @@
     LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
   }
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the native context if the map in register
-  // map_in_out is the cached Array map in the native context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(
-      ElementsKind expected_kind,
-      ElementsKind transitioned_kind,
-      Register map_in_out,
-      Register scratch,
-      Label* no_map_match);
-
   void LoadNativeContextSlot(int index, Register dst);
 
   // Load the initial map from the global function. The registers
@@ -1080,9 +1047,10 @@
                           const ParameterCount& actual, InvokeFlag flag,
                           const CallWrapper& call_wrapper);
 
-  void FloodFunctionIfStepping(Register fun, Register new_target,
-                               const ParameterCount& expected,
-                               const ParameterCount& actual);
+  // On function call, call into the debugger if necessary.
+  void CheckDebugHook(Register fun, Register new_target,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
@@ -1112,12 +1080,9 @@
                         Register scratch,
                         Label* fail);
 
-  // -------------------------------------------------------------------------
-  // Debugger Support.
+  // Frame restart support.
+  void MaybeDropFrames();
 
-  void DebugBreak();
-
-  // -------------------------------------------------------------------------
   // Exception handling.
 
   // Push a new stack handler and link into stack handler chain.
@@ -1141,14 +1106,6 @@
   void GetMapConstructor(Register result, Register map, Register temp,
                          Register temp2);
 
-  // Try to get function prototype of a function and puts the value in
-  // the result register. Checks that the function really is a
-  // function and jumps to the miss label if the fast checks fail. The
-  // function register will be untouched; the other registers may be
-  // clobbered.
-  void TryGetFunctionPrototype(Register function, Register result,
-                               Register scratch, Label* miss);
-
   void GetObjectType(Register function,
                      Register map,
                      Register type_reg);
@@ -1158,30 +1115,6 @@
         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
   }
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map,
-                               Register scratch,
-                               Label* fail);
-
-  // Check if a map for a JSObject indicates that the object has fast smi only
-  // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map,
-                            Register scratch,
-                            Label* fail);
-
-  // Check to see if maybe_number can be stored as a double in
-  // FastDoubleElements. If it can, store it at the index specified by key in
-  // the FastDoubleElements array elements. Otherwise jump to fail.
-  void StoreNumberToDoubleElements(Register value_reg,
-                                   Register key_reg,
-                                   Register elements_reg,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Register scratch3,
-                                   Label* fail,
-                                   int elements_offset = 0);
-
   // Compare an object's map with the specified map and its transitioned
   // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
   // "branch_to" if the result of the comparison is "cond". If multiple map
@@ -1331,6 +1264,31 @@
                     Label* overflow_label, Label* no_overflow_label,
                     Register scratch = at);
 
+  // Perform a floating-point min or max operation with the
+  // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt.
+  // NaN operands, which are expected to be rare, are handled in out-of-line
+  // code; +/-0.0 is resolved inline. The exact code generated depends on
+  // the available instructions (r6 provides native MIN.fmt/MAX.fmt).
+  //
+  // If src1 and src2 are the same register, the value is simply moved to
+  // dst; otherwise the result may alias either input register.
+  void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
+                  Label* out_of_line);
+  void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
+                  Label* out_of_line);
+  void Float64Max(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
+                  Label* out_of_line);
+  void Float64Min(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
+                  Label* out_of_line);
+
+  // Generate out-of-line cases for the macros above.
+  void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+  void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+  void Float64MaxOutOfLine(DoubleRegister dst, DoubleRegister src1,
+                           DoubleRegister src2);
+  void Float64MinOutOfLine(DoubleRegister dst, DoubleRegister src1,
+                           DoubleRegister src2);
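+  //
+  // Typical use (an illustrative sketch; the register and label names are
+  // assumptions, not part of this interface):
+  //
+  //   Label ool, done;
+  //   Float32Max(f0, f2, f4, &ool);      // fast path; NaNs branch to ool
+  //   Branch(&done);
+  //   bind(&ool);
+  //   Float32MaxOutOfLine(f0, f2, f4);   // rare NaN case
+  //   bind(&done);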
+
   // -------------------------------------------------------------------------
   // Runtime calls.
 
@@ -1557,10 +1515,6 @@
   // Source and destination can be the same register.
   void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
 
-  // Untag the source value into destination and jump if source is not a smi.
-  // Souce and destination can be the same register.
-  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
   // Jump if the register contains a smi.
   void JumpIfSmi(Register value,
                  Label* smi_label,
@@ -1630,11 +1584,6 @@
       Register first_object_instance_type, Register second_object_instance_type,
       Register scratch1, Register scratch2, Label* failure);
 
-  // Check if instance type is sequential one-byte string and jump to label if
-  // it is not.
-  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
-                                                Label* failure);
-
   void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
 
   void EmitSeqStringSetCharCheck(Register string,
@@ -1708,7 +1657,7 @@
   void Prologue(bool code_pre_aging);
 
   // Load the type feedback vector from a JavaScript frame.
-  void EmitLoadTypeFeedbackVector(Register vector);
+  void EmitLoadFeedbackVector(Register vector);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -1731,20 +1680,6 @@
                                        Register scratch_reg,
                                        Label* no_memento_found);
 
-  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
-                                         Register scratch_reg,
-                                         Label* memento_found) {
-    Label no_memento_found;
-    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
-                                    &no_memento_found);
-    Branch(memento_found);
-    bind(&no_memento_found);
-  }
-
-  // Jumps to found label if a prototype map has dictionary elements.
-  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
-                                        Register scratch1, Label* found);
-
   bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
 
  private:
@@ -1796,12 +1731,6 @@
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper);
 
-  void InitializeNewString(Register string,
-                           Register length,
-                           Heap::RootListIndex map_index,
-                           Register scratch1,
-                           Register scratch2);
-
   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
   void InNewSpace(Register object, Register scratch,
                   Condition cond,  // ne for new space, eq otherwise.
@@ -1871,13 +1800,13 @@
 void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
                                          Func GetLabelFunction) {
   if (kArchVariant >= kMips32r6) {
-    BlockTrampolinePoolFor(case_count + 5);
+    BlockTrampolinePoolFor(case_count + kSwitchTablePrologueSize);
     addiupc(at, 5);
     Lsa(at, at, index, kPointerSizeLog2);
     lw(at, MemOperand(at));
   } else {
     Label here;
-    BlockTrampolinePoolFor(case_count + 10);
+    BlockTrampolinePoolFor(case_count + kSwitchTablePrologueSize);
     push(ra);
     bal(&here);
     sll(at, index, kPointerSizeLog2);  // Branch delay slot.
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index bd42399..58191a8 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -1129,9 +1129,16 @@
 
 
 int64_t Simulator::get_fpu_register(int fpureg) const {
-  DCHECK(IsFp64Mode());
-  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  return FPUregisters_[fpureg];
+  if (IsFp64Mode()) {
+    DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+    return FPUregisters_[fpureg];
+  } else {
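+    // In FP32 mode a 64-bit value lives in an even/odd register pair: the
+    // even register holds the low word, the odd register the high word.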
+    DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+    uint64_t i64;
+    i64 = static_cast<uint32_t>(get_fpu_register_word(fpureg));
+    i64 |= static_cast<uint64_t>(get_fpu_register_word(fpureg + 1)) << 32;
+    return static_cast<int64_t>(i64);
+  }
 }
 
 
@@ -1688,18 +1695,77 @@
 // executed in the simulator.  Since the host is typically IA32, we will not
 // get the correct MIPS-like behaviour on unaligned accesses.
 
-void Simulator::TraceRegWr(int32_t value) {
+void Simulator::TraceRegWr(int32_t value, TraceType t) {
   if (::v8::internal::FLAG_trace_sim) {
-    SNPrintF(trace_buf_, "%08x", value);
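+    // A union lets the same raw bits be printed either as an integer or as
+    // a float.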
+    union {
+      int32_t fmt_int32;
+      float fmt_float;
+    } v;
+    v.fmt_int32 = value;
+
+    switch (t) {
+      case WORD:
+        SNPrintF(trace_buf_, "%08" PRIx32 "    (%" PRIu64 ")    int32:%" PRId32
+                             " uint32:%" PRIu32,
+                 value, icount_, value, value);
+        break;
+      case FLOAT:
+        SNPrintF(trace_buf_, "%08" PRIx32 "    (%" PRIu64 ")    flt:%e",
+                 v.fmt_int32, icount_, v.fmt_float);
+        break;
+      default:
+        UNREACHABLE();
+    }
   }
 }
 
+void Simulator::TraceRegWr(int64_t value, TraceType t) {
+  if (::v8::internal::FLAG_trace_sim) {
+    union {
+      int64_t fmt_int64;
+      double fmt_double;
+    } v;
+    v.fmt_int64 = value;
+
+    switch (t) {
+      case DWORD:
+        SNPrintF(trace_buf_, "%016" PRIx64 "    (%" PRIu64 ")    int64:%" PRId64
+                             " uint64:%" PRIu64,
+                 value, icount_, value, value);
+        break;
+      case DOUBLE:
+        SNPrintF(trace_buf_, "%016" PRIx64 "    (%" PRIu64 ")    dbl:%e",
+                 v.fmt_int64, icount_, v.fmt_double);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+}
 
 // TODO(plind): consider making icount_ printing a flag option.
-void Simulator::TraceMemRd(int32_t addr, int32_t value) {
+void Simulator::TraceMemRd(int32_t addr, int32_t value, TraceType t) {
   if (::v8::internal::FLAG_trace_sim) {
-    SNPrintF(trace_buf_, "%08x <-- [%08x]    (%" PRIu64 ")", value, addr,
-             icount_);
+    union {
+      int32_t fmt_int32;
+      float fmt_float;
+    } v;
+    v.fmt_int32 = value;
+
+    switch (t) {
+      case WORD:
+        SNPrintF(trace_buf_, "%08" PRIx32 " <-- [%08" PRIx32 "]    (%" PRIu64
+                             ")    int32:%" PRId32 " uint32:%" PRIu32,
+                 value, addr, icount_, value, value);
+        break;
+      case FLOAT:
+        SNPrintF(trace_buf_,
+                 "%08" PRIx32 " <-- [%08" PRIx32 "]    (%" PRIu64 ")    flt:%e",
+                 v.fmt_int32, addr, icount_, v.fmt_float);
+        break;
+      default:
+        UNREACHABLE();
+    }
   }
 }
 
@@ -1708,22 +1774,73 @@
   if (::v8::internal::FLAG_trace_sim) {
     switch (t) {
       case BYTE:
-        SNPrintF(trace_buf_, "      %02x --> [%08x]",
-                 static_cast<int8_t>(value), addr);
+        SNPrintF(trace_buf_,
+                 "      %02" PRIx8 " --> [%08" PRIx32 "]    (%" PRIu64 ")",
+                 static_cast<uint8_t>(value), addr, icount_);
         break;
       case HALF:
-        SNPrintF(trace_buf_, "    %04x --> [%08x]", static_cast<int16_t>(value),
-                 addr);
+        SNPrintF(trace_buf_,
+                 "    %04" PRIx16 " --> [%08" PRIx32 "]    (%" PRIu64 ")",
+                 static_cast<uint16_t>(value), addr, icount_);
         break;
       case WORD:
-        SNPrintF(trace_buf_, "%08x --> [%08x]", value, addr);
+        SNPrintF(trace_buf_,
+                 "%08" PRIx32 " --> [%08" PRIx32 "]    (%" PRIu64 ")", value,
+                 addr, icount_);
         break;
+      default:
+        UNREACHABLE();
     }
   }
 }
 
+void Simulator::TraceMemRd(int32_t addr, int64_t value, TraceType t) {
+  if (::v8::internal::FLAG_trace_sim) {
+    union {
+      int64_t fmt_int64;
+      int32_t fmt_int32[2];
+      float fmt_float[2];
+      double fmt_double;
+    } v;
+    v.fmt_int64 = value;
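+    // Note: index 1 is the high word on a little-endian host, which is the
+    // usual case for the simulator (see the IA32 comment above).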
 
-int Simulator::ReadW(int32_t addr, Instruction* instr) {
+    switch (t) {
+      case DWORD:
+        SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%08" PRIx32 "]    (%" PRIu64
+                             ")    int64:%" PRId64 " uint64:%" PRIu64,
+                 v.fmt_int64, addr, icount_, v.fmt_int64, v.fmt_int64);
+        break;
+      case DOUBLE:
+        SNPrintF(trace_buf_, "%016" PRIx64 " <-- [%08" PRIx32 "]    (%" PRIu64
+                             ")    dbl:%e",
+                 v.fmt_int64, addr, icount_, v.fmt_double);
+        break;
+      case FLOAT_DOUBLE:
+        SNPrintF(trace_buf_, "%08" PRIx32 " <-- [%08" PRIx32 "]    (%" PRIu64
+                             ")    flt:%e dbl:%e",
+                 v.fmt_int32[1], addr, icount_, v.fmt_float[1], v.fmt_double);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+void Simulator::TraceMemWr(int32_t addr, int64_t value, TraceType t) {
+  if (::v8::internal::FLAG_trace_sim) {
+    switch (t) {
+      case DWORD:
+        SNPrintF(trace_buf_,
+                 "%016" PRIx64 " --> [%08" PRIx32 "]    (%" PRIu64 ")", value,
+                 addr, icount_);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+}
+
+int Simulator::ReadW(int32_t addr, Instruction* instr, TraceType t) {
   if (addr >= 0 && addr < 0x400) {
     // This has to be a NULL-dereference, drop into debugger.
     PrintF("Memory read from bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
@@ -1733,7 +1850,16 @@
   }
   if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
     intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
-    TraceMemRd(addr, static_cast<int32_t>(*ptr));
+    switch (t) {
+      case WORD:
+        TraceMemRd(addr, static_cast<int32_t>(*ptr), t);
+        break;
+      case FLOAT:
+        // This TraceType is allowed, but tracing is left to the caller:
+        // LWC1 traces the loaded value itself, since it may complete a
+        // double register pair (see FLOAT_DOUBLE).
+        break;
+      default:
+        UNREACHABLE();
+    }
     return *ptr;
   }
   PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
@@ -1744,7 +1870,6 @@
   return 0;
 }
 
-
 void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
   if (addr >= 0 && addr < 0x400) {
     // This has to be a NULL-dereference, drop into debugger.
@@ -1766,7 +1891,6 @@
   dbg.Debug();
 }
 
-
 double Simulator::ReadD(int32_t addr, Instruction* instr) {
   if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
     double* ptr = reinterpret_cast<double*>(addr);
@@ -2460,7 +2584,7 @@
           result = lower;
           break;
       }
-      set_fpu_register_double(fd_reg(), result);
+      SetFPUDoubleResult(fd_reg(), result);
       if (result != fs) {
         set_fcsr_bit(kFCSRInexactFlagBit, true);
       }
@@ -2468,20 +2592,20 @@
     }
     case SEL:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_double(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
+      SetFPUDoubleResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
       break;
     case SELEQZ_C:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_double(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0);
+      SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0);
       break;
     case SELNEZ_C:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_double(fd_reg(), (ft_int & 0x1) != 0 ? fs : 0.0);
+      SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) != 0 ? fs : 0.0);
       break;
     case MOVZ_C: {
       DCHECK(IsMipsArchVariant(kMips32r2));
       if (rt() == 0) {
-        set_fpu_register_double(fd_reg(), fs);
+        SetFPUDoubleResult(fd_reg(), fs);
       }
       break;
     }
@@ -2490,7 +2614,7 @@
       int32_t rt_reg = instr_.RtValue();
       int32_t rt = get_register(rt_reg);
       if (rt != 0) {
-        set_fpu_register_double(fd_reg(), fs);
+        SetFPUDoubleResult(fd_reg(), fs);
       }
       break;
     }
@@ -2500,115 +2624,121 @@
       ft_cc = get_fcsr_condition_bit(ft_cc);
       if (instr_.Bit(16)) {  // Read Tf bit.
         // MOVT.D
-        if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
+        if (test_fcsr_bit(ft_cc)) SetFPUDoubleResult(fd_reg(), fs);
       } else {
         // MOVF.D
-        if (!test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
+        if (!test_fcsr_bit(ft_cc)) SetFPUDoubleResult(fd_reg(), fs);
       }
       break;
     }
     case MIN:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_double(fd_reg(), FPUMin(ft, fs));
+      SetFPUDoubleResult(fd_reg(), FPUMin(ft, fs));
       break;
     case MAX:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_double(fd_reg(), FPUMax(ft, fs));
+      SetFPUDoubleResult(fd_reg(), FPUMax(ft, fs));
       break;
     case MINA:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_double(fd_reg(), FPUMinA(ft, fs));
+      SetFPUDoubleResult(fd_reg(), FPUMinA(ft, fs));
       break;
     case MAXA:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_double(fd_reg(), FPUMaxA(ft, fs));
+      SetFPUDoubleResult(fd_reg(), FPUMaxA(ft, fs));
       break;
     case ADD_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(),
           FPUCanonalizeOperation(
               [](double lhs, double rhs) { return lhs + rhs; }, fs, ft));
       break;
     case SUB_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(),
           FPUCanonalizeOperation(
               [](double lhs, double rhs) { return lhs - rhs; }, fs, ft));
       break;
     case MADDF_D:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_double(fd_reg(), fd + (fs * ft));
+      SetFPUDoubleResult(fd_reg(), std::fma(fs, ft, fd));
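+      // std::fma rounds once, matching the fused semantics of the r6
+      // MADDF.D instruction; the old separate multiply and add rounded
+      // twice.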
       break;
     case MSUBF_D:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_double(fd_reg(), fd - (fs * ft));
+      SetFPUDoubleResult(fd_reg(), std::fma(-fs, ft, fd));
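+      // MSUBF.D computes fd - fs * ft, i.e. fma(-fs, ft, fd).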
       break;
     case MUL_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(),
           FPUCanonalizeOperation(
               [](double lhs, double rhs) { return lhs * rhs; }, fs, ft));
       break;
     case DIV_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(),
           FPUCanonalizeOperation(
               [](double lhs, double rhs) { return lhs / rhs; }, fs, ft));
       break;
     case ABS_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(),
           FPUCanonalizeOperation([](double fs) { return FPAbs(fs); }, fs));
       break;
     case MOV_D:
-      set_fpu_register_double(fd_reg(), fs);
+      SetFPUDoubleResult(fd_reg(), fs);
       break;
     case NEG_D:
-      set_fpu_register_double(
-          fd_reg(), FPUCanonalizeOperation([](double src) { return -src; },
-                                           KeepSign::yes, fs));
+      SetFPUDoubleResult(fd_reg(),
+                         FPUCanonalizeOperation([](double src) { return -src; },
+                                                KeepSign::yes, fs));
       break;
     case SQRT_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(),
           FPUCanonalizeOperation([](double fs) { return std::sqrt(fs); }, fs));
       break;
     case RSQRT_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(), FPUCanonalizeOperation(
                         [](double fs) { return 1.0 / std::sqrt(fs); }, fs));
       break;
     case RECIP_D:
-      set_fpu_register_double(
-          fd_reg(),
-          FPUCanonalizeOperation([](double fs) { return 1.0 / fs; }, fs));
+      SetFPUDoubleResult(fd_reg(), FPUCanonalizeOperation(
+                                       [](double fs) { return 1.0 / fs; }, fs));
       break;
     case C_UN_D:
       set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_EQ_D:
       set_fcsr_bit(fcsr_cc, (fs == ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_UEQ_D:
       set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_OLT_D:
       set_fcsr_bit(fcsr_cc, (fs < ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_ULT_D:
       set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_OLE_D:
       set_fcsr_bit(fcsr_cc, (fs <= ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_ULE_D:
       set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case CVT_W_D: {  // Convert double to word.
       double rounded;
       int32_t result;
       round_according_to_fcsr(fs, rounded, result, fs);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -2622,7 +2752,7 @@
         // round to the even one.
         result--;
       }
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -2631,7 +2761,7 @@
     {
       double rounded = trunc(fs);
       int32_t result = static_cast<int32_t>(rounded);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -2640,7 +2770,7 @@
     {
       double rounded = std::floor(fs);
       int32_t result = static_cast<int32_t>(rounded);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -2649,20 +2779,20 @@
     {
       double rounded = std::ceil(fs);
       int32_t result = static_cast<int32_t>(rounded);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
     } break;
     case CVT_S_D:  // Convert double to float (single).
-      set_fpu_register_float(fd_reg(), static_cast<float>(fs));
+      SetFPUFloatResult(fd_reg(), static_cast<float>(fs));
       break;
     case CVT_L_D: {  // Mips32r2: Truncate double to 64-bit long-word.
       if (IsFp64Mode()) {
         int64_t result;
         double rounded;
         round64_according_to_fcsr(fs, rounded, result, fs);
-        set_fpu_register(fd_reg(), result);
+        SetFPUResult(fd_reg(), result);
         if (set_fcsr_round64_error(fs, rounded)) {
           set_fpu_register_invalid_result64(fs, rounded);
         }
@@ -2677,7 +2807,7 @@
       double rounded = trunc(fs);
       i64 = static_cast<int64_t>(rounded);
       if (IsFp64Mode()) {
-        set_fpu_register(fd_reg(), i64);
+        SetFPUResult(fd_reg(), i64);
         if (set_fcsr_round64_error(fs, rounded)) {
           set_fpu_register_invalid_result64(fs, rounded);
         }
@@ -2697,7 +2827,7 @@
       }
       int64_t i64 = static_cast<int64_t>(result);
       if (IsFp64Mode()) {
-        set_fpu_register(fd_reg(), i64);
+        SetFPUResult(fd_reg(), i64);
         if (set_fcsr_round64_error(fs, rounded)) {
           set_fpu_register_invalid_result64(fs, rounded);
         }
@@ -2711,7 +2841,7 @@
       double rounded = std::floor(fs);
       int64_t i64 = static_cast<int64_t>(rounded);
       if (IsFp64Mode()) {
-        set_fpu_register(fd_reg(), i64);
+        SetFPUResult(fd_reg(), i64);
         if (set_fcsr_round64_error(fs, rounded)) {
           set_fpu_register_invalid_result64(fs, rounded);
         }
@@ -2725,7 +2855,7 @@
       double rounded = std::ceil(fs);
       int64_t i64 = static_cast<int64_t>(rounded);
       if (IsFp64Mode()) {
-        set_fpu_register(fd_reg(), i64);
+        SetFPUResult(fd_reg(), i64);
         if (set_fcsr_round64_error(fs, rounded)) {
           set_fpu_register_invalid_result64(fs, rounded);
         }
@@ -2795,12 +2925,13 @@
       DCHECK(result != 0);
 
       dResult = bit_cast<double>(result);
-      set_fpu_register_double(fd_reg(), dResult);
+      SetFPUDoubleResult(fd_reg(), dResult);
 
       break;
     }
     case C_F_D: {
       set_fcsr_bit(fcsr_cc, false);
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     }
     default:
@@ -2816,83 +2947,83 @@
   switch (instr_.FunctionFieldRaw()) {
     case CVT_S_W:  // Convert word to float (single).
       alu_out = get_fpu_register_signed_word(fs_reg());
-      set_fpu_register_float(fd_reg(), static_cast<float>(alu_out));
+      SetFPUFloatResult(fd_reg(), static_cast<float>(alu_out));
       break;
     case CVT_D_W:  // Convert word to double.
       alu_out = get_fpu_register_signed_word(fs_reg());
-      set_fpu_register_double(fd_reg(), static_cast<double>(alu_out));
+      SetFPUDoubleResult(fd_reg(), static_cast<double>(alu_out));
       break;
     case CMP_AF:
-      set_fpu_register_word(fd_reg(), 0);
+      SetFPUWordResult(fd_reg(), 0);
       break;
     case CMP_UN:
       if (std::isnan(fs) || std::isnan(ft)) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult(fd_reg(), 0);
       }
       break;
     case CMP_EQ:
       if (fs == ft) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult(fd_reg(), 0);
       }
       break;
     case CMP_UEQ:
       if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult(fd_reg(), 0);
       }
       break;
     case CMP_LT:
       if (fs < ft) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult(fd_reg(), 0);
       }
       break;
     case CMP_ULT:
       if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult(fd_reg(), 0);
       }
       break;
     case CMP_LE:
       if (fs <= ft) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult(fd_reg(), 0);
       }
       break;
     case CMP_ULE:
       if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult(fd_reg(), 0);
       }
       break;
     case CMP_OR:
       if (!std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult(fd_reg(), 0);
       }
       break;
     case CMP_UNE:
       if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult(fd_reg(), 0);
       }
       break;
     case CMP_NE:
       if (fs != ft) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult(fd_reg(), 0);
       }
       break;
     default:
@@ -2944,102 +3075,108 @@
           result = lower;
           break;
       }
-      set_fpu_register_float(fd_reg(), result);
+      SetFPUFloatResult(fd_reg(), result);
       if (result != fs) {
         set_fcsr_bit(kFCSRInexactFlagBit, true);
       }
       break;
     }
     case ADD_S:
-      set_fpu_register_float(
+      SetFPUFloatResult(
           fd_reg(),
           FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; },
                                  fs, ft));
       break;
     case SUB_S:
-      set_fpu_register_float(
+      SetFPUFloatResult(
           fd_reg(),
           FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; },
                                  fs, ft));
       break;
     case MADDF_S:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_float(fd_reg(), fd + (fs * ft));
+      SetFPUFloatResult(fd_reg(), std::fma(fs, ft, fd));
       break;
     case MSUBF_S:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_float(fd_reg(), fd - (fs * ft));
+      SetFPUFloatResult(fd_reg(), std::fma(-fs, ft, fd));
       break;
     case MUL_S:
-      set_fpu_register_float(
+      SetFPUFloatResult(
           fd_reg(),
           FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; },
                                  fs, ft));
       break;
     case DIV_S:
-      set_fpu_register_float(
+      SetFPUFloatResult(
           fd_reg(),
           FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; },
                                  fs, ft));
       break;
     case ABS_S:
-      set_fpu_register_float(
-          fd_reg(),
-          FPUCanonalizeOperation([](float fs) { return FPAbs(fs); }, fs));
+      SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation(
+                                      [](float fs) { return FPAbs(fs); }, fs));
       break;
     case MOV_S:
-      set_fpu_register_float(fd_reg(), fs);
+      SetFPUFloatResult(fd_reg(), fs);
       break;
     case NEG_S:
-      set_fpu_register_float(
-          fd_reg(), FPUCanonalizeOperation([](float src) { return -src; },
-                                           KeepSign::yes, fs));
+      SetFPUFloatResult(fd_reg(),
+                        FPUCanonalizeOperation([](float src) { return -src; },
+                                               KeepSign::yes, fs));
       break;
     case SQRT_S:
-      set_fpu_register_float(
+      SetFPUFloatResult(
           fd_reg(),
           FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs));
       break;
     case RSQRT_S:
-      set_fpu_register_float(
+      SetFPUFloatResult(
           fd_reg(), FPUCanonalizeOperation(
                         [](float src) { return 1.0 / std::sqrt(src); }, fs));
       break;
     case RECIP_S:
-      set_fpu_register_float(
-          fd_reg(),
-          FPUCanonalizeOperation([](float src) { return 1.0 / src; }, fs));
+      SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation(
+                                      [](float src) { return 1.0 / src; }, fs));
       break;
     case C_F_D:
       set_fcsr_bit(fcsr_cc, false);
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_UN_D:
       set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_EQ_D:
       set_fcsr_bit(fcsr_cc, (fs == ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_UEQ_D:
       set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_OLT_D:
       set_fcsr_bit(fcsr_cc, (fs < ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_ULT_D:
       set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_OLE_D:
       set_fcsr_bit(fcsr_cc, (fs <= ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_ULE_D:
       set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case CVT_D_S:
-      set_fpu_register_double(fd_reg(), static_cast<double>(fs));
+      SetFPUDoubleResult(fd_reg(), static_cast<double>(fs));
       break;
     case SEL:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_float(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
+      SetFPUFloatResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
       break;
     case CLASS_S: {  // Mips32r6 instruction
       // Convert float input to uint32_t for easier bit manipulation
@@ -3103,33 +3240,33 @@
       DCHECK(result != 0);
 
       fResult = bit_cast<float>(result);
-      set_fpu_register_float(fd_reg(), fResult);
+      SetFPUFloatResult(fd_reg(), fResult);
 
       break;
     }
     case SELEQZ_C:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_float(fd_reg(), (ft_int & 0x1) == 0
-                                           ? get_fpu_register_float(fs_reg())
-                                           : 0.0);
+      SetFPUFloatResult(
+          fd_reg(),
+          (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg()) : 0.0);
       break;
     case SELNEZ_C:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_float(fd_reg(), (ft_int & 0x1) != 0
-                                           ? get_fpu_register_float(fs_reg())
-                                           : 0.0);
+      SetFPUFloatResult(
+          fd_reg(),
+          (ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg()) : 0.0);
       break;
     case MOVZ_C: {
       DCHECK(IsMipsArchVariant(kMips32r2));
       if (rt() == 0) {
-        set_fpu_register_float(fd_reg(), fs);
+        SetFPUFloatResult(fd_reg(), fs);
       }
       break;
     }
     case MOVN_C: {
       DCHECK(IsMipsArchVariant(kMips32r2));
       if (rt() != 0) {
-        set_fpu_register_float(fd_reg(), fs);
+        SetFPUFloatResult(fd_reg(), fs);
       }
       break;
     }
@@ -3140,17 +3277,17 @@
 
       if (instr_.Bit(16)) {  // Read Tf bit.
         // MOVT.D
-        if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
+        if (test_fcsr_bit(ft_cc)) SetFPUFloatResult(fd_reg(), fs);
       } else {
         // MOVF.D
-        if (!test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
+        if (!test_fcsr_bit(ft_cc)) SetFPUFloatResult(fd_reg(), fs);
       }
       break;
     }
     case TRUNC_W_S: {  // Truncate single to word (round towards 0).
       float rounded = trunc(fs);
       int32_t result = static_cast<int32_t>(rounded);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -3160,7 +3297,7 @@
       float rounded = trunc(fs);
       int64_t i64 = static_cast<int64_t>(rounded);
       if (IsFp64Mode()) {
-        set_fpu_register(fd_reg(), i64);
+        SetFPUResult(fd_reg(), i64);
         if (set_fcsr_round64_error(fs, rounded)) {
           set_fpu_register_invalid_result64(fs, rounded);
         }
@@ -3173,7 +3310,7 @@
     {
       float rounded = std::floor(fs);
       int32_t result = static_cast<int32_t>(rounded);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -3183,7 +3320,7 @@
       float rounded = std::floor(fs);
       int64_t i64 = static_cast<int64_t>(rounded);
       if (IsFp64Mode()) {
-        set_fpu_register(fd_reg(), i64);
+        SetFPUResult(fd_reg(), i64);
         if (set_fcsr_round64_error(fs, rounded)) {
           set_fpu_register_invalid_result64(fs, rounded);
         }
@@ -3200,7 +3337,7 @@
         // round to the even one.
         result--;
       }
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -3217,7 +3354,7 @@
       }
       int64_t i64 = static_cast<int64_t>(result);
       if (IsFp64Mode()) {
-        set_fpu_register(fd_reg(), i64);
+        SetFPUResult(fd_reg(), i64);
         if (set_fcsr_round64_error(fs, rounded)) {
           set_fpu_register_invalid_result64(fs, rounded);
         }
@@ -3230,7 +3367,7 @@
     {
       float rounded = std::ceil(fs);
       int32_t result = static_cast<int32_t>(rounded);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -3240,7 +3377,7 @@
       float rounded = std::ceil(fs);
       int64_t i64 = static_cast<int64_t>(rounded);
       if (IsFp64Mode()) {
-        set_fpu_register(fd_reg(), i64);
+        SetFPUResult(fd_reg(), i64);
         if (set_fcsr_round64_error(fs, rounded)) {
           set_fpu_register_invalid_result64(fs, rounded);
         }
@@ -3251,26 +3388,26 @@
     }
     case MIN:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_float(fd_reg(), FPUMin(ft, fs));
+      SetFPUFloatResult(fd_reg(), FPUMin(ft, fs));
       break;
     case MAX:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_float(fd_reg(), FPUMax(ft, fs));
+      SetFPUFloatResult(fd_reg(), FPUMax(ft, fs));
       break;
     case MINA:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_float(fd_reg(), FPUMinA(ft, fs));
+      SetFPUFloatResult(fd_reg(), FPUMinA(ft, fs));
       break;
     case MAXA:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_fpu_register_float(fd_reg(), FPUMaxA(ft, fs));
+      SetFPUFloatResult(fd_reg(), FPUMaxA(ft, fs));
       break;
     case CVT_L_S: {
       if (IsFp64Mode()) {
         int64_t result;
         float rounded;
         round64_according_to_fcsr(fs, rounded, result, fs);
-        set_fpu_register(fd_reg(), result);
+        SetFPUResult(fd_reg(), result);
         if (set_fcsr_round64_error(fs, rounded)) {
           set_fpu_register_invalid_result64(fs, rounded);
         }
@@ -3283,7 +3420,7 @@
       float rounded;
       int32_t result;
       round_according_to_fcsr(fs, rounded, result, fs);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -3311,7 +3448,7 @@
         i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg()));
         i64 |= static_cast<int64_t>(get_fpu_register_word(fs_reg() + 1)) << 32;
       }
-      set_fpu_register_double(fd_reg(), static_cast<double>(i64));
+      SetFPUDoubleResult(fd_reg(), static_cast<double>(i64));
       break;
     case CVT_S_L:
       if (IsFp64Mode()) {
@@ -3320,79 +3457,79 @@
         i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg()));
         i64 |= static_cast<int64_t>(get_fpu_register_word(fs_reg() + 1)) << 32;
       }
-      set_fpu_register_float(fd_reg(), static_cast<float>(i64));
+      SetFPUFloatResult(fd_reg(), static_cast<float>(i64));
       break;
     case CMP_AF:  // Mips32r6 CMP.D instructions.
-      set_fpu_register(fd_reg(), 0);
+      SetFPUResult(fd_reg(), 0);
       break;
     case CMP_UN:
       if (std::isnan(fs) || std::isnan(ft)) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_EQ:
       if (fs == ft) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_UEQ:
       if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_LT:
       if (fs < ft) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_ULT:
       if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_LE:
       if (fs <= ft) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_ULE:
       if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_OR:
       if (!std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_UNE:
       if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_NE:
       if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     default:
@@ -3406,16 +3543,16 @@
     case CFC1:
       // At the moment only FCSR is supported.
       DCHECK(fs_reg() == kFCSRRegister);
-      set_register(rt_reg(), FCSR_);
+      SetResult(rt_reg(), FCSR_);
       break;
     case MFC1:
-      set_register(rt_reg(), get_fpu_register_word(fs_reg()));
+      SetResult(rt_reg(), get_fpu_register_word(fs_reg()));
       break;
     case MFHC1:
       if (IsFp64Mode()) {
-        set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
+        SetResult(rt_reg(), get_fpu_register_hi_word(fs_reg()));
       } else {
-        set_register(rt_reg(), get_fpu_register_word(fs_reg() + 1));
+        SetResult(rt_reg(), get_fpu_register_word(fs_reg() + 1));
       }
       break;
     case CTC1: {
@@ -3428,18 +3565,26 @@
         DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2));
         FCSR_ = reg & ~kFCSRNaN2008FlagMask;
       }
+      TraceRegWr(static_cast<int32_t>(FCSR_));
       break;
     }
     case MTC1:
       // Hardware writes upper 32-bits to zero on mtc1.
       set_fpu_register_hi_word(fs_reg(), 0);
       set_fpu_register_word(fs_reg(), registers_[rt_reg()]);
+      TraceRegWr(get_fpu_register_word(fs_reg()), FLOAT);
       break;
     case MTHC1:
       if (IsFp64Mode()) {
         set_fpu_register_hi_word(fs_reg(), registers_[rt_reg()]);
+        TraceRegWr(get_fpu_register(fs_reg()), DOUBLE);
       } else {
         set_fpu_register_word(fs_reg() + 1, registers_[rt_reg()]);
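+        // An even fs_reg completes an aligned even/odd pair, so the full
+        // double can be traced; for an odd fs_reg only the written word is
+        // meaningful, so it is traced as a float.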
+        if (fs_reg() % 2) {
+          TraceRegWr(get_fpu_register_word(fs_reg() + 1), FLOAT);
+        } else {
+          TraceRegWr(get_fpu_register(fs_reg()), DOUBLE);
+        }
       }
       break;
     case S: {
@@ -3472,7 +3617,7 @@
       fr = get_fpu_register_float(fr_reg());
       fs = get_fpu_register_float(fs_reg());
       ft = get_fpu_register_float(ft_reg());
-      set_fpu_register_float(fd_reg(), fs * ft + fr);
+      SetFPUFloatResult(fd_reg(), fs * ft + fr);
       break;
     }
     case MSUB_S: {
@@ -3481,7 +3626,7 @@
       fr = get_fpu_register_float(fr_reg());
       fs = get_fpu_register_float(fs_reg());
       ft = get_fpu_register_float(ft_reg());
-      set_fpu_register_float(fd_reg(), fs * ft - fr);
+      SetFPUFloatResult(fd_reg(), fs * ft - fr);
       break;
     }
     case MADD_D: {
@@ -3490,7 +3635,7 @@
       fr = get_fpu_register_double(fr_reg());
       fs = get_fpu_register_double(fs_reg());
       ft = get_fpu_register_double(ft_reg());
-      set_fpu_register_double(fd_reg(), fs * ft + fr);
+      SetFPUDoubleResult(fd_reg(), fs * ft + fr);
       break;
     }
     case MSUB_D: {
@@ -3499,7 +3644,7 @@
       fr = get_fpu_register_double(fr_reg());
       fs = get_fpu_register_double(fs_reg());
       ft = get_fpu_register_double(ft_reg());
-      set_fpu_register_double(fd_reg(), fs * ft - fr);
+      SetFPUDoubleResult(fd_reg(), fs * ft - fr);
       break;
     }
     default:
@@ -3517,11 +3662,11 @@
   switch (instr_.FunctionFieldRaw()) {
     case SELEQZ_S:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_register(rd_reg(), rt() == 0 ? rs() : 0);
+      SetResult(rd_reg(), rt() == 0 ? rs() : 0);
       break;
     case SELNEZ_S:
       DCHECK(IsMipsArchVariant(kMips32r6));
-      set_register(rd_reg(), rt() != 0 ? rs() : 0);
+      SetResult(rd_reg(), rt() != 0 ? rs() : 0);
       break;
     case JR: {
       int32_t next_pc = rs();
@@ -3622,10 +3767,10 @@
       } else {
         switch (sa()) {
           case MUL_OP:
-            set_register(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
+            SetResult(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
             break;
           case MUH_OP:
-            set_register(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
+            SetResult(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
             break;
           default:
             UNIMPLEMENTED_MIPS();
@@ -3641,10 +3786,10 @@
       } else {
         switch (sa()) {
           case MUL_OP:
-            set_register(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
+            SetResult(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
             break;
           case MUH_OP:
-            set_register(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
+            SetResult(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
             break;
           default:
             UNIMPLEMENTED_MIPS();
@@ -3657,16 +3802,16 @@
         switch (sa()) {
           case DIV_OP:
             if (rs() == INT_MIN && rt() == -1) {
-              set_register(rd_reg(), INT_MIN);
+              SetResult(rd_reg(), INT_MIN);
             } else if (rt() != 0) {
-              set_register(rd_reg(), rs() / rt());
+              SetResult(rd_reg(), rs() / rt());
             }
             break;
           case MOD_OP:
             if (rs() == INT_MIN && rt() == -1) {
-              set_register(rd_reg(), 0);
+              SetResult(rd_reg(), 0);
             } else if (rt() != 0) {
-              set_register(rd_reg(), rs() % rt());
+              SetResult(rd_reg(), rs() % rt());
             }
             break;
           default:
@@ -3692,12 +3837,12 @@
         switch (sa()) {
           case DIV_OP:
             if (rt_u() != 0) {
-              set_register(rd_reg(), rs_u() / rt_u());
+              SetResult(rd_reg(), rs_u() / rt_u());
             }
             break;
           case MOD_OP:
             if (rt_u() != 0) {
-              set_register(rd_reg(), rs_u() % rt_u());
+              SetResult(rd_reg(), rs_u() % rt_u());
             }
             break;
           default:
@@ -3791,8 +3936,7 @@
     // Conditional moves.
     case MOVN:
       if (rt()) {
-        set_register(rd_reg(), rs());
-        TraceRegWr(rs());
+        SetResult(rd_reg(), rs());
       }
       break;
     case MOVCI: {
@@ -3807,8 +3951,7 @@
     }
     case MOVZ:
       if (!rt()) {
-        set_register(rd_reg(), rs());
-        TraceRegWr(rs());
+        SetResult(rd_reg(), rs());
       }
       break;
     default:
@@ -4372,16 +4515,25 @@
     }
     case LWC1:
       set_fpu_register_hi_word(ft_reg, 0);
-      set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr_.instr()));
+      set_fpu_register_word(ft_reg,
+                            ReadW(rs + se_imm16, instr_.instr(), FLOAT));
+      if (ft_reg % 2) {
+        TraceMemRd(rs + se_imm16, get_fpu_register(ft_reg - 1), FLOAT_DOUBLE);
+      } else {
+        TraceMemRd(rs + se_imm16, get_fpu_register_word(ft_reg), FLOAT);
+      }
       break;
     case LDC1:
       set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr_.instr()));
+      TraceMemRd(rs + se_imm16, get_fpu_register(ft_reg), DOUBLE);
       break;
     case SWC1:
       WriteW(rs + se_imm16, get_fpu_register_word(ft_reg), instr_.instr());
+      TraceMemWr(rs + se_imm16, get_fpu_register_word(ft_reg));
       break;
     case SDC1:
       WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr());
+      TraceMemWr(rs + se_imm16, get_fpu_register(ft_reg));
       break;
     // ------------- PC-Relative instructions.
     case PCREL: {
@@ -4422,7 +4574,7 @@
           }
         }
       }
-      set_register(rs_reg, alu_out);
+      SetResult(rs_reg, alu_out);
       break;
     }
     default:
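
The LWC1 hunk above is the reason the new trace types include FLOAT_DOUBLE: in the simulator's register model, a 32-bit float loaded into an odd-numbered FPU register fills the upper half of an even/odd pair, so the trace reports the full 64-bit pair instead of a lone word. A minimal standalone sketch of that branch, under an assumed pairing model rather than V8's actual simulator classes:

    #include <cstdint>
    #include <cstdio>

    // Simplified model (assumption, not V8's layout): 32 word registers,
    // where even/odd pairs alias one 64-bit double register.
    uint32_t fpu_words[32];

    uint64_t PairOf(int word_reg) {
      int even = word_reg & ~1;
      return (uint64_t)fpu_words[even + 1] << 32 | fpu_words[even];
    }

    void TraceLwc1(int ft_reg, uint32_t addr) {
      if (ft_reg % 2) {
        // Odd target register: the loaded word completes the upper half
        // of an even/odd pair, so trace the 64-bit pair (FLOAT_DOUBLE).
        printf("%016llx <-- [%08x]\n",
               (unsigned long long)PairOf(ft_reg), addr);
      } else {
        printf("%08x <-- [%08x]\n", fpu_words[ft_reg], addr);  // FLOAT
      }
    }

    int main() {
      fpu_words[0] = 0x11111111; fpu_words[1] = 0x22222222;
      TraceLwc1(1, 0x1000);  // prints the f0/f1 pair
      TraceLwc1(0, 0x1004);  // prints just the word in f0
    }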
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 3795eec..2785f91 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -293,6 +293,9 @@
   // Unsupported instructions use Format to print an error and stop execution.
   void Format(Instruction* instr, const char* format);
 
+  // Helpers for data value tracing.
+  enum TraceType { BYTE, HALF, WORD, DWORD, FLOAT, DOUBLE, FLOAT_DOUBLE };
+
   // Read and write memory.
   inline uint32_t ReadBU(int32_t addr);
   inline int32_t ReadB(int32_t addr);
@@ -305,24 +308,18 @@
   inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
   inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
 
-  inline int ReadW(int32_t addr, Instruction* instr);
+  inline int ReadW(int32_t addr, Instruction* instr, TraceType t = WORD);
   inline void WriteW(int32_t addr, int value, Instruction* instr);
 
   inline double ReadD(int32_t addr, Instruction* instr);
   inline void WriteD(int32_t addr, double value, Instruction* instr);
 
-  // Helpers for data value tracing.
-  enum TraceType {
-    BYTE,
-    HALF,
-    WORD
-    // DWORD,
-    // DFLOAT - Floats may have printing issues due to paired lwc1's
-  };
-
-  void TraceRegWr(int32_t value);
-  void TraceMemWr(int32_t addr, int32_t value, TraceType t);
-  void TraceMemRd(int32_t addr, int32_t value);
+  void TraceRegWr(int32_t value, TraceType t = WORD);
+  void TraceRegWr(int64_t value, TraceType t = DWORD);
+  void TraceMemWr(int32_t addr, int32_t value, TraceType t = WORD);
+  void TraceMemRd(int32_t addr, int32_t value, TraceType t = WORD);
+  void TraceMemWr(int32_t addr, int64_t value, TraceType t = DWORD);
+  void TraceMemRd(int32_t addr, int64_t value, TraceType t = DWORD);
   EmbeddedVector<char, 128> trace_buf_;
 
   // Operations depending on endianness.
@@ -381,6 +378,26 @@
     TraceRegWr(alu_out);
   }
 
+  inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
+    set_fpu_register_word(fd_reg, alu_out);
+    TraceRegWr(get_fpu_register_word(fd_reg));
+  }
+
+  inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
+    set_fpu_register(fd_reg, alu_out);
+    TraceRegWr(get_fpu_register(fd_reg));
+  }
+
+  inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
+    set_fpu_register_float(fd_reg, alu_out);
+    TraceRegWr(get_fpu_register_word(fd_reg), FLOAT);
+  }
+
+  inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
+    set_fpu_register_double(fd_reg, alu_out);
+    TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
+  }
+
   void DecodeTypeImmediate();
   void DecodeTypeJump();
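
The hunks in simulator-mips.cc above all follow one refactor: every set_register(...) + TraceRegWr(...) pair collapses into SetResult(...), and the four new SetFPU*Result helpers do the same for the FPU paths, so a result can no longer be written without being traced. A minimal sketch of the pattern (simplified types, not V8's Simulator):

    #include <cstdint>
    #include <cstdio>

    struct Sim {
      int32_t registers[32] = {0};

      void TraceRegWr(int32_t value) { printf("write: %08x\n", value); }

      // One call site instead of set_register() followed by TraceRegWr():
      // the write and its trace can no longer drift apart.
      void SetResult(int rd, int32_t alu_out) {
        registers[rd] = alu_out;
        TraceRegWr(alu_out);
      }
    };

    int main() {
      Sim sim;
      sim.SetResult(2, 0x1234);  // traced automatically
    }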
 
diff --git a/src/mips64/assembler-mips64-inl.h b/src/mips64/assembler-mips64-inl.h
index 6078ab9..3891391 100644
--- a/src/mips64/assembler-mips64-inl.h
+++ b/src/mips64/assembler-mips64-inl.h
@@ -41,7 +41,7 @@
 
 #include "src/assembler.h"
 #include "src/debug/debug.h"
-
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -139,6 +139,17 @@
   return Assembler::kSpecialTargetSize;
 }
 
+Address Assembler::target_address_at(Address pc, Code* code) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+                                      Address target,
+                                      ICacheFlushMode icache_flush_mode) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
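
Moving these two bodies out of the class definition in assembler-mips64.h and into this -inl.h file, together with the new src/objects-inl.h include, is plausibly driven by the call to code->constant_pool(), which needs the complete Code type; the main header can then keep mere declarations. The same layering in a self-contained toy, with hypothetical names:

    // Hypothetical widget.h: a forward declaration suffices to declare
    // the inline member in the class body.
    struct Gadget;
    struct Widget {
      inline int SizeOf(Gadget* g);
    };

    // Hypothetical widget-inl.h: define the body where Gadget is
    // complete, mirroring how this -inl.h can rely on objects-inl.h.
    struct Gadget { int payload[4]; };
    inline int Widget::SizeOf(Gadget* g) {
      return g ? static_cast<int>(sizeof(*g)) : 0;
    }

    int main() {
      Gadget g;
      Widget w;
      return w.SizeOf(&g) == sizeof(Gadget) ? 0 : 1;
    }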
 
 Address Assembler::target_address_from_return_address(Address pc) {
   return pc - kCallTargetAddressOffset;
diff --git a/src/mips64/assembler-mips64.cc b/src/mips64/assembler-mips64.cc
index b35b166..b670058 100644
--- a/src/mips64/assembler-mips64.cc
+++ b/src/mips64/assembler-mips64.cc
@@ -183,13 +183,19 @@
       reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
 }
 
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+  return static_cast<uint32_t>(
+      reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
+}
+
 void RelocInfo::unchecked_update_wasm_memory_reference(
     Address address, ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
 }
 
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
-                                                  ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+                                           ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_,
                                    reinterpret_cast<Address>(size), flush_mode);
 }
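
The new getter mirrors wasm_memory_size_reference above it: a 32-bit size is smuggled through the slot that normally holds a code target address, so one patching mechanism (set_target_address_at) serves both. A round-trip sketch of the cast, assuming pointers are at least 32 bits wide:

    #include <cassert>
    #include <cstdint>

    using Address = uint8_t*;

    int main() {
      uint32_t size = 0x10000;
      // Store: the size is encoded where a code target address would go.
      Address slot = reinterpret_cast<Address>(static_cast<uintptr_t>(size));
      // Load: the getter casts the "address" back to a 32-bit size.
      uint32_t decoded =
          static_cast<uint32_t>(reinterpret_cast<uintptr_t>(slot));
      assert(decoded == size);
    }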
@@ -859,8 +865,7 @@
       } else {
         PrintF("%d\n", instr);
       }
-      next(&l, internal_reference_positions_.find(l.pos()) !=
-                   internal_reference_positions_.end());
+      next(&l, is_internal_reference(&l));
     }
   } else {
     PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
@@ -874,14 +879,15 @@
   bool is_internal = false;
   if (L->is_linked() && !trampoline_emitted_) {
     unbound_labels_count_--;
-    next_buffer_check_ += kTrampolineSlotsSize;
+    if (!is_internal_reference(L)) {
+      next_buffer_check_ += kTrampolineSlotsSize;
+    }
   }
 
   while (L->is_linked()) {
     int fixup_pos = L->pos();
     int dist = pos - fixup_pos;
-    is_internal = internal_reference_positions_.find(fixup_pos) !=
-                  internal_reference_positions_.end();
+    is_internal = is_internal_reference(L);
     next(L, is_internal);  // Call next before overwriting link with target at
                            // fixup_pos.
     Instr instr = instr_at(fixup_pos);
@@ -898,7 +904,6 @@
           CHECK((trampoline_pos - fixup_pos) <= branch_offset);
           target_at_put(fixup_pos, trampoline_pos, false);
           fixup_pos = trampoline_pos;
-          dist = pos - fixup_pos;
         }
         target_at_put(fixup_pos, pos, false);
       } else {
@@ -1940,19 +1945,64 @@
 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
   DCHECK(!src.rm().is(at));
   DCHECK(is_int32(src.offset_));
-  daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
-  dsll(at, at, kLuiShift);
-  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
-  daddu(at, at, src.rm());  // Add base register.
+
+  if (kArchVariant == kMips64r6) {
+    int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
+    if (src.offset_ & kNegOffset) {
+      if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
+        lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
+        ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
+        daddu(at, at, src.rm());                // Add base register.
+        return;
+      }
+
+      hi += 1;
+    }
+
+    daui(at, src.rm(), hi);
+    daddiu(at, at, src.offset_ & kImm16Mask);
+  } else {
+    lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
+    ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
+    daddu(at, at, src.rm());                // Add base register.
+  }
 }
 
+// Helper for loading a base register plus the upper part of an offset that
+// does not fit into int16. Loads the higher part of the offset into the AT
+// register and returns the lower part of the offset, to be used as the
+// offset in the load/store instruction.
+int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
+  DCHECK(!src.rm().is(at));
+  DCHECK(is_int32(src.offset_));
+  int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
+  // If the highest bit of the lower part of the offset is 1, this would make
+  // the offset in the load/store instruction negative. We need to compensate
+  // for this by adding 1 to the upper part of the offset.
+  if (src.offset_ & kNegOffset) {
+    if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
+      LoadRegPlusOffsetToAt(src);
+      return 0;
+    }
+
+    hi += 1;
+  }
+
+  if (kArchVariant == kMips64r6) {
+    daui(at, src.rm(), hi);
+  } else {
+    lui(at, hi);
+    daddu(at, at, src.rm());
+  }
+  return (src.offset_ & kImm16Mask);
+}
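
The compensation comment is easiest to verify with concrete numbers: the 16-bit immediate in the final load/store is sign-extended, so a low half with bit 15 set effectively subtracts 0x10000 from the address, and bumping the high half by one restores it. The guard above falls back to LoadRegPlusOffsetToAt when that bump would flip the sign bit of the high half itself. A standalone check of the arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t offset = 0x12348000;           // bit 15 of the low half is set
      int32_t hi = (offset >> 16) & 0xffff;  // 0x1234
      int32_t lo = offset & 0xffff;          // 0x8000, sign-extends to -0x8000
      hi += 1;                               // compensate for sign extension
      // Bumped high half << 16 plus the sign-extended low half restores
      // the original offset.
      assert((hi << 16) + static_cast<int16_t>(lo) == offset);
    }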
 
 void Assembler::lb(Register rd, const MemOperand& rs) {
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(LB, at, rd, off16);
   }
 }
 
@@ -1961,8 +2011,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(LBU, at, rd, off16);
   }
 }
 
@@ -1971,8 +2021,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(LH, at, rd, off16);
   }
 }
 
@@ -1981,8 +2031,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(LHU, at, rd, off16);
   }
 }
 
@@ -1991,8 +2041,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(LW, at, rd, off16);
   }
 }
 
@@ -2001,8 +2051,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(LWU, at, rd, 0);  // Equiv to lwu(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(LWU, at, rd, off16);
   }
 }
 
@@ -2025,8 +2075,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to store.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(SB, at, rd, off16);
   }
 }
 
@@ -2035,8 +2085,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to store.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(SH, at, rd, off16);
   }
 }
 
@@ -2045,8 +2095,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to store.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(SW, at, rd, off16);
   }
 }
 
@@ -2130,8 +2180,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(LD, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(LD, at, rd, off16);
   }
 }
 
@@ -2140,8 +2190,8 @@
   if (is_int16(rs.offset_)) {
     GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to store.
-    LoadRegPlusOffsetToAt(rs);
-    GenInstrImmediate(SD, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+    GenInstrImmediate(SD, at, rd, off16);
   }
 }
 
@@ -2551,8 +2601,8 @@
   if (is_int16(src.offset_)) {
     GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(src);
-    GenInstrImmediate(LWC1, at, fd, 0);
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+    GenInstrImmediate(LWC1, at, fd, off16);
   }
 }
 
@@ -2561,8 +2611,8 @@
   if (is_int16(src.offset_)) {
     GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(src);
-    GenInstrImmediate(LDC1, at, fd, 0);
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+    GenInstrImmediate(LDC1, at, fd, off16);
   }
 }
 
@@ -2571,8 +2621,8 @@
   if (is_int16(src.offset_)) {
     GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(src);
-    GenInstrImmediate(SWC1, at, fd, 0);
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+    GenInstrImmediate(SWC1, at, fd, off16);
   }
 }
 
@@ -2582,8 +2632,8 @@
   if (is_int16(src.offset_)) {
     GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
   } else {  // Offset > 16 bits, use multiple instructions to load.
-    LoadRegPlusOffsetToAt(src);
-    GenInstrImmediate(SDC1, at, fd, 0);
+    int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+    GenInstrImmediate(SDC1, at, fd, off16);
   }
 }
 
diff --git a/src/mips64/assembler-mips64.h b/src/mips64/assembler-mips64.h
index 056cc42..433c03c 100644
--- a/src/mips64/assembler-mips64.h
+++ b/src/mips64/assembler-mips64.h
@@ -155,6 +155,7 @@
 Register ToRegister(int num);
 
 static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
 
 // Coprocessor register.
 struct FPURegister {
@@ -477,17 +478,10 @@
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
     set_target_address_at(isolate, pc, target, icache_flush_mode);
   }
-  INLINE(static Address target_address_at(Address pc, Code* code)) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    return target_address_at(pc, constant_pool);
-  }
+  INLINE(static Address target_address_at(Address pc, Code* code));
   INLINE(static void set_target_address_at(
       Isolate* isolate, Address pc, Code* code, Address target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    set_target_address_at(isolate, pc, constant_pool, target,
-                          icache_flush_mode);
-  }
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
 
   // Return the code target address at a call site from the return address
   // of that call in the instruction stream.
@@ -559,6 +553,13 @@
   static const int kDebugBreakSlotLength =
       kDebugBreakSlotInstructions * kInstrSize;
 
+  // Max offset for instructions with a 16-bit offset field.
+  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+
+  // Max offset for compact branch instructions with a 26-bit offset field.
+  static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
+
+  static const int kTrampolineSlotsSize = 2 * kInstrSize;
 
   // ---------------------------------------------------------------------------
   // Code generation.
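
Hoisting these constants into the public section makes the branch ranges part of the Assembler interface rather than an implementation detail. The values follow from the encodings, assuming the usual MIPS layout: a regular branch carries a 16-bit signed word offset that is shifted left by 2, giving an 18-bit signed byte range, and a compact branch carries 26 bits, giving 28. A quick sanity check:

    #include <cassert>

    int main() {
      const int kInstrSize = 4;
      // 16-bit signed word offset << 2 => 18-bit signed byte offset.
      const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
      // 26-bit signed word offset << 2 => 28-bit signed byte offset.
      const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
      assert(kMaxBranchOffset == ((1 << 15) - 1) * kInstrSize + 3);
      assert(kMaxCompactBranchOffset == ((1 << 25) - 1) * kInstrSize + 3);
    }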
@@ -1091,9 +1092,6 @@
 
   // Debugging.
 
-  // Mark generator continuation.
-  void RecordGeneratorContinuation();
-
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot(RelocInfo::Mode mode);
 
@@ -1226,6 +1224,7 @@
   }
 
   bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+  static bool IsCompactBranchSupported() { return kArchVariant == kMips64r6; }
 
   inline int UnboundLabelsCount() { return unbound_labels_count_; }
 
@@ -1236,6 +1235,7 @@
 
   // Helpers.
   void LoadRegPlusOffsetToAt(const MemOperand& src);
+  int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
 
   // Relocation for a type-recording IC has the AST id added to it.  This
   // member variable is a way to pass the information from the call site to
@@ -1497,14 +1497,15 @@
   // branch instruction generation, where we use jump instructions rather
   // than regular branch instructions.
   bool trampoline_emitted_;
-  static const int kTrampolineSlotsSize = 2 * kInstrSize;
-  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
-  static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
   static const int kInvalidSlotPos = -1;
 
   // Internal reference positions, required for unbounded internal reference
   // labels.
   std::set<int64_t> internal_reference_positions_;
+  bool is_internal_reference(Label* L) {
+    return internal_reference_positions_.find(L->pos()) !=
+           internal_reference_positions_.end();
+  }
 
   void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
   void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
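
The new is_internal_reference predicate names the set-membership test that previously appeared twice, hand-expanded, in assembler-mips64.cc (in the label printer and in the binding loop). The same shape in isolation:

    #include <cstdint>
    #include <set>

    struct Labels {
      std::set<int64_t> internal_reference_positions_;
      bool is_internal_reference(int64_t pos) const {
        return internal_reference_positions_.count(pos) > 0;
      }
    };

    int main() {
      Labels l;
      l.internal_reference_positions_.insert(42);
      return l.is_internal_reference(42) ? 0 : 1;
    }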
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
index 97f5b73..645599a 100644
--- a/src/mips64/code-stubs-mips64.cc
+++ b/src/mips64/code-stubs-mips64.cc
@@ -32,17 +32,6 @@
   __ TailCallRuntime(Runtime::kNewArray);
 }
 
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
-  descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
-  descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                           Condition cc);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -244,8 +233,6 @@
     __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
     // Call runtime on identical symbols since we need to throw a TypeError.
     __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
-    // Call runtime on identical SIMD values since we must throw a TypeError.
-    __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
   } else {
     __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
     // Comparing JS objects with <=, >= is complicated.
@@ -253,8 +240,6 @@
       __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
       // Call runtime on identical symbols since we need to throw a TypeError.
       __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
-      // Call runtime on identical SIMD values since we must throw a TypeError.
-      __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
       // Normally here we fall through to return_equal, but undefined is
       // special: (undefined == undefined) == true, but
       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
@@ -681,8 +666,11 @@
   if (cc == eq) {
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(lhs, rhs);
-      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+      __ Push(cp);
+      __ Call(strict() ? isolate()->builtins()->StrictEqual()
+                       : isolate()->builtins()->Equal(),
+              RelocInfo::CODE_TARGET);
+      __ Pop(cp);
     }
     // Turn true into 0 and false into some non-zero value.
     STATIC_ASSERT(EQUAL == 0);
@@ -913,7 +901,6 @@
   SaveFPRegsMode mode = kSaveFPRegs;
   CEntryStub(isolate, 1, mode).GetCode();
   StoreBufferOverflowStub(isolate, mode).GetCode();
-  isolate->set_fp_stubs_generated(true);
 }
 
 
@@ -1151,9 +1138,9 @@
 
   // We build an EntryFrame.
   __ li(a7, Operand(-1));  // Push a bad frame pointer to fail if it is used.
-  int marker = type();
-  __ li(a6, Operand(Smi::FromInt(marker)));
-  __ li(a5, Operand(Smi::FromInt(marker)));
+  StackFrame::Type marker = type();
+  __ li(a6, Operand(StackFrame::TypeToMarker(marker)));
+  __ li(a5, Operand(StackFrame::TypeToMarker(marker)));
   ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
   __ li(a4, Operand(c_entry_fp));
   __ ld(a4, MemOperand(a4));
@@ -1184,12 +1171,12 @@
   __ ld(a6, MemOperand(a5));
   __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
   __ sd(fp, MemOperand(a5));
-  __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ li(a4, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   Label cont;
   __ b(&cont);
   __ nop();   // Branch delay slot nop.
   __ bind(&non_outermost_js);
-  __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+  __ li(a4, Operand(StackFrame::INNER_JSENTRY_FRAME));
   __ bind(&cont);
   __ push(a4);
 
@@ -1255,10 +1242,8 @@
   // Check if the current stack frame is marked as the outermost JS frame.
   Label non_outermost_js_2;
   __ pop(a5);
-  __ Branch(&non_outermost_js_2,
-            ne,
-            a5,
-            Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ Branch(&non_outermost_js_2, ne, a5,
+            Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   __ li(a5, Operand(ExternalReference(js_entry_sp)));
   __ sd(zero_reg, MemOperand(a5));
   __ bind(&non_outermost_js_2);
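
These JSEntry hunks stop wrapping frame markers in Smis: StackFrame::TypeToMarker now supplies the pushed value, and the outermost/inner entry markers are pushed and compared as raw integers. The real encoding is not visible in this diff; a hypothetical scheme that captures the intent:

    #include <cassert>
    #include <cstdint>

    // Hypothetical marker scheme: keep the low bit set so a marker can
    // never be mistaken for an aligned frame pointer in the same slot.
    int32_t TypeToMarker(int type) { return (type << 1) | 1; }
    int MarkerToType(int32_t marker) { return marker >> 1; }

    int main() {
      const int ENTRY = 3;  // hypothetical StackFrame::Type value
      assert(MarkerToType(TypeToMarker(ENTRY)) == ENTRY);
      assert(TypeToMarker(ENTRY) % 2 == 1);  // never pointer-aligned
    }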
@@ -1281,51 +1266,6 @@
   __ Jump(ra);
 }
 
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
-  // Return address is in ra.
-  Label miss;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register index = LoadDescriptor::NameRegister();
-  Register scratch = a5;
-  Register result = v0;
-  DCHECK(!scratch.is(receiver) && !scratch.is(index));
-  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()));
-
-  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          RECEIVER_IS_STRING);
-  char_at_generator.GenerateFast(masm);
-  __ Ret();
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  // Ensure that the vector and slot registers won't be clobbered before
-  // calling the miss handler.
-  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::VectorRegister(),
-                     LoadWithVectorDescriptor::SlotRegister()));
-
-  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a4,
-                                                          a5, &miss);
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -1428,7 +1368,7 @@
   // (6) External string.  Make it, offset-wise, look like a sequential string.
   //     Go to (4).
   // (7) Short external string or not a string?  If yes, bail out to runtime.
-  // (8) Sliced string.  Replace subject with parent.  Go to (1).
+  // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
 
   Label check_underlying;   // (1)
   Label seq_string;         // (4)
@@ -1452,6 +1392,7 @@
   // (2) Sequential or cons?  If not, go to (5).
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kThinStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   // Go to (5).
@@ -1478,12 +1419,12 @@
   __ Branch(&runtime, ls, a3, Operand(a1));
   __ SmiUntag(a1);
 
-  STATIC_ASSERT(kStringEncodingMask == 4);
-  STATIC_ASSERT(kOneByteStringTag == 4);
+  STATIC_ASSERT(kStringEncodingMask == 8);
+  STATIC_ASSERT(kOneByteStringTag == 8);
   STATIC_ASSERT(kTwoByteStringTag == 0);
   __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for one_byte.
   __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
-  __ dsra(a3, a0, 2);  // a3 is 1 for one_byte, 0 for UC16 (used below).
+  __ dsra(a3, a0, 3);  // a3 is 1 for one_byte, 0 for UC16 (used below).
   __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
   __ Movz(t9, a5, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
 
@@ -1729,12 +1670,18 @@
   __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
   __ Branch(&runtime, ne, at, Operand(zero_reg));
 
-  // (8) Sliced string.  Replace subject with parent.  Go to (4).
+  // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
+  Label thin_string;
+  __ Branch(&thin_string, eq, a1, Operand(kThinStringTag));
   // Load offset into t0 and replace subject string with parent.
   __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   __ SmiUntag(t0);
   __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   __ jmp(&check_underlying);  // Go to (1).
+
+  __ bind(&thin_string);
+  __ ld(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+  __ jmp(&check_underlying);  // Go to (1).
 #endif  // V8_INTERPRETED_REGEXP
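
With ThinString in the picture, step (8) now unwraps two indirect representations before looping back to (1): a sliced string forwards to its parent (plus an offset), and a thin string forwards to the actual string it was deduplicated to. A simplified standalone model of that unwrapping, with invented types rather than V8's object layout:

    #include <cassert>

    // Invented, simplified string model; not V8's object layout.
    struct Str {
      enum Kind { SEQ, SLICED, THIN } kind = SEQ;
      Str* parent = nullptr;  // SLICED: parent, THIN: actual string
    };

    Str* Unwrap(Str* subject) {
      while (subject->kind != Str::SEQ) {
        subject = subject->parent;  // sliced -> parent, thin -> actual
      }
      return subject;
    }

    int main() {
      Str seq, thin{Str::THIN, &seq}, sliced{Str::SLICED, &thin};
      assert(Unwrap(&sliced) == &seq);
    }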
 }
 
@@ -1772,9 +1719,9 @@
   // a3 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
 
-  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+  DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
-  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+  DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
   // Load the cache state into a5.
@@ -1785,7 +1732,7 @@
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
   // We don't know if a5 is a WeakCell or a Symbol, but it's harmless to read at
-  // this position in a symbol (see static asserts in type-feedback-vector.h).
+  // this position in a symbol (see static asserts in feedback-vector.h).
   Label check_allocation_site;
   Register feedback_map = a6;
   Register weak_value = t0;
@@ -1941,189 +1888,6 @@
   __ bind(&exit_);
 }
 
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
-                               Register slot) {
-  __ dsrl(t0, slot, 32 - kPointerSizeLog2);
-  __ Daddu(slot, feedback_vector, Operand(t0));
-  __ ld(t0, FieldMemOperand(slot, FixedArray::kHeaderSize + kPointerSize));
-  __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
-  __ sd(t0, FieldMemOperand(slot, FixedArray::kHeaderSize + kPointerSize));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
-  // a0 - number of arguments
-  // a1 - function
-  // a3 - slot id
-  // a2 - vector
-  // a4 - allocation site (loaded from vector[slot])
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
-  __ Branch(miss, ne, a1, Operand(at));
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, a2, a3);
-
-  __ mov(a2, a4);
-  __ mov(a3, a1);
-  ArrayConstructorStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
-  // a0 - number of arguments
-  // a1 - function
-  // a3 - slot id (Smi)
-  // a2 - vector
-  Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
-  // The checks. First, does r1 match the recorded monomorphic target?
-  __ dsrl(a4, a3, 32 - kPointerSizeLog2);
-  __ Daddu(a4, a2, Operand(a4));
-  __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
-
-  // We don't know that we have a weak cell. We might have a private symbol
-  // or an AllocationSite, but the memory is safe to examine.
-  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
-  // FixedArray.
-  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
-  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
-  // computed, meaning that it can't appear to be a pointer. If the low bit is
-  // 0, then hash is computed, but the 0 bit prevents the field from appearing
-  // to be a pointer.
-  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
-  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
-                    WeakCell::kValueOffset &&
-                WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
-  __ ld(a5, FieldMemOperand(a4, WeakCell::kValueOffset));
-  __ Branch(&extra_checks_or_miss, ne, a1, Operand(a5));
-
-  // The compare above could have been a SMI/SMI comparison. Guard against this
-  // convincing us that we have a monomorphic JSFunction.
-  __ JumpIfSmi(a1, &extra_checks_or_miss);
-
-  __ bind(&call_function);
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, a2, a3);
-
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
-                                                    tail_call_mode()),
-          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
-
-  __ bind(&extra_checks_or_miss);
-  Label uninitialized, miss, not_allocation_site;
-
-  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-  __ Branch(&call, eq, a4, Operand(at));
-
-  // Verify that a4 contains an AllocationSite
-  __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
-  __ Branch(&not_allocation_site, ne, a5, Operand(at));
-
-  HandleArrayCase(masm, &miss);
-
-  __ bind(&not_allocation_site);
-
-  // The following cases attempt to handle MISS cases without going to the
-  // runtime.
-  if (FLAG_trace_ic) {
-    __ Branch(&miss);
-  }
-
-  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
-  __ Branch(&uninitialized, eq, a4, Operand(at));
-
-  // We are going megamorphic. If the feedback is a JSFunction, it is fine
-  // to handle it here. More complex cases are dealt with in the runtime.
-  __ AssertNotSmi(a4);
-  __ GetObjectType(a4, a5, a5);
-  __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
-  __ dsrl(a4, a3, 32 - kPointerSizeLog2);
-  __ Daddu(a4, a2, Operand(a4));
-  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-  __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
-
-  __ bind(&call);
-  IncrementCallCount(masm, a2, a3);
-
-  __ bind(&call_count_incremented);
-
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
-          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
-
-  __ bind(&uninitialized);
-
-  // We are going monomorphic, provided we actually have a JSFunction.
-  __ JumpIfSmi(a1, &miss);
-
-  // Goto miss case if we do not have a function.
-  __ GetObjectType(a1, a4, a4);
-  __ Branch(&miss, ne, a4, Operand(JS_FUNCTION_TYPE));
-
-  // Make sure the function is not the Array() function, which requires special
-  // behavior on MISS.
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a4);
-  __ Branch(&miss, eq, a1, Operand(a4));
-
-  // Make sure the function belongs to the same native context.
-  __ ld(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
-  __ ld(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
-  __ ld(t1, NativeContextMemOperand());
-  __ Branch(&miss, ne, t0, Operand(t1));
-
-  // Store the function. Use a stub since we need a frame for allocation.
-  // a2 - vector
-  // a3 - slot
-  // a1 - function
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    CreateWeakCellStub create_stub(masm->isolate());
-    __ SmiTag(a0);
-    __ Push(a0);
-    __ Push(a2, a3);
-    __ Push(cp, a1);
-    __ CallStub(&create_stub);
-    __ Pop(cp, a1);
-    __ Pop(a2, a3);
-    __ Pop(a0);
-    __ SmiUntag(a0);
-  }
-
-  __ Branch(&call_function);
-
-  // We are here because tracing is on or we encountered a MISS case we can't
-  // handle here.
-  __ bind(&miss);
-  GenerateMiss(masm);
-
-  __ Branch(&call_count_incremented);
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
-  FrameScope scope(masm, StackFrame::INTERNAL);
-
-  // Preserve number of arguments as Smi.
-  __ SmiTag(a0);
-  __ Push(a0);
-
-  // Push the receiver and the function and feedback info.
-  __ Push(a1, a2, a3);
-
-  // Call the entry.
-  __ CallRuntime(Runtime::kCallIC_Miss);
-
-  // Move result to a1 and exit the internal frame.
-  __ mov(a1, v0);
-
-  // Restore number of arguments.
-  __ Pop(a0);
-  __ SmiUntag(a0);
-}
-
-
 void StringCharCodeAtGenerator::GenerateSlow(
     MacroAssembler* masm, EmbedMode embed_mode,
     const RuntimeCallHelper& call_helper) {
@@ -2183,89 +1947,6 @@
   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
 }
 
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
-  // Fast case of Heap::LookupSingleCharacterStringFromCode.
-  __ JumpIfNotSmi(code_, &slow_case_);
-  __ Branch(&slow_case_, hi, code_,
-            Operand(Smi::FromInt(String::kMaxOneByteCharCode)));
-
-  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
-  // At this point code register contains smi tagged one_byte char code.
-  __ SmiScale(at, code_, kPointerSizeLog2);
-  __ Daddu(result_, result_, at);
-  __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ Branch(&slow_case_, eq, result_, Operand(at));
-  __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
-  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
-  __ bind(&slow_case_);
-  call_helper.BeforeCall(masm);
-  __ push(code_);
-  __ CallRuntime(Runtime::kStringCharFromCode);
-  __ Move(result_, v0);
-
-  call_helper.AfterCall(masm);
-  __ Branch(&exit_);
-
-  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
-
-enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
-                                          Register dest,
-                                          Register src,
-                                          Register count,
-                                          Register scratch,
-                                          String::Encoding encoding) {
-  if (FLAG_debug_code) {
-    // Check that destination is word aligned.
-    __ And(scratch, dest, Operand(kPointerAlignmentMask));
-    __ Check(eq,
-             kDestinationOfCopyNotAligned,
-             scratch,
-             Operand(zero_reg));
-  }
-
-  // Assumes word reads and writes are little endian.
-  // Nothing to do for zero characters.
-  Label done;
-
-  if (encoding == String::TWO_BYTE_ENCODING) {
-    __ Daddu(count, count, count);
-  }
-
-  Register limit = count;  // Read until dest equals this.
-  __ Daddu(limit, dest, Operand(count));
-
-  Label loop_entry, loop;
-  // Copy bytes from src to dest until dest hits limit.
-  __ Branch(&loop_entry);
-  __ bind(&loop);
-  __ lbu(scratch, MemOperand(src));
-  __ daddiu(src, src, 1);
-  __ sb(scratch, MemOperand(dest));
-  __ daddiu(dest, dest, 1);
-  __ bind(&loop_entry);
-  __ Branch(&loop, lt, dest, Operand(limit));
-
-  __ bind(&done);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(
     MacroAssembler* masm, Register left, Register right, Register scratch1,
     Register scratch2, Register scratch3) {
@@ -2893,84 +2574,6 @@
   __ Branch(miss, ne, at, Operand(zero_reg));
 }
 
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
-                                                      Label* miss,
-                                                      Label* done,
-                                                      Register elements,
-                                                      Register name,
-                                                      Register scratch1,
-                                                      Register scratch2) {
-  DCHECK(!elements.is(scratch1));
-  DCHECK(!elements.is(scratch2));
-  DCHECK(!name.is(scratch1));
-  DCHECK(!name.is(scratch2));
-
-  __ AssertName(name);
-
-  // Compute the capacity mask.
-  __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset));
-  __ SmiUntag(scratch1);
-  __ Dsubu(scratch1, scratch1, Operand(1));
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
-    if (i > 0) {
-      // Add the probe offset (i + i * i) left shifted to avoid right shifting
-      // the hash in a separate instruction. The value hash + i + i * i is right
-      // shifted in the following and instruction.
-      DCHECK(NameDictionary::GetProbeOffset(i) <
-             1 << (32 - Name::kHashFieldOffset));
-      __ Daddu(scratch2, scratch2, Operand(
-          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
-    }
-    __ dsrl(scratch2, scratch2, Name::kHashShift);
-    __ And(scratch2, scratch1, scratch2);
-
-    // Scale the index by multiplying by the entry size.
-    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
-    // scratch2 = scratch2 * 3.
-    __ Dlsa(scratch2, scratch2, scratch2, 1);
-
-    // Check if the key is identical to the name.
-    __ Dlsa(scratch2, elements, scratch2, kPointerSizeLog2);
-    __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
-    __ Branch(done, eq, name, Operand(at));
-  }
-
-  const int spill_mask =
-      (ra.bit() | a6.bit() | a5.bit() | a4.bit() |
-       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
-      ~(scratch1.bit() | scratch2.bit());
-
-  __ MultiPush(spill_mask);
-  if (name.is(a0)) {
-    DCHECK(!elements.is(a1));
-    __ Move(a1, name);
-    __ Move(a0, elements);
-  } else {
-    __ Move(a0, elements);
-    __ Move(a1, name);
-  }
-  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
-  __ CallStub(&stub);
-  __ mov(scratch2, a2);
-  __ mov(at, v0);
-  __ MultiPop(spill_mask);
-
-  __ Branch(done, ne, at, Operand(zero_reg));
-  __ Branch(miss, eq, at, Operand(zero_reg));
-}
-
-
 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   // we cannot call anything that could cause a GC from this stub.
@@ -3263,239 +2866,6 @@
   __ Daddu(sp, sp, a1);
 }
 
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(a2);
-  CallICStub stub(isolate(), state());
-  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
-                             Register receiver_map, Register scratch1,
-                             Register scratch2, bool is_polymorphic,
-                             Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label start_polymorphic;
-
-  Register cached_map = scratch1;
-
-  __ ld(cached_map,
-        FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-  __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
-  // found, now call handler.
-  Register handler = feedback;
-  __ ld(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t9);
-
-  Register length = scratch2;
-  __ bind(&start_polymorphic);
-  __ ld(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-  if (!is_polymorphic) {
-    // If the IC could be monomorphic we have to make sure we don't go past the
-    // end of the feedback array.
-    __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
-  }
-
-  Register too_far = length;
-  Register pointer_reg = feedback;
-
-  // +-----+------+------+-----+-----+ ... ----+
-  // | map | len  | wm0  | h0  | wm1 |      hN |
-  // +-----+------+------+-----+-----+ ... ----+
-  //                 0      1     2        len-1
-  //                              ^              ^
-  //                              |              |
-  //                         pointer_reg      too_far
-  //                         aka feedback     scratch2
-  // also need receiver_map
-  // use cached_map (scratch1) to look in the weak map values.
-  __ SmiScale(too_far, length, kPointerSizeLog2);
-  __ Daddu(too_far, feedback, Operand(too_far));
-  __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Daddu(pointer_reg, feedback,
-           Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
-  __ bind(&next_loop);
-  __ ld(cached_map, MemOperand(pointer_reg));
-  __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
-  __ ld(handler, MemOperand(pointer_reg, kPointerSize));
-  __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t9);
-
-  __ bind(&prepare_next);
-  __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
-  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
-
-  // We exhausted our array of map handler pairs.
-  __ Branch(miss);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
-                                  Register receiver_map, Register feedback,
-                                  Register vector, Register slot,
-                                  Register scratch, Label* compare_map,
-                                  Label* load_smi_map, Label* try_array) {
-  __ JumpIfSmi(receiver, load_smi_map);
-  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bind(compare_map);
-  Register cached_map = scratch;
-  // Move the weak map into the weak_cell register.
-  __ ld(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
-  __ Branch(try_array, ne, cached_map, Operand(receiver_map));
-  Register handler = feedback;
-  __ SmiScale(handler, slot, kPointerSizeLog2);
-  __ Daddu(handler, vector, Operand(handler));
-  __ ld(handler,
-        FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
-  __ Daddu(t9, handler, Code::kHeaderSize - kHeapObjectTag);
-  __ Jump(t9);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  KeyedStoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
-                                       Register receiver_map, Register scratch1,
-                                       Register scratch2, Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label start_polymorphic;
-  Label transition_call;
-
-  Register cached_map = scratch1;
-  Register too_far = scratch2;
-  Register pointer_reg = feedback;
-
-  __ ld(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
-  // +-----+------+------+-----+-----+-----+ ... ----+
-  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
-  // +-----+------+------+-----+-----+ ----+ ... ----+
-  //                 0      1     2              len-1
-  //                 ^                                 ^
-  //                 |                                 |
-  //             pointer_reg                        too_far
-  //             aka feedback                       scratch2
-  // also need receiver_map
-  // use cached_map (scratch1) to look in the weak map values.
-  __ SmiScale(too_far, too_far, kPointerSizeLog2);
-  __ Daddu(too_far, feedback, Operand(too_far));
-  __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Daddu(pointer_reg, feedback,
-           Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
-  __ bind(&next_loop);
-  __ ld(cached_map, MemOperand(pointer_reg));
-  __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
-  // Is it a transitioning store?
-  __ ld(too_far, MemOperand(pointer_reg, kPointerSize));
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ Branch(&transition_call, ne, too_far, Operand(at));
-
-  __ ld(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
-  __ Daddu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t9);
-
-  __ bind(&transition_call);
-  __ ld(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
-  __ JumpIfSmi(too_far, miss);
-
-  __ ld(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-  // Load the map into the correct register.
-  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
-  __ Move(feedback, too_far);
-  __ Daddu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(t9);
-
-  __ bind(&prepare_next);
-  __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
-  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
-
-  // We exhausted our array of map handler pairs.
-  __ Branch(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // a1
-  Register key = StoreWithVectorDescriptor::NameRegister();           // a2
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // a3
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // a4
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0));          // a0
-  Register feedback = a5;
-  Register receiver_map = a6;
-  Register scratch1 = a7;
-
-  __ SmiScale(scratch1, slot, kPointerSizeLog2);
-  __ Daddu(feedback, vector, Operand(scratch1));
-  __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
-
-  // We have a polymorphic element handler.
-  Label try_poly_name;
-
-  Register scratch2 = t0;
-
-  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
-                             &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ Branch(&try_poly_name, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
-  Handle<Code> megamorphic_stub =
-      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ Branch(&miss, ne, key, Operand(feedback));
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ SmiScale(scratch1, slot, kPointerSizeLog2);
-  __ Daddu(feedback, vector, Operand(scratch1));
-  __ ld(feedback,
-        FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
-                   &miss);
-
-  __ bind(&miss);
-  KeyedStoreIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ Branch(USE_DELAY_SLOT, &compare_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
-}
-
-
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
@@ -3850,637 +3220,6 @@
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a1 : target
-  //  -- a3 : new target
-  //  -- cp : context
-  //  -- ra : return address
-  // -----------------------------------
-  __ AssertFunction(a1);
-  __ AssertReceiver(a3);
-
-  // Verify that the new target is a JSFunction.
-  Label new_object;
-  __ GetObjectType(a3, a2, a2);
-  __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
-
-  // Load the initial map and verify that it's in fact a map.
-  __ ld(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
-  __ JumpIfSmi(a2, &new_object);
-  __ GetObjectType(a2, a0, a0);
-  __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
-
-  // Fall back to runtime if the target differs from the new target's
-  // initial map constructor.
-  __ ld(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
-  __ Branch(&new_object, ne, a0, Operand(a1));
-
-  // Allocate the JSObject on the heap.
-  Label allocate, done_allocate;
-  __ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-  __ Allocate(a4, v0, a5, a0, &allocate, SIZE_IN_WORDS);
-  __ bind(&done_allocate);
-
-  // Initialize the JSObject fields.
-  __ sd(a2, FieldMemOperand(v0, JSObject::kMapOffset));
-  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
-  __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
-  __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
-  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ Daddu(a1, v0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
-  // ----------- S t a t e -------------
-  //  -- v0 : result (tagged)
-  //  -- a1 : result fields (untagged)
-  //  -- a5 : result end (untagged)
-  //  -- a2 : initial map
-  //  -- cp : context
-  //  -- ra : return address
-  // -----------------------------------
-
-  // Perform in-object slack tracking if requested.
-  Label slack_tracking;
-  STATIC_ASSERT(Map::kNoSlackTracking == 0);
-  __ lwu(a3, FieldMemOperand(a2, Map::kBitField3Offset));
-  __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
-  __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(zero_reg));
-  __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);  // In delay slot.
-  {
-    // Initialize all in-object fields with undefined.
-    __ InitializeFieldsWithFiller(a1, a5, a0);
-    __ Ret();
-  }
-  __ bind(&slack_tracking);
-  {
-    // Decrease generous allocation count.
-    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
-    __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
-    __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
-
-    // Initialize the in-object fields with undefined.
-    __ lbu(a4, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
-    __ dsll(a4, a4, kPointerSizeLog2);
-    __ Dsubu(a4, a5, a4);
-    __ InitializeFieldsWithFiller(a1, a4, a0);
-
-    // Initialize the remaining (reserved) fields with one pointer filler map.
-    __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
-    __ InitializeFieldsWithFiller(a1, a5, a0);
-
-    // Check if we can finalize the instance size.
-    Label finalize;
-    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
-    __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
-    __ Branch(&finalize, eq, a3, Operand(zero_reg));
-    __ Ret();
-
-    // Finalize the instance size.
-    __ bind(&finalize);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(v0, a2);
-      __ CallRuntime(Runtime::kFinalizeInstanceSize);
-      __ Pop(v0);
-    }
-    __ Ret();
-  }
-
-  // Fall back to %AllocateInNewSpace.
-  __ bind(&allocate);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    STATIC_ASSERT(kSmiTag == 0);
-    STATIC_ASSERT(kSmiTagSize == 1);
-    __ dsll(a4, a4, kPointerSizeLog2 + kSmiShiftSize + kSmiTagSize);
-    __ SmiTag(a4);
-    __ Push(a2, a4);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ Pop(a2);
-  }
-  __ lbu(a5, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-  __ Dlsa(a5, v0, a5, kPointerSizeLog2);
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ Dsubu(a5, a5, Operand(kHeapObjectTag));
-  __ jmp(&done_allocate);
-
-  // Fall back to %NewObject.
-  __ bind(&new_object);
-  __ Push(a1, a3);
-  __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a1 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- ra : return address
-  // -----------------------------------
-  __ AssertFunction(a1);
-
-  // Make a2 point to the JavaScript frame.
-  __ mov(a2, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
-    __ Branch(&ok, eq, a1, Operand(a3));
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have rest parameters (only possible if we have an
-  // arguments adaptor frame below the function frame).
-  Label no_rest_parameters;
-  __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-  __ ld(a3, MemOperand(a2, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ Branch(&no_rest_parameters, ne, a3,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // Check if the arguments adaptor frame contains more arguments than
-  // specified by the function's internal formal parameter count.
-  Label rest_parameters;
-  __ SmiLoadUntag(
-      a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a3,
-        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ Dsubu(a0, a0, Operand(a3));
-  __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
-
-  // Return an empty rest parameter array.
-  __ bind(&no_rest_parameters);
-  {
-    // ----------- S t a t e -------------
-    //  -- cp : context
-    //  -- ra : return address
-    // -----------------------------------
-
-    // Allocate an empty rest parameter array.
-    Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Setup the rest parameter array in v0.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
-    __ sd(a1, FieldMemOperand(v0, JSArray::kMapOffset));
-    __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
-    __ sd(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
-    __ sd(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
-    __ Move(a1, Smi::kZero);
-    __ Ret(USE_DELAY_SLOT);
-    __ sd(a1, FieldMemOperand(v0, JSArray::kLengthOffset));  // In delay slot
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-
-    // Fall back to %AllocateInNewSpace.
-    __ bind(&allocate);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(Smi::FromInt(JSArray::kSize));
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-    }
-    __ jmp(&done_allocate);
-  }
-
-  __ bind(&rest_parameters);
-  {
-    // Compute the pointer to the first rest parameter (skipping the receiver).
-    __ Dlsa(a2, a2, a0, kPointerSizeLog2);
-    __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
-                             1 * kPointerSize));
-
-    // ----------- S t a t e -------------
-    //  -- cp : context
-    //  -- a0 : number of rest parameters
-    //  -- a1 : function
-    //  -- a2 : pointer to the first rest parameter
-    //  -- ra : return address
-    // -----------------------------------
-
-    // Allocate space for the rest parameter array plus the backing store.
-    Label allocate, done_allocate;
-    __ li(a5, Operand(JSArray::kSize + FixedArray::kHeaderSize));
-    __ Dlsa(a5, a5, a0, kPointerSizeLog2);
-    __ Allocate(a5, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Compute arguments.length in a4.
-    __ SmiTag(a4, a0);
-
-    // Setup the elements array in v0.
-    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-    __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
-    __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
-    __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
-    {
-      Label loop, done_loop;
-      __ Dlsa(a1, a3, a0, kPointerSizeLog2);
-      __ bind(&loop);
-      __ Branch(&done_loop, eq, a1, Operand(a3));
-      __ ld(at, MemOperand(a2, 0 * kPointerSize));
-      __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
-      __ Dsubu(a2, a2, Operand(1 * kPointerSize));
-      __ Daddu(a3, a3, Operand(1 * kPointerSize));
-      __ Branch(&loop);
-      __ bind(&done_loop);
-    }
-
-    // Setup the rest parameter array in a3.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
-    __ sd(at, FieldMemOperand(a3, JSArray::kMapOffset));
-    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-    __ sd(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
-    __ sd(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
-    __ sd(a4, FieldMemOperand(a3, JSArray::kLengthOffset));
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ Ret(USE_DELAY_SLOT);
-    __ mov(v0, a3);  // In delay slot
-
-    // Fall back to %AllocateInNewSpace (if not too big).
-    Label too_big_for_new_space;
-    __ bind(&allocate);
-    __ Branch(&too_big_for_new_space, gt, a5,
-              Operand(kMaxRegularHeapObjectSize));
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(a0);
-      __ SmiTag(a5);
-      __ Push(a0, a2, a5);
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-      __ Pop(a0, a2);
-      __ SmiUntag(a0);
-    }
-    __ jmp(&done_allocate);
-
-    // Fall back to %NewStrictArguments.
-    __ bind(&too_big_for_new_space);
-    __ Push(a1);
-    __ TailCallRuntime(Runtime::kNewStrictArguments);
-  }
-}
-
-
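The stubs in this file all share one skeleton: attempt an inline new-space
allocation, and on failure build an internal frame and ask the runtime for the
memory before rejoining the fast path. A minimal sketch of that shape, with
`kSize` standing in for whichever object is being built:

    Label allocate, done_allocate;
    __ Allocate(kSize, v0, a0, a1, &allocate, NO_ALLOCATION_FLAGS);
    __ bind(&done_allocate);
    // ... initialize the object in v0 and return ...

    // Fallback: runtime allocation needs a frame.
    __ bind(&allocate);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(Smi::FromInt(kSize));
      __ CallRuntime(Runtime::kAllocateInNewSpace);
    }
    __ jmp(&done_allocate);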
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a1 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- ra : return address
-  // -----------------------------------
-  __ AssertFunction(a1);
-
-  // Make t0 point to the JavaScript frame.
-  __ mov(t0, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ ld(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ ld(a3, MemOperand(t0, StandardFrameConstants::kFunctionOffset));
-    __ Branch(&ok, eq, a1, Operand(a3));
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
-  __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a2,
-         FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ Lsa(a3, t0, a2, kPointerSizeLog2);
-  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
-  __ SmiTag(a2);
-
-  // a1 : function
-  // a2 : number of parameters (tagged)
-  // a3 : parameters pointer
-  // t0 : JavaScript frame pointer
-  // Registers used over whole function:
-  //  a5 : arguments count (tagged)
-  //  a6 : mapped parameter count (tagged)
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ ld(a4, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
-  __ ld(a0, MemOperand(a4, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ Branch(&adaptor_frame, eq, a0,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
-  // No adaptor, parameter count = argument count.
-  __ mov(a5, a2);
-  __ Branch(USE_DELAY_SLOT, &try_allocate);
-  __ mov(a6, a2);  // In delay slot.
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiScale(t2, a5, kPointerSizeLog2);
-  __ Daddu(a4, a4, Operand(t2));
-  __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // a5 = argument count (tagged)
-  // a6 = parameter count (tagged)
-  // Compute the mapped parameter count = min(a6, a5) in a6.
-  __ mov(a6, a2);
-  __ Branch(&try_allocate, le, a6, Operand(a5));
-  __ mov(a6, a5);
-
-  __ bind(&try_allocate);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map, has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  // If there are no mapped parameters, we do not need the parameter_map.
-  Label param_map_size;
-  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
-  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
-  __ mov(t1, zero_reg);  // In delay slot: param map size = 0 when a6 == 0.
-  __ SmiScale(t1, a6, kPointerSizeLog2);
-  __ daddiu(t1, t1, kParameterMapHeaderSize);
-  __ bind(&param_map_size);
-
-  // 2. Backing store.
-  __ SmiScale(t2, a5, kPointerSizeLog2);
-  __ Daddu(t1, t1, Operand(t2));
-  __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ Daddu(t1, t1, Operand(JSSloppyArgumentsObject::kSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(t1, v0, t1, a4, &runtime, NO_ALLOCATION_FLAGS);
-
-  // v0 = address of new object(s) (tagged)
-  // a2 = argument count (smi-tagged)
-  // Get the arguments boilerplate from the current native context into a4.
-  const int kNormalOffset =
-      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
-  const int kAliasedOffset =
-      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
-  __ ld(a4, NativeContextMemOperand());
-  Label skip2_ne, skip2_eq;
-  __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
-  __ ld(a4, MemOperand(a4, kNormalOffset));
-  __ bind(&skip2_ne);
-
-  __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
-  __ ld(a4, MemOperand(a4, kAliasedOffset));
-  __ bind(&skip2_eq);
-
-  // v0 = address of new object (tagged)
-  // a2 = argument count (smi-tagged)
-  // a4 = address of arguments map (tagged)
-  // a6 = mapped parameter count (tagged)
-  __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
-  __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
-  __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
-  __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
-
-  // Set up the callee in-object property.
-  __ AssertNotSmi(a1);
-  __ sd(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  __ AssertSmi(a5);
-  __ sd(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, a4 will point there, otherwise
-  // it will point to the backing store.
-  __ Daddu(a4, v0, Operand(JSSloppyArgumentsObject::kSize));
-  __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
-
-  // v0 = address of new object (tagged)
-  // a2 = argument count (tagged)
-  // a4 = address of parameter map or backing store (tagged)
-  // a6 = mapped parameter count (tagged)
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  Label skip3;
-  __ Branch(&skip3, ne, a6, Operand(Smi::kZero));
-  // Move backing store address to a1, because it is
-  // expected there when filling in the unmapped arguments.
-  __ mov(a1, a4);
-  __ bind(&skip3);
-
-  __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::kZero));
-
-  __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
-  __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
-  __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
-  __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
-  __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
-  __ SmiScale(t2, a6, kPointerSizeLog2);
-  __ Daddu(a5, a4, Operand(t2));
-  __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
-  __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1.
-  // The mapped parameters thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop, parameters_test;
-  __ mov(a5, a6);
-  __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
-  __ Dsubu(t1, t1, Operand(a6));
-  __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
-  __ SmiScale(t2, a5, kPointerSizeLog2);
-  __ Daddu(a1, a4, Operand(t2));
-  __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
-
-  // a1 = address of backing store (tagged)
-  // a4 = address of parameter map (tagged)
-  // a0 = temporary scratch (among others, for address calculation)
-  // t1 = loop variable (tagged)
-  // a7 = the hole value
-  __ jmp(&parameters_test);
-
-  __ bind(&parameters_loop);
-  __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
-  __ SmiScale(a0, a5, kPointerSizeLog2);
-  __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-  __ Daddu(t2, a4, a0);
-  __ sd(t1, MemOperand(t2));
-  __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
-  __ Daddu(t2, a1, a0);
-  __ sd(a7, MemOperand(t2));
-  __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
-  __ bind(&parameters_test);
-  __ Branch(&parameters_loop, ne, a5, Operand(Smi::kZero));
-
-  // Restore a5 = argument count (tagged).
-  __ ld(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
-
-  __ bind(&skip_parameter_map);
-  // v0 = address of new object (tagged)
-  // a1 = address of backing store (tagged)
-  // a5 = argument count (tagged)
-  // a6 = mapped parameter count (tagged)
-  // t1 = scratch
-  // Copy arguments header and remaining slots (if there are any).
-  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
-  __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
-  __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
-
-  Label arguments_loop, arguments_test;
-  __ SmiScale(t2, a6, kPointerSizeLog2);
-  __ Dsubu(a3, a3, Operand(t2));
-  __ jmp(&arguments_test);
-
-  __ bind(&arguments_loop);
-  __ Dsubu(a3, a3, Operand(kPointerSize));
-  __ ld(a4, MemOperand(a3, 0));
-  __ SmiScale(t2, a6, kPointerSizeLog2);
-  __ Daddu(t1, a1, Operand(t2));
-  __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
-  __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
-
-  __ bind(&arguments_test);
-  __ Branch(&arguments_loop, lt, a6, Operand(a5));
-
-  // Return.
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  // a5 = argument count (tagged)
-  __ bind(&runtime);
-  __ Push(a1, a3, a5);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
-
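The SmiScale/SmiTag arithmetic above leans on the 64-bit smi layout: the
32-bit payload sits in the upper word (kSmiTagSize + kSmiShiftSize == 32), so
untag-and-scale collapses into a single arithmetic shift. A scalar sketch of
what the macros compute:

    #include <cstdint>

    int64_t SmiTagValue(int32_t value) {
      return static_cast<int64_t>(value) << 32;  // payload on top, tag bit 0
    }

    // SmiScale(dst, smi, log2) is (smi >> 32) << log2, done as one shift,
    // since the low 32 bits of a smi are all zero.
    int64_t SmiScaleValue(int64_t smi, int log2) {
      return smi >> (32 - log2);
    }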
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a1 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- ra : return address
-  // -----------------------------------
-  __ AssertFunction(a1);
-
-  // Make a2 point to the JavaScript frame.
-  __ mov(a2, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
-    __ Branch(&ok, eq, a1, Operand(a3));
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have an arguments adaptor frame below the function frame.
-  Label arguments_adaptor, arguments_done;
-  __ ld(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
-  __ ld(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ Branch(&arguments_adaptor, eq, a0,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  {
-    __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-    __ lw(a0,
-          FieldMemOperand(a4, SharedFunctionInfo::kFormalParameterCountOffset));
-    __ Dlsa(a2, a2, a0, kPointerSizeLog2);
-    __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
-                             1 * kPointerSize));
-  }
-  __ Branch(&arguments_done);
-  __ bind(&arguments_adaptor);
-  {
-    __ SmiLoadUntag(
-        a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ Dlsa(a2, a3, a0, kPointerSizeLog2);
-    __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
-                             1 * kPointerSize));
-  }
-  __ bind(&arguments_done);
-
-  // ----------- S t a t e -------------
-  //  -- cp : context
-  //  -- a0 : number of rest parameters
-  //  -- a1 : function
-  //  -- a2 : pointer to the first rest parameter
-  //  -- ra : return address
-  // -----------------------------------
-
-  // Allocate space for the rest parameter array plus the backing store.
-  Label allocate, done_allocate;
-  __ li(a5, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ Dlsa(a5, a5, a0, kPointerSizeLog2);
-  __ Allocate(a5, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
-  __ bind(&done_allocate);
-
-  // Compute arguments.length in a4.
-  __ SmiTag(a4, a0);
-
-  // Setup the elements array in v0.
-  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-  __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
-  __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
-  __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
-  {
-    Label loop, done_loop;
-    __ Dlsa(a1, a3, a0, kPointerSizeLog2);
-    __ bind(&loop);
-    __ Branch(&done_loop, eq, a1, Operand(a3));
-    __ ld(at, MemOperand(a2, 0 * kPointerSize));
-    __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
-    __ Dsubu(a2, a2, Operand(1 * kPointerSize));
-    __ Daddu(a3, a3, Operand(1 * kPointerSize));
-    __ Branch(&loop);
-    __ bind(&done_loop);
-  }
-
-  // Setup the strict arguments object in a3.
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
-  __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
-  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
-  __ sd(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
-  __ sd(a4, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
-  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a3);  // In delay slot
-
-  // Fall back to %AllocateInNewSpace (if not too big).
-  Label too_big_for_new_space;
-  __ bind(&allocate);
-  __ Branch(&too_big_for_new_space, gt, a5, Operand(kMaxRegularHeapObjectSize));
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(a0);
-    __ SmiTag(a5);
-    __ Push(a0, a2, a5);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ Pop(a0, a2);
-    __ SmiUntag(a0);
-  }
-  __ jmp(&done_allocate);
-
-  // Fall back to %NewStrictArguments.
-  __ bind(&too_big_for_new_space);
-  __ Push(a1);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
-
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   int64_t offset = (ref0.address() - ref1.address());
   DCHECK(static_cast<int>(offset) == offset);
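The DCHECK here is a narrowing guard: the distance between the two external
references is computed in 64 bits but consumed as an int, so the cast must
round-trip. The same check in isolation:

    #include <cassert>
    #include <cstdint>

    int CheckedOffset(int64_t offset) {
      assert(static_cast<int>(offset) == offset);  // must fit in 32 bits
      return static_cast<int>(offset);
    }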
diff --git a/src/mips64/code-stubs-mips64.h b/src/mips64/code-stubs-mips64.h
index fdaf4c8..42f90ad 100644
--- a/src/mips64/code-stubs-mips64.h
+++ b/src/mips64/code-stubs-mips64.h
@@ -16,17 +16,6 @@
 
 class StringHelper : public AllStatic {
  public:
-  // Generate code for copying a large number of characters. This function
-  // is allowed to spend extra time setting up conditions to make copying
-  // faster. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharacters(MacroAssembler* masm,
-                                     Register dest,
-                                     Register src,
-                                     Register count,
-                                     Register scratch,
-                                     String::Encoding encoding);
-
   // Compares two flat one-byte strings and returns result in v0.
   static void GenerateCompareFlatOneByteStrings(
       MacroAssembler* masm, Register left, Register right, Register scratch1,
@@ -312,14 +301,6 @@
                                      Handle<Name> name,
                                      Register scratch0);
 
-  static void GeneratePositiveLookup(MacroAssembler* masm,
-                                     Label* miss,
-                                     Label* done,
-                                     Register elements,
-                                     Register name,
-                                     Register r0,
-                                     Register r1);
-
   bool SometimesSetsUpAFrame() override { return false; }
 
  private:
diff --git a/src/mips64/codegen-mips64.cc b/src/mips64/codegen-mips64.cc
index 943c2a6..e7f6cb0 100644
--- a/src/mips64/codegen-mips64.cc
+++ b/src/mips64/codegen-mips64.cc
@@ -607,353 +607,14 @@
 
 #define __ ACCESS_MASM(masm)
 
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* allocation_memento_found) {
-  Register scratch_elements = a4;
-  DCHECK(!AreAliased(receiver, key, value, target_map,
-                     scratch_elements));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(
-        receiver, scratch_elements, allocation_memento_found);
-  }
-
-  // Set transitioned map.
-  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      t1,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  // Register ra contains the return address.
-  Label loop, entry, convert_hole, gc_required, only_change_map, done;
-  Register elements = a4;
-  Register length = a5;
-  Register array = a6;
-  Register array_end = array;
-
-  // target_map parameter can be clobbered.
-  Register scratch1 = target_map;
-  Register scratch2 = t1;
-  Register scratch3 = a7;
-
-  // Verify input registers don't conflict with locals.
-  DCHECK(!AreAliased(receiver, key, value, target_map,
-                     elements, length, array, scratch2));
-
-  Register scratch = t2;
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(elements));
-
-  __ push(ra);
-  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  // elements: source FixedArray
-  // length: number of elements (smi-tagged)
-
-  // Allocate new FixedDoubleArray.
-  __ SmiScale(scratch, length, kDoubleSizeLog2);
-  __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
-  __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
-  __ Dsubu(array, array, kHeapObjectTag);
-  // array: destination FixedDoubleArray, not tagged as heap object
-
-  // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
-  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
-  // Update receiver's map.
-  __ sd(scratch2, MemOperand(array, HeapObject::kMapOffset));
-
-  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      scratch2,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ Daddu(scratch1, array, Operand(kHeapObjectTag));
-  __ sd(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ RecordWriteField(receiver,
-                      JSObject::kElementsOffset,
-                      scratch1,
-                      scratch2,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-
-  // Prepare for conversion loop.
-  __ Daddu(scratch1, elements,
-      Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Daddu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
-  __ SmiScale(array_end, length, kDoubleSizeLog2);
-  __ Daddu(array_end, array_end, scratch3);
-
-  // Repurpose registers no longer in use.
-  Register hole_lower = elements;
-  Register hole_upper = length;
-  __ li(hole_lower, Operand(kHoleNanLower32));
-  __ li(hole_upper, Operand(kHoleNanUpper32));
-
-  // scratch1: begin of source FixedArray element fields, not tagged
-  // hole_lower: kHoleNanLower32
-  // hole_upper: kHoleNanUpper32
-  // array_end: end of destination FixedDoubleArray, not tagged
-  // scratch3: begin of FixedDoubleArray element fields, not tagged
-
-  __ Branch(&entry);
-
-  __ bind(&only_change_map);
-  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      scratch2,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ Branch(&done);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ ld(ra, MemOperand(sp, 0));
-  __ Branch(USE_DELAY_SLOT, fail);
-  __ daddiu(sp, sp, kPointerSize);  // In delay slot.
-
-  // Convert and copy elements.
-  __ bind(&loop);
-  __ ld(scratch2, MemOperand(scratch1));
-  __ Daddu(scratch1, scratch1, kPointerSize);
-  // scratch2: current element
-  __ JumpIfNotSmi(scratch2, &convert_hole);
-  __ SmiUntag(scratch2);
-
-  // Normal smi, convert to double and store.
-  __ mtc1(scratch2, f0);
-  __ cvt_d_w(f0, f0);
-  __ sdc1(f0, MemOperand(scratch3));
-  __ Branch(USE_DELAY_SLOT, &entry);
-  __ daddiu(scratch3, scratch3, kDoubleSize);  // In delay slot.
-
-  // Hole found, store the-hole NaN.
-  __ bind(&convert_hole);
-  if (FLAG_debug_code) {
-    // Restore a "smi-untagged" heap object.
-    __ Or(scratch2, scratch2, Operand(1));
-    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
-  }
-  // mantissa
-  __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
-  // exponent
-  __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
-  __ Daddu(scratch3, scratch3, kDoubleSize);
-
-  __ bind(&entry);
-  __ Branch(&loop, lt, scratch3, Operand(array_end));
-
-  __ bind(&done);
-  __ pop(ra);
-}
-
-
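For context on kHoleNanUpper32/kHoleNanLower32 above: holes in a
FixedDoubleArray are stored as one specific NaN bit pattern, written here as
two 32-bit halves at the exponent and mantissa offsets. A sketch of the
equivalent bit-level composition:

    #include <cstdint>
    #include <cstring>

    double ComposeHoleNaN(uint32_t upper, uint32_t lower) {
      uint64_t bits = (static_cast<uint64_t>(upper) << 32) | lower;
      double hole;
      std::memcpy(&hole, &bits, sizeof hole);  // bit copy, not a value conversion
      return hole;
    }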
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  // Register ra contains the return address.
-  Label entry, loop, convert_hole, gc_required, only_change_map;
-  Register elements = a4;
-  Register array = a6;
-  Register length = a5;
-  Register scratch = t1;
-
-  // Verify input registers don't conflict with locals.
-  DCHECK(!AreAliased(receiver, key, value, target_map,
-                     elements, array, length, scratch));
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(elements));
-
-  __ MultiPush(
-      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
-
-  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  // elements: source FixedArray
-  // length: number of elements (smi-tagged)
-
-  // Allocate new FixedArray.
-  // Re-use value and target_map registers, as they have been saved on the
-  // stack.
-  Register array_size = value;
-  Register allocate_scratch = target_map;
-  __ SmiScale(array_size, length, kPointerSizeLog2);
-  __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
-  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
-              NO_ALLOCATION_FLAGS);
-  __ Dsubu(array, array, kHeapObjectTag);
-  // array: destination FixedArray, not tagged as heap object
-  // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
-  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ sd(scratch, MemOperand(array, HeapObject::kMapOffset));
-
-  // Prepare for conversion loop.
-  Register src_elements = elements;
-  Register dst_elements = target_map;
-  Register dst_end = length;
-  Register heap_number_map = scratch;
-  __ Daddu(src_elements, src_elements,
-           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
-  __ SmiScale(dst_end, dst_end, kPointerSizeLog2);
-  __ Daddu(dst_end, dst_elements, dst_end);
-
-  // Allocating heap numbers in the loop below can fail and cause a jump to
-  // gc_required. We can't leave a partly initialized FixedArray behind,
-  // so pessimistically fill it with holes now.
-  Label initialization_loop, initialization_loop_entry;
-  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-  __ Branch(&initialization_loop_entry);
-  __ bind(&initialization_loop);
-  __ sd(scratch, MemOperand(dst_elements));
-  __ Daddu(dst_elements, dst_elements, Operand(kPointerSize));
-  __ bind(&initialization_loop_entry);
-  __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));
-
-  __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
-  __ Daddu(array, array, Operand(kHeapObjectTag));
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  // Using offset addresses.
-  // dst_elements: begin of destination FixedArray element fields, not tagged
-  // src_elements: begin of source FixedDoubleArray element fields, not tagged,
-  //               points to the exponent
-  // dst_end: end of destination FixedArray, not tagged
-  // array: destination FixedArray
-  // heap_number_map: heap number map
-  __ Branch(&entry);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ MultiPop(
-      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
-
-  __ Branch(fail);
-
-  __ bind(&loop);
-  Register upper_bits = key;
-  __ lw(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
-  __ Daddu(src_elements, src_elements, kDoubleSize);
-  // upper_bits: current element's upper 32 bits
-  // src_elements: address of next element
-  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
-
-  // Non-hole double, copy value into a heap number.
-  Register heap_number = receiver;
-  Register scratch2 = value;
-  Register scratch3 = t2;
-  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
-                        &gc_required);
-  // heap_number: new heap number
-  // Load current element, src_elements point to next element.
-
-  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
-  __ sd(scratch2, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
-
-  __ mov(scratch2, dst_elements);
-  __ sd(heap_number, MemOperand(dst_elements));
-  __ Daddu(dst_elements, dst_elements, kPointerSize);
-  __ RecordWrite(array,
-                 scratch2,
-                 heap_number,
-                 kRAHasBeenSaved,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-  __ Branch(&entry);
-
-  // Replace the-hole NaN with the-hole pointer.
-  __ bind(&convert_hole);
-  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
-  __ sd(scratch2, MemOperand(dst_elements));
-  __ Daddu(dst_elements, dst_elements, kPointerSize);
-
-  __ bind(&entry);
-  __ Branch(&loop, lt, dst_elements, Operand(dst_end));
-
-  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ sd(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ RecordWriteField(receiver,
-                      JSObject::kElementsOffset,
-                      array,
-                      scratch,
-                      kRAHasBeenSaved,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ pop(ra);
-
-  __ bind(&only_change_map);
-  // Update receiver's map.
-  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      scratch,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                        Register string,
                                        Register index,
                                        Register result,
                                        Label* call_runtime) {
+  Label indirect_string_loaded;
+  __ bind(&indirect_string_loaded);
+
   // Fetch the instance type of the receiver into result register.
   __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
   __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -964,18 +625,23 @@
   __ Branch(&check_sequential, eq, at, Operand(zero_reg));
 
   // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ And(at, result, Operand(kSlicedNotConsMask));
-  __ Branch(&cons_string, eq, at, Operand(zero_reg));
+  Label cons_string, thin_string;
+  __ And(at, result, Operand(kStringRepresentationMask));
+  __ Branch(&cons_string, eq, at, Operand(kConsStringTag));
+  __ Branch(&thin_string, eq, at, Operand(kThinStringTag));
 
   // Handle slices.
-  Label indirect_string_loaded;
   __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
   __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
   __ dsra32(at, result, 0);
   __ Daddu(index, index, at);
   __ jmp(&indirect_string_loaded);
 
+  // Handle thin strings.
+  __ bind(&thin_string);
+  __ ld(string, FieldMemOperand(string, ThinString::kActualOffset));
+  __ jmp(&indirect_string_loaded);
+
   // Handle cons strings.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
@@ -987,10 +653,7 @@
   __ Branch(call_runtime, ne, result, Operand(at));
   // Get the first of the two strings and load its instance type.
   __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+  __ jmp(&indirect_string_loaded);
 
   // Distinguish sequential and external strings. Only these two string
   // representations can reach here (slices and flat cons strings have been
@@ -1077,37 +740,29 @@
   return result;
 }
 
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
 
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
-                               MarkingParity* parity) {
-  if (IsYoungSequence(isolate, sequence)) {
-    *age = kNoAgeCodeAge;
-    *parity = NO_MARKING_PARITY;
-  } else {
-    Address target_address = Assembler::target_address_at(
-        sequence + Assembler::kInstrSize);
-    Code* stub = GetCodeFromTargetAddress(target_address);
-    GetCodeAgeAndParity(stub, age, parity);
-  }
+  Address target_address =
+      Assembler::target_address_at(sequence + Assembler::kInstrSize);
+  Code* stub = GetCodeFromTargetAddress(target_address);
+  return GetAgeOfCodeAgeStub(stub);
 }
 
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
-                                byte* sequence,
-                                Code::Age age,
-                                MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+                                Code::Age age) {
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
     Assembler::FlushICache(isolate, sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age);
     CodePatcher patcher(isolate, sequence,
                         young_length / Assembler::kInstrSize);
     // Mark this code sequence for FindPlatformCodeAgeSequence().
     patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
     // Load the stub address to t9 and call it,
-    // GetCodeAgeAndParity() extracts the stub address from this instruction.
+    // GetCodeAge() extracts the stub address from this instruction.
     patcher.masm()->li(
         t9,
         Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
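The StringCharLoadGenerator change above turns the one-shot unwrap into a
loop: slices adjust the index and continue with the parent, thin strings
continue with the actual string, and flat cons strings continue with their
first component, re-dispatching each time at indirect_string_loaded. A
high-level sketch with a hypothetical String type (the real code walks tagged
heap objects):

    struct String {
      enum Kind { kSeq, kCons, kSliced, kThin } kind;
      String* target;  // sliced: parent, cons: first, thin: actual
      int offset;      // sliced strings only
    };

    String* ResolveIndirect(String* s, int* index) {
      for (;;) {  // plays the role of indirect_string_loaded
        switch (s->kind) {
          case String::kSliced: *index += s->offset; s = s->target; break;
          case String::kThin:   s = s->target;                      break;
          case String::kCons:   s = s->target;  // first; assumes the rest is empty
            break;
          default: return s;  // sequential/external: dispatch on encoding
        }
      }
    }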
diff --git a/src/mips64/deoptimizer-mips64.cc b/src/mips64/deoptimizer-mips64.cc
index ea17124..8b762bd 100644
--- a/src/mips64/deoptimizer-mips64.cc
+++ b/src/mips64/deoptimizer-mips64.cc
@@ -93,7 +93,7 @@
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
   for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
-    double double_value = input_->GetDoubleRegister(i);
+    Float64 double_value = input_->GetDoubleRegister(i);
     output_frame->SetDoubleRegister(i, double_value);
   }
 }
diff --git a/src/mips64/interface-descriptors-mips64.cc b/src/mips64/interface-descriptors-mips64.cc
index c6a917f..5ce2bb0 100644
--- a/src/mips64/interface-descriptors-mips64.cc
+++ b/src/mips64/interface-descriptors-mips64.cc
@@ -64,37 +64,10 @@
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {a2};
+  Register registers[] = {a1, a2, a3};
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void FastNewObjectDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a1, a3};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a1};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a1};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {a1};
-  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-
 // static
 const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
 
@@ -139,15 +112,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {a1, a3};
+  Register registers[] = {a1, a0, a3};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {a1, a0, a3, a2};
   data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -183,6 +154,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // a1: the target to call
+  // a2: start index (to support rest parameters)
+  Register registers[] = {a1, a2};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void ConstructStubDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -218,13 +196,12 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
-  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
-      CallInterfaceDescriptorData* data) {                          \
-    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
-  }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+  Register registers[] = {a1, a3, a0, a2};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
 
 void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -413,6 +390,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      a1,  // loaded new FP
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
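All of these platform hooks follow one pattern: the order of the register
array defines the calling convention for that interface descriptor on mips64.
The shape, with a hypothetical descriptor name and register assignment:

    void MyExampleDescriptor::InitializePlatformSpecific(
        CallInterfaceDescriptorData* data) {
      // a1: target, a0: actual argument count, a3: feedback slot (for example)
      Register registers[] = {a1, a0, a3};
      data->InitializePlatformSpecific(arraysize(registers), registers);
    }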
 
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index a3ab4a8..849327e 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -2212,19 +2212,49 @@
   bind(&fail);
 }
 
+void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (kArchVariant == kMips64r2) {
+    madd_s(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_s(scratch, fs, ft);
+    add_s(fd, fr, scratch);
+  }
+}
 
 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
     FPURegister ft, FPURegister scratch) {
-  if (0) {  // TODO(plind): find reasonable arch-variant symbol names.
+  if (kArchVariant == kMips64r2) {
     madd_d(fd, fr, fs, ft);
   } else {
-    // Can not change source regs's value.
     DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
     mul_d(scratch, fs, ft);
     add_d(fd, fr, scratch);
   }
 }
 
+void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (kArchVariant == kMips64r2) {
+    msub_s(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_s(scratch, fs, ft);
+    sub_s(fd, scratch, fr);
+  }
+}
+
+void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+                            FPURegister ft, FPURegister scratch) {
+  if (kArchVariant == kMips64r2) {
+    msub_d(fd, fr, fs, ft);
+  } else {
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_d(scratch, fs, ft);
+    sub_d(fd, scratch, fr);
+  }
+}
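Reference semantics for the helpers just added: Madd computes fd = fr + fs*ft
and Msub computes fd = fs*ft - fr. On variants without the madd/msub
instructions, the fallback multiplies into the scratch register first, which
is why the DCHECKs forbid aliasing the sources with it. As scalar C++ (a
sketch of the value computed, not of FPU rounding behavior):

    double MaddD(double fr, double fs, double ft) { return fr + fs * ft; }
    double MsubD(double fr, double fs, double ft) { return fs * ft - fr; }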
 
 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
                                    Label* nan, Condition cond, FPURegister cmp1,
@@ -2524,186 +2554,6 @@
   movf(rd, rs, cc);
 }
 
-#define __ masm->
-
-static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
-                         FPURegister src1, FPURegister src2, Label* equal) {
-  if (src1.is(src2)) {
-    __ Move(dst, src1);
-    return true;
-  }
-
-  Label other, compare_not_equal;
-  FPURegister left, right;
-  if (kind == MaxMinKind::kMin) {
-    left = src1;
-    right = src2;
-  } else {
-    left = src2;
-    right = src1;
-  }
-
-  __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
-  // Left and right hand side are equal, check for -0 vs. +0.
-  __ dmfc1(t8, src1);
-  __ Branch(&other, eq, t8, Operand(0x8000000000000000));
-  __ Move_d(dst, right);
-  __ Branch(equal);
-  __ bind(&other);
-  __ Move_d(dst, left);
-  __ Branch(equal);
-  __ bind(&compare_not_equal);
-  return false;
-}
-
-static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
-                         FPURegister src1, FPURegister src2, Label* equal) {
-  if (src1.is(src2)) {
-    __ Move(dst, src1);
-    return true;
-  }
-
-  Label other, compare_not_equal;
-  FPURegister left, right;
-  if (kind == MaxMinKind::kMin) {
-    left = src1;
-    right = src2;
-  } else {
-    left = src2;
-    right = src1;
-  }
-
-  __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
-  // Left and right hand side are equal, check for -0 vs. +0.
-  __ FmoveLow(t8, src1);
-  __ dsll32(t8, t8, 0);
-  __ Branch(&other, eq, t8, Operand(0x8000000000000000));
-  __ Move_s(dst, right);
-  __ Branch(equal);
-  __ bind(&other);
-  __ Move_s(dst, left);
-  __ Branch(equal);
-  __ bind(&compare_not_equal);
-  return false;
-}
-
-#undef __
-
-void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
-                                   FPURegister src2, Label* nan) {
-  if (nan) {
-    BranchF64(nullptr, nan, eq, src1, src2);
-  }
-  if (kArchVariant >= kMips64r6) {
-    min_d(dst, src1, src2);
-  } else {
-    Label skip;
-    if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
-      if (dst.is(src1)) {
-        BranchF64(&skip, nullptr, le, src1, src2);
-        Move_d(dst, src2);
-      } else if (dst.is(src2)) {
-        BranchF64(&skip, nullptr, ge, src1, src2);
-        Move_d(dst, src1);
-      } else {
-        Label right;
-        BranchF64(&right, nullptr, gt, src1, src2);
-        Move_d(dst, src1);
-        Branch(&skip);
-        bind(&right);
-        Move_d(dst, src2);
-      }
-    }
-    bind(&skip);
-  }
-}
-
-void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
-                                   FPURegister src2, Label* nan) {
-  if (nan) {
-    BranchF64(nullptr, nan, eq, src1, src2);
-  }
-  if (kArchVariant >= kMips64r6) {
-    max_d(dst, src1, src2);
-  } else {
-    Label skip;
-    if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
-      if (dst.is(src1)) {
-        BranchF64(&skip, nullptr, ge, src1, src2);
-        Move_d(dst, src2);
-      } else if (dst.is(src2)) {
-        BranchF64(&skip, nullptr, le, src1, src2);
-        Move_d(dst, src1);
-      } else {
-        Label right;
-        BranchF64(&right, nullptr, lt, src1, src2);
-        Move_d(dst, src1);
-        Branch(&skip);
-        bind(&right);
-        Move_d(dst, src2);
-      }
-    }
-    bind(&skip);
-  }
-}
-
-void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
-                                   FPURegister src2, Label* nan) {
-  if (nan) {
-    BranchF32(nullptr, nan, eq, src1, src2);
-  }
-  if (kArchVariant >= kMips64r6) {
-    min_s(dst, src1, src2);
-  } else {
-    Label skip;
-    if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
-      if (dst.is(src1)) {
-        BranchF32(&skip, nullptr, le, src1, src2);
-        Move_s(dst, src2);
-      } else if (dst.is(src2)) {
-        BranchF32(&skip, nullptr, ge, src1, src2);
-        Move_s(dst, src1);
-      } else {
-        Label right;
-        BranchF32(&right, nullptr, gt, src1, src2);
-        Move_s(dst, src1);
-        Branch(&skip);
-        bind(&right);
-        Move_s(dst, src2);
-      }
-    }
-    bind(&skip);
-  }
-}
-
-void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
-                                   FPURegister src2, Label* nan) {
-  if (nan) {
-    BranchF32(nullptr, nan, eq, src1, src2);
-  }
-  if (kArchVariant >= kMips64r6) {
-    max_s(dst, src1, src2);
-  } else {
-    Label skip;
-    if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
-      if (dst.is(src1)) {
-        BranchF32(&skip, nullptr, ge, src1, src2);
-        Move_s(dst, src2);
-      } else if (dst.is(src2)) {
-        BranchF32(&skip, nullptr, le, src1, src2);
-        Move_s(dst, src1);
-      } else {
-        Label right;
-        BranchF32(&right, nullptr, lt, src1, src2);
-        Move_s(dst, src1);
-        Branch(&skip);
-        bind(&right);
-        Move_s(dst, src2);
-      }
-    }
-    bind(&skip);
-  }
-}
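Why the deleted helpers special-case equality: IEEE comparison reports
-0.0 == +0.0, yet min must return -0.0 and max must return +0.0, so the code
inspects the raw sign bit (the 0x8000000000000000 test). A scalar sketch of
the min case:

    #include <cmath>

    double MinWithSignedZero(double a, double b) {
      if (a == b) return std::signbit(a) ? a : b;  // prefer -0.0 over +0.0
      return a < b ? a : b;  // the real helpers route NaNs to a separate label
    }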
 
 void MacroAssembler::Clz(Register rd, Register rs) {
   clz(rd, rs);
@@ -4190,17 +4040,16 @@
   or_(dst, dst, scratch);
 }
 
-
-void MacroAssembler::DebugBreak() {
-  PrepareCEntryArgs(0);
-  PrepareCEntryFunction(
-      ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
-  CEntryStub ces(isolate(), 1);
-  DCHECK(AllowThisStubCall(&ces));
-  Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+  // Check whether we need to drop frames to restart a function on the stack.
+  ExternalReference restart_fp =
+      ExternalReference::debug_restart_fp_address(isolate());
+  li(a1, Operand(restart_fp));
+  ld(a1, MemOperand(a1));
+  Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+       ne, a1, Operand(zero_reg));
 }
 
-
 // ---------------------------------------------------------------------------
 // Exception handling.
 
@@ -4472,111 +4321,6 @@
   Daddu(result, result, Operand(kHeapObjectTag));
 }
 
-void MacroAssembler::AllocateTwoByteString(Register result,
-                                           Register length,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  dsll(scratch1, length, 1);  // Length in bytes, not chars.
-  daddiu(scratch1, scratch1,
-       kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
-  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
-  // Allocate two-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  InitializeNewString(result,
-                      length,
-                      Heap::kStringMapRootIndex,
-                      scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
-                                           Register scratch1, Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string
-  // while observing object alignment.
-  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  DCHECK(kCharSize == 1);
-  daddiu(scratch1, length,
-      kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
-  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
-  // Allocate one-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
-                                               Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-  InitializeNewString(result,
-                      length,
-                      Heap::kConsStringMapRootIndex,
-                      scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
-                                                 Register length,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result,
-                      length,
-                      Heap::kSlicedStringMapRootIndex,
-                      scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
-                                                 Register length,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
                                                      Label* not_unique_name) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -4657,76 +4401,6 @@
   Branch(&loop, ult, current_address, Operand(end_address));
 }
 
-void MacroAssembler::CheckFastObjectElements(Register map,
-                                             Register scratch,
-                                             Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  Branch(fail, ls, scratch,
-         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-  Branch(fail, hi, scratch,
-         Operand(Map::kMaximumBitField2FastHoleyElementValue));
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
-                                          Register scratch,
-                                          Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  Branch(fail, hi, scratch,
-         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
-                                                 Register key_reg,
-                                                 Register elements_reg,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* fail,
-                                                 int elements_offset) {
-  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2));
-  Label smi_value, done;
-
-  // Handle smi values specially.
-  JumpIfSmi(value_reg, &smi_value);
-
-  // Ensure that the object is a heap number.
-  CheckMap(value_reg,
-           scratch1,
-           Heap::kHeapNumberMapRootIndex,
-           fail,
-           DONT_DO_SMI_CHECK);
-
-  // Double value, turn potential sNaN into qNaN.
-  DoubleRegister double_result = f0;
-  DoubleRegister double_scratch = f2;
-
-  ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-  Branch(USE_DELAY_SLOT, &done);  // Canonicalization is one instruction.
-  FPUCanonicalizeNaN(double_result, double_result);
-
-  bind(&smi_value);
-  // Untag and transfer.
-  dsrl32(scratch1, value_reg, 0);
-  mtc1(scratch1, double_scratch);
-  cvt_d_w(double_result, double_scratch);
-
-  bind(&done);
-  Daddu(scratch1, elements_reg,
-      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
-              elements_offset));
-  dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
-  Daddu(scratch1, scratch1, scratch2);
-  // scratch1 is now effective address of the double element.
-  sdc1(double_result, MemOperand(scratch1, 0));
-}
-
 void MacroAssembler::SubNanPreservePayloadAndSign_s(FPURegister fd,
                                                     FPURegister fs,
                                                     FPURegister ft) {
@@ -5076,17 +4750,15 @@
   }
 }
 
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
-                                             const ParameterCount& expected,
-                                             const ParameterCount& actual) {
-  Label skip_flooding;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  li(t0, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual) {
+  Label skip_hook;
+  ExternalReference debug_hook_active =
+      ExternalReference::debug_hook_on_function_call_address(isolate());
+  li(t0, Operand(debug_hook_active));
   lb(t0, MemOperand(t0));
-  Branch(&skip_flooding, lt, t0, Operand(StepIn));
+  Branch(&skip_hook, eq, t0, Operand(zero_reg));
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -5103,7 +4775,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    CallRuntime(Runtime::kDebugOnFunctionCall);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -5117,7 +4789,7 @@
       SmiUntag(expected.reg());
     }
   }
-  bind(&skip_flooding);
+  bind(&skip_hook);
 }
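The CheckDebugHook replacement collapses the old step-action comparison into a
single byte test: one load from debug_hook_on_function_call_address and a
branch on zero. A minimal C++ model of the fast path, with the flag and the
runtime entry as illustrative stand-ins for the real external references:

    // Sketch only: models the branch the new assembly emits.
    static char debug_hook_on_function_call;  // byte flag, assumed layout

    void CheckDebugHookModel(void (*runtime_debug_on_function_call)()) {
      if (debug_hook_on_function_call == 0) return;  // skip_hook
      // Slow path: spill new.target and the argument count, push the
      // function twice, call Runtime::kDebugOnFunctionCall, then restore.
      runtime_debug_on_function_call();
    }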
 
 
@@ -5131,8 +4803,8 @@
   DCHECK(function.is(a1));
   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
 
-  if (call_wrapper.NeedsDebugStepCheck()) {
-    FloodFunctionIfStepping(function, new_target, expected, actual);
+  if (call_wrapper.NeedsDebugHookCheck()) {
+    CheckDebugHook(function, new_target, expected, actual);
   }
 
   // Clear the new.target register if not given.
@@ -5255,32 +4927,6 @@
   bind(&done);
 }
 
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
-                                             Register scratch, Label* miss) {
-  // Get the prototype or initial map from the function.
-  ld(result,
-     FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // If the prototype or initial map is the hole, don't return it and
-  // simply miss the cache instead. This will allow us to allocate a
-  // prototype object on-demand in the runtime system.
-  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
-  Branch(miss, eq, result, Operand(t8));
-
-  // If the function does not have an initial map, we're done.
-  Label done;
-  GetObjectType(result, scratch, scratch);
-  Branch(&done, ne, scratch, Operand(MAP_TYPE));
-
-  // Get the prototype from the initial map.
-  ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
-  // All done.
-  bind(&done);
-}
-
-
 void MacroAssembler::GetObjectType(Register object,
                                    Register map,
                                    Register type_reg) {
@@ -5958,27 +5604,6 @@
   }
 }
 
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind,
-    ElementsKind transitioned_kind,
-    Register map_in_out,
-    Register scratch,
-    Label* no_map_match) {
-  DCHECK(IsFastElementsKind(expected_kind));
-  DCHECK(IsFastElementsKind(transitioned_kind));
-
-  // Check that the function's map is the same as the expected cached map.
-  ld(scratch, NativeContextMemOperand());
-  ld(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
-  Branch(no_map_match, ne, map_in_out, Operand(at));
-
-  // Use the transitioned cached map.
-  ld(map_in_out,
-     ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
   ld(dst, NativeContextMemOperand());
   ld(dst, ContextMemOperand(dst, index));
@@ -6001,7 +5626,7 @@
 }
 
 void MacroAssembler::StubPrologue(StackFrame::Type type) {
-  li(at, Operand(Smi::FromInt(type)));
+  li(at, Operand(StackFrame::TypeToMarker(type)));
   PushCommonFrame(at);
 }
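Every frame-type marker in this file moves from Smi::FromInt to
StackFrame::TypeToMarker. On MIPS64 a Smi keeps its payload in the upper 32
bits, so the old markers were full 64-bit immediates; the new marker is a
small int32 pattern that still cannot be confused with a heap pointer. A
sketch of the difference (the exact TypeToMarker bit layout lives in
frames.h, not in this patch, so treat it as assumed):

    // Old encoding: Smi::FromInt(type) on a 64-bit-Smi platform.
    int64_t OldMarker(int type) { return static_cast<int64_t>(type) << 32; }
    // New encoding: a small tagged int32, loadable with a short li sequence.
    // int32_t marker = StackFrame::TypeToMarker(type);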
 
@@ -6016,7 +5641,7 @@
     Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
     nop(Assembler::CODE_AGE_MARKER_NOP);
     // Load the stub address to t9 and call it,
-    // GetCodeAgeAndParity() extracts the stub address from this instruction.
+    // GetCodeAge() extracts the stub address from this instruction.
     li(t9,
        Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
        ADDRESS_LOAD);
@@ -6032,10 +5657,10 @@
   }
 }
 
-void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
   ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  ld(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
-  ld(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+  ld(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+  ld(vector, FieldMemOperand(vector, Cell::kValueOffset));
 }
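EmitLoadFeedbackVector reflects the feedback vector's new home: it hangs off
the function through a Cell instead of living in a LiteralsArray. The three
dependent loads, written as plain C++ over illustrative struct layouts
(field names come from the patch, the layouts are assumed):

    struct Object;
    struct Cell { Object* value; };                // Cell::kValueOffset
    struct JSFunction { Cell* feedback_vector; };  // kFeedbackVectorOffset
    struct Frame { JSFunction* function; };        // kFunctionOffset

    Object* LoadFeedbackVectorModel(const Frame* fp) {
      JSFunction* fn = fp->function;     // ld from the frame
      Cell* cell = fn->feedback_vector;  // ld from the function
      return cell->value;                // ld from the cell
    }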
 
 
@@ -6061,7 +5686,7 @@
   stack_offset -= kPointerSize;
   sd(fp, MemOperand(sp, stack_offset));
   stack_offset -= kPointerSize;
-  li(t9, Operand(Smi::FromInt(type)));
+  li(t9, Operand(StackFrame::TypeToMarker(type)));
   sd(t9, MemOperand(sp, stack_offset));
   if (type == StackFrame::INTERNAL) {
     DCHECK_EQ(stack_offset, kPointerSize);
@@ -6118,7 +5743,7 @@
   daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
   sd(ra, MemOperand(sp, 4 * kPointerSize));
   sd(fp, MemOperand(sp, 3 * kPointerSize));
-  li(at, Operand(Smi::FromInt(frame_type)));
+  li(at, Operand(StackFrame::TypeToMarker(frame_type)));
   sd(at, MemOperand(sp, 2 * kPointerSize));
   // Set up new frame pointer.
   daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
@@ -6216,22 +5841,6 @@
   daddiu(sp, sp, 2 * kPointerSize);
 }
 
-
-void MacroAssembler::InitializeNewString(Register string,
-                                         Register length,
-                                         Heap::RootListIndex map_index,
-                                         Register scratch1,
-                                         Register scratch2) {
-  // dsll(scratch1, length, kSmiTagSize);
-  dsll32(scratch1, length, 0);
-  LoadRoot(scratch2, map_index);
-  sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
-  li(scratch1, Operand(String::kEmptyHashField));
-  sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
-  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
-}
-
-
 int MacroAssembler::ActivationFrameAlignment() {
 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
   // Running on the real platform. Use the alignment as mandated by the local
@@ -6367,15 +5976,6 @@
   SmiUntag(dst, src);
 }
 
-
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
-                                          Register src,
-                                          Label* non_smi_case) {
-  // DCHECK(!dst.is(src));
-  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
-  SmiUntag(dst, src);
-}
-
 void MacroAssembler::JumpIfSmi(Register value,
                                Label* smi_label,
                                Register scratch,
@@ -6580,6 +6180,179 @@
                                                scratch2, failure);
 }
 
+void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
+                                FPURegister src2, Label* out_of_line) {
+  if (src1.is(src2)) {
+    Move_s(dst, src1);
+    return;
+  }
+
+  // Check if one of operands is NaN.
+  BranchF32(nullptr, out_of_line, eq, src1, src2);
+
+  if (kArchVariant >= kMips64r6) {
+    max_s(dst, src1, src2);
+  } else {
+    Label return_left, return_right, done;
+
+    BranchF32(&return_right, nullptr, lt, src1, src2);
+    BranchF32(&return_left, nullptr, lt, src2, src1);
+
+    // Operands are equal, but check for +/-0.
+    mfc1(t8, src1);
+    dsll32(t8, t8, 0);
+    Branch(&return_left, eq, t8, Operand(zero_reg));
+    Branch(&return_right);
+
+    bind(&return_right);
+    if (!src2.is(dst)) {
+      Move_s(dst, src2);
+    }
+    Branch(&done);
+
+    bind(&return_left);
+    if (!src1.is(dst)) {
+      Move_s(dst, src1);
+    }
+
+    bind(&done);
+  }
+}
+
+void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+                                         FPURegister src2) {
+  add_s(dst, src1, src2);
+}
+
+void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
+                                FPURegister src2, Label* out_of_line) {
+  if (src1.is(src2)) {
+    Move_s(dst, src1);
+    return;
+  }
+
+  // Check if one of operands is NaN.
+  BranchF32(nullptr, out_of_line, eq, src1, src2);
+
+  if (kArchVariant >= kMips64r6) {
+    min_s(dst, src1, src2);
+  } else {
+    Label return_left, return_right, done;
+
+    BranchF32(&return_left, nullptr, lt, src1, src2);
+    BranchF32(&return_right, nullptr, lt, src2, src1);
+
+    // Left equals right => check for -0.
+    mfc1(t8, src1);
+    dsll32(t8, t8, 0);
+    Branch(&return_right, eq, t8, Operand(zero_reg));
+    Branch(&return_left);
+
+    bind(&return_right);
+    if (!src2.is(dst)) {
+      Move_s(dst, src2);
+    }
+    Branch(&done);
+
+    bind(&return_left);
+    if (!src1.is(dst)) {
+      Move_s(dst, src1);
+    }
+
+    bind(&done);
+  }
+}
+
+void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+                                         FPURegister src2) {
+  add_s(dst, src1, src2);
+}
+
+void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1,
+                                FPURegister src2, Label* out_of_line) {
+  if (src1.is(src2)) {
+    Move_d(dst, src1);
+    return;
+  }
+
+  // Check if one of operands is NaN.
+  BranchF64(nullptr, out_of_line, eq, src1, src2);
+
+  if (kArchVariant >= kMips64r6) {
+    max_d(dst, src1, src2);
+  } else {
+    Label return_left, return_right, done;
+
+    BranchF64(&return_right, nullptr, lt, src1, src2);
+    BranchF64(&return_left, nullptr, lt, src2, src1);
+
+    // Left equals right => check for -0.
+    dmfc1(t8, src1);
+    Branch(&return_left, eq, t8, Operand(zero_reg));
+    Branch(&return_right);
+
+    bind(&return_right);
+    if (!src2.is(dst)) {
+      Move_d(dst, src2);
+    }
+    Branch(&done);
+
+    bind(&return_left);
+    if (!src1.is(dst)) {
+      Move_d(dst, src1);
+    }
+
+    bind(&done);
+  }
+}
+
+void MacroAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
+                                         FPURegister src2) {
+  add_d(dst, src1, src2);
+}
+
+void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1,
+                                FPURegister src2, Label* out_of_line) {
+  if (src1.is(src2)) {
+    Move_d(dst, src1);
+    return;
+  }
+
+  // Check if one of operands is NaN.
+  BranchF64(nullptr, out_of_line, eq, src1, src2);
+
+  if (kArchVariant >= kMips64r6) {
+    min_d(dst, src1, src2);
+  } else {
+    Label return_left, return_right, done;
+
+    BranchF64(&return_left, nullptr, lt, src1, src2);
+    BranchF64(&return_right, nullptr, lt, src2, src1);
+
+    // Left equals right => check for -0.
+    dmfc1(t8, src1);
+    Branch(&return_right, eq, t8, Operand(zero_reg));
+    Branch(&return_left);
+
+    bind(&return_right);
+    if (!src2.is(dst)) {
+      Move_d(dst, src2);
+    }
+    Branch(&done);
+
+    bind(&return_left);
+    if (!src1.is(dst)) {
+      Move_d(dst, src1);
+    }
+
+    bind(&done);
+  }
+}
+
+void MacroAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
+                                         FPURegister src2) {
+  add_d(dst, src1, src2);
+}
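On pre-r6 variants these helpers decide the ordered cases with two branches,
settle the equal-operands case by inspecting the raw sign bit (mfc1 plus
dsll32 for singles, dmfc1 for doubles), and defer NaNs to the out-of-line
stubs, where add_s/add_d produce a quiet NaN. A compact C++ model of the
Float32Max fallback, assuming it is meant to match r6 MAX.S on signed zeros:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    float Float32MaxModel(float src1, float src2) {
      if (std::isnan(src1) || std::isnan(src2)) {
        return src1 + src2;            // out-of-line path: quiet NaN
      }
      if (src1 < src2) return src2;    // BranchF32(..., lt, src1, src2)
      if (src2 < src1) return src1;    // BranchF32(..., lt, src2, src1)
      // Equal operands: all-zero bits means src1 is +0, so keep it;
      // otherwise src1 is -0 (or the values are equal anyway).
      uint32_t bits;
      std::memcpy(&bits, &src1, sizeof(bits));
      return bits == 0 ? src1 : src2;
    }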
 
 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
     Register first, Register second, Register scratch1, Register scratch2,
@@ -6595,18 +6368,6 @@
   Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
 }
 
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
-                                                              Register scratch,
-                                                              Label* failure) {
-  const int kFlatOneByteStringMask =
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatOneByteStringTag =
-      kStringTag | kOneByteStringTag | kSeqStringTag;
-  And(scratch, type, Operand(kFlatOneByteStringMask));
-  Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
-}
-
 static const int kRegisterPassedArguments = 8;
 
 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -7042,40 +6803,6 @@
   return no_reg;
 }
 
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
-    Register object,
-    Register scratch0,
-    Register scratch1,
-    Label* found) {
-  DCHECK(!scratch1.is(scratch0));
-  Factory* factory = isolate()->factory();
-  Register current = scratch0;
-  Label loop_again, end;
-
-  // Scratch contained elements pointer.
-  Move(current, object);
-  ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
-  ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
-  Branch(&end, eq, current, Operand(factory->null_value()));
-
-  // Loop based on the map going up the prototype chain.
-  bind(&loop_again);
-  ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
-  lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
-  Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
-  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
-  DecodeField<Map::ElementsKindBits>(scratch1);
-  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
-  ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
-  Branch(&loop_again, ne, current, Operand(factory->null_value()));
-
-  bind(&end);
-}
-
-
 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                 Register reg5, Register reg6, Register reg7, Register reg8,
                 Register reg9, Register reg10) {
diff --git a/src/mips64/macro-assembler-mips64.h b/src/mips64/macro-assembler-mips64.h
index 5a1cf27..bfb1d52 100644
--- a/src/mips64/macro-assembler-mips64.h
+++ b/src/mips64/macro-assembler-mips64.h
@@ -236,6 +236,13 @@
               Heap::RootListIndex index,
               BranchDelaySlot bdslot = PROTECT);
 
+// Number of instructions needed to calculate a switch table entry address.
+#ifdef _MIPS_ARCH_MIPS64R6
+  static const int kSwitchTablePrologueSize = 6;
+#else
+  static const int kSwitchTablePrologueSize = 11;
+#endif
+
   // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
   // functor/function with 'Label *func(size_t index)' declaration.
   template <typename Func>
@@ -337,17 +344,6 @@
   void Movt(Register rd, Register rs, uint16_t cc = 0);
   void Movf(Register rd, Register rs, uint16_t cc = 0);
 
-  // Min, Max macros.
-  // On pre-r6 these functions may modify at and t8 registers.
-  void MinNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
-                     Label* nan = nullptr);
-  void MaxNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
-                     Label* nan = nullptr);
-  void MinNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
-                     Label* nan = nullptr);
-  void MaxNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
-                     Label* nan = nullptr);
-
   void Clz(Register rd, Register rs);
 
   // Jump unconditionally to given label.
@@ -592,32 +588,6 @@
   void FastAllocate(Register object_size, Register result, Register result_new,
                     Register scratch, AllocationFlags flags);
 
-  void AllocateTwoByteString(Register result,
-                             Register length,
-                             Register scratch1,
-                             Register scratch2,
-                             Register scratch3,
-                             Label* gc_required);
-  void AllocateOneByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-  void AllocateTwoByteConsString(Register result,
-                                 Register length,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Label* gc_required);
-  void AllocateOneByteConsString(Register result, Register length,
-                                 Register scratch1, Register scratch2,
-                                 Label* gc_required);
-  void AllocateTwoByteSlicedString(Register result,
-                                   Register length,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Label* gc_required);
-  void AllocateOneByteSlicedString(Register result, Register length,
-                                   Register scratch1, Register scratch2,
-                                   Label* gc_required);
-
   // Allocates a heap number or jumps to the gc_required label if the young
   // space is full and a scavenge is needed. All registers are also clobbered
   // when control continues at the gc_required label.
@@ -944,10 +914,13 @@
   void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs,
                                       FPURegister ft);
 
-  void Madd_d(FPURegister fd,
-              FPURegister fr,
-              FPURegister fs,
-              FPURegister ft,
+  void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+  void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+  void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+              FPURegister scratch);
+  void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
               FPURegister scratch);
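Madd_d is reflowed and gains single-precision and Msub counterparts, each
taking an explicit FPU scratch register. On variants without a fused
instruction these macros presumably expand to a multiply into scratch
followed by an add or subtract; the operand order below follows the usual
MIPS madd/msub convention and is an assumption, not something this hunk
shows:

    // Sketch of the unfused expansion (assumed operand order).
    float MaddModel(float fr, float fs, float ft) {
      float scratch = fs * ft;  // mul_s(scratch, fs, ft)
      return fr + scratch;      // fd = fr + fs * ft
    }
    float MsubModel(float fr, float fs, float ft) {
      float scratch = fs * ft;  // mul_s(scratch, fs, ft)
      return scratch - fr;      // fd = fs * ft - fr
    }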
 
   // Wrapper functions for the different cmp/branch types.
@@ -1095,17 +1068,6 @@
     LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
   }
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the native context if the map in register
-  // map_in_out is the cached Array map in the native context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(
-      ElementsKind expected_kind,
-      ElementsKind transitioned_kind,
-      Register map_in_out,
-      Register scratch,
-      Label* no_map_match);
-
   void LoadNativeContextSlot(int index, Register dst);
 
   // Load the initial map from the global function. The registers
@@ -1138,9 +1100,10 @@
                           const ParameterCount& actual, InvokeFlag flag,
                           const CallWrapper& call_wrapper);
 
-  void FloodFunctionIfStepping(Register fun, Register new_target,
-                               const ParameterCount& expected,
-                               const ParameterCount& actual);
+  // On function call, call into the debugger if necessary.
+  void CheckDebugHook(Register fun, Register new_target,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
@@ -1171,12 +1134,9 @@
                         Register scratch,
                         Label* fail);
 
-  // -------------------------------------------------------------------------
-  // Debugger Support.
+  // Frame restart support.
+  void MaybeDropFrames();
 
-  void DebugBreak();
-
-  // -------------------------------------------------------------------------
   // Exception handling.
 
   // Push a new stack handler and link into stack handler chain.
@@ -1200,14 +1160,6 @@
   void GetMapConstructor(Register result, Register map, Register temp,
                          Register temp2);
 
-  // Try to get function prototype of a function and puts the value in
-  // the result register. Checks that the function really is a
-  // function and jumps to the miss label if the fast checks fail. The
-  // function register will be untouched; the other registers may be
-  // clobbered.
-  void TryGetFunctionPrototype(Register function, Register result,
-                               Register scratch, Label* miss);
-
   void GetObjectType(Register function,
                      Register map,
                      Register type_reg);
@@ -1217,29 +1169,6 @@
         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
   }
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map,
-                               Register scratch,
-                               Label* fail);
-
-  // Check if a map for a JSObject indicates that the object has fast smi only
-  // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map,
-                            Register scratch,
-                            Label* fail);
-
-  // Check to see if maybe_number can be stored as a double in
-  // FastDoubleElements. If it can, store it at the index specified by key in
-  // the FastDoubleElements array elements. Otherwise jump to fail.
-  void StoreNumberToDoubleElements(Register value_reg,
-                                   Register key_reg,
-                                   Register elements_reg,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Label* fail,
-                                   int elements_offset = 0);
-
   // Compare an object's map with the specified map and its transitioned
   // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
   // "branch_to" if the result of the comparison is "cond". If multiple map
@@ -1445,6 +1374,29 @@
     Ret(ge, overflow_check, Operand(zero_reg), bd);
   }
 
+  // Perform a floating-point min or max operation with the
+  // (IEEE-754-compatible) semantics of MIPS64's Release 6 MIN.fmt/MAX.fmt.
+  // Some cases, typically NaNs or +/-0.0, are expected to be rare and are
+  // handled in out-of-line code. The specific behaviour depends on supported
+  // instructions.
+  //
+  // If src1 and src2 are the same register, the result is simply a move. It
+  // is permitted for the result to alias either input register.
+  void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
+                  Label* out_of_line);
+  void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
+                  Label* out_of_line);
+  void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2,
+                  Label* out_of_line);
+  void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2,
+                  Label* out_of_line);
+
+  // Generate out-of-line cases for the macros above.
+  void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+  void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+  void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+  void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+
   // -------------------------------------------------------------------------
   // Runtime calls.
 
@@ -1706,10 +1658,6 @@
   // Source and destination can be the same register.
   void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
 
-  // Untag the source value into destination and jump if source is not a smi.
-  // Source and destination can be the same register.
-  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
   // Jump if the register contains a smi.
   void JumpIfSmi(Register value,
                  Label* smi_label,
@@ -1779,11 +1727,6 @@
       Register first_object_instance_type, Register second_object_instance_type,
       Register scratch1, Register scratch2, Label* failure);
 
-  // Check if instance type is sequential one-byte string and jump to label if
-  // it is not.
-  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
-                                                Label* failure);
-
   void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
 
   void EmitSeqStringSetCharCheck(Register string,
@@ -1848,7 +1791,7 @@
   void Prologue(bool code_pre_aging);
 
   // Load the feedback vector from a JavaScript frame.
-  void EmitLoadTypeFeedbackVector(Register vector);
+  void EmitLoadFeedbackVector(Register vector);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -1871,20 +1814,6 @@
                                        Register scratch_reg,
                                        Label* no_memento_found);
 
-  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
-                                         Register scratch_reg,
-                                         Label* memento_found) {
-    Label no_memento_found;
-    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
-                                    &no_memento_found);
-    Branch(memento_found);
-    bind(&no_memento_found);
-  }
-
-  // Jumps to found label if a prototype map has dictionary elements.
-  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
-                                        Register scratch1, Label* found);
-
   bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
 
  private:
@@ -1937,12 +1866,6 @@
                       InvokeFlag flag,
                       const CallWrapper& call_wrapper);
 
-  void InitializeNewString(Register string,
-                           Register length,
-                           Heap::RootListIndex map_index,
-                           Register scratch1,
-                           Register scratch2);
-
   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
   void InNewSpace(Register object, Register scratch,
                   Condition cond,  // ne for new space, eq otherwise.
@@ -2014,7 +1937,8 @@
   // Ensure that dd-ed labels following this instruction use 8-byte aligned
   // addresses.
   if (kArchVariant >= kMips64r6) {
-    BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 6);
+    BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 +
+                           kSwitchTablePrologueSize);
     // Opposite of Align(8) as we have an odd number of instructions in this case.
     if ((pc_offset() & 7) == 0) {
       nop();
@@ -2024,7 +1948,8 @@
     ld(at, MemOperand(at));
   } else {
     Label here;
-    BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 11);
+    BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 +
+                           kSwitchTablePrologueSize);
     Align(8);
     push(ra);
     bal(&here);
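Hoisting the magic constants 6 and 11 into kSwitchTablePrologueSize makes
the trampoline-pool blocking self-describing: each dd-ed jump-table entry is
a 64-bit address, i.e. two instruction slots, on top of the fixed prologue
that computes the entry address. The arithmetic both branches now share:

    // Instruction slots the trampoline pool is blocked for while emitting a
    // switch table (values taken from the patch).
    int BlockedInstructionSlots(size_t case_count, bool is_mips64r6) {
      const int kSwitchTablePrologueSize = is_mips64r6 ? 6 : 11;
      return static_cast<int>(case_count) * 2 + kSwitchTablePrologueSize;
    }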
diff --git a/src/mips64/simulator-mips64.cc b/src/mips64/simulator-mips64.cc
index 4a8e007..c0dab73 100644
--- a/src/mips64/simulator-mips64.cc
+++ b/src/mips64/simulator-mips64.cc
@@ -1626,20 +1626,92 @@
   }
 }
 
-
-void Simulator::TraceRegWr(int64_t value) {
+void Simulator::TraceRegWr(int64_t value, TraceType t) {
   if (::v8::internal::FLAG_trace_sim) {
-    SNPrintF(trace_buf_, "%016" PRIx64 " ", value);
+    union {
+      int64_t fmt_int64;
+      int32_t fmt_int32[2];
+      float fmt_float[2];
+      double fmt_double;
+    } v;
+    v.fmt_int64 = value;
+
+    switch (t) {
+      case WORD:
+        SNPrintF(trace_buf_, "%016" PRIx64 "    (%" PRId64 ")    int32:%" PRId32
+                             " uint32:%" PRIu32,
+                 v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]);
+        break;
+      case DWORD:
+        SNPrintF(trace_buf_, "%016" PRIx64 "    (%" PRId64 ")    int64:%" PRId64
+                             " uint64:%" PRIu64,
+                 value, icount_, value, value);
+        break;
+      case FLOAT:
+        SNPrintF(trace_buf_, "%016" PRIx64 "    (%" PRId64 ")    flt:%e",
+                 v.fmt_int64, icount_, v.fmt_float[0]);
+        break;
+      case DOUBLE:
+        SNPrintF(trace_buf_, "%016" PRIx64 "    (%" PRId64 ")    dbl:%e",
+                 v.fmt_int64, icount_, v.fmt_double);
+        break;
+      case FLOAT_DOUBLE:
+        SNPrintF(trace_buf_, "%016" PRIx64 "    (%" PRId64 ")    flt:%e dbl:%e",
+                 v.fmt_int64, icount_, v.fmt_float[0], v.fmt_double);
+        break;
+      case WORD_DWORD:
+        SNPrintF(trace_buf_,
+                 "%016" PRIx64 "    (%" PRId64 ")    int32:%" PRId32
+                 " uint32:%" PRIu32 " int64:%" PRId64 " uint64:%" PRIu64,
+                 v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0],
+                 v.fmt_int64, v.fmt_int64);
+        break;
+      default:
+        UNREACHABLE();
+    }
   }
 }
 
-
 // TODO(plind): consider making icount_ printing a flag option.
-void Simulator::TraceMemRd(int64_t addr, int64_t value) {
+void Simulator::TraceMemRd(int64_t addr, int64_t value, TraceType t) {
   if (::v8::internal::FLAG_trace_sim) {
-    SNPrintF(trace_buf_,
-             "%016" PRIx64 "  <-- [%016" PRIx64 " ]    (%" PRId64 " )", value,
-             addr, icount_);
+    union {
+      int64_t fmt_int64;
+      int32_t fmt_int32[2];
+      float fmt_float[2];
+      double fmt_double;
+    } v;
+    v.fmt_int64 = value;
+
+    switch (t) {
+      case WORD:
+        SNPrintF(trace_buf_, "%016" PRIx64 "  <-- [%016" PRIx64 "]    (%" PRId64
+                             ")    int32:%" PRId32 " uint32:%" PRIu32,
+                 v.fmt_int64, addr, icount_, v.fmt_int32[0], v.fmt_int32[0]);
+        break;
+      case DWORD:
+        SNPrintF(trace_buf_, "%016" PRIx64 "  <-- [%016" PRIx64 "]    (%" PRId64
+                             ")    int64:%" PRId64 " uint64:%" PRIu64,
+                 value, addr, icount_, value, value);
+        break;
+      case FLOAT:
+        SNPrintF(trace_buf_, "%016" PRIx64 "  <-- [%016" PRIx64 "]    (%" PRId64
+                             ")    flt:%e",
+                 v.fmt_int64, addr, icount_, v.fmt_float[0]);
+        break;
+      case DOUBLE:
+        SNPrintF(trace_buf_, "%016" PRIx64 "  <-- [%016" PRIx64 "]    (%" PRId64
+                             ")    dbl:%e",
+                 v.fmt_int64, addr, icount_, v.fmt_double);
+        break;
+      case FLOAT_DOUBLE:
+        SNPrintF(trace_buf_, "%016" PRIx64 "  <-- [%016" PRIx64 "]    (%" PRId64
+                             ")    flt:%e dbl:%e",
+                 v.fmt_int64, addr, icount_, v.fmt_float[0], v.fmt_double);
+        break;
+      default:
+        UNREACHABLE();
+    }
   }
 }
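Both trace helpers now reinterpret the 64-bit register value through an
anonymous union so one value can be printed as int32, int64, float, or
double without reloading it. A standalone sketch of the same pun; treating
element [0] as the low word assumes the little-endian hosts the simulator
targets:

    #include <cstdint>
    #include <cstdio>

    void PrintAllViews(int64_t value) {
      union {
        int64_t fmt_int64;
        int32_t fmt_int32[2];
        float fmt_float[2];
        double fmt_double;
      } v;
      v.fmt_int64 = value;
      std::printf("int32:%d flt:%e dbl:%e\n", v.fmt_int32[0], v.fmt_float[0],
                  v.fmt_double);
    }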
 
@@ -1648,22 +1720,27 @@
   if (::v8::internal::FLAG_trace_sim) {
     switch (t) {
       case BYTE:
-        SNPrintF(trace_buf_, "               %02x --> [%016" PRIx64 " ]",
-                 static_cast<int8_t>(value), addr);
+        SNPrintF(trace_buf_, "               %02" PRIx8 " --> [%016" PRIx64
+                             "]    (%" PRId64 ")",
+                 static_cast<uint8_t>(value), addr, icount_);
         break;
       case HALF:
-        SNPrintF(trace_buf_, "            %04x --> [%016" PRIx64 " ]",
-                 static_cast<int16_t>(value), addr);
+        SNPrintF(trace_buf_, "            %04" PRIx16 " --> [%016" PRIx64
+                             "]    (%" PRId64 ")",
+                 static_cast<uint16_t>(value), addr, icount_);
         break;
       case WORD:
-        SNPrintF(trace_buf_, "        %08x --> [%016" PRIx64 " ]",
-                 static_cast<int32_t>(value), addr);
+        SNPrintF(trace_buf_,
+                 "        %08" PRIx32 " --> [%016" PRIx64 "]    (%" PRId64 ")",
+                 static_cast<uint32_t>(value), addr, icount_);
         break;
       case DWORD:
         SNPrintF(trace_buf_,
-                 "%016" PRIx64 "  --> [%016" PRIx64 " ]    (%" PRId64 " )",
+                 "%016" PRIx64 "  --> [%016" PRIx64 "]    (%" PRId64 ")",
                  value, addr, icount_);
         break;
+      default:
+        UNREACHABLE();
     }
   }
 }
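The TraceMemWr rewrite is not purely cosmetic: casting to the signed type
sign-extends through integer promotion, smearing f's across the printed
field for negative bytes, while the unsigned cast plus the <cinttypes>
width macros keeps only the stored bits. A self-contained demonstration:

    #include <cinttypes>
    #include <cstdio>

    int main() {
      int64_t value = -1;
      // Old form: int8_t(-1) promotes to int, so a hex print shows
      // ffffffff instead of the single byte that was stored.
      unsigned int promoted =
          static_cast<unsigned int>(static_cast<int8_t>(value));
      std::printf("old: %02x\n", promoted);                            // ffffffff
      std::printf("new: %02" PRIx8 "\n", static_cast<uint8_t>(value)); // ff
      return 0;
    }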
@@ -1671,7 +1748,7 @@
 
 // TODO(plind): sign-extend and zero-extend are not implemented properly
 // in all the ReadXX functions; I don't think reinterpret_cast does it.
-int32_t Simulator::ReadW(int64_t addr, Instruction* instr) {
+int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) {
   if (addr >=0 && addr < 0x400) {
     // This has to be a NULL-dereference; drop into the debugger.
     PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
@@ -1681,7 +1758,7 @@
   }
   if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
     int32_t* ptr = reinterpret_cast<int32_t*>(addr);
-    TraceMemRd(addr, static_cast<int64_t>(*ptr));
+    TraceMemRd(addr, static_cast<int64_t>(*ptr), t);
     return *ptr;
   }
   PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
@@ -1701,7 +1778,7 @@
   }
   if ((addr & 0x3) == 0 || kArchVariant == kMips64r6) {
     uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
-    TraceMemRd(addr, static_cast<int64_t>(*ptr));
+    TraceMemRd(addr, static_cast<int64_t>(*ptr), WORD);
     return *ptr;
   }
   PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
@@ -2455,98 +2532,104 @@
           result = lower;
           break;
       }
-      set_fpu_register_float(fd_reg(), result);
+      SetFPUFloatResult(fd_reg(), result);
       if (result != fs) {
         set_fcsr_bit(kFCSRInexactFlagBit, true);
       }
       break;
     }
     case ADD_S:
-      set_fpu_register_float(
+      SetFPUFloatResult(
           fd_reg(),
           FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; },
                                  fs, ft));
       break;
     case SUB_S:
-      set_fpu_register_float(
+      SetFPUFloatResult(
           fd_reg(),
           FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; },
                                  fs, ft));
       break;
     case MADDF_S:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_float(fd_reg(), fd + (fs * ft));
+      SetFPUFloatResult(fd_reg(), std::fma(fs, ft, fd));
       break;
     case MSUBF_S:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_float(fd_reg(), fd - (fs * ft));
+      SetFPUFloatResult(fd_reg(), std::fma(-fs, ft, fd));
       break;
     case MUL_S:
-      set_fpu_register_float(
+      SetFPUFloatResult(
           fd_reg(),
           FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; },
                                  fs, ft));
       break;
     case DIV_S:
-      set_fpu_register_float(
+      SetFPUFloatResult(
           fd_reg(),
           FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; },
                                  fs, ft));
       break;
     case ABS_S:
-      set_fpu_register_float(
-          fd_reg(),
-          FPUCanonalizeOperation([](float fs) { return FPAbs(fs); }, fs));
+      SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation(
+                                      [](float fs) { return FPAbs(fs); }, fs));
       break;
     case MOV_S:
-      set_fpu_register_float(fd_reg(), fs);
+      SetFPUFloatResult(fd_reg(), fs);
       break;
     case NEG_S:
-      set_fpu_register_float(
-          fd_reg(), FPUCanonalizeOperation([](float src) { return -src; },
-                                           KeepSign::yes, fs));
+      SetFPUFloatResult(fd_reg(),
+                        FPUCanonalizeOperation([](float src) { return -src; },
+                                               KeepSign::yes, fs));
       break;
     case SQRT_S:
-      set_fpu_register_float(
+      SetFPUFloatResult(
           fd_reg(),
           FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs));
       break;
     case RSQRT_S:
-      set_fpu_register_float(
+      SetFPUFloatResult(
           fd_reg(), FPUCanonalizeOperation(
                         [](float src) { return 1.0 / std::sqrt(src); }, fs));
       break;
     case RECIP_S:
-      set_fpu_register_float(
-          fd_reg(),
-          FPUCanonalizeOperation([](float src) { return 1.0 / src; }, fs));
+      SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation(
+                                      [](float src) { return 1.0 / src; }, fs));
       break;
     case C_F_D:
       set_fcsr_bit(fcsr_cc, false);
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_UN_D:
       set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_EQ_D:
       set_fcsr_bit(fcsr_cc, (fs == ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_UEQ_D:
       set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_OLT_D:
       set_fcsr_bit(fcsr_cc, (fs < ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_ULT_D:
       set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_OLE_D:
       set_fcsr_bit(fcsr_cc, (fs <= ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_ULE_D:
       set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case CVT_D_S:
-      set_fpu_register_double(fd_reg(), static_cast<double>(fs));
+      SetFPUDoubleResult(fd_reg(), static_cast<double>(fs));
       break;
     case CLASS_S: {  // Mips64r6 instruction
       // Convert float input to uint32_t for easier bit manipulation
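Switching MADDF.S/MSUBF.S (and the double variants later in this hunk) from
fd + (fs * ft) to std::fma is a behavioural fix: the r6 instruction fuses
the multiply-add and rounds once, while the old expression rounded twice. A
self-contained case where the two disagree:

    #include <cmath>
    #include <cstdio>

    int main() {
      // fs * ft == 1 + 2^-11 + 2^-24 exactly; rounding that product to
      // float drops the 2^-24 term, so the unfused sum collapses to zero.
      const float fs = 1.0f + 1.0f / 4096.0f;     // 1 + 2^-12
      const float ft = fs;
      const float fd = -(1.0f + 1.0f / 2048.0f);  // -(1 + 2^-11)
      float unfused = fd + fs * ft;        // old simulator: 0x0p+0
      float fused = std::fma(fs, ft, fd);  // MADDF.S semantics: 0x1p-24
      std::printf("unfused=%a fused=%a\n", unfused, fused);
      return 0;
    }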
@@ -2609,15 +2692,14 @@
       DCHECK(result != 0);
 
       fResult = bit_cast<float>(result);
-      set_fpu_register_float(fd_reg(), fResult);
-
+      SetFPUFloatResult(fd_reg(), fResult);
       break;
     }
     case CVT_L_S: {
       float rounded;
       int64_t result;
       round64_according_to_fcsr(fs, rounded, result, fs);
-      set_fpu_register(fd_reg(), result);
+      SetFPUResult(fd_reg(), result);
       if (set_fcsr_round64_error(fs, rounded)) {
         set_fpu_register_invalid_result64(fs, rounded);
       }
@@ -2627,7 +2709,7 @@
       float rounded;
       int32_t result;
       round_according_to_fcsr(fs, rounded, result, fs);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -2636,7 +2718,7 @@
     case TRUNC_W_S: {  // Truncate single to word (round towards 0).
       float rounded = trunc(fs);
       int32_t result = static_cast<int32_t>(rounded);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -2644,7 +2726,7 @@
     case TRUNC_L_S: {  // Mips64r2 instruction.
       float rounded = trunc(fs);
       int64_t result = static_cast<int64_t>(rounded);
-      set_fpu_register(fd_reg(), result);
+      SetFPUResult(fd_reg(), result);
       if (set_fcsr_round64_error(fs, rounded)) {
         set_fpu_register_invalid_result64(fs, rounded);
       }
@@ -2658,7 +2740,7 @@
         // round to the even one.
         result--;
       }
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -2673,7 +2755,7 @@
         result--;
       }
       int64_t i64 = static_cast<int64_t>(result);
-      set_fpu_register(fd_reg(), i64);
+      SetFPUResult(fd_reg(), i64);
       if (set_fcsr_round64_error(fs, rounded)) {
         set_fpu_register_invalid_result64(fs, rounded);
       }
@@ -2682,7 +2764,7 @@
     case FLOOR_L_S: {  // Mips64r2 instruction.
       float rounded = floor(fs);
       int64_t result = static_cast<int64_t>(rounded);
-      set_fpu_register(fd_reg(), result);
+      SetFPUResult(fd_reg(), result);
       if (set_fcsr_round64_error(fs, rounded)) {
         set_fpu_register_invalid_result64(fs, rounded);
       }
@@ -2692,7 +2774,7 @@
     {
       float rounded = std::floor(fs);
       int32_t result = static_cast<int32_t>(rounded);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -2701,7 +2783,7 @@
     {
       float rounded = std::ceil(fs);
       int32_t result = static_cast<int32_t>(rounded);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_invalid_result(fs, rounded);
       }
@@ -2709,7 +2791,7 @@
     case CEIL_L_S: {  // Mips64r2 instruction.
       float rounded = ceil(fs);
       int64_t result = static_cast<int64_t>(rounded);
-      set_fpu_register(fd_reg(), result);
+      SetFPUResult(fd_reg(), result);
       if (set_fcsr_round64_error(fs, rounded)) {
         set_fpu_register_invalid_result64(fs, rounded);
       }
@@ -2717,47 +2799,47 @@
     }
     case MINA:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_float(fd_reg(), FPUMinA(ft, fs));
+      SetFPUFloatResult(fd_reg(), FPUMinA(ft, fs));
       break;
     case MAXA:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_float(fd_reg(), FPUMaxA(ft, fs));
+      SetFPUFloatResult(fd_reg(), FPUMaxA(ft, fs));
       break;
     case MIN:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_float(fd_reg(), FPUMin(ft, fs));
+      SetFPUFloatResult(fd_reg(), FPUMin(ft, fs));
       break;
     case MAX:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_float(fd_reg(), FPUMax(ft, fs));
+      SetFPUFloatResult(fd_reg(), FPUMax(ft, fs));
       break;
     case SEL:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_float(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
+      SetFPUFloatResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
       break;
     case SELEQZ_C:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_float(fd_reg(), (ft_int & 0x1) == 0
-                                           ? get_fpu_register_float(fs_reg())
-                                           : 0.0);
+      SetFPUFloatResult(
+          fd_reg(),
+          (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg()) : 0.0);
       break;
     case SELNEZ_C:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_float(fd_reg(), (ft_int & 0x1) != 0
-                                           ? get_fpu_register_float(fs_reg())
-                                           : 0.0);
+      SetFPUFloatResult(
+          fd_reg(),
+          (ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg()) : 0.0);
       break;
     case MOVZ_C: {
       DCHECK(kArchVariant == kMips64r2);
       if (rt() == 0) {
-        set_fpu_register_float(fd_reg(), fs);
+        SetFPUFloatResult(fd_reg(), fs);
       }
       break;
     }
     case MOVN_C: {
       DCHECK(kArchVariant == kMips64r2);
       if (rt() != 0) {
-        set_fpu_register_float(fd_reg(), fs);
+        SetFPUFloatResult(fd_reg(), fs);
       }
       break;
     }
@@ -2768,10 +2850,10 @@
 
       if (instr_.Bit(16)) {  // Read Tf bit.
         // MOVT.D
-        if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
+        if (test_fcsr_bit(ft_cc)) SetFPUFloatResult(fd_reg(), fs);
       } else {
         // MOVF.D
-        if (!test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
+        if (!test_fcsr_bit(ft_cc)) SetFPUFloatResult(fd_reg(), fs);
       }
       break;
     }
@@ -2826,7 +2908,7 @@
           result = lower;
           break;
       }
-      set_fpu_register_double(fd_reg(), result);
+      SetFPUDoubleResult(fd_reg(), result);
       if (result != fs) {
         set_fcsr_bit(kFCSRInexactFlagBit, true);
       }
@@ -2834,27 +2916,27 @@
     }
     case SEL:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_double(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
+      SetFPUDoubleResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
       break;
     case SELEQZ_C:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_double(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0);
+      SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0);
       break;
     case SELNEZ_C:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_double(fd_reg(), (ft_int & 0x1) != 0 ? fs : 0.0);
+      SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) != 0 ? fs : 0.0);
       break;
     case MOVZ_C: {
       DCHECK(kArchVariant == kMips64r2);
       if (rt() == 0) {
-        set_fpu_register_double(fd_reg(), fs);
+        SetFPUDoubleResult(fd_reg(), fs);
       }
       break;
     }
     case MOVN_C: {
       DCHECK(kArchVariant == kMips64r2);
       if (rt() != 0) {
-        set_fpu_register_double(fd_reg(), fs);
+        SetFPUDoubleResult(fd_reg(), fs);
       }
       break;
     }
@@ -2864,115 +2946,121 @@
       ft_cc = get_fcsr_condition_bit(ft_cc);
       if (instr_.Bit(16)) {  // Read Tf bit.
         // MOVT.D
-        if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
+        if (test_fcsr_bit(ft_cc)) SetFPUDoubleResult(fd_reg(), fs);
       } else {
         // MOVF.D
-        if (!test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
+        if (!test_fcsr_bit(ft_cc)) SetFPUDoubleResult(fd_reg(), fs);
       }
       break;
     }
     case MINA:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_double(fd_reg(), FPUMinA(ft, fs));
+      SetFPUDoubleResult(fd_reg(), FPUMinA(ft, fs));
       break;
     case MAXA:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_double(fd_reg(), FPUMaxA(ft, fs));
+      SetFPUDoubleResult(fd_reg(), FPUMaxA(ft, fs));
       break;
     case MIN:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_double(fd_reg(), FPUMin(ft, fs));
+      SetFPUDoubleResult(fd_reg(), FPUMin(ft, fs));
       break;
     case MAX:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_double(fd_reg(), FPUMax(ft, fs));
+      SetFPUDoubleResult(fd_reg(), FPUMax(ft, fs));
       break;
     case ADD_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(),
           FPUCanonalizeOperation(
               [](double lhs, double rhs) { return lhs + rhs; }, fs, ft));
       break;
     case SUB_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(),
           FPUCanonalizeOperation(
               [](double lhs, double rhs) { return lhs - rhs; }, fs, ft));
       break;
     case MADDF_D:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_double(fd_reg(), fd + (fs * ft));
+      SetFPUDoubleResult(fd_reg(), std::fma(fs, ft, fd));
       break;
     case MSUBF_D:
       DCHECK(kArchVariant == kMips64r6);
-      set_fpu_register_double(fd_reg(), fd - (fs * ft));
+      SetFPUDoubleResult(fd_reg(), std::fma(-fs, ft, fd));
       break;
     case MUL_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(),
           FPUCanonalizeOperation(
               [](double lhs, double rhs) { return lhs * rhs; }, fs, ft));
       break;
     case DIV_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(),
           FPUCanonalizeOperation(
               [](double lhs, double rhs) { return lhs / rhs; }, fs, ft));
       break;
     case ABS_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(),
           FPUCanonalizeOperation([](double fs) { return FPAbs(fs); }, fs));
       break;
     case MOV_D:
-      set_fpu_register_double(fd_reg(), fs);
+      SetFPUDoubleResult(fd_reg(), fs);
       break;
     case NEG_D:
-      set_fpu_register_double(
-          fd_reg(), FPUCanonalizeOperation([](double src) { return -src; },
-                                           KeepSign::yes, fs));
+      SetFPUDoubleResult(fd_reg(),
+                         FPUCanonalizeOperation([](double src) { return -src; },
+                                                KeepSign::yes, fs));
       break;
     case SQRT_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(),
           FPUCanonalizeOperation([](double fs) { return std::sqrt(fs); }, fs));
       break;
     case RSQRT_D:
-      set_fpu_register_double(
+      SetFPUDoubleResult(
           fd_reg(), FPUCanonalizeOperation(
                         [](double fs) { return 1.0 / std::sqrt(fs); }, fs));
       break;
     case RECIP_D:
-      set_fpu_register_double(
-          fd_reg(),
-          FPUCanonalizeOperation([](double fs) { return 1.0 / fs; }, fs));
+      SetFPUDoubleResult(fd_reg(), FPUCanonalizeOperation(
+                                       [](double fs) { return 1.0 / fs; }, fs));
       break;
     case C_UN_D:
       set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_EQ_D:
       set_fcsr_bit(fcsr_cc, (fs == ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_UEQ_D:
       set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_OLT_D:
       set_fcsr_bit(fcsr_cc, (fs < ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_ULT_D:
       set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_OLE_D:
       set_fcsr_bit(fcsr_cc, (fs <= ft));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case C_ULE_D:
       set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     case CVT_W_D: {  // Convert double to word.
       double rounded;
       int32_t result;
       round_according_to_fcsr(fs, rounded, result, fs);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_word_invalid_result(fs, rounded);
       }
@@ -2987,7 +3075,7 @@
         // round to the even one.
         result--;
       }
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_invalid_result(fs, rounded);
       }
@@ -2996,7 +3084,7 @@
     {
       double rounded = trunc(fs);
       int32_t result = static_cast<int32_t>(rounded);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_invalid_result(fs, rounded);
       }
@@ -3005,7 +3093,7 @@
     {
       double rounded = std::floor(fs);
       int32_t result = static_cast<int32_t>(rounded);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_invalid_result(fs, rounded);
       }
@@ -3014,19 +3102,19 @@
     {
       double rounded = std::ceil(fs);
       int32_t result = static_cast<int32_t>(rounded);
-      set_fpu_register_word(fd_reg(), result);
+      SetFPUWordResult2(fd_reg(), result);
       if (set_fcsr_round_error(fs, rounded)) {
         set_fpu_register_invalid_result(fs, rounded);
       }
     } break;
     case CVT_S_D:  // Convert double to float (single).
-      set_fpu_register_float(fd_reg(), static_cast<float>(fs));
+      SetFPUFloatResult(fd_reg(), static_cast<float>(fs));
       break;
     case CVT_L_D: {  // Mips64r2: Truncate double to 64-bit long-word.
       double rounded;
       int64_t result;
       round64_according_to_fcsr(fs, rounded, result, fs);
-      set_fpu_register(fd_reg(), result);
+      SetFPUResult(fd_reg(), result);
       if (set_fcsr_round64_error(fs, rounded)) {
         set_fpu_register_invalid_result64(fs, rounded);
       }
@@ -3041,7 +3129,7 @@
         result--;
       }
       int64_t i64 = static_cast<int64_t>(result);
-      set_fpu_register(fd_reg(), i64);
+      SetFPUResult(fd_reg(), i64);
       if (set_fcsr_round64_error(fs, rounded)) {
         set_fpu_register_invalid_result64(fs, rounded);
       }
@@ -3050,7 +3138,7 @@
     case TRUNC_L_D: {  // Mips64r2 instruction.
       double rounded = trunc(fs);
       int64_t result = static_cast<int64_t>(rounded);
-      set_fpu_register(fd_reg(), result);
+      SetFPUResult(fd_reg(), result);
       if (set_fcsr_round64_error(fs, rounded)) {
         set_fpu_register_invalid_result64(fs, rounded);
       }
@@ -3059,7 +3147,7 @@
     case FLOOR_L_D: {  // Mips64r2 instruction.
       double rounded = floor(fs);
       int64_t result = static_cast<int64_t>(rounded);
-      set_fpu_register(fd_reg(), result);
+      SetFPUResult(fd_reg(), result);
       if (set_fcsr_round64_error(fs, rounded)) {
         set_fpu_register_invalid_result64(fs, rounded);
       }
@@ -3068,7 +3156,7 @@
     case CEIL_L_D: {  // Mips64r2 instruction.
       double rounded = ceil(fs);
       int64_t result = static_cast<int64_t>(rounded);
-      set_fpu_register(fd_reg(), result);
+      SetFPUResult(fd_reg(), result);
       if (set_fcsr_round64_error(fs, rounded)) {
         set_fpu_register_invalid_result64(fs, rounded);
       }
@@ -3135,12 +3223,12 @@
       DCHECK(result != 0);
 
       dResult = bit_cast<double>(result);
-      set_fpu_register_double(fd_reg(), dResult);
-
+      SetFPUDoubleResult(fd_reg(), dResult);
       break;
     }
     case C_F_D: {
       set_fcsr_bit(fcsr_cc, false);
+      TraceRegWr(test_fcsr_bit(fcsr_cc));
       break;
     }
     default:
@@ -3156,83 +3244,83 @@
   switch (instr_.FunctionFieldRaw()) {
     case CVT_S_W:  // Convert word to float (single).
       alu_out = get_fpu_register_signed_word(fs_reg());
-      set_fpu_register_float(fd_reg(), static_cast<float>(alu_out));
+      SetFPUFloatResult(fd_reg(), static_cast<float>(alu_out));
       break;
     case CVT_D_W:  // Convert word to double.
       alu_out = get_fpu_register_signed_word(fs_reg());
-      set_fpu_register_double(fd_reg(), static_cast<double>(alu_out));
+      SetFPUDoubleResult(fd_reg(), static_cast<double>(alu_out));
       break;
     case CMP_AF:
-      set_fpu_register_word(fd_reg(), 0);
+      SetFPUWordResult2(fd_reg(), 0);
       break;
     case CMP_UN:
       if (std::isnan(fs) || std::isnan(ft)) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult2(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult2(fd_reg(), 0);
       }
       break;
     case CMP_EQ:
       if (fs == ft) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult2(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult2(fd_reg(), 0);
       }
       break;
     case CMP_UEQ:
       if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult2(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult2(fd_reg(), 0);
       }
       break;
     case CMP_LT:
       if (fs < ft) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult2(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult2(fd_reg(), 0);
       }
       break;
     case CMP_ULT:
       if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult2(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult2(fd_reg(), 0);
       }
       break;
     case CMP_LE:
       if (fs <= ft) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult2(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult2(fd_reg(), 0);
       }
       break;
     case CMP_ULE:
       if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult2(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult2(fd_reg(), 0);
       }
       break;
     case CMP_OR:
       if (!std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult2(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult2(fd_reg(), 0);
       }
       break;
     case CMP_UNE:
       if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult2(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult2(fd_reg(), 0);
       }
       break;
     case CMP_NE:
       if (fs != ft) {
-        set_fpu_register_word(fd_reg(), -1);
+        SetFPUWordResult2(fd_reg(), -1);
       } else {
-        set_fpu_register_word(fd_reg(), 0);
+        SetFPUWordResult2(fd_reg(), 0);
       }
       break;
     default:
@@ -3248,83 +3336,83 @@
   switch (instr_.FunctionFieldRaw()) {
     case CVT_D_L:  // Mips32r2 instruction.
       i64 = get_fpu_register(fs_reg());
-      set_fpu_register_double(fd_reg(), static_cast<double>(i64));
+      SetFPUDoubleResult(fd_reg(), static_cast<double>(i64));
       break;
     case CVT_S_L:
       i64 = get_fpu_register(fs_reg());
-      set_fpu_register_float(fd_reg(), static_cast<float>(i64));
+      SetFPUFloatResult(fd_reg(), static_cast<float>(i64));
       break;
     case CMP_AF:
-      set_fpu_register(fd_reg(), 0);
+      SetFPUResult(fd_reg(), 0);
       break;
     case CMP_UN:
       if (std::isnan(fs) || std::isnan(ft)) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_EQ:
       if (fs == ft) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_UEQ:
       if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_LT:
       if (fs < ft) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_ULT:
       if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_LE:
       if (fs <= ft) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_ULE:
       if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_OR:
       if (!std::isnan(fs) && !std::isnan(ft)) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_UNE:
       if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     case CMP_NE:
       if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
-        set_fpu_register(fd_reg(), -1);
+        SetFPUResult(fd_reg(), -1);
       } else {
-        set_fpu_register(fd_reg(), 0);
+        SetFPUResult(fd_reg(), 0);
       }
       break;
     default:
@@ -3343,17 +3431,18 @@
     case CFC1:
       // At the moment only FCSR is supported.
       DCHECK(fs_reg() == kFCSRRegister);
-      set_register(rt_reg(), FCSR_);
+      SetResult(rt_reg(), FCSR_);
       break;
     case MFC1:
       set_register(rt_reg(),
                    static_cast<int64_t>(get_fpu_register_word(fs_reg())));
+      TraceRegWr(get_register(rt_reg()), WORD_DWORD);
       break;
     case DMFC1:
-      set_register(rt_reg(), get_fpu_register(fs_reg()));
+      SetResult(rt_reg(), get_fpu_register(fs_reg()));
       break;
     case MFHC1:
-      set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
+      SetResult(rt_reg(), get_fpu_register_hi_word(fs_reg()));
       break;
     case CTC1: {
       // At the moment only FCSR is supported.
@@ -3365,18 +3454,21 @@
         DCHECK(kArchVariant == kMips64r2);
         FCSR_ = reg & ~kFCSRNaN2008FlagMask;
       }
+      TraceRegWr(FCSR_);
       break;
     }
     case MTC1:
       // Hardware writes upper 32-bits to zero on mtc1.
       set_fpu_register_hi_word(fs_reg(), 0);
       set_fpu_register_word(fs_reg(), static_cast<int32_t>(rt()));
+      TraceRegWr(get_fpu_register(fs_reg()), FLOAT_DOUBLE);
       break;
     case DMTC1:
-      set_fpu_register(fs_reg(), rt());
+      SetFPUResult2(fs_reg(), rt());
       break;
     case MTHC1:
       set_fpu_register_hi_word(fs_reg(), static_cast<int32_t>(rt()));
+      TraceRegWr(get_fpu_register(fs_reg()), DOUBLE);
       break;
     case S:
       DecodeTypeRegisterSRsType();
@@ -3404,7 +3496,7 @@
       fr = get_fpu_register_float(fr_reg());
       fs = get_fpu_register_float(fs_reg());
       ft = get_fpu_register_float(ft_reg());
-      set_fpu_register_float(fd_reg(), fs * ft + fr);
+      SetFPUFloatResult(fd_reg(), fs * ft + fr);
       break;
     }
     case MSUB_S: {
@@ -3413,7 +3505,7 @@
       fr = get_fpu_register_float(fr_reg());
       fs = get_fpu_register_float(fs_reg());
       ft = get_fpu_register_float(ft_reg());
-      set_fpu_register_float(fd_reg(), fs * ft - fr);
+      SetFPUFloatResult(fd_reg(), fs * ft - fr);
       break;
     }
     case MADD_D: {
@@ -3422,7 +3514,7 @@
       fr = get_fpu_register_double(fr_reg());
       fs = get_fpu_register_double(fs_reg());
       ft = get_fpu_register_double(ft_reg());
-      set_fpu_register_double(fd_reg(), fs * ft + fr);
+      SetFPUDoubleResult(fd_reg(), fs * ft + fr);
       break;
     }
     case MSUB_D: {
@@ -3431,7 +3523,7 @@
       fr = get_fpu_register_double(fr_reg());
       fs = get_fpu_register_double(fs_reg());
       ft = get_fpu_register_double(ft_reg());
-      set_fpu_register_double(fd_reg(), fs * ft - fr);
+      SetFPUDoubleResult(fd_reg(), fs * ft - fr);
       break;
     }
     default:
@@ -3449,11 +3541,11 @@
   switch (instr_.FunctionFieldRaw()) {
     case SELEQZ_S:
       DCHECK(kArchVariant == kMips64r6);
-      set_register(rd_reg(), rt() == 0 ? rs() : 0);
+      SetResult(rd_reg(), rt() == 0 ? rs() : 0);
       break;
     case SELNEZ_S:
       DCHECK(kArchVariant == kMips64r6);
-      set_register(rd_reg(), rt() != 0 ? rs() : 0);
+      SetResult(rd_reg(), rt() != 0 ? rs() : 0);
       break;
     case JR: {
       int64_t next_pc = rs();
@@ -3636,10 +3728,10 @@
       } else {
         switch (sa()) {
           case MUL_OP:
-            set_register(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
+            SetResult(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
             break;
           case MUH_OP:
-            set_register(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
+            SetResult(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
             break;
           default:
             UNIMPLEMENTED_MIPS();
@@ -3657,10 +3749,10 @@
       } else {
         switch (sa()) {
           case MUL_OP:
-            set_register(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
+            SetResult(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
             break;
           case MUH_OP:
-            set_register(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
+            SetResult(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
             break;
           default:
             UNIMPLEMENTED_MIPS();
@@ -3675,10 +3767,10 @@
       } else {
         switch (sa()) {
           case MUL_OP:
-            set_register(rd_reg(), rs() * rt());
+            SetResult(rd_reg(), rs() * rt());
             break;
           case MUH_OP:
-            set_register(rd_reg(), MultiplyHighSigned(rs(), rt()));
+            SetResult(rd_reg(), MultiplyHighSigned(rs(), rt()));
             break;
           default:
             UNIMPLEMENTED_MIPS();
@@ -3711,16 +3803,16 @@
           switch (sa()) {
             case DIV_OP:
               if (rs() == int_min_value && rt() == -1) {
-                set_register(rd_reg(), int_min_value);
+                SetResult(rd_reg(), int_min_value);
               } else if (rt() != 0) {
-                set_register(rd_reg(), rs() / rt());
+                SetResult(rd_reg(), rs() / rt());
               }
               break;
             case MOD_OP:
               if (rs() == int_min_value && rt() == -1) {
-                set_register(rd_reg(), 0);
+                SetResult(rd_reg(), 0);
               } else if (rt() != 0) {
-                set_register(rd_reg(), rs() % rt());
+                SetResult(rd_reg(), rs() % rt());
               }
               break;
             default:
@@ -3741,12 +3833,12 @@
           switch (sa()) {
             case DIV_OP:
               if (rt_u_32 != 0) {
-                set_register(rd_reg(), rs_u_32 / rt_u_32);
+                SetResult(rd_reg(), rs_u_32 / rt_u_32);
               }
               break;
             case MOD_OP:
               if (rt_u() != 0) {
-                set_register(rd_reg(), rs_u_32 % rt_u_32);
+                SetResult(rd_reg(), rs_u_32 % rt_u_32);
               }
               break;
             default:
@@ -3770,12 +3862,12 @@
           switch (instr_.SaValue()) {
             case DIV_OP:
               if (rt_u() != 0) {
-                set_register(rd_reg(), rs_u() / rt_u());
+                SetResult(rd_reg(), rs_u() / rt_u());
               }
               break;
             case MOD_OP:
               if (rt_u() != 0) {
-                set_register(rd_reg(), rs_u() % rt_u());
+                SetResult(rd_reg(), rs_u() % rt_u());
               }
               break;
             default:
@@ -3892,9 +3984,9 @@
       uint32_t cc = instr_.FBccValue();
       uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
       if (instr_.Bit(16)) {  // Read Tf bit.
-        if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
+        if (test_fcsr_bit(fcsr_cc)) SetResult(rd_reg(), rs());
       } else {
-        if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
+        if (!test_fcsr_bit(fcsr_cc)) SetResult(rd_reg(), rs());
       }
       break;
     }
@@ -4689,10 +4781,12 @@
     }
     case LWC1:
       set_fpu_register(ft_reg, kFPUInvalidResult);  // Trash upper 32 bits.
-      set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr_.instr()));
+      set_fpu_register_word(ft_reg,
+                            ReadW(rs + se_imm16, instr_.instr(), FLOAT_DOUBLE));
       break;
     case LDC1:
       set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr_.instr()));
+      TraceMemRd(addr, get_fpu_register(ft_reg), DOUBLE);
       break;
     case SWC1: {
       int32_t alu_out_32 = static_cast<int32_t>(get_fpu_register(ft_reg));
@@ -4701,6 +4795,7 @@
     }
     case SDC1:
       WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr());
+      TraceMemWr(rs + se_imm16, get_fpu_register(ft_reg), DWORD);
       break;
     // ------------- PC-Relative instructions.
     case PCREL: {
@@ -4764,7 +4859,7 @@
           break;
         }
       }
-      set_register(rs_reg, alu_out);
+      SetResult(rs_reg, alu_out);
       break;
     }
     default:
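
The simulator-mips64.cc hunks above are one mechanical transformation: every raw set_register/set_fpu_register* call on an instruction's result path is folded into a Set*Result helper that performs the write and then emits the matching trace record, so a result can no longer be written without being traced. A minimal sketch of the effect at a call site (simplified; not the exact V8 code):

    // Before: write and trace are separate calls, and easy to leave unpaired.
    set_register(rd_reg(), rs() + rt());
    TraceRegWr(get_register(rd_reg()));

    // After: one helper keeps the register write and the trace in lockstep.
    SetResult(rd_reg(), rs() + rt());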
diff --git a/src/mips64/simulator-mips64.h b/src/mips64/simulator-mips64.h
index df98465..6c41ae1 100644
--- a/src/mips64/simulator-mips64.h
+++ b/src/mips64/simulator-mips64.h
@@ -303,6 +303,18 @@
   // Unsupported instructions use Format to print an error and stop execution.
   void Format(Instruction* instr, const char* format);
 
+  // Helpers for data value tracing.
+  enum TraceType {
+    BYTE,
+    HALF,
+    WORD,
+    DWORD,
+    FLOAT,
+    DOUBLE,
+    FLOAT_DOUBLE,
+    WORD_DWORD
+  };
+
   // Read and write memory.
   inline uint32_t ReadBU(int64_t addr);
   inline int32_t ReadB(int64_t addr);
@@ -316,7 +328,7 @@
   inline void WriteH(int64_t addr, int16_t value, Instruction* instr);
 
   inline uint32_t ReadWU(int64_t addr, Instruction* instr);
-  inline int32_t ReadW(int64_t addr, Instruction* instr);
+  inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
   inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
   inline int64_t Read2W(int64_t addr, Instruction* instr);
   inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
@@ -327,18 +339,9 @@
   // Helper for debugging memory access.
   inline void DieOrDebug();
 
-  // Helpers for data value tracing.
-    enum TraceType {
-    BYTE,
-    HALF,
-    WORD,
-    DWORD
-    // DFLOAT - Floats may have printing issues due to paired lwc1's
-  };
-
-  void TraceRegWr(int64_t value);
+  void TraceRegWr(int64_t value, TraceType t = DWORD);
   void TraceMemWr(int64_t addr, int64_t value, TraceType t);
-  void TraceMemRd(int64_t addr, int64_t value);
+  void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD);
 
   // Operations depending on endianness.
   // Get Double Higher / Lower word.
@@ -396,6 +399,36 @@
     TraceRegWr(alu_out);
   }
 
+  inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
+    set_fpu_register_word(fd_reg, alu_out);
+    TraceRegWr(get_fpu_register(fd_reg), WORD);
+  }
+
+  inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) {
+    set_fpu_register_word(fd_reg, alu_out);
+    TraceRegWr(get_fpu_register(fd_reg));
+  }
+
+  inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
+    set_fpu_register(fd_reg, alu_out);
+    TraceRegWr(get_fpu_register(fd_reg));
+  }
+
+  inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) {
+    set_fpu_register(fd_reg, alu_out);
+    TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
+  }
+
+  inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
+    set_fpu_register_float(fd_reg, alu_out);
+    TraceRegWr(get_fpu_register(fd_reg), FLOAT);
+  }
+
+  inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
+    set_fpu_register_double(fd_reg, alu_out);
+    TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
+  }
+
   void DecodeTypeImmediate();
   void DecodeTypeJump();
 
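Note why the TraceType enum moves up in the header: it is now used as a parameter type in the memory-access declarations (ReadW's new TraceType t = WORD parameter), and C++ requires a type to be declared before it appears in a parameter list. A standalone sketch of the constraint, with hypothetical names:

    class Sim {
     public:
      // The enum must precede any declaration that names it as a parameter
      // type; the defaulted value (WORD/DWORD) then selects the trace format.
      enum TraceType { BYTE, HALF, WORD, DWORD, FLOAT, DOUBLE };
      int32_t ReadW(int64_t addr, TraceType t = WORD);
      void TraceRegWr(int64_t value, TraceType t = DWORD);
    };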
diff --git a/src/objects-body-descriptors-inl.h b/src/objects-body-descriptors-inl.h
index f7a1a71..be9c0f2 100644
--- a/src/objects-body-descriptors-inl.h
+++ b/src/objects-body-descriptors-inl.h
@@ -5,7 +5,9 @@
 #ifndef V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
 #define V8_OBJECTS_BODY_DESCRIPTORS_INL_H_
 
+#include "src/assembler-inl.h"
 #include "src/objects-body-descriptors.h"
+#include "src/transitions.h"
 
 namespace v8 {
 namespace internal {
@@ -360,7 +362,8 @@
                 kSourcePositionTableOffset);
   STATIC_ASSERT(kSourcePositionTableOffset + kPointerSize ==
                 kTypeFeedbackInfoOffset);
-  STATIC_ASSERT(kTypeFeedbackInfoOffset + kPointerSize == kNextCodeLinkOffset);
+  STATIC_ASSERT(kTypeFeedbackInfoOffset + kPointerSize ==
+                kNextCodeLinkOffset);
 
   static bool IsValidSlot(HeapObject* obj, int offset) {
     // Slots in code can't be invalid because we never trim code objects.
@@ -437,6 +440,8 @@
         return ReturnType();
       case kConsStringTag:
         return Op::template apply<ConsString::BodyDescriptor>(p1, p2, p3);
+      case kThinStringTag:
+        return Op::template apply<ThinString::BodyDescriptor>(p1, p2, p3);
       case kSlicedStringTag:
         return Op::template apply<SlicedString::BodyDescriptor>(p1, p2, p3);
       case kExternalStringTag:
@@ -462,6 +467,8 @@
     case JS_OBJECT_TYPE:
     case JS_ERROR_TYPE:
     case JS_ARGUMENTS_TYPE:
+    case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
+    case JS_PROMISE_CAPABILITY_TYPE:
     case JS_PROMISE_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
@@ -469,7 +476,6 @@
     case JS_DATE_TYPE:
     case JS_ARRAY_TYPE:
     case JS_MODULE_NAMESPACE_TYPE:
-    case JS_FIXED_ARRAY_ITERATOR_TYPE:
     case JS_TYPED_ARRAY_TYPE:
     case JS_DATA_VIEW_TYPE:
     case JS_SET_TYPE:
@@ -552,7 +558,6 @@
 
     case HEAP_NUMBER_TYPE:
     case MUTABLE_HEAP_NUMBER_TYPE:
-    case SIMD128_VALUE_TYPE:
     case FILLER_TYPE:
     case BYTE_ARRAY_TYPE:
     case FREE_SPACE_TYPE:
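
The kThinStringTag arm added above gives the GC a body descriptor for the new string shape: a ThinString carries a single tagged pointer to the internalized string it forwards to, and that slot must be visited just like ConsString's fields. A hypothetical sketch of the descriptor wiring, assuming V8's FixedBodyDescriptor template and a kActualOffset field offset:

    // Sketch only: ThinString's body is one tagged field ('actual'), so a
    // fixed-range descriptor covering just that slot is sufficient.
    class ThinString : public String {
     public:
      typedef FixedBodyDescriptor<kActualOffset, kActualOffset + kPointerSize,
                                  kSize>
          BodyDescriptor;
    };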
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 2580bfb..8a2b9eb 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -4,13 +4,19 @@
 
 #include "src/objects.h"
 
+#include "src/assembler-inl.h"
 #include "src/bootstrapper.h"
 #include "src/disasm.h"
 #include "src/disassembler.h"
 #include "src/field-type.h"
+#include "src/layout-descriptor.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
+#include "src/objects/literal-objects.h"
+#include "src/objects/module-info.h"
 #include "src/ostreams.h"
 #include "src/regexp/jsregexp.h"
+#include "src/transitions.h"
 
 namespace v8 {
 namespace internal {
@@ -44,6 +50,8 @@
 
 
 void HeapObject::HeapObjectVerify() {
+  VerifyHeapPointer(map());
+  CHECK(map()->IsMap());
   InstanceType instance_type = map()->instance_type();
 
   if (instance_type < FIRST_NONSTRING_TYPE) {
@@ -62,9 +70,6 @@
     case MUTABLE_HEAP_NUMBER_TYPE:
       HeapNumber::cast(this)->HeapNumberVerify();
       break;
-    case SIMD128_VALUE_TYPE:
-      Simd128Value::cast(this)->Simd128ValueVerify();
-      break;
     case FIXED_ARRAY_TYPE:
       FixedArray::cast(this)->FixedArrayVerify();
       break;
@@ -104,7 +109,6 @@
     case JS_API_OBJECT_TYPE:
     case JS_SPECIAL_API_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
-    case JS_PROMISE_TYPE:
       JSObject::cast(this)->JSObjectVerify();
       break;
     case JS_GENERATOR_OBJECT_TYPE:
@@ -143,9 +147,6 @@
     case JS_MODULE_NAMESPACE_TYPE:
       JSModuleNamespace::cast(this)->JSModuleNamespaceVerify();
       break;
-    case JS_FIXED_ARRAY_ITERATOR_TYPE:
-      JSFixedArrayIterator::cast(this)->JSFixedArrayIteratorVerify();
-      break;
     case JS_SET_TYPE:
       JSSet::cast(this)->JSSetVerify();
       break;
@@ -199,12 +200,21 @@
     case JS_STRING_ITERATOR_TYPE:
       JSStringIterator::cast(this)->JSStringIteratorVerify();
       break;
+    case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
+      JSAsyncFromSyncIterator::cast(this)->JSAsyncFromSyncIteratorVerify();
+      break;
     case JS_WEAK_MAP_TYPE:
       JSWeakMap::cast(this)->JSWeakMapVerify();
       break;
     case JS_WEAK_SET_TYPE:
       JSWeakSet::cast(this)->JSWeakSetVerify();
       break;
+    case JS_PROMISE_CAPABILITY_TYPE:
+      JSPromiseCapability::cast(this)->JSPromiseCapabilityVerify();
+      break;
+    case JS_PROMISE_TYPE:
+      JSPromise::cast(this)->JSPromiseVerify();
+      break;
     case JS_REGEXP_TYPE:
       JSRegExp::cast(this)->JSRegExpVerify();
       break;
@@ -265,10 +275,6 @@
   CHECK(IsHeapNumber() || IsMutableHeapNumber());
 }
 
-
-void Simd128Value::Simd128ValueVerify() { CHECK(IsSimd128Value()); }
-
-
 void ByteArray::ByteArrayVerify() {
   CHECK(IsByteArray());
 }
@@ -337,7 +343,9 @@
     DescriptorArray* descriptors = map()->instance_descriptors();
     Isolate* isolate = GetIsolate();
     for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
-      if (descriptors->GetDetails(i).type() == DATA) {
+      PropertyDetails details = descriptors->GetDetails(i);
+      if (details.location() == kField) {
+        DCHECK_EQ(kData, details.kind());
         Representation r = descriptors->GetDetails(i).representation();
         FieldIndex index = FieldIndex::ForDescriptor(map(), i);
         if (IsUnboxedDoubleField(index)) {
@@ -471,7 +479,7 @@
   VerifyObjectField(kFunctionOffset);
   VerifyObjectField(kContextOffset);
   VerifyObjectField(kReceiverOffset);
-  VerifyObjectField(kOperandStackOffset);
+  VerifyObjectField(kRegisterFileOffset);
   VerifyObjectField(kContinuationOffset);
 }
 
@@ -546,6 +554,7 @@
 void String::StringVerify() {
   CHECK(IsString());
   CHECK(length() >= 0 && length() <= Smi::kMaxValue);
+  CHECK_IMPLIES(length() == 0, this == GetHeap()->empty_string());
   if (IsInternalizedString()) {
     CHECK(!GetHeap()->InNewSpace(this));
   }
@@ -553,6 +562,8 @@
     ConsString::cast(this)->ConsStringVerify();
   } else if (IsSlicedString()) {
     SlicedString::cast(this)->SlicedStringVerify();
+  } else if (IsThinString()) {
+    ThinString::cast(this)->ThinStringVerify();
   }
 }
 
@@ -564,12 +575,17 @@
   CHECK(this->length() >= ConsString::kMinLength);
   CHECK(this->length() == this->first()->length() + this->second()->length());
   if (this->IsFlat()) {
-    // A flat cons can only be created by String::SlowTryFlatten.
-    // Afterwards, the first part may be externalized.
-    CHECK(this->first()->IsSeqString() || this->first()->IsExternalString());
+    // A flat cons can only be created by String::SlowFlatten.
+    // Afterwards, the first part may be externalized or internalized.
+    CHECK(this->first()->IsSeqString() || this->first()->IsExternalString() ||
+          this->first()->IsThinString());
   }
 }
 
+void ThinString::ThinStringVerify() {
+  CHECK(this->actual()->IsInternalizedString());
+  CHECK(this->actual()->IsSeqString() || this->actual()->IsExternalString());
+}
 
 void SlicedString::SlicedStringVerify() {
   CHECK(!this->parent()->IsConsString());
@@ -604,21 +620,29 @@
 
 void SharedFunctionInfo::SharedFunctionInfoVerify() {
   CHECK(IsSharedFunctionInfo());
-  VerifyObjectField(kNameOffset);
+
   VerifyObjectField(kCodeOffset);
-  VerifyObjectField(kOptimizedCodeMapOffset);
+  VerifyObjectField(kDebugInfoOffset);
   VerifyObjectField(kFeedbackMetadataOffset);
-  VerifyObjectField(kScopeInfoOffset);
-  VerifyObjectField(kOuterScopeInfoOffset);
+  VerifyObjectField(kFunctionDataOffset);
+  VerifyObjectField(kFunctionIdentifierOffset);
   VerifyObjectField(kInstanceClassNameOffset);
+  VerifyObjectField(kNameOffset);
+  VerifyObjectField(kOptimizedCodeMapOffset);
+  VerifyObjectField(kOuterScopeInfoOffset);
+  VerifyObjectField(kScopeInfoOffset);
+  VerifyObjectField(kScriptOffset);
+
   CHECK(function_data()->IsUndefined(GetIsolate()) || IsApiFunction() ||
         HasBytecodeArray() || HasAsmWasmData());
-  VerifyObjectField(kFunctionDataOffset);
-  VerifyObjectField(kScriptOffset);
-  VerifyObjectField(kDebugInfoOffset);
+
   CHECK(function_identifier()->IsUndefined(GetIsolate()) ||
         HasBuiltinFunctionId() || HasInferredName());
-  VerifyObjectField(kFunctionIdentifierOffset);
+
+  if (scope_info()->length() > 0) {
+    CHECK(kind() == scope_info()->function_kind());
+    CHECK_EQ(kind() == kModule, scope_info()->scope_type() == MODULE_SCOPE);
+  }
 }
 
 
@@ -869,6 +893,12 @@
   CHECK_LE(index(), String::kMaxLength);
 }
 
+void JSAsyncFromSyncIterator::JSAsyncFromSyncIteratorVerify() {
+  CHECK(IsJSAsyncFromSyncIterator());
+  JSObjectVerify();
+  VerifyHeapPointer(sync_iterator());
+}
+
 void JSWeakSet::JSWeakSetVerify() {
   CHECK(IsJSWeakSet());
   JSObjectVerify();
@@ -876,6 +906,36 @@
   CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
 }
 
+void JSPromiseCapability::JSPromiseCapabilityVerify() {
+  CHECK(IsJSPromiseCapability());
+  JSObjectVerify();
+  VerifyPointer(promise());
+  VerifyPointer(resolve());
+  VerifyPointer(reject());
+}
+
+void JSPromise::JSPromiseVerify() {
+  CHECK(IsJSPromise());
+  JSObjectVerify();
+  Isolate* isolate = GetIsolate();
+  VerifySmiField(kStatusOffset);
+  CHECK(result()->IsUndefined(isolate) || result()->IsObject());
+  CHECK(deferred_promise()->IsUndefined(isolate) ||
+        deferred_promise()->IsJSReceiver() ||
+        deferred_promise()->IsFixedArray());
+  CHECK(deferred_on_resolve()->IsUndefined(isolate) ||
+        deferred_on_resolve()->IsCallable() ||
+        deferred_on_resolve()->IsFixedArray());
+  CHECK(deferred_on_reject()->IsUndefined(isolate) ||
+        deferred_on_reject()->IsCallable() ||
+        deferred_on_reject()->IsFixedArray());
+  CHECK(fulfill_reactions()->IsUndefined(isolate) ||
+        fulfill_reactions()->IsCallable() || fulfill_reactions()->IsSymbol() ||
+        fulfill_reactions()->IsFixedArray());
+  CHECK(reject_reactions()->IsUndefined(isolate) ||
+        reject_reactions()->IsSymbol() || reject_reactions()->IsCallable() ||
+        reject_reactions()->IsFixedArray());
+}
 
 void JSRegExp::JSRegExpVerify() {
   JSObjectVerify();
@@ -982,20 +1042,12 @@
 }
 
 
-void Box::BoxVerify() {
-  CHECK(IsBox());
-  value()->ObjectVerify();
-}
-
 void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoVerify() {
-  Isolate* isolate = GetIsolate();
   CHECK(IsPromiseResolveThenableJobInfo());
   CHECK(thenable()->IsJSReceiver());
   CHECK(then()->IsJSReceiver());
   CHECK(resolve()->IsJSFunction());
   CHECK(reject()->IsJSFunction());
-  CHECK(debug_id()->IsNumber() || debug_id()->IsUndefined(isolate));
-  CHECK(debug_name()->IsString() || debug_name()->IsUndefined(isolate));
   CHECK(context()->IsContext());
 }
 
@@ -1003,10 +1055,17 @@
   Isolate* isolate = GetIsolate();
   CHECK(IsPromiseReactionJobInfo());
   CHECK(value()->IsObject());
-  CHECK(tasks()->IsJSArray() || tasks()->IsCallable());
-  CHECK(deferred()->IsJSObject() || deferred()->IsUndefined(isolate));
-  CHECK(debug_id()->IsNumber() || debug_id()->IsUndefined(isolate));
-  CHECK(debug_name()->IsString() || debug_name()->IsUndefined(isolate));
+  CHECK(tasks()->IsFixedArray() || tasks()->IsCallable() ||
+        tasks()->IsSymbol());
+  CHECK(deferred_promise()->IsUndefined(isolate) ||
+        deferred_promise()->IsJSReceiver() ||
+        deferred_promise()->IsFixedArray());
+  CHECK(deferred_on_resolve()->IsUndefined(isolate) ||
+        deferred_on_resolve()->IsCallable() ||
+        deferred_on_resolve()->IsFixedArray());
+  CHECK(deferred_on_reject()->IsUndefined(isolate) ||
+        deferred_on_reject()->IsCallable() ||
+        deferred_on_reject()->IsFixedArray());
   CHECK(context()->IsContext());
 }
 
@@ -1015,16 +1074,6 @@
   VerifyPointer(module());
 }
 
-void JSFixedArrayIterator::JSFixedArrayIteratorVerify() {
-  CHECK(IsJSFixedArrayIterator());
-
-  VerifyPointer(array());
-  VerifyPointer(initial_next());
-  VerifySmiField(kIndexOffset);
-
-  CHECK_LE(index(), array()->length());
-}
-
 void ModuleInfoEntry::ModuleInfoEntryVerify() {
   Isolate* isolate = GetIsolate();
   CHECK(IsModuleInfoEntry());
@@ -1078,6 +1127,12 @@
   CHECK(validity_cell()->IsCell() || validity_cell()->IsSmi());
 }
 
+void Tuple2::Tuple2Verify() {
+  CHECK(IsTuple2());
+  VerifyObjectField(kValue1Offset);
+  VerifyObjectField(kValue2Offset);
+}
+
 void Tuple3::Tuple3Verify() {
   CHECK(IsTuple3());
   VerifyObjectField(kValue1Offset);
@@ -1091,6 +1146,11 @@
   VerifyObjectField(kExtensionOffset);
 }
 
+void ConstantElementsPair::ConstantElementsPairVerify() {
+  CHECK(IsConstantElementsPair());
+  VerifySmiField(kElementsKindOffset);
+  VerifyObjectField(kConstantValuesOffset);
+}
 
 void AccessorInfo::AccessorInfoVerify() {
   CHECK(IsAccessorInfo());
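
The verifiers added to objects-debug.cc above all follow the same recipe: check the instance type that the cast assumes, run the generic JSObjectVerify pass, then assert the type-specific field invariants (see JSAsyncFromSyncIteratorVerify and JSPromiseVerify). A compressed sketch of that shape, with JSFoo/bar/baz as placeholder names:

    void JSFoo::JSFooVerify() {
      CHECK(IsJSFoo());          // the cast in callers is actually valid
      JSObjectVerify();          // generic JSObject invariants first
      VerifyHeapPointer(bar());  // then the type-specific fields...
      CHECK(baz()->IsUndefined(GetIsolate()) || baz()->IsCallable());
    }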
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 1a8274c..3aa26c5 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -18,6 +18,7 @@
 #include "src/contexts-inl.h"
 #include "src/conversions-inl.h"
 #include "src/factory.h"
+#include "src/feedback-vector-inl.h"
 #include "src/field-index-inl.h"
 #include "src/field-type.h"
 #include "src/handles-inl.h"
@@ -30,10 +31,13 @@
 #include "src/lookup-cache-inl.h"
 #include "src/lookup.h"
 #include "src/objects.h"
+#include "src/objects/literal-objects.h"
+#include "src/objects/module-info.h"
+#include "src/objects/regexp-match-info.h"
+#include "src/objects/scope-info.h"
 #include "src/property.h"
 #include "src/prototype.h"
 #include "src/transitions-inl.h"
-#include "src/type-feedback-vector-inl.h"
 #include "src/v8memory.h"
 
 namespace v8 {
@@ -59,36 +63,23 @@
   return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
 }
 
-#define TYPE_CHECKER(type, instancetype)           \
-  bool HeapObject::Is##type() const {              \
-    return map()->instance_type() == instancetype; \
-  }
-
-#define CAST_ACCESSOR(type)                       \
-  type* type::cast(Object* object) {              \
-    SLOW_DCHECK(object->Is##type());              \
-    return reinterpret_cast<type*>(object);       \
-  }                                               \
-  const type* type::cast(const Object* object) {  \
-    SLOW_DCHECK(object->Is##type());              \
-    return reinterpret_cast<const type*>(object); \
-  }
-
-
 #define INT_ACCESSORS(holder, name, offset)                                   \
   int holder::name() const { return READ_INT_FIELD(this, offset); }           \
   void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
 
-#define ACCESSORS_CHECKED(holder, name, type, offset, condition)     \
-  type* holder::name() const {                                       \
-    DCHECK(condition);                                               \
-    return type::cast(READ_FIELD(this, offset));                     \
-  }                                                                  \
-  void holder::set_##name(type* value, WriteBarrierMode mode) {      \
-    DCHECK(condition);                                               \
-    WRITE_FIELD(this, offset, value);                                \
-    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
+#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
+                           set_condition)                             \
+  type* holder::name() const {                                        \
+    DCHECK(get_condition);                                            \
+    return type::cast(READ_FIELD(this, offset));                      \
+  }                                                                   \
+  void holder::set_##name(type* value, WriteBarrierMode mode) {       \
+    DCHECK(set_condition);                                            \
+    WRITE_FIELD(this, offset, value);                                 \
+    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);  \
   }
+#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
+  ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
 
 #define ACCESSORS(holder, name, type, offset) \
   ACCESSORS_CHECKED(holder, name, type, offset, true)
@@ -140,6 +131,62 @@
     set_##field(BooleanBit::set(field(), offset, value));  \
   }
 
+#define TYPE_CHECKER(type, instancetype)           \
+  bool HeapObject::Is##type() const {              \
+    return map()->instance_type() == instancetype; \
+  }
+
+TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
+TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
+TYPE_CHECKER(Cell, CELL_TYPE)
+TYPE_CHECKER(Code, CODE_TYPE)
+TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
+TYPE_CHECKER(Foreign, FOREIGN_TYPE)
+TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
+TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
+TYPE_CHECKER(JSArgumentsObject, JS_ARGUMENTS_TYPE)
+TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
+TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
+TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
+TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
+TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
+TYPE_CHECKER(JSDate, JS_DATE_TYPE)
+TYPE_CHECKER(JSError, JS_ERROR_TYPE)
+TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
+TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
+TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
+TYPE_CHECKER(JSMap, JS_MAP_TYPE)
+TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
+TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
+TYPE_CHECKER(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE)
+TYPE_CHECKER(JSPromiseCapability, JS_PROMISE_CAPABILITY_TYPE)
+TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
+TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
+TYPE_CHECKER(JSSet, JS_SET_TYPE)
+TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
+TYPE_CHECKER(JSAsyncFromSyncIterator, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE)
+TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
+TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
+TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
+TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
+TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
+TYPE_CHECKER(Map, MAP_TYPE)
+TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
+TYPE_CHECKER(Oddball, ODDBALL_TYPE)
+TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
+TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
+TYPE_CHECKER(Symbol, SYMBOL_TYPE)
+TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
+TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
+TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
+
+#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
+  TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
+TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
+#undef TYPED_ARRAY_TYPE_CHECKER
+
+#undef TYPE_CHECKER
+
 bool HeapObject::IsFixedArrayBase() const {
   return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
 }
@@ -150,23 +197,13 @@
          instance_type == TRANSITION_ARRAY_TYPE;
 }
 
+bool HeapObject::IsBoilerplateDescription() const { return IsFixedArray(); }
 
 // External objects are not extensible, so the map check is enough.
 bool HeapObject::IsExternal() const {
   return map() == GetHeap()->external_map();
 }
 
-
-TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
-TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
-TYPE_CHECKER(Symbol, SYMBOL_TYPE)
-TYPE_CHECKER(Simd128Value, SIMD128_VALUE_TYPE)
-
-#define SIMD128_TYPE_CHECKER(TYPE, Type, type, lane_count, lane_type) \
-  bool HeapObject::Is##Type() const { return map() == GetHeap()->type##_map(); }
-SIMD128_TYPES(SIMD128_TYPE_CHECKER)
-#undef SIMD128_TYPE_CHECKER
-
 #define IS_TYPE_FUNCTION_DEF(type_)                               \
   bool Object::Is##type_() const {                                \
     return IsHeapObject() && HeapObject::cast(this)->Is##type_(); \
@@ -184,6 +221,16 @@
 ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
 #undef IS_TYPE_FUNCTION_DEF
 
+bool Object::IsNullOrUndefined(Isolate* isolate) const {
+  Heap* heap = isolate->heap();
+  return this == heap->null_value() || this == heap->undefined_value();
+}
+
+bool HeapObject::IsNullOrUndefined(Isolate* isolate) const {
+  Heap* heap = isolate->heap();
+  return this == heap->null_value() || this == heap->undefined_value();
+}
+
 bool HeapObject::IsString() const {
   return map()->instance_type() < FIRST_NONSTRING_TYPE;
 }
@@ -227,6 +274,11 @@
   return StringShape(String::cast(this)).IsCons();
 }
 
+bool HeapObject::IsThinString() const {
+  if (!IsString()) return false;
+  return StringShape(String::cast(this)).IsThin();
+}
+
 bool HeapObject::IsSlicedString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsSliced();
@@ -266,412 +318,13 @@
          String::cast(this)->IsTwoByteRepresentation();
 }
 
-bool Object::HasValidElements() {
-  // Dictionary is covered under FixedArray.
-  return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
-}
-
-
-bool Object::KeyEquals(Object* second) {
-  Object* first = this;
-  if (second->IsNumber()) {
-    if (first->IsNumber()) return first->Number() == second->Number();
-    Object* temp = first;
-    first = second;
-    second = temp;
-  }
-  if (first->IsNumber()) {
-    DCHECK_LE(0, first->Number());
-    uint32_t expected = static_cast<uint32_t>(first->Number());
-    uint32_t index;
-    return Name::cast(second)->AsArrayIndex(&index) && index == expected;
-  }
-  return Name::cast(first)->Equals(Name::cast(second));
-}
-
-
-bool Object::FilterKey(PropertyFilter filter) {
-  if (IsSymbol()) {
-    if (filter & SKIP_SYMBOLS) return true;
-    if (Symbol::cast(this)->is_private()) return true;
-  } else {
-    if (filter & SKIP_STRINGS) return true;
-  }
-  return false;
-}
-
-
-Handle<Object> Object::NewStorageFor(Isolate* isolate,
-                                     Handle<Object> object,
-                                     Representation representation) {
-  if (representation.IsSmi() && object->IsUninitialized(isolate)) {
-    return handle(Smi::kZero, isolate);
-  }
-  if (!representation.IsDouble()) return object;
-  double value;
-  if (object->IsUninitialized(isolate)) {
-    value = 0;
-  } else if (object->IsMutableHeapNumber()) {
-    value = HeapNumber::cast(*object)->value();
-  } else {
-    value = object->Number();
-  }
-  return isolate->factory()->NewHeapNumber(value, MUTABLE);
-}
-
-
-Handle<Object> Object::WrapForRead(Isolate* isolate,
-                                   Handle<Object> object,
-                                   Representation representation) {
-  DCHECK(!object->IsUninitialized(isolate));
-  if (!representation.IsDouble()) {
-    DCHECK(object->FitsRepresentation(representation));
-    return object;
-  }
-  return isolate->factory()->NewHeapNumber(HeapNumber::cast(*object)->value());
-}
-
-
-StringShape::StringShape(const String* str)
-  : type_(str->map()->instance_type()) {
-  set_valid();
-  DCHECK((type_ & kIsNotStringMask) == kStringTag);
-}
-
-
-StringShape::StringShape(Map* map)
-  : type_(map->instance_type()) {
-  set_valid();
-  DCHECK((type_ & kIsNotStringMask) == kStringTag);
-}
-
-
-StringShape::StringShape(InstanceType t)
-  : type_(static_cast<uint32_t>(t)) {
-  set_valid();
-  DCHECK((type_ & kIsNotStringMask) == kStringTag);
-}
-
-
-bool StringShape::IsInternalized() {
-  DCHECK(valid());
-  STATIC_ASSERT(kNotInternalizedTag != 0);
-  return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
-      (kStringTag | kInternalizedTag);
-}
-
-
-bool String::IsOneByteRepresentation() const {
-  uint32_t type = map()->instance_type();
-  return (type & kStringEncodingMask) == kOneByteStringTag;
-}
-
-
-bool String::IsTwoByteRepresentation() const {
-  uint32_t type = map()->instance_type();
-  return (type & kStringEncodingMask) == kTwoByteStringTag;
-}
-
-
-bool String::IsOneByteRepresentationUnderneath() {
-  uint32_t type = map()->instance_type();
-  STATIC_ASSERT(kIsIndirectStringTag != 0);
-  STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
-  DCHECK(IsFlat());
-  switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
-    case kOneByteStringTag:
-      return true;
-    case kTwoByteStringTag:
-      return false;
-    default:  // Cons or sliced string.  Need to go deeper.
-      return GetUnderlying()->IsOneByteRepresentation();
-  }
-}
-
-
-bool String::IsTwoByteRepresentationUnderneath() {
-  uint32_t type = map()->instance_type();
-  STATIC_ASSERT(kIsIndirectStringTag != 0);
-  STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
-  DCHECK(IsFlat());
-  switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
-    case kOneByteStringTag:
-      return false;
-    case kTwoByteStringTag:
-      return true;
-    default:  // Cons or sliced string.  Need to go deeper.
-      return GetUnderlying()->IsTwoByteRepresentation();
-  }
-}
-
-
-bool String::HasOnlyOneByteChars() {
-  uint32_t type = map()->instance_type();
-  return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
-         IsOneByteRepresentation();
-}
-
-
-bool StringShape::IsCons() {
-  return (type_ & kStringRepresentationMask) == kConsStringTag;
-}
-
-
-bool StringShape::IsSliced() {
-  return (type_ & kStringRepresentationMask) == kSlicedStringTag;
-}
-
-
-bool StringShape::IsIndirect() {
-  return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
-}
-
-
-bool StringShape::IsExternal() {
-  return (type_ & kStringRepresentationMask) == kExternalStringTag;
-}
-
-
-bool StringShape::IsSequential() {
-  return (type_ & kStringRepresentationMask) == kSeqStringTag;
-}
-
-
-StringRepresentationTag StringShape::representation_tag() {
-  uint32_t tag = (type_ & kStringRepresentationMask);
-  return static_cast<StringRepresentationTag>(tag);
-}
-
-
-uint32_t StringShape::encoding_tag() {
-  return type_ & kStringEncodingMask;
-}
-
-
-uint32_t StringShape::full_representation_tag() {
-  return (type_ & (kStringRepresentationMask | kStringEncodingMask));
-}
-
-
-STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
-             Internals::kFullStringRepresentationMask);
-
-STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
-             Internals::kStringEncodingMask);
-
-
-bool StringShape::IsSequentialOneByte() {
-  return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
-}
-
-
-bool StringShape::IsSequentialTwoByte() {
-  return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
-}
-
-
-bool StringShape::IsExternalOneByte() {
-  return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
-}
-
-
-STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
-              Internals::kExternalOneByteRepresentationTag);
-
-STATIC_ASSERT(v8::String::ONE_BYTE_ENCODING == kOneByteStringTag);
-
-
-bool StringShape::IsExternalTwoByte() {
-  return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
-}
-
-
-STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
-             Internals::kExternalTwoByteRepresentationTag);
-
-STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
-
-
-uc32 FlatStringReader::Get(int index) {
-  if (is_one_byte_) {
-    return Get<uint8_t>(index);
-  } else {
-    return Get<uc16>(index);
-  }
-}
-
-
-template <typename Char>
-Char FlatStringReader::Get(int index) {
-  DCHECK_EQ(is_one_byte_, sizeof(Char) == 1);
-  DCHECK(0 <= index && index <= length_);
-  if (sizeof(Char) == 1) {
-    return static_cast<Char>(static_cast<const uint8_t*>(start_)[index]);
-  } else {
-    return static_cast<Char>(static_cast<const uc16*>(start_)[index]);
-  }
-}
-
-
-Handle<Object> StringTableShape::AsHandle(Isolate* isolate, HashTableKey* key) {
-  return key->AsHandle(isolate);
-}
-
-
-Handle<Object> CompilationCacheShape::AsHandle(Isolate* isolate,
-                                               HashTableKey* key) {
-  return key->AsHandle(isolate);
-}
-
-
-Handle<Object> CodeCacheHashTableShape::AsHandle(Isolate* isolate,
-                                                 HashTableKey* key) {
-  return key->AsHandle(isolate);
-}
-
-template <typename Char>
-class SequentialStringKey : public HashTableKey {
- public:
-  explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
-      : string_(string), hash_field_(0), seed_(seed) { }
-
-  uint32_t Hash() override {
-    hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
-                                                           string_.length(),
-                                                           seed_);
-
-    uint32_t result = hash_field_ >> String::kHashShift;
-    DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
-    return result;
-  }
-
-
-  uint32_t HashForObject(Object* other) override {
-    return String::cast(other)->Hash();
-  }
-
-  Vector<const Char> string_;
-  uint32_t hash_field_;
-  uint32_t seed_;
-};
-
-
-class OneByteStringKey : public SequentialStringKey<uint8_t> {
- public:
-  OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
-      : SequentialStringKey<uint8_t>(str, seed) { }
-
-  bool IsMatch(Object* string) override {
-    return String::cast(string)->IsOneByteEqualTo(string_);
-  }
-
-  Handle<Object> AsHandle(Isolate* isolate) override;
-};
-
-
-class SeqOneByteSubStringKey : public HashTableKey {
- public:
-  SeqOneByteSubStringKey(Handle<SeqOneByteString> string, int from, int length)
-      : string_(string), from_(from), length_(length) {
-    DCHECK(string_->IsSeqOneByteString());
-  }
-
-  uint32_t Hash() override {
-    DCHECK(length_ >= 0);
-    DCHECK(from_ + length_ <= string_->length());
-    const uint8_t* chars = string_->GetChars() + from_;
-    hash_field_ = StringHasher::HashSequentialString(
-        chars, length_, string_->GetHeap()->HashSeed());
-    uint32_t result = hash_field_ >> String::kHashShift;
-    DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
-    return result;
-  }
-
-  uint32_t HashForObject(Object* other) override {
-    return String::cast(other)->Hash();
-  }
-
-  bool IsMatch(Object* string) override;
-  Handle<Object> AsHandle(Isolate* isolate) override;
-
- private:
-  Handle<SeqOneByteString> string_;
-  int from_;
-  int length_;
-  uint32_t hash_field_;
-};
-
-
-class TwoByteStringKey : public SequentialStringKey<uc16> {
- public:
-  explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
-      : SequentialStringKey<uc16>(str, seed) { }
-
-  bool IsMatch(Object* string) override {
-    return String::cast(string)->IsTwoByteEqualTo(string_);
-  }
-
-  Handle<Object> AsHandle(Isolate* isolate) override;
-};
-
-
-// Utf8StringKey carries a vector of chars as key.
-class Utf8StringKey : public HashTableKey {
- public:
-  explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
-      : string_(string), hash_field_(0), seed_(seed) { }
-
-  bool IsMatch(Object* string) override {
-    return String::cast(string)->IsUtf8EqualTo(string_);
-  }
-
-  uint32_t Hash() override {
-    if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
-    hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
-    uint32_t result = hash_field_ >> String::kHashShift;
-    DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
-    return result;
-  }
-
-  uint32_t HashForObject(Object* other) override {
-    return String::cast(other)->Hash();
-  }
-
-  Handle<Object> AsHandle(Isolate* isolate) override {
-    if (hash_field_ == 0) Hash();
-    return isolate->factory()->NewInternalizedStringFromUtf8(
-        string_, chars_, hash_field_);
-  }
-
-  Vector<const char> string_;
-  uint32_t hash_field_;
-  int chars_;  // Caches the number of characters when computing the hash code.
-  uint32_t seed_;
-};
-
-
-bool Object::IsNumber() const {
-  return IsSmi() || IsHeapNumber();
-}
-
-
-TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
-TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
-TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
+bool Object::IsNumber() const { return IsSmi() || IsHeapNumber(); }
 
 bool HeapObject::IsFiller() const {
   InstanceType instance_type = map()->instance_type();
   return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
 }
 
-
-
-#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size)               \
-  TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
-
-TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
-#undef TYPED_ARRAY_TYPE_CHECKER
-
 bool HeapObject::IsFixedTypedArrayBase() const {
   InstanceType instance_type = map()->instance_type();
   return (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
@@ -696,20 +349,6 @@
           instance_type <= LAST_ARRAY_ITERATOR_TYPE);
 }
 
-TYPE_CHECKER(JSSet, JS_SET_TYPE)
-TYPE_CHECKER(JSMap, JS_MAP_TYPE)
-TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
-TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
-TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
-TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
-TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
-TYPE_CHECKER(Map, MAP_TYPE)
-TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
-TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
-TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
-TYPE_CHECKER(JSFixedArrayIterator, JS_FIXED_ARRAY_ITERATOR_TYPE)
-
 bool HeapObject::IsJSWeakCollection() const {
   return IsJSWeakMap() || IsJSWeakSet();
 }
@@ -728,11 +367,11 @@
   return IsSmi() || IsFixedTypedArrayBase();
 }
 
-bool HeapObject::IsTypeFeedbackVector() const { return IsFixedArray(); }
+bool HeapObject::IsFeedbackVector() const {
+  return map() == GetHeap()->feedback_vector_map();
+}
 
-bool HeapObject::IsTypeFeedbackMetadata() const { return IsFixedArray(); }
-
-bool HeapObject::IsLiteralsArray() const { return IsFixedArray(); }
+bool HeapObject::IsFeedbackMetadata() const { return IsFixedArray(); }
 
 bool HeapObject::IsDeoptimizationInputData() const {
   // Must be a fixed array.
@@ -787,7 +426,7 @@
       map == heap->function_context_map() || map == heap->catch_context_map() ||
       map == heap->with_context_map() || map == heap->native_context_map() ||
       map == heap->block_context_map() || map == heap->module_context_map() ||
-      map == heap->script_context_map() ||
+      map == heap->eval_context_map() || map == heap->script_context_map() ||
       map == heap->debug_evaluate_context_map());
 }
 
@@ -807,28 +446,11 @@
   return map() == GetHeap()->module_info_map();
 }
 
-TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
-TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
-
-
-template <> inline bool Is<JSFunction>(Object* obj) {
+template <>
+inline bool Is<JSFunction>(Object* obj) {
   return obj->IsJSFunction();
 }
 
-
-TYPE_CHECKER(Code, CODE_TYPE)
-TYPE_CHECKER(Oddball, ODDBALL_TYPE)
-TYPE_CHECKER(Cell, CELL_TYPE)
-TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
-TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
-TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
-TYPE_CHECKER(JSDate, JS_DATE_TYPE)
-TYPE_CHECKER(JSError, JS_ERROR_TYPE)
-TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
-TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
-TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
-TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
-
 bool HeapObject::IsAbstractCode() const {
   return IsBytecodeArray() || IsCode();
 }
@@ -837,29 +459,17 @@
   return IsJSValue() && JSValue::cast(this)->value()->IsString();
 }
 
-
-TYPE_CHECKER(Foreign, FOREIGN_TYPE)
-
 bool HeapObject::IsBoolean() const {
   return IsOddball() &&
-      ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
+         ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
 }
 
-
-TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
-TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
-TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
-TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
-
 bool HeapObject::IsJSArrayBufferView() const {
   return IsJSDataView() || IsJSTypedArray();
 }
 
-
-TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
-
-
-template <> inline bool Is<JSArray>(Object* obj) {
+template <>
+inline bool Is<JSArray>(Object* obj) {
   return obj->IsJSArray();
 }
 
@@ -873,18 +483,11 @@
   return IsHashTable() && this != GetHeap()->string_table();
 }
 
-
-bool Object::IsNameDictionary() const {
-  return IsDictionary();
-}
-
+bool Object::IsNameDictionary() const { return IsDictionary(); }
 
 bool Object::IsGlobalDictionary() const { return IsDictionary(); }
 
-
-bool Object::IsSeededNumberDictionary() const {
-  return IsDictionary();
-}
+bool Object::IsSeededNumberDictionary() const { return IsDictionary(); }
 
 bool HeapObject::IsUnseededNumberDictionary() const {
   return map() == GetHeap()->unseeded_number_dictionary_map();
@@ -900,7 +503,6 @@
   return NormalizedMapCache::IsNormalizedMapCache(this);
 }
 
-
 int NormalizedMapCache::GetIndex(Handle<Map> map) {
   return map->Hash() % NormalizedMapCache::kEntries;
 }
@@ -931,16 +533,9 @@
   return map() == GetHeap()->ordered_hash_table_map();
 }
 
+bool Object::IsOrderedHashSet() const { return IsOrderedHashTable(); }
 
-bool Object::IsOrderedHashSet() const {
-  return IsOrderedHashTable();
-}
-
-
-bool Object::IsOrderedHashMap() const {
-  return IsOrderedHashTable();
-}
-
+bool Object::IsOrderedHashMap() const { return IsOrderedHashTable(); }
 
 bool Object::IsPrimitive() const {
   return IsSmi() || HeapObject::cast(this)->map()->IsPrimitiveMap();
@@ -952,9 +547,6 @@
   return result;
 }
 
-
-TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
-
 bool HeapObject::IsUndetectable() const { return map()->is_undetectable(); }
 
 bool HeapObject::IsAccessCheckNeeded() const {
@@ -968,10 +560,13 @@
 
 bool HeapObject::IsStruct() const {
   switch (map()->instance_type()) {
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true;
-  STRUCT_LIST(MAKE_STRUCT_CASE)
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+  case NAME##_TYPE:                        \
+    return true;
+    STRUCT_LIST(MAKE_STRUCT_CASE)
 #undef MAKE_STRUCT_CASE
-    default: return false;
+    default:
+      return false;
   }
 }
 
@@ -992,17 +587,464 @@
              : reinterpret_cast<const HeapNumber*>(this)->value();
 }
 
-
 bool Object::IsNaN() const {
   return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value());
 }
 
-
 bool Object::IsMinusZero() const {
   return this->IsHeapNumber() &&
          i::IsMinusZero(HeapNumber::cast(this)->value());
 }
 
+// ------------------------------------
+// Cast operations
+
+#define CAST_ACCESSOR(type)                       \
+  type* type::cast(Object* object) {              \
+    SLOW_DCHECK(object->Is##type());              \
+    return reinterpret_cast<type*>(object);       \
+  }                                               \
+  const type* type::cast(const Object* object) {  \
+    SLOW_DCHECK(object->Is##type());              \
+    return reinterpret_cast<const type*>(object); \
+  }
+
+CAST_ACCESSOR(AbstractCode)
+CAST_ACCESSOR(ArrayList)
+CAST_ACCESSOR(BoilerplateDescription)
+CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(BytecodeArray)
+CAST_ACCESSOR(Cell)
+CAST_ACCESSOR(Code)
+CAST_ACCESSOR(CodeCacheHashTable)
+CAST_ACCESSOR(CompilationCacheTable)
+CAST_ACCESSOR(ConsString)
+CAST_ACCESSOR(DeoptimizationInputData)
+CAST_ACCESSOR(DeoptimizationOutputData)
+CAST_ACCESSOR(DependentCode)
+CAST_ACCESSOR(DescriptorArray)
+CAST_ACCESSOR(ExternalOneByteString)
+CAST_ACCESSOR(ExternalString)
+CAST_ACCESSOR(ExternalTwoByteString)
+CAST_ACCESSOR(FixedArray)
+CAST_ACCESSOR(FixedArrayBase)
+CAST_ACCESSOR(FixedDoubleArray)
+CAST_ACCESSOR(FixedTypedArrayBase)
+CAST_ACCESSOR(Foreign)
+CAST_ACCESSOR(FrameArray)
+CAST_ACCESSOR(GlobalDictionary)
+CAST_ACCESSOR(HandlerTable)
+CAST_ACCESSOR(HeapObject)
+CAST_ACCESSOR(JSArray)
+CAST_ACCESSOR(JSArrayBuffer)
+CAST_ACCESSOR(JSArrayBufferView)
+CAST_ACCESSOR(JSBoundFunction)
+CAST_ACCESSOR(JSDataView)
+CAST_ACCESSOR(JSDate)
+CAST_ACCESSOR(JSFunction)
+CAST_ACCESSOR(JSGeneratorObject)
+CAST_ACCESSOR(JSGlobalObject)
+CAST_ACCESSOR(JSGlobalProxy)
+CAST_ACCESSOR(JSMap)
+CAST_ACCESSOR(JSMapIterator)
+CAST_ACCESSOR(JSMessageObject)
+CAST_ACCESSOR(JSModuleNamespace)
+CAST_ACCESSOR(JSObject)
+CAST_ACCESSOR(JSProxy)
+CAST_ACCESSOR(JSReceiver)
+CAST_ACCESSOR(JSRegExp)
+CAST_ACCESSOR(JSPromiseCapability)
+CAST_ACCESSOR(JSPromise)
+CAST_ACCESSOR(JSSet)
+CAST_ACCESSOR(JSSetIterator)
+CAST_ACCESSOR(JSAsyncFromSyncIterator)
+CAST_ACCESSOR(JSStringIterator)
+CAST_ACCESSOR(JSArrayIterator)
+CAST_ACCESSOR(JSTypedArray)
+CAST_ACCESSOR(JSValue)
+CAST_ACCESSOR(JSWeakCollection)
+CAST_ACCESSOR(JSWeakMap)
+CAST_ACCESSOR(JSWeakSet)
+CAST_ACCESSOR(LayoutDescriptor)
+CAST_ACCESSOR(Map)
+CAST_ACCESSOR(ModuleInfo)
+CAST_ACCESSOR(Name)
+CAST_ACCESSOR(NameDictionary)
+CAST_ACCESSOR(NormalizedMapCache)
+CAST_ACCESSOR(Object)
+CAST_ACCESSOR(ObjectHashTable)
+CAST_ACCESSOR(ObjectHashSet)
+CAST_ACCESSOR(Oddball)
+CAST_ACCESSOR(OrderedHashMap)
+CAST_ACCESSOR(OrderedHashSet)
+CAST_ACCESSOR(PropertyCell)
+CAST_ACCESSOR(TemplateList)
+CAST_ACCESSOR(RegExpMatchInfo)
+CAST_ACCESSOR(ScopeInfo)
+CAST_ACCESSOR(SeededNumberDictionary)
+CAST_ACCESSOR(SeqOneByteString)
+CAST_ACCESSOR(SeqString)
+CAST_ACCESSOR(SeqTwoByteString)
+CAST_ACCESSOR(SharedFunctionInfo)
+CAST_ACCESSOR(SlicedString)
+CAST_ACCESSOR(Smi)
+CAST_ACCESSOR(String)
+CAST_ACCESSOR(StringSet)
+CAST_ACCESSOR(StringTable)
+CAST_ACCESSOR(Struct)
+CAST_ACCESSOR(Symbol)
+CAST_ACCESSOR(TemplateInfo)
+CAST_ACCESSOR(ThinString)
+CAST_ACCESSOR(UnseededNumberDictionary)
+CAST_ACCESSOR(WeakCell)
+CAST_ACCESSOR(WeakFixedArray)
+CAST_ACCESSOR(WeakHashTable)
+
+#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
+STRUCT_LIST(MAKE_STRUCT_CAST)
+#undef MAKE_STRUCT_CAST
+
+#undef CAST_ACCESSOR
+
+bool Object::HasValidElements() {
+  // Dictionary is covered under FixedArray.
+  return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
+}
+
+bool Object::KeyEquals(Object* second) {
+  Object* first = this;
+  if (second->IsNumber()) {
+    if (first->IsNumber()) return first->Number() == second->Number();
+    Object* temp = first;
+    first = second;
+    second = temp;
+  }
+  if (first->IsNumber()) {
+    DCHECK_LE(0, first->Number());
+    uint32_t expected = static_cast<uint32_t>(first->Number());
+    uint32_t index;
+    return Name::cast(second)->AsArrayIndex(&index) && index == expected;
+  }
+  return Name::cast(first)->Equals(Name::cast(second));
+}
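+// Note on the swap above: KeyEquals normalizes the mixed (Name, Number) case
+// so the number is always `first`; e.g. Smi 3 matches the string "3" through
+// Name::AsArrayIndex.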
+
+bool Object::FilterKey(PropertyFilter filter) {
+  if (IsSymbol()) {
+    if (filter & SKIP_SYMBOLS) return true;
+    if (Symbol::cast(this)->is_private()) return true;
+  } else {
+    if (filter & SKIP_STRINGS) return true;
+  }
+  return false;
+}
+
+Handle<Object> Object::NewStorageFor(Isolate* isolate, Handle<Object> object,
+                                     Representation representation) {
+  if (!representation.IsDouble()) return object;
+  Handle<HeapNumber> result = isolate->factory()->NewHeapNumber(MUTABLE);
+  if (object->IsUninitialized(isolate)) {
+    result->set_value_as_bits(kHoleNanInt64);
+  } else if (object->IsMutableHeapNumber()) {
+    // Ensure that all bits of the double value are preserved.
+    result->set_value_as_bits(HeapNumber::cast(*object)->value_as_bits());
+  } else {
+    result->set_value(object->Number());
+  }
+  return result;
+}
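+// NewStorageFor copies value_as_bits() rather than value() so the signaling
+// hole-NaN sentinel survives the move into the MUTABLE box (see the ia32 note
+// in JSObject::WriteToField below).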
+
+Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
+                                   Representation representation) {
+  DCHECK(!object->IsUninitialized(isolate));
+  if (!representation.IsDouble()) {
+    DCHECK(object->FitsRepresentation(representation));
+    return object;
+  }
+  return isolate->factory()->NewHeapNumber(HeapNumber::cast(*object)->value());
+}
+
+StringShape::StringShape(const String* str)
+    : type_(str->map()->instance_type()) {
+  set_valid();
+  DCHECK((type_ & kIsNotStringMask) == kStringTag);
+}
+
+StringShape::StringShape(Map* map) : type_(map->instance_type()) {
+  set_valid();
+  DCHECK((type_ & kIsNotStringMask) == kStringTag);
+}
+
+StringShape::StringShape(InstanceType t) : type_(static_cast<uint32_t>(t)) {
+  set_valid();
+  DCHECK((type_ & kIsNotStringMask) == kStringTag);
+}
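+// StringShape caches the instance type once, so the IsCons/IsSliced/IsThin/
+// IsExternal predicates below cost a masked compare instead of a map reload.
+// It is meant to be short-lived: the shape must be re-read after anything
+// that can change the string's representation.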
+
+bool StringShape::IsInternalized() {
+  DCHECK(valid());
+  STATIC_ASSERT(kNotInternalizedTag != 0);
+  return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
+         (kStringTag | kInternalizedTag);
+}
+
+bool String::IsOneByteRepresentation() const {
+  uint32_t type = map()->instance_type();
+  return (type & kStringEncodingMask) == kOneByteStringTag;
+}
+
+bool String::IsTwoByteRepresentation() const {
+  uint32_t type = map()->instance_type();
+  return (type & kStringEncodingMask) == kTwoByteStringTag;
+}
+
+bool String::IsOneByteRepresentationUnderneath() {
+  uint32_t type = map()->instance_type();
+  STATIC_ASSERT(kIsIndirectStringTag != 0);
+  STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
+  DCHECK(IsFlat());
+  switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
+    case kOneByteStringTag:
+      return true;
+    case kTwoByteStringTag:
+      return false;
+    default:  // Cons, sliced or thin string.  Need to go deeper.
+      return GetUnderlying()->IsOneByteRepresentation();
+  }
+}
+
+bool String::IsTwoByteRepresentationUnderneath() {
+  uint32_t type = map()->instance_type();
+  STATIC_ASSERT(kIsIndirectStringTag != 0);
+  STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
+  DCHECK(IsFlat());
+  switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
+    case kOneByteStringTag:
+      return false;
+    case kTwoByteStringTag:
+      return true;
+    default:  // Cons, sliced or thin string.  Need to go deeper.
+      return GetUnderlying()->IsTwoByteRepresentation();
+  }
+}
+
+bool String::HasOnlyOneByteChars() {
+  uint32_t type = map()->instance_type();
+  return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
+         IsOneByteRepresentation();
+}
+
+bool StringShape::IsCons() {
+  return (type_ & kStringRepresentationMask) == kConsStringTag;
+}
+
+bool StringShape::IsThin() {
+  return (type_ & kStringRepresentationMask) == kThinStringTag;
+}
+
+bool StringShape::IsSliced() {
+  return (type_ & kStringRepresentationMask) == kSlicedStringTag;
+}
+
+bool StringShape::IsIndirect() {
+  return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
+}
+
+bool StringShape::IsExternal() {
+  return (type_ & kStringRepresentationMask) == kExternalStringTag;
+}
+
+bool StringShape::IsSequential() {
+  return (type_ & kStringRepresentationMask) == kSeqStringTag;
+}
+
+StringRepresentationTag StringShape::representation_tag() {
+  uint32_t tag = (type_ & kStringRepresentationMask);
+  return static_cast<StringRepresentationTag>(tag);
+}
+
+uint32_t StringShape::encoding_tag() { return type_ & kStringEncodingMask; }
+
+uint32_t StringShape::full_representation_tag() {
+  return (type_ & (kStringRepresentationMask | kStringEncodingMask));
+}
+
+STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
+              Internals::kFullStringRepresentationMask);
+
+STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
+              Internals::kStringEncodingMask);
+
+bool StringShape::IsSequentialOneByte() {
+  return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
+}
+
+bool StringShape::IsSequentialTwoByte() {
+  return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
+}
+
+bool StringShape::IsExternalOneByte() {
+  return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
+}
+
+STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
+              Internals::kExternalOneByteRepresentationTag);
+
+STATIC_ASSERT(v8::String::ONE_BYTE_ENCODING == kOneByteStringTag);
+
+bool StringShape::IsExternalTwoByte() {
+  return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
+}
+
+STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
+              Internals::kExternalTwoByteRepresentationTag);
+
+STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
+
+uc32 FlatStringReader::Get(int index) {
+  if (is_one_byte_) {
+    return Get<uint8_t>(index);
+  } else {
+    return Get<uc16>(index);
+  }
+}
+
+template <typename Char>
+Char FlatStringReader::Get(int index) {
+  DCHECK_EQ(is_one_byte_, sizeof(Char) == 1);
+  DCHECK(0 <= index && index <= length_);
+  if (sizeof(Char) == 1) {
+    return static_cast<Char>(static_cast<const uint8_t*>(start_)[index]);
+  } else {
+    return static_cast<Char>(static_cast<const uc16*>(start_)[index]);
+  }
+}
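+// Usage sketch (assuming a flat string wrapped in a FlatStringReader `reader`):
+// reader.Get(i) dispatches on is_one_byte_ to the uint8_t or uc16 template
+// instantiation above.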
+
+Handle<Object> StringTableShape::AsHandle(Isolate* isolate, HashTableKey* key) {
+  return key->AsHandle(isolate);
+}
+
+Handle<Object> CompilationCacheShape::AsHandle(Isolate* isolate,
+                                               HashTableKey* key) {
+  return key->AsHandle(isolate);
+}
+
+Handle<Object> CodeCacheHashTableShape::AsHandle(Isolate* isolate,
+                                                 HashTableKey* key) {
+  return key->AsHandle(isolate);
+}
+
+template <typename Char>
+class SequentialStringKey : public HashTableKey {
+ public:
+  explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
+      : string_(string), hash_field_(0), seed_(seed) {}
+
+  uint32_t Hash() override {
+    hash_field_ = StringHasher::HashSequentialString<Char>(
+        string_.start(), string_.length(), seed_);
+
+    uint32_t result = hash_field_ >> String::kHashShift;
+    DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
+    return result;
+  }
+
+  uint32_t HashForObject(Object* other) override {
+    return String::cast(other)->Hash();
+  }
+
+  Vector<const Char> string_;
+  uint32_t hash_field_;
+  uint32_t seed_;
+};
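+// SequentialStringKey and its subclasses below implement the HashTableKey
+// protocol for string-table lookups: Hash() drives probing, IsMatch() compares
+// against stored strings, and AsHandle() materializes the internalized copy.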
+
+class OneByteStringKey : public SequentialStringKey<uint8_t> {
+ public:
+  OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
+      : SequentialStringKey<uint8_t>(str, seed) {}
+
+  bool IsMatch(Object* string) override {
+    return String::cast(string)->IsOneByteEqualTo(string_);
+  }
+
+  Handle<Object> AsHandle(Isolate* isolate) override;
+};
+
+class SeqOneByteSubStringKey : public HashTableKey {
+ public:
+  SeqOneByteSubStringKey(Handle<SeqOneByteString> string, int from, int length)
+      : string_(string), from_(from), length_(length) {
+    DCHECK(string_->IsSeqOneByteString());
+  }
+
+  uint32_t Hash() override {
+    DCHECK(length_ >= 0);
+    DCHECK(from_ + length_ <= string_->length());
+    const uint8_t* chars = string_->GetChars() + from_;
+    hash_field_ = StringHasher::HashSequentialString(
+        chars, length_, string_->GetHeap()->HashSeed());
+    uint32_t result = hash_field_ >> String::kHashShift;
+    DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
+    return result;
+  }
+
+  uint32_t HashForObject(Object* other) override {
+    return String::cast(other)->Hash();
+  }
+
+  bool IsMatch(Object* string) override;
+  Handle<Object> AsHandle(Isolate* isolate) override;
+
+ private:
+  Handle<SeqOneByteString> string_;
+  int from_;
+  int length_;
+  uint32_t hash_field_;
+};
+
+class TwoByteStringKey : public SequentialStringKey<uc16> {
+ public:
+  explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
+      : SequentialStringKey<uc16>(str, seed) {}
+
+  bool IsMatch(Object* string) override {
+    return String::cast(string)->IsTwoByteEqualTo(string_);
+  }
+
+  Handle<Object> AsHandle(Isolate* isolate) override;
+};
+
+// Utf8StringKey carries a vector of chars as key.
+class Utf8StringKey : public HashTableKey {
+ public:
+  explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
+      : string_(string), hash_field_(0), seed_(seed) {}
+
+  bool IsMatch(Object* string) override {
+    return String::cast(string)->IsUtf8EqualTo(string_);
+  }
+
+  uint32_t Hash() override {
+    if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
+    hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
+    uint32_t result = hash_field_ >> String::kHashShift;
+    DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
+    return result;
+  }
+
+  uint32_t HashForObject(Object* other) override {
+    return String::cast(other)->Hash();
+  }
+
+  Handle<Object> AsHandle(Isolate* isolate) override {
+    if (hash_field_ == 0) Hash();
+    return isolate->factory()->NewInternalizedStringFromUtf8(string_, chars_,
+                                                             hash_field_);
+  }
+
+  Vector<const char> string_;
+  uint32_t hash_field_;
+  int chars_;  // Caches the number of characters when computing the hash code.
+  uint32_t seed_;
+};
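+// Utf8StringKey::Hash() memoizes hash_field_ (0 meaning "not yet computed"),
+// so AsHandle() can reuse the cached hash and character count without
+// rescanning the UTF-8 input.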
 
 Representation Object::OptimalRepresentation() {
   if (!FLAG_track_fields) return Representation::Tagged();
@@ -1051,12 +1093,7 @@
   }
   if (IsHeapNumber()) {
     double num = HeapNumber::cast(this)->value();
-    if (num < 0) return false;
-    uint32_t uint_value = FastD2UI(num);
-    if (FastUI2D(uint_value) == num) {
-      *value = uint_value;
-      return true;
-    }
+    return DoubleToUint32IfEqualToSelf(num, value);
   }
   return false;
 }
@@ -1076,12 +1113,64 @@
 }
 
 // static
+MaybeHandle<Object> Object::ToPropertyKey(Isolate* isolate,
+                                          Handle<Object> value) {
+  if (value->IsSmi() || HeapObject::cast(*value)->IsName()) return value;
+  return ConvertToPropertyKey(isolate, value);
+}
+
+// static
 MaybeHandle<Object> Object::ToPrimitive(Handle<Object> input,
                                         ToPrimitiveHint hint) {
   if (input->IsPrimitive()) return input;
   return JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input), hint);
 }
 
+// static
+MaybeHandle<Object> Object::ToNumber(Handle<Object> input) {
+  if (input->IsNumber()) return input;
+  return ConvertToNumber(HeapObject::cast(*input)->GetIsolate(), input);
+}
+
+// static
+MaybeHandle<Object> Object::ToInteger(Isolate* isolate, Handle<Object> input) {
+  if (input->IsSmi()) return input;
+  return ConvertToInteger(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToInt32(Isolate* isolate, Handle<Object> input) {
+  if (input->IsSmi()) return input;
+  return ConvertToInt32(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToUint32(Isolate* isolate, Handle<Object> input) {
+  if (input->IsSmi()) return handle(Smi::cast(*input)->ToUint32Smi(), isolate);
+  return ConvertToUint32(isolate, input);
+}
+
+// static
+MaybeHandle<String> Object::ToString(Isolate* isolate, Handle<Object> input) {
+  if (input->IsString()) return Handle<String>::cast(input);
+  return ConvertToString(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
+  if (input->IsSmi()) {
+    int value = std::max(Smi::cast(*input)->value(), 0);
+    return handle(Smi::FromInt(value), isolate);
+  }
+  return ConvertToLength(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToIndex(Isolate* isolate, Handle<Object> input,
+                                    MessageTemplate::Template error_index) {
+  if (input->IsSmi() && Smi::cast(*input)->value() >= 0) return input;
+  return ConvertToIndex(isolate, input, error_index);
+}
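+// Each To*() above is an inline Smi/primitive fast path; the out-of-line
+// ConvertTo*() helpers implement the full spec coercions and may re-enter
+// JavaScript, e.g. through @@toPrimitive or valueOf.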
 
 bool Object::HasSpecificClassOf(String* name) {
   return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
@@ -1381,10 +1470,13 @@
 
 void HeapObject::set_map(Map* value) {
   set_map_word(MapWord::FromMap(value));
-  if (value != NULL) {
+  if (value != nullptr) {
    // TODO(1600): We are passing nullptr as a slot because maps can never be
    // on an evacuation candidate.
-    value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
+    value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
+#ifdef VERIFY_HEAP
+    value->GetHeap()->VerifyObjectLayoutChange(this, value);
+#endif
   }
 }
 
@@ -1396,10 +1488,13 @@
 
 void HeapObject::synchronized_set_map(Map* value) {
   synchronized_set_map_word(MapWord::FromMap(value));
-  if (value != NULL) {
+  if (value != nullptr) {
    // TODO(1600): We are passing nullptr as a slot because maps can never be
    // on an evacuation candidate.
-    value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
+    value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
+#ifdef VERIFY_HEAP
+    value->GetHeap()->VerifyObjectLayoutChange(this, value);
+#endif
   }
 }
 
@@ -1453,6 +1548,13 @@
   WRITE_DOUBLE_FIELD(this, kValueOffset, value);
 }
 
+uint64_t HeapNumber::value_as_bits() const {
+  return READ_UINT64_FIELD(this, kValueOffset);
+}
+
+void HeapNumber::set_value_as_bits(uint64_t bits) {
+  WRITE_UINT64_FIELD(this, kValueOffset, bits);
+}
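+// value_as_bits()/set_value_as_bits() transfer the raw IEEE-754 bit pattern
+// without going through a floating-point register, which could silently quiet
+// a signaling NaN (see JSObject::WriteToField).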
 
 int HeapNumber::get_exponent() {
   return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
@@ -1464,110 +1566,6 @@
   return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
 }
 
-
-bool Simd128Value::Equals(Simd128Value* that) {
-  // TODO(bmeurer): This doesn't match the SIMD.js specification, but it seems
-  // to be consistent with what the CompareICStub does, and what is tested in
-  // the current SIMD.js testsuite.
-  if (this == that) return true;
-#define SIMD128_VALUE(TYPE, Type, type, lane_count, lane_type) \
-  if (this->Is##Type()) {                                      \
-    if (!that->Is##Type()) return false;                       \
-    return Type::cast(this)->Equals(Type::cast(that));         \
-  }
-  SIMD128_TYPES(SIMD128_VALUE)
-#undef SIMD128_VALUE
-  return false;
-}
-
-
-// static
-bool Simd128Value::Equals(Handle<Simd128Value> one, Handle<Simd128Value> two) {
-  return one->Equals(*two);
-}
-
-
-#define SIMD128_VALUE_EQUALS(TYPE, Type, type, lane_count, lane_type) \
-  bool Type::Equals(Type* that) {                                     \
-    for (int lane = 0; lane < lane_count; ++lane) {                   \
-      if (this->get_lane(lane) != that->get_lane(lane)) return false; \
-    }                                                                 \
-    return true;                                                      \
-  }
-SIMD128_TYPES(SIMD128_VALUE_EQUALS)
-#undef SIMD128_VALUE_EQUALS
-
-
-#if defined(V8_TARGET_LITTLE_ENDIAN)
-#define SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
-  lane_type value =                                                      \
-      READ_##field_type##_FIELD(this, kValueOffset + lane * field_size);
-#elif defined(V8_TARGET_BIG_ENDIAN)
-#define SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
-  lane_type value = READ_##field_type##_FIELD(                           \
-      this, kValueOffset + (lane_count - lane - 1) * field_size);
-#else
-#error Unknown byte ordering
-#endif
-
-#if defined(V8_TARGET_LITTLE_ENDIAN)
-#define SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
-  WRITE_##field_type##_FIELD(this, kValueOffset + lane * field_size, value);
-#elif defined(V8_TARGET_BIG_ENDIAN)
-#define SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
-  WRITE_##field_type##_FIELD(                                         \
-      this, kValueOffset + (lane_count - lane - 1) * field_size, value);
-#else
-#error Unknown byte ordering
-#endif
-
-#define SIMD128_NUMERIC_LANE_FNS(type, lane_type, lane_count, field_type, \
-                                 field_size)                              \
-  lane_type type::get_lane(int lane) const {                              \
-    DCHECK(lane < lane_count && lane >= 0);                               \
-    SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size)      \
-    return value;                                                         \
-  }                                                                       \
-                                                                          \
-  void type::set_lane(int lane, lane_type value) {                        \
-    DCHECK(lane < lane_count && lane >= 0);                               \
-    SIMD128_WRITE_LANE(lane_count, field_type, field_size, value)         \
-  }
-
-SIMD128_NUMERIC_LANE_FNS(Float32x4, float, 4, FLOAT, kFloatSize)
-SIMD128_NUMERIC_LANE_FNS(Int32x4, int32_t, 4, INT32, kInt32Size)
-SIMD128_NUMERIC_LANE_FNS(Uint32x4, uint32_t, 4, UINT32, kInt32Size)
-SIMD128_NUMERIC_LANE_FNS(Int16x8, int16_t, 8, INT16, kShortSize)
-SIMD128_NUMERIC_LANE_FNS(Uint16x8, uint16_t, 8, UINT16, kShortSize)
-SIMD128_NUMERIC_LANE_FNS(Int8x16, int8_t, 16, INT8, kCharSize)
-SIMD128_NUMERIC_LANE_FNS(Uint8x16, uint8_t, 16, UINT8, kCharSize)
-#undef SIMD128_NUMERIC_LANE_FNS
-
-
-#define SIMD128_BOOLEAN_LANE_FNS(type, lane_type, lane_count, field_type, \
-                                 field_size)                              \
-  bool type::get_lane(int lane) const {                                   \
-    DCHECK(lane < lane_count && lane >= 0);                               \
-    SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size)      \
-    DCHECK(value == 0 || value == -1);                                    \
-    return value != 0;                                                    \
-  }                                                                       \
-                                                                          \
-  void type::set_lane(int lane, bool value) {                             \
-    DCHECK(lane < lane_count && lane >= 0);                               \
-    int32_t int_val = value ? -1 : 0;                                     \
-    SIMD128_WRITE_LANE(lane_count, field_type, field_size, int_val)       \
-  }
-
-SIMD128_BOOLEAN_LANE_FNS(Bool32x4, int32_t, 4, INT32, kInt32Size)
-SIMD128_BOOLEAN_LANE_FNS(Bool16x8, int16_t, 8, INT16, kShortSize)
-SIMD128_BOOLEAN_LANE_FNS(Bool8x16, int8_t, 16, INT8, kCharSize)
-#undef SIMD128_BOOLEAN_LANE_FNS
-
-#undef SIMD128_READ_LANE
-#undef SIMD128_WRITE_LANE
-
-
 ACCESSORS(JSReceiver, properties, FixedArray, kPropertiesOffset)
 
 
@@ -1671,6 +1669,11 @@
 }
 
 inline bool AllocationSite::CanTrack(InstanceType type) {
+  if (FLAG_turbo) {
+    // TurboFan doesn't care at all about String pretenuring feedback,
+    // so don't bother even trying to track that.
+    return type == JS_ARRAY_TYPE || type == JS_OBJECT_TYPE;
+  }
   if (FLAG_allocation_site_pretenuring) {
     return type == JS_ARRAY_TYPE ||
         type == JS_OBJECT_TYPE ||
@@ -2021,7 +2024,7 @@
  // Only the generational barrier is needed here: we never mark through a
  // weak cell, and evacuation candidates are collected when all weak cells
  // are processed.
-  WriteBarrierMode mode = Marking::IsBlack(ObjectMarking::MarkBitFrom(this))
+  WriteBarrierMode mode = ObjectMarking::IsBlack(this)
                               ? UPDATE_WRITE_BARRIER
                               : UPDATE_WEAK_WRITE_BARRIER;
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
@@ -2093,8 +2096,10 @@
       return JSWeakMap::kSize;
     case JS_WEAK_SET_TYPE:
       return JSWeakSet::kSize;
+    case JS_PROMISE_CAPABILITY_TYPE:
+      return JSPromiseCapability::kSize;
     case JS_PROMISE_TYPE:
-      return JSObject::kHeaderSize;
+      return JSPromise::kSize;
     case JS_REGEXP_TYPE:
       return JSRegExp::kSize;
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -2107,14 +2112,21 @@
       return JSObject::kHeaderSize;
     case JS_STRING_ITERATOR_TYPE:
       return JSStringIterator::kSize;
-    case JS_FIXED_ARRAY_ITERATOR_TYPE:
-      return JSFixedArrayIterator::kHeaderSize;
+    case JS_MODULE_NAMESPACE_TYPE:
+      return JSModuleNamespace::kHeaderSize;
     default:
+      if (type >= FIRST_ARRAY_ITERATOR_TYPE &&
+          type <= LAST_ARRAY_ITERATOR_TYPE) {
+        return JSArrayIterator::kSize;
+      }
       UNREACHABLE();
       return 0;
   }
 }
 
+inline bool IsSpecialReceiverInstanceType(InstanceType instance_type) {
+  return instance_type <= LAST_SPECIAL_RECEIVER_TYPE;
+}
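+// Instance types at or below LAST_SPECIAL_RECEIVER_TYPE are the ones that may
+// need special handling (e.g. interceptors or access checks, per the DCHECK in
+// Map::IsSpecialReceiverMap below), so the test is a single compare.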
 
 int JSObject::GetInternalFieldCount(Map* map) {
   int instance_size = map->instance_size();
@@ -2195,6 +2207,10 @@
   return READ_DOUBLE_FIELD(this, index.offset());
 }
 
+uint64_t JSObject::RawFastDoublePropertyAsBitsAt(FieldIndex index) {
+  DCHECK(IsUnboxedDoubleField(index));
+  return READ_UINT64_FIELD(this, index.offset());
+}
 
 void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
   if (index.is_inobject()) {
@@ -2206,16 +2222,17 @@
   }
 }
 
-
-void JSObject::RawFastDoublePropertyAtPut(FieldIndex index, double value) {
-  WRITE_DOUBLE_FIELD(this, index.offset(), value);
+void JSObject::RawFastDoublePropertyAsBitsAtPut(FieldIndex index,
+                                                uint64_t bits) {
+  WRITE_UINT64_FIELD(this, index.offset(), bits);
 }
 
-
 void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
   if (IsUnboxedDoubleField(index)) {
     DCHECK(value->IsMutableHeapNumber());
-    RawFastDoublePropertyAtPut(index, HeapNumber::cast(value)->value());
+    // Ensure that all bits of the double value are preserved.
+    RawFastDoublePropertyAsBitsAtPut(index,
+                                     HeapNumber::cast(value)->value_as_bits());
   } else {
     RawFastPropertyAtPut(index, value);
   }
@@ -2223,7 +2240,8 @@
 
 void JSObject::WriteToField(int descriptor, PropertyDetails details,
                             Object* value) {
-  DCHECK(details.type() == DATA);
+  DCHECK_EQ(kField, details.location());
+  DCHECK_EQ(kData, details.kind());
   DisallowHeapAllocation no_gc;
   FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
   if (details.representation().IsDouble()) {
@@ -2231,24 +2249,29 @@
     if (value->IsUninitialized(this->GetIsolate())) {
       return;
     }
+    // Manipulating the signaling NaN used for the hole and uninitialized
+    // double field sentinel in C++, e.g. with bit_cast or value()/set_value(),
+    // will change its value on ia32 (the x87 stack is used to return values
+    // and stores to the stack silently clear the signaling bit).
+    uint64_t bits;
+    if (value->IsSmi()) {
+      bits = bit_cast<uint64_t>(static_cast<double>(Smi::cast(value)->value()));
+    } else {
+      DCHECK(value->IsHeapNumber());
+      bits = HeapNumber::cast(value)->value_as_bits();
+    }
     if (IsUnboxedDoubleField(index)) {
-      RawFastDoublePropertyAtPut(index, value->Number());
+      RawFastDoublePropertyAsBitsAtPut(index, bits);
     } else {
       HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index));
       DCHECK(box->IsMutableHeapNumber());
-      box->set_value(value->Number());
+      box->set_value_as_bits(bits);
     }
   } else {
     RawFastPropertyAtPut(index, value);
   }
 }
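+// Worked example of the hazard documented above: on ia32, moving the hole NaN
+// through the x87 stack silently quiets it, turning the "uninitialized field"
+// sentinel into an ordinary NaN; shuffling uint64_t bits sidesteps the FPU
+// entirely.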
 
-void JSObject::WriteToField(int descriptor, Object* value) {
-  DescriptorArray* desc = map()->instance_descriptors();
-  PropertyDetails details = desc->GetDetails(descriptor);
-  WriteToField(descriptor, details, value);
-}
-
 int JSObject::GetInObjectPropertyOffset(int index) {
   return map()->GetInObjectPropertyOffset(index);
 }
@@ -2327,8 +2350,8 @@
   DCHECK(IsHeapObject());
   Isolate* isolate = HeapObject::cast(this)->GetIsolate();
   if (!(IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
-        IsSimd128Value() || IsUndefined(isolate) || IsTrue(isolate) ||
-        IsFalse(isolate) || IsNull(isolate))) {
+        IsUndefined(isolate) || IsTrue(isolate) || IsFalse(isolate) ||
+        IsNull(isolate))) {
     FATAL("API call returned invalid object");
   }
 #endif  // DEBUG
@@ -2337,7 +2360,7 @@
 
 Object* FixedArray::get(int index) const {
   SLOW_DCHECK(index >= 0 && index < this->length());
-  return READ_FIELD(this, kHeaderSize + index * kPointerSize);
+  return NOBARRIER_READ_FIELD(this, kHeaderSize + index * kPointerSize);
 }
 
 Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
@@ -2366,7 +2389,7 @@
   DCHECK(index >= 0 && index < this->length());
   DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
   int offset = kHeaderSize + index * kPointerSize;
-  WRITE_FIELD(this, offset, value);
+  NOBARRIER_WRITE_FIELD(this, offset, value);
 }
 
 
@@ -2376,7 +2399,7 @@
   DCHECK_GE(index, 0);
   DCHECK_LT(index, this->length());
   int offset = kHeaderSize + index * kPointerSize;
-  WRITE_FIELD(this, offset, value);
+  NOBARRIER_WRITE_FIELD(this, offset, value);
   WRITE_BARRIER(GetHeap(), this, offset, value);
 }
 
@@ -2420,6 +2443,9 @@
   DCHECK(!is_the_hole(index));
 }
 
+void FixedDoubleArray::set_the_hole(Isolate* isolate, int index) {
+  set_the_hole(index);
+}
 
 void FixedDoubleArray::set_the_hole(int index) {
   DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
@@ -2588,7 +2614,6 @@
     return kDoubleAligned;
   }
   if (IsHeapNumber()) return kDoubleUnaligned;
-  if (IsSimd128Value()) return kSimd128Unaligned;
 #endif  // V8_HOST_ARCH_32_BIT
   return kWordAligned;
 }
@@ -2597,8 +2622,9 @@
 void FixedArray::set(int index,
                      Object* value,
                      WriteBarrierMode mode) {
-  DCHECK(map() != GetHeap()->fixed_cow_array_map());
-  DCHECK(index >= 0 && index < this->length());
+  DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
+  DCHECK_GE(index, 0);
+  DCHECK_LT(index, this->length());
   int offset = kHeaderSize + index * kPointerSize;
   NOBARRIER_WRITE_FIELD(this, offset, value);
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
@@ -2608,45 +2634,38 @@
 void FixedArray::NoWriteBarrierSet(FixedArray* array,
                                    int index,
                                    Object* value) {
-  DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map());
-  DCHECK(index >= 0 && index < array->length());
+  DCHECK_NE(array->map(), array->GetHeap()->fixed_cow_array_map());
+  DCHECK_GE(index, 0);
+  DCHECK_LT(index, array->length());
   DCHECK(!array->GetHeap()->InNewSpace(value));
   NOBARRIER_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
 }
 
-
 void FixedArray::set_undefined(int index) {
-  DCHECK(map() != GetHeap()->fixed_cow_array_map());
-  DCHECK(index >= 0 && index < this->length());
-  DCHECK(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
-  WRITE_FIELD(this,
-              kHeaderSize + index * kPointerSize,
-              GetHeap()->undefined_value());
+  set_undefined(GetIsolate(), index);
 }
 
-
-void FixedArray::set_null(int index) {
-  DCHECK(index >= 0 && index < this->length());
-  DCHECK(!GetHeap()->InNewSpace(GetHeap()->null_value()));
-  WRITE_FIELD(this,
-              kHeaderSize + index * kPointerSize,
-              GetHeap()->null_value());
+void FixedArray::set_undefined(Isolate* isolate, int index) {
+  FixedArray::NoWriteBarrierSet(this, index,
+                                isolate->heap()->undefined_value());
 }
 
+void FixedArray::set_null(int index) { set_null(GetIsolate(), index); }
 
-void FixedArray::set_the_hole(int index) {
-  DCHECK(map() != GetHeap()->fixed_cow_array_map());
-  DCHECK(index >= 0 && index < this->length());
-  DCHECK(!GetHeap()->InNewSpace(GetHeap()->the_hole_value()));
-  WRITE_FIELD(this,
-              kHeaderSize + index * kPointerSize,
-              GetHeap()->the_hole_value());
+void FixedArray::set_null(Isolate* isolate, int index) {
+  FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->null_value());
 }
 
+void FixedArray::set_the_hole(int index) { set_the_hole(GetIsolate(), index); }
+
+void FixedArray::set_the_hole(Isolate* isolate, int index) {
+  FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->the_hole_value());
+}
 
 void FixedArray::FillWithHoles(int from, int to) {
+  Isolate* isolate = GetIsolate();
   for (int i = from; i < to; i++) {
-    set_the_hole(i);
+    set_the_hole(isolate, i);
   }
 }
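+// GetIsolate() is hoisted out of the loop so each iteration avoids re-deriving
+// the isolate from the object's memory chunk.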
 
@@ -2705,7 +2724,7 @@
 
 int DescriptorArray::number_of_descriptors_storage() {
   int len = length();
-  return len == 0 ? 0 : (len - kFirstIndex) / kDescriptorSize;
+  return len == 0 ? 0 : (len - kFirstIndex) / kEntrySize;
 }
 
 
@@ -2940,26 +2959,6 @@
   return result;
 }
 
-// static
-Handle<Map> Map::ReconfigureProperty(Handle<Map> map, int modify_index,
-                                     PropertyKind new_kind,
-                                     PropertyAttributes new_attributes,
-                                     Representation new_representation,
-                                     Handle<FieldType> new_field_type,
-                                     StoreMode store_mode) {
-  return Reconfigure(map, map->elements_kind(), modify_index, new_kind,
-                     new_attributes, new_representation, new_field_type,
-                     store_mode);
-}
-
-// static
-Handle<Map> Map::ReconfigureElementsKind(Handle<Map> map,
-                                         ElementsKind new_elements_kind) {
-  return Reconfigure(map, new_elements_kind, -1, kData, NONE,
-                     Representation::None(), FieldType::None(map->GetIsolate()),
-                     ALLOW_IN_DESCRIPTOR);
-}
-
 Object** DescriptorArray::GetKeySlot(int descriptor_number) {
   DCHECK(descriptor_number < number_of_descriptors());
   return RawFieldOfElementAt(ToKeyIndex(descriptor_number));
@@ -2998,15 +2997,6 @@
 }
 
 
-void DescriptorArray::SetRepresentation(int descriptor_index,
-                                        Representation representation) {
-  DCHECK(!representation.IsNone());
-  PropertyDetails details = GetDetails(descriptor_index);
-  set(ToDetailsIndex(descriptor_index),
-      details.CopyWithRepresentation(representation).AsSmi());
-}
-
-
 Object** DescriptorArray::GetValueSlot(int descriptor_number) {
   DCHECK(descriptor_number < number_of_descriptors());
   return RawFieldOfElementAt(ToValueIndex(descriptor_number));
@@ -3035,58 +3025,36 @@
   return PropertyDetails(Smi::cast(details));
 }
 
-
-PropertyType DescriptorArray::GetType(int descriptor_number) {
-  return GetDetails(descriptor_number).type();
-}
-
-
 int DescriptorArray::GetFieldIndex(int descriptor_number) {
   DCHECK(GetDetails(descriptor_number).location() == kField);
   return GetDetails(descriptor_number).field_index();
 }
 
-Object* DescriptorArray::GetConstant(int descriptor_number) {
-  return GetValue(descriptor_number);
+FieldType* DescriptorArray::GetFieldType(int descriptor_number) {
+  DCHECK(GetDetails(descriptor_number).location() == kField);
+  Object* wrapped_type = GetValue(descriptor_number);
+  return Map::UnwrapFieldType(wrapped_type);
 }
 
-
-Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
-  DCHECK(GetType(descriptor_number) == ACCESSOR_CONSTANT);
-  return GetValue(descriptor_number);
-}
-
-
-AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
-  DCHECK(GetType(descriptor_number) == ACCESSOR_CONSTANT);
-  Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
-  return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
-}
-
-
 void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
   desc->Init(handle(GetKey(descriptor_number), GetIsolate()),
              handle(GetValue(descriptor_number), GetIsolate()),
              GetDetails(descriptor_number));
 }
 
-
-void DescriptorArray::SetDescriptor(int descriptor_number, Descriptor* desc) {
+void DescriptorArray::Set(int descriptor_number, Name* key, Object* value,
+                          PropertyDetails details) {
   // Range check.
   DCHECK(descriptor_number < number_of_descriptors());
-  set(ToKeyIndex(descriptor_number), *desc->GetKey());
-  set(ToValueIndex(descriptor_number), *desc->GetValue());
-  set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi());
+  set(ToKeyIndex(descriptor_number), key);
+  set(ToValueIndex(descriptor_number), value);
+  set(ToDetailsIndex(descriptor_number), details.AsSmi());
 }
 
-
 void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
-  // Range check.
-  DCHECK(descriptor_number < number_of_descriptors());
-
-  set(ToKeyIndex(descriptor_number), *desc->GetKey());
-  set(ToValueIndex(descriptor_number), *desc->GetValue());
-  set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi());
+  Name* key = *desc->GetKey();
+  Object* value = *desc->GetValue();
+  Set(descriptor_number, key, value, desc->GetDetails());
 }
 
 
@@ -3117,14 +3085,6 @@
 }
 
 
-PropertyType DescriptorArray::Entry::type() { return descs_->GetType(index_); }
-
-
-Object* DescriptorArray::Entry::GetCallbackObject() {
-  return descs_->GetValue(index_);
-}
-
-
 int HashTableBase::NumberOfElements() {
   return Smi::cast(get(kNumberOfElementsIndex))->value();
 }
@@ -3272,107 +3232,6 @@
 }
 
 
-// ------------------------------------
-// Cast operations
-
-CAST_ACCESSOR(AbstractCode)
-CAST_ACCESSOR(ArrayList)
-CAST_ACCESSOR(Bool16x8)
-CAST_ACCESSOR(Bool32x4)
-CAST_ACCESSOR(Bool8x16)
-CAST_ACCESSOR(ByteArray)
-CAST_ACCESSOR(BytecodeArray)
-CAST_ACCESSOR(Cell)
-CAST_ACCESSOR(Code)
-CAST_ACCESSOR(CodeCacheHashTable)
-CAST_ACCESSOR(CompilationCacheTable)
-CAST_ACCESSOR(ConsString)
-CAST_ACCESSOR(DeoptimizationInputData)
-CAST_ACCESSOR(DeoptimizationOutputData)
-CAST_ACCESSOR(DependentCode)
-CAST_ACCESSOR(DescriptorArray)
-CAST_ACCESSOR(ExternalOneByteString)
-CAST_ACCESSOR(ExternalString)
-CAST_ACCESSOR(ExternalTwoByteString)
-CAST_ACCESSOR(FixedArray)
-CAST_ACCESSOR(FixedArrayBase)
-CAST_ACCESSOR(FixedDoubleArray)
-CAST_ACCESSOR(FixedTypedArrayBase)
-CAST_ACCESSOR(Float32x4)
-CAST_ACCESSOR(Foreign)
-CAST_ACCESSOR(FrameArray)
-CAST_ACCESSOR(GlobalDictionary)
-CAST_ACCESSOR(HandlerTable)
-CAST_ACCESSOR(HeapObject)
-CAST_ACCESSOR(Int16x8)
-CAST_ACCESSOR(Int32x4)
-CAST_ACCESSOR(Int8x16)
-CAST_ACCESSOR(JSArray)
-CAST_ACCESSOR(JSArrayBuffer)
-CAST_ACCESSOR(JSArrayBufferView)
-CAST_ACCESSOR(JSBoundFunction)
-CAST_ACCESSOR(JSDataView)
-CAST_ACCESSOR(JSDate)
-CAST_ACCESSOR(JSFunction)
-CAST_ACCESSOR(JSGeneratorObject)
-CAST_ACCESSOR(JSGlobalObject)
-CAST_ACCESSOR(JSGlobalProxy)
-CAST_ACCESSOR(JSMap)
-CAST_ACCESSOR(JSMapIterator)
-CAST_ACCESSOR(JSMessageObject)
-CAST_ACCESSOR(JSModuleNamespace)
-CAST_ACCESSOR(JSFixedArrayIterator)
-CAST_ACCESSOR(JSObject)
-CAST_ACCESSOR(JSProxy)
-CAST_ACCESSOR(JSReceiver)
-CAST_ACCESSOR(JSRegExp)
-CAST_ACCESSOR(JSSet)
-CAST_ACCESSOR(JSSetIterator)
-CAST_ACCESSOR(JSStringIterator)
-CAST_ACCESSOR(JSArrayIterator)
-CAST_ACCESSOR(JSTypedArray)
-CAST_ACCESSOR(JSValue)
-CAST_ACCESSOR(JSWeakCollection)
-CAST_ACCESSOR(JSWeakMap)
-CAST_ACCESSOR(JSWeakSet)
-CAST_ACCESSOR(LayoutDescriptor)
-CAST_ACCESSOR(Map)
-CAST_ACCESSOR(ModuleInfo)
-CAST_ACCESSOR(Name)
-CAST_ACCESSOR(NameDictionary)
-CAST_ACCESSOR(NormalizedMapCache)
-CAST_ACCESSOR(Object)
-CAST_ACCESSOR(ObjectHashTable)
-CAST_ACCESSOR(ObjectHashSet)
-CAST_ACCESSOR(Oddball)
-CAST_ACCESSOR(OrderedHashMap)
-CAST_ACCESSOR(OrderedHashSet)
-CAST_ACCESSOR(PropertyCell)
-CAST_ACCESSOR(TemplateList)
-CAST_ACCESSOR(RegExpMatchInfo)
-CAST_ACCESSOR(ScopeInfo)
-CAST_ACCESSOR(SeededNumberDictionary)
-CAST_ACCESSOR(SeqOneByteString)
-CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqTwoByteString)
-CAST_ACCESSOR(SharedFunctionInfo)
-CAST_ACCESSOR(Simd128Value)
-CAST_ACCESSOR(SlicedString)
-CAST_ACCESSOR(Smi)
-CAST_ACCESSOR(String)
-CAST_ACCESSOR(StringSet)
-CAST_ACCESSOR(StringTable)
-CAST_ACCESSOR(Struct)
-CAST_ACCESSOR(Symbol)
-CAST_ACCESSOR(TemplateInfo)
-CAST_ACCESSOR(Uint16x8)
-CAST_ACCESSOR(Uint32x4)
-CAST_ACCESSOR(Uint8x16)
-CAST_ACCESSOR(UnseededNumberDictionary)
-CAST_ACCESSOR(WeakCell)
-CAST_ACCESSOR(WeakFixedArray)
-CAST_ACCESSOR(WeakHashTable)
-
 template <class T>
 PodArray<T>* PodArray<T>::cast(Object* object) {
   SLOW_DCHECK(object->IsByteArray());
@@ -3491,66 +3350,6 @@
   set(1 + index * 2, offset);
 }
 
-
-Object* LiteralsArray::get(int index) const { return FixedArray::get(index); }
-
-
-void LiteralsArray::set(int index, Object* value) {
-  FixedArray::set(index, value);
-}
-
-
-void LiteralsArray::set(int index, Smi* value) {
-  FixedArray::set(index, value);
-}
-
-
-void LiteralsArray::set(int index, Object* value, WriteBarrierMode mode) {
-  FixedArray::set(index, value, mode);
-}
-
-
-LiteralsArray* LiteralsArray::cast(Object* object) {
-  SLOW_DCHECK(object->IsLiteralsArray());
-  return reinterpret_cast<LiteralsArray*>(object);
-}
-
-
-TypeFeedbackVector* LiteralsArray::feedback_vector() const {
-  if (length() == 0) {
-    return TypeFeedbackVector::cast(
-        const_cast<FixedArray*>(FixedArray::cast(this)));
-  }
-  return TypeFeedbackVector::cast(get(kVectorIndex));
-}
-
-
-void LiteralsArray::set_feedback_vector(TypeFeedbackVector* vector) {
-  if (length() <= kVectorIndex) {
-    DCHECK(vector->length() == 0);
-    return;
-  }
-  set(kVectorIndex, vector);
-}
-
-
-Object* LiteralsArray::literal(int literal_index) const {
-  return get(kFirstLiteralIndex + literal_index);
-}
-
-
-void LiteralsArray::set_literal(int literal_index, Object* literal) {
-  set(kFirstLiteralIndex + literal_index, literal);
-}
-
-void LiteralsArray::set_literal_undefined(int literal_index) {
-  set_undefined(kFirstLiteralIndex + literal_index);
-}
-
-int LiteralsArray::literals_count() const {
-  return length() - kFirstLiteralIndex;
-}
-
 int HandlerTable::GetRangeStart(int index) const {
   return Smi::cast(get(index * kRangeEntrySize + kRangeStartIndex))->value();
 }
@@ -3603,11 +3402,6 @@
   return length() / kRangeEntrySize;
 }
 
-#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
-  STRUCT_LIST(MAKE_STRUCT_CAST)
-#undef MAKE_STRUCT_CAST
-
-
 template <typename Derived, typename Shape, typename Key>
 HashTable<Derived, Shape, Key>*
 HashTable<Derived, Shape, Key>::cast(Object* obj) {
@@ -3704,7 +3498,7 @@
 SMI_ACCESSORS(Symbol, flags, kFlagsOffset)
 BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
 BOOL_ACCESSORS(Symbol, flags, is_well_known_symbol, kWellKnownSymbolBit)
-
+BOOL_ACCESSORS(Symbol, flags, is_public, kPublicBit)
 
 bool String::Equals(String* other) {
   if (other == this) return true;
@@ -3725,10 +3519,19 @@
 
 
 Handle<String> String::Flatten(Handle<String> string, PretenureFlag pretenure) {
-  if (!string->IsConsString()) return string;
-  Handle<ConsString> cons = Handle<ConsString>::cast(string);
-  if (cons->IsFlat()) return handle(cons->first());
-  return SlowFlatten(cons, pretenure);
+  if (string->IsConsString()) {
+    Handle<ConsString> cons = Handle<ConsString>::cast(string);
+    if (cons->IsFlat()) {
+      string = handle(cons->first());
+    } else {
+      return SlowFlatten(cons, pretenure);
+    }
+  }
+  if (string->IsThinString()) {
+    string = handle(Handle<ThinString>::cast(string)->actual());
+    DCHECK(!string->IsConsString());
+  }
+  return string;
 }
 
 
@@ -3749,6 +3552,9 @@
     case kSlicedStringTag | kOneByteStringTag:
     case kSlicedStringTag | kTwoByteStringTag:
       return SlicedString::cast(this)->SlicedStringGet(index);
+    case kThinStringTag | kOneByteStringTag:
+    case kThinStringTag | kTwoByteStringTag:
+      return ThinString::cast(this)->ThinStringGet(index);
     default:
       break;
   }
@@ -3780,6 +3586,7 @@
   DCHECK(this->IsFlat());
   DCHECK(StringShape(this).IsIndirect());
   STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset);
+  STATIC_ASSERT(ConsString::kFirstOffset == ThinString::kActualOffset);
   const int kUnderlyingOffset = SlicedString::kParentOffset;
   return String::cast(READ_FIELD(this, kUnderlyingOffset));
 }
@@ -3831,6 +3638,11 @@
       case kConsStringTag | kTwoByteStringTag:
         return ConsString::cast(string);
 
+      case kThinStringTag | kOneByteStringTag:
+      case kThinStringTag | kTwoByteStringTag:
+        string = ThinString::cast(string)->actual();
+        continue;
+
       default:
         UNREACHABLE();
         return NULL;
@@ -3854,6 +3666,12 @@
   return flat.ToUC16Vector();
 }
 
+uint32_t String::ToValidIndex(Object* number) {
+  uint32_t index = PositiveNumberToUint32(number);
+  uint32_t length_value = static_cast<uint32_t>(length());
+  if (index > length_value) return length_value;
+  return index;
+}
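+// ToValidIndex clamps the converted number to [0, length]; e.g. for a string
+// of length 3, an index object of 7 yields 3.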
 
 uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
   DCHECK(index >= 0 && index < length());
@@ -3956,6 +3774,7 @@
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kSecondOffset, value, mode);
 }
 
+ACCESSORS(ThinString, actual, String, kActualOffset);
 
 bool ExternalString::is_short() {
   InstanceType type = map()->instance_type();
@@ -4221,13 +4040,23 @@
   WRITE_INT8_FIELD(this, kOSRNestingLevelOffset, depth);
 }
 
+BytecodeArray::Age BytecodeArray::bytecode_age() const {
+  return static_cast<Age>(READ_INT8_FIELD(this, kBytecodeAgeOffset));
+}
+
+void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
+  DCHECK_GE(age, kFirstBytecodeAge);
+  DCHECK_LE(age, kLastBytecodeAge);
+  STATIC_ASSERT(kLastBytecodeAge <= kMaxInt8);
+  WRITE_INT8_FIELD(this, kBytecodeAgeOffset, static_cast<int8_t>(age));
+}
+
 int BytecodeArray::parameter_count() const {
  // The parameter count is stored as the on-stack size of the parameters so
  // that generated code can use it directly.
   return READ_INT_FIELD(this, kParameterSizeOffset) >> kPointerSizeLog2;
 }
 
-
 ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
 ACCESSORS(BytecodeArray, handler_table, FixedArray, kHandlerTableOffset)
 ACCESSORS(BytecodeArray, source_position_table, ByteArray,
@@ -4901,7 +4730,9 @@
     if (details.representation().IsSmi()) return true;
     if (details.representation().IsDouble()) return true;
     if (details.representation().IsHeapObject()) return true;
-    if (details.type() == DATA_CONSTANT) return true;
+    if (details.kind() == kData && details.location() == kDescriptor) {
+      return true;
+    }
   }
   return false;
 }
@@ -4950,6 +4781,12 @@
 bool Map::IsJSTypedArrayMap() { return instance_type() == JS_TYPED_ARRAY_TYPE; }
 bool Map::IsJSDataViewMap() { return instance_type() == JS_DATA_VIEW_TYPE; }
 
+bool Map::IsSpecialReceiverMap() {
+  bool result = IsSpecialReceiverInstanceType(instance_type());
+  DCHECK_IMPLIES(!result,
+                 !has_named_interceptor() && !is_access_check_needed());
+  return result;
+}
 
 bool Map::CanOmitMapChecks() {
   return is_stable() && FLAG_omit_map_checks_for_leaf_maps;
@@ -5035,7 +4872,8 @@
 }
 
 ExtraICState Code::extra_ic_state() {
-  DCHECK(is_inline_cache_stub() || is_debug_stub());
+  DCHECK(is_binary_op_stub() || is_compare_ic_stub() ||
+         is_to_boolean_ic_stub() || is_debug_stub());
   return ExtractExtraICStateFromFlags(flags());
 }
 
@@ -5125,6 +4963,32 @@
   WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
 }
 
+inline bool Code::is_promise_rejection() {
+  DCHECK(kind() == BUILTIN);
+  return IsPromiseRejectionField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+inline void Code::set_is_promise_rejection(bool value) {
+  DCHECK(kind() == BUILTIN);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int updated = IsPromiseRejectionField::update(previous, value);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
+inline bool Code::is_exception_caught() {
+  DCHECK(kind() == BUILTIN);
+  return IsExceptionCaughtField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+inline void Code::set_is_exception_caught(bool value) {
+  DCHECK(kind() == BUILTIN);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int updated = IsExceptionCaughtField::update(previous, value);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
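+// These BUILTIN-only bits share kKindSpecificFlags1 with the other
+// kind-specific flags; the DCHECK(kind() == BUILTIN) guards against reading
+// them on code kinds that reuse the field differently.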
+
 bool Code::has_deoptimization_support() {
   DCHECK_EQ(FUNCTION, kind());
   unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
@@ -5306,7 +5170,7 @@
   return false;
 }
 bool Code::is_handler() { return kind() == HANDLER; }
-bool Code::is_call_stub() { return kind() == CALL_IC; }
+bool Code::is_stub() { return kind() == STUB; }
 bool Code::is_binary_op_stub() { return kind() == BINARY_OP_IC; }
 bool Code::is_compare_ic_stub() { return kind() == COMPARE_IC; }
 bool Code::is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
@@ -5388,42 +5252,20 @@
 
 bool Code::IsWeakObjectInOptimizedCode(Object* object) {
   if (object->IsMap()) {
-    return Map::cast(object)->CanTransition() &&
-           FLAG_weak_embedded_maps_in_optimized_code;
+    return Map::cast(object)->CanTransition();
   }
   if (object->IsCell()) {
     object = Cell::cast(object)->value();
   } else if (object->IsPropertyCell()) {
     object = PropertyCell::cast(object)->value();
   }
-  if (object->IsJSReceiver()) {
-    return FLAG_weak_embedded_objects_in_optimized_code;
-  }
-  if (object->IsContext()) {
-    // Contexts of inlined functions are embedded in optimized code.
-    return FLAG_weak_embedded_objects_in_optimized_code;
+  if (object->IsJSReceiver() || object->IsContext()) {
+    return true;
   }
   return false;
 }
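+// Transitionable maps, JSReceivers and contexts embedded in optimized code are
+// now treated as unconditionally weak (the FLAG_weak_embedded_* escape hatches
+// are gone).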
 
 
-class Code::FindAndReplacePattern {
- public:
-  FindAndReplacePattern() : count_(0) { }
-  void Add(Handle<Map> map_to_find, Handle<Object> obj_to_replace) {
-    DCHECK(count_ < kMaxCount);
-    find_[count_] = map_to_find;
-    replace_[count_] = obj_to_replace;
-    ++count_;
-  }
- private:
-  static const int kMaxCount = 4;
-  int count_;
-  Handle<Map> find_[kMaxCount];
-  Handle<Object> replace_[kMaxCount];
-  friend class Code;
-};
-
 int AbstractCode::instruction_size() {
   if (IsCode()) {
     return GetCode()->instruction_size();
@@ -5448,16 +5290,6 @@
   }
 }
 
-int AbstractCode::LookupRangeInHandlerTable(
-    int code_offset, int* data, HandlerTable::CatchPrediction* prediction) {
-  if (IsCode()) {
-    return GetCode()->LookupRangeInHandlerTable(code_offset, data, prediction);
-  } else {
-    return GetBytecodeArray()->LookupRangeInHandlerTable(code_offset, data,
-                                                         prediction);
-  }
-}
-
 int AbstractCode::SizeIncludingMetadata() {
   if (IsCode()) {
     return GetCode()->SizeIncludingMetadata();
@@ -5609,7 +5441,7 @@
 // it should never try to (otherwise, layout descriptor must be updated too).
 #ifdef DEBUG
   PropertyDetails details = desc->GetDetails();
-  CHECK(details.type() != DATA || !details.representation().IsDouble());
+  CHECK(details.location() != kField || !details.representation().IsDouble());
 #endif
 }
 
@@ -5692,7 +5524,7 @@
 ACCESSORS(JSBoundFunction, bound_arguments, FixedArray, kBoundArgumentsOffset)
 
 ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
-ACCESSORS(JSFunction, literals, LiteralsArray, kLiteralsOffset)
+ACCESSORS(JSFunction, feedback_vector_cell, Cell, kFeedbackVectorOffset)
 ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
 
 ACCESSORS(JSGlobalObject, native_context, Context, kNativeContextOffset)
@@ -5711,21 +5543,20 @@
 ACCESSORS(AccessorInfo, js_getter, Object, kJsGetterOffset)
 ACCESSORS(AccessorInfo, data, Object, kDataOffset)
 
-ACCESSORS(Box, value, Object, kValueOffset)
-
 ACCESSORS(PromiseResolveThenableJobInfo, thenable, JSReceiver, kThenableOffset)
 ACCESSORS(PromiseResolveThenableJobInfo, then, JSReceiver, kThenOffset)
 ACCESSORS(PromiseResolveThenableJobInfo, resolve, JSFunction, kResolveOffset)
 ACCESSORS(PromiseResolveThenableJobInfo, reject, JSFunction, kRejectOffset)
-ACCESSORS(PromiseResolveThenableJobInfo, debug_id, Object, kDebugIdOffset)
-ACCESSORS(PromiseResolveThenableJobInfo, debug_name, Object, kDebugNameOffset)
 ACCESSORS(PromiseResolveThenableJobInfo, context, Context, kContextOffset);
 
 ACCESSORS(PromiseReactionJobInfo, value, Object, kValueOffset);
 ACCESSORS(PromiseReactionJobInfo, tasks, Object, kTasksOffset);
-ACCESSORS(PromiseReactionJobInfo, deferred, Object, kDeferredOffset);
-ACCESSORS(PromiseReactionJobInfo, debug_id, Object, kDebugIdOffset);
-ACCESSORS(PromiseReactionJobInfo, debug_name, Object, kDebugNameOffset);
+ACCESSORS(PromiseReactionJobInfo, deferred_promise, Object,
+          kDeferredPromiseOffset);
+ACCESSORS(PromiseReactionJobInfo, deferred_on_resolve, Object,
+          kDeferredOnResolveOffset);
+ACCESSORS(PromiseReactionJobInfo, deferred_on_reject, Object,
+          kDeferredOnRejectOffset);
 ACCESSORS(PromiseReactionJobInfo, context, Context, kContextOffset);
 
 Map* PrototypeInfo::ObjectCreateMap() {
@@ -5777,18 +5608,18 @@
 SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
 BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
 
-ACCESSORS(Tuple3, value1, Object, kValue1Offset)
-ACCESSORS(Tuple3, value2, Object, kValue2Offset)
+ACCESSORS(Tuple2, value1, Object, kValue1Offset)
+ACCESSORS(Tuple2, value2, Object, kValue2Offset)
 ACCESSORS(Tuple3, value3, Object, kValue3Offset)
 
 ACCESSORS(ContextExtension, scope_info, ScopeInfo, kScopeInfoOffset)
 ACCESSORS(ContextExtension, extension, Object, kExtensionOffset)
 
-ACCESSORS(JSModuleNamespace, module, Module, kModuleOffset)
+SMI_ACCESSORS(ConstantElementsPair, elements_kind, kElementsKindOffset)
+ACCESSORS(ConstantElementsPair, constant_values, FixedArrayBase,
+          kConstantValuesOffset)
 
-ACCESSORS(JSFixedArrayIterator, array, FixedArray, kArrayOffset)
-SMI_ACCESSORS(JSFixedArrayIterator, index, kIndexOffset)
-ACCESSORS(JSFixedArrayIterator, initial_next, JSFunction, kNextOffset)
+ACCESSORS(JSModuleNamespace, module, Module, kModuleOffset)
 
 ACCESSORS(Module, code, Object, kCodeOffset)
 ACCESSORS(Module, exports, ObjectHashTable, kExportsOffset)
@@ -5853,6 +5684,9 @@
 ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
 ACCESSORS(FunctionTemplateInfo, prototype_template, Object,
           kPrototypeTemplateOffset)
+ACCESSORS(FunctionTemplateInfo, prototype_provider_template, Object,
+          kPrototypeProviderTemplateOffset)
+
 ACCESSORS(FunctionTemplateInfo, parent_template, Object, kParentTemplateOffset)
 ACCESSORS(FunctionTemplateInfo, named_property_handler, Object,
           kNamedPropertyHandlerOffset)
@@ -5933,7 +5767,7 @@
                   this->type() != TYPE_WASM)
 SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
                       this->type() != TYPE_WASM)
-ACCESSORS(Script, shared_function_infos, Object, kSharedFunctionInfosOffset)
+ACCESSORS(Script, shared_function_infos, FixedArray, kSharedFunctionInfosOffset)
 SMI_ACCESSORS(Script, flags, kFlagsOffset)
 ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
 ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
@@ -5948,10 +5782,6 @@
   set_flags(BooleanBit::set(flags(), kCompilationTypeBit,
       type == COMPILATION_TYPE_EVAL));
 }
-bool Script::hide_source() { return BooleanBit::get(flags(), kHideSourceBit); }
-void Script::set_hide_source(bool value) {
-  set_flags(BooleanBit::set(flags(), kHideSourceBit, value));
-}
 Script::CompilationState Script::compilation_state() {
   return BooleanBit::get(flags(), kCompilationStateBit) ?
       COMPILATION_STATE_COMPILED : COMPILATION_STATE_INITIAL;
@@ -5972,6 +5802,7 @@
 
 
 ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
+SMI_ACCESSORS(DebugInfo, debugger_hints, kDebuggerHintsIndex)
 ACCESSORS(DebugInfo, debug_bytecode_array, Object, kDebugBytecodeArrayIndex)
 ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
 
@@ -6008,8 +5839,9 @@
 ACCESSORS(SharedFunctionInfo, optimized_code_map, FixedArray,
           kOptimizedCodeMapOffset)
 ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, feedback_metadata, TypeFeedbackMetadata,
+ACCESSORS(SharedFunctionInfo, feedback_metadata, FeedbackMetadata,
           kFeedbackMetadataOffset)
+SMI_ACCESSORS(SharedFunctionInfo, function_literal_id, kFunctionLiteralIdOffset)
 #if TRACE_MAPS
 SMI_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
 #endif
@@ -6040,32 +5872,12 @@
 BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
                kIsTopLevelBit)
 
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, allows_lazy_compilation,
-               kAllowLazyCompilation)
-BOOL_ACCESSORS(SharedFunctionInfo,
-               compiler_hints,
-               uses_arguments,
-               kUsesArguments)
-BOOL_ACCESSORS(SharedFunctionInfo,
-               compiler_hints,
-               has_duplicate_parameters,
-               kHasDuplicateParameters)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function, kIsAsmFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, deserialized, kDeserialized)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, never_compiled,
-               kNeverCompiled)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
-               kIsDeclaration)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, marked_for_tier_up,
-               kMarkedForTierUp)
-
 #if V8_HOST_ARCH_32_BIT
 SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
 SMI_ACCESSORS(SharedFunctionInfo, internal_formal_parameter_count,
               kFormalParameterCountOffset)
 SMI_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
               kExpectedNofPropertiesOffset)
-SMI_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
 SMI_ACCESSORS(SharedFunctionInfo, start_position_and_type,
               kStartPositionAndTypeOffset)
 SMI_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
@@ -6115,7 +5927,6 @@
 PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo,
                         expected_nof_properties,
                         kExpectedNofPropertiesOffset)
-PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
 
 PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, end_position, kEndPositionOffset)
 PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo,
@@ -6143,12 +5954,6 @@
 
 #endif
 
-
-BOOL_GETTER(SharedFunctionInfo,
-            compiler_hints,
-            optimization_disabled,
-            kOptimizationDisabled)
-
 AbstractCode* SharedFunctionInfo::abstract_code() {
   if (HasBytecodeArray()) {
     return AbstractCode::cast(bytecode_array());
@@ -6157,20 +5962,43 @@
   }
 }
 
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, allows_lazy_compilation,
+               kAllowLazyCompilation)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, uses_arguments,
+               kUsesArguments)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, has_duplicate_parameters,
+               kHasDuplicateParameters)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function, kIsAsmFunction)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
+               kIsDeclaration)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, marked_for_tier_up,
+               kMarkedForTierUp)
+
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, needs_home_object,
+               kNeedsHomeObject)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, force_inline, kForceInline)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, must_use_ignition_turbo,
+               kMustUseIgnitionTurbo)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
+               kIsAsmWasmBroken)
+
+BOOL_GETTER(SharedFunctionInfo, compiler_hints, optimization_disabled,
+            kOptimizationDisabled)
+
 void SharedFunctionInfo::set_optimization_disabled(bool disable) {
   set_compiler_hints(BooleanBit::set(compiler_hints(),
                                      kOptimizationDisabled,
                                      disable));
 }
 
-
 LanguageMode SharedFunctionInfo::language_mode() {
   STATIC_ASSERT(LANGUAGE_END == 2);
   return construct_language_mode(
       BooleanBit::get(compiler_hints(), kStrictModeFunction));
 }
 
-
 void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
   STATIC_ASSERT(LANGUAGE_END == 2);
   // We only allow language mode transitions that set the same language mode
@@ -6185,7 +6013,6 @@
   return FunctionKindBits::decode(compiler_hints());
 }
 
-
 void SharedFunctionInfo::set_kind(FunctionKind kind) {
   DCHECK(IsValidFunctionKind(kind));
   int hints = compiler_hints();
@@ -6193,25 +6020,19 @@
   set_compiler_hints(hints);
 }
 
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, needs_home_object,
-               kNeedsHomeObject)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, force_inline, kForceInline)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
-               name_should_print_as_anonymous,
-               kNameShouldPrintAsAnonymous)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous_expression,
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints,
+               name_should_print_as_anonymous, kNameShouldPrintAsAnonymous)
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, is_anonymous_expression,
                kIsAnonymousExpression)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_crankshaft,
-               kDontCrankshaft)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
-               kIsAsmWasmBroken)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, requires_class_field_init,
-               kRequiresClassFieldInit)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_class_field_initializer,
-               kIsClassFieldInitializer)
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, deserialized, kDeserialized)
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, has_no_side_effect,
+               kHasNoSideEffect)
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, computed_has_no_side_effect,
+               kComputedHasNoSideEffect)
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, debug_is_blackboxed,
+               kDebugIsBlackboxed)
+BOOL_ACCESSORS(SharedFunctionInfo, debugger_hints, computed_debug_is_blackboxed,
+               kComputedDebugIsBlackboxed)
 
 bool Script::HasValidSource() {
   Object* src = this->source();
@@ -6251,6 +6072,10 @@
 
 void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
   DCHECK(value->kind() != Code::OPTIMIZED_FUNCTION);
+  // If the SharedFunctionInfo has bytecode, we should never mark it for lazy
+  // compile, since the bytecode is never flushed.
+  DCHECK(value != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy) ||
+         !HasBytecodeArray());
   WRITE_FIELD(this, kCodeOffset, value);
   CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
 }
@@ -6270,8 +6095,6 @@
 #endif  // DEBUG
 
   set_code(value);
-
-  if (is_compiled()) set_never_compiled(false);
 }
 
 bool SharedFunctionInfo::IsInterpreted() const {
@@ -6313,25 +6136,35 @@
   return scope_info()->HasSimpleParameters();
 }
 
-
-bool SharedFunctionInfo::HasDebugInfo() {
-  bool has_debug_info = debug_info()->IsStruct();
+bool SharedFunctionInfo::HasDebugInfo() const {
+  bool has_debug_info = !debug_info()->IsSmi();
+  DCHECK_EQ(debug_info()->IsStruct(), has_debug_info);
   DCHECK(!has_debug_info || HasDebugCode());
   return has_debug_info;
 }
 
-
-DebugInfo* SharedFunctionInfo::GetDebugInfo() {
+DebugInfo* SharedFunctionInfo::GetDebugInfo() const {
   DCHECK(HasDebugInfo());
   return DebugInfo::cast(debug_info());
 }
 
-
-bool SharedFunctionInfo::HasDebugCode() {
+bool SharedFunctionInfo::HasDebugCode() const {
   if (HasBaselineCode()) return code()->has_debug_break_slots();
   return HasBytecodeArray();
 }
 
+int SharedFunctionInfo::debugger_hints() const {
+  if (HasDebugInfo()) return GetDebugInfo()->debugger_hints();
+  return Smi::cast(debug_info())->value();
+}
+
+void SharedFunctionInfo::set_debugger_hints(int value) {
+  if (HasDebugInfo()) {
+    GetDebugInfo()->set_debugger_hints(value);
+  } else {
+    set_debug_info(Smi::FromInt(value));
+  }
+}
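Reviewer note: the accessors above overload the debug_info slot. Until a
DebugInfo struct is allocated, debugger_hints lives in that slot directly as a
Smi; once debug info exists, the hints migrate into its dedicated field. A
minimal standalone sketch of that pattern, with invented names that are not
V8 API:

  struct DebugInfoModel { int hints = 0; };
  struct SharedModel {
    DebugInfoModel* debug_info = nullptr;  // nullptr models the "Smi" state
    int smi_hints = 0;                     // hints packed in the slot itself
    int hints() const { return debug_info ? debug_info->hints : smi_hints; }
    void set_hints(int v) {
      if (debug_info) debug_info->hints = v; else smi_hints = v;
    }
  };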
 
 bool SharedFunctionInfo::IsApiFunction() {
   return function_data()->IsFunctionTemplateInfo();
@@ -6348,11 +6181,11 @@
   set_function_data(data);
 }
 
-bool SharedFunctionInfo::HasBytecodeArray() {
+bool SharedFunctionInfo::HasBytecodeArray() const {
   return function_data()->IsBytecodeArray();
 }
 
-BytecodeArray* SharedFunctionInfo::bytecode_array() {
+BytecodeArray* SharedFunctionInfo::bytecode_array() const {
   DCHECK(HasBytecodeArray());
   return BytecodeArray::cast(function_data());
 }
@@ -6367,11 +6200,11 @@
   set_function_data(GetHeap()->undefined_value());
 }
 
-bool SharedFunctionInfo::HasAsmWasmData() {
+bool SharedFunctionInfo::HasAsmWasmData() const {
   return function_data()->IsFixedArray();
 }
 
-FixedArray* SharedFunctionInfo::asm_wasm_data() {
+FixedArray* SharedFunctionInfo::asm_wasm_data() const {
   DCHECK(HasAsmWasmData());
   return FixedArray::cast(function_data());
 }
@@ -6497,23 +6330,25 @@
       opt_count_and_bailout_reason(), reason));
 }
 
-
-bool SharedFunctionInfo::IsBuiltin() {
+bool SharedFunctionInfo::IsUserJavaScript() {
   Object* script_obj = script();
-  if (script_obj->IsUndefined(GetIsolate())) return true;
+  if (script_obj->IsUndefined(GetIsolate())) return false;
   Script* script = Script::cast(script_obj);
-  Script::Type type = static_cast<Script::Type>(script->type());
-  return type != Script::TYPE_NORMAL;
+  return static_cast<Script::Type>(script->type()) == Script::TYPE_NORMAL;
 }
 
 bool SharedFunctionInfo::IsSubjectToDebugging() {
-  return !IsBuiltin() && !HasAsmWasmData();
+  return IsUserJavaScript() && !HasAsmWasmData();
 }
 
 bool SharedFunctionInfo::OptimizedCodeMapIsCleared() const {
   return optimized_code_map() == GetHeap()->empty_fixed_array();
 }
 
+FeedbackVector* JSFunction::feedback_vector() const {
+  DCHECK(feedback_vector_cell()->value()->IsFeedbackVector());
+  return FeedbackVector::cast(feedback_vector_cell()->value());
+}
 
 bool JSFunction::IsOptimized() {
   return code()->kind() == Code::OPTIMIZED_FUNCTION;
@@ -6621,11 +6456,29 @@
   }
 }
 
+bool JSFunction::has_feedback_vector() const {
+  return !feedback_vector_cell()->value()->IsUndefined(GetIsolate());
+}
+
+JSFunction::FeedbackVectorState JSFunction::GetFeedbackVectorState(
+    Isolate* isolate) const {
+  Cell* cell = feedback_vector_cell();
+  if (cell == isolate->heap()->undefined_cell()) {
+    return TOP_LEVEL_SCRIPT_NEEDS_VECTOR;
+  } else if (cell->value() == isolate->heap()->undefined_value() ||
+             !has_feedback_vector()) {
+    return NEEDS_VECTOR;
+  }
+  return HAS_VECTOR;
+}
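Reviewer note: GetFeedbackVectorState distinguishes three cases. The shared
undefined_cell sentinel marks a top-level script whose vector cell was never
allocated (TOP_LEVEL_SCRIPT_NEEDS_VECTOR); an allocated cell still holding
undefined means the closure needs a vector before it runs (NEEDS_VECTOR);
anything else is HAS_VECTOR. A standalone model of the same decision, names
invented for illustration:

  enum class VectorState { kTopLevelNeedsVector, kNeedsVector, kHasVector };
  VectorState Classify(bool cell_is_undefined_sentinel,
                       bool value_is_undefined) {
    if (cell_is_undefined_sentinel) return VectorState::kTopLevelNeedsVector;
    if (value_is_undefined) return VectorState::kNeedsVector;
    return VectorState::kHasVector;
  }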
 
 Context* JSFunction::context() {
   return Context::cast(READ_FIELD(this, kContextOffset));
 }
 
+bool JSFunction::has_context() const {
+  return READ_FIELD(this, kContextOffset)->IsContext();
+}
 
 JSObject* JSFunction::global_proxy() {
   return context()->global_proxy();
@@ -6697,11 +6550,6 @@
          code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent);
 }
 
-TypeFeedbackVector* JSFunction::feedback_vector() {
-  LiteralsArray* array = literals();
-  return array->feedback_vector();
-}
-
 ACCESSORS(JSProxy, target, JSReceiver, kTargetOffset)
 ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
 ACCESSORS(JSProxy, hash, Object, kHashOffset)
@@ -6750,7 +6598,7 @@
 ACCESSORS(JSGeneratorObject, input_or_debug_pos, Object, kInputOrDebugPosOffset)
 SMI_ACCESSORS(JSGeneratorObject, resume_mode, kResumeModeOffset)
 SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
-ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
+ACCESSORS(JSGeneratorObject, register_file, FixedArray, kRegisterFileOffset)
 
 bool JSGeneratorObject::is_suspended() const {
   DCHECK_LT(kGeneratorExecuting, 0);
@@ -6766,8 +6614,6 @@
   return continuation() == kGeneratorExecuting;
 }
 
-TYPE_CHECKER(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE)
-
 ACCESSORS(JSValue, value, Object, kValueOffset)
 
 
@@ -6800,17 +6646,21 @@
 ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
 SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
 SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
-
+SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
 
 INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
 INT_ACCESSORS(Code, prologue_offset, kPrologueOffset)
 INT_ACCESSORS(Code, constant_pool_offset, kConstantPoolOffset)
-ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
-ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
-ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-ACCESSORS(Code, source_position_table, ByteArray, kSourcePositionTableOffset)
-ACCESSORS(Code, raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
-ACCESSORS(Code, next_code_link, Object, kNextCodeLinkOffset)
+#define CODE_ACCESSORS(name, type, offset)           \
+  ACCESSORS_CHECKED2(Code, name, type, offset, true, \
+                     !GetHeap()->InNewSpace(value))
+CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
+CODE_ACCESSORS(handler_table, FixedArray, kHandlerTableOffset)
+CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
+CODE_ACCESSORS(source_position_table, ByteArray, kSourcePositionTableOffset)
+CODE_ACCESSORS(raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
+CODE_ACCESSORS(next_code_link, Object, kNextCodeLinkOffset)
+#undef CODE_ACCESSORS
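Reviewer note: CODE_ACCESSORS is plain ACCESSORS plus a write-time predicate
that the stored pointer is not in new space (a plausible rationale: the GC can
then skip new-space remembered-set bookkeeping for Code header fields).
Assuming ACCESSORS_CHECKED2 folds its second condition into a check in the
setter, each generated setter has roughly this shape (simplified sketch, not
the exact macro expansion):

  void Code::set_relocation_info(ByteArray* value, WriteBarrierMode mode) {
    DCHECK(!GetHeap()->InNewSpace(value));  // enforce the old-space invariant
    WRITE_FIELD(this, kRelocationInfoOffset, value);
    CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kRelocationInfoOffset,
                              value, mode);
  }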
 
 void Code::WipeOutHeader() {
   WRITE_FIELD(this, kRelocationInfoOffset, NULL);
@@ -6984,6 +6834,7 @@
 
 
 void JSArrayBuffer::set_is_external(bool value) {
+  DCHECK(!value || !has_guard_region());
   set_bit_field(IsExternal::update(bit_field(), value));
 }
 
@@ -7013,6 +6864,13 @@
   set_bit_field(IsShared::update(bit_field(), value));
 }
 
+bool JSArrayBuffer::has_guard_region() {
+  return HasGuardRegion::decode(bit_field());
+}
+
+void JSArrayBuffer::set_has_guard_region(bool value) {
+  set_bit_field(HasGuardRegion::update(bit_field(), value));
+}
 
 Object* JSArrayBufferView::byte_offset() const {
   if (WasNeutered()) return Smi::kZero;
@@ -7069,11 +6927,37 @@
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kLengthOffset, value, mode);
 }
 
+// static
+MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
+                                                 Handle<Object> receiver,
+                                                 const char* method_name) {
+  if (V8_UNLIKELY(!receiver->IsJSTypedArray())) {
+    const MessageTemplate::Template message = MessageTemplate::kNotTypedArray;
+    THROW_NEW_ERROR(isolate, NewTypeError(message), JSTypedArray);
+  }
+
+  // TODO(caitp): throw if array.[[ViewedArrayBuffer]] is neutered (per v8:4648)
+  return Handle<JSTypedArray>::cast(receiver);
+}
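Reviewer note: a typical call site for Validate would look like the sketch
below; the builtin name and exact macro usage are illustrative, not taken
from this patch:

  // In a %TypedArray% builtin:
  Handle<JSTypedArray> array;
  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
      isolate, array,
      JSTypedArray::Validate(isolate, args.receiver(),
                             "%TypedArray%.prototype.keys"));
  // From here on, |array| is known to be a JSTypedArray; any other receiver
  // has already thrown kNotTypedArray through the empty-MaybeHandle path.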
 
 #ifdef VERIFY_HEAP
 ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
 #endif
 
+ACCESSORS(JSPromiseCapability, promise, Object, kPromiseOffset)
+ACCESSORS(JSPromiseCapability, resolve, Object, kResolveOffset)
+ACCESSORS(JSPromiseCapability, reject, Object, kRejectOffset)
+
+SMI_ACCESSORS(JSPromise, status, kStatusOffset)
+ACCESSORS(JSPromise, result, Object, kResultOffset)
+ACCESSORS(JSPromise, deferred_promise, Object, kDeferredPromiseOffset)
+ACCESSORS(JSPromise, deferred_on_resolve, Object, kDeferredOnResolveOffset)
+ACCESSORS(JSPromise, deferred_on_reject, Object, kDeferredOnRejectOffset)
+ACCESSORS(JSPromise, fulfill_reactions, Object, kFulfillReactionsOffset)
+ACCESSORS(JSPromise, reject_reactions, Object, kRejectReactionsOffset)
+SMI_ACCESSORS(JSPromise, flags, kFlagsOffset)
+BOOL_ACCESSORS(JSPromise, flags, has_handler, kHasHandlerBit)
+BOOL_ACCESSORS(JSPromise, flags, handled_hint, kHandledHintBit)
 
 ACCESSORS(JSRegExp, data, Object, kDataOffset)
 ACCESSORS(JSRegExp, flags, Object, kFlagsOffset)
@@ -8042,29 +7926,6 @@
 }
 
 
-bool ScopeInfo::IsAsmModule() { return AsmModuleField::decode(Flags()); }
-
-
-bool ScopeInfo::IsAsmFunction() { return AsmFunctionField::decode(Flags()); }
-
-
-bool ScopeInfo::HasSimpleParameters() {
-  return HasSimpleParametersField::decode(Flags());
-}
-
-
-#define SCOPE_INFO_FIELD_ACCESSORS(name)                                      \
-  void ScopeInfo::Set##name(int value) { set(k##name, Smi::FromInt(value)); } \
-  int ScopeInfo::name() {                                                     \
-    if (length() > 0) {                                                       \
-      return Smi::cast(get(k##name))->value();                                \
-    } else {                                                                  \
-      return 0;                                                               \
-    }                                                                         \
-  }
-FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(SCOPE_INFO_FIELD_ACCESSORS)
-#undef SCOPE_INFO_FIELD_ACCESSORS
-
 ACCESSORS(ModuleInfoEntry, export_name, Object, kExportNameOffset)
 ACCESSORS(ModuleInfoEntry, local_name, Object, kLocalNameOffset)
 ACCESSORS(ModuleInfoEntry, import_name, Object, kImportNameOffset)
@@ -8073,35 +7934,6 @@
 SMI_ACCESSORS(ModuleInfoEntry, beg_pos, kBegPosOffset)
 SMI_ACCESSORS(ModuleInfoEntry, end_pos, kEndPosOffset)
 
-FixedArray* ModuleInfo::module_requests() const {
-  return FixedArray::cast(get(kModuleRequestsIndex));
-}
-
-FixedArray* ModuleInfo::special_exports() const {
-  return FixedArray::cast(get(kSpecialExportsIndex));
-}
-
-FixedArray* ModuleInfo::regular_exports() const {
-  return FixedArray::cast(get(kRegularExportsIndex));
-}
-
-FixedArray* ModuleInfo::regular_imports() const {
-  return FixedArray::cast(get(kRegularImportsIndex));
-}
-
-FixedArray* ModuleInfo::namespace_imports() const {
-  return FixedArray::cast(get(kNamespaceImportsIndex));
-}
-
-#ifdef DEBUG
-bool ModuleInfo::Equals(ModuleInfo* other) const {
-  return regular_exports() == other->regular_exports() &&
-         regular_imports() == other->regular_imports() &&
-         special_exports() == other->special_exports() &&
-         namespace_imports() == other->namespace_imports();
-}
-#endif
-
 void Map::ClearCodeCache(Heap* heap) {
   // No write barrier is needed since empty_fixed_array is not in new space.
   // Please note this function is used during marking:
@@ -8129,11 +7961,13 @@
 
 
 bool JSArray::SetLengthWouldNormalize(Heap* heap, uint32_t new_length) {
+  // This constant is somewhat arbitrary. Any large enough value would work.
+  const uint32_t kMaxFastArrayLength = 32 * 1024 * 1024;
   // If the new array won't fit in some non-trivial fraction of the max old
   // space size, then force it to go dictionary mode.
-  uint32_t max_fast_array_size =
+  uint32_t heap_based_upper_bound =
       static_cast<uint32_t>((heap->MaxOldGenerationSize() / kDoubleSize) / 4);
-  return new_length >= max_fast_array_size;
+  return new_length >= Min(kMaxFastArrayLength, heap_based_upper_bound);
 }
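Reviewer note, arithmetic check on the new bound: the dictionary-mode
threshold is now

  Min(32 * 1024 * 1024, MaxOldGenerationSize / kDoubleSize / 4)

With 8-byte doubles, a 512 MB old generation gives 512 MB / 8 / 4 = 16M
elements, so the heap-derived bound still governs; at 2 GB the quotient is
64M elements and the fixed 32M cap takes over, keeping very large fast
arrays from monopolizing old space.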
 
 
@@ -8394,13 +8228,16 @@
 ACCESSORS(JSArrayIterator, index, Object, kNextIndexOffset)
 ACCESSORS(JSArrayIterator, object_map, Object, kIteratedObjectMapOffset)
 
+ACCESSORS(JSAsyncFromSyncIterator, sync_iterator, JSReceiver,
+          kSyncIteratorOffset)
+
 ACCESSORS(JSStringIterator, string, String, kStringOffset)
 SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
 
-#undef TYPE_CHECKER
-#undef CAST_ACCESSOR
 #undef INT_ACCESSORS
 #undef ACCESSORS
+#undef ACCESSORS_CHECKED
+#undef ACCESSORS_CHECKED2
 #undef SMI_ACCESSORS
 #undef SYNCHRONIZED_SMI_ACCESSORS
 #undef NOBARRIER_SMI_ACCESSORS
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 83e00b9..1e1a106 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -7,6 +7,7 @@
 #include <iomanip>
 #include <memory>
 
+#include "src/bootstrapper.h"
 #include "src/disasm.h"
 #include "src/disassembler.h"
 #include "src/interpreter/bytecodes.h"
@@ -73,9 +74,6 @@
       HeapNumber::cast(this)->HeapNumberPrint(os);
       os << ">\n";
       break;
-    case SIMD128_VALUE_TYPE:
-      Simd128Value::cast(this)->Simd128ValuePrint(os);
-      break;
     case FIXED_DOUBLE_ARRAY_TYPE:
       FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(os);
       break;
@@ -149,11 +147,14 @@
     case JS_SPECIAL_API_OBJECT_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
-    case JS_PROMISE_TYPE:
     case JS_ARGUMENTS_TYPE:
     case JS_ERROR_TYPE:
+    case JS_PROMISE_CAPABILITY_TYPE:
       JSObject::cast(this)->JSObjectPrint(os);
       break;
+    case JS_PROMISE_TYPE:
+      JSPromise::cast(this)->JSPromisePrint(os);
+      break;
     case JS_ARRAY_TYPE:
       JSArray::cast(this)->JSArrayPrint(os);
       break;
@@ -232,9 +233,6 @@
     case JS_TYPED_ARRAY_TYPE:
       JSTypedArray::cast(this)->JSTypedArrayPrint(os);
       break;
-    case JS_FIXED_ARRAY_ITERATOR_TYPE:
-      JSFixedArrayIterator::cast(this)->JSFixedArrayIteratorPrint(os);
-      break;
     case JS_DATA_VIEW_TYPE:
       JSDataView::cast(this)->JSDataViewPrint(os);
       break;
@@ -252,59 +250,6 @@
   }
 }
 
-
-void Simd128Value::Simd128ValuePrint(std::ostream& os) {  // NOLINT
-#define PRINT_SIMD128_VALUE(TYPE, Type, type, lane_count, lane_type) \
-  if (Is##Type()) return Type::cast(this)->Type##Print(os);
-  SIMD128_TYPES(PRINT_SIMD128_VALUE)
-#undef PRINT_SIMD128_VALUE
-  UNREACHABLE();
-}
-
-
-void Float32x4::Float32x4Print(std::ostream& os) {  // NOLINT
-  char arr[100];
-  Vector<char> buffer(arr, arraysize(arr));
-  os << std::string(DoubleToCString(get_lane(0), buffer)) << ", "
-     << std::string(DoubleToCString(get_lane(1), buffer)) << ", "
-     << std::string(DoubleToCString(get_lane(2), buffer)) << ", "
-     << std::string(DoubleToCString(get_lane(3), buffer));
-}
-
-
-#define SIMD128_INT_PRINT_FUNCTION(type, lane_count)                \
-  void type::type##Print(std::ostream& os) {                        \
-    char arr[100];                                                  \
-    Vector<char> buffer(arr, arraysize(arr));                       \
-    os << std::string(IntToCString(get_lane(0), buffer));           \
-    for (int i = 1; i < lane_count; i++) {                          \
-      os << ", " << std::string(IntToCString(get_lane(i), buffer)); \
-    }                                                               \
-  }
-SIMD128_INT_PRINT_FUNCTION(Int32x4, 4)
-SIMD128_INT_PRINT_FUNCTION(Uint32x4, 4)
-SIMD128_INT_PRINT_FUNCTION(Int16x8, 8)
-SIMD128_INT_PRINT_FUNCTION(Uint16x8, 8)
-SIMD128_INT_PRINT_FUNCTION(Int8x16, 16)
-SIMD128_INT_PRINT_FUNCTION(Uint8x16, 16)
-#undef SIMD128_INT_PRINT_FUNCTION
-
-
-#define SIMD128_BOOL_PRINT_FUNCTION(type, lane_count)            \
-  void type::type##Print(std::ostream& os) {                     \
-    char arr[100];                                               \
-    Vector<char> buffer(arr, arraysize(arr));                    \
-    os << std::string(get_lane(0) ? "true" : "false");           \
-    for (int i = 1; i < lane_count; i++) {                       \
-      os << ", " << std::string(get_lane(i) ? "true" : "false"); \
-    }                                                            \
-  }
-SIMD128_BOOL_PRINT_FUNCTION(Bool32x4, 4)
-SIMD128_BOOL_PRINT_FUNCTION(Bool16x8, 8)
-SIMD128_BOOL_PRINT_FUNCTION(Bool8x16, 16)
-#undef SIMD128_BOOL_PRINT_FUNCTION
-
-
 void ByteArray::ByteArrayPrint(std::ostream& os) {  // NOLINT
   os << "byte array, data starts at " << GetDataStartAddress();
 }
@@ -326,65 +271,64 @@
   os << "fixed " << Traits::Designator();
 }
 
-
-void JSObject::PrintProperties(std::ostream& os) {  // NOLINT
+bool JSObject::PrintProperties(std::ostream& os) {  // NOLINT
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();
-    for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
-      os << "\n   ";
+    int i = 0;
+    for (; i < map()->NumberOfOwnDescriptors(); i++) {
+      os << "\n    ";
       descs->GetKey(i)->NamePrint(os);
       os << ": ";
-      switch (descs->GetType(i)) {
-        case DATA: {
-          FieldIndex index = FieldIndex::ForDescriptor(map(), i);
-          if (IsUnboxedDoubleField(index)) {
-            os << "<unboxed double> " << RawFastDoublePropertyAt(index);
+      PropertyDetails details = descs->GetDetails(i);
+      switch (details.location()) {
+        case kField: {
+          FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
+          if (IsUnboxedDoubleField(field_index)) {
+            os << "<unboxed double> " << RawFastDoublePropertyAt(field_index);
           } else {
-            os << Brief(RawFastPropertyAt(index));
+            os << Brief(RawFastPropertyAt(field_index));
           }
-          os << " (data field at offset " << index.property_index() << ")";
           break;
         }
-        case ACCESSOR: {
-          FieldIndex index = FieldIndex::ForDescriptor(map(), i);
-          os << " (accessor field at offset " << index.property_index() << ")";
-          break;
-        }
-        case DATA_CONSTANT:
-          os << Brief(descs->GetConstant(i)) << " (data constant)";
-          break;
-        case ACCESSOR_CONSTANT:
-          os << Brief(descs->GetCallbacksObject(i)) << " (accessor constant)";
+        case kDescriptor:
+          os << Brief(descs->GetValue(i));
           break;
       }
+      os << " ";
+      details.PrintAsFastTo(os, PropertyDetails::kForProperties);
     }
+    return i > 0;
   } else if (IsJSGlobalObject()) {
     global_dictionary()->Print(os);
   } else {
     property_dictionary()->Print(os);
   }
+  return true;
 }
 
 namespace {
 
 template <class T>
+bool IsTheHoleAt(T* array, int index) {
+  return false;
+}
+
+template <>
+bool IsTheHoleAt(FixedDoubleArray* array, int index) {
+  return array->is_the_hole(index);
+}
+
+template <class T>
 double GetScalarElement(T* array, int index) {
+  if (IsTheHoleAt(array, index)) {
+    return std::numeric_limits<double>::quiet_NaN();
+  }
   return array->get_scalar(index);
 }
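Reviewer note: the hole check is routed through a full template
specialization, so only FixedDoubleArray pays for it; for every other backing
store IsTheHoleAt is a constant false the compiler folds away. The same
dispatch in a self-contained form, with types invented for illustration:

  #include <array>
  template <class T> bool HasHoleAt(const T&, int) { return false; }
  struct DoubleStore { std::array<bool, 4> hole{{false, true, false, false}}; };
  template <> bool HasHoleAt(const DoubleStore& s, int i) { return s.hole[i]; }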
 
-double GetScalarElement(FixedDoubleArray* array, int index) {
-  if (array->is_the_hole(index)) return bit_cast<double>(kHoleNanInt64);
-  return array->get_scalar(index);
-}
-
-bool is_the_hole(double maybe_hole) {
-  return bit_cast<uint64_t>(maybe_hole) == kHoleNanInt64;
-}
-
-}  // namespace
-
-template <class T, bool print_the_hole>
-static void DoPrintElements(std::ostream& os, Object* object) {  // NOLINT
+template <class T>
+void DoPrintElements(std::ostream& os, Object* object) {  // NOLINT
+  const bool print_the_hole = std::is_same<T, FixedDoubleArray>::value;
   T* array = T::cast(object);
   if (array->length() == 0) return;
   int previous_index = 0;
@@ -395,7 +339,7 @@
     if (i < array->length()) value = GetScalarElement(array, i);
     bool values_are_nan = std::isnan(previous_value) && std::isnan(value);
     if (i != array->length() && (previous_value == value || values_are_nan) &&
-        is_the_hole(previous_value) == is_the_hole(value)) {
+        IsTheHoleAt(array, i - 1) == IsTheHoleAt(array, i)) {
       continue;
     }
     os << "\n";
@@ -405,7 +349,7 @@
       ss << '-' << (i - 1);
     }
     os << std::setw(12) << ss.str() << ": ";
-    if (print_the_hole && is_the_hole(previous_value)) {
+    if (print_the_hole && IsTheHoleAt(array, i - 1)) {
       os << "<the_hole>";
     } else {
       os << previous_value;
@@ -415,50 +359,54 @@
   }
 }
 
+void PrintFixedArrayElements(std::ostream& os, FixedArray* array) {
+  // Print in array notation for non-sparse arrays.
+  Object* previous_value = array->length() > 0 ? array->get(0) : nullptr;
+  Object* value = nullptr;
+  int previous_index = 0;
+  int i;
+  for (i = 1; i <= array->length(); i++) {
+    if (i < array->length()) value = array->get(i);
+    if (previous_value == value && i != array->length()) {
+      continue;
+    }
+    os << "\n";
+    std::stringstream ss;
+    ss << previous_index;
+    if (previous_index != i - 1) {
+      ss << '-' << (i - 1);
+    }
+    os << std::setw(12) << ss.str() << ": " << Brief(previous_value);
+    previous_index = i;
+    previous_value = value;
+  }
+}
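Reviewer note: PrintFixedArrayElements run-length compresses the dump, one
line per run of pointer-identical values. For a five-element array holding
[x, x, x, y, y] the expected output shape is (indices right-aligned to width
12 by std::setw):

           0-2: <x>
           3-4: <y>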
 
-void JSObject::PrintElements(std::ostream& os) {  // NOLINT
+}  // namespace
+
+bool JSObject::PrintElements(std::ostream& os) {  // NOLINT
   // Don't call GetElementsKind; its validation code can cause the printer to
   // fail when debugging.
-  if (elements()->length() == 0) return;
+  if (elements()->length() == 0) return false;
   switch (map()->elements_kind()) {
     case FAST_HOLEY_SMI_ELEMENTS:
     case FAST_SMI_ELEMENTS:
     case FAST_HOLEY_ELEMENTS:
     case FAST_ELEMENTS:
     case FAST_STRING_WRAPPER_ELEMENTS: {
-      // Print in array notation for non-sparse arrays.
-      FixedArray* array = FixedArray::cast(elements());
-      Object* previous_value = array->get(0);
-      Object* value = nullptr;
-      int previous_index = 0;
-      int i;
-      for (i = 1; i <= array->length(); i++) {
-        if (i < array->length()) value = array->get(i);
-        if (previous_value == value && i != array->length()) {
-          continue;
-        }
-        os << "\n";
-        std::stringstream ss;
-        ss << previous_index;
-        if (previous_index != i - 1) {
-          ss << '-' << (i - 1);
-        }
-        os << std::setw(12) << ss.str() << ": " << Brief(previous_value);
-        previous_index = i;
-        previous_value = value;
-      }
+      PrintFixedArrayElements(os, FixedArray::cast(elements()));
       break;
     }
     case FAST_HOLEY_DOUBLE_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS: {
-      DoPrintElements<FixedDoubleArray, true>(os, elements());
+      DoPrintElements<FixedDoubleArray>(os, elements());
       break;
     }
 
-#define PRINT_ELEMENTS(Type, type, TYPE, elementType, size)     \
-  case TYPE##_ELEMENTS: {                                       \
-    DoPrintElements<Fixed##Type##Array, false>(os, elements()); \
-    break;                                                      \
+#define PRINT_ELEMENTS(Type, type, TYPE, elementType, size) \
+  case TYPE##_ELEMENTS: {                                   \
+    DoPrintElements<Fixed##Type##Array>(os, elements());    \
+    break;                                                  \
   }
       TYPED_ARRAYS(PRINT_ELEMENTS)
 #undef PRINT_ELEMENTS
@@ -481,6 +429,7 @@
     case NO_ELEMENTS:
       break;
   }
+  return true;
 }
 
 
@@ -511,19 +460,19 @@
 
 static void JSObjectPrintBody(std::ostream& os, JSObject* obj,  // NOLINT
                               bool print_elements = true) {
-  os << "\n - properties = {";
-  obj->PrintProperties(os);
-  os << "\n }\n";
+  os << "\n - properties = " << Brief(obj->properties()) << " {";
+  if (obj->PrintProperties(os)) os << "\n ";
+  os << "}\n";
   if (print_elements && obj->elements()->length() > 0) {
-    os << " - elements = {";
-    obj->PrintElements(os);
-    os << "\n }\n";
+    os << " - elements = " << Brief(obj->elements()) << " {";
+    if (obj->PrintElements(os)) os << "\n ";
+    os << "}\n";
   }
   int internal_fields = obj->GetInternalFieldCount();
   if (internal_fields > 0) {
     os << " - internal fields = {";
     for (int i = 0; i < internal_fields; i++) {
-      os << "\n    " << Brief(obj->GetInternalField(i));
+      os << "\n    " << obj->GetInternalField(i);
     }
     os << "\n }\n";
   }
@@ -541,6 +490,18 @@
   JSObjectPrintBody(os, this);
 }
 
+void JSPromise::JSPromisePrint(std::ostream& os) {  // NOLINT
+  JSObjectPrintHeader(os, this, "JSPromise");
+  os << "\n - status = " << JSPromise::Status(status());
+  os << "\n - result = " << Brief(result());
+  os << "\n - deferred_promise: " << Brief(deferred_promise());
+  os << "\n - deferred_on_resolve: " << Brief(deferred_on_resolve());
+  os << "\n - deferred_on_reject: " << Brief(deferred_on_reject());
+  os << "\n - fulfill_reactions = " << Brief(fulfill_reactions());
+  os << "\n - reject_reactions = " << Brief(reject_reactions());
+  os << "\n - has_handler = " << has_handler();
+  os << "\n ";
+}
 
 void JSRegExp::JSRegExpPrint(std::ostream& os) {  // NOLINT
   JSObjectPrintHeader(os, this, "JSRegExp");
@@ -578,6 +539,7 @@
   }
   if (is_deprecated()) os << "\n - deprecated_map";
   if (is_stable()) os << "\n - stable_map";
+  if (is_migration_target()) os << "\n - migration_target";
   if (is_dictionary_map()) os << "\n - dictionary_map";
   if (has_hidden_prototype()) os << "\n - has_hidden_prototype";
   if (has_named_interceptor()) os << "\n - named_interceptor";
@@ -597,7 +559,8 @@
      << "#" << NumberOfOwnDescriptors() << ": "
      << Brief(instance_descriptors());
   if (FLAG_unbox_double_fields) {
-    os << "\n - layout descriptor: " << Brief(layout_descriptor());
+    os << "\n - layout descriptor: ";
+    layout_descriptor()->ShortPrint(os);
   }
   int nof_transitions = TransitionArray::NumberOfTransitions(raw_transitions());
   if (nof_transitions > 0) {
@@ -631,25 +594,18 @@
 
 void FixedArray::FixedArrayPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "FixedArray");
+  os << "\n - map = " << Brief(map());
   os << "\n - length: " << length();
-  for (int i = 0; i < length(); i++) {
-    os << "\n  [" << i << "]: " << Brief(get(i));
-  }
+  PrintFixedArrayElements(os, this);
   os << "\n";
 }
 
 
 void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "FixedDoubleArray");
+  os << "\n - map = " << Brief(map());
   os << "\n - length: " << length();
-  for (int i = 0; i < length(); i++) {
-    os << "\n  [" << i << "]: ";
-    if (is_the_hole(i)) {
-      os << "<the hole>";
-    } else {
-      os << get_scalar(i);
-    }
-  }
+  DoPrintElements<FixedDoubleArray>(os, this);
   os << "\n";
 }
 
@@ -686,31 +642,24 @@
     return;
   }
 
-  for (int slot = 0, name_index = 0; slot < slot_count;) {
-    FeedbackVectorSlotKind kind = This()->GetKind(slot);
-    int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+  for (int slot = 0; slot < slot_count;) {
+    FeedbackSlotKind kind = This()->GetKind(FeedbackSlot(slot));
+    int entry_size = FeedbackMetadata::GetSlotSize(kind);
     DCHECK_LT(0, entry_size);
-
     os << "\n Slot #" << slot << " " << kind;
-    if (TypeFeedbackMetadata::SlotRequiresName(kind)) {
-      os << ", " << Brief(*This()->GetName(name_index++));
-    }
-
     slot += entry_size;
   }
   os << "\n";
 }
 
-void TypeFeedbackMetadata::Print() {
+void FeedbackMetadata::Print() {
   OFStream os(stdout);
-  TypeFeedbackMetadataPrint(os);
+  FeedbackMetadataPrint(os);
   os << std::flush;
 }
 
-
-void TypeFeedbackMetadata::TypeFeedbackMetadataPrint(
-    std::ostream& os) {  // NOLINT
-  HeapObject::PrintHeader(os, "TypeFeedbackMetadata");
+void FeedbackMetadata::FeedbackMetadataPrint(std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "FeedbackMetadata");
   os << "\n - length: " << length();
   if (length() == 0) {
     os << " (empty)\n";
@@ -718,89 +667,93 @@
   }
   os << "\n - slot_count: " << slot_count();
 
-  TypeFeedbackMetadataIterator iter(this);
+  FeedbackMetadataIterator iter(this);
   while (iter.HasNext()) {
-    FeedbackVectorSlot slot = iter.Next();
-    FeedbackVectorSlotKind kind = iter.kind();
+    FeedbackSlot slot = iter.Next();
+    FeedbackSlotKind kind = iter.kind();
     os << "\n Slot " << slot << " " << kind;
-    if (TypeFeedbackMetadata::SlotRequiresName(kind)) {
-      os << ", " << Brief(iter.name());
-    }
   }
   os << "\n";
 }
 
-
-void TypeFeedbackVector::Print() {
+void FeedbackVector::Print() {
   OFStream os(stdout);
-  TypeFeedbackVectorPrint(os);
+  FeedbackVectorPrint(os);
   os << std::flush;
 }
 
-
-void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) {  // NOLINT
-  HeapObject::PrintHeader(os, "TypeFeedbackVector");
+void FeedbackVector::FeedbackVectorPrint(std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "FeedbackVector");
   os << "\n - length: " << length();
   if (length() == 0) {
     os << " (empty)\n";
     return;
   }
 
-  TypeFeedbackMetadataIterator iter(metadata());
+  FeedbackMetadataIterator iter(metadata());
   while (iter.HasNext()) {
-    FeedbackVectorSlot slot = iter.Next();
-    FeedbackVectorSlotKind kind = iter.kind();
+    FeedbackSlot slot = iter.Next();
+    FeedbackSlotKind kind = iter.kind();
 
     os << "\n Slot " << slot << " " << kind;
-    if (TypeFeedbackMetadata::SlotRequiresName(kind)) {
-      os << ", " << Brief(iter.name());
-    }
     os << " ";
     switch (kind) {
-      case FeedbackVectorSlotKind::LOAD_IC: {
+      case FeedbackSlotKind::kLoadProperty: {
         LoadICNexus nexus(this, slot);
         os << Code::ICState2String(nexus.StateFromFeedback());
         break;
       }
-      case FeedbackVectorSlotKind::LOAD_GLOBAL_IC: {
+      case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+      case FeedbackSlotKind::kLoadGlobalNotInsideTypeof: {
         LoadGlobalICNexus nexus(this, slot);
         os << Code::ICState2String(nexus.StateFromFeedback());
         break;
       }
-      case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
+      case FeedbackSlotKind::kLoadKeyed: {
         KeyedLoadICNexus nexus(this, slot);
         os << Code::ICState2String(nexus.StateFromFeedback());
         break;
       }
-      case FeedbackVectorSlotKind::CALL_IC: {
+      case FeedbackSlotKind::kCall: {
         CallICNexus nexus(this, slot);
         os << Code::ICState2String(nexus.StateFromFeedback());
         break;
       }
-      case FeedbackVectorSlotKind::STORE_IC: {
+      case FeedbackSlotKind::kStoreNamedSloppy:
+      case FeedbackSlotKind::kStoreNamedStrict:
+      case FeedbackSlotKind::kStoreOwnNamed: {
         StoreICNexus nexus(this, slot);
         os << Code::ICState2String(nexus.StateFromFeedback());
         break;
       }
-      case FeedbackVectorSlotKind::KEYED_STORE_IC: {
+      case FeedbackSlotKind::kStoreKeyedSloppy:
+      case FeedbackSlotKind::kStoreKeyedStrict: {
         KeyedStoreICNexus nexus(this, slot);
         os << Code::ICState2String(nexus.StateFromFeedback());
         break;
       }
-      case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC: {
+      case FeedbackSlotKind::kBinaryOp: {
         BinaryOpICNexus nexus(this, slot);
         os << Code::ICState2String(nexus.StateFromFeedback());
         break;
       }
-      case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
+      case FeedbackSlotKind::kCompareOp: {
         CompareICNexus nexus(this, slot);
         os << Code::ICState2String(nexus.StateFromFeedback());
         break;
       }
-      case FeedbackVectorSlotKind::GENERAL:
+      case FeedbackSlotKind::kStoreDataPropertyInLiteral: {
+        StoreDataPropertyInLiteralICNexus nexus(this, slot);
+        os << Code::ICState2String(nexus.StateFromFeedback());
         break;
-      case FeedbackVectorSlotKind::INVALID:
-      case FeedbackVectorSlotKind::KINDS_NUMBER:
+      }
+      case FeedbackSlotKind::kCreateClosure:
+      case FeedbackSlotKind::kLiteral:
+      case FeedbackSlotKind::kGeneral:
+        break;
+      case FeedbackSlotKind::kToBoolean:
+      case FeedbackSlotKind::kInvalid:
+      case FeedbackSlotKind::kKindsNumber:
         UNREACHABLE();
         break;
     }
@@ -839,6 +792,8 @@
     os << "#";
   } else if (StringShape(this).IsCons()) {
     os << "c\"";
+  } else if (StringShape(this).IsThin()) {
+    os << ">\"";
   } else {
     os << "\"";
   }
@@ -1011,15 +966,6 @@
   JSObjectPrintBody(os, this);
 }
 
-void JSFixedArrayIterator::JSFixedArrayIteratorPrint(
-    std::ostream& os) {  // NOLINT
-  JSObjectPrintHeader(os, this, "JSFixedArrayIterator");
-  os << "\n - array = " << Brief(array());
-  os << "\n - index = " << index();
-  os << "\n - initial_next = " << Brief(initial_next());
-  JSObjectPrintBody(os, this);
-}
-
 void JSDataView::JSDataViewPrint(std::ostream& os) {  // NOLINT
   JSObjectPrintHeader(os, this, "JSDataView");
   os << "\n - buffer =" << Brief(buffer());
@@ -1053,17 +999,47 @@
     os << "\n   - async";
   }
   os << "\n - context = " << Brief(context());
-  os << "\n - literals = " << Brief(literals());
+  os << "\n - feedback vector cell = " << Brief(feedback_vector_cell());
   os << "\n - code = " << Brief(code());
   JSObjectPrintBody(os, this);
 }
 
+namespace {
+
+std::ostream& operator<<(std::ostream& os, FunctionKind kind) {
+  os << "[";
+  if (kind == FunctionKind::kNormalFunction) {
+    os << " NormalFunction";
+  } else {
+#define PRINT_FLAG(name)                                                  \
+  if (static_cast<int>(kind) & static_cast<int>(FunctionKind::k##name)) { \
+    os << " " << #name;                                                   \
+  }
+
+    PRINT_FLAG(ArrowFunction)
+    PRINT_FLAG(GeneratorFunction)
+    PRINT_FLAG(ConciseMethod)
+    PRINT_FLAG(DefaultConstructor)
+    PRINT_FLAG(DerivedConstructor)
+    PRINT_FLAG(BaseConstructor)
+    PRINT_FLAG(GetterFunction)
+    PRINT_FLAG(SetterFunction)
+    PRINT_FLAG(AsyncFunction)
+    PRINT_FLAG(Module)
+#undef PRINT_FLAG
+  }
+  return os << " ]";
+}
+
+}  // namespace
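Reviewer note: FunctionKind is a bit mask, so the printer above may emit
several flag names for a single kind; for example, an async arrow function
should print as [ ArrowFunction AsyncFunction ], assuming kAsyncArrowFunction
is the bitwise OR of those two flags, as the PRINT_FLAG test implies.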
 
 void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "SharedFunctionInfo");
   os << "\n - name = " << Brief(name());
+  os << "\n - kind = " << kind();
   os << "\n - formal_parameter_count = " << internal_formal_parameter_count();
   os << "\n - expected_nof_properties = " << expected_nof_properties();
+  os << "\n - language_mode = " << language_mode();
   os << "\n - ast_node_count = " << ast_node_count();
   os << "\n - instance class name = ";
   instance_class_name()->Print(os);
@@ -1090,12 +1066,15 @@
   os << "\n - function token position = " << function_token_position();
   os << "\n - start position = " << start_position();
   os << "\n - end position = " << end_position();
-  os << "\n - debug info = " << Brief(debug_info());
+  if (HasDebugInfo()) {
+    os << "\n - debug info = " << Brief(debug_info());
+  } else {
+    os << "\n - no debug info";
+  }
   os << "\n - length = " << length();
-  os << "\n - num_literals = " << num_literals();
   os << "\n - optimized_code_map = " << Brief(optimized_code_map());
   os << "\n - feedback_metadata = ";
-  feedback_metadata()->TypeFeedbackMetadataPrint(os);
+  feedback_metadata()->FeedbackMetadataPrint(os);
   if (HasBytecodeArray()) {
     os << "\n - bytecode_array = " << bytecode_array();
   }
@@ -1105,7 +1084,9 @@
 
 void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) {  // NOLINT
   JSObjectPrintHeader(os, this, "JSGlobalProxy");
-  os << "\n - native context = " << Brief(native_context());
+  if (!GetIsolate()->bootstrapper()->IsActive()) {
+    os << "\n - native context = " << Brief(native_context());
+  }
   os << "\n - hash = " << Brief(hash());
   JSObjectPrintBody(os, this);
 }
@@ -1113,7 +1094,9 @@
 
 void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) {  // NOLINT
   JSObjectPrintHeader(os, this, "JSGlobalObject");
-  os << "\n - native context = " << Brief(native_context());
+  if (!GetIsolate()->bootstrapper()->IsActive()) {
+    os << "\n - native context = " << Brief(native_context());
+  }
   os << "\n - global proxy = " << Brief(global_proxy());
   JSObjectPrintBody(os, this);
 }
@@ -1129,7 +1112,8 @@
 void PropertyCell::PropertyCellPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "PropertyCell");
   os << "\n - value: " << Brief(value());
-  os << "\n - details: " << property_details();
+  os << "\n - details: ";
+  property_details().PrintAsSlowTo(os);
   PropertyCellType cell_type = property_details().cell_type();
   os << "\n - cell_type: ";
   if (value()->IsTheHole(GetIsolate())) {
@@ -1214,12 +1198,6 @@
 }
 
 
-void Box::BoxPrint(std::ostream& os) {  // NOLINT
-  HeapObject::PrintHeader(os, "Box");
-  os << "\n - value: " << Brief(value());
-  os << "\n";
-}
-
 void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoPrint(
     std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "PromiseResolveThenableJobInfo");
@@ -1227,8 +1205,6 @@
   os << "\n - then: " << Brief(then());
   os << "\n - resolve: " << Brief(resolve());
   os << "\n - reject: " << Brief(reject());
-  os << "\n - debug id: " << Brief(debug_id());
-  os << "\n - debug name: " << Brief(debug_name());
   os << "\n - context: " << Brief(context());
   os << "\n";
 }
@@ -1238,9 +1214,9 @@
   HeapObject::PrintHeader(os, "PromiseReactionJobInfo");
   os << "\n - value: " << Brief(value());
   os << "\n - tasks: " << Brief(tasks());
-  os << "\n - deferred: " << Brief(deferred());
-  os << "\n - debug id: " << Brief(debug_id());
-  os << "\n - debug name: " << Brief(debug_name());
+  os << "\n - deferred_promise: " << Brief(deferred_promise());
+  os << "\n - deferred_on_resolve: " << Brief(deferred_on_resolve());
+  os << "\n - deferred_on_reject: " << Brief(deferred_on_reject());
   os << "\n - reaction context: " << Brief(context());
   os << "\n";
 }
@@ -1259,17 +1235,26 @@
 
 void Module::ModulePrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "Module");
+  // TODO(neis): Simplify once modules have a script field.
+  if (!evaluated()) {
+    SharedFunctionInfo* shared = code()->IsSharedFunctionInfo()
+                                     ? SharedFunctionInfo::cast(code())
+                                     : JSFunction::cast(code())->shared();
+    Object* origin = Script::cast(shared->script())->GetNameOrSourceURL();
+    os << "\n - origin: " << Brief(origin);
+  }
   os << "\n - code: " << Brief(code());
   os << "\n - exports: " << Brief(exports());
   os << "\n - requested_modules: " << Brief(requested_modules());
-  os << "\n - evaluated: " << evaluated();
+  os << "\n - instantiated, evaluated: " << instantiated() << ", "
+     << evaluated();
   os << "\n";
 }
 
 void JSModuleNamespace::JSModuleNamespacePrint(std::ostream& os) {  // NOLINT
-  HeapObject::PrintHeader(os, "JSModuleNamespace");
+  JSObjectPrintHeader(os, this, "JSModuleNamespace");
   os << "\n - module: " << Brief(module());
-  os << "\n";
+  JSObjectPrintBody(os, this);
 }
 
 void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) {  // NOLINT
@@ -1282,6 +1267,13 @@
   os << "\n";
 }
 
+void Tuple2::Tuple2Print(std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "Tuple2");
+  os << "\n - value1: " << Brief(value1());
+  os << "\n - value2: " << Brief(value2());
+  os << "\n";
+}
+
 void Tuple3::Tuple3Print(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "Tuple3");
   os << "\n - value1: " << Brief(value1());
@@ -1297,6 +1289,13 @@
   os << "\n";
 }
 
+void ConstantElementsPair::ConstantElementsPairPrint(
+    std::ostream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "ConstantElementsPair");
+  os << "\n - elements_kind: " << static_cast<ElementsKind>(elements_kind());
+  os << "\n - constant_values: " << Brief(constant_values());
+  os << "\n";
+}
 
 void AccessorPair::AccessorPairPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "AccessorPair");
@@ -1392,7 +1391,7 @@
   } else if (transition_info()->IsJSArray()) {
     os << "Array literal " << Brief(transition_info());
   } else {
-    os << "unknown transition_info" << Brief(transition_info());
+    os << "unknown transition_info " << Brief(transition_info());
   }
   os << "\n";
 }
@@ -1460,16 +1459,24 @@
   os << std::flush;
 }
 
+void LayoutDescriptor::ShortPrint(std::ostream& os) {
+  if (IsSmi()) {
+    os << this;  // Print tagged value for easy use with "jld" gdb macro.
+  } else {
+    os << Brief(this);
+  }
+}
 
 void LayoutDescriptor::Print(std::ostream& os) {  // NOLINT
   os << "Layout descriptor: ";
-  if (IsOddball() && IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
-    os << "<uninitialized>";
-  } else if (IsFastPointerLayout()) {
+  if (IsFastPointerLayout()) {
     os << "<all tagged>";
   } else if (IsSmi()) {
     os << "fast";
     PrintBitMask(os, static_cast<uint32_t>(Smi::cast(this)->value()));
+  } else if (IsOddball() &&
+             IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
+    os << "<uninitialized>";
   } else {
     os << "slow";
     int len = length();
@@ -1546,15 +1553,43 @@
 
 void DescriptorArray::PrintDescriptors(std::ostream& os) {  // NOLINT
   HandleScope scope(GetIsolate());
-  os << "Descriptor array #" << number_of_descriptors();
+  os << "Descriptor array #" << number_of_descriptors() << ":";
   for (int i = 0; i < number_of_descriptors(); i++) {
-    Descriptor desc;
-    Get(i, &desc);
-    os << "\n " << i << ": " << desc;
+    Name* key = GetKey(i);
+    os << "\n  [" << i << "]: ";
+#ifdef OBJECT_PRINT
+    key->NamePrint(os);
+#else
+    key->ShortPrint(os);
+#endif
+    os << " ";
+    PrintDescriptorDetails(os, i, PropertyDetails::kPrintFull);
   }
   os << "\n";
 }
 
+void DescriptorArray::PrintDescriptorDetails(std::ostream& os, int descriptor,
+                                             PropertyDetails::PrintMode mode) {
+  PropertyDetails details = GetDetails(descriptor);
+  details.PrintAsFastTo(os, mode);
+  os << " @ ";
+  Object* value = GetValue(descriptor);
+  switch (details.location()) {
+    case kField: {
+      FieldType* field_type = Map::UnwrapFieldType(value);
+      field_type->PrintTo(os);
+      break;
+    }
+    case kDescriptor:
+      os << Brief(value);
+      if (value->IsAccessorPair()) {
+        AccessorPair* pair = AccessorPair::cast(value);
+        os << "(get: " << Brief(pair->getter())
+           << ", set: " << Brief(pair->setter()) << ")";
+      }
+      break;
+  }
+}
 
 void TransitionArray::Print() {
   OFStream os(stdout);
@@ -1592,18 +1627,13 @@
     } else if (key == heap->strict_function_transition_symbol()) {
       os << " (transition to strict function)";
     } else {
-      PropertyDetails details = GetTargetDetails(key, target);
+      DCHECK(!IsSpecialTransition(key));
       os << "(transition to ";
-      if (details.location() == kDescriptor) {
-        os << "immutable ";
-      }
-      os << (details.kind() == kData ? "data" : "accessor");
-      if (details.location() == kDescriptor) {
-        Object* value =
-            target->instance_descriptors()->GetValue(target->LastAdded());
-        os << " " << Brief(value);
-      }
-      os << "), attrs: " << details.attributes();
+      int descriptor = target->LastAdded();
+      DescriptorArray* descriptors = target->instance_descriptors();
+      descriptors->PrintDescriptorDetails(os, descriptor,
+                                          PropertyDetails::kForTransitions);
+      os << ")";
     }
     os << " -> " << Brief(target);
   }
@@ -1633,11 +1663,19 @@
   isolate->FindCodeObject(reinterpret_cast<i::Address>(object))->Print();
 }
 
-extern void _v8_internal_Print_TypeFeedbackVector(void* object) {
+extern void _v8_internal_Print_FeedbackMetadata(void* object) {
   if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
-    printf("Not a type feedback vector\n");
+    printf("Not a feedback metadata object\n");
   } else {
-    reinterpret_cast<i::TypeFeedbackVector*>(object)->Print();
+    reinterpret_cast<i::FeedbackMetadata*>(object)->Print();
+  }
+}
+
+extern void _v8_internal_Print_FeedbackVector(void* object) {
+  if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
+    printf("Not a feedback vector\n");
+  } else {
+    reinterpret_cast<i::FeedbackVector*>(object)->Print();
   }
 }
 
@@ -1649,6 +1687,15 @@
   }
 }
 
+extern void _v8_internal_Print_LayoutDescriptor(void* object) {
+  i::Object* o = reinterpret_cast<i::Object*>(object);
+  if (!o->IsLayoutDescriptor()) {
+    printf("Not a layout descriptor\n");
+  } else {
+    reinterpret_cast<i::LayoutDescriptor*>(object)->Print();
+  }
+}
+
 extern void _v8_internal_Print_TransitionArray(void* object) {
   if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
     printf("Not a transition array\n");
diff --git a/src/objects.cc b/src/objects.cc
index e711a21..9f9a628 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -18,6 +18,7 @@
 #include "src/api-arguments-inl.h"
 #include "src/api-natives.h"
 #include "src/api.h"
+#include "src/arguments.h"
 #include "src/base/bits.h"
 #include "src/base/utils/random-number-generator.h"
 #include "src/bootstrapper.h"
@@ -28,6 +29,7 @@
 #include "src/counters-inl.h"
 #include "src/counters.h"
 #include "src/date.h"
+#include "src/debug/debug-evaluate.h"
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
 #include "src/elements.h"
@@ -49,6 +51,7 @@
 #include "src/log.h"
 #include "src/lookup.h"
 #include "src/macro-assembler.h"
+#include "src/map-updater.h"
 #include "src/messages.h"
 #include "src/objects-body-descriptors-inl.h"
 #include "src/property-descriptor.h"
@@ -139,7 +142,8 @@
 }
 
 // static
-MaybeHandle<Object> Object::ToNumber(Handle<Object> input) {
+MaybeHandle<Object> Object::ConvertToNumber(Isolate* isolate,
+                                            Handle<Object> input) {
   while (true) {
     if (input->IsNumber()) {
       return input;
@@ -150,15 +154,10 @@
     if (input->IsOddball()) {
       return Oddball::ToNumber(Handle<Oddball>::cast(input));
     }
-    Isolate* const isolate = Handle<HeapObject>::cast(input)->GetIsolate();
     if (input->IsSymbol()) {
       THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToNumber),
                       Object);
     }
-    if (input->IsSimd128Value()) {
-      THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSimdToNumber),
-                      Object);
-    }
     ASSIGN_RETURN_ON_EXCEPTION(
         isolate, input, JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
                                                 ToPrimitiveHint::kNumber),
@@ -166,28 +165,33 @@
   }
 }
 
-
 // static
-MaybeHandle<Object> Object::ToInteger(Isolate* isolate, Handle<Object> input) {
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+MaybeHandle<Object> Object::ConvertToInteger(Isolate* isolate,
+                                             Handle<Object> input) {
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ConvertToNumber(isolate, input),
+                             Object);
+  if (input->IsSmi()) return input;
   return isolate->factory()->NewNumber(DoubleToInteger(input->Number()));
 }
 
-
 // static
-MaybeHandle<Object> Object::ToInt32(Isolate* isolate, Handle<Object> input) {
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+MaybeHandle<Object> Object::ConvertToInt32(Isolate* isolate,
+                                           Handle<Object> input) {
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ConvertToNumber(isolate, input),
+                             Object);
+  if (input->IsSmi()) return input;
   return isolate->factory()->NewNumberFromInt(DoubleToInt32(input->Number()));
 }
 
-
 // static
-MaybeHandle<Object> Object::ToUint32(Isolate* isolate, Handle<Object> input) {
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+MaybeHandle<Object> Object::ConvertToUint32(Isolate* isolate,
+                                            Handle<Object> input) {
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ConvertToNumber(isolate, input),
+                             Object);
+  if (input->IsSmi()) return handle(Smi::cast(*input)->ToUint32Smi(), isolate);
   return isolate->factory()->NewNumberFromUint(DoubleToUint32(input->Number()));
 }
 
-
 // static
 MaybeHandle<Name> Object::ConvertToName(Isolate* isolate,
                                         Handle<Object> input) {
@@ -198,12 +202,35 @@
   return ToString(isolate, input);
 }
 
+// ES6 7.1.14
 // static
-MaybeHandle<String> Object::ToString(Isolate* isolate, Handle<Object> input) {
-  while (true) {
-    if (input->IsString()) {
-      return Handle<String>::cast(input);
+MaybeHandle<Object> Object::ConvertToPropertyKey(Isolate* isolate,
+                                                 Handle<Object> value) {
+  // 1. Let key be ToPrimitive(argument, hint String).
+  MaybeHandle<Object> maybe_key =
+      Object::ToPrimitive(value, ToPrimitiveHint::kString);
+  // 2. ReturnIfAbrupt(key).
+  Handle<Object> key;
+  if (!maybe_key.ToHandle(&key)) return key;
+  // 3. If Type(key) is Symbol, then return key.
+  if (key->IsSymbol()) return key;
+  // 4. Return ToString(key).
+  // Extending spec'ed behavior, we'd be happy to return an element index.
+  if (key->IsSmi()) return key;
+  if (key->IsHeapNumber()) {
+    uint32_t uint_value;
+    if (value->ToArrayLength(&uint_value) &&
+        uint_value <= static_cast<uint32_t>(Smi::kMaxValue)) {
+      return handle(Smi::FromInt(static_cast<int>(uint_value)), isolate);
     }
+  }
+  return Object::ToString(isolate, key);
+}
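Reviewer note: the effect of the Smi/array-index extension in
ConvertToPropertyKey, sketched on sample inputs (a hedged reading of the code
above, not measured output):

  ToPropertyKey(42)       -> Smi 42      (Smi keys pass through, step 4 skipped)
  ToPropertyKey(4.0)      -> Smi 4       (HeapNumber holding a valid array index)
  ToPropertyKey(Symbol()) -> the Symbol  (step 3)
  ToPropertyKey(1e21)     -> "1e+21"     (no array index fits; falls back to ToString)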
+
+// static
+MaybeHandle<String> Object::ConvertToString(Isolate* isolate,
+                                            Handle<Object> input) {
+  while (true) {
     if (input->IsOddball()) {
       return handle(Handle<Oddball>::cast(input)->to_string(), isolate);
     }
@@ -214,13 +241,15 @@
       THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToString),
                       String);
     }
-    if (input->IsSimd128Value()) {
-      return Simd128Value::ToString(Handle<Simd128Value>::cast(input));
-    }
     ASSIGN_RETURN_ON_EXCEPTION(
         isolate, input, JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
                                                 ToPrimitiveHint::kString),
         String);
+    // Object::ToString has already handled the common String case on its
+    // fast path, so this helper performs the check at the end of the loop.
+    if (input->IsString()) {
+      return Handle<String>::cast(input);
+    }
   }
 }
 
@@ -268,8 +297,7 @@
                                              Handle<Object> input) {
   DisallowJavascriptExecution no_js(isolate);
 
-  if (input->IsString() || input->IsNumber() || input->IsOddball() ||
-      input->IsSimd128Value()) {
+  if (input->IsString() || input->IsNumber() || input->IsOddball()) {
     return Object::ToString(isolate, input).ToHandleChecked();
   } else if (input->IsFunction()) {
     // -- F u n c t i o n
@@ -375,11 +403,16 @@
 }
 
 // static
-MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
+MaybeHandle<Object> Object::ConvertToLength(Isolate* isolate,
+                                            Handle<Object> input) {
   ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+  if (input->IsSmi()) {
+    int value = std::max(Smi::cast(*input)->value(), 0);
+    return handle(Smi::FromInt(value), isolate);
+  }
   double len = DoubleToInteger(input->Number());
   if (len <= 0.0) {
-    len = 0.0;
+    return handle(Smi::kZero, isolate);
   } else if (len >= kMaxSafeInteger) {
     len = kMaxSafeInteger;
   }
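Reviewer note: with the Smi fast path and the early return for non-positive
lengths, ConvertToLength should behave as follows on sample inputs (hedged
reading of the code, not measured output):

  ToLength(-5)    -> Smi 0            (std::max with 0 on the Smi path)
  ToLength(3.7)   -> 3                (DoubleToInteger truncates toward zero)
  ToLength(-0.5)  -> Smi 0            (len <= 0.0 branch)
  ToLength(1e300) -> kMaxSafeInteger  (clamped to 2^53 - 1)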
@@ -387,10 +420,12 @@
 }
 
 // static
-MaybeHandle<Object> Object::ToIndex(Isolate* isolate, Handle<Object> input,
-                                    MessageTemplate::Template error_index) {
-  if (input->IsUndefined(isolate)) return isolate->factory()->NewNumber(0.0);
+MaybeHandle<Object> Object::ConvertToIndex(
+    Isolate* isolate, Handle<Object> input,
+    MessageTemplate::Template error_index) {
+  if (input->IsUndefined(isolate)) return handle(Smi::kZero, isolate);
   ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
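+  // Fast path: a non-negative Smi is already a valid index.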
+  if (input->IsSmi() && Smi::cast(*input)->value() >= 0) return input;
   double len = DoubleToInteger(input->Number()) + 0.0;
   auto js_len = isolate->factory()->NewNumber(len);
   if (len < 0.0 || len > kMaxSafeInteger) {
@@ -404,7 +439,7 @@
   DCHECK(IsHeapObject());
   Isolate* isolate = HeapObject::cast(this)->GetIsolate();
   if (IsBoolean()) return IsTrue(isolate);
-  if (IsUndefined(isolate) || IsNull(isolate)) return false;
+  if (IsNullOrUndefined(isolate)) return false;
   if (IsUndetectable()) return false;  // Undetectable object is false.
   if (IsString()) return String::cast(this)->length() != 0;
   if (IsHeapNumber()) return HeapNumber::cast(this)->HeapNumberBooleanValue();
@@ -537,18 +572,6 @@
       } else {
         return Just(false);
       }
-    } else if (x->IsSimd128Value()) {
-      if (y->IsSimd128Value()) {
-        return Just(Simd128Value::Equals(Handle<Simd128Value>::cast(x),
-                                         Handle<Simd128Value>::cast(y)));
-      } else if (y->IsJSReceiver()) {
-        if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
-                 .ToHandle(&y)) {
-          return Nothing<bool>();
-        }
-      } else {
-        return Just(false);
-      }
     } else if (x->IsJSReceiver()) {
       if (y->IsJSReceiver()) {
         return Just(x.is_identical_to(y));
@@ -574,9 +597,6 @@
   } else if (this->IsString()) {
     if (!that->IsString()) return false;
     return String::cast(this)->Equals(String::cast(that));
-  } else if (this->IsSimd128Value()) {
-    if (!that->IsSimd128Value()) return false;
-    return Simd128Value::cast(this)->Equals(Simd128Value::cast(that));
   }
   return this == that;
 }
@@ -592,10 +612,6 @@
   if (object->IsString()) return isolate->factory()->string_string();
   if (object->IsSymbol()) return isolate->factory()->symbol_string();
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
-  if (object->Is##Type()) return isolate->factory()->type##_string();
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
   if (object->IsCallable()) return isolate->factory()->function_string();
   return isolate->factory()->object_string();
 }
@@ -847,7 +863,7 @@
   Isolate* isolate = receiver->GetIsolate();
   ASSIGN_RETURN_ON_EXCEPTION(isolate, func,
                              JSReceiver::GetProperty(receiver, name), Object);
-  if (func->IsNull(isolate) || func->IsUndefined(isolate)) {
+  if (func->IsNullOrUndefined(isolate)) {
     return isolate->factory()->undefined_value();
   }
   if (!func->IsCallable()) {
@@ -858,10 +874,30 @@
   return func;
 }
 
+namespace {
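+// Attempts to copy the elements of a fast JSArray directly. A null
+// MaybeHandle means the fast path does not apply (this is not an error);
+// the caller then falls back to the generic spec path.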
+MaybeHandle<FixedArray> CreateListFromArrayLikeFastPath(
+    Isolate* isolate, Handle<Object> object, ElementTypes element_types) {
+  if (element_types != ElementTypes::kAll || !object->IsJSArray()) {
+    return MaybeHandle<FixedArray>();
+  }
+  Handle<JSArray> array = Handle<JSArray>::cast(object);
+  uint32_t length;
+  if (!array->HasArrayPrototype(isolate) ||
+      !array->length()->ToUint32(&length) || !array->HasFastElements() ||
+      !JSObject::PrototypeHasNoElements(isolate, *array)) {
+    return MaybeHandle<FixedArray>();
+  }
+  return array->GetElementsAccessor()->CreateListFromArray(isolate, array);
+}
+}  // namespace
 
 // static
 MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
     Isolate* isolate, Handle<Object> object, ElementTypes element_types) {
+  // Fast-path for JS_ARRAY_TYPE.
+  MaybeHandle<FixedArray> fast_result =
+      CreateListFromArrayLikeFastPath(isolate, object, element_types);
+  if (!fast_result.is_null()) return fast_result;
   // 1. ReturnIfAbrupt(object).
   // 2. (default elementTypes -- not applicable.)
   // 3. If Type(obj) is not Object, throw a TypeError exception.
@@ -872,6 +908,7 @@
                                      "CreateListFromArrayLike")),
                     FixedArray);
   }
+
   // 4. Let len be ? ToLength(? Get(obj, "length")).
   Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
   Handle<Object> raw_length_number;
@@ -1790,11 +1827,13 @@
                                  GetPropertyWithInterceptor(it, &done), Object);
       if (done) return result;
     }
+
   } else {
-    MaybeHandle<Object> result;
+    Handle<Object> result;
     bool done;
-    result = GetPropertyWithInterceptorInternal(it, interceptor, &done);
-    RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, result,
+        GetPropertyWithInterceptorInternal(it, interceptor, &done), Object);
     if (done) return result;
   }
 
@@ -1830,7 +1869,7 @@
   } else {
     Maybe<PropertyAttributes> result =
         GetPropertyAttributesWithInterceptorInternal(it, interceptor);
-    RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
+    if (isolate->has_pending_exception()) return Nothing<PropertyAttributes>();
     if (result.FromMaybe(ABSENT) != ABSENT) return result;
   }
   isolate->ReportFailedAccessCheck(checked);
@@ -1866,10 +1905,9 @@
   } else {
     Maybe<bool> result = SetPropertyWithInterceptorInternal(
         it, interceptor, should_throw, value);
-    RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+    if (isolate->has_pending_exception()) return Nothing<bool>();
     if (result.IsJust()) return result;
   }
-
   isolate->ReportFailedAccessCheck(checked);
   RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
   return Just(true);
@@ -1937,6 +1975,178 @@
   }
 }
 
+namespace {
+
+bool HasExcludedProperty(
+    const ScopedVector<Handle<Object>>* excluded_properties,
+    Handle<Object> search_element) {
+  // TODO(gsathya): Change this to be a hashtable.
+  for (int i = 0; i < excluded_properties->length(); i++) {
+    if (search_element->SameValue(*excluded_properties->at(i))) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+MUST_USE_RESULT Maybe<bool> FastAssign(
+    Handle<JSReceiver> target, Handle<Object> source,
+    const ScopedVector<Handle<Object>>* excluded_properties, bool use_set) {
+  // Non-empty strings are the only non-JSReceivers that need to be handled
+  // explicitly by Object.assign.
+  if (!source->IsJSReceiver()) {
+    return Just(!source->IsString() || String::cast(*source)->length() == 0);
+  }
+
+  // If the target is deprecated, the object will be updated on first store. If
+  // the source for that store equals the target, this will invalidate the
+  // cached representation of the source. Preemptively upgrade the target.
+  // Do this on each iteration since any property load could cause deprecation.
+  if (target->map()->is_deprecated()) {
+    JSObject::MigrateInstance(Handle<JSObject>::cast(target));
+  }
+
+  Isolate* isolate = target->GetIsolate();
+  Handle<Map> map(JSReceiver::cast(*source)->map(), isolate);
+
+  if (!map->IsJSObjectMap()) return Just(false);
+  if (!map->OnlyHasSimpleProperties()) return Just(false);
+
+  Handle<JSObject> from = Handle<JSObject>::cast(source);
+  if (from->elements() != isolate->heap()->empty_fixed_array()) {
+    return Just(false);
+  }
+
+  Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+  int length = map->NumberOfOwnDescriptors();
+
+  bool stable = true;
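+  // |stable| tracks whether |from| still has the shape described by |map|.
+  // User code invoked below (getters, setters) may change that, in which
+  // case we switch to a slower per-key lookup.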
+
+  for (int i = 0; i < length; i++) {
+    Handle<Name> next_key(descriptors->GetKey(i), isolate);
+    Handle<Object> prop_value;
+    // Directly decode from the descriptor array if |from| did not change shape.
+    if (stable) {
+      PropertyDetails details = descriptors->GetDetails(i);
+      if (!details.IsEnumerable()) continue;
+      if (details.kind() == kData) {
+        if (details.location() == kDescriptor) {
+          prop_value = handle(descriptors->GetValue(i), isolate);
+        } else {
+          Representation representation = details.representation();
+          FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+          prop_value = JSObject::FastPropertyAt(from, representation, index);
+        }
+      } else {
+        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+            isolate, prop_value, JSReceiver::GetProperty(from, next_key),
+            Nothing<bool>());
+        stable = from->map() == *map;
+      }
+    } else {
+      // If the map did change, do a slower lookup. We are still guaranteed that
+      // the object has a simple shape, and that the key is a name.
+      LookupIterator it(from, next_key, from,
+                        LookupIterator::OWN_SKIP_INTERCEPTOR);
+      if (!it.IsFound()) continue;
+      DCHECK(it.state() == LookupIterator::DATA ||
+             it.state() == LookupIterator::ACCESSOR);
+      if (!it.IsEnumerable()) continue;
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+          isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
+    }
+
+    if (use_set) {
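+      // Object.assign semantics: store through [[Set]], which can invoke
+      // accessors on the target and reshape |from| as a side effect.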
+      LookupIterator it(target, next_key, target);
+      bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
+      Maybe<bool> result = Object::SetProperty(
+          &it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+      if (result.IsNothing()) return result;
+      if (stable && call_to_js) stable = from->map() == *map;
+    } else {
+      if (excluded_properties != nullptr &&
+          HasExcludedProperty(excluded_properties, next_key)) {
+        continue;
+      }
+
+      // 4a ii 2. Perform ? CreateDataProperty(target, nextKey, propValue).
+      bool success;
+      LookupIterator it = LookupIterator::PropertyOrElement(
+          isolate, target, next_key, &success, LookupIterator::OWN);
+      CHECK(success);
+      CHECK(
+          JSObject::CreateDataProperty(&it, prop_value, Object::THROW_ON_ERROR)
+              .FromJust());
+    }
+  }
+
+  return Just(true);
+}
+}  // namespace
+
+// static
+Maybe<bool> JSReceiver::SetOrCopyDataProperties(
+    Isolate* isolate, Handle<JSReceiver> target, Handle<Object> source,
+    const ScopedVector<Handle<Object>>* excluded_properties, bool use_set) {
+  Maybe<bool> fast_assign =
+      FastAssign(target, source, excluded_properties, use_set);
+  if (fast_assign.IsNothing()) return Nothing<bool>();
+  if (fast_assign.FromJust()) return Just(true);
+
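+  // Slow path: follow the spec operation step by step, one own key at a
+  // time.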
+  Handle<JSReceiver> from = Object::ToObject(isolate, source).ToHandleChecked();
+  // 3b. Let keys be ? from.[[OwnPropertyKeys]]().
+  Handle<FixedArray> keys;
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+      isolate, keys,
+      KeyAccumulator::GetKeys(from, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+                              GetKeysConversion::kKeepNumbers),
+      Nothing<bool>());
+
+  // 4. Repeat for each element nextKey of keys in List order,
+  for (int j = 0; j < keys->length(); ++j) {
+    Handle<Object> next_key(keys->get(j), isolate);
+    // 4a i. Let desc be ? from.[[GetOwnProperty]](nextKey).
+    PropertyDescriptor desc;
+    Maybe<bool> found =
+        JSReceiver::GetOwnPropertyDescriptor(isolate, from, next_key, &desc);
+    if (found.IsNothing()) return Nothing<bool>();
+    // 4a ii. If desc is not undefined and desc.[[Enumerable]] is true, then
+    if (found.FromJust() && desc.enumerable()) {
+      // 4a ii 1. Let propValue be ? Get(from, nextKey).
+      Handle<Object> prop_value;
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+          isolate, prop_value,
+          Runtime::GetObjectProperty(isolate, from, next_key), Nothing<bool>());
+
+      if (use_set) {
+        // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
+        Handle<Object> status;
+        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+            isolate, status, Runtime::SetObjectProperty(
+                                 isolate, target, next_key, prop_value, STRICT),
+            Nothing<bool>());
+      } else {
+        if (excluded_properties != nullptr &&
+            HasExcludedProperty(excluded_properties, next_key)) {
+          continue;
+        }
+
+        // 4a ii 2. Perform ! CreateDataProperty(target, nextKey, propValue).
+        bool success;
+        LookupIterator it = LookupIterator::PropertyOrElement(
+            isolate, target, next_key, &success, LookupIterator::OWN);
+        CHECK(success);
+        CHECK(JSObject::CreateDataProperty(&it, prop_value,
+                                           Object::THROW_ON_ERROR)
+                  .FromJust());
+      }
+    }
+  }
+
+  return Just(true);
+}
+
 Map* Object::GetPrototypeChainRootMap(Isolate* isolate) {
   DisallowHeapAllocation no_alloc;
   if (IsSmi()) {
@@ -1944,8 +2154,8 @@
     return native_context->number_function()->initial_map();
   }
 
-  // The object is either a number, a string, a symbol, a boolean, a SIMD value,
-  // a real JS object, or a Harmony proxy.
+  // The object is either a number, a string, a symbol, a boolean, a real JS
+  // object, or a Harmony proxy.
   HeapObject* heap_object = HeapObject::cast(this);
   return heap_object->map()->GetPrototypeChainRootMap(isolate);
 }
@@ -1971,8 +2181,8 @@
 // objects.  This avoids a double lookup in the cases where we know we will
 // add the hash to the JSObject if it does not already exist.
 Object* GetSimpleHash(Object* object) {
-  // The object is either a Smi, a HeapNumber, a name, an odd-ball,
-  // a SIMD value type, a real JS object, or a Harmony proxy.
+  // The object is either a Smi, a HeapNumber, a name, an odd-ball, a real JS
+  // object, or a Harmony proxy.
   if (object->IsSmi()) {
     uint32_t hash =
         ComputeIntegerHash(Smi::cast(object)->value(), kZeroHashSeed);
@@ -1996,10 +2206,6 @@
     uint32_t hash = Oddball::cast(object)->to_string()->Hash();
     return Smi::FromInt(hash);
   }
-  if (object->IsSimd128Value()) {
-    uint32_t hash = Simd128Value::cast(object)->Hash();
-    return Smi::FromInt(hash & Smi::kMaxValue);
-  }
   DCHECK(object->IsJSReceiver());
   // Simply return the receiver as it is guaranteed to not be a SMI.
   return object;
@@ -2046,23 +2252,6 @@
   if (IsString() && other->IsString()) {
     return String::cast(this)->Equals(String::cast(other));
   }
-  if (IsFloat32x4() && other->IsFloat32x4()) {
-    Float32x4* a = Float32x4::cast(this);
-    Float32x4* b = Float32x4::cast(other);
-    for (int i = 0; i < 4; i++) {
-      float x = a->get_lane(i);
-      float y = b->get_lane(i);
-      // Implements the ES5 SameValue operation for floating point types.
-      // http://www.ecma-international.org/ecma-262/6.0/#sec-samevalue
-      if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
-      if (std::signbit(x) != std::signbit(y)) return false;
-    }
-    return true;
-  } else if (IsSimd128Value() && other->IsSimd128Value()) {
-    Simd128Value* a = Simd128Value::cast(this);
-    Simd128Value* b = Simd128Value::cast(other);
-    return a->map() == b->map() && a->BitwiseEquals(b);
-  }
   return false;
 }
 
@@ -2082,23 +2271,6 @@
   if (IsString() && other->IsString()) {
     return String::cast(this)->Equals(String::cast(other));
   }
-  if (IsFloat32x4() && other->IsFloat32x4()) {
-    Float32x4* a = Float32x4::cast(this);
-    Float32x4* b = Float32x4::cast(other);
-    for (int i = 0; i < 4; i++) {
-      float x = a->get_lane(i);
-      float y = b->get_lane(i);
-      // Implements the ES6 SameValueZero operation for floating point types.
-      // http://www.ecma-international.org/ecma-262/6.0/#sec-samevaluezero
-      if (x != y && !(std::isnan(x) && std::isnan(y))) return false;
-      // SameValueZero doesn't distinguish between 0 and -0.
-    }
-    return true;
-  } else if (IsSimd128Value() && other->IsSimd128Value()) {
-    Simd128Value* a = Simd128Value::cast(this);
-    Simd128Value* b = Simd128Value::cast(other);
-    return a->map() == b->map() && a->BitwiseEquals(b);
-  }
   return false;
 }
 
@@ -2154,6 +2326,40 @@
   }
 }
 
+bool Object::IterationHasObservableEffects() {
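+  // Returns false only when iterating this object with the default array
+  // iteration machinery is guaranteed to behave like a plain element walk.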
+  // Check that this object is an array.
+  if (!IsJSArray()) return true;
+  JSArray* spread_array = JSArray::cast(this);
+  Isolate* isolate = spread_array->GetIsolate();
+
+  // Check that we have the original ArrayPrototype.
+  JSObject* array_proto = JSObject::cast(spread_array->map()->prototype());
+  if (!isolate->is_initial_array_prototype(array_proto)) return true;
+
+  // Check that the ArrayPrototype hasn't been modified in a way that would
+  // affect iteration.
+  if (!isolate->IsArrayIteratorLookupChainIntact()) return true;
+
+  // Check that the map of the initial array iterator hasn't changed.
+  Map* iterator_map = isolate->initial_array_iterator_prototype()->map();
+  if (!isolate->is_initial_array_iterator_prototype_map(iterator_map)) {
+    return true;
+  }
+
+  // For FastPacked kinds, iteration will have the same effect as simply
+  // accessing each property in order.
+  ElementsKind array_kind = spread_array->GetElementsKind();
+  if (IsFastPackedElementsKind(array_kind)) return false;
+
+  // For FastHoley kinds, an element access on a hole would cause a lookup on
+  // the prototype. This could have different results if the prototype has been
+  // changed.
+  if (IsFastHoleyElementsKind(array_kind) &&
+      isolate->IsFastArrayConstructorPrototypeChainIntact()) {
+    return false;
+  }
+  return true;
+}
 
 void Object::ShortPrint(FILE* out) {
   OFStream os(out);
@@ -2182,9 +2388,6 @@
   return os;
 }
 
-// Declaration of the static Smi::kZero constant.
-Smi* const Smi::kZero(nullptr);
-
 void Smi::SmiPrint(std::ostream& os) const {  // NOLINT
   os << value();
 }
@@ -2219,7 +2422,16 @@
   DCHECK(cons->second()->length() != 0);
 
   // TurboFan can create cons strings with empty first parts.
-  if (cons->first()->length() == 0) return handle(cons->second());
+  while (cons->first()->length() == 0) {
+    // Avoid recursion: String::Flatten is only called when it cannot
+    // re-enter String::SlowFlatten, i.e. when |second| is not a non-flat
+    // ConsString; otherwise keep walking the chain iteratively.
+    if (cons->second()->IsConsString() && !cons->second()->IsFlat()) {
+      cons = handle(ConsString::cast(cons->second()));
+    } else {
+      return String::Flatten(handle(cons->second()));
+    }
+  }
 
   DCHECK(AllowHeapAllocation::IsAllowed());
   Isolate* isolate = cons->GetIsolate();
@@ -2270,7 +2482,7 @@
   Heap* heap = GetHeap();
   bool is_one_byte = this->IsOneByteRepresentation();
   bool is_internalized = this->IsInternalizedString();
-  bool has_pointers = this->IsConsString() || this->IsSlicedString();
+  bool has_pointers = StringShape(this).IsIndirect();
 
   // Morph the string to an external string by replacing the map and
   // reinitializing the fields.  This won't work if the space the existing
@@ -2311,7 +2523,7 @@
   self->set_resource(resource);
   if (is_internalized) self->Hash();  // Force regeneration of the hash value.
 
-  heap->AdjustLiveBytes(this, new_size - size, Heap::CONCURRENT_TO_SWEEPER);
+  heap->AdjustLiveBytes(this, new_size - size);
   return true;
 }
 
@@ -2342,7 +2554,7 @@
   if (size < ExternalString::kShortSize) return false;
   Heap* heap = GetHeap();
   bool is_internalized = this->IsInternalizedString();
-  bool has_pointers = this->IsConsString() || this->IsSlicedString();
+  bool has_pointers = StringShape(this).IsIndirect();
 
   // Morph the string to an external string by replacing the map and
   // reinitializing the fields.  This won't work if the space the existing
@@ -2377,7 +2589,7 @@
   self->set_resource(resource);
   if (is_internalized) self->Hash();  // Force regeneration of the hash value.
 
-  heap->AdjustLiveBytes(this, new_size - size, Heap::CONCURRENT_TO_SWEEPER);
+  heap->AdjustLiveBytes(this, new_size - size);
   return true;
 }
 
@@ -2619,10 +2831,10 @@
 
 void Map::PrintGeneralization(
     FILE* file, const char* reason, int modify_index, int split,
-    int descriptors, bool constant_to_field, Representation old_representation,
-    Representation new_representation, MaybeHandle<FieldType> old_field_type,
-    MaybeHandle<Object> old_value, MaybeHandle<FieldType> new_field_type,
-    MaybeHandle<Object> new_value) {
+    int descriptors, bool descriptor_to_field,
+    Representation old_representation, Representation new_representation,
+    MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
+    MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value) {
   OFStream os(file);
   os << "[generalizing]";
   Name* name = instance_descriptors()->GetKey(modify_index);
@@ -2632,7 +2844,7 @@
     os << "{symbol " << static_cast<void*>(name) << "}";
   }
   os << ":";
-  if (constant_to_field) {
+  if (descriptor_to_field) {
     os << "c";
   } else {
     os << old_representation.Mnemonic() << "{";
@@ -2673,8 +2885,8 @@
     if (!o_r.Equals(n_r)) {
       String::cast(o->GetKey(i))->PrintOn(file);
       PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
-    } else if (o->GetDetails(i).type() == DATA_CONSTANT &&
-               n->GetDetails(i).type() == DATA) {
+    } else if (o->GetDetails(i).location() == kDescriptor &&
+               n->GetDetails(i).location() == kField) {
       Name* name = o->GetKey(i);
       if (name->IsString()) {
         String::cast(name)->PrintOn(file);
@@ -2809,17 +3021,6 @@
       os << '>';
       break;
     }
-    case SIMD128_VALUE_TYPE: {
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
-  if (Is##Type()) {                                           \
-    os << "<" #Type ">";                                      \
-    break;                                                    \
-  }
-      SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-      UNREACHABLE();
-      break;
-    }
     case JS_PROXY_TYPE:
       os << "<JSProxy>";
       break;
@@ -2910,101 +3111,6 @@
 #define READ_BYTE_FIELD(p, offset) \
   (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
 
-
-// static
-Handle<String> Simd128Value::ToString(Handle<Simd128Value> input) {
-#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
-  if (input->Is##Type()) return Type::ToString(Handle<Type>::cast(input));
-  SIMD128_TYPES(SIMD128_TYPE)
-#undef SIMD128_TYPE
-  UNREACHABLE();
-  return Handle<String>::null();
-}
-
-
-// static
-Handle<String> Float32x4::ToString(Handle<Float32x4> input) {
-  Isolate* const isolate = input->GetIsolate();
-  char arr[100];
-  Vector<char> buffer(arr, arraysize(arr));
-  std::ostringstream os;
-  os << "SIMD.Float32x4("
-     << std::string(DoubleToCString(input->get_lane(0), buffer)) << ", "
-     << std::string(DoubleToCString(input->get_lane(1), buffer)) << ", "
-     << std::string(DoubleToCString(input->get_lane(2), buffer)) << ", "
-     << std::string(DoubleToCString(input->get_lane(3), buffer)) << ")";
-  return isolate->factory()->NewStringFromAsciiChecked(os.str().c_str());
-}
-
-
-#define SIMD128_BOOL_TO_STRING(Type, lane_count)                            \
-  Handle<String> Type::ToString(Handle<Type> input) {                       \
-    Isolate* const isolate = input->GetIsolate();                           \
-    std::ostringstream os;                                                  \
-    os << "SIMD." #Type "(";                                                \
-    os << (input->get_lane(0) ? "true" : "false");                          \
-    for (int i = 1; i < lane_count; i++) {                                  \
-      os << ", " << (input->get_lane(i) ? "true" : "false");                \
-    }                                                                       \
-    os << ")";                                                              \
-    return isolate->factory()->NewStringFromAsciiChecked(os.str().c_str()); \
-  }
-SIMD128_BOOL_TO_STRING(Bool32x4, 4)
-SIMD128_BOOL_TO_STRING(Bool16x8, 8)
-SIMD128_BOOL_TO_STRING(Bool8x16, 16)
-#undef SIMD128_BOOL_TO_STRING
-
-
-#define SIMD128_INT_TO_STRING(Type, lane_count)                             \
-  Handle<String> Type::ToString(Handle<Type> input) {                       \
-    Isolate* const isolate = input->GetIsolate();                           \
-    char arr[100];                                                          \
-    Vector<char> buffer(arr, arraysize(arr));                               \
-    std::ostringstream os;                                                  \
-    os << "SIMD." #Type "(";                                                \
-    os << IntToCString(input->get_lane(0), buffer);                         \
-    for (int i = 1; i < lane_count; i++) {                                  \
-      os << ", " << IntToCString(input->get_lane(i), buffer);               \
-    }                                                                       \
-    os << ")";                                                              \
-    return isolate->factory()->NewStringFromAsciiChecked(os.str().c_str()); \
-  }
-SIMD128_INT_TO_STRING(Int32x4, 4)
-SIMD128_INT_TO_STRING(Uint32x4, 4)
-SIMD128_INT_TO_STRING(Int16x8, 8)
-SIMD128_INT_TO_STRING(Uint16x8, 8)
-SIMD128_INT_TO_STRING(Int8x16, 16)
-SIMD128_INT_TO_STRING(Uint8x16, 16)
-#undef SIMD128_INT_TO_STRING
-
-
-bool Simd128Value::BitwiseEquals(const Simd128Value* other) const {
-  return READ_INT64_FIELD(this, kValueOffset) ==
-             READ_INT64_FIELD(other, kValueOffset) &&
-         READ_INT64_FIELD(this, kValueOffset + kInt64Size) ==
-             READ_INT64_FIELD(other, kValueOffset + kInt64Size);
-}
-
-
-uint32_t Simd128Value::Hash() const {
-  uint32_t seed = v8::internal::kZeroHashSeed;
-  uint32_t hash;
-  hash = ComputeIntegerHash(READ_INT32_FIELD(this, kValueOffset), seed);
-  hash = ComputeIntegerHash(
-      READ_INT32_FIELD(this, kValueOffset + 1 * kInt32Size), hash * 31);
-  hash = ComputeIntegerHash(
-      READ_INT32_FIELD(this, kValueOffset + 2 * kInt32Size), hash * 31);
-  hash = ComputeIntegerHash(
-      READ_INT32_FIELD(this, kValueOffset + 3 * kInt32Size), hash * 31);
-  return hash;
-}
-
-
-void Simd128Value::CopyBits(void* destination) const {
-  memcpy(destination, &READ_BYTE_FIELD(this, kValueOffset), kSimd128Size);
-}
-
-
 String* JSReceiver::class_name() {
   if (IsFunction()) {
     return GetHeap()->Function_string();
@@ -3064,8 +3170,7 @@
              : result;
 }
 
-
-Context* JSReceiver::GetCreationContext() {
+Handle<Context> JSReceiver::GetCreationContext() {
   JSReceiver* receiver = this;
   while (receiver->IsJSBoundFunction()) {
     receiver = JSBoundFunction::cast(receiver)->bound_target_function();
@@ -3081,17 +3186,29 @@
     function = JSFunction::cast(receiver);
   }
 
-  return function->context()->native_context();
+  return function->has_context()
+             ? Handle<Context>(function->context()->native_context())
+             : Handle<Context>::null();
 }
 
-static Handle<Object> WrapType(Handle<FieldType> type) {
+Handle<Object> Map::WrapFieldType(Handle<FieldType> type) {
   if (type->IsClass()) return Map::WeakCellForMap(type->AsClass());
   return type;
 }
 
+FieldType* Map::UnwrapFieldType(Object* wrapped_type) {
+  Object* value = wrapped_type;
+  if (value->IsWeakCell()) {
+    if (WeakCell::cast(value)->cleared()) return FieldType::None();
+    value = WeakCell::cast(value)->value();
+  }
+  return FieldType::cast(value);
+}
+
 MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
                                     Handle<FieldType> type,
                                     PropertyAttributes attributes,
+                                    PropertyConstness constness,
                                     Representation representation,
                                     TransitionFlag flag) {
   DCHECK(DescriptorArray::kNotFound ==
@@ -3113,11 +3230,12 @@
     type = FieldType::Any(isolate);
   }
 
-  Handle<Object> wrapped_type(WrapType(type));
+  Handle<Object> wrapped_type(WrapFieldType(type));
 
-  DataDescriptor new_field_desc(name, index, wrapped_type, attributes,
-                                representation);
-  Handle<Map> new_map = Map::CopyAddDescriptor(map, &new_field_desc, flag);
+  DCHECK_IMPLIES(!FLAG_track_constant_fields, constness == kMutable);
+  Descriptor d = Descriptor::DataField(name, index, attributes, constness,
+                                       representation, wrapped_type);
+  Handle<Map> new_map = Map::CopyAddDescriptor(map, &d, flag);
   int unused_property_fields = new_map->unused_property_fields() - 1;
   if (unused_property_fields < 0) {
     unused_property_fields += JSObject::kFieldsAdded;
@@ -3137,9 +3255,18 @@
     return MaybeHandle<Map>();
   }
 
-  // Allocate new instance descriptors with (name, constant) added.
-  DataConstantDescriptor new_constant_desc(name, constant, attributes);
-  return Map::CopyAddDescriptor(map, &new_constant_desc, flag);
+  if (FLAG_track_constant_fields) {
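+    // With constant field tracking, store the constant as a kConst data
+    // field of its optimal representation rather than embedding it in the
+    // descriptor.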
+    Isolate* isolate = map->GetIsolate();
+    Representation representation = constant->OptimalRepresentation();
+    Handle<FieldType> type = constant->OptimalType(isolate, representation);
+    return CopyWithField(map, name, type, attributes, kConst, representation,
+                         flag);
+  } else {
+    // Allocate new instance descriptors with (name, constant) added.
+    Descriptor d = Descriptor::DataConstant(name, 0, constant, attributes);
+    Handle<Map> new_map = Map::CopyAddDescriptor(map, &d, flag);
+    return new_map;
+  }
 }
 
 const char* Representation::Mnemonic() const {
@@ -3157,6 +3284,34 @@
   }
 }
 
+bool Map::TransitionRemovesTaggedField(Map* target) {
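+  // Together with TransitionChangesTaggedFieldToUntaggedField below, this
+  // feeds TransitionRequiresSynchronizationWithGC: dropping a tagged
+  // in-object field is a layout change the GC has to be told about
+  // (cf. NotifyObjectLayoutChange elsewhere in this change).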
+  int inobject = GetInObjectProperties();
+  int target_inobject = target->GetInObjectProperties();
+  for (int i = target_inobject; i < inobject; i++) {
+    FieldIndex index = FieldIndex::ForPropertyIndex(this, i);
+    if (!IsUnboxedDoubleField(index)) return true;
+  }
+  return false;
+}
+
+bool Map::TransitionChangesTaggedFieldToUntaggedField(Map* target) {
+  int inobject = GetInObjectProperties();
+  int target_inobject = target->GetInObjectProperties();
+  int limit = Min(inobject, target_inobject);
+  for (int i = 0; i < limit; i++) {
+    FieldIndex index = FieldIndex::ForPropertyIndex(target, i);
+    if (!IsUnboxedDoubleField(index) && target->IsUnboxedDoubleField(index)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool Map::TransitionRequiresSynchronizationWithGC(Map* target) {
+  return TransitionRemovesTaggedField(target) ||
+         TransitionChangesTaggedFieldToUntaggedField(target);
+}
+
 bool Map::InstancesNeedRewriting(Map* target) {
   int target_number_of_fields = target->NumberOfFields();
   int target_inobject = target->GetInObjectProperties();
@@ -3276,7 +3431,7 @@
           FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
       DCHECK(details.representation().IsDouble());
       DCHECK(!new_map->IsUnboxedDoubleField(index));
-      Handle<Object> value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+      Handle<Object> value = isolate->factory()->NewMutableHeapNumber();
       object->RawFastPropertyAtPut(index, *value);
       object->synchronized_set_map(*new_map);
       return;
@@ -3292,11 +3447,12 @@
     // Properly initialize newly added property.
     Handle<Object> value;
     if (details.representation().IsDouble()) {
-      value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+      value = isolate->factory()->NewMutableHeapNumber();
     } else {
       value = isolate->factory()->uninitialized_value();
     }
-    DCHECK_EQ(DATA, details.type());
+    DCHECK_EQ(kField, details.location());
+    DCHECK_EQ(kData, details.kind());
     int target_index = details.field_index() - new_map->GetInObjectProperties();
     DCHECK(target_index >= 0);  // Must be a backing store index.
     new_storage->set(target_index, *value);
@@ -3339,36 +3495,40 @@
 
   for (int i = 0; i < old_nof; i++) {
     PropertyDetails details = new_descriptors->GetDetails(i);
-    if (details.type() != DATA) continue;
+    if (details.location() != kField) continue;
+    DCHECK_EQ(kData, details.kind());
     PropertyDetails old_details = old_descriptors->GetDetails(i);
     Representation old_representation = old_details.representation();
     Representation representation = details.representation();
     Handle<Object> value;
-    if (old_details.type() == ACCESSOR_CONSTANT) {
-      // In case of kAccessor -> kData property reconfiguration, the property
-      // must already be prepared for data or certain type.
-      DCHECK(!details.representation().IsNone());
-      if (details.representation().IsDouble()) {
-        value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+    if (old_details.location() == kDescriptor) {
+      if (old_details.kind() == kAccessor) {
+        // In case of kAccessor -> kData property reconfiguration, the property
+        // must already be prepared for data of a certain type.
+        DCHECK(!details.representation().IsNone());
+        if (details.representation().IsDouble()) {
+          value = isolate->factory()->NewMutableHeapNumber();
+        } else {
+          value = isolate->factory()->uninitialized_value();
+        }
       } else {
-        value = isolate->factory()->uninitialized_value();
+        DCHECK_EQ(kData, old_details.kind());
+        value = handle(old_descriptors->GetValue(i), isolate);
+        DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
       }
-    } else if (old_details.type() == DATA_CONSTANT) {
-      value = handle(old_descriptors->GetValue(i), isolate);
-      DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
     } else {
+      DCHECK_EQ(kField, old_details.location());
       FieldIndex index = FieldIndex::ForDescriptor(*old_map, i);
       if (object->IsUnboxedDoubleField(index)) {
-        double old = object->RawFastDoublePropertyAt(index);
-        value = isolate->factory()->NewHeapNumber(
-            old, representation.IsDouble() ? MUTABLE : IMMUTABLE);
+        uint64_t old_bits = object->RawFastDoublePropertyAsBitsAt(index);
+        value = isolate->factory()->NewHeapNumberFromBits(
+            old_bits, representation.IsDouble() ? MUTABLE : IMMUTABLE);
 
       } else {
         value = handle(object->RawFastPropertyAt(index), isolate);
         if (!old_representation.IsDouble() && representation.IsDouble()) {
-          if (old_representation.IsNone()) {
-            value = handle(Smi::kZero, isolate);
-          }
+          DCHECK_IMPLIES(old_representation.IsNone(),
+                         value->IsUninitialized(isolate));
           value = Object::NewStorageFor(isolate, value, representation);
         } else if (old_representation.IsDouble() &&
                    !representation.IsDouble()) {
@@ -3384,10 +3544,11 @@
 
   for (int i = old_nof; i < new_nof; i++) {
     PropertyDetails details = new_descriptors->GetDetails(i);
-    if (details.type() != DATA) continue;
+    if (details.location() != kField) continue;
+    DCHECK_EQ(kData, details.kind());
     Handle<Object> value;
     if (details.representation().IsDouble()) {
-      value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+      value = isolate->factory()->NewMutableHeapNumber();
     } else {
       value = isolate->factory()->uninitialized_value();
     }
@@ -3401,6 +3562,8 @@
 
   Heap* heap = isolate->heap();
 
+  heap->NotifyObjectLayoutChange(*object, no_allocation);
+
   // Copy (real) inobject properties. If necessary, stop at number_of_fields to
   // avoid overwriting |one_pointer_filler_map|.
   int limit = Min(inobject, number_of_fields);
@@ -3411,12 +3574,16 @@
     // yet.
     if (new_map->IsUnboxedDoubleField(index)) {
       DCHECK(value->IsMutableHeapNumber());
-      object->RawFastDoublePropertyAtPut(index,
-                                         HeapNumber::cast(value)->value());
+      // Ensure that all bits of the double value are preserved.
+      object->RawFastDoublePropertyAsBitsAtPut(
+          index, HeapNumber::cast(value)->value_as_bits());
       if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
         // Transition from tagged to untagged slot.
         heap->ClearRecordedSlot(*object,
                                 HeapObject::RawField(*object, index.offset()));
+      } else {
+        DCHECK(!heap->HasRecordedSlot(
+            *object, HeapObject::RawField(*object, index.offset())));
       }
     } else {
       object->RawFastPropertyAtPut(index, value);
@@ -3427,7 +3594,7 @@
   // If there are properties in the new backing store, trim it to the correct
   // size and install the backing store into the object.
   if (external > 0) {
-    heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(*array, inobject);
+    heap->RightTrimFixedArray(*array, inobject);
     object->set_properties(*array);
   }
 
@@ -3440,8 +3607,7 @@
     Address address = object->address();
     heap->CreateFillerObjectAt(address + new_instance_size, instance_size_delta,
                                ClearRecordedSlots::kYes);
-    heap->AdjustLiveBytes(*object, -instance_size_delta,
-                          Heap::CONCURRENT_TO_SWEEPER);
+    heap->AdjustLiveBytes(*object, -instance_size_delta);
   }
 
   // We are storing the new map using release store after creating a filler for
@@ -3476,17 +3642,10 @@
   for (int i = 0; i < real_size; i++) {
     PropertyDetails details = descs->GetDetails(i);
     Handle<Name> key(descs->GetKey(i));
-    switch (details.type()) {
-      case DATA_CONSTANT: {
-        Handle<Object> value(descs->GetConstant(i), isolate);
-        PropertyDetails d(details.attributes(), DATA, i + 1,
-                          PropertyCellType::kNoCell);
-        dictionary = NameDictionary::Add(dictionary, key, value, d);
-        break;
-      }
-      case DATA: {
-        FieldIndex index = FieldIndex::ForDescriptor(*map, i);
-        Handle<Object> value;
+    Handle<Object> value;
+    if (details.location() == kField) {
+      FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+      if (details.kind() == kData) {
         if (object->IsUnboxedDoubleField(index)) {
           double old_value = object->RawFastDoublePropertyAt(index);
           value = isolate->factory()->NewHeapNumber(old_value);
@@ -3498,27 +3657,19 @@
             value = isolate->factory()->NewHeapNumber(old->value());
           }
         }
-        PropertyDetails d(details.attributes(), DATA, i + 1,
-                          PropertyCellType::kNoCell);
-        dictionary = NameDictionary::Add(dictionary, key, value, d);
-        break;
+      } else {
+        DCHECK_EQ(kAccessor, details.kind());
+        value = handle(object->RawFastPropertyAt(index), isolate);
       }
-      case ACCESSOR: {
-        FieldIndex index = FieldIndex::ForDescriptor(*map, i);
-        Handle<Object> value(object->RawFastPropertyAt(index), isolate);
-        PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
-                          PropertyCellType::kNoCell);
-        dictionary = NameDictionary::Add(dictionary, key, value, d);
-        break;
-      }
-      case ACCESSOR_CONSTANT: {
-        Handle<Object> value(descs->GetCallbacksObject(i), isolate);
-        PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
-                          PropertyCellType::kNoCell);
-        dictionary = NameDictionary::Add(dictionary, key, value, d);
-        break;
-      }
+
+    } else {
+      DCHECK_EQ(kDescriptor, details.location());
+      value = handle(descs->GetValue(i), isolate);
     }
+    DCHECK(!value.is_null());
+    PropertyDetails d(details.kind(), details.attributes(), i + 1,
+                      PropertyCellType::kNoCell);
+    dictionary = NameDictionary::Add(dictionary, key, value, d);
   }
 
   // Copy the next enumeration index from instance descriptor.
@@ -3527,17 +3678,18 @@
   // From here on we cannot fail and we shouldn't GC anymore.
   DisallowHeapAllocation no_allocation;
 
+  Heap* heap = isolate->heap();
+  heap->NotifyObjectLayoutChange(*object, no_allocation);
+
   // Resize the object in the heap if necessary.
   int new_instance_size = new_map->instance_size();
   int instance_size_delta = map->instance_size() - new_instance_size;
   DCHECK(instance_size_delta >= 0);
 
   if (instance_size_delta > 0) {
-    Heap* heap = isolate->heap();
     heap->CreateFillerObjectAt(object->address() + new_instance_size,
                                instance_size_delta, ClearRecordedSlots::kYes);
-    heap->AdjustLiveBytes(*object, -instance_size_delta,
-                          Heap::CONCURRENT_TO_SWEEPER);
+    heap->AdjustLiveBytes(*object, -instance_size_delta);
   }
 
   // We are storing the new map using release store after creating a filler for
@@ -3647,22 +3799,31 @@
   return result;
 }
 
-Handle<Map> Map::CopyGeneralizeAllRepresentations(
-    Handle<Map> map, ElementsKind elements_kind, int modify_index,
-    StoreMode store_mode, PropertyKind kind, PropertyAttributes attributes,
-    const char* reason) {
+void DescriptorArray::GeneralizeAllFields() {
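+  // Widen every descriptor to the most general state: tagged representation
+  // everywhere, and mutable constness plus FieldType::Any for fields.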
+  int length = number_of_descriptors();
+  for (int i = 0; i < length; i++) {
+    PropertyDetails details = GetDetails(i);
+    details = details.CopyWithRepresentation(Representation::Tagged());
+    if (details.location() == kField) {
+      DCHECK_EQ(kData, details.kind());
+      details = details.CopyWithConstness(kMutable);
+      SetValue(i, FieldType::Any());
+    }
+    set(ToDetailsIndex(i), details.AsSmi());
+  }
+}
+
+Handle<Map> Map::CopyGeneralizeAllFields(Handle<Map> map,
+                                         ElementsKind elements_kind,
+                                         int modify_index, PropertyKind kind,
+                                         PropertyAttributes attributes,
+                                         const char* reason) {
   Isolate* isolate = map->GetIsolate();
   Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
   Handle<DescriptorArray> descriptors =
       DescriptorArray::CopyUpTo(old_descriptors, number_of_own_descriptors);
-
-  for (int i = 0; i < number_of_own_descriptors; i++) {
-    descriptors->SetRepresentation(i, Representation::Tagged());
-    if (descriptors->GetDetails(i).type() == DATA) {
-      descriptors->SetValue(i, FieldType::Any());
-    }
-  }
+  descriptors->GeneralizeAllFields();
 
   Handle<LayoutDescriptor> new_layout_descriptor(
       LayoutDescriptor::FastPointerLayout(), isolate);
@@ -3673,14 +3834,16 @@
   // Unless the instance is being migrated, ensure that modify_index is a field.
   if (modify_index >= 0) {
     PropertyDetails details = descriptors->GetDetails(modify_index);
-    if (store_mode == FORCE_FIELD &&
-        (details.type() != DATA || details.attributes() != attributes)) {
-      int field_index = details.type() == DATA ? details.field_index()
-                                               : new_map->NumberOfFields();
-      DataDescriptor d(handle(descriptors->GetKey(modify_index), isolate),
-                       field_index, attributes, Representation::Tagged());
+    if (details.constness() != kMutable || details.location() != kField ||
+        details.attributes() != attributes) {
+      int field_index = details.location() == kField
+                            ? details.field_index()
+                            : new_map->NumberOfFields();
+      Descriptor d = Descriptor::DataField(
+          handle(descriptors->GetKey(modify_index), isolate), field_index,
+          attributes, Representation::Tagged());
       descriptors->Replace(modify_index, &d);
-      if (details.type() != DATA) {
+      if (details.location() != kField) {
         int unused_property_fields = new_map->unused_property_fields() - 1;
         if (unused_property_fields < 0) {
           unused_property_fields += JSObject::kFieldsAdded;
@@ -3693,14 +3856,13 @@
 
     if (FLAG_trace_generalization) {
       MaybeHandle<FieldType> field_type = FieldType::None(isolate);
-      if (details.type() == DATA) {
+      if (details.location() == kField) {
         field_type = handle(
             map->instance_descriptors()->GetFieldType(modify_index), isolate);
       }
       map->PrintGeneralization(
           stdout, reason, modify_index, new_map->NumberOfOwnDescriptors(),
-          new_map->NumberOfOwnDescriptors(),
-          details.type() == DATA_CONSTANT && store_mode == FORCE_FIELD,
+          new_map->NumberOfOwnDescriptors(), details.location() == kDescriptor,
           details.representation(), Representation::Tagged(), field_type,
           MaybeHandle<Object>(), FieldType::Any(isolate),
           MaybeHandle<Object>());
@@ -3725,13 +3887,6 @@
 }
 
 
-static inline bool EqualImmutableValues(Object* obj1, Object* obj2) {
-  if (obj1 == obj2) return true;  // Valid for both kData and kAccessor kinds.
-  // TODO(ishell): compare AccessorPairs.
-  return false;
-}
-
-
 // Installs |new_descriptors| over the current instance_descriptors to ensure
 // proper sharing of descriptor arrays.
 void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
@@ -3774,50 +3929,9 @@
 }
 
 
-Map* Map::FindLastMatchMap(int verbatim,
-                           int length,
-                           DescriptorArray* descriptors) {
-  DisallowHeapAllocation no_allocation;
-
-  // This can only be called on roots of transition trees.
-  DCHECK_EQ(verbatim, NumberOfOwnDescriptors());
-
-  Map* current = this;
-
-  for (int i = verbatim; i < length; i++) {
-    Name* name = descriptors->GetKey(i);
-    PropertyDetails details = descriptors->GetDetails(i);
-    Map* next = TransitionArray::SearchTransition(current, details.kind(), name,
-                                                  details.attributes());
-    if (next == NULL) break;
-    DescriptorArray* next_descriptors = next->instance_descriptors();
-
-    PropertyDetails next_details = next_descriptors->GetDetails(i);
-    DCHECK_EQ(details.kind(), next_details.kind());
-    DCHECK_EQ(details.attributes(), next_details.attributes());
-    if (details.location() != next_details.location()) break;
-    if (!details.representation().Equals(next_details.representation())) break;
-
-    if (next_details.location() == kField) {
-      FieldType* next_field_type = next_descriptors->GetFieldType(i);
-      if (!descriptors->GetFieldType(i)->NowIs(next_field_type)) {
-        break;
-      }
-    } else {
-      if (!EqualImmutableValues(descriptors->GetValue(i),
-                                next_descriptors->GetValue(i))) {
-        break;
-      }
-    }
-    current = next;
-  }
-  return current;
-}
-
-
 Map* Map::FindFieldOwner(int descriptor) {
   DisallowHeapAllocation no_allocation;
-  DCHECK_EQ(DATA, instance_descriptors()->GetDetails(descriptor).type());
+  DCHECK_EQ(kField, instance_descriptors()->GetDetails(descriptor).location());
   Map* result = this;
   Isolate* isolate = GetIsolate();
   while (true) {
@@ -3830,15 +3944,16 @@
   return result;
 }
 
-
 void Map::UpdateFieldType(int descriptor, Handle<Name> name,
+                          PropertyConstness new_constness,
                           Representation new_representation,
                           Handle<Object> new_wrapped_type) {
   DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeakCell());
   // We store raw pointers in the queue, so no allocations are allowed.
   DisallowHeapAllocation no_allocation;
   PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
-  if (details.type() != DATA) return;
+  if (details.location() != kField) return;
+  DCHECK_EQ(kData, details.kind());
 
   Zone zone(GetIsolate()->allocator(), ZONE_NAME);
   ZoneQueue<Map*> backlog(&zone);
@@ -3857,15 +3972,19 @@
     DescriptorArray* descriptors = current->instance_descriptors();
     PropertyDetails details = descriptors->GetDetails(descriptor);
 
+    // Currently constness change implies map change.
+    DCHECK_EQ(new_constness, details.constness());
+
     // It is allowed to change representation here only from None to something.
     DCHECK(details.representation().Equals(new_representation) ||
            details.representation().IsNone());
 
     // Skip if already updated the shared descriptor.
     if (descriptors->GetValue(descriptor) != *new_wrapped_type) {
-      DataDescriptor d(name, descriptors->GetFieldIndex(descriptor),
-                       new_wrapped_type, details.attributes(),
-                       new_representation);
+      DCHECK_IMPLIES(!FLAG_track_constant_fields, new_constness == kMutable);
+      Descriptor d = Descriptor::DataField(
+          name, descriptors->GetFieldIndex(descriptor), details.attributes(),
+          new_constness, new_representation, new_wrapped_type);
       descriptors->Replace(descriptor, &d);
     }
   }
@@ -3895,25 +4014,28 @@
 
 
 // static
-void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
-                              Representation new_representation,
-                              Handle<FieldType> new_field_type) {
+void Map::GeneralizeField(Handle<Map> map, int modify_index,
+                          PropertyConstness new_constness,
+                          Representation new_representation,
+                          Handle<FieldType> new_field_type) {
   Isolate* isolate = map->GetIsolate();
 
   // Check if we actually need to generalize the field type at all.
   Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
-  Representation old_representation =
-      old_descriptors->GetDetails(modify_index).representation();
+  PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
+  PropertyConstness old_constness = old_details.constness();
+  Representation old_representation = old_details.representation();
   Handle<FieldType> old_field_type(old_descriptors->GetFieldType(modify_index),
                                    isolate);
 
-  if (old_representation.Equals(new_representation) &&
+  if (old_constness == new_constness &&
+      old_representation.Equals(new_representation) &&
       !FieldTypeIsCleared(new_representation, *new_field_type) &&
       // Checking old_field_type for being cleared is not necessary because
       // the NowIs check below would fail anyway in that case.
       new_field_type->NowIs(old_field_type)) {
-    DCHECK(Map::GeneralizeFieldType(old_representation, old_field_type,
-                                    new_representation, new_field_type, isolate)
+    DCHECK(GeneralizeFieldType(old_representation, old_field_type,
+                               new_representation, new_field_type, isolate)
                ->NowIs(old_field_type));
     return;
   }
@@ -3931,9 +4053,9 @@
   PropertyDetails details = descriptors->GetDetails(modify_index);
   Handle<Name> name(descriptors->GetKey(modify_index));
 
-  Handle<Object> wrapped_type(WrapType(new_field_type));
-  field_owner->UpdateFieldType(modify_index, name, new_representation,
-                               wrapped_type);
+  Handle<Object> wrapped_type(WrapFieldType(new_field_type));
+  field_owner->UpdateFieldType(modify_index, name, new_constness,
+                               new_representation, wrapped_type);
   field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
       isolate, DependentCode::kFieldOwnerGroup);
 
@@ -3946,577 +4068,40 @@
   }
 }
 
-static inline Handle<FieldType> GetFieldType(
-    Isolate* isolate, Handle<DescriptorArray> descriptors, int descriptor,
-    PropertyLocation location, Representation representation) {
-#ifdef DEBUG
-  PropertyDetails details = descriptors->GetDetails(descriptor);
-  DCHECK_EQ(kData, details.kind());
-  DCHECK_EQ(details.location(), location);
-#endif
-  if (location == kField) {
-    return handle(descriptors->GetFieldType(descriptor), isolate);
-  } else {
-    return descriptors->GetValue(descriptor)
-        ->OptimalType(isolate, representation);
-  }
+// TODO(ishell): remove.
+// static
+Handle<Map> Map::ReconfigureProperty(Handle<Map> map, int modify_index,
+                                     PropertyKind new_kind,
+                                     PropertyAttributes new_attributes,
+                                     Representation new_representation,
+                                     Handle<FieldType> new_field_type) {
+  DCHECK_EQ(kData, new_kind);  // Only kData case is supported.
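+  // Delegate to MapUpdater, which now owns the transition-tree rewriting
+  // previously done by Map::Reconfigure (removed below).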
+  MapUpdater mu(map->GetIsolate(), map);
+  return mu.ReconfigureToDataField(modify_index, new_attributes, kConst,
+                                   new_representation, new_field_type);
 }
 
-// Reconfigures elements kind to |new_elements_kind| and/or property at
-// |modify_index| with |new_kind|, |new_attributes|, |store_mode| and/or
-// |new_representation|/|new_field_type|.
-// If |modify_index| is negative then no properties are reconfigured but the
-// map is migrated to the up-to-date non-deprecated state.
-//
-// This method rewrites or completes the transition tree to reflect the new
-// change. To avoid high degrees over polymorphism, and to stabilize quickly,
-// on every rewrite the new type is deduced by merging the current type with
-// any potential new (partial) version of the type in the transition tree.
-// To do this, on each rewrite:
-// - Search the root of the transition tree using FindRootMap.
-// - Find/create a |root_map| with requested |new_elements_kind|.
-// - Find |target_map|, the newest matching version of this map using the
-//   virtually "enhanced" |old_map|'s descriptor array (i.e. whose entry at
-//   |modify_index| is considered to be of |new_kind| and having
-//   |new_attributes|) to walk the transition tree.
-// - Merge/generalize the "enhanced" descriptor array of the |old_map| and
-//   descriptor array of the |target_map|.
-// - Generalize the |modify_index| descriptor using |new_representation| and
-//   |new_field_type|.
-// - Walk the tree again starting from the root towards |target_map|. Stop at
-//   |split_map|, the first map who's descriptor array does not match the merged
-//   descriptor array.
-// - If |target_map| == |split_map|, |target_map| is in the expected state.
-//   Return it.
-// - Otherwise, invalidate the outdated transition target from |target_map|, and
-//   replace its transition tree with a new branch for the updated descriptors.
-Handle<Map> Map::Reconfigure(Handle<Map> old_map,
-                             ElementsKind new_elements_kind, int modify_index,
-                             PropertyKind new_kind,
-                             PropertyAttributes new_attributes,
-                             Representation new_representation,
-                             Handle<FieldType> new_field_type,
-                             StoreMode store_mode) {
-  DCHECK_NE(kAccessor, new_kind);  // TODO(ishell): not supported yet.
-  DCHECK(store_mode != FORCE_FIELD || modify_index >= 0);
-  Isolate* isolate = old_map->GetIsolate();
-
-  Handle<DescriptorArray> old_descriptors(
-      old_map->instance_descriptors(), isolate);
-  int old_nof = old_map->NumberOfOwnDescriptors();
-
-  // If it's just a representation generalization case (i.e. property kind and
-  // attributes stays unchanged) it's fine to transition from None to anything
-  // but double without any modification to the object, because the default
-  // uninitialized value for representation None can be overwritten by both
-  // smi and tagged values. Doubles, however, would require a box allocation.
-  if (modify_index >= 0 && !new_representation.IsNone() &&
-      !new_representation.IsDouble() &&
-      old_map->elements_kind() == new_elements_kind) {
-    PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
-    Representation old_representation = old_details.representation();
-
-    if (old_representation.IsNone()) {
-      DCHECK_EQ(new_kind, old_details.kind());
-      DCHECK_EQ(new_attributes, old_details.attributes());
-      DCHECK_EQ(DATA, old_details.type());
-      if (FLAG_trace_generalization) {
-        old_map->PrintGeneralization(
-            stdout, "uninitialized field", modify_index,
-            old_map->NumberOfOwnDescriptors(),
-            old_map->NumberOfOwnDescriptors(), false, old_representation,
-            new_representation,
-            handle(old_descriptors->GetFieldType(modify_index), isolate),
-            MaybeHandle<Object>(), new_field_type, MaybeHandle<Object>());
-      }
-      Handle<Map> field_owner(old_map->FindFieldOwner(modify_index), isolate);
-
-      GeneralizeFieldType(field_owner, modify_index, new_representation,
-                          new_field_type);
-      DCHECK(old_descriptors->GetDetails(modify_index)
-                 .representation()
-                 .Equals(new_representation));
-      DCHECK(
-          old_descriptors->GetFieldType(modify_index)->NowIs(new_field_type));
-      return old_map;
-    }
-  }
-
-  // Check the state of the root map.
-  Handle<Map> root_map(old_map->FindRootMap(), isolate);
-  if (!old_map->EquivalentToForTransition(*root_map)) {
-    return CopyGeneralizeAllRepresentations(
-        old_map, new_elements_kind, modify_index, store_mode, new_kind,
-        new_attributes, "GenAll_NotEquivalent");
-  }
-
-  ElementsKind from_kind = root_map->elements_kind();
-  ElementsKind to_kind = new_elements_kind;
-  // TODO(ishell): Add a test for SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
-  if (from_kind != to_kind && to_kind != DICTIONARY_ELEMENTS &&
-      to_kind != SLOW_STRING_WRAPPER_ELEMENTS &&
-      to_kind != SLOW_SLOPPY_ARGUMENTS_ELEMENTS &&
-      !(IsTransitionableFastElementsKind(from_kind) &&
-        IsMoreGeneralElementsKindTransition(from_kind, to_kind))) {
-    return CopyGeneralizeAllRepresentations(
-        old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
-        "GenAll_InvalidElementsTransition");
-  }
-  int root_nof = root_map->NumberOfOwnDescriptors();
-  if (modify_index >= 0 && modify_index < root_nof) {
-    PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
-    if (old_details.kind() != new_kind ||
-        old_details.attributes() != new_attributes) {
-      return CopyGeneralizeAllRepresentations(
-          old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
-          "GenAll_RootModification1");
-    }
-    if ((old_details.type() != DATA && store_mode == FORCE_FIELD) ||
-        (old_details.type() == DATA &&
-         (!new_field_type->NowIs(old_descriptors->GetFieldType(modify_index)) ||
-          !new_representation.fits_into(old_details.representation())))) {
-      return CopyGeneralizeAllRepresentations(
-          old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
-          "GenAll_RootModification2");
-    }
-  }
-
-  // From here on, use the map with correct elements kind as root map.
-  if (from_kind != to_kind) {
-    root_map = Map::AsElementsKind(root_map, to_kind);
-  }
-
-  Handle<Map> target_map = root_map;
-  for (int i = root_nof; i < old_nof; ++i) {
-    PropertyDetails old_details = old_descriptors->GetDetails(i);
-    PropertyKind next_kind;
-    PropertyLocation next_location;
-    PropertyAttributes next_attributes;
-    Representation next_representation;
-    bool property_kind_reconfiguration = false;
-
-    if (modify_index == i) {
-      DCHECK_EQ(FORCE_FIELD, store_mode);
-      property_kind_reconfiguration = old_details.kind() != new_kind;
-
-      next_kind = new_kind;
-      next_location = kField;
-      next_attributes = new_attributes;
-      // If the property kind is not reconfigured, merge the result with the
-      // representation/field type from the old descriptor.
-      next_representation = new_representation;
-      if (!property_kind_reconfiguration) {
-        next_representation =
-            next_representation.generalize(old_details.representation());
-      }
-
-    } else {
-      next_kind = old_details.kind();
-      next_location = old_details.location();
-      next_attributes = old_details.attributes();
-      next_representation = old_details.representation();
-    }
-    Map* transition = TransitionArray::SearchTransition(
-        *target_map, next_kind, old_descriptors->GetKey(i), next_attributes);
-    if (transition == NULL) break;
-    Handle<Map> tmp_map(transition, isolate);
-
-    Handle<DescriptorArray> tmp_descriptors = handle(
-        tmp_map->instance_descriptors(), isolate);
-
-    // Check if target map is incompatible.
-    PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
-    DCHECK_EQ(next_kind, tmp_details.kind());
-    DCHECK_EQ(next_attributes, tmp_details.attributes());
-    if (next_kind == kAccessor &&
-        !EqualImmutableValues(old_descriptors->GetValue(i),
-                              tmp_descriptors->GetValue(i))) {
-      return CopyGeneralizeAllRepresentations(
-          old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
-          "GenAll_Incompatible");
-    }
-    if (next_location == kField && tmp_details.location() == kDescriptor) break;
-
-    Representation tmp_representation = tmp_details.representation();
-    if (!next_representation.fits_into(tmp_representation)) break;
-
-    PropertyLocation old_location = old_details.location();
-    PropertyLocation tmp_location = tmp_details.location();
-    if (tmp_location == kField) {
-      if (next_kind == kData) {
-        Handle<FieldType> next_field_type;
-        if (modify_index == i) {
-          next_field_type = new_field_type;
-          if (!property_kind_reconfiguration) {
-            Handle<FieldType> old_field_type =
-                GetFieldType(isolate, old_descriptors, i,
-                             old_details.location(), tmp_representation);
-            Representation old_representation = old_details.representation();
-            next_field_type = GeneralizeFieldType(
-                old_representation, old_field_type, new_representation,
-                next_field_type, isolate);
-          }
-        } else {
-          Handle<FieldType> old_field_type =
-              GetFieldType(isolate, old_descriptors, i, old_details.location(),
-                           tmp_representation);
-          next_field_type = old_field_type;
-        }
-        GeneralizeFieldType(tmp_map, i, tmp_representation, next_field_type);
-      }
-    } else if (old_location == kField ||
-               !EqualImmutableValues(old_descriptors->GetValue(i),
-                                     tmp_descriptors->GetValue(i))) {
-      break;
-    }
-    DCHECK(!tmp_map->is_deprecated());
-    target_map = tmp_map;
-  }
-
-  // Directly change the map if the target map is more general.
-  Handle<DescriptorArray> target_descriptors(
-      target_map->instance_descriptors(), isolate);
-  int target_nof = target_map->NumberOfOwnDescriptors();
-  if (target_nof == old_nof &&
-      (store_mode != FORCE_FIELD ||
-       (modify_index >= 0 &&
-        target_descriptors->GetDetails(modify_index).location() == kField))) {
-#ifdef DEBUG
-    if (modify_index >= 0) {
-      PropertyDetails details = target_descriptors->GetDetails(modify_index);
-      DCHECK_EQ(new_kind, details.kind());
-      DCHECK_EQ(new_attributes, details.attributes());
-      DCHECK(new_representation.fits_into(details.representation()));
-      DCHECK(details.location() != kField ||
-             new_field_type->NowIs(
-                 target_descriptors->GetFieldType(modify_index)));
-    }
-#endif
-    if (*target_map != *old_map) {
-      old_map->NotifyLeafMapLayoutChange();
-    }
-    return target_map;
-  }
-
-  // Find the last compatible target map in the transition tree.
-  for (int i = target_nof; i < old_nof; ++i) {
-    PropertyDetails old_details = old_descriptors->GetDetails(i);
-    PropertyKind next_kind;
-    PropertyAttributes next_attributes;
-    if (modify_index == i) {
-      next_kind = new_kind;
-      next_attributes = new_attributes;
-    } else {
-      next_kind = old_details.kind();
-      next_attributes = old_details.attributes();
-    }
-    Map* transition = TransitionArray::SearchTransition(
-        *target_map, next_kind, old_descriptors->GetKey(i), next_attributes);
-    if (transition == NULL) break;
-    Handle<Map> tmp_map(transition, isolate);
-    Handle<DescriptorArray> tmp_descriptors(
-        tmp_map->instance_descriptors(), isolate);
-
-    // Check if target map is compatible.
-#ifdef DEBUG
-    PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
-    DCHECK_EQ(next_kind, tmp_details.kind());
-    DCHECK_EQ(next_attributes, tmp_details.attributes());
-#endif
-    if (next_kind == kAccessor &&
-        !EqualImmutableValues(old_descriptors->GetValue(i),
-                              tmp_descriptors->GetValue(i))) {
-      return CopyGeneralizeAllRepresentations(
-          old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
-          "GenAll_Incompatible");
-    }
-    DCHECK(!tmp_map->is_deprecated());
-    target_map = tmp_map;
-  }
-  target_nof = target_map->NumberOfOwnDescriptors();
-  target_descriptors = handle(target_map->instance_descriptors(), isolate);
-
-  // Allocate a new descriptor array large enough to hold the required
-  // descriptors, and at minimum the same size as the old descriptor array.
-  int new_slack = Max(
-      old_nof, old_descriptors->number_of_descriptors()) - old_nof;
-  Handle<DescriptorArray> new_descriptors = DescriptorArray::Allocate(
-      isolate, old_nof, new_slack);
-  DCHECK(new_descriptors->length() > target_descriptors->length() ||
-         new_descriptors->NumberOfSlackDescriptors() > 0 ||
-         new_descriptors->number_of_descriptors() ==
-         old_descriptors->number_of_descriptors());
-  DCHECK(new_descriptors->number_of_descriptors() == old_nof);
-
-  // 0 -> |root_nof|
-  int current_offset = 0;
-  for (int i = 0; i < root_nof; ++i) {
-    PropertyDetails old_details = old_descriptors->GetDetails(i);
-    if (old_details.location() == kField) {
-      current_offset += old_details.field_width_in_words();
-    }
-    Descriptor d(handle(old_descriptors->GetKey(i), isolate),
-                 handle(old_descriptors->GetValue(i), isolate),
-                 old_details);
-    new_descriptors->Set(i, &d);
-  }
-
-  // |root_nof| -> |target_nof|
-  for (int i = root_nof; i < target_nof; ++i) {
-    Handle<Name> target_key(target_descriptors->GetKey(i), isolate);
-    PropertyDetails old_details = old_descriptors->GetDetails(i);
-    PropertyDetails target_details = target_descriptors->GetDetails(i);
-
-    PropertyKind next_kind;
-    PropertyAttributes next_attributes;
-    PropertyLocation next_location;
-    Representation next_representation;
-    bool property_kind_reconfiguration = false;
-
-    if (modify_index == i) {
-      DCHECK_EQ(FORCE_FIELD, store_mode);
-      property_kind_reconfiguration = old_details.kind() != new_kind;
-
-      next_kind = new_kind;
-      next_attributes = new_attributes;
-      next_location = kField;
-
-      // Merge the new representation/field type with those of the target
-      // descriptor. If the property kind is not reconfigured, merge the result
-      // with the representation/field type from the old descriptor.
-      next_representation =
-          new_representation.generalize(target_details.representation());
-      if (!property_kind_reconfiguration) {
-        next_representation =
-            next_representation.generalize(old_details.representation());
-      }
-    } else {
-      // Merge old_descriptor and target_descriptor entries.
-      DCHECK_EQ(target_details.kind(), old_details.kind());
-      next_kind = target_details.kind();
-      next_attributes = target_details.attributes();
-      next_location =
-          old_details.location() == kField ||
-                  target_details.location() == kField ||
-                  !EqualImmutableValues(target_descriptors->GetValue(i),
-                                        old_descriptors->GetValue(i))
-              ? kField
-              : kDescriptor;
-
-      next_representation = old_details.representation().generalize(
-          target_details.representation());
-    }
-    DCHECK_EQ(next_kind, target_details.kind());
-    DCHECK_EQ(next_attributes, target_details.attributes());
-
-    if (next_location == kField) {
-      if (next_kind == kData) {
-        Handle<FieldType> target_field_type =
-            GetFieldType(isolate, target_descriptors, i,
-                         target_details.location(), next_representation);
-
-        Handle<FieldType> next_field_type;
-        if (modify_index == i) {
-          next_field_type = GeneralizeFieldType(
-              target_details.representation(), target_field_type,
-              new_representation, new_field_type, isolate);
-          if (!property_kind_reconfiguration) {
-            Handle<FieldType> old_field_type =
-                GetFieldType(isolate, old_descriptors, i,
-                             old_details.location(), next_representation);
-            next_field_type = GeneralizeFieldType(
-                old_details.representation(), old_field_type,
-                next_representation, next_field_type, isolate);
-          }
-        } else {
-          Handle<FieldType> old_field_type =
-              GetFieldType(isolate, old_descriptors, i, old_details.location(),
-                           next_representation);
-          next_field_type = GeneralizeFieldType(
-              old_details.representation(), old_field_type, next_representation,
-              target_field_type, isolate);
-        }
-        Handle<Object> wrapped_type(WrapType(next_field_type));
-        DataDescriptor d(target_key, current_offset, wrapped_type,
-                         next_attributes, next_representation);
-        current_offset += d.GetDetails().field_width_in_words();
-        new_descriptors->Set(i, &d);
-      } else {
-        UNIMPLEMENTED();  // TODO(ishell): implement.
-      }
-    } else {
-      PropertyDetails details(next_attributes, next_kind, next_location,
-                              next_representation);
-      Descriptor d(target_key, handle(target_descriptors->GetValue(i), isolate),
-                   details);
-      new_descriptors->Set(i, &d);
-    }
-  }
-
-  // |target_nof| -> |old_nof|
-  for (int i = target_nof; i < old_nof; ++i) {
-    PropertyDetails old_details = old_descriptors->GetDetails(i);
-    Handle<Name> old_key(old_descriptors->GetKey(i), isolate);
-
-    // Merge old_descriptor entry and modified details together.
-    PropertyKind next_kind;
-    PropertyAttributes next_attributes;
-    PropertyLocation next_location;
-    Representation next_representation;
-    bool property_kind_reconfiguration = false;
-
-    if (modify_index == i) {
-      DCHECK_EQ(FORCE_FIELD, store_mode);
-      // In case of a property kind reconfiguration it is not necessary to
-      // take the representation/field type of the old descriptor into account.
-      property_kind_reconfiguration = old_details.kind() != new_kind;
-
-      next_kind = new_kind;
-      next_attributes = new_attributes;
-      next_location = kField;
-      next_representation = new_representation;
-      if (!property_kind_reconfiguration) {
-        next_representation =
-            next_representation.generalize(old_details.representation());
-      }
-    } else {
-      next_kind = old_details.kind();
-      next_attributes = old_details.attributes();
-      next_location = old_details.location();
-      next_representation = old_details.representation();
-    }
-
-    if (next_location == kField) {
-      if (next_kind == kData) {
-        Handle<FieldType> next_field_type;
-        if (modify_index == i) {
-          next_field_type = new_field_type;
-          if (!property_kind_reconfiguration) {
-            Handle<FieldType> old_field_type =
-                GetFieldType(isolate, old_descriptors, i,
-                             old_details.location(), next_representation);
-            next_field_type = GeneralizeFieldType(
-                old_details.representation(), old_field_type,
-                next_representation, next_field_type, isolate);
-          }
-        } else {
-          Handle<FieldType> old_field_type =
-              GetFieldType(isolate, old_descriptors, i, old_details.location(),
-                           next_representation);
-          next_field_type = old_field_type;
-        }
-
-        Handle<Object> wrapped_type(WrapType(next_field_type));
-
-        DataDescriptor d(old_key, current_offset, wrapped_type, next_attributes,
-                         next_representation);
-        current_offset += d.GetDetails().field_width_in_words();
-        new_descriptors->Set(i, &d);
-      } else {
-        UNIMPLEMENTED();  // TODO(ishell): implement.
-      }
-    } else {
-      PropertyDetails details(next_attributes, next_kind, next_location,
-                              next_representation);
-      Descriptor d(old_key, handle(old_descriptors->GetValue(i), isolate),
-                   details);
-      new_descriptors->Set(i, &d);
-    }
-  }
-
-  new_descriptors->Sort();
-
-  DCHECK(store_mode != FORCE_FIELD ||
-         new_descriptors->GetDetails(modify_index).location() == kField);
-
-  Handle<Map> split_map(root_map->FindLastMatchMap(
-          root_nof, old_nof, *new_descriptors), isolate);
-  int split_nof = split_map->NumberOfOwnDescriptors();
-  DCHECK_NE(old_nof, split_nof);
-
-  PropertyKind split_kind;
-  PropertyAttributes split_attributes;
-  if (modify_index == split_nof) {
-    split_kind = new_kind;
-    split_attributes = new_attributes;
-  } else {
-    PropertyDetails split_prop_details = old_descriptors->GetDetails(split_nof);
-    split_kind = split_prop_details.kind();
-    split_attributes = split_prop_details.attributes();
-  }
-
-  // Invalidate the transition target at the split point, if any.
-  Map* maybe_transition = TransitionArray::SearchTransition(
-      *split_map, split_kind, old_descriptors->GetKey(split_nof),
-      split_attributes);
-  if (maybe_transition != NULL) {
-    maybe_transition->DeprecateTransitionTree();
-  }
-
-  // If |maybe_transition| is not NULL then the transition array already
-  // contains an entry for the given descriptor. This means that the transition
-  // can be inserted regardless of whether the transition array is full or not.
-  if (maybe_transition == NULL &&
-      !TransitionArray::CanHaveMoreTransitions(split_map)) {
-    return CopyGeneralizeAllRepresentations(
-        old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
-        "GenAll_CantHaveMoreTransitions");
-  }
-
-  old_map->NotifyLeafMapLayoutChange();
-
-  if (FLAG_trace_generalization && modify_index >= 0) {
-    PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
-    PropertyDetails new_details = new_descriptors->GetDetails(modify_index);
-    MaybeHandle<FieldType> old_field_type;
-    MaybeHandle<FieldType> new_field_type;
-    MaybeHandle<Object> old_value;
-    MaybeHandle<Object> new_value;
-    if (old_details.type() == DATA) {
-      old_field_type =
-          handle(old_descriptors->GetFieldType(modify_index), isolate);
-    } else {
-      old_value = handle(old_descriptors->GetValue(modify_index), isolate);
-    }
-    if (new_details.type() == DATA) {
-      new_field_type =
-          handle(new_descriptors->GetFieldType(modify_index), isolate);
-    } else {
-      new_value = handle(new_descriptors->GetValue(modify_index), isolate);
-    }
-
-    old_map->PrintGeneralization(
-        stdout, "", modify_index, split_nof, old_nof,
-        old_details.location() == kDescriptor && store_mode == FORCE_FIELD,
-        old_details.representation(), new_details.representation(),
-        old_field_type, old_value, new_field_type, new_value);
-  }
-
-  Handle<LayoutDescriptor> new_layout_descriptor =
-      LayoutDescriptor::New(split_map, new_descriptors, old_nof);
-
-  Handle<Map> new_map =
-      AddMissingTransitions(split_map, new_descriptors, new_layout_descriptor);
-
-  // The deprecated part of the transition tree is no longer reachable, so
-  // replace the current instance descriptors in the surviving part of the tree
-  // with the new descriptors to maintain the descriptor sharing invariant.
-  split_map->ReplaceDescriptors(*new_descriptors, *new_layout_descriptor);
-  return new_map;
+// TODO(ishell): remove.
+// static
+Handle<Map> Map::ReconfigureElementsKind(Handle<Map> map,
+                                         ElementsKind new_elements_kind) {
+  MapUpdater mu(map->GetIsolate(), map);
+  return mu.ReconfigureElementsKind(new_elements_kind);
 }
 
+// Generalize all fields and update the transition tree.
+Handle<Map> Map::GeneralizeAllFields(Handle<Map> map) {
+  Isolate* isolate = map->GetIsolate();
+  Handle<FieldType> any_type = FieldType::Any(isolate);
 
-// Generalize the representation of all DATA descriptors.
-Handle<Map> Map::GeneralizeAllFieldRepresentations(
-    Handle<Map> map) {
   Handle<DescriptorArray> descriptors(map->instance_descriptors());
   for (int i = 0; i < map->NumberOfOwnDescriptors(); ++i) {
     PropertyDetails details = descriptors->GetDetails(i);
-    if (details.type() == DATA) {
-      map = ReconfigureProperty(map, i, kData, details.attributes(),
-                                Representation::Tagged(),
-                                FieldType::Any(map->GetIsolate()), FORCE_FIELD);
+    if (details.location() == kField) {
+      DCHECK_EQ(kData, details.kind());
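+      // Widen the field to the most general state: tagged representation
+      // with field type Any.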
+      MapUpdater mu(isolate, map);
+      map = mu.ReconfigureToDataField(i, details.attributes(), kMutable,
+                                      Representation::Tagged(), any_type);
     }
   }
   return map;
@@ -4569,49 +4154,51 @@
     PropertyDetails new_details = new_descriptors->GetDetails(i);
     DCHECK_EQ(old_details.kind(), new_details.kind());
     DCHECK_EQ(old_details.attributes(), new_details.attributes());
+    if (!IsGeneralizableTo(old_details.constness(), new_details.constness())) {
+      return nullptr;
+    }
+    DCHECK(IsGeneralizableTo(old_details.location(), new_details.location()));
     if (!old_details.representation().fits_into(new_details.representation())) {
       return nullptr;
     }
-    switch (new_details.type()) {
-      case DATA: {
+    if (new_details.location() == kField) {
+      if (new_details.kind() == kData) {
         FieldType* new_type = new_descriptors->GetFieldType(i);
         // Cleared field types need special treatment. They represent lost
         // knowledge, so we must first generalize the new_type to "Any".
         if (FieldTypeIsCleared(new_details.representation(), new_type)) {
           return nullptr;
         }
-        PropertyType old_property_type = old_details.type();
-        if (old_property_type == DATA) {
+        DCHECK_EQ(kData, old_details.kind());
+        if (old_details.location() == kField) {
           FieldType* old_type = old_descriptors->GetFieldType(i);
           if (FieldTypeIsCleared(old_details.representation(), old_type) ||
               !old_type->NowIs(new_type)) {
             return nullptr;
           }
         } else {
-          DCHECK(old_property_type == DATA_CONSTANT);
+          DCHECK_EQ(kDescriptor, old_details.location());
+          DCHECK(!FLAG_track_constant_fields);
           Object* old_value = old_descriptors->GetValue(i);
           if (!new_type->NowContains(old_value)) {
             return nullptr;
           }
         }
-        break;
-      }
-      case ACCESSOR: {
+
+      } else {
+        DCHECK_EQ(kAccessor, new_details.kind());
 #ifdef DEBUG
         FieldType* new_type = new_descriptors->GetFieldType(i);
         DCHECK(new_type->IsAny());
 #endif
-        break;
+        UNREACHABLE();
       }
-
-      case DATA_CONSTANT:
-      case ACCESSOR_CONSTANT: {
-        Object* old_value = old_descriptors->GetValue(i);
-        Object* new_value = new_descriptors->GetValue(i);
-        if (old_details.location() == kField || old_value != new_value) {
-          return nullptr;
-        }
-        break;
+    } else {
+      DCHECK_EQ(kDescriptor, new_details.location());
+      Object* old_value = old_descriptors->GetValue(i);
+      Object* new_value = new_descriptors->GetValue(i);
+      if (old_details.location() == kField || old_value != new_value) {
+        return nullptr;
       }
     }
   }
@@ -4623,9 +4210,8 @@
 // static
 Handle<Map> Map::Update(Handle<Map> map) {
   if (!map->is_deprecated()) return map;
-  return ReconfigureProperty(map, -1, kData, NONE, Representation::None(),
-                             FieldType::None(map->GetIsolate()),
-                             ALLOW_IN_DESCRIPTOR);
+  MapUpdater mu(map->GetIsolate(), map);
+  return mu.Update();
 }
 
 Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
@@ -4915,7 +4501,7 @@
   it->PrepareForDataProperty(to_assign);
 
   // Write the property value.
-  it->WriteDataValue(to_assign);
+  it->WriteDataValue(to_assign, false);
 
 #if VERIFY_HEAP
   if (FLAG_verify_heap) {
@@ -4989,7 +4575,7 @@
     it->ApplyTransitionToDataProperty(receiver);
 
     // Write the property value.
-    it->WriteDataValue(value);
+    it->WriteDataValue(value, true);
 
 #if VERIFY_HEAP
     if (FLAG_verify_heap) {
@@ -5045,6 +4631,36 @@
   map->UpdateDescriptors(*new_descriptors, layout_descriptor);
 }
 
+// static
+Handle<Map> Map::GetObjectCreateMap(Handle<HeapObject> prototype) {
+  Isolate* isolate = prototype->GetIsolate();
+  Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+                  isolate);
+  if (map->prototype() == *prototype) return map;
+  if (prototype->IsNull(isolate)) {
+    return isolate->slow_object_with_null_prototype_map();
+  }
+  if (prototype->IsJSObject()) {
+    Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
+    if (!js_prototype->map()->is_prototype_map()) {
+      JSObject::OptimizeAsPrototype(js_prototype, FAST_PROTOTYPE);
+    }
+    Handle<PrototypeInfo> info =
+        Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
+    // TODO(verwaest): Use inobject slack tracking for this map.
+    if (info->HasObjectCreateMap()) {
+      map = handle(info->ObjectCreateMap(), isolate);
+    } else {
+      map = Map::CopyInitialMap(map);
+      Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
+      PrototypeInfo::SetObjectCreateMap(info, map);
+    }
+    return map;
+  }
+
+  return Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
+}
+
 template <class T>
 static int AppendUniqueCallbacks(Handle<TemplateList> callbacks,
                                  Handle<typename T::Array> array,
@@ -5094,8 +4710,9 @@
                      int valid_descriptors,
                      Handle<DescriptorArray> array) {
     DisallowHeapAllocation no_gc;
-    AccessorConstantDescriptor desc(key, entry, entry->property_attributes());
-    array->Append(&desc);
+    Descriptor d =
+        Descriptor::AccessorConstant(key, entry, entry->property_attributes());
+    array->Append(&d);
   }
 };
 
@@ -5631,7 +5248,7 @@
 MaybeHandle<Context> JSObject::GetFunctionRealm(Handle<JSObject> object) {
   DCHECK(object->map()->is_constructor());
   DCHECK(!object->IsJSFunction());
-  return handle(object->GetCreationContext());
+  return object->GetCreationContext();
 }
 
 
@@ -5979,7 +5596,7 @@
     iteration_order =
         NameDictionary::DoGenerateNewEnumerationIndices(dictionary);
   } else {
-    iteration_order = NameDictionary::BuildIterationIndicesArray(dictionary);
+    iteration_order = NameDictionary::IterationIndices(dictionary);
   }
 
   int instance_descriptor_length = iteration_order->length();
@@ -5990,10 +5607,16 @@
     int index = Smi::cast(iteration_order->get(i))->value();
     DCHECK(dictionary->IsKey(isolate, dictionary->KeyAt(index)));
 
-    Object* value = dictionary->ValueAt(index);
-    PropertyType type = dictionary->DetailsAt(index).type();
-    if (type == DATA && !value->IsJSFunction()) {
-      number_of_fields += 1;
+    PropertyKind kind = dictionary->DetailsAt(index).kind();
+    if (kind == kData) {
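+      // With constant field tracking every data property is allocated a
+      // field; without it, function values stay constants.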
+      if (FLAG_track_constant_fields) {
+        number_of_fields += 1;
+      } else {
+        Object* value = dictionary->ValueAt(index);
+        if (!value->IsJSFunction()) {
+          number_of_fields += 1;
+        }
+      }
     }
   }
 
@@ -6057,14 +5680,28 @@
     Object* value = dictionary->ValueAt(index);
 
     PropertyDetails details = dictionary->DetailsAt(index);
+    DCHECK_EQ(kField, details.location());
+    DCHECK_EQ(kMutable, details.constness());
     int enumeration_index = details.dictionary_index();
-    PropertyType type = details.type();
 
-    if (value->IsJSFunction()) {
-      DataConstantDescriptor d(key, handle(value, isolate),
-                               details.attributes());
-      descriptors->Set(enumeration_index - 1, &d);
-    } else if (type == DATA) {
+    Descriptor d;
+    if (details.kind() == kData) {
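+      // Without constant field tracking, function-valued properties stay
+      // DataConstant descriptors; everything else becomes a data field.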
+      if (!FLAG_track_constant_fields && value->IsJSFunction()) {
+        d = Descriptor::DataConstant(key, handle(value, isolate),
+                                     details.attributes());
+      } else {
+        d = Descriptor::DataField(
+            key, current_offset, details.attributes(), kDefaultFieldConstness,
+            // TODO(verwaest): value->OptimalRepresentation();
+            Representation::Tagged(), FieldType::Any(isolate));
+      }
+    } else {
+      DCHECK_EQ(kAccessor, details.kind());
+      d = Descriptor::AccessorConstant(key, handle(value, isolate),
+                                       details.attributes());
+    }
+    details = d.GetDetails();
+    if (details.location() == kField) {
       if (current_offset < inobject_props) {
         object->InObjectPropertyAtPut(current_offset, value,
                                       UPDATE_WRITE_BARRIER);
@@ -6072,18 +5709,9 @@
         int offset = current_offset - inobject_props;
         fields->set(offset, value);
       }
-      DataDescriptor d(key, current_offset, details.attributes(),
-                       // TODO(verwaest): value->OptimalRepresentation();
-                       Representation::Tagged());
-      current_offset += d.GetDetails().field_width_in_words();
-      descriptors->Set(enumeration_index - 1, &d);
-    } else if (type == ACCESSOR_CONSTANT) {
-      AccessorConstantDescriptor d(key, handle(value, isolate),
-                                   details.attributes());
-      descriptors->Set(enumeration_index - 1, &d);
-    } else {
-      UNREACHABLE();
+      current_offset += details.field_width_in_words();
     }
+    descriptors->Set(enumeration_index - 1, &d);
   }
   DCHECK(current_offset == number_of_fields);
 
@@ -6123,9 +5751,10 @@
 void JSObject::RequireSlowElements(SeededNumberDictionary* dictionary) {
   if (dictionary->requires_slow_elements()) return;
   dictionary->set_requires_slow_elements();
-  // TODO(verwaest): Remove this hack.
   if (map()->is_prototype_map()) {
-    TypeFeedbackVector::ClearAllKeyedStoreICs(GetIsolate());
+    // If this object is a prototype (the callee will check), invalidate any
+    // prototype chains involving it.
+    InvalidatePrototypeChains(map());
   }
 }
 
@@ -6412,33 +6041,6 @@
   return DeleteProperty(&it, language_mode);
 }
 
-
-// ES6 7.1.14
-// static
-MaybeHandle<Object> Object::ToPropertyKey(Isolate* isolate,
-                                          Handle<Object> value) {
-  // 1. Let key be ToPrimitive(argument, hint String).
-  MaybeHandle<Object> maybe_key =
-      Object::ToPrimitive(value, ToPrimitiveHint::kString);
-  // 2. ReturnIfAbrupt(key).
-  Handle<Object> key;
-  if (!maybe_key.ToHandle(&key)) return key;
-  // 3. If Type(key) is Symbol, then return key.
-  if (key->IsSymbol()) return key;
-  // 4. Return ToString(key).
-  // Extending spec'ed behavior, we'd be happy to return an element index.
-  if (key->IsSmi()) return key;
-  if (key->IsHeapNumber()) {
-    uint32_t uint_value;
-    if (value->ToArrayLength(&uint_value) &&
-        uint_value <= static_cast<uint32_t>(Smi::kMaxValue)) {
-      return handle(Smi::FromInt(static_cast<int>(uint_value)), isolate);
-    }
-  }
-  return Object::ToString(isolate, key);
-}
-
-
 // ES6 19.1.2.4
 // static
 Object* JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
@@ -6563,12 +6165,15 @@
     return JSProxy::DefineOwnProperty(isolate, Handle<JSProxy>::cast(object),
                                       key, desc, should_throw);
   }
+  if (object->IsJSTypedArray()) {
+    return JSTypedArray::DefineOwnProperty(
+        isolate, Handle<JSTypedArray>::cast(object), key, desc, should_throw);
+  }
   // TODO(neis): Special case for JSModuleNamespace?
 
   // OrdinaryDefineOwnProperty, by virtue of calling
-  // DefineOwnPropertyIgnoreAttributes, can handle arguments (ES6 9.4.4.2)
-  // and IntegerIndexedExotics (ES6 9.4.5.3), with one exception:
-  // TODO(jkummerow): Setting an indexed accessor on a typed array should throw.
+  // DefineOwnPropertyIgnoreAttributes, can handle arguments
+  // (ES#sec-arguments-exotic-objects-defineownproperty-p-desc).
   return OrdinaryDefineOwnProperty(isolate, Handle<JSObject>::cast(object), key,
                                    desc, should_throw);
 }
@@ -6970,7 +6575,6 @@
   return false;
 }
 
-
 bool PropertyKeyToArrayIndex(Handle<Object> index_obj, uint32_t* output) {
   return PropertyKeyToArrayLength(index_obj, output) && *output != kMaxUInt32;
 }
@@ -7300,12 +6904,12 @@
   if (it.IsFound()) {
     DCHECK_EQ(LookupIterator::DATA, it.state());
     DCHECK_EQ(DONT_ENUM, it.property_attributes());
-    it.WriteDataValue(value);
+    it.WriteDataValue(value, false);
     return Just(true);
   }
 
   Handle<NameDictionary> dict(proxy->property_dictionary());
-  PropertyDetails details(DONT_ENUM, DATA, 0, PropertyCellType::kNoCell);
+  PropertyDetails details(kData, DONT_ENUM, 0, PropertyCellType::kNoCell);
   Handle<NameDictionary> result =
       NameDictionary::Add(dict, private_name, value, details);
   if (!dict.is_identical_to(result)) proxy->set_properties(*result);
@@ -7330,7 +6934,13 @@
 
 Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
                                                  PropertyDescriptor* desc) {
-  if (it->state() == LookupIterator::INTERCEPTOR) {
+  bool has_access = true;
+  if (it->state() == LookupIterator::ACCESS_CHECK) {
+    has_access = it->HasAccess() || JSObject::AllCanRead(it);
+    it->Next();
+  }
+
+  if (has_access && it->state() == LookupIterator::INTERCEPTOR) {
     Isolate* isolate = it->isolate();
     Handle<InterceptorInfo> interceptor = it->GetInterceptor();
     if (!interceptor->descriptor()->IsUndefined(isolate)) {
@@ -7374,6 +6984,7 @@
       }
     }
   }
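+  // Restart the lookup; it may have been advanced past an access check above.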
+  it->Restart();
   return Just(false);
 }
 }  // namespace
@@ -7998,7 +7609,7 @@
       PropertyDetails details = dictionary->DetailsAt(i);
       int attrs = attributes;
       // READ_ONLY is an invalid attribute for JS setters/getters.
-      if ((attributes & READ_ONLY) && details.type() == ACCESSOR_CONSTANT) {
+      if ((attributes & READ_ONLY) && details.kind() == kAccessor) {
         Object* v = dictionary->ValueAt(i);
         if (v->IsPropertyCell()) v = PropertyCell::cast(v)->value();
         if (v->IsAccessorPair()) attrs &= ~READ_ONLY;
@@ -8238,12 +7849,14 @@
       int limit = copy->map()->NumberOfOwnDescriptors();
       for (int i = 0; i < limit; i++) {
         PropertyDetails details = descriptors->GetDetails(i);
-        if (details.type() != DATA) continue;
+        if (details.location() != kField) continue;
+        DCHECK_EQ(kData, details.kind());
         FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i);
         if (object->IsUnboxedDoubleField(index)) {
           if (copying) {
-            double value = object->RawFastDoublePropertyAt(index);
-            copy->RawFastDoublePropertyAtPut(index, value);
+            // Ensure that all bits of the double value are preserved.
+            uint64_t value = object->RawFastDoublePropertyAsBitsAt(index);
+            copy->RawFastDoublePropertyAsBitsAtPut(index, value);
           }
         } else {
           Handle<Object> value(object->RawFastPropertyAt(index), isolate);
@@ -8563,8 +8176,8 @@
   // Wrapped string elements aren't explicitly stored in the elements backing
   // store, but are loaded indirectly from the underlying string.
   return !IsStringWrapperElementsKind(elements_kind()) &&
-         instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
-         !has_hidden_prototype() && !is_dictionary_map();
+         !IsSpecialReceiverMap() && !has_hidden_prototype() &&
+         !is_dictionary_map();
 }
 
 MUST_USE_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
@@ -8832,7 +8445,9 @@
     DescriptorArray* descs = map()->instance_descriptors();
     bool value_is_number = value->IsNumber();
     for (int i = 0; i < number_of_own_descriptors; i++) {
-      if (descs->GetType(i) == DATA) {
+      PropertyDetails details = descs->GetDetails(i);
+      if (details.location() == kField) {
+        DCHECK_EQ(kData, details.kind());
         FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
         if (IsUnboxedDoubleField(field_index)) {
           if (value_is_number) {
@@ -8852,9 +8467,12 @@
             return descs->GetKey(i);
           }
         }
-      } else if (descs->GetType(i) == DATA_CONSTANT) {
-        if (descs->GetConstant(i) == value) {
-          return descs->GetKey(i);
+      } else {
+        DCHECK_EQ(kDescriptor, details.location());
+        if (details.kind() == kData) {
+          if (descs->GetValue(i) == value) {
+            return descs->GetKey(i);
+          }
         }
       }
     }
@@ -9000,12 +8618,13 @@
   Isolate* isolate = map->GetIsolate();
   // Strict function maps have Function as a constructor but the
   // Function's initial map is a sloppy function map. Same holds for
-  // GeneratorFunction and its initial map.
+  // GeneratorFunction / AsyncFunction and their initial maps.
   Object* constructor = map->GetConstructor();
   DCHECK(constructor->IsJSFunction());
   DCHECK(*map == JSFunction::cast(constructor)->initial_map() ||
          *map == *isolate->strict_function_map() ||
-         *map == *isolate->strict_generator_function_map());
+         *map == *isolate->generator_function_map() ||
+         *map == *isolate->async_function_map());
 #endif
   // Initial maps must always own their descriptors, and their descriptor
   // arrays must not contain descriptors that do not belong to the map.
@@ -9161,13 +8780,7 @@
       CHECK(maybe_name.ToHandle(&name));
       ConnectTransition(map, result, name, simple_flag);
     } else {
-      int length = descriptors->number_of_descriptors();
-      for (int i = 0; i < length; i++) {
-        descriptors->SetRepresentation(i, Representation::Tagged());
-        if (descriptors->GetDetails(i).type() == DATA) {
-          descriptors->SetValue(i, FieldType::Any());
-        }
-      }
+      descriptors->GeneralizeAllFields();
       result->InitializeDescriptors(*descriptors,
                                     LayoutDescriptor::FastPointerLayout());
     }
@@ -9449,42 +9062,45 @@
   return new_map;
 }
 
-FieldType* DescriptorArray::GetFieldType(int descriptor_number) {
-  DCHECK(GetDetails(descriptor_number).location() == kField);
-  Object* value = GetValue(descriptor_number);
-  if (value->IsWeakCell()) {
-    if (WeakCell::cast(value)->cleared()) return FieldType::None();
-    value = WeakCell::cast(value)->value();
-  }
-  return FieldType::cast(value);
-}
-
 namespace {
 
-bool CanHoldValue(DescriptorArray* descriptors, int descriptor, Object* value) {
+bool CanHoldValue(DescriptorArray* descriptors, int descriptor,
+                  PropertyConstness constness, Object* value) {
   PropertyDetails details = descriptors->GetDetails(descriptor);
-  switch (details.type()) {
-    case DATA:
-      return value->FitsRepresentation(details.representation()) &&
+  if (details.location() == kField) {
+    if (details.kind() == kData) {
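+      // A data field can hold the value if its constness, representation and
+      // field type are already general enough for the requested store.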
+      return IsGeneralizableTo(constness, details.constness()) &&
+             value->FitsRepresentation(details.representation()) &&
              descriptors->GetFieldType(descriptor)->NowContains(value);
-
-    case DATA_CONSTANT:
-      DCHECK(descriptors->GetConstant(descriptor) != value ||
-             value->FitsRepresentation(details.representation()));
-      return descriptors->GetConstant(descriptor) == value;
-
-    case ACCESSOR:
-    case ACCESSOR_CONSTANT:
+    } else {
+      DCHECK_EQ(kAccessor, details.kind());
       return false;
-  }
+    }
 
+  } else {
+    DCHECK_EQ(kDescriptor, details.location());
+    DCHECK_EQ(kConst, details.constness());
+    if (details.kind() == kData) {
+      DCHECK(!FLAG_track_constant_fields);
+      DCHECK(descriptors->GetValue(descriptor) != value ||
+             value->FitsRepresentation(details.representation()));
+      return descriptors->GetValue(descriptor) == value;
+    } else {
+      DCHECK_EQ(kAccessor, details.kind());
+      return false;
+    }
+  }
   UNREACHABLE();
   return false;
 }
 
 Handle<Map> UpdateDescriptorForValue(Handle<Map> map, int descriptor,
+                                     PropertyConstness constness,
                                      Handle<Object> value) {
-  if (CanHoldValue(map->instance_descriptors(), descriptor, *value)) return map;
+  if (CanHoldValue(map->instance_descriptors(), descriptor, constness,
+                   *value)) {
+    return map;
+  }
 
   Isolate* isolate = map->GetIsolate();
   PropertyAttributes attributes =
@@ -9492,25 +9108,27 @@
   Representation representation = value->OptimalRepresentation();
   Handle<FieldType> type = value->OptimalType(isolate, representation);
 
-  return Map::ReconfigureProperty(map, descriptor, kData, attributes,
-                                  representation, type, FORCE_FIELD);
+  MapUpdater mu(isolate, map);
+  return mu.ReconfigureToDataField(descriptor, attributes, constness,
+                                   representation, type);
 }
 
 }  // namespace
 
 // static
 Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor,
+                                        PropertyConstness constness,
                                         Handle<Object> value) {
   // Dictionaries can store any property value.
   DCHECK(!map->is_dictionary_map());
   // Update to the newest map before storing the property.
-  return UpdateDescriptorForValue(Update(map), descriptor, value);
+  return UpdateDescriptorForValue(Update(map), descriptor, constness, value);
 }
 
-
 Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
                                           Handle<Object> value,
                                           PropertyAttributes attributes,
+                                          PropertyConstness constness,
                                           StoreFromKeyed store_mode) {
   RuntimeCallTimerScope stats_scope(
       *map, map->is_prototype_map()
@@ -9533,19 +9151,19 @@
                               ->GetDetails(descriptor)
                               .attributes());
 
-    return UpdateDescriptorForValue(transition, descriptor, value);
+    return UpdateDescriptorForValue(transition, descriptor, constness, value);
   }
 
   TransitionFlag flag = INSERT_TRANSITION;
   MaybeHandle<Map> maybe_map;
-  if (value->IsJSFunction()) {
+  if (!FLAG_track_constant_fields && value->IsJSFunction()) {
     maybe_map = Map::CopyWithConstant(map, name, value, attributes, flag);
   } else if (!map->TooManyFastProperties(store_mode)) {
     Isolate* isolate = name->GetIsolate();
     Representation representation = value->OptimalRepresentation();
     Handle<FieldType> type = value->OptimalType(isolate, representation);
-    maybe_map =
-        Map::CopyWithField(map, name, type, attributes, representation, flag);
+    maybe_map = Map::CopyWithField(map, name, type, attributes, constness,
+                                   representation, flag);
   }
 
   Handle<Map> result;
@@ -9576,9 +9194,9 @@
   if (!map->GetBackPointer()->IsMap()) {
     // There is no benefit from reconstructing the transition tree for maps
     // without back pointers.
-    return CopyGeneralizeAllRepresentations(
-        map, map->elements_kind(), descriptor, FORCE_FIELD, kind, attributes,
-        "GenAll_AttributesMismatchProtoMap");
+    return CopyGeneralizeAllFields(map, map->elements_kind(), descriptor, kind,
+                                   attributes,
+                                   "GenAll_AttributesMismatchProtoMap");
   }
 
   if (FLAG_trace_generalization) {
@@ -9586,9 +9204,12 @@
   }
 
   Isolate* isolate = map->GetIsolate();
-  Handle<Map> new_map = ReconfigureProperty(
-      map, descriptor, kind, attributes, Representation::None(),
-      FieldType::None(isolate), FORCE_FIELD);
+
+  MapUpdater mu(isolate, map);
+  DCHECK_EQ(kData, kind);  // Only kData case is supported so far.
+  Handle<Map> new_map = mu.ReconfigureToDataField(
+      descriptor, attributes, kDefaultFieldConstness, Representation::None(),
+      FieldType::None(isolate));
   return new_map;
 }
 
@@ -9648,7 +9269,7 @@
       return Map::Normalize(map, mode, "AccessorsOverwritingNonLast");
     }
     PropertyDetails old_details = old_descriptors->GetDetails(descriptor);
-    if (old_details.type() != ACCESSOR_CONSTANT) {
+    if (old_details.kind() != kAccessor) {
       return Map::Normalize(map, mode, "AccessorsOverwritingNonAccessors");
     }
 
@@ -9690,8 +9311,8 @@
   pair->SetComponents(*getter, *setter);
 
   TransitionFlag flag = INSERT_TRANSITION;
-  AccessorConstantDescriptor new_desc(name, pair, attributes);
-  return Map::CopyInsertDescriptor(map, &new_desc, flag);
+  Descriptor d = Descriptor::AccessorConstant(name, pair, attributes);
+  return Map::CopyInsertDescriptor(map, &d, flag);
 }
 
 
@@ -9770,15 +9391,13 @@
       if (!key->IsPrivate()) {
         int mask = DONT_DELETE | DONT_ENUM;
         // READ_ONLY is an invalid attribute for JS setters/getters.
-        if (details.type() != ACCESSOR_CONSTANT || !value->IsAccessorPair()) {
+        if (details.kind() != kAccessor || !value->IsAccessorPair()) {
           mask |= READ_ONLY;
         }
         details = details.CopyAddAttributes(
             static_cast<PropertyAttributes>(attributes & mask));
       }
-      Descriptor inner_desc(
-          handle(key), handle(value, desc->GetIsolate()), details);
-      descriptors->SetDescriptor(i, &inner_desc);
+      descriptors->Set(i, key, value, details);
     }
   } else {
     for (int i = 0; i < size; ++i) {
@@ -9799,7 +9418,8 @@
     }
     PropertyDetails details = GetDetails(i);
     PropertyDetails other_details = desc->GetDetails(i);
-    if (details.type() != other_details.type() ||
+    if (details.kind() != other_details.kind() ||
+        details.location() != other_details.location() ||
         !details.representation().Equals(other_details.representation())) {
       return false;
     }
@@ -10113,8 +9733,7 @@
 void FixedArray::Shrink(int new_length) {
   DCHECK(0 <= new_length && new_length <= length());
   if (new_length < length()) {
-    GetHeap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
-        this, length() - new_length);
+    GetHeap()->RightTrimFixedArray(this, length() - new_length);
   }
 }
 
@@ -10127,7 +9746,6 @@
   }
 }
 
-
 #ifdef DEBUG
 bool FixedArray::IsEqualTo(FixedArray* other) {
   if (length() != other->length()) return false;
@@ -10308,6 +9926,12 @@
   return array;
 }
 
+Handle<ArrayList> ArrayList::New(Isolate* isolate, int size) {
+  Handle<ArrayList> result = Handle<ArrayList>::cast(
+      isolate->factory()->NewFixedArray(size + kFirstIndex));
+  result->SetLength(0);
+  return result;
+}
 
 bool ArrayList::IsFull() {
   int capacity = length();
@@ -10442,17 +10066,11 @@
   }
 }
 
-
 void DescriptorArray::CopyFrom(int index, DescriptorArray* src) {
-  Object* value = src->GetValue(index);
   PropertyDetails details = src->GetDetails(index);
-  Descriptor desc(handle(src->GetKey(index)),
-                  handle(value, src->GetIsolate()),
-                  details);
-  SetDescriptor(index, &desc);
+  Set(index, src->GetKey(index), src->GetValue(index), details);
 }
 
-
 void DescriptorArray::Sort() {
   // In-place heap sort.
   int len = number_of_descriptors();
@@ -10554,34 +10172,12 @@
 
 SharedFunctionInfo* DeoptimizationInputData::GetInlinedFunction(int index) {
   if (index == -1) {
-    return SharedFunctionInfo::cast(this->SharedFunctionInfo());
+    return SharedFunctionInfo::cast(SharedFunctionInfo());
   } else {
     return SharedFunctionInfo::cast(LiteralArray()->get(index));
   }
 }
 
-const int LiteralsArray::kFeedbackVectorOffset =
-    LiteralsArray::OffsetOfElementAt(LiteralsArray::kVectorIndex);
-
-const int LiteralsArray::kOffsetToFirstLiteral =
-    LiteralsArray::OffsetOfElementAt(LiteralsArray::kFirstLiteralIndex);
-
-// static
-Handle<LiteralsArray> LiteralsArray::New(Isolate* isolate,
-                                         Handle<TypeFeedbackVector> vector,
-                                         int number_of_literals,
-                                         PretenureFlag pretenure) {
-  if (vector->is_empty() && number_of_literals == 0) {
-    return Handle<LiteralsArray>::cast(
-        isolate->factory()->empty_literals_array());
-  }
-  Handle<FixedArray> literals = isolate->factory()->NewFixedArray(
-      number_of_literals + kFirstLiteralIndex, pretenure);
-  Handle<LiteralsArray> casted_literals = Handle<LiteralsArray>::cast(literals);
-  casted_literals->set_feedback_vector(*vector);
-  return casted_literals;
-}
-
 int HandlerTable::LookupRange(int pc_offset, int* data_out,
                               CatchPrediction* prediction_out) {
   int innermost_handler = -1;
@@ -10598,7 +10194,7 @@
     int handler_offset = HandlerOffsetField::decode(handler_field);
     CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
     int handler_data = Smi::cast(get(i + kRangeDataIndex))->value();
-    if (pc_offset > start_offset && pc_offset <= end_offset) {
+    if (pc_offset >= start_offset && pc_offset < end_offset) {
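+      // Handler ranges are half-open: [start_offset, end_offset).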
       DCHECK_GE(start_offset, innermost_start);
       DCHECK_LT(end_offset, innermost_end);
       innermost_handler = handler_offset;
@@ -10668,11 +10264,7 @@
   return isolate->factory()->NewSubString(string, left, right);
 }
 
-bool String::LooksValid() {
-  if (!GetIsolate()->heap()->Contains(this)) return false;
-  return true;
-}
-
+bool String::LooksValid() { return GetIsolate()->heap()->Contains(this); }
 
 // static
 MaybeHandle<String> Name::ToFunctionName(Handle<Name> name) {
@@ -10806,8 +10398,7 @@
     }
     string = cons->first();
     shape = StringShape(string);
-  }
-  if (shape.representation_tag() == kSlicedStringTag) {
+  } else if (shape.representation_tag() == kSlicedStringTag) {
     SlicedString* slice = SlicedString::cast(string);
     offset = slice->offset();
     string = slice->parent();
@@ -10815,6 +10406,13 @@
     DCHECK(shape.representation_tag() != kConsStringTag &&
            shape.representation_tag() != kSlicedStringTag);
   }
+  if (shape.representation_tag() == kThinStringTag) {
+    ThinString* thin = ThinString::cast(string);
+    string = thin->actual();
+    shape = StringShape(string);
+    DCHECK(!shape.IsCons());
+    DCHECK(!shape.IsSliced());
+  }
   if (shape.encoding_tag() == kOneByteStringTag) {
     const uint8_t* start;
     if (shape.representation_tag() == kSeqStringTag) {
@@ -10900,6 +10498,7 @@
       return slice->parent()->GetTwoByteData(start + slice->offset());
     }
     case kConsStringTag:
+    case kThinStringTag:
       UNREACHABLE();
       return NULL;
   }
@@ -11166,6 +10765,7 @@
   return 0;
 }
 
+uint16_t ThinString::ThinStringGet(int index) { return actual()->Get(index); }
 
 uint16_t SlicedString::SlicedStringGet(int index) {
   return parent()->Get(offset() + index);
@@ -11260,6 +10860,10 @@
         WriteToFlat(slice->parent(), sink, from + offset, to + offset);
         return;
       }
+      case kOneByteStringTag | kThinStringTag:
+      case kTwoByteStringTag | kThinStringTag:
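+        // ThinStrings are transparent wrappers; flatten the actual string.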
+        source = ThinString::cast(source)->actual();
+        break;
     }
   }
 }
@@ -11481,6 +11085,17 @@
   if (len != other->length()) return false;
   if (len == 0) return true;
 
+  // Fast check: if at least one ThinString is involved, dereference it/them
+  // and restart.
+  if (this->IsThinString() || other->IsThinString()) {
+    if (other->IsThinString()) other = ThinString::cast(other)->actual();
+    if (this->IsThinString()) {
+      return ThinString::cast(this)->actual()->Equals(other);
+    } else {
+      return this->Equals(other);
+    }
+  }
+
   // Fast check: if hash codes have been computed for both strings,
   // a fast negative check can be performed.
   if (HasHashCode() && other->HasHashCode()) {
@@ -11522,6 +11137,14 @@
   if (one_length != two->length()) return false;
   if (one_length == 0) return true;
 
+  // Fast check: if at least one ThinString is involved, dereference it/them
+  // and restart.
+  if (one->IsThinString() || two->IsThinString()) {
+    if (one->IsThinString()) one = handle(ThinString::cast(*one)->actual());
+    if (two->IsThinString()) two = handle(ThinString::cast(*two)->actual());
+    return String::Equals(one, two);
+  }
+
   // Fast check: if hash codes have been computed for both strings,
   // a fast negative check can be performed.
   if (one->HasHashCode() && two->HasHashCode()) {
@@ -11630,7 +11253,7 @@
 
 Object* String::IndexOf(Isolate* isolate, Handle<Object> receiver,
                         Handle<Object> search, Handle<Object> position) {
-  if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+  if (receiver->IsNullOrUndefined(isolate)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
                               isolate->factory()->NewStringFromAsciiChecked(
@@ -11647,11 +11270,9 @@
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
                                      Object::ToInteger(isolate, position));
 
-  double index = std::max(position->Number(), 0.0);
-  index = std::min(index, static_cast<double>(receiver_string->length()));
-
-  return Smi::FromInt(String::IndexOf(isolate, receiver_string, search_string,
-                                      static_cast<uint32_t>(index)));
+  uint32_t index = receiver_string->ToValidIndex(*position);
+  return Smi::FromInt(
+      String::IndexOf(isolate, receiver_string, search_string, index));
 }
 
 namespace {
@@ -11833,7 +11454,7 @@
 
 Object* String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
                             Handle<Object> search, Handle<Object> position) {
-  if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+  if (receiver->IsNullOrUndefined(isolate)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
                               isolate->factory()->NewStringFromAsciiChecked(
@@ -11857,11 +11478,7 @@
   } else {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
                                        Object::ToInteger(isolate, position));
-
-    double position_number = std::max(position->Number(), 0.0);
-    position_number = std::min(position_number,
-                               static_cast<double>(receiver_string->length()));
-    start_index = static_cast<uint32_t>(position_number);
+    start_index = receiver_string->ToValidIndex(*position);
   }
 
   uint32_t pattern_length = search_string->length();
@@ -12006,6 +11623,9 @@
 
 
 Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
+  Heap* heap = string->GetHeap();
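+  // Short-circuit: return the canonical empty string rather than truncating
+  // in place.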
+  if (new_length == 0) return heap->isolate()->factory()->empty_string();
+
   int new_size, old_size;
   int old_length = string->length();
   if (old_length <= new_length) return string;
@@ -12025,18 +11645,16 @@
   DCHECK_OBJECT_ALIGNED(start_of_string);
   DCHECK_OBJECT_ALIGNED(start_of_string + new_size);
 
-  Heap* heap = string->GetHeap();
   // Sizes are pointer size aligned, so that we can use filler objects
   // that are a multiple of pointer size.
   heap->CreateFillerObjectAt(start_of_string + new_size, delta,
                              ClearRecordedSlots::kNo);
-  heap->AdjustLiveBytes(*string, -delta, Heap::CONCURRENT_TO_SWEEPER);
+  heap->AdjustLiveBytes(*string, -delta);
 
   // We are storing the new length using release store after creating a filler
   // for the left-over space to avoid races with the sweeper thread.
   string->synchronized_set_length(new_length);
 
-  if (new_length == 0) return heap->isolate()->factory()->empty_string();
   return string;
 }
 
@@ -12209,7 +11827,9 @@
   int properties =
       mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other->GetInObjectProperties();
   return CheckEquivalent(this, other) && bit_field2() == other->bit_field2() &&
-         GetInObjectProperties() == properties;
+         GetInObjectProperties() == properties &&
+         JSObject::GetInternalFieldCount(this) ==
+             JSObject::GetInternalFieldCount(other);
 }
 
 
@@ -12283,60 +11903,33 @@
 }
 
 // static
-Handle<LiteralsArray> SharedFunctionInfo::FindOrCreateLiterals(
-    Handle<SharedFunctionInfo> shared, Handle<Context> native_context) {
-  Isolate* isolate = shared->GetIsolate();
-  CodeAndLiterals result =
-      shared->SearchOptimizedCodeMap(*native_context, BailoutId::None());
-  if (result.literals != nullptr) {
-    DCHECK(shared->feedback_metadata()->is_empty() ||
-           !result.literals->feedback_vector()->is_empty());
-    return handle(result.literals, isolate);
-  }
-
-  Handle<TypeFeedbackVector> feedback_vector =
-      TypeFeedbackVector::New(isolate, handle(shared->feedback_metadata()));
-  Handle<LiteralsArray> literals =
-      LiteralsArray::New(isolate, feedback_vector, shared->num_literals());
-  Handle<Code> code;
-  if (result.code != nullptr) {
-    code = Handle<Code>(result.code, isolate);
-  }
-  AddToOptimizedCodeMap(shared, native_context, code, literals,
-                        BailoutId::None());
-  return literals;
-}
-
-// static
 void SharedFunctionInfo::AddToOptimizedCodeMap(
     Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
-    MaybeHandle<Code> code, Handle<LiteralsArray> literals,
-    BailoutId osr_ast_id) {
+    Handle<Code> code, BailoutId osr_ast_id) {
   Isolate* isolate = shared->GetIsolate();
   if (isolate->serializer_enabled()) return;
-  DCHECK(code.is_null() ||
-         code.ToHandleChecked()->kind() == Code::OPTIMIZED_FUNCTION);
+  DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
   DCHECK(native_context->IsNativeContext());
-  STATIC_ASSERT(kEntryLength == 4);
+  STATIC_ASSERT(kEntryLength == 2);
   Handle<FixedArray> new_code_map;
   int entry;
 
+  if (!osr_ast_id.IsNone()) {
+    Context::AddToOptimizedCodeMap(native_context, shared, code, osr_ast_id);
+    return;
+  }
+
+  DCHECK(osr_ast_id.IsNone());
   if (shared->OptimizedCodeMapIsCleared()) {
     new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
     entry = kEntriesStart;
   } else {
     Handle<FixedArray> old_code_map(shared->optimized_code_map(), isolate);
-    entry = shared->SearchOptimizedCodeMapEntry(*native_context, osr_ast_id);
+    entry = shared->SearchOptimizedCodeMapEntry(*native_context);
     if (entry >= kEntriesStart) {
-      // Just set the code and literals of the entry.
-      if (!code.is_null()) {
-        Handle<WeakCell> code_cell =
-            isolate->factory()->NewWeakCell(code.ToHandleChecked());
-        old_code_map->set(entry + kCachedCodeOffset, *code_cell);
-      }
-      Handle<WeakCell> literals_cell =
-          isolate->factory()->NewWeakCell(literals);
-      old_code_map->set(entry + kLiteralsOffset, *literals_cell);
+      // Just set the code of the entry.
+      Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
+      old_code_map->set(entry + kCachedCodeOffset, *code_cell);
       return;
     }
 
@@ -12364,16 +11957,11 @@
     }
   }
 
-  Handle<WeakCell> code_cell =
-      code.is_null() ? isolate->factory()->empty_weak_cell()
-                     : isolate->factory()->NewWeakCell(code.ToHandleChecked());
-  Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
+  Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
   WeakCell* context_cell = native_context->self_weak_cell();
 
   new_code_map->set(entry + kContextOffset, context_cell);
   new_code_map->set(entry + kCachedCodeOffset, *code_cell);
-  new_code_map->set(entry + kLiteralsOffset, *literals_cell);
-  new_code_map->set(entry + kOsrAstIdOffset, Smi::FromInt(osr_ast_id.ToInt()));
 
 #ifdef DEBUG
   for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
@@ -12383,9 +11971,6 @@
     DCHECK(cell->cleared() ||
            (cell->value()->IsCode() &&
             Code::cast(cell->value())->kind() == Code::OPTIMIZED_FUNCTION));
-    cell = WeakCell::cast(new_code_map->get(i + kLiteralsOffset));
-    DCHECK(cell->cleared() || cell->value()->IsFixedArray());
-    DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
   }
 #endif
 
@@ -12405,81 +11990,62 @@
 void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
                                                    const char* reason) {
   DisallowHeapAllocation no_gc;
-  if (OptimizedCodeMapIsCleared()) return;
+  Isolate* isolate = GetIsolate();
+  bool found = false;
 
-  Heap* heap = GetHeap();
-  FixedArray* code_map = optimized_code_map();
-  int dst = kEntriesStart;
-  int length = code_map->length();
-  for (int src = kEntriesStart; src < length; src += kEntryLength) {
-    DCHECK(WeakCell::cast(code_map->get(src))->cleared() ||
-           WeakCell::cast(code_map->get(src))->value()->IsNativeContext());
-    if (WeakCell::cast(code_map->get(src + kCachedCodeOffset))->value() ==
-        optimized_code) {
-      BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
-      if (FLAG_trace_opt) {
-        PrintF("[evicting entry from optimizing code map (%s) for ", reason);
-        ShortPrint();
-        if (osr.IsNone()) {
+  if (!OptimizedCodeMapIsCleared()) {
+    Heap* heap = isolate->heap();
+    FixedArray* code_map = optimized_code_map();
+    int length = code_map->length();
+    for (int src = kEntriesStart; src < length; src += kEntryLength) {
+      DCHECK(WeakCell::cast(code_map->get(src))->cleared() ||
+             WeakCell::cast(code_map->get(src))->value()->IsNativeContext());
+      found = WeakCell::cast(code_map->get(src + kCachedCodeOffset))->value() ==
+              optimized_code;
+      if (found) {
+        if (FLAG_trace_opt) {
+          PrintF("[evicting entry from optimizing code map (%s) for ", reason);
+          ShortPrint();
           PrintF("]\n");
-        } else {
-          PrintF(" (osr ast id %d)]\n", osr.ToInt());
         }
+        // Just clear the code.
+        code_map->set(src + kCachedCodeOffset, heap->empty_weak_cell(),
+                      SKIP_WRITE_BARRIER);
       }
-      if (!osr.IsNone()) {
-        // Evict the src entry by not copying it to the dst entry.
-        continue;
-      }
-      // In case of non-OSR entry just clear the code in order to proceed
-      // sharing literals.
-      code_map->set(src + kCachedCodeOffset, heap->empty_weak_cell(),
-                    SKIP_WRITE_BARRIER);
-    }
-
-    // Keep the src entry by copying it to the dst entry.
-    if (dst != src) {
-      code_map->set(dst + kContextOffset, code_map->get(src + kContextOffset));
-      code_map->set(dst + kCachedCodeOffset,
-                    code_map->get(src + kCachedCodeOffset));
-      code_map->set(dst + kLiteralsOffset,
-                    code_map->get(src + kLiteralsOffset));
-      code_map->set(dst + kOsrAstIdOffset,
-                    code_map->get(src + kOsrAstIdOffset));
-    }
-    dst += kEntryLength;
-  }
-  if (dst != length) {
-    // Always trim even when array is cleared because of heap verifier.
-    heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
-                                                           length - dst);
-    if (code_map->length() == kEntriesStart) {
-      ClearOptimizedCodeMap();
     }
   }
-}
 
-
-void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
-  FixedArray* code_map = optimized_code_map();
-  DCHECK(shrink_by % kEntryLength == 0);
-  DCHECK(shrink_by <= code_map->length() - kEntriesStart);
-  // Always trim even when array is cleared because of heap verifier.
-  GetHeap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(code_map,
-                                                              shrink_by);
-  if (code_map->length() == kEntriesStart) {
-    ClearOptimizedCodeMap();
+  if (!found) {
+    // We didn't find the code in this map, so it must be OSR'd code.
+    isolate->EvictOSROptimizedCode(optimized_code, reason);
   }
 }
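
// A standalone model of the clearing loop above: entries are flat
// {context, code} pairs; eviction blanks the code slot of any matching
// entry and reports whether one was found. Simplified stand-in types only.
#include <vector>

struct CodeMapEntrySketch {
  const void* context;
  const void* code;  // nullptr models the empty weak cell
};

bool EvictFromSfiMapSketch(std::vector<CodeMapEntrySketch>& map,
                           const void* optimized_code) {
  bool found = false;
  for (CodeMapEntrySketch& entry : map) {
    if (entry.code == optimized_code) {
      entry.code = nullptr;  // just clear the code, keep the entry
      found = true;
    }
  }
  return found;  // when false, the caller tries the OSR map instead
}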
 
 // static
 void JSFunction::EnsureLiterals(Handle<JSFunction> function) {
   Handle<SharedFunctionInfo> shared(function->shared());
-  Handle<Context> native_context(function->context()->native_context());
-  if (function->literals() ==
-      function->GetIsolate()->heap()->empty_literals_array()) {
-    Handle<LiteralsArray> literals =
-        SharedFunctionInfo::FindOrCreateLiterals(shared, native_context);
-    function->set_literals(*literals);
+  Isolate* isolate = shared->GetIsolate();
+
+  FeedbackVectorState state = function->GetFeedbackVectorState(isolate);
+  switch (state) {
+    case TOP_LEVEL_SCRIPT_NEEDS_VECTOR: {
+      // A top-level script didn't get its literals installed.
+      Handle<FeedbackVector> feedback_vector =
+          FeedbackVector::New(isolate, shared);
+      Handle<Cell> new_cell =
+          isolate->factory()->NewOneClosureCell(feedback_vector);
+      function->set_feedback_vector_cell(*new_cell);
+      break;
+    }
+    case NEEDS_VECTOR: {
+      Handle<FeedbackVector> feedback_vector =
+          FeedbackVector::New(isolate, shared);
+      function->feedback_vector_cell()->set_value(*feedback_vector);
+      break;
+    }
+    case HAS_VECTOR:
+      // Nothing to do.
+      break;
   }
 }
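
// Condensed restatement of the three states handled above, with simplified
// stand-in types: the function either needs a fresh one-closure cell
// (top-level script), needs a vector stored into its existing cell, or is
// already done.
struct FeedbackCellSketch {
  const void* value = nullptr;  // nullptr models "no vector yet"
};

enum VectorStateSketch { kNeedsCell, kNeedsVector, kHasVector };

VectorStateSketch ClassifySketch(bool has_own_cell,
                                 const FeedbackCellSketch& cell) {
  if (!has_own_cell) return kNeedsCell;  // top-level script case
  return cell.value == nullptr ? kNeedsVector : kHasVector;
}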
 
@@ -12524,19 +12090,10 @@
 static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
   DisallowHeapAllocation no_gc;
   if (!object->HasFastProperties()) return false;
-  Map* map = object->map();
-  if (map->is_prototype_map()) return false;
-  DescriptorArray* descriptors = map->instance_descriptors();
-  for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
-    PropertyDetails details = descriptors->GetDetails(i);
-    if (details.location() == kDescriptor) continue;
-    if (details.representation().IsHeapObject() ||
-        details.representation().IsTagged()) {
-      FieldIndex index = FieldIndex::ForDescriptor(map, i);
-      if (object->RawFastPropertyAt(index)->IsJSFunction()) return true;
-    }
-  }
-  return false;
+  if (object->IsJSGlobalProxy()) return false;
+  if (object->GetIsolate()->bootstrapper()->IsActive()) return false;
+  return !object->map()->is_prototype_map() ||
+         !object->map()->should_be_fast_prototype_map();
 }
 
 // static
@@ -12551,8 +12108,10 @@
     if (!current->IsJSObject()) return;
     Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
     Map* current_map = current_obj->map();
-    if (current_map->is_prototype_map() &&
-        !current_map->should_be_fast_prototype_map()) {
+    if (current_map->is_prototype_map()) {
+      // If the map is already marked as should-be-fast, we're done; its
+      // prototypes will already have been marked as well.
+      if (current_map->should_be_fast_prototype_map()) return;
       Handle<Map> map(current_map);
       Map::SetShouldBeFastPrototypeMap(map, true, isolate);
       JSObject::OptimizeAsPrototype(current_obj, FAST_PROTOTYPE);
@@ -12758,9 +12317,17 @@
 // static
 Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
                                                         Isolate* isolate) {
-  Handle<Object> maybe_prototype(
-      map->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
-  if (!maybe_prototype->IsJSObject()) return Handle<Cell>::null();
+  Handle<Object> maybe_prototype;
+  if (map->IsJSGlobalObjectMap()) {
+    DCHECK(map->is_prototype_map());
+    // The global object is the prototype of a global proxy, so we can use
+    // its validity cell to guard against changes to the global object's
+    // prototype.
+    maybe_prototype = isolate->global_object();
+  } else {
+    maybe_prototype =
+        handle(map->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
+    if (!maybe_prototype->IsJSObject()) return Handle<Cell>::null();
+  }
   Handle<JSObject> prototype = Handle<JSObject>::cast(maybe_prototype);
   // Ensure the prototype is registered with its own prototypes so its cell
   // will be invalidated when necessary.
@@ -12997,6 +12564,7 @@
     case JS_API_OBJECT_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
     case JS_ARRAY_TYPE:
+    case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_DATA_VIEW_TYPE:
     case JS_DATE_TYPE:
@@ -13038,7 +12606,6 @@
     case ODDBALL_TYPE:
     case PROPERTY_CELL_TYPE:
     case SHARED_FUNCTION_INFO_TYPE:
-    case SIMD128_VALUE_TYPE:
     case SYMBOL_TYPE:
     case WEAK_CELL_TYPE:
 
@@ -13131,7 +12698,7 @@
 
     // Link initial map and constructor function if the new.target is actually a
     // subclass constructor.
-    if (IsSubclassConstructor(function->shared()->kind())) {
+    if (IsDerivedConstructor(function->shared()->kind())) {
       Handle<Object> prototype(function->instance_prototype(), isolate);
       InstanceType instance_type = constructor_initial_map->instance_type();
       DCHECK(CanSubclassHaveInobjectProperties(instance_type));
@@ -13281,8 +12848,7 @@
   Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
 
   // Check if {function} should hide its source code.
-  if (!shared_info->script()->IsScript() ||
-      Script::cast(shared_info->script())->hide_source()) {
+  if (!shared_info->IsUserJavaScript()) {
     return NativeCodeFunctionSourceString(shared_info);
   }
 
@@ -13304,6 +12870,10 @@
     return NativeCodeFunctionSourceString(shared_info);
   }
 
+  if (FLAG_harmony_function_tostring) {
+    return Handle<String>::cast(shared_info->GetSourceCodeHarmony());
+  }
+
   IncrementalStringBuilder builder(isolate);
   FunctionKind kind = shared_info->kind();
   if (!IsArrowFunction(kind)) {
@@ -13355,8 +12925,8 @@
     // position, but store it as negative value for lazy translation.
     StackTraceFrameIterator it(script->GetIsolate());
     if (!it.done() && it.is_javascript()) {
-      FrameSummary summary = FrameSummary::GetFirst(it.javascript_frame());
-      script->set_eval_from_shared(summary.function()->shared());
+      FrameSummary summary = FrameSummary::GetTop(it.javascript_frame());
+      script->set_eval_from_shared(summary.AsJavaScript().function()->shared());
       script->set_eval_from_position(-summary.code_offset());
       return;
     }
@@ -13409,15 +12979,7 @@
                              PositionInfo* info, OffsetFlag offset_flag) {
   // For wasm, we do not create an artificial line_ends array, but do the
   // translation directly.
-  if (script->type() == Script::TYPE_WASM) {
-    Handle<WasmCompiledModule> compiled_module(
-        WasmCompiledModule::cast(script->wasm_compiled_module()));
-    DCHECK_LE(0, position);
-    return wasm::GetPositionInfo(compiled_module,
-                                 static_cast<uint32_t>(position), info);
-  }
-
-  InitLineEnds(script);
+  if (script->type() != Script::TYPE_WASM) InitLineEnds(script);
   return script->GetPositionInfo(position, info, offset_flag);
 }
 
@@ -13453,6 +13015,16 @@
                              OffsetFlag offset_flag) const {
   DisallowHeapAllocation no_allocation;
 
+  // For wasm, we do not rely on the line_ends array, but do the translation
+  // directly.
+  if (type() == Script::TYPE_WASM) {
+    Handle<WasmCompiledModule> compiled_module(
+        WasmCompiledModule::cast(wasm_compiled_module()));
+    DCHECK_LE(0, position);
+    return compiled_module->GetPositionInfo(static_cast<uint32_t>(position),
+                                            info);
+  }
+
   if (line_ends()->IsUndefined(GetIsolate())) {
     // Slow mode: we do not have line_ends. We have to iterate through source.
     if (!GetPositionInfoSlow(this, position, info)) return false;
@@ -13546,15 +13118,11 @@
   return info.line;
 }
 
-Handle<Object> Script::GetNameOrSourceURL(Handle<Script> script) {
-  Isolate* isolate = script->GetIsolate();
-
+Object* Script::GetNameOrSourceURL() {
+  Isolate* isolate = GetIsolate();
   // Keep in sync with ScriptNameOrSourceURL in messages.js.
-
-  if (!script->source_url()->IsUndefined(isolate)) {
-    return handle(script->source_url(), isolate);
-  }
-  return handle(script->name(), isolate);
+  if (!source_url()->IsUndefined(isolate)) return source_url();
+  return name();
 }
 
 
@@ -13583,53 +13151,68 @@
   return result;
 }
 
-
 MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
-    FunctionLiteral* fun) {
-  WeakFixedArray::Iterator iterator(shared_function_infos());
-  SharedFunctionInfo* shared;
-  while ((shared = iterator.Next<SharedFunctionInfo>())) {
-    if (fun->function_token_position() == shared->function_token_position() &&
-        fun->start_position() == shared->start_position() &&
-        fun->end_position() == shared->end_position()) {
-      return Handle<SharedFunctionInfo>(shared);
-    }
+    Isolate* isolate, const FunctionLiteral* fun) {
+  DCHECK_NE(fun->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
+  DCHECK_LT(fun->function_literal_id(), shared_function_infos()->length());
+  Object* shared = shared_function_infos()->get(fun->function_literal_id());
+  if (shared->IsUndefined(isolate) || WeakCell::cast(shared)->cleared()) {
+    return MaybeHandle<SharedFunctionInfo>();
   }
-  return MaybeHandle<SharedFunctionInfo>();
+  return handle(SharedFunctionInfo::cast(WeakCell::cast(shared)->value()));
 }
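
// Shape of the new O(1) lookup above: shared_function_infos() acts as a
// dense array indexed by function_literal_id, holding weak slots that may
// be empty or cleared. Simplified stand-in types.
struct WeakSlotSketch {
  const void* value;  // nullptr models undefined or a cleared WeakCell
};

const void* FindSharedByLiteralIdSketch(const WeakSlotSketch* infos,
                                        int length, int literal_id) {
  if (literal_id < 0 || literal_id >= length) return nullptr;
  return infos[literal_id].value;  // direct index, no linear scan
}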
 
-
 Script::Iterator::Iterator(Isolate* isolate)
     : iterator_(isolate->heap()->script_list()) {}
 
 
 Script* Script::Iterator::Next() { return iterator_.Next<Script>(); }
 
+SharedFunctionInfo::ScriptIterator::ScriptIterator(Handle<Script> script)
+    : ScriptIterator(script->GetIsolate(),
+                     handle(script->shared_function_infos())) {}
 
-SharedFunctionInfo::Iterator::Iterator(Isolate* isolate)
-    : script_iterator_(isolate),
-      sfi_iterator_(isolate->heap()->noscript_shared_function_infos()) {}
+SharedFunctionInfo::ScriptIterator::ScriptIterator(
+    Isolate* isolate, Handle<FixedArray> shared_function_infos)
+    : isolate_(isolate),
+      shared_function_infos_(shared_function_infos),
+      index_(0) {}
 
-
-bool SharedFunctionInfo::Iterator::NextScript() {
-  Script* script = script_iterator_.Next();
-  if (script == NULL) return false;
-  sfi_iterator_.Reset(script->shared_function_infos());
-  return true;
+SharedFunctionInfo* SharedFunctionInfo::ScriptIterator::Next() {
+  while (index_ < shared_function_infos_->length()) {
+    Object* raw = shared_function_infos_->get(index_++);
+    if (raw->IsUndefined(isolate_) || WeakCell::cast(raw)->cleared()) continue;
+    return SharedFunctionInfo::cast(WeakCell::cast(raw)->value());
+  }
+  return nullptr;
 }
 
+void SharedFunctionInfo::ScriptIterator::Reset(Handle<Script> script) {
+  shared_function_infos_ = handle(script->shared_function_infos());
+  index_ = 0;
+}
 
-SharedFunctionInfo* SharedFunctionInfo::Iterator::Next() {
-  do {
-    SharedFunctionInfo* next = sfi_iterator_.Next<SharedFunctionInfo>();
-    if (next != NULL) return next;
-  } while (NextScript());
-  return NULL;
+SharedFunctionInfo::GlobalIterator::GlobalIterator(Isolate* isolate)
+    : script_iterator_(isolate),
+      noscript_sfi_iterator_(isolate->heap()->noscript_shared_function_infos()),
+      sfi_iterator_(handle(script_iterator_.Next(), isolate)) {}
+
+SharedFunctionInfo* SharedFunctionInfo::GlobalIterator::Next() {
+  SharedFunctionInfo* next = noscript_sfi_iterator_.Next<SharedFunctionInfo>();
+  if (next != nullptr) return next;
+  for (;;) {
+    next = sfi_iterator_.Next();
+    if (next != nullptr) return next;
+    Script* next_script = script_iterator_.Next();
+    if (next_script == nullptr) return nullptr;
+    sfi_iterator_.Reset(handle(next_script));
+  }
 }
 
 
 void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
                                    Handle<Object> script_object) {
+  DCHECK_NE(shared->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
   if (shared->script() == *script_object) return;
   Isolate* isolate = shared->GetIsolate();
 
@@ -13637,39 +13220,52 @@
   // the shared function info may be temporarily in two lists.
   // This is okay because the gc-time processing of these lists can tolerate
   // duplicates.
-  Handle<Object> list;
   if (script_object->IsScript()) {
     Handle<Script> script = Handle<Script>::cast(script_object);
-    list = handle(script->shared_function_infos(), isolate);
+    Handle<FixedArray> list = handle(script->shared_function_infos(), isolate);
+#ifdef DEBUG
+    DCHECK_LT(shared->function_literal_id(), list->length());
+    if (list->get(shared->function_literal_id())->IsWeakCell() &&
+        !WeakCell::cast(list->get(shared->function_literal_id()))->cleared()) {
+      DCHECK(
+          WeakCell::cast(list->get(shared->function_literal_id()))->value() ==
+          *shared);
+    }
+#endif
+    Handle<WeakCell> cell = isolate->factory()->NewWeakCell(shared);
+    list->set(shared->function_literal_id(), *cell);
   } else {
-    list = isolate->factory()->noscript_shared_function_infos();
-  }
+    Handle<Object> list = isolate->factory()->noscript_shared_function_infos();
 
 #ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    WeakFixedArray::Iterator iterator(*list);
-    SharedFunctionInfo* next;
-    while ((next = iterator.Next<SharedFunctionInfo>())) {
-      DCHECK_NE(next, *shared);
+    if (FLAG_enable_slow_asserts) {
+      WeakFixedArray::Iterator iterator(*list);
+      SharedFunctionInfo* next;
+      while ((next = iterator.Next<SharedFunctionInfo>())) {
+        DCHECK_NE(next, *shared);
+      }
     }
-  }
 #endif  // DEBUG
-  list = WeakFixedArray::Add(list, shared);
 
-  if (script_object->IsScript()) {
-    Handle<Script> script = Handle<Script>::cast(script_object);
-    script->set_shared_function_infos(*list);
-  } else {
+    list = WeakFixedArray::Add(list, shared);
+
     isolate->heap()->SetRootNoScriptSharedFunctionInfos(*list);
   }
 
-  // Remove shared function info from old script's list.
   if (shared->script()->IsScript()) {
+    // Remove shared function info from old script's list.
     Script* old_script = Script::cast(shared->script());
-    if (old_script->shared_function_infos()->IsWeakFixedArray()) {
-      WeakFixedArray* list =
-          WeakFixedArray::cast(old_script->shared_function_infos());
-      list->Remove(shared);
+
+    // Due to LiveEdit, the old_script may not know about this
+    // SharedFunctionInfo, so we have to guard against that.
+    Handle<FixedArray> infos(old_script->shared_function_infos(), isolate);
+    if (shared->function_literal_id() < infos->length()) {
+      Object* raw = old_script->shared_function_infos()->get(
+          shared->function_literal_id());
+      if (!raw->IsWeakCell() || WeakCell::cast(raw)->value() == *shared) {
+        old_script->shared_function_infos()->set(
+            shared->function_literal_id(), isolate->heap()->undefined_value());
+      }
     }
   } else {
     // Remove shared function info from root array.
@@ -13688,6 +13284,16 @@
   return String::cast(n);
 }
 
+bool SharedFunctionInfo::HasNoSideEffect() {
+  if (!computed_has_no_side_effect()) {
+    DisallowHeapAllocation not_handlified;
+    Handle<SharedFunctionInfo> info(this);
+    set_has_no_side_effect(DebugEvaluate::FunctionHasNoSideEffect(info));
+    set_computed_has_no_side_effect(true);
+  }
+  return has_no_side_effect();
+}
+
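
// The compute-once pattern above in miniature: cache both the result and a
// "computed" bit so that false answers are remembered too.
struct SideEffectCacheSketch {
  bool computed = false;
  bool no_side_effect = false;

  bool Get(bool (*compute)()) {
    if (!computed) {
      no_side_effect = compute();
      computed = true;  // set after computing, as in the method above
    }
    return no_side_effect;
  }
};
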
 // The filter is a pattern that matches function names in this way:
 //   "*"      all; the default
 //   "-"      all but the top-level function
@@ -13738,6 +13344,15 @@
       source, start_position(), end_position());
 }
 
+Handle<Object> SharedFunctionInfo::GetSourceCodeHarmony() {
+  Isolate* isolate = GetIsolate();
+  if (!HasSourceCode()) return isolate->factory()->undefined_value();
+  Handle<String> script_source(String::cast(Script::cast(script())->source()));
+  int start_pos = function_token_position();
+  if (start_pos == kNoSourcePosition) start_pos = start_position();
+  return isolate->factory()->NewSubString(script_source, start_pos,
+                                          end_position());
+}
 
 bool SharedFunctionInfo::IsInlineable() {
   // Check that the function has a script associated with it.
@@ -13790,7 +13405,7 @@
     JSFunction* func = JSFunction::cast(current);
     SharedFunctionInfo* shared = func->shared();
     expected_nof_properties += shared->expected_nof_properties();
-    if (!IsSubclassConstructor(shared->kind())) {
+    if (!IsDerivedConstructor(shared->kind())) {
       break;
     }
   }
@@ -13933,8 +13548,6 @@
   shared_info->set_language_mode(lit->language_mode());
   shared_info->set_uses_arguments(lit->scope()->arguments() != NULL);
   shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
-  shared_info->set_is_function(lit->is_function());
-  shared_info->set_never_compiled(true);
   shared_info->set_kind(lit->kind());
   if (!IsConstructable(lit->kind(), lit->language_mode())) {
     shared_info->SetConstructStub(
@@ -13942,9 +13555,7 @@
   }
   shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
   shared_info->set_asm_function(lit->scope()->asm_function());
-  shared_info->set_requires_class_field_init(lit->requires_class_field_init());
-  shared_info->set_is_class_field_initializer(
-      lit->is_class_field_initializer());
+  shared_info->set_function_literal_id(lit->function_literal_id());
   SetExpectedNofPropertiesFromEstimate(shared_info, lit);
 }
 
@@ -13999,19 +13610,15 @@
   }
 }
 
-
-int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context,
-                                                    BailoutId osr_ast_id) {
+int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context) {
   DisallowHeapAllocation no_gc;
   DCHECK(native_context->IsNativeContext());
   if (!OptimizedCodeMapIsCleared()) {
     FixedArray* optimized_code_map = this->optimized_code_map();
     int length = optimized_code_map->length();
-    Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
     for (int i = kEntriesStart; i < length; i += kEntryLength) {
       if (WeakCell::cast(optimized_code_map->get(i + kContextOffset))
-                  ->value() == native_context &&
-          optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
+              ->value() == native_context) {
         return i;
       }
     }
@@ -14031,20 +13638,20 @@
   }
 }
 
-CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
-    Context* native_context, BailoutId osr_ast_id) {
-  CodeAndLiterals result = {nullptr, nullptr};
-  int entry = SearchOptimizedCodeMapEntry(native_context, osr_ast_id);
+Code* SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context,
+                                                 BailoutId osr_ast_id) {
+  Code* result = nullptr;
+  if (!osr_ast_id.IsNone()) {
+    return native_context->SearchOptimizedCodeMap(this, osr_ast_id);
+  }
+
+  DCHECK(osr_ast_id.IsNone());
+  int entry = SearchOptimizedCodeMapEntry(native_context);
   if (entry != kNotFound) {
     FixedArray* code_map = optimized_code_map();
     DCHECK_LE(entry + kEntryLength, code_map->length());
     WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
-    WeakCell* literals_cell =
-        WeakCell::cast(code_map->get(entry + kLiteralsOffset));
-
-    result = {cell->cleared() ? nullptr : Code::cast(cell->value()),
-              literals_cell->cleared() ? nullptr : LiteralsArray::cast(
-                                                       literals_cell->value())};
+    result = cell->cleared() ? nullptr : Code::cast(cell->value());
   }
   return result;
 }
@@ -14289,7 +13896,8 @@
     RelocInfo* info = it.rinfo();
     Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
     if (target->is_inline_cache_stub()) {
-      IC::Clear(this->GetIsolate(), info->pc(), info->host()->constant_pool());
+      ICUtility::Clear(this->GetIsolate(), info->pc(),
+                       info->host()->constant_pool());
     }
   }
 }
@@ -14324,11 +13932,10 @@
 }
 
 void JSFunction::ClearTypeFeedbackInfo() {
-  feedback_vector()->ClearSlots(shared());
-}
-
-void JSFunction::ClearTypeFeedbackInfoAtGCTime() {
-  feedback_vector()->ClearSlotsAtGCTime(shared());
+  if (feedback_vector_cell()->value()->IsFeedbackVector()) {
+    FeedbackVector* vector = feedback_vector();
+    vector->ClearSlots(this);
+  }
 }
 
 BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
@@ -14353,21 +13960,13 @@
   return 0;
 }
 
-int Code::LookupRangeInHandlerTable(int code_offset, int* data,
-                                    HandlerTable::CatchPrediction* prediction) {
-  DCHECK(!is_optimized_code());
-  HandlerTable* table = HandlerTable::cast(handler_table());
-  return table->LookupRange(code_offset, data, prediction);
-}
-
 void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
-  PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge, NO_MARKING_PARITY);
+  PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge);
 }
 
 
 void Code::MarkCodeAsExecuted(byte* sequence, Isolate* isolate) {
-  PatchPlatformCodeAge(isolate, sequence, kExecutedOnceCodeAge,
-      NO_MARKING_PARITY);
+  PatchPlatformCodeAge(isolate, sequence, kExecutedOnceCodeAge);
 }
 
 
@@ -14401,28 +14000,25 @@
 void Code::PreAge(Isolate* isolate) {
   byte* sequence = FindCodeAgeSequence();
   if (sequence != NULL) {
-    PatchPlatformCodeAge(isolate, sequence, kPreAgedCodeAge, NO_MARKING_PARITY);
+    PatchPlatformCodeAge(isolate, sequence, kPreAgedCodeAge);
   }
 }
 
 void Code::MarkToBeExecutedOnce(Isolate* isolate) {
   byte* sequence = FindCodeAgeSequence();
   if (sequence != NULL) {
-    PatchPlatformCodeAge(isolate, sequence, kToBeExecutedOnceCodeAge,
-                         NO_MARKING_PARITY);
+    PatchPlatformCodeAge(isolate, sequence, kToBeExecutedOnceCodeAge);
   }
 }
 
-void Code::MakeOlder(MarkingParity current_parity) {
+void Code::MakeOlder() {
   byte* sequence = FindCodeAgeSequence();
   if (sequence != NULL) {
-    Age age;
-    MarkingParity code_parity;
     Isolate* isolate = GetIsolate();
-    GetCodeAgeAndParity(isolate, sequence, &age, &code_parity);
+    Age age = GetCodeAge(isolate, sequence);
     Age next_age = NextAge(age);
-    if (age != next_age && code_parity != current_parity) {
-      PatchPlatformCodeAge(isolate, sequence, next_age, current_parity);
+    if (age != next_age) {
+      PatchPlatformCodeAge(isolate, sequence, next_age);
     }
   }
 }
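
// The simplified aging transition above, with marking parity removed: just
// advance to the next age unless already saturated. Age values illustrative,
// not the real CODE_AGE_LIST enumerators.
enum CodeAgeSketch { kNoAgeSketch = 0, kLastCodeAgeSketch = 6 };

int NextAgeSketch(int age) {
  return age < kLastCodeAgeSketch ? age + 1 : age;
}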
@@ -14448,77 +14044,47 @@
   if (sequence == NULL) {
     return kNoAgeCodeAge;
   }
-  Age age;
-  MarkingParity parity;
-  GetCodeAgeAndParity(GetIsolate(), sequence, &age, &parity);
-  return age;
+  return GetCodeAge(GetIsolate(), sequence);
 }
 
-
-void Code::GetCodeAgeAndParity(Code* code, Age* age,
-                               MarkingParity* parity) {
+Code::Age Code::GetAgeOfCodeAgeStub(Code* code) {
   Isolate* isolate = code->GetIsolate();
   Builtins* builtins = isolate->builtins();
-  Code* stub = NULL;
-#define HANDLE_CODE_AGE(AGE)                                            \
-  stub = *builtins->Make##AGE##CodeYoungAgainEvenMarking();             \
-  if (code == stub) {                                                   \
-    *age = k##AGE##CodeAge;                                             \
-    *parity = EVEN_MARKING_PARITY;                                      \
-    return;                                                             \
-  }                                                                     \
-  stub = *builtins->Make##AGE##CodeYoungAgainOddMarking();              \
-  if (code == stub) {                                                   \
-    *age = k##AGE##CodeAge;                                             \
-    *parity = ODD_MARKING_PARITY;                                       \
-    return;                                                             \
+#define HANDLE_CODE_AGE(AGE)                            \
+  if (code == *builtins->Make##AGE##CodeYoungAgain()) { \
+    return k##AGE##CodeAge;                             \
   }
   CODE_AGE_LIST(HANDLE_CODE_AGE)
 #undef HANDLE_CODE_AGE
-  stub = *builtins->MarkCodeAsExecutedOnce();
-  if (code == stub) {
-    *age = kNotExecutedCodeAge;
-    *parity = NO_MARKING_PARITY;
-    return;
+  if (code == *builtins->MarkCodeAsExecutedOnce()) {
+    return kNotExecutedCodeAge;
   }
-  stub = *builtins->MarkCodeAsExecutedTwice();
-  if (code == stub) {
-    *age = kExecutedOnceCodeAge;
-    *parity = NO_MARKING_PARITY;
-    return;
+  if (code == *builtins->MarkCodeAsExecutedTwice()) {
+    return kExecutedOnceCodeAge;
   }
-  stub = *builtins->MarkCodeAsToBeExecutedOnce();
-  if (code == stub) {
-    *age = kToBeExecutedOnceCodeAge;
-    *parity = NO_MARKING_PARITY;
-    return;
+  if (code == *builtins->MarkCodeAsToBeExecutedOnce()) {
+    return kToBeExecutedOnceCodeAge;
   }
   UNREACHABLE();
+  return kNoAgeCodeAge;
 }
 
-
-Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
+Code* Code::GetCodeAgeStub(Isolate* isolate, Age age) {
   Builtins* builtins = isolate->builtins();
   switch (age) {
-#define HANDLE_CODE_AGE(AGE)                                            \
-    case k##AGE##CodeAge: {                                             \
-      Code* stub = parity == EVEN_MARKING_PARITY                        \
-          ? *builtins->Make##AGE##CodeYoungAgainEvenMarking()           \
-          : *builtins->Make##AGE##CodeYoungAgainOddMarking();           \
-      return stub;                                                      \
-    }
+#define HANDLE_CODE_AGE(AGE)                       \
+  case k##AGE##CodeAge: {                          \
+    return *builtins->Make##AGE##CodeYoungAgain(); \
+  }
     CODE_AGE_LIST(HANDLE_CODE_AGE)
 #undef HANDLE_CODE_AGE
     case kNotExecutedCodeAge: {
-      DCHECK(parity == NO_MARKING_PARITY);
       return *builtins->MarkCodeAsExecutedOnce();
     }
     case kExecutedOnceCodeAge: {
-      DCHECK(parity == NO_MARKING_PARITY);
       return *builtins->MarkCodeAsExecutedTwice();
     }
     case kToBeExecutedOnceCodeAge: {
-      DCHECK(parity == NO_MARKING_PARITY);
       return *builtins->MarkCodeAsToBeExecutedOnce();
     }
     default:
@@ -14712,14 +14278,24 @@
           break;
         }
 
+        case Translation::CONSTRUCT_STUB_FRAME: {
+          int bailout_id = iterator.Next();
+          int shared_info_id = iterator.Next();
+          Object* shared_info = LiteralArray()->get(shared_info_id);
+          unsigned height = iterator.Next();
+          os << "{bailout_id=" << bailout_id << ", function="
+             << Brief(SharedFunctionInfo::cast(shared_info)->DebugName())
+             << ", height=" << height << "}";
+          break;
+        }
+
         case Translation::COMPILED_STUB_FRAME: {
           Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
           os << "{kind=" << stub_kind << "}";
           break;
         }
 
-        case Translation::ARGUMENTS_ADAPTOR_FRAME:
-        case Translation::CONSTRUCT_STUB_FRAME: {
+        case Translation::ARGUMENTS_ADAPTOR_FRAME: {
           int shared_info_id = iterator.Next();
           Object* shared_info = LiteralArray()->get(shared_info_id);
           unsigned height = iterator.Next();
@@ -14906,8 +14482,8 @@
     if (!IC::ICUseVector(kind())) {
       InlineCacheState ic_state = IC::StateFromCode(this);
       os << "ic_state = " << ICState2String(ic_state) << "\n";
+      PrintExtraICState(os, kind(), extra_ic_state());
     }
-    PrintExtraICState(os, kind(), extra_ic_state());
     if (is_compare_ic_stub()) {
       DCHECK(CodeStub::GetMajorKey(this) == CodeStub::CompareIC);
       CompareICStub stub(stub_key(), GetIsolate());
@@ -15122,11 +14698,17 @@
             from->length());
 }
 
-int BytecodeArray::LookupRangeInHandlerTable(
-    int code_offset, int* data, HandlerTable::CatchPrediction* prediction) {
-  HandlerTable* table = HandlerTable::cast(handler_table());
-  code_offset++;  // Point after current bytecode.
-  return table->LookupRange(code_offset, data, prediction);
+void BytecodeArray::MakeOlder() {
+  Age age = bytecode_age();
+  if (age < kLastBytecodeAge) {
+    set_bytecode_age(static_cast<Age>(age + 1));
+  }
+  DCHECK_GE(bytecode_age(), kFirstBytecodeAge);
+  DCHECK_LE(bytecode_age(), kLastBytecodeAge);
+}
+
+bool BytecodeArray::IsOld() const {
+  return bytecode_age() >= kIsOldBytecodeAge;
 }
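
// Toy version of the bytecode aging above: a per-array saturating counter,
// "old" once it reaches a flush threshold. Constants are illustrative, not
// the real kFirst/kLast/kIsOld values.
struct BytecodeAgeSketch {
  static constexpr int kLast = 6;
  static constexpr int kIsOld = 5;
  int age = 0;

  void MakeOlder() {
    if (age < kLast) ++age;
  }
  bool IsOld() const { return age >= kIsOld; }
};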
 
 // static
@@ -15565,9 +15147,6 @@
   // SpiderMonkey behaves this way.
   if (!value->IsJSReceiver() && !value->IsNull(isolate)) return Just(true);
 
-  bool dictionary_elements_in_chain =
-      object->map()->DictionaryElementsInPrototypeChainOnly();
-
   bool all_extensible = object->map()->is_extensible();
   Handle<JSObject> real_receiver = object;
   if (from_javascript) {
@@ -15633,14 +15212,6 @@
   DCHECK(new_map->prototype() == *value);
   JSObject::MigrateToMap(real_receiver, new_map);
 
-  if (from_javascript && !dictionary_elements_in_chain &&
-      new_map->DictionaryElementsInPrototypeChainOnly()) {
-    // If the prototype chain didn't previously have element callbacks, then
-    // KeyedStoreICs need to be cleared to ensure any that involve this
-    // map go generic.
-    TypeFeedbackVector::ClearAllKeyedStoreICs(isolate);
-  }
-
   heap->ClearInstanceofCache();
   DCHECK(size == object->Size());
   return Just(true);
@@ -15837,12 +15408,10 @@
   ElementsAccessor* accessor = ElementsAccessor::ForKind(to);
   accessor->Add(object, index, value, attributes, new_capacity);
 
-  uint32_t new_length = old_length;
-  Handle<Object> new_length_handle;
   if (object->IsJSArray() && index >= old_length) {
-    new_length = index + 1;
-    new_length_handle = isolate->factory()->NewNumberFromUint(new_length);
-    JSArray::cast(*object)->set_length(*new_length_handle);
+    Handle<Object> new_length =
+        isolate->factory()->NewNumberFromUint(index + 1);
+    JSArray::cast(*object)->set_length(*new_length);
   }
 
   return Just(true);
@@ -16147,7 +15716,8 @@
       } else {
         os << Brief(k);
       }
-      os << ": " << Brief(this->ValueAt(i)) << " " << this->DetailsAt(i);
+      os << ": " << Brief(this->ValueAt(i)) << " ";
+      this->DetailsAt(i).PrintAsSlowTo(os);
     }
   }
 }
@@ -16213,118 +15783,6 @@
           ElementsKindToShiftSize(kind));
 }
 
-void FixedArray::SwapPairs(FixedArray* numbers, int i, int j) {
-  Object* temp = get(i);
-  set(i, get(j));
-  set(j, temp);
-  if (this != numbers) {
-    temp = numbers->get(i);
-    numbers->set(i, Smi::cast(numbers->get(j)));
-    numbers->set(j, Smi::cast(temp));
-  }
-}
-
-
-static void InsertionSortPairs(FixedArray* content,
-                               FixedArray* numbers,
-                               int len) {
-  for (int i = 1; i < len; i++) {
-    int j = i;
-    while (j > 0 &&
-           (NumberToUint32(numbers->get(j - 1)) >
-            NumberToUint32(numbers->get(j)))) {
-      content->SwapPairs(numbers, j - 1, j);
-      j--;
-    }
-  }
-}
-
-
-void HeapSortPairs(FixedArray* content, FixedArray* numbers, int len) {
-  // In-place heap sort.
-  DCHECK(content->length() == numbers->length());
-
-  // Bottom-up max-heap construction.
-  for (int i = 1; i < len; ++i) {
-    int child_index = i;
-    while (child_index > 0) {
-      int parent_index = ((child_index + 1) >> 1) - 1;
-      uint32_t parent_value = NumberToUint32(numbers->get(parent_index));
-      uint32_t child_value = NumberToUint32(numbers->get(child_index));
-      if (parent_value < child_value) {
-        content->SwapPairs(numbers, parent_index, child_index);
-      } else {
-        break;
-      }
-      child_index = parent_index;
-    }
-  }
-
-  // Extract elements and create sorted array.
-  for (int i = len - 1; i > 0; --i) {
-    // Put max element at the back of the array.
-    content->SwapPairs(numbers, 0, i);
-    // Sift down the new top element.
-    int parent_index = 0;
-    while (true) {
-      int child_index = ((parent_index + 1) << 1) - 1;
-      if (child_index >= i) break;
-      uint32_t child1_value = NumberToUint32(numbers->get(child_index));
-      uint32_t child2_value = NumberToUint32(numbers->get(child_index + 1));
-      uint32_t parent_value = NumberToUint32(numbers->get(parent_index));
-      if (child_index + 1 >= i || child1_value > child2_value) {
-        if (parent_value > child1_value) break;
-        content->SwapPairs(numbers, parent_index, child_index);
-        parent_index = child_index;
-      } else {
-        if (parent_value > child2_value) break;
-        content->SwapPairs(numbers, parent_index, child_index + 1);
-        parent_index = child_index + 1;
-      }
-    }
-  }
-}
-
-
-// Sort this array and the numbers as pairs wrt. the (distinct) numbers.
-void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
-  DCHECK(this->length() == numbers->length());
-  // For small arrays, simply use insertion sort.
-  if (len <= 10) {
-    InsertionSortPairs(this, numbers, len);
-    return;
-  }
-  // Check the range of indices.
-  uint32_t min_index = NumberToUint32(numbers->get(0));
-  uint32_t max_index = min_index;
-  uint32_t i;
-  for (i = 1; i < len; i++) {
-    if (NumberToUint32(numbers->get(i)) < min_index) {
-      min_index = NumberToUint32(numbers->get(i));
-    } else if (NumberToUint32(numbers->get(i)) > max_index) {
-      max_index = NumberToUint32(numbers->get(i));
-    }
-  }
-  if (max_index - min_index + 1 == len) {
-    // Indices form a contiguous range, unless there are duplicates.
-    // Do an in-place linear time sort assuming distinct numbers, but
-    // avoid hanging in case they are not.
-    for (i = 0; i < len; i++) {
-      uint32_t p;
-      uint32_t j = 0;
-      // While the current element at i is not at its correct position p,
-      // swap the elements at these two positions.
-      while ((p = NumberToUint32(numbers->get(i)) - min_index) != i &&
-             j++ < len) {
-        SwapPairs(numbers, i, p);
-      }
-    }
-  } else {
-    HeapSortPairs(this, numbers, len);
-    return;
-  }
-}
-
 bool JSObject::WasConstructedFromApiFunction() {
   auto instance_type = map()->instance_type();
   bool is_api_object = instance_type == JS_API_OBJECT_TYPE ||
@@ -16344,94 +15802,6 @@
   return is_api_object;
 }
 
-MaybeHandle<String> Object::ObjectProtoToString(Isolate* isolate,
-                                                Handle<Object> object) {
-  if (*object == isolate->heap()->undefined_value()) {
-    return isolate->factory()->undefined_to_string();
-  }
-  if (*object == isolate->heap()->null_value()) {
-    return isolate->factory()->null_to_string();
-  }
-
-  Handle<JSReceiver> receiver =
-      Object::ToObject(isolate, object).ToHandleChecked();
-
-  // For proxies, we must check IsArray() before get(toStringTag) to comply
-  // with the specification
-  Maybe<bool> is_array = Nothing<bool>();
-  InstanceType instance_type = receiver->map()->instance_type();
-  if (instance_type == JS_PROXY_TYPE) {
-    is_array = Object::IsArray(receiver);
-    MAYBE_RETURN(is_array, MaybeHandle<String>());
-  }
-
-  Handle<String> tag;
-  Handle<Object> to_string_tag;
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate, to_string_tag,
-      JSReceiver::GetProperty(receiver,
-                              isolate->factory()->to_string_tag_symbol()),
-      String);
-  if (to_string_tag->IsString()) {
-    tag = Handle<String>::cast(to_string_tag);
-  } else {
-    switch (instance_type) {
-      case JS_API_OBJECT_TYPE:
-      case JS_SPECIAL_API_OBJECT_TYPE:
-        tag = handle(receiver->class_name(), isolate);
-        break;
-      case JS_ARGUMENTS_TYPE:
-        return isolate->factory()->arguments_to_string();
-      case JS_ARRAY_TYPE:
-        return isolate->factory()->array_to_string();
-      case JS_BOUND_FUNCTION_TYPE:
-      case JS_FUNCTION_TYPE:
-        return isolate->factory()->function_to_string();
-      case JS_ERROR_TYPE:
-        return isolate->factory()->error_to_string();
-      case JS_DATE_TYPE:
-        return isolate->factory()->date_to_string();
-      case JS_REGEXP_TYPE:
-        return isolate->factory()->regexp_to_string();
-      case JS_PROXY_TYPE: {
-        if (is_array.FromJust()) {
-          return isolate->factory()->array_to_string();
-        }
-        if (receiver->IsCallable()) {
-          return isolate->factory()->function_to_string();
-        }
-        return isolate->factory()->object_to_string();
-      }
-      case JS_VALUE_TYPE: {
-        Object* value = JSValue::cast(*receiver)->value();
-        if (value->IsString()) {
-          return isolate->factory()->string_to_string();
-        }
-        if (value->IsNumber()) {
-          return isolate->factory()->number_to_string();
-        }
-        if (value->IsBoolean()) {
-          return isolate->factory()->boolean_to_string();
-        }
-        if (value->IsSymbol()) {
-          return isolate->factory()->object_to_string();
-        }
-        UNREACHABLE();
-        tag = handle(receiver->class_name(), isolate);
-        break;
-      }
-      default:
-        return isolate->factory()->object_to_string();
-    }
-  }
-
-  IncrementalStringBuilder builder(isolate);
-  builder.AppendCString("[object ");
-  builder.AppendString(tag);
-  builder.AppendCharacter(']');
-  return builder.Finish();
-}
-
 const char* Symbol::PrivateSymbolToName() const {
   Heap* heap = GetIsolate()->heap();
 #define SYMBOL_CHECK_AND_PRINT(name) \
@@ -16460,12 +15830,22 @@
 // StringSharedKeys are used as keys in the eval cache.
 class StringSharedKey : public HashTableKey {
  public:
+  // This tuple unambiguously identifies calls to eval() or
+  // CreateDynamicFunction() (such as through the Function() constructor).
+  // * source is the string passed into eval(). For dynamic functions, this is
+  //   the effective source for the function, some of which is implicitly
+  //   generated.
+  // * shared is the shared function info for the function containing the call
+  //   to eval(). For dynamic functions, shared is the native context closure.
+  // * When positive, position is the position in the source where eval is
+  //   called. When negative, position is the negation of the position in the
+  //   dynamic function's effective source where the ')' ends the parameters.
   StringSharedKey(Handle<String> source, Handle<SharedFunctionInfo> shared,
-                  LanguageMode language_mode, int scope_position)
+                  LanguageMode language_mode, int position)
       : source_(source),
         shared_(shared),
         language_mode_(language_mode),
-        scope_position_(scope_position) {}
+        position_(position) {}
 
   bool IsMatch(Object* other) override {
     DisallowHeapAllocation no_allocation;
@@ -16481,8 +15861,8 @@
     DCHECK(is_valid_language_mode(language_unchecked));
     LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
     if (language_mode != language_mode_) return false;
-    int scope_position = Smi::cast(other_array->get(3))->value();
-    if (scope_position != scope_position_) return false;
+    int position = Smi::cast(other_array->get(3))->value();
+    if (position != position_) return false;
     String* source = String::cast(other_array->get(1));
     return source->Equals(*source_);
   }
@@ -16490,7 +15870,7 @@
   static uint32_t StringSharedHashHelper(String* source,
                                          SharedFunctionInfo* shared,
                                          LanguageMode language_mode,
-                                         int scope_position) {
+                                         int position) {
     uint32_t hash = source->Hash();
     if (shared->HasSourceCode()) {
       // Instead of using the SharedFunctionInfo pointer in the hash
@@ -16502,14 +15882,14 @@
       hash ^= String::cast(script->source())->Hash();
       STATIC_ASSERT(LANGUAGE_END == 2);
       if (is_strict(language_mode)) hash ^= 0x8000;
-      hash += scope_position;
+      hash += position;
     }
     return hash;
   }
 
   uint32_t Hash() override {
     return StringSharedHashHelper(*source_, *shared_, language_mode_,
-                                  scope_position_);
+                                  position_);
   }
 
   uint32_t HashForObject(Object* obj) override {
@@ -16523,9 +15903,8 @@
     int language_unchecked = Smi::cast(other_array->get(2))->value();
     DCHECK(is_valid_language_mode(language_unchecked));
     LanguageMode language_mode = static_cast<LanguageMode>(language_unchecked);
-    int scope_position = Smi::cast(other_array->get(3))->value();
-    return StringSharedHashHelper(source, shared, language_mode,
-                                  scope_position);
+    int position = Smi::cast(other_array->get(3))->value();
+    return StringSharedHashHelper(source, shared, language_mode, position);
   }
 
 
@@ -16534,7 +15913,7 @@
     array->set(0, *shared_);
     array->set(1, *source_);
     array->set(2, Smi::FromInt(language_mode_));
-    array->set(3, Smi::FromInt(scope_position_));
+    array->set(3, Smi::FromInt(position_));
     return array;
   }
 
@@ -16542,9 +15921,22 @@
   Handle<String> source_;
   Handle<SharedFunctionInfo> shared_;
   LanguageMode language_mode_;
-  int scope_position_;
+  int position_;
 };
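
// The hash combination above as a standalone function: mix the eval source
// hash with the enclosing script's source hash, a strict-mode bit, and the
// (possibly negated) position. The two hash inputs stand in for String::Hash.
#include <cstdint>

uint32_t EvalCacheHashSketch(uint32_t source_hash, uint32_t script_source_hash,
                             bool is_strict, int position) {
  uint32_t hash = source_hash;
  hash ^= script_source_hash;
  if (is_strict) hash ^= 0x8000;
  hash += static_cast<uint32_t>(position);
  return hash;
}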
 
+// static
+const char* JSPromise::Status(int status) {
+  switch (status) {
+    case v8::Promise::kFulfilled:
+      return "resolved";
+    case v8::Promise::kPending:
+      return "pending";
+    case v8::Promise::kRejected:
+      return "rejected";
+  }
+  UNREACHABLE();
+  return NULL;
+}
 
 namespace {
 
@@ -16816,6 +16208,17 @@
       DCHECK(string_->IsInternalizedString());
       return string_;
     }
+    if (FLAG_thin_strings) {
+      // External strings get special treatment, to avoid copying their
+      // contents.
+      if (string_->IsExternalOneByteString()) {
+        return isolate->factory()
+            ->InternalizeExternalString<ExternalOneByteString>(string_);
+      } else if (string_->IsExternalTwoByteString()) {
+        return isolate->factory()
+            ->InternalizeExternalString<ExternalTwoByteString>(string_);
+      }
+    }
     // Otherwise allocate a new internalized string.
     return isolate->factory()->NewInternalizedStringImpl(
         string_, string_->length(), string_->hash_field());
@@ -16825,6 +16228,7 @@
     return String::cast(obj)->Hash();
   }
 
+ private:
   Handle<String> string_;
 };
 
@@ -16858,7 +16262,13 @@
   if (capacity > HashTable::kMaxCapacity) {
     v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
   }
+  return New(isolate, capacity, pretenure);
+}
 
+template <typename Derived, typename Shape, typename Key>
+Handle<Derived> HashTable<Derived, Shape, Key>::New(Isolate* isolate,
+                                                    int capacity,
+                                                    PretenureFlag pretenure) {
   Factory* factory = isolate->factory();
   int length = EntryToIndex(capacity);
   Handle<FixedArray> array = factory->NewFixedArray(length, pretenure);
@@ -16871,7 +16281,6 @@
   return table;
 }
 
-
 // Find entry for key otherwise return kNotFound.
 template <typename Derived, typename Shape>
 int NameDictionaryBase<Derived, Shape>::FindEntry(Handle<Name> key) {
@@ -17145,6 +16554,10 @@
     Isolate*, int at_least_space_for, PretenureFlag pretenure,
     MinimumCapacity capacity_option);
 
+template Handle<SeededNumberDictionary>
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
+           uint32_t>::NewEmpty(Isolate*, PretenureFlag pretenure);
+
 template Handle<UnseededNumberDictionary>
 Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape,
            uint32_t>::New(Isolate*, int at_least_space_for,
@@ -17155,6 +16568,10 @@
 Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::New(
     Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
 
+template Handle<NameDictionary>
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::NewEmpty(
+    Isolate*, PretenureFlag pretenure);
+
 template Handle<GlobalDictionary>
 Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::New(
     Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
@@ -17220,10 +16637,6 @@
 
 template Handle<FixedArray> Dictionary<
     NameDictionary, NameDictionaryShape,
-    Handle<Name> >::BuildIterationIndicesArray(Handle<NameDictionary>);
-
-template Handle<FixedArray> Dictionary<
-    NameDictionary, NameDictionaryShape,
     Handle<Name> >::GenerateNewEnumerationIndices(Handle<NameDictionary>);
 
 template Handle<SeededNumberDictionary>
@@ -17278,6 +16691,12 @@
     Handle<FixedArray> storage, KeyCollectionMode mode,
     KeyAccumulator* accumulator);
 
+template Handle<FixedArray>
+Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
+    IterationIndices(
+        Handle<
+            Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>>
+            dictionary);
 template void
 Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
     CollectKeysTo(Handle<Dictionary<GlobalDictionary, GlobalDictionaryShape,
@@ -17285,6 +16704,10 @@
                       dictionary,
                   KeyAccumulator* keys);
 
+template Handle<FixedArray>
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::IterationIndices(
+    Handle<Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>>
+        dictionary);
 template void
 Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::CollectKeysTo(
     Handle<Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>>
@@ -17321,7 +16744,7 @@
     HandleScope scope(isolate);
     Handle<Object> value(dict->ValueAt(i), isolate);
     PropertyDetails details = dict->DetailsAt(i);
-    if (details.type() == ACCESSOR_CONSTANT || details.IsReadOnly()) {
+    if (details.kind() == kAccessor || details.IsReadOnly()) {
       // Bail out and do the sorting of undefineds and array holes in JS.
       // Also bail out if the element is not supposed to be moved.
       return bailout;
@@ -17337,7 +16760,7 @@
         return bailout;
       } else {
         Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
-            new_dict, pos, value, details, object->map()->is_prototype_map());
+            new_dict, pos, value, details, object);
         DCHECK(result.is_identical_to(new_dict));
         USE(result);
         pos++;
@@ -17348,7 +16771,7 @@
       return bailout;
     } else {
       Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
-          new_dict, key, value, details, object->map()->is_prototype_map());
+          new_dict, key, value, details, object);
       DCHECK(result.is_identical_to(new_dict));
       USE(result);
     }
@@ -17365,7 +16788,7 @@
     HandleScope scope(isolate);
     Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
         new_dict, pos, isolate->factory()->undefined_value(), no_details,
-        object->map()->is_prototype_map());
+        object);
     DCHECK(result.is_identical_to(new_dict));
     USE(result);
     pos++;
@@ -17386,7 +16809,7 @@
 Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
                                                 uint32_t limit) {
   Isolate* isolate = object->GetIsolate();
-  if (object->HasSloppyArgumentsElements()) {
+  if (object->HasSloppyArgumentsElements() || !object->map()->is_extensible()) {
     return handle(Smi::FromInt(-1), isolate);
   }
 
@@ -17503,11 +16926,11 @@
     }
     result = undefs;
     while (undefs < holes) {
-      elements->set_undefined(undefs);
+      elements->set_undefined(isolate, undefs);
       undefs++;
     }
     while (holes < limit) {
-      elements->set_the_hole(holes);
+      elements->set_the_hole(isolate, holes);
       holes++;
     }
   }
@@ -17515,6 +16938,98 @@
   return isolate->factory()->NewNumberFromUint(result);
 }
 
+namespace {
+
+bool CanonicalNumericIndexString(Isolate* isolate, Handle<Object> s,
+                                 Handle<Object>* index) {
+  DCHECK(s->IsString() || s->IsSmi());
+
+  Handle<Object> result;
+  if (s->IsSmi()) {
+    result = s;
+  } else {
+    result = String::ToNumber(Handle<String>::cast(s));
+    if (!result->IsMinusZero()) {
+      Handle<String> str = Object::ToString(isolate, result).ToHandleChecked();
+      // Avoid treating strings like "2E1" and "20" as the same key.
+      if (!str->SameValue(*s)) return false;
+    }
+  }
+  *index = result;
+  return true;
+}
+
+}  // anonymous namespace
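
// The round-trip test above in plain C++: a string names a canonical numeric
// index only if number->string conversion reproduces it exactly, so "20"
// qualifies while "2E1" and "020" do not. Toy version limited to integral,
// non-negative inputs; helpers stand in for String::ToNumber/Object::ToString.
#include <cstdlib>
#include <string>

bool IsCanonicalNumericIndexSketch(const std::string& s) {
  char* end = nullptr;
  double n = std::strtod(s.c_str(), &end);
  if (end != s.c_str() + s.size()) return false;  // not fully numeric
  long long i = static_cast<long long>(n);
  if (static_cast<double>(i) != n || i < 0) return false;  // integral only
  return std::to_string(i) == s;  // "2E1" round-trips to "20", so it fails
}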
+
+// ES#sec-integer-indexed-exotic-objects-defineownproperty-p-desc
+// static
+Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
+                                            Handle<JSTypedArray> o,
+                                            Handle<Object> key,
+                                            PropertyDescriptor* desc,
+                                            ShouldThrow should_throw) {
+  // 1. Assert: IsPropertyKey(P) is true.
+  DCHECK(key->IsName() || key->IsNumber());
+  // 2. Assert: O is an Object that has a [[ViewedArrayBuffer]] internal slot.
+  // 3. If Type(P) is String, then
+  if (key->IsString() || key->IsSmi()) {
+    // 3a. Let numericIndex be ! CanonicalNumericIndexString(P)
+    // 3b. If numericIndex is not undefined, then
+    Handle<Object> numeric_index;
+    if (CanonicalNumericIndexString(isolate, key, &numeric_index)) {
+      // 3b i. If IsInteger(numericIndex) is false, return false.
+      // 3b ii. If numericIndex = -0, return false.
+      // 3b iii. If numericIndex < 0, return false.
+      // FIXME: the standard allows up to 2^53 elements.
+      uint32_t index;
+      if (numeric_index->IsMinusZero() || !numeric_index->ToUint32(&index)) {
+        RETURN_FAILURE(isolate, should_throw,
+                       NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
+      }
+      // 3b iv. Let length be O.[[ArrayLength]].
+      uint32_t length = o->length()->Number();
+      // 3b v. If numericIndex ≥ length, return false.
+      if (index >= length) {
+        RETURN_FAILURE(isolate, should_throw,
+                       NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
+      }
+      // 3b vi. If IsAccessorDescriptor(Desc) is true, return false.
+      if (PropertyDescriptor::IsAccessorDescriptor(desc)) {
+        RETURN_FAILURE(isolate, should_throw,
+                       NewTypeError(MessageTemplate::kRedefineDisallowed, key));
+      }
+      // 3b vii. If Desc has a [[Configurable]] field and if
+      //         Desc.[[Configurable]] is true, return false.
+      // 3b viii. If Desc has an [[Enumerable]] field and if Desc.[[Enumerable]]
+      //          is false, return false.
+      // 3b ix. If Desc has a [[Writable]] field and if Desc.[[Writable]] is
+      //        false, return false.
+      if ((desc->has_configurable() && desc->configurable()) ||
+          (desc->has_enumerable() && !desc->enumerable()) ||
+          (desc->has_writable() && !desc->writable())) {
+        RETURN_FAILURE(isolate, should_throw,
+                       NewTypeError(MessageTemplate::kRedefineDisallowed, key));
+      }
+      // 3b x. If Desc has a [[Value]] field, then
+      //   3b x 1. Let value be Desc.[[Value]].
+      //   3b x 2. Return ? IntegerIndexedElementSet(O, numericIndex, value).
+      if (desc->has_value()) {
+        if (!desc->has_configurable()) desc->set_configurable(false);
+        if (!desc->has_enumerable()) desc->set_enumerable(true);
+        if (!desc->has_writable()) desc->set_writable(true);
+        Handle<Object> value = desc->value();
+        RETURN_ON_EXCEPTION_VALUE(isolate,
+                                  SetOwnElementIgnoreAttributes(
+                                      o, index, value, desc->ToAttributes()),
+                                  Nothing<bool>());
+      }
+      // 3b xi. Return true.
+      return Just(true);
+    }
+  }
+  // 4. Return ! OrdinaryDefineOwnProperty(O, P, Desc).
+  return OrdinaryDefineOwnProperty(isolate, o, key, desc, should_throw);
+}
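The attribute checks in steps 3b vi-ix reduce to a small predicate. A hedged standalone restatement (the Desc struct is a stand-in for V8's PropertyDescriptor, not its API): an in-bounds index accepts only plain data descriptors whose explicit fields match {configurable: false, enumerable: true, writable: true}, which are exactly the defaults filled in above before the store.

#include <cassert>
#include <optional>

struct Desc {
  std::optional<bool> configurable, enumerable, writable;
  bool has_accessor = false;  // a [[Get]] or [[Set]] field is present
};

bool CanDefineTypedArrayElement(const Desc& d) {
  if (d.has_accessor) return false;                  // 3b vi
  if (d.configurable.value_or(false)) return false;  // 3b vii
  if (!d.enumerable.value_or(true)) return false;    // 3b viii
  if (!d.writable.value_or(true)) return false;      // 3b ix
  return true;  // a plain {value: v} descriptor just stores the element
}

int main() {
  assert(CanDefineTypedArrayElement({}));                      // plain {value}
  assert(!CanDefineTypedArrayElement({true, {}, {}, false}));  // configurable
  assert(!CanDefineTypedArrayElement({{}, {}, {}, true}));     // accessor
  return 0;
}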
 
 ExternalArrayType JSTypedArray::type() {
   switch (elements()->map()->instance_type()) {
@@ -17577,12 +17092,12 @@
     if (original_cell_type == PropertyCellType::kInvalidated) {
       cell = PropertyCell::InvalidateEntry(dictionary, entry);
     }
-    PropertyDetails details(NONE, DATA, 0, cell_type);
+    PropertyDetails details(kData, NONE, 0, cell_type);
     cell->set_property_details(details);
     return cell;
   }
   cell = isolate->factory()->NewPropertyCell();
-  PropertyDetails details(NONE, DATA, 0, cell_type);
+  PropertyDetails details(kData, NONE, 0, cell_type);
   dictionary =
       GlobalDictionary::Add(dictionary, name, cell, details, entry_out);
   // {*entry_out} is initialized inside GlobalDictionary::Add().
@@ -17661,6 +17176,9 @@
   if (string->IsInternalizedString()) {
     return string;
   }
+  if (string->IsThinString()) {
+    return handle(Handle<ThinString>::cast(string)->actual(), isolate);
+  }
   return LookupStringIfExists(isolate, string);
 }
 
@@ -17707,31 +17225,98 @@
   isolate->heap()->SetRootStringTable(*table);
 }
 
+namespace {
+
+template <class StringClass>
+void MigrateExternalStringResource(Isolate* isolate, Handle<String> from,
+                                   Handle<String> to) {
+  Handle<StringClass> cast_from = Handle<StringClass>::cast(from);
+  Handle<StringClass> cast_to = Handle<StringClass>::cast(to);
+  const typename StringClass::Resource* to_resource = cast_to->resource();
+  if (to_resource == nullptr) {
+    // |to| is a just-created internalized copy of |from|. Migrate the resource.
+    cast_to->set_resource(cast_from->resource());
+    // Zap |from|'s resource pointer to reflect the fact that |from| has
+    // relinquished ownership of its resource.
+    cast_from->set_resource(nullptr);
+  } else if (to_resource != cast_from->resource()) {
+    // |to| already existed and has its own resource. Finalize |from|.
+    isolate->heap()->FinalizeExternalString(*from);
+  }
+}
+
+}  // namespace
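A toy model of the ownership hand-off above (plain C++, not V8's ExternalString API): a freshly internalized copy steals the donor's resource pointer and the donor is zapped, while a collision with an already-resourced string finalizes the duplicate's resource instead.

struct Resource {};  // stands in for externally allocated character data

struct ExternalStr {
  Resource* resource = nullptr;
};

void MigrateResource(ExternalStr& from, ExternalStr& to) {
  if (to.resource == nullptr) {
    to.resource = from.resource;  // |to| is a fresh copy: hand over ownership
    from.resource = nullptr;      // zap the donor's pointer
  } else if (to.resource != from.resource) {
    delete from.resource;         // stand-in for FinalizeExternalString
    from.resource = nullptr;
  }
}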
 
 Handle<String> StringTable::LookupString(Isolate* isolate,
                                          Handle<String> string) {
+  if (string->IsThinString()) {
+    DCHECK(Handle<ThinString>::cast(string)->actual()->IsInternalizedString());
+    return handle(Handle<ThinString>::cast(string)->actual(), isolate);
+  }
   if (string->IsConsString() && string->IsFlat()) {
-    string = String::Flatten(string);
+    string = handle(Handle<ConsString>::cast(string)->first(), isolate);
     if (string->IsInternalizedString()) return string;
   }
 
   InternalizedStringKey key(string);
   Handle<String> result = LookupKey(isolate, &key);
 
-  if (string->IsConsString()) {
-    Handle<ConsString> cons = Handle<ConsString>::cast(string);
-    cons->set_first(*result);
-    cons->set_second(isolate->heap()->empty_string());
-  } else if (string->IsSlicedString()) {
-    STATIC_ASSERT(ConsString::kSize == SlicedString::kSize);
-    DisallowHeapAllocation no_gc;
-    bool one_byte = result->IsOneByteRepresentation();
-    Handle<Map> map = one_byte ? isolate->factory()->cons_one_byte_string_map()
-                               : isolate->factory()->cons_string_map();
-    string->set_map(*map);
-    Handle<ConsString> cons = Handle<ConsString>::cast(string);
-    cons->set_first(*result);
-    cons->set_second(isolate->heap()->empty_string());
+  if (FLAG_thin_strings) {
+    if (string->IsExternalString()) {
+      if (result->IsExternalOneByteString()) {
+        MigrateExternalStringResource<ExternalOneByteString>(isolate, string,
+                                                             result);
+      } else if (result->IsExternalTwoByteString()) {
+        MigrateExternalStringResource<ExternalTwoByteString>(isolate, string,
+                                                             result);
+      } else {
+        // If the external string is duped into an existing non-external
+        // internalized string, free its resource (it's about to be rewritten
+        // into a ThinString below).
+        isolate->heap()->FinalizeExternalString(*string);
+      }
+    }
+
+    // The LookupKey() call above tries to internalize the string in-place.
+    // Where that wasn't possible (e.g. for new-space strings), turn the
+    // string into a ThinString referring to its internalized version now.
+    if (!string->IsInternalizedString()) {
+      DisallowHeapAllocation no_gc;
+      bool one_byte = result->IsOneByteRepresentation();
+      Handle<Map> map = one_byte
+                            ? isolate->factory()->thin_one_byte_string_map()
+                            : isolate->factory()->thin_string_map();
+      int old_size = string->Size();
+      DCHECK(old_size >= ThinString::kSize);
+      string->synchronized_set_map(*map);
+      Handle<ThinString> thin = Handle<ThinString>::cast(string);
+      thin->set_actual(*result);
+      Address thin_end = thin->address() + ThinString::kSize;
+      int size_delta = old_size - ThinString::kSize;
+      if (size_delta != 0) {
+        Heap* heap = isolate->heap();
+        heap->CreateFillerObjectAt(thin_end, size_delta,
+                                   ClearRecordedSlots::kNo);
+        heap->AdjustLiveBytes(*thin, -size_delta);
+      }
+    }
+  } else {  // !FLAG_thin_strings
+    if (string->IsConsString()) {
+      Handle<ConsString> cons = Handle<ConsString>::cast(string);
+      cons->set_first(*result);
+      cons->set_second(isolate->heap()->empty_string());
+    } else if (string->IsSlicedString()) {
+      STATIC_ASSERT(ConsString::kSize == SlicedString::kSize);
+      DisallowHeapAllocation no_gc;
+      bool one_byte = result->IsOneByteRepresentation();
+      Handle<Map> map = one_byte
+                            ? isolate->factory()->cons_one_byte_string_map()
+                            : isolate->factory()->cons_string_map();
+      string->set_map(*map);
+      Handle<ConsString> cons = Handle<ConsString>::cast(string);
+      cons->set_first(*result);
+      cons->set_second(isolate->heap()->empty_string());
+    }
   }
   return result;
 }
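The FLAG_thin_strings branch above replaces the old cons-string trick with a one-field forwarding record. A hedged sketch of the idea in isolation (names hypothetical): readers chase a single indirection from the duplicate to the canonical, internalized payload, and the rewrite happens in place.

#include <cassert>
#include <memory>
#include <string>
#include <variant>

struct Str;
using StrRef = std::shared_ptr<Str>;

struct Str {
  // Either the real character payload or a forward to the canonical copy.
  std::variant<std::string, StrRef> rep;
};

const std::string& Chars(const StrRef& s) {
  if (auto* fwd = std::get_if<StrRef>(&s->rep)) return Chars(*fwd);
  return std::get<std::string>(s->rep);
}

void Internalize(const StrRef& dup, const StrRef& canonical) {
  dup->rep = canonical;  // in-place rewrite; the old payload becomes garbage
}

int main() {
  auto canonical = std::make_shared<Str>(Str{std::string("foo")});
  auto dup = std::make_shared<Str>(Str{std::string("foo")});
  Internalize(dup, canonical);
  assert(&Chars(dup) == &Chars(canonical));  // both observe one payload
  return 0;
}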
@@ -17819,21 +17404,153 @@
   return Handle<Object>(get(index + 1), isolate);
 }
 
+namespace {
 
-Handle<Object> CompilationCacheTable::LookupEval(
-    Handle<String> src, Handle<SharedFunctionInfo> outer_info,
-    LanguageMode language_mode, int scope_position) {
-  Isolate* isolate = GetIsolate();
-  // Cache key is the tuple (source, outer shared function info, scope position)
-  // to unambiguously identify the context chain the cached eval code assumes.
-  StringSharedKey key(src, outer_info, language_mode, scope_position);
-  int entry = FindEntry(&key);
-  if (entry == kNotFound) return isolate->factory()->undefined_value();
-  int index = EntryToIndex(entry);
-  if (!get(index)->IsFixedArray()) return isolate->factory()->undefined_value();
-  return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
+const int kLiteralEntryLength = 2;
+const int kLiteralInitialLength = 2;
+const int kLiteralContextOffset = 0;
+const int kLiteralLiteralsOffset = 1;
+
+int SearchLiteralsMapEntry(CompilationCacheTable* cache, int cache_entry,
+                           Context* native_context) {
+  DisallowHeapAllocation no_gc;
+  DCHECK(native_context->IsNativeContext());
+  Object* obj = cache->get(cache_entry);
+
+  if (obj->IsFixedArray()) {
+    FixedArray* literals_map = FixedArray::cast(obj);
+    int length = literals_map->length();
+    for (int i = 0; i < length; i += kLiteralEntryLength) {
+      if (WeakCell::cast(literals_map->get(i + kLiteralContextOffset))
+              ->value() == native_context) {
+        return i;
+      }
+    }
+  }
+  return -1;
 }
 
+void AddToLiteralsMap(Handle<CompilationCacheTable> cache, int cache_entry,
+                      Handle<Context> native_context, Handle<Cell> literals) {
+  Isolate* isolate = native_context->GetIsolate();
+  DCHECK(native_context->IsNativeContext());
+  STATIC_ASSERT(kLiteralEntryLength == 2);
+  Handle<FixedArray> new_literals_map;
+  int entry;
+
+  Object* obj = cache->get(cache_entry);
+
+  if (!obj->IsFixedArray() || FixedArray::cast(obj)->length() == 0) {
+    new_literals_map =
+        isolate->factory()->NewFixedArray(kLiteralInitialLength, TENURED);
+    entry = 0;
+  } else {
+    Handle<FixedArray> old_literals_map(FixedArray::cast(obj), isolate);
+    entry = SearchLiteralsMapEntry(*cache, cache_entry, *native_context);
+    if (entry >= 0) {
+      // Just update the literals of the existing entry.
+      Handle<WeakCell> literals_cell =
+          isolate->factory()->NewWeakCell(literals);
+      old_literals_map->set(entry + kLiteralLiteralsOffset, *literals_cell);
+      return;
+    }
+
+    // Can we reuse an entry?
+    DCHECK(entry < 0);
+    int length = old_literals_map->length();
+    for (int i = 0; i < length; i += kLiteralEntryLength) {
+      if (WeakCell::cast(old_literals_map->get(i + kLiteralContextOffset))
+              ->cleared()) {
+        new_literals_map = old_literals_map;
+        entry = i;
+        break;
+      }
+    }
+
+    if (entry < 0) {
+      // Copy the old literals map and append one new entry.
+      new_literals_map = isolate->factory()->CopyFixedArrayAndGrow(
+          old_literals_map, kLiteralEntryLength, TENURED);
+      entry = old_literals_map->length();
+    }
+  }
+
+  Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
+  WeakCell* context_cell = native_context->self_weak_cell();
+
+  new_literals_map->set(entry + kLiteralContextOffset, context_cell);
+  new_literals_map->set(entry + kLiteralLiteralsOffset, *literals_cell);
+
+#ifdef DEBUG
+  for (int i = 0; i < new_literals_map->length(); i += kLiteralEntryLength) {
+    WeakCell* cell =
+        WeakCell::cast(new_literals_map->get(i + kLiteralContextOffset));
+    DCHECK(cell->cleared() || cell->value()->IsNativeContext());
+    cell = WeakCell::cast(new_literals_map->get(i + kLiteralLiteralsOffset));
+    DCHECK(cell->cleared() || (cell->value()->IsCell()));
+  }
+#endif
+
+  Object* old_literals_map = cache->get(cache_entry);
+  if (old_literals_map != *new_literals_map) {
+    cache->set(cache_entry, *new_literals_map);
+  }
+}
+
+Cell* SearchLiteralsMap(CompilationCacheTable* cache, int cache_entry,
+                        Context* native_context) {
+  Cell* result = nullptr;
+  int entry = SearchLiteralsMapEntry(cache, cache_entry, native_context);
+  if (entry >= 0) {
+    FixedArray* literals_map = FixedArray::cast(cache->get(cache_entry));
+    DCHECK_LE(entry + kLiteralEntryLength, literals_map->length());
+    WeakCell* cell =
+        WeakCell::cast(literals_map->get(entry + kLiteralLiteralsOffset));
+
+    result = cell->cleared() ? nullptr : Cell::cast(cell->value());
+  }
+  DCHECK(result == nullptr || result->IsCell());
+  return result;
+}
+
+}  // namespace
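These helpers maintain a flat array of (context, literals) weak pairs per cache entry. A non-V8 sketch of that layout, with cleared weak cells modeled as null pointers: lookup is a linear scan keyed by native context, and insertion reuses a cleared slot before growing.

#include <vector>

struct Context {};
struct Literals {};

struct Entry {
  const Context* context = nullptr;  // null models a cleared WeakCell
  Literals* literals = nullptr;
};

int Search(const std::vector<Entry>& map, const Context* ctx) {
  for (int i = 0; i < static_cast<int>(map.size()); ++i)
    if (map[i].context == ctx) return i;
  return -1;
}

void Add(std::vector<Entry>& map, const Context* ctx, Literals* lit) {
  int entry = Search(map, ctx);
  if (entry < 0) {  // prefer reusing a slot whose context was collected
    for (int i = 0; i < static_cast<int>(map.size()); ++i)
      if (map[i].context == nullptr) { entry = i; break; }
  }
  if (entry < 0) {  // no reusable slot: append a fresh one
    map.push_back(Entry{});
    entry = static_cast<int>(map.size()) - 1;
  }
  map[entry] = Entry{ctx, lit};
}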
+
+InfoVectorPair CompilationCacheTable::LookupScript(Handle<String> src,
+                                                   Handle<Context> context,
+                                                   LanguageMode language_mode) {
+  InfoVectorPair empty_result;
+  Handle<SharedFunctionInfo> shared(context->closure()->shared());
+  StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
+  int entry = FindEntry(&key);
+  if (entry == kNotFound) return empty_result;
+  int index = EntryToIndex(entry);
+  if (!get(index)->IsFixedArray()) return empty_result;
+  Object* obj = get(index + 1);
+  if (obj->IsSharedFunctionInfo()) {
+    Cell* literals =
+        SearchLiteralsMap(this, index + 2, context->native_context());
+    return InfoVectorPair(SharedFunctionInfo::cast(obj), literals);
+  }
+  return empty_result;
+}
+
+InfoVectorPair CompilationCacheTable::LookupEval(
+    Handle<String> src, Handle<SharedFunctionInfo> outer_info,
+    Handle<Context> native_context, LanguageMode language_mode, int position) {
+  InfoVectorPair empty_result;
+  StringSharedKey key(src, outer_info, language_mode, position);
+  int entry = FindEntry(&key);
+  if (entry == kNotFound) return empty_result;
+  int index = EntryToIndex(entry);
+  if (!get(index)->IsFixedArray()) return empty_result;
+  Object* obj = get(EntryToIndex(entry) + 1);
+  if (obj->IsSharedFunctionInfo()) {
+    Cell* literals =
+        SearchLiteralsMap(this, EntryToIndex(entry) + 2, *native_context);
+    return InfoVectorPair(SharedFunctionInfo::cast(obj), literals);
+  }
+  return empty_result;
+}
 
 Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
                                                    JSRegExp::Flags flags) {
@@ -17861,20 +17578,41 @@
   return cache;
 }
 
+Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
+    Handle<CompilationCacheTable> cache, Handle<String> src,
+    Handle<Context> context, LanguageMode language_mode,
+    Handle<SharedFunctionInfo> value, Handle<Cell> literals) {
+  Isolate* isolate = cache->GetIsolate();
+  Handle<SharedFunctionInfo> shared(context->closure()->shared());
+  Handle<Context> native_context(context->native_context());
+  StringSharedKey key(src, shared, language_mode, kNoSourcePosition);
+  Handle<Object> k = key.AsHandle(isolate);
+  cache = EnsureCapacity(cache, 1, &key);
+  int entry = cache->FindInsertionEntry(key.Hash());
+  cache->set(EntryToIndex(entry), *k);
+  cache->set(EntryToIndex(entry) + 1, *value);
+  AddToLiteralsMap(cache, EntryToIndex(entry) + 2, native_context, literals);
+  cache->ElementAdded();
+  return cache;
+}
 
 Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
     Handle<CompilationCacheTable> cache, Handle<String> src,
     Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
-    int scope_position) {
+    Handle<Context> native_context, Handle<Cell> literals, int position) {
   Isolate* isolate = cache->GetIsolate();
-  StringSharedKey key(src, outer_info, value->language_mode(), scope_position);
+  StringSharedKey key(src, outer_info, value->language_mode(), position);
   {
     Handle<Object> k = key.AsHandle(isolate);
-    DisallowHeapAllocation no_allocation_scope;
     int entry = cache->FindEntry(&key);
     if (entry != kNotFound) {
       cache->set(EntryToIndex(entry), *k);
       cache->set(EntryToIndex(entry) + 1, *value);
+      // AddToLiteralsMap may allocate a new sub-array to live in the entry,
+      // but it won't change the cache array itself. Therefore EntryToIndex
+      // and entry remain correct.
+      AddToLiteralsMap(cache, EntryToIndex(entry) + 2, native_context,
+                       literals);
       return cache;
     }
   }
@@ -17924,9 +17662,14 @@
       }
     } else if (get(entry_index)->IsFixedArray()) {
       SharedFunctionInfo* info = SharedFunctionInfo::cast(get(value_index));
-      if (info->code()->kind() != Code::FUNCTION || info->code()->IsOld()) {
-        NoWriteBarrierSet(this, entry_index, the_hole_value);
-        NoWriteBarrierSet(this, value_index, the_hole_value);
+      bool is_old =
+          info->IsInterpreted()
+              ? info->bytecode_array()->IsOld()
+              : info->code()->kind() != Code::FUNCTION || info->code()->IsOld();
+      if (is_old) {
+        for (int i = 0; i < kEntrySize; i++) {
+          NoWriteBarrierSet(this, entry_index + i, the_hole_value);
+        }
         ElementRemoved();
       }
     }
@@ -17941,8 +17684,9 @@
     int entry_index = EntryToIndex(entry);
     int value_index = entry_index + 1;
     if (get(value_index) == value) {
-      NoWriteBarrierSet(this, entry_index, the_hole_value);
-      NoWriteBarrierSet(this, value_index, the_hole_value);
+      for (int i = 0; i < kEntrySize; i++) {
+        NoWriteBarrierSet(this, entry_index + i, the_hole_value);
+      }
       ElementRemoved();
     }
   }
@@ -17962,44 +17706,24 @@
   return dict;
 }
 
-
 template <typename Derived, typename Shape, typename Key>
-Handle<FixedArray> Dictionary<Derived, Shape, Key>::BuildIterationIndicesArray(
-    Handle<Derived> dictionary) {
-  Isolate* isolate = dictionary->GetIsolate();
-  Factory* factory = isolate->factory();
-  int length = dictionary->NumberOfElements();
-
-  Handle<FixedArray> iteration_order = factory->NewFixedArray(length);
-  Handle<FixedArray> enumeration_order = factory->NewFixedArray(length);
-
-  // Fill both the iteration order array and the enumeration order array
-  // with property details.
-  int capacity = dictionary->Capacity();
-  int pos = 0;
-  for (int i = 0; i < capacity; i++) {
-    if (dictionary->IsKey(isolate, dictionary->KeyAt(i))) {
-      int index = dictionary->DetailsAt(i).dictionary_index();
-      iteration_order->set(pos, Smi::FromInt(i));
-      enumeration_order->set(pos, Smi::FromInt(index));
-      pos++;
-    }
-  }
-  DCHECK(pos == length);
-
-  // Sort the arrays wrt. enumeration order.
-  iteration_order->SortPairs(*enumeration_order, enumeration_order->length());
-  return iteration_order;
+Handle<Derived> Dictionary<Derived, Shape, Key>::NewEmpty(
+    Isolate* isolate, PretenureFlag pretenure) {
+  Handle<Derived> dict = DerivedHashTable::New(isolate, 1, pretenure);
+  // Adding even one element to the empty dictionary must cause reallocation.
+  DCHECK(!dict->HasSufficientCapacityToAdd(1));
+  // Initialize the next enumeration index.
+  dict->SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
+  return dict;
 }
 
-
 template <typename Derived, typename Shape, typename Key>
 Handle<FixedArray>
 Dictionary<Derived, Shape, Key>::GenerateNewEnumerationIndices(
     Handle<Derived> dictionary) {
   int length = dictionary->NumberOfElements();
 
-  Handle<FixedArray> iteration_order = BuildIterationIndicesArray(dictionary);
+  Handle<FixedArray> iteration_order = IterationIndices(dictionary);
   DCHECK(iteration_order->length() == length);
 
   // Iterate over the dictionary using the enumeration order and update
@@ -18130,15 +17854,15 @@
     if (!this->IsKey(isolate, k)) continue;
     DCHECK(!IsDeleted(i));
     PropertyDetails details = this->DetailsAt(i);
-    if (details.type() == ACCESSOR_CONSTANT) return true;
+    if (details.kind() == kAccessor) return true;
     PropertyAttributes attr = details.attributes();
     if (attr & ALL_ATTRIBUTES_MASK) return true;
   }
   return false;
 }
 
-void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key,
-                                                bool used_as_prototype) {
+void SeededNumberDictionary::UpdateMaxNumberKey(
+    uint32_t key, Handle<JSObject> dictionary_holder) {
   DisallowHeapAllocation no_allocation;
   // If the dictionary requires slow elements, an element has already
   // been added at a high index.
@@ -18146,9 +17870,8 @@
   // Check if this index is high enough that we should require slow
   // elements.
   if (key > kRequiresSlowElementsLimit) {
-    if (used_as_prototype) {
-      // TODO(verwaest): Remove this hack.
-      TypeFeedbackVector::ClearAllKeyedStoreICs(GetIsolate());
+    if (!dictionary_holder.is_null()) {
+      dictionary_holder->RequireSlowElements(this);
     }
     set_requires_slow_elements();
     return;
@@ -18161,11 +17884,11 @@
   }
 }
 
-
 Handle<SeededNumberDictionary> SeededNumberDictionary::AddNumberEntry(
     Handle<SeededNumberDictionary> dictionary, uint32_t key,
-    Handle<Object> value, PropertyDetails details, bool used_as_prototype) {
-  dictionary->UpdateMaxNumberKey(key, used_as_prototype);
+    Handle<Object> value, PropertyDetails details,
+    Handle<JSObject> dictionary_holder) {
+  dictionary->UpdateMaxNumberKey(key, dictionary_holder);
   SLOW_DCHECK(dictionary->FindEntry(key) == kNotFound);
   return Add(dictionary, key, value, details);
 }
@@ -18193,8 +17916,8 @@
 
 Handle<SeededNumberDictionary> SeededNumberDictionary::AtNumberPut(
     Handle<SeededNumberDictionary> dictionary, uint32_t key,
-    Handle<Object> value, bool used_as_prototype) {
-  dictionary->UpdateMaxNumberKey(key, used_as_prototype);
+    Handle<Object> value, Handle<JSObject> dictionary_holder) {
+  dictionary->UpdateMaxNumberKey(key, dictionary_holder);
   return AtPut(dictionary, key, value);
 }
 
@@ -18206,13 +17929,13 @@
   return AtPut(dictionary, key, value);
 }
 
-
 Handle<SeededNumberDictionary> SeededNumberDictionary::Set(
     Handle<SeededNumberDictionary> dictionary, uint32_t key,
-    Handle<Object> value, PropertyDetails details, bool used_as_prototype) {
+    Handle<Object> value, PropertyDetails details,
+    Handle<JSObject> dictionary_holder) {
   int entry = dictionary->FindEntry(key);
   if (entry == kNotFound) {
-    return AddNumberEntry(dictionary, key, value, details, used_as_prototype);
+    return AddNumberEntry(dictionary, key, value, details, dictionary_holder);
   }
   // Preserve enumeration index.
   details = details.set_index(dictionary->DetailsAt(entry).dictionary_index());
@@ -18271,6 +17994,7 @@
     Handle<Dictionary<Derived, Shape, Key>> dictionary,
     Handle<FixedArray> storage, KeyCollectionMode mode,
     KeyAccumulator* accumulator) {
+  DCHECK_IMPLIES(mode != KeyCollectionMode::kOwnOnly, accumulator != nullptr);
   Isolate* isolate = dictionary->GetIsolate();
   int length = storage->length();
   int capacity = dictionary->Capacity();
@@ -18296,7 +18020,7 @@
       storage->set(properties, Smi::FromInt(i));
     }
     properties++;
-    if (properties == length) break;
+    if (mode == KeyCollectionMode::kOwnOnly && properties == length) break;
   }
 
   CHECK_EQ(length, properties);
@@ -18313,6 +18037,34 @@
 }
 
 template <typename Derived, typename Shape, typename Key>
+Handle<FixedArray> Dictionary<Derived, Shape, Key>::IterationIndices(
+    Handle<Dictionary<Derived, Shape, Key>> dictionary) {
+  Isolate* isolate = dictionary->GetIsolate();
+  int capacity = dictionary->Capacity();
+  int length = dictionary->NumberOfElements();
+  Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
+  int array_size = 0;
+  {
+    DisallowHeapAllocation no_gc;
+    Dictionary<Derived, Shape, Key>* raw_dict = *dictionary;
+    for (int i = 0; i < capacity; i++) {
+      Object* k = raw_dict->KeyAt(i);
+      if (!raw_dict->IsKey(isolate, k)) continue;
+      if (raw_dict->IsDeleted(i)) continue;
+      array->set(array_size++, Smi::FromInt(i));
+    }
+
+    DCHECK_EQ(array_size, length);
+
+    EnumIndexComparator<Derived> cmp(static_cast<Derived*>(raw_dict));
+    Smi** start = reinterpret_cast<Smi**>(array->GetFirstElementAddress());
+    std::sort(start, start + array_size, cmp);
+  }
+  array->Shrink(array_size);
+  return array;
+}
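A standalone restatement of IterationIndices (illustrative, not the V8 types): collect the occupied slot indices of the hash-ordered table, then sort them by each entry's enumeration index, so iteration observes property creation order rather than hash order.

#include <algorithm>
#include <vector>

// enum_index_of_slot[i] holds the entry's enumeration index, or -1 if slot i
// is empty/deleted; the result lists occupied slots in enumeration order.
std::vector<int> IterationOrder(const std::vector<int>& enum_index_of_slot) {
  std::vector<int> slots;
  for (int i = 0; i < static_cast<int>(enum_index_of_slot.size()); ++i)
    if (enum_index_of_slot[i] >= 0) slots.push_back(i);
  std::sort(slots.begin(), slots.end(), [&](int a, int b) {
    return enum_index_of_slot[a] < enum_index_of_slot[b];
  });
  return slots;
}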
+
+template <typename Derived, typename Shape, typename Key>
 void Dictionary<Derived, Shape, Key>::CollectKeysTo(
     Handle<Dictionary<Derived, Shape, Key>> dictionary, KeyAccumulator* keys) {
   Isolate* isolate = keys->isolate();
@@ -18966,6 +18718,40 @@
   return was_present;
 }
 
+Handle<JSArray> JSWeakCollection::GetEntries(Handle<JSWeakCollection> holder,
+                                             int max_entries) {
+  Isolate* isolate = holder->GetIsolate();
+  Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+  if (max_entries == 0 || max_entries > table->NumberOfElements()) {
+    max_entries = table->NumberOfElements();
+  }
+  int values_per_entry = holder->IsJSWeakMap() ? 2 : 1;
+  Handle<FixedArray> entries =
+      isolate->factory()->NewFixedArray(max_entries * values_per_entry);
+  // Recompute max_entries because GC could have removed elements from the table.
+  if (max_entries > table->NumberOfElements()) {
+    max_entries = table->NumberOfElements();
+  }
+
+  {
+    DisallowHeapAllocation no_gc;
+    int count = 0;
+    for (int i = 0;
+         count / values_per_entry < max_entries && i < table->Capacity(); i++) {
+      Handle<Object> key(table->KeyAt(i), isolate);
+      if (table->IsKey(isolate, *key)) {
+        entries->set(count++, *key);
+        if (values_per_entry > 1) {
+          Object* value = table->Lookup(key);
+          entries->set(count++, value);
+        }
+      }
+    }
+    DCHECK_EQ(max_entries * values_per_entry, count);
+  }
+  return isolate->factory()->NewJSArrayWithElements(entries);
+}
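GetEntries snapshots a weak table into a plain array so callers never iterate the weak backing store directly. A simplified model under that assumption (optional slots stand in for cleared weak entries; two values per entry for a map, one for a set):

#include <optional>
#include <utility>
#include <vector>

std::vector<int> Snapshot(
    const std::vector<std::optional<std::pair<int, int>>>& table,
    int max_entries, bool is_map) {
  std::vector<int> out;
  int count = 0;
  for (const auto& slot : table) {
    if (count >= max_entries) break;
    if (!slot) continue;  // key was collected; skip the cleared slot
    out.push_back(slot->first);               // the key
    if (is_map) out.push_back(slot->second);  // the value, for map-like tables
    ++count;
  }
  return out;
}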
+
 // Check if there is a break point at this source position.
 bool DebugInfo::HasBreakPoint(int source_position) {
   // Get the break point info object for this code offset.
@@ -19052,11 +18838,8 @@
   DCHECK(index != kNoBreakPointInfo);
 
   // Allocate new BreakPointInfo object and set the break point.
-  Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
-      isolate->factory()->NewStruct(BREAK_POINT_INFO_TYPE));
-  new_break_point_info->set_source_position(source_position);
-  new_break_point_info->set_break_point_objects(
-      isolate->heap()->undefined_value());
+  Handle<BreakPointInfo> new_break_point_info =
+      isolate->factory()->NewBreakPointInfo(source_position);
   BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
   debug_info->break_points()->set(index, *new_break_point_info);
 }
@@ -19366,27 +19149,6 @@
 }
 
 
-// static
-MaybeHandle<Object> JSDate::ToPrimitive(Handle<JSReceiver> receiver,
-                                        Handle<Object> hint) {
-  Isolate* const isolate = receiver->GetIsolate();
-  if (hint->IsString()) {
-    Handle<String> hint_string = Handle<String>::cast(hint);
-    if (hint_string->Equals(isolate->heap()->number_string())) {
-      return JSReceiver::OrdinaryToPrimitive(receiver,
-                                             OrdinaryToPrimitiveHint::kNumber);
-    }
-    if (hint_string->Equals(isolate->heap()->default_string()) ||
-        hint_string->Equals(isolate->heap()->string_string())) {
-      return JSReceiver::OrdinaryToPrimitive(receiver,
-                                             OrdinaryToPrimitiveHint::kString);
-    }
-  }
-  THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kInvalidHint, hint),
-                  Object);
-}
-
-
 void JSDate::SetCachedFields(int64_t local_time_ms, DateCache* date_cache) {
   int days = DateCache::DaysFromTime(local_time_ms);
   int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
@@ -19472,6 +19234,11 @@
   set_backing_store(NULL);
   set_byte_length(Smi::kZero);
   set_was_neutered(true);
+  // Invalidate the neutering protector.
+  Isolate* const isolate = GetIsolate();
+  if (isolate->IsArrayBufferNeuteringIntact()) {
+    isolate->InvalidateArrayBufferNeuteringProtector();
+  }
 }
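The new code invalidates a protector cell. A sketch of the pattern (illustrative names, not the Isolate API): fast paths are compiled against the global assumption "no ArrayBuffer has ever been neutered"; the first neutering flips one cell, after which code that baked in the assumption must re-check (in V8, by deoptimizing).

struct Protector {
  bool intact = true;
};

Protector neutering_protector;  // one global cell guards the assumption

void OnNeuter() {
  if (neutering_protector.intact) {
    neutering_protector.intact = false;  // invalidate exactly once
    // V8 additionally deoptimizes code compiled under the old assumption.
  }
}

bool FastPathAllowed() { return neutering_protector.intact; }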
 
 
@@ -19719,24 +19486,14 @@
 
 int JSGeneratorObject::source_position() const {
   CHECK(is_suspended());
-  AbstractCode* code;
-  int code_offset;
-  if (function()->shared()->HasBytecodeArray()) {
-    // New-style generators.
-    DCHECK(!function()->shared()->HasBaselineCode());
-    code_offset = Smi::cast(input_or_debug_pos())->value();
-    // The stored bytecode offset is relative to a different base than what
-    // is used in the source position table, hence the subtraction.
-    code_offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
-    code = AbstractCode::cast(function()->shared()->bytecode_array());
-  } else {
-    // Old-style generators.
-    DCHECK(function()->shared()->HasBaselineCode());
-    code_offset = continuation();
-    CHECK(0 <= code_offset);
-    CHECK(code_offset < function()->code()->instruction_size());
-    code = AbstractCode::cast(function()->shared()->code());
-  }
+  DCHECK(function()->shared()->HasBytecodeArray());
+  DCHECK(!function()->shared()->HasBaselineCode());
+  int code_offset = Smi::cast(input_or_debug_pos())->value();
+  // The stored bytecode offset is relative to a different base than what
+  // is used in the source position table, hence the subtraction.
+  code_offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
+  AbstractCode* code =
+      AbstractCode::cast(function()->shared()->bytecode_array());
   return code->SourcePosition(code_offset);
 }
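The subtraction rebases the stored offset: the suspended generator records an offset measured from the tagged BytecodeArray pointer, while the source position table counts from the first bytecode. With illustrative constants (not the real object layout):

constexpr int kHeaderSize = 16;    // illustrative BytecodeArray header size
constexpr int kHeapObjectTag = 1;  // V8 tags heap pointers with low bit 1
constexpr int stored_offset = 39;  // measured from the tagged object pointer
constexpr int table_offset = stored_offset - (kHeaderSize - kHeapObjectTag);
static_assert(table_offset == 24, "now measured from the first bytecode");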
 
diff --git a/src/objects.h b/src/objects.h
index 747a4f0..04d3d38 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -38,6 +38,8 @@
 #include "src/s390/constants-s390.h"  // NOLINT
 #endif
 
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
 
 //
 // Most object types in the V8 JavaScript heap are described in this file.
@@ -72,7 +74,6 @@
 //           - JSDate
 //         - JSMessageObject
 //         - JSModuleNamespace
-//         - JSFixedArrayIterator
 //       - JSProxy
 //     - FixedArrayBase
 //       - ByteArray
@@ -80,7 +81,6 @@
 //       - FixedArray
 //         - DescriptorArray
 //         - FrameArray
-//         - LiteralsArray
 //         - HashTable
 //           - Dictionary
 //           - StringTable
@@ -92,8 +92,8 @@
 //           - OrderedHashSet
 //           - OrderedHashMap
 //         - Context
-//         - TypeFeedbackMetadata
-//         - TypeFeedbackVector
+//         - FeedbackMetadata
+//         - FeedbackVector
 //         - TemplateList
 //         - TransitionArray
 //         - ScopeInfo
@@ -108,6 +108,7 @@
 //           - SeqTwoByteString
 //         - SlicedString
 //         - ConsString
+//         - ThinString
 //         - ExternalString
 //           - ExternalOneByteString
 //           - ExternalTwoByteString
@@ -121,17 +122,6 @@
 //             - ExternalTwoByteInternalizedString
 //       - Symbol
 //     - HeapNumber
-//     - Simd128Value
-//       - Float32x4
-//       - Int32x4
-//       - Uint32x4
-//       - Bool32x4
-//       - Int16x8
-//       - Uint16x8
-//       - Bool16x8
-//       - Int8x16
-//       - Uint8x16
-//       - Bool8x16
 //     - Cell
 //     - PropertyCell
 //     - Code
@@ -141,7 +131,6 @@
 //     - Foreign
 //     - SharedFunctionInfo
 //     - Struct
-//       - Box
 //       - AccessorInfo
 //       - PromiseResolveThenableJobInfo
 //       - PromiseReactionJobInfo
@@ -181,29 +170,12 @@
   STORE_NO_TRANSITION_HANDLE_COW
 };
 
-
-enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-
 enum MutableMode {
   MUTABLE,
   IMMUTABLE
 };
 
 
-enum ExternalArrayType {
-  kExternalInt8Array = 1,
-  kExternalUint8Array,
-  kExternalInt16Array,
-  kExternalUint16Array,
-  kExternalInt32Array,
-  kExternalUint32Array,
-  kExternalFloat32Array,
-  kExternalFloat64Array,
-  kExternalUint8ClampedArray,
-};
-
-
 static inline bool IsTransitionStoreMode(KeyedAccessStoreMode store_mode) {
   return store_mode == STORE_TRANSITION_TO_OBJECT ||
          store_mode == STORE_TRANSITION_TO_DOUBLE ||
@@ -244,10 +216,6 @@
 };
 
 
-// Indicates whether a value can be loaded as a constant.
-enum StoreMode { ALLOW_IN_DESCRIPTOR, FORCE_FIELD };
-
-
 // PropertyNormalizationMode is used to specify whether to keep
 // inobject properties when normalizing properties of a JSObject.
 enum PropertyNormalizationMode {
@@ -288,19 +256,6 @@
   OWN_DESCRIPTORS
 };
 
-// The GC maintains a bit of information, the MarkingParity, which toggles
-// from odd to even and back every time marking is completed. Incremental
-// marking can visit an object twice during a marking phase, so algorithms that
-// that piggy-back on marking can use the parity to ensure that they only
-// perform an operation on an object once per marking phase: they record the
-// MarkingParity when they visit an object, and only re-visit the object when it
-// is marked again and the MarkingParity changes.
-enum MarkingParity {
-  NO_MARKING_PARITY,
-  ODD_MARKING_PARITY,
-  EVEN_MARKING_PARITY
-};
-
 // ICs store extra state in a Code object. The default extra state is
 // kNoExtraICState.
 typedef int ExtraICState;
@@ -338,38 +293,35 @@
 // JSObject for GC purposes. The first four entries here have typeof
 // 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
 #define INSTANCE_TYPE_LIST(V)                                   \
-  V(STRING_TYPE)                                                \
-  V(ONE_BYTE_STRING_TYPE)                                       \
-  V(CONS_STRING_TYPE)                                           \
-  V(CONS_ONE_BYTE_STRING_TYPE)                                  \
-  V(SLICED_STRING_TYPE)                                         \
-  V(SLICED_ONE_BYTE_STRING_TYPE)                                \
-  V(EXTERNAL_STRING_TYPE)                                       \
-  V(EXTERNAL_ONE_BYTE_STRING_TYPE)                              \
-  V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE)                    \
-  V(SHORT_EXTERNAL_STRING_TYPE)                                 \
-  V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE)                        \
-  V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE)              \
-                                                                \
   V(INTERNALIZED_STRING_TYPE)                                   \
-  V(ONE_BYTE_INTERNALIZED_STRING_TYPE)                          \
   V(EXTERNAL_INTERNALIZED_STRING_TYPE)                          \
+  V(ONE_BYTE_INTERNALIZED_STRING_TYPE)                          \
   V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE)                 \
   V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE)       \
   V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE)                    \
   V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE)           \
   V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+  V(STRING_TYPE)                                                \
+  V(CONS_STRING_TYPE)                                           \
+  V(EXTERNAL_STRING_TYPE)                                       \
+  V(SLICED_STRING_TYPE)                                         \
+  V(THIN_STRING_TYPE)                                           \
+  V(ONE_BYTE_STRING_TYPE)                                       \
+  V(CONS_ONE_BYTE_STRING_TYPE)                                  \
+  V(EXTERNAL_ONE_BYTE_STRING_TYPE)                              \
+  V(SLICED_ONE_BYTE_STRING_TYPE)                                \
+  V(THIN_ONE_BYTE_STRING_TYPE)                                  \
+  V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE)                    \
+  V(SHORT_EXTERNAL_STRING_TYPE)                                 \
+  V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE)                        \
+  V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE)              \
                                                                 \
   V(SYMBOL_TYPE)                                                \
-  V(SIMD128_VALUE_TYPE)                                         \
+  V(HEAP_NUMBER_TYPE)                                           \
+  V(ODDBALL_TYPE)                                               \
                                                                 \
   V(MAP_TYPE)                                                   \
   V(CODE_TYPE)                                                  \
-  V(ODDBALL_TYPE)                                               \
-  V(CELL_TYPE)                                                  \
-  V(PROPERTY_CELL_TYPE)                                         \
-                                                                \
-  V(HEAP_NUMBER_TYPE)                                           \
   V(MUTABLE_HEAP_NUMBER_TYPE)                                   \
   V(FOREIGN_TYPE)                                               \
   V(BYTE_ARRAY_TYPE)                                            \
@@ -386,6 +338,7 @@
   V(FIXED_FLOAT64_ARRAY_TYPE)                                   \
   V(FIXED_UINT8_CLAMPED_ARRAY_TYPE)                             \
                                                                 \
+  V(FIXED_DOUBLE_ARRAY_TYPE)                                    \
   V(FILLER_TYPE)                                                \
                                                                 \
   V(ACCESSOR_INFO_TYPE)                                         \
@@ -395,68 +348,69 @@
   V(CALL_HANDLER_INFO_TYPE)                                     \
   V(FUNCTION_TEMPLATE_INFO_TYPE)                                \
   V(OBJECT_TEMPLATE_INFO_TYPE)                                  \
-  V(SIGNATURE_INFO_TYPE)                                        \
-  V(TYPE_SWITCH_INFO_TYPE)                                      \
-  V(ALLOCATION_MEMENTO_TYPE)                                    \
   V(ALLOCATION_SITE_TYPE)                                       \
+  V(ALLOCATION_MEMENTO_TYPE)                                    \
   V(SCRIPT_TYPE)                                                \
   V(TYPE_FEEDBACK_INFO_TYPE)                                    \
   V(ALIASED_ARGUMENTS_ENTRY_TYPE)                               \
-  V(BOX_TYPE)                                                   \
   V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE)                     \
   V(PROMISE_REACTION_JOB_INFO_TYPE)                             \
+  V(DEBUG_INFO_TYPE)                                            \
+  V(BREAK_POINT_INFO_TYPE)                                      \
   V(PROTOTYPE_INFO_TYPE)                                        \
+  V(TUPLE2_TYPE)                                                \
   V(TUPLE3_TYPE)                                                \
   V(CONTEXT_EXTENSION_TYPE)                                     \
+  V(CONSTANT_ELEMENTS_PAIR_TYPE)                                \
   V(MODULE_TYPE)                                                \
   V(MODULE_INFO_ENTRY_TYPE)                                     \
-                                                                \
   V(FIXED_ARRAY_TYPE)                                           \
-  V(FIXED_DOUBLE_ARRAY_TYPE)                                    \
-  V(SHARED_FUNCTION_INFO_TYPE)                                  \
-  V(WEAK_CELL_TYPE)                                             \
   V(TRANSITION_ARRAY_TYPE)                                      \
+  V(SHARED_FUNCTION_INFO_TYPE)                                  \
+  V(CELL_TYPE)                                                  \
+  V(WEAK_CELL_TYPE)                                             \
+  V(PROPERTY_CELL_TYPE)                                         \
                                                                 \
-  V(JS_MESSAGE_OBJECT_TYPE)                                     \
-                                                                \
+  V(JS_PROXY_TYPE)                                              \
+  V(JS_GLOBAL_OBJECT_TYPE)                                      \
+  V(JS_GLOBAL_PROXY_TYPE)                                       \
+  V(JS_SPECIAL_API_OBJECT_TYPE)                                 \
   V(JS_VALUE_TYPE)                                              \
+  V(JS_MESSAGE_OBJECT_TYPE)                                     \
   V(JS_DATE_TYPE)                                               \
+  V(JS_API_OBJECT_TYPE)                                         \
   V(JS_OBJECT_TYPE)                                             \
   V(JS_ARGUMENTS_TYPE)                                          \
   V(JS_CONTEXT_EXTENSION_OBJECT_TYPE)                           \
   V(JS_GENERATOR_OBJECT_TYPE)                                   \
   V(JS_MODULE_NAMESPACE_TYPE)                                   \
-  V(JS_FIXED_ARRAY_ITERATOR_TYPE)                               \
-  V(JS_GLOBAL_OBJECT_TYPE)                                      \
-  V(JS_GLOBAL_PROXY_TYPE)                                       \
-  V(JS_API_OBJECT_TYPE)                                         \
-  V(JS_SPECIAL_API_OBJECT_TYPE)                                 \
   V(JS_ARRAY_TYPE)                                              \
   V(JS_ARRAY_BUFFER_TYPE)                                       \
   V(JS_TYPED_ARRAY_TYPE)                                        \
   V(JS_DATA_VIEW_TYPE)                                          \
-  V(JS_PROXY_TYPE)                                              \
   V(JS_SET_TYPE)                                                \
   V(JS_MAP_TYPE)                                                \
   V(JS_SET_ITERATOR_TYPE)                                       \
   V(JS_MAP_ITERATOR_TYPE)                                       \
   V(JS_WEAK_MAP_TYPE)                                           \
   V(JS_WEAK_SET_TYPE)                                           \
+  V(JS_PROMISE_CAPABILITY_TYPE)                                 \
   V(JS_PROMISE_TYPE)                                            \
   V(JS_REGEXP_TYPE)                                             \
   V(JS_ERROR_TYPE)                                              \
+  V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE)                           \
   V(JS_STRING_ITERATOR_TYPE)                                    \
                                                                 \
   V(JS_TYPED_ARRAY_KEY_ITERATOR_TYPE)                           \
   V(JS_FAST_ARRAY_KEY_ITERATOR_TYPE)                            \
   V(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)                         \
                                                                 \
-  V(JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE)                      \
   V(JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE)                     \
-  V(JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE)                     \
+  V(JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE)                      \
   V(JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE)                    \
-  V(JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE)                     \
+  V(JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE)                     \
   V(JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE)                    \
+  V(JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE)                     \
   V(JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE)                   \
   V(JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE)                   \
   V(JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE)             \
@@ -469,12 +423,12 @@
   V(JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE)         \
   V(JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE)                   \
                                                                 \
-  V(JS_INT8_ARRAY_VALUE_ITERATOR_TYPE)                          \
   V(JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE)                         \
-  V(JS_INT16_ARRAY_VALUE_ITERATOR_TYPE)                         \
+  V(JS_INT8_ARRAY_VALUE_ITERATOR_TYPE)                          \
   V(JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE)                        \
-  V(JS_INT32_ARRAY_VALUE_ITERATOR_TYPE)                         \
+  V(JS_INT16_ARRAY_VALUE_ITERATOR_TYPE)                         \
   V(JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE)                        \
+  V(JS_INT32_ARRAY_VALUE_ITERATOR_TYPE)                         \
   V(JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE)                       \
   V(JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE)                       \
   V(JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE)                 \
@@ -488,9 +442,7 @@
   V(JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE)                       \
                                                                 \
   V(JS_BOUND_FUNCTION_TYPE)                                     \
-  V(JS_FUNCTION_TYPE)                                           \
-  V(DEBUG_INFO_TYPE)                                            \
-  V(BREAK_POINT_INFO_TYPE)
+  V(JS_FUNCTION_TYPE)
 
 // Since string types are not consecutive, this macro is used to
 // iterate over them.
@@ -541,7 +493,10 @@
   V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE,               \
     ExternalTwoByteString::kShortSize,                                        \
     short_external_internalized_string_with_one_byte_data,                    \
-    ShortExternalInternalizedStringWithOneByteData)
+    ShortExternalInternalizedStringWithOneByteData)                           \
+  V(THIN_STRING_TYPE, ThinString::kSize, thin_string, ThinString)             \
+  V(THIN_ONE_BYTE_STRING_TYPE, ThinString::kSize, thin_one_byte_string,       \
+    ThinOneByteString)
 
 // A struct is a simple object with a set of object-valued fields.  Including an
 // object type in this causes the compiler to generate most of the boilerplate
@@ -553,11 +508,6 @@
 // type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
 // manually.
 #define STRUCT_LIST(V)                                                       \
-  V(BOX, Box, box)                                                           \
-  V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo,        \
-    promise_resolve_thenable_job_info)                                       \
-  V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo,                       \
-    promise_reaction_job_info)                                               \
   V(ACCESSOR_INFO, AccessorInfo, accessor_info)                              \
   V(ACCESSOR_PAIR, AccessorPair, accessor_pair)                              \
   V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info)                   \
@@ -565,18 +515,24 @@
   V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info)                   \
   V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info)    \
   V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info)          \
-  V(SCRIPT, Script, script)                                                  \
   V(ALLOCATION_SITE, AllocationSite, allocation_site)                        \
   V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento)               \
+  V(SCRIPT, Script, script)                                                  \
   V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info)                \
   V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
+  V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo,        \
+    promise_resolve_thenable_job_info)                                       \
+  V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo,                       \
+    promise_reaction_job_info)                                               \
   V(DEBUG_INFO, DebugInfo, debug_info)                                       \
   V(BREAK_POINT_INFO, BreakPointInfo, break_point_info)                      \
   V(PROTOTYPE_INFO, PrototypeInfo, prototype_info)                           \
+  V(TUPLE2, Tuple2, tuple2)                                                  \
   V(TUPLE3, Tuple3, tuple3)                                                  \
+  V(CONTEXT_EXTENSION, ContextExtension, context_extension)                  \
+  V(CONSTANT_ELEMENTS_PAIR, ConstantElementsPair, constant_elements_pair)    \
   V(MODULE, Module, module)                                                  \
-  V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry)                   \
-  V(CONTEXT_EXTENSION, ContextExtension, context_extension)
+  V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry)
 
 // We use the full 8 bits of the instance_type field to encode heap object
 // instance types.  The high-order bit (bit 7) is set if the object is not a
@@ -591,20 +547,21 @@
 const uint32_t kNotInternalizedTag = 0x40;
 const uint32_t kInternalizedTag = 0x0;
 
-// If bit 7 is clear then bit 2 indicates whether the string consists of
+// If bit 7 is clear then bit 3 indicates whether the string consists of
 // two-byte characters or one-byte characters.
-const uint32_t kStringEncodingMask = 0x4;
+const uint32_t kStringEncodingMask = 0x8;
 const uint32_t kTwoByteStringTag = 0x0;
-const uint32_t kOneByteStringTag = 0x4;
+const uint32_t kOneByteStringTag = 0x8;
 
-// If bit 7 is clear, the low-order 2 bits indicate the representation
+// If bit 7 is clear, the low-order 3 bits indicate the representation
 // of the string.
-const uint32_t kStringRepresentationMask = 0x03;
+const uint32_t kStringRepresentationMask = 0x07;
 enum StringRepresentationTag {
   kSeqStringTag = 0x0,
   kConsStringTag = 0x1,
   kExternalStringTag = 0x2,
-  kSlicedStringTag = 0x3
+  kSlicedStringTag = 0x3,
+  kThinStringTag = 0x5
 };
 const uint32_t kIsIndirectStringMask = 0x1;
 const uint32_t kIsIndirectStringTag = 0x1;
@@ -614,22 +571,17 @@
                kIsIndirectStringMask) == kIsIndirectStringTag);  // NOLINT
 STATIC_ASSERT((kSlicedStringTag &
                kIsIndirectStringMask) == kIsIndirectStringTag);  // NOLINT
+STATIC_ASSERT((kThinStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
 
-// Use this mask to distinguish between cons and slice only after making
-// sure that the string is one of the two (an indirect string).
-const uint32_t kSlicedNotConsMask = kSlicedStringTag & ~kConsStringTag;
-STATIC_ASSERT(IS_POWER_OF_TWO(kSlicedNotConsMask));
-
-// If bit 7 is clear, then bit 3 indicates whether this two-byte
+// If bit 7 is clear, then bit 4 indicates whether this two-byte
 // string actually contains one byte data.
-const uint32_t kOneByteDataHintMask = 0x08;
-const uint32_t kOneByteDataHintTag = 0x08;
+const uint32_t kOneByteDataHintMask = 0x10;
+const uint32_t kOneByteDataHintTag = 0x10;
 
 // If bit 7 is clear and string representation indicates an external string,
-// then bit 4 indicates whether the data pointer is cached.
-const uint32_t kShortExternalStringMask = 0x10;
-const uint32_t kShortExternalStringTag = 0x10;
-
+// then bit 5 indicates whether the data pointer is cached.
+const uint32_t kShortExternalStringMask = 0x20;
+const uint32_t kShortExternalStringTag = 0x20;
 
 // A ConsString with an empty string as the right side is a candidate
 // for being shortcut by the garbage collector. We don't allocate any
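A compile-time restatement of the widened tag scheme above, with the constant values copied from this header: with three representation bits, the indirect representations (cons 0x1, sliced 0x3, and the new thin 0x5) are exactly those with the low bit set, which is what kIsIndirectStringMask tests.

#include <cstdint>

constexpr uint32_t kRepMask = 0x07, kEncMask = 0x08, kIndirectMask = 0x01;
constexpr uint32_t kSeq = 0x0, kCons = 0x1, kExternal = 0x2, kSliced = 0x3,
                   kThin = 0x5;

static_assert((kCons & kIndirectMask) == 1, "cons strings are indirect");
static_assert((kSliced & kIndirectMask) == 1, "sliced strings are indirect");
static_assert((kThin & kIndirectMask) == 1, "thin strings are indirect");
static_assert((kSeq & kIndirectMask) == 0 && (kExternal & kIndirectMask) == 0,
              "sequential and external strings hold their own characters");
static_assert((kThin & kRepMask) == kThin && (kThin & kEncMask) == 0,
              "the thin tag fits the widened 3-bit representation field");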
@@ -693,13 +645,15 @@
   SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
       SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
       kNotInternalizedTag,
+  THIN_STRING_TYPE = kTwoByteStringTag | kThinStringTag | kNotInternalizedTag,
+  THIN_ONE_BYTE_STRING_TYPE =
+      kOneByteStringTag | kThinStringTag | kNotInternalizedTag,
 
   // Non-string names
   SYMBOL_TYPE = kNotStringTag,  // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
 
   // Other primitives (cannot contain non-map-word pointers to heap objects).
   HEAP_NUMBER_TYPE,
-  SIMD128_VALUE_TYPE,
   ODDBALL_TYPE,  // LAST_PRIMITIVE_TYPE
 
   // Objects allocated in their own spaces (never in new space).
@@ -733,29 +687,28 @@
   CALL_HANDLER_INFO_TYPE,
   FUNCTION_TEMPLATE_INFO_TYPE,
   OBJECT_TEMPLATE_INFO_TYPE,
-  SIGNATURE_INFO_TYPE,
-  TYPE_SWITCH_INFO_TYPE,
   ALLOCATION_SITE_TYPE,
   ALLOCATION_MEMENTO_TYPE,
   SCRIPT_TYPE,
   TYPE_FEEDBACK_INFO_TYPE,
   ALIASED_ARGUMENTS_ENTRY_TYPE,
-  BOX_TYPE,
   PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
   PROMISE_REACTION_JOB_INFO_TYPE,
   DEBUG_INFO_TYPE,
   BREAK_POINT_INFO_TYPE,
+  PROTOTYPE_INFO_TYPE,
+  TUPLE2_TYPE,
+  TUPLE3_TYPE,
+  CONTEXT_EXTENSION_TYPE,
+  CONSTANT_ELEMENTS_PAIR_TYPE,
+  MODULE_TYPE,
+  MODULE_INFO_ENTRY_TYPE,
   FIXED_ARRAY_TYPE,
+  TRANSITION_ARRAY_TYPE,
   SHARED_FUNCTION_INFO_TYPE,
   CELL_TYPE,
   WEAK_CELL_TYPE,
-  TRANSITION_ARRAY_TYPE,
   PROPERTY_CELL_TYPE,
-  PROTOTYPE_INFO_TYPE,
-  TUPLE3_TYPE,
-  CONTEXT_EXTENSION_TYPE,
-  MODULE_TYPE,
-  MODULE_INFO_ENTRY_TYPE,
 
   // All the following types are subtypes of JSReceiver, which corresponds to
   // objects in the JS sense. The first and the last type in this range are
@@ -777,7 +730,6 @@
   JS_CONTEXT_EXTENSION_OBJECT_TYPE,
   JS_GENERATOR_OBJECT_TYPE,
   JS_MODULE_NAMESPACE_TYPE,
-  JS_FIXED_ARRAY_ITERATOR_TYPE,
   JS_ARRAY_TYPE,
   JS_ARRAY_BUFFER_TYPE,
   JS_TYPED_ARRAY_TYPE,
@@ -788,9 +740,11 @@
   JS_MAP_ITERATOR_TYPE,
   JS_WEAK_MAP_TYPE,
   JS_WEAK_SET_TYPE,
+  JS_PROMISE_CAPABILITY_TYPE,
   JS_PROMISE_TYPE,
   JS_REGEXP_TYPE,
   JS_ERROR_TYPE,
+  JS_ASYNC_FROM_SYNC_ITERATOR_TYPE,
   JS_STRING_ITERATOR_TYPE,
 
   JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
@@ -914,7 +868,6 @@
   V(HANDLER_TABLE_SUB_TYPE)                      \
   V(JS_COLLECTION_SUB_TYPE)                      \
   V(JS_WEAK_COLLECTION_SUB_TYPE)                 \
-  V(LITERALS_ARRAY_SUB_TYPE)                     \
   V(MAP_CODE_CACHE_SUB_TYPE)                     \
   V(NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE)     \
   V(NUMBER_STRING_CACHE_SUB_TYPE)                \
@@ -933,8 +886,8 @@
   V(STRING_SPLIT_CACHE_SUB_TYPE)                 \
   V(STRING_TABLE_SUB_TYPE)                       \
   V(TEMPLATE_INFO_SUB_TYPE)                      \
-  V(TYPE_FEEDBACK_VECTOR_SUB_TYPE)               \
-  V(TYPE_FEEDBACK_METADATA_SUB_TYPE)             \
+  V(FEEDBACK_VECTOR_SUB_TYPE)                    \
+  V(FEEDBACK_METADATA_SUB_TYPE)                  \
   V(WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE)
 
 enum FixedArraySubInstanceType {
@@ -965,25 +918,6 @@
 };
 
 
-#define DECL_BOOLEAN_ACCESSORS(name) \
-  inline bool name() const;          \
-  inline void set_##name(bool value);
-
-#define DECL_INT_ACCESSORS(name) \
-  inline int name() const;       \
-  inline void set_##name(int value);
-
-
-#define DECL_ACCESSORS(name, type)                                      \
-  inline type* name() const;                                            \
-  inline void set_##name(type* value,                                   \
-                         WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
-
-
-#define DECLARE_CAST(type)                              \
-  INLINE(static type* cast(Object* object));            \
-  INLINE(static const type* cast(const Object* object));
-
 class AbstractCode;
 class AccessorPair;
 class AllocationSite;
@@ -992,12 +926,12 @@
 class Cell;
 class ConsString;
 class ElementsAccessor;
+class FindAndReplacePattern;
 class FixedArrayBase;
 class FunctionLiteral;
 class JSGlobalObject;
 class KeyAccumulator;
 class LayoutDescriptor;
-class LiteralsArray;
 class LookupIterator;
 class FieldType;
 class Module;
@@ -1012,8 +946,8 @@
 class SharedFunctionInfo;
 class StringStream;
 class TypeFeedbackInfo;
-class TypeFeedbackMetadata;
-class TypeFeedbackVector;
+class FeedbackMetadata;
+class FeedbackVector;
 class WeakCell;
 class TransitionArray;
 class TemplateList;
@@ -1021,12 +955,6 @@
 // A template-ized version of the IsXXX functions.
 template <class C> inline bool Is(Object* obj);
 
-#ifdef VERIFY_HEAP
-#define DECLARE_VERIFIER(Name) void Name##Verify();
-#else
-#define DECLARE_VERIFIER(Name)
-#endif
-
 #ifdef OBJECT_PRINT
 #define DECLARE_PRINTER(Name) void Name##Print(std::ostream& os);  // NOLINT
 #else
@@ -1043,17 +971,6 @@
 #define HEAP_OBJECT_TYPE_LIST(V) \
   V(HeapNumber)                  \
   V(MutableHeapNumber)           \
-  V(Simd128Value)                \
-  V(Float32x4)                   \
-  V(Int32x4)                     \
-  V(Uint32x4)                    \
-  V(Bool32x4)                    \
-  V(Int16x8)                     \
-  V(Uint16x8)                    \
-  V(Bool16x8)                    \
-  V(Int8x16)                     \
-  V(Uint8x16)                    \
-  V(Bool8x16)                    \
   V(Name)                        \
   V(UniqueName)                  \
   V(String)                      \
@@ -1066,6 +983,7 @@
   V(SeqTwoByteString)            \
   V(SeqOneByteString)            \
   V(InternalizedString)          \
+  V(ThinString)                  \
   V(Symbol)                      \
                                  \
   V(FixedTypedArrayBase)         \
@@ -1083,22 +1001,22 @@
   V(FreeSpace)                   \
   V(JSReceiver)                  \
   V(JSObject)                    \
+  V(JSArgumentsObject)           \
   V(JSContextExtensionObject)    \
   V(JSGeneratorObject)           \
   V(JSModuleNamespace)           \
-  V(JSFixedArrayIterator)        \
   V(Map)                         \
   V(DescriptorArray)             \
   V(FrameArray)                  \
   V(TransitionArray)             \
-  V(LiteralsArray)               \
-  V(TypeFeedbackMetadata)        \
-  V(TypeFeedbackVector)          \
+  V(FeedbackMetadata)            \
+  V(FeedbackVector)              \
   V(DeoptimizationInputData)     \
   V(DeoptimizationOutputData)    \
   V(DependentCode)               \
   V(HandlerTable)                \
   V(FixedArray)                  \
+  V(BoilerplateDescription)      \
   V(FixedDoubleArray)            \
   V(WeakFixedArray)              \
   V(ArrayList)                   \
@@ -1123,12 +1041,14 @@
   V(JSArray)                     \
   V(JSArrayBuffer)               \
   V(JSArrayBufferView)           \
+  V(JSAsyncFromSyncIterator)     \
   V(JSCollection)                \
   V(JSTypedArray)                \
   V(JSArrayIterator)             \
   V(JSDataView)                  \
   V(JSProxy)                     \
   V(JSError)                     \
+  V(JSPromiseCapability)         \
   V(JSPromise)                   \
   V(JSStringIterator)            \
   V(JSSet)                       \
@@ -1199,11 +1119,14 @@
   OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
   HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
 #undef IS_TYPE_FUNCTION_DECL
+
 #define IS_TYPE_FUNCTION_DECL(Type, Value) \
   INLINE(bool Is##Type(Isolate* isolate) const);
   ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
 #undef IS_TYPE_FUNCTION_DECL
 
+  INLINE(bool IsNullOrUndefined(Isolate* isolate) const);
+
   // A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
   // a keyed store is of the form a[expression] = foo.
   enum StoreFromKeyed {
@@ -1313,37 +1236,38 @@
       Handle<Object> input, ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
 
   // ES6 section 7.1.3 ToNumber
-  MUST_USE_RESULT static MaybeHandle<Object> ToNumber(Handle<Object> input);
+  MUST_USE_RESULT static inline MaybeHandle<Object> ToNumber(
+      Handle<Object> input);
 
   // ES6 section 7.1.4 ToInteger
-  MUST_USE_RESULT static MaybeHandle<Object> ToInteger(Isolate* isolate,
-                                                       Handle<Object> input);
+  MUST_USE_RESULT static inline MaybeHandle<Object> ToInteger(
+      Isolate* isolate, Handle<Object> input);
 
   // ES6 section 7.1.5 ToInt32
-  MUST_USE_RESULT static MaybeHandle<Object> ToInt32(Isolate* isolate,
-                                                     Handle<Object> input);
+  MUST_USE_RESULT static inline MaybeHandle<Object> ToInt32(
+      Isolate* isolate, Handle<Object> input);
 
   // ES6 section 7.1.6 ToUint32
-  MUST_USE_RESULT static MaybeHandle<Object> ToUint32(Isolate* isolate,
-                                                      Handle<Object> input);
+  MUST_USE_RESULT inline static MaybeHandle<Object> ToUint32(
+      Isolate* isolate, Handle<Object> input);
 
   // ES6 section 7.1.12 ToString
-  MUST_USE_RESULT static MaybeHandle<String> ToString(Isolate* isolate,
-                                                      Handle<Object> input);
+  MUST_USE_RESULT static inline MaybeHandle<String> ToString(
+      Isolate* isolate, Handle<Object> input);
 
   static Handle<String> NoSideEffectsToString(Isolate* isolate,
                                               Handle<Object> input);
 
   // ES6 section 7.1.14 ToPropertyKey
-  MUST_USE_RESULT static MaybeHandle<Object> ToPropertyKey(
+  MUST_USE_RESULT static inline MaybeHandle<Object> ToPropertyKey(
       Isolate* isolate, Handle<Object> value);
 
   // ES6 section 7.1.15 ToLength
-  MUST_USE_RESULT static MaybeHandle<Object> ToLength(Isolate* isolate,
-                                                      Handle<Object> input);
+  MUST_USE_RESULT static inline MaybeHandle<Object> ToLength(
+      Isolate* isolate, Handle<Object> input);
 
   // ES6 section 7.1.17 ToIndex
-  MUST_USE_RESULT static MaybeHandle<Object> ToIndex(
+  MUST_USE_RESULT static inline MaybeHandle<Object> ToIndex(
       Isolate* isolate, Handle<Object> input,
       MessageTemplate::Template error_index);
 
@@ -1522,6 +1446,11 @@
   // allow kMaxUInt32.
   inline bool ToArrayIndex(uint32_t* index);
 
+  // Returns true if iterating over the object produces a result that differs
+  // (including observable effects) from simply accessing the properties
+  // between 0 and length.
+  bool IterationHasObservableEffects();
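+  // Usage sketch (hypothetical caller): take a fast path only when the
+  // iteration protocol cannot be observed:
+  //   if (!receiver->IterationHasObservableEffects()) {
+  //     // Read elements 0..length directly, skipping @@iterator.
+  //   }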
+
   DECLARE_VERIFIER(Object)
 #ifdef VERIFY_HEAP
   // Verify a pointer is a valid object pointer.
@@ -1530,10 +1459,6 @@
 
   inline void VerifyApiCallResultType();
 
-  // ES6 19.1.3.6 Object.prototype.toString
-  MUST_USE_RESULT static MaybeHandle<String> ObjectProtoToString(
-      Isolate* isolate, Handle<Object> object);
-
   // Prints this object without details.
   void ShortPrint(FILE* out = stdout);
 
@@ -1573,6 +1498,23 @@
 
   MUST_USE_RESULT static MaybeHandle<Name> ConvertToName(Isolate* isolate,
                                                          Handle<Object> input);
+  MUST_USE_RESULT static MaybeHandle<Object> ConvertToPropertyKey(
+      Isolate* isolate, Handle<Object> value);
+  MUST_USE_RESULT static MaybeHandle<String> ConvertToString(
+      Isolate* isolate, Handle<Object> input);
+  MUST_USE_RESULT static MaybeHandle<Object> ConvertToNumber(
+      Isolate* isolate, Handle<Object> input);
+  MUST_USE_RESULT static MaybeHandle<Object> ConvertToInteger(
+      Isolate* isolate, Handle<Object> input);
+  MUST_USE_RESULT static MaybeHandle<Object> ConvertToInt32(
+      Isolate* isolate, Handle<Object> input);
+  MUST_USE_RESULT static MaybeHandle<Object> ConvertToUint32(
+      Isolate* isolate, Handle<Object> input);
+  MUST_USE_RESULT static MaybeHandle<Object> ConvertToLength(
+      Isolate* isolate, Handle<Object> input);
+  MUST_USE_RESULT static MaybeHandle<Object> ConvertToIndex(
+      Isolate* isolate, Handle<Object> input,
+      MessageTemplate::Template error_index);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
 };
@@ -1600,6 +1542,10 @@
  public:
   // Returns the integer value.
   inline int value() const { return Internals::SmiValue(this); }
+  inline Smi* ToUint32Smi() {
+    if (value() <= 0) return Smi::kZero;
+    return Smi::FromInt(static_cast<uint32_t>(value()));
+  }
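+  // Example (sketch): negative values clamp to zero, e.g.
+  //   Smi::FromInt(-1)->ToUint32Smi() == Smi::kZero
+  //   Smi::FromInt(7)->ToUint32Smi() == Smi::FromInt(7)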
 
   // Convert a value to a Smi object.
   static inline Smi* FromInt(int value) {
@@ -1626,7 +1572,7 @@
   V8_EXPORT_PRIVATE void SmiPrint(std::ostream& os) const;  // NOLINT
   DECLARE_VERIFIER(Smi)
 
-  V8_EXPORT_PRIVATE static Smi* const kZero;
+  static constexpr Smi* const kZero = nullptr;
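+  // A Smi with value 0 is all zero bits (payload and tag alike), so the
+  // canonical zero can now be a compile-time null pointer constant.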
   static const int kMinValue =
       (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
   static const int kMaxValue = -(kMinValue + 1);
@@ -1725,6 +1671,8 @@
   ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
 #undef IS_TYPE_FUNCTION_DECL
 
+  INLINE(bool IsNullOrUndefined(Isolate* isolate) const);
+
 #define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \
   INLINE(bool Is##Name() const);
   STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
@@ -1847,6 +1795,9 @@
   inline double value() const;
   inline void set_value(double value);
 
+  inline uint64_t value_as_bits() const;
+  inline void set_value_as_bits(uint64_t bits);
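+  // Example (sketch): copying via the raw bit pattern avoids the NaN
+  // canonicalization a double round-trip could perform:
+  //   copy->set_value_as_bits(number->value_as_bits());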
+
   DECLARE_CAST(HeapNumber)
 
   // Dispatched behavior.
@@ -1891,71 +1842,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(HeapNumber);
 };
 
-
-// The Simd128Value class describes heap allocated 128 bit SIMD values.
-class Simd128Value : public HeapObject {
- public:
-  DECLARE_CAST(Simd128Value)
-
-  DECLARE_PRINTER(Simd128Value)
-  DECLARE_VERIFIER(Simd128Value)
-
-  static Handle<String> ToString(Handle<Simd128Value> input);
-
-  // Equality operations.
-  inline bool Equals(Simd128Value* that);
-  static inline bool Equals(Handle<Simd128Value> one, Handle<Simd128Value> two);
-
-  // Checks that another instance is bit-wise equal.
-  bool BitwiseEquals(const Simd128Value* other) const;
-  // Computes a hash from the 128 bit value, viewed as 4 32-bit integers.
-  uint32_t Hash() const;
-  // Copies the 16 bytes of SIMD data to the destination address.
-  void CopyBits(void* destination) const;
-
-  // Layout description.
-  static const int kValueOffset = HeapObject::kHeaderSize;
-  static const int kSize = kValueOffset + kSimd128Size;
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(Simd128Value);
-};
-
-
-// V has parameters (TYPE, Type, type, lane count, lane type)
-#define SIMD128_TYPES(V)                       \
-  V(FLOAT32X4, Float32x4, float32x4, 4, float) \
-  V(INT32X4, Int32x4, int32x4, 4, int32_t)     \
-  V(UINT32X4, Uint32x4, uint32x4, 4, uint32_t) \
-  V(BOOL32X4, Bool32x4, bool32x4, 4, bool)     \
-  V(INT16X8, Int16x8, int16x8, 8, int16_t)     \
-  V(UINT16X8, Uint16x8, uint16x8, 8, uint16_t) \
-  V(BOOL16X8, Bool16x8, bool16x8, 8, bool)     \
-  V(INT8X16, Int8x16, int8x16, 16, int8_t)     \
-  V(UINT8X16, Uint8x16, uint8x16, 16, uint8_t) \
-  V(BOOL8X16, Bool8x16, bool8x16, 16, bool)
-
-#define SIMD128_VALUE_CLASS(TYPE, Type, type, lane_count, lane_type) \
-  class Type final : public Simd128Value {                           \
-   public:                                                           \
-    inline lane_type get_lane(int lane) const;                       \
-    inline void set_lane(int lane, lane_type value);                 \
-                                                                     \
-    DECLARE_CAST(Type)                                               \
-                                                                     \
-    DECLARE_PRINTER(Type)                                            \
-                                                                     \
-    static Handle<String> ToString(Handle<Type> input);              \
-                                                                     \
-    inline bool Equals(Type* that);                                  \
-                                                                     \
-   private:                                                          \
-    DISALLOW_IMPLICIT_CONSTRUCTORS(Type);                            \
-  };
-SIMD128_TYPES(SIMD128_VALUE_CLASS)
-#undef SIMD128_VALUE_CLASS
-
-
 enum EnsureElementsMode {
   DONT_ALLOW_DOUBLE_ELEMENTS,
   ALLOW_COPIED_DOUBLE_ELEMENTS,
@@ -2016,6 +1902,15 @@
   MUST_USE_RESULT static Maybe<bool> HasInPrototypeChain(
       Isolate* isolate, Handle<JSReceiver> object, Handle<Object> proto);
 
+  // Reads all enumerable own properties of source and adds them to
+  // target, using either Set or CreateDataProperty depending on the
+  // use_set argument. Values present in the optional excluded_properties
+  // list are not copied.
+  MUST_USE_RESULT static Maybe<bool> SetOrCopyDataProperties(
+      Isolate* isolate, Handle<JSReceiver> target, Handle<Object> source,
+      const ScopedVector<Handle<Object>>* excluded_properties = nullptr,
+      bool use_set = true);
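+  // Sketch of the two modes (assuming the JS-level callers): use_set == true
+  // mirrors Object.assign(target, source); use_set == false mirrors the
+  // CreateDataProperty semantics used by object rest/spread.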
+
   // Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
   MUST_USE_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
   MUST_USE_RESULT static inline Maybe<bool> HasProperty(
@@ -2114,7 +2009,7 @@
   // function that was used to instantiate the object).
   static Handle<String> GetConstructorName(Handle<JSReceiver> receiver);
 
-  Context* GetCreationContext();
+  Handle<Context> GetCreationContext();
 
   MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetPropertyAttributes(
       Handle<JSReceiver> object, Handle<Name> name);
@@ -2510,13 +2405,13 @@
                                        FieldIndex index);
   inline Object* RawFastPropertyAt(FieldIndex index);
   inline double RawFastDoublePropertyAt(FieldIndex index);
+  inline uint64_t RawFastDoublePropertyAsBitsAt(FieldIndex index);
 
   inline void FastPropertyAtPut(FieldIndex index, Object* value);
   inline void RawFastPropertyAtPut(FieldIndex index, Object* value);
-  inline void RawFastDoublePropertyAtPut(FieldIndex index, double value);
+  inline void RawFastDoublePropertyAsBitsAtPut(FieldIndex index, uint64_t bits);
   inline void WriteToField(int descriptor, PropertyDetails details,
                            Object* value);
-  inline void WriteToField(int descriptor, Object* value);
 
   // Access to in object properties.
   inline int GetInObjectPropertyOffset(int index);
@@ -2570,8 +2465,8 @@
   DECLARE_PRINTER(JSObject)
   DECLARE_VERIFIER(JSObject)
 #ifdef OBJECT_PRINT
-  void PrintProperties(std::ostream& os);   // NOLINT
-  void PrintElements(std::ostream& os);     // NOLINT
+  bool PrintProperties(std::ostream& os);  // NOLINT
+  bool PrintElements(std::ostream& os);    // NOLINT
 #endif
 #if defined(DEBUG) || defined(OBJECT_PRINT)
   void PrintTransitions(std::ostream& os);  // NOLINT
@@ -2855,8 +2750,11 @@
 
   // Setters for frequently used oddballs located in old space.
   inline void set_undefined(int index);
+  inline void set_undefined(Isolate* isolate, int index);
   inline void set_null(int index);
+  inline void set_null(Isolate* isolate, int index);
   inline void set_the_hole(int index);
+  inline void set_the_hole(Isolate* isolate, int index);
 
   inline Object** GetFirstElementAddress();
   inline bool ContainsOnlySmisOrHoles();
@@ -2873,10 +2771,12 @@
   void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
 
   // Garbage collection support.
-  static int SizeFor(int length) { return kHeaderSize + length * kPointerSize; }
+  static constexpr int SizeFor(int length) {
+    return kHeaderSize + length * kPointerSize;
+  }
 
   // Code Generation support.
-  static int OffsetOfElementAt(int index) { return SizeFor(index); }
+  static constexpr int OffsetOfElementAt(int index) { return SizeFor(index); }
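+  // Being constexpr, these can now seed other compile-time constants, e.g.
+  // (sketch, hypothetical name):
+  //   static constexpr int kFirstSlotOffset = OffsetOfElementAt(0);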
 
   // Garbage collection support.
   inline Object** RawFieldOfElementAt(int index);
@@ -2898,16 +2798,6 @@
   bool IsEqualTo(FixedArray* other);
 #endif
 
-  // Swap two elements in a pair of arrays.  If this array and the
-  // numbers array are the same object, the elements are only swapped
-  // once.
-  void SwapPairs(FixedArray* numbers, int i, int j);
-
-  // Sort prefix of this array and the numbers array as pairs wrt. the
-  // numbers.  If the numbers array and the this array are the same
-  // object, the prefix of this array is sorted.
-  void SortPairs(FixedArray* numbers, uint32_t len);
-
   typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
 
  protected:
@@ -2923,7 +2813,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
 };
 
-
 // FixedDoubleArray describes fixed-sized arrays with element type double.
 class FixedDoubleArray: public FixedArrayBase {
  public:
@@ -2933,6 +2822,7 @@
   static inline Handle<Object> get(FixedDoubleArray* array, int index,
                                    Isolate* isolate);
   inline void set(int index, double value);
+  inline void set_the_hole(Isolate* isolate, int index);
   inline void set_the_hole(int index);
 
   // Checking for the hole.
@@ -3050,6 +2940,7 @@
                                AddMode mode = kNone);
   static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj1,
                                Handle<Object> obj2, AddMode = kNone);
+  static Handle<ArrayList> New(Isolate* isolate, int size);
   inline int Length();
   inline void SetLength(int length);
   inline Object* Get(int index);
@@ -3067,57 +2958,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
 };
 
-// The property RegExpMatchInfo includes the matchIndices
-// array of the last successful regexp match (an array of start/end index
-// pairs for the match and all the captured substrings), the invariant is
-// that there are at least two capture indices.  The array also contains
-// the subject string for the last successful match.
-// After creation the result must be treated as a FixedArray in all regards.
-class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
- public:
-  // Returns the number of captures, which is defined as the length of the
-  // matchIndices objects of the last match. matchIndices contains two indices
-  // for each capture (including the match itself), i.e. 2 * #captures + 2.
-  inline int NumberOfCaptureRegisters();
-  inline void SetNumberOfCaptureRegisters(int value);
-
-  // Returns the subject string of the last match.
-  inline String* LastSubject();
-  inline void SetLastSubject(String* value);
-
-  // Like LastSubject, but modifiable by the user.
-  inline Object* LastInput();
-  inline void SetLastInput(Object* value);
-
-  // Returns the i'th capture index, 0 <= i < NumberOfCaptures(). Capture(0) and
-  // Capture(1) determine the start- and endpoint of the match itself.
-  inline int Capture(int i);
-  inline void SetCapture(int i, int value);
-
-  // Reserves space for captures.
-  static Handle<RegExpMatchInfo> ReserveCaptures(
-      Handle<RegExpMatchInfo> match_info, int capture_count);
-
-  DECLARE_CAST(RegExpMatchInfo)
-
-  static const int kNumberOfCapturesIndex = 0;
-  static const int kLastSubjectIndex = 1;
-  static const int kLastInputIndex = 2;
-  static const int kFirstCaptureIndex = 3;
-  static const int kLastMatchOverhead = kFirstCaptureIndex;
-
-  static const int kNumberOfCapturesOffset = FixedArray::kHeaderSize;
-  static const int kLastSubjectOffset = kNumberOfCapturesOffset + kPointerSize;
-  static const int kLastInputOffset = kLastSubjectOffset + kPointerSize;
-  static const int kFirstCaptureOffset = kLastInputOffset + kPointerSize;
-
-  // Every match info is guaranteed to have enough space to store two captures.
-  static const int kInitialCaptureIndices = 2;
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMatchInfo);
-};
-
 #define FRAME_ARRAY_FIELD_LIST(V) \
   V(WasmInstance, Object)         \
   V(WasmFunctionIndex, Smi)       \
@@ -3147,6 +2987,7 @@
   static const int kIsAsmJsWasmFrame = 1 << 1;
   static const int kIsStrict = 1 << 2;
   static const int kForceConstructor = 1 << 3;
+  static const int kAsmJsAtNumberConversion = 1 << 4;
 
   static Handle<FrameArray> AppendJSFrame(Handle<FrameArray> in,
                                           Handle<Object> receiver,
@@ -3204,7 +3045,7 @@
 //          [0]: pointer to fixed array with enum cache
 //          [1]: either Smi(0) or pointer to fixed array with indices
 //   [2]: first key
-//   [2 + number of descriptors * kDescriptorSize]: start of slack
+//   [2 + number of descriptors * kEntrySize]: start of slack
 class DescriptorArray: public FixedArray {
  public:
   // Returns true for both shared empty_descriptor_array and for smis, which the
@@ -3252,24 +3093,24 @@
   inline Object** GetDescriptorStartSlot(int descriptor_number);
   inline Object** GetDescriptorEndSlot(int descriptor_number);
   inline PropertyDetails GetDetails(int descriptor_number);
-  inline PropertyType GetType(int descriptor_number);
   inline int GetFieldIndex(int descriptor_number);
-  FieldType* GetFieldType(int descriptor_number);
-  inline Object* GetConstant(int descriptor_number);
-  inline Object* GetCallbacksObject(int descriptor_number);
-  inline AccessorDescriptor* GetCallbacks(int descriptor_number);
+  inline FieldType* GetFieldType(int descriptor_number);
 
   inline Name* GetSortedKey(int descriptor_number);
   inline int GetSortedKeyIndex(int descriptor_number);
   inline void SetSortedKey(int pointer, int descriptor_number);
-  inline void SetRepresentation(int descriptor_number,
-                                Representation representation);
 
   // Accessor for complete descriptor.
   inline void Get(int descriptor_number, Descriptor* desc);
   inline void Set(int descriptor_number, Descriptor* desc);
+  inline void Set(int descriptor_number, Name* key, Object* value,
+                  PropertyDetails details);
   void Replace(int descriptor_number, Descriptor* descriptor);
 
+  // Generalizes constness, representation and field type of all field
+  // descriptors.
+  void GeneralizeAllFields();
+
   // Append automatically sets the enumeration index. This should only be used
   // to add descriptors in bulk at the end, followed by sorting the descriptor
   // array.
@@ -3326,10 +3167,11 @@
   static const int kEnumCacheBridgeCacheOffset = FixedArray::kHeaderSize;
 
   // Layout of descriptor.
-  static const int kDescriptorKey = 0;
-  static const int kDescriptorDetails = 1;
-  static const int kDescriptorValue = 2;
-  static const int kDescriptorSize = 3;
+  // Naming is consistent with Dictionary classes for easy templating.
+  static const int kEntryKeyIndex = 0;
+  static const int kEntryDetailsIndex = 1;
+  static const int kEntryValueIndex = 2;
+  static const int kEntrySize = 3;
 
 #if defined(DEBUG) || defined(OBJECT_PRINT)
   // For our gdb macros, we should perhaps change these in the future.
@@ -3337,6 +3179,9 @@
 
   // Print all the descriptors.
   void PrintDescriptors(std::ostream& os);  // NOLINT
+
+  void PrintDescriptorDetails(std::ostream& os, int descriptor,
+                              PropertyDetails::PrintMode mode);
 #endif
 
 #ifdef DEBUG
@@ -3357,41 +3202,23 @@
   }
 
   static int ToDetailsIndex(int descriptor_number) {
-    return kFirstIndex + (descriptor_number * kDescriptorSize) +
-           kDescriptorDetails;
+    return kFirstIndex + (descriptor_number * kEntrySize) + kEntryDetailsIndex;
   }
 
   // Conversion from descriptor number to array indices.
   static int ToKeyIndex(int descriptor_number) {
-    return kFirstIndex + (descriptor_number * kDescriptorSize) + kDescriptorKey;
+    return kFirstIndex + (descriptor_number * kEntrySize) + kEntryKeyIndex;
   }
 
   static int ToValueIndex(int descriptor_number) {
-    return kFirstIndex + (descriptor_number * kDescriptorSize) +
-           kDescriptorValue;
+    return kFirstIndex + (descriptor_number * kEntrySize) + kEntryValueIndex;
   }
 
  private:
-  // An entry in a DescriptorArray, represented as an (array, index) pair.
-  class Entry {
-   public:
-    inline explicit Entry(DescriptorArray* descs, int index) :
-        descs_(descs), index_(index) { }
-
-    inline PropertyType type();
-    inline Object* GetCallbackObject();
-
-   private:
-    DescriptorArray* descs_;
-    int index_;
-  };
-
   // Transfer a complete descriptor from the src descriptor array to this
   // descriptor array.
   void CopyFrom(int index, DescriptorArray* src);
 
-  inline void SetDescriptor(int descriptor_number, Descriptor* desc);
-
   // Swap first and second descriptor.
   inline void SwapSortedKeys(int first, int second);
 
@@ -3593,6 +3420,9 @@
  protected:
   friend class ObjectHashTable;
 
+  MUST_USE_RESULT static Handle<Derived> New(Isolate* isolate, int capacity,
+                                             PretenureFlag pretenure);
+
   // Find the entry at which to insert element with the given key that
   // has the given hash value.
   uint32_t FindInsertionEntry(uint32_t hash);
@@ -3788,6 +3618,10 @@
 
   enum SortMode { UNSORTED, SORTED };
 
+  // Return the key indices sorted by their enumeration indices.
+  static Handle<FixedArray> IterationIndices(
+      Handle<Dictionary<Derived, Shape, Key>> dictionary);
+
   // Collect the keys into the given KeyAccumulator, in ascending chronological
   // order of property creation.
   static void CollectKeysTo(Handle<Dictionary<Derived, Shape, Key>> dictionary,
@@ -3814,6 +3648,10 @@
       PretenureFlag pretenure = NOT_TENURED,
       MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
 
+  // Creates a dictionary with the minimal possible capacity.
+  MUST_USE_RESULT static Handle<Derived> NewEmpty(
+      Isolate* isolate, PretenureFlag pretenure = NOT_TENURED);
+
   // Ensures that a new dictionary is created when the capacity is checked.
   void SetRequiresCopyOnCapacityChange();
 
@@ -3843,14 +3681,11 @@
                                              PropertyDetails details,
                                              int* entry_out = nullptr);
 
-  // Returns iteration indices array for the |dictionary|.
-  // Values are direct indices in the |HashTable| array.
-  static Handle<FixedArray> BuildIterationIndicesArray(
-      Handle<Derived> dictionary);
-
   static const int kMaxNumberKeyIndex = DerivedHashTable::kPrefixStartIndex;
   static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1;
 
+  static const bool kIsEnumerable = Shape::kIsEnumerable;
+
  protected:
   // Generic at put operation.
   MUST_USE_RESULT static Handle<Derived> AtPut(
@@ -4023,18 +3858,20 @@
   // Type specific at put (default NONE attributes is used when adding).
   MUST_USE_RESULT static Handle<SeededNumberDictionary> AtNumberPut(
       Handle<SeededNumberDictionary> dictionary, uint32_t key,
-      Handle<Object> value, bool used_as_prototype);
+      Handle<Object> value, Handle<JSObject> dictionary_holder);
   MUST_USE_RESULT static Handle<SeededNumberDictionary> AddNumberEntry(
       Handle<SeededNumberDictionary> dictionary, uint32_t key,
-      Handle<Object> value, PropertyDetails details, bool used_as_prototype);
+      Handle<Object> value, PropertyDetails details,
+      Handle<JSObject> dictionary_holder);
 
   // Set an existing entry or add a new one if needed.
   // Return the updated dictionary.
   MUST_USE_RESULT static Handle<SeededNumberDictionary> Set(
       Handle<SeededNumberDictionary> dictionary, uint32_t key,
-      Handle<Object> value, PropertyDetails details, bool used_as_prototype);
+      Handle<Object> value, PropertyDetails details,
+      Handle<JSObject> dictionary_holder);
 
-  void UpdateMaxNumberKey(uint32_t key, bool used_as_prototype);
+  void UpdateMaxNumberKey(uint32_t key, Handle<JSObject> dictionary_holder);
 
   // Returns true if the dictionary contains any elements that are non-writable,
   // non-configurable, non-enumerable, or have getters/setters.
@@ -4297,22 +4134,23 @@
   static const int kNotFound = -1;
   static const int kMinCapacity = 4;
 
-  static const int kNumberOfBucketsIndex = 0;
-  static const int kNumberOfElementsIndex = kNumberOfBucketsIndex + 1;
-  static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1;
-  static const int kHashTableStartIndex = kNumberOfDeletedElementsIndex + 1;
+  static const int kNumberOfElementsIndex = 0;
+  // The next table is stored at the same index as the number of elements.
   static const int kNextTableIndex = kNumberOfElementsIndex;
+  static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1;
+  static const int kNumberOfBucketsIndex = kNumberOfDeletedElementsIndex + 1;
+  static const int kHashTableStartIndex = kNumberOfBucketsIndex + 1;
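+  // Resulting layout (sketch): [0] number of elements (also reused as the
+  // next table when this one is obsolete), [1] number of deleted elements,
+  // [2] number of buckets, [3..] bucket heads followed by entry chains.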
 
-  static const int kNumberOfBucketsOffset =
-      kHeaderSize + kNumberOfBucketsIndex * kPointerSize;
-  static const int kNumberOfElementsOffset =
-      kHeaderSize + kNumberOfElementsIndex * kPointerSize;
-  static const int kNumberOfDeletedElementsOffset =
-      kHeaderSize + kNumberOfDeletedElementsIndex * kPointerSize;
-  static const int kHashTableStartOffset =
-      kHeaderSize + kHashTableStartIndex * kPointerSize;
-  static const int kNextTableOffset =
-      kHeaderSize + kNextTableIndex * kPointerSize;
+  static constexpr int kNumberOfElementsOffset =
+      FixedArray::OffsetOfElementAt(kNumberOfElementsIndex);
+  static constexpr int kNextTableOffset =
+      FixedArray::OffsetOfElementAt(kNextTableIndex);
+  static constexpr int kNumberOfDeletedElementsOffset =
+      FixedArray::OffsetOfElementAt(kNumberOfDeletedElementsIndex);
+  static constexpr int kNumberOfBucketsOffset =
+      FixedArray::OffsetOfElementAt(kNumberOfBucketsIndex);
+  static constexpr int kHashTableStartOffset =
+      FixedArray::OffsetOfElementAt(kHashTableStartIndex);
 
   static const int kEntrySize = entrysize + 1;
   static const int kChainOffset = entrysize;
@@ -4436,314 +4274,6 @@
 };
 
 
-// ScopeInfo represents information about different scopes of a source
-// program  and the allocation of the scope's variables. Scope information
-// is stored in a compressed form in ScopeInfo objects and is used
-// at runtime (stack dumps, deoptimization, etc.).
-
-// This object provides quick access to scope info details for runtime
-// routines.
-class ScopeInfo : public FixedArray {
- public:
-  DECLARE_CAST(ScopeInfo)
-
-  // Return the type of this scope.
-  ScopeType scope_type();
-
-  // Does this scope call eval?
-  bool CallsEval();
-
-  // Return the language mode of this scope.
-  LanguageMode language_mode();
-
-  // True if this scope is a (var) declaration scope.
-  bool is_declaration_scope();
-
-  // Does this scope make a sloppy eval call?
-  bool CallsSloppyEval() { return CallsEval() && is_sloppy(language_mode()); }
-
-  // Return the total number of locals allocated on the stack and in the
-  // context. This includes the parameters that are allocated in the context.
-  int LocalCount();
-
-  // Return the number of stack slots for code. This number consists of two
-  // parts:
-  //  1. One stack slot per stack allocated local.
-  //  2. One stack slot for the function name if it is stack allocated.
-  int StackSlotCount();
-
-  // Return the number of context slots for code if a context is allocated. This
-  // number consists of three parts:
-  //  1. Size of fixed header for every context: Context::MIN_CONTEXT_SLOTS
-  //  2. One context slot per context allocated local.
-  //  3. One context slot for the function name if it is context allocated.
-  // Parameters allocated in the context count as context allocated locals. If
-  // no contexts are allocated for this scope ContextLength returns 0.
-  int ContextLength();
-
-  // Does this scope declare a "this" binding?
-  bool HasReceiver();
-
-  // Does this scope declare a "this" binding, and the "this" binding is stack-
-  // or context-allocated?
-  bool HasAllocatedReceiver();
-
-  // Does this scope declare a "new.target" binding?
-  bool HasNewTarget();
-
-  // Is this scope the scope of a named function expression?
-  bool HasFunctionName();
-
-  // Return if this has context allocated locals.
-  bool HasHeapAllocatedLocals();
-
-  // Return if contexts are allocated for this scope.
-  bool HasContext();
-
-  // Return if this is a function scope with "use asm".
-  inline bool IsAsmModule();
-
-  // Return if this is a nested function within an asm module scope.
-  inline bool IsAsmFunction();
-
-  inline bool HasSimpleParameters();
-
-  // Return the function_name if present.
-  String* FunctionName();
-
-  ModuleInfo* ModuleDescriptorInfo();
-
-  // Return the name of the given parameter.
-  String* ParameterName(int var);
-
-  // Return the name of the given local.
-  String* LocalName(int var);
-
-  // Return the name of the given stack local.
-  String* StackLocalName(int var);
-
-  // Return the name of the given stack local.
-  int StackLocalIndex(int var);
-
-  // Return the name of the given context local.
-  String* ContextLocalName(int var);
-
-  // Return the mode of the given context local.
-  VariableMode ContextLocalMode(int var);
-
-  // Return the initialization flag of the given context local.
-  InitializationFlag ContextLocalInitFlag(int var);
-
-  // Return the initialization flag of the given context local.
-  MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var);
-
-  // Return true if this local was introduced by the compiler, and should not be
-  // exposed to the user in a debugger.
-  static bool VariableIsSynthetic(String* name);
-
-  // Lookup support for serialized scope info. Returns the
-  // the stack slot index for a given slot name if the slot is
-  // present; otherwise returns a value < 0. The name must be an internalized
-  // string.
-  int StackSlotIndex(String* name);
-
-  // Lookup support for serialized scope info. Returns the local context slot
-  // index for a given slot name if the slot is present; otherwise
-  // returns a value < 0. The name must be an internalized string.
-  // If the slot is present and mode != NULL, sets *mode to the corresponding
-  // mode for that variable.
-  static int ContextSlotIndex(Handle<ScopeInfo> scope_info, Handle<String> name,
-                              VariableMode* mode, InitializationFlag* init_flag,
-                              MaybeAssignedFlag* maybe_assigned_flag);
-
-  // Lookup metadata of a MODULE-allocated variable.  Return 0 if there is no
-  // module variable with the given name (the index value of a MODULE variable
-  // is never 0).
-  int ModuleIndex(Handle<String> name, VariableMode* mode,
-                  InitializationFlag* init_flag,
-                  MaybeAssignedFlag* maybe_assigned_flag);
-
-  // Lookup the name of a certain context slot by its index.
-  String* ContextSlotName(int slot_index);
-
-  // Lookup support for serialized scope info. Returns the
-  // parameter index for a given parameter name if the parameter is present;
-  // otherwise returns a value < 0. The name must be an internalized string.
-  int ParameterIndex(String* name);
-
-  // Lookup support for serialized scope info. Returns the function context
-  // slot index if the function name is present and context-allocated (named
-  // function expressions, only), otherwise returns a value < 0. The name
-  // must be an internalized string.
-  int FunctionContextSlotIndex(String* name);
-
-  // Lookup support for serialized scope info.  Returns the receiver context
-  // slot index if scope has a "this" binding, and the binding is
-  // context-allocated.  Otherwise returns a value < 0.
-  int ReceiverContextSlotIndex();
-
-  FunctionKind function_kind();
-
-  // Returns true if this ScopeInfo is linked to a outer ScopeInfo.
-  bool HasOuterScopeInfo();
-
-  // Returns true if this ScopeInfo was created for a debug-evaluate scope.
-  bool IsDebugEvaluateScope();
-
-  // Can be used to mark a ScopeInfo that looks like a with-scope as actually
-  // being a debug-evaluate scope.
-  void SetIsDebugEvaluateScope();
-
-  // Return the outer ScopeInfo if present.
-  ScopeInfo* OuterScopeInfo();
-
-#ifdef DEBUG
-  bool Equals(ScopeInfo* other) const;
-#endif
-
-  static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope,
-                                  MaybeHandle<ScopeInfo> outer_scope);
-  static Handle<ScopeInfo> CreateForWithScope(
-      Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope);
-  static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
-
-  // Serializes empty scope info.
-  V8_EXPORT_PRIVATE static ScopeInfo* Empty(Isolate* isolate);
-
-#ifdef DEBUG
-  void Print();
-#endif
-
-  // The layout of the static part of a ScopeInfo is as follows. Each entry is
-  // numeric and occupies one array slot.
-// 1. A set of properties of the scope.
-// 2. The number of parameters. For non-function scopes this is 0.
-// 3. The number of non-parameter variables allocated on the stack.
-// 4. The number of non-parameter and parameter variables allocated in the
-//    context.
-#define FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(V) \
-  V(Flags)                                   \
-  V(ParameterCount)                          \
-  V(StackLocalCount)                         \
-  V(ContextLocalCount)
-
-#define FIELD_ACCESSORS(name)       \
-  inline void Set##name(int value); \
-  inline int name();
-  FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
-#undef FIELD_ACCESSORS
-
-  enum {
-#define DECL_INDEX(name) k##name,
-    FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(DECL_INDEX)
-#undef DECL_INDEX
-    kVariablePartIndex
-  };
-
- private:
-  // The layout of the variable part of a ScopeInfo is as follows:
-  // 1. ParameterNames:
-  //    This part stores the names of the parameters for function scopes. One
-  //    slot is used per parameter, so in total this part occupies
-  //    ParameterCount() slots in the array. For other scopes than function
-  //    scopes ParameterCount() is 0.
-  // 2. StackLocalFirstSlot:
-  //    Index of a first stack slot for stack local. Stack locals belonging to
-  //    this scope are located on a stack at slots starting from this index.
-  // 3. StackLocalNames:
-  //    Contains the names of local variables that are allocated on the stack,
-  //    in increasing order of the stack slot index. First local variable has a
-  //    stack slot index defined in StackLocalFirstSlot (point 2 above).
-  //    One slot is used per stack local, so in total this part occupies
-  //    StackLocalCount() slots in the array.
-  // 4. ContextLocalNames:
-  //    Contains the names of local variables and parameters that are allocated
-  //    in the context. They are stored in increasing order of the context slot
-  //    index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
-  //    context local, so in total this part occupies ContextLocalCount() slots
-  //    in the array.
-  // 5. ContextLocalInfos:
-  //    Contains the variable modes and initialization flags corresponding to
-  //    the context locals in ContextLocalNames. One slot is used per
-  //    context local, so in total this part occupies ContextLocalCount()
-  //    slots in the array.
-  // 6. ReceiverInfo:
-  //    If the scope binds a "this" value, one slot is reserved to hold the
-  //    context or stack slot index for the variable.
-  // 7. FunctionNameInfo:
-  //    If the scope belongs to a named function expression this part contains
-  //    information about the function variable. It always occupies two array
-  //    slots:  a. The name of the function variable.
-  //            b. The context or stack slot index for the variable.
-  // 8. OuterScopeInfoIndex:
-  //    The outer scope's ScopeInfo or the hole if there's none.
-  // 9. ModuleInfo, ModuleVariableCount, and ModuleVariables:
-  //    For a module scope, this part contains the ModuleInfo, the number of
-  //    MODULE-allocated variables, and the metadata of those variables.  For
-  //    non-module scopes it is empty.
-  int ParameterNamesIndex();
-  int StackLocalFirstSlotIndex();
-  int StackLocalNamesIndex();
-  int ContextLocalNamesIndex();
-  int ContextLocalInfosIndex();
-  int ReceiverInfoIndex();
-  int FunctionNameInfoIndex();
-  int OuterScopeInfoIndex();
-  int ModuleInfoIndex();
-  int ModuleVariableCountIndex();
-  int ModuleVariablesIndex();
-
-  int Lookup(Handle<String> name, int start, int end, VariableMode* mode,
-             VariableLocation* location, InitializationFlag* init_flag,
-             MaybeAssignedFlag* maybe_assigned_flag);
-
-  // Get metadata of i-th MODULE-allocated variable, where 0 <= i <
-  // ModuleVariableCount.  The metadata is returned via out-arguments, which may
-  // be nullptr if the corresponding information is not requested
-  void ModuleVariable(int i, String** name, int* index,
-                      VariableMode* mode = nullptr,
-                      InitializationFlag* init_flag = nullptr,
-                      MaybeAssignedFlag* maybe_assigned_flag = nullptr);
-
-  // Used for the function name variable for named function expressions, and for
-  // the receiver.
-  enum VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
-
-  // Properties of scopes.
-  class ScopeTypeField : public BitField<ScopeType, 0, 4> {};
-  class CallsEvalField : public BitField<bool, ScopeTypeField::kNext, 1> {};
-  STATIC_ASSERT(LANGUAGE_END == 2);
-  class LanguageModeField
-      : public BitField<LanguageMode, CallsEvalField::kNext, 1> {};
-  class DeclarationScopeField
-      : public BitField<bool, LanguageModeField::kNext, 1> {};
-  class ReceiverVariableField
-      : public BitField<VariableAllocationInfo, DeclarationScopeField::kNext,
-                        2> {};
-  class HasNewTargetField
-      : public BitField<bool, ReceiverVariableField::kNext, 1> {};
-  class FunctionVariableField
-      : public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
-  class AsmModuleField
-      : public BitField<bool, FunctionVariableField::kNext, 1> {};
-  class AsmFunctionField : public BitField<bool, AsmModuleField::kNext, 1> {};
-  class HasSimpleParametersField
-      : public BitField<bool, AsmFunctionField::kNext, 1> {};
-  class FunctionKindField
-      : public BitField<FunctionKind, HasSimpleParametersField::kNext, 10> {};
-  class HasOuterScopeInfoField
-      : public BitField<bool, FunctionKindField::kNext, 1> {};
-  class IsDebugEvaluateScopeField
-      : public BitField<bool, HasOuterScopeInfoField::kNext, 1> {};
-
-  // Properties of variables.
-  class VariableModeField : public BitField<VariableMode, 0, 3> {};
-  class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
-  class MaybeAssignedFlagField : public BitField<MaybeAssignedFlag, 4, 1> {};
-
-  friend class ScopeIterator;
-};
-
 // The cache for maps used by normalized (dictionary mode) objects.
 // Such maps do not have property descriptors, so a typical program
 // needs very limited number of distinct normalized maps.
@@ -4815,7 +4345,9 @@
   inline void SetReturnOffset(int index, int value);
   inline void SetReturnHandler(int index, int offset);
 
-  // Lookup handler in a table based on ranges.
+  // Lookup handler in a table based on ranges. The {pc_offset} is an offset to
+  // the start of the potentially throwing instruction (using return addresses
+  // for this value would be invalid).
   int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
 
   // Lookup handler in a table based on return addresses.
@@ -4939,6 +4471,17 @@
 // BytecodeArray represents a sequence of interpreter bytecodes.
 class BytecodeArray : public FixedArrayBase {
  public:
+#define DECLARE_BYTECODE_AGE_ENUM(X) k##X##BytecodeAge,
+  enum Age {
+    kNoAgeBytecodeAge = 0,
+    CODE_AGE_LIST(DECLARE_BYTECODE_AGE_ENUM) kAfterLastBytecodeAge,
+    kFirstBytecodeAge = kNoAgeBytecodeAge,
+    kLastBytecodeAge = kAfterLastBytecodeAge - 1,
+    kBytecodeAgeCount = kAfterLastBytecodeAge - kFirstBytecodeAge - 1,
+    kIsOldBytecodeAge = kSexagenarianBytecodeAge
+  };
+#undef DECLARE_BYTECODE_AGE_ENUM
+
   static int SizeFor(int length) {
     return OBJECT_POINTER_ALIGN(kHeaderSize + length);
   }
@@ -4969,6 +4512,10 @@
   inline int osr_loop_nesting_level() const;
   inline void set_osr_loop_nesting_level(int depth);
 
+  // Accessors for bytecode's code age.
+  inline Age bytecode_age() const;
+  inline void set_bytecode_age(Age age);
+
   // Accessors for the constant pool.
   DECL_ACCESSORS(constant_pool, FixedArray)
 
@@ -5000,8 +4547,9 @@
 
   void CopyBytecodesTo(BytecodeArray* to);
 
-  int LookupRangeInHandlerTable(int code_offset, int* data,
-                                HandlerTable::CatchPrediction* prediction);
+  // Bytecode aging
+  bool IsOld() const;
+  void MakeOlder();
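+  // Usage sketch (hypothetical collector code): advance one age per GC
+  // cycle until the array is considered old:
+  //   if (!bytecode_array->IsOld()) bytecode_array->MakeOlder();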
 
   // Layout description.
   static const int kConstantPoolOffset = FixedArrayBase::kHeaderSize;
@@ -5012,7 +4560,8 @@
   static const int kParameterSizeOffset = kFrameSizeOffset + kIntSize;
   static const int kInterruptBudgetOffset = kParameterSizeOffset + kIntSize;
   static const int kOSRNestingLevelOffset = kInterruptBudgetOffset + kIntSize;
-  static const int kHeaderSize = kOSRNestingLevelOffset + kCharSize;
+  static const int kBytecodeAgeOffset = kOSRNestingLevelOffset + kCharSize;
+  static const int kHeaderSize = kBytecodeAgeOffset + kCharSize;
 
   // Maximal memory consumption for a single BytecodeArray.
   static const int kMaxSize = 512 * MB;
@@ -5288,42 +4837,6 @@
 #endif
 };
 
-
-// A literals array contains the literals for a JSFunction. It also holds
-// the type feedback vector.
-class LiteralsArray : public FixedArray {
- public:
-  static const int kVectorIndex = 0;
-  static const int kFirstLiteralIndex = 1;
-  V8_EXPORT_PRIVATE static const int kFeedbackVectorOffset;
-  static const int kOffsetToFirstLiteral;
-
-  static int OffsetOfLiteralAt(int index) {
-    return OffsetOfElementAt(index + kFirstLiteralIndex);
-  }
-
-  inline TypeFeedbackVector* feedback_vector() const;
-  inline void set_feedback_vector(TypeFeedbackVector* vector);
-  inline Object* literal(int literal_index) const;
-  inline void set_literal(int literal_index, Object* literal);
-  inline void set_literal_undefined(int literal_index);
-  inline int literals_count() const;
-
-  static Handle<LiteralsArray> New(Isolate* isolate,
-                                   Handle<TypeFeedbackVector> vector,
-                                   int number_of_literals,
-                                   PretenureFlag pretenure = TENURED);
-
-  DECLARE_CAST(LiteralsArray)
-
- private:
-  inline Object* get(int index) const;
-  inline void set(int index, Object* value);
-  inline void set(int index, Smi* value);
-  inline void set(int index, Object* value, WriteBarrierMode mode);
-};
-
-
 class TemplateList : public FixedArray {
  public:
   static Handle<TemplateList> New(Isolate* isolate, int size);
@@ -5356,13 +4869,13 @@
   V(REGEXP)                 \
   V(WASM_FUNCTION)          \
   V(WASM_TO_JS_FUNCTION)    \
-  V(JS_TO_WASM_FUNCTION)
+  V(JS_TO_WASM_FUNCTION)    \
+  V(WASM_INTERPRETER_ENTRY)
 
 #define IC_KIND_LIST(V) \
   V(LOAD_IC)            \
   V(LOAD_GLOBAL_IC)     \
   V(KEYED_LOAD_IC)      \
-  V(CALL_IC)            \
   V(STORE_IC)           \
   V(KEYED_STORE_IC)     \
   V(BINARY_OP_IC)       \
@@ -5465,7 +4978,7 @@
   inline bool is_inline_cache_stub();
   inline bool is_debug_stub();
   inline bool is_handler();
-  inline bool is_call_stub();
+  inline bool is_stub();
   inline bool is_binary_op_stub();
   inline bool is_compare_ic_stub();
   inline bool is_to_boolean_ic_stub();
@@ -5562,6 +5075,16 @@
   inline bool marked_for_deoptimization();
   inline void set_marked_for_deoptimization(bool flag);
 
+  // [is_promise_rejection]: For kind BUILTIN, tells whether the exception
+  // thrown by the code will lead to promise rejection.
+  inline bool is_promise_rejection();
+  inline void set_is_promise_rejection(bool flag);
+
+  // [is_exception_caught]: For kind BUILTIN, tells whether the exception
+  // thrown by the code will be caught internally.
+  inline bool is_exception_caught();
+  inline void set_is_exception_caught(bool flag);
+
   // [constant_pool]: The constant pool for this function.
   inline Address constant_pool();
 
@@ -5577,7 +5100,6 @@
   // Find the first map in an IC stub.
   Map* FindFirstMap();
 
-  class FindAndReplacePattern;
   // For each (map-to-find, object-to-replace) pair in the pattern, this
   // function replaces the corresponding placeholder in the code with the
   // object-to-replace. The function assumes that pairs in the pattern come in
@@ -5711,9 +5233,6 @@
   BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
   uint32_t TranslateAstIdToPcOffset(BailoutId ast_id);
 
-  int LookupRangeInHandlerTable(int code_offset, int* data,
-                                HandlerTable::CatchPrediction* prediction);
-
 #define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
   enum Age {
     kToBeExecutedOnceCodeAge = -3,
@@ -5739,12 +5258,12 @@
   void MakeYoung(Isolate* isolate);
   void PreAge(Isolate* isolate);
   void MarkToBeExecutedOnce(Isolate* isolate);
-  void MakeOlder(MarkingParity);
+  void MakeOlder();
   static bool IsYoungSequence(Isolate* isolate, byte* sequence);
   bool IsOld();
   Age GetAge();
   static inline Code* GetPreAgedCodeAgeStub(Isolate* isolate) {
-    return GetCodeAgeStub(isolate, kNotExecutedCodeAge, NO_MARKING_PARITY);
+    return GetCodeAgeStub(isolate, kNotExecutedCodeAge);
   }
 
   void PrintDeoptLocation(FILE* out, Address pc);
@@ -5797,6 +5316,9 @@
       kConstantPoolOffset + kConstantPoolSize;
   static const int kHeaderPaddingStart = kBuiltinIndexOffset + kIntSize;
 
+  enum TrapFields { kTrapCodeOffset, kTrapLandingOffset, kTrapDataSize };
+
   // Add padding to align the instruction start following right after
   // the Code object header.
   static const int kHeaderSize =
@@ -5836,9 +5358,11 @@
   static const int kCanHaveWeakObjects = kIsTurbofannedBit + 1;
   // Could be moved to overlap previous bits when we need more space.
   static const int kIsConstructStub = kCanHaveWeakObjects + 1;
+  static const int kIsPromiseRejection = kIsConstructStub + 1;
+  static const int kIsExceptionCaught = kIsPromiseRejection + 1;
 
   STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
-  STATIC_ASSERT(kIsConstructStub + 1 <= 32);
+  STATIC_ASSERT(kIsExceptionCaught + 1 <= 32);
 
   class StackSlotsField: public BitField<int,
       kStackSlotsFirstBit, kStackSlotsBitCount> {};  // NOLINT
@@ -5850,6 +5374,10 @@
       : public BitField<bool, kCanHaveWeakObjects, 1> {};  // NOLINT
   class IsConstructStubField : public BitField<bool, kIsConstructStub, 1> {
   };  // NOLINT
+  class IsPromiseRejectionField
+      : public BitField<bool, kIsPromiseRejection, 1> {};  // NOLINT
+  class IsExceptionCaughtField : public BitField<bool, kIsExceptionCaught, 1> {
+  };  // NOLINT
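+  // Example (sketch) of reading/updating these flags via BitField:
+  //   flags = IsPromiseRejectionField::update(flags, true);
+  //   bool will_reject = IsPromiseRejectionField::decode(flags);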
 
   // KindSpecificFlags2 layout (ALL)
   static const int kIsCrankshaftedBit = 0;
@@ -5886,16 +5414,12 @@
 
   // Code aging
   byte* FindCodeAgeSequence();
-  static void GetCodeAgeAndParity(Code* code, Age* age,
-                                  MarkingParity* parity);
-  static void GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
-                                  MarkingParity* parity);
-  static Code* GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity);
+  static Age GetCodeAge(Isolate* isolate, byte* sequence);
+  static Age GetAgeOfCodeAgeStub(Code* code);
+  static Code* GetCodeAgeStub(Isolate* isolate, Age age);
 
   // Code aging -- platform-specific
-  static void PatchPlatformCodeAge(Isolate* isolate,
-                                   byte* sequence, Age age,
-                                   MarkingParity parity);
+  static void PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Age age);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
 };
@@ -5931,10 +5455,6 @@
   // Set the source position table.
   inline void set_source_position_table(ByteArray* source_position_table);
 
-  // Return the exception handler table.
-  inline int LookupRangeInHandlerTable(
-      int code_offset, int* data, HandlerTable::CatchPrediction* prediction);
-
   // Returns the size of instructions and the metadata.
   inline int SizeIncludingMetadata();
 
@@ -6328,30 +5848,43 @@
 
   int NumberOfFields();
 
+  // Returns true if transition to the given map requires special
+  // synchronization with the concurrent marker.
+  bool TransitionRequiresSynchronizationWithGC(Map* target);
+  // Returns true if transition to the given map removes a tagged in-object
+  // field.
+  bool TransitionRemovesTaggedField(Map* target);
+  // Returns true if transition to the given map replaces a tagged in-object
+  // field with an untagged in-object field.
+  bool TransitionChangesTaggedFieldToUntaggedField(Map* target);
+
   // TODO(ishell): candidate with JSObject::MigrateToMap().
   bool InstancesNeedRewriting(Map* target);
   bool InstancesNeedRewriting(Map* target, int target_number_of_fields,
                               int target_inobject, int target_unused,
                               int* old_number_of_fields);
   // TODO(ishell): moveit!
-  static Handle<Map> GeneralizeAllFieldRepresentations(Handle<Map> map);
+  static Handle<Map> GeneralizeAllFields(Handle<Map> map);
   MUST_USE_RESULT static Handle<FieldType> GeneralizeFieldType(
       Representation rep1, Handle<FieldType> type1, Representation rep2,
       Handle<FieldType> type2, Isolate* isolate);
-  static void GeneralizeFieldType(Handle<Map> map, int modify_index,
-                                  Representation new_representation,
-                                  Handle<FieldType> new_field_type);
+  static void GeneralizeField(Handle<Map> map, int modify_index,
+                              PropertyConstness new_constness,
+                              Representation new_representation,
+                              Handle<FieldType> new_field_type);
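+  // Generalization is monotonic (sketch of the lattice): constness may only
+  // go from const to mutable, representations only widen toward Tagged, and
+  // field types only widen toward Any; none of them ever narrows.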
 
-  static inline Handle<Map> ReconfigureProperty(
-      Handle<Map> map, int modify_index, PropertyKind new_kind,
-      PropertyAttributes new_attributes, Representation new_representation,
-      Handle<FieldType> new_field_type, StoreMode store_mode);
+  static Handle<Map> ReconfigureProperty(Handle<Map> map, int modify_index,
+                                         PropertyKind new_kind,
+                                         PropertyAttributes new_attributes,
+                                         Representation new_representation,
+                                         Handle<FieldType> new_field_type);
 
-  static inline Handle<Map> ReconfigureElementsKind(
-      Handle<Map> map, ElementsKind new_elements_kind);
+  static Handle<Map> ReconfigureElementsKind(Handle<Map> map,
+                                             ElementsKind new_elements_kind);
 
   static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
                                             int descriptor_number,
+                                            PropertyConstness constness,
                                             Handle<Object> value);
 
   static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode,
@@ -6471,10 +6004,13 @@
                                           Descriptor* descriptor,
                                           TransitionFlag flag);
 
+  static Handle<Object> WrapFieldType(Handle<FieldType> type);
+  static FieldType* UnwrapFieldType(Object* wrapped_type);
+
   MUST_USE_RESULT static MaybeHandle<Map> CopyWithField(
       Handle<Map> map, Handle<Name> name, Handle<FieldType> type,
-      PropertyAttributes attributes, Representation representation,
-      TransitionFlag flag);
+      PropertyAttributes attributes, PropertyConstness constness,
+      Representation representation, TransitionFlag flag);
 
   MUST_USE_RESULT static MaybeHandle<Map> CopyWithConstant(
       Handle<Map> map,
@@ -6515,6 +6051,7 @@
                                               Handle<Name> name,
                                               Handle<Object> value,
                                               PropertyAttributes attributes,
+                                              PropertyConstness constness,
                                               StoreFromKeyed store_mode);
   static Handle<Map> TransitionToAccessorProperty(
       Isolate* isolate, Handle<Map> map, Handle<Name> name, int descriptor,
@@ -6568,6 +6105,8 @@
 
   Code* LookupInCodeCache(Name* name, Code::Flags code);
 
+  static Handle<Map> GetObjectCreateMap(Handle<HeapObject> prototype);
+
   // Computes a hash value for this map, to be used in HashTables and such.
   int Hash();
 
@@ -6592,6 +6131,8 @@
   inline bool IsJSTypedArrayMap();
   inline bool IsJSDataViewMap();
 
+  inline bool IsSpecialReceiverMap();
+
   inline bool CanOmitMapChecks();
 
   static void AddDependentCode(Handle<Map> map,
@@ -6678,7 +6219,6 @@
   static const int kInstanceTypeAndBitFieldOffset =
       kInstanceAttributesOffset + 0;
   static const int kBitField2Offset = kInstanceAttributesOffset + 2;
-  static const int kUnusedPropertyFieldsByte = 3;
   static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 3;
 
   STATIC_ASSERT(kInstanceTypeAndBitFieldOffset ==
@@ -6735,6 +6275,11 @@
       Handle<Map> split_map, Handle<DescriptorArray> descriptors,
       Handle<LayoutDescriptor> full_layout_descriptor);
 
+  // Fires when the layout of an object with a leaf map changes.
+  // This includes adding transitions to the leaf map or changing
+  // the descriptor array.
+  inline void NotifyLeafMapLayoutChange();
+
  private:
   // Returns the map that this (root) map transitions to if its elements_kind
   // is changed to |elements_kind|, or |nullptr| if no such map is cached yet.
@@ -6783,23 +6328,10 @@
   static Handle<Map> CopyNormalized(Handle<Map> map,
                                     PropertyNormalizationMode mode);
 
-  static Handle<Map> Reconfigure(Handle<Map> map,
-                                 ElementsKind new_elements_kind,
-                                 int modify_index, PropertyKind new_kind,
-                                 PropertyAttributes new_attributes,
-                                 Representation new_representation,
-                                 Handle<FieldType> new_field_type,
-                                 StoreMode store_mode);
-
-  static Handle<Map> CopyGeneralizeAllRepresentations(
+  // TODO(ishell): Move to MapUpdater.
+  static Handle<Map> CopyGeneralizeAllFields(
       Handle<Map> map, ElementsKind elements_kind, int modify_index,
-      StoreMode store_mode, PropertyKind kind, PropertyAttributes attributes,
-      const char* reason);
-
-  // Fires when the layout of an object with a leaf map changes.
-  // This includes adding transitions to the leaf map or changing
-  // the descriptor array.
-  inline void NotifyLeafMapLayoutChange();
+      PropertyKind kind, PropertyAttributes attributes, const char* reason);
 
   void DeprecateTransitionTree();
 
@@ -6807,17 +6339,18 @@
                           LayoutDescriptor* new_layout_descriptor);
 
 
-  Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
-
   // Update field type of the given descriptor to new representation and new
   // type. The type must be prepared for storing in descriptor array:
   // it must be either a simple type or a map wrapped in a weak cell.
   void UpdateFieldType(int descriptor_number, Handle<Name> name,
+                       PropertyConstness new_constness,
                        Representation new_representation,
                        Handle<Object> new_wrapped_type);
 
+  // TODO(ishell): Move to MapUpdater.
   void PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
                             PropertyAttributes attributes);
+  // TODO(ishell): Move to MapUpdater.
   void PrintGeneralization(FILE* file, const char* reason, int modify_index,
                            int split, int descriptors, bool constant_to_field,
                            Representation old_representation,
@@ -6826,10 +6359,11 @@
                            MaybeHandle<Object> old_value,
                            MaybeHandle<FieldType> new_field_type,
                            MaybeHandle<Object> new_value);
-
   static const int kFastPropertiesSoftLimit = 12;
   static const int kMaxFastProperties = 128;
 
+  friend class MapUpdater;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
 };
 
@@ -6850,17 +6384,14 @@
   DECL_ACCESSORS(then, JSReceiver)
   DECL_ACCESSORS(resolve, JSFunction)
   DECL_ACCESSORS(reject, JSFunction)
-  DECL_ACCESSORS(debug_id, Object)
-  DECL_ACCESSORS(debug_name, Object)
+
   DECL_ACCESSORS(context, Context)
 
   static const int kThenableOffset = Struct::kHeaderSize;
   static const int kThenOffset = kThenableOffset + kPointerSize;
   static const int kResolveOffset = kThenOffset + kPointerSize;
   static const int kRejectOffset = kResolveOffset + kPointerSize;
-  static const int kDebugIdOffset = kRejectOffset + kPointerSize;
-  static const int kDebugNameOffset = kDebugIdOffset + kPointerSize;
-  static const int kContextOffset = kDebugNameOffset + kPointerSize;
+  static const int kContextOffset = kRejectOffset + kPointerSize;
   static const int kSize = kContextOffset + kPointerSize;
 
   DECLARE_CAST(PromiseResolveThenableJobInfo)
@@ -6871,22 +6402,32 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseResolveThenableJobInfo);
 };
 
+class JSPromise;
+
 // Struct to hold state required for PromiseReactionJob.
 class PromiseReactionJobInfo : public Struct {
  public:
   DECL_ACCESSORS(value, Object)
   DECL_ACCESSORS(tasks, Object)
-  DECL_ACCESSORS(deferred, Object)
-  DECL_ACCESSORS(debug_id, Object)
-  DECL_ACCESSORS(debug_name, Object)
+
+  // See the comment in JSPromise for the states these deferred
+  // fields can be in.
+  DECL_ACCESSORS(deferred_promise, Object)
+  DECL_ACCESSORS(deferred_on_resolve, Object)
+  DECL_ACCESSORS(deferred_on_reject, Object)
+
+  DECL_INT_ACCESSORS(debug_id)
+
   DECL_ACCESSORS(context, Context)
 
   static const int kValueOffset = Struct::kHeaderSize;
   static const int kTasksOffset = kValueOffset + kPointerSize;
-  static const int kDeferredOffset = kTasksOffset + kPointerSize;
-  static const int kDebugIdOffset = kDeferredOffset + kPointerSize;
-  static const int kDebugNameOffset = kDebugIdOffset + kPointerSize;
-  static const int kContextOffset = kDebugNameOffset + kPointerSize;
+  static const int kDeferredPromiseOffset = kTasksOffset + kPointerSize;
+  static const int kDeferredOnResolveOffset =
+      kDeferredPromiseOffset + kPointerSize;
+  static const int kDeferredOnRejectOffset =
+      kDeferredOnResolveOffset + kPointerSize;
+  static const int kContextOffset = kDeferredOnRejectOffset + kPointerSize;
   static const int kSize = kContextOffset + kPointerSize;
 
   DECLARE_CAST(PromiseReactionJobInfo)
@@ -6897,26 +6438,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseReactionJobInfo);
 };
 
-// A simple one-element struct, useful where smis need to be boxed.
-class Box : public Struct {
- public:
-  // [value]: the boxed contents.
-  DECL_ACCESSORS(value, Object)
-
-  DECLARE_CAST(Box)
-
-  // Dispatched behavior.
-  DECLARE_PRINTER(Box)
-  DECLARE_VERIFIER(Box)
-
-  static const int kValueOffset = HeapObject::kHeaderSize;
-  static const int kSize = kValueOffset + kPointerSize;
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(Box);
-};
-
-
 // Container for metadata stored on each prototype map.
 class PrototypeInfo : public Struct {
  public:
@@ -6975,10 +6496,27 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(PrototypeInfo);
 };
 
-class Tuple3 : public Struct {
+class Tuple2 : public Struct {
  public:
   DECL_ACCESSORS(value1, Object)
   DECL_ACCESSORS(value2, Object)
+
+  DECLARE_CAST(Tuple2)
+
+  // Dispatched behavior.
+  DECLARE_PRINTER(Tuple2)
+  DECLARE_VERIFIER(Tuple2)
+
+  static const int kValue1Offset = HeapObject::kHeaderSize;
+  static const int kValue2Offset = kValue1Offset + kPointerSize;
+  static const int kSize = kValue2Offset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Tuple2);
+};
+
+class Tuple3 : public Tuple2 {
+ public:
   DECL_ACCESSORS(value3, Object)
 
   DECLARE_CAST(Tuple3)
@@ -6987,9 +6525,7 @@
   DECLARE_PRINTER(Tuple3)
   DECLARE_VERIFIER(Tuple3)
 
-  static const int kValue1Offset = HeapObject::kHeaderSize;
-  static const int kValue2Offset = kValue1Offset + kPointerSize;
-  static const int kValue3Offset = kValue2Offset + kPointerSize;
+  static const int kValue3Offset = Tuple2::kSize;
   static const int kSize = kValue3Offset + kPointerSize;
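+  // Illustrative note (not part of this change): since kValue3Offset starts
+  // at Tuple2::kSize, a Tuple3 is laid out exactly like a Tuple2 followed by
+  // one extra tagged slot.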
 
  private:
@@ -7022,7 +6558,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(ContextExtension);
 };
 
-
 // Script describes a script which has been added to the VM.
 class Script: public Struct {
  public:
@@ -7031,7 +6566,8 @@
     TYPE_NATIVE = 0,
     TYPE_EXTENSION = 1,
     TYPE_NORMAL = 2,
-    TYPE_WASM = 3
+    TYPE_WASM = 3,
+    TYPE_INSPECTOR = 4
   };
 
   // Script compilation types.
@@ -7085,7 +6621,7 @@
 
   // [shared_function_infos]: weak fixed array containing all shared
   // function infos created from this script.
-  DECL_ACCESSORS(shared_function_infos, Object)
+  DECL_ACCESSORS(shared_function_infos, FixedArray)
 
   // [flags]: Holds an exciting bitfield.
   DECL_INT_ACCESSORS(flags)
@@ -7110,11 +6646,6 @@
   inline CompilationState compilation_state();
   inline void set_compilation_state(CompilationState state);
 
-  // [hide_source]: determines whether the script source can be exposed as
-  // function source. Encoded in the 'flags' field.
-  inline bool hide_source();
-  inline void set_hide_source(bool value);
-
   // [origin_options]: optional attributes set by the embedder via ScriptOrigin,
   // and used by the embedder to make decisions about the script. V8 just passes
   // this through. Encoded in the 'flags' field.
@@ -7127,7 +6658,7 @@
   // resource is accessible. Otherwise, always return true.
   inline bool HasValidSource();
 
-  static Handle<Object> GetNameOrSourceURL(Handle<Script> script);
+  Object* GetNameOrSourceURL();
 
   // Set eval origin for stack trace formatting.
   static void SetEvalOrigin(Handle<Script> script,
@@ -7175,7 +6706,8 @@
 
   // Look through the list of existing shared function infos to find one
   // that matches the function literal.  Return empty handle if not found.
-  MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(FunctionLiteral* fun);
+  MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(
+      Isolate* isolate, const FunctionLiteral* fun);
 
   // Iterate over all script objects on the heap.
   class Iterator {
@@ -7215,9 +6747,8 @@
   // Bit positions in the flags field.
   static const int kCompilationTypeBit = 0;
   static const int kCompilationStateBit = 1;
-  static const int kHideSourceBit = 2;
-  static const int kOriginOptionsShift = 3;
-  static const int kOriginOptionsSize = 3;
+  static const int kOriginOptionsShift = 2;
+  static const int kOriginOptionsSize = 4;
   static const int kOriginOptionsMask = ((1 << kOriginOptionsSize) - 1)
                                         << kOriginOptionsShift;
 
@@ -7235,11 +6766,27 @@
 // Installation of ids for the selected builtin functions is handled
 // by the bootstrapper.
 #define FUNCTIONS_WITH_ID_LIST(V)                           \
+  V(Array, isArray, ArrayIsArray)                           \
+  V(Array.prototype, concat, ArrayConcat)                   \
+  V(Array.prototype, every, ArrayEvery)                     \
+  V(Array.prototype, fill, ArrayFill)                       \
+  V(Array.prototype, filter, ArrayFilter)                   \
+  V(Array.prototype, findIndex, ArrayFindIndex)             \
+  V(Array.prototype, forEach, ArrayForEach)                 \
+  V(Array.prototype, includes, ArrayIncludes)               \
   V(Array.prototype, indexOf, ArrayIndexOf)                 \
+  V(Array.prototype, join, ArrayJoin)                       \
   V(Array.prototype, lastIndexOf, ArrayLastIndexOf)         \
-  V(Array.prototype, push, ArrayPush)                       \
+  V(Array.prototype, map, ArrayMap)                         \
   V(Array.prototype, pop, ArrayPop)                         \
+  V(Array.prototype, push, ArrayPush)                       \
+  V(Array.prototype, reverse, ArrayReverse)                 \
   V(Array.prototype, shift, ArrayShift)                     \
+  V(Array.prototype, slice, ArraySlice)                     \
+  V(Array.prototype, some, ArraySome)                       \
+  V(Array.prototype, splice, ArraySplice)                   \
+  V(Array.prototype, unshift, ArrayUnshift)                 \
+  V(Date, now, DateNow)                                     \
   V(Date.prototype, getDate, DateGetDate)                   \
   V(Date.prototype, getDay, DateGetDay)                     \
   V(Date.prototype, getFullYear, DateGetFullYear)           \
@@ -7251,14 +6798,37 @@
   V(Date.prototype, getTime, DateGetTime)                   \
   V(Function.prototype, apply, FunctionApply)               \
   V(Function.prototype, call, FunctionCall)                 \
+  V(Object, assign, ObjectAssign)                           \
+  V(Object, create, ObjectCreate)                           \
   V(Object.prototype, hasOwnProperty, ObjectHasOwnProperty) \
+  V(Object.prototype, toString, ObjectToString)             \
+  V(RegExp.prototype, compile, RegExpCompile)               \
+  V(RegExp.prototype, exec, RegExpExec)                     \
+  V(RegExp.prototype, test, RegExpTest)                     \
+  V(RegExp.prototype, toString, RegExpToString)             \
   V(String.prototype, charCodeAt, StringCharCodeAt)         \
   V(String.prototype, charAt, StringCharAt)                 \
+  V(String.prototype, codePointAt, StringCodePointAt)       \
   V(String.prototype, concat, StringConcat)                 \
+  V(String.prototype, endsWith, StringEndsWith)             \
+  V(String.prototype, includes, StringIncludes)             \
+  V(String.prototype, indexOf, StringIndexOf)               \
+  V(String.prototype, lastIndexOf, StringLastIndexOf)       \
+  V(String.prototype, repeat, StringRepeat)                 \
+  V(String.prototype, slice, StringSlice)                   \
+  V(String.prototype, startsWith, StringStartsWith)         \
   V(String.prototype, substr, StringSubstr)                 \
+  V(String.prototype, substring, StringSubstring)           \
   V(String.prototype, toLowerCase, StringToLowerCase)       \
+  V(String.prototype, toString, StringToString)             \
   V(String.prototype, toUpperCase, StringToUpperCase)       \
+  V(String.prototype, trim, StringTrim)                     \
+  V(String.prototype, trimLeft, StringTrimLeft)             \
+  V(String.prototype, trimRight, StringTrimRight)           \
+  V(String.prototype, valueOf, StringValueOf)               \
   V(String, fromCharCode, StringFromCharCode)               \
+  V(String, fromCodePoint, StringFromCodePoint)             \
+  V(String, raw, StringRaw)                                 \
   V(Math, random, MathRandom)                               \
   V(Math, floor, MathFloor)                                 \
   V(Math, round, MathRound)                                 \
@@ -7299,7 +6869,29 @@
   V(Number, isSafeInteger, NumberIsSafeInteger)             \
   V(Number, parseFloat, NumberParseFloat)                   \
   V(Number, parseInt, NumberParseInt)                       \
-  V(Number.prototype, toString, NumberToString)
+  V(Number.prototype, toString, NumberToString)             \
+  V(Map.prototype, clear, MapClear)                         \
+  V(Map.prototype, delete, MapDelete)                       \
+  V(Map.prototype, entries, MapEntries)                     \
+  V(Map.prototype, forEach, MapForEach)                     \
+  V(Map.prototype, has, MapHas)                             \
+  V(Map.prototype, keys, MapKeys)                           \
+  V(Map.prototype, set, MapSet)                             \
+  V(Map.prototype, values, MapValues)                       \
+  V(Set.prototype, add, SetAdd)                             \
+  V(Set.prototype, clear, SetClear)                         \
+  V(Set.prototype, delete, SetDelete)                       \
+  V(Set.prototype, entries, SetEntries)                     \
+  V(Set.prototype, forEach, SetForEach)                     \
+  V(Set.prototype, has, SetHas)                             \
+  V(Set.prototype, keys, SetKeys)                           \
+  V(Set.prototype, values, SetValues)                       \
+  V(WeakMap.prototype, delete, WeakMapDelete)               \
+  V(WeakMap.prototype, has, WeakMapHas)                     \
+  V(WeakMap.prototype, set, WeakMapSet)                     \
+  V(WeakSet.prototype, add, WeakSetAdd)                     \
+  V(WeakSet.prototype, delete, WeakSetDelete)               \
+  V(WeakSet.prototype, has, WeakSetHas)
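+
+// A minimal sketch (illustrative, not part of this change) of how such a
+// second-order macro list is consumed: instantiate V to pick out one column,
+// here the id. The enum name below is hypothetical.
+//
+//   #define FUNCTION_ID(holder, name, id) k##id,
+//   enum HypotheticalFunctionId { FUNCTIONS_WITH_ID_LIST(FUNCTION_ID) };
+//   #undef FUNCTION_ID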
 
 #define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
   V(Atomics, load, AtomicsLoad)          \
@@ -7344,15 +6936,13 @@
   kStringIteratorNext,
 };
 
-
 // Result of searching in an optimized code map of a SharedFunctionInfo. Note
-// that both {code} and {literals} can be NULL to pass search result status.
-struct CodeAndLiterals {
-  Code* code;            // Cached optimized code.
-  LiteralsArray* literals;  // Cached literals array.
+// that both {code} and {vector} can be NULL, which encodes the search
+// result status.
+struct CodeAndVector {
+  Code* code;                  // Cached optimized code.
+  FeedbackVector* vector;      // Cached feedback vector.
 };
 
-
 // SharedFunctionInfo describes the JSFunction information that can be
 // shared by multiple instances of the function.
 class SharedFunctionInfo: public HeapObject {
@@ -7383,11 +6973,7 @@
   DECL_ACCESSORS(optimized_code_map, FixedArray)
 
   // Returns entry from optimized code map for specified context and OSR entry.
-  // Note that {code == nullptr, literals == nullptr} indicates no matching
-  // entry has been found, whereas {code, literals == nullptr} indicates that
-  // code is context-independent.
-  CodeAndLiterals SearchOptimizedCodeMap(Context* native_context,
-                                         BailoutId osr_ast_id);
+  Code* SearchOptimizedCodeMap(Context* native_context, BailoutId osr_ast_id);
 
   // Clear optimized code map.
   void ClearOptimizedCodeMap();
@@ -7405,19 +6991,10 @@
   // the entry itself is left in the map in order to proceed sharing literals.
   void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
 
-  // Trims the optimized code map after entries have been removed.
-  void TrimOptimizedCodeMap(int shrink_by);
-
-  static Handle<LiteralsArray> FindOrCreateLiterals(
-      Handle<SharedFunctionInfo> shared, Handle<Context> native_context);
-
   // Add or update entry in the optimized code map for context-dependent code.
-  // If {code} is not given, then an existing entry's code won't be overwritten.
   static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
                                     Handle<Context> native_context,
-                                    MaybeHandle<Code> code,
-                                    Handle<LiteralsArray> literals,
-                                    BailoutId osr_ast_id);
+                                    Handle<Code> code, BailoutId osr_ast_id);
 
   // Set up the link between shared function info and the script. The shared
   // function info is added to the list on the script.
@@ -7428,9 +7005,7 @@
   static const int kEntriesStart = 0;
   static const int kContextOffset = 0;
   static const int kCachedCodeOffset = 1;
-  static const int kLiteralsOffset = 2;
-  static const int kOsrAstIdOffset = 3;
-  static const int kEntryLength = 4;
+  static const int kEntryLength = 2;
   static const int kInitialLength = kEntriesStart + kEntryLength;
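+  // Illustrative layout after this change (sketch, not part of this change):
+  // the optimized code map FixedArray holds
+  //   [context_0, code_0, context_1, code_1, ...]
+  // i.e. kEntryLength-sized (context, code) pairs starting at kEntriesStart.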
 
   static const int kNotFound = -1;
@@ -7442,10 +7017,6 @@
   static const int kOffsetToPreviousCachedCode =
       FixedArray::kHeaderSize +
       kPointerSize * (kCachedCodeOffset - kEntryLength);
-  static const int kOffsetToPreviousLiterals =
-      FixedArray::kHeaderSize + kPointerSize * (kLiteralsOffset - kEntryLength);
-  static const int kOffsetToPreviousOsrAstId =
-      FixedArray::kHeaderSize + kPointerSize * (kOsrAstIdOffset - kEntryLength);
 
   // [scope_info]: Scope info.
   DECL_ACCESSORS(scope_info, ScopeInfo)
@@ -7486,7 +7057,13 @@
   // [feedback_metadata] - describes ast node feedback from full-codegen and
   // (increasingly) from crankshafted code where sufficient feedback isn't
   // available.
-  DECL_ACCESSORS(feedback_metadata, TypeFeedbackMetadata)
+  DECL_ACCESSORS(feedback_metadata, FeedbackMetadata)
+
+  // [function_literal_id] - uniquely identifies the FunctionLiteral this
+  // SharedFunctionInfo represents within its script, or -1 if this
+  // SharedFunctionInfo object doesn't correspond to a parsed FunctionLiteral.
+  inline int function_literal_id() const;
+  inline void set_function_literal_id(int value);
 
 #if TRACE_MAPS
   // [unique_id] - For --trace-maps purposes, an identifier that's persistent
@@ -7508,12 +7085,12 @@
   inline bool IsApiFunction();
   inline FunctionTemplateInfo* get_api_func_data();
   inline void set_api_func_data(FunctionTemplateInfo* data);
-  inline bool HasBytecodeArray();
-  inline BytecodeArray* bytecode_array();
+  inline bool HasBytecodeArray() const;
+  inline BytecodeArray* bytecode_array() const;
   inline void set_bytecode_array(BytecodeArray* bytecode);
   inline void ClearBytecodeArray();
-  inline bool HasAsmWasmData();
-  inline FixedArray* asm_wasm_data();
+  inline bool HasAsmWasmData() const;
+  inline FixedArray* asm_wasm_data() const;
   inline void set_asm_wasm_data(FixedArray* data);
   inline void ClearAsmWasmData();
 
@@ -7538,10 +7115,6 @@
   // [script]: Script from which the function originates.
   DECL_ACCESSORS(script, Object)
 
-  // [num_literals]: Number of literals used by this function.
-  inline int num_literals() const;
-  inline void set_num_literals(int value);
-
   // [start_position_and_type]: Field used to store both the source code
   // position, whether or not the function is a function expression,
   // and whether or not the function is a toplevel function. The two
@@ -7551,18 +7124,53 @@
   inline void set_start_position_and_type(int value);
 
   // The function is subject to debugging if a debug info is attached.
-  inline bool HasDebugInfo();
-  inline DebugInfo* GetDebugInfo();
+  inline bool HasDebugInfo() const;
+  inline DebugInfo* GetDebugInfo() const;
 
   // A function has debug code if the compiled code has debug break slots.
-  inline bool HasDebugCode();
+  inline bool HasDebugCode() const;
 
   // [debug info]: Debug information.
   DECL_ACCESSORS(debug_info, Object)
 
+  // Bit field containing various information collected for debugging.
+  // This field is either stored on the kDebugInfo slot or inside the
+  // debug info struct.
+  inline int debugger_hints() const;
+  inline void set_debugger_hints(int value);
+
+  // Indicates that the function was created by the Function function.
+  // Though it's anonymous, toString should treat it as if it had the name
+  // "anonymous".  We don't set the name itself so that the system does not
+  // see a binding for it.
+  DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
+
+  // Indicates that the function is either an anonymous expression
+  // or an arrow function (the name field can be set through the API,
+  // which does not change this flag).
+  DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
+
+  // Indicates that the shared function info is deserialized from cache.
+  DECL_BOOLEAN_ACCESSORS(deserialized)
+
+  // Indicates that the function cannot cause side-effects.
+  DECL_BOOLEAN_ACCESSORS(has_no_side_effect)
+
+  // Indicates that |has_no_side_effect| has been computed and set.
+  DECL_BOOLEAN_ACCESSORS(computed_has_no_side_effect)
+
+  // Indicates that the function should be skipped during stepping.
+  DECL_BOOLEAN_ACCESSORS(debug_is_blackboxed)
+
+  // Indicates that |debug_is_blackboxed| has been computed and set.
+  DECL_BOOLEAN_ACCESSORS(computed_debug_is_blackboxed)
+
   // The function's name if it is non-empty, otherwise the inferred name.
   String* DebugName();
 
+  // The function cannot cause any side effects.
+  bool HasNoSideEffect();
+
   // Used for flags such as --hydrogen-filter.
   bool PassesFilter(const char* raw_filter);
 
@@ -7635,41 +7243,17 @@
   // Indicate that this function should always be inlined in optimized code.
   DECL_BOOLEAN_ACCESSORS(force_inline)
 
-  // Indicates that the function was created by the Function function.
-  // Though it's anonymous, toString should treat it as if it had the name
-  // "anonymous".  We don't set the name itself so that the system does not
-  // see a binding for it.
-  DECL_BOOLEAN_ACCESSORS(name_should_print_as_anonymous)
-
-  // Indicates that the function is either an anonymous expression
-  // or an arrow function (the name field can be set through the API,
-  // which does not change this flag).
-  DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
-
-  // Is this a function or top-level/eval code.
-  DECL_BOOLEAN_ACCESSORS(is_function)
-
-  // Indicates that code for this function cannot be compiled with Crankshaft.
-  DECL_BOOLEAN_ACCESSORS(dont_crankshaft)
+  // Indicates that code for this function must be compiled through the
+  // Ignition / TurboFan pipeline, and is unsupported by
+  // FullCodegen / Crankshaft.
+  DECL_BOOLEAN_ACCESSORS(must_use_ignition_turbo)
 
   // Indicates that code for this function cannot be flushed.
   DECL_BOOLEAN_ACCESSORS(dont_flush)
 
-  // Indicates that this is a constructor for a base class with instance fields.
-  DECL_BOOLEAN_ACCESSORS(requires_class_field_init)
-  // Indicates that this is a synthesized function to set up class instance
-  // fields.
-  DECL_BOOLEAN_ACCESSORS(is_class_field_initializer)
-
   // Indicates that this function is an asm function.
   DECL_BOOLEAN_ACCESSORS(asm_function)
 
-  // Indicates that the the shared function info is deserialized from cache.
-  DECL_BOOLEAN_ACCESSORS(deserialized)
-
-  // Indicates that the the shared function info has never been compiled before.
-  DECL_BOOLEAN_ACCESSORS(never_compiled)
-
   // Whether this function was created from a FunctionDeclaration.
   DECL_BOOLEAN_ACCESSORS(is_declaration)
 
@@ -7703,6 +7287,7 @@
   // [source code]: Source code for the function.
   bool HasSourceCode() const;
   Handle<Object> GetSourceCode();
+  Handle<Object> GetSourceCodeHarmony();
 
   // Number of times the function was optimized.
   inline int opt_count();
@@ -7733,8 +7318,8 @@
   // Tells whether this function should be subject to debugging.
   inline bool IsSubjectToDebugging();
 
-  // Whether this function is defined in native code or extensions.
-  inline bool IsBuiltin();
+  // Whether this function is defined in user-provided JavaScript code.
+  inline bool IsUserJavaScript();
 
   // Check whether or not this function is inlineable.
   bool IsInlineable();
@@ -7757,19 +7342,35 @@
 
   void ResetForNewContext(int new_ic_age);
 
-  // Iterate over all shared function infos.
-  class Iterator {
+  // Iterate over all shared function infos in a given script.
+  class ScriptIterator {
    public:
-    explicit Iterator(Isolate* isolate);
+    explicit ScriptIterator(Handle<Script> script);
+    ScriptIterator(Isolate* isolate, Handle<FixedArray> shared_function_infos);
+    SharedFunctionInfo* Next();
+
+    // Reset the iterator to run on |script|.
+    void Reset(Handle<Script> script);
+
+   private:
+    Isolate* isolate_;
+    Handle<FixedArray> shared_function_infos_;
+    int index_;
+    DISALLOW_COPY_AND_ASSIGN(ScriptIterator);
+  };
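+
+  // Illustrative use (sketch, not part of this change), assuming Next()
+  // returns null once the script's shared function infos are exhausted:
+  //   SharedFunctionInfo::ScriptIterator it(script);
+  //   while (SharedFunctionInfo* info = it.Next()) { /* visit info */ }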
+
+  // Iterate over all shared function infos on the heap.
+  class GlobalIterator {
+   public:
+    explicit GlobalIterator(Isolate* isolate);
     SharedFunctionInfo* Next();
 
    private:
-    bool NextScript();
-
     Script::Iterator script_iterator_;
-    WeakFixedArray::Iterator sfi_iterator_;
+    WeakFixedArray::Iterator noscript_sfi_iterator_;
+    SharedFunctionInfo::ScriptIterator sfi_iterator_;
     DisallowHeapAllocation no_gc_;
-    DISALLOW_COPY_AND_ASSIGN(Iterator);
+    DISALLOW_COPY_AND_ASSIGN(GlobalIterator);
   };
 
   DECLARE_CAST(SharedFunctionInfo)
@@ -7794,13 +7395,15 @@
   static const int kFunctionIdentifierOffset = kDebugInfoOffset + kPointerSize;
   static const int kFeedbackMetadataOffset =
       kFunctionIdentifierOffset + kPointerSize;
+  static const int kFunctionLiteralIdOffset =
+      kFeedbackMetadataOffset + kPointerSize;
 #if TRACE_MAPS
-  static const int kUniqueIdOffset = kFeedbackMetadataOffset + kPointerSize;
+  static const int kUniqueIdOffset = kFunctionLiteralIdOffset + kPointerSize;
   static const int kLastPointerFieldOffset = kUniqueIdOffset;
 #else
   // Just to not break the postmortem support with conditional offsets
-  static const int kUniqueIdOffset = kFeedbackMetadataOffset;
-  static const int kLastPointerFieldOffset = kFeedbackMetadataOffset;
+  static const int kUniqueIdOffset = kFunctionLiteralIdOffset;
+  static const int kLastPointerFieldOffset = kFunctionLiteralIdOffset;
 #endif
 
 #if V8_HOST_ARCH_32_BIT
@@ -7927,31 +7530,40 @@
     kAllowLazyCompilation,
     kMarkedForTierUp,
     kOptimizationDisabled,
-    kNeverCompiled,
+    kHasDuplicateParameters,
     kNative,
     kStrictModeFunction,
     kUsesArguments,
     kNeedsHomeObject,
     // byte 1
-    kHasDuplicateParameters,
     kForceInline,
     kIsAsmFunction,
-    kIsAnonymousExpression,
-    kNameShouldPrintAsAnonymous,
-    kIsFunction,
-    kDontCrankshaft,
+    kMustUseIgnitionTurbo,
     kDontFlush,
+    kIsDeclaration,
+    kIsAsmWasmBroken,
+
+    kUnused1,  // Unused fields.
+    kUnused2,
+
     // byte 2
     kFunctionKind,
     // rest of byte 2 and first two bits of byte 3 are used by FunctionKind
     // byte 3
-    kDeserialized = kFunctionKind + 10,
-    kIsDeclaration,
-    kIsAsmWasmBroken,
-    kRequiresClassFieldInit,
-    kIsClassFieldInitializer,
-    kCompilerHintsCount,  // Pseudo entry
+    kCompilerHintsCount = kFunctionKind + 10,  // Pseudo entry
   };
+
+  // Bit positions in debugger_hints.
+  enum DebuggerHints {
+    kIsAnonymousExpression,
+    kNameShouldPrintAsAnonymous,
+    kDeserialized,
+    kHasNoSideEffect,
+    kComputedHasNoSideEffect,
+    kDebugIsBlackboxed,
+    kComputedDebugIsBlackboxed,
+  };
+
   // kFunctionKind has to be byte-aligned
   STATIC_ASSERT((kFunctionKind % kBitsPerByte) == 0);
 
@@ -8031,11 +7643,10 @@
 #undef BYTE_OFFSET
 
  private:
-  // Returns entry from optimized code map for specified context and OSR entry.
+  // Returns entry from optimized code map for specified context.
   // The result is either kNotFound, or a start index of the context-dependent
   // entry.
-  int SearchOptimizedCodeMapEntry(Context* native_context,
-                                  BailoutId osr_ast_id);
+  int SearchOptimizedCodeMapEntry(Context* native_context);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
 };
@@ -8066,8 +7677,7 @@
 
   // [input_or_debug_pos]
   // For executing generators: the most recent input value.
-  // For suspended new-style generators: debug information (bytecode offset).
-  // For suspended old-style generators: unused.
+  // For suspended generators: debug information (bytecode offset).
   // There is currently no need to remember the most recent input value for a
   // suspended generator.
   DECL_ACCESSORS(input_or_debug_pos, Object)
@@ -8091,8 +7701,8 @@
   // is suspended.
   int source_position() const;
 
-  // [operand_stack]: Saved operand stack.
-  DECL_ACCESSORS(operand_stack, FixedArray)
+  // [register_file]: Saved interpreter register file.
+  DECL_ACCESSORS(register_file, FixedArray)
 
   DECLARE_CAST(JSGeneratorObject)
 
@@ -8110,93 +7720,13 @@
   static const int kInputOrDebugPosOffset = kReceiverOffset + kPointerSize;
   static const int kResumeModeOffset = kInputOrDebugPosOffset + kPointerSize;
   static const int kContinuationOffset = kResumeModeOffset + kPointerSize;
-  static const int kOperandStackOffset = kContinuationOffset + kPointerSize;
-  static const int kSize = kOperandStackOffset + kPointerSize;
+  static const int kRegisterFileOffset = kContinuationOffset + kPointerSize;
+  static const int kSize = kRegisterFileOffset + kPointerSize;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
 };
 
-class ModuleInfoEntry : public Struct {
- public:
-  DECLARE_CAST(ModuleInfoEntry)
-  DECLARE_PRINTER(ModuleInfoEntry)
-  DECLARE_VERIFIER(ModuleInfoEntry)
-
-  DECL_ACCESSORS(export_name, Object)
-  DECL_ACCESSORS(local_name, Object)
-  DECL_ACCESSORS(import_name, Object)
-  DECL_INT_ACCESSORS(module_request)
-  DECL_INT_ACCESSORS(cell_index)
-  DECL_INT_ACCESSORS(beg_pos)
-  DECL_INT_ACCESSORS(end_pos)
-
-  static Handle<ModuleInfoEntry> New(Isolate* isolate,
-                                     Handle<Object> export_name,
-                                     Handle<Object> local_name,
-                                     Handle<Object> import_name,
-                                     int module_request, int cell_index,
-                                     int beg_pos, int end_pos);
-
-  static const int kExportNameOffset = HeapObject::kHeaderSize;
-  static const int kLocalNameOffset = kExportNameOffset + kPointerSize;
-  static const int kImportNameOffset = kLocalNameOffset + kPointerSize;
-  static const int kModuleRequestOffset = kImportNameOffset + kPointerSize;
-  static const int kCellIndexOffset = kModuleRequestOffset + kPointerSize;
-  static const int kBegPosOffset = kCellIndexOffset + kPointerSize;
-  static const int kEndPosOffset = kBegPosOffset + kPointerSize;
-  static const int kSize = kEndPosOffset + kPointerSize;
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfoEntry);
-};
-
-// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
-class ModuleInfo : public FixedArray {
- public:
-  DECLARE_CAST(ModuleInfo)
-
-  static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
-                                ModuleDescriptor* descr);
-
-  inline FixedArray* module_requests() const;
-  inline FixedArray* special_exports() const;
-  inline FixedArray* regular_exports() const;
-  inline FixedArray* namespace_imports() const;
-  inline FixedArray* regular_imports() const;
-
-  // Accessors for [regular_exports].
-  int RegularExportCount() const;
-  String* RegularExportLocalName(int i) const;
-  int RegularExportCellIndex(int i) const;
-  FixedArray* RegularExportExportNames(int i) const;
-
-  static Handle<ModuleInfoEntry> LookupRegularImport(Handle<ModuleInfo> info,
-                                                     Handle<String> local_name);
-
-#ifdef DEBUG
-  inline bool Equals(ModuleInfo* other) const;
-#endif
-
- private:
-  friend class Factory;
-  friend class ModuleDescriptor;
-  enum {
-    kModuleRequestsIndex,
-    kSpecialExportsIndex,
-    kRegularExportsIndex,
-    kNamespaceImportsIndex,
-    kRegularImportsIndex,
-    kLength
-  };
-  enum {
-    kRegularExportLocalNameOffset,
-    kRegularExportCellIndexOffset,
-    kRegularExportExportNamesOffset,
-    kRegularExportLength
-  };
-  DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfo);
-};
 // When importing a module namespace (import * as foo from "bar"), a
 // JSModuleNamespace object (representing module "bar") is created and bound to
 // the declared variable (foo).  A module can have at most one namespace object.
@@ -8214,8 +7744,16 @@
   // schedule an exception and return Nothing.
   MUST_USE_RESULT MaybeHandle<Object> GetExport(Handle<String> name);
 
+  // In-object fields.
+  enum {
+    kToStringTagFieldIndex,
+    kInObjectFieldCount,
+  };
+
   static const int kModuleOffset = JSObject::kHeaderSize;
-  static const int kSize = kModuleOffset + kPointerSize;
+  static const int kHeaderSize = kModuleOffset + kPointerSize;
+
+  static const int kSize = kHeaderSize + kPointerSize * kInObjectFieldCount;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSModuleNamespace);
@@ -8294,8 +7832,6 @@
   static const int kSize = kRequestedModulesOffset + kPointerSize;
 
  private:
-  enum { kEvaluatedBit };
-
   static void CreateExport(Handle<Module> module, int cell_index,
                            Handle<FixedArray> names);
   static void CreateIndirectExport(Handle<Module> module, Handle<String> name,
@@ -8384,6 +7920,7 @@
 
   // [context]: The context for this function.
   inline Context* context();
+  inline bool has_context() const;
   inline void set_context(Object* context);
   inline JSObject* global_proxy();
   inline Context* native_context();
@@ -8437,26 +7974,25 @@
   // Completes inobject slack tracking on initial map if it is active.
   inline void CompleteInobjectSlackTrackingIfActive();
 
-  // [literals]: Fixed array holding the materialized literals.
-  //
-  // If the function contains object, regexp or array literals, the
-  // literals array prefix contains the object, regexp, and array
-  // function to be used when creating these literals.  This is
-  // necessary so that we do not dynamically lookup the object, regexp
-  // or array functions.  Performing a dynamic lookup, we might end up
-  // using the functions from a new context that we should not have
-  // access to. For API objects we store the boilerplate in the literal array.
-  DECL_ACCESSORS(literals, LiteralsArray)
+  // [feedback_vector_cell]: Fixed array holding the feedback vector.
+  DECL_ACCESSORS(feedback_vector_cell, Cell)
 
+  enum FeedbackVectorState {
+    TOP_LEVEL_SCRIPT_NEEDS_VECTOR,
+    NEEDS_VECTOR,
+    HAS_VECTOR
+  };
+
+  inline FeedbackVectorState GetFeedbackVectorState(Isolate* isolate) const;
+
+  // feedback_vector() can be used once the function is compiled.
+  inline FeedbackVector* feedback_vector() const;
+  inline bool has_feedback_vector() const;
   static void EnsureLiterals(Handle<JSFunction> function);
-  inline TypeFeedbackVector* feedback_vector();
 
-  // Unconditionally clear the type feedback vector (including vector ICs).
+  // Unconditionally clear the type feedback vector.
   void ClearTypeFeedbackInfo();
 
-  // Clear the type feedback vector with a more subtle policy at GC time.
-  void ClearTypeFeedbackInfoAtGCTime();
-
   // The initial map for an object created by this constructor.
   inline Map* initial_map();
   static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
@@ -8565,8 +8101,9 @@
   static const int kSharedFunctionInfoOffset =
       kPrototypeOrInitialMapOffset + kPointerSize;
   static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
-  static const int kLiteralsOffset = kContextOffset + kPointerSize;
-  static const int kNonWeakFieldsEndOffset = kLiteralsOffset + kPointerSize;
+  static const int kFeedbackVectorOffset = kContextOffset + kPointerSize;
+  static const int kNonWeakFieldsEndOffset =
+      kFeedbackVectorOffset + kPointerSize;
   static const int kCodeEntryOffset = kNonWeakFieldsEndOffset;
   static const int kNextFunctionLinkOffset = kCodeEntryOffset + kPointerSize;
   static const int kSize = kNextFunctionLinkOffset + kPointerSize;
@@ -8713,10 +8250,6 @@
 
   void SetValue(Object* value, bool is_value_nan);
 
-  // ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ]
-  static MUST_USE_RESULT MaybeHandle<Object> ToPrimitive(
-      Handle<JSReceiver> receiver, Handle<Object> hint);
-
   // Dispatched behavior.
   DECLARE_PRINTER(JSDate)
   DECLARE_VERIFIER(JSDate)
@@ -8813,6 +8346,9 @@
   // position, or the empty string if the position is invalid.
   Handle<String> GetSourceLine() const;
 
+  inline int error_level() const;
+  inline void set_error_level(int level);
+
   DECLARE_CAST(JSMessageObject)
 
   // Dispatched behavior.
@@ -8826,13 +8362,105 @@
   static const int kStackFramesOffset = kScriptOffset + kPointerSize;
   static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
   static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
-  static const int kSize = kEndPositionOffset + kPointerSize;
+  static const int kErrorLevelOffset = kEndPositionOffset + kPointerSize;
+  static const int kSize = kErrorLevelOffset + kPointerSize;
 
   typedef FixedBodyDescriptor<HeapObject::kMapOffset,
                               kStackFramesOffset + kPointerSize,
                               kSize> BodyDescriptor;
 };
 
+class JSPromise;
+
+// TODO(caitp): Make this a Struct once properties are no longer accessed from
+// JS
+class JSPromiseCapability : public JSObject {
+ public:
+  DECLARE_CAST(JSPromiseCapability)
+
+  DECLARE_VERIFIER(JSPromiseCapability)
+
+  DECL_ACCESSORS(promise, Object)
+  DECL_ACCESSORS(resolve, Object)
+  DECL_ACCESSORS(reject, Object)
+
+  static const int kPromiseOffset = JSObject::kHeaderSize;
+  static const int kResolveOffset = kPromiseOffset + kPointerSize;
+  static const int kRejectOffset = kResolveOffset + kPointerSize;
+  static const int kSize = kRejectOffset + kPointerSize;
+
+  enum InObjectPropertyIndex {
+    kPromiseIndex,
+    kResolveIndex,
+    kRejectIndex,
+    kInObjectPropertyCount  // Dummy.
+  };
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSPromiseCapability);
+};
+
+class JSPromise : public JSObject {
+ public:
+  DECL_INT_ACCESSORS(status)
+  DECL_ACCESSORS(result, Object)
+
+  // There are 3 possible states for these fields --
+  // 1) Undefined -- This is the zero state, in which no callback or
+  // deferred fields are registered.
+  //
+  // 2) Object -- There is a single callback: it is attached directly to
+  // fulfill_reactions / reject_reactions, and the deferred fields are
+  // attached directly to the slots. In this state, deferred_promise
+  // is a JSReceiver and deferred_on_{resolve, reject} are Callables.
+  //
+  // 3) FixedArray -- There is more than one callback, and the callbacks
+  // and deferred fields are attached to FixedArrays.
+  //
+  // The callback can be a Callable or a Symbol.
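+  //
+  // Illustrative state check (sketch, not part of this change):
+  //   if (deferred_promise()->IsUndefined(isolate)) { /* state 1 */ }
+  //   else if (deferred_promise()->IsFixedArray()) { /* state 3 */ }
+  //   else { /* state 2: a single attached JSReceiver */ }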
+  DECL_ACCESSORS(deferred_promise, Object)
+  DECL_ACCESSORS(deferred_on_resolve, Object)
+  DECL_ACCESSORS(deferred_on_reject, Object)
+  DECL_ACCESSORS(fulfill_reactions, Object)
+  DECL_ACCESSORS(reject_reactions, Object)
+
+  DECL_INT_ACCESSORS(flags)
+
+  // [has_handler]: Whether this promise has a reject handler or not.
+  DECL_BOOLEAN_ACCESSORS(has_handler)
+
+  // [handled_hint]: Whether this promise will be handled by a catch
+  // block in an async function.
+  DECL_BOOLEAN_ACCESSORS(handled_hint)
+
+  static const char* Status(int status);
+
+  DECLARE_CAST(JSPromise)
+
+  // Dispatched behavior.
+  DECLARE_PRINTER(JSPromise)
+  DECLARE_VERIFIER(JSPromise)
+
+  // Layout description.
+  static const int kStatusOffset = JSObject::kHeaderSize;
+  static const int kResultOffset = kStatusOffset + kPointerSize;
+  static const int kDeferredPromiseOffset = kResultOffset + kPointerSize;
+  static const int kDeferredOnResolveOffset =
+      kDeferredPromiseOffset + kPointerSize;
+  static const int kDeferredOnRejectOffset =
+      kDeferredOnResolveOffset + kPointerSize;
+  static const int kFulfillReactionsOffset =
+      kDeferredOnRejectOffset + kPointerSize;
+  static const int kRejectReactionsOffset =
+      kFulfillReactionsOffset + kPointerSize;
+  static const int kFlagsOffset = kRejectReactionsOffset + kPointerSize;
+  static const int kSize = kFlagsOffset + kPointerSize;
+
+  // Flags layout.
+  static const int kHasHandlerBit = 0;
+  static const int kHandledHintBit = 1;
+};
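+
+// A minimal sketch (not part of this change) of how the boolean accessors
+// above map onto the flags field, assuming the usual bit-test expansion of
+// DECL_BOOLEAN_ACCESSORS:
+//   bool JSPromise::has_handler() const {
+//     return (flags() >> kHasHandlerBit) & 1;
+//   }
+//   void JSPromise::set_has_handler(bool v) {
+//     set_flags(v ? flags() | (1 << kHasHandlerBit)
+//                 : flags() & ~(1 << kHasHandlerBit));
+//   }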
+
 // Regular expressions
 // The regular expression holds a single reference to a FixedArray in
 // the kDataOffset field.
@@ -9004,9 +8632,25 @@
   static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
 
   static const int kPrefixSize = 0;
-  static const int kEntrySize = 2;
+  static const int kEntrySize = 3;
 };
 
+class InfoVectorPair {
+ public:
+  InfoVectorPair() : shared_(nullptr), vector_cell_(nullptr) {}
+  InfoVectorPair(SharedFunctionInfo* shared, Cell* vector_cell)
+      : shared_(shared), vector_cell_(vector_cell) {}
+
+  SharedFunctionInfo* shared() const { return shared_; }
+  Cell* vector() const { return vector_cell_; }
+
+  bool has_shared() const { return shared_ != nullptr; }
+  bool has_vector() const { return vector_cell_ != nullptr; }
+
+ private:
+  SharedFunctionInfo* shared_;
+  Cell* vector_cell_;
+};
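+
+// Illustrative use with the compilation cache below (sketch, not part of
+// this change):
+//   InfoVectorPair pair = cache->LookupScript(src, context, language_mode);
+//   if (pair.has_shared()) {
+//     // Reuse pair.shared(); if pair.has_vector(), also reuse the cached
+//     // feedback vector cell instead of recompiling.
+//   }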
 
 // This cache is used in two different variants. For regexp caching, it simply
 // maps identifying info of the regexp to the cached regexp object. Scripts and
@@ -9026,18 +8670,25 @@
   // Find cached value for a string key, otherwise return null.
   Handle<Object> Lookup(
       Handle<String> src, Handle<Context> context, LanguageMode language_mode);
-  Handle<Object> LookupEval(
-      Handle<String> src, Handle<SharedFunctionInfo> shared,
-      LanguageMode language_mode, int scope_position);
+  InfoVectorPair LookupScript(Handle<String> src, Handle<Context> context,
+                              LanguageMode language_mode);
+  InfoVectorPair LookupEval(Handle<String> src,
+                            Handle<SharedFunctionInfo> shared,
+                            Handle<Context> native_context,
+                            LanguageMode language_mode, int position);
   Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
   static Handle<CompilationCacheTable> Put(
       Handle<CompilationCacheTable> cache, Handle<String> src,
       Handle<Context> context, LanguageMode language_mode,
       Handle<Object> value);
+  static Handle<CompilationCacheTable> PutScript(
+      Handle<CompilationCacheTable> cache, Handle<String> src,
+      Handle<Context> context, LanguageMode language_mode,
+      Handle<SharedFunctionInfo> value, Handle<Cell> literals);
   static Handle<CompilationCacheTable> PutEval(
       Handle<CompilationCacheTable> cache, Handle<String> src,
-      Handle<SharedFunctionInfo> context, Handle<SharedFunctionInfo> value,
-      int scope_position);
+      Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
+      Handle<Context> native_context, Handle<Cell> literals, int position);
   static Handle<CompilationCacheTable> PutRegExp(
       Handle<CompilationCacheTable> cache, Handle<String> src,
       JSRegExp::Flags flags, Handle<FixedArray> value);
@@ -9142,14 +8793,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackInfo);
 };
 
-
-enum AllocationSiteMode {
-  DONT_TRACK_ALLOCATION_SITE,
-  TRACK_ALLOCATION_SITE,
-  LAST_ALLOCATION_SITE_MODE = TRACK_ALLOCATION_SITE
-};
-
-
 class AllocationSite: public Struct {
  public:
   static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
@@ -9427,6 +9070,7 @@
   inline bool IsExternal();
   inline bool IsCons();
   inline bool IsSliced();
+  inline bool IsThin();
   inline bool IsIndirect();
   inline bool IsExternalOneByte();
   inline bool IsExternalTwoByte();
@@ -9583,6 +9227,10 @@
   // a load.
   DECL_BOOLEAN_ACCESSORS(is_well_known_symbol)
 
+  // [is_public]: Whether this is a symbol created by Symbol.for. Calling
+  // Symbol.keyFor on such a symbol simply needs to return the attached name.
+  DECL_BOOLEAN_ACCESSORS(is_public)
+
   DECLARE_CAST(Symbol)
 
   // Dispatched behavior.
@@ -9594,14 +9242,16 @@
   static const int kFlagsOffset = kNameOffset + kPointerSize;
   static const int kSize = kFlagsOffset + kPointerSize;
 
+  // Flags layout.
+  static const int kPrivateBit = 0;
+  static const int kWellKnownSymbolBit = 1;
+  static const int kPublicBit = 2;
+
   typedef FixedBodyDescriptor<kNameOffset, kFlagsOffset, kSize> BodyDescriptor;
 
   void SymbolShortPrint(std::ostream& os);
 
  private:
-  static const int kPrivateBit = 0;
-  static const int kWellKnownSymbolBit = 1;
-
   const char* PrivateSymbolToName() const;
 
 #if TRACE_MAPS
@@ -9840,6 +9490,7 @@
 
   // Conversion.
   inline bool AsArrayIndex(uint32_t* index);
+  inline uint32_t ToValidIndex(Object* number);
 
   // Trimming.
   enum TrimMode { kTrim, kTrimLeft, kTrimRight };
@@ -10132,6 +9783,34 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(ConsString);
 };
 
+// The ThinString class describes string objects that are just references
+// to another string object. They are used for in-place internalization when
+// the original string cannot actually be internalized in-place: in these
+// cases, the original string is converted to a ThinString pointing at its
+// internalized version (which is allocated as a new object).
+// In terms of memory layout and most algorithms operating on strings,
+// ThinStrings can be thought of as "one-part cons strings".
+class ThinString : public String {
+ public:
+  // Actual string that this ThinString refers to.
+  inline String* actual() const;
+  inline void set_actual(String* s,
+                         WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+  V8_EXPORT_PRIVATE uint16_t ThinStringGet(int index);
+
+  DECLARE_CAST(ThinString)
+  DECLARE_VERIFIER(ThinString)
+
+  // Layout description.
+  static const int kActualOffset = String::kSize;
+  static const int kSize = kActualOffset + kPointerSize;
+
+  typedef FixedBodyDescriptor<kActualOffset, kSize, kSize> BodyDescriptor;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(ThinString);
+};
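+
+// Illustrative flow (sketch, not part of this change): to internalize a
+// string s that cannot be internalized in place, V8 1) allocates the
+// internalized copy i and 2) migrates s to a ThinString whose actual() is i;
+// later reads of s forward to i, e.g. s->ThinStringGet(k) == i->Get(k).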
 
 // The Sliced String class describes strings that are substrings of another
 // sequential string.  The motivation is to save time and memory when creating
@@ -10768,6 +10447,32 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayIterator);
 };
 
+// The [Async-from-Sync Iterator] object
+// (proposal-async-iteration/#sec-async-from-sync-iterator-objects)
+// An object which wraps an ordinary Iterator and converts it to behave
+// according to the Async Iterator protocol.
+// (See https://tc39.github.io/proposal-async-iteration/#sec-iteration)
+class JSAsyncFromSyncIterator : public JSObject {
+ public:
+  DECLARE_CAST(JSAsyncFromSyncIterator)
+  DECLARE_PRINTER(JSAsyncFromSyncIterator)
+  DECLARE_VERIFIER(JSAsyncFromSyncIterator)
+
+  // Async-from-Sync Iterator instances are ordinary objects that inherit
+  // properties from the %AsyncFromSyncIteratorPrototype% intrinsic object.
+  // Async-from-Sync Iterator instances are initially created with the internal
+  // slots listed in Table 4.
+  // (proposal-async-iteration/#table-async-from-sync-iterator-internal-slots)
+  DECL_ACCESSORS(sync_iterator, JSReceiver)
+
+  // Offsets of object fields.
+  static const int kSyncIteratorOffset = JSObject::kHeaderSize;
+  static const int kSize = kSyncIteratorOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSAsyncFromSyncIterator);
+};
+
 class JSStringIterator : public JSObject {
  public:
   // Dispatched behavior.
@@ -10791,37 +10496,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSStringIterator);
 };
 
-// A JS iterator over the elements of a FixedArray.
-// This corresponds to ListIterator in ecma262/#sec-createlistiterator.
-class JSFixedArrayIterator : public JSObject {
- public:
-  DECLARE_CAST(JSFixedArrayIterator)
-  DECLARE_PRINTER(JSFixedArrayIterator)
-  DECLARE_VERIFIER(JSFixedArrayIterator)
-
-  // The array over which the iterator iterates.
-  DECL_ACCESSORS(array, FixedArray)
-
-  // The index of the array element that will be returned next.
-  DECL_INT_ACCESSORS(index)
-
-  // The initial value of the object's "next" property.
-  DECL_ACCESSORS(initial_next, JSFunction)
-
-  static const int kArrayOffset = JSObject::kHeaderSize;
-  static const int kIndexOffset = kArrayOffset + kPointerSize;
-  static const int kNextOffset = kIndexOffset + kPointerSize;
-  static const int kHeaderSize = kNextOffset + kPointerSize;
-
-  enum InObjectPropertyIndex {
-    kNextIndex,
-    kInObjectPropertyCount  // Dummy.
-  };
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(JSFixedArrayIterator);
-};
-
 // OrderedHashTableIterator is an iterator that iterates over the keys and
 // values of an OrderedHashTable.
 //
@@ -10944,6 +10618,8 @@
                   Handle<Object> value, int32_t hash);
   static bool Delete(Handle<JSWeakCollection> collection, Handle<Object> key,
                      int32_t hash);
+  static Handle<JSArray> GetEntries(Handle<JSWeakCollection> holder,
+                                    int max_entries);
 
   static const int kTableOffset = JSObject::kHeaderSize;
   static const int kNextOffset = kTableOffset + kPointerSize;
@@ -11023,6 +10699,9 @@
   inline bool is_shared();
   inline void set_is_shared(bool value);
 
+  inline bool has_guard_region();
+  inline void set_has_guard_region(bool value);
+
   DECLARE_CAST(JSArrayBuffer)
 
   void Neuter();
@@ -11032,10 +10711,12 @@
       void* data, size_t allocated_length,
       SharedFlag shared = SharedFlag::kNotShared);
 
-  static bool SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
-                                  Isolate* isolate, size_t allocated_length,
-                                  bool initialize = true,
-                                  SharedFlag shared = SharedFlag::kNotShared);
+  // Returns false if array buffer contents could not be allocated.
+  // In this case, |array_buffer| will not be set up.
+  static bool SetupAllocatingData(
+      Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
+      size_t allocated_length, bool initialize = true,
+      SharedFlag shared = SharedFlag::kNotShared) WARN_UNUSED_RESULT;
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSArrayBuffer)
@@ -11062,6 +10743,7 @@
   class IsNeuterable : public BitField<bool, 2, 1> {};
   class WasNeutered : public BitField<bool, 3, 1> {};
   class IsShared : public BitField<bool, 4, 1> {};
+  class HasGuardRegion : public BitField<bool, 5, 1> {};
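+  // Illustrative BitField use (sketch, not part of this change):
+  //   uint32_t bits = 0;
+  //   bits = HasGuardRegion::update(bits, true);
+  //   bool guarded = HasGuardRegion::decode(bits);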
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
@@ -11106,6 +10788,11 @@
   DECL_ACCESSORS(length, Object)
   inline uint32_t length_value() const;
 
+  // ES6 9.4.5.3
+  MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+      Isolate* isolate, Handle<JSTypedArray> o, Handle<Object> key,
+      PropertyDescriptor* desc, ShouldThrow should_throw);
+
   DECLARE_CAST(JSTypedArray)
 
   ExternalArrayType type();
@@ -11113,6 +10800,10 @@
 
   Handle<JSArrayBuffer> GetBuffer();
 
+  static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
+                                                   Handle<Object> receiver,
+                                                   const char* method_name);
+
   // Dispatched behavior.
   DECLARE_PRINTER(JSTypedArray)
   DECLARE_VERIFIER(JSTypedArray)
@@ -11543,6 +11234,7 @@
  public:
   DECL_ACCESSORS(call_code, Object)
   DECL_ACCESSORS(prototype_template, Object)
+  DECL_ACCESSORS(prototype_provider_template, Object)
   DECL_ACCESSORS(parent_template, Object)
   DECL_ACCESSORS(named_property_handler, Object)
   DECL_ACCESSORS(indexed_property_handler, Object)
@@ -11577,11 +11269,15 @@
   DECLARE_PRINTER(FunctionTemplateInfo)
   DECLARE_VERIFIER(FunctionTemplateInfo)
 
+  static const int kInvalidSerialNumber = 0;
+
   static const int kCallCodeOffset = TemplateInfo::kHeaderSize;
   static const int kPrototypeTemplateOffset =
       kCallCodeOffset + kPointerSize;
-  static const int kParentTemplateOffset =
+  static const int kPrototypeProviderTemplateOffset =
       kPrototypeTemplateOffset + kPointerSize;
+  static const int kParentTemplateOffset =
+      kPrototypeProviderTemplateOffset + kPointerSize;
   static const int kNamedPropertyHandlerOffset =
       kParentTemplateOffset + kPointerSize;
   static const int kIndexedPropertyHandlerOffset =
@@ -11663,6 +11359,9 @@
   // The shared function info for the source being debugged.
   DECL_ACCESSORS(shared, SharedFunctionInfo)
 
+  // Bit field containing various information collected for debugging.
+  DECL_INT_ACCESSORS(debugger_hints)
+
   DECL_ACCESSORS(debug_bytecode_array, Object)
   // Fixed array holding status information for each active break point.
   DECL_ACCESSORS(break_points, FixedArray)
@@ -11683,8 +11382,6 @@
   // Get the number of break points for this function.
   int GetBreakPointCount();
 
-  static Smi* uninitialized() { return Smi::kZero; }
-
   inline bool HasDebugBytecodeArray();
   inline bool HasDebugCode();
 
@@ -11699,8 +11396,10 @@
   DECLARE_VERIFIER(DebugInfo)
 
   static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
-  static const int kDebugBytecodeArrayIndex =
+  static const int kDebuggerHintsIndex =
       kSharedFunctionInfoIndex + kPointerSize;
+  static const int kDebugBytecodeArrayIndex =
+      kDebuggerHintsIndex + kPointerSize;
   static const int kBreakPointsStateIndex =
       kDebugBytecodeArrayIndex + kPointerSize;
   static const int kSize = kBreakPointsStateIndex + kPointerSize;
@@ -11755,11 +11454,6 @@
 };
 
 
-#undef DECL_BOOLEAN_ACCESSORS
-#undef DECL_ACCESSORS
-#undef DECLARE_CAST
-#undef DECLARE_VERIFIER
-
 #define VISITOR_SYNCHRONIZATION_TAGS_LIST(V)                               \
   V(kStringTable, "string_table", "(Internalized strings)")                \
   V(kExternalStringsTable, "external_strings_table", "(External strings)") \
@@ -11879,4 +11573,6 @@
 }  // NOLINT, false-positive due to second-order macros.
 }  // NOLINT, false-positive due to second-order macros.
 
+#include "src/objects/object-macros-undef.h"
+
 #endif  // V8_OBJECTS_H_
diff --git a/src/objects/literal-objects.cc b/src/objects/literal-objects.cc
new file mode 100644
index 0000000..551b036
--- /dev/null
+++ b/src/objects/literal-objects.cc
@@ -0,0 +1,55 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/literal-objects.h"
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+Object* BoilerplateDescription::name(int index) const {
+  // get() already checks for out-of-bounds access, but we do not want to
+  // allow access to the last element if it is the number of properties.
+  DCHECK_NE(size(), index);
+  return get(2 * index);
+}
+
+Object* BoilerplateDescription::value(int index) const {
+  return get(2 * index + 1);
+}
+
+int BoilerplateDescription::size() const {
+  DCHECK_EQ(0, (length() - (this->has_number_of_properties() ? 1 : 0)) % 2);
+  // Rounding down is intended: an odd length has a trailing count slot.
+  return length() / 2;
+}
+
+int BoilerplateDescription::backing_store_size() const {
+  if (has_number_of_properties()) {
+    // If present, the last entry contains the number of properties.
+    return Smi::cast(this->get(length() - 1))->value();
+  }
+  // If the number is not given explicitly, we assume there are no
+  // properties with computed names.
+  return size();
+}
+
+void BoilerplateDescription::set_backing_store_size(Isolate* isolate,
+                                                    int backing_store_size) {
+  DCHECK(has_number_of_properties());
+  DCHECK_NE(size(), backing_store_size);
+  Handle<Object> backing_store_size_obj =
+      isolate->factory()->NewNumberFromInt(backing_store_size);
+  set(length() - 1, *backing_store_size_obj);
+}
+
+bool BoilerplateDescription::has_number_of_properties() const {
+  return length() % 2 != 0;
+}
+
+}  // namespace internal
+}  // namespace v8
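The accessors above encode a small layout convention: names and values are interleaved in the FixedArray, and an odd overall length signals a trailing slot holding the backing-store size. A standalone sketch of that indexing scheme (plain C++ with a vector standing in for the FixedArray; Description is illustrative, not V8 code):

#include <cassert>
#include <string>
#include <vector>

// Standalone model of the BoilerplateDescription layout: name/value pairs
// stored flat, with an optional trailing entry carrying the backing-store
// size. An odd length means that trailing entry is present.
struct Description {
  std::vector<std::string> slots;  // names at 2*i, values at 2*i+1

  bool has_number_of_properties() const { return slots.size() % 2 != 0; }

  // Number of name/value pairs; integer division rounds the optional
  // trailing slot away, exactly like BoilerplateDescription::size().
  int size() const { return static_cast<int>(slots.size()) / 2; }

  const std::string& name(int i) const { return slots[2 * i]; }
  const std::string& value(int i) const { return slots[2 * i + 1]; }

  int backing_store_size() const {
    if (has_number_of_properties())
      return std::stoi(slots.back());  // last slot holds the count
    return size();  // no computed names: pairs == backing store entries
  }
};

int main() {
  Description d{{"a", "1", "b", "2", "5"}};  // two pairs + trailing count 5
  assert(d.size() == 2);
  assert(d.backing_store_size() == 5);
}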
diff --git a/src/objects/literal-objects.h b/src/objects/literal-objects.h
new file mode 100644
index 0000000..fdd321e
--- /dev/null
+++ b/src/objects/literal-objects.h
@@ -0,0 +1,67 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_LITERAL_OBJECTS_H_
+#define V8_OBJECTS_LITERAL_OBJECTS_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// BoilerplateDescription is a list of properties consisting of name/value
+// pairs. In addition to the properties, it provides the projected number
+// of properties in the backing store. This number includes properties with
+// computed names that are not in the list.
+class BoilerplateDescription : public FixedArray {
+ public:
+  Object* name(int index) const;
+  Object* value(int index) const;
+
+  // The number of boilerplate properties.
+  int size() const;
+
+  // Number of boilerplate properties and properties with computed names.
+  int backing_store_size() const;
+
+  void set_backing_store_size(Isolate* isolate, int backing_store_size);
+
+  DECLARE_CAST(BoilerplateDescription)
+
+ private:
+  bool has_number_of_properties() const;
+};
+
+// Pair of {ElementsKind} and an array of constant values for {ArrayLiteral}
+// expressions. Used to communicate with the runtime for literal boilerplate
+// creation within the {Runtime_CreateArrayLiteral} method.
+class ConstantElementsPair : public Struct {
+ public:
+  DECL_INT_ACCESSORS(elements_kind)
+  DECL_ACCESSORS(constant_values, FixedArrayBase)
+
+  DECLARE_CAST(ConstantElementsPair)
+
+  // Dispatched behavior.
+  DECLARE_PRINTER(ConstantElementsPair)
+  DECLARE_VERIFIER(ConstantElementsPair)
+
+  static const int kElementsKindOffset = HeapObject::kHeaderSize;
+  static const int kConstantValuesOffset = kElementsKindOffset + kPointerSize;
+  static const int kSize = kConstantValuesOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantElementsPair);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif  // V8_OBJECTS_LITERAL_OBJECTS_H_
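ConstantElementsPair defines each field offset in terms of the previous one, the same chaining seen in the FunctionTemplateInfo hunk earlier, where inserting kPrototypeProviderTemplateOffset automatically shifted every later offset. A minimal standalone illustration, assuming a pointer-sized field of 8 bytes (in V8 kPointerSize depends on the target):

#include <cstddef>

// Hypothetical stand-ins; in V8 these come from the object headers.
constexpr int kHeaderSize = 8;
constexpr int kPointerSize = 8;

// Each offset is defined in terms of the one before it, as in
// ConstantElementsPair above, so inserting a field shifts the rest.
constexpr int kElementsKindOffset = kHeaderSize;
constexpr int kConstantValuesOffset = kElementsKindOffset + kPointerSize;
constexpr int kSize = kConstantValuesOffset + kPointerSize;

static_assert(kSize == kHeaderSize + 2 * kPointerSize,
              "two pointer-sized fields after the header");

int main() { return 0; }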
diff --git a/src/objects/module-info.h b/src/objects/module-info.h
new file mode 100644
index 0000000..099ee5f
--- /dev/null
+++ b/src/objects/module-info.h
@@ -0,0 +1,129 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MODULE_INFO_H_
+#define V8_OBJECTS_MODULE_INFO_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class Handle;
+class Isolate;
+class ModuleDescriptor;
+class ModuleInfoEntry;
+class String;
+class Zone;
+
+// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
+class ModuleInfo : public FixedArray {
+ public:
+  DECLARE_CAST(ModuleInfo)
+
+  static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
+                                ModuleDescriptor* descr);
+
+  inline FixedArray* module_requests() const {
+    return FixedArray::cast(get(kModuleRequestsIndex));
+  }
+
+  inline FixedArray* special_exports() const {
+    return FixedArray::cast(get(kSpecialExportsIndex));
+  }
+
+  inline FixedArray* regular_exports() const {
+    return FixedArray::cast(get(kRegularExportsIndex));
+  }
+
+  inline FixedArray* regular_imports() const {
+    return FixedArray::cast(get(kRegularImportsIndex));
+  }
+
+  inline FixedArray* namespace_imports() const {
+    return FixedArray::cast(get(kNamespaceImportsIndex));
+  }
+
+  // Accessors for [regular_exports].
+  int RegularExportCount() const;
+  String* RegularExportLocalName(int i) const;
+  int RegularExportCellIndex(int i) const;
+  FixedArray* RegularExportExportNames(int i) const;
+
+  static Handle<ModuleInfoEntry> LookupRegularImport(Handle<ModuleInfo> info,
+                                                     Handle<String> local_name);
+
+#ifdef DEBUG
+  inline bool Equals(ModuleInfo* other) const {
+    return regular_exports() == other->regular_exports() &&
+           regular_imports() == other->regular_imports() &&
+           special_exports() == other->special_exports() &&
+           namespace_imports() == other->namespace_imports();
+  }
+#endif
+
+ private:
+  friend class Factory;
+  friend class ModuleDescriptor;
+  enum {
+    kModuleRequestsIndex,
+    kSpecialExportsIndex,
+    kRegularExportsIndex,
+    kNamespaceImportsIndex,
+    kRegularImportsIndex,
+    kLength
+  };
+  enum {
+    kRegularExportLocalNameOffset,
+    kRegularExportCellIndexOffset,
+    kRegularExportExportNamesOffset,
+    kRegularExportLength
+  };
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfo);
+};
+
+class ModuleInfoEntry : public Struct {
+ public:
+  DECLARE_CAST(ModuleInfoEntry)
+  DECLARE_PRINTER(ModuleInfoEntry)
+  DECLARE_VERIFIER(ModuleInfoEntry)
+
+  DECL_ACCESSORS(export_name, Object)
+  DECL_ACCESSORS(local_name, Object)
+  DECL_ACCESSORS(import_name, Object)
+  DECL_INT_ACCESSORS(module_request)
+  DECL_INT_ACCESSORS(cell_index)
+  DECL_INT_ACCESSORS(beg_pos)
+  DECL_INT_ACCESSORS(end_pos)
+
+  static Handle<ModuleInfoEntry> New(Isolate* isolate,
+                                     Handle<Object> export_name,
+                                     Handle<Object> local_name,
+                                     Handle<Object> import_name,
+                                     int module_request, int cell_index,
+                                     int beg_pos, int end_pos);
+
+  static const int kExportNameOffset = HeapObject::kHeaderSize;
+  static const int kLocalNameOffset = kExportNameOffset + kPointerSize;
+  static const int kImportNameOffset = kLocalNameOffset + kPointerSize;
+  static const int kModuleRequestOffset = kImportNameOffset + kPointerSize;
+  static const int kCellIndexOffset = kModuleRequestOffset + kPointerSize;
+  static const int kBegPosOffset = kCellIndexOffset + kPointerSize;
+  static const int kEndPosOffset = kBegPosOffset + kPointerSize;
+  static const int kSize = kEndPosOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfoEntry);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif  // V8_OBJECTS_MODULE_INFO_H_
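ModuleInfo's regular_exports array packs a fixed group of slots per export, indexed through the kRegularExport*Offset enum with kRegularExportLength as the stride. A hedged sketch of that stride-based indexing, using plain vectors instead of FixedArray and only two of the three real slots to keep it short (the cell index is stored as a string here purely for simplicity):

#include <cassert>
#include <string>
#include <vector>

// Standalone model: each regular export occupies kRegularExportLength
// consecutive slots, so entry i starts at i * kRegularExportLength.
enum {
  kRegularExportLocalNameOffset,
  kRegularExportCellIndexOffset,
  kRegularExportLength  // export-names slot omitted to keep the sketch small
};

struct RegularExports {
  std::vector<std::string> slots;

  int count() const {
    return static_cast<int>(slots.size()) / kRegularExportLength;
  }
  const std::string& local_name(int i) const {
    return slots[i * kRegularExportLength + kRegularExportLocalNameOffset];
  }
  const std::string& cell_index(int i) const {
    return slots[i * kRegularExportLength + kRegularExportCellIndexOffset];
  }
};

int main() {
  RegularExports exports{{"x", "1", "y", "2"}};
  assert(exports.count() == 2);
  assert(exports.local_name(1) == "y");
}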
diff --git a/src/objects/object-macros-undef.h b/src/objects/object-macros-undef.h
new file mode 100644
index 0000000..509d297
--- /dev/null
+++ b/src/objects/object-macros-undef.h
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#undef DECL_BOOLEAN_ACCESSORS
+#undef DECL_INT_ACCESSORS
+#undef DECL_ACCESSORS
+#undef DECLARE_CAST
+#undef DECLARE_VERIFIER
diff --git a/src/objects/object-macros.h b/src/objects/object-macros.h
new file mode 100644
index 0000000..a3ececc
--- /dev/null
+++ b/src/objects/object-macros.h
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Note 1: Any file that includes this one should include object-macros-undef.h
+// at the bottom.
+
+// Note 2: This file is deliberately missing the include guards (the undeffing
+// approach wouldn't work otherwise).
+
+#define DECL_BOOLEAN_ACCESSORS(name) \
+  inline bool name() const;          \
+  inline void set_##name(bool value);
+
+#define DECL_INT_ACCESSORS(name) \
+  inline int name() const;       \
+  inline void set_##name(int value);
+
+#define DECL_ACCESSORS(name, type)    \
+  inline type* name() const;          \
+  inline void set_##name(type* value, \
+                         WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+#define DECLARE_CAST(type)                   \
+  INLINE(static type* cast(Object* object)); \
+  INLINE(static const type* cast(const Object* object));
+
+#ifdef VERIFY_HEAP
+#define DECLARE_VERIFIER(Name) void Name##Verify();
+#else
+#define DECLARE_VERIFIER(Name)
+#endif
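The pairing above is the whole point of the two headers: object-macros.h (deliberately without include guards) defines the DECL_* family for one consumer header, and object-macros-undef.h, included last, scrubs the names again so they cannot leak into other translation units. A toy version of the pattern, squashed into one file; the accessors are defined inline here so the sketch compiles, whereas V8's versions only declare:

// --- would live in toy-macros.h (no include guard on purpose) ---
#define DECL_INT_ACCESSORS(name) \
  int name() const { return name##_; } \
  void set_##name(int value) { name##_ = value; }

// --- a header that consumes the macros ---
class Point {
 public:
  DECL_INT_ACCESSORS(x)
  DECL_INT_ACCESSORS(y)
 private:
  int x_ = 0, y_ = 0;
};

// --- would live in toy-macros-undef.h, included last ---
#undef DECL_INT_ACCESSORS

#include <cassert>
int main() {
  Point p;
  p.set_x(3);
  assert(p.x() == 3);
}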
diff --git a/src/objects/regexp-match-info.h b/src/objects/regexp-match-info.h
new file mode 100644
index 0000000..327ded3
--- /dev/null
+++ b/src/objects/regexp-match-info.h
@@ -0,0 +1,76 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_REGEXP_MATCH_INFO_H_
+#define V8_OBJECTS_REGEXP_MATCH_INFO_H_
+
+#include "src/base/compiler-specific.h"
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class Object;
+class String;
+
+// RegExpMatchInfo holds the matchIndices array of the last successful
+// regexp match (an array of start/end index pairs for the match and all
+// the captured substrings); the invariant is that there are at least two
+// capture indices. The array also contains the subject string for the
+// last successful match. After creation the result must be treated as a
+// FixedArray in all regards.
+class V8_EXPORT_PRIVATE RegExpMatchInfo : NON_EXPORTED_BASE(public FixedArray) {
+ public:
+  // Returns the number of captures, which is defined as the length of the
+  // matchIndices objects of the last match. matchIndices contains two indices
+  // for each capture (including the match itself), i.e. 2 * #captures + 2.
+  inline int NumberOfCaptureRegisters();
+  inline void SetNumberOfCaptureRegisters(int value);
+
+  // Returns the subject string of the last match.
+  inline String* LastSubject();
+  inline void SetLastSubject(String* value);
+
+  // Like LastSubject, but modifiable by the user.
+  inline Object* LastInput();
+  inline void SetLastInput(Object* value);
+
+  // Returns the i'th capture index, 0 <= i < NumberOfCaptureRegisters().
+  // Capture(0) and Capture(1) determine the start- and endpoint of the
+  // match itself.
+  inline int Capture(int i);
+  inline void SetCapture(int i, int value);
+
+  // Reserves space for captures.
+  static Handle<RegExpMatchInfo> ReserveCaptures(
+      Handle<RegExpMatchInfo> match_info, int capture_count);
+
+  DECLARE_CAST(RegExpMatchInfo)
+
+  static const int kNumberOfCapturesIndex = 0;
+  static const int kLastSubjectIndex = 1;
+  static const int kLastInputIndex = 2;
+  static const int kFirstCaptureIndex = 3;
+  static const int kLastMatchOverhead = kFirstCaptureIndex;
+
+  static const int kNumberOfCapturesOffset = FixedArray::kHeaderSize;
+  static const int kLastSubjectOffset = kNumberOfCapturesOffset + kPointerSize;
+  static const int kLastInputOffset = kLastSubjectOffset + kPointerSize;
+  static const int kFirstCaptureOffset = kLastInputOffset + kPointerSize;
+
+  // Every match info is guaranteed to have enough space to store two captures.
+  static const int kInitialCaptureIndices = 2;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(RegExpMatchInfo);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif  // V8_OBJECTS_REGEXP_MATCH_INFO_H_
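The constants above pin down the arithmetic: captures start at kFirstCaptureIndex, and a match with N capture groups needs 2 * N + 2 capture registers (start/end for the match itself plus one pair per group). A quick standalone check of that math, with names mirroring the constants but the code purely illustrative:

#include <cassert>

constexpr int kFirstCaptureIndex = 3;  // captures start after the header slots

// Registers needed for a regexp with capture_count capture groups:
// start/end for the match itself plus start/end per group.
constexpr int CaptureRegisters(int capture_count) {
  return 2 * capture_count + 2;
}

// Total FixedArray length backing the match info.
constexpr int MatchInfoLength(int capture_count) {
  return kFirstCaptureIndex + CaptureRegisters(capture_count);
}

int main() {
  static_assert(CaptureRegisters(0) == 2, "match start/end always present");
  assert(MatchInfoLength(2) == 3 + 6);  // /(a)(b)/ -> three index pairs
}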
diff --git a/src/ast/scopeinfo.cc b/src/objects/scope-info.cc
similarity index 97%
rename from src/ast/scopeinfo.cc
rename to src/objects/scope-info.cc
index 3a3ea03..ae828cc 100644
--- a/src/ast/scopeinfo.cc
+++ b/src/objects/scope-info.cc
@@ -4,10 +4,13 @@
 
 #include <stdlib.h>
 
+#include "src/objects/scope-info.h"
+
 #include "src/ast/context-slot-cache.h"
 #include "src/ast/scopes.h"
 #include "src/ast/variables.h"
 #include "src/bootstrapper.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -143,13 +146,15 @@
   bool has_simple_parameters = false;
   bool asm_module = false;
   bool asm_function = false;
-  FunctionKind function_kind = kNormalFunction;
   if (scope->is_function_scope()) {
     DeclarationScope* function_scope = scope->AsDeclarationScope();
     has_simple_parameters = function_scope->has_simple_parameters();
     asm_module = function_scope->asm_module();
     asm_function = function_scope->asm_function();
-    function_kind = function_scope->function_kind();
+  }
+  FunctionKind function_kind = kNormalFunction;
+  if (scope->is_declaration_scope()) {
+    function_kind = scope->AsDeclarationScope()->function_kind();
   }
 
   // Encode the flags.
@@ -391,37 +396,28 @@
   return scope_info;
 }
 
-
 ScopeInfo* ScopeInfo::Empty(Isolate* isolate) {
   return isolate->heap()->empty_scope_info();
 }
 
-
 ScopeType ScopeInfo::scope_type() {
   DCHECK_LT(0, length());
   return ScopeTypeField::decode(Flags());
 }
 
-
 bool ScopeInfo::CallsEval() {
   return length() > 0 && CallsEvalField::decode(Flags());
 }
 
-
 LanguageMode ScopeInfo::language_mode() {
   return length() > 0 ? LanguageModeField::decode(Flags()) : SLOPPY;
 }
 
-
 bool ScopeInfo::is_declaration_scope() {
   return DeclarationScopeField::decode(Flags());
 }
 
-
-int ScopeInfo::LocalCount() {
-  return StackLocalCount() + ContextLocalCount();
-}
-
+int ScopeInfo::LocalCount() { return StackLocalCount() + ContextLocalCount(); }
 
 int ScopeInfo::StackSlotCount() {
   if (length() > 0) {
@@ -432,7 +428,6 @@
   return 0;
 }
 
-
 int ScopeInfo::ContextLength() {
   if (length() > 0) {
     int context_locals = ContextLocalCount();
@@ -443,6 +438,7 @@
                        (scope_type() == BLOCK_SCOPE && CallsSloppyEval() &&
                         is_declaration_scope()) ||
                        (scope_type() == FUNCTION_SCOPE && CallsSloppyEval()) ||
+                       (scope_type() == FUNCTION_SCOPE && IsAsmModule()) ||
                        scope_type() == MODULE_SCOPE;
 
     if (has_context) {
@@ -453,7 +449,6 @@
   return 0;
 }
 
-
 bool ScopeInfo::HasReceiver() {
   if (length() > 0) {
     return NONE != ReceiverVariableField::decode(Flags());
@@ -462,7 +457,6 @@
   }
 }
 
-
 bool ScopeInfo::HasAllocatedReceiver() {
   if (length() > 0) {
     VariableAllocationInfo allocation = ReceiverVariableField::decode(Flags());
@@ -472,10 +466,8 @@
   }
 }
 
-
 bool ScopeInfo::HasNewTarget() { return HasNewTargetField::decode(Flags()); }
 
-
 bool ScopeInfo::HasFunctionName() {
   if (length() > 0) {
     return NONE != FunctionVariableField::decode(Flags());
@@ -517,11 +509,7 @@
   }
 }
 
-
-bool ScopeInfo::HasContext() {
-  return ContextLength() > 0;
-}
-
+bool ScopeInfo::HasContext() { return ContextLength() > 0; }
 
 String* ScopeInfo::FunctionName() {
   DCHECK(HasFunctionName());
@@ -545,7 +533,6 @@
   return String::cast(get(info_index));
 }
 
-
 String* ScopeInfo::LocalName(int var) {
   DCHECK_LE(0, var);
   DCHECK_LT(var, LocalCount());
@@ -555,7 +542,6 @@
   return String::cast(get(info_index));
 }
 
-
 String* ScopeInfo::StackLocalName(int var) {
   DCHECK_LE(0, var);
   DCHECK_LT(var, StackLocalCount());
@@ -563,7 +549,6 @@
   return String::cast(get(info_index));
 }
 
-
 int ScopeInfo::StackLocalIndex(int var) {
   DCHECK_LE(0, var);
   DCHECK_LT(var, StackLocalCount());
@@ -571,7 +556,6 @@
   return first_slot_index + var;
 }
 
-
 String* ScopeInfo::ContextLocalName(int var) {
   DCHECK_LE(0, var);
   DCHECK_LT(var, ContextLocalCount());
@@ -579,7 +563,6 @@
   return String::cast(get(info_index));
 }
 
-
 VariableMode ScopeInfo::ContextLocalMode(int var) {
   DCHECK_LE(0, var);
   DCHECK_LT(var, ContextLocalCount());
@@ -588,7 +571,6 @@
   return VariableModeField::decode(value);
 }
 
-
 InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
   DCHECK_LE(0, var);
   DCHECK_LT(var, ContextLocalCount());
@@ -597,7 +579,6 @@
   return InitFlagField::decode(value);
 }
 
-
 MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) {
   DCHECK_LE(0, var);
   DCHECK_LT(var, ContextLocalCount());
@@ -615,7 +596,6 @@
          name->Equals(name->GetHeap()->this_string());
 }
 
-
 int ScopeInfo::StackSlotIndex(String* name) {
   DCHECK(name->IsInternalizedString());
   if (length() > 0) {
@@ -704,7 +684,6 @@
   return ContextLocalName(var);
 }
 
-
 int ScopeInfo::ParameterIndex(String* name) {
   DCHECK(name->IsInternalizedString());
   if (length() > 0) {
@@ -724,7 +703,6 @@
   return -1;
 }
 
-
 int ScopeInfo::ReceiverContextSlotIndex() {
   if (length() > 0 && ReceiverVariableField::decode(Flags()) == CONTEXT)
     return Smi::cast(get(ReceiverInfoIndex()))->value();
@@ -742,7 +720,6 @@
   return -1;
 }
 
-
 FunctionKind ScopeInfo::function_kind() {
   return FunctionKindField::decode(Flags());
 }
@@ -752,7 +729,6 @@
   return kVariablePartIndex;
 }
 
-
 int ScopeInfo::StackLocalFirstSlotIndex() {
   return ParameterNamesIndex() + ParameterCount();
 }
@@ -818,15 +794,12 @@
 
 #ifdef DEBUG
 
-static void PrintList(const char* list_name,
-                      int nof_internal_slots,
-                      int start,
-                      int end,
-                      ScopeInfo* scope_info) {
+static void PrintList(const char* list_name, int nof_internal_slots, int start,
+                      int end, ScopeInfo* scope_info) {
   if (start < end) {
     PrintF("\n  // %s\n", list_name);
     if (nof_internal_slots > 0) {
-      PrintF("  %2d - %2d [internal slots]\n", 0 , nof_internal_slots - 1);
+      PrintF("  %2d - %2d [internal slots]\n", 0, nof_internal_slots - 1);
     }
     for (int i = nof_internal_slots; start < end; ++i, ++start) {
       PrintF("  %2d ", i);
@@ -836,7 +809,6 @@
   }
 }
 
-
 void ScopeInfo::Print() {
   PrintF("ScopeInfo ");
   if (HasFunctionName()) {
diff --git a/src/objects/scope-info.h b/src/objects/scope-info.h
new file mode 100644
index 0000000..75a374d
--- /dev/null
+++ b/src/objects/scope-info.h
@@ -0,0 +1,348 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SCOPE_INFO_H_
+#define V8_OBJECTS_SCOPE_INFO_H_
+
+#include "src/globals.h"
+#include "src/objects.h"
+#include "src/utils.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class Handle;
+class Isolate;
+template <typename T>
+class MaybeHandle;
+class Scope;
+class Zone;
+
+// ScopeInfo represents information about different scopes of a source
+// program and the allocation of the scope's variables. Scope information
+// is stored in a compressed form in ScopeInfo objects and is used
+// at runtime (stack dumps, deoptimization, etc.).
+
+// This object provides quick access to scope info details for runtime
+// routines.
+class ScopeInfo : public FixedArray {
+ public:
+  DECLARE_CAST(ScopeInfo)
+
+  // Return the type of this scope.
+  ScopeType scope_type();
+
+  // Does this scope call eval?
+  bool CallsEval();
+
+  // Return the language mode of this scope.
+  LanguageMode language_mode();
+
+  // True if this scope is a (var) declaration scope.
+  bool is_declaration_scope();
+
+  // Does this scope make a sloppy eval call?
+  bool CallsSloppyEval() { return CallsEval() && is_sloppy(language_mode()); }
+
+  // Return the total number of locals allocated on the stack and in the
+  // context. This includes the parameters that are allocated in the context.
+  int LocalCount();
+
+  // Return the number of stack slots for code. This number consists of two
+  // parts:
+  //  1. One stack slot per stack allocated local.
+  //  2. One stack slot for the function name if it is stack allocated.
+  int StackSlotCount();
+
+  // Return the number of context slots for code if a context is allocated.
+  // This number consists of three parts:
+  //  1. Size of fixed header for every context: Context::MIN_CONTEXT_SLOTS
+  //  2. One context slot per context allocated local.
+  //  3. One context slot for the function name if it is context allocated.
+  // Parameters allocated in the context count as context-allocated locals.
+  // If no context is allocated for this scope, ContextLength returns 0.
+  int ContextLength();
+
+  // Does this scope declare a "this" binding?
+  bool HasReceiver();
+
+  // Does this scope declare a "this" binding, and is that binding stack- or
+  // context-allocated?
+  bool HasAllocatedReceiver();
+
+  // Does this scope declare a "new.target" binding?
+  bool HasNewTarget();
+
+  // Is this scope the scope of a named function expression?
+  bool HasFunctionName();
+
+  // Return whether this scope has context-allocated locals.
+  bool HasHeapAllocatedLocals();
+
+  // Return whether a context is allocated for this scope.
+  bool HasContext();
+
+  // Return whether this is a function scope with "use asm".
+  inline bool IsAsmModule() { return AsmModuleField::decode(Flags()); }
+
+  // Return whether this is a nested function within an asm module scope.
+  inline bool IsAsmFunction() { return AsmFunctionField::decode(Flags()); }
+
+  inline bool HasSimpleParameters() {
+    return HasSimpleParametersField::decode(Flags());
+  }
+
+  // Return the function_name if present.
+  String* FunctionName();
+
+  ModuleInfo* ModuleDescriptorInfo();
+
+  // Return the name of the given parameter.
+  String* ParameterName(int var);
+
+  // Return the name of the given local.
+  String* LocalName(int var);
+
+  // Return the name of the given stack local.
+  String* StackLocalName(int var);
+
+  // Return the index of the given stack local.
+  int StackLocalIndex(int var);
+
+  // Return the name of the given context local.
+  String* ContextLocalName(int var);
+
+  // Return the mode of the given context local.
+  VariableMode ContextLocalMode(int var);
+
+  // Return the initialization flag of the given context local.
+  InitializationFlag ContextLocalInitFlag(int var);
+
+  // Return the maybe-assigned flag of the given context local.
+  MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var);
+
+  // Return true if this local was introduced by the compiler, and should
+  // not be exposed to the user in a debugger.
+  static bool VariableIsSynthetic(String* name);
+
+  // Lookup support for serialized scope info. Returns the stack slot index
+  // for a given slot name if the slot is present; otherwise returns a value
+  // < 0. The name must be an internalized string.
+  int StackSlotIndex(String* name);
+
+  // Lookup support for serialized scope info. Returns the local context slot
+  // index for a given slot name if the slot is present; otherwise
+  // returns a value < 0. The name must be an internalized string.
+  // If the slot is present and mode != NULL, sets *mode to the corresponding
+  // mode for that variable.
+  static int ContextSlotIndex(Handle<ScopeInfo> scope_info, Handle<String> name,
+                              VariableMode* mode, InitializationFlag* init_flag,
+                              MaybeAssignedFlag* maybe_assigned_flag);
+
+  // Lookup metadata of a MODULE-allocated variable.  Return 0 if there is no
+  // module variable with the given name (the index value of a MODULE variable
+  // is never 0).
+  int ModuleIndex(Handle<String> name, VariableMode* mode,
+                  InitializationFlag* init_flag,
+                  MaybeAssignedFlag* maybe_assigned_flag);
+
+  // Lookup the name of a certain context slot by its index.
+  String* ContextSlotName(int slot_index);
+
+  // Lookup support for serialized scope info. Returns the
+  // parameter index for a given parameter name if the parameter is present;
+  // otherwise returns a value < 0. The name must be an internalized string.
+  int ParameterIndex(String* name);
+
+  // Lookup support for serialized scope info. Returns the function context
+  // slot index if the function name is present and context-allocated (named
+  // function expressions, only), otherwise returns a value < 0. The name
+  // must be an internalized string.
+  int FunctionContextSlotIndex(String* name);
+
+  // Lookup support for serialized scope info.  Returns the receiver context
+  // slot index if the scope has a "this" binding, and the binding is
+  // context-allocated.  Otherwise returns a value < 0.
+  int ReceiverContextSlotIndex();
+
+  FunctionKind function_kind();
+
+  // Returns true if this ScopeInfo is linked to an outer ScopeInfo.
+  bool HasOuterScopeInfo();
+
+  // Returns true if this ScopeInfo was created for a debug-evaluate scope.
+  bool IsDebugEvaluateScope();
+
+  // Can be used to mark a ScopeInfo that looks like a with-scope as actually
+  // being a debug-evaluate scope.
+  void SetIsDebugEvaluateScope();
+
+  // Return the outer ScopeInfo if present.
+  ScopeInfo* OuterScopeInfo();
+
+#ifdef DEBUG
+  bool Equals(ScopeInfo* other) const;
+#endif
+
+  static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope,
+                                  MaybeHandle<ScopeInfo> outer_scope);
+  static Handle<ScopeInfo> CreateForWithScope(
+      Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope);
+  static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
+
+  // Serializes empty scope info.
+  V8_EXPORT_PRIVATE static ScopeInfo* Empty(Isolate* isolate);
+
+#ifdef DEBUG
+  void Print();
+#endif
+
+// The layout of the static part of a ScopeInfo is as follows. Each entry is
+// numeric and occupies one array slot.
+// 1. A set of properties of the scope.
+// 2. The number of parameters. For non-function scopes this is 0.
+// 3. The number of non-parameter variables allocated on the stack.
+// 4. The number of non-parameter and parameter variables allocated in the
+//    context.
+#define FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(V) \
+  V(Flags)                                   \
+  V(ParameterCount)                          \
+  V(StackLocalCount)                         \
+  V(ContextLocalCount)
+
+#define FIELD_ACCESSORS(name)                                             \
+  inline void Set##name(int value) { set(k##name, Smi::FromInt(value)); } \
+  inline int name() {                                                     \
+    if (length() > 0) {                                                   \
+      return Smi::cast(get(k##name))->value();                            \
+    } else {                                                              \
+      return 0;                                                           \
+    }                                                                     \
+  }
+
+  FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
+#undef FIELD_ACCESSORS
+
+  enum {
+#define DECL_INDEX(name) k##name,
+    FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(DECL_INDEX)
+#undef DECL_INDEX
+        kVariablePartIndex
+  };
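The FOR_EACH_SCOPE_INFO_NUMERIC_FIELD list above is expanded twice — once by FIELD_ACCESSORS to stamp out getters/setters and once by DECL_INDEX to build the index enum — so field order and indices stay consistent by construction. A self-contained sketch of the same X-macro technique, using a toy field list rather than the real one:

#include <cassert>

// One authoritative field list, expanded twice below.
#define FOR_EACH_FIELD(V) \
  V(Width)                \
  V(Height)

struct Rect {
  int slots[2] = {0, 0};

  // Expansion 1: an enum index per field, in list order.
  enum {
#define DECL_INDEX(name) k##name,
    FOR_EACH_FIELD(DECL_INDEX)
#undef DECL_INDEX
  };

  // Expansion 2: a getter/setter pair per field, reusing the indices.
#define FIELD_ACCESSORS(name)                           \
  int name() const { return slots[k##name]; }           \
  void Set##name(int value) { slots[k##name] = value; }
  FOR_EACH_FIELD(FIELD_ACCESSORS)
#undef FIELD_ACCESSORS
};

int main() {
  Rect r;
  r.SetHeight(7);
  assert(r.Height() == 7);
  static_assert(Rect::kHeight == 1, "list order fixes the indices");
}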
+
+ private:
+  // The layout of the variable part of a ScopeInfo is as follows:
+  // 1. ParameterNames:
+  //    This part stores the names of the parameters for function scopes. One
+  //    slot is used per parameter, so in total this part occupies
+  //    ParameterCount() slots in the array. For other scopes than function
+  //    scopes ParameterCount() is 0.
+  // 2. StackLocalFirstSlot:
+  //    Index of the first stack slot for stack locals. Stack locals
+  //    belonging to this scope are located on the stack at slots starting
+  //    from this index.
+  // 3. StackLocalNames:
+  //    Contains the names of local variables that are allocated on the stack,
+  //    in increasing order of the stack slot index. The first local variable
+  //    has the stack slot index defined in StackLocalFirstSlot (point 2 above).
+  //    One slot is used per stack local, so in total this part occupies
+  //    StackLocalCount() slots in the array.
+  // 4. ContextLocalNames:
+  //    Contains the names of local variables and parameters that are allocated
+  //    in the context. They are stored in increasing order of the context slot
+  //    index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
+  //    context local, so in total this part occupies ContextLocalCount() slots
+  //    in the array.
+  // 5. ContextLocalInfos:
+  //    Contains the variable modes and initialization flags corresponding to
+  //    the context locals in ContextLocalNames. One slot is used per
+  //    context local, so in total this part occupies ContextLocalCount()
+  //    slots in the array.
+  // 6. ReceiverInfo:
+  //    If the scope binds a "this" value, one slot is reserved to hold the
+  //    context or stack slot index for the variable.
+  // 7. FunctionNameInfo:
+  //    If the scope belongs to a named function expression this part contains
+  //    information about the function variable. It always occupies two array
+  //    slots:  a. The name of the function variable.
+  //            b. The context or stack slot index for the variable.
+  // 8. OuterScopeInfoIndex:
+  //    The outer scope's ScopeInfo or the hole if there's none.
+  // 9. ModuleInfo, ModuleVariableCount, and ModuleVariables:
+  //    For a module scope, this part contains the ModuleInfo, the number of
+  //    MODULE-allocated variables, and the metadata of those variables.  For
+  //    non-module scopes it is empty.
+  int ParameterNamesIndex();
+  int StackLocalFirstSlotIndex();
+  int StackLocalNamesIndex();
+  int ContextLocalNamesIndex();
+  int ContextLocalInfosIndex();
+  int ReceiverInfoIndex();
+  int FunctionNameInfoIndex();
+  int OuterScopeInfoIndex();
+  int ModuleInfoIndex();
+  int ModuleVariableCountIndex();
+  int ModuleVariablesIndex();
+
+  int Lookup(Handle<String> name, int start, int end, VariableMode* mode,
+             VariableLocation* location, InitializationFlag* init_flag,
+             MaybeAssignedFlag* maybe_assigned_flag);
+
+  // Get metadata of i-th MODULE-allocated variable, where 0 <= i <
+  // ModuleVariableCount.  The metadata is returned via out-arguments, which may
+  // be nullptr if the corresponding information is not requested.
+  void ModuleVariable(int i, String** name, int* index,
+                      VariableMode* mode = nullptr,
+                      InitializationFlag* init_flag = nullptr,
+                      MaybeAssignedFlag* maybe_assigned_flag = nullptr);
+
+  // Used for the function name variable for named function expressions, and
+  // for the receiver.
+  enum VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
+
+  // Properties of scopes.
+  class ScopeTypeField : public BitField<ScopeType, 0, 4> {};
+  class CallsEvalField : public BitField<bool, ScopeTypeField::kNext, 1> {};
+  STATIC_ASSERT(LANGUAGE_END == 2);
+  class LanguageModeField
+      : public BitField<LanguageMode, CallsEvalField::kNext, 1> {};
+  class DeclarationScopeField
+      : public BitField<bool, LanguageModeField::kNext, 1> {};
+  class ReceiverVariableField
+      : public BitField<VariableAllocationInfo, DeclarationScopeField::kNext,
+                        2> {};
+  class HasNewTargetField
+      : public BitField<bool, ReceiverVariableField::kNext, 1> {};
+  class FunctionVariableField
+      : public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
+  class AsmModuleField
+      : public BitField<bool, FunctionVariableField::kNext, 1> {};
+  class AsmFunctionField : public BitField<bool, AsmModuleField::kNext, 1> {};
+  class HasSimpleParametersField
+      : public BitField<bool, AsmFunctionField::kNext, 1> {};
+  class FunctionKindField
+      : public BitField<FunctionKind, HasSimpleParametersField::kNext, 10> {};
+  class HasOuterScopeInfoField
+      : public BitField<bool, FunctionKindField::kNext, 1> {};
+  class IsDebugEvaluateScopeField
+      : public BitField<bool, HasOuterScopeInfoField::kNext, 1> {};
+
+  // Properties of variables.
+  class VariableModeField : public BitField<VariableMode, 0, 3> {};
+  class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
+  class MaybeAssignedFlagField : public BitField<MaybeAssignedFlag, 4, 1> {};
+
+  friend class ScopeIterator;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif  // V8_OBJECTS_SCOPE_INFO_H_
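ScopeInfo packs all of its flags into one Smi via chained BitField specializations, each starting at the previous field's kNext. A reduced standalone BitField showing the encode/decode mechanics (the real template lives elsewhere in V8 and carries more checks; this is an approximation):

#include <cassert>
#include <cstdint>

// Minimal model of V8's BitField: a field of `size` bits at `shift`.
template <typename T, int shift, int size>
struct BitField {
  static constexpr int kNext = shift + size;  // where the next field starts
  static constexpr uint32_t kMask = ((1u << size) - 1u) << shift;

  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static constexpr T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> shift);
  }
};

// Chained exactly like ScopeTypeField / CallsEvalField above.
using ScopeTypeField = BitField<int, 0, 4>;
using CallsEvalField = BitField<bool, ScopeTypeField::kNext, 1>;

int main() {
  uint32_t flags = ScopeTypeField::encode(5) | CallsEvalField::encode(true);
  assert(ScopeTypeField::decode(flags) == 5);
  assert(CallsEvalField::decode(flags));
}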
diff --git a/src/parsing/OWNERS b/src/parsing/OWNERS
index 44cc4ed..5f136aa 100644
--- a/src/parsing/OWNERS
+++ b/src/parsing/OWNERS
@@ -3,5 +3,7 @@
 adamk@chromium.org
 littledan@chromium.org
 marja@chromium.org
+neis@chromium.org
 rossberg@chromium.org
+verwaest@chromium.org
 vogelheim@chromium.org
diff --git a/src/parsing/duplicate-finder.cc b/src/parsing/duplicate-finder.cc
index 6b57153..0e03da7 100644
--- a/src/parsing/duplicate-finder.cc
+++ b/src/parsing/duplicate-finder.cc
@@ -4,83 +4,26 @@
 
 #include "src/parsing/duplicate-finder.h"
 
-#include "src/conversions.h"
-#include "src/unicode-cache.h"
-
 namespace v8 {
 namespace internal {
 
-int DuplicateFinder::AddOneByteSymbol(Vector<const uint8_t> key, int value) {
-  return AddSymbol(key, true, value);
+bool DuplicateFinder::AddOneByteSymbol(Vector<const uint8_t> key) {
+  return AddSymbol(key, true);
 }
 
-int DuplicateFinder::AddTwoByteSymbol(Vector<const uint16_t> key, int value) {
-  return AddSymbol(Vector<const uint8_t>::cast(key), false, value);
+bool DuplicateFinder::AddTwoByteSymbol(Vector<const uint16_t> key) {
+  return AddSymbol(Vector<const uint8_t>::cast(key), false);
 }
 
-int DuplicateFinder::AddSymbol(Vector<const uint8_t> key, bool is_one_byte,
-                               int value) {
+bool DuplicateFinder::AddSymbol(Vector<const uint8_t> key, bool is_one_byte) {
   uint32_t hash = Hash(key, is_one_byte);
   byte* encoding = BackupKey(key, is_one_byte);
   base::HashMap::Entry* entry = map_.LookupOrInsert(encoding, hash);
   int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
-  entry->value =
-      reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
+  entry->value = reinterpret_cast<void*>(1);
   return old_value;
 }
 
-int DuplicateFinder::AddNumber(Vector<const uint8_t> key, int value) {
-  DCHECK(key.length() > 0);
-  // Quick check for already being in canonical form.
-  if (IsNumberCanonical(key)) {
-    return AddOneByteSymbol(key, value);
-  }
-
-  int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY;
-  double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
-  int length;
-  const char* string;
-  if (!std::isfinite(double_value)) {
-    string = "Infinity";
-    length = 8;  // strlen("Infinity");
-  } else {
-    string = DoubleToCString(double_value,
-                             Vector<char>(number_buffer_, kBufferSize));
-    length = StrLength(string);
-  }
-  return AddSymbol(
-      Vector<const byte>(reinterpret_cast<const byte*>(string), length), true,
-      value);
-}
-
-bool DuplicateFinder::IsNumberCanonical(Vector<const uint8_t> number) {
-  // Test for a safe approximation of number literals that are already
-  // in canonical form: max 15 digits, no leading zeroes, except an
-  // integer part that is a single zero, and no trailing zeros below
-  // the decimal point.
-  int pos = 0;
-  int length = number.length();
-  if (number.length() > 15) return false;
-  if (number[pos] == '0') {
-    pos++;
-  } else {
-    while (pos < length &&
-           static_cast<unsigned>(number[pos] - '0') <= ('9' - '0'))
-      pos++;
-  }
-  if (length == pos) return true;
-  if (number[pos] != '.') return false;
-  pos++;
-  bool invalid_last_digit = true;
-  while (pos < length) {
-    uint8_t digit = number[pos] - '0';
-    if (digit > '9' - '0') return false;
-    invalid_last_digit = (digit == 0);
-    pos++;
-  }
-  return !invalid_last_digit;
-}
-
 uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
   // Primitive hash function, almost identical to the one used
   // for strings (except that it's seeded by the length and representation).
diff --git a/src/parsing/duplicate-finder.h b/src/parsing/duplicate-finder.h
index a3858e7..c11c477 100644
--- a/src/parsing/duplicate-finder.h
+++ b/src/parsing/duplicate-finder.h
@@ -11,25 +11,16 @@
 namespace v8 {
 namespace internal {
 
-class UnicodeCache;
-
 // DuplicateFinder discovers duplicate symbols.
 class DuplicateFinder {
  public:
-  explicit DuplicateFinder(UnicodeCache* constants)
-      : unicode_constants_(constants), backing_store_(16), map_(&Match) {}
+  DuplicateFinder() : backing_store_(16), map_(&Match) {}
 
-  int AddOneByteSymbol(Vector<const uint8_t> key, int value);
-  int AddTwoByteSymbol(Vector<const uint16_t> key, int value);
-  // Add a a number literal by converting it (if necessary)
-  // to the string that ToString(ToNumber(literal)) would generate.
-  // and then adding that string with AddOneByteSymbol.
-  // This string is the actual value used as key in an object literal,
-  // and the one that must be different from the other keys.
-  int AddNumber(Vector<const uint8_t> key, int value);
+  bool AddOneByteSymbol(Vector<const uint8_t> key);
+  bool AddTwoByteSymbol(Vector<const uint16_t> key);
 
  private:
-  int AddSymbol(Vector<const uint8_t> key, bool is_one_byte, int value);
+  bool AddSymbol(Vector<const uint8_t> key, bool is_one_byte);
   // Backs up the key and its length in the backing store.
   // The backup is stored with a base 127 encoding of the
   // length (plus a bit saying whether the string is one byte),
@@ -40,22 +31,13 @@
   // for having the same base-127 encoded lengths and representation,
   // and then having the same 'length' bytes following.
   static bool Match(void* first, void* second);
+
   // Creates a hash from a sequence of bytes.
   static uint32_t Hash(Vector<const uint8_t> key, bool is_one_byte);
-  // Checks whether a string containing a JS number is its canonical
-  // form.
-  static bool IsNumberCanonical(Vector<const uint8_t> key);
 
-  // Size of buffer. Sufficient for using it to call DoubleToCString in
-  // from conversions.h.
-  static const int kBufferSize = 100;
-
-  UnicodeCache* unicode_constants_;
   // Backing store used to store strings used as hashmap keys.
   SequenceCollector<unsigned char> backing_store_;
   base::CustomMatcherHashMap map_;
-  // Buffer used for string->number->canonical string conversions.
-  char number_buffer_[kBufferSize];
 };
 
 }  // namespace internal
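With the value parameter gone, the contract of AddSymbol reduces to "report whether this key was seen before", i.e. the classic insert-into-a-set idiom. A behavioral sketch with std::unordered_set standing in for the base-127 backing store and custom-matcher hash map:

#include <cassert>
#include <string>
#include <unordered_set>

// Behavioral model of the new DuplicateFinder contract: AddSymbol returns
// whether the symbol had been added before (true means "duplicate").
class DuplicateFinder {
 public:
  bool AddSymbol(const std::string& key) {
    // insert().second is true when the key was newly inserted, so the
    // previous-occupancy result is its negation, like old_value above.
    return !seen_.insert(key).second;
  }

 private:
  std::unordered_set<std::string> seen_;
};

int main() {
  DuplicateFinder finder;
  assert(!finder.AddSymbol("x"));  // first sighting
  assert(finder.AddSymbol("x"));   // duplicate detected
}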
diff --git a/src/parsing/func-name-inferrer.cc b/src/parsing/func-name-inferrer.cc
index a86e1c2..28dbe49 100644
--- a/src/parsing/func-name-inferrer.cc
+++ b/src/parsing/func-name-inferrer.cc
@@ -4,9 +4,10 @@
 
 #include "src/parsing/func-name-inferrer.h"
 
-#include "src/ast/ast.h"
 #include "src/ast/ast-value-factory.h"
+#include "src/ast/ast.h"
 #include "src/list-inl.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -46,8 +47,8 @@
 
 void FuncNameInferrer::RemoveAsyncKeywordFromEnd() {
   if (IsOpen()) {
-    DCHECK(names_stack_.length() > 0);
-    DCHECK(names_stack_.last().name->IsOneByteEqualTo("async"));
+    CHECK(names_stack_.length() > 0);
+    CHECK(names_stack_.last().name->IsOneByteEqualTo("async"));
     names_stack_.RemoveLast();
   }
 }
diff --git a/src/parsing/func-name-inferrer.h b/src/parsing/func-name-inferrer.h
index cc9204b..9eea4a8 100644
--- a/src/parsing/func-name-inferrer.h
+++ b/src/parsing/func-name-inferrer.h
@@ -5,7 +5,6 @@
 #ifndef V8_PARSING_FUNC_NAME_INFERRER_H_
 #define V8_PARSING_FUNC_NAME_INFERRER_H_
 
-#include "src/handles.h"
 #include "src/zone/zone.h"
 
 namespace v8 {
diff --git a/src/parsing/parameter-initializer-rewriter.cc b/src/parsing/parameter-initializer-rewriter.cc
index 73224a2..7a1bc1e 100644
--- a/src/parsing/parameter-initializer-rewriter.cc
+++ b/src/parsing/parameter-initializer-rewriter.cc
@@ -4,9 +4,10 @@
 
 #include "src/parsing/parameter-initializer-rewriter.h"
 
-#include "src/ast/ast.h"
 #include "src/ast/ast-traversal-visitor.h"
+#include "src/ast/ast.h"
 #include "src/ast/scopes.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/parsing/parse-info.cc b/src/parsing/parse-info.cc
index 4fbfb19..37dca66 100644
--- a/src/parsing/parse-info.cc
+++ b/src/parsing/parse-info.cc
@@ -4,14 +4,19 @@
 
 #include "src/parsing/parse-info.h"
 
+#include "src/api.h"
 #include "src/ast/ast-value-factory.h"
 #include "src/ast/ast.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects-inl.h"
+#include "src/objects/scope-info.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
 
-ParseInfo::ParseInfo(Zone* zone)
-    : zone_(zone),
+ParseInfo::ParseInfo(AccountingAllocator* zone_allocator)
+    : zone_(std::make_shared<Zone>(zone_allocator, ZONE_NAME)),
       flags_(0),
       source_stream_(nullptr),
       source_stream_encoding_(ScriptCompiler::StreamedSource::ONE_BYTE),
@@ -19,20 +24,25 @@
       extension_(nullptr),
       compile_options_(ScriptCompiler::kNoCompileOptions),
       script_scope_(nullptr),
+      asm_function_scope_(nullptr),
       unicode_cache_(nullptr),
       stack_limit_(0),
       hash_seed_(0),
       compiler_hints_(0),
       start_position_(0),
       end_position_(0),
+      parameters_end_pos_(kNoSourcePosition),
+      function_literal_id_(FunctionLiteral::kIdTypeInvalid),
+      max_function_literal_id_(FunctionLiteral::kIdTypeInvalid),
       isolate_(nullptr),
       cached_data_(nullptr),
       ast_value_factory_(nullptr),
       function_name_(nullptr),
-      literal_(nullptr) {}
+      literal_(nullptr),
+      deferred_handles_(nullptr) {}
 
-ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
-    : ParseInfo(zone) {
+ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
+    : ParseInfo(shared->GetIsolate()->allocator()) {
   isolate_ = shared->GetIsolate();
 
   set_toplevel(shared->is_toplevel());
@@ -43,10 +53,12 @@
   set_compiler_hints(shared->compiler_hints());
   set_start_position(shared->start_position());
   set_end_position(shared->end_position());
+  function_literal_id_ = shared->function_literal_id();
   set_stack_limit(isolate_->stack_guard()->real_climit());
   set_unicode_cache(isolate_->unicode_cache());
   set_language_mode(shared->language_mode());
   set_shared_info(shared);
+  set_module(shared->kind() == FunctionKind::kModule);
 
   Handle<Script> script(Script::cast(shared->script()));
   set_script(script);
@@ -60,11 +72,17 @@
   }
 }
 
-ParseInfo::ParseInfo(Zone* zone, Handle<Script> script) : ParseInfo(zone) {
+ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared,
+                     std::shared_ptr<Zone> zone)
+    : ParseInfo(shared) {
+  zone_.swap(zone);
+}
+
+ParseInfo::ParseInfo(Handle<Script> script)
+    : ParseInfo(script->GetIsolate()->allocator()) {
   isolate_ = script->GetIsolate();
 
-  set_allow_lazy_parsing(String::cast(script->source())->length() >
-                         FLAG_min_preparse_length);
+  set_allow_lazy_parsing();
   set_toplevel();
   set_hash_seed(isolate_->heap()->HashSeed());
   set_stack_limit(isolate_->stack_guard()->real_climit());
@@ -83,25 +101,67 @@
   ast_value_factory_ = nullptr;
 }
 
+// static
+ParseInfo* ParseInfo::AllocateWithoutScript(Handle<SharedFunctionInfo> shared) {
+  Isolate* isolate = shared->GetIsolate();
+  ParseInfo* p = new ParseInfo(isolate->allocator());
+  p->isolate_ = isolate;
+
+  p->set_toplevel(shared->is_toplevel());
+  p->set_allow_lazy_parsing(FLAG_lazy_inner_functions);
+  p->set_hash_seed(isolate->heap()->HashSeed());
+  p->set_is_named_expression(shared->is_named_expression());
+  p->set_calls_eval(shared->scope_info()->CallsEval());
+  p->set_compiler_hints(shared->compiler_hints());
+  p->set_start_position(shared->start_position());
+  p->set_end_position(shared->end_position());
+  p->function_literal_id_ = shared->function_literal_id();
+  p->set_stack_limit(isolate->stack_guard()->real_climit());
+  p->set_unicode_cache(isolate->unicode_cache());
+  p->set_language_mode(shared->language_mode());
+  p->set_shared_info(shared);
+  p->set_module(shared->kind() == FunctionKind::kModule);
+
+  // BUG(5946): This function exists as a workaround until we can
+  // get rid of %SetCode in our native functions. The ParseInfo
+  // is explicitly set up for the case that:
+  // a) you have a native built-in,
+  // b) it's being run for the 2nd-Nth time in an isolate,
+  // c) we've already compiled bytecode and therefore don't need
+  //    to parse.
+  // We tolerate a ParseInfo without a Script in this case.
+  p->set_native(true);
+  p->set_eval(false);
+
+  Handle<HeapObject> scope_info(shared->outer_scope_info());
+  if (!scope_info->IsTheHole(isolate) &&
+      Handle<ScopeInfo>::cast(scope_info)->length() > 0) {
+    p->set_outer_scope_info(Handle<ScopeInfo>::cast(scope_info));
+  }
+  return p;
+}
+
 DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
 
 bool ParseInfo::is_declaration() const {
   return (compiler_hints_ & (1 << SharedFunctionInfo::kIsDeclaration)) != 0;
 }
 
-bool ParseInfo::requires_class_field_init() const {
-  return (compiler_hints_ &
-          (1 << SharedFunctionInfo::kRequiresClassFieldInit)) != 0;
-}
-bool ParseInfo::is_class_field_initializer() const {
-  return (compiler_hints_ &
-          (1 << SharedFunctionInfo::kIsClassFieldInitializer)) != 0;
-}
-
 FunctionKind ParseInfo::function_kind() const {
   return SharedFunctionInfo::FunctionKindBits::decode(compiler_hints_);
 }
 
+void ParseInfo::set_deferred_handles(
+    std::shared_ptr<DeferredHandles> deferred_handles) {
+  DCHECK(deferred_handles_.get() == nullptr);
+  deferred_handles_.swap(deferred_handles);
+}
+
+void ParseInfo::set_deferred_handles(DeferredHandles* deferred_handles) {
+  DCHECK(deferred_handles_.get() == nullptr);
+  deferred_handles_.reset(deferred_handles);
+}
+
 #ifdef DEBUG
 bool ParseInfo::script_is_native() const {
   return script_->type() == Script::TYPE_NATIVE;
diff --git a/src/parsing/parse-info.h b/src/parsing/parse-info.h
index 24188d9..4828690 100644
--- a/src/parsing/parse-info.h
+++ b/src/parsing/parse-info.h
@@ -5,9 +5,12 @@
 #ifndef V8_PARSING_PARSE_INFO_H_
 #define V8_PARSING_PARSE_INFO_H_
 
+#include <memory>
+
 #include "include/v8.h"
 #include "src/globals.h"
 #include "src/handles.h"
+#include "src/parsing/preparsed-scope-data.h"
 
 namespace v8 {
 
@@ -15,9 +18,11 @@
 
 namespace internal {
 
+class AccountingAllocator;
 class AstRawString;
 class AstValueFactory;
 class DeclarationScope;
+class DeferredHandles;
 class FunctionLiteral;
 class ScriptData;
 class SharedFunctionInfo;
@@ -28,13 +33,26 @@
 // A container for the inputs, configuration options, and outputs of parsing.
 class V8_EXPORT_PRIVATE ParseInfo {
  public:
-  explicit ParseInfo(Zone* zone);
-  ParseInfo(Zone* zone, Handle<Script> script);
-  ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared);
+  explicit ParseInfo(AccountingAllocator* zone_allocator);
+  ParseInfo(Handle<Script> script);
+  ParseInfo(Handle<SharedFunctionInfo> shared);
+
+  // TODO(rmcilroy): Remove once Hydrogen no longer needs this.
+  ParseInfo(Handle<SharedFunctionInfo> shared, std::shared_ptr<Zone> zone);
 
   ~ParseInfo();
 
-  Zone* zone() const { return zone_; }
+  static ParseInfo* AllocateWithoutScript(Handle<SharedFunctionInfo> shared);
+
+  Zone* zone() const { return zone_.get(); }
+
+  std::shared_ptr<Zone> zone_shared() const { return zone_; }
+
+  void set_deferred_handles(std::shared_ptr<DeferredHandles> deferred_handles);
+  void set_deferred_handles(DeferredHandles* deferred_handles);
+  std::shared_ptr<DeferredHandles> deferred_handles() const {
+    return deferred_handles_;
+  }
 
 // Convenience accessor methods for flags.
 #define FLAG_ACCESSOR(flag, getter, setter)     \
@@ -93,13 +111,12 @@
   ScriptData** cached_data() const { return cached_data_; }
   void set_cached_data(ScriptData** cached_data) { cached_data_ = cached_data; }
 
+  PreParsedScopeData* preparsed_scope_data() { return &preparsed_scope_data_; }
+
   ScriptCompiler::CompileOptions compile_options() const {
     return compile_options_;
   }
   void set_compile_options(ScriptCompiler::CompileOptions compile_options) {
-    if (compile_options == ScriptCompiler::kConsumeParserCache) {
-      set_allow_lazy_parsing();
-    }
     compile_options_ = compile_options;
   }
 
@@ -108,6 +125,11 @@
     script_scope_ = script_scope;
   }
 
+  DeclarationScope* asm_function_scope() const { return asm_function_scope_; }
+  void set_asm_function_scope(DeclarationScope* scope) {
+    asm_function_scope_ = scope;
+  }
+
   AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
   void set_ast_value_factory(AstValueFactory* ast_value_factory) {
     ast_value_factory_ = ast_value_factory;
@@ -147,10 +169,23 @@
   int end_position() const { return end_position_; }
   void set_end_position(int end_position) { end_position_ = end_position; }
 
+  int parameters_end_pos() const { return parameters_end_pos_; }
+  void set_parameters_end_pos(int parameters_end_pos) {
+    parameters_end_pos_ = parameters_end_pos;
+  }
+
+  int function_literal_id() const { return function_literal_id_; }
+  void set_function_literal_id(int function_literal_id) {
+    function_literal_id_ = function_literal_id;
+  }
+
+  int max_function_literal_id() const { return max_function_literal_id_; }
+  void set_max_function_literal_id(int max_function_literal_id) {
+    max_function_literal_id_ = max_function_literal_id;
+  }
+
   // Getters for individual compiler hints.
   bool is_declaration() const;
-  bool requires_class_field_init() const;
-  bool is_class_field_initializer() const;
   FunctionKind function_kind() const;
 
   //--------------------------------------------------------------------------
@@ -180,8 +215,12 @@
   }
 
   void ReopenHandlesInNewHandleScope() {
-    shared_ = Handle<SharedFunctionInfo>(*shared_);
-    script_ = Handle<Script>(*script_);
+    if (!script_.is_null()) {
+      script_ = Handle<Script>(*script_);
+    }
+    if (!shared_.is_null()) {
+      shared_ = Handle<SharedFunctionInfo>(*shared_);
+    }
     Handle<ScopeInfo> outer_scope_info;
     if (maybe_outer_scope_info_.ToHandle(&outer_scope_info)) {
       maybe_outer_scope_info_ = Handle<ScopeInfo>(*outer_scope_info);
@@ -213,7 +252,7 @@
   };
 
   //------------- Inputs to parsing and scope analysis -----------------------
-  Zone* zone_;
+  std::shared_ptr<Zone> zone_;
   unsigned flags_;
   ScriptCompiler::ExternalSourceStream* source_stream_;
   ScriptCompiler::StreamedSource::Encoding source_stream_encoding_;
@@ -221,12 +260,16 @@
   v8::Extension* extension_;
   ScriptCompiler::CompileOptions compile_options_;
   DeclarationScope* script_scope_;
+  DeclarationScope* asm_function_scope_;
   UnicodeCache* unicode_cache_;
   uintptr_t stack_limit_;
   uint32_t hash_seed_;
   int compiler_hints_;
   int start_position_;
   int end_position_;
+  int parameters_end_pos_;
+  int function_literal_id_;
+  int max_function_literal_id_;
 
   // TODO(titzer): Move handles and isolate out of ParseInfo.
   Isolate* isolate_;
@@ -236,11 +279,13 @@
 
   //----------- Inputs+Outputs of parsing and scope analysis -----------------
   ScriptData** cached_data_;  // used if available, populated if requested.
+  PreParsedScopeData preparsed_scope_data_;
   AstValueFactory* ast_value_factory_;  // used if available, otherwise new.
   const AstRawString* function_name_;
 
   //----------- Output of parsing and scope analysis ------------------------
   FunctionLiteral* literal_;
+  std::shared_ptr<DeferredHandles> deferred_handles_;
 
   void SetFlag(Flag f) { flags_ |= f; }
   void SetFlag(Flag f, bool v) { flags_ = v ? flags_ | f : flags_ & ~f; }
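The switch from a raw Zone* to std::shared_ptr<Zone> means a background compilation job can keep the zone (and the AST allocated in it) alive after the ParseInfo that created it is destroyed. A standalone sketch of that ownership hand-off; Zone and CompileJob here are stand-ins, not the V8 classes:

#include <cassert>
#include <memory>

struct Zone {  // stand-in for v8::internal::Zone
  int allocations = 0;
};

class ParseInfo {
 public:
  ParseInfo() : zone_(std::make_shared<Zone>()) {}
  Zone* zone() const { return zone_.get(); }
  std::shared_ptr<Zone> zone_shared() const { return zone_; }

 private:
  std::shared_ptr<Zone> zone_;
};

struct CompileJob {  // keeps the zone alive past the ParseInfo
  std::shared_ptr<Zone> zone;
};

int main() {
  CompileJob job;
  {
    ParseInfo info;
    info.zone()->allocations = 42;
    job.zone = info.zone_shared();  // shared ownership, not a copy
  }  // ParseInfo gone; the zone survives via the job's reference
  assert(job.zone->allocations == 42);
}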
diff --git a/src/parsing/parser-base.h b/src/parsing/parser-base.h
index bb62f86..cf56c53 100644
--- a/src/parsing/parser-base.h
+++ b/src/parsing/parser-base.h
@@ -9,6 +9,7 @@
 #include "src/ast/scopes.h"
 #include "src/bailout-reason.h"
 #include "src/base/hashmap.h"
+#include "src/counters.h"
 #include "src/globals.h"
 #include "src/messages.h"
 #include "src/parsing/expression-classifier.h"
@@ -100,6 +101,12 @@
 // Used in functions where the return type is ExpressionT.
 #define CHECK_OK CHECK_OK_CUSTOM(EmptyExpression)
 
+#define CHECK_OK_VOID ok); \
+  if (!*ok) return;        \
+  ((void)0
+#define DUMMY )  // to make indentation work
+#undef DUMMY
+
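CHECK_OK_VOID works like the existing CHECK_OK: the macro is pasted in as the last "argument" of a call, so the call site's own closing parenthesis completes the dangling ((void)0. A self-contained sketch of the expansion, assuming the parser's bool* ok error-threading convention (ParseInner/ParseOuter are hypothetical):

#include <cassert>

// The parser threads a bool* ok through every Parse* method; a failing
// callee sets *ok = false and the caller must bail out immediately.
#define CHECK_OK_VOID ok); \
  if (!*ok) return;        \
  ((void)0

void ParseInner(bool* ok) { *ok = false; }  // hypothetical failing parse

int steps = 0;

void ParseOuter(bool* ok) {
  // Expands to: ParseInner(ok); if (!*ok) return; ((void)0);
  ParseInner(CHECK_OK_VOID);
  ++steps;  // never reached: the early return above fires
}

int main() {
  bool ok = true;
  ParseOuter(&ok);
  assert(!ok && steps == 0);
}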
 // Common base class template shared between parser and pre-parser.
 // The Impl parameter is the actual class of the parser/pre-parser,
 // following the Curiously Recurring Template Pattern (CRTP).
@@ -192,14 +199,16 @@
 
   ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
              v8::Extension* extension, AstValueFactory* ast_value_factory,
-             RuntimeCallStats* runtime_call_stats)
-      : scope_state_(nullptr),
+             RuntimeCallStats* runtime_call_stats,
+             bool parsing_on_main_thread = true)
+      : scope_(nullptr),
         function_state_(nullptr),
         extension_(extension),
         fni_(nullptr),
         ast_value_factory_(ast_value_factory),
         ast_node_factory_(ast_value_factory),
         runtime_call_stats_(runtime_call_stats),
+        parsing_on_main_thread_(parsing_on_main_thread),
         parsing_module_(false),
         stack_limit_(stack_limit),
         zone_(zone),
@@ -207,29 +216,34 @@
         scanner_(scanner),
         stack_overflow_(false),
         default_eager_compile_hint_(FunctionLiteral::kShouldLazyCompile),
-        allow_lazy_(false),
+        function_literal_id_(0),
         allow_natives_(false),
         allow_tailcalls_(false),
         allow_harmony_do_expressions_(false),
         allow_harmony_function_sent_(false),
-        allow_harmony_async_await_(false),
         allow_harmony_restrictive_generators_(false),
         allow_harmony_trailing_commas_(false),
-        allow_harmony_class_fields_(false) {}
+        allow_harmony_class_fields_(false),
+        allow_harmony_object_rest_spread_(false),
+        allow_harmony_dynamic_import_(false),
+        allow_harmony_async_iteration_(false),
+        allow_harmony_template_escapes_(false) {}
 
 #define ALLOW_ACCESSORS(name)                           \
   bool allow_##name() const { return allow_##name##_; } \
   void set_allow_##name(bool allow) { allow_##name##_ = allow; }
 
-  ALLOW_ACCESSORS(lazy);
   ALLOW_ACCESSORS(natives);
   ALLOW_ACCESSORS(tailcalls);
   ALLOW_ACCESSORS(harmony_do_expressions);
   ALLOW_ACCESSORS(harmony_function_sent);
-  ALLOW_ACCESSORS(harmony_async_await);
   ALLOW_ACCESSORS(harmony_restrictive_generators);
   ALLOW_ACCESSORS(harmony_trailing_commas);
   ALLOW_ACCESSORS(harmony_class_fields);
+  ALLOW_ACCESSORS(harmony_object_rest_spread);
+  ALLOW_ACCESSORS(harmony_dynamic_import);
+  ALLOW_ACCESSORS(harmony_async_iteration);
+  ALLOW_ACCESSORS(harmony_template_escapes);
 
 #undef ALLOW_ACCESSORS
 
@@ -246,6 +260,13 @@
     return default_eager_compile_hint_;
   }
 
+  int GetNextFunctionLiteralId() { return ++function_literal_id_; }
+  int GetLastFunctionLiteralId() const { return function_literal_id_; }
+
+  void SkipFunctionLiterals(int delta) { function_literal_id_ += delta; }
+
+  void ResetFunctionLiteralId() { function_literal_id_ = 0; }
+
   Zone* zone() const { return zone_; }
 
  protected:
@@ -271,57 +292,26 @@
   class ObjectLiteralChecker;
 
   // ---------------------------------------------------------------------------
-  // ScopeState and its subclasses implement the parser's scope stack.
-  // ScopeState keeps track of the current scope, and the outer ScopeState. The
-  // parser's scope_state_ points to the top ScopeState. ScopeState's
-  // constructor push on the scope stack and the destructors pop. BlockState and
-  // FunctionState are used to hold additional per-block and per-function state.
-  class ScopeState BASE_EMBEDDED {
+  // BlockState and FunctionState implement the parser's scope stack.
+  // The parser's current scope is in scope_. BlockState and FunctionState
+  // constructors push on the scope stack and the destructors pop. They are also
+  // used to hold the parser's per-function state.
+  class BlockState BASE_EMBEDDED {
    public:
-    V8_INLINE Scope* scope() const { return scope_; }
-    Zone* zone() const { return scope_->zone(); }
-
-   protected:
-    ScopeState(ScopeState** scope_stack, Scope* scope)
-        : scope_stack_(scope_stack), outer_scope_(*scope_stack), scope_(scope) {
-      *scope_stack = this;
+    BlockState(Scope** scope_stack, Scope* scope)
+        : scope_stack_(scope_stack), outer_scope_(*scope_stack) {
+      *scope_stack_ = scope;
     }
-    ~ScopeState() { *scope_stack_ = outer_scope_; }
+
+    BlockState(Zone* zone, Scope** scope_stack)
+        : BlockState(scope_stack,
+                     new (zone) Scope(zone, *scope_stack, BLOCK_SCOPE)) {}
+
+    ~BlockState() { *scope_stack_ = outer_scope_; }
 
    private:
-    ScopeState** const scope_stack_;
-    ScopeState* const outer_scope_;
-    Scope* const scope_;
-  };
-
-  class BlockState final : public ScopeState {
-   public:
-    BlockState(ScopeState** scope_stack, Scope* scope)
-        : ScopeState(scope_stack, scope) {}
-
-    // BlockState(ScopeState**) automatically manages Scope(BLOCK_SCOPE)
-    // allocation.
-    // TODO(verwaest): Move to LazyBlockState class that only allocates the
-    // scope when needed.
-    explicit BlockState(Zone* zone, ScopeState** scope_stack)
-        : ScopeState(scope_stack, NewScope(zone, *scope_stack)) {}
-
-    void SetNonlinear() { this->scope()->SetNonlinear(); }
-    void set_start_position(int pos) { this->scope()->set_start_position(pos); }
-    void set_end_position(int pos) { this->scope()->set_end_position(pos); }
-    void set_is_hidden() { this->scope()->set_is_hidden(); }
-    Scope* FinalizedBlockScope() const {
-      return this->scope()->FinalizeBlockScope();
-    }
-    LanguageMode language_mode() const {
-      return this->scope()->language_mode();
-    }
-
-   private:
-    Scope* NewScope(Zone* zone, ScopeState* outer_state) {
-      Scope* parent = outer_state->scope();
-      return new (zone) Scope(zone, parent, BLOCK_SCOPE);
-    }
+    Scope** const scope_stack_;
+    Scope* const outer_scope_;
   };
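+  // A minimal usage sketch: BlockState is scoped (RAII), e.g.
+  //   {
+  //     BlockState block_state(zone(), &scope_);  // pushes a new BLOCK_SCOPE
+  //     ...                                       // scope() is now that scope
+  //   }  // the destructor restores the previous scope_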
 
   struct DestructuringAssignment {
@@ -383,26 +373,13 @@
     kInsideForInOfBody,
   };
 
-  class FunctionState final : public ScopeState {
+  class FunctionState final : public BlockState {
    public:
-    FunctionState(FunctionState** function_state_stack,
-                  ScopeState** scope_stack, DeclarationScope* scope);
+    FunctionState(FunctionState** function_state_stack, Scope** scope_stack,
+                  DeclarationScope* scope);
     ~FunctionState();
 
-    DeclarationScope* scope() const {
-      return ScopeState::scope()->AsDeclarationScope();
-    }
-
-    int NextMaterializedLiteralIndex() {
-      return next_materialized_literal_index_++;
-    }
-    int materialized_literal_count() {
-      return next_materialized_literal_index_;
-    }
-
-    void SkipMaterializedLiterals(int count) {
-      next_materialized_literal_index_ += count;
-    }
+    DeclarationScope* scope() const { return scope_->AsDeclarationScope(); }
 
     void AddProperty() { expected_property_count_++; }
     int expected_property_count() { return expected_property_count_; }
@@ -410,22 +387,23 @@
     FunctionKind kind() const { return scope()->function_kind(); }
     FunctionState* outer() const { return outer_function_state_; }
 
-    void set_generator_object_variable(typename Types::Variable* variable) {
-      DCHECK(variable != NULL);
-      DCHECK(IsResumableFunction(kind()));
-      generator_object_variable_ = variable;
-    }
     typename Types::Variable* generator_object_variable() const {
-      return generator_object_variable_;
+      return scope()->generator_object_var();
     }
 
-    void set_promise_variable(typename Types::Variable* variable) {
-      DCHECK(variable != NULL);
-      DCHECK(IsAsyncFunction(kind()));
-      promise_variable_ = variable;
-    }
     typename Types::Variable* promise_variable() const {
-      return promise_variable_;
+      return scope()->promise_var();
+    }
+
+    void RewindDestructuringAssignments(int pos) {
+      destructuring_assignments_to_rewrite_.Rewind(pos);
+    }
+
+    void SetDestructuringAssignmentsScope(int pos, Scope* scope) {
+      for (int i = pos; i < destructuring_assignments_to_rewrite_.length();
+           ++i) {
+        destructuring_assignments_to_rewrite_[i].scope = scope;
+      }
     }
 
     const ZoneList<DestructuringAssignment>&
@@ -458,25 +436,25 @@
       return &non_patterns_to_rewrite_;
     }
 
-    bool next_function_is_parenthesized() const {
-      return next_function_is_parenthesized_;
+    bool next_function_is_likely_called() const {
+      return next_function_is_likely_called_;
     }
 
-    void set_next_function_is_parenthesized(bool parenthesized) {
-      next_function_is_parenthesized_ = parenthesized;
+    bool previous_function_was_likely_called() const {
+      return previous_function_was_likely_called_;
     }
 
-    bool this_function_is_parenthesized() const {
-      return this_function_is_parenthesized_;
+    void set_next_function_is_likely_called() {
+      next_function_is_likely_called_ = true;
     }
 
    private:
     void AddDestructuringAssignment(DestructuringAssignment pair) {
-      destructuring_assignments_to_rewrite_.Add(pair, this->zone());
+      destructuring_assignments_to_rewrite_.Add(pair, scope_->zone());
     }
 
     void AddNonPatternForRewriting(ExpressionT expr, bool* ok) {
-      non_patterns_to_rewrite_.Add(expr, this->zone());
+      non_patterns_to_rewrite_.Add(expr, scope_->zone());
       if (non_patterns_to_rewrite_.length() >=
           std::numeric_limits<uint16_t>::max())
         *ok = false;
@@ -490,16 +468,9 @@
     // Properties count estimation.
     int expected_property_count_;
 
-    // For generators, this variable may hold the generator object. It variable
-    // is used by yield expressions and return statements. It is not necessary
-    // for generator functions to have this variable set.
-    Variable* generator_object_variable_;
-    // For async functions, this variable holds a temporary for the Promise
-    // being created as output of the async function.
-    Variable* promise_variable_;
-
     FunctionState** function_state_stack_;
     FunctionState* outer_function_state_;
+    DeclarationScope* scope_;
 
     ZoneList<DestructuringAssignment> destructuring_assignments_to_rewrite_;
     TailCallExpressionList tail_call_expressions_;
@@ -508,13 +479,13 @@
 
     ZoneList<typename ExpressionClassifier::Error> reported_errors_;
 
-    // If true, the next (and immediately following) function literal is
-    // preceded by a parenthesis.
-    bool next_function_is_parenthesized_;
-
-    // The value of the parents' next_function_is_parenthesized_, as it applies
-    // to this function. Filled in by constructor.
-    bool this_function_is_parenthesized_;
+    // Record whether the next (=== immediately following) function literal is
+    // preceded by a parenthesis / exclamation mark. Also record the previous
+    // state.
+    // These are managed by the FunctionState constructor; the caller may only
+    // call set_next_function_is_likely_called.
+    bool next_function_is_likely_called_;
+    bool previous_function_was_likely_called_;
 
     friend Impl;
     friend class Checkpoint;
@@ -593,7 +564,6 @@
   struct DeclarationDescriptor {
     enum Kind { NORMAL, PARAMETER };
     Scope* scope;
-    Scope* hoist_scope;
     VariableMode mode;
     int declaration_pos;
     int initialization_pos;
@@ -633,7 +603,6 @@
           scope(nullptr),
           init_block(parser->impl()->NullBlock()),
           inner_block(parser->impl()->NullBlock()),
-          for_promise_reject(false),
           bound_names(1, parser->zone()),
           tail_call_expressions(parser->zone()) {}
     IdentifierT name;
@@ -642,7 +611,6 @@
     Scope* scope;
     BlockT init_block;
     BlockT inner_block;
-    bool for_promise_reject;
     ZoneList<const AstRawString*> bound_names;
     TailCallExpressionList tail_call_expressions;
   };
@@ -666,17 +634,17 @@
         : proxy(nullptr),
           extends(parser->impl()->EmptyExpression()),
           properties(parser->impl()->NewClassPropertyList(4)),
-          instance_field_initializers(parser->impl()->NewExpressionList(0)),
           constructor(parser->impl()->EmptyFunctionLiteral()),
           has_seen_constructor(false),
-          static_initializer_var(nullptr) {}
+          has_name_static_property(false),
+          has_static_computed_names(false) {}
     VariableProxy* proxy;
     ExpressionT extends;
     typename Types::ClassPropertyList properties;
-    ExpressionListT instance_field_initializers;
     FunctionLiteralT constructor;
     bool has_seen_constructor;
-    Variable* static_initializer_var;
+    bool has_name_static_property;
+    bool has_static_computed_names;
   };
 
   DeclarationScope* NewScriptScope() const {
@@ -712,10 +680,15 @@
     return new (zone()) Scope(zone(), parent, scope_type);
   }
 
-  DeclarationScope* NewFunctionScope(FunctionKind kind) const {
+  // Creates a function scope whose internal allocations always go into
+  // zone(). The scope object itself is allocated in zone() unless a
+  // target_zone is passed in, in which case it is allocated there.
+  DeclarationScope* NewFunctionScope(FunctionKind kind,
+                                     Zone* target_zone = nullptr) const {
     DCHECK(ast_value_factory());
-    DeclarationScope* result =
-        new (zone()) DeclarationScope(zone(), scope(), FUNCTION_SCOPE, kind);
+    if (target_zone == nullptr) target_zone = zone();
+    DeclarationScope* result = new (target_zone)
+        DeclarationScope(zone(), scope(), FUNCTION_SCOPE, kind);
     // TODO(verwaest): Move into the DeclarationScope constructor.
     if (!IsArrowFunction(kind)) {
       result->DeclareDefaultFunctionVariables(ast_value_factory());
@@ -810,6 +783,7 @@
   bool is_any_identifier(Token::Value token) {
     return token == Token::IDENTIFIER || token == Token::ENUM ||
            token == Token::AWAIT || token == Token::ASYNC ||
+           token == Token::ESCAPED_STRICT_RESERVED_WORD ||
            token == Token::FUTURE_STRICT_RESERVED_WORD || token == Token::LET ||
            token == Token::STATIC || token == Token::YIELD;
   }
@@ -855,36 +829,39 @@
   }
 
   // Checks whether an octal literal was last seen between beg_pos and end_pos.
-  // If so, reports an error. Only called for strict mode and template strings.
-  void CheckOctalLiteral(int beg_pos, int end_pos,
-                         MessageTemplate::Template message, bool* ok) {
+  // Only called for strict mode strings.
+  void CheckStrictOctalLiteral(int beg_pos, int end_pos, bool* ok) {
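+    // (For illustration: in strict mode both legacy octals like `010` and
+    // decimals with a leading zero like `08` are rejected here; only the
+    // latter is additionally use-counted.)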
     Scanner::Location octal = scanner()->octal_position();
     if (octal.IsValid() && beg_pos <= octal.beg_pos &&
         octal.end_pos <= end_pos) {
+      MessageTemplate::Template message = scanner()->octal_message();
+      DCHECK_NE(message, MessageTemplate::kNone);
       impl()->ReportMessageAt(octal, message);
       scanner()->clear_octal_position();
+      if (message == MessageTemplate::kStrictDecimalWithLeadingZero) {
+        impl()->CountUsage(v8::Isolate::kDecimalWithLeadingZeroInStrictMode);
+      }
       *ok = false;
     }
   }
-  // for now, this check just collects statistics.
-  void CheckDecimalLiteralWithLeadingZero(int beg_pos, int end_pos) {
-    Scanner::Location token_location =
-        scanner()->decimal_with_leading_zero_position();
-    if (token_location.IsValid() && beg_pos <= token_location.beg_pos &&
-        token_location.end_pos <= end_pos) {
-      scanner()->clear_decimal_with_leading_zero_position();
-      impl()->CountUsage(v8::Isolate::kDecimalWithLeadingZeroInStrictMode);
+
+  // Checks if an octal literal or an invalid hex or unicode escape sequence
+  // appears in a template literal. If so, reports an error when should_throw
+  // is true and returns false in either case; otherwise returns true.
+  inline bool CheckTemplateEscapes(bool should_throw, bool* ok) {
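+    // (For illustration: under the template-escape revision, an invalid
+    // escape such as tag`\unicode` is permitted in a tagged template:
+    // should_throw is false, we return false, and the cooked value becomes
+    // undefined. In an untagged template `\unicode` remains a SyntaxError.)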
+    if (!scanner()->has_invalid_template_escape()) {
+      return true;
     }
-  }
 
-  inline void CheckStrictOctalLiteral(int beg_pos, int end_pos, bool* ok) {
-    CheckOctalLiteral(beg_pos, end_pos, MessageTemplate::kStrictOctalLiteral,
-                      ok);
-  }
-
-  inline void CheckTemplateOctalLiteral(int beg_pos, int end_pos, bool* ok) {
-    CheckOctalLiteral(beg_pos, end_pos, MessageTemplate::kTemplateOctalLiteral,
-                      ok);
+    // Handle error case(s)
+    if (should_throw) {
+      impl()->ReportMessageAt(scanner()->invalid_template_escape_location(),
+                              scanner()->invalid_template_escape_message());
+      *ok = false;
+    }
+    scanner()->clear_invalid_template_escape();
+    return false;
   }
 
   void CheckDestructuringElement(ExpressionT element, int beg_pos, int end_pos);
@@ -1058,14 +1035,6 @@
     }
   }
 
-  void ExpressionUnexpectedToken() {
-    MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
-    const char* arg;
-    Scanner::Location location = scanner()->peek_location();
-    GetUnexpectedTokenMessage(peek(), &message, &location, &arg);
-    classifier()->RecordExpressionError(location, message, arg);
-  }
-
   void BindingPatternUnexpectedToken() {
     MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
     const char* arg;
@@ -1143,6 +1112,7 @@
     kShorthandProperty,
     kMethodProperty,
     kClassField,
+    kSpreadProperty,
     kNotSet
   };
 
@@ -1154,11 +1124,13 @@
   ExpressionT ParseObjectLiteral(bool* ok);
   ClassLiteralPropertyT ParseClassPropertyDefinition(
       ClassLiteralChecker* checker, bool has_extends, bool* is_computed_name,
-      bool* has_seen_constructor, bool* ok);
+      bool* has_seen_constructor, ClassLiteralProperty::Kind* property_kind,
+      bool* is_static, bool* has_name_static_property, bool* ok);
   FunctionLiteralT ParseClassFieldForInitializer(bool has_initializer,
                                                  bool* ok);
   ObjectLiteralPropertyT ParseObjectPropertyDefinition(
-      ObjectLiteralChecker* checker, bool* is_computed_name, bool* ok);
+      ObjectLiteralChecker* checker, bool* is_computed_name,
+      bool* is_rest_property, bool* ok);
   ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
                                  bool maybe_arrow, bool* ok);
   ExpressionListT ParseArguments(Scanner::Location* first_spread_pos,
@@ -1177,9 +1149,14 @@
   ExpressionT ParseMemberExpression(bool* is_async, bool* ok);
   ExpressionT ParseMemberExpressionContinuation(ExpressionT expression,
                                                 bool* is_async, bool* ok);
+
+  // `rewritable_length`: length of the destructuring_assignments_to_rewrite()
+  // queue in the parent function state, prior to parsing of formal parameters.
+  // If the arrow function is lazy, any items added during formal parameter
+  // parsing are removed from the queue.
   ExpressionT ParseArrowFunctionLiteral(bool accept_IN,
                                         const FormalParametersT& parameters,
-                                        bool* ok);
+                                        int rewritable_length, bool* ok);
   void ParseAsyncFunctionBody(Scope* scope, StatementListT body,
                               FunctionKind kind, FunctionBodyType type,
                               bool accept_IN, int pos, bool* ok);
@@ -1188,8 +1165,10 @@
                                 Scanner::Location class_name_location,
                                 bool name_is_strict_reserved,
                                 int class_token_pos, bool* ok);
-  ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool* ok);
+  ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool tagged,
+                                   bool* ok);
   ExpressionT ParseSuperExpression(bool is_new, bool* ok);
+  ExpressionT ParseDynamicImportExpression(bool* ok);
   ExpressionT ParseNewTargetExpression(bool* ok);
 
   void ParseFormalParameter(FormalParametersT* parameters, bool* ok);
@@ -1214,6 +1193,12 @@
                                    bool default_export, bool* ok);
   StatementT ParseNativeDeclaration(bool* ok);
 
+  // Consumes the ending }.
+  void ParseFunctionBody(StatementListT result, IdentifierT function_name,
+                         int pos, const FormalParametersT& parameters,
+                         FunctionKind kind,
+                         FunctionLiteral::FunctionType function_type, bool* ok);
+
   // Under some circumstances, we allow preparsing to abort if the preparsed
   // function is "long and trivial", and fully parse instead. Our current
   // definition of "long and trivial" is:
@@ -1234,6 +1219,9 @@
   LazyParsingResult ParseStatementList(StatementListT body, int end_token,
                                        bool may_abort, bool* ok);
   StatementT ParseStatementListItem(bool* ok);
+  StatementT ParseStatement(ZoneList<const AstRawString*>* labels, bool* ok) {
+    return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
+  }
   StatementT ParseStatement(ZoneList<const AstRawString*>* labels,
                             AllowLabelledFunctionStatement allow_function,
                             bool* ok);
@@ -1244,11 +1232,8 @@
   // Parse a SubStatement in strict mode, or with an extra block scope in
   // sloppy mode to handle
   // ES#sec-functiondeclarations-in-ifstatement-statement-clauses
-  // The legacy parameter indicates whether function declarations are
-  // banned by the ES2015 specification in this location, and they are being
-  // permitted here to match previous V8 behavior.
   StatementT ParseScopedStatement(ZoneList<const AstRawString*>* labels,
-                                  bool legacy, bool* ok);
+                                  bool* ok);
 
   StatementT ParseVariableStatement(VariableDeclarationContext var_context,
                                     ZoneList<const AstRawString*>* names,
@@ -1280,6 +1265,21 @@
                                   bool* ok);
   StatementT ParseTryStatement(bool* ok);
   StatementT ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok);
+  StatementT ParseForEachStatementWithDeclarations(
+      int stmt_pos, ForInfo* for_info, ZoneList<const AstRawString*>* labels,
+      bool* ok);
+  StatementT ParseForEachStatementWithoutDeclarations(
+      int stmt_pos, ExpressionT expression, int lhs_beg_pos, int lhs_end_pos,
+      ForInfo* for_info, ZoneList<const AstRawString*>* labels, bool* ok);
+
+  // Parse a C-style for loop: 'for (<init>; <cond>; <step>) { ... }'
+  StatementT ParseStandardForLoop(int stmt_pos, StatementT init,
+                                  bool bound_names_are_lexical,
+                                  ForInfo* for_info, BlockState* for_state,
+                                  ZoneList<const AstRawString*>* labels,
+                                  bool* ok);
+  StatementT ParseForAwaitStatement(ZoneList<const AstRawString*>* labels,
+                                    bool* ok);
 
   bool IsNextLetKeyword();
   bool IsTrivialExpression();
@@ -1309,6 +1309,24 @@
     return expression->IsObjectLiteral() || expression->IsArrayLiteral();
   }
 
+  // Due to hoisting, the value of a 'var'-declared variable may actually change
+  // even if the code contains only the "initial" assignment, namely when that
+  // assignment occurs inside a loop.  For example:
+  //
+  //   let i = 10;
+  //   do { var x = i } while (i--);
+  //
+  // As a simple and very conservative approximation of this, we explicitly mark
+  // as maybe-assigned any non-lexical variable whose initializing "declaration"
+  // does not syntactically occur in the function scope.  (In the example above,
+  // it occurs in a block scope.)
+  //
+  // Note that non-lexical variables include temporaries, which may also get
+  // assigned inside a loop due to the various rewritings that the parser
+  // performs.
+  //
+  static void MarkLoopVariableAsAssigned(Scope* scope, Variable* var);
+
   // Keep track of eval() calls since they disable all local variable
   // optimizations. This checks if expression is an eval call, and if yes,
   // forwards the information to scope.
@@ -1327,6 +1345,15 @@
     return Call::NOT_EVAL;
   }
 
+  // Convenience method which determines the type of return statement to emit
+  // depending on the current function type.
+  inline StatementT BuildReturnStatement(ExpressionT expr, int pos) {
+    if (V8_UNLIKELY(is_async_function())) {
+      return factory()->NewAsyncReturnStatement(expr, pos);
+    }
+    return factory()->NewReturnStatement(expr, pos);
+  }
+
   // Validation per ES6 object literals.
   class ObjectLiteralChecker {
    public:
@@ -1373,7 +1400,7 @@
   ModuleDescriptor* module() const {
     return scope()->AsModuleScope()->module();
   }
-  Scope* scope() const { return scope_state_->scope(); }
+  Scope* scope() const { return scope_; }
 
   // Stack of expression classifiers.
   // The top of the stack is always pointed to by classifier().
@@ -1416,13 +1443,14 @@
 
   // Parser base's protected field members.
 
-  ScopeState* scope_state_;        // Scope stack.
+  Scope* scope_;                   // Scope stack.
   FunctionState* function_state_;  // Function state stack.
   v8::Extension* extension_;
   FuncNameInferrer* fni_;
   AstValueFactory* ast_value_factory_;  // Not owned.
   typename Types::Factory ast_node_factory_;
   RuntimeCallStats* runtime_call_stats_;
+  bool parsing_on_main_thread_;
   bool parsing_module_;
   uintptr_t stack_limit_;
 
@@ -1437,42 +1465,45 @@
 
   FunctionLiteral::EagerCompileHint default_eager_compile_hint_;
 
-  bool allow_lazy_;
+  int function_literal_id_;
+
   bool allow_natives_;
   bool allow_tailcalls_;
   bool allow_harmony_do_expressions_;
   bool allow_harmony_function_sent_;
-  bool allow_harmony_async_await_;
   bool allow_harmony_restrictive_generators_;
   bool allow_harmony_trailing_commas_;
   bool allow_harmony_class_fields_;
+  bool allow_harmony_object_rest_spread_;
+  bool allow_harmony_dynamic_import_;
+  bool allow_harmony_async_iteration_;
+  bool allow_harmony_template_escapes_;
 
   friend class DiscardableZoneScope;
 };
 
 template <typename Impl>
 ParserBase<Impl>::FunctionState::FunctionState(
-    FunctionState** function_state_stack, ScopeState** scope_stack,
+    FunctionState** function_state_stack, Scope** scope_stack,
     DeclarationScope* scope)
-    : ScopeState(scope_stack, scope),
+    : BlockState(scope_stack, scope),
       next_materialized_literal_index_(0),
       expected_property_count_(0),
-      generator_object_variable_(nullptr),
-      promise_variable_(nullptr),
       function_state_stack_(function_state_stack),
       outer_function_state_(*function_state_stack),
+      scope_(scope),
       destructuring_assignments_to_rewrite_(16, scope->zone()),
       tail_call_expressions_(scope->zone()),
       return_expr_context_(ReturnExprContext::kInsideValidBlock),
       non_patterns_to_rewrite_(0, scope->zone()),
       reported_errors_(16, scope->zone()),
-      next_function_is_parenthesized_(false),
-      this_function_is_parenthesized_(false) {
+      next_function_is_likely_called_(false),
+      previous_function_was_likely_called_(false) {
   *function_state_stack = this;
   if (outer_function_state_) {
-    this_function_is_parenthesized_ =
-        outer_function_state_->next_function_is_parenthesized_;
-    outer_function_state_->next_function_is_parenthesized_ = false;
+    outer_function_state_->previous_function_was_likely_called_ =
+        outer_function_state_->next_function_is_likely_called_;
+    outer_function_state_->next_function_is_likely_called_ = false;
   }
 }
 
@@ -1594,7 +1625,7 @@
     }
 
     if (classifier()->duplicate_finder() != nullptr &&
-        scanner()->FindSymbol(classifier()->duplicate_finder(), 1) != 0) {
+        scanner()->FindSymbol(classifier()->duplicate_finder())) {
       classifier()->RecordDuplicateFormalParameterError(scanner()->location());
     }
     return name;
@@ -1634,7 +1665,8 @@
                                     !IsAsyncFunction(function_kind)) ||
       next == Token::ASYNC) {
     *is_strict_reserved = false;
-  } else if (next == Token::FUTURE_STRICT_RESERVED_WORD || next == Token::LET ||
+  } else if (next == Token::ESCAPED_STRICT_RESERVED_WORD ||
+             next == Token::FUTURE_STRICT_RESERVED_WORD || next == Token::LET ||
              next == Token::STATIC ||
              (next == Token::YIELD && !IsGeneratorFunction(function_kind))) {
     *is_strict_reserved = true;
@@ -1676,8 +1708,6 @@
     return impl()->EmptyExpression();
   }
 
-  int literal_index = function_state_->NextMaterializedLiteralIndex();
-
   IdentifierT js_pattern = impl()->GetNextSymbol();
   Maybe<RegExp::Flags> flags = scanner()->ScanRegExpFlags();
   if (flags.IsNothing()) {
@@ -1688,7 +1718,7 @@
   }
   int js_flags = flags.FromJust();
   Next();
-  return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
+  return factory()->NewRegExpLiteral(js_pattern, js_flags, pos);
 }
 
 template <typename Impl>
@@ -1728,8 +1758,7 @@
       return impl()->ExpressionFromLiteral(Next(), beg_pos);
 
     case Token::ASYNC:
-      if (allow_harmony_async_await() &&
-          !scanner()->HasAnyLineTerminatorAfterNext() &&
+      if (!scanner()->HasAnyLineTerminatorAfterNext() &&
           PeekAhead() == Token::FUNCTION) {
         Consume(Token::ASYNC);
         return ParseAsyncFunctionLiteral(CHECK_OK);
@@ -1789,8 +1818,10 @@
       }
       // Heuristically try to detect immediately called functions before
       // seeing the call parentheses.
-      function_state_->set_next_function_is_parenthesized(peek() ==
-                                                          Token::FUNCTION);
+      if (peek() == Token::FUNCTION ||
+          (peek() == Token::ASYNC && PeekAhead() == Token::FUNCTION)) {
+        function_state_->set_next_function_is_likely_called();
+      }
       ExpressionT expr = ParseExpressionCoverGrammar(true, CHECK_OK);
       Expect(Token::RPAREN, CHECK_OK);
       return expr;
@@ -1815,7 +1846,7 @@
     case Token::TEMPLATE_SPAN:
     case Token::TEMPLATE_TAIL:
       BindingPatternUnexpectedToken();
-      return ParseTemplateLiteral(impl()->NoTemplateTag(), beg_pos, ok);
+      return ParseTemplateLiteral(impl()->NoTemplateTag(), beg_pos, false, ok);
 
     case Token::MOD:
       if (allow_natives() || extension_ != NULL) {
@@ -1902,6 +1933,13 @@
       // a trailing comma is allowed at the end of an arrow parameter list
       break;
     }
+
+    // Pass on the 'next_function_is_likely_called' flag if we have several
+    // function literals separated by comma.
+    if (peek() == Token::FUNCTION &&
+        function_state_->previous_function_was_likely_called()) {
+      function_state_->set_next_function_is_likely_called();
+    }
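+    // (e.g. in `(function a() {}, function b() {})();` the likely-called
+    // hint given to the first literal is propagated to the second.)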
   }
 
   return result;
@@ -1958,11 +1996,8 @@
   }
   Expect(Token::RBRACK, CHECK_OK);
 
-  // Update the scope information before the pre-parsing bailout.
-  int literal_index = function_state_->NextMaterializedLiteralIndex();
-
-  ExpressionT result = factory()->NewArrayLiteral(values, first_spread_index,
-                                                  literal_index, pos);
+  ExpressionT result =
+      factory()->NewArrayLiteral(values, first_spread_index, pos);
   if (first_spread_index >= 0) {
     result = factory()->NewRewritableExpression(result);
     impl()->QueueNonPatternForRewriting(result, ok);
@@ -2025,7 +2060,7 @@
   Token::Value token = peek();
   int pos = peek_position();
 
-  if (allow_harmony_async_await() && !*is_generator && token == Token::ASYNC &&
+  if (!*is_generator && token == Token::ASYNC &&
       !scanner()->HasAnyLineTerminatorAfterNext()) {
     Consume(Token::ASYNC);
     token = peek();
@@ -2091,6 +2126,29 @@
       break;
     }
 
+    case Token::ELLIPSIS:
+      if (allow_harmony_object_rest_spread()) {
+        *name = impl()->EmptyIdentifier();
+        Consume(Token::ELLIPSIS);
+        expression = ParseAssignmentExpression(true, CHECK_OK);
+        *kind = PropertyKind::kSpreadProperty;
+
+        if (expression->IsAssignment()) {
+          classifier()->RecordPatternError(
+              scanner()->location(),
+              MessageTemplate::kInvalidDestructuringTarget);
+        } else {
+          CheckDestructuringElement(expression, pos,
+                                    scanner()->location().end_pos);
+        }
+
+        if (peek() != Token::RBRACE) {
+          classifier()->RecordPatternError(scanner()->location(),
+                                           MessageTemplate::kElementAfterRest);
+        }
+        return expression;
+      }
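+      // (For illustration: with object rest/spread enabled this covers both
+      // spreads in literals, e.g. `{...source, extra: 1}`, and rest patterns,
+      // e.g. `let {a, ...rest} = obj;`. Rest must come last, so a pattern
+      // like `({...rest, b} = obj)` records kElementAfterRest. Without the
+      // flag, control falls through to the default case.)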
+
     default:
       *name = ParseIdentifierName(CHECK_OK);
       break;
@@ -2114,25 +2172,28 @@
 
 template <typename Impl>
 typename ParserBase<Impl>::ClassLiteralPropertyT
-ParserBase<Impl>::ParseClassPropertyDefinition(ClassLiteralChecker* checker,
-                                               bool has_extends,
-                                               bool* is_computed_name,
-                                               bool* has_seen_constructor,
-                                               bool* ok) {
-  DCHECK(has_seen_constructor != nullptr);
+ParserBase<Impl>::ParseClassPropertyDefinition(
+    ClassLiteralChecker* checker, bool has_extends, bool* is_computed_name,
+    bool* has_seen_constructor, ClassLiteralProperty::Kind* property_kind,
+    bool* is_static, bool* has_name_static_property, bool* ok) {
+  DCHECK_NOT_NULL(has_seen_constructor);
+  DCHECK_NOT_NULL(has_name_static_property);
   bool is_get = false;
   bool is_set = false;
   bool is_generator = false;
   bool is_async = false;
-  bool is_static = false;
+  *is_static = false;
+  *property_kind = ClassLiteralProperty::METHOD;
   PropertyKind kind = PropertyKind::kNotSet;
 
   Token::Value name_token = peek();
 
+  int function_token_position = scanner()->peek_location().beg_pos;
   IdentifierT name = impl()->EmptyIdentifier();
   ExpressionT name_expression;
   if (name_token == Token::STATIC) {
     Consume(Token::STATIC);
+    function_token_position = scanner()->peek_location().beg_pos;
     if (peek() == Token::LPAREN) {
       kind = PropertyKind::kMethodProperty;
       name = impl()->GetSymbol();  // TODO(bakkot) specialize on 'static'
@@ -2142,7 +2203,7 @@
       name = impl()->GetSymbol();  // TODO(bakkot) specialize on 'static'
       name_expression = factory()->NewStringLiteral(name, position());
     } else {
-      is_static = true;
+      *is_static = true;
       name_expression = ParsePropertyName(
           &name, &kind, &is_generator, &is_get, &is_set, &is_async,
           is_computed_name, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
@@ -2153,6 +2214,10 @@
         is_computed_name, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
   }
 
+  if (!*has_name_static_property && *is_static && impl()->IsName(name)) {
+    *has_name_static_property = true;
+  }
+
   switch (kind) {
     case PropertyKind::kClassField:
     case PropertyKind::kNotSet:  // This case is a name followed by a name or
@@ -2169,9 +2234,10 @@
         ExpressionT function_literal = ParseClassFieldForInitializer(
             has_initializer, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
         ExpectSemicolon(CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+        *property_kind = ClassLiteralProperty::FIELD;
         return factory()->NewClassLiteralProperty(
-            name_expression, function_literal, ClassLiteralProperty::FIELD,
-            is_static, *is_computed_name);
+            name_expression, function_literal, *property_kind, *is_static,
+            *is_computed_name);
       } else {
         ReportUnexpectedToken(Next());
         *ok = false;
@@ -2188,7 +2254,7 @@
       if (!*is_computed_name) {
         checker->CheckClassMethodName(
             name_token, PropertyKind::kMethodProperty, is_generator, is_async,
-            is_static, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+            *is_static, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
       }
 
       FunctionKind kind = is_generator
@@ -2196,20 +2262,23 @@
                               : is_async ? FunctionKind::kAsyncConciseMethod
                                          : FunctionKind::kConciseMethod;
 
-      if (!is_static && impl()->IsConstructor(name)) {
+      if (!*is_static && impl()->IsConstructor(name)) {
         *has_seen_constructor = true;
-        kind = has_extends ? FunctionKind::kSubclassConstructor
+        kind = has_extends ? FunctionKind::kDerivedConstructor
                            : FunctionKind::kBaseConstructor;
       }
 
       ExpressionT value = impl()->ParseFunctionLiteral(
           name, scanner()->location(), kSkipFunctionNameCheck, kind,
-          kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
-          language_mode(), CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+          FLAG_harmony_function_tostring ? function_token_position
+                                         : kNoSourcePosition,
+          FunctionLiteral::kAccessorOrMethod, language_mode(),
+          CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
 
+      *property_kind = ClassLiteralProperty::METHOD;
       return factory()->NewClassLiteralProperty(name_expression, value,
-                                                ClassLiteralProperty::METHOD,
-                                                is_static, *is_computed_name);
+                                                *property_kind, *is_static,
+                                                *is_computed_name);
     }
 
     case PropertyKind::kAccessorProperty: {
@@ -2218,7 +2287,7 @@
       if (!*is_computed_name) {
         checker->CheckClassMethodName(
             name_token, PropertyKind::kAccessorProperty, false, false,
-            is_static, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+            *is_static, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
         // Make sure the name expression is a string since we need a Name for
         // Runtime_DefineAccessorPropertyUnchecked and since we can determine
         // this statically we can skip the extra runtime check.
@@ -2231,18 +2300,23 @@
 
       FunctionLiteralT value = impl()->ParseFunctionLiteral(
           name, scanner()->location(), kSkipFunctionNameCheck, kind,
-          kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
-          language_mode(), CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+          FLAG_harmony_function_tostring ? function_token_position
+                                         : kNoSourcePosition,
+          FunctionLiteral::kAccessorOrMethod, language_mode(),
+          CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
 
       if (!*is_computed_name) {
         impl()->AddAccessorPrefixToFunctionName(is_get, value, name);
       }
 
-      return factory()->NewClassLiteralProperty(
-          name_expression, value,
-          is_get ? ClassLiteralProperty::GETTER : ClassLiteralProperty::SETTER,
-          is_static, *is_computed_name);
+      *property_kind =
+          is_get ? ClassLiteralProperty::GETTER : ClassLiteralProperty::SETTER;
+      return factory()->NewClassLiteralProperty(name_expression, value,
+                                                *property_kind, *is_static,
+                                                *is_computed_name);
     }
+    case PropertyKind::kSpreadProperty:
+      UNREACHABLE();
   }
   UNREACHABLE();
   return impl()->EmptyClassLiteralProperty();
@@ -2257,9 +2331,8 @@
   FunctionKind kind = FunctionKind::kConciseMethod;
   DeclarationScope* initializer_scope = NewFunctionScope(kind);
   initializer_scope->set_start_position(scanner()->location().end_pos);
-  FunctionState initializer_state(&function_state_, &scope_state_,
-                                  initializer_scope);
-  DCHECK(scope() == initializer_scope);
+  FunctionState initializer_state(&function_state_, &scope_, initializer_scope);
+  DCHECK_EQ(initializer_scope, scope());
   scope()->SetLanguageMode(STRICT);
   ExpressionClassifier expression_classifier(this);
   ExpressionT value;
@@ -2275,12 +2348,10 @@
   body->Add(factory()->NewReturnStatement(value, kNoSourcePosition), zone());
   FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
       impl()->EmptyIdentifierString(), initializer_scope, body,
-      initializer_state.materialized_literal_count(),
       initializer_state.expected_property_count(), 0, 0,
       FunctionLiteral::kNoDuplicateParameters,
       FunctionLiteral::kAnonymousExpression, default_eager_compile_hint_,
-      initializer_scope->start_position(), true);
-  function_literal->set_is_class_field_initializer(true);
+      initializer_scope->start_position(), true, GetNextFunctionLiteralId());
   return function_literal;
 }
 
@@ -2288,6 +2359,7 @@
 typename ParserBase<Impl>::ObjectLiteralPropertyT
 ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
                                                 bool* is_computed_name,
+                                                bool* is_rest_property,
                                                 bool* ok) {
   bool is_get = false;
   bool is_set = false;
@@ -2305,6 +2377,19 @@
       is_computed_name, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
 
   switch (kind) {
+    case PropertyKind::kSpreadProperty:
+      DCHECK(allow_harmony_object_rest_spread());
+      DCHECK(!is_get && !is_set && !is_generator && !is_async &&
+             !*is_computed_name);
+      DCHECK(name_token == Token::ELLIPSIS);
+
+      *is_computed_name = true;
+      *is_rest_property = true;
+
+      return factory()->NewObjectLiteralProperty(
+          impl()->GetLiteralTheHole(kNoSourcePosition), name_expression,
+          ObjectLiteralProperty::SPREAD, true);
+
     case PropertyKind::kValueProperty: {
       DCHECK(!is_get && !is_set && !is_generator && !is_async);
 
@@ -2347,7 +2432,7 @@
       DCHECK(!*is_computed_name);
 
       if (classifier()->duplicate_finder() != nullptr &&
-          scanner()->FindSymbol(classifier()->duplicate_finder(), 1) != 0) {
+          scanner()->FindSymbol(classifier()->duplicate_finder())) {
         classifier()->RecordDuplicateFormalParameterError(
             scanner()->location());
       }
@@ -2411,8 +2496,9 @@
 
       ExpressionT value = impl()->ParseFunctionLiteral(
           name, scanner()->location(), kSkipFunctionNameCheck, kind,
-          kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
-          language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+          FLAG_harmony_function_tostring ? next_beg_pos : kNoSourcePosition,
+          FunctionLiteral::kAccessorOrMethod, language_mode(),
+          CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
 
       return factory()->NewObjectLiteralProperty(
           name_expression, value, ObjectLiteralProperty::COMPUTED,
@@ -2440,8 +2526,9 @@
 
       FunctionLiteralT value = impl()->ParseFunctionLiteral(
           name, scanner()->location(), kSkipFunctionNameCheck, kind,
-          kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
-          language_mode(), CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+          FLAG_harmony_function_tostring ? next_beg_pos : kNoSourcePosition,
+          FunctionLiteral::kAccessorOrMethod, language_mode(),
+          CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
 
       if (!*is_computed_name) {
         impl()->AddAccessorPrefixToFunctionName(is_get, value, name);
@@ -2473,7 +2560,9 @@
   typename Types::ObjectPropertyList properties =
       impl()->NewObjectPropertyList(4);
   int number_of_boilerplate_properties = 0;
+
   bool has_computed_names = false;
+  bool has_rest_property = false;
   ObjectLiteralChecker checker(this);
 
   Expect(Token::LBRACE, CHECK_OK);
@@ -2482,17 +2571,24 @@
     FuncNameInferrer::State fni_state(fni_);
 
     bool is_computed_name = false;
-    ObjectLiteralPropertyT property =
-        ParseObjectPropertyDefinition(&checker, &is_computed_name, CHECK_OK);
+    bool is_rest_property = false;
+    ObjectLiteralPropertyT property = ParseObjectPropertyDefinition(
+        &checker, &is_computed_name, &is_rest_property, CHECK_OK);
 
     if (is_computed_name) {
       has_computed_names = true;
     }
 
-    // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
-    if (!has_computed_names && impl()->IsBoilerplateProperty(property)) {
+    if (is_rest_property) {
+      has_rest_property = true;
+    }
+
+    if (impl()->IsBoilerplateProperty(property) && !has_computed_names) {
+      // Count CONSTANT or COMPUTED properties to maintain the enumeration
+      // order.
       number_of_boilerplate_properties++;
     }
+
     properties->Add(property, zone());
 
     if (peek() != Token::RBRACE) {
@@ -2504,13 +2600,18 @@
   }
   Expect(Token::RBRACE, CHECK_OK);
 
-  // Computation of literal_index must happen before pre parse bailout.
-  int literal_index = function_state_->NextMaterializedLiteralIndex();
+  // The pattern rewriter rewrites a rest property into a call to a runtime
+  // function that receives all the other properties as arguments. Here we
+  // make sure that the number of properties stays below the maximum number
+  // of arguments allowed for a runtime call.
+  if (has_rest_property && properties->length() > Code::kMaxArguments) {
+    this->classifier()->RecordPatternError(Scanner::Location(pos, position()),
+                                           MessageTemplate::kTooManyArguments);
+  }
 
-  return factory()->NewObjectLiteral(properties,
-                                     literal_index,
-                                     number_of_boilerplate_properties,
-                                     pos);
+  return factory()->NewObjectLiteral(
+      properties, number_of_boilerplate_properties, pos, has_rest_property);
 }
 
 template <typename Impl>
@@ -2523,8 +2624,6 @@
   ExpressionListT result = impl()->NewExpressionList(4);
   Expect(Token::LPAREN, CHECK_OK_CUSTOM(NullExpressionList));
   bool done = (peek() == Token::RPAREN);
-  bool was_unspread = false;
-  int unspread_sequences_count = 0;
   while (!done) {
     int start_pos = peek_position();
     bool is_spread = Check(Token::ELLIPSIS);
@@ -2544,15 +2643,6 @@
     }
     result->Add(argument, zone_);
 
-    // unspread_sequences_count is the number of sequences of parameters which
-    // are not prefixed with a spread '...' operator.
-    if (is_spread) {
-      was_unspread = false;
-    } else if (!was_unspread) {
-      was_unspread = true;
-      unspread_sequences_count++;
-    }
-
     if (result->length() > Code::kMaxArguments) {
       ReportMessage(MessageTemplate::kTooManyArguments);
       *ok = false;
@@ -2579,12 +2669,6 @@
     if (maybe_arrow) {
       impl()->RewriteNonPattern(CHECK_OK_CUSTOM(NullExpressionList));
     }
-    if (spread_arg.IsValid()) {
-      // Unspread parameter sequences are translated into array literals in the
-      // parser. Ensure that the number of materialized literals matches between
-      // the parser and preparser
-      impl()->MaterializeUnspreadArgumentsLiterals(unspread_sequences_count);
-    }
   }
 
   return result;
@@ -2611,8 +2695,10 @@
       this, classifier()->duplicate_finder());
 
   Scope::Snapshot scope_snapshot(scope());
+  int rewritable_length =
+      function_state_->destructuring_assignments_to_rewrite().length();
 
-  bool is_async = allow_harmony_async_await() && peek() == Token::ASYNC &&
+  bool is_async = peek() == Token::ASYNC &&
                   !scanner()->HasAnyLineTerminatorAfterNext() &&
                   IsValidArrowFormalParametersStart(PeekAhead());
 
@@ -2664,6 +2750,7 @@
     this->scope()->PropagateUsageFlagsToScope(scope);
 
     scope_snapshot.Reparent(scope);
+    function_state_->SetDestructuringAssignmentsScope(rewritable_length, scope);
 
     FormalParametersT parameters(scope);
     if (!classifier()->is_simple_parameter_list()) {
@@ -2680,7 +2767,8 @@
     if (duplicate_loc.IsValid()) {
       classifier()->RecordDuplicateFormalParameterError(duplicate_loc);
     }
-    expression = ParseArrowFunctionLiteral(accept_IN, parameters, CHECK_OK);
+    expression = ParseArrowFunctionLiteral(accept_IN, parameters,
+                                           rewritable_length, CHECK_OK);
     impl()->Discard();
     classifier()->RecordPatternError(arrow_loc,
                                      MessageTemplate::kUnexpectedToken,
@@ -2732,7 +2820,7 @@
         MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
   }
 
-  expression = impl()->MarkExpressionAsAssigned(expression);
+  impl()->MarkExpressionAsAssigned(expression);
 
   Token::Value op = Next();  // Get assignment operator.
   if (op != Token::ASSIGN) {
@@ -2944,6 +3032,12 @@
 
     op = Next();
     int pos = position();
+
+    // Assume "! function ..." indicates the function is likely to be called.
+    if (op == Token::NOT && peek() == Token::FUNCTION) {
+      function_state_->set_next_function_is_likely_called();
+    }
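+    // (e.g. the common IIFE form `!function () { ... }();`.)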
+
     ExpressionT expression = ParseUnaryExpression(CHECK_OK);
     impl()->RewriteNonPattern(CHECK_OK);
 
@@ -2973,7 +3067,7 @@
     expression = CheckAndRewriteReferenceExpression(
         expression, beg_pos, scanner()->location().end_pos,
         MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
-    expression = impl()->MarkExpressionAsAssigned(expression);
+    impl()->MarkExpressionAsAssigned(expression);
     impl()->RewriteNonPattern(CHECK_OK);
 
     return factory()->NewCountOperation(op,
@@ -3013,7 +3107,7 @@
     expression = CheckAndRewriteReferenceExpression(
         expression, lhs_beg_pos, scanner()->location().end_pos,
         MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
-    expression = impl()->MarkExpressionAsAssigned(expression);
+    impl()->MarkExpressionAsAssigned(expression);
     impl()->RewriteNonPattern(CHECK_OK);
 
     Token::Value next = Next();
@@ -3119,8 +3213,7 @@
 
         bool is_super_call = result->IsSuperCallReference();
         if (spread_pos.IsValid()) {
-          args = impl()->PrepareSpreadArguments(args);
-          result = impl()->SpreadCall(result, args, pos);
+          result = impl()->SpreadCall(result, args, pos, is_possibly_eval);
         } else {
           result = factory()->NewCall(result, args, pos, is_possibly_eval);
         }
@@ -3128,7 +3221,6 @@
         // Explicit calls to the super constructor using super() perform an
         // implicit binding assignment to the 'this' variable.
         if (is_super_call) {
-          result = impl()->RewriteSuperCall(result);
           ExpressionT this_expr = impl()->ThisExpression(pos);
           result =
               factory()->NewAssignment(Token::INIT, this_expr, result, pos);
@@ -3156,7 +3248,7 @@
         impl()->RewriteNonPattern(CHECK_OK);
         BindingPatternUnexpectedToken();
         ArrowFormalParametersUnexpectedToken();
-        result = ParseTemplateLiteral(result, position(), CHECK_OK);
+        result = ParseTemplateLiteral(result, position(), true, CHECK_OK);
         break;
       }
 
@@ -3199,6 +3291,11 @@
     if (peek() == Token::SUPER) {
       const bool is_new = true;
       result = ParseSuperExpression(is_new, CHECK_OK);
+    } else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT) {
+      impl()->ReportMessageAt(scanner()->peek_location(),
+                              MessageTemplate::kImportCallNotNewExpression);
+      *ok = false;
+      return impl()->EmptyExpression();
     } else if (peek() == Token::PERIOD) {
       return ParseNewTargetExpression(CHECK_OK);
     } else {
@@ -3211,7 +3308,6 @@
       ExpressionListT args = ParseArguments(&spread_pos, CHECK_OK);
 
       if (spread_pos.IsValid()) {
-        args = impl()->PrepareSpreadArguments(args);
         result = impl()->SpreadCallNew(result, args, new_pos);
       } else {
         result = factory()->NewCallNew(result, args, new_pos);
@@ -3233,7 +3329,11 @@
   // MemberExpression ::
   //   (PrimaryExpression | FunctionLiteral | ClassLiteral)
   //     ('[' Expression ']' | '.' Identifier | Arguments | TemplateLiteral)*
-
+  //
+  // CallExpression ::
+  //   (SuperCall | ImportCall)
+  //     ('[' Expression ']' | '.' Identifier | Arguments | TemplateLiteral)*
+  //
   // The '[' Expression ']' and '.' Identifier parts are parsed by
   // ParseMemberExpressionContinuation, and the Arguments part is parsed by the
   // caller.
@@ -3271,7 +3371,12 @@
     Scanner::Location function_name_location = Scanner::Location::invalid();
     FunctionLiteral::FunctionType function_type =
         FunctionLiteral::kAnonymousExpression;
-    if (peek_any_identifier()) {
+    if (impl()->ParsingDynamicFunctionDeclaration()) {
+      // We don't want dynamic functions to actually declare their name
+      // "anonymous". We just want that name in the toString().
+      Consume(Token::IDENTIFIER);
+      DCHECK(scanner()->UnescapedLiteralMatches("anonymous", 9));
+    } else if (peek_any_identifier()) {
       name = ParseIdentifierOrStrictReservedWord(
           function_kind, &is_strict_reserved_name, CHECK_OK);
       function_name_location = scanner()->location();
@@ -3286,6 +3391,8 @@
   } else if (peek() == Token::SUPER) {
     const bool is_new = false;
     result = ParseSuperExpression(is_new, CHECK_OK);
+  } else if (allow_harmony_dynamic_import() && peek() == Token::IMPORT) {
+    result = ParseDynamicImportExpression(CHECK_OK);
   } else {
     result = ParsePrimaryExpression(is_async, CHECK_OK);
   }
@@ -3295,6 +3402,20 @@
 }
 
 template <typename Impl>
+typename ParserBase<Impl>::ExpressionT
+ParserBase<Impl>::ParseDynamicImportExpression(bool* ok) {
+  DCHECK(allow_harmony_dynamic_import());
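+  // (For illustration: this accepts call syntax such as `import("./mod.js")`
+  // and lowers it to a one-argument Runtime::kDynamicImportCall; the
+  // `new import(...)` form is rejected separately with
+  // kImportCallNotNewExpression.)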
+  Consume(Token::IMPORT);
+  int pos = position();
+  Expect(Token::LPAREN, CHECK_OK);
+  ExpressionT arg = ParseAssignmentExpression(true, CHECK_OK);
+  Expect(Token::RPAREN, CHECK_OK);
+  ZoneList<ExpressionT>* args = new (zone()) ZoneList<ExpressionT>(1, zone());
+  args->Add(arg, zone());
+  return factory()->NewCallRuntime(Runtime::kDynamicImportCall, args, pos);
+}
+
+template <typename Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
     bool is_new, bool* ok) {
   Expect(Token::SUPER, CHECK_OK);
@@ -3310,7 +3431,7 @@
     }
     // new super() is never allowed.
     // super() is only allowed in derived constructor
-    if (!is_new && peek() == Token::LPAREN && IsSubclassConstructor(kind)) {
+    if (!is_new && peek() == Token::LPAREN && IsDerivedConstructor(kind)) {
       // TODO(rossberg): This might not be the correct FunctionState for the
       // method here.
       return impl()->NewSuperCallReference(pos);
@@ -3407,7 +3528,7 @@
             expression->AsFunctionLiteral()->SetShouldEagerCompile();
           }
         }
-        expression = ParseTemplateLiteral(expression, pos, CHECK_OK);
+        expression = ParseTemplateLiteral(expression, pos, true, CHECK_OK);
         break;
       }
       case Token::ILLEGAL: {
@@ -3501,10 +3622,7 @@
     }
   }
 
-  for (int i = 0; i < parameters->arity; ++i) {
-    auto parameter = parameters->at(i);
-    impl()->DeclareFormalParameter(parameters->scope, parameter);
-  }
+  impl()->DeclareFormalParameters(parameters->scope, parameters->params);
 }
 
 template <typename Impl>
@@ -3551,12 +3669,7 @@
   }
 
   parsing_result->descriptor.scope = scope();
-  parsing_result->descriptor.hoist_scope = nullptr;
 
-  // The scope of a var/const declared variable anywhere inside a function
-  // is the entire function (ECMA-262, 3rd, 10.1.3, and 12.2). The scope
-  // of a let declared variable is the scope of the immediately enclosing
-  // block.
   int bindings_start = peek_position();
   do {
     // Parse binding pattern.
@@ -3725,8 +3838,22 @@
       pos, FunctionLiteral::kDeclaration, language_mode(),
       CHECK_OK_CUSTOM(NullStatement));
 
-  return impl()->DeclareFunction(variable_name, function, pos, is_generator,
-                                 is_async, names, ok);
+  // In ES6, a function behaves as a lexical binding, except in
+  // a script scope, or the initial scope of eval or another function.
+  VariableMode mode =
+      (!scope()->is_declaration_scope() || scope()->is_module_scope()) ? LET
+                                                                       : VAR;
+  // Async functions don't undergo sloppy mode block scoped hoisting, and don't
+  // allow duplicates in a block. Both are represented by the
+  // sloppy_block_function_map. Don't add them to the map for async functions.
+  // Generators are also supposed to be prohibited; currently doing this behind
+  // a flag and UseCounting violations to assess web compatibility.
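+  // (For illustration: in sloppy mode `{ function f() {} }` also creates a
+  // var-like binding for f in the enclosing function via the
+  // sloppy_block_function_map, whereas `{ async function f() {} }` does not.)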
+  bool is_sloppy_block_function =
+      is_sloppy(language_mode()) && !scope()->is_declaration_scope() &&
+      !is_async && !(allow_harmony_restrictive_generators() && is_generator);
+
+  return impl()->DeclareFunction(variable_name, function, mode, pos,
+                                 is_sloppy_block_function, names, ok);
 }
 
 template <typename Impl>
@@ -3815,6 +3942,107 @@
 }
 
 template <typename Impl>
+void ParserBase<Impl>::ParseFunctionBody(
+    typename ParserBase<Impl>::StatementListT result, IdentifierT function_name,
+    int pos, const FormalParametersT& parameters, FunctionKind kind,
+    FunctionLiteral::FunctionType function_type, bool* ok) {
+  static const int kFunctionNameAssignmentIndex = 0;
+  if (function_type == FunctionLiteral::kNamedExpression) {
+    DCHECK(!impl()->IsEmptyIdentifier(function_name));
+    // If we have a named function expression, we add a local variable
+    // declaration to the body of the function with the name of the
+    // function and let it refer to the function itself (closure).
+    // Since the function body has not yet been parsed, the language mode may
+    // still change, so we reserve a spot and create the actual const
+    // assignment later.
+    DCHECK_EQ(kFunctionNameAssignmentIndex, result->length());
+    result->Add(impl()->NullStatement(), zone());
+  }
+
+  DeclarationScope* function_scope = scope()->AsDeclarationScope();
+  DeclarationScope* inner_scope = function_scope;
+  BlockT inner_block = impl()->NullBlock();
+
+  StatementListT body = result;
+  if (!parameters.is_simple) {
+    inner_scope = NewVarblockScope();
+    inner_scope->set_start_position(scanner()->location().beg_pos);
+    inner_block = factory()->NewBlock(NULL, 8, true, kNoSourcePosition);
+    inner_block->set_scope(inner_scope);
+    body = inner_block->statements();
+  }
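+  // (Sketch of why the separate var scope matters: in
+  //   function f(g = () => x, x = 1) { var x = 2; return g(); }
+  // the body's `var x` lives in inner_scope while g closes over the
+  // parameter binding, so f() returns 1.)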
+
+  {
+    BlockState block_state(&scope_, inner_scope);
+
+    if (IsGeneratorFunction(kind)) {
+      impl()->ParseAndRewriteGeneratorFunctionBody(pos, kind, body, ok);
+    } else if (IsAsyncFunction(kind)) {
+      const bool accept_IN = true;
+      ParseAsyncFunctionBody(inner_scope, body, kind, FunctionBodyType::kNormal,
+                             accept_IN, pos, CHECK_OK_VOID);
+    } else {
+      ParseStatementList(body, Token::RBRACE, CHECK_OK_VOID);
+    }
+
+    if (IsDerivedConstructor(kind)) {
+      body->Add(factory()->NewReturnStatement(impl()->ThisExpression(),
+                                              kNoSourcePosition),
+                zone());
+    }
+  }
+
+  Expect(Token::RBRACE, CHECK_OK_VOID);
+  scope()->set_end_position(scanner()->location().end_pos);
+
+  if (!parameters.is_simple) {
+    DCHECK_NOT_NULL(inner_scope);
+    DCHECK_EQ(function_scope, scope());
+    DCHECK_EQ(function_scope, inner_scope->outer_scope());
+    impl()->SetLanguageMode(function_scope, inner_scope->language_mode());
+    BlockT init_block =
+        impl()->BuildParameterInitializationBlock(parameters, CHECK_OK_VOID);
+
+    if (is_sloppy(inner_scope->language_mode())) {
+      impl()->InsertSloppyBlockFunctionVarBindings(inner_scope);
+    }
+
+    // TODO(littledan): Merge the two rejection blocks into one
+    if (IsAsyncFunction(kind)) {
+      init_block = impl()->BuildRejectPromiseOnException(init_block);
+    }
+
+    inner_scope->set_end_position(scanner()->location().end_pos);
+    if (inner_scope->FinalizeBlockScope() != nullptr) {
+      impl()->CheckConflictingVarDeclarations(inner_scope, CHECK_OK_VOID);
+      impl()->InsertShadowingVarBindingInitializers(inner_block);
+    } else {
+      inner_block->set_scope(nullptr);
+    }
+    inner_scope = nullptr;
+
+    result->Add(init_block, zone());
+    result->Add(inner_block, zone());
+  } else {
+    DCHECK_EQ(inner_scope, function_scope);
+    if (is_sloppy(function_scope->language_mode())) {
+      impl()->InsertSloppyBlockFunctionVarBindings(function_scope);
+    }
+  }
+
+  if (!IsArrowFunction(kind)) {
+    // Declare 'arguments' after parsing the function body, since a lexical
+    // 'arguments' binding in the body masks the arguments object. Declare it
+    // before declaring the function-name variable, since the arguments object
+    // masks a function named 'arguments'.
+    function_scope->DeclareArguments(ast_value_factory());
+  }
+
+  impl()->CreateFunctionNameAssignment(function_name, pos, function_type,
+                                       function_scope, result,
+                                       kFunctionNameAssignmentIndex);
+  impl()->MarkCollectedTailCallExpressions();
+}
+
+template <typename Impl>
 void ParserBase<Impl>::CheckArityRestrictions(int param_count,
                                               FunctionKind function_kind,
                                               bool has_rest,
@@ -3889,11 +4117,16 @@
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT
 ParserBase<Impl>::ParseArrowFunctionLiteral(
-    bool accept_IN, const FormalParametersT& formal_parameters, bool* ok) {
+    bool accept_IN, const FormalParametersT& formal_parameters,
+    int rewritable_length, bool* ok) {
+  const RuntimeCallStats::CounterId counters[2][2] = {
+      {&RuntimeCallStats::ParseBackgroundArrowFunctionLiteral,
+       &RuntimeCallStats::ParseArrowFunctionLiteral},
+      {&RuntimeCallStats::PreParseBackgroundArrowFunctionLiteral,
+       &RuntimeCallStats::PreParseArrowFunctionLiteral}};
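+  // Indexed as counters[is_preparser][on_main_thread]: column 0 holds the
+  // background-thread counters, column 1 the main-thread ones.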
   RuntimeCallTimerScope runtime_timer(
       runtime_call_stats_,
-      Impl::IsPreParser() ? &RuntimeCallStats::ParseArrowFunctionLiteral
-                          : &RuntimeCallStats::PreParseArrowFunctionLiteral);
+      counters[Impl::IsPreParser()][parsing_on_main_thread_]);
 
   if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
     // ASI inserts `;` after arrow parameters if a line terminator is found.
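     // E.g. (illustrative):
     //   let f = (a)
     //       => a;  // SyntaxError: no line break may precede `=>`.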
@@ -3905,8 +4138,8 @@
   }
 
   StatementListT body = impl()->NullStatementList();
-  int materialized_literal_count = -1;
   int expected_property_count = -1;
+  int function_literal_id = GetNextFunctionLiteralId();
 
   FunctionKind kind = formal_parameters.scope->function_kind();
   FunctionLiteral::EagerCompileHint eager_compile_hint =
@@ -3920,14 +4153,9 @@
   bool should_be_used_once_hint = false;
   bool has_braces = true;
   {
-    FunctionState function_state(&function_state_, &scope_state_,
+    FunctionState function_state(&function_state_, &scope_,
                                  formal_parameters.scope);
 
-    function_state.SkipMaterializedLiterals(
-        formal_parameters.materialized_literals_count);
-
-    impl()->ReindexLiterals(formal_parameters);
-
     Expect(Token::ARROW, CHECK_OK);
 
     if (peek() == Token::LBRACE) {
@@ -3948,16 +4176,10 @@
         LazyParsingResult result = impl()->SkipFunction(
             kind, formal_parameters.scope, &dummy_num_parameters,
             &dummy_function_length, &dummy_has_duplicate_parameters,
-            &materialized_literal_count, &expected_property_count, false, true,
-            CHECK_OK);
+            &expected_property_count, false, true, CHECK_OK);
         formal_parameters.scope->ResetAfterPreparsing(
             ast_value_factory_, result == kLazyParsingAborted);
 
-        if (formal_parameters.materialized_literals_count > 0) {
-          materialized_literal_count +=
-              formal_parameters.materialized_literals_count;
-        }
-
         if (result == kLazyParsingAborted) {
           bookmark.Apply();
           // Trigger eager (re-)parsing, just below this block.
@@ -3972,11 +4194,11 @@
       }
       if (!is_lazy_top_level_function) {
         Consume(Token::LBRACE);
-        body = impl()->ParseEagerFunctionBody(
-            impl()->EmptyIdentifier(), kNoSourcePosition, formal_parameters,
-            kind, FunctionLiteral::kAnonymousExpression, CHECK_OK);
-        materialized_literal_count =
-            function_state.materialized_literal_count();
+        body = impl()->NewStatementList(8);
+        impl()->ParseFunctionBody(body, impl()->EmptyIdentifier(),
+                                  kNoSourcePosition, formal_parameters, kind,
+                                  FunctionLiteral::kAnonymousExpression,
+                                  CHECK_OK);
         expected_property_count = function_state.expected_property_count();
       }
     } else {
@@ -3999,15 +4221,13 @@
       } else {
         ExpressionT expression = ParseAssignmentExpression(accept_IN, CHECK_OK);
         impl()->RewriteNonPattern(CHECK_OK);
-        body->Add(
-            factory()->NewReturnStatement(expression, expression->position()),
-            zone());
+        body->Add(BuildReturnStatement(expression, expression->position()),
+                  zone());
         if (allow_tailcalls() && !is_sloppy(language_mode())) {
           // ES6 14.6.1 Static Semantics: IsInTailPosition
           impl()->MarkTailPosition(expression);
         }
       }
-      materialized_literal_count = function_state.materialized_literal_count();
       expected_property_count = function_state.expected_property_count();
       impl()->MarkCollectedTailCallExpressions();
     }
@@ -4029,6 +4249,14 @@
     }
     impl()->CheckConflictingVarDeclarations(formal_parameters.scope, CHECK_OK);
 
+    if (is_lazy_top_level_function) {
+      FunctionState* parent_state = function_state.outer();
+      DCHECK_NOT_NULL(parent_state);
+      DCHECK_GE(parent_state->destructuring_assignments_to_rewrite().length(),
+                rewritable_length);
+      parent_state->RewindDestructuringAssignments(rewritable_length);
+    }
+
     impl()->RewriteDestructuringAssignments();
   }
 
@@ -4040,11 +4268,12 @@
   }
   FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
       impl()->EmptyIdentifierString(), formal_parameters.scope, body,
-      materialized_literal_count, expected_property_count,
-      formal_parameters.num_parameters(), formal_parameters.function_length,
+      expected_property_count, formal_parameters.num_parameters(),
+      formal_parameters.function_length,
       FunctionLiteral::kNoDuplicateParameters,
       FunctionLiteral::kAnonymousExpression, eager_compile_hint,
-      formal_parameters.scope->start_position(), has_braces);
+      formal_parameters.scope->start_position(), has_braces,
+      function_literal_id);
 
   function_literal->set_function_token_position(
       formal_parameters.scope->start_position());
@@ -4075,21 +4304,18 @@
     return impl()->EmptyExpression();
   }
 
-  BlockState block_state(zone(), &scope_state_);
+  BlockState block_state(zone(), &scope_);
   RaiseLanguageMode(STRICT);
 
   ClassInfo class_info(this);
-  impl()->DeclareClassVariable(name, block_state.scope(), &class_info,
-                               class_token_pos, CHECK_OK);
+  impl()->DeclareClassVariable(name, &class_info, class_token_pos, CHECK_OK);
 
+  scope()->set_start_position(scanner()->location().end_pos);
   if (Check(Token::EXTENDS)) {
-    block_state.set_start_position(scanner()->location().end_pos);
     ExpressionClassifier extends_classifier(this);
     class_info.extends = ParseLeftHandSideExpression(CHECK_OK);
     impl()->RewriteNonPattern(CHECK_OK);
     impl()->AccumulateFormalParameterContainmentErrors();
-  } else {
-    block_state.set_start_position(scanner()->location().end_pos);
   }
 
   ClassLiteralChecker checker(this);
@@ -4102,14 +4328,26 @@
     FuncNameInferrer::State fni_state(fni_);
     bool is_computed_name = false;  // Classes do not care about computed
                                     // property names here.
+    bool is_static;
+    ClassLiteralProperty::Kind property_kind;
     ExpressionClassifier property_classifier(this);
+    // If we haven't seen the constructor yet, the next property might be the
+    // constructor.
+    bool is_constructor = !class_info.has_seen_constructor;
     ClassLiteralPropertyT property = ParseClassPropertyDefinition(
         &checker, has_extends, &is_computed_name,
-        &class_info.has_seen_constructor, CHECK_OK);
+        &class_info.has_seen_constructor, &property_kind, &is_static,
+        &class_info.has_name_static_property, CHECK_OK);
+    if (!class_info.has_static_computed_names && is_static &&
+        is_computed_name) {
+      class_info.has_static_computed_names = true;
+    }
+    is_constructor &= class_info.has_seen_constructor;
     impl()->RewriteNonPattern(CHECK_OK);
     impl()->AccumulateFormalParameterContainmentErrors();
 
-    impl()->DeclareClassProperty(name, property, &class_info, CHECK_OK);
+    impl()->DeclareClassProperty(name, property, property_kind, is_static,
+                                 is_constructor, &class_info, CHECK_OK);
     impl()->InferFunctionName();
   }
 
@@ -4123,8 +4361,6 @@
                                               FunctionBodyType body_type,
                                               bool accept_IN, int pos,
                                               bool* ok) {
-  scope->ForceContextAllocation();
-
   impl()->PrepareAsyncFunctionBody(body, kind, pos);
 
   BlockT block = factory()->NewBlock(nullptr, 8, true, kNoSourcePosition);
@@ -4160,7 +4396,12 @@
   IdentifierT name = impl()->EmptyIdentifier();
   FunctionLiteral::FunctionType type = FunctionLiteral::kAnonymousExpression;
 
-  if (peek_any_identifier()) {
+  if (impl()->ParsingDynamicFunctionDeclaration()) {
+    // We don't want dynamic functions to actually declare their name
+    // "anonymous". We just want that name in the toString().
+    Consume(Token::IDENTIFIER);
+    DCHECK(scanner()->UnescapedLiteralMatches("anonymous", 9));
+  } else if (peek_any_identifier()) {
     type = FunctionLiteral::kNamedExpression;
     name = ParseIdentifierOrStrictReservedWord(FunctionKind::kAsyncFunction,
                                                &is_strict_reserved, CHECK_OK);
@@ -4174,7 +4415,7 @@
 
 template <typename Impl>
 typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseTemplateLiteral(
-    ExpressionT tag, int start, bool* ok) {
+    ExpressionT tag, int start, bool tagged, bool* ok) {
   // A TemplateLiteral is made up of 0 or more TEMPLATE_SPAN tokens (literal
   // text followed by a substitution expression), finalized by a single
   // TEMPLATE_TAIL.
@@ -4187,22 +4428,25 @@
   // TEMPLATE_SPAN, or a TEMPLATE_TAIL.
   CHECK(peek() == Token::TEMPLATE_SPAN || peek() == Token::TEMPLATE_TAIL);
 
+  bool forbid_illegal_escapes = !allow_harmony_template_escapes() || !tagged;
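+  // E.g. (illustrative): with the flag enabled, a tagged template such as
+  // tag`\unicode` may contain an invalid escape (its cooked value becomes
+  // undefined), while an untagged `\unicode` template remains a SyntaxError.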
+
   // If we reach a TEMPLATE_TAIL first, we are parsing a NoSubstitutionTemplate.
   // In this case we may simply consume the token and build a template with a
   // single TEMPLATE_SPAN and no expressions.
   if (peek() == Token::TEMPLATE_TAIL) {
     Consume(Token::TEMPLATE_TAIL);
     int pos = position();
-    CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
     typename Impl::TemplateLiteralState ts = impl()->OpenTemplateLiteral(pos);
-    impl()->AddTemplateSpan(&ts, true);
+    bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes, CHECK_OK);
+    impl()->AddTemplateSpan(&ts, is_valid, true);
     return impl()->CloseTemplateLiteral(&ts, start, tag);
   }
 
   Consume(Token::TEMPLATE_SPAN);
   int pos = position();
   typename Impl::TemplateLiteralState ts = impl()->OpenTemplateLiteral(pos);
-  impl()->AddTemplateSpan(&ts, false);
+  bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes, CHECK_OK);
+  impl()->AddTemplateSpan(&ts, is_valid, false);
   Token::Value next;
 
   // If we open with a TEMPLATE_SPAN, we must scan the subsequent expression,
@@ -4210,7 +4454,6 @@
   // case, representing a TemplateMiddle).
 
   do {
-    CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
     next = peek();
     if (next == Token::EOS) {
       impl()->ReportMessageAt(Scanner::Location(start, peek_position()),
@@ -4256,11 +4499,11 @@
       return impl()->EmptyExpression();
     }
 
-    impl()->AddTemplateSpan(&ts, next == Token::TEMPLATE_TAIL);
+    bool is_valid = CheckTemplateEscapes(forbid_illegal_escapes, CHECK_OK);
+    impl()->AddTemplateSpan(&ts, is_valid, next == Token::TEMPLATE_TAIL);
   } while (next == Token::TEMPLATE_SPAN);
 
   DCHECK_EQ(next, Token::TEMPLATE_TAIL);
-  CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
   // Once we've reached a TEMPLATE_TAIL, we can close the TemplateLiteral.
   return impl()->CloseTemplateLiteral(&ts, start, tag);
 }
@@ -4291,7 +4534,12 @@
   }
   if (expression->IsCall()) {
     // If it is a call, make it a runtime error for legacy web compatibility.
+    // Bug: https://bugs.chromium.org/p/v8/issues/detail?id=4480
     // Rewrite `expr' to `expr[throw ReferenceError]'.
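     // E.g. (illustrative): `f() = 1;` parses, but evaluating it throws a
     // ReferenceError when the rewritten property key is evaluated.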
+    impl()->CountUsage(
+        is_strict(language_mode())
+            ? v8::Isolate::kAssigmentExpressionLHSIsCallInStrict
+            : v8::Isolate::kAssigmentExpressionLHSIsCallInSloppy);
     ExpressionT error = impl()->NewThrowReferenceError(message, beg_pos);
     return factory()->NewProperty(expression, error, beg_pos);
   }
@@ -4473,7 +4721,7 @@
       }
       break;
     case Token::ASYNC:
-      if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
+      if (PeekAhead() == Token::FUNCTION &&
           !scanner()->HasAnyLineTerminatorAfterNext()) {
         Consume(Token::ASYNC);
         return ParseAsyncFunctionDeclaration(nullptr, false, ok);
@@ -4525,6 +4773,10 @@
     case Token::WHILE:
       return ParseWhileStatement(labels, ok);
     case Token::FOR:
+      if (V8_UNLIKELY(allow_harmony_async_iteration() && is_async_function() &&
+                      PeekAhead() == Token::AWAIT)) {
+        return ParseForAwaitStatement(labels, ok);
+      }
       return ParseForStatement(labels, ok);
     case Token::CONTINUE:
     case Token::BREAK:
@@ -4607,8 +4859,8 @@
   // Parse the statements and collect escaping labels.
   Expect(Token::LBRACE, CHECK_OK_CUSTOM(NullBlock));
   {
-    BlockState block_state(zone(), &scope_state_);
-    block_state.set_start_position(scanner()->location().beg_pos);
+    BlockState block_state(zone(), &scope_);
+    scope()->set_start_position(scanner()->location().beg_pos);
     typename Types::Target target(this, body);
 
     while (peek() != Token::RBRACE) {
@@ -4619,30 +4871,27 @@
     }
 
     Expect(Token::RBRACE, CHECK_OK_CUSTOM(NullBlock));
-    block_state.set_end_position(scanner()->location().end_pos);
-    body->set_scope(block_state.FinalizedBlockScope());
+    scope()->set_end_position(scanner()->location().end_pos);
+    body->set_scope(scope()->FinalizeBlockScope());
   }
   return body;
 }
 
 template <typename Impl>
 typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseScopedStatement(
-    ZoneList<const AstRawString*>* labels, bool legacy, bool* ok) {
-  if (is_strict(language_mode()) || peek() != Token::FUNCTION || legacy) {
-    return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
+    ZoneList<const AstRawString*>* labels, bool* ok) {
+  if (is_strict(language_mode()) || peek() != Token::FUNCTION) {
+    return ParseStatement(labels, ok);
   } else {
-    if (legacy) {
-      impl()->CountUsage(v8::Isolate::kLegacyFunctionDeclaration);
-    }
     // Make a block around the statement in case a lexical binding
     // is introduced by a FunctionDeclaration.
-    BlockState block_state(zone(), &scope_state_);
-    block_state.set_start_position(scanner()->location().beg_pos);
+    BlockState block_state(zone(), &scope_);
+    scope()->set_start_position(scanner()->location().beg_pos);
     BlockT block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
     StatementT body = ParseFunctionDeclaration(CHECK_OK);
     block->statements()->Add(body, zone());
-    block_state.set_end_position(scanner()->location().end_pos);
-    block->set_scope(block_state.FinalizedBlockScope());
+    scope()->set_end_position(scanner()->location().end_pos);
+    block->set_scope(scope()->FinalizeBlockScope());
     return block;
   }
 }
@@ -4710,6 +4959,19 @@
       ReportUnexpectedToken(Next());
       *ok = false;
       return impl()->NullStatement();
+    case Token::LET: {
+      Token::Value next_next = PeekAhead();
+      // "let" followed by either "[", "{" or an identifier means a lexical
+      // declaration, which should not appear here.
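+      // E.g. (illustrative): `if (c) let x = 1;` is reported here, while
+      // sloppy-mode `if (c) let;` treats `let` as an identifier and falls
+      // through to `break` below.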
+      if (next_next != Token::LBRACK && next_next != Token::LBRACE &&
+          next_next != Token::IDENTIFIER) {
+        break;
+      }
+      impl()->ReportMessageAt(scanner()->peek_location(),
+                              MessageTemplate::kUnexpectedLexicalDeclaration);
+      *ok = false;
+      return impl()->NullStatement();
+    }
     default:
       break;
   }
@@ -4724,14 +4986,11 @@
                                   CHECK_OK);
     Consume(Token::COLON);
     // ES#sec-labelled-function-declarations Labelled Function Declarations
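     // E.g. (illustrative): sloppy-mode `l: function f() {}` is accepted here
     // when labelled function statements are allowed.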
-    if (peek() == Token::FUNCTION && is_sloppy(language_mode())) {
-      if (allow_function == kAllowLabelledFunctionStatement) {
-        return ParseFunctionDeclaration(ok);
-      } else {
-        return ParseScopedStatement(labels, true, ok);
-      }
+    if (peek() == Token::FUNCTION && is_sloppy(language_mode()) &&
+        allow_function == kAllowLabelledFunctionStatement) {
+      return ParseFunctionDeclaration(ok);
     }
-    return ParseStatement(labels, kDisallowLabelledFunctionStatement, ok);
+    return ParseStatement(labels, ok);
   }
 
   // If we have an extension, we allow a native function declaration.
@@ -4759,10 +5018,10 @@
   Expect(Token::LPAREN, CHECK_OK);
   ExpressionT condition = ParseExpression(true, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
-  StatementT then_statement = ParseScopedStatement(labels, false, CHECK_OK);
+  StatementT then_statement = ParseScopedStatement(labels, CHECK_OK);
   StatementT else_statement = impl()->NullStatement();
   if (Check(Token::ELSE)) {
-    else_statement = ParseScopedStatement(labels, false, CHECK_OK);
+    else_statement = ParseScopedStatement(labels, CHECK_OK);
   } else {
     else_statement = factory()->NewEmptyStatement(kNoSourcePosition);
   }
@@ -4866,13 +5125,13 @@
   ExpressionT return_value = impl()->EmptyExpression();
   if (scanner()->HasAnyLineTerminatorBeforeNext() || tok == Token::SEMICOLON ||
       tok == Token::RBRACE || tok == Token::EOS) {
-    if (IsSubclassConstructor(function_state_->kind())) {
+    if (IsDerivedConstructor(function_state_->kind())) {
       return_value = impl()->ThisExpression(loc.beg_pos);
     } else {
       return_value = impl()->GetLiteralUndefined(position());
     }
   } else {
-    if (IsSubclassConstructor(function_state_->kind())) {
+    if (IsDerivedConstructor(function_state_->kind())) {
       // Because of the return code rewriting that happens in case of a subclass
       // constructor we don't want to accept tail calls, therefore we don't set
       // ReturnExprScope to kInsideValidReturnStatement here.
@@ -4890,7 +5149,7 @@
   }
   ExpectSemicolon(CHECK_OK);
   return_value = impl()->RewriteReturn(return_value, loc.beg_pos);
-  return factory()->NewReturnStatement(return_value, loc.beg_pos);
+  return BuildReturnStatement(return_value, loc.beg_pos);
 }
 
 template <typename Impl>
@@ -4915,9 +5174,9 @@
   Scope* with_scope = NewScope(WITH_SCOPE);
   StatementT body = impl()->NullStatement();
   {
-    BlockState block_state(&scope_state_, with_scope);
+    BlockState block_state(&scope_, with_scope);
     with_scope->set_start_position(scanner()->peek_location().beg_pos);
-    body = ParseScopedStatement(labels, true, CHECK_OK);
+    body = ParseStatement(labels, CHECK_OK);
     with_scope->set_end_position(scanner()->location().end_pos);
   }
   return factory()->NewWithStatement(with_scope, expr, body, pos);
@@ -4933,7 +5192,7 @@
   typename Types::Target target(this, loop);
 
   Expect(Token::DO, CHECK_OK);
-  StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
+  StatementT body = ParseStatement(nullptr, CHECK_OK);
   Expect(Token::WHILE, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
 
@@ -4963,7 +5222,7 @@
   Expect(Token::LPAREN, CHECK_OK);
   ExpressionT cond = ParseExpression(true, CHECK_OK);
   Expect(Token::RPAREN, CHECK_OK);
-  StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
+  StatementT body = ParseStatement(nullptr, CHECK_OK);
 
   loop->Initialize(cond, body);
   return loop;
@@ -5007,9 +5266,9 @@
   auto switch_statement = factory()->NewSwitchStatement(labels, switch_pos);
 
   {
-    BlockState cases_block_state(zone(), &scope_state_);
-    cases_block_state.set_start_position(scanner()->location().beg_pos);
-    cases_block_state.SetNonlinear();
+    BlockState cases_block_state(zone(), &scope_);
+    scope()->set_start_position(switch_pos);
+    scope()->SetNonlinear();
     typename Types::Target target(this, switch_statement);
 
     bool default_seen = false;
@@ -5042,9 +5301,9 @@
     }
     Expect(Token::RBRACE, CHECK_OK);
 
-    cases_block_state.set_end_position(scanner()->location().end_pos);
-    return impl()->RewriteSwitchStatement(
-        tag, switch_statement, cases, cases_block_state.FinalizedBlockScope());
+    scope()->set_end_position(scanner()->location().end_pos);
+    return impl()->RewriteSwitchStatement(tag, switch_statement, cases,
+                                          scope()->FinalizeBlockScope());
   }
 }
 
@@ -5073,7 +5332,6 @@
   }
 
   CatchInfo catch_info(this);
-  catch_info.for_promise_reject = allow_natives() && Check(Token::MOD);
 
   if (peek() != Token::CATCH && peek() != Token::FINALLY) {
     ReportMessage(MessageTemplate::kNoCatchOrFinally);
@@ -5091,16 +5349,15 @@
       CollectExpressionsInTailPositionToListScope
           collect_tail_call_expressions_scope(
               function_state_, &catch_info.tail_call_expressions);
-      BlockState catch_block_state(&scope_state_, catch_info.scope);
+      BlockState catch_block_state(&scope_, catch_info.scope);
 
       catch_block = factory()->NewBlock(nullptr, 16, false, kNoSourcePosition);
 
       // Create a block scope to hold any lexical declarations created
       // as part of destructuring the catch parameter.
       {
-        BlockState catch_variable_block_state(zone(), &scope_state_);
-        catch_variable_block_state.set_start_position(
-            scanner()->location().beg_pos);
+        BlockState catch_variable_block_state(zone(), &scope_);
+        scope()->set_start_position(scanner()->location().beg_pos);
         typename Types::Target target(this, catch_block);
 
         // This does not simply call ParsePrimaryExpression to avoid
@@ -5125,10 +5382,8 @@
         catch_info.inner_block = ParseBlock(nullptr, CHECK_OK);
         catch_block->statements()->Add(catch_info.inner_block, zone());
         impl()->ValidateCatchBlock(catch_info, CHECK_OK);
-        catch_variable_block_state.set_end_position(
-            scanner()->location().end_pos);
-        catch_block->set_scope(
-            catch_variable_block_state.FinalizedBlockScope());
+        scope()->set_end_position(scanner()->location().end_pos);
+        catch_block->set_scope(scope()->FinalizeBlockScope());
       }
     }
 
@@ -5153,180 +5408,198 @@
   bool bound_names_are_lexical = false;
 
   // Create an in-between scope for let-bound iteration variables.
-  BlockState for_state(zone(), &scope_state_);
+  BlockState for_state(zone(), &scope_);
   Expect(Token::FOR, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
-  for_state.set_start_position(scanner()->location().beg_pos);
-  for_state.set_is_hidden();
+  scope()->set_start_position(scanner()->location().beg_pos);
+  scope()->set_is_hidden();
 
   StatementT init = impl()->NullStatement();
-  if (peek() != Token::SEMICOLON) {
-    // An initializer is present.
-    if (peek() == Token::VAR || peek() == Token::CONST ||
-        (peek() == Token::LET && IsNextLetKeyword())) {
-      // The initializer contains declarations.
-      ParseVariableDeclarations(kForStatement, &for_info.parsing_result,
-                                nullptr, CHECK_OK);
-      bound_names_are_lexical =
-          IsLexicalVariableMode(for_info.parsing_result.descriptor.mode);
-      for_info.position = scanner()->location().beg_pos;
 
-      if (CheckInOrOf(&for_info.mode)) {
-        // Just one declaration followed by in/of.
-        if (for_info.parsing_result.declarations.length() != 1) {
-          impl()->ReportMessageAt(
-              for_info.parsing_result.bindings_loc,
-              MessageTemplate::kForInOfLoopMultiBindings,
-              ForEachStatement::VisitModeString(for_info.mode));
-          *ok = false;
-          return impl()->NullStatement();
-        }
-        if (for_info.parsing_result.first_initializer_loc.IsValid() &&
-            (is_strict(language_mode()) ||
-             for_info.mode == ForEachStatement::ITERATE ||
-             bound_names_are_lexical ||
-             !impl()->IsIdentifier(
-                 for_info.parsing_result.declarations[0].pattern))) {
-          impl()->ReportMessageAt(
-              for_info.parsing_result.first_initializer_loc,
-              MessageTemplate::kForInOfLoopInitializer,
-              ForEachStatement::VisitModeString(for_info.mode));
-          *ok = false;
-          return impl()->NullStatement();
-        }
+  if (peek() == Token::VAR || peek() == Token::CONST ||
+      (peek() == Token::LET && IsNextLetKeyword())) {
+    // The initializer contains declarations.
+    ParseVariableDeclarations(kForStatement, &for_info.parsing_result, nullptr,
+                              CHECK_OK);
+    bound_names_are_lexical =
+        IsLexicalVariableMode(for_info.parsing_result.descriptor.mode);
+    for_info.position = scanner()->location().beg_pos;
 
-        BlockT init_block = impl()->RewriteForVarInLegacy(for_info);
-
-        auto loop =
-            factory()->NewForEachStatement(for_info.mode, labels, stmt_pos);
-        typename Types::Target target(this, loop);
-
-        int each_keyword_pos = scanner()->location().beg_pos;
-
-        ExpressionT enumerable = impl()->EmptyExpression();
-        if (for_info.mode == ForEachStatement::ITERATE) {
-          ExpressionClassifier classifier(this);
-          enumerable = ParseAssignmentExpression(true, CHECK_OK);
-          impl()->RewriteNonPattern(CHECK_OK);
-        } else {
-          enumerable = ParseExpression(true, CHECK_OK);
-        }
-
-        Expect(Token::RPAREN, CHECK_OK);
-
-        StatementT final_loop = impl()->NullStatement();
-        {
-          ReturnExprScope no_tail_calls(function_state_,
-                                        ReturnExprContext::kInsideForInOfBody);
-          BlockState block_state(zone(), &scope_state_);
-          block_state.set_start_position(scanner()->location().beg_pos);
-
-          StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
-
-          BlockT body_block = impl()->NullBlock();
-          ExpressionT each_variable = impl()->EmptyExpression();
-          impl()->DesugarBindingInForEachStatement(&for_info, &body_block,
-                                                   &each_variable, CHECK_OK);
-          body_block->statements()->Add(body, zone());
-          final_loop = impl()->InitializeForEachStatement(
-              loop, each_variable, enumerable, body_block, each_keyword_pos);
-
-          block_state.set_end_position(scanner()->location().end_pos);
-          body_block->set_scope(block_state.FinalizedBlockScope());
-        }
-
-        init_block =
-            impl()->CreateForEachStatementTDZ(init_block, for_info, ok);
-
-        for_state.set_end_position(scanner()->location().end_pos);
-        Scope* for_scope = for_state.FinalizedBlockScope();
-        // Parsed for-in loop w/ variable declarations.
-        if (!impl()->IsNullStatement(init_block)) {
-          init_block->statements()->Add(final_loop, zone());
-          init_block->set_scope(for_scope);
-          return init_block;
-        } else {
-          DCHECK_NULL(for_scope);
-          return final_loop;
-        }
-      } else {
-        // One or more declaration not followed by in/of.
-        init = impl()->BuildInitializationBlock(
-            &for_info.parsing_result,
-            bound_names_are_lexical ? &for_info.bound_names : nullptr,
-            CHECK_OK);
-      }
-    } else {
-      // The initializer does not contain declarations.
-      int lhs_beg_pos = peek_position();
-      ExpressionClassifier classifier(this);
-      ExpressionT expression = ParseExpressionCoverGrammar(false, CHECK_OK);
-      int lhs_end_pos = scanner()->location().end_pos;
-
-      bool is_for_each = CheckInOrOf(&for_info.mode);
-      bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
-                                              expression->IsObjectLiteral());
-
-      if (is_destructuring) {
-        ValidateAssignmentPattern(CHECK_OK);
-      } else {
-        impl()->RewriteNonPattern(CHECK_OK);
-      }
-
-      if (is_for_each) {
-        // Initializer is reference followed by in/of.
-        if (!is_destructuring) {
-          expression = impl()->CheckAndRewriteReferenceExpression(
-              expression, lhs_beg_pos, lhs_end_pos,
-              MessageTemplate::kInvalidLhsInFor, kSyntaxError, CHECK_OK);
-        }
-
-        auto loop =
-            factory()->NewForEachStatement(for_info.mode, labels, stmt_pos);
-        typename Types::Target target(this, loop);
-
-        int each_keyword_pos = scanner()->location().beg_pos;
-
-        ExpressionT enumerable = impl()->EmptyExpression();
-        if (for_info.mode == ForEachStatement::ITERATE) {
-          ExpressionClassifier classifier(this);
-          enumerable = ParseAssignmentExpression(true, CHECK_OK);
-          impl()->RewriteNonPattern(CHECK_OK);
-        } else {
-          enumerable = ParseExpression(true, CHECK_OK);
-        }
-
-        Expect(Token::RPAREN, CHECK_OK);
-
-        {
-          ReturnExprScope no_tail_calls(function_state_,
-                                        ReturnExprContext::kInsideForInOfBody);
-          BlockState block_state(zone(), &scope_state_);
-          block_state.set_start_position(scanner()->location().beg_pos);
-
-          // For legacy compat reasons, give for loops similar treatment to
-          // if statements in allowing a function declaration for a body
-          StatementT body = ParseScopedStatement(nullptr, true, CHECK_OK);
-          block_state.set_end_position(scanner()->location().end_pos);
-          StatementT final_loop = impl()->InitializeForEachStatement(
-              loop, expression, enumerable, body, each_keyword_pos);
-
-          Scope* for_scope = for_state.FinalizedBlockScope();
-          DCHECK_NULL(for_scope);
-          USE(for_scope);
-          Scope* block_scope = block_state.FinalizedBlockScope();
-          DCHECK_NULL(block_scope);
-          USE(block_scope);
-          return final_loop;
-        }
-      } else {
-        // Initializer is just an expression.
-        init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
-      }
+    if (CheckInOrOf(&for_info.mode)) {
+      return ParseForEachStatementWithDeclarations(stmt_pos, &for_info, labels,
+                                                   ok);
     }
+
+    // One or more declaration not followed by in/of.
+    init = impl()->BuildInitializationBlock(
+        &for_info.parsing_result,
+        bound_names_are_lexical ? &for_info.bound_names : nullptr, CHECK_OK);
+  } else if (peek() != Token::SEMICOLON) {
+    // The initializer does not contain declarations.
+    int lhs_beg_pos = peek_position();
+    ExpressionClassifier classifier(this);
+    ExpressionT expression = ParseExpressionCoverGrammar(false, CHECK_OK);
+    int lhs_end_pos = scanner()->location().end_pos;
+
+    bool is_for_each = CheckInOrOf(&for_info.mode);
+    bool is_destructuring = is_for_each && (expression->IsArrayLiteral() ||
+                                            expression->IsObjectLiteral());
+
+    if (is_destructuring) {
+      ValidateAssignmentPattern(CHECK_OK);
+    } else {
+      impl()->RewriteNonPattern(CHECK_OK);
+    }
+
+    if (is_for_each) {
+      return ParseForEachStatementWithoutDeclarations(stmt_pos, expression,
+                                                      lhs_beg_pos, lhs_end_pos,
+                                                      &for_info, labels, ok);
+    }
+    // Initializer is just an expression.
+    init = factory()->NewExpressionStatement(expression, lhs_beg_pos);
   }
 
   // Standard 'for' loop; we have parsed the initializer at this point.
+  return ParseStandardForLoop(stmt_pos, init, bound_names_are_lexical,
+                              &for_info, &for_state, labels, ok);
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseForEachStatementWithDeclarations(
+    int stmt_pos, ForInfo* for_info, ZoneList<const AstRawString*>* labels,
+    bool* ok) {
+  // Just one declaration followed by in/of.
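+  // E.g. (illustrative): `for (var x, y in o) {}` is reported just below.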
+  if (for_info->parsing_result.declarations.length() != 1) {
+    impl()->ReportMessageAt(for_info->parsing_result.bindings_loc,
+                            MessageTemplate::kForInOfLoopMultiBindings,
+                            ForEachStatement::VisitModeString(for_info->mode));
+    *ok = false;
+    return impl()->NullStatement();
+  }
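+  // An initializer is only tolerated on a sloppy-mode `var` binding in a
+  // for-in loop, e.g. (sketch): `for (var x = 1 in o) {}` is accepted, while
+  // `for (let x = 1 in o) {}` and `for (var x = 1 of xs) {}` are rejected.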
+  if (for_info->parsing_result.first_initializer_loc.IsValid() &&
+      (is_strict(language_mode()) ||
+       for_info->mode == ForEachStatement::ITERATE ||
+       IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
+       !impl()->IsIdentifier(
+           for_info->parsing_result.declarations[0].pattern))) {
+    impl()->ReportMessageAt(for_info->parsing_result.first_initializer_loc,
+                            MessageTemplate::kForInOfLoopInitializer,
+                            ForEachStatement::VisitModeString(for_info->mode));
+    *ok = false;
+    return impl()->NullStatement();
+  }
+
+  BlockT init_block = impl()->RewriteForVarInLegacy(*for_info);
+
+  auto loop = factory()->NewForEachStatement(for_info->mode, labels, stmt_pos);
+  typename Types::Target target(this, loop);
+
+  int each_keyword_pos = scanner()->location().beg_pos;
+
+  ExpressionT enumerable = impl()->EmptyExpression();
+  if (for_info->mode == ForEachStatement::ITERATE) {
+    ExpressionClassifier classifier(this);
+    enumerable = ParseAssignmentExpression(true, CHECK_OK);
+    impl()->RewriteNonPattern(CHECK_OK);
+  } else {
+    enumerable = ParseExpression(true, CHECK_OK);
+  }
+
+  Expect(Token::RPAREN, CHECK_OK);
+
+  StatementT final_loop = impl()->NullStatement();
+  {
+    ReturnExprScope no_tail_calls(function_state_,
+                                  ReturnExprContext::kInsideForInOfBody);
+    BlockState block_state(zone(), &scope_);
+    scope()->set_start_position(scanner()->location().beg_pos);
+
+    StatementT body = ParseStatement(nullptr, CHECK_OK);
+
+    BlockT body_block = impl()->NullBlock();
+    ExpressionT each_variable = impl()->EmptyExpression();
+    impl()->DesugarBindingInForEachStatement(for_info, &body_block,
+                                             &each_variable, CHECK_OK);
+    body_block->statements()->Add(body, zone());
+    final_loop = impl()->InitializeForEachStatement(
+        loop, each_variable, enumerable, body_block, each_keyword_pos);
+
+    scope()->set_end_position(scanner()->location().end_pos);
+    body_block->set_scope(scope()->FinalizeBlockScope());
+  }
+
+  init_block = impl()->CreateForEachStatementTDZ(init_block, *for_info, ok);
+
+  scope()->set_end_position(scanner()->location().end_pos);
+  Scope* for_scope = scope()->FinalizeBlockScope();
+  // Parsed a for-in/of loop with variable declarations.
+  if (!impl()->IsNullStatement(init_block)) {
+    init_block->statements()->Add(final_loop, zone());
+    init_block->set_scope(for_scope);
+    return init_block;
+  }
+
+  DCHECK_NULL(for_scope);
+  return final_loop;
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT
+ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
+    int stmt_pos, ExpressionT expression, int lhs_beg_pos, int lhs_end_pos,
+    ForInfo* for_info, ZoneList<const AstRawString*>* labels, bool* ok) {
+  // Initializer is reference followed by in/of.
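+  // E.g. (illustrative): `for (o.p of xs) {}` passes this check, while
+  // `for (a + b of xs) {}` is reported as an invalid left-hand side.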
+  if (!expression->IsArrayLiteral() && !expression->IsObjectLiteral()) {
+    expression = impl()->CheckAndRewriteReferenceExpression(
+        expression, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
+        kSyntaxError, CHECK_OK);
+  }
+
+  auto loop = factory()->NewForEachStatement(for_info->mode, labels, stmt_pos);
+  typename Types::Target target(this, loop);
+
+  int each_keyword_pos = scanner()->location().beg_pos;
+
+  ExpressionT enumerable = impl()->EmptyExpression();
+  if (for_info->mode == ForEachStatement::ITERATE) {
+    ExpressionClassifier classifier(this);
+    enumerable = ParseAssignmentExpression(true, CHECK_OK);
+    impl()->RewriteNonPattern(CHECK_OK);
+  } else {
+    enumerable = ParseExpression(true, CHECK_OK);
+  }
+
+  Expect(Token::RPAREN, CHECK_OK);
+  Scope* for_scope = scope();
+
+  {
+    ReturnExprScope no_tail_calls(function_state_,
+                                  ReturnExprContext::kInsideForInOfBody);
+    BlockState block_state(zone(), &scope_);
+    scope()->set_start_position(scanner()->location().beg_pos);
+
+    StatementT body = ParseStatement(nullptr, CHECK_OK);
+    scope()->set_end_position(scanner()->location().end_pos);
+    StatementT final_loop = impl()->InitializeForEachStatement(
+        loop, expression, enumerable, body, each_keyword_pos);
+
+    for_scope = for_scope->FinalizeBlockScope();
+    USE(for_scope);
+    DCHECK_NULL(for_scope);
+    Scope* block_scope = scope()->FinalizeBlockScope();
+    USE(block_scope);
+    DCHECK_NULL(block_scope);
+    return final_loop;
+  }
+}
+
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStandardForLoop(
+    int stmt_pos, StatementT init, bool bound_names_are_lexical,
+    ForInfo* for_info, BlockState* for_state,
+    ZoneList<const AstRawString*>* labels, bool* ok) {
   auto loop = factory()->NewForStatement(labels, stmt_pos);
   typename Types::Target target(this, loop);
 
@@ -5339,13 +5612,12 @@
   // If there are let bindings, then condition and the next statement of the
   // for loop must be parsed in a new scope.
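   // E.g. (illustrative): in `for (let i = 0; i < 3; i++) fs.push(() => i);`
   // each iteration captures its own `i`; the desugaring below arranges this.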
   Scope* inner_scope = scope();
-  // TODO(verwaest): Allocate this through a ScopeState as well.
-  if (bound_names_are_lexical && for_info.bound_names.length() > 0) {
+  if (bound_names_are_lexical && for_info->bound_names.length() > 0) {
     inner_scope = NewScopeWithParent(inner_scope, BLOCK_SCOPE);
     inner_scope->set_start_position(scanner()->location().beg_pos);
   }
   {
-    BlockState block_state(&scope_state_, inner_scope);
+    BlockState block_state(&scope_, inner_scope);
 
     if (peek() != Token::SEMICOLON) {
       cond = ParseExpression(true, CHECK_OK);
@@ -5358,53 +5630,201 @@
     }
     Expect(Token::RPAREN, CHECK_OK);
 
-    body = ParseScopedStatement(nullptr, true, CHECK_OK);
+    body = ParseStatement(nullptr, CHECK_OK);
   }
 
-  if (bound_names_are_lexical && for_info.bound_names.length() > 0) {
+  if (bound_names_are_lexical && for_info->bound_names.length() > 0) {
     auto result = impl()->DesugarLexicalBindingsInForStatement(
-        loop, init, cond, next, body, inner_scope, for_info, CHECK_OK);
-    for_state.set_end_position(scanner()->location().end_pos);
+        loop, init, cond, next, body, inner_scope, *for_info, CHECK_OK);
+    scope()->set_end_position(scanner()->location().end_pos);
+    inner_scope->set_end_position(scanner()->location().end_pos);
     return result;
-  } else {
-    for_state.set_end_position(scanner()->location().end_pos);
-    Scope* for_scope = for_state.FinalizedBlockScope();
-    if (for_scope != nullptr) {
-      // Rewrite a for statement of the form
-      //   for (const x = i; c; n) b
-      //
-      // into
-      //
-      //   {
-      //     const x = i;
-      //     for (; c; n) b
-      //   }
-      //
-      // or, desugar
-      //   for (; c; n) b
-      // into
-      //   {
-      //     for (; c; n) b
-      //   }
-      // just in case b introduces a lexical binding some other way, e.g., if b
-      // is a FunctionDeclaration.
-      BlockT block = factory()->NewBlock(nullptr, 2, false, kNoSourcePosition);
-      if (!impl()->IsNullStatement(init)) {
-        block->statements()->Add(init, zone());
-      }
-      block->statements()->Add(loop, zone());
-      block->set_scope(for_scope);
-      loop->Initialize(init, cond, next, body);
-      return block;
-    } else {
-      loop->Initialize(init, cond, next, body);
-      return loop;
+  }
+
+  scope()->set_end_position(scanner()->location().end_pos);
+  Scope* for_scope = scope()->FinalizeBlockScope();
+  if (for_scope != nullptr) {
+    // Rewrite a for statement of the form
+    //   for (const x = i; c; n) b
+    //
+    // into
+    //
+    //   {
+    //     const x = i;
+    //     for (; c; n) b
+    //   }
+    //
+    // or, desugar
+    //   for (; c; n) b
+    // into
+    //   {
+    //     for (; c; n) b
+    //   }
+    // just in case b introduces a lexical binding some other way, e.g., if b
+    // is a FunctionDeclaration.
+    BlockT block = factory()->NewBlock(nullptr, 2, false, kNoSourcePosition);
+    if (!impl()->IsNullStatement(init)) {
+      block->statements()->Add(init, zone());
     }
+    block->statements()->Add(loop, zone());
+    block->set_scope(for_scope);
+    loop->Initialize(init, cond, next, body);
+    return block;
+  }
+
+  loop->Initialize(init, cond, next, body);
+  return loop;
+}
+
+template <typename Impl>
+void ParserBase<Impl>::MarkLoopVariableAsAssigned(Scope* scope, Variable* var) {
+  if (!IsLexicalVariableMode(var->mode()) && !scope->is_function_scope()) {
+    var->set_maybe_assigned();
   }
 }
 
-#undef CHECK_OK
-#undef CHECK_OK_CUSTOM
+template <typename Impl>
+typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForAwaitStatement(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
+  // 'for' 'await' '(' ForDeclaration 'of' AssignmentExpression ')' Statement
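+  // E.g. (illustrative), inside an async function:
+  //   for await (const chunk of stream) process(chunk);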
+  DCHECK(is_async_function());
+  DCHECK(allow_harmony_async_iteration());
+
+  int stmt_pos = peek_position();
+
+  ForInfo for_info(this);
+  for_info.mode = ForEachStatement::ITERATE;
+
+  // Create an in-between scope for let-bound iteration variables.
+  BlockState for_state(zone(), &scope_);
+  Expect(Token::FOR, CHECK_OK);
+  Expect(Token::AWAIT, CHECK_OK);
+  Expect(Token::LPAREN, CHECK_OK);
+  scope()->set_start_position(scanner()->location().beg_pos);
+  scope()->set_is_hidden();
+
+  auto loop = factory()->NewForOfStatement(labels, stmt_pos);
+  typename Types::Target target(this, loop);
+
+  ExpressionT each_variable = impl()->EmptyExpression();
+
+  bool has_declarations = false;
+
+  if (peek() == Token::VAR || peek() == Token::CONST ||
+      (peek() == Token::LET && IsNextLetKeyword())) {
+    // The initializer contains declarations
+    // 'for' 'await' '(' ForDeclaration 'of' AssignmentExpression ')'
+    //     Statement
+    // 'for' 'await' '(' 'var' ForBinding 'of' AssignmentExpression ')'
+    //     Statement
+    has_declarations = true;
+    ParseVariableDeclarations(kForStatement, &for_info.parsing_result, nullptr,
+                              CHECK_OK);
+    for_info.position = scanner()->location().beg_pos;
+
+    // Only a single declaration is allowed in for-await-of loops
+    if (for_info.parsing_result.declarations.length() != 1) {
+      impl()->ReportMessageAt(for_info.parsing_result.bindings_loc,
+                              MessageTemplate::kForInOfLoopMultiBindings,
+                              "for-await-of");
+      *ok = false;
+      return impl()->NullStatement();
+    }
+
+    // for-await-of's declarations do not permit initializers.
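+    // E.g. (illustrative): `for await (let x = 0 of xs) {}` is rejected here.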
+    if (for_info.parsing_result.first_initializer_loc.IsValid()) {
+      impl()->ReportMessageAt(for_info.parsing_result.first_initializer_loc,
+                              MessageTemplate::kForInOfLoopInitializer,
+                              "for-await-of");
+      *ok = false;
+      return impl()->NullStatement();
+    }
+  } else {
+    // The initializer does not contain declarations.
+    // 'for' 'await' '(' LeftHandSideExpression 'of' AssignmentExpression ')'
+    //     Statement
+    int lhs_beg_pos = peek_position();
+    ExpressionClassifier classifier(this);
+    ExpressionT lhs = each_variable = ParseLeftHandSideExpression(CHECK_OK);
+    int lhs_end_pos = scanner()->location().end_pos;
+
+    if (lhs->IsArrayLiteral() || lhs->IsObjectLiteral()) {
+      ValidateAssignmentPattern(CHECK_OK);
+    } else {
+      impl()->RewriteNonPattern(CHECK_OK);
+      each_variable = impl()->CheckAndRewriteReferenceExpression(
+          lhs, lhs_beg_pos, lhs_end_pos, MessageTemplate::kInvalidLhsInFor,
+          kSyntaxError, CHECK_OK);
+    }
+  }
+
+  ExpectContextualKeyword(CStrVector("of"), CHECK_OK);
+  int each_keyword_pos = scanner()->location().beg_pos;
+
+  const bool kAllowIn = true;
+  ExpressionT iterable = impl()->EmptyExpression();
+
+  {
+    ExpressionClassifier classifier(this);
+    iterable = ParseAssignmentExpression(kAllowIn, CHECK_OK);
+    impl()->RewriteNonPattern(CHECK_OK);
+  }
+
+  Expect(Token::RPAREN, CHECK_OK);
+
+  StatementT final_loop = impl()->NullStatement();
+  Scope* for_scope = scope();
+  {
+    ReturnExprScope no_tail_calls(function_state_,
+                                  ReturnExprContext::kInsideForInOfBody);
+    BlockState block_state(zone(), &scope_);
+    scope()->set_start_position(scanner()->location().beg_pos);
+
+    StatementT body = ParseStatement(nullptr, CHECK_OK);
+    scope()->set_end_position(scanner()->location().end_pos);
+
+    if (has_declarations) {
+      BlockT body_block = impl()->NullBlock();
+      impl()->DesugarBindingInForEachStatement(&for_info, &body_block,
+                                               &each_variable, CHECK_OK);
+      body_block->statements()->Add(body, zone());
+      body_block->set_scope(scope()->FinalizeBlockScope());
+
+      const bool finalize = true;
+      final_loop = impl()->InitializeForOfStatement(
+          loop, each_variable, iterable, body_block, finalize,
+          IteratorType::kAsync, each_keyword_pos);
+    } else {
+      const bool finalize = true;
+      final_loop = impl()->InitializeForOfStatement(
+          loop, each_variable, iterable, body, finalize, IteratorType::kAsync,
+          each_keyword_pos);
+
+      for_scope = for_scope->FinalizeBlockScope();
+      DCHECK_NULL(for_scope);
+      USE(for_scope);
+      Scope* block_scope = scope()->FinalizeBlockScope();
+      DCHECK_NULL(block_scope);
+      USE(block_scope);
+      return final_loop;
+    }
+  }
+
+  DCHECK(has_declarations);
+  BlockT init_block =
+      impl()->CreateForEachStatementTDZ(impl()->NullBlock(), for_info, ok);
+
+  for_scope->set_end_position(scanner()->location().end_pos);
+  for_scope = for_scope->FinalizeBlockScope();
+  // Parsed a for-await-of loop with variable declarations.
+  if (!impl()->IsNullStatement(init_block)) {
+    init_block->statements()->Add(final_loop, zone());
+    init_block->set_scope(for_scope);
+    return init_block;
+  }
+  DCHECK_NULL(for_scope);
+  return final_loop;
+}
 
 template <typename Impl>
 void ParserBase<Impl>::ObjectLiteralChecker::CheckDuplicateProto(
@@ -5456,6 +5876,9 @@
   }
 }
 
+#undef CHECK_OK
+#undef CHECK_OK_CUSTOM
+#undef CHECK_OK_VOID
 
 }  // namespace internal
 }  // namespace v8
diff --git a/src/parsing/parser.cc b/src/parsing/parser.cc
index 8d88901..cc6b6a2 100644
--- a/src/parsing/parser.cc
+++ b/src/parsing/parser.cc
@@ -8,13 +8,14 @@
 
 #include "src/api.h"
 #include "src/ast/ast-expression-rewriter.h"
-#include "src/ast/ast-literal-reindexer.h"
+#include "src/ast/ast-function-literal-id-reindexer.h"
 #include "src/ast/ast-traversal-visitor.h"
 #include "src/ast/ast.h"
 #include "src/bailout-reason.h"
 #include "src/base/platform/platform.h"
 #include "src/char-predicates-inl.h"
 #include "src/messages.h"
+#include "src/objects-inl.h"
 #include "src/parsing/duplicate-finder.h"
 #include "src/parsing/parameter-initializer-rewriter.h"
 #include "src/parsing/parse-info.h"
@@ -112,8 +113,13 @@
         fni_(parser->ast_value_factory_, temp_zone),
         parser_(parser),
         prev_fni_(parser->fni_),
-        prev_zone_(parser->zone_) {
+        prev_zone_(parser->zone_),
+        prev_allow_lazy_(parser->allow_lazy_),
+        prev_temp_zoned_(parser->temp_zoned_) {
     if (use_temp_zone) {
+      DCHECK(!parser_->temp_zoned_);
+      parser_->allow_lazy_ = false;
+      parser_->temp_zoned_ = true;
       parser_->fni_ = &fni_;
       parser_->zone_ = temp_zone;
       if (parser_->reusable_preparser_ != nullptr) {
@@ -125,6 +131,8 @@
   void Reset() {
     parser_->fni_ = prev_fni_;
     parser_->zone_ = prev_zone_;
+    parser_->allow_lazy_ = prev_allow_lazy_;
+    parser_->temp_zoned_ = prev_temp_zoned_;
     if (parser_->reusable_preparser_ != nullptr) {
       parser_->reusable_preparser_->zone_ = prev_zone_;
       parser_->reusable_preparser_->factory()->set_zone(prev_zone_);
@@ -139,6 +147,8 @@
   Parser* parser_;
   FuncNameInferrer* prev_fni_;
   Zone* prev_zone_;
+  bool prev_allow_lazy_;
+  bool prev_temp_zoned_;
 
   DISALLOW_COPY_AND_ASSIGN(DiscardableZoneScope);
 };
@@ -146,96 +156,36 @@
 void Parser::SetCachedData(ParseInfo* info) {
   DCHECK_NULL(cached_parse_data_);
   if (consume_cached_parse_data()) {
-    cached_parse_data_ = ParseData::FromCachedData(*info->cached_data());
-    if (cached_parse_data_ == nullptr) {
-      compile_options_ = ScriptCompiler::kNoCompileOptions;
+    if (allow_lazy_) {
+      cached_parse_data_ = ParseData::FromCachedData(*info->cached_data());
+      if (cached_parse_data_ != nullptr) return;
     }
+    compile_options_ = ScriptCompiler::kNoCompileOptions;
   }
 }
 
-Expression* Parser::CallClassFieldInitializer(Scope* scope,
-                                              Expression* this_expr) {
-  // This produces the expression
-  // `.class_field_intializer(this_expr)`, where '.class_field_intializer' is
-  // the name
-  // of a synthetic variable.
-  // 'this_expr' will be 'this' in a base constructor and the result of calling
-  // 'super' in a derived one.
-  const AstRawString* init_fn_name =
-      ast_value_factory()->dot_class_field_init_string();
-  VariableProxy* init_fn_proxy = scope->NewUnresolved(factory(), init_fn_name);
-  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
-  args->Add(init_fn_proxy, zone());
-  args->Add(this_expr, zone());
-  return factory()->NewCallRuntime(Runtime::kInlineCall, args,
-                                   kNoSourcePosition);
-}
-
-Expression* Parser::RewriteSuperCall(Expression* super_call) {
-  // TODO(bakkot) find a way to avoid this for classes without fields.
-  if (!allow_harmony_class_fields()) {
-    return super_call;
-  }
-  // This turns a super call `super()` into a do expression of the form
-  // do {
-  //   tmp x = super();
-  //   if (.class-field-init)
-  //     .class-field-init(x)
-  //   x; // This isn't actually present; our do-expression representation
-  // allows specifying that the expression returns x directly.
-  // }
-  Variable* var_tmp =
-      scope()->NewTemporary(ast_value_factory()->empty_string());
-  Block* block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
-  Assignment* assignment = factory()->NewAssignment(
-      Token::ASSIGN, factory()->NewVariableProxy(var_tmp), super_call,
-      kNoSourcePosition);
-  block->statements()->Add(
-      factory()->NewExpressionStatement(assignment, kNoSourcePosition), zone());
-  const AstRawString* init_fn_name =
-      ast_value_factory()->dot_class_field_init_string();
-  VariableProxy* init_fn_proxy =
-      scope()->NewUnresolved(factory(), init_fn_name);
-  Expression* condition = init_fn_proxy;
-  Statement* initialize = factory()->NewExpressionStatement(
-      CallClassFieldInitializer(scope(), factory()->NewVariableProxy(var_tmp)),
-      kNoSourcePosition);
-  IfStatement* if_statement = factory()->NewIfStatement(
-      condition, initialize, factory()->NewEmptyStatement(kNoSourcePosition),
-      kNoSourcePosition);
-  block->statements()->Add(if_statement, zone());
-  return factory()->NewDoExpression(block, var_tmp, kNoSourcePosition);
-}
-
 FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
-                                            bool call_super,
-                                            bool requires_class_field_init,
-                                            int pos, int end_pos,
-                                            LanguageMode language_mode) {
-  int materialized_literal_count = -1;
+                                            bool call_super, int pos,
+                                            int end_pos) {
   int expected_property_count = -1;
   const int parameter_count = 0;
   if (name == nullptr) name = ast_value_factory()->empty_string();
 
-  FunctionKind kind = call_super ? FunctionKind::kDefaultSubclassConstructor
+  FunctionKind kind = call_super ? FunctionKind::kDefaultDerivedConstructor
                                  : FunctionKind::kDefaultBaseConstructor;
   DeclarationScope* function_scope = NewFunctionScope(kind);
-  SetLanguageMode(function_scope,
-                  static_cast<LanguageMode>(language_mode | STRICT));
+  SetLanguageMode(function_scope, STRICT);
   // Set start and end position to the same value
   function_scope->set_start_position(pos);
   function_scope->set_end_position(pos);
   ZoneList<Statement*>* body = NULL;
 
   {
-    FunctionState function_state(&function_state_, &scope_state_,
-                                 function_scope);
+    FunctionState function_state(&function_state_, &scope_, function_scope);
 
     body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
     if (call_super) {
-      // $super_constructor = %_GetSuperConstructor(<this-function>)
-      // %reflect_construct(
-      //     $super_constructor, InternalArray(...args), new.target)
+      // Create a SuperCallReference and handle it in the BytecodeGenerator.
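+      // The synthesized default derived constructor behaves like (sketch):
+      //   constructor(...args) { super(...args); }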
       auto constructor_args_name = ast_value_factory()->empty_string();
       bool is_duplicate;
       bool is_rest = true;
@@ -245,44 +195,24 @@
           ast_value_factory());
 
       ZoneList<Expression*>* args =
-          new (zone()) ZoneList<Expression*>(2, zone());
-      VariableProxy* this_function_proxy =
-          NewUnresolved(ast_value_factory()->this_function_string(), pos);
-      ZoneList<Expression*>* tmp =
           new (zone()) ZoneList<Expression*>(1, zone());
-      tmp->Add(this_function_proxy, zone());
-      Expression* super_constructor = factory()->NewCallRuntime(
-          Runtime::kInlineGetSuperConstructor, tmp, pos);
-      args->Add(super_constructor, zone());
       Spread* spread_args = factory()->NewSpread(
           factory()->NewVariableProxy(constructor_args), pos, pos);
-      ZoneList<Expression*>* spread_args_expr =
-          new (zone()) ZoneList<Expression*>(1, zone());
-      spread_args_expr->Add(spread_args, zone());
-      args->AddAll(*PrepareSpreadArguments(spread_args_expr), zone());
-      VariableProxy* new_target_proxy =
-          NewUnresolved(ast_value_factory()->new_target_string(), pos);
-      args->Add(new_target_proxy, zone());
-      Expression* call = factory()->NewCallRuntime(
-          Context::REFLECT_CONSTRUCT_INDEX, args, pos);
-      if (requires_class_field_init) {
-        call = CallClassFieldInitializer(scope(), call);
-      }
+
+      args->Add(spread_args, zone());
+      Expression* super_call_ref = NewSuperCallReference(pos);
+      Expression* call = factory()->NewCall(super_call_ref, args, pos);
       body->Add(factory()->NewReturnStatement(call, pos), zone());
     }
 
-    materialized_literal_count = function_state.materialized_literal_count();
     expected_property_count = function_state.expected_property_count();
   }
 
   FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
-      name, function_scope, body, materialized_literal_count,
-      expected_property_count, parameter_count, parameter_count,
-      FunctionLiteral::kNoDuplicateParameters,
+      name, function_scope, body, expected_property_count, parameter_count,
+      parameter_count, FunctionLiteral::kNoDuplicateParameters,
       FunctionLiteral::kAnonymousExpression, default_eager_compile_hint(), pos,
-      true);
-
-  function_literal->set_requires_class_field_init(requires_class_field_init);
+      true, GetNextFunctionLiteralId());
 
   return function_literal;
 }
@@ -454,8 +384,8 @@
   // this_function[home_object_symbol]
   VariableProxy* this_function_proxy =
       NewUnresolved(ast_value_factory()->this_function_string(), pos);
-  Expression* home_object_symbol_literal =
-      factory()->NewSymbolLiteral("home_object_symbol", kNoSourcePosition);
+  Expression* home_object_symbol_literal = factory()->NewSymbolLiteral(
+      AstSymbol::kHomeObjectSymbol, kNoSourcePosition);
   Expression* home_object = factory()->NewProperty(
       this_function_proxy, home_object_symbol_literal, pos);
   return factory()->NewSuperPropertyReference(
@@ -511,15 +441,6 @@
   return NULL;
 }
 
-Expression* Parser::GetIterator(Expression* iterable, int pos) {
-  Expression* iterator_symbol_literal =
-      factory()->NewSymbolLiteral("iterator_symbol", kNoSourcePosition);
-  Expression* prop =
-      factory()->NewProperty(iterable, iterator_symbol_literal, pos);
-  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(0, zone());
-  return factory()->NewCall(prop, args, pos);
-}
-
 void Parser::MarkTailPosition(Expression* expression) {
   expression->MarkTail();
 }
@@ -582,7 +503,8 @@
 Parser::Parser(ParseInfo* info)
     : ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
                          info->extension(), info->ast_value_factory(),
-                         info->isolate()->counters()->runtime_call_stats()),
+                         info->isolate()->counters()->runtime_call_stats(),
+                         true),
       scanner_(info->unicode_cache()),
       reusable_preparser_(nullptr),
       original_scope_(nullptr),
@@ -591,8 +513,10 @@
       compile_options_(info->compile_options()),
       cached_parse_data_(nullptr),
       total_preparse_skipped_(0),
-      parsing_on_main_thread_(true),
-      log_(nullptr) {
+      temp_zoned_(false),
+      log_(nullptr),
+      preparsed_scope_data_(info->preparsed_scope_data()),
+      parameters_end_pos_(info->parameters_end_pos()) {
   // Even though we were passed ParseInfo, we should not store it in
   // Parser - this makes sure that Isolate is not accidentally accessed via
   // ParseInfo during background parsing.
@@ -615,25 +539,28 @@
   set_default_eager_compile_hint(can_compile_lazily
                                      ? FunctionLiteral::kShouldLazyCompile
                                      : FunctionLiteral::kShouldEagerCompile);
-  set_allow_lazy(FLAG_lazy && info->allow_lazy_parsing() &&
-                 !info->is_native() && info->extension() == nullptr &&
-                 can_compile_lazily);
+  allow_lazy_ = FLAG_lazy && info->allow_lazy_parsing() && !info->is_native() &&
+                info->extension() == nullptr && can_compile_lazily;
   set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
   set_allow_tailcalls(FLAG_harmony_tailcalls && !info->is_native() &&
                       info->isolate()->is_tail_call_elimination_enabled());
   set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
   set_allow_harmony_function_sent(FLAG_harmony_function_sent);
-  set_allow_harmony_async_await(FLAG_harmony_async_await);
   set_allow_harmony_restrictive_generators(FLAG_harmony_restrictive_generators);
   set_allow_harmony_trailing_commas(FLAG_harmony_trailing_commas);
   set_allow_harmony_class_fields(FLAG_harmony_class_fields);
+  set_allow_harmony_object_rest_spread(FLAG_harmony_object_rest_spread);
+  set_allow_harmony_dynamic_import(FLAG_harmony_dynamic_import);
+  set_allow_harmony_async_iteration(FLAG_harmony_async_iteration);
+  set_allow_harmony_template_escapes(FLAG_harmony_template_escapes);
   for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
        ++feature) {
     use_counts_[feature] = 0;
   }
   if (info->ast_value_factory() == NULL) {
     // info takes ownership of AstValueFactory.
-    info->set_ast_value_factory(new AstValueFactory(zone(), info->hash_seed()));
+    info->set_ast_value_factory(new AstValueFactory(
+        zone(), info->isolate()->ast_string_constants(), info->hash_seed()));
     info->set_ast_value_factory_owned();
     ast_value_factory_ = info->ast_value_factory();
     ast_node_factory_.set_ast_value_factory(ast_value_factory_);
@@ -665,7 +592,6 @@
   // It's OK to use the Isolate & counters here, since this function is only
   // called on the main thread.
   DCHECK(parsing_on_main_thread_);
-
   RuntimeCallTimerScope runtime_timer(
       runtime_call_stats_, info->is_eval() ? &RuntimeCallStats::ParseEval
                                            : &RuntimeCallStats::ParseProgram);
@@ -682,7 +608,11 @@
   ParserLogger logger;
 
   if (produce_cached_parse_data()) {
-    log_ = &logger;
+    if (allow_lazy_) {
+      log_ = &logger;
+    } else {
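+      // Cached parse data is only useful when functions can be skipped via
+      // lazy parsing; without lazy parsing, stop producing it entirely.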
+      compile_options_ = ScriptCompiler::kNoCompileOptions;
+    }
   } else if (consume_cached_parse_data()) {
     cached_parse_data_->Initialize();
   }
@@ -727,10 +657,13 @@
   // Note that this function can be called from the main thread or from a
   // background thread. We should not access anything Isolate / heap dependent
   // via ParseInfo, and also not pass it forward.
-  DCHECK_NULL(scope_state_);
+  DCHECK_NULL(scope_);
   DCHECK_NULL(target_stack_);
 
-  ParsingModeScope mode(this, allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY);
+  ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
+  ResetFunctionLiteralId();
+  DCHECK(info->function_literal_id() == FunctionLiteral::kIdTypeTopLevel ||
+         info->function_literal_id() == FunctionLiteral::kIdTypeInvalid);
 
   FunctionLiteral* result = NULL;
   {
@@ -748,7 +681,7 @@
 
     scope->set_start_position(0);
 
-    FunctionState function_state(&function_state_, &scope_state_, scope);
+    FunctionState function_state(&function_state_, &scope_, scope);
 
     ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
     bool ok = true;
@@ -764,7 +697,7 @@
       DCHECK(!is_duplicate);
       var->AllocateTo(VariableLocation::PARAMETER, 0);
 
-      PrepareGeneratorVariables(&function_state);
+      PrepareGeneratorVariables();
       Expression* initial_yield =
           BuildInitialYield(kNoSourcePosition, kGeneratorFunction);
       body->Add(
@@ -788,8 +721,6 @@
 
     if (ok && is_strict(language_mode())) {
       CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
-      CheckDecimalLiteralWithLeadingZero(beg_pos,
-                                         scanner()->location().end_pos);
     }
     if (ok && is_sloppy(language_mode())) {
       // TODO(littledan): Function bindings on the global object that modify
@@ -816,11 +747,13 @@
       RewriteDestructuringAssignments();
       int parameter_count = parsing_module_ ? 1 : 0;
       result = factory()->NewScriptOrEvalFunctionLiteral(
-          scope, body, function_state.materialized_literal_count(),
-          function_state.expected_property_count(), parameter_count);
+          scope, body, function_state.expected_property_count(),
+          parameter_count);
     }
   }
 
+  info->set_max_function_literal_id(GetLastFunctionLiteralId());
+
   // Make sure the target stack is empty.
   DCHECK(target_stack_ == NULL);
 
@@ -842,6 +775,12 @@
   }
   Handle<SharedFunctionInfo> shared_info = info->shared_info();
   DeserializeScopeChain(info, info->maybe_outer_scope_info());
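+  // If an asm.js function scope is provided (presumably when reparsing a
+  // single asm.js function), parse within that scope rather than the
+  // deserialized scope chain, and allocate AST nodes in the info's zone.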
+  if (info->asm_function_scope()) {
+    original_scope_ = info->asm_function_scope();
+    factory()->set_zone(info->zone());
+  } else {
+    DCHECK_EQ(factory()->zone(), info->zone());
+  }
 
   // Initialize parser state.
   source = String::Flatten(source);
@@ -884,13 +823,17 @@
                                          const AstRawString* raw_name,
                                          Utf16CharacterStream* source) {
   scanner_.Initialize(source);
-  DCHECK_NULL(scope_state_);
+  DCHECK_NULL(scope_);
   DCHECK_NULL(target_stack_);
 
   DCHECK(ast_value_factory());
   fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
   fni_->PushEnclosingName(raw_name);
 
+  ResetFunctionLiteralId();
+  DCHECK_LT(0, info->function_literal_id());
+  SkipFunctionLiterals(info->function_literal_id() - 1);
+
   ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
 
   // Placeholder for the result.
@@ -901,9 +844,8 @@
     Scope* outer = original_scope_;
     DeclarationScope* outer_function = outer->GetClosureScope();
     DCHECK(outer);
-    FunctionState function_state(&function_state_, &scope_state_,
-                                 outer_function);
-    BlockState block_state(&scope_state_, outer);
+    FunctionState function_state(&function_state_, &scope_, outer_function);
+    BlockState block_state(&scope_, outer);
     DCHECK(is_sloppy(outer->language_mode()) ||
            is_strict(info->language_mode()));
     FunctionLiteral::FunctionType function_type = ComputeFunctionType(info);
@@ -911,7 +853,7 @@
     bool ok = true;
 
     if (IsArrowFunction(kind)) {
-      if (allow_harmony_async_await() && IsAsyncFunction(kind)) {
+      if (IsAsyncFunction(kind)) {
         DCHECK(!scanner()->HasAnyLineTerminatorAfterNext());
         if (!Check(Token::ASYNC)) {
           CHECK(stack_overflow());
@@ -938,12 +880,14 @@
       scope->set_start_position(info->start_position());
       ExpressionClassifier formals_classifier(this);
       ParserFormalParameters formals(scope);
+      int rewritable_length =
+          function_state.destructuring_assignments_to_rewrite().length();
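+      // (Records how many destructuring assignments are already pending, so
+      // the arrow function can later distinguish the ones introduced while
+      // parsing its own parameters and body.)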
       Checkpoint checkpoint(this);
       {
         // Parsing patterns as variable reference expressions creates
         // NewUnresolved references in the current scope. Enter the arrow
         // function scope for formal parameter parsing.
-        BlockState block_state(&scope_state_, scope);
+        BlockState block_state(&scope_, scope);
         if (Check(Token::LPAREN)) {
           // '(' StrictFormalParameters ')'
           ParseFormalParameterList(&formals, &ok);
@@ -951,15 +895,31 @@
         } else {
           // BindingIdentifier
           ParseFormalParameter(&formals, &ok);
-          if (ok) DeclareFormalParameter(formals.scope, formals.at(0));
+          if (ok) DeclareFormalParameters(formals.scope, formals.params);
         }
       }
 
       if (ok) {
         checkpoint.Restore(&formals.materialized_literals_count);
+        if (GetLastFunctionLiteralId() != info->function_literal_id() - 1) {
+          // If there were FunctionLiterals in the parameters, we need to
+          // renumber them to shift down so the next function literal id for
+          // the arrow function is the one requested.
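+          // (Illustrative case: `(a = function() {}) => a`, where the
+          // parameter default value is itself a FunctionLiteral.)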
+          AstFunctionLiteralIdReindexer reindexer(
+              stack_limit_,
+              (info->function_literal_id() - 1) - GetLastFunctionLiteralId());
+          for (auto p : formals.params) {
+            if (p->pattern != nullptr) reindexer.Reindex(p->pattern);
+            if (p->initializer != nullptr) reindexer.Reindex(p->initializer);
+          }
+          ResetFunctionLiteralId();
+          SkipFunctionLiterals(info->function_literal_id() - 1);
+        }
+
         // Pass `accept_IN=true` to ParseArrowFunctionLiteral; this should
         // not be observable, or else the preparser would have failed.
-        Expression* expression = ParseArrowFunctionLiteral(true, formals, &ok);
+        Expression* expression =
+            ParseArrowFunctionLiteral(true, formals, rewritable_length, &ok);
         if (ok) {
           // Scanning must end at the same position that was recorded
           // previously. If not, parsing has been interrupted due to a stack
@@ -972,6 +932,10 @@
             // must produce a FunctionLiteral.
             DCHECK(expression->IsFunctionLiteral());
             result = expression->AsFunctionLiteral();
+            // Rewrite destructuring assignments in the parameters. (The ones
+            // inside the function body are rewritten by
+            // ParseArrowFunctionLiteral.)
+            RewriteDestructuringAssignments();
           } else {
             ok = false;
           }
@@ -979,29 +943,12 @@
       }
     } else if (IsDefaultConstructor(kind)) {
       DCHECK_EQ(scope(), outer);
-      bool is_subclass_constructor = IsSubclassConstructor(kind);
-      result = DefaultConstructor(
-          raw_name, is_subclass_constructor, info->requires_class_field_init(),
-          info->start_position(), info->end_position(), info->language_mode());
-      if (!is_subclass_constructor && info->requires_class_field_init()) {
-        result = InsertClassFieldInitializer(result);
-      }
-    } else if (info->is_class_field_initializer()) {
-      Handle<SharedFunctionInfo> shared_info = info->shared_info();
-      DCHECK(!shared_info.is_null());
-      if (shared_info->length() == 0) {
-        result = ParseClassFieldForInitializer(
-            info->start_position() != info->end_position(), &ok);
-      } else {
-        result = SynthesizeClassFieldInitializer(shared_info->length());
-      }
+      result = DefaultConstructor(raw_name, IsDerivedConstructor(kind),
+                                  info->start_position(), info->end_position());
     } else {
       result = ParseFunctionLiteral(
           raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck, kind,
           kNoSourcePosition, function_type, info->language_mode(), &ok);
-      if (info->requires_class_field_init()) {
-        result = InsertClassFieldInitializer(result);
-      }
     }
     // Make sure the results agree.
     DCHECK(ok == (result != nullptr));
@@ -1009,6 +956,8 @@
 
   // Make sure the target stack is empty.
   DCHECK_NULL(target_stack_);
+  DCHECK_IMPLIES(result,
+                 info->function_literal_id() == result->function_literal_id());
   return result;
 }
 
@@ -1019,15 +968,21 @@
   //    ExportDeclaration
   //    StatementListItem
 
-  switch (peek()) {
-    case Token::IMPORT:
-      ParseImportDeclaration(CHECK_OK);
-      return factory()->NewEmptyStatement(kNoSourcePosition);
-    case Token::EXPORT:
-      return ParseExportDeclaration(ok);
-    default:
-      return ParseStatementListItem(ok);
+  Token::Value next = peek();
+
+  if (next == Token::EXPORT) {
+    return ParseExportDeclaration(ok);
   }
+
+  // We must be careful not to parse a dynamic import expression as an import
+  // declaration.
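+  // (Illustrative example: `import("./mod.js")` is a call-like expression,
+  // while `import x from "./mod.js"` is an import declaration; only the
+  // latter is handled here.)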
+  if (next == Token::IMPORT &&
+      (!allow_harmony_dynamic_import() || PeekAhead() != Token::LPAREN)) {
+    ParseImportDeclaration(CHECK_OK);
+    return factory()->NewEmptyStatement(kNoSourcePosition);
+  }
+
+  return ParseStatementListItem(ok);
 }
 
 
@@ -1290,7 +1245,7 @@
       break;
 
     case Token::ASYNC:
-      if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
+      if (PeekAhead() == Token::FUNCTION &&
           !scanner()->HasAnyLineTerminatorAfterNext()) {
         Consume(Token::ASYNC);
         result = ParseAsyncFunctionDeclaration(&local_names, true, CHECK_OK);
@@ -1423,14 +1378,11 @@
       break;
 
     case Token::ASYNC:
-      if (allow_harmony_async_await()) {
-        // TODO(neis): Why don't we have the same check here as in
-        // ParseStatementListItem?
-        Consume(Token::ASYNC);
-        result = ParseAsyncFunctionDeclaration(&names, false, CHECK_OK);
-        break;
-      }
-    /* falls through */
+      // TODO(neis): Why don't we have the same check here as in
+      // ParseStatementListItem?
+      Consume(Token::ASYNC);
+      result = ParseAsyncFunctionDeclaration(&names, false, CHECK_OK);
+      break;
 
     default:
       *ok = false;
@@ -1533,34 +1485,24 @@
 }
 
 Statement* Parser::DeclareFunction(const AstRawString* variable_name,
-                                   FunctionLiteral* function, int pos,
-                                   bool is_generator, bool is_async,
+                                   FunctionLiteral* function, VariableMode mode,
+                                   int pos, bool is_sloppy_block_function,
                                    ZoneList<const AstRawString*>* names,
                                    bool* ok) {
-  // In ES6, a function behaves as a lexical binding, except in
-  // a script scope, or the initial scope of eval or another function.
-  VariableMode mode =
-      (!scope()->is_declaration_scope() || scope()->is_module_scope()) ? LET
-                                                                       : VAR;
   VariableProxy* proxy =
       factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE);
+
   Declaration* declaration =
       factory()->NewFunctionDeclaration(proxy, function, scope(), pos);
   Declare(declaration, DeclarationDescriptor::NORMAL, mode, kCreatedInitialized,
           CHECK_OK);
   if (names) names->Add(variable_name, zone());
-  // Async functions don't undergo sloppy mode block scoped hoisting, and don't
-  // allow duplicates in a block. Both are represented by the
-  // sloppy_block_function_map. Don't add them to the map for async functions.
-  // Generators are also supposed to be prohibited; currently doing this behind
-  // a flag and UseCounting violations to assess web compatibility.
-  if (is_sloppy(language_mode()) && !scope()->is_declaration_scope() &&
-      !is_async && !(allow_harmony_restrictive_generators() && is_generator)) {
-    SloppyBlockFunctionStatement* delegate =
-        factory()->NewSloppyBlockFunctionStatement(scope());
-    DeclarationScope* target_scope = GetDeclarationScope();
-    target_scope->DeclareSloppyBlockFunction(variable_name, delegate);
-    return delegate;
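+  // Sloppy-mode function declarations in blocks may still be hoisted to the
+  // enclosing declaration scope per Annex B.3.3; record them so that decision
+  // can be made once the surrounding scope is fully known.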
+  if (is_sloppy_block_function) {
+    SloppyBlockFunctionStatement* statement =
+        factory()->NewSloppyBlockFunctionStatement();
+    GetDeclarationScope()->DeclareSloppyBlockFunction(variable_name, scope(),
+                                                      statement);
+    return statement;
   }
   return factory()->NewEmptyStatement(kNoSourcePosition);
 }
@@ -1601,6 +1543,7 @@
 
 ZoneList<const AstRawString*>* Parser::DeclareLabel(
     ZoneList<const AstRawString*>* labels, VariableProxy* var, bool* ok) {
+  DCHECK(IsIdentifier(var));
   const AstRawString* label = var->raw_name();
   // TODO(1240780): We don't check for redeclaration of labels
   // during preparsing since keeping track of the set of active
@@ -1635,7 +1578,7 @@
 }
 
 Expression* Parser::RewriteReturn(Expression* return_value, int pos) {
-  if (IsSubclassConstructor(function_state_->kind())) {
+  if (IsDerivedConstructor(function_state_->kind())) {
     // For derived constructors we need to return `this` in case of an
     // undefined return value, and to return a Smi (transformed into an
     // exception in the ConstructStub) for a non-object return value.
@@ -1675,8 +1618,6 @@
   }
   if (is_generator()) {
     return_value = BuildIteratorResult(return_value, true);
-  } else if (is_async_function()) {
-    return_value = BuildResolvePromise(return_value, return_value->position());
   }
   return return_value;
 }
@@ -1729,6 +1670,10 @@
   Block* cases_block = factory()->NewBlock(NULL, 1, false, kNoSourcePosition);
   cases_block->statements()->Add(switch_statement, zone());
   cases_block->set_scope(scope);
+  DCHECK_IMPLIES(scope != nullptr,
+                 switch_statement->position() >= scope->start_position());
+  DCHECK_IMPLIES(scope != nullptr,
+                 switch_statement->position() < scope->end_position());
   switch_block->statements()->Add(cases_block, zone());
   return switch_block;
 }
@@ -1738,13 +1683,11 @@
     DCHECK_NOT_NULL(catch_info->pattern);
     catch_info->name = ast_value_factory()->dot_catch_string();
   }
-  catch_info->variable = catch_info->scope->DeclareLocal(
-      catch_info->name, VAR, kCreatedInitialized, NORMAL_VARIABLE);
+  catch_info->variable = catch_info->scope->DeclareLocal(catch_info->name, VAR);
   if (catch_info->pattern != nullptr) {
     DeclarationDescriptor descriptor;
     descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
     descriptor.scope = scope();
-    descriptor.hoist_scope = nullptr;
     descriptor.mode = LET;
     descriptor.declaration_pos = catch_info->pattern->position();
     descriptor.initialization_pos = catch_info->pattern->position();
@@ -1798,15 +1741,9 @@
     DCHECK_NOT_NULL(catch_info.scope);
     DCHECK_NOT_NULL(catch_info.variable);
     TryCatchStatement* statement;
-    if (catch_info.for_promise_reject) {
-      statement = factory()->NewTryCatchStatementForPromiseReject(
-          try_block, catch_info.scope, catch_info.variable, catch_block,
-          kNoSourcePosition);
-    } else {
-      statement = factory()->NewTryCatchStatement(
-          try_block, catch_info.scope, catch_info.variable, catch_block,
-          kNoSourcePosition);
-    }
+    statement = factory()->NewTryCatchStatement(try_block, catch_info.scope,
+                                                catch_info.variable,
+                                                catch_block, kNoSourcePosition);
 
     try_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
     try_block->statements()->Add(statement, zone());
@@ -1830,10 +1767,80 @@
   }
 }
 
-// !%_IsJSReceiver(result = iterator.next()) &&
-//     %ThrowIteratorResultNotAnObject(result)
+void Parser::ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
+                                                  ZoneList<Statement*>* body,
+                                                  bool* ok) {
+  // We produce:
+  //
+  // try { InitialYield; ...body...; return {value: undefined, done: true} }
+  // finally { %_GeneratorClose(generator) }
+  //
+  // - InitialYield yields the actual generator object.
+  // - Any return statement inside the body will have its argument wrapped
+  //   in a "done" iterator result object.
+  // - If the generator terminates for whatever reason, we must close it.
+  //   Hence the finally clause.
+
+  Block* try_block = factory()->NewBlock(nullptr, 3, false, kNoSourcePosition);
+  Expression* initial_yield = BuildInitialYield(pos, kind);
+  try_block->statements()->Add(
+      factory()->NewExpressionStatement(initial_yield, kNoSourcePosition),
+      zone());
+  ParseStatementList(try_block->statements(), Token::RBRACE, ok);
+  if (!*ok) return;
+
+  Statement* final_return = factory()->NewReturnStatement(
+      BuildIteratorResult(nullptr, true), kNoSourcePosition);
+  try_block->statements()->Add(final_return, zone());
+
+  Block* finally_block =
+      factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
+  VariableProxy* call_proxy =
+      factory()->NewVariableProxy(function_state_->generator_object_variable());
+  args->Add(call_proxy, zone());
+  Expression* call = factory()->NewCallRuntime(Runtime::kInlineGeneratorClose,
+                                               args, kNoSourcePosition);
+  finally_block->statements()->Add(
+      factory()->NewExpressionStatement(call, kNoSourcePosition), zone());
+
+  body->Add(factory()->NewTryFinallyStatement(try_block, finally_block,
+                                              kNoSourcePosition),
+            zone());
+}
+
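+// For a named function expression like `(function g() { ... })`, the name
+// `g` must be bound inside the function to the closure itself; this creates
+// that assignment once the language mode is known.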
+void Parser::CreateFunctionNameAssignment(
+    const AstRawString* function_name, int pos,
+    FunctionLiteral::FunctionType function_type,
+    DeclarationScope* function_scope, ZoneList<Statement*>* result, int index) {
+  if (function_type == FunctionLiteral::kNamedExpression) {
+    StatementT statement = factory()->NewEmptyStatement(kNoSourcePosition);
+    if (function_scope->LookupLocal(function_name) == nullptr) {
+      // Now that we know the language mode, we can create the const assignment
+      // in the previously reserved spot.
+      DCHECK_EQ(function_scope, scope());
+      Variable* fvar = function_scope->DeclareFunctionVar(function_name);
+      VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
+      statement = factory()->NewExpressionStatement(
+          factory()->NewAssignment(Token::INIT, fproxy,
+                                   factory()->NewThisFunction(pos),
+                                   kNoSourcePosition),
+          kNoSourcePosition);
+    }
+    result->Set(index, statement);
+  }
+}
+
+// [if (IteratorType == kNormal)]
+//     !%_IsJSReceiver(result = iterator.next()) &&
+//         %ThrowIteratorResultNotAnObject(result)
+// [else if (IteratorType == kAsync)]
+//     !%_IsJSReceiver(result = Await(iterator.next())) &&
+//         %ThrowIteratorResultNotAnObject(result)
+// [endif]
 Expression* Parser::BuildIteratorNextResult(Expression* iterator,
-                                            Variable* result, int pos) {
+                                            Variable* result, IteratorType type,
+                                            int pos) {
   Expression* next_literal = factory()->NewStringLiteral(
       ast_value_factory()->next_string(), kNoSourcePosition);
   Expression* next_property =
@@ -1842,6 +1849,9 @@
       new (zone()) ZoneList<Expression*>(0, zone());
   Expression* next_call =
       factory()->NewCall(next_property, next_arguments, pos);
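+  // For async iteration, the result of iterator.next() is awaited before it
+  // is checked for being a JSReceiver.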
+  if (type == IteratorType::kAsync) {
+    next_call = RewriteAwaitExpression(next_call, pos);
+  }
   Expression* result_proxy = factory()->NewVariableProxy(result);
   Expression* left =
       factory()->NewAssignment(Token::ASSIGN, result_proxy, next_call, pos);
@@ -1876,7 +1886,7 @@
   if (for_of != NULL) {
     const bool finalize = true;
     return InitializeForOfStatement(for_of, each, subject, body, finalize,
-                                    each_keyword_pos);
+                                    IteratorType::kNormal, each_keyword_pos);
   } else {
     if (each->IsArrayLiteral() || each->IsObjectLiteral()) {
       Variable* temp = NewTemporary(ast_value_factory()->empty_string());
@@ -1893,6 +1903,7 @@
       body = block;
       each = factory()->NewVariableProxy(temp);
     }
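+    // for-in assigns to `each` on every iteration, so record it as assigned.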
+    MarkExpressionAsAssigned(each);
     stmt->AsForInStatement()->Initialize(each, subject, body);
   }
   return stmt;
@@ -1954,6 +1965,7 @@
                                               Block** body_block,
                                               Expression** each_variable,
                                               bool* ok) {
+  DCHECK(for_info->parsing_result.declarations.length() == 1);
   DeclarationParsingResult::Declaration& decl =
       for_info->parsing_result.declarations[0];
   Variable* temp = NewTemporary(ast_value_factory()->dot_for_string());
@@ -1968,14 +1980,13 @@
     bool is_for_var_of =
         for_info->mode == ForEachStatement::ITERATE &&
         for_info->parsing_result.descriptor.mode == VariableMode::VAR;
+    bool collect_names =
+        IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
+        is_for_var_of;
 
     PatternRewriter::DeclareAndInitializeVariables(
         this, each_initialization_block, &descriptor, &decl,
-        (IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
-         is_for_var_of)
-            ? &for_info->bound_names
-            : nullptr,
-        CHECK_OK_VOID);
+        collect_names ? &for_info->bound_names : nullptr, CHECK_OK_VOID);
 
     // Annex B.3.5 prohibits the form
     // `try {} catch(e) { for (var e of {}); }`
@@ -2028,39 +2039,42 @@
   return init_block;
 }
 
-Statement* Parser::InitializeForOfStatement(ForOfStatement* for_of,
-                                            Expression* each,
-                                            Expression* iterable,
-                                            Statement* body, bool finalize,
-                                            int next_result_pos) {
+Statement* Parser::InitializeForOfStatement(
+    ForOfStatement* for_of, Expression* each, Expression* iterable,
+    Statement* body, bool finalize, IteratorType type, int next_result_pos) {
   // Create the auxiliary expressions needed for iterating over the iterable,
   // and initialize the given ForOfStatement with them.
   // If finalize is true, also instrument the loop with code that performs the
   // proper ES6 iterator finalization.  In that case, the result is not
   // immediately a ForOfStatement.
-
   const int nopos = kNoSourcePosition;
   auto avfactory = ast_value_factory();
 
-  Variable* iterator = NewTemporary(ast_value_factory()->dot_iterator_string());
-  Variable* result = NewTemporary(ast_value_factory()->dot_result_string());
+  Variable* iterator = NewTemporary(avfactory->dot_iterator_string());
+  Variable* result = NewTemporary(avfactory->dot_result_string());
   Variable* completion = NewTemporary(avfactory->empty_string());
 
-  // iterator = iterable[Symbol.iterator]()
+  // iterator = GetIterator(iterable, type)
   Expression* assign_iterator;
   {
     assign_iterator = factory()->NewAssignment(
         Token::ASSIGN, factory()->NewVariableProxy(iterator),
-        GetIterator(iterable, iterable->position()), iterable->position());
+        factory()->NewGetIterator(iterable, type, iterable->position()),
+        iterable->position());
   }
 
-  // !%_IsJSReceiver(result = iterator.next()) &&
-  //     %ThrowIteratorResultNotAnObject(result)
+  // [if (IteratorType == kNormal)]
+  //     !%_IsJSReceiver(result = iterator.next()) &&
+  //         %ThrowIteratorResultNotAnObject(result)
+  // [else if (IteratorType == kAsync)]
+  //     !%_IsJSReceiver(result = Await(iterator.next())) &&
+  //         %ThrowIteratorResultNotAnObject(result)
+  // [endif]
   Expression* next_result;
   {
     Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
     next_result =
-        BuildIteratorNextResult(iterator_proxy, result, next_result_pos);
+        BuildIteratorNextResult(iterator_proxy, result, type, next_result_pos);
   }
 
   // result.done
@@ -2148,7 +2162,8 @@
 
   for_of->Initialize(body, iterator, assign_iterator, next_result, result_done,
                      assign_each);
-  return finalize ? FinalizeForOfStatement(for_of, completion, nopos) : for_of;
+  return finalize ? FinalizeForOfStatement(for_of, completion, type, nopos)
+                  : for_of;
 }
 
 Statement* Parser::DesugarLexicalBindingsInForStatement(
@@ -2247,7 +2262,7 @@
 
   Block* inner_block = factory()->NewBlock(NULL, 3, false, kNoSourcePosition);
   {
-    BlockState block_state(&scope_state_, inner_scope);
+    BlockState block_state(&scope_, inner_scope);
 
     Block* ignore_completion_block = factory()->NewBlock(
         nullptr, for_info.bound_names.length() + 3, true, kNoSourcePosition);
@@ -2383,7 +2398,6 @@
       inner_block->statements()->Add(ignore_completion_block, zone());
     }
 
-    inner_scope->set_end_position(scanner()->location().end_pos);
     inner_block->set_scope(inner_scope);
   }
 
@@ -2441,7 +2455,8 @@
     expr = assignment->target();
   }
 
-  AddFormalParameter(parameters, expr, initializer, end_pos, is_rest);
+  AddFormalParameter(parameters, expr, initializer,
+                     end_pos, is_rest);
 }
 
 void Parser::DeclareArrowFunctionFormalParameters(
@@ -2463,45 +2478,27 @@
   if (!parameters->is_simple) {
     this->classifier()->RecordNonSimpleParameter();
   }
-  for (int i = 0; i < parameters->arity; ++i) {
-    auto parameter = parameters->at(i);
-    DeclareFormalParameter(parameters->scope, parameter);
-    if (!this->classifier()
-             ->is_valid_formal_parameter_list_without_duplicates() &&
-        !duplicate_loc->IsValid()) {
-      *duplicate_loc =
-          this->classifier()->duplicate_formal_parameter_error().location;
-    }
+  DeclareFormalParameters(parameters->scope, parameters->params);
+  if (!this->classifier()
+           ->is_valid_formal_parameter_list_without_duplicates()) {
+    *duplicate_loc =
+        this->classifier()->duplicate_formal_parameter_error().location;
   }
   DCHECK_EQ(parameters->is_simple, parameters->scope->has_simple_parameters());
 }
 
-void Parser::ReindexLiterals(const ParserFormalParameters& parameters) {
-  if (function_state_->materialized_literal_count() > 0) {
-    AstLiteralReindexer reindexer;
-
-    for (const auto p : parameters.params) {
-      if (p.pattern != nullptr) reindexer.Reindex(p.pattern);
-      if (p.initializer != nullptr) reindexer.Reindex(p.initializer);
-    }
-
-    DCHECK(reindexer.count() <= function_state_->materialized_literal_count());
-  }
-}
-
-void Parser::PrepareGeneratorVariables(FunctionState* function_state) {
-  // For generators, allocating variables in contexts is currently a win
-  // because it minimizes the work needed to suspend and resume an
-  // activation.  The machine code produced for generators (by full-codegen)
-  // relies on this forced context allocation, but not in an essential way.
-  scope()->ForceContextAllocation();
+void Parser::PrepareGeneratorVariables() {
+  // For generators, allocating variables in contexts is currently a win
+  // because it minimizes the work needed to suspend and resume an activation.
+  // The code produced for generators relies on this forced context allocation
+  // (it does not restore the frame's parameters upon resume).
+  function_state_->scope()->ForceContextAllocation();
 
   // Calling a generator returns a generator object.  That object is stored
   // in a temporary variable, a definition that is used by "yield"
   // expressions.
-  Variable* temp =
-      NewTemporary(ast_value_factory()->dot_generator_object_string());
-  function_state->set_generator_object_variable(temp);
+  function_state_->scope()->DeclareGeneratorObjectVar(
+      ast_value_factory()->dot_generator_object_string());
 }
 
 FunctionLiteral* Parser::ParseFunctionLiteral(
@@ -2532,7 +2529,7 @@
   }
 
   FunctionLiteral::EagerCompileHint eager_compile_hint =
-      function_state_->next_function_is_parenthesized()
+      function_state_->next_function_is_likely_called()
           ? FunctionLiteral::kShouldEagerCompile
           : default_eager_compile_hint();
 
@@ -2569,7 +2566,7 @@
   // immediately). bar can be parsed lazily, but we need to parse it in a mode
   // that tracks unresolved variables.
   DCHECK_IMPLIES(parse_lazily(), FLAG_lazy);
-  DCHECK_IMPLIES(parse_lazily(), allow_lazy());
+  DCHECK_IMPLIES(parse_lazily(), allow_lazy_);
   DCHECK_IMPLIES(parse_lazily(), extension_ == nullptr);
 
   bool can_preparse = parse_lazily() &&
@@ -2578,8 +2575,11 @@
   bool is_lazy_top_level_function =
       can_preparse && impl()->AllowsLazyParsingWithoutUnresolvedVariables();
 
-  RuntimeCallTimerScope runtime_timer(runtime_call_stats_,
-                                      &RuntimeCallStats::ParseFunctionLiteral);
+  RuntimeCallTimerScope runtime_timer(
+      runtime_call_stats_,
+      parsing_on_main_thread_
+          ? &RuntimeCallStats::ParseFunctionLiteral
+          : &RuntimeCallStats::ParseBackgroundFunctionLiteral);
 
   // Determine whether we can still lazy parse the inner function.
   // The preconditions are:
@@ -2594,39 +2594,36 @@
   //   FunctionExpression; even without enclosing parentheses it might be
   //   immediately invoked.
   // - The function literal shouldn't be hinted to eagerly compile.
-  // - For asm.js functions the body needs to be available when module
-  //   validation is active, because we examine the entire module at once.
 
   // Inner functions will be parsed using a temporary Zone. After parsing, we
   // will migrate unresolved variables into a Scope in the main Zone.
   // TODO(marja): Refactor parsing modes: simplify this.
   bool use_temp_zone =
-      (FLAG_lazy_inner_functions
+      (FLAG_aggressive_lazy_inner_functions
            ? can_preparse
            : (is_lazy_top_level_function ||
-              (allow_lazy() && function_type == FunctionLiteral::kDeclaration &&
-               eager_compile_hint == FunctionLiteral::kShouldLazyCompile))) &&
-      !(FLAG_validate_asm && scope()->IsAsmModule());
+              (parse_lazily() &&
+               function_type == FunctionLiteral::kDeclaration &&
+               eager_compile_hint == FunctionLiteral::kShouldLazyCompile)));
+
+  DCHECK_IMPLIES(
+      (is_lazy_top_level_function ||
+       (parse_lazily() && function_type == FunctionLiteral::kDeclaration &&
+        eager_compile_hint == FunctionLiteral::kShouldLazyCompile)),
+      can_preparse);
   bool is_lazy_inner_function =
       use_temp_zone && FLAG_lazy_inner_functions && !is_lazy_top_level_function;
 
-  // This Scope lives in the main zone. We'll migrate data into that zone later.
-  DeclarationScope* scope = NewFunctionScope(kind);
-  SetLanguageMode(scope, language_mode);
-#ifdef DEBUG
-  scope->SetScopeName(function_name);
-#endif
-
   ZoneList<Statement*>* body = nullptr;
-  int materialized_literal_count = -1;
   int expected_property_count = -1;
   bool should_be_used_once_hint = false;
   int num_parameters = -1;
   int function_length = -1;
   bool has_duplicate_parameters = false;
+  int function_literal_id = GetNextFunctionLiteralId();
 
-  Expect(Token::LPAREN, CHECK_OK);
-  scope->set_start_position(scanner()->location().beg_pos);
+  Zone* outer_zone = zone();
+  DeclarationScope* scope;
 
   {
     // Temporary zones can nest. When we migrate free variables (see below), we
@@ -2641,10 +2638,19 @@
     // information when the function is parsed.
     Zone temp_zone(zone()->allocator(), ZONE_NAME);
     DiscardableZoneScope zone_scope(this, &temp_zone, use_temp_zone);
+
+    // This Scope lives in the main zone. We'll migrate data into that zone
+    // later.
+    scope = NewFunctionScope(kind, outer_zone);
+    SetLanguageMode(scope, language_mode);
 #ifdef DEBUG
+    scope->SetScopeName(function_name);
     if (use_temp_zone) scope->set_needs_migration();
 #endif
 
+    Expect(Token::LPAREN, CHECK_OK);
+    scope->set_start_position(scanner()->location().beg_pos);
+
     // Eager or lazy parse? If is_lazy_top_level_function, we'll parse
     // lazily. We'll call SkipFunction, which may decide to
     // abort lazy parsing if it suspects that wasn't a good idea. If so (in
@@ -2653,11 +2659,10 @@
     if (is_lazy_top_level_function || is_lazy_inner_function) {
       Scanner::BookmarkScope bookmark(scanner());
       bookmark.Set();
-      LazyParsingResult result =
-          SkipFunction(kind, scope, &num_parameters, &function_length,
-                       &has_duplicate_parameters, &materialized_literal_count,
-                       &expected_property_count, is_lazy_inner_function,
-                       is_lazy_top_level_function, CHECK_OK);
+      LazyParsingResult result = SkipFunction(
+          kind, scope, &num_parameters, &function_length,
+          &has_duplicate_parameters, &expected_property_count,
+          is_lazy_inner_function, is_lazy_top_level_function, CHECK_OK);
 
       if (result == kLazyParsingAborted) {
         DCHECK(is_lazy_top_level_function);
@@ -2677,10 +2682,10 @@
     }
 
     if (!is_lazy_top_level_function && !is_lazy_inner_function) {
-      body = ParseFunction(
-          function_name, pos, kind, function_type, scope, &num_parameters,
-          &function_length, &has_duplicate_parameters,
-          &materialized_literal_count, &expected_property_count, CHECK_OK);
+      body = ParseFunction(function_name, pos, kind, function_type, scope,
+                           &num_parameters, &function_length,
+                           &has_duplicate_parameters, &expected_property_count,
+                           CHECK_OK);
     }
 
     DCHECK(use_temp_zone || !is_lazy_top_level_function);
@@ -2688,22 +2693,32 @@
       // If the preconditions are correct the function body should never be
       // accessed, but do this anyway for better behaviour if they're wrong.
       body = nullptr;
-      scope->AnalyzePartially(&previous_zone_ast_node_factory);
+      scope->AnalyzePartially(&previous_zone_ast_node_factory,
+                              preparsed_scope_data_);
     }
 
+    DCHECK_IMPLIES(use_temp_zone, temp_zoned_);
     if (FLAG_trace_preparse) {
       PrintF("  [%s]: %i-%i %.*s\n",
              is_lazy_top_level_function
                  ? "Preparse no-resolution"
-                 : (use_temp_zone ? "Preparse resolution" : "Full parse"),
+                 : (temp_zoned_ ? "Preparse resolution" : "Full parse"),
              scope->start_position(), scope->end_position(),
              function_name->byte_length(), function_name->raw_data());
+    }
+    if (V8_UNLIKELY(FLAG_runtime_stats)) {
       if (is_lazy_top_level_function) {
-        CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats_,
-                                       PreParseNoVariableResolution);
-      } else if (use_temp_zone) {
-        CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats_,
-                                       PreParseWithVariableResolution);
+        RuntimeCallStats::CorrectCurrentCounterId(
+            runtime_call_stats_,
+            parsing_on_main_thread_
+                ? &RuntimeCallStats::PreParseNoVariableResolution
+                : &RuntimeCallStats::PreParseBackgroundNoVariableResolution);
+      } else if (temp_zoned_) {
+        RuntimeCallStats::CorrectCurrentCounterId(
+            runtime_call_stats_,
+            parsing_on_main_thread_
+                ? &RuntimeCallStats::PreParseWithVariableResolution
+                : &RuntimeCallStats::PreParseBackgroundWithVariableResolution);
       }
     }
 
@@ -2716,8 +2731,6 @@
     if (is_strict(language_mode)) {
       CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
                               CHECK_OK);
-      CheckDecimalLiteralWithLeadingZero(scope->start_position(),
-                                         scope->end_position());
     }
     CheckConflictingVarDeclarations(scope, CHECK_OK);
   }  // DiscardableZoneScope goes out of scope.
@@ -2728,9 +2741,9 @@
 
   // Note that the FunctionLiteral needs to be created in the main Zone again.
   FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
-      function_name, scope, body, materialized_literal_count,
-      expected_property_count, num_parameters, function_length,
-      duplicate_parameters, function_type, eager_compile_hint, pos, true);
+      function_name, scope, body, expected_property_count, num_parameters,
+      function_length, duplicate_parameters, function_type, eager_compile_hint,
+      pos, true, function_literal_id);
   function_literal->set_function_token_position(function_token_pos);
   if (should_be_used_once_hint)
     function_literal->set_should_be_used_once_hint();
@@ -2745,9 +2758,10 @@
 Parser::LazyParsingResult Parser::SkipFunction(
     FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
     int* function_length, bool* has_duplicate_parameters,
-    int* materialized_literal_count, int* expected_property_count,
-    bool is_inner_function, bool may_abort, bool* ok) {
+    int* expected_property_count, bool is_inner_function, bool may_abort,
+    bool* ok) {
   DCHECK_NE(kNoSourcePosition, function_scope->start_position());
+  DCHECK_EQ(kNoSourcePosition, parameters_end_pos_);
   if (produce_cached_parse_data()) CHECK(log_);
 
   DCHECK_IMPLIES(IsArrowFunction(kind),
@@ -2772,12 +2786,12 @@
       *num_parameters = entry.num_parameters();
       *function_length = entry.function_length();
       *has_duplicate_parameters = entry.has_duplicate_parameters();
-      *materialized_literal_count = entry.literal_count();
       *expected_property_count = entry.property_count();
       SetLanguageMode(function_scope, entry.language_mode());
       if (entry.uses_super_property())
         function_scope->RecordSuperPropertyUsage();
       if (entry.calls_eval()) function_scope->RecordEvalCall();
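+      // The inner functions of the skipped function still consume function
+      // literal ids; advance the id counter past them.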
+      SkipFunctionLiterals(entry.num_inner_functions());
       return kLazyParsingComplete;
     }
     cached_parse_data_->Reject();
@@ -2788,17 +2802,18 @@
   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.PreParse");
 
   if (reusable_preparser_ == NULL) {
-    reusable_preparser_ = new PreParser(zone(), &scanner_, ast_value_factory(),
-                                        &pending_error_handler_,
-                                        runtime_call_stats_, stack_limit_);
-    reusable_preparser_->set_allow_lazy(true);
+    reusable_preparser_ = new PreParser(
+        zone(), &scanner_, stack_limit_, ast_value_factory(),
+        &pending_error_handler_, runtime_call_stats_, parsing_on_main_thread_);
 #define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
     SET_ALLOW(natives);
     SET_ALLOW(harmony_do_expressions);
     SET_ALLOW(harmony_function_sent);
-    SET_ALLOW(harmony_async_await);
     SET_ALLOW(harmony_trailing_commas);
     SET_ALLOW(harmony_class_fields);
+    SET_ALLOW(harmony_object_rest_spread);
+    SET_ALLOW(harmony_dynamic_import);
+    SET_ALLOW(harmony_async_iteration);
 #undef SET_ALLOW
   }
   // Aborting inner function preparsing would leave scopes in an inconsistent
@@ -2829,15 +2844,16 @@
   *num_parameters = logger->num_parameters();
   *function_length = logger->function_length();
   *has_duplicate_parameters = logger->has_duplicate_parameters();
-  *materialized_literal_count = logger->literals();
   *expected_property_count = logger->properties();
+  SkipFunctionLiterals(logger->num_inner_functions());
   if (!is_inner_function && produce_cached_parse_data()) {
     DCHECK(log_);
     log_->LogFunction(
         function_scope->start_position(), function_scope->end_position(),
         *num_parameters, *function_length, *has_duplicate_parameters,
-        *materialized_literal_count, *expected_property_count, language_mode(),
-        function_scope->uses_super_property(), function_scope->calls_eval());
+        *expected_property_count, language_mode(),
+        function_scope->uses_super_property(), function_scope->calls_eval(),
+        logger->num_inner_functions());
   }
   return kLazyParsingComplete;
 }
@@ -2886,6 +2902,7 @@
     if (to_rewrite->is_rewritten()) return;
     Parser::PatternRewriter::RewriteDestructuringAssignment(parser_, to_rewrite,
                                                             scope_);
+    AstTraversalVisitor::VisitRewritableExpression(to_rewrite);
   }
 
   // Code in function literals does not need to be eagerly rewritten, it will be
@@ -2908,48 +2925,46 @@
   DCHECK(!parameters.is_simple);
   DCHECK(scope()->is_function_scope());
   Block* init_block = factory()->NewBlock(NULL, 1, true, kNoSourcePosition);
-  for (int i = 0; i < parameters.params.length(); ++i) {
-    auto parameter = parameters.params[i];
-    if (parameter.is_rest && parameter.pattern->IsVariableProxy()) break;
+  int index = 0;
+  for (auto parameter : parameters.params) {
+    if (parameter->is_nondestructuring_rest()) break;
     DeclarationDescriptor descriptor;
     descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
     descriptor.scope = scope();
-    descriptor.hoist_scope = nullptr;
     descriptor.mode = LET;
-    descriptor.declaration_pos = parameter.pattern->position();
+    descriptor.declaration_pos = parameter->pattern->position();
     // The position that will be used by the AssignmentExpression
     // which copies from the temp parameter to the pattern.
     //
     // TODO(adamk): Should this be kNoSourcePosition, since
     // it's just copying from a temp var to the real param var?
-    descriptor.initialization_pos = parameter.pattern->position();
+    descriptor.initialization_pos = parameter->pattern->position();
     Expression* initial_value =
-        factory()->NewVariableProxy(parameters.scope->parameter(i));
-    if (parameter.initializer != nullptr) {
+        factory()->NewVariableProxy(parameters.scope->parameter(index));
+    if (parameter->initializer != nullptr) {
       // IS_UNDEFINED($param) ? initializer : $param
 
       // Ensure initializer is rewritten
-      RewriteParameterInitializer(parameter.initializer, scope());
+      RewriteParameterInitializer(parameter->initializer, scope());
 
       auto condition = factory()->NewCompareOperation(
           Token::EQ_STRICT,
-          factory()->NewVariableProxy(parameters.scope->parameter(i)),
+          factory()->NewVariableProxy(parameters.scope->parameter(index)),
           factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
       initial_value = factory()->NewConditional(
-          condition, parameter.initializer, initial_value, kNoSourcePosition);
-      descriptor.initialization_pos = parameter.initializer->position();
+          condition, parameter->initializer, initial_value, kNoSourcePosition);
+      descriptor.initialization_pos = parameter->initializer->position();
     }
 
     Scope* param_scope = scope();
     Block* param_block = init_block;
-    if (!parameter.is_simple() && scope()->calls_sloppy_eval()) {
+    if (!parameter->is_simple() && scope()->calls_sloppy_eval()) {
       param_scope = NewVarblockScope();
       param_scope->set_start_position(descriptor.initialization_pos);
-      param_scope->set_end_position(parameter.initializer_end_position);
+      param_scope->set_end_position(parameter->initializer_end_position);
       param_scope->RecordEvalCall();
       param_block = factory()->NewBlock(NULL, 8, true, kNoSourcePosition);
       param_block->set_scope(param_scope);
-      descriptor.hoist_scope = scope();
       // Pass the appropriate scope in so that PatternRewriter can appropriately
       // rewrite inner initializers of the pattern to param_scope
       descriptor.scope = param_scope;
@@ -2958,24 +2973,25 @@
                                        param_scope);
     }
 
-    BlockState block_state(&scope_state_, param_scope);
+    BlockState block_state(&scope_, param_scope);
     DeclarationParsingResult::Declaration decl(
-        parameter.pattern, parameter.initializer_end_position, initial_value);
+        parameter->pattern, parameter->initializer_end_position, initial_value);
     PatternRewriter::DeclareAndInitializeVariables(
         this, param_block, &descriptor, &decl, nullptr, CHECK_OK);
 
     if (param_block != init_block) {
-      param_scope = block_state.FinalizedBlockScope();
+      param_scope = param_scope->FinalizeBlockScope();
       if (param_scope != nullptr) {
         CheckConflictingVarDeclarations(param_scope, CHECK_OK);
       }
       init_block->statements()->Add(param_block, zone());
     }
+    ++index;
   }
   return init_block;
 }
 
-Block* Parser::BuildRejectPromiseOnException(Block* inner_block, bool* ok) {
+Block* Parser::BuildRejectPromiseOnException(Block* inner_block) {
   // .promise = %AsyncFunctionPromiseCreate();
   // try {
   //   <inner_block>
@@ -2994,7 +3010,7 @@
         Context::ASYNC_FUNCTION_PROMISE_CREATE_INDEX,
         new (zone()) ZoneList<Expression*>(0, zone()), kNoSourcePosition);
     Assignment* assign_promise = factory()->NewAssignment(
-        Token::INIT, factory()->NewVariableProxy(PromiseVariable()),
+        Token::ASSIGN, factory()->NewVariableProxy(PromiseVariable()),
         create_promise, kNoSourcePosition);
     set_promise =
         factory()->NewExpressionStatement(assign_promise, kNoSourcePosition);
@@ -3005,8 +3021,7 @@
   Scope* catch_scope = NewScope(CATCH_SCOPE);
   catch_scope->set_is_hidden();
   Variable* catch_variable =
-      catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
-                                kCreatedInitialized, NORMAL_VARIABLE);
+      catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR);
   Block* catch_block = factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
 
   Expression* promise_reject = BuildRejectPromise(
@@ -3045,15 +3060,20 @@
   return result;
 }
 
-Expression* Parser::BuildCreateJSGeneratorObject(int pos, FunctionKind kind) {
+Assignment* Parser::BuildCreateJSGeneratorObject(int pos, FunctionKind kind) {
+  // .generator = %CreateJSGeneratorObject(...);
   DCHECK_NOT_NULL(function_state_->generator_object_variable());
   ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
   args->Add(factory()->NewThisFunction(pos), zone());
   args->Add(IsArrowFunction(kind) ? GetLiteralUndefined(pos)
                                   : ThisExpression(kNoSourcePosition),
             zone());
-  return factory()->NewCallRuntime(Runtime::kCreateJSGeneratorObject, args,
-                                   pos);
+  Expression* allocation =
+      factory()->NewCallRuntime(Runtime::kCreateJSGeneratorObject, args, pos);
+  VariableProxy* proxy =
+      factory()->NewVariableProxy(function_state_->generator_object_variable());
+  return factory()->NewAssignment(Token::INIT, proxy, allocation,
+                                  kNoSourcePosition);
 }
 
 Expression* Parser::BuildResolvePromise(Expression* value, int pos) {
@@ -3069,15 +3089,15 @@
 }
 
 Expression* Parser::BuildRejectPromise(Expression* value, int pos) {
-  // %RejectPromiseNoDebugEvent(.promise, value, true), .promise
-  // The NoDebugEvent variant disables the additional debug event for the
-  // rejection since a debug event already happened for the exception that got
-  // us here.
-  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
+  // %promise_internal_reject(.promise, value, false), .promise
+  // Disables the additional debug event for the rejection since a debug event
+  // already happened for the exception that got us here.
+  ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(3, zone());
   args->Add(factory()->NewVariableProxy(PromiseVariable()), zone());
   args->Add(value, zone());
+  args->Add(factory()->NewBooleanLiteral(false, pos), zone());
   Expression* call_runtime = factory()->NewCallRuntime(
-      Context::REJECT_PROMISE_NO_DEBUG_EVENT_INDEX, args, pos);
+      Context::PROMISE_INTERNAL_REJECT_INDEX, args, pos);
   return factory()->NewBinaryOperation(
       Token::COMMA, call_runtime,
       factory()->NewVariableProxy(PromiseVariable()), pos);
@@ -3089,24 +3109,20 @@
   // comes first should create it and stash it in the FunctionState.
   Variable* promise = function_state_->promise_variable();
   if (function_state_->promise_variable() == nullptr) {
-    promise = scope()->NewTemporary(ast_value_factory()->empty_string());
-    function_state_->set_promise_variable(promise);
+    promise = function_state_->scope()->DeclarePromiseVar(
+        ast_value_factory()->empty_string());
   }
   return promise;
 }
 
 Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
-  Expression* allocation = BuildCreateJSGeneratorObject(pos, kind);
-  VariableProxy* init_proxy =
-      factory()->NewVariableProxy(function_state_->generator_object_variable());
-  Assignment* assignment = factory()->NewAssignment(
-      Token::INIT, init_proxy, allocation, kNoSourcePosition);
-  VariableProxy* get_proxy =
+  Assignment* assignment = BuildCreateJSGeneratorObject(pos, kind);
+  VariableProxy* generator =
       factory()->NewVariableProxy(function_state_->generator_object_variable());
   // The position of the yield is important for reporting the exception
   // caused by calling the .throw method on a generator suspended at the
   // initial yield (i.e. right after generator instantiation).
-  return factory()->NewYield(get_proxy, assignment, scope()->start_position(),
+  return factory()->NewYield(generator, assignment, scope()->start_position(),
                              Yield::kOnExceptionThrow);
 }
 
@@ -3114,17 +3130,44 @@
     const AstRawString* function_name, int pos, FunctionKind kind,
     FunctionLiteral::FunctionType function_type,
     DeclarationScope* function_scope, int* num_parameters, int* function_length,
-    bool* has_duplicate_parameters, int* materialized_literal_count,
-    int* expected_property_count, bool* ok) {
-  FunctionState function_state(&function_state_, &scope_state_, function_scope);
+    bool* has_duplicate_parameters, int* expected_property_count, bool* ok) {
+  ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
 
-  DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+  FunctionState function_state(&function_state_, &scope_, function_scope);
+
+  DuplicateFinder duplicate_finder;
   ExpressionClassifier formals_classifier(this, &duplicate_finder);
 
-  if (IsGeneratorFunction(kind)) PrepareGeneratorVariables(&function_state);
+  if (IsResumableFunction(kind)) PrepareGeneratorVariables();
+
+  int expected_parameters_end_pos = parameters_end_pos_;
+  if (expected_parameters_end_pos != kNoSourcePosition) {
+    // This is the first function encountered in a CreateDynamicFunction eval.
+    parameters_end_pos_ = kNoSourcePosition;
+    // The function name should have been ignored, giving us the empty string
+    // here.
+    DCHECK_EQ(function_name, ast_value_factory()->empty_string());
+  }
 
   ParserFormalParameters formals(function_scope);
   ParseFormalParameterList(&formals, CHECK_OK);
+  if (expected_parameters_end_pos != kNoSourcePosition) {
+    // Check for '(' or ')' shenanigans in the parameter string for dynamic
+    // functions.
+    int position = peek_position();
+    if (position < expected_parameters_end_pos) {
+      ReportMessageAt(Scanner::Location(position, position + 1),
+                      MessageTemplate::kArgStringTerminatesParametersEarly);
+      *ok = false;
+      return nullptr;
+    } else if (position > expected_parameters_end_pos) {
+      ReportMessageAt(Scanner::Location(expected_parameters_end_pos - 2,
+                                        expected_parameters_end_pos),
+                      MessageTemplate::kUnexpectedEndOfArgString);
+      *ok = false;
+      return nullptr;
+    }
+  }
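These two error paths guard CreateDynamicFunction, where the parameter list is spliced into a synthesized source string and unbalanced parentheses could otherwise smuggle code past the parameter boundary. A sketch of what gets rejected:

    new Function("a", "return a")(1);  // fine: parameters end exactly where expected
    try {
      // ')' inside the parameter string terminates the parameter list early.
      new Function("a) { return 1; } (function(", "return a");
    } catch (e) {
      console.log(e instanceof SyntaxError);  // true
    }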
   Expect(Token::RPAREN, CHECK_OK);
   int formals_end_position = scanner()->location().end_pos;
   *num_parameters = formals.num_parameters();
@@ -3135,8 +3178,8 @@
                          CHECK_OK);
   Expect(Token::LBRACE, CHECK_OK);
 
-  ZoneList<Statement*>* body = ParseEagerFunctionBody(
-      function_name, pos, formals, kind, function_type, ok);
+  ZoneList<Statement*>* body = new (zone()) ZoneList<Statement*>(8, zone());
+  ParseFunctionBody(body, function_name, pos, formals, kind, function_type, ok);
 
   // Validate parameter names. We can do this only after parsing the function,
   // since the function can declare itself strict.
@@ -3151,292 +3194,11 @@
   *has_duplicate_parameters =
       !classifier()->is_valid_formal_parameter_list_without_duplicates();
 
-  *materialized_literal_count = function_state.materialized_literal_count();
   *expected_property_count = function_state.expected_property_count();
   return body;
 }
 
-ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
-    const AstRawString* function_name, int pos,
-    const ParserFormalParameters& parameters, FunctionKind kind,
-    FunctionLiteral::FunctionType function_type, bool* ok) {
-  ParsingModeScope mode(this, allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY);
-  ZoneList<Statement*>* result = new(zone()) ZoneList<Statement*>(8, zone());
-
-  static const int kFunctionNameAssignmentIndex = 0;
-  if (function_type == FunctionLiteral::kNamedExpression) {
-    DCHECK(function_name != NULL);
-    // If we have a named function expression, we add a local variable
-    // declaration to the body of the function with the name of the
-    // function and let it refer to the function itself (closure).
-    // Not having parsed the function body, the language mode may still change,
-    // so we reserve a spot and create the actual const assignment later.
-    DCHECK_EQ(kFunctionNameAssignmentIndex, result->length());
-    result->Add(NULL, zone());
-  }
-
-  ZoneList<Statement*>* body = result;
-  DeclarationScope* function_scope = scope()->AsDeclarationScope();
-  DeclarationScope* inner_scope = function_scope;
-  Block* inner_block = nullptr;
-  if (!parameters.is_simple) {
-    inner_scope = NewVarblockScope();
-    inner_scope->set_start_position(scanner()->location().beg_pos);
-    inner_block = factory()->NewBlock(NULL, 8, true, kNoSourcePosition);
-    inner_block->set_scope(inner_scope);
-    body = inner_block->statements();
-  }
-
-  {
-    BlockState block_state(&scope_state_, inner_scope);
-
-    if (IsGeneratorFunction(kind)) {
-      // We produce:
-      //
-      // try { InitialYield; ...body...; return {value: undefined, done: true} }
-      // finally { %_GeneratorClose(generator) }
-      //
-      // - InitialYield yields the actual generator object.
-      // - Any return statement inside the body will have its argument wrapped
-      //   in a "done" iterator result object.
-      // - If the generator terminates for whatever reason, we must close it.
-      //   Hence the finally clause.
-
-      Block* try_block =
-          factory()->NewBlock(nullptr, 3, false, kNoSourcePosition);
-      Expression* initial_yield = BuildInitialYield(pos, kind);
-      try_block->statements()->Add(
-          factory()->NewExpressionStatement(initial_yield, kNoSourcePosition),
-          zone());
-      ParseStatementList(try_block->statements(), Token::RBRACE, CHECK_OK);
-
-      Statement* final_return = factory()->NewReturnStatement(
-          BuildIteratorResult(nullptr, true), kNoSourcePosition);
-      try_block->statements()->Add(final_return, zone());
-
-      Block* finally_block =
-          factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
-      ZoneList<Expression*>* args =
-          new (zone()) ZoneList<Expression*>(1, zone());
-      VariableProxy* call_proxy = factory()->NewVariableProxy(
-          function_state_->generator_object_variable());
-      args->Add(call_proxy, zone());
-      Expression* call = factory()->NewCallRuntime(
-          Runtime::kInlineGeneratorClose, args, kNoSourcePosition);
-      finally_block->statements()->Add(
-          factory()->NewExpressionStatement(call, kNoSourcePosition), zone());
-
-      body->Add(factory()->NewTryFinallyStatement(try_block, finally_block,
-                                                  kNoSourcePosition),
-                zone());
-    } else if (IsAsyncFunction(kind)) {
-      const bool accept_IN = true;
-      ParseAsyncFunctionBody(inner_scope, body, kind, FunctionBodyType::kNormal,
-                             accept_IN, pos, CHECK_OK);
-    } else {
-      ParseStatementList(body, Token::RBRACE, CHECK_OK);
-    }
-
-    if (IsSubclassConstructor(kind)) {
-      body->Add(factory()->NewReturnStatement(ThisExpression(kNoSourcePosition),
-                                              kNoSourcePosition),
-                zone());
-    }
-  }
-
-  Expect(Token::RBRACE, CHECK_OK);
-  scope()->set_end_position(scanner()->location().end_pos);
-
-  if (!parameters.is_simple) {
-    DCHECK_NOT_NULL(inner_scope);
-    DCHECK_EQ(function_scope, scope());
-    DCHECK_EQ(function_scope, inner_scope->outer_scope());
-    DCHECK_EQ(body, inner_block->statements());
-    SetLanguageMode(function_scope, inner_scope->language_mode());
-    Block* init_block = BuildParameterInitializationBlock(parameters, CHECK_OK);
-
-    if (is_sloppy(inner_scope->language_mode())) {
-      InsertSloppyBlockFunctionVarBindings(inner_scope);
-    }
-
-    // TODO(littledan): Merge the two rejection blocks into one
-    if (IsAsyncFunction(kind)) {
-      init_block = BuildRejectPromiseOnException(init_block, CHECK_OK);
-    }
-
-    DCHECK_NOT_NULL(init_block);
-
-    inner_scope->set_end_position(scanner()->location().end_pos);
-    if (inner_scope->FinalizeBlockScope() != nullptr) {
-      CheckConflictingVarDeclarations(inner_scope, CHECK_OK);
-      InsertShadowingVarBindingInitializers(inner_block);
-    }
-    inner_scope = nullptr;
-
-    result->Add(init_block, zone());
-    result->Add(inner_block, zone());
-  } else {
-    DCHECK_EQ(inner_scope, function_scope);
-    if (is_sloppy(function_scope->language_mode())) {
-      InsertSloppyBlockFunctionVarBindings(function_scope);
-    }
-  }
-
-  if (!IsArrowFunction(kind)) {
-    // Declare arguments after parsing the function since lexical 'arguments'
-    // masks the arguments object. Declare arguments before declaring the
-    // function var since the arguments object masks 'function arguments'.
-    function_scope->DeclareArguments(ast_value_factory());
-  }
-
-  if (function_type == FunctionLiteral::kNamedExpression) {
-    Statement* statement;
-    if (function_scope->LookupLocal(function_name) == nullptr) {
-      // Now that we know the language mode, we can create the const assignment
-      // in the previously reserved spot.
-      DCHECK_EQ(function_scope, scope());
-      Variable* fvar = function_scope->DeclareFunctionVar(function_name);
-      VariableProxy* fproxy = factory()->NewVariableProxy(fvar);
-      statement = factory()->NewExpressionStatement(
-          factory()->NewAssignment(Token::INIT, fproxy,
-                                   factory()->NewThisFunction(pos),
-                                   kNoSourcePosition),
-          kNoSourcePosition);
-    } else {
-      statement = factory()->NewEmptyStatement(kNoSourcePosition);
-    }
-    result->Set(kFunctionNameAssignmentIndex, statement);
-  }
-
-  MarkCollectedTailCallExpressions();
-  return result;
-}
-
-Expression* Parser::InstallHomeObject(Expression* function_literal,
-                                      Expression* home_object) {
-  Block* do_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
-  Variable* result_var =
-      scope()->NewTemporary(ast_value_factory()->empty_string());
-  DoExpression* do_expr =
-      factory()->NewDoExpression(do_block, result_var, kNoSourcePosition);
-  Assignment* init = factory()->NewAssignment(
-      Token::ASSIGN, factory()->NewVariableProxy(result_var), function_literal,
-      kNoSourcePosition);
-  do_block->statements()->Add(
-      factory()->NewExpressionStatement(init, kNoSourcePosition), zone());
-  Property* home_object_property = factory()->NewProperty(
-      factory()->NewVariableProxy(result_var),
-      factory()->NewSymbolLiteral("home_object_symbol", kNoSourcePosition),
-      kNoSourcePosition);
-  Assignment* assignment = factory()->NewAssignment(
-      Token::ASSIGN, home_object_property, home_object, kNoSourcePosition);
-  do_block->statements()->Add(
-      factory()->NewExpressionStatement(assignment, kNoSourcePosition), zone());
-  return do_expr;
-}
-
-const AstRawString* ClassFieldVariableName(bool is_name,
-                                           AstValueFactory* ast_value_factory,
-                                           int index) {
-  std::string name =
-      ".class-field-" + std::to_string(index) + (is_name ? "-name" : "-func");
-  return ast_value_factory->GetOneByteString(name.c_str());
-}
-
-FunctionLiteral* Parser::SynthesizeClassFieldInitializer(int count) {
-  DCHECK(count > 0);
-  // Makes a function which reads the names and initializers for each class
-  // field out of deterministically named local variables and sets each property
-  // to the result of evaluating its corresponding initializer in turn.
-
-  // This produces a function which looks like
-  // function () {
-  //   this[.class-field-0-name] = .class-field-0-func();
-  //   this[.class-field-1-name] = .class-field-1-func();
-  //   [...]
-  //   this[.class-field-n-name] = .class-field-n-func();
-  //   return this;
-  // }
-  // except that it performs defineProperty, so that instead of '=' it has
-  // %DefineDataPropertyInLiteral(this, .class-field-0-name,
-  // .class-field-0-func(),
-  //   DONT_ENUM, false)
-
-  RaiseLanguageMode(STRICT);
-  FunctionKind kind = FunctionKind::kConciseMethod;
-  DeclarationScope* initializer_scope = NewFunctionScope(kind);
-  SetLanguageMode(initializer_scope, language_mode());
-  initializer_scope->set_start_position(scanner()->location().end_pos);
-  initializer_scope->set_end_position(scanner()->location().end_pos);
-  FunctionState initializer_state(&function_state_, &scope_state_,
-                                  initializer_scope);
-  ZoneList<Statement*>* body = new (zone()) ZoneList<Statement*>(count, zone());
-  for (int i = 0; i < count; ++i) {
-    const AstRawString* name =
-        ClassFieldVariableName(true, ast_value_factory(), i);
-    VariableProxy* name_proxy = scope()->NewUnresolved(factory(), name);
-    const AstRawString* function_name =
-        ClassFieldVariableName(false, ast_value_factory(), i);
-    VariableProxy* function_proxy =
-        scope()->NewUnresolved(factory(), function_name);
-    ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
-    args->Add(function_proxy, zone());
-    args->Add(ThisExpression(kNoSourcePosition), zone());
-    Expression* call = factory()->NewCallRuntime(Runtime::kInlineCall, args,
-                                                 kNoSourcePosition);
-    ZoneList<Expression*>* define_property_args =
-        new (zone()) ZoneList<Expression*>(5, zone());
-    define_property_args->Add(ThisExpression(kNoSourcePosition), zone());
-    define_property_args->Add(name_proxy, zone());
-    define_property_args->Add(call, zone());
-    define_property_args->Add(
-        factory()->NewNumberLiteral(DONT_ENUM, kNoSourcePosition), zone());
-    define_property_args->Add(
-        factory()->NewNumberLiteral(
-            false,  // TODO(bakkot) function name inference a la class { x =
-                    // function(){}; static y = function(){}; }
-            kNoSourcePosition),
-        zone());
-    body->Add(factory()->NewExpressionStatement(
-                  factory()->NewCallRuntime(
-                      Runtime::kDefineDataProperty,
-                      define_property_args,  // TODO(bakkot) verify that this is
-                      // the same as object_define_property
-                      kNoSourcePosition),
-                  kNoSourcePosition),
-              zone());
-  }
-  body->Add(factory()->NewReturnStatement(ThisExpression(kNoSourcePosition),
-                                          kNoSourcePosition),
-            zone());
-  FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
-      ast_value_factory()->empty_string(), initializer_scope, body,
-      initializer_state.materialized_literal_count(),
-      initializer_state.expected_property_count(), 0, count,
-      FunctionLiteral::kNoDuplicateParameters,
-      FunctionLiteral::kAnonymousExpression,
-      FunctionLiteral::kShouldLazyCompile, initializer_scope->start_position(),
-      true);
-  function_literal->set_is_class_field_initializer(true);
-  return function_literal;
-}
-
-FunctionLiteral* Parser::InsertClassFieldInitializer(
-    FunctionLiteral* constructor) {
-  Statement* call_initializer = factory()->NewExpressionStatement(
-      CallClassFieldInitializer(
-          constructor->scope(),
-          constructor->scope()->NewUnresolved(
-              factory(), ast_value_factory()->this_string(), kNoSourcePosition,
-              THIS_VARIABLE)),
-      kNoSourcePosition);
-  constructor->body()->InsertAt(0, call_initializer, zone());
-  return constructor;
-}
-
-// If a class name is specified, this method declares the class variable
-// and sets class_info->proxy to point to that name.
-void Parser::DeclareClassVariable(const AstRawString* name, Scope* block_scope,
+void Parser::DeclareClassVariable(const AstRawString* name,
                                   ClassInfo* class_info, int class_token_pos,
                                   bool* ok) {
 #ifdef DEBUG
@@ -3446,7 +3208,7 @@
   if (name != nullptr) {
     class_info->proxy = factory()->NewVariableProxy(name, NORMAL_VARIABLE);
     Declaration* declaration = factory()->NewVariableDeclaration(
-        class_info->proxy, block_scope, class_token_pos);
+        class_info->proxy, scope(), class_token_pos);
     Declare(declaration, DeclarationDescriptor::NORMAL, CONST,
             Variable::DefaultInitializationFlag(CONST), ok);
   }
@@ -3455,13 +3217,14 @@
 // This method declares a property of the given class.  It updates the
 // following fields of class_info, as appropriate:
 //   - constructor
-//   - static_initializer_var
-//   - instance_field_initializers
 //   - properties
 void Parser::DeclareClassProperty(const AstRawString* class_name,
                                   ClassLiteralProperty* property,
+                                  ClassLiteralProperty::Kind kind,
+                                  bool is_static, bool is_constructor,
                                   ClassInfo* class_info, bool* ok) {
-  if (class_info->has_seen_constructor && class_info->constructor == nullptr) {
+  if (is_constructor) {
+    DCHECK(!class_info->constructor);
     class_info->constructor = GetPropertyValue(property)->AsFunctionLiteral();
     DCHECK_NOT_NULL(class_info->constructor);
     class_info->constructor->set_raw_name(
@@ -3472,47 +3235,7 @@
 
   if (property->kind() == ClassLiteralProperty::FIELD) {
     DCHECK(allow_harmony_class_fields());
-    if (property->is_static()) {
-      if (class_info->static_initializer_var == nullptr) {
-        class_info->static_initializer_var =
-            NewTemporary(ast_value_factory()->empty_string());
-      }
-      // TODO(bakkot) only do this conditionally
-      Expression* function = InstallHomeObject(
-          property->value(),
-          factory()->NewVariableProxy(class_info->static_initializer_var));
-      ZoneList<Expression*>* args =
-          new (zone()) ZoneList<Expression*>(2, zone());
-      args->Add(function, zone());
-      args->Add(factory()->NewVariableProxy(class_info->static_initializer_var),
-                zone());
-      Expression* call = factory()->NewCallRuntime(Runtime::kInlineCall, args,
-                                                   kNoSourcePosition);
-      property->set_value(call);
-    } else {
-      // if (is_computed_name) { // TODO(bakkot) figure out why this is
-      // necessary for non-computed names in full-codegen
-      ZoneList<Expression*>* to_name_args =
-          new (zone()) ZoneList<Expression*>(1, zone());
-      to_name_args->Add(property->key(), zone());
-      property->set_key(factory()->NewCallRuntime(
-          Runtime::kToName, to_name_args, kNoSourcePosition));
-      //}
-      const AstRawString* name = ClassFieldVariableName(
-          true, ast_value_factory(),
-          class_info->instance_field_initializers->length());
-      VariableProxy* name_proxy =
-          factory()->NewVariableProxy(name, NORMAL_VARIABLE);
-      Declaration* name_declaration = factory()->NewVariableDeclaration(
-          name_proxy, scope(), kNoSourcePosition);
-      Variable* name_var =
-          Declare(name_declaration, DeclarationDescriptor::NORMAL, CONST,
-                  kNeedsInitialization, ok, scope());
-      DCHECK(*ok);
-      if (!*ok) return;
-      class_info->instance_field_initializers->Add(property->value(), zone());
-      property->set_value(factory()->NewVariableProxy(name_var));
-    }
+    // TODO(littledan): Implement class fields
   }
   class_info->properties->Add(property, zone());
 }
@@ -3522,9 +3245,9 @@
 //   - constructor (if missing, it updates it with a default constructor)
 //   - proxy
 //   - extends
-//   - static_initializer_var
-//   - instance_field_initializers
 //   - properties
+//   - has_name_static_property
+//   - has_static_computed_names
 Expression* Parser::RewriteClassLiteral(const AstRawString* name,
                                         ClassInfo* class_info, int pos,
                                         bool* ok) {
@@ -3534,22 +3257,12 @@
   DoExpression* do_expr = factory()->NewDoExpression(do_block, result_var, pos);
 
   bool has_extends = class_info->extends != nullptr;
-  bool has_instance_fields =
-      class_info->instance_field_initializers->length() > 0;
-  DCHECK(!has_instance_fields || allow_harmony_class_fields());
   bool has_default_constructor = class_info->constructor == nullptr;
   if (has_default_constructor) {
     class_info->constructor =
-        DefaultConstructor(name, has_extends, has_instance_fields, pos, end_pos,
-                           scope()->language_mode());
+        DefaultConstructor(name, has_extends, pos, end_pos);
   }
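In source terms, the synthesized default constructor behaves roughly as follows (base and derived cases differ because the derived one must forward to super):

    class Base {}                  // as if: constructor() {}
    class Derived extends Base {}  // as if: constructor(...args) { super(...args); }
    console.log(new Derived(1, 2) instanceof Base);  // true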
 
-  if (has_instance_fields && !has_extends) {
-    class_info->constructor =
-        InsertClassFieldInitializer(class_info->constructor);
-    class_info->constructor->set_requires_class_field_init(true);
-  }  // The derived case is handled by rewriting super calls.
-
   scope()->set_end_position(end_pos);
 
   if (name != nullptr) {
@@ -3559,12 +3272,9 @@
 
   ClassLiteral* class_literal = factory()->NewClassLiteral(
       class_info->proxy, class_info->extends, class_info->constructor,
-      class_info->properties, pos, end_pos);
-
-  if (class_info->static_initializer_var != nullptr) {
-    class_literal->set_static_initializer_proxy(
-        factory()->NewVariableProxy(class_info->static_initializer_var));
-  }
+      class_info->properties, pos, end_pos,
+      class_info->has_name_static_property,
+      class_info->has_static_computed_names);
 
   do_block->statements()->Add(
       factory()->NewExpressionStatement(
@@ -3573,53 +3283,6 @@
                                    class_literal, kNoSourcePosition),
           pos),
       zone());
-  if (allow_harmony_class_fields() &&
-      (has_instance_fields || (has_extends && !has_default_constructor))) {
-    // Default constructors for derived classes without fields will not try to
-    // read this variable, so there's no need to create it.
-    const AstRawString* init_fn_name =
-        ast_value_factory()->dot_class_field_init_string();
-    Variable* init_fn_var = scope()->DeclareLocal(
-        init_fn_name, CONST, kCreatedInitialized, NORMAL_VARIABLE);
-    Expression* initializer =
-        has_instance_fields
-            ? static_cast<Expression*>(SynthesizeClassFieldInitializer(
-                  class_info->instance_field_initializers->length()))
-            : factory()->NewBooleanLiteral(false, kNoSourcePosition);
-    Assignment* assignment = factory()->NewAssignment(
-        Token::INIT, factory()->NewVariableProxy(init_fn_var), initializer,
-        kNoSourcePosition);
-    do_block->statements()->Add(
-        factory()->NewExpressionStatement(assignment, kNoSourcePosition),
-        zone());
-  }
-  for (int i = 0; i < class_info->instance_field_initializers->length(); ++i) {
-    const AstRawString* function_name =
-        ClassFieldVariableName(false, ast_value_factory(), i);
-    VariableProxy* function_proxy =
-        factory()->NewVariableProxy(function_name, NORMAL_VARIABLE);
-    Declaration* function_declaration = factory()->NewVariableDeclaration(
-        function_proxy, scope(), kNoSourcePosition);
-    Variable* function_var =
-        Declare(function_declaration, DeclarationDescriptor::NORMAL, CONST,
-                kNeedsInitialization, ok, scope());
-    if (!*ok) return nullptr;
-    Property* prototype_property = factory()->NewProperty(
-        factory()->NewVariableProxy(result_var),
-        factory()->NewStringLiteral(ast_value_factory()->prototype_string(),
-                                    kNoSourcePosition),
-        kNoSourcePosition);
-    Expression* function_value = InstallHomeObject(
-        class_info->instance_field_initializers->at(i),
-        prototype_property);  // TODO(bakkot) ideally this would be conditional,
-                              // especially in trivial cases
-    Assignment* function_assignment = factory()->NewAssignment(
-        Token::INIT, factory()->NewVariableProxy(function_var), function_value,
-        kNoSourcePosition);
-    do_block->statements()->Add(factory()->NewExpressionStatement(
-                                    function_assignment, kNoSourcePosition),
-                                zone());
-  }
   do_block->set_scope(scope()->FinalizeBlockScope());
   do_expr->set_represented_function(class_info->constructor);
   AddFunctionForNameInference(class_info->constructor);
@@ -3655,7 +3318,7 @@
   DCHECK(inner_scope->is_declaration_scope());
   Scope* function_scope = inner_scope->outer_scope();
   DCHECK(function_scope->is_function_scope());
-  BlockState block_state(&scope_state_, inner_scope);
+  BlockState block_state(&scope_, inner_scope);
   for (Declaration* decl : *inner_scope->declarations()) {
     if (decl->proxy()->var()->mode() != VAR || !decl->IsVariableDeclaration()) {
       continue;
@@ -3736,21 +3399,18 @@
   }
 }
 
-
-void Parser::Internalize(Isolate* isolate, Handle<Script> script, bool error) {
-  // Internalize strings and values.
-  ast_value_factory()->Internalize(isolate);
-
-  // Error processing.
-  if (error) {
-    if (stack_overflow()) {
-      isolate->StackOverflow();
-    } else {
-      DCHECK(pending_error_handler_.has_pending_error());
-      pending_error_handler_.ThrowPendingError(isolate, script);
-    }
+void Parser::ReportErrors(Isolate* isolate, Handle<Script> script) {
+  if (stack_overflow()) {
+    isolate->StackOverflow();
+  } else {
+    DCHECK(pending_error_handler_.has_pending_error());
+    // Internalize AST values for throwing the pending error.
+    ast_value_factory()->Internalize(isolate);
+    pending_error_handler_.ThrowPendingError(isolate, script);
   }
+}
 
+void Parser::UpdateStatistics(Isolate* isolate, Handle<Script> script) {
   // Move statistics to Isolate.
   for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
        ++feature) {
@@ -3771,47 +3431,10 @@
           v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE) {
     // Copy over the counters from the background thread to the main counters on
     // the isolate.
-    // TODO(cbruni,lpy): properly attach the runtime stats to the trace for
-    // background parsing.
     isolate->counters()->runtime_call_stats()->Add(runtime_call_stats_);
   }
 }
 
-
-// ----------------------------------------------------------------------------
-// The Parser interface.
-
-
-bool Parser::ParseStatic(ParseInfo* info) {
-  Parser parser(info);
-  if (parser.Parse(info)) {
-    info->set_language_mode(info->literal()->language_mode());
-    return true;
-  }
-  return false;
-}
-
-
-bool Parser::Parse(ParseInfo* info) {
-  DCHECK(info->literal() == NULL);
-  FunctionLiteral* result = NULL;
-  // Ok to use Isolate here; this function is only called in the main thread.
-  DCHECK(parsing_on_main_thread_);
-  Isolate* isolate = info->isolate();
-
-  if (info->is_toplevel()) {
-    SetCachedData(info);
-    result = ParseProgram(isolate, info);
-  } else {
-    result = ParseFunction(isolate, info);
-  }
-  info->set_literal(result);
-
-  Internalize(isolate, info->script(), result == NULL);
-  return (result != NULL);
-}
-
-
 void Parser::ParseOnBackground(ParseInfo* info) {
   parsing_on_main_thread_ = false;
 
@@ -3819,7 +3442,13 @@
   FunctionLiteral* result = NULL;
 
   ParserLogger logger;
-  if (produce_cached_parse_data()) log_ = &logger;
+  if (produce_cached_parse_data()) {
+    if (allow_lazy_) {
+      log_ = &logger;
+    } else {
+      compile_options_ = ScriptCompiler::kNoCompileOptions;
+    }
+  }
   if (FLAG_runtime_stats) {
     // Create separate runtime stats for background parsing.
     runtime_call_stats_ = new (zone()) RuntimeCallStats();
@@ -3833,7 +3462,8 @@
   } else {
     DCHECK(info->character_stream() == nullptr);
     stream.reset(ScannerStream::For(info->source_stream(),
-                                    info->source_stream_encoding()));
+                                    info->source_stream_encoding(),
+                                    runtime_call_stats_));
     stream_ptr = stream.get();
   }
   DCHECK(info->maybe_outer_scope_info().is_null());
@@ -3863,9 +3493,13 @@
     if (result != NULL) *info->cached_data() = logger.GetScriptData();
     log_ = NULL;
   }
-  if (FLAG_runtime_stats) {
-    // TODO(cbruni,lpy): properly attach the runtime stats to the trace for
-    // background parsing.
+  if (FLAG_runtime_stats &
+      v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+    auto value = v8::tracing::TracedValue::Create();
+    runtime_call_stats_->Dump(value.get());
+    TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"),
+                         "V8.RuntimeStats", TRACE_EVENT_SCOPE_THREAD,
+                         "runtime-call-stats", std::move(value));
   }
 }
 
@@ -3873,15 +3507,20 @@
   return new (zone()) TemplateLiteral(zone(), pos);
 }
 
-
-void Parser::AddTemplateSpan(TemplateLiteralState* state, bool tail) {
+void Parser::AddTemplateSpan(TemplateLiteralState* state, bool should_cook,
+                             bool tail) {
+  DCHECK(should_cook || allow_harmony_template_escapes());
   int pos = scanner()->location().beg_pos;
   int end = scanner()->location().end_pos - (tail ? 1 : 2);
-  const AstRawString* tv = scanner()->CurrentSymbol(ast_value_factory());
   const AstRawString* trv = scanner()->CurrentRawSymbol(ast_value_factory());
-  Literal* cooked = factory()->NewStringLiteral(tv, pos);
   Literal* raw = factory()->NewStringLiteral(trv, pos);
-  (*state)->AddTemplateSpan(cooked, raw, end, zone());
+  if (should_cook) {
+    const AstRawString* tv = scanner()->CurrentSymbol(ast_value_factory());
+    Literal* cooked = factory()->NewStringLiteral(tv, pos);
+    (*state)->AddTemplateSpan(cooked, raw, end, zone());
+  } else {
+    (*state)->AddTemplateSpan(GetLiteralUndefined(pos), raw, end, zone());
+  }
 }
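Behavior enabled by --harmony-template-escapes (since standardized in ES2018): an invalid escape in a tagged template is no longer a syntax error; the cooked span becomes undefined while the raw span survives. For example:

    function tag(strings) {
      return [strings[0], strings.raw[0]];
    }
    console.log(tag`\unicode`);  // [undefined, "\\unicode"]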
 
 
@@ -3925,19 +3564,14 @@
   } else {
     uint32_t hash = ComputeTemplateLiteralHash(lit);
 
-    int cooked_idx = function_state_->NextMaterializedLiteralIndex();
-    int raw_idx = function_state_->NextMaterializedLiteralIndex();
-
     // $getTemplateCallSite
     ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(4, zone());
     args->Add(factory()->NewArrayLiteral(
-                  const_cast<ZoneList<Expression*>*>(cooked_strings),
-                  cooked_idx, pos),
+                  const_cast<ZoneList<Expression*>*>(cooked_strings), pos),
               zone());
-    args->Add(
-        factory()->NewArrayLiteral(
-            const_cast<ZoneList<Expression*>*>(raw_strings), raw_idx, pos),
-        zone());
+    args->Add(factory()->NewArrayLiteral(
+                  const_cast<ZoneList<Expression*>*>(raw_strings), pos),
+              zone());
 
     // Truncate hash to Smi-range.
     Smi* hash_obj = Smi::cast(Internals::IntToSmi(static_cast<int>(hash)));
@@ -3985,6 +3619,19 @@
   return running_hash;
 }
 
+namespace {
+
+bool OnlyLastArgIsSpread(ZoneList<Expression*>* args) {
+  for (int i = 0; i < args->length() - 1; i++) {
+    if (args->at(i)->IsSpread()) {
+      return false;
+    }
+  }
+  return args->at(args->length() - 1)->IsSpread();
+}
+
+}  // namespace
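A user-level mirror of OnlyLastArgIsSpread, assuming a hypothetical ESTree-style argument array (for illustration only):

    const onlyLastArgIsSpread = (args) =>
        args.length > 0 &&
        args.slice(0, -1).every((a) => a.type !== "SpreadElement") &&
        args[args.length - 1].type === "SpreadElement";

    onlyLastArgIsSpread([{ type: "Identifier" }, { type: "SpreadElement" }]);  // f(a, ...b) -> true
    onlyLastArgIsSpread([{ type: "SpreadElement" }, { type: "Identifier" }]); // f(...a, b) -> false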
+
 ZoneList<Expression*>* Parser::PrepareSpreadArguments(
     ZoneList<Expression*>* list) {
   ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(1, zone());
@@ -4021,9 +3668,7 @@
         while (i < n && !list->at(i)->IsSpread()) {
           unspread->Add(list->at(i++), zone());
         }
-        int literal_index = function_state_->NextMaterializedLiteralIndex();
-        args->Add(factory()->NewArrayLiteral(unspread, literal_index,
-                                             kNoSourcePosition),
+        args->Add(factory()->NewArrayLiteral(unspread, kNoSourcePosition),
                   zone());
 
         if (i == n) break;
@@ -4048,11 +3693,21 @@
 }
 
 Expression* Parser::SpreadCall(Expression* function,
-                               ZoneList<Expression*>* args, int pos) {
+                               ZoneList<Expression*>* args, int pos,
+                               Call::PossiblyEval is_possibly_eval) {
+  // If only the last argument is a spread, emit a plain Call and let the
+  // BytecodeGenerator handle it. The [Call,New]WithSpread bytecodes aren't
+  // used with tailcalls; see https://crbug.com/v8/5867.
+  if (!allow_tailcalls() && OnlyLastArgIsSpread(args)) {
+    return factory()->NewCall(function, args, pos);
+  }
+
   if (function->IsSuperCallReference()) {
     // Super calls
     // $super_constructor = %_GetSuperConstructor(<this-function>)
     // %reflect_construct($super_constructor, args, new.target)
+
+    args = PrepareSpreadArguments(args);
     ZoneList<Expression*>* tmp = new (zone()) ZoneList<Expression*>(1, zone());
     tmp->Add(function->AsSuperCallReference()->this_function_var(), zone());
     Expression* super_constructor = factory()->NewCallRuntime(
@@ -4062,6 +3717,7 @@
     return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args,
                                      pos);
   } else {
+    args = PrepareSpreadArguments(args);
     if (function->IsProperty()) {
       // Method calls
       if (function->AsProperty()->IsSuperAccess()) {
@@ -4092,6 +3748,11 @@
 
 Expression* Parser::SpreadCallNew(Expression* function,
                                   ZoneList<Expression*>* args, int pos) {
+  if (OnlyLastArgIsSpread(args)) {
+    // A single trailing spread is handled directly by the NewWithSpread
+    // bytecode.
+    return factory()->NewCallNew(function, args, pos);
+  }
+  args = PrepareSpreadArguments(args);
   args->InsertAt(0, function, zone());
 
   return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args, pos);
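For the cases that are not a single trailing spread, the %reflect_construct rewrite is semantically the same as calling Reflect.construct with a flattened argument list:

    class P { constructor(...xs) { this.xs = xs; } }
    const a = [1, 2];
    console.log(new P(...a, 3).xs);                   // [1, 2, 3], via the parser rewrite
    console.log(Reflect.construct(P, [...a, 3]).xs);  // [1, 2, 3], the equivalent form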
@@ -4139,23 +3800,13 @@
 // when desugaring the body of async_function.
 void Parser::PrepareAsyncFunctionBody(ZoneList<Statement*>* body,
                                       FunctionKind kind, int pos) {
-  // function async_function() {
-  //   .generator_object = %CreateGeneratorObject();
-  //   BuildRejectPromiseOnException({
-  //     ... block ...
-  //     return %ResolvePromise(.promise, expr), .promise;
-  //   })
-  // }
-
-  Variable* temp =
-      NewTemporary(ast_value_factory()->dot_generator_object_string());
-  function_state_->set_generator_object_variable(temp);
-
-  Expression* init_generator_variable = factory()->NewAssignment(
-      Token::INIT, factory()->NewVariableProxy(temp),
-      BuildCreateJSGeneratorObject(pos, kind), kNoSourcePosition);
-  body->Add(factory()->NewExpressionStatement(init_generator_variable,
-                                              kNoSourcePosition),
+  // When parsing an async arrow function, we get here without having called
+  // PrepareGeneratorVariables yet, so do it now.
+  if (function_state_->generator_object_variable() == nullptr) {
+    PrepareGeneratorVariables();
+  }
+  body->Add(factory()->NewExpressionStatement(
+                BuildCreateJSGeneratorObject(pos, kind), kNoSourcePosition),
             zone());
 }
 
@@ -4163,7 +3814,7 @@
 void Parser::RewriteAsyncFunctionBody(ZoneList<Statement*>* body, Block* block,
                                       Expression* return_value, bool* ok) {
   // function async_function() {
-  //   .generator_object = %CreateGeneratorObject();
+  //   .generator_object = %CreateJSGeneratorObject();
   //   BuildRejectPromiseOnException({
   //     ... block ...
   //     return %ResolvePromise(.promise, expr), .promise;
@@ -4174,7 +3825,7 @@
   block->statements()->Add(
       factory()->NewReturnStatement(return_value, return_value->position()),
       zone());
-  block = BuildRejectPromiseOnException(block, CHECK_OK_VOID);
+  block = BuildRejectPromiseOnException(block);
   body->Add(block, zone());
 }
 
@@ -4200,10 +3851,7 @@
   // TODO(littledan): investigate why this ordering is needed in more detail.
   Variable* generator_object_variable =
       function_state_->generator_object_variable();
-
-  // If generator_object_variable is null,
-  // TODO(littledan): Is this necessary?
-  if (!generator_object_variable) return value;
+  DCHECK_NOT_NULL(generator_object_variable);
 
   const int nopos = kNoSourcePosition;
 
@@ -4445,12 +4093,11 @@
             kNoSourcePosition);
       }
       // for (each of spread) %AppendElement($R, each)
-      ForEachStatement* loop = factory()->NewForEachStatement(
-          ForEachStatement::ITERATE, nullptr, kNoSourcePosition);
+      ForOfStatement* loop =
+          factory()->NewForOfStatement(nullptr, kNoSourcePosition);
       const bool finalize = false;
-      InitializeForOfStatement(loop->AsForOfStatement(),
-                               factory()->NewVariableProxy(each), subject,
-                               append_body, finalize);
+      InitializeForOfStatement(loop, factory()->NewVariableProxy(each), subject,
+                               append_body, finalize, IteratorType::kNormal);
       do_block->statements()->Add(loop, zone());
     }
   }
@@ -4531,12 +4178,11 @@
 //     const kReturn = 1;
 //     const kThrow = 2;
 //
-//     let input = function.sent;
+//     let input = undefined;
 //     let mode = kNext;
 //     let output = undefined;
 //
-//     let iterator = iterable[Symbol.iterator]();
-//     if (!IS_RECEIVER(iterator)) throw MakeTypeError(kSymbolIteratorInvalid);
+//     let iterator = GetIterator(iterable);
 //
 //     while (true) {
 //       // From the generator to the iterator:
@@ -4639,41 +4285,18 @@
     initialize_output = factory()->NewExpressionStatement(assignment, nopos);
   }
 
-  // let iterator = iterable[Symbol.iterator];
+  // let iterator = GetIterator(iterable);
   Variable* var_iterator = NewTemporary(ast_value_factory()->empty_string());
   Statement* get_iterator;
   {
-    Expression* iterator = GetIterator(iterable, nopos);
+    Expression* iterator =
+        factory()->NewGetIterator(iterable, IteratorType::kNormal, nopos);
     Expression* iterator_proxy = factory()->NewVariableProxy(var_iterator);
     Expression* assignment = factory()->NewAssignment(
         Token::ASSIGN, iterator_proxy, iterator, nopos);
     get_iterator = factory()->NewExpressionStatement(assignment, nopos);
   }
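The kNext arm of the while loop sketched in the comment above can be modelled in plain JavaScript (the throw/return delegation arms and the iterator-close bookkeeping are elided):

    function* delegate(iterable) {
      const iterator = iterable[Symbol.iterator]();  // let iterator = GetIterator(iterable)
      let input;
      let output = iterator.next();
      while (!output.done) {
        input = yield output.value;     // from the iterator to the generator's user
        output = iterator.next(input);  // from the generator to the iterator
      }
      return output.value;
    }
    console.log([...delegate([1, 2, 3])]);  // [1, 2, 3]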
 
-  // if (!IS_RECEIVER(iterator)) throw MakeTypeError(kSymbolIteratorInvalid);
-  Statement* validate_iterator;
-  {
-    Expression* is_receiver_call;
-    {
-      auto args = new (zone()) ZoneList<Expression*>(1, zone());
-      args->Add(factory()->NewVariableProxy(var_iterator), zone());
-      is_receiver_call =
-          factory()->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
-    }
-
-    Statement* throw_call;
-    {
-      Expression* call =
-          NewThrowTypeError(MessageTemplate::kSymbolIteratorInvalid,
-                            ast_value_factory()->empty_string(), nopos);
-      throw_call = factory()->NewExpressionStatement(call, nopos);
-    }
-
-    validate_iterator = factory()->NewIfStatement(
-        is_receiver_call, factory()->NewEmptyStatement(nopos), throw_call,
-        nopos);
-  }
-
   // output = iterator.next(input);
   Statement* call_next;
   {
@@ -4749,7 +4372,8 @@
     Block* then = factory()->NewBlock(nullptr, 4 + 1, false, nopos);
     BuildIteratorCloseForCompletion(
         scope(), then->statements(), var_iterator,
-        factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos));
+        factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos),
+        IteratorType::kNormal);
     then->statements()->Add(throw_call, zone());
     check_throw = factory()->NewIfStatement(
         condition, then, factory()->NewEmptyStatement(nopos), nopos);
@@ -4906,8 +4530,7 @@
     Scope* catch_scope = NewScope(CATCH_SCOPE);
     catch_scope->set_is_hidden();
     const AstRawString* name = ast_value_factory()->dot_catch_string();
-    Variable* catch_variable = catch_scope->DeclareLocal(
-        name, VAR, kCreatedInitialized, NORMAL_VARIABLE);
+    Variable* catch_variable = catch_scope->DeclareLocal(name, VAR);
 
     try_catch = factory()->NewTryCatchStatementForDesugaring(
         try_block, catch_scope, catch_variable, catch_block, nopos);
@@ -4978,12 +4601,11 @@
     // The rewriter needs to process the get_value statement only, hence we
     // put the preceding statements into an init block.
 
-    Block* do_block_ = factory()->NewBlock(nullptr, 7, true, nopos);
+    Block* do_block_ = factory()->NewBlock(nullptr, 6, true, nopos);
     do_block_->statements()->Add(initialize_input, zone());
     do_block_->statements()->Add(initialize_mode, zone());
     do_block_->statements()->Add(initialize_output, zone());
     do_block_->statements()->Add(get_iterator, zone());
-    do_block_->statements()->Add(validate_iterator, zone());
     do_block_->statements()->Add(loop, zone());
     do_block_->statements()->Add(maybe_return_value, zone());
 
@@ -5118,7 +4740,8 @@
 
 void Parser::FinalizeIteratorUse(Scope* use_scope, Variable* completion,
                                  Expression* condition, Variable* iter,
-                                 Block* iterator_use, Block* target) {
+                                 Block* iterator_use, Block* target,
+                                 IteratorType type) {
   //
   // This function adds two statements to [target], corresponding to the
   // following code:
@@ -5174,8 +4797,8 @@
   {
     Block* block = factory()->NewBlock(nullptr, 2, true, nopos);
     Expression* proxy = factory()->NewVariableProxy(completion);
-    BuildIteratorCloseForCompletion(use_scope, block->statements(), iter,
-                                    proxy);
+    BuildIteratorCloseForCompletion(use_scope, block->statements(), iter, proxy,
+                                    type);
     DCHECK(block->statements()->length() == 2);
 
     maybe_close = factory()->NewBlock(nullptr, 1, true, nopos);
@@ -5194,8 +4817,7 @@
   {
     Scope* catch_scope = NewScopeWithParent(use_scope, CATCH_SCOPE);
     Variable* catch_variable =
-        catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
-                                  kCreatedInitialized, NORMAL_VARIABLE);
+        catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR);
     catch_scope->set_is_hidden();
 
     Statement* rethrow;
@@ -5235,7 +4857,8 @@
 void Parser::BuildIteratorCloseForCompletion(Scope* scope,
                                              ZoneList<Statement*>* statements,
                                              Variable* iterator,
-                                             Expression* completion) {
+                                             Expression* completion,
+                                             IteratorType type) {
   //
   // This function adds two statements to [statements], corresponding to the
   // following code:
@@ -5246,9 +4869,17 @@
   //       if (!IS_CALLABLE(iteratorReturn)) {
   //         throw MakeTypeError(kReturnMethodNotCallable);
   //       }
-  //       try { %_Call(iteratorReturn, iterator) } catch (_) { }
+  //       [if (IteratorType == kAsync)]
+  //           try { Await(%_Call(iteratorReturn, iterator)) } catch (_) { }
+  //       [else]
+  //           try { %_Call(iteratorReturn, iterator) } catch (_) { }
+  //       [endif]
   //     } else {
-  //       let output = %_Call(iteratorReturn, iterator);
+  //       [if (IteratorType == kAsync)]
+  //           let output = Await(%_Call(iteratorReturn, iterator));
+  //       [else]
+  //           let output = %_Call(iteratorReturn, iterator);
+  //       [endif]
   //       if (!IS_RECEIVER(output)) {
   //         %ThrowIterResultNotAnObject(output);
   //       }
@@ -5293,6 +4924,10 @@
     Expression* call =
         factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
 
+    if (type == IteratorType::kAsync) {
+      call = RewriteAwaitExpression(call, nopos);
+    }
+
     Block* try_block = factory()->NewBlock(nullptr, 1, false, nopos);
     try_block->statements()->Add(factory()->NewExpressionStatement(call, nopos),
                                  zone());
@@ -5301,8 +4936,7 @@
 
     Scope* catch_scope = NewScopeWithParent(scope, CATCH_SCOPE);
     Variable* catch_variable =
-        catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
-                                  kCreatedInitialized, NORMAL_VARIABLE);
+        catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR);
     catch_scope->set_is_hidden();
 
     try_call_return = factory()->NewTryCatchStatement(
@@ -5323,6 +4957,9 @@
       args->Add(factory()->NewVariableProxy(iterator), zone());
       Expression* call =
           factory()->NewCallRuntime(Runtime::kInlineCall, args, nopos);
+      if (type == IteratorType::kAsync) {
+        call = RewriteAwaitExpression(call, nopos);
+      }
 
       Expression* output_proxy = factory()->NewVariableProxy(var_output);
       Expression* assignment =
@@ -5393,7 +5030,8 @@
 }
 
 Statement* Parser::FinalizeForOfStatement(ForOfStatement* loop,
-                                          Variable* var_completion, int pos) {
+                                          Variable* var_completion,
+                                          IteratorType type, int pos) {
   //
   // This function replaces the loop with the following wrapping:
   //
@@ -5406,7 +5044,7 @@
   //       %ReThrow(e);
   //     }
   //   } finally {
-  //     if (!(completion === kNormalCompletion || IS_UNDEFINED(#iterator))) {
+  //     if (!(completion === kNormalCompletion)) {
   //       #BuildIteratorCloseForCompletion(#iterator, completion)
   //     }
   //   }
@@ -5417,18 +5055,13 @@
 
   const int nopos = kNoSourcePosition;
 
-  // !(completion === kNormalCompletion || IS_UNDEFINED(#iterator))
+  // !(completion === kNormalCompletion)
   Expression* closing_condition;
   {
-    Expression* lhs = factory()->NewCompareOperation(
+    Expression* cmp = factory()->NewCompareOperation(
         Token::EQ_STRICT, factory()->NewVariableProxy(var_completion),
         factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
-    Expression* rhs = factory()->NewCompareOperation(
-        Token::EQ_STRICT, factory()->NewVariableProxy(loop->iterator()),
-        factory()->NewUndefinedLiteral(nopos), nopos);
-    closing_condition = factory()->NewUnaryOperation(
-        Token::NOT, factory()->NewBinaryOperation(Token::OR, lhs, rhs, nopos),
-        nopos);
+    closing_condition = factory()->NewUnaryOperation(Token::NOT, cmp, nopos);
   }
 
   Block* final_loop = factory()->NewBlock(nullptr, 2, false, nopos);
@@ -5442,7 +5075,7 @@
     DCHECK_EQ(scope()->scope_type(), BLOCK_SCOPE);
 
     FinalizeIteratorUse(loop_scope, var_completion, closing_condition,
-                        loop->iterator(), try_block, final_loop);
+                        loop->iterator(), try_block, final_loop, type);
   }
 
   return final_loop;
diff --git a/src/parsing/parser.h b/src/parsing/parser.h
index 736419d..e2223d9 100644
--- a/src/parsing/parser.h
+++ b/src/parsing/parser.h
@@ -10,10 +10,12 @@
 #include "src/base/compiler-specific.h"
 #include "src/globals.h"
 #include "src/parsing/parser-base.h"
+#include "src/parsing/parsing.h"
 #include "src/parsing/preparse-data-format.h"
 #include "src/parsing/preparse-data.h"
 #include "src/parsing/preparser.h"
 #include "src/pending-compilation-error-handler.h"
+#include "src/utils.h"
 
 namespace v8 {
 
@@ -25,6 +27,7 @@
 class ScriptData;
 class ParserTarget;
 class ParserTargetScope;
+class PreParsedScopeData;
 
 class FunctionEntry BASE_EMBEDDED {
  public:
@@ -33,9 +36,9 @@
     kEndPositionIndex,
     kNumParametersIndex,
     kFunctionLengthIndex,
-    kLiteralCountIndex,
     kPropertyCountIndex,
     kFlagsIndex,
+    kNumInnerFunctionsIndex,
     kSize
   };
 
@@ -65,7 +68,6 @@
   int end_pos() const { return backing_[kEndPositionIndex]; }
   int num_parameters() const { return backing_[kNumParametersIndex]; }
   int function_length() const { return backing_[kFunctionLengthIndex]; }
-  int literal_count() const { return backing_[kLiteralCountIndex]; }
   int property_count() const { return backing_[kPropertyCountIndex]; }
   LanguageMode language_mode() const {
     return LanguageModeField::decode(backing_[kFlagsIndex]);
@@ -79,6 +81,7 @@
   bool has_duplicate_parameters() const {
     return HasDuplicateParametersField::decode(backing_[kFlagsIndex]);
   }
+  int num_inner_functions() const { return backing_[kNumInnerFunctionsIndex]; }
 
   bool is_valid() const { return !backing_.is_empty(); }
 
@@ -135,7 +138,7 @@
 
 
 struct ParserFormalParameters : FormalParametersBase {
-  struct Parameter {
+  struct Parameter : public ZoneObject {
     Parameter(const AstRawString* name, Expression* pattern,
               Expression* initializer, int initializer_end_position,
               bool is_rest)
@@ -149,16 +152,23 @@
     Expression* initializer;
     int initializer_end_position;
     bool is_rest;
+    Parameter* next_parameter = nullptr;
     bool is_simple() const {
       return pattern->IsVariableProxy() && initializer == nullptr && !is_rest;
     }
+
+    bool is_nondestructuring_rest() const {
+      DCHECK_IMPLIES(is_rest, initializer == nullptr);
+      return is_rest && pattern->IsVariableProxy();
+    }
+
+    Parameter** next() { return &next_parameter; }
+    Parameter* const* next() const { return &next_parameter; }
   };
 
   explicit ParserFormalParameters(DeclarationScope* scope)
-      : FormalParametersBase(scope), params(4, scope->zone()) {}
-  ZoneList<Parameter> params;
-
-  const Parameter& at(int i) const { return params[i]; }
+      : FormalParametersBase(scope) {}
+  ThreadedList<Parameter> params;
 };
 
 template <>
@@ -203,11 +213,6 @@
 
   static bool const IsPreParser() { return false; }
 
-  // Parses the source code represented by the compilation info and sets its
-  // function literal.  Returns false (and deallocates any allocated AST
-  // nodes) if parsing failed.
-  static bool ParseStatic(ParseInfo* info);
-  bool Parse(ParseInfo* info);
   void ParseOnBackground(ParseInfo* info);
 
   // Deserialize the scope chain prior to parsing in which the script is going
@@ -221,14 +226,17 @@
   void DeserializeScopeChain(ParseInfo* info,
                              MaybeHandle<ScopeInfo> maybe_outer_scope_info);
 
-  // Handle errors detected during parsing, move statistics to Isolate,
-  // internalize strings (move them to the heap).
-  void Internalize(Isolate* isolate, Handle<Script> script, bool error);
+  // Handle errors detected during parsing.
+  void ReportErrors(Isolate* isolate, Handle<Script> script);
+  // Move statistics to the Isolate.
+  void UpdateStatistics(Isolate* isolate, Handle<Script> script);
   void HandleSourceURLComments(Isolate* isolate, Handle<Script> script);
 
  private:
   friend class ParserBase<Parser>;
   friend class v8::internal::ExpressionClassifier<ParserTypes<Parser>>;
+  friend bool v8::internal::parsing::ParseProgram(ParseInfo*, bool);
+  friend bool v8::internal::parsing::ParseFunction(ParseInfo*, bool);
 
   bool AllowsLazyParsingWithoutUnresolvedVariables() const {
     return scope()->AllowsLazyParsingWithoutUnresolvedVariables(
@@ -262,7 +270,7 @@
     return scope()->NewTemporary(name);
   }
 
-  void PrepareGeneratorVariables(FunctionState* function_state);
+  void PrepareGeneratorVariables();
 
   // Limit the allowed number of local variables in a function. The hard limit
   // is that offsets computed by FullCodeGenerator::StackOperand and similar
@@ -290,12 +298,10 @@
     return compile_options_;
   }
   bool consume_cached_parse_data() const {
-    return allow_lazy() &&
-           compile_options_ == ScriptCompiler::kConsumeParserCache;
+    return compile_options_ == ScriptCompiler::kConsumeParserCache;
   }
   bool produce_cached_parse_data() const {
-    return allow_lazy() &&
-           compile_options_ == ScriptCompiler::kProduceParserCache;
+    return compile_options_ == ScriptCompiler::kProduceParserCache;
   }
 
   void ParseModuleItemList(ZoneList<Statement*>* body, bool* ok);
@@ -340,19 +346,29 @@
                                  Block* finally_block,
                                  const CatchInfo& catch_info, int pos);
 
+  void ParseAndRewriteGeneratorFunctionBody(int pos, FunctionKind kind,
+                                            ZoneList<Statement*>* body,
+                                            bool* ok);
+  void CreateFunctionNameAssignment(const AstRawString* function_name, int pos,
+                                    FunctionLiteral::FunctionType function_type,
+                                    DeclarationScope* function_scope,
+                                    ZoneList<Statement*>* result, int index);
+
   Statement* DeclareFunction(const AstRawString* variable_name,
-                             FunctionLiteral* function, int pos,
-                             bool is_generator, bool is_async,
+                             FunctionLiteral* function, VariableMode mode,
+                             int pos, bool is_sloppy_block_function,
                              ZoneList<const AstRawString*>* names, bool* ok);
   V8_INLINE Statement* DeclareClass(const AstRawString* variable_name,
                                     Expression* value,
                                     ZoneList<const AstRawString*>* names,
                                     int class_token_pos, int end_pos, bool* ok);
   V8_INLINE void DeclareClassVariable(const AstRawString* name,
-                                      Scope* block_scope, ClassInfo* class_info,
+                                      ClassInfo* class_info,
                                       int class_token_pos, bool* ok);
   V8_INLINE void DeclareClassProperty(const AstRawString* class_name,
                                       ClassLiteralProperty* property,
+                                      ClassLiteralProperty::Kind kind,
+                                      bool is_static, bool is_constructor,
                                       ClassInfo* class_info, bool* ok);
   V8_INLINE Expression* RewriteClassLiteral(const AstRawString* name,
                                             ClassInfo* class_info, int pos,
@@ -417,6 +433,7 @@
     PatternContext SetAssignmentContextIfNeeded(Expression* node);
     PatternContext SetInitializerContextIfNeeded(Expression* node);
 
+    bool DeclaresParameterContainingSloppyEval() const;
     void RewriteParameterScopes(Expression* expr);
 
     Variable* CreateTempVar(Expression* value = nullptr);
@@ -443,12 +460,15 @@
     DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW()
   };
 
-  // !%_IsJSReceiver(result = iterator.next()) &&
-  //     %ThrowIteratorResultNotAnObject(result)
+  // [if (IteratorType == kAsync)]
+  //     !%_IsJSReceiver(result = Await(iterator.next())) &&
+  //         %ThrowIteratorResultNotAnObject(result)
+  // [else]
+  //     !%_IsJSReceiver(result = iterator.next()) &&
+  //         %ThrowIteratorResultNotAnObject(result)
+  // [endif]
   Expression* BuildIteratorNextResult(Expression* iterator, Variable* result,
-                                      int pos);
-
-  Expression* GetIterator(Expression* iterable, int pos);
+                                      IteratorType type, int pos);
 
   // Initialize the components of a for-in / for-of statement.
   Statement* InitializeForEachStatement(ForEachStatement* stmt,
@@ -456,8 +476,9 @@
                                         Statement* body, int each_keyword_pos);
   Statement* InitializeForOfStatement(ForOfStatement* stmt, Expression* each,
                                       Expression* iterable, Statement* body,
-                                      bool finalize,
+                                      bool finalize, IteratorType type,
                                       int next_result_pos = kNoSourcePosition);
+
   Block* RewriteForVarInLegacy(const ForInfo& for_info);
   void DesugarBindingInForEachStatement(ForInfo* for_info, Block** body_block,
                                         Expression** each_variable, bool* ok);
@@ -476,11 +497,6 @@
       int function_token_position, FunctionLiteral::FunctionType type,
       LanguageMode language_mode, bool* ok);
 
-  Expression* InstallHomeObject(Expression* function_literal,
-                                Expression* home_object);
-  FunctionLiteral* SynthesizeClassFieldInitializer(int count);
-  FunctionLiteral* InsertClassFieldInitializer(FunctionLiteral* constructor);
-
   // Get odd-ball literals.
   Literal* GetLiteralUndefined(int position);
 
@@ -523,35 +539,30 @@
 
   // Factory methods.
   FunctionLiteral* DefaultConstructor(const AstRawString* name, bool call_super,
-                                      bool requires_class_field_init, int pos,
-                                      int end_pos, LanguageMode language_mode);
+                                      int pos, int end_pos);
 
   // Skip over a lazy function, either using cached data if we have it, or
   // by parsing the function with PreParser. Consumes the ending }.
   // If may_abort == true, the (pre-)parser may decide to abort skipping
   // in order to force the function to be eagerly parsed, after all.
-  LazyParsingResult SkipFunction(
-      FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
-      int* function_length, bool* has_duplicate_parameters,
-      int* materialized_literal_count, int* expected_property_count,
-      bool is_inner_function, bool may_abort, bool* ok);
+  LazyParsingResult SkipFunction(FunctionKind kind,
+                                 DeclarationScope* function_scope,
+                                 int* num_parameters, int* function_length,
+                                 bool* has_duplicate_parameters,
+                                 int* expected_property_count,
+                                 bool is_inner_function, bool may_abort,
+                                 bool* ok);
 
   Block* BuildParameterInitializationBlock(
       const ParserFormalParameters& parameters, bool* ok);
-  Block* BuildRejectPromiseOnException(Block* block, bool* ok);
-
-  // Consumes the ending }.
-  ZoneList<Statement*>* ParseEagerFunctionBody(
-      const AstRawString* function_name, int pos,
-      const ParserFormalParameters& parameters, FunctionKind kind,
-      FunctionLiteral::FunctionType function_type, bool* ok);
+  Block* BuildRejectPromiseOnException(Block* block);
 
   ZoneList<Statement*>* ParseFunction(
       const AstRawString* function_name, int pos, FunctionKind kind,
       FunctionLiteral::FunctionType function_type,
       DeclarationScope* function_scope, int* num_parameters,
       int* function_length, bool* has_duplicate_parameters,
-      int* materialized_literal_count, int* expected_property_count, bool* ok);
+      int* expected_property_count, bool* ok);
 
   void ThrowPendingError(Isolate* isolate, Handle<Script> script);
 
@@ -587,7 +598,15 @@
   typedef TemplateLiteral* TemplateLiteralState;
 
   TemplateLiteralState OpenTemplateLiteral(int pos);
-  void AddTemplateSpan(TemplateLiteralState* state, bool tail);
+  // "should_cook" means that the span can be "cooked": in tagged template
+  // literals, both the raw and "cooked" representations are available to user
+  // code ("cooked" meaning that escape sequences are converted to their
+  // interpreted values). With the --harmony-template-escapes flag, invalid
+  // escape sequences cause the cooked span to be represented by undefined,
+  // instead of being a syntax error.
+  // "tail" indicates that this span is the last in the literal.
+  void AddTemplateSpan(TemplateLiteralState* state, bool should_cook,
+                       bool tail);
   void AddTemplateExpression(TemplateLiteralState* state,
                              Expression* expression);
   Expression* CloseTemplateLiteral(TemplateLiteralState* state, int start,
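
For reference, a minimal standalone sketch (not part of this patch, with simplified types and only a handful of escape sequences handled) of the "cooking" rule the should_cook comment describes: valid escapes are interpreted, and an invalid escape makes the cooked value absent (JavaScript's undefined) instead of a syntax error.

#include <optional>
#include <string>

// Cook a raw template span: interpret recognized escape sequences; signal an
// invalid escape by returning an empty optional (the "undefined" case).
std::optional<std::string> CookTemplateSpan(const std::string& raw) {
  std::string cooked;
  for (size_t i = 0; i < raw.size(); ++i) {
    if (raw[i] != '\\') {
      cooked += raw[i];
      continue;
    }
    if (++i == raw.size()) return std::nullopt;  // dangling backslash
    switch (raw[i]) {
      case 'n':  cooked += '\n'; break;
      case 't':  cooked += '\t'; break;
      case '\\': cooked += '\\'; break;
      case '`':  cooked += '`'; break;
      default:   return std::nullopt;  // invalid escape: cooked is undefined
    }
  }
  return cooked;
}
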
@@ -596,10 +615,9 @@
 
   ZoneList<Expression*>* PrepareSpreadArguments(ZoneList<Expression*>* list);
   Expression* SpreadCall(Expression* function, ZoneList<Expression*>* args,
-                         int pos);
+                         int pos, Call::PossiblyEval is_possibly_eval);
   Expression* SpreadCallNew(Expression* function, ZoneList<Expression*>* args,
                             int pos);
-  Expression* CallClassFieldInitializer(Scope* scope, Expression* this_expr);
   Expression* RewriteSuperCall(Expression* call_expression);
 
   void SetLanguageMode(Scope* scope, LanguageMode mode);
@@ -630,7 +648,7 @@
   void RewriteParameterInitializer(Expression* expr, Scope* scope);
 
   Expression* BuildInitialYield(int pos, FunctionKind kind);
-  Expression* BuildCreateJSGeneratorObject(int pos, FunctionKind kind);
+  Assignment* BuildCreateJSGeneratorObject(int pos, FunctionKind kind);
   Expression* BuildResolvePromise(Expression* value, int pos);
   Expression* BuildRejectPromise(Expression* value, int pos);
   Variable* PromiseVariable();
@@ -642,16 +660,18 @@
 
   void FinalizeIteratorUse(Scope* use_scope, Variable* completion,
                            Expression* condition, Variable* iter,
-                           Block* iterator_use, Block* result);
+                           Block* iterator_use, Block* result,
+                           IteratorType type);
 
   Statement* FinalizeForOfStatement(ForOfStatement* loop, Variable* completion,
-                                    int pos);
+                                    IteratorType type, int pos);
   void BuildIteratorClose(ZoneList<Statement*>* statements, Variable* iterator,
                           Variable* input, Variable* output);
   void BuildIteratorCloseForCompletion(Scope* scope,
                                        ZoneList<Statement*>* statements,
                                        Variable* iterator,
-                                       Expression* completion);
+                                       Expression* completion,
+                                       IteratorType type);
   Statement* CheckCallable(Variable* var, Expression* error, int pos);
 
   V8_INLINE Expression* RewriteAwaitExpression(Expression* value, int pos);
@@ -686,10 +706,6 @@
     return identifier == ast_value_factory()->undefined_string();
   }
 
-  V8_INLINE bool IsFutureStrictReserved(const AstRawString* identifier) const {
-    return scanner()->IdentifierIsFutureStrictReserved(identifier);
-  }
-
   // Returns true if the expression is of type "this.foo".
   V8_INLINE static bool IsThisProperty(Expression* expression) {
     DCHECK(expression != NULL);
@@ -724,10 +740,8 @@
     return identifier == ast_value_factory()->constructor_string();
   }
 
-  V8_INLINE bool IsDirectEvalCall(Expression* expression) const {
-    if (!expression->IsCall()) return false;
-    expression = expression->AsCall()->expression();
-    return IsIdentifier(expression) && IsEval(AsIdentifier(expression));
+  V8_INLINE bool IsName(const AstRawString* identifier) const {
+    return identifier == ast_value_factory()->name_string();
   }
 
   V8_INLINE static bool IsBoilerplateProperty(
@@ -826,12 +840,11 @@
 
   // Determine if the expression is a variable proxy and mark it as being used
   // in an assignment or with a increment/decrement operator.
-  V8_INLINE static Expression* MarkExpressionAsAssigned(
-      Expression* expression) {
-    VariableProxy* proxy =
-        expression != NULL ? expression->AsVariableProxy() : NULL;
-    if (proxy != NULL) proxy->set_is_assigned();
-    return expression;
+  V8_INLINE static void MarkExpressionAsAssigned(Expression* expression) {
+    DCHECK_NOT_NULL(expression);
+    if (expression->IsVariableProxy()) {
+      expression->AsVariableProxy()->set_is_assigned();
+    }
   }
 
   // Returns true if we have a binary expression between two numeric
@@ -1038,8 +1051,7 @@
     auto* init_block = BuildParameterInitializationBlock(parameters, ok);
     if (!*ok) return;
     if (is_async) {
-      init_block = BuildRejectPromiseOnException(init_block, ok);
-      if (!*ok) return;
+      init_block = BuildRejectPromiseOnException(init_block);
     }
     if (init_block != nullptr) body->Add(init_block, zone());
   }
@@ -1050,38 +1062,35 @@
                                     int initializer_end_position,
                                     bool is_rest) {
     parameters->UpdateArityAndFunctionLength(initializer != nullptr, is_rest);
-    bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
-    const AstRawString* name = is_simple
+    bool has_simple_name = pattern->IsVariableProxy() && initializer == nullptr;
+    const AstRawString* name = has_simple_name
                                    ? pattern->AsVariableProxy()->raw_name()
                                    : ast_value_factory()->empty_string();
-    parameters->params.Add(
-        ParserFormalParameters::Parameter(name, pattern, initializer,
-                                          initializer_end_position, is_rest),
-        parameters->scope->zone());
+    auto parameter =
+        new (parameters->scope->zone()) ParserFormalParameters::Parameter(
+            name, pattern, initializer, initializer_end_position, is_rest);
+
+    parameters->params.Add(parameter);
   }
 
-  V8_INLINE void DeclareFormalParameter(
+  V8_INLINE void DeclareFormalParameters(
       DeclarationScope* scope,
-      const ParserFormalParameters::Parameter& parameter) {
-    bool is_duplicate = false;
+      const ThreadedList<ParserFormalParameters::Parameter>& parameters) {
     bool is_simple = classifier()->is_simple_parameter_list();
-    auto name = is_simple || parameter.is_rest
-                    ? parameter.name
-                    : ast_value_factory()->empty_string();
-    auto mode = is_simple || parameter.is_rest ? VAR : TEMPORARY;
     if (!is_simple) scope->SetHasNonSimpleParameters();
-    bool is_optional = parameter.initializer != nullptr;
-    Variable* var =
-        scope->DeclareParameter(name, mode, is_optional, parameter.is_rest,
-                                &is_duplicate, ast_value_factory());
-    if (is_duplicate) {
-      classifier()->RecordDuplicateFormalParameterError(scanner()->location());
-    }
-    if (is_sloppy(scope->language_mode())) {
-      // TODO(sigurds) Mark every parameter as maybe assigned. This is a
-      // conservative approximation necessary to account for parameters
-      // that are assigned via the arguments array.
-      var->set_maybe_assigned();
+    for (auto parameter : parameters) {
+      bool is_duplicate = false;
+      bool use_name = is_simple || parameter->is_nondestructuring_rest();
+      bool is_optional = parameter->initializer != nullptr;
+      scope->DeclareParameter(
+          use_name ? parameter->name : ast_value_factory()->empty_string(),
+          use_name ? VAR : TEMPORARY, is_optional, parameter->is_rest,
+          &is_duplicate, ast_value_factory());
+      if (is_duplicate &&
+          classifier()->is_valid_formal_parameter_list_without_duplicates()) {
+        classifier()->RecordDuplicateFormalParameterError(
+            scanner()->location());
+      }
     }
   }
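
The loop above reports a duplicate formal only while the classifier still considers the list valid, so only the first offense is recorded. A simplified sketch of that detection, outside of V8's types:

#include <string>
#include <unordered_set>
#include <vector>

// Return the index of the first duplicated formal name, or -1 if none;
// mirrors the declare-then-check loop above with standard containers.
int FirstDuplicateFormal(const std::vector<std::string>& names) {
  std::unordered_set<std::string> seen;
  for (size_t i = 0; i < names.size(); ++i) {
    if (!seen.insert(names[i]).second) return static_cast<int>(i);
  }
  return -1;
}
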
 
@@ -1091,15 +1100,11 @@
                                             Scanner::Location* duplicate_loc,
                                             bool* ok);
 
-  void ReindexLiterals(const ParserFormalParameters& parameters);
-
   V8_INLINE Expression* NoTemplateTag() { return NULL; }
   V8_INLINE static bool IsTaggedTemplate(const Expression* tag) {
     return tag != NULL;
   }
 
-  V8_INLINE void MaterializeUnspreadArgumentsLiterals(int count) {}
-
   Expression* ExpressionListToExpression(ZoneList<Expression*>* args);
 
   void AddAccessorPrefixToFunctionName(bool is_get, FunctionLiteral* function,
@@ -1124,6 +1129,12 @@
     ++use_counts_[feature];
   }
 
+  // Returns true iff we're parsing the first function literal during
+  // CreateDynamicFunction().
+  V8_INLINE bool ParsingDynamicFunctionDeclaration() const {
+    return parameters_end_pos_ != kNoSourcePosition;
+  }
+
   // Parser's private field members.
   friend class DiscardableZoneScope;  // Uses reusable_preparser_.
   // FIXME(marja): Make reusable_preparser_ always use its own temp Zone (call
@@ -1147,8 +1158,17 @@
   // parsing.
   int use_counts_[v8::Isolate::kUseCounterFeatureCount];
   int total_preparse_skipped_;
-  bool parsing_on_main_thread_;
+  bool allow_lazy_;
+  bool temp_zoned_;
   ParserLogger* log_;
+
+  PreParsedScopeData* preparsed_scope_data_;
+
+  // If not kNoSourcePosition, indicates that the first function literal
+  // encountered is a dynamic function (see CreateDynamicFunction()). This
+  // field indicates the correct position of the ')' that closes the parameter
+  // indicates the correct position of the ')' that closes the parameter list.
+  // After that ')' is encountered, this field is reset to kNoSourcePosition.
+  int parameters_end_pos_;
 };
 
 // ----------------------------------------------------------------------------
diff --git a/src/parsing/parsing.cc b/src/parsing/parsing.cc
new file mode 100644
index 0000000..ede13ac
--- /dev/null
+++ b/src/parsing/parsing.cc
@@ -0,0 +1,74 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/parsing/parsing.h"
+
+#include <memory>
+
+#include "src/ast/ast.h"
+#include "src/objects-inl.h"
+#include "src/parsing/parse-info.h"
+#include "src/parsing/parser.h"
+
+namespace v8 {
+namespace internal {
+namespace parsing {
+
+bool ParseProgram(ParseInfo* info, bool internalize) {
+  DCHECK(info->is_toplevel());
+  DCHECK_NULL(info->literal());
+
+  Parser parser(info);
+
+  FunctionLiteral* result = nullptr;
+  // Ok to use Isolate here; this function is only called in the main thread.
+  DCHECK(parser.parsing_on_main_thread_);
+  Isolate* isolate = info->isolate();
+
+  parser.SetCachedData(info);
+  result = parser.ParseProgram(isolate, info);
+  info->set_literal(result);
+  if (result == nullptr) {
+    parser.ReportErrors(isolate, info->script());
+  } else {
+    info->set_language_mode(info->literal()->language_mode());
+  }
+  parser.UpdateStatistics(isolate, info->script());
+  if (internalize) {
+    info->ast_value_factory()->Internalize(isolate);
+  }
+  return (result != nullptr);
+}
+
+bool ParseFunction(ParseInfo* info, bool internalize) {
+  DCHECK(!info->is_toplevel());
+  DCHECK_NULL(info->literal());
+
+  Parser parser(info);
+
+  FunctionLiteral* result = nullptr;
+  // Ok to use Isolate here; this function is only called in the main thread.
+  DCHECK(parser.parsing_on_main_thread_);
+  Isolate* isolate = info->isolate();
+
+  result = parser.ParseFunction(isolate, info);
+  info->set_literal(result);
+  if (result == nullptr) {
+    parser.ReportErrors(isolate, info->script());
+  }
+  parser.UpdateStatistics(isolate, info->script());
+  if (internalize) {
+    info->ast_value_factory()->Internalize(isolate);
+  }
+  return (result != nullptr);
+}
+
+bool ParseAny(ParseInfo* info, bool internalize) {
+  return info->is_toplevel() ? ParseProgram(info, internalize)
+                             : ParseFunction(info, internalize);
+}
+
+}  // namespace parsing
+}  // namespace internal
+}  // namespace v8
diff --git a/src/parsing/parsing.h b/src/parsing/parsing.h
new file mode 100644
index 0000000..3902377
--- /dev/null
+++ b/src/parsing/parsing.h
@@ -0,0 +1,36 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_PARSING_H_
+#define V8_PARSING_PARSING_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class ParseInfo;
+
+namespace parsing {
+
+// Parses the top-level source code represented by the parse info and sets its
+// function literal.  Returns false (and deallocates any allocated AST
+// nodes) if parsing failed. Internalizes AST nodes on the heap if
+// |internalize|.
+V8_EXPORT_PRIVATE bool ParseProgram(ParseInfo* info, bool internalize = true);
+
+// Like ParseProgram but for an individual function. Internalizes AST nodes on
+// the heap if |internalize|.
+V8_EXPORT_PRIVATE bool ParseFunction(ParseInfo* info, bool internalize = true);
+
+// If you don't know whether info->is_toplevel() is true or not, use this method
+// to dispatch to either of the above functions. Prefer to use the above methods
+// whenever possible. Internalizes AST nodes on the heap if |internalize|.
+V8_EXPORT_PRIVATE bool ParseAny(ParseInfo* info, bool internalize = true);
+
+}  // namespace parsing
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_PARSING_PARSING_H_
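
A hedged usage sketch of these entry points (the ParseInfo setup is elided and the helper name is hypothetical): ParseAny forwards to ParseProgram or ParseFunction based on is_toplevel(), and on success the literal is available on the info object.

#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"

namespace v8 {
namespace internal {

// Hypothetical caller: parse whatever the ParseInfo describes and hand back
// the resulting literal, or nullptr on failure (errors are reported by the
// parsing:: helpers themselves).
FunctionLiteral* ParseToLiteral(ParseInfo* info) {
  if (!parsing::ParseAny(info, /* internalize */ true)) return nullptr;
  return info->literal();
}

}  // namespace internal
}  // namespace v8
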
diff --git a/src/parsing/pattern-rewriter.cc b/src/parsing/pattern-rewriter.cc
index f3d9bb0..b4312a2 100644
--- a/src/parsing/pattern-rewriter.cc
+++ b/src/parsing/pattern-rewriter.cc
@@ -4,6 +4,7 @@
 
 #include "src/ast/ast.h"
 #include "src/messages.h"
+#include "src/objects-inl.h"
 #include "src/parsing/parameter-initializer-rewriter.h"
 #include "src/parsing/parser.h"
 
@@ -136,22 +137,31 @@
       factory()->NewVariableProxy(name, NORMAL_VARIABLE, pattern->position());
   Declaration* declaration = factory()->NewVariableDeclaration(
       proxy, descriptor_->scope, descriptor_->declaration_pos);
+
+  // When an extra declaration scope needs to be inserted to account for
+  // a sloppy eval in a default parameter or function body, the parameter
+  // needs to be declared in the function's scope, not in the varblock
+  // scope which will be used for the initializer expression.
+  Scope* outer_function_scope = nullptr;
+  if (DeclaresParameterContainingSloppyEval()) {
+    outer_function_scope = descriptor_->scope->outer_scope();
+  }
   Variable* var = parser_->Declare(
       declaration, descriptor_->declaration_kind, descriptor_->mode,
       Variable::DefaultInitializationFlag(descriptor_->mode), ok_,
-      descriptor_->hoist_scope);
+      outer_function_scope);
   if (!*ok_) return;
   DCHECK_NOT_NULL(var);
   DCHECK(proxy->is_resolved());
   DCHECK(initializer_position_ != kNoSourcePosition);
   var->set_initializer_position(initializer_position_);
 
-  // TODO(adamk): This should probably be checking hoist_scope.
-  // Move it to Parser::Declare() to make it easier to test
-  // the right scope.
-  Scope* declaration_scope = IsLexicalVariableMode(descriptor_->mode)
-                                 ? descriptor_->scope
-                                 : descriptor_->scope->GetDeclarationScope();
+  Scope* declaration_scope =
+      outer_function_scope != nullptr
+          ? outer_function_scope
+          : (IsLexicalVariableMode(descriptor_->mode)
+                 ? descriptor_->scope
+                 : descriptor_->scope->GetDeclarationScope());
   if (declaration_scope->num_var() > kMaxNumFunctionLocals) {
     parser_->ReportMessage(MessageTemplate::kTooManyVariables);
     *ok_ = false;
@@ -164,6 +174,9 @@
   // If there's no initializer, we're done.
   if (value == nullptr) return;
 
+  Scope* var_init_scope = descriptor_->scope;
+  MarkLoopVariableAsAssigned(var_init_scope, proxy->var());
+
   // A declaration of the form:
   //
   //    var v = x;
@@ -176,7 +189,6 @@
   // 'v' than the 'v' in the declaration (e.g., if we are inside a
   // 'with' statement or 'catch' block). Global var declarations
   // also need special treatment.
-  Scope* var_init_scope = descriptor_->scope;
 
   if (descriptor_->mode == VAR && var_init_scope->is_script_scope()) {
     // Global variable declarations must be compiled in a specific
@@ -307,7 +319,24 @@
     block_->statements()->Add(factory()->NewExpressionStatement(expr, pos),
                               zone());
   }
-  return set_context(old_context);
+  set_context(old_context);
+}
+
+bool Parser::PatternRewriter::DeclaresParameterContainingSloppyEval() const {
+  // Need to check for a binding context to make sure we have a descriptor.
+  if (IsBindingContext() &&
+      // Only relevant for parameters.
+      descriptor_->declaration_kind == DeclarationDescriptor::PARAMETER &&
+      // And only when scope is a block scope;
+      // without eval, it is a function scope.
+      scope()->is_block_scope()) {
+    DCHECK(scope()->calls_sloppy_eval());
+    DCHECK(scope()->is_declaration_scope());
+    DCHECK(scope()->outer_scope()->is_function_scope());
+    return true;
+  }
+
+  return false;
 }
 
 // When an extra declaration scope needs to be inserted to account for
@@ -315,34 +344,76 @@
 // needs to be in that new inner scope which was added after initial
 // parsing.
 void Parser::PatternRewriter::RewriteParameterScopes(Expression* expr) {
-  if (!IsBindingContext()) return;
-  if (descriptor_->declaration_kind != DeclarationDescriptor::PARAMETER) return;
-  if (!scope()->is_block_scope()) return;
-
-  DCHECK(scope()->is_declaration_scope());
-  DCHECK(scope()->outer_scope()->is_function_scope());
-  DCHECK(scope()->calls_sloppy_eval());
-
-  ReparentParameterExpressionScope(parser_->stack_limit(), expr, scope());
+  if (DeclaresParameterContainingSloppyEval()) {
+    ReparentParameterExpressionScope(parser_->stack_limit(), expr, scope());
+  }
 }
 
 void Parser::PatternRewriter::VisitObjectLiteral(ObjectLiteral* pattern,
                                                  Variable** temp_var) {
   auto temp = *temp_var = CreateTempVar(current_value_);
 
+  ZoneList<Expression*>* rest_runtime_callargs = nullptr;
+  if (pattern->has_rest_property()) {
+    // non_rest_properties_count = pattern->properties()->length() - 1;
+    // args_length = non_rest_properties_count + 1, because temp is passed
+    // to the runtime function as well.
+    int args_length = pattern->properties()->length();
+    rest_runtime_callargs =
+        new (zone()) ZoneList<Expression*>(args_length, zone());
+    rest_runtime_callargs->Add(factory()->NewVariableProxy(temp), zone());
+  }
+
   block_->statements()->Add(parser_->BuildAssertIsCoercible(temp), zone());
 
   for (ObjectLiteralProperty* property : *pattern->properties()) {
     PatternContext context = SetInitializerContextIfNeeded(property->value());
+    Expression* value;
 
-    // Computed property names contain expressions which might require
-    // scope rewriting.
-    if (!property->key()->IsLiteral()) RewriteParameterScopes(property->key());
+    if (property->kind() == ObjectLiteralProperty::Kind::SPREAD) {
+      // var { y, [x++]: a, ...c } = temp
+      //     becomes
+      // var y = temp.y;
+      // var temp1 = %ToName(x++);
+      // var a = temp[temp1];
+      // var c;
+      // c = %CopyDataPropertiesWithExcludedProperties(temp, "y", temp1);
+      value = factory()->NewCallRuntime(
+          Runtime::kCopyDataPropertiesWithExcludedProperties,
+          rest_runtime_callargs, kNoSourcePosition);
+    } else {
+      Expression* key = property->key();
 
-    RecurseIntoSubpattern(
-        property->value(),
-        factory()->NewProperty(factory()->NewVariableProxy(temp),
-                               property->key(), kNoSourcePosition));
+      if (!key->IsLiteral()) {
+        // Computed property names contain expressions which might require
+        // scope rewriting.
+        RewriteParameterScopes(key);
+      }
+
+      if (pattern->has_rest_property()) {
+        Expression* excluded_property = key;
+
+        if (property->is_computed_name()) {
+          DCHECK(!key->IsPropertyName() || !key->IsNumberLiteral());
+          auto args = new (zone()) ZoneList<Expression*>(1, zone());
+          args->Add(key, zone());
+          auto to_name_key = CreateTempVar(factory()->NewCallRuntime(
+              Runtime::kToName, args, kNoSourcePosition));
+          key = factory()->NewVariableProxy(to_name_key);
+          excluded_property = factory()->NewVariableProxy(to_name_key);
+        } else {
+          DCHECK(key->IsPropertyName() || key->IsNumberLiteral());
+        }
+
+        DCHECK(rest_runtime_callargs != nullptr);
+        rest_runtime_callargs->Add(excluded_property, zone());
+      }
+
+      value = factory()->NewProperty(factory()->NewVariableProxy(temp), key,
+                                     kNoSourcePosition);
+    }
+
+    RecurseIntoSubpattern(property->value(), value);
     set_context(context);
   }
 }
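
The runtime call in the SPREAD branch above has straightforward semantics: copy every own property of the source object except the excluded keys collected so far. A standalone sketch with map-based stand-ins for JS objects:

#include <map>
#include <set>
#include <string>

// Simplified model of %CopyDataPropertiesWithExcludedProperties: the rest
// object receives every source property whose key was not excluded.
std::map<std::string, int> CopyDataPropertiesWithExcluded(
    const std::map<std::string, int>& source,
    const std::set<std::string>& excluded) {
  std::map<std::string, int> rest;
  for (const auto& entry : source) {
    if (excluded.count(entry.first) == 0) rest.insert(entry);
  }
  return rest;
}
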
@@ -359,8 +430,9 @@
   DCHECK(block_->ignore_completion_value());
 
   auto temp = *temp_var = CreateTempVar(current_value_);
-  auto iterator = CreateTempVar(parser_->GetIterator(
-      factory()->NewVariableProxy(temp), kNoSourcePosition));
+  auto iterator = CreateTempVar(
+      factory()->NewGetIterator(factory()->NewVariableProxy(temp),
+                                IteratorType::kNormal, kNoSourcePosition));
   auto done =
       CreateTempVar(factory()->NewBooleanLiteral(false, kNoSourcePosition));
   auto result = CreateTempVar();
@@ -446,7 +518,7 @@
           factory()->NewExpressionStatement(
               parser_->BuildIteratorNextResult(
                   factory()->NewVariableProxy(iterator), result,
-                  kNoSourcePosition),
+                  IteratorType::kNormal, kNoSourcePosition),
               kNoSourcePosition),
           zone());
       next_block->statements()->Add(inner_if, zone());
@@ -503,11 +575,8 @@
     Variable* array;
     {
       auto empty_exprs = new (zone()) ZoneList<Expression*>(0, zone());
-      array = CreateTempVar(factory()->NewArrayLiteral(
-          empty_exprs,
-          // Reuse pattern's literal index - it is unused since there is no
-          // actual literal allocated.
-          node->literal_index(), kNoSourcePosition));
+      array = CreateTempVar(
+          factory()->NewArrayLiteral(empty_exprs, kNoSourcePosition));
     }
 
     // done = true;
@@ -520,7 +589,7 @@
     // result = IteratorNext(iterator);
     Statement* get_next = factory()->NewExpressionStatement(
         parser_->BuildIteratorNextResult(factory()->NewVariableProxy(iterator),
-                                         result, nopos),
+                                         result, IteratorType::kNormal, nopos),
         nopos);
 
     // %AppendElement(array, result.value);
@@ -589,7 +658,7 @@
       Token::NOT, factory()->NewVariableProxy(done), nopos);
 
   parser_->FinalizeIteratorUse(scope(), completion, closing_condition, iterator,
-                               block_, target);
+                               block_, target, IteratorType::kNormal);
   block_ = target;
 }
 
@@ -673,6 +742,7 @@
 NOT_A_PATTERN(ForStatement)
 NOT_A_PATTERN(FunctionDeclaration)
 NOT_A_PATTERN(FunctionLiteral)
+NOT_A_PATTERN(GetIterator)
 NOT_A_PATTERN(IfStatement)
 NOT_A_PATTERN(Literal)
 NOT_A_PATTERN(NativeFunctionLiteral)
diff --git a/src/parsing/preparse-data-format.h b/src/parsing/preparse-data-format.h
index 30d1d75..32e9a23 100644
--- a/src/parsing/preparse-data-format.h
+++ b/src/parsing/preparse-data-format.h
@@ -14,7 +14,7 @@
  public:
   // Layout and constants of the preparse data exchange format.
   static const unsigned kMagicNumber = 0xBadDead;
-  static const unsigned kCurrentVersion = 13;
+  static const unsigned kCurrentVersion = 14;
 
   static const int kMagicOffset = 0;
   static const int kVersionOffset = 1;
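
Bumping kCurrentVersion invalidates previously cached preparse data, since the per-function record layout changes in this patch (literal counts removed, inner-function counts added). A minimal sketch of the compatibility check a consumer of this format would perform, using the offsets above:

#include <cstddef>

// Reject cached preparse data whose header doesn't match the current format;
// offsets follow kMagicOffset = 0 and kVersionOffset = 1 above.
bool IsCompatiblePreparseData(const unsigned* data, size_t length) {
  const unsigned kMagicNumber = 0xBadDead;
  const unsigned kCurrentVersion = 14;
  return length > 1 && data[0] == kMagicNumber && data[1] == kCurrentVersion;
}
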
diff --git a/src/parsing/preparse-data.cc b/src/parsing/preparse-data.cc
index e9a4e8f..da90f2f 100644
--- a/src/parsing/preparse-data.cc
+++ b/src/parsing/preparse-data.cc
@@ -6,6 +6,7 @@
 #include "src/base/hashmap.h"
 #include "src/base/logging.h"
 #include "src/globals.h"
+#include "src/objects-inl.h"
 #include "src/parsing/parser.h"
 #include "src/parsing/preparse-data-format.h"
 
@@ -14,18 +15,19 @@
 
 void ParserLogger::LogFunction(int start, int end, int num_parameters,
                                int function_length,
-                               bool has_duplicate_parameters, int literals,
-                               int properties, LanguageMode language_mode,
-                               bool uses_super_property, bool calls_eval) {
+                               bool has_duplicate_parameters, int properties,
+                               LanguageMode language_mode,
+                               bool uses_super_property, bool calls_eval,
+                               int num_inner_functions) {
   function_store_.Add(start);
   function_store_.Add(end);
   function_store_.Add(num_parameters);
   function_store_.Add(function_length);
-  function_store_.Add(literals);
   function_store_.Add(properties);
   function_store_.Add(
       FunctionEntry::EncodeFlags(language_mode, uses_super_property, calls_eval,
                                  has_duplicate_parameters));
+  function_store_.Add(num_inner_functions);
 }
 
 ParserLogger::ParserLogger() {
diff --git a/src/parsing/preparse-data.h b/src/parsing/preparse-data.h
index 767484a..eb38475 100644
--- a/src/parsing/preparse-data.h
+++ b/src/parsing/preparse-data.h
@@ -52,17 +52,18 @@
       : end_(-1),
         num_parameters_(-1),
         function_length_(-1),
-        has_duplicate_parameters_(false) {}
+        has_duplicate_parameters_(false),
+        num_inner_functions_(-1) {}
 
   void LogFunction(int end, int num_parameters, int function_length,
-                   bool has_duplicate_parameters, int literals,
-                   int properties) {
+                   bool has_duplicate_parameters, int properties,
+                   int num_inner_functions) {
     end_ = end;
     num_parameters_ = num_parameters;
     function_length_ = function_length;
     has_duplicate_parameters_ = has_duplicate_parameters;
-    literals_ = literals;
     properties_ = properties;
+    num_inner_functions_ = num_inner_functions;
   }
 
   int end() const { return end_; }
@@ -75,12 +76,10 @@
   bool has_duplicate_parameters() const {
     return has_duplicate_parameters_;
   }
-  int literals() const {
-    return literals_;
-  }
   int properties() const {
     return properties_;
   }
+  int num_inner_functions() const { return num_inner_functions_; }
 
  private:
   int end_;
@@ -88,8 +87,8 @@
   int num_parameters_;
   int function_length_;
   bool has_duplicate_parameters_;
-  int literals_;
   int properties_;
+  int num_inner_functions_;
 };
 
 class ParserLogger final {
@@ -97,9 +96,9 @@
   ParserLogger();
 
   void LogFunction(int start, int end, int num_parameters, int function_length,
-                   bool has_duplicate_parameters, int literals, int properties,
+                   bool has_duplicate_parameters, int properties,
                    LanguageMode language_mode, bool uses_super_property,
-                   bool calls_eval);
+                   bool calls_eval, int num_inner_functions);
 
   ScriptData* GetScriptData();
 
diff --git a/src/parsing/preparsed-scope-data.cc b/src/parsing/preparsed-scope-data.cc
new file mode 100644
index 0000000..d1d497c
--- /dev/null
+++ b/src/parsing/preparsed-scope-data.cc
@@ -0,0 +1,86 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/parsing/preparsed-scope-data.h"
+
+#include "src/ast/scopes.h"
+#include "src/ast/variables.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool PreParsedScopeData::HasVariablesWhichNeedAllocationData(Scope* scope) {
+  if (!scope->is_hidden()) {
+    for (Variable* var : *scope->locals()) {
+      if (var->mode() == VAR || var->mode() == LET || var->mode() == CONST) {
+        return true;
+      }
+    }
+  }
+  for (Scope* inner = scope->inner_scope(); inner != nullptr;
+       inner = inner->sibling()) {
+    if (HasVariablesWhichNeedAllocationData(inner)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+PreParsedScopeData::ScopeScope::ScopeScope(PreParsedScopeData* data,
+                                           ScopeType scope_type,
+                                           int start_position, int end_position)
+    : data_(data), previous_scope_(data->current_scope_) {
+  data->current_scope_ = this;
+  data->backing_store_.push_back(scope_type);
+  data->backing_store_.push_back(start_position);
+  data->backing_store_.push_back(end_position);
+  // Reserve space for variable and inner scope count (we don't know yet how
+  // many will be added).
+  index_in_data_ = data->backing_store_.size();
+  data->backing_store_.push_back(-1);
+  data->backing_store_.push_back(-1);
+}
+
+PreParsedScopeData::ScopeScope::~ScopeScope() {
+  data_->current_scope_ = previous_scope_;
+  if (got_data_) {
+    DCHECK_GT(variable_count_ + inner_scope_count_, 0);
+    if (previous_scope_ != nullptr) {
+      previous_scope_->got_data_ = true;
+      ++previous_scope_->inner_scope_count_;
+    }
+    data_->backing_store_[index_in_data_] = inner_scope_count_;
+    data_->backing_store_[index_in_data_ + 1] = variable_count_;
+  } else {
+    // No interesting data for this scope (or its children); remove from the
+    // data.
+    DCHECK_EQ(data_->backing_store_.size(), index_in_data_ + 2);
+    DCHECK_GE(index_in_data_, 3);
+    DCHECK_EQ(variable_count_, 0);
+    data_->backing_store_.erase(
+        data_->backing_store_.begin() + index_in_data_ - 3,
+        data_->backing_store_.end());
+  }
+}
+
+void PreParsedScopeData::ScopeScope::MaybeAddVariable(Variable* var) {
+  if (var->mode() == VAR || var->mode() == LET || var->mode() == CONST) {
+#ifdef DEBUG
+    // For tests (which check that the data is about the same variables).
+    const AstRawString* name = var->raw_name();
+    data_->backing_store_.push_back(name->length());
+    for (int i = 0; i < name->length(); ++i) {
+      data_->backing_store_.push_back(name->raw_data()[i]);
+    }
+#endif
+    data_->backing_store_.push_back(var->location());
+    data_->backing_store_.push_back(var->maybe_assigned());
+    ++variable_count_;
+    got_data_ = true;
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
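
The ScopeScope destructor logic generalizes to a reusable pattern: reserve placeholder slots up front, patch them once the counts are known, and erase the whole record if nothing was collected. A self-contained sketch of that pattern, simplified to a single count per record:

#include <vector>

struct Recorder {
  std::vector<int> store;
};

class RecordScope {
 public:
  RecordScope(Recorder* recorder, int tag) : recorder_(recorder) {
    index_ = recorder_->store.size();
    recorder_->store.push_back(tag);
    recorder_->store.push_back(-1);  // placeholder, patched on destruction
  }
  ~RecordScope() {
    if (item_count_ > 0) {
      recorder_->store[index_ + 1] = item_count_;  // patch the reserved slot
    } else {
      // Nothing recorded: drop the whole record, tag included.
      recorder_->store.erase(recorder_->store.begin() + index_,
                             recorder_->store.end());
    }
  }
  void AddItem(int item) {
    recorder_->store.push_back(item);
    ++item_count_;
  }

 private:
  Recorder* recorder_;
  size_t index_;
  int item_count_ = 0;
};
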
diff --git a/src/parsing/preparsed-scope-data.h b/src/parsing/preparsed-scope-data.h
new file mode 100644
index 0000000..72d1a71
--- /dev/null
+++ b/src/parsing/preparsed-scope-data.h
@@ -0,0 +1,57 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_PREPARSED_SCOPE_DATA_H_
+#define V8_PARSING_PREPARSED_SCOPE_DATA_H_
+
+#include <vector>
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class PreParsedScopeData {
+ public:
+  PreParsedScopeData() {}
+  ~PreParsedScopeData() {}
+
+  // Whether the scope has variables whose context allocation or
+  // maybe-assigned status we need to decide based on preparsed scope data.
+  static bool HasVariablesWhichNeedAllocationData(Scope* scope);
+
+  class ScopeScope {
+   public:
+    ScopeScope(PreParsedScopeData* data, ScopeType scope_type,
+               int start_position, int end_position);
+    ~ScopeScope();
+
+    void MaybeAddVariable(Variable* var);
+
+   private:
+    PreParsedScopeData* data_;
+    size_t index_in_data_;
+    ScopeScope* previous_scope_;
+
+    int inner_scope_count_ = 0;
+    int variable_count_ = 0;
+    bool got_data_ = false;
+    DISALLOW_COPY_AND_ASSIGN(ScopeScope);
+  };
+
+ private:
+  friend class ScopeTestHelper;
+
+  // TODO(marja): Make the backing store more efficient once we know exactly
+  // what data is needed.
+  std::vector<int> backing_store_;
+  ScopeScope* current_scope_ = nullptr;
+
+  DISALLOW_COPY_AND_ASSIGN(PreParsedScopeData);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_PARSING_PREPARSED_SCOPE_DATA_H_
diff --git a/src/parsing/preparser.cc b/src/parsing/preparser.cc
index 1b21c3d..3ed9a4d 100644
--- a/src/parsing/preparser.cc
+++ b/src/parsing/preparser.cc
@@ -67,6 +67,8 @@
         return PreParserIdentifier::Prototype();
       if (scanner->LiteralMatches("constructor", 11))
         return PreParserIdentifier::Constructor();
+      if (scanner->LiteralMatches("name", 4))
+        return PreParserIdentifier::Name();
       return PreParserIdentifier::Default();
   }
 }
@@ -86,29 +88,33 @@
 PreParser::PreParseResult PreParser::PreParseFunction(
     FunctionKind kind, DeclarationScope* function_scope, bool parsing_module,
     bool is_inner_function, bool may_abort, int* use_counts) {
-  RuntimeCallTimerScope runtime_timer(
-      runtime_call_stats_,
-      track_unresolved_variables_
-          ? &RuntimeCallStats::PreParseWithVariableResolution
-          : &RuntimeCallStats::PreParseNoVariableResolution);
   DCHECK_EQ(FUNCTION_SCOPE, function_scope->scope_type());
   parsing_module_ = parsing_module;
   use_counts_ = use_counts;
   DCHECK(!track_unresolved_variables_);
   track_unresolved_variables_ = is_inner_function;
+#ifdef DEBUG
+  function_scope->set_is_being_lazily_parsed(true);
+#endif
+
+  // In the preparser, we use the function literal IDs to count how many
+  // FunctionLiterals were encountered. The PreParser doesn't actually persist
+  // FunctionLiterals, so their IDs don't matter.
+  ResetFunctionLiteralId();
 
   // The caller passes the function_scope which is not yet inserted into the
-  // scope_state_. All scopes above the function_scope are ignored by the
+  // scope stack. All scopes above the function_scope are ignored by the
   // PreParser.
-  DCHECK_NULL(scope_state_);
-  FunctionState function_state(&function_state_, &scope_state_, function_scope);
+  DCHECK_NULL(function_state_);
+  DCHECK_NULL(scope_);
+  FunctionState function_state(&function_state_, &scope_, function_scope);
   // This indirection is needed so that we can use the CHECK_OK macros.
   bool ok_holder = true;
   bool* ok = &ok_holder;
 
   PreParserFormalParameters formals(function_scope);
   bool has_duplicate_parameters = false;
-  DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+  DuplicateFinder duplicate_finder;
   std::unique_ptr<ExpressionClassifier> formals_classifier;
 
   // Parse non-arrow function parameters. For arrow functions, the parameters
@@ -129,10 +135,46 @@
   }
 
   Expect(Token::LBRACE, CHECK_OK_VALUE(kPreParseSuccess));
-  LazyParsingResult result = ParseStatementListAndLogFunction(
-      &formals, has_duplicate_parameters, may_abort, ok);
+  DeclarationScope* inner_scope = function_scope;
+  LazyParsingResult result;
+
+  if (!formals.is_simple) {
+    inner_scope = NewVarblockScope();
+    inner_scope->set_start_position(scanner()->location().beg_pos);
+  }
+
+  {
+    BlockState block_state(&scope_, inner_scope);
+    result = ParseStatementListAndLogFunction(
+        &formals, has_duplicate_parameters, may_abort, ok);
+  }
+
+  if (!formals.is_simple) {
+    BuildParameterInitializationBlock(formals, ok);
+
+    if (is_sloppy(inner_scope->language_mode())) {
+      inner_scope->HoistSloppyBlockFunctions(nullptr);
+    }
+
+    SetLanguageMode(function_scope, inner_scope->language_mode());
+    inner_scope->set_end_position(scanner()->peek_location().end_pos);
+    inner_scope->FinalizeBlockScope();
+  } else {
+    if (is_sloppy(function_scope->language_mode())) {
+      function_scope->HoistSloppyBlockFunctions(nullptr);
+    }
+  }
+
+  if (!IsArrowFunction(kind) && track_unresolved_variables_) {
+    // Declare arguments after parsing the function since lexical 'arguments'
+    // masks the arguments object. Declare arguments before declaring the
+    // function var since the arguments object masks 'function arguments'.
+    function_scope->DeclareArguments(ast_value_factory());
+  }
+
   use_counts_ = nullptr;
   track_unresolved_variables_ = false;
+
   if (result == kLazyParsingAborted) {
     return kPreParseAbort;
   } else if (stack_overflow()) {
@@ -156,8 +198,6 @@
     if (is_strict(function_scope->language_mode())) {
       int end_pos = scanner()->location().end_pos;
       CheckStrictOctalLiteral(function_scope->start_position(), end_pos, ok);
-      CheckDecimalLiteralWithLeadingZero(function_scope->start_position(),
-                                         end_pos);
     }
   }
   return kPreParseSuccess;
@@ -184,19 +224,21 @@
     LanguageMode language_mode, bool* ok) {
   // Function ::
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
+  const RuntimeCallStats::CounterId counters[2][2] = {
+      {&RuntimeCallStats::PreParseBackgroundNoVariableResolution,
+       &RuntimeCallStats::PreParseNoVariableResolution},
+      {&RuntimeCallStats::PreParseBackgroundWithVariableResolution,
+       &RuntimeCallStats::PreParseWithVariableResolution}};
   RuntimeCallTimerScope runtime_timer(
       runtime_call_stats_,
-      track_unresolved_variables_
-          ? &RuntimeCallStats::PreParseWithVariableResolution
-          : &RuntimeCallStats::PreParseNoVariableResolution);
+      counters[track_unresolved_variables_][parsing_on_main_thread_]);
 
-  // Parse function body.
-  PreParserStatementList body;
   DeclarationScope* function_scope = NewFunctionScope(kind);
   function_scope->SetLanguageMode(language_mode);
-  FunctionState function_state(&function_state_, &scope_state_, function_scope);
-  DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+  FunctionState function_state(&function_state_, &scope_, function_scope);
+  DuplicateFinder duplicate_finder;
   ExpressionClassifier formals_classifier(this, &duplicate_finder);
+  GetNextFunctionLiteralId();
 
   Expect(Token::LPAREN, CHECK_OK);
   int start_position = scanner()->location().beg_pos;
@@ -210,12 +252,21 @@
                          formals_end_position, CHECK_OK);
 
   Expect(Token::LBRACE, CHECK_OK);
-  ParseStatementList(body, Token::RBRACE, CHECK_OK);
-  Expect(Token::RBRACE, CHECK_OK);
+
+  // Parse function body.
+  PreParserStatementList body;
+  int pos = function_token_pos == kNoSourcePosition ? peek_position()
+                                                    : function_token_pos;
+  ParseFunctionBody(body, function_name, pos, formals, kind, function_type,
+                    CHECK_OK);
 
   // Parsing the body may change the language mode in our scope.
   language_mode = function_scope->language_mode();
 
+  if (is_sloppy(language_mode)) {
+    function_scope->HoistSloppyBlockFunctions(nullptr);
+  }
+
   // Validate name and parameter names. We can do this only after parsing the
   // function, since the function can declare itself strict.
   CheckFunctionName(language_mode, function_name, function_name_validity,
@@ -227,9 +278,7 @@
   int end_position = scanner()->location().end_pos;
   if (is_strict(language_mode)) {
     CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
-    CheckDecimalLiteralWithLeadingZero(start_position, end_position);
   }
-  function_scope->set_end_position(end_position);
 
   if (FLAG_trace_preparse) {
     PrintF("  [%s]: %i-%i\n",
@@ -252,26 +301,27 @@
   // Position right after terminal '}'.
   DCHECK_EQ(Token::RBRACE, scanner()->peek());
   int body_end = scanner()->peek_location().end_pos;
-  DCHECK(this->scope()->is_function_scope());
+  DCHECK_EQ(this->scope()->is_function_scope(), formals->is_simple);
   log_.LogFunction(body_end, formals->num_parameters(),
                    formals->function_length, has_duplicate_parameters,
-                   function_state_->materialized_literal_count(),
-                   function_state_->expected_property_count());
+                   function_state_->expected_property_count(),
+                   GetLastFunctionLiteralId());
   return kLazyParsingComplete;
 }
 
 PreParserExpression PreParser::ExpressionFromIdentifier(
     PreParserIdentifier name, int start_position, InferName infer) {
+  VariableProxy* proxy = nullptr;
   if (track_unresolved_variables_) {
     AstNodeFactory factory(ast_value_factory());
     // Setting the Zone is necessary because zone_ might be the temp Zone, and
     // AstValueFactory doesn't know about it.
     factory.set_zone(zone());
     DCHECK_NOT_NULL(name.string_);
-    scope()->NewUnresolved(&factory, name.string_, start_position,
-                           NORMAL_VARIABLE);
+    proxy = scope()->NewUnresolved(&factory, name.string_, start_position,
+                                   NORMAL_VARIABLE);
   }
-  return PreParserExpression::FromIdentifier(name, zone());
+  return PreParserExpression::FromIdentifier(name, proxy, zone());
 }
 
 void PreParser::DeclareAndInitializeVariables(
@@ -279,22 +329,21 @@
     const DeclarationDescriptor* declaration_descriptor,
     const DeclarationParsingResult::Declaration* declaration,
     ZoneList<const AstRawString*>* names, bool* ok) {
-  if (declaration->pattern.identifiers_ != nullptr) {
+  if (declaration->pattern.variables_ != nullptr) {
     DCHECK(FLAG_lazy_inner_functions);
-    /* Mimic what Parser does when declaring variables (see
-       Parser::PatternRewriter::VisitVariableProxy).
-
-       var + no initializer -> RemoveUnresolved
-       let / const + no initializer -> RemoveUnresolved
-       var + initializer -> RemoveUnresolved followed by NewUnresolved
-       let / const + initializer -> RemoveUnresolved
-    */
-
-    if (declaration->initializer.IsEmpty() ||
-        (declaration_descriptor->mode == VariableMode::LET ||
-         declaration_descriptor->mode == VariableMode::CONST)) {
-      for (auto identifier : *(declaration->pattern.identifiers_)) {
-        declaration_descriptor->scope->RemoveUnresolved(identifier);
+    DCHECK(track_unresolved_variables_);
+    for (auto variable : *(declaration->pattern.variables_)) {
+      declaration_descriptor->scope->RemoveUnresolved(variable);
+      Variable* var = scope()->DeclareVariableName(
+          variable->raw_name(), declaration_descriptor->mode);
+      if (FLAG_preparser_scope_analysis) {
+        MarkLoopVariableAsAssigned(declaration_descriptor->scope, var);
+        // This is only necessary if there is an initializer, but we don't have
+        // that information here.  Consequently, the preparser sometimes says
+        // maybe-assigned where the parser (correctly) says never-assigned.
+      }
+      if (names) {
+        names->Add(variable->raw_name(), zone());
       }
     }
   }
diff --git a/src/parsing/preparser.h b/src/parsing/preparser.h
index f4687eb..7498127 100644
--- a/src/parsing/preparser.h
+++ b/src/parsing/preparser.h
@@ -5,8 +5,11 @@
 #ifndef V8_PARSING_PREPARSER_H
 #define V8_PARSING_PREPARSER_H
 
+#include "src/ast/ast.h"
 #include "src/ast/scopes.h"
 #include "src/parsing/parser-base.h"
+#include "src/parsing/preparse-data.h"
+#include "src/pending-compilation-error-handler.h"
 
 namespace v8 {
 namespace internal {
@@ -67,6 +70,9 @@
   static PreParserIdentifier Async() {
     return PreParserIdentifier(kAsyncIdentifier);
   }
+  static PreParserIdentifier Name() {
+    return PreParserIdentifier(kNameIdentifier);
+  }
   bool IsEmpty() const { return type_ == kEmptyIdentifier; }
   bool IsEval() const { return type_ == kEvalIdentifier; }
   bool IsArguments() const { return type_ == kArgumentsIdentifier; }
@@ -79,11 +85,7 @@
   bool IsConstructor() const { return type_ == kConstructorIdentifier; }
   bool IsEnum() const { return type_ == kEnumIdentifier; }
   bool IsAwait() const { return type_ == kAwaitIdentifier; }
-  bool IsFutureStrictReserved() const {
-    return type_ == kFutureStrictReservedIdentifier ||
-           type_ == kLetIdentifier || type_ == kStaticIdentifier ||
-           type_ == kYieldIdentifier;
-  }
+  bool IsName() const { return type_ == kNameIdentifier; }
 
   // Allow identifier->name()[->length()] to work. The preparser
   // does not need the actual positions/lengths of the identifiers.
@@ -109,7 +111,8 @@
     kConstructorIdentifier,
     kEnumIdentifier,
     kAwaitIdentifier,
-    kAsyncIdentifier
+    kAsyncIdentifier,
+    kNameIdentifier
   };
 
   explicit PreParserIdentifier(Type type) : type_(type), string_(nullptr) {}
@@ -125,49 +128,65 @@
 class PreParserExpression {
  public:
   PreParserExpression()
-      : code_(TypeField::encode(kEmpty)), identifiers_(nullptr) {}
+      : code_(TypeField::encode(kEmpty)), variables_(nullptr) {}
 
   static PreParserExpression Empty() { return PreParserExpression(); }
 
   static PreParserExpression Default(
-      ZoneList<const AstRawString*>* identifiers = nullptr) {
-    return PreParserExpression(TypeField::encode(kExpression), identifiers);
+      ZoneList<VariableProxy*>* variables = nullptr) {
+    return PreParserExpression(TypeField::encode(kExpression), variables);
   }
 
   static PreParserExpression Spread(PreParserExpression expression) {
     return PreParserExpression(TypeField::encode(kSpreadExpression),
-                               expression.identifiers_);
+                               expression.variables_);
   }
 
   static PreParserExpression FromIdentifier(PreParserIdentifier id,
+                                            VariableProxy* variable,
                                             Zone* zone) {
     PreParserExpression expression(TypeField::encode(kIdentifierExpression) |
                                    IdentifierTypeField::encode(id.type_));
-    expression.AddIdentifier(id.string_, zone);
+    expression.AddVariable(variable, zone);
     return expression;
   }
 
   static PreParserExpression BinaryOperation(PreParserExpression left,
                                              Token::Value op,
-                                             PreParserExpression right) {
-    return PreParserExpression(TypeField::encode(kBinaryOperationExpression));
+                                             PreParserExpression right,
+                                             Zone* zone) {
+    if (op == Token::COMMA) {
+      // Possibly an arrow function parameter list.
+      if (left.variables_ == nullptr) {
+        return PreParserExpression(TypeField::encode(kExpression),
+                                   right.variables_);
+      }
+      if (right.variables_ != nullptr) {
+        for (auto variable : *right.variables_) {
+          left.variables_->Add(variable, zone);
+        }
+      }
+      return PreParserExpression(TypeField::encode(kExpression),
+                                 left.variables_);
+    }
+    return PreParserExpression(TypeField::encode(kExpression));
   }
 
-  static PreParserExpression Assignment() {
+  static PreParserExpression Assignment(ZoneList<VariableProxy*>* variables) {
     return PreParserExpression(TypeField::encode(kExpression) |
-                               ExpressionTypeField::encode(kAssignment));
+                                   ExpressionTypeField::encode(kAssignment),
+                               variables);
   }
 
   static PreParserExpression ObjectLiteral(
-      ZoneList<const AstRawString*>* identifiers = nullptr) {
+      ZoneList<VariableProxy*>* variables) {
     return PreParserExpression(TypeField::encode(kObjectLiteralExpression),
-                               identifiers);
+                               variables);
   }
 
-  static PreParserExpression ArrayLiteral(
-      ZoneList<const AstRawString*>* identifiers = nullptr) {
+  static PreParserExpression ArrayLiteral(ZoneList<VariableProxy*>* variables) {
     return PreParserExpression(TypeField::encode(kArrayLiteralExpression),
-                               identifiers);
+                               variables);
   }
 
   static PreParserExpression StringLiteral() {
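
The COMMA branch in BinaryOperation above exists because an arrow function parameter list such as (a, b) is first parsed as a comma expression, so the tracked variables from both operands must survive into one list. A sketch of that merge with simplified types:

#include <vector>

using VariableList = std::vector<int>;  // stand-in for ZoneList<VariableProxy*>

// Merge the right operand's tracked variables into the left operand's list,
// reusing whichever list already exists, as the COMMA case above does.
VariableList* MergeVariablesOnComma(VariableList* left, VariableList* right) {
  if (left == nullptr) return right;
  if (right != nullptr) {
    left->insert(left->end(), right->begin(), right->end());
  }
  return left;
}
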
@@ -184,9 +203,10 @@
                                IsUseAsmField::encode(true));
   }
 
-  static PreParserExpression This() {
+  static PreParserExpression This(ZoneList<VariableProxy*>* variables) {
     return PreParserExpression(TypeField::encode(kExpression) |
-                               ExpressionTypeField::encode(kThisExpression));
+                                   ExpressionTypeField::encode(kThisExpression),
+                               variables);
   }
 
   static PreParserExpression ThisProperty() {
@@ -284,11 +304,6 @@
             ExpressionTypeField::decode(code_) == kCallEvalExpression);
   }
 
-  bool IsDirectEvalCall() const {
-    return TypeField::decode(code_) == kExpression &&
-           ExpressionTypeField::decode(code_) == kCallEvalExpression;
-  }
-
   bool IsSuperCallReference() const {
     return TypeField::decode(code_) == kExpression &&
            ExpressionTypeField::decode(code_) == kSuperCallReference;
@@ -313,10 +328,6 @@
 
   PreParserExpression AsFunctionLiteral() { return *this; }
 
-  bool IsBinaryOperation() const {
-    return TypeField::decode(code_) == kBinaryOperationExpression;
-  }
-
   // Dummy implementation for making expression->somefunc() work in both Parser
   // and PreParser.
   PreParserExpression* operator->() { return this; }
@@ -329,15 +340,12 @@
   int position() const { return kNoSourcePosition; }
   void set_function_token_position(int position) {}
 
-  void set_is_class_field_initializer(bool is_class_field_initializer) {}
-
  private:
   enum Type {
     kEmpty,
     kExpression,
     kIdentifierExpression,
     kStringLiteralExpression,
-    kBinaryOperationExpression,
     kSpreadExpression,
     kObjectLiteralExpression,
     kArrayLiteralExpression
@@ -354,19 +362,18 @@
     kAssignment
   };
 
-  explicit PreParserExpression(
-      uint32_t expression_code,
-      ZoneList<const AstRawString*>* identifiers = nullptr)
-      : code_(expression_code), identifiers_(identifiers) {}
+  explicit PreParserExpression(uint32_t expression_code,
+                               ZoneList<VariableProxy*>* variables = nullptr)
+      : code_(expression_code), variables_(variables) {}
 
-  void AddIdentifier(const AstRawString* identifier, Zone* zone) {
-    if (identifier == nullptr) {
+  void AddVariable(VariableProxy* variable, Zone* zone) {
+    if (variable == nullptr) {
       return;
     }
-    if (identifiers_ == nullptr) {
-      identifiers_ = new (zone) ZoneList<const AstRawString*>(1, zone);
+    if (variables_ == nullptr) {
+      variables_ = new (zone) ZoneList<VariableProxy*>(1, zone);
     }
-    identifiers_->Add(identifier, zone);
+    variables_->Add(variable, zone);
   }
 
   // The first three bits are for the Type.
@@ -389,9 +396,9 @@
   typedef BitField<bool, TypeField::kNext, 1> HasCoverInitializedNameField;
 
   uint32_t code_;
-  // If the PreParser is used in the identifier tracking mode,
-  // PreParserExpression accumulates identifiers in that expression.
-  ZoneList<const AstRawString*>* identifiers_;
+  // If the PreParser is used in the variable tracking mode, PreParserExpression
+  // accumulates variables in that expression.
+  ZoneList<VariableProxy*>* variables_;
 
   friend class PreParser;
   friend class PreParserFactory;
@@ -401,23 +408,24 @@
 
 
 // The pre-parser doesn't need to build lists of expressions, identifiers, or
-// the like. If the PreParser is used in identifier tracking mode, it needs to
-// build lists of identifiers though.
+// the like. If the PreParser is used in variable tracking mode, however, it
+// does need to build lists of variables.
 template <typename T>
 class PreParserList {
  public:
   // These functions make list->Add(some_expression) work (and do nothing).
-  PreParserList() : length_(0), identifiers_(nullptr) {}
+  PreParserList() : length_(0), variables_(nullptr) {}
   PreParserList* operator->() { return this; }
-  void Add(T, Zone* zone);
+  void Add(const T& element, Zone* zone);
   int length() const { return length_; }
   static PreParserList Null() { return PreParserList(-1); }
   bool IsNull() const { return length_ == -1; }
+  void Set(int index, const T& element) {}
 
  private:
-  explicit PreParserList(int n) : length_(n), identifiers_(nullptr) {}
+  explicit PreParserList(int n) : length_(n), variables_(nullptr) {}
   int length_;
-  ZoneList<const AstRawString*>* identifiers_;
+  ZoneList<VariableProxy*>* variables_;
 
   friend class PreParser;
   friend class PreParserFactory;
@@ -425,22 +433,22 @@
 
 template <>
 inline void PreParserList<PreParserExpression>::Add(
-    PreParserExpression expression, Zone* zone) {
-  if (expression.identifiers_ != nullptr) {
+    const PreParserExpression& expression, Zone* zone) {
+  if (expression.variables_ != nullptr) {
     DCHECK(FLAG_lazy_inner_functions);
     DCHECK(zone != nullptr);
-    if (identifiers_ == nullptr) {
-      identifiers_ = new (zone) ZoneList<const AstRawString*>(1, zone);
+    if (variables_ == nullptr) {
+      variables_ = new (zone) ZoneList<VariableProxy*>(1, zone);
     }
-    for (auto identifier : (*expression.identifiers_)) {
-      identifiers_->Add(identifier, zone);
+    for (auto identifier : (*expression.variables_)) {
+      variables_->Add(identifier, zone);
     }
   }
   ++length_;
 }
 
 template <typename T>
-void PreParserList<T>::Add(T, Zone* zone) {
+void PreParserList<T>::Add(const T& element, Zone* zone) {
   ++length_;
 }
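
The pair of Add definitions above uses member-function specialization: the generic overload only counts, while the PreParserExpression specialization also accumulates tracked variables. A compilable sketch of the same shape, with hypothetical names:

#include <vector>

struct Tracked {
  std::vector<int>* variables = nullptr;
};

template <typename T>
class CountingList {
 public:
  void Add(const T& element, std::vector<int>* accumulator);
  int length() const { return length_; }

 private:
  int length_ = 0;
};

// Specialized case: the one element type carrying a payload also feeds the
// accumulator before being counted.
template <>
inline void CountingList<Tracked>::Add(const Tracked& element,
                                       std::vector<int>* accumulator) {
  if (element.variables != nullptr) {
    accumulator->insert(accumulator->end(), element.variables->begin(),
                        element.variables->end());
  }
  ++length_;
}

// Generic case: counting only.
template <typename T>
void CountingList<T>::Add(const T&, std::vector<int>*) {
  ++length_;
}
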
 
@@ -532,7 +540,8 @@
 class PreParserFactory {
  public:
   explicit PreParserFactory(AstValueFactory* ast_value_factory)
-      : zone_(ast_value_factory->zone()) {}
+      : ast_value_factory_(ast_value_factory),
+        zone_(ast_value_factory->zone()) {}
 
   void set_zone(Zone* zone) { zone_ = zone; }
 
@@ -541,7 +550,14 @@
     // This is needed for object literal property names. Property names are
     // normalized to string literals during object literal parsing.
     PreParserExpression expression = PreParserExpression::Default();
-    expression.AddIdentifier(identifier.string_, zone_);
+    if (identifier.string_ != nullptr) {
+      DCHECK(FLAG_lazy_inner_functions);
+      AstNodeFactory factory(ast_value_factory_);
+      factory.set_zone(zone_);
+      VariableProxy* variable =
+          factory.NewVariableProxy(identifier.string_, NORMAL_VARIABLE);
+      expression.AddVariable(variable, zone_);
+    }
     return expression;
   }
   PreParserExpression NewNumberLiteral(double number,
@@ -552,14 +568,12 @@
     return PreParserExpression::Default();
   }
   PreParserExpression NewRegExpLiteral(PreParserIdentifier js_pattern,
-                                       int js_flags, int literal_index,
-                                       int pos) {
+                                       int js_flags, int pos) {
     return PreParserExpression::Default();
   }
   PreParserExpression NewArrayLiteral(PreParserExpressionList values,
-                                      int first_spread_index, int literal_index,
-                                      int pos) {
-    return PreParserExpression::ArrayLiteral(values.identifiers_);
+                                      int first_spread_index, int pos) {
+    return PreParserExpression::ArrayLiteral(values.variables_);
   }
   PreParserExpression NewClassLiteralProperty(PreParserExpression key,
                                               PreParserExpression value,
@@ -572,18 +586,17 @@
                                                PreParserExpression value,
                                                ObjectLiteralProperty::Kind kind,
                                                bool is_computed_name) {
-    return PreParserExpression::Default(value.identifiers_);
+    return PreParserExpression::Default(value.variables_);
   }
   PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
                                                PreParserExpression value,
                                                bool is_computed_name) {
-    return PreParserExpression::Default(value.identifiers_);
+    return PreParserExpression::Default(value.variables_);
   }
   PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
-                                       int literal_index,
-                                       int boilerplate_properties,
-                                       int pos) {
-    return PreParserExpression::ObjectLiteral(properties.identifiers_);
+                                       int boilerplate_properties, int pos,
+                                       bool has_rest_property) {
+    return PreParserExpression::ObjectLiteral(properties.variables_);
   }
   PreParserExpression NewVariableProxy(void* variable) {
     return PreParserExpression::Default();
@@ -604,7 +617,7 @@
   PreParserExpression NewBinaryOperation(Token::Value op,
                                          PreParserExpression left,
                                          PreParserExpression right, int pos) {
-    return PreParserExpression::BinaryOperation(left, op, right);
+    return PreParserExpression::BinaryOperation(left, op, right, zone_);
   }
   PreParserExpression NewCompareOperation(Token::Value op,
                                           PreParserExpression left,
@@ -618,7 +631,9 @@
                                     PreParserExpression left,
                                     PreParserExpression right,
                                     int pos) {
-    return PreParserExpression::Assignment();
+    // Variables need to be tracked, since this might be a parameter with a
+    // default value inside an arrow function parameter list.
+    return PreParserExpression::Assignment(left.variables_);
   }
   PreParserExpression NewYield(PreParserExpression generator_object,
                                PreParserExpression expression, int pos,
@@ -655,14 +670,17 @@
                                         int pos) {
     return PreParserStatement::Jump();
   }
+  PreParserStatement NewAsyncReturnStatement(PreParserExpression expression,
+                                             int pos) {
+    return PreParserStatement::Jump();
+  }
   PreParserExpression NewFunctionLiteral(
       PreParserIdentifier name, Scope* scope, PreParserStatementList body,
-      int materialized_literal_count, int expected_property_count,
-      int parameter_count, int function_length,
+      int expected_property_count, int parameter_count, int function_length,
       FunctionLiteral::ParameterFlag has_duplicate_parameters,
       FunctionLiteral::FunctionType function_type,
       FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
-      bool has_braces) {
+      bool has_braces, int function_literal_id) {
     return PreParserExpression::Default();
   }
 
@@ -746,6 +764,17 @@
     return PreParserStatement::Default();
   }
 
+  PreParserStatement NewForOfStatement(ZoneList<const AstRawString*>* labels,
+                                       int pos) {
+    return PreParserStatement::Default();
+  }
+
+  PreParserExpression NewCallRuntime(Runtime::FunctionId id,
+                                     ZoneList<PreParserExpression>* arguments,
+                                     int pos) {
+    return PreParserExpression::Default();
+  }
+
   // Return the object itself as AstVisitor and implement the needed
   // dummy method right in this class.
   PreParserFactory* visitor() { return this; }
@@ -755,14 +784,32 @@
   }
 
  private:
+  AstValueFactory* ast_value_factory_;
   Zone* zone_;
 };
 
 
 struct PreParserFormalParameters : FormalParametersBase {
+  struct Parameter : public ZoneObject {
+    Parameter(PreParserExpression pattern, bool is_destructuring, bool is_rest)
+        : pattern(pattern),
+          is_destructuring(is_destructuring),
+          is_rest(is_rest) {}
+    Parameter** next() { return &next_parameter; }
+    Parameter* const* next() const { return &next_parameter; }
+
+    bool is_nondestructuring_rest() const {
+      return is_rest && !is_destructuring;
+    }
+    PreParserExpression pattern;
+    Parameter* next_parameter = nullptr;
+    bool is_destructuring : 1;
+    bool is_rest : 1;
+  };
   explicit PreParserFormalParameters(DeclarationScope* scope)
       : FormalParametersBase(scope) {}
-  PreParserIdentifier at(int i) { return PreParserIdentifier(); }  // Dummy
+
+  ThreadedList<Parameter> params;
 };
 
 
@@ -838,11 +885,14 @@
     kPreParseSuccess
   };
 
-  PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
+  PreParser(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
+            AstValueFactory* ast_value_factory,
             PendingCompilationErrorHandler* pending_error_handler,
-            RuntimeCallStats* runtime_call_stats, uintptr_t stack_limit)
+            RuntimeCallStats* runtime_call_stats,
+            bool parsing_on_main_thread = true)
       : ParserBase<PreParser>(zone, scanner, stack_limit, nullptr,
-                              ast_value_factory, runtime_call_stats),
+                              ast_value_factory, runtime_call_stats,
+                              parsing_on_main_thread),
         use_counts_(nullptr),
         track_unresolved_variables_(false),
         pending_error_handler_(pending_error_handler) {}
@@ -855,17 +905,19 @@
   // success (even if parsing failed, the pre-parse data successfully
   // captured the syntax error), and false if a stack-overflow happened
   // during parsing.
-  PreParseResult PreParseProgram(int* materialized_literals = 0,
-                                 bool is_module = false) {
-    DCHECK_NULL(scope_state_);
+  PreParseResult PreParseProgram(bool is_module = false) {
+    DCHECK_NULL(scope_);
     DeclarationScope* scope = NewScriptScope();
+#ifdef DEBUG
+    scope->set_is_being_lazily_parsed(true);
+#endif
 
     // ModuleDeclarationInstantiation for Source Text Module Records creates a
     // new Module Environment Record whose outer lexical environment record is
     // the global scope.
     if (is_module) scope = NewModuleScope(scope);
 
-    FunctionState top_scope(&function_state_, &scope_state_, scope);
+    FunctionState top_scope(&function_state_, &scope_, scope);
     bool ok = true;
     int start_position = scanner()->peek_location().beg_pos;
     parsing_module_ = is_module;
@@ -877,11 +929,6 @@
     } else if (is_strict(this->scope()->language_mode())) {
       CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
                               &ok);
-      CheckDecimalLiteralWithLeadingZero(start_position,
-                                         scanner()->location().end_pos);
-    }
-    if (materialized_literals) {
-      *materialized_literals = function_state_->materialized_literal_count();
     }
     return kPreParseSuccess;
   }
@@ -911,21 +958,16 @@
   // By making the 'exception handling' explicit, we are forced to check
   // for failure at the call sites.
 
-  V8_INLINE PreParserStatementList ParseEagerFunctionBody(
-      PreParserIdentifier function_name, int pos,
-      const PreParserFormalParameters& parameters, FunctionKind kind,
-      FunctionLiteral::FunctionType function_type, bool* ok);
-
   // Indicates that we won't switch parsers for lazy parsing; we're already in
   // the preparser, so we just stay where we are.
   bool AllowsLazyParsingWithoutUnresolvedVariables() const { return false; }
   bool parse_lazily() const { return false; }
 
-  V8_INLINE LazyParsingResult SkipFunction(
-      FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
-      int* function_length, bool* has_duplicate_parameters,
-      int* materialized_literal_count, int* expected_property_count,
-      bool is_inner_function, bool may_abort, bool* ok) {
+  V8_INLINE LazyParsingResult
+  SkipFunction(FunctionKind kind, DeclarationScope* function_scope,
+               int* num_parameters, int* function_length,
+               bool* has_duplicate_parameters, int* expected_property_count,
+               bool is_inner_function, bool may_abort, bool* ok) {
     UNREACHABLE();
     return kLazyParsingComplete;
   }
@@ -945,7 +987,8 @@
   }
   V8_INLINE void AddTemplateExpression(TemplateLiteralState* state,
                                        PreParserExpression expression) {}
-  V8_INLINE void AddTemplateSpan(TemplateLiteralState* state, bool tail) {}
+  V8_INLINE void AddTemplateSpan(TemplateLiteralState* state, bool should_cook,
+                                 bool tail) {}
   V8_INLINE PreParserExpression CloseTemplateLiteral(
       TemplateLiteralState* state, int start, PreParserExpression tag);
   V8_INLINE void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
@@ -958,23 +1001,14 @@
   V8_INLINE void MarkCollectedTailCallExpressions() {}
   V8_INLINE void MarkTailPosition(PreParserExpression expression) {}
 
-  V8_INLINE PreParserExpressionList
-  PrepareSpreadArguments(PreParserExpressionList list) {
-    return list;
-  }
-
   V8_INLINE PreParserExpression SpreadCall(PreParserExpression function,
                                            PreParserExpressionList args,
-                                           int pos);
+                                           int pos,
+                                           Call::PossiblyEval possibly_eval);
   V8_INLINE PreParserExpression SpreadCallNew(PreParserExpression function,
                                               PreParserExpressionList args,
                                               int pos);
 
-  V8_INLINE PreParserExpression
-  RewriteSuperCall(PreParserExpression call_expression) {
-    return call_expression;
-  }
-
   V8_INLINE void RewriteDestructuringAssignments() {}
 
   V8_INLINE PreParserExpression RewriteExponentiation(PreParserExpression left,
@@ -1015,8 +1049,7 @@
       bool* ok) {
     DCHECK(!expr.AsIdentifier().IsEnum());
     DCHECK(!parsing_module_ || !expr.AsIdentifier().IsAwait());
-    DCHECK(is_sloppy(language_mode()) ||
-           !IsFutureStrictReserved(expr.AsIdentifier()));
+    DCHECK(IsIdentifier(expr));
     return labels;
   }
 
@@ -1035,7 +1068,22 @@
       PreParserStatementList cases, Scope* scope) {
     return PreParserStatement::Default();
   }
-  V8_INLINE void RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {}
+
+  V8_INLINE void RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {
+    if (track_unresolved_variables_) {
+      if (catch_info->name.string_ != nullptr) {
+        // Unlike in the parser, we need to declare the catch variable as a
+        // LET variable, so that it won't get hoisted out of the catch scope.
+        catch_info->scope->DeclareVariableName(catch_info->name.string_, LET);
+      }
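+      // Variables bound by a destructuring catch pattern are likewise
+      // block-scoped to the catch block.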
+      if (catch_info->pattern.variables_ != nullptr) {
+        for (auto variable : *catch_info->pattern.variables_) {
+          scope()->DeclareVariableName(variable->raw_name(), LET);
+        }
+      }
+    }
+  }
+
   V8_INLINE void ValidateCatchBlock(const CatchInfo& catch_info, bool* ok) {}
   V8_INLINE PreParserStatement RewriteTryStatement(
       PreParserStatement try_block, PreParserStatement catch_block,
@@ -1043,6 +1091,16 @@
     return PreParserStatement::Default();
   }
 
+  V8_INLINE void ParseAndRewriteGeneratorFunctionBody(
+      int pos, FunctionKind kind, PreParserStatementList body, bool* ok) {
+    ParseStatementList(body, Token::RBRACE, ok);
+  }
+  V8_INLINE void CreateFunctionNameAssignment(
+      PreParserIdentifier function_name, int pos,
+      FunctionLiteral::FunctionType function_type,
+      DeclarationScope* function_scope, PreParserStatementList result,
+      int index) {}
+
   V8_INLINE PreParserExpression RewriteDoExpression(PreParserStatement body,
                                                     int pos, bool* ok) {
     return PreParserExpression::Default();
@@ -1060,9 +1118,18 @@
   }
 
   V8_INLINE PreParserStatement DeclareFunction(
-      PreParserIdentifier variable_name, PreParserExpression function, int pos,
-      bool is_generator, bool is_async, ZoneList<const AstRawString*>* names,
-      bool* ok) {
+      PreParserIdentifier variable_name, PreParserExpression function,
+      VariableMode mode, int pos, bool is_sloppy_block_function,
+      ZoneList<const AstRawString*>* names, bool* ok) {
+    DCHECK_NULL(names);
+    if (variable_name.string_ != nullptr) {
+      DCHECK(track_unresolved_variables_);
+      scope()->DeclareVariableName(variable_name.string_, mode);
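+      // Sloppy-mode block-scoped function declarations may be hoisted to the
+      // enclosing function scope (ES Annex B.3.3).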
+      if (is_sloppy_block_function) {
+        GetDeclarationScope()->DeclareSloppyBlockFunction(variable_name.string_,
+                                                          scope());
+      }
+    }
     return Statement::Default();
   }
 
@@ -1070,17 +1137,29 @@
   DeclareClass(PreParserIdentifier variable_name, PreParserExpression value,
                ZoneList<const AstRawString*>* names, int class_token_pos,
                int end_pos, bool* ok) {
+    // The preparser shouldn't be used in contexts where we need to track the
+    // names.
+    DCHECK_NULL(names);
+    if (variable_name.string_ != nullptr) {
+      DCHECK(track_unresolved_variables_);
+      scope()->DeclareVariableName(variable_name.string_, LET);
+    }
     return PreParserStatement::Default();
   }
   V8_INLINE void DeclareClassVariable(PreParserIdentifier name,
-                                      Scope* block_scope, ClassInfo* class_info,
+                                      ClassInfo* class_info,
                                       int class_token_pos, bool* ok) {}
   V8_INLINE void DeclareClassProperty(PreParserIdentifier class_name,
                                       PreParserExpression property,
-                                      ClassInfo* class_info, bool* ok) {}
+                                      ClassLiteralProperty::Kind kind,
+                                      bool is_static, bool is_constructor,
+                                      ClassInfo* class_info, bool* ok) {}
   V8_INLINE PreParserExpression RewriteClassLiteral(PreParserIdentifier name,
                                                     ClassInfo* class_info,
                                                     int pos, bool* ok) {
+    bool has_default_constructor = !class_info->has_seen_constructor;
+    // Account for the default constructor.
+    if (has_default_constructor) GetNextFunctionLiteralId();
     return PreParserExpression::Default();
   }
 
@@ -1115,10 +1194,6 @@
     return identifier.IsAwait();
   }
 
-  V8_INLINE bool IsFutureStrictReserved(PreParserIdentifier identifier) const {
-    return identifier.IsFutureStrictReserved();
-  }
-
   // Returns true if the expression is of type "this.foo".
   V8_INLINE static bool IsThisProperty(PreParserExpression expression) {
     return expression.IsThisProperty();
@@ -1146,8 +1221,8 @@
     return identifier.IsConstructor();
   }
 
-  V8_INLINE bool IsDirectEvalCall(PreParserExpression expression) const {
-    return expression.IsDirectEvalCall();
+  V8_INLINE bool IsName(PreParserIdentifier identifier) const {
+    return identifier.IsName();
   }
 
   V8_INLINE static bool IsBoilerplateProperty(PreParserExpression property) {
@@ -1202,11 +1277,16 @@
   V8_INLINE static void CheckAssigningFunctionLiteralToProperty(
       PreParserExpression left, PreParserExpression right) {}
 
-  V8_INLINE static PreParserExpression MarkExpressionAsAssigned(
-      PreParserExpression expression) {
+  V8_INLINE void MarkExpressionAsAssigned(PreParserExpression expression) {
     // TODO(marja): To be able to produce the same errors, the preparser needs
     // to start tracking which expressions are variables and which are assigned.
-    return expression;
+    if (expression.variables_ != nullptr) {
+      DCHECK(FLAG_lazy_inner_functions);
+      DCHECK(track_unresolved_variables_);
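+      // Recording the assignment here lets later scope analysis treat these
+      // variables as maybe-assigned.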
+      for (auto variable : *expression.variables_) {
+        variable->set_is_assigned();
+      }
+    }
   }
 
   V8_INLINE bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
@@ -1229,6 +1309,11 @@
   V8_INLINE PreParserStatement
   BuildInitializationBlock(DeclarationParsingResult* parsing_result,
                            ZoneList<const AstRawString*>* names, bool* ok) {
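+    // The preparser builds no actual block; declaring the names is enough to
+    // keep the scopes of lazily parsed inner functions accurate.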
+    for (auto declaration : parsing_result->declarations) {
+      DeclareAndInitializeVariables(PreParserStatement::Default(),
+                                    &(parsing_result->descriptor), &declaration,
+                                    names, ok);
+    }
     return PreParserStatement::Default();
   }
 
@@ -1236,17 +1321,51 @@
   InitializeForEachStatement(PreParserStatement stmt, PreParserExpression each,
                              PreParserExpression subject,
                              PreParserStatement body, int each_keyword_pos) {
+    MarkExpressionAsAssigned(each);
+    return stmt;
+  }
+
+  V8_INLINE PreParserStatement InitializeForOfStatement(
+      PreParserStatement stmt, PreParserExpression each,
+      PreParserExpression iterable, PreParserStatement body, bool finalize,
+      IteratorType type, int next_result_pos = kNoSourcePosition) {
+    MarkExpressionAsAssigned(each);
     return stmt;
   }
 
   V8_INLINE PreParserStatement RewriteForVarInLegacy(const ForInfo& for_info) {
     return PreParserStatement::Null();
   }
+
   V8_INLINE void DesugarBindingInForEachStatement(
       ForInfo* for_info, PreParserStatement* body_block,
-      PreParserExpression* each_variable, bool* ok) {}
+      PreParserExpression* each_variable, bool* ok) {
+    if (track_unresolved_variables_) {
+      DCHECK_EQ(1, for_info->parsing_result.declarations.length());
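+      // Mirror the parser: bound names are collected for lexical bindings and
+      // for 'for (var ... of ...)' loops.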
+      bool is_for_var_of =
+          for_info->mode == ForEachStatement::ITERATE &&
+          for_info->parsing_result.descriptor.mode == VariableMode::VAR;
+      bool collect_names =
+          IsLexicalVariableMode(for_info->parsing_result.descriptor.mode) ||
+          is_for_var_of;
+
+      DeclareAndInitializeVariables(
+          PreParserStatement::Default(), &for_info->parsing_result.descriptor,
+          &for_info->parsing_result.declarations[0],
+          collect_names ? &for_info->bound_names : nullptr, ok);
+    }
+  }
+
   V8_INLINE PreParserStatement CreateForEachStatementTDZ(
       PreParserStatement init_block, const ForInfo& for_info, bool* ok) {
+    if (track_unresolved_variables_) {
+      if (IsLexicalVariableMode(for_info.parsing_result.descriptor.mode)) {
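+        // The parser creates a TDZ block for these names; here it suffices to
+        // declare them as LET in the current scope.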
+        for (auto name : for_info.bound_names) {
+          scope()->DeclareVariableName(name, LET);
+        }
+        return PreParserStatement::Default();
+      }
+    }
     return init_block;
   }
 
@@ -1255,9 +1374,43 @@
       PreParserExpression cond, PreParserStatement next,
       PreParserStatement body, Scope* inner_scope, const ForInfo& for_info,
       bool* ok) {
+    // See Parser::DesugarLexicalBindingsInForStatement.
+    if (track_unresolved_variables_) {
+      for (auto name : for_info.bound_names) {
+        inner_scope->DeclareVariableName(
+            name, for_info.parsing_result.descriptor.mode);
+      }
+    }
     return loop;
   }
 
+  V8_INLINE PreParserStatement BuildParameterInitializationBlock(
+      const PreParserFormalParameters& parameters, bool* ok) {
+    if (track_unresolved_variables_) {
+      for (auto parameter : parameters.params) {
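+        // A plain (non-destructuring) rest parameter is always last and
+        // introduces no additional names.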
+        if (parameter->is_nondestructuring_rest()) break;
+        if (parameter->pattern.variables_ != nullptr) {
+          for (auto variable : *parameter->pattern.variables_) {
+            scope()->DeclareVariableName(variable->raw_name(), LET);
+          }
+        }
+      }
+    }
+    return PreParserStatement::Default();
+  }
+
+  V8_INLINE PreParserStatement
+  BuildRejectPromiseOnException(PreParserStatement init_block) {
+    return PreParserStatement::Default();
+  }
+
+  V8_INLINE void InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope) {
+    scope->HoistSloppyBlockFunctions(nullptr);
+  }
+
+  V8_INLINE void InsertShadowingVarBindingInitializers(
+      PreParserStatement block) {}
+
   V8_INLINE PreParserExpression
   NewThrowReferenceError(MessageTemplate::Template message, int pos) {
     return PreParserExpression::Default();
@@ -1374,7 +1527,19 @@
   }
 
   V8_INLINE PreParserExpression ThisExpression(int pos = kNoSourcePosition) {
-    return PreParserExpression::This();
+    ZoneList<VariableProxy*>* variables = nullptr;
+    if (track_unresolved_variables_) {
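+      // Track 'this' like any other unresolved variable so that the scopes of
+      // lazily parsed functions record its use.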
+      AstNodeFactory factory(ast_value_factory());
+      // Setting the Zone is necessary because zone_ might be the temp Zone, and
+      // AstValueFactory doesn't know about it.
+      factory.set_zone(zone());
+      VariableProxy* proxy = scope()->NewUnresolved(
+          &factory, ast_value_factory()->this_string(), pos, THIS_VARIABLE);
+
+      variables = new (zone()) ZoneList<VariableProxy*>(1, zone());
+      variables->Add(proxy, zone());
+    }
+    return PreParserExpression::This(variables);
   }
 
   V8_INLINE PreParserExpression NewSuperPropertyReference(int pos) {
@@ -1449,13 +1614,31 @@
                                     PreParserExpression initializer,
                                     int initializer_end_position,
                                     bool is_rest) {
+    if (track_unresolved_variables_) {
+      DCHECK(FLAG_lazy_inner_functions);
+      parameters->params.Add(new (zone()) PreParserFormalParameters::Parameter(
+          pattern, !IsIdentifier(pattern), is_rest));
+    }
     parameters->UpdateArityAndFunctionLength(!initializer.IsEmpty(), is_rest);
   }
 
-  V8_INLINE void DeclareFormalParameter(DeclarationScope* scope,
-                                        PreParserIdentifier parameter) {
-    if (!classifier()->is_simple_parameter_list()) {
-      scope->SetHasNonSimpleParameters();
+  V8_INLINE void DeclareFormalParameters(
+      DeclarationScope* scope,
+      const ThreadedList<PreParserFormalParameters::Parameter>& parameters) {
+    bool is_simple = classifier()->is_simple_parameter_list();
+    if (!is_simple) scope->SetHasNonSimpleParameters();
+    if (track_unresolved_variables_) {
+      DCHECK(FLAG_lazy_inner_functions);
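+      // Only simple parameters and a plain rest parameter are declared here
+      // by name; variables bound in destructuring patterns are declared when
+      // the parameter initialization block is built.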
+      for (auto parameter : parameters) {
+        bool use_name = is_simple || parameter->is_nondestructuring_rest();
+        if (use_name) {
+          DCHECK_NOT_NULL(parameter->pattern.variables_);
+          DCHECK_EQ(parameter->pattern.variables_->length(), 1);
+          auto variable = (*parameter->pattern.variables_)[0];
+          scope->DeclareParameterName(variable->raw_name(), parameter->is_rest,
+                                      ast_value_factory());
+        }
+      }
     }
   }
 
@@ -1465,10 +1648,16 @@
       bool* ok) {
     // TODO(wingo): Detect duplicated identifiers in parameter lists.  Detect
     // parameter lists that are too long.
+    if (track_unresolved_variables_) {
+      DCHECK(FLAG_lazy_inner_functions);
+      if (params.variables_ != nullptr) {
+        for (auto variable : *params.variables_) {
+          parameters->scope->DeclareVariableName(variable->raw_name(), VAR);
+        }
+      }
+    }
   }
 
-  V8_INLINE void ReindexLiterals(const PreParserFormalParameters& parameters) {}
-
   V8_INLINE PreParserExpression NoTemplateTag() {
     return PreParserExpression::NoTemplateTag();
   }
@@ -1477,15 +1666,9 @@
     return !tag.IsNoTemplateTag();
   }
 
-  V8_INLINE void MaterializeUnspreadArgumentsLiterals(int count) {
-    for (int i = 0; i < count; ++i) {
-      function_state_->NextMaterializedLiteralIndex();
-    }
-  }
-
   V8_INLINE PreParserExpression
   ExpressionListToExpression(PreParserExpressionList args) {
-    return PreParserExpression::Default(args.identifiers_);
+    return PreParserExpression::Default(args.variables_);
   }
 
   V8_INLINE void AddAccessorPrefixToFunctionName(bool is_get,
@@ -1509,6 +1692,8 @@
     if (use_counts_ != nullptr) ++use_counts_[feature];
   }
 
+  V8_INLINE bool ParsingDynamicFunctionDeclaration() const { return false; }
+
   // Preparser's private field members.
 
   int* use_counts_;
@@ -1518,9 +1703,9 @@
 };
 
 PreParserExpression PreParser::SpreadCall(PreParserExpression function,
-                                          PreParserExpressionList args,
-                                          int pos) {
-  return factory()->NewCall(function, args, pos);
+                                          PreParserExpressionList args, int pos,
+                                          Call::PossiblyEval possibly_eval) {
+  return factory()->NewCall(function, args, pos, possibly_eval);
 }
 
 PreParserExpression PreParser::SpreadCallNew(PreParserExpression function,
@@ -1529,34 +1714,9 @@
   return factory()->NewCallNew(function, args, pos);
 }
 
-PreParserStatementList PreParser::ParseEagerFunctionBody(
-    PreParserIdentifier function_name, int pos,
-    const PreParserFormalParameters& parameters, FunctionKind kind,
-    FunctionLiteral::FunctionType function_type, bool* ok) {
-  PreParserStatementList result;
-
-  Scope* inner_scope = scope();
-  if (!parameters.is_simple) inner_scope = NewScope(BLOCK_SCOPE);
-
-  {
-    BlockState block_state(&scope_state_, inner_scope);
-    ParseStatementList(result, Token::RBRACE, ok);
-    if (!*ok) return PreParserStatementList();
-  }
-
-  Expect(Token::RBRACE, ok);
-  return result;
-}
-
 PreParserExpression PreParser::CloseTemplateLiteral(TemplateLiteralState* state,
                                                     int start,
                                                     PreParserExpression tag) {
-  if (IsTaggedTemplate(tag)) {
-    // Emulate generation of array literals for tag callsite
-    // 1st is array of cooked strings, second is array of raw strings
-    function_state_->NextMaterializedLiteralIndex();
-    function_state_->NextMaterializedLiteralIndex();
-  }
   return EmptyExpression();
 }
 
diff --git a/src/parsing/rewriter.cc b/src/parsing/rewriter.cc
index 69ac417..b56457e 100644
--- a/src/parsing/rewriter.cc
+++ b/src/parsing/rewriter.cc
@@ -6,6 +6,7 @@
 
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
+#include "src/objects-inl.h"
 #include "src/parsing/parse-info.h"
 #include "src/parsing/parser.h"
 
@@ -14,8 +15,8 @@
 
 class Processor final : public AstVisitor<Processor> {
  public:
-  Processor(Isolate* isolate, DeclarationScope* closure_scope, Variable* result,
-            AstValueFactory* ast_value_factory)
+  Processor(uintptr_t stack_limit, DeclarationScope* closure_scope,
+            Variable* result, AstValueFactory* ast_value_factory)
       : result_(result),
         result_assigned_(false),
         replacement_(nullptr),
@@ -25,7 +26,7 @@
         closure_scope_(closure_scope),
         factory_(ast_value_factory) {
     DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
-    InitializeAstVisitor(isolate);
+    InitializeAstVisitor(stack_limit);
   }
 
   Processor(Parser* parser, DeclarationScope* closure_scope, Variable* result,
@@ -243,7 +244,6 @@
   // Only rewrite finally if it could contain 'break' or 'continue'. Always
   // rewrite try.
   if (breakable_) {
-    bool set_after = is_set_;
     // Only set result before a 'break' or 'continue'.
     is_set_ = true;
     Visit(node->finally_block());
@@ -265,7 +265,6 @@
         0, factory()->NewExpressionStatement(save, kNoSourcePosition), zone());
     node->finally_block()->statements()->Add(
         factory()->NewExpressionStatement(restore, kNoSourcePosition), zone());
-    is_set_ = set_after;
   }
   Visit(node->try_block());
   node->set_try_block(replacement_->AsBlock());
@@ -356,23 +355,36 @@
 // Assumes code has been parsed.  Mutates the AST, so the AST should not
 // continue to be used in the case of failure.
 bool Rewriter::Rewrite(ParseInfo* info) {
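+  // Rewriting must not touch the V8 heap until the AST values are
+  // internalized below; these scopes enforce that.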
+  DisallowHeapAllocation no_allocation;
+  DisallowHandleAllocation no_handles;
+  DisallowHandleDereference no_deref;
+
+  RuntimeCallTimerScope runtimeTimer(
+      info->isolate(), &RuntimeCallStats::CompileRewriteReturnResult);
+
   FunctionLiteral* function = info->literal();
   DCHECK_NOT_NULL(function);
   Scope* scope = function->scope();
   DCHECK_NOT_NULL(scope);
   if (!scope->is_script_scope() && !scope->is_eval_scope()) return true;
+
   DeclarationScope* closure_scope = scope->GetClosureScope();
 
   ZoneList<Statement*>* body = function->body();
   if (!body->is_empty()) {
     Variable* result = closure_scope->NewTemporary(
         info->ast_value_factory()->dot_result_string());
-    // The name string must be internalized at this point.
-    info->ast_value_factory()->Internalize(info->isolate());
-    DCHECK(!result->name().is_null());
-    Processor processor(info->isolate(), closure_scope, result,
-                        info->ast_value_factory());
+    Processor processor(info->isolate()->stack_guard()->real_climit(),
+                        closure_scope, result, info->ast_value_factory());
     processor.Process(body);
+
+    // TODO(leszeks): Remove this check and the Release() calls once
+    // internalization is moved out of parsing/analysis.
+    DCHECK(ThreadId::Current().Equals(info->isolate()->thread_id()));
+    no_deref.Release();
+    no_handles.Release();
+    no_allocation.Release();
+
     // Internalize any values created during rewriting.
     info->ast_value_factory()->Internalize(info->isolate());
     if (processor.HasStackOverflow()) return false;
@@ -392,6 +404,10 @@
 
 bool Rewriter::Rewrite(Parser* parser, DeclarationScope* closure_scope,
                        DoExpression* expr, AstValueFactory* factory) {
+  DisallowHeapAllocation no_allocation;
+  DisallowHandleAllocation no_handles;
+  DisallowHandleDereference no_deref;
+
   Block* block = expr->block();
   DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
   DCHECK(block->scope() == nullptr ||
diff --git a/src/parsing/scanner-character-streams.cc b/src/parsing/scanner-character-streams.cc
index f7c7fd5..d3162df 100644
--- a/src/parsing/scanner-character-streams.cc
+++ b/src/parsing/scanner-character-streams.cc
@@ -5,6 +5,7 @@
 #include "src/parsing/scanner-character-streams.h"
 
 #include "include/v8.h"
+#include "src/counters.h"
 #include "src/globals.h"
 #include "src/handles.h"
 #include "src/objects-inl.h"
@@ -194,9 +195,11 @@
 class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
  public:
   Utf8ExternalStreamingStream(
-      ScriptCompiler::ExternalSourceStream* source_stream)
+      ScriptCompiler::ExternalSourceStream* source_stream,
+      RuntimeCallStats* stats)
       : current_({0, {0, 0, unibrow::Utf8::Utf8IncrementalBuffer(0)}}),
-        source_stream_(source_stream) {}
+        source_stream_(source_stream),
+        stats_(stats) {}
   ~Utf8ExternalStreamingStream() override {
     for (size_t i = 0; i < chunks_.size(); i++) delete[] chunks_[i].data;
   }
@@ -245,6 +248,7 @@
   std::vector<Chunk> chunks_;
   Position current_;
   ScriptCompiler::ExternalSourceStream* source_stream_;
+  RuntimeCallStats* stats_;
 };
 
 bool Utf8ExternalStreamingStream::SkipToPosition(size_t position) {
@@ -335,6 +339,7 @@
 }
 
 bool Utf8ExternalStreamingStream::FetchChunk() {
+  RuntimeCallTimerScope scope(stats_, &RuntimeCallStats::GetMoreDataCallback);
   DCHECK_EQ(current_.chunk_no, chunks_.size());
   DCHECK(chunks_.empty() || chunks_.back().length != 0);
 
@@ -466,20 +471,23 @@
 // Return the chunk index for the chunk containing position.
 // If position is past the end of the stream, the index of the last,
 // zero-length chunk is returned.
-size_t FindChunk(Chunks& chunks, ScriptCompiler::ExternalSourceStream* source_,
-                 size_t position) {
+size_t FindChunk(Chunks& chunks, ScriptCompiler::ExternalSourceStream* source,
+                 size_t position, RuntimeCallStats* stats) {
   size_t end_pos =
       chunks.empty() ? 0 : (chunks.back().byte_pos + chunks.back().byte_length);
 
   // Get more data if needed. We usually won't enter the loop body.
   bool out_of_data = !chunks.empty() && chunks.back().byte_length == 0;
-  while (!out_of_data && end_pos <= position + 1) {
-    const uint8_t* chunk = nullptr;
-    size_t len = source_->GetMoreData(&chunk);
+  {
+    RuntimeCallTimerScope scope(stats, &RuntimeCallStats::GetMoreDataCallback);
+    while (!out_of_data && end_pos <= position + 1) {
+      const uint8_t* chunk = nullptr;
+      size_t len = source->GetMoreData(&chunk);
 
-    chunks.push_back({chunk, len, end_pos});
-    end_pos += len;
-    out_of_data = (len == 0);
+      chunks.push_back({chunk, len, end_pos});
+      end_pos += len;
+      out_of_data = (len == 0);
+    }
   }
 
   // Here, we should always have at least one chunk, and we either have the
@@ -520,8 +528,8 @@
 class OneByteExternalStreamingStream : public BufferedUtf16CharacterStream {
  public:
   explicit OneByteExternalStreamingStream(
-      ScriptCompiler::ExternalSourceStream* source)
-      : source_(source) {}
+      ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats)
+      : source_(source), stats_(stats) {}
   ~OneByteExternalStreamingStream() override { DeleteChunks(chunks_); }
 
  protected:
@@ -530,10 +538,11 @@
  private:
   Chunks chunks_;
   ScriptCompiler::ExternalSourceStream* source_;
+  RuntimeCallStats* stats_;
 };
 
 size_t OneByteExternalStreamingStream::FillBuffer(size_t position) {
-  const Chunk& chunk = chunks_[FindChunk(chunks_, source_, position)];
+  const Chunk& chunk = chunks_[FindChunk(chunks_, source_, position, stats_)];
   if (chunk.byte_length == 0) return 0;
 
   size_t start_pos = position - chunk.byte_pos;
@@ -554,7 +563,7 @@
 class TwoByteExternalStreamingStream : public Utf16CharacterStream {
  public:
   explicit TwoByteExternalStreamingStream(
-      ScriptCompiler::ExternalSourceStream* source);
+      ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats);
   ~TwoByteExternalStreamingStream() override;
 
  protected:
@@ -562,14 +571,16 @@
 
   Chunks chunks_;
   ScriptCompiler::ExternalSourceStream* source_;
+  RuntimeCallStats* stats_;
   uc16 one_char_buffer_;
 };
 
 TwoByteExternalStreamingStream::TwoByteExternalStreamingStream(
-    ScriptCompiler::ExternalSourceStream* source)
+    ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats)
     : Utf16CharacterStream(&one_char_buffer_, &one_char_buffer_,
                            &one_char_buffer_, 0),
       source_(source),
+      stats_(stats),
       one_char_buffer_(0) {}
 
 TwoByteExternalStreamingStream::~TwoByteExternalStreamingStream() {
@@ -581,7 +592,7 @@
 
   // We'll search for the 2nd byte of our character, to make sure we
   // have enough data for at least one character.
-  size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+  size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1, stats_);
 
   // Out of data? Return 0.
   if (chunks_[chunk_no].byte_length == 0) {
@@ -649,7 +660,7 @@
 class TwoByteExternalBufferedStream : public Utf16CharacterStream {
  public:
   explicit TwoByteExternalBufferedStream(
-      ScriptCompiler::ExternalSourceStream* source);
+      ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats);
   ~TwoByteExternalBufferedStream();
 
  protected:
@@ -667,11 +678,14 @@
 
   Chunks chunks_;
   ScriptCompiler::ExternalSourceStream* source_;
+  RuntimeCallStats* stats_;
 };
 
 TwoByteExternalBufferedStream::TwoByteExternalBufferedStream(
-    ScriptCompiler::ExternalSourceStream* source)
-    : Utf16CharacterStream(buffer_, buffer_, buffer_, 0), source_(source) {}
+    ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats)
+    : Utf16CharacterStream(buffer_, buffer_, buffer_, 0),
+      source_(source),
+      stats_(stats) {}
 
 TwoByteExternalBufferedStream::~TwoByteExternalBufferedStream() {
   DeleteChunks(chunks_);
@@ -680,7 +694,7 @@
 bool TwoByteExternalBufferedStream::ReadBlock() {
   size_t position = pos();
   // Find the chunk in which the position belongs.
-  size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+  size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1, stats_);
 
   // Out of data? Return 0.
   if (chunks_[chunk_no].byte_length == 0) {
@@ -726,7 +740,7 @@
   {
     size_t new_pos = position / kBufferSize * kBufferSize;
     if (new_pos != position) {
-      chunk_no = FindChunk(chunks_, source_, 2 * new_pos + 1);
+      chunk_no = FindChunk(chunks_, source_, 2 * new_pos + 1, stats_);
       buffer_pos_ = new_pos;
       buffer_cursor_ = buffer_start_ + (position - buffer_pos_);
       position = new_pos;
@@ -768,7 +782,7 @@
   totalLength += bytes_to_move;
   position = (current->byte_pos + current->byte_length) / 2;
   if (position - buffer_pos_ < kBufferSize) {
-    chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+    chunk_no = FindChunk(chunks_, source_, 2 * position + 1, stats_);
     current = &chunks_[chunk_no];
     odd_start = current->byte_pos % 2;
     bytes_to_move = i::Min(2 * kBufferSize - totalLength, current->byte_length);
@@ -781,7 +795,7 @@
                  current->data, bytes_to_move);
       totalLength += bytes_to_move;
       position = (current->byte_pos + current->byte_length) / 2;
-      chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+      chunk_no = FindChunk(chunks_, source_, 2 * position + 1, stats_);
       current = &chunks_[chunk_no];
       odd_start = current->byte_pos % 2;
       bytes_to_move =
@@ -828,18 +842,19 @@
 
 Utf16CharacterStream* ScannerStream::For(
     ScriptCompiler::ExternalSourceStream* source_stream,
-    v8::ScriptCompiler::StreamedSource::Encoding encoding) {
+    v8::ScriptCompiler::StreamedSource::Encoding encoding,
+    RuntimeCallStats* stats) {
   switch (encoding) {
     case v8::ScriptCompiler::StreamedSource::TWO_BYTE:
 #if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
-      return new TwoByteExternalStreamingStream(source_stream);
+      return new TwoByteExternalStreamingStream(source_stream, stats);
 #else
-      return new TwoByteExternalBufferedStream(source_stream);
+      return new TwoByteExternalBufferedStream(source_stream, stats);
 #endif
     case v8::ScriptCompiler::StreamedSource::ONE_BYTE:
-      return new OneByteExternalStreamingStream(source_stream);
+      return new OneByteExternalStreamingStream(source_stream, stats);
     case v8::ScriptCompiler::StreamedSource::UTF8:
-      return new Utf8ExternalStreamingStream(source_stream);
+      return new Utf8ExternalStreamingStream(source_stream, stats);
   }
   UNREACHABLE();
   return nullptr;
diff --git a/src/parsing/scanner-character-streams.h b/src/parsing/scanner-character-streams.h
index ac81613..291765c 100644
--- a/src/parsing/scanner-character-streams.h
+++ b/src/parsing/scanner-character-streams.h
@@ -6,12 +6,15 @@
 #define V8_PARSING_SCANNER_CHARACTER_STREAMS_H_
 
 #include "include/v8.h"  // for v8::ScriptCompiler
-#include "src/handles.h"
 
 namespace v8 {
 namespace internal {
 
+template <typename T>
+class Handle;
 class Utf16CharacterStream;
+class RuntimeCallStats;
+class String;
 
 class ScannerStream {
  public:
@@ -20,7 +23,8 @@
                                    int end_pos);
   static Utf16CharacterStream* For(
       ScriptCompiler::ExternalSourceStream* source_stream,
-      ScriptCompiler::StreamedSource::Encoding encoding);
+      ScriptCompiler::StreamedSource::Encoding encoding,
+      RuntimeCallStats* stats);
 
   // For testing:
   static std::unique_ptr<Utf16CharacterStream> ForTesting(const char* data);
diff --git a/src/parsing/scanner.cc b/src/parsing/scanner.cc
index 363ab7d..c1580bb 100644
--- a/src/parsing/scanner.cc
+++ b/src/parsing/scanner.cc
@@ -19,6 +19,46 @@
 namespace v8 {
 namespace internal {
 
+// Scoped helper for saving & restoring scanner error state.
+// This is used for tagged template literals, in which normally forbidden
+// escape sequences are allowed.
+class ErrorState {
+ public:
+  ErrorState(MessageTemplate::Template* message_stack,
+             Scanner::Location* location_stack)
+      : message_stack_(message_stack),
+        old_message_(*message_stack),
+        location_stack_(location_stack),
+        old_location_(*location_stack) {
+    *message_stack_ = MessageTemplate::kNone;
+    *location_stack_ = Scanner::Location::invalid();
+  }
+
+  ~ErrorState() {
+    *message_stack_ = old_message_;
+    *location_stack_ = old_location_;
+  }
+
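+  // If an error was recorded while this ErrorState was active, move it to the
+  // given destination (unless one is already pending there) and clear it.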
+  void MoveErrorTo(MessageTemplate::Template* message_dest,
+                   Scanner::Location* location_dest) {
+    if (*message_stack_ == MessageTemplate::kNone) {
+      return;
+    }
+    if (*message_dest == MessageTemplate::kNone) {
+      *message_dest = *message_stack_;
+      *location_dest = *location_stack_;
+    }
+    *message_stack_ = MessageTemplate::kNone;
+    *location_stack_ = Scanner::Location::invalid();
+  }
+
+ private:
+  MessageTemplate::Template* const message_stack_;
+  MessageTemplate::Template const old_message_;
+  Scanner::Location* const location_stack_;
+  Scanner::Location const old_location_;
+};
+
 Handle<String> Scanner::LiteralBuffer::Internalize(Isolate* isolate) const {
   if (is_one_byte()) {
     return isolate->factory()->InternalizeOneByteString(one_byte_literal());
@@ -26,6 +66,68 @@
   return isolate->factory()->InternalizeTwoByteString(two_byte_literal());
 }
 
+int Scanner::LiteralBuffer::NewCapacity(int min_capacity) {
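+  // Grow geometrically, but cap each growth step at kMaxGrowth bytes.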
+  int capacity = Max(min_capacity, backing_store_.length());
+  int new_capacity = Min(capacity * kGrowthFactory, capacity + kMaxGrowth);
+  return new_capacity;
+}
+
+void Scanner::LiteralBuffer::ExpandBuffer() {
+  Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
+  MemCopy(new_store.start(), backing_store_.start(), position_);
+  backing_store_.Dispose();
+  backing_store_ = new_store;
+}
+
+void Scanner::LiteralBuffer::ConvertToTwoByte() {
+  DCHECK(is_one_byte_);
+  Vector<byte> new_store;
+  int new_content_size = position_ * kUC16Size;
+  if (new_content_size >= backing_store_.length()) {
+    // Ensure room for all currently read code units as UC16 as well
+    // as the code unit about to be stored.
+    new_store = Vector<byte>::New(NewCapacity(new_content_size));
+  } else {
+    new_store = backing_store_;
+  }
+  uint8_t* src = backing_store_.start();
+  uint16_t* dst = reinterpret_cast<uint16_t*>(new_store.start());
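+  // Copy backwards so that an in-place widening (new_store == backing_store_)
+  // never overwrites bytes that have not been widened yet.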
+  for (int i = position_ - 1; i >= 0; i--) {
+    dst[i] = src[i];
+  }
+  if (new_store.start() != backing_store_.start()) {
+    backing_store_.Dispose();
+    backing_store_ = new_store;
+  }
+  position_ = new_content_size;
+  is_one_byte_ = false;
+}
+
+void Scanner::LiteralBuffer::AddCharSlow(uc32 code_unit) {
+  if (position_ >= backing_store_.length()) ExpandBuffer();
+  if (is_one_byte_) {
+    if (code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
+      backing_store_[position_] = static_cast<byte>(code_unit);
+      position_ += kOneByteSize;
+      return;
+    }
+    ConvertToTwoByte();
+  }
+  if (code_unit <=
+      static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
+    *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
+    position_ += kUC16Size;
+  } else {
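+    // Code points outside the BMP are stored as a UTF-16 surrogate pair.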
+    *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
+        unibrow::Utf16::LeadSurrogate(code_unit);
+    position_ += kUC16Size;
+    if (position_ >= backing_store_.length()) ExpandBuffer();
+    *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
+        unibrow::Utf16::TrailSurrogate(code_unit);
+    position_ += kUC16Size;
+  }
+}
+
 // ----------------------------------------------------------------------------
 // Scanner::BookmarkScope
 
@@ -78,10 +180,8 @@
 Scanner::Scanner(UnicodeCache* unicode_cache)
     : unicode_cache_(unicode_cache),
       octal_pos_(Location::invalid()),
-      decimal_with_leading_zero_pos_(Location::invalid()),
-      found_html_comment_(false) {
-}
-
+      octal_message_(MessageTemplate::kNone),
+      found_html_comment_(false) {}
 
 void Scanner::Initialize(Utf16CharacterStream* source) {
   source_ = source;
@@ -888,16 +988,12 @@
       break;
   }
 
-  // According to ECMA-262, section 7.8.4, characters not covered by the
-  // above cases should be illegal, but they are commonly handled as
-  // non-escaped characters by JS VMs.
+  // Other escaped characters are interpreted as their non-escaped version.
   AddLiteralChar(c);
   return true;
 }
 
 
-// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
-// ECMA-262. Other JS VMs support them.
 template <bool capture_raw>
 uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
   uc32 x = c - '0';
@@ -917,6 +1013,7 @@
   // occur before the "use strict" directive.
   if (c != '0' || i > 0) {
     octal_pos_ = Location(source_pos() - i - 1, source_pos() - 1);
+    octal_message_ = MessageTemplate::kStrictOctalEscape;
   }
   return x;
 }
@@ -978,6 +1075,12 @@
   // TEMPLATE_TAIL terminates a TemplateLiteral and does not need to be
   // followed by an Expression.
 
+  // These scoped helpers save and restore the original error state, so that we
+  // can specially treat invalid escape sequences in templates (which are
+  // handled by the parser).
+  ErrorState scanner_error_state(&scanner_error_, &scanner_error_location_);
+  ErrorState octal_error_state(&octal_message_, &octal_pos_);
+
   Token::Value result = Token::TEMPLATE_SPAN;
   LiteralScope literal(this);
   StartRawLiteral();
@@ -1008,8 +1111,16 @@
             AddRawLiteralChar('\n');
           }
         }
-      } else if (!ScanEscape<capture_raw, in_template_literal>()) {
-        return Token::ILLEGAL;
+      } else {
+        bool success = ScanEscape<capture_raw, in_template_literal>();
+        USE(success);
+        DCHECK_EQ(!success, has_error());
+        // For templates, invalid escape sequence checking is handled in the
+        // parser.
+        scanner_error_state.MoveErrorTo(&invalid_template_escape_message_,
+                                        &invalid_template_escape_location_);
+        octal_error_state.MoveErrorTo(&invalid_template_escape_message_,
+                                      &invalid_template_escape_location_);
       }
     } else if (c < 0) {
       // Unterminated template literal
@@ -1034,6 +1145,7 @@
   literal.Complete();
   next_.location.end_pos = source_pos();
   next_.token = result;
+
   return result;
 }
 
@@ -1130,6 +1242,7 @@
           if (c0_  < '0' || '7'  < c0_) {
             // Octal literal finished.
             octal_pos_ = Location(start_pos, source_pos());
+            octal_message_ = MessageTemplate::kStrictOctalLiteral;
             break;
           }
           AddLiteralCharAdvance();
@@ -1152,13 +1265,16 @@
         }
 
         if (next_.literal_chars->one_byte_literal().length() <= 10 &&
-            value <= Smi::kMaxValue && c0_ != '.' && c0_ != 'e' && c0_ != 'E') {
+            value <= Smi::kMaxValue && c0_ != '.' &&
+            (c0_ == kEndOfInput || !unicode_cache_->IsIdentifierStart(c0_))) {
           next_.smi_value_ = static_cast<uint32_t>(value);
           literal.Complete();
           HandleLeadSurrogate();
 
-          if (kind == DECIMAL_WITH_LEADING_ZERO)
-            decimal_with_leading_zero_pos_ = Location(start_pos, source_pos());
+          if (kind == DECIMAL_WITH_LEADING_ZERO) {
+            octal_pos_ = Location(start_pos, source_pos());
+            octal_message_ = MessageTemplate::kStrictDecimalWithLeadingZero;
+          }
           return Token::SMI;
         }
         HandleLeadSurrogate();
@@ -1198,8 +1314,10 @@
 
   literal.Complete();
 
-  if (kind == DECIMAL_WITH_LEADING_ZERO)
-    decimal_with_leading_zero_pos_ = Location(start_pos, source_pos());
+  if (kind == DECIMAL_WITH_LEADING_ZERO) {
+    octal_pos_ = Location(start_pos, source_pos());
+    octal_message_ = MessageTemplate::kStrictDecimalWithLeadingZero;
+  }
   return Token::NUMBER;
 }
 
@@ -1339,19 +1457,6 @@
 }
 
 
-bool Scanner::IdentifierIsFutureStrictReserved(
-    const AstRawString* string) const {
-  // Keywords are always 1-byte strings.
-  if (!string->is_one_byte()) return false;
-  if (string->IsOneByteEqualTo("let") || string->IsOneByteEqualTo("static") ||
-      string->IsOneByteEqualTo("yield")) {
-    return true;
-  }
-  return Token::FUTURE_STRICT_RESERVED_WORD ==
-         KeywordOrIdentifierToken(string->raw_data(), string->length());
-}
-
-
 Token::Value Scanner::ScanIdentifierOrKeyword() {
   DCHECK(unicode_cache_->IsIdentifierStart(c0_));
   LiteralScope literal(this);
@@ -1435,7 +1540,9 @@
     Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
     Token::Value token =
         KeywordOrIdentifierToken(chars.start(), chars.length());
-    if (token == Token::IDENTIFIER) literal.Complete();
+    if (token == Token::IDENTIFIER ||
+        token == Token::FUTURE_STRICT_RESERVED_WORD)
+      literal.Complete();
     return token;
   }
   literal.Complete();
@@ -1612,14 +1719,13 @@
   return std::find(str.begin(), str.end(), '.') != str.end();
 }
 
-
-int Scanner::FindSymbol(DuplicateFinder* finder, int value) {
+bool Scanner::FindSymbol(DuplicateFinder* finder) {
   // TODO(vogelheim): Move this logic into the calling class; this can be fully
   //                  implemented using the public interface.
   if (is_literal_one_byte()) {
-    return finder->AddOneByteSymbol(literal_one_byte_string(), value);
+    return finder->AddOneByteSymbol(literal_one_byte_string());
   }
-  return finder->AddTwoByteSymbol(literal_two_byte_string(), value);
+  return finder->AddTwoByteSymbol(literal_two_byte_string());
 }
 
 void Scanner::SeekNext(size_t position) {
diff --git a/src/parsing/scanner.h b/src/parsing/scanner.h
index 6f6fab5..9885b8e 100644
--- a/src/parsing/scanner.h
+++ b/src/parsing/scanner.h
@@ -209,10 +209,27 @@
   // (the token last returned by Next()).
   Location location() const { return current_.location; }
 
+  // This error specifically means an invalid hex or Unicode escape sequence.
   bool has_error() const { return scanner_error_ != MessageTemplate::kNone; }
   MessageTemplate::Template error() const { return scanner_error_; }
   Location error_location() const { return scanner_error_location_; }
 
+  bool has_invalid_template_escape() const {
+    return invalid_template_escape_message_ != MessageTemplate::kNone;
+  }
+  MessageTemplate::Template invalid_template_escape_message() const {
+    return invalid_template_escape_message_;
+  }
+  Location invalid_template_escape_location() const {
+    return invalid_template_escape_location_;
+  }
+
+  void clear_invalid_template_escape() {
+    DCHECK(has_invalid_template_escape());
+    invalid_template_escape_message_ = MessageTemplate::kNone;
+    invalid_template_escape_location_ = Location::invalid();
+  }
+
   // Similar functions for the upcoming token.
 
   // One token look-ahead (past the token returned by Next()).
@@ -268,20 +285,17 @@
     return false;
   }
 
-  int FindSymbol(DuplicateFinder* finder, int value);
+  bool FindSymbol(DuplicateFinder* finder);
 
   UnicodeCache* unicode_cache() { return unicode_cache_; }
 
   // Returns the location of the last seen octal literal.
   Location octal_position() const { return octal_pos_; }
-  void clear_octal_position() { octal_pos_ = Location::invalid(); }
-  // Returns the location of the last seen decimal literal with a leading zero.
-  Location decimal_with_leading_zero_position() const {
-    return decimal_with_leading_zero_pos_;
+  void clear_octal_position() {
+    octal_pos_ = Location::invalid();
+    octal_message_ = MessageTemplate::kNone;
   }
-  void clear_decimal_with_leading_zero_position() {
-    decimal_with_leading_zero_pos_ = Location::invalid();
-  }
+  MessageTemplate::Template octal_message() const { return octal_message_; }
 
   // Returns the value of the last smi that was scanned.
   uint32_t smi_value() const { return current_.smi_value_; }
@@ -328,8 +342,6 @@
     return tmp;
   }
 
-  bool IdentifierIsFutureStrictReserved(const AstRawString* string) const;
-
   bool FoundHtmlComment() const { return found_html_comment_; }
 
  private:
@@ -358,36 +370,16 @@
     ~LiteralBuffer() { backing_store_.Dispose(); }
 
     INLINE(void AddChar(char code_unit)) {
-      if (position_ >= backing_store_.length()) ExpandBuffer();
-      DCHECK(is_one_byte_);
       DCHECK(IsValidAscii(code_unit));
-      backing_store_[position_] = static_cast<byte>(code_unit);
-      position_ += kOneByteSize;
-      return;
+      AddOneByteChar(static_cast<byte>(code_unit));
     }
 
     INLINE(void AddChar(uc32 code_unit)) {
-      if (position_ >= backing_store_.length()) ExpandBuffer();
-      if (is_one_byte_) {
-        if (code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
-          backing_store_[position_] = static_cast<byte>(code_unit);
-          position_ += kOneByteSize;
-          return;
-        }
-        ConvertToTwoByte();
-      }
-      if (code_unit <=
-          static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
-        *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
-        position_ += kUC16Size;
+      if (is_one_byte_ &&
+          code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
+        AddOneByteChar(static_cast<byte>(code_unit));
       } else {
-        *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
-            unibrow::Utf16::LeadSurrogate(code_unit);
-        position_ += kUC16Size;
-        if (position_ >= backing_store_.length()) ExpandBuffer();
-        *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
-            unibrow::Utf16::TrailSurrogate(code_unit);
-        position_ += kUC16Size;
+        AddCharSlow(code_unit);
       }
     }
 
@@ -439,43 +431,18 @@
       return iscntrl(code_unit) || isprint(code_unit);
     }
 
-    inline int NewCapacity(int min_capacity) {
-      int capacity = Max(min_capacity, backing_store_.length());
-      int new_capacity = Min(capacity * kGrowthFactory, capacity + kMaxGrowth);
-      return new_capacity;
-    }
-
-    void ExpandBuffer() {
-      Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
-      MemCopy(new_store.start(), backing_store_.start(), position_);
-      backing_store_.Dispose();
-      backing_store_ = new_store;
-    }
-
-    void ConvertToTwoByte() {
+    INLINE(void AddOneByteChar(byte one_byte_char)) {
       DCHECK(is_one_byte_);
-      Vector<byte> new_store;
-      int new_content_size = position_ * kUC16Size;
-      if (new_content_size >= backing_store_.length()) {
-        // Ensure room for all currently read code units as UC16 as well
-        // as the code unit about to be stored.
-        new_store = Vector<byte>::New(NewCapacity(new_content_size));
-      } else {
-        new_store = backing_store_;
-      }
-      uint8_t* src = backing_store_.start();
-      uint16_t* dst = reinterpret_cast<uint16_t*>(new_store.start());
-      for (int i = position_ - 1; i >= 0; i--) {
-        dst[i] = src[i];
-      }
-      if (new_store.start() != backing_store_.start()) {
-        backing_store_.Dispose();
-        backing_store_ = new_store;
-      }
-      position_ = new_content_size;
-      is_one_byte_ = false;
+      if (position_ >= backing_store_.length()) ExpandBuffer();
+      backing_store_[position_] = one_byte_char;
+      position_ += kOneByteSize;
     }
 
+    void AddCharSlow(uc32 code_unit);
+    int NewCapacity(int min_capacity);
+    void ExpandBuffer();
+    void ConvertToTwoByte();
+
     bool is_one_byte_;
     int position_;
     Vector<byte> backing_store_;
@@ -516,6 +483,7 @@
     next_next_.raw_literal_chars = NULL;
     found_html_comment_ = false;
     scanner_error_ = MessageTemplate::kNone;
+    invalid_template_escape_message_ = MessageTemplate::kNone;
   }
 
   void ReportScannerError(const Location& location,
@@ -787,7 +755,7 @@
 
   // Last-seen positions of potentially problematic tokens.
   Location octal_pos_;
-  Location decimal_with_leading_zero_pos_;
+  MessageTemplate::Template octal_message_;
 
   // One Unicode character look-ahead; c0_ < 0 at the end of the input.
   uc32 c0_;
@@ -806,6 +774,9 @@
 
   MessageTemplate::Template scanner_error_;
   Location scanner_error_location_;
+
+  MessageTemplate::Template invalid_template_escape_message_;
+  Location invalid_template_escape_location_;
 };
 
 }  // namespace internal
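
Two scanner-state changes in this file are worth noting. First, octal_pos_ is now paired with an octal_message_ instead of a separate decimal-with-leading-zero location, so one location field can serve the different legacy-numeric diagnostics. Second, the new invalid_template_escape_message_/location_ pair lets the scanner record an invalid escape inside a template literal without failing on the spot: tagged templates are permitted to contain such escapes (their cooked value becomes undefined), while untagged templates must still raise a SyntaxError, and only the parser knows which case applies. A sketch of that record-and-defer pattern, with stand-in types:

    // Stand-ins for MessageTemplate::Template and Location (sketch only).
    enum class Msg { kNone, kInvalidEscape };
    struct Loc { int beg = -1, end = -1; };

    struct TemplateEscapeRecorder {
      Msg message = Msg::kNone;
      Loc location;

      // Keep the first offending escape; whether it becomes a SyntaxError
      // is decided later, once the parser knows if the template is tagged.
      void Record(Msg m, Loc l) {
        if (message == Msg::kNone) {
          message = m;
          location = l;
        }
      }
    };
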
diff --git a/src/pending-compilation-error-handler.cc b/src/pending-compilation-error-handler.cc
index 8f7660d..e2db6db 100644
--- a/src/pending-compilation-error-handler.cc
+++ b/src/pending-compilation-error-handler.cc
@@ -9,6 +9,7 @@
 #include "src/handles.h"
 #include "src/isolate.h"
 #include "src/messages.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -21,7 +22,6 @@
         ->NewStringFromUtf8(CStrVector(char_arg_))
         .ToHandleChecked();
   }
-  if (!handle_arg_.is_null()) return handle_arg_;
   return isolate->factory()->undefined_string();
 }
 
diff --git a/src/pending-compilation-error-handler.h b/src/pending-compilation-error-handler.h
index 563bef9..42c679e 100644
--- a/src/pending-compilation-error-handler.h
+++ b/src/pending-compilation-error-handler.h
@@ -58,20 +58,6 @@
     error_type_ = error_type;
   }
 
-  void ReportMessageAt(int start_position, int end_position,
-                       MessageTemplate::Template message, Handle<String> arg,
-                       ParseErrorType error_type = kSyntaxError) {
-    if (has_pending_error_) return;
-    has_pending_error_ = true;
-    start_position_ = start_position;
-    end_position_ = end_position;
-    message_ = message;
-    char_arg_ = nullptr;
-    arg_ = nullptr;
-    handle_arg_ = arg;
-    error_type_ = error_type;
-  }
-
   bool has_pending_error() const { return has_pending_error_; }
 
   void ThrowPendingError(Isolate* isolate, Handle<Script> script);
@@ -86,7 +72,6 @@
   MessageTemplate::Template message_;
   const AstRawString* arg_;
   const char* char_arg_;
-  Handle<String> handle_arg_;
   ParseErrorType error_type_;
 
   DISALLOW_COPY_AND_ASSIGN(PendingCompilationErrorHandler);
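
Dropping the Handle<String> overload of ReportMessageAt (and the handle_arg_ field, plus its branch in ArgumentString in the .cc hunk above) plausibly means the pending-error state no longer pins a heap handle for the lifetime of a parse: the remaining argument forms (const AstRawString*, const char*) are heap-independent and are materialized into a String only when the error is actually thrown. A simplified sketch of that rule, with hypothetical shapes rather than the real class:

    #include <string>

    // Sketch: pending state holds only heap-independent data and
    // materializes a string late (hypothetical, simplified).
    struct PendingErrorSketch {
      const char* char_arg = nullptr;  // owned by the parser, not the heap

      // Called only at throw time, when an isolate and heap are available.
      std::string ArgumentString() const {
        return char_arg != nullptr ? std::string(char_arg) : "undefined";
      }
    };
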
diff --git a/src/perf-jit.cc b/src/perf-jit.cc
index 6641a12..907a4cd 100644
--- a/src/perf-jit.cc
+++ b/src/perf-jit.cc
@@ -212,7 +212,7 @@
   DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize);
 
   // Debug info has to be emitted first.
-  if (FLAG_perf_prof_debug_info && shared != nullptr) {
+  if (FLAG_perf_prof && shared != nullptr) {
     LogWriteDebugInfo(code, shared);
   }
 
@@ -246,6 +246,38 @@
   LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
 }
 
+namespace {
+
+std::unique_ptr<char[]> GetScriptName(Handle<Script> script) {
+  Object* name_or_url = script->GetNameOrSourceURL();
+  int name_length = 0;
+  std::unique_ptr<char[]> name_string;
+  if (name_or_url->IsString()) {
+    return String::cast(name_or_url)
+        ->ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &name_length);
+  } else {
+    const char unknown[] = "<unknown>";
+    name_length = static_cast<int>(strlen(unknown));
+    char* buffer = NewArray<char>(name_length);
+    base::OS::StrNCpy(buffer, name_length + 1, unknown,
+                      static_cast<size_t>(name_length));
+    return std::unique_ptr<char[]>(buffer);
+  }
+}
+
+SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,
+                                         Handle<SharedFunctionInfo> function,
+                                         SourcePosition pos) {
+  if (code->is_turbofanned() || code->is_crankshafted()) {
+    DisallowHeapAllocation disallow;
+    return pos.InliningStack(code)[0];
+  } else {
+    return SourcePositionInfo(pos, function);
+  }
+}
+
+}  // namespace
+
 void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
   // Compute the entry count and get the name of the script.
   uint32_t entry_count = 0;
@@ -255,24 +287,6 @@
   }
   if (entry_count == 0) return;
   Handle<Script> script(Script::cast(shared->script()));
-  Handle<Object> name_or_url(Script::GetNameOrSourceURL(script));
-
-  int name_length = 0;
-  std::unique_ptr<char[]> name_string;
-  if (name_or_url->IsString()) {
-    name_string =
-        Handle<String>::cast(name_or_url)
-            ->ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &name_length);
-    DCHECK_EQ(0, name_string.get()[name_length]);
-  } else {
-    const char unknown[] = "<unknown>";
-    name_length = static_cast<int>(strlen(unknown));
-    char* buffer = NewArray<char>(name_length);
-    base::OS::StrNCpy(buffer, name_length + 1, unknown,
-                      static_cast<size_t>(name_length));
-    name_string = std::unique_ptr<char[]>(buffer);
-  }
-  DCHECK_EQ(name_length, static_cast<int>(strlen(name_string.get())));
 
   PerfJitCodeDebugInfo debug_info;
 
@@ -284,42 +298,44 @@
   uint32_t size = sizeof(debug_info);
   // Add the sizes of fixed parts of entries.
   size += entry_count * sizeof(PerfJitDebugEntry);
-  // Add the size of the name after the first entry.
-  size += (static_cast<uint32_t>(name_length) + 1) * entry_count;
+  // Add the size of the name after each entry.
+
+  Handle<Code> code_handle(code);
+  Handle<SharedFunctionInfo> function_handle(shared);
+  for (SourcePositionTableIterator iterator(code->source_position_table());
+       !iterator.done(); iterator.Advance()) {
+    SourcePositionInfo info(GetSourcePositionInfo(code_handle, function_handle,
+                                                  iterator.source_position()));
+    Handle<Script> script(Script::cast(info.function->script()));
+    std::unique_ptr<char[]> name_string = GetScriptName(script);
+    size += (static_cast<uint32_t>(strlen(name_string.get())) + 1);
+  }
 
   int padding = ((size + 7) & (~7)) - size;
-
   debug_info.size_ = size + padding;
-
   LogWriteBytes(reinterpret_cast<const char*>(&debug_info), sizeof(debug_info));
 
-  int script_line_offset = script->line_offset();
-  Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
   Address code_start = code->instruction_start();
 
   for (SourcePositionTableIterator iterator(code->source_position_table());
        !iterator.done(); iterator.Advance()) {
-    int position = iterator.source_position().ScriptOffset();
-    int line_number = Script::GetLineNumber(script, position);
-    // Compute column.
-    int relative_line_number = line_number - script_line_offset;
-    int start =
-        (relative_line_number == 0)
-            ? 0
-            : Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
-    int column_offset = position - start;
-    if (relative_line_number == 0) {
-      // For the case where the code is on the same line as the script tag.
-      column_offset += script->column_offset();
-    }
-
+    SourcePositionInfo info(GetSourcePositionInfo(code_handle, function_handle,
+                                                  iterator.source_position()));
     PerfJitDebugEntry entry;
+    // TODO(danno): There seems to be a bug in the dwarf handling of JIT code in
+    // the perf tool. It seems to erroneously believe that the first instruction
+    // of functions is at offset 0x40 when displayed in "perf report". To
+    // compensate for this, add a magic constant to the position addresses when
+    // writing them out.
     entry.address_ =
-        reinterpret_cast<uint64_t>(code_start + iterator.code_offset());
-    entry.line_number_ = line_number;
-    entry.column_ = column_offset;
+        reinterpret_cast<intptr_t>(code_start + iterator.code_offset() + 0x40);
+    entry.line_number_ = info.line + 1;
+    entry.column_ = info.column + 1;
     LogWriteBytes(reinterpret_cast<const char*>(&entry), sizeof(entry));
-    LogWriteBytes(name_string.get(), name_length + 1);
+    Handle<Script> script(Script::cast(info.function->script()));
+    std::unique_ptr<char[]> name_string = GetScriptName(script);
+    LogWriteBytes(name_string.get(),
+                  static_cast<uint32_t>(strlen(name_string.get())) + 1);
   }
   char padding_bytes[] = "\0\0\0\0\0\0\0\0";
   LogWriteBytes(padding_bytes, padding);
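
The LogWriteDebugInfo rewrite changes the jitdump record in two ways: debug info is now emitted whenever --perf-prof is on rather than behind a separate flag, and each PerfJitDebugEntry carries its own script name, resolved per source position via GetScriptName, because with inlining (pos.InliningStack for TurboFan/Crankshafted code) consecutive entries can belong to different scripts. The record is still padded to an 8-byte boundary, computed by the ((size + 7) & (~7)) - size expression; a self-contained check of that arithmetic:

    #include <cassert>
    #include <cstdint>

    // The 8-byte alignment arithmetic used for the jitdump record above.
    uint32_t PaddingFor(uint32_t size) { return ((size + 7) & ~7u) - size; }

    int main() {
      assert(PaddingFor(24) == 0);  // already aligned
      assert(PaddingFor(27) == 5);  // 27 -> 32
      assert(PaddingFor(1) == 7);
      return 0;
    }

The +0x40 bias on entry addresses is, per the TODO above, purely a workaround for how perf report locates JIT code, not a property of the format.
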
diff --git a/src/ppc/assembler-ppc-inl.h b/src/ppc/assembler-ppc-inl.h
index 12201da..216650c 100644
--- a/src/ppc/assembler-ppc-inl.h
+++ b/src/ppc/assembler-ppc-inl.h
@@ -41,7 +41,7 @@
 
 #include "src/assembler.h"
 #include "src/debug/debug.h"
-
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -131,6 +131,17 @@
 
 int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
 
+Address Assembler::target_address_at(Address pc, Code* code) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+                                      Address target,
+                                      ICacheFlushMode icache_flush_mode) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
 
 Address Assembler::target_address_from_return_address(Address pc) {
 // Returns the address of the call target from the return address that will
@@ -466,9 +477,9 @@
 
 
 #if V8_TARGET_ARCH_PPC64
-const int kLoadIntptrOpcode = LD;
+const uint32_t kLoadIntptrOpcode = LD;
 #else
-const int kLoadIntptrOpcode = LWZ;
+const uint32_t kLoadIntptrOpcode = LWZ;
 #endif
 
 // Constant pool load sequence detection:
@@ -481,7 +492,7 @@
 bool Assembler::IsConstantPoolLoadStart(Address pc,
                                         ConstantPoolEntry::Access* access) {
   Instr instr = instr_at(pc);
-  int opcode = instr & kOpcodeMask;
+  uint32_t opcode = instr & kOpcodeMask;
   if (!GetRA(instr).is(kConstantPoolRegister)) return false;
   bool overflowed = (opcode == ADDIS);
 #ifdef DEBUG
@@ -501,7 +512,7 @@
 bool Assembler::IsConstantPoolLoadEnd(Address pc,
                                       ConstantPoolEntry::Access* access) {
   Instr instr = instr_at(pc);
-  int opcode = instr & kOpcodeMask;
+  uint32_t opcode = instr & kOpcodeMask;
   bool overflowed = false;
   if (!(opcode == kLoadIntptrOpcode || opcode == LFD)) return false;
   if (!GetRA(instr).is(kConstantPoolRegister)) {
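
The int -> uint32_t changes for opcode values here (and in assembler-ppc.cc below) keep the opcode arithmetic unsigned: Instr is a 32-bit word and the primary opcode occupies the top 6 bits, so masking into a signed int produces a negative, implementation-defined value whenever bit 31 is set, which is exactly the range the 64-bit load/store and extended-group opcodes occupy. A small demonstration, assuming the usual top-six-bit kOpcodeMask:

    #include <cstdint>
    #include <iostream>

    int main() {
      const uint32_t kOpcodeMask = 0xFC000000u;  // primary opcode bits
      const uint32_t kLD = 58u << 26;            // 0xE8000000: bit 31 set
      int as_int = static_cast<int>(kLD & kOpcodeMask);  // goes negative
      uint32_t as_u32 = kLD & kOpcodeMask;               // stays 0xE8000000
      std::cout << as_int << " vs " << as_u32 << '\n';
      // prints "-402653184 vs 3892314112" on two's-complement targets
      return 0;
    }

The related IsCmpRegister/IsRldicl/IsCrSet fixes below, which OR the group prefix (EXT1/EXT2/EXT5) back in before comparing, suggest the extended sub-opcode constants now embed their group prefix as well.
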
diff --git a/src/ppc/assembler-ppc.cc b/src/ppc/assembler-ppc.cc
index 08a8005..645561d 100644
--- a/src/ppc/assembler-ppc.cc
+++ b/src/ppc/assembler-ppc.cc
@@ -66,6 +66,9 @@
 #ifndef USE_SIMULATOR
   // Probe for additional features at runtime.
   base::CPU cpu;
+  if (cpu.part() == base::CPU::PPC_POWER9) {
+    supported_ |= (1u << MODULO);
+  }
 #if V8_TARGET_ARCH_PPC64
   if (cpu.part() == base::CPU::PPC_POWER8) {
     supported_ |= (1u << FPR_GPR_MOV);
@@ -79,6 +82,7 @@
   if (cpu.part() == base::CPU::PPC_POWER7 ||
       cpu.part() == base::CPU::PPC_POWER8) {
     supported_ |= (1u << ISELECT);
+    supported_ |= (1u << VSX);
   }
 #if V8_OS_LINUX
   if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
@@ -96,6 +100,8 @@
   supported_ |= (1u << FPU);
   supported_ |= (1u << LWSYNC);
   supported_ |= (1u << ISELECT);
+  supported_ |= (1u << VSX);
+  supported_ |= (1u << MODULO);
 #if V8_TARGET_ARCH_PPC64
   supported_ |= (1u << FPR_GPR_MOV);
 #endif
@@ -171,14 +177,19 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+  return static_cast<uint32_t>(
+      reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
+}
 
 void RelocInfo::unchecked_update_wasm_memory_reference(
     Address address, ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
 }
 
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
-                                                  ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+                                           ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_,
                                    reinterpret_cast<Address>(size), flush_mode);
 }
@@ -344,7 +355,7 @@
 
 bool Assembler::IsCmpRegister(Instr instr) {
   return (((instr & kOpcodeMask) == EXT2) &&
-          ((instr & kExt2OpcodeMask) == CMP));
+          ((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
 }
 
 
@@ -359,7 +370,7 @@
 #if V8_TARGET_ARCH_PPC64
 bool Assembler::IsRldicl(Instr instr) {
   return (((instr & kOpcodeMask) == EXT5) &&
-          ((instr & kExt5OpcodeMask) == RLDICL));
+          ((EXT5 | (instr & kExt5OpcodeMask)) == RLDICL));
 }
 #endif
 
@@ -371,7 +382,7 @@
 
 bool Assembler::IsCrSet(Instr instr) {
   return (((instr & kOpcodeMask) == EXT1) &&
-          ((instr & kExt1OpcodeMask) == CREQV));
+          ((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
 }
 
 
@@ -414,7 +425,7 @@
 int Assembler::target_at(int pos) {
   Instr instr = instr_at(pos);
   // check which type of branch this is 16 or 26 bit offset
-  int opcode = instr & kOpcodeMask;
+  uint32_t opcode = instr & kOpcodeMask;
   int link;
   switch (opcode) {
     case BX:
@@ -444,7 +455,7 @@
 
 void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
   Instr instr = instr_at(pos);
-  int opcode = instr & kOpcodeMask;
+  uint32_t opcode = instr & kOpcodeMask;
 
   if (is_branch != nullptr) {
     *is_branch = (opcode == BX || opcode == BCX);
@@ -524,7 +535,7 @@
 
 int Assembler::max_reach_from(int pos) {
   Instr instr = instr_at(pos);
-  int opcode = instr & kOpcodeMask;
+  uint32_t opcode = instr & kOpcodeMask;
 
   // check which type of branch this is 16 or 26 bit offset
   switch (opcode) {
@@ -635,12 +646,19 @@
   emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
 }
 
-
 void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                         OEBit o, RCBit r) {
   emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
 }
 
+void Assembler::xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
+                         DoubleRegister b) {
+  int AX = ((a.code() & 0x20) >> 5) & 0x1;
+  int BX = ((b.code() & 0x20) >> 5) & 0x1;
+  int TX = ((t.code() & 0x20) >> 5) & 0x1;
+  emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 | (b.code()
+       & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
+}
 
 void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
                         int maskbit, RCBit r) {
@@ -936,6 +954,13 @@
   xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
 }
 
+void Assembler::modsw(Register rt, Register ra, Register rb) {
+  x_form(EXT2 | MODSW, ra, rt, rb, LeaveRC);
+}
+
+void Assembler::moduw(Register rt, Register ra, Register rb) {
+  x_form(EXT2 | MODUW, ra, rt, rb, LeaveRC);
+}
 
 void Assembler::addi(Register dst, Register src, const Operand& imm) {
   DCHECK(!src.is(r0));  // use li instead to show intent
@@ -1540,6 +1565,14 @@
                       RCBit r) {
   xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
 }
+
+void Assembler::modsd(Register rt, Register ra, Register rb) {
+  x_form(EXT2 | MODSD, ra, rt, rb, LeaveRC);
+}
+
+void Assembler::modud(Register rt, Register ra, Register rb) {
+  x_form(EXT2 | MODUD, ra, rt, rb, LeaveRC);
+}
 #endif
 
 
@@ -2217,13 +2250,13 @@
 
 void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                         RCBit rc) {
-  emit(EXT3 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
+  emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);
 }
 
 
 void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
-  emit(EXT3 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
+  emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);
 }
 
 
@@ -2322,6 +2355,24 @@
        frc.code() * B6 | rc);
 }
 
+// Support for VSX instructions
+
+void Assembler::xsadddp(const DoubleRegister frt, const DoubleRegister fra,
+                        const DoubleRegister frb) {
+  xx3_form(EXT6 | XSADDDP, frt, fra, frb);
+}
+void Assembler::xssubdp(const DoubleRegister frt, const DoubleRegister fra,
+                        const DoubleRegister frb) {
+  xx3_form(EXT6 | XSSUBDP, frt, fra, frb);
+}
+void Assembler::xsdivdp(const DoubleRegister frt, const DoubleRegister fra,
+                        const DoubleRegister frb) {
+  xx3_form(EXT6 | XSDIVDP, frt, fra, frb);
+}
+void Assembler::xsmuldp(const DoubleRegister frt, const DoubleRegister fra,
+                        const DoubleRegister frb) {
+  xx3_form(EXT6 | XSMULDP, frt, fra, frb);
+}
 
 // Pseudo instructions.
 void Assembler::nop(int type) {
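
The new xx3_form encoder reflects how VSX widens the register file to 64 entries: an XX3-form instruction keeps the low five bits of each register number in the classic T/A/B fields (bits 21/16/11) and moves the sixth bit into the TX/AX/BX bits at the bottom of the word. A stand-alone sketch of that packing, mirroring the bit positions xx3_form uses:

    #include <cassert>
    #include <cstdint>

    // XX3 packing: low five bits of each 6-bit VSX register number go in
    // the T/A/B fields, the sixth bit in the TX/AX/BX extension bits.
    uint32_t EncodeXX3(uint32_t opcode, int t, int a, int b) {
      auto lo5 = [](int r) { return static_cast<uint32_t>(r & 0x1F); };
      auto hi1 = [](int r) { return static_cast<uint32_t>((r >> 5) & 0x1); };
      return opcode | lo5(t) << 21 | lo5(a) << 16 | lo5(b) << 11 |
             hi1(a) << 2 | hi1(b) << 1 | hi1(t);
    }

    int main() {
      uint32_t insn = EncodeXX3(0, 37, 0, 0);  // register 37 = 0b100101
      assert(((insn >> 21) & 0x1F) == 5);      // low five bits in T field
      assert((insn & 0x1) == 1);               // sixth bit carried by TX
      return 0;
    }

Separately, the fcfidus/fcfids hunks above are genuine bug fixes: both previously emitted the double-precision FCFIDU/FCFID encodings instead of their single-precision FCFIDUS/FCFIDS counterparts.
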
diff --git a/src/ppc/assembler-ppc.h b/src/ppc/assembler-ppc.h
index f49ac63..810b42f 100644
--- a/src/ppc/assembler-ppc.h
+++ b/src/ppc/assembler-ppc.h
@@ -206,6 +206,7 @@
 const Register cp = r30;                     // JavaScript context pointer.
 
 static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
 
 // Double word FP register.
 struct DoubleRegister {
@@ -469,17 +470,10 @@
   INLINE(static void set_target_address_at(
       Isolate* isolate, Address pc, Address constant_pool, Address target,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
-  INLINE(static Address target_address_at(Address pc, Code* code)) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    return target_address_at(pc, constant_pool);
-  }
+  INLINE(static Address target_address_at(Address pc, Code* code));
   INLINE(static void set_target_address_at(
       Isolate* isolate, Address pc, Code* code, Address target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    set_target_address_at(isolate, pc, constant_pool, target,
-                          icache_flush_mode);
-  }
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
 
   // Return the code target address at a call site from the return address
   // of that call in the instruction stream.
@@ -837,6 +831,8 @@
             RCBit r = LeaveRC);
   void divwu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
              RCBit r = LeaveRC);
+  void modsw(Register rt, Register ra, Register rb);
+  void moduw(Register rt, Register ra, Register rb);
 
   void addi(Register dst, Register src, const Operand& imm);
   void addis(Register dst, Register src, const Operand& imm);
@@ -932,6 +928,8 @@
             RCBit r = LeaveRC);
   void divdu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
              RCBit r = LeaveRC);
+  void modsd(Register rt, Register ra, Register rb);
+  void modud(Register rt, Register ra, Register rb);
 #endif
 
   void rlwinm(Register ra, Register rs, int sh, int mb, int me,
@@ -1104,6 +1102,17 @@
              const DoubleRegister frc, const DoubleRegister frb,
              RCBit rc = LeaveRC);
 
+  // Support for VSX instructions
+
+  void xsadddp(const DoubleRegister frt, const DoubleRegister fra,
+               const DoubleRegister frb);
+  void xssubdp(const DoubleRegister frt, const DoubleRegister fra,
+               const DoubleRegister frb);
+  void xsdivdp(const DoubleRegister frt, const DoubleRegister fra,
+               const DoubleRegister frb);
+  void xsmuldp(const DoubleRegister frt, const DoubleRegister fra,
+               const DoubleRegister frc);
+
   // Pseudo instructions
 
   // Different nop operations are used by the code generator to detect certain
@@ -1188,9 +1197,6 @@
 
   // Debugging
 
-  // Mark generator continuation.
-  void RecordGeneratorContinuation();
-
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot(RelocInfo::Mode mode);
 
@@ -1409,6 +1415,8 @@
   void x_form(Instr instr, Register ra, Register rs, Register rb, RCBit r);
   void xo_form(Instr instr, Register rt, Register ra, Register rb, OEBit o,
                RCBit r);
+  void xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
+                DoubleRegister b);
   void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit,
                RCBit r);
   void mds_form(Instr instr, Register ra, Register rs, Register rb, int maskbit,
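
The modsw/moduw (and 64-bit modsd/modud) declarations pair with the new MODULO feature bit in assembler-ppc.cc, which is set only for PPC_POWER9: these are POWER9 instructions, so on older cores a code generator has to expand modulo through divide, multiply, and subtract. The identity that expansion relies on, checked in plain C++ (whose division also truncates toward zero):

    #include <cassert>

    // Hardware-less modulo expansion: a % b == a - (a / b) * b under
    // truncating division, matching '%' for signed operands.
    int ModExpand(int a, int b) { return a - (a / b) * b; }

    int main() {
      assert(ModExpand(7, 3) == 7 % 3);
      assert(ModExpand(-7, 3) == -7 % 3);
      assert(ModExpand(7, -3) == 7 % -3);
      return 0;
    }
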
diff --git a/src/ppc/code-stubs-ppc.cc b/src/ppc/code-stubs-ppc.cc
index a48fc06..389cba2 100644
--- a/src/ppc/code-stubs-ppc.cc
+++ b/src/ppc/code-stubs-ppc.cc
@@ -32,17 +32,6 @@
   __ TailCallRuntime(Runtime::kNewArray);
 }
 
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
-  descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
-  descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                           Condition cond);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
@@ -216,9 +205,6 @@
     // Call runtime on identical symbols since we need to throw a TypeError.
     __ cmpi(r7, Operand(SYMBOL_TYPE));
     __ beq(slow);
-    // Call runtime on identical SIMD values since we must throw a TypeError.
-    __ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
-    __ beq(slow);
   } else {
     __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
     __ beq(&heap_number);
@@ -229,9 +215,6 @@
       // Call runtime on identical symbols since we need to throw a TypeError.
       __ cmpi(r7, Operand(SYMBOL_TYPE));
       __ beq(slow);
-      // Call runtime on identical SIMD values since we must throw a TypeError.
-      __ cmpi(r7, Operand(SIMD128_VALUE_TYPE));
-      __ beq(slow);
       // Normally here we fall through to return_equal, but undefined is
       // special: (undefined == undefined) == true, but
       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
@@ -664,8 +647,11 @@
   if (cc == eq) {
     {
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ Push(lhs, rhs);
-      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+      __ Push(cp);
+      __ Call(strict() ? isolate()->builtins()->StrictEqual()
+                       : isolate()->builtins()->Equal(),
+              RelocInfo::CODE_TARGET);
+      __ Pop(cp);
     }
     // Turn true into 0 and false into some non-zero value.
     STATIC_ASSERT(EQUAL == 0);
@@ -879,7 +865,6 @@
   SaveFPRegsMode mode = kSaveFPRegs;
   CEntryStub(isolate, 1, mode).GetCode();
   StoreBufferOverflowStub(isolate, mode).GetCode();
-  isolate->set_fp_stubs_generated(true);
 }
 
 
@@ -1115,8 +1100,8 @@
     __ li(kConstantPoolRegister, Operand::Zero());
     __ push(kConstantPoolRegister);
   }
-  int marker = type();
-  __ LoadSmiLiteral(r0, Smi::FromInt(marker));
+  StackFrame::Type marker = type();
+  __ mov(r0, Operand(StackFrame::TypeToMarker(marker)));
   __ push(r0);
   __ push(r0);
   // Save copies of the top frame descriptor on the stack.
@@ -1135,11 +1120,11 @@
   __ cmpi(r9, Operand::Zero());
   __ bne(&non_outermost_js);
   __ StoreP(fp, MemOperand(r8));
-  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+  __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   Label cont;
   __ b(&cont);
   __ bind(&non_outermost_js);
-  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+  __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
   __ bind(&cont);
   __ push(ip);  // frame-type
 
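The surrounding hunks replace Smi-tagged frame-type markers with plain StackFrame::TypeToMarker values, which can be built with a single mov and compared with cmpi instead of the Smi-literal helpers. A hypothetical mirror of the marker encoding, assuming (this is not shown in the patch) that it keeps markers odd so they stay disjoint from Smis, whose low bit is clear:

    #include <cstdint>

    // Hypothetical mirror of StackFrame::TypeToMarker (an assumption, not
    // taken from this patch): odd values can never be read as Smis, yet
    // small frame types still fit a 16-bit compare immediate.
    constexpr int32_t TypeToMarker(int32_t type) { return (type << 1) | 1; }

    static_assert(TypeToMarker(0) == 1, "markers are odd");
    static_assert((TypeToMarker(7) & 1) == 1, "markers are odd");
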
@@ -1202,7 +1187,7 @@
   // Check if the current stack frame is marked as the outermost JS frame.
   Label non_outermost_js_2;
   __ pop(r8);
-  __ CmpSmiLiteral(r8, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
+  __ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   __ bne(&non_outermost_js_2);
   __ mov(r9, Operand::Zero());
   __ mov(r8, Operand(ExternalReference(js_entry_sp)));
@@ -1229,55 +1214,6 @@
   __ blr();
 }
 
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  // Ensure that the vector and slot registers won't be clobbered before
-  // calling the miss handler.
-  DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::VectorRegister(),
-                     LoadWithVectorDescriptor::SlotRegister()));
-
-  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r7,
-                                                          r8, &miss);
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
-  // Return address is in lr.
-  Label miss;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register index = LoadDescriptor::NameRegister();
-  Register scratch = r8;
-  Register result = r3;
-  DCHECK(!scratch.is(receiver) && !scratch.is(index));
-  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
-         result.is(LoadWithVectorDescriptor::SlotRegister()));
-
-  // StringCharAtGenerator doesn't use the result register until it's passed
-  // the different miss possibilities. If it did, we would have a conflict
-  // when FLAG_vector_ics is true.
-  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          RECEIVER_IS_STRING);
-  char_at_generator.GenerateFast(masm);
-  __ Ret();
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
 // Just jump directly to runtime if native RegExp is not selected at compile
 // time or if regexp entry in generated code is turned off runtime switch or
@@ -1382,7 +1318,7 @@
   // (6) External string.  Make it, offset-wise, look like a sequential string.
   //     Go to (4).
   // (7) Short external string or not a string?  If yes, bail out to runtime.
-  // (8) Sliced string.  Replace subject with parent.  Go to (1).
+  // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
 
   Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
       not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
@@ -1394,7 +1330,7 @@
   // (1) Sequential string?  If yes, go to (4).
 
   STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
-                 kShortExternalStringMask) == 0x93);
+                 kShortExternalStringMask) == 0xa7);
   __ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask |
                           kShortExternalStringMask));
   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
@@ -1403,6 +1339,7 @@
   // (2) Sequential or cons? If not, go to (5).
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kThinStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   STATIC_ASSERT(kExternalStringTag < 0xffffu);
@@ -1431,9 +1368,9 @@
   __ ble(&runtime);
   __ SmiUntag(r4);
 
-  STATIC_ASSERT(4 == kOneByteStringTag);
+  STATIC_ASSERT(8 == kOneByteStringTag);
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  STATIC_ASSERT(kStringEncodingMask == 4);
+  STATIC_ASSERT(kStringEncodingMask == 8);
   __ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC);
   __ beq(&encoding_type_UC16, cr0);
   __ LoadP(code,
@@ -1679,12 +1616,19 @@
   __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask));
   __ bne(&runtime, cr0);
 
-  // (8) Sliced string.  Replace subject with parent.  Go to (4).
+  // (8) Sliced or thin string.  Replace subject with parent.  Go to (4).
+  Label thin_string;
+  __ cmpi(r4, Operand(kThinStringTag));
+  __ beq(&thin_string);
   // Load offset into r11 and replace subject string with parent.
   __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   __ SmiUntag(r11);
   __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   __ b(&check_underlying);  // Go to (4).
+
+  __ bind(&thin_string);
+  __ LoadP(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+  __ b(&check_underlying);  // Go to (4).
 #endif  // V8_INTERPRETED_REGEXP
 }
 
@@ -1719,9 +1663,9 @@
   // r6 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
 
-  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+  DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
-  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+  DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
   const int count_offset = FixedArray::kHeaderSize + kPointerSize;
@@ -1734,7 +1678,7 @@
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
   // We don't know if r8 is a WeakCell or a Symbol, but it's harmless to read at
-  // this position in a symbol (see static asserts in type-feedback-vector.h).
+  // this position in a symbol (see static asserts in feedback-vector.h).
   Label check_allocation_site;
   Register feedback_map = r9;
   Register weak_value = r10;
@@ -1860,190 +1804,6 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
-                               Register slot, Register temp) {
-  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
-  __ SmiToPtrArrayOffset(temp, slot);
-  __ add(feedback_vector, feedback_vector, temp);
-  __ LoadP(slot, FieldMemOperand(feedback_vector, count_offset));
-  __ AddSmiLiteral(slot, slot, Smi::FromInt(1), temp);
-  __ StoreP(slot, FieldMemOperand(feedback_vector, count_offset), temp);
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
-  // r3 - number of arguments
-  // r4 - function
-  // r6 - slot id
-  // r5 - vector
-  // r7 - allocation site (loaded from vector[slot])
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
-  __ cmp(r4, r8);
-  __ bne(miss);
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, r5, r6, r0);
-
-  __ mr(r5, r7);
-  __ mr(r6, r4);
-  ArrayConstructorStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
-  // r3 - number of arguments
-  // r4 - function
-  // r6 - slot id (Smi)
-  // r5 - vector
-  Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
-  // The checks. First, does r4 match the recorded monomorphic target?
-  __ SmiToPtrArrayOffset(r9, r6);
-  __ add(r9, r5, r9);
-  __ LoadP(r7, FieldMemOperand(r9, FixedArray::kHeaderSize));
-
-  // We don't know that we have a weak cell. We might have a private symbol
-  // or an AllocationSite, but the memory is safe to examine.
-  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
-  // FixedArray.
-  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
-  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
-  // computed, meaning that it can't appear to be a pointer. If the low bit is
-  // 0, then hash is computed, but the 0 bit prevents the field from appearing
-  // to be a pointer.
-  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
-  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
-                    WeakCell::kValueOffset &&
-                WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
-  __ LoadP(r8, FieldMemOperand(r7, WeakCell::kValueOffset));
-  __ cmp(r4, r8);
-  __ bne(&extra_checks_or_miss);
-
-  // The compare above could have been a SMI/SMI comparison. Guard against this
-  // convincing us that we have a monomorphic JSFunction.
-  __ JumpIfSmi(r4, &extra_checks_or_miss);
-
-  __ bind(&call_function);
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, r5, r6, r0);
-
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
-                                                    tail_call_mode()),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&extra_checks_or_miss);
-  Label uninitialized, miss, not_allocation_site;
-
-  __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
-  __ beq(&call);
-
-  // Verify that r7 contains an AllocationSite
-  __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
-  __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
-  __ bne(&not_allocation_site);
-
-  // We have an allocation site.
-  HandleArrayCase(masm, &miss);
-
-  __ bind(&not_allocation_site);
-
-  // The following cases attempt to handle MISS cases without going to the
-  // runtime.
-  if (FLAG_trace_ic) {
-    __ b(&miss);
-  }
-
-  __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
-  __ beq(&uninitialized);
-
-  // We are going megamorphic. If the feedback is a JSFunction, it is fine
-  // to handle it here. More complex cases are dealt with in the runtime.
-  __ AssertNotSmi(r7);
-  __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE);
-  __ bne(&miss);
-  __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
-  __ StoreP(ip, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
-
-  __ bind(&call);
-
-  // Increment the call count for megamorphic function calls.
-  IncrementCallCount(masm, r5, r6, r0);
-
-  __ bind(&call_count_incremented);
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&uninitialized);
-
-  // We are going monomorphic, provided we actually have a JSFunction.
-  __ JumpIfSmi(r4, &miss);
-
-  // Goto miss case if we do not have a function.
-  __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
-  __ bne(&miss);
-
-  // Make sure the function is not the Array() function, which requires special
-  // behavior on MISS.
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
-  __ cmp(r4, r7);
-  __ beq(&miss);
-
-  // Make sure the function belongs to the same native context.
-  __ LoadP(r7, FieldMemOperand(r4, JSFunction::kContextOffset));
-  __ LoadP(r7, ContextMemOperand(r7, Context::NATIVE_CONTEXT_INDEX));
-  __ LoadP(ip, NativeContextMemOperand());
-  __ cmp(r7, ip);
-  __ bne(&miss);
-
-  // Store the function. Use a stub since we need a frame for allocation.
-  // r5 - vector
-  // r6 - slot
-  // r4 - function
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    CreateWeakCellStub create_stub(masm->isolate());
-    __ SmiTag(r3);
-    __ Push(r3, r5, r6, cp, r4);
-    __ CallStub(&create_stub);
-    __ Pop(r5, r6, cp, r4);
-    __ Pop(r3);
-    __ SmiUntag(r3);
-  }
-
-  __ b(&call_function);
-
-  // We are here because tracing is on or we encountered a MISS case we can't
-  // handle here.
-  __ bind(&miss);
-  GenerateMiss(masm);
-
-  __ b(&call_count_incremented);
-}
-
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
-  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
-  // Preserve the number of arguments as Smi.
-  __ SmiTag(r3);
-
-  // Push the receiver and the function and feedback info.
-  __ Push(r3, r4, r5, r6);
-
-  // Call the entry.
-  __ CallRuntime(Runtime::kCallIC_Miss);
-
-  // Move result to r4 and exit the internal frame.
-  __ mr(r4, r3);
-
-  // Restore number of arguments.
-  __ Pop(r3);
-  __ SmiUntag(r3);
-}
-
 
 // StringCharCodeAtGenerator
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
@@ -2130,85 +1890,6 @@
   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
 }
 
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
-  // Fast case of Heap::LookupSingleCharacterStringFromCode.
-  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
-  __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU));
-  __ ori(r0, r0, Operand(kSmiTagMask));
-  __ and_(r0, code_, r0, SetRC);
-  __ bne(&slow_case_, cr0);
-
-  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
-  // At this point code register contains smi tagged one-byte char code.
-  __ mr(r0, code_);
-  __ SmiToPtrArrayOffset(code_, code_);
-  __ add(result_, result_, code_);
-  __ mr(code_, r0);
-  __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
-  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
-  __ beq(&slow_case_);
-  __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
-  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
-  __ bind(&slow_case_);
-  call_helper.BeforeCall(masm);
-  __ push(code_);
-  __ CallRuntime(Runtime::kStringCharFromCode);
-  __ Move(result_, r3);
-  call_helper.AfterCall(masm);
-  __ b(&exit_);
-
-  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
-
-enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
-                                          Register src, Register count,
-                                          Register scratch,
-                                          String::Encoding encoding) {
-  if (FLAG_debug_code) {
-    // Check that destination is word aligned.
-    __ andi(r0, dest, Operand(kPointerAlignmentMask));
-    __ Check(eq, kDestinationOfCopyNotAligned, cr0);
-  }
-
-  // Nothing to do for zero characters.
-  Label done;
-  if (encoding == String::TWO_BYTE_ENCODING) {
-    // double the length
-    __ add(count, count, count, LeaveOE, SetRC);
-    __ beq(&done, cr0);
-  } else {
-    __ cmpi(count, Operand::Zero());
-    __ beq(&done);
-  }
-
-  // Copy count bytes from src to dst.
-  Label byte_loop;
-  __ mtctr(count);
-  __ bind(&byte_loop);
-  __ lbz(scratch, MemOperand(src));
-  __ addi(src, src, Operand(1));
-  __ stb(scratch, MemOperand(dest));
-  __ addi(dest, dest, Operand(1));
-  __ bdnz(&byte_loop);
-
-  __ bind(&done);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -2842,84 +2523,6 @@
   __ bne(miss);
 }
 
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(
-    MacroAssembler* masm, Label* miss, Label* done, Register elements,
-    Register name, Register scratch1, Register scratch2) {
-  DCHECK(!elements.is(scratch1));
-  DCHECK(!elements.is(scratch2));
-  DCHECK(!name.is(scratch1));
-  DCHECK(!name.is(scratch2));
-
-  __ AssertName(name);
-
-  // Compute the capacity mask.
-  __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
-  __ SmiUntag(scratch1);  // convert smi to int
-  __ subi(scratch1, scratch1, Operand(1));
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ lwz(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
-    if (i > 0) {
-      // Add the probe offset (i + i * i) left shifted to avoid right shifting
-      // the hash in a separate instruction. The value hash + i + i * i is right
-      // shifted in the following and instruction.
-      DCHECK(NameDictionary::GetProbeOffset(i) <
-             1 << (32 - Name::kHashFieldOffset));
-      __ addi(scratch2, scratch2,
-              Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
-    }
-    __ srwi(scratch2, scratch2, Operand(Name::kHashShift));
-    __ and_(scratch2, scratch1, scratch2);
-
-    // Scale the index by multiplying by the entry size.
-    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
-    // scratch2 = scratch2 * 3.
-    __ ShiftLeftImm(ip, scratch2, Operand(1));
-    __ add(scratch2, scratch2, ip);
-
-    // Check if the key is identical to the name.
-    __ ShiftLeftImm(ip, scratch2, Operand(kPointerSizeLog2));
-    __ add(scratch2, elements, ip);
-    __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
-    __ cmp(name, ip);
-    __ beq(done);
-  }
-
-  const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
-                          r5.bit() | r4.bit() | r3.bit()) &
-                         ~(scratch1.bit() | scratch2.bit());
-
-  __ mflr(r0);
-  __ MultiPush(spill_mask);
-  if (name.is(r3)) {
-    DCHECK(!elements.is(r4));
-    __ mr(r4, name);
-    __ mr(r3, elements);
-  } else {
-    __ mr(r3, elements);
-    __ mr(r4, name);
-  }
-  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
-  __ CallStub(&stub);
-  __ cmpi(r3, Operand::Zero());
-  __ mr(scratch2, r5);
-  __ MultiPop(spill_mask);
-  __ mtlr(r0);
-
-  __ bne(done);
-  __ beq(miss);
-}
-
-
 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   // we cannot call anything that could cause a GC from this stub.
@@ -3196,252 +2799,6 @@
   __ Ret();
 }
 
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(r5);
-  CallICStub stub(isolate(), state());
-  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
-                             Register receiver_map, Register scratch1,
-                             Register scratch2, bool is_polymorphic,
-                             Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label start_polymorphic;
-
-  Register cached_map = scratch1;
-
-  __ LoadP(cached_map,
-           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ cmp(receiver_map, cached_map);
-  __ bne(&start_polymorphic);
-  // found, now call handler.
-  Register handler = feedback;
-  __ LoadP(handler,
-           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(ip);
-
-
-  Register length = scratch2;
-  __ bind(&start_polymorphic);
-  __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-  if (!is_polymorphic) {
-    // If the IC could be monomorphic we have to make sure we don't go past the
-    // end of the feedback array.
-    __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
-    __ beq(miss);
-  }
-
-  Register too_far = length;
-  Register pointer_reg = feedback;
-
-  // +-----+------+------+-----+-----+ ... ----+
-  // | map | len  | wm0  | h0  | wm1 |      hN |
-  // +-----+------+------+-----+-----+ ... ----+
-  //                 0      1     2        len-1
-  //                              ^              ^
-  //                              |              |
-  //                         pointer_reg      too_far
-  //                         aka feedback     scratch2
-  // also need receiver_map
-  // use cached_map (scratch1) to look in the weak map values.
-  __ SmiToPtrArrayOffset(r0, length);
-  __ add(too_far, feedback, r0);
-  __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ addi(pointer_reg, feedback,
-          Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
-  __ bind(&next_loop);
-  __ LoadP(cached_map, MemOperand(pointer_reg));
-  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ cmp(receiver_map, cached_map);
-  __ bne(&prepare_next);
-  __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
-  __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(ip);
-
-  __ bind(&prepare_next);
-  __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
-  __ cmp(pointer_reg, too_far);
-  __ blt(&next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ b(miss);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
-                                  Register receiver_map, Register feedback,
-                                  Register vector, Register slot,
-                                  Register scratch, Label* compare_map,
-                                  Label* load_smi_map, Label* try_array) {
-  __ JumpIfSmi(receiver, load_smi_map);
-  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bind(compare_map);
-  Register cached_map = scratch;
-  // Move the weak map into the weak_cell register.
-  __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
-  __ cmp(cached_map, receiver_map);
-  __ bne(try_array);
-  Register handler = feedback;
-  __ SmiToPtrArrayOffset(r0, slot);
-  __ add(handler, vector, r0);
-  __ LoadP(handler,
-           FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
-  __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(ip);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  KeyedStoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
-                                       Register receiver_map, Register scratch1,
-                                       Register scratch2, Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label start_polymorphic;
-  Label transition_call;
-
-  Register cached_map = scratch1;
-  Register too_far = scratch2;
-  Register pointer_reg = feedback;
-  __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
-  // +-----+------+------+-----+-----+-----+ ... ----+
-  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
-  // +-----+------+------+-----+-----+ ----+ ... ----+
-  //                 0      1     2              len-1
-  //                 ^                                 ^
-  //                 |                                 |
-  //             pointer_reg                        too_far
-  //             aka feedback                       scratch2
-  // also need receiver_map
-  // use cached_map (scratch1) to look in the weak map values.
-  __ SmiToPtrArrayOffset(r0, too_far);
-  __ add(too_far, feedback, r0);
-  __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ addi(pointer_reg, feedback,
-          Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
-  __ bind(&next_loop);
-  __ LoadP(cached_map, MemOperand(pointer_reg));
-  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ cmp(receiver_map, cached_map);
-  __ bne(&prepare_next);
-  // Is it a transitioning store?
-  __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
-  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
-  __ bne(&transition_call);
-  __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
-  __ addi(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(ip);
-
-  __ bind(&transition_call);
-  __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
-  __ JumpIfSmi(too_far, miss);
-
-  __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-
-  // Load the map into the correct register.
-  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
-  __ mr(feedback, too_far);
-
-  __ addi(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(ip);
-
-  __ bind(&prepare_next);
-  __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
-  __ cmpl(pointer_reg, too_far);
-  __ blt(&next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ b(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // r4
-  Register key = StoreWithVectorDescriptor::NameRegister();           // r5
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // r6
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // r7
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r3));          // r3
-  Register feedback = r8;
-  Register receiver_map = r9;
-  Register scratch1 = r10;
-
-  __ SmiToPtrArrayOffset(r0, slot);
-  __ add(feedback, vector, r0);
-  __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  __ bne(&not_array);
-
-  // We have a polymorphic element handler.
-  Label polymorphic, try_poly_name;
-  __ bind(&polymorphic);
-
-  Register scratch2 = r11;
-
-  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
-                             &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ bne(&try_poly_name);
-  Handle<Code> megamorphic_stub =
-      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ cmp(key, feedback);
-  __ bne(&miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ SmiToPtrArrayOffset(r0, slot);
-  __ add(feedback, vector, r0);
-  __ LoadP(feedback,
-           FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
-                   &miss);
-
-  __ bind(&miss);
-  KeyedStoreIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ b(&compare_map);
-}
-
-
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     PredictableCodeSizeScope predictable(masm,
@@ -3812,665 +3169,6 @@
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r4 : target
-  //  -- r6 : new target
-  //  -- cp : context
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(r4);
-  __ AssertReceiver(r6);
-
-  // Verify that the new target is a JSFunction.
-  Label new_object;
-  __ CompareObjectType(r6, r5, r5, JS_FUNCTION_TYPE);
-  __ bne(&new_object);
-
-  // Load the initial map and verify that it's in fact a map.
-  __ LoadP(r5, FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
-  __ JumpIfSmi(r5, &new_object);
-  __ CompareObjectType(r5, r3, r3, MAP_TYPE);
-  __ bne(&new_object);
-
-  // Fall back to runtime if the target differs from the new target's
-  // initial map constructor.
-  __ LoadP(r3, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
-  __ cmp(r3, r4);
-  __ bne(&new_object);
-
-  // Allocate the JSObject on the heap.
-  Label allocate, done_allocate;
-  __ lbz(r7, FieldMemOperand(r5, Map::kInstanceSizeOffset));
-  __ Allocate(r7, r3, r8, r9, &allocate, SIZE_IN_WORDS);
-  __ bind(&done_allocate);
-
-  // Initialize the JSObject fields.
-  __ StoreP(r5, FieldMemOperand(r3, JSObject::kMapOffset), r0);
-  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
-  __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
-  __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ addi(r4, r3, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
-  // ----------- S t a t e -------------
-  //  -- r3 : result (tagged)
-  //  -- r4 : result fields (untagged)
-  //  -- r8 : result end (untagged)
-  //  -- r5 : initial map
-  //  -- cp : context
-  //  -- lr : return address
-  // -----------------------------------
-
-  // Perform in-object slack tracking if requested.
-  Label slack_tracking;
-  STATIC_ASSERT(Map::kNoSlackTracking == 0);
-  __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
-  __ lwz(r6, FieldMemOperand(r5, Map::kBitField3Offset));
-  __ DecodeField<Map::ConstructionCounter>(r10, r6, SetRC);
-  __ bne(&slack_tracking, cr0);
-  {
-    // Initialize all in-object fields with undefined.
-    __ InitializeFieldsWithFiller(r4, r8, r9);
-    __ Ret();
-  }
-  __ bind(&slack_tracking);
-  {
-    // Decrease generous allocation count.
-    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
-    __ Add(r6, r6, -(1 << Map::ConstructionCounter::kShift), r0);
-    __ stw(r6, FieldMemOperand(r5, Map::kBitField3Offset));
-
-    // Initialize the in-object fields with undefined.
-    __ lbz(r7, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
-    __ ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
-    __ sub(r7, r8, r7);
-    __ InitializeFieldsWithFiller(r4, r7, r9);
-
-    // Initialize the remaining (reserved) fields with one pointer filler map.
-    __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
-    __ InitializeFieldsWithFiller(r4, r8, r9);
-
-    // Check if we can finalize the instance size.
-    __ cmpi(r10, Operand(Map::kSlackTrackingCounterEnd));
-    __ Ret(ne);
-
-    // Finalize the instance size.
-    {
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ Push(r3, r5);
-      __ CallRuntime(Runtime::kFinalizeInstanceSize);
-      __ Pop(r3);
-    }
-    __ Ret();
-  }
-
-  // Fall back to %AllocateInNewSpace.
-  __ bind(&allocate);
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    STATIC_ASSERT(kSmiTag == 0);
-    __ ShiftLeftImm(r7, r7,
-                    Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
-    __ Push(r5, r7);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ Pop(r5);
-  }
-  __ lbz(r8, FieldMemOperand(r5, Map::kInstanceSizeOffset));
-  __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
-  __ add(r8, r3, r8);
-  __ subi(r8, r8, Operand(kHeapObjectTag));
-  __ b(&done_allocate);
-
-  // Fall back to %NewObject.
-  __ bind(&new_object);
-  __ Push(r4, r6);
-  __ TailCallRuntime(Runtime::kNewObject);
-}
-
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r4 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(r4);
-
-  // Make r5 point to the JavaScript frame.
-  __ mr(r5, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
-    __ cmp(ip, r4);
-    __ beq(&ok);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have rest parameters (only possible if we have an
-  // arguments adaptor frame below the function frame).
-  Label no_rest_parameters;
-  __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
-  __ bne(&no_rest_parameters);
-
-  // Check if the arguments adaptor frame contains more arguments than
-  // specified by the function's internal formal parameter count.
-  Label rest_parameters;
-  __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadWordArith(
-      r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
-#if V8_TARGET_ARCH_PPC64
-  __ SmiTag(r6);
-#endif
-  __ sub(r3, r3, r6, LeaveOE, SetRC);
-  __ bgt(&rest_parameters, cr0);
-
-  // Return an empty rest parameter array.
-  __ bind(&no_rest_parameters);
-  {
-    // ----------- S t a t e -------------
-    //  -- cp : context
-    //  -- lr : return address
-    // -----------------------------------
-
-    // Allocate an empty rest parameter array.
-    Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Setup the rest parameter array in r3.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4);
-    __ StoreP(r4, FieldMemOperand(r3, JSArray::kMapOffset), r0);
-    __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
-    __ StoreP(r4, FieldMemOperand(r3, JSArray::kPropertiesOffset), r0);
-    __ StoreP(r4, FieldMemOperand(r3, JSArray::kElementsOffset), r0);
-    __ li(r4, Operand::Zero());
-    __ StoreP(r4, FieldMemOperand(r3, JSArray::kLengthOffset), r0);
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ Ret();
-
-    // Fall back to %AllocateInNewSpace.
-    __ bind(&allocate);
-    {
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ Push(Smi::FromInt(JSArray::kSize));
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-    }
-    __ b(&done_allocate);
-  }
-
-  __ bind(&rest_parameters);
-  {
-    // Compute the pointer to the first rest parameter (skipping the receiver).
-    __ SmiToPtrArrayOffset(r9, r3);
-    __ add(r5, r5, r9);
-    __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
-
-    // ----------- S t a t e -------------
-    //  -- cp : context
-    //  -- r3 : number of rest parameters (tagged)
-    //  -- r4 : function
-    //  -- r5 : pointer just past first rest parameter
-    //  -- r9 : size of rest parameters
-    //  -- lr : return address
-    // -----------------------------------
-
-    // Allocate space for the rest parameter array plus the backing store.
-    Label allocate, done_allocate;
-    __ mov(r10, Operand(JSArray::kSize + FixedArray::kHeaderSize));
-    __ add(r10, r10, r9);
-    __ Allocate(r10, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Setup the elements array in r6.
-    __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
-    __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
-    __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
-    __ addi(r7, r6,
-            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-    {
-      Label loop;
-      __ SmiUntag(r0, r3);
-      __ mtctr(r0);
-      __ bind(&loop);
-      __ LoadPU(ip, MemOperand(r5, -kPointerSize));
-      __ StorePU(ip, MemOperand(r7, kPointerSize));
-      __ bdnz(&loop);
-      __ addi(r7, r7, Operand(kPointerSize));
-    }
-
-    // Setup the rest parameter array in r7.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4);
-    __ StoreP(r4, MemOperand(r7, JSArray::kMapOffset));
-    __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
-    __ StoreP(r4, MemOperand(r7, JSArray::kPropertiesOffset));
-    __ StoreP(r6, MemOperand(r7, JSArray::kElementsOffset));
-    __ StoreP(r3, MemOperand(r7, JSArray::kLengthOffset));
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ addi(r3, r7, Operand(kHeapObjectTag));
-    __ Ret();
-
-    // Fall back to %AllocateInNewSpace (if not too big).
-    Label too_big_for_new_space;
-    __ bind(&allocate);
-    __ Cmpi(r10, Operand(kMaxRegularHeapObjectSize), r0);
-    __ bgt(&too_big_for_new_space);
-    {
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(r10);
-      __ Push(r3, r5, r10);
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-      __ mr(r6, r3);
-      __ Pop(r3, r5);
-    }
-    __ b(&done_allocate);
-
-    // Fall back to %NewRestParameter.
-    __ bind(&too_big_for_new_space);
-    __ push(r4);
-    __ TailCallRuntime(Runtime::kNewRestParameter);
-  }
-}
-
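
In effect, the stub above materializes the trailing arguments that exceed the declared formal parameter count, reading them off the arguments adaptor frame. A minimal C++ model of that arithmetic, with illustrative names:

#include <string>
#include <vector>

// Model of the rest-parameter computation; names are illustrative.
// actual_args comes from the arguments adaptor frame; without an adaptor
// frame there can be no arguments beyond the formal parameters.
std::vector<std::string> CollectRestParameters(
    bool has_adaptor_frame, int actual_args, int formal_params,
    const std::vector<std::string>& args) {
  if (!has_adaptor_frame) return {};  // no_rest_parameters path
  int rest = actual_args - formal_params;
  if (rest <= 0) return {};           // empty JSArray result
  // The stub copies the trailing `rest` arguments, walking the caller's
  // stack downward from just past the first rest parameter.
  return std::vector<std::string>(args.end() - rest, args.end());
}
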
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r4 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(r4);
-
-  // Make r10 point to the JavaScript frame.
-  __ mr(r10, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ LoadP(r10, MemOperand(r10, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ LoadP(ip, MemOperand(r10, StandardFrameConstants::kFunctionOffset));
-    __ cmp(ip, r4);
-    __ beq(&ok);
-    __ Abort(kInvalidFrameForFastNewSloppyArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
-  __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadWordArith(
-      r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
-#if V8_TARGET_ARCH_PPC64
-  __ SmiTag(r5);
-#endif
-  __ SmiToPtrArrayOffset(r6, r5);
-  __ add(r6, r10, r6);
-  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // r4 : function
-  // r5 : number of parameters (tagged)
-  // r6 : parameters pointer
-  // r10 : JavaScript frame pointer
-  // Registers used over whole function:
-  // r8 : arguments count (tagged)
-  // r9 : mapped parameter count (tagged)
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ LoadP(r7, MemOperand(r10, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(r3, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
-  __ beq(&adaptor_frame);
-
-  // No adaptor, parameter count = argument count.
-  __ mr(r8, r5);
-  __ mr(r9, r5);
-  __ b(&try_allocate);
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiToPtrArrayOffset(r6, r8);
-  __ add(r6, r6, r7);
-  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // r8 = argument count (tagged)
-  // r9 = parameter count (tagged)
-  // Compute the mapped parameter count = min(r5, r8) in r9.
-  __ cmp(r5, r8);
-  if (CpuFeatures::IsSupported(ISELECT)) {
-    __ isel(lt, r9, r5, r8);
-  } else {
-    Label skip;
-    __ mr(r9, r5);
-    __ blt(&skip);
-    __ mr(r9, r8);
-    __ bind(&skip);
-  }
-
-  __ bind(&try_allocate);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map: has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  // If there are no mapped parameters, we do not need the parameter_map.
-  __ CmpSmiLiteral(r9, Smi::kZero, r0);
-  if (CpuFeatures::IsSupported(ISELECT)) {
-    __ SmiToPtrArrayOffset(r11, r9);
-    __ addi(r11, r11, Operand(kParameterMapHeaderSize));
-    __ isel(eq, r11, r0, r11);
-  } else {
-    Label skip2, skip3;
-    __ bne(&skip2);
-    __ li(r11, Operand::Zero());
-    __ b(&skip3);
-    __ bind(&skip2);
-    __ SmiToPtrArrayOffset(r11, r9);
-    __ addi(r11, r11, Operand(kParameterMapHeaderSize));
-    __ bind(&skip3);
-  }
-
-  // 2. Backing store.
-  __ SmiToPtrArrayOffset(r7, r8);
-  __ add(r11, r11, r7);
-  __ addi(r11, r11, Operand(FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ addi(r11, r11, Operand(JSSloppyArgumentsObject::kSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(r11, r3, r11, r7, &runtime, NO_ALLOCATION_FLAGS);
-
-  // r3 = address of new object(s) (tagged)
-  // r5 = argument count (smi-tagged)
-  // Get the arguments boilerplate from the current native context into r7.
-  const int kNormalOffset =
-      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
-  const int kAliasedOffset =
-      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
-  __ LoadP(r7, NativeContextMemOperand());
-  __ cmpi(r9, Operand::Zero());
-  if (CpuFeatures::IsSupported(ISELECT)) {
-    __ LoadP(r11, MemOperand(r7, kNormalOffset));
-    __ LoadP(r7, MemOperand(r7, kAliasedOffset));
-    __ isel(eq, r7, r11, r7);
-  } else {
-    Label skip4, skip5;
-    __ bne(&skip4);
-    __ LoadP(r7, MemOperand(r7, kNormalOffset));
-    __ b(&skip5);
-    __ bind(&skip4);
-    __ LoadP(r7, MemOperand(r7, kAliasedOffset));
-    __ bind(&skip5);
-  }
-
-  // r3 = address of new object (tagged)
-  // r5 = argument count (smi-tagged)
-  // r7 = address of arguments map (tagged)
-  // r9 = mapped parameter count (tagged)
-  __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
-  __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex);
-  __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
-  __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
-  // Set up the callee in-object property.
-  __ AssertNotSmi(r4);
-  __ StoreP(r4, FieldMemOperand(r3, JSSloppyArgumentsObject::kCalleeOffset),
-            r0);
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  __ AssertSmi(r8);
-  __ StoreP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset),
-            r0);
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, r7 will point there, otherwise
-  // it will point to the backing store.
-  __ addi(r7, r3, Operand(JSSloppyArgumentsObject::kSize));
-  __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
-
-  // r3 = address of new object (tagged)
-  // r5 = argument count (tagged)
-  // r7 = address of parameter map or backing store (tagged)
-  // r9 = mapped parameter count (tagged)
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  __ CmpSmiLiteral(r9, Smi::kZero, r0);
-  if (CpuFeatures::IsSupported(ISELECT)) {
-    __ isel(eq, r4, r7, r4);
-    __ beq(&skip_parameter_map);
-  } else {
-    Label skip6;
-    __ bne(&skip6);
-    // Move backing store address to r4, because it is
-    // expected there when filling in the unmapped arguments.
-    __ mr(r4, r7);
-    __ b(&skip_parameter_map);
-    __ bind(&skip6);
-  }
-
-  __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex);
-  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
-  __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0);
-  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
-  __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
-            r0);
-  __ SmiToPtrArrayOffset(r8, r9);
-  __ add(r8, r8, r7);
-  __ addi(r8, r8, Operand(kParameterMapHeaderSize));
-  __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
-            r0);
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameters thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop;
-  __ mr(r8, r9);
-  __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
-  __ sub(r11, r11, r9);
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ SmiToPtrArrayOffset(r4, r8);
-  __ add(r4, r4, r7);
-  __ addi(r4, r4, Operand(kParameterMapHeaderSize));
-
-  // r4 = address of backing store (tagged)
-  // r7 = address of parameter map (tagged)
-  // r8 = temporary scratch (a.o., for address calculation)
-  // r10 = temporary scratch (a.o., for address calculation)
-  // ip = the hole value
-  __ SmiUntag(r8);
-  __ mtctr(r8);
-  __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
-  __ add(r10, r4, r8);
-  __ add(r8, r7, r8);
-  __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-
-  __ bind(&parameters_loop);
-  __ StorePU(r11, MemOperand(r8, -kPointerSize));
-  __ StorePU(ip, MemOperand(r10, -kPointerSize));
-  __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
-  __ bdnz(&parameters_loop);
-
-  // Restore r8 = argument count (tagged).
-  __ LoadP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset));
-
-  __ bind(&skip_parameter_map);
-  // r3 = address of new object (tagged)
-  // r4 = address of backing store (tagged)
-  // r8 = argument count (tagged)
-  // r9 = mapped parameter count (tagged)
-  // r11 = scratch
-  // Copy arguments header and remaining slots (if there are any).
-  __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex);
-  __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0);
-  __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0);
-  __ sub(r11, r8, r9, LeaveOE, SetRC);
-  __ Ret(eq, cr0);
-
-  Label arguments_loop;
-  __ SmiUntag(r11);
-  __ mtctr(r11);
-
-  __ SmiToPtrArrayOffset(r0, r9);
-  __ sub(r6, r6, r0);
-  __ add(r11, r4, r0);
-  __ addi(r11, r11,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-
-  __ bind(&arguments_loop);
-  __ LoadPU(r7, MemOperand(r6, -kPointerSize));
-  __ StorePU(r7, MemOperand(r11, kPointerSize));
-  __ bdnz(&arguments_loop);
-
-  // Return.
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  // r8 = argument count (tagged)
-  __ bind(&runtime);
-  __ Push(r4, r6, r8);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
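
The sloppy-arguments stub sizes three objects for its single allocation: an optional parameter map (a FixedArray header plus two extra words for the context and the backing store, as kParameterMapHeaderSize above encodes), a backing store with one slot per actual argument, and the JSSloppyArgumentsObject itself; mapped parameters alias context slots stored in reverse order. A sketch of the size and index arithmetic, counted in words rather than bytes and with illustrative header sizes:

#include <algorithm>

// Word-count model of the one-shot allocation above. Header sizes are
// illustrative; the real code works in bytes via kPointerSize and the
// actual object layouts.
int TotalAllocationWords(int formal_params, int actual_args) {
  const int kFixedArrayHeaderWords = 2;  // map + length (model)
  const int kSloppyArgumentsWords = 5;   // JSSloppyArgumentsObject (model)
  int mapped = std::min(formal_params, actual_args);
  // Parameter map: FixedArray header + 2 extra words (context, backing
  // store) + one slot per mapped parameter; omitted entirely when empty.
  int parameter_map = mapped == 0 ? 0 : kFixedArrayHeaderWords + 2 + mapped;
  int backing_store = kFixedArrayHeaderWords + actual_args;
  return parameter_map + backing_store + kSloppyArgumentsWords;
}

// Context slot index for mapped parameter i: parameters live in the
// context in reverse order starting at MIN_CONTEXT_SLOTS, matching the
// index comment next to parameters_loop above.
int ContextSlotForParameter(int min_context_slots, int formal_params, int i) {
  return min_context_slots + formal_params - 1 - i;
}
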
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r4 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(r4);
-
-  // Make r5 point to the JavaScript frame.
-  __ mr(r5, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kFunctionOffset));
-    __ cmp(ip, r4);
-    __ beq(&ok);
-    __ Abort(kInvalidFrameForFastNewStrictArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have an arguments adaptor frame below the function frame.
-  Label arguments_adaptor, arguments_done;
-  __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
-  __ beq(&arguments_adaptor);
-  {
-    __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadWordArith(
-        r3,
-        FieldMemOperand(r7, SharedFunctionInfo::kFormalParameterCountOffset));
-#if V8_TARGET_ARCH_PPC64
-    __ SmiTag(r3);
-#endif
-    __ SmiToPtrArrayOffset(r9, r3);
-    __ add(r5, r5, r9);
-  }
-  __ b(&arguments_done);
-  __ bind(&arguments_adaptor);
-  {
-    __ LoadP(r3, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ SmiToPtrArrayOffset(r9, r3);
-    __ add(r5, r6, r9);
-  }
-  __ bind(&arguments_done);
-  __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // ----------- S t a t e -------------
-  //  -- cp : context
-  //  -- r3 : number of arguments (tagged)
-  //  -- r4 : function
-  //  -- r5 : pointer just past first argument
-  //  -- r9 : size of arguments
-  //  -- lr : return address
-  // -----------------------------------
-
-  // Allocate space for the strict arguments object plus the backing store.
-  Label allocate, done_allocate;
-  __ mov(r10,
-         Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ add(r10, r10, r9);
-  __ Allocate(r10, r6, r7, r8, &allocate, NO_ALLOCATION_FLAGS);
-  __ bind(&done_allocate);
-
-  // Setup the elements array in r6.
-  __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex);
-  __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
-  __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
-  __ addi(r7, r6,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-  {
-    Label loop, done_loop;
-    __ SmiUntag(r0, r3, SetRC);
-    __ beq(&done_loop, cr0);
-    __ mtctr(r0);
-    __ bind(&loop);
-    __ LoadPU(ip, MemOperand(r5, -kPointerSize));
-    __ StorePU(ip, MemOperand(r7, kPointerSize));
-    __ bdnz(&loop);
-    __ bind(&done_loop);
-    __ addi(r7, r7, Operand(kPointerSize));
-  }
-
-  // Setup the strict arguments object in r7.
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4);
-  __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kMapOffset));
-  __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
-  __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kPropertiesOffset));
-  __ StoreP(r6, MemOperand(r7, JSStrictArgumentsObject::kElementsOffset));
-  __ StoreP(r3, MemOperand(r7, JSStrictArgumentsObject::kLengthOffset));
-  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
-  __ addi(r3, r7, Operand(kHeapObjectTag));
-  __ Ret();
-
-  // Fall back to %AllocateInNewSpace (if not too big).
-  Label too_big_for_new_space;
-  __ bind(&allocate);
-  __ Cmpi(r10, Operand(kMaxRegularHeapObjectSize), r0);
-  __ bgt(&too_big_for_new_space);
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(r10);
-    __ Push(r3, r5, r10);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ mr(r6, r3);
-    __ Pop(r3, r5);
-  }
-  __ b(&done_allocate);
-
-  // Fall back to %NewStrictArguments.
-  __ bind(&too_big_for_new_space);
-  __ push(r4);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
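
All three stubs above share one fallback ladder: try a fast in-line allocation, retry through %AllocateInNewSpace when the fast path bails out, and give up to the full runtime implementation when the object exceeds kMaxRegularHeapObjectSize. A schematic of that decision, with illustrative names:

#include <cstddef>

// Schematic of the shared fallback ladder used by the stubs above.
enum class AllocPath { kFastInline, kAllocateInNewSpace, kFullRuntime };

AllocPath ChooseAllocationPath(std::size_t size_in_bytes,
                               std::size_t max_regular_heap_object_size,
                               bool fast_path_succeeded) {
  if (fast_path_succeeded) return AllocPath::kFastInline;  // __ Allocate(...)
  if (size_in_bytes <= max_regular_heap_object_size)
    return AllocPath::kAllocateInNewSpace;  // Runtime::kAllocateInNewSpace
  return AllocPath::kFullRuntime;  // e.g. Runtime::kNewStrictArguments
}
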
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
diff --git a/src/ppc/code-stubs-ppc.h b/src/ppc/code-stubs-ppc.h
index d394171..f873f93 100644
--- a/src/ppc/code-stubs-ppc.h
+++ b/src/ppc/code-stubs-ppc.h
@@ -16,15 +16,6 @@
 
 class StringHelper : public AllStatic {
  public:
-  // Generate code for copying a large number of characters. This function
-  // is allowed to spend extra time setting up conditions to make copying
-  // faster. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharacters(MacroAssembler* masm, Register dest,
-                                     Register src, Register count,
-                                     Register scratch,
-                                     String::Encoding encoding);
-
   // Compares two flat one-byte strings and returns result in r0.
   static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
                                                 Register left, Register right,
@@ -297,10 +288,6 @@
                                      Register properties, Handle<Name> name,
                                      Register scratch0);
 
-  static void GeneratePositiveLookup(MacroAssembler* masm, Label* miss,
-                                     Label* done, Register elements,
-                                     Register name, Register r0, Register r1);
-
   bool SometimesSetsUpAFrame() override { return false; }
 
  private:
diff --git a/src/ppc/codegen-ppc.cc b/src/ppc/codegen-ppc.cc
index 07853ed..212e6db 100644
--- a/src/ppc/codegen-ppc.cc
+++ b/src/ppc/codegen-ppc.cc
@@ -73,308 +73,13 @@
 
 #define __ ACCESS_MASM(masm)
 
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm, Register receiver, Register key, Register value,
-    Register target_map, AllocationSiteMode mode,
-    Label* allocation_memento_found) {
-  Register scratch_elements = r7;
-  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    DCHECK(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r11,
-                                         allocation_memento_found);
-  }
-
-  // Set transitioned map.
-  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm, Register receiver, Register key, Register value,
-    Register target_map, AllocationSiteMode mode, Label* fail) {
-  // lr contains the return address
-  Label loop, entry, convert_hole, only_change_map, done;
-  Register elements = r7;
-  Register length = r8;
-  Register array = r9;
-  Register array_end = array;
-
-  // target_map parameter can be clobbered.
-  Register scratch1 = target_map;
-  Register scratch2 = r10;
-  Register scratch3 = r11;
-  Register scratch4 = r14;
-
-  // Verify input registers don't conflict with locals.
-  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
-                     scratch2));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
-  __ beq(&only_change_map);
-
-  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  // length: number of elements (smi-tagged)
-
-  // Allocate new FixedDoubleArray.
-  __ SmiToDoubleArrayOffset(scratch3, length);
-  __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
-  __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
-  __ subi(array, array, Operand(kHeapObjectTag));
-  // array: destination FixedDoubleArray, not tagged as heap object.
-  // elements: source FixedArray.
-
-  // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
-  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
-  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
-
-  // Update receiver's map.
-  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ addi(scratch1, array, Operand(kHeapObjectTag));
-  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
-  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  // Prepare for conversion loop.
-  __ addi(scratch1, elements,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
-  __ SmiToDoubleArrayOffset(array_end, length);
-  __ add(array_end, scratch2, array_end);
-// Repurpose registers no longer in use.
-#if V8_TARGET_ARCH_PPC64
-  Register hole_int64 = elements;
-  __ mov(hole_int64, Operand(kHoleNanInt64));
-#else
-  Register hole_lower = elements;
-  Register hole_upper = length;
-  __ mov(hole_lower, Operand(kHoleNanLower32));
-  __ mov(hole_upper, Operand(kHoleNanUpper32));
-#endif
-  // scratch1: begin of source FixedArray element fields, not tagged
-  // hole_lower: kHoleNanLower32 OR hole_int64
-  // hole_upper: kHoleNanUpper32
-  // array_end: end of destination FixedDoubleArray, not tagged
-  // scratch2: begin of FixedDoubleArray element fields, not tagged
-
-  __ b(&entry);
-
-  __ bind(&only_change_map);
-  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ b(&done);
-
-  // Convert and copy elements.
-  __ bind(&loop);
-  __ LoadP(scratch3, MemOperand(scratch1));
-  __ addi(scratch1, scratch1, Operand(kPointerSize));
-  // scratch3: current element
-  __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);
-
-  // Normal smi, convert to double and store.
-  __ ConvertIntToDouble(scratch3, d0);
-  __ stfd(d0, MemOperand(scratch2, 0));
-  __ addi(scratch2, scratch2, Operand(8));
-  __ b(&entry);
-
-  // Hole found, store the-hole NaN.
-  __ bind(&convert_hole);
-  if (FLAG_debug_code) {
-    __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
-    __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
-    __ Assert(eq, kObjectFoundInSmiOnlyArray);
-  }
-#if V8_TARGET_ARCH_PPC64
-  __ std(hole_int64, MemOperand(scratch2, 0));
-#else
-  __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
-  __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
-#endif
-  __ addi(scratch2, scratch2, Operand(8));
-
-  __ bind(&entry);
-  __ cmp(scratch2, array_end);
-  __ blt(&loop);
-
-  __ bind(&done);
-}
-
-
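
GenerateSmiToDouble converts each smi element into a double stored in a freshly allocated FixedDoubleArray, writing a dedicated hole NaN bit pattern wherever the source holds the-hole. A C++ model of the per-element loop; the hole value below is a stand-in bit pattern, not V8's exact kHoleNanInt64 constant:

#include <cstdint>
#include <cstring>
#include <vector>

// Model of the smi -> double element loop above.
const uint64_t kHoleNanBitsModel = 0x7FF7DEAD7FF7DEADULL;  // placeholder

std::vector<uint64_t> SmiArrayToDoubleArray(const std::vector<int32_t>& src,
                                            const std::vector<bool>& is_hole) {
  std::vector<uint64_t> dst(src.size());
  for (size_t i = 0; i < src.size(); i++) {
    if (is_hole[i]) {
      dst[i] = kHoleNanBitsModel;  // "Hole found, store the-hole NaN."
    } else {
      double d = static_cast<double>(src[i]);  // ConvertIntToDouble
      std::memcpy(&dst[i], &d, sizeof d);
    }
  }
  return dst;
}
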
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, Register receiver, Register key, Register value,
-    Register target_map, AllocationSiteMode mode, Label* fail) {
-  // Register lr contains the return address.
-  Label loop, convert_hole, gc_required, only_change_map;
-  Register elements = r7;
-  Register array = r9;
-  Register length = r8;
-  Register scratch = r10;
-  Register scratch3 = r11;
-  Register hole_value = r14;
-
-  // Verify input registers don't conflict with locals.
-  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
-                     scratch));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
-  __ beq(&only_change_map);
-
-  __ Push(target_map, receiver, key, value);
-  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  // elements: source FixedDoubleArray
-  // length: number of elements (smi-tagged)
-
-  // Allocate new FixedArray.
-  // Re-use value and target_map registers, as they have been saved on the
-  // stack.
-  Register array_size = value;
-  Register allocate_scratch = target_map;
-  __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
-  __ SmiToPtrArrayOffset(r0, length);
-  __ add(array_size, array_size, r0);
-  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
-              NO_ALLOCATION_FLAGS);
-  // array: destination FixedArray, tagged as heap object
-  // Set destination FixedArray's length and map.
-  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
-  __ StoreP(length, FieldMemOperand(array,
-            FixedDoubleArray::kLengthOffset), r0);
-  __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);
-
-  // Prepare for conversion loop.
-  Register src_elements = elements;
-  Register dst_elements = target_map;
-  Register dst_end = length;
-  Register heap_number_map = scratch;
-  __ addi(src_elements, elements,
-          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(length, length);
-  __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
-
-  Label initialization_loop, loop_done;
-  __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
-  __ beq(&loop_done, cr0);
-
-  // Allocating heap numbers in the loop below can fail and cause a jump to
-  // gc_required. We can't leave a partly initialized FixedArray behind,
-  // so pessimistically fill it with holes now.
-  __ mtctr(r0);
-  __ addi(dst_elements, array,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-  __ bind(&initialization_loop);
-  __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
-  __ bdnz(&initialization_loop);
-
-  __ addi(dst_elements, array,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(dst_end, dst_elements, length);
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  // Use offset addresses in src_elements to take full advantage of
-  // post-indexing.
-  // dst_elements: begin of destination FixedArray element fields, not tagged
-  // src_elements: begin of source FixedDoubleArray element fields,
-  //               not tagged, +4
-  // dst_end: end of destination FixedArray, not tagged
-  // array: destination FixedArray
-  // hole_value: the-hole pointer
-  // heap_number_map: heap number map
-  __ b(&loop);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ Pop(target_map, receiver, key, value);
-  __ b(fail);
-
-  // Replace the-hole NaN with the-hole pointer.
-  __ bind(&convert_hole);
-  __ StoreP(hole_value, MemOperand(dst_elements));
-  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
-  __ cmpl(dst_elements, dst_end);
-  __ bge(&loop_done);
-
-  __ bind(&loop);
-  Register upper_bits = key;
-  __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
-  __ addi(src_elements, src_elements, Operand(kDoubleSize));
-  // upper_bits: current element's upper 32 bit
-  // src_elements: address of next element's upper 32 bit
-  __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
-  __ beq(&convert_hole);
-
-  // Non-hole double, copy value into a heap number.
-  Register heap_number = receiver;
-  Register scratch2 = value;
-  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
-                        &gc_required);
-  // heap_number: new heap number
-#if V8_TARGET_ARCH_PPC64
-  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
-  // subtract tag for std
-  __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
-  __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
-#else
-  __ lwz(scratch2,
-         MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
-  __ lwz(upper_bits,
-         MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
-  __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
-  __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
-#endif
-  __ mr(scratch2, dst_elements);
-  __ StoreP(heap_number, MemOperand(dst_elements));
-  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
-  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ cmpl(dst_elements, dst_end);
-  __ blt(&loop);
-  __ bind(&loop_done);
-
-  __ Pop(target_map, receiver, key, value);
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
-  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  __ bind(&only_change_map);
-  // Update receiver's map.
-  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
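
GenerateDoubleToObject runs the opposite conversion: each non-hole double is boxed as a heap number, and holes are detected via the upper 32 bits of the double. Because boxing can trigger a GC, the stub pessimistically pre-fills the destination with holes first. A schematic model; the hole test is parameterized rather than hard-coding V8's kHoleNanUpper32:

#include <cstdint>
#include <cstring>
#include <vector>

// Model of the double -> object loop above.
struct Boxed {
  bool is_hole;
  double value;  // stands in for an allocated HeapNumber
};

std::vector<Boxed> DoubleArrayToObjectArray(const std::vector<uint64_t>& src,
                                            uint32_t hole_nan_upper32) {
  // Pre-fill with holes: in the stub, heap-number allocation can trigger
  // a GC that must never see a partly initialized FixedArray.
  std::vector<Boxed> dst(src.size(), Boxed{true, 0.0});
  for (size_t i = 0; i < src.size(); i++) {
    uint32_t upper = static_cast<uint32_t>(src[i] >> 32);
    if (upper == hole_nan_upper32) continue;  // leave the-hole in place
    double d;
    std::memcpy(&d, &src[i], sizeof d);
    dst[i] = Boxed{false, d};  // AllocateHeapNumber + store in the stub
  }
  return dst;
}
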
 // assume ip can be used as a scratch register below
 void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                        Register index, Register result,
                                        Label* call_runtime) {
+  Label indirect_string_loaded;
+  __ bind(&indirect_string_loaded);
+
   // Fetch the instance type of the receiver into result register.
   __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
   __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -384,20 +89,26 @@
   __ andi(r0, result, Operand(kIsIndirectStringMask));
   __ beq(&check_sequential, cr0);
 
-  // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ mov(ip, Operand(kSlicedNotConsMask));
-  __ and_(r0, result, ip, SetRC);
-  __ beq(&cons_string, cr0);
+  // Dispatch on the indirect string shape: slice or cons or thin.
+  Label cons_string, thin_string;
+  __ andi(ip, result, Operand(kStringRepresentationMask));
+  __ cmpi(ip, Operand(kConsStringTag));
+  __ beq(&cons_string);
+  __ cmpi(ip, Operand(kThinStringTag));
+  __ beq(&thin_string);
 
   // Handle slices.
-  Label indirect_string_loaded;
   __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
   __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
   __ SmiUntag(ip, result);
   __ add(index, index, ip);
   __ b(&indirect_string_loaded);
 
+  // Handle thin strings.
+  __ bind(&thin_string);
+  __ LoadP(string, FieldMemOperand(string, ThinString::kActualOffset));
+  __ b(&indirect_string_loaded);
+
   // Handle cons strings.
   // Check whether the right hand side is the empty string (i.e. if
   // this is really a flat string in a cons string). If that is not
@@ -409,10 +120,7 @@
   __ bne(call_runtime);
   // Get the first of the two strings and load its instance type.
   __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+  __ b(&indirect_string_loaded);
 
   // Distinguish sequential and external strings. Only these two string
   // representations can reach here (slices and flat cons strings have been
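
After this change the dispatch loops back to indirect_string_loaded until a direct string is reached, unwrapping three indirect shapes: sliced strings (follow the parent and add the offset), thin strings (follow the forwarding pointer), and cons strings whose second part is empty (follow the first part); a non-flat cons string still goes to the runtime. A schematic model with illustrative types:

#include <cstddef>

// Schematic model of the unwrapping loop; types are illustrative.
enum class Shape { kSequential, kExternal, kSliced, kThin, kCons };

struct Str {
  Shape shape;
  Str* target = nullptr;        // sliced: parent, thin: actual, cons: first
  std::size_t offset = 0;       // sliced strings only
  bool second_is_empty = true;  // cons strings only
};

// Returns the direct string (adjusting index), or nullptr when a cons
// string is not flat (the stub branches to call_runtime in that case).
Str* Unwrap(Str* s, std::size_t* index) {
  for (;;) {  // each iteration re-enters indirect_string_loaded
    switch (s->shape) {
      case Shape::kSliced:
        *index += s->offset;  // SlicedString::kOffsetOffset
        s = s->target;
        break;
      case Shape::kThin:
        s = s->target;  // ThinString::kActualOffset
        break;
      case Shape::kCons:
        if (!s->second_is_empty) return nullptr;  // not flat: call_runtime
        s = s->target;  // ConsString::kFirstOffset
        break;
      default:
        return s;  // sequential or external: done
    }
  }
}
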
@@ -493,31 +201,25 @@
   return result;
 }
 
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
 
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
-                               MarkingParity* parity) {
-  if (IsYoungSequence(isolate, sequence)) {
-    *age = kNoAgeCodeAge;
-    *parity = NO_MARKING_PARITY;
-  } else {
-    Code* code = NULL;
-    Address target_address =
-        Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
-    Code* stub = GetCodeFromTargetAddress(target_address);
-    GetCodeAgeAndParity(stub, age, parity);
-  }
+  Code* code = NULL;
+  Address target_address =
+      Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
+  Code* stub = GetCodeFromTargetAddress(target_address);
+  return GetAgeOfCodeAgeStub(stub);
 }
 
-
-void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
-                                MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+                                Code::Age age) {
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
     Assembler::FlushICache(isolate, sequence, young_length);
   } else {
     // FIXED_SEQUENCE
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age);
     CodePatcher patcher(isolate, sequence,
                         young_length / Assembler::kInstrSize);
     Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
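
With MarkingParity gone, both the age query and the patch side reduce to a single dimension. Roughly, under illustrative types that are not V8's:

// Rough shape of the simplified aging logic after this patch.
enum Age { kNoAge, kOldAge };

struct SequenceModel {
  bool young;    // IsYoungSequence(isolate, sequence)
  Age stub_age;  // age decoded from the code-age stub the sequence calls
};

Age GetCodeAgeModel(const SequenceModel& seq) {
  return seq.young ? kNoAge : seq.stub_age;  // GetAgeOfCodeAgeStub
}

void PatchCodeAgeModel(SequenceModel* seq, Age age) {
  if (age == kNoAge) {
    seq->young = true;  // CopyYoungSequenceTo + FlushICache
  } else {
    seq->young = false;  // FIXED_SEQUENCE: patch in a call to the age stub
    seq->stub_age = age;
  }
}
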
diff --git a/src/ppc/constants-ppc.h b/src/ppc/constants-ppc.h
index 393f039..d131438 100644
--- a/src/ppc/constants-ppc.h
+++ b/src/ppc/constants-ppc.h
@@ -91,233 +91,2497 @@
 // representing instructions from usual 32 bit values.
 // Instruction objects are pointers to 32bit values, and provide methods to
 // access the various ISA fields.
-typedef int32_t Instr;
+typedef uint32_t Instr;
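
With Instr now an unsigned 32-bit value, the primary opcode occupies the top six bits, which is why the removed Opcode enum below encodes every primary opcode as n << 26. Decoding it back is a single shift:

#include <cstdint>

typedef uint32_t Instr;  // as in the header above

// The primary PowerPC opcode lives in bits 31..26 of the instruction
// word (e.g. ADDI = 14 << 26 in the enum that follows).
inline uint32_t PrimaryOpcode(Instr instr) { return instr >> 26; }
// PrimaryOpcode(14u << 26) == 14, i.e. an ADDI-class instruction.
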
 
-// Opcodes as defined in section 4.2 table 34 (32bit PowerPC)
-enum Opcode {
-  TWI = 3 << 26,       // Trap Word Immediate
-  MULLI = 7 << 26,     // Multiply Low Immediate
-  SUBFIC = 8 << 26,    // Subtract from Immediate Carrying
-  CMPLI = 10 << 26,    // Compare Logical Immediate
-  CMPI = 11 << 26,     // Compare Immediate
-  ADDIC = 12 << 26,    // Add Immediate Carrying
-  ADDICx = 13 << 26,   // Add Immediate Carrying and Record
-  ADDI = 14 << 26,     // Add Immediate
-  ADDIS = 15 << 26,    // Add Immediate Shifted
-  BCX = 16 << 26,      // Branch Conditional
-  SC = 17 << 26,       // System Call
-  BX = 18 << 26,       // Branch
-  EXT1 = 19 << 26,     // Extended code set 1
-  RLWIMIX = 20 << 26,  // Rotate Left Word Immediate then Mask Insert
-  RLWINMX = 21 << 26,  // Rotate Left Word Immediate then AND with Mask
-  RLWNMX = 23 << 26,   // Rotate Left Word then AND with Mask
-  ORI = 24 << 26,      // OR Immediate
-  ORIS = 25 << 26,     // OR Immediate Shifted
-  XORI = 26 << 26,     // XOR Immediate
-  XORIS = 27 << 26,    // XOR Immediate Shifted
-  ANDIx = 28 << 26,    // AND Immediate
-  ANDISx = 29 << 26,   // AND Immediate Shifted
-  EXT5 = 30 << 26,     // Extended code set 5 - 64bit only
-  EXT2 = 31 << 26,     // Extended code set 2
-  LWZ = 32 << 26,      // Load Word and Zero
-  LWZU = 33 << 26,     // Load Word with Zero Update
-  LBZ = 34 << 26,      // Load Byte and Zero
-  LBZU = 35 << 26,     // Load Byte and Zero with Update
-  STW = 36 << 26,      // Store
-  STWU = 37 << 26,     // Store Word with Update
-  STB = 38 << 26,      // Store Byte
-  STBU = 39 << 26,     // Store Byte with Update
-  LHZ = 40 << 26,      // Load Half and Zero
-  LHZU = 41 << 26,     // Load Half and Zero with Update
-  LHA = 42 << 26,      // Load Half Algebraic
-  LHAU = 43 << 26,     // Load Half Algebraic with Update
-  STH = 44 << 26,      // Store Half
-  STHU = 45 << 26,     // Store Half with Update
-  LMW = 46 << 26,      // Load Multiple Word
-  STMW = 47 << 26,     // Store Multiple Word
-  LFS = 48 << 26,      // Load Floating-Point Single
-  LFSU = 49 << 26,     // Load Floating-Point Single with Update
-  LFD = 50 << 26,      // Load Floating-Point Double
-  LFDU = 51 << 26,     // Load Floating-Point Double with Update
-  STFS = 52 << 26,     // Store Floating-Point Single
-  STFSU = 53 << 26,    // Store Floating-Point Single with Update
-  STFD = 54 << 26,     // Store Floating-Point Double
-  STFDU = 55 << 26,    // Store Floating-Point Double with Update
-  LD = 58 << 26,       // Load Double Word
-  EXT3 = 59 << 26,     // Extended code set 3
-  STD = 62 << 26,      // Store Double Word (optionally with Update)
-  EXT4 = 63 << 26      // Extended code set 4
-};
+#define PPC_XX3_OPCODE_LIST(V)                                                 \
+  /* VSX Scalar Add Double-Precision */                                        \
+  V(xsadddp, XSADDDP, 0xF0000100)                                              \
+  /* VSX Scalar Add Single-Precision */                                        \
+  V(xsaddsp, XSADDSP, 0xF0000000)                                              \
+  /* VSX Scalar Compare Ordered Double-Precision */                            \
+  V(xscmpodp, XSCMPODP, 0xF0000158)                                            \
+  /* VSX Scalar Compare Unordered Double-Precision */                          \
+  V(xscmpudp, XSCMPUDP, 0xF0000118)                                            \
+  /* VSX Scalar Copy Sign Double-Precision */                                  \
+  V(xscpsgndp, XSCPSGNDP, 0xF0000580)                                          \
+  /* VSX Scalar Divide Double-Precision */                                     \
+  V(xsdivdp, XSDIVDP, 0xF00001C0)                                              \
+  /* VSX Scalar Divide Single-Precision */                                     \
+  V(xsdivsp, XSDIVSP, 0xF00000C0)                                              \
+  /* VSX Scalar Multiply-Add Type-A Double-Precision */                        \
+  V(xsmaddadp, XSMADDADP, 0xF0000108)                                          \
+  /* VSX Scalar Multiply-Add Type-A Single-Precision */                        \
+  V(xsmaddasp, XSMADDASP, 0xF0000008)                                          \
+  /* VSX Scalar Multiply-Add Type-M Double-Precision */                        \
+  V(xsmaddmdp, XSMADDMDP, 0xF0000148)                                          \
+  /* VSX Scalar Multiply-Add Type-M Single-Precision */                        \
+  V(xsmaddmsp, XSMADDMSP, 0xF0000048)                                          \
+  /* VSX Scalar Maximum Double-Precision */                                    \
+  V(xsmaxdp, XSMAXDP, 0xF0000500)                                              \
+  /* VSX Scalar Minimum Double-Precision */                                    \
+  V(xsmindp, XSMINDP, 0xF0000540)                                              \
+  /* VSX Scalar Multiply-Subtract Type-A Double-Precision */                   \
+  V(xsmsubadp, XSMSUBADP, 0xF0000188)                                          \
+  /* VSX Scalar Multiply-Subtract Type-A Single-Precision */                   \
+  V(xsmsubasp, XSMSUBASP, 0xF0000088)                                          \
+  /* VSX Scalar Multiply-Subtract Type-M Double-Precision */                   \
+  V(xsmsubmdp, XSMSUBMDP, 0xF00001C8)                                          \
+  /* VSX Scalar Multiply-Subtract Type-M Single-Precision */                   \
+  V(xsmsubmsp, XSMSUBMSP, 0xF00000C8)                                          \
+  /* VSX Scalar Multiply Double-Precision */                                   \
+  V(xsmuldp, XSMULDP, 0xF0000180)                                              \
+  /* VSX Scalar Multiply Single-Precision */                                   \
+  V(xsmulsp, XSMULSP, 0xF0000080)                                              \
+  /* VSX Scalar Negative Multiply-Add Type-A Double-Precision */               \
+  V(xsnmaddadp, XSNMADDADP, 0xF0000508)                                        \
+  /* VSX Scalar Negative Multiply-Add Type-A Single-Precision */               \
+  V(xsnmaddasp, XSNMADDASP, 0xF0000408)                                        \
+  /* VSX Scalar Negative Multiply-Add Type-M Double-Precision */               \
+  V(xsnmaddmdp, XSNMADDMDP, 0xF0000548)                                        \
+  /* VSX Scalar Negative Multiply-Add Type-M Single-Precision */               \
+  V(xsnmaddmsp, XSNMADDMSP, 0xF0000448)                                        \
+  /* VSX Scalar Negative Multiply-Subtract Type-A Double-Precision */          \
+  V(xsnmsubadp, XSNMSUBADP, 0xF0000588)                                        \
+  /* VSX Scalar Negative Multiply-Subtract Type-A Single-Precision */          \
+  V(xsnmsubasp, XSNMSUBASP, 0xF0000488)                                        \
+  /* VSX Scalar Negative Multiply-Subtract Type-M Double-Precision */          \
+  V(xsnmsubmdp, XSNMSUBMDP, 0xF00005C8)                                        \
+  /* VSX Scalar Negative Multiply-Subtract Type-M Single-Precision */          \
+  V(xsnmsubmsp, XSNMSUBMSP, 0xF00004C8)                                        \
+  /* VSX Scalar Reciprocal Estimate Double-Precision */                        \
+  V(xsredp, XSREDP, 0xF0000168)                                                \
+  /* VSX Scalar Reciprocal Estimate Single-Precision */                        \
+  V(xsresp, XSRESP, 0xF0000068)                                                \
+  /* VSX Scalar Subtract Double-Precision */                                   \
+  V(xssubdp, XSSUBDP, 0xF0000140)                                              \
+  /* VSX Scalar Subtract Single-Precision */                                   \
+  V(xssubsp, XSSUBSP, 0xF0000040)                                              \
+  /* VSX Scalar Test for software Divide Double-Precision */                   \
+  V(xstdivdp, XSTDIVDP, 0xF00001E8)                                            \
+  /* VSX Vector Add Double-Precision */                                        \
+  V(xvadddp, XVADDDP, 0xF0000300)                                              \
+  /* VSX Vector Add Single-Precision */                                        \
+  V(xvaddsp, XVADDSP, 0xF0000200)                                              \
+  /* VSX Vector Compare Equal To Double-Precision */                           \
+  V(xvcmpeqdp, XVCMPEQDP, 0xF0000318)                                          \
+  /* VSX Vector Compare Equal To Double-Precision & record CR6 */              \
+  V(xvcmpeqdpx, XVCMPEQDPx, 0xF0000718)                                        \
+  /* VSX Vector Compare Equal To Single-Precision */                           \
+  V(xvcmpeqsp, XVCMPEQSP, 0xF0000218)                                          \
+  /* VSX Vector Compare Equal To Single-Precision & record CR6 */              \
+  V(xvcmpeqspx, XVCMPEQSPx, 0xF0000618)                                        \
+  /* VSX Vector Compare Greater Than or Equal To Double-Precision */           \
+  V(xvcmpgedp, XVCMPGEDP, 0xF0000398)                                          \
+  /* VSX Vector Compare Greater Than or Equal To Double-Precision & record */  \
+  /* CR6 */                                                                    \
+  V(xvcmpgedpx, XVCMPGEDPx, 0xF0000798)                                        \
+  /* VSX Vector Compare Greater Than or Equal To Single-Precision */           \
+  V(xvcmpgesp, XVCMPGESP, 0xF0000298)                                          \
+  /* VSX Vector Compare Greater Than or Equal To Single-Precision & record */  \
+  /* CR6 */                                                                    \
+  V(xvcmpgespx, XVCMPGESPx, 0xF0000698)                                        \
+  /* VSX Vector Compare Greater Than Double-Precision */                       \
+  V(xvcmpgtdp, XVCMPGTDP, 0xF0000358)                                          \
+  /* VSX Vector Compare Greater Than Double-Precision & record CR6 */          \
+  V(xvcmpgtdpx, XVCMPGTDPx, 0xF0000758)                                        \
+  /* VSX Vector Compare Greater Than Single-Precision */                       \
+  V(xvcmpgtsp, XVCMPGTSP, 0xF0000258)                                          \
+  /* VSX Vector Compare Greater Than Single-Precision & record CR6 */          \
+  V(xvcmpgtspx, XVCMPGTSPx, 0xF0000658)                                        \
+  /* VSX Vector Copy Sign Double-Precision */                                  \
+  V(xvcpsgndp, XVCPSGNDP, 0xF0000780)                                          \
+  /* VSX Vector Copy Sign Single-Precision */                                  \
+  V(xvcpsgnsp, XVCPSGNSP, 0xF0000680)                                          \
+  /* VSX Vector Divide Double-Precision */                                     \
+  V(xvdivdp, XVDIVDP, 0xF00003C0)                                              \
+  /* VSX Vector Divide Single-Precision */                                     \
+  V(xvdivsp, XVDIVSP, 0xF00002C0)                                              \
+  /* VSX Vector Multiply-Add Type-A Double-Precision */                        \
+  V(xvmaddadp, XVMADDADP, 0xF0000308)                                          \
+  /* VSX Vector Multiply-Add Type-A Single-Precision */                        \
+  V(xvmaddasp, XVMADDASP, 0xF0000208)                                          \
+  /* VSX Vector Multiply-Add Type-M Double-Precision */                        \
+  V(xvmaddmdp, XVMADDMDP, 0xF0000348)                                          \
+  /* VSX Vector Multiply-Add Type-M Single-Precision */                        \
+  V(xvmaddmsp, XVMADDMSP, 0xF0000248)                                          \
+  /* VSX Vector Maximum Double-Precision */                                    \
+  V(xvmaxdp, XVMAXDP, 0xF0000700)                                              \
+  /* VSX Vector Maximum Single-Precision */                                    \
+  V(xvmaxsp, XVMAXSP, 0xF0000600)                                              \
+  /* VSX Vector Minimum Double-Precision */                                    \
+  V(xvmindp, XVMINDP, 0xF0000740)                                              \
+  /* VSX Vector Minimum Single-Precision */                                    \
+  V(xvminsp, XVMINSP, 0xF0000640)                                              \
+  /* VSX Vector Multiply-Subtract Type-A Double-Precision */                   \
+  V(xvmsubadp, XVMSUBADP, 0xF0000388)                                          \
+  /* VSX Vector Multiply-Subtract Type-A Single-Precision */                   \
+  V(xvmsubasp, XVMSUBASP, 0xF0000288)                                          \
+  /* VSX Vector Multiply-Subtract Type-M Double-Precision */                   \
+  V(xvmsubmdp, XVMSUBMDP, 0xF00003C8)                                          \
+  /* VSX Vector Multiply-Subtract Type-M Single-Precision */                   \
+  V(xvmsubmsp, XVMSUBMSP, 0xF00002C8)                                          \
+  /* VSX Vector Multiply Double-Precision */                                   \
+  V(xvmuldp, XVMULDP, 0xF0000380)                                              \
+  /* VSX Vector Multiply Single-Precision */                                   \
+  V(xvmulsp, XVMULSP, 0xF0000280)                                              \
+  /* VSX Vector Negative Multiply-Add Type-A Double-Precision */               \
+  V(xvnmaddadp, XVNMADDADP, 0xF0000708)                                        \
+  /* VSX Vector Negative Multiply-Add Type-A Single-Precision */               \
+  V(xvnmaddasp, XVNMADDASP, 0xF0000608)                                        \
+  /* VSX Vector Negative Multiply-Add Type-M Double-Precision */               \
+  V(xvnmaddmdp, XVNMADDMDP, 0xF0000748)                                        \
+  /* VSX Vector Negative Multiply-Add Type-M Single-Precision */               \
+  V(xvnmaddmsp, XVNMADDMSP, 0xF0000648)                                        \
+  /* VSX Vector Negative Multiply-Subtract Type-A Double-Precision */          \
+  V(xvnmsubadp, XVNMSUBADP, 0xF0000788)                                        \
+  /* VSX Vector Negative Multiply-Subtract Type-A Single-Precision */          \
+  V(xvnmsubasp, XVNMSUBASP, 0xF0000688)                                        \
+  /* VSX Vector Negative Multiply-Subtract Type-M Double-Precision */          \
+  V(xvnmsubmdp, XVNMSUBMDP, 0xF00007C8)                                        \
+  /* VSX Vector Negative Multiply-Subtract Type-M Single-Precision */          \
+  V(xvnmsubmsp, XVNMSUBMSP, 0xF00006C8)                                        \
+  /* VSX Vector Reciprocal Estimate Double-Precision */                        \
+  V(xvredp, XVREDP, 0xF0000368)                                                \
+  /* VSX Vector Reciprocal Estimate Single-Precision */                        \
+  V(xvresp, XVRESP, 0xF0000268)                                                \
+  /* VSX Vector Subtract Double-Precision */                                   \
+  V(xvsubdp, XVSUBDP, 0xF0000340)                                              \
+  /* VSX Vector Subtract Single-Precision */                                   \
+  V(xvsubsp, XVSUBSP, 0xF0000240)                                              \
+  /* VSX Vector Test for software Divide Double-Precision */                   \
+  V(xvtdivdp, XVTDIVDP, 0xF00003E8)                                            \
+  /* VSX Vector Test for software Divide Single-Precision */                   \
+  V(xvtdivsp, XVTDIVSP, 0xF00002E8)                                            \
+  /* VSX Logical AND */                                                        \
+  V(xxland, XXLAND, 0xF0000410)                                                \
+  /* VSX Logical AND with Complement */                                        \
+  V(xxlandc, XXLANDC, 0xF0000450)                                              \
+  /* VSX Logical Equivalence */                                                \
+  V(xxleqv, XXLEQV, 0xF00005D0)                                                \
+  /* VSX Logical NAND */                                                       \
+  V(xxlnand, XXLNAND, 0xF0000590)                                              \
+  /* VSX Logical NOR */                                                        \
+  V(xxlnor, XXLNOR, 0xF0000510)                                                \
+  /* VSX Logical OR */                                                         \
+  V(xxlor, XXLOR, 0xF0000490)                                                  \
+  /* VSX Logical OR with Complement */                                         \
+  V(xxlorc, XXLORC, 0xF0000550)                                                \
+  /* VSX Logical XOR */                                                        \
+  V(xxlxor, XXLXOR, 0xF00004D0)                                                \
+  /* VSX Merge High Word */                                                    \
+  V(xxmrghw, XXMRGHW, 0xF0000090)                                              \
+  /* VSX Merge Low Word */                                                     \
+  V(xxmrglw, XXMRGLW, 0xF0000190)                                              \
+  /* VSX Permute Doubleword Immediate */                                       \
+  V(xxpermdi, XXPERMDI, 0xF0000050)                                            \
+  /* VSX Shift Left Double by Word Immediate */                                \
+  V(xxsldwi, XXSLDWI, 0xF0000010)                                              \
+  /* VSX Splat Word */                                                         \
+  V(xxspltw, XXSPLTW, 0xF0000290)
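
Each PPC_*_OPCODE_LIST macro is an X-macro: it applies a caller-supplied V(lowercase_name, ENUM_NAME, encoding) to every instruction in its group, so a single table can generate enum values, disassembler cases, and the like. A sketch of one plausible consumer; the DECLARE_OPCODE name is illustrative, not necessarily what the header uses:

#include <cstdint>

// Sketch of consuming the X-macro table above.
#define DECLARE_OPCODE(name, opcode_name, opcode_value) \
  opcode_name = opcode_value,

enum OpcodeSketch : uint32_t {
  PPC_XX3_OPCODE_LIST(DECLARE_OPCODE)
};
#undef DECLARE_OPCODE
// Now XSADDDP == 0xF0000100, XXSPLTW == 0xF0000290, and so on.
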
 
-// Bits 10-1
-enum OpcodeExt1 {
-  MCRF = 0 << 1,      // Move Condition Register Field
-  BCLRX = 16 << 1,    // Branch Conditional Link Register
-  CRNOR = 33 << 1,    // Condition Register NOR
-  RFI = 50 << 1,      // Return from Interrupt
-  CRANDC = 129 << 1,  // Condition Register AND with Complement
-  ISYNC = 150 << 1,   // Instruction Synchronize
-  CRXOR = 193 << 1,   // Condition Register XOR
-  CRNAND = 225 << 1,  // Condition Register NAND
-  CRAND = 257 << 1,   // Condition Register AND
-  CREQV = 289 << 1,   // Condition Register Equivalent
-  CRORC = 417 << 1,   // Condition Register OR with Complement
-  CROR = 449 << 1,    // Condition Register OR
-  BCCTRX = 528 << 1   // Branch Conditional to Count Register
-};
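
The removed extended-opcode enums place their values in bits 10-1 (or 9-1), as the << 1 shifts show; extracting the EXT1 sub-opcode from an instruction word is the complementary mask:

#include <cstdint>

// Extract the extended-opcode field in bits 10..1 used by the removed
// OpcodeExt1 enum (e.g. BCLRX = 16 << 1).
inline uint32_t Ext1Opcode(uint32_t instr) { return instr & (0x3FFu << 1); }
// Ext1Opcode(16u << 1) yields BCLRX's encoding.
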
+#define PPC_Z23_OPCODE_LIST(V)                                                 \
+  /* Decimal Quantize */                                                       \
+  V(dqua, DQUA, 0xEC000006)                                                    \
+  /* Decimal Quantize Immediate */                                             \
+  V(dquai, DQUAI, 0xEC000086)                                                  \
+  /* Decimal Quantize Immediate Quad */                                        \
+  V(dquaiq, DQUAIQ, 0xFC000086)                                                \
+  /* Decimal Quantize Quad */                                                  \
+  V(dquaq, DQUAQ, 0xFC000006)                                                  \
+  /* Decimal Floating Round To FP Integer Without Inexact */                   \
+  V(drintn, DRINTN, 0xEC0001C6)                                                \
+  /* Decimal Floating Round To FP Integer Without Inexact Quad */              \
+  V(drintnq, DRINTNQ, 0xFC0001C6)                                              \
+  /* Decimal Floating Round To FP Integer With Inexact */                      \
+  V(drintx, DRINTX, 0xEC0000C6)                                                \
+  /* Decimal Floating Round To FP Integer With Inexact Quad */                 \
+  V(drintxq, DRINTXQ, 0xFC0000C6)                                              \
+  /* Decimal Floating Reround */                                               \
+  V(drrnd, DRRND, 0xEC000046)                                                  \
+  /* Decimal Floating Reround Quad */                                          \
+  V(drrndq, DRRNDQ, 0xFC000046)
 
-// Bits 9-1 or 10-1
-enum OpcodeExt2 {
-  CMP = 0 << 1,
-  TW = 4 << 1,
-  SUBFCX = 8 << 1,
-  ADDCX = 10 << 1,
-  MULHWUX = 11 << 1,
-  ISEL = 15 << 1,
-  MFCR = 19 << 1,
-  LWARX = 20 << 1,
-  LDX = 21 << 1,
-  LWZX = 23 << 1,  // load word zero w/ x-form
-  SLWX = 24 << 1,
-  CNTLZWX = 26 << 1,
-  SLDX = 27 << 1,
-  ANDX = 28 << 1,
-  CMPL = 32 << 1,
-  SUBFX = 40 << 1,
-  MFVSRD = 51 << 1,  // Move From VSR Doubleword
-  LDUX = 53 << 1,
-  DCBST = 54 << 1,
-  LWZUX = 55 << 1,  // load word zero w/ update x-form
-  CNTLZDX = 58 << 1,
-  ANDCX = 60 << 1,
-  MULHWX = 75 << 1,
-  DCBF = 86 << 1,
-  LBZX = 87 << 1,  // load byte zero w/ x-form
-  NEGX = 104 << 1,
-  MFVSRWZ = 115 << 1,  // Move From VSR Word And Zero
-  LBZUX = 119 << 1,    // load byte zero w/ update x-form
-  NORX = 124 << 1,
-  SUBFEX = 136 << 1,
-  ADDEX = 138 << 1,
-  STDX = 149 << 1,
-  STWX = 151 << 1,    // store word w/ x-form
-  MTVSRD = 179 << 1,  // Move To VSR Doubleword
-  STDUX = 181 << 1,
-  STWUX = 183 << 1,    // store word w/ update x-form
-                       /*
-      MTCRF
-      MTMSR
-      STWCXx
-      SUBFZEX
-    */
-  ADDZEX = 202 << 1,   // Add to Zero Extended
-                       /*
-     MTSR
-   */
-  MTVSRWA = 211 << 1,  // Move To VSR Word Algebraic
-  STBX = 215 << 1,     // store byte w/ x-form
-  MULLD = 233 << 1,    // Multiply Low Double Word
-  MULLW = 235 << 1,    // Multiply Low Word
-  MTVSRWZ = 243 << 1,  // Move To VSR Word And Zero
-  STBUX = 247 << 1,    // store byte w/ update x-form
-  ADDX = 266 << 1,     // Add
-  LHZX = 279 << 1,     // load half-word zero w/ x-form
-  LHZUX = 311 << 1,    // load half-word zero w/ update x-form
-  LWAX = 341 << 1,     // load word algebraic w/ x-form
-  LHAX = 343 << 1,     // load half-word algebraic w/ x-form
-  LHAUX = 375 << 1,    // load half-word algebraic w/ update x-form
-  XORX = 316 << 1,     // Exclusive OR
-  MFSPR = 339 << 1,    // Move from Special-Purpose-Register
-  POPCNTW = 378 << 1,  // Population Count Words
-  STHX = 407 << 1,     // store half-word w/ x-form
-  ORC = 412 << 1,      // Or with Complement
-  STHUX = 439 << 1,    // store half-word w/ update x-form
-  ORX = 444 << 1,      // Or
-  DIVDU = 457 << 1,    // Divide Double Word Unsigned
-  DIVWU = 459 << 1,    // Divide Word Unsigned
-  MTSPR = 467 << 1,    // Move to Special-Purpose-Register
-  DIVD = 489 << 1,     // Divide Double Word
-  DIVW = 491 << 1,     // Divide Word
-  POPCNTD = 506 << 1,  // Population Count Doubleword
+#define PPC_Z22_OPCODE_LIST(V)                                                 \
+  /* Decimal Floating Shift Coefficient Left Immediate */                      \
+  V(dscli, DSCLI, 0xEC000084)                                                  \
+  /* Decimal Floating Shift Coefficient Left Immediate Quad */                 \
+  V(dscliq, DSCLIQ, 0xFC000084)                                                \
+  /* Decimal Floating Shift Coefficient Right Immediate */                     \
+  V(dscri, DSCRI, 0xEC0000C4)                                                  \
+  /* Decimal Floating Shift Coefficient Right Immediate Quad */                \
+  V(dscriq, DSCRIQ, 0xFC0000C4)                                                \
+  /* Decimal Floating Test Data Class */                                       \
+  V(dtstdc, DTSTDC, 0xEC000184)                                                \
+  /* Decimal Floating Test Data Class Quad */                                  \
+  V(dtstdcq, DTSTDCQ, 0xFC000184)                                              \
+  /* Decimal Floating Test Data Group */                                       \
+  V(dtstdg, DTSTDG, 0xEC0001C4)                                                \
+  /* Decimal Floating Test Data Group Quad */                                  \
+  V(dtstdgq, DTSTDGQ, 0xFC0001C4)
 
-  // Below represent bits 10-1  (any value >= 512)
-  LDBRX = 532 << 1,   // load double word byte reversed w/ x-form
-  LWBRX = 534 << 1,   // load word byte reversed w/ x-form
-  LFSX = 535 << 1,    // load float-single w/ x-form
-  SRWX = 536 << 1,    // Shift Right Word
-  SRDX = 539 << 1,    // Shift Right Double Word
-  LFSUX = 567 << 1,   // load float-single w/ update x-form
-  SYNC = 598 << 1,    // Synchronize
-  LFDX = 599 << 1,    // load float-double w/ x-form
-  LFDUX = 631 << 1,   // load float-double w/ update X-form
-  STFSX = 663 << 1,   // store float-single w/ x-form
-  STFSUX = 695 << 1,  // store float-single w/ update x-form
-  STFDX = 727 << 1,   // store float-double w/ x-form
-  STFDUX = 759 << 1,  // store float-double w/ update x-form
-  LHBRX = 790 << 1,   // load half word byte reversed w/ x-form
-  SRAW = 792 << 1,    // Shift Right Algebraic Word
-  SRAD = 794 << 1,    // Shift Right Algebraic Double Word
-  SRAWIX = 824 << 1,  // Shift Right Algebraic Word Immediate
-  SRADIX = 413 << 2,  // Shift Right Algebraic Double Word Immediate
-  EXTSH = 922 << 1,   // Extend Sign Halfword
-  EXTSB = 954 << 1,   // Extend Sign Byte
-  ICBI = 982 << 1,    // Instruction Cache Block Invalidate
-  EXTSW = 986 << 1    // Extend Sign Word
-};
+#define PPC_XX2_OPCODE_LIST(V)                                                 \
+  /* Move To VSR Doubleword */                                                 \
+  V(mtvsrd, MTVSRD, 0x7C000166)                                                \
+  /* Move To VSR Word Algebraic */                                             \
+  V(mtvsrwa, MTVSRWA, 0x7C0001A6)                                              \
+  /* Move To VSR Word and Zero */                                              \
+  V(mtvsrwz, MTVSRWZ, 0x7C0001E6)                                              \
+  /* VSX Scalar Absolute Value Double-Precision */                             \
+  V(xsabsdp, XSABSDP, 0xF0000564)                                              \
+  /* VSX Scalar Convert Double-Precision to Single-Precision */                \
+  V(xscvdpsp, XSCVDPSP, 0xF0000424)                                            \
+  /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */    \
+  /* signalling */                                                             \
+  V(xscvdpspn, XSCVDPSPN, 0xF000042C)                                          \
+  /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */   \
+  /* Saturate */                                                               \
+  V(xscvdpsxds, XSCVDPSXDS, 0xF0000560)                                        \
+  /* VSX Scalar Convert Double-Precision to Signed Fixed-Point Word */         \
+  /* Saturate */                                                               \
+  V(xscvdpsxws, XSCVDPSXWS, 0xF0000160)                                        \
+  /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point */            \
+  /* Doubleword Saturate */                                                    \
+  V(xscvdpuxds, XSCVDPUXDS, 0xF0000520)                                        \
+  /* VSX Scalar Convert Double-Precision to Unsigned Fixed-Point Word */       \
+  /* Saturate */                                                               \
+  V(xscvdpuxws, XSCVDPUXWS, 0xF0000120)                                        \
+  /* VSX Scalar Convert Single-Precision to Double-Precision (p=1) */          \
+  V(xscvspdp, XSCVSPDP, 0xF0000524)                                            \
+  /* VSX Scalar Convert Single-Precision to Double-Precision format Non- */    \
+  /* signalling */                                                             \
+  V(xscvspdpn, XSCVSPDPN, 0xF000052C)                                          \
+  /* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */   \
+  V(xscvsxddp, XSCVSXDDP, 0xF00005E0)                                          \
+  /* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */   \
+  V(xscvsxdsp, XSCVSXDSP, 0xF00004E0)                                          \
+  /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Double- */          \
+  /* Precision */                                                              \
+  V(xscvuxddp, XSCVUXDDP, 0xF00005A0)                                          \
+  /* VSX Scalar Convert Unsigned Fixed-Point Doubleword to Single- */          \
+  /* Precision */                                                              \
+  V(xscvuxdsp, XSCVUXDSP, 0xF00004A0)                                          \
+  /* VSX Scalar Negative Absolute Value Double-Precision */                    \
+  V(xsnabsdp, XSNABSDP, 0xF00005A4)                                            \
+  /* VSX Scalar Negate Double-Precision */                                     \
+  V(xsnegdp, XSNEGDP, 0xF00005E4)                                              \
+  /* VSX Scalar Round to Double-Precision Integer */                           \
+  V(xsrdpi, XSRDPI, 0xF0000124)                                                \
+  /* VSX Scalar Round to Double-Precision Integer using Current rounding */    \
+  /* mode */                                                                   \
+  V(xsrdpic, XSRDPIC, 0xF00001AC)                                              \
+  /* VSX Scalar Round to Double-Precision Integer toward -Infinity */          \
+  V(xsrdpim, XSRDPIM, 0xF00001E4)                                              \
+  /* VSX Scalar Round to Double-Precision Integer toward +Infinity */          \
+  V(xsrdpip, XSRDPIP, 0xF00001A4)                                              \
+  /* VSX Scalar Round to Double-Precision Integer toward Zero */               \
+  V(xsrdpiz, XSRDPIZ, 0xF0000164)                                              \
+  /* VSX Scalar Round to Single-Precision */                                   \
+  V(xsrsp, XSRSP, 0xF0000464)                                                  \
+  /* VSX Scalar Reciprocal Square Root Estimate Double-Precision */            \
+  V(xsrsqrtedp, XSRSQRTEDP, 0xF0000128)                                        \
+  /* VSX Scalar Reciprocal Square Root Estimate Single-Precision */            \
+  V(xsrsqrtesp, XSRSQRTESP, 0xF0000028)                                        \
+  /* VSX Scalar Square Root Double-Precision */                                \
+  V(xssqrtdp, XSSQRTDP, 0xF000012C)                                            \
+  /* VSX Scalar Square Root Single-Precision */                                \
+  V(xssqrtsp, XSSQRTSP, 0xF000002C)                                            \
+  /* VSX Scalar Test for software Square Root Double-Precision */              \
+  V(xstsqrtdp, XSTSQRTDP, 0xF00001A8)                                          \
+  /* VSX Vector Absolute Value Double-Precision */                             \
+  V(xvabsdp, XVABSDP, 0xF0000764)                                              \
+  /* VSX Vector Absolute Value Single-Precision */                             \
+  V(xvabssp, XVABSSP, 0xF0000664)                                              \
+  /* VSX Vector Convert Double-Precision to Single-Precision */                \
+  V(xvcvdpsp, XVCVDPSP, 0xF0000624)                                            \
+  /* VSX Vector Convert Double-Precision to Signed Fixed-Point Doubleword */   \
+  /* Saturate */                                                               \
+  V(xvcvdpsxds, XVCVDPSXDS, 0xF0000760)                                        \
+  /* VSX Vector Convert Double-Precision to Signed Fixed-Point Word */         \
+  /* Saturate */                                                               \
+  V(xvcvdpsxws, XVCVDPSXWS, 0xF0000360)                                        \
+  /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point */            \
+  /* Doubleword Saturate */                                                    \
+  V(xvcvdpuxds, XVCVDPUXDS, 0xF0000720)                                        \
+  /* VSX Vector Convert Double-Precision to Unsigned Fixed-Point Word */       \
+  /* Saturate */                                                               \
+  V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320)                                        \
+  /* VSX Vector Convert Single-Precision to Double-Precision */                \
+  V(xvcvspdp, XVCVSPDP, 0xF0000724)                                            \
+  /* VSX Vector Convert Single-Precision to Signed Fixed-Point Doubleword */   \
+  /* Saturate */                                                               \
+  V(xvcvspsxds, XVCVSPSXDS, 0xF0000660)                                        \
+  /* VSX Vector Convert Single-Precision to Signed Fixed-Point Word */         \
+  /* Saturate */                                                               \
+  V(xvcvspsxws, XVCVSPSXWS, 0xF0000260)                                        \
+  /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point */            \
+  /* Doubleword Saturate */                                                    \
+  V(xvcvspuxds, XVCVSPUXDS, 0xF0000620)                                        \
+  /* VSX Vector Convert Single-Precision to Unsigned Fixed-Point Word */       \
+  /* Saturate */                                                               \
+  V(xvcvspuxws, XVCVSPUXWS, 0xF0000220)                                        \
+  /* VSX Vector Convert Signed Fixed-Point Doubleword to Double-Precision */   \
+  V(xvcvsxddp, XVCVSXDDP, 0xF00007E0)                                          \
+  /* VSX Vector Convert Signed Fixed-Point Doubleword to Single-Precision */   \
+  V(xvcvsxdsp, XVCVSXDSP, 0xF00006E0)                                          \
+  /* VSX Vector Convert Signed Fixed-Point Word to Double-Precision */         \
+  V(xvcvsxwdp, XVCVSXWDP, 0xF00003E0)                                          \
+  /* VSX Vector Convert Signed Fixed-Point Word to Single-Precision */         \
+  V(xvcvsxwsp, XVCVSXWSP, 0xF00002E0)                                          \
+  /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Double- */          \
+  /* Precision */                                                              \
+  V(xvcvuxddp, XVCVUXDDP, 0xF00007A0)                                          \
+  /* VSX Vector Convert Unsigned Fixed-Point Doubleword to Single- */          \
+  /* Precision */                                                              \
+  V(xvcvuxdsp, XVCVUXDSP, 0xF00006A0)                                          \
+  /* VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision */       \
+  V(xvcvuxwdp, XVCVUXWDP, 0xF00003A0)                                          \
+  /* VSX Vector Convert Unsigned Fixed-Point Word to Single-Precision */       \
+  V(xvcvuxwsp, XVCVUXWSP, 0xF00002A0)                                          \
+  /* VSX Vector Negative Absolute Value Double-Precision */                    \
+  V(xvnabsdp, XVNABSDP, 0xF00007A4)                                            \
+  /* VSX Vector Negative Absolute Value Single-Precision */                    \
+  V(xvnabssp, XVNABSSP, 0xF00006A4)                                            \
+  /* VSX Vector Negate Double-Precision */                                     \
+  V(xvnegdp, XVNEGDP, 0xF00007E4)                                              \
+  /* VSX Vector Negate Single-Precision */                                     \
+  V(xvnegsp, XVNEGSP, 0xF00006E4)                                              \
+  /* VSX Vector Round to Double-Precision Integer */                           \
+  V(xvrdpi, XVRDPI, 0xF0000324)                                                \
+  /* VSX Vector Round to Double-Precision Integer using Current rounding */    \
+  /* mode */                                                                   \
+  V(xvrdpic, XVRDPIC, 0xF00003AC)                                              \
+  /* VSX Vector Round to Double-Precision Integer toward -Infinity */          \
+  V(xvrdpim, XVRDPIM, 0xF00003E4)                                              \
+  /* VSX Vector Round to Double-Precision Integer toward +Infinity */          \
+  V(xvrdpip, XVRDPIP, 0xF00003A4)                                              \
+  /* VSX Vector Round to Double-Precision Integer toward Zero */               \
+  V(xvrdpiz, XVRDPIZ, 0xF0000364)                                              \
+  /* VSX Vector Round to Single-Precision Integer */                           \
+  V(xvrspi, XVRSPI, 0xF0000224)                                                \
+  /* VSX Vector Round to Single-Precision Integer using Current rounding */    \
+  /* mode */                                                                   \
+  V(xvrspic, XVRSPIC, 0xF00002AC)                                              \
+  /* VSX Vector Round to Single-Precision Integer toward -Infinity */          \
+  V(xvrspim, XVRSPIM, 0xF00002E4)                                              \
+  /* VSX Vector Round to Single-Precision Integer toward +Infinity */          \
+  V(xvrspip, XVRSPIP, 0xF00002A4)                                              \
+  /* VSX Vector Round to Single-Precision Integer toward Zero */               \
+  V(xvrspiz, XVRSPIZ, 0xF0000264)                                              \
+  /* VSX Vector Reciprocal Square Root Estimate Double-Precision */            \
+  V(xvrsqrtedp, XVRSQRTEDP, 0xF0000328)                                        \
+  /* VSX Vector Reciprocal Square Root Estimate Single-Precision */            \
+  V(xvrsqrtesp, XVRSQRTESP, 0xF0000228)                                        \
+  /* VSX Vector Square Root Double-Precision */                                \
+  V(xvsqrtdp, XVSQRTDP, 0xF000032C)                                            \
+  /* VSX Vector Square Root Single-Precision */                                \
+  V(xvsqrtsp, XVSQRTSP, 0xF000022C)                                            \
+  /* VSX Vector Test for software Square Root Double-Precision */              \
+  V(xvtsqrtdp, XVTSQRTDP, 0xF00003A8)                                          \
+  /* VSX Vector Test for software Square Root Single-Precision */              \
+  V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8)
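+
+  (Editor's note: the third column in each entry is the full 32-bit
+  instruction pattern with all operand fields zeroed. On PPC the primary
+  opcode occupies the six most significant bits, so a decoder can recover
+  it with a shift and then match the remaining bits against these patterns.
+  A hedged sketch, with primary_opcode() as an illustrative helper rather
+  than anything from this header:
+
+  #include <cstdint>
+  #include <cstdio>
+
+  constexpr uint32_t primary_opcode(uint32_t pattern) {
+    return pattern >> 26;  // bits 0-5 in PPC's big-endian bit numbering
+  }
+
+  // VSX scalar/vector ops above (0xF000xxxx) share primary opcode 60,
+  // while the mtvsr* moves (0x7C00xxxx) use primary opcode 31.
+  static_assert(primary_opcode(0xF0000564) == 60, "xsabsdp is a VSX op");
+  static_assert(primary_opcode(0x7C000166) == 31, "mtvsrd uses opcode 31");
+
+  int main() {
+    std::printf("xsabsdp primary opcode: %u\n",
+                static_cast<unsigned>(primary_opcode(0xF0000564)));
+    return 0;
+  }
+
+  The same split is visible in the decimal lists above: word forms encode
+  primary opcode 59 (0xEC...) and quad forms primary opcode 63 (0xFC...).)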
 
-// Some use Bits 10-1 and other only 5-1 for the opcode
-enum OpcodeExt4 {
-  // Bits 5-1
-  FDIV = 18 << 1,   // Floating Divide
-  FSUB = 20 << 1,   // Floating Subtract
-  FADD = 21 << 1,   // Floating Add
-  FSQRT = 22 << 1,  // Floating Square Root
-  FSEL = 23 << 1,   // Floating Select
-  FMUL = 25 << 1,   // Floating Multiply
-  FMSUB = 28 << 1,  // Floating Multiply-Subtract
-  FMADD = 29 << 1,  // Floating Multiply-Add
+#define PPC_EVX_OPCODE_LIST(V)                                                 \
+  /* Vector Load Double Word into Double Word by External PID Indexed */       \
+  V(evlddepx, EVLDDEPX, 0x7C00063E)                                            \
+  /* Vector Store Double of Double by External PID Indexed */                  \
+  V(evstddepx, EVSTDDEPX, 0x7C00073E)                                          \
+  /* Bit Reversed Increment */                                                 \
+  V(brinc, BRINC, 0x1000020F)                                                  \
+  /* Vector Absolute Value */                                                  \
+  V(evabs, EVABS, 0x10000208)                                                  \
+  /* Vector Add Immediate Word */                                              \
+  V(evaddiw, EVADDIW, 0x10000202)                                              \
+  /* Vector Add Signed, Modulo, Integer to Accumulator Word */                 \
+  V(evaddsmiaaw, EVADDSMIAAW, 0x100004C9)                                      \
+  /* Vector Add Signed, Saturate, Integer to Accumulator Word */               \
+  V(evaddssiaaw, EVADDSSIAAW, 0x100004C1)                                      \
+  /* Vector Add Unsigned, Modulo, Integer to Accumulator Word */               \
+  V(evaddumiaaw, EVADDUMIAAW, 0x100004C8)                                      \
+  /* Vector Add Unsigned, Saturate, Integer to Accumulator Word */             \
+  V(evaddusiaaw, EVADDUSIAAW, 0x100004C0)                                      \
+  /* Vector Add Word */                                                        \
+  V(evaddw, EVADDW, 0x10000200)                                                \
+  /* Vector AND */                                                             \
+  V(evand, EVAND, 0x10000211)                                                  \
+  /* Vector AND with Complement */                                             \
+  V(evandc, EVANDC, 0x10000212)                                                \
+  /* Vector Compare Equal */                                                   \
+  V(evcmpeq, EVCMPEQ, 0x10000234)                                              \
+  /* Vector Compare Greater Than Signed */                                     \
+  V(evcmpgts, EVCMPGTS, 0x10000231)                                            \
+  /* Vector Compare Greater Than Unsigned */                                   \
+  V(evcmpgtu, EVCMPGTU, 0x10000230)                                            \
+  /* Vector Compare Less Than Signed */                                        \
+  V(evcmplts, EVCMPLTS, 0x10000233)                                            \
+  /* Vector Compare Less Than Unsigned */                                      \
+  V(evcmpltu, EVCMPLTU, 0x10000232)                                            \
+  /* Vector Count Leading Signed Bits Word */                                  \
+  V(evcntlsw, EVCNTLSW, 0x1000020E)                                            \
+  /* Vector Count Leading Zeros Word */                                        \
+  V(evcntlzw, EVCNTLZW, 0x1000020D)                                            \
+  /* Vector Divide Word Signed */                                              \
+  V(evdivws, EVDIVWS, 0x100004C6)                                              \
+  /* Vector Divide Word Unsigned */                                            \
+  V(evdivwu, EVDIVWU, 0x100004C7)                                              \
+  /* Vector Equivalent */                                                      \
+  V(eveqv, EVEQV, 0x10000219)                                                  \
+  /* Vector Extend Sign Byte */                                                \
+  V(evextsb, EVEXTSB, 0x1000020A)                                              \
+  /* Vector Extend Sign Half Word */                                           \
+  V(evextsh, EVEXTSH, 0x1000020B)                                              \
+  /* Vector Load Double Word into Double Word */                               \
+  V(evldd, EVLDD, 0x10000301)                                                  \
+  /* Vector Load Double Word into Double Word Indexed */                       \
+  V(evlddx, EVLDDX, 0x10000300)                                                \
+  /* Vector Load Double into Four Half Words */                                \
+  V(evldh, EVLDH, 0x10000305)                                                  \
+  /* Vector Load Double into Four Half Words Indexed */                        \
+  V(evldhx, EVLDHX, 0x10000304)                                                \
+  /* Vector Load Double into Two Words */                                      \
+  V(evldw, EVLDW, 0x10000303)                                                  \
+  /* Vector Load Double into Two Words Indexed */                              \
+  V(evldwx, EVLDWX, 0x10000302)                                                \
+  /* Vector Load Half Word into Half Words Even and Splat */                   \
+  V(evlhhesplat, EVLHHESPLAT, 0x10000309)                                      \
+  /* Vector Load Half Word into Half Words Even and Splat Indexed */           \
+  V(evlhhesplatx, EVLHHESPLATX, 0x10000308)                                    \
+  /* Vector Load Half Word into Half Word Odd Signed and Splat */              \
+  V(evlhhossplat, EVLHHOSSPLAT, 0x1000030F)                                    \
+  /* Vector Load Half Word into Half Word Odd Signed and Splat Indexed */      \
+  V(evlhhossplatx, EVLHHOSSPLATX, 0x1000030E)                                  \
+  /* Vector Load Half Word into Half Word Odd Unsigned and Splat */            \
+  V(evlhhousplat, EVLHHOUSPLAT, 0x1000030D)                                    \
+  /* Vector Load Half Word into Half Word Odd Unsigned and Splat Indexed */    \
+  V(evlhhousplatx, EVLHHOUSPLATX, 0x1000030C)                                  \
+  /* Vector Load Word into Two Half Words Even */                              \
+  V(evlwhe, EVLWHE, 0x10000311)                                                \
+  /* Vector Load Word into Two Half Words Even Indexed */                      \
+  V(evlwhex, EVLWHEX, 0x10000310)                                              \
+  /* Vector Load Word into Two Half Words Odd Signed (with sign extension) */  \
+  V(evlwhos, EVLWHOS, 0x10000317)                                              \
+  /* Vector Load Word into Two Half Words Odd Signed Indexed (with sign */     \
+  /* extension) */                                                             \
+  V(evlwhosx, EVLWHOSX, 0x10000316)                                            \
+  /* Vector Load Word into Two Half Words Odd Unsigned (zero-extended) */      \
+  V(evlwhou, EVLWHOU, 0x10000315)                                              \
+  /* Vector Load Word into Two Half Words Odd Unsigned Indexed (zero- */       \
+  /* extended) */                                                              \
+  V(evlwhoux, EVLWHOUX, 0x10000314)                                            \
+  /* Vector Load Word into Two Half Words and Splat */                         \
+  V(evlwhsplat, EVLWHSPLAT, 0x1000031D)                                        \
+  /* Vector Load Word into Two Half Words and Splat Indexed */                 \
+  V(evlwhsplatx, EVLWHSPLATX, 0x1000031C)                                      \
+  /* Vector Load Word into Word and Splat */                                   \
+  V(evlwwsplat, EVLWWSPLAT, 0x10000319)                                        \
+  /* Vector Load Word into Word and Splat Indexed */                           \
+  V(evlwwsplatx, EVLWWSPLATX, 0x10000318)                                      \
+  /* Vector Merge High */                                                      \
+  V(evmergehi, EVMERGEHI, 0x1000022C)                                          \
+  /* Vector Merge High/Low */                                                  \
+  V(evmergehilo, EVMERGEHILO, 0x1000022E)                                      \
+  /* Vector Merge Low */                                                       \
+  V(evmergelo, EVMERGELO, 0x1000022D)                                          \
+  /* Vector Merge Low/High */                                                  \
+  V(evmergelohi, EVMERGELOHI, 0x1000022F)                                      \
+  /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */  \
+  /* and Accumulate */                                                         \
+  V(evmhegsmfaa, EVMHEGSMFAA, 0x1000052B)                                      \
+  /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Fractional */  \
+  /* and Accumulate Negative */                                                \
+  V(evmhegsmfan, EVMHEGSMFAN, 0x100005AB)                                      \
+  /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */     \
+  /* and Accumulate */                                                         \
+  V(evmhegsmiaa, EVMHEGSMIAA, 0x10000529)                                      \
+  /* Vector Multiply Half Words, Even, Guarded, Signed, Modulo, Integer */     \
+  /* and Accumulate Negative */                                                \
+  V(evmhegsmian, EVMHEGSMIAN, 0x100005A9)                                      \
+  /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */   \
+  /* and Accumulate */                                                         \
+  V(evmhegumiaa, EVMHEGUMIAA, 0x10000528)                                      \
+  /* Vector Multiply Half Words, Even, Guarded, Unsigned, Modulo, Integer */   \
+  /* and Accumulate Negative */                                                \
+  V(evmhegumian, EVMHEGUMIAN, 0x100005A8)                                      \
+  /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional */           \
+  V(evmhesmf, EVMHESMF, 0x1000040B)                                            \
+  /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional to */        \
+  /* Accumulator */                                                            \
+  V(evmhesmfa, EVMHESMFA, 0x1000042B)                                          \
+  /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */       \
+  /* Accumulate into Words */                                                  \
+  V(evmhesmfaaw, EVMHESMFAAW, 0x1000050B)                                      \
+  /* Vector Multiply Half Words, Even, Signed, Modulo, Fractional and */       \
+  /* Accumulate Negative into Words */                                         \
+  V(evmhesmfanw, EVMHESMFANW, 0x1000058B)                                      \
+  /* Vector Multiply Half Words, Even, Signed, Modulo, Integer */              \
+  V(evmhesmi, EVMHESMI, 0x10000409)                                            \
+  /* Vector Multiply Half Words, Even, Signed, Modulo, Integer to */           \
+  /* Accumulator */                                                            \
+  V(evmhesmia, EVMHESMIA, 0x10000429)                                          \
+  /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */          \
+  /* Accumulate into Words */                                                  \
+  V(evmhesmiaaw, EVMHESMIAAW, 0x10000509)                                      \
+  /* Vector Multiply Half Words, Even, Signed, Modulo, Integer and */          \
+  /* Accumulate Negative into Words */                                         \
+  V(evmhesmianw, EVMHESMIANW, 0x10000589)                                      \
+  /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional */         \
+  V(evmhessf, EVMHESSF, 0x10000403)                                            \
+  /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional to */      \
+  /* Accumulator */                                                            \
+  V(evmhessfa, EVMHESSFA, 0x10000423)                                          \
+  /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */     \
+  /* Accumulate into Words */                                                  \
+  V(evmhessfaaw, EVMHESSFAAW, 0x10000503)                                      \
+  /* Vector Multiply Half Words, Even, Signed, Saturate, Fractional and */     \
+  /* Accumulate Negative into Words */                                         \
+  V(evmhessfanw, EVMHESSFANW, 0x10000583)                                      \
+  /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */        \
+  /* Accumulate into Words */                                                  \
+  V(evmhessiaaw, EVMHESSIAAW, 0x10000501)                                      \
+  /* Vector Multiply Half Words, Even, Signed, Saturate, Integer and */        \
+  /* Accumulate Negative into Words */                                         \
+  V(evmhessianw, EVMHESSIANW, 0x10000581)                                      \
+  /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer */            \
+  V(evmheumi, EVMHEUMI, 0x10000408)                                            \
+  /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer to */         \
+  /* Accumulator */                                                            \
+  V(evmheumia, EVMHEUMIA, 0x10000428)                                          \
+  /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */        \
+  /* Accumulate into Words */                                                  \
+  V(evmheumiaaw, EVMHEUMIAAW, 0x10000508)                                      \
+  /* Vector Multiply Half Words, Even, Unsigned, Modulo, Integer and */        \
+  /* Accumulate Negative into Words */                                         \
+  V(evmheumianw, EVMHEUMIANW, 0x10000588)                                      \
+  /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */      \
+  /* Accumulate into Words */                                                  \
+  V(evmheusiaaw, EVMHEUSIAAW, 0x10000500)                                      \
+  /* Vector Multiply Half Words, Even, Unsigned, Saturate, Integer and */      \
+  /* Accumulate Negative into Words */                                         \
+  V(evmheusianw, EVMHEUSIANW, 0x10000580)                                      \
+  /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */   \
+  /* and Accumulate */                                                         \
+  V(evmhogsmfaa, EVMHOGSMFAA, 0x1000052F)                                      \
+  /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Fractional */   \
+  /* and Accumulate Negative */                                                \
+  V(evmhogsmfan, EVMHOGSMFAN, 0x100005AF)                                      \
+  /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer, */     \
+  /* and Accumulate */                                                         \
+  V(evmhogsmiaa, EVMHOGSMIAA, 0x1000052D)                                      \
+  /* Vector Multiply Half Words, Odd, Guarded, Signed, Modulo, Integer and */  \
+  /* Accumulate Negative */                                                    \
+  V(evmhogsmian, EVMHOGSMIAN, 0x100005AD)                                      \
+  /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */    \
+  /* and Accumulate */                                                         \
+  V(evmhogumiaa, EVMHOGUMIAA, 0x1000052C)                                      \
+  /* Vector Multiply Half Words, Odd, Guarded, Unsigned, Modulo, Integer */    \
+  /* and Accumulate Negative */                                                \
+  V(evmhogumian, EVMHOGUMIAN, 0x100005AC)                                      \
+  /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional */            \
+  V(evmhosmf, EVMHOSMF, 0x1000040F)                                            \
+  /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional to */         \
+  /* Accumulator */                                                            \
+  V(evmhosmfa, EVMHOSMFA, 0x1000042F)                                          \
+  /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */        \
+  /* Accumulate into Words */                                                  \
+  V(evmhosmfaaw, EVMHOSMFAAW, 0x1000050F)                                      \
+  /* Vector Multiply Half Words, Odd, Signed, Modulo, Fractional and */        \
+  /* Accumulate Negative into Words */                                         \
+  V(evmhosmfanw, EVMHOSMFANW, 0x1000058F)                                      \
+  /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer */               \
+  V(evmhosmi, EVMHOSMI, 0x1000040D)                                            \
+  /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer to */            \
+  /* Accumulator */                                                            \
+  V(evmhosmia, EVMHOSMIA, 0x1000042D)                                          \
+  /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */           \
+  /* Accumulate into Words */                                                  \
+  V(evmhosmiaaw, EVMHOSMIAAW, 0x1000050D)                                      \
+  /* Vector Multiply Half Words, Odd, Signed, Modulo, Integer and */           \
+  /* Accumulate Negative into Words */                                         \
+  V(evmhosmianw, EVMHOSMIANW, 0x1000058D)                                      \
+  /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional */          \
+  V(evmhossf, EVMHOSSF, 0x10000407)                                            \
+  /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional to */       \
+  /* Accumulator */                                                            \
+  V(evmhossfa, EVMHOSSFA, 0x10000427)                                          \
+  /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */      \
+  /* Accumulate into Words */                                                  \
+  V(evmhossfaaw, EVMHOSSFAAW, 0x10000507)                                      \
+  /* Vector Multiply Half Words, Odd, Signed, Saturate, Fractional and */      \
+  /* Accumulate Negative into Words */                                         \
+  V(evmhossfanw, EVMHOSSFANW, 0x10000587)                                      \
+  /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */         \
+  /* Accumulate into Words */                                                  \
+  V(evmhossiaaw, EVMHOSSIAAW, 0x10000505)                                      \
+  /* Vector Multiply Half Words, Odd, Signed, Saturate, Integer and */         \
+  /* Accumulate Negative into Words */                                         \
+  V(evmhossianw, EVMHOSSIANW, 0x10000585)                                      \
+  /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer */             \
+  V(evmhoumi, EVMHOUMI, 0x1000040C)                                            \
+  /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer to */          \
+  /* Accumulator */                                                            \
+  V(evmhoumia, EVMHOUMIA, 0x1000042C)                                          \
+  /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */         \
+  /* Accumulate into Words */                                                  \
+  V(evmhoumiaaw, EVMHOUMIAAW, 0x1000050C)                                      \
+  /* Vector Multiply Half Words, Odd, Unsigned, Modulo, Integer and */         \
+  /* Accumulate Negative into Words */                                         \
+  V(evmhoumianw, EVMHOUMIANW, 0x1000058C)                                      \
+  /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */       \
+  /* Accumulate into Words */                                                  \
+  V(evmhousiaaw, EVMHOUSIAAW, 0x10000504)                                      \
+  /* Vector Multiply Half Words, Odd, Unsigned, Saturate, Integer and */       \
+  /* Accumulate Negative into Words */                                         \
+  V(evmhousianw, EVMHOUSIANW, 0x10000584)                                      \
+  /* Initialize Accumulator */                                                 \
+  V(evmra, EVMRA, 0x100004C4)                                                  \
+  /* Vector Multiply Word High Signed, Modulo, Fractional */                   \
+  V(evmwhsmf, EVMWHSMF, 0x1000044F)                                            \
+  /* Vector Multiply Word High Signed, Modulo, Fractional to Accumulator */    \
+  V(evmwhsmfa, EVMWHSMFA, 0x1000046F)                                          \
+  /* Vector Multiply Word High Signed, Modulo, Integer */                      \
+  V(evmwhsmi, EVMWHSMI, 0x1000044D)                                            \
+  /* Vector Multiply Word High Signed, Modulo, Integer to Accumulator */       \
+  V(evmwhsmia, EVMWHSMIA, 0x1000046D)                                          \
+  /* Vector Multiply Word High Signed, Saturate, Fractional */                 \
+  V(evmwhssf, EVMWHSSF, 0x10000447)                                            \
+  /* Vector Multiply Word High Signed, Saturate, Fractional to Accumulator */  \
+  V(evmwhssfa, EVMWHSSFA, 0x10000467)                                          \
+  /* Vector Multiply Word High Unsigned, Modulo, Integer */                    \
+  V(evmwhumi, EVMWHUMI, 0x1000044C)                                            \
+  /* Vector Multiply Word High Unsigned, Modulo, Integer to Accumulator */     \
+  V(evmwhumia, EVMWHUMIA, 0x1000046C)                                          \
+  /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate in */     \
+  /* Words */                                                                  \
+  V(evmwlsmiaaw, EVMWLSMIAAW, 0x10000549)                                      \
+  /* Vector Multiply Word Low Signed, Modulo, Integer and Accumulate */        \
+  /* Negative in Words */                                                      \
+  V(evmwlsmianw, EVMWLSMIANW, 0x100005C9)                                      \
+  /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate in */   \
+  /* Words */                                                                  \
+  V(evmwlssiaaw, EVMWLSSIAAW, 0x10000541)                                      \
+  /* Vector Multiply Word Low Signed, Saturate, Integer and Accumulate */      \
+  /* Negative in Words */                                                      \
+  V(evmwlssianw, EVMWLSSIANW, 0x100005C1)                                      \
+  /* Vector Multiply Word Low Unsigned, Modulo, Integer */                     \
+  V(evmwlumi, EVMWLUMI, 0x10000448)                                            \
+  /* Vector Multiply Word Low Unsigned, Modulo, Integer to Accumulator */      \
+  V(evmwlumia, EVMWLUMIA, 0x10000468)                                          \
+  /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate in */   \
+  /* Words */                                                                  \
+  V(evmwlumiaaw, EVMWLUMIAAW, 0x10000548)                                      \
+  /* Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate */      \
+  /* Negative in Words */                                                      \
+  V(evmwlumianw, EVMWLUMIANW, 0x100005C8)                                      \
+  /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */    \
+  /* in Words */                                                               \
+  V(evmwlusiaaw, EVMWLUSIAAW, 0x10000540)                                      \
+  /* Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate */    \
+  /* Negative in Words */                                                      \
+  V(evmwlusianw, EVMWLUSIANW, 0x100005C0)                                      \
+  /* Vector Multiply Word Signed, Modulo, Fractional */                        \
+  V(evmwsmf, EVMWSMF, 0x1000045B)                                              \
+  /* Vector Multiply Word Signed, Modulo, Fractional to Accumulator */         \
+  V(evmwsmfa, EVMWSMFA, 0x1000047B)                                            \
+  /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */         \
+  V(evmwsmfaa, EVMWSMFAA, 0x1000055B)                                          \
+  /* Vector Multiply Word Signed, Modulo, Fractional and Accumulate */         \
+  /* Negative */                                                               \
+  V(evmwsmfan, EVMWSMFAN, 0x100005DB)                                          \
+  /* Vector Multiply Word Signed, Modulo, Integer */                           \
+  V(evmwsmi, EVMWSMI, 0x10000459)                                              \
+  /* Vector Multiply Word Signed, Modulo, Integer to Accumulator */            \
+  V(evmwsmia, EVMWSMIA, 0x10000479)                                            \
+  /* Vector Multiply Word Signed, Modulo, Integer and Accumulate */            \
+  V(evmwsmiaa, EVMWSMIAA, 0x10000559)                                          \
+  /* Vector Multiply Word Signed, Modulo, Integer and Accumulate Negative */   \
+  V(evmwsmian, EVMWSMIAN, 0x100005D9)                                          \
+  /* Vector Multiply Word Signed, Saturate, Fractional */                      \
+  V(evmwssf, EVMWSSF, 0x10000453)                                              \
+  /* Vector Multiply Word Signed, Saturate, Fractional to Accumulator */       \
+  V(evmwssfa, EVMWSSFA, 0x10000473)                                            \
+  /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */       \
+  V(evmwssfaa, EVMWSSFAA, 0x10000553)                                          \
+  /* Vector Multiply Word Signed, Saturate, Fractional and Accumulate */       \
+  /* Negative */                                                               \
+  V(evmwssfan, EVMWSSFAN, 0x100005D3)                                          \
+  /* Vector Multiply Word Unsigned, Modulo, Integer */                         \
+  V(evmwumi, EVMWUMI, 0x10000458)                                              \
+  /* Vector Multiply Word Unsigned, Modulo, Integer to Accumulator */          \
+  V(evmwumia, EVMWUMIA, 0x10000478)                                            \
+  /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */          \
+  V(evmwumiaa, EVMWUMIAA, 0x10000558)                                          \
+  /* Vector Multiply Word Unsigned, Modulo, Integer and Accumulate */          \
+  /* Negative */                                                               \
+  V(evmwumian, EVMWUMIAN, 0x100005D8)                                          \
+  /* Vector NAND */                                                            \
+  V(evnand, EVNAND, 0x1000021E)                                                \
+  /* Vector Negate */                                                          \
+  V(evneg, EVNEG, 0x10000209)                                                  \
+  /* Vector NOR */                                                             \
+  V(evnor, EVNOR, 0x10000218)                                                  \
+  /* Vector OR */                                                              \
+  V(evor, EVOR, 0x10000217)                                                    \
+  /* Vector OR with Complement */                                              \
+  V(evorc, EVORC, 0x1000021B)                                                  \
+  /* Vector Rotate Left Word */                                                \
+  V(evrlw, EVRLW, 0x10000228)                                                  \
+  /* Vector Rotate Left Word Immediate */                                      \
+  V(evrlwi, EVRLWI, 0x1000022A)                                                \
+  /* Vector Round Word */                                                      \
+  V(evrndw, EVRNDW, 0x1000020C)                                                \
+  /* Vector Shift Left Word */                                                 \
+  V(evslw, EVSLW, 0x10000224)                                                  \
+  /* Vector Shift Left Word Immediate */                                       \
+  V(evslwi, EVSLWI, 0x10000226)                                                \
+  /* Vector Splat Fractional Immediate */                                      \
+  V(evsplatfi, EVSPLATFI, 0x1000022B)                                          \
+  /* Vector Splat Immediate */                                                 \
+  V(evsplati, EVSPLATI, 0x10000229)                                            \
+  /* Vector Shift Right Word Immediate Signed */                               \
+  V(evsrwis, EVSRWIS, 0x10000223)                                              \
+  /* Vector Shift Right Word Immediate Unsigned */                             \
+  V(evsrwiu, EVSRWIU, 0x10000222)                                              \
+  /* Vector Shift Right Word Signed */                                         \
+  V(evsrws, EVSRWS, 0x10000221)                                                \
+  /* Vector Shift Right Word Unsigned */                                       \
+  V(evsrwu, EVSRWU, 0x10000220)                                                \
+  /* Vector Store Double of Double */                                          \
+  V(evstdd, EVSTDD, 0x10000321)                                                \
+  /* Vector Store Double of Double Indexed */                                  \
+  V(evstddx, EVSTDDX, 0x10000320)                                              \
+  /* Vector Store Double of Four Half Words */                                 \
+  V(evstdh, EVSTDH, 0x10000325)                                                \
+  /* Vector Store Double of Four Half Words Indexed */                         \
+  V(evstdhx, EVSTDHX, 0x10000324)                                              \
+  /* Vector Store Double of Two Words */                                       \
+  V(evstdw, EVSTDW, 0x10000323)                                                \
+  /* Vector Store Double of Two Words Indexed */                               \
+  V(evstdwx, EVSTDWX, 0x10000322)                                              \
+  /* Vector Store Word of Two Half Words from Even */                          \
+  V(evstwhe, EVSTWHE, 0x10000331)                                              \
+  /* Vector Store Word of Two Half Words from Even Indexed */                  \
+  V(evstwhex, EVSTWHEX, 0x10000330)                                            \
+  /* Vector Store Word of Two Half Words from Odd */                           \
+  V(evstwho, EVSTWHO, 0x10000335)                                              \
+  /* Vector Store Word of Two Half Words from Odd Indexed */                   \
+  V(evstwhox, EVSTWHOX, 0x10000334)                                            \
+  /* Vector Store Word of Word from Even */                                    \
+  V(evstwwe, EVSTWWE, 0x10000339)                                              \
+  /* Vector Store Word of Word from Even Indexed */                            \
+  V(evstwwex, EVSTWWEX, 0x10000338)                                            \
+  /* Vector Store Word of Word from Odd */                                     \
+  V(evstwwo, EVSTWWO, 0x1000033D)                                              \
+  /* Vector Store Word of Word from Odd Indexed */                             \
+  V(evstwwox, EVSTWWOX, 0x1000033C)                                            \
+  /* Vector Subtract Signed, Modulo, Integer to Accumulator Word */            \
+  V(evsubfsmiaaw, EVSUBFSMIAAW, 0x100004CB)                                    \
+  /* Vector Subtract Signed, Saturate, Integer to Accumulator Word */          \
+  V(evsubfssiaaw, EVSUBFSSIAAW, 0x100004C3)                                    \
+  /* Vector Subtract Unsigned, Modulo, Integer to Accumulator Word */          \
+  V(evsubfumiaaw, EVSUBFUMIAAW, 0x100004CA)                                    \
+  /* Vector Subtract Unsigned, Saturate, Integer to Accumulator Word */        \
+  V(evsubfusiaaw, EVSUBFUSIAAW, 0x100004C2)                                    \
+  /* Vector Subtract from Word */                                              \
+  V(evsubfw, EVSUBFW, 0x10000204)                                              \
+  /* Vector Subtract Immediate from Word */                                    \
+  V(evsubifw, EVSUBIFW, 0x10000206)                                            \
+  /* Vector XOR */                                                             \
+  V(evxor, EVXOR, 0x10000216)                                                  \
+  /* Floating-Point Double-Precision Absolute Value */                         \
+  V(efdabs, EFDABS, 0x100002E4)                                                \
+  /* Floating-Point Double-Precision Add */                                    \
+  V(efdadd, EFDADD, 0x100002E0)                                                \
+  /* Floating-Point Double-Precision Convert from Single-Precision */          \
+  V(efdcfs, EFDCFS, 0x100002EF)                                                \
+  /* Convert Floating-Point Double-Precision from Signed Fraction */           \
+  V(efdcfsf, EFDCFSF, 0x100002F3)                                              \
+  /* Convert Floating-Point Double-Precision from Signed Integer */            \
+  V(efdcfsi, EFDCFSI, 0x100002F1)                                              \
+  /* Convert Floating-Point Double-Precision from Signed Integer */            \
+  /* Doubleword */                                                             \
+  V(efdcfsid, EFDCFSID, 0x100002E3)                                            \
+  /* Convert Floating-Point Double-Precision from Unsigned Fraction */         \
+  V(efdcfuf, EFDCFUF, 0x100002F2)                                              \
+  /* Convert Floating-Point Double-Precision from Unsigned Integer */          \
+  V(efdcfui, EFDCFUI, 0x100002F0)                                              \
+  /* Convert Floating-Point Double-Precision from Unsigned Integer */          \
+  /* Doubleword */                                                             \
+  V(efdcfuid, EFDCFUID, 0x100002E2)                                            \
+  /* Floating-Point Double-Precision Compare Equal */                          \
+  V(efdcmpeq, EFDCMPEQ, 0x100002EE)                                            \
+  /* Floating-Point Double-Precision Compare Greater Than */                   \
+  V(efdcmpgt, EFDCMPGT, 0x100002EC)                                            \
+  /* Floating-Point Double-Precision Compare Less Than */                      \
+  V(efdcmplt, EFDCMPLT, 0x100002ED)                                            \
+  /* Convert Floating-Point Double-Precision to Signed Fraction */             \
+  V(efdctsf, EFDCTSF, 0x100002F7)                                              \
+  /* Convert Floating-Point Double-Precision to Signed Integer */              \
+  V(efdctsi, EFDCTSI, 0x100002F5)                                              \
+  /* Convert Floating-Point Double-Precision to Signed Integer Doubleword */   \
+  /* with Round toward Zero */                                                 \
+  V(efdctsidz, EFDCTSIDZ, 0x100002EB)                                          \
+  /* Convert Floating-Point Double-Precision to Signed Integer with Round */   \
+  /* toward Zero */                                                            \
+  V(efdctsiz, EFDCTSIZ, 0x100002FA)                                            \
+  /* Convert Floating-Point Double-Precision to Unsigned Fraction */           \
+  V(efdctuf, EFDCTUF, 0x100002F6)                                              \
+  /* Convert Floating-Point Double-Precision to Unsigned Integer */            \
+  V(efdctui, EFDCTUI, 0x100002F4)                                              \
+  /* Convert Floating-Point Double-Precision to Unsigned Integer */            \
+  /* Doubleword with Round toward Zero */                                      \
+  V(efdctuidz, EFDCTUIDZ, 0x100002EA)                                          \
+  /* Convert Floating-Point Double-Precision to Unsigned Integer with */       \
+  /* Round toward Zero */                                                      \
+  V(efdctuiz, EFDCTUIZ, 0x100002F8)                                            \
+  /* Floating-Point Double-Precision Divide */                                 \
+  V(efddiv, EFDDIV, 0x100002E9)                                                \
+  /* Floating-Point Double-Precision Multiply */                               \
+  V(efdmul, EFDMUL, 0x100002E8)                                                \
+  /* Floating-Point Double-Precision Negative Absolute Value */                \
+  V(efdnabs, EFDNABS, 0x100002E5)                                              \
+  /* Floating-Point Double-Precision Negate */                                 \
+  V(efdneg, EFDNEG, 0x100002E6)                                                \
+  /* Floating-Point Double-Precision Subtract */                               \
+  V(efdsub, EFDSUB, 0x100002E1)                                                \
+  /* Floating-Point Double-Precision Test Equal */                             \
+  V(efdtsteq, EFDTSTEQ, 0x100002FE)                                            \
+  /* Floating-Point Double-Precision Test Greater Than */                      \
+  V(efdtstgt, EFDTSTGT, 0x100002FC)                                            \
+  /* Floating-Point Double-Precision Test Less Than */                         \
+  V(efdtstlt, EFDTSTLT, 0x100002FD)                                            \
+  /* Floating-Point Single-Precision Convert from Double-Precision */          \
+  V(efscfd, EFSCFD, 0x100002CF)                                                \
+  /* Floating-Point Absolute Value */                                          \
+  V(efsabs, EFSABS, 0x100002C4)                                                \
+  /* Floating-Point Add */                                                     \
+  V(efsadd, EFSADD, 0x100002C0)                                                \
+  /* Convert Floating-Point from Signed Fraction */                            \
+  V(efscfsf, EFSCFSF, 0x100002D3)                                              \
+  /* Convert Floating-Point from Signed Integer */                             \
+  V(efscfsi, EFSCFSI, 0x100002D1)                                              \
+  /* Convert Floating-Point from Unsigned Fraction */                          \
+  V(efscfuf, EFSCFUF, 0x100002D2)                                              \
+  /* Convert Floating-Point from Unsigned Integer */                           \
+  V(efscfui, EFSCFUI, 0x100002D0)                                              \
+  /* Floating-Point Compare Equal */                                           \
+  V(efscmpeq, EFSCMPEQ, 0x100002CE)                                            \
+  /* Floating-Point Compare Greater Than */                                    \
+  V(efscmpgt, EFSCMPGT, 0x100002CC)                                            \
+  /* Floating-Point Compare Less Than */                                       \
+  V(efscmplt, EFSCMPLT, 0x100002CD)                                            \
+  /* Convert Floating-Point to Signed Fraction */                              \
+  V(efsctsf, EFSCTSF, 0x100002D7)                                              \
+  /* Convert Floating-Point to Signed Integer */                               \
+  V(efsctsi, EFSCTSI, 0x100002D5)                                              \
+  /* Convert Floating-Point to Signed Integer with Round toward Zero */        \
+  V(efsctsiz, EFSCTSIZ, 0x100002DA)                                            \
+  /* Convert Floating-Point to Unsigned Fraction */                            \
+  V(efsctuf, EFSCTUF, 0x100002D6)                                              \
+  /* Convert Floating-Point to Unsigned Integer */                             \
+  V(efsctui, EFSCTUI, 0x100002D4)                                              \
+  /* Convert Floating-Point to Unsigned Integer with Round toward Zero */      \
+  V(efsctuiz, EFSCTUIZ, 0x100002D8)                                            \
+  /* Floating-Point Divide */                                                  \
+  V(efsdiv, EFSDIV, 0x100002C9)                                                \
+  /* Floating-Point Multiply */                                                \
+  V(efsmul, EFSMUL, 0x100002C8)                                                \
+  /* Floating-Point Negative Absolute Value */                                 \
+  V(efsnabs, EFSNABS, 0x100002C5)                                              \
+  /* Floating-Point Negate */                                                  \
+  V(efsneg, EFSNEG, 0x100002C6)                                                \
+  /* Floating-Point Subtract */                                                \
+  V(efssub, EFSSUB, 0x100002C1)                                                \
+  /* Floating-Point Test Equal */                                              \
+  V(efststeq, EFSTSTEQ, 0x100002DE)                                            \
+  /* Floating-Point Test Greater Than */                                       \
+  V(efststgt, EFSTSTGT, 0x100002DC)                                            \
+  /* Floating-Point Test Less Than */                                          \
+  V(efststlt, EFSTSTLT, 0x100002DD)                                            \
+  /* Vector Floating-Point Absolute Value */                                   \
+  V(evfsabs, EVFSABS, 0x10000284)                                              \
+  /* Vector Floating-Point Add */                                              \
+  V(evfsadd, EVFSADD, 0x10000280)                                              \
+  /* Vector Convert Floating-Point from Signed Fraction */                     \
+  V(evfscfsf, EVFSCFSF, 0x10000293)                                            \
+  /* Vector Convert Floating-Point from Signed Integer */                      \
+  V(evfscfsi, EVFSCFSI, 0x10000291)                                            \
+  /* Vector Convert Floating-Point from Unsigned Fraction */                   \
+  V(evfscfuf, EVFSCFUF, 0x10000292)                                            \
+  /* Vector Convert Floating-Point from Unsigned Integer */                    \
+  V(evfscfui, EVFSCFUI, 0x10000290)                                            \
+  /* Vector Floating-Point Compare Equal */                                    \
+  V(evfscmpeq, EVFSCMPEQ, 0x1000028E)                                          \
+  /* Vector Floating-Point Compare Greater Than */                             \
+  V(evfscmpgt, EVFSCMPGT, 0x1000028C)                                          \
+  /* Vector Floating-Point Compare Less Than */                                \
+  V(evfscmplt, EVFSCMPLT, 0x1000028D)                                          \
+  /* Vector Convert Floating-Point to Signed Fraction */                       \
+  V(evfsctsf, EVFSCTSF, 0x10000297)                                            \
+  /* Vector Convert Floating-Point to Signed Integer */                        \
+  V(evfsctsi, EVFSCTSI, 0x10000295)                                            \
+  /* Vector Convert Floating-Point to Signed Integer with Round toward */      \
+  /* Zero */                                                                   \
+  V(evfsctsiz, EVFSCTSIZ, 0x1000029A)                                          \
+  /* Vector Convert Floating-Point to Unsigned Fraction */                     \
+  V(evfsctuf, EVFSCTUF, 0x10000296)                                            \
+  /* Vector Convert Floating-Point to Unsigned Integer */                      \
+  V(evfsctui, EVFSCTUI, 0x10000294)                                            \
+  /* Vector Convert Floating-Point to Unsigned Integer with Round toward */    \
+  /* Zero */                                                                   \
+  V(evfsctuiz, EVFSCTUIZ, 0x10000298)                                          \
+  /* Vector Floating-Point Divide */                                           \
+  V(evfsdiv, EVFSDIV, 0x10000289)                                              \
+  /* Vector Floating-Point Multiply */                                         \
+  V(evfsmul, EVFSMUL, 0x10000288)                                              \
+  /* Vector Floating-Point Negative Absolute Value */                          \
+  V(evfsnabs, EVFSNABS, 0x10000285)                                            \
+  /* Vector Floating-Point Negate */                                           \
+  V(evfsneg, EVFSNEG, 0x10000286)                                              \
+  /* Vector Floating-Point Subtract */                                         \
+  V(evfssub, EVFSSUB, 0x10000281)                                              \
+  /* Vector Floating-Point Test Equal */                                       \
+  V(evfststeq, EVFSTSTEQ, 0x1000029E)                                          \
+  /* Vector Floating-Point Test Greater Than */                                \
+  V(evfststgt, EVFSTSTGT, 0x1000029C)                                          \
+  /* Vector Floating-Point Test Less Than */                                   \
+  V(evfststlt, EVFSTSTLT, 0x1000029D)
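+// Each V(mnemonic, NAME, value) entry above carries the fully encoded
+// instruction bits: the primary opcode sits in the top six bits and the
+// extended opcode in the low-order bits. For example, EFDADD's 0x100002E0
+// is (4 << 26) | 736, i.e. primary opcode 4 with EVX extended opcode 736.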
 
-  // Bits 10-1
-  FCMPU = 0 << 1,      // Floating Compare Unordered
-  FRSP = 12 << 1,      // Floating-Point Rounding
-  FCTIW = 14 << 1,     // Floating Convert to Integer Word X-form
-  FCTIWZ = 15 << 1,    // Floating Convert to Integer Word with Round to Zero
-  MTFSB1 = 38 << 1,    // Move to FPSCR Bit 1
-  FNEG = 40 << 1,      // Floating Negate
-  MCRFS = 64 << 1,     // Move to Condition Register from FPSCR
-  MTFSB0 = 70 << 1,    // Move to FPSCR Bit 0
-  FMR = 72 << 1,       // Floating Move Register
-  MTFSFI = 134 << 1,   // Move to FPSCR Field Immediate
-  FABS = 264 << 1,     // Floating Absolute Value
-  FRIN = 392 << 1,     // Floating Round to Integer Nearest
-  FRIZ = 424 << 1,     // Floating Round to Integer Toward Zero
-  FRIP = 456 << 1,     // Floating Round to Integer Plus
-  FRIM = 488 << 1,     // Floating Round to Integer Minus
-  MFFS = 583 << 1,     // move from FPSCR x-form
-  MTFSF = 711 << 1,    // move to FPSCR fields XFL-form
-  FCTID = 814 << 1,    // Floating convert to integer doubleword
-  FCTIDZ = 815 << 1,   // ^^^ with round toward zero
-  FCFID = 846 << 1,    // Floating convert from integer doubleword
-  FCTIDU = 942 << 1,   // Floating convert to integer doubleword unsigned
-  FCTIDUZ = 943 << 1,  // ^^^ with round toward zero
-  FCFIDU = 974 << 1    // Floating convert from integer doubleword unsigned
-};
+#define PPC_VC_OPCODE_LIST(V)                                                  \
+  /* Vector Compare Bounds Single-Precision */                                 \
+  V(vcmpbfp, VCMPBFP, 0x100003C6)                                              \
+  /* Vector Compare Equal To Single-Precision */                               \
+  V(vcmpeqfp, VCMPEQFP, 0x100000C6)                                            \
+  /* Vector Compare Equal To Unsigned Byte */                                  \
+  V(vcmpequb, VCMPEQUB, 0x10000006)                                            \
+  /* Vector Compare Equal To Unsigned Doubleword */                            \
+  V(vcmpequd, VCMPEQUD, 0x100000C7)                                            \
+  /* Vector Compare Equal To Unsigned Halfword */                              \
+  V(vcmpequh, VCMPEQUH, 0x10000046)                                            \
+  /* Vector Compare Equal To Unsigned Word */                                  \
+  V(vcmpequw, VCMPEQUW, 0x10000086)                                            \
+  /* Vector Compare Greater Than or Equal To Single-Precision */               \
+  V(vcmpgefp, VCMPGEFP, 0x100001C6)                                            \
+  /* Vector Compare Greater Than Single-Precision */                           \
+  V(vcmpgtfp, VCMPGTFP, 0x100002C6)                                            \
+  /* Vector Compare Greater Than Signed Byte */                                \
+  V(vcmpgtsb, VCMPGTSB, 0x10000306)                                            \
+  /* Vector Compare Greater Than Signed Doubleword */                          \
+  V(vcmpgtsd, VCMPGTSD, 0x100003C7)                                            \
+  /* Vector Compare Greater Than Signed Halfword */                            \
+  V(vcmpgtsh, VCMPGTSH, 0x10000346)                                            \
+  /* Vector Compare Greater Than Signed Word */                                \
+  V(vcmpgtsw, VCMPGTSW, 0x10000386)                                            \
+  /* Vector Compare Greater Than Unsigned Byte */                              \
+  V(vcmpgtub, VCMPGTUB, 0x10000206)                                            \
+  /* Vector Compare Greater Than Unsigned Doubleword */                        \
+  V(vcmpgtud, VCMPGTUD, 0x100002C7)                                            \
+  /* Vector Compare Greater Than Unsigned Halfword */                          \
+  V(vcmpgtuh, VCMPGTUH, 0x10000246)                                            \
+  /* Vector Compare Greater Than Unsigned Word */                              \
+  V(vcmpgtuw, VCMPGTUW, 0x10000286)
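+// These X-macro lists keep the mnemonic, the enum identifier and the opcode
+// bits in a single place. A minimal sketch of a consumer (the DECLARE_OPCODE
+// and VcOpcode names below are illustrative only, not part of this change):
+//
+//   #define DECLARE_OPCODE(name, op, value) op = value,
+//   enum VcOpcode : uint32_t { PPC_VC_OPCODE_LIST(DECLARE_OPCODE) };
+//   #undef DECLARE_OPCODE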
 
-enum OpcodeExt5 {
-  // Bits 4-2
-  RLDICL = 0 << 1,  // Rotate Left Double Word Immediate then Clear Left
-  RLDICR = 2 << 1,  // Rotate Left Double Word Immediate then Clear Right
-  RLDIC = 4 << 1,   // Rotate Left Double Word Immediate then Clear
-  RLDIMI = 6 << 1,  // Rotate Left Double Word Immediate then Mask Insert
-  // Bits 4-1
-  RLDCL = 8 << 1,  // Rotate Left Double Word then Clear Left
-  RLDCR = 9 << 1   // Rotate Left Double Word then Clear Right
+#define PPC_X_OPCODE_LIST(V)                                                   \
+  /* Bit Permute Doubleword */                                                 \
+  V(bpermd, BPERMD, 0x7C0001F8)                                                \
+  /* Count Leading Zeros Doubleword */                                         \
+  V(cntlzd, CNTLZDX, 0x7C000074)                                               \
+  /* Extend Sign Word */                                                       \
+  V(extsw, EXTSW, 0x7C0007B4)                                                  \
+  /* Load Doubleword And Reserve Indexed */                                    \
+  V(ldarx, LDARX, 0x7C0000A8)                                                  \
+  /* Load Doubleword Byte-Reverse Indexed */                                   \
+  V(ldbrx, LDBRX, 0x7C000428)                                                  \
+  /* Load Doubleword with Update Indexed */                                    \
+  V(ldux, LDUX, 0x7C00006A)                                                    \
+  /* Load Doubleword Indexed */                                                \
+  V(ldx, LDX, 0x7C00002A)                                                      \
+  /* Load Word Algebraic with Update Indexed */                                \
+  V(lwaux, LWAUX, 0x7C0002EA)                                                  \
+  /* Load Word Algebraic Indexed */                                            \
+  V(lwax, LWAX, 0x7C0002AA)                                                    \
+  /* Modulo Signed Dword */                                                    \
+  V(modsd, MODSD, 0x7C000612)                                                  \
+  /* Modulo Unsigned Dword */                                                  \
+  V(modud, MODUD, 0x7C000212)                                                  \
+  /* Population Count Doubleword */                                            \
+  V(popcntd, POPCNTD, 0x7C0003F4)                                              \
+  /* Parity Doubleword */                                                      \
+  V(prtyd, PRTYD, 0x7C000174)                                                  \
+  /* Shift Left Doubleword */                                                  \
+  V(sld, SLDX, 0x7C000036)                                                     \
+  /* Shift Right Algebraic Doubleword */                                       \
+  V(srad, SRAD, 0x7C000634)                                                    \
+  /* Shift Right Doubleword */                                                 \
+  V(srd, SRDX, 0x7C000436)                                                     \
+  /* Store Doubleword Byte-Reverse Indexed */                                  \
+  V(stdbrx, STDBRX, 0x7C000528)                                                \
+  /* Store Doubleword Conditional Indexed & record CR0 */                      \
+  V(stdcx, STDCX, 0x7C0001AD)                                                  \
+  /* Store Doubleword with Update Indexed */                                   \
+  V(stdux, STDUX, 0x7C00016A)                                                  \
+  /* Store Doubleword Indexed */                                               \
+  V(stdx, STDX, 0x7C00012A)                                                    \
+  /* Trap Doubleword */                                                        \
+  V(td, TD, 0x7C000088)                                                        \
+  /* AND */                                                                    \
+  V(andx, ANDX, 0x7C000038)                                                    \
+  /* AND with Complement */                                                    \
+  V(andc, ANDCX, 0x7C000078)                                                   \
+  /* Branch Conditional to Branch Target Address Register */                   \
+  V(bctar, BCTAR, 0x4C000460)                                                  \
+  /* Compare */                                                                \
+  V(cmp, CMP, 0x7C000000)                                                      \
+  /* Compare Byte */                                                           \
+  V(cmpb, CMPB, 0x7C0003F8)                                                    \
+  /* Compare Logical */                                                        \
+  V(cmpl, CMPL, 0x7C000040)                                                    \
+  /* Count Leading Zeros Word */                                               \
+  V(cntlzw, CNTLZWX, 0x7C000034)                                               \
+  /* Data Cache Block Flush */                                                 \
+  V(dcbf, DCBF, 0x7C0000AC)                                                    \
+  /* Data Cache Block Store */                                                 \
+  V(dcbst, DCBST, 0x7C00006C)                                                  \
+  /* Data Cache Block Touch */                                                 \
+  V(dcbt, DCBT, 0x7C00022C)                                                    \
+  /* Data Cache Block Touch for Store */                                       \
+  V(dcbtst, DCBTST, 0x7C0001EC)                                                \
+  /* Data Cache Block Zero */                                                  \
+  V(dcbz, DCBZ, 0x7C0007EC)                                                    \
+  /* Equivalent */                                                             \
+  V(eqv, EQV, 0x7C000238)                                                      \
+  /* Extend Sign Byte */                                                       \
+  V(extsb, EXTSB, 0x7C000774)                                                  \
+  /* Extend Sign Halfword */                                                   \
+  V(extsh, EXTSH, 0x7C000734)                                                  \
+  /* Instruction Cache Block Invalidate */                                     \
+  V(icbi, ICBI, 0x7C0007AC)                                                    \
+  /* Load Byte And Reserve Indexed */                                          \
+  V(lbarx, LBARX, 0x7C000068)                                                  \
+  /* Load Byte and Zero with Update Indexed */                                 \
+  V(lbzux, LBZUX, 0x7C0000EE)                                                  \
+  /* Load Byte and Zero Indexed */                                             \
+  V(lbzx, LBZX, 0x7C0000AE)                                                    \
+  /* Load Halfword And Reserve Indexed Xform */                                \
+  V(lharx, LHARX, 0x7C0000E8)                                                  \
+  /* Load Halfword Algebraic with Update Indexed */                            \
+  V(lhaux, LHAUX, 0x7C0002EE)                                                  \
+  /* Load Halfword Algebraic Indexed */                                        \
+  V(lhax, LHAX, 0x7C0002AE)                                                    \
+  /* Load Halfword Byte-Reverse Indexed */                                     \
+  V(lhbrx, LHBRX, 0x7C00062C)                                                  \
+  /* Load Halfword and Zero with Update Indexed */                             \
+  V(lhzux, LHZUX, 0x7C00026E)                                                  \
+  /* Load Halfword and Zero Indexed */                                         \
+  V(lhzx, LHZX, 0x7C00022E)                                                    \
+  /* Load Word and Reserve Indexed */                                          \
+  V(lwarx, LWARX, 0x7C000028)                                                  \
+  /* Load Word Byte-Reverse Indexed */                                         \
+  V(lwbrx, LWBRX, 0x7C00042C)                                                  \
+  /* Load Word and Zero with Update Indexed */                                 \
+  V(lwzux, LWZUX, 0x7C00006E)                                                  \
+  /* Load Word and Zero Indexed */                                             \
+  V(lwzx, LWZX, 0x7C00002E)                                                    \
+  /* Modulo Signed Word */                                                     \
+  V(modsw, MODSW, 0x7C000616)                                                  \
+  /* Modulo Unsigned Word */                                                   \
+  V(moduw, MODUW, 0x7C000216)                                                  \
+  /* NAND */                                                                   \
+  V(nand, NAND, 0x7C0003B8)                                                    \
+  /* NOR */                                                                    \
+  V(nor, NORX, 0x7C0000F8)                                                     \
+  /* OR */                                                                     \
+  V(orx, ORX, 0x7C000378)                                                      \
+  /* OR with Complement */                                                     \
+  V(orc, ORC, 0x7C000338)                                                      \
+  /* Population Count Byte-wise */                                             \
+  V(popcntb, POPCNTB, 0x7C0000F4)                                              \
+  /* Population Count Words */                                                 \
+  V(popcntw, POPCNTW, 0x7C0002F4)                                              \
+  /* Parity Word */                                                            \
+  V(prtyw, PRTYW, 0x7C000134)                                                  \
+  /* Shift Left Word */                                                        \
+  V(slw, SLWX, 0x7C000030)                                                     \
+  /* Shift Right Algebraic Word */                                             \
+  V(sraw, SRAW, 0x7C000630)                                                    \
+  /* Shift Right Algebraic Word Immediate */                                   \
+  V(srawi, SRAWIX, 0x7C000670)                                                 \
+  /* Shift Right Word */                                                       \
+  V(srw, SRWX, 0x7C000430)                                                     \
+  /* Store Byte Conditional Indexed */                                         \
+  V(stbcx, STBCX, 0x7C00056D)                                                  \
+  /* Store Byte with Update Indexed */                                         \
+  V(stbux, STBUX, 0x7C0001EE)                                                  \
+  /* Store Byte Indexed */                                                     \
+  V(stbx, STBX, 0x7C0001AE)                                                    \
+  /* Store Halfword Byte-Reverse Indexed */                                    \
+  V(sthbrx, STHBRX, 0x7C00072C)                                                \
+  /* Store Halfword Conditional Indexed Xform */                               \
+  V(sthcx, STHCX, 0x7C0005AD)                                                  \
+  /* Store Halfword with Update Indexed */                                     \
+  V(sthux, STHUX, 0x7C00036E)                                                  \
+  /* Store Halfword Indexed */                                                 \
+  V(sthx, STHX, 0x7C00032E)                                                    \
+  /* Store Word Byte-Reverse Indexed */                                        \
+  V(stwbrx, STWBRX, 0x7C00052C)                                                \
+  /* Store Word Conditional Indexed & record CR0 */                            \
+  V(stwcx, STWCX, 0x7C00012D)                                                  \
+  /* Store Word with Update Indexed */                                         \
+  V(stwux, STWUX, 0x7C00016E)                                                  \
+  /* Store Word Indexed */                                                     \
+  V(stwx, STWX, 0x7C00012E)                                                    \
+  /* Synchronize */                                                            \
+  V(sync, SYNC, 0x7C0004AC)                                                    \
+  /* Trap Word */                                                              \
+  V(tw, TW, 0x7C000008)                                                        \
+  /* Executed No Operation */                                                  \
+  V(xnop, XNOP, 0x68000000)                                                    \
+  /* XOR */                                                                    \
+  V(xorx, XORX, 0x7C000278)                                                    \
+  /* Convert Binary Coded Decimal To Declets */                                \
+  V(cbcdtd, CBCDTD, 0x7C000274)                                                \
+  /* Convert Declets To Binary Coded Decimal */                                \
+  V(cdtbcd, CDTBCD, 0x7C000234)                                                \
+  /* Decimal Floating Add */                                                   \
+  V(dadd, DADD, 0xEC000004)                                                    \
+  /* Decimal Floating Add Quad */                                              \
+  V(daddq, DADDQ, 0xFC000004)                                                  \
+  /* Decimal Floating Convert From Fixed */                                    \
+  V(dcffix, DCFFIX, 0xEC000644)                                                \
+  /* Decimal Floating Convert From Fixed Quad */                               \
+  V(dcffixq, DCFFIXQ, 0xFC000644)                                              \
+  /* Decimal Floating Compare Ordered */                                       \
+  V(dcmpo, DCMPO, 0xEC000104)                                                  \
+  /* Decimal Floating Compare Ordered Quad */                                  \
+  V(dcmpoq, DCMPOQ, 0xFC000104)                                                \
+  /* Decimal Floating Compare Unordered */                                     \
+  V(dcmpu, DCMPU, 0xEC000504)                                                  \
+  /* Decimal Floating Compare Unordered Quad */                                \
+  V(dcmpuq, DCMPUQ, 0xFC000504)                                                \
+  /* Decimal Floating Convert To DFP Long */                                   \
+  V(dctdp, DCTDP, 0xEC000204)                                                  \
+  /* Decimal Floating Convert To Fixed */                                      \
+  V(dctfix, DCTFIX, 0xEC000244)                                                \
+  /* Decimal Floating Convert To Fixed Quad */                                 \
+  V(dctfixq, DCTFIXQ, 0xFC000244)                                              \
+  /* Decimal Floating Convert To DFP Extended */                               \
+  V(dctqpq, DCTQPQ, 0xFC000204)                                                \
+  /* Decimal Floating Decode DPD To BCD */                                     \
+  V(ddedpd, DDEDPD, 0xEC000284)                                                \
+  /* Decimal Floating Decode DPD To BCD Quad */                                \
+  V(ddedpdq, DDEDPDQ, 0xFC000284)                                              \
+  /* Decimal Floating Divide */                                                \
+  V(ddiv, DDIV, 0xEC000444)                                                    \
+  /* Decimal Floating Divide Quad */                                           \
+  V(ddivq, DDIVQ, 0xFC000444)                                                  \
+  /* Decimal Floating Encode BCD To DPD */                                     \
+  V(denbcd, DENBCD, 0xEC000684)                                                \
+  /* Decimal Floating Encode BCD To DPD Quad */                                \
+  V(denbcdq, DENBCDQ, 0xFC000684)                                              \
+  /* Decimal Floating Insert Exponent */                                       \
+  V(diex, DIEX, 0xEC0006C4)                                                    \
+  /* Decimal Floating Insert Exponent Quad */                                  \
+  V(diexq, DIEXQ, 0xFC0006C4)                                                  \
+  /* Decimal Floating Multiply */                                              \
+  V(dmul, DMUL, 0xEC000044)                                                    \
+  /* Decimal Floating Multiply Quad */                                         \
+  V(dmulq, DMULQ, 0xFC000044)                                                  \
+  /* Decimal Floating Round To DFP Long */                                     \
+  V(drdpq, DRDPQ, 0xFC000604)                                                  \
+  /* Decimal Floating Round To DFP Short */                                    \
+  V(drsp, DRSP, 0xEC000604)                                                    \
+  /* Decimal Floating Subtract */                                              \
+  V(dsub, DSUB, 0xEC000404)                                                    \
+  /* Decimal Floating Subtract Quad */                                         \
+  V(dsubq, DSUBQ, 0xFC000404)                                                  \
+  /* Decimal Floating Test Exponent */                                         \
+  V(dtstex, DTSTEX, 0xEC000144)                                                \
+  /* Decimal Floating Test Exponent Quad */                                    \
+  V(dtstexq, DTSTEXQ, 0xFC000144)                                              \
+  /* Decimal Floating Test Significance */                                     \
+  V(dtstsf, DTSTSF, 0xEC000544)                                                \
+  /* Decimal Floating Test Significance Quad */                                \
+  V(dtstsfq, DTSTSFQ, 0xFC000544)                                              \
+  /* Decimal Floating Extract Exponent */                                      \
+  V(dxex, DXEX, 0xEC0002C4)                                                    \
+  /* Decimal Floating Extract Exponent Quad */                                 \
+  V(dxexq, DXEXQ, 0xFC0002C4)                                                  \
+  /* Decorated Storage Notify */                                               \
+  V(dsn, DSN, 0x7C0003C6)                                                      \
+  /* Load Byte with Decoration Indexed */                                      \
+  V(lbdx, LBDX, 0x7C000406)                                                    \
+  /* Load Doubleword with Decoration Indexed */                                \
+  V(lddx, LDDX, 0x7C0004C6)                                                    \
+  /* Load Floating Doubleword with Decoration Indexed */                       \
+  V(lfddx, LFDDX, 0x7C000646)                                                  \
+  /* Load Halfword with Decoration Indexed */                                  \
+  V(lhdx, LHDX, 0x7C000446)                                                    \
+  /* Load Word with Decoration Indexed */                                      \
+  V(lwdx, LWDX, 0x7C000486)                                                    \
+  /* Store Byte with Decoration Indexed */                                     \
+  V(stbdx, STBDX, 0x7C000506)                                                  \
+  /* Store Doubleword with Decoration Indexed */                               \
+  V(stddx, STDDX, 0x7C0005C6)                                                  \
+  /* Store Floating Doubleword with Decoration Indexed */                      \
+  V(stfddx, STFDDX, 0x7C000746)                                                \
+  /* Store Halfword with Decoration Indexed */                                 \
+  V(sthdx, STHDX, 0x7C000546)                                                  \
+  /* Store Word with Decoration Indexed */                                     \
+  V(stwdx, STWDX, 0x7C000586)                                                  \
+  /* Data Cache Block Allocate */                                              \
+  V(dcba, DCBA, 0x7C0005EC)                                                    \
+  /* Data Cache Block Invalidate */                                            \
+  V(dcbi, DCBI, 0x7C0003AC)                                                    \
+  /* Instruction Cache Block Touch */                                          \
+  V(icbt, ICBT, 0x7C00002C)                                                    \
+  /* Memory Barrier */                                                         \
+  V(mbar, MBAR, 0x7C0006AC)                                                    \
+  /* Move to Condition Register from XER */                                    \
+  V(mcrxr, MCRXR, 0x7C000400)                                                  \
+  /* TLB Invalidate Local Indexed */                                           \
+  V(tlbilx, TLBILX, 0x7C000024)                                                \
+  /* TLB Invalidate Virtual Address Indexed */                                 \
+  V(tlbivax, TLBIVAX, 0x7C000624)                                              \
+  /* TLB Read Entry */                                                         \
+  V(tlbre, TLBRE, 0x7C000764)                                                  \
+  /* TLB Search Indexed */                                                     \
+  V(tlbsx, TLBSX, 0x7C000724)                                                  \
+  /* TLB Write Entry */                                                        \
+  V(tlbwe, TLBWE, 0x7C0007A4)                                                  \
+  /* Write External Enable */                                                  \
+  V(wrtee, WRTEE, 0x7C000106)                                                  \
+  /* Write External Enable Immediate */                                        \
+  V(wrteei, WRTEEI, 0x7C000146)                                                \
+  /* Data Cache Read */                                                        \
+  V(dcread, DCREAD, 0x7C00028C)                                                \
+  /* Instruction Cache Read */                                                 \
+  V(icread, ICREAD, 0x7C0007CC)                                                \
+  /* Data Cache Invalidate */                                                  \
+  V(dci, DCI, 0x7C00038C)                                                      \
+  /* Instruction Cache Invalidate */                                           \
+  V(ici, ICI, 0x7C00078C)                                                      \
+  /* Move From Device Control Register User Mode Indexed */                    \
+  V(mfdcrux, MFDCRUX, 0x7C000246)                                              \
+  /* Move From Device Control Register Indexed */                              \
+  V(mfdcrx, MFDCRX, 0x7C000206)                                                \
+  /* Move To Device Control Register User Mode Indexed */                      \
+  V(mtdcrux, MTDCRUX, 0x7C000346)                                              \
+  /* Move To Device Control Register Indexed */                                \
+  V(mtdcrx, MTDCRX, 0x7C000306)                                                \
+  /* Return From Debug Interrupt */                                            \
+  V(rfdi, RFDI, 0x4C00004E)                                                    \
+  /* Data Cache Block Flush by External PID */                                 \
+  V(dcbfep, DCBFEP, 0x7C0000FE)                                                \
+  /* Data Cache Block Store by External PID */                                 \
+  V(dcbstep, DCBSTEP, 0x7C00007E)                                              \
+  /* Data Cache Block Touch by External PID */                                 \
+  V(dcbtep, DCBTEP, 0x7C00027E)                                                \
+  /* Data Cache Block Touch for Store by External PID */                       \
+  V(dcbtstep, DCBTSTEP, 0x7C0001FE)                                            \
+  /* Data Cache Block Zero by External PID */                                  \
+  V(dcbzep, DCBZEP, 0x7C0007FE)                                                \
+  /* Instruction Cache Block Invalidate by External PID */                     \
+  V(icbiep, ICBIEP, 0x7C0007BE)                                                \
+  /* Load Byte and Zero by External PID Indexed */                             \
+  V(lbepx, LBEPX, 0x7C0000BE)                                                  \
+  /* Load Floating-Point Double by External PID Indexed */                     \
+  V(lfdepx, LFDEPX, 0x7C0004BE)                                                \
+  /* Load Halfword and Zero by External PID Indexed */                         \
+  V(lhepx, LHEPX, 0x7C00023E)                                                  \
+  /* Load Vector by External PID Indexed */                                    \
+  V(lvepx, LVEPX, 0x7C00024E)                                                  \
+  /* Load Vector by External PID Indexed Last */                               \
+  V(lvepxl, LVEPXL, 0x7C00020E)                                                \
+  /* Load Word and Zero by External PID Indexed */                             \
+  V(lwepx, LWEPX, 0x7C00003E)                                                  \
+  /* Store Byte by External PID Indexed */                                     \
+  V(stbepx, STBEPX, 0x7C0001BE)                                                \
+  /* Store Floating-Point Double by External PID Indexed */                    \
+  V(stfdepx, STFDEPX, 0x7C0005BE)                                              \
+  /* Store Halfword by External PID Indexed */                                 \
+  V(sthepx, STHEPX, 0x7C00033E)                                                \
+  /* Store Vector by External PID Indexed */                                   \
+  V(stvepx, STVEPX, 0x7C00064E)                                                \
+  /* Store Vector by External PID Indexed Last */                              \
+  V(stvepxl, STVEPXL, 0x7C00060E)                                              \
+  /* Store Word by External PID Indexed */                                     \
+  V(stwepx, STWEPX, 0x7C00013E)                                                \
+  /* Load Doubleword by External PID Indexed */                                \
+  V(ldepx, LDEPX, 0x7C00003A)                                                  \
+  /* Store Doubleword by External PID Indexed */                               \
+  V(stdepx, STDEPX, 0x7C00013A)                                                \
+  /* TLB Search and Reserve Indexed */                                         \
+  V(tlbsrx, TLBSRX, 0x7C0006A5)                                                \
+  /* External Control In Word Indexed */                                       \
+  V(eciwx, ECIWX, 0x7C00026C)                                                  \
+  /* External Control Out Word Indexed */                                      \
+  V(ecowx, ECOWX, 0x7C00036C)                                                  \
+  /* Data Cache Block Lock Clear */                                            \
+  V(dcblc, DCBLC, 0x7C00030C)                                                  \
+  /* Data Cache Block Lock Query */                                            \
+  V(dcblq, DCBLQ, 0x7C00034D)                                                  \
+  /* Data Cache Block Touch and Lock Set */                                    \
+  V(dcbtls, DCBTLS, 0x7C00014C)                                                \
+  /* Data Cache Block Touch for Store and Lock Set */                          \
+  V(dcbtstls, DCBTSTLS, 0x7C00010C)                                            \
+  /* Instruction Cache Block Lock Clear */                                     \
+  V(icblc, ICBLC, 0x7C0001CC)                                                  \
+  /* Instruction Cache Block Lock Query */                                     \
+  V(icblq, ICBLQ, 0x7C00018D)                                                  \
+  /* Instruction Cache Block Touch and Lock Set */                             \
+  V(icbtls, ICBTLS, 0x7C0003CC)                                                \
+  /* Floating Compare Ordered */                                               \
+  V(fcmpo, FCMPO, 0xFC000040)                                                  \
+  /* Floating Compare Unordered */                                             \
+  V(fcmpu, FCMPU, 0xFC000000)                                                  \
+  /* Floating Test for software Divide */                                      \
+  V(ftdiv, FTDIV, 0xFC000100)                                                  \
+  /* Floating Test for software Square Root */                                 \
+  V(ftsqrt, FTSQRT, 0xFC000140)                                                \
+  /* Load Floating-Point Double with Update Indexed */                         \
+  V(lfdux, LFDUX, 0x7C0004EE)                                                  \
+  /* Load Floating-Point Double Indexed */                                     \
+  V(lfdx, LFDX, 0x7C0004AE)                                                    \
+  /* Load Floating-Point as Integer Word Algebraic Indexed */                  \
+  V(lfiwax, LFIWAX, 0x7C0006AE)                                                \
+  /* Load Floating-Point as Integer Word and Zero Indexed */                   \
+  V(lfiwzx, LFIWZX, 0x7C0006EE)                                                \
+  /* Load Floating-Point Single with Update Indexed */                         \
+  V(lfsux, LFSUX, 0x7C00046E)                                                  \
+  /* Load Floating-Point Single Indexed */                                     \
+  V(lfsx, LFSX, 0x7C00042E)                                                    \
+  /* Move To Condition Register from FPSCR */                                  \
+  V(mcrfs, MCRFS, 0xFC000080)                                                  \
+  /* Store Floating-Point Double with Update Indexed */                        \
+  V(stfdux, STFDUX, 0x7C0005EE)                                                \
+  /* Store Floating-Point Double Indexed */                                    \
+  V(stfdx, STFDX, 0x7C0005AE)                                                  \
+  /* Store Floating-Point as Integer Word Indexed */                           \
+  V(stfiwx, STFIWX, 0x7C0007AE)                                                \
+  /* Store Floating-Point Single with Update Indexed */                        \
+  V(stfsux, STFSUX, 0x7C00056E)                                                \
+  /* Store Floating-Point Single Indexed */                                    \
+  V(stfsx, STFSX, 0x7C00052E)                                                  \
+  /* Load Floating-Point Double Pair Indexed */                                \
+  V(lfdpx, LFDPX, 0x7C00062E)                                                  \
+  /* Store Floating-Point Double Pair Indexed */                               \
+  V(stfdpx, STFDPX, 0x7C00072E)                                                \
+  /* Floating Absolute Value */                                                \
+  V(fabs, FABS, 0xFC000210)                                                    \
+  /* Floating Convert From Integer Doubleword */                               \
+  V(fcfid, FCFID, 0xFC00069C)                                                  \
+  /* Floating Convert From Integer Doubleword Single */                        \
+  V(fcfids, FCFIDS, 0xEC00069C)                                                \
+  /* Floating Convert From Integer Doubleword Unsigned */                      \
+  V(fcfidu, FCFIDU, 0xFC00079C)                                                \
+  /* Floating Convert From Integer Doubleword Unsigned Single */               \
+  V(fcfidus, FCFIDUS, 0xEC00079C)                                              \
+  /* Floating Copy Sign */                                                     \
+  V(fcpsgn, FCPSGN, 0xFC000010)                                                \
+  /* Floating Convert To Integer Doubleword */                                 \
+  V(fctid, FCTID, 0xFC00065C)                                                  \
+  /* Floating Convert To Integer Doubleword Unsigned */                        \
+  V(fctidu, FCTIDU, 0xFC00075C)                                                \
+  /* Floating Convert To Integer Doubleword Unsigned with round toward */      \
+  /* Zero */                                                                   \
+  V(fctiduz, FCTIDUZ, 0xFC00075E)                                              \
+  /* Floating Convert To Integer Doubleword with round toward Zero */          \
+  V(fctidz, FCTIDZ, 0xFC00065E)                                                \
+  /* Floating Convert To Integer Word */                                       \
+  V(fctiw, FCTIW, 0xFC00001C)                                                  \
+  /* Floating Convert To Integer Word Unsigned */                              \
+  V(fctiwu, FCTIWU, 0xFC00011C)                                                \
+  /* Floating Convert To Integer Word Unsigned with round toward Zero */       \
+  V(fctiwuz, FCTIWUZ, 0xFC00011E)                                              \
+  /* Floating Convert To Integer Word with round to Zero */                    \
+  V(fctiwz, FCTIWZ, 0xFC00001E)                                                \
+  /* Floating Move Register */                                                 \
+  V(fmr, FMR, 0xFC000090)                                                      \
+  /* Floating Negative Absolute Value */                                       \
+  V(fnabs, FNABS, 0xFC000110)                                                  \
+  /* Floating Negate */                                                        \
+  V(fneg, FNEG, 0xFC000050)                                                    \
+  /* Floating Round to Single-Precision */                                     \
+  V(frsp, FRSP, 0xFC000018)                                                    \
+  /* Move From FPSCR */                                                        \
+  V(mffs, MFFS, 0xFC00048E)                                                    \
+  /* Move To FPSCR Bit 0 */                                                    \
+  V(mtfsb0, MTFSB0, 0xFC00008C)                                                \
+  /* Move To FPSCR Bit 1 */                                                    \
+  V(mtfsb1, MTFSB1, 0xFC00004C)                                                \
+  /* Move To FPSCR Field Immediate */                                          \
+  V(mtfsfi, MTFSFI, 0xFC00010C)                                                \
+  /* Floating Round To Integer Minus */                                        \
+  V(frim, FRIM, 0xFC0003D0)                                                    \
+  /* Floating Round To Integer Nearest */                                      \
+  V(frin, FRIN, 0xFC000310)                                                    \
+  /* Floating Round To Integer Plus */                                         \
+  V(frip, FRIP, 0xFC000390)                                                    \
+  /* Floating Round To Integer toward Zero */                                  \
+  V(friz, FRIZ, 0xFC000350)                                                    \
+  /* Multiply Cross Halfword to Word Signed */                                 \
+  V(mulchw, MULCHW, 0x10000150)                                                \
+  /* Multiply Cross Halfword to Word Unsigned */                               \
+  V(mulchwu, MULCHWU, 0x10000110)                                              \
+  /* Multiply High Halfword to Word Signed */                                  \
+  V(mulhhw, MULHHW, 0x10000050)                                                \
+  /* Multiply High Halfword to Word Unsigned */                                \
+  V(mulhhwu, MULHHWU, 0x10000010)                                              \
+  /* Multiply Low Halfword to Word Signed */                                   \
+  V(mullhw, MULLHW, 0x10000350)                                                \
+  /* Multiply Low Halfword to Word Unsigned */                                 \
+  V(mullhwu, MULLHWU, 0x10000310)                                              \
+  /* Determine Leftmost Zero Byte */                                           \
+  V(dlmzb, DLMZB, 0x7C00009C)                                                  \
+  /* Load Quadword And Reserve Indexed */                                      \
+  V(lqarx, LQARX, 0x7C000228)                                                  \
+  /* Store Quadword Conditional Indexed and record CR0 */                      \
+  V(stqcx, STQCX, 0x7C00016D)                                                  \
+  /* Load String Word Immediate */                                             \
+  V(lswi, LSWI, 0x7C0004AA)                                                    \
+  /* Load String Word Indexed */                                               \
+  V(lswx, LSWX, 0x7C00042A)                                                    \
+  /* Store String Word Immediate */                                            \
+  V(stswi, STSWI, 0x7C0005AA)                                                  \
+  /* Store String Word Indexed */                                              \
+  V(stswx, STSWX, 0x7C00052A)                                                  \
+  /* Clear BHRB */                                                             \
+  V(clrbhrb, CLRBHRB, 0x7C00035C)                                              \
+  /* Enforce In-order Execution of I/O */                                      \
+  V(eieio, EIEIO, 0x7C0006AC)                                                  \
+  /* Load Byte and Zero Caching Inhibited Indexed */                           \
+  V(lbzcix, LBZCIX, 0x7C0006AA)                                                \
+  /* Load Doubleword Caching Inhibited Indexed */                              \
+  V(ldcix, LDCIX, 0x7C0006EA)                                                  \
+  /* Load Halfword and Zero Caching Inhibited Indexed */                       \
+  V(lhzcix, LHZCIX, 0x7C00066A)                                                \
+  /* Load Word and Zero Caching Inhibited Indexed */                           \
+  V(lwzcix, LWZCIX, 0x7C00062A)                                                \
+  /* Move From Segment Register */                                             \
+  V(mfsr, MFSR, 0x7C0004A6)                                                    \
+  /* Move From Segment Register Indirect */                                    \
+  V(mfsrin, MFSRIN, 0x7C000526)                                                \
+  /* Move To Machine State Register Doubleword */                              \
+  V(mtmsrd, MTMSRD, 0x7C000164)                                                \
+  /* Move To Split Little Endian */                                            \
+  V(mtsle, MTSLE, 0x7C000126)                                                  \
+  /* Move To Segment Register */                                               \
+  V(mtsr, MTSR, 0x7C0001A4)                                                    \
+  /* Move To Segment Register Indirect */                                      \
+  V(mtsrin, MTSRIN, 0x7C0001E4)                                                \
+  /* SLB Find Entry ESID */                                                    \
+  V(slbfee, SLBFEE, 0x7C0007A7)                                                \
+  /* SLB Invalidate All */                                                     \
+  V(slbia, SLBIA, 0x7C0003E4)                                                  \
+  /* SLB Invalidate Entry */                                                   \
+  V(slbie, SLBIE, 0x7C000364)                                                  \
+  /* SLB Move From Entry ESID */                                               \
+  V(slbmfee, SLBMFEE, 0x7C000726)                                              \
+  /* SLB Move From Entry VSID */                                               \
+  V(slbmfev, SLBMFEV, 0x7C0006A6)                                              \
+  /* SLB Move To Entry */                                                      \
+  V(slbmte, SLBMTE, 0x7C000324)                                                \
+  /* Store Byte Caching Inhibited Indexed */                                   \
+  V(stbcix, STBCIX, 0x7C0007AA)                                                \
+  /* Store Doubleword Caching Inhibited Indexed */                             \
+  V(stdcix, STDCIX, 0x7C0007EA)                                                \
+  /* Store Halfword and Zero Caching Inhibited Indexed */                      \
+  V(sthcix, STHCIX, 0x7C00076A)                                                \
+  /* Store Word and Zero Caching Inhibited Indexed */                          \
+  V(stwcix, STWCIX, 0x7C00072A)                                                \
+  /* TLB Invalidate All */                                                     \
+  V(tlbia, TLBIA, 0x7C0002E4)                                                  \
+  /* TLB Invalidate Entry */                                                   \
+  V(tlbie, TLBIE, 0x7C000264)                                                  \
+  /* TLB Invalidate Entry Local */                                             \
+  V(tlbiel, TLBIEL, 0x7C000224)                                                \
+  /* Message Clear Privileged */                                               \
+  V(msgclrp, MSGCLRP, 0x7C00015C)                                              \
+  /* Message Send Privileged */                                                \
+  V(msgsndp, MSGSNDP, 0x7C00011C)                                              \
+  /* Message Clear */                                                          \
+  V(msgclr, MSGCLR, 0x7C0001DC)                                                \
+  /* Message Send */                                                           \
+  V(msgsnd, MSGSND, 0x7C00019C)                                                \
+  /* Move From Machine State Register */                                       \
+  V(mfmsr, MFMSR, 0x7C0000A6)                                                  \
+  /* Move To Machine State Register */                                         \
+  V(mtmsr, MTMSR, 0x7C000124)                                                  \
+  /* TLB Synchronize */                                                        \
+  V(tlbsync, TLBSYNC, 0x7C00046C)                                              \
+  /* Transaction Abort */                                                      \
+  V(tabort, TABORT, 0x7C00071D)                                                \
+  /* Transaction Abort Doubleword Conditional */                               \
+  V(tabortdc, TABORTDC, 0x7C00065D)                                            \
+  /* Transaction Abort Doubleword Conditional Immediate */                     \
+  V(tabortdci, TABORTDCI, 0x7C0006DD)                                          \
+  /* Transaction Abort Word Conditional */                                     \
+  V(tabortwc, TABORTWC, 0x7C00061D)                                            \
+  /* Transaction Abort Word Conditional Immediate */                           \
+  V(tabortwci, TABORTWCI, 0x7C00069D)                                          \
+  /* Transaction Begin */                                                      \
+  V(tbegin, TBEGIN, 0x7C00051D)                                                \
+  /* Transaction Check */                                                      \
+  V(tcheck, TCHECK, 0x7C00059C)                                                \
+  /* Transaction End */                                                        \
+  V(tend, TEND, 0x7C00055C)                                                    \
+  /* Transaction Recheckpoint */                                               \
+  V(trechkpt, TRECHKPT, 0x7C0007DD)                                            \
+  /* Transaction Reclaim */                                                    \
+  V(treclaim, TRECLAIM, 0x7C00075D)                                            \
+  /* Transaction Suspend or Resume */                                          \
+  V(tsr, TSR, 0x7C0005DC)                                                      \
+  /* Load Vector Element Byte Indexed */                                       \
+  V(lvebx, LVEBX, 0x7C00000E)                                                  \
+  /* Load Vector Element Halfword Indexed */                                   \
+  V(lvehx, LVEHX, 0x7C00004E)                                                  \
+  /* Load Vector Element Word Indexed */                                       \
+  V(lvewx, LVEWX, 0x7C00008E)                                                  \
+  /* Load Vector for Shift Left */                                             \
+  V(lvsl, LVSL, 0x7C00000C)                                                    \
+  /* Load Vector for Shift Right */                                            \
+  V(lvsr, LVSR, 0x7C00004C)                                                    \
+  /* Load Vector Indexed */                                                    \
+  V(lvx, LVX, 0x7C0000CE)                                                      \
+  /* Load Vector Indexed Last */                                               \
+  V(lvxl, LVXL, 0x7C0002CE)                                                    \
+  /* Store Vector Element Byte Indexed */                                      \
+  V(stvebx, STVEBX, 0x7C00010E)                                                \
+  /* Store Vector Element Halfword Indexed */                                  \
+  V(stvehx, STVEHX, 0x7C00014E)                                                \
+  /* Store Vector Element Word Indexed */                                      \
+  V(stvewx, STVEWX, 0x7C00018E)                                                \
+  /* Store Vector Indexed */                                                   \
+  V(stvx, STVX, 0x7C0001CE)                                                    \
+  /* Store Vector Indexed Last */                                              \
+  V(stvxl, STVXL, 0x7C0003CE)                                                  \
+  /* Vector Minimum Signed Doubleword */                                       \
+  V(vminsd, VMINSD, 0x100003C2)                                                \
+  /* Floating Merge Even Word */                                               \
+  V(fmrgew, FMRGEW, 0xFC00078C)                                                \
+  /* Floating Merge Odd Word */                                                \
+  V(fmrgow, FMRGOW, 0xFC00068C)                                                \
+  /* Wait for Interrupt */                                                     \
+  V(wait, WAIT, 0x7C00007C)
+
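+/* Each PPC_*_OPCODE_LIST macro in this file is an X-macro: it applies a
+   caller-supplied macro V to (mnemonic, enum name, opcode value) triples,
+   grouped by instruction format. A minimal sketch of expanding one list
+   into an enum (DemoOpcode and DECLARE are illustrative names, not part
+   of this header):
+
+     #define DECLARE(name, opcode_name, opcode_value) \
+       opcode_name = opcode_value,
+     enum DemoOpcode { PPC_EVS_OPCODE_LIST(DECLARE) };
+     #undef DECLARE
+*/
+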
+#define PPC_EVS_OPCODE_LIST(V)                                                 \
+  /* Vector Select */                                                          \
+  V(evsel, EVSEL, 0x10000278)
+
+#define PPC_DS_OPCODE_LIST(V)                                                  \
+  /* Load Doubleword */                                                        \
+  V(ld, LD, 0xE8000000)                                                        \
+  /* Load Doubleword with Update */                                            \
+  V(ldu, LDU, 0xE8000001)                                                      \
+  /* Load Word Algebraic */                                                    \
+  V(lwa, LWA, 0xE8000002)                                                      \
+  /* Store Doubleword */                                                       \
+  V(std, STD, 0xF8000000)                                                      \
+  /* Store Doubleword with Update */                                           \
+  V(stdu, STDU, 0xF8000001)                                                    \
+  /* Load Floating-Point Double Pair */                                        \
+  V(lfdp, LFDP, 0xE4000000)                                                    \
+  /* Store Floating-Point Double Pair */                                       \
+  V(stfdp, STFDP, 0xF4000000)                                                  \
+  /* Store Quadword */                                                         \
+  V(stq, STQ, 0xF8000002)
+
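+/* DS-form displacements are word-aligned, so the low two bits of each value
+   above serve as an extended opcode: ld/ldu/lwa share the 0xE8000000
+   primary opcode and differ only in bits 0-1. */
+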
+#define PPC_D_OPCODE_LIST(V)                                                   \
+  /* Trap Doubleword Immediate */                                              \
+  V(tdi, TDI, 0x08000000)                                                      \
+  /* Add Immediate */                                                          \
+  V(addi, ADDI, 0x38000000)                                                    \
+  /* Add Immediate Carrying */                                                 \
+  V(addic, ADDIC, 0x30000000)                                                  \
+  /* Add Immediate Carrying & record CR0 */                                    \
+  V(addicx, ADDICx, 0x34000000)                                                \
+  /* Add Immediate Shifted */                                                  \
+  V(addis, ADDIS, 0x3C000000)                                                  \
+  /* AND Immediate & record CR0 */                                             \
+  V(andix, ANDIx, 0x70000000)                                                  \
+  /* AND Immediate Shifted & record CR0 */                                     \
+  V(andisx, ANDISx, 0x74000000)                                                \
+  /* Compare Immediate */                                                      \
+  V(cmpi, CMPI, 0x2C000000)                                                    \
+  /* Compare Logical Immediate */                                              \
+  V(cmpli, CMPLI, 0x28000000)                                                  \
+  /* Load Byte and Zero */                                                     \
+  V(lbz, LBZ, 0x88000000)                                                      \
+  /* Load Byte and Zero with Update */                                         \
+  V(lbzu, LBZU, 0x8C000000)                                                    \
+  /* Load Halfword Algebraic */                                                \
+  V(lha, LHA, 0xA8000000)                                                      \
+  /* Load Halfword Algebraic with Update */                                    \
+  V(lhau, LHAU, 0xAC000000)                                                    \
+  /* Load Halfword and Zero */                                                 \
+  V(lhz, LHZ, 0xA0000000)                                                      \
+  /* Load Halfword and Zero with Update */                                     \
+  V(lhzu, LHZU, 0xA4000000)                                                    \
+  /* Load Multiple Word */                                                     \
+  V(lmw, LMW, 0xB8000000)                                                      \
+  /* Load Word and Zero */                                                     \
+  V(lwz, LWZ, 0x80000000)                                                      \
+  /* Load Word and Zero with Update */                                         \
+  V(lwzu, LWZU, 0x84000000)                                                    \
+  /* Multiply Low Immediate */                                                 \
+  V(mulli, MULLI, 0x1C000000)                                                  \
+  /* OR Immediate */                                                           \
+  V(ori, ORI, 0x60000000)                                                      \
+  /* OR Immediate Shifted */                                                   \
+  V(oris, ORIS, 0x64000000)                                                    \
+  /* Store Byte */                                                             \
+  V(stb, STB, 0x98000000)                                                      \
+  /* Store Byte with Update */                                                 \
+  V(stbu, STBU, 0x9C000000)                                                    \
+  /* Store Halfword */                                                         \
+  V(sth, STH, 0xB0000000)                                                      \
+  /* Store Halfword with Update */                                             \
+  V(sthu, STHU, 0xB4000000)                                                    \
+  /* Store Multiple Word */                                                    \
+  V(stmw, STMW, 0xBC000000)                                                    \
+  /* Store Word */                                                             \
+  V(stw, STW, 0x90000000)                                                      \
+  /* Store Word with Update */                                                 \
+  V(stwu, STWU, 0x94000000)                                                    \
+  /* Subtract From Immediate Carrying */                                       \
+  V(subfic, SUBFIC, 0x20000000)                                                \
+  /* Trap Word Immediate */                                                    \
+  V(twi, TWI, 0x0C000000)                                                      \
+  /* XOR Immediate */                                                          \
+  V(xori, XORI, 0x68000000)                                                    \
+  /* XOR Immediate Shifted */                                                  \
+  V(xoris, XORIS, 0x6C000000)                                                  \
+  /* Load Floating-Point Double */                                             \
+  V(lfd, LFD, 0xC8000000)                                                      \
+  /* Load Floating-Point Double with Update */                                 \
+  V(lfdu, LFDU, 0xCC000000)                                                    \
+  /* Load Floating-Point Single */                                             \
+  V(lfs, LFS, 0xC0000000)                                                      \
+  /* Load Floating-Point Single with Update */                                 \
+  V(lfsu, LFSU, 0xC4000000)                                                    \
+  /* Store Floating-Point Double */                                            \
+  V(stfd, STFD, 0xD8000000)                                                    \
+  /* Store Floating-Point Double with Update */                                \
+  V(stfdu, STFDU, 0xDC000000)                                                  \
+  /* Store Floating-Point Single */                                            \
+  V(stfs, STFS, 0xD0000000)                                                    \
+  /* Store Floating-Point Single with Update */                                \
+  V(stfsu, STFSU, 0xD4000000)
+
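+/* D-form values above hold only the 6-bit primary opcode (the low 26 bits
+   are clear); register fields and the 16-bit immediate are OR'd in when an
+   instruction is emitted. A hedged sketch of composing `addi r3, r4, 1`
+   from ADDI, with field shifts taken from the Power ISA rather than from
+   this header:
+
+     uint32_t instr = ADDI | (3u << 21)      // RT = r3
+                           | (4u << 16)      // RA = r4
+                           | (1u & 0xFFFF);  // SI = 1
+*/
+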
+#define PPC_XFL_OPCODE_LIST(V)                                                 \
+  /* Move To FPSCR Fields */                                                   \
+  V(mtfsf, MTFSF, 0xFC00058E)
+
+#define PPC_XFX_OPCODE_LIST(V)                                                 \
+  /* Move From Condition Register */                                           \
+  V(mfcr, MFCR, 0x7C000026)                                                    \
+  /* Move From One Condition Register Field */                                 \
+  V(mfocrf, MFOCRF, 0x7C100026)                                                \
+  /* Move From Special Purpose Register */                                     \
+  V(mfspr, MFSPR, 0x7C0002A6)                                                  \
+  /* Move To Condition Register Fields */                                      \
+  V(mtcrf, MTCRF, 0x7C000120)                                                  \
+  /* Move To One Condition Register Field */                                   \
+  V(mtocrf, MTOCRF, 0x7C100120)                                                \
+  /* Move To Special Purpose Register */                                       \
+  V(mtspr, MTSPR, 0x7C0003A6)                                                  \
+  /* Debugger Notify Halt */                                                   \
+  V(dnh, DNH, 0x4C00018C)                                                      \
+  /* Move From Device Control Register */                                      \
+  V(mfdcr, MFDCR, 0x7C000286)                                                  \
+  /* Move To Device Control Register */                                        \
+  V(mtdcr, MTDCR, 0x7C000386)                                                  \
+  /* Move From Performance Monitor Register */                                 \
+  V(mfpmr, MFPMR, 0x7C00029C)                                                  \
+  /* Move To Performance Monitor Register */                                   \
+  V(mtpmr, MTPMR, 0x7C00039C)                                                  \
+  /* Move From Branch History Rolling Buffer */                                \
+  V(mfbhrbe, MFBHRBE, 0x7C00025C)                                              \
+  /* Move From Time Base */                                                    \
+  V(mftb, MFTB, 0x7C0002E6)
+
+#define PPC_MDS_OPCODE_LIST(V)                                                 \
+  /* Rotate Left Doubleword then Clear Left */                                 \
+  V(rldcl, RLDCL, 0x78000010)                                                  \
+  /* Rotate Left Doubleword then Clear Right */                                \
+  V(rldcr, RLDCR, 0x78000012)
+
+#define PPC_A_OPCODE_LIST(V)                                                   \
+  /* Integer Select */                                                         \
+  V(isel, ISEL, 0x7C00001E)                                                    \
+  /* Floating Add */                                                           \
+  V(fadd, FADD, 0xFC00002A)                                                    \
+  /* Floating Add Single */                                                    \
+  V(fadds, FADDS, 0xEC00002A)                                                  \
+  /* Floating Divide */                                                        \
+  V(fdiv, FDIV, 0xFC000024)                                                    \
+  /* Floating Divide Single */                                                 \
+  V(fdivs, FDIVS, 0xEC000024)                                                  \
+  /* Floating Multiply-Add */                                                  \
+  V(fmadd, FMADD, 0xFC00003A)                                                  \
+  /* Floating Multiply-Add Single */                                           \
+  V(fmadds, FMADDS, 0xEC00003A)                                                \
+  /* Floating Multiply-Subtract */                                             \
+  V(fmsub, FMSUB, 0xFC000038)                                                  \
+  /* Floating Multiply-Subtract Single */                                      \
+  V(fmsubs, FMSUBS, 0xEC000038)                                                \
+  /* Floating Multiply */                                                      \
+  V(fmul, FMUL, 0xFC000032)                                                    \
+  /* Floating Multiply Single */                                               \
+  V(fmuls, FMULS, 0xEC000032)                                                  \
+  /* Floating Negative Multiply-Add */                                         \
+  V(fnmadd, FNMADD, 0xFC00003E)                                                \
+  /* Floating Negative Multiply-Add Single */                                  \
+  V(fnmadds, FNMADDS, 0xEC00003E)                                              \
+  /* Floating Negative Multiply-Subtract */                                    \
+  V(fnmsub, FNMSUB, 0xFC00003C)                                                \
+  /* Floating Negative Multiply-Subtract Single */                             \
+  V(fnmsubs, FNMSUBS, 0xEC00003C)                                              \
+  /* Floating Reciprocal Estimate Single */                                    \
+  V(fres, FRES, 0xEC000030)                                                    \
+  /* Floating Reciprocal Square Root Estimate */                               \
+  V(frsqrte, FRSQRTE, 0xFC000034)                                              \
+  /* Floating Select */                                                        \
+  V(fsel, FSEL, 0xFC00002E)                                                    \
+  /* Floating Square Root */                                                   \
+  V(fsqrt, FSQRT, 0xFC00002C)                                                  \
+  /* Floating Square Root Single */                                            \
+  V(fsqrts, FSQRTS, 0xEC00002C)                                                \
+  /* Floating Subtract */                                                      \
+  V(fsub, FSUB, 0xFC000028)                                                    \
+  /* Floating Subtract Single */                                               \
+  V(fsubs, FSUBS, 0xEC000028)                                                  \
+  /* Floating Reciprocal Estimate */                                           \
+  V(fre, FRE, 0xFC000030)                                                      \
+  /* Floating Reciprocal Square Root Estimate Single */                        \
+  V(frsqrtes, FRSQRTES, 0xEC000034)
+
+#define PPC_VA_OPCODE_LIST(V)                                                  \
+  /* Vector Add Extended & write Carry Unsigned Quadword */                    \
+  V(vaddecuq, VADDECUQ, 0x1000003D)                                            \
+  /* Vector Add Extended Unsigned Quadword Modulo */                           \
+  V(vaddeuqm, VADDEUQM, 0x1000003C)                                            \
+  /* Vector Multiply-Add Single-Precision */                                   \
+  V(vmaddfp, VMADDFP, 0x1000002E)                                              \
+  /* Vector Multiply-High-Add Signed Halfword Saturate */                      \
+  V(vmhaddshs, VMHADDSHS, 0x10000020)                                          \
+  /* Vector Multiply-High-Round-Add Signed Halfword Saturate */                \
+  V(vmhraddshs, VMHRADDSHS, 0x10000021)                                        \
+  /* Vector Multiply-Low-Add Unsigned Halfword Modulo */                       \
+  V(vmladduhm, VMLADDUHM, 0x10000022)                                          \
+  /* Vector Multiply-Sum Mixed Byte Modulo */                                  \
+  V(vmsummbm, VMSUMMBM, 0x10000025)                                            \
+  /* Vector Multiply-Sum Signed Halfword Modulo */                             \
+  V(vmsumshm, VMSUMSHM, 0x10000028)                                            \
+  /* Vector Multiply-Sum Signed Halfword Saturate */                           \
+  V(vmsumshs, VMSUMSHS, 0x10000029)                                            \
+  /* Vector Multiply-Sum Unsigned Byte Modulo */                               \
+  V(vmsumubm, VMSUMUBM, 0x10000024)                                            \
+  /* Vector Multiply-Sum Unsigned Halfword Modulo */                           \
+  V(vmsumuhm, VMSUMUHM, 0x10000026)                                            \
+  /* Vector Multiply-Sum Unsigned Halfword Saturate */                         \
+  V(vmsumuhs, VMSUMUHS, 0x10000027)                                            \
+  /* Vector Negative Multiply-Subtract Single-Precision */                     \
+  V(vnmsubfp, VNMSUBFP, 0x1000002F)                                            \
+  /* Vector Permute */                                                         \
+  V(vperm, VPERM, 0x1000002B)                                                  \
+  /* Vector Select */                                                          \
+  V(vsel, VSEL, 0x1000002A)                                                    \
+  /* Vector Shift Left Double by Octet Immediate */                            \
+  V(vsldoi, VSLDOI, 0x1000002C)                                                \
+  /* Vector Subtract Extended & write Carry Unsigned Quadword */               \
+  V(vsubecuq, VSUBECUQ, 0x1000003F)                                            \
+  /* Vector Subtract Extended Unsigned Quadword Modulo */                      \
+  V(vsubeuqm, VSUBEUQM, 0x1000003E)                                            \
+  /* Vector Permute and Exclusive-OR */                                        \
+  V(vpermxor, VPERMXOR, 0x1000002D)
+
+#define PPC_XX1_OPCODE_LIST(V)                                                 \
+  /* Load VSX Scalar Doubleword Indexed */                                     \
+  V(lxsdx, LXSDX, 0x7C000498)                                                  \
+  /* Load VSX Scalar as Integer Word Algebraic Indexed */                      \
+  V(lxsiwax, LXSIWAX, 0x7C000098)                                              \
+  /* Load VSX Scalar as Integer Word and Zero Indexed */                       \
+  V(lxsiwzx, LXSIWZX, 0x7C000018)                                              \
+  /* Load VSX Scalar Single-Precision Indexed */                               \
+  V(lxsspx, LXSSPX, 0x7C000418)                                                \
+  /* Load VSX Vector Doubleword*2 Indexed */                                   \
+  V(lxvd, LXVD, 0x7C000698)                                                    \
+  /* Load VSX Vector Doubleword & Splat Indexed */                             \
+  V(lxvdsx, LXVDSX, 0x7C000298)                                                \
+  /* Load VSX Vector Word*4 Indexed */                                         \
+  V(lxvw, LXVW, 0x7C000618)                                                    \
+  /* Move From VSR Doubleword */                                               \
+  V(mfvsrd, MFVSRD, 0x7C000066)                                                \
+  /* Move From VSR Word and Zero */                                            \
+  V(mfvsrwz, MFVSRWZ, 0x7C0000E6)                                              \
+  /* Store VSX Scalar Doubleword Indexed */                                    \
+  V(stxsdx, STXSDX, 0x7C000598)                                                \
+  /* Store VSX Scalar as Integer Word Indexed */                               \
+  V(stxsiwx, STXSIWX, 0x7C000118)                                              \
+  /* Store VSX Scalar Single-Precision Indexed */                              \
+  V(stxsspx, STXSSPX, 0x7C000518)                                              \
+  /* Store VSX Vector Doubleword*2 Indexed */                                  \
+  V(stxvd, STXVD, 0x7C000798)                                                  \
+  /* Store VSX Vector Word*4 Indexed */                                        \
+  V(stxvw, STXVW, 0x7C000718)
+
+#define PPC_B_OPCODE_LIST(V)                                                   \
+  /* Branch Conditional */                                                     \
+  V(bc, BCX, 0x40000000)
+
+#define PPC_XO_OPCODE_LIST(V)                                                  \
+  /* Divide Doubleword */                                                      \
+  V(divd, DIVD, 0x7C0003D2)                                                    \
+  /* Divide Doubleword Extended */                                             \
+  V(divde, DIVDE, 0x7C000352)                                                  \
+  /* Divide Doubleword Extended & record OV */                                 \
+  V(divdeo, DIVDEO, 0x7C000752)                                                \
+  /* Divide Doubleword Extended Unsigned */                                    \
+  V(divdeu, DIVDEU, 0x7C000312)                                                \
+  /* Divide Doubleword Extended Unsigned & record OV */                        \
+  V(divdeuo, DIVDEUO, 0x7C000712)                                              \
+  /* Divide Doubleword & record OV */                                          \
+  V(divdo, DIVDO, 0x7C0007D2)                                                  \
+  /* Divide Doubleword Unsigned */                                             \
+  V(divdu, DIVDU, 0x7C000392)                                                  \
+  /* Divide Doubleword Unsigned & record OV */                                 \
+  V(divduo, DIVDUO, 0x7C000792)                                                \
+  /* Multiply High Doubleword */                                               \
+  V(mulhd, MULHD, 0x7C000092)                                                  \
+  /* Multiply High Doubleword Unsigned */                                      \
+  V(mulhdu, MULHDU, 0x7C000012)                                                \
+  /* Multiply Low Doubleword */                                                \
+  V(mulld, MULLD, 0x7C0001D2)                                                  \
+  /* Multiply Low Doubleword & record OV */                                    \
+  V(mulldo, MULLDO, 0x7C0005D2)                                                \
+  /* Add */                                                                    \
+  V(add, ADDX, 0x7C000214)                                                     \
+  /* Add Carrying */                                                           \
+  V(addc, ADDCX, 0x7C000014)                                                   \
+  /* Add Carrying & record OV */                                               \
+  V(addco, ADDCO, 0x7C000414)                                                  \
+  /* Add Extended */                                                           \
+  V(adde, ADDEX, 0x7C000114)                                                   \
+  /* Add Extended & record OV */                                               \
+  V(addeo, ADDEO, 0x7C000514)                                                  \
+  /* Add to Minus One Extended */                                              \
+  V(addme, ADDME, 0x7C0001D4)                                                  \
+  /* Add to Minus One Extended & record OV */                                  \
+  V(addmeo, ADDMEO, 0x7C0005D4)                                                \
+  /* Add & record OV */                                                        \
+  V(addo, ADDO, 0x7C000614)                                                    \
+  /* Add to Zero Extended */                                                   \
+  V(addze, ADDZEX, 0x7C000194)                                                 \
+  /* Add to Zero Extended & record OV */                                       \
+  V(addzeo, ADDZEO, 0x7C000594)                                                \
+  /* Divide Word */                                                            \
+  V(divw, DIVW, 0x7C0003D6)                                                    \
+  /* Divide Word Extended */                                                   \
+  V(divwe, DIVWE, 0x7C000356)                                                  \
+  /* Divide Word Extended & record OV */                                       \
+  V(divweo, DIVWEO, 0x7C000756)                                                \
+  /* Divide Word Extended Unsigned */                                          \
+  V(divweu, DIVWEU, 0x7C000316)                                                \
+  /* Divide Word Extended Unsigned & record OV */                              \
+  V(divweuo, DIVWEUO, 0x7C000716)                                              \
+  /* Divide Word & record OV */                                                \
+  V(divwo, DIVWO, 0x7C0007D6)                                                  \
+  /* Divide Word Unsigned */                                                   \
+  V(divwu, DIVWU, 0x7C000396)                                                  \
+  /* Divide Word Unsigned & record OV */                                       \
+  V(divwuo, DIVWUO, 0x7C000796)                                                \
+  /* Multiply High Word */                                                     \
+  V(mulhw, MULHWX, 0x7C000096)                                                 \
+  /* Multiply High Word Unsigned */                                            \
+  V(mulhwu, MULHWUX, 0x7C000016)                                               \
+  /* Multiply Low Word */                                                      \
+  V(mullw, MULLW, 0x7C0001D6)                                                  \
+  /* Multiply Low Word & record OV */                                          \
+  V(mullwo, MULLWO, 0x7C0005D6)                                                \
+  /* Negate */                                                                 \
+  V(neg, NEGX, 0x7C0000D0)                                                     \
+  /* Negate & record OV */                                                     \
+  V(nego, NEGO, 0x7C0004D0)                                                    \
+  /* Subtract From */                                                          \
+  V(subf, SUBFX, 0x7C000050)                                                   \
+  /* Subtract From Carrying */                                                 \
+  V(subfc, SUBFCX, 0x7C000010)                                                 \
+  /* Subtract From Carrying & record OV */                                     \
+  V(subfco, SUBFCO, 0x7C000410)                                                \
+  /* Subtract From Extended */                                                 \
+  V(subfe, SUBFEX, 0x7C000110)                                                 \
+  /* Subtract From Extended & record OV */                                     \
+  V(subfeo, SUBFEO, 0x7C000510)                                                \
+  /* Subtract From Minus One Extended */                                       \
+  V(subfme, SUBFME, 0x7C0001D0)                                                \
+  /* Subtract From Minus One Extended & record OV */                           \
+  V(subfmeo, SUBFMEO, 0x7C0005D0)                                              \
+  /* Subtract From & record OV */                                              \
+  V(subfo, SUBFO, 0x7C000450)                                                  \
+  /* Subtract From Zero Extended */                                            \
+  V(subfze, SUBFZE, 0x7C000190)                                                \
+  /* Subtract From Zero Extended & record OV */                                \
+  V(subfzeo, SUBFZEO, 0x7C000590)                                              \
+  /* Add and Generate Sixes */                                                 \
+  V(addg, ADDG, 0x7C000094)                                                    \
+  /* Multiply Accumulate Cross Halfword to Word Modulo Signed */               \
+  V(macchw, MACCHW, 0x10000158)                                                \
+  /* Multiply Accumulate Cross Halfword to Word Modulo Signed & record OV */   \
+  V(macchwo, MACCHWO, 0x10000158)                                              \
+  /* Multiply Accumulate Cross Halfword to Word Saturate Signed */             \
+  V(macchws, MACCHWS, 0x100001D8)                                              \
+  /* Multiply Accumulate Cross Halfword to Word Saturate Signed & record */    \
+  /* OV */                                                                     \
+  V(macchwso, MACCHWSO, 0x100001D8)                                            \
+  /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned */           \
+  V(macchwsu, MACCHWSU, 0x10000198)                                            \
+  /* Multiply Accumulate Cross Halfword to Word Saturate Unsigned & record */  \
+  /* OV */                                                                     \
+  V(macchwsuo, MACCHWSUO, 0x10000198)                                          \
+  /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned */             \
+  V(macchwu, MACCHWU, 0x10000118)                                              \
+  /* Multiply Accumulate Cross Halfword to Word Modulo Unsigned & record */    \
+  /* OV */                                                                     \
+  V(macchwuo, MACCHWUO, 0x10000118)                                            \
+  /* Multiply Accumulate High Halfword to Word Modulo Signed */                \
+  V(machhw, MACHHW, 0x10000058)                                                \
+  /* Multiply Accumulate High Halfword to Word Modulo Signed & record OV */    \
+  V(machhwo, MACHHWO, 0x10000058)                                              \
+  /* Multiply Accumulate High Halfword to Word Saturate Signed */              \
+  V(machhws, MACHHWS, 0x100000D8)                                              \
+  /* Multiply Accumulate High Halfword to Word Saturate Signed & record OV */  \
+  V(machhwso, MACHHWSO, 0x100000D8)                                            \
+  /* Multiply Accumulate High Halfword to Word Saturate Unsigned */            \
+  V(machhwsu, MACHHWSU, 0x10000098)                                            \
+  /* Multiply Accumulate High Halfword to Word Saturate Unsigned & record */   \
+  /* OV */                                                                     \
+  V(machhwsuo, MACHHWSUO, 0x10000098)                                          \
+  /* Multiply Accumulate High Halfword to Word Modulo Unsigned */              \
+  V(machhwu, MACHHWU, 0x10000018)                                              \
+  /* Multiply Accumulate High Halfword to Word Modulo Unsigned & record OV */  \
+  V(machhwuo, MACHHWUO, 0x10000018)                                            \
+  /* Multiply Accumulate Low Halfword to Word Modulo Signed */                 \
+  V(maclhw, MACLHW, 0x10000358)                                                \
+  /* Multiply Accumulate Low Halfword to Word Modulo Signed & record OV */     \
+  V(maclhwo, MACLHWO, 0x10000358)                                              \
+  /* Multiply Accumulate Low Halfword to Word Saturate Signed */               \
+  V(maclhws, MACLHWS, 0x100003D8)                                              \
+  /* Multiply Accumulate Low Halfword to Word Saturate Signed & record OV */   \
+  V(maclhwso, MACLHWSO, 0x100003D8)                                            \
+  /* Multiply Accumulate Low Halfword to Word Saturate Unsigned */             \
+  V(maclhwsu, MACLHWSU, 0x10000398)                                            \
+  /* Multiply Accumulate Low Halfword to Word Saturate Unsigned & record */    \
+  /* OV */                                                                     \
+  V(maclhwsuo, MACLHWSUO, 0x10000398)                                          \
+  /* Multiply Accumulate Low Halfword to Word Modulo Unsigned */               \
+  V(maclhwu, MACLHWU, 0x10000318)                                              \
+  /* Multiply Accumulate Low Halfword to Word Modulo Unsigned & record OV */   \
+  V(maclhwuo, MACLHWUO, 0x10000318)                                            \
+  /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed */      \
+  V(nmacchw, NMACCHW, 0x1000015C)                                              \
+  /* Negative Multiply Accumulate Cross Halfword to Word Modulo Signed & */    \
+  /* record OV */                                                              \
+  V(nmacchwo, NMACCHWO, 0x1000015C)                                            \
+  /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed */    \
+  V(nmacchws, NMACCHWS, 0x100001DC)                                            \
+  /* Negative Multiply Accumulate Cross Halfword to Word Saturate Signed & */  \
+  /* record OV */                                                              \
+  V(nmacchwso, NMACCHWSO, 0x100001DC)                                          \
+  /* Negative Multiply Accumulate High Halfword to Word Modulo Signed */       \
+  V(nmachhw, NMACHHW, 0x1000005C)                                              \
+  /* Negative Multiply Accumulate High Halfword to Word Modulo Signed & */     \
+  /* record OV */                                                              \
+  V(nmachhwo, NMACHHWO, 0x1000005C)                                            \
+  /* Negative Multiply Accumulate High Halfword to Word Saturate Signed */     \
+  V(nmachhws, NMACHHWS, 0x100000DC)                                            \
+  /* Negative Multiply Accumulate High Halfword to Word Saturate Signed & */   \
+  /* record OV */                                                              \
+  V(nmachhwso, NMACHHWSO, 0x100000DC)                                          \
+  /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed */        \
+  V(nmaclhw, NMACLHW, 0x1000035C)                                              \
+  /* Negative Multiply Accumulate Low Halfword to Word Modulo Signed & */      \
+  /* record OV */                                                              \
+  V(nmaclhwo, NMACLHWO, 0x1000035C)                                            \
+  /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed */      \
+  V(nmaclhws, NMACLHWS, 0x100003DC)                                            \
+  /* Negative Multiply Accumulate Low Halfword to Word Saturate Signed & */    \
+  /* record OV */                                                              \
+  V(nmaclhwso, NMACLHWSO, 0x100003DC)
+
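+/* Note: the macchw-style pairs above list one value for both the plain and
+   "& record OV" variants (e.g. MACCHW and MACCHWO are both 0x10000158), so
+   the OE bit presumably has to be OR'd in separately; the divide/multiply
+   pairs, by contrast, already include OE (0x400) in their "o" values. */
+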
+#define PPC_XL_OPCODE_LIST(V)                                                  \
+  /* Branch Conditional to Count Register */                                   \
+  V(bcctr, BCCTRX, 0x4C000420)                                                 \
+  /* Branch Conditional to Link Register */                                    \
+  V(bclr, BCLRX, 0x4C000020)                                                   \
+  /* Condition Register AND */                                                 \
+  V(crand, CRAND, 0x4C000202)                                                  \
+  /* Condition Register AND with Complement */                                 \
+  V(crandc, CRANDC, 0x4C000102)                                                \
+  /* Condition Register Equivalent */                                          \
+  V(creqv, CREQV, 0x4C000242)                                                  \
+  /* Condition Register NAND */                                                \
+  V(crnand, CRNAND, 0x4C0001C2)                                                \
+  /* Condition Register NOR */                                                 \
+  V(crnor, CRNOR, 0x4C000042)                                                  \
+  /* Condition Register OR */                                                  \
+  V(cror, CROR, 0x4C000382)                                                    \
+  /* Condition Register OR with Complement */                                  \
+  V(crorc, CRORC, 0x4C000342)                                                  \
+  /* Condition Register XOR */                                                 \
+  V(crxor, CRXOR, 0x4C000182)                                                  \
+  /* Instruction Synchronize */                                                \
+  V(isync, ISYNC, 0x4C00012C)                                                  \
+  /* Move Condition Register Field */                                          \
+  V(mcrf, MCRF, 0x4C000000)                                                    \
+  /* Return From Critical Interrupt */                                         \
+  V(rfci, RFCI, 0x4C000066)                                                    \
+  /* Return From Interrupt */                                                  \
+  V(rfi, RFI, 0x4C000064)                                                      \
+  /* Return From Machine Check Interrupt */                                    \
+  V(rfmci, RFMCI, 0x4C00004C)                                                  \
+  /* Embedded Hypervisor Privilege */                                          \
+  V(ehpriv, EHPRIV, 0x7C00021C)                                                \
+  /* Return From Guest Interrupt */                                            \
+  V(rfgi, RFGI, 0x4C0000CC)                                                    \
+  /* Doze */                                                                   \
+  V(doze, DOZE, 0x4C000324)                                                    \
+  /* Return From Interrupt Doubleword Hypervisor */                            \
+  V(hrfid, HRFID, 0x4C000224)                                                  \
+  /* Nap */                                                                    \
+  V(nap, NAP, 0x4C000364)                                                      \
+  /* Return From Event Based Branch */                                         \
+  V(rfebb, RFEBB, 0x4C000124)                                                  \
+  /* Return From Interrupt Doubleword */                                       \
+  V(rfid, RFID, 0x4C000024)                                                    \
+  /* Rip Van Winkle */                                                         \
+  V(rvwinkle, RVWINKLE, 0x4C0003E4)                                            \
+  /* Sleep */                                                                  \
+  V(sleep, SLEEP, 0x4C0003A4)
+
+#define PPC_XX4_OPCODE_LIST(V)                                                 \
+  /* VSX Select */                                                             \
+  V(xxsel, XXSEL, 0xF0000030)
+
+#define PPC_I_OPCODE_LIST(V)                                                   \
+  /* Branch */                                                                 \
+  V(b, BX, 0x48000000)
+
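+/* I-form carries only the primary opcode, a 24-bit word offset (LI) and the
+   AA/LK bits. A hedged sketch of encoding `bl target` from BX, with masks
+   per the Power ISA rather than this header:
+
+     int32_t offset = 0x100;  // byte offset to target, word-aligned
+     uint32_t instr = BX | (offset & 0x03FFFFFC) | 1;  // LK = 1 => bl
+*/
+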
+#define PPC_M_OPCODE_LIST(V)                                                   \
+  /* Rotate Left Word Immediate then Mask Insert */                            \
+  V(rlwimi, RLWIMIX, 0x50000000)                                               \
+  /* Rotate Left Word Immediate then AND with Mask */                          \
+  V(rlwinm, RLWINMX, 0x54000000)                                               \
+  /* Rotate Left Word then AND with Mask */                                    \
+  V(rlwnm, RLWNMX, 0x5C000000)
+
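+/* M-form packs RS, RA, SH, MB and ME into consecutive 5-bit fields. A
+   hedged sketch of encoding `rlwinm r3, r4, 0, 24, 31` (extract the low
+   byte of r4) from RLWINMX, with shifts per the Power ISA rather than this
+   header:
+
+     uint32_t instr = RLWINMX | (4u << 21)   // RS = r4
+                              | (3u << 16)   // RA = r3
+                              | (0u << 11)   // SH = 0
+                              | (24u << 6)   // MB = 24
+                              | (31u << 1);  // ME = 31
+*/
+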
+#define PPC_VX_OPCODE_LIST(V)                                                  \
+  /* Decimal Add Modulo */                                                     \
+  V(bcdadd, BCDADD, 0xF0000400)                                                \
+  /* Decimal Subtract Modulo */                                                \
+  V(bcdsub, BCDSUB, 0xF0000440)                                                \
+  /* Move From Vector Status and Control Register */                           \
+  V(mfvscr, MFVSCR, 0x10000604)                                                \
+  /* Move To Vector Status and Control Register */                             \
+  V(mtvscr, MTVSCR, 0x10000644)                                                \
+  /* Vector Add & write Carry Unsigned Quadword */                             \
+  V(vaddcuq, VADDCUQ, 0x10000140)                                              \
+  /* Vector Add and Write Carry-Out Unsigned Word */                           \
+  V(vaddcuw, VADDCUW, 0x10000180)                                              \
+  /* Vector Add Single-Precision */                                            \
+  V(vaddfp, VADDFP, 0x1000000A)                                                \
+  /* Vector Add Signed Byte Saturate */                                        \
+  V(vaddsbs, VADDSBS, 0x10000300)                                              \
+  /* Vector Add Signed Halfword Saturate */                                    \
+  V(vaddshs, VADDSHS, 0x10000340)                                              \
+  /* Vector Add Signed Word Saturate */                                        \
+  V(vaddsws, VADDSWS, 0x10000380)                                              \
+  /* Vector Add Unsigned Byte Modulo */                                        \
+  V(vaddubm, VADDUBM, 0x10000000)                                              \
+  /* Vector Add Unsigned Byte Saturate */                                      \
+  V(vaddubs, VADDUBS, 0x10000200)                                              \
+  /* Vector Add Unsigned Doubleword Modulo */                                  \
+  V(vaddudm, VADDUDM, 0x100000C0)                                              \
+  /* Vector Add Unsigned Halfword Modulo */                                    \
+  V(vadduhm, VADDUHM, 0x10000040)                                              \
+  /* Vector Add Unsigned Halfword Saturate */                                  \
+  V(vadduhs, VADDUHS, 0x10000240)                                              \
+  /* Vector Add Unsigned Quadword Modulo */                                    \
+  V(vadduqm, VADDUQM, 0x10000100)                                              \
+  /* Vector Add Unsigned Word Modulo */                                        \
+  V(vadduwm, VADDUWM, 0x10000080)                                              \
+  /* Vector Add Unsigned Word Saturate */                                      \
+  V(vadduws, VADDUWS, 0x10000280)                                              \
+  /* Vector Logical AND */                                                     \
+  V(vand, VAND, 0x10000404)                                                    \
+  /* Vector Logical AND with Complement */                                     \
+  V(vandc, VANDC, 0x10000444)                                                  \
+  /* Vector Average Signed Byte */                                             \
+  V(vavgsb, VAVGSB, 0x10000502)                                                \
+  /* Vector Average Signed Halfword */                                         \
+  V(vavgsh, VAVGSH, 0x10000542)                                                \
+  /* Vector Average Signed Word */                                             \
+  V(vavgsw, VAVGSW, 0x10000582)                                                \
+  /* Vector Average Unsigned Byte */                                           \
+  V(vavgub, VAVGUB, 0x10000402)                                                \
+  /* Vector Average Unsigned Halfword */                                       \
+  V(vavguh, VAVGUH, 0x10000442)                                                \
+  /* Vector Average Unsigned Word */                                           \
+  V(vavguw, VAVGUW, 0x10000482)                                                \
+  /* Vector Bit Permute Quadword */                                            \
+  V(vbpermq, VBPERMQ, 0x1000054C)                                              \
+  /* Vector Convert From Signed Fixed-Point Word To Single-Precision */        \
+  V(vcfsx, VCFSX, 0x1000034A)                                                  \
+  /* Vector Convert From Unsigned Fixed-Point Word To Single-Precision */      \
+  V(vcfux, VCFUX, 0x1000030A)                                                  \
+  /* Vector Count Leading Zeros Byte */                                        \
+  V(vclzb, VCLZB, 0x10000702)                                                  \
+  /* Vector Count Leading Zeros Doubleword */                                  \
+  V(vclzd, VCLZD, 0x100007C2)                                                  \
+  /* Vector Count Leading Zeros Halfword */                                    \
+  V(vclzh, VCLZH, 0x10000742)                                                  \
+  /* Vector Count Leading Zeros Word */                                        \
+  V(vclzw, VCLZW, 0x10000782)                                                  \
+  /* Vector Convert From Single-Precision To Signed Fixed-Point Word */        \
+  /* Saturate */                                                               \
+  V(vctsxs, VCTSXS, 0x100003CA)                                                \
+  /* Vector Convert From Single-Precision To Unsigned Fixed-Point Word */      \
+  /* Saturate */                                                               \
+  V(vctuxs, VCTUXS, 0x1000038A)                                                \
+  /* Vector Equivalence */                                                     \
+  V(veqv, VEQV, 0x10000684)                                                    \
+  /* Vector 2 Raised to the Exponent Estimate Single-Precision */              \
+  V(vexptefp, VEXPTEFP, 0x1000018A)                                            \
+  /* Vector Gather Bits by Byte by Doubleword */                               \
+  V(vgbbd, VGBBD, 0x1000050C)                                                  \
+  /* Vector Log Base 2 Estimate Single-Precision */                            \
+  V(vlogefp, VLOGEFP, 0x100001CA)                                              \
+  /* Vector Maximum Single-Precision */                                        \
+  V(vmaxfp, VMAXFP, 0x1000040A)                                                \
+  /* Vector Maximum Signed Byte */                                             \
+  V(vmaxsb, VMAXSB, 0x10000102)                                                \
+  /* Vector Maximum Signed Doubleword */                                       \
+  V(vmaxsd, VMAXSD, 0x100001C2)                                                \
+  /* Vector Maximum Signed Halfword */                                         \
+  V(vmaxsh, VMAXSH, 0x10000142)                                                \
+  /* Vector Maximum Signed Word */                                             \
+  V(vmaxsw, VMAXSW, 0x10000182)                                                \
+  /* Vector Maximum Unsigned Byte */                                           \
+  V(vmaxub, VMAXUB, 0x10000002)                                                \
+  /* Vector Maximum Unsigned Doubleword */                                     \
+  V(vmaxud, VMAXUD, 0x100000C2)                                                \
+  /* Vector Maximum Unsigned Halfword */                                       \
+  V(vmaxuh, VMAXUH, 0x10000042)                                                \
+  /* Vector Maximum Unsigned Word */                                           \
+  V(vmaxuw, VMAXUW, 0x10000082)                                                \
+  /* Vector Minimum Single-Precision */                                        \
+  V(vminfp, VMINFP, 0x1000044A)                                                \
+  /* Vector Minimum Signed Byte */                                             \
+  V(vminsb, VMINSB, 0x10000302)                                                \
+  /* Vector Minimum Signed Halfword */                                         \
+  V(vminsh, VMINSH, 0x10000342)                                                \
+  /* Vector Minimum Signed Word */                                             \
+  V(vminsw, VMINSW, 0x10000382)                                                \
+  /* Vector Minimum Unsigned Byte */                                           \
+  V(vminub, VMINUB, 0x10000202)                                                \
+  /* Vector Minimum Unsigned Doubleword */                                     \
+  V(vminud, VMINUD, 0x100002C2)                                                \
+  /* Vector Minimum Unsigned Halfword */                                       \
+  V(vminuh, VMINUH, 0x10000242)                                                \
+  /* Vector Minimum Unsigned Word */                                           \
+  V(vminuw, VMINUW, 0x10000282)                                                \
+  /* Vector Merge High Byte */                                                 \
+  V(vmrghb, VMRGHB, 0x1000000C)                                                \
+  /* Vector Merge High Halfword */                                             \
+  V(vmrghh, VMRGHH, 0x1000004C)                                                \
+  /* Vector Merge High Word */                                                 \
+  V(vmrghw, VMRGHW, 0x1000008C)                                                \
+  /* Vector Merge Low Byte */                                                  \
+  V(vmrglb, VMRGLB, 0x1000010C)                                                \
+  /* Vector Merge Low Halfword */                                              \
+  V(vmrglh, VMRGLH, 0x1000014C)                                                \
+  /* Vector Merge Low Word */                                                  \
+  V(vmrglw, VMRGLW, 0x1000018C)                                                \
+  /* Vector Multiply Even Signed Byte */                                       \
+  V(vmulesb, VMULESB, 0x10000308)                                              \
+  /* Vector Multiply Even Signed Halfword */                                   \
+  V(vmulesh, VMULESH, 0x10000348)                                              \
+  /* Vector Multiply Even Signed Word */                                       \
+  V(vmulesw, VMULESW, 0x10000388)                                              \
+  /* Vector Multiply Even Unsigned Byte */                                     \
+  V(vmuleub, VMULEUB, 0x10000208)                                              \
+  /* Vector Multiply Even Unsigned Halfword */                                 \
+  V(vmuleuh, VMULEUH, 0x10000248)                                              \
+  /* Vector Multiply Even Unsigned Word */                                     \
+  V(vmuleuw, VMULEUW, 0x10000288)                                              \
+  /* Vector Multiply Odd Signed Byte */                                        \
+  V(vmulosb, VMULOSB, 0x10000108)                                              \
+  /* Vector Multiply Odd Signed Halfword */                                    \
+  V(vmulosh, VMULOSH, 0x10000148)                                              \
+  /* Vector Multiply Odd Signed Word */                                        \
+  V(vmulosw, VMULOSW, 0x10000188)                                              \
+  /* Vector Multiply Odd Unsigned Byte */                                      \
+  V(vmuloub, VMULOUB, 0x10000008)                                              \
+  /* Vector Multiply Odd Unsigned Halfword */                                  \
+  V(vmulouh, VMULOUH, 0x10000048)                                              \
+  /* Vector Multiply Odd Unsigned Word */                                      \
+  V(vmulouw, VMULOUW, 0x10000088)                                              \
+  /* Vector Multiply Unsigned Word Modulo */                                   \
+  V(vmuluwm, VMULUWM, 0x10000089)                                              \
+  /* Vector NAND */                                                            \
+  V(vnand, VNAND, 0x10000584)                                                  \
+  /* Vector Logical NOR */                                                     \
+  V(vnor, VNOR, 0x10000504)                                                    \
+  /* Vector Logical OR */                                                      \
+  V(vor, VOR, 0x10000484)                                                      \
+  /* Vector OR with Complement */                                              \
+  V(vorc, VORC, 0x10000544)                                                    \
+  /* Vector Pack Pixel */                                                      \
+  V(vpkpx, VPKPX, 0x1000030E)                                                  \
+  /* Vector Pack Signed Doubleword Signed Saturate */                          \
+  V(vpksdss, VPKSDSS, 0x100005CE)                                              \
+  /* Vector Pack Signed Doubleword Unsigned Saturate */                        \
+  V(vpksdus, VPKSDUS, 0x1000054E)                                              \
+  /* Vector Pack Signed Halfword Signed Saturate */                            \
+  V(vpkshss, VPKSHSS, 0x1000018E)                                              \
+  /* Vector Pack Signed Halfword Unsigned Saturate */                          \
+  V(vpkshus, VPKSHUS, 0x1000010E)                                              \
+  /* Vector Pack Signed Word Signed Saturate */                                \
+  V(vpkswss, VPKSWSS, 0x100001CE)                                              \
+  /* Vector Pack Signed Word Unsigned Saturate */                              \
+  V(vpkswus, VPKSWUS, 0x1000014E)                                              \
+  /* Vector Pack Unsigned Doubleword Unsigned Modulo */                        \
+  V(vpkudum, VPKUDUM, 0x1000044E)                                              \
+  /* Vector Pack Unsigned Doubleword Unsigned Saturate */                      \
+  V(vpkudus, VPKUDUS, 0x100004CE)                                              \
+  /* Vector Pack Unsigned Halfword Unsigned Modulo */                          \
+  V(vpkuhum, VPKUHUM, 0x1000000E)                                              \
+  /* Vector Pack Unsigned Halfword Unsigned Saturate */                        \
+  V(vpkuhus, VPKUHUS, 0x1000008E)                                              \
+  /* Vector Pack Unsigned Word Unsigned Modulo */                              \
+  V(vpkuwum, VPKUWUM, 0x1000004E)                                              \
+  /* Vector Pack Unsigned Word Unsigned Saturate */                            \
+  V(vpkuwus, VPKUWUS, 0x100000CE)                                              \
+  /* Vector Polynomial Multiply-Sum Byte */                                    \
+  V(vpmsumb, VPMSUMB, 0x10000408)                                              \
+  /* Vector Polynomial Multiply-Sum Doubleword */                              \
+  V(vpmsumd, VPMSUMD, 0x100004C8)                                              \
+  /* Vector Polynomial Multiply-Sum Halfword */                                \
+  V(vpmsumh, VPMSUMH, 0x10000448)                                              \
+  /* Vector Polynomial Multiply-Sum Word */                                    \
+  V(vpmsumw, VPMSUMW, 0x10000488)                                              \
+  /* Vector Population Count Byte */                                           \
+  V(vpopcntb, VPOPCNTB, 0x10000703)                                            \
+  /* Vector Population Count Doubleword */                                     \
+  V(vpopcntd, VPOPCNTD, 0x100007C3)                                            \
+  /* Vector Population Count Halfword */                                       \
+  V(vpopcnth, VPOPCNTH, 0x10000743)                                            \
+  /* Vector Population Count Word */                                           \
+  V(vpopcntw, VPOPCNTW, 0x10000783)                                            \
+  /* Vector Reciprocal Estimate Single-Precision */                            \
+  V(vrefp, VREFP, 0x1000010A)                                                  \
+  /* Vector Round to Single-Precision Integer toward -Infinity */              \
+  V(vrfim, VRFIM, 0x100002CA)                                                  \
+  /* Vector Round to Single-Precision Integer Nearest */                       \
+  V(vrfin, VRFIN, 0x1000020A)                                                  \
+  /* Vector Round to Single-Precision Integer toward +Infinity */              \
+  V(vrfip, VRFIP, 0x1000028A)                                                  \
+  /* Vector Round to Single-Precision Integer toward Zero */                   \
+  V(vrfiz, VRFIZ, 0x1000024A)                                                  \
+  /* Vector Rotate Left Byte */                                                \
+  V(vrlb, VRLB, 0x10000004)                                                    \
+  /* Vector Rotate Left Doubleword */                                          \
+  V(vrld, VRLD, 0x100000C4)                                                    \
+  /* Vector Rotate Left Halfword */                                            \
+  V(vrlh, VRLH, 0x10000044)                                                    \
+  /* Vector Rotate Left Word */                                                \
+  V(vrlw, VRLW, 0x10000084)                                                    \
+  /* Vector Reciprocal Square Root Estimate Single-Precision */                \
+  V(vrsqrtefp, VRSQRTEFP, 0x1000014A)                                          \
+  /* Vector Shift Left */                                                      \
+  V(vsl, VSL, 0x100001C4)                                                      \
+  /* Vector Shift Left Byte */                                                 \
+  V(vslb, VSLB, 0x10000104)                                                    \
+  /* Vector Shift Left Doubleword */                                           \
+  V(vsld, VSLD, 0x100005C4)                                                    \
+  /* Vector Shift Left Halfword */                                             \
+  V(vslh, VSLH, 0x10000144)                                                    \
+  /* Vector Shift Left by Octet */                                             \
+  V(vslo, VSLO, 0x1000040C)                                                    \
+  /* Vector Shift Left Word */                                                 \
+  V(vslw, VSLW, 0x10000184)                                                    \
+  /* Vector Splat Byte */                                                      \
+  V(vspltb, VSPLTB, 0x1000020C)                                                \
+  /* Vector Splat Halfword */                                                  \
+  V(vsplth, VSPLTH, 0x1000024C)                                                \
+  /* Vector Splat Immediate Signed Byte */                                     \
+  V(vspltisb, VSPLTISB, 0x1000030C)                                            \
+  /* Vector Splat Immediate Signed Halfword */                                 \
+  V(vspltish, VSPLTISH, 0x1000034C)                                            \
+  /* Vector Splat Immediate Signed Word */                                     \
+  V(vspltisw, VSPLTISW, 0x1000038C)                                            \
+  /* Vector Splat Word */                                                      \
+  V(vspltw, VSPLTW, 0x1000028C)                                                \
+  /* Vector Shift Right */                                                     \
+  V(vsr, VSR, 0x100002C4)                                                      \
+  /* Vector Shift Right Algebraic Byte */                                      \
+  V(vsrab, VSRAB, 0x10000304)                                                  \
+  /* Vector Shift Right Algebraic Doubleword */                                \
+  V(vsrad, VSRAD, 0x100003C4)                                                  \
+  /* Vector Shift Right Algebraic Halfword */                                  \
+  V(vsrah, VSRAH, 0x10000344)                                                  \
+  /* Vector Shift Right Algebraic Word */                                      \
+  V(vsraw, VSRAW, 0x10000384)                                                  \
+  /* Vector Shift Right Byte */                                                \
+  V(vsrb, VSRB, 0x10000204)                                                    \
+  /* Vector Shift Right Doubleword */                                          \
+  V(vsrd, VSRD, 0x100006C4)                                                    \
+  /* Vector Shift Right Halfword */                                            \
+  V(vsrh, VSRH, 0x10000244)                                                    \
+  /* Vector Shift Right by Octet */                                            \
+  V(vsro, VSRO, 0x1000044C)                                                    \
+  /* Vector Shift Right Word */                                                \
+  V(vsrw, VSRW, 0x10000284)                                                    \
+  /* Vector Subtract & write Carry Unsigned Quadword */                        \
+  V(vsubcuq, VSUBCUQ, 0x10000540)                                              \
+  /* Vector Subtract and Write Carry-Out Unsigned Word */                      \
+  V(vsubcuw, VSUBCUW, 0x10000580)                                              \
+  /* Vector Subtract Single-Precision */                                       \
+  V(vsubfp, VSUBFP, 0x1000004A)                                                \
+  /* Vector Subtract Signed Byte Saturate */                                   \
+  V(vsubsbs, VSUBSBS, 0x10000700)                                              \
+  /* Vector Subtract Signed Halfword Saturate */                               \
+  V(vsubshs, VSUBSHS, 0x10000740)                                              \
+  /* Vector Subtract Signed Word Saturate */                                   \
+  V(vsubsws, VSUBSWS, 0x10000780)                                              \
+  /* Vector Subtract Unsigned Byte Modulo */                                   \
+  V(vsububm, VSUBUBM, 0x10000400)                                              \
+  /* Vector Subtract Unsigned Byte Saturate */                                 \
+  V(vsububs, VSUBUBS, 0x10000600)                                              \
+  /* Vector Subtract Unsigned Doubleword Modulo */                             \
+  V(vsubudm, VSUBUDM, 0x100004C0)                                              \
+  /* Vector Subtract Unsigned Halfword Modulo */                               \
+  V(vsubuhm, VSUBUHM, 0x10000440)                                              \
+  /* Vector Subtract Unsigned Halfword Saturate */                             \
+  V(vsubuhs, VSUBUHS, 0x10000640)                                              \
+  /* Vector Subtract Unsigned Quadword Modulo */                               \
+  V(vsubuqm, VSUBUQM, 0x10000500)                                              \
+  /* Vector Subtract Unsigned Word Modulo */                                   \
+  V(vsubuwm, VSUBUWM, 0x10000480)                                              \
+  /* Vector Subtract Unsigned Word Saturate */                                 \
+  V(vsubuws, VSUBUWS, 0x10000680)                                              \
+  /* Vector Sum across Half Signed Word Saturate */                            \
+  V(vsum2sws, VSUM2SWS, 0x10000688)                                            \
+  /* Vector Sum across Quarter Signed Byte Saturate */                         \
+  V(vsum4sbs, VSUM4SBS, 0x10000708)                                            \
+  /* Vector Sum across Quarter Signed Halfword Saturate */                     \
+  V(vsum4shs, VSUM4SHS, 0x10000648)                                            \
+  /* Vector Sum across Quarter Unsigned Byte Saturate */                       \
+  V(vsum4bus, VSUM4BUS, 0x10000608)                                            \
+  /* Vector Sum across Signed Word Saturate */                                 \
+  V(vsumsws, VSUMSWS, 0x10000788)                                              \
+  /* Vector Unpack High Pixel */                                               \
+  V(vupkhpx, VUPKHPX, 0x1000034E)                                              \
+  /* Vector Unpack High Signed Byte */                                         \
+  V(vupkhsb, VUPKHSB, 0x1000020E)                                              \
+  /* Vector Unpack High Signed Halfword */                                     \
+  V(vupkhsh, VUPKHSH, 0x1000024E)                                              \
+  /* Vector Unpack High Signed Word */                                         \
+  V(vupkhsw, VUPKHSW, 0x1000064E)                                              \
+  /* Vector Unpack Low Pixel */                                                \
+  V(vupklpx, VUPKLPX, 0x100003CE)                                              \
+  /* Vector Unpack Low Signed Byte */                                          \
+  V(vupklsb, VUPKLSB, 0x1000028E)                                              \
+  /* Vector Unpack Low Signed Halfword */                                      \
+  V(vupklsh, VUPKLSH, 0x100002CE)                                              \
+  /* Vector Unpack Low Signed Word */                                          \
+  V(vupklsw, VUPKLSW, 0x100006CE)                                              \
+  /* Vector Logical XOR */                                                     \
+  V(vxor, VXOR, 0x100004C4)                                                    \
+  /* Vector AES Cipher */                                                      \
+  V(vcipher, VCIPHER, 0x10000508)                                              \
+  /* Vector AES Cipher Last */                                                 \
+  V(vcipherlast, VCIPHERLAST, 0x10000509)                                      \
+  /* Vector AES Inverse Cipher */                                              \
+  V(vncipher, VNCIPHER, 0x10000548)                                            \
+  /* Vector AES Inverse Cipher Last */                                         \
+  V(vncipherlast, VNCIPHERLAST, 0x10000549)                                    \
+  /* Vector AES S-Box */                                                       \
+  V(vsbox, VSBOX, 0x100005C8)                                                  \
+  /* Vector SHA-512 Sigma Doubleword */                                        \
+  V(vshasigmad, VSHASIGMAD, 0x100006C2)                                        \
+  /* Vector SHA-256 Sigma Word */                                              \
+  V(vshasigmaw, VSHASIGMAW, 0x10000682)                                        \
+  /* Vector Merge Even Word */                                                 \
+  V(vmrgew, VMRGEW, 0x1000078C)                                                \
+  /* Vector Merge Odd Word */                                                  \
+  V(vmrgow, VMRGOW, 0x1000068C)
+
+#define PPC_XS_OPCODE_LIST(V)                                                  \
+  /* Shift Right Algebraic Doubleword Immediate */                             \
+  V(sradi, SRADIX, 0x7C000674)
+
+#define PPC_MD_OPCODE_LIST(V)                                                  \
+  /* Rotate Left Doubleword Immediate then Clear */                            \
+  V(rldic, RLDIC, 0x78000008)                                                  \
+  /* Rotate Left Doubleword Immediate then Clear Left */                       \
+  V(rldicl, RLDICL, 0x78000000)                                                \
+  /* Rotate Left Doubleword Immediate then Clear Right */                      \
+  V(rldicr, RLDICR, 0x78000004)                                                \
+  /* Rotate Left Doubleword Immediate then Mask Insert */                      \
+  V(rldimi, RLDIMI, 0x7800000C)
+
+#define PPC_SC_OPCODE_LIST(V)                                                  \
+  /* System Call */                                                            \
+  V(sc, SC, 0x44000002)
+
+
+#define PPC_OPCODE_LIST(V)    \
+  PPC_X_OPCODE_LIST(V)        \
+  PPC_XO_OPCODE_LIST(V)       \
+  PPC_DS_OPCODE_LIST(V)       \
+  PPC_MDS_OPCODE_LIST(V)      \
+  PPC_MD_OPCODE_LIST(V)       \
+  PPC_XS_OPCODE_LIST(V)       \
+  PPC_D_OPCODE_LIST(V)        \
+  PPC_I_OPCODE_LIST(V)        \
+  PPC_B_OPCODE_LIST(V)        \
+  PPC_XL_OPCODE_LIST(V)       \
+  PPC_A_OPCODE_LIST(V)        \
+  PPC_XFX_OPCODE_LIST(V)      \
+  PPC_M_OPCODE_LIST(V)        \
+  PPC_SC_OPCODE_LIST(V)       \
+  PPC_Z23_OPCODE_LIST(V)      \
+  PPC_Z22_OPCODE_LIST(V)      \
+  PPC_EVX_OPCODE_LIST(V)      \
+  PPC_XFL_OPCODE_LIST(V)      \
+  PPC_EVS_OPCODE_LIST(V)      \
+  PPC_VX_OPCODE_LIST(V)       \
+  PPC_VA_OPCODE_LIST(V)       \
+  PPC_VC_OPCODE_LIST(V)       \
+  PPC_XX1_OPCODE_LIST(V)      \
+  PPC_XX2_OPCODE_LIST(V)      \
+  PPC_XX3_OPCODE_LIST(V)      \
+  PPC_XX4_OPCODE_LIST(V)
+
+
+enum Opcode : uint32_t {
+#define DECLARE_INSTRUCTION(name, opcode_name, opcode_value)                   \
+  opcode_name = opcode_value,
+  PPC_OPCODE_LIST(DECLARE_INSTRUCTION)
+#undef DECLARE_INSTRUCTION
+  EXT1 = 0x4C000000,   // Extended code set 1
+  EXT2 = 0x7C000000,   // Extended code set 2
+  EXT3 = 0xEC000000,   // Extended code set 3
+  EXT4 = 0xFC000000,   // Extended code set 4
+  EXT5 = 0x78000000,   // Extended code set 5 - 64-bit only
+  EXT6 = 0xF0000000,   // Extended code set 6
 };
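A quick aside on the X-macro pattern above: DECLARE_INSTRUCTION turns each
V(name, OPCODE_NAME, value) row into one enumerator. A minimal standalone
sketch (illustrative only; the DEMO names are hypothetical, the two opcode
values are copied from the VX list above):

  #include <cstdint>

  #define DEMO_OPCODE_LIST(V)  \
    V(vor, VOR, 0x10000484)    \
    V(vxor, VXOR, 0x100004C4)

  enum DemoOpcode : uint32_t {
  #define DECLARE_INSTRUCTION(name, opcode_name, opcode_value) \
    opcode_name = opcode_value,
    DEMO_OPCODE_LIST(DECLARE_INSTRUCTION)
  #undef DECLARE_INSTRUCTION
  };
  // After preprocessing the enum body is simply:
  //   VOR = 0x10000484, VXOR = 0x100004C4,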
 
 // Instruction encoding bits and masks.
 enum {
   // Instruction encoding bit
   B1 = 1 << 1,
+  B2 = 1 << 2,
+  B3 = 1 << 3,
   B4 = 1 << 4,
   B5 = 1 << 5,
   B7 = 1 << 7,
@@ -523,7 +2787,7 @@
   }
 
   // Read a bit field out of the instruction bits.
-  inline int BitField(int hi, int lo) const {
+  inline uint32_t BitField(int hi, int lo) const {
     return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
   }
 
@@ -539,7 +2803,7 @@
 
 
   // Read a bit field out of the instruction bits.
-  static inline int BitField(Instr instr, int hi, int lo) {
+  static inline uint32_t BitField(Instr instr, int hi, int lo) {
     return instr & (((2 << (hi - lo)) - 1) << lo);
   }
 
@@ -554,7 +2818,7 @@
 
   inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); }
   inline Opcode OpcodeField() const {
-    return static_cast<Opcode>(BitField(24, 21));
+    return static_cast<Opcode>(BitField(31, 26));
   }
 
   // Fields used in Software interrupt instructions
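A note on the BitField() return-type change above: for hi = 31 the mask
covers the sign bit, so an int result would be negative and comparisons
against the uint32_t Opcode enumerators (EXT6 = 0xF0000000 and friends)
would go wrong after integer conversion. That is also why OpcodeField()
now reads BitField(31, 26): the 6-bit primary opcode sits in bits 31..26,
which the old (24, 21) range did not cover. A hedged standalone sketch
(the mask is computed in unsigned arithmetic here, sidestepping the
signed shift the in-tree expression performs):

  #include <cassert>
  #include <cstdint>

  // Mirrors Instruction::BitField(hi, lo) from the hunk above.
  static uint32_t BitFieldDemo(uint32_t instr, int hi, int lo) {
    return instr & (((2u << (hi - lo)) - 1u) << lo);
  }

  int main() {
    uint32_t instr = 0xF0000123u;  // hypothetical EXT6-class word
    assert(BitFieldDemo(instr, 31, 26) == 0xF0000000u);
    return 0;
  }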
diff --git a/src/ppc/deoptimizer-ppc.cc b/src/ppc/deoptimizer-ppc.cc
index 39102a1..b96dc6f 100644
--- a/src/ppc/deoptimizer-ppc.cc
+++ b/src/ppc/deoptimizer-ppc.cc
@@ -101,7 +101,7 @@
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
   for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
-    double double_value = input_->GetDoubleRegister(i);
+    Float64 double_value = input_->GetDoubleRegister(i);
     output_frame->SetDoubleRegister(i, double_value);
   }
 }
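The double -> Float64 change above is about bit-exactness: round-tripping
deoptimizer register state through a host double can quieten a signalling
NaN, while a raw 64-bit container keeps the payload intact. A minimal
sketch of the idea (Float64Demo is a hypothetical stand-in for V8's
Float64 type):

  #include <cstdint>

  struct Float64Demo {     // holds the raw IEEE-754 bit pattern
    uint64_t bit_pattern;
  };

  static Float64Demo GetDoubleRegisterDemo(const uint64_t* regs, int i) {
    return Float64Demo{regs[i]};  // no float conversion; bits preserved
  }

  static void SetDoubleRegisterDemo(uint64_t* regs, int i, Float64Demo v) {
    regs[i] = v.bit_pattern;      // sNaN payloads survive the copy
  }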
diff --git a/src/ppc/disasm-ppc.cc b/src/ppc/disasm-ppc.cc
index c0a02a8..6baf3d0 100644
--- a/src/ppc/disasm-ppc.cc
+++ b/src/ppc/disasm-ppc.cc
@@ -82,6 +82,7 @@
   void DecodeExt3(Instruction* instr);
   void DecodeExt4(Instruction* instr);
   void DecodeExt5(Instruction* instr);
+  void DecodeExt6(Instruction* instr);
 
   const disasm::NameConverter& converter_;
   Vector<char> out_buffer_;
@@ -370,13 +371,13 @@
 
 
 void Decoder::DecodeExt1(Instruction* instr) {
-  switch (instr->Bits(10, 1) << 1) {
+  switch (EXT1 | (instr->BitField(10, 1))) {
     case MCRF: {
       UnknownFormat(instr, "mcrf");  // not used by V8
       break;
     }
     case BCLRX: {
-      int bo = instr->Bits(25, 21) << 21;
+      int bo = instr->BitField(25, 21);
       int bi = instr->Bits(20, 16);
       CRBit cond = static_cast<CRBit>(bi & (CRWIDTH - 1));
       switch (bo) {
@@ -446,7 +447,7 @@
       break;
     }
     case BCCTRX: {
-      switch (instr->Bits(25, 21) << 21) {
+      switch (instr->BitField(25, 21)) {
         case DCBNZF: {
           UnknownFormat(instr, "bcctrx-dcbnzf");
           break;
@@ -540,7 +541,7 @@
 
 void Decoder::DecodeExt2(Instruction* instr) {
   // Some encodings are 10-1 bits, handle those first
-  switch (instr->Bits(10, 1) << 1) {
+  switch (EXT2 | (instr->BitField(10, 1))) {
     case SRWX: {
       Format(instr, "srw'.    'ra, 'rs, 'rb");
       return;
@@ -561,6 +562,24 @@
       return;
     }
 #endif
+    case MODSW: {
+      Format(instr, "modsw  'rt, 'ra, 'rb");
+      return;
+    }
+    case MODUW: {
+      Format(instr, "moduw  'rt, 'ra, 'rb");
+      return;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case MODSD: {
+      Format(instr, "modsd  'rt, 'ra, 'rb");
+      return;
+    }
+    case MODUD: {
+      Format(instr, "modud  'rt, 'ra, 'rb");
+      return;
+    }
+#endif
     case SRAWIX: {
       Format(instr, "srawi'.  'ra,'rs,'sh");
       return;
@@ -623,7 +642,7 @@
 #endif
   }
 
-  switch (instr->Bits(10, 2) << 2) {
+  switch (EXT2 | (instr->BitField(10, 2))) {
     case SRADIX: {
       Format(instr, "sradi'.  'ra,'rs,'sh");
       return;
@@ -631,7 +650,7 @@
   }
 
   // ?? are all of these xo_form?
-  switch (instr->Bits(9, 1) << 1) {
+  switch (EXT2 | (instr->BitField(9, 1))) {
     case CMP: {
 #if V8_TARGET_ARCH_PPC64
       if (instr->Bit(21)) {
@@ -880,7 +899,7 @@
 #endif
   }
 
-  switch (instr->Bits(5, 1) << 1) {
+  switch (EXT2 | (instr->BitField(5, 1))) {
     case ISEL: {
       Format(instr, "isel    'rt, 'ra, 'rb");
       return;
@@ -893,7 +912,7 @@
 
 
 void Decoder::DecodeExt3(Instruction* instr) {
-  switch (instr->Bits(10, 1) << 1) {
+  switch (EXT3 | (instr->BitField(10, 1))) {
     case FCFID: {
       Format(instr, "fcfids'. 'Dt, 'Db");
       break;
@@ -910,7 +929,7 @@
 
 
 void Decoder::DecodeExt4(Instruction* instr) {
-  switch (instr->Bits(5, 1) << 1) {
+  switch (EXT4 | (instr->BitField(5, 1))) {
     case FDIV: {
       Format(instr, "fdiv'.   'Dt, 'Da, 'Db");
       return;
@@ -945,7 +964,7 @@
     }
   }
 
-  switch (instr->Bits(10, 1) << 1) {
+  switch (EXT4 | (instr->BitField(10, 1))) {
     case FCMPU: {
       Format(instr, "fcmpu   'Da, 'Db");
       break;
@@ -1046,7 +1065,7 @@
 
 
 void Decoder::DecodeExt5(Instruction* instr) {
-  switch (instr->Bits(4, 2) << 2) {
+  switch (EXT5 | (instr->BitField(4, 2))) {
     case RLDICL: {
       Format(instr, "rldicl'. 'ra, 'rs, 'sh, 'mb");
       return;
@@ -1064,7 +1083,7 @@
       return;
     }
   }
-  switch (instr->Bits(4, 1) << 1) {
+  switch (EXT5 | (instr->BitField(4, 1))) {
     case RLDCL: {
       Format(instr, "rldcl'.  'ra, 'rs, 'sb, 'mb");
       return;
@@ -1073,6 +1092,28 @@
   Unknown(instr);  // not used by V8
 }
 
+void Decoder::DecodeExt6(Instruction* instr) {
+  switch (EXT6 | (instr->BitField(10, 3))) {
+#define DECODE_XX3_INSTRUCTIONS(name, opcode_name, opcode_value) \
+  case opcode_name: {                                            \
+    Format(instr, #name" 'Dt, 'Da, 'Db");                        \
+    return;                                                      \
+  }
+    PPC_XX3_OPCODE_LIST(DECODE_XX3_INSTRUCTIONS)
+#undef DECODE_XX3_INSTRUCTIONS
+  }
+  switch (EXT6 | (instr->BitField(10, 2))) {
+#define DECODE_XX2_INSTRUCTIONS(name, opcode_name, opcode_value) \
+  case opcode_name: {                                            \
+    Format(instr, #name" 'Dt, 'Db");                             \
+    return;                                                      \
+  }
+    PPC_XX2_OPCODE_LIST(DECODE_XX2_INSTRUCTIONS)
+  }
+#undef DECODE_XX2_INSTRUCTIONS
+  Unknown(instr);  // not used by V8
+}
+
 #undef VERIFIY
 
 // Disassemble the instruction at *instr_ptr into the output buffer.
@@ -1089,7 +1130,8 @@
     return Instruction::kInstrSize;
   }
 
-  switch (instr->OpcodeValue() << 26) {
+  uint32_t opcode = instr->OpcodeValue() << 26;
+  switch (opcode) {
     case TWI: {
       PrintSoftwareInterrupt(instr->SvcValue());
       break;
@@ -1360,6 +1402,10 @@
       DecodeExt5(instr);
       break;
     }
+    case EXT6: {
+      DecodeExt6(instr);
+      break;
+    }
 #if V8_TARGET_ARCH_PPC64
     case LD: {
       switch (instr->Bits(1, 0)) {
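Taken together, the disassembler changes above move the decode keys from
bare shifted bit fields to full opcode values: the extended-set prefix
(EXT1..EXT6) is OR'ed with the relevant bit field, so the case labels can
be the Opcode enumerators themselves. That in turn lets DecodeExt6 stamp
out its cases directly from the XX3/XX2 opcode lists. A hedged sketch of
that expansion (XSADDDP_DEMO and its value are assumptions made for
illustration; the real entries live in PPC_XX3_OPCODE_LIST, outside this
excerpt):

  #include <cstdint>
  #include <cstdio>

  enum DemoOpcode : uint32_t {
    EXT6_DEMO = 0xF0000000,
    XSADDDP_DEMO = 0xF0000100,  // assumed value
  };

  #define DEMO_XX3_LIST(V) V(xsadddp, XSADDDP_DEMO, 0xF0000100)

  static void DecodeExt6Demo(uint32_t ext6_prefix, uint32_t bitfield_10_3) {
    switch (ext6_prefix | bitfield_10_3) {
  #define DECODE_XX3(name, opcode_name, opcode_value) \
    case opcode_name:                                 \
      std::printf("%s 'Dt, 'Da, 'Db\n", #name);       \
      return;
      DEMO_XX3_LIST(DECODE_XX3)
  #undef DECODE_XX3
    }
    std::puts("unknown");
  }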
diff --git a/src/ppc/interface-descriptors-ppc.cc b/src/ppc/interface-descriptors-ppc.cc
index 74ad564..ed03094 100644
--- a/src/ppc/interface-descriptors-ppc.cc
+++ b/src/ppc/interface-descriptors-ppc.cc
@@ -63,31 +63,7 @@
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r5};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewObjectDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r4, r6};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r4};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r4};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r4};
+  Register registers[] = {r4, r5, r6};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -143,15 +119,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r4, r6};
+  Register registers[] = {r4, r3, r6};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r4, r3, r6, r5};
   data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -180,6 +154,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // r5 : start index (to support rest parameters)
+  // r4 : the target to call
+  Register registers[] = {r4, r5};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void ConstructStubDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -214,13 +195,12 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
-  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
-      CallInterfaceDescriptorData* data) {                          \
-    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
-  }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+  Register registers[] = {r4, r6, r3, r5};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
 
 void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -411,6 +391,15 @@
   };
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
+
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r4,  // loaded new FP
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/ppc/macro-assembler-ppc.cc b/src/ppc/macro-assembler-ppc.cc
index 6588540..f2aa2e0 100644
--- a/src/ppc/macro-assembler-ppc.cc
+++ b/src/ppc/macro-assembler-ppc.cc
@@ -969,7 +969,7 @@
                                   int prologue_offset) {
   {
     ConstantPoolUnavailableScope constant_pool_unavailable(this);
-    LoadSmiLiteral(r11, Smi::FromInt(type));
+    mov(r11, Operand(StackFrame::TypeToMarker(type)));
     PushCommonFrame(r11);
   }
   if (FLAG_enable_embedded_constant_pool) {
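The LoadSmiLiteral -> mov changes in this file track a representation
change: the frame-type marker pushed into the frame slot is no longer a
Smi but a small tagged integer produced by StackFrame::TypeToMarker. The
marker's exact encoding is defined outside this excerpt; a sketch of the
likely shape (an assumption, shown only to make the mov(..., Operand(...))
form plausible):

  #include <cstdint>

  // Assumed shape: shift-and-tag the frame type so the marker cannot be
  // mistaken for a Smi or a heap pointer when walking the stack.
  static int32_t TypeToMarkerDemo(int32_t type) {
    return (type << 1) | 1;
  }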
@@ -1020,11 +1020,10 @@
   }
 }
 
-
-void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
   LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
-  LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+  LoadP(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+  LoadP(vector, FieldMemOperand(vector, Cell::kValueOffset));
 }
 
 
@@ -1035,10 +1034,10 @@
     // This path cannot rely on ip containing code entry.
     PushCommonFrame();
     LoadConstantPoolPointerRegister();
-    LoadSmiLiteral(ip, Smi::FromInt(type));
+    mov(ip, Operand(StackFrame::TypeToMarker(type)));
     push(ip);
   } else {
-    LoadSmiLiteral(ip, Smi::FromInt(type));
+    mov(ip, Operand(StackFrame::TypeToMarker(type)));
     PushCommonFrame(ip);
   }
   if (type == StackFrame::INTERNAL) {
@@ -1144,7 +1143,7 @@
   // all of the pushes that have happened inside of V8
   // since we were called from C code
 
-  LoadSmiLiteral(ip, Smi::FromInt(frame_type));
+  mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
   PushCommonFrame(ip);
   // Reserve room for saved entry sp and code object.
   subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
@@ -1193,19 +1192,6 @@
   StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
 }
 
-
-void MacroAssembler::InitializeNewString(Register string, Register length,
-                                         Heap::RootListIndex map_index,
-                                         Register scratch1, Register scratch2) {
-  SmiTag(scratch1, length);
-  LoadRoot(scratch2, map_index);
-  StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0);
-  li(scratch1, Operand(String::kEmptyHashField));
-  StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0);
-  StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0);
-}
-
-
 int MacroAssembler::ActivationFrameAlignment() {
 #if !defined(USE_SIMULATOR)
   // Running on the real platform. Use the alignment as mandated by the local
@@ -1403,19 +1389,17 @@
   }
 }
 
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
-                                             const ParameterCount& expected,
-                                             const ParameterCount& actual) {
-  Label skip_flooding;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  mov(r7, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual) {
+  Label skip_hook;
+  ExternalReference debug_hook_active =
+      ExternalReference::debug_hook_on_function_call_address(isolate());
+  mov(r7, Operand(debug_hook_active));
   LoadByte(r7, MemOperand(r7), r0);
   extsb(r7, r7);
-  cmpi(r7, Operand(StepIn));
-  blt(&skip_flooding);
+  CmpSmiLiteral(r7, Smi::kZero, r0);
+  beq(&skip_hook);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1431,7 +1415,7 @@
       Push(new_target);
     }
     Push(fun, fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    CallRuntime(Runtime::kDebugOnFunctionCall);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -1445,7 +1429,7 @@
       SmiUntag(expected.reg());
     }
   }
-  bind(&skip_flooding);
+  bind(&skip_hook);
 }
 
 
@@ -1459,8 +1443,8 @@
   DCHECK(function.is(r4));
   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r6));
 
-  if (call_wrapper.NeedsDebugStepCheck()) {
-    FloodFunctionIfStepping(function, new_target, expected, actual);
+  if (call_wrapper.NeedsDebugHookCheck()) {
+    CheckDebugHook(function, new_target, expected, actual);
   }
 
   // Clear the new.target register if not given.
@@ -1569,16 +1553,17 @@
 }
 
 
-void MacroAssembler::DebugBreak() {
-  li(r3, Operand::Zero());
-  mov(r4,
-      Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
-  CEntryStub ces(isolate(), 1);
-  DCHECK(AllowThisStubCall(&ces));
-  Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+  // Check whether we need to drop frames to restart a function on the stack.
+  ExternalReference restart_fp =
+      ExternalReference::debug_restart_fp_address(isolate());
+  mov(r4, Operand(restart_fp));
+  LoadWordArith(r4, MemOperand(r4));
+  cmpi(r4, Operand::Zero());
+  Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+       ne);
 }
 
-
 void MacroAssembler::PushStackHandler() {
   // Adjust this code if not the case.
   STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
@@ -1949,103 +1934,6 @@
   addi(result, result, Operand(kHeapObjectTag));
 }
 
-
-void MacroAssembler::AllocateTwoByteString(Register result, Register length,
-                                           Register scratch1, Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  slwi(scratch1, length, Operand(1));  // Length in bytes, not chars.
-  addi(scratch1, scratch1,
-       Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
-  mov(r0, Operand(~kObjectAlignmentMask));
-  and_(scratch1, scratch1, r0);
-
-  // Allocate two-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
-                                           Register scratch1, Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  DCHECK(kCharSize == 1);
-  addi(scratch1, length,
-       Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
-  li(r0, Operand(~kObjectAlignmentMask));
-  and_(scratch1, scratch1, r0);
-
-  // Allocate one-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
-                                                 Register length,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
-                      scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
-                                                 Register length,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-
 void MacroAssembler::CompareObjectType(Register object, Register map,
                                        Register type_reg, InstanceType type) {
   const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
@@ -2070,60 +1958,6 @@
   cmp(obj, r0);
 }
 
-void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
-                                             Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-  ble(fail);
-  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
-  bgt(fail);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
-                                          Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
-  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-  bgt(fail);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
-    Register value_reg, Register key_reg, Register elements_reg,
-    Register scratch1, DoubleRegister double_scratch, Label* fail,
-    int elements_offset) {
-  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
-  Label smi_value, store;
-
-  // Handle smi values specially.
-  JumpIfSmi(value_reg, &smi_value);
-
-  // Ensure that the object is a heap number
-  CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
-           DONT_DO_SMI_CHECK);
-
-  lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-  // Double value, turn potential sNaN into qNaN.
-  CanonicalizeNaN(double_scratch);
-  b(&store);
-
-  bind(&smi_value);
-  SmiToDouble(double_scratch, value_reg);
-
-  bind(&store);
-  SmiToDoubleArrayOffset(scratch1, key_reg);
-  add(scratch1, elements_reg, scratch1);
-  stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
-                                                     elements_offset));
-}
-
-
 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
                                             Register right,
                                             Register overflow_dst,
@@ -2304,33 +2138,6 @@
   bind(&done);
 }
 
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
-                                             Register scratch, Label* miss) {
-  // Get the prototype or initial map from the function.
-  LoadP(result,
-        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // If the prototype or initial map is the hole, don't return it and
-  // simply miss the cache instead. This will allow us to allocate a
-  // prototype object on-demand in the runtime system.
-  LoadRoot(r0, Heap::kTheHoleValueRootIndex);
-  cmp(result, r0);
-  beq(miss);
-
-  // If the function does not have an initial map, we're done.
-  Label done;
-  CompareObjectType(result, scratch, scratch, MAP_TYPE);
-  bne(&done);
-
-  // Get the prototype from the initial map.
-  LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
-  // All done.
-  bind(&done);
-}
-
-
 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
                               Condition cond) {
   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
@@ -2737,25 +2544,6 @@
   }
 }
 
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind, ElementsKind transitioned_kind,
-    Register map_in_out, Register scratch, Label* no_map_match) {
-  DCHECK(IsFastElementsKind(expected_kind));
-  DCHECK(IsFastElementsKind(transitioned_kind));
-
-  // Check that the function's map is the same as the expected cached map.
-  LoadP(scratch, NativeContextMemOperand());
-  LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
-  cmp(map_in_out, ip);
-  bne(no_map_match);
-
-  // Use the transitioned cached map.
-  LoadP(map_in_out,
-        ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
   LoadP(dst, NativeContextMemOperand());
   LoadP(dst, ContextMemOperand(dst, index));
@@ -2840,16 +2628,6 @@
   beq(smi_case, cr0);
 }
 
-
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
-                                          Label* non_smi_case) {
-  STATIC_ASSERT(kSmiTag == 0);
-  TestBitRange(src, kSmiTagSize - 1, 0, r0);
-  SmiUntag(dst, src);
-  bne(non_smi_case, cr0);
-}
-
-
 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
                                      Label* on_either_smi) {
   STATIC_ASSERT(kSmiTag == 0);
@@ -3130,19 +2908,6 @@
   bne(failure);
 }
 
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
-                                                              Register scratch,
-                                                              Label* failure) {
-  const int kFlatOneByteStringMask =
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatOneByteStringTag =
-      kStringTag | kOneByteStringTag | kSeqStringTag;
-  andi(scratch, type, Operand(kFlatOneByteStringMask));
-  cmpi(scratch, Operand(kFlatOneByteStringTag));
-  bne(failure);
-}
-
 static const int kRegisterPassedArguments = 8;
 
 
@@ -3867,7 +3632,6 @@
 
 void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
   subi(sp, sp, Operand(kFloatSize));
-  frsp(src, src);
   stfs(src, MemOperand(sp, 0));
   nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
   lwz(dst, MemOperand(sp, 0));
@@ -4492,44 +4256,6 @@
   return no_reg;
 }
 
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
-                                                      Register scratch0,
-                                                      Register scratch1,
-                                                      Label* found) {
-  DCHECK(!scratch1.is(scratch0));
-  Register current = scratch0;
-  Label loop_again, end;
-
-  // scratch contained elements pointer.
-  mr(current, object);
-  LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
-  LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
-  CompareRoot(current, Heap::kNullValueRootIndex);
-  beq(&end);
-
-  // Loop based on the map going up the prototype chain.
-  bind(&loop_again);
-  LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
-
-  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  lbz(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
-  cmpi(scratch1, Operand(JS_OBJECT_TYPE));
-  blt(found);
-
-  lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
-  DecodeField<Map::ElementsKindBits>(scratch1);
-  cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
-  beq(found);
-  LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
-  CompareRoot(current, Heap::kNullValueRootIndex);
-  bne(&loop_again);
-
-  bind(&end);
-}
-
-
 #ifdef DEBUG
 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                 Register reg5, Register reg6, Register reg7, Register reg8,
diff --git a/src/ppc/macro-assembler-ppc.h b/src/ppc/macro-assembler-ppc.h
index 28eceb1..a1d2932 100644
--- a/src/ppc/macro-assembler-ppc.h
+++ b/src/ppc/macro-assembler-ppc.h
@@ -470,16 +470,6 @@
     LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
   }
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the native context if the map in register
-  // map_in_out is the cached Array map in the native context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
-                                           ElementsKind transitioned_kind,
-                                           Register map_in_out,
-                                           Register scratch,
-                                           Label* no_map_match);
-
   void LoadNativeContextSlot(int index, Register dst);
 
   // Load the initial map from the global function. The registers
@@ -621,9 +611,10 @@
                           const ParameterCount& actual, InvokeFlag flag,
                           const CallWrapper& call_wrapper);
 
-  void FloodFunctionIfStepping(Register fun, Register new_target,
-                               const ParameterCount& expected,
-                               const ParameterCount& actual);
+  // On function call, call into the debugger if necessary.
+  void CheckDebugHook(Register fun, Register new_target,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
@@ -644,12 +635,10 @@
 
   void IsObjectNameType(Register object, Register scratch, Label* fail);
 
-  // ---------------------------------------------------------------------------
-  // Debugger Support
-
   void DebugBreak();
+  // Frame restart support
+  void MaybeDropFrames();
 
-  // ---------------------------------------------------------------------------
   // Exception handling
 
   // Push a new stack handler and link into stack handler chain.
@@ -722,25 +711,6 @@
   void FastAllocate(Register object_size, Register result, Register result_end,
                     Register scratch, AllocationFlags flags);
 
-  void AllocateTwoByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-  void AllocateOneByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-  void AllocateTwoByteConsString(Register result, Register length,
-                                 Register scratch1, Register scratch2,
-                                 Label* gc_required);
-  void AllocateOneByteConsString(Register result, Register length,
-                                 Register scratch1, Register scratch2,
-                                 Label* gc_required);
-  void AllocateTwoByteSlicedString(Register result, Register length,
-                                   Register scratch1, Register scratch2,
-                                   Label* gc_required);
-  void AllocateOneByteSlicedString(Register result, Register length,
-                                   Register scratch1, Register scratch2,
-                                   Label* gc_required);
-
   // Allocates a heap number or jumps to the gc_required label if the young
   // space is full and a scavenge is needed. All registers are clobbered also
   // when control continues at the gc_required label.
@@ -779,14 +749,6 @@
   void GetMapConstructor(Register result, Register map, Register temp,
                          Register temp2);
 
-  // Try to get function prototype of a function and puts the value in
-  // the result register. Checks that the function really is a
-  // function and jumps to the miss label if the fast checks fail. The
-  // function register will be untouched; the other registers may be
-  // clobbered.
-  void TryGetFunctionPrototype(Register function, Register result,
-                               Register scratch, Label* miss);
-
   // Compare object type for heap object.  heap_object contains a non-Smi
   // whose object type should be compared with the given type.  This both
   // sets the flags and leaves the object type in the type_reg register.
@@ -803,22 +765,6 @@
   // sets the flags and leaves the object type in the type_reg register.
   void CompareInstanceType(Register map, Register type_reg, InstanceType type);
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map, Register scratch, Label* fail);
-
-  // Check if a map for a JSObject indicates that the object has fast smi only
-  // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map, Register scratch, Label* fail);
-
-  // Check to see if maybe_number can be stored as a double in
-  // FastDoubleElements. If it can, store it at the index specified by key in
-  // the FastDoubleElements array elements. Otherwise jump to fail.
-  void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
-                                   Register elements_reg, Register scratch1,
-                                   DoubleRegister double_scratch, Label* fail,
-                                   int elements_offset = 0);
-
   // Compare an object's map with the specified map and its transitioned
   // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
   // set with result of map compare. If multiple map compares are required, the
@@ -1309,10 +1255,6 @@
   // Source and destination can be the same register.
   void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
 
-  // Untag the source value into destination and jump if source is not a smi.
-  // Souce and destination can be the same register.
-  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
   inline void TestIfSmi(Register value, Register scratch) {
     TestBitRange(value, kSmiTagSize - 1, 0, scratch);
   }
@@ -1434,11 +1376,6 @@
       Register first_object_instance_type, Register second_object_instance_type,
       Register scratch1, Register scratch2, Label* failure);
 
-  // Check if instance type is sequential one-byte string and jump to label if
-  // it is not.
-  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
-                                                Label* failure);
-
   void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
 
   void EmitSeqStringSetCharCheck(Register string, Register index,
@@ -1502,7 +1439,7 @@
   }
 
   // Load the type feedback vector from a JavaScript frame.
-  void EmitLoadTypeFeedbackVector(Register vector);
+  void EmitLoadFeedbackVector(Register vector);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type,
@@ -1528,21 +1465,6 @@
                                        Register scratch2_reg,
                                        Label* no_memento_found);
 
-  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
-                                         Register scratch_reg,
-                                         Register scratch2_reg,
-                                         Label* memento_found) {
-    Label no_memento_found;
-    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
-                                    &no_memento_found);
-    beq(memento_found);
-    bind(&no_memento_found);
-  }
-
-  // Jumps to found label if a prototype map has dictionary elements.
-  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
-                                        Register scratch1, Label* found);
-
   // Loads the constant pool pointer (kConstantPoolRegister).
   void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
       Register code_target_address);
@@ -1571,10 +1493,6 @@
                       bool* definitely_mismatches, InvokeFlag flag,
                       const CallWrapper& call_wrapper);
 
-  void InitializeNewString(Register string, Register length,
-                           Heap::RootListIndex map_index, Register scratch1,
-                           Register scratch2);
-
   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
   void InNewSpace(Register object, Register scratch,
                   Condition cond,  // eq for new space, ne otherwise.
diff --git a/src/ppc/simulator-ppc.cc b/src/ppc/simulator-ppc.cc
index 84fbb39..0586328 100644
--- a/src/ppc/simulator-ppc.cc
+++ b/src/ppc/simulator-ppc.cc
@@ -1623,7 +1623,8 @@
 
 // Handle execution based on instruction types.
 void Simulator::ExecuteExt1(Instruction* instr) {
-  switch (instr->Bits(10, 1) << 1) {
+  uint32_t opcode = EXT1 | instr->BitField(10, 1);
+  switch (opcode) {
     case MCRF:
       UNIMPLEMENTED();  // Not used by V8.
     case BCLRX:
@@ -1678,7 +1679,7 @@
 bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
   bool found = true;
 
-  int opcode = instr->Bits(10, 1) << 1;
+  uint32_t opcode = EXT2 | instr->BitField(10, 1);
   switch (opcode) {
     case SRWX: {
       int rs = instr->RSValue();
@@ -1708,6 +1709,60 @@
       break;
     }
 #endif
+    case MODUW: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      uint32_t ra_val = get_register(ra);
+      uint32_t rb_val = get_register(rb);
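+      // The ISA leaves moduw undefined for a zero divisor; the
+      // simulator returns all ones in that case.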
+      uint32_t alu_out = (rb_val == 0) ? -1 : ra_val % rb_val;
+      set_register(rt, alu_out);
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case MODUD: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      uint64_t ra_val = get_register(ra);
+      uint64_t rb_val = get_register(rb);
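+      // As for moduw above: a zero divisor yields all ones.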
+      uint64_t alu_out = (rb_val == 0) ? -1 : ra_val % rb_val;
+      set_register(rt, alu_out);
+      break;
+    }
+#endif
+    case MODSW: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      int32_t ra_val = get_register(ra);
+      int32_t rb_val = get_register(rb);
+      bool overflow = (ra_val == kMinInt && rb_val == -1);
+      // result is undefined if divisor is zero or if operation
+      // is 0x80000000 / -1.
+      int32_t alu_out = (rb_val == 0 || overflow) ? -1 : ra_val % rb_val;
+      set_register(rt, alu_out);
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case MODSD: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      int64_t ra_val = get_register(ra);
+      int64_t rb_val = get_register(rb);
+      int64_t one = 1;  // work-around gcc
+      int64_t kMinLongLong = (one << 63);
+      // result is undefined if divisor is zero or if operation
+      // is 0x80000000_00000000 / -1.
+      int64_t alu_out =
+          (rb_val == 0 || (ra_val == kMinLongLong && rb_val == -1))
+              ? -1
+              : ra_val % rb_val;
+      set_register(rt, alu_out);
+      break;
+    }
+#endif
     case SRAW: {
       int rs = instr->RSValue();
       int ra = instr->RAValue();
@@ -1895,7 +1950,7 @@
   if (found) return found;
 
   found = true;
-  opcode = instr->Bits(10, 2) << 2;
+  opcode = EXT2 | instr->BitField(10, 2);
   switch (opcode) {
     case SRADIX: {
       int ra = instr->RAValue();
@@ -1922,7 +1977,7 @@
 bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
   bool found = true;
 
-  int opcode = instr->Bits(9, 1) << 1;
+  uint32_t opcode = EXT2 | instr->BitField(9, 1);
   switch (opcode) {
     case TW: {
       // used for call redirection in simulation mode
@@ -2180,7 +2235,7 @@
 
 bool Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
   bool found = true;
-  int opcode = instr->Bits(9, 1) << 1;
+  uint32_t opcode = EXT2 | instr->BitField(9, 1);
   switch (opcode) {
     case CNTLZWX: {
       int rs = instr->RSValue();
@@ -2698,7 +2753,7 @@
 
 
 void Simulator::ExecuteExt2_5bit(Instruction* instr) {
-  int opcode = instr->Bits(5, 1) << 1;
+  uint32_t opcode = EXT2 | instr->BitField(5, 1);
   switch (opcode) {
     case ISEL: {
       int rt = instr->RTValue();
@@ -2731,9 +2786,9 @@
 
 
 void Simulator::ExecuteExt3(Instruction* instr) {
-  int opcode = instr->Bits(10, 1) << 1;
+  uint32_t opcode = EXT3 | instr->BitField(10, 1);
   switch (opcode) {
-    case FCFID: {
+    case FCFIDS: {
       // fcfids
       int frt = instr->RTValue();
       int frb = instr->RBValue();
@@ -2742,7 +2797,7 @@
       set_d_register_from_double(frt, frt_val);
       return;
     }
-    case FCFIDU: {
+    case FCFIDUS: {
       // fcfidus
       int frt = instr->RTValue();
       int frb = instr->RBValue();
@@ -2757,7 +2812,8 @@
 
 
 void Simulator::ExecuteExt4(Instruction* instr) {
-  switch (instr->Bits(5, 1) << 1) {
+  uint32_t opcode = EXT4 | instr->BitField(5, 1);
+  switch (opcode) {
     case FDIV: {
       int frt = instr->RTValue();
       int fra = instr->RAValue();
@@ -2844,7 +2900,7 @@
       return;
     }
   }
-  int opcode = instr->Bits(10, 1) << 1;
+  opcode = EXT4 | instr->BitField(10, 1);
   switch (opcode) {
     case FCMPU: {
       int fra = instr->RAValue();
@@ -3182,7 +3238,8 @@
 
 #if V8_TARGET_ARCH_PPC64
 void Simulator::ExecuteExt5(Instruction* instr) {
-  switch (instr->Bits(4, 2) << 2) {
+  uint32_t opcode = EXT5 | instr->BitField(4, 2);
+  switch (opcode) {
     case RLDICL: {
       int ra = instr->RAValue();
       int rs = instr->RSValue();
@@ -3270,7 +3327,8 @@
       return;
     }
   }
-  switch (instr->Bits(4, 1) << 1) {
+  opcode = EXT5 | instr->BitField(4, 1);
+  switch (opcode) {
     case RLDCL: {
       int ra = instr->RAValue();
       int rs = instr->RSValue();
@@ -3295,9 +3353,55 @@
 }
 #endif
 
+void Simulator::ExecuteExt6(Instruction* instr) {
+  uint32_t opcode = EXT6 | instr->BitField(10, 3);
+  switch (opcode) {
+    case XSADDDP: {
+      int frt = instr->RTValue();
+      int fra = instr->RAValue();
+      int frb = instr->RBValue();
+      double fra_val = get_double_from_d_register(fra);
+      double frb_val = get_double_from_d_register(frb);
+      double frt_val = fra_val + frb_val;
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case XSSUBDP: {
+      int frt = instr->RTValue();
+      int fra = instr->RAValue();
+      int frb = instr->RBValue();
+      double fra_val = get_double_from_d_register(fra);
+      double frb_val = get_double_from_d_register(frb);
+      double frt_val = fra_val - frb_val;
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case XSMULDP: {
+      int frt = instr->RTValue();
+      int fra = instr->RAValue();
+      int frb = instr->RBValue();
+      double fra_val = get_double_from_d_register(fra);
+      double frb_val = get_double_from_d_register(frb);
+      double frt_val = fra_val * frb_val;
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case XSDIVDP: {
+      int frt = instr->RTValue();
+      int fra = instr->RAValue();
+      int frb = instr->RBValue();
+      double fra_val = get_double_from_d_register(fra);
+      double frb_val = get_double_from_d_register(frb);
+      double frt_val = fra_val / frb_val;
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+  }
+  UNIMPLEMENTED();  // Not used by V8.
+}
 
 void Simulator::ExecuteGeneric(Instruction* instr) {
-  int opcode = instr->OpcodeValue() << 26;
+  uint32_t opcode = instr->OpcodeField();
   switch (opcode) {
     case SUBFIC: {
       int rt = instr->RTValue();
@@ -3701,7 +3805,16 @@
       intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
       int32_t val = ReadW(ra_val + offset, instr);
       float* fptr = reinterpret_cast<float*>(&val);
-      set_d_register_from_double(frt, static_cast<double>(*fptr));
+// Conversion using double changes sNaN to qNaN on ia32/x64
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+      if (val == 0x7fa00000) {
+        set_d_register(frt, 0x7ff4000000000000);
+      } else {
+#endif
+        set_d_register_from_double(frt, static_cast<double>(*fptr));
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+      }
+#endif
       if (opcode == LFSU) {
         DCHECK(ra != 0);
         set_register(ra, ra_val + offset);
@@ -3731,7 +3844,19 @@
         int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
         intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
         float frs_val = static_cast<float>(get_double_from_d_register(frs));
-        int32_t* p = reinterpret_cast<int32_t*>(&frs_val);
+        int32_t* p;
+// Conversion using double changes sNaN to qNaN on ia32/x64
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+        int64_t frs_isnan = get_d_register(frs);
+        int32_t frs_nan_single = 0x7fa00000;
+        if (frs_isnan == 0x7ff4000000000000) {
+          p = &frs_nan_single;
+        } else {
+#endif
+          p = reinterpret_cast<int32_t*>(&frs_val);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+        }
+#endif
         WriteW(ra_val + offset, *p, instr);
         if (opcode == STFSU) {
           DCHECK(ra != 0);
@@ -3810,6 +3935,10 @@
       break;
     }
 #endif
+    case EXT6: {
+      ExecuteExt6(instr);
+      break;
+    }
 
     default: {
       UNIMPLEMENTED();
@@ -3839,7 +3968,7 @@
   if (::v8::internal::FLAG_trace_sim) {
     Trace(instr);
   }
-  int opcode = instr->OpcodeValue() << 26;
+  uint32_t opcode = instr->OpcodeField();
   if (opcode == TWI) {
     SoftwareInterrupt(instr);
   } else {
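
The opcode-dispatch hunks above replace instr->Bits(hi, lo) << lo with
EXTn | instr->BitField(hi, lo), so every switch can use the full opcode
constants (primary opcode combined with the in-place extended-opcode field)
as case labels. A minimal standalone sketch of the pattern; EXT2 and SRWX
here are hand-built stand-ins, the real encodings live in
src/ppc/constants-ppc.h:

    #include <cstdint>
    #include <cstdio>

    // Stand-in constants: EXT2 is the primary opcode (31) shifted into bits
    // 31..26, SRWX the extended opcode (536) shifted into bits 10..1.
    constexpr uint32_t EXT2 = 31u << 26;
    constexpr uint32_t SRWX = EXT2 | (536u << 1);

    // Like Instruction::BitField(hi, lo): returns bits hi..lo still in
    // place, so OR-ing in the primary opcode yields a full opcode value.
    uint32_t BitField(uint32_t instr, int hi, int lo) {
      int width = hi - lo + 1;
      uint32_t mask = ((1u << width) - 1) << lo;
      return instr & mask;
    }

    void Dispatch(uint32_t instr) {
      uint32_t opcode = EXT2 | BitField(instr, 10, 1);
      switch (opcode) {
        case SRWX:
          std::puts("srw");
          break;
        default:
          std::puts("unimplemented");
          break;
      }
    }

    int main() { Dispatch(SRWX | (3u << 21) /* register fields */); }
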
diff --git a/src/ppc/simulator-ppc.h b/src/ppc/simulator-ppc.h
index d061545..91e7f05 100644
--- a/src/ppc/simulator-ppc.h
+++ b/src/ppc/simulator-ppc.h
@@ -321,6 +321,7 @@
 #if V8_TARGET_ARCH_PPC64
   void ExecuteExt5(Instruction* instr);
 #endif
+  void ExecuteExt6(Instruction* instr);
   void ExecuteGeneric(Instruction* instr);
 
   void SetFPSCR(int bit) { fp_condition_reg_ |= (1 << (31 - bit)); }
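
The LFS/STFS hunks in simulator-ppc.cc above work around the host FPU: on
ia32/x64, passing a float through a double conversion sets the quiet bit of
a signaling NaN, so the simulator moves the raw bits itself for the sNaN
pattern it tests. A self-contained sketch of the effect; the two bit
patterns are the ones the hunks compare against, and the behavior of the
cast is host-specific:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // 0x7fa00000 is a binary32 signaling NaN: exponent all ones, quiet
      // bit (bit 22) clear, non-zero payload.
      uint32_t snan32 = 0x7fa00000;
      float f;
      std::memcpy(&f, &snan32, sizeof(f));

      // On ia32/x64 hosts this conversion typically sets the quiet bit.
      double d = static_cast<double>(f);
      uint64_t converted;
      std::memcpy(&converted, &d, sizeof(converted));

      // The bit-exact widening the simulator preserves instead:
      uint64_t preserved = 0x7ff4000000000000ull;

      std::printf("converted: %016llx\n",
                  static_cast<unsigned long long>(converted));
      std::printf("preserved: %016llx\n",
                  static_cast<unsigned long long>(preserved));
      return 0;
    }
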
diff --git a/src/profiler/cpu-profiler.cc b/src/profiler/cpu-profiler.cc
index 6821ba6..85f9d5e 100644
--- a/src/profiler/cpu-profiler.cc
+++ b/src/profiler/cpu-profiler.cc
@@ -277,6 +277,21 @@
   profiles_->set_cpu_profiler(this);
 }
 
+void CpuProfiler::CreateEntriesForRuntimeCallStats() {
+  static_entries_.clear();
+  RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
+  CodeMap* code_map = generator_->code_map();
+  for (int i = 0; i < RuntimeCallStats::counters_count; ++i) {
+    RuntimeCallCounter* counter = &(rcs->*(RuntimeCallStats::counters[i]));
+    DCHECK(counter->name());
+    std::unique_ptr<CodeEntry> entry(
+        new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
+                      CodeEntry::kEmptyNamePrefix, "native V8Runtime"));
+    code_map->AddCode(reinterpret_cast<Address>(counter), entry.get(), 1);
+    static_entries_.push_back(std::move(entry));
+  }
+}
+
 void CpuProfiler::CollectSample() {
   if (processor_) {
     processor_->AddCurrentStack(isolate_);
@@ -305,9 +320,10 @@
   // Disable logging when using the new implementation.
   saved_is_logging_ = logger->is_logging_;
   logger->is_logging_ = false;
-  generator_.reset(new ProfileGenerator(isolate_, profiles_.get()));
+  generator_.reset(new ProfileGenerator(profiles_.get()));
   processor_.reset(new ProfilerEventsProcessor(isolate_, generator_.get(),
                                                sampling_interval_));
+  CreateEntriesForRuntimeCallStats();
   logger->SetUpProfilerListener();
   ProfilerListener* profiler_listener = logger->profiler_listener();
   profiler_listener->AddObserver(this);
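
CreateEntriesForRuntimeCallStats above registers one CodeEntry per runtime
call counter up front, keyed by the counter's address, which is what lets
FindEntry shrink to a plain CodeMap lookup in the profile-generator.cc hunk
further down. A pared-down sketch of the idea, with stand-in types in place
of CodeEntry/CodeMap:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    struct Entry { std::string name; };  // stand-in for CodeEntry

    class CodeMap {
     public:
      void AddCode(uintptr_t start, Entry* entry) { map_[start] = entry; }
      Entry* FindEntry(uintptr_t addr) {
        auto it = map_.find(addr);
        return it == map_.end() ? nullptr : it->second;
      }
     private:
      std::map<uintptr_t, Entry*> map_;
    };

    int counters[3];  // stand-in for the RuntimeCallStats counter table

    int main() {
      CodeMap code_map;
      // The profiler keeps ownership, as static_entries_ does above.
      std::vector<std::unique_ptr<Entry>> static_entries;
      const char* names[] = {"GC", "Parse", "Compile"};
      for (int i = 0; i < 3; ++i) {
        auto entry = std::make_unique<Entry>();
        entry->name = names[i];
        code_map.AddCode(reinterpret_cast<uintptr_t>(&counters[i]),
                         entry.get());
        static_entries.push_back(std::move(entry));
      }
      // A sampled counter address now resolves with a single lookup.
      Entry* e = code_map.FindEntry(reinterpret_cast<uintptr_t>(&counters[1]));
      return (e != nullptr && e->name == "Parse") ? 0 : 1;
    }
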
diff --git a/src/profiler/cpu-profiler.h b/src/profiler/cpu-profiler.h
index fa31754..a6872e4 100644
--- a/src/profiler/cpu-profiler.h
+++ b/src/profiler/cpu-profiler.h
@@ -220,12 +220,14 @@
   void StopProcessor();
   void ResetProfiles();
   void LogBuiltins();
+  void CreateEntriesForRuntimeCallStats();
 
   Isolate* const isolate_;
   base::TimeDelta sampling_interval_;
   std::unique_ptr<CpuProfilesCollection> profiles_;
   std::unique_ptr<ProfileGenerator> generator_;
   std::unique_ptr<ProfilerEventsProcessor> processor_;
+  std::vector<std::unique_ptr<CodeEntry>> static_entries_;
   bool saved_is_logging_;
   bool is_profiling_;
 
diff --git a/src/profiler/heap-profiler.cc b/src/profiler/heap-profiler.cc
index 2df28a7..938bb12 100644
--- a/src/profiler/heap-profiler.cc
+++ b/src/profiler/heap-profiler.cc
@@ -6,6 +6,7 @@
 
 #include "src/api.h"
 #include "src/debug/debug.h"
+#include "src/heap/heap-inl.h"
 #include "src/profiler/allocation-tracker.h"
 #include "src/profiler/heap-snapshot-generator-inl.h"
 #include "src/profiler/sampling-heap-profiler.h"
@@ -16,9 +17,8 @@
 HeapProfiler::HeapProfiler(Heap* heap)
     : ids_(new HeapObjectsMap(heap)),
       names_(new StringsStorage(heap)),
-      is_tracking_object_moves_(false) {
-}
-
+      is_tracking_object_moves_(false),
+      get_retainer_infos_callback_(nullptr) {}
 
 static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
   delete *snapshot_ptr;
@@ -61,6 +61,19 @@
       class_id, Utils::ToLocal(Handle<Object>(wrapper)));
 }
 
+void HeapProfiler::SetGetRetainerInfosCallback(
+    v8::HeapProfiler::GetRetainerInfosCallback callback) {
+  get_retainer_infos_callback_ = callback;
+}
+
+v8::HeapProfiler::RetainerInfos HeapProfiler::GetRetainerInfos(
+    Isolate* isolate) {
+  v8::HeapProfiler::RetainerInfos infos;
+  if (get_retainer_infos_callback_ != nullptr)
+    infos =
+        get_retainer_infos_callback_(reinterpret_cast<v8::Isolate*>(isolate));
+  return infos;
+}
 
 HeapSnapshot* HeapProfiler::TakeSnapshot(
     v8::ActivityControl* control,
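
The new GetRetainerInfos path above is a null-checked embedder callback with
a default-constructed fallback. A minimal sketch of the same shape, with the
v8 types simplified away:

    #include <cstdio>

    struct RetainerInfos { int group_count = 0; };  // simplified stand-in
    using GetRetainerInfosCallback = RetainerInfos (*)();

    class Profiler {
     public:
      void SetGetRetainerInfosCallback(GetRetainerInfosCallback cb) {
        callback_ = cb;
      }
      RetainerInfos GetRetainerInfos() {
        RetainerInfos infos;                            // empty default
        if (callback_ != nullptr) infos = callback_();  // embedder override
        return infos;
      }
     private:
      GetRetainerInfosCallback callback_ = nullptr;
    };

    int main() {
      Profiler p;
      std::printf("%d\n", p.GetRetainerInfos().group_count);  // 0
      p.SetGetRetainerInfosCallback([] { return RetainerInfos{3}; });
      std::printf("%d\n", p.GetRetainerInfos().group_count);  // 3
    }
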
diff --git a/src/profiler/heap-profiler.h b/src/profiler/heap-profiler.h
index 3e1dcb5..a10cb92 100644
--- a/src/profiler/heap-profiler.h
+++ b/src/profiler/heap-profiler.h
@@ -66,6 +66,11 @@
                                                       Object** wrapper);
   void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
 
+  void SetGetRetainerInfosCallback(
+      v8::HeapProfiler::GetRetainerInfosCallback callback);
+
+  v8::HeapProfiler::RetainerInfos GetRetainerInfos(Isolate* isolate);
+
   bool is_tracking_object_moves() const { return is_tracking_object_moves_; }
   bool is_tracking_allocations() const { return !!allocation_tracker_; }
 
@@ -86,6 +91,7 @@
   bool is_tracking_object_moves_;
   base::Mutex profiler_mutex_;
   std::unique_ptr<SamplingHeapProfiler> sampling_heap_profiler_;
+  v8::HeapProfiler::GetRetainerInfosCallback get_retainer_infos_callback_;
 
   DISALLOW_COPY_AND_ASSIGN(HeapProfiler);
 };
diff --git a/src/profiler/heap-snapshot-generator-inl.h b/src/profiler/heap-snapshot-generator-inl.h
index 169ab56..eeb212a 100644
--- a/src/profiler/heap-snapshot-generator-inl.h
+++ b/src/profiler/heap-snapshot-generator-inl.h
@@ -38,13 +38,17 @@
   return next_index;
 }
 
-
-HeapGraphEdge** HeapEntry::children_arr() {
+std::deque<HeapGraphEdge*>::iterator HeapEntry::children_begin() {
   DCHECK(children_index_ >= 0);
-  SLOW_DCHECK(children_index_ < snapshot_->children().length() ||
-      (children_index_ == snapshot_->children().length() &&
+  SLOW_DCHECK(
+      children_index_ < static_cast<int>(snapshot_->children().size()) ||
+      (children_index_ == static_cast<int>(snapshot_->children().size()) &&
        children_count_ == 0));
-  return &snapshot_->children().first() + children_index_;
+  return snapshot_->children().begin() + children_index_;
+}
+
+std::deque<HeapGraphEdge*>::iterator HeapEntry::children_end() {
+  return children_begin() + children_count_;
 }
 
 
diff --git a/src/profiler/heap-snapshot-generator.cc b/src/profiler/heap-snapshot-generator.cc
index 2fd682e..b7b97a8 100644
--- a/src/profiler/heap-snapshot-generator.cc
+++ b/src/profiler/heap-snapshot-generator.cc
@@ -4,13 +4,20 @@
 
 #include "src/profiler/heap-snapshot-generator.h"
 
+#include <utility>
+
+#include "src/api.h"
 #include "src/code-stubs.h"
 #include "src/conversions.h"
 #include "src/debug/debug.h"
+#include "src/layout-descriptor.h"
 #include "src/objects-body-descriptors.h"
+#include "src/objects-inl.h"
 #include "src/profiler/allocation-tracker.h"
 #include "src/profiler/heap-profiler.h"
 #include "src/profiler/heap-snapshot-generator-inl.h"
+#include "src/prototype.h"
+#include "src/transitions.h"
 
 namespace v8 {
 namespace internal {
@@ -63,7 +70,7 @@
                                   const char* name,
                                   HeapEntry* entry) {
   HeapGraphEdge edge(type, name, this->index(), entry->index());
-  snapshot_->edges().Add(edge);
+  snapshot_->edges().push_back(edge);
   ++children_count_;
 }
 
@@ -72,7 +79,7 @@
                                     int index,
                                     HeapEntry* entry) {
   HeapGraphEdge edge(type, index, this->index(), entry->index());
-  snapshot_->edges().Add(edge);
+  snapshot_->edges().push_back(edge);
   ++children_count_;
 }
 
@@ -97,9 +104,8 @@
     base::OS::Print("\"\n");
   }
   if (--max_depth == 0) return;
-  Vector<HeapGraphEdge*> ch = children();
-  for (int i = 0; i < ch.length(); ++i) {
-    HeapGraphEdge& edge = *ch[i];
+  for (auto i = children_begin(); i != children_end(); ++i) {
+    HeapGraphEdge& edge = **i;
     const char* edge_prefix = "";
     EmbeddedVector<char, 64> index;
     const char* edge_name = index.start();
@@ -153,7 +159,6 @@
     case kConsString: return "/concatenated string/";
     case kSlicedString: return "/sliced string/";
     case kSymbol: return "/symbol/";
-    case kSimdValue: return "/simd/";
     default: return "???";
   }
 }
@@ -270,15 +275,15 @@
 
 
 void HeapSnapshot::FillChildren() {
-  DCHECK(children().is_empty());
-  children().Allocate(edges().length());
+  DCHECK(children().empty());
+  children().resize(edges().size());
   int children_index = 0;
   for (int i = 0; i < entries().length(); ++i) {
     HeapEntry* entry = &entries()[i];
     children_index = entry->set_children_index(children_index);
   }
-  DCHECK(edges().length() == children_index);
-  for (int i = 0; i < edges().length(); ++i) {
+  DCHECK_EQ(edges().size(), static_cast<size_t>(children_index));
+  for (size_t i = 0; i < edges().size(); ++i) {
     HeapGraphEdge* edge = &edges()[i];
     edge->ReplaceToIndexWithEntry(this);
     edge->from()->add_child(edge);
@@ -335,12 +340,10 @@
 
 
 size_t HeapSnapshot::RawSnapshotSize() const {
-  return
-      sizeof(*this) +
-      GetMemoryUsedByList(entries_) +
-      GetMemoryUsedByList(edges_) +
-      GetMemoryUsedByList(children_) +
-      GetMemoryUsedByList(sorted_entries_);
+  return sizeof(*this) + GetMemoryUsedByList(entries_) +
+         edges_.size() * sizeof(decltype(edges_)::value_type) +
+         children_.size() * sizeof(decltype(children_)::value_type) +
+         GetMemoryUsedByList(sorted_entries_);
 }
 
 
@@ -839,8 +842,6 @@
     return AddEntry(object, HeapEntry::kArray, "");
   } else if (object->IsHeapNumber()) {
     return AddEntry(object, HeapEntry::kHeapNumber, "number");
-  } else if (object->IsSimd128Value()) {
-    return AddEntry(object, HeapEntry::kSimdValue, "simd");
   }
   return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
 }
@@ -1035,8 +1036,6 @@
     ExtractAccessorPairReferences(entry, AccessorPair::cast(obj));
   } else if (obj->IsCode()) {
     ExtractCodeReferences(entry, Code::cast(obj));
-  } else if (obj->IsBox()) {
-    ExtractBoxReferences(entry, Box::cast(obj));
   } else if (obj->IsCell()) {
     ExtractCellReferences(entry, Cell::cast(obj));
   } else if (obj->IsWeakCell()) {
@@ -1113,9 +1112,11 @@
       }
     }
     SharedFunctionInfo* shared_info = js_fun->shared();
-    TagObject(js_fun->literals(), "(function literals)");
-    SetInternalReference(js_fun, entry, "literals", js_fun->literals(),
-                         JSFunction::kLiteralsOffset);
+    TagObject(js_fun->feedback_vector_cell(),
+              "(function feedback vector cell)");
+    SetInternalReference(js_fun, entry, "feedback_vector_cell",
+                         js_fun->feedback_vector_cell(),
+                         JSFunction::kFeedbackVectorOffset);
     TagObject(shared_info, "(shared function info)");
     SetInternalReference(js_fun, entry,
                          "shared", shared_info,
@@ -1168,6 +1169,10 @@
     SlicedString* ss = SlicedString::cast(string);
     SetInternalReference(ss, entry, "parent", ss->parent(),
                          SlicedString::kParentOffset);
+  } else if (string->IsThinString()) {
+    ThinString* ts = ThinString::cast(string);
+    SetInternalReference(ts, entry, "actual", ts->actual(),
+                         ThinString::kActualOffset);
   }
 }
 
@@ -1450,10 +1455,6 @@
                        Code::kGCMetadataOffset);
 }
 
-void V8HeapExplorer::ExtractBoxReferences(int entry, Box* box) {
-  SetInternalReference(box, entry, "value", box->value(), Box::kValueOffset);
-}
-
 void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
   SetInternalReference(cell, entry, "value", cell->value(), Cell::kValueOffset);
 }
@@ -1822,7 +1823,6 @@
          object != heap_->empty_byte_array() &&
          object != heap_->empty_fixed_array() &&
          object != heap_->empty_descriptor_array() &&
-         object != heap_->empty_type_feedback_vector() &&
          object != heap_->fixed_array_map() && object != heap_->cell_map() &&
          object != heap_->global_property_cell_map() &&
          object != heap_->shared_function_info_map() &&
@@ -2286,55 +2286,52 @@
 
 void NativeObjectsExplorer::FillRetainedObjects() {
   if (embedder_queried_) return;
-  Isolate* isolate = isolate_;
-  const GCType major_gc_type = kGCTypeMarkSweepCompact;
-  // Record objects that are joined into ObjectGroups.
-  isolate->heap()->CallGCPrologueCallbacks(
-      major_gc_type, kGCCallbackFlagConstructRetainedObjectInfos);
-  List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
-  for (int i = 0; i < groups->length(); ++i) {
-    ObjectGroup* group = groups->at(i);
-    if (group->info == NULL) continue;
-    List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info);
-    for (size_t j = 0; j < group->length; ++j) {
-      HeapObject* obj = HeapObject::cast(*group->objects[j]);
-      list->Add(obj);
-      in_groups_.Insert(obj);
+  v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate_));
+  v8::HeapProfiler::RetainerInfos infos =
+      snapshot_->profiler()->GetRetainerInfos(isolate_);
+  for (auto& pair : infos.groups) {
+    List<HeapObject*>* list = GetListMaybeDisposeInfo(pair.first);
+    for (auto& persistent : pair.second) {
+      if (persistent->IsEmpty()) continue;
+
+      Handle<Object> object = v8::Utils::OpenHandle(
+          *persistent->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
+      DCHECK(!object.is_null());
+      HeapObject* heap_object = HeapObject::cast(*object);
+      list->Add(heap_object);
+      in_groups_.Insert(heap_object);
     }
-    group->info = NULL;  // Acquire info object ownership.
   }
-  isolate->global_handles()->RemoveObjectGroups();
-  isolate->heap()->CallGCEpilogueCallbacks(major_gc_type, kNoGCCallbackFlags);
+
   // Record objects that are not in ObjectGroups, but have class ID.
   GlobalHandlesExtractor extractor(this);
-  isolate->global_handles()->IterateAllRootsWithClassIds(&extractor);
+  isolate_->global_handles()->IterateAllRootsWithClassIds(&extractor);
+
+  edges_ = std::move(infos.edges);
   embedder_queried_ = true;
 }
 
+void NativeObjectsExplorer::FillEdges() {
+  v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate_));
+  // Fill in actual edges found.
+  for (auto& pair : edges_) {
+    if (pair.first->IsEmpty() || pair.second->IsEmpty()) continue;
 
-void NativeObjectsExplorer::FillImplicitReferences() {
-  Isolate* isolate = isolate_;
-  List<ImplicitRefGroup*>* groups =
-      isolate->global_handles()->implicit_ref_groups();
-  for (int i = 0; i < groups->length(); ++i) {
-    ImplicitRefGroup* group = groups->at(i);
-    HeapObject* parent = *group->parent;
+    Handle<Object> parent_object = v8::Utils::OpenHandle(
+        *pair.first->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
+    HeapObject* parent = HeapObject::cast(*parent_object);
     int parent_entry =
         filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
     DCHECK(parent_entry != HeapEntry::kNoEntry);
-    Object*** children = group->children;
-    for (size_t j = 0; j < group->length; ++j) {
-      Object* child = *children[j];
-      HeapEntry* child_entry =
-          filler_->FindOrAddEntry(child, native_entries_allocator_);
-      filler_->SetNamedReference(
-          HeapGraphEdge::kInternal,
-          parent_entry,
-          "native",
-          child_entry);
-    }
+    Handle<Object> child_object = v8::Utils::OpenHandle(
+        *pair.second->Get(reinterpret_cast<v8::Isolate*>(isolate_)));
+    HeapObject* child = HeapObject::cast(*child_object);
+    HeapEntry* child_entry =
+        filler_->FindOrAddEntry(child, native_entries_allocator_);
+    filler_->SetNamedReference(HeapGraphEdge::kInternal, parent_entry, "native",
+                               child_entry);
   }
-  isolate->global_handles()->RemoveImplicitRefGroups();
+  edges_.clear();
 }
 
 List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
@@ -2354,7 +2351,7 @@
     SnapshotFiller* filler) {
   filler_ = filler;
   FillRetainedObjects();
-  FillImplicitReferences();
+  FillEdges();
   if (EstimateObjectsCount() > 0) {
     for (base::HashMap::Entry* p = objects_by_info_.Start(); p != NULL;
          p = objects_by_info_.Next(p)) {
@@ -2491,6 +2488,20 @@
       heap_(heap) {
 }
 
+namespace {
+class NullContextScope {
+ public:
+  explicit NullContextScope(Isolate* isolate)
+      : isolate_(isolate), prev_(isolate->context()) {
+    isolate_->set_context(nullptr);
+  }
+  ~NullContextScope() { isolate_->set_context(prev_); }
+
+ private:
+  Isolate* isolate_;
+  Context* prev_;
+};
+}  // namespace
 
 bool HeapSnapshotGenerator::GenerateSnapshot() {
   v8_heap_explorer_.TagGlobalObjects();
@@ -2504,6 +2515,8 @@
   heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                            GarbageCollectionReason::kHeapProfiler);
 
+  NullContextScope null_context_scope(heap_->isolate());
+
 #ifdef VERIFY_HEAP
   Heap* debug_heap = heap_;
   if (FLAG_verify_heap) {
@@ -2797,8 +2810,8 @@
 
 
 void HeapSnapshotJSONSerializer::SerializeEdges() {
-  List<HeapGraphEdge*>& edges = snapshot_->children();
-  for (int i = 0; i < edges.length(); ++i) {
+  std::deque<HeapGraphEdge*>& edges = snapshot_->children();
+  for (size_t i = 0; i < edges.size(); ++i) {
     DCHECK(i == 0 ||
            edges[i - 1]->from()->index() <= edges[i]->from()->index());
     SerializeEdge(edges[i], i == 0);
@@ -2916,7 +2929,7 @@
   writer_->AddString(",\"node_count\":");
   writer_->AddNumber(snapshot_->entries().length());
   writer_->AddString(",\"edge_count\":");
-  writer_->AddNumber(snapshot_->edges().length());
+  writer_->AddNumber(static_cast<double>(snapshot_->edges().size()));
   writer_->AddString(",\"trace_function_count\":");
   uint32_t count = 0;
   AllocationTracker* tracker = snapshot_->profiler()->allocation_tracker();
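
NullContextScope above is the usual save/clear/restore RAII idiom: stash the
current context in the constructor, null it for the duration of the
snapshot, and restore it on every exit path. The same shape on a plain
pointer slot, as a sketch:

    #include <cassert>

    class NullPointerScope {
     public:
      explicit NullPointerScope(int** slot) : slot_(slot), prev_(*slot) {
        *slot_ = nullptr;  // cleared for the duration of the scope
      }
      ~NullPointerScope() { *slot_ = prev_; }  // restored on any exit path
      NullPointerScope(const NullPointerScope&) = delete;
      NullPointerScope& operator=(const NullPointerScope&) = delete;

     private:
      int** slot_;
      int* prev_;
    };

    int g_value = 42;
    int* g_slot = &g_value;  // stand-in for isolate->context()

    int main() {
      {
        NullPointerScope scope(&g_slot);
        assert(g_slot == nullptr);
      }
      assert(g_slot == &g_value);
    }
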
diff --git a/src/profiler/heap-snapshot-generator.h b/src/profiler/heap-snapshot-generator.h
index b235ff0..022f238 100644
--- a/src/profiler/heap-snapshot-generator.h
+++ b/src/profiler/heap-snapshot-generator.h
@@ -5,6 +5,7 @@
 #ifndef V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
 #define V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
 
+#include <deque>
 #include <unordered_map>
 
 #include "include/v8-profiler.h"
@@ -91,8 +92,7 @@
     kSynthetic = v8::HeapGraphNode::kSynthetic,
     kConsString = v8::HeapGraphNode::kConsString,
     kSlicedString = v8::HeapGraphNode::kSlicedString,
-    kSymbol = v8::HeapGraphNode::kSymbol,
-    kSimdValue = v8::HeapGraphNode::kSimdValue
+    kSymbol = v8::HeapGraphNode::kSymbol
   };
   static const int kNoEntry;
 
@@ -115,10 +115,9 @@
   int children_count() const { return children_count_; }
   INLINE(int set_children_index(int index));
   void add_child(HeapGraphEdge* edge) {
-    children_arr()[children_count_++] = edge;
+    *(children_begin() + children_count_++) = edge;
   }
-  Vector<HeapGraphEdge*> children() {
-    return Vector<HeapGraphEdge*>(children_arr(), children_count_); }
+  HeapGraphEdge* child(int i) { return *(children_begin() + i); }
   INLINE(Isolate* isolate() const);
 
   void SetIndexedReference(
@@ -130,7 +129,8 @@
       const char* prefix, const char* edge_name, int max_depth, int indent);
 
  private:
-  INLINE(HeapGraphEdge** children_arr());
+  INLINE(std::deque<HeapGraphEdge*>::iterator children_begin());
+  INLINE(std::deque<HeapGraphEdge*>::iterator children_end());
   const char* TypeAsString();
 
   unsigned type_: 4;
@@ -163,8 +163,8 @@
     return &entries_[gc_subroot_indexes_[index]];
   }
   List<HeapEntry>& entries() { return entries_; }
-  List<HeapGraphEdge>& edges() { return edges_; }
-  List<HeapGraphEdge*>& children() { return children_; }
+  std::deque<HeapGraphEdge>& edges() { return edges_; }
+  std::deque<HeapGraphEdge*>& children() { return children_; }
   void RememberLastJSObjectId();
   SnapshotObjectId max_snapshot_js_object_id() const {
     return max_snapshot_js_object_id_;
@@ -192,8 +192,8 @@
   int gc_roots_index_;
   int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
   List<HeapEntry> entries_;
-  List<HeapGraphEdge> edges_;
-  List<HeapGraphEdge*> children_;
+  std::deque<HeapGraphEdge> edges_;
+  std::deque<HeapGraphEdge*> children_;
   List<HeapEntry*> sorted_entries_;
   SnapshotObjectId max_snapshot_js_object_id_;
 
@@ -385,7 +385,6 @@
   void ExtractAccessorInfoReferences(int entry, AccessorInfo* accessor_info);
   void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
   void ExtractCodeReferences(int entry, Code* code);
-  void ExtractBoxReferences(int entry, Box* box);
   void ExtractCellReferences(int entry, Cell* cell);
   void ExtractWeakCellReferences(int entry, WeakCell* weak_cell);
   void ExtractPropertyCellReferences(int entry, PropertyCell* cell);
@@ -495,7 +494,7 @@
 
  private:
   void FillRetainedObjects();
-  void FillImplicitReferences();
+  void FillEdges();
   List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
   void SetNativeRootReference(v8::RetainedObjectInfo* info);
   void SetRootNativeRootsReference();
@@ -531,6 +530,7 @@
   HeapEntriesAllocator* native_entries_allocator_;
   // Used during references extraction.
   SnapshotFiller* filler_;
+  v8::HeapProfiler::RetainerEdges edges_;
 
   static HeapThing const kNativesRootObject;
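
The switch of edges_ and children_ from List to std::deque (here and in the
heap-snapshot-generator.cc hunks above) leans on a deque property the
snapshot code depends on: push_back never invalidates references to existing
elements, only iterators, so addresses taken with &edges()[i] stay valid
while more edges are appended; a vector gives no such guarantee. A small
demonstration:

    #include <cassert>
    #include <deque>

    int main() {
      std::deque<int> edges;
      edges.push_back(1);
      int* first = &edges[0];           // like &snapshot_->edges()[i]
      for (int i = 2; i <= 10000; ++i) {
        edges.push_back(i);             // grows, but never moves elements
      }
      assert(*first == 1);              // still valid; a vector could move it
      assert(edges[9999] == 10000);     // random access still O(1)
      return 0;
    }
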
 
diff --git a/src/profiler/profile-generator.cc b/src/profiler/profile-generator.cc
index b647670..742d368 100644
--- a/src/profiler/profile-generator.cc
+++ b/src/profiler/profile-generator.cc
@@ -8,6 +8,7 @@
 #include "src/debug/debug.h"
 #include "src/deoptimizer.h"
 #include "src/global-handles.h"
+#include "src/objects-inl.h"
 #include "src/profiler/cpu-profiler.h"
 #include "src/profiler/profile-generator-inl.h"
 #include "src/tracing/trace-event.h"
@@ -635,9 +636,8 @@
   current_profiles_semaphore_.Signal();
 }
 
-ProfileGenerator::ProfileGenerator(Isolate* isolate,
-                                   CpuProfilesCollection* profiles)
-    : isolate_(isolate), profiles_(profiles) {}
+ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
+    : profiles_(profiles) {}
 
 void ProfileGenerator::RecordTickSample(const TickSample& sample) {
   std::vector<CodeEntry*> entries;
@@ -742,20 +742,7 @@
 }
 
 CodeEntry* ProfileGenerator::FindEntry(void* address) {
-  CodeEntry* entry = code_map_.FindEntry(reinterpret_cast<Address>(address));
-  if (!entry) {
-    RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
-    void* start = reinterpret_cast<void*>(rcs);
-    void* end = reinterpret_cast<void*>(rcs + 1);
-    if (start <= address && address < end) {
-      RuntimeCallCounter* counter =
-          reinterpret_cast<RuntimeCallCounter*>(address);
-      entry = new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name,
-                            CodeEntry::kEmptyNamePrefix, "native V8Runtime");
-      code_map_.AddCode(reinterpret_cast<Address>(address), entry, 1);
-    }
-  }
-  return entry;
+  return code_map_.FindEntry(reinterpret_cast<Address>(address));
 }
 
 CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
diff --git a/src/profiler/profile-generator.h b/src/profiler/profile-generator.h
index 1b3cad6..c108fbd 100644
--- a/src/profiler/profile-generator.h
+++ b/src/profiler/profile-generator.h
@@ -368,7 +368,7 @@
 
 class ProfileGenerator {
  public:
-  ProfileGenerator(Isolate* isolate, CpuProfilesCollection* profiles);
+  explicit ProfileGenerator(CpuProfilesCollection* profiles);
 
   void RecordTickSample(const TickSample& sample);
 
@@ -378,7 +378,6 @@
   CodeEntry* FindEntry(void* address);
   CodeEntry* EntryForVMState(StateTag tag);
 
-  Isolate* isolate_;
   CpuProfilesCollection* profiles_;
   CodeMap code_map_;
 
diff --git a/src/profiler/profiler-listener.cc b/src/profiler/profiler-listener.cc
index 640f967..bacfffa 100644
--- a/src/profiler/profiler-listener.cc
+++ b/src/profiler/profiler-listener.cc
@@ -5,6 +5,7 @@
 #include "src/profiler/profiler-listener.h"
 
 #include "src/deoptimizer.h"
+#include "src/objects-inl.h"
 #include "src/profiler/cpu-profiler.h"
 #include "src/profiler/profile-generator-inl.h"
 #include "src/source-position-table.h"
@@ -84,9 +85,9 @@
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
   rec->start = abstract_code->address();
-  Script* script = Script::cast(shared->script());
   JITLineInfoTable* line_table = NULL;
-  if (script) {
+  if (shared->script()->IsScript()) {
+    Script* script = Script::cast(shared->script());
     line_table = new JITLineInfoTable();
     int offset = abstract_code->IsCode() ? Code::kHeaderSize
                                          : BytecodeArray::kHeaderSize;
@@ -269,8 +270,9 @@
       std::vector<CpuProfileDeoptFrame> inlined_frames;
       for (SourcePositionInfo& pos_info : last_position.InliningStack(code)) {
         DCHECK(pos_info.position.ScriptOffset() != kNoSourcePosition);
-        size_t offset = static_cast<size_t>(pos_info.position.ScriptOffset());
+        if (!pos_info.function->script()->IsScript()) continue;
         int script_id = Script::cast(pos_info.function->script())->id();
+        size_t offset = static_cast<size_t>(pos_info.position.ScriptOffset());
         inlined_frames.push_back(CpuProfileDeoptFrame({script_id, offset}));
       }
       if (!inlined_frames.empty() &&
diff --git a/src/profiler/tracing-cpu-profiler.cc b/src/profiler/tracing-cpu-profiler.cc
index 8b31225..a9b84b6 100644
--- a/src/profiler/tracing-cpu-profiler.cc
+++ b/src/profiler/tracing-cpu-profiler.cc
@@ -8,9 +8,6 @@
 #include "src/tracing/trace-event.h"
 #include "src/v8.h"
 
-#define PROFILER_TRACE_CATEGORY_ENABLED(cat) \
-  (*TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT(cat)))
-
 namespace v8 {
 
 std::unique_ptr<TracingCpuProfiler> TracingCpuProfiler::Create(
@@ -25,8 +22,9 @@
 TracingCpuProfilerImpl::TracingCpuProfilerImpl(Isolate* isolate)
     : isolate_(isolate), profiling_enabled_(false) {
   // Make sure tracing system notices profiler categories.
-  PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler");
-  PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler.hires");
+  TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"));
+  TRACE_EVENT_WARMUP_CATEGORY(
+      TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires"));
   V8::GetCurrentPlatform()->AddTraceStateObserver(this);
 }
 
@@ -36,7 +34,10 @@
 }
 
 void TracingCpuProfilerImpl::OnTraceEnabled() {
-  if (!PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler")) return;
+  bool enabled;
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+      TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"), &enabled);
+  if (!enabled) return;
   profiling_enabled_ = true;
   isolate_->RequestInterrupt(
       [](v8::Isolate*, void* data) {
@@ -59,8 +60,10 @@
 void TracingCpuProfilerImpl::StartProfiling() {
   base::LockGuard<base::Mutex> lock(&mutex_);
   if (!profiling_enabled_ || profiler_) return;
-  int sampling_interval_us =
-      PROFILER_TRACE_CATEGORY_ENABLED("v8.cpu_profiler.hires") ? 100 : 1000;
+  bool enabled;
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+      TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires"), &enabled);
+  int sampling_interval_us = enabled ? 100 : 1000;
   profiler_.reset(new CpuProfiler(isolate_));
   profiler_->set_sampling_interval(
       base::TimeDelta::FromMicroseconds(sampling_interval_us));
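
The tracing hunks above drop the local PROFILER_TRACE_CATEGORY_ENABLED
wrapper for the upstream out-parameter macro. A stand-in showing just the
calling convention; the real macros live in src/tracing/trace-event.h, and
the 100/1000 microsecond values mirror StartProfiling above:

    #include <cstdio>

    static bool g_hires_enabled = true;  // pretend the backend set this

    // Same shape as TRACE_EVENT_CATEGORY_GROUP_ENABLED: result via
    // out-parameter rather than a macro that expands to an expression.
    #define CATEGORY_GROUP_ENABLED(category, ret) \
      do { *(ret) = g_hires_enabled; } while (0)

    int main() {
      bool enabled;
      CATEGORY_GROUP_ENABLED("disabled-by-default-v8.cpu_profiler.hires",
                             &enabled);
      int sampling_interval_us = enabled ? 100 : 1000;
      std::printf("%d us\n", sampling_interval_us);
    }
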
diff --git a/src/promise-utils.cc b/src/promise-utils.cc
deleted file mode 100644
index 607dbe8..0000000
--- a/src/promise-utils.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/promise-utils.h"
-
-#include "src/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-enum PromiseResolvingFunctionContextSlot {
-  kAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
-  kPromiseSlot,
-  kDebugEventSlot,
-  kPromiseContextLength,
-};
-
-JSObject* PromiseUtils::GetPromise(Handle<Context> context) {
-  return JSObject::cast(context->get(kPromiseSlot));
-}
-
-Object* PromiseUtils::GetDebugEvent(Handle<Context> context) {
-  return context->get(kDebugEventSlot);
-}
-
-bool PromiseUtils::HasAlreadyVisited(Handle<Context> context) {
-  return Smi::cast(context->get(kAlreadyVisitedSlot))->value() != 0;
-}
-
-void PromiseUtils::SetAlreadyVisited(Handle<Context> context) {
-  context->set(kAlreadyVisitedSlot, Smi::FromInt(1));
-}
-
-void PromiseUtils::CreateResolvingFunctions(Isolate* isolate,
-                                            Handle<JSObject> promise,
-                                            Handle<Object> debug_event,
-                                            Handle<JSFunction>* resolve,
-                                            Handle<JSFunction>* reject) {
-  DCHECK(debug_event->IsTrue(isolate) || debug_event->IsFalse(isolate));
-  Handle<Context> context =
-      isolate->factory()->NewPromiseResolvingFunctionContext(
-          kPromiseContextLength);
-  context->set_native_context(*isolate->native_context());
-  // We set the closure to be an empty function, same as native context.
-  context->set_closure(isolate->native_context()->closure());
-  context->set(kAlreadyVisitedSlot, Smi::kZero);
-  context->set(kPromiseSlot, *promise);
-  context->set(kDebugEventSlot, *debug_event);
-
-  Handle<SharedFunctionInfo> resolve_shared_fun(
-      isolate->native_context()->promise_resolve_shared_fun(), isolate);
-  Handle<JSFunction> resolve_fun =
-      isolate->factory()->NewFunctionFromSharedFunctionInfo(
-          isolate->sloppy_function_without_prototype_map(), resolve_shared_fun,
-          isolate->native_context(), TENURED);
-
-  Handle<SharedFunctionInfo> reject_shared_fun(
-      isolate->native_context()->promise_reject_shared_fun(), isolate);
-  Handle<JSFunction> reject_fun =
-      isolate->factory()->NewFunctionFromSharedFunctionInfo(
-          isolate->sloppy_function_without_prototype_map(), reject_shared_fun,
-          isolate->native_context(), TENURED);
-
-  resolve_fun->set_context(*context);
-  reject_fun->set_context(*context);
-
-  *resolve = resolve_fun;
-  *reject = reject_fun;
-}
-
-}  // namespace internal
-}  // namespace v8
diff --git a/src/promise-utils.h b/src/promise-utils.h
deleted file mode 100644
index 6ed6fcd..0000000
--- a/src/promise-utils.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PROMISE_UTILS_H_
-#define V8_PROMISE_UTILS_H_
-
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-
-// Helper methods for Promise builtins.
-class PromiseUtils : public AllStatic {
- public:
-  // These get and set the slots on the PromiseResolvingContext, which
-  // is used by the resolve/reject promise callbacks.
-  static JSObject* GetPromise(Handle<Context> context);
-  static Object* GetDebugEvent(Handle<Context> context);
-  static bool HasAlreadyVisited(Handle<Context> context);
-  static void SetAlreadyVisited(Handle<Context> context);
-
-  static void CreateResolvingFunctions(Isolate* isolate,
-                                       Handle<JSObject> promise,
-                                       Handle<Object> debug_event,
-                                       Handle<JSFunction>* resolve,
-                                       Handle<JSFunction>* reject);
-};
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_PROMISE_UTILS_H_
diff --git a/src/property-descriptor.cc b/src/property-descriptor.cc
index f22a263..70ddd5d 100644
--- a/src/property-descriptor.cc
+++ b/src/property-descriptor.cc
@@ -61,19 +61,26 @@
     PropertyDetails details = descs->GetDetails(i);
     Name* key = descs->GetKey(i);
     Handle<Object> value;
-    switch (details.type()) {
-      case DATA:
+    if (details.location() == kField) {
+      if (details.kind() == kData) {
         value = JSObject::FastPropertyAt(Handle<JSObject>::cast(obj),
                                          details.representation(),
                                          FieldIndex::ForDescriptor(map, i));
-        break;
-      case DATA_CONSTANT:
-        value = handle(descs->GetConstant(i), isolate);
-        break;
-      case ACCESSOR:
-      case ACCESSOR_CONSTANT:
+      } else {
+        DCHECK_EQ(kAccessor, details.kind());
         // Bail out to slow path.
         return false;
+      }
+    } else {
+      DCHECK_EQ(kDescriptor, details.location());
+      if (details.kind() == kData) {
+        value = handle(descs->GetValue(i), isolate);
+      } else {
+        DCHECK_EQ(kAccessor, details.kind());
+        // Bail out to slow path.
+        return false;
+      }
     }
     Heap* heap = isolate->heap();
     if (key == heap->enumerable_string()) {
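
The rewritten switch above replaces the packed PropertyType enum with
separate kind()/location() queries; per the definitions deleted from
property-details.h below, the old values packed the two axes as
type = (location << 1) | kind. A tiny model of the equivalence:

    #include <cassert>

    enum PropertyKind { kData = 0, kAccessor = 1 };
    enum PropertyLocation { kField = 0, kDescriptor = 1 };

    int main() {
      // Old DATA_CONSTANT was (kDescriptor << 1) | kData == 2.
      int old_type = (kDescriptor << 1) | kData;
      PropertyKind kind = static_cast<PropertyKind>(old_type & 1);
      PropertyLocation location =
          static_cast<PropertyLocation>(old_type >> 1);
      assert(kind == kData && location == kDescriptor);
      return 0;
    }
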
diff --git a/src/property-details.h b/src/property-details.h
index d720b1c..6e9184d 100644
--- a/src/property-details.h
+++ b/src/property-details.h
@@ -7,6 +7,8 @@
 
 #include "include/v8.h"
 #include "src/allocation.h"
+// TODO(ishell): remove once FLAG_track_constant_fields is removed.
+#include "src/flags.h"
 #include "src/utils.h"
 
 namespace v8 {
@@ -64,27 +66,21 @@
 class Smi;
 class TypeInfo;
 
-// Type of properties.
 // Order of kinds is significant.
 // Must fit in the BitField PropertyDetails::KindField.
 enum PropertyKind { kData = 0, kAccessor = 1 };
 
-
 // Order of modes is significant.
-// Must fit in the BitField PropertyDetails::StoreModeField.
+// Must fit in the BitField PropertyDetails::LocationField.
 enum PropertyLocation { kField = 0, kDescriptor = 1 };
 
+// Order of modes is significant.
+// Must fit in the BitField PropertyDetails::ConstnessField.
+enum PropertyConstness { kMutable = 0, kConst = 1 };
 
-// Order of properties is significant.
-// Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in debug/mirrors.js.
-enum PropertyType {
-  DATA = (kField << 1) | kData,
-  DATA_CONSTANT = (kDescriptor << 1) | kData,
-  ACCESSOR = (kField << 1) | kAccessor,
-  ACCESSOR_CONSTANT = (kDescriptor << 1) | kAccessor
-};
-
+// TODO(ishell): remove once constant field tracking is done.
+const PropertyConstness kDefaultFieldConstness =
+    FLAG_track_constant_fields ? kConst : kMutable;
 
 class Representation {
  public:
@@ -234,38 +230,29 @@
 // They are used both in property dictionaries and instance descriptors.
 class PropertyDetails BASE_EMBEDDED {
  public:
-  PropertyDetails(PropertyAttributes attributes, PropertyType type, int index,
+  // Property details for dictionary mode properties/elements.
+  PropertyDetails(PropertyKind kind, PropertyAttributes attributes, int index,
                   PropertyCellType cell_type) {
-    value_ = TypeField::encode(type) | AttributesField::encode(attributes) |
+    value_ = KindField::encode(kind) | LocationField::encode(kField) |
+             AttributesField::encode(attributes) |
              DictionaryStorageField::encode(index) |
              PropertyCellTypeField::encode(cell_type);
-
-    DCHECK(type == this->type());
-    DCHECK(attributes == this->attributes());
   }
 
-  PropertyDetails(PropertyAttributes attributes,
-                  PropertyType type,
-                  Representation representation,
-                  int field_index = 0) {
-    value_ = TypeField::encode(type)
-        | AttributesField::encode(attributes)
-        | RepresentationField::encode(EncodeRepresentation(representation))
-        | FieldIndexField::encode(field_index);
-  }
-
-  PropertyDetails(PropertyAttributes attributes, PropertyKind kind,
-                  PropertyLocation location, Representation representation,
-                  int field_index = 0) {
-    value_ = KindField::encode(kind) | LocationField::encode(location) |
-             AttributesField::encode(attributes) |
+  // Property details for fast mode properties.
+  PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
+                  PropertyLocation location, PropertyConstness constness,
+                  Representation representation, int field_index = 0) {
+    value_ = KindField::encode(kind) | AttributesField::encode(attributes) |
+             LocationField::encode(location) |
+             ConstnessField::encode(constness) |
              RepresentationField::encode(EncodeRepresentation(representation)) |
              FieldIndexField::encode(field_index);
   }
 
   static PropertyDetails Empty(
       PropertyCellType cell_type = PropertyCellType::kNoCell) {
-    return PropertyDetails(NONE, DATA, 0, cell_type);
+    return PropertyDetails(kData, NONE, 0, cell_type);
   }
 
   int pointer() const { return DescriptorPointer::decode(value_); }
@@ -289,6 +276,9 @@
   PropertyDetails CopyWithRepresentation(Representation representation) const {
     return PropertyDetails(value_, representation);
   }
+  PropertyDetails CopyWithConstness(PropertyConstness constness) const {
+    return PropertyDetails(value_, constness);
+  }
   PropertyDetails CopyAddAttributes(PropertyAttributes new_attributes) const {
     new_attributes =
         static_cast<PropertyAttributes>(attributes() | new_attributes);
@@ -309,8 +299,7 @@
 
   PropertyKind kind() const { return KindField::decode(value_); }
   PropertyLocation location() const { return LocationField::decode(value_); }
-
-  PropertyType type() const { return TypeField::decode(value_); }
+  PropertyConstness constness() const { return ConstnessField::decode(value_); }
 
   PropertyAttributes attributes() const {
     return AttributesField::decode(value_);
@@ -343,28 +332,30 @@
   // Bit fields in value_ (type, shift, size). Must be public so the
   // constants can be embedded in generated code.
   class KindField : public BitField<PropertyKind, 0, 1> {};
-  class LocationField : public BitField<PropertyLocation, 1, 1> {};
-  class AttributesField : public BitField<PropertyAttributes, 2, 3> {};
+  class LocationField : public BitField<PropertyLocation, KindField::kNext, 1> {
+  };
+  class ConstnessField
+      : public BitField<PropertyConstness, LocationField::kNext, 1> {};
+  class AttributesField
+      : public BitField<PropertyAttributes, ConstnessField::kNext, 3> {};
   static const int kAttributesReadOnlyMask =
       (READ_ONLY << AttributesField::kShift);
 
   // Bit fields for normalized objects.
-  class PropertyCellTypeField : public BitField<PropertyCellType, 5, 2> {};
-  class DictionaryStorageField : public BitField<uint32_t, 7, 24> {};
+  class PropertyCellTypeField
+      : public BitField<PropertyCellType, AttributesField::kNext, 2> {};
+  class DictionaryStorageField
+      : public BitField<uint32_t, PropertyCellTypeField::kNext, 23> {};
 
   // Bit fields for fast objects.
-  class RepresentationField : public BitField<uint32_t, 5, 4> {};
+  class RepresentationField
+      : public BitField<uint32_t, AttributesField::kNext, 4> {};
   class DescriptorPointer
-      : public BitField<uint32_t, 9, kDescriptorIndexBitCount> {};  // NOLINT
-  class FieldIndexField
-      : public BitField<uint32_t, 9 + kDescriptorIndexBitCount,
+      : public BitField<uint32_t, RepresentationField::kNext,
                         kDescriptorIndexBitCount> {};  // NOLINT
-
-  // NOTE: TypeField overlaps with KindField and LocationField.
-  class TypeField : public BitField<PropertyType, 0, 2> {};
-  STATIC_ASSERT(KindField::kNext == LocationField::kShift);
-  STATIC_ASSERT(TypeField::kShift == KindField::kShift);
-  STATIC_ASSERT(TypeField::kNext == LocationField::kNext);
+  class FieldIndexField : public BitField<uint32_t, DescriptorPointer::kNext,
+                                          kDescriptorIndexBitCount> {
+  };  // NOLINT
 
   // All bits for both fast and slow objects must fit in a smi.
   STATIC_ASSERT(DictionaryStorageField::kNext <= 31);
@@ -377,6 +368,19 @@
   void Print(bool dictionary_mode);
 #endif
 
+  enum PrintMode {
+    kPrintAttributes = 1 << 0,
+    kPrintFieldIndex = 1 << 1,
+    kPrintRepresentation = 1 << 2,
+    kPrintPointer = 1 << 3,
+
+    kForProperties = kPrintFieldIndex,
+    kForTransitions = kPrintAttributes,
+    kPrintFull = -1,
+  };
+  void PrintAsSlowTo(std::ostream& out);
+  void PrintAsFastTo(std::ostream& out, PrintMode mode = kPrintFull);
+
  private:
   PropertyDetails(int value, int pointer) {
     value_ = DescriptorPointer::update(value, pointer);
@@ -385,6 +389,9 @@
     value_ = RepresentationField::update(
         value, EncodeRepresentation(representation));
   }
+  PropertyDetails(int value, PropertyConstness constness) {
+    value_ = ConstnessField::update(value, constness);
+  }
   PropertyDetails(int value, PropertyAttributes attributes) {
     value_ = AttributesField::update(value, attributes);
   }
@@ -392,10 +399,25 @@
   uint32_t value_;
 };
 
+// A kField location is more general than kDescriptor; kDescriptor
+// generalizes only to itself.
+inline bool IsGeneralizableTo(PropertyLocation a, PropertyLocation b) {
+  return b == kField || a == kDescriptor;
+}
+
+// kMutable constness is more general than kConst; kConst generalizes only
+// to itself.
+inline bool IsGeneralizableTo(PropertyConstness a, PropertyConstness b) {
+  return b == kMutable || a == kConst;
+}
+
+inline PropertyConstness GeneralizeConstness(PropertyConstness a,
+                                             PropertyConstness b) {
+  return a == kMutable ? kMutable : b;
+}
 
 std::ostream& operator<<(std::ostream& os,
                          const PropertyAttributes& attributes);
-std::ostream& operator<<(std::ostream& os, const PropertyDetails& details);
 }  // namespace internal
 }  // namespace v8
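
The field declarations above chain each BitField's shift off the previous
field's kNext, so inserting ConstnessField between LocationField and
AttributesField renumbers everything after it without hand-counted shifts.
A simplified model of the template; the real one is V8's BitField utility:

    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static const int kShift = shift;
      static const int kNext = shift + size;  // where the next field starts
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value >> shift) & ((1u << size) - 1));
      }
    };

    enum PropertyKind { kData = 0, kAccessor = 1 };
    enum PropertyLocation { kField = 0, kDescriptor = 1 };
    enum PropertyConstness { kMutable = 0, kConst = 1 };

    using KindField = BitField<PropertyKind, 0, 1>;
    using LocationField = BitField<PropertyLocation, KindField::kNext, 1>;
    using ConstnessField =
        BitField<PropertyConstness, LocationField::kNext, 1>;

    static_assert(ConstnessField::kShift == 2, "fields pack without gaps");

    int main() {
      uint32_t v = KindField::encode(kData) |
                   LocationField::encode(kField) |
                   ConstnessField::encode(kConst);
      return ConstnessField::decode(v) == kConst ? 0 : 1;
    }
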
 
diff --git a/src/property.cc b/src/property.cc
index a4e0d67..339076d 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -6,6 +6,7 @@
 
 #include "src/field-type.h"
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 #include "src/ostreams.h"
 
 namespace v8 {
@@ -21,72 +22,83 @@
   return os;
 }
 
-DataDescriptor::DataDescriptor(Handle<Name> key, int field_index,
-                               PropertyAttributes attributes,
-                               Representation representation)
-    : Descriptor(key, FieldType::Any(key->GetIsolate()), attributes, DATA,
-                 representation, field_index) {}
+Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
+                                 PropertyAttributes attributes,
+                                 Representation representation) {
+  return DataField(key, field_index, attributes, kMutable, representation,
+                   FieldType::Any(key->GetIsolate()));
+}
 
-struct FastPropertyDetails {
-  explicit FastPropertyDetails(const PropertyDetails& v) : details(v) {}
-  const PropertyDetails details;
-};
+Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
+                                 PropertyAttributes attributes,
+                                 PropertyConstness constness,
+                                 Representation representation,
+                                 Handle<Object> wrapped_field_type) {
+  DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeakCell());
+  PropertyDetails details(kData, attributes, kField, constness, representation,
+                          field_index);
+  return Descriptor(key, wrapped_field_type, details);
+}
 
+Descriptor Descriptor::DataConstant(Handle<Name> key, int field_index,
+                                    Handle<Object> value,
+                                    PropertyAttributes attributes) {
+  if (FLAG_track_constant_fields) {
+    Handle<Object> any_type(FieldType::Any(), key->GetIsolate());
+    return DataField(key, field_index, attributes, kConst,
+                     Representation::Tagged(), any_type);
+  } else {
+    return Descriptor(key, value, kData, attributes, kDescriptor, kConst,
+                      value->OptimalRepresentation(), field_index);
+  }
+}
 
 // Outputs PropertyDetails as a dictionary details.
-std::ostream& operator<<(std::ostream& os, const PropertyDetails& details) {
+void PropertyDetails::PrintAsSlowTo(std::ostream& os) {
   os << "(";
-  if (details.location() == kDescriptor) {
-    os << "immutable ";
-  }
-  os << (details.kind() == kData ? "data" : "accessor");
-  return os << ", dictionary_index: " << details.dictionary_index()
-            << ", attrs: " << details.attributes() << ")";
+  if (constness() == kConst) os << "const ";
+  os << (kind() == kData ? "data" : "accessor");
+  os << ", dictionary_index: " << dictionary_index();
+  os << ", attrs: " << attributes() << ")";
 }
 
-
 // Outputs PropertyDetails as a descriptor array details.
-std::ostream& operator<<(std::ostream& os,
-                         const FastPropertyDetails& details_fast) {
-  const PropertyDetails& details = details_fast.details;
+void PropertyDetails::PrintAsFastTo(std::ostream& os, PrintMode mode) {
   os << "(";
-  if (details.location() == kDescriptor) {
-    os << "immutable ";
+  if (constness() == kConst) os << "const ";
+  os << (kind() == kData ? "data" : "accessor");
+  if (location() == kField) {
+    os << " field";
+    if (mode & kPrintFieldIndex) {
+      os << " " << field_index();
+    }
+    if (mode & kPrintRepresentation) {
+      os << ":" << representation().Mnemonic();
+    }
+  } else {
+    os << " descriptor";
   }
-  os << (details.kind() == kData ? "data" : "accessor");
-  os << ": " << details.representation().Mnemonic();
-  if (details.location() == kField) {
-    os << ", field_index: " << details.field_index();
+  if (mode & kPrintPointer) {
+    os << ", p: " << pointer();
   }
-  return os << ", p: " << details.pointer()
-            << ", attrs: " << details.attributes() << ")";
+  if (mode & kPrintAttributes) {
+    os << ", attrs: " << attributes();
+  }
+  os << ")";
 }
 
-
 #ifdef OBJECT_PRINT
 void PropertyDetails::Print(bool dictionary_mode) {
   OFStream os(stdout);
   if (dictionary_mode) {
-    os << *this;
+    PrintAsSlowTo(os);
   } else {
-    os << FastPropertyDetails(*this);
+    PrintAsFastTo(os, PrintMode::kPrintFull);
   }
   os << "\n" << std::flush;
 }
 #endif
 
-
-std::ostream& operator<<(std::ostream& os, const Descriptor& d) {
-  Object* value = *d.GetValue();
-  os << "Descriptor " << Brief(*d.GetKey()) << " @ " << Brief(value) << " ";
-  if (value->IsAccessorPair()) {
-    AccessorPair* pair = AccessorPair::cast(value);
-    os << "(get: " << Brief(pair->getter())
-       << ", set: " << Brief(pair->setter()) << ") ";
-  }
-  os << FastPropertyDetails(d.GetDetails());
-  return os;
-}
-
 }  // namespace internal
 }  // namespace v8
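
DataConstant above keys off FLAG_track_constant_fields and the
kConst/kMutable distinction; the generalization helpers added in
property-details.h earlier define the lattice it participates in, with
kMutable (like kField) as the general element. Their behavior, spelled out:

    #include <cassert>

    enum PropertyConstness { kMutable = 0, kConst = 1 };

    // Same shape as the inline helpers in property-details.h above.
    bool IsGeneralizableTo(PropertyConstness a, PropertyConstness b) {
      return b == kMutable || a == kConst;
    }
    PropertyConstness GeneralizeConstness(PropertyConstness a,
                                          PropertyConstness b) {
      return a == kMutable ? kMutable : b;
    }

    int main() {
      // kConst generalizes to kMutable, never the other way around.
      assert(IsGeneralizableTo(kConst, kMutable));
      assert(!IsGeneralizableTo(kMutable, kConst));
      // Joining with anything mutable stays mutable.
      assert(GeneralizeConstness(kConst, kMutable) == kMutable);
      assert(GeneralizeConstness(kMutable, kConst) == kMutable);
      assert(GeneralizeConstness(kConst, kConst) == kConst);
      return 0;
    }
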
diff --git a/src/property.h b/src/property.h
index 233233c..ab183d9 100644
--- a/src/property.h
+++ b/src/property.h
@@ -18,22 +18,48 @@
 // Each descriptor has a key, property attributes, property type,
 // property index (in the actual instance-descriptor array) and
 // optionally a piece of data.
-class Descriptor BASE_EMBEDDED {
+class Descriptor final BASE_EMBEDDED {
  public:
+  Descriptor() : details_(Smi::kZero) {}
+
   Handle<Name> GetKey() const { return key_; }
   Handle<Object> GetValue() const { return value_; }
   PropertyDetails GetDetails() const { return details_; }
 
   void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
 
+  static Descriptor DataField(Handle<Name> key, int field_index,
+                              PropertyAttributes attributes,
+                              Representation representation);
+
+  static Descriptor DataField(Handle<Name> key, int field_index,
+                              PropertyAttributes attributes,
+                              PropertyConstness constness,
+                              Representation representation,
+                              Handle<Object> wrapped_field_type);
+
+  static Descriptor DataConstant(Handle<Name> key, Handle<Object> value,
+                                 PropertyAttributes attributes) {
+    return Descriptor(key, value, kData, attributes, kDescriptor, kConst,
+                      value->OptimalRepresentation(), 0);
+  }
+
+  static Descriptor DataConstant(Handle<Name> key, int field_index,
+                                 Handle<Object> value,
+                                 PropertyAttributes attributes);
+
+  static Descriptor AccessorConstant(Handle<Name> key, Handle<Object> foreign,
+                                     PropertyAttributes attributes) {
+    return Descriptor(key, foreign, kAccessor, attributes, kDescriptor, kConst,
+                      Representation::Tagged(), 0);
+  }
+
  private:
   Handle<Name> key_;
   Handle<Object> value_;
   PropertyDetails details_;
 
  protected:
-  Descriptor() : details_(Smi::kZero) {}
-
   void Init(Handle<Name> key, Handle<Object> value, PropertyDetails details) {
     DCHECK(key->IsUniqueName());
     DCHECK_IMPLIES(key->IsPrivate(), !details.IsEnumerable());
@@ -48,57 +74,23 @@
     DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
   }
 
-  Descriptor(Handle<Name> key, Handle<Object> value,
-             PropertyAttributes attributes, PropertyType type,
-             Representation representation, int field_index = 0)
+  Descriptor(Handle<Name> key, Handle<Object> value, PropertyKind kind,
+             PropertyAttributes attributes, PropertyLocation location,
+             PropertyConstness constness, Representation representation,
+             int field_index)
       : key_(key),
         value_(value),
-        details_(attributes, type, representation, field_index) {
+        details_(kind, attributes, location, constness, representation,
+                 field_index) {
     DCHECK(key->IsUniqueName());
     DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
   }
 
   friend class DescriptorArray;
   friend class Map;
+  friend class MapUpdater;
 };
 
-
-std::ostream& operator<<(std::ostream& os, const Descriptor& d);
-
-
-class DataDescriptor final : public Descriptor {
- public:
-  DataDescriptor(Handle<Name> key, int field_index,
-                 PropertyAttributes attributes, Representation representation);
-  // The field type is either a simple type or a map wrapped in a weak cell.
-  DataDescriptor(Handle<Name> key, int field_index,
-                 Handle<Object> wrapped_field_type,
-                 PropertyAttributes attributes, Representation representation)
-      : Descriptor(key, wrapped_field_type, attributes, DATA, representation,
-                   field_index) {
-    DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeakCell());
-  }
-};
-
-
-class DataConstantDescriptor final : public Descriptor {
- public:
-  DataConstantDescriptor(Handle<Name> key, Handle<Object> value,
-                         PropertyAttributes attributes)
-      : Descriptor(key, value, attributes, DATA_CONSTANT,
-                   value->OptimalRepresentation()) {}
-};
-
-
-class AccessorConstantDescriptor final : public Descriptor {
- public:
-  AccessorConstantDescriptor(Handle<Name> key, Handle<Object> foreign,
-                             PropertyAttributes attributes)
-      : Descriptor(key, foreign, attributes, ACCESSOR_CONSTANT,
-                   Representation::Tagged()) {}
-};
-
-
 }  // namespace internal
 }  // namespace v8
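
The property.h hunk above folds the three one-off descriptor subclasses into static factory methods on a single final class. A minimal standalone sketch of that named-constructor idiom, with std::string and int standing in for V8's handles and PropertyDetails (illustrative only, not the V8 API):

    #include <string>
    #include <utility>

    class Descriptor final {
     public:
      static Descriptor DataField(std::string key, int field_index) {
        return Descriptor(std::move(key), /*kind=*/0, field_index);
      }
      static Descriptor DataConstant(std::string key) {
        return Descriptor(std::move(key), /*kind=*/1, /*field_index=*/0);
      }
      static Descriptor AccessorConstant(std::string key) {
        return Descriptor(std::move(key), /*kind=*/2, /*field_index=*/0);
      }

     private:
      // One private constructor centralizes the invariant checks that the
      // three subclasses previously duplicated.
      Descriptor(std::string key, int kind, int field_index)
          : key_(std::move(key)), kind_(kind), field_index_(field_index) {}

      std::string key_;
      int kind_;
      int field_index_;
    };

The payoff is visible in the hunk: callers read Descriptor::DataConstant(...) instead of constructing a DataConstantDescriptor, and MapUpdater can be befriended once rather than per subclass.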
 
diff --git a/src/prototype.h b/src/prototype.h
index 38d6cab..3d973db 100644
--- a/src/prototype.h
+++ b/src/prototype.h
@@ -161,8 +161,7 @@
     // we visit to an arbitrarily chosen large number.
     seen_proxies_++;
     if (seen_proxies_ > kProxyPrototypeLimit) {
-      isolate_->Throw(
-          *isolate_->factory()->NewRangeError(MessageTemplate::kStackOverflow));
+      isolate_->StackOverflow();
       return false;
     }
     MaybeHandle<Object> proto =
@@ -174,6 +173,7 @@
   }
 
   bool IsAtEnd() const { return is_at_end_; }
+  Isolate* isolate() const { return isolate_; }
 
  private:
   Isolate* isolate_;
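
For context on the prototype.h change: the iterator counts proxy hops and bails out at a fixed limit, and the patch reports that condition through Isolate::StackOverflow() instead of hand-building a RangeError. A minimal model of the bail-out, with an illustrative limit since the real kProxyPrototypeLimit constant is not quoted in this hunk:

    #include <cstdio>

    constexpr int kProxyPrototypeLimit = 1000;  // illustrative value only

    bool AdvanceThroughProxy(int& seen_proxies) {
      if (++seen_proxies > kProxyPrototypeLimit) {
        // Routing through the isolate's stack-overflow path keeps the
        // thrown error consistent with other engine recursion limits.
        std::fprintf(stderr, "RangeError: stack limit reached\n");
        return false;
      }
      return true;
    }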
diff --git a/src/regexp/interpreter-irregexp.cc b/src/regexp/interpreter-irregexp.cc
index 14834d5..4f8f96a 100644
--- a/src/regexp/interpreter-irregexp.cc
+++ b/src/regexp/interpreter-irregexp.cc
@@ -9,6 +9,7 @@
 #include "src/regexp/interpreter-irregexp.h"
 
 #include "src/ast/ast.h"
+#include "src/objects-inl.h"
 #include "src/regexp/bytecodes-irregexp.h"
 #include "src/regexp/jsregexp.h"
 #include "src/regexp/regexp-macro-assembler.h"
diff --git a/src/regexp/jsregexp-inl.h b/src/regexp/jsregexp-inl.h
index ca7a9fe..4bcda43 100644
--- a/src/regexp/jsregexp-inl.h
+++ b/src/regexp/jsregexp-inl.h
@@ -7,7 +7,6 @@
 #define V8_REGEXP_JSREGEXP_INL_H_
 
 #include "src/allocation.h"
-#include "src/handles.h"
 #include "src/heap/heap.h"
 #include "src/objects.h"
 #include "src/regexp/jsregexp.h"
diff --git a/src/regexp/jsregexp.cc b/src/regexp/jsregexp.cc
index f0abc9a..0ed3086 100644
--- a/src/regexp/jsregexp.cc
+++ b/src/regexp/jsregexp.cc
@@ -27,7 +27,7 @@
 #include "src/unicode-decoder.h"
 
 #ifdef V8_I18N_SUPPORT
-#include "unicode/uset.h"
+#include "unicode/uniset.h"
 #include "unicode/utypes.h"
 #endif  // V8_I18N_SUPPORT
 
@@ -451,7 +451,7 @@
 
 int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
                                 Handle<String> subject) {
-  subject = String::Flatten(subject);
+  DCHECK(subject->IsFlat());
 
   // Check representation of the underlying storage.
   bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
@@ -565,6 +565,8 @@
   Isolate* isolate = regexp->GetIsolate();
   DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
 
+  subject = String::Flatten(subject);
+
   // Prepare space for the return values.
 #if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG)
   if (FLAG_trace_regexp_bytecodes) {
@@ -1104,12 +1106,11 @@
     RegExpNode* start,
     int capture_count,
     Handle<String> pattern) {
-  Heap* heap = pattern->GetHeap();
+  Isolate* isolate = pattern->GetHeap()->isolate();
 
 #ifdef DEBUG
   if (FLAG_trace_regexp_assembler)
-    macro_assembler_ =
-        new RegExpMacroAssemblerTracer(isolate(), macro_assembler);
+    macro_assembler_ = new RegExpMacroAssemblerTracer(isolate, macro_assembler);
   else
 #endif
     macro_assembler_ = macro_assembler;
@@ -1133,11 +1134,11 @@
   }
 
   Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
-  heap->IncreaseTotalRegexpCodeGenerated(code->Size());
+  isolate->IncreaseTotalRegexpCodeGenerated(code->Size());
   work_list_ = NULL;
 #ifdef ENABLE_DISASSEMBLER
   if (FLAG_print_code) {
-    CodeTracer::Scope trace_scope(heap->isolate()->GetCodeTracer());
+    CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
     OFStream os(trace_scope.file());
     Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os);
   }
@@ -5114,30 +5115,22 @@
   // Use ICU to compute the case fold closure over the ranges.
   DCHECK(compiler->unicode());
   DCHECK(compiler->ignore_case());
-  USet* set = uset_openEmpty();
+  icu::UnicodeSet set;
   for (int i = 0; i < ranges->length(); i++) {
-    uset_addRange(set, ranges->at(i).from(), ranges->at(i).to());
+    set.add(ranges->at(i).from(), ranges->at(i).to());
   }
   ranges->Clear();
-  uset_closeOver(set, USET_CASE_INSENSITIVE);
+  set.closeOver(USET_CASE_INSENSITIVE);
   // Full case mappings map single characters to multiple characters.
   // Those are represented as strings in the set. Remove them so that
   // we end up with only simple and common case mappings.
-  uset_removeAllStrings(set);
-  int item_count = uset_getItemCount(set);
-  int item_result = 0;
-  UErrorCode ec = U_ZERO_ERROR;
+  set.removeAllStrings();
   Zone* zone = compiler->zone();
-  for (int i = 0; i < item_count; i++) {
-    uc32 start = 0;
-    uc32 end = 0;
-    item_result += uset_getItem(set, i, &start, &end, nullptr, 0, &ec);
-    ranges->Add(CharacterRange::Range(start, end), zone);
+  for (int i = 0; i < set.getRangeCount(); i++) {
+    ranges->Add(CharacterRange::Range(set.getRangeStart(i), set.getRangeEnd(i)),
+                zone);
   }
   // No errors, and everything we collected has been ranges.
-  DCHECK_EQ(U_ZERO_ERROR, ec);
-  DCHECK_EQ(0, item_result);
-  uset_close(set);
 #else
   // Fallback if ICU is not included.
   CharacterRange::AddCaseEquivalents(compiler->isolate(), compiler->zone(),
@@ -6742,8 +6735,7 @@
   // Inserted here, instead of in Assembler, because it depends on information
   // in the AST that isn't replicated in the Node structure.
   static const int kMaxBacksearchLimit = 1024;
-  if (is_end_anchored &&
-      !is_start_anchored &&
+  if (is_end_anchored && !is_start_anchored && !is_sticky &&
       max_length < kMaxBacksearchLimit) {
     macro_assembler.SetCurrentPositionFromEnd(max_length);
   }
@@ -6768,8 +6760,9 @@
 bool RegExpEngine::TooMuchRegExpCode(Handle<String> pattern) {
   Heap* heap = pattern->GetHeap();
   bool too_much = pattern->length() > RegExpImpl::kRegExpTooLargeToOptimize;
-  if (heap->total_regexp_code_generated() > RegExpImpl::kRegExpCompiledLimit &&
-      heap->memory_allocator()->SizeExecutable() >
+  if (heap->isolate()->total_regexp_code_generated() >
+          RegExpImpl::kRegExpCompiledLimit &&
+      heap->CommittedMemoryExecutable() >
           RegExpImpl::kRegExpExecutableMemoryLimit) {
     too_much = true;
   }
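
The jsregexp.cc conversion above swaps ICU's C uset_* interface for the C++ icu::UnicodeSet, whose destructor makes the manual uset_close() and item-count error plumbing unnecessary. A self-contained example of the same call sequence (requires an ICU development install; link with -licuuc):

    #include <unicode/uniset.h>

    #include <cstdio>

    int main() {
      icu::UnicodeSet set;
      set.add(U'k');                         // seed with one character
      set.closeOver(USET_CASE_INSENSITIVE);  // adds 'K' and U+212A KELVIN SIGN
      set.removeAllStrings();                // drop full case mappings
      for (int32_t i = 0; i < set.getRangeCount(); i++) {
        std::printf("U+%04X..U+%04X\n",
                    static_cast<unsigned>(set.getRangeStart(i)),
                    static_cast<unsigned>(set.getRangeEnd(i)));
      }
      return 0;
    }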
diff --git a/src/regexp/jsregexp.h b/src/regexp/jsregexp.h
index b2e84ba..77d61ae 100644
--- a/src/regexp/jsregexp.h
+++ b/src/regexp/jsregexp.h
@@ -158,7 +158,7 @@
   // total regexp code compiled including code that has subsequently been freed
   // and the total executable memory at any point.
   static const size_t kRegExpExecutableMemoryLimit = 16 * MB;
-  static const int kRegExpCompiledLimit = 1 * MB;
+  static const size_t kRegExpCompiledLimit = 1 * MB;
   static const int kRegExpTooLargeToOptimize = 20 * KB;
 
  private:
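
The one-line jsregexp.h change is a type fix rather than a behavior change: the limit is compared against the isolate-wide code-size counter, which is presumably size_t, and an int constant in that comparison mixes signednesses. A compressed illustration:

    #include <cstddef>

    constexpr size_t MB = 1024 * 1024;
    constexpr size_t kRegExpCompiledLimit = 1 * MB;

    // Both operands are size_t, so the comparison involves no implicit
    // sign conversion and cannot trip -Wsign-compare.
    bool OverCompiledLimit(size_t total_regexp_code_generated) {
      return total_regexp_code_generated > kRegExpCompiledLimit;
    }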
diff --git a/src/regexp/regexp-ast.cc b/src/regexp/regexp-ast.cc
index b5c2bb6..85babb1 100644
--- a/src/regexp/regexp-ast.cc
+++ b/src/regexp/regexp-ast.cc
@@ -264,6 +264,12 @@
   return NULL;
 }
 
+void* RegExpUnparser::VisitGroup(RegExpGroup* that, void* data) {
+  os_ << "(?: ";
+  that->body()->Accept(this, data);
+  os_ << ")";
+  return NULL;
+}
 
 void* RegExpUnparser::VisitLookaround(RegExpLookaround* that, void* data) {
   os_ << "(";
diff --git a/src/regexp/regexp-ast.h b/src/regexp/regexp-ast.h
index 07a8155..a45d083 100644
--- a/src/regexp/regexp-ast.h
+++ b/src/regexp/regexp-ast.h
@@ -21,12 +21,12 @@
   VISIT(Atom)                             \
   VISIT(Quantifier)                       \
   VISIT(Capture)                          \
+  VISIT(Group)                            \
   VISIT(Lookaround)                       \
   VISIT(BackReference)                    \
   VISIT(Empty)                            \
   VISIT(Text)
 
-
 #define FORWARD_DECLARE(Name) class RegExp##Name;
 FOR_EACH_REG_EXP_TREE_TYPE(FORWARD_DECLARE)
 #undef FORWARD_DECLARE
@@ -440,6 +440,26 @@
   const ZoneVector<uc16>* name_;
 };
 
+class RegExpGroup final : public RegExpTree {
+ public:
+  explicit RegExpGroup(RegExpTree* body) : body_(body) {}
+  void* Accept(RegExpVisitor* visitor, void* data) override;
+  RegExpNode* ToNode(RegExpCompiler* compiler,
+                     RegExpNode* on_success) override {
+    return body_->ToNode(compiler, on_success);
+  }
+  RegExpGroup* AsGroup() override;
+  bool IsAnchoredAtStart() override { return body_->IsAnchoredAtStart(); }
+  bool IsAnchoredAtEnd() override { return body_->IsAnchoredAtEnd(); }
+  bool IsGroup() override;
+  int min_match() override { return body_->min_match(); }
+  int max_match() override { return body_->max_match(); }
+  Interval CaptureRegisters() override { return body_->CaptureRegisters(); }
+  RegExpTree* body() { return body_; }
+
+ private:
+  RegExpTree* body_;
+};
 
 class RegExpLookaround final : public RegExpTree {
  public:
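
RegExpGroup, added above for non-capturing groups, is a pure wrapper: ToNode compiles directly to the body's node and every query (anchoring, match lengths, capture registers) forwards. A stripped-down model of that delegating-node shape, with stand-in types:

    struct Node {
      virtual ~Node() = default;
      virtual int min_match() const = 0;
      virtual int max_match() const = 0;
      virtual bool anchored_at_start() const = 0;
    };

    // A non-capturing group adds no matching behavior of its own, so it
    // forwards every query to the wrapped body.
    struct Group final : Node {
      explicit Group(Node* body) : body_(body) {}
      int min_match() const override { return body_->min_match(); }
      int max_match() const override { return body_->max_match(); }
      bool anchored_at_start() const override {
        return body_->anchored_at_start();
      }

     private:
      Node* body_;  // not owned; zone-allocated in the real parser
    };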
diff --git a/src/regexp/regexp-macro-assembler-irregexp.cc b/src/regexp/regexp-macro-assembler-irregexp.cc
index a0bb5e7..3316c33 100644
--- a/src/regexp/regexp-macro-assembler-irregexp.cc
+++ b/src/regexp/regexp-macro-assembler-irregexp.cc
@@ -7,9 +7,10 @@
 #include "src/regexp/regexp-macro-assembler-irregexp.h"
 
 #include "src/ast/ast.h"
+#include "src/objects-inl.h"
 #include "src/regexp/bytecodes-irregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
 #include "src/regexp/regexp-macro-assembler-irregexp-inl.h"
+#include "src/regexp/regexp-macro-assembler.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/regexp/regexp-macro-assembler-tracer.cc b/src/regexp/regexp-macro-assembler-tracer.cc
index abdf577..d311a09 100644
--- a/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/src/regexp/regexp-macro-assembler-tracer.cc
@@ -5,6 +5,7 @@
 #include "src/regexp/regexp-macro-assembler-tracer.h"
 
 #include "src/ast/ast.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/regexp/regexp-macro-assembler.cc b/src/regexp/regexp-macro-assembler.cc
index 0a7f5c1..2e3a8a2 100644
--- a/src/regexp/regexp-macro-assembler.cc
+++ b/src/regexp/regexp-macro-assembler.cc
@@ -134,6 +134,9 @@
     start_index += SlicedString::cast(subject)->offset();
     subject = SlicedString::cast(subject)->parent();
   }
+  if (subject->IsThinString()) {
+    subject = ThinString::cast(subject)->actual();
+  }
   DCHECK(start_index >= 0);
   DCHECK(start_index <= subject->length());
   if (subject->IsSeqOneByteString()) {
@@ -146,6 +149,7 @@
     return reinterpret_cast<const byte*>(
         ExternalOneByteString::cast(subject)->GetChars() + start_index);
   } else {
+    DCHECK(subject->IsExternalTwoByteString());
     return reinterpret_cast<const byte*>(
         ExternalTwoByteString::cast(subject)->GetChars() + start_index);
   }
@@ -239,6 +243,9 @@
     subject_ptr = slice->parent();
     slice_offset = slice->offset();
   }
+  if (StringShape(subject_ptr).IsThin()) {
+    subject_ptr = ThinString::cast(subject_ptr)->actual();
+  }
   // Ensure that the underlying string has the same representation.
   bool is_one_byte = subject_ptr->IsOneByteRepresentation();
   DCHECK(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
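
Both regexp-macro-assembler.cc hunks add the same unwrapping step: a ThinString is a forwarding pointer left behind when a string is internalized, and it must be peeled (after any SlicedString) before reading flat character data. A minimal model of the ordering; the struct is a stand-in, not V8's object layout:

    struct Str {
      enum Kind { kSeq, kSliced, kThin } kind = kSeq;
      const Str* target = nullptr;  // parent for kSliced, actual for kThin
      int offset = 0;               // meaningful only for kSliced
    };

    const Str* UnwrapForCharAccess(const Str* s, int* start_index) {
      if (s->kind == Str::kSliced) {
        *start_index += s->offset;  // slices index into their parent
        s = s->target;
      }
      if (s->kind == Str::kThin) {
        s = s->target;  // the step this patch adds
      }
      return s;
    }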
diff --git a/src/regexp/regexp-parser.cc b/src/regexp/regexp-parser.cc
index fd3123f..3621f7d 100644
--- a/src/regexp/regexp-parser.cc
+++ b/src/regexp/regexp-parser.cc
@@ -13,7 +13,7 @@
 #include "src/utils.h"
 
 #ifdef V8_I18N_SUPPORT
-#include "unicode/uset.h"
+#include "unicode/uniset.h"
 #endif  // V8_I18N_SUPPORT
 
 namespace v8 {
@@ -75,6 +75,7 @@
   if (has_next()) {
     StackLimitCheck check(isolate());
     if (check.HasOverflowed()) {
+      if (FLAG_abort_on_stack_overflow) FATAL("Aborting on stack overflow");
       ReportError(CStrVector(
           MessageTemplate::TemplateString(MessageTemplate::kStackOverflow)));
     } else if (zone()->excess_allocation()) {
@@ -215,7 +216,9 @@
           RegExpCapture* capture = GetCapture(capture_index);
           capture->set_body(body);
           body = capture;
-        } else if (group_type != GROUPING) {
+        } else if (group_type == GROUPING) {
+          body = new (zone()) RegExpGroup(body);
+        } else {
           DCHECK(group_type == POSITIVE_LOOKAROUND ||
                  group_type == NEGATIVE_LOOKAROUND);
           bool is_positive = (group_type == POSITIVE_LOOKAROUND);
@@ -1082,37 +1085,37 @@
 bool LookupPropertyValueName(UProperty property,
                              const char* property_value_name, bool negate,
                              ZoneList<CharacterRange>* result, Zone* zone) {
+  UProperty property_for_lookup = property;
+  if (property_for_lookup == UCHAR_SCRIPT_EXTENSIONS) {
+    // For the property Script_Extensions, we have to do the property value
+    // name lookup as if the property were Script.
+    property_for_lookup = UCHAR_SCRIPT;
+  }
   int32_t property_value =
-      u_getPropertyValueEnum(property, property_value_name);
+      u_getPropertyValueEnum(property_for_lookup, property_value_name);
   if (property_value == UCHAR_INVALID_CODE) return false;
 
   // We require the property name to match exactly to one of the property value
   // aliases. However, u_getPropertyValueEnum uses loose matching.
-  if (!IsExactPropertyValueAlias(property_value_name, property,
+  if (!IsExactPropertyValueAlias(property_value_name, property_for_lookup,
                                  property_value)) {
     return false;
   }
 
-  USet* set = uset_openEmpty();
   UErrorCode ec = U_ZERO_ERROR;
-  uset_applyIntPropertyValue(set, property, property_value, &ec);
-  bool success = ec == U_ZERO_ERROR && !uset_isEmpty(set);
+  icu::UnicodeSet set;
+  set.applyIntPropertyValue(property, property_value, ec);
+  bool success = ec == U_ZERO_ERROR && !set.isEmpty();
 
   if (success) {
-    uset_removeAllStrings(set);
-    if (negate) uset_complement(set);
-    int item_count = uset_getItemCount(set);
-    int item_result = 0;
-    for (int i = 0; i < item_count; i++) {
-      uc32 start = 0;
-      uc32 end = 0;
-      item_result += uset_getItem(set, i, &start, &end, nullptr, 0, &ec);
-      result->Add(CharacterRange::Range(start, end), zone);
+    set.removeAllStrings();
+    if (negate) set.complement();
+    for (int i = 0; i < set.getRangeCount(); i++) {
+      result->Add(
+          CharacterRange::Range(set.getRangeStart(i), set.getRangeEnd(i)),
+          zone);
     }
-    DCHECK_EQ(U_ZERO_ERROR, ec);
-    DCHECK_EQ(0, item_result);
   }
-  uset_close(set);
   return success;
 }
 
@@ -1196,9 +1199,14 @@
     const char* property_name = first_part.ToConstVector().start();
     const char* value_name = second_part.ToConstVector().start();
     UProperty property = u_getPropertyEnum(property_name);
-    if (property < UCHAR_INT_START) return false;
-    if (property >= UCHAR_INT_LIMIT) return false;
     if (!IsExactPropertyAlias(property_name, property)) return false;
+    if (property == UCHAR_GENERAL_CATEGORY) {
+      // We want to allow aggregate value names such as "Letter".
+      property = UCHAR_GENERAL_CATEGORY_MASK;
+    } else if (property != UCHAR_SCRIPT &&
+               property != UCHAR_SCRIPT_EXTENSIONS) {
+      return false;
+    }
     return LookupPropertyValueName(property, value_name, negate, result,
                                    zone());
   }
@@ -1720,12 +1728,10 @@
 bool RegExpBuilder::NeedsDesugaringForIgnoreCase(uc32 c) {
 #ifdef V8_I18N_SUPPORT
   if (unicode() && ignore_case()) {
-    USet* set = uset_open(c, c);
-    uset_closeOver(set, USET_CASE_INSENSITIVE);
-    uset_removeAllStrings(set);
-    bool result = uset_size(set) > 1;
-    uset_close(set);
-    return result;
+    icu::UnicodeSet set(c, c);
+    set.closeOver(USET_CASE_INSENSITIVE);
+    set.removeAllStrings();
+    return set.size() > 1;
   }
   // In the case where ICU is not included, we act as if the unicode flag is
   // not set, and do not desugar.
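
The Script_Extensions handling above exists because ICU registers script value names (for example "Greek") only under UCHAR_SCRIPT, while the character set itself must be built from UCHAR_SCRIPT_EXTENSIONS. A standalone sketch of the two-property lookup (ICU required; the exact-alias checking from the surrounding code is omitted):

    #include <unicode/uchar.h>
    #include <unicode/uniset.h>

    bool LookupScriptExtensions(const char* value_name, icu::UnicodeSet* out) {
      // Value names are registered under UCHAR_SCRIPT...
      int32_t value = u_getPropertyValueEnum(UCHAR_SCRIPT, value_name);
      if (value == UCHAR_INVALID_CODE) return false;
      // ...but the set is populated from UCHAR_SCRIPT_EXTENSIONS, which also
      // includes characters merely used by the script.
      UErrorCode ec = U_ZERO_ERROR;
      out->applyIntPropertyValue(UCHAR_SCRIPT_EXTENSIONS, value, ec);
      return U_SUCCESS(ec) && !out->isEmpty();
    }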
diff --git a/src/regexp/regexp-utils.cc b/src/regexp/regexp-utils.cc
index 62daf3f..570a348 100644
--- a/src/regexp/regexp-utils.cc
+++ b/src/regexp/regexp-utils.cc
@@ -118,12 +118,6 @@
 
   Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
 
-  if (isolate->regexp_function()->initial_map() == receiver->map()) {
-    // Fast-path for unmodified JSRegExp instances.
-    // TODO(ishell): Adapt for new fast-path logic.
-    return Just(true);
-  }
-
   Handle<Object> match;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
       isolate, match,
@@ -151,7 +145,14 @@
   if (!proto->IsJSReceiver()) return false;
 
   Handle<Map> initial_proto_initial_map = isolate->regexp_prototype_map();
-  return (JSReceiver::cast(proto)->map() == *initial_proto_initial_map);
+  if (JSReceiver::cast(proto)->map() != *initial_proto_initial_map) {
+    return false;
+  }
+
+  // The Smi check is required to avoid ToLength(lastIndex) calls with
+  // possible user-code execution on the fast path.
+  Object* last_index = JSRegExp::cast(recv)->LastIndex();
+  return last_index->IsSmi() && Smi::cast(last_index)->value() >= 0;
 }
 
 int RegExpUtils::AdvanceStringIndex(Isolate* isolate, Handle<String> string,
@@ -180,8 +181,7 @@
 
   ASSIGN_RETURN_ON_EXCEPTION(isolate, last_index_obj,
                              Object::ToLength(isolate, last_index_obj), Object);
-
-  const int last_index = Handle<Smi>::cast(last_index_obj)->value();
+  const int last_index = PositiveNumberToUint32(*last_index_obj);
   const int new_last_index =
       AdvanceStringIndex(isolate, string, last_index, unicode);
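
The AdvanceStringIndex referenced above implements the spec's index stepping: in unicode mode an index that sits on a surrogate pair advances by two code units, otherwise by one. A faithful standalone version over a UTF-16 string:

    #include <cstdint>
    #include <string>

    int64_t AdvanceStringIndex(const std::u16string& s, int64_t index,
                               bool unicode) {
      if (unicode && index + 1 < static_cast<int64_t>(s.size())) {
        char16_t lead = s[index];
        char16_t trail = s[index + 1];
        if (lead >= 0xD800 && lead <= 0xDBFF &&    // high surrogate
            trail >= 0xDC00 && trail <= 0xDFFF) {  // low surrogate
          return index + 2;
        }
      }
      return index + 1;
    }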
 
diff --git a/src/regexp/regexp-utils.h b/src/regexp/regexp-utils.h
index eff1ed7..eb5f85c 100644
--- a/src/regexp/regexp-utils.h
+++ b/src/regexp/regexp-utils.h
@@ -10,6 +10,8 @@
 namespace v8 {
 namespace internal {
 
+class RegExpMatchInfo;
+
 // Helper methods for C++ regexp builtins.
 class RegExpUtils : public AllStatic {
  public:
diff --git a/src/regexp/x64/regexp-macro-assembler-x64.cc b/src/regexp/x64/regexp-macro-assembler-x64.cc
index aafc840..54dc341 100644
--- a/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -6,8 +6,10 @@
 
 #include "src/regexp/x64/regexp-macro-assembler-x64.h"
 
+#include "src/factory.h"
 #include "src/log.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 #include "src/regexp/regexp-macro-assembler.h"
 #include "src/regexp/regexp-stack.h"
 #include "src/unicode.h"
diff --git a/src/regexp/x87/OWNERS b/src/regexp/x87/OWNERS
index dd9998b..61245ae 100644
--- a/src/regexp/x87/OWNERS
+++ b/src/regexp/x87/OWNERS
@@ -1 +1,2 @@
 weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 0de9e1c..6f9f44e 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -114,7 +114,7 @@
   }
 
   // Harvest vector-ics as well
-  TypeFeedbackVector* vector = function->feedback_vector();
+  FeedbackVector* vector = function->feedback_vector();
   int with = 0, gen = 0, type_vector_ic_count = 0;
   const bool is_interpreted = function->shared()->IsInterpreted();
 
@@ -170,7 +170,7 @@
                                                 int loop_nesting_levels) {
   JSFunction* function = frame->function();
   SharedFunctionInfo* shared = function->shared();
-  if (!FLAG_use_osr || function->shared()->IsBuiltin()) {
+  if (!FLAG_use_osr || !function->shared()->IsUserJavaScript()) {
     return;
   }
 
@@ -408,8 +408,7 @@
     int typeinfo, generic, total, type_percentage, generic_percentage;
     GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
                 &generic_percentage);
-    if (type_percentage >= FLAG_type_info_threshold &&
-        generic_percentage <= FLAG_generic_ic_threshold) {
+    if (type_percentage >= FLAG_type_info_threshold) {
       // If this particular function hasn't had any ICs patched for enough
       // ticks, optimize it now.
       return OptimizationReason::kHotAndStable;
@@ -431,8 +430,7 @@
     int typeinfo, generic, total, type_percentage, generic_percentage;
     GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
                 &generic_percentage);
-    if (type_percentage >= FLAG_type_info_threshold &&
-        generic_percentage <= FLAG_generic_ic_threshold) {
+    if (type_percentage >= FLAG_type_info_threshold) {
       return OptimizationReason::kSmallFunction;
     }
   }
@@ -474,10 +472,10 @@
     // Update shared function info ticks after checking for whether functions
     // should be optimized to keep FCG (which updates ticks on code) and
     // Ignition (which updates ticks on shared function info) in sync.
-    List<JSFunction*> functions(4);
+    List<SharedFunctionInfo*> functions(4);
     frame->GetFunctions(&functions);
     for (int i = functions.length(); --i >= 0;) {
-      SharedFunctionInfo* shared_function_info = functions[i]->shared();
+      SharedFunctionInfo* shared_function_info = functions[i];
       int ticks = shared_function_info->profiler_ticks();
       if (ticks < Smi::kMaxValue) {
         shared_function_info->set_profiler_ticks(ticks + 1);
diff --git a/src/runtime/runtime-array.cc b/src/runtime/runtime-array.cc
index 1a2d957..07c6ad0 100644
--- a/src/runtime/runtime-array.cc
+++ b/src/runtime/runtime-array.cc
@@ -19,7 +19,7 @@
 
 RUNTIME_FUNCTION(Runtime_FinishArrayPrototypeSetup) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArray, prototype, 0);
   Object* length = prototype->length();
   CHECK(length->IsSmi());
@@ -37,7 +37,7 @@
     BuiltinFunctionId id = static_cast<BuiltinFunctionId>(-1)) {
   Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
   Handle<JSFunction> optimized =
-      isolate->factory()->NewFunctionWithoutPrototype(key, code);
+      isolate->factory()->NewFunctionWithoutPrototype(key, code, true);
   if (argc < 0) {
     optimized->shared()->DontAdaptArguments();
   } else {
@@ -46,6 +46,8 @@
   if (id >= 0) {
     optimized->shared()->set_builtin_function_id(id);
   }
+  optimized->shared()->set_language_mode(STRICT);
+  optimized->shared()->set_native(true);
   JSObject::AddProperty(holder, key, optimized, NONE);
 }
 
@@ -60,17 +62,12 @@
 
 RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   Handle<JSObject> holder =
       isolate->factory()->NewJSObject(isolate->object_function());
 
   InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
-  if (FLAG_minimal) {
-    InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush);
-  } else {
-    FastArrayPushStub stub(isolate);
-    InstallCode(isolate, holder, "push", stub.GetCode());
-  }
+  InstallBuiltin(isolate, holder, "push", Builtins::kFastArrayPush);
   InstallBuiltin(isolate, holder, "shift", Builtins::kArrayShift);
   InstallBuiltin(isolate, holder, "unshift", Builtins::kArrayUnshift);
   InstallBuiltin(isolate, holder, "slice", Builtins::kArraySlice);
@@ -83,14 +80,12 @@
                  kArrayValues);
   InstallBuiltin(isolate, holder, "entries", Builtins::kArrayPrototypeEntries,
                  0, kArrayEntries);
-
   return *holder;
 }
 
-
 RUNTIME_FUNCTION(Runtime_FixedArrayGet) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_CHECKED(FixedArray, object, 0);
   CONVERT_SMI_ARG_CHECKED(index, 1);
   return object->get(index);
@@ -99,7 +94,7 @@
 
 RUNTIME_FUNCTION(Runtime_FixedArraySet) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_CHECKED(FixedArray, object, 0);
   CONVERT_SMI_ARG_CHECKED(index, 1);
   CONVERT_ARG_CHECKED(Object, value, 2);
@@ -127,7 +122,7 @@
 // Returns -1 if hole removal is not supported by this method.
 RUNTIME_FUNCTION(Runtime_RemoveArrayHoles) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
   if (object->IsJSProxy()) return Smi::FromInt(-1);
@@ -139,7 +134,7 @@
 // Move contents of argument 0 (an array) to argument 1 (an array)
 RUNTIME_FUNCTION(Runtime_MoveArrayContents) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArray, from, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, to, 1);
   JSObject::ValidateElements(from);
@@ -162,7 +157,7 @@
 // How many elements does this object/array have?
 RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
   Handle<FixedArrayBase> elements(array->elements(), isolate);
   SealHandleScope shs(isolate);
@@ -205,7 +200,7 @@
 // Intervals can span over some keys that are not in the object.
 RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
   ElementsKind kind = array->GetElementsKind();
@@ -249,8 +244,7 @@
   }
 
   if (j != keys->length()) {
-    isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
-        *keys, keys->length() - j);
+    isolate->heap()->RightTrimFixedArray(*keys, keys->length() - j);
   }
 
   return *isolate->factory()->NewJSArrayWithElements(keys);
@@ -363,7 +357,7 @@
 
 RUNTIME_FUNCTION(Runtime_NormalizeElements) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
   CHECK(!array->HasFixedTypedArrayElements());
   CHECK(!array->IsJSGlobalProxy());
@@ -375,7 +369,7 @@
 // GrowArrayElements returns a sentinel Smi if the object was normalized.
 RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_NUMBER_CHECKED(int, key, Int32, args[1]);
 
@@ -399,7 +393,7 @@
 
 RUNTIME_FUNCTION(Runtime_HasComplexElements) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
   for (PrototypeIterator iter(isolate, array, kStartAtReceiver);
        !iter.IsAtEnd(); iter.Advance()) {
@@ -421,7 +415,7 @@
 // ES6 22.1.2.2 Array.isArray
 RUNTIME_FUNCTION(Runtime_ArrayIsArray) {
   HandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   Maybe<bool> result = Object::IsArray(object);
   MAYBE_RETURN(result, isolate->heap()->exception());
@@ -430,14 +424,14 @@
 
 RUNTIME_FUNCTION(Runtime_IsArray) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Object, obj, 0);
   return isolate->heap()->ToBoolean(obj->IsJSArray());
 }
 
 RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, original_array, 0);
   RETURN_RESULT_OR_FAILURE(
       isolate, Object::ArraySpeciesConstructor(isolate, original_array));
@@ -446,7 +440,7 @@
 // ES7 22.1.3.11 Array.prototype.includes
 RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
   HandleScope shs(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, search_element, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, from_index, 2);
 
@@ -481,29 +475,37 @@
 
   // Let n be ? ToInteger(fromIndex). (If fromIndex is undefined, this step
   // produces the value 0.)
-  int64_t start_from;
-  {
+  int64_t index = 0;
+  if (!from_index->IsUndefined(isolate)) {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, from_index,
                                        Object::ToInteger(isolate, from_index));
-    double fp = from_index->Number();
-    if (fp > len) return isolate->heap()->false_value();
-    start_from = static_cast<int64_t>(fp);
-  }
 
-  int64_t index;
-  if (start_from >= 0) {
-    index = start_from;
-  } else {
-    index = len + start_from;
-    if (index < 0) {
-      index = 0;
+    if (V8_LIKELY(from_index->IsSmi())) {
+      int start_from = Smi::cast(*from_index)->value();
+      if (start_from < 0) {
+        index = std::max<int64_t>(len + start_from, 0);
+      } else {
+        index = start_from;
+      }
+    } else {
+      DCHECK(from_index->IsHeapNumber());
+      double start_from = from_index->Number();
+      if (start_from >= len) return isolate->heap()->false_value();
+      if (V8_LIKELY(std::isfinite(start_from))) {
+        if (start_from < 0) {
+          index = static_cast<int64_t>(std::max<double>(start_from + len, 0));
+        } else {
+          index = start_from;
+        }
+      }
     }
+
+    DCHECK_GE(index, 0);
   }
 
   // If the receiver is not a special receiver type, and the length is a valid
   // element index, perform fast operation tailored to specific ElementsKinds.
-  if (object->map()->instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
-      len < kMaxUInt32 &&
+  if (!object->map()->IsSpecialReceiverMap() && len < kMaxUInt32 &&
       JSObject::PrototypeHasNoElements(isolate, JSObject::cast(*object))) {
     Handle<JSObject> obj = Handle<JSObject>::cast(object);
     ElementsAccessor* elements = obj->GetElementsAccessor();
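
The rewritten fromIndex handling above splits a fast Smi path from the general number path, but the clamping rule is shared: a negative start counts back from the end and clamps at zero, -Infinity clamps to zero, and on the non-Smi path a start at or past the length answers false immediately. A worked standalone model of the double-valued case:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Returns the first index Array.prototype.includes should examine, or
    // `len` when nothing can match (the caller then answers false).
    int64_t StartIndex(double from_index, int64_t len) {
      if (from_index >= static_cast<double>(len)) return len;
      if (!std::isfinite(from_index)) return 0;  // only -Infinity gets here
      if (from_index < 0) {
        return std::max<int64_t>(static_cast<int64_t>(from_index) + len, 0);
      }
      return static_cast<int64_t>(from_index);
    }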
@@ -538,21 +540,21 @@
 
 RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
   HandleScope shs(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, search_element, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, from_index, 2);
 
   // Let O be ? ToObject(this value).
-  Handle<Object> receiver_obj = args.at<Object>(0);
-  if (receiver_obj->IsNull(isolate) || receiver_obj->IsUndefined(isolate)) {
+  Handle<Object> receiver_obj = args.at(0);
+  if (receiver_obj->IsNullOrUndefined(isolate)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
                               isolate->factory()->NewStringFromAsciiChecked(
                                   "Array.prototype.indexOf")));
   }
   Handle<JSReceiver> object;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, object, Object::ToObject(isolate, args.at<Object>(0)));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
+                                     Object::ToObject(isolate, args.at(0)));
 
   // Let len be ? ToLength(? Get(O, "length")).
   int64_t len;
@@ -601,8 +603,7 @@
 
   // If the receiver is not a special receiver type, and the length is a valid
   // element index, perform fast operation tailored to specific ElementsKinds.
-  if (object->map()->instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
-      len < kMaxUInt32 &&
+  if (!object->map()->IsSpecialReceiverMap() && len < kMaxUInt32 &&
       JSObject::PrototypeHasNoElements(isolate, JSObject::cast(*object))) {
     Handle<JSObject> obj = Handle<JSObject>::cast(object);
     ElementsAccessor* elements = obj->GetElementsAccessor();
@@ -636,47 +637,50 @@
   return Smi::FromInt(-1);
 }
 
+
 RUNTIME_FUNCTION(Runtime_SpreadIterablePrepare) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, spread, 0);
 
-  if (spread->IsJSArray()) {
-    // Check that the spread arg has fast elements
-    Handle<JSArray> spread_array = Handle<JSArray>::cast(spread);
-    ElementsKind array_kind = spread_array->GetElementsKind();
-
-    // And that it has the orignal ArrayPrototype
-    JSObject* array_proto = JSObject::cast(spread_array->map()->prototype());
-    Map* iterator_map = isolate->initial_array_iterator_prototype()->map();
-
-    // Check that the iterator acts as expected.
-    // If IsArrayIteratorLookupChainIntact(), then we know that the initial
-    // ArrayIterator is being used. If the map of the prototype has changed,
-    // then take the slow path.
-
-    if (isolate->is_initial_array_prototype(array_proto) &&
-        isolate->IsArrayIteratorLookupChainIntact() &&
-        isolate->is_initial_array_iterator_prototype_map(iterator_map)) {
-      if (IsFastPackedElementsKind(array_kind)) {
-        return *spread;
-      }
-      if (IsFastHoleyElementsKind(array_kind) &&
-          isolate->IsFastArrayConstructorPrototypeChainIntact()) {
-        return *spread;
-      }
-    }
+  // Iterate over the spread if we need to.
+  if (spread->IterationHasObservableEffects()) {
+    Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, spread,
+        Execution::Call(isolate, spread_iterable_function,
+                        isolate->factory()->undefined_value(), 1, &spread));
   }
 
-  Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
+  return *spread;
+}
 
-  Handle<Object> spreaded;
+RUNTIME_FUNCTION(Runtime_SpreadIterableFixed) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, spread, 0);
+
+  // The caller should check if proper iteration is necessary.
+  Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, spreaded,
+      isolate, spread,
       Execution::Call(isolate, spread_iterable_function,
                       isolate->factory()->undefined_value(), 1, &spread));
 
-  return *spreaded;
+  // Create a new FixedArray and put the result of the spread into it.
+  Handle<JSArray> spread_array = Handle<JSArray>::cast(spread);
+  uint32_t spread_length;
+  CHECK(spread_array->length()->ToArrayIndex(&spread_length));
+
+  Handle<FixedArray> result = isolate->factory()->NewFixedArray(spread_length);
+  ElementsAccessor* accessor = spread_array->GetElementsAccessor();
+  for (uint32_t i = 0; i < spread_length; i++) {
+    DCHECK(accessor->HasElement(spread_array, i));
+    Handle<Object> element = accessor->Get(spread_array, i);
+    result->set(i, *element);
+  }
+
+  return *result;
 }
 
 }  // namespace internal
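
The block of array and iterator checks deleted from Runtime_SpreadIterablePrepare now lives behind a single predicate: spreading may skip the iterator protocol only when skipping is unobservable. A model of what such a predicate has to establish; the field names are illustrative, not V8's:

    struct ArrayState {
      bool is_js_array;                // a plain JSArray, not a proxy
      bool elements_packed_or_intact;  // no holes readable via the prototype
      bool iterator_protocol_intact;   // Array.prototype[Symbol.iterator]
                                       // and the iterator's next() untouched
    };

    bool IterationHasObservableEffects(const ArrayState& a) {
      return !(a.is_js_array && a.elements_packed_or_intact &&
               a.iterator_protocol_intact);
    }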
diff --git a/src/runtime/runtime-atomics.cc b/src/runtime/runtime-atomics.cc
index 3bd0738..ff7ded9 100644
--- a/src/runtime/runtime-atomics.cc
+++ b/src/runtime/runtime-atomics.cc
@@ -349,7 +349,7 @@
 
 RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
@@ -383,7 +383,7 @@
 
 RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
@@ -415,7 +415,7 @@
 
 RUNTIME_FUNCTION(Runtime_AtomicsSub) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
@@ -447,7 +447,7 @@
 
 RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
@@ -479,7 +479,7 @@
 
 RUNTIME_FUNCTION(Runtime_AtomicsOr) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
@@ -511,7 +511,7 @@
 
 RUNTIME_FUNCTION(Runtime_AtomicsXor) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
@@ -543,7 +543,7 @@
 
 RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
@@ -575,7 +575,7 @@
 
 RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
   uint32_t usize = NumberToUint32(*size);
   return isolate->heap()->ToBoolean(AtomicIsLockFree(usize));
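
The DCHECK(args.length() == N) to DCHECK_EQ(N, args.length()) rewrite that fills this and the neighboring runtime files is mechanical but not cosmetic: an equality macro can log both operand values on failure, while a boolean check only stringifies the expression. A minimal model of the difference (sketch macros, not V8's):

    #include <cstdio>
    #include <cstdlib>

    #define SKETCH_CHECK(cond)                                   \
      do {                                                       \
        if (!(cond)) {                                           \
          std::fprintf(stderr, "Check failed: %s\n", #cond);     \
          std::abort();                                          \
        }                                                        \
      } while (0)

    #define SKETCH_CHECK_EQ(expected, actual)                       \
      do {                                                          \
        long long e_ = (expected), a_ = (actual);                   \
        if (e_ != a_) {                                             \
          /* Both values appear in the log, unlike SKETCH_CHECK. */ \
          std::fprintf(stderr, "Check failed: %lld vs. %lld\n",     \
                       e_, a_);                                     \
          std::abort();                                             \
        }                                                           \
      } while (0)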
diff --git a/src/runtime/runtime-classes.cc b/src/runtime/runtime-classes.cc
index 323604f..9398586 100644
--- a/src/runtime/runtime-classes.cc
+++ b/src/runtime/runtime-classes.cc
@@ -7,8 +7,10 @@
 #include <stdlib.h>
 #include <limits>
 
+#include "src/accessors.h"
 #include "src/arguments.h"
 #include "src/debug/debug.h"
+#include "src/elements.h"
 #include "src/frames-inl.h"
 #include "src/isolate-inl.h"
 #include "src/messages.h"
@@ -18,17 +20,9 @@
 namespace internal {
 
 
-RUNTIME_FUNCTION(Runtime_ThrowNonMethodError) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-  THROW_NEW_ERROR_RETURN_FAILURE(
-      isolate, NewReferenceError(MessageTemplate::kNonMethod));
-}
-
-
 RUNTIME_FUNCTION(Runtime_ThrowUnsupportedSuperError) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   THROW_NEW_ERROR_RETURN_FAILURE(
       isolate, NewReferenceError(MessageTemplate::kUnsupportedSuper));
 }
@@ -36,7 +30,7 @@
 
 RUNTIME_FUNCTION(Runtime_ThrowConstructorNonCallableError) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
   Handle<Object> name(constructor->shared()->name(), isolate);
   THROW_NEW_ERROR_RETURN_FAILURE(
@@ -44,40 +38,63 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_ThrowArrayNotSubclassableError) {
+RUNTIME_FUNCTION(Runtime_ThrowStaticPrototypeError) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-  THROW_NEW_ERROR_RETURN_FAILURE(
-      isolate, NewTypeError(MessageTemplate::kArrayNotSubclassable));
-}
-
-
-static Object* ThrowStaticPrototypeError(Isolate* isolate) {
+  DCHECK_EQ(0, args.length());
   THROW_NEW_ERROR_RETURN_FAILURE(
       isolate, NewTypeError(MessageTemplate::kStaticPrototype));
 }
 
-
-RUNTIME_FUNCTION(Runtime_ThrowStaticPrototypeError) {
+RUNTIME_FUNCTION(Runtime_ThrowSuperAlreadyCalledError) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-  return ThrowStaticPrototypeError(isolate);
+  DCHECK_EQ(0, args.length());
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewReferenceError(MessageTemplate::kSuperAlreadyCalled));
 }
 
+namespace {
 
-RUNTIME_FUNCTION(Runtime_ThrowIfStaticPrototype) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(Name, name, 0);
-  if (Name::Equals(name, isolate->factory()->prototype_string())) {
-    return ThrowStaticPrototypeError(isolate);
+Object* ThrowNotSuperConstructor(Isolate* isolate, Handle<Object> constructor,
+                                 Handle<JSFunction> function) {
+  Handle<Object> super_name;
+  if (constructor->IsJSFunction()) {
+    super_name = handle(Handle<JSFunction>::cast(constructor)->shared()->name(),
+                        isolate);
+  } else if (constructor->IsOddball()) {
+    DCHECK(constructor->IsNull(isolate));
+    super_name = isolate->factory()->null_string();
+  } else {
+    super_name = Object::NoSideEffectsToString(isolate, constructor);
   }
-  return *name;
+  // A super constructor name that stringified to nothing reads as "null".
+  if (Handle<String>::cast(super_name)->length() == 0) {
+    super_name = isolate->factory()->null_string();
+  }
+  Handle<Object> function_name(function->shared()->name(), isolate);
+  // anonymous class
+  if (Handle<String>::cast(function_name)->length() == 0) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewTypeError(MessageTemplate::kNotSuperConstructorAnonymousClass,
+                     super_name));
+  }
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kNotSuperConstructor, super_name,
+                            function_name));
 }
 
+}  // namespace
+
+RUNTIME_FUNCTION(Runtime_ThrowNotSuperConstructor) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
+  return ThrowNotSuperConstructor(isolate, constructor, function);
+}
 
 RUNTIME_FUNCTION(Runtime_HomeObjectSymbol) {
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   return isolate->heap()->home_object_symbol();
 }
 
@@ -143,13 +160,6 @@
                           prototype, attribs),
                       Object);
 
-  // TODO(arv): Only do this conditionally.
-  Handle<Symbol> home_object_symbol(isolate->heap()->home_object_symbol());
-  RETURN_ON_EXCEPTION(
-      isolate, JSObject::SetOwnPropertyIgnoreAttributes(
-                   constructor, home_object_symbol, prototype, DONT_ENUM),
-      Object);
-
   if (!constructor_parent.is_null()) {
     MAYBE_RETURN_NULL(JSObject::SetPrototype(constructor, constructor_parent,
                                              false, Object::THROW_ON_ERROR));
@@ -171,13 +181,14 @@
                    handle(Smi::FromInt(end_position), isolate), STRICT),
       Object);
 
-  return constructor;
+  // Caller already has access to constructor, so return the prototype.
+  return prototype;
 }
 
 
 RUNTIME_FUNCTION(Runtime_DefineClass) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 1);
   CONVERT_SMI_ARG_CHECKED(start_position, 2);
@@ -189,6 +200,42 @@
 }
 
 namespace {
+void InstallClassNameAccessor(Isolate* isolate, Handle<JSObject> object) {
+  PropertyAttributes attrs =
+      static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+  // Cannot fail since this should only be called when creating an object
+  // literal.
+  CHECK(!JSObject::SetAccessor(
+             object, Accessors::FunctionNameInfo(object->GetIsolate(), attrs))
+             .is_null());
+}
+}  // anonymous namespace
+
+RUNTIME_FUNCTION(Runtime_InstallClassNameAccessor) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+  InstallClassNameAccessor(isolate, object);
+  return *object;
+}
+
+RUNTIME_FUNCTION(Runtime_InstallClassNameAccessorWithCheck) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+
+  // If a property named "name" is already defined, exit.
+  Handle<Name> key = isolate->factory()->name_string();
+  if (JSObject::HasRealNamedProperty(object, key).FromMaybe(false)) {
+    return *object;
+  }
+
+  // Define the "name" accessor.
+  InstallClassNameAccessor(isolate, object);
+  return *object;
+}
+
+namespace {
 
 enum class SuperMode { kLoad, kStore };
 
@@ -326,7 +373,7 @@
 
 RUNTIME_FUNCTION(Runtime_StoreToSuper_Strict) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
@@ -339,7 +386,7 @@
 
 RUNTIME_FUNCTION(Runtime_StoreToSuper_Sloppy) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
@@ -373,7 +420,7 @@
 
 RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Strict) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
@@ -387,7 +434,7 @@
 
 RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Sloppy) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
@@ -403,7 +450,13 @@
   SealHandleScope shs(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(JSFunction, active_function, 0);
-  return active_function->map()->prototype();
+  Object* prototype = active_function->map()->prototype();
+  if (!prototype->IsConstructor()) {
+    HandleScope scope(isolate);
+    return ThrowNotSuperConstructor(isolate, handle(prototype, isolate),
+                                    handle(active_function, isolate));
+  }
+  return prototype;
 }
 
 }  // namespace internal
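
ThrowNotSuperConstructor above selects between two message templates and normalizes unprintable super names. A behavior-level model using standard exceptions in place of V8's MessageTemplate machinery (message wording approximated):

    #include <stdexcept>
    #include <string>

    [[noreturn]] void ThrowNotSuperConstructor(std::string super_name,
                                               const std::string& class_name) {
      // A super value whose name stringified to nothing reads as "null".
      if (super_name.empty()) super_name = "null";
      if (class_name.empty()) {
        // Anonymous subclass: the message cannot name it.
        throw std::runtime_error("Super constructor " + super_name +
                                 " of anonymous class is not a constructor");
      }
      throw std::runtime_error("Super constructor " + super_name + " of " +
                               class_name + " is not a constructor");
    }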
diff --git a/src/runtime/runtime-collections.cc b/src/runtime/runtime-collections.cc
index 57e5d98..214ce1c 100644
--- a/src/runtime/runtime-collections.cc
+++ b/src/runtime/runtime-collections.cc
@@ -14,7 +14,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringGetRawHashField) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
   return *isolate->factory()->NewNumberFromUint(string->hash_field());
 }
@@ -22,14 +22,14 @@
 
 RUNTIME_FUNCTION(Runtime_TheHole) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   return isolate->heap()->the_hole_value();
 }
 
 
 RUNTIME_FUNCTION(Runtime_JSCollectionGetTable) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(JSObject, object, 0);
   CHECK(object->IsJSSet() || object->IsJSMap());
   return static_cast<JSCollection*>(object)->table();
@@ -38,7 +38,7 @@
 
 RUNTIME_FUNCTION(Runtime_GenericHash) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   Smi* hash = Object::GetOrCreateHash(isolate, object);
   return hash;
@@ -47,7 +47,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetInitialize) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
   JSSet::Initialize(holder, isolate);
   return *holder;
@@ -56,7 +56,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetGrow) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
   Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
   table = OrderedHashSet::EnsureGrowable(table);
@@ -67,7 +67,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetShrink) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
   Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
   table = OrderedHashSet::Shrink(table);
@@ -78,7 +78,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetClear) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
   JSSet::Clear(holder);
   return isolate->heap()->undefined_value();
@@ -87,7 +87,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetIteratorInitialize) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSSet, set, 1);
   CONVERT_SMI_ARG_CHECKED(kind, 2)
@@ -103,7 +103,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetIteratorClone) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
 
   Handle<JSSetIterator> result = isolate->factory()->NewJSSetIterator();
@@ -117,7 +117,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetIteratorNext) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_CHECKED(JSSetIterator, holder, 0);
   CONVERT_ARG_CHECKED(JSArray, value_array, 1);
   return holder->Next(value_array);
@@ -130,7 +130,7 @@
 // 2: Iteration kind
 RUNTIME_FUNCTION(Runtime_SetIteratorDetails) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
   Handle<FixedArray> details = isolate->factory()->NewFixedArray(4);
   details->set(0, isolate->heap()->ToBoolean(holder->HasMore()));
@@ -142,7 +142,7 @@
 
 RUNTIME_FUNCTION(Runtime_MapInitialize) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
   JSMap::Initialize(holder, isolate);
   return *holder;
@@ -151,7 +151,7 @@
 
 RUNTIME_FUNCTION(Runtime_MapShrink) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
   Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
   table = OrderedHashMap::Shrink(table);
@@ -162,7 +162,7 @@
 
 RUNTIME_FUNCTION(Runtime_MapClear) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
   JSMap::Clear(holder);
   return isolate->heap()->undefined_value();
@@ -171,7 +171,7 @@
 
 RUNTIME_FUNCTION(Runtime_MapGrow) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
   Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
   table = OrderedHashMap::EnsureGrowable(table);
@@ -182,7 +182,7 @@
 
 RUNTIME_FUNCTION(Runtime_MapIteratorInitialize) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSMap, map, 1);
   CONVERT_SMI_ARG_CHECKED(kind, 2)
@@ -199,7 +199,7 @@
 
 RUNTIME_FUNCTION(Runtime_MapIteratorClone) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
 
   Handle<JSMapIterator> result = isolate->factory()->NewJSMapIterator();
@@ -217,7 +217,7 @@
 // 2: Iteration kind
 RUNTIME_FUNCTION(Runtime_MapIteratorDetails) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
   Handle<FixedArray> details = isolate->factory()->NewFixedArray(4);
   details->set(0, isolate->heap()->ToBoolean(holder->HasMore()));
@@ -229,42 +229,17 @@
 
 RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
   CONVERT_NUMBER_CHECKED(int, max_entries, Int32, args[1]);
   CHECK(max_entries >= 0);
-
-  Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
-  if (max_entries == 0 || max_entries > table->NumberOfElements()) {
-    max_entries = table->NumberOfElements();
-  }
-  Handle<FixedArray> entries =
-      isolate->factory()->NewFixedArray(max_entries * 2);
-  // Allocation can cause GC can delete weak elements. Reload.
-  if (max_entries > table->NumberOfElements()) {
-    max_entries = table->NumberOfElements();
-  }
-
-  {
-    DisallowHeapAllocation no_gc;
-    int count = 0;
-    for (int i = 0; count / 2 < max_entries && i < table->Capacity(); i++) {
-      Handle<Object> key(table->KeyAt(i), isolate);
-      if (table->IsKey(isolate, *key)) {
-        entries->set(count++, *key);
-        Object* value = table->Lookup(key);
-        entries->set(count++, value);
-      }
-    }
-    DCHECK_EQ(max_entries * 2, count);
-  }
-  return *isolate->factory()->NewJSArrayWithElements(entries);
+  return *JSWeakCollection::GetEntries(holder, max_entries);
 }
 
 
 RUNTIME_FUNCTION(Runtime_MapIteratorNext) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_CHECKED(JSMapIterator, holder, 0);
   CONVERT_ARG_CHECKED(JSArray, value_array, 1);
   return holder->Next(value_array);
@@ -273,7 +248,7 @@
 
 RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   JSWeakCollection::Initialize(weak_collection, isolate);
   return *weak_collection;
@@ -282,7 +257,7 @@
 
 RUNTIME_FUNCTION(Runtime_WeakCollectionGet) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   CONVERT_SMI_ARG_CHECKED(hash, 2)
@@ -298,7 +273,7 @@
 
 RUNTIME_FUNCTION(Runtime_WeakCollectionHas) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   CONVERT_SMI_ARG_CHECKED(hash, 2)
@@ -313,7 +288,7 @@
 
 RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   CONVERT_SMI_ARG_CHECKED(hash, 2)
@@ -328,7 +303,7 @@
 
 RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   CHECK(key->IsJSReceiver() || key->IsSymbol());
@@ -344,30 +319,11 @@
 
 RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
   CONVERT_NUMBER_CHECKED(int, max_values, Int32, args[1]);
   CHECK(max_values >= 0);
-
-  Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
-  if (max_values == 0 || max_values > table->NumberOfElements()) {
-    max_values = table->NumberOfElements();
-  }
-  Handle<FixedArray> values = isolate->factory()->NewFixedArray(max_values);
-  // Recompute max_values because GC could have removed elements from the table.
-  if (max_values > table->NumberOfElements()) {
-    max_values = table->NumberOfElements();
-  }
-  {
-    DisallowHeapAllocation no_gc;
-    int count = 0;
-    for (int i = 0; count < max_values && i < table->Capacity(); i++) {
-      Object* key = table->KeyAt(i);
-      if (table->IsKey(isolate, key)) values->set(count++, key);
-    }
-    DCHECK_EQ(max_values, count);
-  }
-  return *isolate->factory()->NewJSArrayWithElements(values);
+  return *JSWeakCollection::GetEntries(holder, max_values);
 }
 }  // namespace internal
 }  // namespace v8
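
The two loops deleted above (weak-map entries and weak-set values) were near-duplicates, and both call sites now defer to JSWeakCollection::GetEntries. The shared shape, including the re-clamp that guards against a GC clearing weak slots while the result array is allocated, sketched over plain containers:

    #include <utility>
    #include <vector>

    using Table = std::vector<std::pair<int, int>>;  // ObjectHashTable stand-in

    Table GetEntries(const Table& table, int max_entries) {
      if (max_entries == 0 || max_entries > static_cast<int>(table.size())) {
        max_entries = static_cast<int>(table.size());
      }
      // In the real heap, allocating the result can trigger a GC that clears
      // weak entries, so the count is clamped again after allocation.
      Table entries;
      entries.reserve(max_entries);
      for (const auto& kv : table) {
        if (static_cast<int>(entries.size()) == max_entries) break;
        entries.push_back(kv);  // copy key and value pairwise
      }
      return entries;
    }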
diff --git a/src/runtime/runtime-compiler.cc b/src/runtime/runtime-compiler.cc
index 472e076..f929d73 100644
--- a/src/runtime/runtime-compiler.cc
+++ b/src/runtime/runtime-compiler.cc
@@ -11,7 +11,6 @@
 #include "src/deoptimizer.h"
 #include "src/frames-inl.h"
 #include "src/full-codegen/full-codegen.h"
-#include "src/interpreter/bytecode-array-iterator.h"
 #include "src/isolate-inl.h"
 #include "src/messages.h"
 #include "src/v8threads.h"
@@ -93,11 +92,11 @@
   }
   Handle<JSObject> foreign;
   if (args[2]->IsJSObject()) {
-    foreign = args.at<i::JSObject>(2);
+    foreign = args.at<JSObject>(2);
   }
   Handle<JSArrayBuffer> memory;
   if (args[3]->IsJSArrayBuffer()) {
-    memory = args.at<i::JSArrayBuffer>(3);
+    memory = args.at<JSArrayBuffer>(3);
   }
   if (function->shared()->HasAsmWasmData() &&
       AsmJs::IsStdlibValid(isolate, handle(function->shared()->asm_wasm_data()),
@@ -128,7 +127,7 @@
 
 RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
   DCHECK(AllowHeapAllocation::IsAllowed());
   delete deoptimizer;
@@ -159,7 +158,7 @@
 
 RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_SMI_ARG_CHECKED(type_arg, 0);
   Deoptimizer::BailoutType type =
       static_cast<Deoptimizer::BailoutType>(type_arg);
@@ -183,6 +182,10 @@
     JavaScriptFrameIterator top_it(isolate);
     JavaScriptFrame* top_frame = top_it.frame();
     isolate->set_context(Context::cast(top_frame->context()));
+  } else {
+    // TODO(turbofan): We currently need the native context to materialize
+    // the arguments object, but only to get to its map.
+    isolate->set_context(function->native_context());
   }
 
   // Make sure to materialize objects before causing any allocation.
@@ -270,9 +273,9 @@
   // Revert the patched back edge table, regardless of whether OSR succeeds.
   BackEdgeTable::Revert(frame->isolate(), *caller_code);
 
+  // Return a BailoutId representing the AST id of the {IterationStatement}.
   uint32_t pc_offset =
       static_cast<uint32_t>(frame->pc() - caller_code->instruction_start());
-
   return caller_code->TranslatePcOffsetToAstId(pc_offset);
 }
 
@@ -293,27 +296,15 @@
   // Reset the OSR loop nesting depth to disarm back edges.
   bytecode->set_osr_loop_nesting_level(0);
 
-  // Translate the offset of the jump instruction to the jump target offset of
-  // that instruction so that the derived BailoutId points to the loop header.
-  // TODO(mstarzinger): This can be merged with {BytecodeBranchAnalysis} which
-  // already performs a pre-pass over the bytecode stream anyways.
-  int jump_offset = iframe->GetBytecodeOffset();
-  interpreter::BytecodeArrayIterator iterator(bytecode);
-  while (iterator.current_offset() + iterator.current_prefix_offset() <
-         jump_offset) {
-    iterator.Advance();
-  }
-  DCHECK(interpreter::Bytecodes::IsJump(iterator.current_bytecode()));
-  int jump_target_offset = iterator.GetJumpTargetOffset();
-
-  return BailoutId(jump_target_offset);
+  // Return a BailoutId representing the bytecode offset of the back branch.
+  return BailoutId(iframe->GetBytecodeOffset());
 }
 
 }  // namespace
 
 RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
 
   // We're not prepared to handle a function with arguments object.
@@ -398,7 +389,7 @@
 
 RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
 
   // First check if this is a real stack overflow.
@@ -455,9 +446,10 @@
   static const ParseRestriction restriction = NO_PARSE_RESTRICTION;
   Handle<JSFunction> compiled;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, compiled, Compiler::GetFunctionFromEval(
-                             source, outer_info, context, language_mode,
-                             restriction, eval_scope_position, eval_position),
+      isolate, compiled,
+      Compiler::GetFunctionFromEval(source, outer_info, context, language_mode,
+                                    restriction, kNoSourcePosition,
+                                    eval_scope_position, eval_position),
       isolate->heap()->exception());
   return *compiled;
 }
@@ -465,9 +457,9 @@
 
 RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 6);
+  DCHECK_EQ(6, args.length());
 
-  Handle<Object> callee = args.at<Object>(0);
+  Handle<Object> callee = args.at(0);
 
   // If "eval" didn't refer to the original GlobalEval, it's not a
   // direct call to eval.
diff --git a/src/runtime/runtime-debug.cc b/src/runtime/runtime-debug.cc
index 824ea92..3649621 100644
--- a/src/runtime/runtime-debug.cc
+++ b/src/runtime/runtime-debug.cc
@@ -5,6 +5,8 @@
 #include "src/runtime/runtime-utils.h"
 
 #include "src/arguments.h"
+#include "src/compiler.h"
+#include "src/debug/debug-coverage.h"
 #include "src/debug/debug-evaluate.h"
 #include "src/debug/debug-frames.h"
 #include "src/debug/debug-scopes.h"
@@ -24,31 +26,30 @@
 
 RUNTIME_FUNCTION(Runtime_DebugBreak) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
-  isolate->debug()->set_return_value(value);
+  HandleScope scope(isolate);
+  ReturnValueScope result_scope(isolate->debug());
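+  // ReturnValueScope restores the previous debugger return value on exit.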
+  isolate->debug()->set_return_value(*value);
 
   // Get the top-most JavaScript frame.
   JavaScriptFrameIterator it(isolate);
   isolate->debug()->Break(it.frame());
-
-  isolate->debug()->SetAfterBreakTarget(it.frame());
-  return *isolate->debug()->return_value();
+  return isolate->debug()->return_value();
 }
 
 RUNTIME_FUNCTION(Runtime_DebugBreakOnBytecode) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
-  isolate->debug()->set_return_value(value);
+  HandleScope scope(isolate);
+  ReturnValueScope result_scope(isolate->debug());
+  isolate->debug()->set_return_value(*value);
 
   // Get the top-most JavaScript frame.
   JavaScriptFrameIterator it(isolate);
   isolate->debug()->Break(it.frame());
 
-  // If live-edit has dropped frames, we are not going back to dispatch.
-  if (LiveEdit::SetAfterBreakTarget(isolate->debug())) return Smi::kZero;
-
   // Return the handler from the original bytecode array.
   DCHECK(it.frame()->is_interpreted());
   InterpretedFrame* interpreted_frame =
@@ -65,7 +66,7 @@
 
 RUNTIME_FUNCTION(Runtime_HandleDebuggerStatement) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   if (isolate->debug()->break_points_active()) {
     isolate->debug()->HandleDebugBreak();
   }
@@ -79,20 +80,24 @@
 // args[1]: object supplied during callback
 RUNTIME_FUNCTION(Runtime_SetDebugEventListener) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
-  CHECK(args[0]->IsJSFunction() || args[0]->IsUndefined(isolate) ||
-        args[0]->IsNull(isolate));
+  DCHECK_EQ(2, args.length());
+  CHECK(args[0]->IsJSFunction() || args[0]->IsNullOrUndefined(isolate));
   CONVERT_ARG_HANDLE_CHECKED(Object, callback, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, data, 1);
-  isolate->debug()->SetEventListener(callback, data);
-
+  if (callback->IsJSFunction()) {
+    JavaScriptDebugDelegate* delegate = new JavaScriptDebugDelegate(
+        isolate, Handle<JSFunction>::cast(callback), data);
+    isolate->debug()->SetDebugDelegate(delegate, true);
+  } else {
+    isolate->debug()->SetDebugDelegate(nullptr, false);
+  }
   return isolate->heap()->undefined_value();
 }
 
 
 RUNTIME_FUNCTION(Runtime_ScheduleBreak) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   isolate->stack_guard()->RequestDebugBreak();
   return isolate->heap()->undefined_value();
 }
@@ -136,14 +141,6 @@
   return it->isolate()->factory()->undefined_value();
 }
 
-
-static Handle<Object> DebugGetProperty(Handle<Object> object,
-                                       Handle<Name> name) {
-  LookupIterator it(object, name);
-  return DebugGetProperty(&it);
-}
-
-
 template <class IteratorType>
 static MaybeHandle<JSArray> GetIteratorInternalProperties(
     Isolate* isolate, Handle<IteratorType> object) {
@@ -248,24 +245,8 @@
     result->set(5, generator->receiver());
     return factory->NewJSArrayWithElements(result);
   } else if (object->IsJSPromise()) {
-    Handle<JSObject> promise = Handle<JSObject>::cast(object);
-
-    Handle<Object> status_obj =
-        DebugGetProperty(promise, isolate->factory()->promise_state_symbol());
-    CHECK(status_obj->IsSmi());
-    const char* status = "rejected";
-    int status_val = Handle<Smi>::cast(status_obj)->value();
-    switch (status_val) {
-      case kPromiseFulfilled:
-        status = "resolved";
-        break;
-      case kPromisePending:
-        status = "pending";
-        break;
-      default:
-        DCHECK_EQ(kPromiseRejected, status_val);
-    }
-
+    Handle<JSPromise> promise = Handle<JSPromise>::cast(object);
+    const char* status = JSPromise::Status(promise->status());
     Handle<FixedArray> result = factory->NewFixedArray(2 * 2);
     Handle<String> promise_status =
         factory->NewStringFromAsciiChecked("[[PromiseStatus]]");
@@ -273,8 +254,7 @@
     Handle<String> status_str = factory->NewStringFromAsciiChecked(status);
     result->set(1, *status_str);
 
-    Handle<Object> value_obj =
-        DebugGetProperty(promise, isolate->factory()->promise_result_symbol());
+    Handle<Object> value_obj(promise->result(), isolate);
     Handle<String> promise_value =
         factory->NewStringFromAsciiChecked("[[PromiseValue]]");
     result->set(2, *promise_value);
@@ -315,7 +295,7 @@
 
 RUNTIME_FUNCTION(Runtime_DebugGetInternalProperties) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
   RETURN_RESULT_OR_FAILURE(isolate,
                            Runtime::GetInternalProperties(isolate, obj));
@@ -407,7 +387,7 @@
 RUNTIME_FUNCTION(Runtime_DebugGetProperty) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
@@ -416,14 +396,13 @@
   return *DebugGetProperty(&it);
 }
 
-
-// Return the property type calculated from the property details.
+// Return the property kind calculated from the property details.
 // args[0]: smi with property details.
-RUNTIME_FUNCTION(Runtime_DebugPropertyTypeFromDetails) {
+RUNTIME_FUNCTION(Runtime_DebugPropertyKindFromDetails) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
-  return Smi::FromInt(static_cast<int>(details.type()));
+  return Smi::FromInt(static_cast<int>(details.kind()));
 }
 
 
@@ -431,7 +410,7 @@
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(Runtime_DebugPropertyAttributesFromDetails) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
   return Smi::FromInt(static_cast<int>(details.attributes()));
 }
@@ -439,7 +418,7 @@
 
 RUNTIME_FUNCTION(Runtime_CheckExecutionState) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   CHECK(isolate->debug()->CheckExecutionState(break_id));
   return isolate->heap()->true_value();
@@ -448,7 +427,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetFrameCount) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   CHECK(isolate->debug()->CheckExecutionState(break_id));
 
@@ -460,22 +439,18 @@
     return Smi::kZero;
   }
 
+  List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
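+  // A physical frame may produce several summaries when functions are inlined.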
   for (StackTraceFrameIterator it(isolate, id); !it.done(); it.Advance()) {
-    List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
-    if (it.is_wasm()) {
-      n++;
-    } else {
-      it.javascript_frame()->Summarize(&frames);
-      for (int i = frames.length() - 1; i >= 0; i--) {
-        // Omit functions from native and extension scripts.
-        if (frames[i].function()->shared()->IsSubjectToDebugging()) n++;
-      }
+    frames.Clear();
+    it.frame()->Summarize(&frames);
+    for (int i = frames.length() - 1; i >= 0; i--) {
+      // Omit functions from native and extension scripts.
+      if (frames[i].is_subject_to_debugging()) n++;
     }
   }
   return Smi::FromInt(n);
 }
 
-
 static const int kFrameDetailsFrameIdIndex = 0;
 static const int kFrameDetailsReceiverIndex = 1;
 static const int kFrameDetailsFunctionIndex = 2;
@@ -508,7 +483,7 @@
 // Return value if any
 RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   CHECK(isolate->debug()->CheckExecutionState(break_id));
 
@@ -524,11 +499,11 @@
 
   StackTraceFrameIterator it(isolate, id);
   // Inlined frame index in optimized frame, starting from outer function.
-  int inlined_jsframe_index =
+  int inlined_frame_index =
       DebugFrameHelper::FindIndexedNonNativeFrame(&it, index);
-  if (inlined_jsframe_index == -1) return heap->undefined_value();
+  if (inlined_frame_index == -1) return heap->undefined_value();
 
-  FrameInspector frame_inspector(it.frame(), inlined_jsframe_index, isolate);
+  FrameInspector frame_inspector(it.frame(), inlined_frame_index, isolate);
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
@@ -539,10 +514,7 @@
   Handle<Object> frame_id(DebugFrameHelper::WrapFrameId(it.frame()->id()),
                           isolate);
 
-  // Find source position in unoptimized code.
-  int position = frame_inspector.GetSourcePosition();
-
-  if (it.is_wasm()) {
+  if (frame_inspector.summary().IsWasm()) {
     // Create the details array (no dynamic information for wasm).
     Handle<FixedArray> details =
         isolate->factory()->NewFixedArray(kFrameDetailsFirstDynamicIndex);
@@ -551,10 +523,7 @@
     details->set(kFrameDetailsFrameIdIndex, *frame_id);
 
     // Add the function name.
-    Handle<Object> wasm_instance(it.wasm_frame()->wasm_instance(), isolate);
-    int func_index = it.wasm_frame()->function_index();
-    Handle<String> func_name =
-        wasm::GetWasmFunctionName(isolate, wasm_instance, func_index);
+    Handle<String> func_name = frame_inspector.summary().FunctionName();
     details->set(kFrameDetailsFunctionIndex, *func_name);
 
     // Add the script wrapper
@@ -569,21 +538,8 @@
     details->set(kFrameDetailsLocalCountIndex, Smi::kZero);
 
     // Add the source position.
-    // For wasm, it is function-local, so translate it to a module-relative
-    // position, such that together with the script it uniquely identifies the
-    // position.
-    Handle<Object> positionValue;
-    if (position != kNoSourcePosition) {
-      int translated_position = position;
-      if (!wasm::WasmIsAsmJs(*wasm_instance, isolate)) {
-        Handle<WasmCompiledModule> compiled_module(
-            wasm::GetCompiledModule(JSObject::cast(*wasm_instance)), isolate);
-        translated_position +=
-            wasm::GetFunctionCodeOffset(compiled_module, func_index);
-      }
-      details->set(kFrameDetailsSourcePositionIndex,
-                   Smi::FromInt(translated_position));
-    }
+    int position = frame_inspector.summary().SourcePosition();
+    details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
 
     // Add the constructor information.
     details->set(kFrameDetailsConstructCallIndex, heap->ToBoolean(false));
@@ -604,6 +560,9 @@
     return *isolate->factory()->NewJSArrayWithElements(details);
   }
 
+  // Find source position in unoptimized code.
+  int position = frame_inspector.GetSourcePosition();
+
   // Handle JavaScript frames.
   bool is_optimized = it.frame()->is_optimized();
 
@@ -678,14 +637,14 @@
   // to the frame information.
   Handle<Object> return_value = isolate->factory()->undefined_value();
   if (at_return) {
-    return_value = isolate->debug()->return_value();
+    return_value = handle(isolate->debug()->return_value(), isolate);
   }
 
   // Now advance to the arguments adapter frame (if any). It contains all
   // the provided parameters whereas the function frame always has the number
   // of arguments matching the function's parameters. The rest of the
   // information (except for what is collected above) is the same.
-  if ((inlined_jsframe_index == 0) &&
+  if ((inlined_frame_index == 0) &&
       it.javascript_frame()->has_adapted_arguments()) {
     it.AdvanceToArgumentsFrame();
     frame_inspector.SetArgumentsFrame(it.frame());
@@ -743,7 +702,7 @@
   }
   if (is_optimized) {
     flags |= 1 << 1;
-    flags |= inlined_jsframe_index << 2;
+    flags |= inlined_frame_index << 2;
   }
   details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
 
@@ -777,9 +736,12 @@
   }
 
   // Add the receiver (same as in function frame).
-  Handle<Object> receiver(it.frame()->receiver(), isolate);
-  DCHECK(!function->shared()->IsBuiltin());
-  DCHECK_IMPLIES(is_sloppy(shared->language_mode()), receiver->IsJSReceiver());
+  Handle<Object> receiver = frame_inspector.summary().receiver();
+  DCHECK(function->shared()->IsUserJavaScript());
+  // Optimized frames only restore the receiver as best-effort (see
+  // OptimizedFrame::Summarize).
+  DCHECK_IMPLIES(!is_optimized && is_sloppy(shared->language_mode()),
+                 receiver->IsJSReceiver());
   details->set(kFrameDetailsReceiverIndex, *receiver);
 
   DCHECK_EQ(details_size, details_index);
@@ -789,7 +751,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetScopeCount) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   CHECK(isolate->debug()->CheckExecutionState(break_id));
 
@@ -797,8 +759,10 @@
 
   // Get the frame where the debugging is performed.
   StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
-  JavaScriptFrameIterator it(isolate, id);
-  JavaScriptFrame* frame = it.frame();
+  StackTraceFrameIterator it(isolate, id);
+  StandardFrame* frame = it.frame();
+  if (it.frame()->is_wasm()) return Smi::kZero;
+
   FrameInspector frame_inspector(frame, 0, isolate);
 
   // Count the visible scopes.
@@ -822,7 +786,7 @@
 // 1: Scope object
 RUNTIME_FUNCTION(Runtime_GetScopeDetails) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   CHECK(isolate->debug()->CheckExecutionState(break_id));
 
@@ -832,8 +796,9 @@
 
   // Get the frame where the debugging is performed.
   StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
-  JavaScriptFrameIterator frame_it(isolate, id);
-  JavaScriptFrame* frame = frame_it.frame();
+  StackTraceFrameIterator frame_it(isolate, id);
+  // Wasm has no scopes; this must be JavaScript.
+  JavaScriptFrame* frame = JavaScriptFrame::cast(frame_it.frame());
   FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
 
   // Find the requested scope.
@@ -918,7 +883,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetFunctionScopeDetails) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   // Check arguments.
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
@@ -957,7 +922,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetGeneratorScopeDetails) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   if (!args[0]->IsJSGeneratorObject()) {
     return isolate->heap()->undefined_value();
@@ -1004,7 +969,7 @@
 // Return true if success and false otherwise
 RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 6);
+  DCHECK_EQ(6, args.length());
 
   // Check arguments.
   CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
@@ -1021,8 +986,9 @@
 
     // Get the frame where the debugging is performed.
     StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
-    JavaScriptFrameIterator frame_it(isolate, id);
-    JavaScriptFrame* frame = frame_it.frame();
+    StackTraceFrameIterator frame_it(isolate, id);
+    // Wasm has no scopes; this must be JavaScript.
+    JavaScriptFrame* frame = JavaScriptFrame::cast(frame_it.frame());
     FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
 
     ScopeIterator it(isolate, &frame_inspector);
@@ -1043,7 +1009,7 @@
 
 RUNTIME_FUNCTION(Runtime_DebugPrintScopes) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
 
 #ifdef DEBUG
   // Print the scopes for the top frame.
@@ -1063,7 +1029,7 @@
 // args[0]: disable break state
 RUNTIME_FUNCTION(Runtime_SetBreakPointsActive) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_BOOLEAN_ARG_CHECKED(active, 0);
   isolate->debug()->set_break_points_active(active);
   return isolate->heap()->undefined_value();
@@ -1077,7 +1043,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CHECK(isolate->debug()->is_active());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
   CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[1]);
@@ -1107,7 +1073,7 @@
 // args[2]: number: break point object
 RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CHECK(isolate->debug()->is_active());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
@@ -1132,7 +1098,7 @@
 // args[3]: number: break point object
 RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CHECK(isolate->debug()->is_active());
   CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
@@ -1164,7 +1130,7 @@
 // args[0]: number: break point object
 RUNTIME_FUNCTION(Runtime_ClearBreakPoint) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CHECK(isolate->debug()->is_active());
   CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 0);
 
@@ -1180,7 +1146,7 @@
 // args[1]: Boolean indicating on/off.
 RUNTIME_FUNCTION(Runtime_ChangeBreakOnException) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_NUMBER_CHECKED(uint32_t, type_arg, Uint32, args[0]);
   CONVERT_BOOLEAN_ARG_CHECKED(enable, 1);
 
@@ -1197,7 +1163,7 @@
 // args[0]: boolean indicating uncaught exceptions
 RUNTIME_FUNCTION(Runtime_IsBreakOnException) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_NUMBER_CHECKED(uint32_t, type_arg, Uint32, args[0]);
 
   ExceptionBreakType type = static_cast<ExceptionBreakType>(type_arg);
@@ -1213,7 +1179,7 @@
 //          of frames to step down.
 RUNTIME_FUNCTION(Runtime_PrepareStep) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   CHECK(isolate->debug()->CheckExecutionState(break_id));
 
@@ -1224,7 +1190,7 @@
   // Get the step action and check validity.
   StepAction step_action = static_cast<StepAction>(NumberToInt32(args[1]));
   if (step_action != StepIn && step_action != StepNext &&
-      step_action != StepOut && step_action != StepFrame) {
+      step_action != StepOut) {
     return isolate->Throw(isolate->heap()->illegal_argument_string());
   }
 
@@ -1236,11 +1202,10 @@
   return isolate->heap()->undefined_value();
 }
 
-
 // Clear all stepping set by PrepareStep.
 RUNTIME_FUNCTION(Runtime_ClearStepping) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   CHECK(isolate->debug()->is_active());
   isolate->debug()->ClearStepping();
   return isolate->heap()->undefined_value();
@@ -1252,21 +1217,20 @@
 
   // Check the execution state and decode arguments frame and source to be
   // evaluated.
-  DCHECK(args.length() == 6);
+  DCHECK_EQ(5, args.length());
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   CHECK(isolate->debug()->CheckExecutionState(break_id));
 
   CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
   CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
   CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
-  CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4);
-  CONVERT_ARG_HANDLE_CHECKED(HeapObject, context_extension, 5);
+  CONVERT_BOOLEAN_ARG_CHECKED(throw_on_side_effect, 4);
 
   StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
 
   RETURN_RESULT_OR_FAILURE(
       isolate, DebugEvaluate::Local(isolate, id, inlined_jsframe_index, source,
-                                    disable_break, context_extension));
+                                    throw_on_side_effect));
 }
 
 
@@ -1275,27 +1239,19 @@
 
   // Check the execution state and decode arguments frame and source to be
   // evaluated.
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(2, args.length());
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   CHECK(isolate->debug()->CheckExecutionState(break_id));
 
   CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
-  CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
-  CONVERT_ARG_HANDLE_CHECKED(HeapObject, context_extension, 3);
 
-  RETURN_RESULT_OR_FAILURE(
-      isolate,
-      DebugEvaluate::Global(isolate, source, disable_break, context_extension));
+  RETURN_RESULT_OR_FAILURE(isolate, DebugEvaluate::Global(isolate, source));
 }
 
 
 RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-
-  // This runtime function is used by the debugger to determine whether the
-  // debugger is active or not. Hence we fail gracefully here and don't crash.
-  if (!isolate->debug()->is_active()) return isolate->ThrowIllegalOperation();
+  DCHECK_EQ(0, args.length());
 
   Handle<FixedArray> instances;
   {
@@ -1342,7 +1298,7 @@
 // args[2]: the maximum number of objects to return
 RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, filter, 1);
   CHECK(filter->IsUndefined(isolate) || filter->IsJSObject());
@@ -1399,7 +1355,7 @@
 // args[1]: the maximum number of objects to return
 RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
   CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
   CHECK(max_references >= 0);
@@ -1432,7 +1388,7 @@
 // args[0]: the object to find the prototype for.
 RUNTIME_FUNCTION(Runtime_DebugGetPrototype) {
   HandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   // TODO(1543): Come up with a solution for clients to handle potential errors
   // thrown by an intermediate proxy.
@@ -1444,7 +1400,7 @@
 // TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
@@ -1492,29 +1448,9 @@
 }
 
 
-// Calls specified function with or without entering the debugger.
-// This is used in unit tests to run code as if debugger is entered or simply
-// to have a stack with C++ frame in the middle.
-RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-
-  DebugScope debug_scope(isolate->debug());
-  if (debug_scope.failed()) {
-    DCHECK(isolate->has_pending_exception());
-    return isolate->heap()->exception();
-  }
-
-  RETURN_RESULT_OR_FAILURE(
-      isolate, Execution::Call(isolate, function,
-                               handle(function->global_proxy()), 0, NULL));
-}
-
-
 RUNTIME_FUNCTION(Runtime_GetDebugContext) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   Handle<Context> context;
   {
     DebugScope debug_scope(isolate->debug());
@@ -1534,7 +1470,7 @@
 // Presently, it only does a full GC.
 RUNTIME_FUNCTION(Runtime_CollectGarbage) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
                                      GarbageCollectionReason::kRuntime);
   return isolate->heap()->undefined_value();
@@ -1544,7 +1480,7 @@
 // Gets the current heap usage.
 RUNTIME_FUNCTION(Runtime_GetHeapUsage) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
   if (!Smi::IsValid(usage)) {
     return *isolate->factory()->NewNumberFromInt(usage);
@@ -1561,7 +1497,7 @@
 // some kind of user interaction the performance is not crucial.
 RUNTIME_FUNCTION(Runtime_GetScript) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, script_name, 0);
 
   Handle<Script> found;
@@ -1585,55 +1521,75 @@
 // TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_ScriptLineCount) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(JSValue, script, 0);
 
   CHECK(script->value()->IsScript());
   Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
 
+  if (script_handle->type() == Script::TYPE_WASM) {
+    // Return 0 for now; this function will disappear soon anyway.
+    return Smi::FromInt(0);
+  }
+
   Script::InitLineEnds(script_handle);
 
   FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
   return Smi::FromInt(line_ends_array->length());
 }
 
+namespace {
+
+int ScriptLinePosition(Handle<Script> script, int line) {
+  if (line < 0) return -1;
+
+  if (script->type() == Script::TYPE_WASM) {
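+    // For wasm scripts, a "line" is interpreted as a function index.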
+    return WasmCompiledModule::cast(script->wasm_compiled_module())
+        ->GetFunctionOffset(line);
+  }
+
+  Script::InitLineEnds(script);
+
+  FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
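+  // Each line_ends entry is the position at which its line terminates.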
+  const int line_count = line_ends_array->length();
+  DCHECK_LT(0, line_count);
+
+  if (line == 0) return 0;
+  // If line == line_count, we return the first position beyond the last line.
+  if (line > line_count) return -1;
+  return Smi::cast(line_ends_array->get(line - 1))->value() + 1;
+}
+
+}  // namespace
+
 // TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_ScriptLineStartPosition) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_CHECKED(JSValue, script, 0);
   CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
 
   CHECK(script->value()->IsScript());
   Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
 
-  Script::InitLineEnds(script_handle);
-
-  FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
-  const int line_count = line_ends_array->length();
-
-  // If line == line_count, we return the first position beyond the last line.
-  if (line < 0 || line > line_count) {
-    return Smi::FromInt(-1);
-  } else if (line == 0) {
-    return Smi::kZero;
-  } else {
-    DCHECK(0 < line && line <= line_count);
-    const int pos = Smi::cast(line_ends_array->get(line - 1))->value() + 1;
-    return Smi::FromInt(pos);
-  }
+  return Smi::FromInt(ScriptLinePosition(script_handle, line));
 }
 
 // TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_ScriptLineEndPosition) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_CHECKED(JSValue, script, 0);
   CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
 
   CHECK(script->value()->IsScript());
   Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
 
+  if (script_handle->type() == Script::TYPE_WASM) {
+    // Return zero for now; this function will disappear soon anyway.
+    return Smi::FromInt(0);
+  }
+
   Script::InitLineEnds(script_handle);
 
   FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
@@ -1679,6 +1635,21 @@
 
 namespace {
 
+int ScriptLinePositionWithOffset(Handle<Script> script, int line, int offset) {
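+  // Resolves {line} as a line delta relative to the line containing {offset}.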
+  if (line < 0 || offset < 0) return -1;
+
+  if (line == 0 || offset == 0)
+    return ScriptLinePosition(script, line) + offset;
+
+  Script::PositionInfo info;
+  if (!Script::GetPositionInfo(script, offset, &info, Script::NO_OFFSET)) {
+    return -1;
+  }
+
+  const int total_line = info.line + line;
+  return ScriptLinePosition(script, total_line);
+}
+
 Handle<Object> ScriptLocationFromLine(Isolate* isolate, Handle<Script> script,
                                       Handle<Object> opt_line,
                                       Handle<Object> opt_column,
@@ -1686,51 +1657,24 @@
   // Line and column are possibly undefined and we need to handle these cases,
   // additionally subtracting corresponding offsets.
 
-  int32_t line;
-  if (opt_line->IsNull(isolate) || opt_line->IsUndefined(isolate)) {
-    line = 0;
-  } else {
+  int32_t line = 0;
+  if (!opt_line->IsNullOrUndefined(isolate)) {
     CHECK(opt_line->IsNumber());
     line = NumberToInt32(*opt_line) - script->line_offset();
   }
 
-  int32_t column;
-  if (opt_column->IsNull(isolate) || opt_column->IsUndefined(isolate)) {
-    column = 0;
-  } else {
+  int32_t column = 0;
+  if (!opt_column->IsNullOrUndefined(isolate)) {
     CHECK(opt_column->IsNumber());
     column = NumberToInt32(*opt_column);
     if (line == 0) column -= script->column_offset();
   }
 
-  if (line < 0 || column < 0 || offset < 0) {
-    return isolate->factory()->null_value();
-  }
+  int line_position = ScriptLinePositionWithOffset(script, line, offset);
+  if (line_position < 0 || column < 0) return isolate->factory()->null_value();
 
-  Script::InitLineEnds(script);
-
-  FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
-  const int line_count = line_ends_array->length();
-
-  int position;
-  if (line == 0) {
-    position = offset + column;
-  } else {
-    Script::PositionInfo info;
-    if (!Script::GetPositionInfo(script, offset, &info, Script::NO_OFFSET) ||
-        info.line + line >= line_count) {
-      return isolate->factory()->null_value();
-    }
-
-    const int offset_line = info.line + line;
-    const int offset_line_position =
-        (offset_line == 0)
-            ? 0
-            : Smi::cast(line_ends_array->get(offset_line - 1))->value() + 1;
-    position = offset_line_position + column;
-  }
-
-  return GetJSPositionInfo(script, position, Script::NO_OFFSET, isolate);
+  return GetJSPositionInfo(script, line_position + column, Script::NO_OFFSET,
+                           isolate);
 }
 
 // Slow traversal over all scripts on the heap.
@@ -1760,7 +1704,7 @@
 // TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSValue, script, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, opt_line, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, opt_column, 2);
@@ -1776,7 +1720,7 @@
 // TODO(5530): Rename once conflicting function has been deleted.
 RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine2) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_NUMBER_CHECKED(int32_t, scriptid, Int32, args[0]);
   CONVERT_ARG_HANDLE_CHECKED(Object, opt_line, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, opt_column, 2);
@@ -1791,7 +1735,7 @@
 // TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_ScriptPositionInfo) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_CHECKED(JSValue, script, 0);
   CONVERT_NUMBER_CHECKED(int32_t, position, Int32, args[1]);
   CONVERT_BOOLEAN_ARG_CHECKED(with_offset, 2);
@@ -1804,18 +1748,39 @@
   return *GetJSPositionInfo(script_handle, position, offset_flag, isolate);
 }
 
+// TODO(5530): Rename once conflicting function has been deleted.
+RUNTIME_FUNCTION(Runtime_ScriptPositionInfo2) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  CONVERT_NUMBER_CHECKED(int32_t, scriptid, Int32, args[0]);
+  CONVERT_NUMBER_CHECKED(int32_t, position, Int32, args[1]);
+  CONVERT_BOOLEAN_ARG_CHECKED(with_offset, 2);
+
+  Handle<Script> script;
+  CHECK(GetScriptById(isolate, scriptid, &script));
+
+  const Script::OffsetFlag offset_flag =
+      with_offset ? Script::WITH_OFFSET : Script::NO_OFFSET;
+  return *GetJSPositionInfo(script, position, offset_flag, isolate);
+}
+
 // Returns the given line as a string, or null if line is out of bounds.
 // The parameter line is expected to include the script's line offset.
 // TODO(5530): Remove once uses in debug.js are gone.
 RUNTIME_FUNCTION(Runtime_ScriptSourceLine) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_CHECKED(JSValue, script, 0);
   CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
 
   CHECK(script->value()->IsScript());
   Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
 
+  if (script_handle->type() == Script::TYPE_WASM) {
+    // Return null for now; this function will disappear soon anyway.
+    return isolate->heap()->null_value();
+  }
+
   Script::InitLineEnds(script_handle);
 
   FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
@@ -1837,14 +1802,19 @@
   return *str;
 }
 
-// Set one shot breakpoints for the callback function that is passed to a
-// built-in function such as Array.forEach to enable stepping into the callback,
-// if we are indeed stepping and the callback is subject to debugging.
-RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
+// On function call, depending on circumstances, prepare for stepping in,
+// or perform a side effect check.
+RUNTIME_FUNCTION(Runtime_DebugOnFunctionCall) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
-  isolate->debug()->PrepareStepIn(fun);
+  if (isolate->debug()->last_step_action() >= StepIn) {
+    isolate->debug()->PrepareStepIn(fun);
+  }
+  if (isolate->needs_side_effect_check() &&
+      !isolate->debug()->PerformSideEffectCheck(fun)) {
+    return isolate->heap()->exception();
+  }
   return isolate->heap()->undefined_value();
 }
 
@@ -1856,17 +1826,17 @@
   return isolate->heap()->undefined_value();
 }
 
-RUNTIME_FUNCTION(Runtime_DebugRecordAsyncFunction) {
+RUNTIME_FUNCTION(Runtime_DebugRecordGenerator) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
   CHECK(isolate->debug()->last_step_action() >= StepNext);
-  isolate->debug()->RecordAsyncFunction(generator);
+  isolate->debug()->RecordGenerator(generator);
   return isolate->heap()->undefined_value();
 }
 
 RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   HandleScope scope(isolate);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
   isolate->PushPromise(promise);
@@ -1875,39 +1845,109 @@
 
 
 RUNTIME_FUNCTION(Runtime_DebugPopPromise) {
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   SealHandleScope shs(isolate);
   isolate->PopPromise();
   return isolate->heap()->undefined_value();
 }
 
-RUNTIME_FUNCTION(Runtime_DebugNextMicrotaskId) {
+RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionPromiseCreated) {
+  DCHECK_EQ(1, args.length());
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-  return Smi::FromInt(isolate->GetNextDebugMicrotaskId());
-}
-
-RUNTIME_FUNCTION(Runtime_DebugAsyncTaskEvent) {
-  DCHECK(args.length() == 3);
-  HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(String, type, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, id, 1);
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
-  isolate->debug()->OnAsyncTaskEvent(type, id, name);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+  isolate->PushPromise(promise);
+  int id = isolate->debug()->NextAsyncTaskId(promise);
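+  // Remember the task id on the promise so async events can refer back to it.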
+  Handle<Symbol> async_stack_id_symbol =
+      isolate->factory()->promise_async_stack_id_symbol();
+  JSObject::SetProperty(promise, async_stack_id_symbol,
+                        handle(Smi::FromInt(id), isolate), STRICT)
+      .Assert();
+  isolate->debug()->OnAsyncTaskEvent(debug::kDebugEnqueueAsyncFunction, id, 0);
   return isolate->heap()->undefined_value();
 }
 
+RUNTIME_FUNCTION(Runtime_DebugPromiseReject) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSPromise, rejected_promise, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+
+  isolate->debug()->OnPromiseReject(rejected_promise, value);
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_DebugAsyncEventEnqueueRecurring) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+  CONVERT_SMI_ARG_CHECKED(status, 1);
+  if (isolate->debug()->is_active()) {
+    isolate->debug()->OnAsyncTaskEvent(
+        status == v8::Promise::kFulfilled ? debug::kDebugEnqueuePromiseResolve
+                                          : debug::kDebugEnqueuePromiseReject,
+        isolate->debug()->NextAsyncTaskId(promise), 0);
+  }
+  return isolate->heap()->undefined_value();
+}
 
 RUNTIME_FUNCTION(Runtime_DebugIsActive) {
   SealHandleScope shs(isolate);
   return Smi::FromInt(isolate->debug()->is_active());
 }
 
-
 RUNTIME_FUNCTION(Runtime_DebugBreakInOptimizedCode) {
   UNIMPLEMENTED();
   return NULL;
 }
 
+RUNTIME_FUNCTION(Runtime_DebugCollectCoverage) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(0, args.length());
+  // Collect coverage data.
+  std::unique_ptr<Coverage> coverage(Coverage::Collect(isolate, false));
+  Factory* factory = isolate->factory();
+  // Turn the returned data structure into JavaScript.
+  // Create an array of scripts.
+  int num_scripts = static_cast<int>(coverage->size());
+  // Prepare property keys.
+  Handle<FixedArray> scripts_array = factory->NewFixedArray(num_scripts);
+  Handle<String> script_string = factory->NewStringFromStaticChars("script");
+  Handle<String> start_string = factory->NewStringFromStaticChars("start");
+  Handle<String> end_string = factory->NewStringFromStaticChars("end");
+  Handle<String> count_string = factory->NewStringFromStaticChars("count");
+  for (int i = 0; i < num_scripts; i++) {
+    const auto& script_data = coverage->at(i);
+    HandleScope inner_scope(isolate);
+    int num_functions = static_cast<int>(script_data.functions.size());
+    Handle<FixedArray> functions_array = factory->NewFixedArray(num_functions);
+    for (int j = 0; j < num_functions; j++) {
+      const auto& function_data = script_data.functions[j];
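+      // Expose each covered function as a {start, end, count} range object.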
+      Handle<JSObject> range_obj = factory->NewJSObjectWithNullProto();
+      JSObject::AddProperty(range_obj, start_string,
+                            factory->NewNumberFromInt(function_data.start),
+                            NONE);
+      JSObject::AddProperty(range_obj, end_string,
+                            factory->NewNumberFromInt(function_data.end), NONE);
+      JSObject::AddProperty(range_obj, count_string,
+                            factory->NewNumberFromUint(function_data.count),
+                            NONE);
+      functions_array->set(j, *range_obj);
+    }
+    Handle<JSArray> script_obj =
+        factory->NewJSArrayWithElements(functions_array, FAST_ELEMENTS);
+    Handle<JSObject> wrapper = Script::GetWrapper(script_data.script);
+    JSObject::AddProperty(script_obj, script_string, wrapper, NONE);
+    scripts_array->set(i, *script_obj);
+  }
+  return *factory->NewJSArrayWithElements(scripts_array, FAST_ELEMENTS);
+}
+
+RUNTIME_FUNCTION(Runtime_DebugTogglePreciseCoverage) {
+  SealHandleScope shs(isolate);
+  CONVERT_BOOLEAN_ARG_CHECKED(enable, 0);
+  Coverage::TogglePrecise(isolate, enable);
+  return isolate->heap()->undefined_value();
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-error.cc b/src/runtime/runtime-error.cc
index 3a9b192..6ded550 100644
--- a/src/runtime/runtime-error.cc
+++ b/src/runtime/runtime-error.cc
@@ -15,7 +15,7 @@
 
 RUNTIME_FUNCTION(Runtime_ErrorToString) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, recv, 0);
   RETURN_RESULT_OR_FAILURE(isolate, ErrorUtils::ToString(isolate, recv));
 }
diff --git a/src/runtime/runtime-forin.cc b/src/runtime/runtime-forin.cc
index bd37cdc..9a7c539 100644
--- a/src/runtime/runtime-forin.cc
+++ b/src/runtime/runtime-forin.cc
@@ -160,22 +160,5 @@
                            HasEnumerableProperty(isolate, receiver, key));
 }
 
-
-RUNTIME_FUNCTION(Runtime_ForInNext) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(4, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, cache_array, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, cache_type, 2);
-  CONVERT_SMI_ARG_CHECKED(index, 3);
-  Handle<Object> key = handle(cache_array->get(index), isolate);
-  // Don't need filtering if expected map still matches that of the receiver.
-  if (receiver->map() == *cache_type) {
-    return *key;
-  }
-  RETURN_RESULT_OR_FAILURE(isolate,
-                           HasEnumerableProperty(isolate, receiver, key));
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-function.cc b/src/runtime/runtime-function.cc
index a91ab28..ac8a430 100644
--- a/src/runtime/runtime-function.cc
+++ b/src/runtime/runtime-function.cc
@@ -17,7 +17,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionGetName) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
   if (function->IsJSBoundFunction()) {
@@ -32,7 +32,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionSetName) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
@@ -45,7 +45,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_CHECKED(JSFunction, f, 0);
   CHECK(f->RemovePrototype());
@@ -99,7 +99,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionGetScriptSourcePosition) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   int pos = fun->shared()->start_position();
@@ -108,7 +108,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionGetContextData) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   FixedArray* array = fun->native_context()->embedder_data();
@@ -117,7 +117,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionSetInstanceClassName) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   CONVERT_ARG_CHECKED(String, name, 1);
@@ -128,7 +128,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionSetLength) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   CONVERT_SMI_ARG_CHECKED(length, 1);
@@ -140,7 +140,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionSetPrototype) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
@@ -153,7 +153,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return isolate->heap()->ToBoolean(f->shared()->IsApiFunction());
@@ -162,7 +162,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetCode) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, source, 1);
@@ -190,7 +190,6 @@
   target_shared->set_scope_info(source_shared->scope_info());
   target_shared->set_outer_scope_info(source_shared->outer_scope_info());
   target_shared->set_length(source_shared->length());
-  target_shared->set_num_literals(source_shared->num_literals());
   target_shared->set_feedback_metadata(source_shared->feedback_metadata());
   target_shared->set_internal_formal_parameter_count(
       source_shared->internal_formal_parameter_count());
@@ -203,8 +202,14 @@
       source_shared->opt_count_and_bailout_reason());
   target_shared->set_native(was_native);
   target_shared->set_profiler_ticks(source_shared->profiler_ticks());
-  SharedFunctionInfo::SetScript(
-      target_shared, Handle<Object>(source_shared->script(), isolate));
+  target_shared->set_function_literal_id(source_shared->function_literal_id());
+
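+  // Detach the script from the source function before attaching it to the
+  // target.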
+  Handle<Object> source_script(source_shared->script(), isolate);
+  if (source_script->IsScript()) {
+    SharedFunctionInfo::SetScript(source_shared,
+                                  isolate->factory()->undefined_value());
+  }
+  SharedFunctionInfo::SetScript(target_shared, source_script);
 
   // Set the code of the target function.
   target->ReplaceCode(source_shared->code());
@@ -254,10 +259,10 @@
 RUNTIME_FUNCTION(Runtime_SetForceInlineFlag) {
   SealHandleScope shs(isolate);
   DCHECK_EQ(1, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+  CONVERT_ARG_CHECKED(Object, object, 0);
 
   if (object->IsJSFunction()) {
-    JSFunction* func = JSFunction::cast(*object);
+    JSFunction* func = JSFunction::cast(object);
     func->shared()->set_force_inline(true);
   }
   return isolate->heap()->undefined_value();
@@ -272,7 +277,7 @@
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
   ScopedVector<Handle<Object>> argv(argc);
   for (int i = 0; i < argc; ++i) {
-    argv[i] = args.at<Object>(2 + i);
+    argv[i] = args.at(2 + i);
   }
   RETURN_RESULT_OR_FAILURE(
       isolate, Execution::Call(isolate, target, receiver, argc, argv.start()));
@@ -282,7 +287,7 @@
 // ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
 RUNTIME_FUNCTION(Runtime_ConvertReceiver) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
   return *Object::ConvertReceiver(isolate, receiver).ToHandleChecked();
 }
diff --git a/src/runtime/runtime-futex.cc b/src/runtime/runtime-futex.cc
index a93bb23..b6582ff 100644
--- a/src/runtime/runtime-futex.cc
+++ b/src/runtime/runtime-futex.cc
@@ -19,7 +19,7 @@
 
 RUNTIME_FUNCTION(Runtime_AtomicsWait) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CONVERT_INT32_ARG_CHECKED(value, 2);
@@ -29,6 +29,11 @@
   CHECK_EQ(sta->type(), kExternalInt32Array);
   CHECK(timeout == V8_INFINITY || !std::isnan(timeout));
 
+  if (!isolate->allow_atomics_wait()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kAtomicsWaitNotAllowed));
+  }
+
   Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();
   size_t addr = (index << 2) + NumberToSize(sta->byte_offset());
 
@@ -37,10 +42,10 @@
 
 RUNTIME_FUNCTION(Runtime_AtomicsWake) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
-  CONVERT_INT32_ARG_CHECKED(count, 2);
+  CONVERT_UINT32_ARG_CHECKED(count, 2);
   CHECK(sta->GetBuffer()->is_shared());
   CHECK_LT(index, NumberToSize(sta->length()));
   CHECK_EQ(sta->type(), kExternalInt32Array);
@@ -53,7 +58,7 @@
 
 RUNTIME_FUNCTION(Runtime_AtomicsNumWaitersForTesting) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
   CONVERT_SIZE_ARG_CHECKED(index, 1);
   CHECK(sta->GetBuffer()->is_shared());
@@ -65,5 +70,14 @@
 
   return FutexEmulation::NumWaitersForTesting(isolate, array_buffer, addr);
 }
+
+RUNTIME_FUNCTION(Runtime_SetAllowAtomicsWait) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_BOOLEAN_ARG_CHECKED(set, 0);
+
+  isolate->set_allow_atomics_wait(set);
+  return isolate->heap()->undefined_value();
+}
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-generator.cc b/src/runtime/runtime-generator.cc
index bb63a3d..9648673 100644
--- a/src/runtime/runtime-generator.cc
+++ b/src/runtime/runtime-generator.cc
@@ -5,7 +5,6 @@
 #include "src/runtime/runtime-utils.h"
 
 #include "src/arguments.h"
-#include "src/debug/debug.h"
 #include "src/factory.h"
 #include "src/frames-inl.h"
 #include "src/objects-inl.h"
@@ -15,76 +14,30 @@
 
 RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
   CHECK(IsResumableFunction(function->shared()->kind()));
 
-  Handle<FixedArray> operand_stack;
-  if (function->shared()->HasBytecodeArray()) {
-    // New-style generators.
-    DCHECK(!function->shared()->HasBaselineCode());
-    int size = function->shared()->bytecode_array()->register_count();
-    operand_stack = isolate->factory()->NewFixedArray(size);
-  } else {
-    // Old-style generators.
-    DCHECK(function->shared()->HasBaselineCode());
-    operand_stack = isolate->factory()->empty_fixed_array();
-  }
+  // Underlying function needs to have bytecode available.
+  DCHECK(function->shared()->HasBytecodeArray());
+  DCHECK(!function->shared()->HasBaselineCode());
+  int size = function->shared()->bytecode_array()->register_count();
+  Handle<FixedArray> register_file = isolate->factory()->NewFixedArray(size);
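+  // The register file preserves the interpreter registers across suspensions.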
 
   Handle<JSGeneratorObject> generator =
       isolate->factory()->NewJSGeneratorObject(function);
   generator->set_function(*function);
   generator->set_context(isolate->context());
   generator->set_receiver(*receiver);
-  generator->set_operand_stack(*operand_stack);
+  generator->set_register_file(*register_file);
   generator->set_continuation(JSGeneratorObject::kGeneratorExecuting);
   return *generator;
 }
 
-RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
-  HandleScope handle_scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0);
-
-  JavaScriptFrameIterator stack_iterator(isolate);
-  JavaScriptFrame* frame = stack_iterator.frame();
-  CHECK(IsResumableFunction(frame->function()->shared()->kind()));
-  DCHECK_EQ(frame->function(), generator_object->function());
-  DCHECK(frame->function()->shared()->is_compiled());
-  DCHECK(!frame->function()->IsOptimized());
-
-  isolate->debug()->RecordAsyncFunction(generator_object);
-
-  // The caller should have saved the context and continuation already.
-  DCHECK_EQ(generator_object->context(), Context::cast(frame->context()));
-  DCHECK_LT(0, generator_object->continuation());
-
-  // We expect there to be at least two values on the operand stack: the return
-  // value of the yield expression, and the arguments to this runtime call.
-  // Neither of those should be saved.
-  int operands_count = frame->ComputeOperandsCount();
-  DCHECK_GE(operands_count, 1 + args.length());
-  operands_count -= 1 + args.length();
-
-  if (operands_count == 0) {
-    // Although it's semantically harmless to call this function with an
-    // operands_count of zero, it is also unnecessary.
-    DCHECK_EQ(generator_object->operand_stack(),
-              isolate->heap()->empty_fixed_array());
-  } else {
-    Handle<FixedArray> operand_stack =
-        isolate->factory()->NewFixedArray(operands_count);
-    frame->SaveOperandStack(*operand_stack);
-    generator_object->set_operand_stack(*operand_stack);
-  }
-
-  return isolate->heap()->undefined_value();
-}
-
 RUNTIME_FUNCTION(Runtime_GeneratorClose) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
 
   generator->set_continuation(JSGeneratorObject::kGeneratorClosed);
@@ -94,7 +47,7 @@
 
 RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
 
   return generator->function();
@@ -102,15 +55,23 @@
 
 RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
 
   return generator->receiver();
 }
 
+RUNTIME_FUNCTION(Runtime_GeneratorGetContext) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+  return generator->context();
+}
+
 RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
 
   return generator->input_or_debug_pos();
@@ -118,7 +79,7 @@
 
 RUNTIME_FUNCTION(Runtime_GeneratorGetResumeMode) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
 
   return Smi::FromInt(generator->resume_mode());
@@ -126,7 +87,7 @@
 
 RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
 
   return Smi::FromInt(generator->continuation());
@@ -134,7 +95,7 @@
 
 RUNTIME_FUNCTION(Runtime_GeneratorGetSourcePosition) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
 
   if (!generator->is_suspended()) return isolate->heap()->undefined_value();
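The hunk above makes bytecode the only suspension mechanism: a generator's saved state is a register file sized from the bytecode's register count and allocated eagerly at creation, so the separate Runtime_SuspendJSGeneratorObject entry point that snapshotted the operand stack of old-style (baseline) generators can be deleted. A minimal standalone sketch of the register-file idea, using hypothetical Value/Generator types rather than V8's internal classes:

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-ins for V8-internal types; illustration only.
    using Value = int;

    struct Generator {
      // Sized once at creation, from the function's bytecode register count.
      std::vector<Value> register_file;
      int continuation;  // Bytecode offset to resume at; -1 while executing.

      explicit Generator(std::size_t register_count)
          : register_file(register_count), continuation(-1) {}

      // On suspension the interpreter copies its registers out...
      void Suspend(const Value* registers, int resume_offset) {
        for (std::size_t i = 0; i < register_file.size(); ++i)
          register_file[i] = registers[i];
        continuation = resume_offset;
      }

      // ...and on resumption copies them back before jumping to continuation.
      void Resume(Value* registers) const {
        for (std::size_t i = 0; i < register_file.size(); ++i)
          registers[i] = register_file[i];
      }
    };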
diff --git a/src/runtime/runtime-i18n.cc b/src/runtime/runtime-i18n.cc
index aeef25e..e89175a 100644
--- a/src/runtime/runtime-i18n.cc
+++ b/src/runtime/runtime-i18n.cc
@@ -8,13 +8,15 @@
 
 #include <memory>
 
-#include "src/api.h"
 #include "src/api-natives.h"
+#include "src/api.h"
 #include "src/arguments.h"
 #include "src/factory.h"
 #include "src/i18n.h"
 #include "src/isolate-inl.h"
 #include "src/messages.h"
+#include "src/string-case.h"
+#include "src/utils.h"
 
 #include "unicode/brkiter.h"
 #include "unicode/calendar.h"
@@ -71,7 +73,7 @@
   HandleScope scope(isolate);
   Factory* factory = isolate->factory();
 
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, locale_id_str, 0);
 
   v8::String::Utf8Value locale_id(v8::Utils::ToLocal(locale_id_str));
@@ -108,7 +110,7 @@
   HandleScope scope(isolate);
   Factory* factory = isolate->factory();
 
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
 
   const icu::Locale* available_locales = NULL;
@@ -153,7 +155,7 @@
   HandleScope scope(isolate);
   Factory* factory = isolate->factory();
 
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
 
   icu::Locale default_locale;
 
@@ -174,7 +176,7 @@
   HandleScope scope(isolate);
   Factory* factory = isolate->factory();
 
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSArray, input, 0);
 
@@ -258,7 +260,7 @@
 RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
 
@@ -274,7 +276,7 @@
 RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, expected_type, 1);
@@ -292,63 +294,33 @@
 RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, input, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, impl, 2);
 
   Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
   JSObject::SetProperty(input, marker, type, STRICT).Assert();
 
-  marker = isolate->factory()->intl_impl_object_symbol();
-  JSObject::SetProperty(input, marker, impl, STRICT).Assert();
-
   return isolate->heap()->undefined_value();
 }
 
 
-RUNTIME_FUNCTION(Runtime_GetImplFromInitializedIntlObject) {
-  HandleScope scope(isolate);
-
-  DCHECK(args.length() == 1);
-
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, input, 0);
-
-  if (!input->IsJSObject()) {
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate, NewTypeError(MessageTemplate::kNotIntlObject, input));
-  }
-
-  Handle<JSObject> obj = Handle<JSObject>::cast(input);
-
-  Handle<Symbol> marker = isolate->factory()->intl_impl_object_symbol();
-
-  Handle<Object> impl = JSReceiver::GetDataProperty(obj, marker);
-  if (!impl->IsJSObject()) {
-    THROW_NEW_ERROR_RETURN_FAILURE(
-        isolate, NewTypeError(MessageTemplate::kNotIntlObject, obj));
-  }
-  return *impl;
-}
-
-
 RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
 
-  Handle<ObjectTemplateInfo> date_format_template = I18N::GetTemplate(isolate);
+  Handle<JSFunction> constructor(
+      isolate->native_context()->intl_date_time_format_function());
 
-  // Create an empty object wrapper.
   Handle<JSObject> local_object;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, local_object,
-      ApiNatives::InstantiateObject(date_format_template));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
+                                     JSObject::New(constructor, constructor));
 
   // Set date time formatter as internal field of the resulting JS object.
   icu::SimpleDateFormat* date_format =
@@ -358,11 +330,6 @@
 
   local_object->SetInternalField(0, reinterpret_cast<Smi*>(date_format));
 
-  Factory* factory = isolate->factory();
-  Handle<String> key = factory->NewStringFromStaticChars("dateFormat");
-  Handle<String> value = factory->NewStringFromStaticChars("valid");
-  JSObject::AddProperty(local_object, key, value, NONE);
-
   // Make object handle weak so we can delete the date format once GC kicks in.
   Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
   GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
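The creation path above follows a consistent pattern: the heap-allocated ICU object is stashed in an internal field of the wrapper, and a weak global handle with a finalizer frees it once the wrapper becomes unreachable. For readers who know V8 only through the embedder API, a rough public-API analogue follows; the names DeleteDateFormat/WrapDateFormat and the ownership details are assumptions for illustration, not V8-internal code:

    #include <v8.h>
    #include <unicode/smpdtfmt.h>

    // Finalizer: free the ICU object once the JS wrapper is collected. The
    // embedder is also responsible for Reset()-ing the owning Persistent.
    void DeleteDateFormat(
        const v8::WeakCallbackInfo<icu::SimpleDateFormat>& data) {
      delete data.GetParameter();
    }

    void WrapDateFormat(v8::Isolate* isolate, v8::Local<v8::Object> wrapper,
                        icu::SimpleDateFormat* fmt) {
      wrapper->SetAlignedPointerInInternalField(0, fmt);
      auto* handle = new v8::Persistent<v8::Object>(isolate, wrapper);
      // Fire the callback when only this weak handle keeps wrapper alive.
      handle->SetWeak(fmt, DeleteDateFormat, v8::WeakCallbackType::kParameter);
    }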
@@ -375,7 +342,7 @@
 RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
@@ -385,7 +352,7 @@
 
   icu::SimpleDateFormat* date_format =
       DateFormat::UnpackDateFormat(isolate, date_format_holder);
-  if (!date_format) return isolate->ThrowIllegalOperation();
+  CHECK_NOT_NULL(date_format);
 
   icu::UnicodeString result;
   date_format->format(value->Number(), result);
@@ -476,7 +443,7 @@
   HandleScope scope(isolate);
   Factory* factory = isolate->factory();
 
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
@@ -486,7 +453,7 @@
 
   icu::SimpleDateFormat* date_format =
       DateFormat::UnpackDateFormat(isolate, date_format_holder);
-  if (!date_format) return isolate->ThrowIllegalOperation();
+  CHECK_NOT_NULL(date_format);
 
   icu::UnicodeString formatted;
   icu::FieldPositionIterator fp_iter;
@@ -529,47 +496,21 @@
   return *result;
 }
 
-RUNTIME_FUNCTION(Runtime_InternalDateParse) {
-  HandleScope scope(isolate);
-
-  DCHECK(args.length() == 2);
-
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, date_string, 1);
-
-  v8::String::Utf8Value utf8_date(v8::Utils::ToLocal(date_string));
-  icu::UnicodeString u_date(icu::UnicodeString::fromUTF8(*utf8_date));
-  icu::SimpleDateFormat* date_format =
-      DateFormat::UnpackDateFormat(isolate, date_format_holder);
-  if (!date_format) return isolate->ThrowIllegalOperation();
-
-  UErrorCode status = U_ZERO_ERROR;
-  UDate date = date_format->parse(u_date, status);
-  if (U_FAILURE(status)) return isolate->heap()->undefined_value();
-
-  RETURN_RESULT_OR_FAILURE(
-      isolate, JSDate::New(isolate->date_function(), isolate->date_function(),
-                           static_cast<double>(date)));
-}
-
-
 RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
 
-  Handle<ObjectTemplateInfo> number_format_template =
-      I18N::GetTemplate(isolate);
+  Handle<JSFunction> constructor(
+      isolate->native_context()->intl_number_format_function());
 
-  // Create an empty object wrapper.
   Handle<JSObject> local_object;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, local_object,
-      ApiNatives::InstantiateObject(number_format_template));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
+                                     JSObject::New(constructor, constructor));
 
   // Set number formatter as internal field of the resulting JS object.
   icu::DecimalFormat* number_format =
@@ -579,11 +520,6 @@
 
   local_object->SetInternalField(0, reinterpret_cast<Smi*>(number_format));
 
-  Factory* factory = isolate->factory();
-  Handle<String> key = factory->NewStringFromStaticChars("numberFormat");
-  Handle<String> value = factory->NewStringFromStaticChars("valid");
-  JSObject::AddProperty(local_object, key, value, NONE);
-
   Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
   GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
                           NumberFormat::DeleteNumberFormat,
@@ -595,7 +531,7 @@
 RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
@@ -605,7 +541,7 @@
 
   icu::DecimalFormat* number_format =
       NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
-  if (!number_format) return isolate->ThrowIllegalOperation();
+  CHECK_NOT_NULL(number_format);
 
   icu::UnicodeString result;
   number_format->format(value->Number(), result);
@@ -617,62 +553,21 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_InternalNumberParse) {
-  HandleScope scope(isolate);
-
-  DCHECK(args.length() == 2);
-
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, number_string, 1);
-
-  isolate->CountUsage(v8::Isolate::UseCounterFeature::kIntlV8Parse);
-
-  v8::String::Utf8Value utf8_number(v8::Utils::ToLocal(number_string));
-  icu::UnicodeString u_number(icu::UnicodeString::fromUTF8(*utf8_number));
-  icu::DecimalFormat* number_format =
-      NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
-  if (!number_format) return isolate->ThrowIllegalOperation();
-
-  UErrorCode status = U_ZERO_ERROR;
-  icu::Formattable result;
-  // ICU 4.6 doesn't support parseCurrency call. We need to wait for ICU49
-  // to be part of Chrome.
-  // TODO(cira): Include currency parsing code using parseCurrency call.
-  // We need to check if the formatter parses all currencies or only the
-  // one it was constructed with (it will impact the API - how to return ISO
-  // code and the value).
-  number_format->parse(u_number, result, status);
-  if (U_FAILURE(status)) return isolate->heap()->undefined_value();
-
-  switch (result.getType()) {
-    case icu::Formattable::kDouble:
-      return *isolate->factory()->NewNumber(result.getDouble());
-    case icu::Formattable::kLong:
-      return *isolate->factory()->NewNumberFromInt(result.getLong());
-    case icu::Formattable::kInt64:
-      return *isolate->factory()->NewNumber(
-          static_cast<double>(result.getInt64()));
-    default:
-      return isolate->heap()->undefined_value();
-  }
-}
-
-
 RUNTIME_FUNCTION(Runtime_CreateCollator) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
 
-  Handle<ObjectTemplateInfo> collator_template = I18N::GetTemplate(isolate);
+  Handle<JSFunction> constructor(
+      isolate->native_context()->intl_collator_function());
 
-  // Create an empty object wrapper.
   Handle<JSObject> local_object;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, local_object, ApiNatives::InstantiateObject(collator_template));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
+                                     JSObject::New(constructor, constructor));
 
   // Set collator as internal field of the resulting JS object.
   icu::Collator* collator =
@@ -682,11 +577,6 @@
 
   local_object->SetInternalField(0, reinterpret_cast<Smi*>(collator));
 
-  Factory* factory = isolate->factory();
-  Handle<String> key = factory->NewStringFromStaticChars("collator");
-  Handle<String> value = factory->NewStringFromStaticChars("valid");
-  JSObject::AddProperty(local_object, key, value, NONE);
-
   Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
   GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
                           Collator::DeleteCollator,
@@ -698,14 +588,14 @@
 RUNTIME_FUNCTION(Runtime_InternalCompare) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, collator_holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, string1, 1);
   CONVERT_ARG_HANDLE_CHECKED(String, string2, 2);
 
   icu::Collator* collator = Collator::UnpackCollator(isolate, collator_holder);
-  if (!collator) return isolate->ThrowIllegalOperation();
+  CHECK_NOT_NULL(collator);
 
   string1 = String::Flatten(string1);
   string2 = String::Flatten(string2);
@@ -720,10 +610,11 @@
     String::FlatContent flat2 = string2->GetFlatContent();
     std::unique_ptr<uc16[]> sap1;
     std::unique_ptr<uc16[]> sap2;
-    const UChar* string_val1 = GetUCharBufferFromFlat(flat1, &sap1, length1);
-    const UChar* string_val2 = GetUCharBufferFromFlat(flat2, &sap2, length2);
-    result =
-        collator->compare(string_val1, length1, string_val2, length2, status);
+    icu::UnicodeString string_val1(
+        FALSE, GetUCharBufferFromFlat(flat1, &sap1, length1), length1);
+    icu::UnicodeString string_val2(
+        FALSE, GetUCharBufferFromFlat(flat2, &sap2, length2), length2);
+    result = collator->compare(string_val1, string_val2, status);
   }
   if (U_FAILURE(status)) return isolate->ThrowIllegalOperation();
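The rewritten comparison wraps the flat string buffers in icu::UnicodeString's read-only aliasing constructor; the FALSE flag means the buffer is not NUL-terminated and must not be copied, so no allocation happens on this path. A standalone sketch of the same ICU pattern:

    #include <unicode/coll.h>
    #include <unicode/unistr.h>

    // Compare two UTF-16 buffers with a locale-aware collator without
    // copying them: UnicodeString(FALSE, buf, len) is a read-only alias,
    // valid only while the underlying buffers stay alive.
    UCollationResult CompareBuffers(const icu::Collator& collator,
                                    const UChar* buf1, int32_t len1,
                                    const UChar* buf2, int32_t len2,
                                    UErrorCode& status) {
      icu::UnicodeString s1(FALSE, buf1, len1);  // No allocation, no copy.
      icu::UnicodeString s2(FALSE, buf2, len2);
      return collator.compare(s1, s2, status);
    }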
 
@@ -743,7 +634,7 @@
       {"nfkc", UNORM2_DECOMPOSE},
   };
 
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
   CONVERT_NUMBER_CHECKED(int, form_id, Int32, args[1]);
@@ -792,23 +683,21 @@
 RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
 
-  Handle<ObjectTemplateInfo> break_iterator_template =
-      I18N::GetTemplate2(isolate);
+  Handle<JSFunction> constructor(
+      isolate->native_context()->intl_v8_break_iterator_function());
 
-  // Create an empty object wrapper.
   Handle<JSObject> local_object;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, local_object,
-      ApiNatives::InstantiateObject(break_iterator_template));
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
+                                     JSObject::New(constructor, constructor));
 
   // Set break iterator as internal field of the resulting JS object.
-  icu::BreakIterator* break_iterator = BreakIterator::InitializeBreakIterator(
+  icu::BreakIterator* break_iterator = V8BreakIterator::InitializeBreakIterator(
       isolate, locale, options, resolved);
 
   if (!break_iterator) return isolate->ThrowIllegalOperation();
@@ -817,16 +706,11 @@
   // Make sure that the pointer to adopted text is NULL.
   local_object->SetInternalField(1, static_cast<Smi*>(nullptr));
 
-  Factory* factory = isolate->factory();
-  Handle<String> key = factory->NewStringFromStaticChars("breakIterator");
-  Handle<String> value = factory->NewStringFromStaticChars("valid");
-  JSObject::AddProperty(local_object, key, value, NONE);
-
   // Make object handle weak so we can delete the break iterator once GC kicks
   // in.
   Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
   GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
-                          BreakIterator::DeleteBreakIterator,
+                          V8BreakIterator::DeleteBreakIterator,
                           WeakCallbackType::kInternalFields);
   return *local_object;
 }
@@ -835,14 +719,14 @@
 RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, text, 1);
 
   icu::BreakIterator* break_iterator =
-      BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
-  if (!break_iterator) return isolate->ThrowIllegalOperation();
+      V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+  CHECK_NOT_NULL(break_iterator);
 
   icu::UnicodeString* u_text = reinterpret_cast<icu::UnicodeString*>(
       break_iterator_holder->GetInternalField(1));
@@ -866,13 +750,13 @@
 RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
 
   icu::BreakIterator* break_iterator =
-      BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
-  if (!break_iterator) return isolate->ThrowIllegalOperation();
+      V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+  CHECK_NOT_NULL(break_iterator);
 
   return *isolate->factory()->NewNumberFromInt(break_iterator->first());
 }
@@ -881,13 +765,13 @@
 RUNTIME_FUNCTION(Runtime_BreakIteratorNext) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
 
   icu::BreakIterator* break_iterator =
-      BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
-  if (!break_iterator) return isolate->ThrowIllegalOperation();
+      V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+  CHECK_NOT_NULL(break_iterator);
 
   return *isolate->factory()->NewNumberFromInt(break_iterator->next());
 }
@@ -896,13 +780,13 @@
 RUNTIME_FUNCTION(Runtime_BreakIteratorCurrent) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
 
   icu::BreakIterator* break_iterator =
-      BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
-  if (!break_iterator) return isolate->ThrowIllegalOperation();
+      V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+  CHECK_NOT_NULL(break_iterator);
 
   return *isolate->factory()->NewNumberFromInt(break_iterator->current());
 }
@@ -911,13 +795,13 @@
 RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
   HandleScope scope(isolate);
 
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
 
   icu::BreakIterator* break_iterator =
-      BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
-  if (!break_iterator) return isolate->ThrowIllegalOperation();
+      V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+  CHECK_NOT_NULL(break_iterator);
 
   // TODO(cira): Remove cast once ICU fixes base BreakIterator class.
   icu::RuleBasedBreakIterator* rule_based_iterator =
@@ -949,6 +833,8 @@
   Handle<SeqTwoByteString> result;
   std::unique_ptr<uc16[]> sap;
 
+  if (dest_length == 0) return isolate->heap()->empty_string();
+
   // This is not a real loop. It'll be executed only once (no overflow) or
   // twice (overflow).
   for (int i = 0; i < 2; ++i) {
@@ -957,6 +843,7 @@
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, result, isolate->factory()->NewRawTwoByteString(dest_length));
     DisallowHeapAllocation no_gc;
+    DCHECK(s->IsFlat());
     String::FlatContent flat = s->GetFlatContent();
     const UChar* src = GetUCharBufferFromFlat(flat, &sap, src_length);
     status = U_ZERO_ERROR;
@@ -1042,15 +929,14 @@
 const uint16_t sharp_s = 0xDF;
 
 template <typename Char>
-bool ToUpperOneByte(const Vector<const Char>& src,
-                    Handle<SeqOneByteString> result, int* sharp_s_count) {
+bool ToUpperOneByte(const Vector<const Char>& src, uint8_t* dest,
+                    int* sharp_s_count) {
+  // Still a pretty fast path for input with non-ASCII Latin-1 characters.
 
   // There are two special cases.
   //  1. U+00B5 and U+00FF are mapped to a character beyond U+00FF.
   //  2. Lower case sharp-S converts to "SS" (two characters)
   *sharp_s_count = 0;
-  int32_t index = 0;
   for (auto it = src.begin(); it != src.end(); ++it) {
     uint16_t ch = static_cast<uint16_t>(*it);
     if (V8_UNLIKELY(ch == sharp_s)) {
@@ -1062,7 +948,7 @@
       // need to take the 16-bit path.
       return false;
     }
-    result->SeqOneByteStringSet(index++, ToLatin1Upper(ch));
+    *dest++ = ToLatin1Upper(ch);
   }
 
   return true;
@@ -1083,105 +969,112 @@
   }
 }
 
-}  // namespace
-
-RUNTIME_FUNCTION(Runtime_StringToLowerCaseI18N) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(args.length(), 1);
-  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
-
-  int length = s->length();
-  s = String::Flatten(s);
-  // First scan the string for uppercase and non-ASCII characters:
-  if (s->HasOnlyOneByteChars()) {
-    int first_index_to_lower = length;
-    for (int index = 0; index < length; ++index) {
-      // Blink specializes this path for one-byte strings, so it
-      // does not need to do a generic get, but can do the equivalent
-      // of SeqOneByteStringGet.
-      uint16_t ch = s->Get(index);
-      if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
-        first_index_to_lower = index;
-        break;
-      }
+inline int FindFirstUpperOrNonAscii(Handle<String> s, int length) {
+  for (int index = 0; index < length; ++index) {
+    uint16_t ch = s->Get(index);
+    if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
+      return index;
     }
-
-    // Nothing to do if the string is all ASCII with no uppercase.
-    if (first_index_to_lower == length) return *s;
-
-    // We depend here on the invariant that the length of a Latin1
-    // string is invariant under ToLowerCase, and the result always
-    // fits in the Latin1 range in the *root locale*. It does not hold
-    // for ToUpperCase even in the root locale.
-    Handle<SeqOneByteString> result;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, isolate->factory()->NewRawOneByteString(length));
-
-    DisallowHeapAllocation no_gc;
-    String::FlatContent flat = s->GetFlatContent();
-    if (flat.IsOneByte()) {
-      const uint8_t* src = flat.ToOneByteVector().start();
-      CopyChars(result->GetChars(), src,
-                static_cast<size_t>(first_index_to_lower));
-      for (int index = first_index_to_lower; index < length; ++index) {
-        uint16_t ch = static_cast<uint16_t>(src[index]);
-        result->SeqOneByteStringSet(index, ToLatin1Lower(ch));
-      }
-    } else {
-      const uint16_t* src = flat.ToUC16Vector().start();
-      CopyChars(result->GetChars(), src,
-                static_cast<size_t>(first_index_to_lower));
-      for (int index = first_index_to_lower; index < length; ++index) {
-        uint16_t ch = src[index];
-        result->SeqOneByteStringSet(index, ToLatin1Lower(ch));
-      }
-    }
-
-    return *result;
   }
-
-  // Blink had an additional case here for ASCII 2-byte strings, but
-  // that is subsumed by the above code (assuming there isn't a false
-  // negative for HasOnlyOneByteChars).
-
-  // Do a slower implementation for cases that include non-ASCII characters.
-  return LocaleConvertCase(s, isolate, false, "");
+  return length;
 }
 
-RUNTIME_FUNCTION(Runtime_StringToUpperCaseI18N) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(args.length(), 1);
-  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+MUST_USE_RESULT Object* ConvertToLower(Handle<String> s, Isolate* isolate) {
+  if (!s->HasOnlyOneByteChars()) {
+    // Use a slower implementation for strings with characters beyond U+00FF.
+    return LocaleConvertCase(s, isolate, false, "");
+  }
 
-  // This function could be optimized for no-op cases the way lowercase
-  // counterpart is, but in empirical testing, few actual calls to upper()
-  // are no-ops. So, it wouldn't be worth the extra time for pre-scanning.
+  int length = s->length();
 
+  // We rely here on the invariant that the length of a Latin1 string
+  // is unchanged by ToLowerCase, and that the result always fits in the
+  // Latin1 range in the *root locale*. Neither holds for ToUpperCase,
+  // even in the root locale.
+
+  // For strings shorter than a machine word, scan for uppercase and
+  // non-ASCII characters without any memory-allocation overhead.
+  // TODO(jshin): Apply this to longer input by breaking FastAsciiConvert()
+  // into two parts: one that scans the prefix for characters that need no
+  // change, and one that handles the ASCII-only characters.
+  int index_to_first_unprocessed = length;
+  const bool is_short = length < static_cast<int>(sizeof(uintptr_t));
+  if (is_short) {
+    index_to_first_unprocessed = FindFirstUpperOrNonAscii(s, length);
+    // Nothing to do if the string is all ASCII with no uppercase.
+    if (index_to_first_unprocessed == length) return *s;
+  }
+
+  Handle<SeqOneByteString> result =
+      isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+
+  DisallowHeapAllocation no_gc;
+  DCHECK(s->IsFlat());
+  String::FlatContent flat = s->GetFlatContent();
+  uint8_t* dest = result->GetChars();
+  if (flat.IsOneByte()) {
+    const uint8_t* src = flat.ToOneByteVector().start();
+    bool has_changed_character = false;
+    index_to_first_unprocessed = FastAsciiConvert<true>(
+        reinterpret_cast<char*>(dest), reinterpret_cast<const char*>(src),
+        length, &has_changed_character);
+    // If the input is not all ASCII, keep the converted result up to
+    // index_to_first_unprocessed and process the rest below.
+    if (index_to_first_unprocessed == length)
+      return has_changed_character ? *result : *s;
+
+    for (int index = index_to_first_unprocessed; index < length; ++index) {
+      dest[index] = ToLatin1Lower(static_cast<uint16_t>(src[index]));
+    }
+  } else {
+    if (index_to_first_unprocessed == length) {
+      DCHECK(!is_short);
+      index_to_first_unprocessed = FindFirstUpperOrNonAscii(s, length);
+    }
+    // Nothing to do if the string is all ASCII with no uppercase.
+    if (index_to_first_unprocessed == length) return *s;
+    const uint16_t* src = flat.ToUC16Vector().start();
+    CopyChars(dest, src, index_to_first_unprocessed);
+    for (int index = index_to_first_unprocessed; index < length; ++index) {
+      dest[index] = ToLatin1Lower(static_cast<uint16_t>(src[index]));
+    }
+  }
+
+  return *result;
+}
+
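FastAsciiConvert, presumably declared in the newly included src/string-case.h, does the bulk of the work in ConvertToLower: it converts the ASCII prefix, reports whether anything changed, and returns the index of the first character it could not handle so the caller can finish byte-by-byte. A simplified scalar sketch of that contract (the real routine additionally processes sizeof(uintptr_t) bytes at a time, as the TODO above hints):

    #include <cstdint>

    // Converts the ASCII prefix of src into dst, flipping case per the
    // template flag (<true> lowers, matching FastAsciiConvert<true> in
    // ConvertToLower above). Returns the index of the first non-ASCII
    // character, or length if the whole input was ASCII.
    template <bool to_lower>
    int SimpleAsciiConvert(char* dst, const char* src, int length,
                           bool* changed) {
      *changed = false;
      for (int i = 0; i < length; ++i) {
        uint8_t ch = static_cast<uint8_t>(src[i]);
        if (ch & 0x80) return i;  // Non-ASCII: caller handles the rest.
        if (to_lower ? (ch >= 'A' && ch <= 'Z') : (ch >= 'a' && ch <= 'z')) {
          ch ^= 0x20;  // Flip the ASCII case bit.
          *changed = true;
        }
        dst[i] = static_cast<char>(ch);
      }
      return length;
    }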
+MUST_USE_RESULT Object* ConvertToUpper(Handle<String> s, Isolate* isolate) {
   int32_t length = s->length();
-  s = String::Flatten(s);
+  if (s->HasOnlyOneByteChars() && length > 0) {
+    Handle<SeqOneByteString> result =
+        isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
 
-  if (s->HasOnlyOneByteChars()) {
-    Handle<SeqOneByteString> result;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, isolate->factory()->NewRawOneByteString(length));
-
+    DCHECK(s->IsFlat());
     int sharp_s_count;
     bool is_result_single_byte;
     {
       DisallowHeapAllocation no_gc;
       String::FlatContent flat = s->GetFlatContent();
-      // If it was ok to slow down ASCII-only input slightly, ToUpperFastASCII
-      // could be removed  because ToUpperOneByte is pretty fast now (it
-      // does not call ICU API any more.).
+      uint8_t* dest = result->GetChars();
       if (flat.IsOneByte()) {
         Vector<const uint8_t> src = flat.ToOneByteVector();
-        if (ToUpperFastASCII(src, result)) return *result;
-        is_result_single_byte = ToUpperOneByte(src, result, &sharp_s_count);
+        bool has_changed_character = false;
+        int index_to_first_unprocessed =
+            FastAsciiConvert<false>(reinterpret_cast<char*>(result->GetChars()),
+                                    reinterpret_cast<const char*>(src.start()),
+                                    length, &has_changed_character);
+        if (index_to_first_unprocessed == length)
+          return has_changed_character ? *result : *s;
+        // If the input is not all ASCII, keep the converted result up to
+        // index_to_first_unprocessed and process the rest below.
+        is_result_single_byte =
+            ToUpperOneByte(src.SubVector(index_to_first_unprocessed, length),
+                           dest + index_to_first_unprocessed, &sharp_s_count);
       } else {
         DCHECK(flat.IsTwoByte());
         Vector<const uint16_t> src = flat.ToUC16Vector();
         if (ToUpperFastASCII(src, result)) return *result;
-        is_result_single_byte = ToUpperOneByte(src, result, &sharp_s_count);
+        is_result_single_byte = ToUpperOneByte(src, dest, &sharp_s_count);
       }
     }
 
@@ -1212,26 +1105,67 @@
   return LocaleConvertCase(s, isolate, true, "");
 }
 
+MUST_USE_RESULT Object* ConvertCase(Handle<String> s, bool is_upper,
+                                    Isolate* isolate) {
+  return is_upper ? ConvertToUpper(s, isolate) : ConvertToLower(s, isolate);
+}
+
+}  // namespace
+
+RUNTIME_FUNCTION(Runtime_StringToLowerCaseI18N) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(args.length(), 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+  s = String::Flatten(s);
+  return ConvertToLower(s, isolate);
+}
+
+RUNTIME_FUNCTION(Runtime_StringToUpperCaseI18N) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(args.length(), 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+  s = String::Flatten(s);
+  return ConvertToUpper(s, isolate);
+}
+
 RUNTIME_FUNCTION(Runtime_StringLocaleConvertCase) {
   HandleScope scope(isolate);
   DCHECK_EQ(args.length(), 3);
   CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
   CONVERT_BOOLEAN_ARG_CHECKED(is_upper, 1);
-  CONVERT_ARG_HANDLE_CHECKED(SeqOneByteString, lang, 2);
+  CONVERT_ARG_HANDLE_CHECKED(String, lang_arg, 2);
 
-  // All the languages requiring special handling ("az", "el", "lt", "tr")
-  // have a 2-letter language code.
-  DCHECK(lang->length() == 2);
-  uint8_t lang_str[3];
-  memcpy(lang_str, lang->GetChars(), 2);
-  lang_str[2] = 0;
+  // A primary language subtag can be up to 8 characters long in theory.
+  // https://tools.ietf.org/html/bcp47#section-2.2.1
+  DCHECK_LE(lang_arg->length(), 8);
+  lang_arg = String::Flatten(lang_arg);
   s = String::Flatten(s);
+
+  // All the languages requiring special handling have two-letter codes.
+  if (V8_UNLIKELY(lang_arg->length() > 2))
+    return ConvertCase(s, is_upper, isolate);
+
+  char c1, c2;
+  {
+    DisallowHeapAllocation no_gc;
+    String::FlatContent lang = lang_arg->GetFlatContent();
+    c1 = lang.Get(0);
+    c2 = lang.Get(1);
+  }
   // TODO(jshin): Consider adding a fast path for ASCII or Latin-1. The fast
   // path in the root locale needs to be adjusted for az, lt, and tr because
   // even the case mapping of ASCII-range characters differs in those locales.
-  // Greek (el) does not require any adjustment, though.
-  return LocaleConvertCase(s, isolate, is_upper,
-                           reinterpret_cast<const char*>(lang_str));
+  // Greek (el) does not require any adjustment.
+  if (V8_UNLIKELY(c1 == 't' && c2 == 'r'))
+    return LocaleConvertCase(s, isolate, is_upper, "tr");
+  if (V8_UNLIKELY(c1 == 'e' && c2 == 'l'))
+    return LocaleConvertCase(s, isolate, is_upper, "el");
+  if (V8_UNLIKELY(c1 == 'l' && c2 == 't'))
+    return LocaleConvertCase(s, isolate, is_upper, "lt");
+  if (V8_UNLIKELY(c1 == 'a' && c2 == 'z'))
+    return LocaleConvertCase(s, isolate, is_upper, "az");
+
+  return ConvertCase(s, is_upper, isolate);
 }
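Only tr, el, lt, and az reach LocaleConvertCase with an explicit language above; everything else falls through to the root-locale conversion. The classic case motivating this split is Turkish dotted/dotless I, which a short standalone ICU snippet (not V8 code) demonstrates:

    #include <unicode/locid.h>
    #include <unicode/unistr.h>

    // In the root locale "i" uppercases to "I", but in Turkish it becomes
    // U+0130 (LATIN CAPITAL LETTER I WITH DOT ABOVE), so the root-locale
    // fast path cannot be reused for "tr" (or "az", which shares the rule).
    bool TurkishUppercaseDiffers() {
      icu::UnicodeString root = UNICODE_STRING_SIMPLE("i");
      icu::UnicodeString turkish = root;
      root.toUpper(icu::Locale::getRoot());
      turkish.toUpper(icu::Locale("tr"));
      return root != turkish;  // true: "I" vs. U+0130
    }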
 
 RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc
index 621f335..8399509 100644
--- a/src/runtime/runtime-internal.cc
+++ b/src/runtime/runtime-internal.cc
@@ -9,13 +9,14 @@
 #include "src/arguments.h"
 #include "src/ast/prettyprinter.h"
 #include "src/bootstrapper.h"
+#include "src/builtins/builtins.h"
 #include "src/conversions.h"
 #include "src/debug/debug.h"
 #include "src/frames-inl.h"
 #include "src/isolate-inl.h"
 #include "src/messages.h"
 #include "src/parsing/parse-info.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/parsing.h"
 #include "src/wasm/wasm-module.h"
 
 namespace v8 {
@@ -23,7 +24,7 @@
 
 RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   CHECK(isolate->bootstrapper()->IsActive());
   return isolate->heap()->undefined_value();
 }
@@ -31,7 +32,7 @@
 
 RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
   CHECK(isolate->bootstrapper()->IsActive());
   JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
@@ -42,22 +43,9 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_ExportExperimentalFromRuntime) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
-  CHECK(isolate->bootstrapper()->IsActive());
-  JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
-                                "ExportExperimentalFromRuntime");
-  Bootstrapper::ExportExperimentalFromRuntime(isolate, container);
-  JSObject::MigrateSlowToFast(container, 0, "ExportExperimentalFromRuntime");
-  return *container;
-}
-
-
 RUNTIME_FUNCTION(Runtime_InstallToContext) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
   CHECK(array->HasFastElements());
   CHECK(isolate->bootstrapper()->IsActive());
@@ -82,14 +70,14 @@
 
 RUNTIME_FUNCTION(Runtime_Throw) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   return isolate->Throw(args[0]);
 }
 
 
 RUNTIME_FUNCTION(Runtime_ReThrow) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   return isolate->ReThrow(args[0]);
 }
 
@@ -100,15 +88,22 @@
   return isolate->StackOverflow();
 }
 
+RUNTIME_FUNCTION(Runtime_ThrowSymbolAsyncIteratorInvalid) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(0, args.length());
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kSymbolAsyncIteratorInvalid));
+}
+
 RUNTIME_FUNCTION(Runtime_ThrowTypeError) {
   HandleScope scope(isolate);
   DCHECK_LE(1, args.length());
   CONVERT_SMI_ARG_CHECKED(message_id_smi, 0);
 
   Handle<Object> undefined = isolate->factory()->undefined_value();
-  Handle<Object> arg0 = (args.length() > 1) ? args.at<Object>(1) : undefined;
-  Handle<Object> arg1 = (args.length() > 2) ? args.at<Object>(2) : undefined;
-  Handle<Object> arg2 = (args.length() > 3) ? args.at<Object>(3) : undefined;
+  Handle<Object> arg0 = (args.length() > 1) ? args.at(1) : undefined;
+  Handle<Object> arg1 = (args.length() > 2) ? args.at(2) : undefined;
+  Handle<Object> arg2 = (args.length() > 3) ? args.at(3) : undefined;
 
   MessageTemplate::Template message_id =
       static_cast<MessageTemplate::Template>(message_id_smi);
@@ -117,77 +112,23 @@
                                  NewTypeError(message_id, arg0, arg1, arg2));
 }
 
-RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_SMI_ARG_CHECKED(message_id, 0);
-  CONVERT_SMI_ARG_CHECKED(byte_offset, 1);
-  Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
-      static_cast<MessageTemplate::Template>(message_id));
-
-  // For wasm traps, the byte offset (a.k.a source position) can not be
-  // determined from relocation info, since the explicit checks for traps
-  // converge in one singe block which calls this runtime function.
-  // We hence pass the byte offset explicitely, and patch it into the top-most
-  // frame (a wasm frame) on the collected stack trace.
-  // TODO(wasm): This implementation is temporary, see bug #5007:
-  // https://bugs.chromium.org/p/v8/issues/detail?id=5007
-  Handle<JSObject> error = Handle<JSObject>::cast(error_obj);
-  Handle<Object> stack_trace_obj = JSReceiver::GetDataProperty(
-      error, isolate->factory()->stack_trace_symbol());
-  // Patch the stack trace (array of <receiver, function, code, position>).
-  if (stack_trace_obj->IsJSArray()) {
-    Handle<FrameArray> stack_elements(
-        FrameArray::cast(JSArray::cast(*stack_trace_obj)->elements()));
-    DCHECK(stack_elements->Code(0)->kind() == AbstractCode::WASM_FUNCTION);
-    DCHECK(stack_elements->Offset(0)->value() >= 0);
-    stack_elements->SetOffset(0, Smi::FromInt(-1 - byte_offset));
-  }
-
-  // Patch the detailed stack trace (array of JSObjects with various
-  // properties).
-  Handle<Object> detailed_stack_trace_obj = JSReceiver::GetDataProperty(
-      error, isolate->factory()->detailed_stack_trace_symbol());
-  if (detailed_stack_trace_obj->IsJSArray()) {
-    Handle<FixedArray> stack_elements(
-        FixedArray::cast(JSArray::cast(*detailed_stack_trace_obj)->elements()));
-    DCHECK_GE(stack_elements->length(), 1);
-    Handle<JSObject> top_frame(JSObject::cast(stack_elements->get(0)));
-    Handle<String> wasm_offset_key =
-        isolate->factory()->InternalizeOneByteString(
-            STATIC_CHAR_VECTOR("column"));
-    LookupIterator it(top_frame, wasm_offset_key, top_frame,
-                      LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
-    if (it.IsFound()) {
-      DCHECK(JSReceiver::GetDataProperty(&it)->IsSmi());
-      // Make column number 1-based here.
-      Maybe<bool> data_set = JSReceiver::SetDataProperty(
-          &it, handle(Smi::FromInt(byte_offset + 1), isolate));
-      DCHECK(data_set.IsJust() && data_set.FromJust() == true);
-      USE(data_set);
-    }
-  }
-
-  return isolate->Throw(*error_obj);
-}
-
 RUNTIME_FUNCTION(Runtime_UnwindAndFindExceptionHandler) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   return isolate->UnwindAndFindHandler();
 }
 
 
 RUNTIME_FUNCTION(Runtime_PromoteScheduledException) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   return isolate->PromoteScheduledException();
 }
 
 
 RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
   THROW_NEW_ERROR_RETURN_FAILURE(
       isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
@@ -196,7 +137,7 @@
 
 RUNTIME_FUNCTION(Runtime_NewTypeError) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_INT32_ARG_CHECKED(template_index, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
   auto message_template =
@@ -207,7 +148,7 @@
 
 RUNTIME_FUNCTION(Runtime_NewReferenceError) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_INT32_ARG_CHECKED(template_index, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
   auto message_template =
@@ -218,7 +159,7 @@
 
 RUNTIME_FUNCTION(Runtime_NewSyntaxError) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_INT32_ARG_CHECKED(template_index, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
   auto message_template =
@@ -234,7 +175,7 @@
 
 RUNTIME_FUNCTION(Runtime_ThrowIllegalInvocation) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   THROW_NEW_ERROR_RETURN_FAILURE(
       isolate, NewTypeError(MessageTemplate::kIllegalInvocation));
 }
@@ -249,6 +190,14 @@
       NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, arg0, arg1));
 }
 
+RUNTIME_FUNCTION(Runtime_ThrowInvalidHint) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, hint, 0);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kInvalidHint, hint));
+}
+
 RUNTIME_FUNCTION(Runtime_ThrowInvalidStringLength) {
   HandleScope scope(isolate);
   THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
@@ -256,13 +205,40 @@
 
 RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
   THROW_NEW_ERROR_RETURN_FAILURE(
       isolate,
       NewTypeError(MessageTemplate::kIteratorResultNotAnObject, value));
 }
 
+RUNTIME_FUNCTION(Runtime_ThrowSymbolIteratorInvalid) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(0, args.length());
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kSymbolIteratorInvalid));
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowNonCallableInInstanceOfCheck) {
+  HandleScope scope(isolate);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kNonCallableInInstanceOfCheck));
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowNonObjectInInstanceOfCheck) {
+  HandleScope scope(isolate);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kNonObjectInInstanceOfCheck));
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowNotConstructor) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError(MessageTemplate::kNotConstructor, object));
+}
+
 RUNTIME_FUNCTION(Runtime_ThrowNotGeneric) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
@@ -290,7 +266,7 @@
 
 RUNTIME_FUNCTION(Runtime_StackGuard) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
 
   // First check if this is a real stack overflow.
   StackLimitCheck check(isolate);
@@ -304,14 +280,14 @@
 
 RUNTIME_FUNCTION(Runtime_Interrupt) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   return isolate->stack_guard()->HandleInterrupts();
 }
 
 
 RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_SMI_ARG_CHECKED(size, 0);
   CHECK(IsAligned(size, kPointerSize));
   CHECK(size > 0);
@@ -322,14 +298,14 @@
 
 RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_SMI_ARG_CHECKED(size, 0);
   CONVERT_SMI_ARG_CHECKED(flags, 1);
   CHECK(IsAligned(size, kPointerSize));
   CHECK(size > 0);
-  CHECK(size <= kMaxRegularHeapObjectSize);
   bool double_align = AllocateDoubleAlignFlag::decode(flags);
   AllocationSpace space = AllocateTargetSpace::decode(flags);
+  CHECK(size <= kMaxRegularHeapObjectSize || space == LO_SPACE);
   return *isolate->factory()->NewFillerObject(size, double_align, space);
 }
 
@@ -337,6 +313,7 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_SMI_ARG_CHECKED(length, 0);
+  if (length == 0) return isolate->heap()->empty_string();
   Handle<SeqOneByteString> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result, isolate->factory()->NewRawOneByteString(length));
@@ -347,6 +324,7 @@
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_SMI_ARG_CHECKED(length, 0);
+  if (length == 0) return isolate->heap()->empty_string();
   Handle<SeqTwoByteString> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result, isolate->factory()->NewRawTwoByteString(length));
@@ -365,20 +343,19 @@
 bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
   JavaScriptFrameIterator it(isolate);
   if (!it.done()) {
-    JavaScriptFrame* frame = it.frame();
-    JSFunction* fun = frame->function();
-    Object* script = fun->shared()->script();
+    // Compute the location from the function and the relocation info of the
+    // baseline code. For optimized code this will use the deoptimization
+    // information to get canonical location information.
+    List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+    it.frame()->Summarize(&frames);
+    auto& summary = frames.last().AsJavaScript();
+    Handle<SharedFunctionInfo> shared(summary.function()->shared());
+    Handle<Object> script(shared->script(), isolate);
+    int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
     if (script->IsScript() &&
-        !(Script::cast(script)->source()->IsUndefined(isolate))) {
-      Handle<Script> casted_script(Script::cast(script), isolate);
-      // Compute the location from the function and the relocation info of the
-      // baseline code. For optimized code this will use the deoptimization
-      // information to get canonical location information.
-      List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
-      it.frame()->Summarize(&frames);
-      FrameSummary& summary = frames.last();
-      int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
-      *target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
+        !(Handle<Script>::cast(script)->source()->IsUndefined(isolate))) {
+      Handle<Script> casted_script = Handle<Script>::cast(script);
+      *target = MessageLocation(casted_script, pos, pos + 1, shared);
       return true;
     }
   }
@@ -389,13 +366,9 @@
 Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
   MessageLocation location;
   if (ComputeLocation(isolate, &location)) {
-    Zone zone(isolate->allocator(), ZONE_NAME);
-    std::unique_ptr<ParseInfo> info(
-        location.function()->shared()->is_function()
-            ? new ParseInfo(&zone, handle(location.function()->shared()))
-            : new ParseInfo(&zone, location.script()));
-    if (Parser::ParseStatic(info.get())) {
-      CallPrinter printer(isolate, location.function()->shared()->IsBuiltin());
+    std::unique_ptr<ParseInfo> info(new ParseInfo(location.shared()));
+    if (parsing::ParseAny(info.get())) {
+      CallPrinter printer(isolate, location.shared()->IsUserJavaScript());
       Handle<String> str = printer.Print(info->literal(), location.start_pos());
       if (str->length() > 0) return str;
     } else {
@@ -407,7 +380,6 @@
 
 }  // namespace
 
-
 RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
@@ -522,15 +494,6 @@
       isolate, Object::OrdinaryHasInstance(isolate, callable, object));
 }
 
-RUNTIME_FUNCTION(Runtime_IsWasmInstance) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
-  CONVERT_ARG_CHECKED(Object, object, 0);
-  bool is_wasm_instance =
-      object->IsJSObject() && wasm::IsWasmInstance(JSObject::cast(object));
-  return *isolate->factory()->ToBoolean(is_wasm_instance);
-}
-
 RUNTIME_FUNCTION(Runtime_Typeof) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
@@ -538,5 +501,29 @@
   return *Object::TypeOf(isolate, object);
 }
 
+RUNTIME_FUNCTION(Runtime_AllowDynamicFunction) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
+  Handle<JSObject> global_proxy(target->global_proxy(), isolate);
+  return *isolate->factory()->ToBoolean(
+      Builtins::AllowDynamicFunction(isolate, target, global_proxy));
+}
+
+RUNTIME_FUNCTION(Runtime_CreateAsyncFromSyncIterator) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+
+  CONVERT_ARG_HANDLE_CHECKED(Object, sync_iterator, 0);
+
+  if (!sync_iterator->IsJSReceiver()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewTypeError(MessageTemplate::kSymbolIteratorInvalid));
+  }
+
+  return *isolate->factory()->NewJSAsyncFromSyncIterator(
+      Handle<JSReceiver>::cast(sync_iterator));
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-interpreter.cc b/src/runtime/runtime-interpreter.cc
index 62eee17..9f3897b 100644
--- a/src/runtime/runtime-interpreter.cc
+++ b/src/runtime/runtime-interpreter.cc
@@ -21,12 +21,17 @@
 
 RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
   HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
-  CONVERT_SMI_ARG_CHECKED(pretenured_flag, 1);
+  CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 1);
+  CONVERT_SMI_ARG_CHECKED(index, 2);
+  CONVERT_SMI_ARG_CHECKED(pretenured_flag, 3);
   Handle<Context> context(isolate->context(), isolate);
+  FeedbackSlot slot = FeedbackVector::ToSlot(index);
+  Handle<Cell> vector_cell(Cell::cast(vector->Get(slot)), isolate);
   return *isolate->factory()->NewFunctionFromSharedFunctionInfo(
-      shared, context, static_cast<PretenureFlag>(pretenured_flag));
+      shared, context, vector_cell,
+      static_cast<PretenureFlag>(pretenured_flag));
 }
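Runtime_InterpreterNewClosure now takes the feedback vector and a slot index alongside the SharedFunctionInfo, so every closure minted for the same source position shares the Cell stored in that slot rather than allocating its own literals/feedback storage. A conceptual model with hypothetical types (not V8's actual FeedbackVector API):

    #include <memory>
    #include <vector>

    // Hypothetical stand-ins: one cell per feedback slot, shared by all
    // closures created from the same function literal.
    struct FeedbackCell { /* cached literals, call-site feedback, ... */ };

    struct FeedbackVector {
      std::vector<std::shared_ptr<FeedbackCell>> slots;

      // Feedback collected through one closure is visible to every other
      // closure minted for the same slot.
      std::shared_ptr<FeedbackCell> GetCell(int index) {
        if (!slots[index]) slots[index] = std::make_shared<FeedbackCell>();
        return slots[index];
      }
    };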
 
 namespace {
@@ -155,22 +160,6 @@
   return isolate->heap()->undefined_value();
 }
 
-RUNTIME_FUNCTION(Runtime_InterpreterClearPendingMessage) {
-  SealHandleScope shs(isolate);
-  DCHECK_EQ(0, args.length());
-  Object* message = isolate->thread_local_top()->pending_message_obj_;
-  isolate->clear_pending_message();
-  return message;
-}
-
-RUNTIME_FUNCTION(Runtime_InterpreterSetPendingMessage) {
-  SealHandleScope shs(isolate);
-  DCHECK_EQ(1, args.length());
-  CONVERT_ARG_HANDLE_CHECKED(Object, message, 0);
-  isolate->thread_local_top()->pending_message_obj_ = *message;
-  return isolate->heap()->undefined_value();
-}
-
 RUNTIME_FUNCTION(Runtime_InterpreterAdvanceBytecodeOffset) {
   SealHandleScope shs(isolate);
   DCHECK_EQ(2, args.length());
diff --git a/src/runtime/runtime-literals.cc b/src/runtime/runtime-literals.cc
index 8bb4522..7beadf5 100644
--- a/src/runtime/runtime-literals.cc
+++ b/src/runtime/runtime-literals.cc
@@ -15,31 +15,23 @@
 namespace internal {
 
 static Handle<Map> ComputeObjectLiteralMap(
-    Handle<Context> context, Handle<FixedArray> constant_properties,
+    Handle<Context> context,
+    Handle<BoilerplateDescription> boilerplate_description,
     bool* is_result_from_cache) {
-  int properties_length = constant_properties->length();
-  int number_of_properties = properties_length / 2;
-
-  for (int p = 0; p != properties_length; p += 2) {
-    Object* key = constant_properties->get(p);
-    uint32_t element_index = 0;
-    if (key->ToArrayIndex(&element_index)) {
-      // An index key does not require space in the property backing store.
-      number_of_properties--;
-    }
-  }
+  int number_of_properties = boilerplate_description->backing_store_size();
   Isolate* isolate = context->GetIsolate();
   return isolate->factory()->ObjectLiteralMapFromCache(
       context, number_of_properties, is_result_from_cache);
 }
 
 MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
-    Isolate* isolate, Handle<LiteralsArray> literals,
-    Handle<FixedArray> constant_properties);
+    Isolate* isolate, Handle<FeedbackVector> vector,
+    Handle<BoilerplateDescription> boilerplate_description);
 
 MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
-    Isolate* isolate, Handle<LiteralsArray> literals,
-    Handle<FixedArray> constant_properties, bool should_have_fast_elements) {
+    Isolate* isolate, Handle<FeedbackVector> vector,
+    Handle<BoilerplateDescription> boilerplate_description,
+    bool should_have_fast_elements) {
   Handle<Context> context = isolate->native_context();
 
   // In case we have function literals, we want the object to be in
@@ -47,11 +39,11 @@
   // maps with constant functions can't be shared if the functions are
   // not the same (which is the common case).
   bool is_result_from_cache = false;
-  Handle<Map> map = ComputeObjectLiteralMap(context, constant_properties,
+  Handle<Map> map = ComputeObjectLiteralMap(context, boilerplate_description,
                                             &is_result_from_cache);
 
   PretenureFlag pretenure_flag =
-      isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
+      isolate->heap()->InNewSpace(*vector) ? NOT_TENURED : TENURED;
 
   Handle<JSObject> boilerplate =
       isolate->factory()->NewJSObjectFromMap(map, pretenure_flag);
@@ -60,26 +52,27 @@
   if (!should_have_fast_elements) JSObject::NormalizeElements(boilerplate);
 
   // Add the constant properties to the boilerplate.
-  int length = constant_properties->length();
+  int length = boilerplate_description->size();
   bool should_transform =
       !is_result_from_cache && boilerplate->HasFastProperties();
   bool should_normalize = should_transform;
   if (should_normalize) {
     // TODO(verwaest): We might not want to ever normalize here.
-    JSObject::NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES,
-                                  length / 2, "Boilerplate");
+    JSObject::NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES, length,
+                                  "Boilerplate");
   }
   // TODO(verwaest): Support tracking representations in the boilerplate.
-  for (int index = 0; index < length; index += 2) {
-    Handle<Object> key(constant_properties->get(index + 0), isolate);
-    Handle<Object> value(constant_properties->get(index + 1), isolate);
-    if (value->IsFixedArray()) {
-      // The value contains the constant_properties of a
+  for (int index = 0; index < length; index++) {
+    Handle<Object> key(boilerplate_description->name(index), isolate);
+    Handle<Object> value(boilerplate_description->value(index), isolate);
+    if (value->IsBoilerplateDescription()) {
+      // The value contains the boilerplate properties of a
       // simple object or array literal.
-      Handle<FixedArray> array = Handle<FixedArray>::cast(value);
+      Handle<BoilerplateDescription> boilerplate =
+          Handle<BoilerplateDescription>::cast(value);
       ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, value, CreateLiteralBoilerplate(isolate, literals, array),
-          Object);
+          isolate, value,
+          CreateLiteralBoilerplate(isolate, vector, boilerplate), Object);
     }
     MaybeHandle<Object> maybe_result;
     uint32_t element_index = 0;
@@ -112,21 +105,20 @@
 }
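The walk above reflects a layout change: constant properties used to arrive as one flat FixedArray of [key0, value0, key1, value1, ...] iterated with stride 2, while a BoilerplateDescription exposes name(i)/value(i) accessors plus a precomputed backing_store_size(), which is why ComputeObjectLiteralMap no longer loops over the keys itself. A minimal sketch of the new shape (illustrative types only; real V8 stores tagged heap values):

    #include <string>
    #include <vector>

    // Sketch of the BoilerplateDescription shape, not V8's actual class.
    class BoilerplateDescription {
     public:
      int size() const { return static_cast<int>(names_.size()); }
      const std::string& name(int i) const { return names_[i]; }
      int value(int i) const { return values_[i]; }
      // Index-like keys go into elements rather than the property backing
      // store, so they are excluded from this count when it is computed.
      int backing_store_size() const { return backing_store_size_; }

     private:
      std::vector<std::string> names_;
      std::vector<int> values_;
      int backing_store_size_ = 0;
    };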
 
 static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
-    Isolate* isolate, Handle<LiteralsArray> literals,
-    Handle<FixedArray> elements) {
+    Isolate* isolate, Handle<FeedbackVector> vector,
+    Handle<ConstantElementsPair> elements) {
   // Create the JSArray.
   Handle<JSFunction> constructor = isolate->array_function();
 
   PretenureFlag pretenure_flag =
-      isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
+      isolate->heap()->InNewSpace(*vector) ? NOT_TENURED : TENURED;
 
   Handle<JSArray> object = Handle<JSArray>::cast(
       isolate->factory()->NewJSObject(constructor, pretenure_flag));
 
   ElementsKind constant_elements_kind =
-      static_cast<ElementsKind>(Smi::cast(elements->get(0))->value());
-  Handle<FixedArrayBase> constant_elements_values(
-      FixedArrayBase::cast(elements->get(1)));
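+  // ConstantElementsPair is a typed replacement for the old two-slot
+  // FixedArray: slot 0 held the ElementsKind as a Smi, slot 1 the values.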
+      static_cast<ElementsKind>(elements->elements_kind());
+  Handle<FixedArrayBase> constant_elements_values(elements->constant_values());
 
   {
     DisallowHeapAllocation no_gc;
@@ -162,15 +154,16 @@
       copied_elements_values = fixed_array_values_copy;
       FOR_WITH_HANDLE_SCOPE(
           isolate, int, i = 0, i, i < fixed_array_values->length(), i++, {
-            if (fixed_array_values->get(i)->IsFixedArray()) {
-              // The value contains the constant_properties of a
+            if (fixed_array_values->get(i)->IsBoilerplateDescription()) {
+              // The value contains the boilerplate properties of a
               // simple object or array literal.
-              Handle<FixedArray> fa(
-                  FixedArray::cast(fixed_array_values->get(i)));
+              Handle<BoilerplateDescription> boilerplate(
+                  BoilerplateDescription::cast(fixed_array_values->get(i)));
               Handle<Object> result;
               ASSIGN_RETURN_ON_EXCEPTION(
                   isolate, result,
-                  CreateLiteralBoilerplate(isolate, literals, fa), Object);
+                  CreateLiteralBoilerplate(isolate, vector, boilerplate),
+                  Object);
               fixed_array_values_copy->set(i, *result);
             }
           });
@@ -184,16 +177,25 @@
 }
 
 MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
-    Isolate* isolate, Handle<LiteralsArray> literals,
-    Handle<FixedArray> array) {
-  Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
+    Isolate* isolate, Handle<FeedbackVector> vector,
+    Handle<BoilerplateDescription> array) {
+  Handle<HeapObject> elements = CompileTimeValue::GetElements(array);
   switch (CompileTimeValue::GetLiteralType(array)) {
-    case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
-      return CreateObjectLiteralBoilerplate(isolate, literals, elements, true);
-    case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
-      return CreateObjectLiteralBoilerplate(isolate, literals, elements, false);
-    case CompileTimeValue::ARRAY_LITERAL:
-      return CreateArrayLiteralBoilerplate(isolate, literals, elements);
+    case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS: {
+      Handle<BoilerplateDescription> props =
+          Handle<BoilerplateDescription>::cast(elements);
+      return CreateObjectLiteralBoilerplate(isolate, vector, props, true);
+    }
+    case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS: {
+      Handle<BoilerplateDescription> props =
+          Handle<BoilerplateDescription>::cast(elements);
+      return CreateObjectLiteralBoilerplate(isolate, vector, props, false);
+    }
+    case CompileTimeValue::ARRAY_LITERAL: {
+      Handle<ConstantElementsPair> elems =
+          Handle<ConstantElementsPair>::cast(elements);
+      return CreateArrayLiteralBoilerplate(isolate, vector, elems);
+    }
     default:
       UNREACHABLE();
       return MaybeHandle<Object>();
@@ -208,13 +210,15 @@
   CONVERT_SMI_ARG_CHECKED(index, 1);
   CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
   CONVERT_SMI_ARG_CHECKED(flags, 3);
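+  // The regexp boilerplate now lives in the closure's feedback vector
+  // rather than a separate LiteralsArray; map the index to its slot.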
+  FeedbackSlot literal_slot(FeedbackVector::ToSlot(index));
 
   // Check if boilerplate exists. If not, create it first.
-  Handle<Object> boilerplate(closure->literals()->literal(index), isolate);
+  Handle<Object> boilerplate(closure->feedback_vector()->Get(literal_slot),
+                             isolate);
   if (boilerplate->IsUndefined(isolate)) {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, boilerplate, JSRegExp::New(pattern, JSRegExp::Flags(flags)));
-    closure->literals()->set_literal(index, *boilerplate);
+    closure->feedback_vector()->Set(literal_slot, *boilerplate);
   }
   return *JSRegExp::Copy(Handle<JSRegExp>::cast(boilerplate));
 }
@@ -225,24 +229,25 @@
   DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
+  CONVERT_ARG_HANDLE_CHECKED(BoilerplateDescription, boilerplate_description,
+                             2);
   CONVERT_SMI_ARG_CHECKED(flags, 3);
-  Handle<LiteralsArray> literals(closure->literals(), isolate);
+  Handle<FeedbackVector> vector(closure->feedback_vector(), isolate);
   bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
   bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
 
-  CHECK(literals_index >= 0);
-  CHECK(literals_index < literals->literals_count());
+  FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
+  CHECK(literals_slot.ToInt() < vector->slot_count());
 
   // Check if boilerplate exists. If not, create it first.
-  Handle<Object> literal_site(literals->literal(literals_index), isolate);
+  Handle<Object> literal_site(vector->Get(literals_slot), isolate);
   Handle<AllocationSite> site;
   Handle<JSObject> boilerplate;
   if (literal_site->IsUndefined(isolate)) {
     Handle<Object> raw_boilerplate;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, raw_boilerplate,
-        CreateObjectLiteralBoilerplate(isolate, literals, constant_properties,
+        CreateObjectLiteralBoilerplate(isolate, vector, boilerplate_description,
                                        should_have_fast_elements));
     boilerplate = Handle<JSObject>::cast(raw_boilerplate);
 
@@ -253,7 +258,7 @@
     creation_context.ExitScope(site, boilerplate);
 
     // Update the function's literal and return the boilerplate.
-    literals->set_literal(literals_index, *site);
+    vector->Set(literals_slot, *site);
   } else {
     site = Handle<AllocationSite>::cast(literal_site);
     boilerplate =
@@ -269,17 +274,16 @@
 }
 
 MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
-    Isolate* isolate, Handle<LiteralsArray> literals, int literals_index,
-    Handle<FixedArray> elements) {
+    Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot literals_slot,
+    Handle<ConstantElementsPair> elements) {
   // Check if boilerplate exists. If not, create it first.
-  Handle<Object> literal_site(literals->literal(literals_index), isolate);
+  Handle<Object> literal_site(vector->Get(literals_slot), isolate);
   Handle<AllocationSite> site;
   if (literal_site->IsUndefined(isolate)) {
-    DCHECK(*elements != isolate->heap()->empty_fixed_array());
     Handle<Object> boilerplate;
     ASSIGN_RETURN_ON_EXCEPTION(
         isolate, boilerplate,
-        CreateArrayLiteralBoilerplate(isolate, literals, elements),
+        CreateArrayLiteralBoilerplate(isolate, vector, elements),
         AllocationSite);
 
     AllocationSiteCreationContext creation_context(isolate);
@@ -290,7 +294,7 @@
     }
     creation_context.ExitScope(site, Handle<JSObject>::cast(boilerplate));
 
-    literals->set_literal(literals_index, *site);
+    vector->Set(literals_slot, *site);
   } else {
     site = Handle<AllocationSite>::cast(literal_site);
   }
@@ -298,15 +302,14 @@
   return site;
 }
 
-
 static MaybeHandle<JSObject> CreateArrayLiteralImpl(
-    Isolate* isolate, Handle<LiteralsArray> literals, int literals_index,
-    Handle<FixedArray> elements, int flags) {
-  CHECK(literals_index >= 0 && literals_index < literals->literals_count());
+    Isolate* isolate, Handle<FeedbackVector> vector, FeedbackSlot literals_slot,
+    Handle<ConstantElementsPair> elements, int flags) {
+  CHECK(literals_slot.ToInt() < vector->slot_count());
   Handle<AllocationSite> site;
   ASSIGN_RETURN_ON_EXCEPTION(
       isolate, site,
-      GetLiteralAllocationSite(isolate, literals, literals_index, elements),
+      GetLiteralAllocationSite(isolate, vector, literals_slot, elements),
       JSObject);
 
   bool enable_mementos = (flags & ArrayLiteral::kDisableMementos) == 0;
@@ -328,13 +331,14 @@
   DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
+  CONVERT_ARG_HANDLE_CHECKED(ConstantElementsPair, elements, 2);
   CONVERT_SMI_ARG_CHECKED(flags, 3);
 
-  Handle<LiteralsArray> literals(closure->literals(), isolate);
+  FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
+  Handle<FeedbackVector> vector(closure->feedback_vector(), isolate);
   RETURN_RESULT_OR_FAILURE(
-      isolate, CreateArrayLiteralImpl(isolate, literals, literals_index,
-                                      elements, flags));
+      isolate,
+      CreateArrayLiteralImpl(isolate, vector, literals_slot, elements, flags));
 }
 
 
@@ -343,13 +347,13 @@
   DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
+  CONVERT_ARG_HANDLE_CHECKED(ConstantElementsPair, elements, 2);
 
-  Handle<LiteralsArray> literals(closure->literals(), isolate);
+  Handle<FeedbackVector> vector(closure->feedback_vector(), isolate);
+  FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
   RETURN_RESULT_OR_FAILURE(
-      isolate,
-      CreateArrayLiteralImpl(isolate, literals, literals_index, elements,
-                             ArrayLiteral::kShallowElements));
+      isolate, CreateArrayLiteralImpl(isolate, vector, literals_slot, elements,
+                                      ArrayLiteral::kShallowElements));
 }
 
 }  // namespace internal
diff --git a/src/runtime/runtime-liveedit.cc b/src/runtime/runtime-liveedit.cc
index a19ccaa..5649325 100644
--- a/src/runtime/runtime-liveedit.cc
+++ b/src/runtime/runtime-liveedit.cc
@@ -21,7 +21,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditFindSharedFunctionInfosForScript) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(JSValue, script_value, 0);
 
   CHECK(script_value->value()->IsScript());
@@ -63,7 +63,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditGatherCompileInfo) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_CHECKED(JSValue, script, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
 
@@ -81,7 +81,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditReplaceScript) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_CHECKED(JSValue, original_script_value, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, old_script_name, 2);
@@ -100,15 +100,31 @@
   }
 }
 
+// Recreate the shared function infos array after changing the IDs of all
+// SharedFunctionInfos.
+RUNTIME_FUNCTION(Runtime_LiveEditFixupScript) {
+  HandleScope scope(isolate);
+  CHECK(isolate->debug()->live_edit_enabled());
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_CHECKED(JSValue, script_value, 0);
+  CONVERT_INT32_ARG_CHECKED(max_function_literal_id, 1);
+
+  CHECK(script_value->value()->IsScript());
+  Handle<Script> script(Script::cast(script_value->value()));
+
+  LiveEdit::FixupScript(script, max_function_literal_id);
+  return isolate->heap()->undefined_value();
+}
 
 RUNTIME_FUNCTION(Runtime_LiveEditFunctionSourceUpdated) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0);
+  CONVERT_INT32_ARG_CHECKED(new_function_literal_id, 1);
   CHECK(SharedInfoWrapper::IsInstance(shared_info));
 
-  LiveEdit::FunctionSourceUpdated(shared_info);
+  LiveEdit::FunctionSourceUpdated(shared_info, new_function_literal_id);
   return isolate->heap()->undefined_value();
 }
 
@@ -117,7 +133,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditReplaceFunctionCode) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1);
   CHECK(SharedInfoWrapper::IsInstance(shared_info));
@@ -131,7 +147,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditFunctionSetScript) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, script_object, 1);
 
@@ -158,7 +174,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditReplaceRefToNestedFunction) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1);
@@ -181,7 +197,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditPatchFunctionPositions) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1);
   CHECK(SharedInfoWrapper::IsInstance(shared_array));
@@ -198,7 +214,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArray, old_shared_array, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, new_shared_array, 1);
   CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 2);
@@ -236,7 +252,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditCompareStrings) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
 
@@ -256,7 +272,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditRestartFrame) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   CHECK(isolate->debug()->CheckExecutionState(break_id));
 
diff --git a/src/runtime/runtime-maths.cc b/src/runtime/runtime-maths.cc
index 404305a..4cb4f00 100644
--- a/src/runtime/runtime-maths.cc
+++ b/src/runtime/runtime-maths.cc
@@ -9,13 +9,16 @@
 #include "src/base/utils/random-number-generator.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
+#include "src/counters.h"
+#include "src/double.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
 RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
 
   Handle<Context> native_context = isolate->native_context();
   DCHECK_EQ(0, native_context->math_random_index()->value());
diff --git a/src/runtime/runtime-module.cc b/src/runtime/runtime-module.cc
index 2b81343..f36a09b 100644
--- a/src/runtime/runtime-module.cc
+++ b/src/runtime/runtime-module.cc
@@ -5,13 +5,22 @@
 #include "src/runtime/runtime-utils.h"
 
 #include "src/arguments.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
+RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  // TODO(gsathya): Implement ImportCall.
+  return isolate->heap()->undefined_value();
+}
+
 RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_SMI_ARG_CHECKED(module_request, 0);
   Handle<Module> module(isolate->context()->module());
   return *Module::GetModuleNamespace(module, module_request);
@@ -19,7 +28,7 @@
 
 RUNTIME_FUNCTION(Runtime_LoadModuleVariable) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_SMI_ARG_CHECKED(index, 0);
   Handle<Module> module(isolate->context()->module());
   return *Module::LoadVariable(module, index);
@@ -27,7 +36,7 @@
 
 RUNTIME_FUNCTION(Runtime_StoreModuleVariable) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_SMI_ARG_CHECKED(index, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
   Handle<Module> module(isolate->context()->module());
diff --git a/src/runtime/runtime-numbers.cc b/src/runtime/runtime-numbers.cc
index bfe8763..4d8d5d2 100644
--- a/src/runtime/runtime-numbers.cc
+++ b/src/runtime/runtime-numbers.cc
@@ -15,7 +15,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsValidSmi) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]);
   return isolate->heap()->ToBoolean(Smi::IsValid(number));
@@ -73,7 +73,7 @@
 // ES6 18.2.4 parseFloat(string)
 RUNTIME_FUNCTION(Runtime_StringParseFloat) {
   HandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
 
   double value =
@@ -86,7 +86,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberToString) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
 
   return *isolate->factory()->NumberToString(number);
@@ -95,7 +95,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
 
   return *isolate->factory()->NumberToString(number, false);
@@ -106,7 +106,7 @@
 // a small integer.
 RUNTIME_FUNCTION(Runtime_NumberToSmi) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Object, obj, 0);
   if (obj->IsSmi()) {
     return obj;
@@ -126,7 +126,7 @@
 // compared lexicographically.
 RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_SMI_ARG_CHECKED(x_value, 0);
   CONVERT_SMI_ARG_CHECKED(y_value, 1);
 
@@ -200,14 +200,14 @@
 
 RUNTIME_FUNCTION(Runtime_MaxSmi) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   return Smi::FromInt(Smi::kMaxValue);
 }
 
 
 RUNTIME_FUNCTION(Runtime_IsSmi) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Object, obj, 0);
   return isolate->heap()->ToBoolean(obj->IsSmi());
 }
@@ -215,21 +215,21 @@
 
 RUNTIME_FUNCTION(Runtime_GetRootNaN) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   return isolate->heap()->nan_value();
 }
 
 
 RUNTIME_FUNCTION(Runtime_GetHoleNaNUpper) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   return *isolate->factory()->NewNumberFromUint(kHoleNanUpper32);
 }
 
 
 RUNTIME_FUNCTION(Runtime_GetHoleNaNLower) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   return *isolate->factory()->NewNumberFromUint(kHoleNanLower32);
 }
 
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index c7e9cf3..dd24728 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -19,7 +19,7 @@
                                                Handle<Object> object,
                                                Handle<Object> key,
                                                bool* is_found_out) {
-  if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
+  if (object->IsNullOrUndefined(isolate)) {
     THROW_NEW_ERROR(
         isolate,
         NewTypeError(MessageTemplate::kNonObjectPropertyLoad, key, object),
@@ -56,6 +56,14 @@
       DisallowHeapAllocation no_allocation;
       Handle<JSObject> receiver = Handle<JSObject>::cast(receiver_obj);
       Handle<Name> key = Handle<Name>::cast(key_obj);
+      // Get to a ThinString's referenced internalized string, but don't
+      // otherwise force internalization. We assume that internalization
+      // (which is a dictionary lookup with a non-internalized key) is
+      // about as expensive as doing the property dictionary lookup with
+      // the non-internalized key directly.
+      if (key->IsThinString()) {
+        key = handle(Handle<ThinString>::cast(key)->actual(), isolate);
+      }
       if (receiver->IsJSGlobalObject()) {
         // Attempt dictionary lookup.
         GlobalDictionary* dictionary = receiver->global_dictionary();
@@ -63,7 +71,7 @@
         if (entry != GlobalDictionary::kNotFound) {
           DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
           PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(entry));
-          if (cell->property_details().type() == DATA) {
+          if (cell->property_details().kind() == kData) {
             Object* value = cell->value();
             if (!value->IsTheHole(isolate)) {
               return Handle<Object>(value, isolate);
@@ -76,7 +84,7 @@
         NameDictionary* dictionary = receiver->property_dictionary();
         int entry = dictionary->FindEntry(key);
         if ((entry != NameDictionary::kNotFound) &&
-            (dictionary->DetailsAt(entry).type() == DATA)) {
+            (dictionary->DetailsAt(entry).kind() == kData)) {
           Object* value = dictionary->ValueAt(entry);
           return Handle<Object>(value, isolate);
         }
@@ -133,7 +141,7 @@
 // ES6 19.1.3.2
 RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
   HandleScope scope(isolate);
-  Handle<Object> property = args.at<Object>(1);
+  Handle<Object> property = args.at(1);
 
   Handle<Name> key;
   uint32_t index;
@@ -145,7 +153,7 @@
     key_is_array_index = key->AsArrayIndex(&index);
   }
 
-  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> object = args.at(0);
 
   if (object->IsJSObject()) {
     Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
@@ -199,7 +207,7 @@
         key_is_array_index
             ? index < static_cast<uint32_t>(String::cast(*object)->length())
             : key->Equals(isolate->heap()->length_string()));
-  } else if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
+  } else if (object->IsNullOrUndefined(isolate)) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
   }
@@ -212,7 +220,7 @@
 // an Object.create stub.
 RUNTIME_FUNCTION(Runtime_ObjectCreate) {
   HandleScope scope(isolate);
-  Handle<Object> prototype = args.at<Object>(0);
+  Handle<Object> prototype = args.at(0);
   if (!prototype->IsNull(isolate) && !prototype->IsJSReceiver()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
@@ -222,30 +230,8 @@
   // function's initial map from the current native context.
   // TODO(bmeurer): Use a dedicated cache for Object.create; think about
   // slack tracking for Object.create.
-  Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
-                  isolate);
-  if (map->prototype() != *prototype) {
-    if (prototype->IsNull(isolate)) {
-      map = isolate->slow_object_with_null_prototype_map();
-    } else if (prototype->IsJSObject()) {
-      Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
-      if (!js_prototype->map()->is_prototype_map()) {
-        JSObject::OptimizeAsPrototype(js_prototype, FAST_PROTOTYPE);
-      }
-      Handle<PrototypeInfo> info =
-          Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
-      // TODO(verwaest): Use inobject slack tracking for this map.
-      if (info->HasObjectCreateMap()) {
-        map = handle(info->ObjectCreateMap(), isolate);
-      } else {
-        map = Map::CopyInitialMap(map);
-        Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
-        PrototypeInfo::SetObjectCreateMap(info, map);
-      }
-    } else {
-      map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
-    }
-  }
+  Handle<Map> map =
+      Map::GetObjectCreateMap(Handle<HeapObject>::cast(prototype));
 
   bool is_dictionary_map = map->is_dictionary_map();
   Handle<FixedArray> object_properties;
@@ -262,7 +248,7 @@
   }
 
   // Define the properties if properties was specified and is not undefined.
-  Handle<Object> properties = args.at<Object>(1);
+  Handle<Object> properties = args.at(1);
   if (!properties->IsUndefined(isolate)) {
     RETURN_FAILURE_ON_EXCEPTION(
         isolate, JSReceiver::DefineProperties(isolate, object, properties));
@@ -276,7 +262,7 @@
                                                Handle<Object> key,
                                                Handle<Object> value,
                                                LanguageMode language_mode) {
-  if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
+  if (object->IsNullOrUndefined(isolate)) {
     THROW_NEW_ERROR(
         isolate,
         NewTypeError(MessageTemplate::kNonObjectPropertyStore, key, object),
@@ -297,7 +283,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetPrototype) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
   RETURN_RESULT_OR_FAILURE(isolate, JSReceiver::GetPrototype(isolate, obj));
 }
@@ -305,7 +291,7 @@
 
 RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
   MAYBE_RETURN(
@@ -316,7 +302,7 @@
 
 RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_SMI_ARG_CHECKED(properties, 1);
   // Conservative upper limit to prevent fuzz tests from going OOM.
@@ -331,7 +317,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetProperty) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
@@ -343,7 +329,7 @@
 // KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric.
 RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
@@ -503,7 +489,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
   CONVERT_SMI_ARG_CHECKED(filter_value, 1);
   PropertyFilter filter = static_cast<PropertyFilter>(filter_value);
@@ -522,7 +508,7 @@
 // args[0]: object
 RUNTIME_FUNCTION(Runtime_GetInterceptorInfo) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   if (!args[0]->IsJSObject()) {
     return Smi::kZero;
   }
@@ -538,7 +524,7 @@
 
 RUNTIME_FUNCTION(Runtime_ToFastProperties) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   if (object->IsJSObject() && !object->IsJSGlobalObject()) {
     JSObject::MigrateSlowToFast(Handle<JSObject>::cast(object), 0,
@@ -550,7 +536,7 @@
 
 RUNTIME_FUNCTION(Runtime_AllocateHeapNumber) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   return *isolate->factory()->NewHeapNumber(0);
 }
 
@@ -566,7 +552,7 @@
 
 RUNTIME_FUNCTION(Runtime_FinalizeInstanceSize) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(Map, initial_map, 0);
   initial_map->CompleteInobjectSlackTracking();
@@ -577,7 +563,7 @@
 
 RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
   CHECK((index->value() & 1) == 1);
@@ -596,10 +582,11 @@
 
 RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   if (!object->IsJSObject()) return Smi::kZero;
   Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+  // This could be a DCHECK, but tests call this function directly.
   if (!js_object->map()->is_deprecated()) return Smi::kZero;
   // This call must not cause lazy deopts, because it's called from deferred
   // code where we can't handle lazy deopts for lack of a suitable bailout
@@ -612,13 +599,13 @@
 
 RUNTIME_FUNCTION(Runtime_IsJSGlobalProxy) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Object, obj, 0);
   return isolate->heap()->ToBoolean(obj->IsJSGlobalProxy());
 }
 
 static bool IsValidAccessor(Isolate* isolate, Handle<Object> obj) {
-  return obj->IsUndefined(isolate) || obj->IsCallable() || obj->IsNull(isolate);
+  return obj->IsNullOrUndefined(isolate) || obj->IsCallable();
 }
 
 
@@ -630,7 +617,7 @@
 //           descriptor.
 RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 5);
+  DCHECK_EQ(5, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   CHECK(!obj->IsNull(isolate));
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
@@ -648,14 +635,36 @@
 
 RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 5);
+  DCHECK_EQ(6, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
-  CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
-  CONVERT_SMI_ARG_CHECKED(set_function_name, 4);
+  CONVERT_SMI_ARG_CHECKED(flag, 3);
+  CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 4);
+  CONVERT_SMI_ARG_CHECKED(index, 5);
 
-  if (set_function_name) {
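+  // Update the StoreDataPropertyInLiteral IC feedback: go monomorphic on
+  // the first (unique name, map) pair seen, and megamorphic on any
+  // mismatch or non-unique name.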
+  StoreDataPropertyInLiteralICNexus nexus(vector, vector->ToSlot(index));
+  if (nexus.ic_state() == UNINITIALIZED) {
+    if (name->IsUniqueName()) {
+      nexus.ConfigureMonomorphic(name, handle(object->map()));
+    } else {
+      nexus.ConfigureMegamorphic();
+    }
+  } else if (nexus.ic_state() == MONOMORPHIC) {
+    if (nexus.FindFirstMap() != object->map() ||
+        nexus.GetFeedbackExtra() != *name) {
+      nexus.ConfigureMegamorphic();
+    }
+  }
+
+  DataPropertyInLiteralFlags flags =
+      static_cast<DataPropertyInLiteralFlag>(flag);
+
+  PropertyAttributes attrs = (flags & DataPropertyInLiteralFlag::kDontEnum)
+                                 ? PropertyAttributes::DONT_ENUM
+                                 : PropertyAttributes::NONE;
+
+  if (flags & DataPropertyInLiteralFlag::kSetFunctionName) {
     DCHECK(value->IsJSFunction());
     JSFunction::SetName(Handle<JSFunction>::cast(value), name,
                         isolate->factory()->empty_string());
@@ -671,42 +680,10 @@
   return *object;
 }
 
-RUNTIME_FUNCTION(Runtime_DefineDataProperty) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 5);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
-  CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
-  CONVERT_SMI_ARG_CHECKED(set_function_name, 4);
-
-  if (set_function_name) {
-    DCHECK(value->IsJSFunction());
-    JSFunction::SetName(Handle<JSFunction>::cast(value), name,
-                        isolate->factory()->empty_string());
-  }
-
-  PropertyDescriptor desc;
-  desc.set_writable(!(attrs & ReadOnly));
-  desc.set_enumerable(!(attrs & DontEnum));
-  desc.set_configurable(!(attrs & DontDelete));
-  desc.set_value(value);
-
-  Maybe<bool> result = JSReceiver::DefineOwnProperty(isolate, receiver, name,
-                                                     &desc, Object::DONT_THROW);
-  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
-  if (result.IsNothing()) {
-    DCHECK(isolate->has_pending_exception());
-    return isolate->heap()->exception();
-  }
-
-  return *receiver;
-}
-
 // Return property without being observable by accessors or interceptors.
 RUNTIME_FUNCTION(Runtime_GetDataProperty) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
   return *JSReceiver::GetDataProperty(object, name);
@@ -714,17 +691,17 @@
 
 RUNTIME_FUNCTION(Runtime_GetConstructorName) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
 
-  CHECK(!object->IsUndefined(isolate) && !object->IsNull(isolate));
+  CHECK(!object->IsNullOrUndefined(isolate));
   Handle<JSReceiver> recv = Object::ToObject(isolate, object).ToHandleChecked();
   return *JSReceiver::GetConstructorName(recv);
 }
 
 RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(HeapObject, obj, 0);
   return isolate->heap()->ToBoolean(
       IsFastPackedElementsKind(obj->map()->elements_kind()));
@@ -733,7 +710,7 @@
 
 RUNTIME_FUNCTION(Runtime_ValueOf) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Object, obj, 0);
   if (!obj->IsJSValue()) return obj;
   return JSValue::cast(obj)->value();
@@ -742,7 +719,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Object, obj, 0);
   return isolate->heap()->ToBoolean(obj->IsJSReceiver());
 }
@@ -750,7 +727,7 @@
 
 RUNTIME_FUNCTION(Runtime_ClassOf) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Object, obj, 0);
   if (!obj->IsJSReceiver()) return isolate->heap()->null_value();
   return JSReceiver::cast(obj)->class_name();
@@ -759,7 +736,7 @@
 
 RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, getter, 2);
@@ -776,10 +753,60 @@
   return isolate->heap()->undefined_value();
 }
 
+RUNTIME_FUNCTION(Runtime_CopyDataProperties) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, source, 1);
+
+  // 2. If source is undefined or null, let keys be an empty List.
+  if (source->IsNullOrUndefined(isolate)) {
+    return isolate->heap()->undefined_value();
+  }
+
+  MAYBE_RETURN(JSReceiver::SetOrCopyDataProperties(isolate, target, source,
+                                                   nullptr, false),
+               isolate->heap()->exception());
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_CopyDataPropertiesWithExcludedProperties) {
+  HandleScope scope(isolate);
+  DCHECK_LE(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, source, 0);
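+  // args[0] is the source object; any remaining args are property keys to
+  // exclude from the copy (e.g. for object rest patterns).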
+
+  // 2. If source is undefined or null, let keys be an empty List.
+  if (source->IsNullOrUndefined(isolate)) {
+    return isolate->heap()->undefined_value();
+  }
+
+  ScopedVector<Handle<Object>> excluded_properties(args.length() - 1);
+  for (int i = 1; i < args.length(); i++) {
+    Handle<Object> property = args.at(i);
+    uint32_t property_num;
+    // Convert the property to a number if possible: computed properties
+    // that resolve to array indices reach this point as strings, because
+    // the desugaring for computed properties routes them through %ToName().
+    if (property->IsString() &&
+        String::cast(*property)->AsArrayIndex(&property_num)) {
+      property = isolate->factory()->NewNumberFromUint(property_num);
+    }
+
+    excluded_properties[i - 1] = property;
+  }
+
+  Handle<JSObject> target =
+      isolate->factory()->NewJSObject(isolate->object_function());
+  MAYBE_RETURN(JSReceiver::SetOrCopyDataProperties(isolate, target, source,
+                                                   &excluded_properties, false),
+               isolate->heap()->exception());
+  return *target;
+}
 
 RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, setter, 2);
@@ -947,7 +974,7 @@
 
 RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, o, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
diff --git a/src/runtime/runtime-promise.cc b/src/runtime/runtime-promise.cc
index 226993a..7f84199 100644
--- a/src/runtime/runtime-promise.cc
+++ b/src/runtime/runtime-promise.cc
@@ -1,27 +1,31 @@
 // Copyright 2016 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-
 #include "src/runtime/runtime-utils.h"
 
+#include "src/arguments.h"
+#include "src/counters.h"
 #include "src/debug/debug.h"
 #include "src/elements.h"
-#include "src/promise-utils.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
 namespace {
 
-void PromiseRejectEvent(Isolate* isolate, Handle<JSReceiver> promise,
+void PromiseRejectEvent(Isolate* isolate, Handle<JSPromise> promise,
                         Handle<Object> rejected_promise, Handle<Object> value,
                         bool debug_event) {
+  isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+                          isolate->factory()->undefined_value());
+
   if (isolate->debug()->is_active() && debug_event) {
     isolate->debug()->OnPromiseReject(rejected_promise, value);
   }
-  Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
-  // Do not report if we actually have a handler.
-  if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate)) {
+
+  // Report only if we don't actually have a handler.
+  if (!promise->has_handler()) {
     isolate->ReportPromiseReject(Handle<JSObject>::cast(promise), value,
                                  v8::kPromiseRejectWithNoHandler);
   }
@@ -30,9 +34,9 @@
 }  // namespace
 
 RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
 
   Handle<Object> rejected_promise = promise;
@@ -41,142 +45,54 @@
     // undefined, which will be interpreted by PromiseRejectEvent
     // as being a caught exception event.
     rejected_promise = isolate->GetPromiseOnStackOnThrow();
+    isolate->debug()->OnAsyncTaskEvent(
+        debug::kDebugEnqueuePromiseReject,
+        isolate->debug()->NextAsyncTaskId(promise), 0);
   }
   PromiseRejectEvent(isolate, promise, rejected_promise, value, true);
   return isolate->heap()->undefined_value();
 }
 
-RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
-  DCHECK(args.length() == 1);
+RUNTIME_FUNCTION(Runtime_ReportPromiseReject) {
+  DCHECK_EQ(2, args.length());
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
-  Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
+  CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+  isolate->ReportPromiseReject(Handle<JSObject>::cast(promise), value,
+                               v8::kPromiseRejectWithNoHandler);
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
+  DCHECK_EQ(1, args.length());
+  HandleScope scope(isolate);
+  CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
   // At this point, no revocation has been issued yet.
-  CHECK(JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate));
+  CHECK(!promise->has_handler());
   isolate->ReportPromiseReject(promise, Handle<Object>(),
                                v8::kPromiseHandlerAddedAfterReject);
   return isolate->heap()->undefined_value();
 }
 
-namespace {
-void EnqueuePromiseReactionJob(Isolate* isolate, Handle<Object> value,
-                               Handle<Object> tasks, Handle<Object> deferred,
-                               Handle<Object> status) {
-  Handle<Object> debug_id = isolate->factory()->undefined_value();
-  Handle<Object> debug_name = isolate->factory()->undefined_value();
-  if (isolate->debug()->is_active()) {
-    MaybeHandle<Object> maybe_result;
-    Handle<Object> argv[] = {deferred, status};
-    maybe_result = Execution::TryCall(
-        isolate, isolate->promise_debug_get_info(),
-        isolate->factory()->undefined_value(), arraysize(argv), argv);
-    Handle<Object> result;
-    if ((maybe_result).ToHandle(&result)) {
-      CHECK(result->IsJSArray());
-      Handle<JSArray> array = Handle<JSArray>::cast(result);
-      ElementsAccessor* accessor = array->GetElementsAccessor();
-      DCHECK(accessor->HasElement(array, 0));
-      DCHECK(accessor->HasElement(array, 1));
-      debug_id = accessor->Get(array, 0);
-      debug_name = accessor->Get(array, 1);
-    }
-  }
-  Handle<PromiseReactionJobInfo> info =
-      isolate->factory()->NewPromiseReactionJobInfo(value, tasks, deferred,
-                                                    debug_id, debug_name,
-                                                    isolate->native_context());
-  isolate->EnqueueMicrotask(info);
-}
-
-void PromiseFulfill(Isolate* isolate, Handle<JSReceiver> promise,
-                    Handle<Smi> status, Handle<Object> value,
-                    Handle<Symbol> reaction) {
-  Handle<Object> tasks = JSReceiver::GetDataProperty(promise, reaction);
-  if (!tasks->IsUndefined(isolate)) {
-    Handle<Object> deferred = JSReceiver::GetDataProperty(
-        promise, isolate->factory()->promise_deferred_reaction_symbol());
-    EnqueuePromiseReactionJob(isolate, value, tasks, deferred, status);
-  }
-}
-}  // namespace
-
-RUNTIME_FUNCTION(Runtime_PromiseReject) {
-  DCHECK(args.length() == 3);
-  HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, promise, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, reason, 1);
-  CONVERT_BOOLEAN_ARG_CHECKED(debug_event, 2);
-
-  PromiseRejectEvent(isolate, promise, promise, reason, debug_event);
-
-  Handle<Smi> status = handle(Smi::FromInt(kPromiseRejected), isolate);
-  Handle<Symbol> reaction =
-      isolate->factory()->promise_reject_reactions_symbol();
-  PromiseFulfill(isolate, promise, status, reason, reaction);
-  return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_PromiseFulfill) {
-  DCHECK(args.length() == 4);
-  HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, promise, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Smi, status, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
-  CONVERT_ARG_HANDLE_CHECKED(Symbol, reaction, 3);
-  PromiseFulfill(isolate, promise, status, value, reaction);
-  return isolate->heap()->undefined_value();
-}
-
 RUNTIME_FUNCTION(Runtime_EnqueuePromiseReactionJob) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Object, tasks, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, deferred, 2);
-  CONVERT_ARG_HANDLE_CHECKED(Object, status, 3);
-  EnqueuePromiseReactionJob(isolate, value, tasks, deferred, status);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(PromiseReactionJobInfo, info, 0);
+  isolate->EnqueueMicrotask(info);
   return isolate->heap()->undefined_value();
 }
 
 RUNTIME_FUNCTION(Runtime_EnqueuePromiseResolveThenableJob) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, resolution, 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, then, 2);
-
-  // TODO(gsathya): Add fast path for native promises with unmodified
-  // PromiseThen (which don't need these resolving functions, but
-  // instead can just call resolve/reject directly).
-  Handle<JSFunction> resolve, reject;
-  PromiseUtils::CreateResolvingFunctions(
-      isolate, promise, isolate->factory()->false_value(), &resolve, &reject);
-
-  Handle<Object> debug_id, debug_name;
-  if (isolate->debug()->is_active()) {
-    debug_id =
-        handle(Smi::FromInt(isolate->GetNextDebugMicrotaskId()), isolate);
-    debug_name = isolate->factory()->PromiseResolveThenableJob_string();
-    isolate->debug()->OnAsyncTaskEvent(isolate->factory()->enqueue_string(),
-                                       debug_id,
-                                       Handle<String>::cast(debug_name));
-  } else {
-    debug_id = isolate->factory()->undefined_value();
-    debug_name = isolate->factory()->undefined_value();
-  }
-
-  Handle<PromiseResolveThenableJobInfo> info =
-      isolate->factory()->NewPromiseResolveThenableJobInfo(
-          resolution, then, resolve, reject, debug_id, debug_name,
-          isolate->native_context());
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(PromiseResolveThenableJobInfo, info, 0);
   isolate->EnqueueMicrotask(info);
-
   return isolate->heap()->undefined_value();
 }
 
 RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
   isolate->EnqueueMicrotask(microtask);
   return isolate->heap()->undefined_value();
@@ -184,10 +100,76 @@
 
 RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   isolate->RunMicrotasks();
   return isolate->heap()->undefined_value();
 }
 
+RUNTIME_FUNCTION(Runtime_PromiseStatus) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+
+  return Smi::FromInt(promise->status());
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseResult) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+  return promise->result();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseMarkAsHandled) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_CHECKED(JSPromise, promise, 0);
+
+  promise->set_has_handler(true);
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseHookInit) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, parent, 1);
+  isolate->RunPromiseHook(PromiseHookType::kInit, promise, parent);
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseHookResolve) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+  isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+                          isolate->factory()->undefined_value());
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseHookBefore) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+  if (promise->IsJSPromise()) {
+    isolate->RunPromiseHook(PromiseHookType::kBefore,
+                            Handle<JSPromise>::cast(promise),
+                            isolate->factory()->undefined_value());
+  }
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseHookAfter) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+  if (promise->IsJSPromise()) {
+    isolate->RunPromiseHook(PromiseHookType::kAfter,
+                            Handle<JSPromise>::cast(promise),
+                            isolate->factory()->undefined_value());
+  }
+  return isolate->heap()->undefined_value();
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime-proxy.cc b/src/runtime/runtime-proxy.cc
index 87c7c91..de8231e 100644
--- a/src/runtime/runtime-proxy.cc
+++ b/src/runtime/runtime-proxy.cc
@@ -44,7 +44,7 @@
     // 6.a. Return Call(target, thisArgument, argumentsList).
     ScopedVector<Handle<Object>> argv(arguments_length);
     for (int i = 0; i < arguments_length; ++i) {
-      argv[i] = args.at<Object>(i + 1);
+      argv[i] = args.at(i + 1);
     }
     RETURN_RESULT_OR_FAILURE(
         isolate, Execution::Call(isolate, target, receiver, arguments_length,
@@ -100,7 +100,7 @@
     // 6.b. Return Construct(target, argumentsList, newTarget).
     ScopedVector<Handle<Object>> argv(arguments_length);
     for (int i = 0; i < arguments_length; ++i) {
-      argv[i] = args.at<Object>(i + 1);
+      argv[i] = args.at(i + 1);
     }
     RETURN_RESULT_OR_FAILURE(
         isolate, Execution::New(isolate, target, new_target, arguments_length,
@@ -135,7 +135,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsJSProxy) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Object, obj, 0);
   return isolate->heap()->ToBoolean(obj->IsJSProxy());
 }
@@ -143,7 +143,7 @@
 
 RUNTIME_FUNCTION(Runtime_JSProxyGetHandler) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
   return proxy->handler();
 }
@@ -151,7 +151,7 @@
 
 RUNTIME_FUNCTION(Runtime_JSProxyGetTarget) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
   return proxy->target();
 }
@@ -159,7 +159,7 @@
 
 RUNTIME_FUNCTION(Runtime_JSProxyRevoke) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, 0);
   JSProxy::Revoke(proxy);
   return isolate->heap()->undefined_value();
diff --git a/src/runtime/runtime-regexp.cc b/src/runtime/runtime-regexp.cc
index d572eed..aec9556 100644
--- a/src/runtime/runtime-regexp.cc
+++ b/src/runtime/runtime-regexp.cc
@@ -431,6 +431,9 @@
   } else {
     result_len = static_cast<int>(result_len_64);
   }
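+  // Shortcut: an empty result needs no allocation; return the canonical
+  // empty string right away.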
+  if (result_len == 0) {
+    return isolate->heap()->empty_string();
+  }
 
   int subject_pos = 0;
   int result_pos = 0;
@@ -652,7 +655,7 @@
   if (!heap->lo_space()->Contains(*answer)) {
     heap->CreateFillerObjectAt(end_of_string, delta, ClearRecordedSlots::kNo);
   }
-  heap->AdjustLiveBytes(*answer, -delta, Heap::CONCURRENT_TO_SWEEPER);
+  heap->AdjustLiveBytes(*answer, -delta);
   return *answer;
 }
 
@@ -685,7 +688,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
@@ -698,7 +701,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringSplit) {
   HandleScope handle_scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
@@ -781,7 +784,7 @@
 // RegExpCreate ( P, F )
 RUNTIME_FUNCTION(Runtime_RegExpCreate) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, source_object, 0);
 
   Handle<String> source;
@@ -806,7 +809,7 @@
 
 RUNTIME_FUNCTION(Runtime_RegExpExec) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
   CONVERT_INT32_ARG_CHECKED(index, 2);
@@ -822,7 +825,7 @@
 
 RUNTIME_FUNCTION(Runtime_RegExpInternalReplace) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
   CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
@@ -1081,15 +1084,32 @@
   Factory* factory = isolate->factory();
   Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
 
-  // TODO(jgruber): This is a pattern we could refactor.
+  const int flags = regexp->GetFlags();
+
+  DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
+  DCHECK_EQ(flags & JSRegExp::kGlobal, 0);
+
+  // TODO(jgruber): This should be an easy port to CSA with massive payback.
+
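+  // Sticky regexps consult lastIndex even without the global flag: read
+  // and clamp it here, reset it on a failed match below, and advance it
+  // past the match on success.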
+  const bool sticky = (flags & JSRegExp::kSticky) != 0;
+  uint32_t last_index = 0;
+  if (sticky) {
+    Handle<Object> last_index_obj(regexp->LastIndex(), isolate);
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, last_index_obj,
+                               Object::ToLength(isolate, last_index_obj),
+                               String);
+    last_index = PositiveNumberToUint32(*last_index_obj);
+
+    if (static_cast<int>(last_index) > subject->length()) last_index = 0;
+  }
+
   Handle<Object> match_indices_obj;
   ASSIGN_RETURN_ON_EXCEPTION(
       isolate, match_indices_obj,
-      RegExpImpl::Exec(regexp, subject, 0, last_match_info), String);
+      RegExpImpl::Exec(regexp, subject, last_index, last_match_info), String);
 
   if (match_indices_obj->IsNull(isolate)) {
-    RETURN_ON_EXCEPTION(isolate, RegExpUtils::SetLastIndex(isolate, regexp, 0),
-                        String);
+    if (sticky) regexp->SetLastIndex(0);
     return subject;
   }
 
@@ -1099,6 +1119,8 @@
   const int index = match_indices->Capture(0);
   const int end_of_match = match_indices->Capture(1);
 
+  if (sticky) regexp->SetLastIndex(end_of_match);
+
   IncrementalStringBuilder builder(isolate);
   builder.AppendString(factory->NewSubString(subject, 0, index));
 
@@ -1150,10 +1172,9 @@
                                                   Handle<Object> replace_obj) {
   Factory* factory = isolate->factory();
 
-  // TODO(jgruber): We need the even stricter guarantee of an unmodified
-  // JSRegExp map here for access to GetFlags to be legal.
   const int flags = regexp->GetFlags();
   const bool global = (flags & JSRegExp::kGlobal) != 0;
+  const bool sticky = (flags & JSRegExp::kSticky) != 0;
 
   // Functional fast-paths are dispatched directly by replace builtin.
   DCHECK(!replace_obj->IsCallable());
@@ -1168,14 +1189,24 @@
   if (!global) {
     // Non-global regexp search, string replace.
 
+    uint32_t last_index = 0;
+    if (sticky) {
+      Handle<Object> last_index_obj(regexp->LastIndex(), isolate);
+      ASSIGN_RETURN_ON_EXCEPTION(isolate, last_index_obj,
+                                 Object::ToLength(isolate, last_index_obj),
+                                 String);
+      last_index = PositiveNumberToUint32(*last_index_obj);
+
+      if (static_cast<int>(last_index) > string->length()) last_index = 0;
+    }
+
     Handle<Object> match_indices_obj;
     ASSIGN_RETURN_ON_EXCEPTION(
         isolate, match_indices_obj,
-        RegExpImpl::Exec(regexp, string, 0, last_match_info), String);
+        RegExpImpl::Exec(regexp, string, last_index, last_match_info), String);
 
     if (match_indices_obj->IsNull(isolate)) {
-      RETURN_ON_EXCEPTION(
-          isolate, RegExpUtils::SetLastIndex(isolate, regexp, 0), String);
+      if (sticky) regexp->SetLastIndex(0);
       return string;
     }
 
@@ -1184,6 +1215,8 @@
     const int start_index = match_indices->Capture(0);
     const int end_index = match_indices->Capture(1);
 
+    if (sticky) regexp->SetLastIndex(end_index);
+
     IncrementalStringBuilder builder(isolate);
     builder.AppendString(factory->NewSubString(string, 0, start_index));
 
@@ -1237,7 +1270,7 @@
 // This is only called for StringReplaceGlobalRegExpWithFunction.
 RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
   HandleScope handles(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
@@ -1259,26 +1292,244 @@
 
 RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, replace, 2);
 
+  DCHECK(RegExpUtils::IsUnmodifiedRegExp(isolate, regexp));
+
   RETURN_RESULT_OR_FAILURE(isolate, StringReplaceNonGlobalRegExpWithFunction(
                                         isolate, subject, regexp, replace));
 }
 
+namespace {
+
+// ES#sec-speciesconstructor
+// SpeciesConstructor ( O, defaultConstructor )
+MUST_USE_RESULT MaybeHandle<Object> SpeciesConstructor(
+    Isolate* isolate, Handle<JSReceiver> recv,
+    Handle<JSFunction> default_ctor) {
+  Handle<Object> ctor_obj;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, ctor_obj,
+      JSObject::GetProperty(recv, isolate->factory()->constructor_string()),
+      Object);
+
+  if (ctor_obj->IsUndefined(isolate)) return default_ctor;
+
+  if (!ctor_obj->IsJSReceiver()) {
+    THROW_NEW_ERROR(isolate,
+                    NewTypeError(MessageTemplate::kConstructorNotReceiver),
+                    Object);
+  }
+
+  Handle<JSReceiver> ctor = Handle<JSReceiver>::cast(ctor_obj);
+
+  Handle<Object> species;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, species,
+      JSObject::GetProperty(ctor, isolate->factory()->species_symbol()),
+      Object);
+
+  if (species->IsNullOrUndefined(isolate)) {
+    return default_ctor;
+  }
+
+  if (species->IsConstructor()) return species;
+
+  THROW_NEW_ERROR(
+      isolate, NewTypeError(MessageTemplate::kSpeciesNotConstructor), Object);
+}
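// A standalone sketch of the lookup chain above, with hypothetical stand-in
// types instead of V8 handles (exceptions stand in for the TypeErrors):
// recv.constructor is read first (undefined selects the default), then
// constructor[@@species] (null/undefined selects the default); whatever
// survives must itself be a constructor.
#include <stdexcept>

struct ObjSketch {
  bool undefined = false;
  bool receiver = false;       // is it an object?
  bool constructable = false;  // can it be new'ed?
  ObjSketch* constructor_prop = nullptr;  // recv.constructor
  ObjSketch* species_prop = nullptr;      // ctor[Symbol.species]
};

ObjSketch* SpeciesConstructorSketch(ObjSketch* recv, ObjSketch* default_ctor) {
  ObjSketch* ctor = recv->constructor_prop;
  if (ctor == nullptr || ctor->undefined) return default_ctor;
  if (!ctor->receiver) throw std::runtime_error("constructor not an object");
  ObjSketch* species = ctor->species_prop;
  if (species == nullptr || species->undefined) return default_ctor;
  if (species->constructable) return species;
  throw std::runtime_error("@@species is not a constructor");
}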
+
+MUST_USE_RESULT MaybeHandle<Object> ToUint32(Isolate* isolate,
+                                             Handle<Object> object,
+                                             uint32_t* out) {
+  if (object->IsUndefined(isolate)) {
+    *out = kMaxUInt32;
+    return object;
+  }
+
+  Handle<Object> number;
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, number, Object::ToNumber(object), Object);
+  *out = NumberToUint32(*number);
+  return object;
+}
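// The helper above encodes the @@split limit coercion: an undefined limit
// means "effectively unlimited" (kMaxUInt32); anything else goes through
// ToNumber and then ToUint32, i.e. truncation plus modulo-2^32 wrapping.
// A standalone sketch of the numeric step:
#include <cmath>
#include <cstdint>

uint32_t ToUint32Sketch(double number) {
  if (std::isnan(number) || std::isinf(number)) return 0;
  double wrapped = std::fmod(std::trunc(number), 4294967296.0);
  if (wrapped < 0) wrapped += 4294967296.0;  // map negatives into [0, 2^32)
  return static_cast<uint32_t>(wrapped);
}
// ToUint32Sketch(-1.0) == 4294967295; ToUint32Sketch(4294967296.5) == 0.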
+
+Handle<JSArray> NewJSArrayWithElements(Isolate* isolate,
+                                       Handle<FixedArray> elems,
+                                       int num_elems) {
+  elems->Shrink(num_elems);
+  return isolate->factory()->NewJSArrayWithElements(elems);
+}
+
+}  // namespace
+
+// Slow path for:
+// ES#sec-regexp.prototype-@@split
+// RegExp.prototype [ @@split ] ( string, limit )
+RUNTIME_FUNCTION(Runtime_RegExpSplit) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+
+  DCHECK(args[1]->IsString());
+
+  CONVERT_ARG_HANDLE_CHECKED(JSReceiver, recv, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, string, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, limit_obj, 2);
+
+  Factory* factory = isolate->factory();
+
+  Handle<JSFunction> regexp_fun = isolate->regexp_function();
+  Handle<Object> ctor;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, ctor, SpeciesConstructor(isolate, recv, regexp_fun));
+
+  Handle<Object> flags_obj;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, flags_obj, JSObject::GetProperty(recv, factory->flags_string()));
+
+  Handle<String> flags;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, flags,
+                                     Object::ToString(isolate, flags_obj));
+
+  Handle<String> u_str = factory->LookupSingleCharacterStringFromCode('u');
+  const bool unicode = (String::IndexOf(isolate, flags, u_str, 0) >= 0);
+
+  Handle<String> y_str = factory->LookupSingleCharacterStringFromCode('y');
+  const bool sticky = (String::IndexOf(isolate, flags, y_str, 0) >= 0);
+
+  Handle<String> new_flags = flags;
+  if (!sticky) {
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_flags,
+                                       factory->NewConsString(flags, y_str));
+  }
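// The splitter constructed below is deliberately sticky: a 'y' is appended
// unless the flags string already contains one, so every exec() anchors
// exactly at lastIndex rather than scanning forward. A one-line sketch:
#include <string>

std::string ForceStickySketch(const std::string& flags) {
  return flags.find('y') == std::string::npos ? flags + 'y' : flags;
}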
+
+  Handle<JSReceiver> splitter;
+  {
+    const int argc = 2;
+
+    ScopedVector<Handle<Object>> argv(argc);
+    argv[0] = recv;
+    argv[1] = new_flags;
+
+    Handle<JSFunction> ctor_fun = Handle<JSFunction>::cast(ctor);
+    Handle<Object> splitter_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, splitter_obj, Execution::New(ctor_fun, argc, argv.start()));
+
+    splitter = Handle<JSReceiver>::cast(splitter_obj);
+  }
+
+  uint32_t limit;
+  RETURN_FAILURE_ON_EXCEPTION(isolate, ToUint32(isolate, limit_obj, &limit));
+
+  const uint32_t length = string->length();
+
+  if (limit == 0) return *factory->NewJSArray(0);
+
+  if (length == 0) {
+    Handle<Object> result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
+                                                 factory->undefined_value()));
+
+    if (!result->IsNull(isolate)) return *factory->NewJSArray(0);
+
+    Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
+    elems->set(0, *string);
+    return *factory->NewJSArrayWithElements(elems);
+  }
+
+  static const int kInitialArraySize = 8;
+  Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
+  int num_elems = 0;
+
+  uint32_t string_index = 0;
+  uint32_t prev_string_index = 0;
+  while (string_index < length) {
+    RETURN_FAILURE_ON_EXCEPTION(
+        isolate, RegExpUtils::SetLastIndex(isolate, splitter, string_index));
+
+    Handle<Object> result;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
+                                                 factory->undefined_value()));
+
+    if (result->IsNull(isolate)) {
+      string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
+                                                     string_index, unicode);
+      continue;
+    }
+
+    Handle<Object> last_index_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, last_index_obj, RegExpUtils::GetLastIndex(isolate, splitter));
+
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, last_index_obj, Object::ToLength(isolate, last_index_obj));
+
+    const uint32_t end =
+        std::min(PositiveNumberToUint32(*last_index_obj), length);
+    if (end == prev_string_index) {
+      string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
+                                                     string_index, unicode);
+      continue;
+    }
+
+    {
+      Handle<String> substr =
+          factory->NewSubString(string, prev_string_index, string_index);
+      elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+      if (static_cast<uint32_t>(num_elems) == limit) {
+        return *NewJSArrayWithElements(isolate, elems, num_elems);
+      }
+    }
+
+    prev_string_index = end;
+
+    Handle<Object> num_captures_obj;
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, num_captures_obj,
+        Object::GetProperty(result, isolate->factory()->length_string()));
+
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+        isolate, num_captures_obj, Object::ToLength(isolate, num_captures_obj));
+    const int num_captures = PositiveNumberToUint32(*num_captures_obj);
+
+    for (int i = 1; i < num_captures; i++) {
+      Handle<Object> capture;
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+          isolate, capture, Object::GetElement(isolate, result, i));
+      elems = FixedArray::SetAndGrow(elems, num_elems++, capture);
+      if (static_cast<uint32_t>(num_elems) == limit) {
+        return *NewJSArrayWithElements(isolate, elems, num_elems);
+      }
+    }
+
+    string_index = prev_string_index;
+  }
+
+  {
+    Handle<String> substr =
+        factory->NewSubString(string, prev_string_index, length);
+    elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+  }
+
+  return *NewJSArrayWithElements(isolate, elems, num_elems);
+}
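// A standalone sketch of the loop above, with a caller-supplied match
// function standing in for RegExpExec on the sticky splitter: emit the chunk
// [prev, i) ending at the match start, then captures 1..n-1, honor the
// limit, skip zero-width matches, and append the trailing chunk at the end.
// (The ++i steps stand in for AdvanceStringIndex and ignore the unicode
// surrogate-pair case.)
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

struct MatchSketch {
  bool matched = false;
  uint32_t end = 0;                   // splitter.lastIndex after the match
  std::vector<std::string> captures;  // captures[0] is the whole match
};

std::vector<std::string> SplitSketch(
    const std::string& s, uint32_t limit,
    MatchSketch (*exec_at)(const std::string&, uint32_t)) {
  std::vector<std::string> out;
  if (limit == 0) return out;
  const uint32_t length = static_cast<uint32_t>(s.size());
  uint32_t prev = 0;
  uint32_t i = 0;
  while (i < length) {
    MatchSketch m = exec_at(s, i);
    if (!m.matched) { ++i; continue; }
    const uint32_t end = std::min(m.end, length);
    if (end == prev) { ++i; continue; }  // zero-width match: no empty chunk
    out.push_back(s.substr(prev, i - prev));
    if (out.size() == limit) return out;
    prev = end;
    for (size_t c = 1; c < m.captures.size(); ++c) {
      out.push_back(m.captures[c]);
      if (out.size() == limit) return out;
    }
    i = prev;
  }
  out.push_back(s.substr(prev, length - prev));
  return out;
}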
+
 // Slow path for:
 // ES#sec-regexp.prototype-@@replace
 // RegExp.prototype [ @@replace ] ( string, replaceValue )
 RUNTIME_FUNCTION(Runtime_RegExpReplace) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, recv, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, string, 1);
-  Handle<Object> replace_obj = args.at<Object>(2);
+  Handle<Object> replace_obj = args.at(2);
 
   Factory* factory = isolate->factory();
 
@@ -1291,7 +1542,7 @@
                                replace_obj));
   }
 
-  const int length = string->length();
+  const uint32_t length = string->length();
   const bool functional_replace = replace_obj->IsCallable();
 
   Handle<String> replace;
@@ -1348,7 +1599,7 @@
 
   // TODO(jgruber): Look into ReplacementStringBuilder instead.
   IncrementalStringBuilder builder(isolate);
-  int next_source_position = 0;
+  uint32_t next_source_position = 0;
 
   for (const auto& result : results) {
     Handle<Object> captures_length_obj;
@@ -1359,8 +1610,7 @@
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, captures_length_obj,
         Object::ToLength(isolate, captures_length_obj));
-    const int captures_length =
-        std::max(Handle<Smi>::cast(captures_length_obj)->value(), 0);
+    const int captures_length = PositiveNumberToUint32(*captures_length_obj);
 
     Handle<Object> match_obj;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match_obj,
@@ -1381,8 +1631,8 @@
     // 2^53 - 1 (at least for ToLength), we might actually need uint64_t here?
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, position_obj, Object::ToInteger(isolate, position_obj));
-    const int position =
-        std::max(std::min(Handle<Smi>::cast(position_obj)->value(), length), 0);
+    const uint32_t position =
+        std::min(PositiveNumberToUint32(*position_obj), length);
 
     ZoneVector<Handle<Object>> captures(&zone);
     for (int n = 0; n < captures_length; n++) {
@@ -1442,16 +1692,28 @@
 
 RUNTIME_FUNCTION(Runtime_RegExpExecReThrow) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   Object* exception = isolate->pending_exception();
   isolate->clear_pending_exception();
   return isolate->ReThrow(exception);
 }
 
+RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
+
+  RETURN_FAILURE_ON_EXCEPTION(isolate,
+                              JSRegExp::Initialize(regexp, source, flags));
+
+  return *regexp;
+}
 
 RUNTIME_FUNCTION(Runtime_IsRegExp) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Object, obj, 0);
   return isolate->heap()->ToBoolean(obj->IsJSRegExp());
 }
diff --git a/src/runtime/runtime-scopes.cc b/src/runtime/runtime-scopes.cc
index 377799f..76e7c2b 100644
--- a/src/runtime/runtime-scopes.cc
+++ b/src/runtime/runtime-scopes.cc
@@ -45,8 +45,8 @@
     Isolate* isolate, Handle<JSGlobalObject> global, Handle<String> name,
     Handle<Object> value, PropertyAttributes attr, bool is_var,
     bool is_function_declaration, RedeclarationType redeclaration_type,
-    Handle<TypeFeedbackVector> feedback_vector = Handle<TypeFeedbackVector>(),
-    FeedbackVectorSlot slot = FeedbackVectorSlot::Invalid()) {
+    Handle<FeedbackVector> feedback_vector = Handle<FeedbackVector>(),
+    FeedbackSlot slot = FeedbackSlot::Invalid()) {
   Handle<ScriptContextTable> script_contexts(
       global->native_context()->script_context_table());
   ScriptContextTable::LookupResult lookup;
@@ -86,10 +86,8 @@
 
       // Check whether we can reconfigure the existing property into a
       // function.
-      PropertyDetails old_details = it.property_details();
-      if (old_details.IsReadOnly() || old_details.IsDontEnum() ||
-          (it.state() == LookupIterator::ACCESSOR &&
-           it.GetAccessors()->IsAccessorPair())) {
+      if (old_attributes & READ_ONLY || old_attributes & DONT_ENUM ||
+          (it.state() == LookupIterator::ACCESSOR)) {
         // ECMA-262 section 15.1.11 GlobalDeclarationInstantiation 5.d:
         // If hasRestrictedGlobal is true, throw a SyntaxError exception.
         // ECMA-262 section 18.2.1.3 EvalDeclarationInstantiation 8.a.iv.1.b:
@@ -117,7 +115,8 @@
   RETURN_FAILURE_ON_EXCEPTION(
       isolate, JSObject::DefineOwnPropertyIgnoreAttributes(&it, value, attr));
 
-  if (!feedback_vector.is_null()) {
+  if (!feedback_vector.is_null() &&
+      it.state() != LookupIterator::State::INTERCEPTOR) {
     DCHECK_EQ(*global, *it.GetHolder<Object>());
     // Preinitialize the feedback slot if the global object does not have
     // a named interceptor or the interceptor is not masking.
@@ -130,18 +129,19 @@
   return isolate->heap()->undefined_value();
 }
 
-Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> pairs, int flags,
-                       Handle<TypeFeedbackVector> feedback_vector) {
+Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
+                       int flags, Handle<FeedbackVector> feedback_vector) {
   HandleScope scope(isolate);
   Handle<JSGlobalObject> global(isolate->global_object());
   Handle<Context> context(isolate->context());
 
   // Traverse the name/value pairs and set the properties.
-  int length = pairs->length();
-  FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i += 2, {
-    FeedbackVectorSlot slot(Smi::cast(pairs->get(i))->value());
-    Handle<String> name(feedback_vector->GetName(slot), isolate);
-    Handle<Object> initial_value(pairs->get(i + 1), isolate);
+  int length = declarations->length();
+  FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i += 4, {
+    Handle<String> name(String::cast(declarations->get(i)), isolate);
+    FeedbackSlot slot(Smi::cast(declarations->get(i + 1))->value());
+    Handle<Object> possibly_literal_slot(declarations->get(i + 2), isolate);
+    Handle<Object> initial_value(declarations->get(i + 3), isolate);
 
     bool is_var = initial_value->IsUndefined(isolate);
     bool is_function = initial_value->IsSharedFunctionInfo();
@@ -149,12 +149,16 @@
 
     Handle<Object> value;
     if (is_function) {
+      DCHECK(possibly_literal_slot->IsSmi());
       // Copy the function and update its context. Use it as value.
       Handle<SharedFunctionInfo> shared =
           Handle<SharedFunctionInfo>::cast(initial_value);
+      FeedbackSlot literals_slot(Smi::cast(*possibly_literal_slot)->value());
+      Handle<Cell> literals(Cell::cast(feedback_vector->Get(literals_slot)),
+                            isolate);
       Handle<JSFunction> function =
-          isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
-                                                                TENURED);
+          isolate->factory()->NewFunctionFromSharedFunctionInfo(
+              shared, context, literals, TENURED);
       value = function;
     } else {
       value = isolate->factory()->undefined_value();
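// A standalone sketch of the declarations layout consumed above: a flat
// array of [name, feedback_slot, literal_slot, initial_value] records walked
// four entries at a time (TaggedSketch is a hypothetical stand-in for the
// FixedArray's tagged slots).
#include <cstddef>
#include <vector>

struct TaggedSketch { /* stand-in for one tagged FixedArray slot */ };

void WalkDeclarationsSketch(const std::vector<TaggedSketch>& decls) {
  for (size_t i = 0; i + 3 < decls.size(); i += 4) {
    const TaggedSketch& name          = decls[i + 0];
    const TaggedSketch& feedback_slot = decls[i + 1];
    const TaggedSketch& literal_slot  = decls[i + 2];  // Smi only for functions
    const TaggedSketch& initial_value = decls[i + 3];  // undefined for vars
    // A SharedFunctionInfo initial_value means "function declaration": the
    // literals cell is fetched from the feedback vector at literal_slot.
    (void)name; (void)feedback_slot; (void)literal_slot; (void)initial_value;
  }
}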
@@ -186,11 +190,11 @@
   HandleScope scope(isolate);
   DCHECK_EQ(3, args.length());
 
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, declarations, 0);
   CONVERT_SMI_ARG_CHECKED(flags, 1);
-  CONVERT_ARG_HANDLE_CHECKED(TypeFeedbackVector, feedback_vector, 2);
+  CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, feedback_vector, 2);
 
-  return DeclareGlobals(isolate, pairs, flags, feedback_vector);
+  return DeclareGlobals(isolate, declarations, flags, feedback_vector);
 }
 
 // TODO(ishell): merge this with Runtime::kDeclareGlobals once interpreter
@@ -199,13 +203,12 @@
   HandleScope scope(isolate);
   DCHECK_EQ(3, args.length());
 
-  CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, declarations, 0);
   CONVERT_SMI_ARG_CHECKED(flags, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 2);
 
-  Handle<TypeFeedbackVector> feedback_vector(closure->feedback_vector(),
-                                             isolate);
-  return DeclareGlobals(isolate, pairs, flags, feedback_vector);
+  Handle<FeedbackVector> feedback_vector(closure->feedback_vector(), isolate);
+  return DeclareGlobals(isolate, declarations, flags, feedback_vector);
 }
 
 RUNTIME_FUNCTION(Runtime_InitializeVarGlobal) {
@@ -224,15 +227,15 @@
 
 Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
                           Handle<Object> value) {
-  // Declarations are always made in a function, native, or script context, or
-  // a declaration block scope. Since this is called from eval, the context
-  // passed is the context of the caller, which may be some nested context and
-  // not the declaration context.
+  // Declarations are always made in a function, native, eval, or script
+  // context, or a declaration block scope. Since this is called from eval, the
+  // context passed is the context of the caller, which may be some nested
+  // context and not the declaration context.
   Handle<Context> context_arg(isolate->context(), isolate);
   Handle<Context> context(context_arg->declaration_context(), isolate);
 
   DCHECK(context->IsFunctionContext() || context->IsNativeContext() ||
-         context->IsScriptContext() ||
+         context->IsScriptContext() || context->IsEvalContext() ||
          (context->IsBlockContext() && context->has_extension()));
 
   bool is_function = value->IsJSFunction();
@@ -313,6 +316,8 @@
     }
     DCHECK(object->IsJSContextExtensionObject() || object->IsJSGlobalObject());
   } else {
+    // Sloppy eval will never have an extension object, as vars are hoisted out,
+    // and lets are known statically.
     DCHECK(context->IsFunctionContext());
     object =
         isolate->factory()->NewJSObject(isolate->context_extension_function());
@@ -352,7 +357,7 @@
   // Find frame containing arguments passed to the caller.
   JavaScriptFrameIterator it(isolate);
   JavaScriptFrame* frame = it.frame();
-  List<JSFunction*> functions(2);
+  List<SharedFunctionInfo*> functions(2);
   frame->GetFunctions(&functions);
   if (functions.length() > 1) {
     int inlined_jsframe_index = functions.length() - 1;
@@ -377,6 +382,8 @@
         NewArray<Handle<Object>>(*total_argc));
     bool should_deoptimize = false;
     for (int i = 0; i < argument_count; i++) {
+      // If we materialize any object, we should deoptimize the frame because we
+      // might alias an object that was eliminated by escape analysis.
       should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
       Handle<Object> value = iter->GetValue();
       param_data[i] = value;
@@ -384,7 +391,7 @@
     }
 
     if (should_deoptimize) {
-      translated_values.StoreMaterializedValuesAndDeopt();
+      translated_values.StoreMaterializedValuesAndDeopt(frame);
     }
 
     return param_data;
@@ -407,7 +414,7 @@
 template <typename T>
 Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
                                     T parameters, int argument_count) {
-  CHECK(!IsSubclassConstructor(callee->shared()->kind()));
+  CHECK(!IsDerivedConstructor(callee->shared()->kind()));
   DCHECK(callee->shared()->has_simple_parameters());
   Handle<JSObject> result =
       isolate->factory()->NewArgumentsObject(callee, argument_count);
@@ -517,7 +524,7 @@
 
 RUNTIME_FUNCTION(Runtime_NewSloppyArguments_Generic) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
   // This generic runtime function can also be used when the caller has been
   // inlined; we use the slow but accurate {GetCallerArguments}.
@@ -582,34 +589,78 @@
 
 RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
-  Object** parameters = reinterpret_cast<Object**>(args[1]);
-  CONVERT_SMI_ARG_CHECKED(argument_count, 2);
+  StackFrameIterator iterator(isolate);
+
+  // Stub/interpreter handler frame
+  iterator.Advance();
+  DCHECK(iterator.frame()->type() == StackFrame::STUB);
+
+  // Function frame
+  iterator.Advance();
+  JavaScriptFrame* function_frame = JavaScriptFrame::cast(iterator.frame());
+  DCHECK(function_frame->is_java_script());
+  int argc = function_frame->GetArgumentsLength();
+  Address fp = function_frame->fp();
+  if (function_frame->has_adapted_arguments()) {
+    iterator.Advance();
+    fp = iterator.frame()->fp();
+  }
+
+  Object** parameters = reinterpret_cast<Object**>(
+      fp + argc * kPointerSize + StandardFrameConstants::kCallerSPOffset);
   ParameterArguments argument_getter(parameters);
-  return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
+  return *NewSloppyArguments(isolate, callee, argument_getter, argc);
 }
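// A sketch of the address arithmetic above, under an assumed standard frame
// layout; kPointerSizeSketch and kCallerSPOffsetSketch are illustrative
// values, not the V8 constants. The idea: the caller pushed argc parameter
// slots above its stack pointer, so offsetting fp by the caller-SP distance
// plus argc slots yields a base from which the parameters are read back.
#include <cstdint>

constexpr int kPointerSizeSketch = 8;                          // 64-bit build
constexpr int kCallerSPOffsetSketch = 2 * kPointerSizeSketch;  // pc + saved fp

inline uint8_t* ParameterBaseSketch(uint8_t* fp, int argc) {
  return fp + argc * kPointerSizeSketch + kCallerSPOffsetSketch;
}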
 
+RUNTIME_FUNCTION(Runtime_NewArgumentsElements) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  Object** frame = reinterpret_cast<Object**>(args[0]);
+  CONVERT_SMI_ARG_CHECKED(length, 1);
+  Handle<FixedArray> result =
+      isolate->factory()->NewUninitializedFixedArray(length);
+  int const offset = length + 1;
+  DisallowHeapAllocation no_gc;
+  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+  for (int index = 0; index < length; ++index) {
+    result->set(index, frame[offset - index], mode);
+  }
+  return *result;
+}
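// The loop above reads the arguments back in reverse: with {frame} pointing
// at the lowest relevant slot, element {index} is fetched from
// frame[length + 1 - index], so index 0 comes from the highest address (the
// extra slot plausibly accounts for the receiver). A standalone sketch over
// a plain array:
#include <cstddef>
#include <vector>

std::vector<int> CopyArgumentsSketch(const int* frame, int length) {
  std::vector<int> result(static_cast<size_t>(length));
  const int offset = length + 1;
  for (int index = 0; index < length; ++index) {
    result[static_cast<size_t>(index)] = frame[offset - index];
  }
  return result;
}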
 
 RUNTIME_FUNCTION(Runtime_NewClosure) {
   HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 1);
+  CONVERT_SMI_ARG_CHECKED(index, 2);
   Handle<Context> context(isolate->context(), isolate);
-  return *isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
-                                                                NOT_TENURED);
+  FeedbackSlot slot = FeedbackVector::ToSlot(index);
+  Handle<Cell> vector_cell(Cell::cast(vector->Get(slot)), isolate);
+  Handle<JSFunction> function =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          shared, context, vector_cell, NOT_TENURED);
+  return *function;
 }
 
 
 RUNTIME_FUNCTION(Runtime_NewClosure_Tenured) {
   HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FeedbackVector, vector, 1);
+  CONVERT_SMI_ARG_CHECKED(index, 2);
   Handle<Context> context(isolate->context(), isolate);
+  FeedbackSlot slot = FeedbackVector::ToSlot(index);
+  Handle<Cell> vector_cell(Cell::cast(vector->Get(slot)), isolate);
   // The caller ensures that we pretenure closures that are assigned
   // directly to properties.
-  return *isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
-                                                                TENURED);
+  Handle<JSFunction> function =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          shared, context, vector_cell, TENURED);
+  return *function;
 }
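// A sketch of the slot plumbing above, using hypothetical stand-in types:
// the Smi {index} names one slot in the feedback vector, and the Cell stored
// there (the literals cell) is shared by every closure created from that
// same site, so updates through one closure are visible through the others.
#include <cassert>
#include <vector>

struct LiteralsCellSketch { int generation = 0; };

struct FeedbackVectorSketch {
  std::vector<LiteralsCellSketch> slots;  // one entry per feedback slot
  LiteralsCellSketch* CellAt(int index) { return &slots.at(index); }
};

void DemoSharedLiteralsCell() {
  FeedbackVectorSketch vector{std::vector<LiteralsCellSketch>(4)};
  LiteralsCellSketch* a = vector.CellAt(2);  // first closure's view
  LiteralsCellSketch* b = vector.CellAt(2);  // second closure's view
  a->generation = 1;
  assert(b->generation == 1);  // one shared cell, not two copies
}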
 
 static Object* FindNameClash(Handle<ScopeInfo> scope_info,
@@ -654,7 +705,7 @@
 
 RUNTIME_FUNCTION(Runtime_NewScriptContext) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
@@ -670,8 +721,9 @@
   // Script contexts have a canonical empty function as their closure, not the
   // anonymous closure containing the global code.  See
   // FullCodeGenerator::PushFunctionArgumentForContextAllocation.
-  Handle<JSFunction> closure(
-      function->shared()->IsBuiltin() ? *function : native_context->closure());
+  Handle<JSFunction> closure(function->shared()->IsUserJavaScript()
+                                 ? native_context->closure()
+                                 : *function);
   Handle<Context> result =
       isolate->factory()->NewScriptContext(closure, scope_info);
 
@@ -684,19 +736,19 @@
   return *result;
 }
 
-
 RUNTIME_FUNCTION(Runtime_NewFunctionContext) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  CONVERT_SMI_ARG_CHECKED(scope_type, 1);
 
   DCHECK(function->context() == isolate->context());
   int length = function->shared()->scope_info()->ContextLength();
-  return *isolate->factory()->NewFunctionContext(length, function);
+  return *isolate->factory()->NewFunctionContext(
+      length, function, static_cast<ScopeType>(scope_type));
 }
 
-
 RUNTIME_FUNCTION(Runtime_PushWithContext) {
   HandleScope scope(isolate);
   DCHECK_EQ(3, args.length());
diff --git a/src/runtime/runtime-simd.cc b/src/runtime/runtime-simd.cc
deleted file mode 100644
index 9542a44..0000000
--- a/src/runtime/runtime-simd.cc
+++ /dev/null
@@ -1,1016 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/runtime/runtime-utils.h"
-
-#include "src/arguments.h"
-#include "src/base/macros.h"
-#include "src/conversions.h"
-#include "src/factory.h"
-#include "src/objects-inl.h"
-
-// Implement Single Instruction Multiple Data (SIMD) operations as defined in
-// the SIMD.js draft spec:
-// http://littledan.github.io/simd.html
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-// Functions to convert Numbers to SIMD component types.
-
-template <typename T, typename F>
-static bool CanCast(F from) {
-  // A float can't represent 2^31 - 1 or 2^32 - 1 exactly, so promote the limits
-  // to double. Otherwise, the limit is rounded up and numbers like 2^31 or 2^32
-  // get through, causing any static_cast to be undefined.
-  from = trunc(from);
-  return from >= static_cast<double>(std::numeric_limits<T>::min()) &&
-         from <= static_cast<double>(std::numeric_limits<T>::max());
-}
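// Why the limits above are promoted to double: float cannot represent
// 2^31 - 1, so a float-typed limit rounds up to 2^31 and would wrongly admit
// it, making the later static_cast undefined. A self-contained check:
#include <cassert>
#include <cstdint>
#include <limits>

void DemoPromotedLimits() {
  const float f = 2147483648.0f;  // 2^31: out of int32_t range
  // Float-typed limit rounds 2^31 - 1 up to 2^31, so the bad value passes.
  assert(f <= static_cast<float>(std::numeric_limits<int32_t>::max()));
  // The double-typed limit keeps 2^31 - 1 exact and rejects it.
  assert(!(static_cast<double>(f) <=
           static_cast<double>(std::numeric_limits<int32_t>::max())));
}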
-
-
-// Explicitly specialize for conversions to float, which always succeed.
-template <>
-bool CanCast<float>(int32_t from) {
-  return true;
-}
-
-
-template <>
-bool CanCast<float>(uint32_t from) {
-  return true;
-}
-
-
-template <typename T>
-static T ConvertNumber(double number);
-
-
-template <>
-float ConvertNumber<float>(double number) {
-  return DoubleToFloat32(number);
-}
-
-
-template <>
-int32_t ConvertNumber<int32_t>(double number) {
-  return DoubleToInt32(number);
-}
-
-
-template <>
-uint32_t ConvertNumber<uint32_t>(double number) {
-  return DoubleToUint32(number);
-}
-
-
-template <>
-int16_t ConvertNumber<int16_t>(double number) {
-  return static_cast<int16_t>(DoubleToInt32(number));
-}
-
-
-template <>
-uint16_t ConvertNumber<uint16_t>(double number) {
-  return static_cast<uint16_t>(DoubleToUint32(number));
-}
-
-
-template <>
-int8_t ConvertNumber<int8_t>(double number) {
-  return static_cast<int8_t>(DoubleToInt32(number));
-}
-
-
-template <>
-uint8_t ConvertNumber<uint8_t>(double number) {
-  return static_cast<uint8_t>(DoubleToUint32(number));
-}
-
-
-// TODO(bbudge): Make this consistent with SIMD instruction results.
-inline float RecipApprox(float a) { return 1.0f / a; }
-
-
-// TODO(bbudge): Make this consistent with SIMD instruction results.
-inline float RecipSqrtApprox(float a) { return 1.0f / std::sqrt(a); }
-
-
-// Saturating addition for int16_t and int8_t.
-template <typename T>
-inline T AddSaturate(T a, T b) {
-  const T max = std::numeric_limits<T>::max();
-  const T min = std::numeric_limits<T>::min();
-  int32_t result = a + b;
-  if (result > max) return max;
-  if (result < min) return min;
-  return result;
-}
-
-
-// Saturating subtraction for int16_t and int8_t.
-template <typename T>
-inline T SubSaturate(T a, T b) {
-  const T max = std::numeric_limits<T>::max();
-  const T min = std::numeric_limits<T>::min();
-  int32_t result = a - b;
-  if (result > max) return max;
-  if (result < min) return min;
-  return result;
-}
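// A quick check of the saturating arithmetic above: sums are computed in
// int32_t, then clamped to the lane type's range instead of wrapping.
#include <cassert>
#include <cstdint>
#include <limits>

template <typename T>
T AddSaturateSketch(T a, T b) {
  const int32_t max = std::numeric_limits<T>::max();
  const int32_t min = std::numeric_limits<T>::min();
  const int32_t result = int32_t{a} + int32_t{b};
  if (result > max) return static_cast<T>(max);
  if (result < min) return static_cast<T>(min);
  return static_cast<T>(result);
}

void DemoSaturation() {
  assert(AddSaturateSketch<int8_t>(120, 100) == 127);   // clamps at INT8_MAX
  assert(AddSaturateSketch<uint8_t>(200, 100) == 255);  // clamps at UINT8_MAX
  assert(AddSaturateSketch<int16_t>(-30000, -10000) == -32768);
}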
-
-
-inline float Min(float a, float b) {
-  if (a < b) return a;
-  if (a > b) return b;
-  if (a == b) return std::signbit(a) ? a : b;
-  return std::numeric_limits<float>::quiet_NaN();
-}
-
-
-inline float Max(float a, float b) {
-  if (a > b) return a;
-  if (a < b) return b;
-  if (a == b) return std::signbit(b) ? a : b;
-  return std::numeric_limits<float>::quiet_NaN();
-}
-
-
-inline float MinNumber(float a, float b) {
-  if (std::isnan(a)) return b;
-  if (std::isnan(b)) return a;
-  return Min(a, b);
-}
-
-
-inline float MaxNumber(float a, float b) {
-  if (std::isnan(a)) return b;
-  if (std::isnan(b)) return a;
-  return Max(a, b);
-}
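// Spot checks for the float semantics above: Min/Max return NaN when either
// operand is NaN and order -0 below +0 via the signbit checks, while
// MinNumber/MaxNumber fall back to the other operand when exactly one input
// is NaN. A self-contained sketch of Min with two checks:
#include <cassert>
#include <cmath>
#include <limits>

inline float MinSketch(float a, float b) {
  if (a < b) return a;
  if (a > b) return b;
  if (a == b) return std::signbit(a) ? a : b;  // a == b covers +0 vs -0
  return std::numeric_limits<float>::quiet_NaN();
}

void DemoMinSemantics() {
  assert(std::signbit(MinSketch(0.0f, -0.0f)));  // -0 is the smaller zero
  assert(std::isnan(MinSketch(1.0f, NAN)));      // NaN propagates
}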
-
-}  // namespace
-
-//-------------------------------------------------------------------
-
-// SIMD helper functions.
-
-RUNTIME_FUNCTION(Runtime_IsSimdValue) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  return isolate->heap()->ToBoolean(args[0]->IsSimd128Value());
-}
-
-
-//-------------------------------------------------------------------
-
-// Utility macros.
-
-// TODO(gdeepti): Fix to use ToNumber conversion once polyfill is updated.
-#define CONVERT_SIMD_LANE_ARG_CHECKED(name, index, lanes)            \
-  Handle<Object> name_object = args.at<Object>(index);               \
-  if (!name_object->IsNumber()) {                                    \
-    THROW_NEW_ERROR_RETURN_FAILURE(                                  \
-        isolate, NewTypeError(MessageTemplate::kInvalidSimdIndex));  \
-  }                                                                  \
-  double number = name_object->Number();                             \
-  if (number < 0 || number >= lanes || !IsInt32Double(number)) {     \
-    THROW_NEW_ERROR_RETURN_FAILURE(                                  \
-        isolate, NewRangeError(MessageTemplate::kInvalidSimdIndex)); \
-  }                                                                  \
-  uint32_t name = static_cast<uint32_t>(number);
-
-#define CONVERT_SIMD_ARG_HANDLE_THROW(Type, name, index)                \
-  Handle<Type> name;                                                    \
-  if (args[index]->Is##Type()) {                                        \
-    name = args.at<Type>(index);                                        \
-  } else {                                                              \
-    THROW_NEW_ERROR_RETURN_FAILURE(                                     \
-        isolate, NewTypeError(MessageTemplate::kInvalidSimdOperation)); \
-  }
-
-#define SIMD_UNARY_OP(type, lane_type, lane_count, op, result) \
-  static const int kLaneCount = lane_count;                    \
-  DCHECK(args.length() == 1);                                  \
-  CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                   \
-  lane_type lanes[kLaneCount];                                 \
-  for (int i = 0; i < kLaneCount; i++) {                       \
-    lanes[i] = op(a->get_lane(i));                             \
-  }                                                            \
-  Handle<type> result = isolate->factory()->New##type(lanes);
-
-#define SIMD_BINARY_OP(type, lane_type, lane_count, op, result) \
-  static const int kLaneCount = lane_count;                     \
-  DCHECK(args.length() == 2);                                   \
-  CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                    \
-  CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1);                    \
-  lane_type lanes[kLaneCount];                                  \
-  for (int i = 0; i < kLaneCount; i++) {                        \
-    lanes[i] = op(a->get_lane(i), b->get_lane(i));              \
-  }                                                             \
-  Handle<type> result = isolate->factory()->New##type(lanes);
-
-#define SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, op, result) \
-  static const int kLaneCount = lane_count;                               \
-  DCHECK(args.length() == 2);                                             \
-  CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                              \
-  CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1);                              \
-  bool lanes[kLaneCount];                                                 \
-  for (int i = 0; i < kLaneCount; i++) {                                  \
-    lanes[i] = a->get_lane(i) op b->get_lane(i);                          \
-  }                                                                       \
-  Handle<bool_type> result = isolate->factory()->New##bool_type(lanes);
-
-//-------------------------------------------------------------------
-
-// Common functions.
-
-#define GET_NUMERIC_ARG(lane_type, name, index)              \
-  Handle<Object> a;                                          \
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(                        \
-      isolate, a, Object::ToNumber(args.at<Object>(index))); \
-  name = ConvertNumber<lane_type>(a->Number());
-
-#define GET_BOOLEAN_ARG(lane_type, name, index) \
-  name = args[index]->BooleanValue();
-
-#define SIMD_ALL_TYPES(FUNCTION)                              \
-  FUNCTION(Float32x4, float, 4, NewNumber, GET_NUMERIC_ARG)   \
-  FUNCTION(Int32x4, int32_t, 4, NewNumber, GET_NUMERIC_ARG)   \
-  FUNCTION(Uint32x4, uint32_t, 4, NewNumber, GET_NUMERIC_ARG) \
-  FUNCTION(Bool32x4, bool, 4, ToBoolean, GET_BOOLEAN_ARG)     \
-  FUNCTION(Int16x8, int16_t, 8, NewNumber, GET_NUMERIC_ARG)   \
-  FUNCTION(Uint16x8, uint16_t, 8, NewNumber, GET_NUMERIC_ARG) \
-  FUNCTION(Bool16x8, bool, 8, ToBoolean, GET_BOOLEAN_ARG)     \
-  FUNCTION(Int8x16, int8_t, 16, NewNumber, GET_NUMERIC_ARG)   \
-  FUNCTION(Uint8x16, uint8_t, 16, NewNumber, GET_NUMERIC_ARG) \
-  FUNCTION(Bool8x16, bool, 16, ToBoolean, GET_BOOLEAN_ARG)
-
-#define SIMD_CREATE_FUNCTION(type, lane_type, lane_count, extract, replace) \
-  RUNTIME_FUNCTION(Runtime_Create##type) {                                  \
-    static const int kLaneCount = lane_count;                               \
-    HandleScope scope(isolate);                                             \
-    DCHECK(args.length() == kLaneCount);                                    \
-    lane_type lanes[kLaneCount];                                            \
-    for (int i = 0; i < kLaneCount; i++) {                                  \
-      replace(lane_type, lanes[i], i)                                       \
-    }                                                                       \
-    return *isolate->factory()->New##type(lanes);                           \
-  }
-
-#define SIMD_EXTRACT_FUNCTION(type, lane_type, lane_count, extract, replace) \
-  RUNTIME_FUNCTION(Runtime_##type##ExtractLane) {                            \
-    HandleScope scope(isolate);                                              \
-    DCHECK(args.length() == 2);                                              \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                               \
-    CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, lane_count);                      \
-    return *isolate->factory()->extract(a->get_lane(lane));                  \
-  }
-
-#define SIMD_REPLACE_FUNCTION(type, lane_type, lane_count, extract, replace) \
-  RUNTIME_FUNCTION(Runtime_##type##ReplaceLane) {                            \
-    static const int kLaneCount = lane_count;                                \
-    HandleScope scope(isolate);                                              \
-    DCHECK(args.length() == 3);                                              \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, simd, 0);                            \
-    CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, kLaneCount);                      \
-    lane_type lanes[kLaneCount];                                             \
-    for (int i = 0; i < kLaneCount; i++) {                                   \
-      lanes[i] = simd->get_lane(i);                                          \
-    }                                                                        \
-    replace(lane_type, lanes[lane], 2);                                      \
-    Handle<type> result = isolate->factory()->New##type(lanes);              \
-    return *result;                                                          \
-  }
-
-#define SIMD_CHECK_FUNCTION(type, lane_type, lane_count, extract, replace) \
-  RUNTIME_FUNCTION(Runtime_##type##Check) {                                \
-    HandleScope scope(isolate);                                            \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                             \
-    return *a;                                                             \
-  }
-
-#define SIMD_SWIZZLE_FUNCTION(type, lane_type, lane_count, extract, replace) \
-  RUNTIME_FUNCTION(Runtime_##type##Swizzle) {                                \
-    static const int kLaneCount = lane_count;                                \
-    HandleScope scope(isolate);                                              \
-    DCHECK(args.length() == 1 + kLaneCount);                                 \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                               \
-    lane_type lanes[kLaneCount];                                             \
-    for (int i = 0; i < kLaneCount; i++) {                                   \
-      CONVERT_SIMD_LANE_ARG_CHECKED(index, i + 1, kLaneCount);               \
-      lanes[i] = a->get_lane(index);                                         \
-    }                                                                        \
-    Handle<type> result = isolate->factory()->New##type(lanes);              \
-    return *result;                                                          \
-  }
-
-#define SIMD_SHUFFLE_FUNCTION(type, lane_type, lane_count, extract, replace) \
-  RUNTIME_FUNCTION(Runtime_##type##Shuffle) {                                \
-    static const int kLaneCount = lane_count;                                \
-    HandleScope scope(isolate);                                              \
-    DCHECK(args.length() == 2 + kLaneCount);                                 \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                               \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1);                               \
-    lane_type lanes[kLaneCount];                                             \
-    for (int i = 0; i < kLaneCount; i++) {                                   \
-      CONVERT_SIMD_LANE_ARG_CHECKED(index, i + 2, kLaneCount * 2);           \
-      lanes[i] = index < kLaneCount ? a->get_lane(index)                     \
-                                    : b->get_lane(index - kLaneCount);       \
-    }                                                                        \
-    Handle<type> result = isolate->factory()->New##type(lanes);              \
-    return *result;                                                          \
-  }
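// The swizzle/shuffle macros above: swizzle picks each output lane from one
// input by index, while shuffle concatenates two inputs and indexes into the
// pair (indices >= lane_count select from b). A standalone 4-lane sketch:
#include <array>
#include <cassert>
#include <cstdint>

std::array<int32_t, 4> Shuffle4(const std::array<int32_t, 4>& a,
                                const std::array<int32_t, 4>& b,
                                const std::array<int, 4>& idx) {
  std::array<int32_t, 4> out{};
  for (int i = 0; i < 4; ++i) {
    out[i] = idx[i] < 4 ? a[idx[i]] : b[idx[i] - 4];
  }
  return out;
}

void DemoShuffle() {
  std::array<int32_t, 4> a{1, 2, 3, 4}, b{5, 6, 7, 8};
  assert((Shuffle4(a, b, {0, 4, 1, 5}) == std::array<int32_t, 4>{1, 5, 2, 6}));
}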
-
-SIMD_ALL_TYPES(SIMD_CREATE_FUNCTION)
-SIMD_ALL_TYPES(SIMD_EXTRACT_FUNCTION)
-SIMD_ALL_TYPES(SIMD_REPLACE_FUNCTION)
-SIMD_ALL_TYPES(SIMD_CHECK_FUNCTION)
-SIMD_ALL_TYPES(SIMD_SWIZZLE_FUNCTION)
-SIMD_ALL_TYPES(SIMD_SHUFFLE_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Float-only functions.
-
-#define SIMD_ABS_FUNCTION(type, lane_type, lane_count)            \
-  RUNTIME_FUNCTION(Runtime_##type##Abs) {                         \
-    HandleScope scope(isolate);                                   \
-    SIMD_UNARY_OP(type, lane_type, lane_count, std::abs, result); \
-    return *result;                                               \
-  }
-
-#define SIMD_SQRT_FUNCTION(type, lane_type, lane_count)            \
-  RUNTIME_FUNCTION(Runtime_##type##Sqrt) {                         \
-    HandleScope scope(isolate);                                    \
-    SIMD_UNARY_OP(type, lane_type, lane_count, std::sqrt, result); \
-    return *result;                                                \
-  }
-
-#define SIMD_RECIP_APPROX_FUNCTION(type, lane_type, lane_count)      \
-  RUNTIME_FUNCTION(Runtime_##type##RecipApprox) {                    \
-    HandleScope scope(isolate);                                      \
-    SIMD_UNARY_OP(type, lane_type, lane_count, RecipApprox, result); \
-    return *result;                                                  \
-  }
-
-#define SIMD_RECIP_SQRT_APPROX_FUNCTION(type, lane_type, lane_count)     \
-  RUNTIME_FUNCTION(Runtime_##type##RecipSqrtApprox) {                    \
-    HandleScope scope(isolate);                                          \
-    SIMD_UNARY_OP(type, lane_type, lane_count, RecipSqrtApprox, result); \
-    return *result;                                                      \
-  }
-
-#define BINARY_DIV(a, b) (a) / (b)
-#define SIMD_DIV_FUNCTION(type, lane_type, lane_count)               \
-  RUNTIME_FUNCTION(Runtime_##type##Div) {                            \
-    HandleScope scope(isolate);                                      \
-    SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_DIV, result); \
-    return *result;                                                  \
-  }
-
-#define SIMD_MINNUM_FUNCTION(type, lane_type, lane_count)           \
-  RUNTIME_FUNCTION(Runtime_##type##MinNum) {                        \
-    HandleScope scope(isolate);                                     \
-    SIMD_BINARY_OP(type, lane_type, lane_count, MinNumber, result); \
-    return *result;                                                 \
-  }
-
-#define SIMD_MAXNUM_FUNCTION(type, lane_type, lane_count)           \
-  RUNTIME_FUNCTION(Runtime_##type##MaxNum) {                        \
-    HandleScope scope(isolate);                                     \
-    SIMD_BINARY_OP(type, lane_type, lane_count, MaxNumber, result); \
-    return *result;                                                 \
-  }
-
-SIMD_ABS_FUNCTION(Float32x4, float, 4)
-SIMD_SQRT_FUNCTION(Float32x4, float, 4)
-SIMD_RECIP_APPROX_FUNCTION(Float32x4, float, 4)
-SIMD_RECIP_SQRT_APPROX_FUNCTION(Float32x4, float, 4)
-SIMD_DIV_FUNCTION(Float32x4, float, 4)
-SIMD_MINNUM_FUNCTION(Float32x4, float, 4)
-SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
-
-//-------------------------------------------------------------------
-
-// Int-only functions.
-
-#define SIMD_INT_TYPES(FUNCTION)    \
-  FUNCTION(Int32x4, int32_t, 32, 4) \
-  FUNCTION(Int16x8, int16_t, 16, 8) \
-  FUNCTION(Int8x16, int8_t, 8, 16)
-
-#define SIMD_UINT_TYPES(FUNCTION)     \
-  FUNCTION(Uint32x4, uint32_t, 32, 4) \
-  FUNCTION(Uint16x8, uint16_t, 16, 8) \
-  FUNCTION(Uint8x16, uint8_t, 8, 16)
-
-#define CONVERT_SHIFT_ARG_CHECKED(name, index)                          \
-  Handle<Object> name_object = args.at<Object>(index);                  \
-  if (!name_object->IsNumber()) {                                       \
-    THROW_NEW_ERROR_RETURN_FAILURE(                                     \
-        isolate, NewTypeError(MessageTemplate::kInvalidSimdOperation)); \
-  }                                                                     \
-  int32_t signed_shift = 0;                                             \
-  args[index]->ToInt32(&signed_shift);                                  \
-  uint32_t name = bit_cast<uint32_t>(signed_shift);
-
-#define SIMD_LSL_FUNCTION(type, lane_type, lane_bits, lane_count) \
-  RUNTIME_FUNCTION(Runtime_##type##ShiftLeftByScalar) {           \
-    static const int kLaneCount = lane_count;                     \
-    HandleScope scope(isolate);                                   \
-    DCHECK(args.length() == 2);                                   \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                    \
-    CONVERT_SHIFT_ARG_CHECKED(shift, 1);                          \
-    lane_type lanes[kLaneCount] = {0};                            \
-    shift &= lane_bits - 1;                                       \
-    for (int i = 0; i < kLaneCount; i++) {                        \
-      lanes[i] = a->get_lane(i) << shift;                         \
-    }                                                             \
-    Handle<type> result = isolate->factory()->New##type(lanes);   \
-    return *result;                                               \
-  }
-
-#define SIMD_LSR_FUNCTION(type, lane_type, lane_bits, lane_count)              \
-  RUNTIME_FUNCTION(Runtime_##type##ShiftRightByScalar) {                       \
-    static const int kLaneCount = lane_count;                                  \
-    HandleScope scope(isolate);                                                \
-    DCHECK(args.length() == 2);                                                \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                                 \
-    CONVERT_SHIFT_ARG_CHECKED(shift, 1);                                       \
-    lane_type lanes[kLaneCount] = {0};                                         \
-    shift &= lane_bits - 1;                                                    \
-    for (int i = 0; i < kLaneCount; i++) {                                     \
-      lanes[i] = static_cast<lane_type>(bit_cast<lane_type>(a->get_lane(i)) >> \
-                                        shift);                                \
-    }                                                                          \
-    Handle<type> result = isolate->factory()->New##type(lanes);                \
-    return *result;                                                            \
-  }
-
-#define SIMD_ASR_FUNCTION(type, lane_type, lane_bits, lane_count)      \
-  RUNTIME_FUNCTION(Runtime_##type##ShiftRightByScalar) {               \
-    static const int kLaneCount = lane_count;                          \
-    HandleScope scope(isolate);                                        \
-    DCHECK(args.length() == 2);                                        \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0);                         \
-    CONVERT_SHIFT_ARG_CHECKED(shift, 1);                               \
-    shift &= lane_bits - 1;                                            \
-    lane_type lanes[kLaneCount];                                       \
-    for (int i = 0; i < kLaneCount; i++) {                             \
-      int64_t shifted = static_cast<int64_t>(a->get_lane(i)) >> shift; \
-      lanes[i] = static_cast<lane_type>(shifted);                      \
-    }                                                                  \
-    Handle<type> result = isolate->factory()->New##type(lanes);        \
-    return *result;                                                    \
-  }
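// The two right-shift flavours above differ only in fill: the unsigned
// variant (LSR) zero-fills, the signed variant (ASR) sign-extends by
// widening to int64_t before shifting; shift counts are masked to the lane
// width. A standalone sketch for 8-bit lanes:
#include <cassert>
#include <cstdint>

int8_t AsrSketch(int8_t v, uint32_t shift) {
  shift &= 7;  // mask to lane_bits - 1, as in the macros above
  return static_cast<int8_t>(static_cast<int64_t>(v) >> shift);
}

uint8_t LsrSketch(uint8_t v, uint32_t shift) {
  shift &= 7;
  return static_cast<uint8_t>(v >> shift);
}

void DemoShifts() {
  assert(AsrSketch(int8_t{-128}, 1) == -64);    // sign bit replicated
  assert(LsrSketch(uint8_t{0x80}, 1) == 0x40);  // zero fill
}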
-
-SIMD_INT_TYPES(SIMD_LSL_FUNCTION)
-SIMD_UINT_TYPES(SIMD_LSL_FUNCTION)
-SIMD_INT_TYPES(SIMD_ASR_FUNCTION)
-SIMD_UINT_TYPES(SIMD_LSR_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Bool-only functions.
-
-#define SIMD_BOOL_TYPES(FUNCTION) \
-  FUNCTION(Bool32x4, 4)           \
-  FUNCTION(Bool16x8, 8)           \
-  FUNCTION(Bool8x16, 16)
-
-#define SIMD_ANY_FUNCTION(type, lane_count)    \
-  RUNTIME_FUNCTION(Runtime_##type##AnyTrue) {  \
-    HandleScope scope(isolate);                \
-    DCHECK(args.length() == 1);                \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
-    bool result = false;                       \
-    for (int i = 0; i < lane_count; i++) {     \
-      if (a->get_lane(i)) {                    \
-        result = true;                         \
-        break;                                 \
-      }                                        \
-    }                                          \
-    return isolate->heap()->ToBoolean(result); \
-  }
-
-#define SIMD_ALL_FUNCTION(type, lane_count)    \
-  RUNTIME_FUNCTION(Runtime_##type##AllTrue) {  \
-    HandleScope scope(isolate);                \
-    DCHECK(args.length() == 1);                \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
-    bool result = true;                        \
-    for (int i = 0; i < lane_count; i++) {     \
-      if (!a->get_lane(i)) {                   \
-        result = false;                        \
-        break;                                 \
-      }                                        \
-    }                                          \
-    return isolate->heap()->ToBoolean(result); \
-  }
-
-SIMD_BOOL_TYPES(SIMD_ANY_FUNCTION)
-SIMD_BOOL_TYPES(SIMD_ALL_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Small Int-only functions.
-
-#define SIMD_SMALL_INT_TYPES(FUNCTION) \
-  FUNCTION(Int16x8, int16_t, 8)        \
-  FUNCTION(Uint16x8, uint16_t, 8)      \
-  FUNCTION(Int8x16, int8_t, 16)        \
-  FUNCTION(Uint8x16, uint8_t, 16)
-
-#define SIMD_ADD_SATURATE_FUNCTION(type, lane_type, lane_count)       \
-  RUNTIME_FUNCTION(Runtime_##type##AddSaturate) {                     \
-    HandleScope scope(isolate);                                       \
-    SIMD_BINARY_OP(type, lane_type, lane_count, AddSaturate, result); \
-    return *result;                                                   \
-  }
-
-#define BINARY_SUB(a, b) (a) - (b)
-#define SIMD_SUB_SATURATE_FUNCTION(type, lane_type, lane_count)       \
-  RUNTIME_FUNCTION(Runtime_##type##SubSaturate) {                     \
-    HandleScope scope(isolate);                                       \
-    SIMD_BINARY_OP(type, lane_type, lane_count, SubSaturate, result); \
-    return *result;                                                   \
-  }
-
-SIMD_SMALL_INT_TYPES(SIMD_ADD_SATURATE_FUNCTION)
-SIMD_SMALL_INT_TYPES(SIMD_SUB_SATURATE_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Numeric functions.
-
-#define SIMD_NUMERIC_TYPES(FUNCTION) \
-  FUNCTION(Float32x4, float, 4)      \
-  FUNCTION(Int32x4, int32_t, 4)      \
-  FUNCTION(Uint32x4, uint32_t, 4)    \
-  FUNCTION(Int16x8, int16_t, 8)      \
-  FUNCTION(Uint16x8, uint16_t, 8)    \
-  FUNCTION(Int8x16, int8_t, 16)      \
-  FUNCTION(Uint8x16, uint8_t, 16)
-
-#define BINARY_ADD(a, b) (a) + (b)
-#define SIMD_ADD_FUNCTION(type, lane_type, lane_count)               \
-  RUNTIME_FUNCTION(Runtime_##type##Add) {                            \
-    HandleScope scope(isolate);                                      \
-    SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_ADD, result); \
-    return *result;                                                  \
-  }
-
-#define BINARY_SUB(a, b) (a) - (b)
-#define SIMD_SUB_FUNCTION(type, lane_type, lane_count)               \
-  RUNTIME_FUNCTION(Runtime_##type##Sub) {                            \
-    HandleScope scope(isolate);                                      \
-    SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_SUB, result); \
-    return *result;                                                  \
-  }
-
-#define BINARY_MUL(a, b) (a) * (b)
-#define SIMD_MUL_FUNCTION(type, lane_type, lane_count)               \
-  RUNTIME_FUNCTION(Runtime_##type##Mul) {                            \
-    HandleScope scope(isolate);                                      \
-    SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_MUL, result); \
-    return *result;                                                  \
-  }
-
-#define SIMD_MIN_FUNCTION(type, lane_type, lane_count)        \
-  RUNTIME_FUNCTION(Runtime_##type##Min) {                     \
-    HandleScope scope(isolate);                               \
-    SIMD_BINARY_OP(type, lane_type, lane_count, Min, result); \
-    return *result;                                           \
-  }
-
-#define SIMD_MAX_FUNCTION(type, lane_type, lane_count)        \
-  RUNTIME_FUNCTION(Runtime_##type##Max) {                     \
-    HandleScope scope(isolate);                               \
-    SIMD_BINARY_OP(type, lane_type, lane_count, Max, result); \
-    return *result;                                           \
-  }
-
-SIMD_NUMERIC_TYPES(SIMD_ADD_FUNCTION)
-SIMD_NUMERIC_TYPES(SIMD_SUB_FUNCTION)
-SIMD_NUMERIC_TYPES(SIMD_MUL_FUNCTION)
-SIMD_NUMERIC_TYPES(SIMD_MIN_FUNCTION)
-SIMD_NUMERIC_TYPES(SIMD_MAX_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Relational functions.
-
-#define SIMD_RELATIONAL_TYPES(FUNCTION) \
-  FUNCTION(Float32x4, Bool32x4, 4)      \
-  FUNCTION(Int32x4, Bool32x4, 4)        \
-  FUNCTION(Uint32x4, Bool32x4, 4)       \
-  FUNCTION(Int16x8, Bool16x8, 8)        \
-  FUNCTION(Uint16x8, Bool16x8, 8)       \
-  FUNCTION(Int8x16, Bool8x16, 16)       \
-  FUNCTION(Uint8x16, Bool8x16, 16)
-
-#define SIMD_EQUALITY_TYPES(FUNCTION) \
-  SIMD_RELATIONAL_TYPES(FUNCTION)     \
-  FUNCTION(Bool32x4, Bool32x4, 4)     \
-  FUNCTION(Bool16x8, Bool16x8, 8)     \
-  FUNCTION(Bool8x16, Bool8x16, 16)
-
-#define SIMD_EQUAL_FUNCTION(type, bool_type, lane_count)               \
-  RUNTIME_FUNCTION(Runtime_##type##Equal) {                            \
-    HandleScope scope(isolate);                                        \
-    SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, ==, result); \
-    return *result;                                                    \
-  }
-
-#define SIMD_NOT_EQUAL_FUNCTION(type, bool_type, lane_count)           \
-  RUNTIME_FUNCTION(Runtime_##type##NotEqual) {                         \
-    HandleScope scope(isolate);                                        \
-    SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, !=, result); \
-    return *result;                                                    \
-  }
-
-SIMD_EQUALITY_TYPES(SIMD_EQUAL_FUNCTION)
-SIMD_EQUALITY_TYPES(SIMD_NOT_EQUAL_FUNCTION)
-
-#define SIMD_LESS_THAN_FUNCTION(type, bool_type, lane_count)          \
-  RUNTIME_FUNCTION(Runtime_##type##LessThan) {                        \
-    HandleScope scope(isolate);                                       \
-    SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, <, result); \
-    return *result;                                                   \
-  }
-
-#define SIMD_LESS_THAN_OR_EQUAL_FUNCTION(type, bool_type, lane_count)  \
-  RUNTIME_FUNCTION(Runtime_##type##LessThanOrEqual) {                  \
-    HandleScope scope(isolate);                                        \
-    SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, <=, result); \
-    return *result;                                                    \
-  }
-
-#define SIMD_GREATER_THAN_FUNCTION(type, bool_type, lane_count)       \
-  RUNTIME_FUNCTION(Runtime_##type##GreaterThan) {                     \
-    HandleScope scope(isolate);                                       \
-    SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, >, result); \
-    return *result;                                                   \
-  }
-
-#define SIMD_GREATER_THAN_OR_EQUAL_FUNCTION(type, bool_type, lane_count) \
-  RUNTIME_FUNCTION(Runtime_##type##GreaterThanOrEqual) {                 \
-    HandleScope scope(isolate);                                          \
-    SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, >=, result);   \
-    return *result;                                                      \
-  }
-
-SIMD_RELATIONAL_TYPES(SIMD_LESS_THAN_FUNCTION)
-SIMD_RELATIONAL_TYPES(SIMD_LESS_THAN_OR_EQUAL_FUNCTION)
-SIMD_RELATIONAL_TYPES(SIMD_GREATER_THAN_FUNCTION)
-SIMD_RELATIONAL_TYPES(SIMD_GREATER_THAN_OR_EQUAL_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Logical functions.
-
-#define SIMD_LOGICAL_TYPES(FUNCTION)    \
-  FUNCTION(Int32x4, int32_t, 4, _INT)   \
-  FUNCTION(Uint32x4, uint32_t, 4, _INT) \
-  FUNCTION(Int16x8, int16_t, 8, _INT)   \
-  FUNCTION(Uint16x8, uint16_t, 8, _INT) \
-  FUNCTION(Int8x16, int8_t, 16, _INT)   \
-  FUNCTION(Uint8x16, uint8_t, 16, _INT) \
-  FUNCTION(Bool32x4, bool, 4, _BOOL)    \
-  FUNCTION(Bool16x8, bool, 8, _BOOL)    \
-  FUNCTION(Bool8x16, bool, 16, _BOOL)
-
-#define BINARY_AND_INT(a, b) (a) & (b)
-#define BINARY_AND_BOOL(a, b) (a) && (b)
-#define SIMD_AND_FUNCTION(type, lane_type, lane_count, op)               \
-  RUNTIME_FUNCTION(Runtime_##type##And) {                                \
-    HandleScope scope(isolate);                                          \
-    SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_AND##op, result); \
-    return *result;                                                      \
-  }
-
-#define BINARY_OR_INT(a, b) (a) | (b)
-#define BINARY_OR_BOOL(a, b) (a) || (b)
-#define SIMD_OR_FUNCTION(type, lane_type, lane_count, op)               \
-  RUNTIME_FUNCTION(Runtime_##type##Or) {                                \
-    HandleScope scope(isolate);                                         \
-    SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_OR##op, result); \
-    return *result;                                                     \
-  }
-
-#define BINARY_XOR_INT(a, b) (a) ^ (b)
-#define BINARY_XOR_BOOL(a, b) (a) != (b)
-#define SIMD_XOR_FUNCTION(type, lane_type, lane_count, op)               \
-  RUNTIME_FUNCTION(Runtime_##type##Xor) {                                \
-    HandleScope scope(isolate);                                          \
-    SIMD_BINARY_OP(type, lane_type, lane_count, BINARY_XOR##op, result); \
-    return *result;                                                      \
-  }
-
-#define UNARY_NOT_INT ~
-#define UNARY_NOT_BOOL !
-#define SIMD_NOT_FUNCTION(type, lane_type, lane_count, op)             \
-  RUNTIME_FUNCTION(Runtime_##type##Not) {                              \
-    HandleScope scope(isolate);                                        \
-    SIMD_UNARY_OP(type, lane_type, lane_count, UNARY_NOT##op, result); \
-    return *result;                                                    \
-  }
-
-SIMD_LOGICAL_TYPES(SIMD_AND_FUNCTION)
-SIMD_LOGICAL_TYPES(SIMD_OR_FUNCTION)
-SIMD_LOGICAL_TYPES(SIMD_XOR_FUNCTION)
-SIMD_LOGICAL_TYPES(SIMD_NOT_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Select functions.
-
-#define SIMD_SELECT_TYPES(FUNCTION)         \
-  FUNCTION(Float32x4, float, Bool32x4, 4)   \
-  FUNCTION(Int32x4, int32_t, Bool32x4, 4)   \
-  FUNCTION(Uint32x4, uint32_t, Bool32x4, 4) \
-  FUNCTION(Int16x8, int16_t, Bool16x8, 8)   \
-  FUNCTION(Uint16x8, uint16_t, Bool16x8, 8) \
-  FUNCTION(Int8x16, int8_t, Bool8x16, 16)   \
-  FUNCTION(Uint8x16, uint8_t, Bool8x16, 16)
-
-#define SIMD_SELECT_FUNCTION(type, lane_type, bool_type, lane_count)  \
-  RUNTIME_FUNCTION(Runtime_##type##Select) {                          \
-    static const int kLaneCount = lane_count;                         \
-    HandleScope scope(isolate);                                       \
-    DCHECK(args.length() == 3);                                       \
-    CONVERT_SIMD_ARG_HANDLE_THROW(bool_type, mask, 0);                \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 1);                        \
-    CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 2);                        \
-    lane_type lanes[kLaneCount];                                      \
-    for (int i = 0; i < kLaneCount; i++) {                            \
-      lanes[i] = mask->get_lane(i) ? a->get_lane(i) : b->get_lane(i); \
-    }                                                                 \
-    Handle<type> result = isolate->factory()->New##type(lanes);       \
-    return *result;                                                   \
-  }
-
-SIMD_SELECT_TYPES(SIMD_SELECT_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Signed / unsigned functions.
-
-#define SIMD_SIGNED_TYPES(FUNCTION) \
-  FUNCTION(Float32x4, float, 4)     \
-  FUNCTION(Int32x4, int32_t, 4)     \
-  FUNCTION(Int16x8, int16_t, 8)     \
-  FUNCTION(Int8x16, int8_t, 16)
-
-#define SIMD_NEG_FUNCTION(type, lane_type, lane_count)     \
-  RUNTIME_FUNCTION(Runtime_##type##Neg) {                  \
-    HandleScope scope(isolate);                            \
-    SIMD_UNARY_OP(type, lane_type, lane_count, -, result); \
-    return *result;                                        \
-  }
-
-SIMD_SIGNED_TYPES(SIMD_NEG_FUNCTION)
-
-//-------------------------------------------------------------------
-
-// Casting functions.
-
-#define SIMD_FROM_TYPES(FUNCTION)                   \
-  FUNCTION(Float32x4, float, 4, Int32x4, int32_t)   \
-  FUNCTION(Float32x4, float, 4, Uint32x4, uint32_t) \
-  FUNCTION(Int32x4, int32_t, 4, Float32x4, float)   \
-  FUNCTION(Int32x4, int32_t, 4, Uint32x4, uint32_t) \
-  FUNCTION(Uint32x4, uint32_t, 4, Float32x4, float) \
-  FUNCTION(Uint32x4, uint32_t, 4, Int32x4, int32_t) \
-  FUNCTION(Int16x8, int16_t, 8, Uint16x8, uint16_t) \
-  FUNCTION(Uint16x8, uint16_t, 8, Int16x8, int16_t) \
-  FUNCTION(Int8x16, int8_t, 16, Uint8x16, uint8_t)  \
-  FUNCTION(Uint8x16, uint8_t, 16, Int8x16, int8_t)
-
-#define SIMD_FROM_FUNCTION(type, lane_type, lane_count, from_type, from_ctype) \
-  RUNTIME_FUNCTION(Runtime_##type##From##from_type) {                          \
-    static const int kLaneCount = lane_count;                                  \
-    HandleScope scope(isolate);                                                \
-    DCHECK(args.length() == 1);                                                \
-    CONVERT_SIMD_ARG_HANDLE_THROW(from_type, a, 0);                            \
-    lane_type lanes[kLaneCount];                                               \
-    for (int i = 0; i < kLaneCount; i++) {                                     \
-      from_ctype a_value = a->get_lane(i);                                     \
-      if (a_value != a_value || !CanCast<lane_type>(a_value)) {                \
-        THROW_NEW_ERROR_RETURN_FAILURE(                                        \
-            isolate, NewRangeError(MessageTemplate::kInvalidSimdLaneValue));   \
-      }                                                                        \
-      lanes[i] = static_cast<lane_type>(a_value);                              \
-    }                                                                          \
-    Handle<type> result = isolate->factory()->New##type(lanes);                \
-    return *result;                                                            \
-  }
-
-SIMD_FROM_TYPES(SIMD_FROM_FUNCTION)
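-// The guard in SIMD_FROM_FUNCTION is worth spelling out: `a_value != a_value`
-// is the usual self-comparison test for NaN, and CanCast<lane_type>() rejects
-// values outside the target lane type's range. Illustrative outcomes:
-//   Int32x4.fromFloat32x4 on a lane holding NaN or 3e9 -> kInvalidSimdLaneValue
-//   Int32x4.fromFloat32x4 on a lane holding 1.5        -> lane value 1 (truncation)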
-
-#define SIMD_FROM_BITS_TYPES(FUNCTION)       \
-  FUNCTION(Float32x4, float, 4, Int32x4)     \
-  FUNCTION(Float32x4, float, 4, Uint32x4)    \
-  FUNCTION(Float32x4, float, 4, Int16x8)     \
-  FUNCTION(Float32x4, float, 4, Uint16x8)    \
-  FUNCTION(Float32x4, float, 4, Int8x16)     \
-  FUNCTION(Float32x4, float, 4, Uint8x16)    \
-  FUNCTION(Int32x4, int32_t, 4, Float32x4)   \
-  FUNCTION(Int32x4, int32_t, 4, Uint32x4)    \
-  FUNCTION(Int32x4, int32_t, 4, Int16x8)     \
-  FUNCTION(Int32x4, int32_t, 4, Uint16x8)    \
-  FUNCTION(Int32x4, int32_t, 4, Int8x16)     \
-  FUNCTION(Int32x4, int32_t, 4, Uint8x16)    \
-  FUNCTION(Uint32x4, uint32_t, 4, Float32x4) \
-  FUNCTION(Uint32x4, uint32_t, 4, Int32x4)   \
-  FUNCTION(Uint32x4, uint32_t, 4, Int16x8)   \
-  FUNCTION(Uint32x4, uint32_t, 4, Uint16x8)  \
-  FUNCTION(Uint32x4, uint32_t, 4, Int8x16)   \
-  FUNCTION(Uint32x4, uint32_t, 4, Uint8x16)  \
-  FUNCTION(Int16x8, int16_t, 8, Float32x4)   \
-  FUNCTION(Int16x8, int16_t, 8, Int32x4)     \
-  FUNCTION(Int16x8, int16_t, 8, Uint32x4)    \
-  FUNCTION(Int16x8, int16_t, 8, Uint16x8)    \
-  FUNCTION(Int16x8, int16_t, 8, Int8x16)     \
-  FUNCTION(Int16x8, int16_t, 8, Uint8x16)    \
-  FUNCTION(Uint16x8, uint16_t, 8, Float32x4) \
-  FUNCTION(Uint16x8, uint16_t, 8, Int32x4)   \
-  FUNCTION(Uint16x8, uint16_t, 8, Uint32x4)  \
-  FUNCTION(Uint16x8, uint16_t, 8, Int16x8)   \
-  FUNCTION(Uint16x8, uint16_t, 8, Int8x16)   \
-  FUNCTION(Uint16x8, uint16_t, 8, Uint8x16)  \
-  FUNCTION(Int8x16, int8_t, 16, Float32x4)   \
-  FUNCTION(Int8x16, int8_t, 16, Int32x4)     \
-  FUNCTION(Int8x16, int8_t, 16, Uint32x4)    \
-  FUNCTION(Int8x16, int8_t, 16, Int16x8)     \
-  FUNCTION(Int8x16, int8_t, 16, Uint16x8)    \
-  FUNCTION(Int8x16, int8_t, 16, Uint8x16)    \
-  FUNCTION(Uint8x16, uint8_t, 16, Float32x4) \
-  FUNCTION(Uint8x16, uint8_t, 16, Int32x4)   \
-  FUNCTION(Uint8x16, uint8_t, 16, Uint32x4)  \
-  FUNCTION(Uint8x16, uint8_t, 16, Int16x8)   \
-  FUNCTION(Uint8x16, uint8_t, 16, Uint16x8)  \
-  FUNCTION(Uint8x16, uint8_t, 16, Int8x16)
-
-#define SIMD_FROM_BITS_FUNCTION(type, lane_type, lane_count, from_type) \
-  RUNTIME_FUNCTION(Runtime_##type##From##from_type##Bits) {             \
-    static const int kLaneCount = lane_count;                           \
-    HandleScope scope(isolate);                                         \
-    DCHECK(args.length() == 1);                                         \
-    CONVERT_SIMD_ARG_HANDLE_THROW(from_type, a, 0);                     \
-    lane_type lanes[kLaneCount];                                        \
-    a->CopyBits(lanes);                                                 \
-    Handle<type> result = isolate->factory()->New##type(lanes);         \
-    return *result;                                                     \
-  }
-
-SIMD_FROM_BITS_TYPES(SIMD_FROM_BITS_FUNCTION)
-
-
-//-------------------------------------------------------------------
-
-// Load and Store functions.
-
-#define SIMD_LOADN_STOREN_TYPES(FUNCTION) \
-  FUNCTION(Float32x4, float, 4)           \
-  FUNCTION(Int32x4, int32_t, 4)           \
-  FUNCTION(Uint32x4, uint32_t, 4)
-
-#define SIMD_COERCE_INDEX(name, i)                                            \
-  Handle<Object> length_object, number_object;                                \
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(                                         \
-      isolate, length_object, Object::ToLength(isolate, args.at<Object>(i))); \
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_object,                  \
-                                     Object::ToNumber(args.at<Object>(i)));   \
-  if (number_object->Number() != length_object->Number()) {                   \
-    THROW_NEW_ERROR_RETURN_FAILURE(                                           \
-        isolate, NewTypeError(MessageTemplate::kInvalidSimdIndex));           \
-  }                                                                           \
-  int32_t name = number_object->Number();
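-// SIMD_COERCE_INDEX accepts an index only when its ToLength and ToNumber
-// coercions agree, i.e. when it is already a non-negative integral Number.
-// Illustrative:
-//   index 2    -> ToLength 2, ToNumber 2    -> accepted as 2
-//   index 2.5  -> ToLength 2, ToNumber 2.5  -> kInvalidSimdIndex TypeError
-//   index -1   -> ToLength 0, ToNumber -1   -> kInvalidSimdIndex TypeError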
-
-// Common Load and Store Functions
-
-#define SIMD_LOAD(type, lane_type, lane_count, count, result)        \
-  static const int kLaneCount = lane_count;                          \
-  DCHECK(args.length() == 2);                                        \
-  CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0);            \
-  SIMD_COERCE_INDEX(index, 1);                                       \
-  size_t bpe = tarray->element_size();                               \
-  uint32_t bytes = count * sizeof(lane_type);                        \
-  size_t byte_length = NumberToSize(tarray->byte_length());          \
-  if (index < 0 || index * bpe + bytes > byte_length) {              \
-    THROW_NEW_ERROR_RETURN_FAILURE(                                  \
-        isolate, NewRangeError(MessageTemplate::kInvalidSimdIndex)); \
-  }                                                                  \
-  size_t tarray_offset = NumberToSize(tarray->byte_offset());        \
-  uint8_t* tarray_base =                                             \
-      static_cast<uint8_t*>(tarray->GetBuffer()->backing_store()) +  \
-      tarray_offset;                                                 \
-  lane_type lanes[kLaneCount] = {0};                                 \
-  memcpy(lanes, tarray_base + index * bpe, bytes);                   \
-  Handle<type> result = isolate->factory()->New##type(lanes);
-
-#define SIMD_STORE(type, lane_type, lane_count, count, a)            \
-  static const int kLaneCount = lane_count;                          \
-  DCHECK(args.length() == 3);                                        \
-  CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0);            \
-  CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 2);                         \
-  SIMD_COERCE_INDEX(index, 1);                                       \
-  size_t bpe = tarray->element_size();                               \
-  uint32_t bytes = count * sizeof(lane_type);                        \
-  size_t byte_length = NumberToSize(tarray->byte_length());          \
-  if (index < 0 || byte_length < index * bpe + bytes) {              \
-    THROW_NEW_ERROR_RETURN_FAILURE(                                  \
-        isolate, NewRangeError(MessageTemplate::kInvalidSimdIndex)); \
-  }                                                                  \
-  size_t tarray_offset = NumberToSize(tarray->byte_offset());        \
-  uint8_t* tarray_base =                                             \
-      static_cast<uint8_t*>(tarray->GetBuffer()->backing_store()) +  \
-      tarray_offset;                                                 \
-  lane_type lanes[kLaneCount];                                       \
-  for (int i = 0; i < kLaneCount; i++) {                             \
-    lanes[i] = a->get_lane(i);                                       \
-  }                                                                  \
-  memcpy(tarray_base + index * bpe, lanes, bytes);
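-// In both macros the bounds check is byte-based: an access of `count` lanes
-// touches count * sizeof(lane_type) bytes starting at index * bpe. So, for
-// example, a two-lane load from a Float32Array stays valid only while
-// index * 4 + 8 <= byte_length.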
-
-#define SIMD_LOAD_FUNCTION(type, lane_type, lane_count)         \
-  RUNTIME_FUNCTION(Runtime_##type##Load) {                      \
-    HandleScope scope(isolate);                                 \
-    SIMD_LOAD(type, lane_type, lane_count, lane_count, result); \
-    return *result;                                             \
-  }
-
-
-#define SIMD_LOAD1_FUNCTION(type, lane_type, lane_count) \
-  RUNTIME_FUNCTION(Runtime_##type##Load1) {              \
-    HandleScope scope(isolate);                          \
-    SIMD_LOAD(type, lane_type, lane_count, 1, result);   \
-    return *result;                                      \
-  }
-
-
-#define SIMD_LOAD2_FUNCTION(type, lane_type, lane_count) \
-  RUNTIME_FUNCTION(Runtime_##type##Load2) {              \
-    HandleScope scope(isolate);                          \
-    SIMD_LOAD(type, lane_type, lane_count, 2, result);   \
-    return *result;                                      \
-  }
-
-
-#define SIMD_LOAD3_FUNCTION(type, lane_type, lane_count) \
-  RUNTIME_FUNCTION(Runtime_##type##Load3) {              \
-    HandleScope scope(isolate);                          \
-    SIMD_LOAD(type, lane_type, lane_count, 3, result);   \
-    return *result;                                      \
-  }
-
-
-#define SIMD_STORE_FUNCTION(type, lane_type, lane_count)    \
-  RUNTIME_FUNCTION(Runtime_##type##Store) {                 \
-    HandleScope scope(isolate);                             \
-    SIMD_STORE(type, lane_type, lane_count, lane_count, a); \
-    return *a;                                              \
-  }
-
-
-#define SIMD_STORE1_FUNCTION(type, lane_type, lane_count) \
-  RUNTIME_FUNCTION(Runtime_##type##Store1) {              \
-    HandleScope scope(isolate);                           \
-    SIMD_STORE(type, lane_type, lane_count, 1, a);        \
-    return *a;                                            \
-  }
-
-
-#define SIMD_STORE2_FUNCTION(type, lane_type, lane_count) \
-  RUNTIME_FUNCTION(Runtime_##type##Store2) {              \
-    HandleScope scope(isolate);                           \
-    SIMD_STORE(type, lane_type, lane_count, 2, a);        \
-    return *a;                                            \
-  }
-
-
-#define SIMD_STORE3_FUNCTION(type, lane_type, lane_count) \
-  RUNTIME_FUNCTION(Runtime_##type##Store3) {              \
-    HandleScope scope(isolate);                           \
-    SIMD_STORE(type, lane_type, lane_count, 3, a);        \
-    return *a;                                            \
-  }
-
-
-SIMD_NUMERIC_TYPES(SIMD_LOAD_FUNCTION)
-SIMD_LOADN_STOREN_TYPES(SIMD_LOAD1_FUNCTION)
-SIMD_LOADN_STOREN_TYPES(SIMD_LOAD2_FUNCTION)
-SIMD_LOADN_STOREN_TYPES(SIMD_LOAD3_FUNCTION)
-SIMD_NUMERIC_TYPES(SIMD_STORE_FUNCTION)
-SIMD_LOADN_STOREN_TYPES(SIMD_STORE1_FUNCTION)
-SIMD_LOADN_STOREN_TYPES(SIMD_STORE2_FUNCTION)
-SIMD_LOADN_STOREN_TYPES(SIMD_STORE3_FUNCTION)
-
-//-------------------------------------------------------------------
-
-}  // namespace internal
-}  // namespace v8
diff --git a/src/runtime/runtime-strings.cc b/src/runtime/runtime-strings.cc
index 328bdce..3a43591 100644
--- a/src/runtime/runtime-strings.cc
+++ b/src/runtime/runtime-strings.cc
@@ -5,6 +5,9 @@
 #include "src/runtime/runtime-utils.h"
 
 #include "src/arguments.h"
+#include "src/conversions.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
 #include "src/regexp/jsregexp-inl.h"
 #include "src/string-builder.h"
 #include "src/string-search.h"
@@ -12,6 +15,44 @@
 namespace v8 {
 namespace internal {
 
+RUNTIME_FUNCTION(Runtime_GetSubstitution) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(4, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, matched, 0);
+  CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+  CONVERT_SMI_ARG_CHECKED(position, 2);
+  CONVERT_ARG_HANDLE_CHECKED(String, replacement, 3);
+
+  // A simple match without captures.
+  class SimpleMatch : public String::Match {
+   public:
+    SimpleMatch(Handle<String> match, Handle<String> prefix,
+                Handle<String> suffix)
+        : match_(match), prefix_(prefix), suffix_(suffix) {}
+
+    Handle<String> GetMatch() override { return match_; }
+    MaybeHandle<String> GetCapture(int i, bool* capture_exists) override {
+      *capture_exists = false;
+      return match_;  // Return arbitrary string handle.
+    }
+    Handle<String> GetPrefix() override { return prefix_; }
+    Handle<String> GetSuffix() override { return suffix_; }
+    int CaptureCount() override { return 0; }
+
+   private:
+    Handle<String> match_, prefix_, suffix_;
+  };
+
+  Handle<String> prefix =
+      isolate->factory()->NewSubString(subject, 0, position);
+  Handle<String> suffix = isolate->factory()->NewSubString(
+      subject, position + matched->length(), subject->length());
+  SimpleMatch match(matched, prefix, suffix);
+
+  RETURN_RESULT_OR_FAILURE(
+      isolate, String::GetSubstitution(isolate, &match, replacement));
+}
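+// Sketch of what the new runtime call computes, assuming it is invoked for a
+// string replace with no capture groups (values illustrative):
+//   matched = "X", subject = "aXc", position = 1, replacement = "[$`|$&|$']"
+//   -> prefix "a", suffix "c" -> result "[a|X|c]"
+// $` expands to the text before the match, $& to the match, $' to the rest.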
+
 // This may return an empty MaybeHandle if an exception is thrown or
 // we abort due to reaching the recursion limit.
 MaybeHandle<String> StringReplaceOneCharWithString(
@@ -60,7 +101,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, search, 1);
   CONVERT_ARG_HANDLE_CHECKED(String, replace, 2);
@@ -86,23 +127,38 @@
   return isolate->StackOverflow();
 }
 
-
+// ES6 #sec-string.prototype.indexof
+// String.prototype.indexOf(searchString [, position])
 RUNTIME_FUNCTION(Runtime_StringIndexOf) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-  return String::IndexOf(isolate, args.at<Object>(0), args.at<Object>(1),
-                         args.at<Object>(2));
+  DCHECK_EQ(3, args.length());
+  return String::IndexOf(isolate, args.at(0), args.at(1), args.at(2));
+}
+
+// ES6 #sec-string.prototype.indexof
+// String.prototype.indexOf(searchString, position)
+// Fast version that assumes the incoming arguments are already a String
+// receiver, a String search value and a Smi index, and therefore performs
+// no conversions on them.
+RUNTIME_FUNCTION(Runtime_StringIndexOfUnchecked) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  Handle<String> receiver_string = args.at<String>(0);
+  Handle<String> search_string = args.at<String>(1);
+  int index = std::min(std::max(args.smi_at(2), 0), receiver_string->length());
+
+  return Smi::FromInt(String::IndexOf(isolate, receiver_string, search_string,
+                                      static_cast<uint32_t>(index)));
 }
 
 RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
   HandleScope handle_scope(isolate);
-  return String::LastIndexOf(isolate, args.at<Object>(0), args.at<Object>(1),
+  return String::LastIndexOf(isolate, args.at(0), args.at(1),
                              isolate->factory()->undefined_value());
 }
 
 RUNTIME_FUNCTION(Runtime_SubString) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
   int start, end;
@@ -134,7 +190,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringAdd) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, obj1, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, obj2, 1);
   isolate->counters()->string_add_runtime()->Increment();
@@ -151,7 +207,7 @@
 
 RUNTIME_FUNCTION(Runtime_InternalizeString) {
   HandleScope handles(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
   return *isolate->factory()->InternalizeString(string);
 }
@@ -159,7 +215,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) {
   HandleScope handle_scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
 
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, i, Uint32, args[1]);
@@ -200,7 +256,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
   int32_t array_length;
   if (!args[1]->ToInt32(&array_length)) {
@@ -247,6 +303,9 @@
   if (length == -1) {
     return isolate->Throw(isolate->heap()->illegal_argument_string());
   }
+  if (length == 0) {
+    return isolate->heap()->empty_string();
+  }
 
   if (one_byte) {
     Handle<SeqOneByteString> answer;
@@ -270,7 +329,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
   int32_t array_length;
   if (!args[1]->ToInt32(&array_length)) {
@@ -411,7 +470,7 @@
 
 RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArray, elements_array, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
   CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
@@ -529,7 +588,7 @@
 // For example, "foo" => ["f", "o", "o"].
 RUNTIME_FUNCTION(Runtime_StringToArray) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
 
@@ -573,298 +632,6 @@
 }
 
 
-static inline bool ToUpperOverflows(uc32 character) {
-  // y with umlauts and the micro sign are the only characters that stop
-  // fitting into one byte when converting to uppercase.
-  static const uc32 yuml_code = 0xff;
-  static const uc32 micro_code = 0xb5;
-  return (character == yuml_code || character == micro_code);
-}
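-// Concretely: 'Ăż' (U+00FF) uppercases to 'Ÿ' (U+0178) and 'µ' (U+00B5) to
-// 'Μ' (U+039C); both results fall outside the one-byte Latin-1 range even
-// though both inputs fit in it.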
-
-
-template <class Converter>
-MUST_USE_RESULT static Object* ConvertCaseHelper(
-    Isolate* isolate, String* string, SeqString* result, int result_length,
-    unibrow::Mapping<Converter, 128>* mapping) {
-  DisallowHeapAllocation no_gc;
-  // We try this twice, once with the assumption that the result is no longer
-  // than the input and, if that assumption breaks, again with the exact
-  // length.  This may not be pretty, but it is nicer than what was here before
-  // and I hereby claim my vaffel-is.
-  //
-  // NOTE: This assumes that the upper/lower case of an ASCII
-  // character is also ASCII.  This is currently the case, but it
-  // might break in the future if we implement more context and locale
-  // dependent upper/lower conversions.
-  bool has_changed_character = false;
-
-  // Convert all characters to upper case, assuming that they will fit
-  // in the buffer
-  StringCharacterStream stream(string);
-  unibrow::uchar chars[Converter::kMaxWidth];
-  // We can assume that the string is not empty
-  uc32 current = stream.GetNext();
-  bool ignore_overflow = Converter::kIsToLower || result->IsSeqTwoByteString();
-  for (int i = 0; i < result_length;) {
-    bool has_next = stream.HasMore();
-    uc32 next = has_next ? stream.GetNext() : 0;
-    int char_length = mapping->get(current, next, chars);
-    if (char_length == 0) {
-      // The case conversion of this character is the character itself.
-      result->Set(i, current);
-      i++;
-    } else if (char_length == 1 &&
-               (ignore_overflow || !ToUpperOverflows(current))) {
-      // Common case: converting the letter resulted in one character.
-      DCHECK(static_cast<uc32>(chars[0]) != current);
-      result->Set(i, chars[0]);
-      has_changed_character = true;
-      i++;
-    } else if (result_length == string->length()) {
-      bool overflows = ToUpperOverflows(current);
-      // We've assumed that the result would be as long as the
-      // input but here is a character that converts to several
-      // characters.  No matter, we calculate the exact length
-      // of the result and try the whole thing again.
-      //
-      // Note that this leaves room for optimization.  We could just
-      // memcpy what we already have to the result string.  Also, since
-      // the result string is the last object allocated, we could
-      // "realloc" it and probably, in the vast majority of cases,
-      // extend the existing string to be able to hold the full
-      // result.
-      int next_length = 0;
-      if (has_next) {
-        next_length = mapping->get(next, 0, chars);
-        if (next_length == 0) next_length = 1;
-      }
-      int current_length = i + char_length + next_length;
-      while (stream.HasMore()) {
-        current = stream.GetNext();
-        overflows |= ToUpperOverflows(current);
-        // NOTE: we use 0 as the next character here because, while
-        // the next character may affect what a character converts to,
-        // it does not in any case affect the length of what it converts
-        // to.
-        int char_length = mapping->get(current, 0, chars);
-        if (char_length == 0) char_length = 1;
-        current_length += char_length;
-        if (current_length > String::kMaxLength) {
-          AllowHeapAllocation allocate_error_and_return;
-          THROW_NEW_ERROR_RETURN_FAILURE(isolate,
-                                         NewInvalidStringLengthError());
-        }
-      }
-      // Try again with the real length.  Return a negative length if we
-      // need to allocate a two-byte string for the uppercase result.
-      return (overflows && !ignore_overflow) ? Smi::FromInt(-current_length)
-                                             : Smi::FromInt(current_length);
-    } else {
-      for (int j = 0; j < char_length; j++) {
-        result->Set(i, chars[j]);
-        i++;
-      }
-      has_changed_character = true;
-    }
-    current = next;
-  }
-  if (has_changed_character) {
-    return result;
-  } else {
-    // If we didn't actually change anything in doing the conversion
-    // we simply return the original string and let the converted copy
-    // become garbage; there is no reason to keep two identical strings
-    // alive.
-    return string;
-  }
-}
-
-
-static const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
-static const uintptr_t kAsciiMask = kOneInEveryByte << 7;
-
-// Given a word and two range boundaries returns a word with high bit
-// set in every byte iff the corresponding input byte was strictly in
-// the range (m, n). All the other bits in the result are cleared.
-// This function is only useful when it can be inlined and the
-// boundaries are statically known.
-// Requires: all bytes in the input word and the boundaries must be
-// ASCII (less than 0x7F).
-static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
-  // Use strict inequalities since in edge cases the function could be
-  // further simplified.
-  DCHECK(0 < m && m < n);
-  // Has high bit set in every w byte less than n.
-  uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
-  // Has high bit set in every w byte greater than m.
-  uintptr_t tmp2 = w + kOneInEveryByte * (0x7F - m);
-  return (tmp1 & tmp2 & (kOneInEveryByte * 0x80));
-}
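-// Worked example for the to-lower boundaries m = 'A' - 1 = 0x40 and
-// n = 'Z' + 1 = 0x5B (so 0x7F + n = 0xDA and 0x7F - m = 0x3F), looking at a
-// single byte of w (illustrative):
-//   'G' (0x47): 0xDA - 0x47 = 0x93 and 0x47 + 0x3F = 0x86 both have bit 7
-//               set, so the mask byte is 0x80 -> needs conversion.
-//   'g' (0x67): 0xDA - 0x67 = 0x73 has bit 7 clear, so the mask byte is 0.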
-
-
-#ifdef DEBUG
-static bool CheckFastAsciiConvert(char* dst, const char* src, int length,
-                                  bool changed, bool is_to_lower) {
-  bool expected_changed = false;
-  for (int i = 0; i < length; i++) {
-    if (dst[i] == src[i]) continue;
-    expected_changed = true;
-    if (is_to_lower) {
-      DCHECK('A' <= src[i] && src[i] <= 'Z');
-      DCHECK(dst[i] == src[i] + ('a' - 'A'));
-    } else {
-      DCHECK('a' <= src[i] && src[i] <= 'z');
-      DCHECK(dst[i] == src[i] - ('a' - 'A'));
-    }
-  }
-  return (expected_changed == changed);
-}
-#endif
-
-
-template <class Converter>
-static bool FastAsciiConvert(char* dst, const char* src, int length,
-                             bool* changed_out) {
-#ifdef DEBUG
-  char* saved_dst = dst;
-  const char* saved_src = src;
-#endif
-  DisallowHeapAllocation no_gc;
-  // We rely on the distance between upper and lower case letters
-  // being a known power of 2.
-  DCHECK('a' - 'A' == (1 << 5));
-  // Boundaries for the range of input characters that require conversion.
-  static const char lo = Converter::kIsToLower ? 'A' - 1 : 'a' - 1;
-  static const char hi = Converter::kIsToLower ? 'Z' + 1 : 'z' + 1;
-  bool changed = false;
-  uintptr_t or_acc = 0;
-  const char* const limit = src + length;
-
-  // dst is newly allocated and always aligned.
-  DCHECK(IsAligned(reinterpret_cast<intptr_t>(dst), sizeof(uintptr_t)));
-  // Only attempt processing one word at a time if src is also aligned.
-  if (IsAligned(reinterpret_cast<intptr_t>(src), sizeof(uintptr_t))) {
-    // Process the prefix of the input that requires no conversion, one
-    // aligned (machine) word at a time.
-    while (src <= limit - sizeof(uintptr_t)) {
-      const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
-      or_acc |= w;
-      if (AsciiRangeMask(w, lo, hi) != 0) {
-        changed = true;
-        break;
-      }
-      *reinterpret_cast<uintptr_t*>(dst) = w;
-      src += sizeof(uintptr_t);
-      dst += sizeof(uintptr_t);
-    }
-    // Process the remainder of the input, performing conversion when
-    // required, one word at a time.
-    while (src <= limit - sizeof(uintptr_t)) {
-      const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
-      or_acc |= w;
-      uintptr_t m = AsciiRangeMask(w, lo, hi);
-      // The mask has high (7th) bit set in every byte that needs
-      // conversion and we know that the distance between cases is
-      // 1 << 5.
-      *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
-      src += sizeof(uintptr_t);
-      dst += sizeof(uintptr_t);
-    }
-  }
-  // Process the last few bytes of the input (or the whole input if
-  // unaligned access is not supported).
-  while (src < limit) {
-    char c = *src;
-    or_acc |= c;
-    if (lo < c && c < hi) {
-      c ^= (1 << 5);
-      changed = true;
-    }
-    *dst = c;
-    ++src;
-    ++dst;
-  }
-
-  if ((or_acc & kAsciiMask) != 0) return false;
-
-  DCHECK(CheckFastAsciiConvert(saved_dst, saved_src, length, changed,
-                               Converter::kIsToLower));
-
-  *changed_out = changed;
-  return true;
-}
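-// The `w ^ (m >> 2)` step relies on the mask layout: AsciiRangeMask leaves
-// 0x80 in every byte needing conversion, so m >> 2 carries 0x20 there, and
-// XOR with 0x20 flips the ASCII case bit, e.g. 'G' (0x47) ^ 0x20 == 'g' (0x67).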
-
-
-template <class Converter>
-MUST_USE_RESULT static Object* ConvertCase(
-    Handle<String> s, Isolate* isolate,
-    unibrow::Mapping<Converter, 128>* mapping) {
-  s = String::Flatten(s);
-  int length = s->length();
-  // Assume that the string is not empty; we need this assumption later
-  if (length == 0) return *s;
-
-  // Simpler handling of ASCII strings.
-  //
-  // NOTE: This assumes that the upper/lower case of an ASCII
-  // character is also ASCII.  This is currently the case, but it
-  // might break in the future if we implement more context and locale
-  // dependent upper/lower conversions.
-  if (s->IsOneByteRepresentationUnderneath()) {
-    // Same length as input.
-    Handle<SeqOneByteString> result =
-        isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
-    DisallowHeapAllocation no_gc;
-    String::FlatContent flat_content = s->GetFlatContent();
-    DCHECK(flat_content.IsFlat());
-    bool has_changed_character = false;
-    bool is_ascii = FastAsciiConvert<Converter>(
-        reinterpret_cast<char*>(result->GetChars()),
-        reinterpret_cast<const char*>(flat_content.ToOneByteVector().start()),
-        length, &has_changed_character);
-    // If not ASCII, we discard the result and take the 2 byte path.
-    if (is_ascii) return has_changed_character ? *result : *s;
-  }
-
-  Handle<SeqString> result;  // Same length as input.
-  if (s->IsOneByteRepresentation()) {
-    result = isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
-  } else {
-    result = isolate->factory()->NewRawTwoByteString(length).ToHandleChecked();
-  }
-
-  Object* answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
-  if (answer->IsException(isolate) || answer->IsString()) return answer;
-
-  DCHECK(answer->IsSmi());
-  length = Smi::cast(answer)->value();
-  if (s->IsOneByteRepresentation() && length > 0) {
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, isolate->factory()->NewRawOneByteString(length));
-  } else {
-    if (length < 0) length = -length;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result, isolate->factory()->NewRawTwoByteString(length));
-  }
-  return ConvertCaseHelper(isolate, *s, *result, length, mapping);
-}
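-// The ConvertCase / ConvertCaseHelper protocol, summarized: the helper first
-// writes into a buffer of the input's length; if some character expands when
-// converted, it instead returns the exact required length as a Smi (negated
-// when a one-byte input needs a two-byte result), and ConvertCase reallocates
-// accordingly and runs the helper once more.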
-
-
-RUNTIME_FUNCTION(Runtime_StringToLowerCase) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(args.length(), 1);
-  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
-  return ConvertCase(s, isolate, isolate->runtime_state()->to_lower_mapping());
-}
-
-
-RUNTIME_FUNCTION(Runtime_StringToUpperCase) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(args.length(), 1);
-  CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
-  return ConvertCase(s, isolate, isolate->runtime_state()->to_upper_mapping());
-}
-
 RUNTIME_FUNCTION(Runtime_StringLessThan) {
   HandleScope handle_scope(isolate);
   DCHECK_EQ(2, args.length());
@@ -955,7 +722,7 @@
 
 RUNTIME_FUNCTION(Runtime_FlattenString) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
   return *String::Flatten(str);
 }
@@ -982,7 +749,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   if (!args[0]->IsString()) return isolate->heap()->undefined_value();
   if (!args[1]->IsNumber()) return isolate->heap()->undefined_value();
   if (std::isinf(args.number_at(1))) return isolate->heap()->nan_value();
diff --git a/src/runtime/runtime-symbol.cc b/src/runtime/runtime-symbol.cc
index 300a643..2eaef63 100644
--- a/src/runtime/runtime-symbol.cc
+++ b/src/runtime/runtime-symbol.cc
@@ -14,7 +14,7 @@
 
 RUNTIME_FUNCTION(Runtime_CreateSymbol) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
   CHECK(name->IsString() || name->IsUndefined(isolate));
   Handle<Symbol> symbol = isolate->factory()->NewSymbol();
@@ -25,7 +25,7 @@
 
 RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
   CHECK(name->IsString() || name->IsUndefined(isolate));
   Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
@@ -36,7 +36,7 @@
 
 RUNTIME_FUNCTION(Runtime_SymbolDescription) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Symbol, symbol, 0);
   return symbol->name();
 }
@@ -56,16 +56,9 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_SymbolRegistry) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
-  return *isolate->GetSymbolRegistry();
-}
-
-
 RUNTIME_FUNCTION(Runtime_SymbolIsPrivate) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Symbol, symbol, 0);
   return isolate->heap()->ToBoolean(symbol->is_private());
 }
diff --git a/src/runtime/runtime-test.cc b/src/runtime/runtime-test.cc
index 7054192..c6234fc 100644
--- a/src/runtime/runtime-test.cc
+++ b/src/runtime/runtime-test.cc
@@ -7,6 +7,7 @@
 #include <memory>
 
 #include "src/arguments.h"
+#include "src/assembler-inl.h"
 #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
 #include "src/compiler.h"
 #include "src/deoptimizer.h"
@@ -19,12 +20,51 @@
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-objects.h"
 
+namespace {
+struct WasmCompileControls {
+  uint32_t MaxWasmBufferSize = std::numeric_limits<uint32_t>::max();
+  bool AllowAnySizeForAsync = true;
+};
+
+// We need per-isolate controls because we sometimes run tests in multiple
+// isolates concurrently. To avoid upsetting the static initializer count,
+// we lazily initialize this.
+v8::base::LazyInstance<std::map<v8::Isolate*, WasmCompileControls>>::type
+    g_PerIsolateWasmControls = LAZY_INSTANCE_INITIALIZER;
+
+bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
+                          bool is_async) {
+  DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
+  const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
+  return (is_async && ctrls.AllowAnySizeForAsync) ||
+         (v8::Local<v8::ArrayBuffer>::Cast(value)->ByteLength() <=
+          ctrls.MaxWasmBufferSize);
+}
+
+// Use the compile controls for instantiation, too.
+bool IsWasmInstantiateAllowed(v8::Isolate* isolate,
+                              v8::Local<v8::Value> module_or_bytes,
+                              v8::MaybeLocal<v8::Value> ffi, bool is_async) {
+  DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
+  const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
+  if (is_async && ctrls.AllowAnySizeForAsync) return true;
+  if (!module_or_bytes->IsWebAssemblyCompiledModule()) {
+    return IsWasmCompileAllowed(isolate, module_or_bytes, is_async);
+  }
+  v8::Local<v8::WasmCompiledModule> module =
+      v8::Local<v8::WasmCompiledModule>::Cast(module_or_bytes);
+  return static_cast<uint32_t>(module->GetWasmWireBytes()->Length()) <=
+         ctrls.MaxWasmBufferSize;
+}
+}  // namespace
+
 namespace v8 {
 namespace internal {
 
 RUNTIME_FUNCTION(Runtime_ConstructDouble) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
   CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
   uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
@@ -33,7 +73,7 @@
 
 RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   // This function is used by fuzzers to get coverage in the compiler.
   // Ignore calls on non-function objects to avoid runtime errors.
@@ -48,7 +88,7 @@
 
   // TODO(turbofan): Deoptimization is not supported yet.
   if (function->code()->is_turbofanned() &&
-      function->shared()->asm_function() && !FLAG_turbo_asm_deoptimization) {
+      function->shared()->asm_function()) {
     return isolate->heap()->undefined_value();
   }
 
@@ -60,7 +100,7 @@
 
 RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
 
   Handle<JSFunction> function;
 
@@ -74,7 +114,7 @@
 
   // TODO(turbofan): Deoptimization is not supported yet.
   if (function->code()->is_turbofanned() &&
-      function->shared()->asm_function() && !FLAG_turbo_asm_deoptimization) {
+      function->shared()->asm_function()) {
     return isolate->heap()->undefined_value();
   }
 
@@ -86,7 +126,7 @@
 
 RUNTIME_FUNCTION(Runtime_RunningInSimulator) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
 #if defined(USE_SIMULATOR)
   return isolate->heap()->true_value();
 #else
@@ -97,7 +137,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   return isolate->heap()->ToBoolean(
       isolate->concurrent_recompilation_enabled());
 }
@@ -127,6 +167,12 @@
     return isolate->heap()->undefined_value();
   }
 
+  // If the function isn't compiled, compile it now.
+  if (!function->shared()->is_compiled() &&
+      !Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)) {
+    return isolate->heap()->undefined_value();
+  }
+
   // If the function is already optimized, just return.
   if (function->IsOptimized()) return isolate->heap()->undefined_value();
 
@@ -146,7 +192,7 @@
 
 RUNTIME_FUNCTION(Runtime_InterpretFunctionOnNextCall) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
   if (!function_object->IsJSFunction()) {
     return isolate->heap()->undefined_value();
@@ -164,13 +210,19 @@
 
 RUNTIME_FUNCTION(Runtime_BaselineFunctionOnNextCall) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
   if (!function_object->IsJSFunction()) {
     return isolate->heap()->undefined_value();
   }
   Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
 
+  // If the function isn't compiled, compile it now.
+  if (!function->shared()->is_compiled() &&
+      !Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)) {
+    return isolate->heap()->undefined_value();
+  }
+
   // Do not tier down if we are already on optimized code. Replacing optimized
   // code without actual deoptimization can lead to funny bugs.
   if (function->code()->kind() != Code::OPTIMIZED_FUNCTION &&
@@ -216,7 +268,7 @@
 
 RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
   function->shared()->set_disable_optimization_reason(
       kOptimizationDisabledForTest);
@@ -224,21 +276,28 @@
   return isolate->heap()->undefined_value();
 }
 
-
 RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1 || args.length() == 2);
+  int status = 0;
   if (!isolate->use_crankshaft()) {
-    return Smi::FromInt(4);  // 4 == "never".
+    status |= static_cast<int>(OptimizationStatus::kNeverOptimize);
+  }
+  if (FLAG_always_opt || FLAG_prepare_always_opt) {
+    status |= static_cast<int>(OptimizationStatus::kAlwaysOptimize);
+  }
+  if (FLAG_deopt_every_n_times) {
+    status |= static_cast<int>(OptimizationStatus::kMaybeDeopted);
   }
 
   // This function is used by fuzzers to get coverage for optimizations
   // in the compiler. Ignore calls on non-function objects to avoid runtime
   // errors.
   CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
   if (!function_object->IsJSFunction()) {
-    return isolate->heap()->undefined_value();
+    return Smi::FromInt(status);
   }
   Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+  status |= static_cast<int>(OptimizationStatus::kIsFunction);
 
   bool sync_with_compiler_thread = true;
   if (args.length() == 2) {
@@ -257,27 +316,21 @@
       base::OS::Sleep(base::TimeDelta::FromMilliseconds(50));
     }
   }
-  if (FLAG_always_opt || FLAG_prepare_always_opt) {
-    // With --always-opt, optimization status expectations might not
-    // match up, so just return a sentinel.
-    return Smi::FromInt(3);  // 3 == "always".
-  }
-  if (FLAG_deopt_every_n_times) {
-    return Smi::FromInt(6);  // 6 == "maybe deopted".
-  }
-  if (function->IsOptimized() && function->code()->is_turbofanned()) {
-    return Smi::FromInt(7);  // 7 == "TurboFan compiler".
+  if (function->IsOptimized()) {
+    status |= static_cast<int>(OptimizationStatus::kOptimized);
+    if (function->code()->is_turbofanned()) {
+      status |= static_cast<int>(OptimizationStatus::kTurboFanned);
+    }
   }
   if (function->IsInterpreted()) {
-    return Smi::FromInt(8);  // 8 == "Interpreted".
+    status |= static_cast<int>(OptimizationStatus::kInterpreted);
   }
-  return function->IsOptimized() ? Smi::FromInt(1)   // 1 == "yes".
-                                 : Smi::FromInt(2);  // 2 == "no".
+  return Smi::FromInt(status);
 }
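+// The status is now a bitmask instead of a single sentinel, so a test can
+// observe several facts at once. Hypothetical check, assuming
+// --allow-natives-syntax and the numeric OptimizationStatus bit values from
+// the runtime header:
+//   var status = %GetOptimizationStatus(f);
+//   assert((status & kIsFunction) && (status & kOptimized));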
 
 
 RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   if (FLAG_block_concurrent_recompilation &&
       isolate->concurrent_recompilation_enabled()) {
     isolate->optimizing_compile_dispatcher()->Unblock();
@@ -288,19 +341,23 @@
 
 RUNTIME_FUNCTION(Runtime_GetOptimizationCount) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   return Smi::FromInt(function->shared()->opt_count());
 }
 
+static void ReturnThis(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  args.GetReturnValue().Set(args.This());
+}
 
 RUNTIME_FUNCTION(Runtime_GetUndetectable) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
 
   Local<v8::ObjectTemplate> desc = v8::ObjectTemplate::New(v8_isolate);
   desc->MarkAsUndetectable();
+  desc->SetCallAsFunctionHandler(ReturnThis);
   Local<v8::Object> obj;
   if (!desc->NewInstance(v8_isolate->GetCurrentContext()).ToLocal(&obj)) {
     return nullptr;
@@ -323,7 +380,7 @@
 // parameters when it is called.
 RUNTIME_FUNCTION(Runtime_GetCallable) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
   Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(v8_isolate);
   Local<ObjectTemplate> instance_template = t->InstanceTemplate();
@@ -337,9 +394,9 @@
   return *Utils::OpenHandle(*instance);
 }
 
-RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) {
+RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   function->ClearTypeFeedbackInfo();
   Code* unoptimized = function->shared()->code();
@@ -412,9 +469,29 @@
   return isolate->heap()->ToBoolean(count == 1);
 }
 
+RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
+  HandleScope scope(isolate);
+  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+  CHECK(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(Smi, block_size, 0);
+  CONVERT_BOOLEAN_ARG_CHECKED(allow_async, 1);
+  WasmCompileControls& ctrl = (*g_PerIsolateWasmControls.Pointer())[v8_isolate];
+  ctrl.AllowAnySizeForAsync = allow_async;
+  ctrl.MaxWasmBufferSize = static_cast<uint32_t>(block_size->value());
+  isolate->set_allow_wasm_compile_callback(IsWasmCompileAllowed);
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_SetWasmInstantiateControls) {
+  HandleScope scope(isolate);
+  CHECK(args.length() == 0);
+  isolate->set_allow_wasm_instantiate_callback(IsWasmInstantiateAllowed);
+  return isolate->heap()->undefined_value();
+}
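+// A test would wire these up roughly as follows (illustrative, requires
+// --allow-natives-syntax; the size cap is arbitrary):
+//   %SetWasmCompileControls(100 * 1024, false);  // cap compiles at 100 KiB
+//   %SetWasmInstantiateControls();  // apply the same policy to instantiation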
+
 RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   isolate->heap()->NotifyContextDisposed(true);
   return isolate->heap()->undefined_value();
 }
@@ -444,7 +521,7 @@
 
 RUNTIME_FUNCTION(Runtime_DebugPrint) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   OFStream os(stdout);
 #ifdef DEBUG
@@ -475,7 +552,7 @@
 
 RUNTIME_FUNCTION(Runtime_DebugTrace) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   isolate->PrintStack(stdout);
   return isolate->heap()->undefined_value();
 }
@@ -485,7 +562,7 @@
 // very slowly for very deeply nested ConsStrings.  For debugging use only.
 RUNTIME_FUNCTION(Runtime_GlobalPrint) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
 
   CONVERT_ARG_CHECKED(String, string, 0);
   StringCharacterStream stream(string);
@@ -501,7 +578,7 @@
   // The code below doesn't create handles, but when breaking here in GDB
   // having a handle scope might be useful.
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   base::OS::DebugBreak();
   return isolate->heap()->undefined_value();
 }
@@ -510,7 +587,7 @@
 // Sets a v8 flag.
 RUNTIME_FUNCTION(Runtime_SetFlags) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(String, arg, 0);
   std::unique_ptr<char[]> flags =
       arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -521,7 +598,7 @@
 
 RUNTIME_FUNCTION(Runtime_Abort) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_SMI_ARG_CHECKED(message_id, 0);
   const char* message =
       GetBailoutReason(static_cast<BailoutReason>(message_id));
@@ -535,7 +612,7 @@
 
 RUNTIME_FUNCTION(Runtime_AbortJS) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
   base::OS::PrintError("abort: %s\n", message->ToCString().get());
   isolate->PrintStack(stderr);
@@ -546,14 +623,14 @@
 
 
 RUNTIME_FUNCTION(Runtime_NativeScriptsCount) {
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   return Smi::FromInt(Natives::GetBuiltinsCount());
 }
 
 // TODO(5510): remove this.
 RUNTIME_FUNCTION(Runtime_GetV8Version) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
 
   const char* version_string = v8::V8::GetVersion();
 
@@ -564,7 +641,7 @@
 RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
   HandleScope scope(isolate);
 #ifdef DEBUG
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   // Get the function and make sure it is compiled.
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
   if (!Compiler::Compile(func, Compiler::KEEP_EXCEPTION)) {
@@ -628,7 +705,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetExceptionDetails) {
   HandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSObject, exception_obj, 0);
 
   Factory* factory = isolate->factory();
@@ -653,7 +730,7 @@
 
 RUNTIME_FUNCTION(Runtime_HaveSameMap) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_CHECKED(JSObject, obj1, 0);
   CONVERT_ARG_CHECKED(JSObject, obj2, 1);
   return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
@@ -662,41 +739,48 @@
 
 RUNTIME_FUNCTION(Runtime_InNewSpace) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(Object, obj, 0);
   return isolate->heap()->ToBoolean(isolate->heap()->InNewSpace(obj));
 }
 
-static bool IsAsmWasmCode(Isolate* isolate, Handle<JSFunction> function) {
+RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
   if (!function->shared()->HasAsmWasmData()) {
     // Doesn't have wasm data.
-    return false;
+    return isolate->heap()->false_value();
   }
   if (function->shared()->code() !=
       isolate->builtins()->builtin(Builtins::kInstantiateAsmJs)) {
     // Hasn't been compiled yet.
-    return false;
+    return isolate->heap()->false_value();
   }
-  return true;
+  return isolate->heap()->true_value();
 }
 
-RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-  // TODO(mstarzinger): --always-opt should still allow asm.js->wasm,
-  // but currently does not. For now, pretend asm.js->wasm is on for
-  // this case. Be more accurate once this is corrected.
-  return isolate->heap()->ToBoolean(
-      ((FLAG_always_opt || FLAG_prepare_always_opt) && FLAG_validate_asm) ||
-      IsAsmWasmCode(isolate, function));
+namespace {
+bool DisallowCodegenFromStringsCallback(v8::Local<v8::Context> context) {
+  return false;
+}
 }
 
-RUNTIME_FUNCTION(Runtime_IsNotAsmWasmCode) {
+RUNTIME_FUNCTION(Runtime_DisallowCodegenFromStrings) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
-  return isolate->heap()->ToBoolean(!IsAsmWasmCode(isolate, function));
+  DCHECK_EQ(0, args.length());
+  v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+  v8_isolate->SetAllowCodeGenerationFromStringsCallback(
+      DisallowCodegenFromStringsCallback);
+  return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_IsWasmCode) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  bool is_js_to_wasm = function->code()->kind() == Code::JS_TO_WASM_FUNCTION;
+  return isolate->heap()->ToBoolean(is_js_to_wasm);
 }
 
 #define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name)       \
@@ -736,15 +820,18 @@
   return isolate->heap()->ToBoolean(isolate->IsArraySpeciesLookupChainIntact());
 }
 
+#define CONVERT_ARG_HANDLE_CHECKED_2(Type, name, index) \
+  CHECK(Type::Is##Type(args[index]));                   \
+  Handle<Type> name = args.at<Type>(index);
+
 // Take a compiled wasm module, serialize it and copy the buffer into an array
 // buffer, which is then returned.
 RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
   HandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, module_obj, 0);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED_2(WasmModuleObject, module_obj, 0);
 
-  Handle<FixedArray> orig =
-      handle(FixedArray::cast(module_obj->GetInternalField(0)));
+  Handle<WasmCompiledModule> orig(module_obj->compiled_module());
   std::unique_ptr<ScriptData> data =
       WasmCompiledModuleSerializer::SerializeWasmModule(isolate, orig);
   void* buff = isolate->array_buffer_allocator()->Allocate(data->length());
@@ -758,7 +845,7 @@
 // Return undefined if unsuccessful.
 RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
   HandleScope shs(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK_EQ(2, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, wire_bytes, 1);
 
@@ -793,8 +880,8 @@
 
 RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
   HandleScope shs(isolate);
-  DCHECK(args.length() == 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, module_obj, 0);
+  DCHECK_EQ(2, args.length());
+  CONVERT_ARG_HANDLE_CHECKED_2(WasmModuleObject, module_obj, 0);
   CONVERT_ARG_HANDLE_CHECKED(Smi, instance_count, 1);
   wasm::testing::ValidateInstancesChain(isolate, module_obj,
                                         instance_count->value());
@@ -803,17 +890,34 @@
 
 RUNTIME_FUNCTION(Runtime_ValidateWasmModuleState) {
   HandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, module_obj, 0);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED_2(WasmModuleObject, module_obj, 0);
   wasm::testing::ValidateModuleState(isolate, module_obj);
   return isolate->heap()->ToBoolean(true);
 }
 
 RUNTIME_FUNCTION(Runtime_ValidateWasmOrphanedInstance) {
   HandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, instance_obj, 0);
-  wasm::testing::ValidateOrphanedInstance(isolate, instance_obj);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED_2(WasmInstanceObject, instance, 0);
+  wasm::testing::ValidateOrphanedInstance(isolate, instance);
+  return isolate->heap()->ToBoolean(true);
+}
+
+RUNTIME_FUNCTION(Runtime_Verify) {
+  HandleScope shs(isolate);
+  DCHECK_EQ(1, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+#ifdef VERIFY_HEAP
+  object->ObjectVerify();
+#else
+  CHECK(object->IsObject());
+  if (object->IsHeapObject()) {
+    CHECK(HeapObject::cast(*object)->map()->IsMap());
+  } else {
+    CHECK(object->IsSmi());
+  }
+#endif
   return isolate->heap()->ToBoolean(true);
 }
 
diff --git a/src/runtime/runtime-typedarray.cc b/src/runtime/runtime-typedarray.cc
index cb0e062..4ca7bbb 100644
--- a/src/runtime/runtime-typedarray.cc
+++ b/src/runtime/runtime-typedarray.cc
@@ -15,7 +15,7 @@
 
 RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
   SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_CHECKED(JSArrayBuffer, holder, 0);
   return holder->byte_length();
 }
@@ -23,7 +23,7 @@
 
 RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, source, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(first, 2);
@@ -56,7 +56,7 @@
 
 RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
   if (array_buffer->backing_store() == NULL) {
     CHECK(Smi::kZero == array_buffer->byte_length());
@@ -97,7 +97,7 @@
 
 RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 6);
+  DCHECK_EQ(6, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
   CONVERT_SMI_ARG_CHECKED(arrayId, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, maybe_buffer, 2);
@@ -179,7 +179,7 @@
 // Returns true if backing store was initialized or false otherwise.
 RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 4);
+  DCHECK_EQ(4, args.length());
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
   CONVERT_SMI_ARG_CHECKED(arrayId, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, source, 2);
@@ -310,7 +310,7 @@
 
 RUNTIME_FUNCTION(Runtime_TypedArraySetFastCases) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
+  DCHECK_EQ(3, args.length());
   if (!args[0]->IsJSTypedArray()) {
     THROW_NEW_ERROR_RETURN_FAILURE(
         isolate, NewTypeError(MessageTemplate::kNotTypedArray));
@@ -367,9 +367,70 @@
   }
 }
 
+namespace {
+
+template <typename T>
+bool CompareNum(T x, T y) {
+  if (x < y) {
+    return true;
+  } else if (x > y) {
+    return false;
+  } else if (!std::is_integral<T>::value) {
+    double _x = x, _y = y;
+    if (x == 0 && x == y) {
+      /* -0.0 is less than +0.0 */
+      return std::signbit(_x) && !std::signbit(_y);
+    } else if (!std::isnan(_x) && std::isnan(_y)) {
+      /* number is less than NaN */
+      return true;
+    }
+  }
+  return false;
+}
+
+}  // namespace
+
+RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
+  HandleScope scope(isolate);
+  DCHECK_EQ(1, args.length());
+
+  CONVERT_ARG_HANDLE_CHECKED(Object, target_obj, 0);
+
+  Handle<JSTypedArray> array;
+  const char* method = "%TypedArray%.prototype.sort";
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, array, JSTypedArray::Validate(isolate, target_obj, method));
+
+  // This line can be removed when JSTypedArray::Validate throws
+  // if array.[[ViewedArrayBuffer]] is neutered (v8:4648).
+  if (V8_UNLIKELY(array->WasNeutered())) return *array;
+
+  size_t length = array->length_value();
+  if (length <= 1) return *array;
+
+  Handle<FixedTypedArrayBase> elements(
+      FixedTypedArrayBase::cast(array->elements()));
+  switch (array->type()) {
+#define TYPED_ARRAY_SORT(Type, type, TYPE, ctype, size)     \
+  case kExternal##Type##Array: {                            \
+    ctype* data = static_cast<ctype*>(elements->DataPtr()); \
+    if (kExternal##Type##Array == kExternalFloat64Array ||  \
+        kExternal##Type##Array == kExternalFloat32Array)    \
+      std::sort(data, data + length, CompareNum<ctype>);    \
+    else                                                    \
+      std::sort(data, data + length);                       \
+    break;                                                  \
+  }
+
+    TYPED_ARRAYS(TYPED_ARRAY_SORT)
+#undef TYPED_ARRAY_SORT
+  }
+
+  return *array;
+}
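As a self-contained sanity sketch (not part of the patch) of the ordering CompareNum establishes for the float element kinds, assuming only the C++ standard library:

      #include <algorithm>
      #include <cmath>
      #include <cstdio>
      #include <vector>

      int main() {
        std::vector<double> v = {NAN, 1.0, -0.0, 0.0, -1.0};
        // Same rules as CompareNum<double>: ordinary '<' first, then
        // -0.0 < +0.0, then every number < NaN.
        std::sort(v.begin(), v.end(), [](double x, double y) {
          if (x < y) return true;
          if (x > y) return false;
          if (x == 0 && x == y) return std::signbit(x) && !std::signbit(y);
          return !std::isnan(x) && std::isnan(y);
        });
        for (double d : v) std::printf("%g ", d);  // prints: -1 -0 0 1 nan
        return 0;
      }

The extra cases matter because plain operator< is not a strict weak ordering once NaN is involved, and std::sort requires one; the comparator restores it by placing every number before NaN and -0.0 before +0.0.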
 
 RUNTIME_FUNCTION(Runtime_TypedArrayMaxSizeInHeap) {
-  DCHECK(args.length() == 0);
+  DCHECK_EQ(0, args.length());
   DCHECK_OBJECT_SIZE(FLAG_typed_array_max_size_in_heap +
                      FixedTypedArrayBase::kDataOffset);
   return Smi::FromInt(FLAG_typed_array_max_size_in_heap);
@@ -378,14 +439,14 @@
 
 RUNTIME_FUNCTION(Runtime_IsTypedArray) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   return isolate->heap()->ToBoolean(args[0]->IsJSTypedArray());
 }
 
 
 RUNTIME_FUNCTION(Runtime_IsSharedTypedArray) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   return isolate->heap()->ToBoolean(
       args[0]->IsJSTypedArray() &&
       JSTypedArray::cast(args[0])->GetBuffer()->is_shared());
@@ -394,7 +455,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsSharedIntegerTypedArray) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   if (!args[0]->IsJSTypedArray()) {
     return isolate->heap()->false_value();
   }
@@ -409,7 +470,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsSharedInteger32TypedArray) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK_EQ(1, args.length());
   if (!args[0]->IsJSTypedArray()) {
     return isolate->heap()->false_value();
   }
diff --git a/src/runtime/runtime-utils.h b/src/runtime/runtime-utils.h
index 147efed..8c7714a 100644
--- a/src/runtime/runtime-utils.h
+++ b/src/runtime/runtime-utils.h
@@ -6,6 +6,7 @@
 #define V8_RUNTIME_RUNTIME_UTILS_H_
 
 #include "src/base/logging.h"
+#include "src/globals.h"
 #include "src/runtime/runtime.h"
 
 namespace v8 {
@@ -24,7 +25,7 @@
 
 #define CONVERT_NUMBER_ARG_HANDLE_CHECKED(name, index) \
   CHECK(args[index]->IsNumber());                      \
-  Handle<Object> name = args.at<Object>(index);
+  Handle<Object> name = args.at(index);
 
 // Cast the given object to a boolean and store it in a variable with
 // the given name.  If the object is not a boolean we crash safely.
@@ -47,10 +48,10 @@
 
 // Cast the given argument to a size_t and store its value in a variable with
 // the given name.  If the argument is not a size_t we crash safely.
-#define CONVERT_SIZE_ARG_CHECKED(name, index)            \
-  CHECK(args[index]->IsNumber());                        \
-  Handle<Object> name##_object = args.at<Object>(index); \
-  size_t name = 0;                                       \
+#define CONVERT_SIZE_ARG_CHECKED(name, index)    \
+  CHECK(args[index]->IsNumber());                \
+  Handle<Object> name##_object = args.at(index); \
+  size_t name = 0;                               \
   CHECK(TryNumberToSize(*name##_object, &name));
 
 // Call the specified converter on the object and store the result in
diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc
index ab69046..9f125c1 100644
--- a/src/runtime/runtime-wasm.cc
+++ b/src/runtime/runtime-wasm.cc
@@ -14,50 +14,125 @@
 #include "src/objects-inl.h"
 #include "src/v8memory.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-opcodes.h"
 
 namespace v8 {
 namespace internal {
 
+namespace {
+WasmInstanceObject* GetWasmInstanceOnStackTop(Isolate* isolate) {
+  DisallowHeapAllocation no_allocation;
+  const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
+  Address pc =
+      Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
+  Code* code = isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
+  DCHECK_EQ(Code::WASM_FUNCTION, code->kind());
+  WasmInstanceObject* owning_instance = wasm::GetOwningWasmInstance(code);
+  CHECK_NOT_NULL(owning_instance);
+  return owning_instance;
+}
+Context* GetWasmContextOnStackTop(Isolate* isolate) {
+  return GetWasmInstanceOnStackTop(isolate)
+      ->compiled_module()
+      ->ptr_to_native_context();
+}
+}  // namespace
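The helper relies on runtime calls from wasm going through the CEntry stub: c_entry_fp is the stub's frame, so the caller PC stored there necessarily points back into the calling wasm Code object. As a commented trace (same helpers as above, nothing new):

      Address fp = Isolate::c_entry_fp(isolate->thread_local_top());  // CEntry frame
      Address pc = Memory::Address_at(fp + StandardFrameConstants::kCallerPCOffset);
      Code* code = isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
      // pc is an inner pointer of the wasm function's code object;
      // GetOwningWasmInstance() maps that Code* back to the instance that was
      // recorded for it at compile time.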
+
 RUNTIME_FUNCTION(Runtime_WasmMemorySize) {
   HandleScope scope(isolate);
   DCHECK_EQ(0, args.length());
 
-  Handle<JSObject> module_instance;
-  {
-    // Get the module JSObject
-    DisallowHeapAllocation no_allocation;
-    const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
-    Address pc =
-        Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
-    Code* code =
-        isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
-    Object* owning_instance = wasm::GetOwningWasmInstance(code);
-    CHECK_NOT_NULL(owning_instance);
-    module_instance = handle(JSObject::cast(owning_instance), isolate);
-  }
+  Handle<WasmInstanceObject> instance(GetWasmInstanceOnStackTop(isolate),
+                                      isolate);
   return *isolate->factory()->NewNumberFromInt(
-      wasm::GetInstanceMemorySize(isolate, module_instance));
+      wasm::GetInstanceMemorySize(isolate, instance));
 }
 
 RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
   HandleScope scope(isolate);
   DCHECK_EQ(1, args.length());
   CONVERT_UINT32_ARG_CHECKED(delta_pages, 0);
-  Handle<JSObject> module_instance;
-  {
-    // Get the module JSObject
-    DisallowHeapAllocation no_allocation;
-    const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
-    Address pc =
-        Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
-    Code* code =
-        isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
-    Object* owning_instance = wasm::GetOwningWasmInstance(code);
-    CHECK_NOT_NULL(owning_instance);
-    module_instance = handle(JSObject::cast(owning_instance), isolate);
-  }
+  Handle<WasmInstanceObject> instance(GetWasmInstanceOnStackTop(isolate),
+                                      isolate);
+
+  // Set the current isolate's context.
+  DCHECK_NULL(isolate->context());
+  isolate->set_context(instance->compiled_module()->ptr_to_native_context());
+
   return *isolate->factory()->NewNumberFromInt(
-      wasm::GrowInstanceMemory(isolate, module_instance, delta_pages));
+      wasm::GrowMemory(isolate, instance, delta_pages));
+}
+
+Object* ThrowRuntimeError(Isolate* isolate, int message_id, int byte_offset,
+                          bool patch_source_position) {
+  HandleScope scope(isolate);
+  DCHECK_NULL(isolate->context());
+  isolate->set_context(GetWasmContextOnStackTop(isolate));
+  Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
+      static_cast<MessageTemplate::Template>(message_id));
+
+  if (!patch_source_position) {
+    return isolate->Throw(*error_obj);
+  }
+
+  // For wasm traps, the byte offset (a.k.a. source position) cannot be
+  // determined from relocation info, since the explicit checks for traps
+  // converge in one single block which calls this runtime function.
+  // We hence pass the byte offset explicitly, and patch it into the top-most
+  // frame (a wasm frame) on the collected stack trace.
+  // TODO(wasm): This implementation is temporary, see bug #5007:
+  // https://bugs.chromium.org/p/v8/issues/detail?id=5007
+  Handle<JSObject> error = Handle<JSObject>::cast(error_obj);
+  Handle<Object> stack_trace_obj = JSReceiver::GetDataProperty(
+      error, isolate->factory()->stack_trace_symbol());
+  // Patch the stack trace (array of <receiver, function, code, position>).
+  if (stack_trace_obj->IsJSArray()) {
+    Handle<FrameArray> stack_elements(
+        FrameArray::cast(JSArray::cast(*stack_trace_obj)->elements()));
+    DCHECK(stack_elements->Code(0)->kind() == AbstractCode::WASM_FUNCTION);
+    DCHECK(stack_elements->Offset(0)->value() >= 0);
+    stack_elements->SetOffset(0, Smi::FromInt(-1 - byte_offset));
+  }
+
+  // Patch the detailed stack trace (array of JSObjects with various
+  // properties).
+  Handle<Object> detailed_stack_trace_obj = JSReceiver::GetDataProperty(
+      error, isolate->factory()->detailed_stack_trace_symbol());
+  if (detailed_stack_trace_obj->IsJSArray()) {
+    Handle<FixedArray> stack_elements(
+        FixedArray::cast(JSArray::cast(*detailed_stack_trace_obj)->elements()));
+    DCHECK_GE(stack_elements->length(), 1);
+    Handle<JSObject> top_frame(JSObject::cast(stack_elements->get(0)));
+    Handle<String> wasm_offset_key =
+        isolate->factory()->InternalizeOneByteString(
+            STATIC_CHAR_VECTOR("column"));
+    LookupIterator it(top_frame, wasm_offset_key, top_frame,
+                      LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+    if (it.IsFound()) {
+      DCHECK(JSReceiver::GetDataProperty(&it)->IsSmi());
+      // Make column number 1-based here.
+      Maybe<bool> data_set = JSReceiver::SetDataProperty(
+          &it, handle(Smi::FromInt(byte_offset + 1), isolate));
+      DCHECK(data_set.IsJust() && data_set.FromJust() == true);
+      USE(data_set);
+    }
+  }
+
+  return isolate->Throw(*error_obj);
+}
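A note on the -1 - byte_offset encoding above: negating the offset keeps a patched wasm position distinguishable from an ordinary non-negative code offset in the frame array, and the transform is its own inverse. Illustrative helpers (hypothetical names, not V8 API):

      int EncodeWasmByteOffset(int byte_offset) { return -1 - byte_offset; }
      int DecodeWasmByteOffset(int stored) { return -1 - stored; }
      // Encode(0) == -1 and Decode(Encode(b)) == b for every b >= 0, so a
      // consumer can test `stored < 0` to detect a patched wasm position.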
+
+RUNTIME_FUNCTION(Runtime_ThrowWasmErrorFromTrapIf) {
+  DCHECK_EQ(1, args.length());
+  CONVERT_SMI_ARG_CHECKED(message_id, 0);
+  return ThrowRuntimeError(isolate, message_id, 0, false);
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
+  DCHECK_EQ(2, args.length());
+  CONVERT_SMI_ARG_CHECKED(message_id, 0);
+  CONVERT_SMI_ARG_CHECKED(byte_offset, 1);
+  return ThrowRuntimeError(isolate, message_id, byte_offset, true);
 }
 
 RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
@@ -75,6 +150,10 @@
 
   const int32_t thrown_value = (upper << 16) | lower;
 
+  // Set the current isolate's context.
+  DCHECK_NULL(isolate->context());
+  isolate->set_context(GetWasmContextOnStackTop(isolate));
+
   return isolate->Throw(*isolate->factory()->NewNumberFromInt(thrown_value));
 }
 
@@ -89,5 +168,45 @@
   return exception;
 }
 
+RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
+  DCHECK_EQ(3, args.length());
+  HandleScope scope(isolate);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, instance_obj, 0);
+  CONVERT_NUMBER_CHECKED(int32_t, func_index, Int32, args[1]);
+  CONVERT_ARG_HANDLE_CHECKED(Object, arg_buffer_obj, 2);
+  CHECK(WasmInstanceObject::IsWasmInstanceObject(*instance_obj));
+  Handle<WasmInstanceObject> instance =
+      Handle<WasmInstanceObject>::cast(instance_obj);
+
+  // The arg buffer is the raw pointer to the caller's stack. It looks like a
+  // Smi (lowest bit not set, as checked by IsSmi), but is not a valid Smi. We
+  // just cast it back to the raw pointer.
+  CHECK(!arg_buffer_obj->IsHeapObject());
+  CHECK(arg_buffer_obj->IsSmi());
+  uint8_t* arg_buffer = reinterpret_cast<uint8_t*>(*arg_buffer_obj);
+
+  // Set the current isolate's context.
+  DCHECK_NULL(isolate->context());
+  isolate->set_context(instance->compiled_module()->ptr_to_native_context());
+
+  instance->debug_info()->RunInterpreter(func_index, arg_buffer);
+  return isolate->heap()->undefined_value();
+}
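The Smi check works because of pointer tagging: a Smi is a word with the low bit clear, while heap-object pointers carry a set low bit. Any pointer-aligned stack address is even and therefore passes the test. A minimal sketch of that invariant (the tagging assumption is stated in the comment):

      #include <cstdint>
      // Assumes V8's tagging as described above: low bit 0 looks like a Smi,
      // low bit 1 marks a HeapObject pointer. Aligned addresses are even.
      inline bool LooksLikeSmi(uintptr_t word) { return (word & 1u) == 0; }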
+
+RUNTIME_FUNCTION(Runtime_WasmStackGuard) {
+  SealHandleScope shs(isolate);
+  DCHECK_EQ(0, args.length());
+
+  // Set the current isolate's context.
+  DCHECK_NULL(isolate->context());
+  isolate->set_context(GetWasmContextOnStackTop(isolate));
+
+  // Check if this is a real stack overflow.
+  StackLimitCheck check(isolate);
+  if (check.JsHasOverflowed()) return isolate->StackOverflow();
+
+  return isolate->stack_guard()->HandleInterrupts();
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/runtime/runtime.cc b/src/runtime/runtime.cc
index 9d1cd39..38f1805 100644
--- a/src/runtime/runtime.cc
+++ b/src/runtime/runtime.cc
@@ -10,6 +10,7 @@
 #include "src/handles-inl.h"
 #include "src/heap/heap.h"
 #include "src/isolate.h"
+#include "src/objects-inl.h"
 #include "src/runtime/runtime-utils.h"
 
 namespace v8 {
diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h
index 8e2e83c..6c5a039 100644
--- a/src/runtime/runtime.h
+++ b/src/runtime/runtime.h
@@ -9,8 +9,8 @@
 
 #include "src/allocation.h"
 #include "src/base/platform/time.h"
+#include "src/elements-kind.h"
 #include "src/globals.h"
-#include "src/objects.h"
 #include "src/unicode.h"
 #include "src/zone/zone.h"
 
@@ -45,7 +45,6 @@
   F(EstimateNumberOfElements, 1, 1)  \
   F(GetArrayKeys, 2, 1)              \
   F(NewArray, -1 /* >= 3 */, 1)      \
-  F(ArrayPush, -1, 1)                \
   F(FunctionBind, -1, 1)             \
   F(NormalizeElements, 1, 1)         \
   F(GrowArrayElements, 2, 1)         \
@@ -57,7 +56,8 @@
   F(ArraySpeciesConstructor, 1, 1)   \
   F(ArrayIncludes_Slow, 3, 1)        \
   F(ArrayIndexOf, 3, 1)              \
-  F(SpreadIterablePrepare, 1, 1)
+  F(SpreadIterablePrepare, 1, 1)     \
+  F(SpreadIterableFixed, 1, 1)
 
 #define FOR_EACH_INTRINSIC_ATOMICS(F)           \
   F(ThrowNotIntegerSharedTypedArrayError, 1, 1) \
@@ -73,23 +73,25 @@
   F(AtomicsIsLockFree, 1, 1)                    \
   F(AtomicsWait, 4, 1)                          \
   F(AtomicsWake, 3, 1)                          \
-  F(AtomicsNumWaitersForTesting, 2, 1)
+  F(AtomicsNumWaitersForTesting, 2, 1)          \
+  F(SetAllowAtomicsWait, 1, 1)
 
-#define FOR_EACH_INTRINSIC_CLASSES(F)       \
-  F(ThrowNonMethodError, 0, 1)              \
-  F(ThrowUnsupportedSuperError, 0, 1)       \
-  F(ThrowConstructorNonCallableError, 1, 1) \
-  F(ThrowArrayNotSubclassableError, 0, 1)   \
-  F(ThrowStaticPrototypeError, 0, 1)        \
-  F(ThrowIfStaticPrototype, 1, 1)           \
-  F(HomeObjectSymbol, 0, 1)                 \
-  F(DefineClass, 4, 1)                      \
-  F(LoadFromSuper, 3, 1)                    \
-  F(LoadKeyedFromSuper, 3, 1)               \
-  F(StoreToSuper_Strict, 4, 1)              \
-  F(StoreToSuper_Sloppy, 4, 1)              \
-  F(StoreKeyedToSuper_Strict, 4, 1)         \
-  F(StoreKeyedToSuper_Sloppy, 4, 1)         \
+#define FOR_EACH_INTRINSIC_CLASSES(F)        \
+  F(ThrowUnsupportedSuperError, 0, 1)        \
+  F(ThrowConstructorNonCallableError, 1, 1)  \
+  F(ThrowStaticPrototypeError, 0, 1)         \
+  F(ThrowSuperAlreadyCalledError, 0, 1)      \
+  F(ThrowNotSuperConstructor, 2, 1)          \
+  F(HomeObjectSymbol, 0, 1)                  \
+  F(DefineClass, 4, 1)                       \
+  F(InstallClassNameAccessor, 1, 1)          \
+  F(InstallClassNameAccessorWithCheck, 1, 1) \
+  F(LoadFromSuper, 3, 1)                     \
+  F(LoadKeyedFromSuper, 3, 1)                \
+  F(StoreToSuper_Strict, 4, 1)               \
+  F(StoreToSuper_Sloppy, 4, 1)               \
+  F(StoreKeyedToSuper_Strict, 4, 1)          \
+  F(StoreKeyedToSuper_Sloppy, 4, 1)          \
   F(GetSuperConstructor, 1, 1)
 
 #define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
@@ -147,7 +149,7 @@
   F(DebugGetInternalProperties, 1, 1)           \
   F(DebugGetPropertyDetails, 2, 1)              \
   F(DebugGetProperty, 2, 1)                     \
-  F(DebugPropertyTypeFromDetails, 1, 1)         \
+  F(DebugPropertyKindFromDetails, 1, 1)         \
   F(DebugPropertyAttributesFromDetails, 1, 1)   \
   F(CheckExecutionState, 1, 1)                  \
   F(GetFrameCount, 1, 1)                        \
@@ -170,8 +172,8 @@
   F(IsBreakOnException, 1, 1)                   \
   F(PrepareStep, 2, 1)                          \
   F(ClearStepping, 0, 1)                        \
-  F(DebugEvaluate, 6, 1)                        \
-  F(DebugEvaluateGlobal, 4, 1)                  \
+  F(DebugEvaluate, 5, 1)                        \
+  F(DebugEvaluateGlobal, 2, 1)                  \
   F(DebugGetLoadedScripts, 0, 1)                \
   F(DebugReferencedBy, 3, 1)                    \
   F(DebugConstructedBy, 2, 1)                   \
@@ -179,7 +181,6 @@
   F(DebugSetScriptSource, 2, 1)                 \
   F(FunctionGetInferredName, 1, 1)              \
   F(FunctionGetDebugName, 1, 1)                 \
-  F(ExecuteInDebugContext, 1, 1)                \
   F(GetDebugContext, 0, 1)                      \
   F(CollectGarbage, 1, 1)                       \
   F(GetHeapUsage, 0, 1)                         \
@@ -190,31 +191,32 @@
   F(ScriptLocationFromLine, 4, 1)               \
   F(ScriptLocationFromLine2, 4, 1)              \
   F(ScriptPositionInfo, 3, 1)                   \
+  F(ScriptPositionInfo2, 3, 1)                  \
   F(ScriptSourceLine, 2, 1)                     \
-  F(DebugPrepareStepInIfStepping, 1, 1)         \
+  F(DebugOnFunctionCall, 1, 1)                  \
   F(DebugPrepareStepInSuspendedGenerator, 0, 1) \
-  F(DebugRecordAsyncFunction, 1, 1)             \
+  F(DebugRecordGenerator, 1, 1)                 \
   F(DebugPushPromise, 1, 1)                     \
   F(DebugPopPromise, 0, 1)                      \
-  F(DebugNextMicrotaskId, 0, 1)                 \
-  F(DebugAsyncTaskEvent, 3, 1)                  \
+  F(DebugPromiseReject, 2, 1)                   \
+  F(DebugAsyncEventEnqueueRecurring, 2, 1)      \
+  F(DebugAsyncFunctionPromiseCreated, 1, 1)     \
   F(DebugIsActive, 0, 1)                        \
-  F(DebugBreakInOptimizedCode, 0, 1)
+  F(DebugBreakInOptimizedCode, 0, 1)            \
+  F(DebugCollectCoverage, 0, 1)                 \
+  F(DebugTogglePreciseCoverage, 1, 1)
 
 #define FOR_EACH_INTRINSIC_ERROR(F) F(ErrorToString, 1, 1)
 
 #define FOR_EACH_INTRINSIC_FORIN(F) \
   F(ForInEnumerate, 1, 1)           \
   F(ForInFilter, 2, 1)              \
-  F(ForInHasProperty, 2, 1)         \
-  F(ForInNext, 4, 1)
+  F(ForInHasProperty, 2, 1)
 
 #define FOR_EACH_INTRINSIC_INTERPRETER(F) \
-  F(InterpreterNewClosure, 2, 1)          \
+  F(InterpreterNewClosure, 4, 1)          \
   F(InterpreterTraceBytecodeEntry, 3, 1)  \
   F(InterpreterTraceBytecodeExit, 3, 1)   \
-  F(InterpreterClearPendingMessage, 0, 1) \
-  F(InterpreterSetPendingMessage, 1, 1)   \
   F(InterpreterAdvanceBytecodeOffset, 2, 1)
 
 #define FOR_EACH_INTRINSIC_FUNCTION(F)     \
@@ -241,10 +243,10 @@
 
 #define FOR_EACH_INTRINSIC_GENERATOR(F) \
   F(CreateJSGeneratorObject, 2, 1)      \
-  F(SuspendJSGeneratorObject, 1, 1)     \
   F(GeneratorClose, 1, 1)               \
   F(GeneratorGetFunction, 1, 1)         \
   F(GeneratorGetReceiver, 1, 1)         \
+  F(GeneratorGetContext, 1, 1)          \
   F(GeneratorGetInputOrDebugPos, 1, 1)  \
   F(GeneratorGetContinuation, 1, 1)     \
   F(GeneratorGetSourcePosition, 1, 1)   \
@@ -258,15 +260,12 @@
   F(GetLanguageTagVariants, 1, 1)            \
   F(IsInitializedIntlObject, 1, 1)           \
   F(IsInitializedIntlObjectOfType, 2, 1)     \
-  F(MarkAsInitializedIntlObjectOfType, 3, 1) \
-  F(GetImplFromInitializedIntlObject, 1, 1)  \
+  F(MarkAsInitializedIntlObjectOfType, 2, 1) \
   F(CreateDateTimeFormat, 3, 1)              \
   F(InternalDateFormat, 2, 1)                \
   F(InternalDateFormatToParts, 2, 1)         \
-  F(InternalDateParse, 2, 1)                 \
   F(CreateNumberFormat, 3, 1)                \
   F(InternalNumberFormat, 2, 1)              \
-  F(InternalNumberParse, 2, 1)               \
   F(CreateCollator, 3, 1)                    \
   F(InternalCompare, 3, 1)                   \
   F(StringNormalize, 2, 1)                   \
@@ -290,26 +289,18 @@
   F(AllocateSeqOneByteString, 1, 1)                 \
   F(AllocateSeqTwoByteString, 1, 1)                 \
   F(CheckIsBootstrapping, 0, 1)                     \
+  F(CreateAsyncFromSyncIterator, 1, 1)              \
   F(CreateListFromArrayLike, 1, 1)                  \
-  F(EnqueueMicrotask, 1, 1)                         \
-  F(EnqueuePromiseReactionJob, 4, 1)                \
-  F(EnqueuePromiseResolveThenableJob, 3, 1)         \
   F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1)  \
-  F(ExportExperimentalFromRuntime, 1, 1)            \
   F(ExportFromRuntime, 1, 1)                        \
   F(IncrementUseCounter, 1, 1)                      \
   F(InstallToContext, 1, 1)                         \
   F(Interrupt, 0, 1)                                \
   F(IS_VAR, 1, 1)                                   \
-  F(IsWasmInstance, 1, 1)                           \
   F(NewReferenceError, 2, 1)                        \
   F(NewSyntaxError, 2, 1)                           \
   F(NewTypeError, 2, 1)                             \
   F(OrdinaryHasInstance, 2, 1)                      \
-  F(PromiseReject, 3, 1)                            \
-  F(PromiseFulfill, 4, 1)                           \
-  F(PromiseRejectEventFromStack, 2, 1)              \
-  F(PromiseRevokeReject, 1, 1)                      \
   F(PromoteScheduledException, 0, 1)                \
   F(ReThrow, 1, 1)                                  \
   F(RunMicrotasks, 0, 1)                            \
@@ -324,16 +315,22 @@
   F(ThrowGeneratorRunning, 0, 1)                    \
   F(ThrowIllegalInvocation, 0, 1)                   \
   F(ThrowIncompatibleMethodReceiver, 2, 1)          \
+  F(ThrowInvalidHint, 1, 1)                         \
   F(ThrowInvalidStringLength, 0, 1)                 \
   F(ThrowIteratorResultNotAnObject, 1, 1)           \
+  F(ThrowSymbolIteratorInvalid, 0, 1)               \
+  F(ThrowNonCallableInInstanceOfCheck, 0, 1)        \
+  F(ThrowNonObjectInInstanceOfCheck, 0, 1)          \
+  F(ThrowNotConstructor, 1, 1)                      \
   F(ThrowNotGeneric, 1, 1)                          \
   F(ThrowReferenceError, 1, 1)                      \
   F(ThrowStackOverflow, 0, 1)                       \
+  F(ThrowSymbolAsyncIteratorInvalid, 0, 1)          \
   F(ThrowTypeError, -1 /* >= 1 */, 1)               \
-  F(ThrowWasmError, 2, 1)                           \
   F(ThrowUndefinedOrNullToObject, 1, 1)             \
   F(Typeof, 1, 1)                                   \
-  F(UnwindAndFindExceptionHandler, 0, 1)
+  F(UnwindAndFindExceptionHandler, 0, 1)            \
+  F(AllowDynamicFunction, 1, 1)
 
 #define FOR_EACH_INTRINSIC_LITERALS(F) \
   F(CreateRegExpLiteral, 4, 1)         \
@@ -341,13 +338,13 @@
   F(CreateArrayLiteral, 4, 1)          \
   F(CreateArrayLiteralStubBailout, 3, 1)
 
-
 #define FOR_EACH_INTRINSIC_LIVEEDIT(F)              \
   F(LiveEditFindSharedFunctionInfosForScript, 1, 1) \
   F(LiveEditGatherCompileInfo, 2, 1)                \
   F(LiveEditReplaceScript, 3, 1)                    \
-  F(LiveEditFunctionSourceUpdated, 1, 1)            \
+  F(LiveEditFunctionSourceUpdated, 2, 1)            \
   F(LiveEditReplaceFunctionCode, 2, 1)              \
+  F(LiveEditFixupScript, 2, 1)                      \
   F(LiveEditFunctionSetScript, 2, 1)                \
   F(LiveEditReplaceRefToNestedFunction, 3, 1)       \
   F(LiveEditPatchFunctionPositions, 2, 1)           \
@@ -358,6 +355,7 @@
 #define FOR_EACH_INTRINSIC_MATHS(F) F(GenerateRandomNumbers, 0, 1)
 
 #define FOR_EACH_INTRINSIC_MODULE(F) \
+  F(DynamicImportCall, 1, 1)         \
   F(GetModuleNamespace, 1, 1)        \
   F(LoadModuleVariable, 1, 1)        \
   F(StoreModuleVariable, 2, 1)
@@ -377,56 +375,57 @@
   F(GetHoleNaNUpper, 0, 1)             \
   F(GetHoleNaNLower, 0, 1)
 
-#define FOR_EACH_INTRINSIC_OBJECT(F)                 \
-  F(GetPrototype, 1, 1)                              \
-  F(ObjectHasOwnProperty, 2, 1)                      \
-  F(ObjectCreate, 2, 1)                              \
-  F(InternalSetPrototype, 2, 1)                      \
-  F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
-  F(GetProperty, 2, 1)                               \
-  F(KeyedGetProperty, 2, 1)                          \
-  F(AddNamedProperty, 4, 1)                          \
-  F(SetProperty, 4, 1)                               \
-  F(AddElement, 3, 1)                                \
-  F(AppendElement, 2, 1)                             \
-  F(DeleteProperty_Sloppy, 2, 1)                     \
-  F(DeleteProperty_Strict, 2, 1)                     \
-  F(HasProperty, 2, 1)                               \
-  F(GetOwnPropertyKeys, 2, 1)                        \
-  F(GetInterceptorInfo, 1, 1)                        \
-  F(ToFastProperties, 1, 1)                          \
-  F(AllocateHeapNumber, 0, 1)                        \
-  F(NewObject, 2, 1)                                 \
-  F(FinalizeInstanceSize, 1, 1)                      \
-  F(LoadMutableDouble, 2, 1)                         \
-  F(TryMigrateInstance, 1, 1)                        \
-  F(IsJSGlobalProxy, 1, 1)                           \
-  F(DefineAccessorPropertyUnchecked, 5, 1)           \
-  F(DefineDataPropertyInLiteral, 5, 1)               \
-  F(DefineDataProperty, 5, 1)                        \
-  F(GetDataProperty, 2, 1)                           \
-  F(GetConstructorName, 1, 1)                        \
-  F(HasFastPackedElements, 1, 1)                     \
-  F(ValueOf, 1, 1)                                   \
-  F(IsJSReceiver, 1, 1)                              \
-  F(ClassOf, 1, 1)                                   \
-  F(DefineGetterPropertyUnchecked, 4, 1)             \
-  F(DefineSetterPropertyUnchecked, 4, 1)             \
-  F(ToObject, 1, 1)                                  \
-  F(ToPrimitive, 1, 1)                               \
-  F(ToPrimitive_Number, 1, 1)                        \
-  F(ToNumber, 1, 1)                                  \
-  F(ToInteger, 1, 1)                                 \
-  F(ToLength, 1, 1)                                  \
-  F(ToString, 1, 1)                                  \
-  F(ToName, 1, 1)                                    \
-  F(SameValue, 2, 1)                                 \
-  F(SameValueZero, 2, 1)                             \
-  F(Compare, 3, 1)                                   \
-  F(HasInPrototypeChain, 2, 1)                       \
-  F(CreateIterResultObject, 2, 1)                    \
-  F(CreateKeyValueArray, 2, 1)                       \
-  F(IsAccessCheckNeeded, 1, 1)                       \
+#define FOR_EACH_INTRINSIC_OBJECT(F)                            \
+  F(GetPrototype, 1, 1)                                         \
+  F(ObjectHasOwnProperty, 2, 1)                                 \
+  F(ObjectCreate, 2, 1)                                         \
+  F(InternalSetPrototype, 2, 1)                                 \
+  F(OptimizeObjectForAddingMultipleProperties, 2, 1)            \
+  F(GetProperty, 2, 1)                                          \
+  F(KeyedGetProperty, 2, 1)                                     \
+  F(AddNamedProperty, 4, 1)                                     \
+  F(SetProperty, 4, 1)                                          \
+  F(AddElement, 3, 1)                                           \
+  F(AppendElement, 2, 1)                                        \
+  F(DeleteProperty_Sloppy, 2, 1)                                \
+  F(DeleteProperty_Strict, 2, 1)                                \
+  F(HasProperty, 2, 1)                                          \
+  F(GetOwnPropertyKeys, 2, 1)                                   \
+  F(GetInterceptorInfo, 1, 1)                                   \
+  F(ToFastProperties, 1, 1)                                     \
+  F(AllocateHeapNumber, 0, 1)                                   \
+  F(NewObject, 2, 1)                                            \
+  F(FinalizeInstanceSize, 1, 1)                                 \
+  F(LoadMutableDouble, 2, 1)                                    \
+  F(TryMigrateInstance, 1, 1)                                   \
+  F(IsJSGlobalProxy, 1, 1)                                      \
+  F(DefineAccessorPropertyUnchecked, 5, 1)                      \
+  F(DefineDataPropertyInLiteral, 6, 1)                          \
+  F(GetDataProperty, 2, 1)                                      \
+  F(GetConstructorName, 1, 1)                                   \
+  F(HasFastPackedElements, 1, 1)                                \
+  F(ValueOf, 1, 1)                                              \
+  F(IsJSReceiver, 1, 1)                                         \
+  F(ClassOf, 1, 1)                                              \
+  F(CopyDataProperties, 2, 1)                                   \
+  F(CopyDataPropertiesWithExcludedProperties, -1 /* >= 1 */, 1) \
+  F(DefineGetterPropertyUnchecked, 4, 1)                        \
+  F(DefineSetterPropertyUnchecked, 4, 1)                        \
+  F(ToObject, 1, 1)                                             \
+  F(ToPrimitive, 1, 1)                                          \
+  F(ToPrimitive_Number, 1, 1)                                   \
+  F(ToNumber, 1, 1)                                             \
+  F(ToInteger, 1, 1)                                            \
+  F(ToLength, 1, 1)                                             \
+  F(ToString, 1, 1)                                             \
+  F(ToName, 1, 1)                                               \
+  F(SameValue, 2, 1)                                            \
+  F(SameValueZero, 2, 1)                                        \
+  F(Compare, 3, 1)                                              \
+  F(HasInPrototypeChain, 2, 1)                                  \
+  F(CreateIterResultObject, 2, 1)                               \
+  F(CreateKeyValueArray, 2, 1)                                  \
+  F(IsAccessCheckNeeded, 1, 1)                                  \
   F(CreateDataProperty, 3, 1)
 
 #define FOR_EACH_INTRINSIC_OPERATORS(F) \
@@ -451,6 +450,21 @@
   F(GreaterThanOrEqual, 2, 1)           \
   F(InstanceOf, 2, 1)
 
+#define FOR_EACH_INTRINSIC_PROMISE(F)       \
+  F(EnqueueMicrotask, 1, 1)                 \
+  F(EnqueuePromiseReactionJob, 1, 1)        \
+  F(EnqueuePromiseResolveThenableJob, 1, 1) \
+  F(PromiseHookInit, 2, 1)                  \
+  F(PromiseHookResolve, 1, 1)               \
+  F(PromiseHookBefore, 1, 1)                \
+  F(PromiseHookAfter, 1, 1)                 \
+  F(PromiseMarkAsHandled, 1, 1)             \
+  F(PromiseRejectEventFromStack, 2, 1)      \
+  F(PromiseRevokeReject, 1, 1)              \
+  F(PromiseResult, 1, 1)                    \
+  F(PromiseStatus, 1, 1)                    \
+  F(ReportPromiseReject, 2, 1)
+
 #define FOR_EACH_INTRINSIC_PROXY(F)     \
   F(IsJSProxy, 1, 1)                    \
   F(JSProxyCall, -1 /* >= 2 */, 1)      \
@@ -465,8 +479,10 @@
   F(RegExpExec, 4, 1)                               \
   F(RegExpExecMultiple, 4, 1)                       \
   F(RegExpExecReThrow, 4, 1)                        \
+  F(RegExpInitializeAndCompile, 3, 1)               \
   F(RegExpInternalReplace, 3, 1)                    \
   F(RegExpReplace, 3, 1)                            \
+  F(RegExpSplit, 3, 1)                              \
   F(StringReplaceGlobalRegExpWithString, 4, 1)      \
   F(StringReplaceNonGlobalRegExpWithFunction, 3, 1) \
   F(StringSplit, 3, 1)
@@ -482,10 +498,11 @@
   F(NewStrictArguments, 1, 1)           \
   F(NewRestParameter, 1, 1)             \
   F(NewSloppyArguments, 3, 1)           \
-  F(NewClosure, 1, 1)                   \
-  F(NewClosure_Tenured, 1, 1)           \
+  F(NewArgumentsElements, 2, 1)         \
+  F(NewClosure, 3, 1)                   \
+  F(NewClosure_Tenured, 3, 1)           \
   F(NewScriptContext, 2, 1)             \
-  F(NewFunctionContext, 1, 1)           \
+  F(NewFunctionContext, 2, 1)           \
   F(PushModuleContext, 3, 1)            \
   F(PushWithContext, 3, 1)              \
   F(PushCatchContext, 4, 1)             \
@@ -496,319 +513,11 @@
   F(StoreLookupSlot_Sloppy, 2, 1)       \
   F(StoreLookupSlot_Strict, 2, 1)
 
-#define FOR_EACH_INTRINSIC_SIMD(F)     \
-  F(IsSimdValue, 1, 1)                 \
-  F(CreateFloat32x4, 4, 1)             \
-  F(CreateInt32x4, 4, 1)               \
-  F(CreateUint32x4, 4, 1)              \
-  F(CreateBool32x4, 4, 1)              \
-  F(CreateInt16x8, 8, 1)               \
-  F(CreateUint16x8, 8, 1)              \
-  F(CreateBool16x8, 8, 1)              \
-  F(CreateInt8x16, 16, 1)              \
-  F(CreateUint8x16, 16, 1)             \
-  F(CreateBool8x16, 16, 1)             \
-  F(Float32x4Check, 1, 1)              \
-  F(Float32x4ExtractLane, 2, 1)        \
-  F(Float32x4ReplaceLane, 3, 1)        \
-  F(Float32x4Abs, 1, 1)                \
-  F(Float32x4Neg, 1, 1)                \
-  F(Float32x4Sqrt, 1, 1)               \
-  F(Float32x4RecipApprox, 1, 1)        \
-  F(Float32x4RecipSqrtApprox, 1, 1)    \
-  F(Float32x4Add, 2, 1)                \
-  F(Float32x4Sub, 2, 1)                \
-  F(Float32x4Mul, 2, 1)                \
-  F(Float32x4Div, 2, 1)                \
-  F(Float32x4Min, 2, 1)                \
-  F(Float32x4Max, 2, 1)                \
-  F(Float32x4MinNum, 2, 1)             \
-  F(Float32x4MaxNum, 2, 1)             \
-  F(Float32x4Equal, 2, 1)              \
-  F(Float32x4NotEqual, 2, 1)           \
-  F(Float32x4LessThan, 2, 1)           \
-  F(Float32x4LessThanOrEqual, 2, 1)    \
-  F(Float32x4GreaterThan, 2, 1)        \
-  F(Float32x4GreaterThanOrEqual, 2, 1) \
-  F(Float32x4Select, 3, 1)             \
-  F(Float32x4Swizzle, 5, 1)            \
-  F(Float32x4Shuffle, 6, 1)            \
-  F(Float32x4FromInt32x4, 1, 1)        \
-  F(Float32x4FromUint32x4, 1, 1)       \
-  F(Float32x4FromInt32x4Bits, 1, 1)    \
-  F(Float32x4FromUint32x4Bits, 1, 1)   \
-  F(Float32x4FromInt16x8Bits, 1, 1)    \
-  F(Float32x4FromUint16x8Bits, 1, 1)   \
-  F(Float32x4FromInt8x16Bits, 1, 1)    \
-  F(Float32x4FromUint8x16Bits, 1, 1)   \
-  F(Float32x4Load, 2, 1)               \
-  F(Float32x4Load1, 2, 1)              \
-  F(Float32x4Load2, 2, 1)              \
-  F(Float32x4Load3, 2, 1)              \
-  F(Float32x4Store, 3, 1)              \
-  F(Float32x4Store1, 3, 1)             \
-  F(Float32x4Store2, 3, 1)             \
-  F(Float32x4Store3, 3, 1)             \
-  F(Int32x4Check, 1, 1)                \
-  F(Int32x4ExtractLane, 2, 1)          \
-  F(Int32x4ReplaceLane, 3, 1)          \
-  F(Int32x4Neg, 1, 1)                  \
-  F(Int32x4Add, 2, 1)                  \
-  F(Int32x4Sub, 2, 1)                  \
-  F(Int32x4Mul, 2, 1)                  \
-  F(Int32x4Min, 2, 1)                  \
-  F(Int32x4Max, 2, 1)                  \
-  F(Int32x4And, 2, 1)                  \
-  F(Int32x4Or, 2, 1)                   \
-  F(Int32x4Xor, 2, 1)                  \
-  F(Int32x4Not, 1, 1)                  \
-  F(Int32x4ShiftLeftByScalar, 2, 1)    \
-  F(Int32x4ShiftRightByScalar, 2, 1)   \
-  F(Int32x4Equal, 2, 1)                \
-  F(Int32x4NotEqual, 2, 1)             \
-  F(Int32x4LessThan, 2, 1)             \
-  F(Int32x4LessThanOrEqual, 2, 1)      \
-  F(Int32x4GreaterThan, 2, 1)          \
-  F(Int32x4GreaterThanOrEqual, 2, 1)   \
-  F(Int32x4Select, 3, 1)               \
-  F(Int32x4Swizzle, 5, 1)              \
-  F(Int32x4Shuffle, 6, 1)              \
-  F(Int32x4FromFloat32x4, 1, 1)        \
-  F(Int32x4FromUint32x4, 1, 1)         \
-  F(Int32x4FromFloat32x4Bits, 1, 1)    \
-  F(Int32x4FromUint32x4Bits, 1, 1)     \
-  F(Int32x4FromInt16x8Bits, 1, 1)      \
-  F(Int32x4FromUint16x8Bits, 1, 1)     \
-  F(Int32x4FromInt8x16Bits, 1, 1)      \
-  F(Int32x4FromUint8x16Bits, 1, 1)     \
-  F(Int32x4Load, 2, 1)                 \
-  F(Int32x4Load1, 2, 1)                \
-  F(Int32x4Load2, 2, 1)                \
-  F(Int32x4Load3, 2, 1)                \
-  F(Int32x4Store, 3, 1)                \
-  F(Int32x4Store1, 3, 1)               \
-  F(Int32x4Store2, 3, 1)               \
-  F(Int32x4Store3, 3, 1)               \
-  F(Uint32x4Check, 1, 1)               \
-  F(Uint32x4ExtractLane, 2, 1)         \
-  F(Uint32x4ReplaceLane, 3, 1)         \
-  F(Uint32x4Add, 2, 1)                 \
-  F(Uint32x4Sub, 2, 1)                 \
-  F(Uint32x4Mul, 2, 1)                 \
-  F(Uint32x4Min, 2, 1)                 \
-  F(Uint32x4Max, 2, 1)                 \
-  F(Uint32x4And, 2, 1)                 \
-  F(Uint32x4Or, 2, 1)                  \
-  F(Uint32x4Xor, 2, 1)                 \
-  F(Uint32x4Not, 1, 1)                 \
-  F(Uint32x4ShiftLeftByScalar, 2, 1)   \
-  F(Uint32x4ShiftRightByScalar, 2, 1)  \
-  F(Uint32x4Equal, 2, 1)               \
-  F(Uint32x4NotEqual, 2, 1)            \
-  F(Uint32x4LessThan, 2, 1)            \
-  F(Uint32x4LessThanOrEqual, 2, 1)     \
-  F(Uint32x4GreaterThan, 2, 1)         \
-  F(Uint32x4GreaterThanOrEqual, 2, 1)  \
-  F(Uint32x4Select, 3, 1)              \
-  F(Uint32x4Swizzle, 5, 1)             \
-  F(Uint32x4Shuffle, 6, 1)             \
-  F(Uint32x4FromFloat32x4, 1, 1)       \
-  F(Uint32x4FromInt32x4, 1, 1)         \
-  F(Uint32x4FromFloat32x4Bits, 1, 1)   \
-  F(Uint32x4FromInt32x4Bits, 1, 1)     \
-  F(Uint32x4FromInt16x8Bits, 1, 1)     \
-  F(Uint32x4FromUint16x8Bits, 1, 1)    \
-  F(Uint32x4FromInt8x16Bits, 1, 1)     \
-  F(Uint32x4FromUint8x16Bits, 1, 1)    \
-  F(Uint32x4Load, 2, 1)                \
-  F(Uint32x4Load1, 2, 1)               \
-  F(Uint32x4Load2, 2, 1)               \
-  F(Uint32x4Load3, 2, 1)               \
-  F(Uint32x4Store, 3, 1)               \
-  F(Uint32x4Store1, 3, 1)              \
-  F(Uint32x4Store2, 3, 1)              \
-  F(Uint32x4Store3, 3, 1)              \
-  F(Bool32x4Check, 1, 1)               \
-  F(Bool32x4ExtractLane, 2, 1)         \
-  F(Bool32x4ReplaceLane, 3, 1)         \
-  F(Bool32x4And, 2, 1)                 \
-  F(Bool32x4Or, 2, 1)                  \
-  F(Bool32x4Xor, 2, 1)                 \
-  F(Bool32x4Not, 1, 1)                 \
-  F(Bool32x4AnyTrue, 1, 1)             \
-  F(Bool32x4AllTrue, 1, 1)             \
-  F(Bool32x4Swizzle, 5, 1)             \
-  F(Bool32x4Shuffle, 6, 1)             \
-  F(Bool32x4Equal, 2, 1)               \
-  F(Bool32x4NotEqual, 2, 1)            \
-  F(Int16x8Check, 1, 1)                \
-  F(Int16x8ExtractLane, 2, 1)          \
-  F(Int16x8ReplaceLane, 3, 1)          \
-  F(Int16x8Neg, 1, 1)                  \
-  F(Int16x8Add, 2, 1)                  \
-  F(Int16x8AddSaturate, 2, 1)          \
-  F(Int16x8Sub, 2, 1)                  \
-  F(Int16x8SubSaturate, 2, 1)          \
-  F(Int16x8Mul, 2, 1)                  \
-  F(Int16x8Min, 2, 1)                  \
-  F(Int16x8Max, 2, 1)                  \
-  F(Int16x8And, 2, 1)                  \
-  F(Int16x8Or, 2, 1)                   \
-  F(Int16x8Xor, 2, 1)                  \
-  F(Int16x8Not, 1, 1)                  \
-  F(Int16x8ShiftLeftByScalar, 2, 1)    \
-  F(Int16x8ShiftRightByScalar, 2, 1)   \
-  F(Int16x8Equal, 2, 1)                \
-  F(Int16x8NotEqual, 2, 1)             \
-  F(Int16x8LessThan, 2, 1)             \
-  F(Int16x8LessThanOrEqual, 2, 1)      \
-  F(Int16x8GreaterThan, 2, 1)          \
-  F(Int16x8GreaterThanOrEqual, 2, 1)   \
-  F(Int16x8Select, 3, 1)               \
-  F(Int16x8Swizzle, 9, 1)              \
-  F(Int16x8Shuffle, 10, 1)             \
-  F(Int16x8FromUint16x8, 1, 1)         \
-  F(Int16x8FromFloat32x4Bits, 1, 1)    \
-  F(Int16x8FromInt32x4Bits, 1, 1)      \
-  F(Int16x8FromUint32x4Bits, 1, 1)     \
-  F(Int16x8FromUint16x8Bits, 1, 1)     \
-  F(Int16x8FromInt8x16Bits, 1, 1)      \
-  F(Int16x8FromUint8x16Bits, 1, 1)     \
-  F(Int16x8Load, 2, 1)                 \
-  F(Int16x8Store, 3, 1)                \
-  F(Uint16x8Check, 1, 1)               \
-  F(Uint16x8ExtractLane, 2, 1)         \
-  F(Uint16x8ReplaceLane, 3, 1)         \
-  F(Uint16x8Add, 2, 1)                 \
-  F(Uint16x8AddSaturate, 2, 1)         \
-  F(Uint16x8Sub, 2, 1)                 \
-  F(Uint16x8SubSaturate, 2, 1)         \
-  F(Uint16x8Mul, 2, 1)                 \
-  F(Uint16x8Min, 2, 1)                 \
-  F(Uint16x8Max, 2, 1)                 \
-  F(Uint16x8And, 2, 1)                 \
-  F(Uint16x8Or, 2, 1)                  \
-  F(Uint16x8Xor, 2, 1)                 \
-  F(Uint16x8Not, 1, 1)                 \
-  F(Uint16x8ShiftLeftByScalar, 2, 1)   \
-  F(Uint16x8ShiftRightByScalar, 2, 1)  \
-  F(Uint16x8Equal, 2, 1)               \
-  F(Uint16x8NotEqual, 2, 1)            \
-  F(Uint16x8LessThan, 2, 1)            \
-  F(Uint16x8LessThanOrEqual, 2, 1)     \
-  F(Uint16x8GreaterThan, 2, 1)         \
-  F(Uint16x8GreaterThanOrEqual, 2, 1)  \
-  F(Uint16x8Select, 3, 1)              \
-  F(Uint16x8Swizzle, 9, 1)             \
-  F(Uint16x8Shuffle, 10, 1)            \
-  F(Uint16x8FromInt16x8, 1, 1)         \
-  F(Uint16x8FromFloat32x4Bits, 1, 1)   \
-  F(Uint16x8FromInt32x4Bits, 1, 1)     \
-  F(Uint16x8FromUint32x4Bits, 1, 1)    \
-  F(Uint16x8FromInt16x8Bits, 1, 1)     \
-  F(Uint16x8FromInt8x16Bits, 1, 1)     \
-  F(Uint16x8FromUint8x16Bits, 1, 1)    \
-  F(Uint16x8Load, 2, 1)                \
-  F(Uint16x8Store, 3, 1)               \
-  F(Bool16x8Check, 1, 1)               \
-  F(Bool16x8ExtractLane, 2, 1)         \
-  F(Bool16x8ReplaceLane, 3, 1)         \
-  F(Bool16x8And, 2, 1)                 \
-  F(Bool16x8Or, 2, 1)                  \
-  F(Bool16x8Xor, 2, 1)                 \
-  F(Bool16x8Not, 1, 1)                 \
-  F(Bool16x8AnyTrue, 1, 1)             \
-  F(Bool16x8AllTrue, 1, 1)             \
-  F(Bool16x8Swizzle, 9, 1)             \
-  F(Bool16x8Shuffle, 10, 1)            \
-  F(Bool16x8Equal, 2, 1)               \
-  F(Bool16x8NotEqual, 2, 1)            \
-  F(Int8x16Check, 1, 1)                \
-  F(Int8x16ExtractLane, 2, 1)          \
-  F(Int8x16ReplaceLane, 3, 1)          \
-  F(Int8x16Neg, 1, 1)                  \
-  F(Int8x16Add, 2, 1)                  \
-  F(Int8x16AddSaturate, 2, 1)          \
-  F(Int8x16Sub, 2, 1)                  \
-  F(Int8x16SubSaturate, 2, 1)          \
-  F(Int8x16Mul, 2, 1)                  \
-  F(Int8x16Min, 2, 1)                  \
-  F(Int8x16Max, 2, 1)                  \
-  F(Int8x16And, 2, 1)                  \
-  F(Int8x16Or, 2, 1)                   \
-  F(Int8x16Xor, 2, 1)                  \
-  F(Int8x16Not, 1, 1)                  \
-  F(Int8x16ShiftLeftByScalar, 2, 1)    \
-  F(Int8x16ShiftRightByScalar, 2, 1)   \
-  F(Int8x16Equal, 2, 1)                \
-  F(Int8x16NotEqual, 2, 1)             \
-  F(Int8x16LessThan, 2, 1)             \
-  F(Int8x16LessThanOrEqual, 2, 1)      \
-  F(Int8x16GreaterThan, 2, 1)          \
-  F(Int8x16GreaterThanOrEqual, 2, 1)   \
-  F(Int8x16Select, 3, 1)               \
-  F(Int8x16Swizzle, 17, 1)             \
-  F(Int8x16Shuffle, 18, 1)             \
-  F(Int8x16FromUint8x16, 1, 1)         \
-  F(Int8x16FromFloat32x4Bits, 1, 1)    \
-  F(Int8x16FromInt32x4Bits, 1, 1)      \
-  F(Int8x16FromUint32x4Bits, 1, 1)     \
-  F(Int8x16FromInt16x8Bits, 1, 1)      \
-  F(Int8x16FromUint16x8Bits, 1, 1)     \
-  F(Int8x16FromUint8x16Bits, 1, 1)     \
-  F(Int8x16Load, 2, 1)                 \
-  F(Int8x16Store, 3, 1)                \
-  F(Uint8x16Check, 1, 1)               \
-  F(Uint8x16ExtractLane, 2, 1)         \
-  F(Uint8x16ReplaceLane, 3, 1)         \
-  F(Uint8x16Add, 2, 1)                 \
-  F(Uint8x16AddSaturate, 2, 1)         \
-  F(Uint8x16Sub, 2, 1)                 \
-  F(Uint8x16SubSaturate, 2, 1)         \
-  F(Uint8x16Mul, 2, 1)                 \
-  F(Uint8x16Min, 2, 1)                 \
-  F(Uint8x16Max, 2, 1)                 \
-  F(Uint8x16And, 2, 1)                 \
-  F(Uint8x16Or, 2, 1)                  \
-  F(Uint8x16Xor, 2, 1)                 \
-  F(Uint8x16Not, 1, 1)                 \
-  F(Uint8x16ShiftLeftByScalar, 2, 1)   \
-  F(Uint8x16ShiftRightByScalar, 2, 1)  \
-  F(Uint8x16Equal, 2, 1)               \
-  F(Uint8x16NotEqual, 2, 1)            \
-  F(Uint8x16LessThan, 2, 1)            \
-  F(Uint8x16LessThanOrEqual, 2, 1)     \
-  F(Uint8x16GreaterThan, 2, 1)         \
-  F(Uint8x16GreaterThanOrEqual, 2, 1)  \
-  F(Uint8x16Select, 3, 1)              \
-  F(Uint8x16Swizzle, 17, 1)            \
-  F(Uint8x16Shuffle, 18, 1)            \
-  F(Uint8x16FromInt8x16, 1, 1)         \
-  F(Uint8x16FromFloat32x4Bits, 1, 1)   \
-  F(Uint8x16FromInt32x4Bits, 1, 1)     \
-  F(Uint8x16FromUint32x4Bits, 1, 1)    \
-  F(Uint8x16FromInt16x8Bits, 1, 1)     \
-  F(Uint8x16FromUint16x8Bits, 1, 1)    \
-  F(Uint8x16FromInt8x16Bits, 1, 1)     \
-  F(Uint8x16Load, 2, 1)                \
-  F(Uint8x16Store, 3, 1)               \
-  F(Bool8x16Check, 1, 1)               \
-  F(Bool8x16ExtractLane, 2, 1)         \
-  F(Bool8x16ReplaceLane, 3, 1)         \
-  F(Bool8x16And, 2, 1)                 \
-  F(Bool8x16Or, 2, 1)                  \
-  F(Bool8x16Xor, 2, 1)                 \
-  F(Bool8x16Not, 1, 1)                 \
-  F(Bool8x16AnyTrue, 1, 1)             \
-  F(Bool8x16AllTrue, 1, 1)             \
-  F(Bool8x16Swizzle, 17, 1)            \
-  F(Bool8x16Shuffle, 18, 1)            \
-  F(Bool8x16Equal, 2, 1)               \
-  F(Bool8x16NotEqual, 2, 1)
-
 #define FOR_EACH_INTRINSIC_STRINGS(F)     \
+  F(GetSubstitution, 4, 1)                \
   F(StringReplaceOneCharWithString, 3, 1) \
   F(StringIndexOf, 3, 1)                  \
+  F(StringIndexOfUnchecked, 3, 1)         \
   F(StringLastIndexOf, 2, 1)              \
   F(SubString, 3, 1)                      \
   F(StringAdd, 2, 1)                      \
@@ -819,8 +528,6 @@
   F(StringBuilderJoin, 3, 1)              \
   F(SparseJoinWithSeparator, 3, 1)        \
   F(StringToArray, 2, 1)                  \
-  F(StringToLowerCase, 1, 1)              \
-  F(StringToUpperCase, 1, 1)              \
   F(StringLessThan, 2, 1)                 \
   F(StringLessThanOrEqual, 2, 1)          \
   F(StringGreaterThan, 2, 1)              \
@@ -837,7 +544,6 @@
   F(CreatePrivateSymbol, 1, 1)       \
   F(SymbolDescription, 1, 1)         \
   F(SymbolDescriptiveString, 1, 1)   \
-  F(SymbolRegistry, 0, 1)            \
   F(SymbolIsPrivate, 1, 1)
 
 #define FOR_EACH_INTRINSIC_TEST(F)            \
@@ -856,7 +562,7 @@
   F(GetOptimizationCount, 1, 1)               \
   F(GetUndetectable, 0, 1)                    \
   F(GetCallable, 0, 1)                        \
-  F(ClearFunctionTypeFeedback, 1, 1)          \
+  F(ClearFunctionFeedback, 1, 1)              \
   F(CheckWasmWrapperElision, 2, 1)            \
   F(NotifyContextDisposed, 0, 1)              \
   F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
@@ -898,10 +604,14 @@
   F(SerializeWasmModule, 1, 1)                \
   F(DeserializeWasmModule, 2, 1)              \
   F(IsAsmWasmCode, 1, 1)                      \
-  F(IsNotAsmWasmCode, 1, 1)                   \
+  F(IsWasmCode, 1, 1)                         \
+  F(DisallowCodegenFromStrings, 0, 1)         \
   F(ValidateWasmInstancesChain, 2, 1)         \
   F(ValidateWasmModuleState, 1, 1)            \
-  F(ValidateWasmOrphanedInstance, 1, 1)
+  F(ValidateWasmOrphanedInstance, 1, 1)       \
+  F(SetWasmCompileControls, 2, 1)             \
+  F(SetWasmInstantiateControls, 0, 1)         \
+  F(Verify, 1, 1)
 
 #define FOR_EACH_INTRINSIC_TYPEDARRAY(F)     \
   F(ArrayBufferGetByteLength, 1, 1)          \
@@ -914,18 +624,23 @@
   F(TypedArrayGetLength, 1, 1)               \
   F(TypedArrayGetBuffer, 1, 1)               \
   F(TypedArraySetFastCases, 3, 1)            \
+  F(TypedArraySortFast, 1, 1)                \
   F(TypedArrayMaxSizeInHeap, 0, 1)           \
   F(IsTypedArray, 1, 1)                      \
   F(IsSharedTypedArray, 1, 1)                \
   F(IsSharedIntegerTypedArray, 1, 1)         \
   F(IsSharedInteger32TypedArray, 1, 1)
 
-#define FOR_EACH_INTRINSIC_WASM(F) \
-  F(WasmGrowMemory, 1, 1)          \
-  F(WasmMemorySize, 0, 1)          \
-  F(WasmThrowTypeError, 0, 1)      \
-  F(WasmThrow, 2, 1)               \
-  F(WasmGetCaughtExceptionValue, 1, 1)
+#define FOR_EACH_INTRINSIC_WASM(F)     \
+  F(WasmGrowMemory, 1, 1)              \
+  F(WasmMemorySize, 0, 1)              \
+  F(ThrowWasmError, 2, 1)              \
+  F(ThrowWasmErrorFromTrapIf, 1, 1)    \
+  F(WasmThrowTypeError, 0, 1)          \
+  F(WasmThrow, 2, 1)                   \
+  F(WasmGetCaughtExceptionValue, 1, 1) \
+  F(WasmRunInterpreter, 3, 1)          \
+  F(WasmStackGuard, 0, 1)
 
 #define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
   F(LoadLookupSlotForCall, 1, 2)
@@ -938,22 +653,20 @@
 #define FOR_EACH_INTRINSIC_IC(F)             \
   F(BinaryOpIC_Miss, 2, 1)                   \
   F(BinaryOpIC_MissWithAllocationSite, 3, 1) \
-  F(CallIC_Miss, 3, 1)                       \
   F(CompareIC_Miss, 3, 1)                    \
   F(ElementsTransitionAndStoreIC_Miss, 6, 1) \
   F(KeyedLoadIC_Miss, 4, 1)                  \
-  F(KeyedLoadIC_MissFromStubFailure, 4, 1)   \
   F(KeyedStoreIC_Miss, 5, 1)                 \
   F(KeyedStoreIC_Slow, 5, 1)                 \
   F(LoadElementWithInterceptor, 2, 1)        \
-  F(LoadGlobalIC_Miss, 2, 1)                 \
-  F(LoadGlobalIC_Slow, 2, 1)                 \
+  F(LoadGlobalIC_Miss, 3, 1)                 \
+  F(LoadGlobalIC_Slow, 3, 1)                 \
   F(LoadIC_Miss, 4, 1)                       \
-  F(LoadPropertyWithInterceptor, 3, 1)       \
+  F(LoadPropertyWithInterceptor, 5, 1)       \
   F(LoadPropertyWithInterceptorOnly, 3, 1)   \
   F(StoreCallbackProperty, 6, 1)             \
   F(StoreIC_Miss, 5, 1)                      \
-  F(StorePropertyWithInterceptor, 3, 1)      \
+  F(StorePropertyWithInterceptor, 5, 1)      \
   F(ToBooleanIC_Miss, 1, 1)                  \
   F(Unreachable, 0, 1)
 
@@ -980,10 +693,10 @@
   FOR_EACH_INTRINSIC_NUMBERS(F)             \
   FOR_EACH_INTRINSIC_OBJECT(F)              \
   FOR_EACH_INTRINSIC_OPERATORS(F)           \
+  FOR_EACH_INTRINSIC_PROMISE(F)             \
   FOR_EACH_INTRINSIC_PROXY(F)               \
   FOR_EACH_INTRINSIC_REGEXP(F)              \
   FOR_EACH_INTRINSIC_SCOPES(F)              \
-  FOR_EACH_INTRINSIC_SIMD(F)                \
   FOR_EACH_INTRINSIC_STRINGS(F)             \
   FOR_EACH_INTRINSIC_SYMBOL(F)              \
   FOR_EACH_INTRINSIC_TEST(F)                \
@@ -1009,14 +722,13 @@
 
 class Runtime : public AllStatic {
  public:
-  enum FunctionId {
+  enum FunctionId : int32_t {
 #define F(name, nargs, ressize) k##name,
 #define I(name, nargs, ressize) kInline##name,
-  FOR_EACH_INTRINSIC(F)
-  FOR_EACH_INTRINSIC(I)
+    FOR_EACH_INTRINSIC(F) FOR_EACH_INTRINSIC(I)
 #undef I
 #undef F
-    kNumFunctions,
+        kNumFunctions,
   };
 
   enum IntrinsicType { RUNTIME, INLINE };
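To make the X-macro concrete: for a list entry such as F(WasmStackGuard, 0, 1) from this file, the two expansion passes contribute

      kWasmStackGuard,        // from FOR_EACH_INTRINSIC(F)
      kInlineWasmStackGuard,  // from FOR_EACH_INTRINSIC(I)

so each intrinsic gets both a runtime id and an inline id, with kNumFunctions closing the enum as the total count.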
@@ -1133,6 +845,18 @@
 STATIC_ASSERT(LANGUAGE_END == 2);
 class DeclareGlobalsLanguageMode : public BitField<LanguageMode, 2, 1> {};
 
+// A set of bits returned by Runtime_GetOptimizationStatus.
+// These bits must be in sync with bits defined in test/mjsunit/mjsunit.js
+enum class OptimizationStatus {
+  kIsFunction = 1 << 0,
+  kNeverOptimize = 1 << 1,
+  kAlwaysOptimize = 1 << 2,
+  kMaybeDeopted = 1 << 3,
+  kOptimized = 1 << 4,
+  kTurboFanned = 1 << 5,
+  kInterpreted = 1 << 6,
+};
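Since these are flag bits OR-ed into a single status word, callers mask them individually; a short usage sketch (the value itself would come from the %GetOptimizationStatus intrinsic):

      int status = 0;  // e.g. the Smi returned by Runtime_GetOptimizationStatus
      bool is_function =
          (status & static_cast<int>(OptimizationStatus::kIsFunction)) != 0;
      bool optimized =
          (status & static_cast<int>(OptimizationStatus::kOptimized)) != 0;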
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/s390/assembler-s390-inl.h b/src/s390/assembler-s390-inl.h
index 189b89c..eee6d6c 100644
--- a/src/s390/assembler-s390-inl.h
+++ b/src/s390/assembler-s390-inl.h
@@ -41,6 +41,7 @@
 
 #include "src/assembler.h"
 #include "src/debug/debug.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -118,6 +119,18 @@
 
 int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
 
+Address Assembler::target_address_at(Address pc, Code* code) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+                                      Address target,
+                                      ICacheFlushMode icache_flush_mode) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
+
 Address Assembler::target_address_from_return_address(Address pc) {
   // Returns the address of the call target from the return address that will
   // be returned to after a call.
diff --git a/src/s390/assembler-s390.cc b/src/s390/assembler-s390.cc
index a448947..19510b2 100644
--- a/src/s390/assembler-s390.cc
+++ b/src/s390/assembler-s390.cc
@@ -138,39 +138,48 @@
     // The facilities we are checking for are:
     //   Bit 45  - Distinct Operands for instructions like ARK, SRK, etc.
     //   Bit 34  - General Instruction Extension Facility
     //   Bit 37  - Floating Point Extension Facility
     //   Bit 58  - Miscellaneous Instruction Extension Facility
     //   Bit 129 - Vector Facility
     // As such, we require 3 double words (bit 129 lives in the third one).
-    int64_t facilities[1];
-    facilities[0] = 0;
+    int64_t facilities[3] = {0L};
     // LHI sets up GPR0
     // STFLE is specified as .insn, as opcode is not recognized.
     // We register that the instructions kill r0 (LHI) and the CC (STFLE).
     asm volatile(
-        "lhi   0,0\n"
+        "lhi   0,2\n"
         ".insn s,0xb2b00000,%0\n"
         : "=Q"(facilities)
         :
         : "cc", "r0");
 
+    uint64_t one = static_cast<uint64_t>(1);
     // Test for Distinct Operands Facility - Bit 45
-    if (facilities[0] & (1lu << (63 - 45))) {
+    if (facilities[0] & (one << (63 - 45))) {
       supported_ |= (1u << DISTINCT_OPS);
     }
     // Test for General Instruction Extension Facility - Bit 34
-    if (facilities[0] & (1lu << (63 - 34))) {
+    if (facilities[0] & (one << (63 - 34))) {
       supported_ |= (1u << GENERAL_INSTR_EXT);
     }
     // Test for Floating Point Extension Facility - Bit 37
-    if (facilities[0] & (1lu << (63 - 37))) {
+    if (facilities[0] & (one << (63 - 37))) {
       supported_ |= (1u << FLOATING_POINT_EXT);
     }
+    // Test for Vector Facility - Bit 129
+    if (facilities[2] & (one << (63 - (129 - 128)))) {
+      supported_ |= (1u << VECTOR_FACILITY);
+    }
+    // Test for Miscellaneous Instruction Extension Facility - Bit 58
+    if (facilities[0] & (one << (63 - 58))) {
+      supported_ |= (1u << MISC_INSTR_EXT2);
+    }
   }
 #else
   // All distinct ops instructions can be simulated
   supported_ |= (1u << DISTINCT_OPS);
   // RISBG can be simulated
   supported_ |= (1u << GENERAL_INSTR_EXT);
-
   supported_ |= (1u << FLOATING_POINT_EXT);
+  supported_ |= (1u << MISC_INSTR_EXT2);
+  supported_ |= (1u << VECTOR_FACILITY);
   USE(performSTFLE);  // To avoid assert
 #endif
   supported_ |= (1u << FPU);
 }
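The bit arithmetic in the tests above generalizes as follows (a minimal sketch assuming STFLE's MSB-first bit numbering; HasFacility is a hypothetical helper, not V8 API):

    #include <cstdint>

    // Facility bit N lives in doubleword N / 64; within a doubleword the
    // bits are numbered from the most significant end, hence 63 - (N % 64).
    bool HasFacility(const uint64_t* facilities, int bit) {
      uint64_t one = 1;  // widen before shifting, as the code above does
      return (facilities[bit / 64] & (one << (63 - (bit % 64)))) != 0;
    }

Under this numbering, bit 45 tests facilities[0] against 1 << 18 and bit 129 tests facilities[2] against 1 << 62, matching the checks above.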
@@ -192,6 +201,8 @@
   printf("FPU_EXT=%d\n", CpuFeatures::IsSupported(FLOATING_POINT_EXT));
   printf("GENERAL_INSTR=%d\n", CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
   printf("DISTINCT_OPS=%d\n", CpuFeatures::IsSupported(DISTINCT_OPS));
+  printf("VECTOR_FACILITY=%d\n", CpuFeatures::IsSupported(VECTOR_FACILITY));
+  printf("MISC_INSTR_EXT2=%d\n", CpuFeatures::IsSupported(MISC_INSTR_EXT2));
 }
 
 Register ToRegister(int num) {
@@ -233,13 +244,19 @@
   return Assembler::target_address_at(pc_, host_);
 }
 
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+  return static_cast<uint32_t>(
+      reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
+}
+
 void RelocInfo::unchecked_update_wasm_memory_reference(
     Address address, ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
 }
 
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
-                                                  ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+                                           ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_,
                                    reinterpret_cast<Address>(size), flush_mode);
 }
@@ -522,11 +539,11 @@
 
 // Pseudo op - branch on condition
 void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound) {
-  int offset = branch_offset;
-  if (is_bound && is_int16(offset)) {
-    brc(c, Operand(offset & 0xFFFF));  // short jump
+  int offset_in_halfwords = branch_offset / 2;
+  if (is_bound && is_int16(offset_in_halfwords)) {
+    brc(c, Operand(offset_in_halfwords));  // short jump
   } else {
-    brcl(c, Operand(offset));  // long jump
+    brcl(c, Operand(offset_in_halfwords));  // long jump
   }
 }
 
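The division above reflects that BRC and BRCL encode their relative offsets in halfwords rather than bytes (a standalone sketch under that assumption; ToHalfwordOffset is illustrative, not V8 API):

    #include <cassert>

    // S390 instructions are halfword-aligned, so a byte offset is always
    // even and the encoded immediate is simply half of it.
    int ToHalfwordOffset(int byte_offset) {
      assert((byte_offset & 1) == 0);
      return byte_offset / 2;
    }

This is also why the old brc()/brcl() wrappers, which performed the same division internally, are deleted further down: branchOnCond now passes halfword counts directly.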
@@ -580,66 +597,7 @@
   }
 }
 
-// RR format: <insn> R1,R2
-//    +--------+----+----+
-//    | OpCode | R1 | R2 |
-//    +--------+----+----+
-//    0        8    12  15
-#define RR_FORM_EMIT(name, op) \
-  void Assembler::name(Register r1, Register r2) { rr_form(op, r1, r2); }
 
-void Assembler::rr_form(Opcode op, Register r1, Register r2) {
-  DCHECK(is_uint8(op));
-  emit2bytes(op * B8 | r1.code() * B4 | r2.code());
-}
-
-void Assembler::rr_form(Opcode op, DoubleRegister r1, DoubleRegister r2) {
-  DCHECK(is_uint8(op));
-  emit2bytes(op * B8 | r1.code() * B4 | r2.code());
-}
-
-// RR2 format: <insn> M1,R2
-//    +--------+----+----+
-//    | OpCode | M1 | R2 |
-//    +--------+----+----+
-//    0        8    12  15
-#define RR2_FORM_EMIT(name, op) \
-  void Assembler::name(Condition m1, Register r2) { rr_form(op, m1, r2); }
-
-void Assembler::rr_form(Opcode op, Condition m1, Register r2) {
-  DCHECK(is_uint8(op));
-  DCHECK(is_uint4(m1));
-  emit2bytes(op * B8 | m1 * B4 | r2.code());
-}
-
-// RX format: <insn> R1,D2(X2,B2)
-//    +--------+----+----+----+-------------+
-//    | OpCode | R1 | X2 | B2 |     D2      |
-//    +--------+----+----+----+-------------+
-//    0        8    12   16   20           31
-#define RX_FORM_EMIT(name, op)                                           \
-  void Assembler::name(Register r, const MemOperand& opnd) {             \
-    name(r, opnd.getIndexRegister(), opnd.getBaseRegister(),             \
-         opnd.getDisplacement());                                        \
-  }                                                                      \
-  void Assembler::name(Register r1, Register x2, Register b2, Disp d2) { \
-    rx_form(op, r1, x2, b2, d2);                                         \
-  }
-void Assembler::rx_form(Opcode op, Register r1, Register x2, Register b2,
-                        Disp d2) {
-  DCHECK(is_uint8(op));
-  DCHECK(is_uint12(d2));
-  emit4bytes(op * B24 | r1.code() * B20 | x2.code() * B16 | b2.code() * B12 |
-             d2);
-}
-
-void Assembler::rx_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
-                        Disp d2) {
-  DCHECK(is_uint8(op));
-  DCHECK(is_uint12(d2));
-  emit4bytes(op * B24 | r1.code() * B20 | x2.code() * B16 | b2.code() * B12 |
-             d2);
-}
 
 // RI1 format: <insn> R1,I2
 //    +--------+----+----+------------------+
@@ -667,7 +625,7 @@
 void Assembler::ri_form(Opcode op, Condition m1, const Operand& i2) {
   DCHECK(is_uint12(op));
   DCHECK(is_uint4(m1));
-  DCHECK(is_uint16(i2.imm_));
+  DCHECK(op == BRC ? is_int16(i2.imm_) : is_uint16(i2.imm_));
   emit4bytes((op & 0xFF0) * B20 | m1 * B20 | (op & 0xF) * B16 |
              (i2.imm_ & 0xFFFF));
 }
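The relaxed DCHECK matters because BRC's immediate is now a signed halfword count (a standalone sketch; the two helpers are re-implemented here for illustration and mirror V8's is_int16/is_uint16):

    #include <cassert>
    #include <cstdint>

    static bool is_int16(int64_t v) { return v >= -32768 && v <= 32767; }
    static bool is_uint16(int64_t v) { return v >= 0 && v <= 65535; }

    int main() {
      int64_t imm = -4;  // halfword count for a backward branch of -8 bytes
      assert(is_int16(imm));    // accepted for BRC after this change
      assert(!is_uint16(imm));  // would have tripped the old check
      return 0;
    }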
@@ -716,75 +674,6 @@
   emit6bytes(code);
 }
 
-// RIL1 format: <insn> R1,I2
-//   +--------+----+----+------------------------------------+
-//   | OpCode | R1 |OpCd|                  I2                |
-//   +--------+----+----+------------------------------------+
-//   0        8    12   16                                  47
-#define RIL1_FORM_EMIT(name, op) \
-  void Assembler::name(Register r, const Operand& i2) { ril_form(op, r, i2); }
-
-void Assembler::ril_form(Opcode op, Register r1, const Operand& i2) {
-  DCHECK(is_uint12(op));
-  uint64_t code = (static_cast<uint64_t>(op & 0xFF0)) * B36 |
-                  (static_cast<uint64_t>(r1.code())) * B36 |
-                  (static_cast<uint64_t>(op & 0x00F)) * B32 |
-                  (static_cast<uint64_t>(i2.imm_) & 0xFFFFFFFF);
-  emit6bytes(code);
-}
-
-// RIL2 format: <insn> M1,I2
-//   +--------+----+----+------------------------------------+
-//   | OpCode | M1 |OpCd|                  I2                |
-//   +--------+----+----+------------------------------------+
-//   0        8    12   16                                  47
-#define RIL2_FORM_EMIT(name, op)                          \
-  void Assembler::name(Condition m1, const Operand& i2) { \
-    ril_form(op, m1, i2);                                 \
-  }
-
-void Assembler::ril_form(Opcode op, Condition m1, const Operand& i2) {
-  DCHECK(is_uint12(op));
-  DCHECK(is_uint4(m1));
-  uint64_t code = (static_cast<uint64_t>(op & 0xFF0)) * B36 |
-                  (static_cast<uint64_t>(m1)) * B36 |
-                  (static_cast<uint64_t>(op & 0x00F)) * B32 |
-                  (static_cast<uint64_t>(i2.imm_ & 0xFFFFFFFF));
-  emit6bytes(code);
-}
-
-// RRE format: <insn> R1,R2
-//    +------------------+--------+----+----+
-//    |      OpCode      |////////| R1 | R2 |
-//    +------------------+--------+----+----+
-//    0                  16       24   28  31
-#define RRE_FORM_EMIT(name, op) \
-  void Assembler::name(Register r1, Register r2) { rre_form(op, r1, r2); }
-
-void Assembler::rre_form(Opcode op, Register r1, Register r2) {
-  DCHECK(is_uint16(op));
-  emit4bytes(op << 16 | r1.code() * B4 | r2.code());
-}
-
-void Assembler::rre_form(Opcode op, DoubleRegister r1, DoubleRegister r2) {
-  DCHECK(is_uint16(op));
-  emit4bytes(op << 16 | r1.code() * B4 | r2.code());
-}
-
-// RRD format: <insn> R1,R3, R2
-//    +------------------+----+----+----+----+
-//    |      OpCode      | R1 |////| R3 | R2 |
-//    +------------------+----+----+----+----+
-//    0                  16  20   24   28   31
-#define RRD_FORM_EMIT(name, op)                                 \
-  void Assembler::name(Register r1, Register r3, Register r2) { \
-    rrd_form(op, r1, r3, r2);                                   \
-  }
-
-void Assembler::rrd_form(Opcode op, Register r1, Register r3, Register r2) {
-  emit4bytes(op << 16 | r1.code() * B12 | r3.code() * B4 | r2.code());
-}
-
 // RS1 format: <insn> R1,R3,D2(B2)
 //    +--------+----+----+----+-------------+
 //    | OpCode | R1 | R3 | B2 |     D2      |
@@ -942,62 +831,6 @@
   emit6bytes(code);
 }
 
-// RXY format: <insn> R1,D2(X2,B2)
-//    +--------+----+----+----+-------------+--------+--------+
-//    | OpCode | R1 | X2 | B2 |     DL2     |   DH2  | OpCode |
-//    +--------+----+----+----+-------------+--------+--------+
-//    0        8    12   16   20            32   36   40      47
-#define RXY_FORM_EMIT(name, op)                                          \
-  void Assembler::name(Register r1, Register x2, Register b2, Disp d2) { \
-    rxy_form(op, r1, x2, b2, d2);                                        \
-  }                                                                      \
-  void Assembler::name(Register r1, const MemOperand& opnd) {            \
-    name(r1, opnd.getIndexRegister(), opnd.getBaseRegister(),            \
-         opnd.getDisplacement());                                        \
-  }
-
-void Assembler::rxy_form(Opcode op, Register r1, Register x2, Register b2,
-                         Disp d2) {
-  DCHECK(is_int20(d2));
-  DCHECK(is_uint16(op));
-  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
-                  (static_cast<uint64_t>(r1.code())) * B36 |
-                  (static_cast<uint64_t>(x2.code())) * B32 |
-                  (static_cast<uint64_t>(b2.code())) * B28 |
-                  (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
-                  (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
-                  (static_cast<uint64_t>(op & 0x00FF));
-  emit6bytes(code);
-}
-
-void Assembler::rxy_form(Opcode op, Register r1, Condition m3, Register b2,
-                         Disp d2) {
-  DCHECK(is_int20(d2));
-  DCHECK(is_uint16(op));
-  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
-                  (static_cast<uint64_t>(r1.code())) * B36 |
-                  (static_cast<uint64_t>(m3 & 0xF)) * B32 |
-                  (static_cast<uint64_t>(b2.code())) * B28 |
-                  (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
-                  (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
-                  (static_cast<uint64_t>(op & 0x00FF));
-  emit6bytes(code);
-}
-
-void Assembler::rxy_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
-                         Disp d2) {
-  DCHECK(is_int20(d2));
-  DCHECK(is_uint16(op));
-  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
-                  (static_cast<uint64_t>(r1.code())) * B36 |
-                  (static_cast<uint64_t>(x2.code())) * B32 |
-                  (static_cast<uint64_t>(b2.code())) * B28 |
-                  (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
-                  (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
-                  (static_cast<uint64_t>(op & 0x00FF));
-  emit6bytes(code);
-}
-
 // RRS format: <insn> R1,R2,M3,D4(B4)
 //    +--------+----+----+----+-------------+----+---+--------+
 //    | OpCode | R1 | R2 | B4 |     D4      | M3 |///| OpCode |
@@ -1104,7 +937,7 @@
   }
 
 void Assembler::siy_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
-  DCHECK(is_uint20(d1));
+  DCHECK(is_uint20(d1) || is_int20(d1));
   DCHECK(is_uint16(op));
   DCHECK(is_uint8(i2.imm_));
   uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
@@ -1417,66 +1250,20 @@
 // end of S390 Instruction generation
 
 // start of S390 instruction
-RX_FORM_EMIT(bc, BC)
-RR_FORM_EMIT(bctr, BCTR)
-RXE_FORM_EMIT(ceb, CEB)
 SS1_FORM_EMIT(ed, ED)
-RX_FORM_EMIT(ex, EX)
-RRE_FORM_EMIT(flogr, FLOGR)
-RRE_FORM_EMIT(lcgr, LCGR)
-RR_FORM_EMIT(lcr, LCR)
-RX_FORM_EMIT(le_z, LE)
-RXY_FORM_EMIT(ley, LEY)
-RIL1_FORM_EMIT(llihf, LLIHF)
-RIL1_FORM_EMIT(llilf, LLILF)
-RRE_FORM_EMIT(lngr, LNGR)
-RR_FORM_EMIT(lnr, LNR)
-RRE_FORM_EMIT(lrvr, LRVR)
-RRE_FORM_EMIT(lrvgr, LRVGR)
-RXY_FORM_EMIT(lrv, LRV)
-RXY_FORM_EMIT(lrvg, LRVG)
-RXY_FORM_EMIT(lrvh, LRVH)
 SS1_FORM_EMIT(mvn, MVN)
 SS1_FORM_EMIT(nc, NC)
 SI_FORM_EMIT(ni, NI)
-RIL1_FORM_EMIT(nihf, NIHF)
-RIL1_FORM_EMIT(nilf, NILF)
 RI1_FORM_EMIT(nilh, NILH)
 RI1_FORM_EMIT(nill, NILL)
-RIL1_FORM_EMIT(oihf, OIHF)
-RIL1_FORM_EMIT(oilf, OILF)
 RI1_FORM_EMIT(oill, OILL)
-RRE_FORM_EMIT(popcnt, POPCNT_Z)
-RIL1_FORM_EMIT(slfi, SLFI)
-RXY_FORM_EMIT(slgf, SLGF)
-RIL1_FORM_EMIT(slgfi, SLGFI)
-RXY_FORM_EMIT(strvh, STRVH)
-RXY_FORM_EMIT(strv, STRV)
-RXY_FORM_EMIT(strvg, STRVG)
 RI1_FORM_EMIT(tmll, TMLL)
 SS1_FORM_EMIT(tr, TR)
 S_FORM_EMIT(ts, TS)
-RIL1_FORM_EMIT(xihf, XIHF)
-RIL1_FORM_EMIT(xilf, XILF)
 
 // -------------------------
 // Load Address Instructions
 // -------------------------
-// Load Address Register-Storage
-void Assembler::la(Register r1, const MemOperand& opnd) {
-  rx_form(LA, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Address Register-Storage
-void Assembler::lay(Register r1, const MemOperand& opnd) {
-  rxy_form(LAY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Address Relative Long
-void Assembler::larl(Register r1, const Operand& opnd) {
-  ril_form(LARL, r1, opnd);
-}
-
 // Load Address Relative Long
 void Assembler::larl(Register r1, Label* l) {
   larl(r1, Operand(branch_offset(l)));
@@ -1485,137 +1272,15 @@
 // -----------------
 // Load Instructions
 // -----------------
-// Load Byte Register-Storage (32<-8)
-void Assembler::lb(Register r, const MemOperand& src) {
-  rxy_form(LB, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Byte Register-Register (32<-8)
-void Assembler::lbr(Register r1, Register r2) { rre_form(LBR, r1, r2); }
-
-// Load Byte Register-Storage (64<-8)
-void Assembler::lgb(Register r, const MemOperand& src) {
-  rxy_form(LGB, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Byte Register-Register (64<-8)
-void Assembler::lgbr(Register r1, Register r2) { rre_form(LGBR, r1, r2); }
-
-// Load Halfword Register-Storage (32<-16)
-void Assembler::lh(Register r, const MemOperand& src) {
-  rx_form(LH, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Halfword Register-Storage (32<-16)
-void Assembler::lhy(Register r, const MemOperand& src) {
-  rxy_form(LHY, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Halfword Register-Register (32<-16)
-void Assembler::lhr(Register r1, Register r2) { rre_form(LHR, r1, r2); }
-
-// Load Halfword Register-Storage (64<-16)
-void Assembler::lgh(Register r, const MemOperand& src) {
-  rxy_form(LGH, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Halfword Register-Register (64<-16)
-void Assembler::lghr(Register r1, Register r2) { rre_form(LGHR, r1, r2); }
-
-// Load Register-Storage (32)
-void Assembler::l(Register r, const MemOperand& src) {
-  rx_form(L, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Register-Storage (32)
-void Assembler::ly(Register r, const MemOperand& src) {
-  rxy_form(LY, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Register-Register (32)
-void Assembler::lr(Register r1, Register r2) { rr_form(LR, r1, r2); }
-
-// Load Register-Storage (64)
-void Assembler::lg(Register r, const MemOperand& src) {
-  rxy_form(LG, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Register-Register (64)
-void Assembler::lgr(Register r1, Register r2) { rre_form(LGR, r1, r2); }
-
-// Load Register-Storage (64<-32)
-void Assembler::lgf(Register r, const MemOperand& src) {
-  rxy_form(LGF, r, src.rx(), src.rb(), src.offset());
-}
-
-// Load Sign Extended Register-Register (64<-32)
-void Assembler::lgfr(Register r1, Register r2) { rre_form(LGFR, r1, r2); }
-
 // Load Halfword Immediate (32)
 void Assembler::lhi(Register r, const Operand& imm) { ri_form(LHI, r, imm); }
 
 // Load Halfword Immediate (64)
 void Assembler::lghi(Register r, const Operand& imm) { ri_form(LGHI, r, imm); }
 
-// --------------------------
-// Load And Test Instructions
-// --------------------------
-// Load and Test Register-Storage (32)
-void Assembler::lt_z(Register r1, const MemOperand& opnd) {
-  rxy_form(LT, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load and Test Register-Storage (64)
-void Assembler::ltg(Register r1, const MemOperand& opnd) {
-  rxy_form(LTG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load and Test Register-Register (32)
-void Assembler::ltr(Register r1, Register r2) { rr_form(LTR, r1, r2); }
-
-// Load and Test Register-Register (64)
-void Assembler::ltgr(Register r1, Register r2) { rre_form(LTGR, r1, r2); }
-
-// Load and Test Register-Register (64<-32)
-void Assembler::ltgfr(Register r1, Register r2) { rre_form(LTGFR, r1, r2); }
-
 // -------------------------
 // Load Logical Instructions
 // -------------------------
-// Load Logical Character (32) - loads a byte and zero ext.
-void Assembler::llc(Register r1, const MemOperand& opnd) {
-  rxy_form(LLC, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Logical Character (64) - loads a byte and zero ext.
-void Assembler::llgc(Register r1, const MemOperand& opnd) {
-  rxy_form(LLGC, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Logical halfword Register-Storage (64<-32)
-void Assembler::llgf(Register r1, const MemOperand& opnd) {
-  rxy_form(LLGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Logical Register-Register (64<-32)
-void Assembler::llgfr(Register r1, Register r2) { rre_form(LLGFR, r1, r2); }
-
-// Load Logical halfword Register-Storage (32)
-void Assembler::llh(Register r1, const MemOperand& opnd) {
-  rxy_form(LLH, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Logical halfword Register-Storage (64)
-void Assembler::llgh(Register r1, const MemOperand& opnd) {
-  rxy_form(LLGH, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Logical halfword Register-Register (32)
-void Assembler::llhr(Register r1, Register r2) { rre_form(LLHR, r1, r2); }
-
-// Load Logical halfword Register-Register (64)
-void Assembler::llghr(Register r1, Register r2) { rre_form(LLGHR, r1, r2); }
-
 // Load On Condition R-R (32)
 void Assembler::locr(Condition m3, Register r1, Register r2) {
   rrf2_form(LOCR << 16 | m3 * B12 | r1.code() * B4 | r2.code());
@@ -1628,63 +1293,25 @@
 
 // Load On Condition R-M (32)
 void Assembler::loc(Condition m3, Register r1, const MemOperand& src) {
-  rxy_form(LOC, r1, m3, src.rb(), src.offset());
+  rsy_form(LOC, r1, m3, src.rb(), src.offset());
 }
 
 // Load On Condition R-M (64)
 void Assembler::locg(Condition m3, Register r1, const MemOperand& src) {
-  rxy_form(LOCG, r1, m3, src.rb(), src.offset());
+  rsy_form(LOCG, r1, m3, src.rb(), src.offset());
 }
 
 // -------------------
 // Branch Instructions
 // -------------------
-// Branch and Save
-void Assembler::basr(Register r1, Register r2) { rr_form(BASR, r1, r2); }
-
-// Indirect Conditional Branch via register
-void Assembler::bcr(Condition m, Register target) { rr_form(BCR, m, target); }
-
-// Branch on Count (32)
-void Assembler::bct(Register r, const MemOperand& opnd) {
-  rx_form(BCT, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
 // Branch on Count (64)
-void Assembler::bctg(Register r, const MemOperand& opnd) {
-  rxy_form(BCTG, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
 // Branch Relative and Save (32)
 void Assembler::bras(Register r, const Operand& opnd) {
   ri_form(BRAS, r, opnd);
 }
 
-// Branch Relative and Save (64)
-void Assembler::brasl(Register r, const Operand& opnd) {
-  ril_form(BRASL, r, opnd);
-}
-
 // Branch relative on Condition (32)
-void Assembler::brc(Condition c, const Operand& opnd) {
-  // BRC actually encodes # of halfwords, so divide by 2.
-  int16_t numHalfwords = static_cast<int16_t>(opnd.immediate()) / 2;
-  Operand halfwordOp = Operand(numHalfwords);
-  halfwordOp.setBits(16);
-  ri_form(BRC, c, halfwordOp);
-}
-
-// Branch Relative on Condition (64)
-void Assembler::brcl(Condition c, const Operand& opnd, bool isCodeTarget) {
-  Operand halfwordOp = opnd;
-  // Operand for code targets will be index to code_targets_
-  if (!isCodeTarget) {
-    // BRCL actually encodes # of halfwords, so divide by 2.
-    int32_t numHalfwords = static_cast<int32_t>(opnd.immediate()) / 2;
-    halfwordOp = Operand(numHalfwords);
-  }
-  ril_form(BRCL, c, halfwordOp);
-}
+void Assembler::brc(Condition c, const Operand& opnd) { ri_form(BRC, c, opnd); }
 
 // Branch On Count (32)
 void Assembler::brct(Register r1, const Operand& imm) {
@@ -1707,37 +1334,6 @@
 // --------------------
 // Compare Instructions
 // --------------------
-// Compare Register-Storage (32)
-void Assembler::c(Register r, const MemOperand& opnd) {
-  rx_form(C, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Register-Storage (32)
-void Assembler::cy(Register r, const MemOperand& opnd) {
-  rxy_form(CY, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Register-Register (32)
-void Assembler::cr_z(Register r1, Register r2) { rr_form(CR, r1, r2); }
-
-// Compare Register-Storage (64)
-void Assembler::cg(Register r, const MemOperand& opnd) {
-  rxy_form(CG, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Register-Register (64)
-void Assembler::cgr(Register r1, Register r2) { rre_form(CGR, r1, r2); }
-
-// Compare Halfword Register-Storage (32)
-void Assembler::ch(Register r, const MemOperand& opnd) {
-  rx_form(CH, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Halfword Register-Storage (32)
-void Assembler::chy(Register r, const MemOperand& opnd) {
-  rxy_form(CHY, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
 // Compare Halfword Immediate (32)
 void Assembler::chi(Register r, const Operand& opnd) { ri_form(CHI, r, opnd); }
 
@@ -1746,46 +1342,9 @@
   ri_form(CGHI, r, opnd);
 }
 
-// Compare Immediate (32)
-void Assembler::cfi(Register r, const Operand& opnd) { ril_form(CFI, r, opnd); }
-
-// Compare Immediate (64)
-void Assembler::cgfi(Register r, const Operand& opnd) {
-  ril_form(CGFI, r, opnd);
-}
-
 // ----------------------------
 // Compare Logical Instructions
 // ----------------------------
-// Compare Logical Register-Storage (32)
-void Assembler::cl(Register r, const MemOperand& opnd) {
-  rx_form(CL, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Logical Register-Storage (32)
-void Assembler::cly(Register r, const MemOperand& opnd) {
-  rxy_form(CLY, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Logical Register-Register (32)
-void Assembler::clr(Register r1, Register r2) { rr_form(CLR, r1, r2); }
-
-// Compare Logical Register-Storage (64)
-void Assembler::clg(Register r, const MemOperand& opnd) {
-  rxy_form(CLG, r, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Compare Logical Register-Register (64)
-void Assembler::clgr(Register r1, Register r2) { rre_form(CLGR, r1, r2); }
-
-// Compare Logical Immediate (32)
-void Assembler::clfi(Register r1, const Operand& i2) { ril_form(CLFI, r1, i2); }
-
-// Compare Logical Immediate (64<32)
-void Assembler::clgfi(Register r1, const Operand& i2) {
-  ril_form(CLGFI, r1, i2);
-}
-
 // Compare Immediate (Mem - Imm) (8)
 void Assembler::cli(const MemOperand& opnd, const Operand& imm) {
   si_form(CLI, imm, opnd.rb(), opnd.offset());
@@ -1856,31 +1415,6 @@
 // -----------------------
 // 32-bit Add Instructions
 // -----------------------
-// Add Register-Storage (32)
-void Assembler::a(Register r1, const MemOperand& opnd) {
-  rx_form(A, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Register-Storage (32)
-void Assembler::ay(Register r1, const MemOperand& opnd) {
-  rxy_form(AY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Immediate (32)
-void Assembler::afi(Register r1, const Operand& opnd) {
-  ril_form(AFI, r1, opnd);
-}
-
-// Add Halfword Register-Storage (32)
-void Assembler::ah(Register r1, const MemOperand& opnd) {
-  rx_form(AH, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Halfword Register-Storage (32)
-void Assembler::ahy(Register r1, const MemOperand& opnd) {
-  rxy_form(AHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
 // Add Halfword Immediate (32)
 void Assembler::ahi(Register r1, const Operand& i2) { ri_form(AHI, r1, i2); }
 
@@ -1889,9 +1423,6 @@
   rie_form(AHIK, r1, r3, i2);
 }
 
-// Add Register (32)
-void Assembler::ar(Register r1, Register r2) { rr_form(AR, r1, r2); }
-
 // Add Register-Register-Register (32)
 void Assembler::ark(Register r1, Register r2, Register r3) {
   rrf1_form(ARK, r1, r2, r3);
@@ -1907,24 +1438,6 @@
 // -----------------------
 // 64-bit Add Instructions
 // -----------------------
-// Add Register-Storage (64)
-void Assembler::ag(Register r1, const MemOperand& opnd) {
-  rxy_form(AG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Register-Storage (64<-32)
-void Assembler::agf(Register r1, const MemOperand& opnd) {
-  rxy_form(AGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Immediate (64)
-void Assembler::agfi(Register r1, const Operand& opnd) {
-  ril_form(AGFI, r1, opnd);
-}
-
-// Add Register-Register (64<-32)
-void Assembler::agfr(Register r1, Register r2) { rre_form(AGFR, r1, r2); }
-
 // Add Halfword Immediate (64)
 void Assembler::aghi(Register r1, const Operand& i2) { ri_form(AGHI, r1, i2); }
 
@@ -1933,9 +1446,6 @@
   rie_form(AGHIK, r1, r3, i2);
 }
 
-// Add Register (64)
-void Assembler::agr(Register r1, Register r2) { rre_form(AGR, r1, r2); }
-
 // Add Register-Register-Register (64)
 void Assembler::agrk(Register r1, Register r2, Register r3) {
   rrf1_form(AGRK, r1, r2, r3);
@@ -1951,27 +1461,6 @@
 // -------------------------------
 // 32-bit Add Logical Instructions
 // -------------------------------
-// Add Logical Register-Storage (32)
-void Assembler::al_z(Register r1, const MemOperand& opnd) {
-  rx_form(AL, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Logical Register-Storage (32)
-void Assembler::aly(Register r1, const MemOperand& opnd) {
-  rxy_form(ALY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Logical Immediate (32)
-void Assembler::alfi(Register r1, const Operand& opnd) {
-  ril_form(ALFI, r1, opnd);
-}
-
-// Add Logical Register-Register (32)
-void Assembler::alr(Register r1, Register r2) { rr_form(ALR, r1, r2); }
-
-// Add Logical With Carry Register-Register (32)
-void Assembler::alcr(Register r1, Register r2) { rre_form(ALCR, r1, r2); }
-
 // Add Logical Register-Register-Register (32)
 void Assembler::alrk(Register r1, Register r2, Register r3) {
   rrf1_form(ALRK, r1, r2, r3);
@@ -1980,19 +1469,6 @@
 // -------------------------------
 // 64-bit Add Logical Instructions
 // -------------------------------
-// Add Logical Register-Storage (64)
-void Assembler::alg(Register r1, const MemOperand& opnd) {
-  rxy_form(ALG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Add Logical Immediate (64)
-void Assembler::algfi(Register r1, const Operand& opnd) {
-  ril_form(ALGFI, r1, opnd);
-}
-
-// Add Logical Register-Register (64)
-void Assembler::algr(Register r1, Register r2) { rre_form(ALGR, r1, r2); }
-
 // Add Logical Register-Register-Register (64)
 void Assembler::algrk(Register r1, Register r2, Register r3) {
   rrf1_form(ALGRK, r1, r2, r3);
@@ -2001,29 +1477,6 @@
 // ----------------------------
 // 32-bit Subtract Instructions
 // ----------------------------
-// Subtract Register-Storage (32)
-void Assembler::s(Register r1, const MemOperand& opnd) {
-  rx_form(S, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Register-Storage (32)
-void Assembler::sy(Register r1, const MemOperand& opnd) {
-  rxy_form(SY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Halfword Register-Storage (32)
-void Assembler::sh(Register r1, const MemOperand& opnd) {
-  rx_form(SH, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Halfword Register-Storage (32)
-void Assembler::shy(Register r1, const MemOperand& opnd) {
-  rxy_form(SHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Register (32)
-void Assembler::sr(Register r1, Register r2) { rr_form(SR, r1, r2); }
-
 // Subtract Register-Register-Register (32)
 void Assembler::srk(Register r1, Register r2, Register r3) {
   rrf1_form(SRK, r1, r2, r3);
@@ -2032,22 +1485,6 @@
 // ----------------------------
 // 64-bit Subtract Instructions
 // ----------------------------
-// Subtract Register-Storage (64)
-void Assembler::sg(Register r1, const MemOperand& opnd) {
-  rxy_form(SG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Register-Storage (64<-32)
-void Assembler::sgf(Register r1, const MemOperand& opnd) {
-  rxy_form(SGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Register (64)
-void Assembler::sgr(Register r1, Register r2) { rre_form(SGR, r1, r2); }
-
-// Subtract Register (64<-32)
-void Assembler::sgfr(Register r1, Register r2) { rre_form(SGFR, r1, r2); }
-
 // Subtract Register-Register-Register (64)
 void Assembler::sgrk(Register r1, Register r2, Register r3) {
   rrf1_form(SGRK, r1, r2, r3);
@@ -2056,22 +1493,6 @@
 // ------------------------------------
 // 32-bit Subtract Logical Instructions
 // ------------------------------------
-// Subtract Logical Register-Storage (32)
-void Assembler::sl(Register r1, const MemOperand& opnd) {
-  rx_form(SL, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Logical Register-Storage (32)
-void Assembler::sly(Register r1, const MemOperand& opnd) {
-  rxy_form(SLY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Logical Register-Register (32)
-void Assembler::slr(Register r1, Register r2) { rr_form(SLR, r1, r2); }
-
-// Subtract Logical With Borrow Register-Register (32)
-void Assembler::slbr(Register r1, Register r2) { rre_form(SLBR, r1, r2); }
-
 // Subtract Logical Register-Register-Register (32)
 void Assembler::slrk(Register r1, Register r2, Register r3) {
   rrf1_form(SLRK, r1, r2, r3);
@@ -2080,14 +1501,6 @@
 // ------------------------------------
 // 64-bit Subtract Logical Instructions
 // ------------------------------------
-// Subtract Logical Register-Storage (64)
-void Assembler::slg(Register r1, const MemOperand& opnd) {
-  rxy_form(SLG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Subtract Logical Register-Register (64)
-void Assembler::slgr(Register r1, Register r2) { rre_form(SLGR, r1, r2); }
-
 // Subtract Logical Register-Register-Register (64)
 void Assembler::slgrk(Register r1, Register r2, Register r3) {
   rrf1_form(SLGRK, r1, r2, r3);
@@ -2096,218 +1509,57 @@
 // ----------------------------
 // 32-bit Multiply Instructions
 // ----------------------------
-// Multiply Register-Storage (64<32)
-void Assembler::m(Register r1, const MemOperand& opnd) {
-  DCHECK(r1.code() % 2 == 0);
-  rx_form(M, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-void Assembler::mfy(Register r1, const MemOperand& opnd) {
-  DCHECK(r1.code() % 2 == 0);
-  rxy_form(MFY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Multiply Register (64<32)
-void Assembler::mr_z(Register r1, Register r2) {
-  DCHECK(r1.code() % 2 == 0);
-  rr_form(MR, r1, r2);
-}
-
-// Multiply Logical Register-Storage (64<32)
-void Assembler::ml(Register r1, const MemOperand& opnd) {
-  rxy_form(ML, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Multiply Logical Register (64<32)
-void Assembler::mlr(Register r1, Register r2) {
-  DCHECK(r1.code() % 2 == 0);
-  rre_form(MLR, r1, r2);
-}
-
-// Multiply Single Register-Storage (32)
-void Assembler::ms(Register r1, const MemOperand& opnd) {
-  rx_form(MS, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Multiply Single Register-Storage (32)
-void Assembler::msy(Register r1, const MemOperand& opnd) {
-  rxy_form(MSY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Multiply Single Immediate (32)
-void Assembler::msfi(Register r1, const Operand& opnd) {
-  ril_form(MSFI, r1, opnd);
-}
-
-// Multiply Single Register (64<32)
-void Assembler::msr(Register r1, Register r2) { rre_form(MSR, r1, r2); }
-
-// Multiply Halfword Register-Storage (32)
-void Assembler::mh(Register r1, const MemOperand& opnd) {
-  rx_form(MH, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Multiply Halfword Register-Storage (32)
-void Assembler::mhy(Register r1, const MemOperand& opnd) {
-  rxy_form(MHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
 // Multiply Halfword Immediate (32)
 void Assembler::mhi(Register r1, const Operand& opnd) {
   ri_form(MHI, r1, opnd);
 }
 
+// Multiply Single Register-Register-Register (32)
+void Assembler::msrkc(Register r1, Register r2, Register r3) {
+  rrf1_form(MSRKC, r1, r2, r3);
+}
+
+// Multiply Single Register-Register-Register (64)
+void Assembler::msgrkc(Register r1, Register r2, Register r3) {
+  rrf1_form(MSGRKC, r1, r2, r3);
+}
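+// Both use the distinct-operands RRF layout, so the product of r2 and r3 is
+// written to r1 without clobbering either source; the trailing "C" in the
+// mnemonic indicates that the condition code is set.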
+
 // ----------------------------
 // 64-bit Multiply Instructions
 // ----------------------------
-// Multiply Logical Register-Storage (128<64)
-void Assembler::mlg(Register r1, const MemOperand& opnd) {
-  rxy_form(MLG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Multiply Register (128<64)
-void Assembler::mlgr(Register r1, Register r2) { rre_form(MLGR, r1, r2); }
-
 // Multiply Halfword Immediate (64)
 void Assembler::mghi(Register r1, const Operand& opnd) {
   ri_form(MGHI, r1, opnd);
 }
 
-// Multiply Single Immediate (64)
-void Assembler::msgfi(Register r1, const Operand& opnd) {
-  ril_form(MSGFI, r1, opnd);
-}
-
-// Multiply Single Register-Storage (64)
-void Assembler::msg(Register r1, const MemOperand& opnd) {
-  rxy_form(MSG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Multiply Single Register-Register (64)
-void Assembler::msgr(Register r1, Register r2) { rre_form(MSGR, r1, r2); }
-
-// --------------------------
-// 32-bit Divide Instructions
-// --------------------------
-// Divide Register-Storage (32<-64)
-void Assembler::d(Register r1, const MemOperand& opnd) {
-  rx_form(D, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Divide Register (32<-64)
-void Assembler::dr(Register r1, Register r2) {
-  DCHECK(r1.code() % 2 == 0);
-  rr_form(DR, r1, r2);
-}
-
-// Divide Logical Register-Storage (32<-64)
-void Assembler::dl(Register r1, const MemOperand& opnd) {
-  rx_form(DL, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Divide Logical Register (32<-64)
-void Assembler::dlr(Register r1, Register r2) { rre_form(DLR, r1, r2); }
-
-// --------------------------
-// 64-bit Divide Instructions
-// --------------------------
-// Divide Logical Register (64<-128)
-void Assembler::dlgr(Register r1, Register r2) { rre_form(DLGR, r1, r2); }
-
-// Divide Single Register (64<-32)
-void Assembler::dsgr(Register r1, Register r2) { rre_form(DSGR, r1, r2); }
-
 // --------------------
 // Bitwise Instructions
 // --------------------
-// AND Register-Storage (32)
-void Assembler::n(Register r1, const MemOperand& opnd) {
-  rx_form(N, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// AND Register-Storage (32)
-void Assembler::ny(Register r1, const MemOperand& opnd) {
-  rxy_form(NY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// AND Register (32)
-void Assembler::nr(Register r1, Register r2) { rr_form(NR, r1, r2); }
-
 // AND Register-Register-Register (32)
 void Assembler::nrk(Register r1, Register r2, Register r3) {
   rrf1_form(NRK, r1, r2, r3);
 }
 
-// AND Register-Storage (64)
-void Assembler::ng(Register r1, const MemOperand& opnd) {
-  rxy_form(NG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// AND Register (64)
-void Assembler::ngr(Register r1, Register r2) { rre_form(NGR, r1, r2); }
-
 // AND Register-Register-Register (64)
 void Assembler::ngrk(Register r1, Register r2, Register r3) {
   rrf1_form(NGRK, r1, r2, r3);
 }
 
-// OR Register-Storage (32)
-void Assembler::o(Register r1, const MemOperand& opnd) {
-  rx_form(O, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// OR Register-Storage (32)
-void Assembler::oy(Register r1, const MemOperand& opnd) {
-  rxy_form(OY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// OR Register (32)
-void Assembler::or_z(Register r1, Register r2) { rr_form(OR, r1, r2); }
-
 // OR Register-Register-Register (32)
 void Assembler::ork(Register r1, Register r2, Register r3) {
   rrf1_form(ORK, r1, r2, r3);
 }
 
-// OR Register-Storage (64)
-void Assembler::og(Register r1, const MemOperand& opnd) {
-  rxy_form(OG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// OR Register (64)
-void Assembler::ogr(Register r1, Register r2) { rre_form(OGR, r1, r2); }
-
 // OR Register-Register-Register (64)
 void Assembler::ogrk(Register r1, Register r2, Register r3) {
   rrf1_form(OGRK, r1, r2, r3);
 }
 
-// XOR Register-Storage (32)
-void Assembler::x(Register r1, const MemOperand& opnd) {
-  rx_form(X, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// XOR Register-Storage (32)
-void Assembler::xy(Register r1, const MemOperand& opnd) {
-  rxy_form(XY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// XOR Register (32)
-void Assembler::xr(Register r1, Register r2) { rr_form(XR, r1, r2); }
-
 // XOR Register-Register-Register (32)
 void Assembler::xrk(Register r1, Register r2, Register r3) {
   rrf1_form(XRK, r1, r2, r3);
 }
 
-// XOR Register-Storage (64)
-void Assembler::xg(Register r1, const MemOperand& opnd) {
-  rxy_form(XG, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// XOR Register (64)
-void Assembler::xgr(Register r1, Register r2) { rre_form(XGR, r1, r2); }
-
 // XOR Register-Register-Register (64)
 void Assembler::xgrk(Register r1, Register r2, Register r3) {
   rrf1_form(XGRK, r1, r2, r3);
@@ -2320,19 +1572,6 @@
           opnd2.getBaseRegister(), opnd2.getDisplacement());
 }
 
-// -------------------------------------------
-// Bitwise GPR <-> FPR Conversion Instructions
-// -------------------------------------------
-// Load GR from FPR (64 <- L)
-void Assembler::lgdr(Register r1, DoubleRegister f2) {
-  rre_form(LGDR, r1, Register::from_code(f2.code()));
-}
-
-// Load FPR from FR (L <- 64)
-void Assembler::ldgr(DoubleRegister f1, Register r2) {
-  rre_form(LDGR, Register::from_code(f1.code()), r2);
-}
-
 void Assembler::EnsureSpaceFor(int space_needed) {
   if (buffer_space() <= (kGap + space_needed)) {
     GrowBuffer(space_needed);
@@ -2547,37 +1786,7 @@
   EnsureSpace ensure_space(this);
 
   int32_t target_index = emit_code_target(target, rmode);
-  brcl(cond, Operand(target_index), true);
-}
-
-// Store (32)
-void Assembler::st(Register src, const MemOperand& dst) {
-  rx_form(ST, src, dst.rx(), dst.rb(), dst.offset());
-}
-
-// Store (32)
-void Assembler::sty(Register src, const MemOperand& dst) {
-  rxy_form(STY, src, dst.rx(), dst.rb(), dst.offset());
-}
-
-// Store Halfword
-void Assembler::sth(Register src, const MemOperand& dst) {
-  rx_form(STH, src, dst.rx(), dst.rb(), dst.offset());
-}
-
-// Store Halfword
-void Assembler::sthy(Register src, const MemOperand& dst) {
-  rxy_form(STHY, src, dst.rx(), dst.rb(), dst.offset());
-}
-
-// Store Character
-void Assembler::stc(Register src, const MemOperand& dst) {
-  rx_form(STC, src, dst.rx(), dst.rb(), dst.offset());
-}
-
-// Store Character
-void Assembler::stcy(Register src, const MemOperand& dst) {
-  rxy_form(STCY, src, dst.rx(), dst.rb(), dst.offset());
+  brcl(cond, Operand(target_index));
 }
 
 // 32-bit Load Multiple - short displacement (12-bits unsigned)
@@ -2605,32 +1814,6 @@
   sil_form(MVGHI, opnd1.getBaseRegister(), opnd1.getDisplacement(), i2);
 }
 
-// Store Register (64)
-void Assembler::stg(Register src, const MemOperand& dst) {
-  DCHECK(!(dst.rb().code() == 15 && dst.offset() < 0));
-  rxy_form(STG, src, dst.rx(), dst.rb(), dst.offset());
-}
-
-// Insert Character
-void Assembler::ic_z(Register r1, const MemOperand& opnd) {
-  rx_form(IC_z, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Insert Character
-void Assembler::icy(Register r1, const MemOperand& opnd) {
-  rxy_form(ICY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Insert Immediate (High)
-void Assembler::iihf(Register r1, const Operand& opnd) {
-  ril_form(IIHF, r1, opnd);
-}
-
-// Insert Immediate (low)
-void Assembler::iilf(Register r1, const Operand& opnd) {
-  ril_form(IILF, r1, opnd);
-}
-
 // Insert Immediate (high high)
 void Assembler::iihh(Register r1, const Operand& opnd) {
   ri_form(IIHH, r1, opnd);
@@ -2651,108 +1834,42 @@
   ri_form(IILL, r1, opnd);
 }
 
-// Load Immediate 32->64
-void Assembler::lgfi(Register r1, const Operand& opnd) {
-  ril_form(LGFI, r1, opnd);
-}
-
 // GPR <-> FPR Instructions
 
 // Floating point instructions
 //
-// Load zero Register (64)
-void Assembler::lzdr(DoubleRegister r1) {
-  rre_form(LZDR, Register::from_code(r1.code()), Register::from_code(0));
-}
-
-// Add Register-Register (LB)
-void Assembler::aebr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(AEBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
 // Add Register-Storage (LB)
 void Assembler::adb(DoubleRegister r1, const MemOperand& opnd) {
   rxe_form(ADB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
            opnd.offset());
 }
 
-// Add Register-Register (LB)
-void Assembler::adbr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(ADBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Compare Register-Register (LB)
-void Assembler::cebr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(CEBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Compare Register-Storage (LB)
-void Assembler::cdb(DoubleRegister r1, const MemOperand& opnd) {
-  rx_form(CD, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
-          opnd.offset());
-}
-
-// Compare Register-Register (LB)
-void Assembler::cdbr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(CDBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Divide Register-Register (LB)
-void Assembler::debr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(DEBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
 // Divide Register-Storage (LB)
 void Assembler::ddb(DoubleRegister r1, const MemOperand& opnd) {
   rxe_form(DDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
            opnd.offset());
 }
 
-// Divide Register-Register (LB)
-void Assembler::ddbr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(DDBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Multiply Register-Register (LB)
-void Assembler::meebr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(MEEBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
 // Multiply Register-Storage (LB)
 void Assembler::mdb(DoubleRegister r1, const MemOperand& opnd) {
   rxe_form(MDB, Register::from_code(r1.code()), opnd.rb(), opnd.rx(),
            opnd.offset());
 }
 
-// Multiply Register-Register (LB)
-void Assembler::mdbr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(MDBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Subtract Register-Register (LB)
-void Assembler::sebr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(SEBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
 // Subtract Register-Storage (LB)
 void Assembler::sdb(DoubleRegister r1, const MemOperand& opnd) {
   rxe_form(SDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
            opnd.offset());
 }
 
-// Subtract Register-Register (LB)
-void Assembler::sdbr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(SDBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
+void Assembler::ceb(DoubleRegister r1, const MemOperand& opnd) {
+  rxe_form(CEB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+           opnd.offset());
+}
+
+void Assembler::cdb(DoubleRegister r1, const MemOperand& opnd) {
+  rxe_form(CDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
+           opnd.offset());
 }
 
 // Square Root (LB)
@@ -2761,115 +1878,6 @@
            opnd.offset());
 }
 
-// Square Root Register-Register (LB)
-void Assembler::sqebr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(SQEBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Square Root Register-Register (LB)
-void Assembler::sqdbr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(SQDBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Load Rounded (double -> float)
-void Assembler::ledbr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(LEDBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Load Lengthen (float -> double)
-void Assembler::ldebr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(LDEBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Load Complement Register-Register (LB)
-void Assembler::lcdbr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(LCDBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Load Complement Register-Register (LB)
-void Assembler::lcebr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(LCEBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Load Positive Register-Register (LB)
-void Assembler::lpebr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(LPEBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Load Positive Register-Register (LB)
-void Assembler::lpdbr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(LPDBR, Register::from_code(r1.code()),
-           Register::from_code(r2.code()));
-}
-
-// Store Double (64)
-void Assembler::std(DoubleRegister r1, const MemOperand& opnd) {
-  rx_form(STD, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Store Double (64)
-void Assembler::stdy(DoubleRegister r1, const MemOperand& opnd) {
-  DCHECK(!(opnd.rb().code() == 15 && opnd.offset() < 0));
-  rxy_form(STDY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Store Float (32)
-void Assembler::ste(DoubleRegister r1, const MemOperand& opnd) {
-  rx_form(STE, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Store Float (32)
-void Assembler::stey(DoubleRegister r1, const MemOperand& opnd) {
-  DCHECK(!(opnd.rb().code() == 15 && opnd.offset() < 0));
-  rxy_form(STEY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Double (64)
-void Assembler::ld(DoubleRegister r1, const MemOperand& opnd) {
-  DCHECK(is_uint12(opnd.offset()));
-  rx_form(LD, r1, opnd.rx(), opnd.rb(), opnd.offset() & 0xfff);
-}
-
-// Load Double (64)
-void Assembler::ldy(DoubleRegister r1, const MemOperand& opnd) {
-  DCHECK(is_int20(opnd.offset()));
-  rxy_form(LDY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Float (32)
-void Assembler::le_z(DoubleRegister r1, const MemOperand& opnd) {
-  DCHECK(is_uint12(opnd.offset()));
-  rx_form(LE, r1, opnd.rx(), opnd.rb(), opnd.offset() & 0xfff);
-}
-
-// Load Float (32)
-void Assembler::ley(DoubleRegister r1, const MemOperand& opnd) {
-  DCHECK(is_int20(opnd.offset()));
-  rxy_form(LEY, r1, opnd.rx(), opnd.rb(), opnd.offset());
-}
-
-// Load Double Register-Register (64)
-void Assembler::ldr(DoubleRegister r1, DoubleRegister r2) {
-  rr_form(LDR, r1, r2);
-}
-
-// Load And Test Register-Register (L)
-void Assembler::ltebr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(LTEBR, r1, r2);
-}
-
-// Load And Test Register-Register (L)
-void Assembler::ltdbr(DoubleRegister r1, DoubleRegister r2) {
-  rre_form(LTDBR, r1, r2);
-}
-
 // Convert to Fixed point (64<-S)
 void Assembler::cgebr(Condition m, Register r1, DoubleRegister r2) {
   rrfe_form(CGEBR, m, Condition(0), r1, Register::from_code(r2.code()));
@@ -2885,21 +1893,6 @@
   rrfe_form(CFDBR, m, Condition(0), r1, Register::from_code(r2.code()));
 }
 
-// Convert from Fixed point (L<-64)
-void Assembler::cegbr(DoubleRegister r1, Register r2) {
-  rre_form(CEGBR, Register::from_code(r1.code()), r2);
-}
-
-// Convert from Fixed point (L<-64)
-void Assembler::cdgbr(DoubleRegister r1, Register r2) {
-  rre_form(CDGBR, Register::from_code(r1.code()), r2);
-}
-
-// Convert from Fixed point (L<-32)
-void Assembler::cdfbr(DoubleRegister r1, Register r2) {
-  rre_form(CDFBR, Register::from_code(r1.code()), r2);
-}
-
 // Convert to Fixed Logical (64<-L)
 void Assembler::clgdbr(Condition m3, Condition m4, Register r1,
                        DoubleRegister r2) {
@@ -2988,20 +1981,6 @@
   rrf2_form(FIDBRA << 16 | m3 * B12 | d1.code() * B4 | d2.code());
 }
 
-// Multiply and Add - MADBR R1, R3, R2
-// R1 = R3 * R2 + R1
-void Assembler::madbr(DoubleRegister d1, DoubleRegister d3, DoubleRegister d2) {
-  rrd_form(MADBR, Register::from_code(d1.code()),
-           Register::from_code(d3.code()), Register::from_code(d2.code()));
-}
-
-// Multiply and Subtract - MSDBR R1, R3, R2
-// R1 = R3 * R2 - R1
-void Assembler::msdbr(DoubleRegister d1, DoubleRegister d3, DoubleRegister d2) {
-  rrd_form(MSDBR, Register::from_code(d1.code()),
-           Register::from_code(d3.code()), Register::from_code(d2.code()));
-}
-
 // end of S390instructions
 
 bool Assembler::IsNop(SixByteInstr instr, int type) {
@@ -3012,6 +1991,21 @@
   return ((instr & 0xffff) == 0x1800);  // lr r0,r0
 }
 
+// dummy instruction reserved for special use.
+void Assembler::dumy(int r1, int x2, int b2, int d2) {
+#if defined(USE_SIMULATOR)
+  int op = 0xE353;
+  uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+                  (static_cast<uint64_t>(r1) & 0xF) * B36 |
+                  (static_cast<uint64_t>(x2) & 0xF) * B32 |
+                  (static_cast<uint64_t>(b2) & 0xF) * B28 |
+                  (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+                  (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
+                  (static_cast<uint64_t>(op & 0x00FF));
+  emit6bytes(code);
+#endif
+}
+
 void Assembler::GrowBuffer(int needed) {
   if (!own_buffer_) FATAL("external code buffer is too small");
 
diff --git a/src/s390/assembler-s390.h b/src/s390/assembler-s390.h
index 65f0126..24146df 100644
--- a/src/s390/assembler-s390.h
+++ b/src/s390/assembler-s390.h
@@ -187,6 +187,7 @@
 const Register cp = r13;              // JavaScript context pointer.
 
 static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
 
 // Double word FP register.
 struct DoubleRegister {
@@ -447,17 +448,10 @@
   INLINE(static void set_target_address_at(
       Isolate* isolate, Address pc, Address constant_pool, Address target,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
-  INLINE(static Address target_address_at(Address pc, Code* code)) {
-    Address constant_pool = NULL;
-    return target_address_at(pc, constant_pool);
-  }
+  INLINE(static Address target_address_at(Address pc, Code* code));
   INLINE(static void set_target_address_at(
       Isolate* isolate, Address pc, Code* code, Address target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
-    Address constant_pool = NULL;
-    set_target_address_at(isolate, pc, constant_pool, target,
-                          icache_flush_mode);
-  }
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
 
   // Return the code target address at a call site from the return address
   // of that call in the instruction stream.
@@ -545,6 +539,227 @@
   // ---------------------------------------------------------------------------
   // Code generation
 
+  template <class T, int size, int lo, int hi>
+  inline T getfield(T value) {
+    DCHECK(lo < hi);
+    DCHECK(size > 0);
+    int mask = hi - lo;
+    int shift = size * 8 - hi;
+    uint32_t mask_value = (mask == 32) ? 0xffffffff : (1 << mask) - 1;
+    return (value & mask_value) << shift;
+  }
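+  // Worked example: getfield<uint32_t, 4, 8, 12>(f1) computes
+  // mask = 12 - 8 = 4, mask_value = 0xF, shift = 4 * 8 - 12 = 20, placing
+  // the low four bits of f1 into bits 8..11 of a 32-bit word (big-endian
+  // bit numbering, matching the format diagrams elsewhere in this file).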
+
+  // Declare generic instruction formats by fields
+  inline void e_format(Opcode opcode) {
+    emit2bytes(getfield<uint16_t, 2, 0, 16>(opcode));
+  }
+
+  inline void i_format(Opcode opcode, int f1) {
+    emit2bytes(getfield<uint16_t, 2, 0, 8>(opcode) |
+               getfield<uint16_t, 2, 8, 16>(f1));
+  }
+
+  inline void ie_format(Opcode opcode, int f1, int f2) {
+    emit4bytes(getfield<uint32_t, 4, 0, 16>(opcode) |
+               getfield<uint32_t, 4, 24, 28>(f1) |
+               getfield<uint32_t, 4, 28, 32>(f2));
+  }
+  inline void mii_format(Opcode opcode, int f1, int f2, int f3) {
+    emit6bytes(
+        getfield<uint64_t, 6, 0, 8>(opcode) | getfield<uint64_t, 6, 8, 12>(f1) |
+        getfield<uint64_t, 6, 12, 24>(f2) | getfield<uint64_t, 6, 24, 48>(f3));
+  }
+
+  inline void ri_format(Opcode opcode, int f1, int f2) {
+    uint32_t op1 = opcode >> 4;
+    uint32_t op2 = opcode & 0xf;
+    emit4bytes(
+        getfield<uint32_t, 4, 0, 8>(op1) | getfield<uint32_t, 4, 8, 12>(f1) |
+        getfield<uint32_t, 4, 12, 16>(op2) | getfield<uint32_t, 4, 16, 32>(f2));
+  }
+
+  inline void rie_1_format(Opcode opcode, int f1, int f2, int f3, int f4) {
+    uint32_t op1 = opcode >> 8;
+    uint32_t op2 = opcode & 0xff;
+    emit6bytes(
+        getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+        getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 32>(f3) |
+        getfield<uint64_t, 6, 32, 36>(f4) | getfield<uint64_t, 6, 40, 48>(op2));
+  }
+
+  inline void rie_2_format(Opcode opcode, int f1, int f2, int f3, int f4) {
+    uint32_t op1 = opcode >> 8;
+    uint32_t op2 = opcode & 0xff;
+    emit6bytes(
+        getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+        getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 32>(f3) |
+        getfield<uint64_t, 6, 32, 40>(f4) | getfield<uint64_t, 6, 40, 48>(op2));
+  }
+
+  inline void rie_3_format(Opcode opcode, int f1, int f2, int f3, int f4,
+                           int f5) {
+    uint32_t op1 = opcode >> 8;
+    uint32_t op2 = opcode & 0xff;
+    emit6bytes(
+        getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+        getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 24>(f3) |
+        getfield<uint64_t, 6, 24, 32>(f4) | getfield<uint64_t, 6, 32, 40>(f5) |
+        getfield<uint64_t, 6, 40, 48>(op2));
+  }
+
+#define DECLARE_S390_RIL_AB_INSTRUCTIONS(name, op_name, op_value) \
+  template <class R1>                                             \
+  inline void name(R1 r1, const Operand& i2) {                    \
+    ril_format(op_name, r1.code(), i2.immediate());               \
+  }
+#define DECLARE_S390_RIL_C_INSTRUCTIONS(name, op_name, op_value) \
+  inline void name(Condition m1, const Operand& i2) {            \
+    ril_format(op_name, m1, i2.immediate());                     \
+  }
+
+  inline void ril_format(Opcode opcode, int f1, int f2) {
+    uint32_t op1 = opcode >> 4;
+    uint32_t op2 = opcode & 0xf;
+    emit6bytes(
+        getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+        getfield<uint64_t, 6, 12, 16>(op2) | getfield<uint64_t, 6, 16, 48>(f2));
+  }
+  S390_RIL_A_OPCODE_LIST(DECLARE_S390_RIL_AB_INSTRUCTIONS)
+  S390_RIL_B_OPCODE_LIST(DECLARE_S390_RIL_AB_INSTRUCTIONS)
+  S390_RIL_C_OPCODE_LIST(DECLARE_S390_RIL_C_INSTRUCTIONS)
+#undef DECLARE_S390_RIL_AB_INSTRUCTIONS
+#undef DECLARE_S390_RIL_C_INSTRUCTIONS
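+  // For instance, an S390_RIL_A_OPCODE_LIST entry of the shape
+  // V(iihf, IIHF, 0xC08) expands through the macro above into
+  //   inline void iihf(Register r1, const Operand& i2) { ... }
+  // which replaces the hand-written RIL1_FORM_EMIT bodies deleted from the
+  // .cc file in this patch.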
+
+  inline void ris_format(Opcode opcode, int f1, int f2, int f3, int f4,
+                         int f5) {
+    uint32_t op1 = opcode >> 8;
+    uint32_t op2 = opcode & 0xff;
+    emit6bytes(
+        getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+        getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 20>(f3) |
+        getfield<uint64_t, 6, 20, 32>(f4) | getfield<uint64_t, 6, 32, 40>(f5) |
+        getfield<uint64_t, 6, 40, 48>(op2));
+  }
+
+#define DECLARE_S390_RR_INSTRUCTIONS(name, op_name, op_value) \
+  inline void name(Register r1, Register r2) {                \
+    rr_format(op_name, r1.code(), r2.code());                 \
+  }                                                           \
+  inline void name(DoubleRegister r1, DoubleRegister r2) {    \
+    rr_format(op_name, r1.code(), r2.code());                 \
+  }                                                           \
+  inline void name(Condition m1, Register r2) {               \
+    rr_format(op_name, m1, r2.code());                        \
+  }
+
+  inline void rr_format(Opcode opcode, int f1, int f2) {
+    emit2bytes(getfield<uint16_t, 2, 0, 8>(opcode) |
+               getfield<uint16_t, 2, 8, 12>(f1) |
+               getfield<uint16_t, 2, 12, 16>(f2));
+  }
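+  // Example: rr_format(0x18 /* LR */, 1, 2) emits 0x1812, i.e. "lr r1,r2":
+  // opcode in bits 0..7, r1 in bits 8..11, r2 in bits 12..15.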
+  S390_RR_OPCODE_LIST(DECLARE_S390_RR_INSTRUCTIONS)
+#undef DECLARE_S390_RR_INSTRUCTIONS
+
+#define DECLARE_S390_RRD_INSTRUCTIONS(name, op_name, op_value) \
+  template <class R1, class R2, class R3>                      \
+  inline void name(R1 r1, R3 r3, R2 r2) {                      \
+    rrd_format(op_name, r1.code(), r3.code(), r2.code());      \
+  }
+  inline void rrd_format(Opcode opcode, int f1, int f2, int f3) {
+    emit4bytes(getfield<uint32_t, 4, 0, 16>(opcode) |
+               getfield<uint32_t, 4, 16, 20>(f1) |
+               getfield<uint32_t, 4, 24, 28>(f2) |
+               getfield<uint32_t, 4, 28, 32>(f3));
+  }
+  S390_RRD_OPCODE_LIST(DECLARE_S390_RRD_INSTRUCTIONS)
+#undef DECLARE_S390_RRD_INSTRUCTIONS
+
+#define DECLARE_S390_RRE_INSTRUCTIONS(name, op_name, op_value) \
+  template <class R1, class R2>                                \
+  inline void name(R1 r1, R2 r2) {                             \
+    rre_format(op_name, r1.code(), r2.code());                 \
+  }
+  inline void rre_format(Opcode opcode, int f1, int f2) {
+    emit4bytes(getfield<uint32_t, 4, 0, 16>(opcode) |
+               getfield<uint32_t, 4, 24, 28>(f1) |
+               getfield<uint32_t, 4, 28, 32>(f2));
+  }
+  S390_RRE_OPCODE_LIST(DECLARE_S390_RRE_INSTRUCTIONS)
+  // Special format: LZDR encodes only R1; the R2 field is emitted as zero.
+  void lzdr(DoubleRegister r1) { rre_format(LZDR, r1.code(), 0); }
+#undef DECLARE_S390_RRE_INSTRUCTIONS
+
+  inline void rrf_format(Opcode opcode, int f1, int f2, int f3, int f4) {
+    emit4bytes(
+        getfield<uint32_t, 4, 0, 16>(opcode) |
+        getfield<uint32_t, 4, 16, 20>(f1) | getfield<uint32_t, 4, 20, 24>(f2) |
+        getfield<uint32_t, 4, 24, 28>(f3) | getfield<uint32_t, 4, 28, 32>(f4));
+  }
+
+#define DECLARE_S390_RX_INSTRUCTIONS(name, op_name, op_value)        \
+  template <class R1>                                                \
+  inline void name(R1 r1, Register x2, Register b2, Disp d2) {       \
+    rx_format(op_name, r1.code(), x2.code(), b2.code(), d2);         \
+  }                                                                  \
+  template <class R1>                                                \
+  inline void name(R1 r1, const MemOperand& opnd) {                  \
+    name(r1, opnd.getIndexRegister(),                                \
+         opnd.getBaseRegister(), opnd.getDisplacement());            \
+  }
+
+  inline void rx_format(Opcode opcode, int f1, int f2, int f3, int f4) {
+    DCHECK(is_uint8(opcode));
+    DCHECK(is_uint12(f4));
+    emit4bytes(getfield<uint32_t, 4, 0, 8>(opcode) |
+               getfield<uint32_t, 4, 8, 12>(f1) |
+               getfield<uint32_t, 4, 12, 16>(f2) |
+               getfield<uint32_t, 4, 16, 20>(f3) |
+               getfield<uint32_t, 4, 20, 32>(f4));
+  }
+  S390_RX_A_OPCODE_LIST(DECLARE_S390_RX_INSTRUCTIONS)
+
+  void bc(Condition cond, const MemOperand& opnd) {
+    bc(cond, opnd.getIndexRegister(),
+       opnd.getBaseRegister(), opnd.getDisplacement());
+  }
+  void bc(Condition cond, Register x2, Register b2, Disp d2) {
+    rx_format(BC, cond, x2.code(), b2.code(), d2);
+  }
+#undef DECLARE_S390_RX_INSTRUCTIONS
+
+#define DECLARE_S390_RXY_INSTRUCTIONS(name, op_name, op_value)       \
+  template <class R1, class R2>                                      \
+  inline void name(R1 r1, R2 r2, Register b2, Disp d2) {             \
+    rxy_format(op_name, r1.code(), r2.code(), b2.code(), d2);        \
+  }                                                                  \
+  template <class R1>                                                \
+  inline void name(R1 r1, const MemOperand& opnd) {                  \
+    name(r1, opnd.getIndexRegister(),                                \
+         opnd.getBaseRegister(), opnd.getDisplacement());            \
+  }
+
+  inline void rxy_format(Opcode opcode, int f1, int f2, int f3, int f4) {
+    DCHECK(is_uint16(opcode));
+    DCHECK(is_int20(f4));
+    emit6bytes(getfield<uint64_t, 6, 0, 8>(opcode >> 8) |
+               getfield<uint64_t, 6, 8, 12>(f1) |
+               getfield<uint64_t, 6, 12, 16>(f2) |
+               getfield<uint64_t, 6, 16, 20>(f3) |
+               getfield<uint64_t, 6, 20, 32>(f4 & 0x0fff) |
+               getfield<uint64_t, 6, 32, 40>(f4 >> 12) |
+               getfield<uint64_t, 6, 40, 48>(opcode & 0x00ff));
+  }
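
The subtle part of rxy_format is the signed 20-bit displacement: the RXY
format splits it into a low 12-bit DL field (instruction bits 20-31) and a
high 8-bit DH field (bits 32-39), which is what the f4 & 0x0fff / f4 >> 12
pair above implements. A small sketch checking that the split round-trips
over the full signed 20-bit range:

#include <cassert>
#include <cstdint>

static uint32_t split_dl(int32_t d2) { return d2 & 0x0fff; }        // bits 20-31
static uint32_t split_dh(int32_t d2) { return (d2 >> 12) & 0xff; }  // bits 32-39

static int32_t join(uint32_t dl, uint32_t dh) {
  uint32_t d = (dh << 12) | dl;       // reassembled 20-bit value
  if (d & 0x80000) d |= 0xfff00000u;  // sign-extend from bit 19
  return static_cast<int32_t>(d);
}

int main() {
  for (int32_t d : {0, 1, 4095, 4096, 524287, -1, -4096, -524288}) {
    assert(join(split_dl(d), split_dh(d)) == d);
  }
  return 0;
}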
+  S390_RXY_A_OPCODE_LIST(DECLARE_S390_RXY_INSTRUCTIONS)
+
+  void pfd(Condition cond, const MemOperand& opnd) {
+    pfd(cond, opnd.getIndexRegister(),
+        opnd.getBaseRegister(), opnd.getDisplacement());
+  }
+  void pfd(Condition cond, Register x2, Register b2, Disp d2) {
+    rxy_format(PFD, cond, x2.code(), b2.code(), d2);
+  }
+#undef DECLARE_S390_RXY_INSTRUCTIONS
+
   // Helper for unconditional branch to Label with update to save register
   void b(Register r, Label* l) {
     int32_t halfwords = branch_offset(l) / 2;
@@ -633,10 +848,6 @@
 
 #define RR2_FORM(name) void name(Condition m1, Register r2)
 
-#define RX_FORM(name)                                        \
-  void name(Register r1, Register x2, Register b2, Disp d2); \
-  void name(Register r1, const MemOperand& opnd)
-
 #define RI1_FORM(name) void name(Register r, const Operand& i)
 
 #define RI2_FORM(name) void name(Condition m, const Operand& i)
@@ -647,10 +858,6 @@
   void name(Register r1, Register r2, const Operand& i3, const Operand& i4, \
             const Operand& i5)
 
-#define RIL1_FORM(name) void name(Register r1, const Operand& i2)
-
-#define RIL2_FORM(name) void name(Condition m1, const Operand& i2)
-
 #define RXE_FORM(name)                            \
   void name(Register r1, const MemOperand& opnd); \
   void name(Register r1, Register b2, Register x2, Disp d2)
@@ -659,10 +866,6 @@
   void name(Register r1, Register r3, const MemOperand& opnd); \
   void name(Register r1, Register r3, Register b2, Register x2, Disp d2)
 
-#define RXY_FORM(name)                                       \
-  void name(Register r1, Register x2, Register b2, Disp d2); \
-  void name(Register r1, const MemOperand& opnd)
-
 #define RSI_FORM(name) void name(Register r1, Register r3, const Operand& i)
 
 #define RIS_FORM(name)                                       \
@@ -679,8 +882,6 @@
   void name(Register b1, Disp d1, const Operand& i2); \
   void name(const MemOperand& opnd, const Operand& i2)
 
-#define RRE_FORM(name) void name(Register r1, Register r2)
-
 #define RRF1_FORM(name) void name(Register r1, Register r2, Register r3)
 
 #define RRF2_FORM(name) void name(Condition m1, Register r1, Register r2)
@@ -712,8 +913,6 @@
   void name(Register r1, Condition m3, Register b2, Disp d2); \
   void name(Register r1, Condition m3, const MemOperand& opnd)
 
-#define RRD_FORM(name) void name(Register r1, Register r3, Register r2)
-
 #define RRS_FORM(name)                                                     \
   void name(Register r1, Register r2, Register b4, Disp d4, Condition m3); \
   void name(Register r1, Register r2, Condition m3, const MemOperand& opnd)
@@ -758,119 +957,87 @@
   void name(Register r3, Register b1, Disp d1, Register b2, Disp d2); \
   void name(Register r3, const MemOperand& opnd1, const MemOperand& opnd2)
 
+#define DECLARE_VRR_A_INSTRUCTIONS(name, opcode_name, opcode_value)           \
+  void name(DoubleRegister v1, DoubleRegister v2, Condition m5, Condition m4, \
+            Condition m3) {                                                   \
+    uint64_t code = (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 |    \
+                    (static_cast<uint64_t>(v1.code())) * B36 |                \
+                    (static_cast<uint64_t>(v2.code())) * B32 |                \
+                    (static_cast<uint64_t>(m5 & 0xF)) * B20 |                 \
+                    (static_cast<uint64_t>(m4 & 0xF)) * B16 |                 \
+                    (static_cast<uint64_t>(m3 & 0xF)) * B12 |                 \
+                    (static_cast<uint64_t>(opcode_value & 0x00FF));           \
+    emit6bytes(code);                                                         \
+  }
+  S390_VRR_A_OPCODE_LIST(DECLARE_VRR_A_INSTRUCTIONS)
+#undef DECLARE_VRR_A_INSTRUCTIONS
+
+#define DECLARE_VRR_C_INSTRUCTIONS(name, opcode_name, opcode_value)        \
+  void name(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3,       \
+            Condition m6, Condition m5, Condition m4) {                    \
+    uint64_t code = (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 | \
+                    (static_cast<uint64_t>(v1.code())) * B36 |             \
+                    (static_cast<uint64_t>(v2.code())) * B32 |             \
+                    (static_cast<uint64_t>(v3.code())) * B28 |             \
+                    (static_cast<uint64_t>(m6 & 0xF)) * B20 |              \
+                    (static_cast<uint64_t>(m5 & 0xF)) * B16 |              \
+                    (static_cast<uint64_t>(m4 & 0xF)) * B12 |              \
+                    (static_cast<uint64_t>(opcode_value & 0x00FF));        \
+    emit6bytes(code);                                                      \
+  }
+  S390_VRR_C_OPCODE_LIST(DECLARE_VRR_C_INSTRUCTIONS)
+#undef DECLARE_VRR_C_INSTRUCTIONS
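
The VRR macro bodies position each field by multiplying with the assembler's
B-constants instead of shifting. Assuming Bnn is defined as 1 << nn (which
the arithmetic above requires), the two spellings are identical; a sketch
with toy field values:

#include <cassert>
#include <cstdint>

constexpr uint64_t B12 = 1ull << 12, B16 = 1ull << 16, B20 = 1ull << 20,
                   B28 = 1ull << 28, B32 = 1ull << 32, B36 = 1ull << 36;

int main() {
  // Multiplying by a power of two is the same as shifting left.
  uint64_t v1 = 5;
  assert(v1 * B36 == v1 << 36);

  // Packing a VRR_C-style 6-byte image (0xE7F3 and the field values are
  // toy numbers, not a checked s390 encoding).
  uint64_t code = (0xE7F3ull & 0xFF00) * B32 | 1 * B36 | 2 * B32 |
                  3 * B28 | 0 * B20 | 8 * B16 | 3 * B12 |
                  (0xE7F3ull & 0x00FF);
  assert(code == ((0xE7ull << 40) | (1ull << 36) | (2ull << 32) |
                  (3ull << 28) | (8ull << 16) | (3ull << 12) | 0xF3));
  return 0;
}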
+
+  // Single Element format: wrappers with fixed masks for scalar
+  // (single-element) operations on long-format elements.
+  void vfa(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3) {
+    vfa(v1, v2, v3, static_cast<Condition>(0), static_cast<Condition>(8),
+        static_cast<Condition>(3));
+  }
+  void vfs(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3) {
+    vfs(v1, v2, v3, static_cast<Condition>(0), static_cast<Condition>(8),
+        static_cast<Condition>(3));
+  }
+  void vfm(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3) {
+    vfm(v1, v2, v3, static_cast<Condition>(0), static_cast<Condition>(8),
+        static_cast<Condition>(3));
+  }
+  void vfd(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3) {
+    vfd(v1, v2, v3, static_cast<Condition>(0), static_cast<Condition>(8),
+        static_cast<Condition>(3));
+  }
+
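
These overloads pin the three mask operands so callers get the scalar
double-precision behavior without spelling out the Conditions at every call
site. A toy model of the forwarding; the meaning of 8 (single-element S bit)
and 3 (long/double format) is our reading of the vector-FP encoding, not
something this header states, and the sketch only checks the forwarding:

#include <cassert>

struct Masks { int m6; int m5; int m4; };

static Masks vfa_full(int /*v1*/, int /*v2*/, int /*v3*/, int m6, int m5,
                      int m4) {
  return Masks{m6, m5, m4};  // a real emitter would encode here
}

static Masks vfa(int v1, int v2, int v3) {
  return vfa_full(v1, v2, v3, 0, 8, 3);  // pinned masks, as above
}

int main() {
  Masks m = vfa(1, 2, 3);
  assert(m.m6 == 0 && m.m5 == 8 && m.m4 == 3);
  return 0;
}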
   // S390 instruction sets
-  RX_FORM(bc);
-  RR_FORM(bctr);
-  RX_FORM(cd);
-  RRE_FORM(cdr);
-  RXE_FORM(cdb);
-  RXE_FORM(ceb);
   RXE_FORM(ddb);
-  RRE_FORM(ddbr);
   SS1_FORM(ed);
-  RRE_FORM(epair);
-  RX_FORM(ex);
   RRF2_FORM(fidbr);
-  RRE_FORM(flogr);
-  RX_FORM(ic_z);
-  RXY_FORM(icy);
-  RIL1_FORM(iihf);
   RI1_FORM(iihh);
   RI1_FORM(iihl);
-  RIL1_FORM(iilf);
-  RIL1_FORM(lgfi);
   RI1_FORM(iilh);
   RI1_FORM(iill);
-  RRE_FORM(lcgr);
-  RR_FORM(lcr);
-  RX_FORM(le_z);
-  RXY_FORM(ley);
-  RIL1_FORM(llihf);
-  RIL1_FORM(llilf);
-  RRE_FORM(lngr);
-  RR_FORM(lnr);
   RSY1_FORM(loc);
-  RXY_FORM(lrv);
-  RRE_FORM(lrvr);
-  RRE_FORM(lrvgr);
-  RXY_FORM(lrvh);
-  RXY_FORM(lrvg);
   RXE_FORM(mdb);
-  RRE_FORM(mdbr);
   SS4_FORM(mvck);
   SSF_FORM(mvcos);
   SS4_FORM(mvcs);
   SS1_FORM(mvn);
   SS1_FORM(nc);
   SI_FORM(ni);
-  RIL1_FORM(nihf);
-  RIL1_FORM(nilf);
   RI1_FORM(nilh);
   RI1_FORM(nill);
-  RIL1_FORM(oihf);
-  RIL1_FORM(oilf);
   RI1_FORM(oill);
-  RRE_FORM(popcnt);
   RXE_FORM(sdb);
-  RRE_FORM(sdbr);
-  RIL1_FORM(slfi);
-  RXY_FORM(slgf);
-  RIL1_FORM(slgfi);
   RS1_FORM(srdl);
-  RX_FORM(ste);
-  RXY_FORM(stey);
-  RXY_FORM(strv);
-  RXY_FORM(strvh);
-  RXY_FORM(strvg);
   RI1_FORM(tmll);
   SS1_FORM(tr);
   S_FORM(ts);
-  RIL1_FORM(xihf);
-  RIL1_FORM(xilf);
 
   // Load Address Instructions
-  void la(Register r, const MemOperand& opnd);
-  void lay(Register r, const MemOperand& opnd);
-  void larl(Register r1, const Operand& opnd);
   void larl(Register r, Label* l);
 
   // Load Instructions
-  void lb(Register r, const MemOperand& src);
-  void lbr(Register r1, Register r2);
-  void lgb(Register r, const MemOperand& src);
-  void lgbr(Register r1, Register r2);
-  void lh(Register r, const MemOperand& src);
-  void lhy(Register r, const MemOperand& src);
-  void lhr(Register r1, Register r2);
-  void lgh(Register r, const MemOperand& src);
-  void lghr(Register r1, Register r2);
-  void l(Register r, const MemOperand& src);
-  void ly(Register r, const MemOperand& src);
-  void lr(Register r1, Register r2);
-  void lg(Register r, const MemOperand& src);
-  void lgr(Register r1, Register r2);
-  void lgf(Register r, const MemOperand& src);
-  void lgfr(Register r1, Register r2);
   void lhi(Register r, const Operand& imm);
   void lghi(Register r, const Operand& imm);
 
-  // Load And Test Instructions
-  void lt_z(Register r, const MemOperand& src);
-  void ltg(Register r, const MemOperand& src);
-  void ltr(Register r1, Register r2);
-  void ltgr(Register r1, Register r2);
-  void ltgfr(Register r1, Register r2);
-
-  // Load Logical Instructions
-  void llc(Register r, const MemOperand& src);
-  void llgc(Register r, const MemOperand& src);
-  void llgf(Register r, const MemOperand& src);
-  void llgfr(Register r1, Register r2);
-  void llh(Register r, const MemOperand& src);
-  void llgh(Register r, const MemOperand& src);
-  void llhr(Register r1, Register r2);
-  void llghr(Register r1, Register r2);
-
   // Load Multiple Instructions
   void lm(Register r1, Register r2, const MemOperand& src);
   void lmy(Register r1, Register r2, const MemOperand& src);
@@ -883,13 +1050,6 @@
   void locg(Condition m3, Register r1, const MemOperand& src);
 
   // Store Instructions
-  void st(Register r, const MemOperand& src);
-  void stc(Register r, const MemOperand& src);
-  void stcy(Register r, const MemOperand& src);
-  void stg(Register r, const MemOperand& src);
-  void sth(Register r, const MemOperand& src);
-  void sthy(Register r, const MemOperand& src);
-  void sty(Register r, const MemOperand& src);
 
   // Store Multiple Instructions
   void stm(Register r1, Register r2, const MemOperand& src);
@@ -897,26 +1057,10 @@
   void stmg(Register r1, Register r2, const MemOperand& src);
 
   // Compare Instructions
-  void c(Register r, const MemOperand& opnd);
-  void cy(Register r, const MemOperand& opnd);
-  void cr_z(Register r1, Register r2);
-  void cg(Register r, const MemOperand& opnd);
-  void cgr(Register r1, Register r2);
-  void ch(Register r, const MemOperand& opnd);
-  void chy(Register r, const MemOperand& opnd);
   void chi(Register r, const Operand& opnd);
   void cghi(Register r, const Operand& opnd);
-  void cfi(Register r, const Operand& opnd);
-  void cgfi(Register r, const Operand& opnd);
 
   // Compare Logical Instructions
-  void cl(Register r, const MemOperand& opnd);
-  void cly(Register r, const MemOperand& opnd);
-  void clr(Register r1, Register r2);
-  void clg(Register r, const MemOperand& opnd);
-  void clgr(Register r1, Register r2);
-  void clfi(Register r, const Operand& opnd);
-  void clgfi(Register r, const Operand& opnd);
   void cli(const MemOperand& mem, const Operand& imm);
   void cliy(const MemOperand& mem, const Operand& imm);
   void clc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
@@ -978,160 +1122,58 @@
   void mvc(const MemOperand& opnd1, const MemOperand& opnd2, uint32_t length);
 
   // Branch Instructions
-  void basr(Register r1, Register r2);
-  void bcr(Condition m, Register target);
-  void bct(Register r, const MemOperand& opnd);
-  void bctg(Register r, const MemOperand& opnd);
   void bras(Register r, const Operand& opnd);
-  void brasl(Register r, const Operand& opnd);
   void brc(Condition c, const Operand& opnd);
-  void brcl(Condition m, const Operand& opnd, bool isCodeTarget = false);
   void brct(Register r1, const Operand& opnd);
   void brctg(Register r1, const Operand& opnd);
 
   // 32-bit Add Instructions
-  void a(Register r1, const MemOperand& opnd);
-  void ay(Register r1, const MemOperand& opnd);
-  void afi(Register r1, const Operand& opnd);
-  void ah(Register r1, const MemOperand& opnd);
-  void ahy(Register r1, const MemOperand& opnd);
   void ahi(Register r1, const Operand& opnd);
   void ahik(Register r1, Register r3, const Operand& opnd);
-  void ar(Register r1, Register r2);
   void ark(Register r1, Register r2, Register r3);
   void asi(const MemOperand&, const Operand&);
 
   // 64-bit Add Instructions
-  void ag(Register r1, const MemOperand& opnd);
-  void agf(Register r1, const MemOperand& opnd);
-  void agfi(Register r1, const Operand& opnd);
-  void agfr(Register r1, Register r2);
   void aghi(Register r1, const Operand& opnd);
   void aghik(Register r1, Register r3, const Operand& opnd);
-  void agr(Register r1, Register r2);
   void agrk(Register r1, Register r2, Register r3);
   void agsi(const MemOperand&, const Operand&);
 
   // 32-bit Add Logical Instructions
-  void al_z(Register r1, const MemOperand& opnd);
-  void aly(Register r1, const MemOperand& opnd);
-  void alfi(Register r1, const Operand& opnd);
-  void alr(Register r1, Register r2);
-  void alcr(Register r1, Register r2);
   void alrk(Register r1, Register r2, Register r3);
 
   // 64-bit Add Logical Instructions
-  void alg(Register r1, const MemOperand& opnd);
-  void algfi(Register r1, const Operand& opnd);
-  void algr(Register r1, Register r2);
   void algrk(Register r1, Register r2, Register r3);
 
   // 32-bit Subtract Instructions
-  void s(Register r1, const MemOperand& opnd);
-  void sy(Register r1, const MemOperand& opnd);
-  void sh(Register r1, const MemOperand& opnd);
-  void shy(Register r1, const MemOperand& opnd);
-  void sr(Register r1, Register r2);
   void srk(Register r1, Register r2, Register r3);
 
   // 64-bit Subtract Instructions
-  void sg(Register r1, const MemOperand& opnd);
-  void sgf(Register r1, const MemOperand& opnd);
-  void sgr(Register r1, Register r2);
-  void sgfr(Register r1, Register r2);
   void sgrk(Register r1, Register r2, Register r3);
 
   // 32-bit Subtract Logical Instructions
-  void sl(Register r1, const MemOperand& opnd);
-  void sly(Register r1, const MemOperand& opnd);
-  void slr(Register r1, Register r2);
   void slrk(Register r1, Register r2, Register r3);
-  void slbr(Register r1, Register r2);
 
   // 64-bit Subtract Logical Instructions
-  void slg(Register r1, const MemOperand& opnd);
-  void slgr(Register r1, Register r2);
   void slgrk(Register r1, Register r2, Register r3);
 
   // 32-bit Multiply Instructions
-  void m(Register r1, const MemOperand& opnd);
-  void mfy(Register r1, const MemOperand& opnd);
-  void mr_z(Register r1, Register r2);
-  void ml(Register r1, const MemOperand& opnd);
-  void mlr(Register r1, Register r2);
-  void ms(Register r1, const MemOperand& opnd);
-  void msy(Register r1, const MemOperand& opnd);
-  void msfi(Register r1, const Operand& opnd);
-  void msr(Register r1, Register r2);
-  void mh(Register r1, const MemOperand& opnd);
-  void mhy(Register r1, const MemOperand& opnd);
   void mhi(Register r1, const Operand& opnd);
+  void msrkc(Register r1, Register r2, Register r3);
+  void msgrkc(Register r1, Register r2, Register r3);
 
   // 64-bit Multiply Instructions
-  void mlg(Register r1, const MemOperand& opnd);
-  void mlgr(Register r1, Register r2);
   void mghi(Register r1, const Operand& opnd);
-  void msgfi(Register r1, const Operand& opnd);
-  void msg(Register r1, const MemOperand& opnd);
-  void msgr(Register r1, Register r2);
-
-  // 32-bit Divide Instructions
-  void d(Register r1, const MemOperand& opnd);
-  void dr(Register r1, Register r2);
-  void dl(Register r1, const MemOperand& opnd);
-  void dlr(Register r1, Register r2);
-
-  // 64-bit Divide Instructions
-  void dlgr(Register r1, Register r2);
-  void dsgr(Register r1, Register r2);
 
   // Bitwise Instructions (AND / OR / XOR)
-  void n(Register r1, const MemOperand& opnd);
-  void ny(Register r1, const MemOperand& opnd);
-  void nr(Register r1, Register r2);
   void nrk(Register r1, Register r2, Register r3);
-  void ng(Register r1, const MemOperand& opnd);
-  void ngr(Register r1, Register r2);
   void ngrk(Register r1, Register r2, Register r3);
-  void o(Register r1, const MemOperand& opnd);
-  void oy(Register r1, const MemOperand& opnd);
-  void or_z(Register r1, Register r2);
   void ork(Register r1, Register r2, Register r3);
-  void og(Register r1, const MemOperand& opnd);
-  void ogr(Register r1, Register r2);
   void ogrk(Register r1, Register r2, Register r3);
-  void x(Register r1, const MemOperand& opnd);
-  void xy(Register r1, const MemOperand& opnd);
-  void xr(Register r1, Register r2);
   void xrk(Register r1, Register r2, Register r3);
-  void xg(Register r1, const MemOperand& opnd);
-  void xgr(Register r1, Register r2);
   void xgrk(Register r1, Register r2, Register r3);
   void xc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
 
-  // Bitwise GPR <-> FPR Conversion Instructions
-  void lgdr(Register r1, DoubleRegister f2);
-  void ldgr(DoubleRegister f1, Register r2);
-
-  // Floating Point Load / Store Instructions
-  void ld(DoubleRegister r1, const MemOperand& opnd);
-  void ldy(DoubleRegister r1, const MemOperand& opnd);
-  void le_z(DoubleRegister r1, const MemOperand& opnd);
-  void ley(DoubleRegister r1, const MemOperand& opnd);
-  void ldr(DoubleRegister r1, DoubleRegister r2);
-  void ltdbr(DoubleRegister r1, DoubleRegister r2);
-  void ltebr(DoubleRegister r1, DoubleRegister r2);
-  void std(DoubleRegister r1, const MemOperand& opnd);
-  void stdy(DoubleRegister r1, const MemOperand& opnd);
-  void ste(DoubleRegister r1, const MemOperand& opnd);
-  void stey(DoubleRegister r1, const MemOperand& opnd);
-
-  // Floating Point Load Rounded/Positive Instructions
-  void ledbr(DoubleRegister r1, DoubleRegister r2);
-  void ldebr(DoubleRegister r1, DoubleRegister r2);
-  void lpebr(DoubleRegister r1, DoubleRegister r2);
-  void lpdbr(DoubleRegister r1, DoubleRegister r2);
-
   // Floating <-> Fixed Point Conversion Instructions
   void cdlfbr(Condition m3, Condition m4, DoubleRegister fltReg,
               Register fixReg);
@@ -1150,40 +1192,21 @@
   void clgebr(Condition m3, Condition m4, Register fixReg,
               DoubleRegister fltReg);
   void cfdbr(Condition m, Register fixReg, DoubleRegister fltReg);
-  void cdfbr(DoubleRegister fltReg, Register fixReg);
   void cgebr(Condition m, Register fixReg, DoubleRegister fltReg);
   void cgdbr(Condition m, Register fixReg, DoubleRegister fltReg);
-  void cegbr(DoubleRegister fltReg, Register fixReg);
-  void cdgbr(DoubleRegister fltReg, Register fixReg);
   void cfebr(Condition m3, Register fixReg, DoubleRegister fltReg);
   void cefbr(Condition m3, DoubleRegister fltReg, Register fixReg);
 
   // Floating Point Compare Instructions
-  void cebr(DoubleRegister r1, DoubleRegister r2);
   void cdb(DoubleRegister r1, const MemOperand& opnd);
-  void cdbr(DoubleRegister r1, DoubleRegister r2);
+  void ceb(DoubleRegister r1, const MemOperand& opnd);
 
   // Floating Point Arithmetic Instructions
-  void aebr(DoubleRegister r1, DoubleRegister r2);
   void adb(DoubleRegister r1, const MemOperand& opnd);
-  void adbr(DoubleRegister r1, DoubleRegister r2);
-  void lzdr(DoubleRegister r1);
-  void sebr(DoubleRegister r1, DoubleRegister r2);
   void sdb(DoubleRegister r1, const MemOperand& opnd);
-  void sdbr(DoubleRegister r1, DoubleRegister r2);
-  void meebr(DoubleRegister r1, DoubleRegister r2);
   void mdb(DoubleRegister r1, const MemOperand& opnd);
-  void mdbr(DoubleRegister r1, DoubleRegister r2);
-  void debr(DoubleRegister r1, DoubleRegister r2);
   void ddb(DoubleRegister r1, const MemOperand& opnd);
-  void ddbr(DoubleRegister r1, DoubleRegister r2);
-  void madbr(DoubleRegister r1, DoubleRegister r2, DoubleRegister r3);
-  void msdbr(DoubleRegister r1, DoubleRegister r2, DoubleRegister r3);
-  void sqebr(DoubleRegister r1, DoubleRegister r2);
   void sqdb(DoubleRegister r1, const MemOperand& opnd);
-  void sqdbr(DoubleRegister r1, DoubleRegister r2);
-  void lcdbr(DoubleRegister r1, DoubleRegister r2);
-  void lcebr(DoubleRegister r1, DoubleRegister r2);
   void ldeb(DoubleRegister r1, const MemOperand& opnd);
 
   enum FIDBRA_MASK3 {
@@ -1224,6 +1247,8 @@
 
   void nop(int type = 0);  // 0 is the default non-marking type.
 
+  void dumy(int r1, int x2, int b2, int d2);
+
   // Check the code size generated from label to here.
   int SizeOfCodeGeneratedSince(Label* label) {
     return pc_offset() - label->pos();
@@ -1231,9 +1256,6 @@
 
   // Debugging
 
-  // Mark generator continuation.
-  void RecordGeneratorContinuation();
-
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot(RelocInfo::Mode mode);
 
@@ -1367,16 +1389,8 @@
 
   // Helpers to emit binary encoding for various instruction formats.
 
-  inline void rr_form(Opcode op, Register r1, Register r2);
-  inline void rr_form(Opcode op, DoubleRegister r1, DoubleRegister r2);
-  inline void rr_form(Opcode op, Condition m1, Register r2);
   inline void rr2_form(uint8_t op, Condition m1, Register r2);
 
-  inline void rx_form(Opcode op, Register r1, Register x2, Register b2,
-                      Disp d2);
-  inline void rx_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
-                      Disp d2);
-
   inline void ri_form(Opcode op, Register r1, const Operand& i2);
   inline void ri_form(Opcode op, Condition m1, const Operand& i2);
 
@@ -1384,17 +1398,9 @@
   inline void rie_f_form(Opcode op, Register r1, Register r2, const Operand& i3,
                          const Operand& i4, const Operand& i5);
 
-  inline void ril_form(Opcode op, Register r1, const Operand& i2);
-  inline void ril_form(Opcode op, Condition m1, const Operand& i2);
-
   inline void ris_form(Opcode op, Register r1, Condition m3, Register b4,
                        Disp d4, const Operand& i2);
 
-  inline void rrd_form(Opcode op, Register r1, Register r3, Register r2);
-
-  inline void rre_form(Opcode op, Register r1, Register r2);
-  inline void rre_form(Opcode op, DoubleRegister r1, DoubleRegister r2);
-
   inline void rrf1_form(Opcode op, Register r1, Register r2, Register r3);
   inline void rrf1_form(uint32_t x);
   inline void rrf2_form(uint32_t x);
@@ -1424,13 +1430,6 @@
   inline void rxf_form(Opcode op, Register r1, Register r3, Register b2,
                        Register x2, Disp d2);
 
-  inline void rxy_form(Opcode op, Register r1, Register x2, Register b2,
-                       Disp d2);
-  inline void rxy_form(Opcode op, Register r1, Condition m3, Register b2,
-                       Disp d2);
-  inline void rxy_form(Opcode op, DoubleRegister r1, Register x2, Register b2,
-                       Disp d2);
-
   inline void s_form(Opcode op, Register b1, Disp d2);
 
   inline void si_form(Opcode op, const Operand& i2, Register b1, Disp d1);
diff --git a/src/s390/code-stubs-s390.cc b/src/s390/code-stubs-s390.cc
index 553d6d8..52a8db1 100644
--- a/src/s390/code-stubs-s390.cc
+++ b/src/s390/code-stubs-s390.cc
@@ -32,17 +32,6 @@
   __ TailCallRuntime(Runtime::kNewArray);
 }
 
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
-  descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
-  descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                           Condition cond);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
@@ -217,9 +206,6 @@
     // Call runtime on identical symbols since we need to throw a TypeError.
     __ CmpP(r6, Operand(SYMBOL_TYPE));
     __ beq(slow);
-    // Call runtime on identical SIMD values since we must throw a TypeError.
-    __ CmpP(r6, Operand(SIMD128_VALUE_TYPE));
-    __ beq(slow);
   } else {
     __ CompareObjectType(r2, r6, r6, HEAP_NUMBER_TYPE);
     __ beq(&heap_number);
@@ -230,9 +216,6 @@
       // Call runtime on identical symbols since we need to throw a TypeError.
       __ CmpP(r6, Operand(SYMBOL_TYPE));
       __ beq(slow);
-      // Call runtime on identical SIMD values since we must throw a TypeError.
-      __ CmpP(r6, Operand(SIMD128_VALUE_TYPE));
-      __ beq(slow);
       // Normally here we fall through to return_equal, but undefined is
       // special: (undefined == undefined) == true, but
       // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
@@ -647,8 +630,11 @@
   if (cc == eq) {
     {
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ Push(lhs, rhs);
-      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+      __ Push(cp);
+      __ Call(strict() ? isolate()->builtins()->StrictEqual()
+                       : isolate()->builtins()->Equal(),
+              RelocInfo::CODE_TARGET);
+      __ Pop(cp);
     }
     // Turn true into 0 and false into some non-zero value.
     STATIC_ASSERT(EQUAL == 0);
@@ -847,7 +833,6 @@
   SaveFPRegsMode mode = kSaveFPRegs;
   CEntryStub(isolate, 1, mode).GetCode();
   StoreBufferOverflowStub(isolate, mode).GetCode();
-  isolate->set_fp_stubs_generated(true);
 }
 
 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
@@ -1095,9 +1080,9 @@
   // Push a bad frame pointer to fail if it is used.
   __ LoadImmP(r10, Operand(-1));
 
-  int marker = type();
-  __ LoadSmiLiteral(r9, Smi::FromInt(marker));
-  __ LoadSmiLiteral(r8, Smi::FromInt(marker));
+  StackFrame::Type marker = type();
+  __ Load(r9, Operand(StackFrame::TypeToMarker(marker)));
+  __ Load(r8, Operand(StackFrame::TypeToMarker(marker)));
   // Save copies of the top frame descriptor on the stack.
   __ mov(r7, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   __ LoadP(r7, MemOperand(r7));
@@ -1115,11 +1100,11 @@
   __ LoadAndTestP(r8, MemOperand(r7));
   __ bne(&non_outermost_js, Label::kNear);
   __ StoreP(fp, MemOperand(r7));
-  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+  __ Load(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   Label cont;
   __ b(&cont, Label::kNear);
   __ bind(&non_outermost_js);
-  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+  __ Load(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
 
   __ bind(&cont);
   __ StoreP(ip, MemOperand(sp));  // frame-type
@@ -1186,7 +1171,7 @@
   // Check if the current stack frame is marked as the outermost JS frame.
   Label non_outermost_js_2;
   __ pop(r7);
-  __ CmpSmiLiteral(r7, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
+  __ CmpP(r7, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
   __ bne(&non_outermost_js_2, Label::kNear);
   __ mov(r8, Operand::Zero());
   __ mov(r7, Operand(ExternalReference(js_entry_sp)));
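
The hunks above stop materializing frame-type markers as Smis and instead
load StackFrame::TypeToMarker(type) (or the raw JSENTRY constants). A sketch
of the marker encoding we believe this era of upstream V8 uses, namely
(type << 1) | 1: the result is always odd, so it can never alias a Smi,
whose low tag bit is 0.

#include <cassert>
#include <cstdint>

enum Type { ENTRY = 1, EXIT = 2 };  // toy values, not V8's real Type list

static int32_t TypeToMarker(int type) { return (type << 1) | 1; }
static int MarkerToType(int32_t marker) { return marker >> 1; }
static bool LooksLikeSmi(int32_t value) { return (value & 1) == 0; }

int main() {
  int32_t marker = TypeToMarker(ENTRY);
  assert(!LooksLikeSmi(marker));          // cannot be mistaken for a Smi
  assert(MarkerToType(marker) == ENTRY);  // encoding round-trips
  return 0;
}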
@@ -1228,52 +1213,6 @@
   __ b(r14);
 }
 
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  // Ensure that the vector and slot registers won't be clobbered before
-  // calling the miss handler.
-  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::VectorRegister(),
-                     LoadWithVectorDescriptor::SlotRegister()));
-
-  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r6,
-                                                          r7, &miss);
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
-  // Return address is in lr.
-  Label miss;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register index = LoadDescriptor::NameRegister();
-  Register scratch = r7;
-  Register result = r2;
-  DCHECK(!scratch.is(receiver) && !scratch.is(index));
-  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
-         result.is(LoadWithVectorDescriptor::SlotRegister()));
-
-  // StringCharAtGenerator doesn't use the result register until it's passed
-  // the different miss possibilities. If it did, we would have a conflict
-  // when FLAG_vector_ics is true.
-  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          RECEIVER_IS_STRING);
-  char_at_generator.GenerateFast(masm);
-  __ Ret();
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
 // Just jump directly to runtime if native RegExp is not selected at compile
 // time, or if regexp entry in generated code is turned off by a runtime switch or
@@ -1379,7 +1318,7 @@
   // (6) External string.  Make it, offset-wise, look like a sequential string.
   //     Go to (4).
   // (7) Short external string or not a string?  If yes, bail out to runtime.
-  // (8) Sliced string.  Replace subject with parent.  Go to (1).
+  // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
 
   Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
       not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
@@ -1391,7 +1330,7 @@
   // (1) Sequential string?  If yes, go to (4).
 
   STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
-                 kShortExternalStringMask) == 0x93);
+                 kShortExternalStringMask) == 0xa7);
   __ mov(r3, Operand(kIsNotStringMask | kStringRepresentationMask |
                      kShortExternalStringMask));
   __ AndP(r3, r2);
@@ -1401,6 +1340,7 @@
   // (2) Sequential or cons? If not, go to (5).
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kThinStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   STATIC_ASSERT(kExternalStringTag < 0xffffu);
@@ -1429,9 +1369,9 @@
   __ ble(&runtime);
   __ SmiUntag(r3);
 
-  STATIC_ASSERT(4 == kOneByteStringTag);
+  STATIC_ASSERT(8 == kOneByteStringTag);
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  STATIC_ASSERT(kStringEncodingMask == 4);
+  STATIC_ASSERT(kStringEncodingMask == 8);
   __ ExtractBitMask(r5, r2, kStringEncodingMask, SetRC);
   __ beq(&encoding_type_UC16, Label::kNear);
   __ LoadP(code,
@@ -1688,12 +1628,19 @@
   __ AndP(r0, r3);
   __ bne(&runtime);
 
-  // (8) Sliced string.  Replace subject with parent.  Go to (4).
+  // (8) Sliced or thin string.  Replace subject with parent.  Go to (4).
+  Label thin_string;
+  __ CmpP(r3, Operand(kThinStringTag));
+  __ beq(&thin_string);
   // Load offset into ip and replace subject string with parent.
   __ LoadP(ip, FieldMemOperand(subject, SlicedString::kOffsetOffset));
   __ SmiUntag(ip);
   __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
   __ b(&check_underlying);  // Go to (4).
+
+  __ bind(&thin_string);
+  __ LoadP(subject, FieldMemOperand(subject, ThinString::kActualOffset));
+  __ b(&check_underlying);  // Go to (4).
 #endif  // V8_INTERPRETED_REGEXP
 }
 
@@ -1726,9 +1673,9 @@
   // r5 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
 
-  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+  DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
-  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+  DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
   const int count_offset = FixedArray::kHeaderSize + kPointerSize;
@@ -1741,7 +1688,7 @@
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
   // We don't know if r7 is a WeakCell or a Symbol, but it's harmless to read at
-  // this position in a symbol (see static asserts in type-feedback-vector.h).
+  // this position in a symbol (see static asserts in feedback-vector.h).
   Label check_allocation_site;
   Register feedback_map = r8;
   Register weak_value = r9;
@@ -1861,188 +1808,6 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
-// Note: feedback_vector and slot are clobbered after the call.
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
-                               Register slot, Register temp) {
-  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
-  __ SmiToPtrArrayOffset(temp, slot);
-  __ AddP(feedback_vector, feedback_vector, temp);
-  __ LoadP(slot, FieldMemOperand(feedback_vector, count_offset));
-  __ AddSmiLiteral(slot, slot, Smi::FromInt(1), temp);
-  __ StoreP(slot, FieldMemOperand(feedback_vector, count_offset), temp);
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
-  // r2 - number of arguments
-  // r3 - function
-  // r5 - slot id
-  // r4 - vector
-  // r6 - allocation site (loaded from vector[slot])
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7);
-  __ CmpP(r3, r7);
-  __ bne(miss);
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, r4, r5, r1);
-
-  __ LoadRR(r4, r6);
-  __ LoadRR(r5, r3);
-  ArrayConstructorStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-void CallICStub::Generate(MacroAssembler* masm) {
-  // r2 - number of arguments
-  // r3 - function
-  // r5 - slot id (Smi)
-  // r4 - vector
-  Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
-  // The checks. First, does r3 match the recorded monomorphic target?
-  __ SmiToPtrArrayOffset(r8, r5);
-  __ AddP(r8, r4, r8);
-  __ LoadP(r6, FieldMemOperand(r8, FixedArray::kHeaderSize));
-
-  // We don't know that we have a weak cell. We might have a private symbol
-  // or an AllocationSite, but the memory is safe to examine.
-  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
-  // FixedArray.
-  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
-  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
-  // computed, meaning that it can't appear to be a pointer. If the low bit is
-  // 0, then hash is computed, but the 0 bit prevents the field from appearing
-  // to be a pointer.
-  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
-  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
-                    WeakCell::kValueOffset &&
-                WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
-  __ LoadP(r7, FieldMemOperand(r6, WeakCell::kValueOffset));
-  __ CmpP(r3, r7);
-  __ bne(&extra_checks_or_miss, Label::kNear);
-
-  // The compare above could have been a SMI/SMI comparison. Guard against this
-  // convincing us that we have a monomorphic JSFunction.
-  __ JumpIfSmi(r3, &extra_checks_or_miss);
-
-  __ bind(&call_function);
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, r4, r5, r1);
-
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
-                                                    tail_call_mode()),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&extra_checks_or_miss);
-  Label uninitialized, miss, not_allocation_site;
-
-  __ CompareRoot(r6, Heap::kmegamorphic_symbolRootIndex);
-  __ beq(&call);
-
-  // Verify that r6 contains an AllocationSite
-  __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
-  __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex);
-  __ bne(&not_allocation_site);
-
-  // We have an allocation site.
-  HandleArrayCase(masm, &miss);
-
-  __ bind(&not_allocation_site);
-
-  // The following cases attempt to handle MISS cases without going to the
-  // runtime.
-  if (FLAG_trace_ic) {
-    __ b(&miss);
-  }
-
-  __ CompareRoot(r6, Heap::kuninitialized_symbolRootIndex);
-  __ beq(&uninitialized);
-
-  // We are going megamorphic. If the feedback is a JSFunction, it is fine
-  // to handle it here. More complex cases are dealt with in the runtime.
-  __ AssertNotSmi(r6);
-  __ CompareObjectType(r6, r7, r7, JS_FUNCTION_TYPE);
-  __ bne(&miss);
-  __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
-  __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
-
-  __ bind(&call);
-
-  // Increment the call count for megamorphic function calls.
-  IncrementCallCount(masm, r4, r5, r1);
-
-  __ bind(&call_count_incremented);
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&uninitialized);
-
-  // We are going monomorphic, provided we actually have a JSFunction.
-  __ JumpIfSmi(r3, &miss);
-
-  // Goto miss case if we do not have a function.
-  __ CompareObjectType(r3, r6, r6, JS_FUNCTION_TYPE);
-  __ bne(&miss);
-
-  // Make sure the function is not the Array() function, which requires special
-  // behavior on MISS.
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r6);
-  __ CmpP(r3, r6);
-  __ beq(&miss);
-
-  // Make sure the function belongs to the same native context.
-  __ LoadP(r6, FieldMemOperand(r3, JSFunction::kContextOffset));
-  __ LoadP(r6, ContextMemOperand(r6, Context::NATIVE_CONTEXT_INDEX));
-  __ LoadP(ip, NativeContextMemOperand());
-  __ CmpP(r6, ip);
-  __ bne(&miss);
-
-  // Store the function. Use a stub since we need a frame for allocation.
-  // r4 - vector
-  // r5 - slot
-  // r3 - function
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    CreateWeakCellStub create_stub(masm->isolate());
-    __ SmiTag(r2);
-    __ Push(r2, r4, r5, cp, r3);
-    __ CallStub(&create_stub);
-    __ Pop(r4, r5, cp, r3);
-    __ Pop(r2);
-    __ SmiUntag(r2);
-  }
-
-  __ b(&call_function);
-
-  // We are here because tracing is on or we encountered a MISS case we can't
-  // handle here.
-  __ bind(&miss);
-  GenerateMiss(masm);
-
-  __ b(&call_count_incremented);
-}
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
-  FrameScope scope(masm, StackFrame::INTERNAL);
-
-  // Preserve the number of arguments as Smi.
-  __ SmiTag(r2);
-
-  // Push the receiver and the function and feedback info.
-  __ Push(r2, r3, r4, r5);
-
-  // Call the entry.
-  __ CallRuntime(Runtime::kCallIC_Miss);
-
-  // Move result to r3 and exit the internal frame.
-  __ LoadRR(r3, r2);
-
-  // Restore number of arguments.
-  __ Pop(r2);
-  __ SmiUntag(r2);
-}
-
 // StringCharCodeAtGenerator
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   // If the receiver is a smi trigger the non-string case.
@@ -2128,82 +1893,6 @@
   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
 }
 
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
-  // Fast case of Heap::LookupSingleCharacterStringFromCode.
-  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
-  __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU));
-  __ OrP(r0, r0, Operand(kSmiTagMask));
-  __ AndP(r0, code_, r0);
-  __ bne(&slow_case_);
-
-  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
-  // At this point code register contains smi tagged one-byte char code.
-  __ LoadRR(r0, code_);
-  __ SmiToPtrArrayOffset(code_, code_);
-  __ AddP(result_, code_);
-  __ LoadRR(code_, r0);
-  __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
-  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
-  __ beq(&slow_case_);
-  __ bind(&exit_);
-}
-
-void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
-  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
-  __ bind(&slow_case_);
-  call_helper.BeforeCall(masm);
-  __ push(code_);
-  __ CallRuntime(Runtime::kStringCharFromCode);
-  __ Move(result_, r2);
-  call_helper.AfterCall(masm);
-  __ b(&exit_);
-
-  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
-enum CopyCharactersFlags { COPY_ASCII = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
-                                          Register src, Register count,
-                                          Register scratch,
-                                          String::Encoding encoding) {
-  if (FLAG_debug_code) {
-    // Check that destination is word aligned.
-    __ mov(r0, Operand(kPointerAlignmentMask));
-    __ AndP(r0, dest);
-    __ Check(eq, kDestinationOfCopyNotAligned, cr0);
-  }
-
-  // Nothing to do for zero characters.
-  Label done;
-  if (encoding == String::TWO_BYTE_ENCODING) {
-    // double the length
-    __ AddP(count, count, count);
-    __ beq(&done, Label::kNear);
-  } else {
-    __ CmpP(count, Operand::Zero());
-    __ beq(&done, Label::kNear);
-  }
-
-  // Copy count bytes from src to dst.
-  Label byte_loop;
-  // TODO(joransiu): Convert into MVC loop
-  __ bind(&byte_loop);
-  __ LoadlB(scratch, MemOperand(src));
-  __ la(src, MemOperand(src, 1));
-  __ stc(scratch, MemOperand(dest));
-  __ la(dest, MemOperand(dest, 1));
-  __ BranchOnCount(count, &byte_loop);
-
-  __ bind(&done);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -2792,83 +2481,6 @@
   __ bne(miss);
 }
 
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(
-    MacroAssembler* masm, Label* miss, Label* done, Register elements,
-    Register name, Register scratch1, Register scratch2) {
-  DCHECK(!elements.is(scratch1));
-  DCHECK(!elements.is(scratch2));
-  DCHECK(!name.is(scratch1));
-  DCHECK(!name.is(scratch2));
-
-  __ AssertName(name);
-
-  // Compute the capacity mask.
-  __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
-  __ SmiUntag(scratch1);  // convert smi to int
-  __ SubP(scratch1, Operand(1));
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ LoadlW(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
-    if (i > 0) {
-      // Add the probe offset (i + i * i) left shifted to avoid right shifting
-      // the hash in a separate instruction. The value hash + i + i * i is right
-      // shifted in the following and instruction.
-      DCHECK(NameDictionary::GetProbeOffset(i) <
-             1 << (32 - Name::kHashFieldOffset));
-      __ AddP(scratch2,
-              Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
-    }
-    __ srl(scratch2, Operand(String::kHashShift));
-    __ AndP(scratch2, scratch1);
-
-    // Scale the index by multiplying by the entry size.
-    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
-    // scratch2 = scratch2 * 3.
-    __ ShiftLeftP(ip, scratch2, Operand(1));
-    __ AddP(scratch2, ip);
-
-    // Check if the key is identical to the name.
-    __ ShiftLeftP(ip, scratch2, Operand(kPointerSizeLog2));
-    __ AddP(scratch2, elements, ip);
-    __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
-    __ CmpP(name, ip);
-    __ beq(done);
-  }
-
-  const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
-                          r4.bit() | r3.bit() | r2.bit()) &
-                         ~(scratch1.bit() | scratch2.bit());
-
-  __ LoadRR(r0, r14);
-  __ MultiPush(spill_mask);
-  if (name.is(r2)) {
-    DCHECK(!elements.is(r3));
-    __ LoadRR(r3, name);
-    __ LoadRR(r2, elements);
-  } else {
-    __ LoadRR(r2, elements);
-    __ LoadRR(r3, name);
-  }
-  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
-  __ CallStub(&stub);
-  __ LoadRR(r1, r2);
-  __ LoadRR(scratch2, r4);
-  __ MultiPop(spill_mask);
-  __ LoadRR(r14, r0);
-
-  __ CmpP(r1, Operand::Zero());
-  __ bne(done);
-  __ beq(miss);
-}
-
 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   // we cannot call anything that could cause a GC from this stub.
@@ -3139,246 +2751,6 @@
   __ Ret();
 }
 
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(r4);
-  CallICStub stub(isolate(), state());
-  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
-                             Register receiver_map, Register scratch1,
-                             Register scratch2, bool is_polymorphic,
-                             Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label start_polymorphic;
-
-  Register cached_map = scratch1;
-
-  __ LoadP(cached_map,
-           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ CmpP(receiver_map, cached_map);
-  __ bne(&start_polymorphic, Label::kNear);
-  // found, now call handler.
-  Register handler = feedback;
-  __ LoadP(handler,
-           FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(ip);
-
-  Register length = scratch2;
-  __ bind(&start_polymorphic);
-  __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-  if (!is_polymorphic) {
-    // If the IC could be monomorphic we have to make sure we don't go past the
-    // end of the feedback array.
-    __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
-    __ beq(miss);
-  }
-
-  Register too_far = length;
-  Register pointer_reg = feedback;
-
-  // +-----+------+------+-----+-----+ ... ----+
-  // | map | len  | wm0  | h0  | wm1 |      hN |
-  // +-----+------+------+-----+-----+ ... ----+
-  //                 0      1     2        len-1
-  //                              ^              ^
-  //                              |              |
-  //                         pointer_reg      too_far
-  //                         aka feedback     scratch2
-  // also need receiver_map
-  // use cached_map (scratch1) to look in the weak map values.
-  __ SmiToPtrArrayOffset(r0, length);
-  __ AddP(too_far, feedback, r0);
-  __ AddP(too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ AddP(pointer_reg, feedback,
-          Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
-  __ bind(&next_loop);
-  __ LoadP(cached_map, MemOperand(pointer_reg));
-  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ CmpP(receiver_map, cached_map);
-  __ bne(&prepare_next, Label::kNear);
-  __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
-  __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(ip);
-
-  __ bind(&prepare_next);
-  __ AddP(pointer_reg, Operand(kPointerSize * 2));
-  __ CmpP(pointer_reg, too_far);
-  __ blt(&next_loop, Label::kNear);
-
-  // We exhausted our array of map handler pairs.
-  __ b(miss);
-}
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
-                                  Register receiver_map, Register feedback,
-                                  Register vector, Register slot,
-                                  Register scratch, Label* compare_map,
-                                  Label* load_smi_map, Label* try_array) {
-  __ JumpIfSmi(receiver, load_smi_map);
-  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bind(compare_map);
-  Register cached_map = scratch;
-  // Move the weak map into the weak_cell register.
-  __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
-  __ CmpP(cached_map, receiver_map);
-  __ bne(try_array);
-  Register handler = feedback;
-  __ SmiToPtrArrayOffset(r1, slot);
-  __ LoadP(handler,
-           FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
-  __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(ip);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  KeyedStoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
-                                       Register receiver_map, Register scratch1,
-                                       Register scratch2, Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label start_polymorphic;
-  Label transition_call;
-
-  Register cached_map = scratch1;
-  Register too_far = scratch2;
-  Register pointer_reg = feedback;
-  __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
-  // +-----+------+------+-----+-----+-----+ ... ----+
-  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
-  // +-----+------+------+-----+-----+ ----+ ... ----+
-  //                 0      1     2              len-1
-  //                 ^                                 ^
-  //                 |                                 |
-  //             pointer_reg                        too_far
-  //             aka feedback                       scratch2
-  // also need receiver_map
-  // use cached_map (scratch1) to look in the weak map values.
-  __ SmiToPtrArrayOffset(r0, too_far);
-  __ AddP(too_far, feedback, r0);
-  __ AddP(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ AddP(pointer_reg, feedback,
-          Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
-  __ bind(&next_loop);
-  __ LoadP(cached_map, MemOperand(pointer_reg));
-  __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
-  __ CmpP(receiver_map, cached_map);
-  __ bne(&prepare_next);
-  // Is it a transitioning store?
-  __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
-  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
-  __ bne(&transition_call);
-  __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
-  __ AddP(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(ip);
-
-  __ bind(&transition_call);
-  __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
-  __ JumpIfSmi(too_far, miss);
-
-  __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-
-  // Load the map into the correct register.
-  DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
-  __ LoadRR(feedback, too_far);
-
-  __ AddP(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(ip);
-
-  __ bind(&prepare_next);
-  __ AddP(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
-  __ CmpLogicalP(pointer_reg, too_far);
-  __ blt(&next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ b(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // r3
-  Register key = StoreWithVectorDescriptor::NameRegister();           // r4
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // r5
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // r6
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r2));          // r2
-  Register feedback = r7;
-  Register receiver_map = r8;
-  Register scratch1 = r9;
-
-  __ SmiToPtrArrayOffset(r0, slot);
-  __ AddP(feedback, vector, r0);
-  __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
-                        scratch1, &compare_map, &load_smi_map, &try_array);
-
-  __ bind(&try_array);
-  // Is it a fixed array?
-  __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
-  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
-  __ bne(&not_array);
-
-  // We have a polymorphic element handler.
-  Label polymorphic, try_poly_name;
-  __ bind(&polymorphic);
-
-  Register scratch2 = ip;
-
-  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
-                             &miss);
-
-  __ bind(&not_array);
-  // Is it generic?
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ bne(&try_poly_name);
-  Handle<Code> megamorphic_stub =
-      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ CmpP(key, feedback);
-  __ bne(&miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ SmiToPtrArrayOffset(r0, slot);
-  __ AddP(feedback, vector, r0);
-  __ LoadP(feedback,
-           FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
-                   &miss);
-
-  __ bind(&miss);
-  KeyedStoreIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ b(&compare_map);
-}
-
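// Editor's note (illustrative): GenerateImpl above probes the feedback
// slot in a fixed order. A hedged C++ restatement of that order; the enum
// and parameter names are this sketch's own, not V8 API:
enum class StoreOutcome { kMonomorphic, kPolymorphic, kMegamorphic, kKeyedByName, kMiss };

StoreOutcome DispatchKeyedStore(bool weak_cell_matches_map, bool is_fixed_array,
                                bool is_megamorphic_symbol, bool key_matches_name) {
  if (weak_cell_matches_map) return StoreOutcome::kMonomorphic;  // fast path
  if (is_fixed_array) return StoreOutcome::kPolymorphic;         // try_array
  if (is_megamorphic_symbol) return StoreOutcome::kMegamorphic;  // generic stub
  // try_poly_name: a name in this slot means the map/handler array lives
  // in the next feedback slot.
  if (key_matches_name) return StoreOutcome::kKeyedByName;
  return StoreOutcome::kMiss;
}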
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     PredictableCodeSizeScope predictable(masm,
@@ -3754,654 +3126,6 @@
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r3 : target
-  //  -- r5 : new target
-  //  -- cp : context
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(r3);
-  __ AssertReceiver(r5);
-
-  // Verify that the new target is a JSFunction.
-  Label new_object;
-  __ CompareObjectType(r5, r4, r4, JS_FUNCTION_TYPE);
-  __ bne(&new_object);
-
-  // Load the initial map and verify that it's in fact a map.
-  __ LoadP(r4, FieldMemOperand(r5, JSFunction::kPrototypeOrInitialMapOffset));
-  __ JumpIfSmi(r4, &new_object);
-  __ CompareObjectType(r4, r2, r2, MAP_TYPE);
-  __ bne(&new_object);
-
-  // Fall back to runtime if the target differs from the new target's
-  // initial map constructor.
-  __ LoadP(r2, FieldMemOperand(r4, Map::kConstructorOrBackPointerOffset));
-  __ CmpP(r2, r3);
-  __ bne(&new_object);
-
-  // Allocate the JSObject on the heap.
-  Label allocate, done_allocate;
-  __ LoadlB(r6, FieldMemOperand(r4, Map::kInstanceSizeOffset));
-  __ Allocate(r6, r2, r7, r8, &allocate, SIZE_IN_WORDS);
-  __ bind(&done_allocate);
-
-  // Initialize the JSObject fields.
-  __ StoreP(r4, FieldMemOperand(r2, JSObject::kMapOffset));
-  __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
-  __ StoreP(r5, FieldMemOperand(r2, JSObject::kPropertiesOffset));
-  __ StoreP(r5, FieldMemOperand(r2, JSObject::kElementsOffset));
-  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ AddP(r3, r2, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
-  // ----------- S t a t e -------------
-  //  -- r2 : result (tagged)
-  //  -- r3 : result fields (untagged)
-  //  -- r7 : result end (untagged)
-  //  -- r4 : initial map
-  //  -- cp : context
-  //  -- lr : return address
-  // -----------------------------------
-
-  // Perform in-object slack tracking if requested.
-  Label slack_tracking;
-  STATIC_ASSERT(Map::kNoSlackTracking == 0);
-  __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
-  __ LoadlW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
-  __ DecodeField<Map::ConstructionCounter>(r9, r5);
-  __ LoadAndTestP(r9, r9);
-  __ bne(&slack_tracking);
-  {
-    // Initialize all in-object fields with undefined.
-    __ InitializeFieldsWithFiller(r3, r7, r8);
-
-    __ Ret();
-  }
-  __ bind(&slack_tracking);
-  {
-    // Decrease generous allocation count.
-    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
-    __ Add32(r5, r5, Operand(-(1 << Map::ConstructionCounter::kShift)));
-    __ StoreW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
-
-    // Initialize the in-object fields with undefined.
-    __ LoadlB(r6, FieldMemOperand(r4, Map::kUnusedPropertyFieldsOffset));
-    __ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
-    __ SubP(r6, r7, r6);
-    __ InitializeFieldsWithFiller(r3, r6, r8);
-
-    // Initialize the remaining (reserved) fields with one pointer filler map.
-    __ LoadRoot(r8, Heap::kOnePointerFillerMapRootIndex);
-    __ InitializeFieldsWithFiller(r3, r7, r8);
-
-    // Check if we can finalize the instance size.
-    __ CmpP(r9, Operand(Map::kSlackTrackingCounterEnd));
-    __ Ret(ne);
-
-    // Finalize the instance size.
-    {
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ Push(r2, r4);
-      __ CallRuntime(Runtime::kFinalizeInstanceSize);
-      __ Pop(r2);
-    }
-    __ Ret();
-  }
-
-  // Fall back to %AllocateInNewSpace.
-  __ bind(&allocate);
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    STATIC_ASSERT(kSmiTag == 0);
-    __ ShiftLeftP(r6, r6,
-                  Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
-    __ Push(r4, r6);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ Pop(r4);
-  }
-  __ LoadlB(r7, FieldMemOperand(r4, Map::kInstanceSizeOffset));
-  __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
-  __ AddP(r7, r2, r7);
-  __ SubP(r7, r7, Operand(kHeapObjectTag));
-  __ b(&done_allocate);
-
-  // Fall back to %NewObject.
-  __ bind(&new_object);
-  __ Push(r3, r5);
-  __ TailCallRuntime(Runtime::kNewObject);
-}
-
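// Editor's note (illustrative): the slack-tracking branch above decrements
// a counter packed into the map's bit field 3 and finalizes the instance
// size once the counter reaches its end value. A toy C++ model of the
// counter arithmetic; the shift, mask, and end value are assumptions:
#include <cstdint>

constexpr uint32_t kCounterShift = 24;            // assumed bit position
constexpr uint32_t kCounterMask = 0xFFu << kCounterShift;
constexpr uint32_t kSlackTrackingCounterEnd = 1;  // assumed terminal value

// Returns true when this construction should finalize the instance size.
bool DecrementConstructionCounter(uint32_t* bit_field3) {
  uint32_t counter = (*bit_field3 & kCounterMask) >> kCounterShift;
  if (counter == 0) return false;       // no slack tracking in progress
  *bit_field3 -= 1u << kCounterShift;   // mirrors Add32(r5, -(1 << kShift))
  return counter == kSlackTrackingCounterEnd;
}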
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r3 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(r3);
-
-  // Make r4 point to the JavaScript frame.
-  __ LoadRR(r4, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
-    __ CmpP(ip, r3);
-    __ beq(&ok, Label::kNear);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have rest parameters (only possible if we have an
-  // arguments adaptor frame below the function frame).
-  Label no_rest_parameters;
-  __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
-  __ bne(&no_rest_parameters);
-
-  // Check if the arguments adaptor frame contains more arguments than
-  // specified by the function's internal formal parameter count.
-  Label rest_parameters;
-  __ LoadP(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ LoadP(r5, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadW(
-      r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
-#if V8_TARGET_ARCH_S390X
-  __ SmiTag(r5);
-#endif
-  __ SubP(r2, r2, r5);
-  __ bgt(&rest_parameters);
-
-  // Return an empty rest parameter array.
-  __ bind(&no_rest_parameters);
-  {
-    // ----------- S t a t e -------------
-    //  -- cp : context
-    //  -- lr : return address
-    // -----------------------------------
-
-    // Allocate an empty rest parameter array.
-    Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, r2, r3, r4, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Set up the rest parameter array in r2.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3);
-    __ StoreP(r3, FieldMemOperand(r2, JSArray::kMapOffset), r0);
-    __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
-    __ StoreP(r3, FieldMemOperand(r2, JSArray::kPropertiesOffset), r0);
-    __ StoreP(r3, FieldMemOperand(r2, JSArray::kElementsOffset), r0);
-    __ LoadImmP(r3, Operand::Zero());
-    __ StoreP(r3, FieldMemOperand(r2, JSArray::kLengthOffset), r0);
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ Ret();
-
-    // Fall back to %AllocateInNewSpace.
-    __ bind(&allocate);
-    {
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ Push(Smi::FromInt(JSArray::kSize));
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-    }
-    __ b(&done_allocate);
-  }
-
-  __ bind(&rest_parameters);
-  {
-    // Compute the pointer to the first rest parameter (skipping the receiver).
-    __ SmiToPtrArrayOffset(r8, r2);
-    __ AddP(r4, r4, r8);
-    __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset));
-
-    // ----------- S t a t e -------------
-    //  -- cp : context
-    //  -- r2 : number of rest parameters (tagged)
-    //  -- r3 : function
-    //  -- r4 : pointer just past the first rest parameter
-    //  -- r8 : size of rest parameters
-    //  -- lr : return address
-    // -----------------------------------
-
-    // Allocate space for the rest parameter array plus the backing store.
-    Label allocate, done_allocate;
-    __ mov(r9, Operand(JSArray::kSize + FixedArray::kHeaderSize));
-    __ AddP(r9, r9, r8);
-    __ Allocate(r9, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Set up the elements array in r5.
-    __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
-    __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0);
-    __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0);
-    __ AddP(r6, r5,
-            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-    {
-      Label loop;
-      __ SmiUntag(r1, r2);
-      // (BranchOnCount below replaces PPC's mtctr/bdnz loop pattern.)
-      __ bind(&loop);
-      __ lay(r4, MemOperand(r4, -kPointerSize));
-      __ LoadP(ip, MemOperand(r4));
-      __ la(r6, MemOperand(r6, kPointerSize));
-      __ StoreP(ip, MemOperand(r6));
-      __ BranchOnCount(r1, &loop);
-      __ AddP(r6, r6, Operand(kPointerSize));
-    }
-
-    // Set up the rest parameter array in r6.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3);
-    __ StoreP(r3, MemOperand(r6, JSArray::kMapOffset));
-    __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
-    __ StoreP(r3, MemOperand(r6, JSArray::kPropertiesOffset));
-    __ StoreP(r5, MemOperand(r6, JSArray::kElementsOffset));
-    __ StoreP(r2, MemOperand(r6, JSArray::kLengthOffset));
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ AddP(r2, r6, Operand(kHeapObjectTag));
-    __ Ret();
-
-    // Fall back to %AllocateInNewSpace (if not too big).
-    Label too_big_for_new_space;
-    __ bind(&allocate);
-    __ CmpP(r9, Operand(kMaxRegularHeapObjectSize));
-    __ bgt(&too_big_for_new_space);
-    {
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(r9);
-      __ Push(r2, r4, r9);
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-      __ LoadRR(r5, r2);
-      __ Pop(r2, r4);
-    }
-    __ b(&done_allocate);
-
-    // Fall back to %NewRestParameter.
-    __ bind(&too_big_for_new_space);
-    __ push(r3);
-    __ TailCallRuntime(Runtime::kNewRestParameter);
-  }
-}
-
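// Editor's note (illustrative): the stub above only builds a backing store
// when the adaptor frame supplied more arguments than the formal parameter
// count. A sketch of that size arithmetic; the layout constants are
// assumptions standing in for the target-specific values:
#include <cstddef>

constexpr size_t kPtrSize = 8;                      // assumed 64-bit target
constexpr size_t kJSArraySize = 4 * kPtrSize;       // map, properties, elements, length
constexpr size_t kFixedArrayHeader = 2 * kPtrSize;  // map, length

size_t RestParameterAllocationSize(size_t actual_args, size_t formal_params) {
  size_t rest = actual_args > formal_params ? actual_args - formal_params : 0;
  if (rest == 0) return kJSArraySize;  // empty rest array, no backing store
  return kJSArraySize + kFixedArrayHeader + rest * kPtrSize;  // r9 in the stub
}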
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r3 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(r3);
-
-  // Make r9 point to the JavaScript frame.
-  __ LoadRR(r9, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ LoadP(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ LoadP(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
-    __ CmpP(ip, r3);
-    __ beq(&ok, Label::kNear);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
-  __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadW(
-      r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
-#if V8_TARGET_ARCH_S390X
-  __ SmiTag(r4);
-#endif
-  __ SmiToPtrArrayOffset(r5, r4);
-  __ AddP(r5, r9, r5);
-  __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // r3 : function
-  // r4 : number of parameters (tagged)
-  // r5 : parameters pointer
-  // r9 : JavaScript frame pointer
-  // Registers used over whole function:
-  // r7 : arguments count (tagged)
-  // r8 : mapped parameter count (tagged)
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ LoadP(r6, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(r2, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ CmpSmiLiteral(r2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
-  __ beq(&adaptor_frame);
-
-  // No adaptor, parameter count = argument count.
-  __ LoadRR(r7, r4);
-  __ LoadRR(r8, r4);
-  __ b(&try_allocate);
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ LoadP(r7, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ SmiToPtrArrayOffset(r5, r7);
-  __ AddP(r5, r5, r6);
-  __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // r7 = argument count (tagged)
-  // r8 = parameter count (tagged)
-  // Compute the mapped parameter count = min(r4, r7) in r8.
-  __ CmpP(r4, r7);
-  Label skip;
-  __ LoadRR(r8, r4);
-  __ blt(&skip);
-  __ LoadRR(r8, r7);
-  __ bind(&skip);
-
-  __ bind(&try_allocate);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map, has 2 extra words containing context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  // If there are no mapped parameters, we do not need the parameter_map.
-  __ CmpSmiLiteral(r8, Smi::kZero, r0);
-  Label skip2, skip3;
-  __ bne(&skip2);
-  __ LoadImmP(r1, Operand::Zero());
-  __ b(&skip3);
-  __ bind(&skip2);
-  __ SmiToPtrArrayOffset(r1, r8);
-  __ AddP(r1, r1, Operand(kParameterMapHeaderSize));
-  __ bind(&skip3);
-
-  // 2. Backing store.
-  __ SmiToPtrArrayOffset(r6, r7);
-  __ AddP(r1, r1, r6);
-  __ AddP(r1, r1, Operand(FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ AddP(r1, r1, Operand(JSSloppyArgumentsObject::kSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(r1, r2, r1, r6, &runtime, NO_ALLOCATION_FLAGS);
-
-  // r2 = address of new object(s) (tagged)
-  // r4 = argument count (smi-tagged)
-  // Get the arguments boilerplate from the current native context into r3.
-  const int kNormalOffset =
-      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
-  const int kAliasedOffset =
-      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
-
-  __ LoadP(r6, NativeContextMemOperand());
-  __ CmpP(r8, Operand::Zero());
-  Label skip4, skip5;
-  __ bne(&skip4);
-  __ LoadP(r6, MemOperand(r6, kNormalOffset));
-  __ b(&skip5);
-  __ bind(&skip4);
-  __ LoadP(r6, MemOperand(r6, kAliasedOffset));
-  __ bind(&skip5);
-
-  // r2 = address of new object (tagged)
-  // r4 = argument count (smi-tagged)
-  // r6 = address of arguments map (tagged)
-  // r8 = mapped parameter count (tagged)
-  __ StoreP(r6, FieldMemOperand(r2, JSObject::kMapOffset), r0);
-  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
-  __ StoreP(r1, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
-  __ StoreP(r1, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
-
-  // Set up the callee in-object property.
-  __ AssertNotSmi(r3);
-  __ StoreP(r3, FieldMemOperand(r2, JSSloppyArgumentsObject::kCalleeOffset),
-            r0);
-
-  // Use the length (smi-tagged) and set it as an in-object property too.
-  __ AssertSmi(r7);
-  __ StoreP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset),
-            r0);
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, r6 will point there, otherwise
-  // it will point to the backing store.
-  __ AddP(r6, r2, Operand(JSSloppyArgumentsObject::kSize));
-  __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
-
-  // r2 = address of new object (tagged)
-  // r4 = argument count (tagged)
-  // r6 = address of parameter map or backing store (tagged)
-  // r8 = mapped parameter count (tagged)
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  __ CmpSmiLiteral(r8, Smi::kZero, r0);
-  Label skip6;
-  __ bne(&skip6);
-  // Move backing store address to r3, because it is
-  // expected there when filling in the unmapped arguments.
-  __ LoadRR(r3, r6);
-  __ b(&skip_parameter_map);
-  __ bind(&skip6);
-
-  __ LoadRoot(r7, Heap::kSloppyArgumentsElementsMapRootIndex);
-  __ StoreP(r7, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
-  __ AddSmiLiteral(r7, r8, Smi::FromInt(2), r0);
-  __ StoreP(r7, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
-  __ StoreP(cp, FieldMemOperand(r6, FixedArray::kHeaderSize + 0 * kPointerSize),
-            r0);
-  __ SmiToPtrArrayOffset(r7, r8);
-  __ AddP(r7, r7, r6);
-  __ AddP(r7, r7, Operand(kParameterMapHeaderSize));
-  __ StoreP(r7, FieldMemOperand(r6, FixedArray::kHeaderSize + 1 * kPointerSize),
-            r0);
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameters thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop;
-  __ LoadRR(r7, r8);
-  __ AddSmiLiteral(r1, r4, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
-  __ SubP(r1, r1, r8);
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ SmiToPtrArrayOffset(r3, r7);
-  __ AddP(r3, r3, r6);
-  __ AddP(r3, r3, Operand(kParameterMapHeaderSize));
-
-  // r3 = address of backing store (tagged)
-  // r6 = address of parameter map (tagged)
-  // r7 = temporary scratch (e.g., for address calculation)
-  // r9 = temporary scratch (e.g., for address calculation)
-  // ip = the hole value
-  __ SmiUntag(r7);
-  __ push(r4);
-  __ LoadRR(r4, r7);
-  __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
-  __ AddP(r9, r3, r7);
-  __ AddP(r7, r6, r7);
-  __ AddP(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ AddP(r7, r7, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-
-  __ bind(&parameters_loop);
-  __ StoreP(r1, MemOperand(r7, -kPointerSize));
-  __ lay(r7, MemOperand(r7, -kPointerSize));
-  __ StoreP(ip, MemOperand(r9, -kPointerSize));
-  __ lay(r9, MemOperand(r9, -kPointerSize));
-  __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0);
-  __ BranchOnCount(r4, &parameters_loop);
-  __ pop(r4);
-
-  // Restore r7 = argument count (tagged).
-  __ LoadP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset));
-
-  __ bind(&skip_parameter_map);
-  // r2 = address of new object (tagged)
-  // r3 = address of backing store (tagged)
-  // r7 = argument count (tagged)
-  // r8 = mapped parameter count (tagged)
-  // r1 = scratch
-  // Copy arguments header and remaining slots (if there are any).
-  __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
-  __ StoreP(r1, FieldMemOperand(r3, FixedArray::kMapOffset), r0);
-  __ StoreP(r7, FieldMemOperand(r3, FixedArray::kLengthOffset), r0);
-  __ SubP(r1, r7, r8);
-  __ Ret(eq);
-
-  Label arguments_loop;
-  __ SmiUntag(r1);
-  __ LoadRR(r4, r1);
-
-  __ SmiToPtrArrayOffset(r0, r8);
-  __ SubP(r5, r5, r0);
-  __ AddP(r1, r3, r0);
-  __ AddP(r1, r1,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-
-  __ bind(&arguments_loop);
-  __ LoadP(r6, MemOperand(r5, -kPointerSize));
-  __ lay(r5, MemOperand(r5, -kPointerSize));
-  __ StoreP(r6, MemOperand(r1, kPointerSize));
-  __ la(r1, MemOperand(r1, kPointerSize));
-  __ BranchOnCount(r4, &arguments_loop);
-
-  // Return.
-  __ Ret();
-
-  // Do the runtime call to allocate the arguments object.
-  // r7 = argument count (tagged)
-  __ bind(&runtime);
-  __ Push(r3, r5, r7);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
-
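// Editor's note (illustrative): the single allocation above covers three
// objects: the optional parameter map (header plus two extra words for
// context and backing store), the backing store, and the arguments object.
// A hedged restatement of the size computation; only the "+ 2 words"
// structure is taken from the deleted code, the byte values are assumed:
#include <algorithm>
#include <cstddef>

constexpr size_t kPtr = 8;                   // assumed 64-bit target
constexpr size_t kFixedArrayHdr = 2 * kPtr;  // map, length
constexpr size_t kSloppyArgumentsObjectSize = 5 * kPtr;  // assumed object size

size_t SloppyArgumentsAllocationSize(size_t arg_count, size_t param_count) {
  size_t mapped = std::min(arg_count, param_count);
  size_t map_size =
      mapped == 0 ? 0 : kFixedArrayHdr + 2 * kPtr + mapped * kPtr;  // 1. map
  size_t store_size = kFixedArrayHdr + arg_count * kPtr;            // 2. store
  return map_size + store_size + kSloppyArgumentsObjectSize;        // 3. object
}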
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r3 : function
-  //  -- cp : context
-  //  -- fp : frame pointer
-  //  -- lr : return address
-  // -----------------------------------
-  __ AssertFunction(r3);
-
-  // Make r4 point to the JavaScript frame.
-  __ LoadRR(r4, fp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kFunctionOffset));
-    __ CmpP(ip, r3);
-    __ beq(&ok, Label::kNear);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have an arguments adaptor frame below the function frame.
-  Label arguments_adaptor, arguments_done;
-  __ LoadP(r5, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
-  __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
-  __ beq(&arguments_adaptor);
-  {
-    __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadW(r2, FieldMemOperand(
-                     r6, SharedFunctionInfo::kFormalParameterCountOffset));
-#if V8_TARGET_ARCH_S390X
-    __ SmiTag(r2);
-#endif
-    __ SmiToPtrArrayOffset(r8, r2);
-    __ AddP(r4, r4, r8);
-  }
-  __ b(&arguments_done);
-  __ bind(&arguments_adaptor);
-  {
-    __ LoadP(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ SmiToPtrArrayOffset(r8, r2);
-    __ AddP(r4, r5, r8);
-  }
-  __ bind(&arguments_done);
-  __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset));
-
-  // ----------- S t a t e -------------
-  //  -- cp : context
-  //  -- r2 : number of rest parameters (tagged)
-  //  -- r3 : function
-  //  -- r4 : pointer just past the first rest parameter
-  //  -- r8 : size of rest parameters
-  //  -- lr : return address
-  // -----------------------------------
-
-  // Allocate space for the strict arguments object plus the backing store.
-  Label allocate, done_allocate;
-  __ mov(r9, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ AddP(r9, r9, r8);
-  __ Allocate(r9, r5, r6, r7, &allocate, NO_ALLOCATION_FLAGS);
-  __ bind(&done_allocate);
-
-  // Set up the elements array in r5.
-  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
-  __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0);
-  __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0);
-  __ AddP(r6, r5,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-  {
-    Label loop, done_loop;
-    __ SmiUntag(r1, r2);
-    __ LoadAndTestP(r1, r1);
-    __ beq(&done_loop);
-    __ bind(&loop);
-    __ lay(r4, MemOperand(r4, -kPointerSize));
-    __ LoadP(ip, MemOperand(r4));
-    __ la(r6, MemOperand(r6, kPointerSize));
-    __ StoreP(ip, MemOperand(r6));
-    __ BranchOnCount(r1, &loop);
-    __ bind(&done_loop);
-    __ AddP(r6, r6, Operand(kPointerSize));
-  }
-
-  // Set up the rest parameter array in r6.
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r3);
-  __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kMapOffset));
-  __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
-  __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kPropertiesOffset));
-  __ StoreP(r5, MemOperand(r6, JSStrictArgumentsObject::kElementsOffset));
-  __ StoreP(r2, MemOperand(r6, JSStrictArgumentsObject::kLengthOffset));
-  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
-  __ AddP(r2, r6, Operand(kHeapObjectTag));
-  __ Ret();
-
-  // Fall back to %AllocateInNewSpace (if not too big).
-  Label too_big_for_new_space;
-  __ bind(&allocate);
-  __ CmpP(r9, Operand(kMaxRegularHeapObjectSize));
-  __ bgt(&too_big_for_new_space);
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(r9);
-    __ Push(r2, r4, r9);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ LoadRR(r5, r2);
-    __ Pop(r2, r4);
-  }
-  __ b(&done_allocate);
-
-  // Fall back to %NewStrictArguments.
-  __ bind(&too_big_for_new_space);
-  __ push(r3);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
-
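// Editor's note (illustrative): every deleted FastNew* stub above shares
// the same fallback ladder: inline bump allocation, then the
// %AllocateInNewSpace runtime call while the object is still a regular
// heap object, then a full runtime constructor such as
// Runtime::kNewStrictArguments. A compact sketch of that decision:
#include <cstddef>

enum class AllocPath { kInline, kRuntimeNewSpace, kFullRuntime };

AllocPath ChooseAllocationPath(size_t size_in_bytes, bool inline_alloc_failed,
                               size_t max_regular_heap_object_size) {
  if (!inline_alloc_failed) return AllocPath::kInline;
  if (size_in_bytes <= max_regular_heap_object_size)
    return AllocPath::kRuntimeNewSpace;  // not too_big_for_new_space
  return AllocPath::kFullRuntime;        // tail-call into the runtime
}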
 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
   return ref0.address() - ref1.address();
 }
diff --git a/src/s390/code-stubs-s390.h b/src/s390/code-stubs-s390.h
index 461e569..c599308 100644
--- a/src/s390/code-stubs-s390.h
+++ b/src/s390/code-stubs-s390.h
@@ -14,15 +14,6 @@
 
 class StringHelper : public AllStatic {
  public:
-  // Generate code for copying a large number of characters. This function
-  // is allowed to spend extra time setting up conditions to make copying
-  // faster. Copying of overlapping regions is not supported.
-  // Dest register ends at the position after the last character written.
-  static void GenerateCopyCharacters(MacroAssembler* masm, Register dest,
-                                     Register src, Register count,
-                                     Register scratch,
-                                     String::Encoding encoding);
-
   // Compares two flat one-byte strings and returns result in r0.
   static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
                                                 Register left, Register right,
@@ -321,10 +312,6 @@
                                      Register properties, Handle<Name> name,
                                      Register scratch0);
 
-  static void GeneratePositiveLookup(MacroAssembler* masm, Label* miss,
-                                     Label* done, Register elements,
-                                     Register name, Register r0, Register r1);
-
   bool SometimesSetsUpAFrame() override { return false; }
 
  private:
diff --git a/src/s390/codegen-s390.cc b/src/s390/codegen-s390.cc
index d92cc54..6b84200 100644
--- a/src/s390/codegen-s390.cc
+++ b/src/s390/codegen-s390.cc
@@ -66,310 +66,13 @@
 
 #define __ ACCESS_MASM(masm)
 
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm, Register receiver, Register key, Register value,
-    Register target_map, AllocationSiteMode mode,
-    Label* allocation_memento_found) {
-  Register scratch_elements = r6;
-  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    DCHECK(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r1,
-                                         allocation_memento_found);
-  }
-
-  // Set transitioned map.
-  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm, Register receiver, Register key, Register value,
-    Register target_map, AllocationSiteMode mode, Label* fail) {
-  // lr contains the return address
-  Label loop, entry, convert_hole, gc_required, only_change_map, done;
-  Register elements = r6;
-  Register length = r7;
-  Register array = r8;
-  Register array_end = array;
-
-  // target_map parameter can be clobbered.
-  Register scratch1 = target_map;
-  Register scratch2 = r1;
-
-  // Verify input registers don't conflict with locals.
-  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
-                     scratch2));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch2, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
-  __ beq(&only_change_map, Label::kNear);
-
-  // Preserve lr and use r14 as a temporary register.
-  __ push(r14);
-
-  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  // length: number of elements (smi-tagged)
-
-  // Allocate new FixedDoubleArray.
-  __ SmiToDoubleArrayOffset(r14, length);
-  __ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
-  __ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
-  __ SubP(array, array, Operand(kHeapObjectTag));
-  // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
-  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
-  // Update receiver's map.
-  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
-
-  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
-                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ AddP(scratch1, array, Operand(kHeapObjectTag));
-  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
-                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  // Prepare for conversion loop.
-  __ AddP(target_map, elements,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
-  __ SmiToDoubleArrayOffset(array, length);
-  __ AddP(array_end, r9, array);
-// Repurpose registers no longer in use.
-#if V8_TARGET_ARCH_S390X
-  Register hole_int64 = elements;
-#else
-  Register hole_lower = elements;
-  Register hole_upper = length;
-#endif
-  // scratch1: begin of source FixedArray element fields, not tagged
-  // hole_lower: kHoleNanLower32 OR hole_int64
-  // hole_upper: kHoleNanUpper32
-  // array_end: end of destination FixedDoubleArray, not tagged
-  // scratch2: begin of FixedDoubleArray element fields, not tagged
-
-  __ b(&entry, Label::kNear);
-
-  __ bind(&only_change_map);
-  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ b(&done, Label::kNear);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ pop(r14);
-  __ b(fail);
-
-  // Convert and copy elements.
-  __ bind(&loop);
-  __ LoadP(r14, MemOperand(scratch1));
-  __ la(scratch1, MemOperand(scratch1, kPointerSize));
-  // r14: current element
-  __ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);
-
-  // Normal smi, convert to double and store.
-  __ ConvertIntToDouble(r14, d0);
-  __ StoreDouble(d0, MemOperand(r9, 0));
-  __ la(r9, MemOperand(r9, 8));
-
-  __ b(&entry, Label::kNear);
-
-  // Hole found, store the-hole NaN.
-  __ bind(&convert_hole);
-  if (FLAG_debug_code) {
-    // Restore a "smi-untagged" heap object.
-    __ LoadP(r1, MemOperand(r5, -kPointerSize));
-    __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
-    __ Assert(eq, kObjectFoundInSmiOnlyArray);
-  }
-#if V8_TARGET_ARCH_S390X
-  __ stg(hole_int64, MemOperand(r9, 0));
-#else
-  __ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
-  __ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
-#endif
-  __ AddP(r9, Operand(8));
-
-  __ bind(&entry);
-  __ CmpP(r9, array_end);
-  __ blt(&loop);
-
-  __ pop(r14);
-  __ bind(&done);
-}
-
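// Editor's note (illustrative): the conversion loop above untags each smi
// into a double and stores a canonical hole-NaN bit pattern where it finds
// the hole. A standalone sketch of both conversions; the tag width and the
// 0xFFF7FFFF pattern are assumptions mirroring kHoleNanUpper32:
#include <cstdint>
#include <cstring>

constexpr uint32_t kHoleNanUpper32Toy = 0xFFF7FFFF;  // assumed marker
constexpr uint32_t kHoleNanLower32Toy = 0xFFF7FFFF;  // assumed marker

double SmiToDouble(intptr_t tagged) {
  return static_cast<double>(tagged >> 1);  // assumed 1-bit smi tag
}

double HoleNan() {  // the raw store the stg/StoreW pair performs
  uint64_t bits = (uint64_t{kHoleNanUpper32Toy} << 32) | kHoleNanLower32Toy;
  double hole;
  std::memcpy(&hole, &bits, sizeof(hole));
  return hole;
}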
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, Register receiver, Register key, Register value,
-    Register target_map, AllocationSiteMode mode, Label* fail) {
-  // Register lr contains the return address.
-  Label loop, convert_hole, gc_required, only_change_map;
-  Register elements = r6;
-  Register array = r8;
-  Register length = r7;
-  Register scratch = r1;
-  Register scratch3 = r9;
-  Register hole_value = r9;
-
-  // Verify input registers don't conflict with locals.
-  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
-                     scratch));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
-  __ beq(&only_change_map);
-
-  __ Push(target_map, receiver, key, value);
-  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  // elements: source FixedDoubleArray
-  // length: number of elements (smi-tagged)
-
-  // Allocate new FixedArray.
-  // Re-use value and target_map registers, as they have been saved on the
-  // stack.
-  Register array_size = value;
-  Register allocate_scratch = target_map;
-  __ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
-  __ SmiToPtrArrayOffset(r0, length);
-  __ AddP(array_size, r0);
-  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
-              NO_ALLOCATION_FLAGS);
-  // array: destination FixedArray, tagged as heap object
-  // Set destination FixedArray's length and map.
-  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
-  __ StoreP(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset),
-            r0);
-  __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);
-
-  // Prepare for conversion loop.
-  Register src_elements = elements;
-  Register dst_elements = target_map;
-  Register dst_end = length;
-  Register heap_number_map = scratch;
-  __ AddP(src_elements,
-          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(length, length);
-  __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
-
-  Label initialization_loop, loop_done;
-  __ ShiftRightP(scratch, length, Operand(kPointerSizeLog2));
-  __ beq(&loop_done, Label::kNear);
-
-  // Allocating heap numbers in the loop below can fail and cause a jump to
-  // gc_required. We can't leave a partly initialized FixedArray behind,
-  // so pessimistically fill it with holes now.
-  __ AddP(dst_elements, array,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
-  __ bind(&initialization_loop);
-  __ StoreP(hole_value, MemOperand(dst_elements, kPointerSize));
-  __ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
-  __ BranchOnCount(scratch, &initialization_loop);
-
-  __ AddP(dst_elements, array,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ AddP(dst_end, dst_elements, length);
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  // Use offset addresses in src_elements to take full advantage of
-  // post-indexing.
-  // dst_elements: begin of destination FixedArray element fields, not tagged
-  // src_elements: begin of source FixedDoubleArray element fields,
-  //               not tagged, +4
-  // dst_end: end of destination FixedArray, not tagged
-  // array: destination FixedArray
-  // hole_value: the-hole pointer
-  // heap_number_map: heap number map
-  __ b(&loop, Label::kNear);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ Pop(target_map, receiver, key, value);
-  __ b(fail);
-
-  // Replace the-hole NaN with the-hole pointer.
-  __ bind(&convert_hole);
-  __ StoreP(hole_value, MemOperand(dst_elements));
-  __ AddP(dst_elements, Operand(kPointerSize));
-  __ CmpLogicalP(dst_elements, dst_end);
-  __ bge(&loop_done);
-
-  __ bind(&loop);
-  Register upper_bits = key;
-  __ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
-  __ AddP(src_elements, Operand(kDoubleSize));
-  // upper_bits: current element's upper 32 bits
-  // src_elements: address of next element's upper 32 bits
-  __ Cmp32(upper_bits, Operand(kHoleNanUpper32));
-  __ beq(&convert_hole, Label::kNear);
-
-  // Non-hole double, copy value into a heap number.
-  Register heap_number = receiver;
-  Register scratch2 = value;
-  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
-                        &gc_required);
-// heap_number: new heap number
-#if V8_TARGET_ARCH_S390X
-  __ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
-  // subtract tag for std
-  __ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
-  __ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
-#else
-  __ LoadlW(scratch2,
-            MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
-  __ LoadlW(upper_bits,
-            MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
-  __ StoreW(scratch2,
-            FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
-  __ StoreW(upper_bits,
-            FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
-#endif
-  __ LoadRR(scratch2, dst_elements);
-  __ StoreP(heap_number, MemOperand(dst_elements));
-  __ AddP(dst_elements, Operand(kPointerSize));
-  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ CmpLogicalP(dst_elements, dst_end);
-  __ blt(&loop);
-  __ bind(&loop_done);
-
-  __ Pop(target_map, receiver, key, value);
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  __ bind(&only_change_map);
-  // Update receiver's map.
-  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
-                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
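// Editor's note (illustrative): GenerateDoubleToObject pre-fills the new
// FixedArray with the hole because boxing a double can trigger GC
// mid-loop, and the collector must never see a half-initialized array. A
// toy C++ analogue of that fill-first-then-overwrite discipline; Value and
// BoxDouble are stand-ins, and the hole test mirrors the upper-32
// comparison against kHoleNanUpper32 in the loop above:
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

struct Value { const void* ptr; };
constexpr Value kTheHole{nullptr};  // stand-in for the hole sentinel

Value BoxDouble(double d) { return Value{new double(d)}; }  // toy heap number

bool IsHoleNanToy(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == 0xFFF7FFFFu;  // upper-32 test
}

std::vector<Value> UnboxDoubleArray(const double* src, size_t n) {
  std::vector<Value> dst(n, kTheHole);  // pessimistic prefill, GC-safe
  for (size_t i = 0; i < n; ++i) {
    if (!IsHoleNanToy(src[i])) dst[i] = BoxDouble(src[i]);  // hole stays hole
  }
  return dst;
}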
 // assume ip can be used as a scratch register below
 void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                        Register index, Register result,
                                        Label* call_runtime) {
+  Label indirect_string_loaded;
+  __ bind(&indirect_string_loaded);
+
   // Fetch the instance type of the receiver into result register.
   __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
   __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -381,19 +84,25 @@
   __ beq(&check_sequential, Label::kNear /*, cr0*/);
 
   // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ mov(ip, Operand(kSlicedNotConsMask));
-  __ LoadRR(r0, result);
-  __ AndP(r0, ip /*, SetRC*/);  // Should be okay to remove RC
-  __ beq(&cons_string, Label::kNear /*, cr0*/);
+  Label cons_string, thin_string;
+  __ LoadRR(ip, result);
+  __ nilf(ip, Operand(kStringRepresentationMask));
+  __ CmpP(ip, Operand(kConsStringTag));
+  __ beq(&cons_string);
+  __ CmpP(ip, Operand(kThinStringTag));
+  __ beq(&thin_string);
 
   // Handle slices.
-  Label indirect_string_loaded;
   __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
   __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
   __ SmiUntag(ip, result);
   __ AddP(index, ip);
-  __ b(&indirect_string_loaded, Label::kNear);
+  __ b(&indirect_string_loaded);
+
+  // Handle thin strings.
+  __ bind(&thin_string);
+  __ LoadP(string, FieldMemOperand(string, ThinString::kActualOffset));
+  __ b(&indirect_string_loaded);
 
   // Handle cons strings.
   // Check whether the right hand side is the empty string (i.e. if
@@ -406,10 +115,7 @@
   __ bne(call_runtime);
   // Get the first of the two strings and load its instance type.
   __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
-  __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+  __ b(&indirect_string_loaded);
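// Editor's note (illustrative): with the thin-string case added, the
// generator is a loop that keeps peeling indirections until a sequential
// or external string remains: slices add their offset and follow the
// parent, thin strings follow the actual string, and flat cons strings
// follow their first part. A hedged C++ restatement with stand-in types:
#include <cstddef>

struct Str {
  enum Kind { kSequential, kExternal, kCons, kSliced, kThin } kind;
  Str* first = nullptr;   // cons: left part
  Str* second = nullptr;  // cons: right part (empty when flat)
  Str* parent = nullptr;  // slice: underlying string
  Str* actual = nullptr;  // thin: forwarded string
  size_t offset = 0;      // slice: start within parent
  bool empty = false;
};

// Returns the direct string for a character load, adjusting *index;
// returns nullptr where the stub branches to call_runtime.
Str* UnwrapForCharLoad(Str* s, size_t* index) {
  for (;;) {  // each iteration is one pass through indirect_string_loaded
    switch (s->kind) {
      case Str::kSliced: *index += s->offset; s = s->parent; break;
      case Str::kThin:   s = s->actual; break;
      case Str::kCons:
        if (!s->second->empty) return nullptr;  // non-flat cons: runtime
        s = s->first;
        break;
      default: return s;  // sequential or external
    }
  }
}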
 
   // Distinguish sequential and external strings. Only these two string
   // representations can reach here (slices and flat cons strings have been
@@ -487,29 +193,25 @@
   return result;
 }
 
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
-                               MarkingParity* parity) {
-  if (IsYoungSequence(isolate, sequence)) {
-    *age = kNoAgeCodeAge;
-    *parity = NO_MARKING_PARITY;
-  } else {
-    Code* code = NULL;
-    Address target_address =
-        Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
-    Code* stub = GetCodeFromTargetAddress(target_address);
-    GetCodeAgeAndParity(stub, age, parity);
-  }
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
+
+  Code* code = NULL;
+  Address target_address =
+      Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
+  Code* stub = GetCodeFromTargetAddress(target_address);
+  return GetAgeOfCodeAgeStub(stub);
 }
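// Editor's note (illustrative): with MarkingParity gone, age is recovered
// purely from the patched call target. Conceptually, code is "young" while
// its prologue still matches the original byte sequence; otherwise the
// prologue has been patched to call an aging stub that encodes the age. A
// toy model of that round trip; the byte values and encoding are this
// sketch's own, not V8's layout:
#include <cstdint>
#include <cstring>

enum ToyAge : uint8_t { kToyNoAge = 0, kToyMature = 1, kToyOld = 2 };

constexpr uint8_t kYoungSequence[4] = {0x90, 0x90, 0x90, 0x90};  // assumed

ToyAge GetCodeAgeToy(const uint8_t* sequence) {
  if (std::memcmp(sequence, kYoungSequence, sizeof(kYoungSequence)) == 0)
    return kToyNoAge;                       // still the young prologue
  return static_cast<ToyAge>(sequence[1]);  // toy encoding: stub-id byte
}

void PatchCodeAgeToy(uint8_t* sequence, ToyAge age) {
  if (age == kToyNoAge) {  // CopyYoungSequenceTo + icache flush
    std::memcpy(sequence, kYoungSequence, sizeof(kYoungSequence));
  } else {
    sequence[0] = 0xE8;    // toy "call aging stub" opcode
    sequence[1] = static_cast<uint8_t>(age);
  }
}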
 
-void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
-                                MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+                                Code::Age age) {
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
     Assembler::FlushICache(isolate, sequence, young_length);
   } else {
     // FIXED_SEQUENCE
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age);
     CodePatcher patcher(isolate, sequence, young_length);
     intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
     // We need to push lr on stack so that GenerateMakeCodeYoungAgainCommon
diff --git a/src/s390/constants-s390.h b/src/s390/constants-s390.h
index 9dfb32c..59bf34b 100644
--- a/src/s390/constants-s390.h
+++ b/src/s390/constants-s390.h
@@ -157,765 +157,1565 @@
 typedef uint32_t FourByteInstr;
 typedef uint64_t SixByteInstr;
 
+#define S390_RSY_A_OPCODE_LIST(V)                                              \
+  V(lmg, LMG, 0xEB04)     /* type = RSY_A LOAD MULTIPLE (64)  */               \
+  V(srag, SRAG, 0xEB0A)   /* type = RSY_A SHIFT RIGHT SINGLE (64)  */          \
+  V(slag, SLAG, 0xEB0B)   /* type = RSY_A SHIFT LEFT SINGLE (64)  */           \
+  V(srlg, SRLG, 0xEB0C)   /* type = RSY_A SHIFT RIGHT SINGLE LOGICAL (64)  */  \
+  V(sllg, SLLG, 0xEB0D)   /* type = RSY_A SHIFT LEFT SINGLE LOGICAL (64)  */   \
+  V(tracg, TRACG, 0xEB0F) /* type = RSY_A TRACE (64)  */                       \
+  V(csy, CSY, 0xEB14)     /* type = RSY_A COMPARE AND SWAP (32)  */            \
+  V(rllg, RLLG, 0xEB1C)   /* type = RSY_A ROTATE LEFT SINGLE LOGICAL (64)  */  \
+  V(rll, RLL, 0xEB1D)     /* type = RSY_A ROTATE LEFT SINGLE LOGICAL (32)  */  \
+  V(stmg, STMG, 0xEB24)   /* type = RSY_A STORE MULTIPLE (64)  */              \
+  V(stctg, STCTG, 0xEB25) /* type = RSY_A STORE CONTROL (64)  */               \
+  V(stmh, STMH, 0xEB26)   /* type = RSY_A STORE MULTIPLE HIGH (32)  */         \
+  V(lctlg, LCTLG, 0xEB2F) /* type = RSY_A LOAD CONTROL (64)  */                \
+  V(csg, CSG, 0xEB30)     /* type = RSY_A COMPARE AND SWAP (64)  */            \
+  V(cdsy, CDSY, 0xEB31)   /* type = RSY_A COMPARE DOUBLE AND SWAP (32)  */     \
+  V(cdsg, CDSG, 0xEB3E)   /* type = RSY_A COMPARE DOUBLE AND SWAP (64)  */     \
+  V(bxhg, BXHG, 0xEB44)   /* type = RSY_A BRANCH ON INDEX HIGH (64)  */        \
+  V(bxleg, BXLEG, 0xEB45) /* type = RSY_A BRANCH ON INDEX LOW OR EQUAL (64) */ \
+  V(ecag, ECAG, 0xEB4C)   /* type = RSY_A EXTRACT CPU ATTRIBUTE  */            \
+  V(mvclu, MVCLU, 0xEB8E) /* type = RSY_A MOVE LONG UNICODE  */                \
+  V(clclu, CLCLU, 0xEB8F) /* type = RSY_A COMPARE LOGICAL LONG UNICODE  */     \
+  V(stmy, STMY, 0xEB90)   /* type = RSY_A STORE MULTIPLE (32)  */              \
+  V(lmh, LMH, 0xEB96)     /* type = RSY_A LOAD MULTIPLE HIGH (32)  */          \
+  V(lmy, LMY, 0xEB98)     /* type = RSY_A LOAD MULTIPLE (32)  */               \
+  V(lamy, LAMY, 0xEB9A)   /* type = RSY_A LOAD ACCESS MULTIPLE  */             \
+  V(stamy, STAMY, 0xEB9B) /* type = RSY_A STORE ACCESS MULTIPLE  */            \
+  V(srak, SRAK, 0xEBDC)   /* type = RSY_A SHIFT RIGHT SINGLE (32)  */          \
+  V(slak, SLAK, 0xEBDD)   /* type = RSY_A SHIFT LEFT SINGLE (32)  */           \
+  V(srlk, SRLK, 0xEBDE)   /* type = RSY_A SHIFT RIGHT SINGLE LOGICAL (32)  */  \
+  V(sllk, SLLK, 0xEBDF)   /* type = RSY_A SHIFT LEFT SINGLE LOGICAL (32)  */   \
+  V(lang, LANG, 0xEBE4)   /* type = RSY_A LOAD AND AND (64)  */                \
+  V(laog, LAOG, 0xEBE6)   /* type = RSY_A LOAD AND OR (64)  */                 \
+  V(laxg, LAXG, 0xEBE7)   /* type = RSY_A LOAD AND EXCLUSIVE OR (64)  */       \
+  V(laag, LAAG, 0xEBE8)   /* type = RSY_A LOAD AND ADD (64)  */                \
+  V(laalg, LAALG, 0xEBEA) /* type = RSY_A LOAD AND ADD LOGICAL (64)  */        \
+  V(lan, LAN, 0xEBF4)     /* type = RSY_A LOAD AND AND (32)  */                \
+  V(lao, LAO, 0xEBF6)     /* type = RSY_A LOAD AND OR (32)  */                 \
+  V(lax, LAX, 0xEBF7)     /* type = RSY_A LOAD AND EXCLUSIVE OR (32)  */       \
+  V(laa, LAA, 0xEBF8)     /* type = RSY_A LOAD AND ADD (32)  */                \
+  V(laal, LAAL, 0xEBFA)   /* type = RSY_A LOAD AND ADD LOGICAL (32)  */
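// Editor's note (illustrative): these S390_*_OPCODE_LIST macros are
// X-macro tables; each V(name, enum_name, opcode) row can be expanded
// several ways by redefining V. A small demonstration with a two-row
// excerpt (the consuming expansions here are this sketch's own):
#include <cstdint>

#define DEMO_OPCODE_LIST(V) \
  V(lmg, LMG, 0xEB04)       \
  V(stmg, STMG, 0xEB24)

// Expansion 1: an opcode enumeration.
enum DemoOpcode : uint16_t {
#define DECLARE_OPCODE(name, enum_name, value) enum_name = value,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

// Expansion 2: a mnemonic table for a disassembler.
const char* DemoOpcodeName(DemoOpcode op) {
  switch (op) {
#define OPCODE_CASE(name, enum_name, value) \
  case enum_name:                           \
    return #name;
    DEMO_OPCODE_LIST(OPCODE_CASE)
#undef OPCODE_CASE
  }
  return "unknown";
}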
+
+#define S390_RSY_B_OPCODE_LIST(V)                                              \
+  V(clmh, CLMH,                                                                \
+    0xEB20) /* type = RSY_B COMPARE LOGICAL CHAR. UNDER MASK (high)  */        \
+  V(clmy, CLMY,                                                                \
+    0xEB21) /* type = RSY_B COMPARE LOGICAL CHAR. UNDER MASK (low)  */         \
+  V(clt, CLT, 0xEB23)   /* type = RSY_B COMPARE LOGICAL AND TRAP (32)  */      \
+  V(clgt, CLGT, 0xEB2B) /* type = RSY_B COMPARE LOGICAL AND TRAP (64)  */      \
+  V(stcmh, STCMH,                                                              \
+    0xEB2C) /* type = RSY_B STORE CHARACTERS UNDER MASK (high)  */             \
+  V(stcmy, STCMY, 0xEB2D) /* type = RSY_B STORE CHARACTERS UNDER MASK (low) */ \
+  V(icmh, ICMH, 0xEB80) /* type = RSY_B INSERT CHARACTERS UNDER MASK (high) */ \
+  V(icmy, ICMY, 0xEB81) /* type = RSY_B INSERT CHARACTERS UNDER MASK (low)  */ \
+  V(locfh, LOCFH, 0xEBE0)   /* type = RSY_B LOAD HIGH ON CONDITION (32)  */    \
+  V(stocfh, STOCFH, 0xEBE1) /* type = RSY_B STORE HIGH ON CONDITION  */        \
+  V(locg, LOCG, 0xEBE2)     /* type = RSY_B LOAD ON CONDITION (64)  */         \
+  V(stocg, STOCG, 0xEBE3)   /* type = RSY_B STORE ON CONDITION (64)  */        \
+  V(loc, LOC, 0xEBF2)       /* type = RSY_B LOAD ON CONDITION (32)  */         \
+  V(stoc, STOC, 0xEBF3)     /* type = RSY_B STORE ON CONDITION (32)  */
+
+#define S390_RXE_OPCODE_LIST(V)                                                \
+  V(lcbb, LCBB, 0xE727) /* type = RXE   LOAD COUNT TO BLOCK BOUNDARY  */       \
+  V(ldeb, LDEB, 0xED04) /* type = RXE   LOAD LENGTHENED (short to long BFP) */ \
+  V(lxdb, LXDB,                                                                \
+    0xED05) /* type = RXE   LOAD LENGTHENED (long to extended BFP)  */         \
+  V(lxeb, LXEB,                                                                \
+    0xED06) /* type = RXE   LOAD LENGTHENED (short to extended BFP)  */        \
+  V(mxdb, MXDB, 0xED07) /* type = RXE   MULTIPLY (long to extended BFP)  */    \
+  V(keb, KEB, 0xED08)   /* type = RXE   COMPARE AND SIGNAL (short BFP)  */     \
+  V(ceb, CEB, 0xED09)   /* type = RXE   COMPARE (short BFP)  */                \
+  V(aeb, AEB, 0xED0A)   /* type = RXE   ADD (short BFP)  */                    \
+  V(seb, SEB, 0xED0B)   /* type = RXE   SUBTRACT (short BFP)  */               \
+  V(mdeb, MDEB, 0xED0C) /* type = RXE   MULTIPLY (short to long BFP)  */       \
+  V(deb, DEB, 0xED0D)   /* type = RXE   DIVIDE (short BFP)  */                 \
+  V(tceb, TCEB, 0xED10) /* type = RXE   TEST DATA CLASS (short BFP)  */        \
+  V(tcdb, TCDB, 0xED11) /* type = RXE   TEST DATA CLASS (long BFP)  */         \
+  V(tcxb, TCXB, 0xED12) /* type = RXE   TEST DATA CLASS (extended BFP)  */     \
+  V(sqeb, SQEB, 0xED14) /* type = RXE   SQUARE ROOT (short BFP)  */            \
+  V(sqdb, SQDB, 0xED15) /* type = RXE   SQUARE ROOT (long BFP)  */             \
+  V(meeb, MEEB, 0xED17) /* type = RXE   MULTIPLY (short BFP)  */               \
+  V(kdb, KDB, 0xED18)   /* type = RXE   COMPARE AND SIGNAL (long BFP)  */      \
+  V(cdb, CDB, 0xED19)   /* type = RXE   COMPARE (long BFP)  */                 \
+  V(adb, ADB, 0xED1A)   /* type = RXE   ADD (long BFP)  */                     \
+  V(sdb, SDB, 0xED1B)   /* type = RXE   SUBTRACT (long BFP)  */                \
+  V(mdb, MDB, 0xED1C)   /* type = RXE   MULTIPLY (long BFP)  */                \
+  V(ddb, DDB, 0xED1D)   /* type = RXE   DIVIDE (long BFP)  */                  \
+  V(lde, LDE, 0xED24) /* type = RXE   LOAD LENGTHENED (short to long HFP)  */  \
+  V(lxd, LXD,                                                                  \
+    0xED25) /* type = RXE   LOAD LENGTHENED (long to extended HFP)  */         \
+  V(lxe, LXE,                                                                  \
+    0xED26) /* type = RXE   LOAD LENGTHENED (short to extended HFP)  */        \
+  V(sqe, SQE, 0xED34)     /* type = RXE   SQUARE ROOT (short HFP)  */          \
+  V(sqd, SQD, 0xED35)     /* type = RXE   SQUARE ROOT (long HFP)  */           \
+  V(mee, MEE, 0xED37)     /* type = RXE   MULTIPLY (short HFP)  */             \
+  V(tdcet, TDCET, 0xED50) /* type = RXE   TEST DATA CLASS (short DFP)  */      \
+  V(tdget, TDGET, 0xED51) /* type = RXE   TEST DATA GROUP (short DFP)  */      \
+  V(tdcdt, TDCDT, 0xED54) /* type = RXE   TEST DATA CLASS (long DFP)  */       \
+  V(tdgdt, TDGDT, 0xED55) /* type = RXE   TEST DATA GROUP (long DFP)  */       \
+  V(tdcxt, TDCXT, 0xED58) /* type = RXE   TEST DATA CLASS (extended DFP)  */   \
+  V(tdgxt, TDGXT, 0xED59) /* type = RXE   TEST DATA GROUP (extended DFP)  */
+
+#define S390_RRF_A_OPCODE_LIST(V)                                           \
+  V(ipte, IPTE, 0xB221)     /* type = RRF_A INVALIDATE PAGE TABLE ENTRY  */ \
+  V(mdtr, MDTR, 0xB3D0)     /* type = RRF_A MULTIPLY (long DFP)  */         \
+  V(mdtra, MDTRA, 0xB3D0)   /* type = RRF_A MULTIPLY (long DFP)  */         \
+  V(ddtr, DDTR, 0xB3D1)     /* type = RRF_A DIVIDE (long DFP)  */           \
+  V(ddtra, DDTRA, 0xB3D1)   /* type = RRF_A DIVIDE (long DFP)  */           \
+  V(adtr, ADTR, 0xB3D2)     /* type = RRF_A ADD (long DFP)  */              \
+  V(adtra, ADTRA, 0xB3D2)   /* type = RRF_A ADD (long DFP)  */              \
+  V(sdtr, SDTR, 0xB3D3)     /* type = RRF_A SUBTRACT (long DFP)  */         \
+  V(sdtra, SDTRA, 0xB3D3)   /* type = RRF_A SUBTRACT (long DFP)  */         \
+  V(mxtr, MXTR, 0xB3D8)     /* type = RRF_A MULTIPLY (extended DFP)  */     \
+  V(mxtra, MXTRA, 0xB3D8)   /* type = RRF_A MULTIPLY (extended DFP)  */     \
+  V(msrkc, MSRKC, 0xB9FD)   /* type = RRF_A MULTIPLY (32)*/                 \
+  V(msgrkc, MSGRKC, 0xB9ED) /* type = RRF_A MULTIPLY (64)*/                 \
+  V(dxtr, DXTR, 0xB3D9)     /* type = RRF_A DIVIDE (extended DFP)  */       \
+  V(dxtra, DXTRA, 0xB3D9)   /* type = RRF_A DIVIDE (extended DFP)  */       \
+  V(axtr, AXTR, 0xB3DA)     /* type = RRF_A ADD (extended DFP)  */          \
+  V(axtra, AXTRA, 0xB3DA)   /* type = RRF_A ADD (extended DFP)  */          \
+  V(sxtr, SXTR, 0xB3DB)     /* type = RRF_A SUBTRACT (extended DFP)  */     \
+  V(sxtra, SXTRA, 0xB3DB)   /* type = RRF_A SUBTRACT (extended DFP)  */     \
+  V(ahhhr, AHHHR, 0xB9C8)   /* type = RRF_A ADD HIGH (32)  */               \
+  V(shhhr, SHHHR, 0xB9C9)   /* type = RRF_A SUBTRACT HIGH (32)  */          \
+  V(alhhhr, ALHHHR, 0xB9CA) /* type = RRF_A ADD LOGICAL HIGH (32)  */       \
+  V(slhhhr, SLHHHR, 0xB9CB) /* type = RRF_A SUBTRACT LOGICAL HIGH (32)  */  \
+  V(ahhlr, AHHLR, 0xB9D8)   /* type = RRF_A ADD HIGH (32)  */               \
+  V(shhlr, SHHLR, 0xB9D9)   /* type = RRF_A SUBTRACT HIGH (32)  */          \
+  V(alhhlr, ALHHLR, 0xB9DA) /* type = RRF_A ADD LOGICAL HIGH (32)  */       \
+  V(slhhlr, SLHHLR, 0xB9DB) /* type = RRF_A SUBTRACT LOGICAL HIGH (32)  */  \
+  V(ngrk, NGRK, 0xB9E4)     /* type = RRF_A AND (64)  */                    \
+  V(ogrk, OGRK, 0xB9E6)     /* type = RRF_A OR (64)  */                     \
+  V(xgrk, XGRK, 0xB9E7)     /* type = RRF_A EXCLUSIVE OR (64)  */           \
+  V(agrk, AGRK, 0xB9E8)     /* type = RRF_A ADD (64)  */                    \
+  V(sgrk, SGRK, 0xB9E9)     /* type = RRF_A SUBTRACT (64)  */               \
+  V(algrk, ALGRK, 0xB9EA)   /* type = RRF_A ADD LOGICAL (64)  */            \
+  V(slgrk, SLGRK, 0xB9EB)   /* type = RRF_A SUBTRACT LOGICAL (64)  */       \
+  V(nrk, NRK, 0xB9F4)       /* type = RRF_A AND (32)  */                    \
+  V(ork, ORK, 0xB9F6)       /* type = RRF_A OR (32)  */                     \
+  V(xrk, XRK, 0xB9F7)       /* type = RRF_A EXCLUSIVE OR (32)  */           \
+  V(ark, ARK, 0xB9F8)       /* type = RRF_A ADD (32)  */                    \
+  V(srk, SRK, 0xB9F9)       /* type = RRF_A SUBTRACT (32)  */               \
+  V(alrk, ALRK, 0xB9FA)     /* type = RRF_A ADD LOGICAL (32)  */            \
+  V(slrk, SLRK, 0xB9FB)     /* type = RRF_A SUBTRACT LOGICAL (32)  */
+
+#define S390_RXF_OPCODE_LIST(V)                                                \
+  V(maeb, MAEB, 0xED0E) /* type = RXF   MULTIPLY AND ADD (short BFP)  */       \
+  V(mseb, MSEB, 0xED0F) /* type = RXF   MULTIPLY AND SUBTRACT (short BFP)  */  \
+  V(madb, MADB, 0xED1E) /* type = RXF   MULTIPLY AND ADD (long BFP)  */        \
+  V(msdb, MSDB, 0xED1F) /* type = RXF   MULTIPLY AND SUBTRACT (long BFP)  */   \
+  V(mae, MAE, 0xED2E)   /* type = RXF   MULTIPLY AND ADD (short HFP)  */       \
+  V(mse, MSE, 0xED2F)   /* type = RXF   MULTIPLY AND SUBTRACT (short HFP)  */  \
+  V(mayl, MAYL,                                                                \
+    0xED38) /* type = RXF   MULTIPLY AND ADD UNNRM. (long to ext. low HFP)  */ \
+  V(myl, MYL,                                                                  \
+    0xED39) /* type = RXF   MULTIPLY UNNORM. (long to ext. low HFP)  */        \
+  V(may, MAY,                                                                  \
+    0xED3A) /* type = RXF   MULTIPLY & ADD UNNORMALIZED (long to ext. HFP)  */ \
+  V(my, MY,                                                                    \
+    0xED3B) /* type = RXF   MULTIPLY UNNORMALIZED (long to ext. HFP)  */       \
+  V(mayh, MAYH,                                                                \
+    0xED3C) /* type = RXF   MULTIPLY AND ADD UNNRM. (long to ext. high HFP) */ \
+  V(myh, MYH,                                                                  \
+    0xED3D) /* type = RXF   MULTIPLY UNNORM. (long to ext. high HFP)  */       \
+  V(mad, MAD, 0xED3E)   /* type = RXF   MULTIPLY AND ADD (long HFP)  */        \
+  V(msd, MSD, 0xED3F)   /* type = RXF   MULTIPLY AND SUBTRACT (long HFP)  */   \
+  V(sldt, SLDT, 0xED40) /* type = RXF   SHIFT SIGNIFICAND LEFT (long DFP)  */  \
+  V(srdt, SRDT, 0xED41) /* type = RXF   SHIFT SIGNIFICAND RIGHT (long DFP)  */ \
+  V(slxt, SLXT,                                                                \
+    0xED48) /* type = RXF   SHIFT SIGNIFICAND LEFT (extended DFP)  */          \
+  V(srxt, SRXT,                                                                \
+    0xED49) /* type = RXF   SHIFT SIGNIFICAND RIGHT (extended DFP)  */
+
+#define S390_IE_OPCODE_LIST(V) \
+  V(niai, NIAI, 0xB2FA) /* type = IE    NEXT INSTRUCTION ACCESS INTENT  */
+
+#define S390_RRF_B_OPCODE_LIST(V)                                           \
+  V(diebr, DIEBR, 0xB353) /* type = RRF_B DIVIDE TO INTEGER (short BFP)  */ \
+  V(didbr, DIDBR, 0xB35B) /* type = RRF_B DIVIDE TO INTEGER (long BFP)  */  \
+  V(cpsdr, CPSDR, 0xB372) /* type = RRF_B COPY SIGN (long)  */              \
+  V(qadtr, QADTR, 0xB3F5) /* type = RRF_B QUANTIZE (long DFP)  */           \
+  V(iedtr, IEDTR,                                                           \
+    0xB3F6) /* type = RRF_B INSERT BIASED EXPONENT (64 to long DFP)  */     \
+  V(rrdtr, RRDTR, 0xB3F7) /* type = RRF_B REROUND (long DFP)  */            \
+  V(qaxtr, QAXTR, 0xB3FD) /* type = RRF_B QUANTIZE (extended DFP)  */       \
+  V(iextr, IEXTR,                                                           \
+    0xB3FE) /* type = RRF_B INSERT BIASED EXPONENT (64 to extended DFP)  */ \
+  V(rrxtr, RRXTR, 0xB3FF) /* type = RRF_B REROUND (extended DFP)  */        \
+  V(kmctr, KMCTR, 0xB92D) /* type = RRF_B CIPHER MESSAGE WITH COUNTER  */   \
+  V(idte, IDTE, 0xB98E)   /* type = RRF_B INVALIDATE DAT TABLE ENTRY  */    \
+  V(crdte, CRDTE,                                                           \
+    0xB98F) /* type = RRF_B COMPARE AND REPLACE DAT TABLE ENTRY  */         \
+  V(lptea, LPTEA, 0xB9AA) /* type = RRF_B LOAD PAGE TABLE ENTRY ADDRESS  */
+
+#define S390_RRF_C_OPCODE_LIST(V)                                           \
+  V(sske, SSKE, 0xB22B)   /* type = RRF_C SET STORAGE KEY EXTENDED  */      \
+  V(cuutf, CUUTF, 0xB2A6) /* type = RRF_C CONVERT UNICODE TO UTF-8  */      \
+  V(cu21, CU21, 0xB2A6)   /* type = RRF_C CONVERT UTF-16 TO UTF-8  */       \
+  V(cutfu, CUTFU, 0xB2A7) /* type = RRF_C CONVERT UTF-8 TO UNICODE  */      \
+  V(cu12, CU12, 0xB2A7)   /* type = RRF_C CONVERT UTF-8 TO UTF-16  */       \
+  V(ppa, PPA, 0xB2E8)     /* type = RRF_C PERFORM PROCESSOR ASSIST  */      \
+  V(cgrt, CGRT, 0xB960)   /* type = RRF_C COMPARE AND TRAP (64)  */         \
+  V(clgrt, CLGRT, 0xB961) /* type = RRF_C COMPARE LOGICAL AND TRAP (64)  */ \
+  V(crt, CRT, 0xB972)     /* type = RRF_C COMPARE AND TRAP (32)  */         \
+  V(clrt, CLRT, 0xB973)   /* type = RRF_C COMPARE LOGICAL AND TRAP (32)  */ \
+  V(trtt, TRTT, 0xB990)   /* type = RRF_C TRANSLATE TWO TO TWO  */          \
+  V(trto, TRTO, 0xB991)   /* type = RRF_C TRANSLATE TWO TO ONE  */          \
+  V(trot, TROT, 0xB992)   /* type = RRF_C TRANSLATE ONE TO TWO  */          \
+  V(troo, TROO, 0xB993)   /* type = RRF_C TRANSLATE ONE TO ONE  */          \
+  V(cu14, CU14, 0xB9B0)   /* type = RRF_C CONVERT UTF-8 TO UTF-32  */       \
+  V(cu24, CU24, 0xB9B1)   /* type = RRF_C CONVERT UTF-16 TO UTF-32  */      \
+  V(trtre, TRTRE,                                                           \
+    0xB9BD) /* type = RRF_C TRANSLATE AND TEST REVERSE EXTENDED  */         \
+  V(trte, TRTE, 0xB9BF)     /* type = RRF_C TRANSLATE AND TEST EXTENDED  */ \
+  V(locfhr, LOCFHR, 0xB9E0) /* type = RRF_C LOAD HIGH ON CONDITION (32)  */ \
+  V(locgr, LOCGR, 0xB9E2)   /* type = RRF_C LOAD ON CONDITION (64)  */      \
+  V(locr, LOCR, 0xB9F2)     /* type = RRF_C LOAD ON CONDITION (32)  */
+
+#define S390_MII_OPCODE_LIST(V) \
+  V(bprp, BPRP, 0xC5) /* type = MII   BRANCH PREDICTION RELATIVE PRELOAD  */
+
+#define S390_RRF_D_OPCODE_LIST(V)                                         \
+  V(ldetr, LDETR,                                                         \
+    0xB3D4) /* type = RRF_D LOAD LENGTHENED (short to long DFP)  */       \
+  V(lxdtr, LXDTR,                                                         \
+    0xB3DC) /* type = RRF_D LOAD LENGTHENED (long to extended DFP)  */    \
+  V(csdtr, CSDTR,                                                         \
+    0xB3E3) /* type = RRF_D CONVERT TO SIGNED PACKED (long DFP to 64)  */ \
+  V(csxtr, CSXTR,                                                         \
+    0xB3EB) /* type = RRF_D CONVERT TO SIGNED PACKED (extended DFP to 128)  */
+
+#define S390_RRF_E_OPCODE_LIST(V)                                              \
+  V(ledbra, LEDBRA,                                                            \
+    0xB344) /* type = RRF_E LOAD ROUNDED (long to short BFP)  */               \
+  V(ldxbra, LDXBRA,                                                            \
+    0xB345) /* type = RRF_E LOAD ROUNDED (extended to long BFP)  */            \
+  V(lexbra, LEXBRA,                                                            \
+    0xB346) /* type = RRF_E LOAD ROUNDED (extended to short BFP)  */           \
+  V(fixbr, FIXBR, 0xB347)   /* type = RRF_E LOAD FP INTEGER (extended BFP)  */ \
+  V(fixbra, FIXBRA, 0xB347) /* type = RRF_E LOAD FP INTEGER (extended BFP)  */ \
+  V(tbedr, TBEDR,                                                              \
+    0xB350)             /* type = RRF_E CONVERT HFP TO BFP (long to short)  */ \
+  V(tbdr, TBDR, 0xB351) /* type = RRF_E CONVERT HFP TO BFP (long)  */          \
+  V(fiebr, FIEBR, 0xB357)   /* type = RRF_E LOAD FP INTEGER (short BFP)  */    \
+  V(fiebra, FIEBRA, 0xB357) /* type = RRF_E LOAD FP INTEGER (short BFP)  */    \
+  V(fidbr, FIDBR, 0xB35F)   /* type = RRF_E LOAD FP INTEGER (long BFP)  */     \
+  V(fidbra, FIDBRA, 0xB35F) /* type = RRF_E LOAD FP INTEGER (long BFP)  */     \
+  V(celfbr, CELFBR,                                                            \
+    0xB390) /* type = RRF_E CONVERT FROM LOGICAL (32 to short BFP)  */         \
+  V(cdlfbr, CDLFBR,                                                            \
+    0xB391) /* type = RRF_E CONVERT FROM LOGICAL (32 to long BFP)  */          \
+  V(cxlfbr, CXLFBR,                                                            \
+    0xB392) /* type = RRF_E CONVERT FROM LOGICAL (32 to extended BFP)  */      \
+  V(cefbra, CEFBRA,                                                            \
+    0xB394) /* type = RRF_E CONVERT FROM FIXED (32 to short BFP)  */           \
+  V(cdfbra, CDFBRA,                                                            \
+    0xB395) /* type = RRF_E CONVERT FROM FIXED (32 to long BFP)  */            \
+  V(cxfbra, CXFBRA,                                                            \
+    0xB396) /* type = RRF_E CONVERT FROM FIXED (32 to extended BFP)  */        \
+  V(cfebr, CFEBR,                                                              \
+    0xB398) /* type = RRF_E CONVERT TO FIXED (short BFP to 32)  */             \
+  V(cfebra, CFEBRA,                                                            \
+    0xB398) /* type = RRF_E CONVERT TO FIXED (short BFP to 32)  */             \
+  V(cfdbr, CFDBR, 0xB399) /* type = RRF_E CONVERT TO FIXED (long BFP to 32) */ \
+  V(cfdbra, CFDBRA,                                                            \
+    0xB399) /* type = RRF_E CONVERT TO FIXED (long BFP to 32)  */              \
+  V(cfxbr, CFXBR,                                                              \
+    0xB39A) /* type = RRF_E CONVERT TO FIXED (extended BFP to 32)  */          \
+  V(cfxbra, CFXBRA,                                                            \
+    0xB39A) /* type = RRF_E CONVERT TO FIXED (extended BFP to 32)  */          \
+  V(clfebr, CLFEBR,                                                            \
+    0xB39C) /* type = RRF_E CONVERT TO LOGICAL (short BFP to 32)  */           \
+  V(clfdbr, CLFDBR,                                                            \
+    0xB39D) /* type = RRF_E CONVERT TO LOGICAL (long BFP to 32)  */            \
+  V(clfxbr, CLFXBR,                                                            \
+    0xB39E) /* type = RRF_E CONVERT TO LOGICAL (extended BFP to 32)  */        \
+  V(celgbr, CELGBR,                                                            \
+    0xB3A0) /* type = RRF_E CONVERT FROM LOGICAL (64 to short BFP)  */         \
+  V(cdlgbr, CDLGBR,                                                            \
+    0xB3A1) /* type = RRF_E CONVERT FROM LOGICAL (64 to long BFP)  */          \
+  V(cxlgbr, CXLGBR,                                                            \
+    0xB3A2) /* type = RRF_E CONVERT FROM LOGICAL (64 to extended BFP)  */      \
+  V(cegbra, CEGBRA,                                                            \
+    0xB3A4) /* type = RRF_E CONVERT FROM FIXED (64 to short BFP)  */           \
+  V(cdgbra, CDGBRA,                                                            \
+    0xB3A5) /* type = RRF_E CONVERT FROM FIXED (64 to long BFP)  */            \
+  V(cxgbra, CXGBRA,                                                            \
+    0xB3A6) /* type = RRF_E CONVERT FROM FIXED (64 to extended BFP)  */        \
+  V(cgebr, CGEBR,                                                              \
+    0xB3A8) /* type = RRF_E CONVERT TO FIXED (short BFP to 64)  */             \
+  V(cgebra, CGEBRA,                                                            \
+    0xB3A8) /* type = RRF_E CONVERT TO FIXED (short BFP to 64)  */             \
+  V(cgdbr, CGDBR, 0xB3A9) /* type = RRF_E CONVERT TO FIXED (long BFP to 64) */ \
+  V(cgdbra, CGDBRA,                                                            \
+    0xB3A9) /* type = RRF_E CONVERT TO FIXED (long BFP to 64)  */              \
+  V(cgxbr, CGXBR,                                                              \
+    0xB3AA) /* type = RRF_E CONVERT TO FIXED (extended BFP to 64)  */          \
+  V(cgxbra, CGXBRA,                                                            \
+    0xB3AA) /* type = RRF_E CONVERT TO FIXED (extended BFP to 64)  */          \
+  V(clgebr, CLGEBR,                                                            \
+    0xB3AC) /* type = RRF_E CONVERT TO LOGICAL (short BFP to 64)  */           \
+  V(clgdbr, CLGDBR,                                                            \
+    0xB3AD) /* type = RRF_E CONVERT TO LOGICAL (long BFP to 64)  */            \
+  V(clgxbr, CLGXBR,                                                            \
+    0xB3AE) /* type = RRF_E CONVERT TO LOGICAL (extended BFP to 64)  */        \
+  V(cfer, CFER, 0xB3B8) /* type = RRF_E CONVERT TO FIXED (short HFP to 32)  */ \
+  V(cfdr, CFDR, 0xB3B9) /* type = RRF_E CONVERT TO FIXED (long HFP to 32)  */  \
+  V(cfxr, CFXR,                                                                \
+    0xB3BA) /* type = RRF_E CONVERT TO FIXED (extended HFP to 32)  */          \
+  V(cger, CGER, 0xB3C8) /* type = RRF_E CONVERT TO FIXED (short HFP to 64)  */ \
+  V(cgdr, CGDR, 0xB3C9) /* type = RRF_E CONVERT TO FIXED (long HFP to 64)  */  \
+  V(cgxr, CGXR,                                                                \
+    0xB3CA) /* type = RRF_E CONVERT TO FIXED (extended HFP to 64)  */          \
+  V(ledtr, LEDTR, 0xB3D5) /* type = RRF_E LOAD ROUNDED (long to short DFP)  */ \
+  V(fidtr, FIDTR, 0xB3D7) /* type = RRF_E LOAD FP INTEGER (long DFP)  */       \
+  V(ldxtr, LDXTR,                                                              \
+    0xB3DD) /* type = RRF_E LOAD ROUNDED (extended to long DFP)  */            \
+  V(fixtr, FIXTR, 0xB3DF) /* type = RRF_E LOAD FP INTEGER (extended DFP)  */   \
+  V(cgdtr, CGDTR, 0xB3E1) /* type = RRF_E CONVERT TO FIXED (long DFP to 64) */ \
+  V(cgdtra, CGDTRA,                                                            \
+    0xB3E1) /* type = RRF_E CONVERT TO FIXED (long DFP to 64)  */              \
+  V(cgxtr, CGXTR,                                                              \
+    0xB3E9) /* type = RRF_E CONVERT TO FIXED (extended DFP to 64)  */          \
+  V(cgxtra, CGXTRA,                                                            \
+    0xB3E9) /* type = RRF_E CONVERT TO FIXED (extended DFP to 64)  */          \
+  V(cdgtra, CDGTRA,                                                            \
+    0xB3F1) /* type = RRF_E CONVERT FROM FIXED (64 to long DFP)  */            \
+  V(cxgtra, CXGTRA,                                                            \
+    0xB3F9) /* type = RRF_E CONVERT FROM FIXED (64 to extended DFP)  */        \
+  V(cfdtr, CFDTR, 0xB941) /* type = RRF_E CONVERT TO FIXED (long DFP to 32) */ \
+  V(clgdtr, CLGDTR,                                                            \
+    0xB942) /* type = RRF_E CONVERT TO LOGICAL (long DFP to 64)  */            \
+  V(clfdtr, CLFDTR,                                                            \
+    0xB943) /* type = RRF_E CONVERT TO LOGICAL (long DFP to 32)  */            \
+  V(cfxtr, CFXTR,                                                              \
+    0xB949) /* type = RRF_E CONVERT TO FIXED (extended DFP to 32)  */          \
+  V(clgxtr, CLGXTR,                                                            \
+    0xB94A) /* type = RRF_E CONVERT TO LOGICAL (extended DFP to 64)  */        \
+  V(clfxtr, CLFXTR,                                                            \
+    0xB94B) /* type = RRF_E CONVERT TO LOGICAL (extended DFP to 32)  */        \
+  V(cdlgtr, CDLGTR,                                                            \
+    0xB952) /* type = RRF_E CONVERT FROM LOGICAL (64 to long DFP)  */          \
+  V(cdlftr, CDLFTR,                                                            \
+    0xB953) /* type = RRF_E CONVERT FROM LOGICAL (32 to long DFP)  */          \
+  V(cxlgtr, CXLGTR,                                                            \
+    0xB95A) /* type = RRF_E CONVERT FROM LOGICAL (64 to extended DFP)  */      \
+  V(cxlftr, CXLFTR,                                                            \
+    0xB95B) /* type = RRF_E CONVERT FROM LOGICAL (32 to extended DFP)  */
+
+#define S390_VRR_A_OPCODE_LIST(V)                                              \
+  V(vpopct, VPOPCT, 0xE750) /* type = VRR_A VECTOR POPULATION COUNT  */        \
+  V(vctz, VCTZ, 0xE752)     /* type = VRR_A VECTOR COUNT TRAILING ZEROS  */    \
+  V(vclz, VCLZ, 0xE753)     /* type = VRR_A VECTOR COUNT LEADING ZEROS  */     \
+  V(vlr, VLR, 0xE756)       /* type = VRR_A VECTOR LOAD  */                    \
+  V(vistr, VISTR, 0xE75C)   /* type = VRR_A VECTOR ISOLATE STRING  */          \
+  V(vseg, VSEG, 0xE75F) /* type = VRR_A VECTOR SIGN EXTEND TO DOUBLEWORD  */   \
+  V(vclgd, VCLGD,                                                              \
+    0xE7C0) /* type = VRR_A VECTOR FP CONVERT TO LOGICAL 64-BIT  */            \
+  V(vcdlg, VCDLG,                                                              \
+    0xE7C1) /* type = VRR_A VECTOR FP CONVERT FROM LOGICAL 64-BIT  */          \
+  V(vcgd, VCGD, 0xE7C2) /* type = VRR_A VECTOR FP CONVERT TO FIXED 64-BIT  */  \
+  V(vcdg, VCDG, 0xE7C3) /* type = VRR_A VECTOR FP CONVERT FROM FIXED 64-BIT */ \
+  V(vlde, VLDE, 0xE7C4) /* type = VRR_A VECTOR FP LOAD LENGTHENED  */          \
+  V(vled, VLED, 0xE7C5) /* type = VRR_A VECTOR FP LOAD ROUNDED  */             \
+  V(vfi, VFI, 0xE7C7)   /* type = VRR_A VECTOR LOAD FP INTEGER  */             \
+  V(wfk, WFK, 0xE7CA) /* type = VRR_A VECTOR FP COMPARE AND SIGNAL SCALAR  */  \
+  V(wfc, WFC, 0xE7CB) /* type = VRR_A VECTOR FP COMPARE SCALAR  */             \
+  V(vfpso, VFPSO, 0xE7CC) /* type = VRR_A VECTOR FP PERFORM SIGN OPERATION  */ \
+  V(vfsq, VFSQ, 0xE7CE)   /* type = VRR_A VECTOR FP SQUARE ROOT  */            \
+  V(vupll, VUPLL, 0xE7D4) /* type = VRR_A VECTOR UNPACK LOGICAL LOW  */        \
+  V(vuplh, VUPLH, 0xE7D5) /* type = VRR_A VECTOR UNPACK LOGICAL HIGH  */       \
+  V(vupl, VUPL, 0xE7D6)   /* type = VRR_A VECTOR UNPACK LOW  */                \
+  V(vuph, VUPH, 0xE7D7)   /* type = VRR_A VECTOR UNPACK HIGH  */               \
+  V(vtm, VTM, 0xE7D8)     /* type = VRR_A VECTOR TEST UNDER MASK  */           \
+  V(vecl, VECL, 0xE7D9)   /* type = VRR_A VECTOR ELEMENT COMPARE LOGICAL  */   \
+  V(vec, VEC, 0xE7DB)     /* type = VRR_A VECTOR ELEMENT COMPARE  */           \
+  V(vlc, VLC, 0xE7DE)     /* type = VRR_A VECTOR LOAD COMPLEMENT  */           \
+  V(vlp, VLP, 0xE7DF)     /* type = VRR_A VECTOR LOAD POSITIVE  */
+
+#define S390_VRR_B_OPCODE_LIST(V)                                           \
+  V(vfee, VFEE, 0xE780)   /* type = VRR_B VECTOR FIND ELEMENT EQUAL  */     \
+  V(vfene, VFENE, 0xE781) /* type = VRR_B VECTOR FIND ELEMENT NOT EQUAL  */ \
+  V(vfae, VFAE, 0xE782)   /* type = VRR_B VECTOR FIND ANY ELEMENT EQUAL  */ \
+  V(vpkls, VPKLS, 0xE795) /* type = VRR_B VECTOR PACK LOGICAL SATURATE  */  \
+  V(vpks, VPKS, 0xE797)   /* type = VRR_B VECTOR PACK SATURATE  */          \
+  V(vceq, VCEQ, 0xE7F8)   /* type = VRR_B VECTOR COMPARE EQUAL  */          \
+  V(vchl, VCHL, 0xE7F9)   /* type = VRR_B VECTOR COMPARE HIGH LOGICAL  */   \
+  V(vch, VCH, 0xE7FB)     /* type = VRR_B VECTOR COMPARE HIGH  */
+
+#define S390_VRR_C_OPCODE_LIST(V)                                              \
+  V(vmrl, VMRL, 0xE760)   /* type = VRR_C VECTOR MERGE LOW  */                 \
+  V(vmrh, VMRH, 0xE761)   /* type = VRR_C VECTOR MERGE HIGH  */                \
+  V(vsum, VSUM, 0xE764)   /* type = VRR_C VECTOR SUM ACROSS WORD  */           \
+  V(vsumg, VSUMG, 0xE765) /* type = VRR_C VECTOR SUM ACROSS DOUBLEWORD  */     \
+  V(vcksm, VCKSM, 0xE766) /* type = VRR_C VECTOR CHECKSUM  */                  \
+  V(vsumq, VSUMQ, 0xE767) /* type = VRR_C VECTOR SUM ACROSS QUADWORD  */       \
+  V(vn, VN, 0xE768)       /* type = VRR_C VECTOR AND  */                       \
+  V(vnc, VNC, 0xE769)     /* type = VRR_C VECTOR AND WITH COMPLEMENT  */       \
+  V(vo, VO, 0xE76A)       /* type = VRR_C VECTOR OR  */                        \
+  V(vno, VNO, 0xE76B)     /* type = VRR_C VECTOR NOR  */                       \
+  V(vx, VX, 0xE76D)       /* type = VRR_C VECTOR EXCLUSIVE OR  */              \
+  V(veslv, VESLV, 0xE770) /* type = VRR_C VECTOR ELEMENT SHIFT LEFT  */        \
+  V(verllv, VERLLV,                                                            \
+    0xE773)             /* type = VRR_C VECTOR ELEMENT ROTATE LEFT LOGICAL  */ \
+  V(vsl, VSL, 0xE774)   /* type = VRR_C VECTOR SHIFT LEFT  */                  \
+  V(vslb, VSLB, 0xE775) /* type = VRR_C VECTOR SHIFT LEFT BY BYTE  */          \
+  V(vesrlv, VESRLV,                                                            \
+    0xE778) /* type = VRR_C VECTOR ELEMENT SHIFT RIGHT LOGICAL  */             \
+  V(vesrav, VESRAV,                                                            \
+    0xE77A) /* type = VRR_C VECTOR ELEMENT SHIFT RIGHT ARITHMETIC  */          \
+  V(vsrl, VSRL, 0xE77C) /* type = VRR_C VECTOR SHIFT RIGHT LOGICAL  */         \
+  V(vsrlb, VSRLB,                                                              \
+    0xE77D)             /* type = VRR_C VECTOR SHIFT RIGHT LOGICAL BY BYTE  */ \
+  V(vsra, VSRA, 0xE77E) /* type = VRR_C VECTOR SHIFT RIGHT ARITHMETIC  */      \
+  V(vsrab, VSRAB,                                                              \
+    0xE77F) /* type = VRR_C VECTOR SHIFT RIGHT ARITHMETIC BY BYTE  */          \
+  V(vpdi, VPDI, 0xE784) /* type = VRR_C VECTOR PERMUTE DOUBLEWORD IMMEDIATE */ \
+  V(vpk, VPK, 0xE794)   /* type = VRR_C VECTOR PACK  */                        \
+  V(vmlh, VMLH, 0xE7A1) /* type = VRR_C VECTOR MULTIPLY LOGICAL HIGH  */       \
+  V(vml, VML, 0xE7A2)   /* type = VRR_C VECTOR MULTIPLY LOW  */                \
+  V(vmh, VMH, 0xE7A3)   /* type = VRR_C VECTOR MULTIPLY HIGH  */               \
+  V(vmle, VMLE, 0xE7A4) /* type = VRR_C VECTOR MULTIPLY LOGICAL EVEN  */       \
+  V(vmlo, VMLO, 0xE7A5) /* type = VRR_C VECTOR MULTIPLY LOGICAL ODD  */        \
+  V(vme, VME, 0xE7A6)   /* type = VRR_C VECTOR MULTIPLY EVEN  */               \
+  V(vmo, VMO, 0xE7A7)   /* type = VRR_C VECTOR MULTIPLY ODD  */                \
+  V(vgfm, VGFM, 0xE7B4) /* type = VRR_C VECTOR GALOIS FIELD MULTIPLY SUM  */   \
+  V(vfs, VFS, 0xE7E2)   /* type = VRR_C VECTOR FP SUBTRACT  */                 \
+  V(vfa, VFA, 0xE7E3)   /* type = VRR_C VECTOR FP ADD  */                      \
+  V(vfd, VFD, 0xE7E5)   /* type = VRR_C VECTOR FP DIVIDE  */                   \
+  V(vfm, VFM, 0xE7E7)   /* type = VRR_C VECTOR FP MULTIPLY  */                 \
+  V(vfce, VFCE, 0xE7E8) /* type = VRR_C VECTOR FP COMPARE EQUAL  */            \
+  V(vfche, VFCHE, 0xE7EA) /* type = VRR_C VECTOR FP COMPARE HIGH OR EQUAL  */  \
+  V(vfch, VFCH, 0xE7EB)   /* type = VRR_C VECTOR FP COMPARE HIGH  */           \
+  V(vavgl, VAVGL, 0xE7F0) /* type = VRR_C VECTOR AVERAGE LOGICAL  */           \
+  V(vacc, VACC, 0xE7F1)   /* type = VRR_C VECTOR ADD COMPUTE CARRY  */         \
+  V(vavg, VAVG, 0xE7F2)   /* type = VRR_C VECTOR AVERAGE  */                   \
+  V(va, VA, 0xE7F3)       /* type = VRR_C VECTOR ADD  */                       \
+  V(vscbi, VSCBI,                                                              \
+    0xE7F5) /* type = VRR_C VECTOR SUBTRACT COMPUTE BORROW INDICATION  */      \
+  V(vs, VS, 0xE7F7)     /* type = VRR_C VECTOR SUBTRACT  */                    \
+  V(vmnl, VMNL, 0xE7FC) /* type = VRR_C VECTOR MINIMUM LOGICAL  */             \
+  V(vmxl, VMXL, 0xE7FD) /* type = VRR_C VECTOR MAXIMUM LOGICAL  */             \
+  V(vmn, VMN, 0xE7FE)   /* type = VRR_C VECTOR MINIMUM  */                     \
+  V(vmx, VMX, 0xE7FF)   /* type = VRR_C VECTOR MAXIMUM  */
+
+#define S390_VRI_A_OPCODE_LIST(V)                                              \
+  V(vleib, VLEIB, 0xE740) /* type = VRI_A VECTOR LOAD ELEMENT IMMEDIATE (8) */ \
+  V(vleih, VLEIH,                                                              \
+    0xE741) /* type = VRI_A VECTOR LOAD ELEMENT IMMEDIATE (16)  */             \
+  V(vleig, VLEIG,                                                              \
+    0xE742) /* type = VRI_A VECTOR LOAD ELEMENT IMMEDIATE (64)  */             \
+  V(vleif, VLEIF,                                                              \
+    0xE743)             /* type = VRI_A VECTOR LOAD ELEMENT IMMEDIATE (32)  */ \
+  V(vgbm, VGBM, 0xE744) /* type = VRI_A VECTOR GENERATE BYTE MASK  */          \
+  V(vrepi, VREPI, 0xE745) /* type = VRI_A VECTOR REPLICATE IMMEDIATE  */
+
+#define S390_VRR_D_OPCODE_LIST(V)                                              \
+  V(vstrc, VSTRC, 0xE78A) /* type = VRR_D VECTOR STRING RANGE COMPARE  */      \
+  V(vmalh, VMALH,                                                              \
+    0xE7A9) /* type = VRR_D VECTOR MULTIPLY AND ADD LOGICAL HIGH  */           \
+  V(vmal, VMAL, 0xE7AA) /* type = VRR_D VECTOR MULTIPLY AND ADD LOW  */        \
+  V(vmah, VMAH, 0xE7AB) /* type = VRR_D VECTOR MULTIPLY AND ADD HIGH  */       \
+  V(vmale, VMALE,                                                              \
+    0xE7AC) /* type = VRR_D VECTOR MULTIPLY AND ADD LOGICAL EVEN  */           \
+  V(vmalo, VMALO,                                                              \
+    0xE7AD) /* type = VRR_D VECTOR MULTIPLY AND ADD LOGICAL ODD  */            \
+  V(vmae, VMAE, 0xE7AE) /* type = VRR_D VECTOR MULTIPLY AND ADD EVEN  */       \
+  V(vmao, VMAO, 0xE7AF) /* type = VRR_D VECTOR MULTIPLY AND ADD ODD  */        \
+  V(vaccc, VACCC,                                                              \
+    0xE7B9)           /* type = VRR_D VECTOR ADD WITH CARRY COMPUTE CARRY  */  \
+  V(vac, VAC, 0xE7BB) /* type = VRR_D VECTOR ADD WITH CARRY  */                \
+  V(vgfma, VGFMA,                                                              \
+    0xE7BC) /* type = VRR_D VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */ \
+  V(vsbcbi, VSBCBI, 0xE7BD) /* type = VRR_D VECTOR SUBTRACT WITH BORROW     */ \
+                            /* COMPUTE BORROW INDICATION  */                   \
+  V(vsbi, VSBI,                                                                \
+    0xE7BF) /* type = VRR_D VECTOR SUBTRACT WITH BORROW INDICATION  */
+
+#define S390_VRI_B_OPCODE_LIST(V) \
+  V(vgm, VGM, 0xE746) /* type = VRI_B VECTOR GENERATE MASK  */
+
+#define S390_VRR_E_OPCODE_LIST(V)                                             \
+  V(vperm, VPERM, 0xE78C) /* type = VRR_E VECTOR PERMUTE  */                  \
+  V(vsel, VSEL, 0xE78D)   /* type = VRR_E VECTOR SELECT  */                   \
+  V(vfms, VFMS, 0xE78E)   /* type = VRR_E VECTOR FP MULTIPLY AND SUBTRACT  */ \
+  V(vfma, VFMA, 0xE78F)   /* type = VRR_E VECTOR FP MULTIPLY AND ADD  */
+
+#define S390_VRI_C_OPCODE_LIST(V) \
+  V(vrep, VREP, 0xE74D) /* type = VRI_C VECTOR REPLICATE  */
+
+#define S390_VRI_D_OPCODE_LIST(V)                                           \
+  V(verim, VERIM,                                                           \
+    0xE772) /* type = VRI_D VECTOR ELEMENT ROTATE AND INSERT UNDER MASK  */ \
+  V(vsldb, VSLDB, 0xE777) /* type = VRI_D VECTOR SHIFT LEFT DOUBLE BY BYTE  */
+
+#define S390_VRR_F_OPCODE_LIST(V) \
+  V(vlvgp, VLVGP, 0xE762) /* type = VRR_F VECTOR LOAD VR FROM GRS DISJOINT  */
+
+#define S390_RIS_OPCODE_LIST(V)                                                \
+  V(cgib, CGIB,                                                                \
+    0xECFC) /* type = RIS   COMPARE IMMEDIATE AND BRANCH (64<-8)  */           \
+  V(clgib, CLGIB,                                                              \
+    0xECFD) /* type = RIS   COMPARE LOGICAL IMMEDIATE AND BRANCH (64<-8)  */   \
+  V(cib, CIB, 0xECFE) /* type = RIS   COMPARE IMMEDIATE AND BRANCH (32<-8)  */ \
+  V(clib, CLIB,                                                                \
+    0xECFF) /* type = RIS   COMPARE LOGICAL IMMEDIATE AND BRANCH (32<-8)  */
+
+#define S390_VRI_E_OPCODE_LIST(V) \
+  V(vftci, VFTCI,                 \
+    0xE74A) /* type = VRI_E VECTOR FP TEST DATA CLASS IMMEDIATE  */
+
+#define S390_RSL_A_OPCODE_LIST(V) \
+  V(tp, TP, 0xEBC0) /* type = RSL_A TEST DECIMAL  */
+
+#define S390_RSL_B_OPCODE_LIST(V)                                             \
+  V(cpdt, CPDT, 0xEDAC) /* type = RSL_B CONVERT TO PACKED (from long DFP)  */ \
+  V(cpxt, CPXT,                                                               \
+    0xEDAD) /* type = RSL_B CONVERT TO PACKED (from extended DFP)  */         \
+  V(cdpt, CDPT, 0xEDAE) /* type = RSL_B CONVERT FROM PACKED (to long DFP)  */ \
+  V(cxpt, CXPT,                                                               \
+    0xEDAF) /* type = RSL_B CONVERT FROM PACKED (to extended DFP)  */
+
+#define S390_SI_OPCODE_LIST(V)                                          \
+  V(tm, TM, 0x91)       /* type = SI    TEST UNDER MASK  */             \
+  V(mvi, MVI, 0x92)     /* type = SI    MOVE (immediate)  */            \
+  V(ni, NI, 0x94)       /* type = SI    AND (immediate)  */             \
+  V(cli, CLI, 0x95)     /* type = SI    COMPARE LOGICAL (immediate)  */ \
+  V(oi, OI, 0x96)       /* type = SI    OR (immediate)  */              \
+  V(xi, XI, 0x97)       /* type = SI    EXCLUSIVE OR (immediate)  */    \
+  V(stnsm, STNSM, 0xAC) /* type = SI    STORE THEN AND SYSTEM MASK  */  \
+  V(stosm, STOSM, 0xAD) /* type = SI    STORE THEN OR SYSTEM MASK  */   \
+  V(mc, MC, 0xAF)       /* type = SI    MONITOR CALL  */
+
+#define S390_SIL_OPCODE_LIST(V)                                                \
+  V(mvhhi, MVHHI, 0xE544) /* type = SIL   MOVE (16<-16)  */                    \
+  V(mvghi, MVGHI, 0xE548) /* type = SIL   MOVE (64<-16)  */                    \
+  V(mvhi, MVHI, 0xE54C)   /* type = SIL   MOVE (32<-16)  */                    \
+  V(chhsi, CHHSI,                                                              \
+    0xE554) /* type = SIL   COMPARE HALFWORD IMMEDIATE (16<-16)  */            \
+  V(clhhsi, CLHHSI,                                                            \
+    0xE555) /* type = SIL   COMPARE LOGICAL IMMEDIATE (16<-16)  */             \
+  V(cghsi, CGHSI,                                                              \
+    0xE558) /* type = SIL   COMPARE HALFWORD IMMEDIATE (64<-16)  */            \
+  V(clghsi, CLGHSI,                                                            \
+    0xE559)             /* type = SIL   COMPARE LOGICAL IMMEDIATE (64<-16)  */ \
+  V(chsi, CHSI, 0xE55C) /* type = SIL   COMPARE HALFWORD IMMEDIATE (32<-16) */ \
+  V(clfhsi, CLFHSI,                                                            \
+    0xE55D) /* type = SIL   COMPARE LOGICAL IMMEDIATE (32<-16)  */             \
+  V(tbegin, TBEGIN,                                                            \
+    0xE560) /* type = SIL   TRANSACTION BEGIN (nonconstrained)  */             \
+  V(tbeginc, TBEGINC,                                                          \
+    0xE561) /* type = SIL   TRANSACTION BEGIN (constrained)  */
+
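+// Grouping opcodes by format also lets one list drive format-specific
+// logic. A hedged sketch (names are illustrative; lists expanded into a
+// switch must be alias-free, since duplicate case values do not compile):
+//
+//   #define DECLARE_CASE(name, opcode_name, opcode_value) case opcode_name:
+//   int InstructionLength(Opcode op) {
+//     switch (op) {
+//       S390_SI_OPCODE_LIST(DECLARE_CASE) return 4;   // SI is 4 bytes.
+//       S390_SIL_OPCODE_LIST(DECLARE_CASE) return 6;  // SIL is 6 bytes.
+//       default: return -1;
+//     }
+//   }
+//   #undef DECLARE_CASE
+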
+#define S390_VRS_A_OPCODE_LIST(V)                                            \
+  V(vesl, VESL, 0xE730) /* type = VRS_A VECTOR ELEMENT SHIFT LEFT  */        \
+  V(verll, VERLL,                                                            \
+    0xE733)           /* type = VRS_A VECTOR ELEMENT ROTATE LEFT LOGICAL  */ \
+  V(vlm, VLM, 0xE736) /* type = VRS_A VECTOR LOAD MULTIPLE  */               \
+  V(vesrl, VESRL,                                                            \
+    0xE738) /* type = VRS_A VECTOR ELEMENT SHIFT RIGHT LOGICAL  */           \
+  V(vesra, VESRA,                                                            \
+    0xE73A) /* type = VRS_A VECTOR ELEMENT SHIFT RIGHT ARITHMETIC  */        \
+  V(vstm, VSTM, 0xE73E) /* type = VRS_A VECTOR STORE MULTIPLE  */
+
+#define S390_RIL_A_OPCODE_LIST(V)                                              \
+  V(lgfi, LGFI, 0xC01)   /* type = RIL_A LOAD IMMEDIATE (64<-32)  */           \
+  V(xihf, XIHF, 0xC06)   /* type = RIL_A EXCLUSIVE OR IMMEDIATE (high)  */     \
+  V(xilf, XILF, 0xC07)   /* type = RIL_A EXCLUSIVE OR IMMEDIATE (low)  */      \
+  V(iihf, IIHF, 0xC08)   /* type = RIL_A INSERT IMMEDIATE (high)  */           \
+  V(iilf, IILF, 0xC09)   /* type = RIL_A INSERT IMMEDIATE (low)  */            \
+  V(nihf, NIHF, 0xC0A)   /* type = RIL_A AND IMMEDIATE (high)  */              \
+  V(nilf, NILF, 0xC0B)   /* type = RIL_A AND IMMEDIATE (low)  */               \
+  V(oihf, OIHF, 0xC0C)   /* type = RIL_A OR IMMEDIATE (high)  */               \
+  V(oilf, OILF, 0xC0D)   /* type = RIL_A OR IMMEDIATE (low)  */                \
+  V(llihf, LLIHF, 0xC0E) /* type = RIL_A LOAD LOGICAL IMMEDIATE (high)  */     \
+  V(llilf, LLILF, 0xC0F) /* type = RIL_A LOAD LOGICAL IMMEDIATE (low)  */      \
+  V(msgfi, MSGFI, 0xC20) /* type = RIL_A MULTIPLY SINGLE IMMEDIATE (64<-32) */ \
+  V(msfi, MSFI, 0xC21)   /* type = RIL_A MULTIPLY SINGLE IMMEDIATE (32)  */    \
+  V(slgfi, SLGFI,                                                              \
+    0xC24)             /* type = RIL_A SUBTRACT LOGICAL IMMEDIATE (64<-32)  */ \
+  V(slfi, SLFI, 0xC25) /* type = RIL_A SUBTRACT LOGICAL IMMEDIATE (32)  */     \
+  V(agfi, AGFI, 0xC28) /* type = RIL_A ADD IMMEDIATE (64<-32)  */              \
+  V(afi, AFI, 0xC29)   /* type = RIL_A ADD IMMEDIATE (32)  */                  \
+  V(algfi, ALGFI, 0xC2A) /* type = RIL_A ADD LOGICAL IMMEDIATE (64<-32)  */    \
+  V(alfi, ALFI, 0xC2B)   /* type = RIL_A ADD LOGICAL IMMEDIATE (32)  */        \
+  V(cgfi, CGFI, 0xC2C)   /* type = RIL_A COMPARE IMMEDIATE (64<-32)  */        \
+  V(cfi, CFI, 0xC2D)     /* type = RIL_A COMPARE IMMEDIATE (32)  */            \
+  V(clgfi, CLGFI, 0xC2E) /* type = RIL_A COMPARE LOGICAL IMMEDIATE (64<-32) */ \
+  V(clfi, CLFI, 0xC2F)   /* type = RIL_A COMPARE LOGICAL IMMEDIATE (32)  */    \
+  V(aih, AIH, 0xCC8)     /* type = RIL_A ADD IMMEDIATE HIGH (32)  */           \
+  V(alsih, ALSIH,                                                              \
+    0xCCA) /* type = RIL_A ADD LOGICAL WITH SIGNED IMMEDIATE HIGH (32)  */     \
+  V(alsihn, ALSIHN,                                                            \
+    0xCCB) /* type = RIL_A ADD LOGICAL WITH SIGNED IMMEDIATE HIGH (32)  */     \
+  V(cih, CIH, 0xCCD)   /* type = RIL_A COMPARE IMMEDIATE HIGH (32)  */         \
+  V(clih, CLIH, 0xCCF) /* type = RIL_A COMPARE LOGICAL IMMEDIATE HIGH (32)  */
+
+#define S390_RIL_B_OPCODE_LIST(V)                                              \
+  V(larl, LARL, 0xC00)   /* type = RIL_B LOAD ADDRESS RELATIVE LONG  */        \
+  V(brasl, BRASL, 0xC05) /* type = RIL_B BRANCH RELATIVE AND SAVE LONG  */     \
+  V(llhrl, LLHRL,                                                              \
+    0xC42) /* type = RIL_B LOAD LOGICAL HALFWORD RELATIVE LONG (32<-16)  */    \
+  V(lghrl, LGHRL,                                                              \
+    0xC44) /* type = RIL_B LOAD HALFWORD RELATIVE LONG (64<-16)  */            \
+  V(lhrl, LHRL, 0xC45) /* type = RIL_B LOAD HALFWORD RELATIVE LONG (32<-16) */ \
+  V(llghrl, LLGHRL,                                                            \
+    0xC46) /* type = RIL_B LOAD LOGICAL HALFWORD RELATIVE LONG (64<-16)  */    \
+  V(sthrl, STHRL, 0xC47) /* type = RIL_B STORE HALFWORD RELATIVE LONG (16)  */ \
+  V(lgrl, LGRL, 0xC48)   /* type = RIL_B LOAD RELATIVE LONG (64)  */           \
+  V(stgrl, STGRL, 0xC4B) /* type = RIL_B STORE RELATIVE LONG (64)  */          \
+  V(lgfrl, LGFRL, 0xC4C) /* type = RIL_B LOAD RELATIVE LONG (64<-32)  */       \
+  V(lrl, LRL, 0xC4D)     /* type = RIL_B LOAD RELATIVE LONG (32)  */           \
+  V(llgfrl, LLGFRL,                                                            \
+    0xC4E)             /* type = RIL_B LOAD LOGICAL RELATIVE LONG (64<-32)  */ \
+  V(strl, STRL, 0xC4F) /* type = RIL_B STORE RELATIVE LONG (32)  */            \
+  V(exrl, EXRL, 0xC60) /* type = RIL_B EXECUTE RELATIVE LONG  */               \
+  V(cghrl, CGHRL,                                                              \
+    0xC64) /* type = RIL_B COMPARE HALFWORD RELATIVE LONG (64<-16)  */         \
+  V(chrl, CHRL,                                                                \
+    0xC65) /* type = RIL_B COMPARE HALFWORD RELATIVE LONG (32<-16)  */         \
+  V(clghrl, CLGHRL,                                                            \
+    0xC66) /* type = RIL_B COMPARE LOGICAL RELATIVE LONG (64<-16)  */          \
+  V(clhrl, CLHRL,                                                              \
+    0xC67) /* type = RIL_B COMPARE LOGICAL RELATIVE LONG (32<-16)  */          \
+  V(cgrl, CGRL, 0xC68)   /* type = RIL_B COMPARE RELATIVE LONG (64)  */        \
+  V(clgrl, CLGRL, 0xC6A) /* type = RIL_B COMPARE LOGICAL RELATIVE LONG (64) */ \
+  V(cgfrl, CGFRL, 0xC6C) /* type = RIL_B COMPARE RELATIVE LONG (64<-32)  */    \
+  V(crl, CRL, 0xC6D)     /* type = RIL_B COMPARE RELATIVE LONG (32)  */        \
+  V(clgfrl, CLGFRL,                                                            \
+    0xC6E) /* type = RIL_B COMPARE LOGICAL RELATIVE LONG (64<-32)  */          \
+  V(clrl, CLRL, 0xC6F) /* type = RIL_B COMPARE LOGICAL RELATIVE LONG (32)  */  \
+  V(brcth, BRCTH, 0xCC6) /* type = RIL_B BRANCH RELATIVE ON COUNT HIGH (32) */
+
+#define S390_VRS_B_OPCODE_LIST(V)                                          \
+  V(vlvg, VLVG, 0xE722) /* type = VRS_B VECTOR LOAD VR ELEMENT FROM GR  */ \
+  V(vll, VLL, 0xE737)   /* type = VRS_B VECTOR LOAD WITH LENGTH  */        \
+  V(vstl, VSTL, 0xE73F) /* type = VRS_B VECTOR STORE WITH LENGTH  */
+
+#define S390_RIL_C_OPCODE_LIST(V)                                              \
+  V(brcl, BRCL, 0xC04)   /* type = RIL_C BRANCH RELATIVE ON CONDITION LONG  */ \
+  V(pfdrl, PFDRL, 0xC62) /* type = RIL_C PREFETCH DATA RELATIVE LONG  */
+
+#define S390_VRS_C_OPCODE_LIST(V) \
+  V(vlgv, VLGV, 0xE721) /* type = VRS_C VECTOR LOAD GR FROM VR ELEMENT  */
+
+#define S390_RI_A_OPCODE_LIST(V)                                               \
+  V(iihh, IIHH, 0xA50)   /* type = RI_A  INSERT IMMEDIATE (high high)  */      \
+  V(iihl, IIHL, 0xA51)   /* type = RI_A  INSERT IMMEDIATE (high low)  */       \
+  V(iilh, IILH, 0xA52)   /* type = RI_A  INSERT IMMEDIATE (low high)  */       \
+  V(iill, IILL, 0xA53)   /* type = RI_A  INSERT IMMEDIATE (low low)  */        \
+  V(nihh, NIHH, 0xA54)   /* type = RI_A  AND IMMEDIATE (high high)  */         \
+  V(nihl, NIHL, 0xA55)   /* type = RI_A  AND IMMEDIATE (high low)  */          \
+  V(nilh, NILH, 0xA56)   /* type = RI_A  AND IMMEDIATE (low high)  */          \
+  V(nill, NILL, 0xA57)   /* type = RI_A  AND IMMEDIATE (low low)  */           \
+  V(oihh, OIHH, 0xA58)   /* type = RI_A  OR IMMEDIATE (high high)  */          \
+  V(oihl, OIHL, 0xA59)   /* type = RI_A  OR IMMEDIATE (high low)  */           \
+  V(oilh, OILH, 0xA5A)   /* type = RI_A  OR IMMEDIATE (low high)  */           \
+  V(oill, OILL, 0xA5B)   /* type = RI_A  OR IMMEDIATE (low low)  */            \
+  V(llihh, LLIHH, 0xA5C) /* type = RI_A  LOAD LOGICAL IMMEDIATE (high high) */ \
+  V(llihl, LLIHL, 0xA5D) /* type = RI_A  LOAD LOGICAL IMMEDIATE (high low)  */ \
+  V(llilh, LLILH, 0xA5E) /* type = RI_A  LOAD LOGICAL IMMEDIATE (low high)  */ \
+  V(llill, LLILL, 0xA5F) /* type = RI_A  LOAD LOGICAL IMMEDIATE (low low)  */  \
+  V(tmlh, TMLH, 0xA70)   /* type = RI_A  TEST UNDER MASK (low high)  */        \
+  V(tmh, TMH, 0xA70)     /* type = RI_A  TEST UNDER MASK HIGH  */              \
+  V(tmll, TMLL, 0xA71)   /* type = RI_A  TEST UNDER MASK (low low)  */         \
+  V(tml, TML, 0xA71)     /* type = RI_A  TEST UNDER MASK LOW  */               \
+  V(tmhh, TMHH, 0xA72)   /* type = RI_A  TEST UNDER MASK (high high)  */       \
+  V(tmhl, TMHL, 0xA73)   /* type = RI_A  TEST UNDER MASK (high low)  */        \
+  V(lhi, LHI, 0xA78)     /* type = RI_A  LOAD HALFWORD IMMEDIATE (32<-16)  */  \
+  V(lghi, LGHI, 0xA79)   /* type = RI_A  LOAD HALFWORD IMMEDIATE (64<-16)  */  \
+  V(ahi, AHI, 0xA7A)     /* type = RI_A  ADD HALFWORD IMMEDIATE (32<-16)  */   \
+  V(aghi, AGHI, 0xA7B)   /* type = RI_A  ADD HALFWORD IMMEDIATE (64<-16)  */   \
+  V(mhi, MHI, 0xA7C) /* type = RI_A  MULTIPLY HALFWORD IMMEDIATE (32<-16)  */  \
+  V(mghi, MGHI, 0xA7D) /* type = RI_A  MULTIPLY HALFWORD IMMEDIATE (64<-16) */ \
+  V(chi, CHI, 0xA7E)   /* type = RI_A  COMPARE HALFWORD IMMEDIATE (32<-16)  */ \
+  V(cghi, CGHI, 0xA7F) /* type = RI_A  COMPARE HALFWORD IMMEDIATE (64<-16)  */
+
+#define S390_RSI_OPCODE_LIST(V)                                              \
+  V(brxh, BRXH, 0x84) /* type = RSI   BRANCH RELATIVE ON INDEX HIGH (32)  */ \
+  V(brxle, BRXLE,                                                            \
+    0x85) /* type = RSI   BRANCH RELATIVE ON INDEX LOW OR EQ. (32)  */
+
+#define S390_RI_B_OPCODE_LIST(V)                                           \
+  V(bras, BRAS, 0xA75)   /* type = RI_B  BRANCH RELATIVE AND SAVE  */      \
+  V(brct, BRCT, 0xA76)   /* type = RI_B  BRANCH RELATIVE ON COUNT (32)  */ \
+  V(brctg, BRCTG, 0xA77) /* type = RI_B  BRANCH RELATIVE ON COUNT (64)  */
+
+#define S390_RI_C_OPCODE_LIST(V) \
+  V(brc, BRC, 0xA74) /* type = RI_C BRANCH RELATIVE ON CONDITION  */
+
+#define S390_RSL_OPCODE_LIST(V)                                                \
+  V(czdt, CZDT, 0xEDA8) /* type = RSL CONVERT TO ZONED (from long DFP)  */     \
+  V(czxt, CZXT, 0xEDA9) /* type = RSL CONVERT TO ZONED (from extended DFP)  */ \
+  V(cdzt, CDZT, 0xEDAA) /* type = RSL CONVERT FROM ZONED (to long DFP)  */     \
+  V(cxzt, CXZT, 0xEDAB) /* type = RSL CONVERT FROM ZONED (to extended DFP) */
+
+#define S390_SMI_OPCODE_LIST(V) \
+  V(bpp, BPP, 0xC7) /* type = SMI   BRANCH PREDICTION PRELOAD  */
+
+#define S390_RXY_A_OPCODE_LIST(V)                                              \
+  V(ltg, LTG, 0xE302)   /* type = RXY_A LOAD AND TEST (64)  */                 \
+  V(lrag, LRAG, 0xE303) /* type = RXY_A LOAD REAL ADDRESS (64)  */             \
+  V(lg, LG, 0xE304)     /* type = RXY_A LOAD (64)  */                          \
+  V(cvby, CVBY, 0xE306) /* type = RXY_A CONVERT TO BINARY (32)  */             \
+  V(ag, AG, 0xE308)     /* type = RXY_A ADD (64)  */                           \
+  V(sg, SG, 0xE309)     /* type = RXY_A SUBTRACT (64)  */                      \
+  V(alg, ALG, 0xE30A)   /* type = RXY_A ADD LOGICAL (64)  */                   \
+  V(slg, SLG, 0xE30B)   /* type = RXY_A SUBTRACT LOGICAL (64)  */              \
+  V(msg, MSG, 0xE30C)   /* type = RXY_A MULTIPLY SINGLE (64)  */               \
+  V(dsg, DSG, 0xE30D)   /* type = RXY_A DIVIDE SINGLE (64)  */                 \
+  V(cvbg, CVBG, 0xE30E) /* type = RXY_A CONVERT TO BINARY (64)  */             \
+  V(lrvg, LRVG, 0xE30F) /* type = RXY_A LOAD REVERSED (64)  */                 \
+  V(lt_z, LT, 0xE312)   /* type = RXY_A LOAD AND TEST (32)  */                 \
+  V(lray, LRAY, 0xE313) /* type = RXY_A LOAD REAL ADDRESS (32)  */             \
+  V(lgf, LGF, 0xE314)   /* type = RXY_A LOAD (64<-32)  */                      \
+  V(lgh, LGH, 0xE315)   /* type = RXY_A LOAD HALFWORD (64<-16)  */             \
+  V(llgf, LLGF, 0xE316) /* type = RXY_A LOAD LOGICAL (64<-32)  */              \
+  V(llgt, LLGT,                                                                \
+    0xE317) /* type = RXY_A LOAD LOGICAL THIRTY ONE BITS (64<-31)  */          \
+  V(agf, AGF, 0xE318)     /* type = RXY_A ADD (64<-32)  */                     \
+  V(sgf, SGF, 0xE319)     /* type = RXY_A SUBTRACT (64<-32)  */                \
+  V(algf, ALGF, 0xE31A)   /* type = RXY_A ADD LOGICAL (64<-32)  */             \
+  V(slgf, SLGF, 0xE31B)   /* type = RXY_A SUBTRACT LOGICAL (64<-32)  */        \
+  V(msgf, MSGF, 0xE31C)   /* type = RXY_A MULTIPLY SINGLE (64<-32)  */         \
+  V(dsgf, DSGF, 0xE31D)   /* type = RXY_A DIVIDE SINGLE (64<-32)  */           \
+  V(lrv, LRV, 0xE31E)     /* type = RXY_A LOAD REVERSED (32)  */               \
+  V(lrvh, LRVH, 0xE31F)   /* type = RXY_A LOAD REVERSED (16)  */               \
+  V(cg, CG, 0xE320)       /* type = RXY_A COMPARE (64)  */                     \
+  V(clg, CLG, 0xE321)     /* type = RXY_A COMPARE LOGICAL (64)  */             \
+  V(stg, STG, 0xE324)     /* type = RXY_A STORE (64)  */                       \
+  V(ntstg, NTSTG, 0xE325) /* type = RXY_A NONTRANSACTIONAL STORE (64)  */      \
+  V(cvdy, CVDY, 0xE326)   /* type = RXY_A CONVERT TO DECIMAL (32)  */          \
+  V(lzrg, LZRG, 0xE32A) /* type = RXY_A LOAD AND ZERO RIGHTMOST BYTE (64)  */  \
+  V(cvdg, CVDG, 0xE32E) /* type = RXY_A CONVERT TO DECIMAL (64)  */            \
+  V(strvg, STRVG, 0xE32F) /* type = RXY_A STORE REVERSED (64)  */              \
+  V(cgf, CGF, 0xE330)     /* type = RXY_A COMPARE (64<-32)  */                 \
+  V(clgf, CLGF, 0xE331)   /* type = RXY_A COMPARE LOGICAL (64<-32)  */         \
+  V(ltgf, LTGF, 0xE332)   /* type = RXY_A LOAD AND TEST (64<-32)  */           \
+  V(cgh, CGH, 0xE334)     /* type = RXY_A COMPARE HALFWORD (64<-16)  */        \
+  V(llzrgf, LLZRGF,                                                            \
+    0xE33A) /* type = RXY_A LOAD LOGICAL AND ZERO RIGHTMOST BYTE (64<-32)  */  \
+  V(lzrf, LZRF, 0xE33B) /* type = RXY_A LOAD AND ZERO RIGHTMOST BYTE (32)  */  \
+  V(strv, STRV, 0xE33E) /* type = RXY_A STORE REVERSED (32)  */                \
+  V(strvh, STRVH, 0xE33F) /* type = RXY_A STORE REVERSED (16)  */              \
+  V(bctg, BCTG, 0xE346)   /* type = RXY_A BRANCH ON COUNT (64)  */             \
+  V(sty, STY, 0xE350)     /* type = RXY_A STORE (32)  */                       \
+  V(msy, MSY, 0xE351)     /* type = RXY_A MULTIPLY SINGLE (32)  */             \
+  V(ny, NY, 0xE354)       /* type = RXY_A AND (32)  */                         \
+  V(cly, CLY, 0xE355)     /* type = RXY_A COMPARE LOGICAL (32)  */             \
+  V(oy, OY, 0xE356)       /* type = RXY_A OR (32)  */                          \
+  V(xy, XY, 0xE357)       /* type = RXY_A EXCLUSIVE OR (32)  */                \
+  V(ly, LY, 0xE358)       /* type = RXY_A LOAD (32)  */                        \
+  V(cy, CY, 0xE359)       /* type = RXY_A COMPARE (32)  */                     \
+  V(ay, AY, 0xE35A)       /* type = RXY_A ADD (32)  */                         \
+  V(sy, SY, 0xE35B)       /* type = RXY_A SUBTRACT (32)  */                    \
+  V(mfy, MFY, 0xE35C)     /* type = RXY_A MULTIPLY (64<-32)  */                \
+  V(aly, ALY, 0xE35E)     /* type = RXY_A ADD LOGICAL (32)  */                 \
+  V(sly, SLY, 0xE35F)     /* type = RXY_A SUBTRACT LOGICAL (32)  */            \
+  V(sthy, STHY, 0xE370)   /* type = RXY_A STORE HALFWORD (16)  */              \
+  V(lay, LAY, 0xE371)     /* type = RXY_A LOAD ADDRESS  */                     \
+  V(stcy, STCY, 0xE372)   /* type = RXY_A STORE CHARACTER  */                  \
+  V(icy, ICY, 0xE373)     /* type = RXY_A INSERT CHARACTER  */                 \
+  V(laey, LAEY, 0xE375)   /* type = RXY_A LOAD ADDRESS EXTENDED  */            \
+  V(lb, LB, 0xE376)       /* type = RXY_A LOAD BYTE (32<-8)  */                \
+  V(lgb, LGB, 0xE377)     /* type = RXY_A LOAD BYTE (64<-8)  */                \
+  V(lhy, LHY, 0xE378)     /* type = RXY_A LOAD HALFWORD (32<-16)  */           \
+  V(chy, CHY, 0xE379)     /* type = RXY_A COMPARE HALFWORD (32<-16)  */        \
+  V(ahy, AHY, 0xE37A)     /* type = RXY_A ADD HALFWORD (32<-16)  */            \
+  V(shy, SHY, 0xE37B)     /* type = RXY_A SUBTRACT HALFWORD (32<-16)  */       \
+  V(mhy, MHY, 0xE37C)     /* type = RXY_A MULTIPLY HALFWORD (32<-16)  */       \
+  V(ng, NG, 0xE380)       /* type = RXY_A AND (64)  */                         \
+  V(og, OG, 0xE381)       /* type = RXY_A OR (64)  */                          \
+  V(xg, XG, 0xE382)       /* type = RXY_A EXCLUSIVE OR (64)  */                \
+  V(lgat, LGAT, 0xE385)   /* type = RXY_A LOAD AND TRAP (64)  */               \
+  V(mlg, MLG, 0xE386)     /* type = RXY_A MULTIPLY LOGICAL (128<-64)  */       \
+  V(dlg, DLG, 0xE387)     /* type = RXY_A DIVIDE LOGICAL (64<-128)  */         \
+  V(alcg, ALCG, 0xE388)   /* type = RXY_A ADD LOGICAL WITH CARRY (64)  */      \
+  V(slbg, SLBG, 0xE389) /* type = RXY_A SUBTRACT LOGICAL WITH BORROW (64)  */  \
+  V(stpq, STPQ, 0xE38E) /* type = RXY_A STORE PAIR TO QUADWORD  */             \
+  V(lpq, LPQ, 0xE38F) /* type = RXY_A LOAD PAIR FROM QUADWORD (64&64<-128)  */ \
+  V(llgc, LLGC, 0xE390) /* type = RXY_A LOAD LOGICAL CHARACTER (64<-8)  */     \
+  V(llgh, LLGH, 0xE391) /* type = RXY_A LOAD LOGICAL HALFWORD (64<-16)  */     \
+  V(llc, LLC, 0xE394)   /* type = RXY_A LOAD LOGICAL CHARACTER (32<-8)  */     \
+  V(llh, LLH, 0xE395)   /* type = RXY_A LOAD LOGICAL HALFWORD (32<-16)  */     \
+  V(ml, ML, 0xE396)     /* type = RXY_A MULTIPLY LOGICAL (64<-32)  */          \
+  V(dl, DL, 0xE397)     /* type = RXY_A DIVIDE LOGICAL (32<-64)  */            \
+  V(alc, ALC, 0xE398)   /* type = RXY_A ADD LOGICAL WITH CARRY (32)  */        \
+  V(slb, SLB, 0xE399)   /* type = RXY_A SUBTRACT LOGICAL WITH BORROW (32)  */  \
+  V(llgtat, LLGTAT,                                                            \
+    0xE39C) /* type = RXY_A LOAD LOGICAL THIRTY ONE BITS AND TRAP (64<-31)  */ \
+  V(llgfat, LLGFAT, 0xE39D) /* type = RXY_A LOAD LOGICAL AND TRAP (64<-32)  */ \
+  V(lat, LAT, 0xE39F)       /* type = RXY_A LOAD AND TRAP (32L<-32)  */        \
+  V(lbh, LBH, 0xE3C0)       /* type = RXY_A LOAD BYTE HIGH (32<-8)  */         \
+  V(llch, LLCH, 0xE3C2) /* type = RXY_A LOAD LOGICAL CHARACTER HIGH (32<-8) */ \
+  V(stch, STCH, 0xE3C3) /* type = RXY_A STORE CHARACTER HIGH (8)  */           \
+  V(lhh, LHH, 0xE3C4)   /* type = RXY_A LOAD HALFWORD HIGH (32<-16)  */        \
+  V(llhh, LLHH, 0xE3C6) /* type = RXY_A LOAD LOGICAL HALFWORD HIGH (32<-16) */ \
+  V(sthh, STHH, 0xE3C7) /* type = RXY_A STORE HALFWORD HIGH (16)  */           \
+  V(lfhat, LFHAT, 0xE3C8) /* type = RXY_A LOAD HIGH AND TRAP (32H<-32)  */     \
+  V(lfh, LFH, 0xE3CA)     /* type = RXY_A LOAD HIGH (32)  */                   \
+  V(stfh, STFH, 0xE3CB)   /* type = RXY_A STORE HIGH (32)  */                  \
+  V(chf, CHF, 0xE3CD)     /* type = RXY_A COMPARE HIGH (32)  */                \
+  V(clhf, CLHF, 0xE3CF)   /* type = RXY_A COMPARE LOGICAL HIGH (32)  */        \
+  V(ley, LEY, 0xED64)     /* type = RXY_A LOAD (short)  */                     \
+  V(ldy, LDY, 0xED65)     /* type = RXY_A LOAD (long)  */                      \
+  V(stey, STEY, 0xED66)   /* type = RXY_A STORE (short)  */                    \
+  V(stdy, STDY, 0xED67)   /* type = RXY_A STORE (long)  */
+
+#define S390_RXY_B_OPCODE_LIST(V) \
+  V(pfd, PFD, 0xE336) /* type = RXY_B PREFETCH DATA  */
+
+#define S390_SIY_OPCODE_LIST(V)                                           \
+  V(tmy, TMY, 0xEB51)   /* type = SIY   TEST UNDER MASK  */               \
+  V(mviy, MVIY, 0xEB52) /* type = SIY   MOVE (immediate)  */              \
+  V(niy, NIY, 0xEB54)   /* type = SIY   AND (immediate)  */               \
+  V(cliy, CLIY, 0xEB55) /* type = SIY   COMPARE LOGICAL (immediate)  */   \
+  V(oiy, OIY, 0xEB56)   /* type = SIY   OR (immediate)  */                \
+  V(xiy, XIY, 0xEB57)   /* type = SIY   EXCLUSIVE OR (immediate)  */      \
+  V(asi, ASI, 0xEB6A)   /* type = SIY   ADD IMMEDIATE (32<-8)  */         \
+  V(alsi, ALSI,                                                           \
+    0xEB6E) /* type = SIY   ADD LOGICAL WITH SIGNED IMMEDIATE (32<-8)  */ \
+  V(agsi, AGSI, 0xEB7A) /* type = SIY   ADD IMMEDIATE (64<-8)  */         \
+  V(algsi, ALGSI,                                                         \
+    0xEB7E) /* type = SIY   ADD LOGICAL WITH SIGNED IMMEDIATE (64<-8)  */
+
+#define S390_SS_A_OPCODE_LIST(V)                                        \
+  V(trtr, TRTR, 0xD0)   /* type = SS_A  TRANSLATE AND TEST REVERSE  */  \
+  V(mvn, MVN, 0xD1)     /* type = SS_A  MOVE NUMERICS  */               \
+  V(mvc, MVC, 0xD2)     /* type = SS_A  MOVE (character)  */            \
+  V(mvz, MVZ, 0xD3)     /* type = SS_A  MOVE ZONES  */                  \
+  V(nc, NC, 0xD4)       /* type = SS_A  AND (character)  */             \
+  V(clc, CLC, 0xD5)     /* type = SS_A  COMPARE LOGICAL (character)  */ \
+  V(oc, OC, 0xD6)       /* type = SS_A  OR (character)  */              \
+  V(xc, XC, 0xD7)       /* type = SS_A  EXCLUSIVE OR (character)  */    \
+  V(tr, TR, 0xDC)       /* type = SS_A  TRANSLATE  */                   \
+  V(trt, TRT, 0xDD)     /* type = SS_A  TRANSLATE AND TEST  */          \
+  V(ed, ED, 0xDE)       /* type = SS_A  EDIT  */                        \
+  V(edmk, EDMK, 0xDF)   /* type = SS_A  EDIT AND MARK  */               \
+  V(unpku, UNPKU, 0xE2) /* type = SS_A  UNPACK UNICODE  */              \
+  V(mvcin, MVCIN, 0xE8) /* type = SS_A  MOVE INVERSE  */                \
+  V(unpka, UNPKA, 0xEA) /* type = SS_A  UNPACK ASCII  */
+
+#define S390_E_OPCODE_LIST(V)                                                  \
+  V(pr, PR, 0x0101)       /* type = E     PROGRAM RETURN  */                   \
+  V(upt, UPT, 0x0102)     /* type = E     UPDATE TREE  */                      \
+  V(ptff, PTFF, 0x0104)   /* type = E     PERFORM TIMING FACILITY FUNCTION  */ \
+  V(sckpf, SCKPF, 0x0107) /* type = E     SET CLOCK PROGRAMMABLE FIELD  */     \
+  V(pfpo, PFPO, 0x010A)   /* type = E     PERFORM FLOATING-POINT OPERATION  */ \
+  V(tam, TAM, 0x010B)     /* type = E     TEST ADDRESSING MODE  */             \
+  V(sam24, SAM24, 0x010C) /* type = E     SET ADDRESSING MODE (24)  */         \
+  V(sam31, SAM31, 0x010D) /* type = E     SET ADDRESSING MODE (31)  */         \
+  V(sam64, SAM64, 0x010E) /* type = E     SET ADDRESSING MODE (64)  */         \
+  V(trap2, TRAP2, 0x01FF) /* type = E     TRAP  */
+
+#define S390_SS_B_OPCODE_LIST(V)                           \
+  V(mvo, MVO, 0xF1)   /* type = SS_B  MOVE WITH OFFSET  */ \
+  V(pack, PACK, 0xF2) /* type = SS_B  PACK  */             \
+  V(unpk, UNPK, 0xF3) /* type = SS_B  UNPACK  */           \
+  V(zap, ZAP, 0xF8)   /* type = SS_B  ZERO AND ADD  */     \
+  V(cp, CP, 0xF9)     /* type = SS_B  COMPARE DECIMAL  */  \
+  V(ap, AP, 0xFA)     /* type = SS_B  ADD DECIMAL  */      \
+  V(sp, SP, 0xFB)     /* type = SS_B  SUBTRACT DECIMAL  */ \
+  V(mp, MP, 0xFC)     /* type = SS_B  MULTIPLY DECIMAL  */ \
+  V(dp, DP, 0xFD)     /* type = SS_B  DIVIDE DECIMAL  */
+
+#define S390_SS_C_OPCODE_LIST(V) \
+  V(srp, SRP, 0xF0) /* type = SS_C  SHIFT AND ROUND DECIMAL  */
+
+#define S390_SS_D_OPCODE_LIST(V)                          \
+  V(mvck, MVCK, 0xD9) /* type = SS_D  MOVE WITH KEY  */   \
+  V(mvcp, MVCP, 0xDA) /* type = SS_D  MOVE TO PRIMARY  */ \
+  V(mvcs, MVCS, 0xDB) /* type = SS_D  MOVE TO SECONDARY  */
+
+#define S390_SS_E_OPCODE_LIST(V)                                 \
+  V(plo, PLO, 0xEE) /* type = SS_E  PERFORM LOCKED OPERATION  */ \
+  V(lmd, LMD, 0xEF) /* type = SS_E  LOAD MULTIPLE DISJOINT (64<-32&32)  */
+
+#define S390_I_OPCODE_LIST(V) \
+  V(svc, SVC, 0x0A) /* type = I     SUPERVISOR CALL  */
+
+#define S390_SS_F_OPCODE_LIST(V)                     \
+  V(pku, PKU, 0xE1) /* type = SS_F  PACK UNICODE  */ \
+  V(pka, PKA, 0xE9) /* type = SS_F  PACK ASCII  */
+
+#define S390_SSE_OPCODE_LIST(V)                                             \
+  V(lasp, LASP, 0xE500)   /* type = SSE   LOAD ADDRESS SPACE PARAMETERS  */ \
+  V(tprot, TPROT, 0xE501) /* type = SSE   TEST PROTECTION  */               \
+  V(strag, STRAG, 0xE502) /* type = SSE   STORE REAL ADDRESS  */            \
+  V(mvcsk, MVCSK, 0xE50E) /* type = SSE   MOVE WITH SOURCE KEY  */          \
+  V(mvcdk, MVCDK, 0xE50F) /* type = SSE   MOVE WITH DESTINATION KEY  */
+
+#define S390_SSF_OPCODE_LIST(V)                                                \
+  V(mvcos, MVCOS, 0xC80) /* type = SSF   MOVE WITH OPTIONAL SPECIFICATIONS  */ \
+  V(ectg, ECTG, 0xC81)   /* type = SSF   EXTRACT CPU TIME  */                  \
+  V(csst, CSST, 0xC82)   /* type = SSF   COMPARE AND SWAP AND STORE  */        \
+  V(lpd, LPD, 0xC84)     /* type = SSF   LOAD PAIR DISJOINT (32)  */           \
+  V(lpdg, LPDG, 0xC85)   /* type = SSF   LOAD PAIR DISJOINT (64)  */
+
+#define S390_RS_A_OPCODE_LIST(V)                                              \
+  V(bxh, BXH, 0x86)     /* type = RS_A  BRANCH ON INDEX HIGH (32)  */         \
+  V(bxle, BXLE, 0x87)   /* type = RS_A  BRANCH ON INDEX LOW OR EQUAL (32)  */ \
+  V(srl, SRL, 0x88)     /* type = RS_A  SHIFT RIGHT SINGLE LOGICAL (32)  */   \
+  V(sll, SLL, 0x89)     /* type = RS_A  SHIFT LEFT SINGLE LOGICAL (32)  */    \
+  V(sra, SRA, 0x8A)     /* type = RS_A  SHIFT RIGHT SINGLE (32)  */           \
+  V(sla, SLA, 0x8B)     /* type = RS_A  SHIFT LEFT SINGLE (32)  */            \
+  V(srdl, SRDL, 0x8C)   /* type = RS_A  SHIFT RIGHT DOUBLE LOGICAL (64)  */   \
+  V(sldl, SLDL, 0x8D)   /* type = RS_A  SHIFT LEFT DOUBLE LOGICAL (64)  */    \
+  V(srda, SRDA, 0x8E)   /* type = RS_A  SHIFT RIGHT DOUBLE (64)  */           \
+  V(slda, SLDA, 0x8F)   /* type = RS_A  SHIFT LEFT DOUBLE (64)  */            \
+  V(stm, STM, 0x90)     /* type = RS_A  STORE MULTIPLE (32)  */               \
+  V(lm, LM, 0x98)       /* type = RS_A  LOAD MULTIPLE (32)  */                \
+  V(trace, TRACE, 0x99) /* type = RS_A  TRACE (32)  */                        \
+  V(lam, LAM, 0x9A)     /* type = RS_A  LOAD ACCESS MULTIPLE  */              \
+  V(stam, STAM, 0x9B)   /* type = RS_A  STORE ACCESS MULTIPLE  */             \
+  V(mvcle, MVCLE, 0xA8) /* type = RS_A  MOVE LONG EXTENDED  */                \
+  V(clcle, CLCLE, 0xA9) /* type = RS_A  COMPARE LOGICAL LONG EXTENDED  */     \
+  V(sigp, SIGP, 0xAE)   /* type = RS_A  SIGNAL PROCESSOR  */                  \
+  V(stctl, STCTL, 0xB6) /* type = RS_A  STORE CONTROL (32)  */                \
+  V(lctl, LCTL, 0xB7)   /* type = RS_A  LOAD CONTROL (32)  */                 \
+  V(cs, CS, 0xBA)       /* type = RS_A  COMPARE AND SWAP (32)  */             \
+  V(cds, CDS, 0xBB)     /* type = RS_A  COMPARE DOUBLE AND SWAP (32)  */
+
+#define S390_RS_B_OPCODE_LIST(V)                                               \
+  V(clm, CLM, 0xBD) /* type = RS_B  COMPARE LOGICAL CHAR. UNDER MASK (low)  */ \
+  V(stcm, STCM, 0xBE) /* type = RS_B  STORE CHARACTERS UNDER MASK (low)  */    \
+  V(icm, ICM, 0xBF)   /* type = RS_B  INSERT CHARACTERS UNDER MASK (low)  */
+
+#define S390_S_OPCODE_LIST(V)                                                  \
+  V(awr, AWR, 0x2E)           /* type = RR    ADD UNNORMALIZED (long HFP)  */  \
+  V(lpsw, LPSW, 0x82)         /* type = S     LOAD PSW  */                     \
+  V(diagnose, DIAGNOSE, 0x83) /* type = S     DIAGNOSE  */                     \
+  V(ts, TS, 0x93)             /* type = S     TEST AND SET  */                 \
+  V(stidp, STIDP, 0xB202)     /* type = S     STORE CPU ID  */                 \
+  V(sck, SCK, 0xB204)         /* type = S     SET CLOCK  */                    \
+  V(stck, STCK, 0xB205)       /* type = S     STORE CLOCK  */                  \
+  V(sckc, SCKC, 0xB206)       /* type = S     SET CLOCK COMPARATOR  */         \
+  V(stckc, STCKC, 0xB207)     /* type = S     STORE CLOCK COMPARATOR  */       \
+  V(spt, SPT, 0xB208)         /* type = S     SET CPU TIMER  */                \
+  V(stpt, STPT, 0xB209)       /* type = S     STORE CPU TIMER  */              \
+  V(spka, SPKA, 0xB20A)       /* type = S     SET PSW KEY FROM ADDRESS  */     \
+  V(ipk, IPK, 0xB20B)         /* type = S     INSERT PSW KEY  */               \
+  V(ptlb, PTLB, 0xB20D)       /* type = S     PURGE TLB  */                    \
+  V(spx, SPX, 0xB210)         /* type = S     SET PREFIX  */                   \
+  V(stpx, STPX, 0xB211)       /* type = S     STORE PREFIX  */                 \
+  V(stap, STAP, 0xB212)       /* type = S     STORE CPU ADDRESS  */            \
+  V(pc, PC, 0xB218)           /* type = S     PROGRAM CALL  */                 \
+  V(sac, SAC, 0xB219)         /* type = S     SET ADDRESS SPACE CONTROL  */    \
+  V(cfc, CFC, 0xB21A)         /* type = S     COMPARE AND FORM CODEWORD  */    \
+  V(csch, CSCH, 0xB230)       /* type = S     CLEAR SUBCHANNEL  */             \
+  V(hsch, HSCH, 0xB231)       /* type = S     HALT SUBCHANNEL  */              \
+  V(msch, MSCH, 0xB232)       /* type = S     MODIFY SUBCHANNEL  */            \
+  V(ssch, SSCH, 0xB233)       /* type = S     START SUBCHANNEL  */             \
+  V(stsch, STSCH, 0xB234)     /* type = S     STORE SUBCHANNEL  */             \
+  V(tsch, TSCH, 0xB235)       /* type = S     TEST SUBCHANNEL  */              \
+  V(tpi, TPI, 0xB236)         /* type = S     TEST PENDING INTERRUPTION  */    \
+  V(sal, SAL, 0xB237)         /* type = S     SET ADDRESS LIMIT  */            \
+  V(rsch, RSCH, 0xB238)       /* type = S     RESUME SUBCHANNEL  */            \
+  V(stcrw, STCRW, 0xB239)     /* type = S     STORE CHANNEL REPORT WORD  */    \
+  V(stcps, STCPS, 0xB23A)     /* type = S     STORE CHANNEL PATH STATUS  */    \
+  V(rchp, RCHP, 0xB23B)       /* type = S     RESET CHANNEL PATH  */           \
+  V(schm, SCHM, 0xB23C)       /* type = S     SET CHANNEL MONITOR  */          \
+  V(xsch, XSCH, 0xB276)       /* type = S     CANCEL SUBCHANNEL  */            \
+  V(rp, RP_Z, 0xB277)         /* type = S     RESUME PROGRAM  */               \
+  V(stcke, STCKE, 0xB278)     /* type = S     STORE CLOCK EXTENDED  */         \
+  V(sacf, SACF, 0xB279)     /* type = S     SET ADDRESS SPACE CONTROL FAST  */ \
+  V(stckf, STCKF, 0xB27C)   /* type = S     STORE CLOCK FAST  */               \
+  V(stsi, STSI, 0xB27D)     /* type = S     STORE SYSTEM INFORMATION  */       \
+  V(srnm, SRNM, 0xB299)     /* type = S     SET BFP ROUNDING MODE (2 bit)  */  \
+  V(stfpc, STFPC, 0xB29C)   /* type = S     STORE FPC  */                      \
+  V(lfpc, LFPC, 0xB29D)     /* type = S     LOAD FPC  */                       \
+  V(stfle, STFLE, 0xB2B0)   /* type = S     STORE FACILITY LIST EXTENDED  */   \
+  V(stfl, STFL, 0xB2B1)     /* type = S     STORE FACILITY LIST  */            \
+  V(lpswe, LPSWE, 0xB2B2)   /* type = S     LOAD PSW EXTENDED  */              \
+  V(srnmb, SRNMB, 0xB2B8)   /* type = S     SET BFP ROUNDING MODE (3 bit)  */  \
+  V(srnmt, SRNMT, 0xB2B9)   /* type = S     SET DFP ROUNDING MODE  */          \
+  V(lfas, LFAS, 0xB2BD)     /* type = S     LOAD FPC AND SIGNAL  */            \
+  V(tend, TEND, 0xB2F8)     /* type = S     TRANSACTION END  */                \
+  V(tabort, TABORT, 0xB2FC) /* type = S     TRANSACTION ABORT  */              \
+  V(trap4, TRAP4, 0xB2FF)   /* type = S     TRAP  */
+
+#define S390_RX_A_OPCODE_LIST(V)                                            \
+  V(sth, STH, 0x40)   /* type = RX_A  STORE HALFWORD (16)  */              \
+  V(la, LA, 0x41)     /* type = RX_A  LOAD ADDRESS  */                      \
+  V(stc, STC, 0x42)   /* type = RX_A  STORE CHARACTER  */                   \
+  V(ic_z, IC_z, 0x43) /* type = RX_A  INSERT CHARACTER  */                  \
+  V(ex, EX, 0x44)     /* type = RX_A  EXECUTE  */                           \
+  V(bal, BAL, 0x45)   /* type = RX_A  BRANCH AND LINK  */                   \
+  V(bct, BCT, 0x46)   /* type = RX_A  BRANCH ON COUNT (32)  */              \
+  V(lh, LH, 0x48)     /* type = RX_A  LOAD HALFWORD (32<-16)  */            \
+  V(ch, CH, 0x49)     /* type = RX_A  COMPARE HALFWORD (32<-16)  */         \
+  V(ah, AH, 0x4A)     /* type = RX_A  ADD HALFWORD (32<-16)  */             \
+  V(sh, SH, 0x4B)     /* type = RX_A  SUBTRACT HALFWORD (32<-16)  */        \
+  V(mh, MH, 0x4C)     /* type = RX_A  MULTIPLY HALFWORD (32<-16)  */        \
+  V(bas, BAS, 0x4D)   /* type = RX_A  BRANCH AND SAVE  */                   \
+  V(cvd, CVD, 0x4E)   /* type = RX_A  CONVERT TO DECIMAL (32)  */           \
+  V(cvb, CVB, 0x4F)   /* type = RX_A  CONVERT TO BINARY (32)  */            \
+  V(st, ST, 0x50)     /* type = RX_A  STORE (32)  */                        \
+  V(lae, LAE, 0x51)   /* type = RX_A  LOAD ADDRESS EXTENDED  */             \
+  V(n, N, 0x54)       /* type = RX_A  AND (32)  */                          \
+  V(cl, CL, 0x55)     /* type = RX_A  COMPARE LOGICAL (32)  */              \
+  V(o, O, 0x56)       /* type = RX_A  OR (32)  */                           \
+  V(x, X, 0x57)       /* type = RX_A  EXCLUSIVE OR (32)  */                 \
+  V(l, L, 0x58)       /* type = RX_A  LOAD (32)  */                         \
+  V(c, C, 0x59)       /* type = RX_A  COMPARE (32)  */                      \
+  V(a, A, 0x5A)       /* type = RX_A  ADD (32)  */                          \
+  V(s, S, 0x5B)       /* type = RX_A  SUBTRACT (32)  */                     \
+  V(m, M, 0x5C)       /* type = RX_A  MULTIPLY (64<-32)  */                 \
+  V(d, D, 0x5D)       /* type = RX_A  DIVIDE (32<-64)  */                   \
+  V(al_z, AL, 0x5E)   /* type = RX_A  ADD LOGICAL (32)  */                  \
+  V(sl, SL, 0x5F)     /* type = RX_A  SUBTRACT LOGICAL (32)  */             \
+  V(std, STD, 0x60)   /* type = RX_A  STORE (long)  */                      \
+  V(mxd, MXD, 0x67)   /* type = RX_A  MULTIPLY (long to extended HFP)  */   \
+  V(ld, LD, 0x68)     /* type = RX_A  LOAD (long)  */                       \
+  V(cd, CD, 0x69)     /* type = RX_A  COMPARE (long HFP)  */                \
+  V(ad, AD, 0x6A)     /* type = RX_A  ADD NORMALIZED (long HFP)  */         \
+  V(sd, SD, 0x6B)     /* type = RX_A  SUBTRACT NORMALIZED (long HFP)  */    \
+  V(md, MD, 0x6C)     /* type = RX_A  MULTIPLY (long HFP)  */               \
+  V(dd, DD, 0x6D)     /* type = RX_A  DIVIDE (long HFP)  */                 \
+  V(aw, AW, 0x6E)     /* type = RX_A  ADD UNNORMALIZED (long HFP)  */       \
+  V(sw, SW, 0x6F)     /* type = RX_A  SUBTRACT UNNORMALIZED (long HFP)  */  \
+  V(ste, STE, 0x70)   /* type = RX_A  STORE (short)  */                     \
+  V(ms, MS, 0x71)     /* type = RX_A  MULTIPLY SINGLE (32)  */              \
+  V(le_z, LE, 0x78)   /* type = RX_A  LOAD (short)  */                      \
+  V(ce, CE, 0x79)     /* type = RX_A  COMPARE (short HFP)  */               \
+  V(ae, AE, 0x7A)     /* type = RX_A  ADD NORMALIZED (short HFP)  */        \
+  V(se, SE, 0x7B)     /* type = RX_A  SUBTRACT NORMALIZED (short HFP)  */   \
+  V(mde, MDE, 0x7C)   /* type = RX_A  MULTIPLY (short to long HFP)  */      \
+  V(me, ME, 0x7C)     /* type = RX_A  MULTIPLY (short to long HFP)  */      \
+  V(de, DE, 0x7D)     /* type = RX_A  DIVIDE (short HFP)  */                \
+  V(au, AU, 0x7E)     /* type = RX_A  ADD UNNORMALIZED (short HFP)  */      \
+  V(su, SU, 0x7F)     /* type = RX_A  SUBTRACT UNNORMALIZED (short HFP)  */ \
+  V(ssm, SSM, 0x80)   /* type = RX_A  SET SYSTEM MASK  */                   \
+  V(lra, LRA, 0xB1)   /* type = RX_A  LOAD REAL ADDRESS (32)  */
+
+#define S390_RX_B_OPCODE_LIST(V) \
+  V(bc, BC, 0x47)     /* type = RX_B  BRANCH ON CONDITION  */
+
+#define S390_RIE_A_OPCODE_LIST(V)                                              \
+  V(cgit, CGIT, 0xEC70) /* type = RIE_A COMPARE IMMEDIATE AND TRAP (64<-16) */ \
+  V(clgit, CLGIT,                                                              \
+    0xEC71) /* type = RIE_A COMPARE LOGICAL IMMEDIATE AND TRAP (64<-16)  */    \
+  V(cit, CIT, 0xEC72) /* type = RIE_A COMPARE IMMEDIATE AND TRAP (32<-16)  */  \
+  V(clfit, CLFIT,                                                              \
+    0xEC73) /* type = RIE_A COMPARE LOGICAL IMMEDIATE AND TRAP (32<-16)  */
+
+#define S390_RRD_OPCODE_LIST(V)                                                \
+  V(maebr, MAEBR, 0xB30E) /* type = RRD   MULTIPLY AND ADD (short BFP)  */     \
+  V(msebr, MSEBR, 0xB30F) /* type = RRD   MULTIPLY AND SUBTRACT (short BFP) */ \
+  V(madbr, MADBR, 0xB31E) /* type = RRD   MULTIPLY AND ADD (long BFP)  */      \
+  V(msdbr, MSDBR, 0xB31F) /* type = RRD   MULTIPLY AND SUBTRACT (long BFP)  */ \
+  V(maer, MAER, 0xB32E)   /* type = RRD   MULTIPLY AND ADD (short HFP)  */     \
+  V(mser, MSER, 0xB32F) /* type = RRD   MULTIPLY AND SUBTRACT (short HFP)  */  \
+  V(maylr, MAYLR,                                                              \
+    0xB338) /* type = RRD   MULTIPLY AND ADD UNNRM. (long to ext. low HFP)  */ \
+  V(mylr, MYLR,                                                                \
+    0xB339) /* type = RRD   MULTIPLY UNNORM. (long to ext. low HFP)  */        \
+  V(mayr, MAYR,                                                                \
+    0xB33A) /* type = RRD   MULTIPLY & ADD UNNORMALIZED (long to ext. HFP)  */ \
+  V(myr, MYR,                                                                  \
+    0xB33B) /* type = RRD   MULTIPLY UNNORMALIZED (long to ext. HFP)  */       \
+  V(mayhr, MAYHR,                                                              \
+    0xB33C) /* type = RRD   MULTIPLY AND ADD UNNRM. (long to ext. high HFP) */ \
+  V(myhr, MYHR,                                                                \
+    0xB33D) /* type = RRD   MULTIPLY UNNORM. (long to ext. high HFP)  */       \
+  V(madr, MADR, 0xB33E) /* type = RRD   MULTIPLY AND ADD (long HFP)  */        \
+  V(msdr, MSDR, 0xB33F) /* type = RRD   MULTIPLY AND SUBTRACT (long HFP)  */
+
+#define S390_RIE_B_OPCODE_LIST(V)                                            \
+  V(cgrj, CGRJ, 0xEC64) /* type = RIE_B COMPARE AND BRANCH RELATIVE (64)  */ \
+  V(clgrj, CLGRJ,                                                            \
+    0xEC65) /* type = RIE_B COMPARE LOGICAL AND BRANCH RELATIVE (64)  */     \
+  V(crj, CRJ, 0xEC76) /* type = RIE_B COMPARE AND BRANCH RELATIVE (32)  */   \
+  V(clrj, CLRJ,                                                              \
+    0xEC77) /* type = RIE_B COMPARE LOGICAL AND BRANCH RELATIVE (32)  */
+
+#define S390_RRE_OPCODE_LIST(V)                                                \
+  V(ipm, IPM, 0xB222)     /* type = RRE   INSERT PROGRAM MASK  */              \
+  V(ivsk, IVSK, 0xB223)   /* type = RRE   INSERT VIRTUAL STORAGE KEY  */       \
+  V(iac, IAC, 0xB224)     /* type = RRE   INSERT ADDRESS SPACE CONTROL  */     \
+  V(ssar, SSAR, 0xB225)   /* type = RRE   SET SECONDARY ASN  */                \
+  V(epar, EPAR, 0xB226)   /* type = RRE   EXTRACT PRIMARY ASN  */              \
+  V(esar, ESAR, 0xB227)   /* type = RRE   EXTRACT SECONDARY ASN  */            \
+  V(pt, PT, 0xB228)       /* type = RRE   PROGRAM TRANSFER  */                 \
+  V(iske, ISKE, 0xB229)   /* type = RRE   INSERT STORAGE KEY EXTENDED  */      \
+  V(rrbe, RRBE, 0xB22A)   /* type = RRE   RESET REFERENCE BIT EXTENDED  */     \
+  V(tb, TB, 0xB22C)       /* type = RRE   TEST BLOCK  */                       \
+  V(dxr, DXR, 0xB22D)     /* type = RRE   DIVIDE (extended HFP)  */            \
+  V(pgin, PGIN, 0xB22E)   /* type = RRE   PAGE IN  */                          \
+  V(pgout, PGOUT, 0xB22F) /* type = RRE   PAGE OUT  */                         \
+  V(bakr, BAKR, 0xB240)   /* type = RRE   BRANCH AND STACK  */                 \
+  V(cksm, CKSM, 0xB241)   /* type = RRE   CHECKSUM  */                         \
+  V(sqdr, SQDR, 0xB244)   /* type = RRE   SQUARE ROOT (long HFP)  */           \
+  V(sqer, SQER, 0xB245)   /* type = RRE   SQUARE ROOT (short HFP)  */          \
+  V(stura, STURA, 0xB246) /* type = RRE   STORE USING REAL ADDRESS (32)  */    \
+  V(msta, MSTA, 0xB247)   /* type = RRE   MODIFY STACKED STATE  */             \
+  V(palb, PALB, 0xB248)   /* type = RRE   PURGE ALB  */                        \
+  V(ereg, EREG, 0xB249)   /* type = RRE   EXTRACT STACKED REGISTERS (32)  */   \
+  V(esta, ESTA, 0xB24A)   /* type = RRE   EXTRACT STACKED STATE  */            \
+  V(lura, LURA, 0xB24B)   /* type = RRE   LOAD USING REAL ADDRESS (32)  */     \
+  V(tar, TAR, 0xB24C)     /* type = RRE   TEST ACCESS  */                      \
+  V(cpya, CPYA, 0xB24D)   /* type = RRE   COPY ACCESS  */                      \
+  V(sar, SAR, 0xB24E)     /* type = RRE   SET ACCESS  */                       \
+  V(ear, EAR, 0xB24F)     /* type = RRE   EXTRACT ACCESS  */                   \
+  V(csp, CSP, 0xB250)     /* type = RRE   COMPARE AND SWAP AND PURGE (32)  */  \
+  V(msr, MSR, 0xB252)     /* type = RRE   MULTIPLY SINGLE (32)  */             \
+  V(mvpg, MVPG, 0xB254)   /* type = RRE   MOVE PAGE  */                        \
+  V(mvst, MVST, 0xB255)   /* type = RRE   MOVE STRING  */                      \
+  V(cuse, CUSE, 0xB257)   /* type = RRE   COMPARE UNTIL SUBSTRING EQUAL  */    \
+  V(bsg, BSG, 0xB258)     /* type = RRE   BRANCH IN SUBSPACE GROUP  */         \
+  V(bsa, BSA, 0xB25A)     /* type = RRE   BRANCH AND SET AUTHORITY  */         \
+  V(clst, CLST, 0xB25D)   /* type = RRE   COMPARE LOGICAL STRING  */           \
+  V(srst, SRST, 0xB25E)   /* type = RRE   SEARCH STRING  */                    \
+  V(cmpsc, CMPSC, 0xB263) /* type = RRE   COMPRESSION CALL  */                 \
+  V(tre, TRE, 0xB2A5)     /* type = RRE   TRANSLATE EXTENDED  */               \
+  V(etnd, ETND, 0xB2EC) /* type = RRE   EXTRACT TRANSACTION NESTING DEPTH  */  \
+  V(lpebr, LPEBR, 0xB300) /* type = RRE   LOAD POSITIVE (short BFP)  */        \
+  V(lnebr, LNEBR, 0xB301) /* type = RRE   LOAD NEGATIVE (short BFP)  */        \
+  V(ltebr, LTEBR, 0xB302) /* type = RRE   LOAD AND TEST (short BFP)  */        \
+  V(lcebr, LCEBR, 0xB303) /* type = RRE   LOAD COMPLEMENT (short BFP)  */      \
+  V(ldebr, LDEBR,                                                              \
+    0xB304) /* type = RRE   LOAD LENGTHENED (short to long BFP)  */            \
+  V(lxdbr, LXDBR,                                                              \
+    0xB305) /* type = RRE   LOAD LENGTHENED (long to extended BFP)  */         \
+  V(lxebr, LXEBR,                                                              \
+    0xB306) /* type = RRE   LOAD LENGTHENED (short to extended BFP)  */        \
+  V(mxdbr, MXDBR, 0xB307) /* type = RRE   MULTIPLY (long to extended BFP)  */  \
+  V(kebr, KEBR, 0xB308)   /* type = RRE   COMPARE AND SIGNAL (short BFP)  */   \
+  V(cebr, CEBR, 0xB309)   /* type = RRE   COMPARE (short BFP)  */              \
+  V(aebr, AEBR, 0xB30A)   /* type = RRE   ADD (short BFP)  */                  \
+  V(sebr, SEBR, 0xB30B)   /* type = RRE   SUBTRACT (short BFP)  */             \
+  V(mdebr, MDEBR, 0xB30C) /* type = RRE   MULTIPLY (short to long BFP)  */     \
+  V(debr, DEBR, 0xB30D)   /* type = RRE   DIVIDE (short BFP)  */               \
+  V(lpdbr, LPDBR, 0xB310) /* type = RRE   LOAD POSITIVE (long BFP)  */         \
+  V(lndbr, LNDBR, 0xB311) /* type = RRE   LOAD NEGATIVE (long BFP)  */         \
+  V(ltdbr, LTDBR, 0xB312) /* type = RRE   LOAD AND TEST (long BFP)  */         \
+  V(lcdbr, LCDBR, 0xB313) /* type = RRE   LOAD COMPLEMENT (long BFP)  */       \
+  V(sqebr, SQEBR, 0xB314) /* type = RRE   SQUARE ROOT (short BFP)  */          \
+  V(sqdbr, SQDBR, 0xB315) /* type = RRE   SQUARE ROOT (long BFP)  */           \
+  V(sqxbr, SQXBR, 0xB316) /* type = RRE   SQUARE ROOT (extended BFP)  */       \
+  V(meebr, MEEBR, 0xB317) /* type = RRE   MULTIPLY (short BFP)  */             \
+  V(kdbr, KDBR, 0xB318)   /* type = RRE   COMPARE AND SIGNAL (long BFP)  */    \
+  V(cdbr, CDBR, 0xB319)   /* type = RRE   COMPARE (long BFP)  */               \
+  V(adbr, ADBR, 0xB31A)   /* type = RRE   ADD (long BFP)  */                   \
+  V(sdbr, SDBR, 0xB31B)   /* type = RRE   SUBTRACT (long BFP)  */              \
+  V(mdbr, MDBR, 0xB31C)   /* type = RRE   MULTIPLY (long BFP)  */              \
+  V(ddbr, DDBR, 0xB31D)   /* type = RRE   DIVIDE (long BFP)  */                \
+  V(lder, LDER, 0xB324) /* type = RRE   LOAD LENGTHENED (short to long HFP) */ \
+  V(lxdr, LXDR,                                                                \
+    0xB325) /* type = RRE   LOAD LENGTHENED (long to extended HFP)  */         \
+  V(lxer, LXER,                                                                \
+    0xB326) /* type = RRE   LOAD LENGTHENED (short to extended HFP)  */        \
+  V(sqxr, SQXR, 0xB336)   /* type = RRE   SQUARE ROOT (extended HFP)  */       \
+  V(meer, MEER, 0xB337)   /* type = RRE   MULTIPLY (short HFP)  */             \
+  V(lpxbr, LPXBR, 0xB340) /* type = RRE   LOAD POSITIVE (extended BFP)  */     \
+  V(lnxbr, LNXBR, 0xB341) /* type = RRE   LOAD NEGATIVE (extended BFP)  */     \
+  V(ltxbr, LTXBR, 0xB342) /* type = RRE   LOAD AND TEST (extended BFP)  */     \
+  V(lcxbr, LCXBR, 0xB343) /* type = RRE   LOAD COMPLEMENT (extended BFP)  */   \
+  V(ledbr, LEDBR, 0xB344) /* type = RRE   LOAD ROUNDED (long to short BFP)  */ \
+  V(ldxbr, LDXBR,                                                              \
+    0xB345) /* type = RRE   LOAD ROUNDED (extended to long BFP)  */            \
+  V(lexbr, LEXBR,                                                              \
+    0xB346) /* type = RRE   LOAD ROUNDED (extended to short BFP)  */           \
+  V(kxbr, KXBR, 0xB348) /* type = RRE   COMPARE AND SIGNAL (extended BFP)  */  \
+  V(cxbr, CXBR, 0xB349) /* type = RRE   COMPARE (extended BFP)  */             \
+  V(axbr, AXBR, 0xB34A) /* type = RRE   ADD (extended BFP)  */                 \
+  V(sxbr, SXBR, 0xB34B) /* type = RRE   SUBTRACT (extended BFP)  */            \
+  V(mxbr, MXBR, 0xB34C) /* type = RRE   MULTIPLY (extended BFP)  */            \
+  V(dxbr, DXBR, 0xB34D) /* type = RRE   DIVIDE (extended BFP)  */              \
+  V(thder, THDER,                                                              \
+    0xB358)             /* type = RRE   CONVERT BFP TO HFP (short to long)  */ \
+  V(thdr, THDR, 0xB359) /* type = RRE   CONVERT BFP TO HFP (long)  */          \
+  V(lpxr, LPXR, 0xB360) /* type = RRE   LOAD POSITIVE (extended HFP)  */       \
+  V(lnxr, LNXR, 0xB361) /* type = RRE   LOAD NEGATIVE (extended HFP)  */       \
+  V(ltxr, LTXR, 0xB362) /* type = RRE   LOAD AND TEST (extended HFP)  */       \
+  V(lcxr, LCXR, 0xB363) /* type = RRE   LOAD COMPLEMENT (extended HFP)  */     \
+  V(lxr, LXR, 0xB365)   /* type = RRE   LOAD (extended)  */                    \
+  V(lexr, LEXR,                                                                \
+    0xB366) /* type = RRE   LOAD ROUNDED (extended to short HFP)  */           \
+  V(fixr, FIXR, 0xB367)   /* type = RRE   LOAD FP INTEGER (extended HFP)  */   \
+  V(cxr, CXR, 0xB369)     /* type = RRE   COMPARE (extended HFP)  */           \
+  V(lpdfr, LPDFR, 0xB370) /* type = RRE   LOAD POSITIVE (long)  */             \
+  V(lndfr, LNDFR, 0xB371) /* type = RRE   LOAD NEGATIVE (long)  */             \
+  V(lcdfr, LCDFR, 0xB373) /* type = RRE   LOAD COMPLEMENT (long)  */           \
+  V(lzer, LZER, 0xB374)   /* type = RRE   LOAD ZERO (short)  */                \
+  V(lzdr, LZDR, 0xB375)   /* type = RRE   LOAD ZERO (long)  */                 \
+  V(lzxr, LZXR, 0xB376)   /* type = RRE   LOAD ZERO (extended)  */             \
+  V(fier, FIER, 0xB377)   /* type = RRE   LOAD FP INTEGER (short HFP)  */      \
+  V(fidr, FIDR, 0xB37F)   /* type = RRE   LOAD FP INTEGER (long HFP)  */       \
+  V(sfpc, SFPC, 0xB384)   /* type = RRE   SET FPC  */                          \
+  V(sfasr, SFASR, 0xB385) /* type = RRE   SET FPC AND SIGNAL  */               \
+  V(efpc, EFPC, 0xB38C)   /* type = RRE   EXTRACT FPC  */                      \
+  V(cefbr, CEFBR,                                                              \
+    0xB394) /* type = RRE   CONVERT FROM FIXED (32 to short BFP)  */           \
+  V(cdfbr, CDFBR,                                                              \
+    0xB395) /* type = RRE   CONVERT FROM FIXED (32 to long BFP)  */            \
+  V(cxfbr, CXFBR,                                                              \
+    0xB396) /* type = RRE   CONVERT FROM FIXED (32 to extended BFP)  */        \
+  V(cegbr, CEGBR,                                                              \
+    0xB3A4) /* type = RRE   CONVERT FROM FIXED (64 to short BFP)  */           \
+  V(cdgbr, CDGBR,                                                              \
+    0xB3A5) /* type = RRE   CONVERT FROM FIXED (64 to long BFP)  */            \
+  V(cxgbr, CXGBR,                                                              \
+    0xB3A6) /* type = RRE   CONVERT FROM FIXED (64 to extended BFP)  */        \
+  V(cefr, CEFR,                                                                \
+    0xB3B4) /* type = RRE   CONVERT FROM FIXED (32 to short HFP)  */           \
+  V(cdfr, CDFR, 0xB3B5) /* type = RRE   CONVERT FROM FIXED (32 to long HFP) */ \
+  V(cxfr, CXFR,                                                                \
+    0xB3B6) /* type = RRE   CONVERT FROM FIXED (32 to extended HFP)  */        \
+  V(ldgr, LDGR, 0xB3C1) /* type = RRE   LOAD FPR FROM GR (64 to long)  */      \
+  V(cegr, CEGR,                                                                \
+    0xB3C4) /* type = RRE   CONVERT FROM FIXED (64 to short HFP)  */           \
+  V(cdgr, CDGR, 0xB3C5) /* type = RRE   CONVERT FROM FIXED (64 to long HFP) */ \
+  V(cxgr, CXGR,                                                                \
+    0xB3C6) /* type = RRE   CONVERT FROM FIXED (64 to extended HFP)  */        \
+  V(lgdr, LGDR, 0xB3CD)   /* type = RRE   LOAD GR FROM FPR (long to 64)  */    \
+  V(ltdtr, LTDTR, 0xB3D6) /* type = RRE   LOAD AND TEST (long DFP)  */         \
+  V(ltxtr, LTXTR, 0xB3DE) /* type = RRE   LOAD AND TEST (extended DFP)  */     \
+  V(kdtr, KDTR, 0xB3E0)   /* type = RRE   COMPARE AND SIGNAL (long DFP)  */    \
+  V(cudtr, CUDTR, 0xB3E2) /* type = RRE   CONVERT TO UNSIGNED PACKED (long */  \
+                          /* DFP to 64)  */                                    \
+  V(cdtr, CDTR, 0xB3E4)   /* type = RRE   COMPARE (long DFP)  */               \
+  V(eedtr, EEDTR,                                                              \
+    0xB3E5) /* type = RRE   EXTRACT BIASED EXPONENT (long DFP to 64)  */       \
+  V(esdtr, ESDTR,                                                              \
+    0xB3E7) /* type = RRE   EXTRACT SIGNIFICANCE (long DFP to 64)  */          \
+  V(kxtr, KXTR, 0xB3E8) /* type = RRE   COMPARE AND SIGNAL (extended DFP)  */  \
+  V(cuxtr, CUXTR,                                                              \
+    0xB3EA) /* type = RRE   CONVERT TO UNSIGNED PACKED (extended DFP  */       \
+            /* to 128)  */                                                     \
+  V(cxtr, CXTR, 0xB3EC) /* type = RRE   COMPARE (extended DFP)  */             \
+  V(eextr, EEXTR,                                                              \
+    0xB3ED) /* type = RRE   EXTRACT BIASED EXPONENT (extended DFP to 64)  */   \
+  V(esxtr, ESXTR,                                                              \
+    0xB3EF) /* type = RRE   EXTRACT SIGNIFICANCE (extended DFP to 64)  */      \
+  V(cdgtr, CDGTR,                                                              \
+    0xB3F1) /* type = RRE   CONVERT FROM FIXED (64 to long DFP)  */            \
+  V(cdutr, CDUTR,                                                              \
+    0xB3F2) /* type = RRE   CONVERT FROM UNSIGNED PACKED (64 to long DFP)  */  \
+  V(cdstr, CDSTR,                                                              \
+    0xB3F3) /* type = RRE   CONVERT FROM SIGNED PACKED (64 to long DFP)  */    \
+  V(cedtr, CEDTR,                                                              \
+    0xB3F4) /* type = RRE   COMPARE BIASED EXPONENT (long DFP)  */             \
+  V(cxgtr, CXGTR,                                                              \
+    0xB3F9) /* type = RRE   CONVERT FROM FIXED (64 to extended DFP)  */        \
+  V(cxutr, CXUTR,                                                              \
+    0xB3FA) /* type = RRE   CONVERT FROM UNSIGNED PACKED (128 to ext. DFP)  */ \
+  V(cxstr, CXSTR, 0xB3FB) /* type = RRE   CONVERT FROM SIGNED PACKED (128  */  \
+                          /* to extended DFP)  */                              \
+  V(cextr, CEXTR,                                                              \
+    0xB3FC) /* type = RRE   COMPARE BIASED EXPONENT (extended DFP)  */         \
+  V(lpgr, LPGR, 0xB900)   /* type = RRE   LOAD POSITIVE (64)  */               \
+  V(lngr, LNGR, 0xB901)   /* type = RRE   LOAD NEGATIVE (64)  */               \
+  V(ltgr, LTGR, 0xB902)   /* type = RRE   LOAD AND TEST (64)  */               \
+  V(lcgr, LCGR, 0xB903)   /* type = RRE   LOAD COMPLEMENT (64)  */             \
+  V(lgr, LGR, 0xB904)     /* type = RRE   LOAD (64)  */                        \
+  V(lurag, LURAG, 0xB905) /* type = RRE   LOAD USING REAL ADDRESS (64)  */     \
+  V(lgbr, LGBR, 0xB906)   /* type = RRE   LOAD BYTE (64<-8)  */                \
+  V(lghr, LGHR, 0xB907)   /* type = RRE   LOAD HALFWORD (64<-16)  */           \
+  V(agr, AGR, 0xB908)     /* type = RRE   ADD (64)  */                         \
+  V(sgr, SGR, 0xB909)     /* type = RRE   SUBTRACT (64)  */                    \
+  V(algr, ALGR, 0xB90A)   /* type = RRE   ADD LOGICAL (64)  */                 \
+  V(slgr, SLGR, 0xB90B)   /* type = RRE   SUBTRACT LOGICAL (64)  */            \
+  V(msgr, MSGR, 0xB90C)   /* type = RRE   MULTIPLY SINGLE (64)  */             \
+  V(dsgr, DSGR, 0xB90D)   /* type = RRE   DIVIDE SINGLE (64)  */               \
+  V(eregg, EREGG, 0xB90E) /* type = RRE   EXTRACT STACKED REGISTERS (64)  */   \
+  V(lrvgr, LRVGR, 0xB90F) /* type = RRE   LOAD REVERSED (64)  */               \
+  V(lpgfr, LPGFR, 0xB910) /* type = RRE   LOAD POSITIVE (64<-32)  */           \
+  V(lngfr, LNGFR, 0xB911) /* type = RRE   LOAD NEGATIVE (64<-32)  */           \
+  V(ltgfr, LTGFR, 0xB912) /* type = RRE   LOAD AND TEST (64<-32)  */           \
+  V(lcgfr, LCGFR, 0xB913) /* type = RRE   LOAD COMPLEMENT (64<-32)  */         \
+  V(lgfr, LGFR, 0xB914)   /* type = RRE   LOAD (64<-32)  */                    \
+  V(llgfr, LLGFR, 0xB916) /* type = RRE   LOAD LOGICAL (64<-32)  */            \
+  V(llgtr, LLGTR,                                                              \
+    0xB917) /* type = RRE   LOAD LOGICAL THIRTY ONE BITS (64<-31)  */          \
+  V(agfr, AGFR, 0xB918)   /* type = RRE   ADD (64<-32)  */                     \
+  V(sgfr, SGFR, 0xB919)   /* type = RRE   SUBTRACT (64<-32)  */                \
+  V(algfr, ALGFR, 0xB91A) /* type = RRE   ADD LOGICAL (64<-32)  */             \
+  V(slgfr, SLGFR, 0xB91B) /* type = RRE   SUBTRACT LOGICAL (64<-32)  */        \
+  V(msgfr, MSGFR, 0xB91C) /* type = RRE   MULTIPLY SINGLE (64<-32)  */         \
+  V(dsgfr, DSGFR, 0xB91D) /* type = RRE   DIVIDE SINGLE (64<-32)  */           \
+  V(kmac, KMAC, 0xB91E) /* type = RRE   COMPUTE MESSAGE AUTHENTICATION CODE */ \
+  V(lrvr, LRVR, 0xB91F) /* type = RRE   LOAD REVERSED (32)  */                 \
+  V(cgr, CGR, 0xB920)   /* type = RRE   COMPARE (64)  */                       \
+  V(clgr, CLGR, 0xB921) /* type = RRE   COMPARE LOGICAL (64)  */               \
+  V(sturg, STURG, 0xB925) /* type = RRE   STORE USING REAL ADDRESS (64)  */    \
+  V(lbr, LBR, 0xB926)     /* type = RRE   LOAD BYTE (32<-8)  */                \
+  V(lhr, LHR, 0xB927)     /* type = RRE   LOAD HALFWORD (32<-16)  */           \
+  V(pckmo, PCKMO,                                                              \
+    0xB928) /* type = RRE   PERFORM CRYPTOGRAPHIC KEY MGMT. OPERATIONS  */     \
+  V(kmf, KMF, 0xB92A) /* type = RRE   CIPHER MESSAGE WITH CIPHER FEEDBACK  */  \
+  V(kmo, KMO, 0xB92B) /* type = RRE   CIPHER MESSAGE WITH OUTPUT FEEDBACK  */  \
+  V(pcc, PCC, 0xB92C) /* type = RRE   PERFORM CRYPTOGRAPHIC COMPUTATION  */    \
+  V(km, KM, 0xB92E)   /* type = RRE   CIPHER MESSAGE  */                       \
+  V(kmc, KMC, 0xB92F) /* type = RRE   CIPHER MESSAGE WITH CHAINING  */         \
+  V(cgfr, CGFR, 0xB930)   /* type = RRE   COMPARE (64<-32)  */                 \
+  V(clgfr, CLGFR, 0xB931) /* type = RRE   COMPARE LOGICAL (64<-32)  */         \
+  V(ppno, PPNO,                                                                \
+    0xB93C) /* type = RRE   PERFORM PSEUDORANDOM NUMBER OPERATION  */          \
+  V(kimd, KIMD, 0xB93E) /* type = RRE   COMPUTE INTERMEDIATE MESSAGE DIGEST */ \
+  V(klmd, KLMD, 0xB93F) /* type = RRE   COMPUTE LAST MESSAGE DIGEST  */        \
+  V(bctgr, BCTGR, 0xB946) /* type = RRE   BRANCH ON COUNT (64)  */             \
+  V(cdftr, CDFTR,                                                              \
+    0xB951) /* type = RRE   CONVERT FROM FIXED (32 to long DFP)  */            \
+  V(cxftr, CXFTR,                                                              \
+    0xB959) /* type = RRE   CONVERT FROM FIXED (32 to extended DFP)  */        \
+  V(ngr, NGR, 0xB980)     /* type = RRE   AND (64)  */                         \
+  V(ogr, OGR, 0xB981)     /* type = RRE   OR (64)  */                          \
+  V(xgr, XGR, 0xB982)     /* type = RRE   EXCLUSIVE OR (64)  */                \
+  V(flogr, FLOGR, 0xB983) /* type = RRE   FIND LEFTMOST ONE  */                \
+  V(llgcr, LLGCR, 0xB984) /* type = RRE   LOAD LOGICAL CHARACTER (64<-8)  */   \
+  V(llghr, LLGHR, 0xB985) /* type = RRE   LOAD LOGICAL HALFWORD (64<-16)  */   \
+  V(mlgr, MLGR, 0xB986)   /* type = RRE   MULTIPLY LOGICAL (128<-64)  */       \
+  V(dlgr, DLGR, 0xB987)   /* type = RRE   DIVIDE LOGICAL (64<-128)  */         \
+  V(alcgr, ALCGR, 0xB988) /* type = RRE   ADD LOGICAL WITH CARRY (64)  */      \
+  V(slbgr, SLBGR, 0xB989) /* type = RRE   SUBTRACT LOGICAL WITH BORROW (64) */ \
+  V(cspg, CSPG, 0xB98A)   /* type = RRE   COMPARE AND SWAP AND PURGE (64)  */  \
+  V(epsw, EPSW, 0xB98D)   /* type = RRE   EXTRACT PSW  */                      \
+  V(llcr, LLCR, 0xB994)   /* type = RRE   LOAD LOGICAL CHARACTER (32<-8)  */   \
+  V(llhr, LLHR, 0xB995)   /* type = RRE   LOAD LOGICAL HALFWORD (32<-16)  */   \
+  V(mlr, MLR, 0xB996)     /* type = RRE   MULTIPLY LOGICAL (64<-32)  */        \
+  V(dlr, DLR, 0xB997)     /* type = RRE   DIVIDE LOGICAL (32<-64)  */          \
+  V(alcr, ALCR, 0xB998)   /* type = RRE   ADD LOGICAL WITH CARRY (32)  */      \
+  V(slbr, SLBR, 0xB999) /* type = RRE   SUBTRACT LOGICAL WITH BORROW (32)  */  \
+  V(epair, EPAIR, 0xB99A) /* type = RRE   EXTRACT PRIMARY ASN AND INSTANCE  */ \
+  V(esair, ESAIR,                                                              \
+    0xB99B)             /* type = RRE   EXTRACT SECONDARY ASN AND INSTANCE  */ \
+  V(esea, ESEA, 0xB99D) /* type = RRE   EXTRACT AND SET EXTENDED AUTHORITY  */ \
+  V(pti, PTI, 0xB99E)   /* type = RRE   PROGRAM TRANSFER WITH INSTANCE  */     \
+  V(ssair, SSAIR, 0xB99F) /* type = RRE   SET SECONDARY ASN WITH INSTANCE  */  \
+  V(ptf, PTF, 0xB9A2)     /* type = RRE   PERFORM TOPOLOGY FUNCTION  */        \
+  V(rrbm, RRBM, 0xB9AE)   /* type = RRE   RESET REFERENCE BITS MULTIPLE  */    \
+  V(pfmf, PFMF, 0xB9AF) /* type = RRE   PERFORM FRAME MANAGEMENT FUNCTION  */  \
+  V(cu41, CU41, 0xB9B2) /* type = RRE   CONVERT UTF-32 TO UTF-8  */            \
+  V(cu42, CU42, 0xB9B3) /* type = RRE   CONVERT UTF-32 TO UTF-16  */           \
+  V(srstu, SRSTU, 0xB9BE)     /* type = RRE   SEARCH STRING UNICODE  */        \
+  V(chhr, CHHR, 0xB9CD)       /* type = RRE   COMPARE HIGH (32)  */            \
+  V(clhhr, CLHHR, 0xB9CF)     /* type = RRE   COMPARE LOGICAL HIGH (32)  */    \
+  V(chlr, CHLR, 0xB9DD)       /* type = RRE   COMPARE HIGH (32)  */            \
+  V(clhlr, CLHLR, 0xB9DF)     /* type = RRE   COMPARE LOGICAL HIGH (32)  */    \
+  V(popcnt, POPCNT_Z, 0xB9E1) /* type = RRE   POPULATION COUNT  */
+
+#define S390_RIE_C_OPCODE_LIST(V)                                             \
+  V(cgij, CGIJ,                                                               \
+    0xEC7C) /* type = RIE_C COMPARE IMMEDIATE AND BRANCH RELATIVE (64<-8)  */ \
+  V(clgij, CLGIJ,                                                             \
+    0xEC7D) /* type = RIE_C COMPARE LOGICAL IMMEDIATE AND BRANCH RELATIVE  */ \
+            /* (64<-8)  */                                                    \
+  V(cij, CIJ,                                                                 \
+    0xEC7E) /* type = RIE_C COMPARE IMMEDIATE AND BRANCH RELATIVE (32<-8)  */ \
+  V(clij, CLIJ, 0xEC7F) /* type = RIE_C COMPARE LOGICAL IMMEDIATE AND      */ \
+                        /* BRANCH RELATIVE (32<-8)  */
+
+#define S390_RIE_D_OPCODE_LIST(V)                                          \
+  V(ahik, AHIK, 0xECD8)   /* type = RIE_D ADD IMMEDIATE (32<-16)  */       \
+  V(aghik, AGHIK, 0xECD9) /* type = RIE_D ADD IMMEDIATE (64<-16)  */       \
+  V(alhsik, ALHSIK,                                                        \
+    0xECDA) /* type = RIE_D ADD LOGICAL WITH SIGNED IMMEDIATE (32<-16)  */ \
+  V(alghsik, ALGHSIK,                                                      \
+    0xECDB) /* type = RIE_D ADD LOGICAL WITH SIGNED IMMEDIATE (64<-16)  */
+
+#define S390_VRV_OPCODE_LIST(V)                                           \
+  V(vgeg, VGEG, 0xE712)   /* type = VRV   VECTOR GATHER ELEMENT (64)  */  \
+  V(vgef, VGEF, 0xE713)   /* type = VRV   VECTOR GATHER ELEMENT (32)  */  \
+  V(vsceg, VSCEG, 0xE71A) /* type = VRV   VECTOR SCATTER ELEMENT (64)  */ \
+  V(vscef, VSCEF, 0xE71B) /* type = VRV   VECTOR SCATTER ELEMENT (32)  */
+
+#define S390_RIE_E_OPCODE_LIST(V)                                  \
+  V(brxhg, BRXHG,                                                  \
+    0xEC44) /* type = RIE_E BRANCH RELATIVE ON INDEX HIGH (64)  */ \
+  V(brxlg, BRXLG,                                                  \
+    0xEC45) /* type = RIE_E BRANCH RELATIVE ON INDEX LOW OR EQ. (64)  */
+
+#define S390_RR_OPCODE_LIST(V)                                                 \
+  V(spm, SPM, 0x04)     /* type = RR    SET PROGRAM MASK  */                   \
+  V(balr, BALR, 0x05)   /* type = RR    BRANCH AND LINK  */                    \
+  V(bctr, BCTR, 0x06)   /* type = RR    BRANCH ON COUNT (32)  */               \
+  V(bcr, BCR, 0x07)     /* type = RR    BRANCH ON CONDITION  */                \
+  V(bsm, BSM, 0x0B)     /* type = RR    BRANCH AND SET MODE  */                \
+  V(bassm, BASSM, 0x0C) /* type = RR    BRANCH AND SAVE AND SET MODE  */       \
+  V(basr, BASR, 0x0D)   /* type = RR    BRANCH AND SAVE  */                    \
+  V(mvcl, MVCL, 0x0E)   /* type = RR    MOVE LONG  */                          \
+  V(clcl, CLCL, 0x0F)   /* type = RR    COMPARE LOGICAL LONG  */               \
+  V(lpr, LPR, 0x10)     /* type = RR    LOAD POSITIVE (32)  */                 \
+  V(lnr, LNR, 0x11)     /* type = RR    LOAD NEGATIVE (32)  */                 \
+  V(ltr, LTR, 0x12)     /* type = RR    LOAD AND TEST (32)  */                 \
+  V(lcr, LCR, 0x13)     /* type = RR    LOAD COMPLEMENT (32)  */               \
+  V(nr, NR, 0x14)       /* type = RR    AND (32)  */                           \
+  V(clr, CLR, 0x15)     /* type = RR    COMPARE LOGICAL (32)  */               \
+  V(or_z, OR, 0x16)     /* type = RR    OR (32)  */                            \
+  V(xr, XR, 0x17)       /* type = RR    EXCLUSIVE OR (32)  */                  \
+  V(lr, LR, 0x18)       /* type = RR    LOAD (32)  */                          \
+  V(cr_z, CR, 0x19)     /* type = RR    COMPARE (32)  */                       \
+  V(ar, AR, 0x1A)       /* type = RR    ADD (32)  */                           \
+  V(sr, SR, 0x1B)       /* type = RR    SUBTRACT (32)  */                      \
+  V(mr_z, MR, 0x1C)     /* type = RR    MULTIPLY (64<-32)  */                  \
+  V(dr, DR, 0x1D)       /* type = RR    DIVIDE (32<-64)  */                    \
+  V(alr, ALR, 0x1E)     /* type = RR    ADD LOGICAL (32)  */                   \
+  V(slr, SLR, 0x1F)     /* type = RR    SUBTRACT LOGICAL (32)  */              \
+  V(lpdr, LPDR, 0x20)   /* type = RR    LOAD POSITIVE (long HFP)  */           \
+  V(lndr, LNDR, 0x21)   /* type = RR    LOAD NEGATIVE (long HFP)  */           \
+  V(ltdr, LTDR, 0x22)   /* type = RR    LOAD AND TEST (long HFP)  */           \
+  V(lcdr, LCDR, 0x23)   /* type = RR    LOAD COMPLEMENT (long HFP)  */         \
+  V(hdr, HDR, 0x24)     /* type = RR    HALVE (long HFP)  */                   \
+  V(ldxr, LDXR, 0x25) /* type = RR    LOAD ROUNDED (extended to long HFP)  */  \
+  V(lrdr, LRDR, 0x25) /* type = RR    LOAD ROUNDED (extended to long HFP)  */  \
+  V(mxr, MXR, 0x26)   /* type = RR    MULTIPLY (extended HFP)  */              \
+  V(mxdr, MXDR, 0x27) /* type = RR    MULTIPLY (long to extended HFP)  */      \
+  V(ldr, LDR, 0x28)   /* type = RR    LOAD (long)  */                          \
+  V(cdr, CDR, 0x29)   /* type = RR    COMPARE (long HFP)  */                   \
+  V(adr, ADR, 0x2A)   /* type = RR    ADD NORMALIZED (long HFP)  */            \
+  V(sdr, SDR, 0x2B)   /* type = RR    SUBTRACT NORMALIZED (long HFP)  */       \
+  V(mdr, MDR, 0x2C)   /* type = RR    MULTIPLY (long HFP)  */                  \
+  V(ddr, DDR, 0x2D)   /* type = RR    DIVIDE (long HFP)  */                    \
+  V(awr, AWR, 0x2E)   /* type = RR    ADD UNNORMALIZED (long HFP)  */          \
+  V(swr, SWR, 0x2F)   /* type = RR    SUBTRACT UNNORMALIZED (long HFP)  */     \
+  V(lper, LPER, 0x30) /* type = RR    LOAD POSITIVE (short HFP)  */            \
+  V(lner, LNER, 0x31) /* type = RR    LOAD NEGATIVE (short HFP)  */            \
+  V(lter, LTER, 0x32) /* type = RR    LOAD AND TEST (short HFP)  */            \
+  V(lcer, LCER, 0x33) /* type = RR    LOAD COMPLEMENT (short HFP)  */          \
+  V(her_z, HER_Z, 0x34) /* type = RR    HALVE (short HFP)  */                  \
+  V(ledr, LEDR, 0x35)   /* type = RR    LOAD ROUNDED (long to short HFP)  */   \
+  V(lrer, LRER, 0x35)   /* type = RR    LOAD ROUNDED (long to short HFP)  */   \
+  V(axr, AXR, 0x36)     /* type = RR    ADD NORMALIZED (extended HFP)  */      \
+  V(sxr, SXR, 0x37)     /* type = RR    SUBTRACT NORMALIZED (extended HFP)  */ \
+  V(ler, LER, 0x38)     /* type = RR    LOAD (short)  */                       \
+  V(cer, CER, 0x39)     /* type = RR    COMPARE (short HFP)  */                \
+  V(aer, AER, 0x3A)     /* type = RR    ADD NORMALIZED (short HFP)  */         \
+  V(ser, SER, 0x3B)     /* type = RR    SUBTRACT NORMALIZED (short HFP)  */    \
+  V(mder, MDER, 0x3C)   /* type = RR    MULTIPLY (short to long HFP)  */       \
+  V(mer, MER, 0x3C)     /* type = RR    MULTIPLY (short to long HFP)  */       \
+  V(der, DER, 0x3D)     /* type = RR    DIVIDE (short HFP)  */                 \
+  V(aur, AUR, 0x3E)     /* type = RR    ADD UNNORMALIZED (short HFP)  */       \
+  V(sur, SUR, 0x3F)     /* type = RR    SUBTRACT UNNORMALIZED (short HFP)  */
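+// Note: a few RR entries share an encoding (LDXR/LRDR at 0x25, LEDR/LRER at
+// 0x35, MDER/MER at 0x3C, and likewise MDE/ME at 0x7C in the RX_A list);
+// these are alternative mnemonics for the same opcode, listed so either
+// name is available.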
+
+#define S390_RIE_F_OPCODE_LIST(V)                                              \
+  V(risblg, RISBLG,                                                            \
+    0xEC51) /* type = RIE_F ROTATE THEN INSERT SELECTED BITS LOW (64)  */      \
+  V(rnsbg, RNSBG,                                                              \
+    0xEC54) /* type = RIE_F ROTATE THEN AND SELECTED BITS (64)  */             \
+  V(risbg, RISBG,                                                              \
+    0xEC55) /* type = RIE_F ROTATE THEN INSERT SELECTED BITS (64)  */          \
+  V(rosbg, ROSBG, 0xEC56) /* type = RIE_F ROTATE THEN OR SELECTED BITS (64) */ \
+  V(rxsbg, RXSBG,                                                              \
+    0xEC57) /* type = RIE_F ROTATE THEN EXCLUSIVE OR SELECT. BITS (64)  */     \
+  V(risbgn, RISBGN,                                                            \
+    0xEC59) /* type = RIE_F ROTATE THEN INSERT SELECTED BITS (64)  */          \
+  V(risbhg, RISBHG,                                                            \
+    0xEC5D) /* type = RIE_F ROTATE THEN INSERT SELECTED BITS HIGH (64)  */
+
+#define S390_VRX_OPCODE_LIST(V)                                             \
+  V(vleb, VLEB, 0xE700) /* type = VRX   VECTOR LOAD ELEMENT (8)  */         \
+  V(vleh, VLEH, 0xE701) /* type = VRX   VECTOR LOAD ELEMENT (16)  */        \
+  V(vleg, VLEG, 0xE702) /* type = VRX   VECTOR LOAD ELEMENT (64)  */        \
+  V(vlef, VLEF, 0xE703) /* type = VRX   VECTOR LOAD ELEMENT (32)  */        \
+  V(vllez, VLLEZ,                                                           \
+    0xE704) /* type = VRX   VECTOR LOAD LOGICAL ELEMENT AND ZERO  */        \
+  V(vlrep, VLREP, 0xE705) /* type = VRX   VECTOR LOAD AND REPLICATE  */     \
+  V(vl, VL, 0xE706)       /* type = VRX   VECTOR LOAD  */                   \
+  V(vlbb, VLBB, 0xE707)   /* type = VRX   VECTOR LOAD TO BLOCK BOUNDARY  */ \
+  V(vsteb, VSTEB, 0xE708) /* type = VRX   VECTOR STORE ELEMENT (8)  */      \
+  V(vsteh, VSTEH, 0xE709) /* type = VRX   VECTOR STORE ELEMENT (16)  */     \
+  V(vsteg, VSTEG, 0xE70A) /* type = VRX   VECTOR STORE ELEMENT (64)  */     \
+  V(vstef, VSTEF, 0xE70B) /* type = VRX   VECTOR STORE ELEMENT (32)  */     \
+  V(vst, VST, 0xE70E)     /* type = VRX   VECTOR STORE  */
+
+#define S390_RIE_G_OPCODE_LIST(V)                                             \
+  V(lochi, LOCHI,                                                             \
+    0xEC42) /* type = RIE_G LOAD HALFWORD IMMEDIATE ON CONDITION (32<-16)  */ \
+  V(locghi, LOCGHI,                                                           \
+    0xEC46) /* type = RIE_G LOAD HALFWORD IMMEDIATE ON CONDITION (64<-16)  */ \
+  V(lochhi, LOCHHI, 0xEC4E) /* type = RIE_G LOAD HALFWORD HIGH IMMEDIATE   */ \
+                            /* ON CONDITION (32<-16)  */
+
+#define S390_RRS_OPCODE_LIST(V)                                               \
+  V(cgrb, CGRB, 0xECE4)   /* type = RRS   COMPARE AND BRANCH (64)  */         \
+  V(clgrb, CLGRB, 0xECE5) /* type = RRS   COMPARE LOGICAL AND BRANCH (64)  */ \
+  V(crb, CRB, 0xECF6)     /* type = RRS   COMPARE AND BRANCH (32)  */         \
+  V(clrb, CLRB, 0xECF7)   /* type = RRS   COMPARE LOGICAL AND BRANCH (32)  */
+
+#define S390_OPCODE_LIST(V) \
+  S390_RSY_A_OPCODE_LIST(V) \
+  S390_RSY_B_OPCODE_LIST(V) \
+  S390_RXE_OPCODE_LIST(V)   \
+  S390_RRF_A_OPCODE_LIST(V) \
+  S390_RXF_OPCODE_LIST(V)   \
+  S390_IE_OPCODE_LIST(V)    \
+  S390_RRF_B_OPCODE_LIST(V) \
+  S390_RRF_C_OPCODE_LIST(V) \
+  S390_MII_OPCODE_LIST(V)   \
+  S390_RRF_D_OPCODE_LIST(V) \
+  S390_RRF_E_OPCODE_LIST(V) \
+  S390_VRR_A_OPCODE_LIST(V) \
+  S390_VRR_B_OPCODE_LIST(V) \
+  S390_VRR_C_OPCODE_LIST(V) \
+  S390_VRI_A_OPCODE_LIST(V) \
+  S390_VRR_D_OPCODE_LIST(V) \
+  S390_VRI_B_OPCODE_LIST(V) \
+  S390_VRR_E_OPCODE_LIST(V) \
+  S390_VRI_C_OPCODE_LIST(V) \
+  S390_VRI_D_OPCODE_LIST(V) \
+  S390_VRR_F_OPCODE_LIST(V) \
+  S390_RIS_OPCODE_LIST(V)   \
+  S390_VRI_E_OPCODE_LIST(V) \
+  S390_RSL_A_OPCODE_LIST(V) \
+  S390_RSL_B_OPCODE_LIST(V) \
+  S390_SI_OPCODE_LIST(V)    \
+  S390_SIL_OPCODE_LIST(V)   \
+  S390_VRS_A_OPCODE_LIST(V) \
+  S390_RIL_A_OPCODE_LIST(V) \
+  S390_RIL_B_OPCODE_LIST(V) \
+  S390_VRS_B_OPCODE_LIST(V) \
+  S390_RIL_C_OPCODE_LIST(V) \
+  S390_VRS_C_OPCODE_LIST(V) \
+  S390_RI_A_OPCODE_LIST(V)  \
+  S390_RSI_OPCODE_LIST(V)   \
+  S390_RI_B_OPCODE_LIST(V)  \
+  S390_RI_C_OPCODE_LIST(V)  \
+  S390_RSL_OPCODE_LIST(V)   \
+  S390_SMI_OPCODE_LIST(V)   \
+  S390_RXY_A_OPCODE_LIST(V) \
+  S390_RXY_B_OPCODE_LIST(V) \
+  S390_SIY_OPCODE_LIST(V)   \
+  S390_SS_A_OPCODE_LIST(V)  \
+  S390_E_OPCODE_LIST(V)     \
+  S390_SS_B_OPCODE_LIST(V)  \
+  S390_SS_C_OPCODE_LIST(V)  \
+  S390_SS_D_OPCODE_LIST(V)  \
+  S390_SS_E_OPCODE_LIST(V)  \
+  S390_I_OPCODE_LIST(V)     \
+  S390_SS_F_OPCODE_LIST(V)  \
+  S390_SSE_OPCODE_LIST(V)   \
+  S390_SSF_OPCODE_LIST(V)   \
+  S390_RS_A_OPCODE_LIST(V)  \
+  S390_RS_B_OPCODE_LIST(V)  \
+  S390_S_OPCODE_LIST(V)     \
+  S390_RX_A_OPCODE_LIST(V)  \
+  S390_RX_B_OPCODE_LIST(V)  \
+  S390_RIE_A_OPCODE_LIST(V) \
+  S390_RRD_OPCODE_LIST(V)   \
+  S390_RIE_B_OPCODE_LIST(V) \
+  S390_RRE_OPCODE_LIST(V)   \
+  S390_RIE_C_OPCODE_LIST(V) \
+  S390_RIE_D_OPCODE_LIST(V) \
+  S390_VRV_OPCODE_LIST(V)   \
+  S390_RIE_E_OPCODE_LIST(V) \
+  S390_RR_OPCODE_LIST(V)    \
+  S390_RIE_F_OPCODE_LIST(V) \
+  S390_VRX_OPCODE_LIST(V)   \
+  S390_RIE_G_OPCODE_LIST(V) \
+  S390_RRS_OPCODE_LIST(V)
+
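+// Each S390_*_OPCODE_LIST above is an X-macro: it expands a caller-supplied
+// macro V(name, opcode_name, opcode_value) once per instruction, so a
+// single table can drive several definitions. A minimal sketch of one
+// consumer (illustrative only; DECLARE_OPCODES is a hypothetical name):
+//
+//   #define DECLARE_OPCODES(name, op_name, op_value) op_name = op_value,
+//   enum Opcode { S390_OPCODE_LIST(DECLARE_OPCODES) };
+//   #undef DECLARE_OPCODES
+//
+// Redefining V to emit strings instead of enumerators would similarly
+// produce a mnemonic table for the disassembler.
+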
 // Opcodes as defined in Appendix B-2 table
 enum Opcode {
-  A = 0x5A,           // Add (32)
-  ADB = 0xED1A,       // Add (long BFP)
-  ADBR = 0xB31A,      // Add (long BFP)
-  ADTR = 0xB3D2,      // Add (long DFP)
-  ADTRA = 0xB3D2,     // Add (long DFP)
-  AEB = 0xED0A,       // Add (short BFP)
-  AEBR = 0xB30A,      // Add (short BFP)
-  AFI = 0xC29,        // Add Immediate (32)
-  AG = 0xE308,        // Add (64)
-  AGF = 0xE318,       // Add (64<-32)
-  AGFI = 0xC28,       // Add Immediate (64<-32)
-  AGFR = 0xB918,      // Add (64<-32)
-  AGHI = 0xA7B,       // Add Halfword Immediate (64)
-  AGHIK = 0xECD9,     // Add Immediate (64<-16)
-  AGR = 0xB908,       // Add (64)
-  AGRK = 0xB9E8,      // Add (64)
-  AGSI = 0xEB7A,      // Add Immediate (64<-8)
-  AH = 0x4A,          // Add Halfword
-  AHHHR = 0xB9C8,     // Add High (32)
-  AHHLR = 0xB9D8,     // Add High (32)
-  AHI = 0xA7A,        // Add Halfword Immediate (32)
-  AHIK = 0xECD8,      // Add Immediate (32<-16)
-  AHY = 0xE37A,       // Add Halfword
-  AIH = 0xCC8,        // Add Immediate High (32)
-  AL = 0x5E,          // Add Logical (32)
-  ALC = 0xE398,       // Add Logical With Carry (32)
-  ALCG = 0xE388,      // Add Logical With Carry (64)
-  ALCGR = 0xB988,     // Add Logical With Carry (64)
-  ALCR = 0xB998,      // Add Logical With Carry (32)
-  ALFI = 0xC2B,       // Add Logical Immediate (32)
-  ALG = 0xE30A,       // Add Logical (64)
-  ALGF = 0xE31A,      // Add Logical (64<-32)
-  ALGFI = 0xC2A,      // Add Logical Immediate (64<-32)
-  ALGFR = 0xB91A,     // Add Logical (64<-32)
-  ALGHSIK = 0xECDB,   // Add Logical With Signed Immediate (64<-16)
-  ALGR = 0xB90A,      // Add Logical (64)
-  ALGRK = 0xB9EA,     // Add Logical (64)
-  ALGSI = 0xEB7E,     // Add Logical With Signed Immediate (64<-8)
-  ALHHHR = 0xB9CA,    // Add Logical High (32)
-  ALHHLR = 0xB9DA,    // Add Logical High (32)
-  ALHSIK = 0xECDA,    // Add Logical With Signed Immediate (32<-16)
-  ALR = 0x1E,         // Add Logical (32)
-  ALRK = 0xB9FA,      // Add Logical (32)
-  ALSI = 0xEB6E,      // Add Logical With Signed Immediate (32<-8)
-  ALSIH = 0xCCA,      // Add Logical With Signed Immediate High (32)
-  ALSIHN = 0xCCB,     // Add Logical With Signed Immediate High (32)
-  ALY = 0xE35E,       // Add Logical (32)
-  AP = 0xFA,          // Add Decimal
-  AR = 0x1A,          // Add (32)
-  ARK = 0xB9F8,       // Add (32)
-  ASI = 0xEB6A,       // Add Immediate (32<-8)
-  AXBR = 0xB34A,      // Add (extended BFP)
-  AXTR = 0xB3DA,      // Add (extended DFP)
-  AXTRA = 0xB3DA,     // Add (extended DFP)
-  AY = 0xE35A,        // Add (32)
-  BAL = 0x45,         // Branch And Link
-  BALR = 0x05,        // Branch And Link
-  BAS = 0x4D,         // Branch And Save
-  BASR = 0x0D,        // Branch And Save
-  BASSM = 0x0C,       // Branch And Save And Set Mode
-  BC = 0x47,          // Branch On Condition
-  BCR = 0x07,         // Branch On Condition
-  BCT = 0x46,         // Branch On Count (32)
-  BCTG = 0xE346,      // Branch On Count (64)
-  BCTGR = 0xB946,     // Branch On Count (64)
-  BCTR = 0x06,        // Branch On Count (32)
-  BPP = 0xC7,         // Branch Prediction Preload
-  BPRP = 0xC5,        // Branch Prediction Relative Preload
-  BRAS = 0xA75,       // Branch Relative And Save
-  BRASL = 0xC05,      // Branch Relative And Save Long
-  BRC = 0xA74,        // Branch Relative On Condition
-  BRCL = 0xC04,       // Branch Relative On Condition Long
-  BRCT = 0xA76,       // Branch Relative On Count (32)
-  BRCTG = 0xA77,      // Branch Relative On Count (64)
-  BRCTH = 0xCC6,      // Branch Relative On Count High (32)
-  BRXH = 0x84,        // Branch Relative On Index High (32)
-  BRXHG = 0xEC44,     // Branch Relative On Index High (64)
-  BRXLE = 0x85,       // Branch Relative On Index Low Or Eq. (32)
-  BRXLG = 0xEC45,     // Branch Relative On Index Low Or Eq. (64)
-  BSM = 0x0B,         // Branch And Set Mode
-  BXH = 0x86,         // Branch On Index High (32)
-  BXHG = 0xEB44,      // Branch On Index High (64)
-  BXLE = 0x87,        // Branch On Index Low Or Equal (32)
-  BXLEG = 0xEB45,     // Branch On Index Low Or Equal (64)
-  C = 0x59,           // Compare (32)
-  CDB = 0xED19,       // Compare (long BFP)
-  CDBR = 0xB319,      // Compare (long BFP)
-  CDFBR = 0xB395,     // Convert From Fixed (32 to long BFP)
-  CDFBRA = 0xB395,    // Convert From Fixed (32 to long BFP)
-  CDFTR = 0xB951,     // Convert From Fixed (32 to long DFP)
-  CDGBR = 0xB3A5,     // Convert From Fixed (64 to long BFP)
-  CDGBRA = 0xB3A5,    // Convert From Fixed (64 to long BFP)
-  CDGTR = 0xB3F1,     // Convert From Fixed (64 to long DFP)
-  CDGTRA = 0xB3F1,    // Convert From Fixed (64 to long DFP)
-  CDLFBR = 0xB391,    // Convert From Logical (32 to long BFP)
-  CDLFTR = 0xB953,    // Convert From Logical (32 to long DFP)
-  CDLGBR = 0xB3A1,    // Convert From Logical (64 to long BFP)
-  CDLGTR = 0xB952,    // Convert From Logical (64 to long DFP)
-  CDS = 0xBB,         // Compare Double And Swap (32)
-  CDSG = 0xEB3E,      // Compare Double And Swap (64)
-  CDSTR = 0xB3F3,     // Convert From Signed Packed (64 to long DFP)
-  CDSY = 0xEB31,      // Compare Double And Swap (32)
-  CDTR = 0xB3E4,      // Compare (long DFP)
-  CDUTR = 0xB3F2,     // Convert From Unsigned Packed (64 to long DFP)
-  CDZT = 0xEDAA,      // Convert From Zoned (to long DFP)
-  CEB = 0xED09,       // Compare (short BFP)
-  CEBR = 0xB309,      // Compare (short BFP)
-  CEDTR = 0xB3F4,     // Compare Biased Exponent (long DFP)
-  CEFBR = 0xB394,     // Convert From Fixed (32 to short BFP)
-  CEFBRA = 0xB394,    // Convert From Fixed (32 to short BFP)
-  CEGBR = 0xB3A4,     // Convert From Fixed (64 to short BFP)
-  CEGBRA = 0xB3A4,    // Convert From Fixed (64 to short BFP)
-  CELFBR = 0xB390,    // Convert From Logical (32 to short BFP)
-  CELGBR = 0xB3A0,    // Convert From Logical (64 to short BFP)
-  CEXTR = 0xB3FC,     // Compare Biased Exponent (extended DFP)
-  CFC = 0xB21A,       // Compare And Form Codeword
-  CFDBR = 0xB399,     // Convert To Fixed (long BFP to 32)
-  CFDBRA = 0xB399,    // Convert To Fixed (long BFP to 32)
-  CFDR = 0xB3B9,      // Convert To Fixed (long HFP to 32)
-  CFDTR = 0xB941,     // Convert To Fixed (long DFP to 32)
-  CFEBR = 0xB398,     // Convert To Fixed (short BFP to 32)
-  CFEBRA = 0xB398,    // Convert To Fixed (short BFP to 32)
-  CFER = 0xB3B8,      // Convert To Fixed (short HFP to 32)
-  CFI = 0xC2D,        // Compare Immediate (32)
-  CFXBR = 0xB39A,     // Convert To Fixed (extended BFP to 32)
-  CFXBRA = 0xB39A,    // Convert To Fixed (extended BFP to 32)
-  CFXR = 0xB3BA,      // Convert To Fixed (extended HFP to 32)
-  CFXTR = 0xB949,     // Convert To Fixed (extended DFP to 32)
-  CG = 0xE320,        // Compare (64)
-  CGDBR = 0xB3A9,     // Convert To Fixed (long BFP to 64)
-  CGDBRA = 0xB3A9,    // Convert To Fixed (long BFP to 64)
-  CGDR = 0xB3C9,      // Convert To Fixed (long HFP to 64)
-  CGDTR = 0xB3E1,     // Convert To Fixed (long DFP to 64)
-  CGDTRA = 0xB3E1,    // Convert To Fixed (long DFP to 64)
-  CGEBR = 0xB3A8,     // Convert To Fixed (short BFP to 64)
-  CGEBRA = 0xB3A8,    // Convert To Fixed (short BFP to 64)
-  CGER = 0xB3C8,      // Convert To Fixed (short HFP to 64)
-  CGF = 0xE330,       // Compare (64<-32)
-  CGFI = 0xC2C,       // Compare Immediate (64<-32)
-  CGFR = 0xB930,      // Compare (64<-32)
-  CGFRL = 0xC6C,      // Compare Relative Long (64<-32)
-  CGH = 0xE334,       // Compare Halfword (64<-16)
-  CGHI = 0xA7F,       // Compare Halfword Immediate (64<-16)
-  CGHRL = 0xC64,      // Compare Halfword Relative Long (64<-16)
-  CGHSI = 0xE558,     // Compare Halfword Immediate (64<-16)
-  CGIB = 0xECFC,      // Compare Immediate And Branch (64<-8)
-  CGIJ = 0xEC7C,      // Compare Immediate And Branch Relative (64<-8)
-  CGIT = 0xEC70,      // Compare Immediate And Trap (64<-16)
-  CGR = 0xB920,       // Compare (64)
-  CGRB = 0xECE4,      // Compare And Branch (64)
-  CGRJ = 0xEC64,      // Compare And Branch Relative (64)
-  CGRL = 0xC68,       // Compare Relative Long (64)
-  CGRT = 0xB960,      // Compare And Trap (64)
-  CGXBR = 0xB3AA,     // Convert To Fixed (extended BFP to 64)
-  CGXBRA = 0xB3AA,    // Convert To Fixed (extended BFP to 64)
-  CGXR = 0xB3CA,      // Convert To Fixed (extended HFP to 64)
-  CGXTR = 0xB3E9,     // Convert To Fixed (extended DFP to 64)
-  CGXTRA = 0xB3E9,    // Convert To Fixed (extended DFP to 64)
-  CH = 0x49,          // Compare Halfword (32<-16)
-  CHF = 0xE3CD,       // Compare High (32)
-  CHHR = 0xB9CD,      // Compare High (32)
-  CHHSI = 0xE554,     // Compare Halfword Immediate (16)
-  CHI = 0xA7E,        // Compare Halfword Immediate (32<-16)
-  CHLR = 0xB9DD,      // Compare High (32)
-  CHRL = 0xC65,       // Compare Halfword Relative Long (32<-16)
-  CHSI = 0xE55C,      // Compare Halfword Immediate (32<-16)
-  CHY = 0xE379,       // Compare Halfword (32<-16)
-  CIB = 0xECFE,       // Compare Immediate And Branch (32<-8)
-  CIH = 0xCCD,        // Compare Immediate High (32)
-  CIJ = 0xEC7E,       // Compare Immediate And Branch Relative (32<-8)
-  CIT = 0xEC72,       // Compare Immediate And Trap (32<-16)
-  CKSM = 0xB241,      // Checksum
-  CL = 0x55,          // Compare Logical (32)
-  CLC = 0xD5,         // Compare Logical (character)
-  CLCL = 0x0F,        // Compare Logical Long
-  CLCLE = 0xA9,       // Compare Logical Long Extended
-  CLCLU = 0xEB8F,     // Compare Logical Long Unicode
-  CLFDBR = 0xB39D,    // Convert To Logical (long BFP to 32)
-  CLFDTR = 0xB943,    // Convert To Logical (long DFP to 32)
-  CLFEBR = 0xB39C,    // Convert To Logical (short BFP to 32)
-  CLFHSI = 0xE55D,    // Compare Logical Immediate (32<-16)
-  CLFI = 0xC2F,       // Compare Logical Immediate (32)
-  CLFIT = 0xEC73,     // Compare Logical Immediate And Trap (32<-16)
-  CLFXBR = 0xB39E,    // Convert To Logical (extended BFP to 32)
-  CLFXTR = 0xB94B,    // Convert To Logical (extended DFP to 32)
-  CLG = 0xE321,       // Compare Logical (64)
-  CLGDBR = 0xB3AD,    // Convert To Logical (long BFP to 64)
-  CLGDTR = 0xB942,    // Convert To Logical (long DFP to 64)
-  CLGEBR = 0xB3AC,    // Convert To Logical (short BFP to 64)
-  CLGF = 0xE331,      // Compare Logical (64<-32)
-  CLGFI = 0xC2E,      // Compare Logical Immediate (64<-32)
-  CLGR = 0xB921,      // Compare Logical (64)
-  CLI = 0x95,         // Compare Logical Immediate (8)
-  CLIY = 0xEB55,      // Compare Logical Immediate (8)
-  CLR = 0x15,         // Compare Logical (32)
-  CLY = 0xE355,       // Compare Logical (32)
-  CD = 0x69,          // Compare (LH)
-  CDR = 0x29,         // Compare (LH)
-  CR = 0x19,          // Compare (32)
-  CSST = 0xC82,       // Compare And Swap And Store
-  CSXTR = 0xB3EB,     // Convert To Signed Packed (extended DFP to 128)
-  CSY = 0xEB14,       // Compare And Swap (32)
-  CU12 = 0xB2A7,      // Convert Utf-8 To Utf-16
-  CU14 = 0xB9B0,      // Convert Utf-8 To Utf-32
-  CU21 = 0xB2A6,      // Convert Utf-16 To Utf-8
-  CU24 = 0xB9B1,      // Convert Utf-16 To Utf-32
-  CU41 = 0xB9B2,      // Convert Utf-32 To Utf-8
-  CU42 = 0xB9B3,      // Convert Utf-32 To Utf-16
-  CUDTR = 0xB3E2,     // Convert To Unsigned Packed (long DFP to 64)
-  CUSE = 0xB257,      // Compare Until Substring Equal
-  CUTFU = 0xB2A7,     // Convert Utf-8 To Unicode
-  CUUTF = 0xB2A6,     // Convert Unicode To Utf-8
-  CUXTR = 0xB3EA,     // Convert To Unsigned Packed (extended DFP to 128)
-  CVB = 0x4F,         // Convert To Binary (32)
-  CVBG = 0xE30E,      // Convert To Binary (64)
-  CVBY = 0xE306,      // Convert To Binary (32)
-  CVD = 0x4E,         // Convert To Decimal (32)
-  CVDG = 0xE32E,      // Convert To Decimal (64)
-  CVDY = 0xE326,      // Convert To Decimal (32)
-  CXBR = 0xB349,      // Compare (extended BFP)
-  CXFBR = 0xB396,     // Convert From Fixed (32 to extended BFP)
-  CXFBRA = 0xB396,    // Convert From Fixed (32 to extended BFP)
-  CXFTR = 0xB959,     // Convert From Fixed (32 to extended DFP)
-  CXGBR = 0xB3A6,     // Convert From Fixed (64 to extended BFP)
-  CXGBRA = 0xB3A6,    // Convert From Fixed (64 to extended BFP)
-  CXGTR = 0xB3F9,     // Convert From Fixed (64 to extended DFP)
-  CXGTRA = 0xB3F9,    // Convert From Fixed (64 to extended DFP)
-  CXLFBR = 0xB392,    // Convert From Logical (32 to extended BFP)
-  CXLFTR = 0xB95B,    // Convert From Logical (32 to extended DFP)
-  CXLGBR = 0xB3A2,    // Convert From Logical (64 to extended BFP)
-  CXLGTR = 0xB95A,    // Convert From Logical (64 to extended DFP)
-  CXSTR = 0xB3FB,     // Convert From Signed Packed (128 to extended DFP)
-  CXTR = 0xB3EC,      // Compare (extended DFP)
-  CXUTR = 0xB3FA,     // Convert From Unsigned Packed (128 to ext. DFP)
-  CXZT = 0xEDAB,      // Convert From Zoned (to extended DFP)
-  CY = 0xE359,        // Compare (32)
-  CZDT = 0xEDA8,      // Convert To Zoned (from long DFP)
-  CZXT = 0xEDA9,      // Convert To Zoned (from extended DFP)
-  D = 0x5D,           // Divide (32<-64)
-  DDB = 0xED1D,       // Divide (long BFP)
-  DDBR = 0xB31D,      // Divide (long BFP)
-  DDTR = 0xB3D1,      // Divide (long DFP)
-  DDTRA = 0xB3D1,     // Divide (long DFP)
-  DEB = 0xED0D,       // Divide (short BFP)
-  DEBR = 0xB30D,      // Divide (short BFP)
-  DIDBR = 0xB35B,     // Divide To Integer (long BFP)
-  DIEBR = 0xB353,     // Divide To Integer (short BFP)
-  DL = 0xE397,        // Divide Logical (32<-64)
-  DLG = 0xE387,       // Divide Logical (64<-128)
-  DLGR = 0xB987,      // Divide Logical (64<-128)
-  DLR = 0xB997,       // Divide Logical (32<-64)
-  DP = 0xFD,          // Divide Decimal
-  DR = 0x1D,          // Divide (32<-64)
-  DSG = 0xE30D,       // Divide Single (64)
-  DSGF = 0xE31D,      // Divide Single (64<-32)
-  DSGFR = 0xB91D,     // Divide Single (64<-32)
-  DSGR = 0xB90D,      // Divide Single (64)
-  DXBR = 0xB34D,      // Divide (extended BFP)
-  DXTR = 0xB3D9,      // Divide (extended DFP)
-  DXTRA = 0xB3D9,     // Divide (extended DFP)
-  EAR = 0xB24F,       // Extract Access
-  ECAG = 0xEB4C,      // Extract Cache Attribute
-  ECTG = 0xC81,       // Extract Cpu Time
-  ED = 0xDE,          // Edit
-  EDMK = 0xDF,        // Edit And Mark
-  EEDTR = 0xB3E5,     // Extract Biased Exponent (long DFP to 64)
-  EEXTR = 0xB3ED,     // Extract Biased Exponent (extended DFP to 64)
-  EFPC = 0xB38C,      // Extract Fpc
-  EPSW = 0xB98D,      // Extract Psw
-  ESDTR = 0xB3E7,     // Extract Significance (long DFP)
-  ESXTR = 0xB3EF,     // Extract Significance (extended DFP)
-  ETND = 0xB2EC,      // Extract Transaction Nesting Depth
-  EX = 0x44,          // Execute
-  EXRL = 0xC60,       // Execute Relative Long
-  FIDBR = 0xB35F,     // Load Fp Integer (long BFP)
-  FIDBRA = 0xB35F,    // Load Fp Integer (long BFP)
-  FIDTR = 0xB3D7,     // Load Fp Integer (long DFP)
-  FIEBR = 0xB357,     // Load Fp Integer (short BFP)
-  FIEBRA = 0xB357,    // Load Fp Integer (short BFP)
-  FIXBR = 0xB347,     // Load Fp Integer (extended BFP)
-  FIXBRA = 0xB347,    // Load Fp Integer (extended BFP)
-  FIXTR = 0xB3DF,     // Load Fp Integer (extended DFP)
-  FLOGR = 0xB983,     // Find Leftmost One
-  HSCH = 0xB231,      // Halt Subchannel
-  IC_z = 0x43,        // Insert Character
-  ICM = 0xBF,         // Insert Characters Under Mask (low)
-  ICMH = 0xEB80,      // Insert Characters Under Mask (high)
-  ICMY = 0xEB81,      // Insert Characters Under Mask (low)
-  ICY = 0xE373,       // Insert Character
-  IEDTR = 0xB3F6,     // Insert Biased Exponent (64 to long DFP)
-  IEXTR = 0xB3FE,     // Insert Biased Exponent (64 to extended DFP)
-  IIHF = 0xC08,       // Insert Immediate (high)
-  IIHH = 0xA50,       // Insert Immediate (high high)
-  IIHL = 0xA51,       // Insert Immediate (high low)
-  IILF = 0xC09,       // Insert Immediate (low)
-  IILH = 0xA52,       // Insert Immediate (low high)
-  IILL = 0xA53,       // Insert Immediate (low low)
-  IPM = 0xB222,       // Insert Program Mask
-  KDB = 0xED18,       // Compare And Signal (long BFP)
-  KDBR = 0xB318,      // Compare And Signal (long BFP)
-  KDTR = 0xB3E0,      // Compare And Signal (long DFP)
-  KEB = 0xED08,       // Compare And Signal (short BFP)
-  KEBR = 0xB308,      // Compare And Signal (short BFP)
-  KIMD = 0xB93E,      // Compute Intermediate Message Digest
-  KLMD = 0xB93F,      // Compute Last Message Digest
-  KM = 0xB92E,        // Cipher Message
-  KMAC = 0xB91E,      // Compute Message Authentication Code
-  KMC = 0xB92F,       // Cipher Message With Chaining
-  KMCTR = 0xB92D,     // Cipher Message With Counter
-  KMF = 0xB92A,       // Cipher Message With Cfb
-  KMO = 0xB92B,       // Cipher Message With Ofb
-  KXBR = 0xB348,      // Compare And Signal (extended BFP)
-  KXTR = 0xB3E8,      // Compare And Signal (extended DFP)
-  L = 0x58,           // Load (32)
-  LA = 0x41,          // Load Address
-  LAA = 0xEBF8,       // Load And Add (32)
-  LAAG = 0xEBE8,      // Load And Add (64)
-  LAAL = 0xEBFA,      // Load And Add Logical (32)
-  LAALG = 0xEBEA,     // Load And Add Logical (64)
-  LAE = 0x51,         // Load Address Extended
-  LAEY = 0xE375,      // Load Address Extended
-  LAN = 0xEBF4,       // Load And And (32)
-  LANG = 0xEBE4,      // Load And And (64)
-  LAO = 0xEBF6,       // Load And Or (32)
-  LAOG = 0xEBE6,      // Load And Or (64)
-  LARL = 0xC00,       // Load Address Relative Long
-  LAT = 0xE39F,       // Load And Trap (32L<-32)
-  LAX = 0xEBF7,       // Load And Exclusive Or (32)
-  LAXG = 0xEBE7,      // Load And Exclusive Or (64)
-  LAY = 0xE371,       // Load Address
-  LB = 0xE376,        // Load Byte (32)
-  LBH = 0xE3C0,       // Load Byte High (32<-8)
-  LBR = 0xB926,       // Load Byte (32)
-  LCDBR = 0xB313,     // Load Complement (long BFP)
-  LCDFR = 0xB373,     // Load Complement (long)
-  LCEBR = 0xB303,     // Load Complement (short BFP)
-  LCGFR = 0xB913,     // Load Complement (64<-32)
-  LCGR = 0xB903,      // Load Complement (64)
-  LCR = 0x13,         // Load Complement (32)
-  LCXBR = 0xB343,     // Load Complement (extended BFP)
-  LD = 0x68,          // Load (long)
-  LDEB = 0xED04,      // Load Lengthened (short to long BFP)
-  LDEBR = 0xB304,     // Load Lengthened (short to long BFP)
-  LDETR = 0xB3D4,     // Load Lengthened (short to long DFP)
-  LDGR = 0xB3C1,      // Load Fpr From Gr (64 to long)
-  LDR = 0x28,         // Load (long)
-  LDXBR = 0xB345,     // Load Rounded (extended to long BFP)
-  LDXBRA = 0xB345,    // Load Rounded (extended to long BFP)
-  LDXTR = 0xB3DD,     // Load Rounded (extended to long DFP)
-  LDY = 0xED65,       // Load (long)
-  LE = 0x78,          // Load (short)
-  LEDBR = 0xB344,     // Load Rounded (long to short BFP)
-  LEDBRA = 0xB344,    // Load Rounded (long to short BFP)
-  LEDTR = 0xB3D5,     // Load Rounded (long to short DFP)
-  LER = 0x38,         // Load (short)
-  LEXBR = 0xB346,     // Load Rounded (extended to short BFP)
-  LEXBRA = 0xB346,    // Load Rounded (extended to short BFP)
-  LEY = 0xED64,       // Load (short)
-  LFAS = 0xB2BD,      // Load Fpc And Signal
-  LFH = 0xE3CA,       // Load High (32)
-  LFHAT = 0xE3C8,     // Load High And Trap (32H<-32)
-  LFPC = 0xB29D,      // Load Fpc
-  LG = 0xE304,        // Load (64)
-  LGAT = 0xE385,      // Load And Trap (64)
-  LGB = 0xE377,       // Load Byte (64)
-  LGBR = 0xB906,      // Load Byte (64)
-  LGDR = 0xB3CD,      // Load Gr From Fpr (long to 64)
-  LGF = 0xE314,       // Load (64<-32)
-  LGFI = 0xC01,       // Load Immediate (64<-32)
-  LGFR = 0xB914,      // Load (64<-32)
-  LGFRL = 0xC4C,      // Load Relative Long (64<-32)
-  LGH = 0xE315,       // Load Halfword (64)
-  LGHI = 0xA79,       // Load Halfword Immediate (64)
-  LGHR = 0xB907,      // Load Halfword (64)
-  LGHRL = 0xC44,      // Load Halfword Relative Long (64<-16)
-  LGR = 0xB904,       // Load (64)
-  LGRL = 0xC48,       // Load Relative Long (64)
-  LH = 0x48,          // Load Halfword (32)
-  LHH = 0xE3C4,       // Load Halfword High (32<-16)
-  LHI = 0xA78,        // Load Halfword Immediate (32)
-  LHR = 0xB927,       // Load Halfword (32)
-  LHRL = 0xC45,       // Load Halfword Relative Long (32<-16)
-  LHY = 0xE378,       // Load Halfword (32)
-  LLC = 0xE394,       // Load Logical Character (32)
-  LLCH = 0xE3C2,      // Load Logical Character High (32<-8)
-  LLCR = 0xB994,      // Load Logical Character (32)
-  LLGC = 0xE390,      // Load Logical Character (64)
-  LLGCR = 0xB984,     // Load Logical Character (64)
-  LLGF = 0xE316,      // Load Logical (64<-32)
-  LLGFAT = 0xE39D,    // Load Logical And Trap (64<-32)
-  LLGFR = 0xB916,     // Load Logical (64<-32)
-  LLGFRL = 0xC4E,     // Load Logical Relative Long (64<-32)
-  LLGH = 0xE391,      // Load Logical Halfword (64)
-  LLGHR = 0xB985,     // Load Logical Halfword (64)
-  LLGHRL = 0xC46,     // Load Logical Halfword Relative Long (64<-16)
-  LLGT = 0xE317,      // Load Logical Thirty One Bits
-  LLGTAT = 0xE39C,    // Load Logical Thirty One Bits And Trap (64<-31)
-  LLGTR = 0xB917,     // Load Logical Thirty One Bits
-  LLH = 0xE395,       // Load Logical Halfword (32)
-  LLHH = 0xE3C6,      // Load Logical Halfword High (32<-16)
-  LLHR = 0xB995,      // Load Logical Halfword (32)
-  LLHRL = 0xC42,      // Load Logical Halfword Relative Long (32<-16)
-  LLIHF = 0xC0E,      // Load Logical Immediate (high)
-  LLIHH = 0xA5C,      // Load Logical Immediate (high high)
-  LLIHL = 0xA5D,      // Load Logical Immediate (high low)
-  LLILF = 0xC0F,      // Load Logical Immediate (low)
-  LLILH = 0xA5E,      // Load Logical Immediate (low high)
-  LLILL = 0xA5F,      // Load Logical Immediate (low low)
-  LM = 0x98,          // Load Multiple (32)
-  LMD = 0xEF,         // Load Multiple Disjoint
-  LMG = 0xEB04,       // Load Multiple (64)
-  LMH = 0xEB96,       // Load Multiple High
-  LMY = 0xEB98,       // Load Multiple (32)
-  LNDBR = 0xB311,     // Load Negative (long BFP)
-  LNDFR = 0xB371,     // Load Negative (long)
-  LNEBR = 0xB301,     // Load Negative (short BFP)
-  LNGFR = 0xB911,     // Load Negative (64<-32)
-  LNGR = 0xB901,      // Load Negative (64)
-  LNR = 0x11,         // Load Negative (32)
-  LNXBR = 0xB341,     // Load Negative (extended BFP)
-  LOC = 0xEBF2,       // Load On Condition (32)
-  LOCG = 0xEBE2,      // Load On Condition (64)
-  LOCGR = 0xB9E2,     // Load On Condition (64)
-  LOCR = 0xB9F2,      // Load On Condition (32)
-  LPD = 0xC84,        // Load Pair Disjoint (32)
-  LPDBR = 0xB310,     // Load Positive (long BFP)
-  LPDFR = 0xB370,     // Load Positive (long)
-  LPDG = 0xC85,       // Load Pair Disjoint (64)
-  LPEBR = 0xB300,     // Load Positive (short BFP)
-  LPGFR = 0xB910,     // Load Positive (64<-32)
-  LPGR = 0xB900,      // Load Positive (64)
-  LPQ = 0xE38F,       // Load Pair From Quadword
-  LPR = 0x10,         // Load Positive (32)
-  LPXBR = 0xB340,     // Load Positive (extended BFP)
-  LR = 0x18,          // Load (32)
-  LRL = 0xC4D,        // Load Relative Long (32)
-  LRV = 0xE31E,       // Load Reversed (32)
-  LRVG = 0xE30F,      // Load Reversed (64)
-  LRVGR = 0xB90F,     // Load Reversed (64)
-  LRVH = 0xE31F,      // Load Reversed (16)
-  LRVR = 0xB91F,      // Load Reversed (32)
-  LT = 0xE312,        // Load And Test (32)
-  LTDBR = 0xB312,     // Load And Test (long BFP)
-  LTDTR = 0xB3D6,     // Load And Test (long DFP)
-  LTEBR = 0xB302,     // Load And Test (short BFP)
-  LTG = 0xE302,       // Load And Test (64)
-  LTGF = 0xE332,      // Load And Test (64<-32)
-  LTGFR = 0xB912,     // Load And Test (64<-32)
-  LTGR = 0xB902,      // Load And Test (64)
-  LTR = 0x12,         // Load And Test (32)
-  LTXBR = 0xB342,     // Load And Test (extended BFP)
-  LTXTR = 0xB3DE,     // Load And Test (extended DFP)
-  LXDB = 0xED05,      // Load Lengthened (long to extended BFP)
-  LXDBR = 0xB305,     // Load Lengthened (long to extended BFP)
-  LXDTR = 0xB3DC,     // Load Lengthened (long to extended DFP)
-  LXEB = 0xED06,      // Load Lengthened (short to extended BFP)
-  LXEBR = 0xB306,     // Load Lengthened (short to extended BFP)
-  LXR = 0xB365,       // Load (extended)
-  LY = 0xE358,        // Load (32)
-  LZDR = 0xB375,      // Load Zero (long)
-  LZER = 0xB374,      // Load Zero (short)
-  LZXR = 0xB376,      // Load Zero (extended)
-  M = 0x5C,           // Multiply (64<-32)
-  MADB = 0xED1E,      // Multiply And Add (long BFP)
-  MADBR = 0xB31E,     // Multiply And Add (long BFP)
-  MAEB = 0xED0E,      // Multiply And Add (short BFP)
-  MAEBR = 0xB30E,     // Multiply And Add (short BFP)
-  MC = 0xAF,          // Monitor Call
-  MDB = 0xED1C,       // Multiply (long BFP)
-  MDBR = 0xB31C,      // Multiply (long BFP)
-  MDEB = 0xED0C,      // Multiply (short to long BFP)
-  MDEBR = 0xB30C,     // Multiply (short to long BFP)
-  MDTR = 0xB3D0,      // Multiply (long DFP)
-  MDTRA = 0xB3D0,     // Multiply (long DFP)
-  MEEB = 0xED17,      // Multiply (short BFP)
-  MEEBR = 0xB317,     // Multiply (short BFP)
-  MFY = 0xE35C,       // Multiply (64<-32)
-  MGHI = 0xA7D,       // Multiply Halfword Immediate (64)
-  MH = 0x4C,          // Multiply Halfword (32)
-  MHI = 0xA7C,        // Multiply Halfword Immediate (32)
-  MHY = 0xE37C,       // Multiply Halfword (32)
-  ML = 0xE396,        // Multiply Logical (64<-32)
-  MLG = 0xE386,       // Multiply Logical (128<-64)
-  MLGR = 0xB986,      // Multiply Logical (128<-64)
-  MLR = 0xB996,       // Multiply Logical (64<-32)
-  MP = 0xFC,          // Multiply Decimal
-  MR = 0x1C,          // Multiply (64<-32)
-  MS = 0x71,          // Multiply Single (32)
-  MSCH = 0xB232,      // Modify Subchannel
-  MSDB = 0xED1F,      // Multiply And Subtract (long BFP)
-  MSDBR = 0xB31F,     // Multiply And Subtract (long BFP)
-  MSEB = 0xED0F,      // Multiply And Subtract (short BFP)
-  MSEBR = 0xB30F,     // Multiply And Subtract (short BFP)
-  MSFI = 0xC21,       // Multiply Single Immediate (32)
-  MSG = 0xE30C,       // Multiply Single (64)
-  MSGF = 0xE31C,      // Multiply Single (64<-32)
-  MSGFI = 0xC20,      // Multiply Single Immediate (64<-32)
-  MSGFR = 0xB91C,     // Multiply Single (64<-32)
-  MSGR = 0xB90C,      // Multiply Single (64)
-  MSR = 0xB252,       // Multiply Single (32)
-  MSY = 0xE351,       // Multiply Single (32)
-  MVC = 0xD2,         // Move (character)
-  MVCP = 0xDA,        // Move To Primary
-  MVCDK = 0xE50F,     // Move With Destination Key
-  MVCIN = 0xE8,       // Move Inverse
-  MVCL = 0x0E,        // Move Long
-  MVCLE = 0xA8,       // Move Long Extended
-  MVCLU = 0xEB8E,     // Move Long Unicode
-  MVGHI = 0xE548,     // Move (64<-16)
-  MVHHI = 0xE544,     // Move (16<-16)
-  MVHI = 0xE54C,      // Move (32<-16)
-  MVI = 0x92,         // Move (immediate)
-  MVIY = 0xEB52,      // Move (immediate)
-  MVN = 0xD1,         // Move Numerics
-  MVO = 0xF1,         // Move With Offset
-  MVST = 0xB255,      // Move String
-  MVZ = 0xD3,         // Move Zones
-  MXBR = 0xB34C,      // Multiply (extended BFP)
-  MXDB = 0xED07,      // Multiply (long to extended BFP)
-  MXDBR = 0xB307,     // Multiply (long to extended BFP)
-  MXTR = 0xB3D8,      // Multiply (extended DFP)
-  MXTRA = 0xB3D8,     // Multiply (extended DFP)
-  N = 0x54,           // And (32)
-  NC = 0xD4,          // And (character)
-  NG = 0xE380,        // And (64)
-  NGR = 0xB980,       // And (64)
-  NGRK = 0xB9E4,      // And (64)
-  NI = 0x94,          // And (immediate)
-  NIAI = 0xB2FA,      // Next Instruction Access Intent
-  NIHF = 0xC0A,       // And Immediate (high)
-  NIHH = 0xA54,       // And Immediate (high high)
-  NIHL = 0xA55,       // And Immediate (high low)
-  NILF = 0xC0B,       // And Immediate (low)
-  NILH = 0xA56,       // And Immediate (low high)
-  NILL = 0xA57,       // And Immediate (low low)
-  NIY = 0xEB54,       // And (immediate)
-  NR = 0x14,          // And (32)
-  NRK = 0xB9F4,       // And (32)
-  NTSTG = 0xE325,     // Nontransactional Store (64)
-  NY = 0xE354,        // And (32)
-  O = 0x56,           // Or (32)
-  OC = 0xD6,          // Or (character)
-  OG = 0xE381,        // Or (64)
-  OGR = 0xB981,       // Or (64)
-  OGRK = 0xB9E6,      // Or (64)
-  OI = 0x96,          // Or (immediate)
-  OIHF = 0xC0C,       // Or Immediate (high)
-  OIHH = 0xA58,       // Or Immediate (high high)
-  OIHL = 0xA59,       // Or Immediate (high low)
-  OILF = 0xC0D,       // Or Immediate (low)
-  OILH = 0xA5A,       // Or Immediate (low high)
-  OILL = 0xA5B,       // Or Immediate (low low)
-  OIY = 0xEB56,       // Or (immediate)
-  OR = 0x16,          // Or (32)
-  ORK = 0xB9F6,       // Or (32)
-  OY = 0xE356,        // Or (32)
-  PACK = 0xF2,        // Pack
-  PCC = 0xB92C,       // Perform Cryptographic Computation
-  PFD = 0xE336,       // Prefetch Data
-  PFDRL = 0xC62,      // Prefetch Data Relative Long
-  PFPO = 0x010A,      // Perform Floating-Point Operation
-  PKA = 0xE9,         // Pack Ascii
-  PKU = 0xE1,         // Pack Unicode
-  PLO = 0xEE,         // Perform Locked Operation
-  POPCNT_Z = 0xB9E1,  // Population Count
-  PPA = 0xB2E8,       // Perform Processor Assist
-  QADTR = 0xB3F5,     // Quantize (long DFP)
-  QAXTR = 0xB3FD,     // Quantize (extended DFP)
-  RCHP = 0xB23B,      // Reset Channel Path
-  RISBG = 0xEC55,     // Rotate Then Insert Selected Bits
-  RISBGN = 0xEC59,    // Rotate Then Insert Selected Bits
-  RISBHG = 0xEC5D,    // Rotate Then Insert Selected Bits High
-  RISBLG = 0xEC51,    // Rotate Then Insert Selected Bits Low
-  RLL = 0xEB1D,       // Rotate Left Single Logical (32)
-  RLLG = 0xEB1C,      // Rotate Left Single Logical (64)
-  RNSBG = 0xEC54,     // Rotate Then And Selected Bits
-  ROSBG = 0xEC56,     // Rotate Then Or Selected Bits
-  RRDTR = 0xB3F7,     // Reround (long DFP)
-  RRXTR = 0xB3FF,     // Reround (extended DFP)
-  RSCH = 0xB238,      // Resume Subchannel
-  RXSBG = 0xEC57,     // Rotate Then Exclusive Or Selected Bits
-  S = 0x5B,           // Subtract (32)
-  SAL = 0xB237,       // Set Address Limit
-  SAR = 0xB24E,       // Set Access
-  SCHM = 0xB23C,      // Set Channel Monitor
-  SDB = 0xED1B,       // Subtract (long BFP)
-  SDBR = 0xB31B,      // Subtract (long BFP)
-  SDTR = 0xB3D3,      // Subtract (long DFP)
-  SDTRA = 0xB3D3,     // Subtract (long DFP)
-  SEB = 0xED0B,       // Subtract (short BFP)
-  SEBR = 0xB30B,      // Subtract (short BFP)
-  SFASR = 0xB385,     // Set Fpc And Signal
-  SFPC = 0xB384,      // Set Fpc
-  SG = 0xE309,        // Subtract (64)
-  SGF = 0xE319,       // Subtract (64<-32)
-  SGFR = 0xB919,      // Subtract (64<-32)
-  SGR = 0xB909,       // Subtract (64)
-  SGRK = 0xB9E9,      // Subtract (64)
-  SH = 0x4B,          // Subtract Halfword
-  SHHHR = 0xB9C9,     // Subtract High (32)
-  SHHLR = 0xB9D9,     // Subtract High (32)
-  SHY = 0xE37B,       // Subtract Halfword
-  SL = 0x5F,          // Subtract Logical (32)
-  SLA = 0x8B,         // Shift Left Single (32)
-  SLAG = 0xEB0B,      // Shift Left Single (64)
-  SLAK = 0xEBDD,      // Shift Left Single (32)
-  SLB = 0xE399,       // Subtract Logical With Borrow (32)
-  SLBG = 0xE389,      // Subtract Logical With Borrow (64)
-  SLBGR = 0xB989,     // Subtract Logical With Borrow (64)
-  SLBR = 0xB999,      // Subtract Logical With Borrow (32)
-  SLDA = 0x8F,        // Shift Left Double
-  SLDL = 0x8D,        // Shift Left Double Logical
-  SLDT = 0xED40,      // Shift Significand Left (long DFP)
-  SLFI = 0xC25,       // Subtract Logical Immediate (32)
-  SLG = 0xE30B,       // Subtract Logical (64)
-  SLGF = 0xE31B,      // Subtract Logical (64<-32)
-  SLGFI = 0xC24,      // Subtract Logical Immediate (64<-32)
-  SLGFR = 0xB91B,     // Subtract Logical (64<-32)
-  SLGR = 0xB90B,      // Subtract Logical (64)
-  SLGRK = 0xB9EB,     // Subtract Logical (64)
-  SLHHHR = 0xB9CB,    // Subtract Logical High (32)
-  SLHHLR = 0xB9DB,    // Subtract Logical High (32)
-  SLL = 0x89,         // Shift Left Single Logical (32)
-  SLLG = 0xEB0D,      // Shift Left Single Logical (64)
-  SLLK = 0xEBDF,      // Shift Left Single Logical (32)
-  SLR = 0x1F,         // Subtract Logical (32)
-  SLRK = 0xB9FB,      // Subtract Logical (32)
-  SLXT = 0xED48,      // Shift Significand Left (extended DFP)
-  SLY = 0xE35F,       // Subtract Logical (32)
-  SP = 0xFB,          // Subtract Decimal
-  SPM = 0x04,         // Set Program Mask
-  SQDB = 0xED15,      // Square Root (long BFP)
-  SQDBR = 0xB315,     // Square Root (long BFP)
-  SQEB = 0xED14,      // Square Root (short BFP)
-  SQEBR = 0xB314,     // Square Root (short BFP)
-  SQXBR = 0xB316,     // Square Root (extended BFP)
-  SR = 0x1B,          // Subtract (32)
-  SRA = 0x8A,         // Shift Right Single (32)
-  SRAG = 0xEB0A,      // Shift Right Single (64)
-  SRAK = 0xEBDC,      // Shift Right Single (32)
-  SRDA = 0x8E,        // Shift Right Double
-  SRDL = 0x8C,        // Shift Right Double Logical
-  SRDT = 0xED41,      // Shift Significand Right (long DFP)
-  SRK = 0xB9F9,       // Subtract (32)
-  SRL = 0x88,         // Shift Right Single Logical (32)
-  SRLG = 0xEB0C,      // Shift Right Single Logical (64)
-  SRLK = 0xEBDE,      // Shift Right Single Logical (32)
-  SRNM = 0xB299,      // Set BFP Rounding Mode (2 bit)
-  SRNMB = 0xB2B8,     // Set BFP Rounding Mode (3 bit)
-  SRNMT = 0xB2B9,     // Set DFP Rounding Mode
-  SRP = 0xF0,         // Shift And Round Decimal
-  SRST = 0xB25E,      // Search String
-  SRSTU = 0xB9BE,     // Search String Unicode
-  SRXT = 0xED49,      // Shift Significand Right (extended DFP)
-  SSCH = 0xB233,      // Start Subchannel
-  ST = 0x50,          // Store (32)
-  STC = 0x42,         // Store Character
-  STCH = 0xE3C3,      // Store Character High (8)
-  STCK = 0xB205,      // Store Clock
-  STCKE = 0xB278,     // Store Clock Extended
-  STCKF = 0xB27C,     // Store Clock Fast
-  STCM = 0xBE,        // Store Characters Under Mask (low)
-  STCMH = 0xEB2C,     // Store Characters Under Mask (high)
-  STCMY = 0xEB2D,     // Store Characters Under Mask (low)
-  STCPS = 0xB23A,     // Store Channel Path Status
-  STCRW = 0xB239,     // Store Channel Report Word
-  STCY = 0xE372,      // Store Character
-  STD = 0x60,         // Store (long)
-  STDY = 0xED67,      // Store (long)
-  STE = 0x70,         // Store (short)
-  STEY = 0xED66,      // Store (short)
-  STFH = 0xE3CB,      // Store High (32)
-  STFLE = 0xB2B0,     // Store Facility List Extended
-  STFPC = 0xB29C,     // Store Fpc
-  STG = 0xE324,       // Store (64)
-  STGRL = 0xC4B,      // Store Relative Long (64)
-  STH = 0x40,         // Store Halfword
-  STHH = 0xE3C7,      // Store Halfword High (16)
-  STHRL = 0xC47,      // Store Halfword Relative Long
-  STHY = 0xE370,      // Store Halfword
-  STM = 0x90,         // Store Multiple (32)
-  STMG = 0xEB24,      // Store Multiple (64)
-  STMH = 0xEB26,      // Store Multiple High
-  STMY = 0xEB90,      // Store Multiple (32)
-  STOC = 0xEBF3,      // Store On Condition (32)
-  STOCG = 0xEBE3,     // Store On Condition (64)
-  STPQ = 0xE38E,      // Store Pair To Quadword
-  STRL = 0xC4F,       // Store Relative Long (32)
-  STRV = 0xE33E,      // Store Reversed (32)
-  STRVG = 0xE32F,     // Store Reversed (64)
-  STRVH = 0xE33F,     // Store Reversed (16)
-  STSCH = 0xB234,     // Store Subchannel
-  STY = 0xE350,       // Store (32)
-  SVC = 0x0A,         // Supervisor Call
-  SXBR = 0xB34B,      // Subtract (extended BFP)
-  SXTR = 0xB3DB,      // Subtract (extended DFP)
-  SXTRA = 0xB3DB,     // Subtract (extended DFP)
-  SY = 0xE35B,        // Subtract (32)
-  TABORT = 0xB2FC,    // Transaction Abort
-  TBDR = 0xB351,      // Convert HFP To BFP (long)
-  TBEDR = 0xB350,     // Convert HFP To BFP (long to short)
-  TBEGIN = 0xE560,    // Transaction Begin
-  TBEGINC = 0xE561,   // Transaction Begin (constrained)
-  TCDB = 0xED11,      // Test Data Class (long BFP)
-  TCEB = 0xED10,      // Test Data Class (short BFP)
-  TCXB = 0xED12,      // Test Data Class (extended BFP)
-  TDCDT = 0xED54,     // Test Data Class (long DFP)
-  TDCET = 0xED50,     // Test Data Class (short DFP)
-  TDCXT = 0xED58,     // Test Data Class (extended DFP)
-  TDGDT = 0xED55,     // Test Data Group (long DFP)
-  TDGET = 0xED51,     // Test Data Group (short DFP)
-  TDGXT = 0xED59,     // Test Data Group (extended DFP)
-  TEND = 0xB2F8,      // Transaction End
-  THDER = 0xB358,     // Convert BFP To HFP (short to long)
-  THDR = 0xB359,      // Convert BFP To HFP (long)
-  TM = 0x91,          // Test Under Mask
-  TMH = 0xA70,        // Test Under Mask High
-  TMHH = 0xA72,       // Test Under Mask (high high)
-  TMHL = 0xA73,       // Test Under Mask (high low)
-  TML = 0xA71,        // Test Under Mask Low
-  TMLH = 0xA70,       // Test Under Mask (low high)
-  TMLL = 0xA71,       // Test Under Mask (low low)
-  TMY = 0xEB51,       // Test Under Mask
-  TP = 0xEBC0,        // Test Decimal
-  TPI = 0xB236,       // Test Pending Interruption
-  TR = 0xDC,          // Translate
-  TRAP4 = 0xB2FF,     // Trap (4)
-  TRE = 0xB2A5,       // Translate Extended
-  TROO = 0xB993,      // Translate One To One
-  TROT = 0xB992,      // Translate One To Two
-  TRT = 0xDD,         // Translate And Test
-  TRTE = 0xB9BF,      // Translate And Test Extended
-  TRTO = 0xB991,      // Translate Two To One
-  TRTR = 0xD0,        // Translate And Test Reverse
-  TRTRE = 0xB9BD,     // Translate And Test Reverse Extended
-  TRTT = 0xB990,      // Translate Two To Two
-  TS = 0x93,          // Test And Set
-  TSCH = 0xB235,      // Test Subchannel
-  UNPK = 0xF3,        // Unpack
-  UNPKA = 0xEA,       // Unpack Ascii
-  UNPKU = 0xE2,       // Unpack Unicode
-  UPT = 0x0102,       // Update Tree
-  X = 0x57,           // Exclusive Or (32)
-  XC = 0xD7,          // Exclusive Or (character)
-  XG = 0xE382,        // Exclusive Or (64)
-  XGR = 0xB982,       // Exclusive Or (64)
-  XGRK = 0xB9E7,      // Exclusive Or (64)
-  XI = 0x97,          // Exclusive Or (immediate)
-  XIHF = 0xC06,       // Exclusive Or Immediate (high)
-  XILF = 0xC07,       // Exclusive Or Immediate (low)
-  XIY = 0xEB57,       // Exclusive Or (immediate)
-  XR = 0x17,          // Exclusive Or (32)
-  XRK = 0xB9F7,       // Exclusive Or (32)
-  XSCH = 0xB276,      // Cancel Subchannel
-  XY = 0xE357,        // Exclusive Or (32)
-  ZAP = 0xF8,         // Zero And Add
-  BKPT = 0x0001       // GDB Software Breakpoint
+#define DECLARE_OPCODES(name, opcode_name, opcode_value) \
+  opcode_name = opcode_value,
+  S390_OPCODE_LIST(DECLARE_OPCODES)
+#undef DECLARE_OPCODES
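+// S390_OPCODE_LIST is an X-macro over entries of the form
+// V(name, opcode_name, opcode_value); DECLARE_OPCODES keeps only the
+// enumerator. As an illustrative sketch (assuming the list carries such an
+// entry), V(lr, LR, 0x18) expands to:
+//   LR = 0x18,
+// reproducing the hand-written enumerators deleted above.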
+
+  BKPT = 0x0001,  // GDB Software Breakpoint
+  DUMY = 0xE353   // Special dummy opcode
 };
 
 // Instruction encoding bits and masks.
@@ -1303,15 +2103,69 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
 };
 
-// I Instruction -- suspect this will not be used,
-// but implement for completeness
-class IInstruction : Instruction {
- public:
-  inline int IValue() const { return Bits<TwoByteInstr, int>(7, 0); }
+#define DECLARE_FIELD_FOR_TWO_BYTE_INSTR(name, T, lo, hi)   \
+  inline int name() const {                                 \
+    return Bits<TwoByteInstr, T>(15 - (lo), 15 - (hi) + 1); \
+  }
 
+#define DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(name, T, lo, hi)   \
+  inline int name() const {                                  \
+    return Bits<FourByteInstr, T>(31 - (lo), 31 - (hi) + 1); \
+  }
+
+#define DECLARE_FIELD_FOR_SIX_BYTE_INSTR(name, T, lo, hi)   \
+  inline int name() const {                                 \
+    return Bits<SixByteInstr, T>(47 - (lo), 47 - (hi) + 1); \
+  }
+
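+// These macros map z/Architecture bit numbering (bit 0 = most significant
+// bit, fields given as half-open [lo, hi) ranges) onto the LSB-based
+// (hi, lo) pair that Bits<T>() takes. Sketch: for a four-byte instruction,
+//   DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(R1Value, int, 8, 12)
+// yields Bits<FourByteInstr, int>(23, 20) -- the same extraction the
+// hand-written RIInstruction::R1Value accessor removed further down
+// performed.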
+class TwoByteInstruction : public Instruction {
+ public:
   inline int size() const { return 2; }
 };
 
+class FourByteInstruction : public Instruction {
+ public:
+  inline int size() const { return 4; }
+};
+
+class SixByteInstruction : public Instruction {
+ public:
+  inline int size() const { return 6; }
+};
+
+// I Instruction
+class IInstruction : public TwoByteInstruction {
+ public:
+  DECLARE_FIELD_FOR_TWO_BYTE_INSTR(IValue, int, 8, 16);
+};
+
+// E Instruction
+class EInstruction : public TwoByteInstruction {};
+
+// IE Instruction
+class IEInstruction : public FourByteInstruction {
+ public:
+  DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I1Value, int, 24, 28);
+  DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I2Value, int, 28, 32);
+};
+
+// MII Instruction
+class MIIInstruction : public SixByteInstruction {
+ public:
+  DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M1Value, uint32_t, 8, 12);
+  DECLARE_FIELD_FOR_SIX_BYTE_INSTR(RI2Value, int, 12, 24);
+  DECLARE_FIELD_FOR_SIX_BYTE_INSTR(RI3Value, int, 24, 48);
+};
+
+// RI Instruction
+class RIInstruction : public FourByteInstruction {
+ public:
+  DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(R1Value, int, 8, 12);
+  DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I2Value, int, 16, 32);
+  DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I2UnsignedValue, uint32_t, 16, 32);
+  DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(M1Value, uint32_t, 8, 12);
+};
+
 // RR Instruction
 class RRInstruction : Instruction {
  public:
@@ -1358,20 +2212,6 @@
   inline int size() const { return 4; }
 };
 
-// RI Instruction
-class RIInstruction : Instruction {
- public:
-  inline int R1Value() const { return Bits<FourByteInstr, int>(23, 20); }
-  inline int16_t I2Value() const { return Bits<FourByteInstr, int16_t>(15, 0); }
-  inline uint16_t I2UnsignedValue() const {
-    return Bits<FourByteInstr, uint16_t>(15, 0);
-  }
-  inline Condition M1Value() const {
-    return static_cast<Condition>(Bits<FourByteInstr, int>(23, 20));
-  }
-  inline int size() const { return 4; }
-};
-
 // RS Instruction
 class RSInstruction : Instruction {
  public:
@@ -1505,6 +2345,17 @@
   inline int size() const { return 6; }
 };
 
+// VRR Instruction
+class VRR_C_Instruction : SixByteInstruction {
+ public:
+  DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12);
+  DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R2Value, int, 12, 16);
+  DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R3Value, int, 16, 20);
+  DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M6Value, uint32_t, 24, 28);
+  DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M5Value, uint32_t, 28, 32);
+  DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M4Value, uint32_t, 32, 36);
+};
+
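+// Field positions of the VRR-c format in instruction bit order (bit 0 =
+// MSB), read off the half-open coordinates above: R1 8-11, R2 12-15,
+// R3 16-19, M6 24-27, M5 28-31, M4 32-35. That the remaining bits carry
+// the split opcode and the RXB field is an assumption from the
+// z/Architecture VRR-c layout, not spelled out here.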
 // Helper functions for converting between register numbers and names.
 class Registers {
  public:
diff --git a/src/s390/deoptimizer-s390.cc b/src/s390/deoptimizer-s390.cc
index 6ee8c74..46d9391 100644
--- a/src/s390/deoptimizer-s390.cc
+++ b/src/s390/deoptimizer-s390.cc
@@ -95,7 +95,7 @@
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
   for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
-    double double_value = input_->GetDoubleRegister(i);
+    Float64 double_value = input_->GetDoubleRegister(i);
     output_frame->SetDoubleRegister(i, double_value);
   }
 }
diff --git a/src/s390/disasm-s390.cc b/src/s390/disasm-s390.cc
index 26079b9..e35e590 100644
--- a/src/s390/disasm-s390.cc
+++ b/src/s390/disasm-s390.cc
@@ -562,6 +562,9 @@
     case BKPT:
       Format(instr, "bkpt");
       break;
+    case LPR:
+      Format(instr, "lpr\t'r1, 'r2");
+      break;
     default:
       return false;
   }
@@ -709,6 +712,9 @@
     case XGRK:
       Format(instr, "xgrk\t'r5,'r6,'r3");
       break;
+    case CGFR:
+      Format(instr, "cgfr\t'r5,'r6");
+      break;
     case CGR:
       Format(instr, "cgr\t'r5,'r6");
       break;
@@ -718,6 +724,15 @@
     case LLGFR:
       Format(instr, "llgfr\t'r5,'r6");
       break;
+    case POPCNT_Z:
+      Format(instr, "popcnt\t'r5,'r6");
+      break;
+    case LLGCR:
+      Format(instr, "llgcr\t'r5,'r6");
+      break;
+    case LLCR:
+      Format(instr, "llcr\t'r5,'r6");
+      break;
     case LBR:
       Format(instr, "lbr\t'r5,'r6");
       break;
@@ -760,6 +775,9 @@
     case MSR:
       Format(instr, "msr\t'r5,'r6");
       break;
+    case MSRKC:
+      Format(instr, "msrkc\t'r5,'r6,'r3");
+      break;
     case LGBR:
       Format(instr, "lgbr\t'r5,'r6");
       break;
@@ -769,9 +787,18 @@
     case MSGR:
       Format(instr, "msgr\t'r5,'r6");
       break;
+    case MSGRKC:
+      Format(instr, "msgrkc\t'r5,'r6,'r3");
+      break;
     case DSGR:
       Format(instr, "dsgr\t'r5,'r6");
       break;
+    case DSGFR:
+      Format(instr, "dsgfr\t'r5,'r6");
+      break;
+    case MSGFR:
+      Format(instr, "msgfr\t'r5,'r6");
+      break;
     case LZDR:
       Format(instr, "lzdr\t'f5");
       break;
@@ -1036,6 +1063,12 @@
       Format(instr, "trap4");
       break;
     }
+    case LPGR:
+      Format(instr, "lpgr\t'r1, 'r2");
+      break;
+    case LPGFR:
+      Format(instr, "lpgfr\t'r1,'r2");
+      break;
     default:
       return false;
   }
@@ -1052,6 +1085,15 @@
 
   Opcode opcode = instr->S390OpcodeValue();
   switch (opcode) {
+    case DUMY:
+      Format(instr, "dumy\t'r1, 'd2 ( 'r2d, 'r3 )");
+      break;
+#define DECODE_VRR_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
+  case opcode_name:                                                \
+    Format(instr, #name "\t'f1,'f2,'f3");                          \
+    break;
+      S390_VRR_C_OPCODE_LIST(DECODE_VRR_C_INSTRUCTIONS)
+#undef DECODE_VRR_C_INSTRUCTIONS
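+// S390_VRR_C_OPCODE_LIST follows the same X-macro shape as the opcode
+// table: a hypothetical entry V(vfa, VFA, 0xE7E3) would expand, via the
+// stringized #name, to
+//   case VFA:
+//     Format(instr, "vfa" "\t'f1,'f2,'f3");
+//     break;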
     case LLILF:
       Format(instr, "llilf\t'r1,'i7");
       break;
@@ -1061,6 +1103,9 @@
     case AFI:
       Format(instr, "afi\t'r1,'i7");
       break;
+    case AIH:
+      Format(instr, "aih\t'r1,'i7");
+      break;
     case ASI:
       Format(instr, "asi\t'd2('r3),'ic");
       break;
@@ -1082,6 +1127,12 @@
     case CLFI:
       Format(instr, "clfi\t'r1,'i7");
       break;
+    case CLIH:
+      Format(instr, "clih\t'r1,'i7");
+      break;
+    case CIH:
+      Format(instr, "cih\t'r1,'i2");
+      break;
     case CFI:
       Format(instr, "cfi\t'r1,'i2");
       break;
@@ -1364,6 +1415,15 @@
     case MSG:
       Format(instr, "msg\t'r1,'d2('r2d,'r3)");
       break;
+    case DSG:
+      Format(instr, "dsg\t'r1,'d2('r2d,'r3)");
+      break;
+    case DSGF:
+      Format(instr, "dsgf\t'r1,'d2('r2d,'r3)");
+      break;
+    case MSGF:
+      Format(instr, "msgf\t'r1,'d2('r2d,'r3)");
+      break;
     case MSY:
       Format(instr, "msy\t'r1,'d2('r2d,'r3)");
       break;
@@ -1374,7 +1434,13 @@
       Format(instr, "stdy\t'f1,'d2('r2d,'r3)");
       break;
     case ADB:
-      Format(instr, "adb\t'r1,'d1('r2d, 'r3)");
+      Format(instr, "adb\t'f1,'d1('r2d, 'r3)");
+      break;
+    case CDB:
+      Format(instr, "cdb\t'f1,'d1('r2d, 'r3)");
+      break;
+    case CEB:
+      Format(instr, "ceb\t'f1,'d1('r2d, 'r3)");
       break;
     case SDB:
       Format(instr, "sdb\t'r1,'d1('r2d, 'r3)");
@@ -1388,6 +1454,9 @@
     case SQDB:
       Format(instr, "sqdb\t'r1,'d1('r2d, 'r3)");
       break;
+    case PFD:
+      Format(instr, "pfd\t'm1,'d2('r2d,'r3)");
+      break;
     default:
       return false;
   }
diff --git a/src/s390/interface-descriptors-s390.cc b/src/s390/interface-descriptors-s390.cc
index 7fdf993..8060cfe 100644
--- a/src/s390/interface-descriptors-s390.cc
+++ b/src/s390/interface-descriptors-s390.cc
@@ -61,31 +61,7 @@
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r4};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewObjectDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r3, r5};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r3};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r3};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {r3};
+  Register registers[] = {r3, r4, r5};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
@@ -134,13 +110,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {r3, r5};
+  Register registers[] = {r3, r2, r5};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {r3, r2, r5, r4};
   data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -167,6 +143,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // r3 : the target to call
+  // r4 : start index (to support rest parameters)
+  Register registers[] = {r3, r4};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 void ConstructStubDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // r2 : number of arguments
@@ -197,13 +181,12 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
-  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
-      CallInterfaceDescriptorData* data) {                          \
-    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
-  }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+  Register registers[] = {r3, r5, r2, r4};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
 
 void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -387,6 +370,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      r3,  // loaded new FP
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/s390/macro-assembler-s390.cc b/src/s390/macro-assembler-s390.cc
index fbf82cc..9084931 100644
--- a/src/s390/macro-assembler-s390.cc
+++ b/src/s390/macro-assembler-s390.cc
@@ -933,7 +933,7 @@
                                   int prologue_offset) {
   {
     ConstantPoolUnavailableScope constant_pool_unavailable(this);
-    LoadSmiLiteral(r1, Smi::FromInt(type));
+    Load(r1, Operand(StackFrame::TypeToMarker(type)));
     PushCommonFrame(r1);
   }
 }
@@ -969,10 +969,10 @@
   }
 }
 
-void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
   LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  LoadP(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
-  LoadP(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+  LoadP(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+  LoadP(vector, FieldMemOperand(vector, Cell::kValueOffset));
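+  // In effect: vector = frame->function->feedback_vector_cell->value; the
+  // vector now hangs off JSFunction through a Cell rather than through the
+  // removed LiteralsArray indirection.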
 }
 
 void MacroAssembler::EnterFrame(StackFrame::Type type,
@@ -984,7 +984,7 @@
   //    type
   //    CodeObject  <-- new sp
 
-  LoadSmiLiteral(ip, Smi::FromInt(type));
+  Load(ip, Operand(StackFrame::TypeToMarker(type)));
   PushCommonFrame(ip);
 
   if (type == StackFrame::INTERNAL) {
@@ -1057,7 +1057,7 @@
   // all of the pushes that have happened inside of V8
   // since we were called from C code
   CleanseP(r14);
-  LoadSmiLiteral(r1, Smi::FromInt(frame_type));
+  Load(r1, Operand(StackFrame::TypeToMarker(frame_type)));
   PushCommonFrame(r1);
   // Reserve room for saved entry sp and code object.
   lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
@@ -1101,17 +1101,6 @@
   StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset));
 }
 
-void MacroAssembler::InitializeNewString(Register string, Register length,
-                                         Heap::RootListIndex map_index,
-                                         Register scratch1, Register scratch2) {
-  SmiTag(scratch1, length);
-  LoadRoot(scratch2, map_index);
-  StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset));
-  StoreP(FieldMemOperand(string, String::kHashFieldSlot),
-         Operand(String::kEmptyHashField), scratch1);
-  StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
-}
-
 int MacroAssembler::ActivationFrameAlignment() {
 #if !defined(USE_SIMULATOR)
   // Running on the real platform. Use the alignment as mandated by the local
@@ -1306,17 +1295,16 @@
   }
 }
 
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
-                                             const ParameterCount& expected,
-                                             const ParameterCount& actual) {
-  Label skip_flooding;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  mov(r6, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual) {
+  Label skip_hook;
+  ExternalReference debug_hook_active =
+      ExternalReference::debug_hook_on_function_call_address(isolate());
+  mov(r6, Operand(debug_hook_active));
   LoadB(r6, MemOperand(r6));
-  CmpP(r6, Operand(StepIn));
-  blt(&skip_flooding);
+  CmpP(r6, Operand::Zero());
+  beq(&skip_hook);
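+  // Fast path sketch: if (*debug_hook_on_function_call == 0) skip the hook;
+  // otherwise the block below spills the arguments and calls
+  // Runtime::kDebugOnFunctionCall.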
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1332,7 +1320,7 @@
       Push(new_target);
     }
     Push(fun, fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    CallRuntime(Runtime::kDebugOnFunctionCall);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -1346,7 +1334,7 @@
       SmiUntag(expected.reg());
     }
   }
-  bind(&skip_flooding);
+  bind(&skip_hook);
 }
 
 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
@@ -1360,8 +1348,8 @@
   DCHECK(function.is(r3));
   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r5));
 
-  if (call_wrapper.NeedsDebugStepCheck()) {
-    FloodFunctionIfStepping(function, new_target, expected, actual);
+  if (call_wrapper.NeedsDebugHookCheck()) {
+    CheckDebugHook(function, new_target, expected, actual);
   }
 
   // Clear the new.target register if not given.
@@ -1464,13 +1452,15 @@
   bgt(fail);
 }
 
-void MacroAssembler::DebugBreak() {
-  LoadImmP(r2, Operand::Zero());
-  mov(r3,
-      Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
-  CEntryStub ces(isolate(), 1);
-  DCHECK(AllowThisStubCall(&ces));
-  Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+  // Check whether we need to drop frames to restart a function on the stack.
+  ExternalReference restart_fp =
+      ExternalReference::debug_restart_fp_address(isolate());
+  mov(r3, Operand(restart_fp));
+  LoadP(r3, MemOperand(r3));
+  CmpP(r3, Operand::Zero());
+  Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
+       ne);
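+  // In effect: if (*debug_restart_fp != 0) tail-call FrameDropperTrampoline,
+  // which receives the new frame pointer in r3 (see the descriptor in
+  // interface-descriptors-s390.cc above).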
 }
 
 void MacroAssembler::PushStackHandler() {
@@ -1579,25 +1569,18 @@
 
   // Set up allocation top address register.
   Register top_address = scratch1;
-  // This code stores a temporary value in ip. This is OK, as the code below
-  // does not need ip for implicit literal generation.
-  Register alloc_limit = ip;
   Register result_end = scratch2;
   mov(top_address, Operand(allocation_top));
 
   if ((flags & RESULT_CONTAINS_TOP) == 0) {
-    // Load allocation top into result and allocation limit into ip.
+    // Load allocation top into result.
     LoadP(result, MemOperand(top_address));
-    LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
   } else {
     if (emit_debug_code()) {
       // Assert that result actually contains top on entry.
-      LoadP(alloc_limit, MemOperand(top_address));
-      CmpP(result, alloc_limit);
+      CmpP(result, MemOperand(top_address));
       Check(eq, kUnexpectedAllocationTop);
     }
-    // Load allocation limit. Result already contains allocation top.
-    LoadP(alloc_limit, MemOperand(top_address, limit - top));
   }
 
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1611,7 +1594,7 @@
     Label aligned;
     beq(&aligned, Label::kNear);
     if ((flags & PRETENURE) != 0) {
-      CmpLogicalP(result, alloc_limit);
+      CmpLogicalP(result, MemOperand(top_address, limit - top));
       bge(gc_required);
     }
     mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
@@ -1621,27 +1604,26 @@
 #endif
   }
 
-  // Calculate new top and bail out if new space is exhausted. Use result
-  // to calculate the new top.
-  SubP(r0, alloc_limit, result);
-  if (is_int16(object_size)) {
-    CmpP(r0, Operand(object_size));
-    blt(gc_required);
-    AddP(result_end, result, Operand(object_size));
-  } else {
-    mov(result_end, Operand(object_size));
-    CmpP(r0, result_end);
-    blt(gc_required);
-    AddP(result_end, result, result_end);
-  }
+  AddP(result_end, result, Operand(object_size));
+
+  // Compare with allocation limit.
+  CmpLogicalP(result_end, MemOperand(top_address, limit - top));
+  bge(gc_required);
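+  // In pseudo-code, the bump allocation fast path above is roughly:
+  //   result     = *allocation_top;
+  //   result_end = result + object_size;
+  //   if (result_end >= *allocation_limit) goto gc_required;
+  //   *allocation_top = result_end;  // skipped for folding dominators
+  // where the limit slot sits at (limit - top) bytes past allocation_top.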
 
   if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
     // The top pointer is not updated for allocation folding dominators.
     StoreP(result_end, MemOperand(top_address));
   }
 
+  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+    // Prefetch the allocation_top's next cache line in advance to
+    // help alleviate potential cache misses.
+    // Mode 2 - Prefetch the data into a cache line for store access.
+    pfd(static_cast<Condition>(2), MemOperand(result, 256));
+  }
+
   // Tag object.
-  AddP(result, result, Operand(kHeapObjectTag));
+  la(result, MemOperand(result, kHeapObjectTag));
 }
 
 void MacroAssembler::Allocate(Register object_size, Register result,
@@ -1676,24 +1658,17 @@
 
   // Set up allocation top address and allocation limit registers.
   Register top_address = scratch;
-  // This code stores a temporary value in ip. This is OK, as the code below
-  // does not need ip for implicit literal generation.
-  Register alloc_limit = ip;
   mov(top_address, Operand(allocation_top));
 
   if ((flags & RESULT_CONTAINS_TOP) == 0) {
-    // Load allocation top into result and allocation limit into alloc_limit..
+    // Load allocation top into result.
     LoadP(result, MemOperand(top_address));
-    LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
   } else {
     if (emit_debug_code()) {
       // Assert that result actually contains top on entry.
-      LoadP(alloc_limit, MemOperand(top_address));
-      CmpP(result, alloc_limit);
+      CmpP(result, MemOperand(top_address));
       Check(eq, kUnexpectedAllocationTop);
     }
-    // Load allocation limit. Result already contains allocation top.
-    LoadP(alloc_limit, MemOperand(top_address, limit - top));
   }
 
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1707,7 +1682,7 @@
     Label aligned;
     beq(&aligned, Label::kNear);
     if ((flags & PRETENURE) != 0) {
-      CmpLogicalP(result, alloc_limit);
+      CmpLogicalP(result, MemOperand(top_address, limit - top));
       bge(gc_required);
     }
     mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
@@ -1720,17 +1695,14 @@
   // Calculate new top and bail out if new space is exhausted. Use result
   // to calculate the new top. Object size may be in words so a shift is
   // required to get the number of bytes.
-  SubP(r0, alloc_limit, result);
   if ((flags & SIZE_IN_WORDS) != 0) {
     ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
-    CmpP(r0, result_end);
-    blt(gc_required);
     AddP(result_end, result, result_end);
   } else {
-    CmpP(r0, object_size);
-    blt(gc_required);
     AddP(result_end, result, object_size);
   }
+  CmpLogicalP(result_end, MemOperand(top_address, limit - top));
+  bge(gc_required);
 
   // Update allocation top. result temporarily holds the new top.
   if (emit_debug_code()) {
@@ -1742,8 +1714,15 @@
     StoreP(result_end, MemOperand(top_address));
   }
 
+  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+    // Prefetch the allocation_top's next cache line in advance to
+    // help alleviate potential cache misses.
+    // Mode 2 - Prefetch the data into a cache line for store access.
+    pfd(static_cast<Condition>(2), MemOperand(result, 256));
+  }
+
   // Tag object.
-  AddP(result, result, Operand(kHeapObjectTag));
+  la(result, MemOperand(result, kHeapObjectTag));
 }
 
 void MacroAssembler::FastAllocate(Register object_size, Register result,
@@ -1795,8 +1774,15 @@
   }
   StoreP(result_end, MemOperand(top_address));
 
+  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+    // Prefetch the allocation_top's next cache line in advance to
+    // help alleviate potential cache misses.
+    // Mode 2 - Prefetch the data into a cache line for store access.
+    pfd(static_cast<Condition>(2), MemOperand(result, 256));
+  }
+
   // Tag object.
-  AddP(result, result, Operand(kHeapObjectTag));
+  la(result, MemOperand(result, kHeapObjectTag));
 }
 
 void MacroAssembler::FastAllocate(int object_size, Register result,
@@ -1837,103 +1823,34 @@
 #endif
   }
 
+#if V8_TARGET_ARCH_S390X
+  // Limit to 64-bit only, as double alignment check above may adjust
+  // allocation top by an extra kDoubleSize/2.
+  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(object_size)) {
+    // Update allocation top.
+    AddP(MemOperand(top_address), Operand(object_size));
+  } else {
+    // Calculate new top using result.
+    AddP(result_end, result, Operand(object_size));
+    // Update allocation top.
+    StoreP(result_end, MemOperand(top_address));
+  }
+#else
   // Calculate new top using result.
   AddP(result_end, result, Operand(object_size));
-
-  // The top pointer is not updated for allocation folding dominators.
+  // Update allocation top.
   StoreP(result_end, MemOperand(top_address));
+#endif
+
+  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+    // Prefetch the allocation_top's next cache line in advance to
+    // help alleviate potential cache misses.
+    // Mode 2 - Prefetch the data into a cache line for store access.
+    pfd(static_cast<Condition>(2), MemOperand(result, 256));
+  }
 
   // Tag object.
-  AddP(result, result, Operand(kHeapObjectTag));
-}
-
-void MacroAssembler::AllocateTwoByteString(Register result, Register length,
-                                           Register scratch1, Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-
-  ShiftLeftP(scratch1, length, Operand(1));  // Length in bytes, not chars.
-  AddP(scratch1, Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
-
-  AndP(scratch1, Operand(~kObjectAlignmentMask));
-
-  // Allocate two-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
-                      scratch2);
-}
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
-                                           Register scratch1, Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  DCHECK(kCharSize == 1);
-  AddP(scratch1, length,
-       Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
-  AndP(scratch1, Operand(~kObjectAlignmentMask));
-
-  // Allocate one-byte string in new space.
-  Allocate(scratch1, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
-                      scratch2);
-}
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
-                      scratch1, scratch2);
-}
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
-                                                 Register length,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
-                      scratch2);
-}
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
-                                                 Register length,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
-                      scratch1, scratch2);
+  la(result, MemOperand(result, kHeapObjectTag));
 }
 
 void MacroAssembler::CompareObjectType(Register object, Register map,
@@ -1956,62 +1873,10 @@
   CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
 }
 
-void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
-                                             Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
-                 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-  ble(fail);
-  CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
-                 Operand(Map::kMaximumBitField2FastHoleyElementValue));
-  bgt(fail);
-}
-
-void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
-                                          Label* fail) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
-                 Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-  bgt(fail);
-}
-
 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
   SmiUntag(ip, smi);
   ConvertIntToDouble(ip, value);
 }
-void MacroAssembler::StoreNumberToDoubleElements(
-    Register value_reg, Register key_reg, Register elements_reg,
-    Register scratch1, DoubleRegister double_scratch, Label* fail,
-    int elements_offset) {
-  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
-  Label smi_value, store;
-
-  // Handle smi values specially.
-  JumpIfSmi(value_reg, &smi_value);
-
-  // Ensure that the object is a heap number
-  CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
-           DONT_DO_SMI_CHECK);
-
-  LoadDouble(double_scratch,
-             FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-  // Force a canonical NaN.
-  CanonicalizeNaN(double_scratch);
-  b(&store);
-
-  bind(&smi_value);
-  SmiToDouble(double_scratch, value_reg);
-
-  bind(&store);
-  SmiToDoubleArrayOffset(scratch1, key_reg);
-  StoreDouble(double_scratch,
-              FieldMemOperand(elements_reg, scratch1,
-                              FixedDoubleArray::kHeaderSize - elements_offset));
-}
 
 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
                                 Label* early_success) {
@@ -2092,30 +1957,6 @@
   bind(&done);
 }
 
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
-                                             Register scratch, Label* miss) {
-  // Get the prototype or initial map from the function.
-  LoadP(result,
-        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // If the prototype or initial map is the hole, don't return it and
-  // simply miss the cache instead. This will allow us to allocate a
-  // prototype object on-demand in the runtime system.
-  CompareRoot(result, Heap::kTheHoleValueRootIndex);
-  beq(miss);
-
-  // If the function does not have an initial map, we're done.
-  Label done;
-  CompareObjectType(result, scratch, scratch, MAP_TYPE);
-  bne(&done, Label::kNear);
-
-  // Get the prototype from the initial map.
-  LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
-
-  // All done.
-  bind(&done);
-}
-
 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
                               Condition cond) {
   DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
@@ -2491,23 +2332,6 @@
   }
 }
 
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind, ElementsKind transitioned_kind,
-    Register map_in_out, Register scratch, Label* no_map_match) {
-  DCHECK(IsFastElementsKind(expected_kind));
-  DCHECK(IsFastElementsKind(transitioned_kind));
-
-  // Check that the function's map is the same as the expected cached map.
-  LoadP(scratch, NativeContextMemOperand());
-  LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
-  CmpP(map_in_out, ip);
-  bne(no_map_match);
-
-  // Use the transitioned cached map.
-  LoadP(map_in_out,
-        ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
   LoadP(dst, NativeContextMemOperand());
   LoadP(dst, ContextMemOperand(dst, index));
@@ -2592,25 +2416,6 @@
   beq(smi_case);
 }
 
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
-                                          Label* non_smi_case) {
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-
-  // We can more optimally use TestIfSmi if dst != src
-  // otherwise, the UnTag operation will kill the CC and we cannot
-  // test the Tag bit.
-  if (src.code() != dst.code()) {
-    SmiUntag(dst, src);
-    TestIfSmi(src);
-  } else {
-    TestBit(src, 0, r0);
-    SmiUntag(dst, src);
-    LoadAndTestRR(r0, r0);
-  }
-  bne(non_smi_case);
-}
-
 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
                                      Label* on_either_smi) {
   STATIC_ASSERT(kSmiTag == 0);
@@ -2881,20 +2686,6 @@
   bne(failure);
 }
 
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
-                                                              Register scratch,
-                                                              Label* failure) {
-  const int kFlatOneByteStringMask =
-      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatOneByteStringTag =
-      kStringTag | kOneByteStringTag | kSeqStringTag;
-
-  if (!scratch.is(type)) LoadRR(scratch, type);
-  nilf(scratch, Operand(kFlatOneByteStringMask));
-  CmpP(scratch, Operand(kFlatOneByteStringTag));
-  bne(failure);
-}
-
 static const int kRegisterPassedArguments = 5;
 
 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -3307,12 +3098,10 @@
   DCHECK(!r.IsDouble());
   if (r.IsInteger8()) {
     LoadB(dst, mem);
-    lgbr(dst, dst);
   } else if (r.IsUInteger8()) {
     LoadlB(dst, mem);
   } else if (r.IsInteger16()) {
     LoadHalfWordP(dst, mem, scratch);
-    lghr(dst, dst);
   } else if (r.IsUInteger16()) {
     LoadHalfWordP(dst, mem, scratch);
 #if V8_TARGET_ARCH_S390X
@@ -3413,42 +3202,6 @@
   return no_reg;
 }
 
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
-                                                      Register scratch0,
-                                                      Register scratch1,
-                                                      Label* found) {
-  DCHECK(!scratch1.is(scratch0));
-  Register current = scratch0;
-  Label loop_again, end;
-
-  // scratch contained elements pointer.
-  LoadRR(current, object);
-  LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
-  LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
-  CompareRoot(current, Heap::kNullValueRootIndex);
-  beq(&end);
-
-  // Loop based on the map going up the prototype chain.
-  bind(&loop_again);
-  LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
-
-  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  LoadlB(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
-  CmpP(scratch1, Operand(JS_OBJECT_TYPE));
-  blt(found);
-
-  LoadlB(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
-  DecodeField<Map::ElementsKindBits>(scratch1);
-  CmpP(scratch1, Operand(DICTIONARY_ELEMENTS));
-  beq(found);
-  LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
-  CompareRoot(current, Heap::kNullValueRootIndex);
-  bne(&loop_again);
-
-  bind(&end);
-}
-
 void MacroAssembler::mov(Register dst, const Operand& src) {
   if (src.rmode_ != kRelocInfo_NONEPTR) {
     // some form of relocation needed
@@ -3484,6 +3237,88 @@
   msfi(dst, src1);
 }
 
+#define Generate_MulHigh32(instr) \
+  {                               \
+    lgfr(dst, src1);              \
+    instr(dst, src2);             \
+    srlg(dst, dst, Operand(32));  \
+  }
+
+void MacroAssembler::MulHigh32(Register dst, Register src1,
+                               const MemOperand& src2) {
+  Generate_MulHigh32(msgf);
+}
+
+void MacroAssembler::MulHigh32(Register dst, Register src1, Register src2) {
+  if (dst.is(src2)) {
+    std::swap(src1, src2);
+  }
+  Generate_MulHigh32(msgfr);
+}
+
+void MacroAssembler::MulHigh32(Register dst, Register src1,
+                               const Operand& src2) {
+  Generate_MulHigh32(msgfi);
+}
+
+#undef Generate_MulHigh32
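// Editorial sketch (not part of the patch): reference semantics of the
// MulHigh32 helpers above — lgfr sign-extends the first operand to 64
// bits, msgf/msgfr/msgfi multiplies by the sign-extended second operand,
// and srlg keeps the upper word:
static inline int32_t MulHigh32Ref(int32_t a, int32_t b) {
  return static_cast<int32_t>((static_cast<int64_t>(a) * b) >> 32);
}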
+
+#define Generate_MulHighU32(instr) \
+  {                                \
+    lr(r1, src1);                  \
+    instr(r0, src2);               \
+    LoadlW(dst, r0);               \
+  }
+
+void MacroAssembler::MulHighU32(Register dst, Register src1,
+                                const MemOperand& src2) {
+  Generate_MulHighU32(ml);
+}
+
+void MacroAssembler::MulHighU32(Register dst, Register src1, Register src2) {
+  Generate_MulHighU32(mlr);
+}
+
+void MacroAssembler::MulHighU32(Register dst, Register src1,
+                                const Operand& src2) {
+  USE(dst);
+  USE(src1);
+  USE(src2);
+  UNREACHABLE();
+}
+
+#undef Generate_MulHighU32
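// Editorial sketch (not part of the patch): ml/mlr use the implicit r0:r1
// even/odd pair — the 64-bit product lands in r0 (high word) and r1 (low
// word), so LoadlW copies the high word out of r0:
static inline uint32_t MulHighU32Ref(uint32_t a, uint32_t b) {
  return static_cast<uint32_t>((static_cast<uint64_t>(a) * b) >> 32);
}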
+
+#define Generate_Mul32WithOverflowIfCCUnequal(instr) \
+  {                                                  \
+    lgfr(dst, src1);                                 \
+    instr(dst, src2);                                \
+    cgfr(dst, dst);                                  \
+  }
+
+void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+                                                  const MemOperand& src2) {
+  Register result = dst;
+  if (src2.rx().is(dst) || src2.rb().is(dst)) dst = r0;
+  Generate_Mul32WithOverflowIfCCUnequal(msgf);
+  if (!result.is(dst)) llgfr(result, dst);
+}
+
+void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+                                                  Register src2) {
+  if (dst.is(src2)) {
+    std::swap(src1, src2);
+  }
+  Generate_Mul32WithOverflowIfCCUnequal(msgfr);
+}
+
+void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+                                                  const Operand& src2) {
+  Generate_Mul32WithOverflowIfCCUnequal(msgfi);
+}
+
+#undef Generate_Mul32WithOverflowIfCCUnequal
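// Editorial sketch (not part of the patch): the trailing cgfr compares the
// 64-bit product against its own sign-extended 32-bit truncation, so the
// condition code reads "unequal" exactly when the multiply overflowed:
static inline bool Mul32Overflows(int32_t a, int32_t b) {
  int64_t wide = static_cast<int64_t>(a) * b;
  return wide != static_cast<int64_t>(static_cast<int32_t>(wide));
}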
+
 void MacroAssembler::Mul64(Register dst, const MemOperand& src1) {
   if (is_int20(src1.offset())) {
     msg(dst, src1);
@@ -3499,13 +3334,17 @@
 }
 
 void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
-  if (dst.is(src2)) {
-    MulP(dst, src1);
-  } else if (dst.is(src1)) {
-    MulP(dst, src2);
+  if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+    MulPWithCondition(dst, src1, src2);
   } else {
-    Move(dst, src1);
-    MulP(dst, src2);
+    if (dst.is(src2)) {
+      MulP(dst, src1);
+    } else if (dst.is(src1)) {
+      MulP(dst, src2);
+    } else {
+      Move(dst, src1);
+      MulP(dst, src2);
+    }
   }
 }
 
@@ -3519,6 +3358,108 @@
 #endif
 }
 
+#define Generate_Div32(instr) \
+  {                           \
+    lgfr(r1, src1);           \
+    instr(r0, src2);          \
+    LoadlW(dst, r1);          \
+  }
+
+void MacroAssembler::Div32(Register dst, Register src1,
+                           const MemOperand& src2) {
+  Generate_Div32(dsgf);
+}
+
+void MacroAssembler::Div32(Register dst, Register src1, Register src2) {
+  Generate_Div32(dsgfr);
+}
+
+void MacroAssembler::Div32(Register dst, Register src1, const Operand& src2) {
+  USE(dst);
+  USE(src1);
+  USE(src2);
+  UNREACHABLE();
+}
+
+#undef Generate_Div32
+
+#define Generate_DivU32(instr) \
+  {                            \
+    lr(r0, src1);              \
+    srdl(r0, Operand(32));     \
+    instr(r0, src2);           \
+    LoadlW(dst, r1);           \
+  }
+
+void MacroAssembler::DivU32(Register dst, Register src1,
+                            const MemOperand& src2) {
+  Generate_DivU32(dl);
+}
+
+void MacroAssembler::DivU32(Register dst, Register src1, Register src2) {
+  Generate_DivU32(dlr);
+}
+
+void MacroAssembler::DivU32(Register dst, Register src1, const Operand& src2) {
+  USE(dst);
+  USE(src1);
+  USE(src2);
+  UNREACHABLE();
+}
+
+#undef Generate_DivU32
+
+#define Generate_Mod32(instr) \
+  {                           \
+    lgfr(r1, src1);           \
+    instr(r0, src2);          \
+    LoadlW(dst, r0);          \
+  }
+
+void MacroAssembler::Mod32(Register dst, Register src1,
+                           const MemOperand& src2) {
+  Generate_Mod32(dsgf);
+}
+
+void MacroAssembler::Mod32(Register dst, Register src1, Register src2) {
+  Generate_Mod32(dsgfr);
+}
+
+void MacroAssembler::Mod32(Register dst, Register src1, const Operand& src2) {
+  USE(dst);
+  USE(src1);
+  USE(src2);
+  UNREACHABLE();
+}
+
+#undef Generate_Mod32
+
+#define Generate_ModU32(instr) \
+  {                            \
+    lr(r0, src1);              \
+    srdl(r0, Operand(32));     \
+    instr(r0, src2);           \
+    LoadlW(dst, r0);           \
+  }
+
+void MacroAssembler::ModU32(Register dst, Register src1,
+                            const MemOperand& src2) {
+  Generate_ModU32(dl);
+}
+
+void MacroAssembler::ModU32(Register dst, Register src1, Register src2) {
+  Generate_ModU32(dlr);
+}
+
+void MacroAssembler::ModU32(Register dst, Register src1, const Operand& src2) {
+  USE(dst);
+  USE(src1);
+  USE(src2);
+  UNREACHABLE();
+}
+
+#undef Generate_ModU32
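// Editorial sketch (not part of the patch): the Div*/Mod* helpers above all
// follow the same s390 convention — the dividend occupies the r0:r1
// even/odd pair, the quotient comes back in r1 and the remainder in r0;
// the helpers differ only in which half they copy into dst. Reference
// semantics (truncated division):
static inline int32_t Div32Ref(int32_t a, int32_t b) { return a / b; }
static inline int32_t Mod32Ref(int32_t a, int32_t b) { return a % b; }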
+
 void MacroAssembler::MulP(Register dst, const Operand& opnd) {
 #if V8_TARGET_ARCH_S390X
   msgfi(dst, opnd);
@@ -3535,6 +3476,16 @@
 #endif
 }
 
+void MacroAssembler::MulPWithCondition(Register dst, Register src1,
+                                       Register src2) {
+  CHECK(CpuFeatures::IsSupported(MISC_INSTR_EXT2));
+#if V8_TARGET_ARCH_S390X
+  msgrkc(dst, src1, src2);
+#else
+  msrkc(dst, src1, src2);
+#endif
+}
+
 void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
 #if V8_TARGET_ARCH_S390X
   if (is_uint16(opnd.offset())) {
@@ -3553,6 +3504,17 @@
 #endif
 }
 
+void MacroAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
+  sqdbr(result, input);
+}
+
+void MacroAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
+  if (is_uint12(input.offset())) {
+    sqdb(result, input);
+  } else {
+    ldy(result, input);
+    sqdbr(result, result);
+  }
+}
+
 //----------------------------------------------------------------------------
 //  Add Instructions
 //----------------------------------------------------------------------------
@@ -3565,6 +3527,12 @@
     afi(dst, opnd);
 }
 
+// Add 32-bit (Register dst = Register dst + Immediate opnd)
+void MacroAssembler::Add32_RI(Register dst, const Operand& opnd) {
+  // Just a wrapper for the Add32 above.
+  Add32(dst, opnd);
+}
+
 // Add Pointer Size (Register dst = Register dst + Immediate opnd)
 void MacroAssembler::AddP(Register dst, const Operand& opnd) {
 #if V8_TARGET_ARCH_S390X
@@ -3589,6 +3557,13 @@
   Add32(dst, opnd);
 }
 
+// Add 32-bit (Register dst = Register src + Immediate opnd)
+void MacroAssembler::Add32_RRI(Register dst, Register src,
+                               const Operand& opnd) {
+  // Just a wrapper for the Add32 above.
+  Add32(dst, src, opnd);
+}
+
 // Add Pointer Size (Register dst = Register src + Immediate opnd)
 void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
   if (!dst.is(src)) {
@@ -3955,8 +3930,8 @@
 }
 
 void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
-  sllg(src, src, Operand(32));
-  ldgr(dst, src);
+  sllg(r0, src, Operand(32));
+  ldgr(dst, r0);
 }
 
 void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
@@ -4337,12 +4312,24 @@
 #else
     lhi(dst, opnd);
 #endif
-  } else {
+  } else if (is_int32(value)) {
+#if V8_TARGET_ARCH_S390X
+    lgfi(dst, opnd);
+#else
+    iilf(dst, opnd);
+#endif
+  } else if (is_uint32(value)) {
 #if V8_TARGET_ARCH_S390X
     llilf(dst, opnd);
 #else
     iilf(dst, opnd);
 #endif
+  } else {
+    int32_t hi_32 = static_cast<int64_t>(value) >> 32;
+    int32_t lo_32 = static_cast<int32_t>(value);
+
+    iihf(dst, Operand(hi_32));
+    iilf(dst, Operand(lo_32));
   }
 }
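// Editorial sketch (not part of the patch): the final branch materializes a
// 64-bit immediate as two 32-bit insertions — iihf writes the high word,
// iilf the low word:
static inline uint64_t ComposeImmediate64(uint32_t hi_32, uint32_t lo_32) {
  return (static_cast<uint64_t>(hi_32) << 32) | lo_32;
}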
 
@@ -4359,6 +4346,19 @@
 #endif
 }
 
+void MacroAssembler::LoadPositiveP(Register result, Register input) {
+#if V8_TARGET_ARCH_S390X
+  lpgr(result, input);
+#else
+  lpr(result, input);
+#endif
+}
+
+void MacroAssembler::LoadPositive32(Register result, Register input) {
+  lpr(result, input);
+  lgfr(result, result);
+}
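// Editorial sketch (not part of the patch): LoadPositiveP/LoadPositive32
// compute an absolute value; like lpr/lpgr themselves, the reference below
// still overflows for the most negative input:
static inline int32_t LoadPositive32Ref(int32_t x) { return x < 0 ? -x : x; }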
+
 //-----------------------------------------------------------------------------
 //  Compare Helpers
 //-----------------------------------------------------------------------------
@@ -4532,9 +4532,16 @@
   uint32_t lo_32 = static_cast<uint32_t>(value);
 
   // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
-  iihf(scratch, Operand(hi_32));
-  iilf(scratch, Operand(lo_32));
-  ldgr(result, scratch);
+  if (value == 0) {
+    lzdr(result);
+  } else if (lo_32 == 0) {
+    llihf(scratch, Operand(hi_32));
+    ldgr(result, scratch);
+  } else {
+    iihf(scratch, Operand(hi_32));
+    iilf(scratch, Operand(lo_32));
+    ldgr(result, scratch);
+  }
 }
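// Editorial note (not part of the patch): both fast paths above are pure
// bit-pattern observations — +0.0 is all zero bits (hence lzdr), and when
// the payload sits entirely in the high word a single llihf suffices:
static inline bool HighWordOnly(uint64_t bits) {
  return (bits & 0xFFFFFFFFu) == 0;
}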
 
 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
@@ -4545,19 +4552,19 @@
 
 void MacroAssembler::LoadFloat32Literal(DoubleRegister result, float value,
                                         Register scratch) {
-  uint32_t hi_32 = bit_cast<uint32_t>(value);
-  uint32_t lo_32 = 0;
-
-  // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
-  iihf(scratch, Operand(hi_32));
-  iilf(scratch, Operand(lo_32));
-  ldgr(result, scratch);
+  uint64_t int_val = static_cast<uint64_t>(bit_cast<uint32_t, float>(value))
+                     << 32;
+  LoadDoubleLiteral(result, int_val, scratch);
 }
 
 void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
 #if V8_TARGET_ARCH_S390X
-  LoadSmiLiteral(scratch, smi);
-  cgr(src1, scratch);
+  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+    cih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
+  } else {
+    LoadSmiLiteral(scratch, smi);
+    cgr(src1, scratch);
+  }
 #else
   // CFI takes 32-bit immediate.
   cfi(src1, Operand(smi));
@@ -4567,8 +4574,12 @@
 void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
                                           Register scratch) {
 #if V8_TARGET_ARCH_S390X
-  LoadSmiLiteral(scratch, smi);
-  clgr(src1, scratch);
+  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+    clih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
+  } else {
+    LoadSmiLiteral(scratch, smi);
+    clgr(src1, scratch);
+  }
 #else
   // CLFI takes 32-bit immediate
   clfi(src1, Operand(smi));
@@ -4578,8 +4589,13 @@
 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
                                    Register scratch) {
 #if V8_TARGET_ARCH_S390X
-  LoadSmiLiteral(scratch, smi);
-  AddP(dst, src, scratch);
+  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+    if (!dst.is(src)) LoadRR(dst, src);
+    aih(dst, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
+  } else {
+    LoadSmiLiteral(scratch, smi);
+    AddP(dst, src, scratch);
+  }
 #else
   AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
 #endif
@@ -4588,8 +4604,13 @@
 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
                                    Register scratch) {
 #if V8_TARGET_ARCH_S390X
-  LoadSmiLiteral(scratch, smi);
-  SubP(dst, src, scratch);
+  if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+    if (!dst.is(src)) LoadRR(dst, src);
+    aih(dst, Operand((-reinterpret_cast<intptr_t>(smi)) >> 32));
+  } else {
+    LoadSmiLiteral(scratch, smi);
+    SubP(dst, src, scratch);
+  }
 #else
   AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
 #endif
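// Editorial sketch (not part of the patch): on 64-bit targets a smi's
// payload occupies the upper 32 bits of the tagged word (the low word is
// all tag/zero bits), which is why the DISTINCT_OPS paths above can work
// on just the high word via cih/clih/aih. Assuming the register holds a
// smi:
static inline int32_t SmiHighWord(intptr_t raw_smi) {
  return static_cast<int32_t>(raw_smi >> 32);
}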
@@ -4852,6 +4873,14 @@
 #endif
 }
 
+void MacroAssembler::LoadlB(Register dst, Register src) {
+#if V8_TARGET_ARCH_S390X
+  llgcr(dst, src);
+#else
+  llcr(dst, src);
+#endif
+}
+
 void MacroAssembler::LoadLogicalReversedWordP(Register dst,
                                               const MemOperand& mem) {
   lrv(dst, mem);
@@ -5207,7 +5236,7 @@
   ar(dst, r0);
   ShiftRight(r0, dst, Operand(8));
   ar(dst, r0);
-  LoadB(dst, dst);
+  LoadlB(dst, dst);
 }
 
 #ifdef V8_TARGET_ARCH_S390X
@@ -5222,7 +5251,7 @@
   AddP(dst, r0);
   ShiftRightP(r0, dst, Operand(8));
   AddP(dst, r0);
-  LoadB(dst, dst);
+  LoadlB(dst, dst);
 }
 #endif
 
diff --git a/src/s390/macro-assembler-s390.h b/src/s390/macro-assembler-s390.h
index 06fcaf0..a3b57e9 100644
--- a/src/s390/macro-assembler-s390.h
+++ b/src/s390/macro-assembler-s390.h
@@ -245,8 +245,10 @@
 
   // Add (Register - Immediate)
   void Add32(Register dst, const Operand& imm);
+  void Add32_RI(Register dst, const Operand& imm);
   void AddP(Register dst, const Operand& imm);
   void Add32(Register dst, Register src, const Operand& imm);
+  void Add32_RRI(Register dst, Register src, const Operand& imm);
   void AddP(Register dst, Register src, const Operand& imm);
 
   // Add (Register - Register)
@@ -282,8 +284,12 @@
 
   // Subtract (Register - Immediate)
   void Sub32(Register dst, const Operand& imm);
+  void Sub32_RI(Register dst, const Operand& imm) { Sub32(dst, imm); }
   void SubP(Register dst, const Operand& imm);
   void Sub32(Register dst, Register src, const Operand& imm);
+  void Sub32_RRI(Register dst, Register src, const Operand& imm) {
+    Sub32(dst, src, imm);
+  }
   void SubP(Register dst, Register src, const Operand& imm);
 
   // Subtract (Register - Register)
@@ -316,12 +322,42 @@
   void Mul32(Register dst, const MemOperand& src1);
   void Mul32(Register dst, Register src1);
   void Mul32(Register dst, const Operand& src1);
+  void MulHigh32(Register dst, Register src1, const MemOperand& src2);
+  void MulHigh32(Register dst, Register src1, Register src2);
+  void MulHigh32(Register dst, Register src1, const Operand& src2);
+  void MulHighU32(Register dst, Register src1, const MemOperand& src2);
+  void MulHighU32(Register dst, Register src1, Register src2);
+  void MulHighU32(Register dst, Register src1, const Operand& src2);
+  void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+                                    const MemOperand& src2);
+  void Mul32WithOverflowIfCCUnequal(Register dst, Register src1, Register src2);
+  void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
+                                    const Operand& src2);
   void Mul64(Register dst, const MemOperand& src1);
   void Mul64(Register dst, Register src1);
   void Mul64(Register dst, const Operand& src1);
+  void MulPWithCondition(Register dst, Register src1, Register src2);
 
   // Divide
   void DivP(Register dividend, Register divider);
+  void Div32(Register dst, Register src1, const MemOperand& src2);
+  void Div32(Register dst, Register src1, Register src2);
+  void Div32(Register dst, Register src1, const Operand& src2);
+  void DivU32(Register dst, Register src1, const MemOperand& src2);
+  void DivU32(Register dst, Register src1, Register src2);
+  void DivU32(Register dst, Register src1, const Operand& src2);
+
+  // Mod
+  void Mod32(Register dst, Register src1, const MemOperand& src2);
+  void Mod32(Register dst, Register src1, Register src2);
+  void Mod32(Register dst, Register src1, const Operand& src2);
+  void ModU32(Register dst, Register src1, const MemOperand& src2);
+  void ModU32(Register dst, Register src1, Register src2);
+  void ModU32(Register dst, Register src1, const Operand& src2);
+
+  // Square root
+  void Sqrt(DoubleRegister result, DoubleRegister input);
+  void Sqrt(DoubleRegister result, const MemOperand& input);
 
   // Compare
   void Cmp32(Register src1, Register src2);
@@ -354,6 +390,7 @@
   void LoadB(Register dst, const MemOperand& opnd);
   void LoadB(Register dst, Register src);
   void LoadlB(Register dst, const MemOperand& opnd);
+  void LoadlB(Register dst, Register src);
 
   void LoadLogicalReversedWordP(Register dst, const MemOperand& opnd);
   void LoadLogicalReversedHalfWordP(Register dst, const MemOperand& opnd);
@@ -374,6 +411,9 @@
   // Load On Condition
   void LoadOnConditionP(Condition cond, Register dst, Register src);
 
+  void LoadPositiveP(Register result, Register input);
+  void LoadPositive32(Register result, Register input);
+
   // Store Floating Point
   void StoreDouble(DoubleRegister dst, const MemOperand& opnd);
   void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
@@ -784,16 +824,6 @@
     LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
   }
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the native context if the map in register
-  // map_in_out is the cached Array map in the native context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
-                                           ElementsKind transitioned_kind,
-                                           Register map_in_out,
-                                           Register scratch,
-                                           Label* no_map_match);
-
   void LoadNativeContextSlot(int index, Register dst);
 
   // Load the initial map from the global function. The registers
@@ -838,8 +868,10 @@
   void StoreRepresentation(Register src, const MemOperand& mem,
                            Representation r, Register scratch = no_reg);
 
-  void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
-  void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
+  void AddSmiLiteral(Register dst, Register src, Smi* smi,
+                     Register scratch = r0);
+  void SubSmiLiteral(Register dst, Register src, Smi* smi,
+                     Register scratch = r0);
   void CmpSmiLiteral(Register src1, Smi* smi, Register scratch);
   void CmpLogicalSmiLiteral(Register src1, Smi* smi, Register scratch);
   void AndSmiLiteral(Register dst, Register src, Smi* smi);
@@ -891,9 +923,10 @@
                           const ParameterCount& actual, InvokeFlag flag,
                           const CallWrapper& call_wrapper);
 
-  void FloodFunctionIfStepping(Register fun, Register new_target,
-                               const ParameterCount& expected,
-                               const ParameterCount& actual);
+  // On function call, call into the debugger if necessary.
+  void CheckDebugHook(Register fun, Register new_target,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
@@ -914,12 +947,9 @@
 
   void IsObjectNameType(Register object, Register scratch, Label* fail);
 
-  // ---------------------------------------------------------------------------
-  // Debugger Support
+  // Frame restart support
+  void MaybeDropFrames();
 
-  void DebugBreak();
-
-  // ---------------------------------------------------------------------------
   // Exception handling
 
   // Push a new stack handler and link into stack handler chain.
@@ -990,25 +1020,6 @@
   void FastAllocate(Register object_size, Register result, Register result_end,
                     Register scratch, AllocationFlags flags);
 
-  void AllocateTwoByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-  void AllocateOneByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-  void AllocateTwoByteConsString(Register result, Register length,
-                                 Register scratch1, Register scratch2,
-                                 Label* gc_required);
-  void AllocateOneByteConsString(Register result, Register length,
-                                 Register scratch1, Register scratch2,
-                                 Label* gc_required);
-  void AllocateTwoByteSlicedString(Register result, Register length,
-                                   Register scratch1, Register scratch2,
-                                   Label* gc_required);
-  void AllocateOneByteSlicedString(Register result, Register length,
-                                   Register scratch1, Register scratch2,
-                                   Label* gc_required);
-
   // Allocates a heap number or jumps to the gc_required label if the young
   // space is full and a scavenge is needed. All registers are also
   // clobbered when control continues at the gc_required label.
@@ -1047,14 +1058,6 @@
   void GetMapConstructor(Register result, Register map, Register temp,
                          Register temp2);
 
-  // Try to get function prototype of a function and puts the value in
-  // the result register. Checks that the function really is a
-  // function and jumps to the miss label if the fast checks fail. The
-  // function register will be untouched; the other registers may be
-  // clobbered.
-  void TryGetFunctionPrototype(Register function, Register result,
-                               Register scratch, Label* miss);
-
   // Compare object type for heap object.  heap_object contains a non-Smi
   // whose object type should be compared with the given type.  This both
   // sets the flags and leaves the object type in the type_reg register.
@@ -1071,22 +1074,6 @@
   // sets the flags and leaves the object type in the type_reg register.
   void CompareInstanceType(Register map, Register type_reg, InstanceType type);
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map, Register scratch, Label* fail);
-
-  // Check if a map for a JSObject indicates that the object has fast smi only
-  // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map, Register scratch, Label* fail);
-
-  // Check to see if maybe_number can be stored as a double in
-  // FastDoubleElements. If it can, store it at the index specified by key in
-  // the FastDoubleElements array elements. Otherwise jump to fail.
-  void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
-                                   Register elements_reg, Register scratch1,
-                                   DoubleRegister double_scratch, Label* fail,
-                                   int elements_offset = 0);
-
   // Compare an object's map with the specified map and its transitioned
   // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
   // set with result of map compare. If multiple map compares are required, the
@@ -1576,12 +1563,19 @@
   // Source and destination can be the same register.
   void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
 
-  // Untag the source value into destination and jump if source is not a smi.
-  // Souce and destination can be the same register.
-  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
   inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
 
+  inline void TestIfSmi(MemOperand value) {
+    if (is_uint12(value.offset())) {
+      tm(value, Operand(1));
+    } else if (is_int20(value.offset())) {
+      tmy(value, Operand(1));
+    } else {
+      LoadB(r0, value);
+      tmll(r0, Operand(1));
+    }
+  }
+
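// Editorial sketch (not part of the patch): the three branches above track
// the displacement widths of the underlying formats — tm takes a 12-bit
// unsigned displacement, tmy a 20-bit signed one, and anything wider needs
// an explicit load first:
static inline bool FitsUint12(int offset) {
  return offset >= 0 && offset < (1 << 12);
}
static inline bool FitsInt20(int offset) {
  return offset >= -(1 << 19) && offset < (1 << 19);
}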
   inline void TestIfPositiveSmi(Register value, Register scratch) {
     STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
                   (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
@@ -1695,11 +1689,6 @@
       Register first_object_instance_type, Register second_object_instance_type,
       Register scratch1, Register scratch2, Label* failure);
 
-  // Check if instance type is sequential one-byte string and jump to label if
-  // it is not.
-  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
-                                                Label* failure);
-
   void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
 
   void EmitSeqStringSetCharCheck(Register string, Register index,
@@ -1746,7 +1735,7 @@
   }
 
   // Load the type feedback vector from a JavaScript frame.
-  void EmitLoadTypeFeedbackVector(Register vector);
+  void EmitLoadFeedbackVector(Register vector);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type,
@@ -1772,21 +1761,6 @@
                                        Register scratch2_reg,
                                        Label* no_memento_found);
 
-  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
-                                         Register scratch_reg,
-                                         Register scratch2_reg,
-                                         Label* memento_found) {
-    Label no_memento_found;
-    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
-                                    &no_memento_found);
-    beq(memento_found);
-    bind(&no_memento_found);
-  }
-
-  // Jumps to found label if a prototype map has dictionary elements.
-  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
-                                        Register scratch1, Label* found);
-
  private:
   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
@@ -1802,10 +1776,6 @@
                       bool* definitely_mismatches, InvokeFlag flag,
                       const CallWrapper& call_wrapper);
 
-  void InitializeNewString(Register string, Register length,
-                           Heap::RootListIndex map_index, Register scratch1,
-                           Register scratch2);
-
   // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
   void InNewSpace(Register object, Register scratch,
                   Condition cond,  // eq for new space, ne otherwise.
diff --git a/src/s390/simulator-s390.cc b/src/s390/simulator-s390.cc
index 74d37bc..c5d3a1c 100644
--- a/src/s390/simulator-s390.cc
+++ b/src/s390/simulator-s390.cc
@@ -743,6 +743,18 @@
     EvalTable[i] = &Simulator::Evaluate_Unknown;
   }
 
+#define S390_SUPPORTED_VECTOR_OPCODE_LIST(V)                 \
+  V(vfs, VFS, 0xE7E2) /* type = VRR_C VECTOR FP SUBTRACT  */ \
+  V(vfa, VFA, 0xE7E3) /* type = VRR_C VECTOR FP ADD  */      \
+  V(vfd, VFD, 0xE7E5) /* type = VRR_C VECTOR FP DIVIDE  */   \
+  V(vfm, VFM, 0xE7E7) /* type = VRR_C VECTOR FP MULTIPLY  */
+
+#define CREATE_EVALUATE_TABLE(name, op_name, op_value) \
+  EvalTable[op_name] = &Simulator::Evaluate_##op_name;
+  S390_SUPPORTED_VECTOR_OPCODE_LIST(CREATE_EVALUATE_TABLE);
+#undef CREATE_EVALUATE_TABLE
+
+  EvalTable[DUMY] = &Simulator::Evaluate_DUMY;
   EvalTable[BKPT] = &Simulator::Evaluate_BKPT;
   EvalTable[SPM] = &Simulator::Evaluate_SPM;
   EvalTable[BALR] = &Simulator::Evaluate_BALR;
@@ -953,6 +965,7 @@
   EvalTable[ALSIH] = &Simulator::Evaluate_ALSIH;
   EvalTable[ALSIHN] = &Simulator::Evaluate_ALSIHN;
   EvalTable[CIH] = &Simulator::Evaluate_CIH;
+  EvalTable[CLIH] = &Simulator::Evaluate_CLIH;
   EvalTable[STCK] = &Simulator::Evaluate_STCK;
   EvalTable[CFC] = &Simulator::Evaluate_CFC;
   EvalTable[IPM] = &Simulator::Evaluate_IPM;
@@ -972,6 +985,7 @@
   EvalTable[SAR] = &Simulator::Evaluate_SAR;
   EvalTable[EAR] = &Simulator::Evaluate_EAR;
   EvalTable[MSR] = &Simulator::Evaluate_MSR;
+  EvalTable[MSRKC] = &Simulator::Evaluate_MSRKC;
   EvalTable[MVST] = &Simulator::Evaluate_MVST;
   EvalTable[CUSE] = &Simulator::Evaluate_CUSE;
   EvalTable[SRST] = &Simulator::Evaluate_SRST;
@@ -1145,6 +1159,7 @@
   EvalTable[ALGR] = &Simulator::Evaluate_ALGR;
   EvalTable[SLGR] = &Simulator::Evaluate_SLGR;
   EvalTable[MSGR] = &Simulator::Evaluate_MSGR;
+  EvalTable[MSGRKC] = &Simulator::Evaluate_MSGRKC;
   EvalTable[DSGR] = &Simulator::Evaluate_DSGR;
   EvalTable[LRVGR] = &Simulator::Evaluate_LRVGR;
   EvalTable[LPGFR] = &Simulator::Evaluate_LPGFR;
@@ -1835,6 +1850,11 @@
   return *ptr;
 }
 
+float Simulator::ReadFloat(intptr_t addr) {
+  float* ptr = reinterpret_cast<float*>(addr);
+  return *ptr;
+}
+
 // Returns the limit of the stack area to enable checking for stack overflows.
 uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
   // The simulator uses a separate JS stack. If we have exhausted the C stack,
@@ -6049,6 +6069,15 @@
   int d2 = AS(RXEInstruction)->D2Value();      \
   int length = 6;
 
+#define DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4) \
+  int r1 = AS(VRR_C_Instruction)->R1Value();             \
+  int r2 = AS(VRR_C_Instruction)->R2Value();             \
+  int r3 = AS(VRR_C_Instruction)->R3Value();             \
+  int m6 = AS(VRR_C_Instruction)->M6Value();             \
+  int m5 = AS(VRR_C_Instruction)->M5Value();             \
+  int m4 = AS(VRR_C_Instruction)->M4Value();             \
+  int length = 6;
+
 #define GET_ADDRESS(index_reg, base_reg, offset)       \
   (((index_reg) == 0) ? 0 : get_register(index_reg)) + \
       (((base_reg) == 0) ? 0 : get_register(base_reg)) + offset
@@ -6058,6 +6087,77 @@
   return 0;
 }
 
+EVALUATE(VFA) {
+  DCHECK_OPCODE(VFA);
+  DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+  USE(m6);
+  USE(m5);
+  USE(m4);
+  DCHECK(m5 == 8);
+  DCHECK(m4 == 3);
+  double r2_val = get_double_from_d_register(r2);
+  double r3_val = get_double_from_d_register(r3);
+  double r1_val = r2_val + r3_val;
+  set_d_register_from_double(r1, r1_val);
+  return length;
+}
+
+EVALUATE(VFS) {
+  DCHECK_OPCODE(VFS);
+  DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+  USE(m6);
+  USE(m5);
+  USE(m4);
+  DCHECK(m5 == 8);
+  DCHECK(m4 == 3);
+  double r2_val = get_double_from_d_register(r2);
+  double r3_val = get_double_from_d_register(r3);
+  double r1_val = r2_val - r3_val;
+  set_d_register_from_double(r1, r1_val);
+  return length;
+}
+
+EVALUATE(VFM) {
+  DCHECK_OPCODE(VFM);
+  DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+  USE(m6);
+  USE(m5);
+  USE(m4);
+  DCHECK(m5 == 8);
+  DCHECK(m4 == 3);
+  double r2_val = get_double_from_d_register(r2);
+  double r3_val = get_double_from_d_register(r3);
+  double r1_val = r2_val * r3_val;
+  set_d_register_from_double(r1, r1_val);
+  return length;
+}
+
+EVALUATE(VFD) {
+  DCHECK_OPCODE(VFD);
+  DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+  USE(m6);
+  USE(m5);
+  USE(m4);
+  DCHECK(m5 == 8);
+  DCHECK(m4 == 3);
+  double r2_val = get_double_from_d_register(r2);
+  double r3_val = get_double_from_d_register(r3);
+  double r1_val = r2_val / r3_val;
+  set_d_register_from_double(r1, r1_val);
+  return length;
+}
+
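// Editorial note (not part of the patch): in the four vector FP handlers
// above, the DCHECKs pin the modifier fields — m4 == 3 selects 64-bit
// lanes and m5 == 8 the single-element (scalar) form — so each handler
// reduces to a single double-precision operation, e.g. for VFA:
static inline double VfaScalarRef(double a, double b) { return a + b; }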
+EVALUATE(DUMY) {
+  DCHECK_OPCODE(DUMY);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  USE(r1);
+  USE(x2);
+  USE(b2);
+  USE(d2);
+  // The dummy instruction decodes as RXY-A but performs no operation.
+  return length;
+}
+
 EVALUATE(CLR) {
   DCHECK_OPCODE(CLR);
   DECODE_RR_INSTRUCTION(r1, r2);
@@ -6474,9 +6574,18 @@
 }
 
 EVALUATE(LPR) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(LPR);
+  // Load Positive (32)
+  DECODE_RR_INSTRUCTION(r1, r2);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  // If negative, then negate it.
+  r2_val = (r2_val < 0) ? -r2_val : r2_val;
+  set_low_register(r1, r2_val);
+  SetS390ConditionCode<int32_t>(r2_val, 0);
+  if (r2_val == (static_cast<int32_t>(1) << 31)) {
+    SetS390OverflowCode(true);
+  }
+  return length;
 }
 
 EVALUATE(LNR) {
@@ -6504,15 +6613,16 @@
   DCHECK_OPCODE(LCR);
   DECODE_RR_INSTRUCTION(r1, r2);
   int32_t r2_val = get_low_register<int32_t>(r2);
-  r2_val = ~r2_val;
-  r2_val = r2_val + 1;
-  set_low_register(r1, r2_val);
+  int32_t result = 0;
+  bool isOF = false;
+  isOF = __builtin_ssub_overflow(0, r2_val, &result);
+  set_low_register(r1, result);
   SetS390ConditionCode<int32_t>(r2_val, 0);
   // Overflow occurs only for r2_val == INT_MIN. __builtin_ssub_overflow
   // reports it directly, avoiding the INT_MIN pattern comparison that a
   // GCC 4.8 bug on x86 made unreliable.
-  if (r2_val == (static_cast<int32_t>(1) << 31)) {
+  if (isOF) {
     SetS390OverflowCode(true);
   }
   return length;
@@ -7677,47 +7787,38 @@
 EVALUATE(TMLL) {
   DCHECK_OPCODE(TMLL);
   DECODE_RI_A_INSTRUCTION(instr, r1, i2);
-  int mask = i2 & 0x0000FFFF;
-  if (mask == 0) {
-    condition_reg_ = 0x0;
-    return length;
-  }
+  uint32_t mask = i2 & 0x0000FFFF;
   uint32_t r1_val = get_low_register<uint32_t>(r1);
   r1_val = r1_val & 0x0000FFFF;  // uses only the last 16bits
 
-  // Test if all selected bits are Zero
-  bool allSelectedBitsAreZeros = true;
-  for (int i = 0; i < 15; i++) {
-    if (mask & (1 << i)) {
-      if (r1_val & (1 << i)) {
-        allSelectedBitsAreZeros = false;
-        break;
-      }
-    }
-  }
-  if (allSelectedBitsAreZeros) {
+  // Test if all selected bits are zero (this also covers mask == 0)
+  if (0 == (mask & r1_val)) {
     condition_reg_ = 0x8;
     return length;  // Done!
   }
 
+  DCHECK(mask != 0);
   // Test if all selected bits are one
-  bool allSelectedBitsAreOnes = true;
-  for (int i = 0; i < 15; i++) {
-    if (mask & (1 << i)) {
-      if (!(r1_val & (1 << i))) {
-        allSelectedBitsAreOnes = false;
-        break;
-      }
-    }
-  }
-  if (allSelectedBitsAreOnes) {
+  if (mask == (mask & r1_val)) {
     condition_reg_ = 0x1;
     return length;  // Done!
   }
 
   // Now we know the selected bits are a mix of zeros and ones
   // Test if the leftmost bit is zero or one
-  for (int i = 14; i >= 0; i--) {
+#if defined(__GNUC__)
+  int leadingZeros = __builtin_clz(mask);
+  mask = 0x80000000u >> leadingZeros;
+  if (mask & r1_val) {
+    // leftmost bit is one
+    condition_reg_ = 0x4;
+  } else {
+    // leftmost bit is zero
+    condition_reg_ = 0x2;
+  }
+  return length;  // Done!
+#else
+  for (int i = 15; i >= 0; i--) {
     if (mask & (1 << i)) {
       if (r1_val & (1 << i)) {
         // leftmost bit is one
@@ -7729,6 +7830,8 @@
       return length;  // Done!
     }
   }
+#endif
+  UNREACHABLE();
   return length;
 }
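// Editorial sketch (not part of the patch): the __builtin_clz path above
// isolates the leftmost selected bit in constant time; the early-outs
// guarantee mask != 0 by this point:
static inline uint32_t LeftmostSelectedBit(uint32_t mask) {
  return 0x80000000u >> __builtin_clz(mask);  // undefined for mask == 0
}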
 
@@ -8220,9 +8323,15 @@
 }
 
 EVALUATE(AIH) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(AIH);
+  DECODE_RIL_A_INSTRUCTION(r1, i2);
+  int32_t r1_val = get_high_register<int32_t>(r1);
+  bool isOF = CheckOverflowForIntAdd(r1_val, static_cast<int32_t>(i2), int32_t);
+  r1_val += static_cast<int32_t>(i2);
+  set_high_register(r1, r1_val);
+  SetS390ConditionCode<int32_t>(r1_val, 0);
+  SetS390OverflowCode(isOF);
+  return length;
 }
 
 EVALUATE(ALSIH) {
@@ -8238,9 +8347,19 @@
 }
 
 EVALUATE(CIH) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(CIH);
+  DECODE_RIL_A_INSTRUCTION(r1, imm);
+  int32_t r1_val = get_high_register<int32_t>(r1);
+  SetS390ConditionCode<int32_t>(r1_val, static_cast<int32_t>(imm));
+  return length;
+}
+
+EVALUATE(CLIH) {
+  DCHECK_OPCODE(CLIH);
+  // Compare Logical Immediate High (32)
+  DECODE_RIL_A_INSTRUCTION(r1, imm);
+  SetS390ConditionCode<uint32_t>(get_high_register<uint32_t>(r1), imm);
+  return length;
 }
 
 EVALUATE(STCK) {
@@ -8360,6 +8479,21 @@
   return length;
 }
 
+EVALUATE(MSRKC) {
+  DCHECK_OPCODE(MSRKC);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  int32_t r3_val = get_low_register<int32_t>(r3);
+  int64_t result64 =
+      static_cast<int64_t>(r2_val) * static_cast<int64_t>(r3_val);
+  int32_t result32 = static_cast<int32_t>(result64);
+  bool isOF = (static_cast<int64_t>(result32) != result64);
+  SetS390ConditionCode<int32_t>(result32, 0);
+  SetS390OverflowCode(isOF);
+  set_low_register(r1, result32);
+  return length;
+}
+
 EVALUATE(MVST) {
   UNIMPLEMENTED();
   USE(instr);
@@ -9800,9 +9934,17 @@
 }
 
 EVALUATE(LPGR) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(LPGR);
+  // Load Positive (64)
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r2_val = get_register(r2);
+  r2_val = (r2_val < 0) ? -r2_val : r2_val;  // If negative, then negate it.
+  set_register(r1, r2_val);
+  SetS390ConditionCode<int64_t>(r2_val, 0);
+  if (r2_val == (static_cast<int64_t>(1) << 63)) {
+    SetS390OverflowCode(true);
+  }
+  return length;
 }
 
 EVALUATE(LNGR) {
@@ -9831,12 +9973,16 @@
   DCHECK_OPCODE(LCGR);
   DECODE_RRE_INSTRUCTION(r1, r2);
   int64_t r2_val = get_register(r2);
-  r2_val = ~r2_val;
-  r2_val = r2_val + 1;
-  set_register(r1, r2_val);
-  SetS390ConditionCode<int64_t>(r2_val, 0);
-  // if the input is INT_MIN, loading its compliment would be overflowing
-  if (r2_val == (static_cast<int64_t>(1) << 63)) {
+  int64_t result = 0;
+  bool isOF = false;
+#ifdef V8_TARGET_ARCH_S390X
+  isOF = __builtin_ssubl_overflow(0L, r2_val, &result);
+#else
+  isOF = __builtin_ssubll_overflow(0L, r2_val, &result);
+#endif
+  set_register(r1, result);
+  SetS390ConditionCode<int64_t>(result, 0);
+  if (isOF) {
     SetS390OverflowCode(true);
   }
   return length;
@@ -9877,6 +10023,20 @@
   return length;
 }
 
+EVALUATE(MSGRKC) {
+  DCHECK_OPCODE(MSGRKC);
+  DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+  int64_t r2_val = get_register(r2);
+  int64_t r3_val = get_register(r3);
+  volatile int64_t result64 = r2_val * r3_val;
+  bool isOF = ((r2_val == -1 && result64 == (static_cast<int64_t>(1L) << 63)) ||
+               (r2_val != 0 && result64 / r2_val != r3_val));
+  SetS390ConditionCode<int64_t>(result64, 0);
+  SetS390OverflowCode(isOF);
+  set_register(r1, result64);
+  return length;
+}
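// Editorial sketch (not part of the patch): the divide-back test above is
// the portable 64-bit overflow check; on GCC/Clang the same predicate can
// be expressed with a builtin:
static inline bool Mul64Overflows(int64_t a, int64_t b) {
  int64_t product;
  return __builtin_mul_overflow(a, b, &product);
}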
+
 EVALUATE(DSGR) {
   DCHECK_OPCODE(DSGR);
   DECODE_RRE_INSTRUCTION(r1, r2);
@@ -9901,9 +10061,15 @@
 }
 
 EVALUATE(LPGFR) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(LPGFR);
+  // Load Positive (64 <- 32)
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int32_t r2_val = get_low_register<int32_t>(r2);
+  // If negative, then negate it.
+  int64_t r1_val = static_cast<int64_t>((r2_val < 0) ? -r2_val : r2_val);
+  set_register(r1, r1_val);
+  SetS390ConditionCode<int64_t>(r1_val, 0);
+  return length;
 }
 
 EVALUATE(LNGFR) {
@@ -9992,15 +10158,26 @@
 }
 
 EVALUATE(MSGFR) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(MSGFR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  int64_t r1_val = get_register(r1);
+  int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+  int64_t product = r1_val * r2_val;
+  set_register(r1, product);
+  return length;
 }
 
 EVALUATE(DSGFR) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(DSGFR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  DCHECK(r1 % 2 == 0);
+  int64_t r1_val = get_register(r1 + 1);
+  int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+  int64_t quotient = r1_val / r2_val;
+  int64_t remainder = r1_val % r2_val;
+  set_register(r1, remainder);
+  set_register(r1 + 1, quotient);
+  return length;
 }
 
 EVALUATE(KMAC) {
@@ -10076,9 +10253,13 @@
 }
 
 EVALUATE(CGFR) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(CGFR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  // Compare (64 <- 32)
+  int64_t r1_val = get_register(r1);
+  int64_t r2_val = static_cast<int64_t>(get_low_register<int32_t>(r2));
+  SetS390ConditionCode<int64_t>(r1_val, r2_val);
+  return length;
 }
 
 EVALUATE(KIMD) {
@@ -10227,9 +10408,13 @@
 }
 
 EVALUATE(LLGCR) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(LLGCR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  uint64_t r2_val = get_low_register<uint64_t>(r2);
+  r2_val <<= 56;
+  r2_val >>= 56;
+  set_register(r1, r2_val);
+  return length;
 }
 
 EVALUATE(LLGHR) {
@@ -10307,9 +10492,13 @@
 }
 
 EVALUATE(LLCR) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(LLCR);
+  DECODE_RRE_INSTRUCTION(r1, r2);
+  uint32_t r2_val = get_low_register<uint32_t>(r2);
+  r2_val <<= 24;
+  r2_val >>= 24;
+  set_low_register(r1, r2_val);
+  return length;
 }
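// Editorial note (not part of the patch): the shift pairs in LLGCR/LLCR
// zero-extend the low byte, i.e. they are equivalent to masking:
static inline uint32_t ZeroExtendByte(uint32_t v) { return v & 0xFF; }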
 
 EVALUATE(LLHR) {
@@ -10906,15 +11095,34 @@
 }
 
 EVALUATE(MSGF) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(MSGF);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  int64_t mem_val =
+      static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val, instr));
+  int64_t r1_val = get_register(r1);
+  int64_t product = r1_val * mem_val;
+  set_register(r1, product);
+  return length;
 }
 
 EVALUATE(DSGF) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(DSGF);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  DCHECK(r1 % 2 == 0);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  int64_t mem_val =
+      static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val, instr));
+  int64_t r1_val = get_register(r1 + 1);
+  int64_t quotient = r1_val / mem_val;
+  int64_t remainder = r1_val % mem_val;
+  set_register(r1, remainder);
+  set_register(r1 + 1, quotient);
+  return length;
 }
 
 EVALUATE(LRVG) {
@@ -11020,9 +11228,9 @@
 }
 
 EVALUATE(PFD) {
-  UNIMPLEMENTED();
+  DCHECK_OPCODE(PFD);
   USE(instr);
-  return 0;
+  return 6;
 }
 
 EVALUATE(STRV) {
@@ -11473,9 +11681,20 @@
 }
 
 EVALUATE(DL) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(DL);
+  DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  DCHECK(r1 % 2 == 0);
+  uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+  uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
+  uint64_t quotient =
+      static_cast<uint64_t>(r1_val) / static_cast<uint64_t>(mem_val);
+  uint64_t remainder =
+      static_cast<uint64_t>(r1_val) % static_cast<uint64_t>(mem_val);
+  set_low_register(r1, remainder);
+  set_low_register(r1 + 1, quotient);
+  return length;
 }
 
 EVALUATE(ALC) {
@@ -12333,9 +12552,16 @@
 }
 
 EVALUATE(CEB) {
-  UNIMPLEMENTED();
-  USE(instr);
-  return 0;
+  DCHECK_OPCODE(CEB);
+  DECODE_RXE_INSTRUCTION(r1, b2, x2, d2);
+  int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
+  int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
+  intptr_t d2_val = d2;
+  float r1_val = get_float32_from_d_register(r1);
+  float fval = ReadFloat(b2_val + x2_val + d2_val);
+  SetS390ConditionCode<float>(r1_val, fval);
+  return length;
 }
 
 EVALUATE(AEB) {
diff --git a/src/s390/simulator-s390.h b/src/s390/simulator-s390.h
index 1ce6bf7..98532ff 100644
--- a/src/s390/simulator-s390.h
+++ b/src/s390/simulator-s390.h
@@ -304,6 +304,7 @@
 
   inline int64_t ReadDW(intptr_t addr);
   inline double ReadDouble(intptr_t addr);
+  inline float ReadFloat(intptr_t addr);
   inline void WriteDW(intptr_t addr, int64_t value);
 
   // S390
@@ -522,6 +523,12 @@
   static void EvalTableInit();
 
 #define EVALUATE(name) int Evaluate_##name(Instruction* instr)
+#define EVALUATE_VRR_INSTRUCTIONS(name, op_name, op_value) EVALUATE(op_name);
+  S390_VRR_C_OPCODE_LIST(EVALUATE_VRR_INSTRUCTIONS)
+  S390_VRR_A_OPCODE_LIST(EVALUATE_VRR_INSTRUCTIONS)
+#undef EVALUATE_VRR_INSTRUCTIONS
+
+  EVALUATE(DUMY);
   EVALUATE(BKPT);
   EVALUATE(SPM);
   EVALUATE(BALR);
@@ -732,6 +739,7 @@
   EVALUATE(ALSIH);
   EVALUATE(ALSIHN);
   EVALUATE(CIH);
+  EVALUATE(CLIH);
   EVALUATE(STCK);
   EVALUATE(CFC);
   EVALUATE(IPM);
@@ -751,6 +759,7 @@
   EVALUATE(SAR);
   EVALUATE(EAR);
   EVALUATE(MSR);
+  EVALUATE(MSRKC);
   EVALUATE(MVST);
   EVALUATE(CUSE);
   EVALUATE(SRST);
@@ -924,6 +933,7 @@
   EVALUATE(ALGR);
   EVALUATE(SLGR);
   EVALUATE(MSGR);
+  EVALUATE(MSGRKC);
   EVALUATE(DSGR);
   EVALUATE(LRVGR);
   EVALUATE(LPGFR);
diff --git a/src/signature.h b/src/signature.h
index 32050fe..519138b 100644
--- a/src/signature.h
+++ b/src/signature.h
@@ -32,7 +32,7 @@
     return reps_[index];
   }
 
-  bool Equals(Signature* that) {
+  bool Equals(const Signature* that) const {
     if (this == that) return true;
     if (this->parameter_count() != that->parameter_count()) return false;
     if (this->return_count() != that->return_count()) return false;
diff --git a/src/snapshot/code-serializer.cc b/src/snapshot/code-serializer.cc
index 86a9164..7f57f0a 100644
--- a/src/snapshot/code-serializer.cc
+++ b/src/snapshot/code-serializer.cc
@@ -7,8 +7,10 @@
 #include <memory>
 
 #include "src/code-stubs.h"
+#include "src/counters.h"
 #include "src/log.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 #include "src/snapshot/deserializer.h"
 #include "src/snapshot/snapshot.h"
 #include "src/version.h"
@@ -88,7 +90,12 @@
 #define IC_KIND_CASE(KIND) case Code::KIND:
         IC_KIND_LIST(IC_KIND_CASE)
 #undef IC_KIND_CASE
-        SerializeCodeStub(code_object, how_to_code, where_to_point);
+        if (code_object->builtin_index() == -1) {
+          SerializeCodeStub(code_object, how_to_code, where_to_point);
+        } else {
+          SerializeBuiltin(code_object->builtin_index(), how_to_code,
+                           where_to_point);
+        }
         return;
       case Code::FUNCTION:
         DCHECK(code_object->has_reloc_info_for_serialization());
@@ -104,6 +111,12 @@
     return SerializeObject(isolate()->heap()->undefined_value(), how_to_code,
                            where_to_point, skip);
   }
+
+  if (obj->IsScript()) {
+    // Wrapper object is a context-dependent JSValue. Reset it here.
+    Script::cast(obj)->set_wrapper(isolate()->heap()->undefined_value());
+  }
+
   // Past this point we should not see any (context-specific) maps anymore.
   CHECK(!obj->IsMap());
   // There should be no references to the global object embedded.
@@ -218,23 +231,33 @@
   return scope.CloseAndEscape(result);
 }
 
+WasmCompiledModuleSerializer::WasmCompiledModuleSerializer(
+    Isolate* isolate, uint32_t source_hash, Handle<Context> native_context,
+    Handle<SeqOneByteString> module_bytes)
+    : CodeSerializer(isolate, source_hash) {
+  reference_map()->AddAttachedReference(*isolate->native_context());
+  reference_map()->AddAttachedReference(*module_bytes);
+}
+
 std::unique_ptr<ScriptData> WasmCompiledModuleSerializer::SerializeWasmModule(
     Isolate* isolate, Handle<FixedArray> input) {
   Handle<WasmCompiledModule> compiled_module =
       Handle<WasmCompiledModule>::cast(input);
-  WasmCompiledModuleSerializer wasm_cs(isolate, 0);
-  wasm_cs.reference_map()->AddAttachedReference(*isolate->native_context());
-  wasm_cs.reference_map()->AddAttachedReference(
-      *compiled_module->module_bytes());
+  WasmCompiledModuleSerializer wasm_cs(isolate, 0, isolate->native_context(),
+                                       handle(compiled_module->module_bytes()));
   ScriptData* data = wasm_cs.Serialize(compiled_module);
   return std::unique_ptr<ScriptData>(data);
 }
 
 MaybeHandle<FixedArray> WasmCompiledModuleSerializer::DeserializeWasmModule(
     Isolate* isolate, ScriptData* data, Vector<const byte> wire_bytes) {
+  MaybeHandle<FixedArray> nothing;
+  if (!wasm::IsWasmCodegenAllowed(isolate, isolate->native_context())) {
+    return nothing;
+  }
   SerializedCodeData::SanityCheckResult sanity_check_result =
       SerializedCodeData::CHECK_SUCCESS;
-  MaybeHandle<FixedArray> nothing;
+
   const SerializedCodeData scd = SerializedCodeData::FromCachedData(
       isolate, data, 0, &sanity_check_result);
 
@@ -262,13 +285,39 @@
 
   MaybeHandle<HeapObject> obj = deserializer.DeserializeObject(isolate);
   if (obj.is_null() || !obj.ToHandleChecked()->IsFixedArray()) return nothing;
-  Handle<WasmCompiledModule> compiled_module =
-      Handle<WasmCompiledModule>::cast(obj.ToHandleChecked());
+  // Cast without type checks, as the module wrapper is not there yet.
+  Handle<WasmCompiledModule> compiled_module(
+      static_cast<WasmCompiledModule*>(*obj.ToHandleChecked()), isolate);
 
-  WasmCompiledModule::RecreateModuleWrapper(isolate, compiled_module);
+  WasmCompiledModule::ReinitializeAfterDeserialization(isolate,
+                                                       compiled_module);
+  DCHECK(WasmCompiledModule::IsWasmCompiledModule(*compiled_module));
   return compiled_module;
 }
 
+void WasmCompiledModuleSerializer::SerializeCodeObject(
+    Code* code_object, HowToCode how_to_code, WhereToPoint where_to_point) {
+  Code::Kind kind = code_object->kind();
+  switch (kind) {
+    case Code::WASM_FUNCTION:
+    case Code::JS_TO_WASM_FUNCTION:
+      // Just serialize the code_object.
+      break;
+    case Code::WASM_TO_JS_FUNCTION:
+      // Serialize the illegal builtin instead. On instantiation of a
+      // deserialized module, these will be replaced again.
+      code_object = *isolate()->builtins()->Illegal();
+      break;
+    default:
+      UNREACHABLE();
+  }
+  SerializeGeneric(code_object, how_to_code, where_to_point);
+}
+
+bool WasmCompiledModuleSerializer::ElideObject(Object* obj) {
+  return obj->IsWeakCell() || obj->IsForeign() || obj->IsBreakPointInfo();
+}
+
 class Checksum {
  public:
   explicit Checksum(Vector<const byte> payload) {
diff --git a/src/snapshot/code-serializer.h b/src/snapshot/code-serializer.h
index 1575737..4d87a73 100644
--- a/src/snapshot/code-serializer.h
+++ b/src/snapshot/code-serializer.h
@@ -64,23 +64,13 @@
 
  protected:
   void SerializeCodeObject(Code* code_object, HowToCode how_to_code,
-                           WhereToPoint where_to_point) override {
-    Code::Kind kind = code_object->kind();
-    if (kind == Code::WASM_FUNCTION || kind == Code::WASM_TO_JS_FUNCTION ||
-        kind == Code::JS_TO_WASM_FUNCTION) {
-      SerializeGeneric(code_object, how_to_code, where_to_point);
-    } else {
-      UNREACHABLE();
-    }
-  }
-
-  bool ElideObject(Object* obj) override {
-    return obj->IsWeakCell() || obj->IsForeign();
-  };
+                           WhereToPoint where_to_point) override;
+  bool ElideObject(Object* obj) override;
 
  private:
-  WasmCompiledModuleSerializer(Isolate* isolate, uint32_t source_hash)
-      : CodeSerializer(isolate, source_hash) {}
+  WasmCompiledModuleSerializer(Isolate* isolate, uint32_t source_hash,
+                               Handle<Context> native_context,
+                               Handle<SeqOneByteString> module_bytes);
   DISALLOW_COPY_AND_ASSIGN(WasmCompiledModuleSerializer);
 };
 
diff --git a/src/snapshot/deserializer.cc b/src/snapshot/deserializer.cc
index aabd806..86d20e1 100644
--- a/src/snapshot/deserializer.cc
+++ b/src/snapshot/deserializer.cc
@@ -4,13 +4,17 @@
 
 #include "src/snapshot/deserializer.h"
 
+#include "src/api.h"
+#include "src/assembler-inl.h"
 #include "src/bootstrapper.h"
 #include "src/external-reference-table.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
 #include "src/isolate.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 #include "src/snapshot/natives.h"
 #include "src/v8.h"
+#include "src/v8threads.h"
 
 namespace v8 {
 namespace internal {
@@ -93,6 +97,7 @@
     isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
     DeserializeDeferredObjects();
     FlushICacheForNewIsolate();
+    RestoreExternalReferenceRedirectors(&accessor_infos_);
   }
 
   isolate_->heap()->set_native_contexts_list(
@@ -111,7 +116,8 @@
 }
 
 MaybeHandle<Object> Deserializer::DeserializePartial(
-    Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
+    Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+    v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
   Initialize(isolate);
   if (!ReserveSpace()) {
     V8::FatalProcessOutOfMemory("deserialize context");
@@ -128,7 +134,7 @@
   Object* root;
   VisitPointer(&root);
   DeserializeDeferredObjects();
-  DeserializeInternalFields();
+  DeserializeInternalFields(internal_fields_deserializer);
 
   isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
 
@@ -213,14 +219,13 @@
   }
 }
 
-void Deserializer::DeserializeInternalFields() {
+void Deserializer::DeserializeInternalFields(
+    v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
   if (!source_.HasMore() || source_.Get() != kInternalFieldsData) return;
   DisallowHeapAllocation no_gc;
   DisallowJavascriptExecution no_js(isolate_);
   DisallowCompilation no_compile(isolate_);
-  v8::DeserializeInternalFieldsCallback callback =
-      isolate_->deserialize_internal_fields_callback();
-  DCHECK_NOT_NULL(callback);
+  DCHECK_NOT_NULL(internal_fields_deserializer.callback);
   for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
     HandleScope scope(isolate_);
     int space = code & kSpaceMask;
@@ -232,8 +237,9 @@
     int size = source_.GetInt();
     byte* data = new byte[size];
     source_.CopyRaw(data, size);
-    callback(v8::Utils::ToLocal(obj), index,
-             {reinterpret_cast<char*>(data), size});
+    internal_fields_deserializer.callback(v8::Utils::ToLocal(obj), index,
+                                          {reinterpret_cast<char*>(data), size},
+                                          internal_fields_deserializer.data);
     delete[] data;
   }
 }
@@ -316,6 +322,10 @@
     if (deserializing_user_code() || space == LO_SPACE) {
       new_code_objects_.Add(Code::cast(obj));
     }
+  } else if (obj->IsAccessorInfo()) {
+    if (isolate_->external_reference_redirector()) {
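+      // Remember deserialized AccessorInfos so that their getter redirects
+      // can be re-installed by RestoreExternalReferenceRedirectors once
+      // deserialization has finished.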
+      accessor_infos_.Add(AccessorInfo::cast(obj));
+    }
   }
   // Check alignment.
   DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
diff --git a/src/snapshot/deserializer.h b/src/snapshot/deserializer.h
index db79962..0348956 100644
--- a/src/snapshot/deserializer.h
+++ b/src/snapshot/deserializer.h
@@ -48,8 +48,9 @@
   void Deserialize(Isolate* isolate);
 
   // Deserialize a single object and the objects reachable from it.
-  MaybeHandle<Object> DeserializePartial(Isolate* isolate,
-                                         Handle<JSGlobalProxy> global_proxy);
+  MaybeHandle<Object> DeserializePartial(
+      Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+      v8::DeserializeInternalFieldsCallback internal_fields_deserializer);
 
   // Deserialize an object graph. Fail gracefully.
   MaybeHandle<HeapObject> DeserializeObject(Isolate* isolate);
@@ -83,12 +84,13 @@
     DCHECK_EQ(kWordAligned, next_alignment_);
     int alignment = data - (kAlignmentPrefix - 1);
     DCHECK_LE(kWordAligned, alignment);
-    DCHECK_LE(alignment, kSimd128Unaligned);
+    DCHECK_LE(alignment, kDoubleUnaligned);
     next_alignment_ = static_cast<AllocationAlignment>(alignment);
   }
 
   void DeserializeDeferredObjects();
-  void DeserializeInternalFields();
+  void DeserializeInternalFields(
+      v8::DeserializeInternalFieldsCallback internal_fields_deserializer);
 
   void FlushICacheForNewIsolate();
   void FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
@@ -138,6 +140,7 @@
 
   List<HeapObject*> deserialized_large_objects_;
   List<Code*> new_code_objects_;
+  List<AccessorInfo*> accessor_infos_;
   List<Handle<String> > new_internalized_strings_;
   List<Handle<Script> > new_scripts_;
 
diff --git a/src/snapshot/partial-serializer.cc b/src/snapshot/partial-serializer.cc
index e89f44f..7f30c9c 100644
--- a/src/snapshot/partial-serializer.cc
+++ b/src/snapshot/partial-serializer.cc
@@ -23,7 +23,7 @@
   OutputStatistics("PartialSerializer");
 }
 
-void PartialSerializer::Serialize(Object** o) {
+void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
   if ((*o)->IsContext()) {
     Context* context = Context::cast(*o);
     reference_map()->AddAttachedReference(context->global_proxy());
@@ -93,16 +93,15 @@
   // Clear literal boilerplates.
   if (obj->IsJSFunction()) {
     JSFunction* function = JSFunction::cast(obj);
-    LiteralsArray* literals = function->literals();
-    for (int i = 0; i < literals->literals_count(); i++) {
-      literals->set_literal_undefined(i);
-    }
     function->ClearTypeFeedbackInfo();
   }
 
   if (obj->IsJSObject()) {
     JSObject* jsobj = JSObject::cast(obj);
-    if (jsobj->GetInternalFieldCount() > 0) internal_field_holders_.Add(jsobj);
+    if (jsobj->GetInternalFieldCount() > 0) {
+      DCHECK_NOT_NULL(serialize_internal_fields_.callback);
+      internal_field_holders_.Add(jsobj);
+    }
   }
 
   // Object has not yet been serialized.  Serialize it here.
@@ -129,7 +128,7 @@
   DisallowHeapAllocation no_gc;
   DisallowJavascriptExecution no_js(isolate());
   DisallowCompilation no_compile(isolate());
-  DCHECK_NOT_NULL(serialize_internal_fields_);
+  DCHECK_NOT_NULL(serialize_internal_fields_.callback);
   sink_.Put(kInternalFieldsData, "internal fields data");
   while (internal_field_holders_.length() > 0) {
     HandleScope scope(isolate());
@@ -139,7 +138,8 @@
     int internal_fields_count = obj->GetInternalFieldCount();
     for (int i = 0; i < internal_fields_count; i++) {
       if (obj->GetInternalField(i)->IsHeapObject()) continue;
-      StartupData data = serialize_internal_fields_(v8::Utils::ToLocal(obj), i);
+      StartupData data = serialize_internal_fields_.callback(
+          v8::Utils::ToLocal(obj), i, serialize_internal_fields_.data);
       sink_.Put(kNewObject + reference.space(), "internal field holder");
       PutBackReference(*obj, reference);
       sink_.PutInt(i, "internal field index");
diff --git a/src/snapshot/partial-serializer.h b/src/snapshot/partial-serializer.h
index 45d64e4..2d7c9ed 100644
--- a/src/snapshot/partial-serializer.h
+++ b/src/snapshot/partial-serializer.h
@@ -21,7 +21,7 @@
   ~PartialSerializer() override;
 
   // Serialize the objects reachable from a single object pointer.
-  void Serialize(Object** o);
+  void Serialize(Object** o, bool include_global_proxy);
 
  private:
   void SerializeObject(HeapObject* o, HowToCode how_to_code,
diff --git a/src/snapshot/serializer-common.cc b/src/snapshot/serializer-common.cc
index f188793..89aabdf 100644
--- a/src/snapshot/serializer-common.cc
+++ b/src/snapshot/serializer-common.cc
@@ -7,6 +7,7 @@
 #include "src/external-reference-table.h"
 #include "src/ic/stub-cache.h"
 #include "src/list-inl.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -21,8 +22,9 @@
   ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
   for (uint32_t i = 0; i < table->size(); ++i) {
     Address addr = table->address(i);
-    DCHECK(map_->Get(addr).IsNothing() ||
-           strncmp(table->name(i), "Redirect to ", 12) == 0);
+    // Ignore duplicate API references.
+    if (table->is_api_reference(i) && !map_->Get(addr).IsNothing()) continue;
+    DCHECK(map_->Get(addr).IsNothing());
     map_->Set(addr, i);
     DCHECK(map_->Get(addr).IsJust());
   }
@@ -81,5 +83,14 @@
   return !o->IsString() && !o->IsScript();
 }
 
+void SerializerDeserializer::RestoreExternalReferenceRedirectors(
+    List<AccessorInfo*>* accessor_infos) {
+  // Restore wiped accessor infos.
+  for (AccessorInfo* info : *accessor_infos) {
+    Foreign::cast(info->js_getter())
+        ->set_foreign_address(info->redirected_getter());
+  }
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/snapshot/serializer-common.h b/src/snapshot/serializer-common.h
index 201ac4e..b426efd 100644
--- a/src/snapshot/serializer-common.h
+++ b/src/snapshot/serializer-common.h
@@ -86,6 +86,8 @@
  protected:
   static bool CanBeDeferred(HeapObject* o);
 
+  void RestoreExternalReferenceRedirectors(List<AccessorInfo*>* accessor_infos);
+
   // ---------- byte code range 0x00..0x7f ----------
   // Byte codes in this range represent Where, HowToCode and WhereToPoint.
   // Where the pointed-to object can be found:
diff --git a/src/snapshot/serializer.cc b/src/snapshot/serializer.cc
index 2e971e3..d99ca2a 100644
--- a/src/snapshot/serializer.cc
+++ b/src/snapshot/serializer.cc
@@ -4,6 +4,8 @@
 
 #include "src/snapshot/serializer.h"
 
+#include "src/assembler-inl.h"
+#include "src/heap/heap-inl.h"
 #include "src/macro-assembler.h"
 #include "src/snapshot/natives.h"
 
diff --git a/src/snapshot/snapshot-common.cc b/src/snapshot/snapshot-common.cc
index 959ac56..1658b3b 100644
--- a/src/snapshot/snapshot-common.cc
+++ b/src/snapshot/snapshot-common.cc
@@ -9,6 +9,7 @@
 #include "src/api.h"
 #include "src/base/platform/platform.h"
 #include "src/full-codegen/full-codegen.h"
+#include "src/objects-inl.h"
 #include "src/snapshot/deserializer.h"
 #include "src/snapshot/snapshot-source-sink.h"
 #include "src/version.h"
@@ -50,8 +51,8 @@
 }
 
 MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
-    Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
-    size_t context_index) {
+    Isolate* isolate, Handle<JSGlobalProxy> global_proxy, size_t context_index,
+    v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
   if (!isolate->snapshot_available()) return Handle<Context>();
   base::ElapsedTimer timer;
   if (FLAG_profile_deserialization) timer.Start();
@@ -62,8 +63,8 @@
   SnapshotData snapshot_data(context_data);
   Deserializer deserializer(&snapshot_data);
 
-  MaybeHandle<Object> maybe_context =
-      deserializer.DeserializePartial(isolate, global_proxy);
+  MaybeHandle<Object> maybe_context = deserializer.DeserializePartial(
+      isolate, global_proxy, internal_fields_deserializer);
   Handle<Object> result;
   if (!maybe_context.ToHandle(&result)) return MaybeHandle<Context>();
   CHECK(result->IsContext());
diff --git a/src/snapshot/snapshot-source-sink.cc b/src/snapshot/snapshot-source-sink.cc
index cee5875..66a14bc 100644
--- a/src/snapshot/snapshot-source-sink.cc
+++ b/src/snapshot/snapshot-source-sink.cc
@@ -7,7 +7,7 @@
 
 #include "src/base/logging.h"
 #include "src/handles-inl.h"
-
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/snapshot/snapshot.h b/src/snapshot/snapshot.h
index 49a6092..010072a 100644
--- a/src/snapshot/snapshot.h
+++ b/src/snapshot/snapshot.h
@@ -59,7 +59,8 @@
   // Create a new context using the internal partial snapshot.
   static MaybeHandle<Context> NewContextFromSnapshot(
       Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
-      size_t context_index);
+      size_t context_index,
+      v8::DeserializeInternalFieldsCallback internal_fields_deserializer);
 
   static bool HaveASnapshotToStartFrom(Isolate* isolate);
 
diff --git a/src/snapshot/startup-serializer.cc b/src/snapshot/startup-serializer.cc
index 80598e8..4b27746 100644
--- a/src/snapshot/startup-serializer.cc
+++ b/src/snapshot/startup-serializer.cc
@@ -21,6 +21,7 @@
 }
 
 StartupSerializer::~StartupSerializer() {
+  RestoreExternalReferenceRedirectors(&accessor_infos_);
   OutputStatistics("StartupSerializer");
 }
 
@@ -66,6 +67,14 @@
 
   FlushSkip(skip);
 
+  if (isolate_->external_reference_redirector() && obj->IsAccessorInfo()) {
+    // Wipe external reference redirects in the accessor info.
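+    // info->js_getter() currently points at the redirect (set up e.g. when
+    // running on a simulator); serialize the original address taken from
+    // info->getter() instead, and re-install the redirect in the destructor.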
+    AccessorInfo* info = AccessorInfo::cast(obj);
+    Address original_address = Foreign::cast(info->getter())->foreign_address();
+    Foreign::cast(info->js_getter())->set_foreign_address(original_address);
+    accessor_infos_.Add(info);
+  }
+
   // Object has not yet been serialized.  Serialize it here.
   ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
                                      where_to_point);
@@ -116,10 +125,8 @@
   CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
   // No active or weak handles.
   CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
-  CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
+  CHECK_EQ(0, isolate->global_handles()->global_handles_count());
   CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
-  // We don't support serializing installed extensions.
-  CHECK(!isolate->has_installed_extensions());
   // First visit immortal immovables to make sure they end up in the first page.
   serializing_immortal_immovables_roots_ = true;
   isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
diff --git a/src/snapshot/startup-serializer.h b/src/snapshot/startup-serializer.h
index ac75c5d..4a597e6 100644
--- a/src/snapshot/startup-serializer.h
+++ b/src/snapshot/startup-serializer.h
@@ -73,6 +73,7 @@
   bool serializing_immortal_immovables_roots_;
   std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
   PartialCacheIndexMap partial_cache_index_map_;
+  List<AccessorInfo*> accessor_infos_;
   DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
 };
 
diff --git a/src/source-position-table.h b/src/source-position-table.h
index f569ac9..756838d 100644
--- a/src/source-position-table.h
+++ b/src/source-position-table.h
@@ -8,7 +8,6 @@
 #include "src/assert-scope.h"
 #include "src/checks.h"
 #include "src/globals.h"
-#include "src/handles.h"
 #include "src/source-position.h"
 #include "src/zone/zone-containers.h"
 
@@ -18,6 +17,8 @@
 class AbstractCode;
 class BytecodeArray;
 class ByteArray;
+template <typename T>
+class Handle;
 class Isolate;
 class Zone;
 
diff --git a/src/source-position.cc b/src/source-position.cc
index e9f86db..02bb339 100644
--- a/src/source-position.cc
+++ b/src/source-position.cc
@@ -11,10 +11,16 @@
 
 std::ostream& operator<<(std::ostream& out, const SourcePositionInfo& pos) {
   Handle<SharedFunctionInfo> function(pos.function);
-  Handle<Script> script(Script::cast(function->script()));
+  String* name = nullptr;
+  if (function->script()->IsScript()) {
+    Script* script = Script::cast(function->script());
+    if (script->name()->IsString()) {
+      name = String::cast(script->name());
+    }
+  }
   out << "<";
-  if (script->name()->IsString()) {
-    out << String::cast(script->name())->ToCString(DISALLOW_NULLS).get();
+  if (name != nullptr) {
+    out << name->ToCString(DISALLOW_NULLS).get();
   } else {
     out << "unknown";
   }
@@ -43,29 +49,16 @@
   return out;
 }
 
-SourcePositionInfo SourcePosition::Info(
-    Handle<SharedFunctionInfo> function) const {
-  SourcePositionInfo result(*this, function);
-  Handle<Script> script(Script::cast(function->script()));
-  Script::PositionInfo pos;
-  if (Script::GetPositionInfo(script, ScriptOffset(), &pos,
-                              Script::WITH_OFFSET)) {
-    result.line = pos.line;
-    result.column = pos.column;
-  }
-  return result;
-}
-
 std::vector<SourcePositionInfo> SourcePosition::InliningStack(
     CompilationInfo* cinfo) const {
   SourcePosition pos = *this;
   std::vector<SourcePositionInfo> stack;
   while (pos.isInlined()) {
     const auto& inl = cinfo->inlined_functions()[pos.InliningId()];
-    stack.push_back(pos.Info(inl.shared_info));
+    stack.push_back(SourcePositionInfo(pos, inl.shared_info));
     pos = inl.position.position;
   }
-  stack.push_back(pos.Info(cinfo->shared_info()));
+  stack.push_back(SourcePositionInfo(pos, cinfo->shared_info()));
   return stack;
 }
 
@@ -80,23 +73,26 @@
         deopt_data->InliningPositions()->get(pos.InliningId());
     Handle<SharedFunctionInfo> function(
         deopt_data->GetInlinedFunction(inl.inlined_function_id));
-    stack.push_back(pos.Info(function));
+    stack.push_back(SourcePositionInfo(pos, function));
     pos = inl.position;
   }
   Handle<SharedFunctionInfo> function(
       SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()));
-  stack.push_back(pos.Info(function));
+  stack.push_back(SourcePositionInfo(pos, function));
   return stack;
 }
 
 void SourcePosition::Print(std::ostream& out,
                            SharedFunctionInfo* function) const {
-  Script* script = Script::cast(function->script());
-  Object* source_name = script->name();
   Script::PositionInfo pos;
-  script->GetPositionInfo(ScriptOffset(), &pos, Script::WITH_OFFSET);
+  Object* source_name = nullptr;
+  if (function->script()->IsScript()) {
+    Script* script = Script::cast(function->script());
+    source_name = script->name();
+    script->GetPositionInfo(ScriptOffset(), &pos, Script::WITH_OFFSET);
+  }
   out << "<";
-  if (source_name->IsString()) {
+  if (source_name != nullptr && source_name->IsString()) {
     out << String::cast(source_name)
                ->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
                .get();
@@ -127,5 +123,19 @@
   }
 }
 
+SourcePositionInfo::SourcePositionInfo(SourcePosition pos,
+                                       Handle<SharedFunctionInfo> f)
+    : position(pos), function(f) {
+  if (function->script()->IsScript()) {
+    Handle<Script> script(Script::cast(function->script()));
+    Script::PositionInfo info;
+    if (Script::GetPositionInfo(script, pos.ScriptOffset(), &info,
+                                Script::WITH_OFFSET)) {
+      line = info.line;
+      column = info.column;
+    }
+  }
+}
+
 }  // namespace internal
 }  // namespace v8
diff --git a/src/source-position.h b/src/source-position.h
index aa7d31b..beab996 100644
--- a/src/source-position.h
+++ b/src/source-position.h
@@ -43,10 +43,11 @@
   }
   bool isInlined() const { return InliningId() != kNotInlined; }
 
+  // Assumes that the code object is optimized.
   std::vector<SourcePositionInfo> InliningStack(Handle<Code> code) const;
-  std::vector<SourcePositionInfo> InliningStack(CompilationInfo* code) const;
+  std::vector<SourcePositionInfo> InliningStack(CompilationInfo* cinfo) const;
 
-  void Print(std::ostream& out, Code* function) const;
+  void Print(std::ostream& out, Code* code) const;
 
   int ScriptOffset() const { return ScriptOffsetField::decode(value_) - 1; }
   int InliningId() const { return InliningIdField::decode(value_) - 1; }
@@ -75,7 +76,6 @@
 
  private:
   void Print(std::ostream& out, SharedFunctionInfo* function) const;
-  SourcePositionInfo Info(Handle<SharedFunctionInfo> script) const;
 
   // InliningId is in the high bits for better compression in
   // SourcePositionTable.
@@ -102,8 +102,7 @@
 };
 
 struct SourcePositionInfo {
-  explicit SourcePositionInfo(SourcePosition pos, Handle<SharedFunctionInfo> f)
-      : position(pos), function(f) {}
+  SourcePositionInfo(SourcePosition pos, Handle<SharedFunctionInfo> f);
 
   SourcePosition position;
   Handle<SharedFunctionInfo> function;
diff --git a/src/string-builder.h b/src/string-builder.h
index edc6476..c8c1329 100644
--- a/src/string-builder.h
+++ b/src/string-builder.h
@@ -310,6 +310,8 @@
 
   INLINE(bool HasOverflowed()) const { return overflowed_; }
 
+  INLINE(int Length()) const { return accumulator_->length() + current_index_; }
+
   // Change encoding to two-byte.
   void ChangeEncoding() {
     DCHECK_EQ(String::ONE_BYTE_ENCODING, encoding_);
diff --git a/src/string-case.cc b/src/string-case.cc
new file mode 100644
index 0000000..52d9636
--- /dev/null
+++ b/src/string-case.cc
@@ -0,0 +1,130 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/string-case.h"
+
+#include "src/assert-scope.h"
+#include "src/base/logging.h"
+#include "src/globals.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef DEBUG
+bool CheckFastAsciiConvert(char* dst, const char* src, int length, bool changed,
+                           bool is_to_lower) {
+  bool expected_changed = false;
+  for (int i = 0; i < length; i++) {
+    if (dst[i] == src[i]) continue;
+    expected_changed = true;
+    if (is_to_lower) {
+      DCHECK('A' <= src[i] && src[i] <= 'Z');
+      DCHECK(dst[i] == src[i] + ('a' - 'A'));
+    } else {
+      DCHECK('a' <= src[i] && src[i] <= 'z');
+      DCHECK(dst[i] == src[i] - ('a' - 'A'));
+    }
+  }
+  return (expected_changed == changed);
+}
+#endif
+
+const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
+const uintptr_t kAsciiMask = kOneInEveryByte << 7;
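+// kOneInEveryByte has the lowest bit of each byte set (0x0101... for every
+// word size), so kAsciiMask has the high bit of each byte set; a word ANDed
+// with kAsciiMask is non-zero iff it contains a non-ASCII byte.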
+
+// Given a word and two range boundaries, returns a word with the high bit
+// set in every byte iff the corresponding input byte was strictly in
+// the range (m, n). All the other bits in the result are cleared.
+// This function is only useful when it can be inlined and the
+// boundaries are statically known.
+// Requires: all bytes in the input word and the boundaries must be
+// ASCII (less than 0x7F).
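+// For illustration, with (m, n) = ('A' - 1, 'Z' + 1) the input word "aB$z"
+// yields a mask in which only the byte holding 'B' has its high bit set.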
+static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
+  // Use strict inequalities since in edge cases the function could be
+  // further simplified.
+  DCHECK(0 < m && m < n);
+  // Has high bit set in every w byte less than n.
+  uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
+  // Has high bit set in every w byte greater than m.
+  uintptr_t tmp2 = w + kOneInEveryByte * (0x7F - m);
+  return (tmp1 & tmp2 & (kOneInEveryByte * 0x80));
+}
+
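+// Converts the ASCII letters in src (to lower case if is_lower, to upper
+// case otherwise) and writes the result to dst. Returns the number of leading
+// bytes processed: length when the whole input was handled (in which case
+// *changed_out is also set), or the offset of the first non-ASCII byte.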
+template <bool is_lower>
+int FastAsciiConvert(char* dst, const char* src, int length,
+                     bool* changed_out) {
+#ifdef DEBUG
+  char* saved_dst = dst;
+#endif
+  const char* saved_src = src;
+  DisallowHeapAllocation no_gc;
+  // We rely on the distance between upper and lower case letters
+  // being a known power of 2.
+  DCHECK('a' - 'A' == (1 << 5));
+  // Boundaries for the range of input characters that require conversion.
+  static const char lo = is_lower ? 'A' - 1 : 'a' - 1;
+  static const char hi = is_lower ? 'Z' + 1 : 'z' + 1;
+  bool changed = false;
+  const char* const limit = src + length;
+
+  // dst is newly allocated and always aligned.
+  DCHECK(IsAligned(reinterpret_cast<intptr_t>(dst), sizeof(uintptr_t)));
+  // Only attempt processing one word at a time if src is also aligned.
+  if (IsAligned(reinterpret_cast<intptr_t>(src), sizeof(uintptr_t))) {
+    // Process the prefix of the input that requires no conversion, one
+    // aligned (machine) word at a time.
+    while (src <= limit - sizeof(uintptr_t)) {
+      const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
+      if ((w & kAsciiMask) != 0) return static_cast<int>(src - saved_src);
+      if (AsciiRangeMask(w, lo, hi) != 0) {
+        changed = true;
+        break;
+      }
+      *reinterpret_cast<uintptr_t*>(dst) = w;
+      src += sizeof(uintptr_t);
+      dst += sizeof(uintptr_t);
+    }
+    // Process the remainder of the input one word at a time, performing the
+    // conversion where required.
+    while (src <= limit - sizeof(uintptr_t)) {
+      const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
+      if ((w & kAsciiMask) != 0) return static_cast<int>(src - saved_src);
+      uintptr_t m = AsciiRangeMask(w, lo, hi);
+      // The mask has the high (7th) bit set in every byte that needs
+      // conversion, and the distance between cases is 1 << 5.
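+      // Shifting the mask right by two turns bit 7 into bit 5, so the XOR
+      // flips exactly the case bit of every byte that needs conversion.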
+      *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
+      src += sizeof(uintptr_t);
+      dst += sizeof(uintptr_t);
+    }
+  }
+  // Process the last few bytes of the input (or the whole input if src was
+  // not word-aligned).
+  while (src < limit) {
+    char c = *src;
+    if ((c & kAsciiMask) != 0) return static_cast<int>(src - saved_src);
+    if (lo < c && c < hi) {
+      c ^= (1 << 5);
+      changed = true;
+    }
+    *dst = c;
+    ++src;
+    ++dst;
+  }
+
+  DCHECK(
+      CheckFastAsciiConvert(saved_dst, saved_src, length, changed, is_lower));
+
+  *changed_out = changed;
+  return length;
+}
+
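+// Explicit instantiations of the two conversion directions, keeping the
+// template definition local to this file: <true> converts to lower case,
+// <false> to upper case.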
+template int FastAsciiConvert<false>(char* dst, const char* src, int length,
+                                     bool* changed_out);
+template int FastAsciiConvert<true>(char* dst, const char* src, int length,
+                                    bool* changed_out);
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/string-case.h b/src/string-case.h
new file mode 100644
index 0000000..3fe3bc2
--- /dev/null
+++ b/src/string-case.h
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STRING_CASE_H_
+#define V8_STRING_CASE_H_
+
+namespace v8 {
+namespace internal {
+
+template <bool is_lower>
+int FastAsciiConvert(char* dst, const char* src, int length, bool* changed_out);
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_STRING_CASE_H_
diff --git a/src/string-stream.cc b/src/string-stream.cc
index acfb917..650b3cf 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -7,6 +7,8 @@
 #include <memory>
 
 #include "src/handles-inl.h"
+#include "src/log.h"
+#include "src/objects-inl.h"
 #include "src/prototype.h"
 
 namespace v8 {
@@ -204,53 +206,6 @@
 }
 
 
-void StringStream::Add(const char* format) {
-  Add(CStrVector(format));
-}
-
-
-void StringStream::Add(Vector<const char> format) {
-  Add(format, Vector<FmtElm>::empty());
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0) {
-  const char argc = 1;
-  FmtElm argv[argc] = { arg0 };
-  Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1) {
-  const char argc = 2;
-  FmtElm argv[argc] = { arg0, arg1 };
-  Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
-                       FmtElm arg2) {
-  const char argc = 3;
-  FmtElm argv[argc] = { arg0, arg1, arg2 };
-  Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
-                       FmtElm arg2, FmtElm arg3) {
-  const char argc = 4;
-  FmtElm argv[argc] = { arg0, arg1, arg2, arg3 };
-  Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
-                       FmtElm arg2, FmtElm arg3, FmtElm arg4) {
-  const char argc = 5;
-  FmtElm argv[argc] = { arg0, arg1, arg2, arg3, arg4 };
-  Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
 std::unique_ptr<char[]> StringStream::ToCString() const {
   char* str = NewArray<char>(length_ + 1);
   MemCopy(str, buffer_, length_);
@@ -349,7 +304,8 @@
   DescriptorArray* descs = map->instance_descriptors();
   for (int i = 0; i < real_size; i++) {
     PropertyDetails details = descs->GetDetails(i);
-    if (details.type() == DATA) {
+    if (details.location() == kField) {
+      DCHECK_EQ(kData, details.kind());
       Object* key = descs->GetKey(i);
       if (key->IsString() || key->IsNumber()) {
         int len = 3;
@@ -528,8 +484,8 @@
   Object* name = fun->shared()->name();
   bool print_name = false;
   Isolate* isolate = fun->GetIsolate();
-  if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate) ||
-      receiver->IsTheHole(isolate) || receiver->IsJSProxy()) {
+  if (receiver->IsNullOrUndefined(isolate) || receiver->IsTheHole(isolate) ||
+      receiver->IsJSProxy()) {
     print_name = true;
   } else if (isolate->context() != nullptr) {
     if (!receiver->IsJSObject()) {
diff --git a/src/string-stream.h b/src/string-stream.h
index 1c1d27a..c9be46f 100644
--- a/src/string-stream.h
+++ b/src/string-stream.h
@@ -5,8 +5,6 @@
 #ifndef V8_STRING_STREAM_H_
 #define V8_STRING_STREAM_H_
 
-#include <memory>
-
 #include "src/allocation.h"
 #include "src/handles.h"
 #include "src/vector.h"
@@ -56,48 +54,53 @@
   DISALLOW_COPY_AND_ASSIGN(FixedStringAllocator);
 };
 
-
-class FmtElm final {
- public:
-  FmtElm(int value) : type_(INT) {  // NOLINT
-    data_.u_int_ = value;
-  }
-  explicit FmtElm(double value) : type_(DOUBLE) {
-    data_.u_double_ = value;
-  }
-  FmtElm(const char* value) : type_(C_STR) {  // NOLINT
-    data_.u_c_str_ = value;
-  }
-  FmtElm(const Vector<const uc16>& value) : type_(LC_STR) {  // NOLINT
-    data_.u_lc_str_ = &value;
-  }
-  FmtElm(Object* value) : type_(OBJ) {  // NOLINT
-    data_.u_obj_ = value;
-  }
-  FmtElm(Handle<Object> value) : type_(HANDLE) {  // NOLINT
-    data_.u_handle_ = value.location();
-  }
-  FmtElm(void* value) : type_(POINTER) {  // NOLINT
-    data_.u_pointer_ = value;
-  }
-
- private:
-  friend class StringStream;
-  enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE, POINTER };
-  Type type_;
-  union {
-    int u_int_;
-    double u_double_;
-    const char* u_c_str_;
-    const Vector<const uc16>* u_lc_str_;
-    Object* u_obj_;
-    Object** u_handle_;
-    void* u_pointer_;
-  } data_;
-};
-
-
 class StringStream final {
+  class FmtElm final {
+   public:
+    FmtElm(int value) : FmtElm(INT) {  // NOLINT
+      data_.u_int_ = value;
+    }
+    explicit FmtElm(double value) : FmtElm(DOUBLE) {  // NOLINT
+      data_.u_double_ = value;
+    }
+    FmtElm(const char* value) : FmtElm(C_STR) {  // NOLINT
+      data_.u_c_str_ = value;
+    }
+    FmtElm(const Vector<const uc16>& value) : FmtElm(LC_STR) {  // NOLINT
+      data_.u_lc_str_ = &value;
+    }
+    FmtElm(Object* value) : FmtElm(OBJ) {  // NOLINT
+      data_.u_obj_ = value;
+    }
+    FmtElm(Handle<Object> value) : FmtElm(HANDLE) {  // NOLINT
+      data_.u_handle_ = value.location();
+    }
+    FmtElm(void* value) : FmtElm(POINTER) {  // NOLINT
+      data_.u_pointer_ = value;
+    }
+
+   private:
+    friend class StringStream;
+    enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE, POINTER };
+
+#ifdef DEBUG
+    Type type_;
+    explicit FmtElm(Type type) : type_(type) {}
+#else
+    explicit FmtElm(Type) {}
+#endif
+
+    union {
+      int u_int_;
+      double u_double_;
+      const char* u_c_str_;
+      const Vector<const uc16>* u_lc_str_;
+      Object* u_obj_;
+      Object** u_handle_;
+      void* u_pointer_;
+    } data_;
+  };
+
  public:
   enum ObjectPrintMode { kPrintObjectConcise, kPrintObjectVerbose };
   StringStream(StringAllocator* allocator,
@@ -113,23 +116,19 @@
   bool Put(char c);
   bool Put(String* str);
   bool Put(String* str, int start, int end);
-  void Add(Vector<const char> format, Vector<FmtElm> elms);
-  void Add(const char* format);
-  void Add(Vector<const char> format);
-  void Add(const char* format, FmtElm arg0);
-  void Add(const char* format, FmtElm arg0, FmtElm arg1);
-  void Add(const char* format, FmtElm arg0, FmtElm arg1, FmtElm arg2);
-  void Add(const char* format,
-           FmtElm arg0,
-           FmtElm arg1,
-           FmtElm arg2,
-           FmtElm arg3);
-  void Add(const char* format,
-           FmtElm arg0,
-           FmtElm arg1,
-           FmtElm arg2,
-           FmtElm arg3,
-           FmtElm arg4);
+  void Add(const char* format) { Add(CStrVector(format)); }
+  void Add(Vector<const char> format) { Add(format, Vector<FmtElm>()); }
+
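+  // Variadic replacement for the former fixed-arity Add overloads: every
+  // argument is converted to a FmtElm. The zero-argument case is handled by
+  // the non-template overloads above, since an empty FmtElm array is not
+  // allowed.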
+  template <typename... Args>
+  void Add(const char* format, Args... args) {
+    Add(CStrVector(format), args...);
+  }
+
+  template <typename... Args>
+  void Add(Vector<const char> format, Args... args) {
+    FmtElm elems[]{args...};
+    Add(format, ArrayVector(elems));
+  }
 
   // Getting the message out.
   void OutputToFile(FILE* out);
@@ -165,6 +164,7 @@
   static const int kInitialCapacity = 16;
 
  private:
+  void Add(Vector<const char> format, Vector<FmtElm> elms);
   void PrintObject(Object* obj);
 
   StringAllocator* allocator_;
diff --git a/src/third_party/vtune/BUILD.gn b/src/third_party/vtune/BUILD.gn
new file mode 100644
index 0000000..33e8443
--- /dev/null
+++ b/src/third_party/vtune/BUILD.gn
@@ -0,0 +1,20 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/toolchain/toolchain.gni")
+
+static_library("v8_vtune") {
+  sources = [
+    "ittnotify_config.h",
+    "ittnotify_types.h",
+    "jitprofiling.cc",
+    "jitprofiling.h",
+    "v8-vtune.h",
+    "vtune-jit.cc",
+    "vtune-jit.h",
+  ]
+  deps = [
+    "//:v8",
+  ]
+}
diff --git a/src/tracing/traced-value.cc b/src/tracing/traced-value.cc
index 81be623..9b2a45c 100644
--- a/src/tracing/traced-value.cc
+++ b/src/tracing/traced-value.cc
@@ -24,40 +24,36 @@
 #define DEBUG_POP_CONTAINER() ((void)0)
 #endif
 
-std::string EscapeString(const std::string& value) {
-  std::string result;
-  result.reserve(value.length() + 2);
-  result += '"';
-  size_t length = value.length();
+void EscapeAndAppendString(const char* value, std::string* result) {
+  *result += '"';
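+  // Large enough for an escaped control character ("\uXXXX" plus NUL).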
   char number_buffer[10];
-  for (size_t src = 0; src < length; ++src) {
-    char c = value[src];
+  while (*value) {
+    char c = *value++;
     switch (c) {
       case '\t':
-        result += "\\t";
+        *result += "\\t";
         break;
       case '\n':
-        result += "\\n";
+        *result += "\\n";
         break;
       case '\"':
-        result += "\\\"";
+        *result += "\\\"";
         break;
       case '\\':
-        result += "\\\\";
+        *result += "\\\\";
         break;
       default:
         if (c < '\040') {
           base::OS::SNPrintF(
               number_buffer, arraysize(number_buffer), "\\u%04X",
               static_cast<unsigned>(static_cast<unsigned char>(c)));
-          result += number_buffer;
+          *result += number_buffer;
         } else {
-          result += c;
+          *result += c;
         }
     }
   }
-  result += '"';
-  return result;
+  *result += '"';
 }
 
 }  // namespace
@@ -95,10 +91,10 @@
   data_ += value ? "true" : "false";
 }
 
-void TracedValue::SetString(const char* name, const std::string& value) {
+void TracedValue::SetString(const char* name, const char* value) {
   DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
   WriteName(name);
-  data_ += EscapeString(value);
+  EscapeAndAppendString(value, &data_);
 }
 
 void TracedValue::BeginDictionary(const char* name) {
@@ -123,12 +119,6 @@
   data_ += std::to_string(value);
 }
 
-void TracedValue::AppendLongInteger(int64_t value) {
-  DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
-  WriteComma();
-  data_ += std::to_string(value);
-}
-
 void TracedValue::AppendDouble(double value) {
   DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
   WriteComma();
@@ -142,10 +132,10 @@
   data_ += value ? "true" : "false";
 }
 
-void TracedValue::AppendString(const std::string& value) {
+void TracedValue::AppendString(const char* value) {
   DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
   WriteComma();
-  data_ += EscapeString(value);
+  EscapeAndAppendString(value, &data_);
 }
 
 void TracedValue::BeginDictionary() {
diff --git a/src/tracing/traced-value.h b/src/tracing/traced-value.h
index b5c265c..7de4c23 100644
--- a/src/tracing/traced-value.h
+++ b/src/tracing/traced-value.h
@@ -29,15 +29,18 @@
   void SetInteger(const char* name, int value);
   void SetDouble(const char* name, double value);
   void SetBoolean(const char* name, bool value);
-  void SetString(const char* name, const std::string& value);
+  void SetString(const char* name, const char* value);
+  void SetString(const char* name, const std::string& value) {
+    SetString(name, value.c_str());
+  }
   void BeginDictionary(const char* name);
   void BeginArray(const char* name);
 
   void AppendInteger(int);
-  void AppendLongInteger(int64_t);
   void AppendDouble(double);
   void AppendBoolean(bool);
-  void AppendString(const std::string&);
+  void AppendString(const char*);
+  void AppendString(const std::string& value) { AppendString(value.c_str()); }
   void BeginArray();
   void BeginDictionary();
 
diff --git a/src/tracing/tracing-category-observer.cc b/src/tracing/tracing-category-observer.cc
index 3fffd2f..6a36158 100644
--- a/src/tracing/tracing-category-observer.cc
+++ b/src/tracing/tracing-category-observer.cc
@@ -21,6 +21,7 @@
   TRACE_EVENT_WARMUP_CATEGORY(
       TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling"));
   TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"));
+  TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats"));
 }
 
 void TracingCategoryObserver::TearDown() {
@@ -46,12 +47,18 @@
   if (enabled) {
     v8::internal::FLAG_gc_stats |= ENABLED_BY_TRACING;
   }
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats"),
+                                     &enabled);
+  if (enabled) {
+    v8::internal::FLAG_ic_stats |= ENABLED_BY_TRACING;
+  }
 }
 
 void TracingCategoryObserver::OnTraceDisabled() {
   v8::internal::FLAG_runtime_stats &=
       ~(ENABLED_BY_TRACING | ENABLED_BY_SAMPLING);
   v8::internal::FLAG_gc_stats &= ~ENABLED_BY_TRACING;
+  v8::internal::FLAG_ic_stats &= ~ENABLED_BY_TRACING;
 }
 
 }  // namespace tracing
diff --git a/src/transitions.cc b/src/transitions.cc
index 88c1549..5333fa6 100644
--- a/src/transitions.cc
+++ b/src/transitions.cc
@@ -202,7 +202,8 @@
   if (target == NULL) return Handle<Map>::null();
   PropertyDetails details = target->GetLastDescriptorDetails();
   DCHECK_EQ(NONE, details.attributes());
-  if (details.type() != DATA) return Handle<Map>::null();
+  if (details.location() != kField) return Handle<Map>::null();
+  DCHECK_EQ(kData, details.kind());
   return Handle<Map>(target);
 }
 
@@ -214,7 +215,8 @@
   if (!IsSimpleTransition(raw_transition)) return Handle<String>::null();
   Map* target = GetSimpleTransition(raw_transition);
   PropertyDetails details = GetSimpleTargetDetails(target);
-  if (details.type() != DATA) return Handle<String>::null();
+  if (details.location() != kField) return Handle<String>::null();
+  DCHECK_EQ(kData, details.kind());
   if (details.attributes() != NONE) return Handle<String>::null();
   Name* name = GetSimpleTransitionKey(target);
   if (!name->IsString()) return Handle<String>::null();
diff --git a/src/trap-handler/trap-handler.h b/src/trap-handler/trap-handler.h
new file mode 100644
index 0000000..e6dd9bd
--- /dev/null
+++ b/src/trap-handler/trap-handler.h
@@ -0,0 +1,26 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TRAP_HANDLER_H_
+#define V8_TRAP_HANDLER_H_
+
+namespace v8 {
+namespace internal {
+namespace trap_handler {
+
+struct ProtectedInstructionData {
+  // The offset of this instruction from the start of its code object.
+  intptr_t instr_offset;
+
+  // The offset of the landing pad from the start of its code object.
+  //
+  // TODO(eholk): Use a single landing pad and store parameters here.
+  intptr_t landing_offset;
+};
+
+}  // namespace trap_handler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TRAP_HANDLER_H_
diff --git a/src/type-feedback-vector-inl.h b/src/type-feedback-vector-inl.h
deleted file mode 100644
index 58dfe33..0000000
--- a/src/type-feedback-vector-inl.h
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TYPE_FEEDBACK_VECTOR_INL_H_
-#define V8_TYPE_FEEDBACK_VECTOR_INL_H_
-
-#include "src/globals.h"
-#include "src/type-feedback-vector.h"
-
-namespace v8 {
-namespace internal {
-
-
-template <typename Derived>
-FeedbackVectorSlot FeedbackVectorSpecBase<Derived>::AddSlot(
-    FeedbackVectorSlotKind kind) {
-  int slot = This()->slots();
-  int entries_per_slot = TypeFeedbackMetadata::GetSlotSize(kind);
-  This()->append(kind);
-  for (int i = 1; i < entries_per_slot; i++) {
-    This()->append(FeedbackVectorSlotKind::INVALID);
-  }
-  return FeedbackVectorSlot(slot);
-}
-
-
-// static
-TypeFeedbackMetadata* TypeFeedbackMetadata::cast(Object* obj) {
-  DCHECK(obj->IsTypeFeedbackVector());
-  return reinterpret_cast<TypeFeedbackMetadata*>(obj);
-}
-
-bool TypeFeedbackMetadata::is_empty() const {
-  if (length() == 0) return true;
-  return false;
-}
-
-int TypeFeedbackMetadata::slot_count() const {
-  if (length() == 0) return 0;
-  DCHECK(length() > kReservedIndexCount);
-  return Smi::cast(get(kSlotsCountIndex))->value();
-}
-
-
-// static
-TypeFeedbackVector* TypeFeedbackVector::cast(Object* obj) {
-  DCHECK(obj->IsTypeFeedbackVector());
-  return reinterpret_cast<TypeFeedbackVector*>(obj);
-}
-
-
-int TypeFeedbackMetadata::GetSlotSize(FeedbackVectorSlotKind kind) {
-  DCHECK_NE(FeedbackVectorSlotKind::INVALID, kind);
-  DCHECK_NE(FeedbackVectorSlotKind::KINDS_NUMBER, kind);
-  if (kind == FeedbackVectorSlotKind::GENERAL ||
-      kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC ||
-      kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC) {
-    return 1;
-  }
-
-  return 2;
-}
-
-bool TypeFeedbackMetadata::SlotRequiresName(FeedbackVectorSlotKind kind) {
-  switch (kind) {
-    case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
-      return true;
-
-    case FeedbackVectorSlotKind::CALL_IC:
-    case FeedbackVectorSlotKind::LOAD_IC:
-    case FeedbackVectorSlotKind::KEYED_LOAD_IC:
-    case FeedbackVectorSlotKind::STORE_IC:
-    case FeedbackVectorSlotKind::KEYED_STORE_IC:
-    case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
-    case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC:
-    case FeedbackVectorSlotKind::GENERAL:
-    case FeedbackVectorSlotKind::INVALID:
-      return false;
-
-    case FeedbackVectorSlotKind::KINDS_NUMBER:
-      break;
-  }
-  UNREACHABLE();
-  return false;
-}
-
-bool TypeFeedbackVector::is_empty() const {
-  return length() == kReservedIndexCount;
-}
-
-int TypeFeedbackVector::slot_count() const {
-  return length() - kReservedIndexCount;
-}
-
-
-TypeFeedbackMetadata* TypeFeedbackVector::metadata() const {
-  return TypeFeedbackMetadata::cast(get(kMetadataIndex));
-}
-
-int TypeFeedbackVector::invocation_count() const {
-  return Smi::cast(get(kInvocationCountIndex))->value();
-}
-
-// Conversion from an integer index to either a slot or an ic slot.
-// static
-FeedbackVectorSlot TypeFeedbackVector::ToSlot(int index) {
-  DCHECK(index >= kReservedIndexCount);
-  return FeedbackVectorSlot(index - kReservedIndexCount);
-}
-
-
-Object* TypeFeedbackVector::Get(FeedbackVectorSlot slot) const {
-  return get(GetIndex(slot));
-}
-
-
-void TypeFeedbackVector::Set(FeedbackVectorSlot slot, Object* value,
-                             WriteBarrierMode mode) {
-  set(GetIndex(slot), value, mode);
-}
-
-// Helper function to transform the feedback to BinaryOperationHint.
-BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback) {
-  switch (type_feedback) {
-    case BinaryOperationFeedback::kNone:
-      return BinaryOperationHint::kNone;
-    case BinaryOperationFeedback::kSignedSmall:
-      return BinaryOperationHint::kSignedSmall;
-    case BinaryOperationFeedback::kNumber:
-    case BinaryOperationFeedback::kNumberOrOddball:
-      return BinaryOperationHint::kNumberOrOddball;
-    case BinaryOperationFeedback::kString:
-      return BinaryOperationHint::kString;
-    case BinaryOperationFeedback::kAny:
-    default:
-      return BinaryOperationHint::kAny;
-  }
-  UNREACHABLE();
-  return BinaryOperationHint::kNone;
-}
-
-// Helper function to transform the feedback to CompareOperationHint.
-CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
-  switch (type_feedback) {
-    case CompareOperationFeedback::kNone:
-      return CompareOperationHint::kNone;
-    case CompareOperationFeedback::kSignedSmall:
-      return CompareOperationHint::kSignedSmall;
-    case CompareOperationFeedback::kNumber:
-      return CompareOperationHint::kNumber;
-    default:
-      return CompareOperationHint::kAny;
-  }
-  UNREACHABLE();
-  return CompareOperationHint::kNone;
-}
-
-void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic,
-                                       int* vector_ic_count,
-                                       bool code_is_interpreted) {
-  Object* megamorphic_sentinel =
-      *TypeFeedbackVector::MegamorphicSentinel(GetIsolate());
-  int with = 0;
-  int gen = 0;
-  int total = 0;
-  TypeFeedbackMetadataIterator iter(metadata());
-  while (iter.HasNext()) {
-    FeedbackVectorSlot slot = iter.Next();
-    FeedbackVectorSlotKind kind = iter.kind();
-
-    Object* const obj = Get(slot);
-    switch (kind) {
-      case FeedbackVectorSlotKind::CALL_IC:
-      case FeedbackVectorSlotKind::LOAD_IC:
-      case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
-      case FeedbackVectorSlotKind::KEYED_LOAD_IC:
-      case FeedbackVectorSlotKind::STORE_IC:
-      case FeedbackVectorSlotKind::KEYED_STORE_IC: {
-        if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
-          with++;
-        } else if (obj == megamorphic_sentinel) {
-          gen++;
-        }
-        total++;
-        break;
-      }
-      case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
-      case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC: {
-        // If we are not running interpreted code, we need to ignore the special
-        // IC slots for binaryop/compare used by the interpreter.
-        // TODO(mvstanton): Remove code_is_interpreted when full code is retired
-        // from service.
-        if (code_is_interpreted) {
-          int const feedback = Smi::cast(obj)->value();
-          if (kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC) {
-            CompareOperationHint hint =
-                CompareOperationHintFromFeedback(feedback);
-            if (hint == CompareOperationHint::kAny) {
-              gen++;
-            } else if (hint != CompareOperationHint::kNone) {
-              with++;
-            }
-          } else {
-            DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC, kind);
-            BinaryOperationHint hint =
-                BinaryOperationHintFromFeedback(feedback);
-            if (hint == BinaryOperationHint::kAny) {
-              gen++;
-            } else if (hint != BinaryOperationHint::kNone) {
-              with++;
-            }
-          }
-          total++;
-        }
-        break;
-      }
-      case FeedbackVectorSlotKind::GENERAL:
-        break;
-      case FeedbackVectorSlotKind::INVALID:
-      case FeedbackVectorSlotKind::KINDS_NUMBER:
-        UNREACHABLE();
-        break;
-    }
-  }
-
-  *with_type_info = with;
-  *generic = gen;
-  *vector_ic_count = total;
-}
-
-Handle<Symbol> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
-  return isolate->factory()->uninitialized_symbol();
-}
-
-Handle<Symbol> TypeFeedbackVector::MegamorphicSentinel(Isolate* isolate) {
-  return isolate->factory()->megamorphic_symbol();
-}
-
-Handle<Symbol> TypeFeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
-  return isolate->factory()->premonomorphic_symbol();
-}
-
-Symbol* TypeFeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
-  return isolate->heap()->uninitialized_symbol();
-}
-
-bool TypeFeedbackMetadataIterator::HasNext() const {
-  return next_slot_.ToInt() < metadata()->slot_count();
-}
-
-FeedbackVectorSlot TypeFeedbackMetadataIterator::Next() {
-  DCHECK(HasNext());
-  cur_slot_ = next_slot_;
-  slot_kind_ = metadata()->GetKind(cur_slot_);
-  next_slot_ = FeedbackVectorSlot(next_slot_.ToInt() + entry_size());
-  return cur_slot_;
-}
-
-int TypeFeedbackMetadataIterator::entry_size() const {
-  return TypeFeedbackMetadata::GetSlotSize(kind());
-}
-
-Object* FeedbackNexus::GetFeedback() const { return vector()->Get(slot()); }
-
-
-Object* FeedbackNexus::GetFeedbackExtra() const {
-#ifdef DEBUG
-  FeedbackVectorSlotKind kind = vector()->GetKind(slot());
-  DCHECK_LT(1, TypeFeedbackMetadata::GetSlotSize(kind));
-#endif
-  int extra_index = vector()->GetIndex(slot()) + 1;
-  return vector()->get(extra_index);
-}
-
-
-void FeedbackNexus::SetFeedback(Object* feedback, WriteBarrierMode mode) {
-  vector()->Set(slot(), feedback, mode);
-}
-
-
-void FeedbackNexus::SetFeedbackExtra(Object* feedback_extra,
-                                     WriteBarrierMode mode) {
-#ifdef DEBUG
-  FeedbackVectorSlotKind kind = vector()->GetKind(slot());
-  DCHECK_LT(1, TypeFeedbackMetadata::GetSlotSize(kind));
-#endif
-  int index = vector()->GetIndex(slot()) + 1;
-  vector()->set(index, feedback_extra, mode);
-}
-
-
-Isolate* FeedbackNexus::GetIsolate() const { return vector()->GetIsolate(); }
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TYPE_FEEDBACK_VECTOR_INL_H_
diff --git a/src/type-feedback-vector.h b/src/type-feedback-vector.h
deleted file mode 100644
index 3bb51c1..0000000
--- a/src/type-feedback-vector.h
+++ /dev/null
@@ -1,724 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TYPE_FEEDBACK_VECTOR_H_
-#define V8_TYPE_FEEDBACK_VECTOR_H_
-
-#include <vector>
-
-#include "src/base/logging.h"
-#include "src/elements-kind.h"
-#include "src/objects.h"
-#include "src/type-hints.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-enum class FeedbackVectorSlotKind {
-  // This kind means that the slot points to the middle of other slot
-  // which occupies more than one feedback vector element.
-  // There must be no such slots in the system.
-  INVALID,
-
-  CALL_IC,
-  LOAD_IC,
-  LOAD_GLOBAL_IC,
-  KEYED_LOAD_IC,
-  STORE_IC,
-  KEYED_STORE_IC,
-  INTERPRETER_BINARYOP_IC,
-  INTERPRETER_COMPARE_IC,
-
-  // This is a general purpose slot that occupies one feedback vector element.
-  GENERAL,
-
-  KINDS_NUMBER  // Last value indicating number of kinds.
-};
-
-std::ostream& operator<<(std::ostream& os, FeedbackVectorSlotKind kind);
-
-
-template <typename Derived>
-class FeedbackVectorSpecBase {
- public:
-  inline FeedbackVectorSlot AddSlot(FeedbackVectorSlotKind kind);
-
-  FeedbackVectorSlot AddCallICSlot() {
-    return AddSlot(FeedbackVectorSlotKind::CALL_IC);
-  }
-
-  FeedbackVectorSlot AddLoadICSlot() {
-    return AddSlot(FeedbackVectorSlotKind::LOAD_IC);
-  }
-
-  FeedbackVectorSlot AddLoadGlobalICSlot(Handle<String> name) {
-    This()->append_name(name);
-    return AddSlot(FeedbackVectorSlotKind::LOAD_GLOBAL_IC);
-  }
-
-  FeedbackVectorSlot AddKeyedLoadICSlot() {
-    return AddSlot(FeedbackVectorSlotKind::KEYED_LOAD_IC);
-  }
-
-  FeedbackVectorSlot AddStoreICSlot() {
-    return AddSlot(FeedbackVectorSlotKind::STORE_IC);
-  }
-
-  FeedbackVectorSlot AddKeyedStoreICSlot() {
-    return AddSlot(FeedbackVectorSlotKind::KEYED_STORE_IC);
-  }
-
-  FeedbackVectorSlot AddInterpreterBinaryOpICSlot() {
-    return AddSlot(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC);
-  }
-
-  FeedbackVectorSlot AddInterpreterCompareICSlot() {
-    return AddSlot(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC);
-  }
-
-  FeedbackVectorSlot AddGeneralSlot() {
-    return AddSlot(FeedbackVectorSlotKind::GENERAL);
-  }
-
-#ifdef OBJECT_PRINT
-  // For gdb debugging.
-  void Print();
-#endif  // OBJECT_PRINT
-
-  DECLARE_PRINTER(FeedbackVectorSpec)
-
- private:
-  Derived* This() { return static_cast<Derived*>(this); }
-};
-
-
-class StaticFeedbackVectorSpec
-    : public FeedbackVectorSpecBase<StaticFeedbackVectorSpec> {
- public:
-  StaticFeedbackVectorSpec() : slot_count_(0), name_count_(0) {}
-
-  int slots() const { return slot_count_; }
-
-  FeedbackVectorSlotKind GetKind(int slot) const {
-    DCHECK(slot >= 0 && slot < slot_count_);
-    return kinds_[slot];
-  }
-
-  int name_count() const { return name_count_; }
-
-  Handle<String> GetName(int index) const {
-    DCHECK(index >= 0 && index < name_count_);
-    return names_[index];
-  }
-
- private:
-  friend class FeedbackVectorSpecBase<StaticFeedbackVectorSpec>;
-
-  void append(FeedbackVectorSlotKind kind) {
-    DCHECK(slot_count_ < kMaxLength);
-    kinds_[slot_count_++] = kind;
-  }
-
-  void append_name(Handle<String> name) {
-    DCHECK(name_count_ < kMaxLength);
-    names_[name_count_++] = name;
-  }
-
-  static const int kMaxLength = 12;
-
-  int slot_count_;
-  FeedbackVectorSlotKind kinds_[kMaxLength];
-  int name_count_;
-  Handle<String> names_[kMaxLength];
-};
-
-
-class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
- public:
-  explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone), names_(zone) {
-    slot_kinds_.reserve(16);
-    names_.reserve(8);
-  }
-
-  int slots() const { return static_cast<int>(slot_kinds_.size()); }
-
-  FeedbackVectorSlotKind GetKind(int slot) const {
-    return static_cast<FeedbackVectorSlotKind>(slot_kinds_.at(slot));
-  }
-
-  int name_count() const { return static_cast<int>(names_.size()); }
-
-  Handle<String> GetName(int index) const { return names_.at(index); }
-
- private:
-  friend class FeedbackVectorSpecBase<FeedbackVectorSpec>;
-
-  void append(FeedbackVectorSlotKind kind) {
-    slot_kinds_.push_back(static_cast<unsigned char>(kind));
-  }
-
-  void append_name(Handle<String> name) { names_.push_back(name); }
-
-  ZoneVector<unsigned char> slot_kinds_;
-  ZoneVector<Handle<String>> names_;
-};
-
-
-// The shape of the TypeFeedbackMetadata is an array with:
-// 0: slot_count
-// 1: names table
-// 2..N: slot kinds packed into a bit vector
-//
-class TypeFeedbackMetadata : public FixedArray {
- public:
-  // Casting.
-  static inline TypeFeedbackMetadata* cast(Object* obj);
-
-  static const int kSlotsCountIndex = 0;
-  static const int kNamesTableIndex = 1;
-  static const int kReservedIndexCount = 2;
-
-  static const int kNameTableEntrySize = 2;
-  static const int kNameTableSlotIndex = 0;
-  static const int kNameTableNameIndex = 1;
-
-  // Returns number of feedback vector elements used by given slot kind.
-  static inline int GetSlotSize(FeedbackVectorSlotKind kind);
-
-  // Defines if slots of given kind require "name".
-  static inline bool SlotRequiresName(FeedbackVectorSlotKind kind);
-
-  bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
-
-  bool DiffersFrom(const TypeFeedbackMetadata* other_metadata) const;
-
-  inline bool is_empty() const;
-
-  // Returns number of slots in the vector.
-  inline int slot_count() const;
-
-  // Returns slot kind for given slot.
-  FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
-
-  // Returns name for given slot.
-  String* GetName(FeedbackVectorSlot slot) const;
-
-  template <typename Spec>
-  static Handle<TypeFeedbackMetadata> New(Isolate* isolate, const Spec* spec);
-
-#ifdef OBJECT_PRINT
-  // For gdb debugging.
-  void Print();
-#endif  // OBJECT_PRINT
-
-  DECLARE_PRINTER(TypeFeedbackMetadata)
-
-  static const char* Kind2String(FeedbackVectorSlotKind kind);
-
- private:
-  static const int kFeedbackVectorSlotKindBits = 5;
-  STATIC_ASSERT(static_cast<int>(FeedbackVectorSlotKind::KINDS_NUMBER) <
-                (1 << kFeedbackVectorSlotKindBits));
-
-  void SetKind(FeedbackVectorSlot slot, FeedbackVectorSlotKind kind);
-
-  typedef BitSetComputer<FeedbackVectorSlotKind, kFeedbackVectorSlotKindBits,
-                         kSmiValueSize, uint32_t> VectorICComputer;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackMetadata);
-};
-
-
-// The shape of the TypeFeedbackVector is an array with:
-// 0: feedback metadata
-// 1: invocation count
-// 2: feedback slot #0
-// ...
-// 2 + slot_count - 1: feedback slot #(slot_count-1)
-//
-class TypeFeedbackVector : public FixedArray {
- public:
-  // Casting.
-  static inline TypeFeedbackVector* cast(Object* obj);
-
-  static const int kMetadataIndex = 0;
-  static const int kInvocationCountIndex = 1;
-  static const int kReservedIndexCount = 2;
-
-  inline void ComputeCounts(int* with_type_info, int* generic,
-                            int* vector_ic_count, bool code_is_interpreted);
-
-  inline bool is_empty() const;
-
-  // Returns number of slots in the vector.
-  inline int slot_count() const;
-
-  inline TypeFeedbackMetadata* metadata() const;
-  inline int invocation_count() const;
-
-  // Conversion from a slot to an integer index to the underlying array.
-  static int GetIndex(FeedbackVectorSlot slot) {
-    return kReservedIndexCount + slot.ToInt();
-  }
-  static int GetIndexFromSpec(const FeedbackVectorSpec* spec,
-                              FeedbackVectorSlot slot);
-
-  // Conversion from an integer index to the underlying array to a slot.
-  static inline FeedbackVectorSlot ToSlot(int index);
-  inline Object* Get(FeedbackVectorSlot slot) const;
-  inline void Set(FeedbackVectorSlot slot, Object* value,
-                  WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
-  // Returns slot kind for given slot.
-  FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
-  // Returns name corresponding to given slot or an empty string.
-  String* GetName(FeedbackVectorSlot slot) const;
-
-  static Handle<TypeFeedbackVector> New(Isolate* isolate,
-                                        Handle<TypeFeedbackMetadata> metadata);
-
-  static Handle<TypeFeedbackVector> Copy(Isolate* isolate,
-                                         Handle<TypeFeedbackVector> vector);
-
-#ifdef OBJECT_PRINT
-  // For gdb debugging.
-  void Print();
-#endif  // OBJECT_PRINT
-
-  DECLARE_PRINTER(TypeFeedbackVector)
-
-  // Clears the vector slots.
-  void ClearSlots(SharedFunctionInfo* shared) { ClearSlotsImpl(shared, true); }
-
-  void ClearSlotsAtGCTime(SharedFunctionInfo* shared) {
-    ClearSlotsImpl(shared, false);
-  }
-
-  static void ClearAllKeyedStoreICs(Isolate* isolate);
-  void ClearKeyedStoreICs(SharedFunctionInfo* shared);
-
-  // The object that indicates an uninitialized cache.
-  static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate);
-
-  // The object that indicates a megamorphic state.
-  static inline Handle<Symbol> MegamorphicSentinel(Isolate* isolate);
-
-  // The object that indicates a premonomorphic state.
-  static inline Handle<Symbol> PremonomorphicSentinel(Isolate* isolate);
-
-  // A raw version of the uninitialized sentinel that's safe to read during
-  // garbage collection (e.g., for patching the cache).
-  static inline Symbol* RawUninitializedSentinel(Isolate* isolate);
-
-  static const int kDummyLoadICSlot = 0;
-  static const int kDummyKeyedLoadICSlot = 2;
-  static const int kDummyStoreICSlot = 4;
-  static const int kDummyKeyedStoreICSlot = 6;
-
-  static Handle<TypeFeedbackVector> DummyVector(Isolate* isolate);
-  static FeedbackVectorSlot DummySlot(int dummyIndex) {
-    DCHECK(dummyIndex >= 0 && dummyIndex <= kDummyKeyedStoreICSlot);
-    return FeedbackVectorSlot(dummyIndex);
-  }
-
- private:
-  void ClearSlotsImpl(SharedFunctionInfo* shared, bool force_clear);
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackVector);
-};
-
-
-// The following asserts protect an optimization in type feedback vector
-// code that looks into the contents of a slot assuming to find a String,
-// a Symbol, an AllocationSite, a WeakCell, or a FixedArray.
-STATIC_ASSERT(WeakCell::kSize >= 2 * kPointerSize);
-STATIC_ASSERT(WeakCell::kValueOffset == AllocationSite::kTransitionInfoOffset);
-STATIC_ASSERT(WeakCell::kValueOffset == FixedArray::kLengthOffset);
-STATIC_ASSERT(WeakCell::kValueOffset == Name::kHashFieldSlot);
-// Verify that an empty hash field looks like a tagged object, but can't
-// possibly be confused with a pointer.
-STATIC_ASSERT((Name::kEmptyHashField & kHeapObjectTag) == kHeapObjectTag);
-STATIC_ASSERT(Name::kEmptyHashField == 0x3);
-// Verify that a set hash field will not look like a tagged object.
-STATIC_ASSERT(Name::kHashNotComputedMask == kHeapObjectTag);
-
-
-class TypeFeedbackMetadataIterator {
- public:
-  explicit TypeFeedbackMetadataIterator(Handle<TypeFeedbackMetadata> metadata)
-      : metadata_handle_(metadata),
-        next_slot_(FeedbackVectorSlot(0)),
-        slot_kind_(FeedbackVectorSlotKind::INVALID) {}
-
-  explicit TypeFeedbackMetadataIterator(TypeFeedbackMetadata* metadata)
-      : metadata_(metadata),
-        next_slot_(FeedbackVectorSlot(0)),
-        slot_kind_(FeedbackVectorSlotKind::INVALID) {}
-
-  inline bool HasNext() const;
-
-  inline FeedbackVectorSlot Next();
-
-  // Returns slot kind of the last slot returned by Next().
-  FeedbackVectorSlotKind kind() const {
-    DCHECK_NE(FeedbackVectorSlotKind::INVALID, slot_kind_);
-    DCHECK_NE(FeedbackVectorSlotKind::KINDS_NUMBER, slot_kind_);
-    return slot_kind_;
-  }
-
-  // Returns entry size of the last slot returned by Next().
-  inline int entry_size() const;
-
-  String* name() const {
-    DCHECK(TypeFeedbackMetadata::SlotRequiresName(kind()));
-    return metadata()->GetName(cur_slot_);
-  }
-
- private:
-  TypeFeedbackMetadata* metadata() const {
-    return !metadata_handle_.is_null() ? *metadata_handle_ : metadata_;
-  }
-
-  // The reason for having both a handle and a raw pointer to the metadata
-  // is to have a single iterator implementation for both "handlified" and
-  // raw pointer use cases.
-  Handle<TypeFeedbackMetadata> metadata_handle_;
-  TypeFeedbackMetadata* metadata_;
-  FeedbackVectorSlot cur_slot_;
-  FeedbackVectorSlot next_slot_;
-  FeedbackVectorSlotKind slot_kind_;
-};
-
-
-// A FeedbackNexus is the combination of a TypeFeedbackVector and a slot.
-// Derived classes customize the update and retrieval of feedback.
-class FeedbackNexus {
- public:
-  FeedbackNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : vector_handle_(vector), vector_(NULL), slot_(slot) {}
-  FeedbackNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
-      : vector_(vector), slot_(slot) {}
-  virtual ~FeedbackNexus() {}
-
-  Handle<TypeFeedbackVector> vector_handle() const {
-    DCHECK(vector_ == NULL);
-    return vector_handle_;
-  }
-  TypeFeedbackVector* vector() const {
-    return vector_handle_.is_null() ? vector_ : *vector_handle_;
-  }
-  FeedbackVectorSlot slot() const { return slot_; }
-
-  InlineCacheState ic_state() const { return StateFromFeedback(); }
-  bool IsUninitialized() const { return StateFromFeedback() == UNINITIALIZED; }
-  Map* FindFirstMap() const {
-    MapHandleList maps;
-    ExtractMaps(&maps);
-    if (maps.length() > 0) return *maps.at(0);
-    return NULL;
-  }
-
-  // TODO(mvstanton): remove FindAllMaps, it didn't survive a code review.
-  void FindAllMaps(MapHandleList* maps) const { ExtractMaps(maps); }
-
-  virtual InlineCacheState StateFromFeedback() const = 0;
-  virtual int ExtractMaps(MapHandleList* maps) const;
-  virtual MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const;
-  virtual bool FindHandlers(List<Handle<Object>>* code_list,
-                            int length = -1) const;
-  virtual Name* FindFirstName() const { return NULL; }
-
-  virtual void ConfigureUninitialized();
-  virtual void ConfigurePremonomorphic();
-  virtual void ConfigureMegamorphic();
-
-  inline Object* GetFeedback() const;
-  inline Object* GetFeedbackExtra() const;
-
-  inline Isolate* GetIsolate() const;
-
- protected:
-  inline void SetFeedback(Object* feedback,
-                          WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-  inline void SetFeedbackExtra(Object* feedback_extra,
-                               WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
-  Handle<FixedArray> EnsureArrayOfSize(int length);
-  Handle<FixedArray> EnsureExtraArrayOfSize(int length);
-  void InstallHandlers(Handle<FixedArray> array, MapHandleList* maps,
-                       List<Handle<Object>>* handlers);
-
- private:
-  // The reason for having both a vector handle and a raw pointer is that we
-  // can and should use handles during IC miss, but not during GC when we
-  // clear ICs. A handle to the vector is preferable because it permits more
-  // operations, such as allocation.
-  Handle<TypeFeedbackVector> vector_handle_;
-  TypeFeedbackVector* vector_;
-  FeedbackVectorSlot slot_;
-};
-
-
-class CallICNexus final : public FeedbackNexus {
- public:
-  CallICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::CALL_IC, vector->GetKind(slot));
-  }
-  CallICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::CALL_IC, vector->GetKind(slot));
-  }
-
-  void Clear(Code* host);
-
-  void ConfigureUninitialized() override;
-  void ConfigureMonomorphicArray();
-  void ConfigureMonomorphic(Handle<JSFunction> function);
-  void ConfigureMegamorphic() final;
-  void ConfigureMegamorphic(int call_count);
-
-  InlineCacheState StateFromFeedback() const final;
-
-  int ExtractMaps(MapHandleList* maps) const final {
-    // CallICs don't record map feedback.
-    return 0;
-  }
-  MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
-    return MaybeHandle<Code>();
-  }
-  bool FindHandlers(List<Handle<Object>>* code_list,
-                    int length = -1) const final {
-    return length == 0;
-  }
-
-  int ExtractCallCount();
-
-  // Compute the call frequency based on the call count and the invocation
-  // count (taken from the type feedback vector).
-  float ComputeCallFrequency();
-};
-
-
-class LoadICNexus : public FeedbackNexus {
- public:
-  LoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::LOAD_IC, vector->GetKind(slot));
-  }
-  explicit LoadICNexus(Isolate* isolate)
-      : FeedbackNexus(
-            TypeFeedbackVector::DummyVector(isolate),
-            FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot)) {}
-  LoadICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::LOAD_IC, vector->GetKind(slot));
-  }
-
-  void Clear(Code* host);
-
-  void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Object> handler);
-
-  void ConfigurePolymorphic(MapHandleList* maps,
-                            List<Handle<Object>>* handlers);
-
-  InlineCacheState StateFromFeedback() const override;
-};
-
-class LoadGlobalICNexus : public FeedbackNexus {
- public:
-  LoadGlobalICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC, vector->GetKind(slot));
-  }
-  LoadGlobalICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC, vector->GetKind(slot));
-  }
-
-  int ExtractMaps(MapHandleList* maps) const final {
-    // LoadGlobalICs don't record map feedback.
-    return 0;
-  }
-  MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
-    return MaybeHandle<Code>();
-  }
-  bool FindHandlers(List<Handle<Object>>* code_list,
-                    int length = -1) const final {
-    return length == 0;
-  }
-
-  void ConfigureMegamorphic() override { UNREACHABLE(); }
-  void Clear(Code* host);
-
-  void ConfigureUninitialized() override;
-  void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
-  void ConfigureHandlerMode(Handle<Code> handler);
-
-  InlineCacheState StateFromFeedback() const override;
-};
-
-class KeyedLoadICNexus : public FeedbackNexus {
- public:
-  KeyedLoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
-  }
-  explicit KeyedLoadICNexus(Isolate* isolate)
-      : FeedbackNexus(
-            TypeFeedbackVector::DummyVector(isolate),
-            FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot)) {}
-  KeyedLoadICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
-  }
-
-  void Clear(Code* host);
-
-  // name can be a null handle for element loads.
-  void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
-                            Handle<Object> handler);
-  // name can be null.
-  void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
-                            List<Handle<Object>>* handlers);
-
-  void ConfigureMegamorphicKeyed(IcCheckType property_type);
-
-  IcCheckType GetKeyType() const;
-  InlineCacheState StateFromFeedback() const override;
-  Name* FindFirstName() const override;
-};
-
-
-class StoreICNexus : public FeedbackNexus {
- public:
-  StoreICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
-  }
-  explicit StoreICNexus(Isolate* isolate)
-      : FeedbackNexus(
-            TypeFeedbackVector::DummyVector(isolate),
-            FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot)) {}
-  StoreICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
-  }
-
-  void Clear(Code* host);
-
-  void ConfigureMonomorphic(Handle<Map> receiver_map, Handle<Object> handler);
-
-  void ConfigurePolymorphic(MapHandleList* maps,
-                            List<Handle<Object>>* handlers);
-
-  InlineCacheState StateFromFeedback() const override;
-};
-
-
-class KeyedStoreICNexus : public FeedbackNexus {
- public:
-  KeyedStoreICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, vector->GetKind(slot));
-  }
-  explicit KeyedStoreICNexus(Isolate* isolate)
-      : FeedbackNexus(
-            TypeFeedbackVector::DummyVector(isolate),
-            FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot)) {}
-  KeyedStoreICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, vector->GetKind(slot));
-  }
-
-  void Clear(Code* host);
-
-  // name can be a null handle for element stores.
-  void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
-                            Handle<Object> handler);
-  // name can be null.
-  void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
-                            List<Handle<Object>>* handlers);
-  void ConfigurePolymorphic(MapHandleList* maps,
-                            MapHandleList* transitioned_maps,
-                            CodeHandleList* handlers);
-  void ConfigureMegamorphicKeyed(IcCheckType property_type);
-
-  KeyedAccessStoreMode GetKeyedAccessStoreMode() const;
-  IcCheckType GetKeyType() const;
-
-  InlineCacheState StateFromFeedback() const override;
-  Name* FindFirstName() const override;
-};
-
-class BinaryOpICNexus final : public FeedbackNexus {
- public:
-  BinaryOpICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
-              vector->GetKind(slot));
-  }
-  BinaryOpICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
-              vector->GetKind(slot));
-  }
-
-  void Clear(Code* host);
-
-  InlineCacheState StateFromFeedback() const final;
-  BinaryOperationHint GetBinaryOperationFeedback() const;
-
-  int ExtractMaps(MapHandleList* maps) const final {
-    // BinaryOpICs don't record map feedback.
-    return 0;
-  }
-  MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
-    return MaybeHandle<Code>();
-  }
-  bool FindHandlers(List<Handle<Object>>* code_list,
-                    int length = -1) const final {
-    return length == 0;
-  }
-};
-
-class CompareICNexus final : public FeedbackNexus {
- public:
-  CompareICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
-              vector->GetKind(slot));
-  }
-  CompareICNexus(TypeFeedbackVector* vector, FeedbackVectorSlot slot)
-      : FeedbackNexus(vector, slot) {
-    DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
-              vector->GetKind(slot));
-  }
-
-  void Clear(Code* host);
-
-  InlineCacheState StateFromFeedback() const final;
-  CompareOperationHint GetCompareOperationFeedback() const;
-
-  int ExtractMaps(MapHandleList* maps) const final {
-    // CompareICs don't record map feedback.
-    return 0;
-  }
-  MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const final {
-    return MaybeHandle<Code>();
-  }
-  bool FindHandlers(List<Handle<Object>>* code_list,
-                    int length = -1) const final {
-    return length == 0;
-  }
-};
-
-inline BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
-inline CompareOperationHint CompareOperationHintFromFeedback(int type_feedback);
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TYPE_FEEDBACK_VECTOR_H_
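Editor's note: the paired constructors plus the metadata()/vector() accessors in the deleted header above implement a small pattern worth calling out: a single iterator (or nexus) body serves both handlified callers (IC miss, where allocation may move objects) and raw-pointer callers (GC time, when handles are unavailable). Below is a minimal, self-contained sketch of that pattern; HandleLike is a hypothetical stand-in for v8::internal::Handle<T>, not the real API.

// Sketch of the dual handle/raw-pointer pattern, under stand-in types.
#include <cassert>

struct Metadata {
  int slot_count;
};

// Hypothetical stand-in: a handle is an extra indirection whose slot the GC
// may update when the object moves.
template <typename T>
struct HandleLike {
  T** location;
  bool is_null() const { return location == nullptr; }
  T* operator*() const { return *location; }
};

class IteratorSketch {
 public:
  // Handlified use case (safe across allocation).
  explicit IteratorSketch(HandleLike<Metadata> handle)
      : handle_(handle), raw_(nullptr) {}
  // Raw-pointer use case (e.g. during GC, when handles are unavailable).
  explicit IteratorSketch(Metadata* raw)
      : handle_{nullptr}, raw_(raw) {}

  int slot_count() const { return metadata()->slot_count; }

 private:
  // Single accessor serving both cases: prefer the handle when present.
  Metadata* metadata() const { return !handle_.is_null() ? *handle_ : raw_; }

  HandleLike<Metadata> handle_;
  Metadata* raw_;
};

int main() {
  Metadata m{3};
  Metadata* slot = &m;
  assert(IteratorSketch(HandleLike<Metadata>{&slot}).slot_count() == 3);
  assert(IteratorSketch(&m).slot_count() == 3);
  return 0;
}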
diff --git a/src/type-hints.cc b/src/type-hints.cc
index 1c40c59..29a15c6 100644
--- a/src/type-hints.cc
+++ b/src/type-hints.cc
@@ -36,6 +36,12 @@
       return os << "Number";
     case CompareOperationHint::kNumberOrOddball:
       return os << "NumberOrOddball";
+    case CompareOperationHint::kInternalizedString:
+      return os << "InternalizedString";
+    case CompareOperationHint::kString:
+      return os << "String";
+    case CompareOperationHint::kReceiver:
+      return os << "Receiver";
     case CompareOperationHint::kAny:
       return os << "Any";
   }
@@ -63,8 +69,6 @@
       return os << "Symbol";
     case ToBooleanHint::kHeapNumber:
       return os << "HeapNumber";
-    case ToBooleanHint::kSimdValue:
-      return os << "SimdValue";
     case ToBooleanHint::kAny:
       return os << "Any";
     case ToBooleanHint::kNeedsMap:
@@ -74,6 +78,35 @@
   return os;
 }
 
+std::string ToString(ToBooleanHint hint) {
+  switch (hint) {
+    case ToBooleanHint::kNone:
+      return "None";
+    case ToBooleanHint::kUndefined:
+      return "Undefined";
+    case ToBooleanHint::kBoolean:
+      return "Boolean";
+    case ToBooleanHint::kNull:
+      return "Null";
+    case ToBooleanHint::kSmallInteger:
+      return "SmallInteger";
+    case ToBooleanHint::kReceiver:
+      return "Receiver";
+    case ToBooleanHint::kString:
+      return "String";
+    case ToBooleanHint::kSymbol:
+      return "Symbol";
+    case ToBooleanHint::kHeapNumber:
+      return "HeapNumber";
+    case ToBooleanHint::kAny:
+      return "Any";
+    case ToBooleanHint::kNeedsMap:
+      return "NeedsMap";
+  }
+  UNREACHABLE();
+  return "";
+}
+
 std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
   if (hints == ToBooleanHint::kAny) return os << "Any";
   if (hints == ToBooleanHint::kNone) return os << "None";
@@ -89,6 +122,22 @@
   return os;
 }
 
+std::string ToString(ToBooleanHints hints) {
+  if (hints == ToBooleanHint::kAny) return "Any";
+  if (hints == ToBooleanHint::kNone) return "None";
+  std::string ret;
+  bool first = true;
+  for (ToBooleanHints::mask_type i = 0; i < sizeof(i) * 8; ++i) {
+    ToBooleanHint const hint = static_cast<ToBooleanHint>(1u << i);
+    if (hints & hint) {
+      if (!first) ret += "|";
+      first = false;
+      ret += ToString(hint);
+    }
+  }
+  return ret;
+}
+
 std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags) {
   switch (flags) {
     case STRING_ADD_CHECK_NONE:
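Editor's note: the new ToString(ToBooleanHints) above walks every bit position of the flags' underlying mask type (uint16_t, per the base::Flags typedef in type-hints.h) and joins the names of the set bits with "|". A reduced, self-contained sketch of that idiom follows; the three-value hint enum is a hypothetical stand-in for the real ToBooleanHint set.

// Sketch of the bit-walking flag-joining idiom used by ToString above.
#include <cstdint>
#include <iostream>
#include <string>

enum Hint : uint16_t {
  kUndefined = 1u << 0,
  kBoolean = 1u << 1,
  kString = 1u << 2,
};

std::string HintName(uint16_t bit) {
  switch (bit) {
    case kUndefined: return "Undefined";
    case kBoolean: return "Boolean";
    case kString: return "String";
    default: return "?";
  }
}

std::string JoinHints(uint16_t hints) {
  if (hints == 0) return "None";
  std::string ret;
  bool first = true;
  // Probe each bit of the 16-bit mask; append the name of every set bit.
  for (uint16_t i = 0; i < sizeof(uint16_t) * 8; ++i) {
    uint16_t bit = static_cast<uint16_t>(1u << i);
    if (hints & bit) {
      if (!first) ret += "|";
      first = false;
      ret += HintName(bit);
    }
  }
  return ret;
}

int main() {
  std::cout << JoinHints(kUndefined | kString) << "\n";  // Undefined|String
  return 0;
}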
diff --git a/src/type-hints.h b/src/type-hints.h
index e6138c7..c7c6ccc 100644
--- a/src/type-hints.h
+++ b/src/type-hints.h
@@ -33,6 +33,9 @@
   kSignedSmall,
   kNumber,
   kNumberOrOddball,
+  kInternalizedString,
+  kString,
+  kReceiver,
   kAny
 };
 
@@ -53,18 +56,19 @@
   kString = 1u << 5,
   kSymbol = 1u << 6,
   kHeapNumber = 1u << 7,
-  kSimdValue = 1u << 8,
   kAny = kUndefined | kBoolean | kNull | kSmallInteger | kReceiver | kString |
-         kSymbol | kHeapNumber | kSimdValue,
-  kNeedsMap = kReceiver | kString | kSymbol | kHeapNumber | kSimdValue,
+         kSymbol | kHeapNumber,
+  kNeedsMap = kReceiver | kString | kSymbol | kHeapNumber,
   kCanBeUndetectable = kReceiver,
 };
 
 std::ostream& operator<<(std::ostream&, ToBooleanHint);
+std::string ToString(ToBooleanHint);
 
 typedef base::Flags<ToBooleanHint, uint16_t> ToBooleanHints;
 
 std::ostream& operator<<(std::ostream&, ToBooleanHints);
+std::string ToString(ToBooleanHints);
 
 DEFINE_OPERATORS_FOR_FLAGS(ToBooleanHints)
 
diff --git a/src/type-info.cc b/src/type-info.cc
index fd3a2dc..d7de1b8 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -4,6 +4,7 @@
 
 #include "src/type-info.h"
 
+#include "src/assembler-inl.h"
 #include "src/ast/ast.h"
 #include "src/code-stubs.h"
 #include "src/ic/ic.h"
@@ -13,10 +14,10 @@
 namespace v8 {
 namespace internal {
 
-
-TypeFeedbackOracle::TypeFeedbackOracle(
-    Isolate* isolate, Zone* zone, Handle<Code> code,
-    Handle<TypeFeedbackVector> feedback_vector, Handle<Context> native_context)
+TypeFeedbackOracle::TypeFeedbackOracle(Isolate* isolate, Zone* zone,
+                                       Handle<Code> code,
+                                       Handle<FeedbackVector> feedback_vector,
+                                       Handle<Context> native_context)
     : native_context_(native_context), isolate_(isolate), zone_(zone) {
   BuildDictionary(code);
   DCHECK(dictionary_->IsUnseededNumberDictionary());
@@ -24,7 +25,7 @@
   // the type feedback info contained therein.
   // TODO(mvstanton): revisit the decision to copy when we weakly
   // traverse the feedback vector at GC time.
-  feedback_vector_ = TypeFeedbackVector::Copy(isolate, feedback_vector);
+  feedback_vector_ = FeedbackVector::Copy(isolate, feedback_vector);
 }
 
 
@@ -47,8 +48,7 @@
   return Handle<Object>::cast(isolate()->factory()->undefined_value());
 }
 
-
-Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorSlot slot) {
+Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackSlot slot) {
   DCHECK(slot.ToInt() >= 0 && slot.ToInt() < feedback_vector_->length());
   Handle<Object> undefined =
       Handle<Object>::cast(isolate()->factory()->undefined_value());
@@ -62,23 +62,20 @@
     obj = cell->value();
   }
 
-  if (obj->IsJSFunction() || obj->IsAllocationSite() || obj->IsSymbol() ||
-      obj->IsSimd128Value()) {
+  if (obj->IsJSFunction() || obj->IsAllocationSite() || obj->IsSymbol()) {
     return Handle<Object>(obj, isolate());
   }
 
   return undefined;
 }
 
-
-InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(
-    FeedbackVectorSlot slot) {
+InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(FeedbackSlot slot) {
   if (!slot.IsInvalid()) {
-    FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
-    if (kind == FeedbackVectorSlotKind::LOAD_IC) {
+    FeedbackSlotKind kind = feedback_vector_->GetKind(slot);
+    if (IsLoadICKind(kind)) {
       LoadICNexus nexus(feedback_vector_, slot);
       return nexus.StateFromFeedback();
-    } else if (kind == FeedbackVectorSlotKind::KEYED_LOAD_IC) {
+    } else if (IsKeyedLoadICKind(kind)) {
       KeyedLoadICNexus nexus(feedback_vector_, slot);
       return nexus.StateFromFeedback();
     }
@@ -89,14 +86,13 @@
   return PREMONOMORPHIC;
 }
 
-
-bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorSlot slot) {
+bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackSlot slot) {
   if (!slot.IsInvalid()) {
-    FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
-    if (kind == FeedbackVectorSlotKind::STORE_IC) {
+    FeedbackSlotKind kind = feedback_vector_->GetKind(slot);
+    if (IsStoreICKind(kind)) {
       StoreICNexus nexus(feedback_vector_, slot);
       return nexus.StateFromFeedback() == UNINITIALIZED;
-    } else if (kind == FeedbackVectorSlotKind::KEYED_STORE_IC) {
+    } else if (IsKeyedStoreICKind(kind)) {
       KeyedStoreICNexus nexus(feedback_vector_, slot);
       return nexus.StateFromFeedback() == UNINITIALIZED;
     }
@@ -104,42 +100,34 @@
   return true;
 }
 
-
-bool TypeFeedbackOracle::CallIsUninitialized(FeedbackVectorSlot slot) {
+bool TypeFeedbackOracle::CallIsUninitialized(FeedbackSlot slot) {
   Handle<Object> value = GetInfo(slot);
   return value->IsUndefined(isolate()) ||
          value.is_identical_to(
-             TypeFeedbackVector::UninitializedSentinel(isolate()));
+             FeedbackVector::UninitializedSentinel(isolate()));
 }
 
-
-bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackVectorSlot slot) {
+bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackSlot slot) {
   Handle<Object> value = GetInfo(slot);
   return value->IsAllocationSite() || value->IsJSFunction();
 }
 
-
-bool TypeFeedbackOracle::CallNewIsMonomorphic(FeedbackVectorSlot slot) {
+bool TypeFeedbackOracle::CallNewIsMonomorphic(FeedbackSlot slot) {
   Handle<Object> info = GetInfo(slot);
   return info->IsAllocationSite() || info->IsJSFunction();
 }
 
-
-byte TypeFeedbackOracle::ForInType(FeedbackVectorSlot feedback_vector_slot) {
+byte TypeFeedbackOracle::ForInType(FeedbackSlot feedback_vector_slot) {
   Handle<Object> value = GetInfo(feedback_vector_slot);
-  return value.is_identical_to(
-             TypeFeedbackVector::UninitializedSentinel(isolate()))
+  return value.is_identical_to(FeedbackVector::UninitializedSentinel(isolate()))
              ? ForInStatement::FAST_FOR_IN
              : ForInStatement::SLOW_FOR_IN;
 }
 
-
 void TypeFeedbackOracle::GetStoreModeAndKeyType(
-    FeedbackVectorSlot slot, KeyedAccessStoreMode* store_mode,
+    FeedbackSlot slot, KeyedAccessStoreMode* store_mode,
     IcCheckType* key_type) {
-  if (!slot.IsInvalid() &&
-      feedback_vector_->GetKind(slot) ==
-          FeedbackVectorSlotKind::KEYED_STORE_IC) {
+  if (!slot.IsInvalid() && feedback_vector_->IsKeyedStoreIC(slot)) {
     KeyedStoreICNexus nexus(feedback_vector_, slot);
     *store_mode = nexus.GetKeyedAccessStoreMode();
     *key_type = nexus.GetKeyType();
@@ -149,8 +137,7 @@
   }
 }
 
-
-Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(FeedbackVectorSlot slot) {
+Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(FeedbackSlot slot) {
   Handle<Object> info = GetInfo(slot);
   if (info->IsAllocationSite()) {
     return Handle<JSFunction>(isolate()->native_context()->array_function());
@@ -159,9 +146,7 @@
   return Handle<JSFunction>::cast(info);
 }
 
-
-Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(
-    FeedbackVectorSlot slot) {
+Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(FeedbackSlot slot) {
   Handle<Object> info = GetInfo(slot);
   if (info->IsJSFunction()) {
     return Handle<JSFunction>::cast(info);
@@ -171,9 +156,8 @@
   return Handle<JSFunction>(isolate()->native_context()->array_function());
 }
 
-
 Handle<AllocationSite> TypeFeedbackOracle::GetCallAllocationSite(
-    FeedbackVectorSlot slot) {
+    FeedbackSlot slot) {
   Handle<Object> info = GetInfo(slot);
   if (info->IsAllocationSite()) {
     return Handle<AllocationSite>::cast(info);
@@ -181,9 +165,8 @@
   return Handle<AllocationSite>::null();
 }
 
-
 Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(
-    FeedbackVectorSlot slot) {
+    FeedbackSlot slot) {
   Handle<Object> info = GetInfo(slot);
   if (info->IsAllocationSite()) {
     return Handle<AllocationSite>::cast(info);
@@ -203,6 +186,12 @@
       return AstType::Number();
     case CompareOperationHint::kNumberOrOddball:
       return AstType::NumberOrOddball();
+    case CompareOperationHint::kInternalizedString:
+      return AstType::InternalizedString();
+    case CompareOperationHint::kString:
+      return AstType::String();
+    case CompareOperationHint::kReceiver:
+      return AstType::Receiver();
     case CompareOperationHint::kAny:
       return AstType::Any();
   }
@@ -232,7 +221,7 @@
 
 }  // end anonymous namespace
 
-void TypeFeedbackOracle::CompareType(TypeFeedbackId id, FeedbackVectorSlot slot,
+void TypeFeedbackOracle::CompareType(TypeFeedbackId id, FeedbackSlot slot,
                                      AstType** left_type, AstType** right_type,
                                      AstType** combined_type) {
   Handle<Object> info = GetInfo(id);
@@ -293,7 +282,7 @@
   }
 }
 
-void TypeFeedbackOracle::BinaryType(TypeFeedbackId id, FeedbackVectorSlot slot,
+void TypeFeedbackOracle::BinaryType(TypeFeedbackId id, FeedbackSlot slot,
                                     AstType** left, AstType** right,
                                     AstType** result,
                                     Maybe<int>* fixed_right_arg,
@@ -364,8 +353,7 @@
   }
 }
 
-AstType* TypeFeedbackOracle::CountType(TypeFeedbackId id,
-                                       FeedbackVectorSlot slot) {
+AstType* TypeFeedbackOracle::CountType(TypeFeedbackId id, FeedbackSlot slot) {
   Handle<Object> object = GetInfo(id);
   if (slot.IsInvalid()) {
     DCHECK(!object->IsCode());
@@ -394,8 +382,7 @@
   return all_strings;
 }
 
-
-void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorSlot slot,
+void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackSlot slot,
                                                Handle<Name> name,
                                                SmallMapList* receiver_types) {
   receiver_types->Clear();
@@ -406,9 +393,8 @@
   }
 }
 
-
 void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
-    FeedbackVectorSlot slot, SmallMapList* receiver_types, bool* is_string,
+    FeedbackSlot slot, SmallMapList* receiver_types, bool* is_string,
     IcCheckType* key_type) {
   receiver_types->Clear();
   if (slot.IsInvalid()) {
@@ -422,8 +408,7 @@
   }
 }
 
-
-void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorSlot slot,
+void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackSlot slot,
                                                  Handle<Name> name,
                                                  SmallMapList* receiver_types) {
   receiver_types->Clear();
@@ -431,24 +416,22 @@
                        receiver_types);
 }
 
-
 void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
-    FeedbackVectorSlot slot, SmallMapList* receiver_types,
+    FeedbackSlot slot, SmallMapList* receiver_types,
     KeyedAccessStoreMode* store_mode, IcCheckType* key_type) {
   receiver_types->Clear();
   CollectReceiverTypes(slot, receiver_types);
   GetStoreModeAndKeyType(slot, store_mode, key_type);
 }
 
-
-void TypeFeedbackOracle::CountReceiverTypes(FeedbackVectorSlot slot,
+void TypeFeedbackOracle::CountReceiverTypes(FeedbackSlot slot,
                                             SmallMapList* receiver_types) {
   receiver_types->Clear();
   if (!slot.IsInvalid()) CollectReceiverTypes(slot, receiver_types);
 }
 
 void TypeFeedbackOracle::CollectReceiverTypes(StubCache* stub_cache,
-                                              FeedbackVectorSlot slot,
+                                              FeedbackSlot slot,
                                               Handle<Name> name,
                                               SmallMapList* types) {
   StoreICNexus nexus(feedback_vector_, slot);
@@ -468,15 +451,14 @@
   }
 }
 
-
-void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorSlot slot,
+void TypeFeedbackOracle::CollectReceiverTypes(FeedbackSlot slot,
                                               SmallMapList* types) {
-  FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
-  if (kind == FeedbackVectorSlotKind::STORE_IC) {
+  FeedbackSlotKind kind = feedback_vector_->GetKind(slot);
+  if (IsStoreICKind(kind) || IsStoreOwnICKind(kind)) {
     StoreICNexus nexus(feedback_vector_, slot);
     CollectReceiverTypes(&nexus, types);
   } else {
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, kind);
+    DCHECK(IsKeyedStoreICKind(kind));
     KeyedStoreICNexus nexus(feedback_vector_, slot);
     CollectReceiverTypes(&nexus, types);
   }
diff --git a/src/type-info.h b/src/type-info.h
index 06a0c9e..c8e3556 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -23,40 +23,39 @@
 class TypeFeedbackOracle: public ZoneObject {
  public:
   TypeFeedbackOracle(Isolate* isolate, Zone* zone, Handle<Code> code,
-                     Handle<TypeFeedbackVector> feedback_vector,
+                     Handle<FeedbackVector> feedback_vector,
                      Handle<Context> native_context);
 
-  InlineCacheState LoadInlineCacheState(FeedbackVectorSlot slot);
-  bool StoreIsUninitialized(FeedbackVectorSlot slot);
-  bool CallIsUninitialized(FeedbackVectorSlot slot);
-  bool CallIsMonomorphic(FeedbackVectorSlot slot);
-  bool CallNewIsMonomorphic(FeedbackVectorSlot slot);
+  InlineCacheState LoadInlineCacheState(FeedbackSlot slot);
+  bool StoreIsUninitialized(FeedbackSlot slot);
+  bool CallIsUninitialized(FeedbackSlot slot);
+  bool CallIsMonomorphic(FeedbackSlot slot);
+  bool CallNewIsMonomorphic(FeedbackSlot slot);
 
   // TODO(1571) We can't use ForInStatement::ForInType as the return value due
   // to various cycles in our headers.
   // TODO(rossberg): once all oracle access is removed from ast.cc, it should
   // be possible.
-  byte ForInType(FeedbackVectorSlot feedback_vector_slot);
+  byte ForInType(FeedbackSlot feedback_vector_slot);
 
-  void GetStoreModeAndKeyType(FeedbackVectorSlot slot,
+  void GetStoreModeAndKeyType(FeedbackSlot slot,
                               KeyedAccessStoreMode* store_mode,
                               IcCheckType* key_type);
 
-  void PropertyReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
+  void PropertyReceiverTypes(FeedbackSlot slot, Handle<Name> name,
                              SmallMapList* receiver_types);
-  void KeyedPropertyReceiverTypes(FeedbackVectorSlot slot,
+  void KeyedPropertyReceiverTypes(FeedbackSlot slot,
                                   SmallMapList* receiver_types, bool* is_string,
                                   IcCheckType* key_type);
-  void AssignmentReceiverTypes(FeedbackVectorSlot slot, Handle<Name> name,
+  void AssignmentReceiverTypes(FeedbackSlot slot, Handle<Name> name,
                                SmallMapList* receiver_types);
-  void KeyedAssignmentReceiverTypes(FeedbackVectorSlot slot,
+  void KeyedAssignmentReceiverTypes(FeedbackSlot slot,
                                     SmallMapList* receiver_types,
                                     KeyedAccessStoreMode* store_mode,
                                     IcCheckType* key_type);
-  void CountReceiverTypes(FeedbackVectorSlot slot,
-                          SmallMapList* receiver_types);
+  void CountReceiverTypes(FeedbackSlot slot, SmallMapList* receiver_types);
 
-  void CollectReceiverTypes(FeedbackVectorSlot slot, SmallMapList* types);
+  void CollectReceiverTypes(FeedbackSlot slot, SmallMapList* types);
   void CollectReceiverTypes(FeedbackNexus* nexus, SmallMapList* types);
 
   static bool IsRelevantFeedback(Map* map, Context* native_context) {
@@ -66,10 +65,10 @@
                native_context;
   }
 
-  Handle<JSFunction> GetCallTarget(FeedbackVectorSlot slot);
-  Handle<AllocationSite> GetCallAllocationSite(FeedbackVectorSlot slot);
-  Handle<JSFunction> GetCallNewTarget(FeedbackVectorSlot slot);
-  Handle<AllocationSite> GetCallNewAllocationSite(FeedbackVectorSlot slot);
+  Handle<JSFunction> GetCallTarget(FeedbackSlot slot);
+  Handle<AllocationSite> GetCallAllocationSite(FeedbackSlot slot);
+  Handle<JSFunction> GetCallNewTarget(FeedbackSlot slot);
+  Handle<AllocationSite> GetCallNewAllocationSite(FeedbackSlot slot);
 
   // TODO(1571) We can't use ToBooleanICStub::Types as the return value because
   // of various cycles in our headers. Death to tons of implementations in
@@ -77,22 +76,22 @@
   uint16_t ToBooleanTypes(TypeFeedbackId id);
 
   // Get type information for arithmetic operations and compares.
-  void BinaryType(TypeFeedbackId id, FeedbackVectorSlot slot, AstType** left,
+  void BinaryType(TypeFeedbackId id, FeedbackSlot slot, AstType** left,
                   AstType** right, AstType** result,
                   Maybe<int>* fixed_right_arg,
                   Handle<AllocationSite>* allocation_site,
                   Token::Value operation);
 
-  void CompareType(TypeFeedbackId id, FeedbackVectorSlot slot, AstType** left,
+  void CompareType(TypeFeedbackId id, FeedbackSlot slot, AstType** left,
                    AstType** right, AstType** combined);
 
-  AstType* CountType(TypeFeedbackId id, FeedbackVectorSlot slot);
+  AstType* CountType(TypeFeedbackId id, FeedbackSlot slot);
 
   Zone* zone() const { return zone_; }
   Isolate* isolate() const { return isolate_; }
 
  private:
-  void CollectReceiverTypes(StubCache* stub_cache, FeedbackVectorSlot slot,
+  void CollectReceiverTypes(StubCache* stub_cache, FeedbackSlot slot,
                             Handle<Name> name, SmallMapList* types);
   void CollectReceiverTypes(StubCache* stub_cache, FeedbackNexus* nexus,
                             Handle<Name> name, SmallMapList* types);
@@ -117,14 +116,14 @@
 
   // Returns an element from the type feedback vector. Returns undefined
   // if there is no information.
-  Handle<Object> GetInfo(FeedbackVectorSlot slot);
+  Handle<Object> GetInfo(FeedbackSlot slot);
 
  private:
   Handle<Context> native_context_;
   Isolate* isolate_;
   Zone* zone_;
   Handle<UnseededNumberDictionary> dictionary_;
-  Handle<TypeFeedbackVector> feedback_vector_;
+  Handle<FeedbackVector> feedback_vector_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
 };
diff --git a/src/utils.cc b/src/utils.cc
index ef640c3..96a7d2c 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -76,8 +76,7 @@
   return buffer_.start();
 }
 
-
-std::ostream& operator<<(std::ostream& os, FeedbackVectorSlot slot) {
+std::ostream& operator<<(std::ostream& os, FeedbackSlot slot) {
   return os << "#" << slot.id_;
 }
 
diff --git a/src/utils.h b/src/utils.h
index bd5589c..f6e50e5 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -20,6 +20,7 @@
 #include "src/globals.h"
 #include "src/list.h"
 #include "src/vector.h"
+#include "src/zone/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -136,15 +137,20 @@
   return nibble + msb4[x];
 }
 
-
-// The C++ standard leaves the semantics of '>>' undefined for
-// negative signed operands. Most implementations do the right thing,
-// though.
-inline int ArithmeticShiftRight(int x, int s) {
-  return x >> s;
+template <typename T>
+static T ArithmeticShiftRight(T x, int shift) {
+  DCHECK_LE(0, shift);
+  if (x < 0) {
+    // Right shift of signed values is implementation defined. Simulate a
+    // true arithmetic right shift by adding leading sign bits.
+    using UnsignedT = typename std::make_unsigned<T>::type;
+    UnsignedT mask = ~(static_cast<UnsignedT>(~0) >> shift);
+    return (static_cast<UnsignedT>(x) >> shift) | mask;
+  } else {
+    return x >> shift;
+  }
 }
 
-
 template <typename T>
 int Compare(const T& a, const T& b) {
   if (a == b)
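Editor's note: the templated ArithmeticShiftRight added in the hunk above replaces the old int-only version. Because right-shifting a negative signed value is implementation-defined in C++, it shifts the unsigned representation and ORs the sign bits back in. A standalone sanity check of that trick, with illustrative test values only:

// Mirrors the diff's logic: shift unsigned, then re-add leading sign bits.
#include <cassert>
#include <cstdint>
#include <type_traits>

template <typename T>
T ArithmeticShiftRightSketch(T x, int shift) {
  assert(shift >= 0);
  if (x < 0) {
    using UnsignedT = typename std::make_unsigned<T>::type;
    // Mask of 'shift' leading one-bits, e.g. shift=1 -> 0x80000000 for int32.
    UnsignedT mask = ~(static_cast<UnsignedT>(~0) >> shift);
    return (static_cast<UnsignedT>(x) >> shift) | mask;
  }
  return x >> shift;  // Non-negative values shift portably.
}

int main() {
  // -8 >> 1 must stay negative: a true arithmetic shift gives -4.
  assert(ArithmeticShiftRightSketch<int32_t>(-8, 1) == -4);
  assert(ArithmeticShiftRightSketch<int32_t>(-1, 31) == -1);
  assert(ArithmeticShiftRightSketch<int32_t>(8, 2) == 2);
  return 0;
}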
@@ -186,6 +192,11 @@
   return IsAligned(offs, alignment);
 }
 
+template <typename T, typename U>
+inline T RoundUpToMultipleOfPowOf2(T value, U multiple) {
+  DCHECK(multiple && ((multiple & (multiple - 1)) == 0));
+  return (value + multiple - 1) & ~(multiple - 1);
+}
 
 // Returns the maximum of the two parameters.
 template <typename T>
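Editor's note: RoundUpToMultipleOfPowOf2 in the hunk above relies on the standard power-of-two mask identity: add multiple - 1, then clear the low bits, rounding up without a division. A quick worked check (values illustrative):

// (value + multiple - 1) & ~(multiple - 1), multiple a power of two.
#include <cassert>

int main() {
  auto round_up = [](int value, int multiple) {
    assert(multiple && ((multiple & (multiple - 1)) == 0));  // power of two
    return (value + multiple - 1) & ~(multiple - 1);
  };
  assert(round_up(13, 8) == 16);  // 13 + 7 = 20; clearing the low 3 bits -> 16
  assert(round_up(16, 8) == 16);  // already-aligned values are unchanged
  assert(round_up(1, 4) == 4);
  return 0;
}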
@@ -502,13 +513,22 @@
                                          size_t size) {
   memmove(dest, src, size);
 }
-const int kMinComplexMemCopy = 16 * kPointerSize;
+const int kMinComplexMemCopy = 8;
 #endif  // V8_TARGET_ARCH_IA32
 
 
 // ----------------------------------------------------------------------------
 // Miscellaneous
 
+// Byte offsets of the lower and upper 32-bit halves of a 64-bit integer.
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+static const int kInt64LowerHalfMemoryOffset = 0;
+static const int kInt64UpperHalfMemoryOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+static const int kInt64LowerHalfMemoryOffset = 4;
+static const int kInt64UpperHalfMemoryOffset = 0;
+#endif  // V8_TARGET_LITTLE_ENDIAN
+
 // A static resource holds a static instance that can be reserved in
 // a local scope using an instance of Access.  Attempts to re-reserve
 // the instance will cause an error.
@@ -879,24 +899,21 @@
   return lhs.ToInt() > rhs.ToInt();
 }
 
-
-class FeedbackVectorSlot {
+class FeedbackSlot {
  public:
-  FeedbackVectorSlot() : id_(kInvalidSlot) {}
-  explicit FeedbackVectorSlot(int id) : id_(id) {}
+  FeedbackSlot() : id_(kInvalidSlot) {}
+  explicit FeedbackSlot(int id) : id_(id) {}
 
   int ToInt() const { return id_; }
 
-  static FeedbackVectorSlot Invalid() { return FeedbackVectorSlot(); }
+  static FeedbackSlot Invalid() { return FeedbackSlot(); }
   bool IsInvalid() const { return id_ == kInvalidSlot; }
 
-  bool operator==(FeedbackVectorSlot that) const {
-    return this->id_ == that.id_;
-  }
-  bool operator!=(FeedbackVectorSlot that) const { return !(*this == that); }
+  bool operator==(FeedbackSlot that) const { return this->id_ == that.id_; }
+  bool operator!=(FeedbackSlot that) const { return !(*this == that); }
 
-  friend size_t hash_value(FeedbackVectorSlot slot) { return slot.ToInt(); }
-  friend std::ostream& operator<<(std::ostream& os, FeedbackVectorSlot);
+  friend size_t hash_value(FeedbackSlot slot) { return slot.ToInt(); }
+  friend std::ostream& operator<<(std::ostream& os, FeedbackSlot);
 
  private:
   static const int kInvalidSlot = -1;
@@ -918,6 +935,17 @@
   static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
   static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
 
+  // Special bailout id support for deopting into the {JSConstructStub} stub.
+  // The following hard-coded deoptimization points are supported by the stub:
+  //  - {ConstructStubCreate} maps to {construct_stub_create_deopt_pc_offset}.
+  //  - {ConstructStubInvoke} maps to {construct_stub_invoke_deopt_pc_offset}.
+  static BailoutId ConstructStubCreate() { return BailoutId(1); }
+  static BailoutId ConstructStubInvoke() { return BailoutId(2); }
+  bool IsValidForConstructStub() const {
+    return id_ == ConstructStubCreate().ToInt() ||
+           id_ == ConstructStubInvoke().ToInt();
+  }
+
   bool IsNone() const { return id_ == kNoneId; }
   bool operator==(const BailoutId& other) const { return id_ == other.id_; }
   bool operator!=(const BailoutId& other) const { return id_ != other.id_; }
@@ -946,19 +974,6 @@
   int id_;
 };
 
-class TokenDispenserForFinally {
- public:
-  int GetBreakContinueToken() { return next_token_++; }
-  static const int kFallThroughToken = 0;
-  static const int kThrowToken = 1;
-  static const int kReturnToken = 2;
-
-  static const int kFirstBreakContinueToken = 3;
-  static const int kInvalidToken = -1;
-
- private:
-  int next_token_ = kFirstBreakContinueToken;
-};
 
 // ----------------------------------------------------------------------------
 // I/O support.
@@ -1640,9 +1655,31 @@
     friend class ThreadedList;
   };
 
+  class ConstIterator final {
+   public:
+    ConstIterator& operator++() {
+      entry_ = (*entry_)->next();
+      return *this;
+    }
+    bool operator!=(const ConstIterator& other) {
+      return entry_ != other.entry_;
+    }
+    const T* operator*() const { return *entry_; }
+
+   private:
+    explicit ConstIterator(T* const* entry) : entry_(entry) {}
+
+    T* const* entry_;
+
+    friend class ThreadedList;
+  };
+
   Iterator begin() { return Iterator(&head_); }
   Iterator end() { return Iterator(tail_); }
 
+  ConstIterator begin() const { return ConstIterator(&head_); }
+  ConstIterator end() const { return ConstIterator(tail_); }
+
   void Rewind(Iterator reset_point) {
     tail_ = reset_point.entry_;
     *tail_ = nullptr;
@@ -1677,6 +1714,21 @@
   DISALLOW_COPY_AND_ASSIGN(ThreadedList);
 };
 
+// Can be used to create a threaded list of |T|.
+template <typename T>
+class ThreadedListZoneEntry final : public ZoneObject {
+ public:
+  explicit ThreadedListZoneEntry(T value) : value_(value), next_(nullptr) {}
+
+  T value() { return value_; }
+  ThreadedListZoneEntry<T>** next() { return &next_; }
+
+ private:
+  T value_;
+  ThreadedListZoneEntry<T>* next_;
+  DISALLOW_COPY_AND_ASSIGN(ThreadedListZoneEntry);
+};
+
 }  // namespace internal
 }  // namespace v8
 
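Editor's note: the ConstIterator and ThreadedListZoneEntry added to utils.h above round out an intrusive singly linked list design: each entry publishes the address of its next field via next(), and the list threads through those slots, which is what makes O(1) append (and Rewind) possible. A reduced, self-contained sketch follows; MiniThreadedList is a hypothetical stand-in, not the real ThreadedList API.

// Sketch of the threaded-list design: the tail pointer addresses a slot,
// never an entry, so appending never needs to walk the list.
#include <cassert>

template <typename T>
struct Entry {
  explicit Entry(T value) : value_(value), next_(nullptr) {}
  T value() const { return value_; }
  Entry<T>** next() { return &next_; }

 private:
  T value_;
  Entry<T>* next_;
};

template <typename T>
class MiniThreadedList {
 public:
  MiniThreadedList() : head_(nullptr), tail_(&head_) {}

  // O(1) append: write the new entry into the slot tail_ addresses, then
  // advance tail_ to the new entry's own next slot.
  void Add(T* v) {
    *tail_ = v;
    tail_ = v->next();
  }

  class ConstIterator {
   public:
    ConstIterator& operator++() {
      entry_ = (*entry_)->next();
      return *this;
    }
    bool operator!=(const ConstIterator& other) const {
      return entry_ != other.entry_;
    }
    const T* operator*() const { return *entry_; }

   private:
    explicit ConstIterator(T* const* entry) : entry_(entry) {}
    T* const* entry_;
    friend class MiniThreadedList;
  };

  ConstIterator begin() const { return ConstIterator(&head_); }
  ConstIterator end() const { return ConstIterator(tail_); }

 private:
  T* head_;
  T** tail_;
};

int main() {
  Entry<int> a(1), b(2);
  MiniThreadedList<Entry<int>> list;
  list.Add(&a);
  list.Add(&b);
  int sum = 0;
  for (const Entry<int>* e : list) sum += e->value();
  assert(sum == 3);
  return 0;
}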
diff --git a/src/v8.cc b/src/v8.cc
index 7f0230a..45e60ad 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -4,6 +4,7 @@
 
 #include "src/v8.h"
 
+#include "src/api.h"
 #include "src/assembler.h"
 #include "src/base/once.h"
 #include "src/base/platform/platform.h"
@@ -15,7 +16,7 @@
 #include "src/frames.h"
 #include "src/isolate.h"
 #include "src/libsampler/sampler.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
 #include "src/profiler/heap-profiler.h"
 #include "src/runtime-profiler.h"
 #include "src/snapshot/natives.h"
@@ -66,7 +67,7 @@
     FLAG_max_semi_space_size = 1;
   }
 
-  if (FLAG_turbo && strcmp(FLAG_turbo_filter, "~~") == 0) {
+  if (FLAG_opt && FLAG_turbo && strcmp(FLAG_turbo_filter, "~~") == 0) {
     const char* filter_flag = "--turbo-filter=*";
     FlagList::SetFlagsFromString(filter_flag, StrLength(filter_flag));
   }
diff --git a/src/v8.gyp b/src/v8.gyp
index 020ec09..144f482 100644
--- a/src/v8.gyp
+++ b/src/v8.gyp
@@ -397,6 +397,7 @@
         '../include/v8-profiler.h',
         '../include/v8-testing.h',
         '../include/v8-util.h',
+        '../include/v8-version-string.h',
         '../include/v8-version.h',
         '../include/v8.h',
         '../include/v8config.h',
@@ -431,12 +432,13 @@
         'asmjs/switch-logic.cc',
         'assembler.cc',
         'assembler.h',
+        'assembler-inl.h',
         'assert-scope.h',
         'assert-scope.cc',
         'ast/ast-expression-rewriter.cc',
         'ast/ast-expression-rewriter.h',
-        'ast/ast-literal-reindexer.cc',
-        'ast/ast-literal-reindexer.h',
+        'ast/ast-function-literal-id-reindexer.cc',
+        'ast/ast-function-literal-id-reindexer.h',
         'ast/ast-numbering.cc',
         'ast/ast-numbering.h',
         'ast/ast-traversal-visitor.h',
@@ -455,7 +457,6 @@
         'ast/modules.h',
         'ast/prettyprinter.cc',
         'ast/prettyprinter.h',
-        'ast/scopeinfo.cc',
         'ast/scopes.cc',
         'ast/scopes.h',
         'ast/variables.cc',
@@ -475,12 +476,20 @@
         'bootstrapper.cc',
         'bootstrapper.h',
         'builtins/builtins-api.cc',
+        'builtins/builtins-arguments.cc',
+        'builtins/builtins-arguments.h',
         'builtins/builtins-arraybuffer.cc',
         'builtins/builtins-array.cc',
+        'builtins/builtins-async-iterator.cc',
+        'builtins/builtins-async-function.cc',
+        'builtins/builtins-async.cc',
+        'builtins/builtins-async.h',
         'builtins/builtins-boolean.cc',
         'builtins/builtins-call.cc',
         'builtins/builtins-callsite.cc',
         'builtins/builtins-conversion.cc',
+        'builtins/builtins-constructor.cc',
+        'builtins/builtins-constructor.h',
         'builtins/builtins-dataview.cc',
         'builtins/builtins-date.cc',
         'builtins/builtins-debug.cc',
@@ -489,22 +498,26 @@
         'builtins/builtins-generator.cc',
         'builtins/builtins-global.cc',
         'builtins/builtins-handler.cc',
+        'builtins/builtins-ic.cc',
         'builtins/builtins-internal.cc',
         'builtins/builtins-interpreter.cc',
-        'builtins/builtins-iterator.cc',
         'builtins/builtins-json.cc',
         'builtins/builtins-math.cc',
         'builtins/builtins-number.cc',
         'builtins/builtins-object.cc',
+        'builtins/builtins-object.h',
         'builtins/builtins-promise.cc',
+        'builtins/builtins-promise.h',
         'builtins/builtins-proxy.cc',
         'builtins/builtins-reflect.cc',
         'builtins/builtins-regexp.cc',
+        'builtins/builtins-regexp.h',
         'builtins/builtins-sharedarraybuffer.cc',
         'builtins/builtins-string.cc',
         'builtins/builtins-symbol.cc',
         'builtins/builtins-typedarray.cc',
         'builtins/builtins-utils.h',
+        'builtins/builtins-wasm.cc',
         'builtins/builtins.cc',
         'builtins/builtins.h',
         'cached-powers.cc',
@@ -548,12 +561,12 @@
         'compiler/basic-block-instrumentor.h',
         'compiler/branch-elimination.cc',
         'compiler/branch-elimination.h',
-        'compiler/bytecode-branch-analysis.cc',
-        'compiler/bytecode-branch-analysis.h',
+        'compiler/bytecode-analysis.cc',
+        'compiler/bytecode-analysis.h',
         'compiler/bytecode-graph-builder.cc',
         'compiler/bytecode-graph-builder.h',
-        'compiler/bytecode-loop-analysis.cc',
-        'compiler/bytecode-loop-analysis.h',
+        'compiler/bytecode-liveness-map.cc',
+        'compiler/bytecode-liveness-map.h',
         'compiler/c-linkage.cc',
         'compiler/checkpoint-elimination.cc',
         'compiler/checkpoint-elimination.h',
@@ -591,6 +604,8 @@
         'compiler/frame-states.h',
         'compiler/gap-resolver.cc',
         'compiler/gap-resolver.h',
+        'compiler/graph-assembler.cc',
+        'compiler/graph-assembler.h',
         'compiler/graph-reducer.cc',
         'compiler/graph-reducer.h',
         'compiler/graph-replay.cc',
@@ -623,8 +638,6 @@
         'compiler/js-frame-specialization.h',
         'compiler/js-generic-lowering.cc',
         'compiler/js-generic-lowering.h',
-        'compiler/js-global-object-specialization.cc',
-        'compiler/js-global-object-specialization.h',
         'compiler/js-graph.cc',
         'compiler/js-graph.h',
         'compiler/js-inlining.cc',
@@ -637,6 +650,8 @@
         'compiler/js-native-context-specialization.h',
         'compiler/js-operator.cc',
         'compiler/js-operator.h',
+        'compiler/js-type-hint-lowering.cc',
+        'compiler/js-type-hint-lowering.h',
         'compiler/js-typed-lowering.cc',
         'compiler/js-typed-lowering.h',
         'compiler/jump-threading.cc',
@@ -726,8 +741,6 @@
         'compiler/types.h',
         'compiler/type-cache.cc',
         'compiler/type-cache.h',
-        'compiler/type-hint-analyzer.cc',
-        'compiler/type-hint-analyzer.h',
         'compiler/typed-optimization.cc',
         'compiler/typed-optimization.h',
         'compiler/typer.cc',
@@ -742,6 +755,8 @@
         'compiler/wasm-linkage.cc',
         'compiler/zone-stats.cc',
         'compiler/zone-stats.h',
+        'compiler-dispatcher/compiler-dispatcher.cc',
+        'compiler-dispatcher/compiler-dispatcher.h',
         'compiler-dispatcher/compiler-dispatcher-job.cc',
         'compiler-dispatcher/compiler-dispatcher-job.h',
         'compiler-dispatcher/compiler-dispatcher-tracer.cc',
@@ -827,6 +842,8 @@
         'dateparser-inl.h',
         'dateparser.cc',
         'dateparser.h',
+        'debug/debug-coverage.cc',
+        'debug/debug-coverage.h',
         'debug/debug-evaluate.cc',
         'debug/debug-evaluate.h',
         'debug/debug-interface.h',
@@ -836,6 +853,7 @@
         'debug/debug-scopes.h',
         'debug/debug.cc',
         'debug/debug.h',
+        'debug/interface-types.h',
         'debug/liveedit.cc',
         'debug/liveedit.h',
         'deoptimize-reason.cc',
@@ -879,10 +897,16 @@
         'fast-accessor-assembler.h',
         'fast-dtoa.cc',
         'fast-dtoa.h',
+        'feedback-vector-inl.h',
+        'feedback-vector.cc',
+        'feedback-vector.h',
+        'ffi/ffi-compiler.cc',
+        'ffi/ffi-compiler.h',
         'field-index.h',
         'field-index-inl.h',
         'field-type.cc',
         'field-type.h',
+        'find-and-replace-pattern.h',
         'fixed-dtoa.cc',
         'fixed-dtoa.h',
         'flag-definitions.h',
@@ -909,6 +933,8 @@
         'heap/array-buffer-tracker.h',
         'heap/code-stats.cc',
         'heap/code-stats.h',
+        'heap/embedder-tracing.cc',
+        'heap/embedder-tracing.h',
         'heap/memory-reducer.cc',
         'heap/memory-reducer.h',
         'heap/gc-idle-time-handler.cc',
@@ -952,6 +978,8 @@
         'ic/access-compiler-data.h',
         'ic/access-compiler.cc',
         'ic/access-compiler.h',
+        'ic/accessor-assembler.cc',
+        'ic/accessor-assembler.h',
         'ic/call-optimization.cc',
         'ic/call-optimization.h',
         'ic/handler-compiler.cc',
@@ -961,10 +989,10 @@
         'ic/ic-inl.h',
         'ic/ic-state.cc',
         'ic/ic-state.h',
+        'ic/ic-stats.cc',
+        'ic/ic-stats.h',
         'ic/ic.cc',
         'ic/ic.h',
-        'ic/ic-compiler.cc',
-        'ic/ic-compiler.h',
         'ic/keyed-store-generic.cc',
         'ic/keyed-store-generic.h',
         'identity-map.cc',
@@ -973,10 +1001,14 @@
         'interface-descriptors.h',
         'interpreter/bytecodes.cc',
         'interpreter/bytecodes.h',
+        'interpreter/bytecode-array-accessor.cc',
+        'interpreter/bytecode-array-accessor.h',
         'interpreter/bytecode-array-builder.cc',
         'interpreter/bytecode-array-builder.h',
         'interpreter/bytecode-array-iterator.cc',
         'interpreter/bytecode-array-iterator.h',
+        'interpreter/bytecode-array-random-iterator.cc',
+        'interpreter/bytecode-array-random-iterator.h',
         'interpreter/bytecode-array-writer.cc',
         'interpreter/bytecode-array-writer.h',
         'interpreter/bytecode-dead-code-optimizer.cc',
@@ -1023,6 +1055,7 @@
         'json-stringifier.h',
         'keys.h',
         'keys.cc',
+        'label.h',
         'layout-descriptor-inl.h',
         'layout-descriptor.cc',
         'layout-descriptor.h',
@@ -1040,9 +1073,12 @@
         'lookup-cache.h',
         'lookup.cc',
         'lookup.h',
+        'map-updater.cc',
+        'map-updater.h',
         'macro-assembler.h',
         'machine-type.cc',
         'machine-type.h',
+        'managed.h',
         'messages.cc',
         'messages.h',
         'msan.h',
@@ -1053,6 +1089,14 @@
         'objects-printer.cc',
         'objects.cc',
         'objects.h',
+        'objects/literal-objects.cc',
+        'objects/literal-objects.h',
+        'objects/module-info.h',
+        'objects/object-macros.h',
+        'objects/object-macros-undef.h',
+        'objects/regexp-match-info.h',
+        'objects/scope-info.cc',
+        'objects/scope-info.h',
         'ostreams.cc',
         'ostreams.h',
         'parsing/duplicate-finder.cc',
@@ -1067,10 +1111,14 @@
         'parsing/parser-base.h',
         'parsing/parser.cc',
         'parsing/parser.h',
+        'parsing/parsing.cc',
+        'parsing/parsing.h',
         'parsing/pattern-rewriter.cc',
         'parsing/preparse-data-format.h',
         'parsing/preparse-data.cc',
         'parsing/preparse-data.h',
+        'parsing/preparsed-scope-data.cc',
+        'parsing/preparsed-scope-data.h',
         'parsing/preparser.cc',
         'parsing/preparser.h',
         'parsing/rewriter.cc',
@@ -1112,8 +1160,6 @@
         'profiler/tracing-cpu-profiler.h',
         'profiler/unbound-queue-inl.h',
         'profiler/unbound-queue.h',
-        'promise-utils.h',
-        'promise-utils.cc',
         'property-descriptor.cc',
         'property-descriptor.h',
         'property-details.h',
@@ -1171,7 +1217,6 @@
         'runtime/runtime-proxy.cc',
         'runtime/runtime-regexp.cc',
         'runtime/runtime-scopes.cc',
-        'runtime/runtime-simd.cc',
         'runtime/runtime-strings.cc',
         'runtime/runtime-symbol.cc',
         'runtime/runtime-test.cc',
@@ -1213,6 +1258,8 @@
         'startup-data-util.h',
         'string-builder.cc',
         'string-builder.h',
+        'string-case.cc',
+        'string-case.h',
         'string-search.h',
         'string-stream.cc',
         'string-stream.h',
@@ -1229,9 +1276,7 @@
         'transitions-inl.h',
         'transitions.cc',
         'transitions.h',
-        'type-feedback-vector-inl.h',
-        'type-feedback-vector.cc',
-        'type-feedback-vector.h',
+        'trap-handler/trap-handler.h',
         'type-hints.cc',
         'type-hints.h',
         'type-info.cc',
@@ -1260,20 +1305,23 @@
         'version.h',
         'vm-state-inl.h',
         'vm-state.h',
-        'wasm/ast-decoder.cc',
-        'wasm/ast-decoder.h',
         'wasm/decoder.h',
+        'wasm/function-body-decoder.cc',
+        'wasm/function-body-decoder.h',
+        'wasm/function-body-decoder-impl.h',
         'wasm/leb-helper.h',
-        'wasm/managed.h',
         'wasm/module-decoder.cc',
         'wasm/module-decoder.h',
         'wasm/signature-map.cc',
         'wasm/signature-map.h',
+        'wasm/wasm-code-specialization.h',
+        'wasm/wasm-code-specialization.cc',
         'wasm/wasm-debug.cc',
         'wasm/wasm-external-refs.cc',
         'wasm/wasm-external-refs.h',
         'wasm/wasm-js.cc',
         'wasm/wasm-js.h',
+        'wasm/wasm-limits.h',
         'wasm/wasm-macro-gen.h',
         'wasm/wasm-module.cc',
         'wasm/wasm-module.h',
@@ -1287,6 +1335,8 @@
         'wasm/wasm-opcodes.h',
         'wasm/wasm-result.cc',
         'wasm/wasm-result.h',
+        'wasm/wasm-text.cc',
+        'wasm/wasm-text.h',
         'zone/accounting-allocator.cc',
         'zone/accounting-allocator.h',
         'zone/zone-segment.cc',
@@ -1298,6 +1348,7 @@
         'zone/zone-segment.h',
         'zone/zone-allocator.h',
         'zone/zone-containers.h',
+        'zone/zone-handle-set.h',
       ],
       'conditions': [
         ['want_separate_host_toolset==1', {
@@ -1351,8 +1402,6 @@
             'ic/arm/access-compiler-arm.cc',
             'ic/arm/handler-compiler-arm.cc',
             'ic/arm/ic-arm.cc',
-            'ic/arm/ic-compiler-arm.cc',
-            'ic/arm/stub-cache-arm.cc',
             'regexp/arm/regexp-macro-assembler-arm.cc',
             'regexp/arm/regexp-macro-assembler-arm.h',
           ],
@@ -1411,8 +1460,6 @@
             'ic/arm64/access-compiler-arm64.cc',
             'ic/arm64/handler-compiler-arm64.cc',
             'ic/arm64/ic-arm64.cc',
-            'ic/arm64/ic-compiler-arm64.cc',
-            'ic/arm64/stub-cache-arm64.cc',
             'regexp/arm64/regexp-macro-assembler-arm64.cc',
             'regexp/arm64/regexp-macro-assembler-arm64.h',
           ],
@@ -1452,8 +1499,6 @@
             'ic/ia32/access-compiler-ia32.cc',
             'ic/ia32/handler-compiler-ia32.cc',
             'ic/ia32/ic-ia32.cc',
-            'ic/ia32/ic-compiler-ia32.cc',
-            'ic/ia32/stub-cache-ia32.cc',
             'regexp/ia32/regexp-macro-assembler-ia32.cc',
             'regexp/ia32/regexp-macro-assembler-ia32.h',
           ],
@@ -1493,8 +1538,6 @@
             'ic/x87/access-compiler-x87.cc',
             'ic/x87/handler-compiler-x87.cc',
             'ic/x87/ic-x87.cc',
-            'ic/x87/ic-compiler-x87.cc',
-            'ic/x87/stub-cache-x87.cc',
             'regexp/x87/regexp-macro-assembler-x87.cc',
             'regexp/x87/regexp-macro-assembler-x87.h',
           ],
@@ -1536,8 +1579,6 @@
             'ic/mips/access-compiler-mips.cc',
             'ic/mips/handler-compiler-mips.cc',
             'ic/mips/ic-mips.cc',
-            'ic/mips/ic-compiler-mips.cc',
-            'ic/mips/stub-cache-mips.cc',
             'regexp/mips/regexp-macro-assembler-mips.cc',
             'regexp/mips/regexp-macro-assembler-mips.h',
           ],
@@ -1579,8 +1620,6 @@
             'ic/mips64/access-compiler-mips64.cc',
             'ic/mips64/handler-compiler-mips64.cc',
             'ic/mips64/ic-mips64.cc',
-            'ic/mips64/ic-compiler-mips64.cc',
-            'ic/mips64/stub-cache-mips64.cc',
             'regexp/mips64/regexp-macro-assembler-mips64.cc',
             'regexp/mips64/regexp-macro-assembler-mips64.h',
           ],
@@ -1624,8 +1663,6 @@
             'ic/x64/access-compiler-x64.cc',
             'ic/x64/handler-compiler-x64.cc',
             'ic/x64/ic-x64.cc',
-            'ic/x64/ic-compiler-x64.cc',
-            'ic/x64/stub-cache-x64.cc',
             'regexp/x64/regexp-macro-assembler-x64.cc',
             'regexp/x64/regexp-macro-assembler-x64.h',
             'third_party/valgrind/valgrind.h',
@@ -1649,8 +1686,6 @@
             'ic/ppc/access-compiler-ppc.cc',
             'ic/ppc/handler-compiler-ppc.cc',
             'ic/ppc/ic-ppc.cc',
-            'ic/ppc/ic-compiler-ppc.cc',
-            'ic/ppc/stub-cache-ppc.cc',
             'ppc/assembler-ppc-inl.h',
             'ppc/assembler-ppc.cc',
             'ppc/assembler-ppc.h',
@@ -1691,9 +1726,7 @@
             'full-codegen/s390/full-codegen-s390.cc',
             'ic/s390/access-compiler-s390.cc',
             'ic/s390/handler-compiler-s390.cc',
-            'ic/s390/ic-compiler-s390.cc',
             'ic/s390/ic-s390.cc',
-            'ic/s390/stub-cache-s390.cc',
             'regexp/s390/regexp-macro-assembler-s390.cc',
             'regexp/s390/regexp-macro-assembler-s390.h',
             's390/assembler-s390.cc',
@@ -2231,7 +2264,6 @@
           'js/prologue.js',
           'js/runtime.js',
           'js/v8natives.js',
-          'js/symbol.js',
           'js/array.js',
           'js/string.js',
           'js/arraybuffer.js',
@@ -2244,7 +2276,7 @@
           'js/templates.js',
           'js/spread.js',
           'js/proxy.js',
-          'js/async-await.js',
+          'js/harmony-string-padding.js',
           'debug/mirrors.js',
           'debug/debug.js',
           'debug/liveedit.js',
@@ -2253,8 +2285,6 @@
           'js/macros.py',
           'messages.h',
           'js/harmony-atomics.js',
-          'js/harmony-simd.js',
-          'js/harmony-string-padding.js',
         ],
         'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
         'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
@@ -2263,10 +2293,6 @@
         'conditions': [
           ['v8_enable_i18n_support==1', {
             'library_files': ['js/i18n.js'],
-            'experimental_library_files': [
-              'js/datetime-format-to-parts.js',
-              'js/icu-case-mapping.js',
-             ],
           }],
         ],
       },
diff --git a/src/value-serializer.cc b/src/value-serializer.cc
index c6abb8a..7b17275 100644
--- a/src/value-serializer.cc
+++ b/src/value-serializer.cc
@@ -23,7 +23,14 @@
 namespace v8 {
 namespace internal {
 
-static const uint32_t kLatestVersion = 9;
+// Version 9: (imported from Blink)
+// Version 10: one-byte (Latin-1) strings
+// Version 11: properly separate undefined from the hole in arrays
+// Version 12: regexp and string objects share normal string encoding
+// Version 13: host objects have an explicit tag (rather than handling all
+//             unknown tags)
+static const uint32_t kLatestVersion = 13;
+
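// Sketch, not part of the patch: every stream written at this version starts
// with the version tag byte (0xFF — assumed here to be the kVersion tag
// defined with the other tags below) followed by the version as a varint,
// so a v13 header is just two bytes. Readers then gate the format changes
// listed above on the decoded value.
static const uint8_t kExpectedV13Header[] = {0xFF, 0x0D};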
 static const int kPretenureThreshold = 100 * KB;
 
 template <typename T>
@@ -46,6 +53,7 @@
   // refTableSize:uint32_t (previously used for sanity checks; safe to ignore)
   kVerifyObjectCount = '?',
   // Oddballs (no data).
+  kTheHole = '-',
   kUndefined = '_',
   kNull = '0',
   kTrue = 'T',
@@ -61,6 +69,7 @@
   kDouble = 'N',
   // byteLength:uint32_t, then raw data
   kUtf8String = 'S',
+  kOneByteString = '"',
   kTwoByteString = 'c',
   // Reference to a serialized object. objectID:uint32_t
   kObjectReference = '^',
@@ -110,13 +119,16 @@
   // ObjectReference to one) serialized just before it. This is a quirk arising
   // from the previous stack-based implementation.
   kArrayBufferView = 'V',
-  // Shared array buffer (transferred). transferID:uint32_t
-  kSharedArrayBufferTransfer = 'u',
+  // Shared array buffer. transferID:uint32_t
+  kSharedArrayBuffer = 'u',
   // Compiled WebAssembly module. encodingType:(one-byte tag).
   // If encodingType == 'y' (raw bytes):
   //  wasmWireByteLength:uint32_t, then raw data
   //  compiledDataLength:uint32_t, then raw data
   kWasmModule = 'W',
+  // The delegate is responsible for processing all following data.
+  // This "escapes" to whatever wire format the delegate chooses.
+  kHostObject = '\\',
 };
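// Illustrative framing example: each tag above is a single byte on the wire,
// usually followed by tag-specific payload. With the new kOneByteString tag,
// the Latin-1 string "hi" is the tag byte, a varint byte length, then the
// raw characters:
static const uint8_t kOneByteHi[] = {'"', 0x02, 'h', 'i'};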
 
 namespace {
@@ -145,8 +157,9 @@
     : isolate_(isolate),
       delegate_(delegate),
       zone_(isolate->allocator(), ZONE_NAME),
-      id_map_(isolate->heap(), &zone_),
-      array_buffer_transfer_map_(isolate->heap(), &zone_) {}
+      id_map_(isolate->heap(), ZoneAllocationPolicy(&zone_)),
+      array_buffer_transfer_map_(isolate->heap(),
+                                 ZoneAllocationPolicy(&zone_)) {}
 
 ValueSerializer::~ValueSerializer() {
   if (buffer_) {
@@ -163,6 +176,10 @@
   WriteVarint(kLatestVersion);
 }
 
+void ValueSerializer::SetTreatArrayBufferViewsAsHostObjects(bool mode) {
+  treat_array_buffer_views_as_host_objects_ = mode;
+}
+
 void ValueSerializer::WriteTag(SerializationTag tag) {
   uint8_t raw_tag = static_cast<uint8_t>(tag);
   WriteRawBytes(&raw_tag, sizeof(raw_tag));
@@ -217,18 +234,26 @@
 }
 
 void ValueSerializer::WriteRawBytes(const void* source, size_t length) {
-  memcpy(ReserveRawBytes(length), source, length);
+  uint8_t* dest;
+  if (ReserveRawBytes(length).To(&dest)) {
+    memcpy(dest, source, length);
+  }
 }
 
-uint8_t* ValueSerializer::ReserveRawBytes(size_t bytes) {
+Maybe<uint8_t*> ValueSerializer::ReserveRawBytes(size_t bytes) {
   size_t old_size = buffer_size_;
   size_t new_size = old_size + bytes;
-  if (new_size > buffer_capacity_) ExpandBuffer(new_size);
+  if (V8_UNLIKELY(new_size > buffer_capacity_)) {
+    bool ok;
+    if (!ExpandBuffer(new_size).To(&ok)) {
+      return Nothing<uint8_t*>();
+    }
+  }
   buffer_size_ = new_size;
-  return &buffer_[old_size];
+  return Just(&buffer_[old_size]);
 }
 
-void ValueSerializer::ExpandBuffer(size_t required_capacity) {
+Maybe<bool> ValueSerializer::ExpandBuffer(size_t required_capacity) {
   DCHECK_GT(required_capacity, buffer_capacity_);
   size_t requested_capacity =
       std::max(required_capacity, buffer_capacity_ * 2) + 64;
@@ -241,9 +266,15 @@
     new_buffer = realloc(buffer_, requested_capacity);
     provided_capacity = requested_capacity;
   }
-  DCHECK_GE(provided_capacity, requested_capacity);
-  buffer_ = reinterpret_cast<uint8_t*>(new_buffer);
-  buffer_capacity_ = provided_capacity;
+  if (new_buffer) {
+    DCHECK_GE(provided_capacity, requested_capacity);
+    buffer_ = reinterpret_cast<uint8_t*>(new_buffer);
+    buffer_capacity_ = provided_capacity;
+    return Just(true);
+  } else {
+    out_of_memory_ = true;
+    return Nothing<bool>();
+  }
 }
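// Standalone sketch of the allocation pattern introduced here (names are
// invented; assumes V8's Maybe/Just/Nothing helpers plus <cstdlib> and
// <algorithm>): failures surface as Nothing() and set a sticky flag, which
// WriteObject() later converts into a DataCloneError via ThrowIfOutOfMemory().
struct GrowableBuffer {
  uint8_t* data = nullptr;
  size_t size = 0;
  size_t capacity = 0;
  bool out_of_memory = false;

  Maybe<uint8_t*> Reserve(size_t bytes) {
    size_t old_size = size;
    size_t new_size = old_size + bytes;
    if (new_size > capacity) {
      size_t requested = std::max(new_size, capacity * 2) + 64;
      void* grown = realloc(data, requested);
      if (!grown) {
        out_of_memory = true;  // remembered until the caller checks the flag
        return Nothing<uint8_t*>();
      }
      data = static_cast<uint8_t*>(grown);
      capacity = requested;
    }
    size = new_size;
    return Just(&data[old_size]);
  }
};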
 
 void ValueSerializer::WriteUint32(uint32_t value) {
@@ -269,24 +300,26 @@
 void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
                                           Handle<JSArrayBuffer> array_buffer) {
   DCHECK(!array_buffer_transfer_map_.Find(array_buffer));
+  DCHECK(!array_buffer->is_shared());
   array_buffer_transfer_map_.Set(array_buffer, transfer_id);
 }
 
 Maybe<bool> ValueSerializer::WriteObject(Handle<Object> object) {
+  out_of_memory_ = false;
   if (object->IsSmi()) {
     WriteSmi(Smi::cast(*object));
-    return Just(true);
+    return ThrowIfOutOfMemory();
   }
 
   DCHECK(object->IsHeapObject());
   switch (HeapObject::cast(*object)->map()->instance_type()) {
     case ODDBALL_TYPE:
       WriteOddball(Oddball::cast(*object));
-      return Just(true);
+      return ThrowIfOutOfMemory();
     case HEAP_NUMBER_TYPE:
     case MUTABLE_HEAP_NUMBER_TYPE:
       WriteHeapNumber(HeapNumber::cast(*object));
-      return Just(true);
+      return ThrowIfOutOfMemory();
     case JS_TYPED_ARRAY_TYPE:
     case JS_DATA_VIEW_TYPE: {
       // Despite being JSReceivers, these have their wrapped buffer serialized
@@ -295,7 +328,7 @@
       // TODO(jbroman): It may be possible to avoid materializing a typed
       // array's buffer here.
       Handle<JSArrayBufferView> view = Handle<JSArrayBufferView>::cast(object);
-      if (!id_map_.Find(view)) {
+      if (!id_map_.Find(view) && !treat_array_buffer_views_as_host_objects_) {
         Handle<JSArrayBuffer> buffer(
             view->IsJSTypedArray()
                 ? Handle<JSTypedArray>::cast(view)->GetBuffer()
@@ -307,7 +340,7 @@
     default:
       if (object->IsString()) {
         WriteString(Handle<String>::cast(object));
-        return Just(true);
+        return ThrowIfOutOfMemory();
       } else if (object->IsJSReceiver()) {
         return WriteJSReceiver(Handle<JSReceiver>::cast(object));
       } else {
@@ -356,22 +389,9 @@
   String::FlatContent flat = string->GetFlatContent();
   DCHECK(flat.IsFlat());
   if (flat.IsOneByte()) {
-    // The existing format uses UTF-8, rather than Latin-1. As a result we
-    // must do extra work to encode strings that have characters outside ASCII.
-    // TODO(jbroman): In a future format version, consider adding a tag for
-    // Latin-1 strings, so that this can be skipped.
-    WriteTag(SerializationTag::kUtf8String);
     Vector<const uint8_t> chars = flat.ToOneByteVector();
-    if (String::IsAscii(chars.begin(), chars.length())) {
-      WriteOneByteString(chars);
-    } else {
-      v8::Local<v8::String> api_string = Utils::ToLocal(string);
-      uint32_t utf8_length = api_string->Utf8Length();
-      WriteVarint(utf8_length);
-      api_string->WriteUtf8(
-          reinterpret_cast<char*>(ReserveRawBytes(utf8_length)), utf8_length,
-          nullptr, v8::String::NO_NULL_TERMINATION);
-    }
+    WriteTag(SerializationTag::kOneByteString);
+    WriteOneByteString(chars);
   } else if (flat.IsTwoByte()) {
     Vector<const uc16> chars = flat.ToUC16Vector();
     uint32_t byte_length = chars.length() * sizeof(uc16);
@@ -391,7 +411,7 @@
   if (uint32_t id = *id_map_entry) {
     WriteTag(SerializationTag::kObjectReference);
     WriteVarint(id - 1);
-    return Just(true);
+    return ThrowIfOutOfMemory();
   }
 
   // Otherwise, allocate an ID for it.
@@ -400,7 +420,7 @@
 
   // Eliminate callable and exotic objects, which should not be serialized.
   InstanceType instance_type = receiver->map()->instance_type();
-  if (receiver->IsCallable() || (instance_type <= LAST_SPECIAL_RECEIVER_TYPE &&
+  if (receiver->IsCallable() || (IsSpecialReceiverInstanceType(instance_type) &&
                                  instance_type != JS_SPECIAL_API_OBJECT_TYPE)) {
     ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
     return Nothing<bool>();
@@ -417,7 +437,7 @@
     case JS_API_OBJECT_TYPE: {
       Handle<JSObject> js_object = Handle<JSObject>::cast(receiver);
       Map* map = js_object->map();
-      if (FLAG_expose_wasm &&
+      if (!FLAG_wasm_disable_structured_cloning &&
           map->GetConstructor() ==
               isolate_->native_context()->wasm_module_constructor()) {
         return WriteWasmModule(js_object);
@@ -431,18 +451,18 @@
       return WriteHostObject(Handle<JSObject>::cast(receiver));
     case JS_DATE_TYPE:
       WriteJSDate(JSDate::cast(*receiver));
-      return Just(true);
+      return ThrowIfOutOfMemory();
     case JS_VALUE_TYPE:
       return WriteJSValue(Handle<JSValue>::cast(receiver));
     case JS_REGEXP_TYPE:
       WriteJSRegExp(JSRegExp::cast(*receiver));
-      return Just(true);
+      return ThrowIfOutOfMemory();
     case JS_MAP_TYPE:
       return WriteJSMap(Handle<JSMap>::cast(receiver));
     case JS_SET_TYPE:
       return WriteJSSet(Handle<JSSet>::cast(receiver));
     case JS_ARRAY_BUFFER_TYPE:
-      return WriteJSArrayBuffer(JSArrayBuffer::cast(*receiver));
+      return WriteJSArrayBuffer(Handle<JSArrayBuffer>::cast(receiver));
     case JS_TYPED_ARRAY_TYPE:
     case JS_DATA_VIEW_TYPE:
       return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
@@ -474,7 +494,8 @@
 
     Handle<Object> value;
     if (V8_LIKELY(!map_changed)) map_changed = *map != object->map();
-    if (V8_LIKELY(!map_changed && details.type() == DATA)) {
+    if (V8_LIKELY(!map_changed && details.location() == kField)) {
+      DCHECK_EQ(kData, details.kind());
       FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
       value = JSObject::FastPropertyAt(object, details.representation(),
                                        field_index);
@@ -496,7 +517,7 @@
 
   WriteTag(SerializationTag::kEndJSObject);
   WriteVarint<uint32_t>(properties_written);
-  return Just(true);
+  return ThrowIfOutOfMemory();
 }
 
 Maybe<bool> ValueSerializer::WriteJSObjectSlow(Handle<JSObject> object) {
@@ -511,7 +532,7 @@
   }
   WriteTag(SerializationTag::kEndJSObject);
   WriteVarint<uint32_t>(properties_written);
-  return Just(true);
+  return ThrowIfOutOfMemory();
 }
 
 Maybe<bool> ValueSerializer::WriteJSArray(Handle<JSArray> array) {
@@ -530,10 +551,6 @@
 
   if (should_serialize_densely) {
     DCHECK_LE(length, static_cast<uint32_t>(FixedArray::kMaxLength));
-
-    // TODO(jbroman): Distinguish between undefined and a hole (this can happen
-    // if serializing one of the elements deletes another). This requires wire
-    // format changes.
     WriteTag(SerializationTag::kBeginDenseJSArray);
     WriteVarint<uint32_t>(length);
     uint32_t i = 0;
@@ -548,6 +565,9 @@
         break;
       }
       case FAST_DOUBLE_ELEMENTS: {
+        // Elements are empty_fixed_array, not a FixedDoubleArray, if the array
+        // is empty. No elements to encode in this case anyhow.
+        if (length == 0) break;
         Handle<FixedDoubleArray> elements(
             FixedDoubleArray::cast(array->elements()), isolate_);
         for (; i < length; i++) {
@@ -581,6 +601,13 @@
       // with.
       Handle<Object> element;
       LookupIterator it(isolate_, array, i, array, LookupIterator::OWN);
+      if (!it.IsFound()) {
+        // This can happen in the case where an array that was originally dense
+        // became sparse during serialization. It's too late to switch to the
+        // sparse format, but we can mark the elements as absent.
+        WriteTag(SerializationTag::kTheHole);
+        continue;
+      }
       if (!Object::GetProperty(&it).ToHandle(&element) ||
           !WriteObject(element).FromMaybe(false)) {
         return Nothing<bool>();
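// One concrete (hypothetical) way to reach the kTheHole branch above:
// serializing a[0] runs a getter whose side effect deletes a[1], so by the
// time the dense walk reaches index 1 the lookup finds nothing.
//
//   var a = [{ get x() { delete a[1]; return 0; } }, 1];
//   // After a round trip, index 1 is a hole (absent), not undefined.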
@@ -616,7 +643,7 @@
     WriteVarint<uint32_t>(properties_written);
     WriteVarint<uint32_t>(length);
   }
-  return Just(true);
+  return ThrowIfOutOfMemory();
 }
 
 void ValueSerializer::WriteJSDate(JSDate* date) {
@@ -634,32 +661,19 @@
     WriteTag(SerializationTag::kNumberObject);
     WriteDouble(inner_value->Number());
   } else if (inner_value->IsString()) {
-    // TODO(jbroman): Replace UTF-8 encoding with the same options available for
-    // ordinary strings.
     WriteTag(SerializationTag::kStringObject);
-    v8::Local<v8::String> api_string =
-        Utils::ToLocal(handle(String::cast(inner_value), isolate_));
-    uint32_t utf8_length = api_string->Utf8Length();
-    WriteVarint(utf8_length);
-    api_string->WriteUtf8(reinterpret_cast<char*>(ReserveRawBytes(utf8_length)),
-                          utf8_length, nullptr,
-                          v8::String::NO_NULL_TERMINATION);
+    WriteString(handle(String::cast(inner_value), isolate_));
   } else {
     DCHECK(inner_value->IsSymbol());
     ThrowDataCloneError(MessageTemplate::kDataCloneError, value);
     return Nothing<bool>();
   }
-  return Just(true);
+  return ThrowIfOutOfMemory();
 }
 
 void ValueSerializer::WriteJSRegExp(JSRegExp* regexp) {
   WriteTag(SerializationTag::kRegExp);
-  v8::Local<v8::String> api_string =
-      Utils::ToLocal(handle(regexp->Pattern(), isolate_));
-  uint32_t utf8_length = api_string->Utf8Length();
-  WriteVarint(utf8_length);
-  api_string->WriteUtf8(reinterpret_cast<char*>(ReserveRawBytes(utf8_length)),
-                        utf8_length, nullptr, v8::String::NO_NULL_TERMINATION);
+  WriteString(handle(regexp->Pattern(), isolate_));
   WriteVarint(static_cast<uint32_t>(regexp->GetFlags()));
 }
 
@@ -691,7 +705,7 @@
   }
   WriteTag(SerializationTag::kEndJSMap);
   WriteVarint<uint32_t>(length);
-  return Just(true);
+  return ThrowIfOutOfMemory();
 }
 
 Maybe<bool> ValueSerializer::WriteJSSet(Handle<JSSet> set) {
@@ -721,23 +735,32 @@
   }
   WriteTag(SerializationTag::kEndJSSet);
   WriteVarint<uint32_t>(length);
-  return Just(true);
+  return ThrowIfOutOfMemory();
 }
 
-Maybe<bool> ValueSerializer::WriteJSArrayBuffer(JSArrayBuffer* array_buffer) {
-  uint32_t* transfer_entry = array_buffer_transfer_map_.Find(array_buffer);
-  if (transfer_entry) {
-    WriteTag(array_buffer->is_shared()
-                 ? SerializationTag::kSharedArrayBufferTransfer
-                 : SerializationTag::kArrayBufferTransfer);
-    WriteVarint(*transfer_entry);
-    return Just(true);
+Maybe<bool> ValueSerializer::WriteJSArrayBuffer(
+    Handle<JSArrayBuffer> array_buffer) {
+  if (array_buffer->is_shared()) {
+    if (!delegate_) {
+      ThrowDataCloneError(MessageTemplate::kDataCloneError, array_buffer);
+      return Nothing<bool>();
+    }
+
+    v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+    Maybe<uint32_t> index = delegate_->GetSharedArrayBufferId(
+        v8_isolate, Utils::ToLocalShared(array_buffer));
+    RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
+
+    WriteTag(SerializationTag::kSharedArrayBuffer);
+    WriteVarint(index.FromJust());
+    return ThrowIfOutOfMemory();
   }
 
-  if (array_buffer->is_shared()) {
-    ThrowDataCloneError(
-        MessageTemplate::kDataCloneErrorSharedArrayBufferNotTransferred);
-    return Nothing<bool>();
+  uint32_t* transfer_entry = array_buffer_transfer_map_.Find(array_buffer);
+  if (transfer_entry) {
+    WriteTag(SerializationTag::kArrayBufferTransfer);
+    WriteVarint(*transfer_entry);
+    return ThrowIfOutOfMemory();
   }
   if (array_buffer->was_neutered()) {
     ThrowDataCloneError(MessageTemplate::kDataCloneErrorNeuteredArrayBuffer);
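// Embedder-side sketch of the delegate hook used above (class name and
// bookkeeping are hypothetical; assumes the v8::ValueSerializer::Delegate
// API with GetSharedArrayBufferId): the host hands back a stable index per
// SharedArrayBuffer so the receiving side can attach the same backing store.
class MySerializerDelegate : public v8::ValueSerializer::Delegate {
 public:
  v8::Maybe<uint32_t> GetSharedArrayBufferId(
      v8::Isolate* isolate,
      v8::Local<v8::SharedArrayBuffer> shared) override {
    // Reuse an existing id if this buffer was already registered.
    for (uint32_t i = 0; i < shared_buffers_.size(); i++) {
      if (shared_buffers_[i] == shared) return v8::Just(i);
    }
    shared_buffers_.emplace_back(isolate, shared);
    return v8::Just(static_cast<uint32_t>(shared_buffers_.size() - 1));
  }

 private:
  std::vector<v8::Global<v8::SharedArrayBuffer>> shared_buffers_;
};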
@@ -745,16 +768,19 @@
   }
   double byte_length = array_buffer->byte_length()->Number();
   if (byte_length > std::numeric_limits<uint32_t>::max()) {
-    ThrowDataCloneError(MessageTemplate::kDataCloneError, handle(array_buffer));
+    ThrowDataCloneError(MessageTemplate::kDataCloneError, array_buffer);
     return Nothing<bool>();
   }
   WriteTag(SerializationTag::kArrayBuffer);
   WriteVarint<uint32_t>(byte_length);
   WriteRawBytes(array_buffer->backing_store(), byte_length);
-  return Just(true);
+  return ThrowIfOutOfMemory();
 }
 
 Maybe<bool> ValueSerializer::WriteJSArrayBufferView(JSArrayBufferView* view) {
+  if (treat_array_buffer_views_as_host_objects_) {
+    return WriteHostObject(handle(view, isolate_));
+  }
   WriteTag(SerializationTag::kArrayBufferView);
   ArrayBufferViewTag tag = ArrayBufferViewTag::kInt8Array;
   if (view->IsJSTypedArray()) {
@@ -773,7 +799,7 @@
   WriteVarint(static_cast<uint8_t>(tag));
   WriteVarint(NumberToUint32(view->byte_offset()));
   WriteVarint(NumberToUint32(view->byte_length()));
-  return Just(true);
+  return ThrowIfOutOfMemory();
 }
 
 Maybe<bool> ValueSerializer::WriteWasmModule(Handle<JSObject> object) {
@@ -783,11 +809,13 @@
   WriteTag(SerializationTag::kWasmModule);
   WriteRawBytes(&encoding_tag, sizeof(encoding_tag));
 
-  Handle<String> wire_bytes = compiled_part->module_bytes();
+  Handle<String> wire_bytes(compiled_part->module_bytes(), isolate_);
   int wire_bytes_length = wire_bytes->length();
   WriteVarint<uint32_t>(wire_bytes_length);
-  uint8_t* destination = ReserveRawBytes(wire_bytes_length);
-  String::WriteToFlat(*wire_bytes, destination, 0, wire_bytes_length);
+  uint8_t* destination;
+  if (ReserveRawBytes(wire_bytes_length).To(&destination)) {
+    String::WriteToFlat(*wire_bytes, destination, 0, wire_bytes_length);
+  }
 
   std::unique_ptr<ScriptData> script_data =
       WasmCompiledModuleSerializer::SerializeWasmModule(isolate_,
@@ -796,10 +824,11 @@
   WriteVarint<uint32_t>(script_data_length);
   WriteRawBytes(script_data->data(), script_data_length);
 
-  return Just(true);
+  return ThrowIfOutOfMemory();
 }
 
 Maybe<bool> ValueSerializer::WriteHostObject(Handle<JSObject> object) {
+  WriteTag(SerializationTag::kHostObject);
   if (!delegate_) {
     isolate_->Throw(*isolate_->factory()->NewError(
         isolate_->error_function(), MessageTemplate::kDataCloneError, object));
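// Matching sketch for the explicit host-object tag (hypothetical names;
// assumes the public v8::ValueSerializer::WriteUint32 passthrough): V8 now
// writes kHostObject ('\\') itself, after which the delegate owns the wire
// format until its ReadHostObject counterpart consumes the same bytes.
v8::Maybe<bool> MySerializerDelegate::WriteHostObject(
    v8::Isolate* isolate, v8::Local<v8::Object> object) {
  uint32_t type_id = LookUpHostTypeId(object);  // embedder bookkeeping (hypothetical)
  serializer_->WriteUint32(type_id);  // serializer_: stashed v8::ValueSerializer*
  return v8::Just(true);
}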
@@ -847,6 +876,14 @@
                              isolate_->factory()->empty_string());
 }
 
+Maybe<bool> ValueSerializer::ThrowIfOutOfMemory() {
+  if (out_of_memory_) {
+    ThrowDataCloneError(MessageTemplate::kDataCloneErrorOutOfMemory);
+    return Nothing<bool>();
+  }
+  return Just(true);
+}
+
 void ValueSerializer::ThrowDataCloneError(
     MessageTemplate::Template template_index, Handle<Object> arg0) {
   Handle<String> message =
@@ -1006,10 +1043,10 @@
   }
   Handle<SeededNumberDictionary> dictionary =
       array_buffer_transfer_map_.ToHandleChecked();
-  const bool used_as_prototype = false;
+  Handle<JSObject> not_a_prototype_holder;
   Handle<SeededNumberDictionary> new_dictionary =
       SeededNumberDictionary::AtNumberPut(dictionary, transfer_id, array_buffer,
-                                          used_as_prototype);
+                                          not_a_prototype_holder);
   if (!new_dictionary.is_identical_to(dictionary)) {
     GlobalHandles::Destroy(Handle<Object>::cast(dictionary).location());
     array_buffer_transfer_map_ = Handle<SeededNumberDictionary>::cast(
@@ -1073,6 +1110,8 @@
     }
     case SerializationTag::kUtf8String:
       return ReadUtf8String();
+    case SerializationTag::kOneByteString:
+      return ReadOneByteString();
     case SerializationTag::kTwoByteString:
       return ReadTwoByteString();
     case SerializationTag::kObjectReference: {
@@ -1105,48 +1144,79 @@
       const bool is_shared = false;
       return ReadTransferredJSArrayBuffer(is_shared);
     }
-    case SerializationTag::kSharedArrayBufferTransfer: {
+    case SerializationTag::kSharedArrayBuffer: {
       const bool is_shared = true;
       return ReadTransferredJSArrayBuffer(is_shared);
     }
     case SerializationTag::kWasmModule:
       return ReadWasmModule();
-    default:
-      // TODO(jbroman): Introduce an explicit tag for host objects to avoid
-      // having to treat every unknown tag as a potential host object.
-      position_--;
+    case SerializationTag::kHostObject:
       return ReadHostObject();
+    default:
+      // Before there was an explicit tag for host objects, all unknown tags
+      // were delegated to the host.
+      if (version_ < 13) {
+        position_--;
+        return ReadHostObject();
+      }
+      return MaybeHandle<Object>();
   }
 }
 
+MaybeHandle<String> ValueDeserializer::ReadString() {
+  if (version_ < 12) return ReadUtf8String();
+  Handle<Object> object;
+  if (!ReadObject().ToHandle(&object) || !object->IsString()) {
+    return MaybeHandle<String>();
+  }
+  return Handle<String>::cast(object);
+}
+
 MaybeHandle<String> ValueDeserializer::ReadUtf8String() {
   uint32_t utf8_length;
   Vector<const uint8_t> utf8_bytes;
   if (!ReadVarint<uint32_t>().To(&utf8_length) ||
       utf8_length >
           static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
-      !ReadRawBytes(utf8_length).To(&utf8_bytes))
+      !ReadRawBytes(utf8_length).To(&utf8_bytes)) {
     return MaybeHandle<String>();
+  }
   return isolate_->factory()->NewStringFromUtf8(
       Vector<const char>::cast(utf8_bytes), pretenure_);
 }
 
+MaybeHandle<String> ValueDeserializer::ReadOneByteString() {
+  uint32_t byte_length;
+  Vector<const uint8_t> bytes;
+  if (!ReadVarint<uint32_t>().To(&byte_length) ||
+      byte_length >
+          static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
+      !ReadRawBytes(byte_length).To(&bytes)) {
+    return MaybeHandle<String>();
+  }
+  return isolate_->factory()->NewStringFromOneByte(bytes, pretenure_);
+}
+
 MaybeHandle<String> ValueDeserializer::ReadTwoByteString() {
   uint32_t byte_length;
   Vector<const uint8_t> bytes;
   if (!ReadVarint<uint32_t>().To(&byte_length) ||
       byte_length >
           static_cast<uint32_t>(std::numeric_limits<int32_t>::max()) ||
-      byte_length % sizeof(uc16) != 0 || !ReadRawBytes(byte_length).To(&bytes))
+      byte_length % sizeof(uc16) != 0 ||
+      !ReadRawBytes(byte_length).To(&bytes)) {
     return MaybeHandle<String>();
+  }
 
   // Allocate an uninitialized string so that we can do a raw memcpy into the
   // string on the heap (regardless of alignment).
+  if (byte_length == 0) return isolate_->factory()->empty_string();
   Handle<SeqTwoByteString> string;
   if (!isolate_->factory()
            ->NewRawTwoByteString(byte_length / sizeof(uc16), pretenure_)
-           .ToHandle(&string))
+           .ToHandle(&string)) {
     return MaybeHandle<String>();
+  }
 
   // Copy the bytes directly into the new string.
   // Warning: this uses host endianness.
@@ -1269,10 +1339,20 @@
 
   Handle<FixedArray> elements(FixedArray::cast(array->elements()), isolate_);
   for (uint32_t i = 0; i < length; i++) {
+    SerializationTag tag;
+    if (PeekTag().To(&tag) && tag == SerializationTag::kTheHole) {
+      ConsumeTag(SerializationTag::kTheHole);
+      continue;
+    }
+
     Handle<Object> element;
     if (!ReadObject().ToHandle(&element)) return MaybeHandle<JSArray>();
-    // TODO(jbroman): Distinguish between undefined and a hole.
-    if (element->IsUndefined(isolate_)) continue;
+
+    // Serialization versions less than 11 encode the hole the same as
+    // undefined. For consistency with previous behavior, store these as the
+    // hole. From version 11 onward, undefined means undefined.
+    if (version_ < 11 && element->IsUndefined(isolate_)) continue;
+
     elements->set(i, *element);
   }
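// Net effect of the version gate above (illustrative):
//   v11+ payloads:  kUndefined decodes to undefined; kTheHole leaves a hole.
//   v<11 payloads:  undefined elements decode to holes, as they always did.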
 
@@ -1330,7 +1410,7 @@
     }
     case SerializationTag::kStringObject: {
       Handle<String> string;
-      if (!ReadUtf8String().ToHandle(&string)) return MaybeHandle<JSValue>();
+      if (!ReadString().ToHandle(&string)) return MaybeHandle<JSValue>();
       value = Handle<JSValue>::cast(isolate_->factory()->NewJSObject(
           isolate_->string_function(), pretenure_));
       value->set_value(*string);
@@ -1349,7 +1429,7 @@
   Handle<String> pattern;
   uint32_t raw_flags;
   Handle<JSRegExp> regexp;
-  if (!ReadUtf8String().ToHandle(&pattern) ||
+  if (!ReadString().ToHandle(&pattern) ||
       !ReadVarint<uint32_t>().To(&raw_flags) ||
       !JSRegExp::New(pattern, static_cast<JSRegExp::Flags>(raw_flags))
            .ToHandle(&regexp)) {
@@ -1443,8 +1523,10 @@
   const bool should_initialize = false;
   Handle<JSArrayBuffer> array_buffer =
       isolate_->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, pretenure_);
-  JSArrayBuffer::SetupAllocatingData(array_buffer, isolate_, byte_length,
-                                     should_initialize);
+  if (!JSArrayBuffer::SetupAllocatingData(array_buffer, isolate_, byte_length,
+                                          should_initialize)) {
+    return MaybeHandle<JSArrayBuffer>();
+  }
   memcpy(array_buffer->backing_store(), position_, byte_length);
   position_ += byte_length;
   AddObjectWithID(id, array_buffer);
@@ -1514,7 +1596,7 @@
 }
 
 MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
-  if (!FLAG_expose_wasm) return MaybeHandle<JSObject>();
+  if (FLAG_wasm_disable_structured_cloning) return MaybeHandle<JSObject>();
 
   Vector<const uint8_t> encoding_tag;
   if (!ReadRawBytes(sizeof(WasmEncodingTag)).To(&encoding_tag) ||
@@ -1551,11 +1633,14 @@
   }
 
   // If that fails, recompile.
-  wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
-  return wasm::CreateModuleObjectFromBytes(
-      isolate_, wire_bytes.begin(), wire_bytes.end(), &thrower,
-      wasm::ModuleOrigin::kWasmOrigin, Handle<Script>::null(), nullptr,
-      nullptr);
+  MaybeHandle<JSObject> result;
+  {
+    wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
+    result = wasm::SyncCompile(isolate_, &thrower,
+                               wasm::ModuleWireBytes(wire_bytes));
+  }
+  RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate_, JSObject);
+  return result;
 }
 
 MaybeHandle<JSObject> ValueDeserializer::ReadHostObject() {
@@ -1584,10 +1669,15 @@
   DisallowHeapAllocation no_gc;
   DescriptorArray* descriptors = object->map()->instance_descriptors();
   for (unsigned i = 0; i < properties.size(); i++) {
+    // Initializing store.
     object->WriteToField(i, descriptors->GetDetails(i), *properties[i]);
   }
 }
 
+static bool IsValidObjectKey(Handle<Object> value) {
+  return value->IsName() || value->IsNumber();
+}
+
 Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
     Handle<JSObject> object, SerializationTag end_tag,
     bool can_use_transitions) {
@@ -1623,7 +1713,9 @@
         key = expected_key;
         target = TransitionArray::ExpectedTransitionTarget(map);
       } else {
-        if (!ReadObject().ToHandle(&key)) return Nothing<uint32_t>();
+        if (!ReadObject().ToHandle(&key) || !IsValidObjectKey(key)) {
+          return Nothing<uint32_t>();
+        }
         if (key->IsString()) {
           key =
               isolate_->factory()->InternalizeString(Handle<String>::cast(key));
@@ -1654,8 +1746,8 @@
                    ->NowContains(value)) {
             Handle<FieldType> value_type =
                 value->OptimalType(isolate_, expected_representation);
-            Map::GeneralizeFieldType(target, descriptor,
-                                     expected_representation, value_type);
+            Map::GeneralizeField(target, descriptor, details.constness(),
+                                 expected_representation, value_type);
           }
           DCHECK(target->instance_descriptors()
                      ->GetFieldType(descriptor)
@@ -1703,7 +1795,9 @@
     }
 
     Handle<Object> key;
-    if (!ReadObject().ToHandle(&key)) return Nothing<uint32_t>();
+    if (!ReadObject().ToHandle(&key) || !IsValidObjectKey(key)) {
+      return Nothing<uint32_t>();
+    }
     Handle<Object> value;
     if (!ReadObject().ToHandle(&value)) return Nothing<uint32_t>();
 
@@ -1752,6 +1846,7 @@
                                                   uint32_t num_properties) {
   for (unsigned i = 0; i < 2 * num_properties; i += 2) {
     Handle<Object> key = data[i];
+    if (!IsValidObjectKey(key)) return Nothing<bool>();
     Handle<Object> value = data[i + 1];
     bool success;
     LookupIterator it = LookupIterator::PropertyOrElement(
@@ -1765,6 +1860,20 @@
   return Just(true);
 }
 
+namespace {
+
+// Throws a generic "deserialization failed" exception, unless a more specific
+// exception has already been thrown.
+void ThrowDeserializationExceptionIfNonePending(Isolate* isolate) {
+  if (!isolate->has_pending_exception()) {
+    isolate->Throw(*isolate->factory()->NewError(
+        MessageTemplate::kDataCloneDeserializationError));
+  }
+  DCHECK(isolate->has_pending_exception());
+}
+
+}  // namespace
+
 MaybeHandle<Object>
 ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
   DCHECK_EQ(version_, 0u);
@@ -1797,7 +1906,7 @@
             !SetPropertiesFromKeyValuePairs(
                  isolate_, js_object, &stack[begin_properties], num_properties)
                  .FromMaybe(false)) {
-          DCHECK(isolate_->has_pending_exception());
+          ThrowDeserializationExceptionIfNonePending(isolate_);
           return MaybeHandle<Object>();
         }
 
@@ -1828,7 +1937,7 @@
             !SetPropertiesFromKeyValuePairs(
                  isolate_, js_array, &stack[begin_properties], num_properties)
                  .FromMaybe(false)) {
-          DCHECK(isolate_->has_pending_exception());
+          ThrowDeserializationExceptionIfNonePending(isolate_);
           return MaybeHandle<Object>();
         }
 
diff --git a/src/value-serializer.h b/src/value-serializer.h
index 86e21cf..7961b2e 100644
--- a/src/value-serializer.h
+++ b/src/value-serializer.h
@@ -84,9 +84,18 @@
   void WriteRawBytes(const void* source, size_t length);
   void WriteDouble(double value);
 
+  /*
+   * Indicate whether to treat ArrayBufferView objects as host objects,
+   * i.e. pass them to Delegate::WriteHostObject. This should not be
+   * called when no Delegate was passed.
+   *
+   * The default is not to treat ArrayBufferViews as host objects.
+   */
+  void SetTreatArrayBufferViewsAsHostObjects(bool mode);
+
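// Usage sketch (illustrative): an embedder that frames views itself flips
// this right after constructing the serializer, before any WriteObject call:
//
//   ValueSerializer serializer(isolate, delegate);
//   serializer.SetTreatArrayBufferViewsAsHostObjects(true);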
  private:
   // Managing allocations of the internal buffer.
-  void ExpandBuffer(size_t required_capacity);
+  Maybe<bool> ExpandBuffer(size_t required_capacity);
 
   // Writing the wire format.
   void WriteTag(SerializationTag tag);
@@ -96,7 +105,7 @@
   void WriteZigZag(T value);
   void WriteOneByteString(Vector<const uint8_t> chars);
   void WriteTwoByteString(Vector<const uc16> chars);
-  uint8_t* ReserveRawBytes(size_t bytes);
+  Maybe<uint8_t*> ReserveRawBytes(size_t bytes);
 
   // Writing V8 objects of various kinds.
   void WriteOddball(Oddball* oddball);
@@ -112,7 +121,8 @@
   void WriteJSRegExp(JSRegExp* regexp);
   Maybe<bool> WriteJSMap(Handle<JSMap> map) WARN_UNUSED_RESULT;
   Maybe<bool> WriteJSSet(Handle<JSSet> map) WARN_UNUSED_RESULT;
-  Maybe<bool> WriteJSArrayBuffer(JSArrayBuffer* array_buffer);
+  Maybe<bool> WriteJSArrayBuffer(Handle<JSArrayBuffer> array_buffer)
+      WARN_UNUSED_RESULT;
   Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView* array_buffer);
   Maybe<bool> WriteWasmModule(Handle<JSObject> object) WARN_UNUSED_RESULT;
   Maybe<bool> WriteHostObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
@@ -133,21 +143,25 @@
   V8_NOINLINE void ThrowDataCloneError(MessageTemplate::Template template_index,
                                        Handle<Object> arg0);
 
+  Maybe<bool> ThrowIfOutOfMemory();
+
   Isolate* const isolate_;
   v8::ValueSerializer::Delegate* const delegate_;
+  bool treat_array_buffer_views_as_host_objects_ = false;
   uint8_t* buffer_ = nullptr;
   size_t buffer_size_ = 0;
   size_t buffer_capacity_ = 0;
+  bool out_of_memory_ = false;
   Zone zone_;
 
   // To avoid extra lookups in the identity map, ID+1 is actually stored in the
   // map (checking if the used identity is zero is the fast way of checking if
   // the entry is new).
-  IdentityMap<uint32_t> id_map_;
+  IdentityMap<uint32_t, ZoneAllocationPolicy> id_map_;
   uint32_t next_id_ = 0;
 
   // A similar map, for transferred array buffers.
-  IdentityMap<uint32_t> array_buffer_transfer_map_;
+  IdentityMap<uint32_t, ZoneAllocationPolicy> array_buffer_transfer_map_;
 
   DISALLOW_COPY_AND_ASSIGN(ValueSerializer);
 };
@@ -225,9 +239,15 @@
   // "stack machine".
   MaybeHandle<Object> ReadObjectInternal() WARN_UNUSED_RESULT;
 
+  // Reads a string intended to be part of a more complicated object.
+  // Before v12, these are UTF-8 strings. From v12 onward, they can use any
+  // encoding permissible for a string (signaled by the relevant tag).
+  MaybeHandle<String> ReadString() WARN_UNUSED_RESULT;
+
   // Reading V8 objects of specific kinds.
   // The tag is assumed to have already been read.
   MaybeHandle<String> ReadUtf8String() WARN_UNUSED_RESULT;
+  MaybeHandle<String> ReadOneByteString() WARN_UNUSED_RESULT;
   MaybeHandle<String> ReadTwoByteString() WARN_UNUSED_RESULT;
   MaybeHandle<JSObject> ReadJSObject() WARN_UNUSED_RESULT;
   MaybeHandle<JSArray> ReadSparseJSArray() WARN_UNUSED_RESULT;
diff --git a/src/vector.h b/src/vector.h
index 080f89e..eb58083 100644
--- a/src/vector.h
+++ b/src/vector.h
@@ -33,7 +33,7 @@
 
   // Returns a vector using the same backing storage as this one,
   // spanning from and including 'from', to but not including 'to'.
-  Vector<T> SubVector(int from, int to) {
+  Vector<T> SubVector(int from, int to) const {
     DCHECK(0 <= from);
     SLOW_DCHECK(from < to);
     SLOW_DCHECK(static_cast<unsigned>(to) <= static_cast<unsigned>(length_));
@@ -119,6 +119,9 @@
     return Vector<T>(start_ + offset, length_ - offset);
   }
 
+  // Implicit conversion from Vector<T> to Vector<const T>.
+  inline operator Vector<const T>() { return Vector<const T>::cast(*this); }
+
   // Factory method for creating empty vectors.
   static Vector<T> empty() { return Vector<T>(NULL, 0); }
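// Usage sketch for the implicit conversion added above (hypothetical call
// site): a mutable vector can now be passed straight to a const-view API.
//
//   void Dump(Vector<const uint8_t> bytes);
//   Vector<uint8_t> scratch = ...;
//   Dump(scratch);  // previously needed Vector<const uint8_t>::cast(scratch)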
 
diff --git a/src/version.cc b/src/version.cc
index 7305bf2..3252d55 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -4,6 +4,7 @@
 
 #include "src/version.h"
 
+#include "include/v8-version-string.h"
 #include "include/v8-version.h"
 #include "src/utils.h"
 
@@ -12,25 +13,6 @@
 // number. This define is mainly used by the build system script.
 #define SONAME            ""
 
-#if V8_IS_CANDIDATE_VERSION
-#define CANDIDATE_STRING " (candidate)"
-#else
-#define CANDIDATE_STRING ""
-#endif
-
-#define SX(x) #x
-#define S(x) SX(x)
-
-#if V8_PATCH_LEVEL > 0
-#define VERSION_STRING                                                      \
-  S(V8_MAJOR_VERSION) "." S(V8_MINOR_VERSION) "." S(V8_BUILD_NUMBER) "." S( \
-      V8_PATCH_LEVEL) CANDIDATE_STRING
-#else
-#define VERSION_STRING                                               \
-  S(V8_MAJOR_VERSION) "." S(V8_MINOR_VERSION) "." S(V8_BUILD_NUMBER) \
-      CANDIDATE_STRING
-#endif
-
 namespace v8 {
 namespace internal {
 
@@ -40,7 +22,7 @@
 int Version::patch_ = V8_PATCH_LEVEL;
 bool Version::candidate_ = (V8_IS_CANDIDATE_VERSION != 0);
 const char* Version::soname_ = SONAME;
-const char* Version::version_string_ = VERSION_STRING;
+const char* Version::version_string_ = V8_VERSION_STRING;
 
 // Calculate the V8 version string.
 void Version::GetString(Vector<char> str) {
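// Call sites are unchanged by the switch to V8_VERSION_STRING (sketch):
//
//   char buffer[128];
//   Version::GetString(Vector<char>(buffer, sizeof(buffer)));
//   // buffer now holds e.g. "6.0.0 (candidate)", assembled centrally in
//   // include/v8-version-string.h instead of by the macros deleted above.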
diff --git a/src/wasm/OWNERS b/src/wasm/OWNERS
index 2822c29..4f54661 100644
--- a/src/wasm/OWNERS
+++ b/src/wasm/OWNERS
@@ -2,6 +2,7 @@
 
 ahaas@chromium.org
 bradnelson@chromium.org
+clemensh@chromium.org
 mtrofin@chromium.org
 rossberg@chromium.org
 titzer@chromium.org
diff --git a/src/wasm/ast-decoder.h b/src/wasm/ast-decoder.h
deleted file mode 100644
index 9ce323e..0000000
--- a/src/wasm/ast-decoder.h
+++ /dev/null
@@ -1,444 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_AST_DECODER_H_
-#define V8_WASM_AST_DECODER_H_
-
-#include "src/base/compiler-specific.h"
-#include "src/globals.h"
-#include "src/signature.h"
-#include "src/wasm/decoder.h"
-#include "src/wasm/wasm-opcodes.h"
-#include "src/wasm/wasm-result.h"
-
-namespace v8 {
-namespace internal {
-
-class BitVector;  // forward declaration
-
-namespace compiler {  // external declarations from compiler.
-class WasmGraphBuilder;
-}
-
-namespace wasm {
-
-const uint32_t kMaxNumWasmLocals = 8000000;
-struct WasmGlobal;
-
-// Helpers for decoding different kinds of operands which follow bytecodes.
-struct LocalIndexOperand {
-  uint32_t index;
-  LocalType type;
-  unsigned length;
-
-  inline LocalIndexOperand(Decoder* decoder, const byte* pc) {
-    index = decoder->checked_read_u32v(pc, 1, &length, "local index");
-    type = kAstStmt;
-  }
-};
-
-struct ImmI8Operand {
-  int8_t value;
-  unsigned length;
-  inline ImmI8Operand(Decoder* decoder, const byte* pc) {
-    value = bit_cast<int8_t>(decoder->checked_read_u8(pc, 1, "immi8"));
-    length = 1;
-  }
-};
-
-struct ImmI32Operand {
-  int32_t value;
-  unsigned length;
-  inline ImmI32Operand(Decoder* decoder, const byte* pc) {
-    value = decoder->checked_read_i32v(pc, 1, &length, "immi32");
-  }
-};
-
-struct ImmI64Operand {
-  int64_t value;
-  unsigned length;
-  inline ImmI64Operand(Decoder* decoder, const byte* pc) {
-    value = decoder->checked_read_i64v(pc, 1, &length, "immi64");
-  }
-};
-
-struct ImmF32Operand {
-  float value;
-  unsigned length;
-  inline ImmF32Operand(Decoder* decoder, const byte* pc) {
-    value = bit_cast<float>(decoder->checked_read_u32(pc, 1, "immf32"));
-    length = 4;
-  }
-};
-
-struct ImmF64Operand {
-  double value;
-  unsigned length;
-  inline ImmF64Operand(Decoder* decoder, const byte* pc) {
-    value = bit_cast<double>(decoder->checked_read_u64(pc, 1, "immf64"));
-    length = 8;
-  }
-};
-
-struct GlobalIndexOperand {
-  uint32_t index;
-  LocalType type;
-  const WasmGlobal* global;
-  unsigned length;
-
-  inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
-    index = decoder->checked_read_u32v(pc, 1, &length, "global index");
-    global = nullptr;
-    type = kAstStmt;
-  }
-};
-
-struct BlockTypeOperand {
-  uint32_t arity;
-  const byte* types;  // pointer to encoded types for the block.
-  unsigned length;
-
-  inline BlockTypeOperand(Decoder* decoder, const byte* pc) {
-    uint8_t val = decoder->checked_read_u8(pc, 1, "block type");
-    LocalType type = kAstStmt;
-    length = 1;
-    arity = 0;
-    types = nullptr;
-    if (decode_local_type(val, &type)) {
-      arity = type == kAstStmt ? 0 : 1;
-      types = pc + 1;
-    } else {
-      // Handle multi-value blocks.
-      if (!FLAG_wasm_mv_prototype) {
-        decoder->error(pc, pc + 1, "invalid block arity > 1");
-        return;
-      }
-      if (val != kMultivalBlock) {
-        decoder->error(pc, pc + 1, "invalid block type");
-        return;
-      }
-      // Decode and check the types vector of the block.
-      unsigned len = 0;
-      uint32_t count = decoder->checked_read_u32v(pc, 2, &len, "block arity");
-      // {count} is encoded as {arity-2}, so that a {0} count here corresponds
-      // to a block with 2 values. This makes invalid/redundant encodings
-      // impossible.
-      arity = count + 2;
-      length = 1 + len + arity;
-      types = pc + 1 + 1 + len;
-
-      for (uint32_t i = 0; i < arity; i++) {
-        uint32_t offset = 1 + 1 + len + i;
-        val = decoder->checked_read_u8(pc, offset, "block type");
-        decode_local_type(val, &type);
-        if (type == kAstStmt) {
-          decoder->error(pc, pc + offset, "invalid block type");
-          return;
-        }
-      }
-    }
-  }
-  // Decode a byte representing a local type. Return {false} if the encoded
-  // byte was invalid or {kMultivalBlock}.
-  bool decode_local_type(uint8_t val, LocalType* result) {
-    switch (static_cast<LocalTypeCode>(val)) {
-      case kLocalVoid:
-        *result = kAstStmt;
-        return true;
-      case kLocalI32:
-        *result = kAstI32;
-        return true;
-      case kLocalI64:
-        *result = kAstI64;
-        return true;
-      case kLocalF32:
-        *result = kAstF32;
-        return true;
-      case kLocalF64:
-        *result = kAstF64;
-        return true;
-      case kLocalS128:
-        *result = kAstS128;
-        return true;
-      default:
-        *result = kAstStmt;
-        return false;
-    }
-  }
-  LocalType read_entry(unsigned index) {
-    DCHECK_LT(index, arity);
-    LocalType result;
-    CHECK(decode_local_type(types[index], &result));
-    return result;
-  }
-};
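// Worked example of the {arity-2} bias described above: a block producing
// three values is encoded as kMultivalBlock, a count byte of 1 (= 3 - 2),
// then three type bytes; a raw count of 0 therefore always means two values,
// so one- and zero-value blocks have exactly one (non-multival) encoding.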
-
-struct Control;
-struct BreakDepthOperand {
-  uint32_t depth;
-  Control* target;
-  unsigned length;
-  inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
-    depth = decoder->checked_read_u32v(pc, 1, &length, "break depth");
-    target = nullptr;
-  }
-};
-
-struct CallIndirectOperand {
-  uint32_t table_index;
-  uint32_t index;
-  FunctionSig* sig;
-  unsigned length;
-  inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
-    unsigned len = 0;
-    index = decoder->checked_read_u32v(pc, 1, &len, "signature index");
-    table_index = decoder->checked_read_u8(pc, 1 + len, "table index");
-    if (table_index != 0) {
-      decoder->error(pc, pc + 1 + len, "expected table index 0, found %u",
-                     table_index);
-    }
-    length = 1 + len;
-    sig = nullptr;
-  }
-};
-
-struct CallFunctionOperand {
-  uint32_t index;
-  FunctionSig* sig;
-  unsigned length;
-  inline CallFunctionOperand(Decoder* decoder, const byte* pc) {
-    unsigned len1 = 0;
-    unsigned len2 = 0;
-    index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "function index");
-    length = len1 + len2;
-    sig = nullptr;
-  }
-};
-
-struct MemoryIndexOperand {
-  uint32_t index;
-  unsigned length;
-  inline MemoryIndexOperand(Decoder* decoder, const byte* pc) {
-    index = decoder->checked_read_u8(pc, 1, "memory index");
-    if (index != 0) {
-      decoder->error(pc, pc + 1, "expected memory index 0, found %u", index);
-    }
-    length = 1;
-  }
-};
-
-struct BranchTableOperand {
-  uint32_t table_count;
-  const byte* start;
-  const byte* table;
-  inline BranchTableOperand(Decoder* decoder, const byte* pc) {
-    DCHECK_EQ(kExprBrTable, decoder->checked_read_u8(pc, 0, "opcode"));
-    start = pc + 1;
-    unsigned len1 = 0;
-    table_count = decoder->checked_read_u32v(pc, 1, &len1, "table count");
-    if (table_count > (UINT_MAX / sizeof(uint32_t)) - 1 ||
-        len1 > UINT_MAX - (table_count + 1) * sizeof(uint32_t)) {
-      decoder->error(pc, "branch table size overflow");
-    }
-    table = pc + 1 + len1;
-  }
-  inline uint32_t read_entry(Decoder* decoder, unsigned i) {
-    DCHECK(i <= table_count);
-    return table ? decoder->read_u32(table + i * sizeof(uint32_t)) : 0;
-  }
-};
-
-// A helper to iterate over a branch table.
-class BranchTableIterator {
- public:
-  unsigned cur_index() { return index_; }
-  bool has_next() { return decoder_->ok() && index_ <= table_count_; }
-  uint32_t next() {
-    DCHECK(has_next());
-    index_++;
-    unsigned length = 0;
-    uint32_t result =
-        decoder_->checked_read_u32v(pc_, 0, &length, "branch table entry");
-    pc_ += length;
-    return result;
-  }
-  // length, including the length of the {BranchTableOperand}, but not the
-  // opcode.
-  unsigned length() {
-    while (has_next()) next();
-    return static_cast<unsigned>(pc_ - start_);
-  }
-  const byte* pc() { return pc_; }
-
-  BranchTableIterator(Decoder* decoder, BranchTableOperand& operand)
-      : decoder_(decoder),
-        start_(operand.start),
-        pc_(operand.table),
-        index_(0),
-        table_count_(operand.table_count) {}
-
- private:
-  Decoder* decoder_;
-  const byte* start_;
-  const byte* pc_;
-  uint32_t index_;        // the current index.
-  uint32_t table_count_;  // the count of entries, not including default.
-};
-
-struct MemoryAccessOperand {
-  uint32_t alignment;
-  uint32_t offset;
-  unsigned length;
-  inline MemoryAccessOperand(Decoder* decoder, const byte* pc,
-                             uint32_t max_alignment) {
-    unsigned alignment_length;
-    alignment =
-        decoder->checked_read_u32v(pc, 1, &alignment_length, "alignment");
-    if (max_alignment < alignment) {
-      decoder->error(pc, pc + 1,
-                     "invalid alignment; expected maximum alignment is %u, "
-                     "actual alignment is %u",
-                     max_alignment, alignment);
-    }
-    unsigned offset_length;
-    offset = decoder->checked_read_u32v(pc, 1 + alignment_length,
-                                        &offset_length, "offset");
-    length = alignment_length + offset_length;
-  }
-};
-
-typedef compiler::WasmGraphBuilder TFBuilder;
-struct ModuleEnv;  // forward declaration of module interface.
-
-// All of the various data structures necessary to decode a function body.
-struct FunctionBody {
-  ModuleEnv* module;  // module environment
-  FunctionSig* sig;   // function signature
-  const byte* base;   // base of the module bytes, for error reporting
-  const byte* start;  // start of the function body
-  const byte* end;    // end of the function body
-};
-
-static inline FunctionBody FunctionBodyForTesting(const byte* start,
-                                                  const byte* end) {
-  return {nullptr, nullptr, start, start, end};
-}
-
-struct DecodeStruct {
-  int unused;
-};
-typedef Result<DecodeStruct*> DecodeResult;
-inline std::ostream& operator<<(std::ostream& os, const DecodeStruct& tree) {
-  return os;
-}
-
-V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
-                                              FunctionBody& body);
-DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
-                          FunctionBody& body);
-bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
-              std::ostream& os,
-              std::vector<std::tuple<uint32_t, int, int>>* offset_table);
-
-// A simplified form of AST printing, e.g. from a debugger.
-void PrintAstForDebugging(const byte* start, const byte* end);
-
-inline DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
-                                   ModuleEnv* module, FunctionSig* sig,
-                                   const byte* start, const byte* end) {
-  FunctionBody body = {module, sig, nullptr, start, end};
-  return VerifyWasmCode(allocator, body);
-}
-
-inline DecodeResult BuildTFGraph(AccountingAllocator* allocator,
-                                 TFBuilder* builder, ModuleEnv* module,
-                                 FunctionSig* sig, const byte* start,
-                                 const byte* end) {
-  FunctionBody body = {module, sig, nullptr, start, end};
-  return BuildTFGraph(allocator, builder, body);
-}
-
-struct AstLocalDecls {
-  // The size of the encoded declarations.
-  uint32_t decls_encoded_size;  // size of encoded declarations
-
-  // Total number of locals.
-  uint32_t total_local_count;
-
-  // List of {local type, count} pairs.
-  ZoneVector<std::pair<LocalType, uint32_t>> local_types;
-
-  // Constructor initializes the vector.
-  explicit AstLocalDecls(Zone* zone)
-      : decls_encoded_size(0), total_local_count(0), local_types(zone) {}
-};
-
-V8_EXPORT_PRIVATE bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
-                                        const byte* end);
-V8_EXPORT_PRIVATE BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone,
-                                                             size_t num_locals,
-                                                             const byte* start,
-                                                             const byte* end);
-
-// Computes the length of the opcode at the given address.
-V8_EXPORT_PRIVATE unsigned OpcodeLength(const byte* pc, const byte* end);
-
-// A simple forward iterator for bytecodes.
-class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
- public:
-  // If one wants to iterate over the bytecode without looking at {pc_offset()}.
-  class iterator {
-   public:
-    inline iterator& operator++() {
-      DCHECK_LT(ptr_, end_);
-      ptr_ += OpcodeLength(ptr_, end_);
-      return *this;
-    }
-    inline WasmOpcode operator*() {
-      DCHECK_LT(ptr_, end_);
-      return static_cast<WasmOpcode>(*ptr_);
-    }
-    inline bool operator==(const iterator& that) {
-      return this->ptr_ == that.ptr_;
-    }
-    inline bool operator!=(const iterator& that) {
-      return this->ptr_ != that.ptr_;
-    }
-
-   private:
-    friend class BytecodeIterator;
-    const byte* ptr_;
-    const byte* end_;
-    iterator(const byte* ptr, const byte* end) : ptr_(ptr), end_(end) {}
-  };
-
-  // Create a new {BytecodeIterator}. If the {decls} pointer is non-null,
-  // assume the bytecode starts with local declarations and decode them.
-  // Otherwise, do not decode local decls.
-  BytecodeIterator(const byte* start, const byte* end,
-                   AstLocalDecls* decls = nullptr);
-
-  inline iterator begin() const { return iterator(pc_, end_); }
-  inline iterator end() const { return iterator(end_, end_); }
-
-  WasmOpcode current() {
-    return static_cast<WasmOpcode>(
-        checked_read_u8(pc_, 0, "expected bytecode"));
-  }
-
-  void next() {
-    if (pc_ < end_) {
-      pc_ += OpcodeLength(pc_, end_);
-      if (pc_ >= end_) pc_ = end_;
-    }
-  }
-
-  bool has_next() { return pc_ < end_; }
-};
-
-}  // namespace wasm
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_WASM_AST_DECODER_H_
diff --git a/src/wasm/decoder.h b/src/wasm/decoder.h
index fc8f110..bfd1436 100644
--- a/src/wasm/decoder.h
+++ b/src/wasm/decoder.h
@@ -34,7 +34,12 @@
   Decoder(const byte* start, const byte* end)
       : start_(start),
         pc_(start),
-        limit_(end),
+        end_(end),
+        error_pc_(nullptr),
+        error_pt_(nullptr) {}
+  Decoder(const byte* start, const byte* pc, const byte* end)
+      : start_(start),
+        pc_(pc),
         end_(end),
         error_pc_(nullptr),
         error_pt_(nullptr) {}
@@ -44,7 +49,7 @@
   inline bool check(const byte* base, unsigned offset, unsigned length,
                     const char* msg) {
     DCHECK_GE(base, start_);
-    if ((base + offset + length) > limit_) {
+    if ((base + offset + length) > end_) {
       error(base, base + offset, "%s", msg);
       return false;
     }
@@ -185,22 +190,27 @@
 
   // Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
   void consume_bytes(uint32_t size, const char* name = "skip") {
-    TRACE("  +%d  %-20s: %d bytes\n", static_cast<int>(pc_ - start_), name,
-          size);
+#if DEBUG
+    if (name) {
+      // Only trace if the name is not null.
+      TRACE("  +%d  %-20s: %d bytes\n", static_cast<int>(pc_ - start_), name,
+            size);
+    }
+#endif
     if (checkAvailable(size)) {
       pc_ += size;
     } else {
-      pc_ = limit_;
+      pc_ = end_;
     }
   }
 
-  // Check that at least {size} bytes exist between {pc_} and {limit_}.
+  // Check that at least {size} bytes exist between {pc_} and {end_}.
   bool checkAvailable(int size) {
     intptr_t pc_overflow_value = std::numeric_limits<intptr_t>::max() - size;
     if (size < 0 || (intptr_t)pc_ > pc_overflow_value) {
       error(pc_, nullptr, "reading %d bytes would underflow/overflow", size);
       return false;
-    } else if (pc_ < start_ || limit_ < (pc_ + size)) {
+    } else if (pc_ < start_ || end_ < (pc_ + size)) {
       error(pc_, nullptr, "expected %d bytes, fell off end", size);
       return false;
     } else {
@@ -241,11 +251,11 @@
   template <typename T>
   T traceOffEnd() {
     T t = 0;
-    for (const byte* ptr = pc_; ptr < limit_; ptr++) {
+    for (const byte* ptr = pc_; ptr < end_; ptr++) {
       TRACE("%02x ", *ptr);
     }
     TRACE("<end>\n");
-    pc_ = limit_;
+    pc_ = end_;
     return t;
   }
 
@@ -272,7 +282,6 @@
   void Reset(const byte* start, const byte* end) {
     start_ = start;
     pc_ = start;
-    limit_ = end;
     end_ = end;
     error_pc_ = nullptr;
     error_pt_ = nullptr;
@@ -281,16 +290,16 @@
 
   bool ok() const { return error_msg_ == nullptr; }
   bool failed() const { return !ok(); }
-  bool more() const { return pc_ < limit_; }
+  bool more() const { return pc_ < end_; }
 
-  const byte* start() { return start_; }
-  const byte* pc() { return pc_; }
-  uint32_t pc_offset() { return static_cast<uint32_t>(pc_ - start_); }
+  const byte* start() const { return start_; }
+  const byte* pc() const { return pc_; }
+  uint32_t pc_offset() const { return static_cast<uint32_t>(pc_ - start_); }
+  const byte* end() const { return end_; }
 
  protected:
   const byte* start_;
   const byte* pc_;
-  const byte* limit_;
   const byte* end_;
   const byte* error_pc_;
   const byte* error_pt_;
@@ -308,7 +317,7 @@
     const int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;
     const byte* ptr = base + offset;
     const byte* end = ptr + kMaxLength;
-    if (end > limit_) end = limit_;
+    if (end > end_) end = end_;
     int shift = 0;
     byte b = 0;
     IntType result = 0;
@@ -358,7 +367,7 @@
       const int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;
       const byte* pos = pc_;
       const byte* end = pc_ + kMaxLength;
-      if (end > limit_) end = limit_;
+      if (end > end_) end = end_;
 
       IntType result = 0;
       int shift = 0;
@@ -373,8 +382,10 @@
 
       int length = static_cast<int>(pc_ - pos);
       if (pc_ == end && (b & 0x80)) {
+        TRACE("\n");
         error(pc_ - 1, "varint too large");
       } else if (length == 0) {
+        TRACE("\n");
         error(pc_, "varint of length 0");
       } else if (is_signed) {
         if (length < kMaxLength) {
diff --git a/src/wasm/function-body-decoder-impl.h b/src/wasm/function-body-decoder-impl.h
new file mode 100644
index 0000000..6759ed6
--- /dev/null
+++ b/src/wasm/function-body-decoder-impl.h
@@ -0,0 +1,325 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_FUNCTION_BODY_DECODER_IMPL_H_
+#define V8_WASM_FUNCTION_BODY_DECODER_IMPL_H_
+
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+struct WasmGlobal;
+
+// Helpers for decoding different kinds of operands which follow bytecodes.
+struct LocalIndexOperand {
+  uint32_t index;
+  ValueType type;
+  unsigned length;
+
+  inline LocalIndexOperand(Decoder* decoder, const byte* pc) {
+    index = decoder->checked_read_u32v(pc, 1, &length, "local index");
+    type = kWasmStmt;
+  }
+};
+
+struct ImmI32Operand {
+  int32_t value;
+  unsigned length;
+  inline ImmI32Operand(Decoder* decoder, const byte* pc) {
+    value = decoder->checked_read_i32v(pc, 1, &length, "immi32");
+  }
+};
+
+struct ImmI64Operand {
+  int64_t value;
+  unsigned length;
+  inline ImmI64Operand(Decoder* decoder, const byte* pc) {
+    value = decoder->checked_read_i64v(pc, 1, &length, "immi64");
+  }
+};
+
+struct ImmF32Operand {
+  float value;
+  unsigned length;
+  inline ImmF32Operand(Decoder* decoder, const byte* pc) {
+    // Avoid bit_cast because it might not preserve the signalling bit of a NaN.
+    uint32_t tmp = decoder->checked_read_u32(pc, 1, "immf32");
+    memcpy(&value, &tmp, sizeof(value));
+    length = 4;
+  }
+};
+
+struct ImmF64Operand {
+  double value;
+  unsigned length;
+  inline ImmF64Operand(Decoder* decoder, const byte* pc) {
+    // Avoid bit_cast because it might not preserve the signalling bit of a NaN.
+    uint64_t tmp = decoder->checked_read_u64(pc, 1, "immf64");
+    memcpy(&value, &tmp, sizeof(value));
+    length = 8;
+  }
+};
+
+struct GlobalIndexOperand {
+  uint32_t index;
+  ValueType type;
+  const WasmGlobal* global;
+  unsigned length;
+
+  inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
+    index = decoder->checked_read_u32v(pc, 1, &length, "global index");
+    global = nullptr;
+    type = kWasmStmt;
+  }
+};
+
+struct BlockTypeOperand {
+  uint32_t arity;
+  const byte* types;  // pointer to encoded types for the block.
+  unsigned length;
+
+  inline BlockTypeOperand(Decoder* decoder, const byte* pc) {
+    uint8_t val = decoder->checked_read_u8(pc, 1, "block type");
+    ValueType type = kWasmStmt;
+    length = 1;
+    arity = 0;
+    types = nullptr;
+    if (decode_local_type(val, &type)) {
+      arity = type == kWasmStmt ? 0 : 1;
+      types = pc + 1;
+    } else {
+      // Handle multi-value blocks.
+      if (!FLAG_wasm_mv_prototype) {
+        decoder->error(pc, pc + 1, "invalid block arity > 1");
+        return;
+      }
+      if (val != kMultivalBlock) {
+        decoder->error(pc, pc + 1, "invalid block type");
+        return;
+      }
+      // Decode and check the types vector of the block.
+      unsigned len = 0;
+      uint32_t count = decoder->checked_read_u32v(pc, 2, &len, "block arity");
+      // {count} is encoded as {arity-2}, so that a {0} count here corresponds
+      // to a block with 2 values. This makes invalid/redundant encodings
+      // impossible.
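+      // Example: the smallest multi-value block, returning two values of
+      // types <t1, t2>, encodes its type immediate (following the block
+      // opcode) as {kMultivalBlock, 0x00, <t1>, <t2>}, i.e. {count} == 0.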
+      arity = count + 2;
+      length = 1 + len + arity;
+      types = pc + 1 + 1 + len;
+
+      for (uint32_t i = 0; i < arity; i++) {
+        uint32_t offset = 1 + 1 + len + i;
+        val = decoder->checked_read_u8(pc, offset, "block type");
+        decode_local_type(val, &type);
+        if (type == kWasmStmt) {
+          decoder->error(pc, pc + offset, "invalid block type");
+          return;
+        }
+      }
+    }
+  }
+  // Decode a byte representing a local type. Return {false} if the encoded
+  // byte was invalid or {kMultivalBlock}.
+  bool decode_local_type(uint8_t val, ValueType* result) {
+    switch (static_cast<ValueTypeCode>(val)) {
+      case kLocalVoid:
+        *result = kWasmStmt;
+        return true;
+      case kLocalI32:
+        *result = kWasmI32;
+        return true;
+      case kLocalI64:
+        *result = kWasmI64;
+        return true;
+      case kLocalF32:
+        *result = kWasmF32;
+        return true;
+      case kLocalF64:
+        *result = kWasmF64;
+        return true;
+      case kLocalS128:
+        *result = kWasmS128;
+        return true;
+      case kLocalS1x4:
+        *result = kWasmS1x4;
+        return true;
+      case kLocalS1x8:
+        *result = kWasmS1x8;
+        return true;
+      case kLocalS1x16:
+        *result = kWasmS1x16;
+        return true;
+      default:
+        *result = kWasmStmt;
+        return false;
+    }
+  }
+  ValueType read_entry(unsigned index) {
+    DCHECK_LT(index, arity);
+    ValueType result;
+    CHECK(decode_local_type(types[index], &result));
+    return result;
+  }
+};
+
+struct Control;
+struct BreakDepthOperand {
+  uint32_t depth;
+  Control* target;
+  unsigned length;
+  inline BreakDepthOperand(Decoder* decoder, const byte* pc) {
+    depth = decoder->checked_read_u32v(pc, 1, &length, "break depth");
+    target = nullptr;
+  }
+};
+
+struct CallIndirectOperand {
+  uint32_t table_index;
+  uint32_t index;
+  FunctionSig* sig;
+  unsigned length;
+  inline CallIndirectOperand(Decoder* decoder, const byte* pc) {
+    unsigned len = 0;
+    index = decoder->checked_read_u32v(pc, 1, &len, "signature index");
+    table_index = decoder->checked_read_u8(pc, 1 + len, "table index");
+    if (table_index != 0) {
+      decoder->error(pc, pc + 1 + len, "expected table index 0, found %u",
+                     table_index);
+    }
+    length = 1 + len;
+    sig = nullptr;
+  }
+};
+
+struct CallFunctionOperand {
+  uint32_t index;
+  FunctionSig* sig;
+  unsigned length;
+  inline CallFunctionOperand(Decoder* decoder, const byte* pc) {
+    unsigned len1 = 0;
+    unsigned len2 = 0;
+    index = decoder->checked_read_u32v(pc, 1 + len1, &len2, "function index");
+    length = len1 + len2;
+    sig = nullptr;
+  }
+};
+
+struct MemoryIndexOperand {
+  uint32_t index;
+  unsigned length;
+  inline MemoryIndexOperand(Decoder* decoder, const byte* pc) {
+    index = decoder->checked_read_u8(pc, 1, "memory index");
+    if (index != 0) {
+      decoder->error(pc, pc + 1, "expected memory index 0, found %u", index);
+    }
+    length = 1;
+  }
+};
+
+struct BranchTableOperand {
+  uint32_t table_count;
+  const byte* start;
+  const byte* table;
+  inline BranchTableOperand(Decoder* decoder, const byte* pc) {
+    DCHECK_EQ(kExprBrTable, decoder->checked_read_u8(pc, 0, "opcode"));
+    start = pc + 1;
+    unsigned len1 = 0;
+    table_count = decoder->checked_read_u32v(pc, 1, &len1, "table count");
+    if (table_count > (UINT_MAX / sizeof(uint32_t)) - 1 ||
+        len1 > UINT_MAX - (table_count + 1) * sizeof(uint32_t)) {
+      decoder->error(pc, "branch table size overflow");
+    }
+    table = pc + 1 + len1;
+  }
+};
+
+// A helper to iterate over a branch table.
+class BranchTableIterator {
+ public:
+  unsigned cur_index() { return index_; }
+  bool has_next() { return decoder_->ok() && index_ <= table_count_; }
+  uint32_t next() {
+    DCHECK(has_next());
+    index_++;
+    unsigned length = 0;
+    uint32_t result =
+        decoder_->checked_read_u32v(pc_, 0, &length, "branch table entry");
+    pc_ += length;
+    return result;
+  }
+  // The total length of the branch table in bytes, including the
+  // {BranchTableOperand} but not the opcode.
+  unsigned length() {
+    while (has_next()) next();
+    return static_cast<unsigned>(pc_ - start_);
+  }
+  const byte* pc() { return pc_; }
+
+  BranchTableIterator(Decoder* decoder, BranchTableOperand& operand)
+      : decoder_(decoder),
+        start_(operand.start),
+        pc_(operand.table),
+        index_(0),
+        table_count_(operand.table_count) {}
+
+ private:
+  Decoder* decoder_;
+  const byte* start_;
+  const byte* pc_;
+  uint32_t index_;        // the current index.
+  uint32_t table_count_;  // the count of entries, not including default.
+};
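+// A typical use, mirroring the {kExprBrTable} handling in the decoder: build
+// the operand, then iterate all entries (the last one is the default target):
+//   BranchTableOperand operand(decoder, pc);
+//   BranchTableIterator iterator(decoder, operand);
+//   while (iterator.has_next()) { uint32_t depth = iterator.next(); ... }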
+
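+// Operand for memory accesses: a LEB128-encoded alignment (log2 of bytes)
+// followed by a LEB128-encoded offset; e.g. alignment 2 with offset 16 is
+// the immediate byte sequence {0x02, 0x10} after the opcode.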
+struct MemoryAccessOperand {
+  uint32_t alignment;
+  uint32_t offset;
+  unsigned length;
+  inline MemoryAccessOperand(Decoder* decoder, const byte* pc,
+                             uint32_t max_alignment) {
+    unsigned alignment_length;
+    alignment =
+        decoder->checked_read_u32v(pc, 1, &alignment_length, "alignment");
+    if (max_alignment < alignment) {
+      decoder->error(pc, pc + 1,
+                     "invalid alignment; expected maximum alignment is %u, "
+                     "actual alignment is %u",
+                     max_alignment, alignment);
+    }
+    unsigned offset_length;
+    offset = decoder->checked_read_u32v(pc, 1 + alignment_length,
+                                        &offset_length, "offset");
+    length = alignment_length + offset_length;
+  }
+};
+
+// Operand for SIMD lane operations.
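+// The lane index is read at offset 2 because SIMD opcodes are two bytes
+// long: {kSimdPrefix} followed by the sub-opcode byte.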
+struct SimdLaneOperand {
+  uint8_t lane;
+  unsigned length;
+
+  inline SimdLaneOperand(Decoder* decoder, const byte* pc) {
+    lane = decoder->checked_read_u8(pc, 2, "lane");
+    length = 1;
+  }
+};
+
+// Operand for SIMD shift operations.
+struct SimdShiftOperand {
+  uint8_t shift;
+  unsigned length;
+
+  inline SimdShiftOperand(Decoder* decoder, const byte* pc) {
+    shift = decoder->checked_read_u8(pc, 2, "shift");
+    length = 1;
+  }
+};
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_FUNCTION_BODY_DECODER_IMPL_H_
diff --git a/src/wasm/ast-decoder.cc b/src/wasm/function-body-decoder.cc
similarity index 68%
rename from src/wasm/ast-decoder.cc
rename to src/wasm/function-body-decoder.cc
index ff6af34..dc2f83b 100644
--- a/src/wasm/ast-decoder.cc
+++ b/src/wasm/function-body-decoder.cc
@@ -4,13 +4,17 @@
 
 #include "src/signature.h"
 
+#include "src/base/platform/elapsed-timer.h"
 #include "src/bit-vector.h"
 #include "src/flags.h"
 #include "src/handles.h"
+#include "src/objects-inl.h"
 #include "src/zone/zone-containers.h"
 
-#include "src/wasm/ast-decoder.h"
 #include "src/wasm/decoder.h"
+#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-limits.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-opcodes.h"
 
@@ -31,16 +35,14 @@
 #define TRACE(...)
 #endif
 
-#define CHECK_PROTOTYPE_OPCODE(flag)                   \
-  if (module_ && module_->origin == kAsmJsOrigin) {    \
-    error("Opcode not supported for asmjs modules");   \
-  }                                                    \
-  if (!FLAG_##flag) {                                  \
-    error("Invalid opcode (enable with --" #flag ")"); \
-    break;                                             \
+#define CHECK_PROTOTYPE_OPCODE(flag)                           \
+  if (module_ != nullptr && module_->origin == kAsmJsOrigin) { \
+    error("Opcode not supported for asmjs modules");           \
+  }                                                            \
+  if (!FLAG_##flag) {                                          \
+    error("Invalid opcode (enable with --" #flag ")");         \
+    break;                                                     \
   }
-// TODO(titzer): this is only for intermediate migration.
-#define IMPLICIT_FUNCTION_END 1
 
 // An SsaEnv environment carries the current local variable renaming
 // as well as the current effect and control dependency in the TF graph.
@@ -70,7 +72,7 @@
 struct Value {
   const byte* pc;
   TFNode* node;
-  LocalType type;
+  ValueType type;
 };
 
 struct TryInfo : public ZoneObject {
@@ -87,9 +89,9 @@
     Value first;
   } vals;  // Either multiple values or a single value.
 
-  Value& first() {
-    DCHECK_GT(arity, 0u);
-    return arity == 1 ? vals.first : vals.array[0];
+  Value& operator[](size_t i) {
+    DCHECK_GT(arity, i);
+    return arity == 1 ? vals.first : vals.array[i];
   }
 };
 
@@ -101,11 +103,12 @@
 struct Control {
   const byte* pc;
   ControlKind kind;
-  int stack_depth;    // stack height at the beginning of the construct.
-  SsaEnv* end_env;    // end environment for the construct.
-  SsaEnv* false_env;  // false environment (only for if).
-  TryInfo* try_info;  // Information used for compiling try statements.
+  size_t stack_depth;      // stack height at the beginning of the construct.
+  SsaEnv* end_env;         // end environment for the construct.
+  SsaEnv* false_env;       // false environment (only for if).
+  TryInfo* try_info;       // Information used for compiling try statements.
   int32_t previous_catch;  // The previous Control (on the stack) with a catch.
+  bool unreachable;        // The current block has been ended.
 
   // Values merged into the end of this control construct.
   MergeValues merge;
@@ -116,30 +119,30 @@
   inline bool is_try() const { return kind == kControlTry; }
 
   // Named constructors.
-  static Control Block(const byte* pc, int stack_depth, SsaEnv* end_env,
+  static Control Block(const byte* pc, size_t stack_depth, SsaEnv* end_env,
                        int32_t previous_catch) {
-    return {pc,      kControlBlock, stack_depth,    end_env,
-            nullptr, nullptr,       previous_catch, {0, {NO_VALUE}}};
+    return {pc,      kControlBlock,  stack_depth, end_env,        nullptr,
+            nullptr, previous_catch, false,       {0, {NO_VALUE}}};
   }
 
-  static Control If(const byte* pc, int stack_depth, SsaEnv* end_env,
+  static Control If(const byte* pc, size_t stack_depth, SsaEnv* end_env,
                     SsaEnv* false_env, int32_t previous_catch) {
-    return {pc,        kControlIf, stack_depth,    end_env,
-            false_env, nullptr,    previous_catch, {0, {NO_VALUE}}};
+    return {pc,      kControlIf,     stack_depth, end_env,        false_env,
+            nullptr, previous_catch, false,       {0, {NO_VALUE}}};
   }
 
-  static Control Loop(const byte* pc, int stack_depth, SsaEnv* end_env,
+  static Control Loop(const byte* pc, size_t stack_depth, SsaEnv* end_env,
                       int32_t previous_catch) {
-    return {pc,      kControlLoop, stack_depth,    end_env,
-            nullptr, nullptr,      previous_catch, {0, {NO_VALUE}}};
+    return {pc,      kControlLoop,   stack_depth, end_env,        nullptr,
+            nullptr, previous_catch, false,       {0, {NO_VALUE}}};
   }
 
-  static Control Try(const byte* pc, int stack_depth, SsaEnv* end_env,
+  static Control Try(const byte* pc, size_t stack_depth, SsaEnv* end_env,
                      Zone* zone, SsaEnv* catch_env, int32_t previous_catch) {
     DCHECK_NOT_NULL(catch_env);
     TryInfo* try_info = new (zone) TryInfo(catch_env);
-    return {pc,      kControlTry, stack_depth,    end_env,
-            nullptr, try_info,    previous_catch, {0, {NO_VALUE}}};
+    return {pc,       kControlTry,    stack_depth, end_env,        nullptr,
+            try_info, previous_catch, false,       {0, {NO_VALUE}}};
   }
 };
 
@@ -150,38 +153,136 @@
   (build() ? CheckForException(builder_->func(__VA_ARGS__)) : nullptr)
 #define BUILD0(func) (build() ? CheckForException(builder_->func()) : nullptr)
 
-struct LaneOperand {
-  uint8_t lane;
-  unsigned length;
-
-  inline LaneOperand(Decoder* decoder, const byte* pc) {
-    lane = decoder->checked_read_u8(pc, 2, "lane");
-    length = 1;
-  }
-};
-
 // Generic Wasm bytecode decoder with utilities for decoding operands,
 // lengths, etc.
 class WasmDecoder : public Decoder {
  public:
-  WasmDecoder(ModuleEnv* module, FunctionSig* sig, const byte* start,
+  WasmDecoder(const WasmModule* module, FunctionSig* sig, const byte* start,
               const byte* end)
       : Decoder(start, end),
         module_(module),
         sig_(sig),
-        total_locals_(0),
         local_types_(nullptr) {}
-  ModuleEnv* module_;
+  const WasmModule* module_;
   FunctionSig* sig_;
-  size_t total_locals_;
-  ZoneVector<LocalType>* local_types_;
+
+  ZoneVector<ValueType>* local_types_;
+
+  size_t total_locals() const {
+    return local_types_ == nullptr ? 0 : local_types_->size();
+  }
+
+  static bool DecodeLocals(Decoder* decoder, const FunctionSig* sig,
+                           ZoneVector<ValueType>* type_list) {
+    DCHECK_NOT_NULL(type_list);
+    // Initialize from signature.
+    if (sig != nullptr) {
+      type_list->reserve(sig->parameter_count());
+      for (size_t i = 0; i < sig->parameter_count(); ++i) {
+        type_list->push_back(sig->GetParam(i));
+      }
+    }
+    // Decode local declarations, if any.
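+    // The wire format is a count of entries, each of which is a
+    // (count, type) pair; e.g. {0x02, 0x01, kLocalI32, 0x03, kLocalF64}
+    // declares one i32 local followed by three f64 locals.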
+    uint32_t entries = decoder->consume_u32v("local decls count");
+    if (decoder->failed()) return false;
+
+    TRACE("local decls count: %u\n", entries);
+    while (entries-- > 0 && decoder->ok() && decoder->more()) {
+      uint32_t count = decoder->consume_u32v("local count");
+      if (decoder->failed()) return false;
+
+      if ((count + type_list->size()) > kV8MaxWasmFunctionLocals) {
+        decoder->error(decoder->pc() - 1, "local count too large");
+        return false;
+      }
+      byte code = decoder->consume_u8("local type");
+      if (decoder->failed()) return false;
+
+      ValueType type;
+      switch (code) {
+        case kLocalI32:
+          type = kWasmI32;
+          break;
+        case kLocalI64:
+          type = kWasmI64;
+          break;
+        case kLocalF32:
+          type = kWasmF32;
+          break;
+        case kLocalF64:
+          type = kWasmF64;
+          break;
+        case kLocalS128:
+          type = kWasmS128;
+          break;
+        case kLocalS1x4:
+          type = kWasmS1x4;
+          break;
+        case kLocalS1x8:
+          type = kWasmS1x8;
+          break;
+        case kLocalS1x16:
+          type = kWasmS1x16;
+          break;
+        default:
+          decoder->error(decoder->pc() - 1, "invalid local type");
+          return false;
+      }
+      type_list->insert(type_list->end(), count, type);
+    }
+    DCHECK(decoder->ok());
+    return true;
+  }
+
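+  // Compute a BitVector marking every local assigned (via {kExprSetLocal} or
+  // {kExprTeeLocal}) inside the loop whose {kExprLoop} opcode is at {pc};
+  // presumably used so that loop headers introduce phis only for those.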
+  static BitVector* AnalyzeLoopAssignment(Decoder* decoder, const byte* pc,
+                                          int locals_count, Zone* zone) {
+    if (pc >= decoder->end()) return nullptr;
+    if (*pc != kExprLoop) return nullptr;
+
+    BitVector* assigned = new (zone) BitVector(locals_count, zone);
+    int depth = 0;
+    // Iteratively process all bytecodes nested inside the loop.
+    while (pc < decoder->end() && decoder->ok()) {
+      WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
+      unsigned length = 1;
+      switch (opcode) {
+        case kExprLoop:
+        case kExprIf:
+        case kExprBlock:
+        case kExprTry:
+          length = OpcodeLength(decoder, pc);
+          depth++;
+          break;
+        case kExprSetLocal:  // fallthru
+        case kExprTeeLocal: {
+          LocalIndexOperand operand(decoder, pc);
+          if (assigned->length() > 0 &&
+              operand.index < static_cast<uint32_t>(assigned->length())) {
+            // Unverified code might have an out-of-bounds index.
+            assigned->Add(operand.index);
+          }
+          length = 1 + operand.length;
+          break;
+        }
+        case kExprEnd:
+          depth--;
+          break;
+        default:
+          length = OpcodeLength(decoder, pc);
+          break;
+      }
+      if (depth <= 0) break;
+      pc += length;
+    }
+    return decoder->ok() ? assigned : nullptr;
+  }
 
   inline bool Validate(const byte* pc, LocalIndexOperand& operand) {
-    if (operand.index < total_locals_) {
+    if (operand.index < total_locals()) {
       if (local_types_) {
         operand.type = local_types_->at(operand.index);
       } else {
-        operand.type = kAstStmt;
+        operand.type = kWasmStmt;
       }
       return true;
     }
@@ -190,9 +291,8 @@
   }
 
   inline bool Validate(const byte* pc, GlobalIndexOperand& operand) {
-    ModuleEnv* m = module_;
-    if (m && m->module && operand.index < m->module->globals.size()) {
-      operand.global = &m->module->globals[operand.index];
+    if (module_ != nullptr && operand.index < module_->globals.size()) {
+      operand.global = &module_->globals[operand.index];
       operand.type = operand.global->type;
       return true;
     }
@@ -201,9 +301,8 @@
   }
 
   inline bool Complete(const byte* pc, CallFunctionOperand& operand) {
-    ModuleEnv* m = module_;
-    if (m && m->module && operand.index < m->module->functions.size()) {
-      operand.sig = m->module->functions[operand.index].sig;
+    if (module_ != nullptr && operand.index < module_->functions.size()) {
+      operand.sig = module_->functions[operand.index].sig;
       return true;
     }
     return false;
@@ -218,17 +317,15 @@
   }
 
   inline bool Complete(const byte* pc, CallIndirectOperand& operand) {
-    ModuleEnv* m = module_;
-    if (m && m->module && operand.index < m->module->signatures.size()) {
-      operand.sig = m->module->signatures[operand.index];
+    if (module_ != nullptr && operand.index < module_->signatures.size()) {
+      operand.sig = module_->signatures[operand.index];
       return true;
     }
     return false;
   }
 
   inline bool Validate(const byte* pc, CallIndirectOperand& operand) {
-    uint32_t table_index = 0;
-    if (!module_->IsValidTable(table_index)) {
+    if (module_ == nullptr || module_->function_tables.empty()) {
       error("function table has to exist to execute call_indirect");
       return false;
     }
@@ -255,42 +352,94 @@
     return true;
   }
 
-  inline bool Validate(const byte* pc, LaneOperand& operand) {
-    if (operand.lane < 0 || operand.lane > 3) {
-      error(pc_, pc_ + 2, "invalid extract lane value");
+  inline bool Validate(const byte* pc, WasmOpcode opcode,
+                       SimdLaneOperand& operand) {
+    uint8_t num_lanes = 0;
+    switch (opcode) {
+      case kExprF32x4ExtractLane:
+      case kExprF32x4ReplaceLane:
+      case kExprI32x4ExtractLane:
+      case kExprI32x4ReplaceLane:
+        num_lanes = 4;
+        break;
+      case kExprI16x8ExtractLane:
+      case kExprI16x8ReplaceLane:
+        num_lanes = 8;
+        break;
+      case kExprI8x16ExtractLane:
+      case kExprI8x16ReplaceLane:
+        num_lanes = 16;
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+    if (operand.lane < 0 || operand.lane >= num_lanes) {
+      error(pc_, pc_ + 2, "invalid lane index");
       return false;
     } else {
       return true;
     }
   }
 
-  unsigned OpcodeLength(const byte* pc) {
+  inline bool Validate(const byte* pc, WasmOpcode opcode,
+                       SimdShiftOperand& operand) {
+    uint8_t max_shift = 0;
+    switch (opcode) {
+      case kExprI32x4Shl:
+      case kExprI32x4ShrS:
+      case kExprI32x4ShrU:
+        max_shift = 32;
+        break;
+      case kExprI16x8Shl:
+      case kExprI16x8ShrS:
+      case kExprI16x8ShrU:
+        max_shift = 16;
+        break;
+      case kExprI8x16Shl:
+      case kExprI8x16ShrS:
+      case kExprI8x16ShrU:
+        max_shift = 8;
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+    if (operand.shift < 0 || operand.shift >= max_shift) {
+      error(pc_, pc_ + 2, "invalid shift amount");
+      return false;
+    } else {
+      return true;
+    }
+  }
+
+  static unsigned OpcodeLength(Decoder* decoder, const byte* pc) {
     switch (static_cast<byte>(*pc)) {
 #define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
       FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
       FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
 #undef DECLARE_OPCODE_CASE
       {
-        MemoryAccessOperand operand(this, pc, UINT32_MAX);
+        MemoryAccessOperand operand(decoder, pc, UINT32_MAX);
         return 1 + operand.length;
       }
       case kExprBr:
       case kExprBrIf: {
-        BreakDepthOperand operand(this, pc);
+        BreakDepthOperand operand(decoder, pc);
         return 1 + operand.length;
       }
       case kExprSetGlobal:
       case kExprGetGlobal: {
-        GlobalIndexOperand operand(this, pc);
+        GlobalIndexOperand operand(decoder, pc);
         return 1 + operand.length;
       }
 
       case kExprCallFunction: {
-        CallFunctionOperand operand(this, pc);
+        CallFunctionOperand operand(decoder, pc);
         return 1 + operand.length;
       }
       case kExprCallIndirect: {
-        CallIndirectOperand operand(this, pc);
+        CallIndirectOperand operand(decoder, pc);
         return 1 + operand.length;
       }
 
@@ -298,7 +447,7 @@
       case kExprIf:  // fall thru
       case kExprLoop:
       case kExprBlock: {
-        BlockTypeOperand operand(this, pc);
+        BlockTypeOperand operand(decoder, pc);
         return 1 + operand.length;
       }
 
@@ -306,35 +455,33 @@
       case kExprTeeLocal:
       case kExprGetLocal:
       case kExprCatch: {
-        LocalIndexOperand operand(this, pc);
+        LocalIndexOperand operand(decoder, pc);
         return 1 + operand.length;
       }
       case kExprBrTable: {
-        BranchTableOperand operand(this, pc);
-        BranchTableIterator iterator(this, operand);
+        BranchTableOperand operand(decoder, pc);
+        BranchTableIterator iterator(decoder, operand);
         return 1 + iterator.length();
       }
       case kExprI32Const: {
-        ImmI32Operand operand(this, pc);
+        ImmI32Operand operand(decoder, pc);
         return 1 + operand.length;
       }
       case kExprI64Const: {
-        ImmI64Operand operand(this, pc);
+        ImmI64Operand operand(decoder, pc);
         return 1 + operand.length;
       }
       case kExprGrowMemory:
       case kExprMemorySize: {
-        MemoryIndexOperand operand(this, pc);
+        MemoryIndexOperand operand(decoder, pc);
         return 1 + operand.length;
       }
-      case kExprI8Const:
-        return 2;
       case kExprF32Const:
         return 5;
       case kExprF64Const:
         return 9;
       case kSimdPrefix: {
-        byte simd_index = checked_read_u8(pc, 1, "simd_index");
+        byte simd_index = decoder->checked_read_u8(pc, 1, "simd_index");
         WasmOpcode opcode =
             static_cast<WasmOpcode>(kSimdPrefix << 8 | simd_index);
         switch (opcode) {
@@ -351,7 +498,7 @@
             return 3;
           }
           default:
-            error("invalid SIMD opcode");
+            decoder->error(pc, "invalid SIMD opcode");
             return 2;
         }
       }
@@ -363,24 +510,24 @@
 
 static const int32_t kNullCatch = -1;
 
-// The full WASM decoder for bytecode. Both verifies bytecode and generates
-// a TurboFan IR graph.
+// The full WASM decoder for bytecode. Verifies bytecode and, optionally,
+// generates a TurboFan IR graph.
 class WasmFullDecoder : public WasmDecoder {
  public:
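+  // Decode/validate only: with no {TFBuilder}, no TurboFan graph is built.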
+  WasmFullDecoder(Zone* zone, const wasm::WasmModule* module,
+                  const FunctionBody& body)
+      : WasmFullDecoder(zone, module, nullptr, body) {}
+
   WasmFullDecoder(Zone* zone, TFBuilder* builder, const FunctionBody& body)
-      : WasmDecoder(body.module, body.sig, body.start, body.end),
-        zone_(zone),
-        builder_(builder),
-        base_(body.base),
-        local_type_vec_(zone),
-        stack_(zone),
-        control_(zone),
-        last_end_found_(false),
-        current_catch_(kNullCatch) {
-    local_types_ = &local_type_vec_;
-  }
+      : WasmFullDecoder(zone, builder->module_env() == nullptr
+                                  ? nullptr
+                                  : builder->module_env()->module,
+                        builder, body) {}
 
   bool Decode() {
+    if (FLAG_wasm_code_fuzzer_gen_test) {
+      PrintRawWasmCode(start_, end_);
+    }
     base::ElapsedTimer decode_timer;
     if (FLAG_trace_wasm_decode_time) {
       decode_timer.Start();
@@ -393,47 +540,21 @@
       return false;
     }
 
-    DecodeLocalDecls();
+    DCHECK_EQ(0, local_types_->size());
+    WasmDecoder::DecodeLocals(this, sig_, local_types_);
     InitSsaEnv();
     DecodeFunctionBody();
 
     if (failed()) return TraceFailed();
 
-#if IMPLICIT_FUNCTION_END
-    // With implicit end support (old style), the function block
-    // remains on the stack. Other control blocks are an error.
-    if (control_.size() > 1) {
-      error(pc_, control_.back().pc, "unterminated control structure");
-      return TraceFailed();
-    }
-
-    // Assume an implicit end to the function body block.
-    if (control_.size() == 1) {
-      Control* c = &control_.back();
-      if (ssa_env_->go()) {
-        FallThruTo(c);
-      }
-
-      if (c->end_env->go()) {
-        // Push the end values onto the stack.
-        stack_.resize(c->stack_depth);
-        if (c->merge.arity == 1) {
-          stack_.push_back(c->merge.vals.first);
-        } else {
-          for (unsigned i = 0; i < c->merge.arity; i++) {
-            stack_.push_back(c->merge.vals.array[i]);
-          }
-        }
-
-        TRACE("  @%-8d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
-        SetEnv("function:end", c->end_env);
-        DoReturn();
-        TRACE("\n");
-      }
-    }
-#else
     if (!control_.empty()) {
-      error(pc_, control_.back().pc, "unterminated control structure");
+      // Generate a better error message depending on whether the
+      // unterminated control structure is the function body block or an
+      // inner structure.
+      if (control_.size() > 1) {
+        error(pc_, control_.back().pc, "unterminated control structure");
+      } else {
+        error("function body must end with \"end\" opcode.");
+      }
       return TraceFailed();
     }
 
@@ -441,7 +562,6 @@
       error("function body must end with \"end\" opcode.");
       return false;
     }
-#endif
 
     if (FLAG_trace_wasm_decode_time) {
       double ms = decode_timer.Elapsed().InMillisecondsF();
@@ -459,36 +579,21 @@
     return false;
   }
 
-  bool DecodeLocalDecls(AstLocalDecls& decls) {
-    DecodeLocalDecls();
-    if (failed()) return false;
-    decls.decls_encoded_size = pc_offset();
-    decls.local_types.reserve(local_type_vec_.size());
-    for (size_t pos = 0; pos < local_type_vec_.size();) {
-      uint32_t count = 0;
-      LocalType type = local_type_vec_[pos];
-      while (pos < local_type_vec_.size() && local_type_vec_[pos] == type) {
-        pos++;
-        count++;
-      }
-      decls.local_types.push_back(std::pair<LocalType, uint32_t>(type, count));
-    }
-    decls.total_local_count = static_cast<uint32_t>(local_type_vec_.size());
-    return true;
-  }
-
-  BitVector* AnalyzeLoopAssignmentForTesting(const byte* pc,
-                                             size_t num_locals) {
-    total_locals_ = num_locals;
-    local_type_vec_.reserve(num_locals);
-    if (num_locals > local_type_vec_.size()) {
-      local_type_vec_.insert(local_type_vec_.end(),
-                             num_locals - local_type_vec_.size(), kAstI32);
-    }
-    return AnalyzeLoopAssignment(pc);
-  }
-
  private:
+  WasmFullDecoder(Zone* zone, const wasm::WasmModule* module,
+                  TFBuilder* builder, const FunctionBody& body)
+      : WasmDecoder(module, body.sig, body.start, body.end),
+        zone_(zone),
+        builder_(builder),
+        base_(body.base),
+        local_type_vec_(zone),
+        stack_(zone),
+        control_(zone),
+        last_end_found_(false),
+        current_catch_(kNullCatch) {
+    local_types_ = &local_type_vec_;
+  }
+
   static const size_t kErrorMsgSize = 128;
 
   Zone* zone_;
@@ -497,7 +602,7 @@
 
   SsaEnv* ssa_env_;
 
-  ZoneVector<LocalType> local_type_vec_;  // types of local variables.
+  ZoneVector<ValueType> local_type_vec_;  // types of local variables.
   ZoneVector<Value> stack_;               // stack of values.
   ZoneVector<Control> control_;           // stack of blocks, loops, and ifs.
   bool last_end_found_;
@@ -521,11 +626,11 @@
       // Initialize local variables.
       uint32_t index = 0;
       while (index < sig_->parameter_count()) {
-        ssa_env->locals[index] = builder_->Param(index, local_type_vec_[index]);
+        ssa_env->locals[index] = builder_->Param(index);
         index++;
       }
       while (index < local_type_vec_.size()) {
-        LocalType type = local_type_vec_[index];
+        ValueType type = local_type_vec_[index];
         TFNode* node = DefaultValue(type);
         while (index < local_type_vec_.size() &&
                local_type_vec_[index] == type) {
@@ -533,27 +638,28 @@
           ssa_env->locals[index++] = node;
         }
       }
-      builder_->set_module(module_);
     }
     ssa_env->control = start;
     ssa_env->effect = start;
     SetEnv("initial", ssa_env);
     if (builder_) {
-      builder_->StackCheck(position());
+      // The function-prologue stack check is associated with position 0, which
+      // is never a position of any instruction in the function.
+      builder_->StackCheck(0);
     }
   }
 
-  TFNode* DefaultValue(LocalType type) {
+  TFNode* DefaultValue(ValueType type) {
     switch (type) {
-      case kAstI32:
+      case kWasmI32:
         return builder_->Int32Constant(0);
-      case kAstI64:
+      case kWasmI64:
         return builder_->Int64Constant(0);
-      case kAstF32:
+      case kWasmF32:
         return builder_->Float32Constant(0);
-      case kAstF64:
+      case kWasmF64:
         return builder_->Float64Constant(0);
-      case kAstS128:
+      case kWasmS128:
         return builder_->CreateS128Value(0);
       default:
         UNREACHABLE();
@@ -572,58 +678,19 @@
     return bytes;
   }
 
-  // Decodes the locals declarations, if any, populating {local_type_vec_}.
-  void DecodeLocalDecls() {
-    DCHECK_EQ(0u, local_type_vec_.size());
-    // Initialize {local_type_vec} from signature.
-    if (sig_) {
-      local_type_vec_.reserve(sig_->parameter_count());
-      for (size_t i = 0; i < sig_->parameter_count(); ++i) {
-        local_type_vec_.push_back(sig_->GetParam(i));
-      }
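+  // Report an error if the module declares no memory; returns whether a
+  // memory is present.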
+  bool CheckHasMemory() {
+    if (!module_->has_memory) {
+      error(pc_ - 1, "memory instruction with no memory");
     }
-    // Decode local declarations, if any.
-    uint32_t entries = consume_u32v("local decls count");
-    TRACE("local decls count: %u\n", entries);
-    while (entries-- > 0 && pc_ < limit_) {
-      uint32_t count = consume_u32v("local count");
-      if (count > kMaxNumWasmLocals) {
-        error(pc_ - 1, "local count too large");
-        return;
-      }
-      byte code = consume_u8("local type");
-      LocalType type;
-      switch (code) {
-        case kLocalI32:
-          type = kAstI32;
-          break;
-        case kLocalI64:
-          type = kAstI64;
-          break;
-        case kLocalF32:
-          type = kAstF32;
-          break;
-        case kLocalF64:
-          type = kAstF64;
-          break;
-        case kLocalS128:
-          type = kAstS128;
-          break;
-        default:
-          error(pc_ - 1, "invalid local type");
-          return;
-      }
-      local_type_vec_.insert(local_type_vec_.end(), count, type);
-    }
-    total_locals_ = local_type_vec_.size();
+    return module_->has_memory;
   }
 
   // Decodes the body of a function.
   void DecodeFunctionBody() {
     TRACE("wasm-decode %p...%p (module+%d, %d bytes) %s\n",
           reinterpret_cast<const void*>(start_),
-          reinterpret_cast<const void*>(limit_), baserel(pc_),
-          static_cast<int>(limit_ - start_), builder_ ? "graph building" : "");
+          reinterpret_cast<const void*>(end_), baserel(pc_),
+          static_cast<int>(end_ - start_), builder_ ? "graph building" : "");
 
     {
       // Set up initial function block.
@@ -643,15 +710,15 @@
       }
     }
 
-    if (pc_ >= limit_) return;  // Nothing to do.
-
-    while (true) {  // decoding loop.
+    while (pc_ < end_) {  // decoding loop.
       unsigned len = 1;
       WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
-      if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
-        TRACE("  @%-8d #%02x:%-20s|", startrel(pc_), opcode,
-              WasmOpcodes::ShortOpcodeName(opcode));
+#if DEBUG
+      if (FLAG_trace_wasm_decoder && !WasmOpcodes::IsPrefixOpcode(opcode)) {
+        TRACE("  @%-8d #%-20s|", startrel(pc_),
+              WasmOpcodes::OpcodeName(opcode));
       }
+#endif
 
       FunctionSig* sig = WasmOpcodes::Signature(opcode);
       if (sig) {
@@ -673,8 +740,12 @@
           }
           case kExprThrow: {
             CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
-            Value value = Pop(0, kAstI32);
+            Value value = Pop(0, kWasmI32);
             BUILD(Throw, value.node);
+            // TODO(titzer): Throw should end control, but currently we build a
+            // (reachable) runtime call instead of connecting it directly to
+            // end.
+            //            EndControl();
             break;
           }
           case kExprTry: {
@@ -710,9 +781,7 @@
               break;
             }
 
-            if (ssa_env_->go()) {
-              MergeValuesInto(c);
-            }
+            FallThruTo(c);
             stack_.resize(c->stack_depth);
 
             DCHECK_NOT_NULL(c->try_info);
@@ -746,7 +815,7 @@
           case kExprIf: {
             // Condition on top of stack. Split environments for branches.
             BlockTypeOperand operand(this, pc_);
-            Value cond = Pop(0, kAstI32);
+            Value cond = Pop(0, kWasmI32);
             TFNode* if_true = nullptr;
             TFNode* if_false = nullptr;
             BUILD(BranchNoHint, cond.node, &if_true, &if_false);
@@ -776,8 +845,8 @@
               break;
             }
             FallThruTo(c);
-            // Switch to environment for false branch.
             stack_.resize(c->stack_depth);
+            // Switch to environment for false branch.
             SetEnv("if_else:false", c->false_env);
             c->false_env = nullptr;  // record that an else is already seen
             break;
@@ -791,7 +860,8 @@
             Control* c = &control_.back();
             if (c->is_loop()) {
               // A loop just leaves the values on the stack.
-              TypeCheckLoopFallThru(c);
+              TypeCheckFallThru(c);
+              if (c->unreachable) PushEndValues(c);
               PopControl();
               SetEnv("loop:end", ssa_env_);
               break;
@@ -800,8 +870,7 @@
               if (c->false_env != nullptr) {
                 // End the true branch of a one-armed if.
                 Goto(c->false_env, c->end_env);
-                if (ssa_env_->go() &&
-                    static_cast<int>(stack_.size()) != c->stack_depth) {
+                if (!c->unreachable && stack_.size() != c->stack_depth) {
                   error("end of if expected empty stack");
                   stack_.resize(c->stack_depth);
                 }
@@ -824,49 +893,32 @@
             }
             FallThruTo(c);
             SetEnv(name, c->end_env);
+            PushEndValues(c);
 
-            // Push the end values onto the stack.
-            stack_.resize(c->stack_depth);
-            if (c->merge.arity == 1) {
-              stack_.push_back(c->merge.vals.first);
-            } else {
-              for (unsigned i = 0; i < c->merge.arity; i++) {
-                stack_.push_back(c->merge.vals.array[i]);
-              }
-            }
-
-            PopControl();
-
-            if (control_.empty()) {
-              // If the last (implicit) control was popped, check we are at end.
+            if (control_.size() == 1) {
+              // If at the last (implicit) control, check we are at end.
               if (pc_ + 1 != end_) {
                 error(pc_, pc_ + 1, "trailing code after function end");
+                break;
               }
               last_end_found_ = true;
               if (ssa_env_->go()) {
                 // The result of the block is the return value.
-                TRACE("  @%-8d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
+                TRACE("  @%-8d #xx:%-20s|", startrel(pc_), "(implicit) return");
                 DoReturn();
                 TRACE("\n");
+              } else {
+                TypeCheckFallThru(c);
               }
-              return;
             }
+            PopControl();
             break;
           }
           case kExprSelect: {
-            Value cond = Pop(2, kAstI32);
+            Value cond = Pop(2, kWasmI32);
             Value fval = Pop();
-            Value tval = Pop();
-            if (tval.type == kAstStmt || tval.type != fval.type) {
-              if (tval.type != kAstEnd && fval.type != kAstEnd) {
-                error("type mismatch in select");
-                break;
-              }
-            }
+            Value tval = Pop(0, fval.type);
             if (build()) {
-              DCHECK(tval.type != kAstEnd);
-              DCHECK(fval.type != kAstEnd);
-              DCHECK(cond.type != kAstEnd);
               TFNode* controls[2];
               builder_->BranchNoHint(cond.node, &controls[0], &controls[1]);
               TFNode* merge = builder_->Merge(2, controls);
@@ -875,7 +927,7 @@
               Push(tval.type, phi);
               ssa_env_->control = merge;
             } else {
-              Push(tval.type, nullptr);
+              Push(tval.type == kWasmVar ? fval.type : tval.type, nullptr);
             }
             break;
           }
@@ -890,7 +942,7 @@
           }
           case kExprBrIf: {
             BreakDepthOperand operand(this, pc_);
-            Value cond = Pop(0, kAstI32);
+            Value cond = Pop(0, kWasmI32);
             if (ok() && Validate(pc_, operand, control_)) {
               SsaEnv* fenv = ssa_env_;
               SsaEnv* tenv = Split(fenv);
@@ -907,7 +959,7 @@
             BranchTableOperand operand(this, pc_);
             BranchTableIterator iterator(this, operand);
             if (Validate(pc_, operand, control_.size())) {
-              Value key = Pop(0, kAstI32);
+              Value key = Pop(0, kWasmI32);
               if (failed()) break;
 
               SsaEnv* break_env = ssa_env_;
@@ -917,6 +969,7 @@
 
                 SsaEnv* copy = Steal(break_env);
                 ssa_env_ = copy;
+                MergeValues* merge = nullptr;
                 while (ok() && iterator.has_next()) {
                   uint32_t i = iterator.cur_index();
                   const byte* pos = iterator.pc();
@@ -930,6 +983,26 @@
                                           ? BUILD(IfDefault, sw)
                                           : BUILD(IfValue, i, sw);
                   BreakTo(target);
+
+                  // Check that label types match up.
+                  Control* c = &control_[control_.size() - target - 1];
+                  if (i == 0) {
+                    merge = &c->merge;
+                  } else if (merge->arity != c->merge.arity) {
+                    error(pos, pos, "inconsistent arity in br_table target %u"
+                          " (previous was %u, this one %u)",
+                          i, merge->arity, c->merge.arity);
+                  } else if (control_.back().unreachable) {
+                    for (uint32_t j = 0; ok() && j < merge->arity; ++j) {
+                      if ((*merge)[j].type != c->merge[j].type) {
+                        error(pos, pos,
+                              "type error in br_table target %d operand %d"
+                              " (previous expected %s, this one %s)", i, j,
+                              WasmOpcodes::TypeName((*merge)[j].type),
+                              WasmOpcodes::TypeName(c->merge[j].type));
+                      }
+                    }
+                  }
                 }
                 if (failed()) break;
               } else {
@@ -946,6 +1019,7 @@
               ssa_env_ = break_env;
             }
             len = 1 + iterator.length();
+            EndControl();
             break;
           }
           case kExprReturn: {
@@ -957,33 +1031,27 @@
             EndControl();
             break;
           }
-          case kExprI8Const: {
-            ImmI8Operand operand(this, pc_);
-            Push(kAstI32, BUILD(Int32Constant, operand.value));
-            len = 1 + operand.length;
-            break;
-          }
           case kExprI32Const: {
             ImmI32Operand operand(this, pc_);
-            Push(kAstI32, BUILD(Int32Constant, operand.value));
+            Push(kWasmI32, BUILD(Int32Constant, operand.value));
             len = 1 + operand.length;
             break;
           }
           case kExprI64Const: {
             ImmI64Operand operand(this, pc_);
-            Push(kAstI64, BUILD(Int64Constant, operand.value));
+            Push(kWasmI64, BUILD(Int64Constant, operand.value));
             len = 1 + operand.length;
             break;
           }
           case kExprF32Const: {
             ImmF32Operand operand(this, pc_);
-            Push(kAstF32, BUILD(Float32Constant, operand.value));
+            Push(kWasmF32, BUILD(Float32Constant, operand.value));
             len = 1 + operand.length;
             break;
           }
           case kExprF64Const: {
             ImmF64Operand operand(this, pc_);
-            Push(kAstF64, BUILD(Float64Constant, operand.value));
+            Push(kWasmF64, BUILD(Float64Constant, operand.value));
             len = 1 + operand.length;
             break;
           }
@@ -1045,79 +1113,81 @@
             break;
           }
           case kExprI32LoadMem8S:
-            len = DecodeLoadMem(kAstI32, MachineType::Int8());
+            len = DecodeLoadMem(kWasmI32, MachineType::Int8());
             break;
           case kExprI32LoadMem8U:
-            len = DecodeLoadMem(kAstI32, MachineType::Uint8());
+            len = DecodeLoadMem(kWasmI32, MachineType::Uint8());
             break;
           case kExprI32LoadMem16S:
-            len = DecodeLoadMem(kAstI32, MachineType::Int16());
+            len = DecodeLoadMem(kWasmI32, MachineType::Int16());
             break;
           case kExprI32LoadMem16U:
-            len = DecodeLoadMem(kAstI32, MachineType::Uint16());
+            len = DecodeLoadMem(kWasmI32, MachineType::Uint16());
             break;
           case kExprI32LoadMem:
-            len = DecodeLoadMem(kAstI32, MachineType::Int32());
+            len = DecodeLoadMem(kWasmI32, MachineType::Int32());
             break;
           case kExprI64LoadMem8S:
-            len = DecodeLoadMem(kAstI64, MachineType::Int8());
+            len = DecodeLoadMem(kWasmI64, MachineType::Int8());
             break;
           case kExprI64LoadMem8U:
-            len = DecodeLoadMem(kAstI64, MachineType::Uint8());
+            len = DecodeLoadMem(kWasmI64, MachineType::Uint8());
             break;
           case kExprI64LoadMem16S:
-            len = DecodeLoadMem(kAstI64, MachineType::Int16());
+            len = DecodeLoadMem(kWasmI64, MachineType::Int16());
             break;
           case kExprI64LoadMem16U:
-            len = DecodeLoadMem(kAstI64, MachineType::Uint16());
+            len = DecodeLoadMem(kWasmI64, MachineType::Uint16());
             break;
           case kExprI64LoadMem32S:
-            len = DecodeLoadMem(kAstI64, MachineType::Int32());
+            len = DecodeLoadMem(kWasmI64, MachineType::Int32());
             break;
           case kExprI64LoadMem32U:
-            len = DecodeLoadMem(kAstI64, MachineType::Uint32());
+            len = DecodeLoadMem(kWasmI64, MachineType::Uint32());
             break;
           case kExprI64LoadMem:
-            len = DecodeLoadMem(kAstI64, MachineType::Int64());
+            len = DecodeLoadMem(kWasmI64, MachineType::Int64());
             break;
           case kExprF32LoadMem:
-            len = DecodeLoadMem(kAstF32, MachineType::Float32());
+            len = DecodeLoadMem(kWasmF32, MachineType::Float32());
             break;
           case kExprF64LoadMem:
-            len = DecodeLoadMem(kAstF64, MachineType::Float64());
+            len = DecodeLoadMem(kWasmF64, MachineType::Float64());
             break;
           case kExprI32StoreMem8:
-            len = DecodeStoreMem(kAstI32, MachineType::Int8());
+            len = DecodeStoreMem(kWasmI32, MachineType::Int8());
             break;
           case kExprI32StoreMem16:
-            len = DecodeStoreMem(kAstI32, MachineType::Int16());
+            len = DecodeStoreMem(kWasmI32, MachineType::Int16());
             break;
           case kExprI32StoreMem:
-            len = DecodeStoreMem(kAstI32, MachineType::Int32());
+            len = DecodeStoreMem(kWasmI32, MachineType::Int32());
             break;
           case kExprI64StoreMem8:
-            len = DecodeStoreMem(kAstI64, MachineType::Int8());
+            len = DecodeStoreMem(kWasmI64, MachineType::Int8());
             break;
           case kExprI64StoreMem16:
-            len = DecodeStoreMem(kAstI64, MachineType::Int16());
+            len = DecodeStoreMem(kWasmI64, MachineType::Int16());
             break;
           case kExprI64StoreMem32:
-            len = DecodeStoreMem(kAstI64, MachineType::Int32());
+            len = DecodeStoreMem(kWasmI64, MachineType::Int32());
             break;
           case kExprI64StoreMem:
-            len = DecodeStoreMem(kAstI64, MachineType::Int64());
+            len = DecodeStoreMem(kWasmI64, MachineType::Int64());
             break;
           case kExprF32StoreMem:
-            len = DecodeStoreMem(kAstF32, MachineType::Float32());
+            len = DecodeStoreMem(kWasmF32, MachineType::Float32());
             break;
           case kExprF64StoreMem:
-            len = DecodeStoreMem(kAstF64, MachineType::Float64());
+            len = DecodeStoreMem(kWasmF64, MachineType::Float64());
             break;
           case kExprGrowMemory: {
+            if (!CheckHasMemory()) break;
             MemoryIndexOperand operand(this, pc_);
+            DCHECK_NOT_NULL(module_);
             if (module_->origin != kAsmJsOrigin) {
-              Value val = Pop(0, kAstI32);
-              Push(kAstI32, BUILD(GrowMemory, val.node));
+              Value val = Pop(0, kWasmI32);
+              Push(kWasmI32, BUILD(GrowMemory, val.node));
             } else {
               error("grow_memory is not supported for asmjs modules");
             }
@@ -1125,8 +1195,9 @@
             break;
           }
           case kExprMemorySize: {
+            if (!CheckHasMemory()) break;
             MemoryIndexOperand operand(this, pc_);
-            Push(kAstI32, BUILD(CurrentMemoryPages));
+            Push(kWasmI32, BUILD(CurrentMemoryPages));
             len = 1 + operand.length;
             break;
           }
@@ -1144,7 +1215,7 @@
           case kExprCallIndirect: {
             CallIndirectOperand operand(this, pc_);
             if (Validate(pc_, operand)) {
-              Value index = Pop(0, kAstI32);
+              Value index = Pop(0, kWasmI32);
               TFNode** buffer = PopArgs(operand.sig);
               if (buffer) buffer[0] = index.node;
               TFNode** rets = nullptr;
@@ -1159,13 +1230,13 @@
             len++;
             byte simd_index = checked_read_u8(pc_, 1, "simd index");
             opcode = static_cast<WasmOpcode>(opcode << 8 | simd_index);
-            TRACE("  @%-4d #%02x #%02x:%-20s|", startrel(pc_), kSimdPrefix,
-                  simd_index, WasmOpcodes::ShortOpcodeName(opcode));
+            TRACE("  @%-4d #%-20s|", startrel(pc_),
+                  WasmOpcodes::OpcodeName(opcode));
             len += DecodeSimdOpcode(opcode);
             break;
           }
           case kAtomicPrefix: {
-            if (!module_ || module_->origin != kAsmJsOrigin) {
+            if (module_ == nullptr || module_->origin != kAsmJsOrigin) {
               error("Atomics are allowed only in AsmJs modules");
               break;
             }
@@ -1184,7 +1255,7 @@
           }
           default: {
             // Deal with special asmjs opcodes.
-            if (module_ && module_->origin == kAsmJsOrigin) {
+            if (module_ != nullptr && module_->origin == kAsmJsOrigin) {
               sig = WasmOpcodes::AsmjsSignature(opcode);
               if (sig) {
                 BuildSimpleOperator(opcode, sig);
@@ -1199,6 +1270,35 @@
 
 #if DEBUG
       if (FLAG_trace_wasm_decoder) {
+        PrintF(" ");
+        for (size_t i = 0; i < control_.size(); ++i) {
+          Control* c = &control_[i];
+          enum ControlKind {
+            kControlIf,
+            kControlBlock,
+            kControlLoop,
+            kControlTry
+          };
+          switch (c->kind) {
+            case kControlIf:
+              PrintF("I");
+              break;
+            case kControlBlock:
+              PrintF("B");
+              break;
+            case kControlLoop:
+              PrintF("L");
+              break;
+            case kControlTry:
+              PrintF("T");
+              break;
+            default:
+              break;
+          }
+          PrintF("%u", c->merge.arity);
+          if (c->unreachable) PrintF("*");
+        }
+        PrintF(" | ");
         for (size_t i = 0; i < stack_.size(); ++i) {
           Value& val = stack_[i];
           WasmOpcode opcode = static_cast<WasmOpcode>(*val.pc);
@@ -1207,7 +1307,7 @@
           }
           PrintF(" %c@%d:%s", WasmOpcodes::ShortNameOf(val.type),
                  static_cast<int>(val.pc - start_),
-                 WasmOpcodes::ShortOpcodeName(opcode));
+                 WasmOpcodes::OpcodeName(opcode));
           switch (opcode) {
             case kExprI32Const: {
               ImmI32Operand operand(this, val.pc);
@@ -1228,20 +1328,23 @@
             default:
               break;
           }
+          if (val.node == nullptr) PrintF("?");
         }
         PrintF("\n");
       }
 #endif
       pc_ += len;
-      if (pc_ >= limit_) {
-        // End of code reached or exceeded.
-        if (pc_ > limit_ && ok()) error("Beyond end of code");
-        return;
-      }
     }  // end decode loop
+    if (pc_ > end_ && ok()) error("Beyond end of code");
   }
 
-  void EndControl() { ssa_env_->Kill(SsaEnv::kControlEnd); }
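+  // Kill the current SSA environment and mark the innermost control scope
+  // unreachable, dropping any values pushed inside it.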
+  void EndControl() {
+    ssa_env_->Kill(SsaEnv::kControlEnd);
+    if (!control_.empty()) {
+      stack_.resize(control_.back().stack_depth);
+      control_.back().unreachable = true;
+    }
+  }
 
   void SetBlockType(Control* c, BlockTypeOperand& operand) {
     c->merge.arity = operand.arity;
@@ -1273,77 +1376,123 @@
     }
   }
 
-  LocalType GetReturnType(FunctionSig* sig) {
-    return sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
+  ValueType GetReturnType(FunctionSig* sig) {
+    return sig->return_count() == 0 ? kWasmStmt : sig->GetReturn();
   }
 
   void PushBlock(SsaEnv* end_env) {
-    const int stack_depth = static_cast<int>(stack_.size());
     control_.emplace_back(
-        Control::Block(pc_, stack_depth, end_env, current_catch_));
+        Control::Block(pc_, stack_.size(), end_env, current_catch_));
   }
 
   void PushLoop(SsaEnv* end_env) {
-    const int stack_depth = static_cast<int>(stack_.size());
     control_.emplace_back(
-        Control::Loop(pc_, stack_depth, end_env, current_catch_));
+        Control::Loop(pc_, stack_.size(), end_env, current_catch_));
   }
 
   void PushIf(SsaEnv* end_env, SsaEnv* false_env) {
-    const int stack_depth = static_cast<int>(stack_.size());
     control_.emplace_back(
-        Control::If(pc_, stack_depth, end_env, false_env, current_catch_));
+        Control::If(pc_, stack_.size(), end_env, false_env, current_catch_));
   }
 
   void PushTry(SsaEnv* end_env, SsaEnv* catch_env) {
-    const int stack_depth = static_cast<int>(stack_.size());
-    control_.emplace_back(Control::Try(pc_, stack_depth, end_env, zone_,
+    control_.emplace_back(Control::Try(pc_, stack_.size(), end_env, zone_,
                                        catch_env, current_catch_));
     current_catch_ = static_cast<int32_t>(control_.size() - 1);
   }
 
   void PopControl() { control_.pop_back(); }
 
-  int DecodeLoadMem(LocalType type, MachineType mem_type) {
+  int DecodeLoadMem(ValueType type, MachineType mem_type) {
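+    // Memory accesses are invalid in modules without a declared or
+    // imported memory; {CheckHasMemory} reports the error.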
+    if (!CheckHasMemory()) return 0;
     MemoryAccessOperand operand(this, pc_,
                                 ElementSizeLog2Of(mem_type.representation()));
 
-    Value index = Pop(0, kAstI32);
+    Value index = Pop(0, kWasmI32);
     TFNode* node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
                          operand.alignment, position());
     Push(type, node);
     return 1 + operand.length;
   }
 
-  int DecodeStoreMem(LocalType type, MachineType mem_type) {
+  int DecodeStoreMem(ValueType type, MachineType mem_type) {
+    if (!CheckHasMemory()) return 0;
     MemoryAccessOperand operand(this, pc_,
                                 ElementSizeLog2Of(mem_type.representation()));
     Value val = Pop(1, type);
-    Value index = Pop(0, kAstI32);
+    Value index = Pop(0, kWasmI32);
     BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
           val.node, position());
     return 1 + operand.length;
   }
 
-  unsigned ExtractLane(WasmOpcode opcode, LocalType type) {
-    LaneOperand operand(this, pc_);
-    if (Validate(pc_, operand)) {
-      TFNode* input = Pop(0, LocalType::kSimd128).node;
-      TFNode* node = BUILD(SimdExtractLane, opcode, operand.lane, input);
+  unsigned SimdExtractLane(WasmOpcode opcode, ValueType type) {
+    SimdLaneOperand operand(this, pc_);
+    if (Validate(pc_, opcode, operand)) {
+      compiler::NodeVector inputs(1, zone_);
+      inputs[0] = Pop(0, ValueType::kSimd128).node;
+      TFNode* node = BUILD(SimdLaneOp, opcode, operand.lane, inputs);
       Push(type, node);
     }
     return operand.length;
   }
 
+  unsigned SimdReplaceLane(WasmOpcode opcode, ValueType type) {
+    SimdLaneOperand operand(this, pc_);
+    if (Validate(pc_, opcode, operand)) {
+      compiler::NodeVector inputs(2, zone_);
+      inputs[1] = Pop(1, type).node;
+      inputs[0] = Pop(0, ValueType::kSimd128).node;
+      TFNode* node = BUILD(SimdLaneOp, opcode, operand.lane, inputs);
+      Push(ValueType::kSimd128, node);
+    }
+    return operand.length;
+  }
+
+  unsigned SimdShiftOp(WasmOpcode opcode) {
+    SimdShiftOperand operand(this, pc_);
+    if (Validate(pc_, opcode, operand)) {
+      compiler::NodeVector inputs(1, zone_);
+      inputs[0] = Pop(0, ValueType::kSimd128).node;
+      TFNode* node = BUILD(SimdShiftOp, opcode, operand.shift, inputs);
+      Push(ValueType::kSimd128, node);
+    }
+    return operand.length;
+  }
+
   unsigned DecodeSimdOpcode(WasmOpcode opcode) {
     unsigned len = 0;
     switch (opcode) {
-      case kExprI32x4ExtractLane: {
-        len = ExtractLane(opcode, LocalType::kWord32);
+      case kExprF32x4ExtractLane: {
+        len = SimdExtractLane(opcode, ValueType::kFloat32);
         break;
       }
-      case kExprF32x4ExtractLane: {
-        len = ExtractLane(opcode, LocalType::kFloat32);
+      case kExprI32x4ExtractLane:
+      case kExprI16x8ExtractLane:
+      case kExprI8x16ExtractLane: {
+        len = SimdExtractLane(opcode, ValueType::kWord32);
+        break;
+      }
+      case kExprF32x4ReplaceLane: {
+        len = SimdReplaceLane(opcode, ValueType::kFloat32);
+        break;
+      }
+      case kExprI32x4ReplaceLane:
+      case kExprI16x8ReplaceLane:
+      case kExprI8x16ReplaceLane: {
+        len = SimdReplaceLane(opcode, ValueType::kWord32);
+        break;
+      }
+      case kExprI32x4Shl:
+      case kExprI32x4ShrS:
+      case kExprI32x4ShrU:
+      case kExprI16x8Shl:
+      case kExprI16x8ShrS:
+      case kExprI16x8ShrU:
+      case kExprI8x16Shl:
+      case kExprI8x16ShrS:
+      case kExprI8x16ShrU: {
+        len = SimdShiftOp(opcode);
         break;
       }
       default: {
@@ -1381,12 +1530,25 @@
     EndControl();
   }
 
-  void Push(LocalType type, TFNode* node) {
-    if (type != kAstStmt && type != kAstEnd) {
+  void Push(ValueType type, TFNode* node) {
+    if (type != kWasmStmt) {
       stack_.push_back({pc_, node, type});
     }
   }
 
+  void PushEndValues(Control* c) {
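+    // Replace everything above {c}'s stack depth with the merge values
+    // recorded for {c}, i.e. the values the block ends with.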
+    DCHECK_EQ(c, &control_.back());
+    stack_.resize(c->stack_depth);
+    if (c->merge.arity == 1) {
+      stack_.push_back(c->merge.vals.first);
+    } else {
+      for (unsigned i = 0; i < c->merge.arity; i++) {
+        stack_.push_back(c->merge.vals.array[i]);
+      }
+    }
+    DCHECK_EQ(c->stack_depth + c->merge.arity, stack_.size());
+  }
+
   void PushReturns(FunctionSig* sig, TFNode** rets) {
     for (size_t i = 0; i < sig->return_count(); i++) {
       // When verifying only, {rets} will be null, so push null.
@@ -1396,34 +1558,27 @@
 
   const char* SafeOpcodeNameAt(const byte* pc) {
     if (pc >= end_) return "<end>";
-    return WasmOpcodes::ShortOpcodeName(static_cast<WasmOpcode>(*pc));
+    return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(*pc));
   }
 
-  Value Pop(int index, LocalType expected) {
-    if (!ssa_env_->go()) {
-      // Unreachable code is essentially not typechecked.
-      return {pc_, nullptr, expected};
-    }
+  Value Pop(int index, ValueType expected) {
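+    // {kWasmVar} acts as a bottom type: it is produced for values popped
+    // in unreachable code and unifies with any expected type.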
     Value val = Pop();
-    if (val.type != expected) {
-      if (val.type != kAstEnd) {
-        error(pc_, val.pc, "%s[%d] expected type %s, found %s of type %s",
-              SafeOpcodeNameAt(pc_), index, WasmOpcodes::TypeName(expected),
-              SafeOpcodeNameAt(val.pc), WasmOpcodes::TypeName(val.type));
-      }
+    if (val.type != expected && val.type != kWasmVar && expected != kWasmVar) {
+      error(pc_, val.pc, "%s[%d] expected type %s, found %s of type %s",
+            SafeOpcodeNameAt(pc_), index, WasmOpcodes::TypeName(expected),
+            SafeOpcodeNameAt(val.pc), WasmOpcodes::TypeName(val.type));
     }
     return val;
   }
 
   Value Pop() {
-    if (!ssa_env_->go()) {
-      // Unreachable code is essentially not typechecked.
-      return {pc_, nullptr, kAstEnd};
-    }
     size_t limit = control_.empty() ? 0 : control_.back().stack_depth;
     if (stack_.size() <= limit) {
-      Value val = {pc_, nullptr, kAstStmt};
-      error(pc_, pc_, "%s found empty stack", SafeOpcodeNameAt(pc_));
+      // Popped past the start of the current control; this is an error
+      // only in reachable code.
+      Value val = {pc_, nullptr, kWasmVar};
+      if (!control_.back().unreachable) {
+        error(pc_, pc_, "%s found empty stack", SafeOpcodeNameAt(pc_));
+      }
       return val;
     }
     Value val = stack_.back();
@@ -1431,22 +1586,6 @@
     return val;
   }
 
-  Value PopUpTo(int stack_depth) {
-    if (!ssa_env_->go()) {
-      // Unreachable code is essentially not typechecked.
-      return {pc_, nullptr, kAstEnd};
-    }
-    if (stack_depth == static_cast<int>(stack_.size())) {
-      Value val = {pc_, nullptr, kAstStmt};
-      return val;
-    } else {
-      DCHECK_LE(stack_depth, static_cast<int>(stack_.size()));
-      Value val = Pop();
-      stack_.resize(stack_depth);
-      return val;
-    }
-  }
-
   int baserel(const byte* ptr) {
     return base_ ? static_cast<int>(ptr - base_) : 0;
   }
@@ -1454,17 +1593,17 @@
   int startrel(const byte* ptr) { return static_cast<int>(ptr - start_); }
 
   void BreakTo(unsigned depth) {
-    if (!ssa_env_->go()) return;
     Control* c = &control_[control_.size() - depth - 1];
     if (c->is_loop()) {
       // This is the inner loop block, which does not have a value.
       Goto(ssa_env_, c->end_env);
     } else {
       // Merge the value(s) into the end of the block.
-      if (c->stack_depth + c->merge.arity > stack_.size()) {
+      size_t expected = control_.back().stack_depth + c->merge.arity;
+      if (stack_.size() < expected && !control_.back().unreachable) {
         error(
             pc_, pc_,
-            "expected at least %d values on the stack for br to @%d, found %d",
+            "expected at least %u values on the stack for br to @%d, found %d",
             c->merge.arity, startrel(c->pc),
             static_cast<int>(stack_.size() - c->stack_depth));
         return;
@@ -1474,37 +1613,41 @@
   }
 
   void FallThruTo(Control* c) {
-    if (!ssa_env_->go()) return;
+    DCHECK_EQ(c, &control_.back());
     // Merge the value(s) into the end of the block.
-    int arity = static_cast<int>(c->merge.arity);
-    if (c->stack_depth + arity != static_cast<int>(stack_.size())) {
-      error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
-            arity, startrel(c->pc));
+    size_t expected = c->stack_depth + c->merge.arity;
+    if (stack_.size() == expected ||
+        (stack_.size() < expected && c->unreachable)) {
+      MergeValuesInto(c);
+      c->unreachable = false;
       return;
     }
-    MergeValuesInto(c);
+    error(pc_, pc_, "expected %u elements on the stack for fallthru to @%d",
+          c->merge.arity, startrel(c->pc));
   }
 
-  inline Value& GetMergeValueFromStack(Control* c, int i) {
+  inline Value& GetMergeValueFromStack(Control* c, size_t i) {
     return stack_[stack_.size() - c->merge.arity + i];
   }
 
-  void TypeCheckLoopFallThru(Control* c) {
-    if (!ssa_env_->go()) return;
+  void TypeCheckFallThru(Control* c) {
+    DCHECK_EQ(c, &control_.back());
     // Fallthru must match arity exactly.
     int arity = static_cast<int>(c->merge.arity);
-    if (c->stack_depth + arity != static_cast<int>(stack_.size())) {
+    if (c->stack_depth + arity < stack_.size() ||
+        (c->stack_depth + arity != stack_.size() && !c->unreachable)) {
       error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
             arity, startrel(c->pc));
       return;
     }
     // Typecheck the values left on the stack.
-    for (unsigned i = 0; i < c->merge.arity; i++) {
+    size_t avail = stack_.size() - c->stack_depth;
+    for (size_t i = avail >= c->merge.arity ? 0 : c->merge.arity - avail;
+         i < c->merge.arity; i++) {
       Value& val = GetMergeValueFromStack(c, i);
-      Value& old =
-          c->merge.arity == 1 ? c->merge.vals.first : c->merge.vals.array[i];
+      Value& old = c->merge[i];
       if (val.type != old.type) {
-        error(pc_, pc_, "type error in merge[%d] (expected %s, got %s)", i,
+        error(pc_, pc_, "type error in merge[%zu] (expected %s, got %s)", i,
               WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
         return;
       }
@@ -1514,23 +1657,24 @@
   void MergeValuesInto(Control* c) {
     SsaEnv* target = c->end_env;
     bool first = target->state == SsaEnv::kUnreachable;
+    bool reachable = ssa_env_->go();
     Goto(ssa_env_, target);
 
-    for (unsigned i = 0; i < c->merge.arity; i++) {
+    size_t avail = stack_.size() - control_.back().stack_depth;
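+    // As in TypeCheckFallThru, merge values missing because of unreachable
+    // code are skipped; SSA nodes are merged below only when {reachable}.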
+    for (size_t i = avail >= c->merge.arity ? 0 : c->merge.arity - avail;
+         i < c->merge.arity; i++) {
       Value& val = GetMergeValueFromStack(c, i);
-      Value& old =
-          c->merge.arity == 1 ? c->merge.vals.first : c->merge.vals.array[i];
-      if (val.type != old.type) {
-        error(pc_, pc_, "type error in merge[%d] (expected %s, got %s)", i,
+      Value& old = c->merge[i];
+      if (val.type != old.type && val.type != kWasmVar) {
+        error(pc_, pc_, "type error in merge[%zu] (expected %s, got %s)", i,
               WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
         return;
       }
-      if (builder_) {
+      if (builder_ && reachable) {
+        DCHECK_NOT_NULL(val.node);
         old.node =
             first ? val.node : CreateOrMergeIntoPhi(old.type, target->control,
                                                     old.node, val.node);
-      } else {
-        old.node = nullptr;
       }
     }
   }
@@ -1555,13 +1699,13 @@
             break;
         }
       }
-      PrintF("  env = %p, state = %c, reason = %s", static_cast<void*>(env),
+      PrintF("{set_env = %p, state = %c, reason = %s", static_cast<void*>(env),
              state, reason);
       if (env && env->control) {
         PrintF(", control = ");
         compiler::WasmGraphBuilder::PrintDebugName(env->control);
       }
-      PrintF("\n");
+      PrintF("}");
     }
 #endif
     ssa_env_ = env;
@@ -1602,7 +1746,7 @@
     } else {
       DCHECK_EQ(SsaEnv::kMerged, try_info->catch_env->state);
       try_info->exception =
-          CreateOrMergeIntoPhi(kAstI32, try_info->catch_env->control,
+          CreateOrMergeIntoPhi(kWasmI32, try_info->catch_env->control,
                                try_info->exception, if_exception);
     }
 
@@ -1686,7 +1830,7 @@
     return from->Kill();
   }
 
-  TFNode* CreateOrMergeIntoPhi(LocalType type, TFNode* merge, TFNode* tnode,
+  TFNode* CreateOrMergeIntoPhi(ValueType type, TFNode* merge, TFNode* tnode,
                                TFNode* fnode) {
     DCHECK_NOT_NULL(builder_);
     if (builder_->IsPhiWithMerge(tnode, merge)) {
@@ -1710,7 +1854,8 @@
     env->effect = builder_->EffectPhi(1, &env->effect, env->control);
     builder_->Terminate(env->effect, env->control);
     if (FLAG_wasm_loop_assignment_analysis) {
-      BitVector* assigned = AnalyzeLoopAssignment(pc);
+      BitVector* assigned = AnalyzeLoopAssignment(
+          this, pc, static_cast<int>(total_locals()), zone_);
       if (failed()) return env;
       if (assigned != nullptr) {
         // Only introduce phis for variables assigned in this loop.
@@ -1789,52 +1934,10 @@
   }
 
   virtual void onFirstError() {
-    limit_ = start_;     // Terminate decoding loop.
+    end_ = start_;       // Terminate decoding loop.
     builder_ = nullptr;  // Don't build any more nodes.
     TRACE(" !%s\n", error_msg_.get());
   }
-  BitVector* AnalyzeLoopAssignment(const byte* pc) {
-    if (pc >= limit_) return nullptr;
-    if (*pc != kExprLoop) return nullptr;
-
-    BitVector* assigned =
-        new (zone_) BitVector(static_cast<int>(local_type_vec_.size()), zone_);
-    int depth = 0;
-    // Iteratively process all AST nodes nested inside the loop.
-    while (pc < limit_ && ok()) {
-      WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
-      unsigned length = 1;
-      switch (opcode) {
-        case kExprLoop:
-        case kExprIf:
-        case kExprBlock:
-        case kExprTry:
-          length = OpcodeLength(pc);
-          depth++;
-          break;
-        case kExprSetLocal:  // fallthru
-        case kExprTeeLocal: {
-          LocalIndexOperand operand(this, pc);
-          if (assigned->length() > 0 &&
-              operand.index < static_cast<uint32_t>(assigned->length())) {
-            // Unverified code might have an out-of-bounds index.
-            assigned->Add(operand.index);
-          }
-          length = 1 + operand.length;
-          break;
-        }
-        case kExprEnd:
-          depth--;
-          break;
-        default:
-          length = OpcodeLength(pc);
-          break;
-      }
-      if (depth <= 0) break;
-      pc += length;
-    }
-    return ok() ? assigned : nullptr;
-  }
 
   inline wasm::WasmCodePosition position() {
     int offset = static_cast<int>(pc_ - start_);
@@ -1865,30 +1968,33 @@
   }
 };
 
-bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
+bool DecodeLocalDecls(BodyLocalDecls* decls, const byte* start,
                       const byte* end) {
-  AccountingAllocator allocator;
-  Zone tmp(&allocator, ZONE_NAME);
-  FunctionBody body = {nullptr, nullptr, nullptr, start, end};
-  WasmFullDecoder decoder(&tmp, nullptr, body);
-  return decoder.DecodeLocalDecls(decls);
+  Decoder decoder(start, end);
+  if (WasmDecoder::DecodeLocals(&decoder, nullptr, &decls->type_list)) {
+    DCHECK(decoder.ok());
+    decls->encoded_size = decoder.pc_offset();
+    return true;
+  }
+  return false;
 }
 
 BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
-                                   AstLocalDecls* decls)
+                                   BodyLocalDecls* decls)
     : Decoder(start, end) {
   if (decls != nullptr) {
-    if (DecodeLocalDecls(*decls, start, end)) {
-      pc_ += decls->decls_encoded_size;
+    if (DecodeLocalDecls(decls, start, end)) {
+      pc_ += decls->encoded_size;
       if (pc_ > end_) pc_ = end_;
     }
   }
 }
 
 DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
+                            const wasm::WasmModule* module,
                             FunctionBody& body) {
   Zone zone(allocator, ZONE_NAME);
-  WasmFullDecoder decoder(&zone, nullptr, body);
+  WasmFullDecoder decoder(&zone, module, body);
   decoder.Decode();
   return decoder.toResult<DecodeStruct*>(nullptr);
 }
@@ -1902,21 +2008,35 @@
 }
 
 unsigned OpcodeLength(const byte* pc, const byte* end) {
-  WasmDecoder decoder(nullptr, nullptr, pc, end);
-  return decoder.OpcodeLength(pc);
+  Decoder decoder(pc, end);
+  return WasmDecoder::OpcodeLength(&decoder, pc);
 }
 
-void PrintAstForDebugging(const byte* start, const byte* end) {
+void PrintRawWasmCode(const byte* start, const byte* end) {
   AccountingAllocator allocator;
-  OFStream os(stdout);
-  PrintAst(&allocator, FunctionBodyForTesting(start, end), os, nullptr);
+  PrintRawWasmCode(&allocator, FunctionBodyForTesting(start, end), nullptr);
 }
 
-bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
-              std::ostream& os,
-              std::vector<std::tuple<uint32_t, int, int>>* offset_table) {
+namespace {
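+// Returns the enumerator-style spelling ("kExprXxx") of an opcode, used
+// when dumping raw wasm code as a compilable byte listing.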
+const char* RawOpcodeName(WasmOpcode opcode) {
+  switch (opcode) {
+#define DECLARE_NAME_CASE(name, opcode, sig) \
+  case kExpr##name:                          \
+    return "kExpr" #name;
+    FOREACH_OPCODE(DECLARE_NAME_CASE)
+#undef DECLARE_NAME_CASE
+    default:
+      break;
+  }
+  return "Unknown";
+}
+}  // namespace
+
+bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
+                      const wasm::WasmModule* module) {
+  OFStream os(stdout);
   Zone zone(allocator, ZONE_NAME);
-  WasmFullDecoder decoder(&zone, nullptr, body);
+  WasmFullDecoder decoder(&zone, module, body);
   int line_nr = 0;
 
   // Print the function signature.
@@ -1926,14 +2046,22 @@
   }
 
   // Print the local declarations.
-  AstLocalDecls decls(&zone);
+  BodyLocalDecls decls(&zone);
   BytecodeIterator i(body.start, body.end, &decls);
-  if (body.start != i.pc()) {
+  if (body.start != i.pc() && !FLAG_wasm_code_fuzzer_gen_test) {
     os << "// locals: ";
-    for (auto p : decls.local_types) {
-      LocalType type = p.first;
-      uint32_t count = p.second;
-      os << " " << count << " " << WasmOpcodes::TypeName(type);
+    if (!decls.type_list.empty()) {
+      ValueType type = decls.type_list[0];
+      uint32_t count = 0;
+      for (size_t pos = 0; pos < decls.type_list.size(); ++pos) {
+        if (decls.type_list[pos] == type) {
+          ++count;
+        } else {
+          os << " " << count << " " << WasmOpcodes::TypeName(type);
+          type = decls.type_list[pos];
+          count = 1;
+        }
+      }
+      // Flush the final run of locals, which the loop above never emits.
+      os << " " << count << " " << WasmOpcodes::TypeName(type);
+    }
     os << std::endl;
     ++line_nr;
@@ -1949,25 +2077,22 @@
   ++line_nr;
   unsigned control_depth = 0;
   for (; i.has_next(); i.next()) {
-    unsigned length = decoder.OpcodeLength(i.pc());
+    unsigned length = WasmDecoder::OpcodeLength(&decoder, i.pc());
 
     WasmOpcode opcode = i.current();
     if (opcode == kExprElse) control_depth--;
 
     int num_whitespaces = control_depth < 32 ? 2 * control_depth : 64;
-    if (offset_table) {
-      offset_table->push_back(
-          std::make_tuple(i.pc_offset(), line_nr, num_whitespaces));
-    }
 
     // 64 whitespaces
     const char* padding =
         "                                                                ";
     os.write(padding, num_whitespaces);
-    os << "k" << WasmOpcodes::OpcodeName(opcode) << ",";
+
+    os << RawOpcodeName(opcode) << ",";
 
     for (size_t j = 1; j < length; ++j) {
-      os << " " << AsHex(i.pc()[j], 2) << ",";
+      os << " 0x" << AsHex(i.pc()[j], 2) << ",";
     }
 
     switch (opcode) {
@@ -2024,7 +2149,7 @@
       }
       default:
         break;
-      }
+    }
     os << std::endl;
     ++line_nr;
   }
@@ -2034,9 +2159,9 @@
 
 BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
                                            const byte* start, const byte* end) {
-  FunctionBody body = {nullptr, nullptr, nullptr, start, end};
-  WasmFullDecoder decoder(zone, nullptr, body);
-  return decoder.AnalyzeLoopAssignmentForTesting(start, num_locals);
+  Decoder decoder(start, end);
+  return WasmDecoder::AnalyzeLoopAssignment(&decoder, start,
+                                            static_cast<int>(num_locals), zone);
 }
 
 }  // namespace wasm
diff --git a/src/wasm/function-body-decoder.h b/src/wasm/function-body-decoder.h
new file mode 100644
index 0000000..6e6b824
--- /dev/null
+++ b/src/wasm/function-body-decoder.h
@@ -0,0 +1,191 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_FUNCTION_BODY_DECODER_H_
+#define V8_WASM_FUNCTION_BODY_DECODER_H_
+
+#include <iterator>
+
+#include "src/base/compiler-specific.h"
+#include "src/base/iterator.h"
+#include "src/globals.h"
+#include "src/signature.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-result.h"
+
+namespace v8 {
+namespace internal {
+
+class BitVector;  // forward declaration
+
+namespace compiler {  // external declarations from compiler.
+class WasmGraphBuilder;
+}
+
+namespace wasm {
+
+typedef compiler::WasmGraphBuilder TFBuilder;
+struct WasmModule;  // forward declaration of module interface.
+
+// A wrapper around the signature and bytes of a function.
+struct FunctionBody {
+  FunctionSig* sig;   // function signature
+  const byte* base;   // base of the module bytes, for error reporting
+  const byte* start;  // start of the function body
+  const byte* end;    // end of the function body
+};
+
+static inline FunctionBody FunctionBodyForTesting(const byte* start,
+                                                  const byte* end) {
+  return {nullptr, start, start, end};
+}
+
+struct DecodeStruct {
+  int unused;
+};
+typedef Result<DecodeStruct*> DecodeResult;
+inline std::ostream& operator<<(std::ostream& os, const DecodeStruct& tree) {
+  return os;
+}
+
+V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
+                                              const wasm::WasmModule* module,
+                                              FunctionBody& body);
+DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
+                          FunctionBody& body);
+bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
+                      const wasm::WasmModule* module);
+
+// A simplified form of printing raw wasm code, e.g. from a debugger.
+void PrintRawWasmCode(const byte* start, const byte* end);
+
+inline DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
+                                   const WasmModule* module, FunctionSig* sig,
+                                   const byte* start, const byte* end) {
+  FunctionBody body = {sig, nullptr, start, end};
+  return VerifyWasmCode(allocator, module, body);
+}
+
+inline DecodeResult BuildTFGraph(AccountingAllocator* allocator,
+                                 TFBuilder* builder, FunctionSig* sig,
+                                 const byte* start, const byte* end) {
+  FunctionBody body = {sig, nullptr, start, end};
+  return BuildTFGraph(allocator, builder, body);
+}
+
+struct BodyLocalDecls {
+  // The size of the encoded local declarations, in bytes.
+  uint32_t encoded_size;
+
+  ZoneVector<ValueType> type_list;
+
+  // Constructor initializes the vector.
+  explicit BodyLocalDecls(Zone* zone) : encoded_size(0), type_list(zone) {}
+};
+
+V8_EXPORT_PRIVATE bool DecodeLocalDecls(BodyLocalDecls* decls,
+                                        const byte* start, const byte* end);
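+
+// Usage sketch (illustrative only; {zone}, {body_start} and {body_end} are
+// assumed to be a zone and pointers delimiting a function body):
+//
+//   BodyLocalDecls decls(&zone);
+//   if (DecodeLocalDecls(&decls, body_start, body_end)) {
+//     const byte* first_opcode = body_start + decls.encoded_size;
+//     // decls.type_list holds one ValueType per declared local.
+//   }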
+V8_EXPORT_PRIVATE BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone,
+                                                             size_t num_locals,
+                                                             const byte* start,
+                                                             const byte* end);
+
+// Computes the length of the opcode at the given address.
+V8_EXPORT_PRIVATE unsigned OpcodeLength(const byte* pc, const byte* end);
+
+// A simple forward iterator for bytecodes.
+class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
+  // Base class for both iterators defined below.
+  class iterator_base {
+   public:
+    inline iterator_base& operator++() {
+      DCHECK_LT(ptr_, end_);
+      ptr_ += OpcodeLength(ptr_, end_);
+      return *this;
+    }
+    inline bool operator==(const iterator_base& that) {
+      return this->ptr_ == that.ptr_;
+    }
+    inline bool operator!=(const iterator_base& that) {
+      return this->ptr_ != that.ptr_;
+    }
+
+   protected:
+    const byte* ptr_;
+    const byte* end_;
+    iterator_base(const byte* ptr, const byte* end) : ptr_(ptr), end_(end) {}
+  };
+
+ public:
+  // Iterator over the opcodes, for use when {pc_offset()} is not needed.
+  class opcode_iterator
+      : public iterator_base,
+        public std::iterator<std::input_iterator_tag, WasmOpcode> {
+   public:
+    inline WasmOpcode operator*() {
+      DCHECK_LT(ptr_, end_);
+      return static_cast<WasmOpcode>(*ptr_);
+    }
+
+   private:
+    friend class BytecodeIterator;
+    opcode_iterator(const byte* ptr, const byte* end)
+        : iterator_base(ptr, end) {}
+  };
+  // Iterator over the instruction offsets, for use when the opcodes are
+  // not needed.
+  class offset_iterator
+      : public iterator_base,
+        public std::iterator<std::input_iterator_tag, uint32_t> {
+   public:
+    inline uint32_t operator*() {
+      DCHECK_LT(ptr_, end_);
+      return static_cast<uint32_t>(ptr_ - start_);
+    }
+
+   private:
+    const byte* start_;
+    friend class BytecodeIterator;
+    offset_iterator(const byte* start, const byte* ptr, const byte* end)
+        : iterator_base(ptr, end), start_(start) {}
+  };
+
+  // Create a new {BytecodeIterator}. If the {decls} pointer is non-null,
+  // assume the bytecode starts with local declarations and decode them.
+  // Otherwise, do not decode local decls.
+  BytecodeIterator(const byte* start, const byte* end,
+                   BodyLocalDecls* decls = nullptr);
+
+  base::iterator_range<opcode_iterator> opcodes() {
+    return base::iterator_range<opcode_iterator>(opcode_iterator(pc_, end_),
+                                                 opcode_iterator(end_, end_));
+  }
+
+  base::iterator_range<offset_iterator> offsets() {
+    return base::iterator_range<offset_iterator>(
+        offset_iterator(start_, pc_, end_),
+        offset_iterator(start_, end_, end_));
+  }
+
+  WasmOpcode current() {
+    return static_cast<WasmOpcode>(
+        checked_read_u8(pc_, 0, "expected bytecode"));
+  }
+
+  void next() {
+    if (pc_ < end_) {
+      pc_ += OpcodeLength(pc_, end_);
+      if (pc_ >= end_) pc_ = end_;
+    }
+  }
+
+  bool has_next() { return pc_ < end_; }
+};
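+
+// Usage sketch (illustrative only; {allocator}, {start} and {end} are
+// assumed): walk the opcodes of a function body, skipping the local
+// declarations at its start.
+//
+//   Zone zone(&allocator, ZONE_NAME);
+//   BodyLocalDecls decls(&zone);
+//   for (BytecodeIterator i(start, end, &decls); i.has_next(); i.next()) {
+//     WasmOpcode opcode = i.current();
+//     // ... inspect {opcode} at offset i.pc_offset() ...
+//   }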
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_FUNCTION_BODY_DECODER_H_
diff --git a/src/wasm/managed.h b/src/wasm/managed.h
deleted file mode 100644
index 785d5d3..0000000
--- a/src/wasm/managed.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_WASM_MANAGED_H_
-#define V8_WASM_MANAGED_H_
-
-#include "src/factory.h"
-#include "src/global-handles.h"
-#include "src/handles.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-// An object that wraps a pointer to a C++ object and optionally deletes it
-// when the managed wrapper object is garbage collected.
-template <class CppType>
-class Managed : public Foreign {
- public:
-  V8_INLINE CppType* get() {
-    return reinterpret_cast<CppType*>(foreign_address());
-  }
-
-  static Handle<Managed<CppType>> New(Isolate* isolate, CppType* ptr,
-                                      bool delete_on_gc = true) {
-    Handle<Foreign> foreign =
-        isolate->factory()->NewForeign(reinterpret_cast<Address>(ptr));
-    Handle<Managed<CppType>> handle(
-        reinterpret_cast<Managed<CppType>*>(*foreign), isolate);
-    if (delete_on_gc) {
-      RegisterWeakCallbackForDelete(isolate, handle);
-    }
-    return handle;
-  }
-
- private:
-  static void RegisterWeakCallbackForDelete(Isolate* isolate,
-                                            Handle<Managed<CppType>> handle) {
-    Handle<Object> global_handle = isolate->global_handles()->Create(*handle);
-    GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
-                            &Managed<CppType>::Delete,
-                            v8::WeakCallbackType::kFinalizer);
-  }
-  static void Delete(const v8::WeakCallbackInfo<void>& data) {
-    Managed<CppType>** p =
-        reinterpret_cast<Managed<CppType>**>(data.GetParameter());
-    delete (*p)->get();
-    (*p)->set_foreign_address(0);
-    GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
-  }
-};
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_WASM_MANAGED_H_
diff --git a/src/wasm/module-decoder.cc b/src/wasm/module-decoder.cc
index c8eace3..440e5dc 100644
--- a/src/wasm/module-decoder.cc
+++ b/src/wasm/module-decoder.cc
@@ -3,15 +3,19 @@
 // found in the LICENSE file.
 
 #include "src/wasm/module-decoder.h"
+#include "src/wasm/function-body-decoder-impl.h"
 
 #include "src/base/functional.h"
 #include "src/base/platform/platform.h"
+#include "src/counters.h"
 #include "src/flags.h"
 #include "src/macro-assembler.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
+#include "src/ostreams.h"
 #include "src/v8.h"
 
 #include "src/wasm/decoder.h"
+#include "src/wasm/wasm-limits.h"
 
 namespace v8 {
 namespace internal {
@@ -26,30 +30,63 @@
 #define TRACE(...)
 #endif
 
+const char* SectionName(WasmSectionCode code) {
+  switch (code) {
+    case kUnknownSectionCode:
+      return "Unknown";
+    case kTypeSectionCode:
+      return "Type";
+    case kImportSectionCode:
+      return "Import";
+    case kFunctionSectionCode:
+      return "Function";
+    case kTableSectionCode:
+      return "Table";
+    case kMemorySectionCode:
+      return "Memory";
+    case kGlobalSectionCode:
+      return "Global";
+    case kExportSectionCode:
+      return "Export";
+    case kStartSectionCode:
+      return "Start";
+    case kCodeSectionCode:
+      return "Code";
+    case kElementSectionCode:
+      return "Element";
+    case kDataSectionCode:
+      return "Data";
+    case kNameSectionCode:
+      return "Name";
+    default:
+      return "<unknown>";
+  }
+}
+
 namespace {
 
 const char* kNameString = "name";
 const size_t kNameStringLength = 4;
 
-LocalType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
+ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
   switch (expr.kind) {
     case WasmInitExpr::kNone:
-      return kAstStmt;
+      return kWasmStmt;
     case WasmInitExpr::kGlobalIndex:
       return expr.val.global_index < module->globals.size()
                  ? module->globals[expr.val.global_index].type
-                 : kAstStmt;
+                 : kWasmStmt;
     case WasmInitExpr::kI32Const:
-      return kAstI32;
+      return kWasmI32;
     case WasmInitExpr::kI64Const:
-      return kAstI64;
+      return kWasmI64;
     case WasmInitExpr::kF32Const:
-      return kAstF32;
+      return kWasmF32;
     case WasmInitExpr::kF64Const:
-      return kAstF64;
+      return kWasmF64;
     default:
       UNREACHABLE();
-      return kAstStmt;
+      return kWasmStmt;
   }
 }
 
@@ -177,19 +214,21 @@
  public:
   ModuleDecoder(Zone* zone, const byte* module_start, const byte* module_end,
                 ModuleOrigin origin)
-      : Decoder(module_start, module_end), module_zone(zone), origin_(origin) {
+      : Decoder(module_start, module_end),
+        module_zone(zone),
+        origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {
     result_.start = start_;
-    if (limit_ < start_) {
+    if (end_ < start_) {
       error(start_, "end is less than start");
-      limit_ = start_;
+      end_ = start_;
     }
   }
 
   virtual void onFirstError() {
-    pc_ = limit_;  // On error, terminate section decoding loop.
+    pc_ = end_;  // On error, terminate section decoding loop.
   }
 
-  static void DumpModule(WasmModule* module, const ModuleResult& result) {
+  void DumpModule(const ModuleResult& result) {
     std::string path;
     if (FLAG_dump_wasm_module_path) {
       path = FLAG_dump_wasm_module_path;
@@ -199,7 +238,7 @@
       }
     }
     // Files are named `HASH.{ok,failed}.wasm`.
-    size_t hash = base::hash_range(module->module_start, module->module_end);
+    size_t hash = base::hash_range(start_, end_);
     char buf[32] = {'\0'};
 #if V8_OS_WIN && _MSC_VER < 1900
 #define snprintf sprintf_s
@@ -208,17 +247,15 @@
              result.ok() ? "ok" : "failed");
     std::string name(buf);
     if (FILE* wasm_file = base::OS::FOpen((path + name).c_str(), "wb")) {
-      fwrite(module->module_start, module->module_end - module->module_start, 1,
-             wasm_file);
+      fwrite(start_, end_ - start_, 1, wasm_file);
       fclose(wasm_file);
     }
   }
 
   // Decodes an entire module.
-  ModuleResult DecodeModule(WasmModule* module, bool verify_functions = true) {
+  ModuleResult DecodeModule(bool verify_functions = true) {
     pc_ = start_;
-    module->module_start = start_;
-    module->module_end = limit_;
+    WasmModule* module = new WasmModule(module_zone);
     module->min_mem_pages = 0;
     module->max_mem_pages = 0;
     module->mem_export = false;
@@ -249,8 +286,8 @@
 
     // ===== Type section ====================================================
     if (section_iter.section_code() == kTypeSectionCode) {
-      uint32_t signatures_count = consume_u32v("signatures count");
-      module->signatures.reserve(SafeReserve(signatures_count));
+      uint32_t signatures_count = consume_count("types count", kV8MaxWasmTypes);
+      module->signatures.reserve(signatures_count);
       for (uint32_t i = 0; ok() && i < signatures_count; ++i) {
         TRACE("DecodeSignature[%d] module+%d\n", i,
               static_cast<int>(pc_ - start_));
@@ -262,8 +299,9 @@
 
     // ===== Import section ==================================================
     if (section_iter.section_code() == kImportSectionCode) {
-      uint32_t import_table_count = consume_u32v("import table count");
-      module->import_table.reserve(SafeReserve(import_table_count));
+      uint32_t import_table_count =
+          consume_count("imports count", kV8MaxWasmImports);
+      module->import_table.reserve(import_table_count);
       for (uint32_t i = 0; ok() && i < import_table_count; ++i) {
         TRACE("DecodeImportTable[%d] module+%d\n", i,
               static_cast<int>(pc_ - start_));
@@ -280,9 +318,6 @@
         const byte* pos = pc_;
         import->module_name_offset =
             consume_string(&import->module_name_length, true);
-        if (import->module_name_length == 0) {
-          error(pos, "import module name cannot be NULL");
-        }
         import->field_name_offset =
             consume_string(&import->field_name_length, true);
 
@@ -307,6 +342,7 @@
           }
           case kExternalTable: {
             // ===== Imported table ==========================================
+            if (!AddTable(module)) break;
             import->index =
                 static_cast<uint32_t>(module->function_tables.size());
             module->function_tables.push_back({0, 0, false,
@@ -314,30 +350,29 @@
                                                false, SignatureMap()});
             expect_u8("element type", kWasmAnyFunctionTypeForm);
             WasmIndirectFunctionTable* table = &module->function_tables.back();
-            consume_resizable_limits(
-                "element count", "elements", WasmModule::kV8MaxTableSize,
-                &table->min_size, &table->has_max, WasmModule::kV8MaxTableSize,
-                &table->max_size);
+            consume_resizable_limits("element count", "elements",
+                                     FLAG_wasm_max_table_size, &table->min_size,
+                                     &table->has_max, FLAG_wasm_max_table_size,
+                                     &table->max_size);
             break;
           }
           case kExternalMemory: {
             // ===== Imported memory =========================================
-            bool has_max = false;
-            consume_resizable_limits("memory", "pages", WasmModule::kV8MaxPages,
-                                     &module->min_mem_pages, &has_max,
-                                     WasmModule::kSpecMaxPages,
-                                     &module->max_mem_pages);
-            module->has_memory = true;
+            if (!AddMemory(module)) break;
+            consume_resizable_limits(
+                "memory", "pages", FLAG_wasm_max_mem_pages,
+                &module->min_mem_pages, &module->has_max_mem,
+                kSpecMaxWasmMemoryPages, &module->max_mem_pages);
             break;
           }
           case kExternalGlobal: {
             // ===== Imported global =========================================
             import->index = static_cast<uint32_t>(module->globals.size());
             module->globals.push_back(
-                {kAstStmt, false, WasmInitExpr(), 0, true, false});
+                {kWasmStmt, false, WasmInitExpr(), 0, true, false});
             WasmGlobal* global = &module->globals.back();
             global->type = consume_value_type();
-            global->mutability = consume_u8("mutability") != 0;
+            global->mutability = consume_mutability();
             if (global->mutability) {
               error("mutable globals cannot be imported");
             }
@@ -353,8 +388,9 @@
 
     // ===== Function section ================================================
     if (section_iter.section_code() == kFunctionSectionCode) {
-      uint32_t functions_count = consume_u32v("functions count");
-      module->functions.reserve(SafeReserve(functions_count));
+      uint32_t functions_count =
+          consume_count("functions count", kV8MaxWasmFunctions);
+      module->functions.reserve(functions_count);
       module->num_declared_functions = functions_count;
       for (uint32_t i = 0; ok() && i < functions_count; ++i) {
         uint32_t func_index = static_cast<uint32_t>(module->functions.size());
@@ -375,23 +411,17 @@
 
     // ===== Table section ===================================================
     if (section_iter.section_code() == kTableSectionCode) {
-      const byte* pos = pc_;
-      uint32_t table_count = consume_u32v("table count");
-      // Require at most one table for now.
-      if (table_count > 1) {
-        error(pos, pos, "invalid table count %d, maximum 1", table_count);
-      }
-      if (module->function_tables.size() < 1) {
-        module->function_tables.push_back({0, 0, false, std::vector<int32_t>(),
-                                           false, false, SignatureMap()});
-      }
+      uint32_t table_count = consume_count("table count", kV8MaxWasmTables);
 
       for (uint32_t i = 0; ok() && i < table_count; i++) {
+        if (!AddTable(module)) break;
+        module->function_tables.push_back({0, 0, false, std::vector<int32_t>(),
+                                           false, false, SignatureMap()});
         WasmIndirectFunctionTable* table = &module->function_tables.back();
         expect_u8("table type", kWasmAnyFunctionTypeForm);
         consume_resizable_limits("table elements", "elements",
-                                 WasmModule::kV8MaxTableSize, &table->min_size,
-                                 &table->has_max, WasmModule::kV8MaxTableSize,
+                                 FLAG_wasm_max_table_size, &table->min_size,
+                                 &table->has_max, FLAG_wasm_max_table_size,
                                  &table->max_size);
       }
       section_iter.advance();
@@ -399,39 +429,30 @@
 
     // ===== Memory section ==================================================
     if (section_iter.section_code() == kMemorySectionCode) {
-      const byte* pos = pc_;
-      uint32_t memory_count = consume_u32v("memory count");
-      // Require at most one memory for now.
-      if (memory_count > 1) {
-        error(pos, pos, "invalid memory count %d, maximum 1", memory_count);
-      }
+      uint32_t memory_count = consume_count("memory count", kV8MaxWasmMemories);
 
       for (uint32_t i = 0; ok() && i < memory_count; i++) {
-        bool has_max = false;
-        consume_resizable_limits(
-            "memory", "pages", WasmModule::kV8MaxPages, &module->min_mem_pages,
-            &has_max, WasmModule::kSpecMaxPages, &module->max_mem_pages);
+        if (!AddMemory(module)) break;
+        consume_resizable_limits("memory", "pages", FLAG_wasm_max_mem_pages,
+                                 &module->min_mem_pages, &module->has_max_mem,
+                                 kSpecMaxWasmMemoryPages,
+                                 &module->max_mem_pages);
       }
-      module->has_memory = true;
       section_iter.advance();
     }
 
     // ===== Global section ==================================================
     if (section_iter.section_code() == kGlobalSectionCode) {
-      uint32_t globals_count = consume_u32v("globals count");
+      uint32_t globals_count =
+          consume_count("globals count", kV8MaxWasmGlobals);
       uint32_t imported_globals = static_cast<uint32_t>(module->globals.size());
-      if (!IsWithinLimit(std::numeric_limits<int32_t>::max(), globals_count,
-                         imported_globals)) {
-        error(pos, pos, "too many imported+defined globals: %u + %u",
-              imported_globals, globals_count);
-      }
-      module->globals.reserve(SafeReserve(imported_globals + globals_count));
+      module->globals.reserve(imported_globals + globals_count);
       for (uint32_t i = 0; ok() && i < globals_count; ++i) {
         TRACE("DecodeGlobal[%d] module+%d\n", i,
               static_cast<int>(pc_ - start_));
         // Add an uninitialized global and pass a pointer to it.
         module->globals.push_back(
-            {kAstStmt, false, WasmInitExpr(), 0, false, false});
+            {kWasmStmt, false, WasmInitExpr(), 0, false, false});
         WasmGlobal* global = &module->globals.back();
         DecodeGlobalInModule(module, i + imported_globals, global);
       }
@@ -440,8 +461,9 @@
 
     // ===== Export section ==================================================
     if (section_iter.section_code() == kExportSectionCode) {
-      uint32_t export_table_count = consume_u32v("export table count");
-      module->export_table.reserve(SafeReserve(export_table_count));
+      uint32_t export_table_count =
+          consume_count("exports count", kV8MaxWasmImports);
+      module->export_table.reserve(export_table_count);
       for (uint32_t i = 0; ok() && i < export_table_count; ++i) {
         TRACE("DecodeExportTable[%d] module+%d\n", i,
               static_cast<int>(pc_ - start_));
@@ -473,7 +495,11 @@
           }
           case kExternalMemory: {
             uint32_t index = consume_u32v("memory index");
-            if (index != 0) error("invalid memory index != 0");
+            // TODO(titzer): This should become more regular
+            // once we support multiple memories.
+            if (!module->has_memory || index != 0) {
+              error("invalid memory index != 0");
+            }
             module->mem_export = true;
             break;
           }
@@ -493,8 +519,8 @@
             break;
         }
       }
-      // Check for duplicate exports.
-      if (ok() && module->export_table.size() > 1) {
+      // Check for duplicate exports (except for asm.js).
+      if (ok() && origin_ != kAsmJsOrigin && module->export_table.size() > 1) {
         std::vector<WasmExport> sorted_exports(module->export_table);
         const byte* base = start_;
         auto cmp_less = [base](const WasmExport& a, const WasmExport& b) {
@@ -538,7 +564,8 @@
 
     // ===== Elements section ================================================
     if (section_iter.section_code() == kElementSectionCode) {
-      uint32_t element_count = consume_u32v("element count");
+      uint32_t element_count =
+          consume_count("element count", FLAG_wasm_max_table_size);
       for (uint32_t i = 0; ok() && i < element_count; ++i) {
         const byte* pos = pc();
         uint32_t table_index = consume_u32v("table index");
@@ -551,19 +578,18 @@
         } else {
           table = &module->function_tables[table_index];
         }
-        WasmInitExpr offset = consume_init_expr(module, kAstI32);
-        uint32_t num_elem = consume_u32v("number of elements");
+        WasmInitExpr offset = consume_init_expr(module, kWasmI32);
+        uint32_t num_elem =
+            consume_count("number of elements", kV8MaxWasmTableEntries);
         std::vector<uint32_t> vector;
         module->table_inits.push_back({table_index, offset, vector});
         WasmTableInit* init = &module->table_inits.back();
-        init->entries.reserve(SafeReserve(num_elem));
         for (uint32_t j = 0; ok() && j < num_elem; j++) {
           WasmFunction* func = nullptr;
           uint32_t index = consume_func_index(module, &func);
           init->entries.push_back(index);
           if (table && index < module->functions.size()) {
             // Canonicalize signature indices during decoding.
-            // TODO(titzer): suboptimal, redundant when verifying only.
             table->map.FindOrInsert(module->functions[index].sig);
           }
         }
@@ -587,10 +613,8 @@
         function->code_start_offset = pc_offset();
         function->code_end_offset = pc_offset() + size;
         if (verify_functions) {
-          ModuleEnv module_env;
-          module_env.module = module;
-          module_env.origin = module->origin;
-
+          ModuleBytesEnv module_env(module, nullptr,
+                                    ModuleWireBytes(start_, end_));
           VerifyFunctionBody(i + module->num_imported_functions, &module_env,
                              function);
         }
@@ -601,8 +625,9 @@
 
     // ===== Data section ====================================================
     if (section_iter.section_code() == kDataSectionCode) {
-      uint32_t data_segments_count = consume_u32v("data segments count");
-      module->data_segments.reserve(SafeReserve(data_segments_count));
+      uint32_t data_segments_count =
+          consume_count("data segments count", kV8MaxWasmDataSegments);
+      module->data_segments.reserve(data_segments_count);
       for (uint32_t i = 0; ok() && i < data_segments_count; ++i) {
         if (!module->has_memory) {
           error("cannot load data without memory");
@@ -623,22 +648,29 @@
 
     // ===== Name section ====================================================
     if (section_iter.section_code() == kNameSectionCode) {
-      uint32_t functions_count = consume_u32v("functions count");
+      // TODO(titzer): find a way to report name errors as warnings.
+      // Use an inner decoder so that errors don't fail the outer decoder.
+      Decoder inner(start_, pc_, end_);
+      uint32_t functions_count = inner.consume_u32v("functions count");
 
-      for (uint32_t i = 0; ok() && i < functions_count; ++i) {
+      for (uint32_t i = 0; inner.ok() && i < functions_count; ++i) {
         uint32_t function_name_length = 0;
-        uint32_t name_offset = consume_string(&function_name_length, false);
+        uint32_t name_offset =
+            consume_string(inner, &function_name_length, false);
         uint32_t func_index = i;
-        if (func_index < module->functions.size()) {
+        if (inner.ok() && func_index < module->functions.size()) {
           module->functions[func_index].name_offset = name_offset;
           module->functions[func_index].name_length = function_name_length;
         }
 
-        uint32_t local_names_count = consume_u32v("local names count");
-        for (uint32_t j = 0; ok() && j < local_names_count; j++) {
-          skip_string();
+        uint32_t local_names_count = inner.consume_u32v("local names count");
+        for (uint32_t j = 0; inner.ok() && j < local_names_count; j++) {
+          uint32_t length = inner.consume_u32v("string length");
+          inner.consume_bytes(length, "string");
         }
       }
+      // Skip the whole names section in the outer decoder.
+      consume_bytes(section_iter.payload_length(), nullptr);
       section_iter.advance();
     }
 
@@ -656,25 +688,19 @@
     if (verify_functions && result.ok()) {
       result.MoveFrom(result_);  // Copy error code and location.
     }
-    if (FLAG_dump_wasm_module) DumpModule(module, result);
+    if (FLAG_dump_wasm_module) DumpModule(result);
     return result;
   }
 
-  uint32_t SafeReserve(uint32_t count) {
-    // Avoid OOM by only reserving up to a certain size.
-    const uint32_t kMaxReserve = 20000;
-    return count < kMaxReserve ? count : kMaxReserve;
-  }
-
   // Decodes a single anonymous function starting at {start_}.
-  FunctionResult DecodeSingleFunction(ModuleEnv* module_env,
+  FunctionResult DecodeSingleFunction(ModuleBytesEnv* module_env,
                                       WasmFunction* function) {
     pc_ = start_;
     function->sig = consume_sig();            // read signature
     function->name_offset = 0;                // ---- name
     function->name_length = 0;                // ---- name length
     function->code_start_offset = off(pc_);   // ---- code start
-    function->code_end_offset = off(limit_);  // ---- code end
+    function->code_end_offset = off(end_);    // ---- code end
 
     if (ok()) VerifyFunctionBody(0, module_env, function);
 
@@ -693,7 +719,7 @@
 
   WasmInitExpr DecodeInitExpr(const byte* start) {
     pc_ = start;
-    return consume_init_expr(nullptr, kAstStmt);
+    return consume_init_expr(nullptr, kWasmStmt);
   }
 
  private:
@@ -703,13 +729,32 @@
 
   uint32_t off(const byte* ptr) { return static_cast<uint32_t>(ptr - start_); }
 
+  bool AddTable(WasmModule* module) {
+    if (module->function_tables.size() > 0) {
+      error("At most one table is supported");
+      return false;
+    } else {
+      return true;
+    }
+  }
+
+  bool AddMemory(WasmModule* module) {
+    if (module->has_memory) {
+      error("At most one memory is supported");
+      return false;
+    } else {
+      module->has_memory = true;
+      return true;
+    }
+  }
+
   // Decodes a single global entry inside a module starting at {pc_}.
   void DecodeGlobalInModule(WasmModule* module, uint32_t index,
                             WasmGlobal* global) {
     global->type = consume_value_type();
-    global->mutability = consume_u8("mutability") != 0;
+    global->mutability = consume_mutability();
     const byte* pos = pc();
-    global->init = consume_init_expr(module, kAstStmt);
+    global->init = consume_init_expr(module, kWasmStmt);
     switch (global->init.kind) {
       case WasmInitExpr::kGlobalIndex: {
         uint32_t other_index = global->init.val.global_index;
@@ -747,12 +792,12 @@
   void DecodeDataSegmentInModule(WasmModule* module, WasmDataSegment* segment) {
     const byte* start = pc_;
     expect_u8("linear memory index", 0);
-    segment->dest_addr = consume_init_expr(module, kAstI32);
+    segment->dest_addr = consume_init_expr(module, kWasmI32);
     segment->source_size = consume_u32v("source size");
     segment->source_offset = static_cast<uint32_t>(pc_ - start_);
 
     // Validate the data is in the module.
-    uint32_t module_limit = static_cast<uint32_t>(limit_ - start_);
+    uint32_t module_limit = static_cast<uint32_t>(end_ - start_);
     if (!IsWithinLimit(module_limit, segment->source_offset,
                        segment->source_size)) {
       error(start, "segment out of bounds of module");
@@ -779,21 +824,24 @@
   }
 
   // Verifies the body (code) of a given function.
-  void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
+  void VerifyFunctionBody(uint32_t func_num, ModuleBytesEnv* menv,
                           WasmFunction* function) {
+    WasmFunctionName func_name(function,
+                               menv->wire_bytes.GetNameOrNull(function));
     if (FLAG_trace_wasm_decoder || FLAG_trace_wasm_decode_time) {
       OFStream os(stdout);
-      os << "Verifying WASM function " << WasmFunctionName(function, menv)
-         << std::endl;
+      os << "Verifying WASM function " << func_name << std::endl;
     }
-    FunctionBody body = {menv, function->sig, start_,
+    FunctionBody body = {function->sig, start_,
                          start_ + function->code_start_offset,
                          start_ + function->code_end_offset};
-    DecodeResult result = VerifyWasmCode(module_zone->allocator(), body);
+    DecodeResult result = VerifyWasmCode(
+        module_zone->allocator(),
+        menv == nullptr ? nullptr : menv->module_env.module, body);
     if (result.failed()) {
       // Wrap the error message from the function decoder.
       std::ostringstream str;
-      str << "in function " << WasmFunctionName(function, menv) << ": ";
+      str << "in function " << func_name << ": ";
       str << result;
       std::string strval = str.str();
       const char* raw = strval.c_str();
@@ -808,25 +856,24 @@
     }
   }
 
-  // Reads a length-prefixed string, checking that it is within bounds. Returns
-  // the offset of the string, and the length as an out parameter.
   uint32_t consume_string(uint32_t* length, bool validate_utf8) {
-    *length = consume_u32v("string length");
-    uint32_t offset = pc_offset();
-    const byte* string_start = pc_;
-    // Consume bytes before validation to guarantee that the string is not oob.
-    if (*length > 0) consume_bytes(*length, "string");
-    if (ok() && validate_utf8 &&
-        !unibrow::Utf8::Validate(string_start, *length)) {
-      error(string_start, "no valid UTF-8 string");
-    }
-    return offset;
+    return consume_string(*this, length, validate_utf8);
   }
 
-  // Skips over a length-prefixed string, but checks that it is within bounds.
-  void skip_string() {
-    uint32_t length = consume_u32v("string length");
-    consume_bytes(length, "string");
+  // Reads a length-prefixed string, checking that it is within bounds. Returns
+  // the offset of the string, and the length as an out parameter.
+  uint32_t consume_string(Decoder& decoder, uint32_t* length,
+                          bool validate_utf8) {
+    *length = decoder.consume_u32v("string length");
+    uint32_t offset = decoder.pc_offset();
+    const byte* string_start = decoder.pc();
+    // Consume bytes before validation to guarantee that the string is not oob.
+    if (*length > 0) decoder.consume_bytes(*length, "string");
+    if (decoder.ok() && validate_utf8 &&
+        !unibrow::Utf8::Validate(string_start, *length)) {
+      decoder.error(string_start, "no valid UTF-8 string");
+    }
+    return offset;
   }
 
   uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
@@ -842,6 +889,17 @@
     return sig_index;
   }
 
+  uint32_t consume_count(const char* name, size_t maximum) {
+    const byte* p = pc_;
+    uint32_t count = consume_u32v(name);
+    if (count > maximum) {
+      error(p, p, "%s of %u exceeds internal limit of %zu", name, count,
+            maximum);
+      return static_cast<uint32_t>(maximum);
+    }
+    return count;
+  }
+
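The new consume_count helper centralizes a recurring pattern: decode an unsigned LEB128 count, then clamp it to an internal limit so a hostile count in the wire bytes cannot drive oversized allocations. A minimal standalone sketch of that pattern (not V8 code; helper names assumed):

#include <cstdint>
#include <cstdio>

// Decodes an unsigned LEB128 value, advancing *pc but never reading past end.
uint32_t read_u32v(const uint8_t** pc, const uint8_t* end) {
  uint32_t result = 0;
  for (int shift = 0; *pc < end && shift < 32; shift += 7) {
    uint8_t b = *(*pc)++;
    result |= static_cast<uint32_t>(b & 0x7f) << shift;
    if ((b & 0x80) == 0) break;
  }
  return result;
}

// Mirrors consume_count: report counts above the limit, then clamp them so
// decoding can continue with a safe value.
uint32_t read_count(const uint8_t** pc, const uint8_t* end, const char* name,
                    size_t maximum) {
  uint32_t count = read_u32v(pc, end);
  if (count > maximum) {
    std::fprintf(stderr, "%s of %u exceeds internal limit of %zu\n", name,
                 count, maximum);
    return static_cast<uint32_t>(maximum);
  }
  return count;
}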
   uint32_t consume_func_index(WasmModule* module, WasmFunction** func) {
     return consume_index("function index", module->functions, func);
   }
@@ -912,7 +970,7 @@
     return true;
   }
 
-  WasmInitExpr consume_init_expr(WasmModule* module, LocalType expected) {
+  WasmInitExpr consume_init_expr(WasmModule* module, ValueType expected) {
     const byte* pos = pc();
     uint8_t opcode = consume_u8("opcode");
     WasmInitExpr expr;
@@ -978,7 +1036,7 @@
     if (!expect_u8("end opcode", kExprEnd)) {
       expr.kind = WasmInitExpr::kNone;
     }
-    if (expected != kAstStmt && TypeOf(module, expr) != kAstI32) {
+    if (expected != kWasmStmt && TypeOf(module, expr) != kWasmI32) {
       error(pos, pos, "type error in init expression, expected %s, got %s",
             WasmOpcodes::TypeName(expected),
             WasmOpcodes::TypeName(TypeOf(module, expr)));
@@ -986,29 +1044,43 @@
     return expr;
   }
 
+  // Reads a mutability flag.
+  bool consume_mutability() {
+    byte val = consume_u8("mutability");
+    if (val > 1) error(pc_ - 1, "invalid mutability");
+    return val != 0;
+  }
+
   // Reads a single 8-bit integer, interpreting it as a local type.
-  LocalType consume_value_type() {
+  ValueType consume_value_type() {
     byte val = consume_u8("value type");
-    LocalTypeCode t = static_cast<LocalTypeCode>(val);
+    ValueTypeCode t = static_cast<ValueTypeCode>(val);
     switch (t) {
       case kLocalI32:
-        return kAstI32;
+        return kWasmI32;
       case kLocalI64:
-        return kAstI64;
+        return kWasmI64;
       case kLocalF32:
-        return kAstF32;
+        return kWasmF32;
       case kLocalF64:
-        return kAstF64;
-      case kLocalS128:
-        if (origin_ != kAsmJsOrigin && FLAG_wasm_simd_prototype) {
-          return kAstS128;
-        } else {
-          error(pc_ - 1, "invalid local type");
-          return kAstStmt;
-        }
+        return kWasmF64;
       default:
+        if (origin_ != kAsmJsOrigin && FLAG_wasm_simd_prototype) {
+          switch (t) {
+            case kLocalS128:
+              return kWasmS128;
+            case kLocalS1x4:
+              return kWasmS1x4;
+            case kLocalS1x8:
+              return kWasmS1x8;
+            case kLocalS1x16:
+              return kWasmS1x16;
+            default:
+              break;
+          }
+        }
         error(pc_ - 1, "invalid local type");
-        return kAstStmt;
+        return kWasmStmt;
     }
   }
 
@@ -1016,35 +1088,32 @@
   FunctionSig* consume_sig() {
     if (!expect_u8("type form", kWasmFunctionTypeForm)) return nullptr;
     // parse parameter types
-    uint32_t param_count = consume_u32v("param count");
-    std::vector<LocalType> params;
+    uint32_t param_count =
+        consume_count("param count", kV8MaxWasmFunctionParams);
+    if (failed()) return nullptr;
+    std::vector<ValueType> params;
     for (uint32_t i = 0; ok() && i < param_count; ++i) {
-      LocalType param = consume_value_type();
+      ValueType param = consume_value_type();
       params.push_back(param);
     }
 
     // parse return types
-    const byte* pt = pc_;
-    uint32_t return_count = consume_u32v("return count");
-    if (return_count > kMaxReturnCount) {
-      error(pt, pt, "return count of %u exceeds maximum of %u", return_count,
-            kMaxReturnCount);
-      return nullptr;
-    }
-    std::vector<LocalType> returns;
+    const size_t max_return_count = FLAG_wasm_mv_prototype
+                                        ? kV8MaxWasmFunctionMultiReturns
+                                        : kV8MaxWasmFunctionReturns;
+    uint32_t return_count = consume_count("return count", max_return_count);
+    if (failed()) return nullptr;
+    std::vector<ValueType> returns;
     for (uint32_t i = 0; ok() && i < return_count; ++i) {
-      LocalType ret = consume_value_type();
+      ValueType ret = consume_value_type();
       returns.push_back(ret);
     }
 
-    if (failed()) {
-      // Decoding failed, return void -> void
-      return new (module_zone) FunctionSig(0, 0, nullptr);
-    }
+    if (failed()) return nullptr;
 
     // FunctionSig stores the return types first.
-    LocalType* buffer =
-        module_zone->NewArray<LocalType>(param_count + return_count);
+    ValueType* buffer =
+        module_zone->NewArray<ValueType>(param_count + return_count);
     uint32_t b = 0;
     for (uint32_t i = 0; i < return_count; ++i) buffer[b++] = returns[i];
     for (uint32_t i = 0; i < param_count; ++i) buffer[b++] = params[i];
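To make the returns-first layout concrete, a worked example using the value-type names from this patch (the constructor call follows the FunctionSig(return_count, param_count, buffer) form used by this decoder):

// Worked example: for a signature (i32, f64) -> i64, return_count = 1 and
// param_count = 2, so the zone-allocated buffer is laid out returns-first:
//   buffer[0] = kWasmI64;  // return 0
//   buffer[1] = kWasmI32;  // param 0
//   buffer[2] = kWasmF64;  // param 1
// and the signature is built as:
//   new (module_zone) FunctionSig(1, 2, buffer);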
@@ -1113,16 +1182,16 @@
       isolate->counters()->wasm_decode_module_time());
   size_t size = module_end - module_start;
   if (module_start > module_end) return ModuleError("start > end");
-  if (size >= kMaxModuleSize) return ModuleError("size > maximum module size");
+  if (size >= kV8MaxWasmModuleSize)
+    return ModuleError("size > maximum module size");
   // TODO(bradnelson): Improve histogram handling of size_t.
   isolate->counters()->wasm_module_size_bytes()->AddSample(
       static_cast<int>(size));
   // Signatures are stored in zone memory, which has the same lifetime
   // as the {module}.
   Zone* zone = new Zone(isolate->allocator(), ZONE_NAME);
-  WasmModule* module = new WasmModule(zone, module_start);
   ModuleDecoder decoder(zone, module_start, module_end, origin);
-  ModuleResult result = decoder.DecodeModule(module, verify_functions);
+  ModuleResult result = decoder.DecodeModule(verify_functions);
   // TODO(bradnelson): Improve histogram handling of size_t.
   // TODO(titzer): this isn't accurate, since it doesn't count the data
   // allocated on the C++ heap.
@@ -1146,14 +1215,14 @@
 }
 
 FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
-                                  ModuleEnv* module_env,
+                                  ModuleBytesEnv* module_env,
                                   const byte* function_start,
                                   const byte* function_end) {
   HistogramTimerScope wasm_decode_function_time_scope(
       isolate->counters()->wasm_decode_function_time());
   size_t size = function_end - function_start;
   if (function_start > function_end) return FunctionError("start > end");
-  if (size > kMaxFunctionSize)
+  if (size > kV8MaxWasmFunctionSize)
     return FunctionError("size > maximum function size");
   isolate->counters()->wasm_function_size_bytes()->AddSample(
       static_cast<int>(size));
@@ -1185,7 +1254,7 @@
   for (uint32_t i = 0; i < functions_count && decoder.ok(); ++i) {
     uint32_t size = decoder.consume_u32v("body size");
     int offset = static_cast<int>(section_offset + decoder.pc_offset());
-    table.push_back(std::make_pair(offset, static_cast<int>(size)));
+    table.emplace_back(offset, static_cast<int>(size));
     DCHECK(table.back().first >= 0 && table.back().second >= 0);
     decoder.consume_bytes(size);
   }
@@ -1208,22 +1277,31 @@
   for (uint32_t i = 0; i < functions_count && decoder.ok(); ++i) {
     uint32_t size = decoder.consume_u32v("table size");
     if (size == 0) {
-      table.push_back(std::vector<std::pair<int, int>>());
+      table.emplace_back();
       continue;
     }
     if (!decoder.checkAvailable(size)) {
       decoder.error("illegal asm function offset table size");
     }
     const byte* table_end = decoder.pc() + size;
-    uint32_t locals_size = decoder.consume_u32("locals size");
+    uint32_t locals_size = decoder.consume_u32v("locals size");
+    int function_start_position = decoder.consume_u32v("function start pos");
     int last_byte_offset = locals_size;
-    int last_asm_position = 0;
-    std::vector<std::pair<int, int>> func_asm_offsets;
+    int last_asm_position = function_start_position;
+    std::vector<AsmJsOffsetEntry> func_asm_offsets;
     func_asm_offsets.reserve(size / 4);  // conservative estimation
+    // Add an entry for the stack check, associated with position 0.
+    func_asm_offsets.push_back(
+        {0, function_start_position, function_start_position});
     while (decoder.ok() && decoder.pc() < table_end) {
       last_byte_offset += decoder.consume_u32v("byte offset delta");
-      last_asm_position += decoder.consume_i32v("asm position delta");
-      func_asm_offsets.push_back({last_byte_offset, last_asm_position});
+      int call_position =
+          last_asm_position + decoder.consume_i32v("call position delta");
+      int to_number_position =
+          call_position + decoder.consume_i32v("to_number position delta");
+      last_asm_position = to_number_position;
+      func_asm_offsets.push_back(
+          {last_byte_offset, call_position, to_number_position});
     }
     if (decoder.pc() != table_end) {
       decoder.error("broken asm offset table");
@@ -1235,6 +1313,36 @@
   return decoder.toResult(std::move(table));
 }
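The per-function offset table is delta-encoded: each entry carries a byte-offset delta, then a call-position delta relative to the previous asm.js position, then a to_number-position delta relative to that call. A self-contained sketch of the decoding arithmetic (names assumed; the deltas are taken as already read from the varints):

#include <vector>

struct OffsetEntry { int byte_offset, call_pos, to_number_pos; };

// deltas holds triples {byte offset delta, call pos delta, to_number delta}.
std::vector<OffsetEntry> DecodeEntries(const std::vector<int>& deltas,
                                       int locals_size, int function_start) {
  std::vector<OffsetEntry> out;
  out.push_back({0, function_start, function_start});  // stack-check entry
  int byte_offset = locals_size;
  int last_pos = function_start;
  for (size_t i = 0; i + 2 < deltas.size(); i += 3) {
    byte_offset += deltas[i];
    int call_pos = last_pos + deltas[i + 1];
    int to_number_pos = call_pos + deltas[i + 2];
    last_pos = to_number_pos;
    out.push_back({byte_offset, call_pos, to_number_pos});
  }
  return out;
}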
 
+std::vector<CustomSectionOffset> DecodeCustomSections(const byte* start,
+                                                      const byte* end) {
+  Decoder decoder(start, end);
+  decoder.consume_bytes(4, "wasm magic");
+  decoder.consume_bytes(4, "wasm version");
+
+  std::vector<CustomSectionOffset> result;
+
+  while (decoder.more()) {
+    byte section_code = decoder.consume_u8("section code");
+    uint32_t section_length = decoder.consume_u32v("section length");
+    uint32_t section_start = decoder.pc_offset();
+    if (section_code != 0) {
+      // Skip known sections.
+      decoder.consume_bytes(section_length, "section bytes");
+      continue;
+    }
+    uint32_t name_length = decoder.consume_u32v("name length");
+    uint32_t name_offset = decoder.pc_offset();
+    decoder.consume_bytes(name_length, "section name");
+    uint32_t payload_offset = decoder.pc_offset();
+    uint32_t payload_length = section_length - (payload_offset - section_start);
+    decoder.consume_bytes(payload_length);
+    result.push_back({section_start, name_offset, name_length, payload_offset,
+                      payload_length, section_length});
+  }
+
+  return result;
+}
+
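A hypothetical usage sketch of the new DecodeCustomSections API (the printing helper is illustrative, not part of this patch). All offsets are relative to start, so the name and payload can be sliced back out of the original buffer:

#include <cstdio>

void PrintCustomSections(const byte* start, const byte* end) {
  for (const CustomSectionOffset& s : DecodeCustomSections(start, end)) {
    std::printf("custom section '%.*s': %u payload bytes\n",
                static_cast<int>(s.name_length),
                reinterpret_cast<const char*>(start + s.name_offset),
                s.payload_length);
  }
}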
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/module-decoder.h b/src/wasm/module-decoder.h
index 7cf5cfe..446883f 100644
--- a/src/wasm/module-decoder.h
+++ b/src/wasm/module-decoder.h
@@ -6,7 +6,7 @@
 #define V8_WASM_MODULE_DECODER_H_
 
 #include "src/globals.h"
-#include "src/wasm/ast-decoder.h"
+#include "src/wasm/function-body-decoder.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-result.h"
 
@@ -14,11 +14,44 @@
 namespace internal {
 namespace wasm {
 
+const uint32_t kWasmMagic = 0x6d736100;
+const uint32_t kWasmVersion = 0x01;
+const uint8_t kWasmFunctionTypeForm = 0x60;
+const uint8_t kWasmAnyFunctionTypeForm = 0x70;
+const uint8_t kResizableMaximumFlag = 1;
+
+enum WasmSectionCode {
+  kUnknownSectionCode = 0,   // code for unknown sections
+  kTypeSectionCode = 1,      // Function signature declarations
+  kImportSectionCode = 2,    // Import declarations
+  kFunctionSectionCode = 3,  // Function declarations
+  kTableSectionCode = 4,     // Indirect function table and other tables
+  kMemorySectionCode = 5,    // Memory attributes
+  kGlobalSectionCode = 6,    // Global declarations
+  kExportSectionCode = 7,    // Exports
+  kStartSectionCode = 8,     // Start function declaration
+  kElementSectionCode = 9,   // Elements section
+  kCodeSectionCode = 10,     // Function code
+  kDataSectionCode = 11,     // Data segments
+  kNameSectionCode = 12,     // Name section (encoded as a string)
+};
+
+inline bool IsValidSectionCode(uint8_t byte) {
+  return kTypeSectionCode <= byte && byte <= kDataSectionCode;
+}
+
+const char* SectionName(WasmSectionCode code);
+
 typedef Result<const WasmModule*> ModuleResult;
 typedef Result<WasmFunction*> FunctionResult;
 typedef std::vector<std::pair<int, int>> FunctionOffsets;
 typedef Result<FunctionOffsets> FunctionOffsetsResult;
-typedef std::vector<std::vector<std::pair<int, int>>> AsmJsOffsets;
+struct AsmJsOffsetEntry {
+  int byte_offset;
+  int source_position_call;
+  int source_position_number_conversion;
+};
+typedef std::vector<std::vector<AsmJsOffsetEntry>> AsmJsOffsets;
 typedef Result<AsmJsOffsets> AsmJsOffsetsResult;
 
 // Decodes the bytes of a WASM module between {module_start} and {module_end}.
@@ -37,7 +70,8 @@
 // Decodes the bytes of a WASM function between
 // {function_start} and {function_end}.
 V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunction(Isolate* isolate,
-                                                    Zone* zone, ModuleEnv* env,
+                                                    Zone* zone,
+                                                    ModuleBytesEnv* env,
                                                     const byte* function_start,
                                                     const byte* function_end);
 
@@ -50,6 +84,18 @@
 V8_EXPORT_PRIVATE WasmInitExpr DecodeWasmInitExprForTesting(const byte* start,
                                                             const byte* end);
 
+struct CustomSectionOffset {
+  uint32_t section_start;
+  uint32_t name_offset;
+  uint32_t name_length;
+  uint32_t payload_offset;
+  uint32_t payload_length;
+  uint32_t section_length;
+};
+
+V8_EXPORT_PRIVATE std::vector<CustomSectionOffset> DecodeCustomSections(
+    const byte* start, const byte* end);
+
 // Extracts the mapping from wasm byte offset to asm.js source position per
 // function.
 // Returns a vector of vectors with <byte_offset, source_position> entries, or
diff --git a/src/wasm/wasm-code-specialization.cc b/src/wasm/wasm-code-specialization.cc
new file mode 100644
index 0000000..1147899
--- /dev/null
+++ b/src/wasm/wasm-code-specialization.cc
@@ -0,0 +1,263 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-code-specialization.h"
+
+#include "src/assembler-inl.h"
+#include "src/objects-inl.h"
+#include "src/source-position-table.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+
+namespace {
+
+int ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc) {
+  DCHECK_EQ(static_cast<int>(kExprCallFunction), static_cast<int>(*pc));
+  decoder.Reset(pc + 1, pc + 6);
+  uint32_t call_idx = decoder.consume_u32v("call index");
+  DCHECK(decoder.ok());
+  DCHECK_GE(kMaxInt, call_idx);
+  return static_cast<int>(call_idx);
+}
+
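A worked byte-level example of what ExtractDirectCallIndex decodes (illustrative bytes, not taken from a real module):

// kExprCallFunction is 0x10; the call index follows as unsigned LEB128.
// For `call 300`: 300 = 0b10'0101100, so the low 7 bits (0x2C) carry the
// continuation bit, giving 0xAC, and the remaining bits give 0x02:
//   const byte code[] = {0x10, 0xAC, 0x02};
//   ExtractDirectCallIndex(decoder, code);  // -> 300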
+int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
+                                       size_t offset_l) {
+  DCHECK_GE(kMaxInt, offset_l);
+  int offset = static_cast<int>(offset_l);
+  DCHECK(!iterator.done());
+  int byte_pos;
+  do {
+    byte_pos = iterator.source_position().ScriptOffset();
+    iterator.Advance();
+  } while (!iterator.done() && iterator.code_offset() <= offset);
+  return byte_pos;
+}
+
+class PatchDirectCallsHelper {
+ public:
+  PatchDirectCallsHelper(WasmInstanceObject* instance, Code* code)
+      : source_pos_it(code->source_position_table()),
+        decoder(nullptr, nullptr) {
+    FixedArray* deopt_data = code->deoptimization_data();
+    DCHECK_EQ(2, deopt_data->length());
+    WasmCompiledModule* comp_mod = instance->compiled_module();
+    int func_index = Smi::cast(deopt_data->get(1))->value();
+    func_bytes = comp_mod->module_bytes()->GetChars() +
+                 comp_mod->module()->functions[func_index].code_start_offset;
+  }
+
+  SourcePositionTableIterator source_pos_it;
+  Decoder decoder;
+  const byte* func_bytes;
+};
+
+}  // namespace
+
+CodeSpecialization::CodeSpecialization(Isolate* isolate, Zone* zone)
+    : objects_to_relocate(isolate->heap(), ZoneAllocationPolicy(zone)) {}
+
+CodeSpecialization::~CodeSpecialization() {}
+
+void CodeSpecialization::RelocateMemoryReferences(Address old_start,
+                                                  uint32_t old_size,
+                                                  Address new_start,
+                                                  uint32_t new_size) {
+  DCHECK(old_mem_start == nullptr && old_mem_size == 0 &&
+         new_mem_start == nullptr && new_mem_size == 0);
+  DCHECK(old_start != new_start || old_size != new_size);
+  old_mem_start = old_start;
+  old_mem_size = old_size;
+  new_mem_start = new_start;
+  new_mem_size = new_size;
+}
+
+void CodeSpecialization::RelocateGlobals(Address old_start, Address new_start) {
+  DCHECK(old_globals_start == 0 && new_globals_start == 0);
+  DCHECK(old_start != 0 || new_start != 0);
+  old_globals_start = old_start;
+  new_globals_start = new_start;
+}
+
+void CodeSpecialization::PatchTableSize(uint32_t old_size, uint32_t new_size) {
+  DCHECK(old_function_table_size == 0 && new_function_table_size == 0);
+  DCHECK(old_size != 0 || new_size != 0);
+  old_function_table_size = old_size;
+  new_function_table_size = new_size;
+}
+
+void CodeSpecialization::RelocateDirectCalls(
+    Handle<WasmInstanceObject> instance) {
+  DCHECK(relocate_direct_calls_instance.is_null());
+  DCHECK(!instance.is_null());
+  relocate_direct_calls_instance = instance;
+}
+
+void CodeSpecialization::RelocateObject(Handle<Object> old_obj,
+                                        Handle<Object> new_obj) {
+  DCHECK(!old_obj.is_null() && !new_obj.is_null());
+  has_objects_to_relocate = true;
+  objects_to_relocate.Set(*old_obj, new_obj);
+}
+
+bool CodeSpecialization::ApplyToWholeInstance(
+    WasmInstanceObject* instance, ICacheFlushMode icache_flush_mode) {
+  DisallowHeapAllocation no_gc;
+  WasmCompiledModule* compiled_module = instance->compiled_module();
+  FixedArray* code_table = compiled_module->ptr_to_code_table();
+  WasmModule* module = compiled_module->module();
+  std::vector<WasmFunction>* wasm_functions =
+      &compiled_module->module()->functions;
+  DCHECK_EQ(wasm_functions->size() +
+                compiled_module->module()->num_exported_functions,
+            code_table->length());
+
+  bool changed = false;
+  int func_index = module->num_imported_functions;
+
+  // Patch all wasm functions.
+  for (int num_wasm_functions = static_cast<int>(wasm_functions->size());
+       func_index < num_wasm_functions; ++func_index) {
+    Code* wasm_function = Code::cast(code_table->get(func_index));
+    changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
+  }
+
+  // Patch all exported functions.
+  for (auto exp : module->export_table) {
+    if (exp.kind != kExternalFunction) continue;
+    Code* export_wrapper = Code::cast(code_table->get(func_index));
+    DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
+    // There must be exactly one call to WASM_FUNCTION or WASM_TO_JS_FUNCTION.
+    int num_wasm_calls = 0;
+    for (RelocIterator it(export_wrapper,
+                          RelocInfo::ModeMask(RelocInfo::CODE_TARGET));
+         !it.done(); it.next()) {
+      DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
+      Code* code = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+      // Ignore calls to other builtins like ToNumber.
+      if (code->kind() != Code::WASM_FUNCTION &&
+          code->kind() != Code::WASM_TO_JS_FUNCTION &&
+          code->builtin_index() != Builtins::kIllegal)
+        continue;
+      ++num_wasm_calls;
+      Code* new_code = Code::cast(code_table->get(exp.index));
+      DCHECK(new_code->kind() == Code::WASM_FUNCTION ||
+             new_code->kind() == Code::WASM_TO_JS_FUNCTION);
+      it.rinfo()->set_target_address(new_code->instruction_start(),
+                                     UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+      changed = true;
+    }
+    DCHECK_EQ(1, num_wasm_calls);
+    func_index++;
+  }
+  DCHECK_EQ(code_table->length(), func_index);
+  return changed;
+}
+
+bool CodeSpecialization::ApplyToWasmCode(Code* code,
+                                         ICacheFlushMode icache_flush_mode) {
+  DisallowHeapAllocation no_gc;
+  DCHECK_EQ(Code::WASM_FUNCTION, code->kind());
+
+  bool reloc_mem_addr = old_mem_start != new_mem_start;
+  bool reloc_mem_size = old_mem_size != new_mem_size;
+  bool reloc_globals = old_globals_start || new_globals_start;
+  bool patch_table_size = old_function_table_size || new_function_table_size;
+  bool reloc_direct_calls = !relocate_direct_calls_instance.is_null();
+  bool reloc_objects = has_objects_to_relocate;
+
+  int reloc_mode = 0;
+  auto add_mode = [&reloc_mode](bool cond, RelocInfo::Mode mode) {
+    if (cond) reloc_mode |= RelocInfo::ModeMask(mode);
+  };
+  add_mode(reloc_mem_addr, RelocInfo::WASM_MEMORY_REFERENCE);
+  add_mode(reloc_mem_size, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+  add_mode(reloc_globals, RelocInfo::WASM_GLOBAL_REFERENCE);
+  add_mode(patch_table_size, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);
+  add_mode(reloc_direct_calls, RelocInfo::CODE_TARGET);
+  add_mode(reloc_objects, RelocInfo::EMBEDDED_OBJECT);
+
+  std::unique_ptr<PatchDirectCallsHelper> patch_direct_calls_helper;
+  bool changed = false;
+
+  for (RelocIterator it(code, reloc_mode); !it.done(); it.next()) {
+    RelocInfo::Mode mode = it.rinfo()->rmode();
+    switch (mode) {
+      case RelocInfo::WASM_MEMORY_REFERENCE:
+        DCHECK(reloc_mem_addr);
+        it.rinfo()->update_wasm_memory_reference(old_mem_start, new_mem_start,
+                                                 icache_flush_mode);
+        changed = true;
+        break;
+      case RelocInfo::WASM_MEMORY_SIZE_REFERENCE:
+        DCHECK(reloc_mem_size);
+        it.rinfo()->update_wasm_memory_size(old_mem_size, new_mem_size,
+                                            icache_flush_mode);
+        changed = true;
+        break;
+      case RelocInfo::WASM_GLOBAL_REFERENCE:
+        DCHECK(reloc_globals);
+        it.rinfo()->update_wasm_global_reference(
+            old_globals_start, new_globals_start, icache_flush_mode);
+        changed = true;
+        break;
+      case RelocInfo::CODE_TARGET: {
+        DCHECK(reloc_direct_calls);
+        Code* old_code =
+            Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+        // Skip everything which is not a wasm call (stack checks, traps, ...).
+        if (old_code->kind() != Code::WASM_FUNCTION &&
+            old_code->kind() != Code::WASM_TO_JS_FUNCTION &&
+            old_code->builtin_index() != Builtins::kIllegal)
+          continue;
+        // Iterate simultaneously over the relocation information and the source
+        // position table. For each call in the reloc info, move the source
+        // position iterator forward to that position to find the byte offset of
+        // the respective call. Then extract the call index from the module wire
+        // bytes to find the new compiled function.
+        size_t offset = it.rinfo()->pc() - code->instruction_start();
+        if (!patch_direct_calls_helper) {
+          patch_direct_calls_helper.reset(new PatchDirectCallsHelper(
+              *relocate_direct_calls_instance, code));
+        }
+        int byte_pos = AdvanceSourcePositionTableIterator(
+            patch_direct_calls_helper->source_pos_it, offset);
+        int called_func_index = ExtractDirectCallIndex(
+            patch_direct_calls_helper->decoder,
+            patch_direct_calls_helper->func_bytes + byte_pos);
+        FixedArray* code_table =
+            relocate_direct_calls_instance->compiled_module()
+                ->ptr_to_code_table();
+        Code* new_code = Code::cast(code_table->get(called_func_index));
+        it.rinfo()->set_target_address(new_code->instruction_start(),
+                                       UPDATE_WRITE_BARRIER, icache_flush_mode);
+        changed = true;
+      } break;
+      case RelocInfo::EMBEDDED_OBJECT: {
+        DCHECK(reloc_objects);
+        Object* old = it.rinfo()->target_object();
+        Handle<Object>* new_obj = objects_to_relocate.Find(old);
+        if (new_obj) {
+          it.rinfo()->set_target_object(**new_obj, UPDATE_WRITE_BARRIER,
+                                        icache_flush_mode);
+          changed = true;
+        }
+      } break;
+      case RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE:
+        DCHECK(patch_table_size);
+        it.rinfo()->update_wasm_function_table_size_reference(
+            old_function_table_size, new_function_table_size,
+            icache_flush_mode);
+        changed = true;
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  return changed;
+}
diff --git a/src/wasm/wasm-code-specialization.h b/src/wasm/wasm-code-specialization.h
new file mode 100644
index 0000000..fa54235
--- /dev/null
+++ b/src/wasm/wasm-code-specialization.h
@@ -0,0 +1,70 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_CODE_SPECIALIZATION_H_
+#define V8_WASM_CODE_SPECIALIZATION_H_
+
+#include "src/assembler.h"
+#include "src/identity-map.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Helper class to specialize wasm code for a specific instance, or to update
+// code when memory / globals / tables change.
+// This class is unhandlified, and contains a DisallowHeapAllocation field to
+// ensure that no allocations happen while it is alive.
+//
+// Set up all relocations / patching that should be performed by the Relocate* /
+// Patch* methods, then apply all changes in one step using the Apply* methods.
+class CodeSpecialization {
+ public:
+  CodeSpecialization(Isolate*, Zone*);
+  ~CodeSpecialization();
+
+  // Update memory references.
+  void RelocateMemoryReferences(Address old_start, uint32_t old_size,
+                                Address new_start, uint32_t new_size);
+  // Update references to global variables.
+  void RelocateGlobals(Address old_start, Address new_start);
+  // Update function table size.
+  // TODO(wasm): Prepare this for more than one indirect function table.
+  void PatchTableSize(uint32_t old_size, uint32_t new_size);
+  // Update all direct call sites based on the code table in the given instance.
+  void RelocateDirectCalls(Handle<WasmInstanceObject> instance);
+  // Relocate an arbitrary object (e.g. function table).
+  void RelocateObject(Handle<Object> old_obj, Handle<Object> new_obj);
+
+  // Apply all relocations and patching to all code in the instance (wasm code
+  // and exported functions).
+  bool ApplyToWholeInstance(WasmInstanceObject*,
+                            ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
+  // Apply all relocations and patching to one wasm code object.
+  bool ApplyToWasmCode(Code*, ICacheFlushMode = FLUSH_ICACHE_IF_NEEDED);
+
+ private:
+  Address old_mem_start = 0;
+  uint32_t old_mem_size = 0;
+  Address new_mem_start = 0;
+  uint32_t new_mem_size = 0;
+
+  Address old_globals_start = 0;
+  Address new_globals_start = 0;
+
+  uint32_t old_function_table_size = 0;
+  uint32_t new_function_table_size = 0;
+
+  Handle<WasmInstanceObject> relocate_direct_calls_instance;
+
+  bool has_objects_to_relocate = false;
+  IdentityMap<Handle<Object>, ZoneAllocationPolicy> objects_to_relocate;
+};
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_CODE_SPECIALIZATION_H_
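A hypothetical usage sketch of the record-then-apply protocol described in the class comment (the wrapper function is illustrative, not part of this patch):

// Specialize an instance's code after its memory buffer moved.
void SpecializeForMovedMemory(Isolate* isolate, Zone* zone,
                              Handle<WasmInstanceObject> instance,
                              Address old_mem, uint32_t old_size,
                              Address new_mem, uint32_t new_size) {
  CodeSpecialization spec(isolate, zone);
  // Record all pending patches first...
  spec.RelocateMemoryReferences(old_mem, old_size, new_mem, new_size);
  spec.RelocateDirectCalls(instance);
  // ...then apply them in one pass over wasm code and export wrappers.
  spec.ApplyToWholeInstance(*instance);
}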
diff --git a/src/wasm/wasm-debug.cc b/src/wasm/wasm-debug.cc
index 11c2ef8..c00a4f1 100644
--- a/src/wasm/wasm-debug.cc
+++ b/src/wasm/wasm-debug.cc
@@ -2,144 +2,446 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/assembler-inl.h"
 #include "src/assert-scope.h"
+#include "src/compiler/wasm-compiler.h"
 #include "src/debug/debug.h"
 #include "src/factory.h"
+#include "src/frames-inl.h"
 #include "src/isolate.h"
 #include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-limits.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-objects.h"
+#include "src/zone/accounting-allocator.h"
 
 using namespace v8::internal;
 using namespace v8::internal::wasm;
 
 namespace {
 
-enum {
-  kWasmDebugInfoWasmObj,
-  kWasmDebugInfoWasmBytesHash,
-  kWasmDebugInfoAsmJsOffsets,
-  kWasmDebugInfoNumEntries
+// Forward declaration.
+class InterpreterHandle;
+InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info);
+
+class InterpreterHandle {
+  AccountingAllocator allocator_;
+  WasmInstance instance_;
+  WasmInterpreter interpreter_;
+  Isolate* isolate_;
+  StepAction next_step_action_ = StepNone;
+  int last_step_stack_depth_ = 0;
+
+ public:
+  // Initialize in the right order, using helper methods to make this possible.
+  // WasmInterpreter has to be allocated in place, since it is not movable.
+  InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info)
+      : instance_(debug_info->wasm_instance()->compiled_module()->module()),
+        interpreter_(GetBytesEnv(&instance_, debug_info), &allocator_),
+        isolate_(isolate) {
+    if (debug_info->wasm_instance()->has_memory_buffer()) {
+      JSArrayBuffer* mem_buffer = debug_info->wasm_instance()->memory_buffer();
+      instance_.mem_start =
+          reinterpret_cast<byte*>(mem_buffer->backing_store());
+      CHECK(mem_buffer->byte_length()->ToUint32(&instance_.mem_size));
+    } else {
+      DCHECK_EQ(0, instance_.module->min_mem_pages);
+      instance_.mem_start = nullptr;
+      instance_.mem_size = 0;
+    }
+  }
+
+  static ModuleBytesEnv GetBytesEnv(WasmInstance* instance,
+                                    WasmDebugInfo* debug_info) {
+    // Return raw pointer into heap. The WasmInterpreter will make its own copy
+    // of this data anyway, and there is no heap allocation in-between.
+    SeqOneByteString* bytes_str =
+        debug_info->wasm_instance()->compiled_module()->module_bytes();
+    Vector<const byte> bytes(bytes_str->GetChars(), bytes_str->length());
+    return ModuleBytesEnv(instance->module, instance, bytes);
+  }
+
+  WasmInterpreter* interpreter() { return &interpreter_; }
+  const WasmModule* module() { return instance_.module; }
+
+  void PrepareStep(StepAction step_action) {
+    next_step_action_ = step_action;
+    last_step_stack_depth_ = CurrentStackDepth();
+  }
+
+  void ClearStepping() { next_step_action_ = StepNone; }
+
+  int CurrentStackDepth() {
+    DCHECK_EQ(1, interpreter()->GetThreadCount());
+    return interpreter()->GetThread(0)->GetFrameCount();
+  }
+
+  void Execute(uint32_t func_index, uint8_t* arg_buffer) {
+    DCHECK_GE(module()->functions.size(), func_index);
+    FunctionSig* sig = module()->functions[func_index].sig;
+    DCHECK_GE(kMaxInt, sig->parameter_count());
+    int num_params = static_cast<int>(sig->parameter_count());
+    ScopedVector<WasmVal> wasm_args(num_params);
+    uint8_t* arg_buf_ptr = arg_buffer;
+    for (int i = 0; i < num_params; ++i) {
+      int param_size = 1 << ElementSizeLog2Of(sig->GetParam(i));
+#define CASE_ARG_TYPE(type, ctype)                                  \
+  case type:                                                        \
+    DCHECK_EQ(param_size, sizeof(ctype));                           \
+    wasm_args[i] = WasmVal(*reinterpret_cast<ctype*>(arg_buf_ptr)); \
+    break;
+      switch (sig->GetParam(i)) {
+        CASE_ARG_TYPE(kWasmI32, uint32_t)
+        CASE_ARG_TYPE(kWasmI64, uint64_t)
+        CASE_ARG_TYPE(kWasmF32, float)
+        CASE_ARG_TYPE(kWasmF64, double)
+#undef CASE_ARG_TYPE
+        default:
+          UNREACHABLE();
+      }
+      arg_buf_ptr += RoundUpToMultipleOfPowOf2(param_size, 8);
+    }
+
+    WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
+    // We do not support reentering an already running interpreter at the moment
+    // (like INTERPRETER -> JS -> WASM -> INTERPRETER).
+    DCHECK(thread->state() == WasmInterpreter::STOPPED ||
+           thread->state() == WasmInterpreter::FINISHED);
+    thread->Reset();
+    thread->PushFrame(&module()->functions[func_index], wasm_args.start());
+    bool finished = false;
+    while (!finished) {
+      // TODO(clemensh): Add occasional StackChecks.
+      WasmInterpreter::State state = ContinueExecution(thread);
+      switch (state) {
+        case WasmInterpreter::State::PAUSED:
+          NotifyDebugEventListeners(thread);
+          break;
+        case WasmInterpreter::State::FINISHED:
+          // Perfect, just break the switch and exit the loop.
+          finished = true;
+          break;
+        case WasmInterpreter::State::TRAPPED:
+          // TODO(clemensh): Generate appropriate JS exception.
+          UNIMPLEMENTED();
+          break;
+        // STOPPED and RUNNING should never occur here.
+        case WasmInterpreter::State::STOPPED:
+        case WasmInterpreter::State::RUNNING:
+        default:
+          UNREACHABLE();
+      }
+    }
+
+    // Copy back the return value.
+    DCHECK_GE(kV8MaxWasmFunctionReturns, sig->return_count());
+    // TODO(wasm): Handle multi-value returns.
+    DCHECK_EQ(1, kV8MaxWasmFunctionReturns);
+    if (sig->return_count()) {
+      WasmVal ret_val = thread->GetReturnValue(0);
+#define CASE_RET_TYPE(type, ctype)                                       \
+  case type:                                                             \
+    DCHECK_EQ(1 << ElementSizeLog2Of(sig->GetReturn(0)), sizeof(ctype)); \
+    *reinterpret_cast<ctype*>(arg_buffer) = ret_val.to<ctype>();         \
+    break;
+      switch (sig->GetReturn(0)) {
+        CASE_RET_TYPE(kWasmI32, uint32_t)
+        CASE_RET_TYPE(kWasmI64, uint64_t)
+        CASE_RET_TYPE(kWasmF32, float)
+        CASE_RET_TYPE(kWasmF64, double)
+#undef CASE_RET_TYPE
+        default:
+          UNREACHABLE();
+      }
+    }
+  }
+
+  WasmInterpreter::State ContinueExecution(WasmInterpreter::Thread* thread) {
+    switch (next_step_action_) {
+      case StepNone:
+        return thread->Run();
+      case StepIn:
+        return thread->Step();
+      case StepOut:
+        thread->AddBreakFlags(WasmInterpreter::BreakFlag::AfterReturn);
+        return thread->Run();
+      case StepNext: {
+        int stack_depth = thread->GetFrameCount();
+        if (stack_depth == last_step_stack_depth_) return thread->Step();
+        thread->AddBreakFlags(stack_depth > last_step_stack_depth_
+                                  ? WasmInterpreter::BreakFlag::AfterReturn
+                                  : WasmInterpreter::BreakFlag::AfterCall);
+        return thread->Run();
+      }
+      default:
+        UNREACHABLE();
+        return WasmInterpreter::STOPPED;
+    }
+  }
+
+  Handle<WasmInstanceObject> GetInstanceObject() {
+    StackTraceFrameIterator it(isolate_);
+    WasmInterpreterEntryFrame* frame =
+        WasmInterpreterEntryFrame::cast(it.frame());
+    Handle<WasmInstanceObject> instance_obj(frame->wasm_instance(), isolate_);
+    DCHECK_EQ(this, GetInterpreterHandle(instance_obj->debug_info()));
+    return instance_obj;
+  }
+
+  void NotifyDebugEventListeners(WasmInterpreter::Thread* thread) {
+    // Enter the debugger.
+    DebugScope debug_scope(isolate_->debug());
+    if (debug_scope.failed()) return;
+
+    // Postpone interrupt during breakpoint processing.
+    PostponeInterruptsScope postpone(isolate_);
+
+    // Check whether we hit a breakpoint.
+    if (isolate_->debug()->break_points_active()) {
+      Handle<WasmCompiledModule> compiled_module(
+          GetInstanceObject()->compiled_module(), isolate_);
+      int position = GetTopPosition(compiled_module);
+      Handle<FixedArray> breakpoints;
+      if (compiled_module->CheckBreakPoints(position).ToHandle(&breakpoints)) {
+        // We hit one or several breakpoints. Clear stepping, notify the
+        // listeners and return.
+        ClearStepping();
+        Handle<Object> hit_breakpoints_js =
+            isolate_->factory()->NewJSArrayWithElements(breakpoints);
+        isolate_->debug()->OnDebugBreak(hit_breakpoints_js);
+        return;
+      }
+    }
+
+    // We did not hit a breakpoint, so maybe this pause is related to stepping.
+    bool hit_step = false;
+    switch (next_step_action_) {
+      case StepNone:
+        break;
+      case StepIn:
+        hit_step = true;
+        break;
+      case StepOut:
+        hit_step = thread->GetFrameCount() < last_step_stack_depth_;
+        break;
+      case StepNext: {
+        hit_step = thread->GetFrameCount() == last_step_stack_depth_;
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+    if (!hit_step) return;
+    ClearStepping();
+    isolate_->debug()->OnDebugBreak(isolate_->factory()->undefined_value());
+  }
+
+  int GetTopPosition(Handle<WasmCompiledModule> compiled_module) {
+    DCHECK_EQ(1, interpreter()->GetThreadCount());
+    WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
+    DCHECK_LT(0, thread->GetFrameCount());
+
+    wasm::InterpretedFrame frame =
+        thread->GetFrame(thread->GetFrameCount() - 1);
+    return compiled_module->GetFunctionOffset(frame.function()->func_index) +
+           frame.pc();
+  }
+
+  std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
+      Address frame_pointer) {
+    // TODO(clemensh): Use frame_pointer.
+    USE(frame_pointer);
+
+    DCHECK_EQ(1, interpreter()->GetThreadCount());
+    WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
+    std::vector<std::pair<uint32_t, int>> stack(thread->GetFrameCount());
+    for (int i = 0, e = thread->GetFrameCount(); i < e; ++i) {
+      wasm::InterpretedFrame frame = thread->GetFrame(i);
+      stack[i] = {frame.function()->func_index, frame.pc()};
+    }
+    return stack;
+  }
+
+  std::unique_ptr<wasm::InterpretedFrame> GetInterpretedFrame(
+      Address frame_pointer, int idx) {
+    // TODO(clemensh): Use frame_pointer.
+    USE(frame_pointer);
+
+    DCHECK_EQ(1, interpreter()->GetThreadCount());
+    WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
+    return std::unique_ptr<wasm::InterpretedFrame>(
+        new wasm::InterpretedFrame(thread->GetMutableFrame(idx)));
+  }
+
+  uint64_t NumInterpretedCalls() {
+    DCHECK_EQ(1, interpreter()->GetThreadCount());
+    return interpreter()->GetThread(0)->NumInterpretedCalls();
+  }
 };
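A caller-side sketch of the arg_buffer layout that Execute unpacks: each parameter occupies a slot of its size rounded up to 8 bytes, and the single return value is written back at offset 0. Illustrative, assuming a signature (i32, f64) -> f64:

uint8_t arg_buffer[16];
*reinterpret_cast<uint32_t*>(arg_buffer + 0) = 41;  // param 0 (i32, 8-byte slot)
*reinterpret_cast<double*>(arg_buffer + 8) = 1.5;   // param 1 (f64)
// debug_info->RunInterpreter(func_index, arg_buffer);
// double result = *reinterpret_cast<double*>(arg_buffer);  // return value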
 
-// TODO(clemensh): Move asm.js offset tables to the compiled module.
-FixedArray *GetAsmJsOffsetTables(Handle<WasmDebugInfo> debug_info,
-                                 Isolate *isolate) {
-  Object *offset_tables = debug_info->get(kWasmDebugInfoAsmJsOffsets);
-  if (!offset_tables->IsUndefined(isolate)) {
-    return FixedArray::cast(offset_tables);
+InterpreterHandle* GetOrCreateInterpreterHandle(
+    Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
+  Handle<Object> handle(debug_info->get(WasmDebugInfo::kInterpreterHandle),
+                        isolate);
+  if (handle->IsUndefined(isolate)) {
+    InterpreterHandle* cpp_handle = new InterpreterHandle(isolate, *debug_info);
+    handle = Managed<InterpreterHandle>::New(isolate, cpp_handle);
+    debug_info->set(WasmDebugInfo::kInterpreterHandle, *handle);
   }
 
-  Handle<JSObject> wasm_instance(debug_info->wasm_instance(), isolate);
-  Handle<WasmCompiledModule> compiled_module(GetCompiledModule(*wasm_instance),
-                                             isolate);
-  DCHECK(compiled_module->has_asm_js_offset_tables());
-
-  AsmJsOffsetsResult asm_offsets;
-  {
-    Handle<ByteArray> asm_offset_tables =
-        compiled_module->asm_js_offset_tables();
-    DisallowHeapAllocation no_gc;
-    const byte *bytes_start = asm_offset_tables->GetDataStartAddress();
-    const byte *bytes_end = bytes_start + asm_offset_tables->length();
-    asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
-  }
-  // Wasm bytes must be valid and must contain asm.js offset table.
-  DCHECK(asm_offsets.ok());
-  DCHECK_GE(static_cast<size_t>(kMaxInt), asm_offsets.val.size());
-  int num_functions = static_cast<int>(asm_offsets.val.size());
-  DCHECK_EQ(
-      wasm::GetNumberOfFunctions(handle(debug_info->wasm_instance())),
-      static_cast<int>(num_functions +
-                       compiled_module->module()->num_imported_functions));
-  Handle<FixedArray> all_tables =
-      isolate->factory()->NewFixedArray(num_functions);
-  debug_info->set(kWasmDebugInfoAsmJsOffsets, *all_tables);
-  for (int func = 0; func < num_functions; ++func) {
-    std::vector<std::pair<int, int>> &func_asm_offsets = asm_offsets.val[func];
-    if (func_asm_offsets.empty()) continue;
-    size_t array_size = 2 * kIntSize * func_asm_offsets.size();
-    CHECK_LE(array_size, static_cast<size_t>(kMaxInt));
-    ByteArray *arr =
-        *isolate->factory()->NewByteArray(static_cast<int>(array_size));
-    all_tables->set(func, arr);
-    int idx = 0;
-    for (std::pair<int, int> p : func_asm_offsets) {
-      // Byte offsets must be strictly monotonously increasing:
-      DCHECK(idx == 0 || p.first > arr->get_int(idx - 2));
-      arr->set_int(idx++, p.first);
-      arr->set_int(idx++, p.second);
-    }
-    DCHECK_EQ(arr->length(), idx * kIntSize);
-  }
-  return *all_tables;
+  return Handle<Managed<InterpreterHandle>>::cast(handle)->get();
 }
+
+InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info) {
+  Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandle);
+  DCHECK(!handle_obj->IsUndefined(debug_info->GetIsolate()));
+  return Managed<InterpreterHandle>::cast(handle_obj)->get();
+}
+
+InterpreterHandle* GetInterpreterHandleOrNull(WasmDebugInfo* debug_info) {
+  Object* handle_obj = debug_info->get(WasmDebugInfo::kInterpreterHandle);
+  if (handle_obj->IsUndefined(debug_info->GetIsolate())) return nullptr;
+  return Managed<InterpreterHandle>::cast(handle_obj)->get();
+}
+
+int GetNumFunctions(WasmInstanceObject* instance) {
+  size_t num_functions =
+      instance->compiled_module()->module()->functions.size();
+  DCHECK_GE(kMaxInt, num_functions);
+  return static_cast<int>(num_functions);
+}
+
+Handle<FixedArray> GetOrCreateInterpretedFunctions(
+    Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
+  Handle<Object> obj(debug_info->get(WasmDebugInfo::kInterpretedFunctions),
+                     isolate);
+  if (!obj->IsUndefined(isolate)) return Handle<FixedArray>::cast(obj);
+
+  Handle<FixedArray> new_arr = isolate->factory()->NewFixedArray(
+      GetNumFunctions(debug_info->wasm_instance()));
+  debug_info->set(WasmDebugInfo::kInterpretedFunctions, *new_arr);
+  return new_arr;
+}
+
+void RedirectCallsitesInCode(Code* code, Code* old_target, Code* new_target) {
+  DisallowHeapAllocation no_gc;
+  for (RelocIterator it(code, RelocInfo::kCodeTargetMask); !it.done();
+       it.next()) {
+    DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
+    Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+    if (target != old_target) continue;
+    it.rinfo()->set_target_address(new_target->instruction_start());
+  }
+}
+
+void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
+                                 Code* old_target, Code* new_target) {
+  DisallowHeapAllocation no_gc;
+  // Redirect all calls in wasm functions.
+  FixedArray* code_table = instance->compiled_module()->ptr_to_code_table();
+  for (int i = 0, e = GetNumFunctions(instance); i < e; ++i) {
+    RedirectCallsitesInCode(Code::cast(code_table->get(i)), old_target,
+                            new_target);
+  }
+
+  // Redirect all calls in exported functions.
+  FixedArray* weak_exported_functions =
+      instance->compiled_module()->ptr_to_weak_exported_functions();
+  for (int i = 0, e = weak_exported_functions->length(); i != e; ++i) {
+    WeakCell* weak_function = WeakCell::cast(weak_exported_functions->get(i));
+    if (weak_function->cleared()) continue;
+    Code* code = JSFunction::cast(weak_function->value())->code();
+    RedirectCallsitesInCode(code, old_target, new_target);
+  }
+}
+
 }  // namespace
 
-Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<JSObject> wasm) {
-  Isolate *isolate = wasm->GetIsolate();
-  Factory *factory = isolate->factory();
-  Handle<FixedArray> arr =
-      factory->NewFixedArray(kWasmDebugInfoNumEntries, TENURED);
-  arr->set(kWasmDebugInfoWasmObj, *wasm);
-  int hash = 0;
-  Handle<SeqOneByteString> wasm_bytes = GetWasmBytes(wasm);
-  {
-    DisallowHeapAllocation no_gc;
-    hash = StringHasher::HashSequentialString(
-        wasm_bytes->GetChars(), wasm_bytes->length(), kZeroHashSeed);
-  }
-  Handle<Object> hash_obj = factory->NewNumberFromInt(hash, TENURED);
-  arr->set(kWasmDebugInfoWasmBytesHash, *hash_obj);
-
+Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
+  Isolate* isolate = instance->GetIsolate();
+  Factory* factory = isolate->factory();
+  Handle<FixedArray> arr = factory->NewFixedArray(kFieldCount, TENURED);
+  arr->set(kInstance, *instance);
   return Handle<WasmDebugInfo>::cast(arr);
 }
 
-bool WasmDebugInfo::IsDebugInfo(Object *object) {
+bool WasmDebugInfo::IsDebugInfo(Object* object) {
   if (!object->IsFixedArray()) return false;
-  FixedArray *arr = FixedArray::cast(object);
-  return arr->length() == kWasmDebugInfoNumEntries &&
-         IsWasmInstance(arr->get(kWasmDebugInfoWasmObj)) &&
-         arr->get(kWasmDebugInfoWasmBytesHash)->IsNumber();
+  FixedArray* arr = FixedArray::cast(object);
+  if (arr->length() != kFieldCount) return false;
+  if (!IsWasmInstance(arr->get(kInstance))) return false;
+  Isolate* isolate = arr->GetIsolate();
+  if (!arr->get(kInterpreterHandle)->IsUndefined(isolate) &&
+      !arr->get(kInterpreterHandle)->IsForeign())
+    return false;
+  return true;
 }
 
-WasmDebugInfo *WasmDebugInfo::cast(Object *object) {
+WasmDebugInfo* WasmDebugInfo::cast(Object* object) {
   DCHECK(IsDebugInfo(object));
-  return reinterpret_cast<WasmDebugInfo *>(object);
+  return reinterpret_cast<WasmDebugInfo*>(object);
 }
 
-JSObject *WasmDebugInfo::wasm_instance() {
-  return JSObject::cast(get(kWasmDebugInfoWasmObj));
+WasmInstanceObject* WasmDebugInfo::wasm_instance() {
+  return WasmInstanceObject::cast(get(kInstance));
 }
 
-int WasmDebugInfo::GetAsmJsSourcePosition(Handle<WasmDebugInfo> debug_info,
-                                          int func_index, int byte_offset) {
-  Isolate *isolate = debug_info->GetIsolate();
-  Handle<JSObject> instance(debug_info->wasm_instance(), isolate);
-  FixedArray *offset_tables = GetAsmJsOffsetTables(debug_info, isolate);
+void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
+                                  int func_index, int offset) {
+  Isolate* isolate = debug_info->GetIsolate();
+  InterpreterHandle* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
+  RedirectToInterpreter(debug_info, func_index);
+  const WasmFunction* func = &handle->module()->functions[func_index];
+  handle->interpreter()->SetBreakpoint(func, offset, true);
+}
 
-  WasmCompiledModule *compiled_module = wasm::GetCompiledModule(*instance);
-  int num_imported_functions =
-      compiled_module->module()->num_imported_functions;
-  DCHECK_LE(num_imported_functions, func_index);
-  func_index -= num_imported_functions;
-  DCHECK_LT(func_index, offset_tables->length());
-  ByteArray *offset_table = ByteArray::cast(offset_tables->get(func_index));
+void WasmDebugInfo::RedirectToInterpreter(Handle<WasmDebugInfo> debug_info,
+                                          int func_index) {
+  Isolate* isolate = debug_info->GetIsolate();
+  DCHECK_LE(0, func_index);
+  DCHECK_GT(debug_info->wasm_instance()->module()->functions.size(),
+            func_index);
+  Handle<FixedArray> interpreted_functions =
+      GetOrCreateInterpretedFunctions(isolate, debug_info);
+  if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) return;
 
-  // Binary search for the current byte offset.
-  int left = 0;                                       // inclusive
-  int right = offset_table->length() / kIntSize / 2;  // exclusive
-  DCHECK_LT(left, right);
-  while (right - left > 1) {
-    int mid = left + (right - left) / 2;
-    if (offset_table->get_int(2 * mid) <= byte_offset) {
-      left = mid;
-    } else {
-      right = mid;
-    }
-  }
-  // There should be an entry for each position that could show up on the stack
-  // trace:
-  DCHECK_EQ(byte_offset, offset_table->get_int(2 * left));
-  return offset_table->get_int(2 * left + 1);
+  // Ensure that the interpreter is instantiated.
+  GetOrCreateInterpreterHandle(isolate, debug_info);
+  Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
+  Handle<Code> new_code = compiler::CompileWasmInterpreterEntry(
+      isolate, func_index,
+      instance->compiled_module()->module()->functions[func_index].sig,
+      instance);
+
+  Handle<FixedArray> code_table = instance->compiled_module()->code_table();
+  Handle<Code> old_code(Code::cast(code_table->get(func_index)), isolate);
+  interpreted_functions->set(func_index, *new_code);
+
+  RedirectCallsitesInInstance(isolate, *instance, *old_code, *new_code);
+}
+
+void WasmDebugInfo::PrepareStep(StepAction step_action) {
+  GetInterpreterHandle(this)->PrepareStep(step_action);
+}
+
+void WasmDebugInfo::RunInterpreter(int func_index, uint8_t* arg_buffer) {
+  DCHECK_LE(0, func_index);
+  GetInterpreterHandle(this)->Execute(static_cast<uint32_t>(func_index),
+                                      arg_buffer);
+}
+
+std::vector<std::pair<uint32_t, int>> WasmDebugInfo::GetInterpretedStack(
+    Address frame_pointer) {
+  return GetInterpreterHandle(this)->GetInterpretedStack(frame_pointer);
+}
+
+std::unique_ptr<wasm::InterpretedFrame> WasmDebugInfo::GetInterpretedFrame(
+    Address frame_pointer, int idx) {
+  return GetInterpreterHandle(this)->GetInterpretedFrame(frame_pointer, idx);
+}
+
+uint64_t WasmDebugInfo::NumInterpretedCalls() {
+  auto handle = GetInterpreterHandleOrNull(this);
+  return handle ? handle->NumInterpretedCalls() : 0;
 }
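A hypothetical end-to-end sketch of the new debugging flow (the helper is assumed, not part of this patch): setting a breakpoint compiles an interpreter entry stub for the function and redirects all call sites to it, so subsequent calls execute in the interpreter and can pause:

void BreakAtOffset(Handle<WasmDebugInfo> debug_info, int func_index,
                   int byte_offset) {
  // Instantiates the interpreter on demand, redirects func_index to an
  // interpreter entry stub, and registers the breakpoint.
  WasmDebugInfo::SetBreakpoint(debug_info, func_index, byte_offset);
  // Later calls land in RunInterpreter; on reaching byte_offset the handle
  // notifies listeners via isolate->debug()->OnDebugBreak(...).
}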
diff --git a/src/wasm/wasm-external-refs.cc b/src/wasm/wasm-external-refs.cc
index 4c4c91b..e982cc7 100644
--- a/src/wasm/wasm-external-refs.cc
+++ b/src/wasm/wasm-external-refs.cc
@@ -208,6 +208,19 @@
   double y = ReadDoubleValue(param1);
   WriteDoubleValue(param0, Pow(x, y));
 }
+
+static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
+
+void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback) {
+  wasm_trap_callback_for_testing = callback;
+}
+
+void call_trap_callback_for_testing() {
+  if (wasm_trap_callback_for_testing) {
+    wasm_trap_callback_for_testing();
+  }
+}
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/wasm-external-refs.h b/src/wasm/wasm-external-refs.h
index d9539ce..04337b9 100644
--- a/src/wasm/wasm-external-refs.h
+++ b/src/wasm/wasm-external-refs.h
@@ -61,6 +61,12 @@
 
 void float64_pow_wrapper(double* param0, double* param1);
 
+typedef void (*WasmTrapCallbackForTesting)();
+
+void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback);
+
+void call_trap_callback_for_testing();
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/wasm-interpreter.cc b/src/wasm/wasm-interpreter.cc
index 6e049ff..f32b5e6 100644
--- a/src/wasm/wasm-interpreter.cc
+++ b/src/wasm/wasm-interpreter.cc
@@ -2,12 +2,18 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <type_traits>
+
 #include "src/wasm/wasm-interpreter.h"
 
+#include "src/conversions.h"
+#include "src/objects-inl.h"
 #include "src/utils.h"
-#include "src/wasm/ast-decoder.h"
 #include "src/wasm/decoder.h"
+#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/function-body-decoder.h"
 #include "src/wasm/wasm-external-refs.h"
+#include "src/wasm/wasm-limits.h"
 #include "src/wasm/wasm-module.h"
 
 #include "src/zone/accounting-allocator.h"
@@ -62,6 +68,7 @@
   V(I64GtS, int64_t, >)         \
   V(I64GeS, int64_t, >=)        \
   V(F32Add, float, +)           \
+  V(F32Sub, float, -)           \
   V(F32Eq, float, ==)           \
   V(F32Ne, float, !=)           \
   V(F32Lt, float, <)            \
@@ -69,17 +76,16 @@
   V(F32Gt, float, >)            \
   V(F32Ge, float, >=)           \
   V(F64Add, double, +)          \
+  V(F64Sub, double, -)          \
   V(F64Eq, double, ==)          \
   V(F64Ne, double, !=)          \
   V(F64Lt, double, <)           \
   V(F64Le, double, <=)          \
   V(F64Gt, double, >)           \
-  V(F64Ge, double, >=)
-
-#define FOREACH_SIMPLE_BINOP_NAN(V) \
-  V(F32Mul, float, *)               \
-  V(F64Mul, double, *)              \
-  V(F32Div, float, /)               \
+  V(F64Ge, double, >=)          \
+  V(F32Mul, float, *)           \
+  V(F64Mul, double, *)          \
+  V(F32Div, float, /)           \
   V(F64Div, double, /)
 
 #define FOREACH_OTHER_BINOP(V) \
@@ -101,14 +107,10 @@
   V(I32Rol, int32_t)           \
   V(I64Ror, int64_t)           \
   V(I64Rol, int64_t)           \
-  V(F32Sub, float)             \
   V(F32Min, float)             \
   V(F32Max, float)             \
-  V(F32CopySign, float)        \
   V(F64Min, double)            \
   V(F64Max, double)            \
-  V(F64Sub, double)            \
-  V(F64CopySign, double)       \
   V(I32AsmjsDivS, int32_t)     \
   V(I32AsmjsDivU, uint32_t)    \
   V(I32AsmjsRemS, int32_t)     \
@@ -158,15 +160,11 @@
   V(F64UConvertI64, uint64_t)    \
   V(F64ConvertF32, float)        \
   V(F64ReinterpretI64, int64_t)  \
-  V(I32ReinterpretF32, float)    \
-  V(I64ReinterpretF64, double)   \
   V(I32AsmjsSConvertF32, float)  \
   V(I32AsmjsUConvertF32, float)  \
   V(I32AsmjsSConvertF64, double) \
-  V(I32AsmjsUConvertF64, double)
-
-#define FOREACH_OTHER_UNOP_NAN(V) \
-  V(F32Sqrt, float)               \
+  V(I32AsmjsUConvertF64, double) \
+  V(F32Sqrt, float)              \
   V(F64Sqrt, double)
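These V(...) lists are X-macros; folding F32Mul/F64Mul/F32Div/F64Div into FOREACH_SIMPLE_BINOP means they now go through the generic binop expansion. A rough sketch of the consuming side (the exact expansion site is elsewhere in this file and not shown in this hunk):

// Each V(name, ctype, op) row becomes one case of the opcode switch, roughly:
#define EXECUTE_SIMPLE_BINOP(name, ctype, op)  \
  case kExpr##name: {                          \
    ctype rval = Pop().to<ctype>();            \
    ctype lval = Pop().to<ctype>();            \
    Push(pc, WasmVal(lval op rval));           \
    break;                                     \
  }
//   FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)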
 
 static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
@@ -293,41 +291,6 @@
   return (a << shift) | (a >> (64 - shift));
 }
 
-static float quiet(float a) {
-  static const uint32_t kSignalingBit = 1 << 22;
-  uint32_t q = bit_cast<uint32_t>(std::numeric_limits<float>::quiet_NaN());
-  if ((q & kSignalingBit) != 0) {
-    // On some machines, the signaling bit set indicates it's a quiet NaN.
-    return bit_cast<float>(bit_cast<uint32_t>(a) | kSignalingBit);
-  } else {
-    // On others, the signaling bit set indicates it's a signaling NaN.
-    return bit_cast<float>(bit_cast<uint32_t>(a) & ~kSignalingBit);
-  }
-}
-
-static double quiet(double a) {
-  static const uint64_t kSignalingBit = 1ULL << 51;
-  uint64_t q = bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN());
-  if ((q & kSignalingBit) != 0) {
-    // On some machines, the signaling bit set indicates it's a quiet NaN.
-    return bit_cast<double>(bit_cast<uint64_t>(a) | kSignalingBit);
-  } else {
-    // On others, the signaling bit set indicates it's a signaling NaN.
-    return bit_cast<double>(bit_cast<uint64_t>(a) & ~kSignalingBit);
-  }
-}
-
-static inline float ExecuteF32Sub(float a, float b, TrapReason* trap) {
-  float result = a - b;
-  // Some architectures (e.g. MIPS) need extra checking to preserve the payload
-  // of a NaN operand.
-  if (result - result != 0) {
-    if (std::isnan(a)) return quiet(a);
-    if (std::isnan(b)) return quiet(b);
-  }
-  return result;
-}
-
 static inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
   return JSMin(a, b);
 }
@@ -340,17 +303,6 @@
   return copysignf(a, b);
 }
 
-static inline double ExecuteF64Sub(double a, double b, TrapReason* trap) {
-  double result = a - b;
-  // Some architectures (e.g. MIPS) need extra checking to preserve the payload
-  // of a NaN operand.
-  if (result - result != 0) {
-    if (std::isnan(a)) return quiet(a);
-    if (std::isnan(b)) return quiet(b);
-  }
-  return result;
-}
-
 static inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
   return JSMin(a, b);
 }
@@ -651,19 +603,20 @@
   return bit_cast<double>(a);
 }
 
-static inline int32_t ExecuteI32ReinterpretF32(float a, TrapReason* trap) {
-  return bit_cast<int32_t>(a);
+static inline int32_t ExecuteI32ReinterpretF32(WasmVal a) {
+  return a.to_unchecked<int32_t>();
 }
 
-static inline int64_t ExecuteI64ReinterpretF64(double a, TrapReason* trap) {
-  return bit_cast<int64_t>(a);
+static inline int64_t ExecuteI64ReinterpretF64(WasmVal a) {
+  return a.to_unchecked<int64_t>();
 }
 
 static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
                                         WasmInstance* instance) {
   // TODO(ahaas): Move memory allocation to wasm-module.cc for better
   // encapsulation.
-  if (delta_pages > wasm::WasmModule::kV8MaxPages) {
+  if (delta_pages > FLAG_wasm_max_mem_pages ||
+      delta_pages > instance->module->max_mem_pages) {
     return -1;
   }
   uint32_t old_size = instance->mem_size;
@@ -679,8 +632,9 @@
   } else {
     DCHECK_NOT_NULL(instance->mem_start);
     new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
-    if (new_size >
-        wasm::WasmModule::kV8MaxPages * wasm::WasmModule::kPageSize) {
+    if (new_size / wasm::WasmModule::kPageSize > FLAG_wasm_max_mem_pages ||
+        new_size / wasm::WasmModule::kPageSize >
+            instance->module->max_mem_pages) {
       return -1;
     }
     new_mem_start = static_cast<byte*>(realloc(instance->mem_start, new_size));
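
The rewritten checks bound both the requested delta and the resulting total size against the --wasm_max_mem_pages flag and the module-declared maximum, where the old code compared against a single hard-coded kV8MaxPages. A hedged sketch of the same arithmetic (GrownSize and its parameters are invented for this example, not V8 names):

#include <cstdint>
#include <cstdio>

constexpr uint64_t kPageSize = 64 * 1024;  // wasm page size in bytes

// Returns the new memory size in bytes, or 0 if growing by delta_pages
// would exceed either the engine flag or the module-declared maximum.
uint64_t GrownSize(uint64_t old_bytes, uint32_t delta_pages,
                   uint32_t flag_max_pages, uint32_t module_max_pages) {
  if (delta_pages > flag_max_pages || delta_pages > module_max_pages) return 0;
  uint64_t new_size = old_bytes + uint64_t{delta_pages} * kPageSize;
  uint64_t new_pages = new_size / kPageSize;
  if (new_pages > flag_max_pages || new_pages > module_max_pages) return 0;
  return new_size;
}

int main() {
  // Growing a 1-page memory by 1 page, well under both limits: allowed.
  std::printf("%llu\n",
              static_cast<unsigned long long>(GrownSize(kPageSize, 1, 16, 8)));
}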
@@ -721,8 +675,8 @@
  public:
   ControlTransferMap map_;
 
-  ControlTransfers(Zone* zone, ModuleEnv* env, AstLocalDecls* locals,
-                   const byte* start, const byte* end)
+  ControlTransfers(Zone* zone, BodyLocalDecls* locals, const byte* start,
+                   const byte* end)
       : map_(zone) {
     // Represents a control flow label.
     struct CLabel : public ZoneObject {
@@ -872,7 +826,7 @@
 // Code and metadata needed to execute a function.
 struct InterpreterCode {
   const WasmFunction* function;  // wasm function
-  AstLocalDecls locals;          // local declarations
+  BodyLocalDecls locals;         // local declarations
   const byte* orig_start;        // start of original code
   const byte* orig_end;          // end of original code
   byte* start;                   // start of (maybe altered) code
@@ -890,14 +844,13 @@
   const WasmModule* module_;
   ZoneVector<InterpreterCode> interpreter_code_;
 
-  CodeMap(const WasmModule* module, Zone* zone)
+  CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone)
       : zone_(zone), module_(module), interpreter_code_(zone) {
     if (module == nullptr) return;
     for (size_t i = 0; i < module->functions.size(); ++i) {
       const WasmFunction* function = &module->functions[i];
-      const byte* code_start =
-          module->module_start + function->code_start_offset;
-      const byte* code_end = module->module_start + function->code_end_offset;
+      const byte* code_start = module_start + function->code_start_offset;
+      const byte* code_end = module_start + function->code_end_offset;
       AddFunction(function, code_start, code_end);
     }
   }
@@ -929,10 +882,9 @@
   InterpreterCode* Preprocess(InterpreterCode* code) {
     if (code->targets == nullptr && code->start) {
       // Compute the control targets map and the local declarations.
-      CHECK(DecodeLocalDecls(code->locals, code->start, code->end));
-      ModuleEnv env = {module_, nullptr, kWasmOrigin};
+      CHECK(DecodeLocalDecls(&code->locals, code->start, code->end));
       code->targets = new (zone_) ControlTransfers(
-          zone_, &env, &code->locals, code->orig_start, code->orig_end);
+          zone_, &code->locals, code->orig_start, code->orig_end);
     }
     return code;
   }
@@ -940,7 +892,7 @@
   int AddFunction(const WasmFunction* function, const byte* code_start,
                   const byte* code_end) {
     InterpreterCode code = {
-        function, AstLocalDecls(zone_),          code_start,
+        function, BodyLocalDecls(zone_),         code_start,
         code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
         nullptr};
 
@@ -963,31 +915,27 @@
   }
 };
 
+namespace {
 // Responsible for executing code directly.
-class ThreadImpl : public WasmInterpreter::Thread {
+class ThreadImpl {
  public:
   ThreadImpl(Zone* zone, CodeMap* codemap, WasmInstance* instance)
       : codemap_(codemap),
         instance_(instance),
         stack_(zone),
         frames_(zone),
-        blocks_(zone),
-        state_(WasmInterpreter::STOPPED),
-        break_pc_(kInvalidPc),
-        trap_reason_(kTrapCount),
-        possible_nondeterminism_(false) {}
-
-  virtual ~ThreadImpl() {}
+        blocks_(zone) {}
 
   //==========================================================================
   // Implementation of public interface for WasmInterpreter::Thread.
   //==========================================================================
 
-  virtual WasmInterpreter::State state() { return state_; }
+  WasmInterpreter::State state() { return state_; }
 
-  virtual void PushFrame(const WasmFunction* function, WasmVal* args) {
+  void PushFrame(const WasmFunction* function, WasmVal* args) {
     InterpreterCode* code = codemap()->FindCode(function);
     CHECK_NOT_NULL(code);
+    ++num_interpreted_calls_;
     frames_.push_back({code, 0, 0, stack_.size()});
     for (size_t i = 0; i < function->sig->parameter_count(); ++i) {
       stack_.push_back(args[i]);
@@ -1000,7 +948,7 @@
           frames_.back().ret_pc);
   }
 
-  virtual WasmInterpreter::State Run() {
+  WasmInterpreter::State Run() {
     do {
       TRACE("  => Run()\n");
       if (state_ == WasmInterpreter::STOPPED ||
@@ -1012,7 +960,7 @@
     return state_;
   }
 
-  virtual WasmInterpreter::State Step() {
+  WasmInterpreter::State Step() {
     TRACE("  => Step()\n");
     if (state_ == WasmInterpreter::STOPPED ||
         state_ == WasmInterpreter::PAUSED) {
@@ -1022,9 +970,9 @@
     return state_;
   }
 
-  virtual void Pause() { UNIMPLEMENTED(); }
+  void Pause() { UNIMPLEMENTED(); }
 
-  virtual void Reset() {
+  void Reset() {
     TRACE("----- RESET -----\n");
     stack_.clear();
     frames_.clear();
@@ -1033,33 +981,40 @@
     possible_nondeterminism_ = false;
   }
 
-  virtual int GetFrameCount() { return static_cast<int>(frames_.size()); }
-
-  virtual const WasmFrame* GetFrame(int index) {
-    UNIMPLEMENTED();
-    return nullptr;
+  int GetFrameCount() {
+    DCHECK_GE(kMaxInt, frames_.size());
+    return static_cast<int>(frames_.size());
   }
 
-  virtual WasmFrame* GetMutableFrame(int index) {
-    UNIMPLEMENTED();
-    return nullptr;
+  template <typename FrameCons>
+  InterpretedFrame GetMutableFrame(int index, FrameCons frame_cons) {
+    DCHECK_LE(0, index);
+    DCHECK_GT(frames_.size(), index);
+    Frame* frame = &frames_[index];
+    DCHECK_GE(kMaxInt, frame->ret_pc);
+    DCHECK_GE(kMaxInt, frame->sp);
+    DCHECK_GE(kMaxInt, frame->llimit());
+    return frame_cons(frame->code->function, static_cast<int>(frame->ret_pc),
+                      static_cast<int>(frame->sp),
+                      static_cast<int>(frame->llimit()));
   }
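
GetMutableFrame works around an access restriction: InterpretedFrame's constructor is private and friended only to WasmInterpreter, so ThreadImpl cannot invoke it and instead receives a caller-supplied lambda that can (see the frame_cons lambda further down). A generic sketch of that capability-passing pattern, with invented names:

#include <cstdio>

class Factory;  // forward declaration so Widget can befriend it

class Widget {
  friend class Factory;
  explicit Widget(int id) : id_(id) {}

 public:
  int id() const { return id_; }

 private:
  int id_;
};

// BuildWidget has no access to Widget's private constructor...
template <typename MakeFn>
Widget BuildWidget(int id, MakeFn make) {
  return make(id);
}

class Factory {
 public:
  // ...but Factory does, and hands the capability down as a lambda.
  static Widget Make(int id) {
    return BuildWidget(id, [](int i) { return Widget(i); });
  }
};

int main() { std::printf("%d\n", Factory::Make(7).id()); }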
 
-  virtual WasmVal GetReturnValue(int index) {
+  WasmVal GetReturnValue(int index) {
     if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef);
     CHECK_EQ(WasmInterpreter::FINISHED, state_);
     CHECK_LT(static_cast<size_t>(index), stack_.size());
     return stack_[index];
   }
 
-  virtual pc_t GetBreakpointPc() { return break_pc_; }
+  pc_t GetBreakpointPc() { return break_pc_; }
 
-  virtual bool PossibleNondeterminism() { return possible_nondeterminism_; }
+  bool PossibleNondeterminism() { return possible_nondeterminism_; }
 
-  bool Terminated() {
-    return state_ == WasmInterpreter::TRAPPED ||
-           state_ == WasmInterpreter::FINISHED;
-  }
+  uint64_t NumInterpretedCalls() { return num_interpreted_calls_; }
+
+  void AddBreakFlags(uint8_t flags) { break_flags_ |= flags; }
+
+  void ClearBreakFlags() { break_flags_ = WasmInterpreter::BreakFlag::None; }
 
  private:
   // Entries on the stack of functions being evaluated.
@@ -1072,7 +1027,7 @@
     // Limit of parameters.
     sp_t plimit() { return sp + code->function->sig->parameter_count(); }
     // Limit of locals.
-    sp_t llimit() { return plimit() + code->locals.total_local_count; }
+    sp_t llimit() { return plimit() + code->locals.type_list.size(); }
   };
 
   struct Block {
@@ -1087,10 +1042,12 @@
   ZoneVector<WasmVal> stack_;
   ZoneVector<Frame> frames_;
   ZoneVector<Block> blocks_;
-  WasmInterpreter::State state_;
-  pc_t break_pc_;
-  TrapReason trap_reason_;
-  bool possible_nondeterminism_;
+  WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
+  pc_t break_pc_ = kInvalidPc;
+  TrapReason trap_reason_ = kTrapCount;
+  bool possible_nondeterminism_ = false;
+  uint8_t break_flags_ = 0;  // a combination of WasmInterpreter::BreakFlag
+  uint64_t num_interpreted_calls_ = 0;
 
   CodeMap* codemap() { return codemap_; }
   WasmInstance* instance() { return instance_; }
@@ -1106,6 +1063,7 @@
   void PushFrame(InterpreterCode* code, pc_t call_pc, pc_t ret_pc) {
     CHECK_NOT_NULL(code);
     DCHECK(!frames_.empty());
+    ++num_interpreted_calls_;
     frames_.back().call_pc = call_pc;
     frames_.back().ret_pc = ret_pc;
     size_t arity = code->function->sig->parameter_count();
@@ -1121,28 +1079,28 @@
   }
 
   pc_t InitLocals(InterpreterCode* code) {
-    for (auto p : code->locals.local_types) {
+    for (auto p : code->locals.type_list) {
       WasmVal val;
-      switch (p.first) {
-        case kAstI32:
+      switch (p) {
+        case kWasmI32:
           val = WasmVal(static_cast<int32_t>(0));
           break;
-        case kAstI64:
+        case kWasmI64:
           val = WasmVal(static_cast<int64_t>(0));
           break;
-        case kAstF32:
+        case kWasmF32:
           val = WasmVal(static_cast<float>(0));
           break;
-        case kAstF64:
+        case kWasmF64:
           val = WasmVal(static_cast<double>(0));
           break;
         default:
           UNREACHABLE();
           break;
       }
-      stack_.insert(stack_.end(), p.second, val);
+      stack_.push_back(val);
     }
-    return code->locals.decls_encoded_size;
+    return code->locals.encoded_size;
   }
 
   void CommitPc(pc_t pc) {
@@ -1173,7 +1131,7 @@
   }
 
   bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, size_t arity) {
-    DCHECK_GT(frames_.size(), 0u);
+    DCHECK_GT(frames_.size(), 0);
     // Pop all blocks for this frame.
     while (!blocks_.empty() && blocks_.back().fp == frames_.size()) {
       blocks_.pop_back();
@@ -1222,42 +1180,73 @@
     stack_.resize(stack_.size() - pop_count);
   }
 
+  template <typename ctype, typename mtype>
+  bool ExecuteLoad(Decoder* decoder, InterpreterCode* code, pc_t pc, int& len) {
+    MemoryAccessOperand operand(decoder, code->at(pc), sizeof(ctype));
+    uint32_t index = Pop().to<uint32_t>();
+    size_t effective_mem_size = instance()->mem_size - sizeof(mtype);
+    if (operand.offset > effective_mem_size ||
+        index > (effective_mem_size - operand.offset)) {
+      DoTrap(kTrapMemOutOfBounds, pc);
+      return false;
+    }
+    byte* addr = instance()->mem_start + operand.offset + index;
+    WasmVal result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr)));
+
+    Push(pc, result);
+    len = 1 + operand.length;
+    return true;
+  }
+
+  template <typename ctype, typename mtype>
+  bool ExecuteStore(Decoder* decoder, InterpreterCode* code, pc_t pc,
+                    int& len) {
+    MemoryAccessOperand operand(decoder, code->at(pc), sizeof(ctype));
+    WasmVal val = Pop();
+
+    uint32_t index = Pop().to<uint32_t>();
+    size_t effective_mem_size = instance()->mem_size - sizeof(mtype);
+    if (operand.offset > effective_mem_size ||
+        index > (effective_mem_size - operand.offset)) {
+      DoTrap(kTrapMemOutOfBounds, pc);
+      return false;
+    }
+    byte* addr = instance()->mem_start + operand.offset + index;
+    WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>()));
+    len = 1 + operand.length;
+
+    if (std::is_same<float, ctype>::value) {
+      possible_nondeterminism_ |= std::isnan(val.to<float>());
+    } else if (std::is_same<double, ctype>::value) {
+      possible_nondeterminism_ |= std::isnan(val.to<double>());
+    }
+    return true;
+  }
+
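
Both helpers hoist the former LOAD_CASE/STORE_CASE macro bodies into templates and share the same overflow-safe bounds check: instead of testing offset + index + sizeof(mtype) <= mem_size, which can wrap around, they compare against an effective size with the access width already subtracted. A standalone sketch of that idiom (names are illustrative; the sketch additionally guards the case where the memory is smaller than the access itself):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// True if [offset + index, offset + index + access_size) lies inside a
// memory of mem_size bytes, with no unsigned wrap-around in the test.
bool AccessInBounds(size_t mem_size, size_t access_size, uint32_t offset,
                    uint32_t index) {
  if (access_size > mem_size) return false;   // guard the subtraction below
  size_t effective = mem_size - access_size;  // last valid start address
  return offset <= effective && index <= effective - offset;
}

int main() {
  std::printf("%d %d\n",
              AccessInBounds(16, 4, 0, 12),   // 1: bytes 12..15 are in range
              AccessInBounds(16, 4, 0, 13));  // 0: would read past the end
}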
   void Execute(InterpreterCode* code, pc_t pc, int max) {
     Decoder decoder(code->start, code->end);
     pc_t limit = code->end - code->start;
-    while (true) {
-      if (max-- <= 0) {
-        // Maximum number of instructions reached.
-        state_ = WasmInterpreter::PAUSED;
-        return CommitPc(pc);
-      }
+    while (--max >= 0) {
+#define PAUSE_IF_BREAK_FLAG(flag) \
+  if (V8_UNLIKELY(break_flags_ & WasmInterpreter::BreakFlag::flag)) max = 0;
 
-      if (pc >= limit) {
-        // Fell off end of code; do an implicit return.
-        TRACE("@%-3zu: ImplicitReturn\n", pc);
-        if (!DoReturn(&code, &pc, &limit, code->function->sig->return_count()))
-          return;
-        decoder.Reset(code->start, code->end);
-        continue;
-      }
+      DCHECK_GT(limit, pc);
 
       const char* skip = "        ";
       int len = 1;
       byte opcode = code->start[pc];
       byte orig = opcode;
-      if (opcode == kInternalBreakpoint) {
+      if (V8_UNLIKELY(opcode == kInternalBreakpoint)) {
         orig = code->orig_start[pc];
         if (SkipBreakpoint(code, pc)) {
           // skip breakpoint by switching on original code.
           skip = "[skip]  ";
         } else {
-          state_ = WasmInterpreter::PAUSED;
           TRACE("@%-3zu: [break] %-24s:", pc,
                 WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
           TraceValueStack();
           TRACE("\n");
-          break_pc_ = pc;
-          return CommitPc(pc);
+          break;
         }
       }
 
@@ -1347,6 +1336,7 @@
           size_t arity = code->function->sig->return_count();
           if (!DoReturn(&code, &pc, &limit, arity)) return;
           decoder.Reset(code->start, code->end);
+          PAUSE_IF_BREAK_FLAG(AfterReturn);
           continue;
         }
         case kExprUnreachable: {
@@ -1357,12 +1347,6 @@
           blocks_.pop_back();
           break;
         }
-        case kExprI8Const: {
-          ImmI8Operand operand(&decoder, code->at(pc));
-          Push(pc, WasmVal(operand.value));
-          len = 1 + operand.length;
-          break;
-        }
         case kExprI32Const: {
           ImmI32Operand operand(&decoder, code->at(pc));
           Push(pc, WasmVal(operand.value));
@@ -1418,6 +1402,7 @@
           DoCall(target, &pc, pc + 1 + operand.length, &limit);
           code = target;
           decoder.Reset(code->start, code->end);
+          PAUSE_IF_BREAK_FLAG(AfterCall);
           continue;
         }
         case kExprCallIndirect: {
@@ -1444,21 +1429,22 @@
           DoCall(target, &pc, pc + 1 + operand.length, &limit);
           code = target;
           decoder.Reset(code->start, code->end);
+          PAUSE_IF_BREAK_FLAG(AfterCall);
           continue;
         }
         case kExprGetGlobal: {
           GlobalIndexOperand operand(&decoder, code->at(pc));
           const WasmGlobal* global = &module()->globals[operand.index];
           byte* ptr = instance()->globals_start + global->offset;
-          LocalType type = global->type;
+          ValueType type = global->type;
           WasmVal val;
-          if (type == kAstI32) {
+          if (type == kWasmI32) {
             val = WasmVal(*reinterpret_cast<int32_t*>(ptr));
-          } else if (type == kAstI64) {
+          } else if (type == kWasmI64) {
             val = WasmVal(*reinterpret_cast<int64_t*>(ptr));
-          } else if (type == kAstF32) {
+          } else if (type == kWasmF32) {
             val = WasmVal(*reinterpret_cast<float*>(ptr));
-          } else if (type == kAstF64) {
+          } else if (type == kWasmF64) {
             val = WasmVal(*reinterpret_cast<double*>(ptr));
           } else {
             UNREACHABLE();
@@ -1471,15 +1457,15 @@
           GlobalIndexOperand operand(&decoder, code->at(pc));
           const WasmGlobal* global = &module()->globals[operand.index];
           byte* ptr = instance()->globals_start + global->offset;
-          LocalType type = global->type;
+          ValueType type = global->type;
           WasmVal val = Pop();
-          if (type == kAstI32) {
+          if (type == kWasmI32) {
             *reinterpret_cast<int32_t*>(ptr) = val.to<int32_t>();
-          } else if (type == kAstI64) {
+          } else if (type == kWasmI64) {
             *reinterpret_cast<int64_t*>(ptr) = val.to<int64_t>();
-          } else if (type == kAstF32) {
+          } else if (type == kWasmF32) {
             *reinterpret_cast<float*>(ptr) = val.to<float>();
-          } else if (type == kAstF64) {
+          } else if (type == kWasmF64) {
             *reinterpret_cast<double*>(ptr) = val.to<double>();
           } else {
             UNREACHABLE();
@@ -1488,20 +1474,10 @@
           break;
         }
 
-#define LOAD_CASE(name, ctype, mtype)                                       \
-  case kExpr##name: {                                                       \
-    MemoryAccessOperand operand(&decoder, code->at(pc), sizeof(ctype));     \
-    uint32_t index = Pop().to<uint32_t>();                                  \
-    size_t effective_mem_size = instance()->mem_size - sizeof(mtype);       \
-    if (operand.offset > effective_mem_size ||                              \
-        index > (effective_mem_size - operand.offset)) {                    \
-      return DoTrap(kTrapMemOutOfBounds, pc);                               \
-    }                                                                       \
-    byte* addr = instance()->mem_start + operand.offset + index;            \
-    WasmVal result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr))); \
-    Push(pc, result);                                                       \
-    len = 1 + operand.length;                                               \
-    break;                                                                  \
+#define LOAD_CASE(name, ctype, mtype)                                \
+  case kExpr##name: {                                                \
+    if (!ExecuteLoad<ctype, mtype>(&decoder, code, pc, len)) return; \
+    break;                                                           \
   }
 
           LOAD_CASE(I32LoadMem8S, int32_t, int8_t);
@@ -1520,20 +1496,10 @@
           LOAD_CASE(F64LoadMem, double, double);
 #undef LOAD_CASE
 
-#define STORE_CASE(name, ctype, mtype)                                        \
-  case kExpr##name: {                                                         \
-    MemoryAccessOperand operand(&decoder, code->at(pc), sizeof(ctype));       \
-    WasmVal val = Pop();                                                      \
-    uint32_t index = Pop().to<uint32_t>();                                    \
-    size_t effective_mem_size = instance()->mem_size - sizeof(mtype);         \
-    if (operand.offset > effective_mem_size ||                                \
-        index > (effective_mem_size - operand.offset)) {                      \
-      return DoTrap(kTrapMemOutOfBounds, pc);                                 \
-    }                                                                         \
-    byte* addr = instance()->mem_start + operand.offset + index;              \
-    WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>())); \
-    len = 1 + operand.length;                                                 \
-    break;                                                                    \
+#define STORE_CASE(name, ctype, mtype)                                \
+  case kExpr##name: {                                                 \
+    if (!ExecuteStore<ctype, mtype>(&decoder, code, pc, len)) return; \
+    break;                                                            \
   }
 
           STORE_CASE(I32StoreMem8, int32_t, int8_t);
@@ -1605,6 +1571,23 @@
           len = 1 + operand.length;
           break;
         }
+        // We need to treat kExprI32ReinterpretF32 and kExprI64ReinterpretF64
+        // specially to guarantee that the quiet bit of a NaN is preserved on
+        // ia32 by the reinterpret casts.
+        case kExprI32ReinterpretF32: {
+          WasmVal val = Pop();
+          WasmVal result(ExecuteI32ReinterpretF32(val));
+          Push(pc, result);
+          possible_nondeterminism_ |= std::isnan(val.to<float>());
+          break;
+        }
+        case kExprI64ReinterpretF64: {
+          WasmVal val = Pop();
+          WasmVal result(ExecuteI64ReinterpretF64(val));
+          Push(pc, result);
+          possible_nondeterminism_ |= std::isnan(val.to<double>());
+          break;
+        }
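
The replacement ExecuteI32ReinterpretF32/ExecuteI64ReinterpretF64 take the whole WasmVal and read the integer union member via to_unchecked, so the value never round-trips through a float register; as the comment above notes, an ia32 float load/store may silently set the quiet bit of a signaling NaN and change the observed integer bits. A minimal sketch of the union-read idea (DemoVal is illustrative; reading an inactive union member is type punning that mainstream compilers support but ISO C++ leaves undefined):

#include <cstdint>
#include <cstdio>

// A tiny tagged-union value in the spirit of WasmVal.
struct DemoVal {
  union {
    float f32;
    int32_t i32;
  } val;
};

// Reinterpret the stored f32 bits as i32 without materializing a float
// rvalue, so no FPU load can canonicalize a signaling NaN on the way.
int32_t ReinterpretF32Bits(const DemoVal& v) { return v.val.i32; }

int main() {
  DemoVal v;
  v.val.i32 = 0x7fa00000;  // a signaling-NaN bit pattern for f32
  std::printf("0x%x\n", ReinterpretF32Bits(v));
}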
 #define EXECUTE_SIMPLE_BINOP(name, ctype, op)             \
   case kExpr##name: {                                     \
     WasmVal rval = Pop();                                 \
@@ -1616,19 +1599,6 @@
           FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
 #undef EXECUTE_SIMPLE_BINOP
 
-#define EXECUTE_SIMPLE_BINOP_NAN(name, ctype, op)        \
-  case kExpr##name: {                                    \
-    WasmVal rval = Pop();                                \
-    WasmVal lval = Pop();                                \
-    ctype result = lval.to<ctype>() op rval.to<ctype>(); \
-    possible_nondeterminism_ |= std::isnan(result);      \
-    WasmVal result_val(result);                          \
-    Push(pc, result_val);                                \
-    break;                                               \
-  }
-          FOREACH_SIMPLE_BINOP_NAN(EXECUTE_SIMPLE_BINOP_NAN)
-#undef EXECUTE_SIMPLE_BINOP_NAN
-
 #define EXECUTE_OTHER_BINOP(name, ctype)              \
   case kExpr##name: {                                 \
     TrapReason trap = kTrapCount;                     \
@@ -1642,6 +1612,28 @@
           FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
 #undef EXECUTE_OTHER_BINOP
 
+        case kExprF32CopySign: {
+          // Handle kExprF32CopySign separately because it may introduce
+          // observable non-determinism.
+          TrapReason trap = kTrapCount;
+          volatile float rval = Pop().to<float>();
+          volatile float lval = Pop().to<float>();
+          WasmVal result(ExecuteF32CopySign(lval, rval, &trap));
+          Push(pc, result);
+          possible_nondeterminism_ |= std::isnan(rval);
+          break;
+        }
+        case kExprF64CopySign: {
+          // Handle kExprF64CopySign separately because it may introduce
+          // observable non-determinism.
+          TrapReason trap = kTrapCount;
+          volatile double rval = Pop().to<double>();
+          volatile double lval = Pop().to<double>();
+          WasmVal result(ExecuteF64CopySign(lval, rval, &trap));
+          Push(pc, result);
+          possible_nondeterminism_ |= std::isnan(rval);
+          break;
+        }
 #define EXECUTE_OTHER_UNOP(name, ctype)              \
   case kExpr##name: {                                \
     TrapReason trap = kTrapCount;                    \
@@ -1654,20 +1646,6 @@
           FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
 #undef EXECUTE_OTHER_UNOP
 
-#define EXECUTE_OTHER_UNOP_NAN(name, ctype)          \
-  case kExpr##name: {                                \
-    TrapReason trap = kTrapCount;                    \
-    volatile ctype val = Pop().to<ctype>();          \
-    ctype result = Execute##name(val, &trap);        \
-    possible_nondeterminism_ |= std::isnan(result);  \
-    WasmVal result_val(result);                      \
-    if (trap != kTrapCount) return DoTrap(trap, pc); \
-    Push(pc, result_val);                            \
-    break;                                           \
-  }
-          FOREACH_OTHER_UNOP_NAN(EXECUTE_OTHER_UNOP_NAN)
-#undef EXECUTE_OTHER_UNOP_NAN
-
         default:
           V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
                    code->start[pc], OpcodeName(code->start[pc]));
@@ -1675,13 +1653,25 @@
       }
 
       pc += len;
+      if (pc == limit) {
+        // Fell off end of code; do an implicit return.
+        TRACE("@%-3zu: ImplicitReturn\n", pc);
+        if (!DoReturn(&code, &pc, &limit, code->function->sig->return_count()))
+          return;
+        decoder.Reset(code->start, code->end);
+        PAUSE_IF_BREAK_FLAG(AfterReturn);
+      }
     }
-    UNREACHABLE();  // above decoding loop should run forever.
+    // Set break_pc_, even though we might have stopped because max was reached.
+    // We don't want to stop after executing zero instructions next time.
+    break_pc_ = pc;
+    state_ = WasmInterpreter::PAUSED;
+    CommitPc(pc);
   }
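
With the loop rewritten as while (--max >= 0), pausing is folded into the instruction budget: PAUSE_IF_BREAK_FLAG zeroes max after a matching call or return, so execution falls through to the single epilogue that records break_pc_, sets PAUSED, and commits the pc, instead of returning from several places. A rough sketch of that control shape (the instruction handler here is a placeholder):

#include <cstdint>
#include <cstdio>

enum BreakFlag : uint8_t { None = 0, AfterReturn = 1 << 0, AfterCall = 1 << 1 };

// Runs at most max steps and returns the pc to resume from; a set break
// flag forces the budget to zero right after the instruction that matched.
size_t RunSome(size_t pc, int max, uint8_t break_flags) {
  while (--max >= 0) {
    // ... execute one instruction at pc (placeholder logic below) ...
    bool was_return = (pc % 3 == 0);  // stand-in for "we just returned"
    ++pc;
    if (was_return && (break_flags & AfterReturn)) max = 0;  // pause next test
  }
  return pc;  // caller records this as break_pc_ and the state as PAUSED
}

int main() { std::printf("%zu\n", RunSome(0, 10, AfterReturn)); }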
 
   WasmVal Pop() {
-    DCHECK_GT(stack_.size(), 0u);
-    DCHECK_GT(frames_.size(), 0u);
+    DCHECK_GT(stack_.size(), 0);
+    DCHECK_GT(frames_.size(), 0);
     DCHECK_GT(stack_.size(), frames_.back().llimit());  // can't pop into locals
     WasmVal val = stack_.back();
     stack_.pop_back();
@@ -1689,8 +1679,8 @@
   }
 
   void PopN(int n) {
-    DCHECK_GE(stack_.size(), static_cast<size_t>(n));
-    DCHECK_GT(frames_.size(), 0u);
+    DCHECK_GE(stack_.size(), n);
+    DCHECK_GT(frames_.size(), 0);
     size_t nsize = stack_.size() - n;
     DCHECK_GE(nsize, frames_.back().llimit());  // can't pop into locals
     stack_.resize(nsize);
@@ -1698,13 +1688,13 @@
 
   WasmVal PopArity(size_t arity) {
     if (arity == 0) return WasmVal();
-    CHECK_EQ(1u, arity);
+    CHECK_EQ(1, arity);
     return Pop();
   }
 
   void Push(pc_t pc, WasmVal val) {
     // TODO(titzer): store PC as well?
-    if (val.type != kAstStmt) stack_.push_back(val);
+    if (val.type != kWasmStmt) stack_.push_back(val);
   }
 
   void TraceStack(const char* phase, pc_t pc) {
@@ -1716,6 +1706,7 @@
   }
 
   void TraceValueStack() {
+#ifdef DEBUG
     Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
     sp_t sp = top ? top->sp : 0;
     sp_t plimit = top ? top->plimit() : 0;
@@ -1730,19 +1721,19 @@
           PrintF(" s%zu:", i);
         WasmVal val = stack_[i];
         switch (val.type) {
-          case kAstI32:
+          case kWasmI32:
             PrintF("i32:%d", val.to<int32_t>());
             break;
-          case kAstI64:
+          case kWasmI64:
             PrintF("i64:%" PRId64 "", val.to<int64_t>());
             break;
-          case kAstF32:
+          case kWasmF32:
             PrintF("f32:%f", val.to<float>());
             break;
-          case kAstF64:
+          case kWasmF64:
             PrintF("f64:%lf", val.to<double>());
             break;
-          case kAstStmt:
+          case kWasmStmt:
             PrintF("void");
             break;
           default:
@@ -1751,45 +1742,114 @@
         }
       }
     }
+#endif  // DEBUG
   }
 };
 
+// Converters between WasmInterpreter::Thread and WasmInterpreter::ThreadImpl.
+// Thread* is the public interface, without knowledge of the object layout.
+// This cast is potentially risky, but as long as we always cast it back before
+// accessing any data, it should be fine. UBSan does not complain.
+WasmInterpreter::Thread* ToThread(ThreadImpl* impl) {
+  return reinterpret_cast<WasmInterpreter::Thread*>(impl);
+}
+static ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
+  return reinterpret_cast<ThreadImpl*>(thread);
+}
+}  // namespace
+
+//============================================================================
+// Implementation of the pimpl idiom for WasmInterpreter::Thread.
+// Instead of placing a pointer to the ThreadImpl inside of the Thread object,
+// we just reinterpret_cast them. ThreadImpls are only allocated inside this
+// translation unit anyway.
+//============================================================================
+WasmInterpreter::State WasmInterpreter::Thread::state() {
+  return ToImpl(this)->state();
+}
+void WasmInterpreter::Thread::PushFrame(const WasmFunction* function,
+                                        WasmVal* args) {
+  return ToImpl(this)->PushFrame(function, args);
+}
+WasmInterpreter::State WasmInterpreter::Thread::Run() {
+  return ToImpl(this)->Run();
+}
+WasmInterpreter::State WasmInterpreter::Thread::Step() {
+  return ToImpl(this)->Step();
+}
+void WasmInterpreter::Thread::Pause() { return ToImpl(this)->Pause(); }
+void WasmInterpreter::Thread::Reset() { return ToImpl(this)->Reset(); }
+pc_t WasmInterpreter::Thread::GetBreakpointPc() {
+  return ToImpl(this)->GetBreakpointPc();
+}
+int WasmInterpreter::Thread::GetFrameCount() {
+  return ToImpl(this)->GetFrameCount();
+}
+const InterpretedFrame WasmInterpreter::Thread::GetFrame(int index) {
+  return GetMutableFrame(index);
+}
+InterpretedFrame WasmInterpreter::Thread::GetMutableFrame(int index) {
+  // We have access to the constructor of InterpretedFrame, but ThreadImpl does
+  // not. So pass it in as a lambda (it should all get inlined).
+  auto frame_cons = [](const WasmFunction* function, int pc, int fp, int sp) {
+    return InterpretedFrame(function, pc, fp, sp);
+  };
+  return ToImpl(this)->GetMutableFrame(index, frame_cons);
+}
+WasmVal WasmInterpreter::Thread::GetReturnValue(int index) {
+  return ToImpl(this)->GetReturnValue(index);
+}
+bool WasmInterpreter::Thread::PossibleNondeterminism() {
+  return ToImpl(this)->PossibleNondeterminism();
+}
+uint64_t WasmInterpreter::Thread::NumInterpretedCalls() {
+  return ToImpl(this)->NumInterpretedCalls();
+}
+void WasmInterpreter::Thread::AddBreakFlags(uint8_t flags) {
+  ToImpl(this)->AddBreakFlags(flags);
+}
+void WasmInterpreter::Thread::ClearBreakFlags() {
+  ToImpl(this)->ClearBreakFlags();
+}
+
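
The forwarding methods above implement the "cast-based pimpl" the comment describes: Thread carries no state and is never constructed (its constructor is deleted); every public method reinterpret_casts this back to the ThreadImpl that was actually allocated. A stripped-down sketch of the idiom with invented Api/Impl types (like the original, it relies on never touching data through the handle type itself):

#include <cstdio>

class Impl;  // the real object, defined below

// Public handle type: no data, never constructed directly.
class Api {
  Api() = delete;

 public:
  int Value();
};

class Impl {
 public:
  int value = 42;
};

// The only conversions ever performed; data is accessed exclusively after
// casting back to Impl*, so the (empty) layout of Api never matters.
Api* ToApi(Impl* impl) { return reinterpret_cast<Api*>(impl); }
Impl* ToImpl(Api* api) { return reinterpret_cast<Impl*>(api); }

int Api::Value() { return ToImpl(this)->value; }

int main() {
  Impl impl;
  std::printf("%d\n", ToApi(&impl)->Value());
}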
 //============================================================================
 // The implementation details of the interpreter.
 //============================================================================
 class WasmInterpreterInternals : public ZoneObject {
  public:
   WasmInstance* instance_;
+  // Create a copy of the module bytes for the interpreter, since the passed
+  // pointer might be invalidated after constructing the interpreter.
+  const ZoneVector<uint8_t> module_bytes_;
   CodeMap codemap_;
-  ZoneVector<ThreadImpl*> threads_;
+  ZoneVector<ThreadImpl> threads_;
 
-  WasmInterpreterInternals(Zone* zone, WasmInstance* instance)
-      : instance_(instance),
-        codemap_(instance_ ? instance_->module : nullptr, zone),
+  WasmInterpreterInternals(Zone* zone, const ModuleBytesEnv& env)
+      : instance_(env.module_env.instance),
+        module_bytes_(env.wire_bytes.start(), env.wire_bytes.end(), zone),
+        codemap_(
+            env.module_env.instance ? env.module_env.instance->module : nullptr,
+            module_bytes_.data(), zone),
         threads_(zone) {
-    threads_.push_back(new ThreadImpl(zone, &codemap_, instance));
+    threads_.emplace_back(zone, &codemap_, env.module_env.instance);
   }
 
-  void Delete() {
-    // TODO(titzer): CFI doesn't like threads in the ZoneVector.
-    for (auto t : threads_) delete t;
-    threads_.resize(0);
-  }
+  void Delete() { threads_.clear(); }
 };
 
 //============================================================================
 // Implementation of the public interface of the interpreter.
 //============================================================================
-WasmInterpreter::WasmInterpreter(WasmInstance* instance,
+WasmInterpreter::WasmInterpreter(const ModuleBytesEnv& env,
                                  AccountingAllocator* allocator)
     : zone_(allocator, ZONE_NAME),
-      internals_(new (&zone_) WasmInterpreterInternals(&zone_, instance)) {}
+      internals_(new (&zone_) WasmInterpreterInternals(&zone_, env)) {}
 
 WasmInterpreter::~WasmInterpreter() { internals_->Delete(); }
 
-void WasmInterpreter::Run() { internals_->threads_[0]->Run(); }
+void WasmInterpreter::Run() { internals_->threads_[0].Run(); }
 
-void WasmInterpreter::Pause() { internals_->threads_[0]->Pause(); }
+void WasmInterpreter::Pause() { internals_->threads_[0].Pause(); }
 
 bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
                                     bool enabled) {
@@ -1797,7 +1857,7 @@
   if (!code) return false;
   size_t size = static_cast<size_t>(code->end - code->start);
   // Check bounds for {pc}.
-  if (pc < code->locals.decls_encoded_size || pc >= size) return false;
+  if (pc < code->locals.encoded_size || pc >= size) return false;
   // Make a copy of the code before enabling a breakpoint.
   if (enabled && code->orig_start == code->start) {
     code->start = reinterpret_cast<byte*>(zone_.New(size));
@@ -1818,7 +1878,7 @@
   if (!code) return false;
   size_t size = static_cast<size_t>(code->end - code->start);
   // Check bounds for {pc}.
-  if (pc < code->locals.decls_encoded_size || pc >= size) return false;
+  if (pc < code->locals.encoded_size || pc >= size) return false;
   // Check if a breakpoint is present at that place in the code.
   return code->start[pc] == kInternalBreakpoint;
 }
@@ -1834,30 +1894,7 @@
 
 WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
   CHECK_EQ(0, id);  // only one thread for now.
-  return internals_->threads_[id];
-}
-
-WasmVal WasmInterpreter::GetLocalVal(const WasmFrame* frame, int index) {
-  CHECK_GE(index, 0);
-  UNIMPLEMENTED();
-  WasmVal none;
-  none.type = kAstStmt;
-  return none;
-}
-
-WasmVal WasmInterpreter::GetExprVal(const WasmFrame* frame, int pc) {
-  UNIMPLEMENTED();
-  WasmVal none;
-  none.type = kAstStmt;
-  return none;
-}
-
-void WasmInterpreter::SetLocalVal(WasmFrame* frame, int index, WasmVal val) {
-  UNIMPLEMENTED();
-}
-
-void WasmInterpreter::SetExprVal(WasmFrame* frame, int pc, WasmVal val) {
-  UNIMPLEMENTED();
+  return ToThread(&internals_->threads_[id]);
 }
 
 size_t WasmInterpreter::GetMemorySize() {
@@ -1885,10 +1922,39 @@
 
 ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
     Zone* zone, const byte* start, const byte* end) {
-  ControlTransfers targets(zone, nullptr, nullptr, start, end);
+  ControlTransfers targets(zone, nullptr, start, end);
   return targets.map_;
 }
 
+//============================================================================
+// Implementation of the frame inspection interface.
+//============================================================================
+int InterpretedFrame::GetParameterCount() const {
+  USE(fp_);
+  USE(sp_);
+  // TODO(clemensh): Return the correct number of parameters.
+  return 0;
+}
+
+WasmVal InterpretedFrame::GetLocalVal(int index) const {
+  CHECK_GE(index, 0);
+  UNIMPLEMENTED();
+  WasmVal none;
+  none.type = kWasmStmt;
+  return none;
+}
+
+WasmVal InterpretedFrame::GetExprVal(int pc) const {
+  UNIMPLEMENTED();
+  WasmVal none;
+  none.type = kWasmStmt;
+  return none;
+}
+
+void InterpretedFrame::SetLocalVal(int index, WasmVal val) { UNIMPLEMENTED(); }
+
+void InterpretedFrame::SetExprVal(int pc, WasmVal val) { UNIMPLEMENTED(); }
+
 }  // namespace wasm
 }  // namespace internal
 }  // namespace v8
diff --git a/src/wasm/wasm-interpreter.h b/src/wasm/wasm-interpreter.h
index 360362b..ab11a08 100644
--- a/src/wasm/wasm-interpreter.h
+++ b/src/wasm/wasm-interpreter.h
@@ -17,8 +17,8 @@
 namespace wasm {
 
 // forward declarations.
+struct ModuleBytesEnv;
 struct WasmFunction;
-struct WasmInstance;
 class WasmInterpreterInternals;
 
 typedef size_t pc_t;
@@ -32,23 +32,23 @@
 
 // Macro for defining union members.
 #define FOREACH_UNION_MEMBER(V) \
-  V(i32, kAstI32, int32_t)      \
-  V(u32, kAstI32, uint32_t)     \
-  V(i64, kAstI64, int64_t)      \
-  V(u64, kAstI64, uint64_t)     \
-  V(f32, kAstF32, float)        \
-  V(f64, kAstF64, double)
+  V(i32, kWasmI32, int32_t)     \
+  V(u32, kWasmI32, uint32_t)    \
+  V(i64, kWasmI64, int64_t)     \
+  V(u64, kWasmI64, uint64_t)    \
+  V(f32, kWasmF32, float)       \
+  V(f64, kWasmF64, double)
 
 // Representation of values within the interpreter.
 struct WasmVal {
-  LocalType type;
+  ValueType type;
   union {
 #define DECLARE_FIELD(field, localtype, ctype) ctype field;
     FOREACH_UNION_MEMBER(DECLARE_FIELD)
 #undef DECLARE_FIELD
   } val;
 
-  WasmVal() : type(kAstStmt) {}
+  WasmVal() : type(kWasmStmt) {}
 
 #define DECLARE_CONSTRUCTOR(field, localtype, ctype) \
   explicit WasmVal(ctype v) : type(localtype) { val.field = v; }
@@ -56,13 +56,22 @@
 #undef DECLARE_CONSTRUCTOR
 
   template <typename T>
-  T to() {
+  inline T to() {
+    UNREACHABLE();
+  }
+
+  template <typename T>
+  inline T to_unchecked() {
     UNREACHABLE();
   }
 };
 
 #define DECLARE_CAST(field, localtype, ctype) \
   template <>                                 \
+  inline ctype WasmVal::to_unchecked() {      \
+    return val.field;                         \
+  }                                           \
+  template <>                                 \
   inline ctype WasmVal::to() {                \
     CHECK_EQ(localtype, type);                \
     return val.field;                         \
@@ -70,21 +79,25 @@
 FOREACH_UNION_MEMBER(DECLARE_CAST)
 #undef DECLARE_CAST
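
The DECLARE_CAST expansion above generates two accessors per union member: to<T>() CHECKs that the tag matches before reading, while the new to_unchecked<T>() returns the raw bits regardless of tag (used by the reinterpret opcodes in the interpreter). A compact sketch of the same checked/unchecked pair on an invented two-member value type:

#include <cassert>
#include <cstdint>
#include <cstdio>

enum class Tag { kI32, kF32 };

// Invented value type mirroring the WasmVal shape.
struct Demo {
  Tag type;
  union {
    int32_t i32;
    float f32;
  } val;

  template <typename T>
  T to();  // checked: asserts that the tag matches
  template <typename T>
  T to_unchecked();  // unchecked: raw union read, tag ignored
};

template <>
int32_t Demo::to<int32_t>() {
  assert(type == Tag::kI32);
  return val.i32;
}
template <>
int32_t Demo::to_unchecked<int32_t>() {
  return val.i32;
}

int main() {
  Demo d;
  d.type = Tag::kI32;
  d.val.i32 = 5;
  std::printf("%d %d\n", d.to<int32_t>(), d.to_unchecked<int32_t>());
}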
 
-template <>
-inline void WasmVal::to() {
-  CHECK_EQ(kAstStmt, type);
-}
-
 // Representation of frames within the interpreter.
-class WasmFrame {
+class InterpretedFrame {
  public:
   const WasmFunction* function() const { return function_; }
   int pc() const { return pc_; }
 
+  //==========================================================================
+  // Stack frame inspection.
+  //==========================================================================
+  int GetParameterCount() const;
+  WasmVal GetLocalVal(int index) const;
+  WasmVal GetExprVal(int pc) const;
+  void SetLocalVal(int index, WasmVal val);
+  void SetExprVal(int pc, WasmVal val);
+
  private:
   friend class WasmInterpreter;
 
-  WasmFrame(const WasmFunction* function, int pc, int fp, int sp)
+  InterpretedFrame(const WasmFunction* function, int pc, int fp, int sp)
       : function_(function), pc_(pc), fp_(fp), sp_(sp) {}
 
   const WasmFunction* function_;
@@ -107,35 +120,53 @@
   //                       +------------- Finish -------------> FINISHED
   enum State { STOPPED, RUNNING, PAUSED, FINISHED, TRAPPED };
 
+  // Tells a thread to pause after certain instructions.
+  enum BreakFlag : uint8_t {
+    None = 0,
+    AfterReturn = 1 << 0,
+    AfterCall = 1 << 1
+  };
+
   // Representation of a thread in the interpreter.
-  class Thread {
+  class V8_EXPORT_PRIVATE Thread {
+    // Don't instantiate Threads; they will be allocated as ThreadImpl in the
+    // interpreter implementation.
+    Thread() = delete;
+
    public:
     // Execution control.
-    virtual State state() = 0;
-    virtual void PushFrame(const WasmFunction* function, WasmVal* args) = 0;
-    virtual State Run() = 0;
-    virtual State Step() = 0;
-    virtual void Pause() = 0;
-    virtual void Reset() = 0;
-    virtual ~Thread() {}
+    State state();
+    void PushFrame(const WasmFunction* function, WasmVal* args);
+    State Run();
+    State Step();
+    void Pause();
+    void Reset();
 
     // Stack inspection and modification.
-    virtual pc_t GetBreakpointPc() = 0;
-    virtual int GetFrameCount() = 0;
-    virtual const WasmFrame* GetFrame(int index) = 0;
-    virtual WasmFrame* GetMutableFrame(int index) = 0;
-    virtual WasmVal GetReturnValue(int index = 0) = 0;
+    pc_t GetBreakpointPc();
+    int GetFrameCount();
+    const InterpretedFrame GetFrame(int index);
+    InterpretedFrame GetMutableFrame(int index);
+    WasmVal GetReturnValue(int index = 0);
+
     // Returns true if the thread executed an instruction which may produce
     // nondeterministic results, e.g. float div, float sqrt, and float mul,
     // where the sign bit of a NaN is nondeterministic.
-    virtual bool PossibleNondeterminism() = 0;
+    bool PossibleNondeterminism();
+
+    // Returns the number of calls / function frames executed on this thread.
+    uint64_t NumInterpretedCalls();
 
     // Thread-specific breakpoints.
-    bool SetBreakpoint(const WasmFunction* function, int pc, bool enabled);
-    bool GetBreakpoint(const WasmFunction* function, int pc);
+    // TODO(wasm): Implement this once we support multiple threads.
+    // bool SetBreakpoint(const WasmFunction* function, int pc, bool enabled);
+    // bool GetBreakpoint(const WasmFunction* function, int pc);
+
+    void AddBreakFlags(uint8_t flags);
+    void ClearBreakFlags();
   };
 
-  WasmInterpreter(WasmInstance* instance, AccountingAllocator* allocator);
+  WasmInterpreter(const ModuleBytesEnv& env, AccountingAllocator* allocator);
   ~WasmInterpreter();
 
   //==========================================================================
@@ -161,14 +192,6 @@
   Thread* GetThread(int id);
 
   //==========================================================================
-  // Stack frame inspection.
-  //==========================================================================
-  WasmVal GetLocalVal(const WasmFrame* frame, int index);
-  WasmVal GetExprVal(const WasmFrame* frame, int pc);
-  void SetLocalVal(WasmFrame* frame, int index, WasmVal val);
-  void SetExprVal(WasmFrame* frame, int pc, WasmVal val);
-
-  //==========================================================================
   // Memory access.
   //==========================================================================
   size_t GetMemorySize();
diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc
index 0e030a2..f16d8b2 100644
--- a/src/wasm/wasm-js.cc
+++ b/src/wasm/wasm-js.cc
@@ -13,11 +13,13 @@
 #include "src/factory.h"
 #include "src/handles.h"
 #include "src/isolate.h"
+#include "src/objects-inl.h"
 #include "src/objects.h"
 #include "src/parsing/parse-info.h"
 
 #include "src/wasm/module-decoder.h"
 #include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-limits.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-objects.h"
 #include "src/wasm/wasm-result.h"
@@ -28,13 +30,26 @@
 
 namespace v8 {
 
-enum WasmMemoryObjectData {
-  kWasmMemoryBuffer,
-  kWasmMemoryMaximum,
-  kWasmMemoryInstanceObject
-};
-
 namespace {
+
+#define RANGE_ERROR_MSG                                                        \
+  "Wasm compilation exceeds internal limits in this context for the provided " \
+  "arguments"
+
+// TODO(wasm): move brand check to the respective types, and don't throw
+// in it; rather, use a provided ErrorThrower, or let the caller handle it.
+static bool HasBrand(i::Handle<i::Object> value, i::Handle<i::Symbol> sym) {
+  if (!value->IsJSObject()) return false;
+  i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
+  Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, sym);
+  return has_brand.FromMaybe(false);
+}
+
+static bool BrandCheck(i::Handle<i::Object> value, i::Handle<i::Symbol> sym,
+                       ErrorThrower* thrower, const char* msg) {
+  return HasBrand(value, sym) ? true : (thrower->TypeError("%s", msg), false);
+}
+
 i::Handle<i::String> v8_str(i::Isolate* isolate, const char* str) {
   return isolate->factory()->NewStringFromAsciiChecked(str);
 }
@@ -42,28 +57,82 @@
   return Utils::ToLocal(v8_str(reinterpret_cast<i::Isolate*>(isolate), str));
 }
 
-struct RawBuffer {
-  const byte* start;
-  const byte* end;
-  size_t size() { return static_cast<size_t>(end - start); }
-};
+i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
+    const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
+  v8::Isolate* isolate = args.GetIsolate();
+  if (args.Length() < 1) {
+    thrower->TypeError("Argument 0 must be a WebAssembly.Module");
+    return {};
+  }
 
-RawBuffer GetRawBufferSource(
-    v8::Local<v8::Value> source, ErrorThrower* thrower) {
+  Local<Context> context = isolate->GetCurrentContext();
+  i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+  if (!BrandCheck(Utils::OpenHandle(*args[0]),
+                  i::handle(i_context->wasm_module_sym()), thrower,
+                  "Argument 0 must be a WebAssembly.Module")) {
+    return {};
+  }
+
+  Local<Object> module_obj = Local<Object>::Cast(args[0]);
+  return i::Handle<i::WasmModuleObject>::cast(
+      v8::Utils::OpenHandle(*module_obj));
+}
+
+bool IsCompilationAllowed(i::Isolate* isolate, ErrorThrower* thrower,
+                          v8::Local<v8::Value> source, bool is_async) {
+  // Allow caller to do one final check on thrower state, rather than
+  // one at each step. No information is lost - failure reason is captured
+  // in the thrower state.
+  if (thrower->error()) return false;
+
+  AllowWasmCompileCallback callback = isolate->allow_wasm_compile_callback();
+  if (callback != nullptr &&
+      !callback(reinterpret_cast<v8::Isolate*>(isolate), source, is_async)) {
+    thrower->RangeError(RANGE_ERROR_MSG);
+    return false;
+  }
+  return true;
+}
+
+bool IsInstantiationAllowed(i::Isolate* isolate, ErrorThrower* thrower,
+                            v8::Local<v8::Value> module_or_bytes,
+                            i::MaybeHandle<i::JSReceiver> ffi, bool is_async) {
+  // Allow caller to do one final check on thrower state, rather than
+  // one at each step. No information is lost - failure reason is captured
+  // in the thrower state.
+  if (thrower->error()) return false;
+  v8::MaybeLocal<v8::Value> v8_ffi;
+  if (!ffi.is_null()) {
+    v8_ffi = v8::Local<v8::Value>::Cast(Utils::ToLocal(ffi.ToHandleChecked()));
+  }
+  AllowWasmInstantiateCallback callback =
+      isolate->allow_wasm_instantiate_callback();
+  if (callback != nullptr &&
+      !callback(reinterpret_cast<v8::Isolate*>(isolate), module_or_bytes,
+                v8_ffi, is_async)) {
+    thrower->RangeError(RANGE_ERROR_MSG);
+    return false;
+  }
+  return true;
+}
+
+i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
+    const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
+  if (args.Length() < 1) {
+    thrower->TypeError("Argument 0 must be a buffer source");
+    return i::wasm::ModuleWireBytes(nullptr, nullptr);
+  }
+
   const byte* start = nullptr;
-  const byte* end = nullptr;
-
+  size_t length = 0;
+  v8::Local<v8::Value> source = args[0];
   if (source->IsArrayBuffer()) {
     // A raw array buffer was passed.
     Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(source);
     ArrayBuffer::Contents contents = buffer->GetContents();
 
     start = reinterpret_cast<const byte*>(contents.Data());
-    end = start + contents.ByteLength();
-
-    if (start == nullptr || end == start) {
-      thrower->CompileError("ArrayBuffer argument is empty");
-    }
+    length = contents.ByteLength();
   } else if (source->IsTypedArray()) {
     // A TypedArray was passed.
     Local<TypedArray> array = Local<TypedArray>::Cast(source);
@@ -73,194 +142,257 @@
 
     start =
         reinterpret_cast<const byte*>(contents.Data()) + array->ByteOffset();
-    end = start + array->ByteLength();
-
-    if (start == nullptr || end == start) {
-      thrower->TypeError("ArrayBuffer argument is empty");
-    }
+    length = array->ByteLength();
   } else {
-    thrower->TypeError("Argument 0 must be an ArrayBuffer or Uint8Array");
+    thrower->TypeError("Argument 0 must be a buffer source");
   }
-
-  return {start, end};
-}
-
-static i::MaybeHandle<i::WasmModuleObject> CreateModuleObject(
-    v8::Isolate* isolate, const v8::Local<v8::Value> source,
-    ErrorThrower* thrower) {
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  i::MaybeHandle<i::JSObject> nothing;
-
-  RawBuffer buffer = GetRawBufferSource(source, thrower);
-  if (buffer.start == nullptr) return i::MaybeHandle<i::WasmModuleObject>();
-
-  DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
-  return i::wasm::CreateModuleObjectFromBytes(
-      i_isolate, buffer.start, buffer.end, thrower, i::wasm::kWasmOrigin,
-      i::Handle<i::Script>::null(), nullptr, nullptr);
-}
-
-static bool ValidateModule(v8::Isolate* isolate,
-                           const v8::Local<v8::Value> source,
-                           ErrorThrower* thrower) {
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  i::MaybeHandle<i::JSObject> nothing;
-
-  RawBuffer buffer = GetRawBufferSource(source, thrower);
-  if (buffer.start == nullptr) return false;
-
-  DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
-  return i::wasm::ValidateModuleBytes(i_isolate, buffer.start, buffer.end,
-                                      thrower,
-                                      i::wasm::ModuleOrigin::kWasmOrigin);
-}
-
-static bool BrandCheck(Isolate* isolate, i::Handle<i::Object> value,
-                       i::Handle<i::Symbol> sym, const char* msg) {
-  if (value->IsJSObject()) {
-    i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
-    Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, sym);
-    if (has_brand.IsNothing()) return false;
-    if (has_brand.ToChecked()) return true;
+  DCHECK_IMPLIES(length, start != nullptr);
+  if (length == 0) {
+    thrower->CompileError("BufferSource argument is empty");
   }
-  v8::Local<v8::Value> e = v8::Exception::TypeError(v8_str(isolate, msg));
-  isolate->ThrowException(e);
-  return false;
+  if (length > i::wasm::kV8MaxWasmModuleSize) {
+    thrower->RangeError("buffer source exceeds maximum size of %zu (is %zu)",
+                        i::wasm::kV8MaxWasmModuleSize, length);
+  }
+  if (thrower->error()) return i::wasm::ModuleWireBytes(nullptr, nullptr);
+  // TODO(titzer): use the handle as well?
+  return i::wasm::ModuleWireBytes(start, start + length);
 }
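
All of these argument helpers follow one convention: on failure they record the problem on the ErrorThrower and return an empty value, so each builtin performs a single thrower.error() check rather than branching after every step. A small sketch of that accumulator style (Thrower below is a stand-in for illustration, not the V8 ErrorThrower API):

#include <cstdio>
#include <string>

// Stand-in error accumulator: the first error wins and is checked once.
class Thrower {
 public:
  void TypeError(const std::string& msg) {
    if (msg_.empty()) msg_ = msg;
  }
  bool error() const { return !msg_.empty(); }
  const std::string& message() const { return msg_; }

 private:
  std::string msg_;
};

// Each helper records failure and returns a dummy; no early exits needed.
int ParsePositive(int raw, Thrower* thrower) {
  if (raw <= 0) {
    thrower->TypeError("argument must be positive");
    return 0;
  }
  return raw;
}

int main() {
  Thrower thrower;
  int a = ParsePositive(3, &thrower);
  int b = ParsePositive(-1, &thrower);
  if (thrower.error()) {
    std::printf("error: %s (a=%d b=%d)\n", thrower.message().c_str(), a, b);
    return 1;
  }
  return 0;
}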
 
+i::MaybeHandle<i::JSReceiver> GetSecondArgumentAsImports(
+    const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
+  if (args.Length() < 2) return {};
+  if (args[1]->IsUndefined()) return {};
+
+  if (!args[1]->IsObject()) {
+    thrower->TypeError("Argument 1 must be an object");
+    return {};
+  }
+  Local<Object> obj = Local<Object>::Cast(args[1]);
+  return i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
+}
+
+// WebAssembly.compile(bytes) -> Promise
 void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   HandleScope scope(isolate);
-  ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
-                       "WebAssembly.compile()");
-
-  if (args.Length() < 1) {
-    thrower.TypeError("Argument 0 must be a buffer source");
-    return;
-  }
-  i::MaybeHandle<i::JSObject> module_obj =
-      CreateModuleObject(isolate, args[0], &thrower);
+  ErrorThrower thrower(i_isolate, "WebAssembly.compile()");
 
   Local<Context> context = isolate->GetCurrentContext();
   v8::Local<v8::Promise::Resolver> resolver;
   if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return;
-  if (thrower.error()) {
-    resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
-  } else {
-    resolver->Resolve(context, Utils::ToLocal(module_obj.ToHandleChecked()));
-  }
   v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
   return_value.Set(resolver->GetPromise());
-}
 
-void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  v8::Isolate* isolate = args.GetIsolate();
-  HandleScope scope(isolate);
-  ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
-                       "WebAssembly.validate()");
-
-  if (args.Length() < 1) {
-    thrower.TypeError("Argument 0 must be a buffer source");
+  auto bytes = GetFirstArgumentAsBytes(args, &thrower);
+  if (!IsCompilationAllowed(i_isolate, &thrower, args[0], true)) {
+    resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
     return;
   }
+  DCHECK(!thrower.error());
+  i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
+  i::wasm::AsyncCompile(i_isolate, promise, bytes);
+}
+
+// WebAssembly.validate(bytes) -> bool
+void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  HandleScope scope(isolate);
+  ErrorThrower thrower(i_isolate, "WebAssembly.validate()");
+
+  auto bytes = GetFirstArgumentAsBytes(args, &thrower);
 
   v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
-  if (ValidateModule(isolate, args[0], &thrower)) {
+  if (!thrower.error() &&
+      i::wasm::SyncValidate(reinterpret_cast<i::Isolate*>(isolate), &thrower,
+                            bytes)) {
     return_value.Set(v8::True(isolate));
   } else {
+    if (thrower.wasm_error()) thrower.Reify();  // Clear error.
     return_value.Set(v8::False(isolate));
   }
 }
 
+// new WebAssembly.Module(bytes) -> WebAssembly.Module
 void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   HandleScope scope(isolate);
-  ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
-                       "WebAssembly.Module()");
+  ErrorThrower thrower(i_isolate, "WebAssembly.Module()");
 
-  if (args.Length() < 1) {
-    thrower.TypeError("Argument 0 must be a buffer source");
-    return;
-  }
-  i::MaybeHandle<i::JSObject> module_obj =
-      CreateModuleObject(isolate, args[0], &thrower);
+  auto bytes = GetFirstArgumentAsBytes(args, &thrower);
+  if (!IsCompilationAllowed(i_isolate, &thrower, args[0], false)) return;
+
+  DCHECK(!thrower.error());
+  i::MaybeHandle<i::Object> module_obj =
+      i::wasm::SyncCompile(i_isolate, &thrower, bytes);
   if (module_obj.is_null()) return;
 
   v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
   return_value.Set(Utils::ToLocal(module_obj.ToHandleChecked()));
 }
 
+// WebAssembly.Module.imports(module) -> Array<Import>
+void WebAssemblyModuleImports(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  HandleScope scope(args.GetIsolate());
+  v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  ErrorThrower thrower(i_isolate, "WebAssembly.Module.imports()");
+
+  auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
+  if (thrower.error()) return;
+  auto imports = i::wasm::GetImports(i_isolate, maybe_module.ToHandleChecked());
+  args.GetReturnValue().Set(Utils::ToLocal(imports));
+}
+
+// WebAssembly.Module.exports(module) -> Array<Export>
+void WebAssemblyModuleExports(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  HandleScope scope(args.GetIsolate());
+  v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  ErrorThrower thrower(i_isolate, "WebAssembly.Module.exports()");
+
+  auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
+  if (thrower.error()) return;
+  auto exports = i::wasm::GetExports(i_isolate, maybe_module.ToHandleChecked());
+  args.GetReturnValue().Set(Utils::ToLocal(exports));
+}
+
+// WebAssembly.Module.customSections(module, name) -> Array<Section>
+void WebAssemblyModuleCustomSections(
+    const v8::FunctionCallbackInfo<v8::Value>& args) {
+  HandleScope scope(args.GetIsolate());
+  v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  ErrorThrower thrower(i_isolate, "WebAssembly.Module.customSections()");
+
+  auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
+  if (thrower.error()) return;
+
+  if (args.Length() < 2) {
+    thrower.TypeError("Argument 1 must be a string");
+    return;
+  }
+
+  i::Handle<i::Object> name = Utils::OpenHandle(*args[1]);
+  if (!name->IsString()) {
+    thrower.TypeError("Argument 1 must be a string");
+    return;
+  }
+
+  auto custom_sections =
+      i::wasm::GetCustomSections(i_isolate, maybe_module.ToHandleChecked(),
+                                 i::Handle<i::String>::cast(name), &thrower);
+  if (thrower.error()) return;
+  args.GetReturnValue().Set(Utils::ToLocal(custom_sections));
+}
+
+// new WebAssembly.Instance(module, imports) -> WebAssembly.Instance
 void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
   HandleScope scope(args.GetIsolate());
   v8::Isolate* isolate = args.GetIsolate();
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-
   ErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
 
-  if (args.Length() < 1) {
-    thrower.TypeError("Argument 0 must be a WebAssembly.Module");
+  auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
+  if (thrower.error()) return;
+
+  auto maybe_imports = GetSecondArgumentAsImports(args, &thrower);
+  if (!IsInstantiationAllowed(i_isolate, &thrower, args[0], maybe_imports,
+                              false)) {
     return;
   }
+  DCHECK(!thrower.error());
+
+  i::MaybeHandle<i::Object> instance_object = i::wasm::SyncInstantiate(
+      i_isolate, &thrower, maybe_module.ToHandleChecked(), maybe_imports,
+      i::MaybeHandle<i::JSArrayBuffer>());
+  if (instance_object.is_null()) return;
+  args.GetReturnValue().Set(Utils::ToLocal(instance_object.ToHandleChecked()));
+}
+
+// WebAssembly.instantiate(module, imports) -> Promise<WebAssembly.Instance>
+// WebAssembly.instantiate(bytes, imports) ->
+//     Promise<{module: WebAssembly.Module, instance: WebAssembly.Instance}>
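+// Informative example (assuming `bytes` is a BufferSource holding a valid
+// module):
+//   WebAssembly.instantiate(bytes, imports)
+//       .then(({module, instance}) => ...);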
+void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  ErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
+
+  HandleScope scope(isolate);
 
   Local<Context> context = isolate->GetCurrentContext();
   i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
-  if (!BrandCheck(isolate, Utils::OpenHandle(*args[0]),
-                  i::Handle<i::Symbol>(i_context->wasm_module_sym()),
-                  "Argument 0 must be a WebAssembly.Module")) {
-    return;
-  }
 
-  Local<Object> obj = Local<Object>::Cast(args[0]);
-  i::Handle<i::JSObject> i_obj =
-      i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
-
-  i::Handle<i::JSReceiver> ffi = i::Handle<i::JSObject>::null();
-  if (args.Length() > 1 && args[1]->IsObject()) {
-    Local<Object> obj = Local<Object>::Cast(args[1]);
-    ffi = i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
-  }
-
-  i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
-  if (args.Length() > 2 && args[2]->IsObject()) {
-    Local<Object> obj = Local<Object>::Cast(args[2]);
-    i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
-    if (i::WasmJs::IsWasmMemoryObject(i_isolate, mem_obj)) {
-      memory = i::Handle<i::JSArrayBuffer>(
-          i::Handle<i::WasmMemoryObject>::cast(mem_obj)->get_buffer(),
-          i_isolate);
-    } else {
-      thrower.TypeError("Argument 2 must be a WebAssembly.Memory");
-    }
-  }
-  i::MaybeHandle<i::JSObject> instance =
-      i::wasm::WasmModule::Instantiate(i_isolate, &thrower, i_obj, ffi, memory);
-  if (instance.is_null()) {
-    if (!thrower.error()) thrower.RuntimeError("Could not instantiate module");
-    return;
-  }
-  DCHECK(!i_isolate->has_pending_exception());
+  v8::Local<v8::Promise::Resolver> resolver;
+  if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return;
   v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
-  return_value.Set(Utils::ToLocal(instance.ToHandleChecked()));
+  return_value.Set(resolver->GetPromise());
+
+  if (args.Length() < 1) {
+    thrower.TypeError(
+        "Argument 0 must be provided and must be either a buffer source or a "
+        "WebAssembly.Module object");
+    resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+    return;
+  }
+
+  i::Handle<i::Object> first_arg = Utils::OpenHandle(*args[0]);
+  if (!first_arg->IsJSObject()) {
+    thrower.TypeError(
+        "Argument 0 must be a buffer source or a WebAssembly.Module object");
+    resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+    return;
+  }
+
+  auto maybe_imports = GetSecondArgumentAsImports(args, &thrower);
+  if (thrower.error()) {
+    resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+    return;
+  }
+  if (!IsInstantiationAllowed(i_isolate, &thrower, args[0], maybe_imports,
+                              true)) {
+    resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+    return;
+  }
+  i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
+  if (HasBrand(first_arg, i::Handle<i::Symbol>(i_context->wasm_module_sym()))) {
+    // WebAssembly.instantiate(module, imports) -> WebAssembly.Instance
+    auto module_object = GetFirstArgumentAsModule(args, &thrower);
+    i::wasm::AsyncInstantiate(i_isolate, promise,
+                              module_object.ToHandleChecked(), maybe_imports);
+  } else {
+    // WebAssembly.instantiate(bytes, imports) -> {module, instance}
+    auto bytes = GetFirstArgumentAsBytes(args, &thrower);
+    if (thrower.error()) {
+      resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+      return;
+    }
+    i::wasm::AsyncCompileAndInstantiate(i_isolate, promise, bytes,
+                                        maybe_imports);
+  }
 }
 
 bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
                         Local<Context> context, Local<v8::Object> object,
-                        Local<String> property, int* result, int lower_bound,
-                        int upper_bound) {
+                        Local<String> property, int* result,
+                        int64_t lower_bound, uint64_t upper_bound) {
   v8::MaybeLocal<v8::Value> maybe = object->Get(context, property);
   v8::Local<v8::Value> value;
   if (maybe.ToLocal(&value)) {
     int64_t number;
     if (!value->IntegerValue(context).To(&number)) return false;
-    if (number < static_cast<int64_t>(lower_bound)) {
+    if (number < lower_bound) {
       thrower->RangeError("Property value %" PRId64
-                          " is below the lower bound %d",
+                          " is below the lower bound %" PRIx64,
                           number, lower_bound);
       return false;
     }
     if (number > static_cast<int64_t>(upper_bound)) {
       thrower->RangeError("Property value %" PRId64
-                          " is above the upper bound %d",
+                          " is above the upper bound %" PRIu64,
                           number, upper_bound);
       return false;
     }
@@ -270,13 +402,12 @@
   return false;
 }
 
-const int max_table_size = 1 << 26;
-
+// new WebAssembly.Table(descriptor) -> WebAssembly.Table
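+// Informative example: new WebAssembly.Table({element: "anyfunc",
+// initial: 1, maximum: 10}) creates a table with one slot that may grow
+// to ten entries.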
 void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   HandleScope scope(isolate);
-  ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
-                       "WebAssembly.Module()");
+  ErrorThrower thrower(i_isolate, "WebAssembly.Table()");
   if (args.Length() < 1 || !args[0]->IsObject()) {
     thrower.TypeError("Argument 0 must be a table descriptor");
     return;
@@ -299,31 +430,25 @@
     }
   }
   // The descriptor's 'initial'.
-  int initial;
+  int initial = 0;
   if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
                           v8_str(isolate, "initial"), &initial, 0,
-                          max_table_size)) {
+                          i::FLAG_wasm_max_table_size)) {
     return;
   }
   // The descriptor's 'maximum'.
-  int maximum = 0;
+  int maximum = -1;
   Local<String> maximum_key = v8_str(isolate, "maximum");
   Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
 
-  if (has_maximum.IsNothing()) {
-    // There has been an exception, just return.
-    return;
-  }
-  if (has_maximum.FromJust()) {
+  if (!has_maximum.IsNothing() && has_maximum.FromJust()) {
     if (!GetIntegerProperty(isolate, &thrower, context, descriptor, maximum_key,
-                            &maximum, initial, max_table_size)) {
+                            &maximum, initial,
+                            i::wasm::kSpecMaxWasmTableSize)) {
       return;
     }
-  } else {
-    maximum = static_cast<int>(i::wasm::WasmModule::kV8MaxTableSize);
   }
 
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   i::Handle<i::FixedArray> fixed_array;
   i::Handle<i::JSObject> table_obj =
       i::WasmTableObject::New(i_isolate, initial, maximum, &fixed_array);
@@ -333,9 +458,9 @@
 
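+// new WebAssembly.Memory(descriptor) -> WebAssembly.Memory
+// Informative example: new WebAssembly.Memory({initial: 1, maximum: 4})
+// reserves one 64 KiB page up front and allows growth to 4 pages.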
 void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   HandleScope scope(isolate);
-  ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
-                       "WebAssembly.Module()");
+  ErrorThrower thrower(i_isolate, "WebAssembly.Memory()");
   if (args.Length() < 1 || !args[0]->IsObject()) {
     thrower.TypeError("Argument 0 must be a memory descriptor");
     return;
@@ -343,45 +468,47 @@
   Local<Context> context = isolate->GetCurrentContext();
   Local<v8::Object> descriptor = args[0]->ToObject(context).ToLocalChecked();
   // The descriptor's 'initial'.
-  int initial;
+  int initial = 0;
   if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
-                          v8_str(isolate, "initial"), &initial, 0, 65536)) {
+                          v8_str(isolate, "initial"), &initial, 0,
+                          i::FLAG_wasm_max_mem_pages)) {
     return;
   }
   // The descriptor's 'maximum'.
-  int maximum = 0;
+  int maximum = -1;
   Local<String> maximum_key = v8_str(isolate, "maximum");
   Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
 
-  if (has_maximum.IsNothing()) {
-    // There has been an exception, just return.
-    return;
-  }
-  if (has_maximum.FromJust()) {
+  if (!has_maximum.IsNothing() && has_maximum.FromJust()) {
     if (!GetIntegerProperty(isolate, &thrower, context, descriptor, maximum_key,
-                            &maximum, initial, 65536)) {
+                            &maximum, initial,
+                            i::wasm::kSpecMaxWasmMemoryPages)) {
       return;
     }
   }
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  i::Handle<i::JSArrayBuffer> buffer =
-      i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
   size_t size = static_cast<size_t>(i::wasm::WasmModule::kPageSize) *
                 static_cast<size_t>(initial);
-  i::JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, size);
-
-  i::Handle<i::JSObject> memory_obj = i::WasmMemoryObject::New(
-      i_isolate, buffer, has_maximum.FromJust() ? maximum : -1);
+  i::Handle<i::JSArrayBuffer> buffer =
+      i::wasm::NewArrayBuffer(i_isolate, size, i::FLAG_wasm_guard_pages);
+  if (buffer.is_null()) {
+    thrower.RangeError("could not allocate memory");
+    return;
+  }
+  i::Handle<i::JSObject> memory_obj =
+      i::WasmMemoryObject::New(i_isolate, buffer, maximum);
   args.GetReturnValue().Set(Utils::ToLocal(memory_obj));
 }
 
 void WebAssemblyTableGetLength(
     const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  HandleScope scope(isolate);
+  ErrorThrower thrower(i_isolate, "WebAssembly.Table.length()");
   Local<Context> context = isolate->GetCurrentContext();
   i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
-  if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
-                  i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+  if (!BrandCheck(Utils::OpenHandle(*args.This()),
+                  i::Handle<i::Symbol>(i_context->wasm_table_sym()), &thrower,
                   "Receiver is not a WebAssembly.Table")) {
     return;
   }
@@ -391,20 +518,23 @@
       v8::Number::New(isolate, receiver->current_length()));
 }
 
+// WebAssembly.Table.grow(num) -> num
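+// Informative: grow(n) appends n null entries, so `let old = table.grow(1)`
+// returns the previous length and leaves old == table.length - 1.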
 void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  HandleScope scope(isolate);
+  ErrorThrower thrower(i_isolate, "WebAssembly.Table.grow()");
   Local<Context> context = isolate->GetCurrentContext();
   i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
-  if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
-                  i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+  if (!BrandCheck(Utils::OpenHandle(*args.This()),
+                  i::Handle<i::Symbol>(i_context->wasm_table_sym()), &thrower,
                   "Receiver is not a WebAssembly.Table")) {
     return;
   }
 
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   auto receiver =
       i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
-  i::Handle<i::FixedArray> old_array(receiver->get_functions(), i_isolate);
+  i::Handle<i::FixedArray> old_array(receiver->functions(), i_isolate);
   int old_size = old_array->length();
   int64_t new_size64 = 0;
   if (args.Length() > 0 && !args[0]->IntegerValue(context).To(&new_size64)) {
@@ -412,14 +542,21 @@
   }
   new_size64 += old_size;
 
-  if (new_size64 < old_size || new_size64 > receiver->maximum_length()) {
-    v8::Local<v8::Value> e = v8::Exception::RangeError(
-        v8_str(isolate, new_size64 < old_size ? "trying to shrink table"
-                                              : "maximum table size exceeded"));
-    isolate->ThrowException(e);
+  int64_t max_size64 = receiver->maximum_length();
+  if (max_size64 < 0 ||
+      max_size64 > static_cast<int64_t>(i::FLAG_wasm_max_table_size)) {
+    max_size64 = i::FLAG_wasm_max_table_size;
+  }
+
+  if (new_size64 < old_size || new_size64 > max_size64) {
+    thrower.RangeError(new_size64 < old_size ? "trying to shrink table"
+                                             : "maximum table size exceeded");
     return;
   }
+
   int new_size = static_cast<int>(new_size64);
+  i::WasmTableObject::Grow(i_isolate, receiver,
+                           static_cast<uint32_t>(new_size - old_size));
 
   if (new_size != old_size) {
     i::Handle<i::FixedArray> new_array =
@@ -430,30 +567,33 @@
     receiver->set_functions(*new_array);
   }
 
-  // TODO(titzer): update relevant instances.
+  // TODO(gdeepti): use weak links for instances
+  v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+  return_value.Set(old_size);
 }
 
+// WebAssembly.Table.get(num) -> JSFunction
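+// Informative: get(i) returns the wrapped wasm function (or null) stored
+// at index i, and throws a RangeError when i is out of bounds.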
 void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  HandleScope scope(isolate);
+  ErrorThrower thrower(i_isolate, "WebAssembly.Table.get()");
   Local<Context> context = isolate->GetCurrentContext();
   i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
-  if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
-                  i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+  if (!BrandCheck(Utils::OpenHandle(*args.This()),
+                  i::Handle<i::Symbol>(i_context->wasm_table_sym()), &thrower,
                   "Receiver is not a WebAssembly.Table")) {
     return;
   }
 
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   auto receiver =
       i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
-  i::Handle<i::FixedArray> array(receiver->get_functions(), i_isolate);
+  i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
   int i = 0;
   if (args.Length() > 0 && !args[0]->Int32Value(context).To(&i)) return;
   v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
   if (i < 0 || i >= array->length()) {
-    v8::Local<v8::Value> e =
-        v8::Exception::RangeError(v8_str(isolate, "index out of bounds"));
-    isolate->ThrowException(e);
+    thrower.RangeError("index out of bounds");
     return;
   }
 
@@ -461,20 +601,21 @@
   return_value.Set(Utils::ToLocal(value));
 }
 
+// WebAssembly.Table.set(num, JSFunction)
 void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  HandleScope scope(isolate);
+  ErrorThrower thrower(i_isolate, "WebAssembly.Table.set()");
   Local<Context> context = isolate->GetCurrentContext();
   i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
-  if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
-                  i::Handle<i::Symbol>(i_context->wasm_table_sym()),
+  if (!BrandCheck(Utils::OpenHandle(*args.This()),
+                  i::Handle<i::Symbol>(i_context->wasm_table_sym()), &thrower,
                   "Receiver is not a WebAssembly.Table")) {
     return;
   }
   if (args.Length() < 2) {
-    v8::Local<v8::Value> e = v8::Exception::TypeError(
-        v8_str(isolate, "Argument 1 must be null or a function"));
-    isolate->ThrowException(e);
+    thrower.TypeError("Argument 1 must be null or a function");
     return;
   }
   i::Handle<i::Object> value = Utils::OpenHandle(*args[1]);
@@ -482,25 +623,21 @@
       (!value->IsJSFunction() ||
        i::Handle<i::JSFunction>::cast(value)->code()->kind() !=
            i::Code::JS_TO_WASM_FUNCTION)) {
-    v8::Local<v8::Value> e = v8::Exception::TypeError(
-        v8_str(isolate, "Argument 1 must be null or a WebAssembly function"));
-    isolate->ThrowException(e);
+    thrower.TypeError("Argument 1 must be null or a WebAssembly function");
     return;
   }
 
   auto receiver =
       i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
-  i::Handle<i::FixedArray> array(receiver->get_functions(), i_isolate);
+  i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
   int i;
   if (!args[0]->Int32Value(context).To(&i)) return;
   if (i < 0 || i >= array->length()) {
-    v8::Local<v8::Value> e =
-        v8::Exception::RangeError(v8_str(isolate, "index out of bounds"));
-    isolate->ThrowException(e);
+    thrower.RangeError("index out of bounds");
     return;
   }
 
-  i::Handle<i::FixedArray> dispatch_tables(receiver->get_dispatch_tables(),
+  i::Handle<i::FixedArray> dispatch_tables(receiver->dispatch_tables(),
                                            i_isolate);
   if (value->IsNull(i_isolate)) {
     i::wasm::UpdateDispatchTables(i_isolate, dispatch_tables, i,
@@ -513,68 +650,67 @@
   i::Handle<i::FixedArray>::cast(array)->set(i, *value);
 }
 
+// WebAssembly.Memory.grow(num) -> num
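+// Informative: grow(n) adds n wasm pages (64 KiB each) and returns the
+// previous size in pages; per the JS API the old ArrayBuffer is detached,
+// so memory.buffer must be re-read after a successful grow.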
 void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  HandleScope scope(isolate);
+  ErrorThrower thrower(i_isolate, "WebAssembly.Memory.grow()");
   Local<Context> context = isolate->GetCurrentContext();
   i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
-  if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
-                  i::Handle<i::Symbol>(i_context->wasm_memory_sym()),
+  if (!BrandCheck(Utils::OpenHandle(*args.This()),
+                  i::Handle<i::Symbol>(i_context->wasm_memory_sym()), &thrower,
                   "Receiver is not a WebAssembly.Memory")) {
     return;
   }
-  if (args.Length() < 1) {
-    v8::Local<v8::Value> e = v8::Exception::TypeError(
-        v8_str(isolate, "Argument 0 required, must be numeric value of pages"));
-    isolate->ThrowException(e);
+  int64_t delta_size = 0;
+  if (args.Length() < 1 || !args[0]->IntegerValue(context).To(&delta_size)) {
+    thrower.TypeError("Argument 0 required, must be numeric value of pages");
     return;
   }
-
-  uint32_t delta = args[0]->Uint32Value(context).FromJust();
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  i::Handle<i::JSObject> receiver =
-      i::Handle<i::JSObject>::cast(Utils::OpenHandle(*args.This()));
-  i::Handle<i::Object> instance_object(
-      receiver->GetInternalField(kWasmMemoryInstanceObject), i_isolate);
-  i::Handle<i::JSObject> instance(
-      i::Handle<i::JSObject>::cast(instance_object));
-
-  // TODO(gdeepti) Implement growing memory when shared by different
-  // instances.
-  int32_t ret = internal::wasm::GrowInstanceMemory(i_isolate, instance, delta);
+  i::Handle<i::WasmMemoryObject> receiver =
+      i::Handle<i::WasmMemoryObject>::cast(Utils::OpenHandle(*args.This()));
+  int64_t max_size64 = receiver->maximum_pages();
+  if (max_size64 < 0 ||
+      max_size64 > static_cast<int64_t>(i::FLAG_wasm_max_mem_pages)) {
+    max_size64 = i::FLAG_wasm_max_mem_pages;
+  }
+  i::Handle<i::JSArrayBuffer> old_buffer(receiver->buffer());
+  uint32_t old_size =
+      old_buffer->byte_length()->Number() / i::wasm::WasmModule::kPageSize;
+  int64_t new_size64 = old_size + delta_size;
+  if (delta_size < 0 || max_size64 < new_size64 || new_size64 < old_size) {
+    thrower.RangeError(new_size64 < old_size ? "trying to shrink memory"
+                                             : "maximum memory size exceeded");
+    return;
+  }
+  int32_t ret = i::wasm::GrowWebAssemblyMemory(
+      i_isolate, receiver, static_cast<uint32_t>(delta_size));
   if (ret == -1) {
-    v8::Local<v8::Value> e = v8::Exception::Error(
-        v8_str(isolate, "Unable to grow instance memory."));
-    isolate->ThrowException(e);
+    thrower.RangeError("Unable to grow instance memory.");
     return;
   }
-  i::MaybeHandle<i::JSArrayBuffer> buffer =
-      internal::wasm::GetInstanceMemory(i_isolate, instance);
-  if (buffer.is_null()) {
-    v8::Local<v8::Value> e = v8::Exception::Error(
-        v8_str(isolate, "WebAssembly.Memory buffer object not set."));
-    isolate->ThrowException(e);
-    return;
-  }
-  receiver->SetInternalField(kWasmMemoryBuffer, *buffer.ToHandleChecked());
   v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
   return_value.Set(ret);
 }
 
+// WebAssembly.Memory.buffer -> ArrayBuffer
 void WebAssemblyMemoryGetBuffer(
     const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::Isolate* isolate = args.GetIsolate();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  HandleScope scope(isolate);
+  ErrorThrower thrower(i_isolate, "WebAssembly.Memory.buffer");
   Local<Context> context = isolate->GetCurrentContext();
   i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
-  if (!BrandCheck(isolate, Utils::OpenHandle(*args.This()),
-                  i::Handle<i::Symbol>(i_context->wasm_memory_sym()),
+  if (!BrandCheck(Utils::OpenHandle(*args.This()),
+                  i::Handle<i::Symbol>(i_context->wasm_memory_sym()), &thrower,
                   "Receiver is not a WebAssembly.Memory")) {
     return;
   }
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  i::Handle<i::JSObject> receiver =
-      i::Handle<i::JSObject>::cast(Utils::OpenHandle(*args.This()));
-  i::Handle<i::Object> buffer(receiver->GetInternalField(kWasmMemoryBuffer),
-                              i_isolate);
+  i::Handle<i::WasmMemoryObject> receiver =
+      i::Handle<i::WasmMemoryObject>::cast(Utils::OpenHandle(*args.This()));
+  i::Handle<i::Object> buffer(receiver->buffer(), i_isolate);
   DCHECK(buffer->IsJSArrayBuffer());
   v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
   return_value.Set(Utils::ToLocal(buffer));
@@ -586,20 +722,23 @@
 static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
                                                       FunctionCallback func) {
   Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
-  Local<FunctionTemplate> local = FunctionTemplate::New(isolate, func);
-  return v8::Utils::OpenHandle(*local);
+  Local<FunctionTemplate> templ = FunctionTemplate::New(isolate, func);
+  templ->ReadOnlyPrototype();
+  return v8::Utils::OpenHandle(*templ);
 }
 
 namespace internal {
 
 Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
-                               const char* str, FunctionCallback func) {
+                               const char* str, FunctionCallback func,
+                               int length = 0) {
   Handle<String> name = v8_str(isolate, str);
   Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
   Handle<JSFunction> function =
       ApiNatives::InstantiateFunction(temp).ToHandleChecked();
-  PropertyAttributes attributes =
-      static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+  JSFunction::SetName(function, name, isolate->factory()->empty_string());
+  function->shared()->set_length(length);
+  PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
   JSObject::AddProperty(object, name, function, attributes);
   return function;
 }
@@ -611,27 +750,46 @@
   Handle<JSFunction> function =
       ApiNatives::InstantiateFunction(temp).ToHandleChecked();
   v8::PropertyAttribute attributes =
-      static_cast<v8::PropertyAttribute>(v8::DontDelete | v8::ReadOnly);
+      static_cast<v8::PropertyAttribute>(v8::DontEnum);
   Utils::ToLocal(object)->SetAccessorProperty(Utils::ToLocal(name),
                                               Utils::ToLocal(function),
                                               Local<Function>(), attributes);
   return function;
 }
 
-void WasmJs::InstallWasmModuleSymbolIfNeeded(Isolate* isolate,
-                                             Handle<JSGlobalObject> global,
-                                             Handle<Context> context) {
-  if (!context->get(Context::WASM_MODULE_SYM_INDEX)->IsSymbol() ||
-      !context->get(Context::WASM_INSTANCE_SYM_INDEX)->IsSymbol()) {
-    InstallWasmMapsIfNeeded(isolate, isolate->native_context());
-    InstallWasmConstructors(isolate, isolate->global_object(),
-                            isolate->native_context());
-  }
-}
+void WasmJs::Install(Isolate* isolate) {
+  Handle<JSGlobalObject> global = isolate->global_object();
+  Handle<Context> context(global->native_context(), isolate);
+  // TODO(titzer): once FLAG_expose_wasm is gone, this should become a DCHECK.
+  if (context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) return;
 
-void WasmJs::InstallWasmConstructors(Isolate* isolate,
-                                     Handle<JSGlobalObject> global,
-                                     Handle<Context> context) {
+  // Install Maps.
+
+  // TODO(titzer): Also make one for strict mode functions?
+  Handle<Map> prev_map = Handle<Map>(context->sloppy_function_map(), isolate);
+
+  InstanceType instance_type = prev_map->instance_type();
+  int internal_fields = JSObject::GetInternalFieldCount(*prev_map);
+  CHECK_EQ(0, internal_fields);
+  int pre_allocated =
+      prev_map->GetInObjectProperties() - prev_map->unused_property_fields();
+  int instance_size = 0;
+  int in_object_properties = 0;
+  int wasm_internal_fields = internal_fields + 1  // module instance object
+                             + 1                  // function arity
+                             + 1;                 // function signature
+  JSFunction::CalculateInstanceSizeHelper(instance_type, wasm_internal_fields,
+                                          0, &instance_size,
+                                          &in_object_properties);
+
+  int unused_property_fields = in_object_properties - pre_allocated;
+  Handle<Map> map = Map::CopyInitialMap(
+      prev_map, instance_size, in_object_properties, unused_property_fields);
+
+  context->set_wasm_function_map(*map);
+
+  // Install symbols.
+
   Factory* factory = isolate->factory();
   // Create private symbols.
   Handle<Symbol> module_sym = factory->NewPrivateSymbol();
@@ -646,7 +804,9 @@
   Handle<Symbol> memory_sym = factory->NewPrivateSymbol();
   context->set_wasm_memory_sym(*memory_sym);
 
-  // Bind the WebAssembly object.
+  // Install the JS API.
+
+  // Setup WebAssembly
   Handle<String> name = v8_str(isolate, "WebAssembly");
   Handle<JSFunction> cons = factory->NewFunction(name);
   JSFunction::SetInstancePrototype(
@@ -655,128 +815,103 @@
   Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
   PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
   JSObject::AddProperty(global, name, webassembly, attributes);
-
-  // Setup compile
-  InstallFunc(isolate, webassembly, "compile", WebAssemblyCompile);
-
-  // Setup compile
-  InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate);
+  PropertyAttributes ro_attributes =
+      static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+  JSObject::AddProperty(webassembly, factory->to_string_tag_symbol(),
+                        v8_str(isolate, "WebAssembly"), ro_attributes);
+  InstallFunc(isolate, webassembly, "compile", WebAssemblyCompile, 1);
+  InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate, 1);
+  InstallFunc(isolate, webassembly, "instantiate", WebAssemblyInstantiate, 1);
 
   // Setup Module
   Handle<JSFunction> module_constructor =
-      InstallFunc(isolate, webassembly, "Module", WebAssemblyModule);
+      InstallFunc(isolate, webassembly, "Module", WebAssemblyModule, 1);
   context->set_wasm_module_constructor(*module_constructor);
   Handle<JSObject> module_proto =
       factory->NewJSObject(module_constructor, TENURED);
-  i::Handle<i::Map> map = isolate->factory()->NewMap(
-      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize +
+  i::Handle<i::Map> module_map = isolate->factory()->NewMap(
+      i::JS_API_OBJECT_TYPE, i::JSObject::kHeaderSize +
                              WasmModuleObject::kFieldCount * i::kPointerSize);
-  JSFunction::SetInitialMap(module_constructor, map, module_proto);
+  JSFunction::SetInitialMap(module_constructor, module_map, module_proto);
+  InstallFunc(isolate, module_constructor, "imports", WebAssemblyModuleImports,
+              1);
+  InstallFunc(isolate, module_constructor, "exports", WebAssemblyModuleExports,
+              1);
+  InstallFunc(isolate, module_constructor, "customSections",
+              WebAssemblyModuleCustomSections, 2);
   JSObject::AddProperty(module_proto, isolate->factory()->constructor_string(),
                         module_constructor, DONT_ENUM);
+  JSObject::AddProperty(module_proto, factory->to_string_tag_symbol(),
+                        v8_str(isolate, "WebAssembly.Module"), ro_attributes);
 
   // Setup Instance
   Handle<JSFunction> instance_constructor =
-      InstallFunc(isolate, webassembly, "Instance", WebAssemblyInstance);
+      InstallFunc(isolate, webassembly, "Instance", WebAssemblyInstance, 1);
   context->set_wasm_instance_constructor(*instance_constructor);
+  Handle<JSObject> instance_proto =
+      factory->NewJSObject(instance_constructor, TENURED);
+  i::Handle<i::Map> instance_map = isolate->factory()->NewMap(
+      i::JS_API_OBJECT_TYPE, i::JSObject::kHeaderSize +
+                             WasmInstanceObject::kFieldCount * i::kPointerSize);
+  JSFunction::SetInitialMap(instance_constructor, instance_map, instance_proto);
+  JSObject::AddProperty(instance_proto,
+                        isolate->factory()->constructor_string(),
+                        instance_constructor, DONT_ENUM);
+  JSObject::AddProperty(instance_proto, factory->to_string_tag_symbol(),
+                        v8_str(isolate, "WebAssembly.Instance"), ro_attributes);
 
   // Setup Table
   Handle<JSFunction> table_constructor =
-      InstallFunc(isolate, webassembly, "Table", WebAssemblyTable);
+      InstallFunc(isolate, webassembly, "Table", WebAssemblyTable, 1);
   context->set_wasm_table_constructor(*table_constructor);
   Handle<JSObject> table_proto =
       factory->NewJSObject(table_constructor, TENURED);
-  map = isolate->factory()->NewMap(
-      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize +
+  i::Handle<i::Map> table_map = isolate->factory()->NewMap(
+      i::JS_API_OBJECT_TYPE, i::JSObject::kHeaderSize +
                              WasmTableObject::kFieldCount * i::kPointerSize);
-  JSFunction::SetInitialMap(table_constructor, map, table_proto);
+  JSFunction::SetInitialMap(table_constructor, table_map, table_proto);
   JSObject::AddProperty(table_proto, isolate->factory()->constructor_string(),
                         table_constructor, DONT_ENUM);
   InstallGetter(isolate, table_proto, "length", WebAssemblyTableGetLength);
-  InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow);
-  InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet);
-  InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet);
+  InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow, 1);
+  InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet, 1);
+  InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet, 2);
+  JSObject::AddProperty(table_proto, factory->to_string_tag_symbol(),
+                        v8_str(isolate, "WebAssembly.Table"), ro_attributes);
 
   // Setup Memory
   Handle<JSFunction> memory_constructor =
-      InstallFunc(isolate, webassembly, "Memory", WebAssemblyMemory);
+      InstallFunc(isolate, webassembly, "Memory", WebAssemblyMemory, 1);
   context->set_wasm_memory_constructor(*memory_constructor);
   Handle<JSObject> memory_proto =
       factory->NewJSObject(memory_constructor, TENURED);
-  map = isolate->factory()->NewMap(
-      i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize +
+  i::Handle<i::Map> memory_map = isolate->factory()->NewMap(
+      i::JS_API_OBJECT_TYPE, i::JSObject::kHeaderSize +
                              WasmMemoryObject::kFieldCount * i::kPointerSize);
-  JSFunction::SetInitialMap(memory_constructor, map, memory_proto);
+  JSFunction::SetInitialMap(memory_constructor, memory_map, memory_proto);
   JSObject::AddProperty(memory_proto, isolate->factory()->constructor_string(),
                         memory_constructor, DONT_ENUM);
-  InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow);
+  InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow, 1);
   InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
+  JSObject::AddProperty(memory_proto, factory->to_string_tag_symbol(),
+                        v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
 
   // Setup errors
-  attributes = static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+  attributes = static_cast<PropertyAttributes>(DONT_ENUM);
   Handle<JSFunction> compile_error(
       isolate->native_context()->wasm_compile_error_function());
   JSObject::AddProperty(webassembly, isolate->factory()->CompileError_string(),
                         compile_error, attributes);
+  Handle<JSFunction> link_error(
+      isolate->native_context()->wasm_link_error_function());
+  JSObject::AddProperty(webassembly, isolate->factory()->LinkError_string(),
+                        link_error, attributes);
   Handle<JSFunction> runtime_error(
       isolate->native_context()->wasm_runtime_error_function());
   JSObject::AddProperty(webassembly, isolate->factory()->RuntimeError_string(),
                         runtime_error, attributes);
 }
 
-void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
-  if (!FLAG_expose_wasm && !FLAG_validate_asm) {
-    return;
-  }
-
-  // Setup wasm function map.
-  Handle<Context> context(global->native_context(), isolate);
-  InstallWasmMapsIfNeeded(isolate, context);
-
-  if (FLAG_expose_wasm) {
-    InstallWasmConstructors(isolate, global, context);
-  }
-}
-
-void WasmJs::InstallWasmMapsIfNeeded(Isolate* isolate,
-                                     Handle<Context> context) {
-  if (!context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) {
-    // TODO(titzer): Move this to bootstrapper.cc??
-    // TODO(titzer): Also make one for strict mode functions?
-    Handle<Map> prev_map = Handle<Map>(context->sloppy_function_map(), isolate);
-
-    InstanceType instance_type = prev_map->instance_type();
-    int internal_fields = JSObject::GetInternalFieldCount(*prev_map);
-    CHECK_EQ(0, internal_fields);
-    int pre_allocated =
-        prev_map->GetInObjectProperties() - prev_map->unused_property_fields();
-    int instance_size = 0;
-    int in_object_properties = 0;
-    int wasm_internal_fields = internal_fields + 1  // module instance object
-                               + 1                  // function arity
-                               + 1;                 // function signature
-    JSFunction::CalculateInstanceSizeHelper(instance_type, wasm_internal_fields,
-                                            0, &instance_size,
-                                            &in_object_properties);
-
-    int unused_property_fields = in_object_properties - pre_allocated;
-    Handle<Map> map = Map::CopyInitialMap(
-        prev_map, instance_size, in_object_properties, unused_property_fields);
-
-    context->set_wasm_function_map(*map);
-  }
-}
-
-static bool HasBrand(i::Handle<i::Object> value, i::Handle<i::Symbol> symbol) {
-  if (value->IsJSObject()) {
-    i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
-    Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, symbol);
-    if (has_brand.IsNothing()) return false;
-    if (has_brand.ToChecked()) return true;
-  }
-  return false;
-}
-
 bool WasmJs::IsWasmMemoryObject(Isolate* isolate, Handle<Object> value) {
   i::Handle<i::Symbol> symbol(isolate->context()->wasm_memory_sym(), isolate);
   return HasBrand(value, symbol);
diff --git a/src/wasm/wasm-js.h b/src/wasm/wasm-js.h
index f5b9596..05d5ea3 100644
--- a/src/wasm/wasm-js.h
+++ b/src/wasm/wasm-js.h
@@ -13,16 +13,7 @@
 // Exposes a WASM API to JavaScript through the V8 API.
 class WasmJs {
  public:
-  static void Install(Isolate* isolate, Handle<JSGlobalObject> global_object);
-
-  V8_EXPORT_PRIVATE static void InstallWasmModuleSymbolIfNeeded(
-      Isolate* isolate, Handle<JSGlobalObject> global, Handle<Context> context);
-
-  V8_EXPORT_PRIVATE static void InstallWasmMapsIfNeeded(
-      Isolate* isolate, Handle<Context> context);
-  static void InstallWasmConstructors(Isolate* isolate,
-                                      Handle<JSGlobalObject> global,
-                                      Handle<Context> context);
+  V8_EXPORT_PRIVATE static void Install(Isolate* isolate);
 
   // WebAssembly.Table.
   static bool IsWasmTableObject(Isolate* isolate, Handle<Object> value);
diff --git a/src/wasm/wasm-limits.h b/src/wasm/wasm-limits.h
new file mode 100644
index 0000000..bf657a8
--- /dev/null
+++ b/src/wasm/wasm-limits.h
@@ -0,0 +1,51 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_LIMITS_H_
+#define V8_WASM_WASM_LIMITS_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// The following limits are imposed by V8 on WebAssembly modules.
+// The limits are agreed upon with other engines for consistency.
+const size_t kV8MaxWasmTypes = 1000000;
+const size_t kV8MaxWasmFunctions = 1000000;
+const size_t kV8MaxWasmImports = 100000;
+const size_t kV8MaxWasmExports = 100000;
+const size_t kV8MaxWasmGlobals = 1000000;
+const size_t kV8MaxWasmDataSegments = 100000;
+// Don't use this limit directly, but use the value of FLAG_wasm_max_mem_pages.
+const size_t kV8MaxWasmMemoryPages = 16384;  // = 1 GiB
+const size_t kV8MaxWasmStringSize = 100000;
+const size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024;  // = 1 GiB
+const size_t kV8MaxWasmFunctionSize = 128 * 1024;
+const size_t kV8MaxWasmFunctionLocals = 50000;
+const size_t kV8MaxWasmFunctionParams = 1000;
+const size_t kV8MaxWasmFunctionMultiReturns = 1000;
+const size_t kV8MaxWasmFunctionReturns = 1;
+// Don't use this limit directly, but use the value of FLAG_wasm_max_table_size.
+const size_t kV8MaxWasmTableSize = 10000000;
+const size_t kV8MaxWasmTableEntries = 10000000;
+const size_t kV8MaxWasmTables = 1;
+const size_t kV8MaxWasmMemories = 1;
+
+const size_t kSpecMaxWasmMemoryPages = 65536;
+const size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
+
+const uint64_t kWasmMaxHeapOffset =
+    static_cast<uint64_t>(
+        std::numeric_limits<uint32_t>::max())  // maximum base value
+    + std::numeric_limits<uint32_t>::max();    // maximum index value
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_WASM_LIMITS_H_
diff --git a/src/wasm/wasm-macro-gen.h b/src/wasm/wasm-macro-gen.h
index ce2f843..931ad92 100644
--- a/src/wasm/wasm-macro-gen.h
+++ b/src/wasm/wasm-macro-gen.h
@@ -59,6 +59,7 @@
 // Control.
 //------------------------------------------------------------------------------
 #define WASM_NOP kExprNop
+#define WASM_END kExprEnd
 
 #define ARITY_0 0
 #define ARITY_1 1
@@ -71,13 +72,13 @@
 #define WASM_BLOCK(...) kExprBlock, kLocalVoid, __VA_ARGS__, kExprEnd
 
 #define WASM_BLOCK_T(t, ...)                                       \
-  kExprBlock, static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t)), \
+  kExprBlock, static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t)), \
       __VA_ARGS__, kExprEnd
 
 #define WASM_BLOCK_TT(t1, t2, ...)                                       \
   kExprBlock, kMultivalBlock, 0,                                         \
-      static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t1)),              \
-      static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t2)), __VA_ARGS__, \
+      static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t1)),              \
+      static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t2)), __VA_ARGS__, \
       kExprEnd
 
 #define WASM_BLOCK_I(...) kExprBlock, kLocalI32, __VA_ARGS__, kExprEnd
@@ -99,13 +100,13 @@
   cond, kExprIf, kLocalVoid, tstmt, kExprElse, fstmt, kExprEnd
 
 #define WASM_IF_ELSE_T(t, cond, tstmt, fstmt)                                \
-  cond, kExprIf, static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t)), tstmt, \
+  cond, kExprIf, static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t)), tstmt, \
       kExprElse, fstmt, kExprEnd
 
 #define WASM_IF_ELSE_TT(t1, t2, cond, tstmt, fstmt)                           \
   cond, kExprIf, kMultivalBlock, 0,                                           \
-      static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t1)),                   \
-      static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t2)), tstmt, kExprElse, \
+      static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t1)),                   \
+      static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t2)), tstmt, kExprElse, \
       fstmt, kExprEnd
 
 #define WASM_IF_ELSE_I(cond, tstmt, fstmt) \
@@ -140,9 +141,8 @@
 // Misc expressions.
 //------------------------------------------------------------------------------
 #define WASM_ID(...) __VA_ARGS__
-#define WASM_ZERO kExprI8Const, 0
-#define WASM_ONE kExprI8Const, 1
-#define WASM_I8(val) kExprI8Const, static_cast<byte>(val)
+#define WASM_ZERO kExprI32Const, 0
+#define WASM_ONE kExprI32Const, 1
 
 #define I32V_MIN(length) -(1 << (6 + (7 * ((length) - 1))))
 #define I32V_MAX(length) ((1 << (6 + (7 * ((length) - 1)))) - 1)
@@ -195,7 +195,7 @@
     pos = WriteUint32v(buffer, pos, static_cast<uint32_t>(local_decls.size()));
     for (size_t i = 0; i < local_decls.size(); ++i) {
       pos = WriteUint32v(buffer, pos, local_decls[i].first);
-      buffer[pos++] = WasmOpcodes::LocalTypeCodeFor(local_decls[i].second);
+      buffer[pos++] = WasmOpcodes::ValueTypeCodeFor(local_decls[i].second);
     }
     DCHECK_EQ(Size(), pos);
     return pos;
@@ -203,7 +203,7 @@
 
   // Add locals declarations to this helper. Return the index of the newly added
   // local(s), with an optional adjustment for the parameters.
-  uint32_t AddLocals(uint32_t count, LocalType type) {
+  uint32_t AddLocals(uint32_t count, ValueType type) {
     uint32_t result =
         static_cast<uint32_t>(total + (sig ? sig->parameter_count() : 0));
     total += count;
@@ -211,7 +211,7 @@
       count += local_decls.back().first;
       local_decls.pop_back();
     }
-    local_decls.push_back(std::pair<uint32_t, LocalType>(count, type));
+    local_decls.push_back(std::pair<uint32_t, ValueType>(count, type));
     return result;
   }
 
@@ -227,7 +227,7 @@
 
  private:
   FunctionSig* sig;
-  ZoneVector<std::pair<uint32_t, LocalType>> local_decls;
+  ZoneVector<std::pair<uint32_t, ValueType>> local_decls;
   size_t total;
 
   size_t SizeofUint32v(uint32_t val) const {
@@ -372,15 +372,16 @@
       static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 8),  \
       static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 16), \
       static_cast<byte>(bit_cast<uint32_t>(static_cast<float>(val)) >> 24)
-#define WASM_F64(val)                                        \
-  kExprF64Const, static_cast<byte>(bit_cast<uint64_t>(val)), \
-      static_cast<byte>(bit_cast<uint64_t>(val) >> 8),       \
-      static_cast<byte>(bit_cast<uint64_t>(val) >> 16),      \
-      static_cast<byte>(bit_cast<uint64_t>(val) >> 24),      \
-      static_cast<byte>(bit_cast<uint64_t>(val) >> 32),      \
-      static_cast<byte>(bit_cast<uint64_t>(val) >> 40),      \
-      static_cast<byte>(bit_cast<uint64_t>(val) >> 48),      \
-      static_cast<byte>(bit_cast<uint64_t>(val) >> 56)
+#define WASM_F64(val)                                                        \
+  kExprF64Const,                                                             \
+      static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val))),       \
+      static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 8),  \
+      static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 16), \
+      static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 24), \
+      static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 32), \
+      static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 40), \
+      static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 48), \
+      static_cast<byte>(bit_cast<uint64_t>(static_cast<double>(val)) >> 56)
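+// Informative: WASM_F64(val) expands to kExprF64Const followed by the
+// eight bytes of the IEEE-754 double encoding, least-significant first.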
 #define WASM_GET_LOCAL(index) kExprGetLocal, static_cast<byte>(index)
 #define WASM_SET_LOCAL(index, val) val, kExprSetLocal, static_cast<byte>(index)
 #define WASM_TEE_LOCAL(index, val) val, kExprTeeLocal, static_cast<byte>(index)
@@ -447,15 +448,15 @@
 #define WASM_WHILE(x, y)                                              \
   kExprLoop, kLocalVoid, x, kExprIf, kLocalVoid, y, kExprBr, DEPTH_1, \
       kExprEnd, kExprEnd
-#define WASM_INC_LOCAL(index)                                            \
-  kExprGetLocal, static_cast<byte>(index), kExprI8Const, 1, kExprI32Add, \
+#define WASM_INC_LOCAL(index)                                             \
+  kExprGetLocal, static_cast<byte>(index), kExprI32Const, 1, kExprI32Add, \
       kExprTeeLocal, static_cast<byte>(index)
 #define WASM_INC_LOCAL_BYV(index, count)                    \
-  kExprGetLocal, static_cast<byte>(index), kExprI8Const,    \
+  kExprGetLocal, static_cast<byte>(index), kExprI32Const,   \
       static_cast<byte>(count), kExprI32Add, kExprTeeLocal, \
       static_cast<byte>(index)
 #define WASM_INC_LOCAL_BY(index, count)                     \
-  kExprGetLocal, static_cast<byte>(index), kExprI8Const,    \
+  kExprGetLocal, static_cast<byte>(index), kExprI32Const,   \
       static_cast<byte>(count), kExprI32Add, kExprSetLocal, \
       static_cast<byte>(index)
 #define WASM_UNOP(opcode, x) x, static_cast<byte>(opcode)
@@ -621,14 +622,32 @@
 //------------------------------------------------------------------------------
 // Simd Operations.
 //------------------------------------------------------------------------------
-#define WASM_SIMD_I32x4_SPLAT(x) x, kSimdPrefix, kExprI32x4Splat & 0xff
-#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
-  x, kSimdPrefix, kExprI32x4ExtractLane & 0xff, static_cast<byte>(lane)
-#define WASM_SIMD_I32x4_ADD(x, y) x, y, kSimdPrefix, kExprI32x4Add & 0xff
-#define WASM_SIMD_F32x4_SPLAT(x) x, kSimdPrefix, kExprF32x4Splat & 0xff
-#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
-  x, kSimdPrefix, kExprF32x4ExtractLane & 0xff, static_cast<byte>(lane)
-#define WASM_SIMD_F32x4_ADD(x, y) x, y, kSimdPrefix, kExprF32x4Add & 0xff
+// TODO(bbudge): Migrate these into tests.
+#define WASM_SIMD_F32x4_SPLAT(x) \
+  x, kSimdPrefix, static_cast<byte>(kExprF32x4Splat)
+#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x)               \
+  x, kSimdPrefix, static_cast<byte>(kExprF32x4ExtractLane), \
+      static_cast<byte>(lane)
+#define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y)               \
+  x, y, kSimdPrefix, static_cast<byte>(kExprF32x4ReplaceLane), \
+      static_cast<byte>(lane)
+#define WASM_SIMD_F32x4_ADD(x, y) \
+  x, y, kSimdPrefix, static_cast<byte>(kExprF32x4Add)
+#define WASM_SIMD_F32x4_SUB(x, y) \
+  x, y, kSimdPrefix, static_cast<byte>(kExprF32x4Sub)
+
+#define WASM_SIMD_I32x4_SPLAT(x) \
+  x, kSimdPrefix, static_cast<byte>(kExprI32x4Splat)
+#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x)               \
+  x, kSimdPrefix, static_cast<byte>(kExprI32x4ExtractLane), \
+      static_cast<byte>(lane)
+#define WASM_SIMD_I32x4_REPLACE_LANE(lane, x, y)               \
+  x, y, kSimdPrefix, static_cast<byte>(kExprI32x4ReplaceLane), \
+      static_cast<byte>(lane)
+#define WASM_SIMD_I32x4_ADD(x, y) \
+  x, y, kSimdPrefix, static_cast<byte>(kExprI32x4Add)
+#define WASM_SIMD_I32x4_SUB(x, y) \
+  x, y, kSimdPrefix, static_cast<byte>(kExprI32x4Sub)
 
 #define SIG_ENTRY_v_v kWasmFunctionTypeForm, 0, 0
 #define SIZEOF_SIG_ENTRY_v_v 3
diff --git a/src/wasm/wasm-module-builder.cc b/src/wasm/wasm-module-builder.cc
index 290e98e..a9c724a 100644
--- a/src/wasm/wasm-module-builder.cc
+++ b/src/wasm/wasm-module-builder.cc
@@ -5,11 +5,13 @@
 #include "src/signature.h"
 
 #include "src/handles.h"
+#include "src/objects-inl.h"
 #include "src/v8.h"
 #include "src/zone/zone-containers.h"
 
-#include "src/wasm/ast-decoder.h"
+#include "src/wasm/function-body-decoder.h"
 #include "src/wasm/leb-helper.h"
+#include "src/wasm/module-decoder.h"
 #include "src/wasm/wasm-macro-gen.h"
 #include "src/wasm/wasm-module-builder.h"
 #include "src/wasm/wasm-module.h"
@@ -50,11 +52,10 @@
     : builder_(builder),
       locals_(builder->zone()),
       signature_index_(0),
-      exported_(0),
       func_index_(static_cast<uint32_t>(builder->functions_.size())),
       body_(builder->zone()),
       name_(builder->zone()),
-      exported_name_(builder->zone()),
+      exported_names_(builder->zone()),
       i32_temps_(builder->zone()),
       i64_temps_(builder->zone()),
       f32_temps_(builder->zone()),
@@ -62,13 +63,20 @@
       direct_calls_(builder->zone()),
       asm_offsets_(builder->zone(), 8) {}
 
-void WasmFunctionBuilder::EmitVarInt(uint32_t val) {
-  byte buffer[8];
+void WasmFunctionBuilder::EmitVarInt(int32_t val) {
+  byte buffer[5];
+  byte* ptr = buffer;
+  LEBHelper::write_i32v(&ptr, val);
+  DCHECK_GE(5, ptr - buffer);
+  body_.insert(body_.end(), buffer, ptr);
+}
+
+void WasmFunctionBuilder::EmitVarUint(uint32_t val) {
+  byte buffer[5];
   byte* ptr = buffer;
   LEBHelper::write_u32v(&ptr, val);
-  for (byte* p = buffer; p < ptr; p++) {
-    body_.push_back(*p);
-  }
+  DCHECK_GE(5, ptr - buffer);
+  body_.insert(body_.end(), buffer, ptr);
 }
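+// Informative LEB128 examples: EmitVarUint(300) appends the bytes
+// 0xAC 0x02, while EmitVarInt(-1) appends the single byte 0x7F.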
 
 void WasmFunctionBuilder::SetSignature(FunctionSig* sig) {
@@ -77,21 +85,21 @@
   signature_index_ = builder_->AddSignature(sig);
 }
 
-uint32_t WasmFunctionBuilder::AddLocal(LocalType type) {
+uint32_t WasmFunctionBuilder::AddLocal(ValueType type) {
   DCHECK(locals_.has_sig());
   return locals_.AddLocals(1, type);
 }
 
 void WasmFunctionBuilder::EmitGetLocal(uint32_t local_index) {
-  EmitWithVarInt(kExprGetLocal, local_index);
+  EmitWithVarUint(kExprGetLocal, local_index);
 }
 
 void WasmFunctionBuilder::EmitSetLocal(uint32_t local_index) {
-  EmitWithVarInt(kExprSetLocal, local_index);
+  EmitWithVarUint(kExprSetLocal, local_index);
 }
 
 void WasmFunctionBuilder::EmitTeeLocal(uint32_t local_index) {
-  EmitWithVarInt(kExprTeeLocal, local_index);
+  EmitWithVarUint(kExprTeeLocal, local_index);
 }
 
 void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
@@ -116,20 +124,19 @@
   body_.push_back(imm2);
 }
 
-void WasmFunctionBuilder::EmitWithVarInt(WasmOpcode opcode,
-                                         uint32_t immediate) {
+void WasmFunctionBuilder::EmitWithVarInt(WasmOpcode opcode, int32_t immediate) {
   body_.push_back(static_cast<byte>(opcode));
   EmitVarInt(immediate);
 }
 
+void WasmFunctionBuilder::EmitWithVarUint(WasmOpcode opcode,
+                                          uint32_t immediate) {
+  body_.push_back(static_cast<byte>(opcode));
+  EmitVarUint(immediate);
+}
+
 void WasmFunctionBuilder::EmitI32Const(int32_t value) {
-  // TODO(titzer): variable-length signed and unsigned i32 constants.
-  if (-128 <= value && value <= 127) {
-    EmitWithU8(kExprI8Const, static_cast<byte>(value));
-  } else {
-    byte code[] = {WASM_I32V_5(value)};
-    EmitCode(code, sizeof(code));
-  }
+  EmitWithVarInt(kExprI32Const, value);
 }
 
 void WasmFunctionBuilder::EmitDirectCallIndex(uint32_t index) {
@@ -141,12 +148,9 @@
   EmitCode(code, sizeof(code));
 }
 
-void WasmFunctionBuilder::Export() { exported_ = true; }
-
 void WasmFunctionBuilder::ExportAs(Vector<const char> name) {
-  exported_ = true;
-  exported_name_.resize(name.length());
-  memcpy(exported_name_.data(), name.start(), name.length());
+  exported_names_.push_back(ZoneVector<char>(
+      name.start(), name.start() + name.length(), builder_->zone()));
 }
 
 void WasmFunctionBuilder::SetName(Vector<const char> name) {
@@ -154,8 +158,9 @@
   memcpy(name_.data(), name.start(), name.length());
 }
 
-void WasmFunctionBuilder::AddAsmWasmOffset(int asm_position) {
-  // We only want to emit one mapping per byte offset:
+void WasmFunctionBuilder::AddAsmWasmOffset(int call_position,
+                                           int to_number_position) {
+  // We only want to emit one mapping per byte offset.
   DCHECK(asm_offsets_.size() == 0 || body_.size() > last_asm_byte_offset_);
 
   DCHECK_LE(body_.size(), kMaxUInt32);
@@ -163,22 +168,31 @@
   asm_offsets_.write_u32v(byte_offset - last_asm_byte_offset_);
   last_asm_byte_offset_ = byte_offset;
 
-  DCHECK_GE(asm_position, 0);
-  asm_offsets_.write_i32v(asm_position - last_asm_source_position_);
-  last_asm_source_position_ = asm_position;
+  DCHECK_GE(call_position, 0);
+  asm_offsets_.write_i32v(call_position - last_asm_source_position_);
+
+  DCHECK_GE(to_number_position, 0);
+  asm_offsets_.write_i32v(to_number_position - call_position);
+  last_asm_source_position_ = to_number_position;
+}
+
+void WasmFunctionBuilder::SetAsmFunctionStartPosition(int position) {
+  DCHECK_EQ(0, asm_func_start_source_position_);
+  DCHECK_LE(0, position);
+  // Must be called before emitting any asm.js source position.
+  DCHECK_EQ(0, asm_offsets_.size());
+  asm_func_start_source_position_ = position;
+  last_asm_source_position_ = position;
 }
 
 void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
   buffer.write_u32v(signature_index_);
 }
 
-void WasmFunctionBuilder::WriteExport(ZoneBuffer& buffer) const {
-  if (exported_) {
-    const ZoneVector<char>* exported_name =
-        exported_name_.size() == 0 ? &name_ : &exported_name_;
-    buffer.write_size(exported_name->size());
-    buffer.write(reinterpret_cast<const byte*>(exported_name->data()),
-                 exported_name->size());
+void WasmFunctionBuilder::WriteExports(ZoneBuffer& buffer) const {
+  for (const auto& name : exported_names_) {
+    buffer.write_size(name.size());
+    buffer.write(reinterpret_cast<const byte*>(name.data()), name.size());
     buffer.write_u8(kExternalFunction);
     buffer.write_u32v(func_index_ +
                       static_cast<uint32_t>(builder_->imports_.size()));
@@ -204,14 +218,19 @@
 }
 
 void WasmFunctionBuilder::WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const {
-  if (asm_offsets_.size() == 0) {
+  if (asm_func_start_source_position_ == 0 && asm_offsets_.size() == 0) {
     buffer.write_size(0);
     return;
   }
-  buffer.write_size(asm_offsets_.size() + kInt32Size);
+  size_t locals_enc_size = LEBHelper::sizeof_u32v(locals_.Size());
+  size_t func_start_size =
+      LEBHelper::sizeof_u32v(asm_func_start_source_position_);
+  buffer.write_size(asm_offsets_.size() + locals_enc_size + func_start_size);
   // Offset of the recorded byte offsets.
   DCHECK_GE(kMaxUInt32, locals_.Size());
-  buffer.write_u32(static_cast<uint32_t>(locals_.Size()));
+  buffer.write_u32v(static_cast<uint32_t>(locals_.Size()));
+  // Start position of the function.
+  buffer.write_u32v(asm_func_start_source_position_);
   buffer.write(asm_offsets_.begin(), asm_offsets_.size());
 }
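+// Informative: the emitted offset table is a sequence of LEB128 values:
+// total byte size, the size of the locals, the function start position,
+// then one (byte offset, call position, to-number position) delta-encoded
+// triple per recorded call.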
 
@@ -271,8 +290,15 @@
   }
 }
 
-void WasmModuleBuilder::AddIndirectFunction(uint32_t index) {
-  indirect_functions_.push_back(index);
+uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
+  uint32_t ret = static_cast<uint32_t>(indirect_functions_.size());
+  indirect_functions_.resize(indirect_functions_.size() + count);
+  return ret;
+}
+
+void WasmModuleBuilder::SetIndirectFunction(uint32_t indirect,
+                                            uint32_t direct) {
+  indirect_functions_[indirect] = direct;
 }
 
 uint32_t WasmModuleBuilder::AddImport(const char* name, int name_length,
@@ -285,7 +311,7 @@
   start_function_index_ = function->func_index();
 }
 
-uint32_t WasmModuleBuilder::AddGlobal(LocalType type, bool exported,
+uint32_t WasmModuleBuilder::AddGlobal(ValueType type, bool exported,
                                       bool mutability,
                                       const WasmInitExpr& init) {
   globals_.push_back({type, exported, mutability, init});
@@ -309,11 +335,11 @@
       buffer.write_u8(kWasmFunctionTypeForm);
       buffer.write_size(sig->parameter_count());
       for (size_t j = 0; j < sig->parameter_count(); j++) {
-        buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(sig->GetParam(j)));
+        buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(sig->GetParam(j)));
       }
       buffer.write_size(sig->return_count());
       for (size_t j = 0; j < sig->return_count(); j++) {
-        buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(sig->GetReturn(j)));
+        buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(sig->GetReturn(j)));
       }
     }
     FixupSection(buffer, start);
@@ -324,10 +350,10 @@
     size_t start = EmitSection(kImportSectionCode, buffer);
     buffer.write_size(imports_.size());
     for (auto import : imports_) {
-      buffer.write_u32v(import.name_length);  // module name length
-      buffer.write(reinterpret_cast<const byte*>(import.name),  // module name
+      buffer.write_u32v(0);                   // module name length
+      buffer.write_u32v(import.name_length);  // field name length
+      buffer.write(reinterpret_cast<const byte*>(import.name),  // field name
                    import.name_length);
-      buffer.write_u32v(0);  // field name length
       buffer.write_u8(kExternalFunction);
       buffer.write_u32v(import.sig_index);
     }
@@ -341,7 +367,7 @@
     buffer.write_size(functions_.size());
     for (auto function : functions_) {
       function->WriteSignature(buffer);
-      if (function->exported()) exports++;
+      exports += function->exported_names_.size();
       if (function->name_.size() > 0) has_names = true;
     }
     FixupSection(buffer, start);
@@ -374,29 +400,29 @@
     buffer.write_size(globals_.size());
 
     for (auto global : globals_) {
-      buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(global.type));
+      buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(global.type));
       buffer.write_u8(global.mutability ? 1 : 0);
       switch (global.init.kind) {
         case WasmInitExpr::kI32Const: {
-          DCHECK_EQ(kAstI32, global.type);
+          DCHECK_EQ(kWasmI32, global.type);
           const byte code[] = {WASM_I32V_5(global.init.val.i32_const)};
           buffer.write(code, sizeof(code));
           break;
         }
         case WasmInitExpr::kI64Const: {
-          DCHECK_EQ(kAstI64, global.type);
+          DCHECK_EQ(kWasmI64, global.type);
           const byte code[] = {WASM_I64V_10(global.init.val.i64_const)};
           buffer.write(code, sizeof(code));
           break;
         }
         case WasmInitExpr::kF32Const: {
-          DCHECK_EQ(kAstF32, global.type);
+          DCHECK_EQ(kWasmF32, global.type);
           const byte code[] = {WASM_F32(global.init.val.f32_const)};
           buffer.write(code, sizeof(code));
           break;
         }
         case WasmInitExpr::kF64Const: {
-          DCHECK_EQ(kAstF64, global.type);
+          DCHECK_EQ(kWasmF64, global.type);
           const byte code[] = {WASM_F64(global.init.val.f64_const)};
           buffer.write(code, sizeof(code));
           break;
@@ -410,22 +436,22 @@
         default: {
           // No initializer, emit a default value.
           switch (global.type) {
-            case kAstI32: {
+            case kWasmI32: {
               const byte code[] = {WASM_I32V_1(0)};
               buffer.write(code, sizeof(code));
               break;
             }
-            case kAstI64: {
+            case kWasmI64: {
               const byte code[] = {WASM_I64V_1(0)};
               buffer.write(code, sizeof(code));
               break;
             }
-            case kAstF32: {
+            case kWasmF32: {
               const byte code[] = {WASM_F32(0.0)};
               buffer.write(code, sizeof(code));
               break;
             }
-            case kAstF64: {
+            case kWasmF64: {
               const byte code[] = {WASM_F64(0.0)};
               buffer.write(code, sizeof(code));
               break;
@@ -444,7 +470,7 @@
   if (exports > 0) {
     size_t start = EmitSection(kExportSectionCode, buffer);
     buffer.write_u32v(exports);
-    for (auto function : functions_) function->WriteExport(buffer);
+    for (auto function : functions_) function->WriteExports(buffer);
     FixupSection(buffer, start);
   }
 
@@ -517,10 +543,8 @@
     }
     for (auto function : functions_) {
       buffer.write_size(function->name_.size());
-      if (function->name_.size() > 0) {
-        buffer.write(reinterpret_cast<const byte*>(&function->name_[0]),
-                     function->name_.size());
-      }
+      buffer.write(reinterpret_cast<const byte*>(function->name_.data()),
+                   function->name_.size());
       buffer.write_u8(0);
     }
     FixupSection(buffer, start);
@@ -534,6 +558,8 @@
   for (auto function : functions_) {
     function->WriteAsmWasmOffsetTable(buffer);
   }
+  // Append a 0 to indicate that this is an encoded table.
+  buffer.write_u8(0);
 }
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/wasm-module-builder.h b/src/wasm/wasm-module-builder.h
index d35313e..c6903cd 100644
--- a/src/wasm/wasm-module-builder.h
+++ b/src/wasm/wasm-module-builder.h
@@ -120,8 +120,9 @@
  public:
   // Building methods.
   void SetSignature(FunctionSig* sig);
-  uint32_t AddLocal(LocalType type);
-  void EmitVarInt(uint32_t val);
+  uint32_t AddLocal(ValueType type);
+  void EmitVarInt(int32_t val);
+  void EmitVarUint(uint32_t val);
   void EmitCode(const byte* code, uint32_t code_size);
   void Emit(WasmOpcode opcode);
   void EmitGetLocal(uint32_t index);
@@ -130,19 +131,19 @@
   void EmitI32Const(int32_t val);
   void EmitWithU8(WasmOpcode opcode, const byte immediate);
   void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
-  void EmitWithVarInt(WasmOpcode opcode, uint32_t immediate);
+  void EmitWithVarInt(WasmOpcode opcode, int32_t immediate);
+  void EmitWithVarUint(WasmOpcode opcode, uint32_t immediate);
   void EmitDirectCallIndex(uint32_t index);
-  void Export();
   void ExportAs(Vector<const char> name);
   void SetName(Vector<const char> name);
-  void AddAsmWasmOffset(int asm_position);
+  void AddAsmWasmOffset(int call_position, int to_number_position);
+  void SetAsmFunctionStartPosition(int position);
 
   void WriteSignature(ZoneBuffer& buffer) const;
-  void WriteExport(ZoneBuffer& buffer) const;
+  void WriteExports(ZoneBuffer& buffer) const;
   void WriteBody(ZoneBuffer& buffer) const;
   void WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const;
 
-  bool exported() { return exported_; }
   uint32_t func_index() { return func_index_; }
   FunctionSig* signature();
 
@@ -159,11 +160,10 @@
   WasmModuleBuilder* builder_;
   LocalDeclEncoder locals_;
   uint32_t signature_index_;
-  bool exported_;
   uint32_t func_index_;
   ZoneVector<uint8_t> body_;
   ZoneVector<char> name_;
-  ZoneVector<char> exported_name_;
+  ZoneVector<ZoneVector<char>> exported_names_;
   ZoneVector<uint32_t> i32_temps_;
   ZoneVector<uint32_t> i64_temps_;
   ZoneVector<uint32_t> f32_temps_;
@@ -174,22 +174,23 @@
   ZoneBuffer asm_offsets_;
   uint32_t last_asm_byte_offset_ = 0;
   uint32_t last_asm_source_position_ = 0;
+  uint32_t asm_func_start_source_position_ = 0;
 };
 
 class WasmTemporary {
  public:
-  WasmTemporary(WasmFunctionBuilder* builder, LocalType type) {
+  WasmTemporary(WasmFunctionBuilder* builder, ValueType type) {
     switch (type) {
-      case kAstI32:
+      case kWasmI32:
         temporary_ = &builder->i32_temps_;
         break;
-      case kAstI64:
+      case kWasmI64:
         temporary_ = &builder->i64_temps_;
         break;
-      case kAstF32:
+      case kWasmF32:
         temporary_ = &builder->f32_temps_;
         break;
-      case kAstF64:
+      case kWasmF64:
         temporary_ = &builder->f64_temps_;
         break;
       default:
@@ -226,11 +227,12 @@
     imports_[index].name_length = name_length;
   }
   WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
-  uint32_t AddGlobal(LocalType type, bool exported, bool mutability = true,
+  uint32_t AddGlobal(ValueType type, bool exported, bool mutability = true,
                      const WasmInitExpr& init = WasmInitExpr());
   void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
   uint32_t AddSignature(FunctionSig* sig);
-  void AddIndirectFunction(uint32_t index);
+  uint32_t AllocateIndirectFunctions(uint32_t count);
+  void SetIndirectFunction(uint32_t indirect, uint32_t direct);
   void MarkStartFunction(WasmFunctionBuilder* builder);
 
   // Writing methods.
@@ -256,7 +258,7 @@
   };
 
   struct WasmGlobal {
-    LocalType type;
+    ValueType type;
     bool exported;
     bool mutability;
     WasmInitExpr init;
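
The WasmTemporary rename above is mechanical (LocalType becomes ValueType),
but the pattern it implements deserves a note: each value type keeps a free
list of local indices, and the RAII wrapper returns its slot on scope exit so
temporaries are recycled instead of growing the local count. A standalone
analogue under assumed names (not the V8 types):

#include <cstdint>
#include <vector>

class ScopedTemp {
 public:
  ScopedTemp(std::vector<uint32_t>* pool, uint32_t* next_index) : pool_(pool) {
    if (pool_->empty()) {
      index_ = (*next_index)++;  // mint a fresh local slot
    } else {
      index_ = pool_->back();    // reuse a previously released slot
      pool_->pop_back();
    }
  }
  ~ScopedTemp() { pool_->push_back(index_); }  // recycle on scope exit
  uint32_t index() const { return index_; }

 private:
  std::vector<uint32_t>* pool_;
  uint32_t index_;
};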
diff --git a/src/wasm/wasm-module.cc b/src/wasm/wasm-module.cc
index 79b99fe..c218805 100644
--- a/src/wasm/wasm-module.cc
+++ b/src/wasm/wasm-module.cc
@@ -4,25 +4,28 @@
 
 #include <memory>
 
+#include "src/assembler-inl.h"
+#include "src/base/adapters.h"
 #include "src/base/atomic-utils.h"
 #include "src/code-stubs.h"
-
-#include "src/macro-assembler.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/debug/interface-types.h"
 #include "src/objects.h"
 #include "src/property-descriptor.h"
 #include "src/simulator.h"
 #include "src/snapshot/snapshot.h"
 #include "src/v8.h"
 
-#include "src/wasm/ast-decoder.h"
+#include "src/asmjs/asm-wasm-builder.h"
+#include "src/wasm/function-body-decoder.h"
 #include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-code-specialization.h"
 #include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-limits.h"
 #include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-objects.h"
 #include "src/wasm/wasm-result.h"
 
-#include "src/compiler/wasm-compiler.h"
-
 using namespace v8::internal;
 using namespace v8::internal::wasm;
 namespace base = v8::base;
@@ -40,204 +43,39 @@
 namespace {
 
 static const int kInvalidSigIndex = -1;
-static const int kPlaceholderMarker = 1000000000;
 
 byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
   return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
 }
 
-MaybeHandle<String> ExtractStringFromModuleBytes(
-    Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
-    uint32_t offset, uint32_t size) {
-  // TODO(wasm): cache strings from modules if it's a performance win.
-  Handle<SeqOneByteString> module_bytes = compiled_module->module_bytes();
-  DCHECK_GE(static_cast<size_t>(module_bytes->length()), offset);
-  DCHECK_GE(static_cast<size_t>(module_bytes->length() - offset), size);
-  Address raw = module_bytes->GetCharsAddress() + offset;
-  if (!unibrow::Utf8::Validate(reinterpret_cast<const byte*>(raw), size))
-    return {};  // UTF8 decoding error for name.
-  return isolate->factory()->NewStringFromUtf8SubString(
-      module_bytes, static_cast<int>(offset), static_cast<int>(size));
+static void MemoryFinalizer(const v8::WeakCallbackInfo<void>& data) {
+  DisallowHeapAllocation no_gc;
+  JSArrayBuffer** p = reinterpret_cast<JSArrayBuffer**>(data.GetParameter());
+  JSArrayBuffer* buffer = *p;
+
+  if (!buffer->was_neutered()) {
+    void* memory = buffer->backing_store();
+    DCHECK(memory != nullptr);
+    base::OS::Free(memory,
+                   RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize()));
+
+    data.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(
+        -buffer->byte_length()->Number());
+  }
+
+  GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
 }
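
MemoryFinalizer above uses the internal GlobalHandles machinery; the same idea
expressed with the public embedder API is a weak handle whose callback frees
the external backing store and rebalances the external-memory accounting. A
hedged sketch with a hypothetical External bookkeeping struct and simplified
ownership:

#include <cstdint>
#include <cstdlib>
#include <v8.h>

struct External {
  void* memory;
  int64_t size;
};

void OnBufferCollected(const v8::WeakCallbackInfo<External>& info) {
  External* ext = info.GetParameter();
  std::free(ext->memory);  // release the externally owned store
  info.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(-ext->size);
  delete ext;
}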
 
-void ReplaceReferenceInCode(Handle<Code> code, Handle<Object> old_ref,
-                            Handle<Object> new_ref) {
-  for (RelocIterator it(*code, 1 << RelocInfo::EMBEDDED_OBJECT); !it.done();
-       it.next()) {
-    if (it.rinfo()->target_object() == *old_ref) {
-      it.rinfo()->set_target_object(*new_ref);
-    }
-  }
-}
-
-Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
-  if (size > (WasmModule::kV8MaxPages * WasmModule::kPageSize)) {
-    // TODO(titzer): lift restriction on maximum memory allocated here.
-    return Handle<JSArrayBuffer>::null();
-  }
-  void* memory = isolate->array_buffer_allocator()->Allocate(size);
-  if (memory == nullptr) {
-    return Handle<JSArrayBuffer>::null();
-  }
-
-#if DEBUG
-  // Double check the API allocator actually zero-initialized the memory.
-  const byte* bytes = reinterpret_cast<const byte*>(memory);
-  for (size_t i = 0; i < size; ++i) {
-    DCHECK_EQ(0, bytes[i]);
-  }
+#if V8_TARGET_ARCH_64_BIT
+const bool kGuardRegionsSupported = true;
+#else
+const bool kGuardRegionsSupported = false;
 #endif
 
-  Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
-  JSArrayBuffer::Setup(buffer, isolate, false, memory, static_cast<int>(size));
-  buffer->set_is_neuterable(false);
-  return buffer;
+bool EnableGuardRegions() {
+  return FLAG_wasm_guard_pages && kGuardRegionsSupported;
 }
 
-void RelocateMemoryReferencesInCode(Handle<FixedArray> code_table,
-                                    Address old_start, Address start,
-                                    uint32_t prev_size, uint32_t new_size) {
-  for (int i = 0; i < code_table->length(); ++i) {
-    DCHECK(code_table->get(i)->IsCode());
-    Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
-    AllowDeferredHandleDereference embedding_raw_address;
-    int mask = (1 << RelocInfo::WASM_MEMORY_REFERENCE) |
-               (1 << RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
-    for (RelocIterator it(*code, mask); !it.done(); it.next()) {
-      it.rinfo()->update_wasm_memory_reference(old_start, start, prev_size,
-                                               new_size);
-    }
-  }
-}
-
-void RelocateGlobals(Handle<FixedArray> code_table, Address old_start,
-                     Address globals_start) {
-  for (int i = 0; i < code_table->length(); ++i) {
-    DCHECK(code_table->get(i)->IsCode());
-    Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
-    AllowDeferredHandleDereference embedding_raw_address;
-    int mask = 1 << RelocInfo::WASM_GLOBAL_REFERENCE;
-    for (RelocIterator it(*code, mask); !it.done(); it.next()) {
-      it.rinfo()->update_wasm_global_reference(old_start, globals_start);
-    }
-  }
-}
-
-Handle<Code> CreatePlaceholder(Factory* factory, uint32_t index,
-                               Code::Kind kind) {
-  // Create a placeholder code object and encode the corresponding index in
-  // the {constant_pool_offset} field of the code object.
-  // TODO(titzer): instead of placeholders, use a reloc_info mode.
-  static byte buffer[] = {0, 0, 0, 0};  // fake instructions.
-  static CodeDesc desc = {
-      buffer, arraysize(buffer), arraysize(buffer), 0, 0, nullptr, 0, nullptr};
-  Handle<Code> code = factory->NewCode(desc, Code::KindField::encode(kind),
-                                       Handle<Object>::null());
-  code->set_constant_pool_offset(static_cast<int>(index) + kPlaceholderMarker);
-  return code;
-}
-
-bool LinkFunction(Handle<Code> unlinked,
-                  std::vector<Handle<Code>>& code_table) {
-  bool modified = false;
-  int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
-  AllowDeferredHandleDereference embedding_raw_address;
-  for (RelocIterator it(*unlinked, mode_mask); !it.done(); it.next()) {
-    RelocInfo::Mode mode = it.rinfo()->rmode();
-    if (RelocInfo::IsCodeTarget(mode)) {
-      Code* target =
-          Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
-      if (target->constant_pool_offset() < kPlaceholderMarker) continue;
-      switch (target->kind()) {
-        case Code::WASM_FUNCTION:        // fall through
-        case Code::WASM_TO_JS_FUNCTION:  // fall through
-        case Code::JS_TO_WASM_FUNCTION: {
-          // Patch direct calls to placeholder code objects.
-          uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
-          Handle<Code> new_target = code_table[index];
-          if (target != *new_target) {
-            it.rinfo()->set_target_address(new_target->instruction_start(),
-                                           UPDATE_WRITE_BARRIER,
-                                           SKIP_ICACHE_FLUSH);
-            modified = true;
-          }
-          break;
-        }
-        default:
-          break;
-      }
-    }
-  }
-  return modified;
-}
-
-void FlushICache(Isolate* isolate, Handle<FixedArray> code_table) {
-  for (int i = 0; i < code_table->length(); ++i) {
-    Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
-    Assembler::FlushICache(isolate, code->instruction_start(),
-                           code->instruction_size());
-  }
-}
-
-// Fetches the compilation unit of a wasm function and executes its parallel
-// phase.
-bool FetchAndExecuteCompilationUnit(
-    Isolate* isolate,
-    std::vector<compiler::WasmCompilationUnit*>* compilation_units,
-    std::queue<compiler::WasmCompilationUnit*>* executed_units,
-    base::Mutex* result_mutex, base::AtomicNumber<size_t>* next_unit) {
-  DisallowHeapAllocation no_allocation;
-  DisallowHandleAllocation no_handles;
-  DisallowHandleDereference no_deref;
-  DisallowCodeDependencyChange no_dependency_change;
-
-  // - 1 because AtomicIncrement returns the value after the atomic increment.
-  size_t index = next_unit->Increment(1) - 1;
-  if (index >= compilation_units->size()) {
-    return false;
-  }
-
-  compiler::WasmCompilationUnit* unit = compilation_units->at(index);
-  if (unit != nullptr) {
-    unit->ExecuteCompilation();
-    base::LockGuard<base::Mutex> guard(result_mutex);
-    executed_units->push(unit);
-  }
-  return true;
-}
-
-class WasmCompilationTask : public CancelableTask {
- public:
-  WasmCompilationTask(
-      Isolate* isolate,
-      std::vector<compiler::WasmCompilationUnit*>* compilation_units,
-      std::queue<compiler::WasmCompilationUnit*>* executed_units,
-      base::Semaphore* on_finished, base::Mutex* result_mutex,
-      base::AtomicNumber<size_t>* next_unit)
-      : CancelableTask(isolate),
-        isolate_(isolate),
-        compilation_units_(compilation_units),
-        executed_units_(executed_units),
-        on_finished_(on_finished),
-        result_mutex_(result_mutex),
-        next_unit_(next_unit) {}
-
-  void RunInternal() override {
-    while (FetchAndExecuteCompilationUnit(isolate_, compilation_units_,
-                                          executed_units_, result_mutex_,
-                                          next_unit_)) {
-    }
-    on_finished_->Signal();
-  }
-
-  Isolate* isolate_;
-  std::vector<compiler::WasmCompilationUnit*>* compilation_units_;
-  std::queue<compiler::WasmCompilationUnit*>* executed_units_;
-  base::Semaphore* on_finished_;
-  base::Mutex* result_mutex_;
-  base::AtomicNumber<size_t>* next_unit_;
-};
-
 static void RecordStats(Isolate* isolate, Code* code) {
   isolate->counters()->wasm_generated_code_size()->Increment(code->body_size());
   isolate->counters()->wasm_reloc_size()->Increment(
@@ -251,264 +89,521 @@
   }
 }
 
-Address GetGlobalStartAddressFromCodeTemplate(Object* undefined,
-                                              JSObject* object) {
-  auto instance = WasmInstanceObject::cast(object);
-  Address old_address = nullptr;
-  if (instance->has_globals_buffer()) {
-    old_address =
-        static_cast<Address>(instance->get_globals_buffer()->backing_store());
-  }
-  return old_address;
-}
+void* TryAllocateBackingStore(Isolate* isolate, size_t size,
+                              bool enable_guard_regions, bool& is_external) {
+  is_external = false;
+  // TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
+  // systems. It may be safer to fail instead, given that other code might do
+  // things that would be unsafe if they expected guard pages where there
+  // weren't any.
+  if (enable_guard_regions && kGuardRegionsSupported) {
+    // TODO(eholk): On Windows we want to make sure we don't commit the guard
+    // pages yet.
 
-void InitializeParallelCompilation(
-    Isolate* isolate, const std::vector<WasmFunction>& functions,
-    std::vector<compiler::WasmCompilationUnit*>& compilation_units,
-    ModuleEnv& module_env, ErrorThrower* thrower) {
-  for (uint32_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size(); ++i) {
-    const WasmFunction* func = &functions[i];
-    compilation_units[i] =
-        func->imported ? nullptr : new compiler::WasmCompilationUnit(
-                                       thrower, isolate, &module_env, func, i);
-  }
-}
+    // We always allocate the largest possible offset into the heap, so the
+    // addressable memory after the guard page can be made inaccessible.
+    const size_t alloc_size =
+        RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
+    DCHECK_EQ(0, size % base::OS::CommitPageSize());
 
-uint32_t* StartCompilationTasks(
-    Isolate* isolate,
-    std::vector<compiler::WasmCompilationUnit*>& compilation_units,
-    std::queue<compiler::WasmCompilationUnit*>& executed_units,
-    base::Semaphore* pending_tasks, base::Mutex& result_mutex,
-    base::AtomicNumber<size_t>& next_unit) {
-  const size_t num_tasks =
-      Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
-          V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
-  uint32_t* task_ids = new uint32_t[num_tasks];
-  for (size_t i = 0; i < num_tasks; ++i) {
-    WasmCompilationTask* task =
-        new WasmCompilationTask(isolate, &compilation_units, &executed_units,
-                                pending_tasks, &result_mutex, &next_unit);
-    task_ids[i] = task->id();
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        task, v8::Platform::kShortRunningTask);
-  }
-  return task_ids;
-}
-
-void WaitForCompilationTasks(Isolate* isolate, uint32_t* task_ids,
-                             base::Semaphore* pending_tasks) {
-  const size_t num_tasks =
-      Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
-          V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
-  for (size_t i = 0; i < num_tasks; ++i) {
-    // If the task has not started yet, then we abort it. Otherwise we wait for
-    // it to finish.
-    if (isolate->cancelable_task_manager()->TryAbort(task_ids[i]) !=
-        CancelableTaskManager::kTaskAborted) {
-      pending_tasks->Wait();
+    // AllocateGuarded makes the whole region inaccessible by default.
+    void* memory = base::OS::AllocateGuarded(alloc_size);
+    if (memory == nullptr) {
+      return nullptr;
     }
+
+    // Make the part we care about accessible.
+    base::OS::Unprotect(memory, size);
+
+    reinterpret_cast<v8::Isolate*>(isolate)
+        ->AdjustAmountOfExternalAllocatedMemory(size);
+
+    is_external = true;
+    return memory;
+  } else {
+    void* memory = isolate->array_buffer_allocator()->Allocate(size);
+    return memory;
   }
 }
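
TryAllocateBackingStore leans on base::OS::AllocateGuarded and
base::OS::Unprotect; on POSIX the underlying idea reduces to reserving the
whole addressable range with PROT_NONE and enabling access only to the
committed part, so out-of-bounds wasm loads and stores fault instead of
touching neighboring memory. A hedged POSIX-only sketch, not the V8 code:

#include <sys/mman.h>
#include <cstddef>

void* ReserveWithGuards(size_t accessible, size_t reserved) {
  // Reserve everything inaccessible first.
  void* base = mmap(nullptr, reserved, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return nullptr;
  // Then make only the wasm heap itself readable and writable.
  if (mprotect(base, accessible, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, reserved);
    return nullptr;
  }
  return base;
}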
 
-void FinishCompilationUnits(
-    std::queue<compiler::WasmCompilationUnit*>& executed_units,
-    std::vector<Handle<Code>>& results, base::Mutex& result_mutex) {
-  while (true) {
-    compiler::WasmCompilationUnit* unit = nullptr;
-    {
-      base::LockGuard<base::Mutex> guard(&result_mutex);
-      if (executed_units.empty()) {
-        break;
-      }
-      unit = executed_units.front();
-      executed_units.pop();
-    }
-    int j = unit->index();
-    results[j] = unit->FinishCompilation();
-    delete unit;
+void FlushICache(Isolate* isolate, Handle<FixedArray> code_table) {
+  for (int i = 0; i < code_table->length(); ++i) {
+    Handle<Code> code = code_table->GetValueChecked<Code>(isolate, i);
+    Assembler::FlushICache(isolate, code->instruction_start(),
+                           code->instruction_size());
   }
 }
 
-void CompileInParallel(Isolate* isolate, const WasmModule* module,
-                       std::vector<Handle<Code>>& functions,
-                       ErrorThrower* thrower, ModuleEnv* module_env) {
-  // Data structures for the parallel compilation.
-  std::vector<compiler::WasmCompilationUnit*> compilation_units(
-      module->functions.size());
-  std::queue<compiler::WasmCompilationUnit*> executed_units;
+Handle<Script> CreateWasmScript(Isolate* isolate,
+                                const ModuleWireBytes& wire_bytes) {
+  Handle<Script> script =
+      isolate->factory()->NewScript(isolate->factory()->empty_string());
+  FixedArray* array = isolate->native_context()->embedder_data();
+  script->set_context_data(array->get(v8::Context::kDebugIdIndex));
+  script->set_type(Script::TYPE_WASM);
 
-  //-----------------------------------------------------------------------
-  // For parallel compilation:
-  // 1) The main thread allocates a compilation unit for each wasm function
-  //    and stores them in the vector {compilation_units}.
-  // 2) The main thread spawns {WasmCompilationTask} instances which run on
-  //    the background threads.
-  // 3.a) The background threads and the main thread pick one compilation
-  //      unit at a time and execute the parallel phase of the compilation
-  //      unit. After finishing the execution of the parallel phase, the
-  //      result is enqueued in {executed_units}.
-  // 3.b) If {executed_units} contains a compilation unit, the main thread
-  //      dequeues it and finishes the compilation.
-  // 4) After the parallel phase of all compilation units has started, the
-  //    main thread waits for all {WasmCompilationTask} instances to finish.
-  // 5) The main thread finishes the compilation.
+  int hash = StringHasher::HashSequentialString(
+      reinterpret_cast<const char*>(wire_bytes.start()), wire_bytes.length(),
+      kZeroHashSeed);
 
-  // Turn on the {CanonicalHandleScope} so that the background threads can
-  // use the node cache.
-  CanonicalHandleScope canonical(isolate);
+  const int kBufferSize = 32;
+  char buffer[kBufferSize];
+  int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
+  DCHECK(url_chars >= 0 && url_chars < kBufferSize);
+  MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
+      Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
+      TENURED);
+  script->set_source_url(*url_str.ToHandleChecked());
 
-  // 1) The main thread allocates a compilation unit for each wasm function
-  //    and stores them in the vector {compilation_units}.
-  InitializeParallelCompilation(isolate, module->functions, compilation_units,
-                                *module_env, thrower);
+  int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
+  DCHECK(name_chars >= 0 && name_chars < kBufferSize);
+  MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
+      Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
+      TENURED);
+  script->set_name(*name_str.ToHandleChecked());
 
-  // Objects for the synchronization with the background threads.
-  base::Mutex result_mutex;
-  base::AtomicNumber<size_t> next_unit(
-      static_cast<size_t>(FLAG_skip_compiling_wasm_funcs));
-
-  // 2) The main thread spawns {WasmCompilationTask} instances which run on
-  //    the background threads.
-  std::unique_ptr<uint32_t[]> task_ids(StartCompilationTasks(
-      isolate, compilation_units, executed_units, module->pending_tasks.get(),
-      result_mutex, next_unit));
-
-  // 3.a) The background threads and the main thread pick one compilation
-  //      unit at a time and execute the parallel phase of the compilation
-  //      unit. After finishing the execution of the parallel phase, the
-  //      result is enqueued in {executed_units}.
-  while (FetchAndExecuteCompilationUnit(isolate, &compilation_units,
-                                        &executed_units, &result_mutex,
-                                        &next_unit)) {
-    // 3.b) If {executed_units} contains a compilation unit, the main thread
-    //      dequeues it and finishes the compilation unit. Compilation units
-    //      are finished concurrently to the background threads to save
-    //      memory.
-    FinishCompilationUnits(executed_units, functions, result_mutex);
-  }
-  // 4) After the parallel phase of all compilation units has started, the
-  //    main thread waits for all {WasmCompilationTask} instances to finish.
-  WaitForCompilationTasks(isolate, task_ids.get(), module->pending_tasks.get());
-  // Finish the compilation of the remaining compilation units.
-  FinishCompilationUnits(executed_units, functions, result_mutex);
+  return script;
 }
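
CreateWasmScript derives both the source URL and the script name from a
32-bit hash of the wire bytes, so identical module bytes always surface under
the same wasm:// identity in the debugger. The naming scheme in isolation:

#include <cstdint>
#include <cstdio>
#include <string>

std::string WasmScriptUrl(uint32_t hash) {
  char buffer[32];
  std::snprintf(buffer, sizeof(buffer), "wasm://wasm/%08x", hash);
  return buffer;  // e.g. "wasm://wasm/0012abcd"
}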
 
-void CompileSequentially(Isolate* isolate, const WasmModule* module,
-                         std::vector<Handle<Code>>& functions,
-                         ErrorThrower* thrower, ModuleEnv* module_env) {
-  DCHECK(!thrower->error());
-
-  for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
-       i < module->functions.size(); ++i) {
-    const WasmFunction& func = module->functions[i];
-    if (func.imported) continue;  // Imports are compiled at instantiation time.
-
-    WasmName str = module->GetName(func.name_offset, func.name_length);
-    Handle<Code> code = Handle<Code>::null();
-    // Compile the function.
-    code = compiler::WasmCompilationUnit::CompileWasmFunction(
-        thrower, isolate, module_env, &func);
-    if (code.is_null()) {
-      thrower->CompileError("Compilation of #%d:%.*s failed.", i, str.length(),
-                            str.start());
-      break;
-    }
-      // Install the code into the linker table.
-    functions[i] = code;
-  }
-}
-
-void PatchDirectCalls(Handle<FixedArray> old_functions,
-                      Handle<FixedArray> new_functions, int start) {
-  DCHECK_EQ(new_functions->length(), old_functions->length());
-
-  DisallowHeapAllocation no_gc;
-  std::map<Code*, Code*> old_to_new_code;
-  for (int i = 0; i < new_functions->length(); ++i) {
-    old_to_new_code.insert(std::make_pair(Code::cast(old_functions->get(i)),
-                                          Code::cast(new_functions->get(i))));
-  }
-  int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
-  AllowDeferredHandleDereference embedding_raw_address;
-  for (int i = start; i < new_functions->length(); ++i) {
-    Code* wasm_function = Code::cast(new_functions->get(i));
-    for (RelocIterator it(wasm_function, mode_mask); !it.done(); it.next()) {
-      Code* old_code =
-          Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
-      if (old_code->kind() == Code::WASM_TO_JS_FUNCTION ||
-          old_code->kind() == Code::WASM_FUNCTION) {
-        auto found = old_to_new_code.find(old_code);
-        DCHECK(found != old_to_new_code.end());
-        Code* new_code = found->second;
-        if (new_code != old_code) {
-          it.rinfo()->set_target_address(new_code->instruction_start(),
-                                         UPDATE_WRITE_BARRIER,
-                                         SKIP_ICACHE_FLUSH);
+class JSToWasmWrapperCache {
+ public:
+  Handle<Code> CloneOrCompileJSToWasmWrapper(Isolate* isolate,
+                                             const wasm::WasmModule* module,
+                                             Handle<Code> wasm_code,
+                                             uint32_t index) {
+    const wasm::WasmFunction* func = &module->functions[index];
+    int cached_idx = sig_map_.Find(func->sig);
+    if (cached_idx >= 0) {
+      Handle<Code> code = isolate->factory()->CopyCode(code_cache_[cached_idx]);
+      // Now patch the call to wasm code.
+      for (RelocIterator it(*code, RelocInfo::kCodeTargetMask);; it.next()) {
+        DCHECK(!it.done());
+        Code* target =
+            Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+        if (target->kind() == Code::WASM_FUNCTION ||
+            target->kind() == Code::WASM_TO_JS_FUNCTION ||
+            target->builtin_index() == Builtins::kIllegal) {
+          it.rinfo()->set_target_address(wasm_code->instruction_start());
+          break;
         }
       }
+      return code;
+    }
+
+    Handle<Code> code =
+        compiler::CompileJSToWasmWrapper(isolate, module, wasm_code, index);
+    uint32_t new_cache_idx = sig_map_.FindOrInsert(func->sig);
+    DCHECK_EQ(code_cache_.size(), new_cache_idx);
+    USE(new_cache_idx);
+    code_cache_.push_back(code);
+    return code;
+  }
+
+ private:
+  // sig_map_ maps signatures to an index in code_cache_.
+  wasm::SignatureMap sig_map_;
+  std::vector<Handle<Code>> code_cache_;
+};
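
The cache above avoids recompiling a JS-to-wasm wrapper for every export:
exactly one wrapper is compiled per signature, and later exports with the
same signature receive a copy whose single wasm call target is patched. The
memoization skeleton, reduced to a map over hypothetical stand-in types:

#include <unordered_map>

// Sig must be hashable; Compile and CloneAndPatch are callables standing in
// for the compiler and the copy-and-patch path.
template <typename Sig, typename Code, typename Compile, typename CloneAndPatch>
Code GetWrapper(std::unordered_map<Sig, Code>* cache, const Sig& sig,
                Compile compile, CloneAndPatch clone_and_patch) {
  auto it = cache->find(sig);
  if (it != cache->end()) return clone_and_patch(it->second);  // cheap path
  Code code = compile(sig);  // expensive path, taken once per signature
  cache->emplace(sig, code);
  return code;
}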
+
+// A helper for compiling an entire module.
+class CompilationHelper {
+ public:
+  CompilationHelper(Isolate* isolate, WasmModule* module)
+      : isolate_(isolate), module_(module) {}
+
+  // The actual runnable task that performs compilations in the background.
+  class CompilationTask : public CancelableTask {
+   public:
+    CompilationHelper* helper_;
+    explicit CompilationTask(CompilationHelper* helper)
+        : CancelableTask(helper->isolate_), helper_(helper) {}
+
+    void RunInternal() override {
+      while (helper_->FetchAndExecuteCompilationUnit()) {
+      }
+      helper_->module_->pending_tasks.get()->Signal();
+    }
+  };
+
+  Isolate* isolate_;
+  WasmModule* module_;
+  std::vector<compiler::WasmCompilationUnit*> compilation_units_;
+  std::queue<compiler::WasmCompilationUnit*> executed_units_;
+  base::Mutex result_mutex_;
+  base::AtomicNumber<size_t> next_unit_;
+
+  // Run by each compilation task and by the main thread.
+  bool FetchAndExecuteCompilationUnit() {
+    DisallowHeapAllocation no_allocation;
+    DisallowHandleAllocation no_handles;
+    DisallowHandleDereference no_deref;
+    DisallowCodeDependencyChange no_dependency_change;
+
+    // - 1 because AtomicIncrement returns the value after the atomic increment.
+    size_t index = next_unit_.Increment(1) - 1;
+    if (index >= compilation_units_.size()) {
+      return false;
+    }
+
+    compiler::WasmCompilationUnit* unit = compilation_units_.at(index);
+    if (unit != nullptr) {
+      unit->ExecuteCompilation();
+      base::LockGuard<base::Mutex> guard(&result_mutex_);
+      executed_units_.push(unit);
+    }
+    return true;
+  }
+
+  void InitializeParallelCompilation(const std::vector<WasmFunction>& functions,
+                                     ModuleBytesEnv& module_env,
+                                     ErrorThrower* thrower) {
+    compilation_units_.reserve(functions.size());
+    for (uint32_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size();
+         ++i) {
+      const WasmFunction* func = &functions[i];
+      compilation_units_.push_back(
+          func->imported ? nullptr
+                         : new compiler::WasmCompilationUnit(
+                               thrower, isolate_, &module_env, func, i));
     }
   }
+
+  uint32_t* StartCompilationTasks() {
+    const size_t num_tasks =
+        Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+            V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
+    uint32_t* task_ids = new uint32_t[num_tasks];
+    for (size_t i = 0; i < num_tasks; ++i) {
+      CompilationTask* task = new CompilationTask(this);
+      task_ids[i] = task->id();
+      V8::GetCurrentPlatform()->CallOnBackgroundThread(
+          task, v8::Platform::kShortRunningTask);
+    }
+    return task_ids;
+  }
+
+  void WaitForCompilationTasks(uint32_t* task_ids) {
+    const size_t num_tasks =
+        Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+            V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
+    for (size_t i = 0; i < num_tasks; ++i) {
+      // If the task has not started yet, then we abort it. Otherwise we
+      // wait for it to finish.
+      if (isolate_->cancelable_task_manager()->TryAbort(task_ids[i]) !=
+          CancelableTaskManager::kTaskAborted) {
+        module_->pending_tasks.get()->Wait();
+      }
+    }
+  }
+
+  void FinishCompilationUnits(std::vector<Handle<Code>>& results) {
+    while (true) {
+      compiler::WasmCompilationUnit* unit = nullptr;
+      {
+        base::LockGuard<base::Mutex> guard(&result_mutex_);
+        if (executed_units_.empty()) {
+          break;
+        }
+        unit = executed_units_.front();
+        executed_units_.pop();
+      }
+      int j = unit->index();
+      results[j] = unit->FinishCompilation();
+      delete unit;
+    }
+  }
+
+  void CompileInParallel(ModuleBytesEnv* module_env,
+                         std::vector<Handle<Code>>& results,
+                         ErrorThrower* thrower) {
+    const WasmModule* module = module_env->module_env.module;
+    // Data structures for the parallel compilation.
+
+    //-----------------------------------------------------------------------
+    // For parallel compilation:
+    // 1) The main thread allocates a compilation unit for each wasm function
+    //    and stores them in the vector {compilation_units}.
+    // 2) The main thread spawns {CompilationTask} instances which run on
+    //    the background threads.
+    // 3.a) The background threads and the main thread pick one compilation
+    //      unit at a time and execute the parallel phase of the compilation
+    //      unit. After finishing the execution of the parallel phase, the
+    //      result is enqueued in {executed_units}.
+    // 3.b) If {executed_units} contains a compilation unit, the main thread
+    //      dequeues it and finishes the compilation.
+    // 4) After the parallel phase of all compilation units has started, the
+    //    main thread waits for all {CompilationTask} instances to finish.
+    // 5) The main thread finishes the compilation.
+
+    // Turn on the {CanonicalHandleScope} so that the background threads can
+    // use the node cache.
+    CanonicalHandleScope canonical(isolate_);
+
+    // 1) The main thread allocates a compilation unit for each wasm function
+    //    and stores them in the vector {compilation_units}.
+    InitializeParallelCompilation(module->functions, *module_env, thrower);
+
+    // Objects for the synchronization with the background threads.
+    base::AtomicNumber<size_t> next_unit(
+        static_cast<size_t>(FLAG_skip_compiling_wasm_funcs));
+
+    // 2) The main thread spawns {CompilationTask} instances which run on
+    //    the background threads.
+    std::unique_ptr<uint32_t[]> task_ids(StartCompilationTasks());
+
+    // 3.a) The background threads and the main thread pick one compilation
+    //      unit at a time and execute the parallel phase of the compilation
+    //      unit. After finishing the execution of the parallel phase, the
+    //      result is enqueued in {executed_units}.
+    while (FetchAndExecuteCompilationUnit()) {
+      // 3.b) If {executed_units} contains a compilation unit, the main thread
+      //      dequeues it and finishes the compilation unit. Compilation units
+      //      are finished concurrently to the background threads to save
+      //      memory.
+      FinishCompilationUnits(results);
+    }
+    // 4) After the parallel phase of all compilation units has started, the
+    //    main thread waits for all {CompilationTask} instances to finish.
+    WaitForCompilationTasks(task_ids.get());
+    // Finish the compilation of the remaining compilation units.
+    FinishCompilationUnits(results);
+  }
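
FetchAndExecuteCompilationUnit distributes work through one shared atomic
counter: every thread, the main thread included, claims the next unit index
until the counter runs past the end of the vector, so no unit is compiled
twice and no locking is needed to hand out work. The same scheme in standard
C++:

#include <atomic>
#include <cstddef>
#include <vector>

template <typename Unit, typename Fn>
void RunUnits(std::vector<Unit>* units, std::atomic<size_t>* next, Fn fn) {
  while (true) {
    size_t index = next->fetch_add(1);  // claim the next unit
    if (index >= units->size()) return;
    fn(&(*units)[index]);
  }
}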
+
+  void CompileSequentially(ModuleBytesEnv* module_env,
+                           std::vector<Handle<Code>>& results,
+                           ErrorThrower* thrower) {
+    DCHECK(!thrower->error());
+
+    const WasmModule* module = module_env->module_env.module;
+    for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
+         i < module->functions.size(); ++i) {
+      const WasmFunction& func = module->functions[i];
+      if (func.imported)
+        continue;  // Imports are compiled at instantiation time.
+
+      Handle<Code> code = Handle<Code>::null();
+      // Compile the function.
+      code = compiler::WasmCompilationUnit::CompileWasmFunction(
+          thrower, isolate_, module_env, &func);
+      if (code.is_null()) {
+        WasmName str = module_env->wire_bytes.GetName(&func);
+        thrower->CompileError("Compilation of #%d:%.*s failed.", i,
+                              str.length(), str.start());
+        break;
+      }
+      results[i] = code;
+    }
+  }
+
+  MaybeHandle<WasmModuleObject> CompileToModuleObject(
+      ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
+      Handle<Script> asm_js_script,
+      Vector<const byte> asm_js_offset_table_bytes) {
+    Factory* factory = isolate_->factory();
+    // The {module_wrapper} will take ownership of the {WasmModule} object,
+    // and it will be destroyed when the GC reclaims the wrapper object.
+    Handle<WasmModuleWrapper> module_wrapper =
+        WasmModuleWrapper::New(isolate_, module_);
+    WasmInstance temp_instance(module_);
+    temp_instance.context = isolate_->native_context();
+    temp_instance.mem_size = WasmModule::kPageSize * module_->min_mem_pages;
+    temp_instance.mem_start = nullptr;
+    temp_instance.globals_start = nullptr;
+
+    // Initialize the indirect tables with placeholders.
+    int function_table_count =
+        static_cast<int>(module_->function_tables.size());
+    Handle<FixedArray> function_tables =
+        factory->NewFixedArray(function_table_count, TENURED);
+    Handle<FixedArray> signature_tables =
+        factory->NewFixedArray(function_table_count, TENURED);
+    for (int i = 0; i < function_table_count; ++i) {
+      temp_instance.function_tables[i] = factory->NewFixedArray(1, TENURED);
+      temp_instance.signature_tables[i] = factory->NewFixedArray(1, TENURED);
+      function_tables->set(i, *temp_instance.function_tables[i]);
+      signature_tables->set(i, *temp_instance.signature_tables[i]);
+    }
+
+    HistogramTimerScope wasm_compile_module_time_scope(
+        isolate_->counters()->wasm_compile_module_time());
+
+    ModuleBytesEnv module_env(module_, &temp_instance, wire_bytes);
+
+    // The {code_table} array contains import wrappers and functions (which
+    // are both included in {functions.size()}), and export wrappers.
+    int code_table_size = static_cast<int>(module_->functions.size() +
+                                           module_->num_exported_functions);
+    Handle<FixedArray> code_table =
+        factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
+
+    // Initialize the code table with the illegal builtin. All call sites
+    // will be patched at instantiation.
+    Handle<Code> illegal_builtin = isolate_->builtins()->Illegal();
+    for (uint32_t i = 0; i < module_->functions.size(); ++i) {
+      code_table->set(static_cast<int>(i), *illegal_builtin);
+      temp_instance.function_code[i] = illegal_builtin;
+    }
+
+    isolate_->counters()->wasm_functions_per_module()->AddSample(
+        static_cast<int>(module_->functions.size()));
+    CompilationHelper helper(isolate_, module_);
+    if (!FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks != 0) {
+      // Avoid a race condition by collecting results into a second vector.
+      std::vector<Handle<Code>> results(temp_instance.function_code);
+      helper.CompileInParallel(&module_env, results, thrower);
+      temp_instance.function_code.swap(results);
+    } else {
+      helper.CompileSequentially(&module_env, temp_instance.function_code,
+                                 thrower);
+    }
+    if (thrower->error()) return {};
+
+    // At this point, compilation has completed. Update the code table.
+    for (size_t i = FLAG_skip_compiling_wasm_funcs;
+         i < temp_instance.function_code.size(); ++i) {
+      Code* code = *temp_instance.function_code[i];
+      code_table->set(static_cast<int>(i), code);
+      RecordStats(isolate_, code);
+    }
+
+    // Create heap objects for script, module bytes and asm.js offset table
+    // to be stored in the shared module data.
+    Handle<Script> script;
+    Handle<ByteArray> asm_js_offset_table;
+    if (asm_js_script.is_null()) {
+      script = CreateWasmScript(isolate_, wire_bytes);
+    } else {
+      script = asm_js_script;
+      asm_js_offset_table =
+          isolate_->factory()->NewByteArray(asm_js_offset_table_bytes.length());
+      asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.start(),
+                                   asm_js_offset_table_bytes.length());
+    }
+    // TODO(wasm): only save the sections necessary to deserialize a
+    // {WasmModule}. E.g. function bodies could be omitted.
+    Handle<String> module_bytes =
+        factory
+            ->NewStringFromOneByte({wire_bytes.start(), wire_bytes.length()},
+                                   TENURED)
+            .ToHandleChecked();
+    DCHECK(module_bytes->IsSeqOneByteString());
+
+    // Create the shared module data.
+    // TODO(clemensh): For the same module (same bytes / same hash), we should
+    // only have one WasmSharedModuleData. Otherwise, we might only set
+    // breakpoints on a (potentially empty) subset of the instances.
+
+    Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
+        isolate_, module_wrapper, Handle<SeqOneByteString>::cast(module_bytes),
+        script, asm_js_offset_table);
+
+    // Create the compiled module object, and populate with compiled functions
+    // and information needed at instantiation time. This object needs to be
+    // serializable. Instantiation may occur off a deserialized version of this
+    // object.
+    Handle<WasmCompiledModule> compiled_module =
+        WasmCompiledModule::New(isolate_, shared);
+    compiled_module->set_num_imported_functions(
+        module_->num_imported_functions);
+    compiled_module->set_code_table(code_table);
+    compiled_module->set_min_mem_pages(module_->min_mem_pages);
+    compiled_module->set_max_mem_pages(module_->max_mem_pages);
+    if (function_table_count > 0) {
+      compiled_module->set_function_tables(function_tables);
+      compiled_module->set_signature_tables(signature_tables);
+      compiled_module->set_empty_function_tables(function_tables);
+    }
+
+    // If we created a wasm script, finish it now and make it public to the
+    // debugger.
+    if (asm_js_script.is_null()) {
+      script->set_wasm_compiled_module(*compiled_module);
+      isolate_->debug()->OnAfterCompile(script);
+    }
+
+    // Compile JS->WASM wrappers for exported functions.
+    JSToWasmWrapperCache js_to_wasm_cache;
+    int func_index = 0;
+    for (auto exp : module_->export_table) {
+      if (exp.kind != kExternalFunction) continue;
+      Handle<Code> wasm_code(Code::cast(code_table->get(exp.index)), isolate_);
+      Handle<Code> wrapper_code =
+          js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(isolate_, module_,
+                                                         wasm_code, exp.index);
+      int export_index =
+          static_cast<int>(module_->functions.size() + func_index);
+      code_table->set(export_index, *wrapper_code);
+      RecordStats(isolate_, *wrapper_code);
+      func_index++;
+    }
+
+    return WasmModuleObject::New(isolate_, compiled_module);
 }
+};
 
 static void ResetCompiledModule(Isolate* isolate, WasmInstanceObject* owner,
                                 WasmCompiledModule* compiled_module) {
   TRACE("Resetting %d\n", compiled_module->instance_id());
   Object* undefined = *isolate->factory()->undefined_value();
-  uint32_t old_mem_size = compiled_module->mem_size();
-  uint32_t default_mem_size = compiled_module->default_mem_size();
-  Object* mem_start = compiled_module->ptr_to_memory();
-  Address old_mem_address = nullptr;
-  Address globals_start =
-      GetGlobalStartAddressFromCodeTemplate(undefined, owner);
-
-  // Reset function tables.
-  FixedArray* function_tables = nullptr;
-  FixedArray* empty_function_tables = nullptr;
-  if (compiled_module->has_function_tables()) {
-    function_tables = compiled_module->ptr_to_function_tables();
-    empty_function_tables = compiled_module->ptr_to_empty_function_tables();
-    compiled_module->set_ptr_to_function_tables(empty_function_tables);
-  }
-
-  if (old_mem_size > 0) {
-    CHECK_NE(mem_start, undefined);
-    old_mem_address =
-        static_cast<Address>(JSArrayBuffer::cast(mem_start)->backing_store());
-  }
-  int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::WASM_GLOBAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-
-  // Patch code to update memory references, global references, and function
-  // table references.
   Object* fct_obj = compiled_module->ptr_to_code_table();
-  if (fct_obj != nullptr && fct_obj != undefined &&
-      (old_mem_size > 0 || globals_start != nullptr || function_tables)) {
-    FixedArray* functions = FixedArray::cast(fct_obj);
-    for (int i = 0; i < functions->length(); ++i) {
-      Code* code = Code::cast(functions->get(i));
-      bool changed = false;
-      for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
-        RelocInfo::Mode mode = it.rinfo()->rmode();
-        if (RelocInfo::IsWasmMemoryReference(mode) ||
-            RelocInfo::IsWasmMemorySizeReference(mode)) {
-          it.rinfo()->update_wasm_memory_reference(
-              old_mem_address, nullptr, old_mem_size, default_mem_size);
-          changed = true;
-        } else if (RelocInfo::IsWasmGlobalReference(mode)) {
-          it.rinfo()->update_wasm_global_reference(globals_start, nullptr);
-          changed = true;
-        } else if (RelocInfo::IsEmbeddedObject(mode) && function_tables) {
-          Object* old = it.rinfo()->target_object();
-          for (int j = 0; j < function_tables->length(); ++j) {
-            if (function_tables->get(j) == old) {
-              it.rinfo()->set_target_object(empty_function_tables->get(j));
-              changed = true;
-            }
-          }
-        }
+  if (fct_obj != nullptr && fct_obj != undefined) {
+    uint32_t old_mem_size = compiled_module->mem_size();
+    uint32_t default_mem_size = compiled_module->default_mem_size();
+    Object* mem_start = compiled_module->maybe_ptr_to_memory();
+
+    // Patch code to update memory references, global references, and function
+    // table references.
+    Zone specialization_zone(isolate->allocator(), ZONE_NAME);
+    CodeSpecialization code_specialization(isolate, &specialization_zone);
+
+    if (old_mem_size > 0) {
+      CHECK_NE(mem_start, undefined);
+      Address old_mem_address =
+          static_cast<Address>(JSArrayBuffer::cast(mem_start)->backing_store());
+      code_specialization.RelocateMemoryReferences(
+          old_mem_address, old_mem_size, nullptr, default_mem_size);
+    }
+
+    if (owner->has_globals_buffer()) {
+      Address globals_start =
+          static_cast<Address>(owner->globals_buffer()->backing_store());
+      code_specialization.RelocateGlobals(globals_start, nullptr);
+    }
+
+    // Reset function tables.
+    if (compiled_module->has_function_tables()) {
+      FixedArray* function_tables = compiled_module->ptr_to_function_tables();
+      FixedArray* empty_function_tables =
+          compiled_module->ptr_to_empty_function_tables();
+      DCHECK_EQ(function_tables->length(), empty_function_tables->length());
+      for (int i = 0, e = function_tables->length(); i < e; ++i) {
+        code_specialization.RelocateObject(
+            handle(function_tables->get(i), isolate),
+            handle(empty_function_tables->get(i), isolate));
       }
+      compiled_module->set_ptr_to_function_tables(empty_function_tables);
+    }
+
+    FixedArray* functions = FixedArray::cast(fct_obj);
+    for (int i = compiled_module->num_imported_functions(),
+             end = functions->length();
+         i < end; ++i) {
+      Code* code = Code::cast(functions->get(i));
+      if (code->kind() != Code::WASM_FUNCTION) {
+        // From here on, there should only be wrappers for exported functions.
+        for (; i < end; ++i) {
+          DCHECK_EQ(Code::JS_TO_WASM_FUNCTION,
+                    Code::cast(functions->get(i))->kind());
+        }
+        break;
+      }
+      bool changed =
+          code_specialization.ApplyToWasmCode(code, SKIP_ICACHE_FLUSH);
+      // TODO(wasm): Check if this is faster than passing FLUSH_ICACHE_IF_NEEDED
+      // above.
       if (changed) {
         Assembler::FlushICache(isolate, code->instruction_start(),
                                code->instruction_size());
@@ -518,12 +613,58 @@
   compiled_module->reset_memory();
 }
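
ResetCompiledModule now funnels every patch through a single CodeSpecialization
pass instead of walking relocations once per concern, and flushes the icache
only for code that actually changed. A schematic reduction of that
batch-then-apply shape, over a hypothetical array of raw references rather
than RelocInfo:

#include <cstddef>
#include <cstdint>
#include <map>

bool ApplyReplacements(uintptr_t* refs, size_t count,
                       const std::map<uintptr_t, uintptr_t>& replacements) {
  bool changed = false;
  for (size_t i = 0; i < count; ++i) {
    auto it = replacements.find(refs[i]);
    if (it != replacements.end() && refs[i] != it->second) {
      refs[i] = it->second;  // patch the reference in place
      changed = true;
    }
  }
  return changed;  // caller flushes the instruction cache only if true
}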
 
+static void MemoryInstanceFinalizer(Isolate* isolate,
+                                    WasmInstanceObject* instance) {
+  DisallowHeapAllocation no_gc;
+  // If the memory object is destroyed, nothing needs to be done here.
+  if (!instance->has_memory_object()) return;
+  Handle<WasmInstanceWrapper> instance_wrapper =
+      handle(instance->instance_wrapper());
+  DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
+  DCHECK(instance_wrapper->has_instance());
+  bool has_prev = instance_wrapper->has_previous();
+  bool has_next = instance_wrapper->has_next();
+  Handle<WasmMemoryObject> memory_object(instance->memory_object());
+
+  if (!has_prev && !has_next) {
+    memory_object->ResetInstancesLink(isolate);
+    return;
+  } else {
+    if (!has_prev) {
+      Handle<WasmInstanceWrapper> next_wrapper =
+          instance_wrapper->next_wrapper();
+      next_wrapper->reset_previous_wrapper();
+      // As this is the first link in the memory object, destroying it
+      // without updating the memory object would corrupt the instance
+      // chain held by the memory object.
+      memory_object->set_instances_link(*next_wrapper);
+    } else if (!has_next) {
+      instance_wrapper->previous_wrapper()->reset_next_wrapper();
+    } else {
+      DCHECK(has_next && has_prev);
+      Handle<WasmInstanceWrapper> prev_wrapper =
+          instance_wrapper->previous_wrapper();
+      Handle<WasmInstanceWrapper> next_wrapper =
+          instance_wrapper->next_wrapper();
+      prev_wrapper->set_next_wrapper(*next_wrapper);
+      next_wrapper->set_previous_wrapper(*prev_wrapper);
+    }
+    // Reset to avoid dangling pointers.
+    instance_wrapper->reset();
+  }
+}
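
The branches above are the standard doubly-linked-list unlink, with the extra
twist that the list head lives on the memory object and has to be moved when
the first wrapper dies. The same logic over plain pointers:

struct Node {
  Node* prev = nullptr;
  Node* next = nullptr;
};

void Unlink(Node** head, Node* n) {
  if (n->prev) {
    n->prev->next = n->next;
  } else {
    *head = n->next;  // first element: move the list head
  }
  if (n->next) n->next->prev = n->prev;
  n->prev = n->next = nullptr;  // avoid dangling pointers
}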
+
 static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
+  DisallowHeapAllocation no_gc;
   JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
   WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
-  WasmCompiledModule* compiled_module = owner->get_compiled_module();
-  TRACE("Finalizing %d {\n", compiled_module->instance_id());
   Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+  // If a link to shared memory instances exists, update the list of memory
+  // instances before the instance is destroyed.
+  if (owner->has_instance_wrapper()) MemoryInstanceFinalizer(isolate, owner);
+  WasmCompiledModule* compiled_module = owner->compiled_module();
+  TRACE("Finalizing %d {\n", compiled_module->instance_id());
   DCHECK(compiled_module->has_weak_wasm_module());
   WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
 
@@ -540,8 +681,8 @@
     TRACE("}\n");
 
     DCHECK(!current_template->has_weak_prev_instance());
-    WeakCell* next = compiled_module->ptr_to_weak_next_instance();
-    WeakCell* prev = compiled_module->ptr_to_weak_prev_instance();
+    WeakCell* next = compiled_module->maybe_ptr_to_weak_next_instance();
+    WeakCell* prev = compiled_module->maybe_ptr_to_weak_prev_instance();
 
     if (current_template == compiled_module) {
       if (next == nullptr) {
@@ -596,40 +737,57 @@
   return {static_cast<int>(func.code_start_offset),
           static_cast<int>(func.code_end_offset - func.code_start_offset)};
 }
-
 }  // namespace
 
-const char* wasm::SectionName(WasmSectionCode code) {
-  switch (code) {
-    case kUnknownSectionCode:
-      return "Unknown";
-    case kTypeSectionCode:
-      return "Type";
-    case kImportSectionCode:
-      return "Import";
-    case kFunctionSectionCode:
-      return "Function";
-    case kTableSectionCode:
-      return "Table";
-    case kMemorySectionCode:
-      return "Memory";
-    case kGlobalSectionCode:
-      return "Global";
-    case kExportSectionCode:
-      return "Export";
-    case kStartSectionCode:
-      return "Start";
-    case kCodeSectionCode:
-      return "Code";
-    case kElementSectionCode:
-      return "Element";
-    case kDataSectionCode:
-      return "Data";
-    case kNameSectionCode:
-      return "Name";
-    default:
-      return "<unknown>";
+Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
+                                       size_t size, bool is_external,
+                                       bool enable_guard_regions) {
+  Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+  JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store,
+                       static_cast<int>(size));
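+  // Wasm memory buffers must not be neutered (detached), and they record
+  // whether a guard region was reserved around them.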
+  buffer->set_is_neuterable(false);
+  buffer->set_has_guard_region(enable_guard_regions);
+
+  if (is_external) {
+    // We mark the buffer as external if we allocated it here with guard
+    // pages. That means we need to arrange for it to be freed.
+
+    // TODO(eholk): Finalizers may not run when the main thread is shutting
+    // down, which means we may leak memory here.
+    Handle<Object> global_handle = isolate->global_handles()->Create(*buffer);
+    GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
+                            &MemoryFinalizer, v8::WeakCallbackType::kFinalizer);
   }
+  return buffer;
+}
+
+Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
+                                           bool enable_guard_regions) {
+  if (size > (FLAG_wasm_max_mem_pages * WasmModule::kPageSize)) {
+    // TODO(titzer): lift restriction on maximum memory allocated here.
+    return Handle<JSArrayBuffer>::null();
+  }
+
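+  // Only request guard regions where the platform supports them.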
+  enable_guard_regions = enable_guard_regions && kGuardRegionsSupported;
+
+  bool is_external;  // Set by TryAllocateBackingStore
+  void* memory =
+      TryAllocateBackingStore(isolate, size, enable_guard_regions, is_external);
+
+  if (memory == nullptr) {
+    return Handle<JSArrayBuffer>::null();
+  }
+
+#if DEBUG
+  // Double check the API allocator actually zero-initialized the memory.
+  const byte* bytes = reinterpret_cast<const byte*>(memory);
+  for (size_t i = 0; i < size; ++i) {
+    DCHECK_EQ(0, bytes[i]);
+  }
+#endif
+
+  return SetupArrayBuffer(isolate, memory, size, is_external,
+                          enable_guard_regions);
 }
 
 std::ostream& wasm::operator<<(std::ostream& os, const WasmModule& module) {
@@ -650,15 +808,12 @@
   return os;
 }
 
-std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& pair) {
-  os << "#" << pair.function_->func_index << ":";
-  if (pair.function_->name_offset > 0) {
-    if (pair.module_) {
-      WasmName name = pair.module_->GetName(pair.function_->name_offset,
-                                            pair.function_->name_length);
-      os.write(name.start(), name.length());
-    } else {
-      os << "+" << pair.function_->func_index;
+std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& name) {
+  os << "#" << name.function_->func_index;
+  if (name.function_->name_offset > 0) {
+    if (name.name_.start()) {
+      os << ":";
+      os.write(name.name_.start(), name.name_.length());
     }
   } else {
     os << "?";
@@ -666,16 +821,19 @@
   return os;
 }
 
-Object* wasm::GetOwningWasmInstance(Code* code) {
-  DCHECK(code->kind() == Code::WASM_FUNCTION);
+WasmInstanceObject* wasm::GetOwningWasmInstance(Code* code) {
   DisallowHeapAllocation no_gc;
+  DCHECK(code->kind() == Code::WASM_FUNCTION ||
+         code->kind() == Code::WASM_INTERPRETER_ENTRY);
   FixedArray* deopt_data = code->deoptimization_data();
   DCHECK_NOT_NULL(deopt_data);
-  DCHECK(deopt_data->length() == 2);
+  DCHECK_EQ(code->kind() == Code::WASM_INTERPRETER_ENTRY ? 1 : 2,
+            deopt_data->length());
   Object* weak_link = deopt_data->get(0);
-  if (!weak_link->IsWeakCell()) return nullptr;
+  DCHECK(weak_link->IsWeakCell());
   WeakCell* cell = WeakCell::cast(weak_link);
-  return cell->value();
+  if (cell->cleared()) return nullptr;
+  return WasmInstanceObject::cast(cell->value());
 }
 
 int wasm::GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
@@ -683,168 +841,8 @@
   return GetFunctionOffsetAndLength(compiled_module, func_index).first;
 }
 
-bool wasm::GetPositionInfo(Handle<WasmCompiledModule> compiled_module,
-                           uint32_t position, Script::PositionInfo* info) {
-  std::vector<WasmFunction>& functions = compiled_module->module()->functions;
-
-  // Binary search for a function containing the given position.
-  int left = 0;                                    // inclusive
-  int right = static_cast<int>(functions.size());  // exclusive
-  if (right == 0) return false;
-  while (right - left > 1) {
-    int mid = left + (right - left) / 2;
-    if (functions[mid].code_start_offset <= position) {
-      left = mid;
-    } else {
-      right = mid;
-    }
-  }
-  // If the found entry does not contains the given position, return false.
-  WasmFunction& func = functions[left];
-  if (position < func.code_start_offset || position >= func.code_end_offset) {
-    return false;
-  }
-
-  info->line = left;
-  info->column = position - func.code_start_offset;
-  info->line_start = func.code_start_offset;
-  info->line_end = func.code_end_offset;
-  return true;
-}
-
-WasmModule::WasmModule(Zone* owned, const byte* module_start)
-    : owned_zone(owned),
-      module_start(module_start),
-      pending_tasks(new base::Semaphore(0)) {}
-
-MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
-    Isolate* isolate, Handle<WasmModuleWrapper> module_wrapper,
-    ErrorThrower* thrower) const {
-  Factory* factory = isolate->factory();
-
-  MaybeHandle<WasmCompiledModule> nothing;
-
-  WasmInstance temp_instance(this);
-  temp_instance.context = isolate->native_context();
-  temp_instance.mem_size = WasmModule::kPageSize * this->min_mem_pages;
-  temp_instance.mem_start = nullptr;
-  temp_instance.globals_start = nullptr;
-
-  // Initialize the indirect tables with placeholders.
-  int function_table_count = static_cast<int>(this->function_tables.size());
-  Handle<FixedArray> function_tables =
-      factory->NewFixedArray(function_table_count);
-  for (int i = 0; i < function_table_count; ++i) {
-    temp_instance.function_tables[i] = factory->NewFixedArray(0);
-    function_tables->set(i, *temp_instance.function_tables[i]);
-  }
-
-  HistogramTimerScope wasm_compile_module_time_scope(
-      isolate->counters()->wasm_compile_module_time());
-
-  ModuleEnv module_env;
-  module_env.module = this;
-  module_env.instance = &temp_instance;
-  module_env.origin = origin;
-
-  // The {code_table} array contains import wrappers and functions (which
-  // are both included in {functions.size()}, and export wrappers.
-  int code_table_size =
-      static_cast<int>(functions.size() + num_exported_functions);
-  Handle<FixedArray> code_table =
-      factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
-
-  // Initialize the code table with placeholders.
-  for (uint32_t i = 0; i < functions.size(); ++i) {
-    Code::Kind kind = Code::WASM_FUNCTION;
-    if (i < num_imported_functions) kind = Code::WASM_TO_JS_FUNCTION;
-    Handle<Code> placeholder = CreatePlaceholder(factory, i, kind);
-    code_table->set(static_cast<int>(i), *placeholder);
-    temp_instance.function_code[i] = placeholder;
-  }
-
-  isolate->counters()->wasm_functions_per_module()->AddSample(
-      static_cast<int>(functions.size()));
-  if (!FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks != 0) {
-    // Avoid a race condition by collecting results into a second vector.
-    std::vector<Handle<Code>> results;
-    results.reserve(temp_instance.function_code.size());
-    for (size_t i = 0; i < temp_instance.function_code.size(); ++i) {
-      results.push_back(temp_instance.function_code[i]);
-    }
-    CompileInParallel(isolate, this, results, thrower, &module_env);
-
-    for (size_t i = 0; i < results.size(); ++i) {
-      temp_instance.function_code[i] = results[i];
-    }
-  } else {
-    CompileSequentially(isolate, this, temp_instance.function_code, thrower,
-                        &module_env);
-  }
-  if (thrower->error()) return nothing;
-
-  // At this point, compilation has completed. Update the code table.
-  for (size_t i = FLAG_skip_compiling_wasm_funcs;
-       i < temp_instance.function_code.size(); ++i) {
-    Code* code = *temp_instance.function_code[i];
-    code_table->set(static_cast<int>(i), code);
-  }
-
-  // Link the functions in the module.
-  for (size_t i = FLAG_skip_compiling_wasm_funcs;
-       i < temp_instance.function_code.size(); ++i) {
-    Handle<Code> code = temp_instance.function_code[i];
-    bool modified = LinkFunction(code, temp_instance.function_code);
-    if (modified) {
-      // TODO(mtrofin): do we need to flush the cache here?
-      Assembler::FlushICache(isolate, code->instruction_start(),
-                             code->instruction_size());
-    }
-  }
-
-  // Create the compiled module object, and populate with compiled functions
-  // and information needed at instantiation time. This object needs to be
-  // serializable. Instantiation may occur off a deserialized version of this
-  // object.
-  Handle<WasmCompiledModule> ret =
-      WasmCompiledModule::New(isolate, module_wrapper);
-  ret->set_code_table(code_table);
-  ret->set_min_mem_pages(min_mem_pages);
-  ret->set_max_mem_pages(max_mem_pages);
-  if (function_table_count > 0) {
-    ret->set_function_tables(function_tables);
-    ret->set_empty_function_tables(function_tables);
-  }
-
-  // Compile JS->WASM wrappers for exported functions.
-  int func_index = 0;
-  for (auto exp : export_table) {
-    if (exp.kind != kExternalFunction) continue;
-    Handle<Code> wasm_code =
-        code_table->GetValueChecked<Code>(isolate, exp.index);
-    Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
-        isolate, &module_env, wasm_code, exp.index);
-    int export_index = static_cast<int>(functions.size() + func_index);
-    code_table->set(export_index, *wrapper_code);
-    func_index++;
-  }
-
-  {
-    // TODO(wasm): only save the sections necessary to deserialize a
-    // {WasmModule}. E.g. function bodies could be omitted.
-    size_t module_bytes_len = module_end - module_start;
-    DCHECK_LE(module_bytes_len, static_cast<size_t>(kMaxInt));
-    Vector<const uint8_t> module_bytes_vec(module_start,
-                                           static_cast<int>(module_bytes_len));
-    Handle<String> module_bytes_string =
-        factory->NewStringFromOneByte(module_bytes_vec, TENURED)
-            .ToHandleChecked();
-    DCHECK(module_bytes_string->IsSeqOneByteString());
-    ret->set_module_bytes(Handle<SeqOneByteString>::cast(module_bytes_string));
-  }
-
-  return ret;
-}
+WasmModule::WasmModule(Zone* owned)
+    : owned_zone(owned), pending_tasks(new base::Semaphore(0)) {}
 
 static WasmFunction* GetWasmFunctionForImportWrapper(Isolate* isolate,
                                                      Handle<Object> target) {
@@ -876,7 +874,7 @@
       code = handle(target);
     }
   }
-  DCHECK(found == 1);
+  DCHECK_EQ(1, found);
   return code;
 }
 
@@ -884,8 +882,8 @@
                                          FunctionSig* sig,
                                          Handle<JSReceiver> target,
                                          Handle<String> module_name,
-                                         MaybeHandle<String> import_name) {
-  Handle<Code> code;
+                                         MaybeHandle<String> import_name,
+                                         ModuleOrigin origin) {
   WasmFunction* other_func = GetWasmFunctionForImportWrapper(isolate, target);
   if (other_func) {
     if (sig->Equals(other_func->sig)) {
@@ -898,7 +896,7 @@
   } else {
     // Signature mismatch. Compile a new wrapper for the new signature.
     return compiler::CompileWasmToJSWrapper(isolate, target, sig, index,
-                                            module_name, import_name);
+                                            module_name, import_name, origin);
   }
 }
 
@@ -906,11 +904,13 @@
                                          Handle<FixedArray> dispatch_tables,
                                          int index, WasmFunction* function,
                                          Handle<Code> code) {
-  DCHECK_EQ(0, dispatch_tables->length() % 3);
-  for (int i = 0; i < dispatch_tables->length(); i += 3) {
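+  // Each dispatch table entry spans four slots; slots 1-3 hold the table
+  // index, the function table, and the signature table.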
+  DCHECK_EQ(0, dispatch_tables->length() % 4);
+  for (int i = 0; i < dispatch_tables->length(); i += 4) {
     int table_index = Smi::cast(dispatch_tables->get(i + 1))->value();
-    Handle<FixedArray> dispatch_table(
+    Handle<FixedArray> function_table(
         FixedArray::cast(dispatch_tables->get(i + 2)), isolate);
+    Handle<FixedArray> signature_table(
+        FixedArray::cast(dispatch_tables->get(i + 3)), isolate);
     if (function) {
       // TODO(titzer): the signature might need to be copied to avoid
       // a dangling pointer in the signature map.
@@ -919,12 +919,12 @@
       int sig_index = static_cast<int>(
           instance->module()->function_tables[table_index].map.FindOrInsert(
               function->sig));
-      dispatch_table->set(index, Smi::FromInt(sig_index));
-      dispatch_table->set(index + (dispatch_table->length() / 2), *code);
+      signature_table->set(index, Smi::FromInt(sig_index));
+      function_table->set(index, *code);
     } else {
       Code* code = nullptr;
-      dispatch_table->set(index, Smi::FromInt(-1));
-      dispatch_table->set(index + (dispatch_table->length() / 2), code);
+      signature_table->set(index, Smi::FromInt(-1));
+      function_table->set(index, code);
     }
   }
 }
@@ -946,20 +946,31 @@
 // A helper class to simplify instantiating a module from a compiled module.
 // It closes over the {Isolate}, the {ErrorThrower}, the {WasmCompiledModule},
 // etc.
-class WasmInstanceBuilder {
+class InstantiationHelper {
  public:
-  WasmInstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
-                      Handle<JSObject> module_object, Handle<JSReceiver> ffi,
-                      Handle<JSArrayBuffer> memory)
+  InstantiationHelper(Isolate* isolate, ErrorThrower* thrower,
+                      Handle<WasmModuleObject> module_object,
+                      MaybeHandle<JSReceiver> ffi,
+                      MaybeHandle<JSArrayBuffer> memory)
       : isolate_(isolate),
+        module_(module_object->compiled_module()->module()),
         thrower_(thrower),
         module_object_(module_object),
-        ffi_(ffi),
-        memory_(memory) {}
+        ffi_(ffi.is_null() ? Handle<JSReceiver>::null()
+                           : ffi.ToHandleChecked()),
+        memory_(memory.is_null() ? Handle<JSArrayBuffer>::null()
+                                 : memory.ToHandleChecked()) {}
 
   // Build an instance, in all of its glory.
-  MaybeHandle<JSObject> Build() {
-    MaybeHandle<JSObject> nothing;
+  MaybeHandle<WasmInstanceObject> Build() {
+    // Check that an imports argument was provided, if the module requires it.
+    // No point in continuing otherwise.
+    if (!module_->import_table.empty() && ffi_.is_null()) {
+      thrower_->TypeError(
+          "Imports argument must be present and must be an object");
+      return {};
+    }
+
     HistogramTimerScope wasm_instantiate_module_time_scope(
         isolate_->counters()->wasm_instantiate_module_time());
     Factory* factory = isolate_->factory();
@@ -982,8 +993,7 @@
       Handle<WasmCompiledModule> original;
       {
         DisallowHeapAllocation no_gc;
-        original = handle(
-            WasmCompiledModule::cast(module_object_->GetInternalField(0)));
+        original = handle(module_object_->compiled_module());
         if (original->has_weak_owning_instance()) {
           owner = handle(WasmInstanceObject::cast(
               original->weak_owning_instance()->value()));
@@ -1032,14 +1042,14 @@
               compiled_module_->instance_id());
       }
       compiled_module_->set_code_table(code_table);
+      compiled_module_->set_native_context(isolate_->native_context());
     }
-    module_ = reinterpret_cast<WasmModuleWrapper*>(
-                  *compiled_module_->module_wrapper())
-                  ->get();
 
     //--------------------------------------------------------------------------
     // Allocate the instance object.
     //--------------------------------------------------------------------------
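+    // Record all required code patches (globals, memory references, table
+    // sizes, direct calls) and apply them in one pass further below.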
+    Zone instantiation_zone(isolate_->allocator(), ZONE_NAME);
+    CodeSpecialization code_specialization(isolate_, &instantiation_zone);
     Handle<WasmInstanceObject> instance =
         WasmInstanceObject::New(isolate_, compiled_module_);
 
@@ -1049,19 +1059,23 @@
     MaybeHandle<JSArrayBuffer> old_globals;
     uint32_t globals_size = module_->globals_size;
     if (globals_size > 0) {
+      const bool enable_guard_regions = false;
       Handle<JSArrayBuffer> global_buffer =
-          NewArrayBuffer(isolate_, globals_size);
+          NewArrayBuffer(isolate_, globals_size, enable_guard_regions);
       globals_ = global_buffer;
       if (globals_.is_null()) {
         thrower_->RangeError("Out of memory: wasm globals");
-        return nothing;
+        return {};
       }
-      Address old_address =
-          owner.is_null() ? nullptr : GetGlobalStartAddressFromCodeTemplate(
-                                          isolate_->heap()->undefined_value(),
-                                          *owner.ToHandleChecked());
-      RelocateGlobals(code_table, old_address,
-                      static_cast<Address>(global_buffer->backing_store()));
+      Address old_globals_start = nullptr;
+      if (!owner.is_null()) {
+        DCHECK(owner.ToHandleChecked()->has_globals_buffer());
+        old_globals_start = static_cast<Address>(
+            owner.ToHandleChecked()->globals_buffer()->backing_store());
+      }
+      Address new_globals_start =
+          static_cast<Address>(global_buffer->backing_store());
+      code_specialization.RelocateGlobals(old_globals_start, new_globals_start);
       instance->set_globals_buffer(*global_buffer);
     }
 
@@ -1072,16 +1086,16 @@
         static_cast<int>(module_->function_tables.size());
     table_instances_.reserve(module_->function_tables.size());
     for (int index = 0; index < function_table_count; ++index) {
-      table_instances_.push_back({Handle<WasmTableObject>::null(),
-                                  Handle<FixedArray>::null(),
-                                  Handle<FixedArray>::null()});
+      table_instances_.push_back(
+          {Handle<WasmTableObject>::null(), Handle<FixedArray>::null(),
+           Handle<FixedArray>::null(), Handle<FixedArray>::null()});
     }
 
     //--------------------------------------------------------------------------
     // Process the imports for the module.
     //--------------------------------------------------------------------------
     int num_imported_functions = ProcessImports(code_table, instance);
-    if (num_imported_functions < 0) return nothing;
+    if (num_imported_functions < 0) return {};
 
     //--------------------------------------------------------------------------
     // Process the initialization for the module's globals.
@@ -1089,6 +1103,12 @@
     InitGlobals();
 
     //--------------------------------------------------------------------------
+    // Set up the indirect function tables for the new instance.
+    //--------------------------------------------------------------------------
+    if (function_table_count > 0)
+      InitializeTables(code_table, instance, &code_specialization);
+
+    //--------------------------------------------------------------------------
     // Set up the memory for the new instance.
     //--------------------------------------------------------------------------
     MaybeHandle<JSArrayBuffer> old_memory;
@@ -1099,11 +1119,45 @@
     if (!memory_.is_null()) {
       // Set externally passed ArrayBuffer non neuterable.
       memory_->set_is_neuterable(false);
+
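+      // With guard regions enabled, an imported memory must itself carry a
+      // guard region (asm.js-origin modules are exempt).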
+      DCHECK_IMPLIES(EnableGuardRegions(), module_->origin == kAsmJsOrigin ||
+                                               memory_->has_guard_region());
     } else if (min_mem_pages > 0) {
       memory_ = AllocateMemory(min_mem_pages);
-      if (memory_.is_null()) return nothing;  // failed to allocate memory
+      if (memory_.is_null()) return {};  // failed to allocate memory
     }
 
+    //--------------------------------------------------------------------------
+    // Check that indirect function table segments are within bounds.
+    //--------------------------------------------------------------------------
+    for (WasmTableInit& table_init : module_->table_inits) {
+      DCHECK(table_init.table_index < table_instances_.size());
+      uint32_t base = EvalUint32InitExpr(table_init.offset);
+      uint32_t table_size =
+          table_instances_[table_init.table_index].function_table->length();
+      if (!in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
+                     table_size)) {
+        thrower_->LinkError("table initializer is out of bounds");
+        return {};
+      }
+    }
+
+    //--------------------------------------------------------------------------
+    // Check that memory segments are within bounds.
+    //--------------------------------------------------------------------------
+    for (WasmDataSegment& seg : module_->data_segments) {
+      uint32_t base = EvalUint32InitExpr(seg.dest_addr);
+      uint32_t mem_size = memory_.is_null()
+          ? 0 : static_cast<uint32_t>(memory_->byte_length()->Number());
+      if (!in_bounds(base, seg.source_size, mem_size)) {
+        thrower_->LinkError("data segment is out of bounds");
+        return {};
+      }
+    }
+
+    //--------------------------------------------------------------------------
+    // Initialize memory.
+    //--------------------------------------------------------------------------
     if (!memory_.is_null()) {
       instance->set_memory_buffer(*memory_);
       Address mem_start = static_cast<Address>(memory_->backing_store());
@@ -1117,11 +1171,13 @@
               ? static_cast<Address>(
                     compiled_module_->memory()->backing_store())
               : nullptr;
-      RelocateMemoryReferencesInCode(code_table, old_mem_start, mem_start,
-                                     old_mem_size, mem_size);
+      // We might get instantiated again with the same memory. No patching
+      // needed in this case.
+      if (old_mem_start != mem_start || old_mem_size != mem_size) {
+        code_specialization.RelocateMemoryReferences(
+            old_mem_start, old_mem_size, mem_start, mem_size);
+      }
       compiled_module_->set_memory(memory_);
-    } else {
-      LoadDataSegments(nullptr, 0);
     }
 
     //--------------------------------------------------------------------------
@@ -1144,21 +1200,59 @@
     //--------------------------------------------------------------------------
     // Set up the exports object for the new instance.
     //--------------------------------------------------------------------------
-    ProcessExports(code_table, instance);
+    ProcessExports(code_table, instance, compiled_module_);
 
     //--------------------------------------------------------------------------
-    // Set up the indirect function tables for the new instance.
+    // Add the instance to the Memory object, if one exists.
     //--------------------------------------------------------------------------
-    if (function_table_count > 0) InitializeTables(code_table, instance);
+    DCHECK(wasm::IsWasmInstance(*instance));
+    if (instance->has_memory_object()) {
+      instance->memory_object()->AddInstance(isolate_, instance);
+    }
 
-    if (num_imported_functions > 0 || !owner.is_null()) {
-      // If the code was cloned, or new imports were compiled, patch.
-      PatchDirectCalls(old_code_table, code_table, num_imported_functions);
+    //--------------------------------------------------------------------------
+    // Initialize the indirect function tables.
+    //--------------------------------------------------------------------------
+    if (function_table_count > 0) LoadTableSegments(code_table, instance);
+
+    // Patch all code with the relocations registered in code_specialization.
+    {
+      code_specialization.RelocateDirectCalls(instance);
+      code_specialization.ApplyToWholeInstance(*instance, SKIP_ICACHE_FLUSH);
     }
 
     FlushICache(isolate_, code_table);
 
     //--------------------------------------------------------------------------
+    // Unpack and notify signal handler of protected instructions.
+    //--------------------------------------------------------------------------
+    if (FLAG_wasm_trap_handler) {
+      for (int i = 0; i < code_table->length(); ++i) {
+        Handle<Code> code = code_table->GetValueChecked<Code>(isolate_, i);
+
+        if (code->kind() != Code::WASM_FUNCTION) {
+          continue;
+        }
+
+        const intptr_t base = reinterpret_cast<intptr_t>(code->entry());
+
+        Zone zone(isolate_->allocator(), "Wasm Module");
+        ZoneVector<trap_handler::ProtectedInstructionData> unpacked(&zone);
+        const int mode_mask =
+            RelocInfo::ModeMask(RelocInfo::WASM_PROTECTED_INSTRUCTION_LANDING);
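+        // Collect the recorded offset of each protected instruction together
+        // with the offset of its landing pad relative to the code entry.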
+        for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+          trap_handler::ProtectedInstructionData data;
+          data.instr_offset = it.rinfo()->data();
+          data.landing_offset =
+              reinterpret_cast<intptr_t>(it.rinfo()->pc()) - base;
+          unpacked.emplace_back(data);
+        }
+        // TODO(eholk): Register the protected instruction information once the
+        // trap handler is in place.
+      }
+    }
+
+    //--------------------------------------------------------------------------
     // Set up and link the new instance.
     //--------------------------------------------------------------------------
     {
@@ -1174,7 +1268,7 @@
         // we want all the publishing to happen free from GC interruptions, and
         // so we do it in
         // one GC-free scope afterwards.
-        original = handle(owner.ToHandleChecked()->get_compiled_module());
+        original = handle(owner.ToHandleChecked()->compiled_module());
         link_to_original = factory->NewWeakCell(original.ToHandleChecked());
       }
       // Publish the new instance to the instances chain.
@@ -1195,29 +1289,27 @@
       }
     }
 
-    DCHECK(wasm::IsWasmInstance(*instance));
-    if (instance->has_memory_object()) {
-      instance->get_memory_object()->AddInstance(*instance);
-    }
+    //--------------------------------------------------------------------------
+    // Set all breakpoints that were set on the shared module.
+    //--------------------------------------------------------------------------
+    WasmSharedModuleData::SetBreakpointsOnNewInstance(
+        compiled_module_->shared(), instance);
 
     //--------------------------------------------------------------------------
     // Run the start function if one was specified.
     //--------------------------------------------------------------------------
     if (module_->start_function_index >= 0) {
       HandleScope scope(isolate_);
-      ModuleEnv module_env;
-      module_env.module = module_;
-      module_env.instance = nullptr;
-      module_env.origin = module_->origin;
       int start_index = module_->start_function_index;
       Handle<Code> startup_code =
           code_table->GetValueChecked<Code>(isolate_, start_index);
       FunctionSig* sig = module_->functions[start_index].sig;
-      Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
-          isolate_, &module_env, startup_code, start_index);
+      Handle<Code> wrapper_code =
+          js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
+              isolate_, module_, startup_code, start_index);
       Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
-          isolate_, instance, factory->InternalizeUtf8String("start"),
-          wrapper_code, static_cast<int>(sig->parameter_count()), start_index);
+          isolate_, instance, MaybeHandle<String>(), start_index,
+          static_cast<int>(sig->parameter_count()), wrapper_code);
       RecordStats(isolate_, *startup_code);
       // Call the JS function.
       Handle<Object> undefined = factory->undefined_value();
@@ -1231,13 +1323,13 @@
         // chain. However, we need to set up everything before executing the
         // start function, such that stack trace information can be generated
         // correctly already in the start function.
-        return nothing;
+        return {};
       }
     }
 
     DCHECK(!isolate_->has_pending_exception());
     TRACE("Finishing instance %d\n", compiled_module_->instance_id());
-    TRACE_CHAIN(WasmCompiledModule::cast(module_object_->GetInternalField(0)));
+    TRACE_CHAIN(module_object_->compiled_module());
     return instance;
   }
 
@@ -1246,72 +1338,65 @@
   struct TableInstance {
     Handle<WasmTableObject> table_object;    // WebAssembly.Table instance
     Handle<FixedArray> js_wrappers;          // JSFunctions exported
-    Handle<FixedArray> dispatch_table;       // internal (code, sig) pairs
+    Handle<FixedArray> function_table;       // internal code array
+    Handle<FixedArray> signature_table;      // internal sig array
   };
 
   Isolate* isolate_;
-  WasmModule* module_;
+  WasmModule* const module_;
   ErrorThrower* thrower_;
-  Handle<JSObject> module_object_;
-  Handle<JSReceiver> ffi_;
-  Handle<JSArrayBuffer> memory_;
+  Handle<WasmModuleObject> module_object_;
+  Handle<JSReceiver> ffi_;        // TODO(titzer): Use MaybeHandle
+  Handle<JSArrayBuffer> memory_;  // TODO(titzer): Use MaybeHandle
   Handle<JSArrayBuffer> globals_;
   Handle<WasmCompiledModule> compiled_module_;
   std::vector<TableInstance> table_instances_;
   std::vector<Handle<JSFunction>> js_wrappers_;
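+  // Clones an existing JS->wasm wrapper where possible instead of compiling
+  // a new one (see CloneOrCompileJSToWasmWrapper).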
+  JSToWasmWrapperCache js_to_wasm_cache_;
 
-  // Helper routine to print out errors with imports (FFI).
-  MaybeHandle<JSFunction> ReportFFIError(const char* error, uint32_t index,
-                                         Handle<String> module_name,
-                                         MaybeHandle<String> function_name) {
-    Handle<String> function_name_handle;
-    if (function_name.ToHandle(&function_name_handle)) {
-      thrower_->TypeError(
-          "Import #%d module=\"%.*s\" function=\"%.*s\" error: %s", index,
-          module_name->length(), module_name->ToCString().get(),
-          function_name_handle->length(),
-          function_name_handle->ToCString().get(), error);
-    } else {
-      thrower_->TypeError("Import #%d module=\"%.*s\" error: %s", index,
-                          module_name->length(), module_name->ToCString().get(),
-                          error);
-    }
-    thrower_->TypeError("Import ");
-    return MaybeHandle<JSFunction>();
+  // Helper routines to print out errors with imports.
+  void ReportLinkError(const char* error, uint32_t index,
+                       Handle<String> module_name, Handle<String> import_name) {
+    thrower_->LinkError(
+        "Import #%d module=\"%.*s\" function=\"%.*s\" error: %s", index,
+        module_name->length(), module_name->ToCString().get(),
+        import_name->length(), import_name->ToCString().get(), error);
+  }
+
+  MaybeHandle<Object> ReportLinkError(const char* error, uint32_t index,
+                                      Handle<String> module_name) {
+    thrower_->LinkError("Import #%d module=\"%.*s\" error: %s", index,
+                        module_name->length(), module_name->ToCString().get(),
+                        error);
+    return MaybeHandle<Object>();
   }
 
   // Look up an import value in the {ffi_} object.
   MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
-                                   MaybeHandle<String> import_name) {
-    if (ffi_.is_null()) {
-      return ReportFFIError("FFI is not an object", index, module_name,
-                            import_name);
-    }
+                                   Handle<String> import_name) {
+    // The js-api layer has already validated that, if the module has imports,
+    // the ffi object is present and is a JSObject.
+    DCHECK(!ffi_.is_null());
 
     // Look up the module first.
-    MaybeHandle<Object> result = Object::GetProperty(ffi_, module_name);
+    MaybeHandle<Object> result =
+        Object::GetPropertyOrElement(ffi_, module_name);
     if (result.is_null()) {
-      return ReportFFIError("module not found", index, module_name,
-                            import_name);
+      return ReportLinkError("module not found", index, module_name);
     }
 
     Handle<Object> module = result.ToHandleChecked();
 
-    if (!import_name.is_null()) {
-      // Look up the value in the module.
-      if (!module->IsJSReceiver()) {
-        return ReportFFIError("module is not an object or function", index,
-                              module_name, import_name);
-      }
+    // Look up the value in the module.
+    if (!module->IsJSReceiver()) {
+      return ReportLinkError("module is not an object or function", index,
+                             module_name);
+    }
 
-      result = Object::GetProperty(module, import_name.ToHandleChecked());
-      if (result.is_null()) {
-        return ReportFFIError("import not found", index, module_name,
-                              import_name);
-      }
-    } else {
-      // No function specified. Use the "default export".
-      result = module;
+    result = Object::GetPropertyOrElement(module, import_name);
+    if (result.is_null()) {
+      ReportLinkError("import not found", index, module_name, import_name);
+      return MaybeHandle<JSFunction>();
     }
 
     return result;
@@ -1331,21 +1416,21 @@
     }
   }
 
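+  // Returns true if [offset, offset + size) is contained in [0, upper); the
+  // second comparison guards against uint32 overflow of offset + size.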
+  bool in_bounds(uint32_t offset, uint32_t size, uint32_t upper) {
+    return offset + size <= upper && offset + size >= offset;
+  }
+
   // Load data segments into the memory.
   void LoadDataSegments(Address mem_addr, size_t mem_size) {
-    Handle<SeqOneByteString> module_bytes = compiled_module_->module_bytes();
+    Handle<SeqOneByteString> module_bytes(compiled_module_->module_bytes(),
+                                          isolate_);
     for (const WasmDataSegment& segment : module_->data_segments) {
       uint32_t source_size = segment.source_size;
       // Segments of size == 0 are just nops.
       if (source_size == 0) continue;
       uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
-      if (dest_offset >= mem_size || source_size >= mem_size ||
-          dest_offset > (mem_size - source_size)) {
-        thrower_->TypeError("data segment (start = %" PRIu32 ", size = %" PRIu32
-                            ") does not fit into memory (size = %" PRIuS ")",
-                            dest_offset, source_size, mem_size);
-        return;
-      }
+      DCHECK(in_bounds(dest_offset, source_size,
+                       static_cast<uint32_t>(mem_size)));
       byte* dest = mem_addr + dest_offset;
       const byte* src = reinterpret_cast<const byte*>(
           module_bytes->GetCharsAddress() + segment.source_offset);
@@ -1365,17 +1450,17 @@
     TRACE("init [globals+%u] = %lf, type = %s\n", global.offset, num,
           WasmOpcodes::TypeName(global.type));
     switch (global.type) {
-      case kAstI32:
+      case kWasmI32:
         *GetRawGlobalPtr<int32_t>(global) = static_cast<int32_t>(num);
         break;
-      case kAstI64:
+      case kWasmI64:
         // TODO(titzer): initialization of imported i64 globals.
         UNREACHABLE();
         break;
-      case kAstF32:
+      case kWasmF32:
         *GetRawGlobalPtr<float>(global) = static_cast<float>(num);
         break;
-      case kAstF64:
+      case kWasmF64:
         *GetRawGlobalPtr<double>(global) = static_cast<double>(num);
         break;
       default:
@@ -1393,39 +1478,43 @@
     for (int index = 0; index < static_cast<int>(module_->import_table.size());
          ++index) {
       WasmImport& import = module_->import_table[index];
-      Handle<String> module_name =
-          ExtractStringFromModuleBytes(isolate_, compiled_module_,
-                                       import.module_name_offset,
-                                       import.module_name_length)
-              .ToHandleChecked();
-      Handle<String> function_name = Handle<String>::null();
-      if (import.field_name_length > 0) {
-        function_name = ExtractStringFromModuleBytes(isolate_, compiled_module_,
-                                                     import.field_name_offset,
-                                                     import.field_name_length)
-                            .ToHandleChecked();
-      }
+
+      Handle<String> module_name;
+      MaybeHandle<String> maybe_module_name =
+          WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+              isolate_, compiled_module_, import.module_name_offset,
+              import.module_name_length);
+      if (!maybe_module_name.ToHandle(&module_name)) return -1;
+
+      Handle<String> import_name;
+      MaybeHandle<String> maybe_import_name =
+          WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+              isolate_, compiled_module_, import.field_name_offset,
+              import.field_name_length);
+      if (!maybe_import_name.ToHandle(&import_name)) return -1;
 
       MaybeHandle<Object> result =
-          LookupImport(index, module_name, function_name);
+          LookupImport(index, module_name, import_name);
       if (thrower_->error()) return -1;
+      Handle<Object> value = result.ToHandleChecked();
 
       switch (import.kind) {
         case kExternalFunction: {
           // Function imports must be callable.
-          Handle<Object> function = result.ToHandleChecked();
-          if (!function->IsCallable()) {
-            ReportFFIError("function import requires a callable", index,
-                           module_name, function_name);
+          if (!value->IsCallable()) {
+            ReportLinkError("function import requires a callable", index,
+                            module_name, import_name);
             return -1;
           }
 
           Handle<Code> import_wrapper = CompileImportWrapper(
               isolate_, index, module_->functions[import.index].sig,
-              Handle<JSReceiver>::cast(function), module_name, function_name);
+              Handle<JSReceiver>::cast(value), module_name, import_name,
+              module_->origin);
           if (import_wrapper.is_null()) {
-            ReportFFIError("imported function does not match the expected type",
-                           index, module_name, function_name);
+            ReportLinkError(
+                "imported function does not match the expected type", index,
+                module_name, import_name);
             return -1;
           }
           code_table->set(num_imported_functions, *import_wrapper);
@@ -1434,10 +1523,9 @@
           break;
         }
         case kExternalTable: {
-          Handle<Object> value = result.ToHandleChecked();
           if (!WasmJs::IsWasmTableObject(isolate_, value)) {
-            ReportFFIError("table import requires a WebAssembly.Table", index,
-                           module_name, function_name);
+            ReportLinkError("table import requires a WebAssembly.Table", index,
+                            module_name, import_name);
             return -1;
           }
           WasmIndirectFunctionTable& table =
@@ -1445,23 +1533,43 @@
           TableInstance& table_instance = table_instances_[num_imported_tables];
           table_instance.table_object = Handle<WasmTableObject>::cast(value);
           table_instance.js_wrappers = Handle<FixedArray>(
-              table_instance.table_object->get_functions(), isolate_);
+              table_instance.table_object->functions(), isolate_);
 
-          // TODO(titzer): import table size must match exactly for now.
-          int table_size = table_instance.js_wrappers->length();
-          if (table_size != static_cast<int>(table.min_size)) {
-            thrower_->TypeError(
-                "table import %d is wrong size (%d), expected %u", index,
-                table_size, table.min_size);
+          int imported_cur_size = table_instance.js_wrappers->length();
+          if (imported_cur_size < static_cast<int>(table.min_size)) {
+            thrower_->LinkError(
+                "table import %d is smaller than minimum %d, got %u", index,
+                table.min_size, imported_cur_size);
             return -1;
           }
 
-          // Allocate a new dispatch table.
-          table_instance.dispatch_table =
-              isolate_->factory()->NewFixedArray(table_size * 2);
-          for (int i = 0; i < table_size * 2; ++i) {
-            table_instance.dispatch_table->set(i,
-                                               Smi::FromInt(kInvalidSigIndex));
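+          // If the module declares a maximum, the imported table must declare
+          // one too, and it must not exceed the declared maximum.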
+          if (table.has_max) {
+            int64_t imported_max_size =
+                table_instance.table_object->maximum_length();
+            if (imported_max_size < 0) {
+              thrower_->LinkError(
+                  "table import %d has no maximum length, expected %d", index,
+                  table.max_size);
+              return -1;
+            }
+            if (imported_max_size > table.max_size) {
+              thrower_->LinkError(
+                  "table import %d has maximum larger than maximum %d, "
+                  "got %" PRIx64,
+                  index, table.max_size, imported_max_size);
+              return -1;
+            }
+          }
+
+          // Allocate a new dispatch table and signature table.
+          int table_size = imported_cur_size;
+          table_instance.function_table =
+              isolate_->factory()->NewFixedArray(table_size);
+          table_instance.signature_table =
+              isolate_->factory()->NewFixedArray(table_size);
+          for (int i = 0; i < table_size; ++i) {
+            table_instance.signature_table->set(i,
+                                                Smi::FromInt(kInvalidSigIndex));
           }
           // Initialize the dispatch table with the (foreign) JS functions
           // that are already in the table.
@@ -1471,43 +1579,70 @@
             WasmFunction* function =
                 GetWasmFunctionForImportWrapper(isolate_, val);
             if (function == nullptr) {
-              thrower_->TypeError("table import %d[%d] is not a WASM function",
+              thrower_->LinkError("table import %d[%d] is not a WASM function",
                                   index, i);
               return -1;
             }
             int sig_index = table.map.FindOrInsert(function->sig);
-            table_instance.dispatch_table->set(i, Smi::FromInt(sig_index));
-            table_instance.dispatch_table->set(i + table_size,
-                                               *UnwrapImportWrapper(val));
+            table_instance.signature_table->set(i, Smi::FromInt(sig_index));
+            table_instance.function_table->set(i, *UnwrapImportWrapper(val));
           }
 
           num_imported_tables++;
           break;
         }
         case kExternalMemory: {
-          Handle<Object> object = result.ToHandleChecked();
-          if (!WasmJs::IsWasmMemoryObject(isolate_, object)) {
-            ReportFFIError("memory import must be a WebAssembly.Memory object",
-                           index, module_name, function_name);
+          // Validation should have failed if more than one memory object was
+          // provided.
+          DCHECK(!instance->has_memory_object());
+          if (!WasmJs::IsWasmMemoryObject(isolate_, value)) {
+            ReportLinkError("memory import must be a WebAssembly.Memory object",
+                            index, module_name, import_name);
             return -1;
           }
-          auto memory = Handle<WasmMemoryObject>::cast(object);
+          auto memory = Handle<WasmMemoryObject>::cast(value);
+          DCHECK(WasmJs::IsWasmMemoryObject(isolate_, memory));
           instance->set_memory_object(*memory);
-          memory_ = Handle<JSArrayBuffer>(memory->get_buffer(), isolate_);
+          memory_ = Handle<JSArrayBuffer>(memory->buffer(), isolate_);
+          uint32_t imported_cur_pages = static_cast<uint32_t>(
+              memory_->byte_length()->Number() / WasmModule::kPageSize);
+          if (imported_cur_pages < module_->min_mem_pages) {
+            thrower_->LinkError(
+                "memory import %d is smaller than minimum %u, got %u", index,
+                module_->min_mem_pages, imported_cur_pages);
+            return -1;
+          }
+          int32_t imported_max_pages = memory->maximum_pages();
+          if (module_->has_max_mem) {
+            if (imported_max_pages < 0) {
+              thrower_->LinkError(
+                  "memory import %d has no maximum limit, expected at most %u",
+                  index, imported_max_pages);
+              return -1;
+            }
+            if (static_cast<uint32_t>(imported_max_pages) >
+                module_->max_mem_pages) {
+              thrower_->LinkError(
+                  "memory import %d has larger maximum than maximum %u, got %d",
+                  index, module_->max_mem_pages, imported_max_pages);
+              return -1;
+            }
+          }
           break;
         }
         case kExternalGlobal: {
           // Global imports are converted to numbers and written into the
           // {globals_} array buffer.
-          Handle<Object> object = result.ToHandleChecked();
-          MaybeHandle<Object> number = Object::ToNumber(object);
-          if (number.is_null()) {
-            ReportFFIError("global import could not be converted to number",
-                           index, module_name, function_name);
+          if (module_->globals[import.index].type == kWasmI64) {
+            ReportLinkError("global import cannot have type i64", index,
+                            module_name, import_name);
             return -1;
           }
-          Handle<Object> val = number.ToHandleChecked();
-          WriteGlobalValue(module_->globals[import.index], val);
+          if (!value->IsNumber()) {
+            ReportLinkError("global import must be a number", index,
+                            module_name, import_name);
+            return -1;
+          }
+          WriteGlobalValue(module_->globals[import.index], value);
           break;
         }
         default:
@@ -1546,7 +1681,7 @@
               module_->globals[global.init.val.global_index].offset;
           TRACE("init [globals+%u] = [globals+%d]\n", global.offset,
                 old_offset);
-          size_t size = (global.type == kAstI64 || global.type == kAstF64)
+          size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
                             ? sizeof(double)
                             : sizeof(int32_t);
           memcpy(raw_buffer_ptr(globals_, new_offset),
@@ -1565,12 +1700,13 @@
 
   // Allocate memory for a module instance as a new JSArrayBuffer.
   Handle<JSArrayBuffer> AllocateMemory(uint32_t min_mem_pages) {
-    if (min_mem_pages > WasmModule::kV8MaxPages) {
+    if (min_mem_pages > FLAG_wasm_max_mem_pages) {
       thrower_->RangeError("Out of memory: wasm memory too large");
       return Handle<JSArrayBuffer>::null();
     }
-    Handle<JSArrayBuffer> mem_buffer =
-        NewArrayBuffer(isolate_, min_mem_pages * WasmModule::kPageSize);
+    const bool enable_guard_regions = EnableGuardRegions();
+    Handle<JSArrayBuffer> mem_buffer = NewArrayBuffer(
+        isolate_, min_mem_pages * WasmModule::kPageSize, enable_guard_regions);
 
     if (mem_buffer.is_null()) {
       thrower_->RangeError("Out of memory: wasm memory");
@@ -1578,70 +1714,118 @@
     return mem_buffer;
   }
 
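+  // True if any exported function, imported table entry, or exported table
+  // will require a JSFunction wrapper.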
+  bool NeedsWrappers() {
+    if (module_->num_exported_functions > 0) return true;
+    for (auto table_instance : table_instances_) {
+      if (!table_instance.js_wrappers.is_null()) return true;
+    }
+    for (auto table : module_->function_tables) {
+      if (table.exported) return true;
+    }
+    return false;
+  }
+
   // Process the exports, creating wrappers for functions, tables, memories,
   // and globals.
   void ProcessExports(Handle<FixedArray> code_table,
-                      Handle<WasmInstanceObject> instance) {
-    bool needs_wrappers = module_->num_exported_functions > 0;
-    for (auto table_instance : table_instances_) {
-      if (!table_instance.js_wrappers.is_null()) {
-        needs_wrappers = true;
-        break;
-      }
-    }
-    for (auto table : module_->function_tables) {
-      if (table.exported) {
-        needs_wrappers = true;
-        break;
-      }
-    }
-    if (needs_wrappers) {
+                      Handle<WasmInstanceObject> instance,
+                      Handle<WasmCompiledModule> compiled_module) {
+    if (NeedsWrappers()) {
       // Fill the table to cache the exported JSFunction wrappers.
       js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
                           Handle<JSFunction>::null());
     }
 
-    Handle<JSObject> exports_object = instance;
-    if (module_->export_table.size() > 0 && module_->origin == kWasmOrigin) {
+    Handle<JSObject> exports_object;
+    if (module_->origin == kWasmOrigin) {
       // Create the "exports" object.
+      exports_object = isolate_->factory()->NewJSObjectWithNullProto();
+    } else if (module_->origin == kAsmJsOrigin) {
       Handle<JSFunction> object_function = Handle<JSFunction>(
           isolate_->native_context()->object_function(), isolate_);
-      exports_object =
-          isolate_->factory()->NewJSObject(object_function, TENURED);
-      Handle<String> exports_name =
-          isolate_->factory()->InternalizeUtf8String("exports");
-      JSObject::AddProperty(instance, exports_name, exports_object, READ_ONLY);
+      exports_object = isolate_->factory()->NewJSObject(object_function);
+    } else {
+      UNREACHABLE();
     }
+    Handle<String> exports_name =
+        isolate_->factory()->InternalizeUtf8String("exports");
+    JSObject::AddProperty(instance, exports_name, exports_object, NONE);
+
+    Handle<String> foreign_init_name =
+        isolate_->factory()->InternalizeUtf8String(
+            wasm::AsmWasmBuilder::foreign_init_name);
+    Handle<String> single_function_name =
+        isolate_->factory()->InternalizeUtf8String(
+            wasm::AsmWasmBuilder::single_function_name);
 
     PropertyDescriptor desc;
-    desc.set_writable(false);
+    desc.set_writable(module_->origin == kAsmJsOrigin);
+    desc.set_enumerable(true);
 
-    // Process each export in the export table.
+    // Count the function exports, to size the table of weak exported
+    // functions.
     int export_index = 0;
     for (auto exp : module_->export_table) {
+      if (exp.kind == kExternalFunction) {
+        ++export_index;
+      }
+    }
+
+    // Store weak references to all exported functions.
+    Handle<FixedArray> weak_exported_functions;
+    if (compiled_module->has_weak_exported_functions()) {
+      weak_exported_functions = compiled_module->weak_exported_functions();
+    } else {
+      weak_exported_functions =
+          isolate_->factory()->NewFixedArray(export_index);
+      compiled_module->set_weak_exported_functions(weak_exported_functions);
+    }
+    DCHECK_EQ(export_index, weak_exported_functions->length());
+
+    // Process each export in the export table (go in reverse so asm.js
+    // can skip duplicates).
+    for (auto exp : base::Reversed(module_->export_table)) {
       Handle<String> name =
-          ExtractStringFromModuleBytes(isolate_, compiled_module_,
-                                       exp.name_offset, exp.name_length)
+          WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+              isolate_, compiled_module_, exp.name_offset, exp.name_length)
               .ToHandleChecked();
+      Handle<JSObject> export_to;
+      if (module_->origin == kAsmJsOrigin && exp.kind == kExternalFunction &&
+          (String::Equals(name, foreign_init_name) ||
+           String::Equals(name, single_function_name))) {
+        export_to = instance;
+      } else {
+        export_to = exports_object;
+      }
+
       switch (exp.kind) {
         case kExternalFunction: {
           // Wrap and export the code as a JSFunction.
           WasmFunction& function = module_->functions[exp.index];
           int func_index =
-              static_cast<int>(module_->functions.size() + export_index);
+              static_cast<int>(module_->functions.size() + --export_index);
           Handle<JSFunction> js_function = js_wrappers_[exp.index];
           if (js_function.is_null()) {
             // Wrap the exported code as a JSFunction.
             Handle<Code> export_code =
                 code_table->GetValueChecked<Code>(isolate_, func_index);
+            MaybeHandle<String> func_name;
+            if (module_->origin == kAsmJsOrigin) {
+              // For modules arising from asm.js, honor the names section.
+              func_name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+                              isolate_, compiled_module_, function.name_offset,
+                              function.name_length)
+                              .ToHandleChecked();
+            }
             js_function = WasmExportedFunction::New(
-                isolate_, instance, name, export_code,
-                static_cast<int>(function.sig->parameter_count()),
-                function.func_index);
+                isolate_, instance, func_name, function.func_index,
+                static_cast<int>(function.sig->parameter_count()), export_code);
             js_wrappers_[exp.index] = js_function;
           }
           desc.set_value(js_function);
-          export_index++;
+          Handle<WeakCell> weak_export =
+              isolate_->factory()->NewWeakCell(js_function);
+          DCHECK_GT(weak_exported_functions->length(), export_index);
+          weak_exported_functions->set(export_index, *weak_export);
           break;
         }
         case kExternalTable: {
@@ -1651,7 +1835,7 @@
               module_->function_tables[exp.index];
           if (table_instance.table_object.is_null()) {
             uint32_t maximum =
-                table.has_max ? table.max_size : WasmModule::kV8MaxTableSize;
+                table.has_max ? table.max_size : FLAG_wasm_max_table_size;
             table_instance.table_object = WasmTableObject::New(
                 isolate_, table.min_size, maximum, &table_instance.js_wrappers);
           }
@@ -1663,15 +1847,16 @@
           Handle<WasmMemoryObject> memory_object;
           if (!instance->has_memory_object()) {
             // If there was no imported WebAssembly.Memory object, create one.
-            Handle<JSArrayBuffer> buffer(instance->get_memory_buffer(),
-                                         isolate_);
+            Handle<JSArrayBuffer> buffer(instance->memory_buffer(), isolate_);
             memory_object = WasmMemoryObject::New(
                 isolate_, buffer,
                 (module_->max_mem_pages != 0) ? module_->max_mem_pages : -1);
             instance->set_memory_object(*memory_object);
           } else {
-            memory_object = Handle<WasmMemoryObject>(
-                instance->get_memory_object(), isolate_);
+            memory_object =
+                Handle<WasmMemoryObject>(instance->memory_object(), isolate_);
+            DCHECK(WasmJs::IsWasmMemoryObject(isolate_, memory_object));
+            memory_object->ResetInstancesLink(isolate_);
           }
 
           desc.set_value(memory_object);
@@ -1682,15 +1867,19 @@
           WasmGlobal& global = module_->globals[exp.index];
           double num = 0;
           switch (global.type) {
-            case kAstI32:
+            case kWasmI32:
               num = *GetRawGlobalPtr<int32_t>(global);
               break;
-            case kAstF32:
+            case kWasmF32:
               num = *GetRawGlobalPtr<float>(global);
               break;
-            case kAstF64:
+            case kWasmF64:
               num = *GetRawGlobalPtr<double>(global);
               break;
+            case kWasmI64:
+              thrower_->LinkError(
+                  "export of globals of type I64 is not allowed.");
+              break;
             default:
               UNREACHABLE();
           }
@@ -1702,42 +1891,99 @@
           break;
       }
 
+      // Skip duplicates for asm.js.
+      if (module_->origin == kAsmJsOrigin) {
+        v8::Maybe<bool> status = JSReceiver::HasOwnProperty(export_to, name);
+        if (status.FromMaybe(false)) {
+          continue;
+        }
+      }
       v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
-          isolate_, exports_object, name, &desc, Object::THROW_ON_ERROR);
+          isolate_, export_to, name, &desc, Object::THROW_ON_ERROR);
       if (!status.IsJust()) {
-        thrower_->TypeError("export of %.*s failed.", name->length(),
+        thrower_->LinkError("export of %.*s failed.", name->length(),
                             name->ToCString().get());
         return;
       }
     }
+
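+    // Exports objects of WebAssembly modules are frozen so their properties
+    // cannot be modified afterwards; asm.js exports objects stay mutable.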
+    if (module_->origin == kWasmOrigin) {
+      v8::Maybe<bool> success = JSReceiver::SetIntegrityLevel(
+          exports_object, FROZEN, Object::DONT_THROW);
+      DCHECK(success.FromMaybe(false));
+      USE(success);
+    }
   }
 
   void InitializeTables(Handle<FixedArray> code_table,
-                        Handle<WasmInstanceObject> instance) {
-    Handle<FixedArray> old_function_tables =
-        compiled_module_->function_tables();
+                        Handle<WasmInstanceObject> instance,
+                        CodeSpecialization* code_specialization) {
     int function_table_count =
         static_cast<int>(module_->function_tables.size());
     Handle<FixedArray> new_function_tables =
         isolate_->factory()->NewFixedArray(function_table_count);
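+    // Each function table is paired with a parallel signature table holding
+    // the signature indices that indirect calls check against.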
+    Handle<FixedArray> new_signature_tables =
+        isolate_->factory()->NewFixedArray(function_table_count);
     for (int index = 0; index < function_table_count; ++index) {
       WasmIndirectFunctionTable& table = module_->function_tables[index];
       TableInstance& table_instance = table_instances_[index];
       int table_size = static_cast<int>(table.min_size);
 
-      if (table_instance.dispatch_table.is_null()) {
+      if (table_instance.function_table.is_null()) {
         // Create a new dispatch table if necessary.
-        table_instance.dispatch_table =
-            isolate_->factory()->NewFixedArray(table_size * 2);
+        table_instance.function_table =
+            isolate_->factory()->NewFixedArray(table_size);
+        table_instance.signature_table =
+            isolate_->factory()->NewFixedArray(table_size);
         for (int i = 0; i < table_size; ++i) {
           // Fill the table with invalid signature indexes so that
           // uninitialized entries will always fail the signature check.
-          table_instance.dispatch_table->set(i, Smi::FromInt(kInvalidSigIndex));
+          table_instance.signature_table->set(i,
+                                              Smi::FromInt(kInvalidSigIndex));
+        }
+      } else {
+        // The table is imported; patch the bounds checks to match its
+        // actual size.
+        DCHECK(table_size <= table_instance.function_table->length());
+        if (table_size < table_instance.function_table->length()) {
+          code_specialization->PatchTableSize(
+              table_size, table_instance.function_table->length());
         }
       }
 
       new_function_tables->set(static_cast<int>(index),
-                               *table_instance.dispatch_table);
+                               *table_instance.function_table);
+      new_signature_tables->set(static_cast<int>(index),
+                                *table_instance.signature_table);
+    }
+
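+    // Retarget all code references from the tables recorded on the compiled
+    // module to the per-instance tables created above.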
+    FixedArray* old_function_tables =
+        compiled_module_->ptr_to_function_tables();
+    DCHECK_EQ(old_function_tables->length(), new_function_tables->length());
+    for (int i = 0, e = new_function_tables->length(); i < e; ++i) {
+      code_specialization->RelocateObject(
+          handle(old_function_tables->get(i), isolate_),
+          handle(new_function_tables->get(i), isolate_));
+    }
+    FixedArray* old_signature_tables =
+        compiled_module_->ptr_to_signature_tables();
+    DCHECK_EQ(old_signature_tables->length(), new_signature_tables->length());
+    for (int i = 0, e = new_signature_tables->length(); i < e; ++i) {
+      code_specialization->RelocateObject(
+          handle(old_signature_tables->get(i), isolate_),
+          handle(new_signature_tables->get(i), isolate_));
+    }
+
+    compiled_module_->set_function_tables(new_function_tables);
+    compiled_module_->set_signature_tables(new_signature_tables);
+  }
+
+  void LoadTableSegments(Handle<FixedArray> code_table,
+                         Handle<WasmInstanceObject> instance) {
+    int function_table_count =
+        static_cast<int>(module_->function_tables.size());
+    for (int index = 0; index < function_table_count; ++index) {
+      WasmIndirectFunctionTable& table = module_->function_tables[index];
+      TableInstance& table_instance = table_instances_[index];
 
       Handle<FixedArray> all_dispatch_tables;
       if (!table_instance.table_object.is_null()) {
@@ -1745,28 +1991,24 @@
         all_dispatch_tables = WasmTableObject::AddDispatchTable(
             isolate_, table_instance.table_object,
             Handle<WasmInstanceObject>::null(), index,
-            Handle<FixedArray>::null());
+            Handle<FixedArray>::null(), Handle<FixedArray>::null());
       }
 
       // TODO(titzer): this does redundant work if there are multiple tables,
       // since initializations are not sorted by table index.
       for (auto table_init : module_->table_inits) {
         uint32_t base = EvalUint32InitExpr(table_init.offset);
-        if (base > static_cast<uint32_t>(table_size) ||
-            (base + table_init.entries.size() >
-             static_cast<uint32_t>(table_size))) {
-          thrower_->CompileError("table initializer is out of bounds");
-          continue;
-        }
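+        // Table initializer bounds have already been validated at this
+        // point, so a debug check suffices here.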
+        DCHECK(in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
+                         table_instance.function_table->length()));
         for (int i = 0; i < static_cast<int>(table_init.entries.size()); ++i) {
           uint32_t func_index = table_init.entries[i];
           WasmFunction* function = &module_->functions[func_index];
           int table_index = static_cast<int>(i + base);
           int32_t sig_index = table.map.Find(function->sig);
           DCHECK_GE(sig_index, 0);
-          table_instance.dispatch_table->set(table_index,
-                                             Smi::FromInt(sig_index));
-          table_instance.dispatch_table->set(table_index + table_size,
+          table_instance.signature_table->set(table_index,
+                                              Smi::FromInt(sig_index));
+          table_instance.function_table->set(table_index,
                                              code_table->get(func_index));
 
           if (!all_dispatch_tables.is_null()) {
@@ -1777,25 +2019,24 @@
               // TODO(titzer): We compile JS->WASM wrappers for functions that are
               // not exported but are in an exported table. This should be done
               // at module compile time and cached instead.
-              WasmInstance temp_instance(module_);
-              temp_instance.context = isolate_->native_context();
-              temp_instance.mem_size = 0;
-              temp_instance.mem_start = nullptr;
-              temp_instance.globals_start = nullptr;
 
-              ModuleEnv module_env;
-              module_env.module = module_;
-              module_env.instance = &temp_instance;
-              module_env.origin = module_->origin;
-
-              Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
-                  isolate_, &module_env, wasm_code, func_index);
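+              // Clone a previously compiled JS->wasm wrapper from the cache
+              // where possible instead of recompiling it.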
+              Handle<Code> wrapper_code =
+                  js_to_wasm_cache_.CloneOrCompileJSToWasmWrapper(
+                      isolate_, module_, wasm_code, func_index);
+              MaybeHandle<String> func_name;
+              if (module_->origin == kAsmJsOrigin) {
+                // For modules arising from asm.js, honor the names section.
+                func_name =
+                    WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+                        isolate_, compiled_module_, function->name_offset,
+                        function->name_length)
+                        .ToHandleChecked();
+              }
               Handle<WasmExportedFunction> js_function =
                   WasmExportedFunction::New(
-                      isolate_, instance, isolate_->factory()->empty_string(),
-                      wrapper_code,
+                      isolate_, instance, func_name, func_index,
                       static_cast<int>(function->sig->parameter_count()),
-                      func_index);
+                      wrapper_code);
               js_wrappers_[func_index] = js_function;
             }
             table_instance.js_wrappers->set(table_index,
@@ -1814,210 +2055,46 @@
         // Add the new dispatch table to the WebAssembly.Table object.
         all_dispatch_tables = WasmTableObject::AddDispatchTable(
             isolate_, table_instance.table_object, instance, index,
-            table_instance.dispatch_table);
+            table_instance.function_table, table_instance.signature_table);
       }
     }
-    // Patch all code that has references to the old indirect tables.
-    for (int i = 0; i < code_table->length(); ++i) {
-      if (!code_table->get(i)->IsCode()) continue;
-      Handle<Code> code(Code::cast(code_table->get(i)), isolate_);
-      for (int j = 0; j < function_table_count; ++j) {
-        ReplaceReferenceInCode(
-            code, Handle<Object>(old_function_tables->get(j), isolate_),
-            Handle<Object>(new_function_tables->get(j), isolate_));
-      }
-    }
-    compiled_module_->set_function_tables(new_function_tables);
   }
 };
 
-// Instantiates a WASM module, creating a WebAssembly.Instance from a
-// WebAssembly.Module.
-MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
-                                              ErrorThrower* thrower,
-                                              Handle<JSObject> wasm_module,
-                                              Handle<JSReceiver> ffi,
-                                              Handle<JSArrayBuffer> memory) {
-  WasmInstanceBuilder builder(isolate, thrower, wasm_module, ffi, memory);
-  return builder.Build();
-}
-
-Handle<String> wasm::GetWasmFunctionName(Isolate* isolate,
-                                         Handle<Object> instance_or_undef,
-                                         uint32_t func_index) {
-  if (!instance_or_undef->IsUndefined(isolate)) {
-    Handle<WasmCompiledModule> compiled_module(
-        Handle<WasmInstanceObject>::cast(instance_or_undef)
-            ->get_compiled_module());
-    MaybeHandle<String> maybe_name =
-        WasmCompiledModule::GetFunctionName(compiled_module, func_index);
-    if (!maybe_name.is_null()) return maybe_name.ToHandleChecked();
-  }
-  return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
-}
-
 bool wasm::IsWasmInstance(Object* object) {
   return WasmInstanceObject::IsWasmInstanceObject(object);
 }
 
-WasmCompiledModule* wasm::GetCompiledModule(Object* object) {
-  return WasmInstanceObject::cast(object)->get_compiled_module();
-}
-
-bool wasm::WasmIsAsmJs(Object* instance, Isolate* isolate) {
-  if (instance->IsUndefined(isolate)) return false;
-  DCHECK(IsWasmInstance(instance));
-  WasmCompiledModule* compiled_module =
-      GetCompiledModule(JSObject::cast(instance));
-  DCHECK_EQ(compiled_module->has_asm_js_offset_tables(),
-            compiled_module->script()->type() == Script::TYPE_NORMAL);
-  return compiled_module->has_asm_js_offset_tables();
-}
-
 Handle<Script> wasm::GetScript(Handle<JSObject> instance) {
-  DCHECK(IsWasmInstance(*instance));
-  WasmCompiledModule* compiled_module = GetCompiledModule(*instance);
-  DCHECK(compiled_module->has_script());
-  return compiled_module->script();
+  WasmCompiledModule* compiled_module =
+      WasmInstanceObject::cast(*instance)->compiled_module();
+  return handle(compiled_module->script());
 }
 
-int wasm::GetAsmWasmSourcePosition(Handle<JSObject> instance, int func_index,
-                                   int byte_offset) {
-  return WasmDebugInfo::GetAsmJsSourcePosition(GetDebugInfo(instance),
-                                               func_index, byte_offset);
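+// Code generation is allowed if the embedder did not install a callback to
+// restrict it, or if the installed callback permits it for {context}.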
+bool wasm::IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
+  return isolate->allow_code_gen_callback() == nullptr ||
+         isolate->allow_code_gen_callback()(v8::Utils::ToLocal(context));
 }
 
-Handle<SeqOneByteString> wasm::GetWasmBytes(Handle<JSObject> object) {
-  return Handle<WasmInstanceObject>::cast(object)
-      ->get_compiled_module()
-      ->module_bytes();
-}
-
-Handle<WasmDebugInfo> wasm::GetDebugInfo(Handle<JSObject> object) {
-  auto instance = Handle<WasmInstanceObject>::cast(object);
-  if (instance->has_debug_info()) {
-    Handle<WasmDebugInfo> info(instance->get_debug_info(),
-                               instance->GetIsolate());
-    return info;
-  }
-  Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(instance);
-  instance->set_debug_info(*new_info);
-  return new_info;
-}
-
-int wasm::GetNumberOfFunctions(Handle<JSObject> object) {
-  return static_cast<int>(
-      Handle<WasmInstanceObject>::cast(object)->module()->functions.size());
-}
-
-// TODO(clemensh): origin can be inferred from asm_js_script; remove it.
-MaybeHandle<WasmModuleObject> wasm::CreateModuleObjectFromBytes(
-    Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
-    ModuleOrigin origin, Handle<Script> asm_js_script,
-    const byte* asm_js_offset_tables_start,
-    const byte* asm_js_offset_tables_end) {
-  MaybeHandle<WasmModuleObject> nothing;
-  ModuleResult result = DecodeWasmModule(isolate, start, end, false, origin);
-  if (result.failed()) {
-    if (result.val) delete result.val;
-    thrower->CompileFailed("Wasm decoding failed", result);
-    return nothing;
-  }
-  // The {module_wrapper} will take ownership of the {WasmModule} object,
-  // and it will be destroyed when the GC reclaims the wrapper object.
-  Handle<WasmModuleWrapper> module_wrapper =
-      WasmModuleWrapper::New(isolate, const_cast<WasmModule*>(result.val));
-
-  // Compile the functions of the module, producing a compiled module.
-  MaybeHandle<WasmCompiledModule> maybe_compiled_module =
-      result.val->CompileFunctions(isolate, module_wrapper, thrower);
-
-  if (maybe_compiled_module.is_null()) return nothing;
-
-  Handle<WasmCompiledModule> compiled_module =
-      maybe_compiled_module.ToHandleChecked();
-
-  DCHECK_EQ(origin == kAsmJsOrigin, !asm_js_script.is_null());
-  DCHECK(!compiled_module->has_script());
-  DCHECK(!compiled_module->has_asm_js_offset_tables());
-  if (origin == kAsmJsOrigin) {
-    // Set script for the asm.js source, and the offset table mapping wasm byte
-    // offsets to source positions.
-    compiled_module->set_script(asm_js_script);
-    size_t offset_tables_len =
-        asm_js_offset_tables_end - asm_js_offset_tables_start;
-    DCHECK_GE(static_cast<size_t>(kMaxInt), offset_tables_len);
-    Handle<ByteArray> offset_tables =
-        isolate->factory()->NewByteArray(static_cast<int>(offset_tables_len));
-    memcpy(offset_tables->GetDataStartAddress(), asm_js_offset_tables_start,
-           offset_tables_len);
-    compiled_module->set_asm_js_offset_tables(offset_tables);
-  } else {
-    // Create a new Script object representing this wasm module, store it in the
-    // compiled wasm module, and register it at the debugger.
-    Handle<Script> script =
-        isolate->factory()->NewScript(isolate->factory()->empty_string());
-    script->set_type(Script::TYPE_WASM);
-
-    DCHECK_GE(kMaxInt, end - start);
-    int hash = StringHasher::HashSequentialString(
-        reinterpret_cast<const char*>(start), static_cast<int>(end - start),
-        kZeroHashSeed);
-
-    const int kBufferSize = 50;
-    char buffer[kBufferSize];
-    int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
-    DCHECK(url_chars >= 0 && url_chars < kBufferSize);
-    MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
-        Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
-        TENURED);
-    script->set_source_url(*url_str.ToHandleChecked());
-
-    int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
-    DCHECK(name_chars >= 0 && name_chars < kBufferSize);
-    MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
-        Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
-        TENURED);
-    script->set_name(*name_str.ToHandleChecked());
-
-    script->set_wasm_compiled_module(*compiled_module);
-    compiled_module->set_script(script);
-    isolate->debug()->OnAfterCompile(script);
-  }
-
-  return WasmModuleObject::New(isolate, compiled_module);
-}
-
-bool wasm::ValidateModuleBytes(Isolate* isolate, const byte* start,
-                               const byte* end, ErrorThrower* thrower,
-                               ModuleOrigin origin) {
-  ModuleResult result = DecodeWasmModule(isolate, start, end, true, origin);
-  if (result.val) {
-    delete result.val;
-  } else {
-    DCHECK(!result.ok());
-  }
-  return result.ok();
-}
-
-MaybeHandle<JSArrayBuffer> wasm::GetInstanceMemory(Isolate* isolate,
-                                                   Handle<JSObject> object) {
+MaybeHandle<JSArrayBuffer> wasm::GetInstanceMemory(
+    Isolate* isolate, Handle<WasmInstanceObject> object) {
   auto instance = Handle<WasmInstanceObject>::cast(object);
   if (instance->has_memory_buffer()) {
-    return Handle<JSArrayBuffer>(instance->get_memory_buffer(), isolate);
+    return Handle<JSArrayBuffer>(instance->memory_buffer(), isolate);
   }
   return MaybeHandle<JSArrayBuffer>();
 }
 
-void SetInstanceMemory(Handle<JSObject> object, JSArrayBuffer* buffer) {
+void SetInstanceMemory(Handle<WasmInstanceObject> instance,
+                       JSArrayBuffer* buffer) {
   DisallowHeapAllocation no_gc;
-  auto instance = Handle<WasmInstanceObject>::cast(object);
   instance->set_memory_buffer(buffer);
-  instance->get_compiled_module()->set_ptr_to_memory(buffer);
+  instance->compiled_module()->set_ptr_to_memory(buffer);
 }
 
 int32_t wasm::GetInstanceMemorySize(Isolate* isolate,
-                                    Handle<JSObject> instance) {
+                                    Handle<WasmInstanceObject> instance) {
+  DCHECK(IsWasmInstance(*instance));
   MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
       GetInstanceMemory(isolate, instance);
   Handle<JSArrayBuffer> buffer;
@@ -2028,92 +2105,247 @@
   }
 }
 
-uint32_t GetMaxInstanceMemorySize(Isolate* isolate,
-                                  Handle<WasmInstanceObject> instance) {
+uint32_t GetMaxInstanceMemoryPages(Isolate* isolate,
+                                   Handle<WasmInstanceObject> instance) {
   if (instance->has_memory_object()) {
-    Handle<WasmMemoryObject> memory_object(instance->get_memory_object(),
-                                           isolate);
-
-    int maximum = memory_object->maximum_pages();
-    if (maximum > 0) return static_cast<uint32_t>(maximum);
+    Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate);
+    if (memory_object->has_maximum_pages()) {
+      uint32_t maximum = static_cast<uint32_t>(memory_object->maximum_pages());
+      if (maximum < FLAG_wasm_max_mem_pages) return maximum;
+    }
   }
-  uint32_t compiled_max_pages =
-      instance->get_compiled_module()->max_mem_pages();
+  uint32_t compiled_max_pages = instance->compiled_module()->max_mem_pages();
   isolate->counters()->wasm_max_mem_pages_count()->AddSample(
       compiled_max_pages);
   if (compiled_max_pages != 0) return compiled_max_pages;
-  return WasmModule::kV8MaxPages;
+  return FLAG_wasm_max_mem_pages;
 }
 
-int32_t wasm::GrowInstanceMemory(Isolate* isolate, Handle<JSObject> object,
-                                 uint32_t pages) {
-  if (!IsWasmInstance(*object)) return -1;
-  auto instance = Handle<WasmInstanceObject>::cast(object);
-  if (pages == 0) return GetInstanceMemorySize(isolate, instance);
-  uint32_t max_pages = GetMaxInstanceMemorySize(isolate, instance);
-
-  Address old_mem_start = nullptr;
-  uint32_t old_size = 0, new_size = 0;
-
-  MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
-      GetInstanceMemory(isolate, instance);
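+// Allocate a buffer {pages} wasm pages larger than {buffer} and copy over the
+// old contents. Returns a null handle if the new size overflows, exceeds
+// {max_pages} or the {FLAG_wasm_max_mem_pages} limit, or if allocation fails.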
+Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
+                                       MaybeHandle<JSArrayBuffer> buffer,
+                                       uint32_t pages, uint32_t max_pages) {
   Handle<JSArrayBuffer> old_buffer;
-  if (!maybe_mem_buffer.ToHandle(&old_buffer) ||
-      old_buffer->backing_store() == nullptr) {
-    // If module object does not have linear memory associated with it,
-    // Allocate new array buffer of given size.
-    new_size = pages * WasmModule::kPageSize;
-    if (max_pages < pages) return -1;
-  } else {
+  Address old_mem_start = nullptr;
+  uint32_t old_size = 0;
+  if (buffer.ToHandle(&old_buffer) && old_buffer->backing_store() != nullptr) {
     old_mem_start = static_cast<Address>(old_buffer->backing_store());
-    old_size = old_buffer->byte_length()->Number();
-    // If the old memory was zero-sized, we should have been in the
-    // "undefined" case above.
     DCHECK_NOT_NULL(old_mem_start);
-    DCHECK(old_size + pages * WasmModule::kPageSize <=
-           std::numeric_limits<uint32_t>::max());
-    new_size = old_size + pages * WasmModule::kPageSize;
+    old_size = old_buffer->byte_length()->Number();
+  }
+  DCHECK(old_size + pages * WasmModule::kPageSize <=
+         std::numeric_limits<uint32_t>::max());
+  uint32_t new_size = old_size + pages * WasmModule::kPageSize;
+  if (new_size <= old_size || max_pages * WasmModule::kPageSize < new_size ||
+      FLAG_wasm_max_mem_pages * WasmModule::kPageSize < new_size) {
+    return Handle<JSArrayBuffer>::null();
   }
 
-  if (new_size <= old_size || max_pages * WasmModule::kPageSize < new_size ||
-      WasmModule::kV8MaxPages * WasmModule::kPageSize < new_size) {
-    return -1;
-  }
-  Handle<JSArrayBuffer> buffer = NewArrayBuffer(isolate, new_size);
-  if (buffer.is_null()) return -1;
-  Address new_mem_start = static_cast<Address>(buffer->backing_store());
+  // TODO(gdeepti): Change the protection here instead of allocating a new
+  // buffer before guard regions are turned on, see issue #5886.
+  const bool enable_guard_regions =
+      !old_buffer.is_null() && old_buffer->has_guard_region();
+  Handle<JSArrayBuffer> new_buffer =
+      NewArrayBuffer(isolate, new_size, enable_guard_regions);
+  if (new_buffer.is_null()) return new_buffer;
+  Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
   if (old_size != 0) {
     memcpy(new_mem_start, old_mem_start, old_size);
   }
-  SetInstanceMemory(instance, *buffer);
-  Handle<FixedArray> code_table = instance->get_compiled_module()->code_table();
-  RelocateMemoryReferencesInCode(code_table, old_mem_start, new_mem_start,
-                                 old_size, new_size);
-  if (instance->has_memory_object()) {
-    instance->get_memory_object()->set_buffer(*buffer);
-  }
+  return new_buffer;
+}
 
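+// Relocate all memory references in the instance's code from the old memory
+// (start address and size) to the instance's current memory buffer. No
+// validity checks are performed, hence "unchecked".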
+void UncheckedUpdateInstanceMemory(Isolate* isolate,
+                                   Handle<WasmInstanceObject> instance,
+                                   Address old_mem_start, uint32_t old_size) {
+  DCHECK(instance->has_memory_buffer());
+  Handle<JSArrayBuffer> mem_buffer(instance->memory_buffer());
+  uint32_t new_size = mem_buffer->byte_length()->Number();
+  Address new_mem_start = static_cast<Address>(mem_buffer->backing_store());
+  DCHECK_NOT_NULL(new_mem_start);
+  Zone specialization_zone(isolate->allocator(), ZONE_NAME);
+  CodeSpecialization code_specialization(isolate, &specialization_zone);
+  code_specialization.RelocateMemoryReferences(old_mem_start, old_size,
+                                               new_mem_start, new_size);
+  code_specialization.ApplyToWholeInstance(*instance);
+}
+
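+// Neuter {buffer} and release its backing store. Buffers with a guard region
+// were reserved through the OS allocator and must be freed the same way.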
+void DetachArrayBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer) {
+  const bool has_guard_regions =
+      (!buffer.is_null() && buffer->has_guard_region());
+  void* backing_store = buffer->backing_store();
+  if (backing_store != nullptr) {
+    DCHECK(!buffer->is_neuterable());
+    int64_t byte_length = NumberToSize(buffer->byte_length());
+    buffer->set_is_neuterable(true);
+    if (!has_guard_regions) {
+      buffer->set_is_external(true);
+      isolate->heap()->UnregisterArrayBuffer(*buffer);
+    }
+    buffer->Neuter();
+    if (!has_guard_regions) {
+      isolate->array_buffer_allocator()->Free(backing_store, byte_length);
+    } else {
+      base::OS::Free(backing_store, RoundUp(i::wasm::kWasmMaxHeapOffset,
+                                            base::OS::CommitPageSize()));
+      reinterpret_cast<v8::Isolate*>(isolate)
+          ->AdjustAmountOfExternalAllocatedMemory(-byte_length);
+    }
+  }
+}
+
+int32_t wasm::GrowWebAssemblyMemory(Isolate* isolate,
+                                    Handle<WasmMemoryObject> receiver,
+                                    uint32_t pages) {
+  DCHECK(WasmJs::IsWasmMemoryObject(isolate, receiver));
+  Handle<WasmMemoryObject> memory_object =
+      handle(WasmMemoryObject::cast(*receiver));
+  MaybeHandle<JSArrayBuffer> memory_buffer = handle(memory_object->buffer());
+  Handle<JSArrayBuffer> old_buffer;
+  uint32_t old_size = 0;
+  Address old_mem_start = nullptr;
+  if (memory_buffer.ToHandle(&old_buffer) &&
+      old_buffer->backing_store() != nullptr) {
+    old_size = old_buffer->byte_length()->Number();
+    old_mem_start = static_cast<Address>(old_buffer->backing_store());
+  }
+  Handle<JSArrayBuffer> new_buffer;
+  // If growing by zero pages, just return the current size in pages.
+  if (pages == 0) {
+    if (!old_buffer.is_null() && old_buffer->backing_store() != nullptr) {
+      new_buffer = SetupArrayBuffer(isolate, old_buffer->backing_store(),
+                                    old_size, old_buffer->is_external(),
+                                    old_buffer->has_guard_region());
+      memory_object->set_buffer(*new_buffer);
+      old_buffer->set_is_neuterable(true);
+      if (!old_buffer->has_guard_region()) {
+        old_buffer->set_is_external(true);
+        isolate->heap()->UnregisterArrayBuffer(*old_buffer);
+      }
+      // Neuter but don't free the memory because it is now being used by
+      // new_buffer.
+      old_buffer->Neuter();
+    }
+    DCHECK(old_size % WasmModule::kPageSize == 0);
+    return (old_size / WasmModule::kPageSize);
+  }
+  if (!memory_object->has_instances_link()) {
+    // The memory object has no instance associated with it; just grow the
+    // buffer.
+    uint32_t max_pages;
+    if (memory_object->has_maximum_pages()) {
+      max_pages = static_cast<uint32_t>(memory_object->maximum_pages());
+      if (FLAG_wasm_max_mem_pages < max_pages) return -1;
+    } else {
+      max_pages = FLAG_wasm_max_mem_pages;
+    }
+    new_buffer = GrowMemoryBuffer(isolate, memory_buffer, pages, max_pages);
+    if (new_buffer.is_null()) return -1;
+  } else {
+    Handle<WasmInstanceWrapper> instance_wrapper(
+        memory_object->instances_link());
+    DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
+    DCHECK(instance_wrapper->has_instance());
+    Handle<WasmInstanceObject> instance = instance_wrapper->instance_object();
+    DCHECK(IsWasmInstance(*instance));
+    uint32_t max_pages = GetMaxInstanceMemoryPages(isolate, instance);
+
+    // Grow memory object buffer and update instances associated with it.
+    new_buffer = GrowMemoryBuffer(isolate, memory_buffer, pages, max_pages);
+    if (new_buffer.is_null()) return -1;
+    DCHECK(!instance_wrapper->has_previous());
+    SetInstanceMemory(instance, *new_buffer);
+    UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
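+    // All instances sharing this memory are linked through instance wrappers;
+    // walk the list and update each of them to use the new buffer as well.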
+    while (instance_wrapper->has_next()) {
+      instance_wrapper = instance_wrapper->next_wrapper();
+      DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
+      Handle<WasmInstanceObject> instance = instance_wrapper->instance_object();
+      DCHECK(IsWasmInstance(*instance));
+      SetInstanceMemory(instance, *new_buffer);
+      UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
+    }
+  }
+  memory_object->set_buffer(*new_buffer);
+  DetachArrayBuffer(isolate, old_buffer);
   DCHECK(old_size % WasmModule::kPageSize == 0);
   return (old_size / WasmModule::kPageSize);
 }
 
+int32_t wasm::GrowMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
+                         uint32_t pages) {
+  if (!IsWasmInstance(*instance)) return -1;
+  if (pages == 0) return GetInstanceMemorySize(isolate, instance);
+  Handle<WasmInstanceObject> instance_obj(WasmInstanceObject::cast(*instance));
+  if (!instance_obj->has_memory_object()) {
+    // No other instances to grow, grow just the one.
+    MaybeHandle<JSArrayBuffer> instance_buffer =
+        GetInstanceMemory(isolate, instance);
+    Handle<JSArrayBuffer> old_buffer;
+    uint32_t old_size = 0;
+    Address old_mem_start = nullptr;
+    if (instance_buffer.ToHandle(&old_buffer) &&
+        old_buffer->backing_store() != nullptr) {
+      old_size = old_buffer->byte_length()->Number();
+      old_mem_start = static_cast<Address>(old_buffer->backing_store());
+    }
+    uint32_t max_pages = GetMaxInstanceMemoryPages(isolate, instance_obj);
+    Handle<JSArrayBuffer> buffer =
+        GrowMemoryBuffer(isolate, instance_buffer, pages, max_pages);
+    if (buffer.is_null()) return -1;
+    SetInstanceMemory(instance, *buffer);
+    UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
+    DCHECK(old_size % WasmModule::kPageSize == 0);
+    return (old_size / WasmModule::kPageSize);
+  } else {
+    return GrowWebAssemblyMemory(isolate, handle(instance_obj->memory_object()),
+                                 pages);
+  }
+}
+
+void wasm::GrowDispatchTables(Isolate* isolate,
+                              Handle<FixedArray> dispatch_tables,
+                              uint32_t old_size, uint32_t count) {
+  DCHECK_EQ(0, dispatch_tables->length() % 4);
+
+  Zone specialization_zone(isolate->allocator(), ZONE_NAME);
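+  // Each dispatch table entry occupies four slots:
+  // [instance, table index, function table, signature table].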
+  for (int i = 0; i < dispatch_tables->length(); i += 4) {
+    Handle<FixedArray> old_function_table(
+        FixedArray::cast(dispatch_tables->get(i + 2)));
+    Handle<FixedArray> old_signature_table(
+        FixedArray::cast(dispatch_tables->get(i + 3)));
+    Handle<FixedArray> new_function_table =
+        isolate->factory()->CopyFixedArrayAndGrow(old_function_table, count);
+    Handle<FixedArray> new_signature_table =
+        isolate->factory()->CopyFixedArrayAndGrow(old_signature_table, count);
+
+    // Update the dispatch table entry with the new function and signature
+    // tables.
+    dispatch_tables->set(i + 2, *new_function_table);
+    dispatch_tables->set(i + 3, *new_signature_table);
+
+    // Patch the code of the respective instance.
+    CodeSpecialization code_specialization(isolate, &specialization_zone);
+    code_specialization.PatchTableSize(old_size, old_size + count);
+    code_specialization.RelocateObject(old_function_table, new_function_table);
+    code_specialization.RelocateObject(old_signature_table,
+                                       new_signature_table);
+    code_specialization.ApplyToWholeInstance(
+        WasmInstanceObject::cast(dispatch_tables->get(i)));
+  }
+}
+
 void testing::ValidateInstancesChain(Isolate* isolate,
-                                     Handle<JSObject> wasm_module,
+                                     Handle<WasmModuleObject> module_obj,
                                      int instance_count) {
   CHECK_GE(instance_count, 0);
   DisallowHeapAllocation no_gc;
-  WasmCompiledModule* compiled_module =
-      WasmCompiledModule::cast(wasm_module->GetInternalField(0));
+  WasmCompiledModule* compiled_module = module_obj->compiled_module();
   CHECK_EQ(JSObject::cast(compiled_module->ptr_to_weak_wasm_module()->value()),
-           *wasm_module);
+           *module_obj);
   Object* prev = nullptr;
   int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
   WasmCompiledModule* current_instance = compiled_module;
   while (current_instance->has_weak_next_instance()) {
     CHECK((prev == nullptr && !current_instance->has_weak_prev_instance()) ||
           current_instance->ptr_to_weak_prev_instance()->value() == prev);
-    CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(),
-             *wasm_module);
+    CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(), *module_obj);
     CHECK(IsWasmInstance(
         current_instance->ptr_to_weak_owning_instance()->value()));
     prev = current_instance;
@@ -2126,63 +2358,359 @@
 }
 
 void testing::ValidateModuleState(Isolate* isolate,
-                                  Handle<JSObject> wasm_module) {
+                                  Handle<WasmModuleObject> module_obj) {
   DisallowHeapAllocation no_gc;
-  WasmCompiledModule* compiled_module =
-      WasmCompiledModule::cast(wasm_module->GetInternalField(0));
+  WasmCompiledModule* compiled_module = module_obj->compiled_module();
   CHECK(compiled_module->has_weak_wasm_module());
-  CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *wasm_module);
+  CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *module_obj);
   CHECK(!compiled_module->has_weak_prev_instance());
   CHECK(!compiled_module->has_weak_next_instance());
   CHECK(!compiled_module->has_weak_owning_instance());
 }
 
 void testing::ValidateOrphanedInstance(Isolate* isolate,
-                                       Handle<JSObject> object) {
+                                       Handle<WasmInstanceObject> instance) {
   DisallowHeapAllocation no_gc;
-  WasmInstanceObject* instance = WasmInstanceObject::cast(*object);
-  WasmCompiledModule* compiled_module = instance->get_compiled_module();
+  WasmCompiledModule* compiled_module = instance->compiled_module();
   CHECK(compiled_module->has_weak_wasm_module());
   CHECK(compiled_module->ptr_to_weak_wasm_module()->cleared());
 }
 
-void WasmCompiledModule::RecreateModuleWrapper(Isolate* isolate,
-                                               Handle<FixedArray> array) {
-  Handle<WasmCompiledModule> compiled_module(
-      reinterpret_cast<WasmCompiledModule*>(*array), isolate);
+Handle<JSArray> wasm::GetImports(Isolate* isolate,
+                                 Handle<WasmModuleObject> module_object) {
+  Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
+                                             isolate);
+  Factory* factory = isolate->factory();
 
-  WasmModule* module = nullptr;
+  Handle<String> module_string = factory->InternalizeUtf8String("module");
+  Handle<String> name_string = factory->InternalizeUtf8String("name");
+  Handle<String> kind_string = factory->InternalizeUtf8String("kind");
+
+  Handle<String> function_string = factory->InternalizeUtf8String("function");
+  Handle<String> table_string = factory->InternalizeUtf8String("table");
+  Handle<String> memory_string = factory->InternalizeUtf8String("memory");
+  Handle<String> global_string = factory->InternalizeUtf8String("global");
+
+  // Create the result array.
+  WasmModule* module = compiled_module->module();
+  int num_imports = static_cast<int>(module->import_table.size());
+  Handle<JSArray> array_object = factory->NewJSArray(FAST_ELEMENTS, 0, 0);
+  Handle<FixedArray> storage = factory->NewFixedArray(num_imports);
+  JSArray::SetContent(array_object, storage);
+  array_object->set_length(Smi::FromInt(num_imports));
+
+  Handle<JSFunction> object_function =
+      Handle<JSFunction>(isolate->native_context()->object_function(), isolate);
+
+  // Populate the result array.
+  for (int index = 0; index < num_imports; ++index) {
+    WasmImport& import = module->import_table[index];
+
+    Handle<JSObject> entry = factory->NewJSObject(object_function);
+
+    Handle<String> import_kind;
+    switch (import.kind) {
+      case kExternalFunction:
+        import_kind = function_string;
+        break;
+      case kExternalTable:
+        import_kind = table_string;
+        break;
+      case kExternalMemory:
+        import_kind = memory_string;
+        break;
+      case kExternalGlobal:
+        import_kind = global_string;
+        break;
+      default:
+        UNREACHABLE();
+    }
+
+    MaybeHandle<String> import_module =
+        WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+            isolate, compiled_module, import.module_name_offset,
+            import.module_name_length);
+
+    MaybeHandle<String> import_name =
+        WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+            isolate, compiled_module, import.field_name_offset,
+            import.field_name_length);
+
+    JSObject::AddProperty(entry, module_string, import_module.ToHandleChecked(),
+                          NONE);
+    JSObject::AddProperty(entry, name_string, import_name.ToHandleChecked(),
+                          NONE);
+    JSObject::AddProperty(entry, kind_string, import_kind, NONE);
+
+    storage->set(index, *entry);
+  }
+
+  return array_object;
+}
+
+Handle<JSArray> wasm::GetExports(Isolate* isolate,
+                                 Handle<WasmModuleObject> module_object) {
+  Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
+                                             isolate);
+  Factory* factory = isolate->factory();
+
+  Handle<String> name_string = factory->InternalizeUtf8String("name");
+  Handle<String> kind_string = factory->InternalizeUtf8String("kind");
+
+  Handle<String> function_string = factory->InternalizeUtf8String("function");
+  Handle<String> table_string = factory->InternalizeUtf8String("table");
+  Handle<String> memory_string = factory->InternalizeUtf8String("memory");
+  Handle<String> global_string = factory->InternalizeUtf8String("global");
+
+  // Create the result array.
+  WasmModule* module = compiled_module->module();
+  int num_exports = static_cast<int>(module->export_table.size());
+  Handle<JSArray> array_object = factory->NewJSArray(FAST_ELEMENTS, 0, 0);
+  Handle<FixedArray> storage = factory->NewFixedArray(num_exports);
+  JSArray::SetContent(array_object, storage);
+  array_object->set_length(Smi::FromInt(num_exports));
+
+  Handle<JSFunction> object_function =
+      Handle<JSFunction>(isolate->native_context()->object_function(), isolate);
+
+  // Populate the result array.
+  for (int index = 0; index < num_exports; ++index) {
+    WasmExport& exp = module->export_table[index];
+
+    Handle<String> export_kind;
+    switch (exp.kind) {
+      case kExternalFunction:
+        export_kind = function_string;
+        break;
+      case kExternalTable:
+        export_kind = table_string;
+        break;
+      case kExternalMemory:
+        export_kind = memory_string;
+        break;
+      case kExternalGlobal:
+        export_kind = global_string;
+        break;
+      default:
+        UNREACHABLE();
+    }
+
+    Handle<JSObject> entry = factory->NewJSObject(object_function);
+
+    MaybeHandle<String> export_name =
+        WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+            isolate, compiled_module, exp.name_offset, exp.name_length);
+
+    JSObject::AddProperty(entry, name_string, export_name.ToHandleChecked(),
+                          NONE);
+    JSObject::AddProperty(entry, kind_string, export_kind, NONE);
+
+    storage->set(index, *entry);
+  }
+
+  return array_object;
+}
+
+Handle<JSArray> wasm::GetCustomSections(Isolate* isolate,
+                                        Handle<WasmModuleObject> module_object,
+                                        Handle<String> name,
+                                        ErrorThrower* thrower) {
+  Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
+                                             isolate);
+  Factory* factory = isolate->factory();
+
+  std::vector<CustomSectionOffset> custom_sections;
   {
-    Handle<SeqOneByteString> module_bytes = compiled_module->module_bytes();
-    // We parse the module again directly from the module bytes, so
-    // the underlying storage must not be moved meanwhile.
-    DisallowHeapAllocation no_allocation;
+    DisallowHeapAllocation no_gc;  // for raw access to string bytes.
+    Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
+                                          isolate);
     const byte* start =
         reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
     const byte* end = start + module_bytes->length();
-    // TODO(titzer): remember the module origin in the compiled_module
-    // For now, we assume serialized modules did not originate from asm.js.
-    ModuleResult result =
-        DecodeWasmModule(isolate, start, end, false, kWasmOrigin);
-    CHECK(result.ok());
-    CHECK_NOT_NULL(result.val);
-    module = const_cast<WasmModule*>(result.val);
+    custom_sections = DecodeCustomSections(start, end);
   }
 
-  Handle<WasmModuleWrapper> module_wrapper =
-      WasmModuleWrapper::New(isolate, module);
+  std::vector<Handle<Object>> matching_sections;
 
-  compiled_module->set_module_wrapper(module_wrapper);
-  DCHECK(WasmCompiledModule::IsWasmCompiledModule(*compiled_module));
+  // Gather matching sections.
+  for (auto section : custom_sections) {
+    MaybeHandle<String> section_name =
+        WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+            isolate, compiled_module, section.name_offset, section.name_length);
+
+    if (!name->Equals(*section_name.ToHandleChecked())) continue;
+
+    // Make a copy of the payload data in the section.
+    bool is_external;  // Set by TryAllocateBackingStore
+    void* memory = TryAllocateBackingStore(isolate, section.payload_length,
+                                           false, is_external);
+
+    Handle<Object> section_data = factory->undefined_value();
+    if (memory) {
+      Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+      JSArrayBuffer::Setup(buffer, isolate, is_external, memory,
+                           static_cast<int>(section.payload_length));
+      DisallowHeapAllocation no_gc;  // for raw access to string bytes.
+      Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
+                                            isolate);
+      const byte* start =
+          reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
+      memcpy(memory, start + section.payload_offset, section.payload_length);
+      section_data = buffer;
+    } else {
+      thrower->RangeError("out of memory allocating custom section data");
+      return Handle<JSArray>();
+    }
+
+    matching_sections.push_back(section_data);
+  }
+
+  int num_custom_sections = static_cast<int>(matching_sections.size());
+  Handle<JSArray> array_object = factory->NewJSArray(FAST_ELEMENTS, 0, 0);
+  Handle<FixedArray> storage = factory->NewFixedArray(num_custom_sections);
+  JSArray::SetContent(array_object, storage);
+  array_object->set_length(Smi::FromInt(num_custom_sections));
+
+  for (int i = 0; i < num_custom_sections; i++) {
+    storage->set(i, *matching_sections[i]);
+  }
+
+  return array_object;
 }
 
-MaybeHandle<String> WasmCompiledModule::GetFunctionName(
-    Handle<WasmCompiledModule> compiled_module, uint32_t func_index) {
-  DCHECK_LT(func_index, compiled_module->module()->functions.size());
-  WasmFunction& function = compiled_module->module()->functions[func_index];
-  Isolate* isolate = compiled_module->GetIsolate();
-  MaybeHandle<String> string = ExtractStringFromModuleBytes(
-      isolate, compiled_module, function.name_offset, function.name_length);
-  if (!string.is_null()) return string.ToHandleChecked();
-  return {};
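+// Decode and fully validate the given wire bytes; the decoded module itself
+// is discarded, only the validation result is reported.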
+bool wasm::SyncValidate(Isolate* isolate, ErrorThrower* thrower,
+                        const ModuleWireBytes& bytes) {
+  if (bytes.start() == nullptr || bytes.length() == 0) return false;
+  ModuleResult result =
+      DecodeWasmModule(isolate, bytes.start(), bytes.end(), true, kWasmOrigin);
+  if (result.val) delete result.val;
+  return result.ok();
+}
+
+MaybeHandle<WasmModuleObject> wasm::SyncCompileTranslatedAsmJs(
+    Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+    Handle<Script> asm_js_script,
+    Vector<const byte> asm_js_offset_table_bytes) {
+  ModuleResult result = DecodeWasmModule(isolate, bytes.start(), bytes.end(),
+                                         false, kAsmJsOrigin);
+  if (result.failed()) {
+    // TODO(titzer): use Result<std::unique_ptr<const WasmModule*>>?
+    if (result.val) delete result.val;
+    thrower->CompileFailed("Wasm decoding failed", result);
+    return {};
+  }
+
+  CompilationHelper helper(isolate, const_cast<WasmModule*>(result.val));
+  return helper.CompileToModuleObject(thrower, bytes, asm_js_script,
+                                      asm_js_offset_table_bytes);
+}
+
+MaybeHandle<WasmModuleObject> wasm::SyncCompile(Isolate* isolate,
+                                                ErrorThrower* thrower,
+                                                const ModuleWireBytes& bytes) {
+  if (!IsWasmCodegenAllowed(isolate, isolate->native_context())) {
+    thrower->CompileError("Wasm code generation disallowed in this context");
+    return {};
+  }
+
+  ModuleResult result =
+      DecodeWasmModule(isolate, bytes.start(), bytes.end(), false, kWasmOrigin);
+  if (result.failed()) {
+    if (result.val) delete result.val;
+    thrower->CompileFailed("Wasm decoding failed", result);
+    return {};
+  }
+
+  CompilationHelper helper(isolate, const_cast<WasmModule*>(result.val));
+  return helper.CompileToModuleObject(thrower, bytes, Handle<Script>(),
+                                      Vector<const byte>());
+}
+
+MaybeHandle<WasmInstanceObject> wasm::SyncInstantiate(
+    Isolate* isolate, ErrorThrower* thrower,
+    Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+    MaybeHandle<JSArrayBuffer> memory) {
+  InstantiationHelper helper(isolate, thrower, module_object, imports, memory);
+  return helper.Build();
+}
+
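+// Reject the promise with the error that was collected in {thrower}.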
+void RejectPromise(Isolate* isolate, ErrorThrower* thrower,
+                   Handle<JSPromise> promise) {
+  v8::Local<v8::Promise::Resolver> resolver =
+      v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
+  Handle<Context> context(isolate->context(), isolate);
+  resolver->Reject(v8::Utils::ToLocal(context),
+                   v8::Utils::ToLocal(thrower->Reify()));
+}
+
+void ResolvePromise(Isolate* isolate, Handle<JSPromise> promise,
+                    Handle<Object> result) {
+  v8::Local<v8::Promise::Resolver> resolver =
+      v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
+  Handle<Context> context(isolate->context(), isolate);
+  resolver->Resolve(v8::Utils::ToLocal(context), v8::Utils::ToLocal(result));
+}
+
+void wasm::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+                        const ModuleWireBytes& bytes) {
+  ErrorThrower thrower(isolate, nullptr);
+  MaybeHandle<WasmModuleObject> module_object =
+      SyncCompile(isolate, &thrower, bytes);
+  if (thrower.error()) {
+    RejectPromise(isolate, &thrower, promise);
+    return;
+  }
+  ResolvePromise(isolate, promise, module_object.ToHandleChecked());
+}
+
+void wasm::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
+                            Handle<WasmModuleObject> module_object,
+                            MaybeHandle<JSReceiver> imports) {
+  ErrorThrower thrower(isolate, nullptr);
+  MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
+      isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
+  if (thrower.error()) {
+    RejectPromise(isolate, &thrower, promise);
+    return;
+  }
+  ResolvePromise(isolate, promise, instance_object.ToHandleChecked());
+}
+
+void wasm::AsyncCompileAndInstantiate(Isolate* isolate,
+                                      Handle<JSPromise> promise,
+                                      const ModuleWireBytes& bytes,
+                                      MaybeHandle<JSReceiver> imports) {
+  ErrorThrower thrower(isolate, nullptr);
+
+  // Compile the module.
+  MaybeHandle<WasmModuleObject> module_object =
+      SyncCompile(isolate, &thrower, bytes);
+  if (thrower.error()) {
+    RejectPromise(isolate, &thrower, promise);
+    return;
+  }
+  Handle<WasmModuleObject> module = module_object.ToHandleChecked();
+
+  // Instantiate the module.
+  MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
+      isolate, &thrower, module, imports, Handle<JSArrayBuffer>::null());
+  if (thrower.error()) {
+    RejectPromise(isolate, &thrower, promise);
+    return;
+  }
+
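+  // Build the {module, instance} pair that the returned promise resolves to.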
+  Handle<JSFunction> object_function =
+      Handle<JSFunction>(isolate->native_context()->object_function(), isolate);
+  Handle<JSObject> ret =
+      isolate->factory()->NewJSObject(object_function, TENURED);
+  Handle<String> module_property_name =
+      isolate->factory()->InternalizeUtf8String("module");
+  Handle<String> instance_property_name =
+      isolate->factory()->InternalizeUtf8String("instance");
+  JSObject::AddProperty(ret, module_property_name, module, NONE);
+  JSObject::AddProperty(ret, instance_property_name,
+                        instance_object.ToHandleChecked(), NONE);
+
+  ResolvePromise(isolate, promise, ret);
 }
diff --git a/src/wasm/wasm-module.h b/src/wasm/wasm-module.h
index 2ad46e2..67fff1d 100644
--- a/src/wasm/wasm-module.h
+++ b/src/wasm/wasm-module.h
@@ -8,11 +8,12 @@
 #include <memory>
 
 #include "src/api.h"
+#include "src/debug/debug-interface.h"
 #include "src/globals.h"
 #include "src/handles.h"
+#include "src/managed.h"
 #include "src/parsing/preparse-data.h"
 
-#include "src/wasm/managed.h"
 #include "src/wasm/signature-map.h"
 #include "src/wasm/wasm-opcodes.h"
 
@@ -22,51 +23,16 @@
 class WasmCompiledModule;
 class WasmDebugInfo;
 class WasmModuleObject;
+class WasmInstanceObject;
+class WasmMemoryObject;
 
 namespace compiler {
 class CallDescriptor;
-class WasmCompilationUnit;
 }
 
 namespace wasm {
 class ErrorThrower;
 
-const size_t kMaxModuleSize = 1024 * 1024 * 1024;
-const size_t kMaxFunctionSize = 128 * 1024;
-const size_t kMaxStringSize = 256;
-const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x0d;
-
-const uint8_t kWasmFunctionTypeForm = 0x60;
-const uint8_t kWasmAnyFunctionTypeForm = 0x70;
-
-enum WasmSectionCode {
-  kUnknownSectionCode = 0,   // code for unknown sections
-  kTypeSectionCode = 1,      // Function signature declarations
-  kImportSectionCode = 2,    // Import declarations
-  kFunctionSectionCode = 3,  // Function declarations
-  kTableSectionCode = 4,     // Indirect function table and other tables
-  kMemorySectionCode = 5,    // Memory attributes
-  kGlobalSectionCode = 6,    // Global declarations
-  kExportSectionCode = 7,    // Exports
-  kStartSectionCode = 8,     // Start function declaration
-  kElementSectionCode = 9,   // Elements section
-  kCodeSectionCode = 10,     // Function code
-  kDataSectionCode = 11,     // Data segments
-  kNameSectionCode = 12,     // Name section (encoded as a string)
-};
-
-inline bool IsValidSectionCode(uint8_t byte) {
-  return kTypeSectionCode <= byte && byte <= kDataSectionCode;
-}
-
-const char* SectionName(WasmSectionCode code);
-
-// Constants for fixed-size elements within a module.
-static const uint32_t kMaxReturnCount = 1;
-static const uint8_t kResizableMaximumFlag = 1;
-static const int32_t kInvalidFunctionIndex = -1;
-
 enum WasmExternalKind {
   kExternalFunction = 0,
   kExternalTable = 1,
@@ -118,7 +84,7 @@
 
 // Static representation of a wasm global variable.
 struct WasmGlobal {
-  LocalType type;        // type of the global.
+  ValueType type;        // type of the global.
   bool mutability;       // {true} if mutable.
   WasmInitExpr init;     // the initialization expression of the global.
   uint32_t offset;       // offset into global memory.
@@ -170,21 +136,18 @@
   uint32_t index;         // index into the respective space.
 };
 
-enum ModuleOrigin { kWasmOrigin, kAsmJsOrigin };
+enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsOrigin };
+struct ModuleWireBytes;
 
 // Static representation of a module.
 struct V8_EXPORT_PRIVATE WasmModule {
   static const uint32_t kPageSize = 0x10000;    // Page size, 64kb.
   static const uint32_t kMinMemPages = 1;       // Minimum memory size = 64kb
-  static const size_t kV8MaxPages = 16384;      // Maximum memory size = 1gb
-  static const size_t kSpecMaxPages = 65536;    // Maximum according to the spec
-  static const size_t kV8MaxTableSize = 16 * 1024 * 1024;
 
   Zone* owned_zone;
-  const byte* module_start = nullptr;  // starting address for the module bytes
-  const byte* module_end = nullptr;    // end address for the module bytes
   uint32_t min_mem_pages = 0;  // minimum size of the memory in 64k pages
   uint32_t max_mem_pages = 0;  // maximum size of the memory in 64k pages
+  bool has_max_mem = false;    // true if a maximum memory size exists
   bool has_memory = false;     // true if the memory was defined or imported
   bool mem_export = false;     // true if the memory is exported
   // TODO(wasm): reconcile start function index being an int with
@@ -214,56 +177,11 @@
   // switch to libc-2.21 or higher.
   std::unique_ptr<base::Semaphore> pending_tasks;
 
-  WasmModule() : WasmModule(nullptr, nullptr) {}
-  WasmModule(Zone* owned_zone, const byte* module_start);
+  WasmModule() : WasmModule(nullptr) {}
+  WasmModule(Zone* owned_zone);
   ~WasmModule() {
     if (owned_zone) delete owned_zone;
   }
-
-  // Get a string stored in the module bytes representing a name.
-  WasmName GetName(uint32_t offset, uint32_t length) const {
-    if (length == 0) return {"<?>", 3};  // no name.
-    CHECK(BoundsCheck(offset, offset + length));
-    DCHECK_GE(static_cast<int>(length), 0);
-    return {reinterpret_cast<const char*>(module_start + offset),
-            static_cast<int>(length)};
-  }
-
-  // Get a string stored in the module bytes representing a function name.
-  WasmName GetName(WasmFunction* function) const {
-    return GetName(function->name_offset, function->name_length);
-  }
-
-  // Get a string stored in the module bytes representing a name.
-  WasmName GetNameOrNull(uint32_t offset, uint32_t length) const {
-    if (offset == 0 && length == 0) return {NULL, 0};  // no name.
-    CHECK(BoundsCheck(offset, offset + length));
-    DCHECK_GE(static_cast<int>(length), 0);
-    return {reinterpret_cast<const char*>(module_start + offset),
-            static_cast<int>(length)};
-  }
-
-  // Get a string stored in the module bytes representing a function name.
-  WasmName GetNameOrNull(const WasmFunction* function) const {
-    return GetNameOrNull(function->name_offset, function->name_length);
-  }
-
-  // Checks the given offset range is contained within the module bytes.
-  bool BoundsCheck(uint32_t start, uint32_t end) const {
-    size_t size = module_end - module_start;
-    return start <= size && end <= size;
-  }
-
-  // Creates a new instantiation of the module in the given isolate.
-  static MaybeHandle<JSObject> Instantiate(Isolate* isolate,
-                                           ErrorThrower* thrower,
-                                           Handle<JSObject> wasm_module,
-                                           Handle<JSReceiver> ffi,
-                                           Handle<JSArrayBuffer> memory);
-
-  MaybeHandle<WasmCompiledModule> CompileFunctions(
-      Isolate* isolate, Handle<Managed<WasmModule>> module_wrapper,
-      ErrorThrower* thrower) const;
 };
 
 typedef Managed<WasmModule> WasmModuleWrapper;
@@ -272,11 +190,10 @@
 struct WasmInstance {
   const WasmModule* module;  // static representation of the module.
   // -- Heap allocated --------------------------------------------------------
-  Handle<JSObject> js_object;            // JavaScript module object.
   Handle<Context> context;               // JavaScript native context.
-  Handle<JSArrayBuffer> mem_buffer;      // Handle to array buffer of memory.
-  Handle<JSArrayBuffer> globals_buffer;  // Handle to array buffer of globals.
   std::vector<Handle<FixedArray>> function_tables;  // indirect function tables.
+  std::vector<Handle<FixedArray>>
+      signature_tables;                    // indirect signature tables.
   std::vector<Handle<Code>> function_code;  // code objects for each function.
   // -- raw memory ------------------------------------------------------------
   byte* mem_start = nullptr;  // start of linear memory.
@@ -287,15 +204,77 @@
   explicit WasmInstance(const WasmModule* m)
       : module(m),
         function_tables(m->function_tables.size()),
+        signature_tables(m->function_tables.size()),
         function_code(m->functions.size()) {}
 };
 
+// Interface to the storage (wire bytes) of a wasm module.
+// It is illegal for anyone receiving a ModuleWireBytes to store pointers based
+// on module_bytes, as this storage is only guaranteed to be alive as long as
+// this struct is alive.
+struct V8_EXPORT_PRIVATE ModuleWireBytes {
+  ModuleWireBytes(Vector<const byte> module_bytes)
+      : module_bytes_(module_bytes) {}
+  ModuleWireBytes(const byte* start, const byte* end)
+      : module_bytes_(start, static_cast<int>(end - start)) {
+    DCHECK_GE(kMaxInt, end - start);
+  }
+
+  // Get a string stored in the module bytes representing a name.
+  WasmName GetName(uint32_t offset, uint32_t length) const {
+    if (length == 0) return {"<?>", 3};  // no name.
+    CHECK(BoundsCheck(offset, length));
+    DCHECK_GE(length, 0);
+    return Vector<const char>::cast(
+        module_bytes_.SubVector(offset, offset + length));
+  }
+
+  // Get a string stored in the module bytes representing a function name.
+  WasmName GetName(const WasmFunction* function) const {
+    return GetName(function->name_offset, function->name_length);
+  }
+
+  // Get a string stored in the module bytes representing a name.
+  WasmName GetNameOrNull(uint32_t offset, uint32_t length) const {
+    if (offset == 0 && length == 0) return {NULL, 0};  // no name.
+    CHECK(BoundsCheck(offset, length));
+    DCHECK_GE(length, 0);
+    return Vector<const char>::cast(
+        module_bytes_.SubVector(offset, offset + length));
+  }
+
+  // Get a string stored in the module bytes representing a function name.
+  WasmName GetNameOrNull(const WasmFunction* function) const {
+    return GetNameOrNull(function->name_offset, function->name_length);
+  }
+
+  // Checks whether the given offset range is contained within the module bytes.
+  bool BoundsCheck(uint32_t offset, uint32_t length) const {
+    uint32_t size = static_cast<uint32_t>(module_bytes_.length());
+    return offset <= size && length <= size - offset;
+  }
+
+  Vector<const byte> GetFunctionBytes(const WasmFunction* function) const {
+    return module_bytes_.SubVector(function->code_start_offset,
+                                   function->code_end_offset);
+  }
+
+  const byte* start() const { return module_bytes_.start(); }
+  const byte* end() const { return module_bytes_.end(); }
+  int length() const { return module_bytes_.length(); }
+
+ private:
+  const Vector<const byte> module_bytes_;
+};
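+
+// Usage sketch (illustrative only; assumes `start` and `end` delimit wire
+// bytes that outlive the ModuleWireBytes object):
+//   ModuleWireBytes wire_bytes(start, end);
+//   if (wire_bytes.BoundsCheck(offset, length)) {
+//     WasmName name = wire_bytes.GetNameOrNull(offset, length);
+//   }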
+
 // Interface provided to the decoder/graph builder which contains only
 // minimal information about the globals, functions, and function tables.
 struct V8_EXPORT_PRIVATE ModuleEnv {
+  ModuleEnv(const WasmModule* module, WasmInstance* instance)
+      : module(module), instance(instance) {}
+
   const WasmModule* module;
   WasmInstance* instance;
-  ModuleOrigin origin;
 
   bool IsValidGlobal(uint32_t index) const {
     return module && index < module->globals.size();
@@ -309,7 +288,7 @@
   bool IsValidTable(uint32_t index) const {
     return module && index < module->function_tables.size();
   }
-  LocalType GetGlobalType(uint32_t index) {
+  ValueType GetGlobalType(uint32_t index) {
     DCHECK(IsValidGlobal(index));
     return module->globals[index].type;
   }
@@ -326,13 +305,14 @@
     return &module->function_tables[index];
   }
 
-  bool asm_js() { return origin == kAsmJsOrigin; }
+  bool asm_js() { return module->origin == kAsmJsOrigin; }
 
   Handle<Code> GetFunctionCode(uint32_t index) {
     DCHECK_NOT_NULL(instance);
     return instance->function_code[index];
   }
 
+  // TODO(titzer): move these into src/compiler/wasm-compiler.cc
   static compiler::CallDescriptor* GetWasmCallDescriptor(Zone* zone,
                                                          FunctionSig* sig);
   static compiler::CallDescriptor* GetI32WasmCallDescriptor(
@@ -341,42 +321,36 @@
       Zone* zone, compiler::CallDescriptor* descriptor);
 };
 
+// A ModuleEnv together with ModuleWireBytes.
+struct ModuleBytesEnv {
+  ModuleBytesEnv(const WasmModule* module, WasmInstance* instance,
+                 Vector<const byte> module_bytes)
+      : module_env(module, instance), wire_bytes(module_bytes) {}
+  ModuleBytesEnv(const WasmModule* module, WasmInstance* instance,
+                 const ModuleWireBytes& wire_bytes)
+      : module_env(module, instance), wire_bytes(wire_bytes) {}
+
+  ModuleEnv module_env;
+  ModuleWireBytes wire_bytes;
+};
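+
+// Usage sketch (illustrative only; `module`, `instance`, and `wire_bytes`
+// are assumed to be set up by the caller):
+//   ModuleBytesEnv env(module, instance, wire_bytes);
+//   bool valid = env.module_env.IsValidGlobal(0);
+//   int num_bytes = env.wire_bytes.length();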
+
 // A helper for printing out the names of functions.
 struct WasmFunctionName {
+  WasmFunctionName(const WasmFunction* function, WasmName name)
+      : function_(function), name_(name) {}
+
   const WasmFunction* function_;
-  const WasmModule* module_;
-  WasmFunctionName(const WasmFunction* function, const ModuleEnv* menv)
-      : function_(function), module_(menv ? menv->module : nullptr) {}
+  WasmName name_;
 };
 
 std::ostream& operator<<(std::ostream& os, const WasmModule& module);
 std::ostream& operator<<(std::ostream& os, const WasmFunction& function);
 std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
 
-// Extract a function name from the given wasm instance.
-// Returns "<WASM UNNAMED>" if no instance is passed, the function is unnamed or
-// the name is not a valid UTF-8 string.
-// TODO(5620): Refactor once we always get a wasm instance.
-Handle<String> GetWasmFunctionName(Isolate* isolate, Handle<Object> instance,
-                                   uint32_t func_index);
-
-// Return the binary source bytes of a wasm module.
-Handle<SeqOneByteString> GetWasmBytes(Handle<JSObject> wasm);
-
 // Get the debug info associated with the given wasm object.
 // If no debug info exists yet, it is created automatically.
 Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm);
 
-// Return the number of functions in the given wasm object.
-int GetNumberOfFunctions(Handle<JSObject> wasm);
-
-// Create and export JSFunction
-Handle<JSFunction> WrapExportCodeAsJSFunction(Isolate* isolate,
-                                              Handle<Code> export_code,
-                                              Handle<String> name,
-                                              FunctionSig* sig, int func_index,
-                                              Handle<JSObject> instance);
-
 // Check whether the given object represents a WebAssembly.Instance instance.
 // This checks the number and type of internal fields, so it's not 100 percent
 // secure. If it turns out that we need more complete checks, we could add a
@@ -384,64 +358,98 @@
 // else.
 bool IsWasmInstance(Object* instance);
 
-// Return the compiled module object for this WASM instance.
-WasmCompiledModule* GetCompiledModule(Object* wasm_instance);
-
-// Check whether the wasm module was generated from asm.js code.
-bool WasmIsAsmJs(Object* instance, Isolate* isolate);
-
 // Get the script of the wasm module. If the origin of the module is asm.js, the
 // returned Script will be a JavaScript Script of Script::TYPE_NORMAL, otherwise
 // it's of type TYPE_WASM.
 Handle<Script> GetScript(Handle<JSObject> instance);
 
-// Get the asm.js source position for the given byte offset in the given
-// function.
-int GetAsmWasmSourcePosition(Handle<JSObject> instance, int func_index,
-                             int byte_offset);
-
 V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> CreateModuleObjectFromBytes(
     Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
     ModuleOrigin origin, Handle<Script> asm_js_script,
-    const byte* asm_offset_tables_start, const byte* asm_offset_tables_end);
+    Vector<const byte> asm_offset_table);
 
-V8_EXPORT_PRIVATE bool ValidateModuleBytes(Isolate* isolate, const byte* start,
-                                           const byte* end,
-                                           ErrorThrower* thrower,
-                                           ModuleOrigin origin);
+V8_EXPORT_PRIVATE bool IsWasmCodegenAllowed(Isolate* isolate,
+                                            Handle<Context> context);
+
+V8_EXPORT_PRIVATE Handle<JSArray> GetImports(Isolate* isolate,
+                                             Handle<WasmModuleObject> module);
+V8_EXPORT_PRIVATE Handle<JSArray> GetExports(Isolate* isolate,
+                                             Handle<WasmModuleObject> module);
+V8_EXPORT_PRIVATE Handle<JSArray> GetCustomSections(
+    Isolate* isolate, Handle<WasmModuleObject> module, Handle<String> name,
+    ErrorThrower* thrower);
 
 // Get the offset of the code of a function within a module.
 int GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
                           int func_index);
 
-// Translate from byte offset in the module to function number and byte offset
-// within that function, encoded as line and column in the position info.
-bool GetPositionInfo(Handle<WasmCompiledModule> compiled_module,
-                     uint32_t position, Script::PositionInfo* info);
-
 // Assumed to be called with a code object associated to a wasm module instance.
 // Intended to be called from runtime functions.
 // Returns nullptr on failing to get owning instance.
-Object* GetOwningWasmInstance(Code* code);
+WasmInstanceObject* GetOwningWasmInstance(Code* code);
 
-MaybeHandle<JSArrayBuffer> GetInstanceMemory(Isolate* isolate,
-                                             Handle<JSObject> instance);
+MaybeHandle<JSArrayBuffer> GetInstanceMemory(
+    Isolate* isolate, Handle<WasmInstanceObject> instance);
 
-int32_t GetInstanceMemorySize(Isolate* isolate, Handle<JSObject> instance);
+int32_t GetInstanceMemorySize(Isolate* isolate,
+                              Handle<WasmInstanceObject> instance);
 
-int32_t GrowInstanceMemory(Isolate* isolate, Handle<JSObject> instance,
-                           uint32_t pages);
+int32_t GrowInstanceMemory(Isolate* isolate,
+                           Handle<WasmInstanceObject> instance, uint32_t pages);
+
+Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
+                                     bool enable_guard_regions);
+
+int32_t GrowWebAssemblyMemory(Isolate* isolate,
+                              Handle<WasmMemoryObject> receiver,
+                              uint32_t pages);
+
+int32_t GrowMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
+                   uint32_t pages);
 
 void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
                           int index, Handle<JSFunction> js_function);
 
+void GrowDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
+                        uint32_t old_size, uint32_t count);
+
+//============================================================================
+//== Compilation and instantiation ===========================================
+//============================================================================
+V8_EXPORT_PRIVATE bool SyncValidate(Isolate* isolate, ErrorThrower* thrower,
+                                    const ModuleWireBytes& bytes);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
+    Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes,
+    Handle<Script> asm_js_script, Vector<const byte> asm_js_offset_table_bytes);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompile(
+    Isolate* isolate, ErrorThrower* thrower, const ModuleWireBytes& bytes);
+
+V8_EXPORT_PRIVATE MaybeHandle<WasmInstanceObject> SyncInstantiate(
+    Isolate* isolate, ErrorThrower* thrower,
+    Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
+    MaybeHandle<JSArrayBuffer> memory);
+
+V8_EXPORT_PRIVATE void AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
+                                    const ModuleWireBytes& bytes);
+
+V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
+                                        Handle<JSPromise> promise,
+                                        Handle<WasmModuleObject> module_object,
+                                        MaybeHandle<JSReceiver> imports);
+
+V8_EXPORT_PRIVATE void AsyncCompileAndInstantiate(
+    Isolate* isolate, Handle<JSPromise> promise, const ModuleWireBytes& bytes,
+    MaybeHandle<JSReceiver> imports);
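+
+// Usage sketch (illustrative only; `isolate`, `thrower`, and `bytes` are
+// assumed to be provided by the caller):
+//   MaybeHandle<WasmModuleObject> module =
+//       SyncCompile(isolate, thrower, bytes);
+//   if (!module.is_null()) {
+//     SyncInstantiate(isolate, thrower, module.ToHandleChecked(),
+//                     MaybeHandle<JSReceiver>(), MaybeHandle<JSArrayBuffer>());
+//   }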
+
 namespace testing {
-
-void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> wasm_module,
+void ValidateInstancesChain(Isolate* isolate,
+                            Handle<WasmModuleObject> module_obj,
                             int instance_count);
-void ValidateModuleState(Isolate* isolate, Handle<JSObject> wasm_module);
-void ValidateOrphanedInstance(Isolate* isolate, Handle<JSObject> instance);
-
+void ValidateModuleState(Isolate* isolate, Handle<WasmModuleObject> module_obj);
+void ValidateOrphanedInstance(Isolate* isolate,
+                              Handle<WasmInstanceObject> instance);
 }  // namespace testing
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/wasm-objects.cc b/src/wasm/wasm-objects.cc
index 68f66d2..c9b552f 100644
--- a/src/wasm/wasm-objects.cc
+++ b/src/wasm/wasm-objects.cc
@@ -3,7 +3,14 @@
 // found in the LICENSE file.
 
 #include "src/wasm/wasm-objects.h"
+#include "src/utils.h"
+
+#include "src/base/iterator.h"
+#include "src/debug/debug-interface.h"
+#include "src/objects-inl.h"
+#include "src/wasm/module-decoder.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-text.h"
 
 #define TRACE(...)                                      \
   do {                                                  \
@@ -18,29 +25,49 @@
 using namespace v8::internal;
 using namespace v8::internal::wasm;
 
-#define DEFINE_ACCESSORS(Container, name, field, type) \
-  type* Container::get_##name() {                      \
-    return type::cast(GetInternalField(field));        \
-  }                                                    \
-  void Container::set_##name(type* value) {            \
-    return SetInternalField(field, value);             \
+#define DEFINE_GETTER0(getter, Container, name, field, type) \
+  type* Container::name() { return type::cast(getter(field)); }
+
+#define DEFINE_ACCESSORS0(getter, setter, Container, name, field, type) \
+  DEFINE_GETTER0(getter, Container, name, field, type)                  \
+  void Container::set_##name(type* value) { return setter(field, value); }
+
+#define DEFINE_OPTIONAL_ACCESSORS0(getter, setter, Container, name, field, \
+                                   type)                                   \
+  DEFINE_ACCESSORS0(getter, setter, Container, name, field, type)          \
+  bool Container::has_##name() {                                           \
+    return !getter(field)->IsUndefined(GetIsolate());                      \
   }
 
-#define DEFINE_OPTIONAL_ACCESSORS(Container, name, field, type) \
-  bool Container::has_##name() {                                \
-    return !GetInternalField(field)->IsUndefined(GetIsolate()); \
-  }                                                             \
-  type* Container::get_##name() {                               \
-    return type::cast(GetInternalField(field));                 \
-  }                                                             \
-  void Container::set_##name(type* value) {                     \
-    return SetInternalField(field, value);                      \
+#define DEFINE_OPTIONAL_GETTER0(getter, Container, name, field, type) \
+  DEFINE_GETTER0(getter, Container, name, field, type)                \
+  bool Container::has_##name() {                                      \
+    return !getter(field)->IsUndefined(GetIsolate());                 \
   }
 
-#define DEFINE_GETTER(Container, name, field, type) \
-  type* Container::get_##name() { return type::cast(GetInternalField(field)); }
 
-static uint32_t SafeUint32(Object* value) {
+#define DEFINE_OBJ_GETTER(Container, name, field, type) \
+  DEFINE_GETTER0(GetInternalField, Container, name, field, type)
+#define DEFINE_OBJ_ACCESSORS(Container, name, field, type)               \
+  DEFINE_ACCESSORS0(GetInternalField, SetInternalField, Container, name, \
+                    field, type)
+#define DEFINE_OPTIONAL_OBJ_ACCESSORS(Container, name, field, type)         \
+  DEFINE_OPTIONAL_ACCESSORS0(GetInternalField, SetInternalField, Container, \
+                             name, field, type)
+#define DEFINE_ARR_GETTER(Container, name, field, type) \
+  DEFINE_GETTER0(get, Container, name, field, type)
+#define DEFINE_ARR_ACCESSORS(Container, name, field, type) \
+  DEFINE_ACCESSORS0(get, set, Container, name, field, type)
+#define DEFINE_OPTIONAL_ARR_ACCESSORS(Container, name, field, type) \
+  DEFINE_OPTIONAL_ACCESSORS0(get, set, Container, name, field, type)
+#define DEFINE_OPTIONAL_ARR_GETTER(Container, name, field, type) \
+  DEFINE_OPTIONAL_GETTER0(get, Container, name, field, type)
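+
+// Example (illustrative): DEFINE_OBJ_ACCESSORS(WasmTableObject, functions,
+// kFunctions, FixedArray) expands to WasmTableObject::functions() and
+// WasmTableObject::set_functions(), both backed by internal field kFunctions.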
+
+namespace {
+
+uint32_t SafeUint32(Object* value) {
   if (value->IsSmi()) {
     int32_t val = Smi::cast(value)->value();
     CHECK_GE(val, 0);
@@ -49,21 +76,146 @@
   DCHECK(value->IsHeapNumber());
   HeapNumber* num = HeapNumber::cast(value);
   CHECK_GE(num->value(), 0.0);
-  CHECK_LE(num->value(), static_cast<double>(kMaxUInt32));
+  CHECK_LE(num->value(), kMaxUInt32);
   return static_cast<uint32_t>(num->value());
 }
 
-static int32_t SafeInt32(Object* value) {
+int32_t SafeInt32(Object* value) {
   if (value->IsSmi()) {
     return Smi::cast(value)->value();
   }
   DCHECK(value->IsHeapNumber());
   HeapNumber* num = HeapNumber::cast(value);
-  CHECK_GE(num->value(), static_cast<double>(Smi::kMinValue));
-  CHECK_LE(num->value(), static_cast<double>(Smi::kMaxValue));
+  CHECK_GE(num->value(), Smi::kMinValue);
+  CHECK_LE(num->value(), Smi::kMaxValue);
   return static_cast<int32_t>(num->value());
 }
 
+// An iterator that returns first the module itself, then all modules linked via
+// next, then all linked via prev.
+class CompiledModulesIterator
+    : public std::iterator<std::input_iterator_tag,
+                           Handle<WasmCompiledModule>> {
+ public:
+  CompiledModulesIterator(Isolate* isolate,
+                          Handle<WasmCompiledModule> start_module, bool at_end)
+      : isolate_(isolate),
+        start_module_(start_module),
+        current_(at_end ? Handle<WasmCompiledModule>::null() : start_module) {}
+
+  Handle<WasmCompiledModule> operator*() const {
+    DCHECK(!current_.is_null());
+    return current_;
+  }
+
+  void operator++() { Advance(); }
+
+  bool operator!=(const CompiledModulesIterator& other) {
+    DCHECK(start_module_.is_identical_to(other.start_module_));
+    return !current_.is_identical_to(other.current_);
+  }
+
+ private:
+  void Advance() {
+    DCHECK(!current_.is_null());
+    if (!is_backwards_) {
+      if (current_->has_weak_next_instance()) {
+        WeakCell* weak_next = current_->ptr_to_weak_next_instance();
+        if (!weak_next->cleared()) {
+          current_ =
+              handle(WasmCompiledModule::cast(weak_next->value()), isolate_);
+          return;
+        }
+      }
+      // No more modules in next-links, now try the previous-links.
+      is_backwards_ = true;
+      current_ = start_module_;
+    }
+    if (current_->has_weak_prev_instance()) {
+      WeakCell* weak_prev = current_->ptr_to_weak_prev_instance();
+      if (!weak_prev->cleared()) {
+        current_ =
+            handle(WasmCompiledModule::cast(weak_prev->value()), isolate_);
+        return;
+      }
+    }
+    current_ = Handle<WasmCompiledModule>::null();
+  }
+
+  friend class CompiledModuleInstancesIterator;
+  Isolate* isolate_;
+  Handle<WasmCompiledModule> start_module_;
+  Handle<WasmCompiledModule> current_;
+  bool is_backwards_ = false;
+};
+
+// An iterator based on CompiledModulesIterator, but it returns all live
+// instances, not the WasmCompiledModule objects themselves.
+class CompiledModuleInstancesIterator
+    : public std::iterator<std::input_iterator_tag,
+                           Handle<WasmInstanceObject>> {
+ public:
+  CompiledModuleInstancesIterator(Isolate* isolate,
+                                  Handle<WasmCompiledModule> start_module,
+                                  bool at_end)
+      : it(isolate, start_module, at_end) {
+    while (NeedToAdvance()) ++it;
+  }
+
+  Handle<WasmInstanceObject> operator*() {
+    return handle(
+        WasmInstanceObject::cast((*it)->weak_owning_instance()->value()),
+        it.isolate_);
+  }
+
+  void operator++() {
+    do {
+      ++it;
+    } while (NeedToAdvance());
+  }
+
+  bool operator!=(const CompiledModuleInstancesIterator& other) {
+    return it != other.it;
+  }
+
+ private:
+  bool NeedToAdvance() {
+    return !it.current_.is_null() &&
+           (!it.current_->has_weak_owning_instance() ||
+            it.current_->ptr_to_weak_owning_instance()->cleared());
+  }
+  CompiledModulesIterator it;
+};
+
+v8::base::iterator_range<CompiledModuleInstancesIterator>
+iterate_compiled_module_instance_chain(
+    Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+  return {CompiledModuleInstancesIterator(isolate, compiled_module, false),
+          CompiledModuleInstancesIterator(isolate, compiled_module, true)};
+}
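+
+// Usage sketch (illustrative only):
+//   for (Handle<WasmInstanceObject> instance :
+//        iterate_compiled_module_instance_chain(isolate, compiled_module)) {
+//     ...  // visit each live instance
+//   }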
+
+#ifdef DEBUG
+bool IsBreakablePosition(Handle<WasmCompiledModule> compiled_module,
+                         int func_index, int offset_in_func) {
+  DisallowHeapAllocation no_gc;
+  AccountingAllocator alloc;
+  Zone tmp(&alloc, ZONE_NAME);
+  BodyLocalDecls locals(&tmp);
+  const byte* module_start = compiled_module->module_bytes()->GetChars();
+  WasmFunction& func = compiled_module->module()->functions[func_index];
+  BytecodeIterator iterator(module_start + func.code_start_offset,
+                            module_start + func.code_end_offset, &locals);
+  DCHECK_LT(0, locals.encoded_size);
+  for (uint32_t offset : iterator.offsets()) {
+    if (offset > static_cast<uint32_t>(offset_in_func)) break;
+    if (offset == static_cast<uint32_t>(offset_in_func)) return true;
+  }
+  return false;
+}
+#endif  // DEBUG
+
+}  // namespace
+
 Handle<WasmModuleObject> WasmModuleObject::New(
     Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
   ModuleOrigin origin = compiled_module->module()->origin;
@@ -97,8 +249,16 @@
   return reinterpret_cast<WasmModuleObject*>(object);
 }
 
+bool WasmModuleObject::IsWasmModuleObject(Object* object) {
+  return object->IsJSObject() &&
+         JSObject::cast(object)->GetInternalFieldCount() == kFieldCount;
+}
+
+DEFINE_OBJ_GETTER(WasmModuleObject, compiled_module, kCompiledModule,
+                  WasmCompiledModule)
+
 Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
-                                             uint32_t maximum,
+                                             int64_t maximum,
                                              Handle<FixedArray>* js_functions) {
   Handle<JSFunction> table_ctor(
       isolate->native_context()->wasm_table_constructor());
@@ -109,8 +269,8 @@
     (*js_functions)->set(i, null);
   }
   table_obj->SetInternalField(kFunctions, *(*js_functions));
-  table_obj->SetInternalField(kMaximum,
-                              static_cast<Object*>(Smi::FromInt(maximum)));
+  Handle<Object> max = isolate->factory()->NewNumber(maximum);
+  table_obj->SetInternalField(kMaximum, *max);
 
   Handle<FixedArray> dispatch_tables = isolate->factory()->NewFixedArray(0);
   table_obj->SetInternalField(kDispatchTables, *dispatch_tables);
@@ -119,27 +279,28 @@
   return Handle<WasmTableObject>::cast(table_obj);
 }
 
-DEFINE_GETTER(WasmTableObject, dispatch_tables, kDispatchTables, FixedArray)
+DEFINE_OBJ_GETTER(WasmTableObject, dispatch_tables, kDispatchTables, FixedArray)
 
 Handle<FixedArray> WasmTableObject::AddDispatchTable(
     Isolate* isolate, Handle<WasmTableObject> table_obj,
     Handle<WasmInstanceObject> instance, int table_index,
-    Handle<FixedArray> dispatch_table) {
+    Handle<FixedArray> function_table, Handle<FixedArray> signature_table) {
   Handle<FixedArray> dispatch_tables(
       FixedArray::cast(table_obj->GetInternalField(kDispatchTables)), isolate);
-  DCHECK_EQ(0, dispatch_tables->length() % 3);
+  DCHECK_EQ(0, dispatch_tables->length() % 4);
 
   if (instance.is_null()) return dispatch_tables;
   // TODO(titzer): use weak cells here to avoid leaking instances.
 
   // Grow the dispatch table and add a new tuple (instance, table index,
   // function table, signature table) at the end.
   Handle<FixedArray> new_dispatch_tables =
-      isolate->factory()->CopyFixedArrayAndGrow(dispatch_tables, 3);
+      isolate->factory()->CopyFixedArrayAndGrow(dispatch_tables, 4);
 
   new_dispatch_tables->set(dispatch_tables->length() + 0, *instance);
   new_dispatch_tables->set(dispatch_tables->length() + 1,
                            Smi::FromInt(table_index));
-  new_dispatch_tables->set(dispatch_tables->length() + 2, *dispatch_table);
+  new_dispatch_tables->set(dispatch_tables->length() + 2, *function_table);
+  new_dispatch_tables->set(dispatch_tables->length() + 3, *signature_table);
 
   table_obj->SetInternalField(WasmTableObject::kDispatchTables,
                               *new_dispatch_tables);
@@ -147,12 +308,16 @@
   return new_dispatch_tables;
 }
 
-DEFINE_ACCESSORS(WasmTableObject, functions, kFunctions, FixedArray)
+DEFINE_OBJ_ACCESSORS(WasmTableObject, functions, kFunctions, FixedArray)
 
-uint32_t WasmTableObject::current_length() { return get_functions()->length(); }
+uint32_t WasmTableObject::current_length() { return functions()->length(); }
 
-uint32_t WasmTableObject::maximum_length() {
-  return SafeUint32(GetInternalField(kMaximum));
+bool WasmTableObject::has_maximum_length() {
+  return GetInternalField(kMaximum)->Number() >= 0;
+}
+
+int64_t WasmTableObject::maximum_length() {
+  return static_cast<int64_t>(GetInternalField(kMaximum)->Number());
 }
 
 WasmTableObject* WasmTableObject::cast(Object* object) {
@@ -161,28 +326,42 @@
   return reinterpret_cast<WasmTableObject*>(object);
 }
 
+void WasmTableObject::Grow(Isolate* isolate, Handle<WasmTableObject> table,
+                           uint32_t count) {
+  Handle<FixedArray> dispatch_tables(table->dispatch_tables());
+  wasm::GrowDispatchTables(isolate, dispatch_tables,
+                           table->functions()->length(), count);
+}
+
 Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
                                                Handle<JSArrayBuffer> buffer,
-                                               int maximum) {
+                                               int32_t maximum) {
   Handle<JSFunction> memory_ctor(
       isolate->native_context()->wasm_memory_constructor());
-  Handle<JSObject> memory_obj = isolate->factory()->NewJSObject(memory_ctor);
+  Handle<JSObject> memory_obj =
+      isolate->factory()->NewJSObject(memory_ctor, TENURED);
   memory_obj->SetInternalField(kArrayBuffer, *buffer);
-  memory_obj->SetInternalField(kMaximum,
-                               static_cast<Object*>(Smi::FromInt(maximum)));
+  Handle<Object> max = isolate->factory()->NewNumber(maximum);
+  memory_obj->SetInternalField(kMaximum, *max);
   Handle<Symbol> memory_sym(isolate->native_context()->wasm_memory_sym());
   Object::SetProperty(memory_obj, memory_sym, memory_obj, STRICT).Check();
   return Handle<WasmMemoryObject>::cast(memory_obj);
 }
 
-DEFINE_ACCESSORS(WasmMemoryObject, buffer, kArrayBuffer, JSArrayBuffer)
+DEFINE_OBJ_ACCESSORS(WasmMemoryObject, buffer, kArrayBuffer, JSArrayBuffer)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmMemoryObject, instances_link, kInstancesLink,
+                              WasmInstanceWrapper)
 
 uint32_t WasmMemoryObject::current_pages() {
-  return SafeUint32(get_buffer()->byte_length()) / wasm::WasmModule::kPageSize;
+  return SafeUint32(buffer()->byte_length()) / wasm::WasmModule::kPageSize;
+}
+
+bool WasmMemoryObject::has_maximum_pages() {
+  return GetInternalField(kMaximum)->Number() >= 0;
 }
 
 int32_t WasmMemoryObject::maximum_pages() {
-  return SafeInt32(GetInternalField(kMaximum));
+  return static_cast<int32_t>(GetInternalField(kMaximum)->Number());
 }
 
 WasmMemoryObject* WasmMemoryObject::cast(Object* object) {
@@ -191,31 +370,50 @@
   return reinterpret_cast<WasmMemoryObject*>(object);
 }
 
-void WasmMemoryObject::AddInstance(WasmInstanceObject* instance) {
-  // TODO(gdeepti): This should be a weak list of instance objects
-  // for instances that share memory.
-  SetInternalField(kInstance, instance);
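+// Instances sharing this memory are tracked in a doubly linked list of
+// WasmInstanceWrapper cells; the new instance is linked in at the head,
+// so instances_link always points at the most recently added wrapper.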
+void WasmMemoryObject::AddInstance(Isolate* isolate,
+                                   Handle<WasmInstanceObject> instance) {
+  Handle<WasmInstanceWrapper> instance_wrapper =
+      handle(instance->instance_wrapper());
+  if (has_instances_link()) {
+    Handle<WasmInstanceWrapper> current_wrapper(instances_link());
+    DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*current_wrapper));
+    DCHECK(!current_wrapper->has_previous());
+    instance_wrapper->set_next_wrapper(*current_wrapper);
+    current_wrapper->set_previous_wrapper(*instance_wrapper);
+  }
+  set_instances_link(*instance_wrapper);
 }
 
-DEFINE_ACCESSORS(WasmInstanceObject, compiled_module, kCompiledModule,
-                 WasmCompiledModule)
-DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, globals_buffer,
-                          kGlobalsArrayBuffer, JSArrayBuffer)
-DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, memory_buffer, kMemoryArrayBuffer,
-                          JSArrayBuffer)
-DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, memory_object, kMemoryObject,
-                          WasmMemoryObject)
-DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, kDebugInfo,
-                          WasmDebugInfo)
+void WasmMemoryObject::ResetInstancesLink(Isolate* isolate) {
+  Handle<Object> undefined = isolate->factory()->undefined_value();
+  SetInternalField(kInstancesLink, *undefined);
+}
+
+DEFINE_OBJ_ACCESSORS(WasmInstanceObject, compiled_module, kCompiledModule,
+                     WasmCompiledModule)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, globals_buffer,
+                              kGlobalsArrayBuffer, JSArrayBuffer)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, memory_buffer,
+                              kMemoryArrayBuffer, JSArrayBuffer)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, memory_object, kMemoryObject,
+                              WasmMemoryObject)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, debug_info, kDebugInfo,
+                              WasmDebugInfo)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, instance_wrapper,
+                              kWasmMemInstanceWrapper, WasmInstanceWrapper)
 
 WasmModuleObject* WasmInstanceObject::module_object() {
-  return WasmModuleObject::cast(*get_compiled_module()->wasm_module());
+  return *compiled_module()->wasm_module();
 }
 
-WasmModule* WasmInstanceObject::module() {
-  return reinterpret_cast<WasmModuleWrapper*>(
-             *get_compiled_module()->module_wrapper())
-      ->get();
+WasmModule* WasmInstanceObject::module() { return compiled_module()->module(); }
+
+Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
+    Handle<WasmInstanceObject> instance) {
+  if (instance->has_debug_info()) return handle(instance->debug_info());
+  Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(instance);
+  instance->set_debug_info(*new_info);
+  return new_info;
 }
 
 WasmInstanceObject* WasmInstanceObject::cast(Object* object) {
@@ -224,7 +422,6 @@
 }
 
 bool WasmInstanceObject::IsWasmInstanceObject(Object* object) {
-  if (!object->IsObject()) return false;
   if (!object->IsJSObject()) return false;
 
   JSObject* obj = JSObject::cast(object);
@@ -246,15 +443,21 @@
 
 Handle<WasmInstanceObject> WasmInstanceObject::New(
     Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
-  Handle<Map> map = isolate->factory()->NewMap(
-      JS_OBJECT_TYPE, JSObject::kHeaderSize + kFieldCount * kPointerSize);
+  Handle<JSFunction> instance_cons(
+      isolate->native_context()->wasm_instance_constructor());
+  Handle<JSObject> instance_object =
+      isolate->factory()->NewJSObject(instance_cons, TENURED);
+  Handle<Symbol> instance_sym(isolate->native_context()->wasm_instance_sym());
+  Object::SetProperty(instance_object, instance_sym, instance_object, STRICT)
+      .Check();
   Handle<WasmInstanceObject> instance(
-      reinterpret_cast<WasmInstanceObject*>(
-          *isolate->factory()->NewJSObjectFromMap(map, TENURED)),
-      isolate);
+      reinterpret_cast<WasmInstanceObject*>(*instance_object), isolate);
 
   instance->SetInternalField(kCompiledModule, *compiled_module);
   instance->SetInternalField(kMemoryObject, isolate->heap()->undefined_value());
+  Handle<WasmInstanceWrapper> instance_wrapper =
+      WasmInstanceWrapper::New(isolate, instance);
+  instance->SetInternalField(kWasmMemInstanceWrapper, *instance_wrapper);
   return instance;
 }
 
@@ -275,8 +478,20 @@
 }
 
 Handle<WasmExportedFunction> WasmExportedFunction::New(
-    Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<String> name,
-    Handle<Code> export_wrapper, int arity, int func_index) {
+    Isolate* isolate, Handle<WasmInstanceObject> instance,
+    MaybeHandle<String> maybe_name, int func_index, int arity,
+    Handle<Code> export_wrapper) {
+  Handle<String> name;
+  if (maybe_name.is_null()) {
+    EmbeddedVector<char, 16> buffer;
+    int length = SNPrintF(buffer, "%d", func_index);
+    name = isolate->factory()
+               ->NewStringFromAscii(
+                   Vector<const char>::cast(buffer.SubVector(0, length)))
+               .ToHandleChecked();
+  } else {
+    name = maybe_name.ToHandleChecked();
+  }
   DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
   Handle<SharedFunctionInfo> shared =
       isolate->factory()->NewSharedFunctionInfo(name, export_wrapper, false);
@@ -291,22 +506,250 @@
   return Handle<WasmExportedFunction>::cast(function);
 }
 
+bool WasmSharedModuleData::IsWasmSharedModuleData(Object* object) {
+  if (!object->IsFixedArray()) return false;
+  FixedArray* arr = FixedArray::cast(object);
+  if (arr->length() != kFieldCount) return false;
+  Isolate* isolate = arr->GetIsolate();
+  if (!arr->get(kModuleWrapper)->IsForeign()) return false;
+  if (!arr->get(kModuleBytes)->IsUndefined(isolate) &&
+      !arr->get(kModuleBytes)->IsSeqOneByteString())
+    return false;
+  if (!arr->get(kScript)->IsScript()) return false;
+  if (!arr->get(kAsmJsOffsetTable)->IsUndefined(isolate) &&
+      !arr->get(kAsmJsOffsetTable)->IsByteArray())
+    return false;
+  if (!arr->get(kBreakPointInfos)->IsUndefined(isolate) &&
+      !arr->get(kBreakPointInfos)->IsFixedArray())
+    return false;
+  return true;
+}
+
+WasmSharedModuleData* WasmSharedModuleData::cast(Object* object) {
+  DCHECK(IsWasmSharedModuleData(object));
+  return reinterpret_cast<WasmSharedModuleData*>(object);
+}
+
+wasm::WasmModule* WasmSharedModuleData::module() {
+  // The kModuleWrapper field is populated with a Foreign holding the
+  // address of a WasmModule pointer (a double indirection). This way we
+  // can handle both the case where the WasmModule's lifetime is managed
+  // through a Managed<WasmModule> object and the case where it is managed
+  // by the embedder. CcTests fall into the latter case.
+  return *(reinterpret_cast<wasm::WasmModule**>(
+      Foreign::cast(get(kModuleWrapper))->foreign_address()));
+}
+
+DEFINE_OPTIONAL_ARR_ACCESSORS(WasmSharedModuleData, module_bytes, kModuleBytes,
+                              SeqOneByteString);
+DEFINE_ARR_GETTER(WasmSharedModuleData, script, kScript, Script);
+DEFINE_OPTIONAL_ARR_ACCESSORS(WasmSharedModuleData, asm_js_offset_table,
+                              kAsmJsOffsetTable, ByteArray);
+DEFINE_OPTIONAL_ARR_GETTER(WasmSharedModuleData, breakpoint_infos,
+                           kBreakPointInfos, FixedArray);
+
+Handle<WasmSharedModuleData> WasmSharedModuleData::New(
+    Isolate* isolate, Handle<Foreign> module_wrapper,
+    Handle<SeqOneByteString> module_bytes, Handle<Script> script,
+    Handle<ByteArray> asm_js_offset_table) {
+  Handle<FixedArray> arr =
+      isolate->factory()->NewFixedArray(kFieldCount, TENURED);
+
+  arr->set(kModuleWrapper, *module_wrapper);
+  if (!module_bytes.is_null()) {
+    arr->set(kModuleBytes, *module_bytes);
+  }
+  if (!script.is_null()) {
+    arr->set(kScript, *script);
+  }
+  if (!asm_js_offset_table.is_null()) {
+    arr->set(kAsmJsOffsetTable, *asm_js_offset_table);
+  }
+
+  DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*arr));
+  return Handle<WasmSharedModuleData>::cast(arr);
+}
+
+bool WasmSharedModuleData::is_asm_js() {
+  bool asm_js = module()->origin == wasm::ModuleOrigin::kAsmJsOrigin;
+  DCHECK_EQ(asm_js, script()->type() == Script::TYPE_NORMAL);
+  DCHECK_EQ(asm_js, has_asm_js_offset_table());
+  return asm_js;
+}
+
+void WasmSharedModuleData::ReinitializeAfterDeserialization(
+    Isolate* isolate, Handle<WasmSharedModuleData> shared) {
+  DCHECK(shared->get(kModuleWrapper)->IsUndefined(isolate));
+#ifdef DEBUG
+  // No BreakpointInfo objects should survive deserialization.
+  if (shared->has_breakpoint_infos()) {
+    for (int i = 0, e = shared->breakpoint_infos()->length(); i < e; ++i) {
+      DCHECK(shared->breakpoint_infos()->get(i)->IsUndefined(isolate));
+    }
+  }
+#endif
+
+  shared->set(kBreakPointInfos, isolate->heap()->undefined_value());
+
+  WasmModule* module = nullptr;
+  {
+    // We parse the module again directly from the module bytes, so
+    // the underlying storage must not be moved meanwhile.
+    DisallowHeapAllocation no_allocation;
+    SeqOneByteString* module_bytes = shared->module_bytes();
+    const byte* start =
+        reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
+    const byte* end = start + module_bytes->length();
+    // TODO(titzer): remember the module origin in the compiled_module
+    // For now, we assume serialized modules did not originate from asm.js.
+    ModuleResult result =
+        DecodeWasmModule(isolate, start, end, false, kWasmOrigin);
+    CHECK(result.ok());
+    CHECK_NOT_NULL(result.val);
+    module = const_cast<WasmModule*>(result.val);
+  }
+
+  Handle<WasmModuleWrapper> module_wrapper =
+      WasmModuleWrapper::New(isolate, module);
+
+  shared->set(kModuleWrapper, *module_wrapper);
+  DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
+}
+
+namespace {
+
+int GetBreakpointPos(Isolate* isolate, Object* break_point_info_or_undef) {
+  if (break_point_info_or_undef->IsUndefined(isolate)) return kMaxInt;
+  return BreakPointInfo::cast(break_point_info_or_undef)->source_position();
+}
+
+int FindBreakpointInfoInsertPos(Isolate* isolate,
+                                Handle<FixedArray> breakpoint_infos,
+                                int position) {
+  // Find insert location via binary search, taking care of undefined values on
+  // the right. Position is always greater than zero.
+  DCHECK_LT(0, position);
+
+  int left = 0;                            // inclusive
+  int right = breakpoint_infos->length();  // exclusive
+  while (right - left > 1) {
+    int mid = left + (right - left) / 2;
+    Object* mid_obj = breakpoint_infos->get(mid);
+    if (GetBreakpointPos(isolate, mid_obj) <= position) {
+      left = mid;
+    } else {
+      right = mid;
+    }
+  }
+
+  int left_pos = GetBreakpointPos(isolate, breakpoint_infos->get(left));
+  return left_pos < position ? left + 1 : left;
+}
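+
+// Example (illustrative): for stored positions [3, 7, 9, undefined] and
+// position == 8, the binary search ends with left == 1 (position 7);
+// since 7 < 8, the returned insert position is 2 (just before 9).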
+
+}  // namespace
+
+void WasmSharedModuleData::AddBreakpoint(Handle<WasmSharedModuleData> shared,
+                                         int position,
+                                         Handle<Object> break_point_object) {
+  Isolate* isolate = shared->GetIsolate();
+  Handle<FixedArray> breakpoint_infos;
+  if (shared->has_breakpoint_infos()) {
+    breakpoint_infos = handle(shared->breakpoint_infos(), isolate);
+  } else {
+    breakpoint_infos = isolate->factory()->NewFixedArray(4, TENURED);
+    shared->set(kBreakPointInfos, *breakpoint_infos);
+  }
+
+  int insert_pos =
+      FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
+
+  // If a BreakPointInfo object already exists for this position, add the new
+  // breakpoint object and return.
+  if (insert_pos < breakpoint_infos->length() &&
+      GetBreakpointPos(isolate, breakpoint_infos->get(insert_pos)) ==
+          position) {
+    Handle<BreakPointInfo> old_info(
+        BreakPointInfo::cast(breakpoint_infos->get(insert_pos)), isolate);
+    BreakPointInfo::SetBreakPoint(old_info, break_point_object);
+    return;
+  }
+
+  // Enlarge break positions array if necessary.
+  bool need_realloc = !breakpoint_infos->get(breakpoint_infos->length() - 1)
+                           ->IsUndefined(isolate);
+  Handle<FixedArray> new_breakpoint_infos = breakpoint_infos;
+  if (need_realloc) {
+    new_breakpoint_infos = isolate->factory()->NewFixedArray(
+        2 * breakpoint_infos->length(), TENURED);
+    shared->set(kBreakPointInfos, *new_breakpoint_infos);
+    // Copy over the entries [0, insert_pos).
+    for (int i = 0; i < insert_pos; ++i)
+      new_breakpoint_infos->set(i, breakpoint_infos->get(i));
+  }
+
+  // Move the entries in [insert_pos, ...] up by one. Iterate backwards so
+  // that entries are read before they are overwritten in the in-place case.
+  for (int i = breakpoint_infos->length() - 1; i >= insert_pos; --i) {
+    Object* entry = breakpoint_infos->get(i);
+    if (entry->IsUndefined(isolate)) continue;
+    new_breakpoint_infos->set(i + 1, entry);
+  }
+
+  // Generate new BreakpointInfo.
+  Handle<BreakPointInfo> breakpoint_info =
+      isolate->factory()->NewBreakPointInfo(position);
+  BreakPointInfo::SetBreakPoint(breakpoint_info, break_point_object);
+
+  // Now insert new position at insert_pos.
+  new_breakpoint_infos->set(insert_pos, *breakpoint_info);
+}
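+
+// Example (illustrative): adding position 8 to infos [3, 7, 9, undefined]
+// shifts 9 up and yields [3, 7, 8, 9]; had the array already been full, it
+// would first have been doubled.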
+
+void WasmSharedModuleData::SetBreakpointsOnNewInstance(
+    Handle<WasmSharedModuleData> shared, Handle<WasmInstanceObject> instance) {
+  if (!shared->has_breakpoint_infos()) return;
+  Isolate* isolate = shared->GetIsolate();
+  Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
+                                             isolate);
+  Handle<WasmDebugInfo> debug_info =
+      WasmInstanceObject::GetOrCreateDebugInfo(instance);
+
+  Handle<FixedArray> breakpoint_infos(shared->breakpoint_infos(), isolate);
+  // If the array exists, it should not be empty.
+  DCHECK_LT(0, breakpoint_infos->length());
+
+  for (int i = 0, e = breakpoint_infos->length(); i < e; ++i) {
+    Handle<Object> obj(breakpoint_infos->get(i), isolate);
+    if (obj->IsUndefined(isolate)) {
+      for (; i < e; ++i) {
+        DCHECK(breakpoint_infos->get(i)->IsUndefined(isolate));
+      }
+      break;
+    }
+    Handle<BreakPointInfo> breakpoint_info = Handle<BreakPointInfo>::cast(obj);
+    int position = breakpoint_info->source_position();
+
+    // Find the function for this breakpoint, and set the breakpoint.
+    int func_index = compiled_module->GetContainingFunction(position);
+    DCHECK_LE(0, func_index);
+    WasmFunction& func = compiled_module->module()->functions[func_index];
+    int offset_in_func = position - func.code_start_offset;
+    WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
+  }
+}
+
 Handle<WasmCompiledModule> WasmCompiledModule::New(
-    Isolate* isolate, Handle<WasmModuleWrapper> module_wrapper) {
+    Isolate* isolate, Handle<WasmSharedModuleData> shared) {
   Handle<FixedArray> ret =
       isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
-  // WasmCompiledModule::cast would fail since module bytes are not set yet.
+  // WasmCompiledModule::cast would fail since fields are not set yet.
   Handle<WasmCompiledModule> compiled_module(
       reinterpret_cast<WasmCompiledModule*>(*ret), isolate);
   compiled_module->InitId();
-  compiled_module->set_module_wrapper(module_wrapper);
+  compiled_module->set_num_imported_functions(0);
+  compiled_module->set_shared(shared);
+  compiled_module->set_native_context(isolate->native_context());
   return compiled_module;
 }
 
-wasm::WasmModule* WasmCompiledModule::module() const {
-  return reinterpret_cast<WasmModuleWrapper*>(*module_wrapper())->get();
-}
-
 void WasmCompiledModule::InitId() {
 #if DEBUG
   static uint32_t instance_id_counter = 0;
@@ -315,19 +758,39 @@
 #endif
 }
 
+MaybeHandle<String> WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+    Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+    uint32_t offset, uint32_t size) {
+  // TODO(wasm): cache strings from modules if it's a performance win.
+  Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
+                                        isolate);
+  DCHECK_GE(module_bytes->length(), offset);
+  DCHECK_GE(module_bytes->length() - offset, size);
+  Address raw = module_bytes->GetCharsAddress() + offset;
+  if (!unibrow::Utf8::Validate(reinterpret_cast<const byte*>(raw), size))
+    return {};  // UTF8 decoding error for name.
+  DCHECK_GE(kMaxInt, offset);
+  DCHECK_GE(kMaxInt, size);
+  return isolate->factory()->NewStringFromUtf8SubString(
+      module_bytes, static_cast<int>(offset), static_cast<int>(size));
+}
+
 bool WasmCompiledModule::IsWasmCompiledModule(Object* obj) {
   if (!obj->IsFixedArray()) return false;
   FixedArray* arr = FixedArray::cast(obj);
   if (arr->length() != PropertyIndices::Count) return false;
   Isolate* isolate = arr->GetIsolate();
-#define WCM_CHECK_SMALL_NUMBER(TYPE, NAME) \
-  if (!arr->get(kID_##NAME)->IsSmi()) return false;
-#define WCM_CHECK_OBJECT_OR_WEAK(TYPE, NAME)         \
-  if (!arr->get(kID_##NAME)->IsUndefined(isolate) && \
-      !arr->get(kID_##NAME)->Is##TYPE())             \
-    return false;
-#define WCM_CHECK_OBJECT(TYPE, NAME) WCM_CHECK_OBJECT_OR_WEAK(TYPE, NAME)
-#define WCM_CHECK_WEAK_LINK(TYPE, NAME) WCM_CHECK_OBJECT_OR_WEAK(WeakCell, NAME)
+#define WCM_CHECK_TYPE(NAME, TYPE_CHECK) \
+  do {                                   \
+    Object* obj = arr->get(kID_##NAME);  \
+    if (!(TYPE_CHECK)) return false;     \
+  } while (false);
+#define WCM_CHECK_OBJECT(TYPE, NAME) \
+  WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->Is##TYPE())
+#define WCM_CHECK_WASM_OBJECT(TYPE, NAME) \
+  WCM_CHECK_TYPE(NAME, TYPE::Is##TYPE(obj))
+#define WCM_CHECK_WEAK_LINK(TYPE, NAME) WCM_CHECK_OBJECT(WeakCell, NAME)
+#define WCM_CHECK_SMALL_NUMBER(TYPE, NAME) WCM_CHECK_TYPE(NAME, obj->IsSmi())
 #define WCM_CHECK(KIND, TYPE, NAME) WCM_CHECK_##KIND(TYPE, NAME)
   WCM_PROPERTY_TABLE(WCM_CHECK)
 #undef WCM_CHECK
@@ -341,7 +804,7 @@
   if (!FLAG_trace_wasm_instances) return;
   for (WasmCompiledModule* current = this; current != nullptr;) {
     PrintF("->%d", current->instance_id());
-    if (current->ptr_to_weak_next_instance() == nullptr) break;
+    if (!current->has_weak_next_instance()) break;
     CHECK(!current->ptr_to_weak_next_instance()->cleared());
     current =
         WasmCompiledModule::cast(current->ptr_to_weak_next_instance()->value());
@@ -350,6 +813,19 @@
 #endif
 }
 
+void WasmCompiledModule::ReinitializeAfterDeserialization(
+    Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+  // This method must only be called immediately after deserialization.
+  // At this point, no module wrapper exists, so the shared module data is
+  // incomplete.
+  Handle<WasmSharedModuleData> shared(
+      static_cast<WasmSharedModuleData*>(compiled_module->get(kID_shared)),
+      isolate);
+  DCHECK(!WasmSharedModuleData::IsWasmSharedModuleData(*shared));
+  WasmSharedModuleData::ReinitializeAfterDeserialization(isolate, shared);
+  DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
+}
+
 uint32_t WasmCompiledModule::mem_size() const {
   return has_memory() ? memory()->byte_length()->Number() : default_mem_size();
 }
@@ -357,3 +833,357 @@
 uint32_t WasmCompiledModule::default_mem_size() const {
   return min_mem_pages() * WasmModule::kPageSize;
 }
+
+MaybeHandle<String> WasmCompiledModule::GetFunctionNameOrNull(
+    Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+    uint32_t func_index) {
+  DCHECK_LT(func_index, compiled_module->module()->functions.size());
+  WasmFunction& function = compiled_module->module()->functions[func_index];
+  return WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+      isolate, compiled_module, function.name_offset, function.name_length);
+}
+
+Handle<String> WasmCompiledModule::GetFunctionName(
+    Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+    uint32_t func_index) {
+  MaybeHandle<String> name =
+      GetFunctionNameOrNull(isolate, compiled_module, func_index);
+  if (!name.is_null()) return name.ToHandleChecked();
+  return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
+}
+
+Vector<const uint8_t> WasmCompiledModule::GetRawFunctionName(
+    uint32_t func_index) {
+  DCHECK_GT(module()->functions.size(), func_index);
+  WasmFunction& function = module()->functions[func_index];
+  SeqOneByteString* bytes = module_bytes();
+  DCHECK_GE(bytes->length(), function.name_offset);
+  DCHECK_GE(bytes->length() - function.name_offset, function.name_length);
+  return Vector<const uint8_t>(bytes->GetCharsAddress() + function.name_offset,
+                               function.name_length);
+}
+
+int WasmCompiledModule::GetFunctionOffset(uint32_t func_index) {
+  std::vector<WasmFunction>& functions = module()->functions;
+  if (static_cast<uint32_t>(func_index) >= functions.size()) return -1;
+  DCHECK_GE(kMaxInt, functions[func_index].code_start_offset);
+  return static_cast<int>(functions[func_index].code_start_offset);
+}
+
+int WasmCompiledModule::GetContainingFunction(uint32_t byte_offset) {
+  std::vector<WasmFunction>& functions = module()->functions;
+
+  // Binary search for a function containing the given position.
+  int left = 0;                                    // inclusive
+  int right = static_cast<int>(functions.size());  // exclusive
+  if (right == 0) return -1;
+  while (right - left > 1) {
+    int mid = left + (right - left) / 2;
+    if (functions[mid].code_start_offset <= byte_offset) {
+      left = mid;
+    } else {
+      right = mid;
+    }
+  }
+  // If the found function does not contain the given position, return -1.
+  WasmFunction& func = functions[left];
+  if (byte_offset < func.code_start_offset ||
+      byte_offset >= func.code_end_offset) {
+    return -1;
+  }
+
+  return left;
+}
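+
+// Example (illustrative): with two functions covering byte ranges [10, 20)
+// and [20, 35), GetContainingFunction(12) == 0, GetContainingFunction(20)
+// == 1, and GetContainingFunction(40) == -1.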
+
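+// Translates a module-relative byte offset into a Script::PositionInfo: the
+// index of the containing function is reported as the line, the offset
+// within that function as the column.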
+bool WasmCompiledModule::GetPositionInfo(uint32_t position,
+                                         Script::PositionInfo* info) {
+  int func_index = GetContainingFunction(position);
+  if (func_index < 0) return false;
+
+  WasmFunction& function = module()->functions[func_index];
+
+  info->line = func_index;
+  info->column = position - function.code_start_offset;
+  info->line_start = function.code_start_offset;
+  info->line_end = function.code_end_offset;
+  return true;
+}
+
+namespace {
+
+enum AsmJsOffsetTableEntryLayout {
+  kOTEByteOffset,
+  kOTECallPosition,
+  kOTENumberConvPosition,
+  kOTESize
+};
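+
+// A decoded offset table is a flat array of int triples, one per entry:
+//   [byte offset, call position, number conversion position]
+// followed by a single trailing byte that marks the table as decoded.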
+
+Handle<ByteArray> GetDecodedAsmJsOffsetTable(
+    Handle<WasmCompiledModule> compiled_module, Isolate* isolate) {
+  DCHECK(compiled_module->is_asm_js());
+  Handle<ByteArray> offset_table(
+      compiled_module->shared()->asm_js_offset_table(), isolate);
+
+  // The last byte in the asm_js_offset_table ByteArray tells whether it is
+  // still encoded (0) or decoded (1).
+  enum AsmJsTableType : int { Encoded = 0, Decoded = 1 };
+  int table_type = offset_table->get(offset_table->length() - 1);
+  DCHECK(table_type == Encoded || table_type == Decoded);
+  if (table_type == Decoded) return offset_table;
+
+  AsmJsOffsetsResult asm_offsets;
+  {
+    DisallowHeapAllocation no_gc;
+    const byte* bytes_start = offset_table->GetDataStartAddress();
+    const byte* bytes_end = bytes_start + offset_table->length() - 1;
+    asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
+  }
+  // Wasm bytes must be valid and must contain an asm.js offset table.
+  DCHECK(asm_offsets.ok());
+  DCHECK_GE(kMaxInt, asm_offsets.val.size());
+  int num_functions = static_cast<int>(asm_offsets.val.size());
+  int num_imported_functions =
+      static_cast<int>(compiled_module->module()->num_imported_functions);
+  DCHECK_EQ(compiled_module->module()->functions.size(),
+            static_cast<size_t>(num_functions) + num_imported_functions);
+  int num_entries = 0;
+  for (int func = 0; func < num_functions; ++func) {
+    size_t new_size = asm_offsets.val[func].size();
+    DCHECK_LE(new_size, static_cast<size_t>(kMaxInt) - num_entries);
+    num_entries += static_cast<int>(new_size);
+  }
+  // One byte to encode that this is a decoded table.
+  DCHECK_GE(kMaxInt,
+            1 + static_cast<uint64_t>(num_entries) * kOTESize * kIntSize);
+  int total_size = 1 + num_entries * kOTESize * kIntSize;
+  Handle<ByteArray> decoded_table =
+      isolate->factory()->NewByteArray(total_size, TENURED);
+  decoded_table->set(total_size - 1, AsmJsTableType::Decoded);
+  compiled_module->shared()->set_asm_js_offset_table(*decoded_table);
+
+  int idx = 0;
+  std::vector<WasmFunction>& wasm_funs = compiled_module->module()->functions;
+  for (int func = 0; func < num_functions; ++func) {
+    std::vector<AsmJsOffsetEntry>& func_asm_offsets = asm_offsets.val[func];
+    if (func_asm_offsets.empty()) continue;
+    int func_offset =
+        wasm_funs[num_imported_functions + func].code_start_offset;
+    for (AsmJsOffsetEntry& e : func_asm_offsets) {
+      // Byte offsets must be strictly monotonically increasing:
+      DCHECK_IMPLIES(idx > 0, func_offset + e.byte_offset >
+                                  decoded_table->get_int(idx - kOTESize));
+      decoded_table->set_int(idx + kOTEByteOffset, func_offset + e.byte_offset);
+      decoded_table->set_int(idx + kOTECallPosition, e.source_position_call);
+      decoded_table->set_int(idx + kOTENumberConvPosition,
+                             e.source_position_number_conversion);
+      idx += kOTESize;
+    }
+  }
+  DCHECK_EQ(total_size, idx * kIntSize + 1);
+  return decoded_table;
+}
+
+}  // namespace
+
+int WasmCompiledModule::GetAsmJsSourcePosition(
+    Handle<WasmCompiledModule> compiled_module, uint32_t func_index,
+    uint32_t byte_offset, bool is_at_number_conversion) {
+  Isolate* isolate = compiled_module->GetIsolate();
+  Handle<ByteArray> offset_table =
+      GetDecodedAsmJsOffsetTable(compiled_module, isolate);
+
+  DCHECK_LT(func_index, compiled_module->module()->functions.size());
+  uint32_t func_code_offset =
+      compiled_module->module()->functions[func_index].code_start_offset;
+  uint32_t total_offset = func_code_offset + byte_offset;
+
+  // Binary search for the total byte offset.
+  int left = 0;                                              // inclusive
+  int right = offset_table->length() / kIntSize / kOTESize;  // exclusive
+  DCHECK_LT(left, right);
+  while (right - left > 1) {
+    int mid = left + (right - left) / 2;
+    int mid_entry = offset_table->get_int(kOTESize * mid);
+    DCHECK_GE(kMaxInt, mid_entry);
+    if (static_cast<uint32_t>(mid_entry) <= total_offset) {
+      left = mid;
+    } else {
+      right = mid;
+    }
+  }
+  // There should be an entry for each position that could show up on the stack
+  // trace:
+  DCHECK_EQ(total_offset, offset_table->get_int(kOTESize * left));
+  int idx = is_at_number_conversion ? kOTENumberConvPosition : kOTECallPosition;
+  return offset_table->get_int(kOTESize * left + idx);
+}
+
+v8::debug::WasmDisassembly WasmCompiledModule::DisassembleFunction(
+    int func_index) {
+  DisallowHeapAllocation no_gc;
+
+  if (func_index < 0 ||
+      static_cast<uint32_t>(func_index) >= module()->functions.size())
+    return {};
+
+  SeqOneByteString* module_bytes_str = module_bytes();
+  Vector<const byte> module_bytes(module_bytes_str->GetChars(),
+                                  module_bytes_str->length());
+
+  std::ostringstream disassembly_os;
+  v8::debug::WasmDisassembly::OffsetTable offset_table;
+
+  PrintWasmText(module(), module_bytes, static_cast<uint32_t>(func_index),
+                disassembly_os, &offset_table);
+
+  return {disassembly_os.str(), std::move(offset_table)};
+}
+
+bool WasmCompiledModule::GetPossibleBreakpoints(
+    const v8::debug::Location& start, const v8::debug::Location& end,
+    std::vector<v8::debug::Location>* locations) {
+  DisallowHeapAllocation no_gc;
+
+  std::vector<WasmFunction>& functions = module()->functions;
+  if (start.GetLineNumber() < 0 || start.GetColumnNumber() < 0 ||
+      (!end.IsEmpty() &&
+       (end.GetLineNumber() < 0 || end.GetColumnNumber() < 0)))
+    return false;
+
+  // start_func_index, start_offset, and end_func_index are inclusive;
+  // end_offset is exclusive.
+  // start_offset and end_offset are module-relative byte offsets.
+  uint32_t start_func_index = start.GetLineNumber();
+  if (start_func_index >= functions.size()) return false;
+  int start_func_len = functions[start_func_index].code_end_offset -
+                       functions[start_func_index].code_start_offset;
+  if (start.GetColumnNumber() > start_func_len) return false;
+  uint32_t start_offset =
+      functions[start_func_index].code_start_offset + start.GetColumnNumber();
+  uint32_t end_func_index;
+  uint32_t end_offset;
+  if (end.IsEmpty()) {
+    // Default: everything till the end of the Script.
+    end_func_index = static_cast<uint32_t>(functions.size() - 1);
+    end_offset = functions[end_func_index].code_end_offset;
+  } else {
+    // If end is specified: Use it and check for valid input.
+    end_func_index = static_cast<uint32_t>(end.GetLineNumber());
+
+    // Special case: The end location points at the start of the next
+    // function. Stop at the end of the previous function instead, such that
+    // we don't also disassemble the next function.
+    if (end.GetColumnNumber() == 0 && end_func_index > 0) {
+      --end_func_index;
+      end_offset = functions[end_func_index].code_end_offset;
+    } else {
+      if (end_func_index >= functions.size()) return false;
+      end_offset =
+          functions[end_func_index].code_start_offset + end.GetColumnNumber();
+      if (end_offset > functions[end_func_index].code_end_offset) return false;
+    }
+  }
+
+  AccountingAllocator alloc;
+  Zone tmp(&alloc, ZONE_NAME);
+  const byte* module_start = module_bytes()->GetChars();
+
+  for (uint32_t func_idx = start_func_index; func_idx <= end_func_index;
+       ++func_idx) {
+    WasmFunction& func = functions[func_idx];
+    if (func.code_start_offset == func.code_end_offset) continue;
+
+    BodyLocalDecls locals(&tmp);
+    BytecodeIterator iterator(module_start + func.code_start_offset,
+                              module_start + func.code_end_offset, &locals);
+    DCHECK_LT(0u, locals.encoded_size);
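+    // The iterator skips the local declarations; its offsets are relative to
+    // the start of the function code, so total_offset below is
+    // module-relative.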
+    for (uint32_t offset : iterator.offsets()) {
+      uint32_t total_offset = func.code_start_offset + offset;
+      if (total_offset >= end_offset) {
+        DCHECK_EQ(end_func_index, func_idx);
+        break;
+      }
+      if (total_offset < start_offset) continue;
+      locations->push_back(v8::debug::Location(func_idx, offset));
+    }
+  }
+  return true;
+}
+
+bool WasmCompiledModule::SetBreakPoint(
+    Handle<WasmCompiledModule> compiled_module, int* position,
+    Handle<Object> break_point_object) {
+  Isolate* isolate = compiled_module->GetIsolate();
+
+  // Find the function for this breakpoint.
+  int func_index = compiled_module->GetContainingFunction(*position);
+  if (func_index < 0) return false;
+  WasmFunction& func = compiled_module->module()->functions[func_index];
+  int offset_in_func = *position - func.code_start_offset;
+
+  // According to the current design, we should only be called with valid
+  // breakable positions.
+  DCHECK(IsBreakablePosition(compiled_module, func_index, offset_in_func));
+
+  // Insert new break point into break_positions of shared module data.
+  WasmSharedModuleData::AddBreakpoint(compiled_module->shared(), *position,
+                                      break_point_object);
+
+  // Iterate over all instances of this module and tell them to set this new
+  // breakpoint.
+  for (Handle<WasmInstanceObject> instance :
+       iterate_compiled_module_instance_chain(isolate, compiled_module)) {
+    Handle<WasmDebugInfo> debug_info =
+        WasmInstanceObject::GetOrCreateDebugInfo(instance);
+    WasmDebugInfo::SetBreakpoint(debug_info, func_index, offset_in_func);
+  }
+
+  return true;
+}
+
+MaybeHandle<FixedArray> WasmCompiledModule::CheckBreakPoints(int position) {
+  Isolate* isolate = GetIsolate();
+  if (!shared()->has_breakpoint_infos()) return {};
+
+  Handle<FixedArray> breakpoint_infos(shared()->breakpoint_infos(), isolate);
+  int insert_pos =
+      FindBreakpointInfoInsertPos(isolate, breakpoint_infos, position);
+  if (insert_pos >= breakpoint_infos->length()) return {};
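+  // insert_pos points at the first breakpoint info with source position
+  // >= |position|; only an exact match means a breakpoint is hit here.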
+
+  Handle<Object> maybe_breakpoint_info(breakpoint_infos->get(insert_pos),
+                                       isolate);
+  if (maybe_breakpoint_info->IsUndefined(isolate)) return {};
+  Handle<BreakPointInfo> breakpoint_info =
+      Handle<BreakPointInfo>::cast(maybe_breakpoint_info);
+  if (breakpoint_info->source_position() != position) return {};
+
+  Handle<Object> breakpoint_objects(breakpoint_info->break_point_objects(),
+                                    isolate);
+  return isolate->debug()->GetHitBreakPointObjects(breakpoint_objects);
+}
+
+Handle<WasmInstanceWrapper> WasmInstanceWrapper::New(
+    Isolate* isolate, Handle<WasmInstanceObject> instance) {
+  Handle<FixedArray> array =
+      isolate->factory()->NewFixedArray(kWrapperPropertyCount, TENURED);
+  Handle<WasmInstanceWrapper> instance_wrapper(
+      reinterpret_cast<WasmInstanceWrapper*>(*array), isolate);
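+  // The instance is held via a WeakCell, so this wrapper does not by itself
+  // keep the instance alive.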
+  Handle<WeakCell> cell = isolate->factory()->NewWeakCell(instance);
+  instance_wrapper->set(kWrapperInstanceObject, *cell);
+  return instance_wrapper;
+}
+
+bool WasmInstanceWrapper::IsWasmInstanceWrapper(Object* obj) {
+  if (!obj->IsFixedArray()) return false;
+  Handle<FixedArray> array = handle(FixedArray::cast(obj));
+  if (array->length() != kWrapperPropertyCount) return false;
+  if (!array->get(kWrapperInstanceObject)->IsWeakCell()) return false;
+  Isolate* isolate = array->GetIsolate();
+  if (!array->get(kNextInstanceWrapper)->IsUndefined(isolate) &&
+      !array->get(kNextInstanceWrapper)->IsFixedArray())
+    return false;
+  if (!array->get(kPreviousInstanceWrapper)->IsUndefined(isolate) &&
+      !array->get(kPreviousInstanceWrapper)->IsFixedArray())
+    return false;
+  return true;
+}
diff --git a/src/wasm/wasm-objects.h b/src/wasm/wasm-objects.h
index f74661f..c526a90 100644
--- a/src/wasm/wasm-objects.h
+++ b/src/wasm/wasm-objects.h
@@ -5,31 +5,41 @@
 #ifndef V8_WASM_OBJECTS_H_
 #define V8_WASM_OBJECTS_H_
 
-#include "src/objects-inl.h"
-#include "src/wasm/managed.h"
+#include "src/debug/debug.h"
+#include "src/debug/interface-types.h"
+#include "src/objects.h"
+#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/wasm-limits.h"
 
 namespace v8 {
 namespace internal {
 namespace wasm {
+class InterpretedFrame;
 struct WasmModule;
 }
 
 class WasmCompiledModule;
 class WasmDebugInfo;
 class WasmInstanceObject;
+class WasmInstanceWrapper;
 
 #define DECLARE_CASTS(name)             \
   static bool Is##name(Object* object); \
   static name* cast(Object* object)
 
+#define DECLARE_GETTER(name, type) type* name()
+
 #define DECLARE_ACCESSORS(name, type) \
-  type* get_##name();                 \
-  void set_##name(type* value)
+  void set_##name(type* value);       \
+  DECLARE_GETTER(name, type)
 
 #define DECLARE_OPTIONAL_ACCESSORS(name, type) \
   bool has_##name();                           \
-  type* get_##name();                          \
-  void set_##name(type* value)
+  DECLARE_ACCESSORS(name, type)
+
+#define DECLARE_OPTIONAL_GETTER(name, type) \
+  bool has_##name();                        \
+  DECLARE_GETTER(name, type)
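+// For example, DECLARE_OPTIONAL_ACCESSORS(buffer, JSArrayBuffer) declares
+// has_buffer(), buffer() and set_buffer(JSArrayBuffer*).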
 
 // Representation of a WebAssembly.Module JavaScript-level object.
 class WasmModuleObject : public JSObject {
@@ -40,13 +50,6 @@
   DECLARE_CASTS(WasmModuleObject);
 
   WasmCompiledModule* compiled_module();
-  wasm::WasmModule* module();
-  int num_functions();
-  bool is_asm_js();
-  int GetAsmWasmSourcePosition(int func_index, int byte_offset);
-  WasmDebugInfo* debug_info();
-  void set_debug_info(WasmDebugInfo* debug_info);
-  MaybeHandle<String> GetFunctionName(Isolate* isolate, int func_index);
 
   static Handle<WasmModuleObject> New(
       Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
@@ -61,38 +64,44 @@
   DECLARE_CASTS(WasmTableObject);
   DECLARE_ACCESSORS(functions, FixedArray);
 
-  FixedArray* get_dispatch_tables();
+  FixedArray* dispatch_tables();
   uint32_t current_length();
-  uint32_t maximum_length();
+  bool has_maximum_length();
+  int64_t maximum_length();  // Returns < 0 if no maximum.
 
   static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
-                                     uint32_t maximum,
+                                     int64_t maximum,
                                      Handle<FixedArray>* js_functions);
-  static bool Grow(Handle<WasmTableObject> table, uint32_t count);
+  static void Grow(Isolate* isolate, Handle<WasmTableObject> table,
+                   uint32_t count);
   static Handle<FixedArray> AddDispatchTable(
       Isolate* isolate, Handle<WasmTableObject> table,
       Handle<WasmInstanceObject> instance, int table_index,
-      Handle<FixedArray> dispatch_table);
+      Handle<FixedArray> function_table, Handle<FixedArray> signature_table);
 };
 
 // Representation of a WebAssembly.Memory JavaScript-level object.
 class WasmMemoryObject : public JSObject {
  public:
   // TODO(titzer): add the brand as an internal field instead of a property.
-  enum Fields : uint8_t { kArrayBuffer, kMaximum, kInstance, kFieldCount };
+  enum Fields : uint8_t { kArrayBuffer, kMaximum, kInstancesLink, kFieldCount };
 
   DECLARE_CASTS(WasmMemoryObject);
   DECLARE_ACCESSORS(buffer, JSArrayBuffer);
+  DECLARE_OPTIONAL_ACCESSORS(instances_link, WasmInstanceWrapper);
 
-  void AddInstance(WasmInstanceObject* object);
+  void AddInstance(Isolate* isolate, Handle<WasmInstanceObject> object);
+  void ResetInstancesLink(Isolate* isolate);
   uint32_t current_pages();
-  int32_t maximum_pages();  // returns < 0 if there is no maximum
+  bool has_maximum_pages();
+  int32_t maximum_pages();  // Returns < 0 if there is no maximum.
 
   static Handle<WasmMemoryObject> New(Isolate* isolate,
                                       Handle<JSArrayBuffer> buffer,
-                                      int maximum);
+                                      int32_t maximum);
 
-  static bool Grow(Handle<WasmMemoryObject> memory, uint32_t count);
+  static bool Grow(Isolate* isolate, Handle<WasmMemoryObject> memory,
+                   uint32_t count);
 };
 
 // Representation of a WebAssembly.Instance JavaScript-level object.
@@ -105,6 +114,7 @@
     kMemoryArrayBuffer,
     kGlobalsArrayBuffer,
     kDebugInfo,
+    kWasmMemInstanceWrapper,
     kFieldCount
   };
 
@@ -115,10 +125,16 @@
   DECLARE_OPTIONAL_ACCESSORS(memory_buffer, JSArrayBuffer);
   DECLARE_OPTIONAL_ACCESSORS(memory_object, WasmMemoryObject);
   DECLARE_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo);
+  DECLARE_OPTIONAL_ACCESSORS(instance_wrapper, WasmInstanceWrapper);
 
   WasmModuleObject* module_object();
   wasm::WasmModule* module();
 
+  // Get the debug info associated with the given wasm object.
+  // If no debug info exists yet, it is created automatically.
+  static Handle<WasmDebugInfo> GetOrCreateDebugInfo(
+      Handle<WasmInstanceObject> instance);
+
   static Handle<WasmInstanceObject> New(
       Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
 };
@@ -135,9 +151,47 @@
 
   static Handle<WasmExportedFunction> New(Isolate* isolate,
                                           Handle<WasmInstanceObject> instance,
-                                          Handle<String> name,
-                                          Handle<Code> export_wrapper,
-                                          int arity, int func_index);
+                                          MaybeHandle<String> maybe_name,
+                                          int func_index, int arity,
+                                          Handle<Code> export_wrapper);
+};
+
+// Information shared by all WasmCompiledModule objects for the same module.
+class WasmSharedModuleData : public FixedArray {
+  enum Fields {
+    kModuleWrapper,
+    kModuleBytes,
+    kScript,
+    kAsmJsOffsetTable,
+    kBreakPointInfos,
+    kFieldCount
+  };
+
+ public:
+  DECLARE_CASTS(WasmSharedModuleData);
+
+  DECLARE_GETTER(module, wasm::WasmModule);
+  DECLARE_OPTIONAL_ACCESSORS(module_bytes, SeqOneByteString);
+  DECLARE_GETTER(script, Script);
+  DECLARE_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray);
+  DECLARE_OPTIONAL_GETTER(breakpoint_infos, FixedArray);
+
+  static Handle<WasmSharedModuleData> New(
+      Isolate* isolate, Handle<Foreign> module_wrapper,
+      Handle<SeqOneByteString> module_bytes, Handle<Script> script,
+      Handle<ByteArray> asm_js_offset_table);
+
+  // Check whether this module was generated from asm.js source.
+  bool is_asm_js();
+
+  static void ReinitializeAfterDeserialization(Isolate*,
+                                               Handle<WasmSharedModuleData>);
+
+  static void AddBreakpoint(Handle<WasmSharedModuleData>, int position,
+                            Handle<Object> break_point_object);
+
+  static void SetBreakpointsOnNewInstance(Handle<WasmSharedModuleData>,
+                                          Handle<WasmInstanceObject>);
 };
 
 class WasmCompiledModule : public FixedArray {
@@ -149,7 +203,7 @@
     return reinterpret_cast<WasmCompiledModule*>(fixed_array);
   }
 
-#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID)                           \
+#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK)               \
   Handle<TYPE> NAME() const { return handle(ptr_to_##NAME()); }      \
                                                                      \
   MaybeHandle<TYPE> maybe_##NAME() const {                           \
@@ -157,9 +211,15 @@
     return MaybeHandle<TYPE>();                                      \
   }                                                                  \
                                                                      \
+  TYPE* maybe_ptr_to_##NAME() const {                                \
+    Object* obj = get(ID);                                           \
+    if (!(TYPE_CHECK)) return nullptr;                               \
+    return TYPE::cast(obj);                                          \
+  }                                                                  \
+                                                                     \
   TYPE* ptr_to_##NAME() const {                                      \
     Object* obj = get(ID);                                           \
-    if (!obj->Is##TYPE()) return nullptr;                            \
+    DCHECK(TYPE_CHECK);                                              \
     return TYPE::cast(obj);                                          \
   }                                                                  \
                                                                      \
@@ -167,11 +227,18 @@
                                                                      \
   void set_ptr_to_##NAME(TYPE* value) { set(ID, value); }            \
                                                                      \
-  bool has_##NAME() const { return get(ID)->Is##TYPE(); }            \
+  bool has_##NAME() const {                                          \
+    Object* obj = get(ID);                                           \
+    return TYPE_CHECK;                                               \
+  }                                                                  \
                                                                      \
   void reset_##NAME() { set_undefined(ID); }
 
-#define WCM_OBJECT(TYPE, NAME) WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME)
+#define WCM_OBJECT(TYPE, NAME) \
+  WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE())
+
+#define WCM_WASM_OBJECT(TYPE, NAME) \
+  WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, TYPE::Is##TYPE(obj))
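+// WCM_WASM_OBJECT is for types which are plain FixedArrays under the hood
+// (like WasmSharedModuleData) and hence need an explicit static type check.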
 
 #define WCM_SMALL_NUMBER(TYPE, NAME)                               \
   TYPE NAME() const {                                              \
@@ -179,30 +246,29 @@
   }                                                                \
   void set_##NAME(TYPE value) { set(kID_##NAME, Smi::FromInt(value)); }
 
-#define WCM_WEAK_LINK(TYPE, NAME)                        \
-  WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME); \
-                                                         \
-  Handle<TYPE> NAME() const {                            \
-    return handle(TYPE::cast(weak_##NAME()->value()));   \
+#define WCM_WEAK_LINK(TYPE, NAME)                                           \
+  WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME, obj->IsWeakCell()); \
+                                                                            \
+  Handle<TYPE> NAME() const {                                               \
+    return handle(TYPE::cast(weak_##NAME()->value()));                      \
   }
 
-#define CORE_WCM_PROPERTY_TABLE(MACRO)                \
-  MACRO(OBJECT, FixedArray, code_table)               \
-  MACRO(OBJECT, Foreign, module_wrapper)              \
-  /* For debugging: */                                \
-  MACRO(OBJECT, SeqOneByteString, module_bytes)       \
-  MACRO(OBJECT, Script, script)                       \
-  MACRO(OBJECT, ByteArray, asm_js_offset_tables)      \
-  /* End of debugging stuff */                        \
-  MACRO(OBJECT, FixedArray, function_tables)          \
-  MACRO(OBJECT, FixedArray, empty_function_tables)    \
-  MACRO(OBJECT, JSArrayBuffer, memory)                \
-  MACRO(SMALL_NUMBER, uint32_t, min_mem_pages)        \
-  MACRO(SMALL_NUMBER, uint32_t, max_mem_pages)        \
-  MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
-  MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
-  MACRO(WEAK_LINK, JSObject, owning_instance)         \
-  MACRO(WEAK_LINK, JSObject, wasm_module)
+#define CORE_WCM_PROPERTY_TABLE(MACRO)                  \
+  MACRO(WASM_OBJECT, WasmSharedModuleData, shared)      \
+  MACRO(OBJECT, Context, native_context)                \
+  MACRO(SMALL_NUMBER, uint32_t, num_imported_functions) \
+  MACRO(OBJECT, FixedArray, code_table)                 \
+  MACRO(OBJECT, FixedArray, weak_exported_functions)    \
+  MACRO(OBJECT, FixedArray, function_tables)            \
+  MACRO(OBJECT, FixedArray, signature_tables)           \
+  MACRO(OBJECT, FixedArray, empty_function_tables)      \
+  MACRO(OBJECT, JSArrayBuffer, memory)                  \
+  MACRO(SMALL_NUMBER, uint32_t, min_mem_pages)          \
+  MACRO(SMALL_NUMBER, uint32_t, max_mem_pages)          \
+  MACRO(WEAK_LINK, WasmCompiledModule, next_instance)   \
+  MACRO(WEAK_LINK, WasmCompiledModule, prev_instance)   \
+  MACRO(WEAK_LINK, JSObject, owning_instance)           \
+  MACRO(WEAK_LINK, WasmModuleObject, wasm_module)
 
 #if DEBUG
 #define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_NUMBER, uint32_t, instance_id)
@@ -223,8 +289,8 @@
   };
 
  public:
-  static Handle<WasmCompiledModule> New(
-      Isolate* isolate, Handle<Managed<wasm::WasmModule>> module_wrapper);
+  static Handle<WasmCompiledModule> New(Isolate* isolate,
+                                        Handle<WasmSharedModuleData> shared);
 
   static Handle<WasmCompiledModule> Clone(Isolate* isolate,
                                           Handle<WasmCompiledModule> module) {
@@ -234,30 +300,105 @@
     ret->reset_weak_owning_instance();
     ret->reset_weak_next_instance();
     ret->reset_weak_prev_instance();
+    ret->reset_weak_exported_functions();
     return ret;
   }
 
   uint32_t mem_size() const;
   uint32_t default_mem_size() const;
 
-  wasm::WasmModule* module() const;
-
 #define DECLARATION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
   WCM_PROPERTY_TABLE(DECLARATION)
 #undef DECLARATION
 
+// Allow calling methods of WasmSharedModuleData directly on this object.
+#define FORWARD_SHARED(type, name) \
+  type name() { return shared()->name(); }
+  FORWARD_SHARED(SeqOneByteString*, module_bytes)
+  FORWARD_SHARED(wasm::WasmModule*, module)
+  FORWARD_SHARED(Script*, script)
+  FORWARD_SHARED(bool, is_asm_js)
+#undef FORWARD_SHARED
+
   static bool IsWasmCompiledModule(Object* obj);
 
   void PrintInstancesChain();
 
-  static void RecreateModuleWrapper(Isolate* isolate,
-                                    Handle<FixedArray> compiled_module);
+  static void ReinitializeAfterDeserialization(Isolate*,
+                                               Handle<WasmCompiledModule>);
 
-  // Extract a function name from the given wasm instance.
+  // Get the function name of the function identified by the given index.
   // Returns a null handle if the function is unnamed or the name is not a valid
   // UTF-8 string.
-  static MaybeHandle<String> GetFunctionName(
-      Handle<WasmCompiledModule> compiled_module, uint32_t func_index);
+  static MaybeHandle<String> GetFunctionNameOrNull(
+      Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+      uint32_t func_index);
+
+  // Get the function name of the function identified by the given index.
+  // Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
+  // valid UTF-8 string.
+  static Handle<String> GetFunctionName(
+      Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+      uint32_t func_index);
+
+  // Get the raw bytes of the function name of the function identified by the
+  // given index.
+  // Meant to be used for debugging or frame printing.
+  // Does not allocate, hence gc-safe.
+  Vector<const uint8_t> GetRawFunctionName(uint32_t func_index);
+
+  // Return the byte offset of the function identified by the given index.
+  // The offset will be relative to the start of the module bytes.
+  // Returns -1 if the function index is invalid.
+  int GetFunctionOffset(uint32_t func_index);
+
+  // Returns the function containing the given byte offset.
+  // Returns -1 if the byte offset is not contained in any function of this
+  // module.
+  int GetContainingFunction(uint32_t byte_offset);
+
+  // Translate from byte offset in the module to function number and byte offset
+  // within that function, encoded as line and column in the position info.
+  // Returns true if the position is valid inside this module, false otherwise.
+  bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
+
+  // Get the asm.js source position from a byte offset.
+  // Must only be called if the associated wasm object was created from asm.js.
+  static int GetAsmJsSourcePosition(Handle<WasmCompiledModule> compiled_module,
+                                    uint32_t func_index, uint32_t byte_offset,
+                                    bool is_at_number_conversion);
+
+  // Compute the disassembly of a wasm function.
+  // Returns the disassembly string and a list of <byte_offset, line, column>
+  // entries, mapping wasm byte offsets to line and column in the disassembly.
+  // The list is guaranteed to be ordered by the byte_offset.
+  // Returns an empty string and empty vector if the function index is invalid.
+  debug::WasmDisassembly DisassembleFunction(int func_index);
+
+  // Extract a portion of the wire bytes as a UTF-8 string.
+  // Returns a null handle if the respective bytes do not form a valid UTF-8
+  // string.
+  static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
+      Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+      uint32_t offset, uint32_t size);
+
+  // Get a list of all possible breakpoints within a given range of this module.
+  bool GetPossibleBreakpoints(const debug::Location& start,
+                              const debug::Location& end,
+                              std::vector<debug::Location>* locations);
+
+  // Set a breakpoint on the given byte position inside the given module.
+  // This will affect all live and future instances of the module.
+  // The passed position might be modified to point to the next breakable
+  // location inside the same function.
+  // If it points outside a function, or beyond the last breakable location,
+  // this function returns false and does not set any breakpoint.
+  static bool SetBreakPoint(Handle<WasmCompiledModule>, int* position,
+                            Handle<Object> break_point_object);
+
+  // Return an empty handle if no breakpoint is hit at that location, or a
+  // FixedArray with all hit breakpoint objects.
+  MaybeHandle<FixedArray> CheckBreakPoints(int position);
 
  private:
   void InitId();
@@ -267,36 +408,97 @@
 
 class WasmDebugInfo : public FixedArray {
  public:
-  enum class Fields { kFieldCount };
+  enum Fields {
+    kInstance,
+    kInterpreterHandle,
+    kInterpretedFunctions,
+    kFieldCount
+  };
 
-  static Handle<WasmDebugInfo> New(Handle<JSObject> wasm);
+  static Handle<WasmDebugInfo> New(Handle<WasmInstanceObject>);
 
-  static bool IsDebugInfo(Object* object);
-  static WasmDebugInfo* cast(Object* object);
+  static bool IsDebugInfo(Object*);
+  static WasmDebugInfo* cast(Object*);
 
-  JSObject* wasm_instance();
+  // Set a breakpoint in the given function at the given byte offset within that
+  // function. This will redirect all future calls to this function to the
+  // interpreter and will always pause at the given offset.
+  static void SetBreakpoint(Handle<WasmDebugInfo>, int func_index, int offset);
 
-  bool SetBreakPoint(int byte_offset);
+  // Make a function always execute in the interpreter without setting a
+  // breakpoint.
+  static void RedirectToInterpreter(Handle<WasmDebugInfo>, int func_index);
 
-  // Get the Script for the specified function.
-  static Script* GetFunctionScript(Handle<WasmDebugInfo> debug_info,
-                                   int func_index);
+  void PrepareStep(StepAction);
 
-  // Disassemble the specified function from this module.
-  static Handle<String> DisassembleFunction(Handle<WasmDebugInfo> debug_info,
-                                            int func_index);
+  void RunInterpreter(int func_index, uint8_t* arg_buffer);
 
-  // Get the offset table for the specified function, mapping from byte offsets
-  // to position in the disassembly.
-  // Returns an array with three entries per instruction: byte offset, line and
-  // column.
-  static Handle<FixedArray> GetFunctionOffsetTable(
-      Handle<WasmDebugInfo> debug_info, int func_index);
+  // Get the stack of the wasm interpreter as pairs of <function index, byte
+  // offset>. The list is ordered bottom-to-top, i.e. caller before callee.
+  std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
+      Address frame_pointer);
 
-  // Get the asm.js source position from a byte offset.
-  // Must only be called if the associated wasm object was created from asm.js.
-  static int GetAsmJsSourcePosition(Handle<WasmDebugInfo> debug_info,
-                                    int func_index, int byte_offset);
+  std::unique_ptr<wasm::InterpretedFrame> GetInterpretedFrame(
+      Address frame_pointer, int idx);
+
+  // Returns the number of calls / function frames executed in the interpreter.
+  uint64_t NumInterpretedCalls();
+
+  DECLARE_GETTER(wasm_instance, WasmInstanceObject);
+};
+
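+// A WasmInstanceWrapper is a FixedArray acting as a node in a doubly-linked
+// list of instances: it weakly references one WasmInstanceObject and links to
+// the previous and next wrapper. WasmMemoryObject uses this chain (via
+// instances_link) to reach all instances sharing its memory.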
+class WasmInstanceWrapper : public FixedArray {
+ public:
+  static Handle<WasmInstanceWrapper> New(Isolate* isolate,
+                                         Handle<WasmInstanceObject> instance);
+  static WasmInstanceWrapper* cast(Object* fixed_array) {
+    SLOW_DCHECK(IsWasmInstanceWrapper(fixed_array));
+    return reinterpret_cast<WasmInstanceWrapper*>(fixed_array);
+  }
+  static bool IsWasmInstanceWrapper(Object* obj);
+  bool has_instance() { return get(kWrapperInstanceObject)->IsWeakCell(); }
+  Handle<WasmInstanceObject> instance_object() {
+    Object* obj = get(kWrapperInstanceObject);
+    DCHECK(obj->IsWeakCell());
+    WeakCell* cell = WeakCell::cast(obj);
+    DCHECK(cell->value()->IsJSObject());
+    return handle(WasmInstanceObject::cast(cell->value()));
+  }
+  bool has_next() { return IsWasmInstanceWrapper(get(kNextInstanceWrapper)); }
+  bool has_previous() {
+    return IsWasmInstanceWrapper(get(kPreviousInstanceWrapper));
+  }
+  void set_next_wrapper(Object* obj) {
+    DCHECK(IsWasmInstanceWrapper(obj));
+    set(kNextInstanceWrapper, obj);
+  }
+  void set_previous_wrapper(Object* obj) {
+    DCHECK(IsWasmInstanceWrapper(obj));
+    set(kPreviousInstanceWrapper, obj);
+  }
+  Handle<WasmInstanceWrapper> next_wrapper() {
+    Object* obj = get(kNextInstanceWrapper);
+    DCHECK(IsWasmInstanceWrapper(obj));
+    return handle(WasmInstanceWrapper::cast(obj));
+  }
+  Handle<WasmInstanceWrapper> previous_wrapper() {
+    Object* obj = get(kPreviousInstanceWrapper);
+    DCHECK(IsWasmInstanceWrapper(obj));
+    return handle(WasmInstanceWrapper::cast(obj));
+  }
+  void reset_next_wrapper() { set_undefined(kNextInstanceWrapper); }
+  void reset_previous_wrapper() { set_undefined(kPreviousInstanceWrapper); }
+  void reset() {
+    for (int kID = 0; kID < kWrapperPropertyCount; kID++) set_undefined(kID);
+  }
+
+ private:
+  enum {
+    kWrapperInstanceObject,
+    kNextInstanceWrapper,
+    kPreviousInstanceWrapper,
+    kWrapperPropertyCount
+  };
 };
 
 #undef DECLARE_ACCESSORS
diff --git a/src/wasm/wasm-opcodes.cc b/src/wasm/wasm-opcodes.cc
index 8f81b81..ec1cbd5 100644
--- a/src/wasm/wasm-opcodes.cc
+++ b/src/wasm/wasm-opcodes.cc
@@ -4,38 +4,230 @@
 
 #include "src/wasm/wasm-opcodes.h"
 #include "src/messages.h"
+#include "src/runtime/runtime.h"
 #include "src/signature.h"
 
 namespace v8 {
 namespace internal {
 namespace wasm {
 
-typedef Signature<LocalType> FunctionSig;
+typedef Signature<ValueType> FunctionSig;
+
+#define CASE_OP(name, str) \
+  case kExpr##name:        \
+    return str;
+#define CASE_I32_OP(name, str) CASE_OP(I32##name, "i32." str)
+#define CASE_I64_OP(name, str) CASE_OP(I64##name, "i64." str)
+#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
+#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
+#define CASE_S128_OP(name, str) CASE_OP(S128##name, "s128." str)
+#define CASE_F32x4_OP(name, str) CASE_OP(F32x4##name, "f32x4." str)
+#define CASE_I32x4_OP(name, str) CASE_OP(I32x4##name, "i32x4." str)
+#define CASE_I16x8_OP(name, str) CASE_OP(I16x8##name, "i16x8." str)
+#define CASE_I8x16_OP(name, str) CASE_OP(I8x16##name, "i8x16." str)
+#define CASE_S32x4_OP(name, str) CASE_OP(S32x4##name, "s32x4." str)
+#define CASE_S16x8_OP(name, str) CASE_OP(S16x8##name, "s16x8." str)
+#define CASE_S8x16_OP(name, str) CASE_OP(S8x16##name, "s8x16." str)
+#define CASE_INT_OP(name, str) CASE_I32_OP(name, str) CASE_I64_OP(name, str)
+#define CASE_FLOAT_OP(name, str) CASE_F32_OP(name, str) CASE_F64_OP(name, str)
+#define CASE_ALL_OP(name, str) CASE_FLOAT_OP(name, str) CASE_INT_OP(name, str)
+#define CASE_SIMD_OP(name, str)                                              \
+  CASE_F32x4_OP(name, str) CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) \
+      CASE_I8x16_OP(name, str)
+#define CASE_SIMDI_OP(name, str) \
+  CASE_I32x4_OP(name, str) CASE_I16x8_OP(name, str) CASE_I8x16_OP(name, str)
+#define CASE_SIGN_OP(TYPE, name, str) \
+  CASE_##TYPE##_OP(name##S, str "_s") CASE_##TYPE##_OP(name##U, str "_u")
+#define CASE_ALL_SIGN_OP(name, str) \
+  CASE_FLOAT_OP(name, str) CASE_SIGN_OP(INT, name, str)
+#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
+  CASE_##RES##_OP(U##name##SRC, str "_u/" src_suffix)    \
+      CASE_##RES##_OP(S##name##SRC, str "_s/" src_suffix)
+#define CASE_L32_OP(name, str)          \
+  CASE_SIGN_OP(I32, name##8, str "8")   \
+  CASE_SIGN_OP(I32, name##16, str "16") \
+  CASE_I32_OP(name, str "32")
 
 const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
   switch (opcode) {
-#define DECLARE_NAME_CASE(name, opcode, sig) \
-  case kExpr##name:                          \
-    return "Expr" #name;
-    FOREACH_OPCODE(DECLARE_NAME_CASE)
-#undef DECLARE_NAME_CASE
-    default:
-      break;
-  }
-  return "Unknown";
-}
+    // clang-format off
 
-const char* WasmOpcodes::ShortOpcodeName(WasmOpcode opcode) {
-  switch (opcode) {
-#define DECLARE_NAME_CASE(name, opcode, sig) \
-  case kExpr##name:                          \
-    return #name;
-    FOREACH_OPCODE(DECLARE_NAME_CASE)
-#undef DECLARE_NAME_CASE
-    default:
-      break;
+    // Standard opcodes
+    CASE_INT_OP(Eqz, "eqz")
+    CASE_ALL_OP(Eq, "eq")
+    CASE_ALL_OP(Ne, "ne")
+    CASE_ALL_OP(Add, "add")
+    CASE_ALL_OP(Sub, "sub")
+    CASE_ALL_OP(Mul, "mul")
+    CASE_ALL_SIGN_OP(Lt, "lt")
+    CASE_ALL_SIGN_OP(Gt, "gt")
+    CASE_ALL_SIGN_OP(Le, "le")
+    CASE_ALL_SIGN_OP(Ge, "ge")
+    CASE_INT_OP(Clz, "clz")
+    CASE_INT_OP(Ctz, "ctz")
+    CASE_INT_OP(Popcnt, "popcnt")
+    CASE_ALL_SIGN_OP(Div, "div")
+    CASE_SIGN_OP(INT, Rem, "rem")
+    CASE_INT_OP(And, "and")
+    CASE_INT_OP(Ior, "or")
+    CASE_INT_OP(Xor, "xor")
+    CASE_INT_OP(Shl, "shl")
+    CASE_SIGN_OP(INT, Shr, "shr")
+    CASE_INT_OP(Rol, "rol")
+    CASE_INT_OP(Ror, "ror")
+    CASE_FLOAT_OP(Abs, "abs")
+    CASE_FLOAT_OP(Neg, "neg")
+    CASE_FLOAT_OP(Ceil, "ceil")
+    CASE_FLOAT_OP(Floor, "floor")
+    CASE_FLOAT_OP(Trunc, "trunc")
+    CASE_FLOAT_OP(NearestInt, "nearest")
+    CASE_FLOAT_OP(Sqrt, "sqrt")
+    CASE_FLOAT_OP(Min, "min")
+    CASE_FLOAT_OP(Max, "max")
+    CASE_FLOAT_OP(CopySign, "copysign")
+    CASE_I32_OP(ConvertI64, "wrap/i64")
+    CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
+    CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
+    CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
+    CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
+    CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
+    CASE_F32_OP(ConvertF64, "demote/f64")
+    CASE_CONVERT_OP(Convert, F64, I32, "i32", "convert")
+    CASE_CONVERT_OP(Convert, F64, I64, "i64", "convert")
+    CASE_F64_OP(ConvertF32, "promote/f32")
+    CASE_I32_OP(ReinterpretF32, "reinterpret/f32")
+    CASE_I64_OP(ReinterpretF64, "reinterpret/f64")
+    CASE_F32_OP(ReinterpretI32, "reinterpret/i32")
+    CASE_F64_OP(ReinterpretI64, "reinterpret/i64")
+    CASE_OP(Unreachable, "unreachable")
+    CASE_OP(Nop, "nop")
+    CASE_OP(Block, "block")
+    CASE_OP(Loop, "loop")
+    CASE_OP(If, "if")
+    CASE_OP(Else, "else")
+    CASE_OP(End, "end")
+    CASE_OP(Br, "br")
+    CASE_OP(BrIf, "br_if")
+    CASE_OP(BrTable, "br_table")
+    CASE_OP(Return, "return")
+    CASE_OP(CallFunction, "call")
+    CASE_OP(CallIndirect, "call_indirect")
+    CASE_OP(Drop, "drop")
+    CASE_OP(Select, "select")
+    CASE_OP(GetLocal, "get_local")
+    CASE_OP(SetLocal, "set_local")
+    CASE_OP(TeeLocal, "tee_local")
+    CASE_OP(GetGlobal, "get_global")
+    CASE_OP(SetGlobal, "set_global")
+    CASE_ALL_OP(Const, "const")
+    CASE_OP(MemorySize, "current_memory")
+    CASE_OP(GrowMemory, "grow_memory")
+    CASE_ALL_OP(LoadMem, "load")
+    CASE_SIGN_OP(INT, LoadMem8, "load8")
+    CASE_SIGN_OP(INT, LoadMem16, "load16")
+    CASE_SIGN_OP(I64, LoadMem32, "load32")
+    CASE_ALL_OP(StoreMem, "store")
+    CASE_INT_OP(StoreMem8, "store8")
+    CASE_INT_OP(StoreMem16, "store16")
+    CASE_I64_OP(StoreMem32, "store32")
+
+    // Non-standard opcodes.
+    CASE_OP(Try, "try")
+    CASE_OP(Throw, "throw")
+    CASE_OP(Catch, "catch")
+
+    // asm.js-only opcodes.
+    CASE_F64_OP(Acos, "acos")
+    CASE_F64_OP(Asin, "asin")
+    CASE_F64_OP(Atan, "atan")
+    CASE_F64_OP(Cos, "cos")
+    CASE_F64_OP(Sin, "sin")
+    CASE_F64_OP(Tan, "tan")
+    CASE_F64_OP(Exp, "exp")
+    CASE_F64_OP(Log, "log")
+    CASE_F64_OP(Atan2, "atan2")
+    CASE_F64_OP(Pow, "pow")
+    CASE_F64_OP(Mod, "mod")
+    CASE_F32_OP(AsmjsLoadMem, "asmjs_load")
+    CASE_F64_OP(AsmjsLoadMem, "asmjs_load")
+    CASE_L32_OP(AsmjsLoadMem, "asmjs_load")
+    CASE_I32_OP(AsmjsStoreMem, "asmjs_store")
+    CASE_F32_OP(AsmjsStoreMem, "asmjs_store")
+    CASE_F64_OP(AsmjsStoreMem, "asmjs_store")
+    CASE_I32_OP(AsmjsStoreMem8, "asmjs_store8")
+    CASE_I32_OP(AsmjsStoreMem16, "asmjs_store16")
+    CASE_SIGN_OP(I32, AsmjsDiv, "asmjs_div")
+    CASE_SIGN_OP(I32, AsmjsRem, "asmjs_rem")
+    CASE_I32_OP(AsmjsSConvertF32, "asmjs_convert_s/f32")
+    CASE_I32_OP(AsmjsUConvertF32, "asmjs_convert_u/f32")
+    CASE_I32_OP(AsmjsSConvertF64, "asmjs_convert_s/f64")
+    CASE_I32_OP(AsmjsUConvertF64, "asmjs_convert_u/f64")
+
+    // SIMD opcodes.
+    CASE_SIMD_OP(Splat, "splat")
+    CASE_SIMD_OP(Neg, "neg")
+    CASE_SIMD_OP(Eq, "eq")
+    CASE_SIMD_OP(Ne, "ne")
+    CASE_SIMD_OP(Add, "add")
+    CASE_SIMD_OP(Sub, "sub")
+    CASE_SIMD_OP(Mul, "mul")
+    CASE_F32x4_OP(Abs, "abs")
+    CASE_F32x4_OP(Sqrt, "sqrt")
+    CASE_F32x4_OP(Div, "div")
+    CASE_F32x4_OP(RecipApprox, "recip_approx")
+    CASE_F32x4_OP(SqrtApprox, "sqrt_approx")
+    CASE_F32x4_OP(Min, "min")
+    CASE_F32x4_OP(Max, "max")
+    CASE_F32x4_OP(MinNum, "min_num")
+    CASE_F32x4_OP(MaxNum, "max_num")
+    CASE_F32x4_OP(Lt, "lt")
+    CASE_F32x4_OP(Le, "le")
+    CASE_F32x4_OP(Gt, "gt")
+    CASE_F32x4_OP(Ge, "ge")
+    CASE_CONVERT_OP(Convert, F32x4, I32x4, "i32", "convert")
+    CASE_CONVERT_OP(Convert, I32x4, F32x4, "f32", "convert")
+    CASE_F32x4_OP(ExtractLane, "extract_lane")
+    CASE_F32x4_OP(ReplaceLane, "replace_lane")
+    CASE_SIMDI_OP(ExtractLane, "extract_lane")
+    CASE_SIMDI_OP(ReplaceLane, "replace_lane")
+    CASE_SIGN_OP(SIMDI, Min, "min")
+    CASE_SIGN_OP(SIMDI, Max, "max")
+    CASE_SIGN_OP(SIMDI, Lt, "lt")
+    CASE_SIGN_OP(SIMDI, Le, "le")
+    CASE_SIGN_OP(SIMDI, Gt, "gt")
+    CASE_SIGN_OP(SIMDI, Ge, "ge")
+    CASE_SIGN_OP(SIMDI, Shr, "shr")
+    CASE_SIMDI_OP(Shl, "shl")
+    CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
+    CASE_SIGN_OP(I8x16, AddSaturate, "add_saturate")
+    CASE_SIGN_OP(I16x8, SubSaturate, "sub_saturate")
+    CASE_SIGN_OP(I8x16, SubSaturate, "sub_saturate")
+    CASE_S128_OP(Or, "or")
+    CASE_S128_OP(Xor, "xor")
+    CASE_S128_OP(And, "and")
+    CASE_S128_OP(Not, "not")
+    CASE_S32x4_OP(Select, "select")
+    CASE_S32x4_OP(Swizzle, "swizzle")
+    CASE_S32x4_OP(Shuffle, "shuffle")
+    CASE_S16x8_OP(Select, "select")
+    CASE_S16x8_OP(Swizzle, "swizzle")
+    CASE_S16x8_OP(Shuffle, "shuffle")
+    CASE_S8x16_OP(Select, "select")
+    CASE_S8x16_OP(Swizzle, "swizzle")
+    CASE_S8x16_OP(Shuffle, "shuffle")
+
+    // Atomic operations.
+    CASE_L32_OP(AtomicAdd, "atomic_add")
+    CASE_L32_OP(AtomicAnd, "atomic_and")
+    CASE_L32_OP(AtomicCompareExchange, "atomic_cmpxchng")
+    CASE_L32_OP(AtomicExchange, "atomic_xchng")
+    CASE_L32_OP(AtomicOr, "atomic_or")
+    CASE_L32_OP(AtomicSub, "atomic_sub")
+    CASE_L32_OP(AtomicXor, "atomic_xor")
+
+    default: return "unknown";
+    // clang-format on
   }
-  return "Unknown";
 }
 
 bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
@@ -69,7 +261,7 @@
 
 // TODO(titzer): not static-initializer safe. Wrap in LazyInstance.
 #define DECLARE_SIG(name, ...)                      \
-  static LocalType kTypes_##name[] = {__VA_ARGS__}; \
+  static ValueType kTypes_##name[] = {__VA_ARGS__}; \
   static const FunctionSig kSig_##name(             \
       1, static_cast<int>(arraysize(kTypes_##name)) - 1, kTypes_##name);
 
diff --git a/src/wasm/wasm-opcodes.h b/src/wasm/wasm-opcodes.h
index ec22579..a4812f5 100644
--- a/src/wasm/wasm-opcodes.h
+++ b/src/wasm/wasm-opcodes.h
@@ -7,6 +7,7 @@
 
 #include "src/globals.h"
 #include "src/machine-type.h"
+#include "src/runtime/runtime.h"
 #include "src/signature.h"
 
 namespace v8 {
@@ -14,31 +15,36 @@
 namespace wasm {
 
 // Binary encoding of local types.
-enum LocalTypeCode {
+enum ValueTypeCode {
   kLocalVoid = 0x40,
   kLocalI32 = 0x7f,
   kLocalI64 = 0x7e,
   kLocalF32 = 0x7d,
   kLocalF64 = 0x7c,
-  kLocalS128 = 0x7b
+  kLocalS128 = 0x7b,
+  kLocalS1x4 = 0x7a,
+  kLocalS1x8 = 0x79,
+  kLocalS1x16 = 0x78
 };
 
 // Type code for multi-value block types.
 static const uint8_t kMultivalBlock = 0x41;
 
-// We reuse the internal machine type to represent WebAssembly AST types.
+// We reuse the internal machine type to represent WebAssembly types.
 // A typedef improves readability without adding a whole new type system.
-typedef MachineRepresentation LocalType;
-const LocalType kAstStmt = MachineRepresentation::kNone;
-const LocalType kAstI32 = MachineRepresentation::kWord32;
-const LocalType kAstI64 = MachineRepresentation::kWord64;
-const LocalType kAstF32 = MachineRepresentation::kFloat32;
-const LocalType kAstF64 = MachineRepresentation::kFloat64;
-const LocalType kAstS128 = MachineRepresentation::kSimd128;
-// We use kTagged here because kNone is already used by kAstStmt.
-const LocalType kAstEnd = MachineRepresentation::kTagged;
+typedef MachineRepresentation ValueType;
+const ValueType kWasmStmt = MachineRepresentation::kNone;
+const ValueType kWasmI32 = MachineRepresentation::kWord32;
+const ValueType kWasmI64 = MachineRepresentation::kWord64;
+const ValueType kWasmF32 = MachineRepresentation::kFloat32;
+const ValueType kWasmF64 = MachineRepresentation::kFloat64;
+const ValueType kWasmS128 = MachineRepresentation::kSimd128;
+const ValueType kWasmS1x4 = MachineRepresentation::kSimd1x4;
+const ValueType kWasmS1x8 = MachineRepresentation::kSimd1x8;
+const ValueType kWasmS1x16 = MachineRepresentation::kSimd1x16;
+const ValueType kWasmVar = MachineRepresentation::kTagged;
 
-typedef Signature<LocalType> FunctionSig;
+typedef Signature<ValueType> FunctionSig;
 std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
 
 typedef Vector<const char> WasmName;
@@ -77,8 +83,7 @@
   V(I32Const, 0x41, _)         \
   V(I64Const, 0x42, _)         \
   V(F32Const, 0x43, _)         \
-  V(F64Const, 0x44, _)         \
-  V(I8Const, 0xcb, _ /* TODO(titzer): V8 specific, remove */)
+  V(F64Const, 0x44, _)
 
 // Load memory expressions.
 #define FOREACH_LOAD_MEM_OPCODE(V) \
@@ -276,7 +281,6 @@
 
 #define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
   V(F32x4Splat, 0xe500, s_f)             \
-  V(F32x4ReplaceLane, 0xe502, s_sif)     \
   V(F32x4Abs, 0xe503, s_s)               \
   V(F32x4Neg, 0xe504, s_s)               \
   V(F32x4Sqrt, 0xe505, s_s)              \
@@ -290,152 +294,150 @@
   V(F32x4Max, 0xe50d, s_ss)              \
   V(F32x4MinNum, 0xe50e, s_ss)           \
   V(F32x4MaxNum, 0xe50f, s_ss)           \
-  V(F32x4Eq, 0xe510, s_ss)               \
-  V(F32x4Ne, 0xe511, s_ss)               \
-  V(F32x4Lt, 0xe512, s_ss)               \
-  V(F32x4Le, 0xe513, s_ss)               \
-  V(F32x4Gt, 0xe514, s_ss)               \
-  V(F32x4Ge, 0xe515, s_ss)               \
-  V(F32x4Select, 0xe516, s_sss)          \
-  V(F32x4Swizzle, 0xe517, s_s)           \
-  V(F32x4Shuffle, 0xe518, s_ss)          \
-  V(F32x4FromInt32x4, 0xe519, s_s)       \
-  V(F32x4FromUint32x4, 0xe51a, s_s)      \
+  V(F32x4Eq, 0xe510, s1x4_ss)            \
+  V(F32x4Ne, 0xe511, s1x4_ss)            \
+  V(F32x4Lt, 0xe512, s1x4_ss)            \
+  V(F32x4Le, 0xe513, s1x4_ss)            \
+  V(F32x4Gt, 0xe514, s1x4_ss)            \
+  V(F32x4Ge, 0xe515, s1x4_ss)            \
+  V(F32x4SConvertI32x4, 0xe519, s_s)     \
+  V(F32x4UConvertI32x4, 0xe51a, s_s)     \
   V(I32x4Splat, 0xe51b, s_i)             \
-  V(I32x4ReplaceLane, 0xe51d, s_sii)     \
   V(I32x4Neg, 0xe51e, s_s)               \
   V(I32x4Add, 0xe51f, s_ss)              \
   V(I32x4Sub, 0xe520, s_ss)              \
   V(I32x4Mul, 0xe521, s_ss)              \
-  V(I32x4Min_s, 0xe522, s_ss)            \
-  V(I32x4Max_s, 0xe523, s_ss)            \
-  V(I32x4Shl, 0xe524, s_si)              \
-  V(I32x4Shr_s, 0xe525, s_si)            \
-  V(I32x4Eq, 0xe526, s_ss)               \
-  V(I32x4Ne, 0xe527, s_ss)               \
-  V(I32x4Lt_s, 0xe528, s_ss)             \
-  V(I32x4Le_s, 0xe529, s_ss)             \
-  V(I32x4Gt_s, 0xe52a, s_ss)             \
-  V(I32x4Ge_s, 0xe52b, s_ss)             \
-  V(I32x4Select, 0xe52c, s_sss)          \
-  V(I32x4Swizzle, 0xe52d, s_s)           \
-  V(I32x4Shuffle, 0xe52e, s_ss)          \
-  V(I32x4FromFloat32x4, 0xe52f, s_s)     \
-  V(I32x4Min_u, 0xe530, s_ss)            \
-  V(I32x4Max_u, 0xe531, s_ss)            \
-  V(I32x4Shr_u, 0xe532, s_ss)            \
-  V(I32x4Lt_u, 0xe533, s_ss)             \
-  V(I32x4Le_u, 0xe534, s_ss)             \
-  V(I32x4Gt_u, 0xe535, s_ss)             \
-  V(I32x4Ge_u, 0xe536, s_ss)             \
-  V(Ui32x4FromFloat32x4, 0xe537, s_s)    \
+  V(I32x4MinS, 0xe522, s_ss)             \
+  V(I32x4MaxS, 0xe523, s_ss)             \
+  V(I32x4Eq, 0xe526, s1x4_ss)            \
+  V(I32x4Ne, 0xe527, s1x4_ss)            \
+  V(I32x4LtS, 0xe528, s1x4_ss)           \
+  V(I32x4LeS, 0xe529, s1x4_ss)           \
+  V(I32x4GtS, 0xe52a, s1x4_ss)           \
+  V(I32x4GeS, 0xe52b, s1x4_ss)           \
+  V(I32x4SConvertF32x4, 0xe52f, s_s)     \
+  V(I32x4MinU, 0xe530, s_ss)             \
+  V(I32x4MaxU, 0xe531, s_ss)             \
+  V(I32x4LtU, 0xe533, s1x4_ss)           \
+  V(I32x4LeU, 0xe534, s1x4_ss)           \
+  V(I32x4GtU, 0xe535, s1x4_ss)           \
+  V(I32x4GeU, 0xe536, s1x4_ss)           \
+  V(I32x4UConvertF32x4, 0xe537, s_s)     \
   V(I16x8Splat, 0xe538, s_i)             \
-  V(I16x8ReplaceLane, 0xe53a, s_sii)     \
   V(I16x8Neg, 0xe53b, s_s)               \
   V(I16x8Add, 0xe53c, s_ss)              \
-  V(I16x8AddSaturate_s, 0xe53d, s_ss)    \
+  V(I16x8AddSaturateS, 0xe53d, s_ss)     \
   V(I16x8Sub, 0xe53e, s_ss)              \
-  V(I16x8SubSaturate_s, 0xe53f, s_ss)    \
+  V(I16x8SubSaturateS, 0xe53f, s_ss)     \
   V(I16x8Mul, 0xe540, s_ss)              \
-  V(I16x8Min_s, 0xe541, s_ss)            \
-  V(I16x8Max_s, 0xe542, s_ss)            \
-  V(I16x8Shl, 0xe543, s_si)              \
-  V(I16x8Shr_s, 0xe544, s_si)            \
-  V(I16x8Eq, 0xe545, s_ss)               \
-  V(I16x8Ne, 0xe546, s_ss)               \
-  V(I16x8Lt_s, 0xe547, s_ss)             \
-  V(I16x8Le_s, 0xe548, s_ss)             \
-  V(I16x8Gt_s, 0xe549, s_ss)             \
-  V(I16x8Ge_s, 0xe54a, s_ss)             \
-  V(I16x8Select, 0xe54b, s_sss)          \
-  V(I16x8Swizzle, 0xe54c, s_s)           \
-  V(I16x8Shuffle, 0xe54d, s_ss)          \
-  V(I16x8AddSaturate_u, 0xe54e, s_ss)    \
-  V(I16x8SubSaturate_u, 0xe54f, s_ss)    \
-  V(I16x8Min_u, 0xe550, s_ss)            \
-  V(I16x8Max_u, 0xe551, s_ss)            \
-  V(I16x8Shr_u, 0xe552, s_si)            \
-  V(I16x8Lt_u, 0xe553, s_ss)             \
-  V(I16x8Le_u, 0xe554, s_ss)             \
-  V(I16x8Gt_u, 0xe555, s_ss)             \
-  V(I16x8Ge_u, 0xe556, s_ss)             \
+  V(I16x8MinS, 0xe541, s_ss)             \
+  V(I16x8MaxS, 0xe542, s_ss)             \
+  V(I16x8Eq, 0xe545, s1x8_ss)            \
+  V(I16x8Ne, 0xe546, s1x8_ss)            \
+  V(I16x8LtS, 0xe547, s1x8_ss)           \
+  V(I16x8LeS, 0xe548, s1x8_ss)           \
+  V(I16x8GtS, 0xe549, s1x8_ss)           \
+  V(I16x8GeS, 0xe54a, s1x8_ss)           \
+  V(I16x8AddSaturateU, 0xe54e, s_ss)     \
+  V(I16x8SubSaturateU, 0xe54f, s_ss)     \
+  V(I16x8MinU, 0xe550, s_ss)             \
+  V(I16x8MaxU, 0xe551, s_ss)             \
+  V(I16x8LtU, 0xe553, s1x8_ss)           \
+  V(I16x8LeU, 0xe554, s1x8_ss)           \
+  V(I16x8GtU, 0xe555, s1x8_ss)           \
+  V(I16x8GeU, 0xe556, s1x8_ss)           \
   V(I8x16Splat, 0xe557, s_i)             \
-  V(I8x16ReplaceLane, 0xe559, s_sii)     \
   V(I8x16Neg, 0xe55a, s_s)               \
   V(I8x16Add, 0xe55b, s_ss)              \
-  V(I8x16AddSaturate_s, 0xe55c, s_ss)    \
+  V(I8x16AddSaturateS, 0xe55c, s_ss)     \
   V(I8x16Sub, 0xe55d, s_ss)              \
-  V(I8x16SubSaturate_s, 0xe55e, s_ss)    \
+  V(I8x16SubSaturateS, 0xe55e, s_ss)     \
   V(I8x16Mul, 0xe55f, s_ss)              \
-  V(I8x16Min_s, 0xe560, s_ss)            \
-  V(I8x16Max_s, 0xe561, s_ss)            \
-  V(I8x16Shl, 0xe562, s_si)              \
-  V(I8x16Shr_s, 0xe563, s_si)            \
-  V(I8x16Eq, 0xe564, s_ss)               \
-  V(I8x16Neq, 0xe565, s_ss)              \
-  V(I8x16Lt_s, 0xe566, s_ss)             \
-  V(I8x16Le_s, 0xe567, s_ss)             \
-  V(I8x16Gt_s, 0xe568, s_ss)             \
-  V(I8x16Ge_s, 0xe569, s_ss)             \
-  V(I8x16Select, 0xe56a, s_sss)          \
-  V(I8x16Swizzle, 0xe56b, s_s)           \
-  V(I8x16Shuffle, 0xe56c, s_ss)          \
-  V(I8x16AddSaturate_u, 0xe56d, s_ss)    \
-  V(I8x16Sub_saturate_u, 0xe56e, s_ss)   \
-  V(I8x16Min_u, 0xe56f, s_ss)            \
-  V(I8x16Max_u, 0xe570, s_ss)            \
-  V(I8x16Shr_u, 0xe571, s_ss)            \
-  V(I8x16Lt_u, 0xe572, s_ss)             \
-  V(I8x16Le_u, 0xe573, s_ss)             \
-  V(I8x16Gt_u, 0xe574, s_ss)             \
-  V(I8x16Ge_u, 0xe575, s_ss)             \
+  V(I8x16MinS, 0xe560, s_ss)             \
+  V(I8x16MaxS, 0xe561, s_ss)             \
+  V(I8x16Eq, 0xe564, s1x16_ss)           \
+  V(I8x16Ne, 0xe565, s1x16_ss)           \
+  V(I8x16LtS, 0xe566, s1x16_ss)          \
+  V(I8x16LeS, 0xe567, s1x16_ss)          \
+  V(I8x16GtS, 0xe568, s1x16_ss)          \
+  V(I8x16GeS, 0xe569, s1x16_ss)          \
+  V(I8x16AddSaturateU, 0xe56d, s_ss)     \
+  V(I8x16SubSaturateU, 0xe56e, s_ss)     \
+  V(I8x16MinU, 0xe56f, s_ss)             \
+  V(I8x16MaxU, 0xe570, s_ss)             \
+  V(I8x16LtU, 0xe572, s1x16_ss)          \
+  V(I8x16LeU, 0xe573, s1x16_ss)          \
+  V(I8x16GtU, 0xe574, s1x16_ss)          \
+  V(I8x16GeU, 0xe575, s1x16_ss)          \
   V(S128And, 0xe576, s_ss)               \
-  V(S128Ior, 0xe577, s_ss)               \
+  V(S128Or, 0xe577, s_ss)                \
   V(S128Xor, 0xe578, s_ss)               \
-  V(S128Not, 0xe579, s_s)
+  V(S128Not, 0xe579, s_s)                \
+  V(S32x4Select, 0xe52c, s_s1x4ss)       \
+  V(S32x4Swizzle, 0xe52d, s_s)           \
+  V(S32x4Shuffle, 0xe52e, s_ss)          \
+  V(S16x8Select, 0xe54b, s_s1x8ss)       \
+  V(S16x8Swizzle, 0xe54c, s_s)           \
+  V(S16x8Shuffle, 0xe54d, s_ss)          \
+  V(S8x16Select, 0xe56a, s_s1x16ss)      \
+  V(S8x16Swizzle, 0xe56b, s_s)           \
+  V(S8x16Shuffle, 0xe56c, s_ss)
 
 #define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
   V(F32x4ExtractLane, 0xe501, _)         \
+  V(F32x4ReplaceLane, 0xe502, _)         \
   V(I32x4ExtractLane, 0xe51c, _)         \
+  V(I32x4ReplaceLane, 0xe51d, _)         \
+  V(I32x4Shl, 0xe524, _)                 \
+  V(I32x4ShrS, 0xe525, _)                \
+  V(I32x4ShrU, 0xe532, _)                \
   V(I16x8ExtractLane, 0xe539, _)         \
-  V(I8x16ExtractLane, 0xe558, _)
+  V(I16x8ReplaceLane, 0xe53a, _)         \
+  V(I16x8Shl, 0xe543, _)                 \
+  V(I16x8ShrS, 0xe544, _)                \
+  V(I16x8ShrU, 0xe552, _)                \
+  V(I8x16ExtractLane, 0xe558, _)         \
+  V(I8x16ReplaceLane, 0xe559, _)         \
+  V(I8x16Shl, 0xe562, _)                 \
+  V(I8x16ShrS, 0xe563, _)                \
+  V(I8x16ShrU, 0xe571, _)
 
 #define FOREACH_ATOMIC_OPCODE(V)               \
   V(I32AtomicAdd8S, 0xe601, i_ii)              \
   V(I32AtomicAdd8U, 0xe602, i_ii)              \
   V(I32AtomicAdd16S, 0xe603, i_ii)             \
   V(I32AtomicAdd16U, 0xe604, i_ii)             \
-  V(I32AtomicAdd32, 0xe605, i_ii)              \
+  V(I32AtomicAdd, 0xe605, i_ii)                \
   V(I32AtomicAnd8S, 0xe606, i_ii)              \
   V(I32AtomicAnd8U, 0xe607, i_ii)              \
   V(I32AtomicAnd16S, 0xe608, i_ii)             \
   V(I32AtomicAnd16U, 0xe609, i_ii)             \
-  V(I32AtomicAnd32, 0xe60a, i_ii)              \
+  V(I32AtomicAnd, 0xe60a, i_ii)                \
   V(I32AtomicCompareExchange8S, 0xe60b, i_ii)  \
   V(I32AtomicCompareExchange8U, 0xe60c, i_ii)  \
   V(I32AtomicCompareExchange16S, 0xe60d, i_ii) \
   V(I32AtomicCompareExchange16U, 0xe60e, i_ii) \
-  V(I32AtomicCompareExchange32, 0xe60f, i_ii)  \
+  V(I32AtomicCompareExchange, 0xe60f, i_ii)    \
   V(I32AtomicExchange8S, 0xe610, i_ii)         \
   V(I32AtomicExchange8U, 0xe611, i_ii)         \
   V(I32AtomicExchange16S, 0xe612, i_ii)        \
   V(I32AtomicExchange16U, 0xe613, i_ii)        \
-  V(I32AtomicExchange32, 0xe614, i_ii)         \
+  V(I32AtomicExchange, 0xe614, i_ii)           \
   V(I32AtomicOr8S, 0xe615, i_ii)               \
   V(I32AtomicOr8U, 0xe616, i_ii)               \
   V(I32AtomicOr16S, 0xe617, i_ii)              \
   V(I32AtomicOr16U, 0xe618, i_ii)              \
-  V(I32AtomicOr32, 0xe619, i_ii)               \
+  V(I32AtomicOr, 0xe619, i_ii)                 \
   V(I32AtomicSub8S, 0xe61a, i_ii)              \
   V(I32AtomicSub8U, 0xe61b, i_ii)              \
   V(I32AtomicSub16S, 0xe61c, i_ii)             \
   V(I32AtomicSub16U, 0xe61d, i_ii)             \
-  V(I32AtomicSub32, 0xe61e, i_ii)              \
+  V(I32AtomicSub, 0xe61e, i_ii)                \
   V(I32AtomicXor8S, 0xe61f, i_ii)              \
   V(I32AtomicXor8U, 0xe620, i_ii)              \
   V(I32AtomicXor16S, 0xe621, i_ii)             \
   V(I32AtomicXor16U, 0xe622, i_ii)             \
-  V(I32AtomicXor32, 0xe623, i_ii)
+  V(I32AtomicXor, 0xe623, i_ii)
 
 // All opcodes.
 #define FOREACH_OPCODE(V)          \
@@ -451,45 +453,49 @@
   FOREACH_ATOMIC_OPCODE(V)
 
 // All signatures.
-#define FOREACH_SIGNATURE(V)         \
-  FOREACH_SIMD_SIGNATURE(V)          \
-  V(i_ii, kAstI32, kAstI32, kAstI32) \
-  V(i_i, kAstI32, kAstI32)           \
-  V(i_v, kAstI32)                    \
-  V(i_ff, kAstI32, kAstF32, kAstF32) \
-  V(i_f, kAstI32, kAstF32)           \
-  V(i_dd, kAstI32, kAstF64, kAstF64) \
-  V(i_d, kAstI32, kAstF64)           \
-  V(i_l, kAstI32, kAstI64)           \
-  V(l_ll, kAstI64, kAstI64, kAstI64) \
-  V(i_ll, kAstI32, kAstI64, kAstI64) \
-  V(l_l, kAstI64, kAstI64)           \
-  V(l_i, kAstI64, kAstI32)           \
-  V(l_f, kAstI64, kAstF32)           \
-  V(l_d, kAstI64, kAstF64)           \
-  V(f_ff, kAstF32, kAstF32, kAstF32) \
-  V(f_f, kAstF32, kAstF32)           \
-  V(f_d, kAstF32, kAstF64)           \
-  V(f_i, kAstF32, kAstI32)           \
-  V(f_l, kAstF32, kAstI64)           \
-  V(d_dd, kAstF64, kAstF64, kAstF64) \
-  V(d_d, kAstF64, kAstF64)           \
-  V(d_f, kAstF64, kAstF32)           \
-  V(d_i, kAstF64, kAstI32)           \
-  V(d_l, kAstF64, kAstI64)           \
-  V(d_id, kAstF64, kAstI32, kAstF64) \
-  V(f_if, kAstF32, kAstI32, kAstF32) \
-  V(l_il, kAstI64, kAstI32, kAstI64)
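+// Signature names encode result and parameter types: e.g. i_ii is a
+// signature taking two i32 parameters and returning an i32.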
+#define FOREACH_SIGNATURE(V)            \
+  FOREACH_SIMD_SIGNATURE(V)             \
+  V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
+  V(i_i, kWasmI32, kWasmI32)            \
+  V(i_v, kWasmI32)                      \
+  V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
+  V(i_f, kWasmI32, kWasmF32)            \
+  V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
+  V(i_d, kWasmI32, kWasmF64)            \
+  V(i_l, kWasmI32, kWasmI64)            \
+  V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
+  V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
+  V(l_l, kWasmI64, kWasmI64)            \
+  V(l_i, kWasmI64, kWasmI32)            \
+  V(l_f, kWasmI64, kWasmF32)            \
+  V(l_d, kWasmI64, kWasmF64)            \
+  V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
+  V(f_f, kWasmF32, kWasmF32)            \
+  V(f_d, kWasmF32, kWasmF64)            \
+  V(f_i, kWasmF32, kWasmI32)            \
+  V(f_l, kWasmF32, kWasmI64)            \
+  V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
+  V(d_d, kWasmF64, kWasmF64)            \
+  V(d_f, kWasmF64, kWasmF32)            \
+  V(d_i, kWasmF64, kWasmI32)            \
+  V(d_l, kWasmF64, kWasmI64)            \
+  V(d_id, kWasmF64, kWasmI32, kWasmF64) \
+  V(f_if, kWasmF32, kWasmI32, kWasmF32) \
+  V(l_il, kWasmI64, kWasmI32, kWasmI64)
 
-#define FOREACH_SIMD_SIGNATURE(V)                  \
-  V(s_s, kAstS128, kAstS128)                       \
-  V(s_f, kAstS128, kAstF32)                        \
-  V(s_sif, kAstS128, kAstS128, kAstI32, kAstF32)   \
-  V(s_ss, kAstS128, kAstS128, kAstS128)            \
-  V(s_sss, kAstS128, kAstS128, kAstS128, kAstS128) \
-  V(s_i, kAstS128, kAstI32)                        \
-  V(s_sii, kAstS128, kAstS128, kAstI32, kAstI32)   \
-  V(s_si, kAstS128, kAstS128, kAstI32)
+#define FOREACH_SIMD_SIGNATURE(V)                         \
+  V(s_s, kWasmS128, kWasmS128)                            \
+  V(s_f, kWasmS128, kWasmF32)                             \
+  V(s_ss, kWasmS128, kWasmS128, kWasmS128)                \
+  V(s1x4_ss, kWasmS1x4, kWasmS128, kWasmS128)             \
+  V(s1x8_ss, kWasmS1x8, kWasmS128, kWasmS128)             \
+  V(s1x16_ss, kWasmS1x16, kWasmS128, kWasmS128)           \
+  V(s_i, kWasmS128, kWasmI32)                             \
+  V(s_si, kWasmS128, kWasmS128, kWasmI32)                 \
+  V(i_s, kWasmI32, kWasmS128)                             \
+  V(s_s1x4ss, kWasmS128, kWasmS1x4, kWasmS128, kWasmS128) \
+  V(s_s1x8ss, kWasmS128, kWasmS1x8, kWasmS128, kWasmS128) \
+  V(s_s1x16ss, kWasmS128, kWasmS1x16, kWasmS128, kWasmS128)
 
 #define FOREACH_PREFIX(V) \
   V(Simd, 0xe5)           \
@@ -514,8 +520,7 @@
   V(TrapRemByZero)                 \
   V(TrapFloatUnrepresentable)      \
   V(TrapFuncInvalid)               \
-  V(TrapFuncSigMismatch)           \
-  V(TrapInvalidIndex)
+  V(TrapFuncSigMismatch)
 
 enum TrapReason {
 #define DECLARE_ENUM(name) k##name,
@@ -528,7 +533,6 @@
 class V8_EXPORT_PRIVATE WasmOpcodes {
  public:
   static const char* OpcodeName(WasmOpcode opcode);
-  static const char* ShortOpcodeName(WasmOpcode opcode);
   static FunctionSig* Signature(WasmOpcode opcode);
   static FunctionSig* AsmjsSignature(WasmOpcode opcode);
   static FunctionSig* AtomicSignature(WasmOpcode opcode);
@@ -541,21 +545,27 @@
     return 1 << ElementSizeLog2Of(type.representation());
   }
 
-  static byte MemSize(LocalType type) { return 1 << ElementSizeLog2Of(type); }
+  static byte MemSize(ValueType type) { return 1 << ElementSizeLog2Of(type); }
 
-  static LocalTypeCode LocalTypeCodeFor(LocalType type) {
+  static ValueTypeCode ValueTypeCodeFor(ValueType type) {
     switch (type) {
-      case kAstI32:
+      case kWasmI32:
         return kLocalI32;
-      case kAstI64:
+      case kWasmI64:
         return kLocalI64;
-      case kAstF32:
+      case kWasmF32:
         return kLocalF32;
-      case kAstF64:
+      case kWasmF64:
         return kLocalF64;
-      case kAstS128:
+      case kWasmS128:
         return kLocalS128;
-      case kAstStmt:
+      case kWasmS1x4:
+        return kLocalS1x4;
+      case kWasmS1x8:
+        return kLocalS1x8;
+      case kWasmS1x16:
+        return kLocalS1x16;
+      case kWasmStmt:
         return kLocalVoid;
       default:
         UNREACHABLE();
@@ -563,19 +573,25 @@
     }
   }
 
-  static MachineType MachineTypeFor(LocalType type) {
+  static MachineType MachineTypeFor(ValueType type) {
     switch (type) {
-      case kAstI32:
+      case kWasmI32:
         return MachineType::Int32();
-      case kAstI64:
+      case kWasmI64:
         return MachineType::Int64();
-      case kAstF32:
+      case kWasmF32:
         return MachineType::Float32();
-      case kAstF64:
+      case kWasmF64:
         return MachineType::Float64();
-      case kAstS128:
+      case kWasmS128:
         return MachineType::Simd128();
-      case kAstStmt:
+      case kWasmS1x4:
+        return MachineType::Simd1x4();
+      case kWasmS1x8:
+        return MachineType::Simd1x8();
+      case kWasmS1x16:
+        return MachineType::Simd1x16();
+      case kWasmStmt:
         return MachineType::None();
       default:
         UNREACHABLE();
@@ -583,32 +599,38 @@
     }
   }
 
-  static LocalType LocalTypeFor(MachineType type) {
+  static ValueType ValueTypeFor(MachineType type) {
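+    // Integer machine representations narrower than 32 bits widen to wasm's
+    // i32; wasm has no i8/i16 value types.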
     if (type == MachineType::Int8()) {
-      return kAstI32;
+      return kWasmI32;
     } else if (type == MachineType::Uint8()) {
-      return kAstI32;
+      return kWasmI32;
     } else if (type == MachineType::Int16()) {
-      return kAstI32;
+      return kWasmI32;
     } else if (type == MachineType::Uint16()) {
-      return kAstI32;
+      return kWasmI32;
     } else if (type == MachineType::Int32()) {
-      return kAstI32;
+      return kWasmI32;
     } else if (type == MachineType::Uint32()) {
-      return kAstI32;
+      return kWasmI32;
     } else if (type == MachineType::Int64()) {
-      return kAstI64;
+      return kWasmI64;
     } else if (type == MachineType::Uint64()) {
-      return kAstI64;
+      return kWasmI64;
     } else if (type == MachineType::Float32()) {
-      return kAstF32;
+      return kWasmF32;
     } else if (type == MachineType::Float64()) {
-      return kAstF64;
+      return kWasmF64;
     } else if (type == MachineType::Simd128()) {
-      return kAstS128;
+      return kWasmS128;
+    } else if (type == MachineType::Simd1x4()) {
+      return kWasmS1x4;
+    } else if (type == MachineType::Simd1x8()) {
+      return kWasmS1x8;
+    } else if (type == MachineType::Simd1x16()) {
+      return kWasmS1x16;
     } else {
       UNREACHABLE();
-      return kAstI32;
+      return kWasmI32;
     }
   }
 
@@ -639,44 +661,52 @@
     }
   }
 
-  static char ShortNameOf(LocalType type) {
+  static char ShortNameOf(ValueType type) {
     switch (type) {
-      case kAstI32:
+      case kWasmI32:
         return 'i';
-      case kAstI64:
+      case kWasmI64:
         return 'l';
-      case kAstF32:
+      case kWasmF32:
         return 'f';
-      case kAstF64:
+      case kWasmF64:
         return 'd';
-      case kAstS128:
+      case kWasmS128:
+      case kWasmS1x4:
+      case kWasmS1x8:
+      case kWasmS1x16:
         return 's';
-      case kAstStmt:
+      case kWasmStmt:
         return 'v';
-      case kAstEnd:
-        return 'x';
+      case kWasmVar:
+        return '*';
       default:
-        UNREACHABLE();
         return '?';
     }
   }
 
-  static const char* TypeName(LocalType type) {
+  static const char* TypeName(ValueType type) {
     switch (type) {
-      case kAstI32:
+      case kWasmI32:
         return "i32";
-      case kAstI64:
+      case kWasmI64:
         return "i64";
-      case kAstF32:
+      case kWasmF32:
         return "f32";
-      case kAstF64:
+      case kWasmF64:
         return "f64";
-      case kAstS128:
+      case kWasmS128:
         return "s128";
-      case kAstStmt:
+      case kWasmS1x4:
+        return "s1x4";
+      case kWasmS1x8:
+        return "s1x8";
+      case kWasmS1x16:
+        return "s1x16";
+      case kWasmStmt:
         return "<stmt>";
-      case kAstEnd:
-        return "<end>";
+      case kWasmVar:
+        return "<var>";
       default:
         return "<unknown>";
     }
diff --git a/src/wasm/wasm-result.cc b/src/wasm/wasm-result.cc
index 6d535e3..e22f9ad 100644
--- a/src/wasm/wasm-result.cc
+++ b/src/wasm/wasm-result.cc
@@ -64,14 +64,25 @@
 
 void ErrorThrower::CompileError(const char* format, ...) {
   if (error()) return;
+  wasm_error_ = true;
   va_list arguments;
   va_start(arguments, format);
   Format(isolate_->wasm_compile_error_function(), format, arguments);
   va_end(arguments);
 }
 
+void ErrorThrower::LinkError(const char* format, ...) {
+  if (error()) return;
+  wasm_error_ = true;
+  va_list arguments;
+  va_start(arguments, format);
+  Format(isolate_->wasm_link_error_function(), format, arguments);
+  va_end(arguments);
+}
+
 void ErrorThrower::RuntimeError(const char* format, ...) {
   if (error()) return;
+  wasm_error_ = true;
   va_list arguments;
   va_start(arguments, format);
   Format(isolate_->wasm_runtime_error_function(), format, arguments);
diff --git a/src/wasm/wasm-result.h b/src/wasm/wasm-result.h
index 53c6b8d..004ac22 100644
--- a/src/wasm/wasm-result.h
+++ b/src/wasm/wasm-result.h
@@ -95,6 +95,7 @@
   PRINTF_FORMAT(2, 3) void TypeError(const char* fmt, ...);
   PRINTF_FORMAT(2, 3) void RangeError(const char* fmt, ...);
   PRINTF_FORMAT(2, 3) void CompileError(const char* fmt, ...);
+  PRINTF_FORMAT(2, 3) void LinkError(const char* fmt, ...);
   PRINTF_FORMAT(2, 3) void RuntimeError(const char* fmt, ...);
 
   template <typename T>
@@ -111,6 +112,7 @@
   }
 
   bool error() const { return !exception_.is_null(); }
+  bool wasm_error() { return wasm_error_; }
 
  private:
   void Format(i::Handle<i::JSFunction> constructor, const char* fmt, va_list);
@@ -118,6 +120,7 @@
   i::Isolate* isolate_;
   const char* context_;
   i::Handle<i::Object> exception_;
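+  // True if the pending exception is one of the wasm-specific error types
+  // (CompileError, LinkError, RuntimeError) rather than a Type/RangeError.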
+  bool wasm_error_ = false;
 };
 }  // namespace wasm
 }  // namespace internal
diff --git a/src/wasm/wasm-text.cc b/src/wasm/wasm-text.cc
new file mode 100644
index 0000000..9ad86fb
--- /dev/null
+++ b/src/wasm/wasm-text.cc
@@ -0,0 +1,212 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-text.h"
+
+#include "src/debug/interface-types.h"
+#include "src/objects-inl.h"
+#include "src/ostreams.h"
+#include "src/vector.h"
+#include "src/wasm/function-body-decoder-impl.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/zone/zone.h"
+
+using namespace v8;
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+
+namespace {
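+// Returns true if |name| consists only of characters that are valid in an
+// identifier of the wasm text format, so it can be printed as "$name".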
+bool IsValidFunctionName(const Vector<const char> &name) {
+  if (name.is_empty()) return false;
+  const char *special_chars = "_.+-*/\\^~=<>!?@#$%&|:'`";
+  for (char c : name) {
+    bool valid_char = (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') ||
+                      (c >= 'A' && c <= 'Z') || strchr(special_chars, c);
+    if (!valid_char) return false;
+  }
+  return true;
+}
+
+}  // namespace
+
+void wasm::PrintWasmText(const WasmModule *module,
+                         const ModuleWireBytes &wire_bytes, uint32_t func_index,
+                         std::ostream &os,
+                         debug::WasmDisassembly::OffsetTable *offset_table) {
+  DCHECK_NOT_NULL(module);
+  DCHECK_GT(module->functions.size(), func_index);
+  const WasmFunction *fun = &module->functions[func_index];
+
+  AccountingAllocator allocator;
+  Zone zone(&allocator, ZONE_NAME);
+  int line_nr = 0;
+  int control_depth = 1;
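+  // The function body acts as an implicit block: its final 'end' opcode
+  // brings control_depth back to zero (checked at the bottom).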
+
+  // Print the function signature.
+  os << "func";
+  WasmName fun_name = wire_bytes.GetNameOrNull(fun);
+  if (IsValidFunctionName(fun_name)) {
+    os << " $";
+    os.write(fun_name.start(), fun_name.length());
+  }
+  size_t param_count = fun->sig->parameter_count();
+  if (param_count) {
+    os << " (param";
+    for (size_t i = 0; i < param_count; ++i)
+      os << ' ' << WasmOpcodes::TypeName(fun->sig->GetParam(i));
+    os << ')';
+  }
+  size_t return_count = fun->sig->return_count();
+  if (return_count) {
+    os << " (result";
+    for (size_t i = 0; i < return_count; ++i)
+      os << ' ' << WasmOpcodes::TypeName(fun->sig->GetReturn(i));
+    os << ')';
+  }
+  os << "\n";
+  ++line_nr;
+
+  // Print the local declarations.
+  BodyLocalDecls decls(&zone);
+  Vector<const byte> func_bytes = wire_bytes.GetFunctionBytes(fun);
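+  // Constructing the iterator with |decls| decodes and skips the local
+  // declarations, so iteration starts at the first opcode of the body.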
+  BytecodeIterator i(func_bytes.begin(), func_bytes.end(), &decls);
+  DCHECK_LT(func_bytes.begin(), i.pc());
+  if (!decls.type_list.empty()) {
+    os << "(local";
+    for (const ValueType &v : decls.type_list) {
+      os << ' ' << WasmOpcodes::TypeName(v);
+    }
+    os << ")\n";
+    ++line_nr;
+  }
+
+  for (; i.has_next(); i.next()) {
+    WasmOpcode opcode = i.current();
+    if (opcode == kExprElse || opcode == kExprEnd) --control_depth;
+
+    DCHECK_LE(0, control_depth);
+    const int kMaxIndentation = 64;
+    int indentation = std::min(kMaxIndentation, 2 * control_depth);
+    if (offset_table) {
+      offset_table->emplace_back(i.pc_offset(), line_nr, indentation);
+    }
+
+    // kMaxIndentation (64) spaces; the array has room for the trailing NUL.
+    const char padding[kMaxIndentation + 1] =
+        "                                                                ";
+    os.write(padding, indentation);
+
+    switch (opcode) {
+      case kExprLoop:
+      case kExprIf:
+      case kExprBlock:
+      case kExprTry: {
+        BlockTypeOperand operand(&i, i.pc());
+        os << WasmOpcodes::OpcodeName(opcode);
+        for (unsigned idx = 0; idx < operand.arity; idx++) {
+          os << " " << WasmOpcodes::TypeName(operand.read_entry(idx));
+        }
+        control_depth++;
+        break;
+      }
+      case kExprBr:
+      case kExprBrIf: {
+        BreakDepthOperand operand(&i, i.pc());
+        os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.depth;
+        break;
+      }
+      case kExprElse:
+        os << "else";
+        control_depth++;
+        break;
+      case kExprEnd:
+        os << "end";
+        break;
+      case kExprBrTable: {
+        BranchTableOperand operand(&i, i.pc());
+        BranchTableIterator iterator(&i, operand);
+        os << "br_table";
+        while (iterator.has_next()) os << ' ' << iterator.next();
+        break;
+      }
+      case kExprCallIndirect: {
+        CallIndirectOperand operand(&i, i.pc());
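+        // Only table 0 exists so far (enforced by the DCHECK below), so the
+        // table index is not printed.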
+        DCHECK_EQ(0, operand.table_index);
+        os << "call_indirect " << operand.index;
+        break;
+      }
+      case kExprCallFunction: {
+        CallFunctionOperand operand(&i, i.pc());
+        os << "call " << operand.index;
+        break;
+      }
+      case kExprGetLocal:
+      case kExprSetLocal:
+      case kExprTeeLocal:
+      case kExprCatch: {
+        LocalIndexOperand operand(&i, i.pc());
+        os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.index;
+        break;
+      }
+      case kExprGetGlobal:
+      case kExprSetGlobal: {
+        GlobalIndexOperand operand(&i, i.pc());
+        os << WasmOpcodes::OpcodeName(opcode) << ' ' << operand.index;
+        break;
+      }
+#define CASE_CONST(type, str, cast_type)                           \
+  case kExpr##type##Const: {                                       \
+    Imm##type##Operand operand(&i, i.pc());                        \
+    os << #str ".const " << static_cast<cast_type>(operand.value); \
+    break;                                                         \
+  }
+        CASE_CONST(I32, i32, int32_t)
+        CASE_CONST(I64, i64, int64_t)
+        CASE_CONST(F32, f32, float)
+        CASE_CONST(F64, f64, double)
+
+#define CASE_OPCODE(opcode, _, __) case kExpr##opcode:
+        FOREACH_LOAD_MEM_OPCODE(CASE_OPCODE)
+        FOREACH_STORE_MEM_OPCODE(CASE_OPCODE) {
+          MemoryAccessOperand operand(&i, i.pc(), kMaxUInt32);
+          os << WasmOpcodes::OpcodeName(opcode) << " offset=" << operand.offset
+             << " align=" << (1ULL << operand.alignment);
+          break;
+        }
+
+        FOREACH_SIMPLE_OPCODE(CASE_OPCODE)
+      case kExprUnreachable:
+      case kExprNop:
+      case kExprReturn:
+      case kExprMemorySize:
+      case kExprGrowMemory:
+      case kExprDrop:
+      case kExprSelect:
+      case kExprThrow:
+        os << WasmOpcodes::OpcodeName(opcode);
+        break;
+
+        // These opcodes are just printed by their internal opcode names, as
+        // they should never be shown to end-users.
+        FOREACH_ASMJS_COMPAT_OPCODE(CASE_OPCODE)
+        // TODO(wasm): Add correct printing for SIMD and atomic opcodes once
+        // they are publicly available.
+        FOREACH_SIMD_0_OPERAND_OPCODE(CASE_OPCODE)
+        FOREACH_SIMD_1_OPERAND_OPCODE(CASE_OPCODE)
+        FOREACH_ATOMIC_OPCODE(CASE_OPCODE)
+        os << WasmOpcodes::OpcodeName(opcode);
+        break;
+
+      default:
+        UNREACHABLE();
+        break;
+    }
+    os << '\n';
+    ++line_nr;
+  }
+  DCHECK_EQ(0, control_depth);
+  DCHECK(i.ok());
+}
diff --git a/src/wasm/wasm-text.h b/src/wasm/wasm-text.h
new file mode 100644
index 0000000..1608ea9
--- /dev/null
+++ b/src/wasm/wasm-text.h
@@ -0,0 +1,38 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_TEXT_H_
+#define V8_WASM_WASM_TEXT_H_
+
+#include <cstdint>
+#include <ostream>
+#include <tuple>
+#include <vector>
+
+namespace v8 {
+
+namespace debug {
+struct WasmDisassemblyOffsetTableEntry;
+}  // namespace debug
+
+namespace internal {
+namespace wasm {
+
+// Forward declarations.
+struct WasmModule;
+struct ModuleWireBytes;
+
+// Generates disassembly according to the official wasm text format.
+// Output disassembly to the given output stream, and optionally return an
+// offset table of <byte offset, line, column> via the given pointer.
+void PrintWasmText(
+    const WasmModule *module, const ModuleWireBytes &wire_bytes,
+    uint32_t func_index, std::ostream &os,
+    std::vector<debug::WasmDisassemblyOffsetTableEntry> *offset_table);
+
+}  // namespace wasm
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_WASM_TEXT_H_
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 518df5a..2483bbd 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -9,6 +9,7 @@
 
 #include "src/base/cpu.h"
 #include "src/debug/debug.h"
+#include "src/objects-inl.h"
 #include "src/v8memory.h"
 
 namespace v8 {
@@ -16,15 +17,13 @@
 
 bool CpuFeatures::SupportsCrankshaft() { return true; }
 
-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsSimd128() { return true; }
 
 // -----------------------------------------------------------------------------
 // Implementation of Assembler
 
 
 static const byte kCallOpcode = 0xE8;
-// The length of pushq(rbp), movp(rbp, rsp), Push(rsi) and Push(rdi).
-static const int kNoCodeAgeSequenceLength = kPointerSize == kInt64Size ? 6 : 17;
 
 
 void Assembler::emitl(uint32_t x) {
@@ -83,6 +82,12 @@
       entry - isolate()->heap()->memory_allocator()->code_range()->start()));
 }
 
+void Assembler::emit(Immediate x) {
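+  // If the immediate carries a relocation mode, record the relocation info
+  // before emitting the 32-bit value.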
+  if (!RelocInfo::IsNone(x.rmode_)) {
+    RecordRelocInfo(x.rmode_);
+  }
+  emitl(x.value_);
+}
 
 void Assembler::emit_rex_64(Register reg, Register rm_reg) {
   emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit());
@@ -281,6 +286,17 @@
   }
 }
 
+Address Assembler::target_address_at(Address pc, Code* code) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+                                      Address target,
+                                      ICacheFlushMode icache_flush_mode) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
 
 void Assembler::deserialization_set_target_internal_reference_at(
     Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
@@ -292,6 +308,10 @@
   return pc - kCallTargetAddressOffset;
 }
 
+void Assembler::deserialization_set_special_target_at(
+    Isolate* isolate, Address instruction_payload, Code* code, Address target) {
+  set_target_address_at(isolate, instruction_payload, code, target);
+}
 
 Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
   return code_targets_[Memory::int32_at(pc)];
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 5402a8c..9c3a9cd 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -15,7 +15,9 @@
 #include <sys/sysctl.h>
 #endif
 
+#include "src/assembler-inl.h"
 #include "src/base/bits.h"
+#include "src/base/cpu.h"
 #include "src/macro-assembler.h"
 #include "src/v8.h"
 
@@ -135,13 +137,18 @@
   return Memory::uint32_at(pc_);
 }
 
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+  return Memory::uint32_at(pc_);
+}
+
 void RelocInfo::unchecked_update_wasm_memory_reference(
     Address address, ICacheFlushMode flush_mode) {
   Memory::Address_at(pc_) = address;
 }
 
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
-                                                  ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+                                           ICacheFlushMode flush_mode) {
   Memory::uint32_at(pc_) = size;
 }
 
@@ -601,12 +608,9 @@
                                         int size) {
   EnsureSpace ensure_space(this);
   emit_rex(dst, size);
-  if (is_int8(src.value_)) {
+  if (is_int8(src.value_) && RelocInfo::IsNone(src.rmode_)) {
     emit(0x83);
     emit_operand(subcode, dst);
-    if (!RelocInfo::IsNone(src.rmode_)) {
-      RecordRelocInfo(src.rmode_);
-    }
     emit(src.value_);
   } else {
     emit(0x81);
@@ -2045,158 +2049,142 @@
 
 void Assembler::testb(Register dst, Register src) {
   EnsureSpace ensure_space(this);
-  if (src.low_bits() == 4) {
-    emit_rex_32(src, dst);
-    emit(0x84);
-    emit_modrm(src, dst);
-  } else {
-    if (!dst.is_byte_register() || !src.is_byte_register()) {
-      // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
-      emit_rex_32(dst, src);
-    }
-    emit(0x84);
-    emit_modrm(dst, src);
-  }
+  emit_test(dst, src, sizeof(int8_t));
 }
 
-
 void Assembler::testb(Register reg, Immediate mask) {
   DCHECK(is_int8(mask.value_) || is_uint8(mask.value_));
-  EnsureSpace ensure_space(this);
-  if (reg.is(rax)) {
-    emit(0xA8);
-    emit(mask.value_);  // Low byte emitted.
-  } else {
-    if (!reg.is_byte_register()) {
-      // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
-      emit_rex_32(reg);
-    }
-    emit(0xF6);
-    emit_modrm(0x0, reg);
-    emit(mask.value_);  // Low byte emitted.
-  }
+  emit_test(reg, mask, sizeof(int8_t));
 }
 
-
 void Assembler::testb(const Operand& op, Immediate mask) {
   DCHECK(is_int8(mask.value_) || is_uint8(mask.value_));
-  EnsureSpace ensure_space(this);
-  emit_optional_rex_32(rax, op);
-  emit(0xF6);
-  emit_operand(rax, op);  // Operation code 0
-  emit(mask.value_);  // Low byte emitted.
+  emit_test(op, mask, sizeof(int8_t));
 }
 
 
 void Assembler::testb(const Operand& op, Register reg) {
-  EnsureSpace ensure_space(this);
-  if (!reg.is_byte_register()) {
-    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
-    emit_rex_32(reg, op);
-  } else {
-    emit_optional_rex_32(reg, op);
-  }
-  emit(0x84);
-  emit_operand(reg, op);
+  emit_test(op, reg, sizeof(int8_t));
 }
 
 void Assembler::testw(Register dst, Register src) {
-  EnsureSpace ensure_space(this);
-  emit(0x66);
-  if (src.low_bits() == 4) {
-    emit_rex_32(src, dst);
-  }
-  emit(0x85);
-  emit_modrm(src, dst);
+  emit_test(dst, src, sizeof(uint16_t));
 }
 
 void Assembler::testw(Register reg, Immediate mask) {
-  DCHECK(is_int16(mask.value_) || is_uint16(mask.value_));
-  EnsureSpace ensure_space(this);
-  emit(0x66);
-  if (reg.is(rax)) {
-    emit(0xA9);
-    emitw(mask.value_);
-  } else {
-    if (reg.low_bits() == 4) {
-      emit_rex_32(reg);
-    }
-    emit(0xF7);
-    emit_modrm(0x0, reg);
-    emitw(mask.value_);
-  }
+  emit_test(reg, mask, sizeof(int16_t));
 }
 
 void Assembler::testw(const Operand& op, Immediate mask) {
-  DCHECK(is_int16(mask.value_) || is_uint16(mask.value_));
-  EnsureSpace ensure_space(this);
-  emit(0x66);
-  emit_optional_rex_32(rax, op);
-  emit(0xF7);
-  emit_operand(rax, op);
-  emitw(mask.value_);
+  emit_test(op, mask, sizeof(int16_t));
 }
 
 void Assembler::testw(const Operand& op, Register reg) {
-  EnsureSpace ensure_space(this);
-  emit(0x66);
-  emit_optional_rex_32(reg, op);
-  emit(0x85);
-  emit_operand(rax, op);
+  emit_test(op, reg, sizeof(int16_t));
 }
 
 void Assembler::emit_test(Register dst, Register src, int size) {
   EnsureSpace ensure_space(this);
-  if (src.low_bits() == 4) {
-    emit_rex(src, dst, size);
-    emit(0x85);
-    emit_modrm(src, dst);
+  if (src.low_bits() == 4) std::swap(dst, src);
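+  // TEST is symmetric in its two register operands, so swapping dst and src
+  // is safe; it reproduces the operand order of the old per-width helpers.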
+  if (size == sizeof(int16_t)) {
+    emit(0x66);
+    size = sizeof(int32_t);
+  }
+  bool byte_operand = size == sizeof(int8_t);
+  if (byte_operand) {
+    size = sizeof(int32_t);
+    if (!src.is_byte_register() || !dst.is_byte_register()) {
+      emit_rex_32(dst, src);
+    }
   } else {
     emit_rex(dst, src, size);
-    emit(0x85);
-    emit_modrm(dst, src);
   }
+  emit(byte_operand ? 0x84 : 0x85);
+  emit_modrm(dst, src);
 }
 
 
 void Assembler::emit_test(Register reg, Immediate mask, int size) {
-  // testl with a mask that fits in the low byte is exactly testb.
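+  // A mask that fits in 8 or 16 bits can use the shorter testb/testw
+  // encoding.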
   if (is_uint8(mask.value_)) {
-    testb(reg, mask);
-    return;
+    size = sizeof(int8_t);
+  } else if (is_uint16(mask.value_)) {
+    size = sizeof(int16_t);
   }
   EnsureSpace ensure_space(this);
-  if (reg.is(rax)) {
-    emit_rex(rax, size);
-    emit(0xA9);
-    emit(mask);
+  bool half_word = size == sizeof(int16_t);
+  if (half_word) {
+    emit(0x66);
+    size = sizeof(int32_t);
+  }
+  bool byte_operand = size == sizeof(int8_t);
+  if (byte_operand) {
+    size = sizeof(int32_t);
+    if (!reg.is_byte_register()) emit_rex_32(reg);
   } else {
     emit_rex(reg, size);
-    emit(0xF7);
+  }
+  if (reg.is(rax)) {
+    emit(byte_operand ? 0xA8 : 0xA9);
+  } else {
+    emit(byte_operand ? 0xF6 : 0xF7);
     emit_modrm(0x0, reg);
+  }
+  if (byte_operand) {
+    emit(mask.value_);
+  } else if (half_word) {
+    emitw(mask.value_);
+  } else {
     emit(mask);
   }
 }
 
-
 void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
-  // testl with a mask that fits in the low byte is exactly testb.
   if (is_uint8(mask.value_)) {
-    testb(op, mask);
-    return;
+    size = sizeof(int8_t);
+  } else if (is_uint16(mask.value_)) {
+    size = sizeof(int16_t);
   }
   EnsureSpace ensure_space(this);
+  bool half_word = size == sizeof(int16_t);
+  if (half_word) {
+    emit(0x66);
+    size = sizeof(int32_t);
+  }
+  bool byte_operand = size == sizeof(int8_t);
+  if (byte_operand) {
+    size = sizeof(int32_t);
+  }
   emit_rex(rax, op, size);
-  emit(0xF7);
+  emit(byte_operand ? 0xF6 : 0xF7);
   emit_operand(rax, op);  // Operation code 0
-  emit(mask);
+  if (byte_operand) {
+    emit(mask.value_);
+  } else if (half_word) {
+    emitw(mask.value_);
+  } else {
+    emit(mask);
+  }
 }
 
-
 void Assembler::emit_test(const Operand& op, Register reg, int size) {
   EnsureSpace ensure_space(this);
-  emit_rex(reg, op, size);
-  emit(0x85);
+  if (size == sizeof(int16_t)) {
+    emit(0x66);
+    size = sizeof(int32_t);
+  }
+  bool byte_operand = size == sizeof(int8_t);
+  if (byte_operand) {
+    size = sizeof(int32_t);
+    if (!reg.is_byte_register()) {
+      // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+      emit_rex_32(reg, op);
+    } else {
+      emit_optional_rex_32(reg, op);
+    }
+  } else {
+    emit_rex(reg, op, size);
+  }
+  emit(byte_operand ? 0x84 : 0x85);
   emit_operand(reg, op);
 }
 
@@ -3050,7 +3038,7 @@
 void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
   DCHECK(is_uint8(imm8));
   EnsureSpace ensure_space(this);
-  emit_optional_rex_32(src, dst);
+  emit_optional_rex_32(dst, src);
   emit(0x0F);
   emit(0xC6);
   emit_sse_operand(dst, src);
@@ -4683,6 +4671,14 @@
   emit(0xD8 | dst.low_bits());
 }
 
+void Assembler::RecordProtectedInstructionLanding(int pc_offset) {
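+  // Associates the current code position with the protected memory
+  // instruction at |pc_offset|, so trap handling can find the landing pad.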
+  EnsureSpace ensure_space(this);
+  RelocInfo rinfo(isolate(), pc(),
+                  RelocInfo::WASM_PROTECTED_INSTRUCTION_LANDING, pc_offset,
+                  nullptr);
+  reloc_info_writer.Write(&rinfo);
+}
+
 
 void Assembler::db(uint8_t data) {
   EnsureSpace ensure_space(this);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index e8ee9e4..07d8c25 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -79,6 +79,8 @@
   V(r14)                                 \
   V(r15)
 
+// The length of pushq(rbp), movp(rbp, rsp), Push(rsi) and Push(rdi).
+static const int kNoCodeAgeSequenceLength = kPointerSize == kInt64Size ? 6 : 17;
 
 // CPU Registers.
 //
@@ -203,6 +205,7 @@
   V(xmm14)
 
 static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
 
 struct XMMRegister {
   enum Code {
@@ -503,17 +506,10 @@
   static inline void set_target_address_at(
       Isolate* isolate, Address pc, Address constant_pool, Address target,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
-  static inline Address target_address_at(Address pc, Code* code) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    return target_address_at(pc, constant_pool);
-  }
+  static inline Address target_address_at(Address pc, Code* code);
   static inline void set_target_address_at(
       Isolate* isolate, Address pc, Code* code, Address target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    set_target_address_at(isolate, pc, constant_pool, target,
-                          icache_flush_mode);
-  }
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
 
   // Return the code target address at a call site from the return address
   // of that call in the instruction stream.
@@ -523,9 +519,7 @@
   // This is for calls and branches within generated code.
   inline static void deserialization_set_special_target_at(
       Isolate* isolate, Address instruction_payload, Code* code,
-      Address target) {
-    set_target_address_at(isolate, instruction_payload, code, target);
-  }
+      Address target);
 
   // This sets the internal reference at the pc.
   inline static void deserialization_set_target_internal_reference_at(
@@ -1981,9 +1975,6 @@
     return pc_offset() - label->pos();
   }
 
-  // Mark generator continuation.
-  void RecordGeneratorContinuation();
-
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot(RelocInfo::Mode mode);
 
@@ -2003,6 +1994,8 @@
     UNREACHABLE();
   }
 
+  void RecordProtectedInstructionLanding(int pc_offset);
+
   // Writes a single word of data in the code stream.
   // Used for inline tables, e.g., jump-tables.
   void db(uint8_t data);
@@ -2058,12 +2051,7 @@
                                RelocInfo::Mode rmode,
                                TypeFeedbackId ast_id = TypeFeedbackId::None());
   inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
-  void emit(Immediate x) {
-    if (!RelocInfo::IsNone(x.rmode_)) {
-      RecordRelocInfo(x.rmode_);
-    }
-    emitl(x.value_);
-  }
+  inline void emit(Immediate x);
 
   // Emits a REX prefix that encodes a 64-bit operand size and
   // the top bit of both register codes.
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index d62aafe..7b57c2c 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -4,18 +4,24 @@
 
 #if V8_TARGET_ARCH_X64
 
-#include "src/code-stubs.h"
 #include "src/api-arguments.h"
 #include "src/bootstrapper.h"
+#include "src/code-stubs.h"
 #include "src/codegen.h"
+#include "src/counters.h"
+#include "src/double.h"
+#include "src/heap/heap-inl.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
 #include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects/regexp-match-info.h"
 #include "src/regexp/jsregexp.h"
 #include "src/regexp/regexp-macro-assembler.h"
 #include "src/runtime/runtime.h"
-#include "src/x64/code-stubs-x64.h"
+
+#include "src/x64/code-stubs-x64.h"  // Cannot be the first include.
 
 namespace v8 {
 namespace internal {
@@ -32,17 +38,6 @@
   __ TailCallRuntime(Runtime::kNewArray);
 }
 
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
-  descriptor->Initialize(rax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
-  descriptor->Initialize(rax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                                ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
@@ -355,55 +350,6 @@
   __ ret(0);
 }
 
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  // Ensure that the vector and slot registers won't be clobbered before
-  // calling the miss handler.
-  DCHECK(!AreAliased(r8, r9, LoadWithVectorDescriptor::VectorRegister(),
-                     LoadDescriptor::SlotRegister()));
-
-  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8,
-                                                          r9, &miss);
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Label miss;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register index = LoadDescriptor::NameRegister();
-  Register scratch = rdi;
-  Register result = rax;
-  DCHECK(!scratch.is(receiver) && !scratch.is(index));
-  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
-         result.is(LoadDescriptor::SlotRegister()));
-
-  // StringCharAtGenerator doesn't use the result register until it's passed
-  // the different miss possibilities. If it did, we would have a conflict
-  // when FLAG_vector_ics is true.
-  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          RECEIVER_IS_STRING);
-  char_at_generator.GenerateFast(masm);
-  __ ret(0);
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -495,7 +441,7 @@
   // (8) Is the external string one byte?  If yes, go to (5).
   // (9) Two byte sequential.  Load regexp code for two byte. Go to (E).
   // (10) Short external string or not a string?  If yes, bail out to runtime.
-  // (11) Sliced string.  Replace subject with parent. Go to (1).
+  // (11) Sliced or thin string.  Replace subject with parent. Go to (1).
 
   Label seq_one_byte_string /* 5 */, seq_two_byte_string /* 9 */,
       external_string /* 7 */, check_underlying /* 1 */,
@@ -525,6 +471,7 @@
   // have already been covered.
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kThinStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmpp(rbx, Immediate(kExternalStringTag));
@@ -813,11 +760,18 @@
   __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
   __ j(not_zero, &runtime);
 
-  // (11) Sliced string.  Replace subject with parent. Go to (1).
+  // (11) Sliced or thin string.  Replace subject with parent. Go to (1).
+  Label thin_string;
+  __ cmpl(rbx, Immediate(kThinStringTag));
+  __ j(equal, &thin_string, Label::kNear);
   // Load offset into r14 and replace subject string with parent.
   __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
   __ movp(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
   __ jmp(&check_underlying);
+
+  __ bind(&thin_string);
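+  // A thin string is a wrapper around its actual string; unwrap it and
+  // re-check the underlying string.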
+  __ movp(rdi, FieldOperand(rdi, ThinString::kActualOffset));
+  __ jmp(&check_underlying);
 #endif  // V8_INTERPRETED_REGEXP
 }
 
@@ -918,9 +872,6 @@
       // Call runtime on identical symbols since we need to throw a TypeError.
       __ cmpb(rcx, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
       __ j(equal, &runtime_call, Label::kFar);
-      // Call runtime on identical SIMD values since we must throw a TypeError.
-      __ cmpb(rcx, Immediate(static_cast<uint8_t>(SIMD128_VALUE_TYPE)));
-      __ j(equal, &runtime_call, Label::kFar);
     }
     __ Set(rax, EQUAL);
     __ ret(0);
@@ -1117,9 +1068,11 @@
   if (cc == equal) {
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(rdx);
-      __ Push(rax);
-      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
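+      // Preserve the context register (rsi) across the builtin call.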
+      __ Push(rsi);
+      __ Call(strict() ? isolate()->builtins()->StrictEqual()
+                       : isolate()->builtins()->Equal(),
+              RelocInfo::CODE_TARGET);
+      __ Pop(rsi);
     }
     // Turn true into 0 and false into some non-zero value.
     STATIC_ASSERT(EQUAL == 0);
@@ -1188,8 +1141,7 @@
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
   // We don't know if r11 is a WeakCell or a Symbol, but it's harmless to read
-  // at this position in a symbol (see static asserts in
-  // type-feedback-vector.h).
+  // at this position in a symbol (see static asserts in feedback-vector.h).
   Label check_allocation_site;
   __ cmpp(rdi, FieldOperand(r11, WeakCell::kValueOffset));
   __ j(equal, &done, Label::kFar);
@@ -1228,7 +1180,7 @@
   // write-barrier is needed.
   __ bind(&megamorphic);
   __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
-          TypeFeedbackVector::MegamorphicSentinel(isolate));
+          FeedbackVector::MegamorphicSentinel(isolate));
   __ jmp(&done);
 
   // An uninitialized cache is patched with the function or sentinel to
@@ -1297,201 +1249,6 @@
   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
 }
 
-static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
-                               Register slot) {
-  __ SmiAddConstant(FieldOperand(feedback_vector, slot, times_pointer_size,
-                                 FixedArray::kHeaderSize + kPointerSize),
-                    Smi::FromInt(1));
-}
-
-void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
-  // rdi - function
-  // rdx - slot id
-  // rbx - vector
-  // rcx - allocation site (loaded from vector[slot]).
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
-  __ cmpp(rdi, r8);
-  __ j(not_equal, miss);
-
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, rbx, rdx);
-
-  __ movp(rbx, rcx);
-  __ movp(rdx, rdi);
-  ArrayConstructorStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-
-void CallICStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  // -- rax - number of arguments
-  // -- rdi - function
-  // -- rdx - slot id
-  // -- rbx - vector
-  // -----------------------------------
-  Isolate* isolate = masm->isolate();
-  Label extra_checks_or_miss, call, call_function, call_count_incremented;
-
-  // The checks. First, does rdi match the recorded monomorphic target?
-  __ SmiToInteger32(rdx, rdx);
-  __ movp(rcx,
-          FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
-
-  // We don't know that we have a weak cell. We might have a private symbol
-  // or an AllocationSite, but the memory is safe to examine.
-  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
-  // FixedArray.
-  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
-  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
-  // computed, meaning that it can't appear to be a pointer. If the low bit is
-  // 0, then hash is computed, but the 0 bit prevents the field from appearing
-  // to be a pointer.
-  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
-  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
-                    WeakCell::kValueOffset &&
-                WeakCell::kValueOffset == Symbol::kHashFieldSlot);
-
-  __ cmpp(rdi, FieldOperand(rcx, WeakCell::kValueOffset));
-  __ j(not_equal, &extra_checks_or_miss);
-
-  // The compare above could have been a SMI/SMI comparison. Guard against this
-  // convincing us that we have a monomorphic JSFunction.
-  __ JumpIfSmi(rdi, &extra_checks_or_miss);
-
-  __ bind(&call_function);
-  // Increment the call count for monomorphic function calls.
-  IncrementCallCount(masm, rbx, rdx);
-
-  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
-                                                    tail_call_mode()),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&extra_checks_or_miss);
-  Label uninitialized, miss, not_allocation_site;
-
-  __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
-  __ j(equal, &call);
-
-  // Check if we have an allocation site.
-  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
-                 Heap::kAllocationSiteMapRootIndex);
-  __ j(not_equal, &not_allocation_site);
-
-  // We have an allocation site.
-  HandleArrayCase(masm, &miss);
-
-  __ bind(&not_allocation_site);
-
-  // The following cases attempt to handle MISS cases without going to the
-  // runtime.
-  if (FLAG_trace_ic) {
-    __ jmp(&miss);
-  }
-
-  __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
-  __ j(equal, &uninitialized);
-
-  // We are going megamorphic. If the feedback is a JSFunction, it is fine
-  // to handle it here. More complex cases are dealt with in the runtime.
-  __ AssertNotSmi(rcx);
-  __ CmpObjectType(rcx, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &miss);
-  __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
-          TypeFeedbackVector::MegamorphicSentinel(isolate));
-
-  __ bind(&call);
-
-  // Increment the call count for megamorphic function calls.
-  IncrementCallCount(masm, rbx, rdx);
-
-  __ bind(&call_count_incremented);
-  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
-          RelocInfo::CODE_TARGET);
-
-  __ bind(&uninitialized);
-
-  // We are going monomorphic, provided we actually have a JSFunction.
-  __ JumpIfSmi(rdi, &miss);
-
-  // Goto miss case if we do not have a function.
-  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
-  __ j(not_equal, &miss);
-
-  // Make sure the function is not the Array() function, which requires special
-  // behavior on MISS.
-  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, rcx);
-  __ cmpp(rdi, rcx);
-  __ j(equal, &miss);
-
-  // Make sure the function belongs to the same native context.
-  __ movp(rcx, FieldOperand(rdi, JSFunction::kContextOffset));
-  __ movp(rcx, ContextOperand(rcx, Context::NATIVE_CONTEXT_INDEX));
-  __ cmpp(rcx, NativeContextOperand());
-  __ j(not_equal, &miss);
-
-  // Store the function. Use a stub since we need a frame for allocation.
-  // rbx - vector
-  // rdx - slot (needs to be in smi form)
-  // rdi - function
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    CreateWeakCellStub create_stub(isolate);
-
-    __ Integer32ToSmi(rax, rax);
-    __ Integer32ToSmi(rdx, rdx);
-    __ Push(rax);
-    __ Push(rbx);
-    __ Push(rdx);
-    __ Push(rdi);
-    __ Push(rsi);
-    __ CallStub(&create_stub);
-    __ Pop(rsi);
-    __ Pop(rdi);
-    __ Pop(rdx);
-    __ Pop(rbx);
-    __ Pop(rax);
-    __ SmiToInteger32(rdx, rdx);
-    __ SmiToInteger32(rax, rax);
-  }
-
-  __ jmp(&call_function);
-
-  // We are here because tracing is on or we encountered a MISS case we can't
-  // handle here.
-  __ bind(&miss);
-  GenerateMiss(masm);
-
-  __ jmp(&call_count_incremented);
-
-  // Unreachable
-  __ int3();
-}
-
-void CallICStub::GenerateMiss(MacroAssembler* masm) {
-  FrameScope scope(masm, StackFrame::INTERNAL);
-
-  // Preserve the number of arguments.
-  __ Integer32ToSmi(rax, rax);
-  __ Push(rax);
-
-  // Push the receiver and the function and feedback info.
-  __ Integer32ToSmi(rdx, rdx);
-  __ Push(rdi);
-  __ Push(rbx);
-  __ Push(rdx);
-
-  // Call the entry.
-  __ CallRuntime(Runtime::kCallIC_Miss);
-
-  // Move result to edi and exit the internal frame.
-  __ movp(rdi, rax);
-
-  // Restore number of arguments.
-  __ Pop(rax);
-  __ SmiToInteger32(rax, rax);
-}
-
 bool CEntryStub::NeedsImmovableCode() {
   return false;
 }
@@ -1702,8 +1459,7 @@
     __ movp(rbp, rsp);
 
     // Push the stack frame type.
-    int marker = type();
-    __ Push(Smi::FromInt(marker));  // context slot
+    __ Push(Immediate(StackFrame::TypeToMarker(type())));  // context slot
     ExternalReference context_address(Isolate::kContextAddress, isolate());
     __ Load(kScratchRegister, context_address);
     __ Push(kScratchRegister);  // context
@@ -1750,13 +1506,13 @@
   __ Load(rax, js_entry_sp);
   __ testp(rax, rax);
   __ j(not_zero, &not_outermost_js);
-  __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+  __ Push(Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
   __ movp(rax, rbp);
   __ Store(js_entry_sp, rax);
   Label cont;
   __ jmp(&cont);
   __ bind(&not_outermost_js);
-  __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+  __ Push(Immediate(StackFrame::INNER_JSENTRY_FRAME));
   __ bind(&cont);
 
   // Jump to a faked try block that does the invoke, with a faked catch
@@ -1801,7 +1557,7 @@
   __ bind(&exit);
   // Check if the current stack frame is marked as the outermost JS frame.
   __ Pop(rbx);
-  __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+  __ cmpp(rbx, Immediate(StackFrame::OUTERMOST_JSENTRY_FRAME));
   __ j(not_equal, &not_outermost_js_2);
   __ Move(kScratchRegister, js_entry_sp);
   __ movp(Operand(kScratchRegister, 0), Immediate(0));
@@ -1938,75 +1694,6 @@
   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
 }
 
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
-  // Fast case of Heap::LookupSingleCharacterStringFromCode.
-  __ JumpIfNotSmi(code_, &slow_case_);
-  __ SmiCompare(code_, Smi::FromInt(String::kMaxOneByteCharCode));
-  __ j(above, &slow_case_);
-
-  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
-  SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
-  __ movp(result_, FieldOperand(result_, index.reg, index.scale,
-                                FixedArray::kHeaderSize));
-  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
-  __ j(equal, &slow_case_);
-  __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
-  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
-  __ bind(&slow_case_);
-  call_helper.BeforeCall(masm);
-  __ Push(code_);
-  __ CallRuntime(Runtime::kStringCharFromCode);
-  if (!result_.is(rax)) {
-    __ movp(result_, rax);
-  }
-  call_helper.AfterCall(masm);
-  __ jmp(&exit_);
-
-  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
-                                          Register dest,
-                                          Register src,
-                                          Register count,
-                                          String::Encoding encoding) {
-  // Nothing to do for zero characters.
-  Label done;
-  __ testl(count, count);
-  __ j(zero, &done, Label::kNear);
-
-  // Make count the number of bytes to copy.
-  if (encoding == String::TWO_BYTE_ENCODING) {
-    STATIC_ASSERT(2 == sizeof(uc16));
-    __ addl(count, count);
-  }
-
-  // Copy remaining characters.
-  Label loop;
-  __ bind(&loop);
-  __ movb(kScratchRegister, Operand(src, 0));
-  __ movb(Operand(dest, 0), kScratchRegister);
-  __ incp(src);
-  __ incp(dest);
-  __ decl(count);
-  __ j(not_zero, &loop);
-
-  __ bind(&done);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -2616,61 +2303,6 @@
   __ jmp(done);
 }
 
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r1|. Jump to the |miss| label
-// otherwise.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
-                                                      Label* miss,
-                                                      Label* done,
-                                                      Register elements,
-                                                      Register name,
-                                                      Register r0,
-                                                      Register r1) {
-  DCHECK(!elements.is(r0));
-  DCHECK(!elements.is(r1));
-  DCHECK(!name.is(r0));
-  DCHECK(!name.is(r1));
-
-  __ AssertName(name);
-
-  __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
-  __ decl(r0);
-
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ movl(r1, FieldOperand(name, Name::kHashFieldOffset));
-    __ shrl(r1, Immediate(Name::kHashShift));
-    if (i > 0) {
-      __ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
-    }
-    __ andp(r1, r0);
-
-    // Scale the index by multiplying by the entry size.
-    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
-    __ leap(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
-
-    // Check if the key is identical to the name.
-    __ cmpp(name, Operand(elements, r1, times_pointer_size,
-                          kElementsStartOffset - kHeapObjectTag));
-    __ j(equal, done);
-  }
-
-  NameDictionaryLookupStub stub(masm->isolate(), elements, r0, r1,
-                                POSITIVE_LOOKUP);
-  __ Push(name);
-  __ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
-  __ shrl(r0, Immediate(Name::kHashShift));
-  __ Push(r0);
-  __ CallStub(&stub);
-
-  __ testp(r0, r0);
-  __ j(zero, miss);
-  __ jmp(done);
-}
-
-
 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   // we cannot call anything that could cause a GC from this stub.
@@ -2860,6 +2492,9 @@
   regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
 }
 
+void RecordWriteStub::Activate(Code* code) {
+  code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+}
 
 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
     MacroAssembler* masm,
@@ -2949,209 +2584,6 @@
   __ jmp(rcx);  // Return to IC Miss stub, continuation still on stack.
 }
 
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
-                             Register receiver_map, Register scratch1,
-                             Register scratch2, Register scratch3,
-                             bool is_polymorphic, Label* miss) {
-  // feedback initially contains the feedback array
-  Label next_loop, prepare_next;
-  Label start_polymorphic;
-
-  Register counter = scratch1;
-  Register length = scratch2;
-  Register cached_map = scratch3;
-
-  __ movp(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-  __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, &start_polymorphic);
-
-  // found, now call handler.
-  Register handler = feedback;
-  __ movp(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-
-  // Polymorphic, we have to loop from 2 to N
-  __ bind(&start_polymorphic);
-  __ SmiToInteger32(length, FieldOperand(feedback, FixedArray::kLengthOffset));
-  if (!is_polymorphic) {
-    // If the IC could be monomorphic we have to make sure we don't go past the
-    // end of the feedback array.
-    __ cmpl(length, Immediate(2));
-    __ j(equal, miss);
-  }
-  __ movl(counter, Immediate(2));
-
-  __ bind(&next_loop);
-  __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
-                                   FixedArray::kHeaderSize));
-  __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, &prepare_next);
-  __ movp(handler, FieldOperand(feedback, counter, times_pointer_size,
-                                FixedArray::kHeaderSize + kPointerSize));
-  __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-
-  __ bind(&prepare_next);
-  __ addl(counter, Immediate(2));
-  __ cmpl(counter, length);
-  __ j(less, &next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ jmp(miss);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
-                                  Register receiver_map, Register feedback,
-                                  Register vector, Register integer_slot,
-                                  Label* compare_map, Label* load_smi_map,
-                                  Label* try_array) {
-  __ JumpIfSmi(receiver, load_smi_map);
-  __ movp(receiver_map, FieldOperand(receiver, 0));
-
-  __ bind(compare_map);
-  __ cmpp(receiver_map, FieldOperand(feedback, WeakCell::kValueOffset));
-  __ j(not_equal, try_array);
-  Register handler = feedback;
-  __ movp(handler, FieldOperand(vector, integer_slot, times_pointer_size,
-                                FixedArray::kHeaderSize + kPointerSize));
-  __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  KeyedStoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
-                                            Register receiver_map,
-                                            Register feedback, Register scratch,
-                                            Register scratch1,
-                                            Register scratch2, Label* miss) {
-  // feedback initially contains the feedback array
-  Label next, next_loop, prepare_next;
-  Label transition_call;
-
-  Register cached_map = scratch;
-  Register counter = scratch1;
-  Register length = scratch2;
-
-  // Polymorphic, we have to loop from 0 to N - 1
-  __ movp(counter, Immediate(0));
-  __ movp(length, FieldOperand(feedback, FixedArray::kLengthOffset));
-  __ SmiToInteger32(length, length);
-
-  __ bind(&next_loop);
-  __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
-                                   FixedArray::kHeaderSize));
-  __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, &prepare_next);
-  __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
-                                   FixedArray::kHeaderSize + kPointerSize));
-  __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &transition_call);
-  __ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
-                                 FixedArray::kHeaderSize + 2 * kPointerSize));
-  __ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
-  __ jmp(feedback);
-
-  __ bind(&transition_call);
-  DCHECK(receiver_map.is(StoreTransitionDescriptor::MapRegister()));
-  __ movp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  // The weak cell may have been cleared.
-  __ JumpIfSmi(receiver_map, miss);
-  // Get the handler in value.
-  __ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
-                                 FixedArray::kHeaderSize + 2 * kPointerSize));
-  __ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
-  __ jmp(feedback);
-
-  __ bind(&prepare_next);
-  __ addl(counter, Immediate(3));
-  __ cmpl(counter, length);
-  __ j(less, &next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ jmp(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // rdx
-  Register key = StoreWithVectorDescriptor::NameRegister();           // rcx
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // rbx
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // rdi
-  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(rax));         // rax
-  Register feedback = r8;
-  Register integer_slot = r9;
-  Register receiver_map = r11;
-  DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));
-
-  __ SmiToInteger32(integer_slot, slot);
-  __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
-                                 FixedArray::kHeaderSize));
-
-  // Try to quickly handle the monomorphic case without knowing for sure
-  // if we have a weak cell in feedback. We do know it's safe to look
-  // at WeakCell::kValueOffset.
-  Label try_array, load_smi_map, compare_map;
-  Label not_array, miss;
-  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
-                        integer_slot, &compare_map, &load_smi_map, &try_array);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &not_array);
-  HandlePolymorphicKeyedStoreCase(masm, receiver_map, feedback, integer_slot,
-                                  r15, r14, &miss);
-
-  __ bind(&not_array);
-  Label try_poly_name;
-  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
-  __ j(not_equal, &try_poly_name);
-
-  Handle<Code> megamorphic_stub =
-      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ cmpp(key, feedback);
-  __ j(not_equal, &miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
-                                 FixedArray::kHeaderSize + kPointerSize));
-  HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, false,
-                   &miss);
-
-  __ bind(&miss);
-  KeyedStoreIC::GenerateMiss(masm);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
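// A sketch of the feedback dispatch order implemented above, in plain C++
// with an illustrative Feedback stand-in (the real slot holds heap
// objects): weak cell -> monomorphic, fixed array -> polymorphic,
// megamorphic symbol -> generic stub, matching name -> keyed-polymorphic
// array in the next slot, anything else -> miss.
struct Feedback {
  enum Kind { kWeakCell, kFixedArray, kMegamorphicSymbol, kName } kind;
  const void* value;  // weak cell target or name, depending on kind
};

enum Outcome { kMonomorphic, kPolymorphic, kMegamorphic, kKeyedPoly, kMiss };

Outcome Dispatch(const Feedback& f, const void* receiver_map,
                 const void* key) {
  if (f.kind == Feedback::kWeakCell && f.value == receiver_map)
    return kMonomorphic;
  if (f.kind == Feedback::kFixedArray) return kPolymorphic;
  if (f.kind == Feedback::kMegamorphicSymbol) return kMegamorphic;
  if (f.kind == Feedback::kName && f.value == key) return kKeyedPoly;
  return kMiss;
}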
-
-
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(rbx);
-  CallICStub stub(isolate(), state());
-  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
 
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
@@ -3507,657 +2939,6 @@
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rdi    : target
-  //  -- rdx    : new target
-  //  -- rsi    : context
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  __ AssertFunction(rdi);
-  __ AssertReceiver(rdx);
-
-  // Verify that the new target is a JSFunction.
-  Label new_object;
-  __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rbx);
-  __ j(not_equal, &new_object);
-
-  // Load the initial map and verify that it's in fact a map.
-  __ movp(rcx, FieldOperand(rdx, JSFunction::kPrototypeOrInitialMapOffset));
-  __ JumpIfSmi(rcx, &new_object);
-  __ CmpObjectType(rcx, MAP_TYPE, rbx);
-  __ j(not_equal, &new_object);
-
-  // Fall back to runtime if the target differs from the new target's
-  // initial map constructor.
-  __ cmpp(rdi, FieldOperand(rcx, Map::kConstructorOrBackPointerOffset));
-  __ j(not_equal, &new_object);
-
-  // Allocate the JSObject on the heap.
-  Label allocate, done_allocate;
-  __ movzxbl(rbx, FieldOperand(rcx, Map::kInstanceSizeOffset));
-  __ leal(rbx, Operand(rbx, times_pointer_size, 0));
-  __ Allocate(rbx, rax, rdi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
-  __ bind(&done_allocate);
-
-  // Initialize the JSObject fields.
-  __ movp(FieldOperand(rax, JSObject::kMapOffset), rcx);
-  __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
-  __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
-  __ movp(FieldOperand(rax, JSObject::kElementsOffset), rbx);
-  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ leap(rbx, FieldOperand(rax, JSObject::kHeaderSize));
-
-  // ----------- S t a t e -------------
-  //  -- rax    : result (tagged)
-  //  -- rbx    : result fields (untagged)
-  //  -- rdi    : result end (untagged)
-  //  -- rcx    : initial map
-  //  -- rsi    : context
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  // Perform in-object slack tracking if requested.
-  Label slack_tracking;
-  STATIC_ASSERT(Map::kNoSlackTracking == 0);
-  __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
-  __ testl(FieldOperand(rcx, Map::kBitField3Offset),
-           Immediate(Map::ConstructionCounter::kMask));
-  __ j(not_zero, &slack_tracking, Label::kNear);
-  {
-    // Initialize all in-object fields with undefined.
-    __ InitializeFieldsWithFiller(rbx, rdi, r11);
-    __ Ret();
-  }
-  __ bind(&slack_tracking);
-  {
-    // Decrease generous allocation count.
-    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
-    __ subl(FieldOperand(rcx, Map::kBitField3Offset),
-            Immediate(1 << Map::ConstructionCounter::kShift));
-
-    // Initialize the in-object fields with undefined.
-    __ movzxbl(rdx, FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset));
-    __ negp(rdx);
-    __ leap(rdx, Operand(rdi, rdx, times_pointer_size, 0));
-    __ InitializeFieldsWithFiller(rbx, rdx, r11);
-
-    // Initialize the remaining (reserved) fields with the one-pointer filler map.
-    __ LoadRoot(r11, Heap::kOnePointerFillerMapRootIndex);
-    __ InitializeFieldsWithFiller(rdx, rdi, r11);
-
-    // Check if we can finalize the instance size.
-    Label finalize;
-    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
-    __ testl(FieldOperand(rcx, Map::kBitField3Offset),
-             Immediate(Map::ConstructionCounter::kMask));
-    __ j(zero, &finalize, Label::kNear);
-    __ Ret();
-
-    // Finalize the instance size.
-    __ bind(&finalize);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(rax);
-      __ Push(rcx);
-      __ CallRuntime(Runtime::kFinalizeInstanceSize);
-      __ Pop(rax);
-    }
-    __ Ret();
-  }
-
-  // Fall back to %AllocateInNewSpace.
-  __ bind(&allocate);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Integer32ToSmi(rbx, rbx);
-    __ Push(rcx);
-    __ Push(rbx);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ Pop(rcx);
-  }
-  __ movzxbl(rbx, FieldOperand(rcx, Map::kInstanceSizeOffset));
-  __ leap(rdi, Operand(rax, rbx, times_pointer_size, 0));
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ decp(rdi);  // Remove the tag from the end address.
-  __ jmp(&done_allocate);
-
-  // Fall back to %NewObject.
-  __ bind(&new_object);
-  __ PopReturnAddressTo(rcx);
-  __ Push(rdi);
-  __ Push(rdx);
-  __ PushReturnAddressFrom(rcx);
-  __ TailCallRuntime(Runtime::kNewObject);
-}
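// A short C++ sketch of the slack-tracking protocol above (field names are
// illustrative; the real counter is a bitfield packed into the map's
// bit_field3): each fast-path construction decrements the counter, and the
// construction that drops it to zero finalizes the instance size so unused
// in-object fields can be trimmed.
struct MapStandIn {
  int construction_counter;    // 0 == kNoSlackTracking
  int unused_property_fields;  // filled with one-pointer filler meanwhile
};

void OnFastPathConstruct(MapStandIn* map, void (*finalize)(MapStandIn*)) {
  if (map->construction_counter == 0) return;  // tracking already finished
  if (--map->construction_counter == 0) finalize(map);  // runtime call above
}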
-
-
-void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rdi    : function
-  //  -- rsi    : context
-  //  -- rbp    : frame pointer
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  __ AssertFunction(rdi);
-
-  // Make rdx point to the JavaScript frame.
-  __ movp(rdx, rbp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
-    __ j(equal, &ok);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have rest parameters (only possible if we have an
-  // arguments adaptor frame below the function frame).
-  Label no_rest_parameters;
-  __ movp(rbx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(not_equal, &no_rest_parameters, Label::kNear);
-
-  // Check if the arguments adaptor frame contains more arguments than
-  // specified by the function's internal formal parameter count.
-  Label rest_parameters;
-  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadSharedFunctionInfoSpecialField(
-      rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
-  __ SmiToInteger32(
-      rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ subl(rax, rcx);
-  __ j(greater, &rest_parameters);
-
-  // Return an empty rest parameter array.
-  __ bind(&no_rest_parameters);
-  {
-    // ----------- S t a t e -------------
-    //  -- rsi    : context
-    //  -- rsp[0] : return address
-    // -----------------------------------
-
-    // Allocate an empty rest parameter array.
-    Label allocate, done_allocate;
-    __ Allocate(JSArray::kSize, rax, rdx, rcx, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Setup the rest parameter array in rax.
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, rcx);
-    __ movp(FieldOperand(rax, JSArray::kMapOffset), rcx);
-    __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
-    __ movp(FieldOperand(rax, JSArray::kPropertiesOffset), rcx);
-    __ movp(FieldOperand(rax, JSArray::kElementsOffset), rcx);
-    __ movp(FieldOperand(rax, JSArray::kLengthOffset), Immediate(0));
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ Ret();
-
-    // Fall back to %AllocateInNewSpace.
-    __ bind(&allocate);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(Smi::FromInt(JSArray::kSize));
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-    }
-    __ jmp(&done_allocate);
-  }
-
-  __ bind(&rest_parameters);
-  {
-    // Compute the pointer to the first rest parameter (skipping the receiver).
-    __ leap(rbx, Operand(rbx, rax, times_pointer_size,
-                         StandardFrameConstants::kCallerSPOffset -
-                             1 * kPointerSize));
-
-    // ----------- S t a t e -------------
-    //  -- rdi    : function
-    //  -- rsi    : context
-    //  -- rax    : number of rest parameters
-    //  -- rbx    : pointer to first rest parameters
-    //  -- rsp[0] : return address
-    // -----------------------------------
-
-    // Allocate space for the rest parameter array plus the backing store.
-    Label allocate, done_allocate;
-    __ leal(rcx, Operand(rax, times_pointer_size,
-                         JSArray::kSize + FixedArray::kHeaderSize));
-    __ Allocate(rcx, rdx, r8, no_reg, &allocate, NO_ALLOCATION_FLAGS);
-    __ bind(&done_allocate);
-
-    // Compute the arguments.length in rdi.
-    __ Integer32ToSmi(rdi, rax);
-
-    // Setup the elements array in rdx.
-    __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
-    __ movp(FieldOperand(rdx, FixedArray::kMapOffset), rcx);
-    __ movp(FieldOperand(rdx, FixedArray::kLengthOffset), rdi);
-    {
-      Label loop, done_loop;
-      __ Set(rcx, 0);
-      __ bind(&loop);
-      __ cmpl(rcx, rax);
-      __ j(equal, &done_loop, Label::kNear);
-      __ movp(kScratchRegister, Operand(rbx, 0 * kPointerSize));
-      __ movp(
-          FieldOperand(rdx, rcx, times_pointer_size, FixedArray::kHeaderSize),
-          kScratchRegister);
-      __ subp(rbx, Immediate(1 * kPointerSize));
-      __ addl(rcx, Immediate(1));
-      __ jmp(&loop);
-      __ bind(&done_loop);
-    }
-
-    // Setup the rest parameter array in rax.
-    __ leap(rax,
-            Operand(rdx, rax, times_pointer_size, FixedArray::kHeaderSize));
-    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, rcx);
-    __ movp(FieldOperand(rax, JSArray::kMapOffset), rcx);
-    __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
-    __ movp(FieldOperand(rax, JSArray::kPropertiesOffset), rcx);
-    __ movp(FieldOperand(rax, JSArray::kElementsOffset), rdx);
-    __ movp(FieldOperand(rax, JSArray::kLengthOffset), rdi);
-    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
-    __ Ret();
-
-    // Fall back to %AllocateInNewSpace (if not too big).
-    Label too_big_for_new_space;
-    __ bind(&allocate);
-    __ cmpl(rcx, Immediate(kMaxRegularHeapObjectSize));
-    __ j(greater, &too_big_for_new_space);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Integer32ToSmi(rax, rax);
-      __ Integer32ToSmi(rcx, rcx);
-      __ Push(rax);
-      __ Push(rbx);
-      __ Push(rcx);
-      __ CallRuntime(Runtime::kAllocateInNewSpace);
-      __ movp(rdx, rax);
-      __ Pop(rbx);
-      __ Pop(rax);
-      __ SmiToInteger32(rax, rax);
-    }
-    __ jmp(&done_allocate);
-
-    // Fall back to %NewRestParameter.
-    __ bind(&too_big_for_new_space);
-    __ PopReturnAddressTo(kScratchRegister);
-    __ Push(rdi);
-    __ PushReturnAddressFrom(kScratchRegister);
-    __ TailCallRuntime(Runtime::kNewRestParameter);
-  }
-}
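// The arithmetic above in a few lines of plain C++ (a sketch, not V8
// code): a non-empty rest array exists only when the adaptor frame
// supplied more arguments than the function's formal parameter count.
int RestParameterCount(int actual_arguments, int formal_parameters) {
  int rest = actual_arguments - formal_parameters;
  return rest > 0 ? rest : 0;  // <= 0 means the empty-rest-array path
}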
-
-
-void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rdi    : function
-  //  -- rsi    : context
-  //  -- rbp    : frame pointer
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  __ AssertFunction(rdi);
-
-  // Make r9 point to the JavaScript frame.
-  __ movp(r9, rbp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ movp(r9, Operand(r9, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ cmpp(rdi, Operand(r9, StandardFrameConstants::kFunctionOffset));
-    __ j(equal, &ok);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
-  __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadSharedFunctionInfoSpecialField(
-      rcx, rcx, SharedFunctionInfo::kFormalParameterCountOffset);
-  __ leap(rdx, Operand(r9, rcx, times_pointer_size,
-                       StandardFrameConstants::kCallerSPOffset));
-  __ Integer32ToSmi(rcx, rcx);
-
-  // rcx : number of parameters (tagged)
-  // rdx : parameters pointer
-  // rdi : function
-  // rsp[0] : return address
-  // r9  : JavaScript frame pointer.
-  // Registers used over the whole function:
-  //  rbx: the mapped parameter count (untagged)
-  //  rax: the allocated object (tagged).
-  Factory* factory = isolate()->factory();
-
-  __ SmiToInteger64(rbx, rcx);
-  // rbx = parameter count (untagged)
-
-  // Check if the calling frame is an arguments adaptor frame.
-  Label adaptor_frame, try_allocate, runtime;
-  __ movp(rax, Operand(r9, StandardFrameConstants::kCallerFPOffset));
-  __ movp(r8, Operand(rax, CommonFrameConstants::kContextOrFrameTypeOffset));
-  __ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(equal, &adaptor_frame);
-
-  // No adaptor, parameter count = argument count.
-  __ movp(r11, rbx);
-  __ jmp(&try_allocate, Label::kNear);
-
-  // We have an adaptor frame. Patch the parameters pointer.
-  __ bind(&adaptor_frame);
-  __ SmiToInteger64(
-      r11, Operand(rax, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ leap(rdx, Operand(rax, r11, times_pointer_size,
-                       StandardFrameConstants::kCallerSPOffset));
-
-  // rbx = parameter count (untagged)
-  // r11 = argument count (untagged)
-  // Compute the mapped parameter count = min(rbx, r11) in rbx.
-  __ cmpp(rbx, r11);
-  __ j(less_equal, &try_allocate, Label::kNear);
-  __ movp(rbx, r11);
-
-  __ bind(&try_allocate);
-
-  // Compute the sizes of backing store, parameter map, and arguments object.
-  // 1. Parameter map: has 2 extra words for the context and backing store.
-  const int kParameterMapHeaderSize =
-      FixedArray::kHeaderSize + 2 * kPointerSize;
-  Label no_parameter_map;
-  __ xorp(r8, r8);
-  __ testp(rbx, rbx);
-  __ j(zero, &no_parameter_map, Label::kNear);
-  __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
-  __ bind(&no_parameter_map);
-
-  // 2. Backing store.
-  __ leap(r8, Operand(r8, r11, times_pointer_size, FixedArray::kHeaderSize));
-
-  // 3. Arguments object.
-  __ addp(r8, Immediate(JSSloppyArgumentsObject::kSize));
-
-  // Do the allocation of all three objects in one go.
-  __ Allocate(r8, rax, r9, no_reg, &runtime, NO_ALLOCATION_FLAGS);
-
-  // rax = address of new object(s) (tagged)
-  // r11 = argument count (untagged)
-  // Get the arguments map from the current native context into r9.
-  Label has_mapped_parameters, instantiate;
-  __ movp(r9, NativeContextOperand());
-  __ testp(rbx, rbx);
-  __ j(not_zero, &has_mapped_parameters, Label::kNear);
-
-  const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX;
-  __ movp(r9, Operand(r9, Context::SlotOffset(kIndex)));
-  __ jmp(&instantiate, Label::kNear);
-
-  const int kAliasedIndex = Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX;
-  __ bind(&has_mapped_parameters);
-  __ movp(r9, Operand(r9, Context::SlotOffset(kAliasedIndex)));
-  __ bind(&instantiate);
-
-  // rax = address of new object (tagged)
-  // rbx = mapped parameter count (untagged)
-  // r11 = argument count (untagged)
-  // r9 = address of arguments map (tagged)
-  __ movp(FieldOperand(rax, JSObject::kMapOffset), r9);
-  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
-  __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
-  __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
-
-  // Set up the callee in-object property.
-  __ AssertNotSmi(rdi);
-  __ movp(FieldOperand(rax, JSSloppyArgumentsObject::kCalleeOffset), rdi);
-
-  // Use the length (smi tagged) and set that as an in-object property too.
-  // Note: r11 is tagged from here on.
-  __ Integer32ToSmi(r11, r11);
-  __ movp(FieldOperand(rax, JSSloppyArgumentsObject::kLengthOffset), r11);
-
-  // Set up the elements pointer in the allocated arguments object.
-  // If we allocated a parameter map, rdi will point there, otherwise to the
-  // backing store.
-  __ leap(rdi, Operand(rax, JSSloppyArgumentsObject::kSize));
-  __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
-
-  // rax = address of new object (tagged)
-  // rbx = mapped parameter count (untagged)
-  // r11 = argument count (tagged)
-  // rdi = address of parameter map or backing store (tagged)
-
-  // Initialize parameter map. If there are no mapped arguments, we're done.
-  Label skip_parameter_map;
-  __ testp(rbx, rbx);
-  __ j(zero, &skip_parameter_map);
-
-  __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
-  // rbx contains the untagged mapped parameter count. Add 2 and tag to write.
-  __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
-  __ Integer64PlusConstantToSmi(r9, rbx, 2);
-  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
-  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
-  __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
-  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
-
-  // Copy the parameter slots and the holes in the arguments.
-  // We need to fill in mapped_parameter_count slots. They index the context,
-  // where parameters are stored in reverse order, at
-  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
-  // The mapped parameters thus need to get indices
-  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
-  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
-  // We loop from right to left.
-  Label parameters_loop, parameters_test;
-
-  // Load tagged parameter count into r9.
-  __ Integer32ToSmi(r9, rbx);
-  __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
-  __ addp(r8, rcx);
-  __ subp(r8, r9);
-  __ movp(rcx, rdi);
-  __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
-  __ SmiToInteger64(r9, r9);
-  // r9 = loop variable (untagged)
-  // r8 = mapping index (tagged)
-  // rcx = address of parameter map (tagged)
-  // rdi = address of backing store (tagged)
-  __ jmp(&parameters_test, Label::kNear);
-
-  __ bind(&parameters_loop);
-  __ subp(r9, Immediate(1));
-  __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
-  __ movp(FieldOperand(rcx, r9, times_pointer_size, kParameterMapHeaderSize),
-          r8);
-  __ movp(FieldOperand(rdi, r9, times_pointer_size, FixedArray::kHeaderSize),
-          kScratchRegister);
-  __ SmiAddConstant(r8, r8, Smi::FromInt(1));
-  __ bind(&parameters_test);
-  __ testp(r9, r9);
-  __ j(not_zero, &parameters_loop, Label::kNear);
-
-  __ bind(&skip_parameter_map);
-
-  // r11 = argument count (tagged)
-  // rdi = address of backing store (tagged)
-  // Copy arguments header and remaining slots (if there are any).
-  __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
-          factory->fixed_array_map());
-  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r11);
-
-  Label arguments_loop, arguments_test;
-  __ movp(r8, rbx);
-  // Untag r11 for the loop below.
-  __ SmiToInteger64(r11, r11);
-  __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
-  __ subp(rdx, kScratchRegister);
-  __ jmp(&arguments_test, Label::kNear);
-
-  __ bind(&arguments_loop);
-  __ subp(rdx, Immediate(kPointerSize));
-  __ movp(r9, Operand(rdx, 0));
-  __ movp(FieldOperand(rdi, r8,
-                       times_pointer_size,
-                       FixedArray::kHeaderSize),
-          r9);
-  __ addp(r8, Immediate(1));
-
-  __ bind(&arguments_test);
-  __ cmpp(r8, r11);
-  __ j(less, &arguments_loop, Label::kNear);
-
-  // Return.
-  __ ret(0);
-
-  // Do the runtime call to allocate the arguments object.
-  // r11 = argument count (untagged)
-  __ bind(&runtime);
-  __ Integer32ToSmi(r11, r11);
-  __ PopReturnAddressTo(rax);
-  __ Push(rdi);  // Push function.
-  __ Push(rdx);  // Push parameters pointer.
-  __ Push(r11);  // Push parameter count.
-  __ PushReturnAddressFrom(rax);
-  __ TailCallRuntime(Runtime::kNewSloppyArguments);
-}
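// A C++ sketch of the three-part size computation above; the layout
// constants are assumptions standing in for V8's real header sizes.
#include <algorithm>
#include <cstddef>

const std::size_t kPtr = 8;                           // assumption: 64-bit
const std::size_t kFixedArrayHeader = 2 * kPtr;       // map + length
const std::size_t kParameterMapHeader = kFixedArrayHeader + 2 * kPtr;
const std::size_t kSloppyArgumentsObject = 5 * kPtr;  // assumption

std::size_t SloppyArgumentsAllocationSize(std::size_t params,
                                          std::size_t args) {
  std::size_t mapped = std::min(params, args);  // mapped parameter count
  std::size_t size = 0;
  if (mapped > 0) size += kParameterMapHeader + mapped * kPtr;  // 1. map
  size += kFixedArrayHeader + args * kPtr;                      // 2. store
  size += kSloppyArgumentsObject;                               // 3. object
  return size;
}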
-
-
-void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rdi    : function
-  //  -- rsi    : context
-  //  -- rbp    : frame pointer
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  __ AssertFunction(rdi);
-
-  // Make rdx point to the JavaScript frame.
-  __ movp(rdx, rbp);
-  if (skip_stub_frame()) {
-    // For Ignition we need to skip the handler/stub frame to reach the
-    // JavaScript frame for the function.
-    __ movp(rdx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
-  }
-  if (FLAG_debug_code) {
-    Label ok;
-    __ cmpp(rdi, Operand(rdx, StandardFrameConstants::kFunctionOffset));
-    __ j(equal, &ok);
-    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
-    __ bind(&ok);
-  }
-
-  // Check if we have an arguments adaptor frame below the function frame.
-  Label arguments_adaptor, arguments_done;
-  __ movp(rbx, Operand(rdx, StandardFrameConstants::kCallerFPOffset));
-  __ Cmp(Operand(rbx, CommonFrameConstants::kContextOrFrameTypeOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-  __ j(equal, &arguments_adaptor, Label::kNear);
-  {
-    __ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadSharedFunctionInfoSpecialField(
-        rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
-    __ leap(rbx, Operand(rdx, rax, times_pointer_size,
-                         StandardFrameConstants::kCallerSPOffset -
-                             1 * kPointerSize));
-  }
-  __ jmp(&arguments_done, Label::kNear);
-  __ bind(&arguments_adaptor);
-  {
-    __ SmiToInteger32(
-        rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ leap(rbx, Operand(rbx, rax, times_pointer_size,
-                         StandardFrameConstants::kCallerSPOffset -
-                             1 * kPointerSize));
-  }
-  __ bind(&arguments_done);
-
-  // ----------- S t a t e -------------
-  //  -- rax    : number of arguments
-  //  -- rbx    : pointer to the first argument
-  //  -- rdi    : function
-  //  -- rsi    : context
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  // Allocate space for the strict arguments object plus the backing store.
-  Label allocate, done_allocate;
-  __ leal(rcx, Operand(rax, times_pointer_size, JSStrictArgumentsObject::kSize +
-                                                    FixedArray::kHeaderSize));
-  __ Allocate(rcx, rdx, r8, no_reg, &allocate, NO_ALLOCATION_FLAGS);
-  __ bind(&done_allocate);
-
-  // Compute the arguments.length in rdi.
-  __ Integer32ToSmi(rdi, rax);
-
-  // Setup the elements array in rdx.
-  __ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
-  __ movp(FieldOperand(rdx, FixedArray::kMapOffset), rcx);
-  __ movp(FieldOperand(rdx, FixedArray::kLengthOffset), rdi);
-  {
-    Label loop, done_loop;
-    __ Set(rcx, 0);
-    __ bind(&loop);
-    __ cmpl(rcx, rax);
-    __ j(equal, &done_loop, Label::kNear);
-    __ movp(kScratchRegister, Operand(rbx, 0 * kPointerSize));
-    __ movp(
-        FieldOperand(rdx, rcx, times_pointer_size, FixedArray::kHeaderSize),
-        kScratchRegister);
-    __ subp(rbx, Immediate(1 * kPointerSize));
-    __ addl(rcx, Immediate(1));
-    __ jmp(&loop);
-    __ bind(&done_loop);
-  }
-
-  // Setup the strict arguments object in rax.
-  __ leap(rax,
-          Operand(rdx, rax, times_pointer_size, FixedArray::kHeaderSize));
-  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, rcx);
-  __ movp(FieldOperand(rax, JSStrictArgumentsObject::kMapOffset), rcx);
-  __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
-  __ movp(FieldOperand(rax, JSStrictArgumentsObject::kPropertiesOffset), rcx);
-  __ movp(FieldOperand(rax, JSStrictArgumentsObject::kElementsOffset), rdx);
-  __ movp(FieldOperand(rax, JSStrictArgumentsObject::kLengthOffset), rdi);
-  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
-  __ Ret();
-
-  // Fall back to %AllocateInNewSpace (if not too big).
-  Label too_big_for_new_space;
-  __ bind(&allocate);
-  __ cmpl(rcx, Immediate(kMaxRegularHeapObjectSize));
-  __ j(greater, &too_big_for_new_space);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ Integer32ToSmi(rax, rax);
-    __ Integer32ToSmi(rcx, rcx);
-    __ Push(rax);
-    __ Push(rbx);
-    __ Push(rcx);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ movp(rdx, rax);
-    __ Pop(rbx);
-    __ Pop(rax);
-    __ SmiToInteger32(rax, rax);
-  }
-  __ jmp(&done_allocate);
-
-  // Fall back to %NewStrictArguments.
-  __ bind(&too_big_for_new_space);
-  __ PopReturnAddressTo(kScratchRegister);
-  __ Push(rdi);
-  __ PushReturnAddressFrom(kScratchRegister);
-  __ TailCallRuntime(Runtime::kNewStrictArguments);
-}
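// A plain-C++ sketch of the copy loop above: stack arguments sit at
// descending addresses, so the source pointer walks down while the
// destination index walks up through the new elements array.
void CopyStackArguments(const void** first_arg, const void** elements,
                        int count) {
  const void** src = first_arg;
  for (int i = 0; i < count; ++i) {
    elements[i] = *src;  // FixedArray slot i
    --src;               // next argument lives one word lower on the stack
  }
}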
-
-
 static int Offset(ExternalReference ref0, ExternalReference ref1) {
   int64_t offset = (ref0.address() - ref1.address());
   // Check that fits into int.
@@ -4165,7 +2946,6 @@
   return static_cast<int>(offset);
 }
 
-
 // Prepares the stack for the arguments (alignment and so on). The WIN64
 // calling convention requires putting the pointer to the return value slot
 // into rcx (rcx must be preserved until CallApiFunctionAndReturn).  Saves
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index a181377..4240cb4 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -14,15 +14,6 @@
 
 class StringHelper : public AllStatic {
  public:
-  // Generate code for copying characters using the rep movs instruction.
-  // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
-  // not supported.
-  static void GenerateCopyCharacters(MacroAssembler* masm,
-                                     Register dest,
-                                     Register src,
-                                     Register count,
-                                     String::Encoding encoding);
-
   // Compares two flat one-byte strings and returns result in rax.
   static void GenerateCompareFlatOneByteStrings(
       MacroAssembler* masm, Register left, Register right, Register scratch1,
@@ -63,14 +54,6 @@
                                      Handle<Name> name,
                                      Register r0);
 
-  static void GeneratePositiveLookup(MacroAssembler* masm,
-                                     Label* miss,
-                                     Label* done,
-                                     Register elements,
-                                     Register name,
-                                     Register r0,
-                                     Register r1);
-
   bool SometimesSetsUpAFrame() override { return false; }
 
  private:
@@ -325,9 +308,7 @@
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  void Activate(Code* code) override {
-    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
-  }
+  void Activate(Code* code) override;
 
   Register object() const {
     return Register::from_code(ObjectBits::decode(minor_key_));
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 911f3cb..f8ed7cb 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -8,6 +8,7 @@
 
 #include "src/codegen.h"
 #include "src/macro-assembler.h"
+#include "src/x64/assembler-x64-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -62,314 +63,14 @@
 
 #define __ ACCESS_MASM(masm)
 
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* allocation_memento_found) {
-  // Return address is on the stack.
-  Register scratch = rdi;
-  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    DCHECK(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(
-        receiver, scratch, allocation_memento_found);
-  }
-
-  // Set transitioned map.
-  __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      scratch,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  // Return address is on the stack.
-  DCHECK(receiver.is(rdx));
-  DCHECK(key.is(rcx));
-  DCHECK(value.is(rax));
-  DCHECK(target_map.is(rbx));
-
-  // The fail label is reached when allocating a new backing store fails.
-  Label allocated, new_backing_store, only_change_map, done;
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
-  __ j(equal, &only_change_map);
-
-  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
-  if (kPointerSize == kDoubleSize) {
-    // Check backing store for COW-ness. For COW arrays we have to
-    // allocate a new backing store.
-    __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
-                   Heap::kFixedCOWArrayMapRootIndex);
-    __ j(equal, &new_backing_store);
-  } else {
-    // For the x32 port we have to allocate a new backing store because smi
-    // size is not equal to double size.
-    DCHECK(kDoubleSize == 2 * kPointerSize);
-    __ jmp(&new_backing_store);
-  }
-
-  // Check if the backing store is in new-space. If not, we need to allocate
-  // a new one since the old one is in pointer-space.
-  // If in new space, we can reuse the old backing store because it is
-  // the same size.
-  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
-
-  __ movp(r14, r8);  // Destination array equals source array.
-
-  // r8 : source FixedArray
-  // r9 : elements array length
-  // r14: destination FixedDoubleArray
-  // Set backing store's map
-  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
-
-  __ bind(&allocated);
-  // Set transitioned map.
-  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
-  __ RecordWriteField(rdx,
-                      HeapObject::kMapOffset,
-                      rbx,
-                      rdi,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  // Convert smis to doubles and holes to hole NaNs.  The Array's length
-  // remains unchanged.
-  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
-  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
-
-  Label loop, entry, convert_hole;
-  __ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
-  // r15: the-hole NaN
-  __ jmp(&entry);
-
-  // Allocate new backing store.
-  __ bind(&new_backing_store);
-  __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
-  __ Allocate(rdi, r14, r11, r15, fail, NO_ALLOCATION_FLAGS);
-  // Set backing store's map
-  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
-  // Set receiver's backing store.
-  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
-  __ movp(r11, r14);
-  __ RecordWriteField(rdx,
-                      JSObject::kElementsOffset,
-                      r11,
-                      r15,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  // Set backing store's length.
-  __ Integer32ToSmi(r11, r9);
-  __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
-  __ jmp(&allocated);
-
-  __ bind(&only_change_map);
-  // Set transitioned map.
-  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
-  __ RecordWriteField(rdx,
-                      HeapObject::kMapOffset,
-                      rbx,
-                      rdi,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ jmp(&done);
-
-  // Conversion loop.
-  __ bind(&loop);
-  __ movp(rbx,
-          FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
-  // r9 : current element's index
-  // rbx: current element (smi-tagged)
-  __ JumpIfNotSmi(rbx, &convert_hole);
-  __ SmiToInteger32(rbx, rbx);
-  __ Cvtlsi2sd(kScratchDoubleReg, rbx);
-  __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
-           kScratchDoubleReg);
-  __ jmp(&entry);
-  __ bind(&convert_hole);
-
-  if (FLAG_debug_code) {
-    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
-    __ Assert(equal, kObjectFoundInSmiOnlyArray);
-  }
-
-  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
-  __ bind(&entry);
-  __ decp(r9);
-  __ j(not_sign, &loop);
-
-  __ bind(&done);
-}
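// A sketch of the per-element conversion above in plain C++: each smi
// becomes its double value, and the-hole becomes a dedicated NaN bit
// pattern so holes survive the unboxed representation. The payload below
// is illustrative, not V8's exact kHoleNanInt64 constant.
#include <cstdint>
#include <cstring>

const uint64_t kHoleNanBits = 0x7FF7000000000000ull;  // assumption

uint64_t SmiSlotToDoubleBits(bool is_hole, int32_t smi_value) {
  if (is_hole) return kHoleNanBits;  // holes survive as a tagged NaN
  double d = static_cast<double>(smi_value);  // Cvtlsi2sd in the code above
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return bits;  // raw bits stored into the FixedDoubleArray slot
}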
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  // Return address is on the stack.
-  DCHECK(receiver.is(rdx));
-  DCHECK(key.is(rcx));
-  DCHECK(value.is(rax));
-  DCHECK(target_map.is(rbx));
-
-  Label loop, entry, convert_hole, gc_required, only_change_map;
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
-  __ j(equal, &only_change_map);
-
-  __ Push(rsi);
-  __ Push(rax);
-
-  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
-  // r8 : source FixedDoubleArray
-  // r9 : number of elements
-  __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
-  __ Allocate(rdi, r11, r14, r15, &gc_required, NO_ALLOCATION_FLAGS);
-  // r11: destination FixedArray
-  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
-  __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
-  __ Integer32ToSmi(r14, r9);
-  __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);
-
-  // Prepare for conversion loop.
-  __ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
-  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
-  // rsi: the-hole NaN
-  // rdi: pointer to the-hole
-
-  // Allocating heap numbers in the loop below can fail and cause a jump to
-  // gc_required. We can't leave a partly initialized FixedArray behind,
-  // so pessimistically fill it with holes now.
-  Label initialization_loop, initialization_loop_entry;
-  __ jmp(&initialization_loop_entry, Label::kNear);
-  __ bind(&initialization_loop);
-  __ movp(FieldOperand(r11, r9, times_pointer_size, FixedArray::kHeaderSize),
-          rdi);
-  __ bind(&initialization_loop_entry);
-  __ decp(r9);
-  __ j(not_sign, &initialization_loop);
-
-  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
-  __ jmp(&entry);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ Pop(rax);
-  __ Pop(rsi);
-  __ jmp(fail);
-
-  // Box doubles into heap numbers.
-  __ bind(&loop);
-  __ movq(r14, FieldOperand(r8,
-                            r9,
-                            times_8,
-                            FixedDoubleArray::kHeaderSize));
-  // r9 : current element's index
-  // r14: current element
-  __ cmpq(r14, rsi);
-  __ j(equal, &convert_hole);
-
-  // Non-hole double, copy value into a heap number.
-  __ AllocateHeapNumber(rax, r15, &gc_required);
-  // rax: new heap number
-  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
-  __ movp(FieldOperand(r11,
-                       r9,
-                       times_pointer_size,
-                       FixedArray::kHeaderSize),
-          rax);
-  __ movp(r15, r9);
-  __ RecordWriteArray(r11,
-                      rax,
-                      r15,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ jmp(&entry, Label::kNear);
-
-  // Replace the-hole NaN with the-hole pointer.
-  __ bind(&convert_hole);
-  __ movp(FieldOperand(r11,
-                       r9,
-                       times_pointer_size,
-                       FixedArray::kHeaderSize),
-          rdi);
-
-  __ bind(&entry);
-  __ decp(r9);
-  __ j(not_sign, &loop);
-
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
-  __ RecordWriteField(rdx,
-                      JSObject::kElementsOffset,
-                      r11,
-                      r15,
-                      kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ Pop(rax);
-  __ Pop(rsi);
-
-  __ bind(&only_change_map);
-  // Set transitioned map.
-  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
-  __ RecordWriteField(rdx,
-                      HeapObject::kMapOffset,
-                      rbx,
-                      rdi,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-}
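// A C++ sketch of why the destination array is pre-filled with holes: the
// per-element boxing may trigger GC, and the collector must never observe
// uninitialized slots. Types and the hole NaN bit pattern are illustrative.
#include <cstdint>
#include <vector>

const uint64_t kHoleBits = 0x7FF7000000000000ull;  // assumption
struct Boxed {};                  // stand-in for a heap number
Boxed* const kTheHole = nullptr;  // assumption: sentinel for "no value"

std::vector<Boxed*> BoxDoubles(const std::vector<uint64_t>& raw,
                               Boxed* (*box)(uint64_t /* may GC */)) {
  std::vector<Boxed*> out(raw.size(), kTheHole);  // pessimistic hole fill
  for (std::size_t i = 0; i < raw.size(); ++i) {
    if (raw[i] != kHoleBits) out[i] = box(raw[i]);  // allocation point
  }
  return out;
}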
-
-
 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                        Register string,
                                        Register index,
                                        Register result,
                                        Label* call_runtime) {
+  Label indirect_string_loaded;
+  __ bind(&indirect_string_loaded);
+
   // Fetch the instance type of the receiver into result register.
   __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
   __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
@@ -380,16 +81,23 @@
   __ j(zero, &check_sequential, Label::kNear);
 
   // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ testb(result, Immediate(kSlicedNotConsMask));
-  __ j(zero, &cons_string, Label::kNear);
+  Label cons_string, thin_string;
+  __ andl(result, Immediate(kStringRepresentationMask));
+  __ cmpl(result, Immediate(kConsStringTag));
+  __ j(equal, &cons_string, Label::kNear);
+  __ cmpl(result, Immediate(kThinStringTag));
+  __ j(equal, &thin_string, Label::kNear);
 
   // Handle slices.
-  Label indirect_string_loaded;
   __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
   __ addp(index, result);
   __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded, Label::kNear);
+  __ jmp(&indirect_string_loaded);
+
+  // Handle thin strings.
+  __ bind(&thin_string);
+  __ movp(string, FieldOperand(string, ThinString::kActualOffset));
+  __ jmp(&indirect_string_loaded);
 
   // Handle cons strings.
   // Check whether the right hand side is the empty string (i.e. if
@@ -401,10 +109,7 @@
                  Heap::kempty_stringRootIndex);
   __ j(not_equal, call_runtime);
   __ movp(string, FieldOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+  __ jmp(&indirect_string_loaded);
 
   // Distinguish sequential and external strings. Only these two string
   // representations can reach here (slices and flat cons strings have been
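// A plain-C++ sketch of the unwrapping loop the patched generator now
// performs (the string classes are illustrative stand-ins): indirect
// shapes are peeled one layer per iteration until a direct string remains.
struct Str {
  enum Shape { kDirect, kCons, kSliced, kThin } shape;
  Str* target;  // parent / first part / actual, depending on shape
  int offset;   // only meaningful for slices
};

Str* UnwrapIndirect(Str* s, int* index) {
  while (s->shape != Str::kDirect) {
    if (s->shape == Str::kSliced) *index += s->offset;  // adjust the index
    s = s->target;  // cons: first part (second was checked to be empty)
  }
  return s;  // sequential or external: dispatch on the encoding next
}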
@@ -498,32 +203,24 @@
   return result;
 }
 
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
 
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
-                               MarkingParity* parity) {
-  if (IsYoungSequence(isolate, sequence)) {
-    *age = kNoAgeCodeAge;
-    *parity = NO_MARKING_PARITY;
-  } else {
-    sequence++;  // Skip the kCallOpcode byte
-    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
-        Assembler::kCallTargetAddressOffset;
-    Code* stub = GetCodeFromTargetAddress(target_address);
-    GetCodeAgeAndParity(stub, age, parity);
-  }
+  sequence++;  // Skip the kCallOpcode byte
+  Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+                           Assembler::kCallTargetAddressOffset;
+  Code* stub = GetCodeFromTargetAddress(target_address);
+  return GetAgeOfCodeAgeStub(stub);
 }
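// A sketch of the rel32 decode above in plain C++, assuming the standard
// 5-byte E8 call encoding (the kCallTargetAddressOffset constant in the
// code above serves the same end-relative adjustment as the
// "+ sizeof rel32" below).
#include <cstdint>
#include <cstring>

uint8_t* DecodeCallTarget(uint8_t* call_site) {
  uint8_t* disp = call_site + 1;  // skip the call opcode byte
  int32_t rel32;
  std::memcpy(&rel32, disp, sizeof rel32);
  return disp + sizeof rel32 + rel32;  // displacement is end-relative
}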
 
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
-                                byte* sequence,
-                                Code::Age age,
-                                MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+                                Code::Age age) {
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
     Assembler::FlushICache(isolate, sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age);
     CodePatcher patcher(isolate, sequence, young_length);
     patcher.masm()->call(stub->instruction_start());
     patcher.masm()->Nop(
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 9fbf69e..1664a15 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -7,6 +7,7 @@
 #include "src/codegen.h"
 #include "src/deoptimizer.h"
 #include "src/full-codegen/full-codegen.h"
+#include "src/objects-inl.h"
 #include "src/register-configuration.h"
 #include "src/safepoint-table.h"
 
@@ -100,7 +101,7 @@
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
   for (int i = 0; i < XMMRegister::kMaxNumRegisters; ++i) {
-    double double_value = input_->GetDoubleRegister(i);
+    Float64 double_value = input_->GetDoubleRegister(i);
     output_frame->SetDoubleRegister(i, double_value);
   }
 }
diff --git a/src/x64/eh-frame-x64.cc b/src/x64/eh-frame-x64.cc
index afbcf21..8604332 100644
--- a/src/x64/eh-frame-x64.cc
+++ b/src/x64/eh-frame-x64.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/eh-frame.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/x64/interface-descriptors-x64.cc b/src/x64/interface-descriptors-x64.cc
index 3ee4412..c784edd 100644
--- a/src/x64/interface-descriptors-x64.cc
+++ b/src/x64/interface-descriptors-x64.cc
@@ -64,35 +64,11 @@
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {rbx};
+  // SharedFunctionInfo, vector, slot index.
+  Register registers[] = {rbx, rcx, rdx};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-void FastNewObjectDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {rdi, rdx};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewRestParameterDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {rdi};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {rdi};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {rdi};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
 void TypeofDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {rbx};
@@ -144,15 +120,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {rdi, rdx};
+  Register registers[] = {rdi, rax, rdx};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {rdi, rax, rdx, rbx};
   data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -180,6 +154,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void CallForwardVarargsDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // rcx : start index (to support rest parameters)
+  // rdi : the target to call
+  Register registers[] = {rdi, rcx};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
 
 void ConstructStubDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -214,13 +195,12 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
-  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
-      CallInterfaceDescriptorData* data) {                          \
-    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
-  }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
+void ArrayConstructorDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  // kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
+  Register registers[] = {rdi, rdx, rax, rbx};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
+}
 
 void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -413,6 +393,14 @@
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      rbx,  // loaded new FP
+  };
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 8d70f54..b75b38e 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -8,11 +8,14 @@
 #include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
+#include "src/counters.h"
 #include "src/debug/debug.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects-inl.h"
 #include "src/register-configuration.h"
 #include "src/x64/assembler-x64.h"
-#include "src/x64/macro-assembler-x64.h"
+
+#include "src/x64/macro-assembler-x64.h"  // Cannot be the first include.
 
 namespace v8 {
 namespace internal {
@@ -1570,6 +1573,11 @@
   j(NegateCondition(smi), on_not_smi, near_jump);
 }
 
+void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
+                                  Label::Distance near_jump) {
+  Condition smi = CheckSmi(src);
+  j(NegateCondition(smi), on_not_smi, near_jump);
+}
 
 void MacroAssembler::JumpUnlessNonNegativeSmi(
     Register src, Label* on_not_smi_or_negative,
@@ -2460,10 +2468,19 @@
   intptr_t smi = reinterpret_cast<intptr_t>(source);
   if (is_int32(smi)) {
     Push(Immediate(static_cast<int32_t>(smi)));
-  } else {
-    Register constant = GetSmiConstant(source);
-    Push(constant);
+    return;
   }
+  int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
+  int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
+  if (first_byte_set == last_byte_set && kPointerSize == kInt64Size) {
+    // This sequence has only 7 bytes, compared to the 12 bytes below.
+    Push(Immediate(0));
+    movb(Operand(rsp, first_byte_set),
+         Immediate(static_cast<int8_t>(smi >> (8 * first_byte_set))));
+    return;
+  }
+  Register constant = GetSmiConstant(source);
+  Push(constant);
 }
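// A C++ sketch of the single-byte test used above (plain C++, not V8
// code): a 64-bit immediate can be materialized as "push 0; mov byte"
// exactly when all of its set bits fall within one byte of the word.
#include <cstdint>

bool FitsInOneByteOfZeroWord(uint64_t value, int* byte_index) {
  if (value == 0) return false;  // "push Immediate(0)" already covers this
  int first = __builtin_ctzll(value) / 8;        // lowest set byte
  int last = (63 - __builtin_clzll(value)) / 8;  // highest set byte
  if (first != last) return false;
  *byte_index = first;  // movb writes (value >> (8 * first)) & 0xFF here
  return true;
}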
 
 
@@ -2540,30 +2557,15 @@
   andl(scratch1, Immediate(kFlatOneByteStringMask));
   andl(scratch2, Immediate(kFlatOneByteStringMask));
   // Interleave the bits to check both scratch1 and scratch2 in one test.
-  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
-  leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  const int kShift = 8;
+  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << kShift));
+  shlp(scratch2, Immediate(kShift));
+  orp(scratch1, scratch2);
   cmpl(scratch1,
-       Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
+       Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << kShift)));
   j(not_equal, on_fail, near_jump);
 }
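// A sketch of the combined check in plain C++ (the tag values are
// illustrative, not V8's instance-type encoding): both masked type words
// are packed into one register so a single compare validates both strings.
#include <cstdint>

const uint32_t kMask = 0xFF;      // assumption: the mask fits in one byte
const uint32_t kExpected = 0x04;  // assumption: flat one-byte tag value
const int kShift = 8;             // (kMask << kShift) must not overlap kMask

bool BothFlatOneByte(uint32_t type1, uint32_t type2) {
  uint32_t combined = (type1 & kMask) | ((type2 & kMask) << kShift);
  return combined == (kExpected | (kExpected << kShift));
}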
 
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
-    Register instance_type, Register scratch, Label* failure,
-    Label::Distance near_jump) {
-  if (!scratch.is(instance_type)) {
-    movl(scratch, instance_type);
-  }
-
-  const int kFlatOneByteStringMask =
-      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
-  andl(scratch, Immediate(kFlatOneByteStringMask));
-  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
-  j(not_equal, failure, near_jump);
-}
-
-
 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
     Register first_object_instance_type, Register second_object_instance_type,
     Register scratch1, Register scratch2, Label* on_fail,
@@ -3663,66 +3665,6 @@
        Immediate(static_cast<int8_t>(type)));
 }
 
-void MacroAssembler::CheckFastObjectElements(Register map,
-                                             Label* fail,
-                                             Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
-  j(below_equal, fail, distance);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
-  j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
-                                          Label* fail,
-                                          Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
-  j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
-    Register maybe_number,
-    Register elements,
-    Register index,
-    XMMRegister xmm_scratch,
-    Label* fail,
-    int elements_offset) {
-  Label smi_value, done;
-
-  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
-  CheckMap(maybe_number,
-           isolate()->factory()->heap_number_map(),
-           fail,
-           DONT_DO_SMI_CHECK);
-
-  // Double value, turn potential sNaN into qNaN.
-  Move(xmm_scratch, 1.0);
-  mulsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
-  jmp(&done, Label::kNear);
-
-  bind(&smi_value);
-  // Value is a smi. Convert to a double and store.
-  // Preserve original value.
-  SmiToInteger32(kScratchRegister, maybe_number);
-  Cvtlsi2sd(xmm_scratch, kScratchRegister);
-  bind(&done);
-  Movsd(FieldOperand(elements, index, times_8,
-                     FixedDoubleArray::kHeaderSize - elements_offset),
-        xmm_scratch);
-}
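// The "multiply by 1.0" above is a canonicalization trick: any arithmetic
// on a signaling NaN yields a quiet NaN. A plain-C++ sketch (the volatile
// only keeps the compiler from folding the multiply away, a concern the
// explicitly emitted mulsd never has):
double CanonicalizeNaN(double maybe_snan) {
  volatile double one = 1.0;
  return maybe_snan * one;  // sNaN in -> qNaN out; other values unchanged
}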
-
-
 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
 }
@@ -4099,32 +4041,6 @@
   bind(&done);
 }
 
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
-                                             Label* miss) {
-  // Get the prototype or initial map from the function.
-  movp(result,
-       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // If the prototype or initial map is the hole, don't return it and
-  // simply miss the cache instead. This will allow us to allocate a
-  // prototype object on-demand in the runtime system.
-  CompareRoot(result, Heap::kTheHoleValueRootIndex);
-  j(equal, miss);
-
-  // If the function does not have an initial map, we're done.
-  Label done;
-  CmpObjectType(result, MAP_TYPE, kScratchRegister);
-  j(not_equal, &done, Label::kNear);
-
-  // Get the prototype from the initial map.
-  movp(result, FieldOperand(result, Map::kPrototypeOffset));
-
-  // All done.
-  bind(&done);
-}
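// A C++ sketch of the removed lookup (types illustrative): the slot holds
// the-hole ("allocate on demand", reported as a miss), an initial map
// (return the map's prototype), or the prototype itself.
struct Obj { bool is_the_hole; bool is_map; Obj* map_prototype; };

Obj* TryGetPrototype(Obj* slot, bool* miss) {
  if (slot->is_the_hole) { *miss = true; return nullptr; }
  if (slot->is_map) return slot->map_prototype;  // prototype via initial map
  return slot;  // the slot already is the prototype
}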
-
-
 void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
   if (FLAG_native_code_counters && counter->Enabled()) {
     Operand counter_operand = ExternalOperand(ExternalReference(counter));
@@ -4158,14 +4074,14 @@
   }
 }
 
-
-void MacroAssembler::DebugBreak() {
-  Set(rax, 0);  // No arguments.
-  LoadAddress(rbx,
-              ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
-  CEntryStub ces(isolate(), 1);
-  DCHECK(AllowThisStubCall(&ces));
-  Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
+void MacroAssembler::MaybeDropFrames() {
+  // Check whether we need to drop frames to restart a function on the stack.
+  ExternalReference restart_fp =
+      ExternalReference::debug_restart_fp_address(isolate());
+  Load(rbx, restart_fp);
+  testp(rbx, rbx);
+  j(not_zero, isolate()->builtins()->FrameDropperTrampoline(),
+    RelocInfo::CODE_TARGET);
 }
 
 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
@@ -4286,8 +4202,8 @@
   DCHECK(function.is(rdi));
   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(rdx));
 
-  if (call_wrapper.NeedsDebugStepCheck()) {
-    FloodFunctionIfStepping(function, new_target, expected, actual);
+  if (call_wrapper.NeedsDebugHookCheck()) {
+    CheckDebugHook(function, new_target, expected, actual);
   }
 
   // Clear the new.target register if not given.
@@ -4367,6 +4283,7 @@
       DCHECK(actual.reg().is(rax));
       DCHECK(expected.reg().is(rbx));
     } else {
+      definitely_matches = true;
       Move(rax, actual.reg());
     }
   }
@@ -4387,17 +4304,15 @@
   }
 }
 
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
-                                             const ParameterCount& expected,
-                                             const ParameterCount& actual) {
-  Label skip_flooding;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(isolate());
-  Operand last_step_action_operand = ExternalOperand(last_step_action);
-  STATIC_ASSERT(StepFrame > StepIn);
-  cmpb(last_step_action_operand, Immediate(StepIn));
-  j(less, &skip_flooding);
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual) {
+  Label skip_hook;
+  ExternalReference debug_hook_active =
+      ExternalReference::debug_hook_on_function_call_address(isolate());
+  Operand debug_hook_active_operand = ExternalOperand(debug_hook_active);
+  cmpb(debug_hook_active_operand, Immediate(0));
+  j(equal, &skip_hook);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4414,7 +4329,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    CallRuntime(Runtime::kDebugOnFunctionCall);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -4428,13 +4343,13 @@
       SmiToInteger64(expected.reg(), expected.reg());
     }
   }
-  bind(&skip_flooding);
+  bind(&skip_hook);
 }
 
 void MacroAssembler::StubPrologue(StackFrame::Type type) {
   pushq(rbp);  // Caller's frame pointer.
   movp(rbp, rsp);
-  Push(Smi::FromInt(type));
+  Push(Immediate(StackFrame::TypeToMarker(type)));
 }
 
 void MacroAssembler::Prologue(bool code_pre_aging) {
@@ -4453,11 +4368,10 @@
   }
 }
 
-
-void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
   movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-  movp(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
-  movp(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
+  movp(vector, FieldOperand(vector, JSFunction::kFeedbackVectorOffset));
+  movp(vector, FieldOperand(vector, Cell::kValueOffset));
 }
 
 
@@ -4471,7 +4385,7 @@
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   pushq(rbp);
   movp(rbp, rsp);
-  Push(Smi::FromInt(type));
+  Push(Immediate(StackFrame::TypeToMarker(type)));
   if (type == StackFrame::INTERNAL) {
     Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
     Push(kScratchRegister);
@@ -4488,9 +4402,8 @@
 
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   if (emit_debug_code()) {
-    Move(kScratchRegister, Smi::FromInt(type));
     cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
-         kScratchRegister);
+         Immediate(StackFrame::TypeToMarker(type)));
     Check(equal, kStackFrameTypesMustMatch);
   }
   movp(rsp, rbp);
@@ -4529,7 +4442,7 @@
   movp(rbp, rsp);
 
   // Reserve room for entry stack pointer and push the code object.
-  Push(Smi::FromInt(frame_type));
+  Push(Immediate(StackFrame::TypeToMarker(frame_type)));
   DCHECK_EQ(-2 * kPointerSize, ExitFrameConstants::kSPOffset);
   Push(Immediate(0));  // Saved entry sp, patched before call.
   Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
@@ -4958,125 +4871,6 @@
   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
 }
 
-
-void MacroAssembler::AllocateTwoByteString(Register result,
-                                           Register length,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
-                               kObjectAlignmentMask;
-  DCHECK(kShortSize == 2);
-  // scratch1 = length * 2 + kObjectAlignmentMask.
-  leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
-                kHeaderAlignment));
-  andp(scratch1, Immediate(~kObjectAlignmentMask));
-  if (kHeaderAlignment > 0) {
-    subp(scratch1, Immediate(kHeaderAlignment));
-  }
-
-  // Allocate two byte string in new space.
-  Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1, result, scratch2,
-           scratch3, gc_required, NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
-  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-  Integer32ToSmi(scratch1, length);
-  movp(FieldOperand(result, String::kLengthOffset), scratch1);
-  movp(FieldOperand(result, String::kHashFieldOffset),
-       Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
-                                           Register scratch1, Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
-                               kObjectAlignmentMask;
-  movl(scratch1, length);
-  DCHECK(kCharSize == 1);
-  addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
-  andp(scratch1, Immediate(~kObjectAlignmentMask));
-  if (kHeaderAlignment > 0) {
-    subp(scratch1, Immediate(kHeaderAlignment));
-  }
-
-  // Allocate one-byte string in new space.
-  Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1, result, scratch2,
-           scratch3, gc_required, NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
-  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-  Integer32ToSmi(scratch1, length);
-  movp(FieldOperand(result, String::kLengthOffset), scratch1);
-  movp(FieldOperand(result, String::kHashFieldOffset),
-       Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Label* gc_required) {
-  // Allocate heap number in new space.
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map. The other fields are left uninitialized.
-  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
-  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map. The other fields are left uninitialized.
-  LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
-  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
-                                          Register scratch1,
-                                          Register scratch2,
-                                          Label* gc_required) {
-  // Allocate heap number in new space.
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map. The other fields are left uninitialized.
-  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
-  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  // Allocate heap number in new space.
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map. The other fields are left uninitialized.
-  LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
-  movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
                                      Register value, Register scratch,
                                      Label* gc_required) {
@@ -5137,28 +4931,6 @@
   }
 }
 
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind,
-    ElementsKind transitioned_kind,
-    Register map_in_out,
-    Register scratch,
-    Label* no_map_match) {
-  DCHECK(IsFastElementsKind(expected_kind));
-  DCHECK(IsFastElementsKind(transitioned_kind));
-
-  // Check that the function's map is the same as the expected cached map.
-  movp(scratch, NativeContextOperand());
-  cmpp(map_in_out,
-       ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
-  j(not_equal, no_map_match);
-
-  // Use the transitioned cached map.
-  movp(map_in_out,
-       ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
 #ifdef _WIN64
 static const int kRegisterPassedArguments = 4;
 #else
@@ -5501,42 +5273,6 @@
               Heap::kAllocationMementoMapRootIndex);
 }
 
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
-    Register object,
-    Register scratch0,
-    Register scratch1,
-    Label* found) {
-  DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
-  DCHECK(!scratch1.is(scratch0));
-  Register current = scratch0;
-  Label loop_again, end;
-
-  movp(current, object);
-  movp(current, FieldOperand(current, HeapObject::kMapOffset));
-  movp(current, FieldOperand(current, Map::kPrototypeOffset));
-  CompareRoot(current, Heap::kNullValueRootIndex);
-  j(equal, &end);
-
-  // Loop based on the map going up the prototype chain.
-  bind(&loop_again);
-  movp(current, FieldOperand(current, HeapObject::kMapOffset));
-  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  CmpInstanceType(current, JS_OBJECT_TYPE);
-  j(below, found);
-  movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
-  DecodeField<Map::ElementsKindBits>(scratch1);
-  cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
-  j(equal, found);
-  movp(current, FieldOperand(current, Map::kPrototypeOffset));
-  CompareRoot(current, Heap::kNullValueRootIndex);
-  j(not_equal, &loop_again);
-
-  bind(&end);
-}
-
-
 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
   DCHECK(!dividend.is(rax));
   DCHECK(!dividend.is(rdx));
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index f085509..5f87709 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -10,6 +10,7 @@
 #include "src/base/flags.h"
 #include "src/frames.h"
 #include "src/globals.h"
+#include "src/x64/assembler-x64.h"
 #include "src/x64/frames-x64.h"
 
 namespace v8 {
@@ -323,10 +324,8 @@
       PointersToHereCheck pointers_to_here_check_for_value =
           kPointersToHereMaybeInteresting);
 
-  // ---------------------------------------------------------------------------
-  // Debugger Support
-
-  void DebugBreak();
+  // Frame restart support.
+  void MaybeDropFrames();
 
   // Generates function and stub prologue code.
   void StubPrologue(StackFrame::Type type);
@@ -390,9 +389,10 @@
                           const ParameterCount& actual, InvokeFlag flag,
                           const CallWrapper& call_wrapper);
 
-  void FloodFunctionIfStepping(Register fun, Register new_target,
-                               const ParameterCount& expected,
-                               const ParameterCount& actual);
+  // On function call, call into the debugger if necessary.
+  void CheckDebugHook(Register fun, Register new_target,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
@@ -550,6 +550,10 @@
                     Label* on_not_smi,
                     Label::Distance near_jump = Label::kFar);
 
+  // Jump to label if the value is not a tagged smi.
+  void JumpIfNotSmi(Operand src, Label* on_not_smi,
+                    Label::Distance near_jump = Label::kFar);
+
   // Jump to label if the value is not a non-negative tagged smi.
   void JumpUnlessNonNegativeSmi(Register src,
                                 Label* on_not_smi,
@@ -1112,29 +1116,6 @@
   // Always use unsigned comparisons: above and below, not less and greater.
   void CmpInstanceType(Register map, InstanceType type);
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map,
-                               Label* fail,
-                               Label::Distance distance = Label::kFar);
-
-  // Check if a map for a JSObject indicates that the object has fast smi only
-  // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map,
-                            Label* fail,
-                            Label::Distance distance = Label::kFar);
-
-  // Check to see if maybe_number can be stored as a double in
-  // FastDoubleElements. If it can, store it at the index specified by index in
-  // the FastDoubleElements array elements, otherwise jump to fail.  Note that
-  // index must not be smi-tagged.
-  void StoreNumberToDoubleElements(Register maybe_number,
-                                   Register elements,
-                                   Register index,
-                                   XMMRegister xmm_scratch,
-                                   Label* fail,
-                                   int elements_offset = 0);
-
   // Compare an object's map with the specified map.
   void CompareMap(Register obj, Handle<Map> map);
 
@@ -1344,36 +1325,6 @@
                           Label* gc_required,
                           MutableMode mode = IMMUTABLE);
 
-  // Allocate a sequential string. All the header fields of the string object
-  // are initialized.
-  void AllocateTwoByteString(Register result,
-                             Register length,
-                             Register scratch1,
-                             Register scratch2,
-                             Register scratch3,
-                             Label* gc_required);
-  void AllocateOneByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-
-  // Allocate a raw cons string object. Only the map field of the result is
-  // initialized.
-  void AllocateTwoByteConsString(Register result,
-                          Register scratch1,
-                          Register scratch2,
-                          Label* gc_required);
-  void AllocateOneByteConsString(Register result, Register scratch1,
-                                 Register scratch2, Label* gc_required);
-
-  // Allocate a raw sliced string object. Only the map field of the result is
-  // initialized.
-  void AllocateTwoByteSlicedString(Register result,
-                            Register scratch1,
-                            Register scratch2,
-                            Label* gc_required);
-  void AllocateOneByteSlicedString(Register result, Register scratch1,
-                                   Register scratch2, Label* gc_required);
-
   // Allocate and initialize a JSValue wrapper with the specified {constructor}
   // and {value}.
   void AllocateJSValue(Register result, Register constructor, Register value,
@@ -1400,13 +1351,6 @@
   // |temp| holds |result|'s map when done.
   void GetMapConstructor(Register result, Register map, Register temp);
 
-  // Try to get function prototype of a function and puts the value in
-  // the result register. Checks that the function really is a
-  // function and jumps to the miss label if the fast checks fail. The
-  // function register will be untouched; the other register may be
-  // clobbered.
-  void TryGetFunctionPrototype(Register function, Register result, Label* miss);
-
   // Find the function context up the context chain.
   void LoadContext(Register dst, int context_chain_length);
 
@@ -1420,17 +1364,6 @@
     LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
   }
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the native context if the map in register
-  // map_in_out is the cached Array map in the native context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(
-      ElementsKind expected_kind,
-      ElementsKind transitioned_kind,
-      Register map_in_out,
-      Register scratch,
-      Label* no_map_match);
-
   // Load the native context slot with the current index.
   void LoadNativeContextSlot(int index, Register dst);
 
@@ -1569,7 +1502,7 @@
   }
 
   // Load the type feedback vector from a JavaScript frame.
-  void EmitLoadTypeFeedbackVector(Register vector);
+  void EmitLoadFeedbackVector(Register vector);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -1593,20 +1526,6 @@
                                        Register scratch_reg,
                                        Label* no_memento_found);
 
-  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
-                                         Register scratch_reg,
-                                         Label* memento_found) {
-    Label no_memento_found;
-    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
-                                    &no_memento_found);
-    j(equal, memento_found);
-    bind(&no_memento_found);
-  }
-
-  // Jumps to found label if a prototype map has dictionary elements.
-  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
-                                        Register scratch1, Label* found);
-
  private:
   // Order general registers are pushed by Pushad.
   // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
diff --git a/src/x87/OWNERS b/src/x87/OWNERS
index dd9998b..61245ae 100644
--- a/src/x87/OWNERS
+++ b/src/x87/OWNERS
@@ -1 +1,2 @@
 weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/src/x87/assembler-x87-inl.h b/src/x87/assembler-x87-inl.h
index fa9b5a4..8b2510b 100644
--- a/src/x87/assembler-x87-inl.h
+++ b/src/x87/assembler-x87-inl.h
@@ -41,6 +41,7 @@
 
 #include "src/assembler.h"
 #include "src/debug/debug.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -446,6 +447,17 @@
   }
 }
 
+Address Assembler::target_address_at(Address pc, Code* code) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  return target_address_at(pc, constant_pool);
+}
+
+void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
+                                      Address target,
+                                      ICacheFlushMode icache_flush_mode) {
+  Address constant_pool = code ? code->constant_pool() : NULL;
+  set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
+}
 
 Address Assembler::target_address_from_return_address(Address pc) {
   return pc - kCallTargetAddressOffset;
diff --git a/src/x87/assembler-x87.cc b/src/x87/assembler-x87.cc
index eb8dafa..2ba4dfd 100644
--- a/src/x87/assembler-x87.cc
+++ b/src/x87/assembler-x87.cc
@@ -116,13 +116,18 @@
   return Memory::uint32_at(pc_);
 }
 
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+  return Memory::uint32_at(pc_);
+}
+
 void RelocInfo::unchecked_update_wasm_memory_reference(
     Address address, ICacheFlushMode flush_mode) {
   Memory::Address_at(pc_) = address;
 }
 
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
-                                                  ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+                                           ICacheFlushMode flush_mode) {
   Memory::uint32_at(pc_) = size;
 }
 
diff --git a/src/x87/assembler-x87.h b/src/x87/assembler-x87.h
index 160145b..c2105be 100644
--- a/src/x87/assembler-x87.h
+++ b/src/x87/assembler-x87.h
@@ -147,6 +147,7 @@
 const Register no_reg = {Register::kCode_no_reg};
 
 static const bool kSimpleFPAliasing = true;
+static const bool kSimdMaskRegisters = false;
 
 struct X87Register {
   enum Code {
@@ -496,16 +497,10 @@
   inline static void set_target_address_at(
       Isolate* isolate, Address pc, Address constant_pool, Address target,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
-  static inline Address target_address_at(Address pc, Code* code) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    return target_address_at(pc, constant_pool);
-  }
+  static inline Address target_address_at(Address pc, Code* code);
   static inline void set_target_address_at(
       Isolate* isolate, Address pc, Code* code, Address target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
-    Address constant_pool = code ? code->constant_pool() : NULL;
-    set_target_address_at(isolate, pc, constant_pool, target);
-  }
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
 
   // Return the code target address at a call site from the return address
   // of that call in the instruction stream.
@@ -957,9 +952,6 @@
     return pc_offset() - label->pos();
   }
 
-  // Mark generator continuation.
-  void RecordGeneratorContinuation();
-
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot(RelocInfo::Mode mode);
 
diff --git a/src/x87/code-stubs-x87.cc b/src/x87/code-stubs-x87.cc
index 0ea919d..f67aea7 100644
--- a/src/x87/code-stubs-x87.cc
+++ b/src/x87/code-stubs-x87.cc
@@ -34,17 +34,6 @@
   __ TailCallRuntime(Runtime::kNewArray);
 }
 
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
-  descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
-  descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                                ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
@@ -292,59 +281,6 @@
   __ ret(0);
 }
 
-
-void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  // With careful management, we won't have to save slot and vector on
-  // the stack. Simply handle the possibly missing case first.
-  // TODO(mvstanton): this code can be more efficient.
-  __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
-         Immediate(isolate()->factory()->the_hole_value()));
-  __ j(equal, &miss);
-  __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
-  __ ret(0);
-
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
-}
-
-
-void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Label miss;
-
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register index = LoadDescriptor::NameRegister();
-  Register scratch = edi;
-  DCHECK(!scratch.is(receiver) && !scratch.is(index));
-  Register result = eax;
-  DCHECK(!result.is(scratch));
-  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
-         result.is(LoadDescriptor::SlotRegister()));
-
-  // StringCharAtGenerator doesn't use the result register until it's passed
-  // the different miss possibilities. If it did, we would have a conflict
-  // when FLAG_vector_ics is true.
-
-  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          RECEIVER_IS_STRING);
-  char_at_generator.GenerateFast(masm);
-  __ ret(0);
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
-
-  __ bind(&miss);
-  PropertyAccessCompiler::TailCallBuiltin(
-      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
-}
-
-
 void RegExpExecStub::Generate(MacroAssembler* masm) {
   // Just jump directly to runtime if native RegExp is not selected at compile
   // time or if regexp entry in generated code is turned off runtime switch or
@@ -436,7 +372,7 @@
   // (8) Is the external string one byte?  If yes, go to (5).
   // (9) Two byte sequential.  Load regexp code for two byte. Go to (E).
   // (10) Short external string or not a string?  If yes, bail out to runtime.
-  // (11) Sliced string.  Replace subject with parent. Go to (1).
+  // (11) Sliced or thin string.  Replace subject with parent. Go to (1).
 
   Label seq_one_byte_string /* 5 */, seq_two_byte_string /* 9 */,
       external_string /* 7 */, check_underlying /* 1 */,
@@ -466,6 +402,7 @@
   // have already been covered.
   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kThinStringTag > kExternalStringTag);
   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
   __ cmp(ebx, Immediate(kExternalStringTag));
@@ -744,11 +681,18 @@
   __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
   __ j(not_zero, &runtime);
 
-  // (11) Sliced string.  Replace subject with parent.  Go to (1).
+  // (11) Sliced or thin string.  Replace subject with parent.  Go to (1).
+  Label thin_string;
+  __ cmp(ebx, Immediate(kThinStringTag));
+  __ j(equal, &thin_string, Label::kNear);
   // Load offset into edi and replace subject string with parent.
   __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
   __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
   __ jmp(&check_underlying);  // Go to (1).
+
+  __ bind(&thin_string);
+  __ mov(eax, FieldOperand(eax, ThinString::kActualOffset));
+  __ jmp(&check_underlying);  // Go to (1).
 #endif  // V8_INTERPRETED_REGEXP
 }
 
@@ -848,9 +792,6 @@
       // Call runtime on identical symbols since we need to throw a TypeError.
       __ cmpb(ecx, Immediate(SYMBOL_TYPE));
       __ j(equal, &runtime_call, Label::kFar);
-      // Call runtime on identical SIMD values since we must throw a TypeError.
-      __ cmpb(ecx, Immediate(SIMD128_VALUE_TYPE));
-      __ j(equal, &runtime_call, Label::kFar);
     }
     __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
     __ ret(0);
@@ -1058,9 +999,11 @@
   if (cc == equal) {
     {
       FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(edx);
-      __ Push(eax);
-      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+      __ Push(esi);
+      __ Call(strict() ? isolate()->builtins()->StrictEqual()
+                       : isolate()->builtins()->Equal(),
+              RelocInfo::CODE_TARGET);
+      __ Pop(esi);
     }
     // Turn true into 0 and false into some non-zero value.
     STATIC_ASSERT(EQUAL == 0);
@@ -1132,8 +1075,7 @@
   // A monomorphic cache hit or an already megamorphic state: invoke the
   // function without changing the state.
   // We don't know if ecx is a WeakCell or a Symbol, but it's harmless to read
-  // at this position in a symbol (see static asserts in
-  // type-feedback-vector.h).
+  // at this position in a symbol (see static asserts in feedback-vector.h).
   Label check_allocation_site;
   __ cmp(edi, FieldOperand(ecx, WeakCell::kValueOffset));
   __ j(equal, &done, Label::kFar);
@@ -1172,7 +1114,7 @@
   __ bind(&megamorphic);
   __ mov(
       FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
-      Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
+      Immediate(FeedbackVector::MegamorphicSentinel(isolate)));
   __ jmp(&done, Label::kFar);
 
   // An uninitialized cache is patched with the function or sentinel to
@@ -1321,7 +1263,7 @@
   __ bind(&extra_checks_or_miss);
   Label uninitialized, miss, not_allocation_site;
 
-  __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
+  __ cmp(ecx, Immediate(FeedbackVector::MegamorphicSentinel(isolate)));
   __ j(equal, &call);
 
   // Check if we have an allocation site.
@@ -1340,7 +1282,7 @@
     __ jmp(&miss);
   }
 
-  __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
+  __ cmp(ecx, Immediate(FeedbackVector::UninitializedSentinel(isolate)));
   __ j(equal, &uninitialized);
 
   // We are going megamorphic. If the feedback is a JSFunction, it is fine
@@ -1350,7 +1292,7 @@
   __ j(not_equal, &miss);
   __ mov(
       FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
-      Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
+      Immediate(FeedbackVector::MegamorphicSentinel(isolate)));
 
   __ bind(&call);
 
@@ -1471,7 +1413,6 @@
   if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
     save_doubles_code = *(save_doubles.GetCode());
   }
-  isolate->set_fp_stubs_generated(true);
 }
 
 
@@ -1832,86 +1773,6 @@
   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
 }
 
-
-// -------------------------------------------------------------------------
-// StringCharFromCodeGenerator
-
-void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
-  // Fast case of Heap::LookupSingleCharacterStringFromCode.
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiShiftSize == 0);
-  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
-  __ test(code_, Immediate(kSmiTagMask |
-                           ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
-  __ j(not_zero, &slow_case_);
-
-  Factory* factory = masm->isolate()->factory();
-  __ Move(result_, Immediate(factory->single_character_string_cache()));
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagSize == 1);
-  STATIC_ASSERT(kSmiShiftSize == 0);
-  // At this point code register contains smi tagged one byte char code.
-  __ mov(result_, FieldOperand(result_,
-                               code_, times_half_pointer_size,
-                               FixedArray::kHeaderSize));
-  __ cmp(result_, factory->undefined_value());
-  __ j(equal, &slow_case_);
-  __ bind(&exit_);
-}
-
-
-void StringCharFromCodeGenerator::GenerateSlow(
-    MacroAssembler* masm,
-    const RuntimeCallHelper& call_helper) {
-  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
-
-  __ bind(&slow_case_);
-  call_helper.BeforeCall(masm);
-  __ push(code_);
-  __ CallRuntime(Runtime::kStringCharFromCode);
-  if (!result_.is(eax)) {
-    __ mov(result_, eax);
-  }
-  call_helper.AfterCall(masm);
-  __ jmp(&exit_);
-
-  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
-}
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
-                                          Register dest,
-                                          Register src,
-                                          Register count,
-                                          Register scratch,
-                                          String::Encoding encoding) {
-  DCHECK(!scratch.is(dest));
-  DCHECK(!scratch.is(src));
-  DCHECK(!scratch.is(count));
-
-  // Nothing to do for zero characters.
-  Label done;
-  __ test(count, count);
-  __ j(zero, &done);
-
-  // Make count the number of bytes to copy.
-  if (encoding == String::TWO_BYTE_ENCODING) {
-    __ shl(count, 1);
-  }
-
-  Label loop;
-  __ bind(&loop);
-  __ mov_b(scratch, Operand(src, 0));
-  __ mov_b(Operand(dest, 0), scratch);
-  __ inc(src);
-  __ inc(dest);
-  __ dec(count);
-  __ j(not_zero, &loop);
-
-  __ bind(&done);
-}
-
-
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
@@ -2497,67 +2358,6 @@
   __ jmp(done);
 }
 
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r0|. Jump to the |miss| label
-// otherwise.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
-                                                      Label* miss,
-                                                      Label* done,
-                                                      Register elements,
-                                                      Register name,
-                                                      Register r0,
-                                                      Register r1) {
-  DCHECK(!elements.is(r0));
-  DCHECK(!elements.is(r1));
-  DCHECK(!name.is(r0));
-  DCHECK(!name.is(r1));
-
-  __ AssertName(name);
-
-  __ mov(r1, FieldOperand(elements, kCapacityOffset));
-  __ shr(r1, kSmiTagSize);  // convert smi to int
-  __ dec(r1);
-
-  // Generate an unrolled loop that performs a few probes before
-  // giving up. Measurements done on Gmail indicate that 2 probes
-  // cover ~93% of loads from dictionaries.
-  for (int i = 0; i < kInlinedProbes; i++) {
-    // Compute the masked index: (hash + i + i * i) & mask.
-    __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
-    __ shr(r0, Name::kHashShift);
-    if (i > 0) {
-      __ add(r0, Immediate(NameDictionary::GetProbeOffset(i)));
-    }
-    __ and_(r0, r1);
-
-    // Scale the index by multiplying by the entry size.
-    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
-    __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3
-
-    // Check if the key is identical to the name.
-    __ cmp(name, Operand(elements,
-                         r0,
-                         times_4,
-                         kElementsStartOffset - kHeapObjectTag));
-    __ j(equal, done);
-  }
-
-  NameDictionaryLookupStub stub(masm->isolate(), elements, r1, r0,
-                                POSITIVE_LOOKUP);
-  __ push(name);
-  __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
-  __ shr(r0, Name::kHashShift);
-  __ push(r0);
-  __ CallStub(&stub);
-
-  __ test(r1, r1);
-  __ j(zero, miss);
-  __ jmp(done);
-}
-
-
 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
   // This stub overrides SometimesSetsUpAFrame() to return false.  That means
   // we cannot call anything that could cause a GC from this stub.
@@ -2834,334 +2634,6 @@
   __ jmp(ecx);  // Return to IC Miss stub, continuation still on stack.
 }
 
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
-  KeyedStoreICStub stub(isolate(), state());
-  stub.GenerateForTrampoline(masm);
-}
-
-// value is on the stack already.
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
-                                       Register key, Register vector,
-                                       Register slot, Register feedback,
-                                       bool is_polymorphic, Label* miss) {
-  // feedback initially contains the feedback array
-  Label next, next_loop, prepare_next;
-  Label load_smi_map, compare_map;
-  Label start_polymorphic;
-  Label pop_and_miss;
-
-  __ push(receiver);
-  // Value, vector and slot are passed on the stack, so no need to save/restore
-  // them.
-
-  Register receiver_map = receiver;
-  Register cached_map = vector;
-
-  // Receiver might not be a heap object.
-  __ JumpIfSmi(receiver, &load_smi_map);
-  __ mov(receiver_map, FieldOperand(receiver, 0));
-  __ bind(&compare_map);
-  __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-
-  // A named keyed store might have a 2 element array, all other cases can count
-  // on an array with at least 2 {map, handler} pairs, so they can go right
-  // into polymorphic array handling.
-  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, &start_polymorphic);
-
-  // found, now call handler.
-  Register handler = feedback;
-  DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister()));
-  __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
-  __ pop(receiver);
-  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ jmp(handler);
-
-  // Polymorphic, we have to loop from 2 to N
-  __ bind(&start_polymorphic);
-  __ push(key);
-  Register counter = key;
-  __ mov(counter, Immediate(Smi::FromInt(2)));
-
-  if (!is_polymorphic) {
-    // If is_polymorphic is false, we may only have a two element array.
-    // Check against length now in that case.
-    __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
-    __ j(greater_equal, &pop_and_miss);
-  }
-
-  __ bind(&next_loop);
-  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
-                                  FixedArray::kHeaderSize));
-  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, &prepare_next);
-  __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
-                               FixedArray::kHeaderSize + kPointerSize));
-  __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
-  __ pop(key);
-  __ pop(receiver);
-  __ jmp(handler);
-
-  __ bind(&prepare_next);
-  __ add(counter, Immediate(Smi::FromInt(2)));
-  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
-  __ j(less, &next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ bind(&pop_and_miss);
-  __ pop(key);
-  __ pop(receiver);
-  __ jmp(miss);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-
-static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
-                                       Register key, Register vector,
-                                       Register slot, Register weak_cell,
-                                       Label* miss) {
-  // The store ic value is on the stack.
-  DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister()));
-
-  // feedback initially contains the feedback array
-  Label compare_smi_map;
-
-  // Move the weak map into the weak_cell register.
-  Register ic_map = weak_cell;
-  __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
-
-  // Receiver might not be a heap object.
-  __ JumpIfSmi(receiver, &compare_smi_map);
-  __ cmp(ic_map, FieldOperand(receiver, 0));
-  __ j(not_equal, miss);
-  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
-                                 FixedArray::kHeaderSize + kPointerSize));
-  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
-  // jump to the handler.
-  __ jmp(weak_cell);
-
-  // In microbenchmarks, it made sense to unroll this code so that the call to
-  // the handler is duplicated for a HeapObject receiver and a Smi receiver.
-  __ bind(&compare_smi_map);
-  __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, miss);
-  __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
-                                 FixedArray::kHeaderSize + kPointerSize));
-  __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
-  // jump to the handler.
-  __ jmp(weak_cell);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
-  GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
-  GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
-                                            Register receiver, Register key,
-                                            Register vector, Register slot,
-                                            Register feedback, Label* miss) {
-  // feedback initially contains the feedback array
-  Label next, next_loop, prepare_next;
-  Label load_smi_map, compare_map;
-  Label transition_call;
-  Label pop_and_miss;
-
-  __ push(receiver);
-  // Value, vector and slot are passed on the stack, so no need to save/restore
-  // them.
-
-  Register receiver_map = receiver;
-  Register cached_map = vector;
-
-  // Receiver might not be a heap object.
-  __ JumpIfSmi(receiver, &load_smi_map);
-  __ mov(receiver_map, FieldOperand(receiver, 0));
-  __ bind(&compare_map);
-
-  // Polymorphic, we have to loop from 0 to N - 1
-  __ push(key);
-  // Current stack layout:
-  // - esp[0]    -- key
-  // - esp[4]    -- receiver
-  // - esp[8]    -- return address
-  // - esp[12]   -- vector
-  // - esp[16]   -- slot
-  // - esp[20]   -- value
-  //
-  // Required stack layout for handler call (see StoreWithVectorDescriptor):
-  // - esp[0]    -- return address
-  // - esp[4]    -- vector
-  // - esp[8]    -- slot
-  // - esp[12]   -- value
-  // - receiver, key, handler in registers.
-  Register counter = key;
-  __ mov(counter, Immediate(Smi::kZero));
-  __ bind(&next_loop);
-  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
-                                  FixedArray::kHeaderSize));
-  __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  __ j(not_equal, &prepare_next);
-  __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
-                                  FixedArray::kHeaderSize + kPointerSize));
-  __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &transition_call);
-  __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
-                                FixedArray::kHeaderSize + 2 * kPointerSize));
-  __ pop(key);
-  __ pop(receiver);
-  __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
-  __ jmp(feedback);
-
-  __ bind(&transition_call);
-  // Current stack layout:
-  // - esp[0]    -- key
-  // - esp[4]    -- receiver
-  // - esp[8]    -- return address
-  // - esp[12]   -- vector
-  // - esp[16]   -- slot
-  // - esp[20]   -- value
-  //
-  // Required stack layout for handler call (see StoreTransitionDescriptor):
-  // - esp[0]    -- return address
-  // - esp[4]    -- vector
-  // - esp[8]    -- slot
-  // - esp[12]   -- value
-  // - receiver, key, map, handler in registers.
-  __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
-                                FixedArray::kHeaderSize + 2 * kPointerSize));
-  __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
-
-  __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
-  // The weak cell may have been cleared.
-  __ JumpIfSmi(cached_map, &pop_and_miss);
-  DCHECK(!cached_map.is(StoreTransitionDescriptor::MapRegister()));
-  __ mov(StoreTransitionDescriptor::MapRegister(), cached_map);
-
-  // Call store transition handler using StoreTransitionDescriptor calling
-  // convention.
-  __ pop(key);
-  __ pop(receiver);
-  // Ensure that the transition handler we are going to call has the same
-  // number of stack arguments which means that we don't have to adapt them
-  // before the call.
-  STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
-  STATIC_ASSERT(StoreTransitionDescriptor::kStackArgumentsCount == 3);
-  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
-                    StoreWithVectorDescriptor::kValue ==
-                StoreTransitionDescriptor::kParameterCount -
-                    StoreTransitionDescriptor::kValue);
-  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
-                    StoreWithVectorDescriptor::kSlot ==
-                StoreTransitionDescriptor::kParameterCount -
-                    StoreTransitionDescriptor::kSlot);
-  STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
-                    StoreWithVectorDescriptor::kVector ==
-                StoreTransitionDescriptor::kParameterCount -
-                    StoreTransitionDescriptor::kVector);
-  __ jmp(feedback);
-
-  __ bind(&prepare_next);
-  __ add(counter, Immediate(Smi::FromInt(3)));
-  __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
-  __ j(less, &next_loop);
-
-  // We exhausted our array of map handler pairs.
-  __ bind(&pop_and_miss);
-  __ pop(key);
-  __ pop(receiver);
-  __ jmp(miss);
-
-  __ bind(&load_smi_map);
-  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
-  __ jmp(&compare_map);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // edx
-  Register key = StoreWithVectorDescriptor::NameRegister();           // ecx
-  Register value = StoreWithVectorDescriptor::ValueRegister();        // eax
-  Register vector = StoreWithVectorDescriptor::VectorRegister();      // ebx
-  Register slot = StoreWithVectorDescriptor::SlotRegister();          // edi
-  Label miss;
-
-  if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
-    // Current stack layout:
-    // - esp[8]    -- value
-    // - esp[4]    -- slot
-    // - esp[0]    -- return address
-    STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
-    STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
-    if (in_frame) {
-      __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
-      // If the vector is not on the stack, then insert the vector beneath
-      // return address in order to prepare for calling handler with
-      // StoreWithVector calling convention.
-      __ push(Operand(esp, 0));
-      __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
-      __ RecordComment("]");
-    } else {
-      __ mov(vector, Operand(esp, 1 * kPointerSize));
-    }
-    __ mov(slot, Operand(esp, 2 * kPointerSize));
-  }
-
-  Register scratch = value;
-  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
-                               FixedArray::kHeaderSize));
-
-  // Is it a weak cell?
-  Label try_array;
-  Label not_array, smi_key, key_okay;
-  __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
-  __ j(not_equal, &try_array);
-  HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
-
-  // Is it a fixed array?
-  __ bind(&try_array);
-  __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &not_array);
-  HandlePolymorphicKeyedStoreCase(masm, receiver, key, vector, slot, scratch,
-                                  &miss);
-
-  __ bind(&not_array);
-  Label try_poly_name;
-  __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
-  __ j(not_equal, &try_poly_name);
-
-  Handle<Code> megamorphic_stub =
-      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
-  __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
-
-  __ bind(&try_poly_name);
-  // We might have a name in feedback, and a fixed array in the next slot.
-  __ cmp(key, scratch);
-  __ j(not_equal, &miss);
-  // If the name comparison succeeded, we know we have a fixed array with
-  // at least one map/handler pair.
-  __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
-                               FixedArray::kHeaderSize + kPointerSize));
-  HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false,
-                             &miss);
-
-  __ bind(&miss);
-  KeyedStoreIC::GenerateMiss(masm);
-}
-
-void CallICTrampolineStub::Generate(MacroAssembler* masm) {
-  __ EmitLoadTypeFeedbackVector(ebx);
-  CallICStub stub(isolate(), state());
-  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
@@ -3497,134 +2969,6 @@
   GenerateCase(masm, FAST_ELEMENTS);
 }
 
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- edi    : target
-  //  -- edx    : new target
-  //  -- esi    : context
-  //  -- esp[0] : return address
-  // -----------------------------------
-  __ AssertFunction(edi);
-  __ AssertReceiver(edx);
-
-  // Verify that the new target is a JSFunction.
-  Label new_object;
-  __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
-  __ j(not_equal, &new_object);
-
-  // Load the initial map and verify that it's in fact a map.
-  __ mov(ecx, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
-  __ JumpIfSmi(ecx, &new_object);
-  __ CmpObjectType(ecx, MAP_TYPE, ebx);
-  __ j(not_equal, &new_object);
-
-  // Fall back to runtime if the target differs from the new target's
-  // initial map constructor.
-  __ cmp(edi, FieldOperand(ecx, Map::kConstructorOrBackPointerOffset));
-  __ j(not_equal, &new_object);
-
-  // Allocate the JSObject on the heap.
-  Label allocate, done_allocate;
-  __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
-  __ lea(ebx, Operand(ebx, times_pointer_size, 0));
-  __ Allocate(ebx, eax, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
-  __ bind(&done_allocate);
-
-  // Initialize the JSObject fields.
-  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
-  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
-         masm->isolate()->factory()->empty_fixed_array());
-  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
-  __ lea(ebx, FieldOperand(eax, JSObject::kHeaderSize));
-
-  // ----------- S t a t e -------------
-  //  -- eax    : result (tagged)
-  //  -- ebx    : result fields (untagged)
-  //  -- edi    : result end (untagged)
-  //  -- ecx    : initial map
-  //  -- esi    : context
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  // Perform in-object slack tracking if requested.
-  Label slack_tracking;
-  STATIC_ASSERT(Map::kNoSlackTracking == 0);
-  __ test(FieldOperand(ecx, Map::kBitField3Offset),
-          Immediate(Map::ConstructionCounter::kMask));
-  __ j(not_zero, &slack_tracking, Label::kNear);
-  {
-    // Initialize all in-object fields with undefined.
-    __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
-    __ InitializeFieldsWithFiller(ebx, edi, edx);
-    __ Ret();
-  }
-  __ bind(&slack_tracking);
-  {
-    // Decrease generous allocation count.
-    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
-    __ sub(FieldOperand(ecx, Map::kBitField3Offset),
-           Immediate(1 << Map::ConstructionCounter::kShift));
-
-    // Initialize the in-object fields with undefined.
-    __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
-    __ neg(edx);
-    __ lea(edx, Operand(edi, edx, times_pointer_size, 0));
-    __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
-    __ InitializeFieldsWithFiller(ebx, edx, edi);
-
-    // Initialize the remaining (reserved) fields with one pointer filler map.
-    __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
-    __ lea(edx, Operand(ebx, edx, times_pointer_size, 0));
-    __ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
-    __ InitializeFieldsWithFiller(ebx, edx, edi);
-
-    // Check if we can finalize the instance size.
-    Label finalize;
-    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
-    __ test(FieldOperand(ecx, Map::kBitField3Offset),
-            Immediate(Map::ConstructionCounter::kMask));
-    __ j(zero, &finalize, Label::kNear);
-    __ Ret();
-
-    // Finalize the instance size.
-    __ bind(&finalize);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ Push(eax);
-      __ Push(ecx);
-      __ CallRuntime(Runtime::kFinalizeInstanceSize);
-      __ Pop(eax);
-    }
-    __ Ret();
-  }
-
-  // Fall back to %AllocateInNewSpace.
-  __ bind(&allocate);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(ebx);
-    __ Push(ecx);
-    __ Push(ebx);
-    __ CallRuntime(Runtime::kAllocateInNewSpace);
-    __ Pop(ecx);
-  }
-  __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
-  __ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
-  STATIC_ASSERT(kHeapObjectTag == 1);
-  __ dec(edi);
-  __ jmp(&done_allocate);
-
-  // Fall back to %NewObject.
-  __ bind(&new_object);
-  __ PopReturnAddressTo(ecx);
-  __ Push(edi);
-  __ Push(edx);
-  __ PushReturnAddressFrom(ecx);
-  __ TailCallRuntime(Runtime::kNewObject);
-}
-
 void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- edi    : function
diff --git a/src/x87/code-stubs-x87.h b/src/x87/code-stubs-x87.h
index 6290cfe..9aeae46 100644
--- a/src/x87/code-stubs-x87.h
+++ b/src/x87/code-stubs-x87.h
@@ -16,16 +16,6 @@
 
 class StringHelper : public AllStatic {
  public:
-  // Generate code for copying characters using the rep movs instruction.
-  // Copies ecx characters from esi to edi. Copying of overlapping regions is
-  // not supported.
-  static void GenerateCopyCharacters(MacroAssembler* masm,
-                                     Register dest,
-                                     Register src,
-                                     Register count,
-                                     Register scratch,
-                                     String::Encoding encoding);
-
   // Compares two flat one byte strings and returns result in eax.
   static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
                                                 Register left, Register right,
@@ -68,14 +58,6 @@
                                      Handle<Name> name,
                                      Register r0);
 
-  static void GeneratePositiveLookup(MacroAssembler* masm,
-                                     Label* miss,
-                                     Label* done,
-                                     Register elements,
-                                     Register name,
-                                     Register r0,
-                                     Register r1);
-
   bool SometimesSetsUpAFrame() override { return false; }
 
  private:
diff --git a/src/x87/codegen-x87.cc b/src/x87/codegen-x87.cc
index 5cda23d..9225935 100644
--- a/src/x87/codegen-x87.cc
+++ b/src/x87/codegen-x87.cc
@@ -212,280 +212,15 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* allocation_memento_found) {
-  Register scratch = edi;
-  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    DCHECK(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(
-        receiver, scratch, allocation_memento_found);
-  }
-
-  // Set transitioned map.
-  __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
-                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  // Return address is on the stack.
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-  DCHECK(value.is(eax));
-  DCHECK(target_map.is(ebx));
-
-  Label loop, entry, convert_hole, gc_required, only_change_map;
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
-  __ j(equal, &only_change_map);
-
-  __ push(eax);
-  __ push(ebx);
-  __ push(esi);
-
-  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
-
-  // Allocate new FixedDoubleArray.
-  // edx: receiver
-  // edi: length of source FixedArray (smi-tagged)
-  AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
-  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
-              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
-
-  // eax: destination FixedDoubleArray
-  // edi: number of elements
-  // edx: receiver
-  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
-         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
-  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
-  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
-  // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
-  __ mov(ebx, eax);
-  __ RecordWriteField(edx, JSObject::kElementsOffset, ebx, edi, kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
-
-  // Prepare for conversion loop.
-  ExternalReference canonical_the_hole_nan_reference =
-      ExternalReference::address_of_the_hole_nan();
-  __ jmp(&entry);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-
-  // Restore registers before jumping into runtime.
-  __ pop(esi);
-  __ pop(ebx);
-  __ pop(eax);
-  __ jmp(fail);
-
-  // Convert and copy elements
-  // esi: source FixedArray
-  __ bind(&loop);
-  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
-  // ebx: current element from source
-  // edi: index of current element
-  __ JumpIfNotSmi(ebx, &convert_hole);
-
-  // Normal smi, convert it to double and store.
-  __ SmiUntag(ebx);
-  __ push(ebx);
-  __ fild_s(Operand(esp, 0));
-  __ pop(ebx);
-  __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
-  __ jmp(&entry);
-
-  // Found hole, store hole_nan_as_double instead.
-  __ bind(&convert_hole);
-
-  if (FLAG_debug_code) {
-    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
-    __ Assert(equal, kObjectFoundInSmiOnlyArray);
-  }
-
-  __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
-  __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
-
-  __ bind(&entry);
-  __ sub(edi, Immediate(Smi::FromInt(1)));
-  __ j(not_sign, &loop);
-
-  // Restore registers.
-  __ pop(esi);
-  __ pop(ebx);
-  __ pop(eax);
-
-  __ bind(&only_change_map);
-  // eax: value
-  // ebx: target map
-  // Set transitioned map.
-  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm,
-    Register receiver,
-    Register key,
-    Register value,
-    Register target_map,
-    AllocationSiteMode mode,
-    Label* fail) {
-  // Return address is on the stack.
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-  DCHECK(value.is(eax));
-  DCHECK(target_map.is(ebx));
-
-  Label loop, entry, convert_hole, gc_required, only_change_map, success;
-
-  if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
-  }
-
-  // Check for empty arrays, which only require a map transition and no changes
-  // to the backing store.
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
-  __ j(equal, &only_change_map);
-
-  __ push(esi);
-  __ push(eax);
-  __ push(edx);
-  __ push(ebx);
-
-  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
-
-  // Allocate new FixedArray.
-  // ebx: length of source FixedDoubleArray (smi-tagged)
-  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
-  __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);
-
-  // eax: destination FixedArray
-  // ebx: number of elements
-  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
-         Immediate(masm->isolate()->factory()->fixed_array_map()));
-  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
-  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-
-  // Allocating heap numbers in the loop below can fail and cause a jump to
-  // gc_required. We can't leave a partly initialized FixedArray behind,
-  // so pessimistically fill it with holes now.
-  Label initialization_loop, initialization_loop_entry;
-  __ jmp(&initialization_loop_entry, Label::kNear);
-  __ bind(&initialization_loop);
-  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
-         masm->isolate()->factory()->the_hole_value());
-  __ bind(&initialization_loop_entry);
-  __ sub(ebx, Immediate(Smi::FromInt(1)));
-  __ j(not_sign, &initialization_loop);
-
-  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
-  __ jmp(&entry);
-
-  // ebx: target map
-  // edx: receiver
-  // Set transitioned map.
-  __ bind(&only_change_map);
-  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ jmp(&success);
-
-  // Call into runtime if GC is required.
-  __ bind(&gc_required);
-  __ pop(ebx);
-  __ pop(edx);
-  __ pop(eax);
-  __ pop(esi);
-  __ jmp(fail);
-
-  // Box doubles into heap numbers.
-  // edi: source FixedDoubleArray
-  // eax: destination FixedArray
-  __ bind(&loop);
-  // ebx: index of current element (smi-tagged)
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
-  __ j(equal, &convert_hole);
-
-  // Non-hole double, copy value into a heap number.
-  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
-  // edx: new heap number
-  __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
-  __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
-  __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
-  __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
-  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
-  __ mov(esi, ebx);
-  __ RecordWriteArray(eax, edx, esi, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ jmp(&entry, Label::kNear);
-
-  // Replace the-hole NaN with the-hole pointer.
-  __ bind(&convert_hole);
-  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
-         masm->isolate()->factory()->the_hole_value());
-
-  __ bind(&entry);
-  __ sub(ebx, Immediate(Smi::FromInt(1)));
-  __ j(not_sign, &loop);
-
-  __ pop(ebx);
-  __ pop(edx);
-  // ebx: target map
-  // edx: receiver
-  // Set transitioned map.
-  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  // Replace receiver's backing store with newly created and filled FixedArray.
-  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
-  __ RecordWriteField(edx, JSObject::kElementsOffset, eax, edi, kDontSaveFPRegs,
-                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-  // Restore registers.
-  __ pop(eax);
-  __ pop(esi);
-
-  __ bind(&success);
-}
-
-
 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                        Factory* factory,
                                        Register string,
                                        Register index,
                                        Register result,
                                        Label* call_runtime) {
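+  // Sliced, cons and thin strings all jump back to indirect_string_loaded
+  // after unwrapping one level of indirection, so the instance type below is
+  // re-fetched for the underlying string on each iteration.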
+  Label indirect_string_loaded;
+  __ bind(&indirect_string_loaded);
+
   // Fetch the instance type of the receiver into result register.
   __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
   __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
@@ -496,17 +231,24 @@
   __ j(zero, &check_sequential, Label::kNear);
 
   // Dispatch on the indirect string shape: slice or cons.
-  Label cons_string;
-  __ test(result, Immediate(kSlicedNotConsMask));
-  __ j(zero, &cons_string, Label::kNear);
+  Label cons_string, thin_string;
+  __ and_(result, Immediate(kStringRepresentationMask));
+  __ cmp(result, Immediate(kConsStringTag));
+  __ j(equal, &cons_string, Label::kNear);
+  __ cmp(result, Immediate(kThinStringTag));
+  __ j(equal, &thin_string, Label::kNear);
 
   // Handle slices.
-  Label indirect_string_loaded;
   __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
   __ SmiUntag(result);
   __ add(index, result);
   __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
-  __ jmp(&indirect_string_loaded, Label::kNear);
+  __ jmp(&indirect_string_loaded);
+
+  // Handle thin strings.
+  __ bind(&thin_string);
+  __ mov(string, FieldOperand(string, ThinString::kActualOffset));
+  __ jmp(&indirect_string_loaded);
 
   // Handle cons strings.
   // Check whether the right hand side is the empty string (i.e. if
@@ -518,10 +260,7 @@
          Immediate(factory->empty_string()));
   __ j(not_equal, call_runtime);
   __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
-
-  __ bind(&indirect_string_loaded);
-  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
-  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+  __ jmp(&indirect_string_loaded);
 
   // Distinguish sequential and external strings. Only these two string
   // representations can reach here (slices and flat cons strings have been
@@ -612,32 +351,24 @@
   return result;
 }
 
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
 
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
-                               MarkingParity* parity) {
-  if (IsYoungSequence(isolate, sequence)) {
-    *age = kNoAgeCodeAge;
-    *parity = NO_MARKING_PARITY;
-  } else {
-    sequence++;  // Skip the kCallOpcode byte
-    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
-        Assembler::kCallTargetAddressOffset;
-    Code* stub = GetCodeFromTargetAddress(target_address);
-    GetCodeAgeAndParity(stub, age, parity);
-  }
+  sequence++;  // Skip the kCallOpcode byte
+  Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+                           Assembler::kCallTargetAddressOffset;
+  Code* stub = GetCodeFromTargetAddress(target_address);
+  return GetAgeOfCodeAgeStub(stub);
 }
 
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
-                                byte* sequence,
-                                Code::Age age,
-                                MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+                                Code::Age age) {
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
     Assembler::FlushICache(isolate, sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age);
     CodePatcher patcher(isolate, sequence, young_length);
     patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
   }
diff --git a/src/x87/deoptimizer-x87.cc b/src/x87/deoptimizer-x87.cc
index 8df66bc..521b69d 100644
--- a/src/x87/deoptimizer-x87.cc
+++ b/src/x87/deoptimizer-x87.cc
@@ -164,8 +164,7 @@
   // Right trim the relocation info to free up remaining space.
   const int delta = reloc_info->length() - new_reloc_length;
   if (delta > 0) {
-    isolate->heap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
-        reloc_info, delta);
+    isolate->heap()->RightTrimFixedArray(reloc_info, delta);
   }
 }
 
@@ -182,7 +181,7 @@
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
   for (int i = 0; i < X87Register::kMaxNumRegisters; ++i) {
-    double double_value = input_->GetDoubleRegister(i);
+    Float64 double_value = input_->GetDoubleRegister(i);
     output_frame->SetDoubleRegister(i, double_value);
   }
 }
diff --git a/src/x87/interface-descriptors-x87.cc b/src/x87/interface-descriptors-x87.cc
index 70b110a..de8ab58 100644
--- a/src/x87/interface-descriptors-x87.cc
+++ b/src/x87/interface-descriptors-x87.cc
@@ -64,16 +64,11 @@
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {ebx};
+  // SharedFunctionInfo, vector, slot index.
+  Register registers[] = {ebx, ecx, edx};
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void FastNewObjectDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  Register registers[] = {edi, edx};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
 void FastNewRestParameterDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {edi};
@@ -144,15 +139,13 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
+void CallICTrampolineDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
-  Register registers[] = {edi, edx};
+  Register registers[] = {edi, eax, edx};
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
-
-void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
+void CallICDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   Register registers[] = {edi, eax, edx, ebx};
   data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -216,14 +209,6 @@
   data->InitializePlatformSpecific(0, nullptr, nullptr);
 }
 
-#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
-  void Allocate##Type##Descriptor::InitializePlatformSpecific(      \
-      CallInterfaceDescriptorData* data) {                          \
-    data->InitializePlatformSpecific(0, nullptr, nullptr);          \
-  }
-SIMD128_TYPES(SIMD128_ALLOC_DESC)
-#undef SIMD128_ALLOC_DESC
-
 void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
diff --git a/src/x87/macro-assembler-x87.cc b/src/x87/macro-assembler-x87.cc
index ee81a68..62588d9 100644
--- a/src/x87/macro-assembler-x87.cc
+++ b/src/x87/macro-assembler-x87.cc
@@ -701,65 +701,6 @@
   cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
 }
 
-void MacroAssembler::CheckFastObjectElements(Register map,
-                                             Label* fail,
-                                             Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  STATIC_ASSERT(FAST_ELEMENTS == 2);
-  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
-  j(below_equal, fail, distance);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
-  j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
-                                          Label* fail,
-                                          Label::Distance distance) {
-  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
-  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-  cmpb(FieldOperand(map, Map::kBitField2Offset),
-       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
-  j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
-    Register maybe_number,
-    Register elements,
-    Register key,
-    Register scratch,
-    Label* fail,
-    int elements_offset) {
-  Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
-  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
-  CheckMap(maybe_number,
-           isolate()->factory()->heap_number_map(),
-           fail,
-           DONT_DO_SMI_CHECK);
-
-  fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
-  jmp(&done, Label::kNear);
-
-  bind(&smi_value);
-  // Value is a smi. Convert to a double and store.
-  // Preserve original value.
-  mov(scratch, maybe_number);
-  SmiUntag(scratch);
-  push(scratch);
-  fild_s(Operand(esp, 0));
-  pop(scratch);
-  bind(&done);
-  fstp_d(FieldOperand(elements, key, times_4,
-                      FixedDoubleArray::kHeaderSize - elements_offset));
-}
-
-
 void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
   cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
 }
@@ -1025,11 +966,10 @@
   }
 }
 
-
-void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
   mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-  mov(vector, FieldOperand(vector, JSFunction::kLiteralsOffset));
-  mov(vector, FieldOperand(vector, LiteralsArray::kFeedbackVectorOffset));
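+  // The feedback vector is now reached through the function's feedback-vector
+  // Cell rather than through the literals array.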
+  mov(vector, FieldOperand(vector, JSFunction::kFeedbackVectorOffset));
+  mov(vector, FieldOperand(vector, Cell::kValueOffset));
 }
 
 
@@ -1595,139 +1535,6 @@
   mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
 }
 
-
-void MacroAssembler::AllocateTwoByteString(Register result,
-                                           Register length,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  DCHECK(kShortSize == 2);
-  // scratch1 = length * 2 + kObjectAlignmentMask.
-  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
-  and_(scratch1, Immediate(~kObjectAlignmentMask));
-
-  // Allocate two byte string in new space.
-  Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1,
-           REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->string_map()));
-  mov(scratch1, length);
-  SmiTag(scratch1);
-  mov(FieldOperand(result, String::kLengthOffset), scratch1);
-  mov(FieldOperand(result, String::kHashFieldOffset),
-      Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
-                                           Register scratch1, Register scratch2,
-                                           Register scratch3,
-                                           Label* gc_required) {
-  // Calculate the number of bytes needed for the characters in the string while
-  // observing object alignment.
-  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  mov(scratch1, length);
-  DCHECK(kCharSize == 1);
-  add(scratch1, Immediate(kObjectAlignmentMask));
-  and_(scratch1, Immediate(~kObjectAlignmentMask));
-
-  // Allocate one-byte string in new space.
-  Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1,
-           REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->one_byte_string_map()));
-  mov(scratch1, length);
-  SmiTag(scratch1);
-  mov(FieldOperand(result, String::kLengthOffset), scratch1);
-  mov(FieldOperand(result, String::kHashFieldOffset),
-      Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, int length,
-                                           Register scratch1, Register scratch2,
-                                           Label* gc_required) {
-  DCHECK(length > 0);
-
-  // Allocate one-byte string in new space.
-  Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
-           gc_required, NO_ALLOCATION_FLAGS);
-
-  // Set the map, length and hash field.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->one_byte_string_map()));
-  mov(FieldOperand(result, String::kLengthOffset),
-      Immediate(Smi::FromInt(length)));
-  mov(FieldOperand(result, String::kHashFieldOffset),
-      Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Label* gc_required) {
-  // Allocate heap number in new space.
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map. The other fields are left uninitialized.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->cons_string_map()));
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map. The other fields are left uninitialized.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->cons_one_byte_string_map()));
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
-                                          Register scratch1,
-                                          Register scratch2,
-                                          Label* gc_required) {
-  // Allocate heap number in new space.
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map. The other fields are left uninitialized.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->sliced_string_map()));
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* gc_required) {
-  // Allocate heap number in new space.
-  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           NO_ALLOCATION_FLAGS);
-
-  // Set the map. The other fields are left uninitialized.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->sliced_one_byte_string_map()));
-}
-
-
 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
                                      Register value, Register scratch,
                                      Label* gc_required) {
@@ -1816,32 +1623,6 @@
   bind(&done);
 }
 
-
-void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
-                                             Register scratch, Label* miss) {
-  // Get the prototype or initial map from the function.
-  mov(result,
-      FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
-
-  // If the prototype or initial map is the hole, don't return it and
-  // simply miss the cache instead. This will allow us to allocate a
-  // prototype object on-demand in the runtime system.
-  cmp(result, Immediate(isolate()->factory()->the_hole_value()));
-  j(equal, miss);
-
-  // If the function does not have an initial map, we're done.
-  Label done;
-  CmpObjectType(result, MAP_TYPE, scratch);
-  j(not_equal, &done, Label::kNear);
-
-  // Get the prototype from the initial map.
-  mov(result, FieldOperand(result, Map::kPrototypeOffset));
-
-  // All done.
-  bind(&done);
-}
-
-
 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
   DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
   call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
@@ -2071,16 +1852,14 @@
   }
 }
 
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
-                                             const ParameterCount& expected,
-                                             const ParameterCount& actual) {
-  Label skip_flooding;
-  ExternalReference last_step_action =
-      ExternalReference::debug_last_step_action_address(isolate());
-  STATIC_ASSERT(StepFrame > StepIn);
-  cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
-  j(less, &skip_flooding);
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual) {
+  Label skip_hook;
+  ExternalReference debug_hook_active =
+      ExternalReference::debug_hook_on_function_call_address(isolate());
+  cmpb(Operand::StaticVariable(debug_hook_active), Immediate(0));
+  j(equal, &skip_hook);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2097,7 +1876,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    CallRuntime(Runtime::kDebugOnFunctionCall);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -2111,7 +1890,7 @@
       SmiUntag(expected.reg());
     }
   }
-  bind(&skip_flooding);
+  bind(&skip_hook);
 }
 
 
@@ -2125,8 +1904,8 @@
   DCHECK(function.is(edi));
   DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
 
-  if (call_wrapper.NeedsDebugStepCheck()) {
-    FloodFunctionIfStepping(function, new_target, expected, actual);
+  if (call_wrapper.NeedsDebugHookCheck()) {
+    CheckDebugHook(function, new_target, expected, actual);
   }
 
   // Clear the new.target register if not given.
@@ -2230,28 +2009,6 @@
   mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
 }
 
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
-    ElementsKind expected_kind,
-    ElementsKind transitioned_kind,
-    Register map_in_out,
-    Register scratch,
-    Label* no_map_match) {
-  DCHECK(IsFastElementsKind(expected_kind));
-  DCHECK(IsFastElementsKind(transitioned_kind));
-
-  // Check that the function's map is the same as the expected cached map.
-  mov(scratch, NativeContextOperand());
-  cmp(map_in_out,
-      ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
-  j(not_equal, no_map_match);
-
-  // Use the transitioned cached map.
-  mov(map_in_out,
-      ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
   // Load the native context from the current context.
   mov(function, NativeContextOperand());
@@ -2606,19 +2363,6 @@
   mov(dst, FieldOperand(dst, offset));
 }
 
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
-    Register instance_type, Register scratch, Label* failure) {
-  if (!scratch.is(instance_type)) {
-    mov(scratch, instance_type);
-  }
-  and_(scratch,
-       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
-  cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
-  j(not_equal, failure);
-}
-
-
 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
                                                            Register object2,
                                                            Register scratch1,
@@ -2642,11 +2386,13 @@
   const int kFlatOneByteStringTag =
       kStringTag | kOneByteStringTag | kSeqStringTag;
   // Interleave bits from both instance types and compare them in one check.
-  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
+  const int kShift = 8;
+  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << kShift));
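+  // Shifting one masked instance type by a full byte keeps the two values in
+  // disjoint bit ranges, so the single cmp below checks both strings at once.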
   and_(scratch1, kFlatOneByteStringMask);
   and_(scratch2, kFlatOneByteStringMask);
-  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
-  cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
+  shl(scratch2, kShift);
+  or_(scratch1, scratch2);
+  cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << kShift));
   j(not_equal, failure);
 }
 
@@ -3009,43 +2755,6 @@
   cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
 }
 
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
-    Register object,
-    Register scratch0,
-    Register scratch1,
-    Label* found) {
-  DCHECK(!scratch1.is(scratch0));
-  Factory* factory = isolate()->factory();
-  Register current = scratch0;
-  Label loop_again, end;
-
-  // scratch contained elements pointer.
-  mov(current, object);
-  mov(current, FieldOperand(current, HeapObject::kMapOffset));
-  mov(current, FieldOperand(current, Map::kPrototypeOffset));
-  cmp(current, Immediate(factory->null_value()));
-  j(equal, &end);
-
-  // Loop based on the map going up the prototype chain.
-  bind(&loop_again);
-  mov(current, FieldOperand(current, HeapObject::kMapOffset));
-  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  CmpInstanceType(current, JS_OBJECT_TYPE);
-  j(below, found);
-  mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
-  DecodeField<Map::ElementsKindBits>(scratch1);
-  cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
-  j(equal, found);
-  mov(current, FieldOperand(current, Map::kPrototypeOffset));
-  cmp(current, Immediate(factory->null_value()));
-  j(not_equal, &loop_again);
-
-  bind(&end);
-}
-
-
 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
   DCHECK(!dividend.is(eax));
   DCHECK(!dividend.is(edx));
diff --git a/src/x87/macro-assembler-x87.h b/src/x87/macro-assembler-x87.h
index 6bb6359..5f0d6bf 100644
--- a/src/x87/macro-assembler-x87.h
+++ b/src/x87/macro-assembler-x87.h
@@ -263,16 +263,6 @@
   // Load the global proxy from the current context.
   void LoadGlobalProxy(Register dst);
 
-  // Conditionally load the cached Array transitioned map of type
-  // transitioned_kind from the native context if the map in register
-  // map_in_out is the cached Array map in the native context of
-  // expected_kind.
-  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
-                                           ElementsKind transitioned_kind,
-                                           Register map_in_out,
-                                           Register scratch,
-                                           Label* no_map_match);
-
   // Load the global function with the given index.
   void LoadGlobalFunction(int index, Register function);
 
@@ -342,9 +332,10 @@
                           const ParameterCount& actual, InvokeFlag flag,
                           const CallWrapper& call_wrapper);
 
-  void FloodFunctionIfStepping(Register fun, Register new_target,
-                               const ParameterCount& expected,
-                               const ParameterCount& actual);
+  // On function call, call into the debugger if necessary.
+  void CheckDebugHook(Register fun, Register new_target,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual);
 
   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
@@ -381,23 +372,6 @@
   // Compare instance type for map.
   void CmpInstanceType(Register map, InstanceType type);
 
-  // Check if a map for a JSObject indicates that the object can have both smi
-  // and HeapObject elements.  Jump to the specified label if it does not.
-  void CheckFastObjectElements(Register map, Label* fail,
-                               Label::Distance distance = Label::kFar);
-
-  // Check if a map for a JSObject indicates that the object has fast smi only
-  // elements.  Jump to the specified label if it does not.
-  void CheckFastSmiElements(Register map, Label* fail,
-                            Label::Distance distance = Label::kFar);
-
-  // Check to see if maybe_number can be stored as a double in
-  // FastDoubleElements. If it can, store it at the index specified by key in
-  // the FastDoubleElements array elements, otherwise jump to fail.
-  void StoreNumberToDoubleElements(Register maybe_number, Register elements,
-                                   Register key, Register scratch, Label* fail,
-                                   int offset = 0);
-
   // Compare an object's map with the specified map.
   void CompareMap(Register obj, Handle<Map> map);
 
@@ -494,7 +468,12 @@
     test(value, Immediate(kSmiTagMask));
     j(not_zero, not_smi_label, distance);
   }
-
+  // Jump if the operand is not a smi.
+  inline void JumpIfNotSmi(Operand value, Label* not_smi_label,
+                           Label::Distance distance = Label::kFar) {
+    test(value, Immediate(kSmiTagMask));
+    j(not_zero, not_smi_label, distance);
+  }
   // Jump if the value cannot be represented by a smi.
   inline void JumpIfNotValidSmiValue(Register value, Register scratch,
                                      Label* on_invalid,
@@ -629,31 +608,6 @@
   void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
                           Label* gc_required, MutableMode mode = IMMUTABLE);
 
-  // Allocate a sequential string. All the header fields of the string object
-  // are initialized.
-  void AllocateTwoByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-  void AllocateOneByteString(Register result, Register length,
-                             Register scratch1, Register scratch2,
-                             Register scratch3, Label* gc_required);
-  void AllocateOneByteString(Register result, int length, Register scratch1,
-                             Register scratch2, Label* gc_required);
-
-  // Allocate a raw cons string object. Only the map field of the result is
-  // initialized.
-  void AllocateTwoByteConsString(Register result, Register scratch1,
-                                 Register scratch2, Label* gc_required);
-  void AllocateOneByteConsString(Register result, Register scratch1,
-                                 Register scratch2, Label* gc_required);
-
-  // Allocate a raw sliced string object. Only the map field of the result is
-  // initialized.
-  void AllocateTwoByteSlicedString(Register result, Register scratch1,
-                                   Register scratch2, Label* gc_required);
-  void AllocateOneByteSlicedString(Register result, Register scratch1,
-                                   Register scratch2, Label* gc_required);
-
   // Allocate and initialize a JSValue wrapper with the specified {constructor}
   // and {value}.
   void AllocateJSValue(Register result, Register constructor, Register value,
@@ -683,14 +637,6 @@
   // |temp| holds |result|'s map when done.
   void GetMapConstructor(Register result, Register map, Register temp);
 
-  // Try to get function prototype of a function and puts the value in
-  // the result register. Checks that the function really is a
-  // function and jumps to the miss label if the fast checks fail. The
-  // function register will be untouched; the other registers may be
-  // clobbered.
-  void TryGetFunctionPrototype(Register function, Register result,
-                               Register scratch, Label* miss);
-
   // ---------------------------------------------------------------------------
   // Runtime calls
 
@@ -868,13 +814,6 @@
   // ---------------------------------------------------------------------------
   // String utilities.
 
-  // Check whether the instance type represents a flat one-byte string. Jump to
-  // the label if not. If the instance type can be scratched specify same
-  // register for both instance type and scratch.
-  void JumpIfInstanceTypeIsNotSequentialOneByte(
-      Register instance_type, Register scratch,
-      Label* on_not_flat_one_byte_string);
-
   // Checks if both objects are sequential one-byte strings, and jumps to label
   // if either is not.
   void JumpIfNotBothSequentialOneByteStrings(
@@ -898,7 +837,7 @@
   }
 
   // Load the type feedback vector from a JavaScript frame.
-  void EmitLoadTypeFeedbackVector(Register vector);
+  void EmitLoadFeedbackVector(Register vector);
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -922,20 +861,6 @@
                                        Register scratch_reg,
                                        Label* no_memento_found);
 
-  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
-                                         Register scratch_reg,
-                                         Label* memento_found) {
-    Label no_memento_found;
-    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
-                                    &no_memento_found);
-    j(equal, memento_found);
-    bind(&no_memento_found);
-  }
-
-  // Jumps to found label if a prototype map has dictionary elements.
-  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
-                                        Register scratch1, Label* found);
-
  private:
   bool generating_stub_;
   bool has_frame_;
diff --git a/src/zone/accounting-allocator.cc b/src/zone/accounting-allocator.cc
index 587e09d..c063063 100644
--- a/src/zone/accounting-allocator.cc
+++ b/src/zone/accounting-allocator.cc
@@ -73,7 +73,9 @@
   Segment* result = GetSegmentFromPool(bytes);
   if (result == nullptr) {
     result = AllocateSegment(bytes);
-    result->Initialize(bytes);
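+    // AllocateSegment() can fail under memory pressure; in that case the
+    // nullptr is simply handed back to the caller.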
+    if (result != nullptr) {
+      result->Initialize(bytes);
+    }
   }
 
   return result;
diff --git a/src/zone/zone-allocator.h b/src/zone/zone-allocator.h
index 1e2862a..5852ca9 100644
--- a/src/zone/zone-allocator.h
+++ b/src/zone/zone-allocator.h
@@ -26,8 +26,10 @@
     typedef zone_allocator<O> other;
   };
 
-  // TODO(bbudge) Remove when V8 updates to MSVS 2015. See crbug.com/603131.
+#ifdef V8_CC_MSVC
+  // MSVS unfortunately requires the default constructor to be defined.
   zone_allocator() : zone_(nullptr) { UNREACHABLE(); }
+#endif
   explicit zone_allocator(Zone* zone) throw() : zone_(zone) {}
   explicit zone_allocator(const zone_allocator& other) throw()
       : zone_(other.zone_) {}
@@ -49,10 +51,15 @@
   size_type max_size() const throw() {
     return std::numeric_limits<int>::max() / sizeof(value_type);
   }
-  void construct(pointer p, const T& val) {
-    new (static_cast<void*>(p)) T(val);
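+  // Generalized construct/destroy in the style of the C++11 allocator
+  // requirements: placement-new with perfect forwarding for any type U.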
+  template <typename U, typename... Args>
+  void construct(U* p, Args&&... args) {
+    void* v_p = const_cast<void*>(static_cast<const void*>(p));
+    new (v_p) U(std::forward<Args>(args)...);
   }
-  void destroy(pointer p) { p->~T(); }
+  template <typename U>
+  void destroy(U* p) {
+    p->~U();
+  }
 
   bool operator==(zone_allocator const& other) const {
     return zone_ == other.zone_;
diff --git a/src/zone/zone-chunk-list.h b/src/zone/zone-chunk-list.h
index f977a0c..8c7e5d9 100644
--- a/src/zone/zone-chunk-list.h
+++ b/src/zone/zone-chunk-list.h
@@ -5,6 +5,7 @@
 #include <stdlib.h>
 
 #include "src/globals.h"
+#include "src/utils.h"
 #include "src/zone/zone.h"
 
 #ifndef V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
diff --git a/src/zone/zone-containers.h b/src/zone/zone-containers.h
index 0aecd98..7a53d4b 100644
--- a/src/zone/zone-containers.h
+++ b/src/zone/zone-containers.h
@@ -36,6 +36,13 @@
   // having the value {def}.
   ZoneVector(size_t size, T def, Zone* zone)
       : std::vector<T, zone_allocator<T>>(size, def, zone_allocator<T>(zone)) {}
+
+  // Constructs a new vector and fills it with the contents of the range
+  // [first, last).
+  template <class InputIt>
+  ZoneVector(InputIt first, InputIt last, Zone* zone)
+      : std::vector<T, zone_allocator<T>>(first, last,
+                                          zone_allocator<T>(zone)) {}
 };
 
 // A wrapper subclass std::deque to make it easy to construct one
diff --git a/src/zone/zone-handle-set.h b/src/zone/zone-handle-set.h
new file mode 100644
index 0000000..641c740
--- /dev/null
+++ b/src/zone/zone-handle-set.h
@@ -0,0 +1,165 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ZONE_ZONE_HANDLE_SET_H_
+#define V8_ZONE_ZONE_HANDLE_SET_H_
+
+#include "src/handles.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
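+// A ZoneHandleSet is a zone-allocated set of handles, encoded in a single
+// tagged word (data_): the empty set and singleton sets are stored inline,
+// while larger sets spill into a sorted, zone-allocated list (see the Tag
+// enum at the bottom of the class).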
+template <typename T>
+class ZoneHandleSet final {
+ public:
+  ZoneHandleSet() : data_(kEmptyTag) {}
+  explicit ZoneHandleSet(Handle<T> handle)
+      : data_(bit_cast<intptr_t>(handle.address()) | kSingletonTag) {
+    DCHECK(IsAligned(bit_cast<intptr_t>(handle.address()), kPointerAlignment));
+  }
+
+  bool is_empty() const { return data_ == kEmptyTag; }
+
+  size_t size() const {
+    if ((data_ & kTagMask) == kEmptyTag) return 0;
+    if ((data_ & kTagMask) == kSingletonTag) return 1;
+    return list()->length();
+  }
+
+  Handle<T> at(size_t i) const {
+    DCHECK_NE(kEmptyTag, data_ & kTagMask);
+    if ((data_ & kTagMask) == kSingletonTag) {
+      DCHECK_EQ(0u, i);
+      return Handle<T>(singleton());
+    }
+    return Handle<T>(list()->at(static_cast<int>(i)));
+  }
+
+  Handle<T> operator[](size_t i) const { return at(i); }
+
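+  // Inserts the handle, keeping the list representation sorted by address so
+  // that set equality can be checked element-wise in operator== below.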
+  void insert(Handle<T> handle, Zone* zone) {
+    T** const value = bit_cast<T**>(handle.address());
+    DCHECK(IsAligned(bit_cast<intptr_t>(value), kPointerAlignment));
+    if ((data_ & kTagMask) == kEmptyTag) {
+      data_ = bit_cast<intptr_t>(value) | kSingletonTag;
+    } else if ((data_ & kTagMask) == kSingletonTag) {
+      if (singleton() == value) return;
+      List* list = new (zone) List(2, zone);
+      if (singleton() < value) {
+        list->Add(singleton(), zone);
+        list->Add(value, zone);
+      } else {
+        list->Add(value, zone);
+        list->Add(singleton(), zone);
+      }
+      DCHECK(IsAligned(bit_cast<intptr_t>(list), kPointerAlignment));
+      data_ = bit_cast<intptr_t>(list) | kListTag;
+    } else {
+      DCHECK_EQ(kListTag, data_ & kTagMask);
+      List const* const old_list = list();
+      for (int i = 0; i < old_list->length(); ++i) {
+        if (old_list->at(i) == value) return;
+        if (old_list->at(i) > value) break;
+      }
+      List* new_list = new (zone) List(old_list->length() + 1, zone);
+      int i = 0;
+      for (; i < old_list->length(); ++i) {
+        if (old_list->at(i) > value) break;
+        new_list->Add(old_list->at(i), zone);
+      }
+      new_list->Add(value, zone);
+      for (; i < old_list->length(); ++i) {
+        new_list->Add(old_list->at(i), zone);
+      }
+      DCHECK_EQ(old_list->length() + 1, new_list->length());
+      DCHECK(IsAligned(bit_cast<intptr_t>(new_list), kPointerAlignment));
+      data_ = bit_cast<intptr_t>(new_list) | kListTag;
+    }
+  }
+
+  bool contains(ZoneHandleSet<T> const& other) const {
+    if (data_ == other.data_) return true;
+    if (data_ == kEmptyTag) return false;
+    if (other.data_ == kEmptyTag) return true;
+    if ((data_ & kTagMask) == kSingletonTag) return false;
+    DCHECK_EQ(kListTag, data_ & kTagMask);
+    if ((other.data_ & kTagMask) == kSingletonTag) {
+      return list()->Contains(other.singleton());
+    }
+    DCHECK_EQ(kListTag, other.data_ & kTagMask);
+    // TODO(bmeurer): Optimize this case.
+    for (int i = 0; i < other.list()->length(); ++i) {
+      if (!list()->Contains(other.list()->at(i))) return false;
+    }
+    return true;
+  }
+
+  void remove(Handle<T> handle, Zone* zone) {
+    // TODO(bmeurer): Optimize this case.
+    ZoneHandleSet<T> that;
+    for (size_t i = 0; i < size(); ++i) {
+      Handle<T> value = at(i);
+      if (value.address() != handle.address()) {
+        that.insert(value, zone);
+      }
+    }
+    std::swap(*this, that);
+  }
+
+  friend bool operator==(ZoneHandleSet<T> const& lhs,
+                         ZoneHandleSet<T> const& rhs) {
+    if (lhs.data_ == rhs.data_) return true;
+    if ((lhs.data_ & kTagMask) == kListTag &&
+        (rhs.data_ & kTagMask) == kListTag) {
+      List const* const lhs_list = lhs.list();
+      List const* const rhs_list = rhs.list();
+      if (lhs_list->length() == rhs_list->length()) {
+        for (int i = 0; i < lhs_list->length(); ++i) {
+          if (lhs_list->at(i) != rhs_list->at(i)) return false;
+        }
+        return true;
+      }
+    }
+    return false;
+  }
+
+  friend bool operator!=(ZoneHandleSet<T> const& lhs,
+                         ZoneHandleSet<T> const& rhs) {
+    return !(lhs == rhs);
+  }
+
+  friend size_t hash_value(ZoneHandleSet<T> const& set) {
+    return static_cast<size_t>(set.data_);
+  }
+
+ private:
+  typedef ZoneList<T**> List;
+
+  List const* list() const {
+    DCHECK_EQ(kListTag, data_ & kTagMask);
+    return bit_cast<List const*>(data_ - kListTag);
+  }
+
+  T** singleton() const {
+    DCHECK_EQ(kSingletonTag, data_ & kTagMask);
+    return bit_cast<T**>(data_ - kSingletonTag);
+  }
+
+  enum Tag : intptr_t {
+    kSingletonTag = 0,
+    kEmptyTag = 1,
+    kListTag = 2,
+    kTagMask = 3
+  };
+
+  STATIC_ASSERT(kTagMask < kPointerAlignment);
+
+  intptr_t data_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_ZONE_ZONE_HANDLE_SET_H_
diff --git a/src/zone/zone.cc b/src/zone/zone.cc
index 7228081..d2dd9ce 100644
--- a/src/zone/zone.cc
+++ b/src/zone/zone.cc
@@ -6,6 +6,7 @@
 
 #include <cstring>
 
+#include "src/utils.h"
 #include "src/v8.h"
 
 #ifdef V8_USE_ADDRESS_SANITIZER
@@ -48,7 +49,8 @@
       limit_(0),
       allocator_(allocator),
       segment_head_(nullptr),
-      name_(name) {
+      name_(name),
+      sealed_(false) {
   allocator_->ZoneCreation(this);
 }
 
@@ -61,16 +63,10 @@
 }
 
 void* Zone::New(size_t size) {
-  // Round up the requested size to fit the alignment.
-  size = RoundUp(size, kAlignment);
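+  // Sealed zones (see Zone::Seal) must not serve any further allocations.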
+  CHECK(!sealed_);
 
-  // If the allocation size is divisible by 8 then we return an 8-byte aligned
-  // address.
-  if (kPointerSize == 4 && kAlignment == 4) {
-    position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
-  } else {
-    DCHECK(kAlignment >= kPointerSize);
-  }
+  // Round up the requested size to fit the alignment.
+  size = RoundUp(size, kAlignmentInBytes);
 
   // Check if the requested size is available without expanding.
   Address result = position_;
@@ -90,7 +86,7 @@
   ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);
 
   // Check that the result has the proper alignment and return it.
-  DCHECK(IsAddressAligned(result, kAlignment, 0));
+  DCHECK(IsAddressAligned(result, kAlignmentInBytes, 0));
   allocation_size_ += size;
   return reinterpret_cast<void*>(result);
 }
@@ -118,9 +114,9 @@
 // of the segment chain. Returns the new segment.
 Segment* Zone::NewSegment(size_t requested_size) {
   Segment* result = allocator_->GetSegment(requested_size);
-  DCHECK_GE(result->size(), requested_size);
-  segment_bytes_allocated_ += result->size();
   if (result != nullptr) {
+    DCHECK_GE(result->size(), requested_size);
+    segment_bytes_allocated_ += result->size();
     result->set_zone(this);
     result->set_next(segment_head_);
     segment_head_ = result;
@@ -131,7 +127,7 @@
 Address Zone::NewExpand(size_t size) {
   // Make sure the requested size is already properly aligned and that
   // there isn't enough room in the Zone to satisfy the request.
-  DCHECK_EQ(size, RoundDown(size, kAlignment));
+  DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes));
   DCHECK(limit_ < position_ ||
          reinterpret_cast<uintptr_t>(limit_) -
                  reinterpret_cast<uintptr_t>(position_) <
@@ -143,7 +139,7 @@
   // is to avoid excessive malloc() and free() overhead.
   Segment* head = segment_head_;
   const size_t old_size = (head == nullptr) ? 0 : head->size();
-  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment;
+  static const size_t kSegmentOverhead = sizeof(Segment) + kAlignmentInBytes;
   const size_t new_size_no_overhead = size + (old_size << 1);
   size_t new_size = kSegmentOverhead + new_size_no_overhead;
   const size_t min_new_size = kSegmentOverhead + size;
@@ -172,7 +168,7 @@
   }
 
   // Recompute 'top' and 'limit' based on the new segment.
-  Address result = RoundUp(segment->start(), kAlignment);
+  Address result = RoundUp(segment->start(), kAlignmentInBytes);
   position_ = result + size;
   // Check for address overflow.
  // (Should not happen since the segment is guaranteed to accommodate
diff --git a/src/zone/zone.h b/src/zone/zone.h
index 4e3b96e..c916972 100644
--- a/src/zone/zone.h
+++ b/src/zone/zone.h
@@ -50,6 +50,9 @@
     return static_cast<T*>(New(length * sizeof(T)));
   }
 
+  // Seals the zone to prevent any further allocation.
+  void Seal() { sealed_ = true; }
+
   // Returns true if more memory has been allocated in zones than
   // the limit allows.
   bool excess_allocation() const {
@@ -63,16 +66,8 @@
   AccountingAllocator* allocator() const { return allocator_; }
 
  private:
-// All pointers returned from New() have this alignment.  In addition, if the
-// object being allocated has a size that is divisible by 8 then its alignment
-// will be 8. ASan requires 8-byte alignment. MIPS also requires 8-byte
-// alignment.
-#if defined(V8_USE_ADDRESS_SANITIZER) || defined(V8_TARGET_ARCH_MIPS)
-  static const size_t kAlignment = 8;
-  STATIC_ASSERT(kPointerSize <= 8);
-#else
-  static const size_t kAlignment = kPointerSize;
-#endif
+  // All pointers returned from New() are 8-byte aligned.
+  static const size_t kAlignmentInBytes = 8;
 
   // Never allocate segments smaller than this size in bytes.
   static const size_t kMinimumSegmentSize = 8 * KB;
@@ -114,6 +109,7 @@
 
   Segment* segment_head_;
   const char* name_;
+  bool sealed_;
 };
 
 // ZoneObject is an abstraction that helps define classes of objects
diff --git a/tools/callstats.html b/tools/callstats.html
index b70d40c..1bdf35e 100644
--- a/tools/callstats.html
+++ b/tools/callstats.html
@@ -10,106 +10,106 @@
     body {
       font-family: arial;
     }
-    
+
     table {
       display: table;
       border-spacing: 0px;
     }
-    
+
     tr {
       border-spacing: 0px;
       padding: 10px;
     }
-    
+
     td,
     th {
       padding: 3px 10px 3px 5px;
     }
-    
+
     .inline {
       display: inline-block;
       vertical-align: top;
     }
-    
+
     h2,
     h3 {
       margin-bottom: 0px;
     }
-    
+
     .hidden {
       display: none;
     }
-    
+
     .view {
       display: table;
     }
-    
+
     .column {
       display: table-cell;
       border-right: 1px black dotted;
       min-width: 200px;
     }
-    
+
     .column .header {
       padding: 0 10px 0 10px
     }
-    
+
     #column {
       display: none;
     }
-   
+
     .list {
       width: 100%;
     }
-    
+
     select {
       width: 100%
     }
-    
+
     .list tbody {
       cursor: pointer;
     }
-    
+
     .list tr:nth-child(even) {
       background-color: #EFEFEF;
     }
-    
+
     .list tr:nth-child(even).selected {
       background-color: #DDD;
     }
-    
+
     .list tr.child {
       display: none;
     }
-    
+
     .list tr.child.visible {
       display: table-row;
     }
-    
+
     .list .child .name {
       padding-left: 20px;
     }
-    
+
     .list .parent td {
       border-top: 1px solid #AAA;
     }
-    
+
     .list .total {
       font-weight: bold
     }
-    
+
     .list tr.parent {
       background-color: #FFF;
     }
-    
+
     .list tr.parent.selected {
       background-color: #DDD;
     }
-    
+
     tr.selected {
       background-color: #DDD;
     }
-    
+
     .codeSearch {
       display: block-inline;
       float: right;
@@ -118,53 +118,53 @@
       width: 1em;
       text-align: center;
     }
-    
+
     .list .position {
       text-align: right;
       display: none;
     }
-    
+
     .list div.toggle {
       cursor: pointer;
     }
-    
+
     #column_0 .position {
       display: table-cell;
     }
-    
+
     #column_0 .name {
       display: table-cell;
     }
-    
+
     .list .name {
       display: none;
       white-space: nowrap;
     }
-    
+
     .value {
       text-align: right;
     }
-    
+
     .selectedVersion {
       font-weight: bold;
     }
-    
+
     #baseline {
       width: auto;
     }
-    
+
     .compareSelector {
       padding-bottom: 20px;
     }
-    
+
     .pageDetailTable tbody {
       cursor: pointer
     }
-    
+
     .pageDetailTable tfoot td {
       border-top: 1px grey solid;
     }
-    
+
     #popover {
       position: absolute;
       transform: translateY(-50%) translateX(40px);
@@ -175,7 +175,7 @@
       display: none;
       white-space: nowrap;
     }
-    
+
     #popover table {
       position: relative;
       z-index: 1;
@@ -186,7 +186,7 @@
       padding: 3px 0px 3px 5px;
       white-space: nowrap;
     }
-    
+
     .popoverArrow {
       background-color: #FFF;
       position: absolute;
@@ -197,17 +197,17 @@
       left: -10px;
       z-index: 0;
     }
-    
+
     #popover .name {
       padding: 5px;
       font-weight: bold;
       text-align: center;
     }
-    
+
     #popover table .compare {
       display: none
     }
-    
+
     #popover table.compare .compare {
       display: table-cell;
     }
@@ -247,7 +247,7 @@
     var selectedPage;
     var baselineVersion;
     var selectedEntry;
-    
+
     // Marker to programmatically replace the defaultData.
     var defaultData = /*default-data-start*/undefined/*default-data-end*/;
 
@@ -295,7 +295,7 @@
       });
       var oldView = $('view');
       oldView.parentNode.replaceChild(view, oldView);
-      
+
       var select = $('baseline');
       removeAllChildren(select);
       select.appendChild(document.createElement('option'));
@@ -348,9 +348,48 @@
       });
     }
 
+    window.addEventListener('popstate', (event) => {
+      popHistoryState(event.state);
+    });
+
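+    // Restore a previously recorded selection (version/page/entry) when the
+    // user navigates with the browser's back/forward buttons.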
+    function popHistoryState(state) {
+      if (!state.version) return false;
+      if (!versions) return false;
+      var version = versions.getByName(state.version);
+      if (!version) return false;
+      var page = version.get(state.page);
+      if (!page) return false;
+      if (!state.entry) {
+        showPage(page);
+      } else {
+        var entry = page.get(state.entry);
+        if (!entry) {
+          showPage(page);
+        } else {
+          showEntry(entry);
+        }
+      }
+      return true;
+    }
+
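+    // Record the current selection (entry or page) in the URL and history
+    // stack so popHistoryState() can restore it later.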
+    function pushHistoryState() {
+      var selection = selectedEntry ? selectedEntry : selectedPage;
+      if (!selection) return;
+      var state = selection.urlParams();
+      // Don't push a history state if it didn't change.
+      if (JSON.stringify(window.history.state) === JSON.stringify(state)) return;
+      var params = "?";
+      for (var pairs of Object.entries(state)) {
+        params += encodeURIComponent(pairs[0]) + "="
+            + encodeURIComponent(pairs[1]) + "&";
+      }
+      window.history.pushState(state, selection.toString(), params);
+    }
+
     function showPage(firstPage) {
-      var changeSelectedEntry = selectedEntry !== undefined 
+      var changeSelectedEntry = selectedEntry !== undefined
           && selectedEntry.page === selectedPage;
+      pushHistoryState();
       selectedPage = firstPage;
       selectedPage.sort();
       showPageInColumn(firstPage, 0);
@@ -367,6 +406,7 @@
         showEntryDetail(selectedPage.getEntry(selectedEntry));
       }
       showImpactList(selectedPage);
+      pushHistoryState();
     }
 
     function showPageInColumn(page, columnIndex) {
@@ -408,12 +448,6 @@
       var tbody = document.createElement('tbody');
       var referencePage = selectedPage;
       page.forEachSorted(selectedPage, (parentEntry, entry, referenceEntry) => {
-        // Filter out entries that do not exist in the first column for the default
-        // view.
-        if (baselineVersion === undefined && referenceEntry &&
-          referenceEntry.time == 0) {
-          return;
-        }
         var tr = document.createElement('tr');
         tbody.appendChild(tr);
         tr.entry = entry;
@@ -435,7 +469,7 @@
           }
           addCodeSearchButton(entry,
               td(tr, entry.name, 'name ' + entry.cssClass()));
-          
+
           diffStatus(
             td(tr, ms(entry.time), 'value time'),
             entry.time, referenceEntry.time);
@@ -445,7 +479,7 @@
           diffStatus(
             td(tr, count(entry.count), 'value count'),
             entry.count, referenceEntry.count);
-        } else if (baselineVersion !== undefined && referenceEntry 
+        } else if (baselineVersion !== undefined && referenceEntry
             && page.version !== baselineVersion) {
+            // Show comparison of an entry that does not exist on the current page.
           tr.entry = new Entry(0, referenceEntry.name);
@@ -477,7 +511,7 @@
             td(tr, count(entry.count, false), 'value count');
           } else {
             td(tr, '-', 'position');
-            td(tr, '-', 'name');
+            td(tr, referenceEntry.name, 'name');
             td(tr, '-', 'value time');
             td(tr, '-', 'value time');
             td(tr, '-', 'value count');
@@ -491,6 +525,11 @@
       });
     }
 
+    function showEntry(entry) {
+      selectedEntry = entry;
+      selectEntry(entry, true);
+    }
+
     function selectEntry(entry, updateSelectedPage) {
       if (updateSelectedPage) {
         entry = selectedPage.version.getEntry(entry);
@@ -501,7 +540,8 @@
       if (needsPageSwitch) showPage(entry.page);
       var childNodes = $('column_0').querySelector('.list tbody').childNodes;
       for (var i = 0; i < childNodes.length; i++) {
-        if (childNodes[i].entry.name == entry.name) {
+        if (childNodes[i].entry !== undefined &&
+            childNodes[i].entry.name == entry.name) {
           rowIndex = i;
           break;
         }
@@ -533,8 +573,9 @@
       showPageDetails(entry);
       showImpactList(entry.page);
       showGraphs(entry.page);
+      pushHistoryState();
     }
-    
+
     function showVersionDetails(entry) {
       var table, tbody, entries;
       table = $('detailView').querySelector('.versionDetailTable');
@@ -633,7 +674,7 @@
         return entry.getTimePercentImpact() > 0.1;
       });
       entries.sort((a, b) => {
-        var cmp = b.getTimePercentImpact() - a.getTimePercentImpact(); 
+        var cmp = b.getTimePercentImpact() - a.getTimePercentImpact();
         if (isCompareView || cmp.toFixed(1) == 0) {
           return b.getTimeImpact() - a.getTimeImpact();
         }
@@ -656,9 +697,9 @@
       });
       table.replaceChild(tbody, table.querySelector('tbody'));
     }
-    
+
     function showGraphs(page) {
-      var groups = page.groups.slice(); 
+      var groups = page.groups.slice();
       // Sort groups by the biggest impact
       groups.sort((a, b) => {
         return b.getTimeImpact() - a.getTimeImpact();
@@ -673,7 +714,7 @@
       showVersionGraph(groups, page);
       showPageVersionGraph(groups, page);
     }
-    
+
     function getGraphDataTable(groups) {
       var dataTable = new google.visualization.DataTable();
       dataTable.addColumn('string', 'Name');
@@ -712,20 +753,20 @@
       if (isDiffView) {
         pages.sort((a, b) => {
           return b.getEntry(selectedGroup).time-
-            a.getEntry(selectedGroup).time; 
+            a.getEntry(selectedGroup).time;
         });
       } else {
         pages.sort((a, b) => {
           return b.getEntry(selectedGroup).timePercent -
-            a.getEntry(selectedGroup).timePercent; 
+            a.getEntry(selectedGroup).timePercent;
         });
       }
       // Sort by sum of squared distance to the average.
       // pages.sort((a, b) => {
-      //   return a.distanceFromTotalPercent() - b.distanceFromTotalPercent(); 
+      //   return a.distanceFromTotalPercent() - b.distanceFromTotalPercent();
       // });
       // Calculate the entries for the pages
-      pages.forEach((page) => { 
+      pages.forEach((page) => {
         row = [page.name];
         groups.forEach((group) => {
           row.push(group.isTotal ? 0 : page.getEntry(group).time);
@@ -743,10 +784,10 @@
       var vs = versions.versions.filter(version => version.enabled);
       vs.sort((a, b) => {
         return b.getEntry(selectedGroup).getTimeImpact() -
-          a.getEntry(selectedGroup).getTimeImpact(); 
+          a.getEntry(selectedGroup).getTimeImpact();
       });
-      // Calculate the entries for the versions 
-      vs.forEach((version) => { 
+      // Calculate the entries for the versions
+      vs.forEach((version) => {
         row = [version.name];
         groups.forEach((group) => {
           row.push(group.isTotal ? 0 : version.getEntry(group).getTimeImpact());
@@ -763,10 +804,10 @@
       var row;
       var vs = versions.getPageVersions(page);
       vs.sort((a, b) => {
-        return b.getEntry(selectedGroup).time - a.getEntry(selectedGroup).time; 
+        return b.getEntry(selectedGroup).time - a.getEntry(selectedGroup).time;
       });
-      // Calculate the entries for the versions 
-      vs.forEach((page) => { 
+      // Calculate the entries for the versions
+      vs.forEach((page) => {
         row = [page.version.name];
         groups.forEach((group) => {
           row.push(group.isTotal ? 0 : page.getEntry(group).time);
@@ -781,8 +822,8 @@
     function renderGraph(title, groups, dataTable, id, isStacked) {
       var isDiffView = baselineVersion !== undefined;
       var formatter = new google.visualization.NumberFormat({
-        suffix: (isDiffView ? 'msΔ' : 'ms'), 
-        negativeColor: 'red', 
+        suffix: (isDiffView ? 'msΔ' : 'ms'),
+        negativeColor: 'red',
         groupingSymbol: "'"
       });
       for (var i = 1; i < dataTable.getNumberOfColumns(); i++) {
@@ -892,7 +933,7 @@
         node('.percent').textContent = percent(entry.timePercent, false);
         node('.percentPerEntry').textContent
             = percent(entry.timePercentPerEntry, false);
-        node('.percentVariance').textContent 
+        node('.percentVariance').textContent
             = percent(entry.timePercentVariancePercent, false);
         node('.count').textContent = count(entry._count, false);
         node('.countVariance').textContent
@@ -1046,12 +1087,42 @@
       handleLoadJSON(JSON.parse(text));
     }
 
+    function getStateFromParams() {
+      var query = window.location.search.substr(1);
+      var result = {};
+      query.split("&").forEach((part) => {
+        var item = part.split("=");
+        var key = decodeURIComponent(item[0]);
+        result[key] = decodeURIComponent(item[1]);
+      });
+      return result;
+    }
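+    // Example (hypothetical URL): "?version=Default&page=Page%201" yields
+    // { version: "Default", page: "Page 1" }.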
+
+    function fixSinglePageJSON(json) {
+      // Try to detect the single-version case, where we're missing the top-level
+      // version object. The incoming JSON is of the form:
+      //    {"Page 1": [... data points ... ], "Page 2": [...], ...}
+      // Instead of the default multi-page JSON:
+      //    {"Version 1": { "Page 1": ..., ...}, "Version 2": {...}, ...}
+      // In this case, insert a single "Default" version as the top-level entry.
+      var firstProperty = (object) => {
+        for (var key in object) return key;
+      };
+      var maybePage = json[firstProperty(json)];
+      if (!Array.isArray(maybePage)) return json;
+      return {"Default": json}
+    }
+
     function handleLoadJSON(json) {
+      json = fixSinglePageJSON(json);
+      var state = getStateFromParams();
       pages = new Pages();
       versions = Versions.fromJSON(json);
       initialize()
       showPage(versions.versions[0].pages[0]);
-      selectEntry(selectedPage.total);
+      if (!popHistoryState(state)) {
+        selectEntry(selectedPage.total);
+      }
     }
 
     function handleToggleGroup(event) {
@@ -1188,7 +1259,10 @@
       }
       get(index) {
         return this.versions[index]
-      };
+      }
+      getByName(name) {
+        return this.versions.find((each) => each.name == name);
+      }
       forEach(f) {
         this.versions.forEach(f);
       }
@@ -1298,7 +1372,7 @@
         return Math.sqrt(sum);
       }
       getTotalTimeVariancePercent(name, showDiff) {
-        return this.getTotalTimeVariance(name, showDiff) / 
+        return this.getTotalTimeVariance(name, showDiff) /
           this.getTotalTime(name, showDiff) * 100;
       }
       getTotalCount(name, showDiff) {
@@ -1329,7 +1403,7 @@
       version.sort();
       return version;
     }
-    
+
     class Pages extends Map {
       get(name) {
         if (name.indexOf('www.') == 0) {
@@ -1364,7 +1438,9 @@
           this.total,
           Group.groups.get('ic').entry(),
           Group.groups.get('optimize').entry(),
+          Group.groups.get('compile-background').entry(),
           Group.groups.get('compile').entry(),
+          Group.groups.get('parse-background').entry(),
           Group.groups.get('parse').entry(),
           Group.groups.get('callback').entry(),
           Group.groups.get('api').entry(),
@@ -1380,6 +1456,12 @@
         });
         this.version = version;
       }
+      toString() {
+        return this.version.name + ": " + this.name;
+      }
+      urlParams() {
+        return { version: this.version.name, page: this.name};
+      }
       add(entry) {
         // Ignore accidentally added Group entries.
         if (entry.name.startsWith(GroupedEntry.prefix)) return;
@@ -1437,7 +1519,7 @@
         var sum = 0;
         this.groups.forEach(group => {
           if (group == this.total) return;
-          var value = group.getTimePercentImpact() - 
+          var value = group.getTimePercentImpact() -
               this.getEntry(group).timePercent;
           sum += value * value;
         });
@@ -1473,6 +1555,11 @@
         this.parent = undefined;
         this.isTotal = false;
       }
+      urlParams() {
+        var params = this.page.urlParams();
+        params.entry = this.name;
+        return params;
+      }
       getCompareWithBaseline(value, property) {
         if (baselineVersion == undefined) return value;
         var baselineEntry = baselineVersion.getEntry(this);
@@ -1542,7 +1629,7 @@
       return new Entry(position, ...data);
     }
 
-    class Group { 
+    class Group {
       constructor(name, regexp, color) {
         this.name = name;
         this.regexp = regexp;
@@ -1554,19 +1641,27 @@
     Group.groups = new Map();
     Group.add = function(name, group) {
       this.groups.set(name, group);
+      return group;
     }
     Group.add('total', new Group('Total', /.*Total.*/, '#BBB'));
-    Group.add('ic', new Group('IC', /.*IC.*/, "#3366CC"));
+    Group.add('ic', new Group('IC', /.*IC_.*/, "#3366CC"));
     Group.add('optimize', new Group('Optimize',
         /StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*/, "#DC3912"));
-    Group.add('compile', new Group('Compile', /.*Compile.*/, "#FFAA00"));
+    Group.add('compile-background', new Group('Compile-Background',
+        /(.*CompileBackground.*)/, "#b9a720"));
+    Group.add('compile', new Group('Compile',
+        /(^Compile.*)|(.*_Compile.*)/, "#FFAA00"));
+    Group.add('parse-background',
+        new Group('Parse-Background', /.*ParseBackground.*/, "#af744d"));
     Group.add('parse', new Group('Parse', /.*Parse.*/, "#FF6600"));
-    Group.add('callback', new Group('Callback', /.*Callback.*/, "#109618"));
+    Group.add('callback', new Group('Blink C++', /.*Callback.*/, "#109618"));
     Group.add('api', new Group('API', /.*API.*/, "#990099"));
     Group.add('gc', new Group('GC', /GC|AllocateInTargetSpace/, "#0099C6"));
     Group.add('javascript', new Group('JavaScript', /JS_Execution/, "#DD4477"));
-    Group.add('runtime', new Group('Runtime', /.*/, "#88BB00"));
-    Group.add('unclassified', new Group('Unclassified', /.*/, "#000"));
+    Group.add('runtime', new Group('V8 C++', /.*/, "#88BB00"));
+    var group =
+      Group.add('unclassified', new Group('Unclassified', /.*/, "#000"));
+    group.enabled = false;
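+    // As in the groups list of callstats.py, entries are classified by the
+    // first matching group, so a hypothetical "CompileBackgroundScript"
+    // entry lands in Compile-Background rather than Compile, and the /.*/
+    // catch-alls only see what no earlier group matched.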
 
     class GroupedEntry extends Entry {
       constructor(group) {
@@ -1587,24 +1682,29 @@
         return true;
       }
       forEach(fun) {
-        if (baselineVersion === undefined) {
-          this.entries.forEach(fun);
-          return;
-        }
-        // If we have a baslineVersion to compare against show also all entries
-        // from the other group.
-        var tmpEntries = baselineVersion.getEntry(this)
-          .entries.filter((entry) => {
-            return this.page.get(entry.name) == undefined
-          });
+        // Also show all entries that appear in at least one version.
+        var dummyEntryNames = new Set();
+        versions.forEach((version) => {
+          var groupEntry = version.getEntry(this);
+          if (groupEntry != this) {
+            for (var entry of groupEntry.entries) {
+              if (this.page.get(entry.name) == undefined) {
+                dummyEntryNames.add(entry.name);
+              }
+            }
+          }
+        });
+        var tmpEntries = [];
+        for (var name of dummyEntryNames) {
+          var tmpEntry = new Entry(0, name, 0, 0, 0, 0, 0, 0);
+          tmpEntry.page = this.page;
+          tmpEntries.push(tmpEntry);
+        }
+
+        // Concatenate our real entries.
+        tmpEntries = tmpEntries.concat(this.entries);
 
         // The compared entries are sorted by absolute impact.
-        tmpEntries = tmpEntries.map((entry) => {
-          var tmpEntry = new Entry(0, entry.name, 0, 0, 0, 0, 0, 0);
-          tmpEntry.page = this.page;
-          return tmpEntry;
-        });
-        tmpEntries = tmpEntries.concat(this.entries);
         tmpEntries.sort((a, b) => {
           return a.time - b.time
         });
@@ -1702,14 +1802,14 @@
         better on this measurement.
       </div>
     </div>
-    
+
     <div id="versionSelector" class="inline toggleContentVisibility">
       <h2>Versions</h2>
       <div class="content hidden">
         <ul></ul>
       </div>
     </div>
-    
+
     <div id="pageSelector" class="inline toggleContentVisibility">
       <h2>Pages</h2>
       <div class="content hidden">
diff --git a/tools/callstats.py b/tools/callstats.py
index 262f9a6..7556eb4 100755
--- a/tools/callstats.py
+++ b/tools/callstats.py
@@ -126,25 +126,28 @@
   onLoad(window.location.href);
 })();"""
 
-def get_chrome_flags(js_flags, user_data_dir):
+def get_chrome_flags(js_flags, user_data_dir, arg_delimiter=""):
   return [
       "--no-default-browser-check",
       "--no-sandbox",
       "--disable-translate",
       "--enable-benchmarking",
-      "--js-flags={}".format(js_flags),
+      "--enable-stats-table",
+      "--js-flags={}{}{}".format(arg_delimiter, js_flags, arg_delimiter),
       "--no-first-run",
-      "--user-data-dir={}".format(user_data_dir),
+      "--user-data-dir={}{}{}".format(arg_delimiter, user_data_dir,
+                                      arg_delimiter),
     ]
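+# Example (hypothetical value): passing arg_delimiter='"' turns the flag into
+#   --js-flags="--runtime-call-stats"
+# so the printed debugging command can be copy-pasted into a shell.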
 
-def get_chrome_replay_flags(args):
+def get_chrome_replay_flags(args, arg_delimiter=""):
   http_port = 4080 + args.port_offset
   https_port = 4443 + args.port_offset
   return [
-      "--host-resolver-rules=MAP *:80 localhost:%s, "  \
-                            "MAP *:443 localhost:%s, " \
-                            "EXCLUDE localhost" % (
-                                http_port, https_port),
+      "--host-resolver-rules=%sMAP *:80 localhost:%s, "  \
+                              "MAP *:443 localhost:%s, " \
+                              "EXCLUDE localhost%s" % (
+                               arg_delimiter, http_port, https_port,
+                               arg_delimiter),
       "--ignore-certificate-errors",
       "--disable-seccomp-sandbox",
       "--disable-web-security",
@@ -174,7 +177,7 @@
           user_data_dir = args.user_data_dir
         else:
           user_data_dir = tempfile.mkdtemp(prefix="chr_")
-        js_flags = "--runtime-call-stats"
+        js_flags = "--runtime-call-stats --noconcurrent-recompilation"
         if args.replay_wpr: js_flags += " --allow-natives-syntax"
         if args.js_flags: js_flags += " " + args.js_flags
         chrome_flags = get_chrome_flags(js_flags, user_data_dir)
@@ -295,10 +298,10 @@
     print("    "+site['url'])
   print("- " * 40)
   print("Launch chromium with the following commands for debugging:")
-  flags = get_chrome_flags("'--runtime-call-stats --allow-natives-syntax'",
-                           "/var/tmp/`date +%s`")
-  flags += get_chrome_replay_flags(args)
-  print("    $CHROMIUM_DIR/out/Release/chomium " + (" ".join(flags)) + " <URL>")
+  flags = get_chrome_flags("--runtime-call-stats --allow-natives-syntax",
+                           "/var/tmp/`date +%s`", '"')
+  flags += get_chrome_replay_flags(args, "'")
+  print("    $CHROMIUM_DIR/out/Release/chrome " + (" ".join(flags)) + " <URL>")
   print("- " * 40)
   replay_server = start_replay_server(args, sites, discard_output=False)
   try:
@@ -343,10 +346,12 @@
   groups = [];
   if args.aggregate:
     groups = [
-        ('Group-IC', re.compile(".*IC.*")),
+        ('Group-IC', re.compile(".*IC_.*")),
         ('Group-Optimize',
          re.compile("StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*")),
-        ('Group-Compile', re.compile(".*Compile.*")),
+        ('Group-CompileBackground', re.compile("(.*CompileBackground.*)")),
+        ('Group-Compile', re.compile("(^Compile.*)|(.*_Compile.*)")),
+        ('Group-ParseBackground', re.compile(".*ParseBackground.*")),
         ('Group-Parse', re.compile(".*Parse.*")),
         ('Group-Callback', re.compile(".*Callback.*")),
         ('Group-API', re.compile(".*API.*")),
@@ -385,12 +390,26 @@
           entries[group_name]['count'] += count
           break
     # Calculate the V8-Total (all groups except Callback)
-    total_v8 = { 'time': 0, 'count': 0 }
+    group_data = { 'time': 0, 'count': 0 }
     for group_name, regexp in groups:
       if group_name == 'Group-Callback': continue
-      total_v8['time'] += entries[group_name]['time']
-      total_v8['count'] += entries[group_name]['count']
-    entries['Group-Total-V8'] = total_v8
+      group_data['time'] += entries[group_name]['time']
+      group_data['count'] += entries[group_name]['count']
+    entries['Group-Total-V8'] = group_data
+    # Calculate the Parse-Total group
+    group_data = { 'time': 0, 'count': 0 }
+    for group_name, regexp in groups:
+      if not group_name.startswith('Group-Parse'): continue
+      group_data['time'] += entries[group_name]['time']
+      group_data['count'] += entries[group_name]['count']
+    entries['Group-Parse-Total'] = group_data
+    # Calculate the Compile-Total group
+    group_data = { 'time': 0, 'count': 0 }
+    for group_name, regexp in groups:
+      if not group_name.startswith('Group-Compile'): continue
+      group_data['time'] += entries[group_name]['time']
+      group_data['count'] += entries[group_name]['count']
+    entries['Group-Compile-Total'] = group_data
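+    # Group-Parse-Total therefore covers Group-Parse plus
+    # Group-ParseBackground, and Group-Compile-Total covers Group-Compile
+    # plus Group-CompileBackground.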
     # Append the sums as single entries to domain.
     for key in entries:
       if key not in domain: domain[key] = { 'time_list': [], 'count_list': [] }
diff --git a/tools/clang/CMakeLists.txt b/tools/clang/CMakeLists.txt
index f7c93c3..addcb56 100644
--- a/tools/clang/CMakeLists.txt
+++ b/tools/clang/CMakeLists.txt
@@ -19,15 +19,21 @@
 endif()
 
 include_directories("${CMAKE_SOURCE_DIR}/include"
-                    "${CMAKE_SOURCE_DIR}/tools/clang/include"
                     "${CMAKE_BINARY_DIR}/include"
                     "${CMAKE_BINARY_DIR}/tools/clang/include")
 
 link_directories("${CMAKE_SOURCE_DIR}/lib"
-                 "${CMAKE_SOURCE_DIR}/tools/clang/lib"
                  "${CMAKE_BINARY_DIR}/lib"
                  "${CMAKE_BINARY_DIR}/tools/clang/lib")
 
+if (DEFINED LLVM_EXTERNAL_CLANG_SOURCE_DIR)
+  include_directories("${LLVM_EXTERNAL_CLANG_SOURCE_DIR}/include")
+  link_directories("${LLVM_EXTERNAL_CLANG_SOURCE_DIR}/lib")
+else ()
+  include_directories("${CMAKE_SOURCE_DIR}/tools/clang/include")
+  link_directories("${CMAKE_SOURCE_DIR}/tools/clang/lib")
+endif ()
+
 # Tests for all enabled tools can be run by building this target.
 add_custom_target(cr-check-all COMMAND ${CMAKE_CTEST_COMMAND} -V)
 
diff --git a/tools/clang/base_bind_rewriters/CMakeLists.txt b/tools/clang/base_bind_rewriters/CMakeLists.txt
index 2939061..7abeb72 100644
--- a/tools/clang/base_bind_rewriters/CMakeLists.txt
+++ b/tools/clang/base_bind_rewriters/CMakeLists.txt
@@ -2,8 +2,11 @@
   BitReader
   MCParser
   Option
+  Support
   X86AsmParser
   X86CodeGen
+  X86Desc
+  X86Info
   )
 
 add_llvm_executable(base_bind_rewriters
@@ -23,6 +26,7 @@
   clangSema
   clangSerialization
   clangTooling
+  clangToolingCore
   )
 
 cr_install(TARGETS base_bind_rewriters RUNTIME DESTINATION bin)
diff --git a/tools/clang/blink_gc_plugin/BlinkGCPlugin.cpp b/tools/clang/blink_gc_plugin/BlinkGCPlugin.cpp
index d60d73f..d263a8c 100644
--- a/tools/clang/blink_gc_plugin/BlinkGCPlugin.cpp
+++ b/tools/clang/blink_gc_plugin/BlinkGCPlugin.cpp
@@ -23,19 +23,20 @@
 
  protected:
   // Overridden from PluginASTAction:
-  virtual std::unique_ptr<ASTConsumer> CreateASTConsumer(
-      CompilerInstance& instance,
-      llvm::StringRef ref) {
+  std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance& instance,
+                                                 llvm::StringRef ref) override {
     return llvm::make_unique<BlinkGCPluginConsumer>(instance, options_);
   }
 
-  virtual bool ParseArgs(const CompilerInstance&,
-                         const std::vector<std::string>& args) {
+  bool ParseArgs(const CompilerInstance&,
+                 const std::vector<std::string>& args) override {
     for (const auto& arg : args) {
       if (arg == "dump-graph") {
         options_.dump_graph = true;
       } else if (arg == "warn-unneeded-finalizer") {
         options_.warn_unneeded_finalizer = true;
+      } else if (arg == "use-chromium-style-naming") {
+        options_.use_chromium_style_naming = true;
       } else {
         llvm::errs() << "Unknown blink-gc-plugin argument: " << arg << "\n";
         return false;
diff --git a/tools/clang/blink_gc_plugin/BlinkGCPluginConsumer.cpp b/tools/clang/blink_gc_plugin/BlinkGCPluginConsumer.cpp
index ba5ba4a..c3e277a 100644
--- a/tools/clang/blink_gc_plugin/BlinkGCPluginConsumer.cpp
+++ b/tools/clang/blink_gc_plugin/BlinkGCPluginConsumer.cpp
@@ -75,6 +75,9 @@
 
   // Ignore GC implementation files.
   options_.ignored_directories.push_back("/heap/");
+
+  if (!options_.use_chromium_style_naming)
+    Config::UseLegacyNames();
 }
 
 void BlinkGCPluginConsumer::HandleTranslationUnit(ASTContext& context) {
@@ -143,7 +146,7 @@
 
     // Force parsing and AST building of the yet-uninstantiated function
     // template trace method bodies.
-    clang::LateParsedTemplate* lpt = sema.LateParsedTemplateMap[fd];
+    clang::LateParsedTemplate* lpt = sema.LateParsedTemplateMap[fd].get();
     sema.LateTemplateParser(sema.OpaqueParser, *lpt);
   }
 }
diff --git a/tools/clang/blink_gc_plugin/BlinkGCPluginOptions.h b/tools/clang/blink_gc_plugin/BlinkGCPluginOptions.h
index b941f08..baaa2ff 100644
--- a/tools/clang/blink_gc_plugin/BlinkGCPluginOptions.h
+++ b/tools/clang/blink_gc_plugin/BlinkGCPluginOptions.h
@@ -10,11 +10,10 @@
 #include <vector>
 
 struct BlinkGCPluginOptions {
-  BlinkGCPluginOptions()
-      : dump_graph(false),
-        warn_unneeded_finalizer(false) {}
-  bool dump_graph;
-  bool warn_unneeded_finalizer;
+  bool dump_graph = false;
+  bool warn_unneeded_finalizer = false;
+  // TODO(https://crbug.com/675879): Clean up after the Blink rename.
+  bool use_chromium_style_naming = false;
   std::set<std::string> ignored_classes;
   std::set<std::string> checked_namespaces;
   std::vector<std::string> ignored_directories;
diff --git a/tools/clang/blink_gc_plugin/CMakeLists.txt b/tools/clang/blink_gc_plugin/CMakeLists.txt
index 009807b..66549ed 100644
--- a/tools/clang/blink_gc_plugin/CMakeLists.txt
+++ b/tools/clang/blink_gc_plugin/CMakeLists.txt
@@ -30,6 +30,11 @@
   endforeach()
   set_property(TARGET clang APPEND PROPERTY SOURCES ${absolute_sources})
 
+  # TODO(https://crbug.com/675879): Clean up after the Blink rename.
+  cr_add_test(blink_gc_plugin_legacy_test
+    python tests/legacy_naming/test.py
+    ${CMAKE_BINARY_DIR}/bin/clang
+    )
   cr_add_test(blink_gc_plugin_test
     python tests/test.py
     ${CMAKE_BINARY_DIR}/bin/clang
@@ -40,6 +45,12 @@
 
   cr_install(TARGETS "lib${LIBRARYNAME}" LIBRARY DESTINATION lib)
 
+  # TODO(https://crbug.com/675879): Clean up after the Blink rename.
+  cr_add_test(blink_gc_plugin_legacy_test
+    python tests/legacy_naming/test.py
+    ${CMAKE_BINARY_DIR}/bin/clang
+    $<TARGET_FILE:lib${LIBRARYNAME}>
+    )
   cr_add_test(blink_gc_plugin_test
     python tests/test.py
     ${CMAKE_BINARY_DIR}/bin/clang
diff --git a/tools/clang/blink_gc_plugin/CheckFieldsVisitor.cpp b/tools/clang/blink_gc_plugin/CheckFieldsVisitor.cpp
index 396ed95..05419da 100644
--- a/tools/clang/blink_gc_plugin/CheckFieldsVisitor.cpp
+++ b/tools/clang/blink_gc_plugin/CheckFieldsVisitor.cpp
@@ -47,6 +47,14 @@
   invalid_fields_.push_back(std::make_pair(current_, kMemberInUnmanaged));
 }
 
+void CheckFieldsVisitor::AtIterator(Iterator* edge) {
+  if (!managed_host_)
+    return;
+
+  if (edge->IsUnsafe())
+    invalid_fields_.push_back(std::make_pair(current_, kIteratorToGCManaged));
+}
+
 void CheckFieldsVisitor::AtValue(Value* edge) {
   // TODO: what should we do to check unions?
   if (edge->value()->record()->isUnion())
diff --git a/tools/clang/blink_gc_plugin/CheckFieldsVisitor.h b/tools/clang/blink_gc_plugin/CheckFieldsVisitor.h
index ef806f2..c6ddd5e 100644
--- a/tools/clang/blink_gc_plugin/CheckFieldsVisitor.h
+++ b/tools/clang/blink_gc_plugin/CheckFieldsVisitor.h
@@ -9,7 +9,6 @@
 
 #include "Edge.h"
 
-struct BlinkGCPluginOptions;
 class FieldPoint;
 
 // This visitor checks that the fields of a class are "well formed".
@@ -29,7 +28,8 @@
     kMemberToGCUnmanaged,
     kMemberInUnmanaged,
     kPtrFromHeapToStack,
-    kGCDerivedPartObject
+    kGCDerivedPartObject,
+    kIteratorToGCManaged,
   };
 
   using Errors = std::vector<std::pair<FieldPoint*, Error>>;
@@ -43,6 +43,7 @@
   void AtMember(Member* edge) override;
   void AtValue(Value* edge) override;
   void AtCollection(Collection* edge) override;
+  void AtIterator(Iterator*) override;
 
  private:
   Error InvalidSmartPtr(Edge* ptr);
diff --git a/tools/clang/blink_gc_plugin/Config.cpp b/tools/clang/blink_gc_plugin/Config.cpp
index 0a6d5b7..bb32ad4 100644
--- a/tools/clang/blink_gc_plugin/Config.cpp
+++ b/tools/clang/blink_gc_plugin/Config.cpp
@@ -10,6 +10,51 @@
 
 using namespace clang;
 
+// Legacy names to be removed after Blink rename:
+namespace legacy {
+const char kCreateName[] = "create";
+const char kTraceName[] = "trace";
+const char kTraceImplName[] = "traceImpl";
+const char kFinalizeName[] = "finalizeGarbageCollectedObject";
+const char kTraceAfterDispatchName[] = "traceAfterDispatch";
+const char kTraceAfterDispatchImplName[] = "traceAfterDispatchImpl";
+const char kRegisterWeakMembersName[] = "registerWeakMembers";
+const char kAdjustAndMarkName[] = "adjustAndMark";
+const char kIsHeapObjectAliveName[] = "isHeapObjectAlive";
+}  // namespace legacy
+
+const char kNewOperatorName[] = "operator new";
+const char* kCreateName = "Create";
+const char* kTraceName = "Trace";
+const char* kTraceImplName = "TraceImpl";
+const char* kFinalizeName = "FinalizeGarbageCollectedObject";
+const char* kTraceAfterDispatchName = "TraceAfterDispatch";
+const char* kTraceAfterDispatchImplName = "TraceAfterDispatchImpl";
+const char* kRegisterWeakMembersName = "RegisterWeakMembers";
+const char kHeapAllocatorName[] = "HeapAllocator";
+const char kTraceIfNeededName[] = "TraceIfNeeded";
+const char kVisitorDispatcherName[] = "VisitorDispatcher";
+const char kVisitorVarName[] = "visitor";
+const char* kAdjustAndMarkName = "AdjustAndMark";
+const char* kIsHeapObjectAliveName = "IsHeapObjectAlive";
+const char kIsEagerlyFinalizedName[] = "IsEagerlyFinalizedMarker";
+const char kConstIteratorName[] = "const_iterator";
+const char kIteratorName[] = "iterator";
+const char kConstReverseIteratorName[] = "const_reverse_iterator";
+const char kReverseIteratorName[] = "reverse_iterator";
+
+void Config::UseLegacyNames() {
+  kCreateName = legacy::kCreateName;
+  kTraceName = legacy::kTraceName;
+  kTraceImplName = legacy::kTraceImplName;
+  kFinalizeName = legacy::kFinalizeName;
+  kTraceAfterDispatchName = legacy::kTraceAfterDispatchName;
+  kTraceAfterDispatchImplName = legacy::kTraceAfterDispatchImplName;
+  kRegisterWeakMembersName = legacy::kRegisterWeakMembersName;
+  kAdjustAndMarkName = legacy::kAdjustAndMarkName;
+  kIsHeapObjectAliveName = legacy::kIsHeapObjectAliveName;
+}
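+// UseLegacyNames() is called from BlinkGCPluginConsumer when the
+// "use-chromium-style-naming" plugin argument is absent, restoring the
+// pre-rename lower-case names declared in namespace legacy above.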
+
 bool Config::IsTemplateInstantiation(CXXRecordDecl* record) {
   ClassTemplateSpecializationDecl* spec =
       dyn_cast<clang::ClassTemplateSpecializationDecl>(record);
diff --git a/tools/clang/blink_gc_plugin/Config.h b/tools/clang/blink_gc_plugin/Config.h
index 8f494bb..2ab933f 100644
--- a/tools/clang/blink_gc_plugin/Config.h
+++ b/tools/clang/blink_gc_plugin/Config.h
@@ -17,24 +17,30 @@
 #include "clang/AST/AST.h"
 #include "clang/AST/Attr.h"
 
-const char kNewOperatorName[] = "operator new";
-const char kCreateName[] = "create";
-const char kTraceName[] = "trace";
-const char kTraceImplName[] = "traceImpl";
-const char kFinalizeName[] = "finalizeGarbageCollectedObject";
-const char kTraceAfterDispatchName[] = "traceAfterDispatch";
-const char kTraceAfterDispatchImplName[] = "traceAfterDispatchImpl";
-const char kRegisterWeakMembersName[] = "registerWeakMembers";
-const char kHeapAllocatorName[] = "HeapAllocator";
-const char kTraceIfNeededName[] = "TraceIfNeeded";
-const char kVisitorDispatcherName[] = "VisitorDispatcher";
-const char kVisitorVarName[] = "visitor";
-const char kAdjustAndMarkName[] = "adjustAndMark";
-const char kIsHeapObjectAliveName[] = "isHeapObjectAlive";
-const char kIsEagerlyFinalizedName[] = "IsEagerlyFinalizedMarker";
+extern const char kNewOperatorName[];
+extern const char* kCreateName;
+extern const char* kTraceName;
+extern const char* kTraceImplName;
+extern const char* kFinalizeName;
+extern const char* kTraceAfterDispatchName;
+extern const char* kTraceAfterDispatchImplName;
+extern const char* kRegisterWeakMembersName;
+extern const char kHeapAllocatorName[];
+extern const char kTraceIfNeededName[];
+extern const char kVisitorDispatcherName[];
+extern const char kVisitorVarName[];
+extern const char* kAdjustAndMarkName;
+extern const char* kIsHeapObjectAliveName;
+extern const char kIsEagerlyFinalizedName[];
+extern const char kConstIteratorName[];
+extern const char kIteratorName[];
+extern const char kConstReverseIteratorName[];
+extern const char kReverseIteratorName[];
 
 class Config {
  public:
+  static void UseLegacyNames();
+
   static bool IsMember(const std::string& name) {
     return name == "Member";
   }
@@ -101,6 +107,16 @@
            name == "PersistentHeapHashMap";
   }
 
+  static bool IsGCCollectionWithUnsafeIterator(const std::string& name) {
+    if (!IsGCCollection(name))
+      return false;
+    // The list hash set iterators refer to the set, not the
+    // backing store, and are consequently safe.
+    if (name == "HeapListHashSet" || name == "PersistentHeapListHashSet")
+      return false;
+    return true;
+  }
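+  // I.e. the iterators of every GC collection except HeapListHashSet and
+  // PersistentHeapListHashSet are considered unsafe to hold as fields.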
+
   static bool IsHashMap(const std::string& name) {
     return name == "HashMap" ||
            name == "HeapHashMap" ||
@@ -131,6 +147,11 @@
            IsGCMixinBase(name);
   }
 
+  static bool IsIterator(const std::string& name) {
+    return name == kIteratorName || name == kConstIteratorName ||
+           name == kReverseIteratorName || name == kConstReverseIteratorName;
+  }
+
   // Returns true for the base classes that do not need a vtable entry for trace
   // because they cannot possibly initiate a GC during construction.
   static bool IsSafePolymorphicBase(const std::string& name) {
diff --git a/tools/clang/blink_gc_plugin/DiagnosticsReporter.cpp b/tools/clang/blink_gc_plugin/DiagnosticsReporter.cpp
index bd46f2c..1ae8425 100644
--- a/tools/clang/blink_gc_plugin/DiagnosticsReporter.cpp
+++ b/tools/clang/blink_gc_plugin/DiagnosticsReporter.cpp
@@ -147,6 +147,9 @@
     "[blink-gc] Left-most base class %0 of derived class %1"
     " must define a virtual trace method.";
 
+const char kIteratorToGCManagedCollectionNote[] =
+    "[blink-gc] Iterator field %0 to a GC managed collection declared here:";
+
 } // namespace
 
 DiagnosticBuilder DiagnosticsReporter::ReportDiagnostic(
@@ -253,6 +256,8 @@
       DiagnosticsEngine::Note, kOverriddenNonVirtualTraceNote);
   diag_manual_dispatch_method_note_ = diagnostic_.getCustomDiagID(
       DiagnosticsEngine::Note, kManualDispatchMethodNote);
+  diag_iterator_to_gc_managed_collection_note_ = diagnostic_.getCustomDiagID(
+      DiagnosticsEngine::Note, kIteratorToGCManagedCollectionNote);
 }
 
 bool DiagnosticsReporter::hasErrorOccurred() const
@@ -343,6 +348,8 @@
       note = diag_stack_allocated_field_note_;
     } else if (error.second == CheckFieldsVisitor::kGCDerivedPartObject) {
       note = diag_part_object_to_gc_derived_class_note_;
+    } else if (error.second == CheckFieldsVisitor::kIteratorToGCManaged) {
+      note = diag_iterator_to_gc_managed_collection_note_;
     } else {
       assert(false && "Unknown field error");
     }
diff --git a/tools/clang/blink_gc_plugin/DiagnosticsReporter.h b/tools/clang/blink_gc_plugin/DiagnosticsReporter.h
index ae4fd00..ddcfbfd 100644
--- a/tools/clang/blink_gc_plugin/DiagnosticsReporter.h
+++ b/tools/clang/blink_gc_plugin/DiagnosticsReporter.h
@@ -135,7 +135,7 @@
   unsigned diag_field_requires_finalization_note_;
   unsigned diag_overridden_non_virtual_trace_note_;
   unsigned diag_manual_dispatch_method_note_;
-
+  unsigned diag_iterator_to_gc_managed_collection_note_;
 };
 
 #endif // TOOLS_BLINK_GC_PLUGIN_DIAGNOSTICS_REPORTER_H_
diff --git a/tools/clang/blink_gc_plugin/Edge.cpp b/tools/clang/blink_gc_plugin/Edge.cpp
index 77c6cfe..428e747 100644
--- a/tools/clang/blink_gc_plugin/Edge.cpp
+++ b/tools/clang/blink_gc_plugin/Edge.cpp
@@ -23,6 +23,7 @@
 void RecursiveEdgeVisitor::AtPersistent(Persistent*) {}
 void RecursiveEdgeVisitor::AtCrossThreadPersistent(CrossThreadPersistent*) {}
 void RecursiveEdgeVisitor::AtCollection(Collection*) {}
+void RecursiveEdgeVisitor::AtIterator(Iterator*) {}
 
 void RecursiveEdgeVisitor::VisitValue(Value* e) {
   AtValue(e);
@@ -90,3 +91,7 @@
   e->AcceptMembers(this);
   Leave();
 }
+
+void RecursiveEdgeVisitor::VisitIterator(Iterator* e) {
+  AtIterator(e);
+}
diff --git a/tools/clang/blink_gc_plugin/Edge.h b/tools/clang/blink_gc_plugin/Edge.h
index 79aa409..d7af335 100644
--- a/tools/clang/blink_gc_plugin/Edge.h
+++ b/tools/clang/blink_gc_plugin/Edge.h
@@ -16,6 +16,7 @@
 class Edge;
 class Collection;
 class CrossThreadPersistent;
+class Iterator;
 class Member;
 class OwnPtr;
 class Persistent;
@@ -39,6 +40,7 @@
   virtual void VisitPersistent(Persistent*) {}
   virtual void VisitCrossThreadPersistent(CrossThreadPersistent*) {}
   virtual void VisitCollection(Collection*) {}
+  virtual void VisitIterator(Iterator*) {}
 };
 
 // Recursive edge visitor. The traversed path is accessible in context.
@@ -55,6 +57,7 @@
   void VisitPersistent(Persistent*) override;
   void VisitCrossThreadPersistent(CrossThreadPersistent*) override;
   void VisitCollection(Collection*) override;
+  void VisitIterator(Iterator*) override;
 
  protected:
   typedef std::deque<Edge*> Context;
@@ -74,6 +77,7 @@
   virtual void AtPersistent(Persistent*);
   virtual void AtCrossThreadPersistent(CrossThreadPersistent*);
   virtual void AtCollection(Collection*);
+  virtual void AtIterator(Iterator*);
 
  private:
   Context context_;
@@ -281,4 +285,30 @@
   bool is_root_;
 };
 
+// An iterator edge is a direct edge to some iterator type.
+class Iterator : public Edge {
+ public:
+  Iterator(RecordInfo* info, bool on_heap, bool is_unsafe)
+      : info_(info), on_heap_(on_heap), is_unsafe_(is_unsafe) {}
+  ~Iterator() {}
+
+  void Accept(EdgeVisitor* visitor) { visitor->VisitIterator(this); }
+  LivenessKind Kind() override { return kStrong; }
+  bool NeedsFinalization() { return false; }
+  TracingStatus NeedsTracing(NeedsTracingOption) {
+    if (on_heap_)
+      return TracingStatus::Needed();
+    return TracingStatus::Unneeded();
+  }
+
+  RecordInfo* info() const { return info_; }
+
+  bool IsUnsafe() const { return is_unsafe_; }
+
+ private:
+  RecordInfo* info_;
+  bool on_heap_;
+  bool is_unsafe_;
+};
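+// Note: only iterators of supported on-heap collections report
+// TracingStatus::Needed(); unsafe iterators are instead flagged via
+// CheckFieldsVisitor::AtIterator() as kIteratorToGCManaged errors.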
+
 #endif  // TOOLS_BLINK_GC_PLUGIN_EDGE_H_
diff --git a/tools/clang/blink_gc_plugin/RecordInfo.cpp b/tools/clang/blink_gc_plugin/RecordInfo.cpp
index 46b8606..fe211ac 100644
--- a/tools/clang/blink_gc_plugin/RecordInfo.cpp
+++ b/tools/clang/blink_gc_plugin/RecordInfo.cpp
@@ -113,10 +113,10 @@
   // have a "GC base name", so are to be included and considered.
   SmallVector<const CXXRecordDecl*, 8> queue;
 
-  const CXXRecordDecl *base_record = record();
+  const CXXRecordDecl* base_record = record();
   while (true) {
     for (const auto& it : base_record->bases()) {
-      const RecordType *type = it.getType()->getAs<RecordType>();
+      const RecordType* type = it.getType()->getAs<RecordType>();
       CXXRecordDecl* base;
       if (!type)
         base = GetDependentTemplatedDecl(*it.getType());
@@ -171,17 +171,19 @@
 }
 
 bool RecordInfo::IsEagerlyFinalized() {
-  if (is_eagerly_finalized_ == kNotComputed) {
-    is_eagerly_finalized_ = kFalse;
-    if (IsGCFinalized()) {
-      for (Decl* decl : record_->decls()) {
-        if (TypedefDecl* typedef_decl = dyn_cast<TypedefDecl>(decl)) {
-          if (typedef_decl->getNameAsString() == kIsEagerlyFinalizedName) {
-            is_eagerly_finalized_ = kTrue;
-            break;
-          }
-        }
-      }
+  if (is_eagerly_finalized_ != kNotComputed)
+    return is_eagerly_finalized_;
+
+  is_eagerly_finalized_ = kFalse;
+  if (!IsGCFinalized())
+    return is_eagerly_finalized_;
+
+  for (Decl* decl : record_->decls()) {
+    if (TypedefDecl* typedef_decl = dyn_cast<TypedefDecl>(decl)) {
+      if (typedef_decl->getNameAsString() != kIsEagerlyFinalizedName)
+        continue;
+      is_eagerly_finalized_ = kTrue;
+      break;
     }
   }
   return is_eagerly_finalized_;
@@ -414,7 +416,13 @@
     // Ignore fields annotated with the GC_PLUGIN_IGNORE macro.
     if (Config::IsIgnoreAnnotated(field))
       continue;
-    if (Edge* edge = CreateEdge(field->getType().getTypePtrOrNull())) {
+    // Check whether the unexpanded type should be recorded; this is needed
+    // only to track iterator aliases.
+    const Type* unexpandedType = field->getType().getSplitUnqualifiedType().Ty;
+    Edge* edge = CreateEdgeFromOriginalType(unexpandedType);
+    if (!edge)
+      edge = CreateEdge(field->getType().getTypePtrOrNull());
+    if (edge) {
       fields_status = fields_status.LUB(edge->NeedsTracing(Edge::kRecursive));
       fields->insert(std::make_pair(field, FieldPoint(field, edge)));
     }
@@ -567,6 +575,36 @@
   return false;
 }
 
+Edge* RecordInfo::CreateEdgeFromOriginalType(const Type* type) {
+  if (!type)
+    return nullptr;
+
+  // Look for "typedef ... iterator;"
+  if (!isa<ElaboratedType>(type))
+    return nullptr;
+  const ElaboratedType* elaboratedType = cast<ElaboratedType>(type);
+  if (!isa<TypedefType>(elaboratedType->getNamedType()))
+    return nullptr;
+  const TypedefType* typedefType =
+      cast<TypedefType>(elaboratedType->getNamedType());
+  std::string typeName = typedefType->getDecl()->getNameAsString();
+  if (!Config::IsIterator(typeName))
+    return nullptr;
+  RecordInfo* info =
+      cache_->Lookup(elaboratedType->getQualifier()->getAsType());
+
+  bool on_heap = false;
+  bool is_unsafe = false;
+  // Silently handle unknown types; the on-heap collection types will
+  // have to be in scope for the declaration to compile, though.
+  if (info) {
+    is_unsafe = Config::IsGCCollectionWithUnsafeIterator(info->name());
+    // Don't mark iterator as being on the heap if it is not supported.
+    on_heap = !is_unsafe && Config::IsGCCollection(info->name());
+  }
+  return new Iterator(info, on_heap, is_unsafe);
+}
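+// Example (hypothetical field): for a member declared as
+//   HeapVector<Member<T>>::iterator m_it;
+// the unexpanded type is an ElaboratedType whose named typedef is "iterator",
+// so CreateEdgeFromOriginalType() returns an Iterator edge and the generic
+// CreateEdge() path is skipped.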
+
 Edge* RecordInfo::CreateEdge(const Type* type) {
   if (!type) {
     return 0;
diff --git a/tools/clang/blink_gc_plugin/RecordInfo.h b/tools/clang/blink_gc_plugin/RecordInfo.h
index e672282..220a6d3 100644
--- a/tools/clang/blink_gc_plugin/RecordInfo.h
+++ b/tools/clang/blink_gc_plugin/RecordInfo.h
@@ -129,6 +129,7 @@
   bool InheritsTrace();
 
   Edge* CreateEdge(const clang::Type* type);
+  Edge* CreateEdgeFromOriginalType(const clang::Type* type);
 
   RecordCache* cache_;
   clang::CXXRecordDecl* record_;
diff --git a/tools/clang/blink_gc_plugin/tests/base_class_must_define_virtual_trace.cpp b/tools/clang/blink_gc_plugin/tests/base_class_must_define_virtual_trace.cpp
index cd38ec9..66219f1 100644
--- a/tools/clang/blink_gc_plugin/tests/base_class_must_define_virtual_trace.cpp
+++ b/tools/clang/blink_gc_plugin/tests/base_class_must_define_virtual_trace.cpp
@@ -6,13 +6,13 @@
 
 namespace blink {
 
-void PartDerived::trace(Visitor* visitor)
+void PartDerived::Trace(Visitor* visitor)
 {
 }
 
-void HeapDerived::trace(Visitor* visitor)
+void HeapDerived::Trace(Visitor* visitor)
 {
-    visitor->trace(m_part);
+    visitor->Trace(m_part);
 }
 
 
diff --git a/tools/clang/blink_gc_plugin/tests/base_class_must_define_virtual_trace.h b/tools/clang/blink_gc_plugin/tests/base_class_must_define_virtual_trace.h
index fbd26d7..a276bd5 100644
--- a/tools/clang/blink_gc_plugin/tests/base_class_must_define_virtual_trace.h
+++ b/tools/clang/blink_gc_plugin/tests/base_class_must_define_virtual_trace.h
@@ -11,23 +11,23 @@
 
 class PartBase {
     DISALLOW_NEW();
-    // Missing virtual trace.
+    // Missing virtual Trace.
 };
 
 class PartDerived : public PartBase {
     DISALLOW_NEW();
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 };
 
 class HeapBase : public GarbageCollected<HeapBase> {
-    // Missing virtual trace.
+    // Missing virtual Trace.
 };
 
 
 class HeapDerived : public HeapBase {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 private:
     PartDerived m_part;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/base_requires_tracing.cpp b/tools/clang/blink_gc_plugin/tests/base_requires_tracing.cpp
index 87559a8..474be51 100644
--- a/tools/clang/blink_gc_plugin/tests/base_requires_tracing.cpp
+++ b/tools/clang/blink_gc_plugin/tests/base_requires_tracing.cpp
@@ -6,16 +6,16 @@
 
 namespace blink {
 
-void A::trace(Visitor* visitor) { }
+void A::Trace(Visitor* visitor) { }
 
-void C::trace(Visitor* visitor) {
-  visitor->trace(m_a);
+void C::Trace(Visitor* visitor) {
+  visitor->Trace(m_a);
   // Missing B::Trace(visitor)
 }
 
-void D::trace(Visitor* visitor) {
-  visitor->trace(m_a);
-  C::trace(visitor);
+void D::Trace(Visitor* visitor) {
+  visitor->Trace(m_a);
+  C::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/base_requires_tracing.h b/tools/clang/blink_gc_plugin/tests/base_requires_tracing.h
index 0205a08..c10d07f 100644
--- a/tools/clang/blink_gc_plugin/tests/base_requires_tracing.h
+++ b/tools/clang/blink_gc_plugin/tests/base_requires_tracing.h
@@ -11,23 +11,23 @@
 
 class A : public GarbageCollected<A> {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 };
 
 class B : public A {
-    // Does not need trace
+    // Does not need Trace
 };
 
 class C : public B {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Member<A> m_a;
 };
 
 class D : public C {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Member<A> m_a;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/base_requires_tracing.txt b/tools/clang/blink_gc_plugin/tests/base_requires_tracing.txt
index ee525b9..581f0da 100644
--- a/tools/clang/blink_gc_plugin/tests/base_requires_tracing.txt
+++ b/tools/clang/blink_gc_plugin/tests/base_requires_tracing.txt
@@ -1,4 +1,4 @@
 base_requires_tracing.cpp:11:1: warning: [blink-gc] Base class 'B' of derived class 'C' requires tracing.
-void C::trace(Visitor* visitor) {
+void C::Trace(Visitor* visitor) {
 ^
 1 warning generated.
diff --git a/tools/clang/blink_gc_plugin/tests/class_does_not_require_finalization.cpp b/tools/clang/blink_gc_plugin/tests/class_does_not_require_finalization.cpp
index 9c51eca..364099d 100644
--- a/tools/clang/blink_gc_plugin/tests/class_does_not_require_finalization.cpp
+++ b/tools/clang/blink_gc_plugin/tests/class_does_not_require_finalization.cpp
@@ -6,7 +6,7 @@
 
 namespace blink {
 
-void DoesNotNeedFinalizer::trace(Visitor* visitor)
+void DoesNotNeedFinalizer::Trace(Visitor* visitor)
 {
 }
 
@@ -14,7 +14,7 @@
 {
 }
 
-void DoesNotNeedFinalizer2::trace(Visitor* visitor)
+void DoesNotNeedFinalizer2::Trace(Visitor* visitor)
 {
 }
 
diff --git a/tools/clang/blink_gc_plugin/tests/class_does_not_require_finalization.h b/tools/clang/blink_gc_plugin/tests/class_does_not_require_finalization.h
index c6530f3..7ef1102 100644
--- a/tools/clang/blink_gc_plugin/tests/class_does_not_require_finalization.h
+++ b/tools/clang/blink_gc_plugin/tests/class_does_not_require_finalization.h
@@ -12,20 +12,20 @@
 class DoesNeedFinalizer : public GarbageCollectedFinalized<DoesNeedFinalizer> {
 public:
     ~DoesNeedFinalizer() { ; }
-    void trace(Visitor*);
+    void Trace(Visitor*);
 };
 
 class DoesNotNeedFinalizer
     : public GarbageCollectedFinalized<DoesNotNeedFinalizer> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 };
 
 class DoesNotNeedFinalizer2
     : public GarbageCollectedFinalized<DoesNotNeedFinalizer2> {
 public:
     ~DoesNotNeedFinalizer2();
-    void trace(Visitor*);
+    void Trace(Visitor*);
 };
 
 class HasEmptyDtor {
@@ -39,7 +39,7 @@
     : public GarbageCollectedFinalized<DoesNeedFinalizer2>,
       public HasEmptyDtor {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 };
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/class_multiple_trace_bases.cpp b/tools/clang/blink_gc_plugin/tests/class_multiple_trace_bases.cpp
index 5bb87c9..cd3903a 100644
--- a/tools/clang/blink_gc_plugin/tests/class_multiple_trace_bases.cpp
+++ b/tools/clang/blink_gc_plugin/tests/class_multiple_trace_bases.cpp
@@ -6,17 +6,17 @@
 
 namespace blink {
 
-void Base::trace(Visitor* visitor) { }
+void Base::Trace(Visitor* visitor) { }
 
-void Mixin1::trace(Visitor* visitor) { }
+void Mixin1::Trace(Visitor* visitor) { }
 
-void Mixin2::trace(Visitor* visitor) { }
+void Mixin2::Trace(Visitor* visitor) { }
 
-// Missing: void Derived1::trace(Visitor* visitor);
+// Missing: void Derived1::Trace(Visitor* visitor);
 
-void Derived2::trace(Visitor* visitor) {
-    Base::trace(visitor);
-    Mixin1::trace(visitor);
+void Derived2::Trace(Visitor* visitor) {
+    Base::Trace(visitor);
+    Mixin1::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/class_multiple_trace_bases.h b/tools/clang/blink_gc_plugin/tests/class_multiple_trace_bases.h
index 133f006..6c3a0aa 100644
--- a/tools/clang/blink_gc_plugin/tests/class_multiple_trace_bases.h
+++ b/tools/clang/blink_gc_plugin/tests/class_multiple_trace_bases.h
@@ -11,27 +11,27 @@
 
 class Base : public GarbageCollected<Base> {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 };
 
 class Mixin1 : public GarbageCollectedMixin {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 };
 
 class Mixin2 : public GarbageCollectedMixin {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 };
 
 class Derived1 : public Base, public Mixin1 {
     USING_GARBAGE_COLLECTED_MIXIN(Derived1);
-    // Requires trace method.
+    // Requires Trace method.
 };
 
 class Derived2 : public Base, public Mixin1, public Mixin2 {
     USING_GARBAGE_COLLECTED_MIXIN(Derived2);
-    void trace(Visitor*) override;
+    void Trace(Visitor*) override;
 };
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/class_multiple_trace_bases.txt b/tools/clang/blink_gc_plugin/tests/class_multiple_trace_bases.txt
index 33ae5f5..658af72 100644
--- a/tools/clang/blink_gc_plugin/tests/class_multiple_trace_bases.txt
+++ b/tools/clang/blink_gc_plugin/tests/class_multiple_trace_bases.txt
@@ -9,6 +9,6 @@
 class Derived1 : public Base, public Mixin1 {
                               ^
 class_multiple_trace_bases.cpp:17:1: warning: [blink-gc] Base class 'Mixin2' of derived class 'Derived2' requires tracing.
-void Derived2::trace(Visitor* visitor) {
+void Derived2::Trace(Visitor* visitor) {
 ^
 2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/class_overrides_new.h b/tools/clang/blink_gc_plugin/tests/class_overrides_new.h
index 3e80e37..0135d48 100644
--- a/tools/clang/blink_gc_plugin/tests/class_overrides_new.h
+++ b/tools/clang/blink_gc_plugin/tests/class_overrides_new.h
@@ -12,7 +12,7 @@
 class HeapObject : public GarbageCollected<HeapObject> {
     WTF_MAKE_FAST_ALLOCATED;
 public:
-    void trace(Visitor*) { }
+    void Trace(Visitor*) { }
 };
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/class_requires_finalization_base.cpp b/tools/clang/blink_gc_plugin/tests/class_requires_finalization_base.cpp
index 8d47634..6eeb01e 100644
--- a/tools/clang/blink_gc_plugin/tests/class_requires_finalization_base.cpp
+++ b/tools/clang/blink_gc_plugin/tests/class_requires_finalization_base.cpp
@@ -6,14 +6,14 @@
 
 namespace blink {
 
-void NeedsFinalizer::trace(Visitor* visitor)
+void NeedsFinalizer::Trace(Visitor* visitor)
 {
-    A::trace(visitor);
+    A::Trace(visitor);
 }
 
-void DoesNotNeedFinalizer::trace(Visitor* visitor)
+void DoesNotNeedFinalizer::Trace(Visitor* visitor)
 {
-    A::trace(visitor);
+    A::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/class_requires_finalization_base.h b/tools/clang/blink_gc_plugin/tests/class_requires_finalization_base.h
index 239c2cf..13a74dd 100644
--- a/tools/clang/blink_gc_plugin/tests/class_requires_finalization_base.h
+++ b/tools/clang/blink_gc_plugin/tests/class_requires_finalization_base.h
@@ -11,7 +11,7 @@
 
 class A : public GarbageCollected<A> {
 public:
-    virtual void trace(Visitor*) {}
+    virtual void Trace(Visitor*) {}
 };
 
 class B {
@@ -22,13 +22,13 @@
 // Second base class needs finalization.
 class NeedsFinalizer : public A, public B {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 };
 
 // Base does not need finalization.
 class DoesNotNeedFinalizer : public A {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 };
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/class_requires_finalization_field.cpp b/tools/clang/blink_gc_plugin/tests/class_requires_finalization_field.cpp
index eb23ab0..7b8ac0a 100644
--- a/tools/clang/blink_gc_plugin/tests/class_requires_finalization_field.cpp
+++ b/tools/clang/blink_gc_plugin/tests/class_requires_finalization_field.cpp
@@ -6,29 +6,29 @@
 
 namespace blink {
 
-void NeedsFinalizer::trace(Visitor* visitor)
+void NeedsFinalizer::Trace(Visitor* visitor)
 {
-    visitor->trace(m_as);
-    A::trace(visitor);
+    visitor->Trace(m_as);
+    A::Trace(visitor);
 }
 
-void AlsoNeedsFinalizer::trace(Visitor* visitor)
+void AlsoNeedsFinalizer::Trace(Visitor* visitor)
 {
-    visitor->trace(m_bs);
-    A::trace(visitor);
+    visitor->Trace(m_bs);
+    A::Trace(visitor);
 }
 
-void DoesNotNeedFinalizer::trace(Visitor* visitor)
+void DoesNotNeedFinalizer::Trace(Visitor* visitor)
 {
-    visitor->trace(m_bs);
-    A::trace(visitor);
+    visitor->Trace(m_bs);
+    A::Trace(visitor);
 }
 
-void AlsoDoesNotNeedFinalizer::trace(Visitor* visitor)
+void AlsoDoesNotNeedFinalizer::Trace(Visitor* visitor)
 {
-    visitor->trace(m_as);
-    visitor->trace(m_cs);
-    A::trace(visitor);
+    visitor->Trace(m_as);
+    visitor->Trace(m_cs);
+    A::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/class_requires_finalization_field.h b/tools/clang/blink_gc_plugin/tests/class_requires_finalization_field.h
index 9596127..a585f3f 100644
--- a/tools/clang/blink_gc_plugin/tests/class_requires_finalization_field.h
+++ b/tools/clang/blink_gc_plugin/tests/class_requires_finalization_field.h
@@ -11,20 +11,20 @@
 
 class A : public GarbageCollected<A> {
 public:
-    virtual void trace(Visitor*) { }
+    virtual void Trace(Visitor*) { }
 };
 
 // Has a non-trivial dtor (user-declared).
 class B {
 public:
     ~B() { }
-    void trace(Visitor*) { };
+    void Trace(Visitor*) { };
 };
 
 // Has a trivial dtor.
 class C {
 public:
-    void trace(Visitor*) { };
+    void Trace(Visitor*) { };
 };
 
 } // blink namespace
@@ -43,7 +43,7 @@
 // Off-heap vectors always need to be finalized.
 class NeedsFinalizer : public A {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Vector<Member<A> > m_as;
 };
@@ -52,7 +52,7 @@
 // need to be finalized.
 class AlsoNeedsFinalizer : public A {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     HeapVector<B, 10> m_bs;
 };
@@ -60,7 +60,7 @@
 // On-heap vectors with no inlined objects never need to be finalized.
 class DoesNotNeedFinalizer : public A {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     HeapVector<B> m_bs;
 };
@@ -69,7 +69,7 @@
 // don't need to be finalized.
 class AlsoDoesNotNeedFinalizer : public A {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     HeapVector<Member<A>, 10> m_as;
     HeapVector<C, 10> m_cs;
diff --git a/tools/clang/blink_gc_plugin/tests/class_requires_finalization_mixin.cpp b/tools/clang/blink_gc_plugin/tests/class_requires_finalization_mixin.cpp
index 782810e..0ebca9a 100644
--- a/tools/clang/blink_gc_plugin/tests/class_requires_finalization_mixin.cpp
+++ b/tools/clang/blink_gc_plugin/tests/class_requires_finalization_mixin.cpp
@@ -6,32 +6,32 @@
 
 namespace blink {
 
-void MixinFinalizable::trace(Visitor* visitor)
+void MixinFinalizable::Trace(Visitor* visitor)
 {
-    visitor->trace(m_onHeap);
+    visitor->Trace(m_onHeap);
 }
 
-void MixinNotFinalizable::trace(Visitor* visitor)
+void MixinNotFinalizable::Trace(Visitor* visitor)
 {
-    visitor->trace(m_onHeap);
+    visitor->Trace(m_onHeap);
 }
 
-void NeedsFinalizer::trace(Visitor* visitor)
+void NeedsFinalizer::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
-    MixinFinalizable::trace(visitor);
+    visitor->Trace(m_obj);
+    MixinFinalizable::Trace(visitor);
 }
 
-void HasFinalizer::trace(Visitor* visitor)
+void HasFinalizer::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
-    MixinFinalizable::trace(visitor);
+    visitor->Trace(m_obj);
+    MixinFinalizable::Trace(visitor);
 }
 
-void NeedsNoFinalization::trace(Visitor* visitor)
+void NeedsNoFinalization::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
-    MixinNotFinalizable::trace(visitor);
+    visitor->Trace(m_obj);
+    MixinNotFinalizable::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/class_requires_finalization_mixin.h b/tools/clang/blink_gc_plugin/tests/class_requires_finalization_mixin.h
index 10befbd..6dd49b7 100644
--- a/tools/clang/blink_gc_plugin/tests/class_requires_finalization_mixin.h
+++ b/tools/clang/blink_gc_plugin/tests/class_requires_finalization_mixin.h
@@ -14,7 +14,7 @@
 
 class MixinFinalizable : public GarbageCollectedMixin {
 public:
-    virtual void trace(Visitor*) override;
+    virtual void Trace(Visitor*) override;
 private:
     RefPtr<OffHeap> m_offHeap; // Requires finalization
     Member<OnHeap> m_onHeap;
@@ -22,7 +22,7 @@
 
 class MixinNotFinalizable : public GarbageCollectedMixin {
 public:
-    virtual void trace(Visitor*) override;
+    virtual void Trace(Visitor*) override;
 private:
     Member<OnHeap> m_onHeap;
 };
@@ -32,7 +32,7 @@
     , public MixinFinalizable {
     USING_GARBAGE_COLLECTED_MIXIN(NeedsFinalizer);
 public:
-    virtual void trace(Visitor*) override;
+    virtual void Trace(Visitor*) override;
 private:
     Member<OnHeap> m_obj;
 };
@@ -41,7 +41,7 @@
                      public MixinFinalizable {
     USING_GARBAGE_COLLECTED_MIXIN(HasFinalizer);
 public:
-    virtual void trace(Visitor*) override;
+    virtual void Trace(Visitor*) override;
 private:
     Member<OnHeap> m_obj;
 };
@@ -51,7 +51,7 @@
     , public MixinNotFinalizable {
     USING_GARBAGE_COLLECTED_MIXIN(NeedsNoFinalization);
 public:
-    virtual void trace(Visitor*) override;
+    virtual void Trace(Visitor*) override;
 private:
     Member<OnHeap> m_obj;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/class_requires_trace_method.cpp b/tools/clang/blink_gc_plugin/tests/class_requires_trace_method.cpp
index f18fdf6..e17dae0 100644
--- a/tools/clang/blink_gc_plugin/tests/class_requires_trace_method.cpp
+++ b/tools/clang/blink_gc_plugin/tests/class_requires_trace_method.cpp
@@ -6,14 +6,14 @@
 
 namespace blink {
 
-void Mixin2::trace(Visitor* visitor)
+void Mixin2::Trace(Visitor* visitor)
 {
-  Mixin::trace(visitor);
+  Mixin::Trace(visitor);
 }
 
-void Mixin3::trace(Visitor* visitor)
+void Mixin3::Trace(Visitor* visitor)
 {
-  Mixin::trace(visitor);
+  Mixin::Trace(visitor);
 }
 
 } // namespace blink
diff --git a/tools/clang/blink_gc_plugin/tests/class_requires_trace_method.h b/tools/clang/blink_gc_plugin/tests/class_requires_trace_method.h
index 4a442b7..5c8985e 100644
--- a/tools/clang/blink_gc_plugin/tests/class_requires_trace_method.h
+++ b/tools/clang/blink_gc_plugin/tests/class_requires_trace_method.h
@@ -24,7 +24,7 @@
 
 class Mixin : public GarbageCollectedMixin {
 public:
-  virtual void trace(Visitor*) override;
+  virtual void Trace(Visitor*) override;
   Member<Mixin> m_self;
 };
 
@@ -34,7 +34,7 @@
 
 class Mixin2 : public Mixin {
 public:
-  virtual void trace(Visitor*) override;
+  virtual void Trace(Visitor*) override;
 };
 
 class HeapObjectMixin2
@@ -44,14 +44,14 @@
 
 class Mixin3 : public Mixin {
 public:
-  virtual void trace(Visitor*) override;
+  virtual void Trace(Visitor*) override;
 };
 
 class HeapObjectMixin3
     : public GarbageCollected<HeapObjectMixin3>, public Mixin {
   USING_GARBAGE_COLLECTED_MIXIN(HeapObjectMixin2);
 public:
-  virtual void trace(Visitor*) override;
+  virtual void Trace(Visitor*) override;
 };
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/class_requires_trace_method_tmpl.cpp b/tools/clang/blink_gc_plugin/tests/class_requires_trace_method_tmpl.cpp
index 7051fb2..7c9405a 100644
--- a/tools/clang/blink_gc_plugin/tests/class_requires_trace_method_tmpl.cpp
+++ b/tools/clang/blink_gc_plugin/tests/class_requires_trace_method_tmpl.cpp
@@ -6,10 +6,10 @@
 
 namespace blink {
 
-// Does not need a trace method.
+// Does not need a Trace method.
 class NoTrace : public TemplatedObject<PartObjectA> { };
 
-// Needs a trace method.
+// Needs a Trace method.
 class NeedsTrace : public TemplatedObject<PartObjectB> { };
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/class_requires_trace_method_tmpl.h b/tools/clang/blink_gc_plugin/tests/class_requires_trace_method_tmpl.h
index 70cab61..82011e1 100644
--- a/tools/clang/blink_gc_plugin/tests/class_requires_trace_method_tmpl.h
+++ b/tools/clang/blink_gc_plugin/tests/class_requires_trace_method_tmpl.h
@@ -18,7 +18,7 @@
 class PartObjectB {
     DISALLOW_NEW();
 public:
-    void trace(Visitor* visitor) { visitor->trace(m_obj); }
+    void Trace(Visitor* visitor) { visitor->Trace(m_obj); }
 private:
     Member<HeapObject> m_obj;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/crash_on_invalid.h b/tools/clang/blink_gc_plugin/tests/crash_on_invalid.h
index a77d097..c6ce1ee 100644
--- a/tools/clang/blink_gc_plugin/tests/crash_on_invalid.h
+++ b/tools/clang/blink_gc_plugin/tests/crash_on_invalid.h
@@ -18,7 +18,7 @@
                       public ScriptWrappable {
 public:
     virtual const WrapperTypeInfo *wrapperTypeInfo() const {}
-    void trace(Visitor *);
+    void Trace(Visitor *);
 };
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/cycle_ptrs.cpp b/tools/clang/blink_gc_plugin/tests/cycle_ptrs.cpp
index f3b3989..631f7c4 100644
--- a/tools/clang/blink_gc_plugin/tests/cycle_ptrs.cpp
+++ b/tools/clang/blink_gc_plugin/tests/cycle_ptrs.cpp
@@ -6,12 +6,12 @@
 
 namespace blink {
 
-void A::trace(Visitor* visitor) {
-    visitor->trace(m_b);
+void A::Trace(Visitor* visitor) {
+    visitor->Trace(m_b);
 }
 
-void B::trace(Visitor* visitor) {
-    visitor->trace(m_a);
+void B::Trace(Visitor* visitor) {
+    visitor->Trace(m_a);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/cycle_ptrs.h b/tools/clang/blink_gc_plugin/tests/cycle_ptrs.h
index 8c07a06..cb404ef 100644
--- a/tools/clang/blink_gc_plugin/tests/cycle_ptrs.h
+++ b/tools/clang/blink_gc_plugin/tests/cycle_ptrs.h
@@ -21,14 +21,14 @@
 
 class A : public GarbageCollected<A> {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 private:
     Member<B> m_b;
 };
 
 class B : public GarbageCollectedFinalized<B> {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 private:
     Member<A> m_a;
     RefPtr<C> m_c;
diff --git a/tools/clang/blink_gc_plugin/tests/cycle_sub.cpp b/tools/clang/blink_gc_plugin/tests/cycle_sub.cpp
index dfe835a..fde6188 100644
--- a/tools/clang/blink_gc_plugin/tests/cycle_sub.cpp
+++ b/tools/clang/blink_gc_plugin/tests/cycle_sub.cpp
@@ -6,9 +6,9 @@
 
 namespace blink {
 
-void B::trace(Visitor* visitor) {
-    visitor->trace(m_c);
-    A::trace(visitor);
+void B::Trace(Visitor* visitor) {
+    visitor->Trace(m_c);
+    A::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/cycle_sub.h b/tools/clang/blink_gc_plugin/tests/cycle_sub.h
index a007061..e1a011b 100644
--- a/tools/clang/blink_gc_plugin/tests/cycle_sub.h
+++ b/tools/clang/blink_gc_plugin/tests/cycle_sub.h
@@ -16,12 +16,12 @@
 
 class A : public GarbageCollectedFinalized<A> {
 public:
-    virtual void trace(Visitor*) {}
+    virtual void Trace(Visitor*) {}
 };
 
 class B : public A {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 private:
     RefPtr<C> m_c;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/cycle_super.cpp b/tools/clang/blink_gc_plugin/tests/cycle_super.cpp
index d9ecd79..43394f6 100644
--- a/tools/clang/blink_gc_plugin/tests/cycle_super.cpp
+++ b/tools/clang/blink_gc_plugin/tests/cycle_super.cpp
@@ -6,16 +6,16 @@
 
 namespace blink {
 
-void A::trace(Visitor* visitor) {
-    visitor->trace(m_d);
+void A::Trace(Visitor* visitor) {
+    visitor->Trace(m_d);
 }
 
-void B::trace(Visitor* visitor) {
-    A::trace(visitor);
+void B::Trace(Visitor* visitor) {
+    A::Trace(visitor);
 }
 
-void C::trace(Visitor* visitor) {
-    B::trace(visitor);
+void C::Trace(Visitor* visitor) {
+    B::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/cycle_super.h b/tools/clang/blink_gc_plugin/tests/cycle_super.h
index 13b05c1..aecb14b 100644
--- a/tools/clang/blink_gc_plugin/tests/cycle_super.h
+++ b/tools/clang/blink_gc_plugin/tests/cycle_super.h
@@ -16,19 +16,19 @@
 
 class A : public GarbageCollectedFinalized<A> {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 private:
     RefPtr<D> m_d;
 };
 
 class B : public A {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 };
 
 class C : public B {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 };
 
 class D : public RefCounted<C> {
diff --git a/tools/clang/blink_gc_plugin/tests/cycle_super_neg.cpp b/tools/clang/blink_gc_plugin/tests/cycle_super_neg.cpp
index 33dec59..77ae04c 100644
--- a/tools/clang/blink_gc_plugin/tests/cycle_super_neg.cpp
+++ b/tools/clang/blink_gc_plugin/tests/cycle_super_neg.cpp
@@ -6,13 +6,13 @@
 
 namespace blink {
 
-void B::trace(Visitor* visitor) {
-    A::trace(visitor);
+void B::Trace(Visitor* visitor) {
+    A::Trace(visitor);
 }
 
-void D::trace(Visitor* visitor) {
-    visitor->trace(m_c);
-    A::trace(visitor);
+void D::Trace(Visitor* visitor) {
+    visitor->Trace(m_c);
+    A::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/cycle_super_neg.h b/tools/clang/blink_gc_plugin/tests/cycle_super_neg.h
index 6f99eff..e80cc63 100644
--- a/tools/clang/blink_gc_plugin/tests/cycle_super_neg.h
+++ b/tools/clang/blink_gc_plugin/tests/cycle_super_neg.h
@@ -19,12 +19,12 @@
 
 class A : public GarbageCollectedFinalized<A> {
 public:
-    virtual void trace(Visitor*) {}
+    virtual void Trace(Visitor*) {}
 };
 
 class B : public A {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 };
 
 class C : public RefCounted<C> {
@@ -34,7 +34,7 @@
 
 class D : public A {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 private:
     RefPtr<C> m_c;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/delayed_parsing.cpp b/tools/clang/blink_gc_plugin/tests/delayed_parsing.cpp
index 149d95e..ca55043 100644
--- a/tools/clang/blink_gc_plugin/tests/delayed_parsing.cpp
+++ b/tools/clang/blink_gc_plugin/tests/delayed_parsing.cpp
@@ -7,14 +7,14 @@
 namespace blink {
 
 struct HeapObject : public GarbageCollected<HeapObject> {
-    void trace(Visitor*) { }
+    void Trace(Visitor*) { }
 };
 
 template<typename T>
 class TemplateBase
     : public GarbageCollected<TemplateBase<T> > {
 public:
-    void trace(Visitor* visitor) { visitor->trace(m_obj); }
+    void Trace(Visitor* visitor) { visitor->Trace(m_obj); }
 private:
     Member<HeapObject> m_obj;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/destructor_access_finalized_field.cpp b/tools/clang/blink_gc_plugin/tests/destructor_access_finalized_field.cpp
index b6bbfd2..3cc8b94 100644
--- a/tools/clang/blink_gc_plugin/tests/destructor_access_finalized_field.cpp
+++ b/tools/clang/blink_gc_plugin/tests/destructor_access_finalized_field.cpp
@@ -20,16 +20,16 @@
     m_objs[0];
 }
 
-void HeapObject::trace(Visitor* visitor)
+void HeapObject::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
-    visitor->trace(m_objs);
-    visitor->trace(m_part);
+    visitor->Trace(m_obj);
+    visitor->Trace(m_objs);
+    visitor->Trace(m_part);
 }
 
-void PartOther::trace(Visitor* visitor)
+void PartOther::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
+    visitor->Trace(m_obj);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/destructor_access_finalized_field.h b/tools/clang/blink_gc_plugin/tests/destructor_access_finalized_field.h
index 4c72156..074582f 100644
--- a/tools/clang/blink_gc_plugin/tests/destructor_access_finalized_field.h
+++ b/tools/clang/blink_gc_plugin/tests/destructor_access_finalized_field.h
@@ -19,7 +19,7 @@
 class PartOther {
     ALLOW_ONLY_INLINE_ALLOCATION();
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 
     HeapObject* obj() { return m_obj; }
 
@@ -30,7 +30,7 @@
 class HeapObject : public GarbageCollectedFinalized<HeapObject> {
 public:
     ~HeapObject();
-    void trace(Visitor*);
+    void Trace(Visitor*);
     bool foo() { return true; }
     void bar(HeapObject*) { }
 private:
diff --git a/tools/clang/blink_gc_plugin/tests/destructor_eagerly_finalized.cpp b/tools/clang/blink_gc_plugin/tests/destructor_eagerly_finalized.cpp
index 07409cc..8dc672d 100644
--- a/tools/clang/blink_gc_plugin/tests/destructor_eagerly_finalized.cpp
+++ b/tools/clang/blink_gc_plugin/tests/destructor_eagerly_finalized.cpp
@@ -12,9 +12,9 @@
     m_obj->foo();
 }
 
-void HeapObjectEagerFinalized::trace(Visitor* visitor)
+void HeapObjectEagerFinalized::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
+    visitor->Trace(m_obj);
 }
 
 HeapObjectEagerFinalizedAlso::~HeapObjectEagerFinalizedAlso()
@@ -27,11 +27,11 @@
     m_heapVector[0]->foo();
 }
 
-void HeapObjectEagerFinalizedAlso::trace(Visitor* visitor)
+void HeapObjectEagerFinalizedAlso::Trace(Visitor* visitor)
 {
-    visitor->trace(m_heapObject);
-    visitor->trace(m_heapObjectFinalized);
-    visitor->trace(m_heapVector);
+    visitor->Trace(m_heapObject);
+    visitor->Trace(m_heapObjectFinalized);
+    visitor->Trace(m_heapVector);
 }
 
 } // namespace blink
diff --git a/tools/clang/blink_gc_plugin/tests/destructor_eagerly_finalized.h b/tools/clang/blink_gc_plugin/tests/destructor_eagerly_finalized.h
index 77a29de..7a6d1a0 100644
--- a/tools/clang/blink_gc_plugin/tests/destructor_eagerly_finalized.h
+++ b/tools/clang/blink_gc_plugin/tests/destructor_eagerly_finalized.h
@@ -11,7 +11,7 @@
 
 class HeapObject : public GarbageCollected<HeapObject> {
 public:
-    void trace(Visitor*) { }
+    void Trace(Visitor*) { }
     void foo() { }
 };
 
@@ -20,7 +20,7 @@
 public:
     EAGERLY_FINALIZED();
     ~HeapObjectEagerFinalized();
-    void trace(Visitor*);
+    void Trace(Visitor*);
 
     void foo() { }
 
@@ -34,7 +34,7 @@
 public:
     EAGERLY_FINALIZED();
     ~HeapObjectEagerFinalizedAlso();
-    void trace(Visitor*);
+    void Trace(Visitor*);
 
 private:
     Member<HeapObject> m_heapObject;
diff --git a/tools/clang/blink_gc_plugin/tests/destructor_in_nonfinalized_class.cpp b/tools/clang/blink_gc_plugin/tests/destructor_in_nonfinalized_class.cpp
index 8efc41d..b1143d3 100644
--- a/tools/clang/blink_gc_plugin/tests/destructor_in_nonfinalized_class.cpp
+++ b/tools/clang/blink_gc_plugin/tests/destructor_in_nonfinalized_class.cpp
@@ -12,9 +12,9 @@
     (void)this;
 }
 
-void HeapObject::trace(Visitor* visitor)
+void HeapObject::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
+    visitor->Trace(m_obj);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/destructor_in_nonfinalized_class.h b/tools/clang/blink_gc_plugin/tests/destructor_in_nonfinalized_class.h
index f3fa506..86180e7 100644
--- a/tools/clang/blink_gc_plugin/tests/destructor_in_nonfinalized_class.h
+++ b/tools/clang/blink_gc_plugin/tests/destructor_in_nonfinalized_class.h
@@ -12,7 +12,7 @@
 class HeapObject : public GarbageCollected<HeapObject> {
 public:
     ~HeapObject();
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Member<HeapObject> m_obj;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/fields_illegal_tracing.cpp b/tools/clang/blink_gc_plugin/tests/fields_illegal_tracing.cpp
index b831077..15df881 100644
--- a/tools/clang/blink_gc_plugin/tests/fields_illegal_tracing.cpp
+++ b/tools/clang/blink_gc_plugin/tests/fields_illegal_tracing.cpp
@@ -6,18 +6,18 @@
 
 namespace blink {
 
-void PartObject::trace(Visitor* visitor) {
-    visitor->trace(m_obj1);
-    visitor->trace(m_obj2);
-    visitor->trace(m_obj3);
-    visitor->trace(m_obj4);
+void PartObject::Trace(Visitor* visitor) {
+    visitor->Trace(m_obj1);
+    visitor->Trace(m_obj2);
+    visitor->Trace(m_obj3);
+    visitor->Trace(m_obj4);
 }
 
-void HeapObject::trace(Visitor* visitor) {
-    visitor->trace(m_obj1);
-    visitor->trace(m_obj2);
-    visitor->trace(m_obj3);
-    visitor->trace(m_obj4);
+void HeapObject::Trace(Visitor* visitor) {
+    visitor->Trace(m_obj1);
+    visitor->Trace(m_obj2);
+    visitor->Trace(m_obj3);
+    visitor->Trace(m_obj4);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/fields_illegal_tracing.h b/tools/clang/blink_gc_plugin/tests/fields_illegal_tracing.h
index 7700b82..294629e 100644
--- a/tools/clang/blink_gc_plugin/tests/fields_illegal_tracing.h
+++ b/tools/clang/blink_gc_plugin/tests/fields_illegal_tracing.h
@@ -19,7 +19,7 @@
     operator T*() const { return 0; }
     T* operator->() { return 0; }
 
-    void trace(Visitor* visitor)
+    void Trace(Visitor* visitor)
     {
     }
 };
@@ -32,23 +32,30 @@
 class PartObject {
     DISALLOW_NEW();
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     OwnPtr<HeapObject> m_obj1;
     RefPtr<HeapObject> m_obj2;
     bar::unique_ptr<HeapObject> m_obj3;
     std::unique_ptr<HeapObject> m_obj4;
+    Vector<int>::iterator m_iterator1;
+    HeapVector<Member<HeapObject>>::iterator m_iterator2;
+    HeapHashSet<PartObject>::const_iterator m_iterator3;
 };
 
 class HeapObject : public GarbageCollectedFinalized<HeapObject> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     PartObject m_part;
     OwnPtr<HeapObject> m_obj1;
     RefPtr<HeapObject> m_obj2;
     bar::unique_ptr<HeapObject> m_obj3;
     std::unique_ptr<HeapObject> m_obj4;
+    HeapHashMap<int, Member<HeapObject>>::reverse_iterator m_iterator3;
+    HeapDeque<Member<HeapObject>>::const_reverse_iterator m_iterator4;
+    HeapListHashSet<Member<HeapObject>>::const_iterator m_iterator5;
+    HeapLinkedHashSet<Member<HeapObject>>::const_iterator m_iterator6;
 };
 
 }
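
The new m_iterator fields above extend the illegal-field checks to iterators into GC-managed collections. A sketch of why such fields are rejected, under the same stand-in types: an iterator into a heap-backed collection is effectively a raw interior pointer, and the collector can move or reclaim the backing store without updating it, leaving the field dangling.

#include <vector>

template <typename T> class Member { T* raw_ = nullptr; };
template <typename T>
class HeapVector {
 public:
  using iterator = T*;  // raw pointer into the GC-managed backing store
  iterator begin() { return storage_.data(); }
 private:
  std::vector<T> storage_;
};

class BadHolder {
  // The plugin flags a field like this: nothing keeps the backing store
  // alive or updates the cached pointer if the heap is compacted.
  HeapVector<Member<int>>::iterator cached_it_ = nullptr;
};
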
diff --git a/tools/clang/blink_gc_plugin/tests/fields_illegal_tracing.txt b/tools/clang/blink_gc_plugin/tests/fields_illegal_tracing.txt
index 5e428dc..5486505 100644
--- a/tools/clang/blink_gc_plugin/tests/fields_illegal_tracing.txt
+++ b/tools/clang/blink_gc_plugin/tests/fields_illegal_tracing.txt
@@ -11,20 +11,35 @@
 ./fields_illegal_tracing.h:40:5: note: [blink-gc] std::unique_ptr field 'm_obj4' to a GC managed class declared here:
     std::unique_ptr<HeapObject> m_obj4;
     ^
-./fields_illegal_tracing.h:43:1: warning: [blink-gc] Class 'HeapObject' contains invalid fields.
+./fields_illegal_tracing.h:42:5: note: [blink-gc] Iterator field 'm_iterator2' to a GC managed collection declared here:
+    HeapVector<Member<HeapObject>>::iterator m_iterator2;
+    ^
+./fields_illegal_tracing.h:43:5: note: [blink-gc] Iterator field 'm_iterator3' to a GC managed collection declared here:
+    HeapHashSet<PartObject>::const_iterator m_iterator3;
+    ^
+./fields_illegal_tracing.h:46:1: warning: [blink-gc] Class 'HeapObject' contains invalid fields.
 class HeapObject : public GarbageCollectedFinalized<HeapObject> {
 ^
-./fields_illegal_tracing.h:48:5: note: [blink-gc] OwnPtr field 'm_obj1' to a GC managed class declared here:
+./fields_illegal_tracing.h:51:5: note: [blink-gc] OwnPtr field 'm_obj1' to a GC managed class declared here:
     OwnPtr<HeapObject> m_obj1;
     ^
-./fields_illegal_tracing.h:49:5: note: [blink-gc] RefPtr field 'm_obj2' to a GC managed class declared here:
+./fields_illegal_tracing.h:52:5: note: [blink-gc] RefPtr field 'm_obj2' to a GC managed class declared here:
     RefPtr<HeapObject> m_obj2;
     ^
-./fields_illegal_tracing.h:51:5: note: [blink-gc] std::unique_ptr field 'm_obj4' to a GC managed class declared here:
+./fields_illegal_tracing.h:54:5: note: [blink-gc] std::unique_ptr field 'm_obj4' to a GC managed class declared here:
     std::unique_ptr<HeapObject> m_obj4;
     ^
+./fields_illegal_tracing.h:55:5: note: [blink-gc] Iterator field 'm_iterator3' to a GC managed collection declared here:
+    HeapHashMap<int, Member<HeapObject>>::reverse_iterator m_iterator3;
+    ^
+./fields_illegal_tracing.h:56:5: note: [blink-gc] Iterator field 'm_iterator4' to a GC managed collection declared here:
+    HeapDeque<Member<HeapObject>>::const_reverse_iterator m_iterator4;
+    ^
+./fields_illegal_tracing.h:58:5: note: [blink-gc] Iterator field 'm_iterator6' to a GC managed collection declared here:
+    HeapLinkedHashSet<Member<HeapObject>>::const_iterator m_iterator6;
+    ^
 fields_illegal_tracing.cpp:9:1: warning: [blink-gc] Class 'PartObject' has untraced or not traceable fields.
-void PartObject::trace(Visitor* visitor) {
+void PartObject::Trace(Visitor* visitor) {
 ^
 ./fields_illegal_tracing.h:37:5: note: [blink-gc] Untraceable field 'm_obj1' declared here:
     OwnPtr<HeapObject> m_obj1;
@@ -36,15 +51,18 @@
     std::unique_ptr<HeapObject> m_obj4;
     ^
 fields_illegal_tracing.cpp:16:1: warning: [blink-gc] Class 'HeapObject' has untraced or not traceable fields.
-void HeapObject::trace(Visitor* visitor) {
+void HeapObject::Trace(Visitor* visitor) {
 ^
-./fields_illegal_tracing.h:48:5: note: [blink-gc] Untraceable field 'm_obj1' declared here:
+./fields_illegal_tracing.h:51:5: note: [blink-gc] Untraceable field 'm_obj1' declared here:
     OwnPtr<HeapObject> m_obj1;
     ^
-./fields_illegal_tracing.h:49:5: note: [blink-gc] Untraceable field 'm_obj2' declared here:
+./fields_illegal_tracing.h:52:5: note: [blink-gc] Untraceable field 'm_obj2' declared here:
     RefPtr<HeapObject> m_obj2;
     ^
-./fields_illegal_tracing.h:51:5: note: [blink-gc] Untraceable field 'm_obj4' declared here:
+./fields_illegal_tracing.h:54:5: note: [blink-gc] Untraceable field 'm_obj4' declared here:
     std::unique_ptr<HeapObject> m_obj4;
     ^
+./fields_illegal_tracing.h:57:5: note: [blink-gc] Untraced field 'm_iterator5' declared here:
+    HeapListHashSet<Member<HeapObject>>::const_iterator m_iterator5;
+    ^
 4 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/fields_require_tracing.cpp b/tools/clang/blink_gc_plugin/tests/fields_require_tracing.cpp
index 880ce1e..5028768 100644
--- a/tools/clang/blink_gc_plugin/tests/fields_require_tracing.cpp
+++ b/tools/clang/blink_gc_plugin/tests/fields_require_tracing.cpp
@@ -6,21 +6,21 @@
 
 namespace blink {
 
-void PartObject::trace(Visitor* visitor) {
-    m_obj1->trace(visitor); // Don't allow direct tracing.
-    visitor->trace(m_obj2);
-    // Missing visitor->trace(m_obj3);
-    visitor->trace(m_parts);
+void PartObject::Trace(Visitor* visitor) {
+    m_obj1->Trace(visitor); // Don't allow direct tracing.
+    visitor->Trace(m_obj2);
+    // Missing visitor->Trace(m_obj3);
+    visitor->Trace(m_parts);
 }
 
-void PartBObject::trace(Visitor* visitor) {
-  // Missing visitor->trace(m_set);
-  visitor->trace(m_vector);
+void PartBObject::Trace(Visitor* visitor) {
+  // Missing visitor->Trace(m_set);
+  visitor->Trace(m_vector);
 }
 
-void HeapObject::trace(Visitor* visitor) {
-    // Missing visitor->trace(m_part);
-    visitor->trace(m_obj);
+void HeapObject::Trace(Visitor* visitor) {
+    // Missing visitor->Trace(m_part);
+    visitor->Trace(m_obj);
 }
 
 }
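
The deliberately missing calls above outline the core rule: every Member and heap-collection field must be passed to visitor->Trace(), and fields must never be traced directly. A correct sketch with stand-in types:

class Visitor { public: template <typename T> void Trace(const T&) {} };
template <typename T> class GarbageCollected {};
template <typename T> class Member {};
template <typename T> class HeapVector {};

class Node : public GarbageCollected<Node> {
 public:
  void Trace(Visitor* visitor) {
    visitor->Trace(next_);      // correct: let the visitor dispatch
    visitor->Trace(children_);  // heap collections are traced the same way
    // next_->Trace(visitor);   // wrong: direct tracing draws a warning
  }
 private:
  Member<Node> next_;
  HeapVector<Member<Node>> children_;
};
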
diff --git a/tools/clang/blink_gc_plugin/tests/fields_require_tracing.h b/tools/clang/blink_gc_plugin/tests/fields_require_tracing.h
index 1819411..c8159a9 100644
--- a/tools/clang/blink_gc_plugin/tests/fields_require_tracing.h
+++ b/tools/clang/blink_gc_plugin/tests/fields_require_tracing.h
@@ -15,7 +15,7 @@
 class PartBObject {
     DISALLOW_NEW();
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     HeapHashSet<PartBObject> m_set;
     HeapVector<PartBObject> m_vector;
@@ -24,7 +24,7 @@
 class PartObject {
     DISALLOW_NEW();
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Member<HeapObject> m_obj1;
     Member<HeapObject> m_obj2;
@@ -35,7 +35,7 @@
 
 class HeapObject : public GarbageCollected<HeapObject> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     PartObject m_part;
     Member<HeapObject> m_obj;
diff --git a/tools/clang/blink_gc_plugin/tests/fields_require_tracing.txt b/tools/clang/blink_gc_plugin/tests/fields_require_tracing.txt
index 39d49f3..75ca60c 100644
--- a/tools/clang/blink_gc_plugin/tests/fields_require_tracing.txt
+++ b/tools/clang/blink_gc_plugin/tests/fields_require_tracing.txt
@@ -1,5 +1,5 @@
 fields_require_tracing.cpp:9:1: warning: [blink-gc] Class 'PartObject' has untraced fields that require tracing.
-void PartObject::trace(Visitor* visitor) {
+void PartObject::Trace(Visitor* visitor) {
 ^
 ./fields_require_tracing.h:29:5: note: [blink-gc] Untraced field 'm_obj1' declared here:
     Member<HeapObject> m_obj1;
@@ -8,13 +8,13 @@
     Member<HeapObject> m_obj3;
     ^
 fields_require_tracing.cpp:16:1: warning: [blink-gc] Class 'PartBObject' has untraced fields that require tracing.
-void PartBObject::trace(Visitor* visitor) {
+void PartBObject::Trace(Visitor* visitor) {
 ^
 ./fields_require_tracing.h:20:5: note: [blink-gc] Untraced field 'm_set' declared here:
     HeapHashSet<PartBObject> m_set;
     ^
 fields_require_tracing.cpp:21:1: warning: [blink-gc] Class 'HeapObject' has untraced fields that require tracing.
-void HeapObject::trace(Visitor* visitor) {
+void HeapObject::Trace(Visitor* visitor) {
 ^
 ./fields_require_tracing.h:40:5: note: [blink-gc] Untraced field 'm_part' declared here:
     PartObject m_part;
diff --git a/tools/clang/blink_gc_plugin/tests/finalize_after_dispatch.cpp b/tools/clang/blink_gc_plugin/tests/finalize_after_dispatch.cpp
index 91244d1..14166a7 100644
--- a/tools/clang/blink_gc_plugin/tests/finalize_after_dispatch.cpp
+++ b/tools/clang/blink_gc_plugin/tests/finalize_after_dispatch.cpp
@@ -8,26 +8,26 @@
 
 static B* toB(A* a) { return static_cast<B*>(a); }
 
-void A::trace(Visitor* visitor)
+void A::Trace(Visitor* visitor)
 {
     switch (m_type) {
     case TB:
-        toB(this)->traceAfterDispatch(visitor);
+        toB(this)->TraceAfterDispatch(visitor);
         break;
     case TC:
-        static_cast<C*>(this)->traceAfterDispatch(visitor);
+        static_cast<C*>(this)->TraceAfterDispatch(visitor);
         break;
     case TD:
-        static_cast<D*>(this)->traceAfterDispatch(visitor);
+        static_cast<D*>(this)->TraceAfterDispatch(visitor);
         break;
     }
 }
 
-void A::traceAfterDispatch(Visitor* visitor)
+void A::TraceAfterDispatch(Visitor* visitor)
 {
 }
 
-void A::finalizeGarbageCollectedObject()
+void A::FinalizeGarbageCollectedObject()
 {
     switch (m_type) {
     case TB:
@@ -42,22 +42,22 @@
     }
 }
 
-void B::traceAfterDispatch(Visitor* visitor)
+void B::TraceAfterDispatch(Visitor* visitor)
 {
-    visitor->trace(m_a);
-    A::traceAfterDispatch(visitor);
+    visitor->Trace(m_a);
+    A::TraceAfterDispatch(visitor);
 }
 
-void C::traceAfterDispatch(Visitor* visitor)
+void C::TraceAfterDispatch(Visitor* visitor)
 {
-    visitor->trace(m_a);
-    A::traceAfterDispatch(visitor);
+    visitor->Trace(m_a);
+    A::TraceAfterDispatch(visitor);
 }
 
-void D::traceAfterDispatch(Visitor* visitor)
+void D::TraceAfterDispatch(Visitor* visitor)
 {
-    visitor->trace(m_a);
-    Abstract::traceAfterDispatch(visitor);
+    visitor->Trace(m_a);
+    Abstract::TraceAfterDispatch(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/finalize_after_dispatch.h b/tools/clang/blink_gc_plugin/tests/finalize_after_dispatch.h
index acd16ec..485e5cf 100644
--- a/tools/clang/blink_gc_plugin/tests/finalize_after_dispatch.h
+++ b/tools/clang/blink_gc_plugin/tests/finalize_after_dispatch.h
@@ -11,30 +11,30 @@
 
 class NeedsFinalize : public GarbageCollectedFinalized<NeedsFinalize> {
 public:
-    void trace(Visitor*);
-    void traceAfterDispatch(Visitor*);
-    // Needs a finalizeGarbageCollectedObject method.
+    void Trace(Visitor*);
+    void TraceAfterDispatch(Visitor*);
+    // Needs a FinalizeGarbageCollectedObject method.
 };
 
 class NeedsDispatch : public GarbageCollectedFinalized<NeedsDispatch> {
 public:
-    void trace(Visitor*);
-    // Needs a traceAfterDispatch method.
-    void finalizeGarbageCollectedObject() { };
+    void Trace(Visitor*);
+    // Needs a TraceAfterDispatch method.
+    void FinalizeGarbageCollectedObject() { };
 };
 
 class NeedsFinalizedBase : public GarbageCollected<NeedsFinalizedBase> {
 public:
-    void trace(Visitor*) { };
-    void traceAfterDispatch(Visitor*) { };
-    void finalizeGarbageCollectedObject() { };
+    void Trace(Visitor*) { };
+    void TraceAfterDispatch(Visitor*) { };
+    void FinalizeGarbageCollectedObject() { };
 };
 
 class A : GarbageCollectedFinalized<A> {
 public:
-    void trace(Visitor*);
-    void traceAfterDispatch(Visitor*);
-    void finalizeGarbageCollectedObject();
+    void Trace(Visitor*);
+    void TraceAfterDispatch(Visitor*);
+    void FinalizeGarbageCollectedObject();
 protected:
     enum Type { TB, TC, TD };
     A(Type type) : m_type(type) { }
@@ -46,7 +46,7 @@
 public:
     B() : A(TB) { }
     ~B() { }
-    void traceAfterDispatch(Visitor*);
+    void TraceAfterDispatch(Visitor*);
 private:
     Member<A> m_a;
 };
@@ -54,7 +54,7 @@
 class C : public A {
 public:
     C() : A(TC) { }
-    void traceAfterDispatch(Visitor*);
+    void TraceAfterDispatch(Visitor*);
 private:
     Member<A> m_a;
 };
@@ -68,7 +68,7 @@
 class D : public Abstract {
 public:
     D() : Abstract(TD) { }
-    void traceAfterDispatch(Visitor*);
+    void TraceAfterDispatch(Visitor*);
 private:
     Member<A> m_a;
 };
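
These fixtures test manual trace dispatch: instead of a virtual Trace, the base switches on a type tag and forwards to each subclass's TraceAfterDispatch, and the plugin verifies that every subclass appears in the switch (and likewise in FinalizeGarbageCollectedObject). A compilable sketch of the scheme, stand-in types only:

class Visitor { public: template <typename T> void Trace(const T&) {} };
template <typename T> class GarbageCollectedFinalized {};
template <typename T> class Member {};

class Base : public GarbageCollectedFinalized<Base> {
 public:
  void Trace(Visitor* visitor);          // manual dispatch lives here
  void TraceAfterDispatch(Visitor*) {}   // traces Base's own fields
 protected:
  enum Type { kDerived };
  explicit Base(Type type) : type_(type) {}
  Type type_;
};

class Derived : public Base {
 public:
  Derived() : Base(kDerived) {}
  void TraceAfterDispatch(Visitor* visitor) {
    visitor->Trace(field_);
    Base::TraceAfterDispatch(visitor);   // chain to the base's fields
  }
 private:
  Member<Base> field_;
};

void Base::Trace(Visitor* visitor) {
  switch (type_) {                        // every subclass must be listed
    case kDerived:
      static_cast<Derived*>(this)->TraceAfterDispatch(visitor);
      break;
  }
}
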
diff --git a/tools/clang/blink_gc_plugin/tests/finalize_after_dispatch.txt b/tools/clang/blink_gc_plugin/tests/finalize_after_dispatch.txt
index 8a652a4..0a4122b 100644
--- a/tools/clang/blink_gc_plugin/tests/finalize_after_dispatch.txt
+++ b/tools/clang/blink_gc_plugin/tests/finalize_after_dispatch.txt
@@ -9,9 +9,9 @@
 class NeedsFinalizedBase : public GarbageCollected<NeedsFinalizedBase> {
 ^
 ./finalize_after_dispatch.h:30:5: note: [blink-gc] User-declared finalizer declared here:
-    void finalizeGarbageCollectedObject() { };
+    void FinalizeGarbageCollectedObject() { };
     ^
 finalize_after_dispatch.cpp:30:1: warning: [blink-gc] Missing dispatch to class 'D' in manual finalize dispatch.
-void A::finalizeGarbageCollectedObject()
+void A::FinalizeGarbageCollectedObject()
 ^
 4 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/garbage_collected_mixin.cpp b/tools/clang/blink_gc_plugin/tests/garbage_collected_mixin.cpp
index e8f42f2..683461e 100644
--- a/tools/clang/blink_gc_plugin/tests/garbage_collected_mixin.cpp
+++ b/tools/clang/blink_gc_plugin/tests/garbage_collected_mixin.cpp
@@ -6,15 +6,15 @@
 
 namespace blink {
 
-void Mixin::trace(Visitor* visitor)
+void Mixin::Trace(Visitor* visitor)
 {
-    // Missing: visitor->trace(m_self);
+    // Missing: visitor->Trace(m_self);
 }
 
-void HeapObject::trace(Visitor* visitor)
+void HeapObject::Trace(Visitor* visitor)
 {
-    visitor->trace(m_mix);
-    // Missing: Mixin::trace(visitor);
+    visitor->Trace(m_mix);
+    // Missing: Mixin::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/garbage_collected_mixin.h b/tools/clang/blink_gc_plugin/tests/garbage_collected_mixin.h
index 3c6f868..3136f31 100644
--- a/tools/clang/blink_gc_plugin/tests/garbage_collected_mixin.h
+++ b/tools/clang/blink_gc_plugin/tests/garbage_collected_mixin.h
@@ -11,7 +11,7 @@
 
 class Mixin : public GarbageCollectedMixin {
 public:
-    virtual void trace(Visitor*) override;
+    virtual void Trace(Visitor*) override;
 private:
     Member<Mixin> m_self;
 };
@@ -19,7 +19,7 @@
 class HeapObject : public GarbageCollected<HeapObject>, public Mixin {
     USING_GARBAGE_COLLECTED_MIXIN(HeapObject);
 public:
-    virtual void trace(Visitor*) override;
+    virtual void Trace(Visitor*) override;
 private:
     Member<Mixin> m_mix;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/garbage_collected_mixin.txt b/tools/clang/blink_gc_plugin/tests/garbage_collected_mixin.txt
index 4051a6a..a14074f 100644
--- a/tools/clang/blink_gc_plugin/tests/garbage_collected_mixin.txt
+++ b/tools/clang/blink_gc_plugin/tests/garbage_collected_mixin.txt
@@ -1,10 +1,10 @@
 garbage_collected_mixin.cpp:9:1: warning: [blink-gc] Class 'Mixin' has untraced fields that require tracing.
-void Mixin::trace(Visitor* visitor)
+void Mixin::Trace(Visitor* visitor)
 ^
 ./garbage_collected_mixin.h:16:5: note: [blink-gc] Untraced field 'm_self' declared here:
     Member<Mixin> m_self;
     ^
 garbage_collected_mixin.cpp:14:1: warning: [blink-gc] Base class 'Mixin' of derived class 'HeapObject' requires tracing.
-void HeapObject::trace(Visitor* visitor)
+void HeapObject::Trace(Visitor* visitor)
 ^
 2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/heap/stubs.h b/tools/clang/blink_gc_plugin/tests/heap/stubs.h
index 813ab12..b674048 100644
--- a/tools/clang/blink_gc_plugin/tests/heap/stubs.h
+++ b/tools/clang/blink_gc_plugin/tests/heap/stubs.h
@@ -68,37 +68,62 @@
 class Vector : public VectorDestructorBase<inlineCapacity,
                                            Allocator::isGarbageCollected,
                                            VectorTraits<T>::needsDestruction> {
-public:
-    size_t size();
-    T& operator[](size_t);
+ public:
+  using iterator = T*;
+  using const_iterator = const T*;
+  using reverse_iterator = T*;
+  using const_reverse_iterator = const T*;
+
+  size_t size();
+  T& operator[](size_t);
 };
 
-template<
-    typename T,
-    size_t inlineCapacity = 0,
-    typename Allocator = DefaultAllocator>
-class Deque {};
+template <typename T,
+          size_t inlineCapacity = 0,
+          typename Allocator = DefaultAllocator>
+class Deque {
+ public:
+  using iterator = T*;
+  using const_iterator = const T*;
+  using reverse_iterator = T*;
+  using const_reverse_iterator = const T*;
+};
 
-template<
-    typename ValueArg,
-    typename HashArg = void,
-    typename TraitsArg = void,
-    typename Allocator = DefaultAllocator>
-class HashSet {};
+template <typename ValueArg,
+          typename HashArg = void,
+          typename TraitsArg = void,
+          typename Allocator = DefaultAllocator>
+class HashSet {
+ public:
+  typedef ValueArg* iterator;
+  typedef const ValueArg* const_iterator;
+  typedef ValueArg* reverse_iterator;
+  typedef const ValueArg* const_reverse_iterator;
+};
 
-template<
-    typename ValueArg,
-    typename HashArg = void,
-    typename TraitsArg = void,
-    typename Allocator = DefaultAllocator>
-class ListHashSet {};
+template <typename ValueArg,
+          typename HashArg = void,
+          typename TraitsArg = void,
+          typename Allocator = DefaultAllocator>
+class ListHashSet {
+ public:
+  typedef ValueArg* iterator;
+  typedef const ValueArg* const_iterator;
+  typedef ValueArg* reverse_iterator;
+  typedef const ValueArg* const_reverse_iterator;
+};
 
-template<
-    typename ValueArg,
-    typename HashArg = void,
-    typename TraitsArg = void,
-    typename Allocator = DefaultAllocator>
-class LinkedHashSet {};
+template <typename ValueArg,
+          typename HashArg = void,
+          typename TraitsArg = void,
+          typename Allocator = DefaultAllocator>
+class LinkedHashSet {
+ public:
+  typedef ValueArg* iterator;
+  typedef const ValueArg* const_iterator;
+  typedef ValueArg* reverse_iterator;
+  typedef const ValueArg* const_reverse_iterator;
+};
 
 template<
     typename ValueArg,
@@ -107,15 +132,19 @@
     typename Allocator = DefaultAllocator>
 class HashCountedSet {};
 
-template<
-    typename KeyArg,
-    typename MappedArg,
-    typename HashArg = void,
-    typename KeyTraitsArg = void,
-    typename MappedTraitsArg = void,
-    typename Allocator = DefaultAllocator>
-class HashMap {};
-
+template <typename KeyArg,
+          typename MappedArg,
+          typename HashArg = void,
+          typename KeyTraitsArg = void,
+          typename MappedTraitsArg = void,
+          typename Allocator = DefaultAllocator>
+class HashMap {
+ public:
+  typedef MappedArg* iterator;
+  typedef const MappedArg* const_iterator;
+  typedef MappedArg* reverse_iterator;
+  typedef const MappedArg* const_reverse_iterator;
+};
 }
 
 // Empty namespace declaration to exercise internal
@@ -161,8 +190,8 @@
 
 #define USING_GARBAGE_COLLECTED_MIXIN(type)                     \
 public:                                                         \
-    virtual void adjustAndMark(Visitor*) const override { }     \
-    virtual bool isHeapObjectAlive(Visitor*) const override { return 0; }
+    virtual void AdjustAndMark(Visitor*) const override { }     \
+    virtual bool IsHeapObjectAlive(Visitor*) const override { return 0; }
 
 #define EAGERLY_FINALIZED() typedef int IsEagerlyFinalizedMarker
 
@@ -251,13 +280,13 @@
 class VisitorHelper {
 public:
     template<typename T>
-    void trace(const T&);
+    void Trace(const T&);
 };
 
 class Visitor : public VisitorHelper<Visitor> {
 public:
     template<typename T, void (T::*method)(Visitor*)>
-    void registerWeakMembers(const T* obj);
+    void RegisterWeakMembers(const T* obj);
 };
 
 class InlinedGlobalMarkingVisitor
@@ -266,19 +295,19 @@
     InlinedGlobalMarkingVisitor* operator->() { return this; }
 
     template<typename T, void (T::*method)(Visitor*)>
-    void registerWeakMembers(const T* obj);
+    void RegisterWeakMembers(const T* obj);
 };
 
 class GarbageCollectedMixin {
 public:
-    virtual void adjustAndMark(Visitor*) const = 0;
-    virtual bool isHeapObjectAlive(Visitor*) const = 0;
-    virtual void trace(Visitor*) { }
+    virtual void AdjustAndMark(Visitor*) const = 0;
+    virtual bool IsHeapObjectAlive(Visitor*) const = 0;
+    virtual void Trace(Visitor*) { }
 };
 
 template<typename T>
 struct TraceIfNeeded {
-    static void trace(Visitor*, T*);
+    static void Trace(Visitor*, T*);
 };
 
 }
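
The stubs header gives the tests a minimal mock of the heap API; the hunks above add iterator typedefs for the new iterator checks and rename the hooks to the capitalized style. One of those hooks, RegisterWeakMembers, lets a class have the visitor call it back during marking so it can clear references to dead objects. A sketch of that usage, with stand-in declarations mirroring the stub signatures (the real semantics are an assumption here):

class Visitor {
 public:
  template <typename T, void (T::*method)(Visitor*)>
  void RegisterWeakMembers(const T*) {}
};
template <typename T> class GarbageCollected {};
template <typename T> class WeakMember { public: void Clear() {} };

class Observer : public GarbageCollected<Observer> {
 public:
  void Trace(Visitor* visitor) {
    visitor->RegisterWeakMembers<Observer, &Observer::ClearWeakMembers>(this);
  }
  void ClearWeakMembers(Visitor*) { peer_.Clear(); }  // drop dead peers
 private:
  WeakMember<Observer> peer_;
};
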
diff --git a/tools/clang/blink_gc_plugin/tests/ignore_class.cpp b/tools/clang/blink_gc_plugin/tests/ignore_class.cpp
index c539eb6..26b0ab1 100644
--- a/tools/clang/blink_gc_plugin/tests/ignore_class.cpp
+++ b/tools/clang/blink_gc_plugin/tests/ignore_class.cpp
@@ -6,14 +6,14 @@
 
 namespace blink {
 
-void B::trace(Visitor* visitor)
+void B::Trace(Visitor* visitor)
 {
     // Class is ignored so no checking here.
 }
 
-void C::trace(Visitor* visitor)
+void C::Trace(Visitor* visitor)
 {
-    // Missing trace of m_obj.
+    // Missing Trace of m_obj.
     // Ignored base class B does not need tracing.
 }
 
diff --git a/tools/clang/blink_gc_plugin/tests/ignore_class.h b/tools/clang/blink_gc_plugin/tests/ignore_class.h
index 580ed7c..8bda898 100644
--- a/tools/clang/blink_gc_plugin/tests/ignore_class.h
+++ b/tools/clang/blink_gc_plugin/tests/ignore_class.h
@@ -11,7 +11,7 @@
 
 class HeapObject : public GarbageCollected<HeapObject> { };
 
-// Don't require trace method on ignored class.
+// Don't require Trace method on ignored class.
 class GC_PLUGIN_IGNORE("http://crbug.com/12345") A;
 class A : public GarbageCollected<A> {
 private:
@@ -22,7 +22,7 @@
 class GC_PLUGIN_IGNORE("http://crbug.com/12345") B;
 class B : public GarbageCollected<B> {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 private:
     Member<HeapObject> m_obj;
 };
@@ -30,7 +30,7 @@
 // Don't require tracing of an ignored base class.
 class C : public B {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Member<HeapObject> m_obj;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/ignore_class.txt b/tools/clang/blink_gc_plugin/tests/ignore_class.txt
index d3d2d80..212f332 100644
--- a/tools/clang/blink_gc_plugin/tests/ignore_class.txt
+++ b/tools/clang/blink_gc_plugin/tests/ignore_class.txt
@@ -1,5 +1,5 @@
 ignore_class.cpp:14:1: warning: [blink-gc] Class 'C' has untraced fields that require tracing.
-void C::trace(Visitor* visitor)
+void C::Trace(Visitor* visitor)
 ^
 ./ignore_class.h:35:5: note: [blink-gc] Untraced field 'm_obj' declared here:
     Member<HeapObject> m_obj;
diff --git a/tools/clang/blink_gc_plugin/tests/ignore_fields.cpp b/tools/clang/blink_gc_plugin/tests/ignore_fields.cpp
index 118af75..80ffd4e 100644
--- a/tools/clang/blink_gc_plugin/tests/ignore_fields.cpp
+++ b/tools/clang/blink_gc_plugin/tests/ignore_fields.cpp
@@ -6,9 +6,9 @@
 
 namespace blink {
 
-void C::trace(Visitor* visitor)
+void C::Trace(Visitor* visitor)
 {
-    // Missing trace of m_one.
+    // Missing Trace of m_one.
     // Not missing ignored field m_two.
 }
 
diff --git a/tools/clang/blink_gc_plugin/tests/ignore_fields.h b/tools/clang/blink_gc_plugin/tests/ignore_fields.h
index e12bbab..b0f773f 100644
--- a/tools/clang/blink_gc_plugin/tests/ignore_fields.h
+++ b/tools/clang/blink_gc_plugin/tests/ignore_fields.h
@@ -11,7 +11,7 @@
 
 class HeapObject : public GarbageCollected<HeapObject> {
 public:
-    virtual void trace(Visitor*) { }
+    virtual void Trace(Visitor*) { }
 };
 
 // Don't warn about raw pointers to heap allocated objects.
@@ -21,7 +21,7 @@
     HeapObject* m_obj;
 };
 
-// Don't require trace method when (all) GC fields are ignored.
+// Don't require Trace method when (all) GC fields are ignored.
 class B : public GarbageCollected<B> {
 private:
     GC_PLUGIN_IGNORE("http://crbug.com/12345")
@@ -31,7 +31,7 @@
 // Don't require tracing an ignored field.
 class C : public GarbageCollected<C> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Member<HeapObject> m_one;
     GC_PLUGIN_IGNORE("http://crbug.com/12345")
diff --git a/tools/clang/blink_gc_plugin/tests/ignore_fields.txt b/tools/clang/blink_gc_plugin/tests/ignore_fields.txt
index b4de498..a1b2591 100644
--- a/tools/clang/blink_gc_plugin/tests/ignore_fields.txt
+++ b/tools/clang/blink_gc_plugin/tests/ignore_fields.txt
@@ -1,5 +1,5 @@
 ignore_fields.cpp:9:1: warning: [blink-gc] Class 'C' has untraced fields that require tracing.
-void C::trace(Visitor* visitor)
+void C::Trace(Visitor* visitor)
 ^
 ./ignore_fields.h:36:5: note: [blink-gc] Untraced field 'm_one' declared here:
     Member<HeapObject> m_one;
diff --git a/tools/clang/blink_gc_plugin/tests/inner_class.cpp b/tools/clang/blink_gc_plugin/tests/inner_class.cpp
index 03a53ea..cb74d8b 100644
--- a/tools/clang/blink_gc_plugin/tests/inner_class.cpp
+++ b/tools/clang/blink_gc_plugin/tests/inner_class.cpp
@@ -6,9 +6,9 @@
 
 namespace blink {
 
-void SomeObject::InnerObject::trace(Visitor* visitor)
+void SomeObject::InnerObject::Trace(Visitor* visitor)
 {
-    // Missing: visitor->trace(m_obj);
+    // Missing: visitor->Trace(m_obj);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/inner_class.h b/tools/clang/blink_gc_plugin/tests/inner_class.h
index 30f6ce3..5010a2c 100644
--- a/tools/clang/blink_gc_plugin/tests/inner_class.h
+++ b/tools/clang/blink_gc_plugin/tests/inner_class.h
@@ -13,7 +13,7 @@
 private:
     class InnerObject : public GarbageCollected<InnerObject> {
     public:
-        void trace(Visitor*);
+        void Trace(Visitor*);
     private:
         Member<InnerObject> m_obj;
     };
diff --git a/tools/clang/blink_gc_plugin/tests/inner_class.txt b/tools/clang/blink_gc_plugin/tests/inner_class.txt
index acdef6e..9e57527 100644
--- a/tools/clang/blink_gc_plugin/tests/inner_class.txt
+++ b/tools/clang/blink_gc_plugin/tests/inner_class.txt
@@ -1,5 +1,5 @@
 inner_class.cpp:9:1: warning: [blink-gc] Class 'InnerObject' has untraced fields that require tracing.
-void SomeObject::InnerObject::trace(Visitor* visitor)
+void SomeObject::InnerObject::Trace(Visitor* visitor)
 ^
 ./inner_class.h:18:9: note: [blink-gc] Untraced field 'm_obj' declared here:
         Member<InnerObject> m_obj;
diff --git a/tools/clang/blink_gc_plugin/tests/left_most_gc_base.h b/tools/clang/blink_gc_plugin/tests/left_most_gc_base.h
index 0d76d61..96d01d3 100644
--- a/tools/clang/blink_gc_plugin/tests/left_most_gc_base.h
+++ b/tools/clang/blink_gc_plugin/tests/left_most_gc_base.h
@@ -20,7 +20,7 @@
 
 class C : public GarbageCollected<C> {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 };
 
 class IllFormed : public A, public C { }; // Error
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/base_class_must_define_virtual_trace.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/base_class_must_define_virtual_trace.cpp
new file mode 100644
index 0000000..cd38ec9
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/base_class_must_define_virtual_trace.cpp
@@ -0,0 +1,19 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base_class_must_define_virtual_trace.h"
+
+namespace blink {
+
+void PartDerived::trace(Visitor* visitor)
+{
+}
+
+void HeapDerived::trace(Visitor* visitor)
+{
+    visitor->trace(m_part);
+}
+
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/base_class_must_define_virtual_trace.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/base_class_must_define_virtual_trace.h
new file mode 100644
index 0000000..fbd26d7
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/base_class_must_define_virtual_trace.h
@@ -0,0 +1,38 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CLASS_MUST_DEFINE_VIRTUAL_TRACE_H_
+#define BASE_CLASS_MUST_DEFINE_VIRTUAL_TRACE_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class PartBase {
+    DISALLOW_NEW();
+    // Missing virtual trace.
+};
+
+class PartDerived : public PartBase {
+    DISALLOW_NEW();
+public:
+    virtual void trace(Visitor*);
+};
+
+class HeapBase : public GarbageCollected<HeapBase> {
+    // Missing virtual trace.
+};
+
+
+class HeapDerived : public HeapBase {
+public:
+    virtual void trace(Visitor*);
+private:
+    PartDerived m_part;
+};
+
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/base_class_must_define_virtual_trace.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/base_class_must_define_virtual_trace.txt
new file mode 100644
index 0000000..f8276eb
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/base_class_must_define_virtual_trace.txt
@@ -0,0 +1,8 @@
+In file included from base_class_must_define_virtual_trace.cpp:5:
+./base_class_must_define_virtual_trace.h:12:1: warning: [blink-gc] Left-most base class 'PartBase' of derived class 'PartDerived' must define a virtual trace method.
+class PartBase {
+^
+./base_class_must_define_virtual_trace.h:23:1: warning: [blink-gc] Left-most base class 'HeapBase' of derived class 'HeapDerived' must define a virtual trace method.
+class HeapBase : public GarbageCollected<HeapBase> {
+^
+2 warnings generated.
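
The legacy_naming files added from here on mirror the existing fixtures with the pre-rename lowercase spelling, presumably so the plugin keeps checking both conventions during the migration; that dual support is an inference from the parallel suites, not stated in the patch. A side-by-side sketch of the two spellings, stand-in types only:

class Visitor {
 public:
  template <typename T> void Trace(const T&) {}
  template <typename T> void trace(const T&) {}  // legacy spelling
};
template <typename T> class GarbageCollected {};
template <typename T> class Member {};

class NewStyle : public GarbageCollected<NewStyle> {
 public:
  void Trace(Visitor* visitor) { visitor->Trace(field_); }
 private:
  Member<NewStyle> field_;
};

class LegacyStyle : public GarbageCollected<LegacyStyle> {
 public:
  void trace(Visitor* visitor) { visitor->trace(field_); }  // old style
 private:
  Member<LegacyStyle> field_;
};
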
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/base_requires_tracing.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/base_requires_tracing.cpp
new file mode 100644
index 0000000..87559a8
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/base_requires_tracing.cpp
@@ -0,0 +1,21 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base_requires_tracing.h"
+
+namespace blink {
+
+void A::trace(Visitor* visitor) { }
+
+void C::trace(Visitor* visitor) {
+  visitor->trace(m_a);
+  // Missing B::trace(visitor)
+}
+
+void D::trace(Visitor* visitor) {
+  visitor->trace(m_a);
+  C::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/base_requires_tracing.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/base_requires_tracing.h
new file mode 100644
index 0000000..0205a08
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/base_requires_tracing.h
@@ -0,0 +1,37 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_REQUIRES_TRACING_H_
+#define BASE_REQUIRES_TRACING_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class A : public GarbageCollected<A> {
+public:
+    virtual void trace(Visitor*);
+};
+
+class B : public A {
+    // Does not need trace
+};
+
+class C : public B {
+public:
+    void trace(Visitor*);
+private:
+    Member<A> m_a;
+};
+
+class D : public C {
+public:
+    void trace(Visitor*);
+private:
+    Member<A> m_a;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/base_requires_tracing.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/base_requires_tracing.txt
new file mode 100644
index 0000000..ee525b9
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/base_requires_tracing.txt
@@ -0,0 +1,4 @@
+base_requires_tracing.cpp:11:1: warning: [blink-gc] Base class 'B' of derived class 'C' requires tracing.
+void C::trace(Visitor* visitor) {
+^
+1 warning generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_does_not_require_finalization.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_does_not_require_finalization.cpp
new file mode 100644
index 0000000..9c51eca
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_does_not_require_finalization.cpp
@@ -0,0 +1,22 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "class_does_not_require_finalization.h"
+
+namespace blink {
+
+void DoesNotNeedFinalizer::trace(Visitor* visitor)
+{
+}
+
+DoesNotNeedFinalizer2::~DoesNotNeedFinalizer2()
+{
+}
+
+void DoesNotNeedFinalizer2::trace(Visitor* visitor)
+{
+}
+
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_does_not_require_finalization.flags b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_does_not_require_finalization.flags
new file mode 100644
index 0000000..b0bf138
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_does_not_require_finalization.flags
@@ -0,0 +1 @@
+-Xclang -plugin-arg-blink-gc-plugin -Xclang warn-unneeded-finalizer
\ No newline at end of file
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_does_not_require_finalization.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_does_not_require_finalization.h
new file mode 100644
index 0000000..c6530f3
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_does_not_require_finalization.h
@@ -0,0 +1,47 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CLASS_DOES_NOT_REQUIRE_FINALIZATION_BASE_H_
+#define CLASS_DOES_NOT_REQUIRE_FINALIZATION_BASE_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class DoesNeedFinalizer : public GarbageCollectedFinalized<DoesNeedFinalizer> {
+public:
+    ~DoesNeedFinalizer() { ; }
+    void trace(Visitor*);
+};
+
+class DoesNotNeedFinalizer
+    : public GarbageCollectedFinalized<DoesNotNeedFinalizer> {
+public:
+    void trace(Visitor*);
+};
+
+class DoesNotNeedFinalizer2
+    : public GarbageCollectedFinalized<DoesNotNeedFinalizer2> {
+public:
+    ~DoesNotNeedFinalizer2();
+    void trace(Visitor*);
+};
+
+class HasEmptyDtor {
+public:
+    virtual ~HasEmptyDtor() { }
+};
+
+// If there are any virtual destructors involved, give up.
+
+class DoesNeedFinalizer2
+    : public GarbageCollectedFinalized<DoesNeedFinalizer2>,
+      public HasEmptyDtor {
+public:
+    void trace(Visitor*);
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_does_not_require_finalization.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_does_not_require_finalization.txt
new file mode 100644
index 0000000..91e264d
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_does_not_require_finalization.txt
@@ -0,0 +1,8 @@
+In file included from class_does_not_require_finalization.cpp:5:
+./class_does_not_require_finalization.h:18:1: warning: [blink-gc] Class 'DoesNotNeedFinalizer' may not require finalization.
+class DoesNotNeedFinalizer
+^
+./class_does_not_require_finalization.h:24:1: warning: [blink-gc] Class 'DoesNotNeedFinalizer2' may not require finalization.
+class DoesNotNeedFinalizer2
+^
+2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_multiple_trace_bases.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_multiple_trace_bases.cpp
new file mode 100644
index 0000000..5bb87c9
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_multiple_trace_bases.cpp
@@ -0,0 +1,22 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "class_multiple_trace_bases.h"
+
+namespace blink {
+
+void Base::trace(Visitor* visitor) { }
+
+void Mixin1::trace(Visitor* visitor) { }
+
+void Mixin2::trace(Visitor* visitor) { }
+
+// Missing: void Derived1::trace(Visitor* visitor);
+
+void Derived2::trace(Visitor* visitor) {
+    Base::trace(visitor);
+    Mixin1::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_multiple_trace_bases.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_multiple_trace_bases.h
new file mode 100644
index 0000000..133f006
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_multiple_trace_bases.h
@@ -0,0 +1,39 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CLASS_MULTIPLE_TRACE_BASES_H_
+#define CLASS_MULTIPLE_TRACE_BASES_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class Base : public GarbageCollected<Base> {
+public:
+    virtual void trace(Visitor*);
+};
+
+class Mixin1 : public GarbageCollectedMixin {
+public:
+    void trace(Visitor*);
+};
+
+class Mixin2 : public GarbageCollectedMixin {
+public:
+    void trace(Visitor*);
+};
+
+class Derived1 : public Base, public Mixin1 {
+    USING_GARBAGE_COLLECTED_MIXIN(Derived1);
+    // Requires trace method.
+};
+
+class Derived2 : public Base, public Mixin1, public Mixin2 {
+    USING_GARBAGE_COLLECTED_MIXIN(Derived2);
+    void trace(Visitor*) override;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_multiple_trace_bases.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_multiple_trace_bases.txt
new file mode 100644
index 0000000..33ae5f5
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_multiple_trace_bases.txt
@@ -0,0 +1,14 @@
+In file included from class_multiple_trace_bases.cpp:5:
+./class_multiple_trace_bases.h:27:1: warning: [blink-gc] Class 'Derived1' requires a trace method.
+class Derived1 : public Base, public Mixin1 {
+^
+./class_multiple_trace_bases.h:27:18: note: [blink-gc] Untraced base class 'Base' declared here:
+class Derived1 : public Base, public Mixin1 {
+                 ^
+./class_multiple_trace_bases.h:27:31: note: [blink-gc] Untraced base class 'Mixin1' declared here:
+class Derived1 : public Base, public Mixin1 {
+                              ^
+class_multiple_trace_bases.cpp:17:1: warning: [blink-gc] Base class 'Mixin2' of derived class 'Derived2' requires tracing.
+void Derived2::trace(Visitor* visitor) {
+^
+2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_overrides_new.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_overrides_new.cpp
new file mode 100644
index 0000000..9f47f82
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_overrides_new.cpp
@@ -0,0 +1,7 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "class_overrides_new.h"
+
+// Nothing to define.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_overrides_new.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_overrides_new.h
new file mode 100644
index 0000000..3e80e37
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_overrides_new.h
@@ -0,0 +1,20 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CLASS_OVERRIDES_NEW_H_
+#define CLASS_OVERRIDES_NEW_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject : public GarbageCollected<HeapObject> {
+    WTF_MAKE_FAST_ALLOCATED;
+public:
+    void trace(Visitor*) { }
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_overrides_new.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_overrides_new.txt
new file mode 100644
index 0000000..17f50fe
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_overrides_new.txt
@@ -0,0 +1,8 @@
+In file included from class_overrides_new.cpp:5:
+./class_overrides_new.h:13:5: warning: [blink-gc] Garbage collected class 'HeapObject' is not permitted to override its new operator.
+    WTF_MAKE_FAST_ALLOCATED;
+    ^
+./heap/stubs.h:14:5: note: expanded from macro 'WTF_MAKE_FAST_ALLOCATED'
+    void* operator new(size_t size);            \
+    ^
+1 warning generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_base.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_base.cpp
new file mode 100644
index 0000000..8d47634
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_base.cpp
@@ -0,0 +1,19 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "class_requires_finalization_base.h"
+
+namespace blink {
+
+void NeedsFinalizer::trace(Visitor* visitor)
+{
+    A::trace(visitor);
+}
+
+void DoesNotNeedFinalizer::trace(Visitor* visitor)
+{
+    A::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_base.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_base.h
new file mode 100644
index 0000000..239c2cf
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_base.h
@@ -0,0 +1,36 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CLASS_REQUIRES_FINALIZATION_BASE_H_
+#define CLASS_REQUIRES_FINALIZATION_BASE_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class A : public GarbageCollected<A> {
+public:
+    virtual void trace(Visitor*) {}
+};
+
+class B {
+public:
+    ~B() { /* user-declared, thus non-trivial */ }
+};
+
+// Second base class needs finalization.
+class NeedsFinalizer : public A, public B {
+public:
+    void trace(Visitor*);
+};
+
+// Base does not need finalization.
+class DoesNotNeedFinalizer : public A {
+public:
+    void trace(Visitor*);
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_base.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_base.txt
new file mode 100644
index 0000000..935883d
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_base.txt
@@ -0,0 +1,8 @@
+In file included from class_requires_finalization_base.cpp:5:
+./class_requires_finalization_base.h:23:1: warning: [blink-gc] Class 'NeedsFinalizer' requires finalization.
+class NeedsFinalizer : public A, public B {
+^
+./class_requires_finalization_base.h:23:34: note: [blink-gc] Base class 'B' requiring finalization declared here:
+class NeedsFinalizer : public A, public B {
+                                 ^
+1 warning generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_field.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_field.cpp
new file mode 100644
index 0000000..eb23ab0
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_field.cpp
@@ -0,0 +1,34 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "class_requires_finalization_field.h"
+
+namespace blink {
+
+void NeedsFinalizer::trace(Visitor* visitor)
+{
+    visitor->trace(m_as);
+    A::trace(visitor);
+}
+
+void AlsoNeedsFinalizer::trace(Visitor* visitor)
+{
+    visitor->trace(m_bs);
+    A::trace(visitor);
+}
+
+void DoesNotNeedFinalizer::trace(Visitor* visitor)
+{
+    visitor->trace(m_bs);
+    A::trace(visitor);
+}
+
+void AlsoDoesNotNeedFinalizer::trace(Visitor* visitor)
+{
+    visitor->trace(m_as);
+    visitor->trace(m_cs);
+    A::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_field.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_field.h
new file mode 100644
index 0000000..9596127
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_field.h
@@ -0,0 +1,80 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CLASS_REQUIRES_FINALIZATION_FIELD_H_
+#define CLASS_REQUIRES_FINALIZATION_FIELD_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class A : public GarbageCollected<A> {
+public:
+    virtual void trace(Visitor*) { }
+};
+
+// Has a non-trivial dtor (user-declared).
+class B {
+public:
+    ~B() { }
+    void trace(Visitor*) { };
+};
+
+// Has a trivial dtor.
+class C {
+public:
+    void trace(Visitor*) { };
+};
+
+} // blink namespace
+
+namespace WTF {
+
+template<>
+struct VectorTraits<blink::C> {
+    static const bool needsDestruction = false;
+};
+
+} // WTF namespace
+
+namespace blink {
+
+// Off-heap vectors always need to be finalized.
+class NeedsFinalizer : public A {
+public:
+    void trace(Visitor*);
+private:
+    Vector<Member<A> > m_as;
+};
+
+// On-heap vectors with inlined objects that need destruction
+// need to be finalized.
+class AlsoNeedsFinalizer : public A {
+public:
+    void trace(Visitor*);
+private:
+    HeapVector<B, 10> m_bs;
+};
+
+// On-heap vectors with no inlined objects never need to be finalized.
+class DoesNotNeedFinalizer : public A {
+public:
+    void trace(Visitor*);
+private:
+    HeapVector<B> m_bs;
+};
+
+// On-heap vectors with inlined objects that don't need destruction
+// don't need to be finalized.
+class AlsoDoesNotNeedFinalizer : public A {
+public:
+    void trace(Visitor*);
+private:
+    HeapVector<Member<A>, 10> m_as;
+    HeapVector<C, 10> m_cs;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_field.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_field.txt
new file mode 100644
index 0000000..9e37c46
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_field.txt
@@ -0,0 +1,14 @@
+In file included from class_requires_finalization_field.cpp:5:
+./class_requires_finalization_field.h:44:1: warning: [blink-gc] Class 'NeedsFinalizer' requires finalization.
+class NeedsFinalizer : public A {
+^
+./class_requires_finalization_field.h:48:5: note: [blink-gc] Field 'm_as' requiring finalization declared here:
+    Vector<Member<A> > m_as;
+    ^
+./class_requires_finalization_field.h:53:1: warning: [blink-gc] Class 'AlsoNeedsFinalizer' requires finalization.
+class AlsoNeedsFinalizer : public A {
+^
+./class_requires_finalization_field.h:57:5: note: [blink-gc] Field 'm_bs' requiring finalization declared here:
+    HeapVector<B, 10> m_bs;
+    ^
+2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_mixin.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_mixin.cpp
new file mode 100644
index 0000000..782810e
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_mixin.cpp
@@ -0,0 +1,37 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "class_requires_finalization_mixin.h"
+
+namespace blink {
+
+void MixinFinalizable::trace(Visitor* visitor)
+{
+    visitor->trace(m_onHeap);
+}
+
+void MixinNotFinalizable::trace(Visitor* visitor)
+{
+    visitor->trace(m_onHeap);
+}
+
+void NeedsFinalizer::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+    MixinFinalizable::trace(visitor);
+}
+
+void HasFinalizer::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+    MixinFinalizable::trace(visitor);
+}
+
+void NeedsNoFinalization::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+    MixinNotFinalizable::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_mixin.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_mixin.h
new file mode 100644
index 0000000..10befbd
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_mixin.h
@@ -0,0 +1,61 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CLASS_REQUIRES_FINALIZATION_MIXIN_H_
+#define CLASS_REQUIRES_FINALIZATION_MIXIN_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class OffHeap : public RefCounted<OffHeap> { };
+class OnHeap : public GarbageCollected<OnHeap> { };
+
+class MixinFinalizable : public GarbageCollectedMixin {
+public:
+    virtual void trace(Visitor*) override;
+private:
+    RefPtr<OffHeap> m_offHeap; // Requires finalization
+    Member<OnHeap> m_onHeap;
+};
+
+class MixinNotFinalizable : public GarbageCollectedMixin {
+public:
+    virtual void trace(Visitor*) override;
+private:
+    Member<OnHeap> m_onHeap;
+};
+
+class NeedsFinalizer
+    : public GarbageCollected<NeedsFinalizer>
+    , public MixinFinalizable {
+    USING_GARBAGE_COLLECTED_MIXIN(NeedsFinalizer);
+public:
+    virtual void trace(Visitor*) override;
+private:
+    Member<OnHeap> m_obj;
+};
+
+class HasFinalizer : public GarbageCollectedFinalized<HasFinalizer>,
+                     public MixinFinalizable {
+    USING_GARBAGE_COLLECTED_MIXIN(HasFinalizer);
+public:
+    virtual void trace(Visitor*) override;
+private:
+    Member<OnHeap> m_obj;
+};
+
+class NeedsNoFinalization
+    : public GarbageCollected<NeedsNoFinalization>
+    , public MixinNotFinalizable {
+    USING_GARBAGE_COLLECTED_MIXIN(NeedsNoFinalization);
+public:
+    virtual void trace(Visitor*) override;
+private:
+    Member<OnHeap> m_obj;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_mixin.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_mixin.txt
new file mode 100644
index 0000000..0bf93d5
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_finalization_mixin.txt
@@ -0,0 +1,8 @@
+In file included from class_requires_finalization_mixin.cpp:5:
+./class_requires_finalization_mixin.h:30:1: warning: [blink-gc] Class 'NeedsFinalizer' requires finalization.
+class NeedsFinalizer
+^
+./class_requires_finalization_mixin.h:32:7: note: [blink-gc] Base class 'MixinFinalizable' requiring finalization declared here:
+    , public MixinFinalizable {
+      ^
+1 warning generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method.cpp
new file mode 100644
index 0000000..f18fdf6
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method.cpp
@@ -0,0 +1,19 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "class_requires_trace_method.h"
+
+namespace blink {
+
+void Mixin2::trace(Visitor* visitor)
+{
+  Mixin::trace(visitor);
+}
+
+void Mixin3::trace(Visitor* visitor)
+{
+  Mixin::trace(visitor);
+}
+
+} // namespace blink
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method.h
new file mode 100644
index 0000000..4a442b7
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method.h
@@ -0,0 +1,59 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CLASS_REQUIRES_TRACE_METHOD_H_
+#define CLASS_REQUIRES_TRACE_METHOD_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject;
+
+class PartObject {
+    DISALLOW_NEW();
+private:
+    Member<HeapObject> m_obj;
+};
+
+class HeapObject : public GarbageCollected<HeapObject> {
+private:
+    PartObject m_part;
+};
+
+class Mixin : public GarbageCollectedMixin {
+public:
+  virtual void trace(Visitor*) override;
+  Member<Mixin> m_self;
+};
+
+class HeapObjectMixin : public GarbageCollected<HeapObjectMixin>, public Mixin {
+  USING_GARBAGE_COLLECTED_MIXIN(HeapObjectMixin);
+};
+
+class Mixin2 : public Mixin {
+public:
+  virtual void trace(Visitor*) override;
+};
+
+class HeapObjectMixin2
+    : public GarbageCollected<HeapObjectMixin2>, public Mixin2 {
+  USING_GARBAGE_COLLECTED_MIXIN(HeapObjectMixin2);
+};
+
+class Mixin3 : public Mixin {
+public:
+  virtual void trace(Visitor*) override;
+};
+
+class HeapObjectMixin3
+    : public GarbageCollected<HeapObjectMixin3>, public Mixin {
+  USING_GARBAGE_COLLECTED_MIXIN(HeapObjectMixin3);
+public:
+  virtual void trace(Visitor*) override;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method.txt
new file mode 100644
index 0000000..de6fd94
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method.txt
@@ -0,0 +1,14 @@
+In file included from class_requires_trace_method.cpp:5:
+./class_requires_trace_method.h:14:1: warning: [blink-gc] Class 'PartObject' requires a trace method.
+class PartObject {
+^
+./class_requires_trace_method.h:17:5: note: [blink-gc] Untraced field 'm_obj' declared here:
+    Member<HeapObject> m_obj;
+    ^
+./class_requires_trace_method.h:20:1: warning: [blink-gc] Class 'HeapObject' requires a trace method.
+class HeapObject : public GarbageCollected<HeapObject> {
+^
+./class_requires_trace_method.h:22:5: note: [blink-gc] Untraced field 'm_part' declared here:
+    PartObject m_part;
+    ^
+2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method_tmpl.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method_tmpl.cpp
new file mode 100644
index 0000000..7051fb2
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method_tmpl.cpp
@@ -0,0 +1,15 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "class_requires_trace_method_tmpl.h"
+
+namespace blink {
+
+// Does not need a trace method.
+class NoTrace : public TemplatedObject<PartObjectA> { };
+
+// Needs a trace method.
+class NeedsTrace : public TemplatedObject<PartObjectB> { };
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method_tmpl.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method_tmpl.h
new file mode 100644
index 0000000..70cab61
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method_tmpl.h
@@ -0,0 +1,34 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CLASS_REQUIRES_TRACE_METHOD_TMPL_H_
+#define CLASS_REQUIRES_TRACE_METHOD_TMPL_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject : public GarbageCollected<HeapObject> { };
+
+class PartObjectA {
+    DISALLOW_NEW();
+};
+
+class PartObjectB {
+    DISALLOW_NEW();
+public:
+    void trace(Visitor* visitor) { visitor->trace(m_obj); }
+private:
+    Member<HeapObject> m_obj;
+};
+
+template<typename T>
+class TemplatedObject {
+private:
+    T m_part;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method_tmpl.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method_tmpl.txt
new file mode 100644
index 0000000..49705b9
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/class_requires_trace_method_tmpl.txt
@@ -0,0 +1,8 @@
+In file included from class_requires_trace_method_tmpl.cpp:5:
+./class_requires_trace_method_tmpl.h:27:1: warning: [blink-gc] Class 'TemplatedObject<blink::PartObjectB>' requires a trace method.
+class TemplatedObject {
+^
+./class_requires_trace_method_tmpl.h:29:5: note: [blink-gc] Untraced field 'm_part' declared here:
+    T m_part;
+    ^
+1 warning generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/crash_on_invalid.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/crash_on_invalid.cpp
new file mode 100644
index 0000000..6370812
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/crash_on_invalid.cpp
@@ -0,0 +1,7 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "crash_on_invalid.h"
+
+// Nothing to define.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/crash_on_invalid.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/crash_on_invalid.h
new file mode 100644
index 0000000..a77d097
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/crash_on_invalid.h
@@ -0,0 +1,26 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Regression test for http://crbug.com/421958
+
+#ifndef CRASH_ON_INVALID_H_
+#define CRASH_ON_INVALID_H_
+
+namespace blink {
+
+class Visitor;
+class GamepadCommon {};
+class ScriptWrappable {};
+
+class Gamepad final : public GarbageCollectedFinalized<Gamepad>,
+                      public GamepadCommon,
+                      public ScriptWrappable {
+public:
+    virtual const WrapperTypeInfo *wrapperTypeInfo() const {}
+    void trace(Visitor *);
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/crash_on_invalid.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/crash_on_invalid.txt
new file mode 100644
index 0000000..cf19ff5
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/crash_on_invalid.txt
@@ -0,0 +1,8 @@
+In file included from crash_on_invalid.cpp:5:
+./crash_on_invalid.h:16:30: error: unknown template name 'GarbageCollectedFinalized'
+class Gamepad final : public GarbageCollectedFinalized<Gamepad>,
+                             ^
+./crash_on_invalid.h:20:19: error: unknown type name 'WrapperTypeInfo'
+    virtual const WrapperTypeInfo *wrapperTypeInfo() const {}
+                  ^
+2 errors generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_ptrs.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_ptrs.cpp
new file mode 100644
index 0000000..f3b3989
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_ptrs.cpp
@@ -0,0 +1,17 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cycle_ptrs.h"
+
+namespace blink {
+
+void A::trace(Visitor* visitor) {
+    visitor->trace(m_b);
+}
+
+void B::trace(Visitor* visitor) {
+    visitor->trace(m_a);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_ptrs.flags b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_ptrs.flags
new file mode 100644
index 0000000..a55c2f0
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_ptrs.flags
@@ -0,0 +1 @@
+-Xclang -plugin-arg-blink-gc-plugin -Xclang dump-graph
\ No newline at end of file
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_ptrs.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_ptrs.h
new file mode 100644
index 0000000..8c07a06
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_ptrs.h
@@ -0,0 +1,54 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CYCLE_PTRS_H_
+#define CYCLE_PTRS_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class B;
+class C;
+class D;
+class E;
+
+// This contains a leaking cycle:
+// E -per-> A -mem-> B -ref-> C -own-> D -own-vec-> E
+
+// The traced cycle from A -> B -> A does not leak.
+
+class A : public GarbageCollected<A> {
+public:
+    virtual void trace(Visitor*);
+private:
+    Member<B> m_b;
+};
+
+class B : public GarbageCollectedFinalized<B> {
+public:
+    virtual void trace(Visitor*);
+private:
+    Member<A> m_a;
+    RefPtr<C> m_c;
+};
+
+class C : public RefCounted<C> {
+private:
+    OwnPtr<D> m_d;
+};
+
+class D {
+private:
+    Vector<OwnPtr<E> > m_es;
+};
+
+class E {
+private:
+    Persistent<A> m_a;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_ptrs.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_ptrs.txt
new file mode 100644
index 0000000..4d242a6
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_ptrs.txt
@@ -0,0 +1,8 @@
+
+Found a potentially leaking cycle starting from a GC root:
+./cycle_ptrs.h:49:5: blink::E (m_a) => blink::A
+./cycle_ptrs.h:26:5: blink::A (m_b) => blink::B
+./cycle_ptrs.h:34:5: blink::B (m_c) => blink::C
+./cycle_ptrs.h:39:5: blink::C (m_d) => blink::D
+./cycle_ptrs.h:44:5: blink::D (m_es) => blink::E
+
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_sub.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_sub.cpp
new file mode 100644
index 0000000..dfe835a
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_sub.cpp
@@ -0,0 +1,14 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cycle_sub.h"
+
+namespace blink {
+
+void B::trace(Visitor* visitor) {
+    visitor->trace(m_c);
+    A::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_sub.flags b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_sub.flags
new file mode 100644
index 0000000..a55c2f0
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_sub.flags
@@ -0,0 +1 @@
+-Xclang -plugin-arg-blink-gc-plugin -Xclang dump-graph
\ No newline at end of file
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_sub.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_sub.h
new file mode 100644
index 0000000..a007061
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_sub.h
@@ -0,0 +1,36 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CYCLE_SUB_H_
+#define CYCLE_SUB_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class C;
+
+// This contains a leaking cycle:
+// C -per-> A -sub-> B -ref-> C
+
+class A : public GarbageCollectedFinalized<A> {
+public:
+    virtual void trace(Visitor*) {}
+};
+
+class B : public A {
+public:
+    virtual void trace(Visitor*);
+private:
+    RefPtr<C> m_c;
+};
+
+class C : public RefCounted<C> {
+private:
+    Persistent<A> m_a;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_sub.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_sub.txt
new file mode 100644
index 0000000..b37907d
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_sub.txt
@@ -0,0 +1,6 @@
+
+Found a potentially leaking cycle starting from a GC root:
+./cycle_sub.h:31:5:  blink::C (m_a) => blink::A
+./cycle_sub.h:22:11: blink::A (<subclass>) => blink::B
+./cycle_sub.h:26:5:  blink::B (m_c) => blink::C
+
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super.cpp
new file mode 100644
index 0000000..d9ecd79
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super.cpp
@@ -0,0 +1,21 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cycle_super.h"
+
+namespace blink {
+
+void A::trace(Visitor* visitor) {
+    visitor->trace(m_d);
+}
+
+void B::trace(Visitor* visitor) {
+    A::trace(visitor);
+}
+
+void C::trace(Visitor* visitor) {
+    B::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super.flags b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super.flags
new file mode 100644
index 0000000..a55c2f0
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super.flags
@@ -0,0 +1 @@
+-Xclang -plugin-arg-blink-gc-plugin -Xclang dump-graph
\ No newline at end of file
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super.h
new file mode 100644
index 0000000..13b05c1
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super.h
@@ -0,0 +1,41 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CYCLE_SUPER_H_
+#define CYCLE_SUPER_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class D;
+
+// This contains a leaking cycle:
+// D -per-> C -sup-> B -sup-> A -ref-> D
+
+class A : public GarbageCollectedFinalized<A> {
+public:
+    virtual void trace(Visitor*);
+private:
+    RefPtr<D> m_d;
+};
+
+class B : public A {
+public:
+    virtual void trace(Visitor*);
+};
+
+class C : public B {
+public:
+    virtual void trace(Visitor*);
+};
+
+class D : public RefCounted<D> {
+private:
+    Persistent<C> m_c;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super.txt
new file mode 100644
index 0000000..89b3675
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super.txt
@@ -0,0 +1,5 @@
+
+Found a potentially leaking cycle starting from a GC root:
+./cycle_super.h:36:5: blink::D (m_c) => blink::C
+./cycle_super.h:21:5: blink::C (blink::B <: blink::A <: m_d) => blink::D
+
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super_neg.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super_neg.cpp
new file mode 100644
index 0000000..33dec59
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super_neg.cpp
@@ -0,0 +1,18 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "cycle_super_neg.h"
+
+namespace blink {
+
+void B::trace(Visitor* visitor) {
+    A::trace(visitor);
+}
+
+void D::trace(Visitor* visitor) {
+    visitor->trace(m_c);
+    A::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super_neg.flags b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super_neg.flags
new file mode 100644
index 0000000..a55c2f0
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super_neg.flags
@@ -0,0 +1 @@
+-Xclang -plugin-arg-blink-gc-plugin -Xclang dump-graph
\ No newline at end of file
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super_neg.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super_neg.h
new file mode 100644
index 0000000..6f99eff
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super_neg.h
@@ -0,0 +1,44 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CYCLE_SUPER_NEG_H_
+#define CYCLE_SUPER_NEG_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class C;
+
+// The chain:
+//   C -per-> B -sup-> A -sub-> D -ref-> C
+// is not a leaking cycle, because the super-class relationship
+// should not transitively imply sub-class relationships.
+// I.e. B -/-> D
+
+class A : public GarbageCollectedFinalized<A> {
+public:
+    virtual void trace(Visitor*) {}
+};
+
+class B : public A {
+public:
+    virtual void trace(Visitor*);
+};
+
+class C : public RefCounted<C> {
+private:
+    Persistent<B> m_b;
+};
+
+class D : public A {
+public:
+    virtual void trace(Visitor*);
+private:
+    RefPtr<C> m_c;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super_neg.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super_neg.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/cycle_super_neg.txt
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/delayed_parsing.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/delayed_parsing.cpp
new file mode 100644
index 0000000..149d95e
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/delayed_parsing.cpp
@@ -0,0 +1,25 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+struct HeapObject : public GarbageCollected<HeapObject> {
+    void trace(Visitor*) { }
+};
+
+template<typename T>
+class TemplateBase
+    : public GarbageCollected<TemplateBase<T> > {
+public:
+    void trace(Visitor* visitor) { visitor->trace(m_obj); }
+private:
+    Member<HeapObject> m_obj;
+};
+
+class Subclass : public TemplateBase<Subclass> {
+};
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/delayed_parsing.flags b/tools/clang/blink_gc_plugin/tests/legacy_naming/delayed_parsing.flags
new file mode 100644
index 0000000..94af50f
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/delayed_parsing.flags
@@ -0,0 +1 @@
+-fdelayed-template-parsing
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/delayed_parsing.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/delayed_parsing.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/delayed_parsing.txt
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_access_finalized_field.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_access_finalized_field.cpp
new file mode 100644
index 0000000..b6bbfd2
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_access_finalized_field.cpp
@@ -0,0 +1,35 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "destructor_access_finalized_field.h"
+
+namespace blink {
+
+HeapObject::~HeapObject()
+{
+    // Valid access to fields.
+    if (m_ref->foo() && !m_obj) {
+        m_objs.size();
+        m_part.obj();
+    }
+
+    // Invalid access to fields.
+    bar(m_obj);
+    m_obj->foo();
+    m_objs[0];
+}
+
+void HeapObject::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+    visitor->trace(m_objs);
+    visitor->trace(m_part);
+}
+
+void PartOther::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_access_finalized_field.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_access_finalized_field.h
new file mode 100644
index 0000000..4c72156
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_access_finalized_field.h
@@ -0,0 +1,45 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DESTRUCTOR_ACCESS_FINALIZED_FIELD_H_
+#define DESTRUCTOR_ACCESS_FINALIZED_FIELD_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class Other : public RefCounted<Other> {
+public:
+    bool foo() { return true; }
+};
+
+class HeapObject;
+
+class PartOther {
+    ALLOW_ONLY_INLINE_ALLOCATION();
+public:
+    void trace(Visitor*);
+
+    HeapObject* obj() { return m_obj; }
+
+private:
+    Member<HeapObject> m_obj;
+};
+
+class HeapObject : public GarbageCollectedFinalized<HeapObject> {
+public:
+    ~HeapObject();
+    void trace(Visitor*);
+    bool foo() { return true; }
+    void bar(HeapObject*) { }
+private:
+    RefPtr<Other> m_ref;
+    Member<HeapObject> m_obj;
+    Vector<Member<HeapObject> > m_objs;
+    PartOther m_part;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_access_finalized_field.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_access_finalized_field.txt
new file mode 100644
index 0000000..0470b51
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_access_finalized_field.txt
@@ -0,0 +1,19 @@
+destructor_access_finalized_field.cpp:18:9: warning: [blink-gc] Finalizer '~HeapObject' accesses potentially finalized field 'm_obj'.
+    bar(m_obj);
+        ^
+./destructor_access_finalized_field.h:38:5: note: [blink-gc] Potentially finalized field 'm_obj' declared here:
+    Member<HeapObject> m_obj;
+    ^
+destructor_access_finalized_field.cpp:19:5: warning: [blink-gc] Finalizer '~HeapObject' accesses potentially finalized field 'm_obj'.
+    m_obj->foo();
+    ^
+./destructor_access_finalized_field.h:38:5: note: [blink-gc] Potentially finalized field 'm_obj' declared here:
+    Member<HeapObject> m_obj;
+    ^
+destructor_access_finalized_field.cpp:20:5: warning: [blink-gc] Finalizer '~HeapObject' accesses potentially finalized field 'm_objs'.
+    m_objs[0];
+    ^
+./destructor_access_finalized_field.h:39:5: note: [blink-gc] Potentially finalized field 'm_objs' declared here:
+    Vector<Member<HeapObject> > m_objs;
+    ^
+3 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_eagerly_finalized.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_eagerly_finalized.cpp
new file mode 100644
index 0000000..07409cc
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_eagerly_finalized.cpp
@@ -0,0 +1,37 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "destructor_eagerly_finalized.h"
+
+namespace blink {
+
+HeapObjectEagerFinalized::~HeapObjectEagerFinalized()
+{
+    // Valid access to a non-eagerly finalized field
+    m_obj->foo();
+}
+
+void HeapObjectEagerFinalized::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+}
+
+HeapObjectEagerFinalizedAlso::~HeapObjectEagerFinalizedAlso()
+{
+    // Valid access to a non-eagerly finalized field
+    m_heapObject->foo();
+
+    // Invalid accesses to eagerly finalized fields.
+    m_heapObjectFinalized->foo();
+    m_heapVector[0]->foo();
+}
+
+void HeapObjectEagerFinalizedAlso::trace(Visitor* visitor)
+{
+    visitor->trace(m_heapObject);
+    visitor->trace(m_heapObjectFinalized);
+    visitor->trace(m_heapVector);
+}
+
+} // namespace blink
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_eagerly_finalized.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_eagerly_finalized.h
new file mode 100644
index 0000000..77a29de
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_eagerly_finalized.h
@@ -0,0 +1,47 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DESTRUCTOR_EAGERLY_FINALIZED_H_
+#define DESTRUCTOR_EAGERLY_FINALIZED_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    void trace(Visitor*) { }
+    void foo() { }
+};
+
+class HeapObjectEagerFinalized
+    : public GarbageCollectedFinalized<HeapObjectEagerFinalized> {
+public:
+    EAGERLY_FINALIZED();
+    ~HeapObjectEagerFinalized();
+    void trace(Visitor*);
+
+    void foo() { }
+
+private:
+    Member<HeapObject> m_obj;
+};
+
+// Accessing other eagerly finalized objects during finalization is not allowed.
+class HeapObjectEagerFinalizedAlso
+    : public GarbageCollectedFinalized<HeapObjectEagerFinalizedAlso> {
+public:
+    EAGERLY_FINALIZED();
+    ~HeapObjectEagerFinalizedAlso();
+    void trace(Visitor*);
+
+private:
+    Member<HeapObject> m_heapObject;
+    Member<HeapObjectEagerFinalized> m_heapObjectFinalized;
+    HeapVector<Member<HeapObjectEagerFinalized>> m_heapVector;
+};
+
+} // namespace blink
+
+#endif // DESTRUCTOR_EAGERLY_FINALIZED_H_
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_eagerly_finalized.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_eagerly_finalized.txt
new file mode 100644
index 0000000..97d5089
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_eagerly_finalized.txt
@@ -0,0 +1,13 @@
+destructor_eagerly_finalized.cpp:26:5: warning: [blink-gc] Finalizer '~HeapObjectEagerFinalizedAlso' accesses eagerly finalized field 'm_heapObjectFinalized'.
+    m_heapObjectFinalized->foo();
+    ^
+./destructor_eagerly_finalized.h:41:5: note: [blink-gc] Field 'm_heapObjectFinalized' having eagerly finalized value, declared here:
+    Member<HeapObjectEagerFinalized> m_heapObjectFinalized;
+    ^
+destructor_eagerly_finalized.cpp:27:5: warning: [blink-gc] Finalizer '~HeapObjectEagerFinalizedAlso' accesses eagerly finalized field 'm_heapVector'.
+    m_heapVector[0]->foo();
+    ^
+./destructor_eagerly_finalized.h:42:5: note: [blink-gc] Field 'm_heapVector' having eagerly finalized value, declared here:
+    HeapVector<Member<HeapObjectEagerFinalized>> m_heapVector;
+    ^
+2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_in_nonfinalized_class.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_in_nonfinalized_class.cpp
new file mode 100644
index 0000000..8efc41d
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_in_nonfinalized_class.cpp
@@ -0,0 +1,20 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "destructor_in_nonfinalized_class.h"
+
+namespace blink {
+
+HeapObject::~HeapObject()
+{
+    // Do something when destructed...
+    (void)this;
+}
+
+void HeapObject::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_in_nonfinalized_class.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_in_nonfinalized_class.h
new file mode 100644
index 0000000..f3fa506
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_in_nonfinalized_class.h
@@ -0,0 +1,22 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef DESTRUCTOR_IN_NONFINALIZED_CLASS_H_
+#define DESTRUCTOR_IN_NONFINALIZED_CLASS_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    ~HeapObject();
+    void trace(Visitor*);
+private:
+    Member<HeapObject> m_obj;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_in_nonfinalized_class.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_in_nonfinalized_class.txt
new file mode 100644
index 0000000..cf19ea1
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/destructor_in_nonfinalized_class.txt
@@ -0,0 +1,8 @@
+In file included from destructor_in_nonfinalized_class.cpp:5:
+./destructor_in_nonfinalized_class.h:12:1: warning: [blink-gc] Class 'HeapObject' requires finalization.
+class HeapObject : public GarbageCollected<HeapObject> {
+^
+destructor_in_nonfinalized_class.cpp:9:1: note: [blink-gc] User-declared destructor declared here:
+HeapObject::~HeapObject()
+^
+1 warning generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_illegal_tracing.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_illegal_tracing.cpp
new file mode 100644
index 0000000..b831077
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_illegal_tracing.cpp
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "fields_illegal_tracing.h"
+
+namespace blink {
+
+void PartObject::trace(Visitor* visitor) {
+    visitor->trace(m_obj1);
+    visitor->trace(m_obj2);
+    visitor->trace(m_obj3);
+    visitor->trace(m_obj4);
+}
+
+void HeapObject::trace(Visitor* visitor) {
+    visitor->trace(m_obj1);
+    visitor->trace(m_obj2);
+    visitor->trace(m_obj3);
+    visitor->trace(m_obj4);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_illegal_tracing.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_illegal_tracing.h
new file mode 100644
index 0000000..f4d91dd
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_illegal_tracing.h
@@ -0,0 +1,63 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef FIELDS_ILLEGAL_TRACING_H_
+#define FIELDS_ILLEGAL_TRACING_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+namespace bar {
+
+// Check that (only) std::unique_ptr<> is reported
+// as an illegal smart pointer type.
+template<typename T> class unique_ptr {
+public:
+    ~unique_ptr() { }
+    operator T*() const { return 0; }
+    T* operator->() { return 0; }
+
+    void trace(Visitor* visitor)
+    {
+    }
+};
+
+}
+
+class HeapObject;
+class PartObject;
+
+class PartObject {
+    DISALLOW_NEW();
+public:
+    void trace(Visitor*);
+private:
+    OwnPtr<HeapObject> m_obj1;
+    RefPtr<HeapObject> m_obj2;
+    bar::unique_ptr<HeapObject> m_obj3;
+    std::unique_ptr<HeapObject> m_obj4;
+    Vector<int>::iterator m_iterator1;
+    HeapVector<Member<HeapObject>>::iterator m_iterator2;
+    HeapHashSet<PartObject>::const_iterator m_iterator3;
+};
+
+class HeapObject : public GarbageCollectedFinalized<HeapObject> {
+public:
+    void trace(Visitor*);
+private:
+    PartObject m_part;
+    OwnPtr<HeapObject> m_obj1;
+    RefPtr<HeapObject> m_obj2;
+    bar::unique_ptr<HeapObject> m_obj3;
+    std::unique_ptr<HeapObject> m_obj4;
+    HeapHashMap<int, Member<HeapObject>>::reverse_iterator m_iterator3;
+    HeapDeque<Member<HeapObject>>::const_reverse_iterator m_iterator4;
+    HeapListHashSet<Member<HeapObject>>::const_iterator m_iterator5;
+    HeapLinkedHashSet<Member<HeapObject>>::const_iterator m_iterator6;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_illegal_tracing.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_illegal_tracing.txt
new file mode 100644
index 0000000..61dc6a2
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_illegal_tracing.txt
@@ -0,0 +1,68 @@
+In file included from fields_illegal_tracing.cpp:5:
+./fields_illegal_tracing.h:32:1: warning: [blink-gc] Class 'PartObject' contains invalid fields.
+class PartObject {
+^
+./fields_illegal_tracing.h:37:5: note: [blink-gc] OwnPtr field 'm_obj1' to a GC managed class declared here:
+    OwnPtr<HeapObject> m_obj1;
+    ^
+./fields_illegal_tracing.h:38:5: note: [blink-gc] RefPtr field 'm_obj2' to a GC managed class declared here:
+    RefPtr<HeapObject> m_obj2;
+    ^
+./fields_illegal_tracing.h:40:5: note: [blink-gc] std::unique_ptr field 'm_obj4' to a GC managed class declared here:
+    std::unique_ptr<HeapObject> m_obj4;
+    ^
+./fields_illegal_tracing.h:42:5: note: [blink-gc] Iterator field 'm_iterator2' to a GC managed collection declared here:
+    HeapVector<Member<HeapObject>>::iterator m_iterator2;
+    ^
+./fields_illegal_tracing.h:43:5: note: [blink-gc] Iterator field 'm_iterator3' to a GC managed collection declared here:
+    HeapHashSet<PartObject>::const_iterator m_iterator3;
+    ^
+./fields_illegal_tracing.h:46:1: warning: [blink-gc] Class 'HeapObject' contains invalid fields.
+class HeapObject : public GarbageCollectedFinalized<HeapObject> {
+^
+./fields_illegal_tracing.h:51:5: note: [blink-gc] OwnPtr field 'm_obj1' to a GC managed class declared here:
+    OwnPtr<HeapObject> m_obj1;
+    ^
+./fields_illegal_tracing.h:52:5: note: [blink-gc] RefPtr field 'm_obj2' to a GC managed class declared here:
+    RefPtr<HeapObject> m_obj2;
+    ^
+./fields_illegal_tracing.h:54:5: note: [blink-gc] std::unique_ptr field 'm_obj4' to a GC managed class declared here:
+    std::unique_ptr<HeapObject> m_obj4;
+    ^
+./fields_illegal_tracing.h:55:5: note: [blink-gc] Iterator field 'm_iterator3' to a GC managed collection declared here:
+    HeapHashMap<int, Member<HeapObject>>::reverse_iterator m_iterator3;
+    ^
+./fields_illegal_tracing.h:56:5: note: [blink-gc] Iterator field 'm_iterator4' to a GC managed collection declared here:
+    HeapDeque<Member<HeapObject>>::const_reverse_iterator m_iterator4;
+    ^
+./fields_illegal_tracing.h:58:5: note: [blink-gc] Iterator field 'm_iterator6' to a GC managed collection declared here:
+    HeapLinkedHashSet<Member<HeapObject>>::const_iterator m_iterator6;
+    ^
+fields_illegal_tracing.cpp:9:1: warning: [blink-gc] Class 'PartObject' has untraced or not traceable fields.
+void PartObject::trace(Visitor* visitor) {
+^
+./fields_illegal_tracing.h:37:5: note: [blink-gc] Untraceable field 'm_obj1' declared here:
+    OwnPtr<HeapObject> m_obj1;
+    ^
+./fields_illegal_tracing.h:38:5: note: [blink-gc] Untraceable field 'm_obj2' declared here:
+    RefPtr<HeapObject> m_obj2;
+    ^
+./fields_illegal_tracing.h:40:5: note: [blink-gc] Untraceable field 'm_obj4' declared here:
+    std::unique_ptr<HeapObject> m_obj4;
+    ^
+fields_illegal_tracing.cpp:16:1: warning: [blink-gc] Class 'HeapObject' has untraced or not traceable fields.
+void HeapObject::trace(Visitor* visitor) {
+^
+./fields_illegal_tracing.h:51:5: note: [blink-gc] Untraceable field 'm_obj1' declared here:
+    OwnPtr<HeapObject> m_obj1;
+    ^
+./fields_illegal_tracing.h:52:5: note: [blink-gc] Untraceable field 'm_obj2' declared here:
+    RefPtr<HeapObject> m_obj2;
+    ^
+./fields_illegal_tracing.h:54:5: note: [blink-gc] Untraceable field 'm_obj4' declared here:
+    std::unique_ptr<HeapObject> m_obj4;
+    ^
+./fields_illegal_tracing.h:57:5: note: [blink-gc] Untraced field 'm_iterator5' declared here:
+    HeapListHashSet<Member<HeapObject>>::const_iterator m_iterator5;
+    ^
+4 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_require_tracing.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_require_tracing.cpp
new file mode 100644
index 0000000..880ce1e
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_require_tracing.cpp
@@ -0,0 +1,26 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "fields_require_tracing.h"
+
+namespace blink {
+
+void PartObject::trace(Visitor* visitor) {
+    m_obj1->trace(visitor); // Don't allow direct tracing.
+    visitor->trace(m_obj2);
+    // Missing visitor->trace(m_obj3);
+    visitor->trace(m_parts);
+}
+
+void PartBObject::trace(Visitor* visitor) {
+  // Missing visitor->trace(m_set);
+  visitor->trace(m_vector);
+}
+
+void HeapObject::trace(Visitor* visitor) {
+    // Missing visitor->trace(m_part);
+    visitor->trace(m_obj);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_require_tracing.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_require_tracing.h
new file mode 100644
index 0000000..1819411
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_require_tracing.h
@@ -0,0 +1,46 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef FIELDS_REQUIRE_TRACING_H_
+#define FIELDS_REQUIRE_TRACING_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject;
+class PartObject;
+
+class PartBObject {
+    DISALLOW_NEW();
+public:
+    void trace(Visitor*);
+private:
+    HeapHashSet<PartBObject> m_set;
+    HeapVector<PartBObject> m_vector;
+};
+
+class PartObject {
+    DISALLOW_NEW();
+public:
+    void trace(Visitor*);
+private:
+    Member<HeapObject> m_obj1;
+    Member<HeapObject> m_obj2;
+    Member<HeapObject> m_obj3;
+
+    HeapVector<PartBObject> m_parts;
+};
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    void trace(Visitor*);
+private:
+    PartObject m_part;
+    Member<HeapObject> m_obj;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_require_tracing.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_require_tracing.txt
new file mode 100644
index 0000000..39d49f3
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/fields_require_tracing.txt
@@ -0,0 +1,22 @@
+fields_require_tracing.cpp:9:1: warning: [blink-gc] Class 'PartObject' has untraced fields that require tracing.
+void PartObject::trace(Visitor* visitor) {
+^
+./fields_require_tracing.h:29:5: note: [blink-gc] Untraced field 'm_obj1' declared here:
+    Member<HeapObject> m_obj1;
+    ^
+./fields_require_tracing.h:31:5: note: [blink-gc] Untraced field 'm_obj3' declared here:
+    Member<HeapObject> m_obj3;
+    ^
+fields_require_tracing.cpp:16:1: warning: [blink-gc] Class 'PartBObject' has untraced fields that require tracing.
+void PartBObject::trace(Visitor* visitor) {
+^
+./fields_require_tracing.h:20:5: note: [blink-gc] Untraced field 'm_set' declared here:
+    HeapHashSet<PartBObject> m_set;
+    ^
+fields_require_tracing.cpp:21:1: warning: [blink-gc] Class 'HeapObject' has untraced fields that require tracing.
+void HeapObject::trace(Visitor* visitor) {
+^
+./fields_require_tracing.h:40:5: note: [blink-gc] Untraced field 'm_part' declared here:
+    PartObject m_part;
+    ^
+3 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/finalize_after_dispatch.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/finalize_after_dispatch.cpp
new file mode 100644
index 0000000..91244d1
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/finalize_after_dispatch.cpp
@@ -0,0 +1,63 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "finalize_after_dispatch.h"
+
+namespace blink {
+
+static B* toB(A* a) { return static_cast<B*>(a); }
+
+void A::trace(Visitor* visitor)
+{
+    switch (m_type) {
+    case TB:
+        toB(this)->traceAfterDispatch(visitor);
+        break;
+    case TC:
+        static_cast<C*>(this)->traceAfterDispatch(visitor);
+        break;
+    case TD:
+        static_cast<D*>(this)->traceAfterDispatch(visitor);
+        break;
+    }
+}
+
+void A::traceAfterDispatch(Visitor* visitor)
+{
+}
+
+void A::finalizeGarbageCollectedObject()
+{
+    switch (m_type) {
+    case TB:
+        toB(this)->~B();
+        break;
+    case TC:
+        static_cast<C*>(this)->~C();
+        break;
+    case TD:
+        // Missing static_cast<D*>(this)->~D();
+        break;
+    }
+}
+
+void B::traceAfterDispatch(Visitor* visitor)
+{
+    visitor->trace(m_a);
+    A::traceAfterDispatch(visitor);
+}
+
+void C::traceAfterDispatch(Visitor* visitor)
+{
+    visitor->trace(m_a);
+    A::traceAfterDispatch(visitor);
+}
+
+void D::traceAfterDispatch(Visitor* visitor)
+{
+    visitor->trace(m_a);
+    Abstract::traceAfterDispatch(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/finalize_after_dispatch.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/finalize_after_dispatch.h
new file mode 100644
index 0000000..acd16ec
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/finalize_after_dispatch.h
@@ -0,0 +1,78 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef FINALIZE_AFTER_DISPATCH_H_
+#define FINALIZE_AFTER_DISPATCH_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class NeedsFinalize : public GarbageCollectedFinalized<NeedsFinalize> {
+public:
+    void trace(Visitor*);
+    void traceAfterDispatch(Visitor*);
+    // Needs a finalizeGarbageCollectedObject method.
+};
+
+class NeedsDispatch : public GarbageCollectedFinalized<NeedsDispatch> {
+public:
+    void trace(Visitor*);
+    // Needs a traceAfterDispatch method.
+    void finalizeGarbageCollectedObject() { };
+};
+
+class NeedsFinalizedBase : public GarbageCollected<NeedsFinalizedBase> {
+public:
+    void trace(Visitor*) { };
+    void traceAfterDispatch(Visitor*) { };
+    void finalizeGarbageCollectedObject() { };
+};
+
+class A : GarbageCollectedFinalized<A> {
+public:
+    void trace(Visitor*);
+    void traceAfterDispatch(Visitor*);
+    void finalizeGarbageCollectedObject();
+protected:
+    enum Type { TB, TC, TD };
+    A(Type type) : m_type(type) { }
+private:
+    Type m_type;
+};
+
+class B : public A {
+public:
+    B() : A(TB) { }
+    ~B() { }
+    void traceAfterDispatch(Visitor*);
+private:
+    Member<A> m_a;
+};
+
+class C : public A {
+public:
+    C() : A(TC) { }
+    void traceAfterDispatch(Visitor*);
+private:
+    Member<A> m_a;
+};
+
+// This class is considered abstract and does not need to be dispatched to.
+class Abstract : public A {
+protected:
+    Abstract(Type type) : A(type) { }
+};
+
+class D : public Abstract {
+public:
+    D() : Abstract(TD) { }
+    void traceAfterDispatch(Visitor*);
+private:
+    Member<A> m_a;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/finalize_after_dispatch.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/finalize_after_dispatch.txt
new file mode 100644
index 0000000..8a652a4
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/finalize_after_dispatch.txt
@@ -0,0 +1,17 @@
+In file included from finalize_after_dispatch.cpp:5:
+./finalize_after_dispatch.h:12:1: warning: [blink-gc] Class 'NeedsFinalize' is missing manual finalize dispatch.
+class NeedsFinalize : public GarbageCollectedFinalized<NeedsFinalize> {
+^
+./finalize_after_dispatch.h:19:1: warning: [blink-gc] Class 'NeedsDispatch' is missing manual trace dispatch.
+class NeedsDispatch : public GarbageCollectedFinalized<NeedsDispatch> {
+^
+./finalize_after_dispatch.h:26:1: warning: [blink-gc] Class 'NeedsFinalizedBase' requires finalization.
+class NeedsFinalizedBase : public GarbageCollected<NeedsFinalizedBase> {
+^
+./finalize_after_dispatch.h:30:5: note: [blink-gc] User-declared finalizer declared here:
+    void finalizeGarbageCollectedObject() { };
+    ^
+finalize_after_dispatch.cpp:30:1: warning: [blink-gc] Missing dispatch to class 'D' in manual finalize dispatch.
+void A::finalizeGarbageCollectedObject()
+^
+4 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/garbage_collected_mixin.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/garbage_collected_mixin.cpp
new file mode 100644
index 0000000..e8f42f2
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/garbage_collected_mixin.cpp
@@ -0,0 +1,20 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "garbage_collected_mixin.h"
+
+namespace blink {
+
+void Mixin::trace(Visitor* visitor)
+{
+    // Missing: visitor->trace(m_self);
+}
+
+void HeapObject::trace(Visitor* visitor)
+{
+    visitor->trace(m_mix);
+    // Missing: Mixin::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/garbage_collected_mixin.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/garbage_collected_mixin.h
new file mode 100644
index 0000000..3c6f868
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/garbage_collected_mixin.h
@@ -0,0 +1,29 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GARBAGE_COLLECTED_MIXIN_H_
+#define GARBAGE_COLLECTED_MIXIN_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class Mixin : public GarbageCollectedMixin {
+public:
+    virtual void trace(Visitor*) override;
+private:
+    Member<Mixin> m_self;
+};
+
+class HeapObject : public GarbageCollected<HeapObject>, public Mixin {
+    USING_GARBAGE_COLLECTED_MIXIN(HeapObject);
+public:
+    virtual void trace(Visitor*) override;
+private:
+    Member<Mixin> m_mix;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/garbage_collected_mixin.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/garbage_collected_mixin.txt
new file mode 100644
index 0000000..4051a6a
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/garbage_collected_mixin.txt
@@ -0,0 +1,10 @@
+garbage_collected_mixin.cpp:9:1: warning: [blink-gc] Class 'Mixin' has untraced fields that require tracing.
+void Mixin::trace(Visitor* visitor)
+^
+./garbage_collected_mixin.h:16:5: note: [blink-gc] Untraced field 'm_self' declared here:
+    Member<Mixin> m_self;
+    ^
+garbage_collected_mixin.cpp:14:1: warning: [blink-gc] Base class 'Mixin' of derived class 'HeapObject' requires tracing.
+void HeapObject::trace(Visitor* visitor)
+^
+2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/heap/stubs.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/heap/stubs.h
new file mode 100644
index 0000000..f8fde06
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/heap/stubs.h
@@ -0,0 +1,324 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef HEAP_STUBS_H_
+#define HEAP_STUBS_H_
+
+#include "stddef.h"
+
+#define WTF_MAKE_FAST_ALLOCATED                 \
+    public:                                     \
+    void* operator new(size_t, void* p);        \
+    void* operator new[](size_t, void* p);      \
+    void* operator new(size_t size);            \
+    private:                                    \
+    typedef int __thisIsHereToForceASemicolonAfterThisMacro
+
+namespace WTF {
+
+template<typename T> class RefCounted { };
+
+template<typename T> class RawPtr {
+public:
+    operator T*() const { return 0; }
+    T* operator->() { return 0; }
+};
+
+template<typename T> class RefPtr {
+public:
+    ~RefPtr() { }
+    operator T*() const { return 0; }
+    T* operator->() { return 0; }
+};
+
+template<typename T> class OwnPtr {
+public:
+    ~OwnPtr() { }
+    operator T*() const { return 0; }
+    T* operator->() { return 0; }
+};
+
+class DefaultAllocator {
+public:
+    static const bool isGarbageCollected = false;
+};
+
+template<typename T>
+struct VectorTraits {
+    static const bool needsDestruction = true;
+};
+
+template<size_t inlineCapacity, bool isGarbageCollected, bool tNeedsDestruction>
+class VectorDestructorBase {
+public:
+    ~VectorDestructorBase() {}
+};
+
+template<size_t inlineCapacity>
+class VectorDestructorBase<inlineCapacity, true, false> {};
+
+template<>
+class VectorDestructorBase<0, true, true> {};
+
+template<
+    typename T,
+    size_t inlineCapacity = 0,
+    typename Allocator = DefaultAllocator>
+class Vector : public VectorDestructorBase<inlineCapacity,
+                                           Allocator::isGarbageCollected,
+                                           VectorTraits<T>::needsDestruction> {
+ public:
+  using iterator = T*;
+  using const_iterator = const T*;
+  using reverse_iterator = T*;
+  using const_reverse_iterator = const T*;
+
+  size_t size();
+  T& operator[](size_t);
+};
+
+template <typename T,
+          size_t inlineCapacity = 0,
+          typename Allocator = DefaultAllocator>
+class Deque {
+ public:
+  using iterator = T*;
+  using const_iterator = const T*;
+  using reverse_iterator = T*;
+  using const_reverse_iterator = const T*;
+};
+
+template <typename ValueArg,
+          typename HashArg = void,
+          typename TraitsArg = void,
+          typename Allocator = DefaultAllocator>
+class HashSet {
+ public:
+  typedef ValueArg* iterator;
+  typedef const ValueArg* const_iterator;
+  typedef ValueArg* reverse_iterator;
+  typedef const ValueArg* const_reverse_iterator;
+};
+
+template <typename ValueArg,
+          typename HashArg = void,
+          typename TraitsArg = void,
+          typename Allocator = DefaultAllocator>
+class ListHashSet {
+ public:
+  typedef ValueArg* iterator;
+  typedef const ValueArg* const_iterator;
+  typedef ValueArg* reverse_iterator;
+  typedef const ValueArg* const_reverse_iterator;
+};
+
+template <typename ValueArg,
+          typename HashArg = void,
+          typename TraitsArg = void,
+          typename Allocator = DefaultAllocator>
+class LinkedHashSet {
+ public:
+  typedef ValueArg* iterator;
+  typedef const ValueArg* const_iterator;
+  typedef ValueArg* reverse_iterator;
+  typedef const ValueArg* const_reverse_iterator;
+};
+
+template<
+    typename ValueArg,
+    typename HashArg = void,
+    typename TraitsArg = void,
+    typename Allocator = DefaultAllocator>
+class HashCountedSet {};
+
+template <typename KeyArg,
+          typename MappedArg,
+          typename HashArg = void,
+          typename KeyTraitsArg = void,
+          typename MappedTraitsArg = void,
+          typename Allocator = DefaultAllocator>
+class HashMap {
+ public:
+  typedef MappedArg* iterator;
+  typedef const MappedArg* const_iterator;
+  typedef MappedArg* reverse_iterator;
+  typedef const MappedArg* const_reverse_iterator;
+};
+}
+
+// Empty namespace declaration to exercise internal
+// handling of namespace equality.
+namespace std {
+  /* empty */
+}
+
+namespace std {
+
+template<typename T> class unique_ptr {
+public:
+    ~unique_ptr() { }
+    operator T*() const { return 0; }
+    T* operator->() { return 0; }
+};
+
+}
+
+namespace blink {
+
+using namespace WTF;
+
+#define DISALLOW_NEW()                   \
+    private:                                    \
+    void* operator new(size_t) = delete;        \
+    void* operator new(size_t, void*) = delete;
+
+#define STACK_ALLOCATED()                                   \
+    private:                                                \
+    __attribute__((annotate("blink_stack_allocated")))      \
+    void* operator new(size_t) = delete;                    \
+    void* operator new(size_t, void*) = delete;
+
+#define ALLOW_ONLY_INLINE_ALLOCATION()    \
+    public:                               \
+    void* operator new(size_t, void*);    \
+    private:                              \
+    void* operator new(size_t) = delete;
+
+#define GC_PLUGIN_IGNORE(bug)                           \
+    __attribute__((annotate("blink_gc_plugin_ignore")))
+
+#define USING_GARBAGE_COLLECTED_MIXIN(type)                     \
+public:                                                         \
+    virtual void adjustAndMark(Visitor*) const override { }     \
+    virtual bool isHeapObjectAlive(Visitor*) const override { return 0; }
+
+#define EAGERLY_FINALIZED() typedef int IsEagerlyFinalizedMarker
+
+template<typename T> class GarbageCollected { };
+
+template<typename T>
+class GarbageCollectedFinalized : public GarbageCollected<T> { };
+
+template<typename T>
+class RefCountedGarbageCollected : public GarbageCollectedFinalized<T> { };
+
+template<typename T> class Member {
+public:
+    operator T*() const { return 0; }
+    T* operator->() { return 0; }
+    bool operator!() const { return false; }
+};
+
+template<typename T> class WeakMember {
+public:
+    operator T*() const { return 0; }
+    T* operator->() { return 0; }
+    bool operator!() const { return false; }
+};
+
+template<typename T> class Persistent {
+public:
+    operator T*() const { return 0; }
+    T* operator->() { return 0; }
+    bool operator!() const { return false; }
+};
+
+template<typename T> class WeakPersistent {
+public:
+    operator T*() const { return 0; }
+    T* operator->() { return 0; }
+    bool operator!() const { return false; }
+};
+
+template<typename T> class CrossThreadPersistent {
+public:
+    operator T*() const { return 0; }
+    T* operator->() { return 0; }
+    bool operator!() const { return false; }
+};
+
+template<typename T> class CrossThreadWeakPersistent {
+public:
+    operator T*() const { return 0; }
+    T* operator->() { return 0; }
+    bool operator!() const { return false; }
+};
+
+class HeapAllocator {
+public:
+    static const bool isGarbageCollected = true;
+};
+
+template<typename T, size_t inlineCapacity = 0>
+class HeapVector : public Vector<T, inlineCapacity, HeapAllocator> { };
+
+template<typename T, size_t inlineCapacity = 0>
+class HeapDeque : public Vector<T, inlineCapacity, HeapAllocator> { };
+
+template<typename T>
+class HeapHashSet : public HashSet<T, void, void, HeapAllocator> { };
+
+template<typename T>
+class HeapListHashSet : public ListHashSet<T, void, void, HeapAllocator> { };
+
+template<typename T>
+class HeapLinkedHashSet : public LinkedHashSet<T, void, void, HeapAllocator> {
+};
+
+template<typename T>
+class HeapHashCountedSet : public HashCountedSet<T, void, void, HeapAllocator> {
+};
+
+template<typename K, typename V>
+class HeapHashMap : public HashMap<K, V, void, void, void, HeapAllocator> { };
+
+template<typename T>
+class PersistentHeapVector : public Vector<T, 0, HeapAllocator> { };
+
+template <typename Derived>
+class VisitorHelper {
+public:
+    template<typename T>
+    void trace(const T&);
+};
+
+class Visitor : public VisitorHelper<Visitor> {
+public:
+    template<typename T, void (T::*method)(Visitor*)>
+    void registerWeakMembers(const T* obj);
+};
+
+class InlinedGlobalMarkingVisitor
+    : public VisitorHelper<InlinedGlobalMarkingVisitor> {
+public:
+    InlinedGlobalMarkingVisitor* operator->() { return this; }
+
+    template<typename T, void (T::*method)(Visitor*)>
+    void registerWeakMembers(const T* obj);
+};
+
+class GarbageCollectedMixin {
+public:
+    virtual void adjustAndMark(Visitor*) const = 0;
+    virtual bool isHeapObjectAlive(Visitor*) const = 0;
+    virtual void trace(Visitor*) { }
+};
+
+template<typename T>
+struct TraceIfNeeded {
+    static void trace(Visitor*, T*);
+};
+
+}
+
+namespace WTF {
+
+template<typename T>
+struct VectorTraits<blink::Member<T> > {
+    static const bool needsDestruction = false;
+};
+
+}
+
+#endif
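
The VectorDestructorBase machinery above is the one piece of the stubs with non-obvious logic: a vector keeps a real destructor only when something must actually run at destruction time. A compilable reduction follows; the names GC, CPU, Traits, DtorBase, and Vec are illustrative stand-ins, not from the stubs.

// Illustrative reduction; stand-in names, not from heap/stubs.h.
#include <cstddef>
#include <type_traits>

struct GC  { static const bool isGarbageCollected = true;  };  // HeapAllocator stand-in
struct CPU { static const bool isGarbageCollected = false; };  // DefaultAllocator stand-in

template <typename T> struct Traits { static const bool needsDestruction = true; };
struct MemberLike {};  // Member<T>-like entries need no destruction
template <> struct Traits<MemberLike> { static const bool needsDestruction = false; };

template <std::size_t inlineCapacity, bool isGarbageCollected, bool needsDestruction>
struct DtorBase { ~DtorBase() {} };                // user-declared dtor: non-trivial
template <std::size_t inlineCapacity>
struct DtorBase<inlineCapacity, true, false> {};   // GC'd, nothing to destruct
template <>
struct DtorBase<0, true, true> {};                 // GC'd heap backing: GC finalizes

template <typename T, std::size_t inlineCapacity, typename Allocator>
struct Vec : DtorBase<inlineCapacity,
                      Allocator::isGarbageCollected,
                      Traits<T>::needsDestruction> {};

// A heap vector of Member-like entries is trivially destructible...
static_assert(std::is_trivially_destructible<Vec<MemberLike, 0, GC> >::value, "");
// ...and so is a heap-backed one of destructible T (the GC finalizes it)...
static_assert(std::is_trivially_destructible<Vec<int, 0, GC> >::value, "");
// ...but inline storage of destructible T still needs its destructor.
static_assert(!std::is_trivially_destructible<Vec<int, 4, GC> >::value, "");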
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_class.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_class.cpp
new file mode 100644
index 0000000..c539eb6
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_class.cpp
@@ -0,0 +1,20 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ignore_class.h"
+
+namespace blink {
+
+void B::trace(Visitor* visitor)
+{
+    // Class is ignored so no checking here.
+}
+
+void C::trace(Visitor* visitor)
+{
+    // Missing trace of m_obj.
+    // Ignored base class B does not need tracing.
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_class.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_class.h
new file mode 100644
index 0000000..580ed7c
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_class.h
@@ -0,0 +1,40 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef IGNORE_CLASS_H_
+#define IGNORE_CLASS_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject : public GarbageCollected<HeapObject> { };
+
+// Don't require trace method on ignored class.
+class GC_PLUGIN_IGNORE("http://crbug.com/12345") A;
+class A : public GarbageCollected<A> {
+private:
+    Member<HeapObject> m_obj;
+};
+
+// Don't require tracing of fields on ignored class.
+class GC_PLUGIN_IGNORE("http://crbug.com/12345") B;
+class B : public GarbageCollected<B> {
+public:
+    virtual void trace(Visitor*);
+private:
+    Member<HeapObject> m_obj;
+};
+
+// Don't require tracing of an ignored base class.
+class C : public B {
+public:
+    void trace(Visitor*);
+private:
+    Member<HeapObject> m_obj;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_class.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_class.txt
new file mode 100644
index 0000000..d3d2d80
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_class.txt
@@ -0,0 +1,7 @@
+ignore_class.cpp:14:1: warning: [blink-gc] Class 'C' has untraced fields that require tracing.
+void C::trace(Visitor* visitor)
+^
+./ignore_class.h:35:5: note: [blink-gc] Untraced field 'm_obj' declared here:
+    Member<HeapObject> m_obj;
+    ^
+1 warning generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_fields.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_fields.cpp
new file mode 100644
index 0000000..118af75
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_fields.cpp
@@ -0,0 +1,15 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ignore_fields.h"
+
+namespace blink {
+
+void C::trace(Visitor* visitor)
+{
+    // Missing trace of m_one.
+    // Not missing ignored field m_two.
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_fields.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_fields.h
new file mode 100644
index 0000000..e12bbab
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_fields.h
@@ -0,0 +1,43 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef IGNORE_FIELDS_H_
+#define IGNORE_FIELDS_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    virtual void trace(Visitor*) { }
+};
+
+// Don't warn about raw pointers to heap allocated objects.
+class A : public GarbageCollected<A> {
+private:
+    GC_PLUGIN_IGNORE("http://crbug.com/12345")
+    HeapObject* m_obj;
+};
+
+// Don't require trace method when (all) GC fields are ignored.
+class B : public GarbageCollected<B> {
+private:
+    GC_PLUGIN_IGNORE("http://crbug.com/12345")
+    Member<HeapObject> m_one;
+};
+
+// Don't require tracing an ignored field.
+class C : public GarbageCollected<C> {
+public:
+    void trace(Visitor*);
+private:
+    Member<HeapObject> m_one;
+    GC_PLUGIN_IGNORE("http://crbug.com/12345")
+    Member<HeapObject> m_two;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_fields.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_fields.txt
new file mode 100644
index 0000000..b4de498
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/ignore_fields.txt
@@ -0,0 +1,7 @@
+ignore_fields.cpp:9:1: warning: [blink-gc] Class 'C' has untraced fields that require tracing.
+void C::trace(Visitor* visitor)
+^
+./ignore_fields.h:36:5: note: [blink-gc] Untraced field 'm_one' declared here:
+    Member<HeapObject> m_one;
+    ^
+1 warning generated.
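
A compilable reduction of the suppression mechanism the ignore_* fixtures exercise: the plugin keys off the blink_gc_plugin_ignore annotation, which can sit on a whole class (via an annotated forward declaration, as in ignore_class.h) or on a single field (as above). Stand-in types; not from the patch, and clang-specific by nature.

// Stand-in stubs for illustration only.
#define GC_PLUGIN_IGNORE(bug) __attribute__((annotate("blink_gc_plugin_ignore")))

class Visitor {};
template <typename T> class GarbageCollected {};
template <typename T> class Member {};

class HeapThing : public GarbageCollected<HeapThing> {};

class Holder : public GarbageCollected<Holder> {
 public:
  void trace(Visitor*) {}  // m_ignored does not have to be traced here
 private:
  GC_PLUGIN_IGNORE("http://crbug.com/12345")
  Member<HeapThing> m_ignored;  // suppressed field-by-field
};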
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/inner_class.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/inner_class.cpp
new file mode 100644
index 0000000..03a53ea
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/inner_class.cpp
@@ -0,0 +1,14 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "inner_class.h"
+
+namespace blink {
+
+void SomeObject::InnerObject::trace(Visitor* visitor)
+{
+    // Missing: visitor->trace(m_obj);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/inner_class.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/inner_class.h
new file mode 100644
index 0000000..30f6ce3
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/inner_class.h
@@ -0,0 +1,24 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INNER_CLASS_H_
+#define INNER_CLASS_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class SomeObject {
+private:
+    class InnerObject : public GarbageCollected<InnerObject> {
+    public:
+        void trace(Visitor*);
+    private:
+        Member<InnerObject> m_obj;
+    };
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/inner_class.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/inner_class.txt
new file mode 100644
index 0000000..acdef6e
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/inner_class.txt
@@ -0,0 +1,7 @@
+inner_class.cpp:9:1: warning: [blink-gc] Class 'InnerObject' has untraced fields that require tracing.
+void SomeObject::InnerObject::trace(Visitor* visitor)
+^
+./inner_class.h:18:9: note: [blink-gc] Untraced field 'm_obj' declared here:
+        Member<InnerObject> m_obj;
+        ^
+1 warning generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/left_most_gc_base.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/left_most_gc_base.cpp
new file mode 100644
index 0000000..041d9f0
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/left_most_gc_base.cpp
@@ -0,0 +1,7 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "left_most_gc_base.h"
+
+// Nothing to define.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/left_most_gc_base.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/left_most_gc_base.h
new file mode 100644
index 0000000..0d76d61
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/left_most_gc_base.h
@@ -0,0 +1,30 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef LEFT_MOST_GC_BASE_H_
+#define LEFT_MOST_GC_BASE_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class A { };
+class B { };
+
+class Right : public A, public B, public GarbageCollected<Right> { };  // Error
+class Left : public GarbageCollected<Left>, public B, public A { };
+
+class DerivedRight : public Right, public Left { };  // Error
+class DerivedLeft : public Left, public Right { };
+
+class C : public GarbageCollected<C> {
+public:
+    virtual void trace(Visitor*);
+};
+
+class IllFormed : public A, public C { }; // Error
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/left_most_gc_base.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/left_most_gc_base.txt
new file mode 100644
index 0000000..e2d0418
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/left_most_gc_base.txt
@@ -0,0 +1,14 @@
+In file included from left_most_gc_base.cpp:5:
+./left_most_gc_base.h:15:1: warning: [blink-gc] Class 'Right' must derive its GC base in the left-most position.
+class Right : public A, public B, public GarbageCollected<Right> { };  // Error
+^
+./left_most_gc_base.h:18:1: warning: [blink-gc] Class 'DerivedRight' must derive its GC base in the left-most position.
+class DerivedRight : public Right, public Left { };  // Error
+^
+./left_most_gc_base.h:12:1: warning: [blink-gc] Left-most base class 'A' of derived class 'IllFormed' must be polymorphic.
+class A { };
+^
+./left_most_gc_base.h:26:1: warning: [blink-gc] Class 'IllFormed' must derive its GC base in the left-most position.
+class IllFormed : public A, public C { }; // Error
+^
+4 warnings generated.
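
A sketch of the layout rule behind these warnings: the GarbageCollected<T> base must come first, so that a pointer to the derived object is also a pointer to the start of the GC allocation. Stand-in types; not from the patch.

// Stand-in stubs for illustration only.
class Visitor {};
template <typename T> class GarbageCollected {};

class Unmanaged {};

// OK: the GC base is left-most, so a Good* aliases the allocation start.
class Good : public GarbageCollected<Good>, public Unmanaged {
 public:
  virtual void trace(Visitor*) {}
};

// Would be flagged: with Unmanaged first, a pointer to the derived object
// would point past the GC header the heap expects at the front.
// class Bad : public Unmanaged, public GarbageCollected<Bad> { ... };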
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/member_in_offheap_class.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/member_in_offheap_class.cpp
new file mode 100644
index 0000000..4b44c2d
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/member_in_offheap_class.cpp
@@ -0,0 +1,24 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "member_in_offheap_class.h"
+
+namespace blink {
+
+void OffHeapObject::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+}
+
+void PartObject::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+}
+
+void InlineObject::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/member_in_offheap_class.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/member_in_offheap_class.h
new file mode 100644
index 0000000..2a7c868
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/member_in_offheap_class.h
@@ -0,0 +1,48 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef MEMBER_IN_OFFHEAP_CLASS_H_
+#define MEMBER_IN_OFFHEAP_CLASS_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject : public GarbageCollected<HeapObject> { };
+
+class OffHeapObject {
+public:
+    void trace(Visitor*);
+private:
+    Member<HeapObject> m_obj; // Must not contain Member.
+    Persistent<HeapVector<Member<HeapObject> > > m_objs; // OK
+};
+
+class StackObject {
+    STACK_ALLOCATED();
+private:
+    Member<HeapObject> m_obj; // OK
+    Member<OffHeapObject> m_memberOff; // NOT OK
+    HeapVector<Member<OffHeapObject>> m_heapVectorMemberOff; // NOT OK
+};
+
+class PartObject {
+    DISALLOW_NEW();
+public:
+    void trace(Visitor*);
+private:
+    Member<HeapObject> m_obj; // OK
+};
+
+class InlineObject {
+    ALLOW_ONLY_INLINE_ALLOCATION();
+public:
+    void trace(Visitor*);
+private:
+    Member<HeapObject> m_obj; // OK
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/member_in_offheap_class.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/member_in_offheap_class.txt
new file mode 100644
index 0000000..9d5f238
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/member_in_offheap_class.txt
@@ -0,0 +1,17 @@
+In file included from member_in_offheap_class.cpp:5:
+./member_in_offheap_class.h:14:1: warning: [blink-gc] Class 'OffHeapObject' contains invalid fields.
+class OffHeapObject {
+^
+./member_in_offheap_class.h:18:5: note: [blink-gc] Member field 'm_obj' in unmanaged class declared here:
+    Member<HeapObject> m_obj; // Must not contain Member.
+    ^
+./member_in_offheap_class.h:22:1: warning: [blink-gc] Class 'StackObject' contains invalid fields.
+class StackObject {
+^
+./member_in_offheap_class.h:26:5: note: [blink-gc] Member field 'm_memberOff' to non-GC managed class declared here:
+    Member<OffHeapObject> m_memberOff; // NOT OK
+    ^
+./member_in_offheap_class.h:27:5: note: [blink-gc] Member field 'm_heapVectorMemberOff' to non-GC managed class declared here:
+    HeapVector<Member<OffHeapObject>> m_heapVectorMemberOff; // NOT OK
+    ^
+2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/non_virtual_trace.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/non_virtual_trace.cpp
new file mode 100644
index 0000000..9f57711
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/non_virtual_trace.cpp
@@ -0,0 +1,23 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "non_virtual_trace.h"
+
+namespace blink {
+
+void A::trace(Visitor* visitor)
+{
+}
+
+void C::trace(Visitor* visitor)
+{
+    B::trace(visitor);
+}
+
+void D::trace(Visitor* visitor)
+{
+    B::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/non_virtual_trace.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/non_virtual_trace.h
new file mode 100644
index 0000000..4179d49
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/non_virtual_trace.h
@@ -0,0 +1,32 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef NON_VIRTUAL_TRACE_H_
+#define NON_VIRTUAL_TRACE_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class A : public GarbageCollected<A> {
+public:
+    void trace(Visitor*);
+};
+
+class B : public A {
+};
+
+class C : public B {
+public:
+    void trace(Visitor*); // Cannot override a non-virtual trace.
+};
+
+class D : public B {
+public:
+    virtual void trace(Visitor*); // Cannot override a non-virtual trace.
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/non_virtual_trace.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/non_virtual_trace.txt
new file mode 100644
index 0000000..a05a94d
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/non_virtual_trace.txt
@@ -0,0 +1,17 @@
+In file included from non_virtual_trace.cpp:5:
+./non_virtual_trace.h:12:1: warning: [blink-gc] Left-most base class 'A' of derived class 'D' must define a virtual trace method.
+class A : public GarbageCollected<A> {
+^
+non_virtual_trace.cpp:13:1: warning: [blink-gc] Class 'C' overrides non-virtual trace of base class 'A'.
+void C::trace(Visitor* visitor)
+^
+./non_virtual_trace.h:14:5: note: [blink-gc] Non-virtual trace method declared here:
+    void trace(Visitor*);
+    ^
+non_virtual_trace.cpp:18:1: warning: [blink-gc] Class 'D' overrides non-virtual trace of base class 'A'.
+void D::trace(Visitor* visitor)
+^
+./non_virtual_trace.h:14:5: note: [blink-gc] Non-virtual trace method declared here:
+    void trace(Visitor*);
+    ^
+3 warnings generated.
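
The fix the fixture points at, reduced to a compilable sketch: once any subclass re-declares trace, the base's trace must be virtual so the marker dispatches to the most-derived override. Stand-in types; not from the patch.

// Stand-in stubs for illustration only.
class Visitor {};
template <typename T> class GarbageCollected {};

class A : public GarbageCollected<A> {
 public:
  virtual void trace(Visitor*) {}  // virtual, unlike the fixture's A
};

class B : public A {};

class C : public B {
 public:
  void trace(Visitor* visitor) override { B::trace(visitor); }
};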
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/own_ptr_to_gc_managed_class.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/own_ptr_to_gc_managed_class.cpp
new file mode 100644
index 0000000..9e27c3d
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/own_ptr_to_gc_managed_class.cpp
@@ -0,0 +1,11 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "own_ptr_to_gc_managed_class.h"
+
+namespace blink {
+
+void HeapObject::trace(Visitor* visitor) { }
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/own_ptr_to_gc_managed_class.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/own_ptr_to_gc_managed_class.h
new file mode 100644
index 0000000..6f47baf
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/own_ptr_to_gc_managed_class.h
@@ -0,0 +1,30 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef OWN_PTR_TO_GC_MANAGED_CLASS_H_
+#define OWN_PTR_TO_GC_MANAGED_CLASS_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject;
+
+class PartObject {
+    DISALLOW_NEW();
+private:
+    OwnPtr<HeapObject> m_obj;
+};
+
+class HeapObject : public GarbageCollectedFinalized<HeapObject> {
+public:
+    void trace(Visitor*);
+private:
+    Vector<OwnPtr<HeapObject> > m_objs;
+    OwnPtr<HeapVector<Member<HeapObject> > > m_objs2;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/own_ptr_to_gc_managed_class.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/own_ptr_to_gc_managed_class.txt
new file mode 100644
index 0000000..4102e86
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/own_ptr_to_gc_managed_class.txt
@@ -0,0 +1,17 @@
+In file included from own_ptr_to_gc_managed_class.cpp:5:
+./own_ptr_to_gc_managed_class.h:14:1: warning: [blink-gc] Class 'PartObject' contains invalid fields.
+class PartObject {
+^
+./own_ptr_to_gc_managed_class.h:17:5: note: [blink-gc] OwnPtr field 'm_obj' to a GC managed class declared here:
+    OwnPtr<HeapObject> m_obj;
+    ^
+./own_ptr_to_gc_managed_class.h:20:1: warning: [blink-gc] Class 'HeapObject' contains invalid fields.
+class HeapObject : public GarbageCollectedFinalized<HeapObject> {
+^
+./own_ptr_to_gc_managed_class.h:24:5: note: [blink-gc] OwnPtr field 'm_objs' to a GC managed class declared here:
+    Vector<OwnPtr<HeapObject> > m_objs;
+    ^
+./own_ptr_to_gc_managed_class.h:25:5: note: [blink-gc] OwnPtr field 'm_objs2' to a GC managed class declared here:
+    OwnPtr<HeapVector<Member<HeapObject> > > m_objs2;
+    ^
+2 warnings generated.
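
The corrected shape of the flagged fields, as a sketch: ownership of a GC-managed object belongs to the collector, so a single-ownership OwnPtr (or a Vector of them) becomes a traced Member edge. Stand-in types; not from the patch.

// Stand-in stubs for illustration only.
class Visitor {
 public:
  template <typename T> void trace(const T&) {}
};
template <typename T> class GarbageCollected {};
template <typename T> class Member {};

class HeapObject : public GarbageCollected<HeapObject> {
 public:
  void trace(Visitor* visitor) { visitor->trace(m_other); }
 private:
  Member<HeapObject> m_other;  // was OwnPtr<HeapObject> in the fixture
};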
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/part_object_to_gc_derived_class.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/part_object_to_gc_derived_class.cpp
new file mode 100644
index 0000000..2da8661
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/part_object_to_gc_derived_class.cpp
@@ -0,0 +1,14 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "part_object_to_gc_derived_class.h"
+
+namespace blink {
+
+void B::trace(Visitor* visitor)
+{
+    visitor->trace(m_a);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/part_object_to_gc_derived_class.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/part_object_to_gc_derived_class.h
new file mode 100644
index 0000000..ef5a649
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/part_object_to_gc_derived_class.h
@@ -0,0 +1,23 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef PART_OBJECT_TO_GC_DERIVED_CLASS_H_
+#define PART_OBJECT_TO_GC_DERIVED_CLASS_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class A : public GarbageCollected<A> { };
+
+class B : public GarbageCollected<B> {
+public:
+    void trace(Visitor*);
+private:
+    A m_a;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/part_object_to_gc_derived_class.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/part_object_to_gc_derived_class.txt
new file mode 100644
index 0000000..5970132
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/part_object_to_gc_derived_class.txt
@@ -0,0 +1,8 @@
+In file included from part_object_to_gc_derived_class.cpp:5:
+./part_object_to_gc_derived_class.h:14:1: warning: [blink-gc] Class 'B' contains invalid fields.
+class B : public GarbageCollected<B> {
+^
+./part_object_to_gc_derived_class.h:18:5: note: [blink-gc] Part-object field 'm_a' to a GC derived class declared here:
+    A m_a;
+    ^
+1 warning generated.
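
The corresponding fix, sketched: a type that is itself GarbageCollected cannot live embedded by value inside another heap object; the edge becomes a traced Member. Stand-in types; not from the patch.

// Stand-in stubs for illustration only.
class Visitor {
 public:
  template <typename T> void trace(const T&) {}
};
template <typename T> class GarbageCollected {};
template <typename T> class Member {};

class A : public GarbageCollected<A> {};

class B : public GarbageCollected<B> {
 public:
  void trace(Visitor* visitor) { visitor->trace(m_a); }
 private:
  Member<A> m_a;  // was `A m_a;` in the fixture
};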
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_field_in_gc_managed_class.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_field_in_gc_managed_class.cpp
new file mode 100644
index 0000000..7b3f286
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_field_in_gc_managed_class.cpp
@@ -0,0 +1,13 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "persistent_field_in_gc_managed_class.h"
+
+namespace blink {
+
+void HeapObject::trace(Visitor* visitor) {
+    visitor->trace(m_parts);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_field_in_gc_managed_class.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_field_in_gc_managed_class.h
new file mode 100644
index 0000000..a90f63c
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_field_in_gc_managed_class.h
@@ -0,0 +1,32 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef PERSISTENT_FIELD_IN_GC_MANAGED_CLASS_H_
+#define PERSISTENT_FIELD_IN_GC_MANAGED_CLASS_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject;
+
+class PartObject {
+    DISALLOW_NEW();
+private:
+    Persistent<HeapObject> m_obj;
+};
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    void trace(Visitor*);
+private:
+    PartObject m_part;
+    HeapVector<PartObject> m_parts;
+    PersistentHeapVector<Member<HeapObject> > m_objs;
+    WeakPersistent<HeapObject> m_weakPersistent;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_field_in_gc_managed_class.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_field_in_gc_managed_class.txt
new file mode 100644
index 0000000..dd5bc74
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_field_in_gc_managed_class.txt
@@ -0,0 +1,32 @@
+In file included from persistent_field_in_gc_managed_class.cpp:5:
+./persistent_field_in_gc_managed_class.h:20:1: warning: [blink-gc] Class 'HeapObject' contains GC root in field 'm_part'.
+class HeapObject : public GarbageCollected<HeapObject> {
+^
+./persistent_field_in_gc_managed_class.h:24:5: note: [blink-gc] Field 'm_part' with embedded GC root in 'HeapObject' declared here:
+    PartObject m_part;
+    ^
+./persistent_field_in_gc_managed_class.h:17:5: note: [blink-gc] Field 'm_obj' defining a GC root declared here:
+    Persistent<HeapObject> m_obj;
+    ^
+./persistent_field_in_gc_managed_class.h:20:1: warning: [blink-gc] Class 'HeapObject' contains GC root in field 'm_parts'.
+class HeapObject : public GarbageCollected<HeapObject> {
+^
+./persistent_field_in_gc_managed_class.h:25:5: note: [blink-gc] Field 'm_parts' with embedded GC root in 'HeapObject' declared here:
+    HeapVector<PartObject> m_parts;
+    ^
+./persistent_field_in_gc_managed_class.h:17:5: note: [blink-gc] Field 'm_obj' defining a GC root declared here:
+    Persistent<HeapObject> m_obj;
+    ^
+./persistent_field_in_gc_managed_class.h:20:1: warning: [blink-gc] Class 'HeapObject' contains GC root in field 'm_objs'.
+class HeapObject : public GarbageCollected<HeapObject> {
+^
+./persistent_field_in_gc_managed_class.h:26:5: note: [blink-gc] Field 'm_objs' defining a GC root declared here:
+    PersistentHeapVector<Member<HeapObject> > m_objs;
+    ^
+./persistent_field_in_gc_managed_class.h:20:1: warning: [blink-gc] Class 'HeapObject' contains GC root in field 'm_weakPersistent'.
+class HeapObject : public GarbageCollected<HeapObject> {
+^
+./persistent_field_in_gc_managed_class.h:27:5: note: [blink-gc] Field 'm_weakPersistent' defining a GC root declared here:
+    WeakPersistent<HeapObject> m_weakPersistent;
+    ^
+4 warnings generated.
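
A sketch of the distinction behind these four warnings: Persistent<T>, and any part object embedding one, is a GC root, so holding one inside a heap object pins that object graph and can leak cycles. Inside the heap the edge should be a traced Member; roots belong in off-heap owners. Stand-in types; not from the patch.

// Stand-in stubs for illustration only.
class Visitor {
 public:
  template <typename T> void trace(const T&) {}
};
template <typename T> class GarbageCollected {};
template <typename T> class Member {};
template <typename T> class Persistent {};

class HeapObject : public GarbageCollected<HeapObject> {
 public:
  void trace(Visitor* visitor) { visitor->trace(m_peer); }
 private:
  Member<HeapObject> m_peer;      // heap-internal edge: traced, collectable
};

class OffHeapOwner {                // not GC-managed
 private:
  Persistent<HeapObject> m_root;  // OK: roots live outside the heap
};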
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_no_trace.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_no_trace.cpp
new file mode 100644
index 0000000..637b46f
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_no_trace.cpp
@@ -0,0 +1,14 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "persistent_no_trace.h"
+
+namespace blink {
+
+void HeapObject::trace(Visitor* visitor) {
+    visitor->trace(m_crossThreadPersistent);
+    visitor->trace(m_crossThreadWeakPersistent);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_no_trace.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_no_trace.h
new file mode 100644
index 0000000..c8beb99
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_no_trace.h
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef PERSISTENT_NO_TRACE_H_
+#define PERSISTENT_NO_TRACE_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    void trace(Visitor*);
+private:
+    CrossThreadPersistent<HeapObject> m_crossThreadPersistent;
+    CrossThreadWeakPersistent<HeapObject> m_crossThreadWeakPersistent;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_no_trace.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_no_trace.txt
new file mode 100644
index 0000000..dcfe76d
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/persistent_no_trace.txt
@@ -0,0 +1,10 @@
+persistent_no_trace.cpp:9:1: warning: [blink-gc] Class 'HeapObject' has untraced or not traceable fields.
+void HeapObject::trace(Visitor* visitor) {
+^
+./persistent_no_trace.h:16:5: note: [blink-gc] Untraceable field 'm_crossThreadPersistent' declared here:
+    CrossThreadPersistent<HeapObject> m_crossThreadPersistent;
+    ^
+./persistent_no_trace.h:17:5: note: [blink-gc] Untraceable field 'm_crossThreadWeakPersistent' declared here:
+    CrossThreadWeakPersistent<HeapObject> m_crossThreadWeakPersistent;
+    ^
+1 warning generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/polymorphic_class_with_non_virtual_trace.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/polymorphic_class_with_non_virtual_trace.cpp
new file mode 100644
index 0000000..dc7620a
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/polymorphic_class_with_non_virtual_trace.cpp
@@ -0,0 +1,19 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "polymorphic_class_with_non_virtual_trace.h"
+
+namespace blink {
+
+void IsLeftMostPolymorphic::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+}
+
+void IsNotLeftMostPolymorphic::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/polymorphic_class_with_non_virtual_trace.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/polymorphic_class_with_non_virtual_trace.h
new file mode 100644
index 0000000..f5d999e
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/polymorphic_class_with_non_virtual_trace.h
@@ -0,0 +1,61 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef POLYMORPHIC_CLASS_WITH_NON_VIRTUAL_TRACE_H_
+#define POLYMORPHIC_CLASS_WITH_NON_VIRTUAL_TRACE_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    void trace(Visitor*) { }
+};
+
+class NonPolymorphicBase {
+};
+
+class PolymorphicBase {
+public:
+    virtual void foo();
+};
+
+class IsLeftMostPolymorphic
+    : public GarbageCollected<IsLeftMostPolymorphic>,
+      public PolymorphicBase {
+public:
+    void trace(Visitor*);
+private:
+    Member<HeapObject> m_obj;
+};
+
+class IsNotLeftMostPolymorphic
+    : public GarbageCollected<IsNotLeftMostPolymorphic>,
+      public NonPolymorphicBase,
+      public PolymorphicBase {
+public:
+    void trace(Visitor*);
+private:
+    Member<HeapObject> m_obj;
+};
+
+template<typename T>
+class TemplatedNonPolymorphicBase
+    : public GarbageCollected<TemplatedNonPolymorphicBase<T> > {
+public:
+    void trace(Visitor* visitor) { visitor->trace(m_obj); }
+private:
+    Member<HeapObject> m_obj;
+};
+
+// Looks OK, but will result in an incorrect object pointer when marking.
+class TemplatedIsNotLeftMostPolymorphic
+    : public TemplatedNonPolymorphicBase<TemplatedIsNotLeftMostPolymorphic>,
+      public PolymorphicBase {
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/polymorphic_class_with_non_virtual_trace.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/polymorphic_class_with_non_virtual_trace.txt
new file mode 100644
index 0000000..38f2e77
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/polymorphic_class_with_non_virtual_trace.txt
@@ -0,0 +1,8 @@
+In file included from polymorphic_class_with_non_virtual_trace.cpp:5:
+./polymorphic_class_with_non_virtual_trace.h:17:1: warning: [blink-gc] Left-most base class 'NonPolymorphicBase' of derived class 'IsNotLeftMostPolymorphic' must be polymorphic.
+class NonPolymorphicBase {
+^
+./polymorphic_class_with_non_virtual_trace.h:45:1: warning: [blink-gc] Left-most base class 'TemplatedNonPolymorphicBase<blink::TemplatedIsNotLeftMostPolymorphic>' of derived class 'TemplatedIsNotLeftMostPolymorphic' must be polymorphic.
+class TemplatedNonPolymorphicBase
+^
+2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/pure_virtual_trace.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/pure_virtual_trace.cpp
new file mode 100644
index 0000000..d993a32
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/pure_virtual_trace.cpp
@@ -0,0 +1,7 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "pure_virtual_trace.h"
+
+// Nothing to define
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/pure_virtual_trace.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/pure_virtual_trace.h
new file mode 100644
index 0000000..356a95e
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/pure_virtual_trace.h
@@ -0,0 +1,19 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef PURE_VIRTUAL_TRACE_H_
+#define PURE_VIRTUAL_TRACE_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class A : public GarbageCollected<A> {
+public:
+    virtual void trace(Visitor*) = 0;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/pure_virtual_trace.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/pure_virtual_trace.txt
new file mode 100644
index 0000000..175a28a
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/pure_virtual_trace.txt
@@ -0,0 +1,5 @@
+In file included from pure_virtual_trace.cpp:5:
+./pure_virtual_trace.h:14:5: warning: [blink-gc] Garbage collected class 'A' is not permitted to declare a pure-virtual trace method.
+    virtual void trace(Visitor*) = 0;
+    ^
+1 warning generated.
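
The accepted alternative, sketched: an abstract GC base keeps a concrete (possibly empty) virtual trace body rather than `= 0`, since every garbage-collected class is expected to provide a callable trace. Stand-in types; not from the patch.

// Stand-in stubs for illustration only.
class Visitor {};
template <typename T> class GarbageCollected {};

class A : public GarbageCollected<A> {
 public:
  virtual void trace(Visitor*) {}  // concrete body instead of `= 0`
};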
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class.cpp
new file mode 100644
index 0000000..4d6cc05
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class.cpp
@@ -0,0 +1,13 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "raw_ptr_to_gc_managed_class.h"
+
+namespace blink {
+
+void HeapObject::trace(Visitor* visitor) {
+    visitor->trace(m_objs);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class.h
new file mode 100644
index 0000000..18fa9fa
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class.h
@@ -0,0 +1,33 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef RAW_PTR_TO_GC_MANAGED_CLASS_H_
+#define RAW_PTR_TO_GC_MANAGED_CLASS_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject;
+
+class PartObject {
+    DISALLOW_NEW();
+private:
+    PartObject();
+
+    HeapObject* m_rawObj;
+    HeapObject& m_refObj;
+};
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    void trace(Visitor*);
+private:
+    PartObject m_part;
+    HeapVector<HeapObject*> m_objs;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class.txt
new file mode 100644
index 0000000..98f5abe
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class.txt
@@ -0,0 +1,17 @@
+In file included from raw_ptr_to_gc_managed_class.cpp:5:
+./raw_ptr_to_gc_managed_class.h:14:1: warning: [blink-gc] Class 'PartObject' contains invalid fields.
+class PartObject {
+^
+./raw_ptr_to_gc_managed_class.h:19:5: note: [blink-gc] Raw pointer field 'm_rawObj' to a GC managed class declared here:
+    HeapObject* m_rawObj;
+    ^
+./raw_ptr_to_gc_managed_class.h:20:5: note: [blink-gc] Reference pointer field 'm_refObj' to a GC managed class declared here:
+    HeapObject& m_refObj;
+    ^
+./raw_ptr_to_gc_managed_class.h:23:1: warning: [blink-gc] Class 'HeapObject' contains invalid fields.
+class HeapObject : public GarbageCollected<HeapObject> {
+^
+./raw_ptr_to_gc_managed_class.h:28:5: note: [blink-gc] Raw pointer field 'm_objs' to a GC managed class declared here:
+    HeapVector<HeapObject*> m_objs;
+    ^
+2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class_error.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class_error.cpp
new file mode 100644
index 0000000..f71d1b8
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class_error.cpp
@@ -0,0 +1,13 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "raw_ptr_to_gc_managed_class_error.h"
+
+namespace blink {
+
+void HeapObject::trace(Visitor* visitor) {
+    visitor->trace(m_objs);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class_error.flags b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class_error.flags
new file mode 100644
index 0000000..2f41be6
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class_error.flags
@@ -0,0 +1 @@
+-Werror
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class_error.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class_error.h
new file mode 100644
index 0000000..f4921c4
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class_error.h
@@ -0,0 +1,33 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef RAW_PTR_TO_GC_MANAGED_CLASS_ERROR_H_
+#define RAW_PTR_TO_GC_MANAGED_CLASS_ERROR_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject;
+
+class PartObject {
+    DISALLOW_NEW();
+private:
+    PartObject();
+
+    HeapObject* m_rawObj;
+    HeapObject& m_refObj;
+};
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    void trace(Visitor*);
+private:
+    PartObject m_part;
+    HeapVector<HeapObject*> m_objs;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class_error.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class_error.txt
new file mode 100644
index 0000000..c21c817
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/raw_ptr_to_gc_managed_class_error.txt
@@ -0,0 +1,17 @@
+In file included from raw_ptr_to_gc_managed_class_error.cpp:5:
+./raw_ptr_to_gc_managed_class_error.h:14:1: error: [blink-gc] Class 'PartObject' contains invalid fields.
+class PartObject {
+^
+./raw_ptr_to_gc_managed_class_error.h:19:5: note: [blink-gc] Raw pointer field 'm_rawObj' to a GC managed class declared here:
+    HeapObject* m_rawObj;
+    ^
+./raw_ptr_to_gc_managed_class_error.h:20:5: note: [blink-gc] Reference pointer field 'm_refObj' to a GC managed class declared here:
+    HeapObject& m_refObj;
+    ^
+./raw_ptr_to_gc_managed_class_error.h:23:1: error: [blink-gc] Class 'HeapObject' contains invalid fields.
+class HeapObject : public GarbageCollected<HeapObject> {
+^
+./raw_ptr_to_gc_managed_class_error.h:28:5: note: [blink-gc] Raw pointer field 'm_objs' to a GC managed class declared here:
+    HeapVector<HeapObject*> m_objs;
+    ^
+2 errors generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/ref_ptr_to_gc_managed_class.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/ref_ptr_to_gc_managed_class.cpp
new file mode 100644
index 0000000..e0a200f
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/ref_ptr_to_gc_managed_class.cpp
@@ -0,0 +1,11 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "ref_ptr_to_gc_managed_class.h"
+
+namespace blink {
+
+void HeapObject::trace(Visitor*) { }
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/ref_ptr_to_gc_managed_class.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/ref_ptr_to_gc_managed_class.h
new file mode 100644
index 0000000..c3df7f8
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/ref_ptr_to_gc_managed_class.h
@@ -0,0 +1,30 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef REF_PTR_TO_GC_MANAGED_CLASS_H_
+#define REF_PTR_TO_GC_MANAGED_CLASS_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject;
+
+class PartObject {
+    DISALLOW_NEW();
+private:
+    RefPtr<HeapObject> m_obj;
+};
+
+class HeapObject : public GarbageCollectedFinalized<HeapObject> {
+public:
+    void trace(Visitor*);
+private:
+    PartObject m_part;
+    Vector<RefPtr<HeapObject> > m_objs;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/ref_ptr_to_gc_managed_class.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/ref_ptr_to_gc_managed_class.txt
new file mode 100644
index 0000000..fd49785
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/ref_ptr_to_gc_managed_class.txt
@@ -0,0 +1,14 @@
+In file included from ref_ptr_to_gc_managed_class.cpp:5:
+./ref_ptr_to_gc_managed_class.h:14:1: warning: [blink-gc] Class 'PartObject' contains invalid fields.
+class PartObject {
+^
+./ref_ptr_to_gc_managed_class.h:17:5: note: [blink-gc] RefPtr field 'm_obj' to a GC managed class declared here:
+    RefPtr<HeapObject> m_obj;
+    ^
+./ref_ptr_to_gc_managed_class.h:20:1: warning: [blink-gc] Class 'HeapObject' contains invalid fields.
+class HeapObject : public GarbageCollectedFinalized<HeapObject> {
+^
+./ref_ptr_to_gc_managed_class.h:25:5: note: [blink-gc] RefPtr field 'm_objs' to a GC managed class declared here:
+    Vector<RefPtr<HeapObject> > m_objs;
+    ^
+2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/register_weak_members_template.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/register_weak_members_template.cpp
new file mode 100644
index 0000000..6742c22
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/register_weak_members_template.cpp
@@ -0,0 +1,7 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "register_weak_members_template.h"
+
+// Nothing to define here.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/register_weak_members_template.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/register_weak_members_template.h
new file mode 100644
index 0000000..7d3905a
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/register_weak_members_template.h
@@ -0,0 +1,43 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef REGISTER_WEAK_MEMBERS_TEMPLATE_H_
+#define REGISTER_WEAK_MEMBERS_TEMPLATE_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class X : public GarbageCollected<X> {
+ public:
+  void trace(Visitor* visitor) { traceImpl(visitor); }
+  void trace(InlinedGlobalMarkingVisitor visitor) { traceImpl(visitor); }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {}
+};
+
+class HasUntracedWeakMembers : public GarbageCollected<HasUntracedWeakMembers> {
+ public:
+  void trace(Visitor* visitor) { traceImpl(visitor); }
+  void trace(InlinedGlobalMarkingVisitor visitor) { traceImpl(visitor); }
+
+  // Does not have to be defined for the purpose of this test.
+  void clearWeakMembers(Visitor* visitor);
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {
+    visitor->template registerWeakMembers<
+        HasUntracedWeakMembers,
+        &HasUntracedWeakMembers::clearWeakMembers>(this);
+  }
+
+  WeakMember<X> x_;
+};
+
+}
+
+#endif  // REGISTER_WEAK_MEMBERS_TEMPLATE_H_
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/register_weak_members_template.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/register_weak_members_template.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/register_weak_members_template.txt
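
This fixture's expected-output file is intentionally empty: the plugin accepts registerWeakMembers as tracing x_, even when routed through a templated traceImpl. A compilable reduction follows; stand-in types, not from the patch.

// Stand-in stubs for illustration only.
class Visitor {
 public:
  template <typename T> void trace(const T&) {}
  template <typename T, void (T::*method)(Visitor*)>
  void registerWeakMembers(const T*) {}
};
template <typename T> class GarbageCollected {};
template <typename T> class WeakMember {};

class HasWeak : public GarbageCollected<HasWeak> {
 public:
  void trace(Visitor* visitor) { traceImpl(visitor); }
  void clearWeakMembers(Visitor*) { /* would clear x_ if unreachable */ }
 private:
  template <typename VisitorDispatcher>
  void traceImpl(VisitorDispatcher visitor) {
    // Registering the weak callback counts as tracing x_.
    visitor->template registerWeakMembers<HasWeak,
                                          &HasWeak::clearWeakMembers>(this);
  }
  WeakMember<HasWeak> x_;
};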
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/stack_allocated.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/stack_allocated.cpp
new file mode 100644
index 0000000..3c4e321
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/stack_allocated.cpp
@@ -0,0 +1,23 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "stack_allocated.h"
+
+namespace blink {
+
+// Verify that anon namespaces are checked.
+namespace {
+
+class AnonStackObject : public StackObject {
+public:
+    HeapObject* m_obj;
+};
+
+}
+
+void HeapObject::trace(Visitor* visitor)
+{
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/stack_allocated.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/stack_allocated.h
new file mode 100644
index 0000000..10d8f41
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/stack_allocated.h
@@ -0,0 +1,50 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef STACK_ALLOCATED_H_
+#define STACK_ALLOCATED_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject;
+
+class PartObject {
+    DISALLOW_NEW();
+private:
+    Member<HeapObject> m_obj; // Needs tracing.
+};
+
+class StackObject {
+    STACK_ALLOCATED();
+private:
+    Member<HeapObject> m_obj; // Does not need tracing.
+};
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    void trace(Visitor*);
+private:
+    StackObject m_part; // Cannot embed a stack allocated object.
+};
+
+// Cannot derive from both heap- and stack-allocated objects.
+class DerivedHeapObject : public HeapObject, public StackObject {
+};
+
+// Cannot be stack-allocated and derive from a heap-allocated object.
+class DerivedHeapObject2 : public HeapObject {
+  STACK_ALLOCATED();
+};
+
+// STACK_ALLOCATED is inherited.
+class DerivedStackObject : public StackObject {
+private:
+    StackObject m_anotherPart; // Fine: the enclosing class is also stack allocated.
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/stack_allocated.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/stack_allocated.txt
new file mode 100644
index 0000000..80980c3
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/stack_allocated.txt
@@ -0,0 +1,32 @@
+In file included from stack_allocated.cpp:5:
+./stack_allocated.h:14:1: warning: [blink-gc] Class 'PartObject' requires a trace method.
+class PartObject {
+^
+./stack_allocated.h:17:5: note: [blink-gc] Untraced field 'm_obj' declared here:
+    Member<HeapObject> m_obj; // Needs tracing.
+    ^
+./stack_allocated.h:26:1: warning: [blink-gc] Class 'HeapObject' contains invalid fields.
+class HeapObject : public GarbageCollected<HeapObject> {
+^
+./stack_allocated.h:30:5: note: [blink-gc] Stack-allocated field 'm_part' declared here:
+    StackObject m_part; // Cannot embed a stack allocated object.
+    ^
+./stack_allocated.h:34:27: warning: [blink-gc] Stack-allocated class 'DerivedHeapObject' derives class 'HeapObject' which is garbage collected.
+class DerivedHeapObject : public HeapObject, public StackObject {
+                          ^
+./stack_allocated.h:38:28: warning: [blink-gc] Stack-allocated class 'DerivedHeapObject2' derives class 'HeapObject' which is garbage collected.
+class DerivedHeapObject2 : public HeapObject {
+                           ^
+./stack_allocated.h:39:3: warning: [blink-gc] Garbage collected class 'DerivedHeapObject2' is not permitted to override its new operator.
+  STACK_ALLOCATED();
+  ^
+./heap/stubs.h:178:5: note: expanded from macro 'STACK_ALLOCATED'
+    __attribute__((annotate("blink_stack_allocated")))      \
+    ^
+stack_allocated.cpp:12:1: warning: [blink-gc] Class 'AnonStackObject' contains invalid fields.
+class AnonStackObject : public StackObject {
+^
+stack_allocated.cpp:14:5: note: [blink-gc] Raw pointer field 'm_obj' to a GC managed class declared here:
+    HeapObject* m_obj;
+    ^
+6 warnings generated.
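
The contract the stack_allocated fixtures pin down, reduced to a sketch: STACK_ALLOCATED() deletes operator new, confining instances to the stack, where Members are kept alive by conservative stack scanning and need no trace method; embedding such a class in, or mixing it into, a heap object is what triggers the warnings above. Stand-in types; not from the patch.

// Stand-in stubs for illustration only.
#include <cstddef>

template <typename T> class GarbageCollected {};
template <typename T> class Member {};

class HeapThing : public GarbageCollected<HeapThing> {};

class StackOnly {
 public:
  Member<HeapThing> m_ref;  // no trace needed: found by stack scanning
 private:
  void* operator new(std::size_t) = delete;         // heap allocation forbidden
  void* operator new(std::size_t, void*) = delete;  // placement new too
};

void useStackOnly() {
  StackOnly local;   // fine as an automatic variable
  (void)local;
  // new StackOnly;  // would not compile: operator new is deleted
}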
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/templated_class_with_local_class_requires_trace.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/templated_class_with_local_class_requires_trace.cpp
new file mode 100644
index 0000000..bd8b737
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/templated_class_with_local_class_requires_trace.cpp
@@ -0,0 +1,26 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "templated_class_with_local_class_requires_trace.h"
+
+namespace blink {
+
+template<typename T>
+void TemplatedObject<T>::trace(Visitor* visitor)
+{
+    visitor->trace(m_local);
+    visitor->trace(m_memberRef);
+}
+
+class Test {
+public:
+    static void test()
+    {
+        HeapObject* obj = new HeapObject();
+        TemplatedObject<HeapObject>* instance =
+            new TemplatedObject<HeapObject>(obj);
+    }
+};
+
+} // namespace blink
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/templated_class_with_local_class_requires_trace.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/templated_class_with_local_class_requires_trace.h
new file mode 100644
index 0000000..d2b0225
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/templated_class_with_local_class_requires_trace.h
@@ -0,0 +1,52 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TEMPLATED_CLASS_WITH_LOCAL_CLASS_REQUIRES_TRACE_H
+#define TEMPLATED_CLASS_WITH_LOCAL_CLASS_REQUIRES_TRACE_H
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class NonHeapObject { };
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    HeapObject() { }
+
+    void trace(Visitor*) { }
+};
+
+template<typename T>
+class TemplatedObject final
+    : public GarbageCollectedFinalized<TemplatedObject<T> > {
+public:
+    TemplatedObject(T*)
+    {
+    }
+
+    void trace(Visitor*);
+
+private:
+    class Local final : public GarbageCollected<Local> {
+    public:
+        void trace(Visitor* visitor)
+        {
+            visitor->trace(m_heapObject);
+            visitor->trace(m_object);
+        }
+    private:
+        Member<HeapObject> m_heapObject;
+        OwnPtr<HeapObject> m_object;
+    };
+
+    Member<Local> m_local;
+    Member<T> m_memberRef;
+    OwnPtr<T> m_ownRef;
+};
+
+} // namespace blink
+
+#endif // TEMPLATED_CLASS_WITH_LOCAL_CLASS_REQUIRES_TRACE_H
+
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/templated_class_with_local_class_requires_trace.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/templated_class_with_local_class_requires_trace.txt
new file mode 100644
index 0000000..fa6b9f5
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/templated_class_with_local_class_requires_trace.txt
@@ -0,0 +1,26 @@
+In file included from templated_class_with_local_class_requires_trace.cpp:5:
+./templated_class_with_local_class_requires_trace.h:22:1: warning: [blink-gc] Class 'TemplatedObject<blink::HeapObject>' contains invalid fields.
+class TemplatedObject final
+^
+./templated_class_with_local_class_requires_trace.h:46:5: note: [blink-gc] OwnPtr field 'm_ownRef' to a GC managed class declared here:
+    OwnPtr<T> m_ownRef;
+    ^
+./templated_class_with_local_class_requires_trace.h:32:5: warning: [blink-gc] Class 'Local' contains invalid fields.
+    class Local final : public GarbageCollected<Local> {
+    ^
+./templated_class_with_local_class_requires_trace.h:41:9: note: [blink-gc] OwnPtr field 'm_object' to a GC managed class declared here:
+        OwnPtr<HeapObject> m_object;
+        ^
+./templated_class_with_local_class_requires_trace.h:32:5: warning: [blink-gc] Class 'Local' requires finalization.
+    class Local final : public GarbageCollected<Local> {
+    ^
+./templated_class_with_local_class_requires_trace.h:41:9: note: [blink-gc] Field 'm_object' requiring finalization declared here:
+        OwnPtr<HeapObject> m_object;
+        ^
+./templated_class_with_local_class_requires_trace.h:34:9: warning: [blink-gc] Class 'Local' has untraced or not traceable fields.
+        void trace(Visitor* visitor)
+        ^
+./templated_class_with_local_class_requires_trace.h:41:9: note: [blink-gc] Untraceable field 'm_object' declared here:
+        OwnPtr<HeapObject> m_object;
+        ^
+4 warnings generated.
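
Every warning above comes from holding a garbage-collected type through OwnPtr, which the GC cannot see and which additionally forces finalization. Under the plugin's rules such fields become Member<> (or WeakMember<>) and get traced; a sketch of the corrected Local class, with the same change applying to m_ownRef in TemplatedObject:

    class Local final : public GarbageCollected<Local> {
    public:
        void trace(Visitor* visitor)
        {
            visitor->trace(m_heapObject);
            visitor->trace(m_object);
        }
    private:
        Member<HeapObject> m_heapObject;
        Member<HeapObject> m_object; // Was OwnPtr<HeapObject>.
    };
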
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/test.py b/tools/clang/blink_gc_plugin/tests/legacy_naming/test.py
new file mode 100755
index 0000000..475f6fb
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/test.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import os
+import subprocess
+import sys
+
+script_dir = os.path.dirname(os.path.realpath(__file__))
+tool_dir = os.path.abspath(os.path.join(script_dir, '../../../pylib'))
+sys.path.insert(0, tool_dir)
+
+from clang import plugin_testing
+
+
+class BlinkGcPluginTest(plugin_testing.ClangPluginTest):
+  """Test harness for the Blink GC plugin."""
+
+  def AdjustClangArguments(self, clang_cmd):
+    clang_cmd.append('-Wno-inaccessible-base')
+
+  def ProcessOneResult(self, test_name, actual):
+    # Some Blink GC plugin tests dump a JSON representation of the object
+    # graph; the processed graph is then used as the actual result of the test.
+    if os.path.exists('%s.graph.json' % test_name):
+      try:
+        actual = subprocess.check_output(
+            ['python', '../../process-graph.py', '-c',
+             '%s.graph.json' % test_name],
+            stderr=subprocess.STDOUT)
+      except subprocess.CalledProcessError as e:
+        # The graph processing script returns a failure exit code if the graph
+        # is bad (e.g. it has a cycle). The output still needs to be captured in
+        # that case, since the expected results capture the errors.
+        actual = e.output
+      finally:
+        # Clean up the .graph.json file to prevent false passes from stale
+        # results from a previous run.
+        os.remove('%s.graph.json' % test_name)
+    return super(BlinkGcPluginTest, self).ProcessOneResult(test_name, actual)
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      '--reset-results',
+      action='store_true',
+      help='If specified, overwrites the expected results in place.')
+  parser.add_argument('clang_path', help='The path to the clang binary.')
+  parser.add_argument('plugin_path',
+                      nargs='?',
+                      help='The path to the plugin library, if any.')
+  args = parser.parse_args()
+
+  return BlinkGcPluginTest(
+      os.path.dirname(os.path.realpath(__file__)),
+      args.clang_path,
+      args.plugin_path,
+      'blink-gc-plugin',
+      args.reset_results).Run()
+
+
+if __name__ == '__main__':
+  sys.exit(main())
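
The harness takes the clang binary and, optionally, the plugin shared library as positional arguments, with --reset-results rewriting the expected .txt files in place. A hypothetical invocation (both paths are placeholders):

    ./test.py /path/to/clang /path/to/libBlinkGCPlugin.so
    ./test.py --reset-results /path/to/clang /path/to/libBlinkGCPlugin.so
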
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch.cpp
new file mode 100644
index 0000000..c246aaa
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch.cpp
@@ -0,0 +1,50 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "trace_after_dispatch.h"
+
+namespace blink {
+
+static B* toB(A* a) { return static_cast<B*>(a); }
+
+void A::trace(Visitor* visitor)
+{
+    switch (m_type) {
+    case TB:
+        toB(this)->traceAfterDispatch(visitor);
+        break;
+    case TC:
+        static_cast<C*>(this)->traceAfterDispatch(visitor);
+        break;
+    case TD:
+        // Missing static_cast<D*>(this)->traceAfterDispatch(visitor);
+        break;
+    }
+}
+
+void A::traceAfterDispatch(Visitor* visitor)
+{
+}
+
+void B::traceAfterDispatch(Visitor* visitor)
+{
+    visitor->trace(m_a);
+    // Missing A::traceAfterDispatch(visitor);
+    // Also check that calling trace does not count.
+    A::trace(visitor);
+}
+
+void C::traceAfterDispatch(Visitor* visitor)
+{
+    // Missing visitor->trace(m_a);
+    A::traceAfterDispatch(visitor);
+}
+
+void D::traceAfterDispatch(Visitor* visitor)
+{
+    visitor->trace(m_a);
+    Abstract::traceAfterDispatch(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch.h
new file mode 100644
index 0000000..a19a536
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch.h
@@ -0,0 +1,55 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACE_AFTER_DISPATCH_H_
+#define TRACE_AFTER_DISPATCH_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class A : public GarbageCollected<A> {
+public:
+    void trace(Visitor*);
+    void traceAfterDispatch(Visitor*);
+protected:
+    enum Type { TB, TC, TD };
+    A(Type type) : m_type(type) { }
+private:
+    Type m_type;
+};
+
+class B : public A {
+public:
+    B() : A(TB) { }
+    void traceAfterDispatch(Visitor*);
+private:
+    Member<A> m_a;
+};
+
+class C : public A {
+public:
+    C() : A(TC) { }
+    void traceAfterDispatch(Visitor*);
+private:
+    Member<A> m_a;
+};
+
+// This class is considered abstract and does not need to be dispatched to.
+class Abstract : public A {
+protected:
+    Abstract(Type type) : A(type) { }
+};
+
+class D : public Abstract {
+public:
+    D() : Abstract(TD) { }
+    void traceAfterDispatch(Visitor*);
+private:
+    Member<A> m_a;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch.txt
new file mode 100644
index 0000000..877fbbe
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch.txt
@@ -0,0 +1,13 @@
+trace_after_dispatch.cpp:11:1: warning: [blink-gc] Missing dispatch to class 'D' in manual trace dispatch.
+void A::trace(Visitor* visitor)
+^
+trace_after_dispatch.cpp:30:1: warning: [blink-gc] Base class 'A' of derived class 'B' requires tracing.
+void B::traceAfterDispatch(Visitor* visitor)
+^
+trace_after_dispatch.cpp:38:1: warning: [blink-gc] Class 'C' has untraced fields that require tracing.
+void C::traceAfterDispatch(Visitor* visitor)
+^
+./trace_after_dispatch.h:36:5: note: [blink-gc] Untraced field 'm_a' declared here:
+    Member<A> m_a;
+    ^
+3 warnings generated.
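
trace_after_dispatch exercises the manual-dispatch checks from three angles: the base trace() must dispatch to every concrete class in the hierarchy, and each traceAfterDispatch() must trace its own fields and chain to the base's traceAfterDispatch() (calling A::trace() instead does not count). A sketch of the corrected methods, restoring exactly the calls the test deliberately omits:

    void A::trace(Visitor* visitor)
    {
        switch (m_type) {
        case TB:
            toB(this)->traceAfterDispatch(visitor);
            break;
        case TC:
            static_cast<C*>(this)->traceAfterDispatch(visitor);
            break;
        case TD:
            static_cast<D*>(this)->traceAfterDispatch(visitor);
            break;
        }
    }

    void B::traceAfterDispatch(Visitor* visitor)
    {
        visitor->trace(m_a);
        A::traceAfterDispatch(visitor);
    }

    void C::traceAfterDispatch(Visitor* visitor)
    {
        visitor->trace(m_a);
        A::traceAfterDispatch(visitor);
    }
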
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl.cpp
new file mode 100644
index 0000000..53a6855
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl.cpp
@@ -0,0 +1,74 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "trace_after_dispatch_impl.h"
+
+namespace blink {
+
+template <typename VisitorDispatcher>
+inline void TraceAfterDispatchInlinedBase::traceImpl(
+    VisitorDispatcher visitor) {
+  // Implement a simple form of manual dispatching, because BlinkGCPlugin
+  // checks that tracing is dispatched to all derived classes.
+  //
+  // This function has to be implemented out-of-line, since we need to know the
+  // definition of derived classes here.
+  if (tag_ == DERIVED) {
+    static_cast<TraceAfterDispatchInlinedDerived*>(this)->traceAfterDispatch(
+        visitor);
+  } else {
+    traceAfterDispatch(visitor);
+  }
+}
+
+void TraceAfterDispatchExternBase::trace(Visitor* visitor) {
+  traceImpl(visitor);
+}
+
+void TraceAfterDispatchExternBase::trace(InlinedGlobalMarkingVisitor visitor) {
+  traceImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void TraceAfterDispatchExternBase::traceImpl(VisitorDispatcher visitor) {
+  if (tag_ == DERIVED) {
+    static_cast<TraceAfterDispatchExternDerived*>(this)->traceAfterDispatch(
+        visitor);
+  } else {
+    traceAfterDispatch(visitor);
+  }
+}
+
+void TraceAfterDispatchExternBase::traceAfterDispatch(Visitor* visitor) {
+  traceAfterDispatchImpl(visitor);
+}
+
+void TraceAfterDispatchExternBase::traceAfterDispatch(
+    InlinedGlobalMarkingVisitor visitor) {
+  traceAfterDispatchImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void TraceAfterDispatchExternBase::traceAfterDispatchImpl(
+    VisitorDispatcher visitor) {
+  visitor->trace(x_base_);
+}
+
+void TraceAfterDispatchExternDerived::traceAfterDispatch(Visitor* visitor) {
+  traceAfterDispatchImpl(visitor);
+}
+
+void TraceAfterDispatchExternDerived::traceAfterDispatch(
+    InlinedGlobalMarkingVisitor visitor) {
+  traceAfterDispatchImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void TraceAfterDispatchExternDerived::traceAfterDispatchImpl(
+    VisitorDispatcher visitor) {
+  visitor->trace(x_derived_);
+  TraceAfterDispatchExternBase::traceAfterDispatch(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl.h
new file mode 100644
index 0000000..fe25279
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl.h
@@ -0,0 +1,104 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACE_AFTER_DISPATCH_IMPL_H_
+#define TRACE_AFTER_DISPATCH_IMPL_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class X : public GarbageCollected<X> {
+ public:
+  void trace(Visitor*) {}
+};
+
+enum ClassTag {
+  BASE, DERIVED
+};
+
+class TraceAfterDispatchInlinedBase
+    : public GarbageCollected<TraceAfterDispatchInlinedBase> {
+ public:
+  explicit TraceAfterDispatchInlinedBase(ClassTag tag) : tag_(tag) {}
+
+  void trace(Visitor* visitor) { traceImpl(visitor); }
+  void trace(InlinedGlobalMarkingVisitor visitor) { traceImpl(visitor); }
+
+  void traceAfterDispatch(Visitor* visitor) { traceAfterDispatchImpl(visitor); }
+  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor) {
+    traceAfterDispatchImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor);
+
+  template <typename VisitorDispatcher>
+  void traceAfterDispatchImpl(VisitorDispatcher visitor) {
+    visitor->trace(x_base_);
+  }
+
+  ClassTag tag_;
+  Member<X> x_base_;
+};
+
+class TraceAfterDispatchInlinedDerived : public TraceAfterDispatchInlinedBase {
+ public:
+  TraceAfterDispatchInlinedDerived() : TraceAfterDispatchInlinedBase(DERIVED) {}
+
+  void traceAfterDispatch(Visitor* visitor) { traceAfterDispatchImpl(visitor); }
+  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor) {
+    traceAfterDispatchImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceAfterDispatchImpl(VisitorDispatcher visitor) {
+    visitor->trace(x_derived_);
+    TraceAfterDispatchInlinedBase::traceAfterDispatch(visitor);
+  }
+
+  Member<X> x_derived_;
+};
+
+class TraceAfterDispatchExternBase
+    : public GarbageCollected<TraceAfterDispatchExternBase> {
+ public:
+  explicit TraceAfterDispatchExternBase(ClassTag tag) : tag_(tag) {}
+
+  void trace(Visitor* visitor);
+  void trace(InlinedGlobalMarkingVisitor visitor);
+
+  void traceAfterDispatch(Visitor* visitor);
+  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor);
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor);
+
+  template <typename VisitorDispatcher>
+  void traceAfterDispatchImpl(VisitorDispatcher visitor);
+
+  ClassTag tag_;
+  Member<X> x_base_;
+};
+
+class TraceAfterDispatchExternDerived : public TraceAfterDispatchExternBase {
+ public:
+  TraceAfterDispatchExternDerived() : TraceAfterDispatchExternBase(DERIVED) {}
+
+  void traceAfterDispatch(Visitor* visitor);
+  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor);
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceAfterDispatchImpl(VisitorDispatcher visitor);
+
+  Member<X> x_derived_;
+};
+
+}
+
+#endif  // TRACE_AFTER_DISPATCH_IMPL_H_
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl.txt
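
trace_after_dispatch_impl is the positive counterpart to trace_after_dispatch: both trace() overloads funnel into a templated traceImpl() that performs the manual dispatch, and the traceAfterDispatchImpl() helpers trace their fields and chain up the hierarchy. The expected-output file above is empty (blob e69de29), i.e. the plugin should accept this pattern without warnings.
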
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl_error.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl_error.cpp
new file mode 100644
index 0000000..23798f7
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl_error.cpp
@@ -0,0 +1,75 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "trace_after_dispatch_impl_error.h"
+
+namespace blink {
+
+template <typename VisitorDispatcher>
+inline void TraceAfterDispatchInlinedBase::traceImpl(
+    VisitorDispatcher visitor) {
+  // Implement a simple form of manual dispatching, because BlinkGCPlugin
+  // checks that tracing is dispatched to all derived classes.
+  //
+  // This function has to be implemented out-of-line, since we need to know the
+  // definition of derived classes here.
+  if (tag_ == DERIVED) {
+    // Missing dispatch call:
+    // static_cast<TraceAfterDispatchInlinedDerived*>(this)->traceAfterDispatch(
+    //     visitor);
+  } else {
+    traceAfterDispatch(visitor);
+  }
+}
+
+void TraceAfterDispatchExternBase::trace(Visitor* visitor) {
+  traceImpl(visitor);
+}
+
+void TraceAfterDispatchExternBase::trace(InlinedGlobalMarkingVisitor visitor) {
+  traceImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void TraceAfterDispatchExternBase::traceImpl(VisitorDispatcher visitor) {
+  if (tag_ == DERIVED) {
+    // Missing dispatch call:
+    // static_cast<TraceAfterDispatchExternDerived*>(this)->traceAfterDispatch(
+    //     visitor);
+  } else {
+    traceAfterDispatch(visitor);
+  }
+}
+
+void TraceAfterDispatchExternBase::traceAfterDispatch(Visitor* visitor) {
+  traceAfterDispatchImpl(visitor);
+}
+
+void TraceAfterDispatchExternBase::traceAfterDispatch(
+    InlinedGlobalMarkingVisitor visitor) {
+  traceAfterDispatchImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void TraceAfterDispatchExternBase::traceAfterDispatchImpl(
+    VisitorDispatcher visitor) {
+  // No trace call.
+}
+
+void TraceAfterDispatchExternDerived::traceAfterDispatch(Visitor* visitor) {
+  traceAfterDispatchImpl(visitor);
+}
+
+void TraceAfterDispatchExternDerived::traceAfterDispatch(
+    InlinedGlobalMarkingVisitor visitor) {
+  traceAfterDispatchImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void TraceAfterDispatchExternDerived::traceAfterDispatchImpl(
+    VisitorDispatcher visitor) {
+  // Ditto.
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl_error.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl_error.h
new file mode 100644
index 0000000..b480e39
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl_error.h
@@ -0,0 +1,103 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACE_AFTER_DISPATCH_IMPL_ERROR_H_
+#define TRACE_AFTER_DISPATCH_IMPL_ERROR_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class X : public GarbageCollected<X> {
+ public:
+  void trace(Visitor*) {}
+};
+
+enum ClassTag {
+  BASE, DERIVED
+};
+
+class TraceAfterDispatchInlinedBase
+    : public GarbageCollected<TraceAfterDispatchInlinedBase> {
+ public:
+  explicit TraceAfterDispatchInlinedBase(ClassTag tag) : tag_(tag) {}
+
+  void trace(Visitor* visitor) { traceImpl(visitor); }
+  void trace(InlinedGlobalMarkingVisitor visitor) { traceImpl(visitor); }
+
+  void traceAfterDispatch(Visitor* visitor) { traceAfterDispatchImpl(visitor); }
+  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor) {
+    traceAfterDispatchImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor);
+
+  template <typename VisitorDispatcher>
+  void traceAfterDispatchImpl(VisitorDispatcher visitor) {
+    // No trace call; should get a warning.
+  }
+
+  ClassTag tag_;
+  Member<X> x_base_;
+};
+
+class TraceAfterDispatchInlinedDerived : public TraceAfterDispatchInlinedBase {
+ public:
+  TraceAfterDispatchInlinedDerived() : TraceAfterDispatchInlinedBase(DERIVED) {}
+
+  void traceAfterDispatch(Visitor* visitor) { traceAfterDispatchImpl(visitor); }
+  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor) {
+    traceAfterDispatchImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceAfterDispatchImpl(VisitorDispatcher visitor) {
+    // No trace call (for member and base class).
+  }
+
+  Member<X> x_derived_;
+};
+
+class TraceAfterDispatchExternBase
+    : public GarbageCollected<TraceAfterDispatchExternBase> {
+ public:
+  explicit TraceAfterDispatchExternBase(ClassTag tag) : tag_(tag) {}
+
+  void trace(Visitor* visitor);
+  void trace(InlinedGlobalMarkingVisitor visitor);
+
+  void traceAfterDispatch(Visitor* visitor);
+  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor);
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor);
+
+  template <typename VisitorDispatcher>
+  void traceAfterDispatchImpl(VisitorDispatcher visitor);
+
+  ClassTag tag_;
+  Member<X> x_base_;
+};
+
+class TraceAfterDispatchExternDerived : public TraceAfterDispatchExternBase {
+ public:
+  TraceAfterDispatchExternDerived() : TraceAfterDispatchExternBase(DERIVED) {}
+
+  void traceAfterDispatch(Visitor* visitor);
+  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor);
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceAfterDispatchImpl(VisitorDispatcher visitor);
+
+  Member<X> x_derived_;
+};
+
+}
+
+#endif  // TRACE_AFTER_DISPATCH_IMPL_ERROR_H_
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl_error.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl_error.txt
new file mode 100644
index 0000000..058fccb
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_after_dispatch_impl_error.txt
@@ -0,0 +1,34 @@
+trace_after_dispatch_impl_error.cpp:10:1: warning: [blink-gc] Missing dispatch to class 'TraceAfterDispatchInlinedDerived' in manual trace dispatch.
+inline void TraceAfterDispatchInlinedBase::traceImpl(
+^
+trace_after_dispatch_impl_error.cpp:35:1: warning: [blink-gc] Missing dispatch to class 'TraceAfterDispatchExternDerived' in manual trace dispatch.
+inline void TraceAfterDispatchExternBase::traceImpl(VisitorDispatcher visitor) {
+^
+In file included from trace_after_dispatch_impl_error.cpp:5:
+./trace_after_dispatch_impl_error.h:39:3: warning: [blink-gc] Class 'TraceAfterDispatchInlinedBase' has untraced fields that require tracing.
+  void traceAfterDispatchImpl(VisitorDispatcher visitor) {
+  ^
+./trace_after_dispatch_impl_error.h:44:3: note: [blink-gc] Untraced field 'x_base_' declared here:
+  Member<X> x_base_;
+  ^
+./trace_after_dispatch_impl_error.h:58:3: warning: [blink-gc] Base class 'TraceAfterDispatchInlinedBase' of derived class 'TraceAfterDispatchInlinedDerived' requires tracing.
+  void traceAfterDispatchImpl(VisitorDispatcher visitor) {
+  ^
+./trace_after_dispatch_impl_error.h:58:3: warning: [blink-gc] Class 'TraceAfterDispatchInlinedDerived' has untraced fields that require tracing.
+./trace_after_dispatch_impl_error.h:62:3: note: [blink-gc] Untraced field 'x_derived_' declared here:
+  Member<X> x_derived_;
+  ^
+trace_after_dispatch_impl_error.cpp:55:1: warning: [blink-gc] Class 'TraceAfterDispatchExternBase' has untraced fields that require tracing.
+inline void TraceAfterDispatchExternBase::traceAfterDispatchImpl(
+^
+./trace_after_dispatch_impl_error.h:84:3: note: [blink-gc] Untraced field 'x_base_' declared here:
+  Member<X> x_base_;
+  ^
+trace_after_dispatch_impl_error.cpp:70:1: warning: [blink-gc] Base class 'TraceAfterDispatchExternBase' of derived class 'TraceAfterDispatchExternDerived' requires tracing.
+inline void TraceAfterDispatchExternDerived::traceAfterDispatchImpl(
+^
+trace_after_dispatch_impl_error.cpp:70:1: warning: [blink-gc] Class 'TraceAfterDispatchExternDerived' has untraced fields that require tracing.
+./trace_after_dispatch_impl_error.h:98:3: note: [blink-gc] Untraced field 'x_derived_' declared here:
+  Member<X> x_derived_;
+  ^
+8 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_collections.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_collections.cpp
new file mode 100644
index 0000000..9ba7c96
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_collections.cpp
@@ -0,0 +1,13 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "trace_collections.h"
+
+namespace blink {
+
+void HeapObject::trace(Visitor* visitor)
+{
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_collections.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_collections.h
new file mode 100644
index 0000000..219b056
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_collections.h
@@ -0,0 +1,44 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACE_COLLECTIONS_H_
+#define TRACE_COLLECTIONS_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    void trace(Visitor*);
+private:
+    HeapVector<Member<HeapObject> > m_heapVector;
+    Vector<Member<HeapObject>, 0, HeapAllocator> m_wtfVector;
+
+    HeapDeque<Member<HeapObject> > m_heapDeque;
+    Deque<Member<HeapObject>, 0, HeapAllocator> m_wtfDeque;
+
+    HeapHashSet<Member<HeapObject> > m_heapSet;
+    HashSet<Member<HeapObject>, void, HeapAllocator> m_wtfSet;
+
+    HeapListHashSet<Member<HeapObject> > m_heapListSet;
+    ListHashSet<Member<HeapObject>, void, HeapAllocator> m_wtfListSet;
+
+    HeapLinkedHashSet<Member<HeapObject> > m_heapLinkedSet;
+    LinkedHashSet<Member<HeapObject>, void, HeapAllocator> m_wtfLinkedSet;
+
+    HeapHashCountedSet<Member<HeapObject> > m_heapCountedSet;
+    HashCountedSet<Member<HeapObject>, void, HeapAllocator> m_wtfCountedSet;
+
+    HeapHashMap<int, Member<HeapObject> > m_heapMapKey;
+    HeapHashMap<Member<HeapObject>, int > m_heapMapVal;
+    HashMap<int, Member<HeapObject>, void, void, void, HeapAllocator>
+    m_wtfMapKey;
+    HashMap<Member<HeapObject>, int, void, void, void, HeapAllocator>
+    m_wtfMapVal;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_collections.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_collections.txt
new file mode 100644
index 0000000..7c20ad4
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_collections.txt
@@ -0,0 +1,52 @@
+trace_collections.cpp:9:1: warning: [blink-gc] Class 'HeapObject' has untraced fields that require tracing.
+void HeapObject::trace(Visitor* visitor)
+^
+./trace_collections.h:16:5: note: [blink-gc] Untraced field 'm_heapVector' declared here:
+    HeapVector<Member<HeapObject> > m_heapVector;
+    ^
+./trace_collections.h:17:5: note: [blink-gc] Untraced field 'm_wtfVector' declared here:
+    Vector<Member<HeapObject>, 0, HeapAllocator> m_wtfVector;
+    ^
+./trace_collections.h:19:5: note: [blink-gc] Untraced field 'm_heapDeque' declared here:
+    HeapDeque<Member<HeapObject> > m_heapDeque;
+    ^
+./trace_collections.h:20:5: note: [blink-gc] Untraced field 'm_wtfDeque' declared here:
+    Deque<Member<HeapObject>, 0, HeapAllocator> m_wtfDeque;
+    ^
+./trace_collections.h:22:5: note: [blink-gc] Untraced field 'm_heapSet' declared here:
+    HeapHashSet<Member<HeapObject> > m_heapSet;
+    ^
+./trace_collections.h:23:5: note: [blink-gc] Untraced field 'm_wtfSet' declared here:
+    HashSet<Member<HeapObject>, void, HeapAllocator> m_wtfSet;
+    ^
+./trace_collections.h:25:5: note: [blink-gc] Untraced field 'm_heapListSet' declared here:
+    HeapListHashSet<Member<HeapObject> > m_heapListSet;
+    ^
+./trace_collections.h:26:5: note: [blink-gc] Untraced field 'm_wtfListSet' declared here:
+    ListHashSet<Member<HeapObject>, void, HeapAllocator> m_wtfListSet;
+    ^
+./trace_collections.h:28:5: note: [blink-gc] Untraced field 'm_heapLinkedSet' declared here:
+    HeapLinkedHashSet<Member<HeapObject> > m_heapLinkedSet;
+    ^
+./trace_collections.h:29:5: note: [blink-gc] Untraced field 'm_wtfLinkedSet' declared here:
+    LinkedHashSet<Member<HeapObject>, void, HeapAllocator> m_wtfLinkedSet;
+    ^
+./trace_collections.h:31:5: note: [blink-gc] Untraced field 'm_heapCountedSet' declared here:
+    HeapHashCountedSet<Member<HeapObject> > m_heapCountedSet;
+    ^
+./trace_collections.h:32:5: note: [blink-gc] Untraced field 'm_wtfCountedSet' declared here:
+    HashCountedSet<Member<HeapObject>, void, HeapAllocator> m_wtfCountedSet;
+    ^
+./trace_collections.h:34:5: note: [blink-gc] Untraced field 'm_heapMapKey' declared here:
+    HeapHashMap<int, Member<HeapObject> > m_heapMapKey;
+    ^
+./trace_collections.h:35:5: note: [blink-gc] Untraced field 'm_heapMapVal' declared here:
+    HeapHashMap<Member<HeapObject>, int > m_heapMapVal;
+    ^
+./trace_collections.h:36:5: note: [blink-gc] Untraced field 'm_wtfMapKey' declared here:
+    HashMap<int, Member<HeapObject>, void, void, void, HeapAllocator>
+    ^
+./trace_collections.h:38:5: note: [blink-gc] Untraced field 'm_wtfMapVal' declared here:
+    HashMap<Member<HeapObject>, int, void, void, void, HeapAllocator>
+    ^
+1 warning generated.
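
Both the Heap* containers and the WTF containers instantiated with HeapAllocator count as traceable fields, so each one must be passed to the visitor. A sketch of the corrected method (abridged; the real fix needs one call per container field):

    void HeapObject::trace(Visitor* visitor)
    {
        visitor->trace(m_heapVector);
        visitor->trace(m_wtfVector);
        visitor->trace(m_heapDeque);
        visitor->trace(m_wtfDeque);
        // Likewise for the set, counted-set and map fields.
    }
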
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_if_needed.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_if_needed.cpp
new file mode 100644
index 0000000..563c6cc
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_if_needed.cpp
@@ -0,0 +1,16 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "trace_if_needed.h"
+
+namespace blink {
+
+template<typename T>
+void TemplatedObject<T>::trace(Visitor* visitor)
+{
+    TraceIfNeeded<T>::trace(visitor, &m_one);
+    // Missing trace of m_two
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_if_needed.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_if_needed.h
new file mode 100644
index 0000000..00b8f22
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_if_needed.h
@@ -0,0 +1,27 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACE_IF_NEEDED_H_
+#define TRACE_IF_NEEDED_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject : public GarbageCollected<HeapObject> { };
+
+template<typename T>
+class TemplatedObject : public GarbageCollected<TemplatedObject<T> > {
+public:
+    virtual void trace(Visitor*);
+private:
+    T m_one;
+    T m_two;
+};
+
+class InstantiatedObject : public TemplatedObject<Member<HeapObject> > { };
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_if_needed.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_if_needed.txt
new file mode 100644
index 0000000..79a24e8
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_if_needed.txt
@@ -0,0 +1,7 @@
+trace_if_needed.cpp:9:1: warning: [blink-gc] Class 'TemplatedObject<blink::Member<blink::HeapObject> >' has untraced fields that require tracing.
+template<typename T>
+^
+./trace_if_needed.h:20:5: note: [blink-gc] Untraced field 'm_two' declared here:
+    T m_two;
+    ^
+1 warning generated.
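
TraceIfNeeded<T>::trace only expands to a real trace when T is a traceable type, which is why the warning is reported against the Member<HeapObject> instantiation rather than against the template itself. A sketch of the complete method:

    template<typename T>
    void TemplatedObject<T>::trace(Visitor* visitor)
    {
        TraceIfNeeded<T>::trace(visitor, &m_one);
        TraceIfNeeded<T>::trace(visitor, &m_two);
    }
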
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_templated_super.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_templated_super.cpp
new file mode 100644
index 0000000..2b59034
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_templated_super.cpp
@@ -0,0 +1,36 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "trace_templated_super.h"
+
+namespace blink {
+
+template<typename T>
+void Super<T>::clearWeakMembers(Visitor* visitor)
+{
+    (void)m_weak;
+}
+
+template<typename T>
+void Super<T>::trace(Visitor* visitor)
+{
+    visitor->registerWeakMembers<Super<T>, &Super<T>::clearWeakMembers>(this);
+    visitor->trace(m_obj);
+    Mixin::trace(visitor);
+}
+
+template<typename T>
+void Sub<T>::trace(Visitor* visitor)
+{
+    // Missing trace of m_obj.
+    Super<T>::trace(visitor);
+}
+
+void HeapObject::trace(Visitor* visitor)
+{
+    visitor->trace(m_obj);
+    Sub<HeapObject>::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_templated_super.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_templated_super.h
new file mode 100644
index 0000000..de8fd7b
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_templated_super.h
@@ -0,0 +1,47 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACE_TEMPLATED_SUPER_H_
+#define TRACE_TEMPLATED_SUPER_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject;
+
+class Mixin : public GarbageCollectedMixin {
+public:
+    virtual void trace(Visitor*) override { }
+};
+
+template<typename T>
+class Super : public GarbageCollected<Super<T> >, public Mixin {
+    USING_GARBAGE_COLLECTED_MIXIN(Super);
+public:
+    virtual void trace(Visitor*) override;
+    void clearWeakMembers(Visitor*);
+private:
+    Member<HeapObject> m_obj;
+    WeakMember<HeapObject> m_weak;
+};
+
+template<typename T>
+class Sub : public Super<T> {
+public:
+    virtual void trace(Visitor* visitor) override;
+private:
+    Member<HeapObject> m_obj;
+};
+
+class HeapObject : public Sub<HeapObject> {
+public:
+    virtual void trace(Visitor*) override;
+private:
+    Member<HeapObject> m_obj;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_templated_super.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_templated_super.txt
new file mode 100644
index 0000000..291b018
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/trace_templated_super.txt
@@ -0,0 +1,7 @@
+trace_templated_super.cpp:23:1: warning: [blink-gc] Class 'Sub<blink::HeapObject>' has untraced fields that require tracing.
+template<typename T>
+^
+./trace_templated_super.h:35:5: note: [blink-gc] Untraced field 'm_obj' declared here:
+    Member<HeapObject> m_obj;
+    ^
+1 warning generated.
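
Sub<T> adds a Member field of its own, so its trace() must trace that field before chaining to the templated base. The sketch below restores the call the test deliberately omits:

    template<typename T>
    void Sub<T>::trace(Visitor* visitor)
    {
        visitor->trace(m_obj);
        Super<T>::trace(visitor);
    }
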
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl.cpp
new file mode 100644
index 0000000..c8849cc
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl.cpp
@@ -0,0 +1,28 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "traceimpl.h"
+
+namespace blink {
+
+void TraceImplExtern::trace(Visitor* visitor) {
+  traceImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void TraceImplExtern::traceImpl(VisitorDispatcher visitor) {
+  visitor->trace(x_);
+}
+
+void TraceImplBaseExtern::trace(Visitor* visitor) {
+  traceImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void TraceImplBaseExtern::traceImpl(VisitorDispatcher visitor) {
+  visitor->trace(x_);
+  Base::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl.h
new file mode 100644
index 0000000..64fae26
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl.h
@@ -0,0 +1,68 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACEIMPL_H_
+#define TRACEIMPL_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class X : public GarbageCollected<X> {
+ public:
+  virtual void trace(Visitor*) {}
+};
+
+class TraceImplInlined : public GarbageCollected<TraceImplInlined> {
+ public:
+  void trace(Visitor* visitor) { traceImpl(visitor); }
+
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {
+    visitor->trace(x_);
+  }
+
+ private:
+  Member<X> x_;
+};
+
+class TraceImplExtern : public GarbageCollected<TraceImplExtern> {
+ public:
+  void trace(Visitor* visitor);
+  template <typename VisitorDispatcher>
+  inline void traceImpl(VisitorDispatcher);
+
+ private:
+  Member<X> x_;
+};
+
+class Base : public GarbageCollected<Base> {
+ public:
+  virtual void trace(Visitor* visitor) {}
+};
+
+class TraceImplBaseInlined : public Base {
+ public:
+  void trace(Visitor* visitor) override { traceImpl(visitor); }
+
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {
+    Base::trace(visitor);
+  }
+};
+
+class TraceImplBaseExtern : public Base {
+ public:
+  void trace(Visitor* visitor) override;
+
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher);
+
+ private:
+  Member<X> x_;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl.txt
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_dependent_scope.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_dependent_scope.cpp
new file mode 100644
index 0000000..11b576c
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_dependent_scope.cpp
@@ -0,0 +1,13 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "traceimpl_dependent_scope.h"
+
+namespace blink {
+
+// Template instantiation.
+template class Derived<int>;
+template class DerivedMissingTrace<int>;
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_dependent_scope.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_dependent_scope.h
new file mode 100644
index 0000000..0d079f6
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_dependent_scope.h
@@ -0,0 +1,62 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACEIMPL_DEPENDENT_SCOPE_H_
+#define TRACEIMPL_DEPENDENT_SCOPE_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class X : public GarbageCollected<X> {
+ public:
+  virtual void trace(Visitor*) {}
+};
+
+template <typename T>
+class Base : public GarbageCollected<Base<T> > {
+ public:
+  virtual void trace(Visitor* visitor) { traceImpl(visitor); }
+  virtual void trace(InlinedGlobalMarkingVisitor visitor) {
+    traceImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {}
+};
+
+template <typename T>
+class Derived : public Base<T> {
+ public:
+  void trace(Visitor* visitor) override { traceImpl(visitor); }
+  void trace(InlinedGlobalMarkingVisitor visitor) override {
+    traceImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {
+    Base<T>::trace(visitor);
+  }
+};
+
+template <typename T>
+class DerivedMissingTrace : public Base<T> {
+ public:
+  void trace(Visitor* visitor) override { traceImpl(visitor); }
+  void trace(InlinedGlobalMarkingVisitor visitor) override {
+    traceImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {
+    // Missing Base<T>::trace(visitor).
+  }
+};
+
+}
+
+#endif  // TRACEIMPL_DEPENDENT_SCOPE_H_
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_dependent_scope.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_dependent_scope.txt
new file mode 100644
index 0000000..e1aab33
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_dependent_scope.txt
@@ -0,0 +1,5 @@
+In file included from traceimpl_dependent_scope.cpp:5:
+./traceimpl_dependent_scope.h:55:3: warning: [blink-gc] Base class 'Base<int>' of derived class 'DerivedMissingTrace<int>' requires tracing.
+  void traceImpl(VisitorDispatcher visitor) {
+  ^
+1 warning generated.
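
Because Base<T> is a dependent base class, the chained call must be spelled Base<T>::trace(visitor), exactly as Derived does; the plugin resolves the dependent call and credits the base as traced. The fix for DerivedMissingTrace is the one-line body the test leaves out (sketch):

    template <typename VisitorDispatcher>
    void traceImpl(VisitorDispatcher visitor) {
      Base<T>::trace(visitor);
    }
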
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_derived_from_templated_base.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_derived_from_templated_base.cpp
new file mode 100644
index 0000000..9636fca
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_derived_from_templated_base.cpp
@@ -0,0 +1,7 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "traceimpl_derived_from_templated_base.h"
+
+// Nothing to define.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_derived_from_templated_base.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_derived_from_templated_base.h
new file mode 100644
index 0000000..21b9978
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_derived_from_templated_base.h
@@ -0,0 +1,37 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACEIMPL_DERIVED_FROM_TEMPLATED_BASE_H_
+#define TRACEIMPL_DERIVED_FROM_TEMPLATED_BASE_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class X : public GarbageCollected<X> {
+ public:
+  virtual void trace(Visitor*) {}
+};
+
+template <int Y>
+class TraceImplTemplatedBase
+    : public GarbageCollected<TraceImplTemplatedBase<Y> > {
+ public:
+  void trace(Visitor* visitor) { traceImpl(visitor); }
+
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {
+    visitor->trace(x_);
+  }
+
+ private:
+  Member<X> x_;
+};
+
+class TraceImplDerivedFromTemplatedBase : public TraceImplTemplatedBase<0> {
+};
+
+}
+
+#endif  // TRACEIMPL_DERIVED_FROM_TEMPLATED_BASE_H_
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_derived_from_templated_base.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_derived_from_templated_base.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_derived_from_templated_base.txt
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_error.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_error.cpp
new file mode 100644
index 0000000..041c565
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_error.cpp
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "traceimpl_error.h"
+
+namespace blink {
+
+void TraceImplExternWithUntracedMember::trace(Visitor* visitor) {
+  traceImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void TraceImplExternWithUntracedMember::traceImpl(
+    VisitorDispatcher visitor) {
+  // Should get a warning as well.
+}
+
+void TraceImplExternWithUntracedBase::trace(Visitor* visitor) {
+  traceImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void TraceImplExternWithUntracedBase::traceImpl(
+    VisitorDispatcher visitor) {
+  // Ditto.
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_error.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_error.h
new file mode 100644
index 0000000..5a883b4
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_error.h
@@ -0,0 +1,68 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACEIMPL_ERROR_H_
+#define TRACEIMPL_ERROR_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class X : public GarbageCollected<X> {
+ public:
+  virtual void trace(Visitor*) {}
+};
+
+class TraceImplInlinedWithUntracedMember
+    : public GarbageCollected<TraceImplInlinedWithUntracedMember> {
+ public:
+  void trace(Visitor* visitor) { traceImpl(visitor); }
+
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {
+    // Empty; should get complaints from the plugin for untraced x_.
+  }
+
+ private:
+  Member<X> x_;
+};
+
+class TraceImplExternWithUntracedMember
+    : public GarbageCollected<TraceImplExternWithUntracedMember> {
+ public:
+  void trace(Visitor* visitor);
+
+  template <typename VisitorDispatcher>
+  inline void traceImpl(VisitorDispatcher);
+
+ private:
+  Member<X> x_;
+};
+
+class Base : public GarbageCollected<Base> {
+ public:
+  virtual void trace(Visitor*) {}
+};
+
+class TraceImplInlineWithUntracedBase : public Base {
+ public:
+  void trace(Visitor* visitor) override { traceImpl(visitor); }
+
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {
+    // Empty; should get complaints from the plugin for untraced Base.
+  }
+};
+
+class TraceImplExternWithUntracedBase : public Base {
+ public:
+  void trace(Visitor*) override;
+
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor);
+};
+
+}
+
+#endif  // TRACEIMPL_ERROR_H_
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_error.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_error.txt
new file mode 100644
index 0000000..070b029
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_error.txt
@@ -0,0 +1,20 @@
+In file included from traceimpl_error.cpp:5:
+./traceimpl_error.h:23:3: warning: [blink-gc] Class 'TraceImplInlinedWithUntracedMember' has untraced fields that require tracing.
+  void traceImpl(VisitorDispatcher visitor) {
+  ^
+./traceimpl_error.h:28:3: note: [blink-gc] Untraced field 'x_' declared here:
+  Member<X> x_;
+  ^
+./traceimpl_error.h:53:3: warning: [blink-gc] Base class 'Base' of derived class 'TraceImplInlineWithUntracedBase' requires tracing.
+  void traceImpl(VisitorDispatcher visitor) {
+  ^
+traceimpl_error.cpp:14:1: warning: [blink-gc] Class 'TraceImplExternWithUntracedMember' has untraced fields that require tracing.
+inline void TraceImplExternWithUntracedMember::traceImpl(
+^
+./traceimpl_error.h:40:3: note: [blink-gc] Untraced field 'x_' declared here:
+  Member<X> x_;
+  ^
+traceimpl_error.cpp:24:1: warning: [blink-gc] Base class 'Base' of derived class 'TraceImplExternWithUntracedBase' requires tracing.
+inline void TraceImplExternWithUntracedBase::traceImpl(
+^
+4 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_omitted_trace.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_omitted_trace.cpp
new file mode 100644
index 0000000..b6dc2df
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_omitted_trace.cpp
@@ -0,0 +1,7 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "traceimpl_omitted_trace.h"
+
+// Nothing to define here.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_omitted_trace.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_omitted_trace.h
new file mode 100644
index 0000000..3c5e955
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_omitted_trace.h
@@ -0,0 +1,47 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACEIMPL_OMITTED_TRACE_H_
+#define TRACEIMPL_OMITTED_TRACE_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class A : public GarbageCollected<A> {
+ public:
+  virtual void trace(Visitor* visitor) { traceImpl(visitor); }
+  virtual void trace(InlinedGlobalMarkingVisitor visitor) {
+    traceImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {}
+};
+
+class B : public A {
+  // trace() isn't necessary because we've got nothing to trace here.
+};
+
+class C : public B {
+ public:
+  void trace(Visitor* visitor) override { traceImpl(visitor); }
+  void trace(InlinedGlobalMarkingVisitor visitor) override {
+    traceImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {
+    // B::trace() is actually A::trace(), and in certain cases we only get
+    // limited information like "there is a function call that will be resolved
+    // to A::trace()". We still want to mark B as traced.
+    B::trace(visitor);
+  }
+};
+
+}
+
+#endif  // TRACEIMPL_OMITTED_TRACE_H_
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_omitted_trace.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_omitted_trace.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_omitted_trace.txt
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded.cpp
new file mode 100644
index 0000000..02d4858
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded.cpp
@@ -0,0 +1,36 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "traceimpl_overloaded.h"
+
+namespace blink {
+
+void ExternBase::trace(Visitor* visitor) {
+  traceImpl(visitor);
+}
+
+void ExternBase::trace(InlinedGlobalMarkingVisitor visitor) {
+  traceImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void ExternBase::traceImpl(VisitorDispatcher visitor) {
+  visitor->trace(x_base_);
+}
+
+void ExternDerived::trace(Visitor* visitor) {
+  traceImpl(visitor);
+}
+
+void ExternDerived::trace(InlinedGlobalMarkingVisitor visitor) {
+  traceImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void ExternDerived::traceImpl(VisitorDispatcher visitor) {
+  visitor->trace(x_derived_);
+  ExternBase::trace(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded.h
new file mode 100644
index 0000000..808821d
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded.h
@@ -0,0 +1,75 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACEIMPL_OVERLOADED_H_
+#define TRACEIMPL_OVERLOADED_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class X : public GarbageCollected<X> {
+ public:
+  void trace(Visitor*) {}
+  void trace(InlinedGlobalMarkingVisitor) {}
+};
+
+class InlinedBase : public GarbageCollected<InlinedBase> {
+ public:
+  virtual void trace(Visitor* visitor) { traceImpl(visitor); }
+  virtual void trace(InlinedGlobalMarkingVisitor visitor) {
+    traceImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) { visitor->trace(x_base_); }
+
+  Member<X> x_base_;
+};
+
+class InlinedDerived : public InlinedBase {
+ public:
+  void trace(Visitor* visitor) override { traceImpl(visitor); }
+  void trace(InlinedGlobalMarkingVisitor visitor) override {
+    traceImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {
+    visitor->trace(x_derived_);
+    InlinedBase::trace(visitor);
+  }
+
+  Member<X> x_derived_;
+};
+
+class ExternBase : public GarbageCollected<ExternBase> {
+ public:
+  virtual void trace(Visitor*);
+  virtual void trace(InlinedGlobalMarkingVisitor);
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher);
+
+  Member<X> x_base_;
+};
+
+class ExternDerived : public ExternBase {
+ public:
+  void trace(Visitor*) override;
+  void trace(InlinedGlobalMarkingVisitor) override;
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher);
+
+  Member<X> x_derived_;
+};
+
+}
+
+#endif  // TRACEIMPL_OVERLOADED_H_
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded.txt
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded_error.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded_error.cpp
new file mode 100644
index 0000000..07cab63
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded_error.cpp
@@ -0,0 +1,35 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "traceimpl_overloaded_error.h"
+
+namespace blink {
+
+void ExternBase::trace(Visitor* visitor) {
+  traceImpl(visitor);
+}
+
+void ExternBase::trace(InlinedGlobalMarkingVisitor visitor) {
+  traceImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void ExternBase::traceImpl(VisitorDispatcher visitor) {
+  // Missing visitor->trace(x_base_).
+}
+
+void ExternDerived::trace(Visitor* visitor) {
+  traceImpl(visitor);
+}
+
+void ExternDerived::trace(InlinedGlobalMarkingVisitor visitor) {
+  traceImpl(visitor);
+}
+
+template <typename VisitorDispatcher>
+inline void ExternDerived::traceImpl(VisitorDispatcher visitor) {
+  // Missing visitor->trace(x_derived_) and ExternBase::trace(visitor).
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded_error.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded_error.h
new file mode 100644
index 0000000..7d7a038
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded_error.h
@@ -0,0 +1,76 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRACEIMPL_OVERLOADED_ERROR_H_
+#define TRACEIMPL_OVERLOADED_ERROR_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class X : public GarbageCollected<X> {
+ public:
+  void trace(Visitor*) {}
+  void trace(InlinedGlobalMarkingVisitor) {}
+};
+
+class InlinedBase : public GarbageCollected<InlinedBase> {
+ public:
+  virtual void trace(Visitor* visitor) { traceImpl(visitor); }
+  virtual void trace(InlinedGlobalMarkingVisitor visitor) {
+    traceImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {
+    // Missing visitor->trace(x_base_).
+  }
+
+  Member<X> x_base_;
+};
+
+class InlinedDerived : public InlinedBase {
+ public:
+  void trace(Visitor* visitor) override { traceImpl(visitor); }
+  void trace(InlinedGlobalMarkingVisitor visitor) override {
+    traceImpl(visitor);
+  }
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher visitor) {
+    // Missing visitor->trace(x_derived_) and InlinedBase::trace(visitor).
+  }
+
+  Member<X> x_derived_;
+};
+
+class ExternBase : public GarbageCollected<ExternBase> {
+ public:
+  virtual void trace(Visitor*);
+  virtual void trace(InlinedGlobalMarkingVisitor);
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher);
+
+  Member<X> x_base_;
+};
+
+class ExternDerived : public ExternBase {
+ public:
+  void trace(Visitor*) override;
+  void trace(InlinedGlobalMarkingVisitor) override;
+
+ private:
+  template <typename VisitorDispatcher>
+  void traceImpl(VisitorDispatcher);
+
+  Member<X> x_derived_;
+};
+
+}
+
+#endif  // TRACEIMPL_OVERLOADED_ERROR_H_
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded_error.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded_error.txt
new file mode 100644
index 0000000..644f9f0
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/traceimpl_overloaded_error.txt
@@ -0,0 +1,28 @@
+In file included from traceimpl_overloaded_error.cpp:5:
+./traceimpl_overloaded_error.h:27:3: warning: [blink-gc] Class 'InlinedBase' has untraced fields that require tracing.
+  void traceImpl(VisitorDispatcher visitor) {
+  ^
+./traceimpl_overloaded_error.h:31:3: note: [blink-gc] Untraced field 'x_base_' declared here:
+  Member<X> x_base_;
+  ^
+./traceimpl_overloaded_error.h:43:3: warning: [blink-gc] Base class 'InlinedBase' of derived class 'InlinedDerived' requires tracing.
+  void traceImpl(VisitorDispatcher visitor) {
+  ^
+./traceimpl_overloaded_error.h:43:3: warning: [blink-gc] Class 'InlinedDerived' has untraced fields that require tracing.
+./traceimpl_overloaded_error.h:47:3: note: [blink-gc] Untraced field 'x_derived_' declared here:
+  Member<X> x_derived_;
+  ^
+traceimpl_overloaded_error.cpp:18:1: warning: [blink-gc] Class 'ExternBase' has untraced fields that require tracing.
+inline void ExternBase::traceImpl(VisitorDispatcher visitor) {
+^
+./traceimpl_overloaded_error.h:59:3: note: [blink-gc] Untraced field 'x_base_' declared here:
+  Member<X> x_base_;
+  ^
+traceimpl_overloaded_error.cpp:31:1: warning: [blink-gc] Base class 'ExternBase' of derived class 'ExternDerived' requires tracing.
+inline void ExternDerived::traceImpl(VisitorDispatcher visitor) {
+^
+traceimpl_overloaded_error.cpp:31:1: warning: [blink-gc] Class 'ExternDerived' has untraced fields that require tracing.
+./traceimpl_overloaded_error.h:71:3: note: [blink-gc] Untraced field 'x_derived_' declared here:
+  Member<X> x_derived_;
+  ^
+6 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/virtual_and_trace_after_dispatch.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/virtual_and_trace_after_dispatch.cpp
new file mode 100644
index 0000000..2ba6f1e
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/virtual_and_trace_after_dispatch.cpp
@@ -0,0 +1,30 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "virtual_and_trace_after_dispatch.h"
+
+namespace blink {
+
+static B* toB(A* a) { return static_cast<B*>(a); }
+
+void A::trace(Visitor* visitor)
+{
+    switch (m_type) {
+    case TB:
+        toB(this)->traceAfterDispatch(visitor);
+        break;
+    }
+}
+
+void A::traceAfterDispatch(Visitor* visitor)
+{
+}
+
+void B::traceAfterDispatch(Visitor* visitor)
+{
+    visitor->trace(m_a);
+    A::traceAfterDispatch(visitor);
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/virtual_and_trace_after_dispatch.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/virtual_and_trace_after_dispatch.h
new file mode 100644
index 0000000..5048349
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/virtual_and_trace_after_dispatch.h
@@ -0,0 +1,34 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef VIRTUAL_AND_TRACE_AFTER_DISPATCH_H_
+#define VIRTUAL_AND_TRACE_AFTER_DISPATCH_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class A : public GarbageCollected<A> {
+public:
+    void trace(Visitor*);
+    void traceAfterDispatch(Visitor*);
+protected:
+    enum Type { TB };
+    A(Type type) : m_type(type) { }
+private:
+    Type m_type;
+};
+
+class B : public A {
+public:
+    B() : A(TB) { }
+    void traceAfterDispatch(Visitor*);
+    virtual void foo() { }
+private:
+    Member<A> m_a;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/virtual_and_trace_after_dispatch.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/virtual_and_trace_after_dispatch.txt
new file mode 100644
index 0000000..fb46696
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/virtual_and_trace_after_dispatch.txt
@@ -0,0 +1,11 @@
+In file included from virtual_and_trace_after_dispatch.cpp:5:
+./virtual_and_trace_after_dispatch.h:12:1: warning: [blink-gc] Left-most base class 'A' of derived class 'B' must be polymorphic.
+class A : public GarbageCollected<A> {
+^
+./virtual_and_trace_after_dispatch.h:23:1: warning: [blink-gc] Class 'B' contains or inherits virtual methods but implements manual dispatching.
+class B : public A {
+^
+./virtual_and_trace_after_dispatch.h:14:5: note: [blink-gc] Manual dispatch 'trace' declared here:
+    void trace(Visitor*);
+    ^
+2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/weak_fields_require_tracing.cpp b/tools/clang/blink_gc_plugin/tests/legacy_naming/weak_fields_require_tracing.cpp
new file mode 100644
index 0000000..382e9f9
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/weak_fields_require_tracing.cpp
@@ -0,0 +1,28 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "weak_fields_require_tracing.h"
+
+namespace blink {
+
+void HeapObject::trace(Visitor* visitor)
+{
+    // Missing visitor->trace(m_obj1);
+    // Missing visitor->trace(m_obj2);
+    // visitor->trace(m_obj3) in callback.
+    // Missing visitor->trace(m_set1);
+    visitor->trace(m_set2);
+    visitor->registerWeakMembers<HeapObject,
+                                 &HeapObject::clearWeakMembers>(this);
+}
+
+void HeapObject::clearWeakMembers(Visitor* visitor)
+{
+    visitor->trace(m_obj1);  // Does not count.
+    // Missing visitor->trace(m_obj2);
+    visitor->trace(m_obj3);  // OK.
+    visitor->trace(m_set1);  // Does not count.
+}
+
+}
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/weak_fields_require_tracing.h b/tools/clang/blink_gc_plugin/tests/legacy_naming/weak_fields_require_tracing.h
new file mode 100644
index 0000000..c6850e6
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/weak_fields_require_tracing.h
@@ -0,0 +1,26 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef WEAK_FIELDS_REQUIRE_TRACING_H_
+#define WEAK_FIELDS_REQUIRE_TRACING_H_
+
+#include "heap/stubs.h"
+
+namespace blink {
+
+class HeapObject : public GarbageCollected<HeapObject> {
+public:
+    void trace(Visitor*);
+    void clearWeakMembers(Visitor*);
+private:
+    Member<HeapObject> m_obj1;
+    WeakMember<HeapObject> m_obj2;
+    WeakMember<HeapObject> m_obj3;
+    HeapHashSet<WeakMember<HeapObject> > m_set1;
+    HeapHashSet<WeakMember<HeapObject> > m_set2;
+};
+
+}
+
+#endif
diff --git a/tools/clang/blink_gc_plugin/tests/legacy_naming/weak_fields_require_tracing.txt b/tools/clang/blink_gc_plugin/tests/legacy_naming/weak_fields_require_tracing.txt
new file mode 100644
index 0000000..02f56a3
--- /dev/null
+++ b/tools/clang/blink_gc_plugin/tests/legacy_naming/weak_fields_require_tracing.txt
@@ -0,0 +1,13 @@
+weak_fields_require_tracing.cpp:9:1: warning: [blink-gc] Class 'HeapObject' has untraced fields that require tracing.
+void HeapObject::trace(Visitor* visitor)
+^
+./weak_fields_require_tracing.h:17:5: note: [blink-gc] Untraced field 'm_obj1' declared here:
+    Member<HeapObject> m_obj1;
+    ^
+./weak_fields_require_tracing.h:18:5: note: [blink-gc] Untraced field 'm_obj2' declared here:
+    WeakMember<HeapObject> m_obj2;
+    ^
+./weak_fields_require_tracing.h:20:5: note: [blink-gc] Untraced field 'm_set1' declared here:
+    HeapHashSet<WeakMember<HeapObject> > m_set1;
+    ^
+1 warning generated.
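
The expected warnings above encode the weak-field rule these tests pin down: a WeakMember may be traced from a callback registered via registerWeakMembers, but strong Member fields and collections of WeakMembers only count as traced when visited directly in trace(). A minimal sketch of a class that would pass the check — WellTracedObject and its fields are hypothetical names, and GarbageCollected, Visitor, Member, WeakMember, and HeapHashSet are assumed to come from the heap/stubs.h test stubs used throughout this suite:

    class WellTracedObject : public GarbageCollected<WellTracedObject> {
    public:
        void trace(Visitor* visitor)
        {
            visitor->trace(m_strong);   // strong Member: must be traced here
            visitor->trace(m_weakSet);  // weak collections also count only here
            visitor->registerWeakMembers<WellTracedObject,
                &WellTracedObject::clearWeakMembers>(this);
        }
        void clearWeakMembers(Visitor* visitor)
        {
            visitor->trace(m_weak);     // WeakMember: tracing in the callback is OK
        }
    private:
        Member<WellTracedObject> m_strong;
        WeakMember<WellTracedObject> m_weak;
        HeapHashSet<WeakMember<WellTracedObject> > m_weakSet;
    };
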
diff --git a/tools/clang/blink_gc_plugin/tests/member_in_offheap_class.cpp b/tools/clang/blink_gc_plugin/tests/member_in_offheap_class.cpp
index 4b44c2d..f182eb1 100644
--- a/tools/clang/blink_gc_plugin/tests/member_in_offheap_class.cpp
+++ b/tools/clang/blink_gc_plugin/tests/member_in_offheap_class.cpp
@@ -6,19 +6,19 @@
 
 namespace blink {
 
-void OffHeapObject::trace(Visitor* visitor)
+void OffHeapObject::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
+    visitor->Trace(m_obj);
 }
 
-void PartObject::trace(Visitor* visitor)
+void PartObject::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
+    visitor->Trace(m_obj);
 }
 
-void InlineObject::trace(Visitor* visitor)
+void InlineObject::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
+    visitor->Trace(m_obj);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/member_in_offheap_class.h b/tools/clang/blink_gc_plugin/tests/member_in_offheap_class.h
index 2a7c868..89357a3 100644
--- a/tools/clang/blink_gc_plugin/tests/member_in_offheap_class.h
+++ b/tools/clang/blink_gc_plugin/tests/member_in_offheap_class.h
@@ -13,7 +13,7 @@
 
 class OffHeapObject {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Member<HeapObject> m_obj; // Must not contain Member.
     Persistent<HeapVector<Member<HeapObject> > > m_objs; // OK
@@ -30,7 +30,7 @@
 class PartObject {
     DISALLOW_NEW();
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Member<HeapObject> m_obj; // OK
 };
@@ -38,7 +38,7 @@
 class InlineObject {
     ALLOW_ONLY_INLINE_ALLOCATION();
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Member<HeapObject> m_obj; // OK
 };
diff --git a/tools/clang/blink_gc_plugin/tests/non_virtual_trace.cpp b/tools/clang/blink_gc_plugin/tests/non_virtual_trace.cpp
index 9f57711..820b930 100644
--- a/tools/clang/blink_gc_plugin/tests/non_virtual_trace.cpp
+++ b/tools/clang/blink_gc_plugin/tests/non_virtual_trace.cpp
@@ -6,18 +6,18 @@
 
 namespace blink {
 
-void A::trace(Visitor* visitor)
+void A::Trace(Visitor* visitor)
 {
 }
 
-void C::trace(Visitor* visitor)
+void C::Trace(Visitor* visitor)
 {
-    B::trace(visitor);
+    B::Trace(visitor);
 }
 
-void D::trace(Visitor* visitor)
+void D::Trace(Visitor* visitor)
 {
-    B::trace(visitor);
+    B::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/non_virtual_trace.h b/tools/clang/blink_gc_plugin/tests/non_virtual_trace.h
index 4179d49..4cbd470 100644
--- a/tools/clang/blink_gc_plugin/tests/non_virtual_trace.h
+++ b/tools/clang/blink_gc_plugin/tests/non_virtual_trace.h
@@ -11,7 +11,7 @@
 
 class A : public GarbageCollected<A> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 };
 
 class B : public A {
@@ -19,12 +19,12 @@
 
 class C : public B {
 public:
-    void trace(Visitor*); // Cannot override a non-virtual trace.
+    void Trace(Visitor*); // Cannot override a non-virtual Trace.
 };
 
 class D : public B {
 public:
-    virtual void trace(Visitor*); // Cannot override a non-virtual trace.
+    virtual void Trace(Visitor*); // Cannot override a non-virtual Trace.
 };
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/non_virtual_trace.txt b/tools/clang/blink_gc_plugin/tests/non_virtual_trace.txt
index a05a94d..65d62eb 100644
--- a/tools/clang/blink_gc_plugin/tests/non_virtual_trace.txt
+++ b/tools/clang/blink_gc_plugin/tests/non_virtual_trace.txt
@@ -3,15 +3,15 @@
 class A : public GarbageCollected<A> {
 ^
 non_virtual_trace.cpp:13:1: warning: [blink-gc] Class 'C' overrides non-virtual trace of base class 'A'.
-void C::trace(Visitor* visitor)
+void C::Trace(Visitor* visitor)
 ^
 ./non_virtual_trace.h:14:5: note: [blink-gc] Non-virtual trace method declared here:
-    void trace(Visitor*);
+    void Trace(Visitor*);
     ^
 non_virtual_trace.cpp:18:1: warning: [blink-gc] Class 'D' overrides non-virtual trace of base class 'A'.
-void D::trace(Visitor* visitor)
+void D::Trace(Visitor* visitor)
 ^
 ./non_virtual_trace.h:14:5: note: [blink-gc] Non-virtual trace method declared here:
-    void trace(Visitor*);
+    void Trace(Visitor*);
     ^
 3 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/own_ptr_to_gc_managed_class.cpp b/tools/clang/blink_gc_plugin/tests/own_ptr_to_gc_managed_class.cpp
index 9e27c3d..7621c14 100644
--- a/tools/clang/blink_gc_plugin/tests/own_ptr_to_gc_managed_class.cpp
+++ b/tools/clang/blink_gc_plugin/tests/own_ptr_to_gc_managed_class.cpp
@@ -6,6 +6,6 @@
 
 namespace blink {
 
-void HeapObject::trace(Visitor* visitor) { }
+void HeapObject::Trace(Visitor* visitor) { }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/own_ptr_to_gc_managed_class.h b/tools/clang/blink_gc_plugin/tests/own_ptr_to_gc_managed_class.h
index 6f47baf..586716a 100644
--- a/tools/clang/blink_gc_plugin/tests/own_ptr_to_gc_managed_class.h
+++ b/tools/clang/blink_gc_plugin/tests/own_ptr_to_gc_managed_class.h
@@ -19,7 +19,7 @@
 
 class HeapObject : public GarbageCollectedFinalized<HeapObject> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Vector<OwnPtr<HeapObject> > m_objs;
     OwnPtr<HeapVector<Member<HeapObject> > > m_objs2;
diff --git a/tools/clang/blink_gc_plugin/tests/part_object_to_gc_derived_class.cpp b/tools/clang/blink_gc_plugin/tests/part_object_to_gc_derived_class.cpp
index 2da8661..43d4e0d 100644
--- a/tools/clang/blink_gc_plugin/tests/part_object_to_gc_derived_class.cpp
+++ b/tools/clang/blink_gc_plugin/tests/part_object_to_gc_derived_class.cpp
@@ -6,9 +6,9 @@
 
 namespace blink {
 
-void B::trace(Visitor* visitor)
+void B::Trace(Visitor* visitor)
 {
-    visitor->trace(m_a);
+    visitor->Trace(m_a);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/part_object_to_gc_derived_class.h b/tools/clang/blink_gc_plugin/tests/part_object_to_gc_derived_class.h
index ef5a649..2b76566 100644
--- a/tools/clang/blink_gc_plugin/tests/part_object_to_gc_derived_class.h
+++ b/tools/clang/blink_gc_plugin/tests/part_object_to_gc_derived_class.h
@@ -13,7 +13,7 @@
 
 class B : public GarbageCollected<B> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     A m_a;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/persistent_field_in_gc_managed_class.cpp b/tools/clang/blink_gc_plugin/tests/persistent_field_in_gc_managed_class.cpp
index 7b3f286..a4d03cd 100644
--- a/tools/clang/blink_gc_plugin/tests/persistent_field_in_gc_managed_class.cpp
+++ b/tools/clang/blink_gc_plugin/tests/persistent_field_in_gc_managed_class.cpp
@@ -6,8 +6,8 @@
 
 namespace blink {
 
-void HeapObject::trace(Visitor* visitor) {
-    visitor->trace(m_parts);
+void HeapObject::Trace(Visitor* visitor) {
+    visitor->Trace(m_parts);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/persistent_field_in_gc_managed_class.h b/tools/clang/blink_gc_plugin/tests/persistent_field_in_gc_managed_class.h
index a90f63c..546d749 100644
--- a/tools/clang/blink_gc_plugin/tests/persistent_field_in_gc_managed_class.h
+++ b/tools/clang/blink_gc_plugin/tests/persistent_field_in_gc_managed_class.h
@@ -19,7 +19,7 @@
 
 class HeapObject : public GarbageCollected<HeapObject> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     PartObject m_part;
     HeapVector<PartObject> m_parts;
diff --git a/tools/clang/blink_gc_plugin/tests/persistent_no_trace.cpp b/tools/clang/blink_gc_plugin/tests/persistent_no_trace.cpp
index 637b46f..e503ddb 100644
--- a/tools/clang/blink_gc_plugin/tests/persistent_no_trace.cpp
+++ b/tools/clang/blink_gc_plugin/tests/persistent_no_trace.cpp
@@ -6,9 +6,9 @@
 
 namespace blink {
 
-void HeapObject::trace(Visitor* visitor) {
-    visitor->trace(m_crossThreadPersistent);
-    visitor->trace(m_crossThreadWeakPersistent);
+void HeapObject::Trace(Visitor* visitor) {
+    visitor->Trace(m_crossThreadPersistent);
+    visitor->Trace(m_crossThreadWeakPersistent);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/persistent_no_trace.h b/tools/clang/blink_gc_plugin/tests/persistent_no_trace.h
index c8beb99..761f161 100644
--- a/tools/clang/blink_gc_plugin/tests/persistent_no_trace.h
+++ b/tools/clang/blink_gc_plugin/tests/persistent_no_trace.h
@@ -11,7 +11,7 @@
 
 class HeapObject : public GarbageCollected<HeapObject> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     CrossThreadPersistent<HeapObject> m_crossThreadPersistent;
     CrossThreadWeakPersistent<HeapObject> m_crossThreadWeakPersistent;
diff --git a/tools/clang/blink_gc_plugin/tests/persistent_no_trace.txt b/tools/clang/blink_gc_plugin/tests/persistent_no_trace.txt
index dcfe76d..d55d80b 100644
--- a/tools/clang/blink_gc_plugin/tests/persistent_no_trace.txt
+++ b/tools/clang/blink_gc_plugin/tests/persistent_no_trace.txt
@@ -1,5 +1,5 @@
 persistent_no_trace.cpp:9:1: warning: [blink-gc] Class 'HeapObject' has untraced or not traceable fields.
-void HeapObject::trace(Visitor* visitor) {
+void HeapObject::Trace(Visitor* visitor) {
 ^
 ./persistent_no_trace.h:16:5: note: [blink-gc] Untraceable field 'm_crossThreadPersistent' declared here:
     CrossThreadPersistent<HeapObject> m_crossThreadPersistent;
diff --git a/tools/clang/blink_gc_plugin/tests/polymorphic_class_with_non_virtual_trace.cpp b/tools/clang/blink_gc_plugin/tests/polymorphic_class_with_non_virtual_trace.cpp
index dc7620a..6bf5a9d 100644
--- a/tools/clang/blink_gc_plugin/tests/polymorphic_class_with_non_virtual_trace.cpp
+++ b/tools/clang/blink_gc_plugin/tests/polymorphic_class_with_non_virtual_trace.cpp
@@ -6,14 +6,14 @@
 
 namespace blink {
 
-void IsLeftMostPolymorphic::trace(Visitor* visitor)
+void IsLeftMostPolymorphic::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
+    visitor->Trace(m_obj);
 }
 
-void IsNotLeftMostPolymorphic::trace(Visitor* visitor)
+void IsNotLeftMostPolymorphic::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
+    visitor->Trace(m_obj);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/polymorphic_class_with_non_virtual_trace.h b/tools/clang/blink_gc_plugin/tests/polymorphic_class_with_non_virtual_trace.h
index f5d999e..7221280 100644
--- a/tools/clang/blink_gc_plugin/tests/polymorphic_class_with_non_virtual_trace.h
+++ b/tools/clang/blink_gc_plugin/tests/polymorphic_class_with_non_virtual_trace.h
@@ -11,7 +11,7 @@
 
 class HeapObject : public GarbageCollected<HeapObject> {
 public:
-    void trace(Visitor*) { }
+    void Trace(Visitor*) { }
 };
 
 class NonPolymorphicBase {
@@ -26,7 +26,7 @@
     : public GarbageCollected<IsLeftMostPolymorphic>,
       public PolymorphicBase {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Member<HeapObject> m_obj;
 };
@@ -36,7 +36,7 @@
       public NonPolymorphicBase,
       public PolymorphicBase {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     Member<HeapObject> m_obj;
 };
@@ -45,7 +45,7 @@
 class TemplatedNonPolymorphicBase
     : public GarbageCollected<TemplatedNonPolymorphicBase<T> > {
 public:
-    void trace(Visitor* visitor) { visitor->trace(m_obj); }
+    void Trace(Visitor* visitor) { visitor->Trace(m_obj); }
 private:
     Member<HeapObject> m_obj;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/pure_virtual_trace.h b/tools/clang/blink_gc_plugin/tests/pure_virtual_trace.h
index 356a95e..12f7abd 100644
--- a/tools/clang/blink_gc_plugin/tests/pure_virtual_trace.h
+++ b/tools/clang/blink_gc_plugin/tests/pure_virtual_trace.h
@@ -11,7 +11,7 @@
 
 class A : public GarbageCollected<A> {
 public:
-    virtual void trace(Visitor*) = 0;
+    virtual void Trace(Visitor*) = 0;
 };
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/pure_virtual_trace.txt b/tools/clang/blink_gc_plugin/tests/pure_virtual_trace.txt
index 175a28a..c7a534f 100644
--- a/tools/clang/blink_gc_plugin/tests/pure_virtual_trace.txt
+++ b/tools/clang/blink_gc_plugin/tests/pure_virtual_trace.txt
@@ -1,5 +1,5 @@
 In file included from pure_virtual_trace.cpp:5:
 ./pure_virtual_trace.h:14:5: warning: [blink-gc] Garbage collected class 'A' is not permitted to declare a pure-virtual trace method.
-    virtual void trace(Visitor*) = 0;
+    virtual void Trace(Visitor*) = 0;
     ^
 1 warning generated.
diff --git a/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class.cpp b/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class.cpp
index 4d6cc05..6a0b693 100644
--- a/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class.cpp
+++ b/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class.cpp
@@ -6,8 +6,8 @@
 
 namespace blink {
 
-void HeapObject::trace(Visitor* visitor) {
-    visitor->trace(m_objs);
+void HeapObject::Trace(Visitor* visitor) {
+    visitor->Trace(m_objs);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class.h b/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class.h
index 18fa9fa..4033371 100644
--- a/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class.h
+++ b/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class.h
@@ -22,7 +22,7 @@
 
 class HeapObject : public GarbageCollected<HeapObject> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     PartObject m_part;
     HeapVector<HeapObject*> m_objs;
diff --git a/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class_error.cpp b/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class_error.cpp
index f71d1b8..67642c0 100644
--- a/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class_error.cpp
+++ b/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class_error.cpp
@@ -6,8 +6,8 @@
 
 namespace blink {
 
-void HeapObject::trace(Visitor* visitor) {
-    visitor->trace(m_objs);
+void HeapObject::Trace(Visitor* visitor) {
+    visitor->Trace(m_objs);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class_error.h b/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class_error.h
index f4921c4..99e5b56 100644
--- a/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class_error.h
+++ b/tools/clang/blink_gc_plugin/tests/raw_ptr_to_gc_managed_class_error.h
@@ -22,7 +22,7 @@
 
 class HeapObject : public GarbageCollected<HeapObject> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     PartObject m_part;
     HeapVector<HeapObject*> m_objs;
diff --git a/tools/clang/blink_gc_plugin/tests/ref_ptr_to_gc_managed_class.cpp b/tools/clang/blink_gc_plugin/tests/ref_ptr_to_gc_managed_class.cpp
index e0a200f..3d0286a 100644
--- a/tools/clang/blink_gc_plugin/tests/ref_ptr_to_gc_managed_class.cpp
+++ b/tools/clang/blink_gc_plugin/tests/ref_ptr_to_gc_managed_class.cpp
@@ -6,6 +6,6 @@
 
 namespace blink {
 
-void HeapObject::trace(Visitor*) { }
+void HeapObject::Trace(Visitor*) { }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/ref_ptr_to_gc_managed_class.h b/tools/clang/blink_gc_plugin/tests/ref_ptr_to_gc_managed_class.h
index c3df7f8..24ff08c 100644
--- a/tools/clang/blink_gc_plugin/tests/ref_ptr_to_gc_managed_class.h
+++ b/tools/clang/blink_gc_plugin/tests/ref_ptr_to_gc_managed_class.h
@@ -19,7 +19,7 @@
 
 class HeapObject : public GarbageCollectedFinalized<HeapObject> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     PartObject m_part;
     Vector<RefPtr<HeapObject> > m_objs;
diff --git a/tools/clang/blink_gc_plugin/tests/register_weak_members_template.h b/tools/clang/blink_gc_plugin/tests/register_weak_members_template.h
index 7d3905a..61d8fbb 100644
--- a/tools/clang/blink_gc_plugin/tests/register_weak_members_template.h
+++ b/tools/clang/blink_gc_plugin/tests/register_weak_members_template.h
@@ -11,26 +11,26 @@
 
 class X : public GarbageCollected<X> {
  public:
-  void trace(Visitor* visitor) { traceImpl(visitor); }
-  void trace(InlinedGlobalMarkingVisitor visitor) { traceImpl(visitor); }
+  void Trace(Visitor* visitor) { TraceImpl(visitor); }
+  void Trace(InlinedGlobalMarkingVisitor visitor) { TraceImpl(visitor); }
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {}
+  void TraceImpl(VisitorDispatcher visitor) {}
 };
 
 class HasUntracedWeakMembers : public GarbageCollected<HasUntracedWeakMembers> {
  public:
-  void trace(Visitor* visitor) { traceImpl(visitor); }
-  void trace(InlinedGlobalMarkingVisitor visitor) { traceImpl(visitor); }
+  void Trace(Visitor* visitor) { TraceImpl(visitor); }
+  void Trace(InlinedGlobalMarkingVisitor visitor) { TraceImpl(visitor); }
 
   // Doesn't have to be defined for the purpose of this test.
   void clearWeakMembers(Visitor* visitor);
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {
-    visitor->template registerWeakMembers<
+  void TraceImpl(VisitorDispatcher visitor) {
+    visitor->template RegisterWeakMembers<
         HasUntracedWeakMembers,
         &HasUntracedWeakMembers::clearWeakMembers>(this);
   }
diff --git a/tools/clang/blink_gc_plugin/tests/stack_allocated.cpp b/tools/clang/blink_gc_plugin/tests/stack_allocated.cpp
index 3c4e321..74ae83b 100644
--- a/tools/clang/blink_gc_plugin/tests/stack_allocated.cpp
+++ b/tools/clang/blink_gc_plugin/tests/stack_allocated.cpp
@@ -16,7 +16,7 @@
 
 }
 
-void HeapObject::trace(Visitor* visitor)
+void HeapObject::Trace(Visitor* visitor)
 {
 }
 
diff --git a/tools/clang/blink_gc_plugin/tests/stack_allocated.h b/tools/clang/blink_gc_plugin/tests/stack_allocated.h
index 10d8f41..574219c 100644
--- a/tools/clang/blink_gc_plugin/tests/stack_allocated.h
+++ b/tools/clang/blink_gc_plugin/tests/stack_allocated.h
@@ -25,7 +25,7 @@
 
 class HeapObject : public GarbageCollected<HeapObject> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     StackObject m_part; // Cannot embed a stack allocated object.
 };
diff --git a/tools/clang/blink_gc_plugin/tests/stack_allocated.txt b/tools/clang/blink_gc_plugin/tests/stack_allocated.txt
index c6bb373..80980c3 100644
--- a/tools/clang/blink_gc_plugin/tests/stack_allocated.txt
+++ b/tools/clang/blink_gc_plugin/tests/stack_allocated.txt
@@ -20,7 +20,7 @@
 ./stack_allocated.h:39:3: warning: [blink-gc] Garbage collected class 'DerivedHeapObject2' is not permitted to override its new operator.
   STACK_ALLOCATED();
   ^
-./heap/stubs.h:149:5: note: expanded from macro 'STACK_ALLOCATED'
+./heap/stubs.h:178:5: note: expanded from macro 'STACK_ALLOCATED'
     __attribute__((annotate("blink_stack_allocated")))      \
     ^
 stack_allocated.cpp:12:1: warning: [blink-gc] Class 'AnonStackObject' contains invalid fields.
diff --git a/tools/clang/blink_gc_plugin/tests/templated_class_with_local_class_requires_trace.cpp b/tools/clang/blink_gc_plugin/tests/templated_class_with_local_class_requires_trace.cpp
index bd8b737..739318b 100644
--- a/tools/clang/blink_gc_plugin/tests/templated_class_with_local_class_requires_trace.cpp
+++ b/tools/clang/blink_gc_plugin/tests/templated_class_with_local_class_requires_trace.cpp
@@ -7,10 +7,10 @@
 namespace blink {
 
 template<typename T>
-void TemplatedObject<T>::trace(Visitor* visitor)
+void TemplatedObject<T>::Trace(Visitor* visitor)
 {
-    visitor->trace(m_local);
-    visitor->trace(m_memberRef);
+    visitor->Trace(m_local);
+    visitor->Trace(m_memberRef);
 }
 
 class Test {
diff --git a/tools/clang/blink_gc_plugin/tests/templated_class_with_local_class_requires_trace.h b/tools/clang/blink_gc_plugin/tests/templated_class_with_local_class_requires_trace.h
index d2b0225..6656120 100644
--- a/tools/clang/blink_gc_plugin/tests/templated_class_with_local_class_requires_trace.h
+++ b/tools/clang/blink_gc_plugin/tests/templated_class_with_local_class_requires_trace.h
@@ -15,7 +15,7 @@
 public:
     HeapObject() { }
 
-    void trace(Visitor*) { }
+    void Trace(Visitor*) { }
 };
 
 template<typename T>
@@ -26,15 +26,15 @@
     {
     }
 
-    void trace(Visitor*);
+    void Trace(Visitor*);
 
 private:
     class Local final : public GarbageCollected<Local> {
     public:
-        void trace(Visitor* visitor)
+        void Trace(Visitor* visitor)
         {
-            visitor->trace(m_heapObject);
-            visitor->trace(m_object);
+            visitor->Trace(m_heapObject);
+            visitor->Trace(m_object);
         }
     private:
         Member<HeapObject> m_heapObject;
diff --git a/tools/clang/blink_gc_plugin/tests/templated_class_with_local_class_requires_trace.txt b/tools/clang/blink_gc_plugin/tests/templated_class_with_local_class_requires_trace.txt
index fa6b9f5..fa3e0ed 100644
--- a/tools/clang/blink_gc_plugin/tests/templated_class_with_local_class_requires_trace.txt
+++ b/tools/clang/blink_gc_plugin/tests/templated_class_with_local_class_requires_trace.txt
@@ -18,7 +18,7 @@
         OwnPtr<HeapObject> m_object;
         ^
 ./templated_class_with_local_class_requires_trace.h:34:9: warning: [blink-gc] Class 'Local' has untraced or not traceable fields.
-        void trace(Visitor* visitor)
+        void Trace(Visitor* visitor)
         ^
 ./templated_class_with_local_class_requires_trace.h:41:9: note: [blink-gc] Untraceable field 'm_object' declared here:
         OwnPtr<HeapObject> m_object;
diff --git a/tools/clang/blink_gc_plugin/tests/test.py b/tools/clang/blink_gc_plugin/tests/test.py
index a380cc6..b1338bf 100755
--- a/tools/clang/blink_gc_plugin/tests/test.py
+++ b/tools/clang/blink_gc_plugin/tests/test.py
@@ -20,6 +20,10 @@
 
   def AdjustClangArguments(self, clang_cmd):
     clang_cmd.append('-Wno-inaccessible-base')
+    clang_cmd.append('-Xclang')
+    clang_cmd.append('-plugin-arg-blink-gc-plugin')
+    clang_cmd.append('-Xclang')
+    clang_cmd.append('use-chromium-style-naming')
 
   def ProcessOneResult(self, test_name, actual):
     # Some Blink GC plugins dump a JSON representation of the object graph, and
diff --git a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch.cpp b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch.cpp
index c246aaa..4c62354 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch.cpp
+++ b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch.cpp
@@ -8,43 +8,43 @@
 
 static B* toB(A* a) { return static_cast<B*>(a); }
 
-void A::trace(Visitor* visitor)
+void A::Trace(Visitor* visitor)
 {
     switch (m_type) {
     case TB:
-        toB(this)->traceAfterDispatch(visitor);
+        toB(this)->TraceAfterDispatch(visitor);
         break;
     case TC:
-        static_cast<C*>(this)->traceAfterDispatch(visitor);
+        static_cast<C*>(this)->TraceAfterDispatch(visitor);
         break;
     case TD:
-        // Missing static_cast<D*>(this)->traceAfterDispatch(visitor);
+        // Missing static_cast<D*>(this)->TraceAfterDispatch(visitor);
         break;
     }
 }
 
-void A::traceAfterDispatch(Visitor* visitor)
+void A::TraceAfterDispatch(Visitor* visitor)
 {
 }
 
-void B::traceAfterDispatch(Visitor* visitor)
+void B::TraceAfterDispatch(Visitor* visitor)
 {
-    visitor->trace(m_a);
-    // Missing A::traceAfterDispatch(visitor);
-    // Also check that calling trace does not count.
-    A::trace(visitor);
+    visitor->Trace(m_a);
+    // Missing A::TraceAfterDispatch(visitor);
+    // Also check that calling Trace does not count.
+    A::Trace(visitor);
 }
 
-void C::traceAfterDispatch(Visitor* visitor)
+void C::TraceAfterDispatch(Visitor* visitor)
 {
-    // Missing visitor->trace(m_a);
-    A::traceAfterDispatch(visitor);
+    // Missing visitor->Trace(m_a);
+    A::TraceAfterDispatch(visitor);
 }
 
-void D::traceAfterDispatch(Visitor* visitor)
+void D::TraceAfterDispatch(Visitor* visitor)
 {
-    visitor->trace(m_a);
-    Abstract::traceAfterDispatch(visitor);
+    visitor->Trace(m_a);
+    Abstract::TraceAfterDispatch(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch.h b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch.h
index a19a536..0a5a7c7 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch.h
+++ b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch.h
@@ -11,8 +11,8 @@
 
 class A : public GarbageCollected<A> {
 public:
-    void trace(Visitor*);
-    void traceAfterDispatch(Visitor*);
+    void Trace(Visitor*);
+    void TraceAfterDispatch(Visitor*);
 protected:
     enum Type { TB, TC, TD };
     A(Type type) : m_type(type) { }
@@ -23,7 +23,7 @@
 class B : public A {
 public:
     B() : A(TB) { }
-    void traceAfterDispatch(Visitor*);
+    void TraceAfterDispatch(Visitor*);
 private:
     Member<A> m_a;
 };
@@ -31,7 +31,7 @@
 class C : public A {
 public:
     C() : A(TC) { }
-    void traceAfterDispatch(Visitor*);
+    void TraceAfterDispatch(Visitor*);
 private:
     Member<A> m_a;
 };
@@ -45,7 +45,7 @@
 class D : public Abstract {
 public:
     D() : Abstract(TD) { }
-    void traceAfterDispatch(Visitor*);
+    void TraceAfterDispatch(Visitor*);
 private:
     Member<A> m_a;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch.txt b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch.txt
index 877fbbe..4873999 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch.txt
+++ b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch.txt
@@ -1,11 +1,11 @@
 trace_after_dispatch.cpp:11:1: warning: [blink-gc] Missing dispatch to class 'D' in manual trace dispatch.
-void A::trace(Visitor* visitor)
+void A::Trace(Visitor* visitor)
 ^
 trace_after_dispatch.cpp:30:1: warning: [blink-gc] Base class 'A' of derived class 'B' requires tracing.
-void B::traceAfterDispatch(Visitor* visitor)
+void B::TraceAfterDispatch(Visitor* visitor)
 ^
 trace_after_dispatch.cpp:38:1: warning: [blink-gc] Class 'C' has untraced fields that require tracing.
-void C::traceAfterDispatch(Visitor* visitor)
+void C::TraceAfterDispatch(Visitor* visitor)
 ^
 ./trace_after_dispatch.h:36:5: note: [blink-gc] Untraced field 'm_a' declared here:
     Member<A> m_a;
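
For reference, the manual-dispatch shape that trace_after_dispatch expects to pass cleanly: the base Trace() switches on a type tag and forwards to TraceAfterDispatch() on every concrete class, and each TraceAfterDispatch() traces its own fields before chaining to the base. A sketch under the same heap/stubs.h stub assumptions (Node, Leaf, and m_child are hypothetical names):

    class Leaf;

    class Node : public GarbageCollected<Node> {
    public:
        void Trace(Visitor*);
        void TraceAfterDispatch(Visitor*) { }
    protected:
        enum Type { TLeaf };
        explicit Node(Type type) : m_type(type) { }
    private:
        Type m_type;
    };

    class Leaf : public Node {
    public:
        Leaf() : Node(TLeaf) { }
        void TraceAfterDispatch(Visitor* visitor)
        {
            visitor->Trace(m_child);            // trace own fields first...
            Node::TraceAfterDispatch(visitor);  // ...then chain to the base class
        }
    private:
        Member<Node> m_child;
    };

    // Defined out-of-line after Leaf so the cast sees a complete type, mirroring
    // how these tests structure their .cpp files.
    inline void Node::Trace(Visitor* visitor)
    {
        switch (m_type) {
        case TLeaf:
            // Every concrete class must be reachable from this dispatch, or the
            // plugin reports "Missing dispatch to class ...".
            static_cast<Leaf*>(this)->TraceAfterDispatch(visitor);
            break;
        }
    }
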
diff --git a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl.cpp b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl.cpp
index 53a6855..17bd1f8 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl.cpp
+++ b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl.cpp
@@ -7,7 +7,7 @@
 namespace blink {
 
 template <typename VisitorDispatcher>
-inline void TraceAfterDispatchInlinedBase::traceImpl(
+inline void TraceAfterDispatchInlinedBase::TraceImpl(
     VisitorDispatcher visitor) {
   // Implement a simple form of manual dispatching, because BlinkGCPlugin
   // checks if the tracing is dispatched to all derived classes.
@@ -15,60 +15,60 @@
   // This function has to be implemented out-of-line, since we need to know the
   // definition of derived classes here.
   if (tag_ == DERIVED) {
-    static_cast<TraceAfterDispatchInlinedDerived*>(this)->traceAfterDispatch(
+    static_cast<TraceAfterDispatchInlinedDerived*>(this)->TraceAfterDispatch(
         visitor);
   } else {
-    traceAfterDispatch(visitor);
+    TraceAfterDispatch(visitor);
   }
 }
 
-void TraceAfterDispatchExternBase::trace(Visitor* visitor) {
-  traceImpl(visitor);
+void TraceAfterDispatchExternBase::Trace(Visitor* visitor) {
+  TraceImpl(visitor);
 }
 
-void TraceAfterDispatchExternBase::trace(InlinedGlobalMarkingVisitor visitor) {
-  traceImpl(visitor);
+void TraceAfterDispatchExternBase::Trace(InlinedGlobalMarkingVisitor visitor) {
+  TraceImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void TraceAfterDispatchExternBase::traceImpl(VisitorDispatcher visitor) {
+inline void TraceAfterDispatchExternBase::TraceImpl(VisitorDispatcher visitor) {
   if (tag_ == DERIVED) {
-    static_cast<TraceAfterDispatchExternDerived*>(this)->traceAfterDispatch(
+    static_cast<TraceAfterDispatchExternDerived*>(this)->TraceAfterDispatch(
         visitor);
   } else {
-    traceAfterDispatch(visitor);
+    TraceAfterDispatch(visitor);
   }
 }
 
-void TraceAfterDispatchExternBase::traceAfterDispatch(Visitor* visitor) {
-  traceAfterDispatchImpl(visitor);
+void TraceAfterDispatchExternBase::TraceAfterDispatch(Visitor* visitor) {
+  TraceAfterDispatchImpl(visitor);
 }
 
-void TraceAfterDispatchExternBase::traceAfterDispatch(
+void TraceAfterDispatchExternBase::TraceAfterDispatch(
     InlinedGlobalMarkingVisitor visitor) {
-  traceAfterDispatchImpl(visitor);
+  TraceAfterDispatchImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void TraceAfterDispatchExternBase::traceAfterDispatchImpl(
+inline void TraceAfterDispatchExternBase::TraceAfterDispatchImpl(
     VisitorDispatcher visitor) {
-  visitor->trace(x_base_);
+  visitor->Trace(x_base_);
 }
 
-void TraceAfterDispatchExternDerived::traceAfterDispatch(Visitor* visitor) {
-  traceAfterDispatchImpl(visitor);
+void TraceAfterDispatchExternDerived::TraceAfterDispatch(Visitor* visitor) {
+  TraceAfterDispatchImpl(visitor);
 }
 
-void TraceAfterDispatchExternDerived::traceAfterDispatch(
+void TraceAfterDispatchExternDerived::TraceAfterDispatch(
     InlinedGlobalMarkingVisitor visitor) {
-  traceAfterDispatchImpl(visitor);
+  TraceAfterDispatchImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void TraceAfterDispatchExternDerived::traceAfterDispatchImpl(
+inline void TraceAfterDispatchExternDerived::TraceAfterDispatchImpl(
     VisitorDispatcher visitor) {
-  visitor->trace(x_derived_);
-  TraceAfterDispatchExternBase::traceAfterDispatch(visitor);
+  visitor->Trace(x_derived_);
+  TraceAfterDispatchExternBase::TraceAfterDispatch(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl.h b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl.h
index fe25279..c5e3063 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl.h
+++ b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl.h
@@ -11,7 +11,7 @@
 
 class X : public GarbageCollected<X> {
  public:
-  void trace(Visitor*) {}
+  void Trace(Visitor*) {}
 };
 
 enum ClassTag {
@@ -23,21 +23,21 @@
  public:
   explicit TraceAfterDispatchInlinedBase(ClassTag tag) : tag_(tag) {}
 
-  void trace(Visitor* visitor) { traceImpl(visitor); }
-  void trace(InlinedGlobalMarkingVisitor visitor) { traceImpl(visitor); }
+  void Trace(Visitor* visitor) { TraceImpl(visitor); }
+  void Trace(InlinedGlobalMarkingVisitor visitor) { TraceImpl(visitor); }
 
-  void traceAfterDispatch(Visitor* visitor) { traceAfterDispatchImpl(visitor); }
-  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor) {
-    traceAfterDispatchImpl(visitor);
+  void TraceAfterDispatch(Visitor* visitor) { TraceAfterDispatchImpl(visitor); }
+  void TraceAfterDispatch(InlinedGlobalMarkingVisitor visitor) {
+    TraceAfterDispatchImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor);
+  void TraceImpl(VisitorDispatcher visitor);
 
   template <typename VisitorDispatcher>
-  void traceAfterDispatchImpl(VisitorDispatcher visitor) {
-    visitor->trace(x_base_);
+  void TraceAfterDispatchImpl(VisitorDispatcher visitor) {
+    visitor->Trace(x_base_);
   }
 
   ClassTag tag_;
@@ -48,16 +48,16 @@
  public:
   TraceAfterDispatchInlinedDerived() : TraceAfterDispatchInlinedBase(DERIVED) {}
 
-  void traceAfterDispatch(Visitor* visitor) { traceAfterDispatchImpl(visitor); }
-  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor) {
-    traceAfterDispatchImpl(visitor);
+  void TraceAfterDispatch(Visitor* visitor) { TraceAfterDispatchImpl(visitor); }
+  void TraceAfterDispatch(InlinedGlobalMarkingVisitor visitor) {
+    TraceAfterDispatchImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceAfterDispatchImpl(VisitorDispatcher visitor) {
-    visitor->trace(x_derived_);
-    TraceAfterDispatchInlinedBase::traceAfterDispatch(visitor);
+  void TraceAfterDispatchImpl(VisitorDispatcher visitor) {
+    visitor->Trace(x_derived_);
+    TraceAfterDispatchInlinedBase::TraceAfterDispatch(visitor);
   }
 
   Member<X> x_derived_;
@@ -68,18 +68,18 @@
  public:
   explicit TraceAfterDispatchExternBase(ClassTag tag) : tag_(tag) {}
 
-  void trace(Visitor* visitor);
-  void trace(InlinedGlobalMarkingVisitor visitor);
+  void Trace(Visitor* visitor);
+  void Trace(InlinedGlobalMarkingVisitor visitor);
 
-  void traceAfterDispatch(Visitor* visitor);
-  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor);
+  void TraceAfterDispatch(Visitor* visitor);
+  void TraceAfterDispatch(InlinedGlobalMarkingVisitor visitor);
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor);
+  void TraceImpl(VisitorDispatcher visitor);
 
   template <typename VisitorDispatcher>
-  void traceAfterDispatchImpl(VisitorDispatcher visitor);
+  void TraceAfterDispatchImpl(VisitorDispatcher visitor);
 
   ClassTag tag_;
   Member<X> x_base_;
@@ -89,12 +89,12 @@
  public:
   TraceAfterDispatchExternDerived() : TraceAfterDispatchExternBase(DERIVED) {}
 
-  void traceAfterDispatch(Visitor* visitor);
-  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor);
+  void TraceAfterDispatch(Visitor* visitor);
+  void TraceAfterDispatch(InlinedGlobalMarkingVisitor visitor);
 
  private:
   template <typename VisitorDispatcher>
-  void traceAfterDispatchImpl(VisitorDispatcher visitor);
+  void TraceAfterDispatchImpl(VisitorDispatcher visitor);
 
   Member<X> x_derived_;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl_error.cpp b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl_error.cpp
index 23798f7..46553f3 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl_error.cpp
+++ b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl_error.cpp
@@ -7,7 +7,7 @@
 namespace blink {
 
 template <typename VisitorDispatcher>
-inline void TraceAfterDispatchInlinedBase::traceImpl(
+inline void TraceAfterDispatchInlinedBase::TraceImpl(
     VisitorDispatcher visitor) {
   // Implement a simple form of manual dispatching, because BlinkGCPlugin
   // checks if the tracing is dispatched to all derived classes.
@@ -16,58 +16,58 @@
   // definition of derived classes here.
   if (tag_ == DERIVED) {
     // Missing dispatch call:
-    // static_cast<TraceAfterDispatchInlinedDerived*>(this)->traceAfterDispatch(
+    // static_cast<TraceAfterDispatchInlinedDerived*>(this)->TraceAfterDispatch(
     //     visitor);
   } else {
-    traceAfterDispatch(visitor);
+    TraceAfterDispatch(visitor);
   }
 }
 
-void TraceAfterDispatchExternBase::trace(Visitor* visitor) {
-  traceImpl(visitor);
+void TraceAfterDispatchExternBase::Trace(Visitor* visitor) {
+  TraceImpl(visitor);
 }
 
-void TraceAfterDispatchExternBase::trace(InlinedGlobalMarkingVisitor visitor) {
-  traceImpl(visitor);
+void TraceAfterDispatchExternBase::Trace(InlinedGlobalMarkingVisitor visitor) {
+  TraceImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void TraceAfterDispatchExternBase::traceImpl(VisitorDispatcher visitor) {
+inline void TraceAfterDispatchExternBase::TraceImpl(VisitorDispatcher visitor) {
   if (tag_ == DERIVED) {
     // Missing dispatch call:
-    // static_cast<TraceAfterDispatchExternDerived*>(this)->traceAfterDispatch(
+    // static_cast<TraceAfterDispatchExternDerived*>(this)->TraceAfterDispatch(
     //     visitor);
   } else {
-    traceAfterDispatch(visitor);
+    TraceAfterDispatch(visitor);
   }
 }
 
-void TraceAfterDispatchExternBase::traceAfterDispatch(Visitor* visitor) {
-  traceAfterDispatchImpl(visitor);
+void TraceAfterDispatchExternBase::TraceAfterDispatch(Visitor* visitor) {
+  TraceAfterDispatchImpl(visitor);
 }
 
-void TraceAfterDispatchExternBase::traceAfterDispatch(
+void TraceAfterDispatchExternBase::TraceAfterDispatch(
     InlinedGlobalMarkingVisitor visitor) {
-  traceAfterDispatchImpl(visitor);
+  TraceAfterDispatchImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void TraceAfterDispatchExternBase::traceAfterDispatchImpl(
+inline void TraceAfterDispatchExternBase::TraceAfterDispatchImpl(
     VisitorDispatcher visitor) {
-  // No trace call.
+  // No Trace call.
 }
 
-void TraceAfterDispatchExternDerived::traceAfterDispatch(Visitor* visitor) {
-  traceAfterDispatchImpl(visitor);
+void TraceAfterDispatchExternDerived::TraceAfterDispatch(Visitor* visitor) {
+  TraceAfterDispatchImpl(visitor);
 }
 
-void TraceAfterDispatchExternDerived::traceAfterDispatch(
+void TraceAfterDispatchExternDerived::TraceAfterDispatch(
     InlinedGlobalMarkingVisitor visitor) {
-  traceAfterDispatchImpl(visitor);
+  TraceAfterDispatchImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void TraceAfterDispatchExternDerived::traceAfterDispatchImpl(
+inline void TraceAfterDispatchExternDerived::TraceAfterDispatchImpl(
     VisitorDispatcher visitor) {
   // Ditto.
 }
diff --git a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl_error.h b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl_error.h
index b480e39..29f9a8a 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl_error.h
+++ b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl_error.h
@@ -11,7 +11,7 @@
 
 class X : public GarbageCollected<X> {
  public:
-  void trace(Visitor*) {}
+  void Trace(Visitor*) {}
 };
 
 enum ClassTag {
@@ -23,21 +23,21 @@
  public:
   explicit TraceAfterDispatchInlinedBase(ClassTag tag) : tag_(tag) {}
 
-  void trace(Visitor* visitor) { traceImpl(visitor); }
-  void trace(InlinedGlobalMarkingVisitor visitor) { traceImpl(visitor); }
+  void Trace(Visitor* visitor) { TraceImpl(visitor); }
+  void Trace(InlinedGlobalMarkingVisitor visitor) { TraceImpl(visitor); }
 
-  void traceAfterDispatch(Visitor* visitor) { traceAfterDispatchImpl(visitor); }
-  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor) {
-    traceAfterDispatchImpl(visitor);
+  void TraceAfterDispatch(Visitor* visitor) { TraceAfterDispatchImpl(visitor); }
+  void TraceAfterDispatch(InlinedGlobalMarkingVisitor visitor) {
+    TraceAfterDispatchImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor);
+  void TraceImpl(VisitorDispatcher visitor);
 
   template <typename VisitorDispatcher>
-  void traceAfterDispatchImpl(VisitorDispatcher visitor) {
-    // No trace call; should get a warning.
+  void TraceAfterDispatchImpl(VisitorDispatcher visitor) {
+    // No Trace call; should get a warning.
   }
 
   ClassTag tag_;
@@ -48,15 +48,15 @@
  public:
   TraceAfterDispatchInlinedDerived() : TraceAfterDispatchInlinedBase(DERIVED) {}
 
-  void traceAfterDispatch(Visitor* visitor) { traceAfterDispatchImpl(visitor); }
-  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor) {
-    traceAfterDispatchImpl(visitor);
+  void TraceAfterDispatch(Visitor* visitor) { TraceAfterDispatchImpl(visitor); }
+  void TraceAfterDispatch(InlinedGlobalMarkingVisitor visitor) {
+    TraceAfterDispatchImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceAfterDispatchImpl(VisitorDispatcher visitor) {
-    // No trace call (for member and base class).
+  void TraceAfterDispatchImpl(VisitorDispatcher visitor) {
+    // No Trace call (for member and base class).
   }
 
   Member<X> x_derived_;
@@ -67,18 +67,18 @@
  public:
   explicit TraceAfterDispatchExternBase(ClassTag tag) : tag_(tag) {}
 
-  void trace(Visitor* visitor);
-  void trace(InlinedGlobalMarkingVisitor visitor);
+  void Trace(Visitor* visitor);
+  void Trace(InlinedGlobalMarkingVisitor visitor);
 
-  void traceAfterDispatch(Visitor* visitor);
-  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor);
+  void TraceAfterDispatch(Visitor* visitor);
+  void TraceAfterDispatch(InlinedGlobalMarkingVisitor visitor);
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor);
+  void TraceImpl(VisitorDispatcher visitor);
 
   template <typename VisitorDispatcher>
-  void traceAfterDispatchImpl(VisitorDispatcher visitor);
+  void TraceAfterDispatchImpl(VisitorDispatcher visitor);
 
   ClassTag tag_;
   Member<X> x_base_;
@@ -88,12 +88,12 @@
  public:
   TraceAfterDispatchExternDerived() : TraceAfterDispatchExternBase(DERIVED) {}
 
-  void traceAfterDispatch(Visitor* visitor);
-  void traceAfterDispatch(InlinedGlobalMarkingVisitor visitor);
+  void TraceAfterDispatch(Visitor* visitor);
+  void TraceAfterDispatch(InlinedGlobalMarkingVisitor visitor);
 
  private:
   template <typename VisitorDispatcher>
-  void traceAfterDispatchImpl(VisitorDispatcher visitor);
+  void TraceAfterDispatchImpl(VisitorDispatcher visitor);
 
   Member<X> x_derived_;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl_error.txt b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl_error.txt
index 058fccb..5637daa 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl_error.txt
+++ b/tools/clang/blink_gc_plugin/tests/trace_after_dispatch_impl_error.txt
@@ -1,31 +1,31 @@
 trace_after_dispatch_impl_error.cpp:10:1: warning: [blink-gc] Missing dispatch to class 'TraceAfterDispatchInlinedDerived' in manual trace dispatch.
-inline void TraceAfterDispatchInlinedBase::traceImpl(
+inline void TraceAfterDispatchInlinedBase::TraceImpl(
 ^
 trace_after_dispatch_impl_error.cpp:35:1: warning: [blink-gc] Missing dispatch to class 'TraceAfterDispatchExternDerived' in manual trace dispatch.
-inline void TraceAfterDispatchExternBase::traceImpl(VisitorDispatcher visitor) {
+inline void TraceAfterDispatchExternBase::TraceImpl(VisitorDispatcher visitor) {
 ^
 In file included from trace_after_dispatch_impl_error.cpp:5:
 ./trace_after_dispatch_impl_error.h:39:3: warning: [blink-gc] Class 'TraceAfterDispatchInlinedBase' has untraced fields that require tracing.
-  void traceAfterDispatchImpl(VisitorDispatcher visitor) {
+  void TraceAfterDispatchImpl(VisitorDispatcher visitor) {
   ^
 ./trace_after_dispatch_impl_error.h:44:3: note: [blink-gc] Untraced field 'x_base_' declared here:
   Member<X> x_base_;
   ^
 ./trace_after_dispatch_impl_error.h:58:3: warning: [blink-gc] Base class 'TraceAfterDispatchInlinedBase' of derived class 'TraceAfterDispatchInlinedDerived' requires tracing.
-  void traceAfterDispatchImpl(VisitorDispatcher visitor) {
+  void TraceAfterDispatchImpl(VisitorDispatcher visitor) {
   ^
 ./trace_after_dispatch_impl_error.h:58:3: warning: [blink-gc] Class 'TraceAfterDispatchInlinedDerived' has untraced fields that require tracing.
 ./trace_after_dispatch_impl_error.h:62:3: note: [blink-gc] Untraced field 'x_derived_' declared here:
   Member<X> x_derived_;
   ^
 trace_after_dispatch_impl_error.cpp:55:1: warning: [blink-gc] Class 'TraceAfterDispatchExternBase' has untraced fields that require tracing.
-inline void TraceAfterDispatchExternBase::traceAfterDispatchImpl(
+inline void TraceAfterDispatchExternBase::TraceAfterDispatchImpl(
 ^
 ./trace_after_dispatch_impl_error.h:84:3: note: [blink-gc] Untraced field 'x_base_' declared here:
   Member<X> x_base_;
   ^
 trace_after_dispatch_impl_error.cpp:70:1: warning: [blink-gc] Base class 'TraceAfterDispatchExternBase' of derived class 'TraceAfterDispatchExternDerived' requires tracing.
-inline void TraceAfterDispatchExternDerived::traceAfterDispatchImpl(
+inline void TraceAfterDispatchExternDerived::TraceAfterDispatchImpl(
 ^
 trace_after_dispatch_impl_error.cpp:70:1: warning: [blink-gc] Class 'TraceAfterDispatchExternDerived' has untraced fields that require tracing.
 ./trace_after_dispatch_impl_error.h:98:3: note: [blink-gc] Untraced field 'x_derived_' declared here:
diff --git a/tools/clang/blink_gc_plugin/tests/trace_collections.cpp b/tools/clang/blink_gc_plugin/tests/trace_collections.cpp
index 9ba7c96..c4e7c9c 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_collections.cpp
+++ b/tools/clang/blink_gc_plugin/tests/trace_collections.cpp
@@ -6,7 +6,7 @@
 
 namespace blink {
 
-void HeapObject::trace(Visitor* visitor)
+void HeapObject::Trace(Visitor* visitor)
 {
 }
 
diff --git a/tools/clang/blink_gc_plugin/tests/trace_collections.h b/tools/clang/blink_gc_plugin/tests/trace_collections.h
index 219b056..208823a 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_collections.h
+++ b/tools/clang/blink_gc_plugin/tests/trace_collections.h
@@ -11,7 +11,7 @@
 
 class HeapObject : public GarbageCollected<HeapObject> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
 private:
     HeapVector<Member<HeapObject> > m_heapVector;
     Vector<Member<HeapObject>, 0, HeapAllocator> m_wtfVector;
diff --git a/tools/clang/blink_gc_plugin/tests/trace_collections.txt b/tools/clang/blink_gc_plugin/tests/trace_collections.txt
index 7c20ad4..1faecb2 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_collections.txt
+++ b/tools/clang/blink_gc_plugin/tests/trace_collections.txt
@@ -1,5 +1,5 @@
 trace_collections.cpp:9:1: warning: [blink-gc] Class 'HeapObject' has untraced fields that require tracing.
-void HeapObject::trace(Visitor* visitor)
+void HeapObject::Trace(Visitor* visitor)
 ^
 ./trace_collections.h:16:5: note: [blink-gc] Untraced field 'm_heapVector' declared here:
     HeapVector<Member<HeapObject> > m_heapVector;
diff --git a/tools/clang/blink_gc_plugin/tests/trace_if_needed.cpp b/tools/clang/blink_gc_plugin/tests/trace_if_needed.cpp
index 563c6cc..0787f6d 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_if_needed.cpp
+++ b/tools/clang/blink_gc_plugin/tests/trace_if_needed.cpp
@@ -7,10 +7,10 @@
 namespace blink {
 
 template<typename T>
-void TemplatedObject<T>::trace(Visitor* visitor)
+void TemplatedObject<T>::Trace(Visitor* visitor)
 {
-    TraceIfNeeded<T>::trace(visitor, &m_one);
-    // Missing trace of m_two
+    TraceIfNeeded<T>::Trace(visitor, &m_one);
+    // Missing Trace of m_two
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/trace_if_needed.h b/tools/clang/blink_gc_plugin/tests/trace_if_needed.h
index 00b8f22..9cc1089 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_if_needed.h
+++ b/tools/clang/blink_gc_plugin/tests/trace_if_needed.h
@@ -14,7 +14,7 @@
 template<typename T>
 class TemplatedObject : public GarbageCollected<TemplatedObject<T> > {
 public:
-    virtual void trace(Visitor*);
+    virtual void Trace(Visitor*);
 private:
     T m_one;
     T m_two;
diff --git a/tools/clang/blink_gc_plugin/tests/trace_templated_super.cpp b/tools/clang/blink_gc_plugin/tests/trace_templated_super.cpp
index 2b59034..a3de020 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_templated_super.cpp
+++ b/tools/clang/blink_gc_plugin/tests/trace_templated_super.cpp
@@ -13,24 +13,24 @@
 }
 
 template<typename T>
-void Super<T>::trace(Visitor* visitor)
+void Super<T>::Trace(Visitor* visitor)
 {
-    visitor->registerWeakMembers<Super<T>, &Super<T>::clearWeakMembers>(this);
-    visitor->trace(m_obj);
-    Mixin::trace(visitor);
+    visitor->RegisterWeakMembers<Super<T>, &Super<T>::clearWeakMembers>(this);
+    visitor->Trace(m_obj);
+    Mixin::Trace(visitor);
 }
 
 template<typename T>
-void Sub<T>::trace(Visitor* visitor)
+void Sub<T>::Trace(Visitor* visitor)
 {
-    // Missing trace of m_obj.
-    Super<T>::trace(visitor);
+    // Missing Trace of m_obj.
+    Super<T>::Trace(visitor);
 }
 
-void HeapObject::trace(Visitor* visitor)
+void HeapObject::Trace(Visitor* visitor)
 {
-    visitor->trace(m_obj);
-    Sub<HeapObject>::trace(visitor);
+    visitor->Trace(m_obj);
+    Sub<HeapObject>::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/trace_templated_super.h b/tools/clang/blink_gc_plugin/tests/trace_templated_super.h
index de8fd7b..a0c9031 100644
--- a/tools/clang/blink_gc_plugin/tests/trace_templated_super.h
+++ b/tools/clang/blink_gc_plugin/tests/trace_templated_super.h
@@ -13,14 +13,14 @@
 
 class Mixin : public GarbageCollectedMixin {
 public:
-    virtual void trace(Visitor*) override { }
+    virtual void Trace(Visitor*) override { }
 };
 
 template<typename T>
 class Super : public GarbageCollected<Super<T> >, public Mixin {
     USING_GARBAGE_COLLECTED_MIXIN(Super);
 public:
-    virtual void trace(Visitor*) override;
+    virtual void Trace(Visitor*) override;
     void clearWeakMembers(Visitor*);
 private:
     Member<HeapObject> m_obj;
@@ -30,14 +30,14 @@
 template<typename T>
 class Sub : public Super<T> {
 public:
-    virtual void trace(Visitor* visitor) override;
+    virtual void Trace(Visitor* visitor) override;
 private:
     Member<HeapObject> m_obj;
 };
 
 class HeapObject : public Sub<HeapObject> {
 public:
-    virtual void trace(Visitor*) override;
+    virtual void Trace(Visitor*) override;
 private:
     Member<HeapObject> m_obj;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl.cpp b/tools/clang/blink_gc_plugin/tests/traceimpl.cpp
index c8849cc..fe28a0b 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl.cpp
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl.cpp
@@ -6,23 +6,23 @@
 
 namespace blink {
 
-void TraceImplExtern::trace(Visitor* visitor) {
-  traceImpl(visitor);
+void TraceImplExtern::Trace(Visitor* visitor) {
+  TraceImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void TraceImplExtern::traceImpl(VisitorDispatcher visitor) {
-  visitor->trace(x_);
+inline void TraceImplExtern::TraceImpl(VisitorDispatcher visitor) {
+  visitor->Trace(x_);
 }
 
-void TraceImplBaseExtern::trace(Visitor* visitor) {
-  traceImpl(visitor);
+void TraceImplBaseExtern::Trace(Visitor* visitor) {
+  TraceImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void TraceImplBaseExtern::traceImpl(VisitorDispatcher visitor) {
-  visitor->trace(x_);
-  Base::trace(visitor);
+inline void TraceImplBaseExtern::TraceImpl(VisitorDispatcher visitor) {
+  visitor->Trace(x_);
+  Base::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl.h b/tools/clang/blink_gc_plugin/tests/traceimpl.h
index 64fae26..8cb51b1 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl.h
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl.h
@@ -11,16 +11,16 @@
 
 class X : public GarbageCollected<X> {
  public:
-  virtual void trace(Visitor*) {}
+  virtual void Trace(Visitor*) {}
 };
 
 class TraceImplInlined : public GarbageCollected<TraceImplInlined> {
  public:
-  void trace(Visitor* visitor) { traceImpl(visitor); }
+  void Trace(Visitor* visitor) { TraceImpl(visitor); }
 
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {
-    visitor->trace(x_);
+  void TraceImpl(VisitorDispatcher visitor) {
+    visitor->Trace(x_);
   }
 
  private:
@@ -29,9 +29,9 @@
 
 class TraceImplExtern : public GarbageCollected<TraceImplExtern> {
  public:
-  void trace(Visitor* visitor);
+  void Trace(Visitor* visitor);
   template <typename VisitorDispatcher>
-  inline void traceImpl(VisitorDispatcher);
+  inline void TraceImpl(VisitorDispatcher);
 
  private:
   Member<X> x_;
@@ -39,25 +39,25 @@
 
 class Base : public GarbageCollected<Base> {
  public:
-  virtual void trace(Visitor* visitor) {}
+  virtual void Trace(Visitor* visitor) {}
 };
 
 class TraceImplBaseInlined : public Base {
  public:
-  void trace(Visitor* visitor) override { traceImpl(visitor); }
+  void Trace(Visitor* visitor) override { TraceImpl(visitor); }
 
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {
-    Base::trace(visitor);
+  void TraceImpl(VisitorDispatcher visitor) {
+    Base::Trace(visitor);
   }
 };
 
 class TraceImplBaseExtern : public Base {
  public:
-  void trace(Visitor* visitor) override;
+  void Trace(Visitor* visitor) override;
 
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher);
+  void TraceImpl(VisitorDispatcher);
 
  private:
   Member<X> x_;
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl_dependent_scope.h b/tools/clang/blink_gc_plugin/tests/traceimpl_dependent_scope.h
index 0d079f6..1c54627 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl_dependent_scope.h
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl_dependent_scope.h
@@ -11,49 +11,49 @@
 
 class X : public GarbageCollected<X> {
  public:
-  virtual void trace(Visitor*) {}
+  virtual void Trace(Visitor*) {}
 };
 
 template <typename T>
 class Base : public GarbageCollected<Base<T> > {
  public:
-  virtual void trace(Visitor* visitor) { traceImpl(visitor); }
-  virtual void trace(InlinedGlobalMarkingVisitor visitor) {
-    traceImpl(visitor);
+  virtual void Trace(Visitor* visitor) { TraceImpl(visitor); }
+  virtual void Trace(InlinedGlobalMarkingVisitor visitor) {
+    TraceImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {}
+  void TraceImpl(VisitorDispatcher visitor) {}
 };
 
 template <typename T>
 class Derived : public Base<T> {
  public:
-  void trace(Visitor* visitor) override { traceImpl(visitor); }
-  void trace(InlinedGlobalMarkingVisitor visitor) override {
-    traceImpl(visitor);
+  void Trace(Visitor* visitor) override { TraceImpl(visitor); }
+  void Trace(InlinedGlobalMarkingVisitor visitor) override {
+    TraceImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {
-    Base<T>::trace(visitor);
+  void TraceImpl(VisitorDispatcher visitor) {
+    Base<T>::Trace(visitor);
   }
 };
 
 template <typename T>
 class DerivedMissingTrace : public Base<T> {
  public:
-  void trace(Visitor* visitor) override { traceImpl(visitor); }
-  void trace(InlinedGlobalMarkingVisitor visitor) override {
-    traceImpl(visitor);
+  void Trace(Visitor* visitor) override { TraceImpl(visitor); }
+  void Trace(InlinedGlobalMarkingVisitor visitor) override {
+    TraceImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {
-    // Missing Base<T>::trace(visitor).
+  void TraceImpl(VisitorDispatcher visitor) {
+    // Missing Base<T>::Trace(visitor).
   }
 };
 
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl_dependent_scope.txt b/tools/clang/blink_gc_plugin/tests/traceimpl_dependent_scope.txt
index e1aab33..b4779a0 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl_dependent_scope.txt
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl_dependent_scope.txt
@@ -1,5 +1,5 @@
 In file included from traceimpl_dependent_scope.cpp:5:
 ./traceimpl_dependent_scope.h:55:3: warning: [blink-gc] Base class 'Base<int>' of derived class 'DerivedMissingTrace<int>' requires tracing.
-  void traceImpl(VisitorDispatcher visitor) {
+  void TraceImpl(VisitorDispatcher visitor) {
   ^
 1 warning generated.
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl_derived_from_templated_base.h b/tools/clang/blink_gc_plugin/tests/traceimpl_derived_from_templated_base.h
index 21b9978..e5ebdd5 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl_derived_from_templated_base.h
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl_derived_from_templated_base.h
@@ -11,18 +11,18 @@
 
 class X : public GarbageCollected<X> {
  public:
-  virtual void trace(Visitor*) {}
+  virtual void Trace(Visitor*) {}
 };
 
 template <int Y>
 class TraceImplTemplatedBase
     : public GarbageCollected<TraceImplTemplatedBase<Y> > {
  public:
-  void trace(Visitor* visitor) { traceImpl(visitor); }
+  void Trace(Visitor* visitor) { TraceImpl(visitor); }
 
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {
-    visitor->trace(x_);
+  void TraceImpl(VisitorDispatcher visitor) {
+    visitor->Trace(x_);
   }
 
  private:
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl_error.cpp b/tools/clang/blink_gc_plugin/tests/traceimpl_error.cpp
index 041c565..c14e52d 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl_error.cpp
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl_error.cpp
@@ -6,22 +6,22 @@
 
 namespace blink {
 
-void TraceImplExternWithUntracedMember::trace(Visitor* visitor) {
-  traceImpl(visitor);
+void TraceImplExternWithUntracedMember::Trace(Visitor* visitor) {
+  TraceImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void TraceImplExternWithUntracedMember::traceImpl(
+inline void TraceImplExternWithUntracedMember::TraceImpl(
     VisitorDispatcher visitor) {
   // Should get a warning as well.
 }
 
-void TraceImplExternWithUntracedBase::trace(Visitor* visitor) {
-  traceImpl(visitor);
+void TraceImplExternWithUntracedBase::Trace(Visitor* visitor) {
+  TraceImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void TraceImplExternWithUntracedBase::traceImpl(
+inline void TraceImplExternWithUntracedBase::TraceImpl(
     VisitorDispatcher visitor) {
   // Ditto.
 }
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl_error.h b/tools/clang/blink_gc_plugin/tests/traceimpl_error.h
index 5a883b4..c7254d4 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl_error.h
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl_error.h
@@ -11,16 +11,16 @@
 
 class X : public GarbageCollected<X> {
  public:
-  virtual void trace(Visitor*) {}
+  virtual void Trace(Visitor*) {}
 };
 
 class TraceImplInlinedWithUntracedMember
     : public GarbageCollected<TraceImplInlinedWithUntracedMember> {
  public:
-  void trace(Visitor* visitor) { traceImpl(visitor); }
+  void Trace(Visitor* visitor) { TraceImpl(visitor); }
 
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {
+  void TraceImpl(VisitorDispatcher visitor) {
     // Empty; should get complaints from the plugin for untraced x_.
   }
 
@@ -31,10 +31,10 @@
 class TraceImplExternWithUntracedMember
     : public GarbageCollected<TraceImplExternWithUntracedMember> {
  public:
-  void trace(Visitor* visitor);
+  void Trace(Visitor* visitor);
 
   template <typename VisitorDispatcher>
-  inline void traceImpl(VisitorDispatcher);
+  inline void TraceImpl(VisitorDispatcher);
 
  private:
   Member<X> x_;
@@ -42,25 +42,25 @@
 
 class Base : public GarbageCollected<Base> {
  public:
-  virtual void trace(Visitor*) {}
+  virtual void Trace(Visitor*) {}
 };
 
 class TraceImplInlineWithUntracedBase : public Base {
  public:
-  void trace(Visitor* visitor) override { traceImpl(visitor); }
+  void Trace(Visitor* visitor) override { TraceImpl(visitor); }
 
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {
+  void TraceImpl(VisitorDispatcher visitor) {
     // Empty; should get complaints from the plugin for untraced Base.
   }
 };
 
 class TraceImplExternWithUntracedBase : public Base {
  public:
-  void trace(Visitor*) override;
+  void Trace(Visitor*) override;
 
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor);
+  void TraceImpl(VisitorDispatcher visitor);
 };
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl_error.txt b/tools/clang/blink_gc_plugin/tests/traceimpl_error.txt
index 070b029..ec976a0 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl_error.txt
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl_error.txt
@@ -1,20 +1,20 @@
 In file included from traceimpl_error.cpp:5:
 ./traceimpl_error.h:23:3: warning: [blink-gc] Class 'TraceImplInlinedWithUntracedMember' has untraced fields that require tracing.
-  void traceImpl(VisitorDispatcher visitor) {
+  void TraceImpl(VisitorDispatcher visitor) {
   ^
 ./traceimpl_error.h:28:3: note: [blink-gc] Untraced field 'x_' declared here:
   Member<X> x_;
   ^
 ./traceimpl_error.h:53:3: warning: [blink-gc] Base class 'Base' of derived class 'TraceImplInlineWithUntracedBase' requires tracing.
-  void traceImpl(VisitorDispatcher visitor) {
+  void TraceImpl(VisitorDispatcher visitor) {
   ^
 traceimpl_error.cpp:14:1: warning: [blink-gc] Class 'TraceImplExternWithUntracedMember' has untraced fields that require tracing.
-inline void TraceImplExternWithUntracedMember::traceImpl(
+inline void TraceImplExternWithUntracedMember::TraceImpl(
 ^
 ./traceimpl_error.h:40:3: note: [blink-gc] Untraced field 'x_' declared here:
   Member<X> x_;
   ^
 traceimpl_error.cpp:24:1: warning: [blink-gc] Base class 'Base' of derived class 'TraceImplExternWithUntracedBase' requires tracing.
-inline void TraceImplExternWithUntracedBase::traceImpl(
+inline void TraceImplExternWithUntracedBase::TraceImpl(
 ^
 4 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl_omitted_trace.h b/tools/clang/blink_gc_plugin/tests/traceimpl_omitted_trace.h
index 3c5e955..7a171be 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl_omitted_trace.h
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl_omitted_trace.h
@@ -11,34 +11,34 @@
 
 class A : public GarbageCollected<A> {
  public:
-  virtual void trace(Visitor* visitor) { traceImpl(visitor); }
-  virtual void trace(InlinedGlobalMarkingVisitor visitor) {
-    traceImpl(visitor);
+  virtual void Trace(Visitor* visitor) { TraceImpl(visitor); }
+  virtual void Trace(InlinedGlobalMarkingVisitor visitor) {
+    TraceImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {}
+  void TraceImpl(VisitorDispatcher visitor) {}
 };
 
 class B : public A {
-  // trace() isn't necessary because we've got nothing to trace here.
+  // Trace() isn't necessary because we've got nothing to trace here.
 };
 
 class C : public B {
  public:
-  void trace(Visitor* visitor) override { traceImpl(visitor); }
-  void trace(InlinedGlobalMarkingVisitor visitor) override {
-    traceImpl(visitor);
+  void Trace(Visitor* visitor) override { TraceImpl(visitor); }
+  void Trace(InlinedGlobalMarkingVisitor visitor) override {
+    TraceImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {
-    // B::trace() is actually A::trace(), and in certain cases we only get
+  void TraceImpl(VisitorDispatcher visitor) {
+    // B::Trace() is actually A::Trace(), and in certain cases we only get
     // limited information like "there is a function call that will be resolved
-    // to A::trace()". We still want to mark B as traced.
-    B::trace(visitor);
+    // to A::Trace()". We still want to mark B as Traced.
+    B::Trace(visitor);
   }
 };
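The renames in these GC-plugin tests are mechanical, but the invariant they exercise is worth stating once: every garbage-collected class must define a Trace method that visits each Member<> field and delegates to its nearest traced base. A minimal sketch of the post-rename convention, with illustrative class and field names (not taken from the tests):

// Minimal sketch, assuming Blink's GarbageCollected/Member/Visitor types.
class Child : public Parent {
 public:
  void Trace(Visitor* visitor) override {
    visitor->Trace(member_);  // every Member<T> field must be traced
    Parent::Trace(visitor);   // and the traced base must be delegated to
  }

 private:
  Member<Node> member_;  // Node stands in for any heap-allocated type
};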
 
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded.cpp b/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded.cpp
index 02d4858..e5a2fee 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded.cpp
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded.cpp
@@ -6,31 +6,31 @@
 
 namespace blink {
 
-void ExternBase::trace(Visitor* visitor) {
-  traceImpl(visitor);
+void ExternBase::Trace(Visitor* visitor) {
+  TraceImpl(visitor);
 }
 
-void ExternBase::trace(InlinedGlobalMarkingVisitor visitor) {
-  traceImpl(visitor);
+void ExternBase::Trace(InlinedGlobalMarkingVisitor visitor) {
+  TraceImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void ExternBase::traceImpl(VisitorDispatcher visitor) {
-  visitor->trace(x_base_);
+inline void ExternBase::TraceImpl(VisitorDispatcher visitor) {
+  visitor->Trace(x_base_);
 }
 
-void ExternDerived::trace(Visitor* visitor) {
-  traceImpl(visitor);
+void ExternDerived::Trace(Visitor* visitor) {
+  TraceImpl(visitor);
 }
 
-void ExternDerived::trace(InlinedGlobalMarkingVisitor visitor) {
-  traceImpl(visitor);
+void ExternDerived::Trace(InlinedGlobalMarkingVisitor visitor) {
+  TraceImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void ExternDerived::traceImpl(VisitorDispatcher visitor) {
-  visitor->trace(x_derived_);
-  ExternBase::trace(visitor);
+inline void ExternDerived::TraceImpl(VisitorDispatcher visitor) {
+  visitor->Trace(x_derived_);
+  ExternBase::Trace(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded.h b/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded.h
index 808821d..63ba65a 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded.h
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded.h
@@ -11,36 +11,36 @@
 
 class X : public GarbageCollected<X> {
  public:
-  void trace(Visitor*) {}
-  void trace(InlinedGlobalMarkingVisitor) {}
+  void Trace(Visitor*) {}
+  void Trace(InlinedGlobalMarkingVisitor) {}
 };
 
 class InlinedBase : public GarbageCollected<InlinedBase> {
  public:
-  virtual void trace(Visitor* visitor) { traceImpl(visitor); }
-  virtual void trace(InlinedGlobalMarkingVisitor visitor) {
-    traceImpl(visitor);
+  virtual void Trace(Visitor* visitor) { TraceImpl(visitor); }
+  virtual void Trace(InlinedGlobalMarkingVisitor visitor) {
+    TraceImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) { visitor->trace(x_base_); }
+  void TraceImpl(VisitorDispatcher visitor) { visitor->Trace(x_base_); }
 
   Member<X> x_base_;
 };
 
 class InlinedDerived : public InlinedBase {
  public:
-  void trace(Visitor* visitor) override { traceImpl(visitor); }
-  void trace(InlinedGlobalMarkingVisitor visitor) override {
-    traceImpl(visitor);
+  void Trace(Visitor* visitor) override { TraceImpl(visitor); }
+  void Trace(InlinedGlobalMarkingVisitor visitor) override {
+    TraceImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {
-    visitor->trace(x_derived_);
-    InlinedBase::trace(visitor);
+  void TraceImpl(VisitorDispatcher visitor) {
+    visitor->Trace(x_derived_);
+    InlinedBase::Trace(visitor);
   }
 
   Member<X> x_derived_;
@@ -48,24 +48,24 @@
 
 class ExternBase : public GarbageCollected<ExternBase> {
  public:
-  virtual void trace(Visitor*);
-  virtual void trace(InlinedGlobalMarkingVisitor);
+  virtual void Trace(Visitor*);
+  virtual void Trace(InlinedGlobalMarkingVisitor);
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher);
+  void TraceImpl(VisitorDispatcher);
 
   Member<X> x_base_;
 };
 
 class ExternDerived : public ExternBase {
  public:
-  void trace(Visitor*) override;
-  void trace(InlinedGlobalMarkingVisitor) override;
+  void Trace(Visitor*) override;
+  void Trace(InlinedGlobalMarkingVisitor) override;
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher);
+  void TraceImpl(VisitorDispatcher);
 
   Member<X> x_derived_;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded_error.cpp b/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded_error.cpp
index 07cab63..80d0f65 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded_error.cpp
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded_error.cpp
@@ -6,30 +6,30 @@
 
 namespace blink {
 
-void ExternBase::trace(Visitor* visitor) {
-  traceImpl(visitor);
+void ExternBase::Trace(Visitor* visitor) {
+  TraceImpl(visitor);
 }
 
-void ExternBase::trace(InlinedGlobalMarkingVisitor visitor) {
-  traceImpl(visitor);
+void ExternBase::Trace(InlinedGlobalMarkingVisitor visitor) {
+  TraceImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void ExternBase::traceImpl(VisitorDispatcher visitor) {
-  // Missing visitor->trace(x_base_).
+inline void ExternBase::TraceImpl(VisitorDispatcher visitor) {
+  // Missing visitor->Trace(x_base_).
 }
 
-void ExternDerived::trace(Visitor* visitor) {
-  traceImpl(visitor);
+void ExternDerived::Trace(Visitor* visitor) {
+  TraceImpl(visitor);
 }
 
-void ExternDerived::trace(InlinedGlobalMarkingVisitor visitor) {
-  traceImpl(visitor);
+void ExternDerived::Trace(InlinedGlobalMarkingVisitor visitor) {
+  TraceImpl(visitor);
 }
 
 template <typename VisitorDispatcher>
-inline void ExternDerived::traceImpl(VisitorDispatcher visitor) {
-  // Missing visitor->trace(x_derived_) and ExternBase::trace(visitor).
+inline void ExternDerived::TraceImpl(VisitorDispatcher visitor) {
+  // Missing visitor->Trace(x_derived_) and ExternBase::Trace(visitor).
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded_error.h b/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded_error.h
index 7d7a038..be587de 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded_error.h
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded_error.h
@@ -11,21 +11,21 @@
 
 class X : public GarbageCollected<X> {
  public:
-  void trace(Visitor*) {}
-  void trace(InlinedGlobalMarkingVisitor) {}
+  void Trace(Visitor*) {}
+  void Trace(InlinedGlobalMarkingVisitor) {}
 };
 
 class InlinedBase : public GarbageCollected<InlinedBase> {
  public:
-  virtual void trace(Visitor* visitor) { traceImpl(visitor); }
-  virtual void trace(InlinedGlobalMarkingVisitor visitor) {
-    traceImpl(visitor);
+  virtual void Trace(Visitor* visitor) { TraceImpl(visitor); }
+  virtual void Trace(InlinedGlobalMarkingVisitor visitor) {
+    TraceImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {
-    // Missing visitor->trace(x_base_).
+  void TraceImpl(VisitorDispatcher visitor) {
+    // Missing visitor->Trace(x_base_).
   }
 
   Member<X> x_base_;
@@ -33,15 +33,15 @@
 
 class InlinedDerived : public InlinedBase {
  public:
-  void trace(Visitor* visitor) override { traceImpl(visitor); }
-  void trace(InlinedGlobalMarkingVisitor visitor) override {
-    traceImpl(visitor);
+  void Trace(Visitor* visitor) override { TraceImpl(visitor); }
+  void Trace(InlinedGlobalMarkingVisitor visitor) override {
+    TraceImpl(visitor);
   }
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher visitor) {
-    // Missing visitor->trace(x_derived_) and InlinedBase::trace(visitor).
+  void TraceImpl(VisitorDispatcher visitor) {
+    // Missing visitor->Trace(x_derived_) and InlinedBase::Trace(visitor).
   }
 
   Member<X> x_derived_;
@@ -49,24 +49,24 @@
 
 class ExternBase : public GarbageCollected<ExternBase> {
  public:
-  virtual void trace(Visitor*);
-  virtual void trace(InlinedGlobalMarkingVisitor);
+  virtual void Trace(Visitor*);
+  virtual void Trace(InlinedGlobalMarkingVisitor);
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher);
+  void TraceImpl(VisitorDispatcher);
 
   Member<X> x_base_;
 };
 
 class ExternDerived : public ExternBase {
  public:
-  void trace(Visitor*) override;
-  void trace(InlinedGlobalMarkingVisitor) override;
+  void Trace(Visitor*) override;
+  void Trace(InlinedGlobalMarkingVisitor) override;
 
  private:
   template <typename VisitorDispatcher>
-  void traceImpl(VisitorDispatcher);
+  void TraceImpl(VisitorDispatcher);
 
   Member<X> x_derived_;
 };
diff --git a/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded_error.txt b/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded_error.txt
index 644f9f0..b603a08 100644
--- a/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded_error.txt
+++ b/tools/clang/blink_gc_plugin/tests/traceimpl_overloaded_error.txt
@@ -1,25 +1,25 @@
 In file included from traceimpl_overloaded_error.cpp:5:
 ./traceimpl_overloaded_error.h:27:3: warning: [blink-gc] Class 'InlinedBase' has untraced fields that require tracing.
-  void traceImpl(VisitorDispatcher visitor) {
+  void TraceImpl(VisitorDispatcher visitor) {
   ^
 ./traceimpl_overloaded_error.h:31:3: note: [blink-gc] Untraced field 'x_base_' declared here:
   Member<X> x_base_;
   ^
 ./traceimpl_overloaded_error.h:43:3: warning: [blink-gc] Base class 'InlinedBase' of derived class 'InlinedDerived' requires tracing.
-  void traceImpl(VisitorDispatcher visitor) {
+  void TraceImpl(VisitorDispatcher visitor) {
   ^
 ./traceimpl_overloaded_error.h:43:3: warning: [blink-gc] Class 'InlinedDerived' has untraced fields that require tracing.
 ./traceimpl_overloaded_error.h:47:3: note: [blink-gc] Untraced field 'x_derived_' declared here:
   Member<X> x_derived_;
   ^
 traceimpl_overloaded_error.cpp:18:1: warning: [blink-gc] Class 'ExternBase' has untraced fields that require tracing.
-inline void ExternBase::traceImpl(VisitorDispatcher visitor) {
+inline void ExternBase::TraceImpl(VisitorDispatcher visitor) {
 ^
 ./traceimpl_overloaded_error.h:59:3: note: [blink-gc] Untraced field 'x_base_' declared here:
   Member<X> x_base_;
   ^
 traceimpl_overloaded_error.cpp:31:1: warning: [blink-gc] Base class 'ExternBase' of derived class 'ExternDerived' requires tracing.
-inline void ExternDerived::traceImpl(VisitorDispatcher visitor) {
+inline void ExternDerived::TraceImpl(VisitorDispatcher visitor) {
 ^
 traceimpl_overloaded_error.cpp:31:1: warning: [blink-gc] Class 'ExternDerived' has untraced fields that require tracing.
 ./traceimpl_overloaded_error.h:71:3: note: [blink-gc] Untraced field 'x_derived_' declared here:
diff --git a/tools/clang/blink_gc_plugin/tests/virtual_and_trace_after_dispatch.cpp b/tools/clang/blink_gc_plugin/tests/virtual_and_trace_after_dispatch.cpp
index 2ba6f1e..b284ddf 100644
--- a/tools/clang/blink_gc_plugin/tests/virtual_and_trace_after_dispatch.cpp
+++ b/tools/clang/blink_gc_plugin/tests/virtual_and_trace_after_dispatch.cpp
@@ -8,23 +8,23 @@
 
 static B* toB(A* a) { return static_cast<B*>(a); }
 
-void A::trace(Visitor* visitor)
+void A::Trace(Visitor* visitor)
 {
     switch (m_type) {
     case TB:
-        toB(this)->traceAfterDispatch(visitor);
+        toB(this)->TraceAfterDispatch(visitor);
         break;
     }
 }
 
-void A::traceAfterDispatch(Visitor* visitor)
+void A::TraceAfterDispatch(Visitor* visitor)
 {
 }
 
-void B::traceAfterDispatch(Visitor* visitor)
+void B::TraceAfterDispatch(Visitor* visitor)
 {
-    visitor->trace(m_a);
-    A::traceAfterDispatch(visitor);
+    visitor->Trace(m_a);
+    A::TraceAfterDispatch(visitor);
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/virtual_and_trace_after_dispatch.h b/tools/clang/blink_gc_plugin/tests/virtual_and_trace_after_dispatch.h
index 5048349..c6a7c95 100644
--- a/tools/clang/blink_gc_plugin/tests/virtual_and_trace_after_dispatch.h
+++ b/tools/clang/blink_gc_plugin/tests/virtual_and_trace_after_dispatch.h
@@ -11,8 +11,8 @@
 
 class A : public GarbageCollected<A> {
 public:
-    void trace(Visitor*);
-    void traceAfterDispatch(Visitor*);
+    void Trace(Visitor*);
+    void TraceAfterDispatch(Visitor*);
 protected:
     enum Type { TB };
     A(Type type) : m_type(type) { }
@@ -23,7 +23,7 @@
 class B : public A {
 public:
     B() : A(TB) { }
-    void traceAfterDispatch(Visitor*);
+    void TraceAfterDispatch(Visitor*);
     virtual void foo() { }
 private:
     Member<A> m_a;
diff --git a/tools/clang/blink_gc_plugin/tests/virtual_and_trace_after_dispatch.txt b/tools/clang/blink_gc_plugin/tests/virtual_and_trace_after_dispatch.txt
index fb46696..7a54b97 100644
--- a/tools/clang/blink_gc_plugin/tests/virtual_and_trace_after_dispatch.txt
+++ b/tools/clang/blink_gc_plugin/tests/virtual_and_trace_after_dispatch.txt
@@ -5,7 +5,7 @@
 ./virtual_and_trace_after_dispatch.h:23:1: warning: [blink-gc] Class 'B' contains or inherits virtual methods but implements manual dispatching.
 class B : public A {
 ^
-./virtual_and_trace_after_dispatch.h:14:5: note: [blink-gc] Manual dispatch 'trace' declared here:
-    void trace(Visitor*);
+./virtual_and_trace_after_dispatch.h:14:5: note: [blink-gc] Manual dispatch 'Trace' declared here:
+    void Trace(Visitor*);
     ^
 2 warnings generated.
diff --git a/tools/clang/blink_gc_plugin/tests/weak_fields_require_tracing.cpp b/tools/clang/blink_gc_plugin/tests/weak_fields_require_tracing.cpp
index 382e9f9..db9d535 100644
--- a/tools/clang/blink_gc_plugin/tests/weak_fields_require_tracing.cpp
+++ b/tools/clang/blink_gc_plugin/tests/weak_fields_require_tracing.cpp
@@ -6,23 +6,23 @@
 
 namespace blink {
 
-void HeapObject::trace(Visitor* visitor)
+void HeapObject::Trace(Visitor* visitor)
 {
-    // Missing visitor->trace(m_obj1);
-    // Missing visitor->trace(m_obj2);
-    // visitor->trace(m_obj3) in callback.
-    // Missing visitor->trace(m_set1);
-    visitor->trace(m_set2);
-    visitor->registerWeakMembers<HeapObject,
+    // Missing visitor->Trace(m_obj1);
+    // Missing visitor->Trace(m_obj2);
+    // visitor->Trace(m_obj3) in callback.
+    // Missing visitor->Trace(m_set1);
+    visitor->Trace(m_set2);
+    visitor->RegisterWeakMembers<HeapObject,
                                  &HeapObject::clearWeakMembers>(this);
 }
 
 void HeapObject::clearWeakMembers(Visitor* visitor)
 {
-    visitor->trace(m_obj1);  // Does not count.
-    // Missing visitor->trace(m_obj2);
-    visitor->trace(m_obj3);  // OK.
-    visitor->trace(m_set1);  // Does not count.
+    visitor->Trace(m_obj1);  // Does not count.
+    // Missing visitor->Trace(m_obj2);
+    visitor->Trace(m_obj3);  // OK.
+    visitor->Trace(m_set1);  // Does not count.
 }
 
 }
diff --git a/tools/clang/blink_gc_plugin/tests/weak_fields_require_tracing.h b/tools/clang/blink_gc_plugin/tests/weak_fields_require_tracing.h
index c6850e6..f7a2e62 100644
--- a/tools/clang/blink_gc_plugin/tests/weak_fields_require_tracing.h
+++ b/tools/clang/blink_gc_plugin/tests/weak_fields_require_tracing.h
@@ -11,7 +11,7 @@
 
 class HeapObject : public GarbageCollected<HeapObject> {
 public:
-    void trace(Visitor*);
+    void Trace(Visitor*);
     void clearWeakMembers(Visitor*);
 private:
     Member<HeapObject> m_obj1;
diff --git a/tools/clang/blink_gc_plugin/tests/weak_fields_require_tracing.txt b/tools/clang/blink_gc_plugin/tests/weak_fields_require_tracing.txt
index 02f56a3..8cb90be 100644
--- a/tools/clang/blink_gc_plugin/tests/weak_fields_require_tracing.txt
+++ b/tools/clang/blink_gc_plugin/tests/weak_fields_require_tracing.txt
@@ -1,5 +1,5 @@
 weak_fields_require_tracing.cpp:9:1: warning: [blink-gc] Class 'HeapObject' has untraced fields that require tracing.
-void HeapObject::trace(Visitor* visitor)
+void HeapObject::Trace(Visitor* visitor)
 ^
 ./weak_fields_require_tracing.h:17:5: note: [blink-gc] Untraced field 'm_obj1' declared here:
     Member<HeapObject> m_obj1;
diff --git a/tools/clang/empty_string/EmptyStringConverter.cpp b/tools/clang/empty_string/EmptyStringConverter.cpp
index fce692f..c2da652 100644
--- a/tools/clang/empty_string/EmptyStringConverter.cpp
+++ b/tools/clang/empty_string/EmptyStringConverter.cpp
@@ -94,11 +94,17 @@
                            &constructor_callback_);
   match_finder->addMatcher(cxxNewExpr(has(constructor_call)),
                            &constructor_callback_);
-  match_finder->addMatcher(cxxBindTemporaryExpr(has(constructor_call)),
-                           &temporary_callback_);
+  // The implicitly generated constructor for a temporary can be wrapped in an
+  // implicitCastExpr, so ignoringParenImpCasts is needed.
   match_finder->addMatcher(
-      cxxConstructorDecl(forEach(expr(has(constructor_call)))),
-      &initializer_callback_);
+      cxxBindTemporaryExpr(ignoringParenImpCasts(forEach(constructor_call))),
+      &temporary_callback_);
+  // Note that forEachConstructorInitializer is needed: the std::string
+  // constructor is wrapped by exprWithCleanups and cxxCtorInitializer, so a
+  // plain forEach() would not work.
+  match_finder->addMatcher(cxxConstructorDecl(forEachConstructorInitializer(
+                               withInitializer(expr(has(constructor_call))))),
+                           &initializer_callback_);
 }
 
 void ConstructorCallback::run(const MatchFinder::MatchResult& result) {
@@ -111,7 +117,8 @@
       result.Nodes.getNodeAs<clang::CXXConstructExpr>("call");
   clang::CharSourceRange range =
       clang::CharSourceRange::getTokenRange(call->getParenOrBraceRange());
-  replacements_->insert(Replacement(*result.SourceManager, range, ""));
+  auto err = replacements_->add(Replacement(*result.SourceManager, range, ""));
+  assert(!err);
 }
 
 void InitializerCallback::run(const MatchFinder::MatchResult& result) {
@@ -122,7 +129,8 @@
 
   const clang::CXXConstructExpr* call =
       result.Nodes.getNodeAs<clang::CXXConstructExpr>("call");
-  replacements_->insert(Replacement(*result.SourceManager, call, ""));
+  auto err = replacements_->add(Replacement(*result.SourceManager, call, ""));
+  assert(!err);
 }
 
 void TemporaryCallback::run(const MatchFinder::MatchResult& result) {
@@ -139,11 +147,14 @@
   // for |call| in the explicit case doesn't include the closing parenthesis.
   clang::SourceRange range = call->getParenOrBraceRange();
   if (range.isValid()) {
-    replacements_->insert(Replacement(*result.SourceManager, literal, ""));
+    auto err =
+        replacements_->add(Replacement(*result.SourceManager, literal, ""));
+    assert(!err);
   } else {
-    replacements_->insert(
+    auto err = replacements_->add(
         Replacement(*result.SourceManager, call,
                     literal->isWide() ? "std::wstring()" : "std::string()"));
+    assert(!err);
   }
 }
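The EmptyStringConverter changes above all follow the same migration: clang::tooling::Replacements used to behave like a std::set (hence insert()/emplace()), while the newer Replacements::add returns an llvm::Error that must be consumed. A hedged sketch of the pattern, with illustrative names:

#include "clang/Tooling/Core/Replacement.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

// Returns false if the edit conflicts with one already recorded.
bool AddEdit(clang::tooling::Replacements& replacements,
             const clang::SourceManager& sm,
             clang::CharSourceRange range,
             llvm::StringRef new_text) {
  if (llvm::Error err =
          replacements.add(clang::tooling::Replacement(sm, range, new_text))) {
    // Overlapping or duplicate replacement; report and drop it.
    llvm::errs() << llvm::toString(std::move(err)) << "\n";
    return false;
  }
  return true;
}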
 
diff --git a/tools/clang/pass_to_move/PassToMove.cpp b/tools/clang/pass_to_move/PassToMove.cpp
index c5858f4..48d4aae 100644
--- a/tools/clang/pass_to_move/PassToMove.cpp
+++ b/tools/clang/pass_to_move/PassToMove.cpp
@@ -49,22 +49,22 @@
   const char kMoveRefText[] = "std::move(";
   const char kMovePtrText[] = "std::move(*";
 
-  replacements_->emplace(*result.SourceManager,
-                         result.SourceManager->getSpellingLoc(
-                             arg->getLocStart()),
-                         0,
-                         is_arrow ? kMovePtrText : kMoveRefText);
+  auto err = replacements_->add(
+      Replacement(*result.SourceManager,
+                  result.SourceManager->getSpellingLoc(arg->getLocStart()), 0,
+                  is_arrow ? kMovePtrText : kMoveRefText));
+  assert(!err);
 
   // Delete everything but the closing parenthesis from the original call to
   // Pass(): the closing parenthesis is left to match up with the parenthesis
   // just inserted with std::move.
-  replacements_->emplace(*result.SourceManager,
-                         clang::CharSourceRange::getCharRange(
-                             result.SourceManager->getSpellingLoc(
-                                 callee->getOperatorLoc()),
-                             result.SourceManager->getSpellingLoc(
-                                 call_expr->getRParenLoc())),
-                         "");
+  err = replacements_->add(Replacement(
+      *result.SourceManager,
+      clang::CharSourceRange::getCharRange(
+          result.SourceManager->getSpellingLoc(callee->getOperatorLoc()),
+          result.SourceManager->getSpellingLoc(call_expr->getRParenLoc())),
+      ""));
+  assert(!err);
 }
 
 }  // namespace
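The two replacements above are purely textual: insert "std::move(" (or "std::move(*" when Pass() is reached through ->) before the receiver, then delete everything from the operator up to, but not including, Pass()'s closing parenthesis, which is reused to close the std::move. A hypothetical before/after in the spirit of the tests that follow:

// Before the rewrite:                // After the rewrite:
F f = F(a.Pass()).Pass();             F f = std::move(F(std::move(a)));
ptr->Pass();                          std::move(*ptr);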
diff --git a/tools/clang/pass_to_move/tests/test-expected.cc b/tools/clang/pass_to_move/tests/test-expected.cc
index 65003f8..4da848d 100644
--- a/tools/clang/pass_to_move/tests/test-expected.cc
+++ b/tools/clang/pass_to_move/tests/test-expected.cc
@@ -54,9 +54,7 @@
   A a5;
   F f = std::move(F(std::move(a5)));
 
-  // Chained Pass is handled (mostly) correctly. The replacement applier dedupes
-  // the insertion of std::move, so the result is not completely correct...
-  // ... but hopefully there's very little code following this broken pattern.
+  // Chained Pass is handled correctly.
   A a6;
-  A a7 = std::move(a6));
+  A a7 = std::move(std::move(a6));
 }
diff --git a/tools/clang/pass_to_move/tests/test-original.cc b/tools/clang/pass_to_move/tests/test-original.cc
index 1e2a96d..c561e12 100644
--- a/tools/clang/pass_to_move/tests/test-original.cc
+++ b/tools/clang/pass_to_move/tests/test-original.cc
@@ -54,9 +54,7 @@
   A a5;
   F f = F(a5.Pass()).Pass();
 
-  // Chained Pass is handled (mostly) correctly. The replacement applier dedupes
-  // the insertion of std::move, so the result is not completely correct...
-  // ... but hopefully there's very little code following this broken pattern.
+  // Chained Pass is handled correctly.
   A a6;
   A a7 = a6.Pass().Pass();
 }
diff --git a/tools/clang/plugins/FindBadConstructsConsumer.cpp b/tools/clang/plugins/FindBadConstructsConsumer.cpp
index 0fd85b1..636261b 100644
--- a/tools/clang/plugins/FindBadConstructsConsumer.cpp
+++ b/tools/clang/plugins/FindBadConstructsConsumer.cpp
@@ -653,7 +653,7 @@
 
       // HACK: I'm at a loss about how to get the syntax checker to tell
       // whether a template is externed or not. For the first pass here,
-      // just do retarded string comparisons.
+      // just do simple string comparisons.
       if (TemplateDecl* decl = name.getAsTemplateDecl()) {
         std::string base_name = decl->getNameAsString();
         if (base_name == "basic_string")
@@ -675,7 +675,15 @@
     }
     case Type::Typedef: {
       while (const TypedefType* TT = dyn_cast<TypedefType>(type)) {
-        type = TT->getDecl()->getUnderlyingType().getTypePtr();
+        if (auto* decl = TT->getDecl()) {
+          const std::string name = decl->getNameAsString();
+          auto* context = decl->getDeclContext();
+          if (name == "atomic_int" && context->isStdNamespace()) {
+            (*trivial_member)++;
+            return;
+          }
+          type = decl->getUnderlyingType().getTypePtr();
+        }
       }
       CountType(type,
                 trivial_member,
@@ -967,7 +975,7 @@
       continue;
 
     // Parse and build AST for yet-uninstantiated template functions.
-    clang::LateParsedTemplate* lpt = sema.LateParsedTemplateMap[fd];
+    clang::LateParsedTemplate* lpt = sema.LateParsedTemplateMap[fd].get();
     sema.LateTemplateParser(sema.OpaqueParser, *lpt);
   }
 }
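The new Typedef case above unwraps typedef sugar one layer at a time so that std::atomic_int can be recognized and counted as trivial before desugaring turns it into std::atomic<int>, which the workaround described in the tests below would otherwise classify as non-trivial. A standalone sketch of just the unwrapping loop, assuming a valid clang::Type pointer:

#include "clang/AST/Type.h"
#include "llvm/Support/Casting.h"

const clang::Type* Desugar(const clang::Type* type) {
  // Peel one typedef layer per iteration, e.g. std::atomic_int ->
  // std::atomic<int>, stopping once the type is no longer typedef sugar.
  while (const auto* tt = llvm::dyn_cast<clang::TypedefType>(type))
    type = tt->getDecl()->getUnderlyingType().getTypePtr();
  return type;
}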
diff --git a/tools/clang/plugins/tests/trivial_ctor.cpp b/tools/clang/plugins/tests/trivial_ctor.cpp
new file mode 100644
index 0000000..c632faf
--- /dev/null
+++ b/tools/clang/plugins/tests/trivial_ctor.cpp
@@ -0,0 +1,21 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "trivial_ctor.h"
+
+// Due to https://bugs.chromium.org/p/chromium/issues/detail?id=663463, we treat
+// templated classes/structs as non-trivial, even if they really are trivial.
+// Thus, classes that have such a class/struct as a member get flagged as being
+// themselves non-trivial, even if (like |MySpinLock|) they are. Special-case
+// [std::]atomic_int.
+class TrivialTemplateOK {
+ private:
+  MySpinLock lock_;
+};
+
+int main() {
+  MySpinLock lock;
+  TrivialTemplateOK one;
+  return 0;
+}
diff --git a/tools/clang/plugins/tests/trivial_ctor.h b/tools/clang/plugins/tests/trivial_ctor.h
new file mode 100644
index 0000000..2fa3003
--- /dev/null
+++ b/tools/clang/plugins/tests/trivial_ctor.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TRIVIAL_CTOR_H_
+#define TRIVIAL_CTOR_H_
+
+// Mocked for testing:
+namespace std {
+
+template<typename T>
+struct atomic {
+  T i;
+};
+
+typedef atomic<int> atomic_int;
+
+}  // namespace std
+
+struct MySpinLock {
+  MySpinLock();
+  ~MySpinLock();
+  MySpinLock(const MySpinLock&);
+  MySpinLock(MySpinLock&&);
+  std::atomic_int lock_;
+};
+
+#endif  // TRIVIAL_CTOR_H_
diff --git a/tools/clang/plugins/tests/trivial_ctor.txt b/tools/clang/plugins/tests/trivial_ctor.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/clang/plugins/tests/trivial_ctor.txt
diff --git a/tools/clang/rewrite_scoped_refptr/RewriteScopedRefptr.cpp b/tools/clang/rewrite_scoped_refptr/RewriteScopedRefptr.cpp
index d6a3d04..f334231 100644
--- a/tools/clang/rewrite_scoped_refptr/RewriteScopedRefptr.cpp
+++ b/tools/clang/rewrite_scoped_refptr/RewriteScopedRefptr.cpp
@@ -167,7 +167,9 @@
 void GetRewriterCallback::run(const MatchFinder::MatchResult& result) {
   const clang::Expr* arg = result.Nodes.getNodeAs<clang::Expr>("arg");
   assert(arg && "Unexpected match! No Expr captured!");
-  replacements_->insert(RewriteImplicitToExplicitConversion(result, arg));
+  auto err =
+      replacements_->add(RewriteImplicitToExplicitConversion(result, arg));
+  assert(!err);
 }
 
 class VarRewriterCallback : public MatchFinder::MatchCallback {
@@ -199,8 +201,9 @@
   // In this case, it will only rewrite the .cc definition. Oh well. This should
   // be rare enough that these cases can be manually handled, since the style
   // guide prohibits globals of non-POD type.
-  replacements_->insert(RewriteRawPtrToScopedRefptr(
+  auto err = replacements_->add(RewriteRawPtrToScopedRefptr(
       result, tsi->getTypeLoc().getBeginLoc(), tsi->getTypeLoc().getEndLoc()));
+  assert(!err);
 }
 
 class FunctionRewriterCallback : public MatchFinder::MatchCallback {
@@ -230,8 +233,9 @@
 
   for (clang::FunctionDecl* f : function_decl->redecls()) {
     clang::SourceRange range = f->getReturnTypeSourceRange();
-    replacements_->insert(
+    auto err = replacements_->add(
         RewriteRawPtrToScopedRefptr(result, range.getBegin(), range.getEnd()));
+    assert(!err);
   }
 }
 
@@ -248,7 +252,9 @@
 void MacroRewriterCallback::run(const MatchFinder::MatchResult& result) {
   const clang::Expr* const expr = result.Nodes.getNodeAs<clang::Expr>("expr");
   assert(expr && "Unexpected match! No Expr captured!");
-  replacements_->insert(RewriteImplicitToExplicitConversion(result, expr));
+  auto err =
+      replacements_->add(RewriteImplicitToExplicitConversion(result, expr));
+  assert(!err);
 }
 
 }  // namespace
@@ -352,12 +358,12 @@
 
   // Find temporary scoped_refptr<T>'s being unsafely assigned to a T*.
   VarRewriterCallback var_callback(&replacements);
-  auto initialized_with_temporary = ignoringImpCasts(exprWithCleanups(
-      has(cxxMemberCallExpr(base_matcher, is_unsafe_temporary_conversion))));
-  match_finder.addMatcher(id("var",
-                             varDecl(hasInitializer(initialized_with_temporary),
-                                     hasType(pointerType()))),
-                          &var_callback);
+  auto initialized_with_temporary = has(ignoringImpCasts(
+      cxxMemberCallExpr(base_matcher, is_unsafe_temporary_conversion)));
+  match_finder.addMatcher(
+      id("var", varDecl(hasInitializer(initialized_with_temporary),
+                        hasType(pointerType()))),
+      &var_callback);
   match_finder.addMatcher(
       cxxConstructorDecl(forEachConstructorInitializer(
           allOf(withInitializer(initialized_with_temporary),
@@ -380,10 +386,10 @@
   MacroRewriterCallback macro_callback(&replacements);
   // CHECK_EQ/CHECK_NE helpers.
   match_finder.addMatcher(
-      callExpr(callee(is_logging_helper),
-               argumentCountIs(3),
-               hasAnyArgument(id("expr", expr(hasType(is_scoped_refptr)))),
-               hasAnyArgument(hasType(pointerType())),
+      callExpr(callee(is_logging_helper), argumentCountIs(3),
+               hasAnyArgument(ignoringParenImpCasts(
+                   id("expr", expr(hasType(is_scoped_refptr))))),
+               hasAnyArgument(ignoringParenImpCasts(hasType(pointerType()))),
                hasArgument(2, stringLiteral())),
       &macro_callback);
   // ASSERT_EQ/ASSERT_NE/EXPECT_EQ/EXPECT_EQ, which use the same underlying
diff --git a/tools/clang/rewrite_scoped_refptr/tests/ref-to-local-returned-as-raw-expected.cc b/tools/clang/rewrite_scoped_refptr/tests/ref-to-local-returned-as-raw-expected.cc
index 8608120..2e63f50 100644
--- a/tools/clang/rewrite_scoped_refptr/tests/ref-to-local-returned-as-raw-expected.cc
+++ b/tools/clang/rewrite_scoped_refptr/tests/ref-to-local-returned-as-raw-expected.cc
@@ -2,9 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/memory/ref_counted.h"
+#include "scoped_refptr.h"
 
-struct Foo : public base::RefCounted<Foo> {
+struct Foo {
   int dummy;
 };
 
diff --git a/tools/clang/rewrite_scoped_refptr/tests/ref-to-local-returned-as-raw-original.cc b/tools/clang/rewrite_scoped_refptr/tests/ref-to-local-returned-as-raw-original.cc
index 8608120..2e63f50 100644
--- a/tools/clang/rewrite_scoped_refptr/tests/ref-to-local-returned-as-raw-original.cc
+++ b/tools/clang/rewrite_scoped_refptr/tests/ref-to-local-returned-as-raw-original.cc
@@ -2,9 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/memory/ref_counted.h"
+#include "scoped_refptr.h"
 
-struct Foo : public base::RefCounted<Foo> {
+struct Foo {
   int dummy;
 };
 
diff --git a/tools/clang/rewrite_scoped_refptr/tests/temp-returned-as-raw-expected.cc b/tools/clang/rewrite_scoped_refptr/tests/temp-returned-as-raw-expected.cc
index 1987bbb..ee58c08 100644
--- a/tools/clang/rewrite_scoped_refptr/tests/temp-returned-as-raw-expected.cc
+++ b/tools/clang/rewrite_scoped_refptr/tests/temp-returned-as-raw-expected.cc
@@ -2,9 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/memory/ref_counted.h"
+#include "scoped_refptr.h"
 
-struct Foo : public base::RefCounted<Foo> {
+struct Foo {
   int dummy;
 };
 
diff --git a/tools/clang/rewrite_scoped_refptr/tests/temp-returned-as-raw-original.cc b/tools/clang/rewrite_scoped_refptr/tests/temp-returned-as-raw-original.cc
index e0fd791..9acc0de 100644
--- a/tools/clang/rewrite_scoped_refptr/tests/temp-returned-as-raw-original.cc
+++ b/tools/clang/rewrite_scoped_refptr/tests/temp-returned-as-raw-original.cc
@@ -2,9 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/memory/ref_counted.h"
+#include "scoped_refptr.h"
 
-struct Foo : public base::RefCounted<Foo> {
+struct Foo {
   int dummy;
 };
 
diff --git a/tools/clang/rewrite_scoped_refptr/tests/test11-expected.cc b/tools/clang/rewrite_scoped_refptr/tests/test11-expected.cc
index 4557b52..9e3dec8 100644
--- a/tools/clang/rewrite_scoped_refptr/tests/test11-expected.cc
+++ b/tools/clang/rewrite_scoped_refptr/tests/test11-expected.cc
@@ -10,7 +10,7 @@
   int dummy;
 };
 
-typedef std::vector<scoped_refptr<Foo> > FooList;
+typedef std::vector<scoped_refptr<Foo>> FooList;
 
 void TestsAScopedRefptr() {
   FooList list;
diff --git a/tools/clang/rewrite_scoped_refptr/tests/test11-original.cc b/tools/clang/rewrite_scoped_refptr/tests/test11-original.cc
index c79148b..452f3b0 100644
--- a/tools/clang/rewrite_scoped_refptr/tests/test11-original.cc
+++ b/tools/clang/rewrite_scoped_refptr/tests/test11-original.cc
@@ -10,7 +10,7 @@
   int dummy;
 };
 
-typedef std::vector<scoped_refptr<Foo> > FooList;
+typedef std::vector<scoped_refptr<Foo>> FooList;
 
 void TestsAScopedRefptr() {
   FooList list;
diff --git a/tools/clang/rewrite_scoped_refptr/tests/test12-expected.cc b/tools/clang/rewrite_scoped_refptr/tests/test12-expected.cc
index fdaa80e..c365828 100644
--- a/tools/clang/rewrite_scoped_refptr/tests/test12-expected.cc
+++ b/tools/clang/rewrite_scoped_refptr/tests/test12-expected.cc
@@ -12,10 +12,10 @@
   int dummy;
 };
 
-typedef std::map<std::string, scoped_refptr<const Foo> > MyMap;
+typedef std::map<std::string, scoped_refptr<const Foo>> MyMap;
 
 class MyIter
-    : public std::iterator<std::input_iterator_tag, scoped_refptr<const Foo> > {
+    : public std::iterator<std::input_iterator_tag, scoped_refptr<const Foo>> {
  public:
   MyIter() {}
   MyIter(const MyIter& other) : it_(other.it_) {}
diff --git a/tools/clang/rewrite_scoped_refptr/tests/test12-original.cc b/tools/clang/rewrite_scoped_refptr/tests/test12-original.cc
index 33f1eb1..8f30ee2 100644
--- a/tools/clang/rewrite_scoped_refptr/tests/test12-original.cc
+++ b/tools/clang/rewrite_scoped_refptr/tests/test12-original.cc
@@ -12,10 +12,10 @@
   int dummy;
 };
 
-typedef std::map<std::string, scoped_refptr<const Foo> > MyMap;
+typedef std::map<std::string, scoped_refptr<const Foo>> MyMap;
 
 class MyIter
-    : public std::iterator<std::input_iterator_tag, scoped_refptr<const Foo> > {
+    : public std::iterator<std::input_iterator_tag, scoped_refptr<const Foo>> {
  public:
   MyIter() {}
   MyIter(const MyIter& other) : it_(other.it_) {}
diff --git a/tools/clang/rewrite_to_chrome_style/CMakeLists.txt b/tools/clang/rewrite_to_chrome_style/CMakeLists.txt
index 8fa96ef..1935e23 100644
--- a/tools/clang/rewrite_to_chrome_style/CMakeLists.txt
+++ b/tools/clang/rewrite_to_chrome_style/CMakeLists.txt
@@ -7,6 +7,7 @@
   )
 
 add_llvm_executable(rewrite_to_chrome_style
+  EditTracker.cpp
   RewriteToChromeStyle.cpp
   )
 
diff --git a/tools/clang/rewrite_to_chrome_style/EditTracker.cpp b/tools/clang/rewrite_to_chrome_style/EditTracker.cpp
new file mode 100644
index 0000000..cd8228e
--- /dev/null
+++ b/tools/clang/rewrite_to_chrome_style/EditTracker.cpp
@@ -0,0 +1,41 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "EditTracker.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+void EditTracker::Add(const clang::SourceManager& source_manager,
+                      clang::SourceLocation location,
+                      llvm::StringRef original_text,
+                      llvm::StringRef new_text) {
+  llvm::StringRef filename;
+  for (int i = 0; i < 10; i++) {
+    filename = source_manager.getFilename(location);
+    if (!filename.empty() || !location.isMacroID())
+      break;
+    // Otherwise, no filename and the SourceLocation is a macro ID. Look one
+    // level up the stack...
+    location = source_manager.getImmediateMacroCallerLoc(location);
+  }
+  assert(!filename.empty() && "Can't track edit with no filename!");
+  auto result = tracked_edits_.try_emplace(original_text);
+  if (result.second) {
+    result.first->getValue().new_text = new_text;
+  }
+  result.first->getValue().filenames.try_emplace(filename);
+}
+
+void EditTracker::SerializeTo(llvm::StringRef tag,
+                              llvm::raw_ostream& output) const {
+  for (const auto& edit : tracked_edits_) {
+    for (const auto& filename : edit.getValue().filenames) {
+      output << filename.getKey() << ":" << tag << ":" << edit.getKey() << ":"
+             << edit.getValue().new_text << "\n";
+    }
+  }
+}
diff --git a/tools/clang/rewrite_to_chrome_style/EditTracker.h b/tools/clang/rewrite_to_chrome_style/EditTracker.h
new file mode 100644
index 0000000..ef5e301
--- /dev/null
+++ b/tools/clang/rewrite_to_chrome_style/EditTracker.h
@@ -0,0 +1,49 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TOOLS_CLANG_REWRITE_TO_CHROME_STYLE_EDIT_TRACKER_H_
+#define TOOLS_CLANG_REWRITE_TO_CHROME_STYLE_EDIT_TRACKER_H_
+
+#include <map>
+
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
+
+namespace llvm {
+class raw_ostream;
+}  // namespace llvm
+
+struct EditInfo {
+  std::string new_text;
+  llvm::StringSet<> filenames;
+};
+
+// Simple class that tracks the edits made per path. Used to dump the database
+// used by the Blink rebase helper.
+class EditTracker {
+ public:
+  EditTracker() = default;
+
+  void Add(const clang::SourceManager& source_manager,
+           clang::SourceLocation location,
+           llvm::StringRef original_text,
+           llvm::StringRef new_text);
+
+  // Serializes the tracked edits to |output|. Emits:
+  // <filename>:<tag>:<original text>:<new text>
+  // for each distinct filename of each tracked edit.
+  void SerializeTo(llvm::StringRef tag, llvm::raw_ostream& output) const;
+
+ private:
+  EditTracker(const EditTracker&) = delete;
+  EditTracker& operator=(const EditTracker&) = delete;
+
+  // The string key is the original text.
+  llvm::StringMap<EditInfo> tracked_edits_;
+};
+
+#endif  // TOOLS_CLANG_REWRITE_TO_CHROME_STYLE_EDIT_TRACKER_H_
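A hedged sketch of how the rewriter might drive this tracker; the real call sites live in RewriteToChromeStyle.cpp, and the identifiers below are illustrative:

#include "EditTracker.h"
#include "llvm/Support/raw_ostream.h"

// Inside a MatchCallback: |sm| and |loc| come from the match result.
// Repeated originals collapse into one EditInfo keyed by original text,
// with one filename entry per file the edit was seen in.
void RecordRename(EditTracker& tracker, const clang::SourceManager& sm,
                  clang::SourceLocation loc) {
  tracker.Add(sm, loc, "fooBar", "FooBar");  // hypothetical rename
}

// At shutdown, emit lines such as Source/core/Foo.cpp:var:fooBar:FooBar.
void Dump(const EditTracker& tracker) {
  tracker.SerializeTo("var", llvm::outs());
}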
diff --git a/tools/clang/rewrite_to_chrome_style/RewriteToChromeStyle.cpp b/tools/clang/rewrite_to_chrome_style/RewriteToChromeStyle.cpp
index 3b42fc2..bc98ce4 100644
--- a/tools/clang/rewrite_to_chrome_style/RewriteToChromeStyle.cpp
+++ b/tools/clang/rewrite_to_chrome_style/RewriteToChromeStyle.cpp
@@ -14,10 +14,9 @@
 
 #include <assert.h>
 #include <algorithm>
-#include <fstream>
 #include <memory>
+#include <set>
 #include <string>
-#include <unordered_map>
 
 #include "clang/AST/ASTContext.h"
 #include "clang/ASTMatchers/ASTMatchFinder.h"
@@ -25,20 +24,19 @@
 #include "clang/ASTMatchers/ASTMatchersMacros.h"
 #include "clang/Basic/CharInfo.h"
 #include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/CompilerInstance.h"
 #include "clang/Frontend/FrontendActions.h"
+#include "clang/Lex/MacroArgs.h"
 #include "clang/Lex/Lexer.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
 #include "clang/Tooling/CommonOptionsParser.h"
 #include "clang/Tooling/Refactoring.h"
 #include "clang/Tooling/Tooling.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/TargetSelect.h"
 
-#if defined(_WIN32)
-#include <windows.h>
-#else
-#include <sys/file.h>
-#include <unistd.h>
-#endif
+#include "EditTracker.h"
 
 using namespace clang::ast_matchers;
 using clang::tooling::CommonOptionsParser;
@@ -50,11 +48,27 @@
 const char kBlinkFieldPrefix[] = "m_";
 const char kBlinkStaticMemberPrefix[] = "s_";
 const char kGeneratedFileRegex[] = "^gen/|/gen/";
+const char kGMockMethodNamePrefix[] = "gmock_";
+
+template <typename MatcherType, typename NodeType>
+bool IsMatching(const MatcherType& matcher,
+                const NodeType& node,
+                clang::ASTContext& context) {
+  return !match(matcher, node, context).empty();
+}
 
 const clang::ast_matchers::internal::
     VariadicDynCastAllOfMatcher<clang::Expr, clang::UnresolvedMemberExpr>
         unresolvedMemberExpr;
 
+const clang::ast_matchers::internal::
+    VariadicDynCastAllOfMatcher<clang::Expr, clang::DependentScopeDeclRefExpr>
+        dependentScopeDeclRefExpr;
+
+const clang::ast_matchers::internal::
+    VariadicDynCastAllOfMatcher<clang::Expr, clang::CXXDependentScopeMemberExpr>
+        cxxDependentScopeMemberExpr;
+
 AST_MATCHER(clang::FunctionDecl, isOverloadedOperator) {
   return Node.isOverloadedOperator();
 }
@@ -70,6 +84,41 @@
   return InnerMatcher.matches(*Node.getTemplatedDecl(), Finder, Builder);
 }
 
+// Matches a CXXMethodDecl declared via the MOCK_METHODx macro if the method
+// it mocks is matched by the InnerMatcher.  For example, if the matcher "foo"
+// matches "interfaceMethod", then mocksMethod(foo()) will match
+// "gmock_interfaceMethod" declared by MOCK_METHODx(interfaceMethod).
+AST_MATCHER_P(clang::CXXMethodDecl,
+              mocksMethod,
+              clang::ast_matchers::internal::Matcher<clang::CXXMethodDecl>,
+              InnerMatcher) {
+  if (!Node.getDeclName().isIdentifier())
+    return false;
+
+  llvm::StringRef method_name = Node.getName();
+  if (!method_name.startswith(kGMockMethodNamePrefix))
+    return false;
+
+  llvm::StringRef mocked_method_name =
+      method_name.substr(strlen(kGMockMethodNamePrefix));
+  for (const auto& potentially_mocked_method : Node.getParent()->methods()) {
+    if (!potentially_mocked_method->isVirtual())
+      continue;
+
+    clang::DeclarationName decl_name = potentially_mocked_method->getDeclName();
+    if (!decl_name.isIdentifier() ||
+        potentially_mocked_method->getName() != mocked_method_name)
+      continue;
+    if (potentially_mocked_method->getNumParams() != Node.getNumParams())
+      continue;
+
+    if (InnerMatcher.matches(*potentially_mocked_method, Finder, Builder))
+      return true;
+  }
+
+  return false;
+}
+
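For context on mocksMethod: gmock's MOCK_METHODx(m, ...) expands to both an override of the virtual method m and a same-arity helper named gmock_m, and only the helper carries the gmock_ prefix this matcher strips. A minimal sketch of the shape it targets, assuming gmock and an illustrative interface:

// Hypothetical mock: MOCK_METHOD1 declares didFinishLoad(bool) and
// gmock_didFinishLoad(...). mocksMethod(hasName("didFinishLoad")) matches
// the generated gmock_ helper, so it is renamed in lockstep with the
// virtual method it mocks.
class MockFrameClient : public FrameClient {
 public:
  MOCK_METHOD1(didFinishLoad, void(bool));
};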
 // If |InnerMatcher| matches |top|, then the returned matcher will match:
 // - |top::function|
 // - |top::Class::method|
@@ -112,6 +161,13 @@
   return true;
 }
 
+void PrintForDiagnostics(clang::raw_ostream& os,
+                         const clang::FunctionDecl& decl) {
+  decl.getLocStart().print(os, decl.getASTContext().getSourceManager());
+  os << ": ";
+  decl.getNameForDiagnostic(os, decl.getASTContext().getPrintingPolicy(), true);
+}
+
 template <typename T>
 bool MatchAllOverriddenMethods(
     const clang::CXXMethodDecl& decl,
@@ -136,13 +192,54 @@
   // one we did not rename, which creates a behaviour change. So assert and
   // demand that the user fix the code first (or add the method to our
   // blacklist T_T).
-  if (override_matches || override_not_matches)
-    assert(override_matches != override_not_matches);
+  if (override_matches && override_not_matches) {
+    // blink::InternalSettings::trace method overrides
+    // 1) blink::InternalSettingsGenerated::trace
+    //    (won't be renamed because it is in generated code)
+    // 2) blink::Supplement<blink::Page>::trace
+    //    (will be renamed).
+    // It is safe to rename blink::InternalSettings::trace, because
+    // both 1 and 2 will be renamed (#1 via manual changes of the code
+    // generator for DOM bindings and #2 via the clang tool).
+    auto internal_settings_class_decl = cxxRecordDecl(
+        hasName("InternalSettings"),
+        hasParent(namespaceDecl(hasName("blink"),
+                                hasParent(translationUnitDecl()))));
+    auto is_method_safe_to_rename = cxxMethodDecl(
+        hasName("trace"),
+        anyOf(hasParent(internal_settings_class_decl),  // in .h file
+              has(nestedNameSpecifier(specifiesType(    // in .cpp file
+                  hasDeclaration(internal_settings_class_decl))))));
+    if (IsMatching(is_method_safe_to_rename, decl, decl.getASTContext()))
+      return true;
+
+    // For previously unknown conflicts, error out and require a human to
+    // analyse the problem (rather than falling back to a potentially unsafe /
+    // code semantics changing rename).
+    llvm::errs() << "ERROR: ";
+    PrintForDiagnostics(llvm::errs(), decl);
+    llvm::errs() << " method overrides "
+                 << "some virtual methods that will be automatically renamed "
+                 << "and some that won't be renamed.";
+    llvm::errs() << "\n";
+    for (auto it = decl.begin_overridden_methods();
+         it != decl.end_overridden_methods(); ++it) {
+      if (MatchAllOverriddenMethods(**it, inner_matcher, finder, builder))
+        llvm::errs() << "Overriden method that will be renamed: ";
+      else
+        llvm::errs() << "Overriden method that will not be renamed: ";
+      PrintForDiagnostics(llvm::errs(), **it);
+      llvm::errs() << "\n";
+    }
+    llvm::errs() << "\n";
+    assert(false);
+  }
 
   // If the method overrides something that doesn't match, then the method
   // itself doesn't match.
   if (override_not_matches)
     return false;
+
   // If the method overrides something that matches, then the method itself
   // matches.
   if (override_matches)
@@ -158,6 +255,34 @@
   return MatchAllOverriddenMethods(Node, InnerMatcher, Finder, Builder);
 }
 
+// Matches |T::m| and/or |x->T::m| and/or |x->m| CXXDependentScopeMemberExpr
+// if member |m| comes from a type that matches the InnerMatcher.
+AST_MATCHER_P(clang::CXXDependentScopeMemberExpr,
+              hasMemberFromType,
+              clang::ast_matchers::internal::Matcher<clang::QualType>,
+              InnerMatcher) {
+  // Given |T::m| and/or |x->T::m| and/or |x->m| ...
+  if (clang::NestedNameSpecifier* nestedNameSpecifier = Node.getQualifier()) {
+    // ... if |T| is present, then InnerMatcher has to match |T|.
+    clang::QualType qualType(nestedNameSpecifier->getAsType(), 0);
+    return InnerMatcher.matches(qualType, Finder, Builder);
+  } else {
+    // ... if there is no |T|, then InnerMatcher has to match the type of |x|.
+    clang::Expr* base_expr = Node.isImplicitAccess() ? nullptr : Node.getBase();
+    return base_expr &&
+           InnerMatcher.matches(base_expr->getType(), Finder, Builder);
+  }
+}
+
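+// Illustrative use of |hasMemberFromType| (hypothetical snippet):
+//   template <typename T> void f(T x) { x.m_size; }
+// |x.m_size| is a CXXDependentScopeMemberExpr without a qualifier, so the
+// matcher above runs InnerMatcher against the type of |x| (i.e. |T|).
+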
+// Matches |const Class<T>&| QualType if InnerMatcher matches |Class<T>|.
+AST_MATCHER_P(clang::QualType,
+              hasBaseType,
+              clang::ast_matchers::internal::Matcher<clang::Type>,
+              InnerMatcher) {
+  const clang::Type* type = Node.getTypePtrOrNull();
+  return type && InnerMatcher.matches(*type, Finder, Builder);
+}
+
 bool IsMethodOverrideOf(const clang::CXXMethodDecl& decl,
                         const char* class_name) {
   if (decl.getParent()->getQualifiedNameAsString() == class_name)
@@ -170,37 +295,61 @@
   return false;
 }
 
-bool IsBlacklistedFunction(const clang::FunctionDecl& decl) {
-  // swap() functions should match the signature of std::swap for ADL tricks.
-  return decl.getName() == "swap";
+bool IsBlacklistedFunctionName(llvm::StringRef name) {
+  // https://crbug.com/672902: Method names with an underscore are typically
+  // modeled after the std library and usually do not originate from Blink.
+  // Do not rewrite such names (like push_back, emplace_back, etc.).
+  if (name.find('_') != llvm::StringRef::npos)
+    return true;
+
+  return false;
 }
 
-bool IsBlacklistedMethod(const clang::CXXMethodDecl& decl) {
-  if (decl.isStatic())
-    return false;
+bool IsBlacklistedFreeFunctionName(llvm::StringRef name) {
+  // swap() functions should match the signature of std::swap for ADL tricks.
+  return name == "swap";
+}
 
-  clang::StringRef name = decl.getName();
+bool IsBlacklistedInstanceMethodName(llvm::StringRef name) {
+  static const char* kBlacklistedNames[] = {
+      // We should avoid renaming the method names listed below, because
+      // 1. They are used in templated code (e.g. in <algorithm>).
+      // 2. They (begin+end) are used by range-based for loops:
+      //    - for (auto x : foo) { ... }  // <- foo.begin() will be called.
+      "begin", "end", "rbegin", "rend", "lock", "unlock", "try_lock",
 
-  // These methods should never be renamed.
-  static const char* kBlacklistMethods[] = {"trace", "traceImpl", "lock",
-                                            "unlock", "try_lock"};
-  for (const auto& b : kBlacklistMethods) {
+      // https://crbug.com/672902: Should not rewrite names that mimic methods
+      // from the std library.
+      "back", "empty", "erase", "front", "insert",
+  };
+  for (const auto& b : kBlacklistedNames) {
     if (name == b)
       return true;
   }
+  return false;
+}
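+
+// Sketch of why begin()/end() must keep their names: a range-based for loop
+//   for (auto x : foo) { ... }
+// desugars (roughly) into
+//   for (auto it = foo.begin(), e = foo.end(); it != e; ++it) { auto x = *it; ... }
+// so renaming them to Begin()/End() would break compilation of such loops.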
 
-  // Iterator methods shouldn't be renamed to work with stl and range-for
-  // loops.
-  std::string ret_type = decl.getReturnType().getAsString();
-  if (ret_type.find("iterator") != std::string::npos ||
-      ret_type.find("Iterator") != std::string::npos) {
-    static const char* kIteratorBlacklist[] = {"begin", "end", "rbegin",
-                                               "rend"};
-    for (const auto& b : kIteratorBlacklist) {
-      if (name == b)
-        return true;
-    }
-  }
+bool IsBlacklistedMethodName(llvm::StringRef name) {
+  return IsBlacklistedFunctionName(name) ||
+         IsBlacklistedInstanceMethodName(name);
+}
+
+bool IsBlacklistedFunction(const clang::FunctionDecl& decl) {
+  clang::StringRef name = decl.getName();
+  return IsBlacklistedFunctionName(name) || IsBlacklistedFreeFunctionName(name);
+}
+
+bool IsBlacklistedMethod(const clang::CXXMethodDecl& decl) {
+  clang::StringRef name = decl.getName();
+  if (IsBlacklistedFunctionName(name))
+    return true;
+
+  // Remaining cases are only applicable to instance methods.
+  if (decl.isStatic())
+    return false;
+
+  if (IsBlacklistedInstanceMethodName(name))
+    return true;
 
   // Subclasses of InspectorAgent will subclass "disable()" from both blink and
   // from gen/, which is problematic, but DevTools folks don't want to rename
@@ -258,6 +407,65 @@
   return output;
 }
 
+bool CanBeEvaluatedAtCompileTime(const clang::Stmt* stmt,
+                                 const clang::ASTContext& context) {
+  auto* expr = clang::dyn_cast<clang::Expr>(stmt);
+  if (!expr) {
+    // If the statement is not an expression then it's a constant.
+    return true;
+  }
+
+  // Function calls create inconsistent behaviour. For some template
+  // instantiations they can be constexpr while for others they are not, which
+  // changes the output of isEvaluatable().
+  if (expr->hasNonTrivialCall(context))
+    return false;
+
+  // Recurse on children. If they are all const (or are uses of template
+  // input) then the statement can be considered const. For whatever reason,
+  // the checks below can give different, less consistent answers when called
+  // on a complex expression than when called on its most primitive pieces
+  // (some pieces would say false but the whole thing says true).
+  for (auto* child : expr->children()) {
+    if (!CanBeEvaluatedAtCompileTime(child, context))
+      return false;
+  }
+
+  // If the expression depends on template input, we cannot call
+  // isEvaluatable() on it, as it will do bad things/crash.
+  if (!expr->isInstantiationDependent()) {
+    // If the expression can be evaluated at compile time, then it should have a
+    // kFoo style name. Otherwise, not.
+    return expr->isEvaluatable(context);
+  }
+
+  // For template-dependent situations, we do our best to figure out special
+  // cases as we come across them. Some expressions are considered
+  // instantiation-dependent only for some template instantiations! Which is
+  // terrible! So, most importantly, we try to match isEvaluatable() there.
+  switch (expr->getStmtClass()) {
+    case clang::Stmt::CXXThisExprClass:
+      return false;
+    case clang::Stmt::DeclRefExprClass: {
+      auto* declref = clang::dyn_cast<clang::DeclRefExpr>(expr);
+      auto* decl = declref->getDecl();
+      if (auto* vardecl = clang::dyn_cast<clang::VarDecl>(decl)) {
+        if (auto* initializer = vardecl->getInit())
+          return CanBeEvaluatedAtCompileTime(initializer, context);
+        return false;
+      }
+      break;
+    }
+
+    default:
+      break;
+  }
+
+  // Otherwise, we consider depending on template parameters not to interfere
+  // with being const, with the exceptions hopefully covered above.
+  return true;
+}
+
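+// Illustrative example (hypothetical code) of the recursion above:
+//   template <int N> void f() {
+//     const int x = N;      // instantiation-dependent, handled by the switch
+//     const int y = x + 1;  // DeclRefExpr |x| -> recurse into |x|'s initializer
+//   }
+// Both initializers end up treated as compile-time evaluable, so both
+// variables are candidates for |kFoo|-style naming.
+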
 bool IsProbablyConst(const clang::VarDecl& decl,
                      const clang::ASTContext& context) {
   clang::QualType type = decl.getType();
@@ -267,6 +475,14 @@
   if (type.isVolatileQualified())
     return false;
 
+  if (decl.isConstexpr())
+    return true;
+
+  // Parameters should not be renamed to |kFooBar| style (even if they are
+  // const and have an initializer (aka default value)).
+  if (clang::isa<clang::ParmVarDecl>(&decl))
+    return false;
+
   // http://google.github.io/styleguide/cppguide.html#Constant_Names
   // Static variables that are const-qualified should use kConstantStyle naming.
   if (decl.getStorageDuration() == clang::SD_Static)
@@ -276,24 +492,96 @@
   if (!initializer)
     return false;
 
-  // If the expression is dependent on a template input, then we are not
-  // sure if it can be compile-time generated as calling isEvaluatable() is
-  // not valid on |initializer|.
-  // TODO(crbug.com/581218): We could probably look at each compiled
-  // instantiation of the template and see if they are all compile-time
-  // isEvaluable().
-  if (initializer->isInstantiationDependent())
-    return false;
-
-  // If the expression can be evaluated at compile time, then it should have a
-  // kFoo style name. Otherwise, not.
-  return initializer->isEvaluatable(context);
+  return CanBeEvaluatedAtCompileTime(initializer, context);
 }
 
 AST_MATCHER_P(clang::QualType, hasString, std::string, ExpectedString) {
   return ExpectedString == Node.getAsString();
 }
 
+bool ShouldPrefixFunctionName(const std::string& old_method_name) {
+  // Functions whose names are similar to a type name; they should be given a
+  // "Get" prefix.
+  static const char* kConflictingMethods[] = {
+      "animationWorklet",
+      "audioWorklet",
+      "binaryType",
+      "blob",
+      "channelCountMode",
+      "color",
+      "counterDirectives",
+      "document",
+      "emptyChromeClient",
+      "emptyEditorClient",
+      "emptySpellCheckerClient",
+      "entryType",
+      "error",
+      "fileUtilities",
+      "font",
+      "frame",
+      "frameBlameContext",
+      "frontend",
+      "hash",
+      "heapObjectHeader",
+      "iconURL",
+      "inputMethodController",
+      "inputType",
+      "layout",
+      "layoutBlock",
+      "layoutObject",
+      "layoutSize",
+      "length",
+      "lineCap",
+      "lineEndings",
+      "lineJoin",
+      "listItems",
+      "matchedProperties",
+      "midpointState",
+      "mouseEvent",
+      "name",
+      "navigationType",
+      "node",
+      "outcome",
+      "pagePopup",
+      "paintWorklet",
+      "path",
+      "processingInstruction",
+      "readyState",
+      "relList",
+      "resource",
+      "response",
+      "sandboxSupport",
+      "screenInfo",
+      "scrollAnimator",
+      "settings",
+      "signalingState",
+      "state",
+      "string",
+      "styleSheet",
+      "text",
+      "textAlign",
+      "textBaseline",
+      "theme",
+      "thread",
+      "timing",
+      "topLevelBlameContext",
+      "vector",
+      "widget",
+      "wordBoundaries",
+      "wrapperTypeInfo",
+  };
+  for (const auto& conflicting_method : kConflictingMethods) {
+    if (old_method_name == conflicting_method)
+      return true;
+  }
+
+  return false;
+}
+
+AST_MATCHER(clang::FunctionDecl, shouldPrefixFunctionName) {
+  return ShouldPrefixFunctionName(Node.getName().str());
+}
+
 bool GetNameForDecl(const clang::FunctionDecl& decl,
                     clang::ASTContext& context,
                     std::string& name) {
@@ -302,14 +590,18 @@
 
   // Given
   //   class Foo {};
+  //   class DerivedFoo : public Foo {};
   //   using Bar = Foo;
   //   Bar f1();  // <- |Bar| would be matched by hasString("Bar") below.
   //   Bar f2();  // <- |Bar| would be matched by hasName("Foo") below.
+  //   DerivedFoo f3();  // <- |DerivedFoo| matched by isDerivedFrom(...) below.
   // |type_with_same_name_as_function| matcher matches Bar and Foo return types.
   auto type_with_same_name_as_function = qualType(anyOf(
-      hasString(name),  // hasString matches the type as spelled (Bar above).
-      hasDeclaration(namedDecl(hasName(name)))));  // hasDeclaration matches
-                                                   // resolved type (Foo above).
+      // hasString matches the type as spelled (Bar above).
+      hasString(name),
+      // hasDeclaration matches resolved type (Foo or DerivedFoo above).
+      hasDeclaration(namedDecl(hasName(name)))));
+
   // |type_containing_same_name_as_function| matcher will match all of the
   // return types below:
   // - Foo foo()  // Direct application of |type_with_same_name_as_function|.
@@ -320,9 +612,18 @@
                      hasDescendant(type_with_same_name_as_function)));
   // https://crbug.com/582312: Prepend "Get" if method name conflicts with
   // return type.
-  auto conflict_matcher =
-      functionDecl(returns(type_containing_same_name_as_function));
-  if (!match(conflict_matcher, decl, context).empty())
+  auto conflict_matcher = functionDecl(anyOf(
+      // For functions and non-virtual or base method implementations just
+      // compare with the immediate return type.
+      functionDecl(returns(type_containing_same_name_as_function),
+                   unless(cxxMethodDecl(isOverride()))),
+      // For methods that override one or more methods, compare with the return
+      // type of the *base* methods.
+      cxxMethodDecl(isOverride(), forEachOverridden(returns(
+                                      type_containing_same_name_as_function))),
+      // And also check hardcoded list of function names to prefix with "Get".
+      shouldPrefixFunctionName()));
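+  // For example (illustrative): |Document* document() const| returns a type
+  // whose name matches the method name, so |conflict_matcher| fires and the
+  // rename produces |GetDocument()|; |frame()| similarly becomes |GetFrame()|
+  // via the shouldPrefixFunctionName() list above.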
+  if (IsMatching(conflict_matcher, decl, context))
     name = "Get" + name;
 
   return true;
@@ -417,6 +718,12 @@
     if (original_name.size() >= 2 && original_name[0] == 'k' &&
         clang::isUppercase(original_name[1]))
       return false;
+    // Or the name is spelt with underscore casing. While such names are
+    // actually compile-time constants, the author explicitly wrote them as
+    // variables rather than constants (they would have used kFormat here
+    // otherwise), so preserve the name rather than trying to mangle a kFormat
+    // out of it.
+    if (original_name.find('_') != StringRef::npos)
+      return false;
 
     name = 'k';
     name.append(original_name.data(), original_name.size());
@@ -509,6 +816,24 @@
 };
 
 template <>
+struct TargetNodeTraits<clang::DependentScopeDeclRefExpr> {
+  static clang::SourceLocation GetLoc(
+      const clang::DependentScopeDeclRefExpr& expr) {
+    return expr.getLocation();
+  }
+  static const char* GetName() { return "expr"; }
+};
+
+template <>
+struct TargetNodeTraits<clang::CXXDependentScopeMemberExpr> {
+  static clang::SourceLocation GetLoc(
+      const clang::CXXDependentScopeMemberExpr& expr) {
+    return expr.getMemberLoc();
+  }
+  static const char* GetName() { return "expr"; }
+};
+
+template <>
 struct TargetNodeTraits<clang::CXXCtorInitializer> {
   static clang::SourceLocation GetLoc(const clang::CXXCtorInitializer& init) {
     assert(init.isWritten());
@@ -536,84 +861,408 @@
   static const char* GetType() { return "UnresolvedMemberExpr"; }
 };
 
-template <typename DeclNode, typename TargetNode>
+template <>
+struct TargetNodeTraits<clang::UnresolvedUsingValueDecl> {
+  static clang::SourceLocation GetLoc(
+      const clang::UnresolvedUsingValueDecl& decl) {
+    return decl.getNameInfo().getLoc();
+  }
+  static const char* GetName() { return "decl"; }
+  static const char* GetType() { return "UnresolvedUsingValueDecl"; }
+};
+
+template <typename TargetNode>
 class RewriterBase : public MatchFinder::MatchCallback {
  public:
   explicit RewriterBase(std::set<Replacement>* replacements)
       : replacements_(replacements) {}
 
-  void run(const MatchFinder::MatchResult& result) override {
-    const DeclNode* decl = result.Nodes.getNodeAs<DeclNode>("decl");
-    // If false, there's no name to be renamed.
-    if (!decl->getIdentifier())
-      return;
-    clang::SourceLocation decl_loc =
-        TargetNodeTraits<clang::NamedDecl>::GetLoc(*decl);
-    if (decl_loc.isMacroID()) {
-      // Get the location of the spelling of the declaration. If token pasting
-      // was used this will be in "scratch space" and we don't know how to get
-      // from there back to/ the actual macro with the foo##bar text. So just
-      // don't replace in that case.
-      clang::SourceLocation spell =
-          result.SourceManager->getSpellingLoc(decl_loc);
-      if (strcmp(result.SourceManager->getBufferName(spell),
-                 "<scratch space>") == 0)
-        return;
-    }
-    clang::ASTContext* context = result.Context;
-    std::string new_name;
-    if (!GetNameForDecl(*decl, *context, new_name))
-      return;  // If false, the name was not suitable for renaming.
-    llvm::StringRef old_name = decl->getName();
-    if (old_name == new_name)
-      return;
-    clang::SourceLocation loc = TargetNodeTraits<TargetNode>::GetLoc(
-        *result.Nodes.getNodeAs<TargetNode>(
-            TargetNodeTraits<TargetNode>::GetName()));
-    clang::CharSourceRange range = clang::CharSourceRange::getTokenRange(loc);
-    replacements_->emplace(*result.SourceManager, range, new_name);
-    replacement_names_.emplace(old_name.str(), std::move(new_name));
+  const TargetNode& GetTargetNode(const MatchFinder::MatchResult& result) {
+    const TargetNode* target_node = result.Nodes.getNodeAs<TargetNode>(
+        TargetNodeTraits<TargetNode>::GetName());
+    assert(target_node);
+    return *target_node;
   }
 
-  const std::unordered_map<std::string, std::string>& replacement_names()
-      const {
-    return replacement_names_;
+  bool GenerateReplacement(const MatchFinder::MatchResult& result,
+                           clang::SourceLocation loc,
+                           llvm::StringRef old_name,
+                           std::string new_name,
+                           Replacement* replacement) {
+    const clang::ASTContext& context = *result.Context;
+    const clang::SourceManager& source_manager = *result.SourceManager;
+
+    if (loc.isMacroID()) {
+      // Try to jump "above" the scratch buffer if |loc| is inside
+      // token##Concatenation.
+      const int kMaxJumps = 5;
+      bool verified_out_of_scratch_space = false;
+      for (int i = 0; i < kMaxJumps && !verified_out_of_scratch_space; i++) {
+        clang::SourceLocation spell = source_manager.getSpellingLoc(loc);
+        verified_out_of_scratch_space =
+            source_manager.getBufferName(spell) != "<scratch space>";
+        if (!verified_out_of_scratch_space)
+          loc = source_manager.getImmediateMacroCallerLoc(loc);
+      }
+      if (!verified_out_of_scratch_space)
+        return false;
+    }
+
+    // If the edit affects only the first character of the identifier, then
+    // narrow down the edit to only this single character.  This is important
+    // for dealing with toFooBar -> ToFooBar method renaming when the method
+    // name is built using macro token concatenation like to##macroArgument - in
+    // this case we should only rewrite "t" -> "T" and leave "o##macroArgument"
+    // untouched.
+    llvm::StringRef expected_old_text = old_name;
+    llvm::StringRef new_text = new_name;
+    if (loc.isMacroID() && expected_old_text.substr(1) == new_text.substr(1)) {
+      expected_old_text = expected_old_text.substr(0, 1);
+      new_text = new_text.substr(0, 1);
+    }
+    clang::SourceLocation spell = source_manager.getSpellingLoc(loc);
+    clang::CharSourceRange range = clang::CharSourceRange::getCharRange(
+        spell, spell.getLocWithOffset(expected_old_text.size()));
+
+    // We need to ensure that |actual_old_text| is the same as
+    // |expected_old_text| - it can be different if |actual_old_text| contains
+    // a macro argument (see DEFINE_WITH_TOKEN_CONCATENATION2 in
+    // macros-original.cc testcase).
+    StringRef actual_old_text = clang::Lexer::getSourceText(
+        range, source_manager, context.getLangOpts());
+    if (actual_old_text != expected_old_text)
+      return false;
+
+    if (replacement)
+      *replacement = Replacement(source_manager, range, new_text);
+    return true;
   }
 
+  virtual clang::SourceLocation GetTargetLoc(
+      const MatchFinder::MatchResult& result) {
+    return TargetNodeTraits<TargetNode>::GetLoc(GetTargetNode(result));
+  }
+
+  void AddReplacement(const MatchFinder::MatchResult& result,
+                      llvm::StringRef old_name,
+                      std::string new_name) {
+    if (old_name == new_name)
+      return;
+
+    clang::SourceLocation loc = GetTargetLoc(result);
+    if (loc.isInvalid())
+      return;
+
+    Replacement replacement;
+    if (!GenerateReplacement(result, loc, old_name, new_name, &replacement))
+      return;
+
+    replacements_->insert(std::move(replacement));
+    edit_tracker_.Add(*result.SourceManager, loc, old_name, new_name);
+  }
+
+  const EditTracker& edit_tracker() const { return edit_tracker_; }
+
  private:
   std::set<Replacement>* const replacements_;
-  std::unordered_map<std::string, std::string> replacement_names_;
+  EditTracker edit_tracker_;
 };
 
-using FieldDeclRewriter = RewriterBase<clang::FieldDecl, clang::NamedDecl>;
-using VarDeclRewriter = RewriterBase<clang::VarDecl, clang::NamedDecl>;
-using MemberRewriter = RewriterBase<clang::FieldDecl, clang::MemberExpr>;
-using DeclRefRewriter = RewriterBase<clang::VarDecl, clang::DeclRefExpr>;
-using FieldDeclRefRewriter = RewriterBase<clang::FieldDecl, clang::DeclRefExpr>;
-using FunctionDeclRewriter =
-    RewriterBase<clang::FunctionDecl, clang::NamedDecl>;
-using FunctionRefRewriter =
-    RewriterBase<clang::FunctionDecl, clang::DeclRefExpr>;
-using ConstructorInitializerRewriter =
-    RewriterBase<clang::FieldDecl, clang::CXXCtorInitializer>;
+template <typename DeclNode, typename TargetNode>
+class DeclRewriterBase : public RewriterBase<TargetNode> {
+ public:
+  using Base = RewriterBase<TargetNode>;
 
-using MethodDeclRewriter = RewriterBase<clang::CXXMethodDecl, clang::NamedDecl>;
+  explicit DeclRewriterBase(std::set<Replacement>* replacements)
+      : Base(replacements) {}
+
+  void run(const MatchFinder::MatchResult& result) override {
+    const DeclNode* decl = result.Nodes.getNodeAs<DeclNode>("decl");
+    assert(decl);
+    llvm::StringRef old_name = decl->getName();
+
+    // Return early if there's no name to be renamed.
+    if (!decl->getIdentifier())
+      return;
+
+    // Get the new name.
+    std::string new_name;
+    if (!GetNameForDecl(*decl, *result.Context, new_name))
+      return;  // If false, the name was not suitable for renaming.
+
+    // Check if we are able to rewrite the decl (to avoid rewriting if the
+    // decl's identifier is part of macro##Token##Concatenation).
+    clang::SourceLocation decl_loc =
+        TargetNodeTraits<clang::NamedDecl>::GetLoc(*decl);
+    if (!Base::GenerateReplacement(result, decl_loc, old_name, new_name,
+                                   nullptr))
+      return;
+
+    Base::AddReplacement(result, old_name, std::move(new_name));
+  }
+};
+
+using FieldDeclRewriter = DeclRewriterBase<clang::FieldDecl, clang::NamedDecl>;
+using VarDeclRewriter = DeclRewriterBase<clang::VarDecl, clang::NamedDecl>;
+using MemberRewriter = DeclRewriterBase<clang::FieldDecl, clang::MemberExpr>;
+using DeclRefRewriter = DeclRewriterBase<clang::VarDecl, clang::DeclRefExpr>;
+using FieldDeclRefRewriter =
+    DeclRewriterBase<clang::FieldDecl, clang::DeclRefExpr>;
+using FunctionDeclRewriter =
+    DeclRewriterBase<clang::FunctionDecl, clang::NamedDecl>;
+using FunctionRefRewriter =
+    DeclRewriterBase<clang::FunctionDecl, clang::DeclRefExpr>;
+using ConstructorInitializerRewriter =
+    DeclRewriterBase<clang::FieldDecl, clang::CXXCtorInitializer>;
+
+using MethodDeclRewriter =
+    DeclRewriterBase<clang::CXXMethodDecl, clang::NamedDecl>;
 using MethodRefRewriter =
-    RewriterBase<clang::CXXMethodDecl, clang::DeclRefExpr>;
+    DeclRewriterBase<clang::CXXMethodDecl, clang::DeclRefExpr>;
 using MethodMemberRewriter =
-    RewriterBase<clang::CXXMethodDecl, clang::MemberExpr>;
+    DeclRewriterBase<clang::CXXMethodDecl, clang::MemberExpr>;
 
 using EnumConstantDeclRewriter =
-    RewriterBase<clang::EnumConstantDecl, clang::NamedDecl>;
+    DeclRewriterBase<clang::EnumConstantDecl, clang::NamedDecl>;
 using EnumConstantDeclRefRewriter =
-    RewriterBase<clang::EnumConstantDecl, clang::DeclRefExpr>;
+    DeclRewriterBase<clang::EnumConstantDecl, clang::DeclRefExpr>;
 
 using UnresolvedLookupRewriter =
-    RewriterBase<clang::NamedDecl, clang::UnresolvedLookupExpr>;
+    DeclRewriterBase<clang::NamedDecl, clang::UnresolvedLookupExpr>;
 using UnresolvedMemberRewriter =
-    RewriterBase<clang::NamedDecl, clang::UnresolvedMemberExpr>;
+    DeclRewriterBase<clang::NamedDecl, clang::UnresolvedMemberExpr>;
 
-using UsingDeclRewriter = RewriterBase<clang::UsingDecl, clang::NamedDecl>;
+using UsingDeclRewriter = DeclRewriterBase<clang::UsingDecl, clang::NamedDecl>;
+
+class GMockMemberRewriter
+    : public DeclRewriterBase<clang::CXXMethodDecl, clang::MemberExpr> {
+ public:
+  using Base = DeclRewriterBase<clang::CXXMethodDecl, clang::MemberExpr>;
+
+  explicit GMockMemberRewriter(std::set<Replacement>* replacements)
+      : Base(replacements) {}
+
+  std::unique_ptr<clang::PPCallbacks> CreatePreprocessorCallbacks() {
+    return llvm::make_unique<GMockMemberRewriter::PPCallbacks>(this);
+  }
+
+  clang::SourceLocation GetTargetLoc(
+      const MatchFinder::MatchResult& result) override {
+    // Find location of the gmock_##MockedMethod identifier.
+    clang::SourceLocation target_loc = Base::GetTargetLoc(result);
+
+    // Find location of EXPECT_CALL macro invocation.
+    clang::SourceLocation macro_call_loc =
+        result.SourceManager->getExpansionLoc(target_loc);
+
+    // Map |macro_call_loc| to argument location (location of the method name
+    // that needs renaming).
+    auto it = expect_call_to_2nd_arg.find(macro_call_loc);
+    if (it == expect_call_to_2nd_arg.end())
+      return clang::SourceLocation();
+    return it->second;
+  }
+
+ private:
+  std::map<clang::SourceLocation, clang::SourceLocation> expect_call_to_2nd_arg;
+
+  // Called from PPCallbacks with the locations of an EXPECT_CALL macro
+  // invocation. Example:
+  //   EXPECT_CALL(my_mock, myMethod(123, 456));
+  //   ^- expansion_loc     ^- second_arg_loc
+  void RecordExpectCallMacroInvocation(clang::SourceLocation expansion_loc,
+                                       clang::SourceLocation second_arg_loc) {
+    expect_call_to_2nd_arg[expansion_loc] = second_arg_loc;
+  }
+
+  class PPCallbacks : public clang::PPCallbacks {
+   public:
+    explicit PPCallbacks(GMockMemberRewriter* rewriter) : rewriter_(rewriter) {}
+    ~PPCallbacks() override {}
+    void MacroExpands(const clang::Token& name,
+                      const clang::MacroDefinition& def,
+                      clang::SourceRange range,
+                      const clang::MacroArgs* args) override {
+      clang::IdentifierInfo* id = name.getIdentifierInfo();
+      if (!id)
+        return;
+
+      if (id->getName() != "EXPECT_CALL")
+        return;
+
+      if (def.getMacroInfo()->getNumArgs() != 2)
+        return;
+
+      // TODO(lukasza): Should check if def.getMacroInfo()->getDefinitionLoc()
+      // is in testing/gmock/include/gmock/gmock-spec-builders.h but I don't
+      // know how to get clang::SourceManager to call getFileName.
+
+      rewriter_->RecordExpectCallMacroInvocation(
+          name.getLocation(), args->getUnexpArgument(1)->getLocation());
+    }
+
+   private:
+    GMockMemberRewriter* rewriter_;
+  };
+};
+
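+// Illustrative flow (simplified): for
+//   EXPECT_CALL(mock, myMethod(1));
+// the AST matcher sees the |mock.gmock_myMethod(...)| member expression
+// generated by the macro, while the PPCallbacks above recorded where the 2nd
+// macro argument (|myMethod|) is spelled.  GetTargetLoc() returns that
+// spelling location, so the replacement edits |myMethod| -> |MyMethod| in the
+// user's source rather than inside the macro machinery.
+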
+clang::DeclarationName GetUnresolvedName(
+    const clang::UnresolvedMemberExpr& expr) {
+  return expr.getMemberName();
+}
+
+clang::DeclarationName GetUnresolvedName(
+    const clang::DependentScopeDeclRefExpr& expr) {
+  return expr.getDeclName();
+}
+
+clang::DeclarationName GetUnresolvedName(
+    const clang::CXXDependentScopeMemberExpr& expr) {
+  return expr.getMember();
+}
+
+clang::DeclarationName GetUnresolvedName(
+    const clang::UnresolvedUsingValueDecl& decl) {
+  return decl.getDeclName();
+}
+
+// Returns whether |expr| is used as a callee in the AST (i.e. if
+// |expr| needs to resolve to a method or a function).
+bool IsCallee(const clang::Expr& expr, clang::ASTContext& context) {
+  auto matcher = stmt(hasParent(callExpr(callee(equalsNode(&expr)))));
+  return IsMatching(matcher, expr, context);
+}
+
+// Returns whether |decl| will be used as a callee in the AST (i.e. if the value
+// brought by the using declaration will resolve to a method or a function).
+bool IsCallee(const clang::UnresolvedUsingValueDecl& decl,
+              clang::ASTContext& /* context */) {
+  // Caller (i.e. GuessNameForUnresolvedDependentNode) should have already
+  // filtered out fields before calling |IsCallee|.
+  clang::IdentifierInfo* info = GetUnresolvedName(decl).getAsIdentifierInfo();
+  assert(info);
+  bool name_looks_like_a_field = info->getName().startswith(kBlinkFieldPrefix);
+  assert(!name_looks_like_a_field);
+
+  // Looking just at clang::UnresolvedUsingValueDecl, we cannot tell whether it
+  // refers to something callable or not.  Since fields should have been already
+  // filtered out before calling IsCallee (see the assert above), let's assume
+  // that |using Base::foo| refers to a method.
+  return true;
+}
+
+template <typename TargetNode>
+class UnresolvedRewriterBase : public RewriterBase<TargetNode> {
+ public:
+  using Base = RewriterBase<TargetNode>;
+
+  explicit UnresolvedRewriterBase(std::set<Replacement>* replacements)
+      : RewriterBase<TargetNode>(replacements) {}
+
+  void run(const MatchFinder::MatchResult& result) override {
+    const TargetNode& node = Base::GetTargetNode(result);
+
+    clang::DeclarationName decl_name = GetUnresolvedName(node);
+    switch (decl_name.getNameKind()) {
+      // Do not rewrite this:
+      //   return operator T*();
+      // into this:
+      //   return Operator type - parameter - 0 - 0 * T * ();
+      case clang::DeclarationName::NameKind::CXXConversionFunctionName:
+      case clang::DeclarationName::NameKind::CXXOperatorName:
+      case clang::DeclarationName::NameKind::CXXLiteralOperatorName:
+        return;
+      default:
+        break;
+    }
+
+    // Make sure there is an old name + extract the old name.
+    clang::IdentifierInfo* info = GetUnresolvedName(node).getAsIdentifierInfo();
+    if (!info)
+      return;
+    llvm::StringRef old_name = info->getName();
+
+    // Try to guess a new name.
+    std::string new_name;
+    if (GuessNameForUnresolvedDependentNode(node, *result.Context, old_name,
+                                            new_name))
+      Base::AddReplacement(result, old_name, std::move(new_name));
+  }
+
+ private:
+  // This method calculates a new name for nodes that depend on template
+  // parameters (http://en.cppreference.com/w/cpp/language/dependent_name).  The
+  // renaming is based on crude heuristics, because such nodes are not bound to
+  // a specific decl until template instantiation - at the point of rename, one
+  // cannot tell whether the node will eventually resolve to a field / method /
+  // constant / etc.
+  //
+  // The method returns false if no renaming should be done.
+  // Otherwise the method returns true and sets |new_name|.
+  bool GuessNameForUnresolvedDependentNode(const TargetNode& node,
+                                           clang::ASTContext& context,
+                                           llvm::StringRef old_name,
+                                           std::string& new_name) {
+    // |m_fieldName| -> |field_name_|.
+    if (old_name.startswith(kBlinkFieldPrefix)) {
+      std::string field_name = old_name.substr(strlen(kBlinkFieldPrefix));
+      if (field_name.find('_') == std::string::npos) {
+        new_name = CamelCaseToUnderscoreCase(field_name) + "_";
+        return true;
+      }
+    }
+
+    // |T::myMethod(...)| -> |T::MyMethod(...)|.
+    if ((old_name.find('_') == std::string::npos) && IsCallee(node, context) &&
+        !IsBlacklistedMethodName(old_name)) {
+      new_name = old_name;
+      new_name[0] = clang::toUppercase(old_name[0]);
+      if (ShouldPrefixFunctionName(old_name))
+        new_name = "Get" + new_name;
+      return true;
+    }
+
+    // In the future we can consider more heuristics:
+    // - "s_" and "g_" prefixes
+    // - "ALL_CAPS"
+    // - |T::myStaticField| -> |T::kMyStaticField|
+    //   (but have to be careful not to rename |value| in WTF/TypeTraits.h?)
+    return false;
+  }
+};
+
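+// Illustrative outcomes of the heuristics above (hypothetical snippets):
+//   obj.m_fooBar     -> obj.foo_bar_       (kBlinkFieldPrefix heuristic)
+//   T::create(args)  -> T::Create(args)    (callee heuristic)
+//   T::document()    -> T::GetDocument()   (callee + "Get" prefix list)
+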
+using UnresolvedDependentMemberRewriter =
+    UnresolvedRewriterBase<clang::UnresolvedMemberExpr>;
+
+using UnresolvedUsingValueDeclRewriter =
+    UnresolvedRewriterBase<clang::UnresolvedUsingValueDecl>;
+
+using DependentScopeDeclRefExprRewriter =
+    UnresolvedRewriterBase<clang::DependentScopeDeclRefExpr>;
+
+using CXXDependentScopeMemberExprRewriter =
+    UnresolvedRewriterBase<clang::CXXDependentScopeMemberExpr>;
+
+class SourceFileCallbacks : public clang::tooling::SourceFileCallbacks {
+ public:
+  explicit SourceFileCallbacks(GMockMemberRewriter* gmock_member_rewriter)
+      : gmock_member_rewriter_(gmock_member_rewriter) {
+    assert(gmock_member_rewriter);
+  }
+
+  ~SourceFileCallbacks() override {}
+
+  // clang::tooling::SourceFileCallbacks override:
+  bool handleBeginSource(clang::CompilerInstance& compiler,
+                         llvm::StringRef Filename) override {
+    compiler.getPreprocessor().addPPCallbacks(
+        gmock_member_rewriter_->CreatePreprocessorCallbacks());
+    return true;
+  }
+
+ private:
+  GMockMemberRewriter* gmock_member_rewriter_;
+};
 
 }  // namespace
 
@@ -637,13 +1286,19 @@
   auto blink_namespace_decl =
       namespaceDecl(anyOf(hasName("blink"), hasName("WTF")),
                     hasParent(translationUnitDecl()));
+  auto protocol_namespace_decl =
+      namespaceDecl(hasName("protocol"),
+                    hasParent(namespaceDecl(hasName("blink"),
+                                            hasParent(translationUnitDecl()))));
 
   // Given top-level compilation unit:
   //   namespace WTF {
   //     void foo() {}
   //   }
   // matches |foo|.
-  auto decl_under_blink_namespace = decl(hasAncestor(blink_namespace_decl));
+  auto decl_under_blink_namespace =
+      decl(hasAncestor(blink_namespace_decl),
+           unless(hasAncestor(protocol_namespace_decl)));
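+  // E.g. (illustrative): |blink::protocol::Value| is excluded from renaming
+  // (the protocol namespace holds generated DevTools code), while
+  // |blink::Document| still matches.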
 
   // Given top-level compilation unit:
   //   void WTF::function() {}
@@ -669,8 +1324,8 @@
   auto field_decl_matcher = id("decl", fieldDecl(in_blink_namespace));
   auto is_type_trait_value =
       varDecl(hasName("value"), hasStaticStorageDuration(), isPublic(),
-              hasType(isConstQualified()), hasType(type(anyOf(
-                  booleanType(), enumType()))),
+              hasType(isConstQualified()),
+              hasType(type(anyOf(builtinType(), enumType()))),
               unless(hasAncestor(recordDecl(
                   has(cxxMethodDecl(isUserProvided(), isInstanceMethod()))))));
   auto var_decl_matcher =
@@ -800,7 +1455,7 @@
   //   S s;
   //   s.g();
   //   void (S::*p)() = &S::g;
-  // matches |&S::g| but not |s.g()|.
+  // matches |&S::g| but not |s.g|.
   auto method_ref_matcher = id(
       "expr", declRefExpr(to(method_decl_matcher),
                           // Ignore template substitutions.
@@ -814,7 +1469,7 @@
   //   S s;
   //   s.g();
   //   void (S::*p)() = &S::g;
-  // matches |s.g()| but not |&S::g|.
+  // matches |s.g| but not |&S::g|.
   auto method_member_matcher =
       id("expr", memberExpr(member(method_decl_matcher)));
 
@@ -885,7 +1540,7 @@
   match_finder.addMatcher(unresolved_lookup_matcher,
                           &unresolved_lookup_rewriter);
 
-  // Unresolved member expressions ========
+  // Unresolved member expressions (for non-dependent fields / methods) ========
   // Similar to unresolved lookup expressions, but for methods in a member
   // context, e.g. var_with_templated_type.Method().
   auto unresolved_member_matcher = expr(id(
@@ -899,6 +1554,36 @@
   match_finder.addMatcher(unresolved_member_matcher,
                           &unresolved_member_rewriter);
 
+  // Unresolved using value decls ========
+  // Example:
+  //  template <typename T>
+  //  class BaseClass {
+  //   public:
+  //    unsigned long m_size;
+  //  };
+  //  template <typename T>
+  //  class DerivedClass : protected BaseClass<T> {
+  //   private:
+  //    using Base = BaseClass<T>;
+  //    using Base::m_size;  // <- |m_size| here is matched by
+  //    void method() {      //    |unresolved_using_value_decl_matcher|.
+  //      m_size = 123;  // <- |m_size| here is matched by
+  //    }                //    |unresolved_dependent_using_matcher|.
+  //  };
+  auto unresolved_dependent_using_matcher =
+      expr(id("expr", unresolvedMemberExpr(allOverloadsMatch(allOf(
+                          in_blink_namespace, unresolvedUsingValueDecl())))));
+  UnresolvedDependentMemberRewriter unresolved_dependent_member_rewriter(
+      &replacements);
+  match_finder.addMatcher(unresolved_dependent_using_matcher,
+                          &unresolved_dependent_member_rewriter);
+  auto unresolved_using_value_decl_matcher =
+      decl(id("decl", unresolvedUsingValueDecl(in_blink_namespace)));
+  UnresolvedUsingValueDeclRewriter unresolved_using_value_decl_rewriter(
+      &replacements);
+  match_finder.addMatcher(unresolved_using_value_decl_matcher,
+                          &unresolved_using_value_decl_rewriter);
+
   // Using declarations ========
   // Given
   //   using blink::X;
@@ -911,44 +1596,77 @@
   UsingDeclRewriter using_decl_rewriter(&replacements);
   match_finder.addMatcher(using_decl_matcher, &using_decl_rewriter);
 
+  // Matches any QualType that refers to a blink type:
+  // - const blink::Foo&
+  // - blink::Foo*
+  // - blink::Foo<T>
+  auto blink_qual_type_base_matcher = hasBaseType(hasUnqualifiedDesugaredType(
+      anyOf(enumType(hasDeclaration(in_blink_namespace)),
+            injectedClassNameType(hasDeclaration(in_blink_namespace)),
+            recordType(hasDeclaration(in_blink_namespace)),
+            templateSpecializationType(hasDeclaration(in_blink_namespace)),
+            templateTypeParmType(hasDeclaration(in_blink_namespace)))));
+  auto blink_qual_type_matcher = qualType(anyOf(
+      blink_qual_type_base_matcher, pointsTo(blink_qual_type_base_matcher),
+      references(blink_qual_type_base_matcher)));
+
+  // Template-dependent decl lookup ========
+  // Given
+  //   template <typename T> void f() { T::foo(); }
+  // matches |T::foo|.
+  auto dependent_scope_decl_ref_expr_matcher =
+      expr(id("expr", dependentScopeDeclRefExpr(has(nestedNameSpecifier(
+                          specifiesType(blink_qual_type_matcher))))));
+  DependentScopeDeclRefExprRewriter dependent_scope_decl_ref_expr_rewriter(
+      &replacements);
+  match_finder.addMatcher(dependent_scope_decl_ref_expr_matcher,
+                          &dependent_scope_decl_ref_expr_rewriter);
+
+  // Template-dependent member lookup ========
+  // Given
+  //   template <typename T>
+  //   class Foo {
+  //     void f() { T::foo(); }
+  //     void g(T x) { x.bar(); }
+  //   };
+  // matches |T::foo| and |x.bar|.
+  auto cxx_dependent_scope_member_expr_matcher =
+      expr(id("expr", cxxDependentScopeMemberExpr(
+                          hasMemberFromType(blink_qual_type_matcher))));
+  CXXDependentScopeMemberExprRewriter cxx_dependent_scope_member_expr_rewriter(
+      &replacements);
+  match_finder.addMatcher(cxx_dependent_scope_member_expr_matcher,
+                          &cxx_dependent_scope_member_expr_rewriter);
+
+  // GMock calls lookup ========
+  // Given
+  //   EXPECT_CALL(obj, myMethod(...))
+  // will match obj.gmock_myMethod(...) call generated by the macro
+  // (but only if it mocks a Blink method).
+  auto gmock_member_matcher =
+      id("expr", memberExpr(hasDeclaration(
+                     decl(cxxMethodDecl(mocksMethod(method_decl_matcher))))));
+  GMockMemberRewriter gmock_member_rewriter(&replacements);
+  match_finder.addMatcher(gmock_member_matcher, &gmock_member_rewriter);
+
+  // Prepare and run the tool.
+  SourceFileCallbacks source_file_callbacks(&gmock_member_rewriter);
   std::unique_ptr<clang::tooling::FrontendActionFactory> factory =
-      clang::tooling::newFrontendActionFactory(&match_finder);
+      clang::tooling::newFrontendActionFactory(&match_finder,
+                                               &source_file_callbacks);
   int result = tool.run(factory.get());
   if (result != 0)
     return result;
 
-#if defined(_WIN32)
-  HANDLE lockfd = CreateFile("rewrite-sym.lock", GENERIC_READ, FILE_SHARE_READ,
-                             NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
-  OVERLAPPED overlapped = {};
-  LockFileEx(lockfd, LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &overlapped);
-#else
-  int lockfd = open("rewrite-sym.lock", O_RDWR | O_CREAT, 0666);
-  while (flock(lockfd, LOCK_EX)) {  // :D
-  }
-#endif
-
-  std::ofstream replacement_db_file("rewrite-sym.txt",
-                                    std::ios_base::out | std::ios_base::app);
-  for (const auto& p : field_decl_rewriter.replacement_names())
-    replacement_db_file << "var:" << p.first << ":" << p.second << "\n";
-  for (const auto& p : var_decl_rewriter.replacement_names())
-    replacement_db_file << "var:" << p.first << ":" << p.second << "\n";
-  for (const auto& p : enum_member_decl_rewriter.replacement_names())
-    replacement_db_file << "enu:" << p.first << ":" << p.second << "\n";
-  for (const auto& p : function_decl_rewriter.replacement_names())
-    replacement_db_file << "fun:" << p.first << ":" << p.second << "\n";
-  for (const auto& p : method_decl_rewriter.replacement_names())
-    replacement_db_file << "fun:" << p.first << ":" << p.second << "\n";
-  replacement_db_file.close();
-
-#if defined(_WIN32)
-  UnlockFileEx(lockfd, 0, 1, 0, &overlapped);
-  CloseHandle(lockfd);
-#else
-  flock(lockfd, LOCK_UN);
-  close(lockfd);
-#endif
+  // Supplemental data for the Blink rename rebase helper.
+  // TODO(dcheng): There are a lot of match rewriters missing from this list.
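+  // (Assumption, for illustration: EditTracker serializes entries in the old
+  // "tag:oldName:newName" format, e.g. "var:m_fooBar:foo_bar_".)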
+  llvm::outs() << "==== BEGIN TRACKED EDITS ====\n";
+  field_decl_rewriter.edit_tracker().SerializeTo("var", llvm::outs());
+  var_decl_rewriter.edit_tracker().SerializeTo("var", llvm::outs());
+  enum_member_decl_rewriter.edit_tracker().SerializeTo("enu", llvm::outs());
+  function_decl_rewriter.edit_tracker().SerializeTo("fun", llvm::outs());
+  method_decl_rewriter.edit_tracker().SerializeTo("fun", llvm::outs());
+  llvm::outs() << "==== END TRACKED EDITS ====\n";
 
   // Serialization format is documented in tools/clang/scripts/run_tool.py
   llvm::outs() << "==== BEGIN EDITS ====\n";
diff --git a/tools/clang/rewrite_to_chrome_style/tests/constants-expected.cc b/tools/clang/rewrite_to_chrome_style/tests/constants-expected.cc
index e27982a..4b2f89f 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/constants-expected.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/constants-expected.cc
@@ -33,16 +33,17 @@
     const int kFunctionConstantFromExpression = 4 + 6;
     const int kFunctionConstantFromOtherConsts =
         kFunctionConstant + kFunctionConstantFromExpression;
-    // These don't do the right thing right now, but names like this don't
-    // exist in blink (hopefully).
-    const int kShould_be_renamed_to_a_const = 9 - 2;
-    const int kShould_also_be_renamed_to_a_const =
+    // These are constants, but they are hacker_case, so we just leave them
+    // as-is since the author explicitly chose this style.
+    const int should_not_be_renamed_to_a_const = 9 - 2;
+    const int should_not_also_be_renamed_to_a_const =
         kFunctionConstant + kFunctionConstantFromOtherConsts;
     const int not_compile_time_const = kFunctionConstant + Function();
   }
 };
 
-void F() {
+// |constParam| should not be renamed to |kConstParam|.
+void F(const bool const_param = true) {
   // Constant in function body.
   static const char kStaticString[] = "abc";
   // Constant-style naming, since it's initialized with a literal.
diff --git a/tools/clang/rewrite_to_chrome_style/tests/constants-original.cc b/tools/clang/rewrite_to_chrome_style/tests/constants-original.cc
index bc51bff..8e06731 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/constants-original.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/constants-original.cc
@@ -33,16 +33,17 @@
     const int kFunctionConstantFromExpression = 4 + 6;
     const int kFunctionConstantFromOtherConsts =
         kFunctionConstant + kFunctionConstantFromExpression;
-    // These don't do the right thing right now, but names like this don't
-    // exist in blink (hopefully).
-    const int should_be_renamed_to_a_const = 9 - 2;
-    const int should_also_be_renamed_to_a_const =
+    // These are constants, but they are hacker_case, so we just leave them
+    // as-is since the author explicitly chose this style.
+    const int should_not_be_renamed_to_a_const = 9 - 2;
+    const int should_not_also_be_renamed_to_a_const =
         kFunctionConstant + kFunctionConstantFromOtherConsts;
     const int not_compile_time_const = kFunctionConstant + Function();
   }
 };
 
-void F() {
+// |constParam| should not be renamed to |kConstParam|.
+void F(const bool constParam = true) {
   // Constant in function body.
   static const char staticString[] = "abc";
   // Constant-style naming, since it's initialized with a literal.
diff --git a/tools/clang/rewrite_to_chrome_style/tests/fields-expected.cc b/tools/clang/rewrite_to_chrome_style/tests/fields-expected.cc
index faef4ec..e59d532 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/fields-expected.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/fields-expected.cc
@@ -145,6 +145,21 @@
       !kIsGarbageCollected ? kRefCountedLifetime : kGarbageCollectedLifetime;
 };
 
+template <typename T>
+struct GenericHashTraitsBase {
+  // We don't want to capitalize fields in type traits
+  // (i.e. the |value| -> |kValue| rename is undesirable below).
+  // This problem is prevented by the IsCallee heuristic.
+  static const int kWeakHandlingFlag = TypeTrait2<T>::value ? 123 : 456;
+};
+
+template <int Format>
+struct IntermediateFormat {
+  // Some type traits have int type.  The example below is loosely based on
+  // third_party/WebKit/Source/platform/graphics/gpu/WebGLImageConversion.cpp
+  static const int value = (Format == 123) ? 456 : 789;
+};
+
 };  // namespace WTF
 
 void F() {
diff --git a/tools/clang/rewrite_to_chrome_style/tests/fields-original.cc b/tools/clang/rewrite_to_chrome_style/tests/fields-original.cc
index 726c521..d2c1b4d 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/fields-original.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/fields-original.cc
@@ -142,6 +142,21 @@
       !isGarbageCollected ? RefCountedLifetime : GarbageCollectedLifetime;
 };
 
+template <typename T>
+struct GenericHashTraitsBase {
+  // We don't want to capitalize fields in type traits
+  // (i.e. the |value| -> |kValue| rename is undesirable below).
+  // This problem is prevented by the IsCallee heuristic.
+  static const int kWeakHandlingFlag = TypeTrait2<T>::value ? 123 : 456;
+};
+
+template <int Format>
+struct IntermediateFormat {
+  // Some type traits have int type.  The example below is loosely based on
+  // third_party/WebKit/Source/platform/graphics/gpu/WebGLImageConversion.cpp
+  static const int value = (Format == 123) ? 456 : 789;
+};
+
 };  // namespace WTF
 
 void F() {
diff --git a/tools/clang/rewrite_to_chrome_style/tests/function-templates-expected.cc b/tools/clang/rewrite_to_chrome_style/tests/function-templates-expected.cc
index 69a3349..402ae44 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/function-templates-expected.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/function-templates-expected.cc
@@ -21,14 +21,11 @@
  public:
   template <typename U, typename V>
   Checked(const Checked<U, V>& rhs) {
-    // This (incorrectly) doesn't get rewritten, since it's not instantiated. In
-    // this case, the AST representation contains a bunch of
-    // CXXDependentScopeMemberExpr nodes.
-    if (rhs.hasOverflowed())
-      this->overflowed();
-    if (!IsInBounds<T>(rhs.m_value))
-      this->overflowed();
-    value_ = static_cast<T>(rhs.m_value);
+    if (rhs.HasOverflowed())
+      this->Overflowed();
+    if (!IsInBounds<T>(rhs.value_))
+      this->Overflowed();
+    value_ = static_cast<T>(rhs.value_);
   }
 
   bool HasOverflowed() const { return false; }
@@ -39,12 +36,29 @@
 };
 
 template <typename To, typename From>
-To Bitwise_cast(From from) {
-  static_assert(sizeof(To) == sizeof(From));
+To bitwise_cast(From from) {
+  static_assert(sizeof(To) == sizeof(From), "msg");
   return reinterpret_cast<To>(from);
 }
 
 }  // namespace WTF
 
-using WTF::Bitwise_cast;
+namespace mojo {
+
+template <typename U>
+struct ArrayTraits;
+
+template <typename U>
+struct ArrayTraits<WTF::Checked<U, int>> {
+  static bool HasOverflowed(WTF::Checked<U, int>& input) {
+    // |hasOverflowed| below should be rewritten to |HasOverflowed|
+    // (because this is a method of WTF::Checked; it doesn't matter
+    // that we are not in the WTF namespace *here*).
+    return input.HasOverflowed();
+  }
+};
+
+}  // namespace mojo
+
+using WTF::bitwise_cast;
 using WTF::SafeCast;
diff --git a/tools/clang/rewrite_to_chrome_style/tests/function-templates-original.cc b/tools/clang/rewrite_to_chrome_style/tests/function-templates-original.cc
index 71267b0..80e244b 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/function-templates-original.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/function-templates-original.cc
@@ -21,9 +21,6 @@
  public:
   template<typename U, typename V>
   Checked(const Checked<U, V>& rhs){
-    // This (incorrectly) doesn't get rewritten, since it's not instantiated. In
-    // this case, the AST representation contains a bunch of
-    // CXXDependentScopeMemberExpr nodes.
     if (rhs.hasOverflowed())
       this->overflowed();
     if (!isInBounds<T>(rhs.m_value))
@@ -40,11 +37,28 @@
 
 template<typename To, typename From>
 To bitwise_cast(From from) {
-  static_assert(sizeof(To) == sizeof(From));
+  static_assert(sizeof(To) == sizeof(From), "msg");
   return reinterpret_cast<To>(from);
 }
 
 }  // namespace WTF
 
+namespace mojo {
+
+template <typename U>
+struct ArrayTraits;
+
+template <typename U>
+struct ArrayTraits<WTF::Checked<U, int>> {
+  static bool HasOverflowed(WTF::Checked<U, int>& input) {
+    // |hasOverflowed| below should be rewritten to |HasOverflowed|
+    // (because this is a method of WTF::Checked; it doesn't matter
+    // that we are not in the WTF namespace *here*).
+    return input.hasOverflowed();
+  }
+};
+
+}  // namespace mojo
+
 using WTF::bitwise_cast;
 using WTF::safeCast;
diff --git a/tools/clang/rewrite_to_chrome_style/tests/gmock-expected.cc b/tools/clang/rewrite_to_chrome_style/tests/gmock-expected.cc
new file mode 100644
index 0000000..e9f1b04
--- /dev/null
+++ b/tools/clang/rewrite_to_chrome_style/tests/gmock-expected.cc
@@ -0,0 +1,28 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gmock/gmock.h"
+
+namespace blink {
+
+class Interface {
+ public:
+  virtual void MyMethod(int my_param) {}
+};
+
+class MockedInterface : public Interface {
+ public:
+  MOCK_METHOD1(MyMethod, void(int));
+};
+
+void Test() {
+  MockedInterface mocked_interface;
+  EXPECT_CALL(mocked_interface, MyMethod(1));
+  EXPECT_CALL(
+      mocked_interface,  // A comment to prevent reformatting into single line.
+      MyMethod(1));
+  mocked_interface.MyMethod(123);
+}
+
+}  // namespace blink
diff --git a/tools/clang/rewrite_to_chrome_style/tests/gmock-original.cc b/tools/clang/rewrite_to_chrome_style/tests/gmock-original.cc
new file mode 100644
index 0000000..102bf1a
--- /dev/null
+++ b/tools/clang/rewrite_to_chrome_style/tests/gmock-original.cc
@@ -0,0 +1,28 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gmock/gmock.h"
+
+namespace blink {
+
+class Interface {
+ public:
+  virtual void myMethod(int my_param) {}
+};
+
+class MockedInterface : public Interface {
+ public:
+  MOCK_METHOD1(myMethod, void(int));
+};
+
+void test() {
+  MockedInterface mockedInterface;
+  EXPECT_CALL(mockedInterface, myMethod(1));
+  EXPECT_CALL(
+      mockedInterface,  // A comment to prevent reformatting into single line.
+      myMethod(1));
+  mockedInterface.myMethod(123);
+}
+
+}  // namespace blink
diff --git a/tools/clang/rewrite_to_chrome_style/tests/macros-expected.cc b/tools/clang/rewrite_to_chrome_style/tests/macros-expected.cc
index 2dcda6f..7136069 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/macros-expected.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/macros-expected.cc
@@ -2,11 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Identifiers in macros should never be rewritten, as the risk of things
-// breaking is extremely high.
-
 #define DEFINE_TYPE_CASTS(thisType, argumentType, argumentName, predicate) \
-  inline thisType* to##thisType(argumentType* argumentName) {              \
+  inline thisType* To##thisType(argumentType* argumentName) {              \
     if (!predicate)                                                        \
       asm("int 3");                                                        \
     return static_cast<thisType*>(argumentName);                           \
@@ -26,9 +23,7 @@
 
 void F() {
   Base* base_ptr = new Derived;
-  // 'toDerived' should not be renamed, since the definition lives inside
-  // a macro invocation.
-  Derived* derived_ptr = toDerived(base_ptr);
+  Derived* derived_ptr = ToDerived(base_ptr);
   long long as_int = ToInt(base_ptr);
   // 'derivedPtr' should be renamed: it's a reference to a declaration defined
   // outside a macro invocation.
@@ -50,4 +45,47 @@
   CALL_METHOD_FROM_MACRO();
 };
 
+#define DEFINE_WITH_TOKEN_CONCATENATION2(arg1, arg2) \
+  void arg1##arg2() {}
+// We definitely don't want to rewrite |arg1| on the previous line into
+// either |Arg1| or |Frg1| or |Brg1| or |Foo| or |Baz|.
+
+// We might or might not want to rewrite |foo|->|Foo| and |baz|->|Baz| below.
+// The test below just spells out the current behavior of the tool (which one
+// can argue is accidental).
+DEFINE_WITH_TOKEN_CONCATENATION2(foo, Bar1)
+DEFINE_WITH_TOKEN_CONCATENATION2(baz, Bar2)
+
+void TokenConcatenationTest2() {
+  // We might or might not want to rewrite |foo|->|Foo| and |baz|->|Baz| below.
+  // The test below just spells out the current behavior of the tool (which one
+  // can argue is accidental).
+  fooBar1();
+  bazBar2();
+}
+
+class FieldsMacro {
+ public:
+  // We shouldn't rewrite |m_fooBar| -> |foo_bar_|, because we cannot rewrite
+  // |m_##name| -> |???|.
+  FieldsMacro() : m_fooBar(123), m_barBaz(456) {}
+
+#define DECLARE_FIELD(name, Name) \
+ private:                         \
+  int m_##name;                   \
+                                  \
+ public:                          \
+  int name() { return m_##name; } \
+  void Set##Name(int value) { m_##name = value; }
+
+  DECLARE_FIELD(FooBar, FooBar)
+  DECLARE_FIELD(BarBaz, BarBaz)
+};
+
+int FieldsMacroTest() {
+  FieldsMacro fm;
+  fm.SetFooBar(789);
+  return fm.FooBar() + fm.BarBaz();
+}
+
 }  // namespace blink
diff --git a/tools/clang/rewrite_to_chrome_style/tests/macros-original.cc b/tools/clang/rewrite_to_chrome_style/tests/macros-original.cc
index ab68484..8a924eb 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/macros-original.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/macros-original.cc
@@ -2,9 +2,6 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Identifiers in macros should never be rewritten, as the risk of things
-// breaking is extremely high.
-
 #define DEFINE_TYPE_CASTS(thisType, argumentType, argumentName, predicate) \
   inline thisType* to##thisType(argumentType* argumentName) {              \
     if (!predicate)                                                        \
@@ -26,8 +23,6 @@
 
 void F() {
   Base* basePtr = new Derived;
-  // 'toDerived' should not be renamed, since the definition lives inside
-  // a macro invocation.
   Derived* derivedPtr = toDerived(basePtr);
   long long asInt = toInt(basePtr);
   // 'derivedPtr' should be renamed: it's a reference to a declaration defined
@@ -50,4 +45,47 @@
   CALL_METHOD_FROM_MACRO();
 };
 
+#define DEFINE_WITH_TOKEN_CONCATENATION2(arg1, arg2) \
+  void arg1##arg2() {}
+// We definitely don't want to rewrite |arg1| on the previous line into
+// either |Arg1| or |Frg1| or |Brg1| or |Foo| or |Baz|.
+
+// We might or might not want to rewrite |foo|->|Foo| and |baz|->|Baz| below.
+// The test below just spells out the current behavior of the tool (which one
+// can argue is accidental).
+DEFINE_WITH_TOKEN_CONCATENATION2(foo, Bar1)
+DEFINE_WITH_TOKEN_CONCATENATION2(baz, Bar2)
+
+void tokenConcatenationTest2() {
+  // We might or might not want to rewrite |foo|->|Foo| and |baz|->|Baz| below.
+  // The test below just spells out the current behavior of the tool (which one
+  // can argue is accidental).
+  fooBar1();
+  bazBar2();
+}
+
+class FieldsMacro {
+ public:
+  // We shouldn't rewrite |m_fooBar| -> |foo_bar_|, because we cannot rewrite
+  // |m_##name| -> |???|.
+  FieldsMacro() : m_fooBar(123), m_barBaz(456) {}
+
+#define DECLARE_FIELD(name, Name) \
+ private:                         \
+  int m_##name;                   \
+                                  \
+ public:                          \
+  int name() { return m_##name; } \
+  void set##Name(int value) { m_##name = value; }
+
+  DECLARE_FIELD(fooBar, FooBar)
+  DECLARE_FIELD(barBaz, BarBaz)
+};
+
+int fieldsMacroTest() {
+  FieldsMacro fm;
+  fm.setFooBar(789);
+  return fm.fooBar() + fm.barBaz();
+}
+
 }  // namespace blink
diff --git a/tools/clang/rewrite_to_chrome_style/tests/methods-expected.cc b/tools/clang/rewrite_to_chrome_style/tests/methods-expected.cc
index 569e446..af33c2b 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/methods-expected.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/methods-expected.cc
@@ -50,12 +50,13 @@
 
   // These are special functions that we don't rename so that range-based
   // for loops and STL things work.
-  MyIterator begin() {}
-  my_iterator end() {}
-  my_iterator rbegin() {}
-  MyIterator rend() {}
-  // The trace() method is used by Oilpan, we shouldn't rename it.
-  void trace() {}
+  MyIterator begin() { return {}; }
+  my_iterator end() { return {}; }
+  my_iterator rbegin() { return {}; }
+  MyIterator rend() { return {}; }
+  // The trace() method is used by Oilpan, but we plan to tweak Oilpan's
+  // clang plugin so that it recognizes the new method name.
+  void Trace() {}
   // These are used by std::unique_lock and std::lock_guard.
   void lock() {}
   void unlock() {}
@@ -64,20 +65,12 @@
 
 class Other {
   // Static begin/end/trace don't count, and should be renamed.
-  static MyIterator Begin() {}
-  static my_iterator End() {}
+  static MyIterator Begin() { return {}; }
+  static my_iterator End() { return {}; }
   static void Trace() {}
   static void Lock() {}
 };
 
-class NonIterators {
-  // begin()/end() and friends are renamed if they don't return an iterator.
-  void Begin() {}
-  int End() { return 0; }
-  void Rbegin() {}
-  int Rend() { return 0; }
-};
-
 // Test that the actual method definition is also updated.
 void Task::DoTheWork() {
   ReallyDoTheWork();
@@ -180,6 +173,10 @@
   class Baz {};
   class FooBar {};
 
+  // Should be renamed to GetReadyState, because of the
+  // ShouldPrefixFunctionName heuristic.
+  int GetReadyState() { return 123; }
+
   template <typename T>
   class MyRefPtr {};
 
@@ -197,6 +194,80 @@
   MyRefPtr<FooBar> foobar_;
 };
 
+namespace get_prefix_vs_inheritance {
+
+// Regression test for https://crbug.com/673031:
+// 1. |frame| accessor/method should be renamed in the same way for
+//    WebFrameImplBase and WebLocalFrameImpl.
+// 2. Need to rename |frame| to |GetFrame| (not to |Frame|) to avoid
+//    a conflict with the Frame type.
+
+class FrameFoo {};
+class LocalFrame : public FrameFoo {};
+
+class WebFrameImplBase {
+ public:
+  // Using |frameFoo| to test inheritance, and NOT just the presence on the
+  // ShouldPrefixFunctionName list.
+  virtual FrameFoo* GetFrameFoo() const = 0;
+};
+
+class WebLocalFrameImpl : public WebFrameImplBase {
+ public:
+  LocalFrame* GetFrameFoo() const override { return nullptr; }
+};
+
+// This is also a regression test for https://crbug.com/673031.  We should NOT
+// rewrite in a non-virtual case, because walking the inheritance chain of the
+// return type depends too much on unrelated context (i.e. walking the
+// inheritance chain might not be possible if the return type is
+// forward-declared).
+class LayoutObjectFoo {};
+class LayoutBoxModelObject : public LayoutObjectFoo {};
+class PaintLayerStackingNode {
+ public:
+  // |layoutObjectFoo| should NOT be renamed to |GetLayoutObjectFoo| (just to
+  // |LayoutObjectFoo|) - see the big comment above.  We use layoutObject*Foo*
+  // to test inheritance-related behavior and avoid testing whether method name
+  // is covered via ShouldPrefixFunctionName.
+  LayoutBoxModelObject* LayoutObjectFoo() { return nullptr; }
+};
+
+}  // namespace get_prefix_vs_inheritance
+
+namespace blacklisting_of_method_and_function_names {
+
+class Foo {
+  // Expecting the |swap| method to be renamed to |Swap| - we blacklist
+  // renaming of the |swap| *function*, because it needs to have the same
+  // casing as std::swap, so that ADL can kick in and pull it from another
+  // namespace depending on the argument.  We have a choice to rename or not
+  // rename |swap| *methods* - we chose to rename to be consistent (i.e. we
+  // rename |clear| -> |Clear|) and because the Google C++ Style Guide uses
+  // "Swap" in examples.
+  void Swap() {}
+  static void Swap(Foo& x, Foo& y) {}
+
+  // We don't rename |begin|, so that <algorithm> and other templates that
+  // expect |begin|, |end|, etc. continue to work.  This is only necessary
+  // for instance methods - renaming static methods and functions is okay.
+  void begin() {}
+  static void Begin(int x) {}
+
+  // https://crbug.com/672902: std-like names should not be rewritten.
+  void emplace_back(int x) {}
+  void insert(int x) {}
+  void push_back(int x) {}
+  int* back() { return nullptr; }
+  int* front() { return nullptr; }
+  void erase() {}
+  bool empty() { return true; }
+};
+
+void Begin(int x) {}
+void swap(Foo& x, Foo& y) {}
+
+}  // namespace blacklisting_of_method_and_function_names
+
 }  // namespace blink
 
 namespace WTF {
diff --git a/tools/clang/rewrite_to_chrome_style/tests/methods-original.cc b/tools/clang/rewrite_to_chrome_style/tests/methods-original.cc
index bc9255b..2c9e4ce 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/methods-original.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/methods-original.cc
@@ -54,11 +54,12 @@
 
   // These are special functions that we don't rename so that range-based
   // for loops and STL things work.
-  MyIterator begin() {}
-  my_iterator end() {}
-  my_iterator rbegin() {}
-  MyIterator rend() {}
-  // The trace() method is used by Oilpan, we shouldn't rename it.
+  MyIterator begin() { return {}; }
+  my_iterator end() { return {}; }
+  my_iterator rbegin() { return {}; }
+  MyIterator rend() { return {}; }
+  // The trace() method is used by Oilpan, but we plan to tweak Oilpan's
+  // clang plugin so that it recognizes the new method name.
   void trace() {}
   // These are used by std::unique_lock and std::lock_guard.
   void lock() {}
@@ -68,20 +69,12 @@
 
 class Other {
   // Static begin/end/trace don't count, and should be renamed.
-  static MyIterator begin() {}
-  static my_iterator end() {}
+  static MyIterator begin() { return {}; }
+  static my_iterator end() { return {}; }
   static void trace() {}
   static void lock() {}
 };
 
-class NonIterators {
-  // begin()/end() and friends are renamed if they don't return an iterator.
-  void begin() {}
-  int end() { return 0; }
-  void rbegin() {}
-  int rend() { return 0; }
-};
-
 // Test that the actual method definition is also updated.
 void Task::doTheWork() {
   reallyDoTheWork();
@@ -184,6 +177,10 @@
   class Baz {};
   class FooBar {};
 
+  // Should be renamed to GetReadyState, because of the
+  // ShouldPrefixFunctionName heuristic.
+  int readyState() { return 123; }
+
   template <typename T>
   class MyRefPtr {};
 
@@ -201,6 +198,80 @@
   MyRefPtr<FooBar> foobar_;
 };
 
+namespace get_prefix_vs_inheritance {
+
+// Regression test for https://crbug.com/673031:
+// 1. |frame| accessor/method should be renamed in the same way for
+//    WebFrameImplBase and WebLocalFrameImpl.
+// 2. Need to rename |frame| to |GetFrame| (not to |Frame|) to avoid
+//    a conflict with the Frame type.
+
+class FrameFoo {};
+class LocalFrame : public FrameFoo {};
+
+class WebFrameImplBase {
+ public:
+  // Using |frameFoo| to test inheritance, and NOT just the presence on the
+  // ShouldPrefixFunctionName list.
+  virtual FrameFoo* frameFoo() const = 0;
+};
+
+class WebLocalFrameImpl : public WebFrameImplBase {
+ public:
+  LocalFrame* frameFoo() const override { return nullptr; }
+};
+
+// This is also a regression test for https://crbug.com/673031.  We should NOT
+// rewrite in a non-virtual case, because walking the inheritance chain of the
+// return type depends too much on unrelated context (i.e. walking the
+// inheritance chain might not be possible if the return type is
+// forward-declared).
+class LayoutObjectFoo {};
+class LayoutBoxModelObject : public LayoutObjectFoo {};
+class PaintLayerStackingNode {
+ public:
+  // |layoutObjectFoo| should NOT be renamed to |GetLayoutObjectFoo| (just to
+  // |LayoutObjectFoo|) - see the big comment above.  We use layoutObject*Foo*
+  // to test inheritance-related behavior and avoid testing whether method name
+  // is covered via ShouldPrefixFunctionName.
+  LayoutBoxModelObject* layoutObjectFoo() { return nullptr; }
+};
+
+}  // namespace get_prefix_vs_inheritance
+
+namespace blacklisting_of_method_and_function_names {
+
+class Foo {
+  // Expecting the |swap| method to be renamed to |Swap| - we blacklist
+  // renaming of the |swap| *function*, because it needs to have the same
+  // casing as std::swap, so that ADL can kick in and pull it from another
+  // namespace depending on the argument.  We have a choice to rename or not
+  // rename |swap| *methods* - we chose to rename to be consistent (i.e. we
+  // rename |clear| -> |Clear|) and because the Google C++ Style Guide uses
+  // "Swap" in examples.
+  void swap() {}
+  static void swap(Foo& x, Foo& y) {}
+
+  // We don't rename |begin|, so that <algorithm> and other templates that
+  // expect |begin|, |end|, etc. continue to work.  This is only necessary
+  // for instance methods - renaming static methods and functions is okay.
+  void begin() {}
+  static void begin(int x) {}
+
+  // https://crbug.com/672902: std-like names should not be rewritten.
+  void emplace_back(int x) {}
+  void insert(int x) {}
+  void push_back(int x) {}
+  int* back() { return nullptr; }
+  int* front() { return nullptr; }
+  void erase() {}
+  bool empty() { return true; }
+};
+
+void begin(int x) {}
+void swap(Foo& x, Foo& y) {}
+
+}  // namespace blacklisting_of_method_and_function_names
+
 }  // namespace blink
 
 namespace WTF {
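
The methods-* pair above exercises several rename rules at once: ordinary
methods become CapitalizedCamelCase, instance methods with STL-like names keep
their casing, a |swap| free function keeps std::swap's casing for ADL, and
names that would collide with a type (or sit on the ShouldPrefixFunctionName
list) gain a Get prefix. The Python sketch below is for orientation only - the
real rewriter is a clang AST tool written in C++, and the name sets and helper
here are illustrative assumptions, not its actual tables.

STL_LIKE_NAMES = frozenset(['begin', 'end', 'rbegin', 'rend', 'lock',
                            'unlock', 'emplace_back', 'insert', 'push_back',
                            'back', 'front', 'erase', 'empty'])
# One example entry from the ShouldPrefixFunctionName list; |trace| is no
# longer special-cased and gets renamed to |Trace|, per the Oilpan comment.
PREFIX_WITH_GET = frozenset(['readyState'])


def RenameFunction(name, is_instance_method, conflicting_type_names=()):
  # Instance methods with STL-like names keep their casing so range-based
  # for loops, <algorithm> and std::lock_guard keep working; static methods
  # and free functions with these names are still renamed.
  if is_instance_method and name in STL_LIKE_NAMES:
    return name
  # A |swap| free function keeps std::swap's casing so ADL can find it;
  # |swap| methods are renamed for consistency with |clear| -> |Clear|.
  if not is_instance_method and name == 'swap':
    return name
  capitalized = name[0].upper() + name[1:]
  # A name that would collide with a type (|frame| vs the Frame class), or
  # that sits on the ShouldPrefixFunctionName list, gets a Get prefix.
  if capitalized in conflicting_type_names or name in PREFIX_WITH_GET:
    return 'Get' + capitalized
  return capitalized


assert RenameFunction('begin', True) == 'begin'
assert RenameFunction('begin', False) == 'Begin'
assert RenameFunction('swap', True) == 'Swap'
assert RenameFunction('swap', False) == 'swap'
assert RenameFunction('frame', True, ('Frame',)) == 'GetFrame'
assert RenameFunction('readyState', True) == 'GetReadyState'
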
diff --git a/tools/clang/rewrite_to_chrome_style/tests/namespaces-expected.cc b/tools/clang/rewrite_to_chrome_style/tests/namespaces-expected.cc
index da13adc..679731e 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/namespaces-expected.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/namespaces-expected.cc
@@ -12,6 +12,11 @@
 void Foo();
 }  // namespace nested
 
+// blink::protocol namespace is blacklisted.
+namespace protocol {
+void foo();
+}  // namespace protocol
+
 }  // namespace blink
 
 namespace WTF {
diff --git a/tools/clang/rewrite_to_chrome_style/tests/namespaces-original.cc b/tools/clang/rewrite_to_chrome_style/tests/namespaces-original.cc
index ff24012..0383450 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/namespaces-original.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/namespaces-original.cc
@@ -12,6 +12,11 @@
 void foo();
 }  // namespace nested
 
+// blink::protocol namespace is blacklisted.
+namespace protocol {
+void foo();
+}  // namespace protocol
+
 }  // namespace blink
 
 namespace WTF {
diff --git a/tools/clang/rewrite_to_chrome_style/tests/template-expected.cc b/tools/clang/rewrite_to_chrome_style/tests/template-expected.cc
index c9a1fe8..5c9103d 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/template-expected.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/template-expected.cc
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <type_traits>
+
 namespace not_blink {
 
 void function(int x) {}
@@ -9,6 +11,7 @@
 class Class {
  public:
   void method() {}
+  virtual void virtualMethod() {}
   template <typename T>
   void methodTemplate(T) {}
   template <typename T>
@@ -18,24 +21,95 @@
 template <typename T>
 void functionTemplate(T x) {}
 
+template <typename T = Class>
+void functionTemplate2() {
+  T::staticMethodTemplate(123);
+}
+
+template <typename T = Class>
+class TemplatedClass {
+ public:
+  void anotherMethod() { T::staticMethodTemplate(123); }
+};
+
 }  // not_blink
 
 namespace blink {
 
+bool FunctionNotMarkedConstexpr(int a) {
+  return a == 4 || a == 10;
+}
+
+template <typename T>
+bool TemplatedFunctionNotMarkedConstexpr(T t) {
+  return !!t;
+}
+
+int g_global_number;
+
 template <typename T, int number>
 void F() {
-  // We don't assert on this, and we don't end up considering it a const for
-  // now.
+  // These are const but hacker_case so we leave them alone.
   const int maybe_a_const = sizeof(T);
   const int is_a_const = number;
+  // These are const expressions so they get a k prefix.
+  const int kMaybeAConstToo = sizeof(T);
+  const int kIsAConstToo = number;
+  // These are built from calls to functions, which can produce inconsistent
+  // results, so to be safe they should not be considered const.
+  const bool from_a_method = FunctionNotMarkedConstexpr(number);
+  const bool from_a_templated_method =
+      TemplatedFunctionNotMarkedConstexpr(number);
+  // A complex statement of const things is const.
+  const bool kComplexConst = number || (number + 1);
+  // A complex statement with a non-const thing is not const.
+  const bool complex_not_const = number || (g_global_number + 1);
+  // A const built from other consts is a const.
+  const bool kConstFromAConst = kComplexConst || number;
 }
 
 template <int number, typename... T>
 void F() {
-  // We don't assert on this, and we don't end up considering it a const for
-  // now.
+  // These are const but hacker_case so we leave them alone.
   const int maybe_a_const = sizeof...(T);
   const int is_a_const = number;
+  // These are const expressions so they get a k prefix.
+  const int kMaybeAConstToo = sizeof...(T);
+  const int kIsAConstToo = number;
+}
+
+namespace test_member_in_template {
+
+template <typename T>
+class HasAMember {
+ public:
+  HasAMember() {}
+  HasAMember(const T&) {}
+
+  void UsesMember() { const int not_const = i_; }
+  void AlsoUsesMember();
+
+ private:
+  int i_;
+};
+
+template <typename T>
+void HasAMember<T>::AlsoUsesMember() {
+  const int not_const = i_;
+}
+
+template <typename T>
+static void BasedOnSubType(const HasAMember<T>& t) {
+  const HasAMember<T> problematic_not_const(t);
+}
+
+void Run() {
+  HasAMember<int>().UsesMember();
+
+  BasedOnSubType<int>(HasAMember<int>());
+  enum E { A };
+  BasedOnSubType<E>(HasAMember<E>());
+}
 }
 
 namespace test_template_arg_is_function {
@@ -52,6 +126,15 @@
   H<int, F>(0);
   // Non-Blink should stay the same.
   H<int, not_blink::function>(1);
+
+  // The int instantiation makes the methods called from F() count as
+  // constexpr, and it can be collapsed to not have template arguments before
+  // it reaches the AST.
+  F<int, 10>();
+  // The enum one makes them not constexpr, as it doesn't collapse away the
+  // template stuff as much. This can lead to conflicting decisions about
+  // the names inside F() vs the above instantiation.
+  enum E { A };
+  F<E, 11>();
 }
 
 }  // namespace test_template_arg_is_function
@@ -119,6 +202,25 @@
 
 }  // test_template_arg_is_method_template_in_non_member_context
 
+namespace test_inherited_field {
+
+template <typename T>
+class BaseClass {
+ public:
+  unsigned long size_;
+};
+
+template <typename T>
+class DerivedClass : protected BaseClass<T> {
+ private:
+  using Base = BaseClass<T>;
+  // https://crbug.com/640016: Need to rewrite |m_size| into |size_|.
+  using Base::size_;
+  void Method() { size_ = 123; }
+};
+
+}  // namespace test_inherited_field
+
 namespace test_template_arg_is_method_template_in_member_context {
 
 struct Class {
@@ -161,4 +263,118 @@
 
 }  // namespace test_unnamed_arg
 
+namespace cxx_dependent_scope_member_expr_testing {
+
+class PartitionAllocator {
+ public:
+  static void Method() {}
+};
+
+template <typename Allocator = PartitionAllocator>
+class Vector {
+ public:
+  // https://crbug.com/582315: |Allocator::method| is a
+  // CXXDependentScopeMemberExpr.
+  void AnotherMethod() {
+    if (std::is_class<Allocator>::value)  // Shouldn't rename |value|
+      Allocator::Method();                // Should rename |method| -> |Method|.
+  }
+};
+
+template <typename Allocator = PartitionAllocator>
+void Test() {
+  // https://crbug.com/582315: |Allocator::method| is a
+  // DependentScopeDeclRefExpr.
+  if (std::is_class<Allocator>::value)  // Shouldn't rename |value|.
+    Allocator::Method();                // Should rename |method|.
+}
+
+class InterceptingCanvasBase : public ::not_blink::Class {
+ public:
+  virtual void VirtualMethodInBlink(){};
+};
+
+template <typename DerivedCanvas>
+class InterceptingCanvas : public InterceptingCanvasBase {
+ public:
+  void virtualMethod() override {
+    this->Class::virtualMethod();  // https://crbug.com/582315#c19
+    this->InterceptingCanvasBase::VirtualMethodInBlink();
+  }
+};
+
+template <typename T>
+class ThreadSpecific {
+ public:
+  T* operator->();
+  operator T*();
+};
+
+template <typename T>
+inline ThreadSpecific<T>::operator T*() {
+  return nullptr;
+}
+
+template <typename T>
+inline T* ThreadSpecific<T>::operator->() {
+  return operator T*();
+}
+
+class Class {
+ public:
+  virtual void VirtualMethodInBlink() {}
+};
+
+}  // namespace cxx_dependent_scope_member_expr_testing
+
+namespace blacklisting_of_renaming_of_begin_method {
+
+template <typename T>
+class IntrusiveHeap {
+ public:
+  // https://crbug.com/672353: |begin| shouldn't be rewritten to |Begin|.
+  const T* begin() const { return nullptr; }
+};
+
+}  // namespace blacklisting_of_renaming_of_begin_method
+
 }  // namespace blink
+
+namespace not_blink {
+
+namespace cxx_dependent_scope_member_expr_testing {
+
+class Base : public ::blink::cxx_dependent_scope_member_expr_testing::Class {
+ public:
+  virtual void virtualMethod() {}
+};
+
+template <typename T>
+class Derived : public Base {
+ public:
+  void virtualMethod() override {
+    this->Class::VirtualMethodInBlink();
+    this->Base::virtualMethod();
+  }
+};
+
+}  // namespace cxx_dependent_scope_member_expr_testing
+
+namespace blink_methods_called_from_mojo_traits_are_not_rewritten {
+
+template <typename V>
+struct MapTraits;
+
+template <typename V>
+struct MapTraits<blink::test_unnamed_arg::Class<V>> {
+  static void SetToEmpty(blink::test_unnamed_arg::Class<V>* output) {
+    // Need to rewrite |f| to |F| below (because this method name
+    // does get rewritten when processing blink::test_unnamed_arg::Class).
+    // See also https://crbug.com/670434.
+    output->F(123);
+  }
+};
+
+}  // namespace blink_methods_called_from_mojo_traits_are_not_rewritten
+
+}  // namespace not_blink
diff --git a/tools/clang/rewrite_to_chrome_style/tests/template-original.cc b/tools/clang/rewrite_to_chrome_style/tests/template-original.cc
index 561fec4..47aef5d 100644
--- a/tools/clang/rewrite_to_chrome_style/tests/template-original.cc
+++ b/tools/clang/rewrite_to_chrome_style/tests/template-original.cc
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <type_traits>
+
 namespace not_blink {
 
 void function(int x) {}
@@ -9,6 +11,7 @@
 class Class {
  public:
   void method() {}
+  virtual void virtualMethod() {}
   template <typename T>
   void methodTemplate(T) {}
   template <typename T>
@@ -18,24 +21,94 @@
 template <typename T>
 void functionTemplate(T x) {}
 
+template <typename T = Class>
+void functionTemplate2() {
+  T::staticMethodTemplate(123);
+}
+
+template <typename T = Class>
+class TemplatedClass {
+ public:
+  void anotherMethod() { T::staticMethodTemplate(123); }
+};
+
 }  // not_blink
 
 namespace blink {
 
+bool functionNotMarkedConstexpr(int a) {
+  return a == 4 || a == 10;
+}
+
+template <typename T>
+bool templatedFunctionNotMarkedConstexpr(T t) {
+  return !!t;
+}
+
+int g_globalNumber;
+
 template <typename T, int number>
 void F() {
-  // We don't assert on this, and we don't end up considering it a const for
-  // now.
+  // These are const but hacker_case so we leave them alone.
   const int maybe_a_const = sizeof(T);
   const int is_a_const = number;
+  // These are const expressions so they get a k prefix.
+  const int maybeAConstToo = sizeof(T);
+  const int isAConstToo = number;
+  // These are built from calls to functions, which can produce inconsistent
+  // results, so to be safe they should not be considered const.
+  const bool fromAMethod = functionNotMarkedConstexpr(number);
+  const bool fromATemplatedMethod = templatedFunctionNotMarkedConstexpr(number);
+  // A complex statement of const things is const.
+  const bool complexConst = number || (number + 1);
+  // A complex statement with a non-const thing is not const.
+  const bool complexNotConst = number || (g_globalNumber + 1);
+  // A const built from other consts is a const.
+  const bool constFromAConst = complexConst || number;
 }
 
 template <int number, typename... T>
 void F() {
-  // We don't assert on this, and we don't end up considering it a const for
-  // now.
+  // These are const but hacker_case so we leave them alone.
   const int maybe_a_const = sizeof...(T);
   const int is_a_const = number;
+  // These are const expressions so they get a k prefix.
+  const int maybeAConstToo = sizeof...(T);
+  const int isAConstToo = number;
+}
+
+namespace test_member_in_template {
+
+template <typename T>
+class HasAMember {
+ public:
+  HasAMember() {}
+  HasAMember(const T&) {}
+
+  void usesMember() { const int notConst = m_i; }
+  void alsoUsesMember();
+
+ private:
+  int m_i;
+};
+
+template <typename T>
+void HasAMember<T>::alsoUsesMember() {
+  const int notConst = m_i;
+}
+
+template <typename T>
+static void basedOnSubType(const HasAMember<T>& t) {
+  const HasAMember<T> problematicNotConst(t);
+}
+
+void Run() {
+  HasAMember<int>().usesMember();
+
+  basedOnSubType<int>(HasAMember<int>());
+  enum E { A };
+  basedOnSubType<E>(HasAMember<E>());
+}
 }
 
 namespace test_template_arg_is_function {
@@ -52,6 +125,15 @@
   h<int, f>(0);
   // Non-Blink should stay the same.
   h<int, not_blink::function>(1);
+
+  // The int instantiation makes the methods called from F() count as
+  // constexpr, and it can be collapsed to not have template arguments before
+  // it reaches the AST.
+  F<int, 10>();
+  // The enum one makes them not constexpr, as it doesn't collapse away the
+  // template stuff as much. This can lead to conflicting decisions about
+  // the names inside F() vs the above instantiation.
+  enum E { A };
+  F<E, 11>();
 }
 
 }  // namespace test_template_arg_is_function
@@ -119,6 +201,25 @@
 
 }  // test_template_arg_is_method_template_in_non_member_context
 
+namespace test_inherited_field {
+
+template <typename T>
+class BaseClass {
+ public:
+  unsigned long m_size;
+};
+
+template <typename T>
+class DerivedClass : protected BaseClass<T> {
+ private:
+  using Base = BaseClass<T>;
+  // https://crbug.com/640016: Need to rewrite |m_size| into |size_|.
+  using Base::m_size;
+  void method() { m_size = 123; }
+};
+
+}  // namespace test_inherited_field
+
 namespace test_template_arg_is_method_template_in_member_context {
 
 struct Class {
@@ -161,4 +262,118 @@
 
 }  // namespace test_unnamed_arg
 
+namespace cxx_dependent_scope_member_expr_testing {
+
+class PartitionAllocator {
+ public:
+  static void method() {}
+};
+
+template <typename Allocator = PartitionAllocator>
+class Vector {
+ public:
+  // https://crbug.com/582315: |Allocator::method| is a
+  // CXXDependentScopeMemberExpr.
+  void anotherMethod() {
+    if (std::is_class<Allocator>::value)  // Shouldn't rename |value|
+      Allocator::method();                // Should rename |method| -> |Method|.
+  }
+};
+
+template <typename Allocator = PartitionAllocator>
+void test() {
+  // https://crbug.com/582315: |Allocator::method| is a
+  // DependentScopeDeclRefExpr.
+  if (std::is_class<Allocator>::value)  // Shouldn't rename |value|.
+    Allocator::method();                // Should rename |method|.
+}
+
+class InterceptingCanvasBase : public ::not_blink::Class {
+ public:
+  virtual void virtualMethodInBlink(){};
+};
+
+template <typename DerivedCanvas>
+class InterceptingCanvas : public InterceptingCanvasBase {
+ public:
+  void virtualMethod() override {
+    this->Class::virtualMethod();  // https://crbug.com/582315#c19
+    this->InterceptingCanvasBase::virtualMethodInBlink();
+  }
+};
+
+template <typename T>
+class ThreadSpecific {
+ public:
+  T* operator->();
+  operator T*();
+};
+
+template <typename T>
+inline ThreadSpecific<T>::operator T*() {
+  return nullptr;
+}
+
+template <typename T>
+inline T* ThreadSpecific<T>::operator->() {
+  return operator T*();
+}
+
+class Class {
+ public:
+  virtual void virtualMethodInBlink() {}
+};
+
+}  // namespace cxx_dependent_scope_member_expr_testing
+
+namespace blacklisting_of_renaming_of_begin_method {
+
+template <typename T>
+class IntrusiveHeap {
+ public:
+  // https://crbug.com/672353: |begin| shouldn't be rewritten to |Begin|.
+  const T* begin() const { return nullptr; }
+};
+
+}  // namespace blacklisting_of_renaming_of_begin_method
+
 }  // namespace blink
+
+namespace not_blink {
+
+namespace cxx_dependent_scope_member_expr_testing {
+
+class Base : public ::blink::cxx_dependent_scope_member_expr_testing::Class {
+ public:
+  virtual void virtualMethod() {}
+};
+
+template <typename T>
+class Derived : public Base {
+ public:
+  void virtualMethod() override {
+    this->Class::virtualMethodInBlink();
+    this->Base::virtualMethod();
+  }
+};
+
+}  // namespace cxx_dependent_scope_member_expr_testing
+
+namespace blink_methods_called_from_mojo_traits_are_not_rewritten {
+
+template <typename V>
+struct MapTraits;
+
+template <typename V>
+struct MapTraits<blink::test_unnamed_arg::Class<V>> {
+  static void SetToEmpty(blink::test_unnamed_arg::Class<V>* output) {
+    // Need to rewrite |f| to |F| below (because this method name
+    // does get rewritten when processing blink::test_unnamed_arg::Class).
+    // See also https://crbug.com/670434.
+    output->f(123);
+  }
+};
+
+}  // namespace blink_methods_called_from_mojo_traits_are_not_rewritten
+
+}  // namespace not_blink
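
The const-handling cases in the template-* pair above follow one rule of
thumb: a camelCase local whose initializer is a constant expression becomes
kCamelCase, a camelCase local that is not provably constant falls back to
plain hacker_case, and names already in hacker_case stay untouched. A minimal
sketch of that rule follows; the constexpr check is stubbed out as a boolean
parameter, whereas the real tool asks clang whether the initializer is a
constant expression.

import re


def _CamelToHackerCase(name):
  # fromAMethod -> from_a_method
  return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()


def _RenameLocalConst(name, initializer_is_constant_expression):
  if '_' in name:
    return name  # Already hacker_case - left alone.
  if initializer_is_constant_expression:
    # Constant expressions get the k prefix.
    return 'k' + name[0].upper() + name[1:]
  # Not provably constant (e.g. built from a call to a function that is not
  # marked constexpr), so use the ordinary variable spelling, to be safe.
  return _CamelToHackerCase(name)


assert _RenameLocalConst('maybe_a_const', True) == 'maybe_a_const'
assert _RenameLocalConst('maybeAConstToo', True) == 'kMaybeAConstToo'
assert _RenameLocalConst('fromAMethod', False) == 'from_a_method'
assert _RenameLocalConst('complexNotConst', False) == 'complex_not_const'
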
diff --git a/tools/clang/scripts/apply_edits.py b/tools/clang/scripts/apply_edits.py
new file mode 100755
index 0000000..7d373a9
--- /dev/null
+++ b/tools/clang/scripts/apply_edits.py
@@ -0,0 +1,221 @@
+#!/usr/bin/env python
+# Copyright (c) 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Applies edits generated by a clang tool that was run on Chromium code.
+
+Synopsis:
+
+  cat run_tool.out | extract_edits.py | apply_edits.py <build dir> <filters...>
+
+For example - to apply edits only to WTF sources:
+
+  ... | apply_edits.py out/gn third_party/WebKit/Source/wtf
+
+In addition to filters specified on the command line, the tool also skips edits
+that apply to files that are not covered by git.
+"""
+
+import argparse
+import collections
+import functools
+import multiprocessing
+import os
+import os.path
+import subprocess
+import sys
+
+script_dir = os.path.dirname(os.path.realpath(__file__))
+tool_dir = os.path.abspath(os.path.join(script_dir, '../pylib'))
+sys.path.insert(0, tool_dir)
+
+from clang import compile_db
+
+Edit = collections.namedtuple('Edit',
+                              ('edit_type', 'offset', 'length', 'replacement'))
+
+
+def _GetFilesFromGit(paths=None):
+  """Gets the list of files in the git repository.
+
+  Args:
+    paths: Prefix filter for the returned paths. May contain multiple entries.
+  """
+  args = []
+  if sys.platform == 'win32':
+    args.append('git.bat')
+  else:
+    args.append('git')
+  args.append('ls-files')
+  if paths:
+    args.extend(paths)
+  command = subprocess.Popen(args, stdout=subprocess.PIPE)
+  output, _ = command.communicate()
+  return [os.path.realpath(p) for p in output.splitlines()]
+
+
+def _ParseEditsFromStdin(build_directory):
+  """Extracts generated list of edits from the tool's stdout.
+
+  The expected format is documented at the top of this file.
+
+  Args:
+    build_directory: Directory that contains the compile database. Used to
+      normalize the filenames.
+
+  Returns:
+    A dictionary mapping filenames to the associated edits.
+  """
+  path_to_resolved_path = {}
+  def _ResolvePath(path):
+    if path in path_to_resolved_path:
+      return path_to_resolved_path[path]
+
+    if not os.path.isfile(path):
+      resolved_path = os.path.realpath(os.path.join(build_directory, path))
+    else:
+      resolved_path = path
+
+    if not os.path.isfile(resolved_path):
+      sys.stderr.write('Edit applies to a non-existent file: %s\n' % path)
+      resolved_path = None
+
+    path_to_resolved_path[path] = resolved_path
+    return resolved_path
+
+  edits = collections.defaultdict(list)
+  for line in sys.stdin:
+    line = line.rstrip("\n\r")
+    try:
+      edit_type, path, offset, length, replacement = line.split(':::', 4)
+      replacement = replacement.replace('\0', '\n')
+      path = _ResolvePath(path)
+      if not path: continue
+      edits[path].append(Edit(edit_type, int(offset), int(length), replacement))
+    except ValueError:
+      sys.stderr.write('Unable to parse edit: %s\n' % line)
+  return edits
+
+
+def _ApplyEditsToSingleFile(filename, edits):
+  # Sort the edits and iterate through them in reverse order. Sorting allows
+  # duplicate edits to be quickly skipped, while reversing means that
+  # subsequent edits don't need to have their offsets updated with each edit
+  # applied.
+  edit_count = 0
+  error_count = 0
+  edits.sort()
+  last_edit = None
+  with open(filename, 'rb+') as f:
+    contents = bytearray(f.read())
+    for edit in reversed(edits):
+      if edit == last_edit:
+        continue
+      if (last_edit is not None and edit.edit_type == last_edit.edit_type and
+          edit.offset == last_edit.offset and edit.length == last_edit.length):
+        sys.stderr.write(
+            'Conflicting edit: %s at offset %d, length %d: "%s" != "%s"\n' %
+            (filename, edit.offset, edit.length, edit.replacement,
+             last_edit.replacement))
+        error_count += 1
+        continue
+
+      last_edit = edit
+      contents[edit.offset:edit.offset + edit.length] = edit.replacement
+      if not edit.replacement:
+        _ExtendDeletionIfElementIsInList(contents, edit.offset)
+      edit_count += 1
+    f.seek(0)
+    f.truncate()
+    f.write(contents)
+  return (edit_count, error_count)
+
+
+def _ApplyEdits(edits):
+  """Apply the generated edits.
+
+  Args:
+    edits: A dict mapping filenames to Edit instances that apply to that file.
+  """
+  edit_count = 0
+  error_count = 0
+  done_files = 0
+  for k, v in edits.iteritems():
+    tmp_edit_count, tmp_error_count = _ApplyEditsToSingleFile(k, v)
+    edit_count += tmp_edit_count
+    error_count += tmp_error_count
+    done_files += 1
+    percentage = (float(done_files) / len(edits)) * 100
+    sys.stderr.write('Applied %d edits (%d errors) to %d files [%.2f%%]\r' %
+                     (edit_count, error_count, done_files, percentage))
+
+  sys.stderr.write('\n')
+  return -error_count
+
+
+_WHITESPACE_BYTES = frozenset((ord('\t'), ord('\n'), ord('\r'), ord(' ')))
+
+
+def _ExtendDeletionIfElementIsInList(contents, offset):
+  """Extends the range of a deletion if the deleted element was part of a list.
+
+  This rewriter helper makes it easy for refactoring tools to remove elements
+  from a list. Even if a matcher callback knows that it is removing an element
+  from a list, it may not have enough information to accurately remove the list
+  element; for example, another matcher callback may end up removing an adjacent
+  list element, or all the list elements may end up being removed.
+
+  With this helper, refactoring tools can simply remove the list element and not
+  worry about having to include the comma in the replacement.
+
+  Args:
+    contents: A bytearray with the deletion already applied.
+    offset: The offset in the bytearray where the deleted range used to be.
+  """
+  char_before = char_after = None
+  left_trim_count = 0
+  for byte in reversed(contents[:offset]):
+    left_trim_count += 1
+    if byte in _WHITESPACE_BYTES:
+      continue
+    if byte in (ord(','), ord(':'), ord('('), ord('{')):
+      char_before = chr(byte)
+    break
+
+  right_trim_count = 0
+  for byte in contents[offset:]:
+    right_trim_count += 1
+    if byte in _WHITESPACE_BYTES:
+      continue
+    if byte == ord(','):
+      char_after = chr(byte)
+    break
+
+  if char_before:
+    if char_after:
+      del contents[offset:offset + right_trim_count]
+    elif char_before in (',', ':'):
+      del contents[offset - left_trim_count:offset]
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      'build_directory',
+      help='path to the build dir (dir that edit paths are relative to)')
+  parser.add_argument(
+      'path_filter',
+      nargs='*',
+      help='optional paths to filter what files the tool is run on')
+  args = parser.parse_args()
+
+  filenames = set(_GetFilesFromGit(args.path_filter))
+  edits = _ParseEditsFromStdin(args.build_directory)
+  return _ApplyEdits(
+      {k: v for k, v in edits.iteritems()
+            if os.path.realpath(k) in filenames})
+
+
+if __name__ == '__main__':
+  sys.exit(main())
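
One subtlety in apply_edits.py worth calling out is
_ExtendDeletionIfElementIsInList: when an edit's replacement text is empty,
the deletion is widened so the dangling list separator disappears along with
the element. A small demonstration of the (private) helper, assuming it runs
from tools/clang/scripts in a full checkout so that importing apply_edits and
its clang pylib dependency works:

import apply_edits

buf = bytearray(b'f(a, b)')
# An edit with an empty replacement deletes the element |b| (offset 5,
# length 1), leaving a dangling ', ' separator behind...
del buf[5:6]
# ...which the helper swallows by extending the deleted range to the left.
apply_edits._ExtendDeletionIfElementIsInList(buf, 5)
assert buf == bytearray(b'f(a)')
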
diff --git a/tools/clang/scripts/extract_edits.py b/tools/clang/scripts/extract_edits.py
new file mode 100755
index 0000000..b0df9c3
--- /dev/null
+++ b/tools/clang/scripts/extract_edits.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# Copyright (c) 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Script to extract edits from clang tool output.
+
+If a clang tool emits edits, then the edits should look like this:
+    ...
+    ==== BEGIN EDITS ====
+    <edit1>
+    <edit2>
+    ...
+    ==== END EDITS ====
+    ...
+
+extract_edits.py takes input that is concatenated from multiple tool invocations
+and extracts just the edits.  In other words, given the following input:
+    ...
+    ==== BEGIN EDITS ====
+    <edit1>
+    <edit2>
+    ==== END EDITS ====
+    ...
+    ==== BEGIN EDITS ====
+    <yet another edit1>
+    <yet another edit2>
+    ==== END EDITS ====
+    ...
+extract_edits.py would emit the following output:
+    <edit1>
+    <edit2>
+    <yet another edit1>
+    <yet another edit2>
+
+This Python script is mainly needed on Windows.
+On Unix this script can be replaced with running sed as follows:
+
+    $ cat run_tool.debug.out \
+        | sed '/^==== BEGIN EDITS ====$/,/^==== END EDITS ====$/{//!b};d' \
+        | sort | uniq
+"""
+
+
+import sys
+
+
+def main():
+  unique_lines = set()
+  inside_marker_lines = False
+  for line in sys.stdin:
+    line = line.rstrip("\n\r")
+    if line == '==== BEGIN EDITS ====':
+      inside_marker_lines = True
+      continue
+    if line == '==== END EDITS ====':
+      inside_marker_lines = False
+      continue
+    if inside_marker_lines and line not in unique_lines:
+      unique_lines.add(line)
+      print line
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
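
A quick way to sanity-check the extraction and de-duplication behavior is to
pipe a synthetic sample through the script; the edit payload below is made up,
and the snippet assumes it runs from tools/clang/scripts:

import subprocess

sample = ('warning: some unrelated compiler noise\n'
          '==== BEGIN EDITS ====\n'
          'r:::foo.cc:::10:::2:::Bar\n'
          'r:::foo.cc:::10:::2:::Bar\n'  # Duplicate - emitted only once.
          '==== END EDITS ====\n'
          'more noise\n')
proc = subprocess.Popen(['python', 'extract_edits.py'],
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, _ = proc.communicate(sample.encode())
assert out.decode().splitlines() == ['r:::foo.cc:::10:::2:::Bar']
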
diff --git a/tools/clang/scripts/generate_win_compdb.py b/tools/clang/scripts/generate_win_compdb.py
index 32f5f75..a7a7ba6 100755
--- a/tools/clang/scripts/generate_win_compdb.py
+++ b/tools/clang/scripts/generate_win_compdb.py
@@ -11,6 +11,7 @@
 work until clang tooling can be improved upstream.
 """
 
+import argparse
 import os
 import re
 import json
@@ -60,10 +61,18 @@
 
 
 def main(argv):
+  # Parse arguments.
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+      'build_path',
+      nargs='?',
+      help='Path to build directory',
+      default='out/Debug')
+  args = parser.parse_args()
   # First, generate the compile database.
   print 'Generating compile DB with ninja...'
   compile_db_as_json = subprocess.check_output(shlex.split(
-      'ninja -C out/Debug -t compdb cc cxx objc objcxx'))
+      'ninja -C %s -t compdb cc cxx objc objcxx' % args.build_path))
 
   compile_db = json.loads(compile_db_as_json)
   print 'Read in %d entries from the compile db' % len(compile_db)
@@ -74,7 +83,7 @@
   compile_db = [e for e in compile_db if '_nacl.cc.pdb' not in e['command']
       and '_nacl_win64.cc.pdb' not in e['command']]
   print 'Filtered out %d entries...' % (original_length - len(compile_db))
-  f = file('out/Debug/compile_commands.json', 'w')
+  f = file('%s/compile_commands.json' % args.build_path, 'w')
   f.write(json.dumps(compile_db, indent=2))
   print 'Done!'
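
For context, the compile database this script post-processes is the standard
clang compilation database: a JSON array in which each entry records the
working directory, the exact compile command, and the source file it applies
to. The entry below is a made-up illustration of that shape, not output from
an actual build:

import json

entry = {
    'directory': 'out/Debug',
    'command': 'clang-cl.exe /c ../../base/foo.cc /Fofoo.obj',
    'file': '../../base/foo.cc',
}
print(json.dumps([entry], indent=2))
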
 
diff --git a/tools/clang/scripts/run_tool.py b/tools/clang/scripts/run_tool.py
index 68f12e9..53c7d0f 100755
--- a/tools/clang/scripts/run_tool.py
+++ b/tools/clang/scripts/run_tool.py
@@ -4,45 +4,55 @@
 # found in the LICENSE file.
 """Wrapper script to help run clang tools across Chromium code.
 
-How to use this tool:
-If you want to run the tool across all Chromium code:
+How to use run_tool.py:
+If you want to run a clang tool across all Chromium code:
 run_tool.py <tool> <path/to/compiledb>
 
-If you want to include all files mentioned in the compilation database:
+If you want to include all files mentioned in the compilation database
+(this will also include generated files, unlike the previous command):
 run_tool.py <tool> <path/to/compiledb> --all
 
-If you only want to run the tool across just chrome/browser and content/browser:
+If you want to run the clang tool across only chrome/browser and
+content/browser:
 run_tool.py <tool> <path/to/compiledb> chrome/browser content/browser
 
-Please see https://chromium.googlesource.com/chromium/src/+/master/docs/clang_tool_refactoring.md for more
-information, which documents the entire automated refactoring flow in Chromium.
+Please see docs/clang_tool_refactoring.md, which documents the entire
+automated refactoring flow in Chromium, for more information.
 
-Why use this tool:
+Why use run_tool.py (instead of running a clang tool directly):
 The clang tool implementation doesn't take advantage of multiple cores, and if
 it fails mysteriously in the middle, all the generated replacements will be
-lost.
+lost. Additionally, if the work is simply sharded across multiple cores by
+running multiple RefactoringTools, problems arise when they attempt to rewrite a
+file at the same time.
 
-Unfortunately, if the work is simply sharded across multiple cores by running
-multiple RefactoringTools, problems arise when they attempt to rewrite a file at
-the same time. To work around that, clang tools that are run using this tool
-should output edits to stdout in the following format:
+run_tool.py will:
+1) run multiple instances of the clang tool in parallel,
+2) gather stdout from the clang tool invocations, and
+3) "atomically" forward #2 to stdout.
 
-==== BEGIN EDITS ====
-r:<file path>:<offset>:<length>:<replacement text>
-r:<file path>:<offset>:<length>:<replacement text>
-...etc...
-==== END EDITS ====
+Output of run_tool.py can be piped into extract_edits.py and then into
+apply_edits.py. These tools will extract individual edits and apply them to the
+source files. These tools assume the clang tool emits the edits in the
+following format:
+    ...
+    ==== BEGIN EDITS ====
+    r:::<file path>:::<offset>:::<length>:::<replacement text>
+    r:::<file path>:::<offset>:::<length>:::<replacement text>
+    ...etc...
+    ==== END EDITS ====
+    ...
 
-Any generated edits are applied once the clang tool has finished running
-across Chromium, regardless of whether some instances failed or not.
+extract_edits.py extracts only the lines between the BEGIN/END EDITS markers.
+apply_edits.py reads edit lines from stdin and applies the edits.
 """
 
 import argparse
-import collections
 import functools
 import multiprocessing
 import os
 import os.path
+import re
 import subprocess
 import sys
 
@@ -52,9 +62,6 @@
 
 from clang import compile_db
 
-Edit = collections.namedtuple('Edit',
-                              ('edit_type', 'offset', 'length', 'replacement'))
-
 
 def _GetFilesFromGit(paths=None):
   """Gets the list of files in the git repository.
@@ -85,90 +92,62 @@
           for entry in compile_db.Read(build_directory)]
 
 
-def _ExtractEditsFromStdout(build_directory, stdout):
-  """Extracts generated list of edits from the tool's stdout.
-
-  The expected format is documented at the top of this file.
-
-  Args:
-    build_directory: Directory that contains the compile database. Used to
-      normalize the filenames.
-    stdout: The stdout from running the clang tool.
-
-  Returns:
-    A dictionary mapping filenames to the associated edits.
-  """
-  lines = stdout.splitlines()
-  start_index = lines.index('==== BEGIN EDITS ====')
-  end_index = lines.index('==== END EDITS ====')
-  edits = collections.defaultdict(list)
-  for line in lines[start_index + 1:end_index]:
-    try:
-      edit_type, path, offset, length, replacement = line.split(':::', 4)
-      replacement = replacement.replace('\0', '\n')
-      # Normalize the file path emitted by the clang tool.
-      path = os.path.realpath(os.path.join(build_directory, path))
-      edits[path].append(Edit(edit_type, int(offset), int(length), replacement))
-    except ValueError:
-      print 'Unable to parse edit: %s' % line
-  return edits
-
-
-def _ExecuteTool(toolname, build_directory, filename):
-  """Executes the tool.
+def _ExecuteTool(toolname, tool_args, build_directory, filename):
+  """Executes the clang tool.
 
   This is defined outside the class so it can be pickled for the multiprocessing
   module.
 
   Args:
-    toolname: Path to the tool to execute.
+    toolname: Name of the clang tool to execute.
+    tool_args: Arguments to be passed to the clang tool. Can be None.
     build_directory: Directory that contains the compile database.
-    filename: The file to run the tool over.
+    filename: The file to run the clang tool over.
 
   Returns:
     A dictionary that must contain the key "status" and a boolean value
     associated with it.
 
-    If status is True, then the generated edits are stored with the key "edits"
-    in the dictionary.
+    If status is True, then the generated output is stored with the key
+    "stdout_text" in the dictionary.
 
     Otherwise, the filename and the output from stderr are associated with the
-    keys "filename" and "stderr" respectively.
+    keys "filename" and "stderr_text" respectively.
   """
+  args = [toolname, '-p', build_directory, filename]
+  if tool_args:
+    args.extend(tool_args)
   command = subprocess.Popen(
-      (toolname, '-p', build_directory, filename),
-      stdout=subprocess.PIPE,
-      stderr=subprocess.PIPE)
-  stdout, stderr = command.communicate()
+      args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+  stdout_text, stderr_text = command.communicate()
+  stderr_text = re.sub(
+      r"^warning: .*'linker' input unused \[-Wunused-command-line-argument\]\n",
+      "", stderr_text, flags=re.MULTILINE)
   if command.returncode != 0:
-    return {'status': False, 'filename': filename, 'stderr': stderr}
+    return {'status': False, 'filename': filename, 'stderr_text': stderr_text}
   else:
-    return {'status': True,
-            'edits': _ExtractEditsFromStdout(build_directory, stdout)}
+    return {'status': True, 'filename': filename, 'stdout_text': stdout_text,
+            'stderr_text': stderr_text}
 
 
 class _CompilerDispatcher(object):
   """Multiprocessing controller for running clang tools in parallel."""
 
-  def __init__(self, toolname, build_directory, filenames):
+  def __init__(self, toolname, tool_args, build_directory, filenames):
     """Initializer method.
 
     Args:
       toolname: Path to the tool to execute.
+      tool_args: Arguments to be passed to the tool. Can be None.
       build_directory: Directory that contains the compile database.
       filenames: The files to run the tool over.
     """
     self.__toolname = toolname
+    self.__tool_args = tool_args
     self.__build_directory = build_directory
     self.__filenames = filenames
     self.__success_count = 0
     self.__failed_count = 0
-    self.__edit_count = 0
-    self.__edits = collections.defaultdict(list)
-
-  @property
-  def edits(self):
-    return self.__edits
 
   @property
   def failed_count(self):
@@ -178,12 +157,12 @@
     """Does the grunt work."""
     pool = multiprocessing.Pool()
     result_iterator = pool.imap_unordered(
-        functools.partial(_ExecuteTool, self.__toolname,
-                          self.__build_directory), self.__filenames)
+        functools.partial(_ExecuteTool, self.__toolname, self.__tool_args,
+                          self.__build_directory), self.__filenames)
     for result in result_iterator:
       self.__ProcessResult(result)
-    sys.stdout.write('\n')
-    sys.stdout.flush()
+    sys.stderr.write('\n')
 
   def __ProcessResult(self, result):
     """Handles result processing.
@@ -193,95 +172,18 @@
     """
     if result['status']:
       self.__success_count += 1
-      for k, v in result['edits'].iteritems():
-        self.__edits[k].extend(v)
-        self.__edit_count += len(v)
+      sys.stdout.write(result['stdout_text'])
+      sys.stderr.write(result['stderr_text'])
     else:
       self.__failed_count += 1
-      sys.stdout.write('\nFailed to process %s\n' % result['filename'])
-      sys.stdout.write(result['stderr'])
-      sys.stdout.write('\n')
-    percentage = (float(self.__success_count + self.__failed_count) /
-                  len(self.__filenames)) * 100
-    sys.stdout.write('Succeeded: %d, Failed: %d, Edits: %d [%.2f%%]\r' %
-                     (self.__success_count, self.__failed_count,
-                      self.__edit_count, percentage))
-    sys.stdout.flush()
-
-
-def _ApplyEdits(edits):
-  """Apply the generated edits.
-
-  Args:
-    edits: A dict mapping filenames to Edit instances that apply to that file.
-  """
-  edit_count = 0
-  for k, v in edits.iteritems():
-    # Sort the edits and iterate through them in reverse order. Sorting allows
-    # duplicate edits to be quickly skipped, while reversing means that
-    # subsequent edits don't need to have their offsets updated with each edit
-    # applied.
-    v.sort()
-    last_edit = None
-    with open(k, 'rb+') as f:
-      contents = bytearray(f.read())
-      for edit in reversed(v):
-        if edit == last_edit:
-          continue
-        last_edit = edit
-        contents[edit.offset:edit.offset + edit.length] = edit.replacement
-        if not edit.replacement:
-          _ExtendDeletionIfElementIsInList(contents, edit.offset)
-        edit_count += 1
-      f.seek(0)
-      f.truncate()
-      f.write(contents)
-  print 'Applied %d edits to %d files' % (edit_count, len(edits))
-
-
-_WHITESPACE_BYTES = frozenset((ord('\t'), ord('\n'), ord('\r'), ord(' ')))
-
-
-def _ExtendDeletionIfElementIsInList(contents, offset):
-  """Extends the range of a deletion if the deleted element was part of a list.
-
-  This rewriter helper makes it easy for refactoring tools to remove elements
-  from a list. Even if a matcher callback knows that it is removing an element
-  from a list, it may not have enough information to accurately remove the list
-  element; for example, another matcher callback may end up removing an adjacent
-  list element, or all the list elements may end up being removed.
-
-  With this helper, refactoring tools can simply remove the list element and not
-  worry about having to include the comma in the replacement.
-
-  Args:
-    contents: A bytearray with the deletion already applied.
-    offset: The offset in the bytearray where the deleted range used to be.
-  """
-  char_before = char_after = None
-  left_trim_count = 0
-  for byte in reversed(contents[:offset]):
-    left_trim_count += 1
-    if byte in _WHITESPACE_BYTES:
-      continue
-    if byte in (ord(','), ord(':'), ord('('), ord('{')):
-      char_before = chr(byte)
-    break
-
-  right_trim_count = 0
-  for byte in contents[offset:]:
-    right_trim_count += 1
-    if byte in _WHITESPACE_BYTES:
-      continue
-    if byte == ord(','):
-      char_after = chr(byte)
-    break
-
-  if char_before:
-    if char_after:
-      del contents[offset:offset + right_trim_count]
-    elif char_before in (',', ':'):
-      del contents[offset - left_trim_count:offset]
+      sys.stderr.write('\nFailed to process %s\n' % result['filename'])
+      sys.stderr.write(result['stderr_text'])
+      sys.stderr.write('\n')
+    done_count = self.__success_count + self.__failed_count
+    percentage = (float(done_count) / len(self.__filenames)) * 100
+    sys.stderr.write(
+        'Processed %d files with %s tool (%d failures) [%.2f%%]\r' %
+        (done_count, self.__toolname, self.__failed_count, percentage))
 
 
 def main():
@@ -299,6 +201,9 @@
       'path_filter',
       nargs='*',
       help='optional paths to filter what files the tool is run on')
+  parser.add_argument(
+      '--tool-args', nargs='*',
+      help='optional arguments passed to the tool')
   args = parser.parse_args()
 
   os.environ['PATH'] = '%s%s%s' % (
@@ -312,24 +217,19 @@
     compile_db.GenerateWithNinja(args.compile_database)
 
   if args.all:
-    filenames = set(_GetFilesFromCompileDB(args.compile_database))
-    source_filenames = filenames
+    source_filenames = set(_GetFilesFromCompileDB(args.compile_database))
   else:
-    filenames = set(_GetFilesFromGit(args.path_filter))
+    git_filenames = set(_GetFilesFromGit(args.path_filter))
     # Filter out files that aren't C/C++/Obj-C/Obj-C++.
     extensions = frozenset(('.c', '.cc', '.cpp', '.m', '.mm'))
     source_filenames = [f
-                        for f in filenames
+                        for f in git_filenames
                         if os.path.splitext(f)[1] in extensions]
-  dispatcher = _CompilerDispatcher(args.tool, args.compile_database,
+
+  dispatcher = _CompilerDispatcher(args.tool, args.tool_args,
+                                   args.compile_database,
                                    source_filenames)
   dispatcher.Run()
-  # Filter out edits to files that aren't in the git repository, since it's not
-  # useful to modify files that aren't under source control--typically, these
-  # are generated files or files in a git submodule that's not part of Chromium.
-  _ApplyEdits({k: v
-               for k, v in dispatcher.edits.iteritems()
-               if os.path.realpath(k) in filenames})
   return -dispatcher.failed_count
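
The parallelism in run_tool.py reduces to one pattern: functools.partial pins
the per-run constants so multiprocessing only has to ship the varying filename
to each worker, and imap_unordered yields results as workers finish. A
stripped-down sketch of that pattern; the tool name, build directory and file
list are placeholders:

import functools
import multiprocessing


def _RunOne(toolname, build_directory, filename):
  # Stand-in for _ExecuteTool; a real run would spawn
  # '<toolname> -p <build_directory> <filename>' via subprocess.
  return {'status': True, 'filename': filename}


def _RunAll(filenames):
  pool = multiprocessing.Pool()
  bound = functools.partial(_RunOne, 'mytool', 'out/Debug')
  # Results arrive in completion order, not submission order.
  for result in pool.imap_unordered(bound, filenames):
    print('%s: %s' % (result['filename'], result['status']))
  pool.close()
  pool.join()


if __name__ == '__main__':
  _RunAll(['a.cc', 'b.cc'])
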
 
 
diff --git a/tools/clang/scripts/test_tool.py b/tools/clang/scripts/test_tool.py
index 728db47..f21e8c3 100755
--- a/tools/clang/scripts/test_tool.py
+++ b/tools/clang/scripts/test_tool.py
@@ -42,6 +42,60 @@
   return '%d test%s' % (tests, 's' if tests != 1 else '')
 
 
+def _RunToolAndApplyEdits(tools_clang_scripts_directory,
+                          tool_to_test,
+                          test_directory_for_tool,
+                          actual_files):
+  try:
+    # Stage the test files in the git index. If they aren't staged, then
+    # run_tool.py will skip them when applying replacements.
+    args = ['add']
+    args.extend(actual_files)
+    _RunGit(args)
+
+    # Launch the following pipeline:
+    #     run_tool.py ... | extract_edits.py | apply_edits.py ...
+    args = ['python',
+            os.path.join(tools_clang_scripts_directory, 'run_tool.py'),
+            tool_to_test,
+            test_directory_for_tool]
+    args.extend(actual_files)
+    run_tool = subprocess.Popen(args, stdout=subprocess.PIPE)
+
+    args = ['python',
+            os.path.join(tools_clang_scripts_directory, 'extract_edits.py')]
+    extract_edits = subprocess.Popen(args, stdin=run_tool.stdout,
+                                     stdout=subprocess.PIPE)
+
+    args = ['python',
+            os.path.join(tools_clang_scripts_directory, 'apply_edits.py'),
+            test_directory_for_tool]
+    apply_edits = subprocess.Popen(args, stdin=extract_edits.stdout,
+                                   stdout=subprocess.PIPE)
+
+    # Wait for the pipeline to finish running + check exit codes.
+    stdout, _ = apply_edits.communicate()
+    for process in [run_tool, extract_edits, apply_edits]:
+      process.wait()
+      if process.returncode != 0:
+        print "Failure while running the tool."
+        return process.returncode
+
+    # Reformat the resulting edits via: git cl format.
+    args = ['cl', 'format']
+    args.extend(actual_files)
+    _RunGit(args)
+
+    return 0
+
+  finally:
+    # No matter what, unstage the git changes we made earlier to avoid polluting
+    # the index.
+    args = ['reset', '--quiet', 'HEAD']
+    args.extend(actual_files)
+    _RunGit(args)
+
+
 def main(argv):
   if len(argv) < 1:
     print 'Usage: test_tool.py <clang tool>'
@@ -49,6 +103,7 @@
     sys.exit(1)
 
   tool_to_test = argv[0]
+  print '\nTesting %s\n' % tool_to_test
   tools_clang_scripts_directory = os.path.dirname(os.path.realpath(__file__))
   tools_clang_directory = os.path.dirname(tools_clang_scripts_directory)
   test_directory_for_tool = os.path.join(
@@ -64,78 +119,67 @@
   include_paths = []
   include_paths.append(
       os.path.realpath(os.path.join(tools_clang_directory, '../..')))
-  # Many gtest headers expect to have testing/gtest/include in the include
-  # search path.
+  # Many gtest and gmock headers expect to have testing/gtest/include and/or
+  # testing/gmock/include in the include search path.
   include_paths.append(
       os.path.realpath(os.path.join(tools_clang_directory,
                                     '../..',
                                     'testing/gtest/include')))
+  include_paths.append(
+      os.path.realpath(os.path.join(tools_clang_directory,
+                                    '../..',
+                                    'testing/gmock/include')))
 
-  try:
-    # Set up the test environment.
-    for source, actual in zip(source_files, actual_files):
-      shutil.copyfile(source, actual)
-    # Stage the test files in the git index. If they aren't staged, then
-    # run_tools.py will skip them when applying replacements.
-    args = ['add']
-    args.extend(actual_files)
-    _RunGit(args)
-    # Generate a temporary compilation database to run the tool over.
-    with open(compile_database, 'w') as f:
-      f.write(_GenerateCompileCommands(actual_files, include_paths))
+  if len(actual_files) == 0:
+    print 'Tool "%s" does not have compatible test files.' % tool_to_test
+    return 1
 
-    args = ['python',
-            os.path.join(tools_clang_scripts_directory, 'run_tool.py'),
-            tool_to_test,
-            test_directory_for_tool]
-    args.extend(actual_files)
-    run_tool = subprocess.Popen(args, stdout=subprocess.PIPE)
-    stdout, _ = run_tool.communicate()
-    if run_tool.returncode != 0:
-      print 'run_tool failed:\n%s' % stdout
-      sys.exit(1)
+  # Set up the test environment.
+  for source, actual in zip(source_files, actual_files):
+    shutil.copyfile(source, actual)
+  # Generate a temporary compilation database to run the tool over.
+  with open(compile_database, 'w') as f:
+    f.write(_GenerateCompileCommands(actual_files, include_paths))
 
-    args = ['cl', 'format']
-    args.extend(actual_files)
-    _RunGit(args)
+  # Run the tool.
+  exitcode = _RunToolAndApplyEdits(tools_clang_scripts_directory, tool_to_test,
+                                   test_directory_for_tool, actual_files)
+  if exitcode != 0:
+    return exitcode
 
-    passed = 0
-    failed = 0
-    for expected, actual in zip(expected_files, actual_files):
-      print '[ RUN      ] %s' % os.path.relpath(actual)
-      expected_output = actual_output = None
-      with open(expected, 'r') as f:
-        expected_output = f.readlines()
-      with open(actual, 'r') as f:
-        actual_output = f.readlines()
-      if actual_output != expected_output:
-        failed += 1
-        for line in difflib.unified_diff(expected_output, actual_output,
-                                         fromfile=os.path.relpath(expected),
-                                         tofile=os.path.relpath(actual)):
-          sys.stdout.write(line)
-        print '[  FAILED  ] %s' % os.path.relpath(actual)
-        # Don't clean up the file on failure, so the results can be referenced
-        # more easily.
-        continue
-      print '[       OK ] %s' % os.path.relpath(actual)
-      passed += 1
-      os.remove(actual)
+  # Compare actual-vs-expected results.
+  passed = 0
+  failed = 0
+  for expected, actual in zip(expected_files, actual_files):
+    print '[ RUN      ] %s' % os.path.relpath(actual)
+    expected_output = actual_output = None
+    with open(expected, 'r') as f:
+      expected_output = f.readlines()
+    with open(actual, 'r') as f:
+      actual_output = f.readlines()
+    if actual_output != expected_output:
+      failed += 1
+      for line in difflib.unified_diff(expected_output, actual_output,
+                                       fromfile=os.path.relpath(expected),
+                                       tofile=os.path.relpath(actual)):
+        sys.stdout.write(line)
+      print '[  FAILED  ] %s' % os.path.relpath(actual)
+      # Don't clean up the file on failure, so the results can be referenced
+      # more easily.
+      continue
+    print '[       OK ] %s' % os.path.relpath(actual)
+    passed += 1
+    os.remove(actual)
 
-    if failed == 0:
-      os.remove(compile_database)
+  if failed == 0:
+    os.remove(compile_database)
 
-    print '[==========] %s ran.' % _NumberOfTestsToString(len(source_files))
-    if passed > 0:
-      print '[  PASSED  ] %s.' % _NumberOfTestsToString(passed)
-    if failed > 0:
-      print '[  FAILED  ] %s.' % _NumberOfTestsToString(failed)
-  finally:
-    # No matter what, unstage the git changes we made earlier to avoid polluting
-    # the index.
-    args = ['reset', '--quiet', 'HEAD']
-    args.extend(actual_files)
-    _RunGit(args)
+  print '[==========] %s ran.' % _NumberOfTestsToString(len(source_files))
+  if passed > 0:
+    print '[  PASSED  ] %s.' % _NumberOfTestsToString(passed)
+  if failed > 0:
+    print '[  FAILED  ] %s.' % _NumberOfTestsToString(failed)
+    return 1
 
 
 if __name__ == '__main__':
diff --git a/tools/clang/scripts/update.py b/tools/clang/scripts/update.py
index 100bdec..dc7a439 100755
--- a/tools/clang/scripts/update.py
+++ b/tools/clang/scripts/update.py
@@ -27,14 +27,14 @@
 # Do NOT CHANGE this if you don't know what you're doing -- see
 # https://chromium.googlesource.com/chromium/src/+/master/docs/updating_clang.md
 # Reverting problematic clang rolls is safe, though.
-CLANG_REVISION = '282487'
+CLANG_REVISION = '289944'
 
 use_head_revision = 'LLVM_FORCE_HEAD_REVISION' in os.environ
 if use_head_revision:
   CLANG_REVISION = 'HEAD'
 
 # This is incremented when pushing a new build of Clang at the same revision.
-CLANG_SUB_REVISION=1
+CLANG_SUB_REVISION=2
 
 PACKAGE_VERSION = "%s-%s" % (CLANG_REVISION, CLANG_SUB_REVISION)
 
@@ -167,14 +167,6 @@
 
 def GetSvnRevision(svn_repo):
   """Returns current revision of the svn repo at svn_repo."""
-  if sys.platform == 'darwin':
-    # mac_files toolchain must be set for hermetic builds.
-    root = os.path.dirname(os.path.dirname(os.path.dirname(
-        os.path.dirname(__file__))))
-    sys.path.append(os.path.join(root, 'build'))
-    import mac_toolchain
-
-    mac_toolchain.SetToolchainEnvironment()
   svn_info = subprocess.check_output('svn info ' + svn_repo, shell=True)
   m = re.search(r'Revision: (\d+)', svn_info)
   return m.group(1)
@@ -312,6 +304,17 @@
   args.gcc_toolchain = gcc_dir
 
 
+def AddSvnToPathOnWin():
+  """Download svn.exe and add it to PATH."""
+  if sys.platform != 'win32':
+    return
+  svn_ver = 'svn-1.6.6-win'
+  svn_dir = os.path.join(LLVM_BUILD_TOOLS_DIR, svn_ver)
+  if not os.path.exists(svn_dir):
+    DownloadAndUnpack(CDS_URL + '/tools/%s.zip' % svn_ver, LLVM_BUILD_TOOLS_DIR)
+  os.environ['PATH'] = svn_dir + os.pathsep + os.environ.get('PATH', '')
+
+
 def AddCMakeToPath():
   """Download CMake and add it to PATH."""
   if sys.platform == 'win32':
@@ -396,8 +399,7 @@
 
   need_gold_plugin = 'LLVM_DOWNLOAD_GOLD_PLUGIN' in os.environ or (
       sys.platform.startswith('linux') and
-      'buildtype=Official' in os.environ.get('GYP_DEFINES', '') and
-      'branding=Chrome' in os.environ.get('GYP_DEFINES', ''))
+      'buildtype=Official' in os.environ.get('GYP_DEFINES', ''))
 
   if ReadStampFile() == PACKAGE_VERSION and not args.force_local_build:
     print 'Clang is already up to date.'
@@ -448,6 +450,7 @@
     return 1
 
   DownloadHostGcc(args)
+  AddSvnToPathOnWin()
   AddCMakeToPath()
   AddGnuWinToPath()
 
@@ -457,6 +460,10 @@
   Checkout('Clang', LLVM_REPO_URL + '/cfe/trunk', CLANG_DIR)
   if sys.platform == 'win32' or use_head_revision:
     Checkout('LLD', LLVM_REPO_URL + '/lld/trunk', LLD_DIR)
+  elif os.path.exists(LLD_DIR):
+    # In case someone sends a tryjob that temporarily adds lld to the checkout,
+    # make sure it's not around on future builds.
+    RmTree(LLD_DIR)
   Checkout('compiler-rt', LLVM_REPO_URL + '/compiler-rt/trunk', COMPILER_RT_DIR)
   if sys.platform == 'darwin':
     # clang needs a libc++ checkout, else -stdlib=libc++ won't find includes
@@ -492,7 +499,6 @@
                      '-DCMAKE_BUILD_TYPE=Release',
                      '-DLLVM_ENABLE_ASSERTIONS=ON',
                      '-DLLVM_ENABLE_THREADS=OFF',
-                     '-DLLVM_ENABLE_TIMESTAMPS=OFF',
                      # Statically link MSVCRT to avoid DLL dependencies.
                      '-DLLVM_USE_CRT_RELEASE=MT',
                      ]
@@ -626,6 +632,7 @@
   cc_args = base_cmake_args if sys.platform != 'win32' else cmake_args
   if cc is not None:  cc_args.append('-DCMAKE_C_COMPILER=' + cc)
   if cxx is not None: cc_args.append('-DCMAKE_CXX_COMPILER=' + cxx)
+  chrome_tools = list(set(['plugins', 'blink_gc_plugin'] + args.extra_tools))
   cmake_args += base_cmake_args + [
       '-DLLVM_BINUTILS_INCDIR=' + binutils_incdir,
       '-DCMAKE_C_FLAGS=' + ' '.join(cflags),
@@ -638,7 +645,7 @@
       # explicitly, https://crbug.com/622775
       '-DENABLE_LINKER_BUILD_ID=ON',
       '-DCHROMIUM_TOOLS_SRC=%s' % os.path.join(CHROMIUM_DIR, 'tools', 'clang'),
-      '-DCHROMIUM_TOOLS=%s' % ';'.join(args.tools)]
+      '-DCHROMIUM_TOOLS=%s' % ';'.join(chrome_tools)]
 
   EnsureDirExists(LLVM_BUILD_DIR)
   os.chdir(LLVM_BUILD_DIR)
@@ -656,7 +663,7 @@
 
   RunCommand(['ninja'], msvc_arch='x64')
 
-  if args.tools:
+  if chrome_tools:
     # If any Chromium tools were built, install those now.
     RunCommand(['ninja', 'cr-install'], msvc_arch='x64')
 
@@ -839,9 +846,8 @@
                       help='print current clang version (e.g. x.y.z) and exit.')
   parser.add_argument('--run-tests', action='store_true',
                       help='run tests after building; only for local builds')
-  parser.add_argument('--tools', nargs='*',
-                      help='select which chrome tools to build',
-                      default=['plugins', 'blink_gc_plugin'])
+  parser.add_argument('--extra-tools', nargs='*', default=[],
+                      help='select additional chrome tools to build')
   parser.add_argument('--without-android', action='store_false',
                       help='don\'t build Android ASan runtime (linux only)',
                       dest='with_android',
@@ -874,6 +880,11 @@
       print 'Skipping Clang update (make_clang_dir= was set in GYP_DEFINES).'
       return 0
 
+  if use_head_revision:
+    # TODO(hans): Trunk was updated; remove after the next roll.
+    global VERSION
+    VERSION = '5.0.0'
+
   global CLANG_REVISION, PACKAGE_VERSION
   if args.print_revision:
     if use_head_revision or args.llvm_force_head_revision:
diff --git a/tools/clang/scripts/upload_revision.py b/tools/clang/scripts/upload_revision.py
index 8a7994e..8eb4eb3 100755
--- a/tools/clang/scripts/upload_revision.py
+++ b/tools/clang/scripts/upload_revision.py
@@ -72,7 +72,7 @@
   Git(["commit", "-m", "Roll clang {}:{}.\n\n{}".format(
       clang_old_revision, clang_revision, commit_message)])
 
-  Git(["cl", "upload"])
+  Git(["cl", "upload", "-f"])
   Git(["cl", "try", "-b", "linux_upload_clang", "-r", git_revision])
   Git(["cl", "try", "-b", "mac_upload_clang", "-r", git_revision])
   Git(["cl", "try", "-b", "win_upload_clang", "-r", git_revision])
diff --git a/tools/clang/translation_unit/TranslationUnitGenerator.cpp b/tools/clang/translation_unit/TranslationUnitGenerator.cpp
index 4d7524d..e6be43e 100644
--- a/tools/clang/translation_unit/TranslationUnitGenerator.cpp
+++ b/tools/clang/translation_unit/TranslationUnitGenerator.cpp
@@ -265,9 +265,5 @@
       clang::tooling::newFrontendActionFactory<CompilationIndexerAction>();
   clang::tooling::ClangTool tool(options.getCompilations(),
                                  options.getSourcePathList());
-  // This clang tool does not actually produce edits, but run_tool.py expects
-  // this. So we just print an empty edit block.
-  llvm::outs() << "==== BEGIN EDITS ====\n";
-  llvm::outs() << "==== END EDITS ====\n";
   return tool.run(frontend_factory.get());
 }
diff --git a/tools/clang/value_cleanup/tests/list-value-append-expected.cc b/tools/clang/value_cleanup/tests/list-value-append-expected.cc
index 1de3ff6..8d7a57a 100644
--- a/tools/clang/value_cleanup/tests/list-value-append-expected.cc
+++ b/tools/clang/value_cleanup/tests/list-value-append-expected.cc
@@ -4,7 +4,7 @@
 
 #include <memory>
 
-#include "base/values.h"
+#include "values.h"
 
 #define true true
 
diff --git a/tools/clang/value_cleanup/tests/list-value-append-original.cc b/tools/clang/value_cleanup/tests/list-value-append-original.cc
index b28b169..2a1a03b 100644
--- a/tools/clang/value_cleanup/tests/list-value-append-original.cc
+++ b/tools/clang/value_cleanup/tests/list-value-append-original.cc
@@ -4,7 +4,7 @@
 
 #include <memory>
 
-#include "base/values.h"
+#include "values.h"
 
 #define true true
 
diff --git a/tools/clang/value_cleanup/tests/values.h b/tools/clang/value_cleanup/tests/values.h
new file mode 100644
index 0000000..e3c63a0
--- /dev/null
+++ b/tools/clang/value_cleanup/tests/values.h
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef VALUES_H_
+#define VALUES_H_
+
+#include <vector>
+
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+class Value {};
+
+// FundamentalValue represents the simple fundamental types of values.
+class FundamentalValue : public Value {
+ public:
+  explicit FundamentalValue(bool in_value);
+  explicit FundamentalValue(int in_value);
+  explicit FundamentalValue(double in_value);
+};
+
+class StringValue : public Value {
+ public:
+  // Initializes a StringValue with a UTF-8 narrow character string.
+  explicit StringValue(StringPiece in_value);
+
+  // Initializes a StringValue with a string16.
+  explicit StringValue(const string16& in_value);
+};
+
+// Stub base::ListValue class that supports Append(Value*).
+class ListValue : public Value {
+ public:
+  ListValue();
+
+  // Appends a Value to the end of the list.
+  void Append(std::unique_ptr<Value> in_value);
+
+  // Deprecated version of the above.
+  void Append(Value* in_value);
+
+  // Convenience forms of Append.
+  void AppendBoolean(bool in_value);
+  void AppendInteger(int in_value);
+  void AppendDouble(double in_value);
+  void AppendString(StringPiece in_value);
+  void AppendString(const string16& in_value);
+  void AppendStrings(const std::vector<std::string>& in_values);
+  void AppendStrings(const std::vector<string16>& in_values);
+};
+
+}  // namespace base
+
+#endif  // VALUES_H_
\ No newline at end of file
diff --git a/tools/dev/gm.py b/tools/dev/gm.py
new file mode 100755
index 0000000..9e61368
--- /dev/null
+++ b/tools/dev/gm.py
@@ -0,0 +1,302 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""\
+Convenience wrapper for compiling V8 with gn/ninja and running tests.
+Sets up build output directories if they don't exist.
+Produces simulator builds for non-Intel target architectures.
+Uses Goma by default if it is detected (at output directory setup time).
+Expects to be run from the root of a V8 checkout.
+
+Usage:
+    gm.py [<arch>].[<mode>].[<target>] [testname...]
+
+All arguments are optional. Most combinations should work, e.g.:
+    gm.py ia32.debug x64.release d8
+    gm.py x64 mjsunit/foo cctest/test-bar/*
+"""
+# See HELP below for additional documentation.
+
+import os
+import subprocess
+import sys
+
+BUILD_OPTS_DEFAULT = ""
+BUILD_OPTS_GOMA = "-j1000 -l50"
+BUILD_TARGETS_TEST = ["d8", "cctest", "unittests"]
+BUILD_TARGETS_ALL = ["all"]
+
+# All arches that this script understands.
+ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
+          "s390", "s390x", "x87"]
+# Arches that get built/run when you don't specify any.
+DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
+# Modes that this script understands.
+MODES = ["release", "debug", "optdebug"]
+# Modes that get built/run when you don't specify any.
+DEFAULT_MODES = ["release", "debug"]
+# Build targets that can be manually specified.
+TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers"]
+# Build targets that get built when you don't specify any (and specified tests
+# don't imply any other targets).
+DEFAULT_TARGETS = ["d8"]
+# Tests that run-tests.py would run by default and that can be run with the
+# BUILD_TARGETS_TEST binaries.
+DEFAULT_TESTS = ["cctest", "debugger", "intl", "message", "mjsunit",
+                 "preparser", "unittests"]
+# These can be suffixed to any <arch>.<mode> combo, or used standalone,
+# or used as global modifiers (affecting all <arch>.<mode> combos).
+ACTIONS = {
+  "all": {"targets": BUILD_TARGETS_ALL, "tests": []},
+  "tests": {"targets": BUILD_TARGETS_TEST, "tests": []},
+  "check": {"targets": BUILD_TARGETS_TEST, "tests": DEFAULT_TESTS},
+  "checkall": {"targets": BUILD_TARGETS_ALL, "tests": ["ALL"]},
+}
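+# For example, the single argument "x64.release.check" builds
+# BUILD_TARGETS_TEST for x64 release and runs DEFAULT_TESTS, while a bare
+# "check" applies the same action to all DEFAULT_ARCHES and DEFAULT_MODES.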
+
+HELP = """<arch> can be any of: %(arches)s
+<mode> can be any of: %(modes)s
+<target> can be any of:
+ - cctest, d8, unittests, v8_fuzzers (build respective binary)
+ - all (build all binaries)
+ - tests (build test binaries)
+ - check (build test binaries, run most tests)
+ - checkall (build all binaries, run more tests)
+""" % {"arches": " ".join(ARCHES),
+       "modes": " ".join(MODES)}
+
+TESTSUITES_TARGETS = {"benchmarks": "d8",
+                      "cctest": "cctest",
+                      "debugger": "d8",
+                      "fuzzer": "v8_fuzzers",
+                      "intl": "d8",
+                      "message": "d8",
+                      "mjsunit": "d8",
+                      "mozilla": "d8",
+                      "preparser": "d8",
+                      "test262": "d8",
+                      "unittests": "unittests",
+                      "webkit": "d8"}
+
+OUTDIR = "out"
+
+IS_GOMA_MACHINE = (os.path.exists(os.path.expanduser("~/goma")) or
+                   os.environ.get('GOMADIR'))
+
+USE_GOMA = "true" if IS_GOMA_MACHINE else "false"
+BUILD_OPTS = BUILD_OPTS_GOMA if IS_GOMA_MACHINE else BUILD_OPTS_DEFAULT
+
+RELEASE_ARGS_TEMPLATE = """\
+is_component_build = false
+is_debug = false
+%s
+use_goma = {GOMA}
+v8_enable_backtrace = true
+v8_enable_disassembler = true
+v8_enable_object_print = true
+v8_enable_verify_heap = true
+""".replace("{GOMA}", USE_GOMA)
+
+DEBUG_ARGS_TEMPLATE = """\
+gdb_index = true
+is_component_build = true
+is_debug = true
+symbol_level = 2
+%s
+use_goma = {GOMA}
+v8_enable_backtrace = true
+v8_enable_slow_dchecks = true
+v8_optimized_debug = false
+""".replace("{GOMA}", USE_GOMA)
+
+OPTDEBUG_ARGS_TEMPLATE = """\
+gdb_index = false
+is_component_build = true
+is_debug = true
+symbol_level = 1
+%s
+use_goma = {GOMA}
+v8_enable_backtrace = true
+v8_enable_verify_heap = true
+v8_optimized_debug = true
+""".replace("{GOMA}", USE_GOMA)
+
+ARGS_TEMPLATES = {
+  "release": RELEASE_ARGS_TEMPLATE,
+  "debug": DEBUG_ARGS_TEMPLATE,
+  "optdebug": OPTDEBUG_ARGS_TEMPLATE
+}
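+# E.g. an arm64.release config expands the release template with
+#   target_cpu = "x64"
+#   v8_target_cpu = "arm64"
+# i.e. a simulator build running on an x64 host (see Config below).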
+
+def PrintHelpAndExit():
+  print(__doc__)
+  print(HELP)
+  sys.exit(0)
+
+def _Call(cmd, silent=False):
+  if not silent: print("# %s" % cmd)
+  return subprocess.call(cmd, shell=True)
+
+def _Write(filename, content):
+  print("# echo > %s << EOF\n%sEOF" % (filename, content))
+  with open(filename, "w") as f:
+    f.write(content)
+
+def GetPath(arch, mode):
+  subdir = "%s.%s" % (arch, mode)
+  return os.path.join(OUTDIR, subdir)
+
+class Config(object):
+  def __init__(self, arch, mode, targets, tests=[]):
+    self.arch = arch
+    self.mode = mode
+    self.targets = set(targets)
+    self.tests = set(tests)
+
+  def Extend(self, targets, tests=[]):
+    self.targets.update(targets)
+    self.tests.update(tests)
+
+  def GetTargetCpu(self):
+    cpu = "x86"
+    if self.arch.endswith("64") or self.arch == "s390x":
+      cpu = "x64"
+    return "target_cpu = \"%s\"" % cpu
+
+  def GetV8TargetCpu(self):
+    if self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
+                     "s390", "s390x"):
+      return "\nv8_target_cpu = \"%s\"" % self.arch
+    return ""
+
+  def GetGnArgs(self):
+    template = ARGS_TEMPLATES[self.mode]
+    arch_specific = self.GetTargetCpu() + self.GetV8TargetCpu()
+    return template % arch_specific
+
+  def Build(self):
+    path = GetPath(self.arch, self.mode)
+    args_gn = os.path.join(path, "args.gn")
+    if not os.path.exists(path):
+      print("# mkdir -p %s" % path)
+      os.makedirs(path)
+    if not os.path.exists(args_gn):
+      _Write(args_gn, self.GetGnArgs())
+      code = _Call("gn gen %s" % path)
+      if code != 0: return code
+    targets = " ".join(self.targets)
+    return _Call("ninja -C %s %s %s" % (path, BUILD_OPTS, targets))
+
+  def RunTests(self):
+    if not self.tests: return 0
+    if "ALL" in self.tests:
+      tests = ""
+    else:
+      tests = " ".join(self.tests)
+    return _Call("tools/run-tests.py --arch=%s --mode=%s %s" %
+                 (self.arch, self.mode, tests))
+
+def GetTestBinary(argstring):
+  for suite in TESTSUITES_TARGETS:
+    if argstring.startswith(suite): return TESTSUITES_TARGETS[suite]
+  return None
+
+class ArgumentParser(object):
+  def __init__(self):
+    self.global_targets = set()
+    self.global_tests = set()
+    self.global_actions = set()
+    self.configs = {}
+
+  def PopulateConfigs(self, arches, modes, targets, tests):
+    for a in arches:
+      for m in modes:
+        path = GetPath(a, m)
+        if path not in self.configs:
+          self.configs[path] = Config(a, m, targets, tests)
+        else:
+          self.configs[path].Extend(targets, tests)
+
+  def ProcessGlobalActions(self):
+    have_configs = len(self.configs) > 0
+    for action in self.global_actions:
+      impact = ACTIONS[action]
+      if have_configs:
+        for c in self.configs:
+          self.configs[c].Extend(**impact)
+      else:
+        self.PopulateConfigs(DEFAULT_ARCHES, DEFAULT_MODES, **impact)
+
+  def ParseArg(self, argstring):
+    if argstring in ("-h", "--help", "help"):
+      PrintHelpAndExit()
+    arches = []
+    modes = []
+    targets = []
+    actions = []
+    tests = []
+    words = argstring.split('.')
+    if len(words) == 1:
+      word = words[0]
+      if word in ACTIONS:
+        self.global_actions.add(word)
+        return
+      if word in TARGETS:
+        self.global_targets.add(word)
+        return
+      maybe_target = GetTestBinary(word)
+      if maybe_target is not None:
+        self.global_tests.add(word)
+        self.global_targets.add(maybe_target)
+        return
+    for word in words:
+      if word in ARCHES:
+        arches.append(word)
+      elif word in MODES:
+        modes.append(word)
+      elif word in TARGETS:
+        targets.append(word)
+      elif word in ACTIONS:
+        actions.append(word)
+      else:
+        print("Didn't understand: %s" % word)
+        sys.exit(1)
+    # Process actions.
+    for action in actions:
+      impact = ACTIONS[action]
+      targets += impact["targets"]
+      tests += impact["tests"]
+    # Fill in defaults for things that weren't specified.
+    arches = arches or DEFAULT_ARCHES
+    modes = modes or DEFAULT_MODES
+    targets = targets or DEFAULT_TARGETS
+    # Produce configs.
+    self.PopulateConfigs(arches, modes, targets, tests)
+
+  def ParseArguments(self, argv):
+    if len(argv) == 0:
+      PrintHelpAndExit()
+    for argstring in argv:
+      self.ParseArg(argstring)
+    self.ProcessGlobalActions()
+    for c in self.configs:
+      self.configs[c].Extend(self.global_targets, self.global_tests)
+    return self.configs
+
+def Main(argv):
+  parser = ArgumentParser()
+  configs = parser.ParseArguments(argv[1:])
+  return_code = 0
+  for c in configs:
+    return_code += configs[c].Build()
+  for c in configs:
+    return_code += configs[c].RunTests()
+  if return_code == 0:
+    _Call("notify-send 'Done!' 'V8 compilation finished successfully.'",
+          silent=True)
+  else:
+    _Call("notify-send 'Error!' 'V8 compilation finished with errors.'",
+          silent=True)
+  return return_code
+
+if __name__ == "__main__":
+  sys.exit(Main(sys.argv))
diff --git a/tools/foozzie/BUILD.gn b/tools/foozzie/BUILD.gn
new file mode 100644
index 0000000..532c1fa
--- /dev/null
+++ b/tools/foozzie/BUILD.gn
@@ -0,0 +1,19 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+
+if (v8_correctness_fuzzer) {
+  copy("v8_correctness_fuzzer_resources") {
+    sources = [
+      "v8_commands.py",
+      "v8_foozzie.py",
+      "v8_mock.js",
+      "v8_mock_archs.js",
+      "v8_suppressions.js",
+      "v8_suppressions.py",
+    ]
+    outputs = [ "$root_out_dir/{{source_file_part}}" ]
+  }
+}
diff --git a/tools/foozzie/testdata/failure_output.txt b/tools/foozzie/testdata/failure_output.txt
new file mode 100644
index 0000000..f428c5d
--- /dev/null
+++ b/tools/foozzie/testdata/failure_output.txt
@@ -0,0 +1,50 @@
+#
+# V8 correctness failure
+# V8 correctness configs: x64,ignition:x64,ignition_staging
+# V8 correctness sources: f60
+# V8 correctness suppression: 
+#
+# CHECK
+#
+# Compared x64,ignition with x64,ignition_staging
+#
+# Flags of x64,ignition:
+--abort_on_stack_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --ignition --turbo-filter=~ --hydrogen-filter=~ --validate-asm --nocrankshaft
+# Flags of x64,ignition_staging:
+--abort_on_stack_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --ignition-staging --validate-asm
+#
+# Difference:
+- unknown
++ not unknown
+#
+# Source file:
+name/to/file.js
+#
+### Start of configuration x64,ignition:
+
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird error
+        ^
+3
+unknown
+
+
+### End of configuration x64,ignition
+#
+### Start of configuration x64,ignition_staging:
+
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird other error
+^
+3
+not unknown
+
+
+### End of configuration x64,ignition_staging
+
diff --git a/tools/foozzie/testdata/fuzz-123.js b/tools/foozzie/testdata/fuzz-123.js
new file mode 100644
index 0000000..7af5c2e
--- /dev/null
+++ b/tools/foozzie/testdata/fuzz-123.js
@@ -0,0 +1,5 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Empty test dummy.
diff --git a/tools/foozzie/testdata/test_d8_1.py b/tools/foozzie/testdata/test_d8_1.py
new file mode 100644
index 0000000..15a93fa
--- /dev/null
+++ b/tools/foozzie/testdata/test_d8_1.py
@@ -0,0 +1,14 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+print """
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird error
+        ^
+3
+unknown
+"""
diff --git a/tools/foozzie/testdata/test_d8_2.py b/tools/foozzie/testdata/test_d8_2.py
new file mode 100644
index 0000000..f2bdacf
--- /dev/null
+++ b/tools/foozzie/testdata/test_d8_2.py
@@ -0,0 +1,14 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+print """
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird other error
+^
+3
+unknown
+"""
diff --git a/tools/foozzie/testdata/test_d8_3.py b/tools/foozzie/testdata/test_d8_3.py
new file mode 100644
index 0000000..a6c8682
--- /dev/null
+++ b/tools/foozzie/testdata/test_d8_3.py
@@ -0,0 +1,14 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+print """
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird other error
+^
+3
+not unknown
+"""
diff --git a/tools/foozzie/testdata/v8_build_config.json b/tools/foozzie/testdata/v8_build_config.json
new file mode 100644
index 0000000..ea27b1c
--- /dev/null
+++ b/tools/foozzie/testdata/v8_build_config.json
@@ -0,0 +1 @@
+{"v8_current_cpu": "x64"}
diff --git a/tools/foozzie/v8_commands.py b/tools/foozzie/v8_commands.py
new file mode 100644
index 0000000..0b3cae7
--- /dev/null
+++ b/tools/foozzie/v8_commands.py
@@ -0,0 +1,64 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Forked from commands.py and output.py in the v8 test driver.
+
+import signal
+import subprocess
+import sys
+from threading import Event, Timer
+
+
+class Output(object):
+  def __init__(self, exit_code, timed_out, stdout, pid):
+    self.exit_code = exit_code
+    self.timed_out = timed_out
+    self.stdout = stdout
+    self.pid = pid
+
+  def HasCrashed(self):
+    # Timed out tests will have exit_code -signal.SIGTERM.
+    if self.timed_out:
+      return False
+    return (self.exit_code < 0 and
+            self.exit_code != -signal.SIGABRT)
+
+  def HasTimedOut(self):
+    return self.timed_out
+
+
+def Execute(args, cwd, timeout=None):
+  popen_args = [c for c in args if c != ""]
+  try:
+    process = subprocess.Popen(
+      args=popen_args,
+      stdout=subprocess.PIPE,
+      stderr=subprocess.STDOUT,
+      cwd=cwd
+    )
+  except Exception as e:
+    sys.stderr.write("Error executing: %s\n" % popen_args)
+    raise e
+
+  timeout_event = Event()
+
+  def kill_process():
+    timeout_event.set()
+    try:
+      process.kill()
+    except OSError:
+      sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
+
+  timer = Timer(timeout, kill_process)
+  timer.start()
+  stdout, _ = process.communicate()
+  timer.cancel()
+
+  return Output(
+      process.returncode,
+      timeout_event.is_set(),
+      stdout.decode('utf-8', 'replace').encode('utf-8'),
+      process.pid,
+  )
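+
+
+# Illustrative use (file name hypothetical):
+#   out = Execute(['d8', 'test.js'], cwd='.', timeout=3)
+# out.stdout then holds the combined stdout/stderr of the run, and
+# out.HasTimedOut() reports whether the watchdog timer killed the process.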
diff --git a/tools/foozzie/v8_foozzie.py b/tools/foozzie/v8_foozzie.py
new file mode 100755
index 0000000..ddd8558
--- /dev/null
+++ b/tools/foozzie/v8_foozzie.py
@@ -0,0 +1,321 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+V8 correctness fuzzer launcher script.
+"""
+
+import argparse
+import hashlib
+import itertools
+import json
+import os
+import re
+import sys
+import traceback
+
+import v8_commands
+import v8_suppressions
+
+CONFIGS = dict(
+  default=['--validate-asm'],
+  fullcode=['--nocrankshaft', '--turbo-filter=~', '--validate-asm'],
+  ignition=['--ignition', '--turbo-filter=~', '--hydrogen-filter=~',
+            '--validate-asm', '--nocrankshaft'],
+  ignition_eager=['--ignition', '--turbo-filter=~', '--hydrogen-filter=~',
+                  '--validate-asm', '--nocrankshaft', '--no-lazy',
+                  '--no-lazy-inner-functions'],
+  ignition_staging=['--ignition-staging', '--validate-asm'],
+  ignition_turbo=['--ignition-staging', '--turbo', '--validate-asm'],
+  ignition_turbo_opt=['--ignition-staging', '--turbo', '--always-opt',
+                      '--validate-asm'],
+)
+
+# Timeout in seconds for one d8 run.
+TIMEOUT = 3
+
+# Return codes.
+RETURN_PASS = 0
+RETURN_FAIL = 2
+
+BASE_PATH = os.path.dirname(os.path.abspath(__file__))
+PREAMBLE = [
+  os.path.join(BASE_PATH, 'v8_mock.js'),
+  os.path.join(BASE_PATH, 'v8_suppressions.js'),
+]
+ARCH_MOCKS = os.path.join(BASE_PATH, 'v8_mock_archs.js')
+
+FLAGS = ['--abort_on_stack_overflow', '--expose-gc', '--allow-natives-syntax',
+         '--invoke-weak-callbacks', '--omit-quit', '--es-staging']
+
+SUPPORTED_ARCHS = ['ia32', 'x64', 'arm', 'arm64']
+
+# Output for suppressed failure case.
+FAILURE_HEADER_TEMPLATE = """#
+# V8 correctness failure
+# V8 correctness configs: %(configs)s
+# V8 correctness sources: %(source_key)s
+# V8 correctness suppression: %(suppression)s
+"""
+
+# Extended output for failure case. The 'CHECK' is for the minimizer.
+FAILURE_TEMPLATE = FAILURE_HEADER_TEMPLATE + """#
+# CHECK
+#
+# Compared %(first_config_label)s with %(second_config_label)s
+#
+# Flags of %(first_config_label)s:
+%(first_config_flags)s
+# Flags of %(second_config_label)s:
+%(second_config_flags)s
+#
+# Difference:
+%(difference)s
+#
+# Source file:
+%(source)s
+#
+### Start of configuration %(first_config_label)s:
+%(first_config_output)s
+### End of configuration %(first_config_label)s
+#
+### Start of configuration %(second_config_label)s:
+%(second_config_output)s
+### End of configuration %(second_config_label)s
+"""
+
+FUZZ_TEST_RE = re.compile(r'.*fuzz(-\d+\.js)')
+SOURCE_RE = re.compile(r'print\("v8-foozzie source: (.*)"\);')
+
+# The number of hex digits used from the hash of the original source file path.
+# Keep the number small to avoid duplicate explosion.
+ORIGINAL_SOURCE_HASH_LENGTH = 3
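+# E.g. the system-test expectation in testdata/failure_output.txt shows the
+# key 'f60', obtained this way from the instrumented source 'name/to/file.js'.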
+
+# Placeholder string if no original source file could be determined.
+ORIGINAL_SOURCE_DEFAULT = 'none'
+
+
+def infer_arch(d8):
+  """Infer the V8 architecture from the build configuration next to the
+  executable.
+  """
+  with open(os.path.join(os.path.dirname(d8), 'v8_build_config.json')) as f:
+    arch = json.load(f)['v8_current_cpu']
+  return 'ia32' if arch == 'x86' else arch
+
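+# E.g. testdata/v8_build_config.json contains {"v8_current_cpu": "x64"}, so
+# the d8 test stubs next to it are treated as x64 builds.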
+
+def parse_args():
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+    '--random-seed', type=int, required=True,
+    help='random seed passed to both runs')
+  parser.add_argument(
+      '--first-config', help='first configuration', default='ignition')
+  parser.add_argument(
+      '--second-config', help='second configuration', default='ignition_turbo')
+  parser.add_argument(
+      '--first-d8', default='d8',
+      help='optional path to first d8 executable, '
+           'default: bundled in the same directory as this script')
+  parser.add_argument(
+      '--second-d8',
+      help='optional path to second d8 executable, default: same as first')
+  parser.add_argument('testcase', help='path to test case')
+  options = parser.parse_args()
+
+  # Ensure we have a test case.
+  assert (os.path.exists(options.testcase) and
+          os.path.isfile(options.testcase)), (
+      'Test case %s doesn\'t exist' % options.testcase)
+
+  # Use first d8 as default for second d8.
+  options.second_d8 = options.second_d8 or options.first_d8
+
+  # Ensure absolute paths.
+  if not os.path.isabs(options.first_d8):
+    options.first_d8 = os.path.join(BASE_PATH, options.first_d8)
+  if not os.path.isabs(options.second_d8):
+    options.second_d8 = os.path.join(BASE_PATH, options.second_d8)
+
+  # Ensure executables exist.
+  assert os.path.exists(options.first_d8)
+  assert os.path.exists(options.second_d8)
+
+  # Infer architecture from build artifacts.
+  options.first_arch = infer_arch(options.first_d8)
+  options.second_arch = infer_arch(options.second_d8)
+
+  # Ensure we make a sane comparison.
+  assert (options.first_arch != options.second_arch or
+          options.first_config != options.second_config), (
+      'Need either arch or config difference.')
+  assert options.first_arch in SUPPORTED_ARCHS
+  assert options.second_arch in SUPPORTED_ARCHS
+  assert options.first_config in CONFIGS
+  assert options.second_config in CONFIGS
+
+  return options
+
+
+def get_meta_data(content):
+  """Extracts original-source-file paths from test case content."""
+  sources = []
+  for line in content.splitlines():
+    match = SOURCE_RE.match(line)
+    if match:
+      sources.append(match.group(1))
+  return {'sources': sources}
+
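+# E.g. a test case containing the line print("v8-foozzie source: foo.js");
+# yields {'sources': ['foo.js']}.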
+
+def content_bailout(content, ignore_fun):
+  """Print failure state and return if ignore_fun matches content."""
+  bug = (ignore_fun(content) or '').strip()
+  if bug:
+    print FAILURE_HEADER_TEMPLATE % dict(
+        configs='', source_key='', suppression=bug)
+    return True
+  return False
+
+
+def pass_bailout(output, step_number):
+  """Print info and return if in timeout or crash pass states."""
+  if output.HasTimedOut():
+    # Dashed output, so that no other clusterfuzz tools can match the
+    # words timeout or crash.
+    print '# V8 correctness - T-I-M-E-O-U-T %d' % step_number
+    return True
+  if output.HasCrashed():
+    print '# V8 correctness - C-R-A-S-H %d' % step_number
+    return True
+  return False
+
+
+def fail_bailout(output, ignore_by_output_fun):
+  """Print failure state and return if ignore_by_output_fun matches output."""
+  bug = (ignore_by_output_fun(output.stdout) or '').strip()
+  if bug:
+    print FAILURE_HEADER_TEMPLATE % dict(
+        configs='', source_key='', suppression=bug)
+    return True
+  return False
+
+
+def main():
+  options = parse_args()
+
+  # Suppressions are architecture and configuration specific.
+  suppress = v8_suppressions.get_suppression(
+      options.first_arch, options.first_config,
+      options.second_arch, options.second_config,
+  )
+
+  # Static bailout based on test case content or metadata.
+  with open(options.testcase) as f:
+    content = f.read()
+  if content_bailout(get_meta_data(content), suppress.ignore_by_metadata):
+    return RETURN_FAIL
+  if content_bailout(content, suppress.ignore_by_content):
+    return RETURN_FAIL
+
+  # Set up runtime arguments.
+  common_flags = FLAGS + ['--random-seed', str(options.random_seed)]
+  first_config_flags = common_flags + CONFIGS[options.first_config]
+  second_config_flags = common_flags + CONFIGS[options.second_config]
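+  # E.g. for 'ignition' this is FLAGS + ['--random-seed', '12345'] +
+  # CONFIGS['ignition'], matching the flag lines in
+  # testdata/failure_output.txt.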
+
+  def run_d8(d8, config_flags):
+    preamble = PREAMBLE[:]
+    if options.first_arch != options.second_arch:
+      preamble.append(ARCH_MOCKS)
+    args = [d8] + config_flags + preamble + [options.testcase]
+    print " ".join(args)
+    if d8.endswith('.py'):
+      # Wrap with python in tests.
+      args = [sys.executable] + args
+    return v8_commands.Execute(
+        args,
+        cwd=os.path.dirname(options.testcase),
+        timeout=TIMEOUT,
+    )
+
+  first_config_output = run_d8(options.first_d8, first_config_flags)
+
+  # Early bailout based on first run's output.
+  if pass_bailout(first_config_output, 1):
+    return RETURN_PASS
+
+  second_config_output = run_d8(options.second_d8, second_config_flags)
+
+  # Bailout based on second run's output.
+  if pass_bailout(second_config_output, 2):
+    return RETURN_PASS
+
+  difference, source = suppress.diff(
+      first_config_output.stdout, second_config_output.stdout)
+
+  if source:
+    source_key = hashlib.sha1(source).hexdigest()[:ORIGINAL_SOURCE_HASH_LENGTH]
+  else:
+    source = ORIGINAL_SOURCE_DEFAULT
+    source_key = ORIGINAL_SOURCE_DEFAULT
+
+  if difference:
+    # Only bail out due to suppressed output if there was a difference. If a
+    # suppression doesn't show up anymore in the statistics, we might want to
+    # remove it.
+    if fail_bailout(first_config_output, suppress.ignore_by_output1):
+      return RETURN_FAIL
+    if fail_bailout(second_config_output, suppress.ignore_by_output2):
+      return RETURN_FAIL
+
+    # The first three entries will be parsed by clusterfuzz. Format changes
+    # will require changes on the clusterfuzz side.
+    first_config_label = '%s,%s' % (options.first_arch, options.first_config)
+    second_config_label = '%s,%s' % (options.second_arch, options.second_config)
+    print (FAILURE_TEMPLATE % dict(
+        configs='%s:%s' % (first_config_label, second_config_label),
+        source_key=source_key,
+        suppression='', # We can't tie bugs to differences.
+        first_config_label=first_config_label,
+        second_config_label=second_config_label,
+        first_config_flags=' '.join(first_config_flags),
+        second_config_flags=' '.join(second_config_flags),
+        first_config_output=
+            first_config_output.stdout.decode('utf-8', 'replace'),
+        second_config_output=
+            second_config_output.stdout.decode('utf-8', 'replace'),
+        source=source,
+        difference=difference.decode('utf-8', 'replace'),
+    )).encode('utf-8', 'replace')
+    return RETURN_FAIL
+
+  # TODO(machenbach): Figure out if we could also return a bug in case there's
+  # no difference, but one of the line suppressions has matched - and without
+  # the match there would be a difference.
+
+  print '# V8 correctness - pass'
+  return RETURN_PASS
+
+
+if __name__ == "__main__":
+  try:
+    result = main()
+  except SystemExit:
+    # Make sure clusterfuzz reports internal errors and wrong usage.
+    # Use one label for all internal and usage errors.
+    print FAILURE_HEADER_TEMPLATE % dict(
+        configs='', source_key='', suppression='wrong_usage')
+    result = RETURN_FAIL
+  except MemoryError:
+    # Running out of memory happens occasionally but is not actionable.
+    print '# V8 correctness - pass'
+    result = RETURN_PASS
+  except Exception as e:
+    print FAILURE_HEADER_TEMPLATE % dict(
+        configs='', source_key='', suppression='internal_error')
+    print '# Internal error: %s' % e
+    traceback.print_exc(file=sys.stdout)
+    result = RETURN_FAIL
+
+  sys.exit(result)
diff --git a/tools/foozzie/v8_foozzie_test.py b/tools/foozzie/v8_foozzie_test.py
new file mode 100644
index 0000000..4452fde
--- /dev/null
+++ b/tools/foozzie/v8_foozzie_test.py
@@ -0,0 +1,116 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+import sys
+import unittest
+
+import v8_foozzie
+import v8_suppressions
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+FOOZZIE = os.path.join(BASE_DIR, 'v8_foozzie.py')
+TEST_DATA = os.path.join(BASE_DIR, 'testdata')
+
+class UnitTest(unittest.TestCase):
+  def testDiff(self):
+    # TODO(machenbach): Mock out suppression configuration.
+    suppress = v8_suppressions.get_suppression(
+        'x64', 'fullcode', 'x64', 'default')
+    one = ''
+    two = ''
+    diff = None, None
+    self.assertEquals(diff, suppress.diff(one, two))
+
+    one = 'a \n  b\nc();'
+    two = 'a \n  b\nc();'
+    diff = None, None
+    self.assertEquals(diff, suppress.diff(one, two))
+
+    # Ignore line before caret, caret position, stack trace char numbers
+    # error message and validator output.
+    one = """
+undefined
+weird stuff
+      ^
+Validation of asm.js module failed: foo bar
+somefile.js: TypeError: undefined is not a function
+stack line :15: foo
+  undefined
+"""
+    two = """
+undefined
+other weird stuff
+            ^
+somefile.js: TypeError: baz is not a function
+stack line :2: foo
+Validation of asm.js module failed: baz
+  undefined
+"""
+    diff = None, None
+    self.assertEquals(diff, suppress.diff(one, two))
+
+    one = """
+Still equal
+Extra line
+"""
+    two = """
+Still equal
+"""
+    diff = '- Extra line', None
+    self.assertEquals(diff, suppress.diff(one, two))
+
+    one = """
+Still equal
+"""
+    two = """
+Still equal
+Extra line
+"""
+    diff = '+ Extra line', None
+    self.assertEquals(diff, suppress.diff(one, two))
+
+    one = """
+undefined
+somefile.js: TypeError: undefined is not a constructor
+"""
+    two = """
+undefined
+otherfile.js: TypeError: undefined is not a constructor
+"""
+    diff = """- somefile.js: TypeError: undefined is not a constructor
++ otherfile.js: TypeError: undefined is not a constructor""", None
+    self.assertEquals(diff, suppress.diff(one, two))
+
+
+def cut_verbose_output(stdout):
+  return '\n'.join(stdout.split('\n')[2:])
+
+
+def run_foozzie(first_d8, second_d8):
+  return subprocess.check_output([
+    sys.executable, FOOZZIE,
+    '--random-seed', '12345',
+    '--first-d8', os.path.join(TEST_DATA, first_d8),
+    '--second-d8', os.path.join(TEST_DATA, second_d8),
+    '--first-config', 'ignition',
+    '--second-config', 'ignition_staging',
+    os.path.join(TEST_DATA, 'fuzz-123.js'),
+  ])
+
+
+class SystemTest(unittest.TestCase):
+  def testSyntaxErrorDiffPass(self):
+    stdout = run_foozzie('test_d8_1.py', 'test_d8_2.py')
+    self.assertEquals('# V8 correctness - pass\n', cut_verbose_output(stdout))
+
+  def testDifferentOutputFail(self):
+    with open(os.path.join(TEST_DATA, 'failure_output.txt')) as f:
+      expected_output = f.read()
+    with self.assertRaises(subprocess.CalledProcessError) as ctx:
+      run_foozzie('test_d8_1.py', 'test_d8_3.py')
+    e = ctx.exception
+    self.assertEquals(v8_foozzie.RETURN_FAIL, e.returncode)
+    self.assertEquals(expected_output, cut_verbose_output(e.output))
diff --git a/tools/foozzie/v8_mock.js b/tools/foozzie/v8_mock.js
new file mode 100644
index 0000000..298a853
--- /dev/null
+++ b/tools/foozzie/v8_mock.js
@@ -0,0 +1,106 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is intended for permanent JS behavior changes for mocking out
+// non-deterministic behavior. For temporary suppressions, please refer to
+// v8_suppressions.js.
+// This file is loaded before each correctness test case and won't get
+// minimized.
+
+
+// This will be overridden in the test cases. The override can be minimized.
+var __PrettyPrint = function __PrettyPrint(msg) { print(msg); };
+
+// Mock Math.random.
+(function () {
+  var index = 0
+  Math.random = function() {
+    index = (index + 1) % 10;
+    return index / 10.0;
+  }
+})();
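+// Successive Math.random() calls now deterministically cycle through
+// 0.1, 0.2, ..., 0.9, 0.0.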
+
+// Mock Date.
+(function () {
+  var index = 0
+  var mockDate = 1477662728696
+  var mockDateNow = function() {
+    index = (index + 1) % 10
+    mockDate = mockDate + index + 1
+    return mockDate
+  }
+
+  var origDate = Date;
+  var handler = {
+    construct: function(target, args, newTarget) {
+      if (args.length > 0) {
+        return new (
+            Function.prototype.bind.apply(origDate, [null].concat(args)));
+      } else {
+        return new origDate(mockDateNow());
+      }
+    },
+    get: function(target, property, receiver) {
+      if (property == "now") {
+        return mockDateNow;
+      }
+    },
+  }
+
+  Date = new Proxy(Date, handler);
+})();
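+// I.e. Date.now() and no-argument 'new Date()' yield deterministic, slowly
+// increasing timestamps, while explicit arguments like 'new Date(0)' pass
+// through unchanged.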
+
+// Mock performance.now().
+(function () {
+  performance.now = function () { return 1.2; }
+})();
+
+// Mock stack traces.
+Error.prepareStackTrace = function (error, structuredStackTrace) {
+  return "";
+};
+Object.defineProperty(
+    Error, 'prepareStackTrace', { configurable: false, writable: false });
+
+// Mock buffer access in float typed arrays because of varying NaN patterns.
+// Note that for now we just use no-op forwarding proxies, because they
+// already turn off optimizations.
+(function () {
+  var mock = function(arrayType) {
+    var handler = {
+      construct: function(target, args) {
+        return new Proxy(
+            Function.prototype.bind.apply(arrayType, [null].concat(args)), {});
+      },
+    };
+    return new Proxy(arrayType, handler);
+  }
+
+  Float32Array = mock(Float32Array);
+  Float64Array = mock(Float64Array);
+})();
+
+// Mock Worker.
+(function () {
+  var index = 0;
+  // TODO(machenbach): Randomize this for each test case, but keep stable
+  // during comparison. Also data and random above.
+  var workerMessages = [
+    undefined, 0, -1, "", "foo", 42, [], {}, [0], {"x": 0}
+  ];
+  Worker = function(code){
+    try {
+      __PrettyPrint(eval(code));
+    } catch(e) {
+      __PrettyPrint(e);
+    }
+    this.getMessage = function(){
+      index = (index + 1) % 10;
+      return workerMessages[index];
+    }
+    this.postMessage = function(msg){
+      __PrettyPrint(msg);
+    }
+  };
+})();
diff --git a/tools/foozzie/v8_mock_archs.js b/tools/foozzie/v8_mock_archs.js
new file mode 100644
index 0000000..227d767
--- /dev/null
+++ b/tools/foozzie/v8_mock_archs.js
@@ -0,0 +1,45 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is intended for permanent JS behavior changes for mocking out
+// non-deterministic behavior. For temporary suppressions, please refer to
+// v8_suppressions.js.
+// This mocks only architecture specific differences. Refer to v8_mock.js
+// for the general case.
+// This file is loaded before each correctness test case and won't get
+// minimized.
+
+// Mock maximum typed-array length and limit to 1MiB.
+(function () {
+  var mock = function(arrayType) {
+    var handler = {
+      construct: function(target, args) {
+        if (args.length > 0 &&
+            Number.isInteger(args[0]) &&
+            args[0] > 1048576) {
+          args[0] = 1048576
+        } else if (args.length > 2 &&
+                   Number.isInteger(args[2]) &&
+                   args[2] > 1048576) {
+          args[2] = 1048576
+        }
+        return new (
+            Function.prototype.bind.apply(arrayType, [null].concat(args)));
+      },
+    };
+    return new Proxy(arrayType, handler);
+  }
+
+  ArrayBuffer = mock(ArrayBuffer);
+  Int8Array = mock(Int8Array);
+  Uint8Array = mock(Uint8Array);
+  Uint8ClampedArray = mock(Uint8ClampedArray);
+  Int16Array = mock(Int16Array);
+  Uint16Array = mock(Uint16Array);
+  Int32Array = mock(Int32Array);
+  Uint32Array = mock(Uint32Array);
+  Float32Array = mock(Float32Array);
+  Float64Array = mock(Float64Array);
+})();
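+// E.g. 'new Int8Array(1 << 30)' now behaves like 'new Int8Array(1048576)',
+// keeping allocation sizes comparable across 32- and 64-bit architectures.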
diff --git a/tools/foozzie/v8_suppressions.js b/tools/foozzie/v8_suppressions.js
new file mode 100644
index 0000000..2d9cf2f
--- /dev/null
+++ b/tools/foozzie/v8_suppressions.js
@@ -0,0 +1,20 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is loaded before each correctness test case and after v8_mock.js.
+// You can temporarily change JS behavior here to silence known problems.
+// Please refer to a bug in a comment and remove the suppression once the
+// problem is fixed.
+
+// Suppress http://crbug.com/662429
+(function () {
+  var __real_Math_pow = Math.pow
+  Math.pow = function(a, b){
+    if (b < 0) {
+      return 0.000017;
+    } else {
+      return __real_Math_pow(a, b);
+    }
+  }
+})();
diff --git a/tools/foozzie/v8_suppressions.py b/tools/foozzie/v8_suppressions.py
new file mode 100644
index 0000000..23d137a
--- /dev/null
+++ b/tools/foozzie/v8_suppressions.py
@@ -0,0 +1,346 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Suppressions for V8 correctness fuzzer failures.
+
+We support three types of suppressions:
+1. Ignore test case by pattern.
+Map a regular expression to a bug entry. A new failure will be reported
+when the pattern matches a JS test case.
+Subsequent matches will be recorded under the first failure.
+
+2. Ignore test run by output pattern:
+Map a regular expression to a bug entry. A new failure will be reported
+when the pattern matches the output of a particular run.
+Subsequent matches will be recorded under the first failure.
+
+3. Relax line-to-line comparisons with expressions of lines to ignore and
+lines to be normalized (i.e. ignore only portions of lines).
+These are not tied to bugs; be careful not to silently switch off this tool!
+
+Alternatively, think about adding a behavior change to v8_suppressions.js
+to silence a particular class of problems.
+"""
+
+import itertools
+import re
+
+# Max line length for regular expressions checking for lines to ignore.
+MAX_LINE_LENGTH = 512
+
+# For ignoring lines before carets and to ignore caret positions.
+CARET_RE = re.compile(r'^\s*\^\s*$')
+
+# Ignore by original source files. Map from bug->list of relative file paths in
+# V8, e.g. '/v8/test/mjsunit/d8-performance-now.js', including the leading
+# /v8/ prefix. A test will be suppressed if one of the files below was used
+# to mutate the test.
+IGNORE_SOURCES = {
+  # This contains a usage of f.arguments that often fires.
+  'crbug.com/662424': [
+    '/v8/test/mjsunit/bugs/bug-222.js',
+    '/v8/test/mjsunit/bugs/bug-941049.js',
+    '/v8/test/mjsunit/regress/regress-crbug-668795.js',
+    '/v8/test/mjsunit/regress/regress-2989.js',
+  ],
+
+  'crbug.com/681088': [
+    '/v8/test/mjsunit/asm/asm-validation.js',
+    '/v8/test/mjsunit/asm/b5528-comma.js',
+    '/v8/test/mjsunit/asm/pointer-masking.js',
+    '/v8/test/mjsunit/compiler/regress-443744.js',
+    '/v8/test/mjsunit/regress/regress-599719.js',
+    '/v8/test/mjsunit/regress/wasm/regression-647649.js',
+    '/v8/test/mjsunit/wasm/asm-wasm.js',
+    '/v8/test/mjsunit/wasm/asm-wasm-deopt.js',
+    '/v8/test/mjsunit/wasm/asm-wasm-heap.js',
+    '/v8/test/mjsunit/wasm/asm-wasm-literals.js',
+    '/v8/test/mjsunit/wasm/asm-wasm-stack.js',
+  ],
+
+  'crbug.com/681241': [
+    '/v8/test/mjsunit/regress/regress-617526.js',
+    '/v8/test/mjsunit/regress/wasm/regression-02862.js',
+  ],
+
+  'crbug.com/688159': [
+    '/v8/test/mjsunit/es7/exponentiation-operator.js',
+  ],
+}
+
+# Ignore by test case pattern. Map from bug->regexp.
+# Regular expressions are assumed to be compiled. We use regexp.match.
+# Make sure the code doesn't match in the preamble portion of the test case
+# (i.e. in the modified inlined mjsunit.js). You can reference the comment
+# between the two parts like so:
+#  'crbug.com/666308':
+#      re.compile(r'.*End stripped down and modified version.*'
+#                 r'\.prototype.*instanceof.*.*', re.S)
+# TODO(machenbach): Insert a JS sentinel between the two parts, because
+# comments are stripped during minimization.
+IGNORE_TEST_CASES = {
+}
+
+# Ignore by output pattern. Map from config->bug->regexp. Config '' is used
+# to match all configurations. Otherwise use either a compiler configuration,
+# e.g. fullcode or validate_asm, or an architecture, e.g. x64 or ia32, or a
+# comma-separated combination, e.g. x64,fullcode, for more specific
+# suppressions.
+# The bug is preferably a crbug.com/XYZ reference, but can be any short,
+# distinguishable label.
+# Regular expressions are assumed to be compiled. We use regexp.search.
+IGNORE_OUTPUT = {
+  '': {
+    'crbug.com/664068':
+        re.compile(r'RangeError(?!: byte length)', re.S),
+    'crbug.com/667678':
+        re.compile(r'\[native code\]', re.S),
+    'crbug.com/681806':
+        re.compile(r'WebAssembly\.Instance', re.S),
+    'crbug.com/681088':
+        re.compile(r'TypeError: Cannot read property \w+ of undefined', re.S),
+  },
+  'validate_asm': {
+    'validate_asm':
+        re.compile(r'TypeError'),
+  },
+}
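+# E.g. under any config, output containing 'RangeError' is suppressed as
+# crbug.com/664068 unless the message continues with ': byte length' (the
+# negative lookahead above).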
+
+# Lines matching any of the following regular expressions will be ignored
+# if appearing on both sides. The capturing groups need to match exactly.
+# Use uncompiled regular expressions - they'll be compiled later.
+ALLOWED_LINE_DIFFS = [
+  # Ignore caret position in stack traces.
+  r'^\s*\^\s*$',
+
+  # Ignore some stack trace headers as messages might not match.
+  r'^(.*)TypeError: .* is not a function$',
+  r'^(.*)TypeError: .* is not a constructor$',
+  r'^(.*)TypeError: (.*) is not .*$',
+  r'^(.*)ReferenceError: .* is not defined$',
+  r'^(.*):\d+: ReferenceError: .* is not defined$',
+
+  # These are rarely needed. It includes some cases above.
+  r'^\w*Error: .* is not .*$',
+  r'^(.*) \w*Error: .* is not .*$',
+  r'^(.*):\d+: \w*Error: .* is not .*$',
+
+  # Some test cases just print the message.
+  r'^.* is not a function(.*)$',
+  r'^(.*) is not a .*$',
+
+  # Ignore lines of stack traces as character positions might not match.
+  r'^    at (?:new )?([^:]*):\d+:\d+(.*)$',
+  r'^(.*):\d+:(.*)$',
+
+  # crbug.com/662840
+  r"^.*(?:Trying to access ')?(\w*)(?:(?:' through proxy)|"
+  r"(?: is not defined))$",
+
+  # crbug.com/680064. This subsumes one of the above expressions.
+  r'^(.*)TypeError: .* function$',
+
+  # crbug.com/681326
+  r'^(.*<anonymous>):\d+:\d+(.*)$',
+]
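+# E.g. 'foo.js:15: bar' and 'foo.js:2: bar' differ only in the ignored line
+# number: both match r'^(.*):\d+:(.*)$' with equal groups, so the diff is
+# allowed.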
+
+# Lines matching any of the following regular expressions will be ignored.
+# Use uncompiled regular expressions - they'll be compiled later.
+IGNORE_LINES = [
+  r'^Validation of asm\.js module failed: .+$',
+  r'^.*:\d+: Invalid asm.js: .*$',
+  r'^Warning: unknown flag .*$',
+  r'^Warning: .+ is deprecated.*$',
+  r'^Try --help for options$',
+
+  # crbug.com/677032
+  r'^.*:\d+:.*asm\.js.*: success$',
+
+  # crbug.com/680064
+  r'^\s*at .* \(<anonymous>\)$',
+
+  # crbug.com/689877
+  r'^.*SyntaxError: .*Stack overflow$',
+]
+
+
+###############################################################################
+# Implementation - you should not need to change anything below this point.
+
+# Compile regular expressions.
+ALLOWED_LINE_DIFFS = [re.compile(exp) for exp in ALLOWED_LINE_DIFFS]
+IGNORE_LINES = [re.compile(exp) for exp in IGNORE_LINES]
+
+ORIGINAL_SOURCE_PREFIX = 'v8-foozzie source: '
+
+def line_pairs(lines):
+  return itertools.izip_longest(
+      lines, itertools.islice(lines, 1, None), fillvalue=None)
+
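+# E.g. line_pairs(['a', 'b', 'c']) yields ('a', 'b'), ('b', 'c'), ('c', None),
+# pairing each line with its lookahead.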
+
+def caret_match(line1, line2):
+  if (not line1 or
+      not line2 or
+      len(line1) > MAX_LINE_LENGTH or
+      len(line2) > MAX_LINE_LENGTH):
+    return False
+  return bool(CARET_RE.match(line1) and CARET_RE.match(line2))
+
+
+def short_line_output(line):
+  if len(line) <= MAX_LINE_LENGTH:
+    # Avoid copying.
+    return line
+  return line[0:MAX_LINE_LENGTH] + '...'
+
+
+def ignore_by_regexp(line1, line2, allowed):
+  if len(line1) > MAX_LINE_LENGTH or len(line2) > MAX_LINE_LENGTH:
+    return False
+  for exp in allowed:
+    match1 = exp.match(line1)
+    match2 = exp.match(line2)
+    if match1 and match2:
+      # If there are groups in the regexp, ensure the groups matched the same
+      # things.
+      if match1.groups() == match2.groups():  # tuple comparison
+        return True
+  return False
+
+
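+# Example: diff_output(['a', 'b'], ['a', 'c'], [], [], []) returns
+# ('- b\n+ c', None); identical outputs yield (None, source).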
+def diff_output(output1, output2, allowed, ignore1, ignore2):
+  """Returns a tuple (difference, source).
+
+  The difference is None if there's no difference, otherwise a string
+  with a readable diff.
+
+  The source is the last source output within the test case, or None if no
+  such output existed.
+  """
+  def useful_line(ignore):
+    def fun(line):
+      return all(not e.match(line) for e in ignore)
+    return fun
+
+  lines1 = filter(useful_line(ignore1), output1)
+  lines2 = filter(useful_line(ignore2), output2)
+
+  # This keeps track of where we are in the original source file of the
+  # fuzz test case.
+  source = None
+
+  for ((line1, lookahead1), (line2, lookahead2)) in itertools.izip_longest(
+      line_pairs(lines1), line_pairs(lines2), fillvalue=(None, None)):
+
+    # Only one of the two iterators should run out.
+    assert not (line1 is None and line2 is None)
+
+    # One iterator ends earlier.
+    if line1 is None:
+      return '+ %s' % short_line_output(line2), source
+    if line2 is None:
+      return '- %s' % short_line_output(line1), source
+
+    # If lines are equal, no further checks are necessary.
+    if line1 == line2:
+      # Instrumented original-source-file output must be equal in both
+      # versions. It only makes sense to update it here when both lines
+      # are equal.
+      if line1.startswith(ORIGINAL_SOURCE_PREFIX):
+        source = line1[len(ORIGINAL_SOURCE_PREFIX):]
+      continue
+
+    # Look ahead. If the next line is a caret, ignore this line.
+    if caret_match(lookahead1, lookahead2):
+      continue
+
+    # Check if a regexp allows these lines to be different.
+    if ignore_by_regexp(line1, line2, allowed):
+      continue
+
+    # Lines are different.
+    return (
+        '- %s\n+ %s' % (short_line_output(line1), short_line_output(line2)),
+        source,
+    )
+
+  # No difference found.
+  return None, source
+
+
+def get_suppression(arch1, config1, arch2, config2):
+  return V8Suppression(arch1, config1, arch2, config2)
+
+
+class Suppression(object):
+  def diff(self, output1, output2):
+    return None
+
+  def ignore_by_metadata(self, metadata):
+    return False
+
+  def ignore_by_content(self, testcase):
+    return False
+
+  def ignore_by_output1(self, output):
+    return False
+
+  def ignore_by_output2(self, output):
+    return False
+
+
+class V8Suppression(Suppression):
+  def __init__(self, arch1, config1, arch2, config2):
+    self.arch1 = arch1
+    self.config1 = config1
+    self.arch2 = arch2
+    self.config2 = config2
+
+  def diff(self, output1, output2):
+    return diff_output(
+        output1.splitlines(),
+        output2.splitlines(),
+        ALLOWED_LINE_DIFFS,
+        IGNORE_LINES,
+        IGNORE_LINES,
+    )
+
+  def ignore_by_content(self, testcase):
+    for bug, exp in IGNORE_TEST_CASES.iteritems():
+      if exp.match(testcase):
+        return bug
+    return False
+
+  def ignore_by_metadata(self, metadata):
+    for bug, sources in IGNORE_SOURCES.iteritems():
+      for source in sources:
+        if source in metadata['sources']:
+          return bug
+    return False
+
+  def ignore_by_output1(self, output):
+    return self.ignore_by_output(output, self.arch1, self.config1)
+
+  def ignore_by_output2(self, output):
+    return self.ignore_by_output(output, self.arch2, self.config2)
+
+  def ignore_by_output(self, output, arch, config):
+    def check(mapping):
+      for bug, exp in mapping.iteritems():
+        if exp.search(output):
+          return bug
+      return None
+    bug = check(IGNORE_OUTPUT.get('', {}))
+    if bug:
+      return bug
+    bug = check(IGNORE_OUTPUT.get(arch, {}))
+    if bug:
+      return bug
+    bug = check(IGNORE_OUTPUT.get(config, {}))
+    if bug:
+      return bug
+    bug = check(IGNORE_OUTPUT.get('%s,%s' % (arch, config), {}))
+    if bug:
+      return bug
+    return None
diff --git a/tools/fuzz-harness.sh b/tools/fuzz-harness.sh
index c874d01..01f0353 100755
--- a/tools/fuzz-harness.sh
+++ b/tools/fuzz-harness.sh
@@ -87,7 +87,7 @@
 
 fi
 
-flags='--debug-code --expose-gc --verify-gc'
+flags='--expose-gc --verify-gc'
 python -u "$jsfunfuzz_dir/jsfunfuzz/multi_timed_run.py" 300 \
     "$d8" $flags "$jsfunfuzz_dir/jsfunfuzz/jsfunfuzz.js"
 exit_code=$(cat w* | grep " looking good" -c)
diff --git a/tools/gcmole/gcmole-tools.tar.gz.sha1 b/tools/gcmole/gcmole-tools.tar.gz.sha1
index 67d758f..f9e3e01 100644
--- a/tools/gcmole/gcmole-tools.tar.gz.sha1
+++ b/tools/gcmole/gcmole-tools.tar.gz.sha1
@@ -1 +1 @@
-b10748117f8f53d05dda0a77424b8794e645e330
+a21e6b0d08afcfe454042c2c1fbf1d1738caf129
\ No newline at end of file
diff --git a/tools/gcmole/gcmole.cc b/tools/gcmole/gcmole.cc
index 9f1f781..f7a6c94 100644
--- a/tools/gcmole/gcmole.cc
+++ b/tools/gcmole/gcmole.cc
@@ -318,22 +318,24 @@
 
 class Environment {
  public:
-  Environment() { }
+  Environment() = default;
 
   static Environment Unreachable() {
     Environment env;
-    env.live_.set();
+    env.unreachable_ = true;
     return env;
   }
 
   static Environment Merge(const Environment& l,
                            const Environment& r) {
-    return Environment(l, r);
+    Environment out(l);
+    out &= r;
+    return out;
   }
 
   Environment ApplyEffect(ExprEffect effect) const {
     Environment out = effect.hasGC() ? Environment() : Environment(*this);
-    if (effect.env() != NULL) out.live_ |= effect.env()->live_;
+    if (effect.env()) out |= *effect.env();
     return out;
   }
 
@@ -342,20 +344,23 @@
   bool IsAlive(const std::string& name) const {
     SymbolTable::iterator code = symbol_table_.find(name);
     if (code == symbol_table_.end()) return false;
-    return live_[code->second];
+    return is_live(code->second);
   }
 
   bool Equal(const Environment& env) {
-    return live_ == env.live_;
+    if (unreachable_ && env.unreachable_) return true;
+    size_t size = std::max(live_.size(), env.live_.size());
+    for (size_t i = 0; i < size; ++i) {
+      if (is_live(i) != env.is_live(i)) return false;
+    }
+    return true;
   }
 
   Environment Define(const std::string& name) const {
     return Environment(*this, SymbolToCode(name));
   }
 
-  void MDefine(const std::string& name) {
-    live_.set(SymbolToCode(name));
-  }
+  void MDefine(const std::string& name) { set_live(SymbolToCode(name)); }
 
   static int SymbolToCode(const std::string& name) {
     SymbolTable::iterator code = symbol_table_.find(name);
@@ -370,12 +375,7 @@
   }
 
   static void ClearSymbolTable() {
-    std::vector<Environment*>::iterator end = envs_.end();
-    for (std::vector<Environment*>::iterator i = envs_.begin();
-         i != end;
-         ++i) {
-      delete *i;
-    }
+    for (Environment* e : envs_) delete e;
     envs_.clear();
     symbol_table_.clear();
   }
@@ -383,15 +383,11 @@
   void Print() const {
     bool comma = false;
     std::cout << "{";
-    SymbolTable::iterator end = symbol_table_.end();
-    for (SymbolTable::iterator i = symbol_table_.begin();
-         i != end;
-         ++i) {
-      if (live_[i->second]) {
-        if (comma) std::cout << ", ";
-        std::cout << i->first;
-        comma = true;
-      }
+    for (auto& e : symbol_table_) {
+      if (!is_live(e.second)) continue;
+      if (comma) std::cout << ", ";
+      std::cout << e.first;
+      comma = true;
     }
     std::cout << "}";
   }
@@ -403,20 +399,54 @@
   }
 
  private:
-  Environment(const Environment& l, const Environment& r)
-      : live_(l.live_ & r.live_) {
-  }
-
   Environment(const Environment& l, int code)
       : live_(l.live_) {
-    live_.set(code);
+    set_live(code);
+  }
+
+  void set_live(size_t pos) {
+    if (unreachable_) return;
+    if (pos >= live_.size()) live_.resize(pos + 1);
+    live_[pos] = true;
+  }
+
+  bool is_live(size_t pos) const {
+    return unreachable_ || (live_.size() > pos && live_[pos]);
+  }
+
+  Environment& operator|=(const Environment& o) {
+    if (o.unreachable_) {
+      unreachable_ = true;
+      live_.clear();
+    } else if (!unreachable_) {
+      for (size_t i = 0, e = o.live_.size(); i < e; ++i) {
+        if (o.live_[i]) set_live(i);
+      }
+    }
+    return *this;
+  }
+
+  Environment& operator&=(const Environment& o) {
+    if (o.unreachable_) return *this;
+    if (unreachable_) return *this = o;
+
+    // Carry over false bits from the tail of o.live_, and reset all bits that
+    // are not set in o.live_.
+    size_t size = std::max(live_.size(), o.live_.size());
+    if (size > live_.size()) live_.resize(size);
+    for (size_t i = 0; i < size; ++i) {
+      if (live_[i] && (i >= o.live_.size() || !o.live_[i])) live_[i] = false;
+    }
+    return *this;
   }
 
   static SymbolTable symbol_table_;
-  static std::vector<Environment* > envs_;
+  static std::vector<Environment*> envs_;
 
-  static const int kMaxNumberOfLocals = 256;
-  std::bitset<kMaxNumberOfLocals> live_;
+  std::vector<bool> live_;
+  // unreachable_ == true implies live_.empty(), but is_live(i) still
+  // returns true for all i.
+  bool unreachable_ = false;
 
   friend class ExprEffect;
   friend class CallProps;
@@ -432,8 +462,11 @@
     if (in.hasRawDef()) raw_def_.set(arg);
     if (in.hasRawUse()) raw_use_.set(arg);
     if (in.env() != NULL) {
-      if (env_ == NULL) env_ = in.env();
-      env_->live_ |= in.env()->live_;
+      if (env_ == NULL) {
+        env_ = in.env();
+      } else {
+        *env_ |= *in.env();
+      }
     }
   }
 
@@ -462,8 +495,7 @@
 
 
 Environment::SymbolTable Environment::symbol_table_;
-std::vector<Environment* > Environment::envs_;
-
+std::vector<Environment*> Environment::envs_;
 
 ExprEffect ExprEffect::Merge(ExprEffect a, ExprEffect b) {
   Environment* a_env = a.env();
@@ -471,7 +503,7 @@
   Environment* out = NULL;
   if (a_env != NULL && b_env != NULL) {
     out = Environment::Allocate(*a_env);
-    out->live_ &= b_env->live_;
+    *out &= *b_env;
   }
   return ExprEffect(a.effect_ | b.effect_, out);
 }
@@ -483,7 +515,7 @@
   Environment* out = (b_env == NULL) ? a_env : b_env;
   if (a_env != NULL && b_env != NULL) {
     out = Environment::Allocate(*b_env);
-    out->live_ |= a_env->live_;
+    *out |= *a_env;
   }
   return ExprEffect(a.effect_ | b.effect_, out);
 }
diff --git a/tools/gdbinit b/tools/gdbinit
index b696a8f..c78baa2 100644
--- a/tools/gdbinit
+++ b/tools/gdbinit
@@ -11,6 +11,15 @@
 Usage: job tagged_ptr
 end
 
+# Print v8::Local handle value.
+define jlh
+call _v8_internal_Print_Object(*(v8::internal::Object**)(*$arg0))
+end
+document jlh
+Print content of a v8::Local handle
+Usage: jlh local_handle
+end
+
 # Print Code objects containing given PC.
 define jco
 call _v8_internal_Print_Code((void*)($arg0))
@@ -20,15 +29,25 @@
 Usage: jco pc
 end
 
-# Print TypeFeedbackVector
+# Print FeedbackVector
 define jfv
-call _v8_internal_Print_TypeFeedbackVector((void*)($arg0))
+call _v8_internal_Print_FeedbackVector((void*)($arg0))
 end
 document jfv
-Print a v8 TypeFeedbackVector object
-Usage: jtv tagged_ptr
+Print a v8 FeedbackVector object
+Usage: jfv tagged_ptr
 end
 
+# Print FeedbackMetadata
+define jfm
+call _v8_internal_Print_FeedbackMetadata((void*)($arg0))
+end
+document jfm
+Print a v8 FeedbackMetadata object
+Usage: jfm tagged_ptr
+end
+
+
 # Print DescriptorArray.
 define jda
 call _v8_internal_Print_DescriptorArray((void*)($arg0))
@@ -38,6 +57,15 @@
 Usage: jda tagged_ptr
 end
 
+# Print LayoutDescriptor.
+define jld
+call _v8_internal_Print_LayoutDescriptor((void*)($arg0))
+end
+document jld
+Print a v8 LayoutDescriptor object
+Usage: jld tagged_ptr
+end
+
 # Print TransitionArray.
 define jta
 call _v8_internal_Print_TransitionArray((void*)($arg0))
diff --git a/tools/gen-postmortem-metadata.py b/tools/gen-postmortem-metadata.py
index 5fd39f3..2e903d7 100644
--- a/tools/gen-postmortem-metadata.py
+++ b/tools/gen-postmortem-metadata.py
@@ -55,6 +55,8 @@
 #
 consts_misc = [
     { 'name': 'FirstNonstringType',     'value': 'FIRST_NONSTRING_TYPE' },
+    { 'name': 'APIObjectType',          'value': 'JS_API_OBJECT_TYPE' },
+    { 'name': 'SpecialAPIObjectType',   'value': 'JS_SPECIAL_API_OBJECT_TYPE' },
 
     { 'name': 'IsNotStringMask',        'value': 'kIsNotStringMask' },
     { 'name': 'StringTag',              'value': 'kStringTag' },
@@ -91,12 +93,12 @@
 
     { 'name': 'prop_idx_first',
         'value': 'DescriptorArray::kFirstIndex' },
-    { 'name': 'prop_type_field',
-        'value': 'DATA' },
-    { 'name': 'prop_type_const_field',
-        'value': 'DATA_CONSTANT' },
-    { 'name': 'prop_type_mask',
-        'value': 'PropertyDetails::TypeField::kMask' },
+    { 'name': 'prop_kind_Data',
+        'value': 'kData' },
+    { 'name': 'prop_kind_Accessor',
+        'value': 'kAccessor' },
+    { 'name': 'prop_kind_mask',
+        'value': 'PropertyDetails::KindField::kMask' },
     { 'name': 'prop_index_mask',
         'value': 'PropertyDetails::FieldIndexField::kMask' },
     { 'name': 'prop_index_shift',
@@ -127,13 +129,13 @@
         'value': 'Representation::Kind::kExternal' },
 
     { 'name': 'prop_desc_key',
-        'value': 'DescriptorArray::kDescriptorKey' },
+        'value': 'DescriptorArray::kEntryKeyIndex' },
     { 'name': 'prop_desc_details',
-        'value': 'DescriptorArray::kDescriptorDetails' },
+        'value': 'DescriptorArray::kEntryDetailsIndex' },
     { 'name': 'prop_desc_value',
-        'value': 'DescriptorArray::kDescriptorValue' },
+        'value': 'DescriptorArray::kEntryValueIndex' },
     { 'name': 'prop_desc_size',
-        'value': 'DescriptorArray::kDescriptorSize' },
+        'value': 'DescriptorArray::kEntrySize' },
 
     { 'name': 'elements_fast_holey_elements',
         'value': 'FAST_HOLEY_ELEMENTS' },
@@ -153,6 +155,8 @@
     { 'name': 'bit_field3_number_of_own_descriptors_shift',
         'value': 'Map::NumberOfOwnDescriptorsBits::kShift' },
 
+    { 'name': 'off_fp_context_or_frame_type',
+        'value': 'CommonFrameConstants::kContextOrFrameTypeOffset'},
     { 'name': 'off_fp_context',
         'value': 'StandardFrameConstants::kContextOffset' },
     { 'name': 'off_fp_constant_pool',
@@ -225,6 +229,7 @@
     'JSFunction, context, Context, kContextOffset',
     'HeapObject, map, Map, kMapOffset',
     'JSObject, elements, Object, kElementsOffset',
+    'JSObject, internal_fields, uintptr_t, kHeaderSize',
     'FixedArray, data, uintptr_t, kHeaderSize',
     'JSArrayBuffer, backing_store, Object, kBackingStoreOffset',
     'JSArrayBufferView, byte_offset, Object, kByteOffsetOffset',
diff --git a/tools/ic-processor b/tools/ic-processor
new file mode 100755
index 0000000..f41b447
--- /dev/null
+++ b/tools/ic-processor
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+# Find the name of the log file to process; it must not start with a dash.
+log_file="v8.log"
+for arg in "$@"
+do
+  if ! expr "X${arg}" : "^X-" > /dev/null; then
+    log_file=${arg}
+  fi
+done
+
+tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+  d8_public=`which d8`
+  if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
+[ -n "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
+
+if [ ! -x "$d8_exec" ]; then
+  D8_PATH=`pwd`/out/native
+  d8_exec=$D8_PATH/d8
+fi
+
+if [ ! -x "$d8_exec" ]; then
+  d8_exec=`grep -m 1 -o '".*/d8"' "$log_file" | sed 's/"//g'`
+fi
+
+if [ ! -x "$d8_exec" ]; then
+  echo "d8 shell not found in $D8_PATH"
+  echo "To build, execute 'make native' from the V8 directory"
+  exit 1
+fi
+
+# Silence noise printed to stderr.
+cat "$log_file" | $d8_exec $tools_path/splaytree.js $tools_path/codemap.js \
+  $tools_path/csvparser.js $tools_path/consarray.js \
+  $tools_path/profile.js $tools_path/profile_view.js \
+  $tools_path/logreader.js $tools_path/ic-processor.js \
+  $tools_path/SourceMap.js \
+  $tools_path/ic-processor-driver.js -- "$@" 2>/dev/null
diff --git a/tools/ic-processor-driver.js b/tools/ic-processor-driver.js
new file mode 100644
index 0000000..58c608d
--- /dev/null
+++ b/tools/ic-processor-driver.js
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function processArguments(args) {
+  var processor = new ArgumentsProcessor(args);
+  if (processor.parse()) {
+    return processor.result();
+  } else {
+    processor.printUsageAndExit();
+  }
+}
+
+function initSourceMapSupport() {
+  // Pull dev tools source maps into our namespace.
+  SourceMap = WebInspector.SourceMap;
+
+  // Overwrite the load function to load scripts synchronously.
+  SourceMap.load = function(sourceMapURL) {
+    var content = readFile(sourceMapURL);
+    var sourceMapObject = (JSON.parse(content));
+    return new SourceMap(sourceMapURL, sourceMapObject);
+  };
+}
+
+var params = processArguments(arguments);
+var sourceMap = null;
+if (params.sourceMap) {
+  initSourceMapSupport();
+  sourceMap = SourceMap.load(params.sourceMap);
+}
+var icProcessor = new IcProcessor();
+icProcessor.processLogFile(params.logFileName);
diff --git a/tools/ic-processor.js b/tools/ic-processor.js
new file mode 100644
index 0000000..bb4f02e
--- /dev/null
+++ b/tools/ic-processor.js
@@ -0,0 +1,282 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function inherits(childCtor, parentCtor) {
+  childCtor.prototype.__proto__ = parentCtor.prototype;
+};
+
+/**
+ * A thin wrapper around shell's 'read' function showing a file name on error.
+ */
+function readFile(fileName) {
+  try {
+    return read(fileName);
+  } catch (e) {
+    print(fileName + ': ' + (e.message || e));
+    throw e;
+  }
+}
+
+/**
+ * Parser for dynamic code optimization state.
+ */
+function parseState(s) {
+  switch (s) {
+  case "": return Profile.CodeState.COMPILED;
+  case "~": return Profile.CodeState.OPTIMIZABLE;
+  case "*": return Profile.CodeState.OPTIMIZED;
+  }
+  throw new Error("unknown code state: " + s);
+}
+
+
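+// In the dispatch table below a null parser keeps the raw string field,
+// parseInt converts it to a number and 'var-args' collects all remaining
+// fields (see logreader.js for the dispatch format).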
+function IcProcessor() {
+  var propertyICParser = [parseInt, parseInt, parseInt, null, null, parseInt,
+                          null, null, null];
+  LogReader.call(this, {
+      'code-creation': {
+          parsers: [null, parseInt, parseInt, parseInt, null, 'var-args'],
+          processor: this.processCodeCreation },
+      'code-move': { parsers: [parseInt, parseInt],
+          processor: this.processCodeMove },
+      'code-delete': { parsers: [parseInt],
+          processor: this.processCodeDelete },
+      'sfi-move': { parsers: [parseInt, parseInt],
+          processor: this.processFunctionMove },
+      'LoadIC': {
+        parsers : propertyICParser,
+        processor: this.processPropertyIC.bind(this, "LoadIC") },
+      'StoreIC': {
+        parsers : propertyICParser,
+        processor: this.processPropertyIC.bind(this, "StoreIC") },
+      'KeyedLoadIC': {
+        parsers : propertyICParser,
+        processor: this.processPropertyIC.bind(this, "KeyedLoadIC") },
+      'KeyedStoreIC': {
+        parsers : propertyICParser,
+        processor: this.processPropertyIC.bind(this, "KeyedStoreIC") },
+      'CompareIC': {
+        parsers : [parseInt, parseInt, parseInt, parseInt, null, null, null,
+                   null, null, null, null],
+        processor: this.processCompareIC },
+      'BinaryOpIC': {
+        parsers : [parseInt, parseInt, parseInt, parseInt, null, null,
+                   parseInt],
+        processor: this.processBinaryOpIC },
+      'ToBooleanIC': {
+        parsers : [parseInt, parseInt, parseInt, parseInt, null, null],
+        processor: this.processToBooleanIC },
+      'PatchIC': {
+        parsers : [parseInt, parseInt, parseInt],
+        processor: this.processPatchIC },
+      });
+  this.deserializedEntriesNames_ = [];
+  this.profile_ = new Profile();
+
+  this.LoadIC = 0;
+  this.StoreIC = 0;
+  this.KeyedLoadIC = 0;
+  this.KeyedStoreIC = 0;
+  this.CompareIC = 0;
+  this.BinaryOpIC = 0;
+  this.ToBooleanIC = 0;
+  this.PatchIC = 0;
+}
+inherits(IcProcessor, LogReader);
+
+/**
+ * @override
+ */
+IcProcessor.prototype.printError = function(str) {
+  print(str);
+};
+
+
+IcProcessor.prototype.processLogFile = function(fileName) {
+  this.lastLogFileName_ = fileName;
+  var line;
+  while (line = readline()) {
+    this.processLogLine(line);
+  }
+  print();
+  print("=====================");
+  print("Load: " + this.LoadIC);
+  print("Store: " + this.StoreIC);
+  print("KeyedLoad: " + this.KeyedLoadIC);
+  print("KeyedStore: " + this.KeyedStoreIC);
+  print("CompareIC: " + this.CompareIC);
+  print("BinaryOpIC: " + this.BinaryOpIC);
+  print("ToBooleanIC: " + this.ToBooleanIC);
+  print("PatchIC: " + this.PatchIC);
+};
+
+
+IcProcessor.prototype.processCodeCreation = function(
+    type, kind, start, size, name, maybe_func) {
+  name = this.deserializedEntriesNames_[start] || name;
+  if (maybe_func.length) {
+    var funcAddr = parseInt(maybe_func[0]);
+    var state = parseState(maybe_func[1]);
+    this.profile_.addFuncCode(type, name, start, size, funcAddr, state);
+  } else {
+    this.profile_.addCode(type, name, start, size);
+  }
+};
+
+
+IcProcessor.prototype.processCodeMove = function(from, to) {
+  this.profile_.moveCode(from, to);
+};
+
+
+IcProcessor.prototype.processCodeDelete = function(start) {
+  this.profile_.deleteCode(start);
+};
+
+
+IcProcessor.prototype.processFunctionMove = function(from, to) {
+  this.profile_.moveFunc(from, to);
+};
+
+IcProcessor.prototype.formatName = function(entry) {
+  if (!entry) return "<unknown>";
+  var name = entry.func.getName();
+  var re = /(.*):[0-9]+:[0-9]+$/;
+  var array = re.exec(name);
+  if (!array) return name;
+  return array[1];
+}
+
+IcProcessor.prototype.processPropertyIC = function (
+    type, pc, line, column, old_state, new_state, map, name, modifier,
+    slow_reason) {
+  this[type]++;
+  var entry = this.profile_.findEntry(pc);
+  print(type + " (" + old_state + "->" + new_state + modifier + ") at " +
+        this.formatName(entry) + ":" + line + ":" + column + " " + name +
+        " (map 0x" + map.toString(16) + ")");
+}
+
+IcProcessor.prototype.processCompareIC = function (
+    pc, line, column, stub, op, old_left, old_right, old_state, new_left,
+    new_right, new_state) {
+  var entry = this.profile_.findEntry(pc);
+  this.CompareIC++;
+  print("CompareIC[" + op + "] ((" +
+        old_left + "+" + old_right + "=" + old_state + ")->(" +
+        new_left + "+" + new_right + "=" + new_state + ")) at " +
+        this.formatName(entry) + ":" + line + ":" + column);
+}
+
+IcProcessor.prototype.processBinaryOpIC = function (
+    pc, line, column, stub, old_state, new_state, allocation_site) {
+  var entry = this.profile_.findEntry(pc);
+  this.BinaryOpIC++;
+  print("BinaryOpIC (" + old_state + "->" + new_state + ") at " +
+        this.formatName(entry) + ":" + line + ":" + column);
+}
+
+IcProcessor.prototype.processToBooleanIC = function (
+    pc, line, column, stub, old_state, new_state) {
+  var entry = this.profile_.findEntry(pc);
+  this.ToBooleanIC++;
+  print("ToBooleanIC (" + old_state + "->" + new_state + ") at " +
+        this.formatName(entry) + ":" + line + ":" + column);
+}
+
+IcProcessor.prototype.processPatchIC = function (pc, test, delta) {
+  var entry = this.profile_.findEntry(pc);
+  this.PatchIC++;
+  print("PatchIC (0x" + test.toString(16) + ", " + delta + ") at " +
+        this.formatName(entry));
+}
+
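+// Left-pads s to len characters; pad strings of each length are cached as
+// numeric properties on the padLeft function itself.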
+function padLeft(s, len) {
+  s = s.toString();
+  if (s.length < len) {
+    var padLength = len - s.length;
+    if (!(padLength in padLeft)) {
+      padLeft[padLength] = new Array(padLength + 1).join(' ');
+    }
+    s = padLeft[padLength] + s;
+  }
+  return s;
+};
+
+
+function ArgumentsProcessor(args) {
+  this.args_ = args;
+  this.result_ = ArgumentsProcessor.DEFAULTS;
+
+  this.argsDispatch_ = {
+    '--range': ['range', 'auto,auto',
+        'Specify the range limit as [start],[end]'],
+    '--source-map': ['sourceMap', null,
+        'Specify the source map that should be used for output']
+  };
+};
+
+
+ArgumentsProcessor.DEFAULTS = {
+  logFileName: 'v8.log',
+  range: 'auto,auto',
+};
+
+
+ArgumentsProcessor.prototype.parse = function() {
+  while (this.args_.length) {
+    var arg = this.args_.shift();
+    if (arg.charAt(0) != '-') {
+      this.result_.logFileName = arg;
+      continue;
+    }
+    var userValue = null;
+    var eqPos = arg.indexOf('=');
+    if (eqPos != -1) {
+      userValue = arg.substr(eqPos + 1);
+      arg = arg.substr(0, eqPos);
+    }
+    if (arg in this.argsDispatch_) {
+      var dispatch = this.argsDispatch_[arg];
+      this.result_[dispatch[0]] = userValue == null ? dispatch[1] : userValue;
+    } else {
+      return false;
+    }
+  }
+  return true;
+};
+
+
+ArgumentsProcessor.prototype.result = function() {
+  return this.result_;
+};
+
+
+ArgumentsProcessor.prototype.printUsageAndExit = function() {
+
+  function padRight(s, len) {
+    s = s.toString();
+    if (s.length < len) {
+      s = s + (new Array(len - s.length + 1).join(' '));
+    }
+    return s;
+  }
+
+  print('Cmdline args: [options] [log-file-name]\n' +
+        'Default log file name is "' +
+        ArgumentsProcessor.DEFAULTS.logFileName + '".\n');
+  print('Options:');
+  for (var arg in this.argsDispatch_) {
+    var synonyms = [arg];
+    var dispatch = this.argsDispatch_[arg];
+    for (var synArg in this.argsDispatch_) {
+      if (arg !== synArg && dispatch === this.argsDispatch_[synArg]) {
+        synonyms.push(synArg);
+        delete this.argsDispatch_[synArg];
+      }
+    }
+    print('  ' + padRight(synonyms.join(', '), 20) + " " + dispatch[2]);
+  }
+  quit(2);
+};
diff --git a/tools/ignition/linux_perf_report.py b/tools/ignition/linux_perf_report.py
index 69db37c..4e0b884 100755
--- a/tools/ignition/linux_perf_report.py
+++ b/tools/ignition/linux_perf_report.py
@@ -54,6 +54,8 @@
   r"v8::internal::(?:\(anonymous namespace\)::)?Compile|v8::internal::Parser")
 JIT_CODE_SYMBOLS_RE = re.compile(
   r"(LazyCompile|Compile|Eval|Script):(\*|~)")
+GC_SYMBOLS_RE = re.compile(
+  r"v8::internal::Heap::CollectGarbage")
 
 
 def strip_function_parameters(symbol):
@@ -74,7 +76,7 @@
 
 def collapsed_callchains_generator(perf_stream, hide_other=False,
                                    hide_compiler=False, hide_jit=False,
-                                   show_full_signatures=False):
+                                   hide_gc=False, show_full_signatures=False):
   current_chain = []
   skip_until_end_of_chain = False
   compiler_symbol_in_chain = False
@@ -122,6 +124,11 @@
         current_chain.append("[jit]")
         yield current_chain
         skip_until_end_of_chain = True
+    elif GC_SYMBOLS_RE.match(symbol):
+      if not hide_gc:
+        current_chain.append("[gc]")
+        yield current_chain
+        skip_until_end_of_chain = True
     elif symbol == "Stub:CEntryStub" and compiler_symbol_in_chain:
       if not hide_compiler:
         current_chain.append("[compiler]")
@@ -212,6 +219,11 @@
     action="store_true"
   )
   command_line_parser.add_argument(
+    "--hide-gc",
+    help="Hide samples from garbage collection",
+    action="store_true"
+  )
+  command_line_parser.add_argument(
     "--show-full-signatures", "-s",
     help="show full signatures instead of function names",
     action="store_true"
@@ -237,7 +249,8 @@
 
   callchains = collapsed_callchains_generator(
     perf.stdout, program_options.hide_other, program_options.hide_compiler,
-    program_options.hide_jit, program_options.show_full_signatures)
+    program_options.hide_jit, program_options.hide_gc,
+    program_options.show_full_signatures)
 
   if program_options.output_flamegraph:
     write_flamegraph_input_file(program_options.output_stream, callchains)
diff --git a/tools/jsfunfuzz/fuzz-harness.sh b/tools/jsfunfuzz/fuzz-harness.sh
index 205a61b..8d064b2 100755
--- a/tools/jsfunfuzz/fuzz-harness.sh
+++ b/tools/jsfunfuzz/fuzz-harness.sh
@@ -65,7 +65,7 @@
 
 fi
 
-flags='--debug-code --expose-gc --verify-gc'
+flags='--expose-gc --verify-gc'
 python -u "$jsfunfuzz_dir/jsfunfuzz/multi_timed_run.py" 300 \
     "$d8" $flags "$jsfunfuzz_dir/jsfunfuzz/jsfunfuzz.js"
 exit_code=$(cat w* | grep " looking good" -c)
diff --git a/tools/luci-go/linux64/isolate.sha1 b/tools/luci-go/linux64/isolate.sha1
index 41d0add..b8593a7 100644
--- a/tools/luci-go/linux64/isolate.sha1
+++ b/tools/luci-go/linux64/isolate.sha1
@@ -1 +1 @@
-cf7c1fac12790056ac393774827a5720c7590bac
+3c0fbcab83730c86bbd5a09e760388dcb7053bc4
diff --git a/tools/luci-go/mac64/isolate.sha1 b/tools/luci-go/mac64/isolate.sha1
index 15744d6..bf7e1c1 100644
--- a/tools/luci-go/mac64/isolate.sha1
+++ b/tools/luci-go/mac64/isolate.sha1
@@ -1 +1 @@
-4678a9332ef5a7b90b184763afee1c100981f710
+d37a2f34eff58e1fb04038bd52381001479d4aa1
diff --git a/tools/luci-go/win64/isolate.exe.sha1 b/tools/luci-go/win64/isolate.exe.sha1
index 7c5b7eb..c575f97 100644
--- a/tools/luci-go/win64/isolate.exe.sha1
+++ b/tools/luci-go/win64/isolate.exe.sha1
@@ -1 +1 @@
-98457ff4fc79d05661fea53d2b3aff70fac90022
+d4b894493b1ee5c04ec5bc88e6ea286426540770
diff --git a/tools/mb/mb.py b/tools/mb/mb.py
index 536dc00..b37c9dd 100755
--- a/tools/mb/mb.py
+++ b/tools/mb/mb.py
@@ -777,7 +777,13 @@
       self.WriteFile(gn_runtime_deps_path, '\n'.join(gn_labels) + '\n')
       cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
 
-    ret, _, _ = self.Run(cmd)
+    # Override msvs infra environment variables.
+    # TODO(machenbach): Remove after GYP_MSVS_VERSION is removed on infra side.
+    env = {}
+    env.update(os.environ)
+    env['GYP_MSVS_VERSION'] = '2015'
+
+    ret, _, _ = self.Run(cmd, env=env)
     if ret:
         # If `gn gen` failed, we should exit early rather than trying to
         # generate isolates. Run() will have already logged any error output.
@@ -1008,7 +1014,6 @@
                  + logdog_command + test_cmdline)
     elif use_x11 and test_type == 'windowed_test_launcher':
       extra_files = [
-          'xdisplaycheck',
           '../../testing/test_env.py',
           '../../testing/xvfb.py',
       ]
diff --git a/tools/memory/lsan/suppressions.txt b/tools/memory/lsan/suppressions.txt
new file mode 100644
index 0000000..36e59ec
--- /dev/null
+++ b/tools/memory/lsan/suppressions.txt
@@ -0,0 +1,19 @@
+# Do not add new suppressions below.
+# TODO(machenbach): Delete this file as soon as it is empty.
+
+# cctest
+leak:v8::internal::Debug::NextAsyncTaskId
+leak:v8::internal::wasm::DecodeWasmModule
+leak:v8::internal::wasm::WasmInterpreter::WasmInterpreter
+leak:v8::internal::WasmDebugInfo::SetBreakpoint
+
+# debugger
+leak:v8_inspector::WasmTranslation::TranslatorImpl::DisassemblingTranslator::AddFakeScript
+leak:v8::internal::compiler::JumpThreading::ApplyForwarding
+
+# mjsunit
+leak:v8::internal::FuncNameInferrer::FuncNameInferrer
+leak:v8::internal::JSArrayBuffer::SetupAllocatingData
+
+# unittests
+leak:v8::internal::Isolate::FindOrAllocatePerThreadDataForThisThread
diff --git a/tools/parser-shell.cc b/tools/parser-shell.cc
index 0517bbf..29f5207 100644
--- a/tools/parser-shell.cc
+++ b/tools/parser-shell.cc
@@ -36,8 +36,9 @@
 #include "include/libplatform/libplatform.h"
 #include "src/api.h"
 #include "src/compiler.h"
+#include "src/objects-inl.h"
 #include "src/parsing/parse-info.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/parsing.h"
 #include "src/parsing/preparse-data-format.h"
 #include "src/parsing/preparse-data.h"
 #include "src/parsing/preparser.h"
@@ -93,15 +94,12 @@
   i::ScriptData* cached_data_impl = NULL;
   // First round of parsing (produce data to cache).
   {
-    Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator(), ZONE_NAME);
-    ParseInfo info(&zone, script);
+    ParseInfo info(script);
     info.set_cached_data(&cached_data_impl);
     info.set_compile_options(v8::ScriptCompiler::kProduceParserCache);
     v8::base::ElapsedTimer timer;
     timer.Start();
-    // Allow lazy parsing; otherwise we won't produce cached data.
-    info.set_allow_lazy_parsing();
-    bool success = Parser::ParseStatic(&info);
+    bool success = parsing::ParseProgram(&info);
     parse_time1 = timer.Elapsed();
     if (!success) {
       fprintf(stderr, "Parsing failed\n");
@@ -110,15 +108,12 @@
   }
   // Second round of parsing (consume cached data).
   {
-    Zone zone(reinterpret_cast<i::Isolate*>(isolate)->allocator(), ZONE_NAME);
-    ParseInfo info(&zone, script);
+    ParseInfo info(script);
     info.set_cached_data(&cached_data_impl);
     info.set_compile_options(v8::ScriptCompiler::kConsumeParserCache);
     v8::base::ElapsedTimer timer;
     timer.Start();
-    // Allow lazy parsing; otherwise cached data won't help.
-    info.set_allow_lazy_parsing();
-    bool success = Parser::ParseStatic(&info);
+    bool success = parsing::ParseProgram(&info);
     parse_time2 = timer.Elapsed();
     if (!success) {
       fprintf(stderr, "Parsing failed\n");
diff --git a/tools/presubmit.py b/tools/presubmit.py
index f9ae2bd..9857497 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -55,7 +55,6 @@
 # build/include_what_you_use: Started giving false positives for variables
 #   named "string" and "map" assuming that you needed to include STL headers.
 # TODO(bmeurer): Fix and re-enable readability/check
-# TODO(epertoso): Maybe re-enable readability/fn_size after
 # http://crrev.com/2199323003 relands.
 
 LINT_RULES = """
@@ -70,6 +69,12 @@
 
 LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
 FLAGS_LINE = re.compile("//\s*Flags:.*--([A-z0-9-])+_[A-z0-9].*\n")
+ASSERT_OPTIMIZED_PATTERN = re.compile("assertOptimized")
+FLAGS_ENABLE_OPT = re.compile("//\s*Flags:.*--(crankshaft|turbo)[^-].*\n")
+ASSERT_UNOPTIMIZED_PATTERN = re.compile("assertUnoptimized")
+FLAGS_NO_ALWAYS_OPT = re.compile("//\s*Flags:.*--no-?always-opt.*\n")
+
+TOOLS_PATH = dirname(abspath(__file__))
 
 def CppLintWorker(command):
   try:
@@ -156,13 +161,34 @@
   files and invoke a custom check on the files.
   """
 
-  def Run(self, path):
+  def RunOnPath(self, path):
+    """Runs processor on all files under the given path."""
+
     all_files = []
     for file in self.GetPathsToSearch():
       all_files += self.FindFilesIn(join(path, file))
-    if not self.ProcessFiles(all_files, path):
-      return False
-    return True
+    return self.ProcessFiles(all_files)
+
+  def RunOnFiles(self, files):
+    """Runs processor only on affected files."""
+
+    # Helper for getting directory pieces.
+    dirs = lambda f: dirname(f).split(os.sep)
+
+    # Path offsets where to look (to be in sync with RunOnPath).
+    # Normalize '.' to check for it with str.startswith.
+    search_paths = [('' if p == '.' else p) for p in self.GetPathsToSearch()]
+
+    all_files = [
+      f.AbsoluteLocalPath()
+      for f in files
+      if (not self.IgnoreFile(f.LocalPath()) and
+          self.IsRelevant(f.LocalPath()) and
+          all(not self.IgnoreDir(d) for d in dirs(f.LocalPath())) and
+          any(map(f.LocalPath().startswith, search_paths)))
+    ]
+
+    return self.ProcessFiles(all_files)
 
   def IgnoreDir(self, name):
     return (name.startswith('.') or
@@ -214,7 +240,7 @@
 
     return None
 
-  def ProcessFiles(self, files, path):
+  def ProcessFiles(self, files):
     good_files_cache = FileContentsCache('.cpplint-cache')
     good_files_cache.Load()
     files = good_files_cache.FilterUnchangedFiles(files)
@@ -224,7 +250,7 @@
 
     filters = ",".join([n for n in LINT_RULES])
     command = [sys.executable, 'cpplint.py', '--filter', filters]
-    cpplint = self.GetCpplintScript(join(path, "tools"))
+    cpplint = self.GetCpplintScript(TOOLS_PATH)
     if cpplint is None:
       print('Could not find cpplint.py. Make sure '
             'depot_tools is installed and in the path.')
@@ -314,11 +340,13 @@
                        'libraries.cc',
                        'libraries-empty.cc',
                        'lua_binarytrees.js',
+                       'meta-123.js',
                        'memops.js',
                        'poppler.js',
                        'primes.js',
                        'raytrace.js',
                        'regexp-pcre.js',
+                       'resources-123.js',
                        'rjsmin.py',
                        'script-breakpoint.h',
                        'sqlite.js',
@@ -336,6 +364,8 @@
                        'zlib.js']
   IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
 
+  IGNORE_COPYRIGHTS_DIRECTORY = "test/test262/local-tests"
+
   def EndOfDeclaration(self, line):
     return line == "}" or line == "};"
 
@@ -351,7 +381,8 @@
       if '\t' in contents:
         print "%s contains tabs" % name
         result = False
-    if not base in SourceProcessor.IGNORE_COPYRIGHTS:
+    if base not in SourceProcessor.IGNORE_COPYRIGHTS and \
+        SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORY not in name:
       if not COPYRIGHT_HEADER_PATTERN.search(contents):
         print "%s is missing a correct copyright header." % name
         result = False
@@ -374,14 +405,25 @@
       print "%s does not end with a single new line." % name
       result = False
     # Sanitize flags for fuzzer.
-    if "mjsunit" in name:
+    if "mjsunit" in name or "debugger" in name:
       match = FLAGS_LINE.search(contents)
       if match:
         print "%s Flags should use '-' (not '_')" % name
         result = False
+      if not "mjsunit/mjsunit.js" in name:
+        if ASSERT_OPTIMIZED_PATTERN.search(contents) and \
+            not FLAGS_ENABLE_OPT.search(contents):
+          print "%s Flag --crankshaft or --turbo should be set " \
+                "if assertOptimized() is used" % name
+          result = False
+        if ASSERT_UNOPTIMIZED_PATTERN.search(contents) and \
+            not FLAGS_NO_ALWAYS_OPT.search(contents):
+          print "%s Flag --no-always-opt should be set if " \
+                "assertUnoptimized() is used" % name
+          result = False
     return result
 
-  def ProcessFiles(self, files, path):
+  def ProcessFiles(self, files):
     success = True
     violations = 0
     for file in files:
@@ -438,45 +480,46 @@
   json.loads(contents, object_pairs_hook=check_pairs)
   return status["success"]
 
-def CheckStatusFiles(workspace):
-  success = True
-  suite_paths = utils.GetSuitePaths(join(workspace, "test"))
-  for root in suite_paths:
-    suite_path = join(workspace, "test", root)
-    status_file_path = join(suite_path, root + ".status")
-    suite = testsuite.TestSuite.LoadTestSuite(suite_path)
-    if suite and exists(status_file_path):
+
+class StatusFilesProcessor(SourceFileProcessor):
+  """Checks status files for incorrect syntax and duplicate keys."""
+
+  def IsRelevant(self, name):
+    # Several changes to files under the test directories could impact status
+    # files.
+    return True
+
+  def GetPathsToSearch(self):
+    return ['test']
+
+  def ProcessFiles(self, files):
+    test_path = join(dirname(TOOLS_PATH), 'test')
+    status_files = set([])
+    for file_path in files:
+      if file_path.startswith(test_path):
+        # Strip off absolute path prefix pointing to test suites.
+        pieces = file_path[len(test_path):].lstrip(os.sep).split(os.sep)
+        if pieces:
+          # Infer affected status file name. Only care for existing status
+          # files. Some directories under "test" don't have any.
+          if not os.path.isdir(join(test_path, pieces[0])):
+            continue
+          status_file = join(test_path, pieces[0], pieces[0] + ".status")
+          if not os.path.exists(status_file):
+            continue
+          status_files.add(status_file)
+
+    success = True
+    for status_file_path in sorted(status_files):
       success &= statusfile.PresubmitCheck(status_file_path)
       success &= _CheckStatusFileForDuplicateKeys(status_file_path)
-  return success
+    return success
 
-def CheckAuthorizedAuthor(input_api, output_api):
-  """For non-googler/chromites committers, verify the author's email address is
-  in AUTHORS.
-  """
-  # TODO(maruel): Add it to input_api?
-  import fnmatch
 
-  author = input_api.change.author_email
-  if not author:
-    input_api.logging.info('No author, skipping AUTHOR check')
-    return []
-  authors_path = input_api.os_path.join(
-      input_api.PresubmitLocalPath(), 'AUTHORS')
-  valid_authors = (
-      input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
-      for line in open(authors_path))
-  valid_authors = [item.group(1).lower() for item in valid_authors if item]
-  if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
-    input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
-    return [output_api.PresubmitPromptWarning(
-        ('%s is not in AUTHORS file. If you are a new contributor, please visit'
-        '\n'
-        'http://www.chromium.org/developers/contributing-code and read the '
-        '"Legal" section\n'
-        'If you are a chromite, verify the contributor signed the CLA.') %
-        author)]
-  return []
+def CheckDeps(workspace):
+  checkdeps_py = join(workspace, 'buildtools', 'checkdeps', 'checkdeps.py')
+  return subprocess.call([sys.executable, checkdeps_py, workspace]) == 0
+
 
 def GetOptions():
   result = optparse.OptionParser()
@@ -490,13 +533,16 @@
   parser = GetOptions()
   (options, args) = parser.parse_args()
   success = True
+  print "Running checkdeps..."
+  success &= CheckDeps(workspace)
   print "Running C++ lint check..."
   if not options.no_lint:
-    success &= CppLintProcessor().Run(workspace)
+    success &= CppLintProcessor().RunOnPath(workspace)
   print "Running copyright header, trailing whitespaces and " \
         "two empty lines between declarations check..."
-  success &= SourceProcessor().Run(workspace)
-  success &= CheckStatusFiles(workspace)
+  success &= SourceProcessor().RunOnPath(workspace)
+  print "Running status-files check..."
+  success &= StatusFilesProcessor().RunOnPath(workspace)
   if success:
     return 0
   else:
diff --git a/tools/release/create_release.py b/tools/release/create_release.py
index 14d44b4..8c0ad48 100755
--- a/tools/release/create_release.py
+++ b/tools/release/create_release.py
@@ -15,12 +15,7 @@
   MESSAGE = "Preparation."
 
   def RunStep(self):
-    fetchspecs = [
-      "+refs/heads/*:refs/heads/*",
-      "+refs/pending/*:refs/pending/*",
-      "+refs/pending-tags/*:refs/pending-tags/*",
-    ]
-    self.Git("fetch origin %s" % " ".join(fetchspecs))
+    self.Git("fetch origin +refs/heads/*:refs/heads/*")
     self.GitCheckout("origin/master")
     self.DeleteBranch("work-branch")
 
@@ -155,12 +150,23 @@
     TextToFile(changelog_entry, self.Config("CHANGELOG_ENTRY_FILE"))
 
 
+class PushBranchRef(Step):
+  MESSAGE = "Create branch ref."
+
+  def RunStep(self):
+    cmd = "push origin %s:refs/heads/%s" % (self["push_hash"], self["version"])
+    if self._options.dry_run:
+      print "Dry run. Command:\ngit %s" % cmd
+    else:
+      self.Git(cmd)
+
+
 class MakeBranch(Step):
   MESSAGE = "Create the branch."
 
   def RunStep(self):
     self.Git("reset --hard origin/master")
-    self.Git("checkout -b work-branch %s" % self["push_hash"])
+    self.Git("new-branch work-branch --upstream origin/%s" % self["version"])
     self.GitCheckoutFile(CHANGELOG_FILE, self["latest_version"])
     self.GitCheckoutFile(VERSION_FILE, self["latest_version"])
     self.GitCheckoutFile(WATCHLISTS_FILE, self["latest_version"])
@@ -223,37 +229,11 @@
     os.remove(self.Config("CHANGELOG_ENTRY_FILE"))
 
 
-class FixBrokenTag(Step):
-  MESSAGE = "Check for a missing tag and fix that instead."
-
-  def RunStep(self):
-    commit = None
-    try:
-      commit = self.GitLog(
-          n=1, format="%H",
-          grep=self["commit_title"],
-          branch="origin/%s" % self["version"],
-      )
-    except GitFailedException:
-      # In the normal case, the remote doesn't exist yet and git will fail.
-      pass
-    if commit:
-      print "Found %s. Trying to repair tag and bail out." % self["version"]
-      self.Git("tag %s %s" % (self["version"], commit))
-      self.Git("push origin refs/tags/%s" % self["version"])
-      return True
-
-
 class PushBranch(Step):
   MESSAGE = "Push changes."
 
   def RunStep(self):
-    pushspecs = [
-      "refs/heads/work-branch:refs/pending/heads/%s" % self["version"],
-      "%s:refs/pending-tags/heads/%s" % (self["push_hash"], self["version"]),
-      "%s:refs/heads/%s" % (self["push_hash"], self["version"]),
-    ]
-    cmd = "push origin %s" % " ".join(pushspecs)
+    cmd = "cl land --bypass-hooks -f"
     if self._options.dry_run:
       print "Dry run. Command:\ngit %s" % cmd
     else:
@@ -319,12 +299,12 @@
       DetectLastRelease,
       PrepareChangeLog,
       EditChangeLog,
+      PushBranchRef,
       MakeBranch,
       AddChangeLog,
       SetVersion,
       EnableMergeWatchlist,
       CommitBranch,
-      FixBrokenTag,
       PushBranch,
       TagRevision,
       CleanUp,
diff --git a/tools/release/test_scripts.py b/tools/release/test_scripts.py
index a344376..0cf1aff 100644
--- a/tools/release/test_scripts.py
+++ b/tools/release/test_scripts.py
@@ -945,10 +945,7 @@
           change_log)
 
     expectations = [
-      Cmd("git fetch origin "
-          "+refs/heads/*:refs/heads/* "
-          "+refs/pending/*:refs/pending/* "
-          "+refs/pending-tags/*:refs/pending-tags/*", ""),
+      Cmd("git fetch origin +refs/heads/*:refs/heads/*", ""),
       Cmd("git checkout -f origin/master", ""),
       Cmd("git branch", ""),
       Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
@@ -962,8 +959,9 @@
       Cmd("git log -1 --format=%s rev1", "Log text 1.\n"),
       Cmd("git log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
       Cmd("git log -1 --format=%an rev1", "author1@chromium.org\n"),
+      Cmd("git push origin push_hash:refs/heads/3.22.5", ""),
       Cmd("git reset --hard origin/master", ""),
-      Cmd("git checkout -b work-branch push_hash", ""),
+      Cmd("git new-branch work-branch --upstream origin/3.22.5", ""),
       Cmd("git checkout -f 3.22.4 -- ChangeLog", "", cb=ResetChangeLog),
       Cmd("git checkout -f 3.22.4 -- include/v8-version.h", "",
           cb=self.WriteFakeVersionFile),
@@ -971,12 +969,7 @@
           cb=self.WriteFakeWatchlistsFile),
       Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
           cb=CheckVersionCommit),
-      Cmd("git log -1 --format=%H --grep=\"Version 3.22.5\" origin/3.22.5",
-          ""),
-      Cmd("git push origin "
-          "refs/heads/work-branch:refs/pending/heads/3.22.5 "
-          "push_hash:refs/pending-tags/heads/3.22.5 "
-          "push_hash:refs/heads/3.22.5", ""),
+      Cmd("git cl land --bypass-hooks -f", ""),
       Cmd("git fetch", ""),
       Cmd("git log -1 --format=%H --grep="
           "\"Version 3.22.5\" origin/3.22.5", "hsh_to_tag"),
diff --git a/tools/run-deopt-fuzzer.py b/tools/run-deopt-fuzzer.py
index b143430..27f5cc7 100755
--- a/tools/run-deopt-fuzzer.py
+++ b/tools/run-deopt-fuzzer.py
@@ -60,8 +60,7 @@
 MODE_FLAGS = {
     "debug"   : ["--nohard-abort", "--nodead-code-elimination",
                  "--nofold-constants", "--enable-slow-asserts",
-                 "--debug-code", "--verify-heap",
-                 "--noconcurrent-recompilation"],
+                 "--verify-heap", "--noconcurrent-recompilation"],
     "release" : ["--nohard-abort", "--nodead-code-elimination",
                  "--nofold-constants", "--noconcurrent-recompilation"]}
 
diff --git a/tools/run-tests.py b/tools/run-tests.py
index e94f599..55e33f9 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -65,9 +65,9 @@
 TEST_MAP = {
   # This needs to stay in sync with test/bot_default.isolate.
   "bot_default": [
+    "debugger",
     "mjsunit",
     "cctest",
-    "debugger",
     "inspector",
     "webkit",
     "fuzzer",
@@ -78,9 +78,9 @@
   ],
   # This needs to stay in sync with test/default.isolate.
   "default": [
+    "debugger",
     "mjsunit",
     "cctest",
-    "debugger",
     "inspector",
     "fuzzer",
     "message",
@@ -90,9 +90,9 @@
   ],
   # This needs to stay in sync with test/optimize_for_size.isolate.
   "optimize_for_size": [
+    "debugger",
     "mjsunit",
     "cctest",
-    "debugger",
     "inspector",
     "webkit",
     "intl",
@@ -104,16 +104,18 @@
 
 TIMEOUT_DEFAULT = 60
 
-VARIANTS = ["default", "turbofan", "ignition_staging"]
+# Variants ordered by expected runtime (slowest first).
+VARIANTS = ["ignition_staging", "default", "turbofan"]
 
 MORE_VARIANTS = [
-  "ignition",
   "stress",
   "turbofan_opt",
+  "ignition",
   "asm_wasm",
+  "wasm_traps",
 ]
 
-EXHAUSTIVE_VARIANTS = VARIANTS + MORE_VARIANTS
+EXHAUSTIVE_VARIANTS = MORE_VARIANTS + VARIANTS
 
 VARIANT_ALIASES = {
   # The default for developer workstations.
@@ -126,7 +128,7 @@
 
 DEBUG_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
                "--nofold-constants", "--enable-slow-asserts",
-               "--debug-code", "--verify-heap"]
+               "--verify-heap"]
 RELEASE_FLAGS = ["--nohard-abort", "--nodead-code-elimination",
                  "--nofold-constants"]
 
@@ -402,7 +404,15 @@
   )
 
   if options.asan:
-    os.environ['ASAN_OPTIONS'] = symbolizer
+    asan_options = [symbolizer]
+    if utils.GuessOS() != 'macos':
+      # LSAN is not available on mac.
+      asan_options.append('detect_leaks=1')
+      os.environ['LSAN_OPTIONS'] = ":".join([
+        'suppressions=%s' % os.path.join(
+            BASE_DIR, 'tools', 'memory', 'lsan', 'suppressions.txt'),
+      ])
+    os.environ['ASAN_OPTIONS'] = ":".join(asan_options)
 
   if options.sancov_dir:
     assert os.path.exists(options.sancov_dir)
diff --git a/tools/run_perf.py b/tools/run_perf.py
index 2b406bd..9e93d41 100755
--- a/tools/run_perf.py
+++ b/tools/run_perf.py
@@ -42,12 +42,11 @@
 defaults.
 
 A suite's results_processor may point to an optional python script. If
-specified, it is called after running the tests like this (with a path
-relatve to the suite level's path):
-<results_processor file> <same flags as for d8> <suite level name> <output>
+specified, it is called after running the tests (with a path relative to the
+suite level's path). It is expected to read the measurement's output text
+on stdin and print the processed output to stdout.
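+
+A minimal results_processor might look like this (illustrative sketch only):
+  import sys
+  print sys.stdin.read().strip()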
 
-The <output> is a temporary file containing d8 output. The results_regexp will
-be applied to the output of this script.
+The results_regexp will be applied to the processed output.
 
 A suite without "tests" is considered a performance test itself.
 
@@ -179,7 +178,7 @@
   gathered by repeated calls to ConsumeOutput.
   """
   def __init__(self, graphs, units, results_regexp, stddev_regexp):
-    self.name = graphs[-1]
+    self.name = '/'.join(graphs)
     self.graphs = graphs
     self.units = units
     self.results_regexp = results_regexp
@@ -238,6 +237,25 @@
   return lambda: iter(left), lambda: iter(right)
 
 
+def RunResultsProcessor(results_processor, stdout, count):
+  # Dummy pass through for null-runs.
+  if stdout is None:
+    return None
+
+  # We assume the results processor is relative to the suite.
+  assert os.path.exists(results_processor)
+  p = subprocess.Popen(
+      [sys.executable, results_processor],
+      stdin=subprocess.PIPE,
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+  )
+  result, _ = p.communicate(input=stdout)
+  print ">>> Processed stdout (#%d):" % count
+  print result
+  return result
+
+
 def AccumulateResults(
     graph_names, trace_configs, iter_output, trybot, no_patch, calc_total):
   """Iterates over the output of multiple benchmark reruns and accumulates
@@ -361,6 +379,7 @@
     self.flags = []
     self.test_flags = []
     self.resources = []
+    self.results_processor = None
     self.results_regexp = None
     self.stddev_regexp = None
     self.units = "score"
@@ -399,6 +418,8 @@
     self.timeout = suite.get("timeout_%s" % arch, self.timeout)
     self.units = suite.get("units", parent.units)
     self.total = suite.get("total", parent.total)
+    self.results_processor = suite.get(
+        "results_processor", parent.results_processor)
 
     # A regular expression for results. If the parent graph provides a
     # regexp and the current suite has none, a string place holder for the
@@ -445,6 +466,15 @@
   def main(self):
     return self._suite.get("main", "")
 
+  def PostProcess(self, stdouts_iter):
+    if self.results_processor:
+      def it():
+        for i, stdout in enumerate(stdouts_iter()):
+          yield RunResultsProcessor(self.results_processor, stdout, i + 1)
+      return it
+    else:
+      return stdouts_iter
+
   def ChangeCWD(self, suite_path):
     """Changes the cwd to to path defined in the current graph.
 
@@ -462,6 +492,8 @@
     # TODO(machenbach): This requires +.exe if run on windows.
     extra_flags = extra_flags or []
     cmd = [os.path.join(shell_dir, self.binary)]
+    if self.binary.endswith(".py"):
+      cmd = [sys.executable] + cmd
     if self.binary != 'd8' and '--prof' in extra_flags:
       print "Profiler supported only on a benchmark run with d8"
     return cmd + self.GetCommandFlags(extra_flags=extra_flags)
@@ -473,7 +505,7 @@
         AccumulateResults(
             self.graphs,
             self._children,
-            iter_output=stdout_with_patch,
+            iter_output=self.PostProcess(stdout_with_patch),
             trybot=trybot,
             no_patch=False,
             calc_total=self.total,
@@ -481,7 +513,7 @@
         AccumulateResults(
             self.graphs,
             self._children,
-            iter_output=stdout_no_patch,
+            iter_output=self.PostProcess(stdout_no_patch),
             trybot=trybot,
             no_patch=True,
             calc_total=self.total,
@@ -758,6 +790,8 @@
     )
 
   def PreTests(self, node, path):
+    if isinstance(node, RunnableConfig):
+      node.ChangeCWD(path)
     suite_dir = os.path.abspath(os.path.dirname(path))
     if node.path:
       bench_rel = os.path.normpath(os.path.join(*node.path))
@@ -911,7 +945,6 @@
       raise Exception("Could not set CPU governor. Present value is %s"
                       % cur_value )
 
-# TODO: Implement results_processor.
 def Main(args):
   logging.getLogger().setLevel(logging.INFO)
   parser = optparse.OptionParser()
diff --git a/tools/testrunner/local/commands.py b/tools/testrunner/local/commands.py
index a9315cb..94b892c 100644
--- a/tools/testrunner/local/commands.py
+++ b/tools/testrunner/local/commands.py
@@ -26,6 +26,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 
+import os
 import subprocess
 import sys
 from threading import Timer
@@ -62,11 +63,19 @@
     prev_error_mode = Win32SetErrorMode(error_mode)
     Win32SetErrorMode(error_mode | prev_error_mode)
 
+  env = os.environ.copy()
+  # GTest shard information is read by the V8 test runner. Make sure it
+  # doesn't leak into the execution of gtests we're wrapping. Those might
+  # otherwise apply a second level of sharding and, as a result, skip tests.
+  env.pop('GTEST_TOTAL_SHARDS', None)
+  env.pop('GTEST_SHARD_INDEX', None)
+
   try:
     process = subprocess.Popen(
       args=popen_args,
       stdout=subprocess.PIPE,
       stderr=subprocess.PIPE,
+      env=env,
       **rest
     )
   except Exception as e:
diff --git a/tools/testrunner/local/execution.py b/tools/testrunner/local/execution.py
index 4cb9e45..6adfd09 100644
--- a/tools/testrunner/local/execution.py
+++ b/tools/testrunner/local/execution.py
@@ -197,12 +197,17 @@
     self.perf_failures = False
     self.printed_allocations = False
     self.tests = [ t for s in suites for t in s.tests ]
+
+    # Always pre-sort by status file, slowest tests first.
+    slow_key = lambda t: statusfile.IsSlow(t.outcomes)
+    self.tests.sort(key=slow_key, reverse=True)
+
+    # Sort by stored duration if sorting is not opted out.
     if not context.no_sorting:
       for t in self.tests:
         t.duration = self.perfdata.FetchPerfData(t) or 1.0
-      slow_key = lambda t: statusfile.IsSlow(t.outcomes)
-      self.tests.sort(key=slow_key, reverse=True)
       self.tests.sort(key=lambda t: t.duration, reverse=True)
+
     self._CommonInit(suites, progress_indicator, context)
 
   def _CommonInit(self, suites, progress_indicator, context):
diff --git a/tools/testrunner/local/pool_unittest.py b/tools/testrunner/local/pool_unittest.py
index 335d20a..235eca6 100644
--- a/tools/testrunner/local/pool_unittest.py
+++ b/tools/testrunner/local/pool_unittest.py
@@ -23,9 +23,10 @@
   def testException(self):
     results = set()
     pool = Pool(3)
-    for result in pool.imap_unordered(Run, [[x] for x in range(0, 12)]):
-      # Item 10 will not appear in results due to an internal exception.
-      results.add(result.value)
+    with self.assertRaises(Exception):
+      for result in pool.imap_unordered(Run, [[x] for x in range(0, 12)]):
+        # Item 10 will not appear in results due to an internal exception.
+        results.add(result.value)
     expect = set(range(0, 12))
     expect.remove(10)
     self.assertEquals(expect, results)
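
The updated test uses assertRaises as a context manager, which lets the loop keep accumulating results up to the point where the pool raises. The same pattern in isolation:

  import unittest

  class PartialResults(unittest.TestCase):
    def test_partial_results(self):
      def gen():
        yield 1
        yield 2
        raise Exception('boom')
      seen = []
      # The exception escapes the loop but is caught by the context
      # manager, so everything yielded before it is still checked.
      with self.assertRaises(Exception):
        for v in gen():
          seen.append(v)
      self.assertEqual([1, 2], seen)

  if __name__ == '__main__':
    unittest.main()
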
diff --git a/tools/testrunner/local/statusfile.py b/tools/testrunner/local/statusfile.py
index 0917540..b03fcc1 100644
--- a/tools/testrunner/local/statusfile.py
+++ b/tools/testrunner/local/statusfile.py
@@ -232,11 +232,22 @@
     else:
       _ParseOutcomeList(rule, section[rule], rules, variables)
 
+JS_TEST_PATHS = {
+  'debugger': [[]],
+  'inspector': [[]],
+  'intl': [[]],
+  'message': [[]],
+  'mjsunit': [[]],
+  'mozilla': [['data']],
+  'test262': [['data', 'test'], ['local-tests', 'test']],
+  'webkit': [[]],
+}
 
 def PresubmitCheck(path):
   with open(path) as f:
     contents = ReadContent(f.read())
-  root_prefix = os.path.basename(os.path.dirname(path)) + "/"
+  basename = os.path.basename(os.path.dirname(path))
+  root_prefix = basename + "/"
   status = {"success": True}
   def _assert(check, message):  # Like "assert", but doesn't throw.
     if not check:
@@ -255,6 +266,11 @@
                 "Suite name prefix must not be used in rule keys")
         _assert(not rule.endswith('.js'),
                 ".js extension must not be used in rule keys.")
+        if basename in JS_TEST_PATHS and '*' not in rule:
+          _assert(any(os.path.exists(os.path.join(os.path.dirname(path),
+                                                  *(paths + [rule + ".js"])))
+                      for paths in JS_TEST_PATHS[basename]),
+                  "missing file for %s test %s" % (basename, rule))
     return status["success"]
   except Exception as e:
     print e
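
The presubmit addition maps each suite to the directory prefixes its JS tests live under and rejects rules that name no existing file. The core check, extracted as a hypothetical helper:

  import os

  def rule_has_file(suite_dir, suite_name, rule, js_test_paths):
    # Wildcard rules can't be checked against a single file.
    if '*' in rule:
      return True
    # e.g. for test262 the prefixes are ['data', 'test'] and
    # ['local-tests', 'test'], so rule 'foo/bar' must exist as
    # data/test/foo/bar.js or local-tests/test/foo/bar.js.
    return any(
        os.path.exists(os.path.join(suite_dir, *(prefix + [rule + '.js'])))
        for prefix in js_test_paths.get(suite_name, [[]]))
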
diff --git a/tools/testrunner/local/variants.py b/tools/testrunner/local/variants.py
index ea42bf5..4d1c6a3 100644
--- a/tools/testrunner/local/variants.py
+++ b/tools/testrunner/local/variants.py
@@ -12,8 +12,8 @@
   "ignition": [["--ignition"]],
   "ignition_staging": [["--ignition-staging"]],
   "ignition_turbofan": [["--ignition-staging", "--turbo"]],
-  "preparser": [["--min-preparse-length=0"]],
   "asm_wasm": [["--validate-asm"]],
+  "wasm_traps": [["--wasm_guard_pages", "--invoke-weak-callbacks"]],
 }
 
 # FAST_VARIANTS implies no --always-opt.
@@ -25,10 +25,10 @@
   "ignition": [["--ignition"]],
   "ignition_staging": [["--ignition-staging"]],
   "ignition_turbofan": [["--ignition-staging", "--turbo"]],
-  "preparser": [["--min-preparse-length=0"]],
   "asm_wasm": [["--validate-asm"]],
+  "wasm_traps": [["--wasm_guard_pages", "--invoke-weak-callbacks"]],
 }
 
 ALL_VARIANTS = set(["default", "stress", "turbofan", "turbofan_opt",
                     "nocrankshaft", "ignition", "ignition_staging",
-                    "ignition_turbofan", "preparser", "asm_wasm"])
+                    "ignition_turbofan", "asm_wasm", "wasm_traps"])
diff --git a/tools/testrunner/testrunner.isolate b/tools/testrunner/testrunner.isolate
index 533ef68..bfc9318 100644
--- a/tools/testrunner/testrunner.isolate
+++ b/tools/testrunner/testrunner.isolate
@@ -27,5 +27,12 @@
         ],
       },
     }],
+    ['lsan==1', {
+      'variables': {
+        'files': [
+          '../memory/lsan/suppressions.txt',
+        ],
+      },
+    }],
   ],
 }
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index ec56d49..51b5ae6 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -25,7 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
 function inherits(childCtor, parentCtor) {
   childCtor.prototype.__proto__ = parentCtor.prototype;
 };
@@ -41,7 +40,7 @@
 
 
 V8Profile.IC_RE =
-    /^(?:CallIC|LoadIC|StoreIC)|(?:Builtin: (?:Keyed)?(?:Call|Load|Store)IC_)/;
+    /^(LoadGlobalIC: )|(Handler: )|(Stub: )|(Builtin: )|(BytecodeHandler: )|(?:CallIC|LoadIC|StoreIC)|(?:Builtin: (?:Keyed)?(?:Load|Store)IC_)/;
 
 
 /**
diff --git a/tools/try_perf.py b/tools/try_perf.py
index 05e240e..b9dac9c 100755
--- a/tools/try_perf.py
+++ b/tools/try_perf.py
@@ -46,7 +46,6 @@
   'octane-pr',
   'octane-tf',
   'octane-tf-pr',
-  'simdjs',
   'sunspider',
   'sunspider-ignition',
   'unity',
diff --git a/tools/turbolizer/node.js b/tools/turbolizer/node.js
index 3656e5d..b718cdc 100644
--- a/tools/turbolizer/node.js
+++ b/tools/turbolizer/node.js
@@ -26,7 +26,7 @@
     return this.opcode.startsWith('JS');
   },
   isSimplified: function() {
-    if (this.isJavaScript) return false;
+    if (this.isJavaScript()) return false;
     return this.opcode.endsWith('Phi') ||
       this.opcode.startsWith('Boolean') ||
       this.opcode.startsWith('Number') ||
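
The turbolizer fix above is a classic truthiness bug: `this.isJavaScript` without parentheses evaluates the method object, which is always truthy, so isSimplified() unconditionally returned false before the fix. The same pitfall in a Python analogue:

  class Node(object):
    def is_javascript(self):
      return False

  n = Node()
  print(bool(n.is_javascript))    # True:  the bound method object itself
  print(bool(n.is_javascript()))  # False: the method's return value
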
diff --git a/tools/unittests/run_perf_test.py b/tools/unittests/run_perf_test.py
index 1a4d738..e7342e6 100644
--- a/tools/unittests/run_perf_test.py
+++ b/tools/unittests/run_perf_test.py
@@ -19,6 +19,10 @@
 # Requires python-coverage and python-mock. Native python coverage
 # version >= 3.7.1 should be installed to get the best speed.
 
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+RUN_PERF = os.path.join(BASE_DIR, 'run_perf.py')
+TEST_DATA = os.path.join(BASE_DIR, 'unittests', 'testdata')
+
 TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-perf")
 
 V8_JSON = {
@@ -295,12 +299,12 @@
       {"name": "DeltaBlue", "results": ["5.0", "6.0"], "stddev": "0.8"},
     ])
     self._VerifyErrors(
-        ["Test Richards should only run once since a stddev is provided "
+        ["Test test/Richards should only run once since a stddev is provided "
          "by the test.",
-         "Test DeltaBlue should only run once since a stddev is provided "
+         "Test test/DeltaBlue should only run once since a stddev is provided "
          "by the test.",
          "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for test "
-         "DeltaBlue."])
+         "test/DeltaBlue."])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
 
   def testBuildbot(self):
@@ -340,7 +344,7 @@
     ])
     self._VerifyErrors(
         ["Regexp \"^Richards: (.+)$\" "
-         "returned a non-numeric for test Richards.",
+         "returned a non-numeric for test test/Richards.",
          "Not all traces have the same number of results."])
     self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
 
@@ -353,7 +357,7 @@
       {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
     ])
     self._VerifyErrors(
-        ["Regexp \"^Richards: (.+)$\" didn't match for test Richards."])
+        ["Regexp \"^Richards: (.+)$\" didn't match for test test/Richards."])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
 
   def testOneRunGeneric(self):
@@ -397,8 +401,8 @@
       {"name": "DeltaBlue", "results": [], "stddev": ""},
     ])
     self._VerifyErrors([
-      "Regexp \"^Richards: (.+)$\" didn't match for test Richards.",
-      "Regexp \"^DeltaBlue: (.+)$\" didn't match for test DeltaBlue.",
+      "Regexp \"^Richards: (.+)$\" didn't match for test test/Richards.",
+      "Regexp \"^DeltaBlue: (.+)$\" didn't match for test test/DeltaBlue.",
     ])
     self._VerifyMock(
         path.join("out", "x64.release", "d7"), "--flag", "run.js", timeout=70)
@@ -473,3 +477,71 @@
     l, r = run_perf.Unzip(Gen())
     self.assertEquals([1, 2, 3], list(l()))
     self.assertEquals([2, 3, 4], list(r()))
+
+  #############################################################################
+  ### System tests
+
+  def _RunPerf(self, mocked_d8, test_json):
+    output_json = path.join(TEST_WORKSPACE, "output.json")
+    args = [
+      sys.executable, RUN_PERF,
+      "--binary-override-path", os.path.join(TEST_DATA, mocked_d8),
+      "--json-test-results", output_json,
+      os.path.join(TEST_DATA, test_json),
+    ]
+    subprocess.check_output(args)
+    return self._LoadResults(output_json)
+
+  def testNormal(self):
+    results = self._RunPerf("d8_mocked1.py", "test1.json")
+    self.assertEquals([], results['errors'])
+    self.assertEquals([
+      {
+        'units': 'score',
+        'graphs': ['test1', 'Richards'],
+        'results': [u'1.2', u'1.2'],
+        'stddev': '',
+      },
+      {
+        'units': 'score',
+        'graphs': ['test1', 'DeltaBlue'],
+        'results': [u'2.1', u'2.1'],
+        'stddev': '',
+      },
+    ], results['traces'])
+
+  def testResultsProcessor(self):
+    results = self._RunPerf("d8_mocked2.py", "test2.json")
+    self.assertEquals([], results['errors'])
+    self.assertEquals([
+      {
+        'units': 'score',
+        'graphs': ['test2', 'Richards'],
+        'results': [u'1.2', u'1.2'],
+        'stddev': '',
+      },
+      {
+        'units': 'score',
+        'graphs': ['test2', 'DeltaBlue'],
+        'results': [u'2.1', u'2.1'],
+        'stddev': '',
+      },
+    ], results['traces'])
+
+  def testResultsProcessorNested(self):
+    results = self._RunPerf("d8_mocked2.py", "test3.json")
+    self.assertEquals([], results['errors'])
+    self.assertEquals([
+      {
+        'units': 'score',
+        'graphs': ['test3', 'Octane', 'Richards'],
+        'results': [u'1.2'],
+        'stddev': '',
+      },
+      {
+        'units': 'score',
+        'graphs': ['test3', 'Octane', 'DeltaBlue'],
+        'results': [u'2.1'],
+        'stddev': '',
+      },
+    ], results['traces'])
diff --git a/tools/unittests/testdata/d8_mocked1.py b/tools/unittests/testdata/d8_mocked1.py
new file mode 100644
index 0000000..53405a6
--- /dev/null
+++ b/tools/unittests/testdata/d8_mocked1.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+print 'Richards: 1.2'
+print 'DeltaBlue: 2.1'
diff --git a/tools/unittests/testdata/d8_mocked2.py b/tools/unittests/testdata/d8_mocked2.py
new file mode 100644
index 0000000..71a3d04
--- /dev/null
+++ b/tools/unittests/testdata/d8_mocked2.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+print 'Richards1: 1'
+print 'DeltaBlue1: 1'
+print 'Richards2: 0.2'
+print 'DeltaBlue2: 1.0'
+print 'DeltaBlue3: 0.1'
diff --git a/tools/unittests/testdata/results_processor.py b/tools/unittests/testdata/results_processor.py
new file mode 100644
index 0000000..69c23e3
--- /dev/null
+++ b/tools/unittests/testdata/results_processor.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Fake results processor for testing that sums up the per-run Richards and
+DeltaBlue scores it reads from stdin.
+"""
+
+import fileinput
+import re
+
+richards = 0.0
+deltablue = 0.0
+
+for line in fileinput.input():
+  match = re.match(r'^Richards\d: (.*)$', line)
+  if match:
+    richards += float(match.group(1))
+  match = re.match(r'^DeltaBlue\d: (.*)$', line)
+  if match:
+    deltablue += float(match.group(1))
+
+print 'Richards: %f' % richards
+print 'DeltaBlue: %f' % deltablue
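
As a usage sketch, piping the second mocked d8 through this processor reproduces the numbers testResultsProcessor expects (1 + 0.2 and 1 + 1.0 + 0.1):

  python d8_mocked2.py | python results_processor.py
  # Richards: 1.200000
  # DeltaBlue: 2.100000

run_perf.py then reads these summed lines with the usual results_regexp.
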
diff --git a/tools/unittests/testdata/test1.json b/tools/unittests/testdata/test1.json
new file mode 100644
index 0000000..7fa1faa
--- /dev/null
+++ b/tools/unittests/testdata/test1.json
@@ -0,0 +1,11 @@
+{
+  "path": ["."],
+  "flags": [],
+  "main": "run.js",
+  "run_count": 2,
+  "results_regexp": "^%s: (.+)$",
+  "tests": [
+    {"name": "Richards"},
+    {"name": "DeltaBlue"}
+  ]
+}
diff --git a/tools/unittests/testdata/test2.json b/tools/unittests/testdata/test2.json
new file mode 100644
index 0000000..79fed26
--- /dev/null
+++ b/tools/unittests/testdata/test2.json
@@ -0,0 +1,12 @@
+{
+  "path": ["."],
+  "flags": [],
+  "main": "run.js",
+  "run_count": 2,
+  "results_processor": "results_processor.py",
+  "results_regexp": "^%s: (.+)$",
+  "tests": [
+    {"name": "Richards"},
+    {"name": "DeltaBlue"}
+  ]
+}
diff --git a/tools/unittests/testdata/test3.json b/tools/unittests/testdata/test3.json
new file mode 100644
index 0000000..1b7ef96
--- /dev/null
+++ b/tools/unittests/testdata/test3.json
@@ -0,0 +1,16 @@
+{
+  "path": ["."],
+  "flags": [],
+  "run_count": 1,
+  "results_processor": "results_processor.py",
+  "tests": [{
+    "path": ["."],
+    "name": "Octane",
+    "main": "run.js",
+    "results_regexp": "^%s: (.+)$",
+    "tests": [
+      {"name": "Richards"},
+      {"name": "DeltaBlue"}
+    ]
+  }]
+}
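
test3.json nests a suite one level deep, which is why the expected graphs in testResultsProcessorNested read ['test3', 'Octane', 'Richards']. A hypothetical helper showing how such nesting flattens into graph paths (not run_perf.py's actual traversal; the root 'name' is illustrative, since run_perf derives it from the file name):

  def graph_paths(suite, prefix=None):
    # Leaf tests contribute a full path; inner nodes only extend the prefix.
    prefix = (prefix or []) + [suite['name']]
    if not suite.get('tests'):
      yield prefix
    for child in suite.get('tests', []):
      for p in graph_paths(child, prefix):
        yield p

  root = {'name': 'test3', 'tests': [
      {'name': 'Octane', 'tests': [{'name': 'Richards'},
                                   {'name': 'DeltaBlue'}]}]}
  print(list(graph_paths(root)))
  # [['test3', 'Octane', 'Richards'], ['test3', 'Octane', 'DeltaBlue']]
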
diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py
index 0ff0cf3..a294b0b 100644
--- a/tools/v8heapconst.py
+++ b/tools/v8heapconst.py
@@ -30,34 +30,32 @@
 
 # List of known V8 instance types.
 INSTANCE_TYPES = {
-  64: "STRING_TYPE",
-  68: "ONE_BYTE_STRING_TYPE",
-  65: "CONS_STRING_TYPE",
-  69: "CONS_ONE_BYTE_STRING_TYPE",
-  67: "SLICED_STRING_TYPE",
-  71: "SLICED_ONE_BYTE_STRING_TYPE",
-  66: "EXTERNAL_STRING_TYPE",
-  70: "EXTERNAL_ONE_BYTE_STRING_TYPE",
-  74: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
-  82: "SHORT_EXTERNAL_STRING_TYPE",
-  86: "SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE",
-  90: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
   0: "INTERNALIZED_STRING_TYPE",
-  4: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
   2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
+  4: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
   6: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
   10: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
   18: "SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE",
   22: "SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
   26: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
+  64: "STRING_TYPE",
+  65: "CONS_STRING_TYPE",
+  66: "EXTERNAL_STRING_TYPE",
+  67: "SLICED_STRING_TYPE",
+  68: "ONE_BYTE_STRING_TYPE",
+  69: "CONS_ONE_BYTE_STRING_TYPE",
+  70: "EXTERNAL_ONE_BYTE_STRING_TYPE",
+  71: "SLICED_ONE_BYTE_STRING_TYPE",
+  74: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
+  82: "SHORT_EXTERNAL_STRING_TYPE",
+  86: "SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE",
+  90: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
   128: "SYMBOL_TYPE",
+  129: "HEAP_NUMBER_TYPE",
   130: "SIMD128_VALUE_TYPE",
+  131: "ODDBALL_TYPE",
   132: "MAP_TYPE",
   133: "CODE_TYPE",
-  131: "ODDBALL_TYPE",
-  169: "CELL_TYPE",
-  172: "PROPERTY_CELL_TYPE",
-  129: "HEAP_NUMBER_TYPE",
   134: "MUTABLE_HEAP_NUMBER_TYPE",
   135: "FOREIGN_TYPE",
   136: "BYTE_ARRAY_TYPE",
@@ -72,6 +70,7 @@
   145: "FIXED_FLOAT32_ARRAY_TYPE",
   146: "FIXED_FLOAT64_ARRAY_TYPE",
   147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
+  148: "FIXED_DOUBLE_ARRAY_TYPE",
   149: "FILLER_TYPE",
   150: "ACCESSOR_INFO_TYPE",
   151: "ACCESSOR_PAIR_TYPE",
@@ -80,222 +79,276 @@
   154: "CALL_HANDLER_INFO_TYPE",
   155: "FUNCTION_TEMPLATE_INFO_TYPE",
   156: "OBJECT_TEMPLATE_INFO_TYPE",
-  157: "SIGNATURE_INFO_TYPE",
-  158: "TYPE_SWITCH_INFO_TYPE",
-  160: "ALLOCATION_MEMENTO_TYPE",
-  159: "ALLOCATION_SITE_TYPE",
-  161: "SCRIPT_TYPE",
-  162: "TYPE_FEEDBACK_INFO_TYPE",
-  163: "ALIASED_ARGUMENTS_ENTRY_TYPE",
-  164: "BOX_TYPE",
-  173: "PROTOTYPE_INFO_TYPE",
-  174: "CONTEXT_EXTENSION_TYPE",
-  167: "FIXED_ARRAY_TYPE",
-  148: "FIXED_DOUBLE_ARRAY_TYPE",
-  168: "SHARED_FUNCTION_INFO_TYPE",
-  170: "WEAK_CELL_TYPE",
-  171: "TRANSITION_ARRAY_TYPE",
-  180: "JS_MESSAGE_OBJECT_TYPE",
-  179: "JS_VALUE_TYPE",
-  181: "JS_DATE_TYPE",
-  183: "JS_OBJECT_TYPE",
-  184: "JS_ARGUMENTS_TYPE",
-  185: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
-  186: "JS_GENERATOR_OBJECT_TYPE",
-  187: "JS_MODULE_TYPE",
-  176: "JS_GLOBAL_OBJECT_TYPE",
-  177: "JS_GLOBAL_PROXY_TYPE",
-  182: "JS_API_OBJECT_TYPE",
-  178: "JS_SPECIAL_API_OBJECT_TYPE",
-  188: "JS_ARRAY_TYPE",
-  189: "JS_ARRAY_BUFFER_TYPE",
-  190: "JS_TYPED_ARRAY_TYPE",
-  191: "JS_DATA_VIEW_TYPE",
-  175: "JS_PROXY_TYPE",
-  192: "JS_SET_TYPE",
-  193: "JS_MAP_TYPE",
-  194: "JS_SET_ITERATOR_TYPE",
-  195: "JS_MAP_ITERATOR_TYPE",
-  196: "JS_WEAK_MAP_TYPE",
-  197: "JS_WEAK_SET_TYPE",
-  198: "JS_PROMISE_TYPE",
-  199: "JS_REGEXP_TYPE",
-  200: "JS_ERROR_TYPE",
-  201: "JS_BOUND_FUNCTION_TYPE",
-  202: "JS_FUNCTION_TYPE",
+  157: "ALLOCATION_SITE_TYPE",
+  158: "ALLOCATION_MEMENTO_TYPE",
+  159: "SCRIPT_TYPE",
+  160: "TYPE_FEEDBACK_INFO_TYPE",
+  161: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+  162: "BOX_TYPE",
+  163: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
+  164: "PROMISE_REACTION_JOB_INFO_TYPE",
   165: "DEBUG_INFO_TYPE",
   166: "BREAK_POINT_INFO_TYPE",
+  167: "PROTOTYPE_INFO_TYPE",
+  168: "TUPLE2_TYPE",
+  169: "TUPLE3_TYPE",
+  170: "CONTEXT_EXTENSION_TYPE",
+  171: "CONSTANT_ELEMENTS_PAIR_TYPE",
+  172: "MODULE_TYPE",
+  173: "MODULE_INFO_ENTRY_TYPE",
+  174: "FIXED_ARRAY_TYPE",
+  175: "TRANSITION_ARRAY_TYPE",
+  176: "SHARED_FUNCTION_INFO_TYPE",
+  177: "CELL_TYPE",
+  178: "WEAK_CELL_TYPE",
+  179: "PROPERTY_CELL_TYPE",
+  180: "JS_PROXY_TYPE",
+  181: "JS_GLOBAL_OBJECT_TYPE",
+  182: "JS_GLOBAL_PROXY_TYPE",
+  183: "JS_SPECIAL_API_OBJECT_TYPE",
+  184: "JS_VALUE_TYPE",
+  185: "JS_MESSAGE_OBJECT_TYPE",
+  186: "JS_DATE_TYPE",
+  187: "JS_API_OBJECT_TYPE",
+  188: "JS_OBJECT_TYPE",
+  189: "JS_ARGUMENTS_TYPE",
+  190: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+  191: "JS_GENERATOR_OBJECT_TYPE",
+  192: "JS_MODULE_NAMESPACE_TYPE",
+  193: "JS_ARRAY_TYPE",
+  194: "JS_ARRAY_BUFFER_TYPE",
+  195: "JS_TYPED_ARRAY_TYPE",
+  196: "JS_DATA_VIEW_TYPE",
+  197: "JS_SET_TYPE",
+  198: "JS_MAP_TYPE",
+  199: "JS_SET_ITERATOR_TYPE",
+  200: "JS_MAP_ITERATOR_TYPE",
+  201: "JS_WEAK_MAP_TYPE",
+  202: "JS_WEAK_SET_TYPE",
+  203: "JS_PROMISE_TYPE",
+  204: "JS_REGEXP_TYPE",
+  205: "JS_ERROR_TYPE",
+  206: "JS_STRING_ITERATOR_TYPE",
+  207: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE",
+  208: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE",
+  209: "JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE",
+  210: "JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  211: "JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  212: "JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  213: "JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  214: "JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  215: "JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  216: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  217: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  218: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  219: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  220: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  221: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  222: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  223: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  224: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  225: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+  226: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
+  227: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
+  228: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
+  229: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
+  230: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
+  231: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
+  232: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
+  233: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
+  234: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
+  235: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+  236: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+  237: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
+  238: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
+  239: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+  240: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+  241: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
+  242: "JS_BOUND_FUNCTION_TYPE",
+  243: "JS_FUNCTION_TYPE",
 }
 
 # List of known V8 maps.
 KNOWN_MAPS = {
-  0x08101: (138, "FreeSpaceMap"),
-  0x0812d: (132, "MetaMap"),
-  0x08159: (131, "NullMap"),
-  0x08185: (167, "FixedArrayMap"),
-  0x081b1: (4, "OneByteInternalizedStringMap"),
-  0x081dd: (149, "OnePointerFillerMap"),
-  0x08209: (149, "TwoPointerFillerMap"),
-  0x08235: (131, "UninitializedMap"),
-  0x08261: (131, "UndefinedMap"),
-  0x0828d: (129, "HeapNumberMap"),
-  0x082b9: (131, "TheHoleMap"),
-  0x082e5: (131, "BooleanMap"),
-  0x08311: (136, "ByteArrayMap"),
-  0x0833d: (167, "FixedCOWArrayMap"),
-  0x08369: (167, "HashTableMap"),
-  0x08395: (128, "SymbolMap"),
-  0x083c1: (68, "OneByteStringMap"),
-  0x083ed: (167, "ScopeInfoMap"),
-  0x08419: (168, "SharedFunctionInfoMap"),
-  0x08445: (133, "CodeMap"),
-  0x08471: (167, "FunctionContextMap"),
-  0x0849d: (169, "CellMap"),
-  0x084c9: (170, "WeakCellMap"),
-  0x084f5: (172, "GlobalPropertyCellMap"),
-  0x08521: (135, "ForeignMap"),
-  0x0854d: (171, "TransitionArrayMap"),
-  0x08579: (131, "NoInterceptorResultSentinelMap"),
-  0x085a5: (131, "ArgumentsMarkerMap"),
-  0x085d1: (167, "NativeContextMap"),
-  0x085fd: (167, "ModuleContextMap"),
-  0x08629: (167, "ScriptContextMap"),
-  0x08655: (167, "BlockContextMap"),
-  0x08681: (167, "CatchContextMap"),
-  0x086ad: (167, "WithContextMap"),
-  0x086d9: (148, "FixedDoubleArrayMap"),
-  0x08705: (134, "MutableHeapNumberMap"),
-  0x08731: (167, "OrderedHashTableMap"),
-  0x0875d: (167, "SloppyArgumentsElementsMap"),
-  0x08789: (180, "JSMessageObjectMap"),
-  0x087b5: (183, "NeanderMap"),
-  0x087e1: (137, "BytecodeArrayMap"),
-  0x0880d: (64, "StringMap"),
-  0x08839: (69, "ConsOneByteStringMap"),
-  0x08865: (65, "ConsStringMap"),
-  0x08891: (67, "SlicedStringMap"),
-  0x088bd: (71, "SlicedOneByteStringMap"),
-  0x088e9: (66, "ExternalStringMap"),
-  0x08915: (74, "ExternalStringWithOneByteDataMap"),
-  0x08941: (70, "ExternalOneByteStringMap"),
-  0x0896d: (82, "ShortExternalStringMap"),
-  0x08999: (90, "ShortExternalStringWithOneByteDataMap"),
-  0x089c5: (0, "InternalizedStringMap"),
-  0x089f1: (2, "ExternalInternalizedStringMap"),
-  0x08a1d: (10, "ExternalInternalizedStringWithOneByteDataMap"),
-  0x08a49: (6, "ExternalOneByteInternalizedStringMap"),
-  0x08a75: (18, "ShortExternalInternalizedStringMap"),
-  0x08aa1: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
-  0x08acd: (22, "ShortExternalOneByteInternalizedStringMap"),
-  0x08af9: (86, "ShortExternalOneByteStringMap"),
-  0x08b25: (130, "Float32x4Map"),
-  0x08b51: (130, "Int32x4Map"),
-  0x08b7d: (130, "Uint32x4Map"),
-  0x08ba9: (130, "Bool32x4Map"),
-  0x08bd5: (130, "Int16x8Map"),
-  0x08c01: (130, "Uint16x8Map"),
-  0x08c2d: (130, "Bool16x8Map"),
-  0x08c59: (130, "Int8x16Map"),
-  0x08c85: (130, "Uint8x16Map"),
-  0x08cb1: (130, "Bool8x16Map"),
-  0x08cdd: (131, "ExceptionMap"),
-  0x08d09: (131, "TerminationExceptionMap"),
-  0x08d35: (131, "OptimizedOutMap"),
-  0x08d61: (131, "StaleRegisterMap"),
-  0x08d8d: (167, "DebugEvaluateContextMap"),
-  0x08db9: (167, "ScriptContextTableMap"),
-  0x08de5: (167, "UnseededNumberDictionaryMap"),
-  0x08e11: (183, "ExternalMap"),
-  0x08e3d: (86, "NativeSourceStringMap"),
-  0x08e69: (140, "FixedUint8ArrayMap"),
-  0x08e95: (139, "FixedInt8ArrayMap"),
-  0x08ec1: (142, "FixedUint16ArrayMap"),
-  0x08eed: (141, "FixedInt16ArrayMap"),
-  0x08f19: (144, "FixedUint32ArrayMap"),
-  0x08f45: (143, "FixedInt32ArrayMap"),
-  0x08f71: (145, "FixedFloat32ArrayMap"),
-  0x08f9d: (146, "FixedFloat64ArrayMap"),
-  0x08fc9: (147, "FixedUint8ClampedArrayMap"),
-  0x08ff5: (161, "ScriptMap"),
-  0x09021: (159, "AllocationSiteMap"),
-  0x0904d: (160, "AllocationMementoMap"),
-  0x09079: (150, "AccessorInfoMap"),
-  0x090a5: (164, "BoxMap"),
-  0x090d1: (151, "AccessorPairMap"),
-  0x090fd: (152, "AccessCheckInfoMap"),
-  0x09129: (153, "InterceptorInfoMap"),
-  0x09155: (154, "CallHandlerInfoMap"),
-  0x09181: (155, "FunctionTemplateInfoMap"),
-  0x091ad: (156, "ObjectTemplateInfoMap"),
-  0x091d9: (162, "TypeFeedbackInfoMap"),
-  0x09205: (163, "AliasedArgumentsEntryMap"),
-  0x09231: (165, "DebugInfoMap"),
-  0x0925d: (166, "BreakPointInfoMap"),
-  0x09289: (173, "PrototypeInfoMap"),
-  0x092b5: (174, "ContextExtensionMap"),
+  0x84101: (138, "FreeSpaceMap"),
+  0x8412d: (132, "MetaMap"),
+  0x84159: (131, "NullMap"),
+  0x84185: (174, "FixedArrayMap"),
+  0x841b1: (4, "OneByteInternalizedStringMap"),
+  0x841dd: (149, "OnePointerFillerMap"),
+  0x84209: (149, "TwoPointerFillerMap"),
+  0x84235: (131, "UninitializedMap"),
+  0x84261: (131, "UndefinedMap"),
+  0x8428d: (129, "HeapNumberMap"),
+  0x842b9: (131, "TheHoleMap"),
+  0x842e5: (131, "BooleanMap"),
+  0x84311: (136, "ByteArrayMap"),
+  0x8433d: (174, "FixedCOWArrayMap"),
+  0x84369: (174, "HashTableMap"),
+  0x84395: (128, "SymbolMap"),
+  0x843c1: (68, "OneByteStringMap"),
+  0x843ed: (174, "ScopeInfoMap"),
+  0x84419: (176, "SharedFunctionInfoMap"),
+  0x84445: (133, "CodeMap"),
+  0x84471: (174, "FunctionContextMap"),
+  0x8449d: (177, "CellMap"),
+  0x844c9: (178, "WeakCellMap"),
+  0x844f5: (179, "GlobalPropertyCellMap"),
+  0x84521: (135, "ForeignMap"),
+  0x8454d: (175, "TransitionArrayMap"),
+  0x84579: (131, "NoInterceptorResultSentinelMap"),
+  0x845a5: (131, "ArgumentsMarkerMap"),
+  0x845d1: (174, "NativeContextMap"),
+  0x845fd: (174, "ModuleContextMap"),
+  0x84629: (174, "ScriptContextMap"),
+  0x84655: (174, "BlockContextMap"),
+  0x84681: (174, "CatchContextMap"),
+  0x846ad: (174, "WithContextMap"),
+  0x846d9: (148, "FixedDoubleArrayMap"),
+  0x84705: (134, "MutableHeapNumberMap"),
+  0x84731: (174, "OrderedHashTableMap"),
+  0x8475d: (174, "SloppyArgumentsElementsMap"),
+  0x84789: (185, "JSMessageObjectMap"),
+  0x847b5: (137, "BytecodeArrayMap"),
+  0x847e1: (174, "ModuleInfoMap"),
+  0x8480d: (64, "StringMap"),
+  0x84839: (69, "ConsOneByteStringMap"),
+  0x84865: (65, "ConsStringMap"),
+  0x84891: (67, "SlicedStringMap"),
+  0x848bd: (71, "SlicedOneByteStringMap"),
+  0x848e9: (66, "ExternalStringMap"),
+  0x84915: (74, "ExternalStringWithOneByteDataMap"),
+  0x84941: (70, "ExternalOneByteStringMap"),
+  0x8496d: (82, "ShortExternalStringMap"),
+  0x84999: (90, "ShortExternalStringWithOneByteDataMap"),
+  0x849c5: (0, "InternalizedStringMap"),
+  0x849f1: (2, "ExternalInternalizedStringMap"),
+  0x84a1d: (10, "ExternalInternalizedStringWithOneByteDataMap"),
+  0x84a49: (6, "ExternalOneByteInternalizedStringMap"),
+  0x84a75: (18, "ShortExternalInternalizedStringMap"),
+  0x84aa1: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
+  0x84acd: (22, "ShortExternalOneByteInternalizedStringMap"),
+  0x84af9: (86, "ShortExternalOneByteStringMap"),
+  0x84b25: (130, "Float32x4Map"),
+  0x84b51: (130, "Int32x4Map"),
+  0x84b7d: (130, "Uint32x4Map"),
+  0x84ba9: (130, "Bool32x4Map"),
+  0x84bd5: (130, "Int16x8Map"),
+  0x84c01: (130, "Uint16x8Map"),
+  0x84c2d: (130, "Bool16x8Map"),
+  0x84c59: (130, "Int8x16Map"),
+  0x84c85: (130, "Uint8x16Map"),
+  0x84cb1: (130, "Bool8x16Map"),
+  0x84cdd: (131, "ExceptionMap"),
+  0x84d09: (131, "TerminationExceptionMap"),
+  0x84d35: (131, "OptimizedOutMap"),
+  0x84d61: (131, "StaleRegisterMap"),
+  0x84d8d: (174, "DebugEvaluateContextMap"),
+  0x84db9: (174, "ScriptContextTableMap"),
+  0x84de5: (174, "UnseededNumberDictionaryMap"),
+  0x84e11: (188, "ExternalMap"),
+  0x84e3d: (86, "NativeSourceStringMap"),
+  0x84e69: (140, "FixedUint8ArrayMap"),
+  0x84e95: (139, "FixedInt8ArrayMap"),
+  0x84ec1: (142, "FixedUint16ArrayMap"),
+  0x84eed: (141, "FixedInt16ArrayMap"),
+  0x84f19: (144, "FixedUint32ArrayMap"),
+  0x84f45: (143, "FixedInt32ArrayMap"),
+  0x84f71: (145, "FixedFloat32ArrayMap"),
+  0x84f9d: (146, "FixedFloat64ArrayMap"),
+  0x84fc9: (147, "FixedUint8ClampedArrayMap"),
+  0x84ff5: (159, "ScriptMap"),
+  0x85021: (157, "AllocationSiteMap"),
+  0x8504d: (158, "AllocationMementoMap"),
+  0x85079: (150, "AccessorInfoMap"),
+  0x850a5: (155, "FunctionTemplateInfoMap"),
+  0x850d1: (168, "Tuple2Map"),
+  0x850fd: (167, "PrototypeInfoMap"),
+  0x85129: (151, "AccessorPairMap"),
+  0x85155: (152, "AccessCheckInfoMap"),
+  0x85181: (153, "InterceptorInfoMap"),
+  0x851ad: (154, "CallHandlerInfoMap"),
+  0x851d9: (156, "ObjectTemplateInfoMap"),
+  0x85205: (160, "TypeFeedbackInfoMap"),
+  0x85231: (161, "AliasedArgumentsEntryMap"),
+  0x8525d: (162, "BoxMap"),
+  0x85289: (163, "PromiseResolveThenableJobInfoMap"),
+  0x852b5: (164, "PromiseReactionJobInfoMap"),
+  0x852e1: (165, "DebugInfoMap"),
+  0x8530d: (166, "BreakPointInfoMap"),
+  0x85339: (169, "Tuple3Map"),
+  0x85365: (170, "ContextExtensionMap"),
+  0x85391: (171, "ConstantElementsPairMap"),
+  0x853bd: (172, "ModuleMap"),
+  0x853e9: (173, "ModuleInfoEntryMap"),
 }
 
 # List of known V8 objects.
 KNOWN_OBJECTS = {
-  ("OLD_SPACE", 0x08101): "NullValue",
-  ("OLD_SPACE", 0x0811d): "EmptyDescriptorArray",
-  ("OLD_SPACE", 0x08125): "EmptyFixedArray",
-  ("OLD_SPACE", 0x08151): "UninitializedValue",
-  ("OLD_SPACE", 0x081a1): "UndefinedValue",
-  ("OLD_SPACE", 0x081bd): "NanValue",
-  ("OLD_SPACE", 0x081cd): "TheHoleValue",
-  ("OLD_SPACE", 0x081f9): "TrueValue",
-  ("OLD_SPACE", 0x08239): "FalseValue",
-  ("OLD_SPACE", 0x08269): "empty_string",
-  ("OLD_SPACE", 0x08275): "NoInterceptorResultSentinel",
-  ("OLD_SPACE", 0x082bd): "ArgumentsMarker",
-  ("OLD_SPACE", 0x082f5): "EmptyByteArray",
-  ("OLD_SPACE", 0x082fd): "EmptyWeakCell",
-  ("OLD_SPACE", 0x0830d): "InfinityValue",
-  ("OLD_SPACE", 0x0831d): "MinusZeroValue",
-  ("OLD_SPACE", 0x0832d): "MinusInfinityValue",
-  ("OLD_SPACE", 0x09961): "EmptyLiteralsArray",
-  ("OLD_SPACE", 0x0996d): "ClearedOptimizedCodeMap",
-  ("OLD_SPACE", 0x09979): "Exception",
-  ("OLD_SPACE", 0x099ad): "TerminationException",
-  ("OLD_SPACE", 0x099ed): "OptimizedOut",
-  ("OLD_SPACE", 0x09a25): "StaleRegister",
-  ("OLD_SPACE", 0x09a5d): "EmptyFixedUint8Array",
-  ("OLD_SPACE", 0x09a6d): "EmptyFixedInt8Array",
-  ("OLD_SPACE", 0x09a7d): "EmptyFixedUint16Array",
-  ("OLD_SPACE", 0x09a8d): "EmptyFixedInt16Array",
-  ("OLD_SPACE", 0x09a9d): "EmptyFixedUint32Array",
-  ("OLD_SPACE", 0x09aad): "EmptyFixedInt32Array",
-  ("OLD_SPACE", 0x09abd): "EmptyFixedFloat32Array",
-  ("OLD_SPACE", 0x09acd): "EmptyFixedFloat64Array",
-  ("OLD_SPACE", 0x09add): "EmptyFixedUint8ClampedArray",
-  ("OLD_SPACE", 0x09aed): "EmptyScript",
-  ("OLD_SPACE", 0x09b2d): "UndefinedCell",
-  ("OLD_SPACE", 0x09b35): "EmptySloppyArgumentsElements",
-  ("OLD_SPACE", 0x09b45): "EmptySlowElementDictionary",
-  ("OLD_SPACE", 0x09b91): "DummyVector",
-  ("OLD_SPACE", 0x09c09): "EmptyPropertyCell",
-  ("OLD_SPACE", 0x09c19): "ArrayProtector",
-  ("OLD_SPACE", 0x09c29): "IsConcatSpreadableProtector",
-  ("OLD_SPACE", 0x09c31): "HasInstanceProtector",
-  ("OLD_SPACE", 0x09c41): "SpeciesProtector",
-  ("OLD_SPACE", 0x09c49): "NumberStringCache",
-  ("OLD_SPACE", 0x0a451): "SingleCharacterStringCache",
-  ("OLD_SPACE", 0x0a909): "StringSplitCache",
-  ("OLD_SPACE", 0x0ad11): "RegExpMultipleCache",
-  ("OLD_SPACE", 0x0b119): "NativesSourceCache",
-  ("OLD_SPACE", 0x0b2e5): "ExperimentalNativesSourceCache",
-  ("OLD_SPACE", 0x0b309): "ExtraNativesSourceCache",
-  ("OLD_SPACE", 0x0b325): "ExperimentalExtraNativesSourceCache",
-  ("OLD_SPACE", 0x0b331): "IntrinsicFunctionNames",
-  ("OLD_SPACE", 0x244bd): "EmptyPropertiesDictionary",
-  ("OLD_SPACE", 0x24509): "ScriptList",
-  ("OLD_SPACE", 0x3fd85): "CodeStubs",
-  ("OLD_SPACE", 0x49285): "WeakObjectToCodeTable",
-  ("OLD_SPACE", 0x49399): "WeakNewSpaceObjectToCodeList",
-  ("OLD_SPACE", 0x493e1): "NoScriptSharedFunctionInfos",
-  ("OLD_SPACE", 0x50cf9): "MessageListeners",
-  ("OLD_SPACE", 0x5494d): "StringTable",
-  ("CODE_SPACE", 0x184a1): "JsConstructEntryCode",
-  ("CODE_SPACE", 0x23fe1): "JsEntryCode",
+  ("OLD_SPACE", 0x84101): "NullValue",
+  ("OLD_SPACE", 0x8411d): "EmptyDescriptorArray",
+  ("OLD_SPACE", 0x84125): "EmptyFixedArray",
+  ("OLD_SPACE", 0x84151): "UninitializedValue",
+  ("OLD_SPACE", 0x841a1): "UndefinedValue",
+  ("OLD_SPACE", 0x841bd): "NanValue",
+  ("OLD_SPACE", 0x841cd): "TheHoleValue",
+  ("OLD_SPACE", 0x841fd): "HoleNanValue",
+  ("OLD_SPACE", 0x84209): "TrueValue",
+  ("OLD_SPACE", 0x84249): "FalseValue",
+  ("OLD_SPACE", 0x84279): "empty_string",
+  ("OLD_SPACE", 0x84285): "NoInterceptorResultSentinel",
+  ("OLD_SPACE", 0x842cd): "ArgumentsMarker",
+  ("OLD_SPACE", 0x84305): "EmptyByteArray",
+  ("OLD_SPACE", 0x8430d): "EmptyWeakCell",
+  ("OLD_SPACE", 0x8431d): "InfinityValue",
+  ("OLD_SPACE", 0x8432d): "MinusZeroValue",
+  ("OLD_SPACE", 0x8433d): "MinusInfinityValue",
+  ("OLD_SPACE", 0x85939): "EmptyLiteralsArray",
+  ("OLD_SPACE", 0x85945): "EmptyTypeFeedbackVector",
+  ("OLD_SPACE", 0x85955): "EmptyScopeInfo",
+  ("OLD_SPACE", 0x8595d): "Exception",
+  ("OLD_SPACE", 0x85991): "TerminationException",
+  ("OLD_SPACE", 0x859d1): "OptimizedOut",
+  ("OLD_SPACE", 0x85a09): "StaleRegister",
+  ("OLD_SPACE", 0x85a41): "EmptyFixedUint8Array",
+  ("OLD_SPACE", 0x85a51): "EmptyFixedInt8Array",
+  ("OLD_SPACE", 0x85a61): "EmptyFixedUint16Array",
+  ("OLD_SPACE", 0x85a71): "EmptyFixedInt16Array",
+  ("OLD_SPACE", 0x85a81): "EmptyFixedUint32Array",
+  ("OLD_SPACE", 0x85a91): "EmptyFixedInt32Array",
+  ("OLD_SPACE", 0x85aa1): "EmptyFixedFloat32Array",
+  ("OLD_SPACE", 0x85ab1): "EmptyFixedFloat64Array",
+  ("OLD_SPACE", 0x85ac1): "EmptyFixedUint8ClampedArray",
+  ("OLD_SPACE", 0x85ad1): "EmptyScript",
+  ("OLD_SPACE", 0x85b11): "UndefinedCell",
+  ("OLD_SPACE", 0x85b19): "EmptySloppyArgumentsElements",
+  ("OLD_SPACE", 0x85b29): "EmptySlowElementDictionary",
+  ("OLD_SPACE", 0x85b75): "DummyVector",
+  ("OLD_SPACE", 0x85bb9): "EmptyPropertyCell",
+  ("OLD_SPACE", 0x85bc9): "ArrayProtector",
+  ("OLD_SPACE", 0x85bd9): "IsConcatSpreadableProtector",
+  ("OLD_SPACE", 0x85be1): "HasInstanceProtector",
+  ("OLD_SPACE", 0x85bf1): "SpeciesProtector",
+  ("OLD_SPACE", 0x85bf9): "StringLengthProtector",
+  ("OLD_SPACE", 0x85c09): "FastArrayIterationProtector",
+  ("OLD_SPACE", 0x85c11): "ArrayIteratorProtector",
+  ("OLD_SPACE", 0x85c19): "ArrayBufferNeuteringProtector",
+  ("OLD_SPACE", 0x85c29): "NumberStringCache",
+  ("OLD_SPACE", 0x86431): "SingleCharacterStringCache",
+  ("OLD_SPACE", 0x86859): "StringSplitCache",
+  ("OLD_SPACE", 0x86c61): "RegExpMultipleCache",
+  ("OLD_SPACE", 0x87069): "NativesSourceCache",
+  ("OLD_SPACE", 0x871d1): "ExperimentalNativesSourceCache",
+  ("OLD_SPACE", 0x871ed): "ExtraNativesSourceCache",
+  ("OLD_SPACE", 0x87209): "ExperimentalExtraNativesSourceCache",
+  ("OLD_SPACE", 0x87215): "EmptyPropertiesDictionary",
+  ("OLD_SPACE", 0x87261): "ScriptList",
+  ("OLD_SPACE", 0x9ab99): "CodeStubs",
+  ("OLD_SPACE", 0xa2bd5): "WeakObjectToCodeTable",
+  ("OLD_SPACE", 0xa2ce9): "WeakNewSpaceObjectToCodeList",
+  ("OLD_SPACE", 0xa2d31): "NoScriptSharedFunctionInfos",
+  ("OLD_SPACE", 0xb26e9): "MessageListeners",
+  ("OLD_SPACE", 0xb6d75): "StringTable",
+  ("CODE_SPACE", 0x1aa01): "JsConstructEntryCode",
+  ("CODE_SPACE", 0x29ba1): "JsEntryCode",
 }
diff --git a/tools/whitespace.txt b/tools/whitespace.txt
index 0f4384f..062a517 100644
--- a/tools/whitespace.txt
+++ b/tools/whitespace.txt
@@ -6,4 +6,5 @@
 "I'm so deoptimized today!"
 The doubles heard this and started to unbox.
 The Smi looked at them when a crazy v8-autoroll account showed up......
-The autoroller bought a round of Himbeerbrause. Suddenly .......
+The autoroller bought a round of Himbeerbrause. Suddenly ......
+.